diff --git a/docs/docs/internals/explicit-nulls.md b/docs/docs/internals/explicit-nulls.md index 6164a6d330c1..87348b46098d 100644 --- a/docs/docs/internals/explicit-nulls.md +++ b/docs/docs/internals/explicit-nulls.md @@ -76,10 +76,9 @@ Within `Types.scala`, we defined some extractors to work with nullable unions: `OrNull` and `OrUncheckedNull`. ```scala -(tp: Type) match { - case OrNull(tp1) => // if tp is a nullable union: tp1 | Null - case _ => // otherwise -} +(tp: Type) match + case OrNull(tp1) => // if tp is a nullable union: tp1 | Null + case _ => // otherwise ``` This extractor will call utility methods in `NullOpsDecorator.scala`. All of these @@ -112,30 +111,25 @@ The reason for casting to `x.type & T`, as opposed to just `T`, is that it allow support flow typing for paths of length greater than one. ```scala -abstract class Node { - val x: String - val next: Node | Null -} - -def f = { - val l: Node|Null = ??? - if (l != null && l.next != null) { - val third: l.next.next.type = l.next.next - } -} +abstract class Node: + val x: String + val next: Node | Null + +def f = + val l: Node|Null = ??? + if l != null && l.next != null then + val third: l.next.next.type = l.next.next ``` After typing, `f` becomes: ```scala -def f = { - val l: Node|Null = ??? - if (l != null && l.$asInstanceOf$[l.type & Node].next != null) { - val third: - l.$asInstanceOf$[l.type & Node].next.$asInstanceOf$[(l.type & Node).next.type & Node].next.type = - l.$asInstanceOf$[l.type & Node].next.$asInstanceOf$[(l.type & Node).next.type & Node].next - } -} +def f = + val l: Node|Null = ??? + if l != null && l.$asInstanceOf$[l.type & Node].next != null then + val third: + l.$asInstanceOf$[l.type & Node].next.$asInstanceOf$[(l.type & Node).next.type & Node].next.type = + l.$asInstanceOf$[l.type & Node].next.$asInstanceOf$[(l.type & Node).next.type & Node].next ``` Notice that in the example above `(l.type & Node).next.type & Node` is still a stable path, so we can use it in the type and track it for flow typing. 
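For readers coming at this from the source-language side, here is a minimal sketch of the single-step case that the `Node` example above generalises (assuming the code is compiled with `-Yexplicit-nulls`; the `greet` method is made up for illustration):

```scala
// `s` starts out as `String | Null`. After the null test, flow typing lets the
// compiler treat the stable path `s` as `String`, which corresponds to the
// `s.type & String` cast inserted into the typed tree.
def greet(s: String | Null): String =
  if s != null then s.length.toString
  else "no input"
```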
diff --git a/docs/docs/internals/syntax.md b/docs/docs/internals/syntax.md index ccd50f47607b..56648a81d4c2 100644 --- a/docs/docs/internals/syntax.md +++ b/docs/docs/internals/syntax.md @@ -104,10 +104,8 @@ yield ### Soft keywords ``` -as derives end extension inline opaque open transparent using -* + - +derives end extension inline infix opaque open transparent using | * + - ``` - ## Context-free Syntax The context-free syntax of Scala is given by the following EBNF diff --git a/docs/docs/reference/changed-features/compiler-plugins.md b/docs/docs/reference/changed-features/compiler-plugins.md index 9d86a485b9ab..3bfdaffdbf68 100644 --- a/docs/docs/reference/changed-features/compiler-plugins.md +++ b/docs/docs/reference/changed-features/compiler-plugins.md @@ -62,33 +62,30 @@ import dotty.tools.dotc.core.Symbols._ import dotty.tools.dotc.plugins.{PluginPhase, StandardPlugin} import dotty.tools.dotc.transform.{Pickler, Staging} -class DivideZero extends StandardPlugin { - val name: String = "divideZero" - override val description: String = "divide zero check" - - def init(options: List[String]): List[PluginPhase] = - (new DivideZeroPhase) :: Nil -} - -class DivideZeroPhase extends PluginPhase { - import tpd._ - - val phaseName = "divideZero" - - override val runsAfter = Set(Pickler.name) - override val runsBefore = Set(Staging.name) - - override def transformApply(tree: Apply)(implicit ctx: Context): Tree = { - tree match { - case Apply(Select(rcvr, nme.DIV), List(Literal(Constant(0)))) - if rcvr.tpe <:< defn.IntType => - report.error("dividing by zero", tree.pos) - case _ => - () - } - tree - } -} +class DivideZero extends StandardPlugin: + val name: String = "divideZero" + override val description: String = "divide zero check" + + def init(options: List[String]): List[PluginPhase] = + (new DivideZeroPhase) :: Nil + +class DivideZeroPhase extends PluginPhase: + import tpd._ + + val phaseName = "divideZero" + + override val runsAfter = Set(Pickler.name) + override val runsBefore = Set(Staging.name) + + override def transformApply(tree: Apply)(implicit ctx: Context): Tree = + tree match + case Apply(Select(rcvr, nme.DIV), List(Literal(Constant(0)))) + if rcvr.tpe <:< defn.IntType => + report.error("dividing by zero", tree.pos) + case _ => + () + tree +end DivideZeroPhase ``` The plugin main class (`DivideZero`) must extend the trait `StandardPlugin` @@ -111,13 +108,13 @@ import dotty.tools.dotc.core.Contexts.Context import dotty.tools.dotc.core.Phases.Phase import dotty.tools.dotc.plugins.ResearchPlugin -class DummyResearchPlugin extends ResearchPlugin { - val name: String = "dummy" - override val description: String = "dummy research plugin" +class DummyResearchPlugin extends ResearchPlugin: + val name: String = "dummy" + override val description: String = "dummy research plugin" - def init(options: List[String], phases: List[List[Phase]])(implicit ctx: Context): List[List[Phase]] = - phases -} + def init(options: List[String], phases: List[List[Phase]])(implicit ctx: Context): List[List[Phase]] = + phases +end DummyResearchPlugin ``` A research plugin must extend the trait `ResearchPlugin` and implement the diff --git a/docs/docs/reference/changed-features/implicit-conversions-spec.md b/docs/docs/reference/changed-features/implicit-conversions-spec.md index 007c05f1cd4e..e468c8ba2f43 100644 --- a/docs/docs/reference/changed-features/implicit-conversions-spec.md +++ b/docs/docs/reference/changed-features/implicit-conversions-spec.md @@ -17,7 +17,7 @@ The standard library defines an 
abstract class `Conversion`: package scala @java.lang.FunctionalInterface abstract class Conversion[-T, +U] extends Function1[T, U]: - def apply(x: T): U + def apply(x: T): U ``` Function literals are automatically converted to `Conversion` values. diff --git a/docs/docs/reference/changed-features/implicit-conversions.md b/docs/docs/reference/changed-features/implicit-conversions.md index 3e24d5acaa10..df713ed09072 100644 --- a/docs/docs/reference/changed-features/implicit-conversions.md +++ b/docs/docs/reference/changed-features/implicit-conversions.md @@ -34,7 +34,7 @@ method that expects a `java.lang.Integer` ```scala import scala.language.implicitConversions implicit def int2Integer(x: Int): java.lang.Integer = - x.asInstanceOf[java.lang.Integer] + x.asInstanceOf[java.lang.Integer] ``` The second example shows how to use `Conversion` to define an @@ -44,12 +44,11 @@ types: ```scala import scala.language.implicitConversions implicit def ordT[T, S]( - implicit conv: Conversion[T, S], - ordS: Ordering[S] - ): Ordering[T] = { - // `ordS` compares values of type `S`, but we can convert from `T` to `S` - (x: T, y: T) => ordS.compare(x, y) -} + implicit conv: Conversion[T, S], + ordS: Ordering[S] + ): Ordering[T] = + // `ordS` compares values of type `S`, but we can convert from `T` to `S` + (x: T, y: T) => ordS.compare(x, y) class A(val x: Int) // The type for which we want an `Ordering` diff --git a/docs/docs/reference/changed-features/implicit-resolution.md b/docs/docs/reference/changed-features/implicit-resolution.md index 732a9f5c4785..ed212bb44085 100644 --- a/docs/docs/reference/changed-features/implicit-resolution.md +++ b/docs/docs/reference/changed-features/implicit-resolution.md @@ -11,25 +11,25 @@ affect implicits on the language level. must be explicitly declared. Excepted are only values in local blocks where the type may still be inferred: ```scala - class C { + class C { - val ctx: Context = ... // ok + val ctx: Context = ... // ok - /*!*/ implicit val x = ... // error: type must be given explicitly + /*!*/ implicit val x = ... // error: type must be given explicitly - /*!*/ implicit def y = ... // error: type must be given explicitly - - val y = { + /*!*/ implicit def y = ... // error: type must be given explicitly + } + val y = { implicit val ctx = this.ctx // ok ... - } + } ``` **2.** Nesting is now taken into account for selecting an implicit. Consider for instance the following scenario: ```scala def f(implicit i: C) = { - def g(implicit j: C) = { - implicitly[C] - } + def g(implicit j: C) = { + implicitly[C] + } } ``` This will now resolve the `implicitly` call to `j`, because `j` is nested @@ -41,12 +41,12 @@ no longer applies. **3.** Package prefixes no longer contribute to the implicit search scope of a type. Example: ```scala package p + given a: A = A() - object o { - given b: B = B() - type C - } + object o: + given b: B = B() + type C ``` Both `a` and `b` are visible as implicits at the point of the definition of `type C`. However, a reference to `p.o.C` outside of package `p` will diff --git a/docs/docs/reference/changed-features/main-functions.md b/docs/docs/reference/changed-features/main-functions.md index b9e933329e11..3a063a17b412 100644 --- a/docs/docs/reference/changed-features/main-functions.md +++ b/docs/docs/reference/changed-features/main-functions.md @@ -7,22 +7,19 @@ Scala 3 offers a new way to define programs that can be invoked from the command A `@main` annotation on a method turns this method into an executable program. 
Example: ```scala -@main def happyBirthday(age: Int, name: String, others: String*) = { - val suffix = - (age % 100) match { +@main def happyBirthday(age: Int, name: String, others: String*) = + val suffix = + age % 100 match case 11 | 12 | 13 => "th" case _ => - (age % 10) match { - case 1 => "st" - case 2 => "nd" - case 3 => "rd" - case _ => "th" - } - } - val bldr = new StringBuilder(s"Happy $age$suffix birthday, $name") - for other <- others do bldr.append(" and ").append(other) - bldr.toString -} + age % 10 match + case 1 => "st" + case 2 => "nd" + case 3 => "rd" + case _ => "th" + val bldr = new StringBuilder(s"Happy $age$suffix birthday, $name") + for other <- others do bldr.append(" and ").append(other) + bldr.toString ``` This would generate a main program `happyBirthday` that could be called like this ``` @@ -59,18 +56,16 @@ The Scala compiler generates a program from a `@main` method `f` as follows: For instance, the `happyBirthDay` method above would generate additional code equivalent to the following class: ```scala -final class happyBirthday { - import scala.util.{CommandLineParser => CLP} - def main(args: Array[String]): Unit = - try - happyBirthday( - CLP.parseArgument[Int](args, 0), - CLP.parseArgument[String](args, 1), - CLP.parseRemainingArguments[String](args, 2)) - catch { - case error: CLP.ParseError => CLP.showError(error) - } -} +final class happyBirthday: + import scala.util.{CommandLineParser => CLP} + def main(args: Array[String]): Unit = + try + happyBirthday( + CLP.parseArgument[Int](args, 0), + CLP.parseArgument[String](args, 1), + CLP.parseRemainingArguments[String](args, 2)) + catch + case error: CLP.ParseError => CLP.showError(error) ``` **Note**: The `` modifier above expresses that the `main` method is generated as a static method of class `happyBirthDay`. It is not available for user programs in Scala. Regular "static" members are generated in Scala using objects instead. @@ -78,10 +73,9 @@ as a static method of class `happyBirthDay`. It is not available for user progra `@main` methods are the recommended scheme to generate programs that can be invoked from the command line in Scala 3. They replace the previous scheme to write program as objects with a special `App` parent class. In Scala 2, `happyBirthday` could be written also like this: ```scala -object happyBirthday extends App { - // needs by-hand parsing of arguments vector - ... -} +object happyBirthday extends App: + // needs by-hand parsing of arguments vector + ... ``` The previous functionality of `App`, which relied on the "magic" `DelayedInit` trait, is no longer available. `App` still exists in limited form for now, but it does not support command line arguments and will be deprecated in the future. If programs need to cross-build diff --git a/docs/docs/reference/changed-features/match-syntax.md b/docs/docs/reference/changed-features/match-syntax.md index f5b5b3800188..edada868b275 100644 --- a/docs/docs/reference/changed-features/match-syntax.md +++ b/docs/docs/reference/changed-features/match-syntax.md @@ -10,21 +10,31 @@ The syntactical precedence of match expressions has been changed. ```scala xs match { - case Nil => "empty" - case x :: xs1 => "nonempty" + case Nil => "empty" + case x :: xs1 => "nonempty" } match { - case "empty" => 0 - case "nonempty" => 1 + case "empty" => 0 + case "nonempty" => 1 } ``` + (or, dropping the optional braces) + + ```scala + xs match + case Nil => "empty" + case x :: xs1 => "nonempty" + match + case "empty" => 0 + case "nonempty" => 1 + ``` + 2. 
`match` may follow a period: ```scala - if xs.match { - case Nil => false - case _ => true - } + if xs.match + case Nil => false + case _ => true then "nonempty" else "empty" ``` diff --git a/docs/docs/reference/changed-features/numeric-literals.md b/docs/docs/reference/changed-features/numeric-literals.md index cb8c74547a18..9d1ce6b1ab49 100644 --- a/docs/docs/reference/changed-features/numeric-literals.md +++ b/docs/docs/reference/changed-features/numeric-literals.md @@ -14,9 +14,8 @@ val x: Long = -10_000_000_000 val y: BigInt = 0x123_abc_789_def_345_678_901 val z: BigDecimal = 110_222_799_799.99 -(y: BigInt) match { - case 123_456_789_012_345_678_901 => -} +(y: BigInt) match + case 123_456_789_012_345_678_901 => ``` The syntax of numeric literals is the same as before, except there are no pre-set limits how large they can be. @@ -63,9 +62,8 @@ To allow numeric literals, a type simply has to define a `given` instance of the `scala.util.FromDigits` type class, or one of its subclasses. `FromDigits` is defined as follows: ```scala -trait FromDigits[T] { - def fromDigits(digits: String): T -} +trait FromDigits[T]: + def fromDigits(digits: String): T ``` Implementations of the `fromDigits` convert strings of digits to the values of the implementation type `T`. @@ -77,28 +75,25 @@ The companion object `FromDigits` also defines subclasses of `FromDigits` for whole numbers with a given radix, for numbers with a decimal point, and for numbers that can have both a decimal point and an exponent: ```scala -object FromDigits { - - /** A subclass of `FromDigits` that also allows to convert whole - * number literals with a radix other than 10 - */ - trait WithRadix[T] extends FromDigits[T] { - def fromDigits(digits: String): T = fromDigits(digits, 10) - def fromDigits(digits: String, radix: Int): T - } - - /** A subclass of `FromDigits` that also allows to convert number - * literals containing a decimal point ".". - */ - trait Decimal[T] extends FromDigits[T] - - /** A subclass of `FromDigits`that allows also to convert number - * literals containing a decimal point "." or an - * exponent `('e' | 'E')['+' | '-']digit digit*`. - */ - trait Floating[T] extends Decimal[T] - ... -} +object FromDigits: + + /** A subclass of `FromDigits` that also allows to convert whole + * number literals with a radix other than 10 + */ + trait WithRadix[T] extends FromDigits[T]: + def fromDigits(digits: String): T = fromDigits(digits, 10) + def fromDigits(digits: String, radix: Int): T + + /** A subclass of `FromDigits` that also allows to convert number + * literals containing a decimal point ".". + */ + trait Decimal[T] extends FromDigits[T] + + /** A subclass of `FromDigits`that allows also to convert number + * literals containing a decimal point "." or an + * exponent `('e' | 'E')['+' | '-']digit digit*`. + */ + trait Floating[T] extends Decimal[T] ``` A user-defined number type can implement one of those, which signals to the compiler that hexadecimal numbers, decimal points, or exponents are also accepted in literals @@ -121,9 +116,8 @@ class MalformedNumber(msg: String = "malformed number literal") extends FromDigi As a fully worked out example, here is an implementation of a new numeric class, `BigFloat`, that accepts numeric literals. 
`BigFloat` is defined in terms of a `BigInt` mantissa and an `Int` exponent: ```scala -case class BigFloat(mantissa: BigInt, exponent: Int) { - override def toString = s"${mantissa}e${exponent}" -} +case class BigFloat(mantissa: BigInt, exponent: Int): + override def toString = s"${mantissa}e${exponent}" ``` `BigFloat` literals can have a decimal point as well as an exponent. E.g. the following expression should produce the `BigFloat` number `BigFloat(-123, 997)`: @@ -134,30 +128,32 @@ The companion object of `BigFloat` defines an `apply` constructor method to cons from a `digits` string. Here is a possible implementation: ```scala object BigFloat: - import scala.util.FromDigits - - def apply(digits: String): BigFloat = - val (mantissaDigits, givenExponent) = digits.toUpperCase.split('E') match - case Array(mantissaDigits, edigits) => - val expo = - try FromDigits.intFromDigits(edigits) - catch case ex: FromDigits.NumberTooLarge => - throw FromDigits.NumberTooLarge(s"exponent too large: $edigits") - (mantissaDigits, expo) - case Array(mantissaDigits) => - (mantissaDigits, 0) - val (intPart, exponent) = mantissaDigits.split('.') match - case Array(intPart, decimalPart) => - (intPart ++ decimalPart, givenExponent - decimalPart.length) - case Array(intPart) => - (intPart, givenExponent) - BigFloat(BigInt(intPart), exponent) + import scala.util.FromDigits + + def apply(digits: String): BigFloat = + val (mantissaDigits, givenExponent) = + digits.toUpperCase.split('E') match + case Array(mantissaDigits, edigits) => + val expo = + try FromDigits.intFromDigits(edigits) + catch case ex: FromDigits.NumberTooLarge => + throw FromDigits.NumberTooLarge(s"exponent too large: $edigits") + (mantissaDigits, expo) + case Array(mantissaDigits) => + (mantissaDigits, 0) + val (intPart, exponent) = + mantissaDigits.split('.') match + case Array(intPart, decimalPart) => + (intPart ++ decimalPart, givenExponent - decimalPart.length) + case Array(intPart) => + (intPart, givenExponent) + BigFloat(BigInt(intPart), exponent) ``` To accept `BigFloat` literals, all that's needed in addition is a `given` instance of type `FromDigits.Floating[BigFloat]`: ```scala - given FromDigits: FromDigits.Floating[BigFloat] with - def fromDigits(digits: String) = apply(digits) + given FromDigits: FromDigits.Floating[BigFloat] with + def fromDigits(digits: String) = apply(digits) end BigFloat ``` Note that the `apply` method does not check the format of the `digits` argument. It is @@ -181,18 +177,16 @@ with a small dose of metaprogramming. The idea is to turn the `fromDigits` metho into a macro, i.e. make it an inline method with a splice as right hand side. To do this, replace the `FromDigits` instance in the `BigFloat` object by the following two definitions: ```scala -object BigFloat { - ... +object BigFloat: + ... - class FromDigits extends FromDigits.Floating[BigFloat] { - def fromDigits(digits: String) = apply(digits) - } + class FromDigits extends FromDigits.Floating[BigFloat]: + def fromDigits(digits: String) = apply(digits) - given FromDigits { - override inline def fromDigits(digits: String) = ${ - fromDigitsImpl('digits) - } - } + given FromDigits: + override inline def fromDigits(digits: String) = ${ + fromDigitsImpl('digits) + } ``` Note that an inline method cannot directly fill in for an abstract method, since it produces no code that can be executed at runtime. That is why we define an intermediary class @@ -200,22 +194,18 @@ no code that can be executed at runtime. 
That is why we define an intermediary c method in the `FromDigits` given instance. That method is defined in terms of a macro implementation method `fromDigitsImpl`. Here is its definition: ```scala - private def fromDigitsImpl(digits: Expr[String])(using ctx: Quotes): Expr[BigFloat] = - digits.value match { - case Some(ds) => - try { - val BigFloat(m, e) = apply(ds) - '{BigFloat(${Expr(m)}, ${Expr(e)})} - } - catch { - case ex: FromDigits.FromDigitsException => - ctx.error(ex.getMessage) - '{BigFloat(0, 0)} - } - case None => - '{apply($digits)} - } -} // end BigFloat + private def fromDigitsImpl(digits: Expr[String])(using ctx: Quotes): Expr[BigFloat] = + digits.value match + case Some(ds) => + try + val BigFloat(m, e) = apply(ds) + '{BigFloat(${Expr(m)}, ${Expr(e)})} + catch case ex: FromDigits.FromDigitsException => + ctx.error(ex.getMessage) + '{BigFloat(0, 0)} + case None => + '{apply($digits)} +end BigFloat ``` The macro implementation takes an argument of type `Expr[String]` and yields a result of type `Expr[BigFloat]`. It tests whether its argument is a constant diff --git a/docs/docs/reference/changed-features/operators.md b/docs/docs/reference/changed-features/operators.md index aebf4bbd7995..f471d28efd1e 100644 --- a/docs/docs/reference/changed-features/operators.md +++ b/docs/docs/reference/changed-features/operators.md @@ -15,15 +15,16 @@ An `infix` modifier on a method definition allows using the method as an infix o ```scala import scala.annotation.targetName -trait MultiSet[T] { +trait MultiSet[T]: - infix def union(other: MultiSet[T]): MultiSet[T] + infix def union(other: MultiSet[T]): MultiSet[T] - def difference(other: MultiSet[T]): MultiSet[T] + def difference(other: MultiSet[T]): MultiSet[T] - @targetName("intersection") - def *(other: MultiSet[T]): MultiSet[T] -} + @targetName("intersection") + def *(other: MultiSet[T]): MultiSet[T] + +end MultiSet val s1, s2: MultiSet[Int] @@ -70,12 +71,13 @@ The purpose of the `infix` modifier is to achieve consistency across a code base 3. `infix` modifiers can be given to method definitions. The first non-receiver parameter list of an `infix` method must define exactly one parameter. Examples: ```scala - infix def op(x: S): R // ok - infix def op[T](x: T)(y: S): R // ok - infix def op[T](x: T, y: S): R // error: two parameters + infix def op1(x: S): R // ok + infix def op2[T](x: T)(y: S): R // ok + infix def op3[T](x: T, y: S): R // error: two parameters - infix def (x: A) op (y: B): R // ok - infix def (x: A) op (y1: B, y2: B): R // error: two parameters + extension (x: A) + infix def op4(y: B): R // ok + infix def op5(y1: B, y2: B): R // error: two parameters ``` 4. `infix` modifiers can also be given to type, trait or class definitions that have exactly two type parameters. An infix type like @@ -107,13 +109,13 @@ It is recommended that definitions of symbolic operators carry a [`@targetName` Infix operators can now appear at the start of lines in a multi-line expression. Examples: ```scala val str = "hello" - ++ " world" - ++ "!" + ++ " world" + ++ "!" def condition = - x > 0 - || xs.exists(_ > 0) - || xs.isEmpty + x > 0 + || xs.exists(_ > 0) + || xs.isEmpty ``` Previously, those expressions would have been rejected, since the compiler's semicolon inference would have treated the continuations `++ " world"` or `|| xs.isEmpty` as separate statements. 
diff --git a/docs/docs/reference/changed-features/pattern-bindings.md b/docs/docs/reference/changed-features/pattern-bindings.md index 247bac649a99..dd076d7b9b00 100644 --- a/docs/docs/reference/changed-features/pattern-bindings.md +++ b/docs/docs/reference/changed-features/pattern-bindings.md @@ -17,8 +17,8 @@ val (x: String) :: _ = xs // error: pattern's type String is more specialized ``` This code gives a compile-time warning in Scala 3.1 (and also in Scala 3.0 under the `-source 3.1` setting) whereas it will fail at runtime with a `ClassCastException` in Scala 2. In Scala 3.1, a pattern binding is only allowed if the pattern is _irrefutable_, that is, if the right-hand side's type conforms to the pattern's type. For instance, the following is OK: ```scala - val pair = (1, true) - val (x, y) = pair +val pair = (1, true) +val (x, y) = pair ``` Sometimes one wants to decompose data anyway, even though the pattern is refutable. For instance, if at some point one knows that a list `elems` is non-empty one might want to decompose it like this: @@ -37,14 +37,14 @@ Analogous changes apply to patterns in `for` expressions. For instance: ```scala val elems: List[Any] = List((1, 2), "hello", (3, 4)) -for ((x, y) <- elems) yield (y, x) // error: pattern's type (Any, Any) is more specialized - // than the right hand side expression's type Any +for (x, y) <- elems yield (y, x) // error: pattern's type (Any, Any) is more specialized + // than the right hand side expression's type Any ``` This code gives a compile-time warning in Scala 3.1 whereas in Scala 2 the list `elems` is filtered to retain only the elements of tuple type that match the pattern `(x, y)`. The filtering functionality can be obtained in Scala 3 by prefixing the pattern with `case`: ```scala - for (case (x, y) <- elems) yield (y, x) // returns List((2, 1), (4, 3)) +for case (x, y) <- elems yield (y, x) // returns List((2, 1), (4, 3)) ``` ## Syntax Changes diff --git a/docs/docs/reference/changed-features/pattern-matching.md b/docs/docs/reference/changed-features/pattern-matching.md index 71b82a91cfcc..87bb03bc3ad1 100644 --- a/docs/docs/reference/changed-features/pattern-matching.md +++ b/docs/docs/reference/changed-features/pattern-matching.md @@ -100,14 +100,13 @@ For example: ```scala -object Even { - def unapply(s: String): Boolean = s.size % 2 == 0 -} +object Even: + def unapply(s: String): Boolean = s.size % 2 == 0 + +"even" match + case s @ Even() => println(s"$s has an even number of characters") + case s => println(s"$s has an odd number of characters") -"even" match { - case s @ Even() => println(s"$s has an even number of characters") - case s => println(s"$s has an odd number of characters") -} // even has an even number of characters ``` @@ -122,24 +121,22 @@ For example: ```scala -class FirstChars(s: String) extends Product { - def _1 = s.charAt(0) - def _2 = s.charAt(1) - - // Not used by pattern matching: Product is only used as a marker trait. - def canEqual(that: Any): Boolean = ??? - def productArity: Int = ??? - def productElement(n: Int): Any = ??? -} +class FirstChars(s: String) extends Product: + def _1 = s.charAt(0) + def _2 = s.charAt(1) -object FirstChars { - def unapply(s: String): FirstChars = new FirstChars(s) -} + // Not used by pattern matching: Product is only used as a marker trait. + def canEqual(that: Any): Boolean = ??? + def productArity: Int = ??? + def productElement(n: Int): Any = ??? + +object FirstChars: + def unapply(s: String): FirstChars = new FirstChars(s) + +"Hi!" 
match + case FirstChars(char1, char2) => + println(s"First: $char1; Second: $char2") -"Hi!" match { - case FirstChars(char1, char2) => - println(s"First: $char1; Second: $char2") -} // First: H; Second: i ``` @@ -150,19 +147,17 @@ object FirstChars { ```scala -class Nat(val x: Int) { - def get: Int = x - def isEmpty = x < 0 -} +class Nat(val x: Int): + def get: Int = x + def isEmpty = x < 0 -object Nat { - def unapply(x: Int): Nat = new Nat(x) -} +object Nat: + def unapply(x: Int): Nat = new Nat(x) + +5 match + case Nat(n) => println(s"$n is a natural number") + case _ => () -5 match { - case Nat(n) => println(s"$n is a natural number") - case _ => () -} // 5 is a natural number ``` @@ -172,18 +167,16 @@ object Nat { - Pattern-matching on exactly `N` patterns with types `P1, P2, ..., PN` ```Scala -object ProdEmpty { - def _1: Int = ??? - def _2: String = ??? - def isEmpty = true - def unapply(s: String): this.type = this - def get = this -} - -"" match { - case ProdEmpty(_, _) => ??? - case _ => () -} +object ProdEmpty: + def _1: Int = ??? + def _2: String = ??? + def isEmpty = true + def unapply(s: String): this.type = this + def get = this + +"" match + case ProdEmpty(_, _) => ??? + case _ => () ``` @@ -193,10 +186,10 @@ object ProdEmpty { ```Scala type X = { - def lengthCompare(len: Int): Int // or, `def length: Int` - def apply(i: Int): T1 - def drop(n: Int): scala.Seq[T2] - def toSeq: scala.Seq[T3] + def lengthCompare(len: Int): Int // or, `def length: Int` + def apply(i: Int): T1 + def drop(n: Int): scala.Seq[T2] + def toSeq: scala.Seq[T3] } ``` @@ -206,16 +199,15 @@ type X = { ```scala -object CharList { - def unapplySeq(s: String): Option[Seq[Char]] = Some(s.toList) -} +object CharList: + def unapplySeq(s: String): Option[Seq[Char]] = Some(s.toList) + +"example" match + case CharList(c1, c2, c3, c4, _, _, _) => + println(s"$c1,$c2,$c3,$c4") + case _ => + println("Expected *exactly* 7 characters!") -"example" match { - case CharList(c1, c2, c3, c4, _, _, _) => - println(s"$c1,$c2,$c3,$c4") - case _ => - println("Expected *exactly* 7 characters!") -} // e,x,a,m ``` @@ -229,15 +221,13 @@ object CharList { ```Scala class Foo(val name: String, val children: Int *) -object Foo { - def unapplySeq(f: Foo): Option[(String, Seq[Int])] = - Some((f.name, f.children)) -} +object Foo: + def unapplySeq(f: Foo): Option[(String, Seq[Int])] = + Some((f.name, f.children)) -def foo(f: Foo) = f match { - case Foo(name, ns : _*) => - case Foo(name, x, y, ns : _*) => -} +def foo(f: Foo) = f match + case Foo(name, ns : _*) => + case Foo(name, x, y, ns : _*) => ``` There are plans for further simplification, in particular to factor out *product diff --git a/docs/docs/reference/changed-features/structural-types.md b/docs/docs/reference/changed-features/structural-types.md index def43b047b95..072f78f9108d 100644 --- a/docs/docs/reference/changed-features/structural-types.md +++ b/docs/docs/reference/changed-features/structural-types.md @@ -33,15 +33,12 @@ configure how fields and methods should be resolved. 
Here's an example of a structural type `Person`: ```scala - class Record(elems: (String, Any)*) extends Selectable { - private val fields = elems.toMap - def selectDynamic(name: String): Any = fields(name) - } - type Person = Record { - val name: String - val age: Int - } -``` + class Record(elems: (String, Any)*) extends Selectable: + private val fields = elems.toMap + def selectDynamic(name: String): Any = fields(name) + + type Person = Record { val name: String; val age: Int } + ``` The type `Person` adds a _refinement_ to its parent type `Record` that defines the two fields `name` and `age`. We say the refinement is _structural_ since `name` and `age` are not defined in the parent type. But they exist nevertheless as members of class `Person`. For instance, the following program would print "Emma is 42 years old.": ```scala @@ -76,15 +73,13 @@ Besides `selectDynamic`, a `Selectable` class sometimes also defines a method `a Structural types can also be accessed using Java reflection. Example: ```scala - type Closeable = { - def close(): Unit - } - class FileInputStream { + type Closeable = { def close(): Unit } + + class FileInputStream: def close(): Unit - } - class Channel { + + class Channel: def close(): Unit - } ``` Here, we define a structural type `Closeable` that defines a `close` method. There are various classes that have `close` methods, we just list `FileInputStream` and `Channel` as two examples. It would be easiest if the two classes shared a common interface that factors out the `close` method. But such factorings are often not possible if different libraries are combined in one application. Yet, we can still have methods that work on all classes with a `close` method by using the `Closeable` type. For instance, @@ -128,13 +123,13 @@ the database access example given at the beginning of this document. Local and anonymous classes that extend `Selectable` get more refined types than other classes. Here is an example: ```scala -trait Vehicle extends reflect.Selectable { - val wheels: Int -} -val i3 = new Vehicle { // i3: Vehicle { val range: Int } - val wheels = 4 - val range = 240 -} +trait Vehicle extends reflect.Selectable: + val wheels: Int + +val i3 = new Vehicle: // i3: Vehicle { val range: Int } + val wheels = 4 + val range = 240 + i3.range ``` The type of `i3` in this example is `Vehicle { val range: Int }`. Hence, @@ -144,14 +139,14 @@ defines the necessary `selectDynamic` member. `Vehicle` could also extend some other subclass of `scala.Selectable` that implements `selectDynamic` and `applyDynamic` differently. But if it does not extend a `Selectable` at all, the code would no longer typecheck: ```scala -class Vehicle { - val wheels: Int -} -val i3 = new Vehicle { // i3: Vehicle - val wheels = 4 - val range = 240 -} -i3.range: // error: range is not a member of `Vehicle` +trait Vehicle: + val wheels: Int + +val i3 = new Vehicle: // i3: Vehicle + val wheels = 4 + val range = 240 + +i3.range // error: range is not a member of `Vehicle` ``` The difference is that the type of an anonymous class that does not extend `Selectable` is just formed from the parent type(s) of the class, without adding any refinements. Hence, `i3` now has just type `Vehicle` and the selection `i3.range` gives a "member not found" error. 
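For completeness, here is a small, self-contained usage sketch of the Java-reflection-based access described earlier on this page (the `FileHandle` class, the concrete `close` bodies, and the `autoClose` helper are made up so that the sketch runs):

```scala
import scala.reflect.Selectable.reflectiveSelectable

type Closeable = { def close(): Unit }

// Two unrelated classes that both happen to have a `close` member.
class FileHandle:
  def close(): Unit = println("closing file")

class Channel:
  def close(): Unit = println("closing channel")

// The structural call `f.close()` is resolved via Java reflection at runtime.
def autoClose(f: Closeable)(op: => Unit): Unit =
  try op finally f.close()

@main def closeDemo =
  autoClose(FileHandle())(println("reading"))
  autoClose(Channel())(println("sending"))
```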
diff --git a/docs/docs/reference/changed-features/vararg-patterns.md b/docs/docs/reference/changed-features/vararg-patterns.md index ce4b45e26c12..25f457d44dd5 100644 --- a/docs/docs/reference/changed-features/vararg-patterns.md +++ b/docs/docs/reference/changed-features/vararg-patterns.md @@ -7,10 +7,9 @@ The syntax of vararg patterns has changed. In the new syntax one writes varargs like one writes them in expressions, using a `: _*` type annotation: ```scala -xs match { - case List(1, 2, xs: _*) => println(xs) // binds xs - case List(1, _ : _*) => // wildcard pattern -} +xs match + case List(1, 2, xs: _*) => println(xs) // binds xs + case List(1, _ : _*) => // wildcard pattern ``` The old syntax, which is shorter but less regular, is no longer supported. diff --git a/docs/docs/reference/contextual/by-name-context-parameters.md b/docs/docs/reference/contextual/by-name-context-parameters.md index 78896562cf6e..868ab8a3f08f 100644 --- a/docs/docs/reference/contextual/by-name-context-parameters.md +++ b/docs/docs/reference/contextual/by-name-context-parameters.md @@ -6,16 +6,15 @@ title: "By-Name Context Parameters" Context parameters can be declared by-name to avoid a divergent inferred expansion. Example: ```scala -trait Codec[T] { - def write(x: T): Unit -} +trait Codec[T]: + def write(x: T): Unit given intCodec: Codec[Int] = ??? given optionCodec[T](using ev: => Codec[T]): Codec[Option[T]] with - def write(xo: Option[T]) = xo match - case Some(x) => ev.write(x) - case None => + def write(xo: Option[T]) = xo match + case Some(x) => ev.write(x) + case None => val s = summon[Codec[Option[Int]]] diff --git a/docs/docs/reference/contextual/context-functions-spec.md b/docs/docs/reference/contextual/context-functions-spec.md index fa5156bc4baf..1f212c299d4b 100644 --- a/docs/docs/reference/contextual/context-functions-spec.md +++ b/docs/docs/reference/contextual/context-functions-spec.md @@ -21,9 +21,8 @@ methods with context parameters. Specifically, the `N`-ary function type `ContextFunctionN[T1 , ... , TN, R]`. Such class types are assumed to have the following definitions, for any value of `N >= 1`: ```scala package scala -trait ContextFunctionN[-T1 , ... , -TN, +R] { - def apply(using x1: T1 , ... , xN: TN): R -} +trait ContextFunctionN[-T1 , ... , -TN, +R]: + def apply(using x1: T1 , ... , xN: TN): R ``` Context function types erase to normal function types, so these classes are generated on the fly for typechecking, but not realized in actual code. @@ -44,9 +43,8 @@ the context parameters `xi`. The context function literal is evaluated as the instance creation expression ```scala -new scala.ContextFunctionN[T1, ..., Tn, T] { - def apply(using x1: T1, ..., xn: Tn): T = e -} +new scala.ContextFunctionN[T1, ..., Tn, T]: + def apply(using x1: T1, ..., xn: Tn): T = e ``` A context parameter may also be a wildcard represented by an underscore `_`. In that case, a fresh name for the parameter is chosen arbitrarily. 
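As a concrete sketch of the expansion described above, consider a single context parameter (the `Config` type and the names below are made up):

```scala
case class Config(verbose: Boolean)

// `describe` has the context function type `Config ?=> String`. Its right-hand
// side is converted to a context function literal, conceptually an instance of
// `ContextFunction1[Config, String]` whose `apply` takes a `using` parameter,
// so `summon[Config]` picks up that synthesized parameter.
val describe: Config ?=> String =
  if summon[Config].verbose then "verbose mode" else "quiet mode"

@main def configDemo =
  println(describe(using Config(verbose = true)))   // verbose mode
  println(describe(using Config(verbose = false)))  // quiet mode
```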
diff --git a/docs/docs/reference/contextual/context-functions.md b/docs/docs/reference/contextual/context-functions.md index 36eb1caf2f1d..284f07485a98 100644 --- a/docs/docs/reference/contextual/context-functions.md +++ b/docs/docs/reference/contextual/context-functions.md @@ -54,30 +54,28 @@ instance, here is how they can support the "builder pattern", where the aim is to construct tables like this: ```scala table { - row { - cell("top left") - cell("top right") - } - row { - cell("bottom left") - cell("bottom right") - } + row { + cell("top left") + cell("top right") + } + row { + cell("bottom left") + cell("bottom right") + } } ``` The idea is to define classes for `Table` and `Row` that allow the addition of elements via `add`: ```scala - class Table { - val rows = new ArrayBuffer[Row] - def add(r: Row): Unit = rows += r - override def toString = rows.mkString("Table(", ", ", ")") - } + class Table: + val rows = new ArrayBuffer[Row] + def add(r: Row): Unit = rows += r + override def toString = rows.mkString("Table(", ", ", ")") - class Row { - val cells = new ArrayBuffer[Cell] - def add(c: Cell): Unit = cells += c - override def toString = cells.mkString("Row(", ", ", ")") - } + class Row: + val cells = new ArrayBuffer[Cell] + def add(c: Cell): Unit = cells += c + override def toString = cells.mkString("Row(", ", ", ")") case class Cell(elem: String) ``` @@ -85,20 +83,18 @@ Then, the `table`, `row` and `cell` constructor methods can be defined with context function types as parameters to avoid the plumbing boilerplate that would otherwise be necessary. ```scala - def table(init: Table ?=> Unit) = { - given t: Table = Table() - init - t - } + def table(init: Table ?=> Unit) = + given t: Table = Table() + init + t - def row(init: Row ?=> Unit)(using t: Table) = { - given r: Row = Row() - init - t.add(r) - } + def row(init: Row ?=> Unit)(using t: Table) = + given r: Row = Row() + init + t.add(r) def cell(str: String)(using r: Row) = - r.add(new Cell(str)) + r.add(new Cell(str)) ``` With that setup, the table construction code above compiles and expands to: ```scala @@ -120,17 +116,16 @@ With that setup, the table construction code above compiles and expands to: As a larger example, here is a way to define constructs for checking arbitrary postconditions using an extension method `ensuring` so that the checked result can be referred to simply by `result`. The example combines opaque type aliases, context function types, and extension methods to provide a zero-overhead abstraction. ```scala -object PostConditions { - opaque type WrappedResult[T] = T +object PostConditions: + opaque type WrappedResult[T] = T - def result[T](using r: WrappedResult[T]): T = r + def result[T](using r: WrappedResult[T]): T = r - extension [T](x: T) - def ensuring(condition: WrappedResult[T] ?=> Boolean): T = { - assert(condition(using x)) - x - } -} + extension [T](x: T) + def ensuring(condition: WrappedResult[T] ?=> Boolean): T = + assert(condition(using x)) + x +end PostConditions import PostConditions.{ensuring, result} val s = List(1, 2, 3).sum.ensuring(result == 6) @@ -146,10 +141,10 @@ does not need boxing either. 
Hence, the implementation of `ensuring` is as about as the best possible code one could write by hand: ```scala -{ val result = List(1, 2, 3).sum - assert(result == 6) - result -} +val s = + val result = List(1, 2, 3).sum + assert(result == 6) + result ``` ### Reference diff --git a/docs/docs/reference/contextual/conversions.md b/docs/docs/reference/contextual/conversions.md index 1a045c90e1b6..5dae928c9620 100644 --- a/docs/docs/reference/contextual/conversions.md +++ b/docs/docs/reference/contextual/conversions.md @@ -7,13 +7,12 @@ Implicit conversions are defined by given instances of the `scala.Conversion` cl This class is defined in package `scala` as follows: ```scala abstract class Conversion[-T, +U] extends (T => U): - def apply (x: T): U + def apply (x: T): U ``` For example, here is an implicit conversion from `String` to `Token`: ```scala -given Conversion[String, Token] { - def apply(str: String): Token = new KeyWord(str) -} +given Conversion[String, Token] with + def apply(str: String): Token = new KeyWord(str) ``` Using an alias this can be expressed more concisely as: ```scala @@ -39,20 +38,20 @@ primitive number types to subclasses of `java.lang.Number`. For instance, the conversion from `Int` to `java.lang.Integer` can be defined as follows: ```scala given int2Integer: Conversion[Int, java.lang.Integer] = - java.lang.Integer.valueOf(_) + java.lang.Integer.valueOf(_) ``` 2. The "magnet" pattern is sometimes used to express many variants of a method. Instead of defining overloaded versions of the method, one can also let the method take one or more arguments of specially defined "magnet" types, into which various argument types can be converted. Example: ```scala - object Completions { + object Completions: - // The argument "magnet" type - enum CompletionArg { - case Error(s: String) - case Response(f: Future[HttpResponse]) - case Status(code: Future[StatusCode]) - } - object CompletionArg { + // The argument "magnet" type + enum CompletionArg: + case Error(s: String) + case Response(f: Future[HttpResponse]) + case Status(code: Future[StatusCode]) + + object CompletionArg: // conversions defining the possible arguments to pass to `complete` // these always come with CompletionArg @@ -60,17 +59,17 @@ conversion from `Int` to `java.lang.Integer` can be defined as follows: // // CompletionArg.fromStatusCode(statusCode) - given fromString : Conversion[String, CompletionArg] = Error(_) - given fromFuture : Conversion[Future[HttpResponse], CompletionArg] = Response(_) - given fromStatusCode: Conversion[Future[StatusCode], CompletionArg] = Status(_) - } - import CompletionArg._ + given fromString : Conversion[String, CompletionArg] = Error(_) + given fromFuture : Conversion[Future[HttpResponse], CompletionArg] = Response(_) + given fromStatusCode: Conversion[Future[StatusCode], CompletionArg] = Status(_) + end CompletionArg + import CompletionArg._ + + def complete[T](arg: CompletionArg) = arg match + case Error(s) => ... + case Response(f) => ... + case Status(code) => ... - def complete[T](arg: CompletionArg) = arg match { - case Error(s) => ... - case Response(f) => ... - case Status(code) => ... - } - } + end Completions ``` This setup is more complicated than simple overloading of `complete`, but it can still be useful if normal overloading is not available (as in the case above, since we cannot have two overloaded methods that take `Future[...]` arguments), or if normal overloading would lead to a combinatorial explosion of variants. 
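Here is a reduced, self-contained sketch of the magnet pattern (the `log` method and the `LogArg` type are made up; the `Completions` example above follows the same shape with richer argument types):

```scala
import scala.language.implicitConversions

// The argument "magnet" type: one case per accepted argument shape.
enum LogArg:
  case Message(s: String)
  case Code(n: Int)

object LogArg:
  // Conversions defining the argument types accepted by `log`. They live in
  // the companion object, so they are found in the implicit scope of `LogArg`.
  given fromString: Conversion[String, LogArg] = Message(_)
  given fromInt: Conversion[Int, LogArg] = Code(_)

def log(arg: LogArg): String = arg match
  case LogArg.Message(s) => s"message: $s"
  case LogArg.Code(n)    => s"code: $n"

@main def logDemo =
  println(log("file not found"))  // the String argument is converted via fromString
  println(log(404))               // the Int argument is converted via fromInt
```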
diff --git a/docs/docs/reference/contextual/derivation-macro.md b/docs/docs/reference/contextual/derivation-macro.md index b37fd9529d6f..999f07fca774 100644 --- a/docs/docs/reference/contextual/derivation-macro.md +++ b/docs/docs/reference/contextual/derivation-macro.md @@ -16,9 +16,8 @@ macros. As in the original code, the type class definition is the same: ```scala -trait Eq[T] { - def eqv(x: T, y: T): Boolean -} +trait Eq[T]: + def eqv(x: T, y: T): Boolean ``` we need to implement a method `Eq.derived` on the companion object of `Eq` that @@ -41,31 +40,26 @@ from the signature. The body of the `derived` method is shown below: ```scala -given derived[T: Type](using Quotes): Expr[Eq[T]] = { - import quotes.reflect._ - - val ev: Expr[Mirror.Of[T]] = Expr.summon[Mirror.Of[T]].get - - ev match { - case '{ $m: Mirror.ProductOf[T] { type MirroredElemTypes = elementTypes }} => - val elemInstances = summonAll[elementTypes] - val eqProductBody: (Expr[T], Expr[T]) => Expr[Boolean] = (x, y) => { - elemInstances.zipWithIndex.foldLeft(Expr(true: Boolean)) { - case (acc, (elem, index)) => - val e1 = '{$x.asInstanceOf[Product].productElement(${Expr(index)})} - val e2 = '{$y.asInstanceOf[Product].productElement(${Expr(index)})} - - '{ $acc && $elem.asInstanceOf[Eq[Any]].eqv($e1, $e2) } - } - } - '{ - eqProduct((x: T, y: T) => ${eqProductBody('x, 'y)}) - } - - // case for Mirror.ProductOf[T] - // ... - } -} +given derived[T: Type](using Quotes): Expr[Eq[T]] = + import quotes.reflect._ + + val ev: Expr[Mirror.Of[T]] = Expr.summon[Mirror.Of[T]].get + + ev match + case '{ $m: Mirror.ProductOf[T] { type MirroredElemTypes = elementTypes }} => + val elemInstances = summonAll[elementTypes] + val eqProductBody: (Expr[T], Expr[T]) => Expr[Boolean] = (x, y) => + elemInstances.zipWithIndex.foldLeft(Expr(true: Boolean)) { + case (acc, (elem, index)) => + val e1 = '{$x.asInstanceOf[Product].productElement(${Expr(index)})} + val e2 = '{$y.asInstanceOf[Product].productElement(${Expr(index)})} + '{ $acc && $elem.asInstanceOf[Eq[Any]].eqv($e1, $e2) } + } + + '{ eqProduct((x: T, y: T) => ${eqProductBody('x, 'y)}) } + + // case for Mirror.ProductOf[T] + // ... ``` Note, that in the `inline` case we can merely write @@ -84,20 +78,19 @@ Instead we extract the tuple-type for element types using pattern matching over quotes and more specifically of the refined type: ```scala - case '{ $m: Mirror.ProductOf[T] { type MirroredElemTypes = elementTypes }} => ... + case '{ $m: Mirror.ProductOf[T] { type MirroredElemTypes = elementTypes }} => ... ``` -The implementation of `summonAll` as a macro can be show below assuming that we -have the given instances for our primitive types: +Shown below is the implementation of `summonAll` as a macro. We assume that +given instances for our primitive types exist. 
```scala - def summonAll[T: Type](using Quotes): List[Expr[Eq[_]]] = - Type.of[T] match { +def summonAll[T: Type](using Quotes): List[Expr[Eq[_]]] = + Type.of[T] match case '[String *: tpes] => '{ summon[Eq[String]] } :: summonAll[tpes] case '[Int *: tpes] => '{ summon[Eq[Int]] } :: summonAll[tpes] case '[tpe *: tpes] => derived[tpe] :: summonAll[tpes] case '[EmptyTuple] => Nil - } ``` One additional difference with the body of `derived` here as opposed to the one @@ -108,9 +101,9 @@ class that holds a name of type `String` and an age of type `Int`, the equality check we want to generate is the following: ```scala -true - && Eq[String].eqv(x.productElement(0),y.productElement(0)) - && Eq[Int].eqv(x.productElement(1), y.productElement(1)) + true + && Eq[String].eqv(x.productElement(0),y.productElement(0)) + && Eq[Int].eqv(x.productElement(1), y.productElement(1)) ``` ### Calling the derived method inside the macro @@ -122,9 +115,9 @@ directly. The `eqGen` can trigger the derivation. ```scala extension [T](inline x: T) - inline def === (inline y: T)(using eq: Eq[T]): Boolean = eq.eqv(x, y) + inline def === (inline y: T)(using eq: Eq[T]): Boolean = eq.eqv(x, y) -implicit inline def eqGen[T]: Eq[T] = ${ Eq.derived[T] } +inline given eqGen[T]: Eq[T] = ${ Eq.derived[T] } ``` Note, that we use inline method syntax and we can compare instance such as @@ -134,10 +127,9 @@ types: ```scala case class Person(name: String, age: Int) -enum Opt[+T] { - case Sm(t: T) - case Nn -} +enum Opt[+T]: + case Sm(t: T) + case Nn ``` The full code is shown below: @@ -147,81 +139,65 @@ import scala.deriving._ import scala.quoted._ -trait Eq[T] { - def eqv(x: T, y: T): Boolean -} +trait Eq[T]: + def eqv(x: T, y: T): Boolean -object Eq { - given Eq[String] { - def eqv(x: String, y: String) = x == y - } +object Eq: + given Eq[String] with + def eqv(x: String, y: String) = x == y - given Eq[Int] { - def eqv(x: Int, y: Int) = x == y - } + given Eq[Int] with + def eqv(x: Int, y: Int) = x == y - def eqProduct[T](body: (T, T) => Boolean): Eq[T] = - new Eq[T] { - def eqv(x: T, y: T): Boolean = body(x, y) - } + def eqProduct[T](body: (T, T) => Boolean): Eq[T] = + new Eq[T]: + def eqv(x: T, y: T): Boolean = body(x, y) - def eqSum[T](body: (T, T) => Boolean): Eq[T] = - new Eq[T] { - def eqv(x: T, y: T): Boolean = body(x, y) - } + def eqSum[T](body: (T, T) => Boolean): Eq[T] = + new Eq[T]: + def eqv(x: T, y: T): Boolean = body(x, y) - def summonAll[T: Type](using Quotes): List[Expr[Eq[_]]] = - Type.of[T] match { - case '[String *: tpes] => '{ summon[Eq[String]] } :: summonAll[tpes] - case '[Int *: tpes] => '{ summon[Eq[Int]] } :: summonAll[tpes] - case '[tpe *: tpes] => derived[tpe] :: summonAll[tpes] - case '[EmptyTuple] => Nil - } + def summonAll[T: Type](using Quotes): List[Expr[Eq[_]]] = + Type.of[T] match + case '[String *: tpes] => '{ summon[Eq[String]] } :: summonAll[tpes] + case '[Int *: tpes] => '{ summon[Eq[Int]] } :: summonAll[tpes] + case '[tpe *: tpes] => derived[tpe] :: summonAll[tpes] + case '[EmptyTuple] => Nil - given derived[T: Type](using q: Quotes): Expr[Eq[T]] = { - import quotes.reflect._ + given derived[T: Type](using q: Quotes): Expr[Eq[T]] = + import quotes.reflect._ - val ev: Expr[Mirror.Of[T]] = Expr.summon[Mirror.Of[T]].get + val ev: Expr[Mirror.Of[T]] = Expr.summon[Mirror.Of[T]].get - ev match { - case '{ $m: Mirror.ProductOf[T] { type MirroredElemTypes = elementTypes }} => - val elemInstances = summonAll[elementTypes] - val eqProductBody: (Expr[T], Expr[T]) => Expr[Boolean] = (x, y) => { - 
elemInstances.zipWithIndex.foldLeft(Expr(true: Boolean)) { - case (acc, (elem, index)) => - val e1 = '{$x.asInstanceOf[Product].productElement(${Expr(index)})} - val e2 = '{$y.asInstanceOf[Product].productElement(${Expr(index)})} - - '{ $acc && $elem.asInstanceOf[Eq[Any]].eqv($e1, $e2) } - } - } - '{ - eqProduct((x: T, y: T) => ${eqProductBody('x, 'y)}) - } - - case '{ $m: Mirror.SumOf[T] { type MirroredElemTypes = elementTypes }} => - val elemInstances = summonAll[elementTypes] - val eqSumBody: (Expr[T], Expr[T]) => Expr[Boolean] = (x, y) => { - val ordx = '{ $m.ordinal($x) } - val ordy = '{ $m.ordinal($y) } - - val elements = Expr.ofList(elemInstances) - '{ - $ordx == $ordy && $elements($ordx).asInstanceOf[Eq[Any]].eqv($x, $y) - } - } - - '{ - eqSum((x: T, y: T) => ${eqSumBody('x, 'y)}) - } - } - } -} + ev match + case '{ $m: Mirror.ProductOf[T] { type MirroredElemTypes = elementTypes }} => + val elemInstances = summonAll[elementTypes] + val eqProductBody: (Expr[T], Expr[T]) => Expr[Boolean] = (x, y) => + elemInstances.zipWithIndex.foldLeft(Expr(true: Boolean)) { + case (acc, (elem, index)) => + val e1 = '{$x.asInstanceOf[Product].productElement(${Expr(index)})} + val e2 = '{$y.asInstanceOf[Product].productElement(${Expr(index)})} -object Macro3 { - extension [T](inline x: T) - inline def === (inline y: T)(using eq: Eq[T]): Boolean = eq.eqv(x, y) + '{ $acc && $elem.asInstanceOf[Eq[Any]].eqv($e1, $e2) } + } + '{ eqProduct((x: T, y: T) => ${eqProductBody('x, 'y)}) } - implicit inline def eqGen[T]: Eq[T] = ${ Eq.derived[T] } -} + case '{ $m: Mirror.SumOf[T] { type MirroredElemTypes = elementTypes }} => + val elemInstances = summonAll[elementTypes] + val eqSumBody: (Expr[T], Expr[T]) => Expr[Boolean] = (x, y) => + val ordx = '{ $m.ordinal($x) } + val ordy = '{ $m.ordinal($y) } + + val elements = Expr.ofList(elemInstances) + '{ $ordx == $ordy && $elements($ordx).asInstanceOf[Eq[Any]].eqv($x, $y) } + + '{ eqSum((x: T, y: T) => ${eqSumBody('x, 'y)}) } + end derived +end Eq + +object Macro3: + extension [T](inline x: T) + inline def === (inline y: T)(using eq: Eq[T]): Boolean = eq.eqv(x, y) + + inline given eqGen[T]: Eq[T] = ${ Eq.derived[T] } ``` diff --git a/docs/docs/reference/contextual/derivation.md b/docs/docs/reference/contextual/derivation.md index 5d54bb84828e..f2183cdd01ed 100644 --- a/docs/docs/reference/contextual/derivation.md +++ b/docs/docs/reference/contextual/derivation.md @@ -9,10 +9,9 @@ on. Common examples are `Eq`, `Ordering`, or `Show`. For example, given the foll (ADT), ```scala -enum Tree[T] derives Eq, Ordering, Show { - case Branch(left: Tree[T], right: Tree[T]) - case Leaf(elem: T) -} +enum Tree[T] derives Eq, Ordering, Show: + case Branch(left: Tree[T], right: Tree[T]) + case Leaf(elem: T) ``` The `derives` clause generates the following given instances for the `Eq`, `Ordering` and `Show` type classes in the @@ -41,41 +40,41 @@ They also provide minimal term level infrastructure to allow higher level librar derivation support. 
```scala -sealed trait Mirror { +sealed trait Mirror: - /** the type being mirrored */ - type MirroredType + /** the type being mirrored */ + type MirroredType - /** the type of the elements of the mirrored type */ - type MirroredElemTypes + /** the type of the elements of the mirrored type */ + type MirroredElemTypes - /** The mirrored *-type */ - type MirroredMonoType + /** The mirrored *-type */ + type MirroredMonoType - /** The name of the type */ - type MirroredLabel <: String + /** The name of the type */ + type MirroredLabel <: String - /** The names of the elements of the type */ - type MirroredElemLabels <: Tuple -} + /** The names of the elements of the type */ + type MirroredElemLabels <: Tuple + +object Mirror: -object Mirror { /** The Mirror for a product type */ - trait Product extends Mirror { - - /** Create a new instance of type `T` with elements - * taken from product `p`. - */ - def fromProduct(p: scala.Product): MirroredMonoType - } - - trait Sum extends Mirror { self => - /** The ordinal number of the case class of `x`. - * For enums, `ordinal(x) == x.ordinal` - */ - def ordinal(x: MirroredMonoType): Int - } -} + trait Product extends Mirror: + + /** Create a new instance of type `T` with elements + * taken from product `p`. + */ + def fromProduct(p: scala.Product): MirroredMonoType + + trait Sum extends Mirror: + + /** The ordinal number of the case class of `x`. + * For enums, `ordinal(x) == x.ordinal` + */ + def ordinal(x: MirroredMonoType): Int + +end Mirror ``` Product types (i.e. case classes and objects, and enum cases) have mirrors which are subtypes of `Mirror.Product`. Sum @@ -85,42 +84,38 @@ For the `Tree` ADT from above the following `Mirror` instances will be automatic ```scala // Mirror for Tree -Mirror.Sum { - type MirroredType = Tree - type MirroredElemTypes[T] = (Branch[T], Leaf[T]) - type MirroredMonoType = Tree[_] - type MirroredLabels = "Tree" - type MirroredElemLabels = ("Branch", "Leaf") - - def ordinal(x: MirroredMonoType): Int = x match { - case _: Branch[_] => 0 - case _: Leaf[_] => 1 - } -} +new Mirror.Sum: + type MirroredType = Tree + type MirroredElemTypes[T] = (Branch[T], Leaf[T]) + type MirroredMonoType = Tree[_] + type MirroredLabels = "Tree" + type MirroredElemLabels = ("Branch", "Leaf") + + def ordinal(x: MirroredMonoType): Int = x match + case _: Branch[_] => 0 + case _: Leaf[_] => 1 // Mirror for Branch -Mirror.Product { - type MirroredType = Branch - type MirroredElemTypes[T] = (Tree[T], Tree[T]) - type MirroredMonoType = Branch[_] - type MirroredLabels = "Branch" - type MirroredElemLabels = ("left", "right") +new Mirror.Product: + type MirroredType = Branch + type MirroredElemTypes[T] = (Tree[T], Tree[T]) + type MirroredMonoType = Branch[_] + type MirroredLabels = "Branch" + type MirroredElemLabels = ("left", "right") - def fromProduct(p: Product): MirroredMonoType = - new Branch(...) -} + def fromProduct(p: Product): MirroredMonoType = + new Branch(...) // Mirror for Leaf -Mirror.Product { - type MirroredType = Leaf - type MirroredElemTypes[T] = Tuple1[T] - type MirroredMonoType = Leaf[_] - type MirroredLabels = "Leaf" - type MirroredElemLabels = Tuple1["elem"] - - def fromProduct(p: Product): MirroredMonoType = - new Leaf(...) -} +new Mirror.Product: + type MirroredType = Leaf + type MirroredElemTypes[T] = Tuple1[T] + type MirroredMonoType = Leaf[_] + type MirroredLabels = "Leaf" + type MirroredElemLabels = Tuple1["elem"] + + def fromProduct(p: Product): MirroredMonoType = + new Leaf(...) 
``` Note the following properties of `Mirror` types, @@ -174,22 +169,19 @@ type-level constructs in Scala 3: inline methods, inline matches, and implicit s ```scala -trait Eq[T] { - def eqv(x: T, y: T): Boolean -} +trait Eq[T]: + def eqv(x: T, y: T): Boolean ``` we need to implement a method `Eq.derived` on the companion object of `Eq` that produces a given instance for `Eq[T]` given a `Mirror[T]`. Here is a possible implementation, ```scala -inline given derived[T](using m: Mirror.Of[T]): Eq[T] = { - val elemInstances = summonAll[m.MirroredElemTypes] // (1) - inline m match { // (2) - case s: Mirror.SumOf[T] => eqSum(s, elemInstances) - case p: Mirror.ProductOf[T] => eqProduct(p, elemInstances) - } -} +inline given derived[T](using m: Mirror.Of[T]): Eq[T] = + val elemInstances = summonAll[m.MirroredElemTypes] // (1) + inline m match // (2) + case s: Mirror.SumOf[T] => eqSum(s, elemInstances) + case p: Mirror.ProductOf[T] => eqProduct(p, elemInstances) ``` Note that `derived` is defined as an `inline` given. This means that the method will be expanded at @@ -204,10 +196,9 @@ implementation of `summonAll` is `inline` and uses Scala 3's `summonInline` cons ```scala inline def summonAll[T <: Tuple]: List[Eq[_]] = - inline erasedValue[T] match { - case _: EmptyTuple => Nil - case _: (t *: ts) => summonInline[Eq[t]] :: summonAll[ts] - } + inline erasedValue[T] match + case _: EmptyTuple => Nil + case _: (t *: ts) => summonInline[Eq[t]] :: summonAll[ts] ``` with the instances for children in hand the `derived` method uses an `inline match` to dispatch to methods which can @@ -221,12 +212,10 @@ instance for the appropriate ADT subtype using the auxiliary method `check` (4). ```scala def eqSum[T](s: Mirror.SumOf[T], elems: List[Eq[_]]): Eq[T] = - new Eq[T] { - def eqv(x: T, y: T): Boolean = { - val ordx = s.ordinal(x) // (3) - (s.ordinal(y) == ordx) && check(elems(ordx))(x, y) // (4) - } - } + new Eq[T]: + def eqv(x: T, y: T): Boolean = + val ordx = s.ordinal(x) // (3) + (s.ordinal(y) == ordx) && check(elems(ordx))(x, y) // (4) ``` In the product case, `eqProduct` we test the runtime values of the arguments to `eqv` for equality as products based @@ -234,12 +223,11 @@ on the `Eq` instances for the fields of the data type (5), ```scala def eqProduct[T](p: Mirror.ProductOf[T], elems: List[Eq[_]]): Eq[T] = - new Eq[T] { - def eqv(x: T, y: T): Boolean = - iterator(x).zip(iterator(y)).zip(elems.iterator).forall { // (5) - case ((x, y), elem) => check(elem)(x, y) - } - } + new Eq[T]: + def eqv(x: T, y: T): Boolean = + iterator(x).zip(iterator(y)).zip(elems.iterator).forall { // (5) + case ((x, y), elem) => check(elem)(x, y) + } ``` Pulling this all together we have the following complete implementation, @@ -249,67 +237,56 @@ import scala.deriving._ import scala.compiletime.{erasedValue, summonInline} inline def summonAll[T <: Tuple]: List[Eq[_]] = - inline erasedValue[T] match { - case _: EmptyTuple => Nil - case _: (t *: ts) => summonInline[Eq[t]] :: summonAll[ts] - } - -trait Eq[T] { - def eqv(x: T, y: T): Boolean -} - -object Eq { - - given Eq[Int] with { - def eqv(x: Int, y: Int) = x == y - } - - def check(elem: Eq[_])(x: Any, y: Any): Boolean = - elem.asInstanceOf[Eq[Any]].eqv(x, y) - - def iterator[T](p: T) = p.asInstanceOf[Product].productIterator - - def eqSum[T](s: Mirror.SumOf[T], elems: => List[Eq[_]]): Eq[T] = - new Eq[T] { - def eqv(x: T, y: T): Boolean = { - val ordx = s.ordinal(x) - (s.ordinal(y) == ordx) && check(elems(ordx))(x, y) - } - } - - def eqProduct[T](p: Mirror.ProductOf[T], 
elems: => List[Eq[_]]): Eq[T] = - new Eq[T] { - def eqv(x: T, y: T): Boolean = - iterator(x).zip(iterator(y)).zip(elems.iterator).forall { - case ((x, y), elem) => check(elem)(x, y) - } - } - - inline given derived[T](using m: Mirror.Of[T]): Eq[T] = { - lazy val elemInstances = summonAll[m.MirroredElemTypes] - inline m match { - case s: Mirror.SumOf[T] => eqSum(s, elemInstances) - case p: Mirror.ProductOf[T] => eqProduct(p, elemInstances) - } - } -} + inline erasedValue[T] match + case _: EmptyTuple => Nil + case _: (t *: ts) => summonInline[Eq[t]] :: summonAll[ts] + +trait Eq[T]: + def eqv(x: T, y: T): Boolean + +object Eq: + given Eq[Int] with + def eqv(x: Int, y: Int) = x == y + + def check(elem: Eq[_])(x: Any, y: Any): Boolean = + elem.asInstanceOf[Eq[Any]].eqv(x, y) + + def iterator[T](p: T) = p.asInstanceOf[Product].productIterator + + def eqSum[T](s: Mirror.SumOf[T], elems: => List[Eq[_]]): Eq[T] = + new Eq[T]: + def eqv(x: T, y: T): Boolean = + val ordx = s.ordinal(x) + (s.ordinal(y) == ordx) && check(elems(ordx))(x, y) + + def eqProduct[T](p: Mirror.ProductOf[T], elems: => List[Eq[_]]): Eq[T] = + new Eq[T]: + def eqv(x: T, y: T): Boolean = + iterator(x).zip(iterator(y)).zip(elems.iterator).forall { + case ((x, y), elem) => check(elem)(x, y) + } + + inline given derived[T](using m: Mirror.Of[T]): Eq[T] = + lazy val elemInstances = summonAll[m.MirroredElemTypes] + inline m match + case s: Mirror.SumOf[T] => eqSum(s, elemInstances) + case p: Mirror.ProductOf[T] => eqProduct(p, elemInstances) +end Eq ``` we can test this relative to a simple ADT like so, ```scala -enum Opt[+T] derives Eq { - case Sm(t: T) - case Nn -} - -object Test extends App { - import Opt._ - val eqoi = summon[Eq[Opt[Int]]] - assert(eqoi.eqv(Sm(23), Sm(23))) - assert(!eqoi.eqv(Sm(23), Sm(13))) - assert(!eqoi.eqv(Sm(23), Nn)) -} +enum Opt[+T] derives Eq: + case Sm(t: T) + case Nn + +@main def test = + import Opt._ + val eqoi = summon[Eq[Opt[Int]]] + assert(eqoi.eqv(Sm(23), Sm(23))) + assert(!eqoi.eqv(Sm(23), Sm(13))) + assert(!eqoi.eqv(Sm(23), Nn)) ``` In this case the code that is generated by the inline expansion for the derived `Eq` instance for `Opt` looks like the @@ -317,12 +294,13 @@ following, after a little polishing, ```scala given derived$Eq[T](using eqT: Eq[T]): Eq[Opt[T]] = - eqSum(summon[Mirror[Opt[T]]], - List( - eqProduct(summon[Mirror[Sm[T]]], List(summon[Eq[T]])) - eqProduct(summon[Mirror[Nn.type]], Nil) - ) - ) + eqSum( + summon[Mirror[Opt[T]]], + List( + eqProduct(summon[Mirror[Sm[T]]], List(summon[Eq[T]])) + eqProduct(summon[Mirror[Nn.type]], Nil) + ) + ) ``` Alternative approaches can be taken to the way that `derived` methods can be defined. 
For example, more aggressively @@ -333,17 +311,13 @@ As a third example, using a higher level library such as shapeless the type clas `derived` method as, ```scala -given eqSum[A](using inst: => K0.CoproductInstances[Eq, A]): Eq[A] { - def eqv(x: A, y: A): Boolean = inst.fold2(x, y)(false)( - [t] => (eqt: Eq[t], t0: t, t1: t) => eqt.eqv(t0, t1) - ) -} - -given eqProduct[A](using inst: K0.ProductInstances[Eq, A]): Eq[A] { - def eqv(x: A, y: A): Boolean = inst.foldLeft2(x, y)(true: Boolean)( - [t] => (acc: Boolean, eqt: Eq[t], t0: t, t1: t) => Complete(!eqt.eqv(t0, t1))(false)(true) - ) -} +given eqSum[A](using inst: => K0.CoproductInstances[Eq, A]): Eq[A] with + def eqv(x: A, y: A): Boolean = inst.fold2(x, y)(false)( + [t] => (eqt: Eq[t], t0: t, t1: t) => eqt.eqv(t0, t1) + +given eqProduct[A](using inst: K0.ProductInstances[Eq, A]): Eq[A] with + def eqv(x: A, y: A): Boolean = inst.foldLeft2(x, y)(true: Boolean)( + [t] => (acc: Boolean, eqt: Eq[t], t0: t, t1: t) => Complete(!eqt.eqv(t0, t1))(false)(true) inline def derived[A](using gen: K0.Generic[A]) as Eq[A] = gen.derive(eqSum, eqProduct) ``` @@ -380,11 +354,11 @@ ConstrApps ::= ConstrApp {‘with’ ConstrApp} Note: To align `extends` clauses and `derives` clauses, Scala 3 also allows multiple extended types to be separated by commas. So the following is now legal: -``` +```scala class A extends B, C { ... } ``` It is equivalent to the old form -``` +```scala class A extends B with C { ... } ``` diff --git a/docs/docs/reference/contextual/extension-methods.md b/docs/docs/reference/contextual/extension-methods.md index f2ba15d8db8e..e7bddf6dbc21 100644 --- a/docs/docs/reference/contextual/extension-methods.md +++ b/docs/docs/reference/contextual/extension-methods.md @@ -9,7 +9,7 @@ Extension methods allow one to add methods to a type after the type is defined. case class Circle(x: Double, y: Double, radius: Double) extension (c: Circle) - def circumference: Double = c.radius * math.Pi * 2 + def circumference: Double = c.radius * math.Pi * 2 ``` Like regular methods, extension methods can be invoked with infix `.`: @@ -36,11 +36,11 @@ The extension method syntax can also be used to define operators. Examples: ```scala extension (x: String) - def < (y: String): Boolean = ... + def < (y: String): Boolean = ... extension (x: Elem) - def +: (xs: Seq[Elem]): Seq[Elem] = ... + def +: (xs: Seq[Elem]): Seq[Elem] = ... extension (x: Number) - infix def min (y: Number): Number = ... + infix def min (y: Number): Number = ... "ab" < "c" 1 +: List(2, 3) @@ -68,10 +68,10 @@ It is also possible to extend generic types by adding type parameters to an exte ```scala extension [T](xs: List[T]) - def second = xs.tail.head + def second = xs.tail.head extension [T: Numeric](x: T) - def + (y: T): T = summon[Numeric[T]].plus(x, y) + def + (y: T): T = summon[Numeric[T]].plus(x, y) ``` If an extension method has type parameters, they come immediately after `extension` and are followed by the extended parameter. @@ -88,7 +88,7 @@ Extensions can also take using clauses. For instance, the `+` extension above co ```scala extension [T](x: T)(using n: Numeric[T]) - def + (y: T): T = n.plus(x, y) + def + (y: T): T = n.plus(x, y) ``` **Note**: Type parameters have to be given after the `extension` keyword; they cannot be given after the `def`. 
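+
+For instance, assuming the `Numeric`-based `+` extension above is in scope, generic code can rely on it, as in the following sketch with a hypothetical `sumAll` helper:
+
+```scala
+// `T` is abstract here, so `_ + _` resolves to the extension method via the `Numeric` context bound
+def sumAll[T: Numeric](xs: List[T], zero: T): T =
+  xs.foldLeft(zero)(_ + _)
+
+sumAll(List(1, 2, 3), 0)      // 6, using Numeric[Int]
+sumAll(List(1.5, 2.5), 0.0)   // 4.0, using Numeric[Double]
+```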
@@ -105,11 +105,11 @@ Example: ```scala extension (ss: Seq[String]) - def longestStrings: Seq[String] = - val maxLength = ss.map(_.length).max - ss.filter(_.length == maxLength) + def longestStrings: Seq[String] = + val maxLength = ss.map(_.length).max + ss.filter(_.length == maxLength) - def longestString: String = longestStrings.head + def longestString: String = longestStrings.head ``` The same can be written with braces as follows (note that indented regions can still be used inside braces): @@ -117,9 +117,10 @@ The same can be written with braces as follows (note that indented regions can s ```scala extension (ss: Seq[String]) { - def longestStrings: Seq[String] = - val maxLength = ss.map(_.length).max - ss.filter(_.length == maxLength) + def longestStrings: Seq[String] = { + val maxLength = ss.map(_.length).max + ss.filter(_.length == maxLength) + } def longestString: String = longestStrings.head } @@ -133,22 +134,22 @@ where each method is defined separately. For instance, the first extension above ```scala extension (ss: Seq[String]) - def longestStrings: Seq[String] = - val maxLength = ss.map(_.length).max - ss.filter(_.length == maxLength) + def longestStrings: Seq[String] = + val maxLength = ss.map(_.length).max + ss.filter(_.length == maxLength) extension (ss: Seq[String]) - def longestString: String = ss.longestStrings.head + def longestString: String = ss.longestStrings.head ``` Collective extensions also can take type parameters and have using clauses. Example: ```scala extension [T](xs: List[T])(using Ordering[T]) - def smallest(n: Int): List[T] = xs.sorted.take(n) - def smallestIndices(n: Int): List[Int] = - val limit = smallest(n).max - xs.zipWithIndex.collect { case (x, i) if x <= limit => i } + def smallest(n: Int): List[T] = xs.sorted.take(n) + def smallestIndices(n: Int): List[Int] = + val limit = smallest(n).max + xs.zipWithIndex.collect { case (x, i) if x <= limit => i } ``` ### Translation of Calls to Extension Methods @@ -169,27 +170,27 @@ Here is an example for the first rule: ```scala trait IntOps: - extension (i: Int) def isZero: Boolean = i == 0 + extension (i: Int) def isZero: Boolean = i == 0 - extension (i: Int) def safeMod(x: Int): Option[Int] = - // extension method defined in same scope IntOps - if x.isZero then None - else Some(i % x) + extension (i: Int) def safeMod(x: Int): Option[Int] = + // extension method defined in same scope IntOps + if x.isZero then None + else Some(i % x) object IntOpsEx extends IntOps: - extension (i: Int) def safeDiv(x: Int): Option[Int] = - // extension method brought into scope via inheritance from IntOps - if x.isZero then None - else Some(i / x) + extension (i: Int) def safeDiv(x: Int): Option[Int] = + // extension method brought into scope via inheritance from IntOps + if x.isZero then None + else Some(i / x) trait SafeDiv: - import IntOpsEx._ // brings safeDiv and safeMod into scope + import IntOpsEx._ // brings safeDiv and safeMod into scope - extension (i: Int) def divide(d: Int) : Option[(Int, Int)] = - // extension methods imported and thus in scope - (i.safeDiv(d), i.safeMod(d)) match - case (Some(d), Some(r)) => Some((d, r)) - case _ => None + extension (i: Int) def divide(d: Int) : Option[(Int, Int)] = + // extension methods imported and thus in scope + (i.safeDiv(d), i.safeMod(d)) match + case (Some(d), Some(r)) => Some((d, r)) + case _ => None ``` By the second rule, an extension method can be made available by defining a given instance containing it, like this: @@ -204,15 +205,15 @@ By the third and fourth 
rule, an extension method is available if it is in the i ```scala class List[T]: - ... + ... object List: + ... + extension [T](xs: List[List[T]]) + def flatten: List[T] = xs.foldLeft(Nil: List[T])(_ ++ _) - extension [T](xs: List[List[T]]) - def flatten: List[T] = xs.foldLeft(Nil: List[T])(_ ++ _) - - given [T: Ordering]: Ordering[List[T]] with - extension (xs: List[T]) - def < (ys: List[T]): Boolean = ... + given [T: Ordering]: Ordering[List[T]] with + extension (xs: List[T]) + def < (ys: List[T]): Boolean = ... end List // extension method available since it is in the implicit scope @@ -244,25 +245,25 @@ An extension method can also be referenced using a simple identifier without a p ```scala extension (x: T) - def f ... = ... g ... - def g ... + def f ... = ... g ... + def g ... ``` the identifier is rewritten to `x.g`. This is also the case if `f` and `g` are the same method. Example: ```scala extension (s: String) - def position(ch: Char, n: Int): Int = - if n < s.length && s(n) != ch then position(ch, n + 1) - else n + def position(ch: Char, n: Int): Int = + if n < s.length && s(n) != ch then position(ch, n + 1) + else n ``` The recursive call `position(ch, n + 1)` expands to `s.position(ch, n + 1)` in this case. The whole extension method rewrites to ```scala def position(s: String)(ch: Char, n: Int): Int = - if n < s.length && s(n) != ch then position(s)(ch, n + 1) - else n + if n < s.length && s(n) != ch then position(s)(ch, n + 1) + else n ``` ### Syntax diff --git a/docs/docs/reference/contextual/given-imports.md b/docs/docs/reference/contextual/given-imports.md index 8e2f37fc99df..d76055fb822b 100644 --- a/docs/docs/reference/contextual/given-imports.md +++ b/docs/docs/reference/contextual/given-imports.md @@ -6,16 +6,15 @@ title: "Importing Givens" A special form of import wildcard selector is used to import given instances. Example: ```scala -object A { - class TC - given tc: TC = ??? - def f(using TC) = ??? -} - -object B { - import A._ - import A.given -} +object A: + class TC + given tc: TC = ??? + def f(using TC) = ??? + +object B: + import A._ + import A.given + ... ``` In the code above, the `import A._` clause in object `B` imports all members @@ -23,9 +22,9 @@ of `A` _except_ the given instance `tc`. Conversely, the second import `import A The two import clauses can also be merged into one: ```scala -object B { - import A.{given, _} -} +object B: + import A.{given, _} + ... ``` Generally, a normal wildcard selector `_` brings all definitions other than givens or extensions into scope @@ -59,12 +58,11 @@ Importing all given instances of a parameterized type is expressed by wildcard a For instance, assuming the object ```scala -object Instances { - given intOrd: Ordering[Int] = ... - given listOrd[T: Ordering]: Ordering[List[T]] = ... - given ec: ExecutionContext = ... - given im: Monoid[Int] = ... -} +object Instances: + given intOrd: Ordering[Int] = ... + given listOrd[T: Ordering]: Ordering[List[T]] = ... + given ec: ExecutionContext = ... + given im: Monoid[Int] = ... ``` the import clause @@ -82,21 +80,6 @@ import Instances.{im, given Ordering[?]} ``` would import `im`, `intOrd`, and `listOrd` but leave out `ec`. 
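+
+To illustrate, the following sketch uses a hypothetical `Client` object and assumes the `Instances` object above is backed by real implementations:
+
+```scala
+object Client:
+  import Instances.{given Ordering[?]}
+
+  def smallest[T: Ordering](xs: List[T]): T = xs.min
+
+  val a = smallest(List(3, 1, 2))            // resolved with the imported intOrd
+  val b = smallest(List(List(2), List(1)))   // resolved with listOrd, which itself uses intOrd
+```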
- - - ### Migration The rules for imports stated above have the consequence that a library diff --git a/docs/docs/reference/contextual/givens.md b/docs/docs/reference/contextual/givens.md index e1f9925d9f0d..91e494d86aea 100644 --- a/docs/docs/reference/contextual/givens.md +++ b/docs/docs/reference/contextual/givens.md @@ -7,25 +7,24 @@ Given instances (or, simply, "givens") define "canonical" values of certain type that serve for synthesizing arguments to [context parameters](./using-clauses.md). Example: ```scala -trait Ord[T] { - def compare(x: T, y: T): Int - extension (x: T) def < (y: T) = compare(x, y) < 0 - extension (x: T) def > (y: T) = compare(x, y) > 0 -} +trait Ord[T]: + def compare(x: T, y: T): Int + extension (x: T) def < (y: T) = compare(x, y) < 0 + extension (x: T) def > (y: T) = compare(x, y) > 0 given intOrd: Ord[Int] with - def compare(x: Int, y: Int) = - if (x < y) -1 else if (x > y) +1 else 0 + def compare(x: Int, y: Int) = + if x < y then -1 else if x > y then +1 else 0 given listOrd[T](using ord: Ord[T]): Ord[List[T]] with - def compare(xs: List[T], ys: List[T]): Int = (xs, ys) match - case (Nil, Nil) => 0 - case (Nil, _) => -1 - case (_, Nil) => +1 - case (x :: xs1, y :: ys1) => - val fst = ord.compare(x, y) - if (fst != 0) fst else compare(xs1, ys1) + def compare(xs: List[T], ys: List[T]): Int = (xs, ys) match + case (Nil, Nil) => 0 + case (Nil, _) => -1 + case (_, Nil) => +1 + case (x :: xs1, y :: ys1) => + val fst = ord.compare(x, y) + if fst != 0 then fst else compare(xs1, ys1) ``` This code defines a trait `Ord` with two given instances. `intOrd` defines @@ -42,9 +41,9 @@ The name of a given can be left out. So the definitions of the last section can also be expressed like this: ```scala given Ord[Int] with - ... + ... given [T](using Ord[T]): Ord[List[T]] with - ... + ... ``` If the name of a given is missing, the compiler will synthesize a name from the implemented type(s). @@ -99,7 +98,7 @@ Given instances can also appear in patterns. Example: for given Context <- applicationContexts do pair match - case (ctx @ given Context, y) => ... + case (ctx @ given Context, y) => ... ``` In the first fragment above, anonymous given instances for class `Context` are established by enumerating over `applicationContexts`. In the second fragment, a given `Context` instance named `ctx` is established by matching against the first half of the `pair` selector. 
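+
+For instance, continuing the second fragment, the bound given can supply context arguments inside the case body (a sketch, assuming a method `render` that requires a `Context`):
+
+```scala
+def render(y: Any)(using Context): String = ...
+
+pair match
+  case (ctx @ given Context, y) => render(y)   // `ctx` is passed to `render` implicitly
+```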
@@ -120,13 +119,13 @@ trait Tagged[A] case class Foo[A](value: Boolean) object Foo: - given fooTagged[A](using Tagged[A]): Foo[A] = Foo(true) - given fooNotTagged[A](using NotGiven[Tagged[A]]): Foo[A] = Foo(false) + given fooTagged[A](using Tagged[A]): Foo[A] = Foo(true) + given fooNotTagged[A](using NotGiven[Tagged[A]]): Foo[A] = Foo(false) @main def test() = - given Tagged[Int] with {} - assert(implicitly[Foo[Int]].value) // fooTagged is found - assert(!implicitly[Foo[String]].value) // fooNotTagged is found + given Tagged[Int] with {} + assert(implicitly[Foo[Int]].value) // fooTagged is found + assert(!implicitly[Foo[String]].value) // fooNotTagged is found ``` ## Given Instance Initialization diff --git a/docs/docs/reference/contextual/multiversal-equality.md b/docs/docs/reference/contextual/multiversal-equality.md index 53743b4d3428..c776981a8d4a 100644 --- a/docs/docs/reference/contextual/multiversal-equality.md +++ b/docs/docs/reference/contextual/multiversal-equality.md @@ -49,9 +49,8 @@ import annotation.implicitNotFound @implicitNotFound("Values of types ${L} and ${R} cannot be compared with == or !=") sealed trait CanEqual[-L, -R] -object CanEqual { - object derived extends CanEqual[Any, Any] -} +object CanEqual: + object derived extends CanEqual[Any, Any] ``` One can have several `CanEqual` given instances for a type. For example, the four @@ -159,24 +158,23 @@ we are dealing with a refinement of pre-existing, universal equality. It is best Say you want to come up with a safe version of the `contains` method on `List[T]`. The original definition of `contains` in the standard library was: ```scala -class List[+T] { - ... - def contains(x: Any): Boolean -} +class List[+T]: + ... + def contains(x: Any): Boolean ``` That uses universal equality in an unsafe way since it permits arguments of any type to be compared with the list's elements. The "obvious" alternative definition ```scala - def contains(x: T): Boolean + def contains(x: T): Boolean ``` does not work, since it refers to the covariant parameter `T` in a nonvariant context. The only variance-correct way to use the type parameter `T` in `contains` is as a lower bound: ```scala - def contains[U >: T](x: U): Boolean + def contains[U >: T](x: U): Boolean ``` This generic version of `contains` is the one used in the current (Scala 2.13) version of `List`. It looks different but it admits exactly the same applications as the `contains(x: Any)` definition we started with. However, we can make it more useful (i.e. restrictive) by adding a `CanEqual` parameter: ```scala - def contains[U >: T](x: U)(using CanEqual[T, U]): Boolean // (1) + def contains[U >: T](x: U)(using CanEqual[T, U]): Boolean // (1) ``` This version of `contains` is equality-safe! More precisely, given `x: T`, `xs: List[T]` and `y: U`, then `xs.contains(y)` is type-correct if and only if @@ -184,7 +182,7 @@ This version of `contains` is equality-safe! More precisely, given Unfortunately, the crucial ability to "lift" equality type checking from simple equality and pattern matching to arbitrary user-defined operations gets lost if we restrict ourselves to an equality class with a single type parameter. 
Consider the following signature of `contains` with a hypothetical `CanEqual1[T]` type class: ```scala - def contains[U >: T](x: U)(using CanEqual1[U]): Boolean // (2) + def contains[U >: T](x: U)(using CanEqual1[U]): Boolean // (2) ``` This version could be applied just as widely as the original `contains(x: Any)` method, since the `CanEqual1[Any]` fallback is always available! So we have gained nothing. What got lost in the transition to a single parameter type class was the original rule that `CanEqual[A, B]` is available only if neither `A` nor `B` have a reflexive `CanEqual` instance. That rule simply cannot be expressed if there is a single type parameter for `CanEqual`. diff --git a/docs/docs/reference/contextual/relationship-implicits.md b/docs/docs/reference/contextual/relationship-implicits.md index 7eb1b071731c..cefaa0c1f8f9 100644 --- a/docs/docs/reference/contextual/relationship-implicits.md +++ b/docs/docs/reference/contextual/relationship-implicits.md @@ -26,7 +26,7 @@ Given instances can be mapped to combinations of implicit objects, classes and i 2. Parameterized givens are mapped to combinations of classes and implicit methods. For instance, ```scala - given listOrd[T](using ord: Ord[T]): Ord[List[T]] with { ... } + given listOrd[T](using ord: Ord[T]): Ord[List[T]] with { ... } ``` maps to @@ -114,14 +114,14 @@ Extension methods have no direct counterpart in Scala 2, but they can be simulat ```scala extension (c: Circle) - def circumference: Double = c.radius * math.Pi * 2 + def circumference: Double = c.radius * math.Pi * 2 ``` could be simulated to some degree by ```scala implicit class CircleDecorator(c: Circle) extends AnyVal { - def circumference: Double = c.radius * math.Pi * 2 + def circumference: Double = c.radius * math.Pi * 2 } ``` @@ -153,7 +153,7 @@ one can write ```scala given stringToToken: Conversion[String, Token] with - def apply(str: String): Token = KeyWord(str) + def apply(str: String): Token = KeyWord(str) ``` or diff --git a/docs/docs/reference/contextual/type-classes.md b/docs/docs/reference/contextual/type-classes.md index 2e055e199182..e771595a28e5 100644 --- a/docs/docs/reference/contextual/type-classes.md +++ b/docs/docs/reference/contextual/type-classes.md @@ -17,47 +17,47 @@ Here's the `Monoid` type class definition: ```scala trait SemiGroup[T]: - extension (x: T) def combine (y: T): T + extension (x: T) def combine (y: T): T trait Monoid[T] extends SemiGroup[T]: - def unit: T + def unit: T ``` An implementation of this `Monoid` type class for the type `String` can be the following: ```scala given Monoid[String] with - extension (x: String) def combine (y: String): String = x.concat(y) - def unit: String = "" + extension (x: String) def combine (y: String): String = x.concat(y) + def unit: String = "" ``` Whereas for the type `Int` one could write the following: ```scala given Monoid[Int] with - extension (x: Int) def combine (y: Int): Int = x + y - def unit: Int = 0 + extension (x: Int) def combine (y: Int): Int = x + y + def unit: Int = 0 ``` This monoid can now be used as _context bound_ in the following `combineAll` method: ```scala def combineAll[T: Monoid](xs: List[T]): T = - xs.foldLeft(summon[Monoid[T]].unit)(_.combine(_)) + xs.foldLeft(summon[Monoid[T]].unit)(_.combine(_)) ``` To get rid of the `summon[...]` we can define a `Monoid` object as follows: ```scala object Monoid: - def apply[T](using m: Monoid[T]) = m + def apply[T](using m: Monoid[T]) = m ``` Which would allow to re-write the `combineAll` method this way: ```scala def 
combineAll[T: Monoid](xs: List[T]): T = - xs.foldLeft(Monoid[T].unit)(_.combine(_)) + xs.foldLeft(Monoid[T].unit)(_.combine(_)) ``` ### Functors @@ -69,7 +69,7 @@ The definition of a generic `Functor` would thus be written as: ```scala trait Functor[F[_]]: - def map[A, B](x: F[A], f: A => B): F[B] + def map[A, B](x: F[A], f: A => B): F[B] ``` Which could read as follows: "A `Functor` for the type constructor `F[_]` represents the ability to transform `F[A]` to `F[B]` through the application of function `f` with type `A => B`". We call the `Functor` definition here a _type class_. @@ -77,8 +77,8 @@ This way, we could define an instance of `Functor` for the `List` type: ```scala given Functor[List] with - def map[A, B](x: List[A], f: A => B): List[B] = - x.map(f) // List already has a `map` method + def map[A, B](x: List[A], f: A => B): List[B] = + x.map(f) // List already has a `map` method ``` With this `given` instance in scope, everywhere a `Functor` is expected, the compiler will accept a `List` to be used. @@ -87,7 +87,7 @@ For instance, we may write such a testing method: ```scala def assertTransformation[F[_]: Functor, A, B](expected: F[B], original: F[A], mapping: A => B): Unit = - assert(expected == summon[Functor[F]].map(original, mapping)) + assert(expected == summon[Functor[F]].map(original, mapping)) ``` And use it this way, for example: @@ -101,24 +101,25 @@ As in the previous example of Monoids, [`extension` methods](extension-methods.m ```scala trait Functor[F[_]]: - extension [A, B](x: F[A]) - def map(f: A => B): F[B] + extension [A, B](x: F[A]) + def map(f: A => B): F[B] ``` The instance of `Functor` for `List` now becomes: ```scala given Functor[List] with - extension [A, B](xs: List[A]) - def map(f: A => B): List[B] = - xs.map(f) // List already has a `map` method + extension [A, B](xs: List[A]) + def map(f: A => B): List[B] = + xs.map(f) // List already has a `map` method + ``` It simplifies the `assertTransformation` method: ```scala def assertTransformation[F[_]: Functor, A, B](expected: F[B], original: F[A], mapping: A => B): Unit = - assert(expected == original.map(mapping)) + assert(expected == original.map(mapping)) ``` The `map` method is now directly used on `original`. 
It is available as an extension method @@ -139,15 +140,15 @@ Here is the translation of this definition in Scala 3: ```scala trait Monad[F[_]] extends Functor[F]: - /** The unit value for a monad */ - def pure[A](x: A): F[A] + /** The unit value for a monad */ + def pure[A](x: A): F[A] - extension [A, B](x: F[A]) - /** The fundamental composition operation */ - def flatMap(f: A => F[B]): F[B] + extension [A, B](x: F[A]) + /** The fundamental composition operation */ + def flatMap(f: A => F[B]): F[B] - /** The `map` operation can now be defined in terms of `flatMap` */ - def map(f: A => B) = x.flatMap(f.andThen(pure)) + /** The `map` operation can now be defined in terms of `flatMap` */ + def map(f: A => B) = x.flatMap(f.andThen(pure)) end Monad ``` @@ -158,11 +159,11 @@ A `List` can be turned into a monad via this `given` instance: ```scala given listMonad: Monad[List] with - def pure[A](x: A): List[A] = - List(x) - extension [A, B](xs: List[A]) - def flatMap(f: A => List[B]): List[B] = - xs.flatMap(f) // rely on the existing `flatMap` method of `List` + def pure[A](x: A): List[A] = + List(x) + extension [A, B](xs: List[A]) + def flatMap(f: A => List[B]): List[B] = + xs.flatMap(f) // rely on the existing `flatMap` method of `List` ``` Since `Monad` is a subtype of `Functor`, `List` is also a functor. The Functor's `map` @@ -175,12 +176,12 @@ it explicitly. ```scala given optionMonad: Monad[Option] with - def pure[A](x: A): Option[A] = - Option(x) - extension [A, B](xo: Option[A]) - def flatMap(f: A => Option[B]): Option[B] = xo match - case Some(x) => f(x) - case None => None + def pure[A](x: A): Option[A] = + Option(x) + extension [A, B](xo: Option[A]) + def flatMap(f: A => Option[B]): Option[B] = xo match + case Some(x) => f(x) + case None => None ``` #### Reader @@ -223,12 +224,12 @@ The monad instance will look like this: ```scala given configDependentMonad: Monad[ConfigDependent] with - def pure[A](x: A): ConfigDependent[A] = - config => x + def pure[A](x: A): ConfigDependent[A] = + config => x - extension [A, B](x: ConfigDependent[A]) - def flatMap(f: A => ConfigDependent[B]): ConfigDependent[B] = - config => f(x(config))(config) + extension [A, B](x: ConfigDependent[A]) + def flatMap(f: A => ConfigDependent[B]): ConfigDependent[B] = + config => f(x(config))(config) end configDependentMonad ``` @@ -244,12 +245,12 @@ Using this syntax would turn the previous `configDependentMonad` into: ```scala given configDependentMonad: Monad[[Result] =>> Config => Result] with - def pure[A](x: A): Config => A = - config => x + def pure[A](x: A): Config => A = + config => x - extension [A, B](x: Config => A) - def flatMap(f: A => Config => B): Config => B = - config => f(x(config))(config) + extension [A, B](x: Config => A) + def flatMap(f: A => Config => B): Config => B = + config => f(x(config))(config) end configDependentMonad ``` @@ -259,12 +260,12 @@ It is likely that we would like to use this pattern with other kinds of environm ```scala given readerMonad[Ctx]: Monad[[X] =>> Ctx => X] with - def pure[A](x: A): Ctx => A = - ctx => x + def pure[A](x: A): Ctx => A = + ctx => x - extension [A, B](x: Ctx => A) - def flatMap(f: A => Ctx => B): Ctx => B = - ctx => f(x(ctx))(ctx) + extension [A, B](x: Ctx => A) + def flatMap(f: A => Ctx => B): Ctx => B = + ctx => f(x(ctx))(ctx) end readerMonad ``` diff --git a/docs/docs/reference/contextual/using-clauses.md b/docs/docs/reference/contextual/using-clauses.md index 33bfcc145728..f5ae506c80b1 100644 --- a/docs/docs/reference/contextual/using-clauses.md 
+++ b/docs/docs/reference/contextual/using-clauses.md @@ -12,7 +12,7 @@ For example, with the [given instances](./givens.md) defined previously, a `max` function that works for any arguments for which an ordering exists can be defined as follows: ```scala def max[T](x: T, y: T)(using ord: Ord[T]): T = - if ord.compare(x, y) < 0 then y else x + if ord.compare(x, y) < 0 then y else x ``` Here, `ord` is a _context parameter_ introduced with a `using` clause. The `max` function can be applied as follows: @@ -33,7 +33,7 @@ other context parameters. In that case one can avoid defining a parameter name and just provide its type. Example: ```scala def maximum[T](xs: List[T])(using Ord[T]): T = - xs.reduceLeft(max) + xs.reduceLeft(max) ``` `maximum` takes a context parameter of type `Ord` only to pass it on as an inferred argument to `max`. The name of the parameter is left out. @@ -44,12 +44,11 @@ Generally, context parameters may be defined either as a full parameter list `(p Here are two other methods that have a context parameter of type `Ord[T]`: ```scala -def descending[T](using asc: Ord[T]): Ord[T] = new Ord[T] { - def compare(x: T, y: T) = asc.compare(y, x) -} +def descending[T](using asc: Ord[T]): Ord[T] = new Ord[T]: + def compare(x: T, y: T) = asc.compare(y, x) def minimum[T](xs: List[T])(using Ord[T]) = - maximum(xs)(using descending) + maximum(xs)(using descending) ``` The `minimum` method's right hand side passes `descending` as an explicit argument to `maximum(xs)`. With this setup, the following calls are all well-formed, and they all normalize to the last one: diff --git a/docs/docs/reference/dropped-features/auto-apply.md b/docs/docs/reference/dropped-features/auto-apply.md index d7aa18ffcef3..d7bbab69329b 100644 --- a/docs/docs/reference/dropped-features/auto-apply.md +++ b/docs/docs/reference/dropped-features/auto-apply.md @@ -62,12 +62,10 @@ methods. It is no longer allowed to override a parameterless method by a nullary method or _vice versa_. Instead, both methods must agree exactly in their parameter lists. ```scala -class A { - def next(): Int -} -class B extends A { - def next: Int // overriding error: incompatible type -} +class A: + def next(): Int +class B extends A: + def next: Int // overriding error: incompatible type ``` Methods overriding Java or Scala 2 methods are again exempted from this requirement. diff --git a/docs/docs/reference/dropped-features/delayed-init.md b/docs/docs/reference/dropped-features/delayed-init.md index f391cb07143c..fd115ab08db0 100644 --- a/docs/docs/reference/dropped-features/delayed-init.md +++ b/docs/docs/reference/dropped-features/delayed-init.md @@ -7,8 +7,7 @@ The special handling of the `DelayedInit` trait is no longer supported. One consequence is that the `App` class, which used `DelayedInit` is -now partially broken. You can still use `App` for an easy and concise -way to set up a main program. Example: +now partially broken. You can still use `App` as a simple way to set up a main program. Example: ```scala object HelloWorld extends App { println("Hello, world!") @@ -19,8 +18,9 @@ some JVM's means that it will only be interpreted. So, better not use it for benchmarking! Also, if you want to access the command line arguments, you need to use an explicit `main` method for that. 
```scala -object Hello { - def main(args: Array[String]) = - println(s"Hello, ${args(0)}") -} +object Hello: + def main(args: Array[String]) = + println(s"Hello, ${args(0)}") ``` +On the other hand, Scala 3 offers a convenient alternative to such "program" objects +with [@main methods](../changed-features/main-functions.html). diff --git a/docs/docs/reference/dropped-features/do-while.md b/docs/docs/reference/dropped-features/do-while.md index 6032b4e0a774..5fdd590de5e5 100644 --- a/docs/docs/reference/dropped-features/do-while.md +++ b/docs/docs/reference/dropped-features/do-while.md @@ -15,30 +15,23 @@ while ({ ; }) () For instance, instead of ```scala do - i += 1 + i += 1 while (f(i) == 0) ``` one writes ```scala -while ({ - i += 1 - f(i) == 0 -}) () -``` -Under the [new syntax rules](../other-new-features/control-syntax), this code can be written also without the awkward `({...})` bracketing like this: -```scala -while { - i += 1 - f(i) == 0 -} do () +while + i += 1 + f(i) == 0 +do () ``` The idea to use a block as the condition of a while also gives a solution -to the "loop-and-a-half" problem. For instance: +to the "loop-and-a-half" problem. Here is another example: ```scala -while { - val x: Int = iterator.next - x >= 0 -} do print(".") +while + val x: Int = iterator.next + x >= 0 +do print(".") ``` ### Why Drop The Construct? diff --git a/docs/docs/reference/dropped-features/package-objects.md b/docs/docs/reference/dropped-features/package-objects.md index 0a0ff6e606c1..4d4b18111b80 100644 --- a/docs/docs/reference/dropped-features/package-objects.md +++ b/docs/docs/reference/dropped-features/package-objects.md @@ -21,9 +21,7 @@ def b = a._2 case class C() -implicit object Cops { - extension (x: C) def pair(y: C) = (x, y) -} +extension (x: C) def pair(y: C) = (x, y) ``` There may be several source files in a package containing such top-level definitions, and source files can freely mix top-level value, method, and type definitions with classes and objects. diff --git a/docs/docs/reference/enums/adts.md b/docs/docs/reference/enums/adts.md index 53985f616190..f12d53c97cd0 100644 --- a/docs/docs/reference/enums/adts.md +++ b/docs/docs/reference/enums/adts.md @@ -8,10 +8,9 @@ types (ADTs) and their generalized version (GADTs). Here is an example how an `Option` type can be represented as an ADT: ```scala -enum Option[+T] { - case Some(x: T) - case None -} +enum Option[+T]: + case Some(x: T) + case None ``` This example introduces an `Option` enum with a covariant type @@ -24,10 +23,9 @@ The `extends` clauses that were omitted in the example above can also be given explicitly: ```scala -enum Option[+T] { - case Some(x: T) extends Option[T] - case None extends Option[Nothing] -} +enum Option[+T]: + case Some(x: T) extends Option[T] + case None extends Option[Nothing] ``` Note that the parent type of the `None` value is inferred as @@ -61,19 +59,20 @@ As all other enums, ADTs can define methods. For instance, here is `Option` agai `isDefined` method and an `Option(...)` constructor in its companion object. 
```scala -enum Option[+T] { - case Some(x: T) - case None - - def isDefined: Boolean = this match { - case None => false - case some => true - } -} -object Option { - def apply[T >: Null](x: T): Option[T] = - if (x == null) None else Some(x) -} +enum Option[+T]: + case Some(x: T) + case None + + def isDefined: Boolean = this match + case None => false + case some => true + +object Option: + + def apply[T >: Null](x: T): Option[T] = + if x == null then None else Some(x) + +end Option ``` Enumerations and ADTs have been presented as two different @@ -84,12 +83,11 @@ implementation of `Color` either with three enum values or with a parameterized case that takes an RGB value. ```scala -enum Color(val rgb: Int) { - case Red extends Color(0xFF0000) - case Green extends Color(0x00FF00) - case Blue extends Color(0x0000FF) - case Mix(mix: Int) extends Color(mix) -} +enum Color(val rgb: Int): + case Red extends Color(0xFF0000) + case Green extends Color(0x00FF00) + case Blue extends Color(0x0000FF) + case Mix(mix: Int) extends Color(mix) ``` ### Parameter Variance of Enums @@ -102,21 +100,21 @@ The following `View` enum has a contravariant type parameter `T` and a single ca mapping a type `T` to itself: ```scala enum View[-T]: - case Refl(f: T => T) + case Refl(f: T => T) ``` The definition of `Refl` is incorrect, as it uses contravariant type `T` in the covariant result position of a function type, leading to the following error: ```scala -- Error: View.scala:2:12 -------- -2 | case Refl(f: T => T) - | ^^^^^^^^^ +2 | case Refl(f: T => T) + | ^^^^^^^^^ |contravariant type T occurs in covariant position in type T => T of value f |enum case Refl requires explicit declaration of type T to resolve this issue. ``` Because `Refl` does not declare explicit parameters, it looks to the compiler like the following: ```scala enum View[-T]: - case Refl[/*synthetic*/-T1](f: T1 => T1) extends View[T1] + case Refl[/*synthetic*/-T1](f: T1 => T1) extends View[T1] ``` The compiler has inferred for `Refl` the contravariant type parameter `T1`, following `T` in `View`. @@ -125,8 +123,8 @@ and can remedy the error by the following change to `Refl`: ```diff enum View[-T]: -- case Refl(f: T => T) -+ case Refl[R](f: R => R) extends View[R] +- case Refl(f: T => T) ++ case Refl[R](f: R => R) extends View[R] ``` Above, type `R` is chosen as the parameter for `Refl` to highlight that it has a different meaning to type `T` in `View`, but any name will do. @@ -136,10 +134,10 @@ as the function type `T => U`: ```scala enum View[-T, +U] extends (T => U): - case Refl[R](f: R => R) extends View[R, R] + case Refl[R](f: R => R) extends View[R, R] - final def apply(t: T): U = this match - case refl: Refl[r] => refl.f(t) + final def apply(t: T): U = this match + case refl: Refl[r] => refl.f(t) ``` ### Syntax of Enums diff --git a/docs/docs/reference/enums/desugarEnums.md b/docs/docs/reference/enums/desugarEnums.md index 4e613873feb4..35c40bfb2ac9 100644 --- a/docs/docs/reference/enums/desugarEnums.md +++ b/docs/docs/reference/enums/desugarEnums.md @@ -176,11 +176,10 @@ If `E` contains at least one simple case, its companion object will define in ad follows. 
```scala - private def $new(_$ordinal: Int, $name: String) = new E with runtime.EnumValue { - def ordinal = _$ordinal - override def productPrefix = $name // if not overridden in `E` - override def toString = $name // if not overridden in `E` - } + private def $new(_$ordinal: Int, $name: String) = new E with runtime.EnumValue: + def ordinal = _$ordinal + override def productPrefix = $name // if not overridden in `E` + override def toString = $name // if not overridden in `E` ``` The anonymous class also implements the abstract `Product` methods that it inherits from `Enum`. diff --git a/docs/docs/reference/enums/enums.md b/docs/docs/reference/enums/enums.md index a7200980960c..2fbff1faf0bc 100644 --- a/docs/docs/reference/enums/enums.md +++ b/docs/docs/reference/enums/enums.md @@ -6,9 +6,8 @@ title: "Enumerations" An enumeration is used to define a type consisting of a set of named values. ```scala -enum Color { - case Red, Green, Blue -} +enum Color: + case Red, Green, Blue ``` This defines a new `sealed` class, `Color`, with three values, `Color.Red`, @@ -20,11 +19,10 @@ companion object. Enums can be parameterized. ```scala -enum Color(val rgb: Int) { - case Red extends Color(0xFF0000) - case Green extends Color(0x00FF00) - case Blue extends Color(0x0000FF) -} +enum Color(val rgb: Int): + case Red extends Color(0xFF0000) + case Green extends Color(0x00FF00) + case Blue extends Color(0x0000FF) ``` As the example shows, you can define the parameter value by using an @@ -62,33 +60,32 @@ val res2: Color = Red It is possible to add your own definitions to an enum. Example: ```scala -enum Planet(mass: Double, radius: Double) { - private final val G = 6.67300E-11 - def surfaceGravity = G * mass / (radius * radius) - def surfaceWeight(otherMass: Double) = otherMass * surfaceGravity - - case Mercury extends Planet(3.303e+23, 2.4397e6) - case Venus extends Planet(4.869e+24, 6.0518e6) - case Earth extends Planet(5.976e+24, 6.37814e6) - case Mars extends Planet(6.421e+23, 3.3972e6) - case Jupiter extends Planet(1.9e+27, 7.1492e7) - case Saturn extends Planet(5.688e+26, 6.0268e7) - case Uranus extends Planet(8.686e+25, 2.5559e7) +enum Planet(mass: Double, radius: Double): + private final val G = 6.67300E-11 + def surfaceGravity = G * mass / (radius * radius) + def surfaceWeight(otherMass: Double) = otherMass * surfaceGravity + + case Mercury extends Planet(3.303e+23, 2.4397e6) + case Venus extends Planet(4.869e+24, 6.0518e6) + case Earth extends Planet(5.976e+24, 6.37814e6) + case Mars extends Planet(6.421e+23, 3.3972e6) + case Jupiter extends Planet(1.9e+27, 7.1492e7) + case Saturn extends Planet(5.688e+26, 6.0268e7) + case Uranus extends Planet(8.686e+25, 2.5559e7) case Neptune extends Planet(1.024e+26, 2.4746e7) -} +end Planet ``` It is also possible to define an explicit companion object for an enum: ```scala -object Planet { - def main(args: Array[String]) = { - val earthWeight = args(0).toDouble - val mass = earthWeight / Earth.surfaceGravity - for (p <- values) - println(s"Your weight on $p is ${p.surfaceWeight(mass)}") - } -} +object Planet: + def main(args: Array[String]) = + val earthWeight = args(0).toDouble + val mass = earthWeight / Earth.surfaceGravity + for p <- values do + println(s"Your weight on $p is ${p.surfaceWeight(mass)}") +end Planet ``` ### Compatibility with Java Enums @@ -120,23 +117,20 @@ This trait defines a single public method, `ordinal`: package scala.reflect /** A base trait of all Scala enum definitions */ -transparent trait Enum extends Any with Product with 
Serializable { +transparent trait Enum extends Any, Product, Serializable: - /** A number uniquely identifying a case of an enum */ - def ordinal: Int -} + /** A number uniquely identifying a case of an enum */ + def ordinal: Int ``` Enum values with `extends` clauses get expanded to anonymous class instances. For instance, the `Venus` value above would be defined like this: ```scala -val Venus: Planet = - new Planet(4.869E24, 6051800.0) { - def ordinal: Int = 1 - override def productPrefix: String = "Venus" - override def toString: String = "Venus" - } +val Venus: Planet = new Planet(4.869E24, 6051800.0): + def ordinal: Int = 1 + override def productPrefix: String = "Venus" + override def toString: String = "Venus" ``` Enum values without `extends` clauses all share a single implementation diff --git a/docs/docs/reference/metaprogramming/erased-terms.md b/docs/docs/reference/metaprogramming/erased-terms.md index e8aa38a5b508..dde296ebff83 100644 --- a/docs/docs/reference/metaprogramming/erased-terms.md +++ b/docs/docs/reference/metaprogramming/erased-terms.md @@ -20,13 +20,11 @@ final class Off extends State @implicitNotFound("State must be Off") class IsOff[S <: State] -object IsOff { - given isOff: IsOff[Off] = new IsOff[Off] -} +object IsOff: + given isOff: IsOff[Off] = new IsOff[Off] -class Machine[S <: State] { - def turnedOn(using IsOff[S]): Machine[On] = new Machine[On] -} +class Machine[S <: State]: + def turnedOn(using IsOff[S]): Machine[On] = new Machine[On] val m = new Machine[Off] m.turnedOn @@ -53,7 +51,7 @@ in front of a parameter list (like `given`). def methodWithErasedEv(erased ev: Ev): Int = 42 val lambdaWithErasedEv: erased Ev => Int = - (erased ev: Ev) => 42 + (erased ev: Ev) => 42 ``` `erased` parameters will not be usable for computations, though they can be used @@ -61,10 +59,10 @@ as arguments to other `erased` parameters. 
```scala def methodWithErasedInt1(erased i: Int): Int = - i + 42 // ERROR: can not use i + i + 42 // ERROR: can not use i def methodWithErasedInt2(erased i: Int): Int = - methodWithErasedInt1(i) // OK + methodWithErasedInt1(i) // OK ``` Not only parameters can be marked as erased, `val` and `def` can also be marked @@ -120,45 +118,38 @@ final class Off extends State @implicitNotFound("State must be Off") class IsOff[S <: State] -object IsOff { - // will not be called at runtime for turnedOn, the - // compiler will only require that this evidence exists - given IsOff[Off] = new IsOff[Off] -} +object IsOff: + // will not be called at runtime for turnedOn, the + // compiler will only require that this evidence exists + given IsOff[Off] = new IsOff[Off] @implicitNotFound("State must be On") class IsOn[S <: State] -object IsOn { - // will not exist at runtime, the compiler will only - // require that this evidence exists at compile time - erased given IsOn[On] = new IsOn[On] -} - -class Machine[S <: State] private { - // ev will disappear from both functions - def turnedOn(using erased ev: IsOff[S]): Machine[On] = new Machine[On] - def turnedOff(using erased ev: IsOn[S]): Machine[Off] = new Machine[Off] -} - -object Machine { - def newMachine(): Machine[Off] = new Machine[Off] -} - -object Test { - def main(args: Array[String]): Unit = { - val m = Machine.newMachine() - m.turnedOn - m.turnedOn.turnedOff - - // m.turnedOff - // ^ - // State must be On - - // m.turnedOn.turnedOn - // ^ - // State must be Off - } -} +object IsOn: + // will not exist at runtime, the compiler will only + // require that this evidence exists at compile time + erased given IsOn[On] = new IsOn[On] + +class Machine[S <: State] private (): + // ev will disappear from both functions + def turnedOn(using erased ev: IsOff[S]): Machine[On] = new Machine[On] + def turnedOff(using erased ev: IsOn[S]): Machine[Off] = new Machine[Off] + +object Machine: + def newMachine(): Machine[Off] = new Machine[Off] + +@main def test = + val m = Machine.newMachine() + m.turnedOn + m.turnedOn.turnedOff + + // m.turnedOff + // ^ + // State must be On + + // m.turnedOn.turnedOn + // ^ + // State must be Off ``` Note that in [Inline](./inline.md) we discussed `erasedValue` and inline @@ -172,32 +163,28 @@ sealed trait State final class On extends State final class Off extends State -class Machine[S <: State] { - transparent inline def turnOn(): Machine[On] = - inline erasedValue[S] match { - case _: Off => new Machine[On] - case _: On => error("Turning on an already turned on machine") - } - transparent inline def turnOff(): Machine[Off] = - inline erasedValue[S] match { - case _: On => new Machine[Off] - case _: Off => error("Turning off an already turned off machine") - } -} - -object Machine { - def newMachine(): Machine[Off] = { - println("newMachine") - new Machine[Off] - } -} - -object Test { - val m = Machine.newMachine() - m.turnOn() - m.turnOn().turnOff() - m.turnOn().turnOn() // error: Turning on an already turned on machine -} +class Machine[S <: State]: + transparent inline def turnOn(): Machine[On] = + inline erasedValue[S] match + case _: Off => new Machine[On] + case _: On => error("Turning on an already turned on machine") + + transparent inline def turnOff(): Machine[Off] = + inline erasedValue[S] match + case _: On => new Machine[Off] + case _: Off => error("Turning off an already turned off machine") + +object Machine: + def newMachine(): Machine[Off] = + println("newMachine") + new Machine[Off] +end Machine + +@main def test 
= + val m = Machine.newMachine() + m.turnOn() + m.turnOn().turnOff() + m.turnOn().turnOn() // error: Turning on an already turned on machine ``` [More Details](./erased-terms-spec.md) diff --git a/docs/docs/reference/metaprogramming/inline.md b/docs/docs/reference/metaprogramming/inline.md index 41a6bbda1a0a..07bb3b5deac5 100644 --- a/docs/docs/reference/metaprogramming/inline.md +++ b/docs/docs/reference/metaprogramming/inline.md @@ -9,25 +9,23 @@ title: Inline definition will be inlined at the point of use. Example: ```scala -object Config { - inline val logging = false -} +object Config: + inline val logging = false -object Logger { +object Logger: - private var indent = 0 + private var indent = 0 - inline def log[T](msg: String, indentMargin: =>Int)(op: => T): T = - if (Config.logging) { - println(s"${" " * indent}start $msg") - indent += indentMargin - val result = op - indent -= indentMargin - println(s"${" " * indent}$msg = $result") - result - } - else op -} + inline def log[T](msg: String, indentMargin: =>Int)(op: => T): T = + if Config.logging then + println(s"${" " * indent}start $msg") + indent += indentMargin + val result = op + indent -= indentMargin + println(s"${" " * indent}$msg = $result") + result + else op +end Logger ``` The `Config` object contains a definition of the **inline value** `logging`. @@ -42,7 +40,7 @@ method will always be inlined at the point of call. In the inlined code, an `if-then-else` with a constant condition will be rewritten to its `then`- or `else`-part. Consequently, in the `log` method above the -`if (Config.logging)` with `Config.logging == true` will get rewritten into its +`if Config.logging` with `Config.logging == true` will get rewritten into its `then`-part. Here's an example: @@ -50,21 +48,19 @@ Here's an example: ```scala var indentSetting = 2 -def factorial(n: BigInt): BigInt = { - log(s"factorial($n)", indentSetting) { - if (n == 0) 1 - else n * factorial(n - 1) - } -} +def factorial(n: BigInt): BigInt = + log(s"factorial($n)", indentSetting) { + if n == 0 then 1 + else n * factorial(n - 1) + } ``` If `Config.logging == false`, this will be rewritten (simplified) to: ```scala -def factorial(n: BigInt): BigInt = { - if (n == 0) 1 - else n * factorial(n - 1) -} +def factorial(n: BigInt): BigInt = + if n == 0 then 1 + else n * factorial(n - 1) ``` As you notice, since neither `msg` or `indentMargin` were used, they do not @@ -76,17 +72,16 @@ Consequently, the code was inlined directly and the call was beta-reduced. In the `true` case the code will be rewritten to: ```scala -def factorial(n: BigInt): BigInt = { - val msg = s"factorial($n)" - println(s"${" " * indent}start $msg") - Logger.inline$indent_=(indent.+(indentSetting)) - val result = - if (n == 0) 1 - else n * factorial(n - 1) - Logger.inline$indent_=(indent.-(indentSetting)) - println(s"${" " * indent}$msg = $result") - result -} +def factorial(n: BigInt): BigInt = + val msg = s"factorial($n)" + println(s"${" " * indent}start $msg") + Logger.inline$indent_=(indent.+(indentSetting)) + val result = + if n == 0 then 1 + else n * factorial(n - 1) + Logger.inline$indent_=(indent.-(indentSetting)) + println(s"${" " * indent}$msg = $result") + result ``` Note, that the by-value parameter `msg` is evaluated only once, per the usual Scala @@ -101,14 +96,12 @@ exponent `n`, the following method for `power` will be implemented by straight inline code without any loop or recursion. 
```scala -inline def power(x: Double, n: Int): Double = { - if (n == 0) 1.0 - else if (n == 1) x - else { - val y = power(x, n / 2) - if (n % 2 == 0) y * y else y * y * x - } -} +inline def power(x: Double, n: Int): Double = + if n == 0 then 1.0 + else if n == 1 then x + else + val y = power(x, n / 2) + if n % 2 == 0 then y * y else y * y * x power(expr, 10) // translates to @@ -131,8 +124,8 @@ parameters: ```scala inline def funkyAssertEquals(actual: Double, expected: =>Double, inline delta: Double): Unit = - if (actual - expected).abs > delta then - throw new AssertionError(s"difference between ${expected} and ${actual} was larger than ${delta}") + if (actual - expected).abs > delta then + throw new AssertionError(s"difference between ${expected} and ${actual} was larger than ${delta}") funkyAssertEquals(computeActual(), computeExpected(), computeDelta()) // translates to @@ -150,14 +143,14 @@ Inline methods can override other non-inline methods. The rules are as follows: 1. If an inline method `f` implements or overrides another, non-inline method, the inline method can also be invoked at runtime. For instance, consider the scenario: ```scala - abstract class A { - def f(): Int - def g(): Int = f() - } - class B extends A { - inline def f() = 22 - override inline def g() = f() + 11 - } + abstract class A: + def f(): Int + def g(): Int = f() + + class B extends A: + inline def f() = 22 + override inline def g() = f() + 11 + val b = B() val a: A = b // inlined invocatons @@ -174,12 +167,12 @@ Inline methods can override other non-inline methods. The rules are as follows: 3. Inline methods can also be abstract. An abstract inline method can be implemented only by other inline methods. It cannot be invoked directly: ```scala - abstract class A { - inline def f(): Int - } - object B extends A { - inline def f(): Int = 22 - } + abstract class A: + inline def f(): Int + + object B extends A: + inline def f(): Int = 22 + B.f() // OK val a: A = B a.f() // error: cannot inline f() in A. @@ -240,13 +233,11 @@ inline val four: 4 = 4 It is also possible to have inline vals of types that do not have a syntax, such as `Short(4)`. ```scala -trait InlineConstants { - inline val myShort: Short -} +trait InlineConstants: + inline val myShort: Short -object Constants extends InlineConstants { - inline val myShort/*: Short(4)*/ = 4 -} +object Constants extends InlineConstants + inline val myShort/*: Short(4)*/ = 4 ``` ## Transparent Inline Methods @@ -257,12 +248,11 @@ specialized to a more precise type upon expansion. Example: ```scala class A -class B extends A { - def m() = true -} +class B extends A: + def m() = true transparent inline def choose(b: Boolean): A = - if b then new A() else new B() + if b then new A() else new B() val obj1 = choose(true) // static type is A val obj2 = choose(false) // static type is B @@ -302,15 +292,15 @@ Example: ```scala inline def update(delta: Int) = - inline if (delta >= 0) increaseBy(delta) - else decreaseBy(-delta) + inline if delta >= 0 then increaseBy(delta) + else decreaseBy(-delta) ``` A call `update(22)` would rewrite to `increaseBy(22)`. But if `update` was called with a value that was not a compile-time constant, we would get a compile time error like the one below: ```scala - | inline if (delta >= 0) ??? + | inline if delta >= 0 then ??? 
| ^ | cannot reduce inline if | its condition @@ -331,10 +321,10 @@ The example below defines an inline method with a single inline match expression that picks a case based on its static type: ```scala -transparent inline def g(x: Any): Any = inline x match { - case x: String => (x, x) // Tuple2[String, String](x, x) - case x: Double => x -} +transparent inline def g(x: Any): Any = + inline x match + case x: String => (x, x) // Tuple2[String, String](x, x) + case x: Double => x g(1.0d) // Has type 1.0d which is a subtype of Double g("test") // Has type (String, String) @@ -351,10 +341,10 @@ trait Nat case object Zero extends Nat case class Succ[N <: Nat](n: N) extends Nat -transparent inline def toInt(n: Nat): Int = inline n match { - case Zero => 0 - case Succ(n1) => toInt(n1) + 1 -} +transparent inline def toInt(n: Nat): Int = + inline n match + case Zero => 0 + case Succ(n1) => toInt(n1) + 1 final val natTwo = toInt(Succ(Succ(Zero))) val intTwo: 2 = natTwo @@ -375,10 +365,9 @@ type. import scala.compiletime.{constValue, S} transparent inline def toIntC[N]: Int = - inline constValue[N] match { - case 0 => 0 - case _: S[n1] => 1 + toIntC[n1] - } + inline constValue[N] match + case 0 => 0 + case _: S[n1] => 1 + toIntC[n1] final val ctwo = toIntC[2] ``` @@ -411,26 +400,26 @@ Using `erasedValue`, we can then define `defaultValue` as follows: ```scala import scala.compiletime.erasedValue -inline def defaultValue[T] = inline erasedValue[T] match { - case _: Byte => Some(0: Byte) - case _: Char => Some(0: Char) - case _: Short => Some(0: Short) - case _: Int => Some(0) - case _: Long => Some(0L) - case _: Float => Some(0.0f) - case _: Double => Some(0.0d) - case _: Boolean => Some(false) - case _: Unit => Some(()) - case _ => None -} +inline def defaultValue[T] = + inline erasedValue[T] match + case _: Byte => Some(0: Byte) + case _: Char => Some(0: Char) + case _: Short => Some(0: Short) + case _: Int => Some(0) + case _: Long => Some(0L) + case _: Float => Some(0.0f) + case _: Double => Some(0.0d) + case _: Boolean => Some(false) + case _: Unit => Some(()) + case _ => None ``` Then: ```scala - val dInt: Some[Int] = defaultValue[Int] - val dDouble: Some[Double] = defaultValue[Double] - val dBoolean: Some[Boolean] = defaultValue[Boolean] - val dAny: None.type = defaultValue[Any] +val dInt: Some[Int] = defaultValue[Int] +val dDouble: Some[Double] = defaultValue[Double] +val dBoolean: Some[Boolean] = defaultValue[Boolean] +val dAny: None.type = defaultValue[Any] ``` As another example, consider the type-level version of `toInt` below: @@ -441,10 +430,9 @@ Match_ section above. Here is how `toIntT` can be defined: ```scala transparent inline def toIntT[N <: Nat]: Int = - inline scala.compiletime.erasedValue[N] match { - case _: Zero.type => 0 - case _: Succ[n] => toIntT[n] + 1 - } + inline scala.compiletime.erasedValue[N] match + case _: Zero.type => 0 + case _: Succ[n] => toIntT[n] + 1 final val two = toIntT[Succ[Succ[Zero.type]]] ``` @@ -468,18 +456,18 @@ produces an error message containing the given `msgStr`. 
```scala import scala.compiletime.{error, code} -inline def fail() = { - error("failed for a reason") -} +inline def fail() = + error("failed for a reason") + fail() // error: failed for a reason ``` or ```scala -inline def fail(p1: => Any) = { - error(code"failed on: $p1") -} +inline def fail(p1: => Any) = + error(code"failed on: $p1") + fail(identity("foo")) // error: failed on: identity("foo") ``` @@ -521,10 +509,9 @@ import scala.compiletime.ops._ import scala.annotation.infix -type +[X <: Int | String, Y <: Int | String] = (X, Y) match { - case (Int, Int) => int.+[X, Y] - case (String, String) => string.+[X, Y] -} +type +[X <: Int | String, Y <: Int | String] = (X, Y) match + case (Int, Int) => int.+[X, Y] + case (String, String) => string.+[X, Y] val concat: "a" + "b" = "ab" val addition: 1 + 1 = 2 @@ -542,12 +529,12 @@ not. We can create a set of implicit definitions like this: ```scala trait SetFor[T, S <: Set[T]] -class LowPriority { - implicit def hashSetFor[T]: SetFor[T, HashSet[T]] = ... -} -object SetsFor extends LowPriority { - implicit def treeSetFor[T: Ordering]: SetFor[T, TreeSet[T]] = ... -} + +class LowPriority: + implicit def hashSetFor[T]: SetFor[T, HashSet[T]] = ... + +object SetsFor extends LowPriority: + implicit def treeSetFor[T: Ordering]: SetFor[T, TreeSet[T]] = ... ``` Clearly, this is not pretty. Besides all the usual indirection of implicit @@ -572,8 +559,8 @@ would use it as follows: import scala.compiletime.summonFrom inline def setFor[T]: Set[T] = summonFrom { - case ord: Ordering[T] => new TreeSet[T](using ord) - case _ => new HashSet[T] + case ord: Ordering[T] => new TreeSet[T](using ord) + case _ => new HashSet[T] } ``` A `summonFrom` call takes a pattern matching closure as argument. All patterns @@ -586,8 +573,8 @@ Alternatively, one can also use a pattern-bound given instance, which avoids the import scala.compiletime.summonFrom inline def setFor[T]: Set[T] = summonFrom { - case given Ordering[T] => new TreeSet[T] - case _ => new HashSet[T] + case given Ordering[T] => new TreeSet[T] + case _ => new HashSet[T] } ``` @@ -603,16 +590,16 @@ println(setFor[String].getClass) // prints class scala.collection.immutable.Tree ``` **Note** `summonFrom` applications can raise ambiguity errors. Consider the following -code with two implicit values in scope of type `A`. The pattern match in `f` will raise +code with two givens in scope of type `A`. The pattern match in `f` will raise an ambiguity error of `f` is applied. ```scala class A -implicit val a1: A = new A -implicit val a2: A = new A +given a1: A = new A +given a2: A = new A inline def f: Any = summonFrom { - case given _: A => ??? // error: ambiguous implicits + case given _: A => ??? // error: ambiguous givens } ``` @@ -621,7 +608,7 @@ inline def f: Any = summonFrom { The shorthand `summonInline` provides a simple way to write a `summon` that is delayed until the call is inlined. ```scala transparent inline def summonInline[T]: T = summonFrom { - case t: T => t + case t: T => t } ``` diff --git a/docs/docs/reference/metaprogramming/macros-spec.md b/docs/docs/reference/metaprogramming/macros-spec.md index 4747556b27e6..aa3c59014949 100644 --- a/docs/docs/reference/metaprogramming/macros-spec.md +++ b/docs/docs/reference/metaprogramming/macros-spec.md @@ -175,25 +175,25 @@ implementation of `power` otherwise. 
import scala.quoted._ inline def power(x: Double, n: Int): Double = - ${ powerExpr('x, 'n) } + ${ powerExpr('x, 'n) } private def powerExpr(x: Expr[Double], n: Expr[Int]) (using Quotes): Expr[Double] = - n.value match - case Some(m) => powerExpr(x, m) - case _ => '{ dynamicPower($x, $n) } + n.value match + case Some(m) => powerExpr(x, m) + case _ => '{ dynamicPower($x, $n) } private def powerExpr(x: Expr[Double], n: Int) (using Quotes): Expr[Double] = - if n == 0 then '{ 1.0 } - else if n == 1 then x - else if n % 2 == 0 then '{ val y = $x * $x; ${ powerExpr('y, n / 2) } } - else '{ $x * ${ powerExpr(x, n - 1) } } + if n == 0 then '{ 1.0 } + else if n == 1 then x + else if n % 2 == 0 then '{ val y = $x * $x; ${ powerExpr('y, n / 2) } } + else '{ $x * ${ powerExpr(x, n - 1) } } private def dynamicPower(x: Double, n: Int): Double = - if n == 0 then 1.0 - else if n % 2 == 0 then dynamicPower(x * x, n / 2) - else x * dynamicPower(x, n - 1) + if n == 0 then 1.0 + else if n % 2 == 0 then dynamicPower(x * x, n / 2) + else x * dynamicPower(x, n - 1) ``` In the above, the method `.value` maps a constant expression of the type @@ -204,17 +204,15 @@ that maps expressions over functions to functions over expressions can be implemented in user code: ```scala given AsFunction1[T, U]: Conversion[Expr[T => U], Expr[T] => Expr[U]] with - def apply(f: Expr[T => U]): Expr[T] => Expr[U] = - (x: Expr[T]) => f match { - case Lambda(g) => g(x) - case _ => '{ ($f)($x) } - } + def apply(f: Expr[T => U]): Expr[T] => Expr[U] = + (x: Expr[T]) => f match + case Lambda(g) => g(x) + case _ => '{ ($f)($x) } ``` This assumes an extractor ```scala -object Lambda { - def unapply[T, U](x: Expr[T => U]): Option[Expr[T] => Expr[U]] -} +object Lambda: + def unapply[T, U](x: Expr[T => U]): Option[Expr[T] => Expr[U]] ``` Once we allow inspection of code via extractors, it’s tempting to also add constructors that create typed trees directly without going diff --git a/docs/docs/reference/metaprogramming/macros.md b/docs/docs/reference/metaprogramming/macros.md index df70302b7422..046bf797c3fc 100644 --- a/docs/docs/reference/metaprogramming/macros.md +++ b/docs/docs/reference/metaprogramming/macros.md @@ -34,11 +34,11 @@ prints it again in an error message if it evaluates to `false`. import scala.quoted._ inline def assert(inline expr: Boolean): Unit = - ${ assertImpl('expr) } + ${ assertImpl('expr) } def assertImpl(expr: Expr[Boolean])(using Quotes) = '{ - if (!$expr) - throw new AssertionError(s"failed assertion: ${${ showExpr(expr) }}") + if !$expr then + throw AssertionError(s"failed assertion: ${${ showExpr(expr) }}") } def showExpr(expr: Expr[Boolean])(using Quotes): Expr[String] = @@ -144,10 +144,10 @@ These conversions can be implemented as follows: ```scala def to[T: Type, R: Type](f: Expr[T] => Expr[R])(using Quotes): Expr[T => R] = - '{ (x: T) => ${ f('x) } } + '{ (x: T) => ${ f('x) } } def from[T: Type, R: Type](f: Expr[T => R])(using Quotes): Expr[T] => Expr[R] = - (x: Expr[T]) => '{ $f($x) } + (x: Expr[T]) => '{ $f($x) } ``` Note how the fundamental phase consistency principle works in two @@ -169,10 +169,9 @@ In some cases we want to remove the lambda from the code, for this we provide th describing a function into a function mapping trees to trees. ```scala -object Expr { - ... - def betaReduce[...](...)(...): ... = ... -} +object Expr: + ... + def betaReduce[...](...)(...): ... = ... ``` The definition of `Expr.betaReduce(f)(x)` is assumed to be functionally the same as @@ -196,7 +195,7 @@ usage. 
But the code can be rewritten by adding a binding of a `Type[T]` tag: ```scala def to[T, R](f: Expr[T] => Expr[R])(using Type[T], Type[R], Quotes): Expr[T => R] = - '{ (x: T) => ${ f('x) } } + '{ (x: T) => ${ f('x) } } ``` In this version of `to`, the type of `x` is now the result of @@ -209,14 +208,14 @@ For instance, the user-level definition of `to`: ```scala def to[T, R](f: Expr[T] => Expr[R])(using t: Type[T], r: Type[R])(using Quotes): Expr[T => R] = - '{ (x: T) => ${ f('x) } } + '{ (x: T) => ${ f('x) } } ``` would be rewritten to ```scala def to[T, R](f: Expr[T] => Expr[R])(using t: Type[T], r: Type[R])(using Quotes): Expr[T => R] = - '{ (x: t.Underlying) => ${ f('x) } } + '{ (x: t.Underlying) => ${ f('x) } } ``` The `summon` query succeeds because there is a given instance of @@ -233,12 +232,12 @@ a compiler through staging. ```scala import scala.quoted._ -enum Exp { - case Num(n: Int) - case Plus(e1: Exp, e2: Exp) - case Var(x: String) - case Let(x: String, e: Exp, in: Exp) -} +enum Exp: + case Num(n: Int) + case Plus(e1: Exp, e2: Exp) + case Var(x: String) + case Let(x: String, e: Exp, in: Exp) + import Exp._ ``` @@ -257,16 +256,15 @@ The compiler takes an environment that maps variable names to Scala `Expr`s. ```scala import scala.quoted._ -def compile(e: Exp, env: Map[String, Expr[Int]])(using Quotes): Expr[Int] = e match { - case Num(n) => - Expr(n) - case Plus(e1, e2) => - '{ ${ compile(e1, env) } + ${ compile(e2, env) } } - case Var(x) => - env(x) - case Let(x, e, body) => - '{ val y = ${ compile(e, env) }; ${ compile(body, env + (x -> 'y)) } } -} +def compile(e: Exp, env: Map[String, Expr[Int]])(using Quotes): Expr[Int] = e match + case Num(n) => + Expr(n) + case Plus(e1, e2) => + '{ ${ compile(e1, env) } + ${ compile(e2, env) } } + case Var(x) => + env(x) + case Let(x, e, body) => + '{ val y = ${ compile(e, env) }; ${ compile(body, env + (x -> 'y)) } } ``` Running `compile(letExp, Map())` would yield the following Scala code: @@ -286,12 +284,10 @@ The `Expr.apply` method is defined in package `quoted`: ```scala package quoted -object Expr { - ... - def apply[T: ToExpr](x: T)(using Quotes): Expr[T] = - summon[ToExpr[T]].toExpr(x) - ... -} +object Expr: + ... + def apply[T: ToExpr](x: T)(using Quotes): Expr[T] = + summon[ToExpr[T]].toExpr(x) ``` This method says that values of types implementing the `ToExpr` type class can be @@ -308,10 +304,9 @@ knowing anything about the representation of `Expr` trees. For instance, here is a possible instance of `ToExpr[Boolean]`: ```scala -given ToExpr[Boolean] { - def toExpr(b: Boolean) = - if (b) '{ true } else '{ false } -} +given ToExpr[Boolean] with + def toExpr(b: Boolean) = + if b then '{ true } else '{ false } ``` Once we can lift bits, we can work our way up. For instance, here is a @@ -319,15 +314,13 @@ possible implementation of `ToExpr[Int]` that does not use the underlying tree machinery: ```scala -given ToExpr[Int] { - def toExpr(n: Int) = n match { - case Int.MinValue => '{ Int.MinValue } - case _ if n < 0 => '{ - ${ toExpr(-n) } } - case 0 => '{ 0 } - case _ if n % 2 == 0 => '{ ${ toExpr(n / 2) } * 2 } - case _ => '{ ${ toExpr(n / 2) } * 2 + 1 } - } -} +given ToExpr[Int] with + def toExpr(n: Int) = n match + case Int.MinValue => '{ Int.MinValue } + case _ if n < 0 => '{ - ${ toExpr(-n) } } + case 0 => '{ 0 } + case _ if n % 2 == 0 => '{ ${ toExpr(n / 2) } * 2 } + case _ => '{ ${ toExpr(n / 2) } * 2 + 1 } ``` Since `ToExpr` is a type class, its instances can be conditional. 
For example, @@ -335,10 +328,9 @@ a `List` is liftable if its element type is: ```scala given [T: ToExpr : Type]: ToExpr[List[T]] with - def toExpr(xs: List[T]) = xs match { - case head :: tail => '{ ${ Expr(head) } :: ${ toExpr(tail) } } - case Nil => '{ Nil: List[T] } - } + def toExpr(xs: List[T]) = xs match + case head :: tail => '{ ${ Expr(head) } :: ${ toExpr(tail) } } + case Nil => '{ Nil: List[T] } ``` In the end, `ToExpr` resembles very much a serialization @@ -350,10 +342,9 @@ analogue of lifting. Using lifting, we can now give the missing definition of `showExpr` in the introductory example: ```scala -def showExpr[T](expr: Expr[T])(using Quotes): Expr[String] = { - val code: String = expr.show - Expr(code) -} +def showExpr[T](expr: Expr[T])(using Quotes): Expr[String] = + val code: String = expr.show + Expr(code) ``` That is, the `showExpr` method converts its `Expr` argument to a string (`code`), and lifts @@ -400,31 +391,26 @@ a macro library and a quoted program. For instance, here’s the `assert` macro again together with a program that calls `assert`. ```scala -object Macros { +object Macros: - inline def assert(inline expr: Boolean): Unit = - ${ assertImpl('expr) } + inline def assert(inline expr: Boolean): Unit = + ${ assertImpl('expr) } - def assertImpl(expr: Expr[Boolean])(using Quotes) = - val failMsg: Expr[String] = Expr("failed assertion: " + expr.show) - '{ if !($expr) then throw new AssertionError($failMsg) } -} + def assertImpl(expr: Expr[Boolean])(using Quotes) = + val failMsg: Expr[String] = Expr("failed assertion: " + expr.show) + '{ if !($expr) then throw new AssertionError($failMsg) } -object App { - val program = { - val x = 1 - Macros.assert(x != 0) - } -} +@main def program = + val x = 1 + Macros.assert(x != 0) ``` Inlining the `assert` function would give the following program: ```scala -val program = { - val x = 1 - ${ Macros.assertImpl('{ x != 0) } } -} +@main def program = + val x = 1 + ${ Macros.assertImpl('{ x != 0 }) } ``` The example is only phase correct because `Macros` is a global value and @@ -444,7 +430,7 @@ compiled before they are used. Hence, conceptually the program part should be treated by the compiler as if it was quoted: ```scala -val program = '{ +@main def program = '{ val x = 1 ${ Macros.assertImpl('{ x != 0 }) } } @@ -479,15 +465,15 @@ implementation of the `power` function that makes use of a statically known expo inline def power(x: Double, inline n: Int) = ${ powerCode('x, 'n) } private def powerCode(x: Expr[Double], n: Expr[Int])(using Quotes): Expr[Double] = - n.value match - case Some(m) => powerCode(x, m) - case None => '{ Math.pow($x, $n.toDouble) } + n.value match + case Some(m) => powerCode(x, m) + case None => '{ Math.pow($x, $n.toDouble) } private def powerCode(x: Expr[Double], n: Int)(using Quotes): Expr[Double] = - if (n == 0) '{ 1.0 } - else if (n == 1) x - else if (n % 2 == 0) '{ val y = $x * $x; ${ powerCode('y, n / 2) } } - else '{ $x * ${ powerCode(x, n - 1) } } + if n == 0 then '{ 1.0 } + else if n == 1 then x + else if n % 2 == 0 then '{ val y = $x * $x; ${ powerCode('y, n / 2) } } + else '{ $x * ${ powerCode(x, n - 1) } } ``` ## Scope Extrusion @@ -539,24 +525,25 @@ Assume we have two methods, one `map` that takes an `Expr[Array[T]]` and a function `f` and one `sum` that performs a sum by delegating to `map`.
```scala -object Macros { - def map[T](arr: Expr[Array[T]], f: Expr[T] => Expr[Unit])(using Type[T], Quotes): Expr[Unit] = '{ - var i: Int = 0 - while (i < ($arr).length) { - val element: T = ($arr)(i) - ${f('element)} - i += 1 - } - } +object Macros: - def sum(arr: Expr[Array[Int]])(using Quotes): Expr[Int] = '{ - var sum = 0 - ${ map(arr, x => '{sum += $x}) } - sum - } + def map[T](arr: Expr[Array[T]], f: Expr[T] => Expr[Unit])(using Type[T], Quotes): Expr[Unit] = '{ + var i: Int = 0 + while i < ($arr).length do + val element: T = ($arr)(i) + ${f('element)} + i += 1 + } - inline def sum_m(arr: Array[Int]): Int = ${sum('arr)} -} + def sum(arr: Expr[Array[Int]])(using Quotes): Expr[Int] = '{ + var sum = 0 + ${ map(arr, x => '{sum += $x}) } + sum + } + + inline def sum_m(arr: Array[Int]): Int = ${sum('arr)} + +end Macros ``` A call to `sum_m(Array(1,2,3))` will first inline `sum_m`: @@ -595,11 +582,10 @@ val arr: Array[Int] = Array.apply(1, [2,3 : Int]:Int*) var sum = 0 val f = x => '{sum += $x} var i: Int = 0 -while (i < (arr).length) { - val element: Int = (arr)(i) - sum += element - i += 1 -} +while i < (arr).length do + val element: Int = (arr)(i) + sum += element + i += 1 sum ``` @@ -609,11 +595,10 @@ Finally cleanups and dead code elimination: val arr: Array[Int] = Array.apply(1, [2,3 : Int]:Int*) var sum = 0 var i: Int = 0 -while (i < arr.length) { - val element: Int = arr(i) - sum += element - i += 1 -} +while i < arr.length do + val element: Int = arr(i) + sum += element + i += 1 sum ``` @@ -626,12 +611,10 @@ in a quote context. For this we simply provide `scala.quoted.Expr.summon`: import scala.collection.immutable.{ TreeSet, HashSet } inline def setFor[T]: Set[T] = ${ setForExpr[T] } -def setForExpr[T: Type](using Quotes): Expr[Set[T]] = { - Expr.summon[Ordering[T]] match { - case Some(ord) => '{ new TreeSet[T]()($ord) } - case _ => '{ new HashSet[T] } - } -} +def setForExpr[T: Type](using Quotes): Expr[Set[T]] = + Expr.summon[Ordering[T]] match + case Some(ord) => '{ new TreeSet[T]()($ord) } + case _ => '{ new HashSet[T] } ``` ## Relationship with Whitebox Inline @@ -644,9 +627,9 @@ inline method that can calculate either a value of type `Int` or a value of type transparent inline def defaultOf(inline str: String) = ${ defaultOfImpl('str) } def defaultOfImpl(strExpr: Expr[String])(using Quotes): Expr[Any] = - strExpr.valueOrError match - case "int" => '{1} - case "string" => '{"a"} + strExpr.valueOrError match + case "int" => '{1} + case "string" => '{"a"} // in a separate file val a: Int = defaultOf("int") @@ -680,18 +663,18 @@ These could be used in the following way to optimize any call to `sum` that has ```scala inline def sum(inline args: Int*): Int = ${ sumExpr('args) } -private def sumExpr(argsExpr: Expr[Seq[Int]])(using Quotes): Expr[Int] = argsExpr match { - case Varargs(args @ Exprs(argValues)) => - // args is of type Seq[Expr[Int]] - // argValues is of type Seq[Int] - Expr(argValues.sum) // precompute result of sum - case Varargs(argExprs) => // argExprs is of type Seq[Expr[Int]] - val staticSum: Int = argExprs.map(_.value.getOrElse(0)).sum - val dynamicSum: Seq[Expr[Int]] = argExprs.filter(_.value.isEmpty) - dynamicSum.foldLeft(Expr(staticSum))((acc, arg) => '{ $acc + $arg }) - case _ => - '{ $argsExpr.sum } -} +private def sumExpr(argsExpr: Expr[Seq[Int]])(using Quotes): Expr[Int] = + argsExpr match + case Varargs(args @ Exprs(argValues)) => + // args is of type Seq[Expr[Int]] + // argValues is of type Seq[Int] + Expr(argValues.sum) // precompute result of sum + 
case Varargs(argExprs) => // argExprs is of type Seq[Expr[Int]] + val staticSum: Int = argExprs.map(_.value.getOrElse(0)).sum + val dynamicSum: Seq[Expr[Int]] = argExprs.filter(_.value.isEmpty) + dynamicSum.foldLeft(Expr(staticSum))((acc, arg) => '{ $acc + $arg }) + case _ => + '{ $argsExpr.sum } ``` ### Quoted patterns @@ -703,32 +686,30 @@ For example ```scala optimize { - sum(sum(1, a, 2), 3, b) + sum(sum(1, a, 2), 3, b) } // should be optimized to 6 + a + b ``` ```scala def sum(args: Int*): Int = args.sum inline def optimize(inline arg: Int): Int = ${ optimizeExpr('arg) } -private def optimizeExpr(body: Expr[Int])(using Quotes): Expr[Int] = body match { - // Match a call to sum without any arguments - case '{ sum() } => Expr(0) - // Match a call to sum with an argument $n of type Int. n will be the Expr[Int] representing the argument. - case '{ sum($n) } => n - // Match a call to sum and extracts all its args in an `Expr[Seq[Int]]` - case '{ sum(${Varargs(args)}: _*) } => sumExpr(args) - case body => body -} -private def sumExpr(args1: Seq[Expr[Int]])(using Quotes): Expr[Int] = { - def flatSumArgs(arg: Expr[Int]): Seq[Expr[Int]] = arg match { +private def optimizeExpr(body: Expr[Int])(using Quotes): Expr[Int] = body match + // Match a call to sum without any arguments + case '{ sum() } => Expr(0) + // Match a call to sum with an argument $n of type Int. n will be the Expr[Int] representing the argument. + case '{ sum($n) } => n + // Match a call to sum and extracts all its args in an `Expr[Seq[Int]]` + case '{ sum(${Varargs(args)}: _*) } => sumExpr(args) + case body => body + +private def sumExpr(args1: Seq[Expr[Int]])(using Quotes): Expr[Int] = + def flatSumArgs(arg: Expr[Int]): Seq[Expr[Int]] = arg match case '{ sum(${Varargs(subArgs)}: _*) } => subArgs.flatMap(flatSumArgs) case arg => Seq(arg) - } - val args2 = args1.flatMap(flatSumArgs) - val staticSum: Int = args2.map(_.value.getOrElse(0)).sum - val dynamicSum: Seq[Expr[Int]] = args2.filter(_.value.isEmpty) - dynamicSum.foldLeft(Expr(staticSum))((acc, arg) => '{ $acc + $arg }) -} + val args2 = args1.flatMap(flatSumArgs) + val staticSum: Int = args2.map(_.value.getOrElse(0)).sum + val dynamicSum: Seq[Expr[Int]] = args2.filter(_.value.isEmpty) + dynamicSum.foldLeft(Expr(staticSum))((acc, arg) => '{ $acc + $arg }) ``` ### Recovering precise types using patterns @@ -737,47 +718,45 @@ Sometimes it is necessary to get a more precise type for an expression. This can ```scala def f(expr: Expr[Any])(using Quotes) = - expr match - case '{ $x: t } => - // If the pattern match succeeds, then there is some type `t` such that - // - `x` is bound to a variable of type `Expr[t]` - // - `t` is bound to a new type `t` and a given instance `Type[t]` is provided for it - // That is, we have `x: Expr[t]` and `given Type[t]`, for some (unknown) type `t`. + expr match + case '{ $x: t } => + // If the pattern match succeeds, then there is some type `t` such that + // - `x` is bound to a variable of type `Expr[t]` + // - `t` is bound to a new type `t` and a given instance `Type[t]` is provided for it + // That is, we have `x: Expr[t]` and `given Type[t]`, for some (unknown) type `t`. 
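+      // For instance (a sketch, not from the original text): with `t` and its `given Type[t]` in scope, new well-typed code can be built here, e.g. '{ List[t]($x) } has type Expr[List[t]].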
``` This might be used to then perform an implicit search as in: ```scala extension (inline sc: StringContext) - inline def showMe(inline args: Any*): String = ${ showMeExpr('sc, 'args) } - -private def showMeExpr(sc: Expr[StringContext], argsExpr: Expr[Seq[Any]])(using Quotes): Expr[String] = { - argsExpr match { - case Varargs(argExprs) => - val argShowedExprs = argExprs.map { - case '{ $arg: tp } => - val showTp = Type.of[Show[tp]] - Expr.summon(using showTp) match { - case Some(showExpr) => '{ $showExpr.show($arg) } - case None => report.error(s"could not find implicit for ${Type.show[Show[tp]]}", arg); '{???} - } - } - val newArgsExpr = Varargs(argShowedExprs) - '{ $sc.s($newArgsExpr: _*) } - case _ => - // `new StringContext(...).showMeExpr(args: _*)` not an explicit `showMeExpr"..."` - report.error(s"Args must be explicit", argsExpr) - '{???} - } -} + inline def showMe(inline args: Any*): String = ${ showMeExpr('sc, 'args) } + +private def showMeExpr(sc: Expr[StringContext], argsExpr: Expr[Seq[Any]])(using Quotes): Expr[String] = + argsExpr match + case Varargs(argExprs) => + val argShowedExprs = argExprs.map { + case '{ $arg: tp } => + val showTp = Type.of[Show[tp]] + Expr.summon(using showTp) match + case Some(showExpr) => + '{ $showExpr.show($arg) } + case None => + report.error(s"could not find implicit for ${Type.show[Show[tp]]}", arg); '{???} + } + val newArgsExpr = Varargs(argShowedExprs) + '{ $sc.s($newArgsExpr: _*) } + case _ => + // `new StringContext(...).showMeExpr(args: _*)` not an explicit `showMeExpr"..."` + report.error(s"Args must be explicit", argsExpr) + '{???} + +trait Show[-T]: + def show(x: T): String -trait Show[-T] { - def show(x: T): String -} // in a different file -given Show[Boolean] { - def show(b: Boolean) = "boolean!" -} +given Show[Boolean]: + def show(b: Boolean) = "boolean!" println(showMe"${true}") ``` @@ -789,8 +768,8 @@ then the rest of the quote can refer to this definition. ```scala '{ - val x: Int = 4 - x * x + val x: Int = 4 + x * x } ``` @@ -806,24 +785,21 @@ the subexpression of type `Expr[Int]` is bound to `body` as an `Expr[Int => Int] ```scala inline def eval(inline e: Int): Int = ${ evalExpr('e) } -private def evalExpr(e: Expr[Int])(using Quotes): Expr[Int] = { - e match { - case '{ val y: Int = $x; $body(y): Int } => +private def evalExpr(e: Expr[Int])(using Quotes): Expr[Int] = e match + case '{ val y: Int = $x; $body(y): Int } => // body: Expr[Int => Int] where the argument represents references to y evalExpr(Expr.betaReduce('{$body(${evalExpr(x)})})) - case '{ ($x: Int) * ($y: Int) } => + case '{ ($x: Int) * ($y: Int) } => (x.value, y.value) match - case (Some(a), Some(b)) => Expr(a * b) - case _ => e - case _ => e - } -} + case (Some(a), Some(b)) => Expr(a * b) + case _ => e + case _ => e ``` ```scala eval { // expands to the code: (16: Int) - val x: Int = 4 - x * x + val x: Int = 4 + x * x } ``` diff --git a/docs/docs/reference/metaprogramming/staging.md b/docs/docs/reference/metaprogramming/staging.md index 1634e64dd883..e1239afc7d88 100644 --- a/docs/docs/reference/metaprogramming/staging.md +++ b/docs/docs/reference/metaprogramming/staging.md @@ -110,9 +110,9 @@ import scala.quoted.staging._ given Toolbox = Toolbox.make(getClass.getClassLoader) val f: Array[Int] => Int = run { - val stagedSum: Expr[Array[Int] => Int] = '{ (arr: Array[Int]) => ${sum('arr)}} - println(stagedSum.show) // Prints "(arr: Array[Int]) => { var sum = 0; ... 
}" - stagedSum + val stagedSum: Expr[Array[Int] => Int] = '{ (arr: Array[Int]) => ${sum('arr)}} + println(stagedSum.show) // Prints "(arr: Array[Int]) => { var sum = 0; ... }" + stagedSum } f.apply(Array(1, 2, 3)) // Returns 6 diff --git a/docs/docs/reference/metaprogramming/tasty-inspect.md b/docs/docs/reference/metaprogramming/tasty-inspect.md index 42fffc0b5789..a1657fdc6c20 100644 --- a/docs/docs/reference/metaprogramming/tasty-inspect.md +++ b/docs/docs/reference/metaprogramming/tasty-inspect.md @@ -23,23 +23,19 @@ the following way. import scala.tasty.Reflection import scala.tasty.file._ -class Consumer extends TastyInspector { - final def apply(reflect: Reflection)(root: reflect.Tree): Unit = { - import reflect._ - // Do something with the tree - } -} +class Consumer extends TastyInspector: + final def apply(reflect: Reflection)(root: reflect.Tree): Unit = + import reflect._ + // Do something with the tree ``` Then the consumer can be instantiated with the following code to get the tree of the class `foo.Bar` for a foo in the classpath. ```scala -object Test { - def main(args: Array[String]): Unit = { - InspectTasty("", List("foo.Bar"), new Consumer) - } -} +object Test: + def main(args: Array[String]): Unit = + InspectTasty("", List("foo.Bar"), new Consumer) ``` Note that if we need to run the main (in the example below defined in an object called `Test`) after diff --git a/docs/docs/reference/metaprogramming/tasty-reflect.md b/docs/docs/reference/metaprogramming/tasty-reflect.md index 94b4d83ae299..d24539d56351 100644 --- a/docs/docs/reference/metaprogramming/tasty-reflect.md +++ b/docs/docs/reference/metaprogramming/tasty-reflect.md @@ -28,10 +28,9 @@ import scala.quoted._ inline def natConst(inline x: Int): Int = ${natConstImpl('{x})} -def natConstImpl(x: Expr[Int])(using Quotes): Expr[Int] = { - import quotes.reflect._ - ... -} +def natConstImpl(x: Expr[Int])(using Quotes): Expr[Int] = + import quotes.reflect._ + ... ``` ### Extractors @@ -40,22 +39,19 @@ def natConstImpl(x: Expr[Int])(using Quotes): Expr[Int] = { trees. For example the `Literal(_)` extractor used below. ```scala -def natConstImpl(x: Expr[Int])(using Quotes): Expr[Int] = { - import quotes.reflect._ - val xTree: Term = x.asTerm - xTree match { - case Inlined(_, _, Literal(IntConstant(n))) => - if (n <= 0) { - report.error("Parameter must be natural number") - '{0} - } else { - xTree.asExprOf[Int] - } - case _ => - report.error("Parameter must be a known constant") - '{0} - } -} +def natConstImpl(x: Expr[Int])(using Quotes): Expr[Int] = + import quotes.reflect._ + val xTree: Term = x.asTerm + xTree match + case Inlined(_, _, Literal(IntConstant(n))) => + if n <= 0 then + report.error("Parameter must be natural number") + '{0} + else + xTree.asExprOf[Int] + case _ => + report.error("Parameter must be a known constant") + '{0} ``` We can easily know which extractors are needed using `Printer.TreeStructure.show`, which returns the string representation the structure of the tree. Other printers can also be found in the `Printer` module. @@ -81,19 +77,19 @@ such as the start line, the end line or even the source code at the expansion point. ```scala -def macroImpl()(quotes: Quotes): Expr[Unit] = { - import quotes.reflect._ - val pos = rootPosition - - val path = pos.sourceFile.jpath.toString - val start = pos.start - val end = pos.end - val startLine = pos.startLine - val endLine = pos.endLine - val startColumn = pos.startColumn - val endColumn = pos.endColumn - val sourceCode = pos.sourceCode - ... 
+def macroImpl()(quotes: Quotes): Expr[Unit] = + import quotes.reflect._ + val pos = rootPosition + + val path = pos.sourceFile.jpath.toString + val start = pos.start + val end = pos.end + val startLine = pos.startLine + val endLine = pos.endLine + val startColumn = pos.startColumn + val endColumn = pos.endColumn + val sourceCode = pos.sourceCode + ... ``` ### Tree Utilities @@ -107,16 +103,12 @@ of type List[Symbol] if we want to collect symbols). The code below, for example, collects the pattern variables of a tree. ```scala -def collectPatternVariables(tree: Tree)(implicit ctx: Context): List[Symbol] = { - val acc = new TreeAccumulator[List[Symbol]] { - def apply(syms: List[Symbol], tree: Tree)(implicit ctx: Context) = - tree match { - case Bind(_, body) => apply(tree.symbol :: syms, body) - case _ => foldOver(syms, tree) - } - } - acc(Nil, tree) -} +def collectPatternVariables(tree: Tree)(implicit ctx: Context): List[Symbol] = + val acc = new TreeAccumulator[List[Symbol]]: + def apply(syms: List[Symbol], tree: Tree)(implicit ctx: Context) = tree match + case Bind(_, body) => apply(tree.symbol :: syms, body) + case _ => foldOver(syms, tree) + acc(Nil, tree) ``` A `TreeTraverser` extends a `TreeAccumulator` and performs the same traversal diff --git a/docs/docs/reference/new-types/dependent-function-types-spec.md b/docs/docs/reference/new-types/dependent-function-types-spec.md index d917749ac062..48f3102c8890 100644 --- a/docs/docs/reference/new-types/dependent-function-types-spec.md +++ b/docs/docs/reference/new-types/dependent-function-types-spec.md @@ -25,9 +25,9 @@ refinement types of `scala.FunctionN`. A dependent function type `(x1: K1, ..., xN: KN) => R` of arity `N` translates to: ```scala -FunctionN[K1, ..., Kn, R'] { - def apply(x1: K1, ..., xN: KN): R -} +FunctionN[K1, ..., Kn, R'] { + def apply(x1: K1, ..., xN: KN): R +} ``` where the result type parameter `R'` is the least upper approximation of the diff --git a/docs/docs/reference/new-types/intersection-types.md b/docs/docs/reference/new-types/intersection-types.md index 55c3199206e5..a2cfc1f380c8 100644 --- a/docs/docs/reference/new-types/intersection-types.md +++ b/docs/docs/reference/new-types/intersection-types.md @@ -10,16 +10,15 @@ Used on types, the `&` operator creates an intersection type. The type `S & T` represents values that are of the type `S` and `T` at the same time. ```scala -trait Resettable { - def reset(): Unit -} -trait Growable[T] { - def add(t: T): Unit -} -def f(x: Resettable & Growable[String]) = { - x.reset() - x.add("first") -} +trait Resettable: + def reset(): Unit + +trait Growable[T]: + def add(t: T): Unit + +def f(x: Resettable & Growable[String]) = + x.reset() + x.add("first") ``` The parameter `x` is required to be _both_ a `Resettable` and a @@ -35,12 +34,12 @@ If a member appears in both `A` and `B`, its type in `A & B` is the intersection of its type in `A` and its type in `B`. For instance, assume the definitions: ```scala -trait A { - def children: List[A] -} -trait B { - def children: List[B] -} +trait A: + def children: List[A] + +trait B: + def children: List[B] + val x: A & B = new C val ys: List[A & B] = x.children ``` @@ -60,9 +59,8 @@ So if one defines a class `C` that inherits `A` and `B`, one needs to give at that point a definition of a `children` method with the required type. ```scala -class C extends A with B { - def children: List[A & B] = ??? -} +class C extends A, B: + def children: List[A & B] = ???
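+  // Note that, since List is covariant, List[A & B] conforms to both List[A] and List[B], so this single definition implements the `children` member inherited from both A and B.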
``` diff --git a/docs/docs/reference/new-types/match-types.md b/docs/docs/reference/new-types/match-types.md index 4d4753325d3a..019b6c67eade 100644 --- a/docs/docs/reference/new-types/match-types.md +++ b/docs/docs/reference/new-types/match-types.md @@ -7,11 +7,10 @@ A match type reduces to one of its right-hand sides, depending on the type of its scrutinee. For example: ```scala -type Elem[X] = X match { - case String => Char - case Array[t] => t - case Iterable[t] => t -} +type Elem[X] = X match + case String => Char + case Array[t] => t + case Iterable[t] => t ``` This defines a type that reduces as follows: @@ -38,21 +37,19 @@ variables in patterns start with a lower case letter, as usual. Match types can form part of recursive type definitions. Example: ```scala -type LeafElem[X] = X match { - case String => Char - case Array[t] => LeafElem[t] - case Iterable[t] => LeafElem[t] - case AnyVal => X -} +type LeafElem[X] = X match + case String => Char + case Array[t] => LeafElem[t] + case Iterable[t] => LeafElem[t] + case AnyVal => X ``` Recursive match type definitions can also be given an upper bound, like this: ```scala -type Concat[Xs <: Tuple, +Ys <: Tuple] <: Tuple = Xs match { - case Unit => Ys - case x *: xs => x *: Concat[xs, Ys] -} +type Concat[Xs <: Tuple, +Ys <: Tuple] <: Tuple = Xs match + case Unit => Ys + case x *: xs => x *: Concat[xs, Ys] ``` In this definition, every instance of `Concat[A, B]`, whether reducible or not, @@ -67,12 +64,11 @@ is the value level counterpart to the `LeafElem` type defined above (note the use of the match type as the return type): ```scala -def leafElem[X](x: X): LeafElem[X] = x match { - case x: String => x.charAt(0) - case x: Array[t] => leafElem(x(9)) - case x: Iterable[t] => leafElem(x.next()) - case x: AnyVal => x -} +def leafElem[X](x: X): LeafElem[X] = x match + case x: String => x.charAt(0) + case x: Array[t] => leafElem(x(9)) + case x: Iterable[t] => leafElem(x.next()) + case x: AnyVal => x ``` This special mode of typing for match expressions is only used when the @@ -195,9 +191,9 @@ mechanism in place. As a result, the following will already give a reasonable error message: ```scala -type L[X] = X match { - case Int => L[X] -} +type L[X] = X match + case Int => L[X] + def g[X]: L[X] = ??? 
``` diff --git a/docs/docs/reference/new-types/polymorphic-function-types.md b/docs/docs/reference/new-types/polymorphic-function-types.md index 8c66d5dde415..24393e9512e7 100644 --- a/docs/docs/reference/new-types/polymorphic-function-types.md +++ b/docs/docs/reference/new-types/polymorphic-function-types.md @@ -47,8 +47,8 @@ in a strongly-typed way: ```scala enum Expr[A]: - case Var(name: String) - case Apply[A, B](fun: Expr[B => A], arg: Expr[B]) extends Expr[A] + case Var(name: String) + case Apply[A, B](fun: Expr[B => A], arg: Expr[B]) extends Expr[A] ``` We would like to provide a way for users to map a function @@ -59,9 +59,9 @@ Here is how to implement this using polymorphic function types: ```scala def mapSubexpressions[A](e: Expr[A])(f: [B] => Expr[B] => Expr[B]): Expr[A] = - e match - case Apply(fun, arg) => Apply(f(fun), f(arg)) - case Var(n) => Var(n) + e match + case Apply(fun, arg) => Apply(f(fun), f(arg)) + case Var(n) => Var(n) ``` And here is how to use this function to _wrap_ each subexpression @@ -71,7 +71,7 @@ defined as a variable: ```scala val e0 = Apply(Var("f"), Var("a")) val e1 = mapSubexpressions(e0)( - [B] => (se: Expr[B]) => Apply(Var[B => B]("wrap"), se)) + [B] => (se: Expr[B]) => Apply(Var[B => B]("wrap"), se)) println(e1) // Apply(Apply(Var(wrap),Var(f)),Apply(Var(wrap),Var(a))) ``` diff --git a/docs/docs/reference/new-types/union-types.md b/docs/docs/reference/new-types/union-types.md index 0a956a93b34b..c0a03907df75 100644 --- a/docs/docs/reference/new-types/union-types.md +++ b/docs/docs/reference/new-types/union-types.md @@ -10,13 +10,11 @@ A union type `A | B` has as values all values of type `A` and also all values of case class UserName(name: String) case class Password(hash: Hash) -def help(id: UserName | Password) = { - val user = id match { - case UserName(name) => lookupName(name) - case Password(hash) => lookupPassword(hash) - } - ... -} +def help(id: UserName | Password) = + val user = id match + case UserName(name) => lookupName(name) + case Password(hash) => lookupPassword(hash) + ... ``` Union types are duals of intersection types. `|` is _commutative_: @@ -32,10 +30,10 @@ val password: Password = Password(123) scala> val name = UserName("Eve") val name: UserName = UserName(Eve) -scala> if (true) name else password +scala> if true then name else password val res2: Object & Product = UserName(Eve) -scala> val either: Password | UserName = if (true) name else password +scala> val either: Password | UserName = if true then name else password val either: Password | UserName = UserName(Eve) ``` diff --git a/docs/docs/reference/other-new-features/control-syntax.md b/docs/docs/reference/other-new-features/control-syntax.md index 72987c6e195f..544eb5ebd584 100644 --- a/docs/docs/reference/other-new-features/control-syntax.md +++ b/docs/docs/reference/other-new-features/control-syntax.md @@ -8,11 +8,11 @@ enclosing the condition in parentheses, and also allows to drop parentheses or b around the generators of a `for`-expression. Examples: ```scala if x < 0 then - "negative" + "negative" else if x == 0 then - "zero" + "zero" else - "positive" + "positive" if x < 0 then -x else x @@ -22,10 +22,10 @@ for x <- xs if x > 0 yield x * x for - x <- xs - y <- ys + x <- xs + y <- ys do - println(x + y) + println(x + y) try body catch case ex: IOException => handle @@ -38,7 +38,7 @@ The rules in detail are: - The enumerators of a `for`-expression can be written without enclosing parentheses or braces if they are followed by a `yield` or `do`. 
- A `do` in a `for`-expression expresses a `for`-loop. - A `catch` can be followed by a single case on the same line. - If there are multiple cases, these have to be appear within braces (just like in Scala 2) + If there are multiple cases, these have to appear within braces (just like in Scala 2) or an indented block. ### Rewrites diff --git a/docs/docs/reference/other-new-features/creator-applications.md b/docs/docs/reference/other-new-features/creator-applications.md index 4035f12d0116..2b7cce294d50 100644 --- a/docs/docs/reference/other-new-features/creator-applications.md +++ b/docs/docs/reference/other-new-features/creator-applications.md @@ -7,9 +7,8 @@ Scala case classes generate apply methods, so that values of case classes can be Scala 3 generalizes this scheme to all concrete classes. Example: ```scala -class StringBuilder(s: String) { +class StringBuilder(s: String): def this() = this("") -} StringBuilder("abc") // same as new StringBuilder("abc") StringBuilder() // same as new StringBuilder() @@ -17,10 +16,9 @@ StringBuilder() // same as new StringBuilder() This works since a companion object with two apply methods is generated together with the class. The object looks like this: ```scala -object StringBuilder { - inline def apply(s: String): StringBuilder = new StringBuilder(s) - inline def apply(): StringBuilder = new StringBuilder() -} +object StringBuilder: + inline def apply(s: String): StringBuilder = new StringBuilder(s) + inline def apply(): StringBuilder = new StringBuilder() ``` The synthetic object `StringBuilder` and its `apply` methods are called _constructor proxies_. Constructor proxies are generated even for Java classes and classes coming from Scala 2. diff --git a/docs/docs/reference/other-new-features/explicit-nulls.md b/docs/docs/reference/other-new-features/explicit-nulls.md index 221d4bec108e..a651107f59d6 100644 --- a/docs/docs/reference/other-new-features/explicit-nulls.md +++ b/docs/docs/reference/other-new-features/explicit-nulls.md @@ -7,13 +7,13 @@ Explicit nulls is an opt-in feature that modifies the Scala type system, which m (anything that extends `AnyRef`) _non-nullable_. This means the following code will no longer typecheck: -``` +```scala val x: String = null // error: found `Null`, but required `String` ``` Instead, to mark a type as nullable we use a [union type](https://dotty.epfl.ch/docs/reference/new-types/union-types.html) -``` +```scala val x: String|Null = null // ok ``` @@ -38,10 +38,10 @@ The new type system is unsound with respect to `null`. 
This means there are stil The unsoundness happens because uninitialized fields in a class start out as `null`: ```scala -class C { - val f: String = foo(f) - def foo(f2: String): String = f2 -} +class C: + val f: String = foo(f) + def foo(f2: String): String = f2 + val c = new C() // c.f == "field is null" ``` @@ -81,8 +81,8 @@ So far, we have found the following useful: ```scala def[T] (x: T|Null) nn: x.type & T = - if (x == null) throw new NullPointerException("tried to cast away nullability, but value is null") - else x.asInstanceOf[x.type & T] + if x == null then throw new NullPointerException("tried to cast away nullability, but value is null") + else x.asInstanceOf[x.type & T] ``` This means that given `x: String|Null`, `x.nn` has type `String`, so we can call all the @@ -106,16 +106,15 @@ Specifically, we patch ```java class C { - String s; - int x; + String s; + int x; } ``` ==> ```scala - class C { - val s: String|UncheckedNull - val x: Int - } + class C: + val s: String|UncheckedNull + val x: Int ``` * We nullify type parameters because in Java a type parameter is always nullable, so the following code compiles. @@ -131,10 +130,9 @@ Specifically, we patch Notice this rule is sometimes too conservative, as witnessed by ```scala - class InScala { - val c: C[Bool] = ??? // C as above - val b: Bool = c.foo() // no longer typechecks, since foo now returns Bool|Null - } + class InScala: + val c: C[Bool] = ??? // C as above + val b: Bool = c.foo() // no longer typechecks, since foo now returns Bool|Null ``` * This reduces the number of redundant nullable types we need to add. Consider @@ -169,10 +167,9 @@ Specifically, we patch ``` ==> ```scala - class BoxFactory[T] { - def makeBox(): Box[T | UncheckedNull] | UncheckedNull - def makeCrazyBoxes(): List[Box[List[T] | UncheckedNull]] | UncheckedNull - } + class BoxFactory[T]: + def makeBox(): Box[T | UncheckedNull] | UncheckedNull + def makeCrazyBoxes(): List[Box[List[T] | UncheckedNull]] | UncheckedNull ``` In this case, since `Box` is Scala-defined, we will get `Box[T|UncheckedNull]|UncheckedNull`. @@ -187,22 +184,21 @@ Specifically, we patch ```java class Constants { - final String NAME = "name"; - final int AGE = 0; - final char CHAR = 'a'; + final String NAME = "name"; + final int AGE = 0; + final char CHAR = 'a'; - final String NAME_GENERATED = getNewName(); + final String NAME_GENERATED = getNewName(); } ``` ==> ```scala - class Constants { - val NAME: String("name") = "name" - val AGE: Int(0) = 0 - val CHAR: Char('a') = 'a' + class Constants: + val NAME: String("name") = "name" + val AGE: Int(0) = 0 + val CHAR: Char('a') = 'a' - val NAME_GENERATED: String | Null = ??? - } + val NAME_GENERATED: String | Null = ???
``` * We don't append `UncheckedNull` to a field nor to a return type of a method which is annotated with a @@ -210,18 +206,17 @@ Specifically, we patch ```java class C { - @NotNull String name; - @NotNull List getNames(String prefix); // List is Java-defined - @NotNull Box getBoxedName(); // Box is Scala-defined + @NotNull String name; + @NotNull List getNames(String prefix); // List is Java-defined + @NotNull Box getBoxedName(); // Box is Scala-defined } ``` ==> ```scala - class C { - val name: String - def getNames(prefix: String | UncheckedNull): List[String] // we still need to nullify the paramter types - def getBoxedName(): Box[String | UncheckedNull] // we don't append `UncheckedNull` to the outmost level, but we still need to nullify inside - } + class C: + val name: String + def getNames(prefix: String | UncheckedNull): List[String] // we still need to nullify the paramter types + def getBoxedName(): Box[String | UncheckedNull] // we don't append `UncheckedNull` to the outmost level, but we still need to nullify inside ``` The annotation must be from the list below to be recognized as `NotNull` by the compiler. @@ -275,15 +270,13 @@ Without `UncheckedNull`, the chaining becomes too cumbersome ```scala val ret = someJavaMethod() -val s2 = if (ret != null) { - val tmp = ret.trim() - if (tmp != null) { - val tmp2 = tmp.substring(2) - if (tmp2 != null) { - tmp2.toLowerCase() - } - } -} +val s2 = + if ret != null then + val tmp = ret.trim() + if tmp != null then + val tmp2 = tmp.substring(2) + if tmp2 != null then + tmp2.toLowerCase() // Additionally, we need to handle the `else` branches. ``` @@ -298,9 +291,9 @@ Example: ```scala val s: String|Null = ??? -if (s != null) { - // s: String -} +if s != null then + // s: String + // s: String|Null assert(x != null) @@ -310,11 +303,10 @@ assert(x != null) A similar inference can be made for the `else` case if the test is `p == null` ```scala -if (s == null) { - // s: String|Null -} else { - // s: String -} +if s == null then + // s: String|Null +else + // s: String ``` `==` and `!=` is considered a comparison for the purposes of the flow inference. @@ -326,18 +318,16 @@ We also support logical operators (`&&`, `||`, and `!`): ```scala val s: String|Null = ??? val s2: String|Null = ??? -if (s != null && s2 != null) { - // s: String - // s2: String -} - -if (s == null || s2 == null) { - // s: String|Null - // s2: String|Null -} else { - // s: String - // s2: String -} +if s != null && s2 != null then + // s: String + // s2: String + +if s == null || s2 == null then + // s: String|Null + // s2: String|Null +else + // s: String + // s2: String ``` ### Inside Conditions @@ -347,15 +337,13 @@ We also support type specialization _within_ the condition, taking into account ```scala val s: String|Null = ??? -if (s != null && s.length > 0) { // s: String in `s.length > 0` - // s: String -} +if s != null && s.length > 0 then // s: String in `s.length > 0` + // s: String -if (s == null || s.length > 0) { // s: String in `s.length > 0` - // s: String|Null -} else { - // s: String -} +if s == null || s.length > 0 then // s: String in `s.length > 0` + // s: String|Null +else + // s: String ``` ### Match Case @@ -365,10 +353,9 @@ The non-null cases can be detected in match statements. ```scala val s: String|Null = ??? 
-s match { - case _: String => // s: String - case _ => -} +s match + case _: String => // s: String + case _ => ``` ### Mutable Variable @@ -380,15 +367,14 @@ class C(val x: Int, val next: C|Null) var xs: C|Null = C(1, C(2, null)) // xs is trackable, since all assignments are in the same method -while (xs != null) { - // xs: C - val xsx: Int = xs.x - val xscpy: C = xs - xs = xscpy // since xscpy is non-null, xs still has type C after this line - // xs: C - xs = xs.next // after this assignment, xs can be null again - // xs: C | Null -} +while xs != null do + // xs: C + val xsx: Int = xs.x + val xscpy: C = xs + xs = xscpy // since xscpy is non-null, xs still has type C after this line + // xs: C + xs = xs.next // after this assignment, xs can be null again + // xs: C | Null ``` When dealing with local mutable variables, there are two questions: @@ -400,13 +386,12 @@ When dealing with local mutable variables, there are two questions: ```scala var x: String|Null = ??? - def y = { - x = null - } - if (x != null) { - // y can be called here, which would break the fact - val a: String = x // error: x is captured and mutated by the closure, not trackable - } + def y = + x = null + + if x != null then + // y can be called here, which would break the fact + val a: String = x // error: x is captured and mutated by the closure, not trackable ``` 2. Whether to generate and use flow typing on a specific _use_ of a local mutable variable. @@ -418,17 +403,14 @@ When dealing with local mutable variables, there are two questions: ```scala var x: String|Null = ??? - def y = { - if (x != null) { - // not safe to use the fact (x != null) here - // since y can be executed at the same time as the outer block - val _: String = x - } - } - if (x != null) { - val a: String = x // ok to use the fact here - x = null - } + def y = + if x != null then + // not safe to use the fact (x != null) here + // since y can be executed at the same time as the outer block + val _: String = x + if x != null then + val a: String = x // ok to use the fact here + x = null ``` See more examples in `tests/explicit-nulls/neg/var-ref-in-closure.scala`. @@ -440,15 +422,14 @@ For example, `x.a` if `x` is mutable. We don't support: -- flow facts not related to nullability (`if (x == 0) { // x: 0.type not inferred }`) +- flow facts not related to nullability (`if x == 0 then { // x: 0.type not inferred }`) - tracking aliasing between non-nullable paths ```scala val s: String|Null = ??? val s2: String|Null = ??? - if (s != null && s == s2) { - // s: String inferred - // s2: String not inferred - } + if s != null && s == s2 then + // s: String inferred + // s2: String not inferred ``` ## Binary Compatibility diff --git a/docs/docs/reference/other-new-features/export.md b/docs/docs/reference/other-new-features/export.md index 81fe1e8d0c27..8abd71e68936 100644 --- a/docs/docs/reference/other-new-features/export.md +++ b/docs/docs/reference/other-new-features/export.md @@ -8,26 +8,23 @@ An export clause defines aliases for selected members of an object. Example: class BitMap class InkJet -class Printer { - type PrinterType - def print(bits: BitMap): Unit = ??? - def status: List[String] = ??? -} - -class Scanner { - def scan(): BitMap = ??? - def status: List[String] = ??? 
-} - -class Copier { - private val printUnit = new Printer { type PrinterType = InkJet } - private val scanUnit = new Scanner - - export scanUnit.scan - export printUnit.{status => _, _} - - def status: List[String] = printUnit.status ++ scanUnit.status -} +class Printer: + type PrinterType + def print(bits: BitMap): Unit = ??? + def status: List[String] = ??? + +class Scanner: + def scan(): BitMap = ??? + def status: List[String] = ??? + +class Copier: + private val printUnit = new Printer { type PrinterType = InkJet } + private val scanUnit = new Scanner + + export scanUnit.scan + export printUnit.{status => _, _} + + def status: List[String] = printUnit.status ++ scanUnit.status ``` The two `export` clauses define the following _export aliases_ in class `Copier`: ```scala diff --git a/docs/docs/reference/other-new-features/indentation.md b/docs/docs/reference/other-new-features/indentation.md index 5cd601e65f03..6c5d6a3617f9 100644 --- a/docs/docs/reference/other-new-features/indentation.md +++ b/docs/docs/reference/other-new-features/indentation.md @@ -95,7 +95,7 @@ It is an error if the indentation width of the token following an `` do ```scala if x < 0 then - -x + -x else // error: `else` does not align correctly x ``` @@ -118,24 +118,24 @@ With these new rules, the following constructs are all valid: ```scala trait A: - def f: Int + def f: Int class C(x: Int) extends A: - def f = x + def f = x object O: - def f = 3 + def f = 3 enum Color: - case Red, Green, Blue + case Red, Green, Blue new A: - def f = 3 + def f = 3 package p: - def a = 1 + def a = 1 package q: - def b = 2 + def b = 2 ``` In each case, the `:` at the end of line can be replaced without change of meaning by a pair of braces that enclose the following indented definition(s). @@ -181,11 +181,11 @@ The rules allow to write `match` expressions where cases are not indented themse ```scala x match -case 1 => print("I") -case 2 => print("II") -case 3 => print("III") -case 4 => print("IV") -case 5 => print("V") + case 1 => print("I") + case 2 => print("II") + case 3 => print("III") + case 4 => print("IV") + case 5 => print("V") println(".") ``` @@ -198,12 +198,12 @@ To solve this problem, Scala 3 offers an optional `end` marker. Example: ```scala def largeMethod(...) = - ... - if ... then ... - else + ... + if ... then ... + else ... // a large block - end if - ... // more code + end if + ... // more code end largeMethod ``` @@ -230,47 +230,47 @@ For instance, the following end markers are all legal: ```scala package p1.p2: - abstract class C(): - - def this(x: Int) = - this() - if x > 0 then - val a :: b = - x :: Nil - end val - var y = - x - end y - while y > 0 do - println(y) - y -= 1 - end while - try - x match - case 0 => println("0") - case _ => - end match - finally - println("done") - end try - end if - end this - - def f: String - end C - - object C: - given C = - new C: - def f = "!" - end f - end new - end given - end C - - extension (x: C) - def ff: String = x.f ++ x.f - end extension + abstract class C(): + + def this(x: Int) = + this() + if x > 0 then + val a :: b = + x :: Nil + end val + var y = + x + end y + while y > 0 do + println(y) + y -= 1 + end while + try + x match + case 0 => println("0") + case _ => + end match + finally + println("done") + end try + end if + end this + + def f: String + end C + + object C: + given C = + new C: + def f = "!" 
+ end f + end new + end given + end C + + extension (x: C) + def ff: String = x.f ++ x.f + end extension end p2 ``` @@ -302,49 +302,50 @@ Here is a (somewhat meta-circular) example of code using indentation. It provide ```scala enum IndentWidth: - case Run(ch: Char, n: Int) - case Conc(l: IndentWidth, r: Run) - - def <= (that: IndentWidth): Boolean = this match - case Run(ch1, n1) => - that match - case Run(ch2, n2) => n1 <= n2 && (ch1 == ch2 || n1 == 0) - case Conc(l, r) => this <= l - case Conc(l1, r1) => - that match - case Conc(l2, r2) => l1 == l2 && r1 <= r2 - case _ => false - - def < (that: IndentWidth): Boolean = - this <= that && !(that <= this) - - override def toString: String = this match - case Run(ch, n) => - val kind = ch match - case ' ' => "space" - case '\t' => "tab" - case _ => s"'$ch'-character" - val suffix = if n == 1 then "" else "s" - s"$n $kind$suffix" - case Conc(l, r) => - s"$l, $r" + case Run(ch: Char, n: Int) + case Conc(l: IndentWidth, r: Run) + + def <= (that: IndentWidth): Boolean = this match + case Run(ch1, n1) => + that match + case Run(ch2, n2) => n1 <= n2 && (ch1 == ch2 || n1 == 0) + case Conc(l, r) => this <= l + case Conc(l1, r1) => + that match + case Conc(l2, r2) => l1 == l2 && r1 <= r2 + case _ => false + + def < (that: IndentWidth): Boolean = + this <= that && !(that <= this) + + override def toString: String = + this match + case Run(ch, n) => + val kind = ch match + case ' ' => "space" + case '\t' => "tab" + case _ => s"'$ch'-character" + val suffix = if n == 1 then "" else "s" + s"$n $kind$suffix" + case Conc(l, r) => + s"$l, $r" object IndentWidth: - private inline val MaxCached = 40 + private inline val MaxCached = 40 - private val spaces = IArray.tabulate(MaxCached + 1)(new Run(' ', _)) - private val tabs = IArray.tabulate(MaxCached + 1)(new Run('\t', _)) + private val spaces = IArray.tabulate(MaxCached + 1)(new Run(' ', _)) + private val tabs = IArray.tabulate(MaxCached + 1)(new Run('\t', _)) - def Run(ch: Char, n: Int): Run = - if n <= MaxCached && ch == ' ' then - spaces(n) - else if n <= MaxCached && ch == '\t' then - tabs(n) - else - new Run(ch, n) - end Run + def Run(ch: Char, n: Int): Run = + if n <= MaxCached && ch == ' ' then + spaces(n) + else if n <= MaxCached && ch == '\t' then + tabs(n) + else + new Run(ch, n) + end Run - val Zero = Run(' ', 0) + val Zero = Run(' ', 0) end IndentWidth ``` @@ -367,15 +368,15 @@ option `-Yindent-colons`. This variant is more contentious and less stable than ```scala times(10): - println("ah") - println("ha") + println("ah") + println("ha") ``` or ```scala xs.map: - x => + x => val y = x - 1 y * y ``` @@ -385,10 +386,10 @@ also even for ordinary parameters: ```scala credentials ++ : - val file = Path.userHome / ".credentials" - if file.exists - then Seq(Credentials(file)) - else Seq() + val file = Path.userHome / ".credentials" + if file.exists + then Seq(Credentials(file)) + else Seq() ``` How does this syntax variant work? Colons at the end of lines are their own token, distinct from normal `:`. diff --git a/docs/docs/reference/other-new-features/matchable.md b/docs/docs/reference/other-new-features/matchable.md index 9c3153eb094f..ea2c3cd70587 100644 --- a/docs/docs/reference/other-new-features/matchable.md +++ b/docs/docs/reference/other-new-features/matchable.md @@ -21,7 +21,7 @@ However, there is a potential hole due to pattern matching. Consider: ```scala val imm: IArray[Int] = ... 
imm match - case a: Array[Int] => a(0) = 1 + case a: Array[Int] => a(0) = 1 ``` The test will succeed at runtime since `IArray`s _are_ represented as @@ -40,7 +40,7 @@ type `T` as match selector leads to the same problem: ```scala def f[T](x: T) = x match - case a: Array[Int] => a(0) = 0 + case a: Array[Int] => a(0) = 0 f(imm) ``` @@ -76,15 +76,15 @@ Here is the hierarchy of top-level classes and traits with their defined methods ```scala abstract class Any: - def getClass - def isInstanceOf - def asInstanceOf - def == - def != - def ## - def equals - def hashCode - def toString + def getClass + def isInstanceOf + def asInstanceOf + def == + def != + def ## + def equals + def hashCode + def toString trait Matchable extends Any diff --git a/docs/docs/reference/other-new-features/named-typeargs-spec.md b/docs/docs/reference/other-new-features/named-typeargs-spec.md index 404f96852aca..f56f97d89100 100644 --- a/docs/docs/reference/other-new-features/named-typeargs-spec.md +++ b/docs/docs/reference/other-new-features/named-typeargs-spec.md @@ -20,7 +20,7 @@ Note in particular that named arguments cannot be passed to type constructors: class C[T] val x: C[T = Int] = // error - new C[T = Int] // error + new C[T = Int] // error class E extends C[T = Int] // error ``` diff --git a/docs/docs/reference/other-new-features/opaques-details.md b/docs/docs/reference/other-new-features/opaques-details.md index 6c43e23c73f7..723d319e2f3e 100644 --- a/docs/docs/reference/other-new-features/opaques-details.md +++ b/docs/docs/reference/other-new-features/opaques-details.md @@ -29,9 +29,8 @@ type T >: L <: U ``` A special case arises if the opaque type alias is defined in an object. Example: ``` -object o { - opaque type T = R -} +object o: + opaque type T = R ``` In this case we have inside the object (also for non-opaque types) that `o.T` is equal to `T` or its expanded form `o.this.T`. Equality is understood here as mutual subtyping, i.e. @@ -39,10 +38,9 @@ In this case we have inside the object (also for non-opaque types) that `o.T` is that `o.this.T` equals `R`. The two equalities compose. That is, inside `o`, it is also known that `o.T` is equal to `R`. This means the following code type-checks: ```scala -object o { - opaque type T = Int - val x: Int = id(2) -} +object o: + opaque type T = Int + val x: Int = id(2) def id(x: o.T): o.T = x ``` @@ -84,22 +82,20 @@ objects and classes and in all other source files. Example: opaque type A = String val x: A = "abc" -object obj { - val y: A = "abc" // error: found: "abc", required: A -} +object obj: + val y: A = "abc" // error: found: "abc", required: A // in test2.scala def z: String = x // error: found: A, required: String ``` This behavior becomes clear if one recalls that top-level definitions are placed in their own synthetic object. For instance, the code in `test1.scala` would expand to ```scala -object test1$package { - opaque type A = String - val x: A = "abc" -} -object obj { - val y: A = "abc" // error: cannot assign "abc" to opaque type alias A -} +object test1$package: + opaque type A = String + val x: A = "abc" + +object obj: + val y: A = "abc" // error: cannot assign "abc" to opaque type alias A ``` The opaque type alias `A` is transparent in its scope, which includes the definition of `x`, but not the definitions of `obj` and `y`. 
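To make the scoping rule above concrete, here is a small additional sketch (the helper `wrap` is illustrative and not part of the original sources): within the top-level scope of `test1.scala` the equality `A = String` is known, so a conversion function can be defined there, and code in nested objects such as `obj` can then construct values of `A` through that function rather than with a bare string literal.

```scala
// test1.scala (sketch)
opaque type A = String

val x: A = "abc"            // ok: the alias is transparent at this point
def wrap(s: String): A = s  // ok for the same reason

object obj:
  val y: A = wrap("abc")    // ok: goes through the API; `val y: A = "abc"` would not compile here
```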
diff --git a/docs/docs/reference/other-new-features/opaques.md b/docs/docs/reference/other-new-features/opaques.md index 9569fc6446d5..f7711f0a10f6 100644 --- a/docs/docs/reference/other-new-features/opaques.md +++ b/docs/docs/reference/other-new-features/opaques.md @@ -6,27 +6,28 @@ title: "Opaque Type Aliases" Opaque types aliases provide type abstraction without any overhead. Example: ```scala -object Logarithms { +object Logarithms: - opaque type Logarithm = Double + opaque type Logarithm = Double - object Logarithm { + object Logarithm: - // These are the two ways to lift to the Logarithm type + // These are the two ways to lift to the Logarithm type - def apply(d: Double): Logarithm = math.log(d) + def apply(d: Double): Logarithm = math.log(d) - def safe(d: Double): Option[Logarithm] = - if (d > 0.0) Some(math.log(d)) else None - } + def safe(d: Double): Option[Logarithm] = + if d > 0.0 then Some(math.log(d)) else None - // Extension methods define opaque types' public APIs - extension (x: Logarithm) { - def toDouble: Double = math.exp(x) - def + (y: Logarithm): Logarithm = Logarithm(math.exp(x) + math.exp(y)) - def * (y: Logarithm): Logarithm = x + y - } -} + end Logarithm + + // Extension methods define opaque types' public APIs + extension (x: Logarithm) + def toDouble: Double = math.exp(x) + def + (y: Logarithm): Logarithm = Logarithm(math.exp(x) + math.exp(y)) + def * (y: Logarithm): Logarithm = x + y + +end Logarithms ``` This introduces `Logarithm` as a new abstract type, which is implemented as `Double`. @@ -61,27 +62,28 @@ l / l2 // error: `/` is not a member of Logarithm Opaque type aliases can also come with bounds. Example: ```scala -object Access { - - opaque type Permissions = Int - opaque type PermissionChoice = Int - opaque type Permission <: Permissions & PermissionChoice = Int - - extension (x: Permissions) - def & (y: Permissions): Permissions = x | y - extension (x: PermissionChoice) - def | (y: PermissionChoice): PermissionChoice = x | y - extension (granted: Permissions) - def is(required: Permissions) = (granted & required) == required - extension (granted: Permissions) - def isOneOf(required: PermissionChoice) = (granted & required) != 0 - - val NoPermission: Permission = 0 - val Read: Permission = 1 - val Write: Permission = 2 - val ReadWrite: Permissions = Read | Write - val ReadOrWrite: PermissionChoice = Read | Write -} +object Access: + + opaque type Permissions = Int + opaque type PermissionChoice = Int + opaque type Permission <: Permissions & PermissionChoice = Int + + extension (x: Permissions) + def & (y: Permissions): Permissions = x | y + extension (x: PermissionChoice) + def | (y: PermissionChoice): PermissionChoice = x | y + extension (granted: Permissions) + def is(required: Permissions) = (granted & required) == required + extension (granted: Permissions) + def isOneOf(required: PermissionChoice) = (granted & required) != 0 + + val NoPermission: Permission = 0 + val Read: Permission = 1 + val Write: Permission = 2 + val ReadWrite: Permissions = Read | Write + val ReadOrWrite: PermissionChoice = Read | Write + +end Access ``` The `Access` object defines three opaque type aliases: @@ -105,24 +107,24 @@ All three opaque type aliases have the same underlying representation type `Int` it known outside the `Access` object that `Permission` is a subtype of the other two types. Hence, the following usage scenario type-checks. 
```scala -object User { - import Access._ +object User: + import Access._ - case class Item(rights: Permissions) + case class Item(rights: Permissions) - val roItem = Item(Read) // OK, since Permission <: Permissions - val rwItem = Item(ReadWrite) - val noItem = Item(NoPermission) + val roItem = Item(Read) // OK, since Permission <: Permissions + val rwItem = Item(ReadWrite) + val noItem = Item(NoPermission) - assert( roItem.rights.is(ReadWrite) == false ) - assert( roItem.rights.isOneOf(ReadOrWrite) == true ) + assert( roItem.rights.is(ReadWrite) == false ) + assert( roItem.rights.isOneOf(ReadOrWrite) == true ) - assert( rwItem.rights.is(ReadWrite) == true ) - assert( rwItem.rights.isOneOf(ReadOrWrite) == true ) + assert( rwItem.rights.is(ReadWrite) == true ) + assert( rwItem.rights.isOneOf(ReadOrWrite) == true ) - assert( noItem.rights.is(ReadWrite) == false ) - assert( noItem.rights.isOneOf(ReadOrWrite) == false ) -} + assert( noItem.rights.is(ReadWrite) == false ) + assert( noItem.rights.isOneOf(ReadOrWrite) == false ) +end User ``` On the other hand, the call `roItem.rights.isOneOf(ReadWrite)` would give a type error since `Permissions` and `PermissionChoice` are different, unrelated types outside `Access`. diff --git a/docs/docs/reference/other-new-features/open-classes.md b/docs/docs/reference/other-new-features/open-classes.md index ff641f116135..19b4fe6584c4 100644 --- a/docs/docs/reference/other-new-features/open-classes.md +++ b/docs/docs/reference/other-new-features/open-classes.md @@ -8,21 +8,20 @@ An `open` modifier on a class signals that the class is planned for extensions. // File Writer.scala package p -open class Writer[T] { +open class Writer[T]: - /** Sends to stdout, can be overridden */ - def send(x: T) = println(x) + /** Sends to stdout, can be overridden */ + def send(x: T) = println(x) - /** Sends all arguments using `send` */ - def sendAll(xs: T*) = xs.foreach(send) -} + /** Sends all arguments using `send` */ + def sendAll(xs: T*) = xs.foreach(send) +end Writer // File EncryptedWriter.scala package p -class EncryptedWriter[T: Encryptable] extends Writer[T] { - override def send(x: T) = super.send(encrypt(x)) -} +class EncryptedWriter[T: Encryptable] extends Writer[T]: + override def send(x: T) = super.send(encrypt(x)) ``` An open class typically comes with some documentation that describes the internal calling patterns between methods of the class as well as hooks that can be overridden. We call this the _extension contract_ of the class. It is different from the _external contract_ between a class and its users. diff --git a/docs/docs/reference/other-new-features/parameter-untupling-spec.md b/docs/docs/reference/other-new-features/parameter-untupling-spec.md index 8622ddde6d8b..42ca89ed5361 100644 --- a/docs/docs/reference/other-new-features/parameter-untupling-spec.md +++ b/docs/docs/reference/other-new-features/parameter-untupling-spec.md @@ -15,14 +15,14 @@ and you want to map `xs` to a list of `Int`s so that each pair of numbers is map Previously, the best way to do this was with a pattern-matching decomposition: ```scala xs.map { - case (x, y) => x + y + case (x, y) => x + y } ``` While correct, this is inconvenient. Instead, we propose to write it the following way: ```scala xs.map { - (x, y) => x + y + (x, y) => x + y } ``` or, equivalently: @@ -58,12 +58,11 @@ If the function is typed as `ProductN[T1, ..., Tn] => Te`, then it will be transformed to ```scala -(x: TupleN[T1, ..., Tn]) => { +(x: TupleN[T1, ..., Tn]) => def p1: T1 = x._1 ... 
def pn: Tn = x._n e -} ``` ##### Generic tuples diff --git a/docs/docs/reference/other-new-features/parameter-untupling.md b/docs/docs/reference/other-new-features/parameter-untupling.md index d4da0ecb490d..24400f178e34 100644 --- a/docs/docs/reference/other-new-features/parameter-untupling.md +++ b/docs/docs/reference/other-new-features/parameter-untupling.md @@ -11,14 +11,14 @@ and you want to map `xs` to a list of `Int`s so that each pair of numbers is map their sum. Previously, the best way to do this was with a pattern-matching decomposition: ```scala xs map { - case (x, y) => x + y + case (x, y) => x + y } ``` While correct, this is also inconvenient and confusing, since the `case` suggests that the pattern match could fail. As a shorter and clearer alternative Scala 3 now allows ```scala xs.map { - (x, y) => x + y + (x, y) => x + y } ``` or, equivalently: diff --git a/docs/docs/reference/other-new-features/safe-initialization.md b/docs/docs/reference/other-new-features/safe-initialization.md index 0f5007c975bc..8bf27653bc84 100644 --- a/docs/docs/reference/other-new-features/safe-initialization.md +++ b/docs/docs/reference/other-new-features/safe-initialization.md @@ -14,15 +14,13 @@ To get a feel of how it works, we first show several examples below. Given the following code snippet: ``` scala -abstract class AbstractFile { +abstract class AbstractFile: def name: String val extension: String = name.substring(4) -} -class RemoteFile(url: String) extends AbstractFile { +class RemoteFile(url: String) extends AbstractFile: val localFile: String = s"${url.##}.tmp" // error: usage of `localFile` before it's initialized def name: String = localFile -} ``` The checker will report: @@ -41,12 +39,11 @@ The checker will report: Given the code below: ``` scala -object Trees { - class ValDef { counter += 1 } - class EmptyValDef extends ValDef - val theEmptyValDef = new EmptyValDef - private var counter = 0 // error -} +object Trees: + class ValDef { counter += 1 } + class EmptyValDef extends ValDef + val theEmptyValDef = new EmptyValDef + private var counter = 0 // error ``` The checker will report: @@ -66,15 +63,14 @@ The checker will report: Given the code below: ``` scala -abstract class Parent { - val f: () => String = () => this.message - def message: String -} -class Child extends Parent { - val a = f() - val b = "hello" // error - def message: String = b -} +abstract class Parent: + val f: () => String = () => this.message + def message: String + +class Child extends Parent: + val a = f() + val b = "hello" // error + def message: String = b ``` The checker reports: @@ -126,14 +122,13 @@ following example shows: ``` scala class MyException(val b: B) extends Exception("") -class A { - val b = try { new B } catch { case myEx: MyException => myEx.b } - println(b.a) -} -class B { - throw new MyException(this) - val a: Int = 1 -} +class A: + val b = try { new B } catch { case myEx: MyException => myEx.b } + println(b.a) + +class B: + throw new MyException(this) + val a: Int = 1 ``` In the code above, the control effect teleport the uninitialized value @@ -147,11 +142,10 @@ object under initialization. 
As an example, the following code will be rejected:
``` scala
trait Reporter { def report(msg: String): Unit }
-class FileReporter(ctx: Context) extends Reporter {
-  ctx.typer.reporter = this   // ctx now reaches an uninitialized object
-  val file: File = new File("report.txt")
-  def report(msg: String) = file.write(msg)
-}
+class FileReporter(ctx: Context) extends Reporter:
+  ctx.typer.reporter = this  // ctx now reaches an uninitialized object
+  val file: File = new File("report.txt")
+  def report(msg: String) = file.write(msg)
```
In the code above, suppose `ctx` points to a transitively initialized
@@ -218,14 +212,13 @@ project boundaries. For example, the following code passes the check when the
two classes are defined in the same project:
```Scala
-class Base {
-  private val map: mutable.Map[Int, String] = mutable.Map.empty
-  def enter(k: Int, v: String) = map(k) = v
-}
-class Child extends Base {
-  enter(1, "one")
-  enter(2, "two")
-}
+class Base:
+  private val map: mutable.Map[Int, String] = mutable.Map.empty
+  def enter(k: Int, v: String) = map(k) = v
+
+class Child extends Base:
+  enter(1, "one")
+  enter(2, "two")
```
However, when the class `Base` and `Child` are defined in two different
diff --git a/docs/docs/reference/other-new-features/targetName.md b/docs/docs/reference/other-new-features/targetName.md
index 6ade68c7b29d..e95a54405296 100644
--- a/docs/docs/reference/other-new-features/targetName.md
+++ b/docs/docs/reference/other-new-features/targetName.md
@@ -8,9 +8,10 @@ A `@targetName` annotation on a definition defines an alternate name for the imp
```scala
import scala.annotation.targetName
-object VecOps {
-  @targetName("append") def (xs: Vec[T]) ++= [T] (ys: Vec[T]): Vec[T] = ...
-}
+object VecOps:
+  extension [T](xs: Vec[T])
+    @targetName("append")
+    def ++= (ys: Vec[T]): Vec[T] = ...
```
Here, the `++=` operation is implemented (in Byte code or native code) under the name `append`. The implementation name affects the code that is generated, and is the name under which code from other languages can call the method. For instance, `++=` could be invoked from Java like this:
@@ -70,9 +71,9 @@ between two definitions that have otherwise the same names and types. So the fol
```scala
import annotation.targetName
class A:
-   def f(): Int = 1
+  def f(): Int = 1
class B extends A:
-   @targetName("g") def f(): Int = 2
+  @targetName("g") def f(): Int = 2
```
The compiler reports here:
@@ -97,9 +98,9 @@ be present in the original code. So the following example would also be in error
```scala
import annotation.targetName
class A:
-   def f(): Int = 1
+  def f(): Int = 1
class B extends A:
-   @targetName("f") def g(): Int = 2
+  @targetName("f") def g(): Int = 2
```
Here, the original methods `g` and `f` do not override each other since they have
diff --git a/docs/docs/reference/other-new-features/threadUnsafe-annotation.md b/docs/docs/reference/other-new-features/threadUnsafe-annotation.md
index 85b8d66259c3..6bcd426425b1 100644
--- a/docs/docs/reference/other-new-features/threadUnsafe-annotation.md
+++ b/docs/docs/reference/other-new-features/threadUnsafe-annotation.md
@@ -12,7 +12,6 @@ faster mechanism which is not thread-safe.
```scala import scala.annotation.threadUnsafe -class Hello { - @threadUnsafe lazy val x: Int = 1 -} +class Hello: + @threadUnsafe lazy val x: Int = 1 ``` diff --git a/docs/docs/reference/other-new-features/trait-parameters.md b/docs/docs/reference/other-new-features/trait-parameters.md index 180e7243acdd..416b47fc08bc 100644 --- a/docs/docs/reference/other-new-features/trait-parameters.md +++ b/docs/docs/reference/other-new-features/trait-parameters.md @@ -6,13 +6,11 @@ title: "Trait Parameters" Scala 3 allows traits to have parameters, just like classes have parameters. ```scala -trait Greeting(val name: String) { - def msg = s"How are you, $name" -} +trait Greeting(val name: String): + def msg = s"How are you, $name" -class C extends Greeting("Bob") { - println(msg) -} +class C extends Greeting("Bob"): + println(msg) ``` Arguments to a trait are evaluated immediately before the trait is initialized. @@ -22,7 +20,7 @@ ambiguities. For instance, you might try to extend `Greeting` twice, with different parameters. ```scala -class D extends C with Greeting("Bill") // error: parameter passed twice +class D extends C, Greeting("Bill") // error: parameter passed twice ``` Should this print "Bob" or "Bill"? In fact this program is illegal, @@ -37,9 +35,8 @@ because it violates the second rule of the following for trait parameters: Here's a trait extending the parameterized trait `Greeting`. ```scala -trait FormalGreeting extends Greeting { - override def msg = s"How do you do, $name" -} +trait FormalGreeting extends Greeting: + override def msg = s"How do you do, $name" ``` As is required, no arguments are passed to `Greeting`. However, this poses an issue when defining a class that extends `FormalGreeting`: @@ -52,7 +49,7 @@ The correct way to write `E` is to extend both `Greeting` and `FormalGreeting` (in either order): ```scala -class E extends Greeting("Bob") with FormalGreeting +class E extends Greeting("Bob"), FormalGreeting ``` ### Reference diff --git a/docs/docs/reference/other-new-features/type-test.md b/docs/docs/reference/other-new-features/type-test.md index eae6b22ede33..935b16fee95e 100644 --- a/docs/docs/reference/other-new-features/type-test.md +++ b/docs/docs/reference/other-new-features/type-test.md @@ -10,15 +10,15 @@ When pattern matching there are two situations where a runtime type test must be The first kind is an explicit type test using the ascription pattern notation. ```scala (x: X) match - case y: Y => + case y: Y => ``` The second is when an extractor takes an argument that is not a subtype of the scrutinee type. ```scala (x: X) match - case y @ Y(n) => + case y @ Y(n) => object Y: - def unapply(x: Y): Some[Int] = ... + def unapply(x: Y): Some[Int] = ... ``` In both cases, a class test will be performed at runtime. @@ -30,17 +30,16 @@ A `TypeTest` can be provided to make this test possible. package scala.reflect trait TypeTest[-S, T]: - def unapply(s: S): Option[s.type & T] + def unapply(s: S): Option[s.type & T] ``` It provides an extractor that returns its argument typed as a `T` if the argument is a `T`. It can be used to encode a type test. 
```scala -def f[X, Y](x: X)(using tt: TypeTest[X, Y]): Option[Y] = - x match - case tt(x @ Y(1)) => Some(x) - case tt(x) => Some(x) - case _ => None +def f[X, Y](x: X)(using tt: TypeTest[X, Y]): Option[Y] = x match + case tt(x @ Y(1)) => Some(x) + case tt(x) => Some(x) + case _ => None ``` To avoid the syntactic overhead the compiler will look for a type test automatically if it detects that the type test is on abstract types. @@ -48,22 +47,20 @@ This means that `x: Y` is transformed to `tt(x)` and `x @ Y(_)` to `tt(x @ Y(_)) The previous code is equivalent to ```scala -def f[X, Y](x: X)(using TypeTest[X, Y]): Option[Y] = - x match - case x @ Y(1) => Some(x) - case x: Y => Some(x) - case _ => None +def f[X, Y](x: X)(using TypeTest[X, Y]): Option[Y] = x match + case x @ Y(1) => Some(x) + case x: Y => Some(x) + case _ => None ``` We could create a type test at call site where the type test can be performed with runtime class tests directly as follows ```scala val tt: TypeTest[Any, String] = - new TypeTest[Any, String] - def unapply(s: Any): Option[s.type & String] = - s match - case s: String => Some(s) - case _ => None + new TypeTest[Any, String]: + def unapply(s: Any): Option[s.type & String] = s match + case s: String => Some(s) + case _ => None f[AnyRef, String]("acb")(using tt) ``` @@ -71,8 +68,7 @@ f[AnyRef, String]("acb")(using tt) The compiler will synthesize a new instance of a type test if none is found in scope as: ```scala new TypeTest[A, B]: - def unapply(s: A): Option[s.type & B] = - s match + def unapply(s: A): Option[s.type & B] = s match case s: B => Some(s) case _ => None ``` @@ -90,12 +86,12 @@ This alias can be used as ```scala def f[T: Typeable]: Boolean = - "abc" match - case x: T => true - case _ => false + "abc" match + case x: T => true + case _ => false f[String] // true -f[Int] // fasle +f[Int] // false ``` ### TypeTest and ClassTag @@ -112,20 +108,23 @@ Given the following abstract definition of `Peano` numbers that provides `TypeTe ```scala trait Peano: - type Nat - type Zero <: Nat - type Succ <: Nat - def safeDiv(m: Nat, n: Succ): (Nat, Nat) - val Zero: Zero - val Succ: SuccExtractor - trait SuccExtractor { - def apply(nat: Nat): Succ - def unapply(nat: Succ): Option[Nat] - } - given TypeTest[Nat, Zero] = typeTestOfZero - protected def typeTestOfZero: TypeTest[Nat, Zero] - given TypeTest[Nat, Succ] = typeTestOfSucc - protected def typeTestOfSucc: TypeTest[Nat, Succ] + type Nat + type Zero <: Nat + type Succ <: Nat + + def safeDiv(m: Nat, n: Succ): (Nat, Nat) + + val Zero: Zero + + val Succ: SuccExtractor + trait SuccExtractor: + def apply(nat: Nat): Succ + def unapply(nat: Succ): Option[Nat] + + given TypeTest[Nat, Zero] = typeTestOfZero + protected def typeTestOfZero: TypeTest[Nat, Zero] + given TypeTest[Nat, Succ] = typeTestOfSucc + protected def typeTestOfSucc: TypeTest[Nat, Succ] ``` it will be possible to write the following program @@ -134,9 +133,9 @@ it will be possible to write the following program val peano: Peano = ... 
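// `...` above stands for any concrete implementation of the `Peano` trait;
// the `TypeTest[Nat, Succ]` given it declares is what lets `n: Nat` be matched
// against the `Succ(_)` pattern in `divOpt` below.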
import peano._
def divOpt(m: Nat, n: Nat): Option[(Nat, Nat)] =
-  n match
-    case Zero => None
-    case s @ Succ(_) => Some(safeDiv(m, s))
+  n match
+  case Zero => None
+  case s @ Succ(_) => Some(safeDiv(m, s))
val two = Succ(Succ(Zero))
val five = Succ(Succ(Succ(two)))
diff --git a/docs/docs/reference/soft-modifier.md b/docs/docs/reference/soft-modifier.md
index 754ddcb009ed..03c71857cdd8 100644
--- a/docs/docs/reference/soft-modifier.md
+++ b/docs/docs/reference/soft-modifier.md
@@ -1,14 +1,25 @@
---
layout: doc-page
-title: Soft Modifiers
+title: Soft Keywords
---
A soft modifier is one of the identifiers `opaque`, `inline`, `open`, `transparent`, and `infix`.
-
-It is treated as a potential modifier of a definition if it is followed by a hard modifier or a keyword combination starting a definition (`def`, `val`, `var`, `type`, `given`, `class`, `trait`, `object`, `enum`, `case class`, `case object`). Between the two words there may be a sequence of newline tokens and soft modifiers. In addition, `inline` is a keyword for `inline if`, `inline ... match`, and inline parameters.
+A soft keyword is a soft modifier, or one of `derives`, `end`, `extension`, `using`, `|`, `+`, `-`, `*`.
-It is treated as a potential modifier of a parameter binding unless it is followed by `:`.
+A soft modifier is treated as a potential modifier of a definition if it is followed by a hard modifier or a keyword combination starting a definition (`def`, `val`, `var`, `type`, `given`, `class`, `trait`, `object`, `enum`, `case class`, `case object`). Between the two words there may be a sequence of newline tokens and soft modifiers.
+
+Otherwise, soft keywords are treated specially in the following situations:
+
+ - `inline`, if it is followed by `if`, `match`, or a parameter definition.
+ - `derives`, if it appears after an extension clause or after
+   the name and possibly parameters of a class, trait, object, or enum definition.
+ - `end`, if it appears at the start of a line following a statement (i.e. a definition or top-level expression).
+ - `extension`, if it appears at the start of a statement and is followed by `(` or `[`.
+ - `using`, if it appears at the start of a parameter or argument list.
+ - `|`, if it separates two patterns in an alternative.
+ - `+`, `-`, if they appear in front of a type parameter.
+ - `*`, if it follows the type of a parameter or if it appears in
+   a vararg type ascription `x: _*`.
+
+Everywhere else a soft keyword is treated as a normal identifier.
diff --git a/docs/docs/reference/syntax.md b/docs/docs/reference/syntax.md
index bc5f76295727..a3841db7da50 100644
--- a/docs/docs/reference/syntax.md
+++ b/docs/docs/reference/syntax.md
@@ -103,9 +103,10 @@ type val var while with yield
### Soft keywords
```
-as derives end extension inline opaque open transparent using
-* + -
+derives end extension inline infix opaque open transparent using | * + -
```
+See the [separate section on soft keywords](./soft-modifier.md) for additional
+details on where a soft keyword is recognized.
## Context-free Syntax
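As an illustration of the soft-keyword rules listed above, here is a small sketch; the class, method, and value names (`Logger`, `Point`, `shifted`, and so on) are invented for this example:

```scala
// Soft keywords act as keywords only in the positions described above:
open class Logger:                        // `open` before a class definition
  def log(msg: String): Unit = println(msg)
end Logger                                // `end` at the start of a line, after a statement

case class Point(x: Int, y: Int) derives CanEqual  // `derives` after the class parameters

extension (p: Point)                      // `extension` at the start of a statement, before `(`
  def shifted(using d: Int): Point =      // `using` at the start of a parameter list
    Point(p.x + d, p.y + d)

// Everywhere else the same identifiers are ordinary names:
val open = 1
val extension = 2
val end = open + extension
```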