Snippets Collections
// compilation succeeds
val p1: LowerCased = LowerCased("Abc")

// compilation succeeds
LowerCased.from("abc")

// compilation fails:
// got a string which is not all lower case: Abc
LowerCased.from("Abc")

// compilation fails:
// got a value which is not a constant string: Usage.z
val z = "x" * 10
LowerCased.from(z)
object LowerCased:
  inline def from(inline s: String): LowerCased = ${ fromImpl('s) }

  import scala.quoted.*
  private def fromImpl(s: Expr[String])(using Quotes): Expr[LowerCased] =
    import quotes.reflect.*

    s.asTerm match
      case Inlined(_, _, Literal(StringConstant(str))) =>
        if LowerCased(str) == str then s
        else report.errorAndAbort(
          s"got a string which is not all lower case: $str")
      case _ => report.errorAndAbort(
        s"got a value which is not a constant string: ${s.show}")
object LowerCased:
  inline def from(inline s: String): LowerCased = ${ fromImpl('s) }

  import scala.quoted.*
  private def fromImpl(s: Expr[String])(using Quotes): Expr[LowerCased] = 
    ???
import java.util.Locale

opaque type LowerCased = String

object LowerCased:
  def apply(s: String): LowerCased = s.toLowerCase(Locale.US)

  given Conversion[LowerCased, String] with
    inline def apply(lc: LowerCased): String = lc
import scala.compiletime.{error, requireConst}

opaque type NonEmptyString = String

object NonEmptyString:
  def apply(s: String): Option[NonEmptyString] = 
    if s.isEmpty then None else Some(s)

  inline def from(inline s: String): NonEmptyString =
    requireConst(s)
    inline if s == "" then error("got an empty string") else s

  given Conversion[NonEmptyString, String] with
    inline def apply(nes: NonEmptyString): String = nes  
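For reference, a quick sketch of how NonEmptyString behaves, mirroring the LowerCased examples at the top (the variable names are illustrative):

// compilation succeeds
val a: NonEmptyString = NonEmptyString.from("abc")

// compilation fails: got an empty string
NonEmptyString.from("")

// compilation fails: requireConst rejects a non-constant argument
val s = scala.util.Random.nextString(3)
NonEmptyString.from(s)

// runtime validation returns an Option instead
val maybe: Option[NonEmptyString] = NonEmptyString("abc")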
import org.openjdk.jmh.annotations.{Benchmark, BenchmarkMode, Fork, Measurement, Mode, Param, Scope, Setup, State, Warmup}

import scala.jdk.StreamConverters.*
import scala.util.Random

case class Cls(i: Int)

@BenchmarkMode(Array(Mode.Throughput))
@State(Scope.Benchmark)
@Measurement(time = 1, iterations = 5)
@Warmup(time = 1, iterations = 5)
@Fork(value = 2)
class Bench:

  @Param(Array("100", "10000", "100000", "1000000"))
  var size: Int = _

  var data: Array[Cls] = _
  var list: List[Cls] = _
  var vector: Vector[Cls] = _

  @Setup
  def setup(): Unit =
    data = Array.fill(size)(Cls(Random.nextInt()))
    list = List.from(data)
    vector = Vector.from(data)

  @Benchmark
  def listBuilderAndSum: Long =
    val builder = List.newBuilder[Cls]
    for cls <- data do builder.addOne(cls)
    val list = builder.result()

    list.filter(_.i > 0).map(_.i.toLong).sum


  @Benchmark
  def vectorBuilderAndSum: Long =
    val builder = Vector.newBuilder[Cls]
    for cls <- data do builder.addOne(cls)
    val vec = builder.result()

    vec.filter(_.i > 0).map(_.i.toLong).sum

  @Benchmark
  def vectorBuildDirectAndSum: Long =
    var vec = Vector.empty[Cls]
    for cls <- data do vec = vec :+ cls

    vec.filter(_.i > 0).map(_.i.toLong).sum


  @Benchmark
  def listPrependAndSum: Long =
    var lst = List.empty[Cls]
    for cls <- data do lst = cls :: lst
    lst.filter(_.i > 0).map(_.i.toLong).sum

  @Benchmark
  def vectorPrependAndSum: Long =
    var vec = Vector.empty[Cls]
    for cls <- data do vec = cls +: vec
    vec.filter(_.i > 0).map(_.i.toLong).sum
  
  @Benchmark
  def listSum: Long =
    list.filter(_.i > 0).map(_.i.toLong).sum

  @Benchmark
  def vectorSum: Long =
    vector.filter(_.i > 0).map(_.i.toLong).sum

  @Benchmark
  def listViewSum: Long =
    list.view.filter(_.i > 0).map(_.i.toLong).sum

  @Benchmark
  def vectorViewSum: Long =
    vector.view.filter(_.i > 0).map(_.i.toLong).sum

  @Benchmark
  def listJavaStreamSum: Long =
    list.asJavaSeqStream.filter(_.i > 0).mapToLong(_.i.toLong).sum

  @Benchmark
  def vectorJavaStreamSum: Long =
    vector.asJavaSeqStream.filter(_.i > 0).mapToLong(_.i.toLong).sum
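These benchmarks are typically run with the sbt-jmh plugin; a minimal setup sketch is below (the plugin version is only illustrative, pick the current release):

// project/plugins.sbt
addSbtPlugin("pl.project13.scala" % "sbt-jmh" % "0.4.3")

// build.sbt
enablePlugins(JmhPlugin)

// then, from the sbt shell:
//   Jmh/run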
class Contains(r: Range) { def unapply(i: Int): Boolean = r contains i }

val C1 = new Contains(3 to 10)
val C2 = new Contains(20 to 30)

scala> 5 match { case C1() => println("C1"); case C2() => println("C2"); case _ => println("none") }
C1

scala> 23 match { case C1() => println("C1"); case C2() => println("C2"); case _ => println("none") }
C2

scala> 45 match { case C1() => println("C1"); case C2() => println("C2"); case _ => println("none") }
none
n match {
  case it if 0 until 5 contains it  => "less than five"
  case it if 5 until 10 contains it => "less than ten"
  case _ => "a lot"
}
import org.scalatest.matchers.{HavePropertyMatcher, HavePropertyMatchResult}

def mkPropEqMatcher[T, V](name: String, extr: T => V)(expVal: V): HavePropertyMatcher[T, V] =
  new HavePropertyMatcher[T, V] {
    def apply(t: T): HavePropertyMatchResult[V] =
      HavePropertyMatchResult(
        extr(t) == expVal,
        name,
        expVal,
        extr(t)
      )
  }

val title = mkPropEqMatcher("title", (_: Book).title) _
val author = mkPropEqMatcher("author", (_: Book).author) _
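A rough usage sketch with ScalaTest's `should have` syntax; the Book case class and the sample values are assumptions for illustration, and the title/author matchers defined above are expected to be in scope:

import org.scalatest.matchers.should.Matchers._

// hypothetical Book shape, just for illustration
case class Book(title: String, author: String)

val book = Book("Programming in Scala", "Martin Odersky")

// each property matcher checks one field and reports name, expected and actual value
book should have(
  title("Programming in Scala"),
  author("Martin Odersky")
)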
import org.apache.spark.SparkConf

val classes = Seq(
  getClass,                       // To get the jar with our own code.
  classOf[com.mysql.jdbc.Driver]  // To get the connector (swap in your JDBC driver class).
)
val jars = classes.map(_.getProtectionDomain().getCodeSource().getLocation().getPath())
val conf = new SparkConf().setJars(jars)
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.functions._

val sc: SparkContext = ...  // an existing SparkContext
val sqlContext = new SQLContext(sc)

import sqlContext.implicits._

val input = sc.parallelize(Seq(
  ("a", 5, 7, 9, 12, 13),
  ("b", 6, 4, 3, 20, 17),
  ("c", 4, 9, 4, 6 , 9),
  ("d", 1, 2, 6, 8 , 1)
)).toDF("ID", "var1", "var2", "var3", "var4", "var5")

val columnsToSum = List(col("var1"), col("var2"), col("var3"), col("var4"), col("var5"))

val output = input.withColumn("sums", columnsToSum.reduce(_ + _))

output.show()
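For reference, with the sample rows above the added sums column works out to a → 46, b → 50, c → 32 and d → 18 (e.g. 5 + 7 + 9 + 12 + 13 = 46 for row a), which is what output.show() should print alongside the original columns.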
import org.apache.spark.sql.functions.{col, explode, map_keys}

val keysDF = mapTypeDF.select(explode(map_keys($"property"))).distinct()
val keys = keysDF.collect().map(f => f.get(0))
val keyCols = keys.map(f => col("property").getItem(f).as(f.toString))

val expandedDf = mapTypeDF.select(col("name") +: keyCols: _*)
expandedDf.show(false)
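A minimal, assumed setup for running the snippet above: a SparkSession and a DataFrame with a map column (names and keys are illustrative):

import org.apache.spark.sql.SparkSession

val spark = SparkSession.builder().master("local[*]").appName("map-to-columns").getOrCreate()
import spark.implicits._

// "property" is a MapType column; each distinct key becomes its own column
val mapTypeDF = Seq(
  ("Alice", Map("hair" -> "brown", "eye" -> "blue")),
  ("Bob",   Map("hair" -> "black"))
).toDF("name", "property")

// expandedDf then has columns name, hair, eye, ... with null where a row lacks a key.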
Sources:
- https://softwaremill.com/fancy-strings-in-scala-3/ #scala
- https://august.nagro.us/list-vs-vector.html #scala
- https://stackoverflow.com/questions/3160888/how-can-i-pattern-match-on-a-range-in-scala #scala
- https://users.scala-lang.org/t/type-safe-checking-of-arbitary-properties-custom-class-for-without-boilerplate-code/6751/3 #scala #scala-test
- https://stackoverflow.com/questions/24916852/how-can-i-connect-to-a-postgresql-database-into-apache-spark-using-scala #scala #spark #postgresql
- https://stackoverflow.com/questions/37624699/adding-a-column-of-rowsums-across-a-list-of-columns-in-spark-dataframe #scala #spark
- https://sparkbyexamples.com/spark/spark-convert-map-to-multiple-columns/ #scala
