%%classpath add mvn
org.apache.spark spark-sql_2.11 2.2.1
%%spark
// Builder configuration consumed by the %%spark magic above.
// NOTE(review): no getOrCreate() here — presumably the %%spark magic
// finalizes the builder and binds the session as `spark`; confirm
// against the BeakerX Spark magic docs before changing this cell.
SparkSession.builder()
.appName("Simple Application")
.master("local[4]")
import scala.math.random

// Monte Carlo estimate of Pi: sample NUM_SAMPLES uniform points in the
// unit square and count how many land inside the quarter unit circle.
// The ratio of hits to samples approximates Pi/4.
val NUM_SAMPLES = 10000000
val count2 = spark.sparkContext
  .parallelize(1 to NUM_SAMPLES)
  .map { _ =>
    // Draw one point (x, y) uniformly in [0, 1) x [0, 1).
    val x = random
    val y = random
    // 1 when the point falls inside the quarter circle, 0 otherwise.
    if (x * x + y * y < 1) 1 else 0
  }
  .reduce(_ + _)
println(s"Pi is roughly ${4.0 * count2 / NUM_SAMPLES}")
// Resolve the sample CSV relative to the notebook's working directory.
val tornadoesPath = java.nio.file.Paths
  .get("../../resources/data/tornadoes_2014.csv")
  .toAbsolutePath()
// .csv(path) is shorthand for .format("csv").load(path); the first
// line of the file is treated as the header row.
val ds = spark.read
  .option("header", "true")
  .csv(tornadoesPath.toString())
// Evaluating `ds` as the cell's last expression lets the notebook
// render the Dataset.
ds
// NOTE(review): `display` is not a standard Dataset method — presumably
// a notebook (BeakerX) extension that renders the first N rows; confirm
// against the kernel's API before relying on the argument's meaning.
ds.display(1)
%%spark --start
%%spark -v 2.3.1 -s