In [1]:
spark.version
Out[1]:
2.4.8
In [2]:
import java.sql.Date
import org.apache.spark.sql.expressions.Window
import org.apache.spark.sql.functions._
In [3]:
val purchasesDF = Seq(
    (Date.valueOf("2019-01-01"), "01"),
    (Date.valueOf("2019-05-10"), "01"),
    (Date.valueOf("2019-03-05"), "02"),
    (Date.valueOf("2019-02-20"), "03"),
    (Date.valueOf("2019-01-20"), "02")
).toDF("purchase_date", "device_id")
purchasesDF = [purchase_date: date, device_id: string]
Out[3]:
[purchase_date: date, device_id: string]
In [4]:
val devicesDF = Seq(
    ("01", "notebook", 600.00),
    ("02", "small phone", 100.00),
    ("03", "camera",150.00),
    ("04", "large phone", 700.00)
).toDF("device_id", "device_name", "price")
devicesDF = [device_id: string, device_name: string ... 1 more field]
Out[4]:
[device_id: string, device_name: string ... 1 more field]

Error message example

In [5]:
%%dataframe
purchasesDF.join(devicesDF, col("device_id") === col("device_id"), "inner")
Out[5]:
An error occurred converting DataFrame to html.
Reference 'device_id' is ambiguous, could be: device_id, device_id.;
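
As an aside: when both sides share the key name, another way around the ambiguity (just a sketch, not run in this session) is the join variant that takes the key as a Seq of column names, which makes Spark resolve it on both sides and emit a single device_id column:

// Sketch: joining on Seq("device_id") keeps one copy of the key,
// so no alias or later drop is needed.
purchasesDF.join(devicesDF, Seq("device_id"), "inner")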

Adding aliases

In [6]:
%%dataframe
purchasesDF.as("purchases")
  .join(devicesDF.as("devices"), col("purchases.device_id") === col("devices.device_id"))
Out[6]:
purchase_date | device_id | device_id | device_name | price
2019-05-10    | 01        | 01        | notebook    | 600.0
2019-01-01    | 01        | 01        | notebook    | 600.0
2019-01-20    | 02        | 02        | small phone | 100.0
2019-03-05    | 02        | 02        | small phone | 100.0
2019-02-20    | 03        | 03        | camera      | 150.0
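
Once the aliases are in place, the qualified names also work in select, e.g. to keep just one copy of the key (a sketch, not executed above):

// Sketch: pick columns through the "purchases" alias so only one
// device_id column survives in the result.
purchasesDF.as("purchases")
  .join(devicesDF.as("devices"), col("purchases.device_id") === col("devices.device_id"))
  .select(col("purchases.device_id"), col("device_name"), col("price"))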

Viewing qualified column names via exceptions

An engineer's gotta do what an engineer's gotta do

In [7]:
purchasesDF.as("purchases")
  .join(devicesDF.as("devices"), col("purchases.device_id") === col("devices.device_id"))
  .select("xxxx")
org.apache.spark.sql.AnalysisException: cannot resolve '`xxxx`' given input columns: [purchases.device_id, devices.device_id, purchases.purchase_date, devices.device_name, devices.price];;
'Project ['xxxx]
+- Join Inner, (device_id#6 = device_id#16)
   :- SubqueryAlias `purchases`
   :  +- Project [_1#2 AS purchase_date#5, _2#3 AS device_id#6]
   :     +- LocalRelation [_1#2, _2#3]
   +- SubqueryAlias `devices`
      +- Project [_1#12 AS device_id#16, _2#13 AS device_name#17, _3#14 AS price#18]
         +- LocalRelation [_1#12, _2#13, _3#14]
  at org.apache.spark.sql.catalyst.analysis.package$AnalysisErrorAt.failAnalysis(package.scala:42)
  ... (remaining stack frames elided)
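
A less destructive way to peek at the qualified names (a sketch, assuming printing the plan to stdout is enough) is explain(true), whose analyzed logical plan shows each column under its SubqueryAlias:

// Sketch: the extended explain output includes the analyzed plan,
// where each device_id attribute sits under the purchases/devices alias.
purchasesDF.as("purchases")
  .join(devicesDF.as("devices"), col("purchases.device_id") === col("devices.device_id"))
  .explain(true)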

Dropping

In [8]:
%%dataframe
purchasesDF.as("purchases")
  .join(devicesDF.as("devices"), col("purchases.device_id") === col("devices.device_id"))
  .drop("device_id")
Out[8]:
purchase_date | device_name | price
2019-05-10    | notebook    | 600.0
2019-01-01    | notebook    | 600.0
2019-01-20    | small phone | 100.0
2019-03-05    | small phone | 100.0
2019-02-20    | camera      | 150.0
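
Note that drop("device_id") by name removed both copies of the key. To keep the left-hand one, a Column resolved through an alias can be passed instead of a bare name; a sketch, not run here:

// Sketch: dropping by Column should target only the right-hand device_id,
// leaving purchases.device_id in the result.
purchasesDF.as("purchases")
  .join(devicesDF.as("devices"), col("purchases.device_id") === col("devices.device_id"))
  .drop(col("devices.device_id"))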