Last active
January 7, 2016 19:24
-
-
Save tmcgrath/fc4414ccde15411919d0 to your computer and use it in GitHub Desktop.
Scala based Spark Transformations which require Key, Value pair RDDs
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
scala> val babyNames = sc.textFile("baby_names.csv")
babyNames: org.apache.spark.rdd.RDD[String] = baby_names.csv MappedRDD[27] at textFile at <console>:12
scala> val rows = babyNames.map(line => line.split(","))
rows: org.apache.spark.rdd.RDD[Array[String]] = MappedRDD[28] at map at <console>:14
scala> val namesToCounties = rows.map(name => (name(1),name(2)))
namesToCounties: org.apache.spark.rdd.RDD[(String, String)] = MappedRDD[29] at map at <console>:16
scala> namesToCounties.groupByKey.collect
res6: Array[(String, Iterable[String])] = Array((BRADEN,CompactBuffer(SUFFOLK, SARATOGA, SUFFOLK, ERIE, SUFFOLK, SUFFOLK, ERIE)), (MATTEO,CompactBuffer(NEW YORK, SUFFOLK, NASSAU, KINGS, WESTCHESTER, WESTCHESTER, KINGS, SUFFOLK, NASSAU, QUEENS, QUEENS, NEW YORK, NASSAU, QUEENS, KINGS, SUFFOLK, WESTCHESTER, WESTCHESTER, SUFFOLK, KINGS, NASSAU, QUEENS, SUFFOLK, NASSAU, WESTCHESTER)), (HAZEL,CompactBuffer(ERIE, MONROE, KINGS, NEW YORK, KINGS, MONROE, NASSAU, SUFFOLK, QUEENS, KINGS, SUFFOLK, NEW YORK, KINGS, SUFFOLK)), (SKYE,CompactBuffer(NASSAU, KINGS, MONROE, BRONX, KINGS, KINGS, NASSAU)), (JOSUE,CompactBuffer(SUFFOLK, NASSAU, WESTCHESTER, BRONX, KINGS, QUEENS, SUFFOLK, QUEENS, NASSAU, WESTCHESTER, BRONX, BRONX, QUEENS, SUFFOLK, KINGS, WESTCHESTER, QUEENS, NASSAU, SUFFOLK, BRONX, KINGS, QU...
scala> val filteredRows = babyNames.filter(line => !line.contains("Count")).map(line => line.split(","))
filteredRows: org.apache.spark.rdd.RDD[Array[String]] = MappedRDD[32] at map at <console>:14
scala> filteredRows.map(n => (n(1),n(4).toInt)).reduceByKey((v1,v2) => v1 + v2).collect
res7: Array[(String, Int)] = Array((BRADEN,39), (MATTEO,279), (HAZEL,133), (SKYE,63), (JOSUE,404), (RORY,12), (NAHLA,16), (ASIA,6), (MEGAN,581), (HINDY,254), (ELVIN,26), (AMARA,10), (CHARLOTTE,1737), (BELLA,672), (DANTE,246), (PAUL,712), (EPHRAIM,26), (ANGIE,295), (ANNABELLA,38), (DIAMOND,16), (ALFONSO,6), (MELISSA,560), (AYANNA,11), (ANIYAH,365), (DINAH,5), (MARLEY,32), (OLIVIA,6467), (MALLORY,15), (EZEQUIEL,13), (ELAINE,116), (ESMERALDA,71), (SKYLA,172), (EDEN,199), (MEGHAN,128), (AHRON,29), (KINLEY,5), (RUSSELL,5), (TROY,88), (MORDECHAI,521), (JALIYAH,10), (AUDREY,690), (VALERIE,584), (JAYSON,285), (SKYLER,26), (DASHIELL,24), (SHAINDEL,17), (AURORA,86), (ANGELY,5), (ANDERSON,369), (SHMUEL,315), (MARCO,370), (AUSTIN,1345), (MITCHELL,12), (SELINA,187), (FATIMA,421), (CESAR,292), (CARIN...
scala> val names1 = sc.parallelize(List("abe", "abby", "apple")).map(a => (a, 1))
names1: org.apache.spark.rdd.RDD[(String, Int)] = MappedRDD[36] at map at <console>:12
scala> val names2 = sc.parallelize(List("apple", "beatty", "beatrice")).map(a => (a, 1))
names2: org.apache.spark.rdd.RDD[(String, Int)] = MappedRDD[38] at map at <console>:12
scala> names1.join(names2).collect
res8: Array[(String, (Int, Int))] = Array((apple,(1,1)))
scala> names1.leftOuterJoin(names2).collect
res9: Array[(String, (Int, Option[Int]))] = Array((abby,(1,None)), (apple,(1,Some(1))), (abe,(1,None)))
scala> names1.rightOuterJoin(names2).collect
res10: Array[(String, (Option[Int], Int))] = Array((apple,(Some(1),1)), (beatty,(None,1)), (beatrice,(None,1)))
Sign up for free to join this conversation on GitHub.
Already have an account? Sign in to comment.