package com.shujia.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demo: inner-join two RDDs keyed by student id.
 *
 * Reads a student-info file and a score file, keys both by the student id
 * (first comma-separated field), joins them, and prints one flattened tuple
 * per (student, score) pair.
 */
object Code16Join {
  def main(args: Array[String]): Unit = {

    // App name now matches the job (was a copy-paste leftover "Mysql2Text").
    val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("Code16Join"))

    // students.txt line format: id,name,age,gender,clazz
    // Keyed by id so it can be joined with the score RDD below.
    // NOTE(review): this path uses the "scala_code/" prefix while score.txt
    // below uses "spark_code/" — one of the two is likely wrong; confirm
    // against the actual project layout.
    val stuInfoRDD: RDD[(String, (String, String, String, String))] = sc
      .textFile("scala_code/data/students.txt")
      .map { line =>
        val fields = line.split(",")
        // assumes every line has at least 5 fields — malformed lines will throw
        (fields(0), (fields(1), fields(2), fields(3), fields(4)))
      }

    // score.txt line format: id,courseID,score — read with 4 partitions.
    val stuScoreRDD: RDD[(String, (String, Int))] = sc
      .textFile("spark_code/data/score.txt", 4)
      .map { line =>
        val fields = line.split(",")
        (fields(0), (fields(1), fields(2).toInt))
      }

    // Inner join on student id, then flatten the nested pair of tuples.
    // Joined shape example: (1500100792,((窦平卉,22,女,理科三班),(1000009,29)))
    stuInfoRDD
      .join(stuScoreRDD)
      .mapValues {
        case ((name, age, gender, clazz), (courseID, score)) =>
          (name, age, gender, clazz, courseID, score)
      }
      .foreach(println)

    // Release the SparkContext's resources (was missing in the original).
    sc.stop()
  }
}
