package dmp.beans.sparkcore

import com.google.gson.Gson
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Reads parquet data from HDFS, counts records per (province, city) pair,
  * serializes each pair as JSON via Gson, and writes the result as text files.
  *
  * Usage: LocalTest &lt;inputPath&gt; &lt;outputPath&gt;
  */
object LocalTest {
  def main(args: Array[String]): Unit = {
    // Validate the argument count before destructuring. (The original,
    // commented-out check compared the Array itself to 2, which can never
    // be equal — `args.length` is the correct comparison.)
    if (args.length != 2) {
      println("参数不合法，退出程序")
      sys.exit(1)
    }
    val Array(inputPath, outputPath) = args

    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName("LocalTest")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val spark = SparkSession.builder().config(conf).getOrCreate()

    try {
      // Read the parquet input and work on the underlying RDD[Row].
      val df: DataFrame = spark.read.parquet(inputPath)
      val rdd: RDD[Row] = df.rdd

      // Key directly by a (province, city) tuple rather than a "_"-joined
      // string: names containing '_' can no longer corrupt the key, and the
      // later split/re-parse step disappears entirely.
      val provinceAndCity: RDD[((String, String), Int)] = rdd.map { row =>
        val provincename: String = row.getAs[String]("provincename")
        val cityname: String = row.getAs[String]("cityname")
        ((provincename, cityname), 1)
      }
      val counted: RDD[((String, String), Int)] = provinceAndCity.reduceByKey(_ + _)

      // NOTE(review): the aggregated count is computed but — as in the
      // original — intentionally not included in the emitted JSON.
      // Gson is not Serializable, so build one instance per partition
      // (mapPartitions) instead of one per record.
      val jsonStr: RDD[String] = counted.mapPartitions { iter =>
        val gson = new Gson()
        iter.map { case ((provincename, cityname), _) =>
          gson.toJson(Area(provincename, cityname))
        }
      }

      jsonStr.saveAsTextFile(outputPath)
    } finally {
      // Guarantee the SparkSession is released even if the job fails.
      spark.stop()
    }
  }
}
/**
  * One (province, city) pair destined for JSON serialization.
  *
  * The original field names `str`/`str1` became the JSON keys emitted by
  * Gson (`{"str":...,"str1":...}`), which loses all meaning downstream;
  * the intended names (see the construction site's inline comment) are used
  * instead. The only construction site is positional, so callers compile
  * unchanged.
  */
case class Area(provincename: String, cityname: String)



