package com.wei.violet.app

import com.google.gson.Gson
import com.wei.violet.bean.ProCity
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, SQLContext}

object SparkCoreHandle {
  // Quiet Spark's internal logging down to warnings.
  Logger.getLogger("org").setLevel(Level.WARN)

  /**
   * Entry point. Reads a parquet dataset, counts records per
   * (provincename, cityname) pair, and writes one JSON document per line
   * to the output path.
   *
   * @param args exactly two elements: parquet input path, text output path
   */
  def main(args: Array[String]): Unit = {
    val Array(dataInputPath, dataOutputPath) = args
    val conf: SparkConf = new SparkConf()
      .setAppName("将统计结果添加到mysql数据库中")
      .setMaster("local[*]")
      // FIX: key was misspelled "spark.serizlizer", so the Kryo serializer
      // was silently never enabled.
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val sc = new SparkContext(conf)
    val sQLContext = new SQLContext(sc)
    val dataFrame: DataFrame = sQLContext.read.parquet(dataInputPath)
/*
    Alternative: compute via a plain string RDD instead of DataFrame rows.
    val dataRdd: RDD[String] = dataFrame.rdd.map(_.toString())
    val reduceBykeydata: RDD[((String, String), Int)] = dataRdd.map(r => {
      val split: Array[String] = r.split(",")
      val provincename = split(24)
      val cityname = split(25)
      ((provincename, cityname), 1)

    }).reduceByKey(_ + _)*/

    // Count occurrences per (province, city) pair.
    val reduceBykeydata = dataFrame.map(row => {
      val provincename = row.getAs[String]("provincename")
      val cityname = row.getAs[String]("cityname")
      ((provincename, cityname), 1)
    }).reduceByKey(_ + _)

    // Delete any stale output directory so saveAsTextFile does not fail.
    val fs: FileSystem = FileSystem.get(sc.hadoopConfiguration)
    val path = new Path(dataOutputPath)
    if (fs.exists(path)) {
      fs.delete(path, true)
    }

   /* reduceBykeydata.map(t => (t._1._1, t._1._2, t._2)).saveAsTextFile(dataOutputPath)*/

    // FIX: previously the gson.toJson(...) result was discarded and the
    // ProCity case class itself (via toString) was saved to a hard-coded
    // local path ("F:\\bigdataFile\\coreJson") instead of dataOutputPath —
    // which is also the path the pre-delete above targets. Serialize each
    // record to JSON and write it to the requested output path.
    // mapPartitions reuses a single Gson instance per partition instead of
    // allocating one per record.
    reduceBykeydata.mapPartitions { iter =>
      val gson = new Gson()
      iter.map { case ((province, city), count) =>
        gson.toJson(ProCity(province, city, count))
      }
    }.saveAsTextFile(dataOutputPath)

    sc.stop()
  }
}
