package cn.devil.two


import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

object SparkCoreMySQL {

  /**
   * Reads a parquet dataset, counts rows per province, and prints each
   * (province, count) pair to stdout.
   *
   * Expects exactly one program argument: the path of the parquet
   * directory/file to read. Exits with code -1 if the argument is missing.
   */
  def main(args: Array[String]): Unit = {
    if (args.length != 1) {
      println("输入：读文件的目录—outPutFilePath")
      sys.exit(-1)
    }
    // Single argument is the *input* path of the parquet data to read.
    val Array(inputPath) = args

    val conf: SparkConf = new SparkConf()
      .setAppName("Devil")
      .setMaster("local[*]")
    val sc: SparkContext = new SparkContext(conf)
    val sqlContext = new SQLContext(sc)

    try {
      val frame: DataFrame = sqlContext.read.parquet(inputPath)

      // Drop to the RDD API so we can aggregate with reduceByKey.
      val rows: RDD[Row] = frame.rdd
      rows
        // NOTE(review): assumes column index 13 holds the province name —
        // confirm against the parquet schema.
        .map(row => (row.getString(13), 1))
        .reduceByKey(_ + _)
        // Side effect only: print each (province, count) pair. Do not bind
        // the result — foreach returns Unit.
        .foreach(println)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }
}
