package com.need1

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SQLContext, SaveMode}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Batch job: reads ad-log records from a parquet file, counts records per
  * (province, city) pair, persists the counts to MySQL, and also writes the
  * aggregated result out as JSON.
  *
  * Created by zhuang on 2018/3/1.
  */
object Dmp {

  def main(args: Array[String]): Unit = {
    // NOTE: replaced `extends App` with an explicit main method — App's delayed
    // initialization interacts badly with Spark closure serialization (object
    // fields can be null when closures are deserialized on executors).
    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName(this.getClass.getSimpleName)
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sc = new SparkContext(conf)
    // SQLContext is needed to read the parquet file into a DataFrame.
    val sqlContext: SQLContext = new SQLContext(sc)

    try {
      val parquet: DataFrame = sqlContext.read.parquet("file:///e:/project2.parquet")
      // Drop down to the RDD API so we can aggregate with reduceByKey.
      val rows: RDD[Row] = parquet.rdd
      // Key every record by (province, city) with an initial count of 1.
      val keyed: RDD[((String, String), Int)] = rows.map { row =>
        val province = row.getAs[String]("provincename")
        val city = row.getAs[String]("cityname")
        ((province, city), 1)
      }
      // Sum the counts per (province, city).
      val counts: RDD[((String, String), Int)] = keyed.reduceByKey(_ + _)
      // Persist the aggregated counts to MySQL (alternatively, use the
      // DataFrameWriter.jdbc API instead of this hand-rolled helper).
      Utils.writeDataInMysql(counts)

      // Re-shape into Rows so a schema can be attached for the JSON output.
      val countRows: RDD[Row] = counts.map { case ((province, city), ct) =>
        Row(ct, province, city)
      }
      val schema: StructType = StructType(List(
        StructField("ct", IntegerType),
        StructField("provincename", StringType),
        StructField("cityname", StringType)))
      // Could also import sqlContext.implicits._ and use toDF with column names.
      val df: DataFrame = sqlContext.createDataFrame(countRows, schema)
      // Overwrite any previous output at the same path.
      df.write.mode(SaveMode.Overwrite).json("file:///e:/project2_json.json")
    } finally {
      // Always release Spark resources, even if the job fails part-way.
      sc.stop()
    }
  }
}
