package cn.devil.two

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, SQLContext}

import cn.devil.two.myConf.MyConf
import org.apache.hadoop.fs.{FileSystem, Path}
object SparkSQLMySql {

  /** Batch job: reads a parquet dataset, counts rows grouped by
   *  (provincename, cityname), then writes the aggregate twice —
   *  as 4 JSON part-files under the output path, and into the MySQL
   *  table "leopard_table" using the connection settings in [[MyConf]].
   *
   *  args(0) = input parquet path, args(1) = output directory for the JSON result.
   *  Exits with a non-zero status when the argument count is wrong.
   */
  def main(args: Array[String]): Unit = {
    // Both arguments are mandatory (the old message implied they were alternatives).
    if (args.length != 2) {
      println(
        """
          |Usage: SparkSQLMySql <inputFilePath> <outputFilePath>
          |  inputFilePath  - parquet input path
          |  outputFilePath - output directory for the JSON result (overwritten if present)
        """.stripMargin)
      sys.exit(-1)
    }
    val Array(inputFilePath, outPutFilePath) = args

    // setIfMissing keeps local development working (local[*]) while still
    // honouring --master from spark-submit on a cluster; the original
    // setMaster("local[*]") silently overrode the submitted master.
    val conf: SparkConf = new SparkConf()
      .setAppName("Devil")
      .setIfMissing("spark.master", "local[*]")
    val sc: SparkContext = new SparkContext(conf)
    try {
      val sqlContext = new SQLContext(sc)

      val frame: DataFrame = sqlContext.read.parquet(inputFilePath)
      // NOTE(review): registerTempTable is deprecated in Spark 2.x in favour of
      // createOrReplaceTempView; kept as-is to stay compatible with the
      // SQLContext-era API this file is written against.
      frame.registerTempTable("leopard_t")
      val data = sqlContext.sql(" select count(1),provincename, cityname from leopard_t group by provincename, cityname")

      // Delete a pre-existing output directory first, otherwise the JSON
      // write fails with "path already exists" (default SaveMode.ErrorIfExists).
      val fs = FileSystem.get(sc.hadoopConfiguration)
      val path = new Path(outPutFilePath)
      if (fs.exists(path)) {
        fs.delete(path, true)
      }

      // coalesce(4): cap the result at 4 part-files as required by the spec above.
      data.coalesce(4).write.json(outPutFilePath)
      data.write.jdbc(MyConf.url, "leopard_table", MyConf.pro)
    } finally {
      // Always release the SparkContext, even when a stage or the JDBC write fails.
      sc.stop()
    }
  }
}
