package com.wei.violet.app

import java.util.Properties

import com.typesafe.config.{Config, ConfigFactory}
import com.wei.violet.config.ConfigHandle
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{DataFrame, SQLContext, SaveMode}
import org.apache.spark.{SparkConf, SparkContext}

object SQLHandleParquet {
  // Quiet the very chatty Spark/Hadoop logging; keep warnings and errors.
  Logger.getLogger("org").setLevel(Level.WARN)

  /**
   * Batch job: reads ad-log parquet data, counts rows per (province, city),
   * writes the aggregate as JSON files and into a MySQL table.
   *
   * @param args args(0) = parquet input path, args(1) = JSON output path
   */
  def main(args: Array[String]): Unit = {
    // Fail fast with a readable message instead of an opaque MatchError.
    require(args.length == 2, "usage: SQLHandleParquet <dataInputPath> <dataOutputPath>")
    val Array(dataInputPath, dataOutputPath) = args

    val conf: SparkConf = new SparkConf()
      .setAppName("将统计结果添加到mysql数据库中")
      .setMaster("local[*]")
      // FIX: key was misspelled "spark.serizlizer", so Kryo was silently never enabled.
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    val sc = new SparkContext(conf)
    try {
      val sqlContext = new SQLContext(sc)
      val dataFrame: DataFrame = sqlContext.read.parquet(dataInputPath)
      dataFrame.registerTempTable("adlog")

      // Row count per (province, city); column names/SQL kept byte-identical so
      // the downstream JSON/MySQL schema is unchanged.
      val result: DataFrame = sqlContext.sql("select provincename , cityname ,count(*) from adlog group by provincename, cityname")

      // Delete any previous run's output so the JSON write cannot fail on
      // an already-existing directory.
      val fs: FileSystem = FileSystem.get(sc.hadoopConfiguration)
      val path = new Path(dataOutputPath)
      if (fs.exists(path)) {
        fs.delete(path, true)
      }

      result.coalesce(4).write.json(dataOutputPath)

      // Also persist to MySQL; url/table/connection props come from ConfigHandle.
      result.write.mode(SaveMode.Overwrite).jdbc(
        ConfigHandle.url,
        ConfigHandle.protable,
        ConfigHandle.conn
      )
    } finally {
      // FIX: the original never stopped the SparkContext, leaking its resources.
      sc.stop()
    }
  }
}
