package cn.edu360
import java.sql
import java.util.Properties
import java.sql.{Connection, DriverManager, ResultSet}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}
object Report2 {

  /**
   * Spark job: counts raw log records per "province-city" key, prints the
   * counts in descending order on the driver, and persists the aggregated
   * result to MySQL via [[Utils.scalijeJdbcToMysql]].
   *
   * CLI: Report2 <input> <output>
   *   input  - path of the raw log files to read
   *   output - reserved output path (currently unused — the saveAsTextFile
   *            path is disabled; kept so the CLI contract stays unchanged)
   *
   * Exits with status 1 when the argument count is wrong.
   */
  def main(args: Array[String]): Unit = {
    if (args.length != 2) {
      println( """
                 |cn.edu360.Report
                 |参数:input output
               """.stripMargin
      )
      sys.exit(1)
    }
    // Destructure the two CLI arguments.
    val Array(input, output) = args

    val conf: SparkConf = new SparkConf()
      .setMaster("local[*]") // NOTE(review): hard-coded local master — prefer supplying via spark-submit
      .setAppName(this.getClass.getSimpleName)
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sc: SparkContext = new SparkContext(conf)

    // Parse each raw line; Shuxing.makelog returns Option, so flatMap drops
    // unparseable records directly. This replaces the previous stringly-typed
    // ("null", 0) sentinel that had to be filtered out again afterwards.
    // reduceByKey is computed ONCE and cached, because the result is consumed
    // twice below (driver-side print and MySQL write) — the original rebuilt
    // the whole lineage a second time for the JDBC write.
    val provinceCityCounts: RDD[(String, Int)] = sc.textFile(input)
      .flatMap { line =>
        Shuxing.makelog(line).map(s => (s.provincename + "-" + s.cityname, 1))
      }
      .reduceByKey(_ + _)
      .cache()

    // Collect to the driver and print counts in descending order. Assumes the
    // distinct province-city key space is small enough to hold locally.
    provinceCityCounts
      .toLocalIterator
      .toList
      .sortBy(-_._2)
      .foreach(println)

    // Persist the aggregated counts to MySQL (scalikejdbc-based helper).
    Utils.scalijeJdbcToMysql(provinceCityCounts)

    sc.stop()
  }
}
