package xubo.wangcaifeng.love.log

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{ SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

object DealWithLog {

  // Log records are comma-separated with exactly this many fields; rows with
  // any other field count are malformed and dropped.
  private val ExpectedFieldCount = 85
  // Column positions of the geographic fields within a valid record.
  private val ProvinceIndex = 24
  private val CityIndex = 25

  // Original sample input; used when no path is given on the command line.
  private val DefaultInputPath =
    "data/2016-10-01_06_p1_invalid.1475274123982.log.FINISH.bz2"

  /** Counts log records per (province, city) pair.
    *
    * Reads a bzip2-compressed log file (path from `args(0)`, defaulting to
    * [[DefaultInputPath]]), keeps only well-formed records, and aggregates a
    * count per (province, city).
    *
    * NOTE(review): no Spark action is invoked on `counts` (the JDBC `foreach`
    * below is commented out), so as written the transformations are lazy and
    * never actually execute — confirm this is intentional.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("log")
      .setMaster("local")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("spark.sql.parquet.compression.codec", "Snappy")
    val sc = new SparkContext(conf)
    try {
      // Allow the input path to be supplied as the first program argument;
      // fall back to the original hard-coded file for backward compatibility.
      val inputPath = args.headOption.getOrElse(DefaultInputPath)
      val lines: RDD[String] = sc.textFile(inputPath)
      // Keep only records with the expected number of comma-separated fields.
      val validRecords = lines.map(_.split(",")).filter(_.length == ExpectedFieldCount)
      val pairs: RDD[((String, String), Int)] = validRecords.map { fields =>
        ((fields(ProvinceIndex), fields(CityIndex)), 1)
      }
      val counts: RDD[((String, String), Int)] = pairs.reduceByKey(_ + _)
      // Write the results to the database (disabled).
      /*    counts.foreach(t => {
        var conn: Connection = null
        var pstm: PreparedStatement = null
        try {
          val url = "jdbc:mysql://localhost:3306/dmt?characterEncoding=utf8"
          conn = DriverManager.getConnection(url, "root", "217410")
          val sql = "insert into dmtcount values(?,?,?)"
          pstm = conn.prepareStatement(sql)
          // Bind the count, then province, then city.
          pstm.setInt(1, t._2)
          pstm.setString(2, t._1._1)
          pstm.setString(3, t._1._2)
          pstm.execute()
        } catch {
          // In Scala, try/catch matches the error type with `case` clauses.
          case e: Exception => e.printStackTrace()
        } finally {
          if (pstm != null) pstm.close()
          if (conn != null) conn.close()
        }
      })*/
      // Write the results to a local file (disabled).
      //val res: String = JSON.toJSONString("count")
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }

}
