package dataProcess

import org.apache.spark.{SparkConf, SparkContext}
import org.slf4j.LoggerFactory
import scala.collection.mutable.ArrayBuffer
import  util._
/**
  * Created by THINKPAD on 2017/11/8.
  */
object guXinchengTencent {
  val log = LoggerFactory.getLogger(guXinchengTencent.getClass)

  /**
    * Densifies per-cell people counts onto a fixed lat/lon grid and aggregates them.
    *
    * args(0): input path of tab-separated records — "time \t lat \t lon \t ... \t count"
    * args(1): output path; its last path segment names the Spark application
    * args(2): partition count used for reading, parallelizing and reducing
    *
    * Output: one tab-separated line per (time, lat, lon) grid cell with the summed count;
    * cells with no observations are emitted with count 0.
    */
  def main(args: Array[String]) {
    val (textData, dest, partition) = (args(0), args(1), args(2))
    val destOutName = dest.split('/').last

    val conf = new SparkConf().setAppName(s"guXinchengTencent-" + destOutName)
//      .setMaster("local[8]")
//      .setJars(List("C:\\Codes\\IdeaProjects\\MachineLearning\\JiQiXueXi\\outJar\\JiQiXueXi.jar"))
    conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sc = new SparkContext(conf)

    log.info("-----------------------Start Spark Context-----------------------" + destOutName)
    try {
      val oriData = sc.textFile(textData, partition.toInt)
      // The four snapshot times of interest.
      val timeArr = Array("2016-01-18 10:00:00", "2016-01-18 23:00:00", "2016-01-19 10:00:00", "2016-01-19 23:00:00")
      // Latitude/longitude bounding box of the grid.
      val (latMin, latMax, lonMin, lonMax) = (39.66, 40.53, 115.89, 117.21)
      // Grid resolution in degrees.
      val step = 0.01
      // Parsed observations: ((time, lat rounded to 2 decimals, lon rounded to 2 decimals), count).
      // util.two is assumed to round to two decimal places so observed points land
      // exactly on grid-cell keys — TODO(review): confirm against util.two.
      val data1 = oriData.map(_.split("\t")).map { f =>
        ((f(0), util.two(f(1).toDouble), util.two(f(2).toDouble)), f.last.toInt)
      }
      // Zero-count lattice covering the whole bounding box for every snapshot time,
      // so that cells with no observations still appear in the output after the union.
      // The do-while float accumulation is kept exactly as before so the generated
      // set of lattice points is unchanged.
      val arr = ArrayBuffer[((String, Double, Double), Int)]()
      for (t <- timeArr) {
        var lat = latMin
        do {
          var lon = lonMin
          do {
            arr += (((t, util.two(lat), util.two(lon)), 0))
            lon += step
          } while (lon <= lonMax)
          lat += step
        } while (lat <= latMax)
      }
      val arrRDD = sc.parallelize(arr.toArray, partition.toInt)
      // Union observed counts with the zero lattice, sum per (time, lat, lon) cell,
      // and write a single tab-separated output file.
      // (Explicit .toString keeps the Array a uniform Array[String] instead of
      // silently widening to Array[Any]; the rendered output is identical.)
      data1.union(arrRDD)
        .reduceByKey(_ + _, partition.toInt)
        .map(f => Array(f._1._1, f._1._2.toString, f._1._3.toString, f._2.toString).mkString("\t"))
        .coalesce(1).saveAsTextFile(dest)
    } finally {
      // Always release the Spark context, even if the job fails.
      sc.stop()
    }
  }

}
