package xubo.wangcaifeng.love.method

import ch.hsr.geohash.GeoHash
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.SQLContext
import org.codehaus.jackson.map.ser.StdSerializers.UtilDateSerializer
import xubo.wangcaifeng.love.Utils.{Geo, JedisPool, TU}

object Need4 {

  /**
   * Reads (long, lat) coordinate pairs from a parquet dataset, reverse-geocodes each
   * pair to a business tag (via `Geo.bussinessTagFromBaidu` — presumably the Baidu
   * reverse-geocoding API; confirm against that helper) and stores the tag in Redis,
   * keyed by the coordinate's 8-character geohash.
   *
   * args(0): dataInputPath — parquet input path
   * args(1): outputPath    — result output path. NOTE(review): this directory is
   *          pre-emptively deleted but nothing is ever written to it — results go to
   *          Redis only. Confirm whether this parameter is still needed.
   */
  def main(args: Array[String]): Unit = {
    if (args.length != 2) {
      println(
        """
          |xubo.wangcaifeng.love.method.Need4
          |params:
          | dataInputPath parquet输入路径
          | outputPath  结果输出路径
        """.stripMargin)
      sys.exit()
    }
    val Array(dataInputPath, outputPath) = args
    // NOTE(review): master is hard-coded to local[*]; remove for cluster deployment
    // and pass it via spark-submit instead.
    val conf = new SparkConf()
      .setAppName("按照查询")
      .setMaster("local[*]")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sc = new SparkContext(conf)
    try {
      val sqlc = new SQLContext(sc)
      // Hadoop client configuration (core-default.xml / hdfs-default.xml).
      val hadoopConfiguration = sc.hadoopConfiguration
      val fs = FileSystem.get(hadoopConfiguration)
      val destDir = new Path(outputPath)
      if (fs.exists(destDir)) {
        // Recursive delete so a re-run does not fail on a pre-existing directory.
        fs.delete(destDir, true)
      }
      val frame = sqlc.read.parquet(dataInputPath)
      // Keep only rows passing the coordinate filter defined in TU.longandlat.
      val longAndLat: RDD[(String, String)] = frame.where(TU.longandlat).map(row => {
        // Longitude, expected range 73.66~135.05.
        val long = row.getAs[String]("long")
        // Latitude, expected range 3.86~53.55.
        val lat = row.getAs[String]("lat")
        (long, lat)
      })
      longAndLat.foreachPartition(it => {
        // One Redis connection per partition; release it even if an element fails.
        val jedis = JedisPool.getConnection()
        try {
          it.foreach(t => {
            // Reverse-geocode; the helper is called with "lat,long" order.
            val value = Geo.bussinessTagFromBaidu(t._2 + "," + t._1)
            // 8-character geohash of (lat, long) used as the Redis key.
            val key = GeoHash.withCharacterPrecision(t._2.toDouble, t._1.toDouble, 8).toBase32
            if (value.nonEmpty) {
              jedis.set(key, value)
            }
          })
        } finally {
          // fix: connection was leaked when an element threw
          // (e.g. toDouble on malformed data).
          jedis.close()
        }
      })
    } finally {
      // fix: ensure the SparkContext is always released, even on failure.
      sc.stop()
    }
  }

}
