import java.text.SimpleDateFormat
import java.util.Properties

import ch.hsr.geohash.GeoHash
import cn.doitedu.commons.util.SparkUtil
import org.apache.commons.lang3.StringUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

import scala.collection.mutable

/**
  * @author: 余辉  
  * @blog: https://blog.csdn.net/silentwolfyh
  * @create: 2020-01-11 15:44
  * @description:
  *
  *
  * 问题描述：
  * 启动之后运行8个task，前面4个运行很快，后面一直卡顿不动
  **/
/**
  * Pre-processing job for raw app event logs:
  *   1. parse each JSON line into an [[EventBean]] and drop malformed records
  *      and records carrying no usable identifier;
  *   2. enrich province/city/district from a broadcast geohash dictionary;
  *   3. back-fill missing uids by joining uid-less records against
  *      uid-carrying records on any shared device identifier;
  *   4. write the merged result as parquet.
  */
object AppEventLogPreBak {

  // NOTE(review): the Hadoop superuser is conventionally lowercase "root" —
  // confirm "ROOT" is actually what the cluster expects.
  System.setProperty("HADOOP_USER_NAME", "ROOT")

  def main(args: Array[String]): Unit = {

    // Fail fast with a usage message instead of an ArrayIndexOutOfBoundsException.
    if (args.length != 3) {
      println(
        """
          |usage: AppEventLogPreBak <mode> <inputPath> <outputPath>
          |  mode:       spark master, e.g. yarn or local[*]
          |  inputPath:  raw event-log directory, e.g. data/2019-10-29
          |  outputPath: parquet output directory
          |""".stripMargin)
      sys.exit(1)
    }

    val mode = args(0)
    val inputFile = args(1)
    val outputFile = args(2)

    val spark: SparkSession = SparkUtil.getSparkSession(mode)
    import spark.implicits._

    // geohash(5) -> (province, city, district) dictionary; small enough to
    // collect to the driver and broadcast to every executor.
    val area: DataFrame = spark.read.parquet("data/dict/geo_dict/output")

    // collectAsMap() already returns a Map — no unsafe cast to mutable.HashMap needed.
    val areaMap: collection.Map[String, (String, String, String)] = area.rdd.map(row => {
      val geo = row.getAs[String]("geo")
      val province = row.getAs[String]("province")
      val city = row.getAs[String]("city")
      val district = row.getAs[String]("district")
      (geo, (province, city, district))
    }).collectAsMap()

    val bc = spark.sparkContext.broadcast(areaMap)

    // Local default split is 64M per partition, cluster default 128M.
    val data: RDD[String] = spark.sparkContext.textFile(inputFile)
    val result = data
      .map(line => EventBean.getEventBean(line))
      // Drop malformed JSON (getEventBean returns null) and records where every
      // identifier is blank. isNotBlank is null-safe, unlike the original
      // `field != ""` checks.
      .filter(bean =>
        bean != null &&
          (StringUtils.isNotBlank(bean.uid) || StringUtils.isNotBlank(bean.imei) ||
            StringUtils.isNotBlank(bean.uuid) || StringUtils.isNotBlank(bean.mac) ||
            StringUtils.isNotBlank(bean.androidId)))
      .map((bean: EventBean) => {
        val areaValues = bc.value

        // China roughly spans 73°33'E–135°05'E and 3°51'N–53°33'N; only
        // coordinates inside that box can resolve against the dictionary.
        // BUG FIXED: the original used `lg < 120 && lat < 70` (disagreeing
        // with this range) and then unconditionally restored the pre-lookup
        // province/city/district, throwing the geo correction away.
        val lg = bean.longtitude
        val lat = bean.latitude
        if (lg >= 73 && lg <= 136 && lat >= 3 && lat <= 54) {
          val geo = GeoHash.withCharacterPrecision(lat, lg, 5).toBase32
          areaValues.get(geo).foreach { case (province, city, district) =>
            bean.province = province
            bean.city = city
            bean.district = district
          }
        }

        // SimpleDateFormat is not thread-safe, so build one per record rather
        // than sharing a single instance across tasks.
        val sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss")
        val str = sdf.format(bean.timestamp).split(" ")
        bean.dateStr = str(0)
        bean.timeStr = str(1)

        bean
      }).toDF()

    /**
      * uid back-fill: records that already carry a uid ("haveuid") donate it
      * to records that lack one ("notuid") whenever any device identifier
      * matches.
      */
    val uidNotNull: DataFrame = result.where(" uid is not null and trim(uid) !='' ").toDF()
    val uidNull: DataFrame = result.where(" uid is null or trim(uid) =='' ").toDF()

    // BUG FIXED: the original registered these the wrong way round
    // (uidNotNull as "notuid", uidNull as "haveuid"), so b.uid was always
    // blank and no uid was ever back-filled. It also called sys.exit() here,
    // making everything below dead code.
    uidNull.createTempView("notuid")
    uidNotNull.createTempView("haveuid")

    // Two identifiers match only when both are non-blank AND equal.
    // BUG FIXED: the original returned true only when both sides were blank.
    val isEqual: (String, String) => Boolean =
      (a, b) => StringUtils.isNotBlank(a) && StringUtils.isNotBlank(b) && a == b

    spark.udf.register("is_equal", isEqual)

    // NOTE(review): a left join whose condition is an OR of UDF calls cannot
    // use any equi-join strategy and degenerates into a nested-loop join over
    // the full cross product — the likely cause of the "last tasks never
    // finish" symptom described in the file header. Rewriting as a union of
    // per-identifier equi-joins would let Spark shuffle/broadcast instead.
    val uids: DataFrame = spark.sql(
      """
        |select
        |
        |a.eventid,
        |a.event,
        |if(b.uid is not null,b.uid,a.uid) as uid,
        |a.account,
        |a.addr,
        |a.imei,
        |a.mac,
        |a.imsi,
        |a.osName,
        |a.osVer,
        |a.androidId,
        |a.resolution,
        |a.deviceType,
        |a.deviceId,
        |a.uuid,
        |a.appid,
        |a.appVer,
        |a.release_ch,
        |a.promotion_ch,
        |a.areacode,
        |a.longtitude,
        |a.latitude,
        |a.carrier,
        |a.netType,
        |a.cid_sn,
        |a.ip,
        |a.sessionId,
        |a.timestamp,
        |a.dateStr,
        |a.timeStr,
        |a.province,
        |a.city,
        |a.district
        |
        |from
        |notuid a left join haveuid b
        |  on is_equal(a.imei,b.imei)
        |  or is_equal(a.imsi,b.imsi)
        |  or is_equal(a.mac,b.mac)
        |  or is_equal(a.uuid,b.uuid)
        |  or is_equal(a.androidId,b.androidId)
        |  or is_equal(a.deviceId,b.deviceId)
        |
        |""".stripMargin)

    println("first==>" + System.currentTimeMillis())

    // BUG FIXED: the original wrote only `uids` (the formerly-uid-less rows),
    // silently dropping every record that already had a uid. union is
    // positional — the select list above mirrors the EventBean field order so
    // the schemas line up; confirm if EventBean changes.
    val results: Dataset[Row] = uids.union(
      uidNotNull.selectExpr(
        "eventid", "event", "uid", "account", "addr", "imei", "mac", "imsi",
        "osName", "osVer", "androidId", "resolution", "deviceType", "deviceId",
        "uuid", "appid", "appVer", "release_ch", "promotion_ch", "areacode",
        "longtitude", "latitude", "carrier", "netType", "cid_sn", "ip",
        "sessionId", "timestamp", "dateStr", "timeStr", "province", "city",
        "district"))

    results.write.parquet(outputFile)
    spark.close()
    println("second==>" + System.currentTimeMillis())
  }
}
