package com.example.bigdata.SparkSQL
import org.apache.spark.streaming.kafka010._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.security.UserGroupInformation
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.io.Source

object SparkSQLOnLine {
    /**
      * Scratch entry point for ad-hoc data explorations.
      *
      * Original task notes (translated):
      *   data_src = /data/datacenter/locationTag/tagFromWifiFlat_D/2020
      *   - find every Wi-Fi AP whose tagname is "preschool education"
      *     (学前教育) or "art interest" (艺术兴趣)
      *   - collect all tdids that connected to those APs in the last month
      */
    def main(args: Array[String]): Unit = {
        // Only the Kafka streaming demo is currently enabled; the batch
        // explorations below are kept (disabled) for reference.
        streamingWithKafka()
//        val spark = SparkSession
//                .builder()
//                .master("local[1]")
//                .appName("SparkSQLOnLine")
//                .getOrCreate()
//        dfsLogin()
//        readBadFileTest(spark)
//        coffee_D(spark)
//        Fshop(spark)
//        val hdfsFile = "hdfs://172.17.128.2:9820/data/datacenter/locationTag/tagFromWifiFlat_D/2020/05/30"
//        tagFromWifiFlat_Tdid(spark, hdfsFile)
//        wifi_Test(spark)
//        spark.stop()
    }

    /**
      * Consumes the HDFS-audit Kafka topic with a direct stream and prints
      * each record's value every 2-second batch.
      *
      * Kafka auto-commit is disabled; progress is tracked via Spark
      * checkpointing under `checkpointDir`. Blocks in `awaitTermination`
      * until the streaming context is stopped.
      */
    def streamingWithKafka(): Unit = {
        // Previously used demo cluster (kept for reference):
        //   brokers = "172.23.6.159:9092,172.23.6.160:9092,172.23.6.161:9092"
        //   topic = groupid = "Demo_topic"
        val brokers = "10.8.128.106:9092"
        val topic = "filebeat_hdfs_audit_jd"
        val groupid = "jd_hdfsaudit_group"
        val maxPoll = 20000
        // Where Spark persists checkpoints (and therefore Kafka offsets).
        val checkpointDir = "./checkpointDir/jd_hdfsaudit_group"

        val conf = new SparkConf()
                .setMaster("local")
                .setAppName("jd_hdfsaudit_group")
        val ssc = new StreamingContext(conf, Seconds(2))
        ssc.checkpoint(checkpointDir)

        val kafkaParams: Map[String, Object] = Map(
            ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> brokers,
            ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer],
            ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer],
            ConsumerConfig.GROUP_ID_CONFIG -> groupid,
            ConsumerConfig.MAX_POLL_RECORDS_CONFIG -> maxPoll.toString,
            ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "latest",
            // Offsets are committed via checkpointing, not by the consumer.
            ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> (false: java.lang.Boolean)
        )

        val stream = KafkaUtils.createDirectStream(
            ssc,
            LocationStrategies.PreferConsistent,
            ConsumerStrategies.Subscribe[String, String](Set(topic), kafkaParams))
        stream.map(_.value()).print()

        ssc.start()
        ssc.awaitTermination()
    }

    /**
      * Consumption-preference label exploration.
      *
      * Intended processing (translated from the original notes): key by
      * tdid and, for every tagId inside `tags`, produce a feature named
      * consume_{n}_days_{tagid}; join tagId against the idcode dictionary
      * (tags.tagId == dictionary id) to derive parent-tag (parentid)
      * features.
      *
      * Currently only loads one parquet part and prints the schema and a
      * sample of the `tags` column for inspection.
      *
      * @param spark active session used to read the parquet file from HDFS
      */
    def 消费偏好类(spark: SparkSession): Unit = {
        import spark.implicits._
        val file = "hdfs://172.17.128.2:9820/datascience/etl2/label/offline_consume/2020/06/01/part-00009-2912bed2-7045-4118-8573-3567f8ee2bbf-c000.gz.parquet"
        val df = spark.read.parquet(file).select($"tags")
        df.printSchema()
        df.show()
    }
    /**
      * Id-association store exploration (scheduled for Thursdays).
      *
      * Intended pipeline (translated from the original notes):
      *   1. From the idinfo store keep rows where both the imei and idfa
      *      lists satisfy 0 &lt; size &lt; 100.
      *   2. Regex-validate each imei/idfa; drop malformed ids, and drop the
      *      whole row if a list becomes empty after filtering.
      *   3. Key by imei and by imsi, aggregate all associated imsi/imei
      *      entries, merging the dates of identical ids.
      *   4. Pick one id from the associated list and compute priority and
      *      source according to the selection rules.
      *
      * Currently only loads one parquet part and shows the imei/idfa
      * columns.
      *
      * @param spark active session used to read the parquet file from HDFS
      */
    def 关联库(spark: SparkSession): Unit = {
        import spark.implicits._
        val parquetFile = "hdfs://172.17.128.2:9820/datascience/etl2/id_info/2020/06/01/part-13499-a7f5bc52-2afa-4063-820d-b411d116ad15-c000.gz.parquet"
        val df = spark.read.parquet(parquetFile)
        val imei = df.select($"imei", $"idfa")
        imei.show()
    }

    /**
      * Coffee-shop Wi-Fi extraction.
      * Output dataset: /data/datacenter/locationTag/coffee_M
      * (old: /data/datacenter/locationTag/coffee_D); cadence: daily/monthly.
      *
      * Logic: from one month of wifiFlat data, keep APs whose LOWER-CASED
      * ssid contains one of "coffee", "starbucks", "cafe" or "咖啡", then
      * join on the (bssid, lower-cased ssid) key against the AP POI
      * dictionary; APs missing from the dictionary simply don't match.
      *
      * @param spark active session used to read parquet data from HDFS
      */
    def coffee_D(spark: SparkSession): Unit = {
        import spark.implicits._
        import org.apache.spark.sql.functions.lower
        // BUG FIX: the keyword match must run on the lower-cased ssid (the
        // spec says "ssid转小写后" and the join key below lower-cases too);
        // previously "Coffee"/"Starbucks" etc. were silently dropped.
        val ssidLower = lower($"ssid")
        val rdd_ssid = spark.read.parquet("hdfs://172.17.128.2:9820/data/datacenter/solar-system/wifiFlat/2020/05/01/part-02999-c2951c60-19ba-4e33-8e98-a2960472a7bd-c000.gz.parquet")
                .select("bssid", "ssid")
                .filter(ssidLower.contains("coffee") ||
                        ssidLower.contains("starbucks") ||
                        ssidLower.contains("cafe") ||
                        ssidLower.contains("咖啡"))
                .map(x => (x.getAs("bssid").toString + "_" + x.getAs[String]("ssid").toLowerCase(), 1))
                .rdd.distinct()
        rdd_ssid.foreach(println(_))
        val poi_file = spark.read.parquet("hdfs://172.17.128.2:9820/datascience/datacloud/datagroup/data/ap_poi/car_ap_poi_v01")
        val rdd_poi = poi_file.rdd.map(x => {
            // Same "bssid_lowercased-ssid" key shape as rdd_ssid above.
            val k = x.getAs("bssid").toString + "_" + x.getAs[String]("ssid").toLowerCase()
            val v = (x.getAs[String]("wifi_lat"), x.getAs[String]("wifi_lng"))
            (k, v)
        })
        println("count " + rdd_poi.join(rdd_ssid).count())
    }

    /**
      * 4S-store (car dealership) audience extraction.
      * Dataset: /data/datacenter/locationTag/car_M; cadence: daily/monthly.
      *
      * Joins one month of wifiFlat behaviour records against the car AP POI
      * dictionary (/datascience/datacloud/datagroup/data/ap_poi/
      * car_ap_poi_v01) on a combined "bssid_ssid" key, then restores the
      * flat (bssid, ssid, tdid, sentTime, current) layout.
      *
      * NOTE(review): the key is later re-split on "_", which assumes the
      * ssid itself contains no underscore — verify against real data.
      *
      * @param spark active session used to read parquet data from HDFS
      */
    def Fshop(spark: SparkSession): Unit = {
        import spark.implicits._

        // Wi-Fi behaviour records keyed by "bssid_ssid"; value keeps the
        // remaining columns so the row can be reconstructed after the join.
        val wifiRecords = spark.read.parquet("hdfs://172.17.128.2:9820/data/datacenter/solar-system/wifiFlat/2020/05/01/part-02999-c2951c60-19ba-4e33-8e98-a2960472a7bd-c000.gz.parquet")
                .select("ssid", "bssid", "tdid", "sentTime", "current")
                .rdd
                .map { row =>
                    val joinKey = row.getAs[Long]("bssid") + "_" + row.getAs[String]("ssid")
                    val payload = (row.getAs[String]("tdid"),
                            row.getAs[Long]("sentTime"),
                            row.getAs[Boolean]("current"))
                    (joinKey, payload)
                }
                .distinct()

        // Car-dealership POI dictionary with the same key shape; the value
        // is a dummy marker, only membership matters.
        val carPoi = spark.read.parquet("hdfs://172.17.128.2:9820/datascience/datacloud/datagroup/data/ap_poi/car_ap_poi_v01")
                .rdd
                .map(row => (row.getAs[Long]("bssid") + "_" + row.getAs[String]("ssid"), 1))
                .distinct()

        // Keep only records whose AP appears in the POI dictionary and
        // unpack the key back into its bssid/ssid parts.
        val matched = wifiRecords.join(carPoi).map { case (key, (payload, _)) =>
            val parts = key.split("_")
            (parts(0), parts(1), payload._1, payload._2, payload._3)
        }.toDF("bssid", "ssid", "tdid", "sentTime", "current")

        matched.show
        matched.printSchema
    }

    /**
      * For one day of tagFromWifiFlat data: counts the distinct devices
      * (tdid) that actually connected, then groups the distinct
      * (tagname, tdid) pairs by tagname into
      * (tagname, tdidList, tdidcount).
      *
      * @param spark    active session used to read parquet data from HDFS
      * @param hdfsFile day partition to read, e.g.
      *                 hdfs://172.17.128.2:9820/data/datacenter/locationTag/tagFromWifiFlat_D/2020/05/30
      */
    def tagFromWifiFlat_Tdid(spark: SparkSession, hdfsFile: String): Unit = {
        import spark.implicits._
        // BUG FIX: a local `val hdfsFile = ".../2020/05/30"` used to shadow
        // the parameter, so every caller silently processed the same
        // hard-coded day. The path now comes from the caller.
        val df1 = spark.read.parquet(hdfsFile).filter($"connect")
        // Number of distinct devices with a real connection.
        val df_con_count = df1.map(_.getAs[String]("tdid")).distinct.count
        println("df_con_count : " + df_con_count)

        // Distinct (tagname, tdid) pairs encoded as "tagname_tdid".
        // NOTE(review): assumes tagname contains no "_" — the string is
        // re-split below; verify against real data.
        val rdd = df1.rdd
                .map(x => x.getAs("tagname").toString + "_" + x.getAs("tdid").toString)
                .distinct(2)
        println("rdd count : " + rdd.count())
        val rdd01 = rdd.map(x => (x.split("_")(0), x.split("_")(1))).groupByKey()
        rdd01.foreach(println(_))
        // Per-tag device list and its size; previously computed but never
        // used — now shown so the result is visible.
        val df02 = rdd01.map(x => (x._1, x._2.toList, x._2.size)).toDF("tagname", "tdidList", "tdidcount")
        df02.show()
    }

    /**
      * Performs a Kerberos login against the yhhadoop310001 HDFS cluster
      * using the krb5.conf / core-site.xml / hdfs-site.xml files and the
      * hadoop.keytab stored under conf/yhhadoop310001/.
      *
      * Side effects: sets the `java.security.krb5.conf` system property and
      * installs the logged-in principal into UserGroupInformation.
      */
    def dfsLogin(): Unit = {
        val confDir = "conf/yhhadoop310001/"

        val hdfsConf = new Configuration
        hdfsConf.addResource(new Path(confDir + "core-site.xml"))
        hdfsConf.addResource(new Path(confDir + "hdfs-site.xml"))
        hdfsConf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem")
        // HA failover proxy for the yhhadoop310001 nameservice.
        hdfsConf.set("dfs.client.failover.proxy.provider.yhhadoop310001", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")

        System.setProperty("java.security.krb5.conf", confDir + "krb5.conf")
        UserGroupInformation.setConfiguration(hdfsConf)
        UserGroupInformation.loginUserFromKeytab(
            "hadoop/bj-jd-dc-namenode-prod-0009.tendcloud.com@HADOOP.COM",
            confDir + "hadoop.keytab")
    }

    /**
      * Counts the rows in the hobby tagFromWifiFlat_Tdid output for 2020.
      *
      * BUG FIX: the previous filter was
      * `$"".contains() || $"".equals("")` — `Column.contains` requires an
      * argument (the call did not compile) and `Column.equals("")` is JVM
      * reference equality on the Column object (always false), not a SQL
      * comparison. The broken placeholder filter is removed; add a real
      * predicate (e.g. `$"tagname" === "..."`) once the target column is
      * known.
      *
      * @param spark active session used to read parquet data from HDFS
      */
    def wifi_Test(spark: SparkSession): Unit = {
        val cl = "hdfs://172.17.128.2:9820"
        val hdfsFile = cl + "/user/datamodeling/tagFromWifiFlat_Tdid/hobby/2020"
        val dataframe01 = spark.read.parquet(hdfsFile)
        println("count : " + dataframe01.count())
    }
    /**
      * Loads a text part-file suspected to be corrupt and shows its
      * contents; a sibling part-file path is kept around for comparison.
      *
      * @param spark active session used to read the files from HDFS
      */
    def readBadFileTest(spark: SparkSession): Unit = {
        val suspectPart = "hdfs://172.17.128.2:9820/fintech/id_mapping/imei2tdid/2020/03/27/part-00003"
        val referencePart = "hdfs://172.17.128.2:9820/fintech/id_mapping/imei2tdid/2020/03/27/part-00002"

        spark.read.textFile(suspectPart).show()
        //spark.read.textFile(referencePart).printSchema()
    }

    /**
      * Runs an HDFS fsck over HTTP against the NameNode web UI and prints,
      * for every reported block line (lines starting with "0"), its
      * space-separated fields joined by "->".
      *
      * Fixes: the fsck URL is now built from `namenode` instead of
      * duplicating the host, and the Source is closed to release the
      * underlying connection.
      */
    def findBadBlock(): Unit = {
        val namenode = "10.16.16.204:9870"
        val url = s"http://$namenode/fsck?ugi=hadoop&files=1&blocks=1&locations=1&path=/tmp/port.json"
        val source = Source.fromURL(url, "UTF-8")
        try {
            val urlinfo = source.mkString
            for (line <- urlinfo.split("\n") if line.startsWith("0")) {
                println(line.split(" ").mkString("->"))
            }
        } finally {
            source.close()
        }
    }


}
