package cn.dmp.tags

import cn.dmp.utils.{JedisPools, TagsUtils}
import com.typesafe.config.ConfigFactory
import org.apache.hadoop.hbase.{HColumnDescriptor, HTableDescriptor, TableName}
import org.apache.hadoop.hbase.client.{ConnectionFactory, Put}
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.hbase.mapred.TableOutputFormat
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.mapred.JobConf
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}

object Tags4Ctx {

    /**
     * Context-tagging driver.
     *
     * Reads one day's ad-log parquet data, derives per-user tags from several
     * tag families (ads, apps, devices, keywords, business districts via Redis),
     * merges tag counts per user with `reduceByKey`, and writes the result as
     * text files (an HBase sink is present but commented out).
     *
     * CLI args (5, positional): inputPath dictFilePath stopWordsFilePath day outputPath
     */
    def main(args: Array[String]): Unit = {
        if (args.length != 5) {
            println(
                """
                  |cn.dmp.tags.Tags4Ctx
                  |参数：
                  | 输入路径
                  | 字典文件路径
                  | 停用词库
                  | 日期
                  | 输出路径
                """.stripMargin)
            sys.exit()
        }

        val Array(inputPath, dictFilePath, stopWordsFilePath, day, outputPath) = args

        // Spark setup.
        // NOTE(review): master is hard-coded to local[*]; in code it overrides
        // spark-submit's --master, so remove setMaster before cluster deployment.
        val sparkConf = new SparkConf()
        sparkConf.setAppName(s"${this.getClass.getSimpleName}")
        sparkConf.setMaster("local[*]")
        // Kryo serialization for RDD shuffle / worker-to-worker data transfer.
        sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

        val sc = new SparkContext(sparkConf)
        val sQLContext = new SQLContext(sc)

        // App dictionary: tab-separated lines, mapping column 4 (app id) to
        // column 1 (app name). Skip malformed lines with too few columns
        // instead of crashing the job on a single bad record.
        val dictMap = sc.textFile(dictFilePath)
            .map(_.split("\t", -1))
            .filter(_.length >= 5)
            .map(fields => (fields(4), fields(1)))
            .collect()
            .toMap

        // Stop-words dictionary, one word per line, mapped to a dummy value so
        // lookups are O(1).
        // BUG FIX: this previously re-read dictFilePath, so the third CLI
        // argument (stop-words path) was parsed but never used and keyword
        // tagging ran against the app dictionary instead of the stop-word set.
        val stopWordsMap = sc.textFile(stopWordsFilePath).map((_, 0)).collect().toMap

        // Broadcast both dictionaries to the executors (read-only lookup data).
        val broadcastAppDict = sc.broadcast(dictMap)
        val broadcastStopWordsDict = sc.broadcast(stopWordsMap)


        /*val load = ConfigFactory.load()
        val hbTableName = load.getString("hbase.table.name")

        // Create the HBase table if it does not exist yet.
        val configuration = sc.hadoopConfiguration
        configuration.set("hbase.zookeeper.quorum", load.getString("hbase.zookeeper.host"))
        val hbConn = ConnectionFactory.createConnection(configuration)
        val hbAdmin = hbConn.getAdmin

        if (!hbAdmin.tableExists(TableName.valueOf(hbTableName))) {
            println(s"$hbTableName 不存在....")
            println(s"正在创建 $hbTableName ....")

            val tableDescriptor = new HTableDescriptor(TableName.valueOf(hbTableName))
            val columnDescriptor = new HColumnDescriptor("cf")
            tableDescriptor.addFamily(columnDescriptor)
            hbAdmin.createTable(tableDescriptor)

            // Release the connections.
            hbAdmin.close()
            hbConn.close()
        }

        // Configure the output format and target table for the HBase sink.
        val jobConf = new JobConf(configuration)
        jobConf.setOutputFormat(classOf[TableOutputFormat])
        jobConf.set(TableOutputFormat.OUTPUT_TABLE, hbTableName)*/


        // Read the day's parquet log, keep only rows that carry at least one
        // user id, then tag every row.
        sQLContext.read.parquet(inputPath).where(TagsUtils.hasSomeUserIdConditition).mapPartitions(par => {

            // One Redis connection per partition (not per row) to bound the
            // number of connections; closed after the partition is consumed.
            val jedis = JedisPools.getJedis()

            val listBuffer = new collection.mutable.ListBuffer[(String, List[(String, Int)])]()

            par.foreach(row => {

                // Derive each tag family for this row.
                val ads = Tags4Ads.makeTags(row)
                val apps = Tags4App.makeTags(row, broadcastAppDict.value)
                val devices = Tags4Devices.makeTags(row)
                val keywords = Tags4KeyWords.makeTags(row, broadcastStopWordsDict.value)

                // Business-district tags (looked up through Redis).
                val business = Tags4Business.makeTags(row, jedis)

                // Keyed by the row's primary user id; NOTE(review): assumes
                // getAllUserId returns a non-empty sequence for rows passing
                // the where-filter above — confirm against TagsUtils.
                val allUserId = TagsUtils.getAllUserId(row)
                listBuffer.append((allUserId(0), (ads ++ apps ++ devices ++ keywords ++ business).toList))
            })
            jedis.close()
            listBuffer.iterator

        }).reduceByKey((a, b) => {
            // Merge two tag lists for the same user, summing the counts of
            // identical tag keys, e.g.
            // List(("K电视剧", 1), ("APP爱奇艺", 1), ("K电视剧", 1))
            //   => List(("K电视剧", 2), ("APP爱奇艺", 1))
            (a ++ b).groupBy(_._1).map {
                case (k, sameTags) => (k, sameTags.map(_._2).sum)
            }.toList

        })
//          .map{
//            case (userId, userTags) => {
//                val put = new Put(Bytes.toBytes(userId))
//                val tags = userTags.map(t => t._1+":"+t._2).mkString(",")
//                put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes(s"day$day"), Bytes.toBytes(tags))
//
//                (new ImmutableBytesWritable(), put)   // ImmutableBytesWritable => rowkey
//            }
//        }.saveAsHadoopDataset(jobConf)
          .saveAsTextFile(outputPath)
        sc.stop()

    }

}
