package xubo.wangcaifeng.love.method

import java.io

import org.apache.commons.lang3.StringUtils
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{DataFrame, SQLContext}
import xubo.wangcaifeng.love.Utils.SaveData

import scala.io.Source

object Need2 {
  /**
   * Entry point: enriches parquet records with app names resolved from a
   * tab-separated dictionary, sums the per-record metric lists per app,
   * sorts descending by the first metric, and writes one text output file.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("按照查询")
      .setMaster("local[*]")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    val sc = new SparkContext(conf)
    try {
      val sqlc = new SQLContext(sc)

      // Load the rule dictionary: tab-separated rows; column 4 holds the app
      // id, column 1 the human-readable app name.
      val lines: RDD[String] = sc.textFile("data/app_dict.txt")
      val ipRules: Map[String, String] = lines
        .map(_.split("\t", -1))
        .filter(_.length >= 9) // guard against short/malformed rows before indexing
        .map(t => (t(4), t(1))) // appId -> appName
        .collect()
        .toMap
      // Broadcast the (small) lookup table so each executor receives one copy.
      val ipBC: Broadcast[Map[String, String]] = sc.broadcast(ipRules)

      // Read the parquet input and resolve each record's app name.
      // Resolution order: explicit non-empty name > dictionary lookup by id
      // (falling back to the raw id) > "未知" (unknown).
      val frame: DataFrame = sqlc.read.parquet("data/parquet/")
      val resdata = frame.map { t =>
        val rawName = t.getAs[String]("appname")
        val appid = t.getAs[String]("appid")
        val appname =
          if (StringUtils.isNotEmpty(rawName)) rawName
          else if (StringUtils.isNotEmpty(appid)) ipBC.value.getOrElse(appid, appid)
          else "未知"
        (appname, SaveData(t))
      }

      // Element-wise sum of the metric lists per app, then sort descending
      // by the first metric.
      val result = resdata
        .reduceByKey((list1, list2) => list1.zip(list2).map(p => p._1 + p._2))
        .sortBy(_._2(0), ascending = false)

      // Flatten to a fixed-arity tuple: name plus the first nine metrics.
      // NOTE(review): assumes SaveData produces at least 9 values per row —
      // confirm, otherwise this throws IndexOutOfBoundsException.
      val fres = result.map { case (name, m) =>
        (name, m(0), m(1), m(2), m(3), m(4), m(5), m(6), m(7), m(8))
      }
      fres.coalesce(1).saveAsTextFile("data/result1")
    } finally {
      sc.stop() // always release the SparkContext, even when the job fails
    }
  }

}
