package app.classifier

import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.seg.common.{Term => HanLPTerm}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.JavaConversions._


/**
  * Created by duanxiping on 2016/11/1.
  */
/**
  * Spark job that deduplicates and clusters near-duplicate thread titles.
  *
  * Pipeline: read titles for one stat_date partition from Hive, collapse exact
  * duplicates with a count, cluster near-duplicates by Dice similarity over
  * HanLP word sets, and write (id, cluster_id, cnt, title) back to a
  * partitioned Hive table.
  *
  * Usage: args = [sim_thres, stat_date, stat_hour]
  */
object RemoveDup{

  // Spark / Hive handles, initialized once by initSpark() before any query runs.
  var hiveContext: HiveContext = null
  var sc: SparkContext = null
  var sqlContext: SQLContext = null // created but never used; kept for interface compatibility

  // Source (raw merged threads) and destination (deduplicated clusters) tables.
  val src_table = "project.edl_owl_thread_merge_di"
  val dst_table = "project.dxp_edl_owl_thread_merge_nodup"

  /**
    * Loads non-null titles longer than 2 characters for one stat_date partition.
    *
    * NOTE(review): `stat_hour` is accepted but never used in the WHERE clause, so
    * all hours of the day are read. The "_di" suffix suggests a daily table, but
    * confirm whether an `and stat_hour=...` filter is missing.
    *
    * @param table     fully qualified Hive table name
    * @param stat_date partition date to read
    * @param stat_hour currently unused (see note above)
    * @return DataFrame with a single `title` column
    */
  def getDataFromHive(table:String, stat_date:BigInt, stat_hour:BigInt):DataFrame = {
    val select_sql = s"select title from $table where stat_date=$stat_date " +
      "and title is not null and length(title)>2"
    hiveContext.sql(select_sql)
  }

  /**
    * Entry point: reads titles, clusters them, and overwrites the target
    * partition of `dst_table` with the clustering result.
    */
  def main(args:Array[String]): Unit ={
    // NOTE(review): "Dumplicate" is a typo for "Duplicate"; left unchanged in
    // case external monitoring filters on the exact application name.
    initSpark("Remove Dumplicate")
    val sim_thres:Double = args(0).toDouble
    val stat_date:BigInt = args(1).toInt
    val stat_hour:BigInt = args(2).toInt

    val data_df = getDataFromHive(src_table, stat_date, stat_hour)

    // Exact-duplicate collapse first (title -> count), then near-duplicate clustering.
    val sameMsgRdd = statSameMsg(data_df.rdd.map(_.getAs[String]("title").trim))
    val simRdd = statNearSameMsg(sameMsgRdd,sim_thres)

    // Output schema; must stay positionally aligned with the column order of
    // Row(...) built in statNearSameMsg and with dst_table's column order.
    val st = StructType(
      StructField("id", LongType, false) ::
        StructField("cluster_id", LongType, false) ::
        StructField("cnt", IntegerType, false) ::
        StructField("title", StringType, false) ::Nil)

    val tmptable: String = "dxp_tmp_table"
    hiveContext.createDataFrame(simRdd, st).registerTempTable(tmptable)

    // Destination column is named `msg` while the temp table uses `title`; the
    // `insert ... select *` below is positional, so the mismatch is harmless.
    val create_table_sql: String = s"create table if not exists $dst_table " +
      "(id bigint, cluster_id bigint, cnt int, msg string) partitioned by " +
      "(stat_date bigint,stat_hour bigint) stored as textfile"
    hiveContext.sql(create_table_sql)

    val insert_sql: String = s"insert overwrite table $dst_table " +
      s"partition(stat_date = $stat_date,stat_hour=$stat_hour) " +
      s"select * from $tmptable"
    hiveContext.sql(insert_sql)
  }

  /**
    * Creates the SparkContext and both SQL contexts used by this job.
    * Must be called before any other method touches `hiveContext`.
    */
  def initSpark(appname: String): Unit = {
    val sparkConf: SparkConf = new SparkConf().setAppName(appname)
    sc = new SparkContext(sparkConf)
    hiveContext = new HiveContext(sc)
    sqlContext = new SQLContext(sc)
  }


  /**
    * Segments a message with HanLP and returns its words, dropping terms whose
    * part-of-speech nature starts with 'w' (punctuation in HanLP's tagset).
    *
    * Relies on the file-level `scala.collection.JavaConversions._` import to
    * iterate HanLP's java.util.List (deprecated API; `JavaConverters`/.asScala
    * is the modern replacement, but changing it would touch the shared import).
    */
  def segMsg(msg: String): Array[String] = {
    val wordarr = scala.collection.mutable.ArrayBuffer.empty[String]
    for (term: HanLPTerm <- HanLP.segment(msg) if !term.nature.startsWith('w')) {
      wordarr += term.word
    }
    wordarr.toArray
  }

  /** Collapses exact-duplicate titles into (title, occurrence count) pairs. */
  def statSameMsg(msgRdd: RDD[String]): RDD[(String,Int)] = {
    msgRdd.map(r=>(r,1)).reduceByKey(_ + _)
  }

  /**
    * Clusters near-duplicate titles.
    *
    * Steps:
    *  1. Give each distinct title a unique id and segment it into a word set.
    *  2. Score every unordered pair with the Dice coefficient
    *     2*|A∩B| / (|A|+|B|); the full word-set array is collected to the
    *     driver and rescanned per row, so this is O(n^2) and assumes the
    *     distinct-title set fits in driver memory.
    *  3. For each title keep its single best earlier match above `sim_thres`,
    *     then chain those links into clusters on the driver.
    *
    * @param msgRdd    (title, count) pairs from statSameMsg
    * @param sim_thres minimum Dice similarity (exclusive) to link two titles
    * @return rows of (id, cluster_id, cnt, title) matching the schema in main()
    */
  def statNearSameMsg(msgRdd:RDD[(String,Int)],sim_thres:Double):RDD[Row] = {
    val MINSIM = sim_thres
    // (uniqueId, title, wordSet, count); persisted because it is traversed
    // three times (collect, pair scan, final map).
    val splitRdd = msgRdd.zipWithUniqueId().map(r=>{
      val wordSet = segMsg(r._1._1).toSet
      (r._2, r._1._1, wordSet,r._1._2)
    }).persist()

    // (id, wordSet) for every title, pulled to the driver and captured by the
    // flatMap closure below (shipped to every task).
    val splitArr = splitRdd.map(r=>(r._1,r._3)).collect()

    // id -> cluster root id. Built on the driver by the foreach below, then
    // captured by the final map() closure — the mutation finishes before that
    // RDD is ever evaluated, so executors see the fully populated map.
    // (Fix: the original `new HashMap[Long,Long].empty` allocated a map and
    // immediately discarded it in favor of a second, empty one.)
    val clusterMap = scala.collection.mutable.HashMap.empty[Long, Long]
    splitRdd.repartition(10).flatMap(r=>{
      val idx = r._1
      val outSet = r._3
      // Compare only against strictly larger ids so each unordered pair is
      // scored exactly once, keyed by the larger id.
      splitArr.filter(_._1 > idx).map(w=>{
        val diffSet = w._2 & outSet
        (w._1, (idx,(diffSet.size * 2).toDouble / (w._2.size + outSet.size)))
      }).filter(_._2._2 > MINSIM)
    }).reduceByKey((f,s)=>{if(f._2 > s._2) f else s}).collect.sortBy(_._1).foreach(r=>{
      // Ascending-id order guarantees a parent (always a smaller id) has been
      // fully resolved before any of its children are processed; the while
      // loop then walks the chain to the root (union-find path shortening).
      println(r._1 + "\t"+r._2._1)
      var childKey = r._2._1
      clusterMap(r._1) = childKey
      while(clusterMap.contains(childKey) && clusterMap(childKey) != childKey) {
        clusterMap(r._1) = clusterMap(childKey)
        childKey = clusterMap(childKey)
      }
    })

    // Titles that never linked to anything become their own cluster root.
    splitRdd.map(r=>{
      val cluster_id = if(clusterMap.contains(r._1)) clusterMap(r._1) else r._1
      Row(r._1,cluster_id,r._4,r._2)
    })
  }

}
