package app.classifier

import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.seg.common.{Term => HanLPTerm}
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.JavaConversions._


/**
  * Extracts subject words and phrases from tokenized forum posts stored in Hive.
  *
  * Pipeline: read one (stat_date, stat_hour) partition of (id, content) rows,
  * segment each content string with HanLP, aggregate per-token frequency and
  * degree, score comma-joined phrases against the single-word scores, and
  * write the result back to a partitioned Hive table.
  *
  * Created by duanxiping on 2016/11/1.
  */
object SubjectExtract {

  // Spark / Hive handles; populated exactly once by initSpark() before use.
  var hiveContext: HiveContext = null
  var sc: SparkContext = null
  var sqlContext: SQLContext = null

  /*
  Alternative source/destination tables kept for reference:
  project.mdl_weblog_owl_tokenized_post_hi
  project.mdl_weblog_owl_tokenized_thread_hi
  val src_table = "project.mdl_weblog_owl_tokenized_thread_hi"
  val dst_table = "project.dxp_mdl_weblog_owl_tokenized_thread_extract"
  */
  val src_table = "project.edl_owl_reply_merge_di"
  val dst_table = "project.dxp_mdl_weblog_owl_tokenized_post_extract"

  /**
    * Loads one hourly partition of non-null (id, content) rows from Hive.
    *
    * NOTE(review): partition values are interpolated straight into the SQL
    * string; acceptable only because they come from trusted job arguments.
    */
  def getDataFromHive(table: String, stat_date: String, stat_hour: String): DataFrame = {
    val select_sql = s"select id, content from $table where stat_date=${stat_date} " +
      s"and stat_hour=${stat_hour} " +
      s" and id is not null and content is not null"
    hiveContext.sql(select_sql)
  }

  /** Initialises the SparkContext / HiveContext / SQLContext for this job. */
  def initSpark(appname: String): Unit = {
    System.setProperty("user.name", "project")
    System.setProperty("HADOOP_USER_NAME", "project")
    val sparkConf: SparkConf = new SparkConf().setAppName(appname)
    sc = new SparkContext(sparkConf)
    hiveContext = new HiveContext(sc)
    sqlContext = new SQLContext(sc)
  }

  /**
    * Job entry point.
    *
    * @param args args(0) = stat_date, args(1) = stat_hour; their concatenation
    *             names the output partition.
    */
  def main(args: Array[String]): Unit = {
    // Fail fast with a usage message instead of ArrayIndexOutOfBoundsException.
    require(args.length >= 2, "usage: SubjectExtract <stat_date> <stat_hour>")
    initSpark("Subject Extract")
    val stat_date = args(0)
    val stat_hour = args(1)
    val outPartition = args(0) + args(1)

    val dataDF = getDataFromHive(src_table, stat_date, stat_hour)
    val wordAndPhaseDF = calWordScore(dataDF)
    //saveDataFrame(wordAndPhaseDF,dst_table,outPartition)

    // (token-or-phrase, freq, degree) triples; traversed twice, hence persist().
    val wordAndPhaseRDD = wordAndPhaseDF.map(r => {
      val words = r.get(0).toString
      val freq = r.get(1).toString.toInt
      val degree = r.get(2).toString.toInt
      (words, freq, degree)
    }).persist()

    // Single "word|pos" tokens (no ','): score = freq / degree.  Collected to
    // the driver so it can serve as a lookup table while scoring phrases.
    val wordMap = wordAndPhaseRDD.filter(!_._1.contains(",")).map(r => {
      (r._1, r._2 / (r._3 * 1.0))
    }).collect().toMap

    // Phrases are comma-joined "word|pos" tokens.
    val phaseRDD = wordAndPhaseRDD.filter(_._1.contains(",")).map(r => {
      val wordPosArray = r._1.split(",")

      // BUG FIX: '|' is a regex alternation metacharacter, so the original
      // split("|") split each token into single characters.  It must be
      // escaped as split("\\|") to yield Array(word, pos).
      val wordsSetSize = wordPosArray.map(r => {
        r.split("\\|")(0)
      }).toSet.size

      // Count member tokens tagged nx (foreign word) or m (numeral).
      val nx_mSize = wordPosArray.map(r => {
        r.split("\\|")(1)
      }).filter(Array("nx", "m").contains(_)).length

      // Zero-score phrases that are mostly repeated words or mostly nx/m.
      if (wordsSetSize * 2 <= wordPosArray.length || nx_mSize * 2 >= wordPosArray.length) {
        Row(r._1, r._2, 0)
      } else {
        // Phrase score = sum of member-word scores, scaled to an int.
        val score = wordPosArray.map(r => wordMap.getOrElse(r, 0.0)).sum
        Row(r._1, r._2, (score * 100).toInt)
      }
    })

    val phaseDF = hiveContext.createDataFrame(phaseRDD, wordAndPhaseDF.schema)
    val phaseTable = "project.dxp_mdl_weblog_owl_tokenized_post_phase"
    saveDataFrame(phaseDF, phaseTable, outPartition)

    sc.stop()
    System.clearProperty("spark.driver.port")
  }

  /**
    * Creates outTable if necessary (Hive schema derived from df's schema) and
    * overwrites its stat_date = dt partition with df's contents.
    */
  def saveDataFrame(df: DataFrame, outTable: String, dt: String): Unit = {
    val cols = df.columns
    val sma = df.schema
    val colsType = cols.map(r => {
      sma(r).dataType match {
        case IntegerType => "int"
        case LongType => "bigint"
        case StringType => "string"
        case BooleanType => "boolean"
        case DoubleType => "double"
        // Fail with a descriptive error instead of a bare MatchError.
        case other => throw new IllegalArgumentException(
          s"unsupported column type $other for Hive table $outTable")
      }
    })

    val colsString = cols.zip(colsType).map(r => r._1 + " " + r._2).mkString(",")
    val create_table_sql: String = s"create table if not exists $outTable " +
      s" ($colsString) partitioned by (stat_date bigint) stored as textfile"
    println(create_table_sql)
    hiveContext.sql(create_table_sql)

    // Stage df under a temp table name so plain INSERT ... SELECT works.
    val tmptable = "dxp_tmp_table"
    df.registerTempTable(tmptable)

    val insert_sql: String = s"insert overwrite table $outTable partition(stat_date = $dt) " +
      s"select * from $tmptable"
    hiveContext.sql(insert_sql)
    hiveContext.dropTempTable(tmptable)
  }

  /**
    * Segments each row's content with HanLP and returns a DataFrame of
    * (word, freq, degree), where word is either a single "word|pos" token or
    * a comma-joined phrase of such tokens.
    */
  def calWordScore(df: DataFrame): DataFrame = {
    // POS tag reference:
    // http://www.hankcs.com/nlp/part-of-speech-tagging.html

    // Stop POS tags acting as phrase delimiters, keyed by their first letter
    // so membership can be checked with a cheap one-character prefix lookup.
    val filterPosMap = scala.collection.mutable.HashMap[String, Array[String]]()
    filterPosMap("b") = Array("b", "begin", "bg", "bl")
    filterPosMap("c") = Array("c", "cc")
    filterPosMap("d") = Array("d", "dg", "dl")
    filterPosMap("e") = Array("e", "end")
    filterPosMap("f") = Array("f")
    filterPosMap("j") = Array("j")
    filterPosMap("o") = Array("o")
    filterPosMap("p") = Array("p", "pba", "pbei")
    filterPosMap("q") = Array("q")
    filterPosMap("r") = Array("r", "rg", "rr", "ry", "rys", "ryt", "ryv", "rz", "rzs", "rzt")
    filterPosMap("t") = Array("t")
    filterPosMap("u") = Array("u", "ud", "ude1", "ude2", "ude3", "udeng", "udh", "uj", "ul", "ule") ++
      Array("ulian", "uls", "usuo", "uv", "uyy", "uz", "uzhe", "uzhi")
    filterPosMap("v") = Array("vyou", "vshi")
    filterPosMap("w") = Array("w", "wb", "wd", "wf", "wh", "wj", "wky", "wkz",
      "wm", "wn", "ws", "wt", "ww", "wyy", "wyz")
    filterPosMap("y") = Array("y")

    // Stop words (forum boilerplate); also act as phrase delimiters.
    val stopWords = Array("是", "有", "求助", "http", "嗯嗯", "额", "&nbsp", "谢谢", "感谢", "发表",
      "回复", "您好", "你好","顶顶")

    val wordRDD = df.flatMap(r => {
      val arrBuf = scala.collection.mutable.ArrayBuffer.empty[(String, (Int, Int))]
      val arrWords = scala.collection.mutable.ArrayBuffer.empty[String]
      val sen = r.get(1).toString
      val segRes = segMsgWithNature(sen)
      val nxMNum = segRes.filter(r => Array("nx", "m").contains(r._2)).length

      // Skip sentences that are mostly repeated tokens or mostly nx/m tokens.
      if (segRes.toSet.size * 2 > segRes.length && nxMNum * 2 < segRes.length) {
        segRes.foreach(elem => {
          if ((filterPosMap.contains(elem._2.substring(0, 1)) &&
            filterPosMap(elem._2.substring(0, 1)).contains(elem._2)) ||
            stopWords.contains(elem._1)) {
            // Delimiter reached: emit each accumulated word with
            // degree = phrase length - 1, plus the whole phrase once.
            for (word <- arrWords) {
              arrBuf.append((word, (1, arrWords.length - 1)))
            }
            if (arrWords.length > 1) {
              arrBuf.append((arrWords.mkString(","), (1, 0)))
            }
            arrWords.clear()
          } else {
            arrWords.append(elem._1 + "|" + elem._2)
          }
        })
      }
      // NOTE(review): words still in arrWords when the sentence ends without a
      // trailing delimiter are discarded — confirm this is intentional.
      arrBuf
    }).reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2)).map(r => {
      Row(r._1, r._2._1, r._2._2)
    })

    val st = StructType(
      StructField("word", StringType, false) ::
        StructField("freq", IntegerType, false) ::
        StructField("degree", IntegerType, false) :: Nil)
    hiveContext.createDataFrame(wordRDD, st)
  }

  /** Segments msg with HanLP, returning (surface form, POS tag name) pairs. */
  def segMsgWithNature(msg: String): Array[(String, String)] = {
    val wordarr = scala.collection.mutable.ArrayBuffer.empty[(String, String)]
    for (term: HanLPTerm <- HanLP.segment(msg)) {
      wordarr += ((term.word, term.nature.name()))
    }
    wordarr.toArray
  }


  /** Segments msg with HanLP, dropping punctuation (POS tags starting 'w'). */
  def segMsg(msg: String): Array[String] = {
    val wordarr = scala.collection.mutable.ArrayBuffer.empty[String]
    for (term: HanLPTerm <- HanLP.segment(msg) if !term.nature.startsWith('w')) {
      wordarr += term.word
    }
    wordarr.toArray
  }
}
