package app.classifier

/**
  * Created by duanxiping on 2016/11/4.
  */
import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.corpus.tag.Nature
import com.hankcs.hanlp.seg.common.{Term => HanLPTerm}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.JavaConversions._


/**
  * Discovers candidate "new words" by segmenting messages from Hive with
  * HanLP, counting adjacent non-punctuation word bigrams across all messages
  * in one (stat_date, stat_hour) partition, and writing the counts back to Hive.
  *
  * Created by duanxiping on 2016/11/1.
  */
object DiscoverNewWord {
  // Spark / Hive handles; populated by initSpark() before any other method runs.
  var hiveContext: HiveContext = null
  var sc: SparkContext = null
  var sqlContext: SQLContext = null

  // Source (deduplicated messages) and destination (bigram counts) Hive tables,
  // both partitioned by (stat_date, stat_hour).
  val src_table = "project.dxp_edl_owl_thread_merge_nodup"
  val dst_table = "project.dxp_edl_owl_thread_merge_newword"

  /**
    * Reads distinct (msg, cluster_id) pairs for one (stat_date, stat_hour)
    * partition of `table`, keeping only non-null messages longer than 2 chars.
    *
    * @param table     fully-qualified Hive table name
    * @param stat_date partition date (e.g. 20161104)
    * @param stat_hour partition hour
    * @return DataFrame with columns (msg, cluster_id)
    */
  def getDataFromHive(table: String, stat_date: BigInt, stat_hour: BigInt): DataFrame = {
    val select_sql = s"select msg,cluster_id from $table where stat_date=$stat_date and " +
      s"stat_hour=$stat_hour and msg is not null and length(msg)>2 group by msg,cluster_id"
    hiveContext.sql(select_sql)
  }

  /**
    * Entry point. args = (user_define_path, stat_date, stat_hour).
    * Counts adjacent-word bigrams over the partition's messages and overwrites
    * the matching partition of `dst_table` with the result.
    */
  def main(args: Array[String]): Unit = {
    // Fail fast with a usage message instead of ArrayIndexOutOfBoundsException.
    require(args.length >= 3,
      "usage: DiscoverNewWord <user_define_path> <stat_date> <stat_hour>")
    initSpark("GetNewWords")
    // NOTE(review): user_define_path is read but never used — presumably a
    // custom HanLP dictionary was meant to be loaded from it; confirm and wire up.
    val user_define_path = args(0)
    val stat_date: BigInt = args(1).toInt
    val stat_hour: BigInt = args(2).toInt

    // Keep only the message text; segmentation happens inside discoverNewWord.
    val data_rdd = getDataFromHive(src_table, stat_date, stat_hour).map(r => {
      r.getAs[String]("msg")
    })

    val wordRdd = discoverNewWord(data_rdd)

    // Output schema: concatenated bigram, both words with their part-of-speech
    // tags, and the occurrence count.
    val st = StructType(
      StructField("pattern", StringType, false) ::
        StructField("w1", StringType, false) ::
        StructField("w1nature", StringType, false) ::
        StructField("w2", StringType, false) ::
        StructField("w2nature", StringType, false) ::
        StructField("cnt", IntegerType, false) :: Nil)

    // Stage the counts in a temp table so plain HiveQL can insert them.
    val tmptable: String = "dxp_tmp_table"
    hiveContext.createDataFrame(
      wordRdd.map(r => Row(r._1._1, r._1._2, r._1._3, r._1._4, r._1._5, r._2)),
      st).registerTempTable(tmptable)

    val create_table_sql: String = s"create table if not exists $dst_table " +
      "(pattern string, w1 string, w1nature string,w2 string,w2nature string,cnt int) partitioned by " +
      "(stat_date bigint,stat_hour bigint) stored as textfile"
    hiveContext.sql(create_table_sql)

    // Overwrite exactly this partition, leaving other partitions untouched.
    val insert_sql: String = s"insert overwrite table $dst_table " +
      s"partition(stat_date = $stat_date,stat_hour=$stat_hour) " +
      s"select * from $tmptable"
    hiveContext.sql(insert_sql)
  }

  /** Creates the SparkContext and the Hive/SQL contexts used by the other methods. */
  def initSpark(appname: String): Unit = {
    val sparkConf: SparkConf = new SparkConf().setAppName(appname)
    sc = new SparkContext(sparkConf)
    hiveContext = new HiveContext(sc)
    sqlContext = new SQLContext(sc)
  }

  /**
    * Segments `msg` with HanLP and returns the words whose part-of-speech tag
    * does not start with "w" (i.e. drops punctuation). Not called from this
    * file; kept as a public helper.
    */
  def segMsg(msg: String): Array[String] = {
    val wordarr = scala.collection.mutable.ArrayBuffer.empty[String]
    // FIX: Nature.startsWith takes a java.lang.String; the original passed the
    // Char literal 'w', which does not match the Java signature in Scala.
    for (term: HanLPTerm <- HanLP.segment(msg) if !term.nature.startsWith("w")) {
      wordarr += term.word
    }
    wordarr.toArray
  }

  /**
    * Builds ((pattern, w1, w1nature, w2, w2nature), count) for every pair of
    * adjacent non-punctuation terms in each message, summed across all messages.
    *
    * @param msgRdd raw message texts
    * @return bigram keys with their total occurrence counts
    */
  def discoverNewWord(msgRdd: RDD[String]): RDD[((String, String, String, String, String), Int)] = {
    msgRdd.flatMap(msg => {
      // Sentinel with Nature.w guarantees the first real term never pairs
      // with the empty seed word.
      var prev = new HanLPTerm("", Nature.w)
      val pairs = new scala.collection.mutable.ArrayBuffer[((String, String, String, String, String), Int)]
      for (term: HanLPTerm <- HanLP.segment(msg)) {
        // Emit a bigram only when neither side is punctuation (Nature.w).
        if (prev.nature != Nature.w && term.nature != Nature.w) {
          pairs.append(((prev.word + term.word, prev.word, prev.nature.toString,
            term.word, term.nature.toString), 1))
        }
        prev = term
      }
      pairs
    }).reduceByKey(_ + _)
  }
}

