package preprocess

/**
  * Created by Alex on 2016/7/26.
  */

import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import jointlab.core.analyzer.LuceneDocumentAnalyzer
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.feature.HashingTF

//import org.apache.spark.m
import java.text.SimpleDateFormat

import entity.Wechat
import entity.WechatRawTerms

import jointlab.api.core.Term
import scala.collection.JavaConversions._
import scala.collection.mutable.HashMap

object VectorizeDriver {
  /**
    * Spark `aggregate` seqOp: folds one document's terms into a running
    * document-frequency map. Each distinct term in the document contributes
    * exactly 1 to its count (document frequency, not term frequency).
    *
    * @param dfs running map of term -> number of documents containing it (mutated in place)
    * @param wt  one document's raw term map
    * @return the same `dfs` instance, updated
    */
  def merge(dfs: HashMap[String, Int], wt: WechatRawTerms): HashMap[String, Int] = {
    val tfs = wt.terms
    tfs.keySet.foreach { term =>
      val termT: Term = tfs.get(term)
      // BUG FIX: the original computed count = 0 for a null Term but then
      // dereferenced termT.getWord anyway, which would NPE. Skip nulls instead;
      // non-null entries behave exactly as before (count of 1 per document).
      if (termT != null) {
        val word = termT.getWord
        dfs += word -> (dfs.getOrElse(word, 0) + 1)
      }
    }
    dfs
  }
  /**
    * Spark `aggregate` combOp: merges two partial document-frequency maps by
    * summing counts per term. Mutates and returns `dfs1`.
    *
    * @param dfs1 accumulator map (updated in place)
    * @param dfs2 map whose counts are folded into `dfs1`
    * @return the same `dfs1` instance, updated
    */
  def combo(dfs1: HashMap[String, Int], dfs2: HashMap[String, Int]): HashMap[String, Int] = {
    dfs2.foreach { case (word, freq) =>
      dfs1.update(word, dfs1.getOrElse(word, 0) + freq)
    }
    dfs1
  }
  // Silence Spark's verbose INFO logging so the printed samples are readable.
  Logger.getLogger("org").setLevel(Level.ERROR)

  /**
    * Driver entry point: reads tab-separated Wechat records from `path`,
    * tokenizes the content field with a Lucene analyzer, and computes
    * per-term TF-IDF-style scores across the corpus.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("TextCategorization").setMaster("local[*]")
    val sc = new SparkContext(conf)
    val path = "D://userData/c*/"
    //val path =  "/bjtu/Alex/data/train/c*/"
    val records = sc.textFile(path)

    // Parse each tab-separated line into a Wechat record. Lines that fail to
    // parse (wrong field count, unparseable dates) yield None and are filtered
    // out rather than crashing the job.
    val docTermsFreqs = records.map { line =>
      val p = line.split("\t")
      // SimpleDateFormat is not thread-safe, so one is created per record.
      val sdf = new SimpleDateFormat("yyyyMMdd")
      try {
        Some(new Wechat(p(0), p(1), p(2), p(3), p(4), p(5), p(6), p(7), p(8), sdf.parse(p(9)), sdf.parse(p(10))))
      } catch {
        case e: Exception => None
      }
    }.filter(_.isDefined).mapPartitions { it =>
      // One analyzer per partition: cheaper than one per record, and it is
      // never shared across partitions.
      val analyzer = new LuceneDocumentAnalyzer()
      it.map { ele =>
        val wt = ele.get
        val id = wt.getUniqueKey()
        val terms = analyzer.getWordMap(wt.content)
        // Normalize raw counts into term frequencies: freq / document length.
        val count: Double = terms.size()
        for ((k, v) <- terms) {
          v.setTf(v.getFreq / count)
        }
        new WechatRawTerms(id, terms)
      }
    }.cache()
    docTermsFreqs.take(5).foreach(wrt => println(wrt.id, wrt.terms))

    // Total number of documents in the corpus.
    val docCount: Long = docTermsFreqs.count()
    println("doc number: ", docCount)

    // Document frequency: for each term, the number of documents containing it.
    val docFreqs = docTermsFreqs.aggregate(new HashMap[String, Int]())(merge, combo)
    docFreqs.take(5).foreach(println)

    // Inverse document frequency: log(total docs / docs containing the term).
    val idfs = docFreqs.map { case (term, count) => (term, math.log(docCount.toDouble / count)) }
    idfs.take(5).foreach(println)

    // BUG FIX: the original map body ended in a `val` definition, so every
    // element of `vecs` was Unit and all computed scores were silently lost.
    // The score map is now the last expression and therefore the map's result.
    val vecs = docTermsFreqs.map { wrt =>
      val termfreqs = wrt.terms
      termfreqs.map { case (term, freq) =>
        (term, idfs(term) * termfreqs(term).getFreq() / docCount)
      }
    }
    vecs.take(5).foreach(println)

    sc.stop()
  }
}




