package preprocess

import java.text.SimpleDateFormat

import entity.{Wechat, WechatRawTerms}
import jointlab.core.analyzer.LuceneDocumentAnalyzer
import org.apache.spark.ml.feature.HashingTF
import org.apache.spark.{SparkConf, SparkContext}

import scala.util.control.NonFatal

/**
  * Created by Alex on 2016/9/2.
  */
object ExampleDriver {

  /**
    * Entry point: reads tab-separated records from the input path, parses each
    * row into a [[Wechat]] (dropping malformed rows), then tokenizes each
    * record's content into an array of terms with a Lucene-based analyzer.
    *
    * @param args unused — master, app name and input path are currently
    *             hard-coded below. NOTE(review): consider passing these via
    *             `args` / `spark-submit` instead of baking them into the code.
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("yarn-client").setAppName("f")
    val sc = new SparkContext(conf)
    val sqlContext = new org.apache.spark.sql.SQLContext(sc)

    // `val`, not `var`: the RDD reference is never reassigned.
    val wechatRawTerms = sc.textFile("D://userData/c*/").mapPartitions { lines =>
      // SimpleDateFormat is costly to construct and not thread-safe; build one
      // per partition instead of one per record (the original allocated it in
      // the per-record map).
      val sdf = new SimpleDateFormat("yyyyMMdd")
      lines.flatMap { line =>
        val p = line.split("\t")
        try {
          // Rows with too few columns or unparsable dates are silently dropped
          // (same best-effort behavior as the original filter on Option).
          Some(new Wechat(p(0), p(1), p(2), p(3), p(4), p(5), p(6), p(7), p(8), sdf.parse(p(9)), sdf.parse(p(10))))
        } catch {
          // NonFatal instead of Exception: lets fatal errors (OOM, thread
          // interruption) propagate rather than being swallowed.
          case NonFatal(_) => None
        }
      }
    }.mapPartitions { records =>
      // One analyzer per partition to amortize construction cost.
      val analyzer = new LuceneDocumentAnalyzer()
      records.map { wt =>
        val terms = analyzer.getWordMap(wt.content)
        // NOTE(review): the original also computed wt.getUniqueKey() and
        // terms.size() but never used them; dropped here. Restore if a
        // WechatRawTerms(id, terms) output is re-enabled.
//        new WechatRawTerms(id, terms)
        terms.keySet().toArray(Array.empty[String])
      }
    }

    //val srdd = sqlContext.createSchemaRDD(wechatRawTerms)

//    var hashingTF = new HashingTF()
//    val tf = hashingTF.transform(wechatRawTerms)
//    tf.foreach(println)

    // word segmentation
  }
}
