package com.spark.rdd


import com.alibaba.fastjson.{JSON, JSONObject}
import org.ansj.splitWord.analysis.{BaseAnalysis, ToAnalysis}
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.{FileSplit, InputSplit, TextInputFormat}
import org.apache.spark.mllib.feature.{HashingTF, IDF}
import org.apache.spark.rdd.{HadoopRDD, RDD}
import org.apache.spark.{SparkConf, SparkContext}
import org.nlpcn.commons.lang.util.StringUtil

import scala.collection.SortedMap
import org.apache.spark.mllib.linalg.Vector

/**
  * Created by zhaochao on 2017/5/18.
  *
  */
/**
  * Spark driver: reads a JSON news corpus (one document per line), strips
  * HTML, tokenizes with Ansj, and computes TF-IDF vectors with Spark MLlib.
  */
object helloSpark {

  def main(args: Array[String]): Unit = {
    System.setProperty("hadoop.home.dir", "D:\\openSource\\hadoop-2.7.3\\")
    // Local Spark configuration.
    val conf = new SparkConf().setAppName("wordCount").setMaster("local")
      .set("spark.driver.memory", "2G")
    // Create the Spark context.
    val sc = new SparkContext(conf)
    // Load the raw data: one JSON news record per line.
    val path = "E:\\zjol\\21531000.json"
    val rdd = sc.textFile(path)

    rdd.cache()

    rdd.saveAsTextFile("file:/E:/test")

    // Parse each line ONCE (the original called jsonToTuple twice per record,
    // doubling the JSON parsing work) and keep (newsid, source-text) pairs.
    val docs = rdd.map { line =>
      val record = jsonToTuple(line)
      (record._1, record._3)
    }

    // Strip HTML tags and drop documents that end up empty.
    // Note: the original did groupByKey().flatMapValues(x => x), which forces
    // a full shuffle yet leaves the (key, value) pairs unchanged; mapping the
    // values directly is equivalent and avoids that shuffle entirely.
    val txtSource = docs.mapValues(x => StringUtil.rmHtmlTag(x))
      .filter(x => !x._2.equals(""))

    val docCount = txtSource.count()

    println("文件总数:" + docCount)

    // Tokenize every document with Ansj.
    val splitWords = txtSource.mapValues(x => sourceToRdd(x))

    // Hash term lists into sparse term-frequency vectors.
    val hashingTF = new HashingTF()
    val tf: RDD[Vector] = hashingTF.transform(splitWords.values)
    tf.cache() // IDF makes two passes over tf (fit + transform), so caching pays off

    tf.foreach(x => println(x))

    // Fit the inverse-document-frequency model over the corpus, then scale
    // the term-frequency vectors into TF-IDF vectors.
    val idf = new IDF().fit(tf)
    val tfidf: RDD[Vector] = idf.transform(tf)

    tfidf.foreach(x => println(x))
  }

  /**
    * Counts how often each word occurs in `list` and normalizes each count by
    * the number of distinct words, returning a word -> frequency map.
    *
    * Bug fixed: the original called `map.mapValues(...)` and discarded the
    * result — immutable maps return a NEW map from `mapValues` — so the
    * intended normalization never took effect and raw counts were returned.
    * NOTE(review): the divisor kept here is the distinct-word count, as the
    * original code wrote; classic TF divides by the total token count
    * (`list.size`) — confirm which is wanted.
    *
    * `rdd` and `docCount` are currently unused but kept for signature
    * compatibility with existing callers.
    */
  def countWord(list: List[String], rdd: RDD[String], docCount: Long): Map[String, Double] = {
    // groupBy(identity) replaces the original mutable-var accumulation loop.
    val counts: Map[String, Double] =
      list.groupBy(identity).map { case (word, occurrences) => word -> occurrences.size.toDouble }
    val allWord = counts.size
    // Guard against an empty input list (division by zero).
    if (allWord == 0) counts
    else counts.map { case (word, count) => word -> count / allWord }
  }

  /**
    * Tokenizes `source` with Ansj's ToAnalysis and returns the non-empty term
    * names in their original order.
    */
  def sourceToRdd(source: String): List[String] = {
    val terms = ToAnalysis.parse(source).getTerms
    // Range pipeline replaces the original loop that appended with `:+`,
    // which is O(n) per append and O(n^2) overall on List.
    (0 until terms.size())
      .map(i => terms.get(i).getName)
      .filterNot(name => name.equals(""))
      .toList
  }

  /**
    * Parses one JSON news record into a (newsid, title, source, pub_time)
    * tuple of its string fields.
    */
  def jsonToTuple(json: String): (String, String, String, String) = {
    val obj: JSONObject = JSON.parseObject(json)
    (obj.getString("newsid"), obj.getString("title"), obj.getString("source"), obj.getString("pub_time"))
  }

  /**
    * Reads every file under E:\zjol and prints each line prefixed with the
    * path of the file it came from (tab-separated).
    */
  def readPath(sc: SparkContext): Unit = {
    val fileRDD = sc.hadoopFile[LongWritable, Text, TextInputFormat]("E:\\zjol\\*")

    // Downcast to HadoopRDD: mapPartitionsWithInputSplit (which exposes the
    // per-partition InputSplit, and hence the file path) is only available there.
    val hadoopRdd = fileRDD.asInstanceOf[HadoopRDD[LongWritable, Text]]

    val fileAndLine = hadoopRdd.mapPartitionsWithInputSplit((inputSplit: InputSplit, iterator: Iterator[(LongWritable, Text)]) => {
      val file = inputSplit.asInstanceOf[FileSplit]
      iterator.map(x => file.getPath.toString() + "\t" + x._2)
    })

    fileAndLine.foreach(println)
  }

}
