package com.atblue

import util.SparkUtil
import java.io.File
import org.apache.spark.ml.feature.Tokenizer
import org.apache.spark.ml.feature.HashingTF
import org.apache.spark.ml.feature.IDF
import org.apache.spark.ml.clustering.KMeans
import com.hankcs.hanlp.dictionary.CustomDictionary
import com.hankcs.hanlp.dictionary.stopword.CoreStopWordDictionary
import com.hankcs.hanlp.tokenizer.StandardTokenizer
import java.io.PrintWriter

// Converts the RDD into a DataFrame.
// Expected input columns of the source data:
//   No. | professional category | first-level element | second-level element | inspection content | problem description | suggested measures | inspector | inspection date | required rectification completion date | rectification status
case class EventData(val content: String)
object SparkClusterDemo {

  /**
   * Entry point: reads a text file (one document per line), tokenizes it,
   * builds TF-IDF features via feature hashing, runs KMeans, prints the
   * WSSSE and writes the cluster centers to the output file.
   *
   * Arguments:
   *   args(0) = input file path
   *   args(1) = output file path (cluster centers, one per line)
   *   args(2) = number of clusters k
   *   args(3) = optional: number of hashing-TF features (default 2, kept for
   *             backward compatibility)
   */
  def main(args: Array[String]): Unit = {
    require(args.length >= 3,
      "usage: SparkClusterDemo <inputFile> <outputFile> <k> [numFeatures]")

    val filePath = args(0)
    val outFile = args(1)
    val k = args(2).toInt
    // NOTE(review): 2 hashed features is almost certainly too few for
    // meaningful text clustering (heavy hash collisions). The original
    // hard-coded value is kept as the default so existing invocations
    // behave identically, but it can now be overridden via args(3).
    val numFeatures = if (args.length > 3) args(3).toInt else 2

    val sc = SparkUtil.getSparkContext("SparkClusterDemo", false)
    val sqlcontext = SparkUtil.getSQLContext(sc)

    // Ensure the SparkContext is stopped even if any stage below throws
    // (the original skipped sc.stop() on failure).
    try {
      // Read the local file into an RDD, one EventData per line.
      val eventRdd = sc.textFile(filePath).map(line => EventData(line))

      import sqlcontext.implicits._
      // Convert the RDD to a DataFrame with a single "content" column.
      val dfDatas = eventRdd.toDF("content")

      // Tokenize: Spark ML's Tokenizer lowercases and splits on whitespace.
      println("wordsData----------------")
      val tokenizer = new Tokenizer().setInputCol("content").setOutputCol("words")
      val wordsData = tokenizer.transform(dfDatas)

      // Term frequency via feature hashing.
      println("featurizedData----------------")
      val hashingTF = new HashingTF()
        .setInputCol("words")
        .setOutputCol("rawFeatures")
        .setNumFeatures(numFeatures)
      val featurizedData = hashingTF.transform(wordsData)

      // Inverse document frequency rescaling -> TF-IDF feature vectors.
      println("recaledData----------------")
      val idf = new IDF().setInputCol("rawFeatures").setOutputCol("features")
      val idfModel = idf.fit(featurizedData)
      val rescaledData = idfModel.transform(featurizedData)

      // KMeans clustering; fixed seed so repeated runs are reproducible.
      println("creating kmeans model ...")
      val kmeans = new KMeans().setK(k).setSeed(1L)
      val model = kmeans.fit(rescaledData)

      println("calculating wssse ...")
      val WSSSE = model.computeCost(rescaledData)
      println(s"Within Set Sum of Squared Errors = $WSSSE")
      model.clusterCenters.foreach(println)

      // Persist the cluster centers. try/finally guarantees the writer is
      // closed even when a write fails (the original leaked the handle).
      val writer = new PrintWriter(new File(outFile))
      try {
        model.clusterCenters.foreach(center => writer.write(center.toString + "\r\n"))
      } finally {
        writer.close()
      }
    } finally {
      sc.stop()
    }
  }

  /**
   * Recursively deletes a directory and everything beneath it.
   *
   * Safe to call on a path that does not exist or is a plain file:
   * `File.listFiles` returns null in those cases, which the original
   * dereferenced and NPE'd on.
   */
  def deleteDir(dir: File): Unit = {
    val children = Option(dir.listFiles()).getOrElse(Array.empty[File])
    children.foreach { f =>
      if (f.isDirectory) {
        deleteDir(f)
      } else {
        f.delete()
        println("delete file " + f.getAbsolutePath)
      }
    }
    dir.delete()
    println("delete dir " + dir.getAbsolutePath)
  }

}