package newstage

import java.text.SimpleDateFormat
import java.util

import jointlab.core.analyzer.LuceneDocumentAnalyzer
import org.apache.spark.{SparkConf, SparkContext, sql}
import org.apache.spark.ml.feature.{IDFModel, _}
import org.apache.spark.sql._
import org.apache.spark.mllib.linalg.Vector

import scala.collection.mutable.ArrayBuffer
import jointlab.api.core.Term
import org.apache.spark.sql.types.{DateType, StringType, StructField, StructType}
import org.apache.log4j.{Level, Logger}
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.mllib.classification.NaiveBayesModel
import org.apache.spark.mllib.classification.NaiveBayes
import org.apache.spark.mllib.regression.LabeledPoint

// NOTE(review): unused anywhere in this file — looks like a leftover scratch row
// type (key, class id, token list); confirm it has no external users before removing.
case class MyRow(key:Int,clz:Int,value:Seq[String])
// Empty companion class of the TestHashingTF object below; never instantiated in this file.
class TestHashingTF {}
object TestHashingTF{
  /** Glob path of the raw tab-separated training corpus on local disk. */
  val path = "D://userData/c*/"

  /** Column names of a parsed record; "date1"/"date2" are typed DateType, the rest StringType. */
  def schemaString = "tag biz id idx mid title weName weAcc weAccDesc content date1 date2"

  val schemaType = StructType(
    schemaString.split(" ")
      .map(x => if (!x.contains("date")) StructField(x, StringType, true) else StructField(x, DateType, true))
      .toSeq)

  // NOTE(review): SimpleDateFormat is NOT thread-safe. This shared instance is kept
  // for backward compatibility with any external callers, but the RDD code below now
  // builds one formatter per partition instead of sharing this one across executor threads.
  val sdf = new SimpleDateFormat("yyyyMMdd")

  /**
   * Loads the corpus into a DataFrame with [[schemaType]].
   *
   * Each input line is expected to be "<tag>,...<TAB>field0<TAB>...<TAB>field10";
   * lines that fail to parse are logged (executor stdout) and dropped rather than
   * failing the whole job.
   */
  def readDataFromLocal(sc:SparkContext,sq: SQLContext): DataFrame ={
    val res = sc.textFile(path).mapPartitions { part =>
      // Fresh formatter per partition: SimpleDateFormat is not thread-safe.
      val fmt = new SimpleDateFormat("yyyyMMdd")
      part.flatMap { line =>
        try {
          val tag = line.substring(0, line.indexOf(","))
          val p = line.split("\t")
          Some(Row(tag, p(0), p(1), p(2), p(3), p(4), p(5), p(6), p(7), p(8),
            javaString2SqlDate(p(9), fmt), javaString2SqlDate(p(10), fmt)))
        } catch {
          case e: Exception => println(e); None
        }
      }
    }
    sq.createDataFrame(res, schemaType)
  }

  /**
   * Like [[readDataFromLocal]], but replaces the raw content column (p(8)) with a
   * Lucene-tokenized version in which each term is repeated `freq` times, so the
   * plain whitespace Tokenizer downstream reproduces the original term frequencies.
   */
  def readDataFromLocal2(sc:SparkContext,sq: SQLContext): DataFrame = {
    val res = sc.textFile(path).mapPartitions { part =>
      // Fresh formatter per partition: SimpleDateFormat is not thread-safe.
      val fmt = new SimpleDateFormat("yyyyMMdd")
      part.flatMap { line =>
        try {
          val tag = line.substring(0, line.indexOf(","))
          val p = line.split("\t")
          // NOTE(review): analyzer is created per line, as in the original; hoist it to
          // partition scope only after confirming LuceneDocumentAnalyzer is reusable.
          val analyzer = new LuceneDocumentAnalyzer()
          // Expand each term to `freq` copies. Array.fill replaces the old mutable
          // ArrayBuffer populated via a side-effecting (1 to sum).map.
          val words: Array[String] = analyzer.getWordMap(p(8)).values().toArray().flatMap { x =>
            val term = x.asInstanceOf[Term]
            Array.fill(term.getFreq)(term.getWord)
          }
          // mkString is safe on an empty token array (the old reduce would throw).
          Some(Row(tag, p(0), p(1), p(2), p(3), p(4), p(5), p(6), p(7), words.mkString(" "),
            javaString2SqlDate(p(9), fmt), javaString2SqlDate(p(10), fmt)))
        } catch {
          case e: Exception => println("error:" + e); None
        }
      }
    }
    sq.createDataFrame(res, schemaType)
  }

  // Quiet down Spark / Jetty logging; runs once at object initialization.
  Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
  Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

  /**
   * End-to-end text categorization: load corpus -> tokenize -> hashed TF (50k
   * buckets) -> IDF -> standard-scale -> train a multinomial Naive Bayes on an
   * 80/20 split, print accuracy and a confusion matrix, then classify one line
   * read from stdin.
   */
  def main(args:Array[String]): Unit ={
    val conf = new SparkConf().setAppName("TextCat").setMaster("local[*]")
    val sc = new SparkContext(conf)
    val sqlContext = new SQLContext(sc) // val, not var: never reassigned
    val schema = readDataFromLocal2(sc, sqlContext)

    // Feature pipeline: words -> hashed term counts -> tf-idf -> standardized vector.
    val tokenizer = new Tokenizer().setInputCol("content").setOutputCol("words")
    val tf = new HashingTF().setInputCol(tokenizer.getOutputCol).setOutputCol("terms").setNumFeatures(50000)
    val idf = new IDF().setInputCol(tf.getOutputCol).setOutputCol("idf")
    val scaler = new StandardScaler().setInputCol(idf.getOutputCol).setOutputCol("scaledFeatures")

    val pip = new Pipeline()
    pip.setStages(Array(tokenizer, tf, idf, scaler))

    val pipModel = pip.fit(schema)
    val temp = pipModel.transform(schema)

    // Label = numeric part of the tag column.
    // NOTE(review): assumes every tag is one leading character followed by digits
    // (e.g. "c12" -> 12) — confirm against the corpus.
    val ds = temp.map { row =>
      val tag = row.getString(0).substring(1).toInt
      val features = row.getAs[Vector]("scaledFeatures")
      new LabeledPoint(tag, features)
    }

    val splits = ds.randomSplit(Array(0.8, 0.2))
    val nb = NaiveBayes.train(splits(0), lambda = 1.0, modelType = "multinomial")
    val test = splits(1)

    val predictionAndLabel = test.map(p => (nb.predict(p.features), p.label))

    // Confusion-matrix accumulator.
    // NOTE(review): hard-coded 15x15 — seq() throws ArrayIndexOutOfBounds if a label
    // or prediction falls outside 1..15; size this from the real class count if so.
    val zero: Array[Array[Double]] = Array.ofDim(15, 15)
    val confuseMat = predictionAndLabel.aggregate(zero)(seq, comb)

    val accuracy = 1.0 * predictionAndLabel.filter(x => x._1 == x._2).count() / test.count()
    println("accuracy :" + accuracy)

    for (x <- confuseMat) {
      println(beautifyString(x))
    }

    // Read one line from stdin, wrap it as a DataFrame, and classify it.
    readParaAndClassify(readParaAndClassify(sqlContext), pipModel, nb)
  }

  /** One ad-hoc document to classify; `content` feeds the pipeline's Tokenizer. */
  case class ReadRecord(val content:String)

  /** Reads one line from stdin and wraps it in a single-row DataFrame. */
  def readParaAndClassify(sc:SQLContext): DataFrame ={
    val para = scala.io.StdIn.readLine() // Console.readLine() is deprecated
    val rr = ReadRecord(para)
    sc.createDataFrame(Seq(rr)).toDF()
  }

  /**
   * Runs `df` through the fitted feature pipeline and prints the Naive Bayes
   * prediction for every row. Returns the transformed frame (previously returned
   * `null`; the only caller in this file discards the result).
   */
  def readParaAndClassify(df:DataFrame,model:PipelineModel,nb:NaiveBayesModel): DataFrame ={
    val res = model.transform(df)
    val lp = res.map(df2LabeledPoint)
    lp.foreach { point =>
      val c = nb.predict(point.features)
      println(s"Predict values:$c")
    }
    res
  }

  /** Row with a "scaledFeatures" vector -> LabeledPoint with a dummy label of -1. */
  def df2LabeledPoint(row:Row):LabeledPoint = {
    val features = row.getAs[Vector]("scaledFeatures")
    new LabeledPoint(-1, features)
  }

  /**
   * aggregate() seqOp: counts one (prediction, label) pair into the confusion
   * matrix, treating class ids as 1-based. Mutates `mat` in place, which is
   * permitted for the zero value of RDD.aggregate.
   */
  def seq(mat:Array[Array[Double]],sample:(Double,Double)): Array[Array[Double]] ={
    val x = sample._1.toInt - 1
    val y = sample._2.toInt - 1
    mat(x)(y) += 1
    mat
  }

  /** aggregate() combOp: element-wise sum of two partial matrices, accumulated into mat1. */
  def comb(mat1:Array[Array[Double]],mat2:Array[Array[Double]]) = {
    for (i <- mat1.indices; j <- mat1(0).indices) {
      mat1(i)(j) += mat2(i)(j)
    }
    mat1
  }

  /** Parses `str` with the given formatter and converts to java.sql.Date. */
  def javaString2SqlDate(str:String,sdf:SimpleDateFormat): java.sql.Date =
    new java.sql.Date(sdf.parse(str).getTime)

  /** Formats one confusion-matrix row as "[ v0 v1 ...]" with 10-char right-aligned cells. */
  def beautifyString(a:Array[Double]): String = {
    if (a == null) "null"
    else a.map(x => "%10s".format(x)).mkString("[", " ", "]") // "[]" for an empty row
  }
}
//    sqlContext.registerRDDAsTable(schame,"x")
//    sqlContext.sql("select title,words from x limit 5 ").collect().foreach(println)



//    MLUtils.load
//    wordsRow.take(5).foreach(row=>{
//      val r = row.getAs[ArrayBuffer[String]](12);
//    })
//    val dis = sqlContext.sql("select clz from x").map(x=>x.getInt(0)).cache()


//    val trainSet = dis.zip(tfidf)
//    trainSet.foreach(println)
//    sqlContext.sql("select _1  from x").collect().foreach(println)
//
//    schama.printSchema()
//    val tf = new HashingTF()
//    tf.setInputCol("_2").setOutputCol("_3")
//    tf.transform(schama).foreach(println)

//    val tranSet = sqlContext.sql("select tag,terms from y")

//  root
//  |-- biz: string (nullable = true)
//  |-- id: string (nullable = true)
//  |-- idx: string (nullable = true)
//  |-- mid: string (nullable = true)
//  |-- title: string (nullable = true)
//  |-- weName: string (nullable = true)
//  |-- weAcc: string (nullable = true)
//  |-- weAccDesc: string (nullable = true)
//  |-- content: string (nullable = true)
//  |-- date1: date (nullable = true)
//  |-- date2: date (nullable = true)
//    predictionAndLabel.foreach(println)

//    res.take(10).foreach(println)
//    wordsSchame.registerTempTable("y")

//    println(schema.count(),wordsRow.count(),vecs.count(), wordsSchame.count(),tfidf.count());
//wordsSchame.take(10).foreach(println)
//    val ds = wordsSchame.map(row=>{
//      row.getString(0).substring(1).toInt
//    }).zip(tfidf).map(inst=>{
//      LabeledPoint(inst._1,inst._2)
//    }).cache();
//    ds.foreach(println)



//    root
//    |-- tag: string (nullable = true)
//    |-- biz: string (nullable = true)
//    |-- id: string (nullable = true)
//    |-- idx: string (nullable = true)
//    |-- mid: string (nullable = true)
//    |-- title: string (nullable = true)
//    |-- weName: string (nullable = true)
//    |-- weAcc: string (nullable = true)
//    |-- weAccDesc: string (nullable = true)
//    |-- content: string (nullable = true)
//    |-- date1: date (nullable = true)
//    |-- date2: date (nullable = true)
//    |-- words: array (nullable = true)
//    |    |-- element: string (containsNull = true)
//    |-- terms: vector (nullable = true)