package app.classifier

import java.io.{File, PrintWriter}
import java.util.{HashMap => HMap, List => JList}

import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.seg.common.{Term => HanLPTerm}

import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer
import scala.io.Source

object Proprecess{

  /** Parses `s` as a signed decimal Int, returning None for any malformed input. */
  def toInt(s: String): Option[Int] = {
    // Try catches only NonFatal throwables (unlike a blanket `catch Exception`)
    // and expresses "parse or absent" directly.
    scala.util.Try(s.toInt).toOption
  }

  /**
   * Reads "&lt;classId&gt;\t&lt;message&gt;" lines from `filename` (UTF-8) and returns a map
   * from each message's segmented word list to (message, classId).
   *
   * A line is skipped when it has no tab (or nothing after it), the id is not an
   * integer, or the trimmed message is 12 characters or shorter.
   *
   * NOTE(review): keys are Array[String] and java.util.HashMap hashes arrays by
   * reference, so identical word lists do NOT actually deduplicate — the original
   * "get rid of duplicate" intent only holds for reference-equal arrays; confirm.
   */
  def getData(filename: String): HMap[Array[String], (String, Int)] = {
    val train = new HMap[Array[String], (String, Int)]()
    val source = Source.fromFile(filename, "utf8")
    try {
      for (raw <- source.getLines()) {
        // Some files start with a BOM (FEFF/FFFE); strip it. The nonEmpty guard
        // fixes a crash the original had on empty lines (raw(0) out of bounds).
        val line =
          if (raw.nonEmpty && (raw(0) == '\uFEFF' || raw(0) == '\uFFFE')) raw.substring(1)
          else raw
        val idx = line.indexOf('\t')
        if (idx != -1 && idx + 1 < line.length) {
          val msg = line.substring(idx + 1).trim
          toInt(line.substring(0, idx)) match {
            case Some(id) if msg.length > 12 =>
              train.put(segMsg(msg), (msg, id))
            case _ => // non-numeric id or message too short: skip the line
          }
        }
      }
    } finally {
      source.close() // the original leaked the Source handle
    }
    train
  }

  /**
   * Segments `msg` into words with HanLP, preserving order.
   * Iterates the returned java.util.List by index instead of relying on the
   * deprecated implicit `scala.collection.JavaConversions` wrappers.
   */
  def segMsg(msg: String): Array[String] = {
    val terms = HanLP.segment(msg)
    val words = new Array[String](terms.size())
    var i = 0
    while (i < terms.size()) {
      words(i) = terms.get(i).word
      i += 1
    }
    words
  }

  /**
   * Builds the feature vocabulary: every word shorter than 20 characters is
   * counted per class id, then a word is kept if it is not a stop word and
   * either occurs more than twice overall or appears in class 1. The feature
   * value is the word's total occurrence count.
   *
   * NOTE(review): an older comment here said "less than 10" but the code checks
   * length < 20 — the code is taken as authoritative. A variance-based selection
   * was once sketched at this point and abandoned (left commented out before).
   */
  def selectWord(data: HMap[Array[String], (String,Int)]): HMap[String, Double] = {
    val perWordClassCounts = new HMap[String, HMap[Int, Int]]()
    val features = new HMap[String, Double]()

    // Phase 1: word -> (class id -> occurrence count), via plain Java iteration.
    val dataIt = data.entrySet().iterator()
    while (dataIt.hasNext) {
      val entry = dataIt.next()
      val classId = entry.getValue._2
      for (word <- entry.getKey if word.length < 20) {
        var classCounts = perWordClassCounts.get(word)
        if (classCounts == null) {
          classCounts = new HMap[Int, Int]()
          perWordClassCounts.put(word, classCounts)
        }
        val next = if (classCounts.containsKey(classId)) classCounts.get(classId) + 1 else 1
        classCounts.put(classId, next)
      }
    }

    // Phase 2: keep non-stop-words that are frequent (> 2) or occur in class 1.
    val stopWords = Array("的","很","啊","吧","呀","了")
    val wordIt = perWordClassCounts.entrySet().iterator()
    while (wordIt.hasNext) {
      val entry = wordIt.next()
      val word = entry.getKey
      val classCounts = entry.getValue
      var total = 0
      val cntIt = classCounts.values().iterator()
      while (cntIt.hasNext) total += cntIt.next()
      if (!stopWords.contains(word) && (total > 2 || classCounts.containsKey(1))) {
        features.put(word, total.toDouble)
      }
    }
    features
  }

  /**
   * Greedy single-pass clustering of the instances, written to "test.txt" as:
   * clusterId \t classId \t message \t featureWords|...
   *
   * Each instance is reduced to the set of its feature words (those present in
   * `featureMap`); instances with fewer than 3 feature words are dropped. An
   * unassigned instance seeds a new cluster and absorbs every still-unassigned
   * instance whose word overlap with the seed exceeds 25% of the combined set
   * sizes. Per the original note, the purpose is to surface near-identical word
   * lists labelled with different class ids — likely mislabelled instances.
   */
  def clusterData(data: HMap[Array[String],(String,Int)], featureMap: HMap[String,Double]): Unit = {
    // (feature-word set, message, class id) -> cluster id; -1 marks "unassigned".
    val wordSetMap = new HMap[(Set[String], String, Int), Int]()
    val dataIt = data.entrySet().iterator()
    while (dataIt.hasNext) { // plain iteration: the original used `map` purely for side effects
      val e = dataIt.next()
      val featureWords = e.getKey.toSet.filter(w => featureMap.containsKey(w))
      // need at least 3 feature words for a meaningful comparison
      if (featureWords.size > 2) {
        wordSetMap.put((featureWords, e.getValue._1, e.getValue._2), -1)
      }
    }

    var clusterId = -1
    val seedIt = wordSetMap.entrySet().iterator()
    while (seedIt.hasNext) {
      val seed = seedIt.next()
      if (seed.getValue == -1) {
        clusterId += 1
        seed.setValue(clusterId) // value-only update: no structural modification
        val seedWords = seed.getKey._1
        val candIt = wordSetMap.entrySet().iterator()
        while (candIt.hasNext) {
          val cand = candIt.next()
          if (cand.getValue == -1) {
            // intersection of the word sets (the original misleadingly named it diffSet)
            val common = seedWords & cand.getKey._1
            if (common.size * 2 > (seedWords.size + cand.getKey._1.size) * 0.5) {
              cand.setValue(clusterId)
            }
          }
        }
      }
    }

    val writer = new PrintWriter(new File("test.txt"))
    try {
      val outIt = wordSetMap.entrySet().iterator()
      while (outIt.hasNext) {
        val e = outIt.next()
        val (words, msg, classId) = e.getKey
        writer.write(Array(e.getValue, classId, msg.replaceAll("\t", " "), words.mkString("|"), "\n").mkString("\t"))
      }
    } finally {
      writer.close() // close even when a write throws (the original leaked here)
    }
  }

  /**
   * Debug dump to "test.txt": for each instance, writes its class id, message and
   * feature-word set, followed by every previously seen instance whose feature
   * words overlap this one by more than 15% of the combined set sizes and whose
   * message text differs — candidates for inconsistent labelling.
   *
   * NOTE(review): the comparison `msg != ele._2._1` is on message TEXT, although
   * the sibling routine's note talks about differing class ids — confirm which
   * was intended. Also note this writes the same "test.txt" path as clusterData.
   */
  def checkAndFilterData(data: HMap[Array[String], (String, Int)], featureMap: HMap[String, Double]): Unit = {
    val seen = new ArrayBuffer[(Set[String], (String, Int))]
    val writer = new PrintWriter(new File("test.txt"))
    try {
      val it = data.entrySet().iterator()
      while (it.hasNext) {
        val entry = it.next()
        val msg = entry.getValue._1
        val classId = entry.getValue._2
        // single-threaded: StringBuilder instead of the synchronized StringBuffer
        val sb = new StringBuilder
        val wordset = entry.getKey.toSet.filter(w => featureMap.containsKey(w))
        sb.append("-" * 20 + "\n")
        sb.append(classId + "\t" + msg + "\n")
        sb.append(wordset.mkString("|") + "\n")
        // current record is appended first, then compared against everything seen
        // so far (it never matches itself because its own message is equal)
        seen.append((wordset, (msg, classId)))
        for (ele <- seen) {
          val common = wordset & ele._1
          if (common.size * 2 > (wordset.size + ele._1.size) * 0.3 && msg != ele._2._1) {
            sb.append("*" * 20 + "\n")
            sb.append(ele._2._2 + "\t" + ele._2._1 + "\n")
            sb.append(ele._1.mkString("|") + "\n")
          }
        }
        writer.write(sb.toString)
      }
    } finally {
      writer.close() // the original leaked the writer if a write failed
    }
  }

  /**
   * Entry point: loads the training file (first CLI argument when given,
   * otherwise the original hard-coded "data/alldata.txt"), selects feature
   * words, and clusters the data.
   */
  def main(args: Array[String]): Unit = {
    val inputPath = if (args.nonEmpty) args(0) else "data/alldata.txt"
    val data: HMap[Array[String], (String, Int)] = getData(inputPath)
    val featureMap = selectWord(data)
    //checkAndFilterData(data, featureMap)
    clusterData(data, featureMap)
  }

}
