package app.classifier

import java.io.{File, PrintWriter}
import java.nio.charset.MalformedInputException
import java.util.{HashMap => HMap, List => JList}

import com.hankcs.hanlp.HanLP
import com.hankcs.hanlp.seg.common.{Term => HanLPTerm}
import weka.classifiers.bayes.DMNBtext
import weka.core.{Attribute, FastVector, Instance, Instances, SparseInstance}

import scala.collection.JavaConversions._
import scala.io.Source

object DMNBTextClassifier {

  //data structure
  // Weka dataset header (word attributes + nominal class attribute);
  // populated once by createStructure and read by the create*Instance methods.
  var m_structure: Instances = null
  // attribute name -> Weka Attribute, used by createInstance
  var m_indexDict: HMap[String, Attribute] = null
  // attribute name -> attribute index, used by createSparseInstance
  var m_indexWord: HMap[String, Int] = null

  // test messages must be strictly longer than this to be kept (see getTestData)
  val msg_min_length = 4
  // NOTE(review): declared but never referenced in this file — confirm before removing
  val msg_max_length = 144


  /** Parses a decimal integer, returning None instead of throwing.
    *
    * The original caught every Exception; this is narrowed to
    * NumberFormatException — the only failure String#toInt is expected
    * to raise — so unrelated errors are no longer silently swallowed.
    *
    * @param s candidate integer text
    * @return Some(value) when s parses as an Int, otherwise None
    */
  def toInt(s: String): Option[Int] = {
    try Some(s.toInt)
    catch {
      case _: NumberFormatException => None
    }
  }

  //try to open file with different decode format
  def tryOpenFile(filename: String): Array[String] = {
    try {
      Source.fromFile(filename, "utf8").getLines().toArray
    } catch {
      case e: MalformedInputException => Source.fromFile(filename, "cp936").getLines().toArray
    }
  }

  /** Reads labeled test samples from a tab-separated file.
    *
    * Each line is "<class id>\t<message>". Lines that are empty, lack a tab,
    * have an unparsable id, or whose trimmed message is not longer than
    * msg_min_length are skipped. Unlike getData, duplicates are all kept.
    *
    * Fixes a crash in the original: an empty line made data(0) throw
    * StringIndexOutOfBoundsException.
    *
    * @param filename path to the test file (utf8 or cp936)
    * @return (message, class id) pairs in file order
    */
  def getTestData(filename: String): Array[(String, Int)] = {
    val samples = new scala.collection.mutable.ArrayBuffer[(String, Int)]
    for (raw <- tryOpenFile(filename) if raw.nonEmpty) {
      // some files start with a BOM (FEFF or FFFE) that must be stripped
      val line = if (raw(0) == '\uFEFF' || raw(0) == '\uFFFE') raw.substring(1) else raw
      val idx = line.indexOf('\t')
      if (idx != -1 && idx + 1 < line.length) {
        val msg = line.substring(idx + 1).trim
        toInt(line.substring(0, idx)) match {
          case Some(id) if msg.length > msg_min_length => samples.append((msg, id))
          case _ => // unparsable id or message too short — skip the line
        }
      }
    }
    samples.toArray
  }

  //map get rid of  duplicate
  def getData(filename: String): HMap[String, Int] = {
    val train = new HMap[String, Int]()
    val alldata = tryOpenFile(filename)
    var id: Int = 0
    var msg: String = ""
    for (data <- alldata) {
      var line = data
      //some file starts with FEFF or FFFE,need to be deleted
      if (data(0) == '\uFEFF' || data(0) == '\uFFFE') {
        line = data.substring(1)
      }
      val idx = line.indexOf('\t')
      if (idx != -1 && idx + 1 < line.length) {
        val bid = toInt(line.substring(0, idx))
        msg = line.substring(idx + 1)
        if (bid != None) {
          id = bid.get
          train(msg) = id
        }
      }
    }
    train
  }

  /** Segments every training message with HanLP.
    *
    * Delegates to segMsg so the tokenization logic lives in exactly one place
    * (the original duplicated segMsg's term loop inline).
    *
    * NOTE(review): the result is keyed by Array[String], which hashes by
    * reference identity in a java HashMap — every message therefore produces
    * a distinct entry. Confirm this is intended before relying on key lookup.
    *
    * @param data message -> class id
    * @return token array -> (original message, class id)
    */
  def segData(data: HMap[String, Int]): HMap[Array[String], (String, Int)] = {
    val segdata = new HMap[Array[String], (String, Int)]()
    data.foreach { case (msg, cid) =>
      segdata(segMsg(msg)) = (msg, cid)
    }
    segdata
  }

  /** Segments a single message into its HanLP word tokens, in segment order. */
  def segMsg(msg: String): Array[String] = {
    val terms: JList[HanLPTerm] = HanLP.segment(msg)
    terms.map(_.word).toArray
  }

  /** Builds the feature vocabulary from segmented training data.
    *
    * First counts, for every token, how many times it occurs per class id;
    * then keeps a token as a feature when it is not a stop word and either
    * occurs more than twice in total or appears at least once in class 1.
    *
    * Cleanup vs. original: removed the unused class_cnt local (only referenced
    * from abandoned commented-out variance code) and corrected the misleading
    * "less than 10" comment (the check is length < 20).
    *
    * @param data token array -> (original message, class id)
    * @return selected token -> total occurrence count across all classes
    */
  def selectWord(data: HMap[Array[String], (String, Int)]): HMap[String, Double] = {
    // token -> (class id -> occurrence count)
    val wordmap = new HMap[String, HMap[Int, Int]]()
    val featureMap = new HMap[String, Double]
    data.foreach { case (words, (_, cid)) =>
      words.foreach { w =>
        // skip pathologically long tokens (length >= 20)
        if (w.length < 20) {
          if (!wordmap.contains(w)) wordmap(w) = new HMap[Int, Int]()
          val cidmap = wordmap(w)
          cidmap(cid) = cidmap.getOrElse(cid, 0) + 1
        }
      }
    }

    //select word: drop stop words and rare tokens that never occur in class 1
    val stopWords = Array("的", "很", "啊", "吧", "呀", "了")
    wordmap.foreach { case (word, cidmap) =>
      val all_cnt: Int = cidmap.values().sum
      if (!stopWords.contains(word) && (all_cnt > 2 || cidmap.contains(1))) {
        featureMap(word) = all_cnt
      }
    }

    featureMap
  }


  // instances attribute
  /** Builds the Weka dataset header (m_structure) and the name->attribute /
    * name->index lookup tables (m_indexDict, m_indexWord) from raw training data.
    *
    * One numeric attribute is created per word selected by selectWord, then a
    * nominal class attribute is appended last. A leading "dummy" label is
    * inserted before the real classIDs so that real labels never map to
    * nominal index 0 — presumably to keep them distinct from the 0 values a
    * SparseInstance suppresses (NOTE(review): confirm against Weka docs).
    *
    * Side effects: overwrites the m_structure, m_indexDict and m_indexWord globals.
    *
    * @param oridata  message -> class id training map
    * @param classIDs string form of every real class label, e.g. Array("-1", "0", "1")
    */
  def createStructure(oridata: HMap[String, Int], classIDs: Array[String]) {
    val segdata: HMap[Array[String], (String, Int)] = segData(oridata)
    val wordmap: HMap[String, Double] = selectWord(segdata)
    // one numeric attribute per selected word
    val attributes: FastVector = new FastVector(wordmap.size)
    wordmap.foreach(r => {
      attributes.addElement(new Attribute(r._1))
    })

    //class value: "dummy" first, then the real labels (see doc comment above)
    val my_nominal_values: FastVector = new FastVector(classIDs.length + 1)
    my_nominal_values.addElement("dummy")
    classIDs.foreach(r => {
      my_nominal_values.addElement(r)
    })

    // the class attribute must be added after all word attributes
    val classAttr: Attribute = new Attribute("class attribute", my_nominal_values)
    attributes.addElement(classAttr)

    m_structure = new Instances("filter bad message", attributes, 0)
    m_structure.setClass(classAttr)

    m_indexWord = new HMap[String, Int]()

    // index every attribute (including the class attribute) by name
    val numAttr = m_structure.numAttributes()
    m_indexDict = new HMap[String, Attribute]
    for (i <- 0 until numAttr) {
      m_indexDict(m_structure.attribute(i).name()) = m_structure.attribute(i)
      m_indexWord(m_structure.attribute(i).name()) = m_structure.attribute(i).index()
    }
  }

  /** Builds one Weka instance for a message via a dense intermediate.
    *
    * All attributes start at 0, the class attribute is set from classid, and
    * each segmented word found in m_indexDict is either counted (needCount)
    * or flagged with 1; the dense instance is then compacted to sparse form.
    *
    * @param msg       raw message text
    * @param classid   class label, matched against the nominal class values
    * @param needCount true -> term-frequency values, false -> binary presence
    * @return (all segmented words, sparse instance)
    */
  def createInstance(msg: String, classid: Int, needCount: Boolean): (Array[String], Instance) = {
    val attrCount = m_structure.numAttributes()
    val dense: Instance = new Instance(attrCount)
    // zero-fill every attribute so unset slots are not treated as missing
    var i = 0
    while (i < attrCount) {
      dense.setValue(m_structure.attribute(i), 0)
      i += 1
    }
    // set the nominal class value
    dense.setValue(m_structure.classAttribute(), classid.toString)
    val words: Array[String] = segMsg(msg)
    for (w <- words if m_indexDict.contains(w)) {
      val attr = m_indexDict(w)
      if (needCount) dense.setValue(attr, dense.value(attr) + 1)
      else dense.setValue(attr, 1)
    }
    (words, new SparseInstance(dense))
  }

  /** Builds a sparse Weka instance for a message directly from a value array.
    *
    * Segments the message, keeps only words present in m_indexWord, counts
    * them, then writes counts (or 1-flags when needCount is false) into a
    * dense double array that SparseInstance compacts on construction.
    *
    * @param msg       raw message text
    * @param classid   class label, matched against the nominal class values
    * @param needCount true -> term-frequency values, false -> binary presence
    * @return (distinct in-vocabulary words, sparse instance)
    */
  def createSparseInstance(msg: String, classid: Int, needCount: Boolean): (Array[String], Instance) = {
    val attrValues = new Array[Double](m_structure.numAttributes())
    // per-word occurrence counts, restricted to the known vocabulary
    val counts = new HMap[String, Int]()
    for (w <- segMsg(msg) if m_indexWord.contains(w)) {
      counts(w) = (if (counts.contains(w)) counts(w) else 0) + 1
    }
    counts.foreach { case (word, cnt) =>
      attrValues(m_indexWord(word)) = if (needCount) cnt else 1
    }
    val inst: SparseInstance = new SparseInstance(1, attrValues)
    //println(msg + ":" + classid)
    //println(inst)
    // set the nominal class; real labels are non-zero thanks to the "dummy" label
    inst.setValue(m_structure.classAttribute(), classid.toString)
    (counts.keys.toArray, inst)
  }

  /** Entry point.
    *
    * args(0): training file, args(1): test file — both "<class id>\t<message>".
    * Trains a Weka DMNBtext classifier on the (deduplicated) training data,
    * classifies every test message, and writes one line per classified message
    * to test.txt: "<true id>\t<predicted label>\t<max probability>\t<message>".
    */
  def main(args: Array[String]): Unit = {
    val train: HMap[String, Int] = getData(args(0))
    // real classes -1/0/1; createStructure prepends a "dummy" nominal label
    createStructure(train, Array("-1","0", "1"))
    val instances: Instances = new Instances(m_structure)
    //println(instances)
    var i = 0
    // binary (presence) features: needCount = false
    train.foreach(r => {
      instances.add(createSparseInstance(r._1, r._2, false)._2)
      i += 1
      // progress logging every 1000 training messages
      if (i % 1000 == 0) println("creatInstance:" + i)
    })
    println("start building...")
    val c: DMNBtext = new DMNBtext
    c.buildClassifier(instances)
    println("complete build!!")

    // results are written to test.txt in the working directory
    val writer = new PrintWriter(new File("test.txt"))

    val test: Array[(String, Int)] = getTestData(args(1))
    test.foreach(r => {
      val (wordlist: Array[String], instance: Instance) = createSparseInstance(r._1, r._2, false)
      if (wordlist.length == 0) {
        // no in-vocabulary word survived segmentation — cannot classify; log it
        println(r._1)
      } else {
        // an instance must belong to a dataset before Weka can evaluate it
        instance.setDataset(m_structure)
        val res: Array[Double] = c.distributionForInstance(instance)
        // pick the most probable class and translate its index back to a label
        val maxvalue: Double = res.max
        val maxindex: Int = res.indexOf(maxvalue)
        val classvalue = m_structure.classAttribute().value(maxindex)
        //println(Array(r._2, maxindex - 1, maxvalue, r._1).mkString("\t"))
        writer.write(Array(r._2, classvalue, maxvalue, r._1).mkString("\t") + "\n")
      }
    })
    writer.close()

    /*
    c.distributionForInstance (instances.instance (0) ).foreach (println)
    println (instances.instance (0).classValue () )
    segdata.foreach(r=>{
      r._1.foreach(w=>{print(w+" ")})
      println(r._2)
    })
    wordstatis.foreach(r=>{
      print(r._1+":")
      r._2.foreach(w=>{
        print(w._1+","+w._2+" ")
      })
      println()
    })
    */
  }
}
