#1、Train language model
import datetime
import time 
import math
import random
import sys

dataFileName = "F:\\RenMinData.txt"        # default training-data file name
#modelFileName = "F:\\LanguageModel.txt"    # default model file name
#resultFileName = "F:\\TestResult.txt"      # default test-result output file name
modelFileName = ""
resultFileName = ""
testFileName = dataFileName + ".test"
trainFileName = dataFileName + ".train"
logFileName = "log.csv"

# 1/ntest of the corpus lines are held out as test data (see init()).
ntest = 2

dictory = {"◆":0,"◇":0}   # vocabulary: word -> occurrence count ("◆"/"◇" are sentence start/end markers)
dicIndex = {"◆":0,"◇":1}  # word -> row index into `transformation`
transformation = [{},{}]             # per previous word: {next word: bigram count, later probability}
defaultLM = {}            # fallback probability used when a bigram was never seen

method = "Jelinek-Mercer" # default smoothing algorithm
katz0 = 0                 # Katz smoothing: probability assigned to zero-count bigrams
param = 0

methodList = ["Laplace","Jelinek-Mercer","Good-Turing","Katz"]

#ntest<2: train and test on the same data; ntest>=2: hold out 1/ntest of the
#corpus lines as test data and train on the rest.
def init():
    """Parse command-line arguments and split the corpus into train/test files.

    sys.argv layout: segmentation.py [ntest [data_file [result_file [model_file]]]]
    Side effects: rewrites the module-level file-name globals and, when
    ntest >= 2, writes trainFileName / testFileName by randomly splitting
    dataFileName line by line.
    """
    global dataFileName
    global trainFileName
    global testFileName
    global modelFileName
    global resultFileName
    global ntest

    if len(sys.argv) >= 2:
        ntest = int(sys.argv[1])

    if len(sys.argv) >= 3:
        dataFileName = sys.argv[2]

    if len(sys.argv) >= 4:
        resultFileName = sys.argv[3]

    if len(sys.argv) >= 5:
        modelFileName = sys.argv[4]

    if len(sys.argv) < 5:
        # The original usage string was missing its last closing bracket.
        print("Warning, Usage also:segmentation.py [ntest [data_file [result_file [model_file]]]]")
        print("default:segmentation.py "+str(ntest)+" "+dataFileName+" "+resultFileName+" "+modelFileName)

    # Keep the derived split-file names in sync with a data file supplied on
    # the command line (they were computed from the default at import time).
    trainFileName = dataFileName + ".train"
    testFileName = dataFileName + ".test"

    if ntest < 2:
        testFileName = dataFileName
        trainFileName = dataFileName
        return

    # Randomly send 1/ntest of the lines to the test file, the rest to the
    # train file.  Context managers close the handles even on error.
    with open(dataFileName, "r", encoding="GB2312") as dataFile, \
         open(trainFileName, "w", encoding="GB2312") as fileTrain, \
         open(testFileName, "w", encoding="GB2312") as fileTest:
        for line in dataFile:
            if random.randint(1, ntest) == 1:
                fileTest.write(line)
            else:
                fileTrain.write(line)

#Train the bigram language model from the training corpus.
def trainModel(method2,param2):
    """Build a bigram model with the given smoothing method.

    method2: one of methodList ("Laplace", "Jelinek-Mercer", "Katz").
    param2:  smoothing parameter -- delta for Laplace, lambda for
             Jelinek-Mercer, the count cutoff k for Katz.

    Side effects: rebuilds the module-level model globals (dictory,
    dicIndex, transformation, defaultLM, katz0, method, param), appends a
    row header to the log file and, when modelFileName is set, writes the
    model to that file.
    """
    global dictory
    global dicIndex
    global transformation
    global defaultLM
    global katz0

    global method
    global param

    # "◆" (start) and "◇" (end) never occur as words in the training data;
    # if the test data contains them, substitute other markers.
    dictory = {"◆":0,"◇":0}   # vocabulary: word -> occurrence count
    dicIndex = {"◆":0,"◇":1}  # word -> row index into `transformation`
    transformation = [{},{}]  # bigram counts, converted to probabilities below
    defaultLM = {}            # fallback probability per word

    katz0 = 0                 # Katz: probability given to unseen bigrams

    method = method2
    param = param2
    print("============="+method+" "+str(param)+"==================")

    with open(logFileName,"a",encoding="GB2312") as logFile:
        logFile.write(time.strftime("%Y-%m-%d %H:%M:%S",time.localtime()) + ","+method+" "+str(param)+","+method+","+str(param))

    total_words = 0

    # Count unigrams and bigrams from the TRAINING split.  The original read
    # dataFileName here, which leaked the held-out test lines into the model
    # and made the train/test split pointless.
    with open(trainFileName,"r",encoding="GB2312") as file:
        for lines in file:
            preWord = "◆"
            dictory[preWord] += 1
            for word in lines.replace('\n', ' ').split(' '):
                word = word.strip()
                if len(word) < 1: continue

                total_words += 1

                # Update the unigram count, registering new words.
                if word in dicIndex:
                    dictory[word] += 1
                else:
                    dictory[word] = 1
                    dicIndex[word] = len(dicIndex)
                    transformation.append({})

                # Update the bigram count for (preWord, word).
                if word in transformation[dicIndex[preWord]]:
                    transformation[dicIndex[preWord]][word] += 1
                else:
                    transformation[dicIndex[preWord]][word] = 1

                preWord = word

            # Close the sentence with the end marker.
            word = "◇"
            dictory[word] += 1

            if word in transformation[dicIndex[preWord]]:
                transformation[dicIndex[preWord]][word] += 1
            else:
                transformation[dicIndex[preWord]][word] = 1

    # Turn the raw counts into smoothed probabilities.
    if method == "Laplace":
        # Add-delta: P(word|preWord) = (c(preWord,word)+d) / (c(preWord)+V*d).
        # The original divided by c(word); the estimate conditions on the
        # PREVIOUS word, so the denominator must use its count.
        for preWord,index in dicIndex.items():
            for word in transformation[index].keys():
                transformation[index][word] = (transformation[index][word] + param) / (dictory[preWord] + (len(dictory) * param))

        # Fallback: probability of an unseen word following `word`.
        for word in dictory.keys():
            defaultLM[word] = (param / (dictory[word] + (len(dictory) * param)))

    elif method == "Jelinek-Mercer":
        # param is lambda; interpolate the bigram ML estimate with the
        # unigram probability:
        #   P = lambda * c(prev,w)/c(prev) + (1-lambda) * c(w)/N.
        # The original divided the bigram count by c(w) instead of c(prev).
        for preWord,index in dicIndex.items():
            for word in transformation[index].keys():
                transformation[index][word] = param * transformation[index][word] / dictory[preWord] + (1 - param) * dictory[word] / total_words

        # Fallback when the bigram is unseen: the unigram term alone.
        for word in dictory.keys():
            defaultLM[word] = (1 - param) * dictory[word] / total_words

    elif method == "Katz":
        # param is the count cutoff k: counts above k are trusted as-is,
        # counts r <= k are replaced by the Good-Turing discounted value
        # r* = (r+1) * N(r+1) / N(r).
        nr_array = [0] * (param + 2)   # nr_array[r] = number of bigram types seen r times

        for preWord,index in dicIndex.items():
            for word in transformation[index].keys():
                if transformation[index][word] <= param+1:
                    nr_array[transformation[index][word]] += 1

        # N(0) = possible bigrams (V^2) minus the bigram types actually seen.
        nr_array[0] = len(dictory) ** 2
        for ii in range(len(transformation)):
            nr_array[0] -= len(transformation[ii])

        nr_array2 = [0] * (param + 1)  # nr_array2[r] = discounted count r*
        for ii in range(len(nr_array2)):
            if nr_array[ii] == 0:
                # N(r) == 0 makes r* undefined; dump state before the
                # division below raises.
                print(str(nr_array))
                print(str(ii))
            nr_array2[ii] = (ii + 1) * nr_array[ii + 1] / nr_array[ii]

        katz0 = (nr_array2[0] + 0.0) / total_words

        # NOTE(review): dividing by total_words yields an unconditional
        # bigram estimate; a conditional Katz model would divide by
        # c(preWord).  Kept as designed -- confirm intent.
        for preWord,index in dicIndex.items():
            for word in transformation[index].keys():
                if transformation[index][word] > param:
                    transformation[index][word] = (transformation[index][word] + 0.0) / total_words
                else:
                    transformation[index][word] = (nr_array2[transformation[index][word]] + 0.0) / total_words

    # Optionally dump the model to a file.
    if modelFileName != "":
        with open(modelFileName,"w",encoding="UTF-8") as modelFile:
            # First section: the vocabulary (plus per-word fallback for JM).
            for word in dictory.keys():
                modelFile.write(word + " ")
                if method == "Jelinek-Mercer":
                    # The original wrote defaultLM[preWord] -- a stale loop
                    # variable; the fallback of the word being listed is meant.
                    modelFile.write(str(defaultLM[word]) + " ")
            modelFile.write("\n")
            modelFile.write("\n")

            # Second section: one row of (next-word index, probability)
            # pairs per previous word.
            for preWord,index in dicIndex.items():
                # Laplace stores its fallback inline, keyed as index -1.
                if method == "Laplace":
                    modelFile.write("-1 " + str(defaultLM[preWord]) + " ")

                for word in transformation[index].keys():
                    modelFile.write(str(dicIndex[word]) + " " + str(transformation[index][word]) + " ")
                modelFile.write("\n")


#2、segmentation
    
class Node(object):
    """A node in the segmentation lattice.

    word    -- the candidate word ending at this lattice position
    score   -- accumulated log-probability of the best path to this node
    preNode -- previous Node on that best path (None until linked / for start)
    """

    def __init__(self,word,score,preNode):
        self.word = word
        self.score = score
        self.preNode = preNode

    def __repr__(self):
        # preNode is deliberately omitted: printing it would walk (and
        # dump) the entire predecessor chain.
        return "Node(word=%r, score=%r)" % (self.word, self.score)

def segmentation(source):
    """Segment `source` (a string without spaces) into dictionary words.

    Builds a lattice of candidate words over the character positions and
    runs a Viterbi-style search for the highest log-probability path under
    the current bigram model globals.  Returns the chosen words joined by
    single spaces (no trailing newline).
    """
    global katz0
    global method

    # Lattice: LT[0] is the start marker; LT[i] (i>=1) holds candidate
    # words ending at character i-1; the last row is the end marker.
    LT = [[Node("◆",0,None)]]

    # The longest dictionary word bounds the candidate span length.
    maxWordLen = 0
    for word in dictory:
        if maxWordLen < len(word):
            maxWordLen = len(word)

    curNode = None
    preNode = None

    # Build the lattice: one single-character node at every position plus a
    # node for every dictionary word ending there.
    for i in range(len(source)):
        curNode = Node(source[i:i+1],0,None)
        linkTableRow = []
        linkTableRow.append(curNode)
        for j in range(maxWordLen)[1:]:
            if j >= len(source): break

            # When i-j < 0 this slice is empty (never in dictory), so
            # out-of-range starts are harmless here.
            if source[i-j:i+1] in dictory:
                linkTableRow.append(Node(source[i-j:i+1],0,None))

        LT.append(linkTableRow)

    linkTableRow = []
    linkTableRow.append(Node("◇",0,None))
    LT.append(linkTableRow)

    # Dynamic program: pick the best-scoring predecessor for every node.
    for i in range(len(LT))[1:]:
        for j in range(len(LT[i])):
            # 0 doubles as "unset"; real scores are sums of log-probs and
            # thus negative.  NOTE(review): a path score of exactly 0 would
            # be treated as unset -- confirm this cannot occur in practice.
            maxScore = 0
            maxNode = None

            curEdgeScore = 0

            preWordPosition = i-len(LT[i][j].word)

            # Score the transition from every word ending where this starts.
            for k in range(len(LT[preWordPosition])):

                if LT[preWordPosition][k].word not in dicIndex:
                    # Unknown previous word: floor at the smallest positive float.
                    curEdgeScore = 5e-324

                elif LT[i][j].word not in transformation[dicIndex[LT[preWordPosition][k].word]]:

                    # Bigram unseen: fall back according to the smoothing method.
                    if method == "Jelinek-Mercer":
                        if LT[i][j].word not in defaultLM:
                            curEdgeScore = 5e-324
                        else:
                            curEdgeScore = defaultLM[LT[i][j].word]

                    elif method == "Katz":
                        curEdgeScore = katz0

                    else:
                        # Laplace: fallback keyed by the PREVIOUS word.
                        curEdgeScore = defaultLM[LT[preWordPosition][k].word]


                else:
                    curEdgeScore = transformation[dicIndex[LT[preWordPosition][k].word]][LT[i][j].word]

                if curEdgeScore <= 0:
                    curEdgeScore = 5e-324
                curEdgeScore = math.log(curEdgeScore)

                if maxScore == 0 or maxScore < LT[preWordPosition][k].score + curEdgeScore:
                    maxScore = LT[preWordPosition][k].score + curEdgeScore
                    maxNode = LT[preWordPosition][k]
            LT[i][j].preNode = maxNode
            LT[i][j].score = maxScore

    # Walk back from the end marker, collecting the best path's words.
    travelNode = LT[len(LT)-1][0].preNode
    travelArray = []

    while travelNode != None:
        travelArray.insert(0,travelNode.word)
        travelNode = travelNode.preNode

    # Drop the start marker "◆".
    travelArray = travelArray[1:]

    splited_line = ""
    for word in travelArray:
        splited_line += " " + word

    splited_line = splited_line[1:]
    return splited_line

#Evaluate segmentation accuracy of the current model on one corpus file.
def test_segmentation(fileType):
    """Re-segment every line of the selected file and report accuracy.

    fileType: "data", "train" or "test" (selects the file to score).
    Side effects: prints line/word statistics, appends them to the log
    file and, when resultFileName is set, writes each mis-segmented line
    (gold followed by hypothesis) to a per-method result file.
    """
    global method
    global param

    fileTypeMapping = {"data":dataFileName,"train":trainFileName,"test":testFileName}

    if fileType not in fileTypeMapping:
        print("Train Fail: file type "+fileType+" not in "+str(fileTypeMapping.keys()))
        return

    testFileNameSelector = fileTypeMapping[fileType]

    print("------------"+fileType+"----------------")

    lines_total = 0
    lines_right = 0
    words_total = 0
    words_right = 0

    testFile = open(testFileNameSelector,"r",encoding="GB2312")
    logFile = open(logFileName,"a",encoding="GB2312")

    resultFile = None
    if resultFileName != "":
        resultFile = open(resultFileName+"."+method+"."+str(param),"w",encoding="GB2312")
        resultFile.write("\n============" + method+"  "+str(param) + "==============\n")

    try:
        for line in testFile:
            # Strip the trailing newline before comparing: segmentation()
            # never emits one, so the original equality check (and the set
            # overlap on the last word of every line) could never match.
            gold = line.strip()
            if gold == "":
                continue

            lines_total += 1
            gold_words = gold.split(" ")
            words_total += len(gold_words)

            trimedLine = gold.replace(" ","")

            splited_line = segmentation(trimedLine)
            if splited_line != gold:
                if resultFile is not None:
                    resultFile.write(gold + "\n")
                    resultFile.write(splited_line + "\n")
                # NOTE(review): set intersection collapses duplicate words,
                # so repeated words count once; kept for comparability with
                # the original metric.
                words_right += len(set(gold_words) & set(splited_line.split(" ")))
            else:
                lines_right += 1
                words_right += len(gold_words)

        if lines_total == 0:
            # Empty corpus: avoid dividing by zero below.
            print("No lines to score in " + testFileNameSelector)
            return

        print("Total lines:" + str(lines_total))
        print("Right lines:" + str(lines_right))
        print("Line error rate:" + str((lines_total - lines_right) * 100 / lines_total))
        print("Total words:" + str(words_total))
        print("Right words:" + str(words_right))
        print("Word error rate:" + str((words_total - words_right) * 100 / words_total))

        logFile.write("," + str(lines_total))
        logFile.write("," + str(lines_right))
        logFile.write("," + str((lines_total - lines_right) * 100 / lines_total))
        logFile.write("," + str(words_total))
        logFile.write("," + str(words_right))
        logFile.write("," + str((words_total - words_right) * 100 / words_total))

        # Terminate the CSV row after the second file of a train/test pair.
        if fileType == "test" or fileType == "data":
            logFile.write(time.strftime(",%Y-%m-%d %H:%M:%S",time.localtime()) + "\n")
    finally:
        testFile.close()
        logFile.close()
        if resultFile is not None:
            resultFile.close()



init()

# Jelinek-Mercer smoothing: sweep the interpolation weight lambda.
lamb = [0.00001,0.0001,0.001,0.01,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,0.99,0.999,0.9999,0.99999]

for lam in lamb:
    trainModel("Jelinek-Mercer", lam)
    test_segmentation("train")
    test_segmentation("test")

# Laplace (add-delta) smoothing sweep; 5e-324 stands in for an
# "almost zero" extra count on unseen transitions.
delta = [5e-324,0.00001,0.0001,0.001,0.01,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1,1.1,1.2,1.3,1.4,1.5,1.6,1.7,1.8,1.9,2]

for d in delta:
    trainModel("Laplace", d)
    test_segmentation("train")
    test_segmentation("test")

# Katz smoothing: count cutoffs 1..9 and 10..90.
for scale in [1, 10]:
    for step in range(1, 10):
        trainModel("Katz", scale * step)
        test_segmentation("train")
        test_segmentation("test")
