# -*- encoding: utf-8 -*-
'''
@File    :   Compute per-sentence perplexity (ppl) with an SRILM trigram LM and sort the sentences by it
'''
import argparse
import os
import os.path
import re
import shlex
import sys

import jieba
# ngram-count -order 3 -kndiscount -limit-vocab -vocab vacob -map-unk "UNK" -text tmp_text -lm trigram_lm.arpa
# ngram -debug 2 -order 3 -lm trigram_lm.arpa  -ppl tmp_test > simplified.ppl

NGRAM_PATH = ""   # ngram 路径


def get_args():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace with:
            input  (str): path of the input text file, one sentence per line.
            outdir (str): directory where all generated files are written.
    """
    parser = argparse.ArgumentParser(description="""计算ppl值""")
    # `default` is ignored by argparse when required=True, so it is omitted.
    parser.add_argument('-i', '--input', type=str, required=True,
                        help='input txt path')
    parser.add_argument('-o', '--outdir', type=str, required=True,
                        help='output dir')
    return parser.parse_args()

# Translation table mapping each punctuation character to a space,
# built once at import time.
_PUN_TABLE = str.maketrans(',.?!:;<>/', ' ' * 9)


# Remove unwanted symbols: lowercase the text and replace each of the
# characters , . ? ! : ; < > / with a single space.  str.translate does
# this in one C-level pass instead of nine chained .replace() calls.
def remove_pun(text):
    return text.lower().translate(_PUN_TABLE)

# Read the text, build a vocabulary using jieba word segmentation.
def generate_vocab(text_file, vocab_dict):
    """Segment every line of `text_file` with jieba and write the resulting
    vocabulary (one word per line, insertion order) to `vocab_dict`.

    Args:
        text_file:  path of the input text, one sentence per line.
        vocab_dict: path of the vocabulary file to create.
    """
    vocab = {}
    with open(text_file, 'r', encoding='utf-8') as fin:
        for line in fin:
            # cut_all=False: precise mode; HMM=True: detect unseen words.
            # (The original debug `print(word_list)` printed a generator
            # object, not the words, and has been removed.)
            for word in jieba.cut(line.strip(), cut_all=False, HMM=True):
                vocab[word] = vocab.get(word, 0) + 1

    # Counts are collected but only the word list is written (matches the
    # original behaviour); `with` guarantees the file is closed.
    with open(vocab_dict, 'w', encoding='utf-8') as fout:
        for word in vocab:
            fout.write("%s\n" % word)

# Read the text, build a vocabulary WITHOUT word segmentation: punctuation
# is stripped and the line is split on whitespace.
def normal_generate_vocab(text_file, vocab_dict):
    """Tokenize `text_file` by whitespace (after punctuation removal) and
    write the vocabulary, sorted alphabetically, one word per line.

    Args:
        text_file:  path of the input text, one sentence per line.
        vocab_dict: path of the vocabulary file to create.
    """
    vocab = {}
    with open(text_file, 'r', encoding='utf-8') as fin:
        for line in fin:
            for word in remove_pun(line.strip()).split():
                vocab[word] = vocab.get(word, 0) + 1

    # Write the words in alphabetical order; `with` guarantees the file is
    # closed even if a write fails.
    with open(vocab_dict, 'w', encoding='utf-8') as fout:
        for word in sorted(vocab):
            fout.write("%s\n" % word)

# Train the language model with SRILM's ngram-count.
def generater_lm(vocab_dict, text_file, lm):
    """Run SRILM `ngram-count` to train a trigram LM restricted to the
    given vocabulary.

    Args:
        vocab_dict: vocabulary file (one word per line), used with -limit-vocab.
        text_file:  training text, one sentence per line.
        lm:         output path for the ARPA-format language model.
    """
    # shlex.quote guards against spaces / shell metacharacters in the
    # user-supplied paths (the command runs through a shell).
    cmd = 'ngram-count -order 3 -limit-vocab -vocab %s -map-unk "UNK" -text %s -lm %s' % (
        shlex.quote(vocab_dict), shlex.quote(text_file), shlex.quote(lm))
    os.system(cmd)

# Compute the per-sentence ppl values with SRILM's ngram tool.
def compute_ppl(lm, text_file, ppl_file):
    """Run SRILM `ngram -debug 2` on `text_file` and redirect its
    per-sentence ppl report into `ppl_file`.

    Args:
        lm:        path of the ARPA language model.
        text_file: text to score, one sentence per line.
        ppl_file:  destination for the raw ppl report.
    """
    # The shell redirection (`>`) requires a shell, so the command stays a
    # string; shlex.quote protects the interpolated paths.
    cmd = "ngram -debug 2 -order 3 -lm %s -ppl %s > %s" % (
        shlex.quote(lm), shlex.quote(text_file), shlex.quote(ppl_file))
    os.system(cmd)

# Parse the SRILM ppl report and write "<ppl>\t<sentence>" sorted ascending.
def post_process(text_file, ppl_file, result_file):
    """Match each sentence of `text_file` with its ppl from the SRILM
    `-debug 2` report in `ppl_file`, then write the sentences to
    `result_file` as "<ppl>\\t<sentence>", sorted by ascending ppl.

    Args:
        text_file:   original text, one sentence per line.
        ppl_file:    raw output of `ngram -debug 2 ... -ppl ...`.
        result_file: destination for the sorted result.
    """
    # Sentences that never receive a ppl default to +inf so they sort last.
    # (The original used '' as the sentinel, which raises TypeError when
    # sorted against floats in Python 3.)
    ppl_by_text = {}
    with open(text_file, 'r', encoding='utf-8') as fl:
        for line in fl:
            ppl_by_text.setdefault(line.strip(), float('inf'))

    current = ''
    with open(ppl_file, 'r', encoding='utf-8') as pl:
        for line in pl:
            line = line.strip()
            if line in ppl_by_text:
                # Echoed sentence line: remember it for the stats that follow.
                current = line
            elif 'ppl=' in line:
                # Stats line, e.g. "... logprob= -3 ppl= 31.62 ppl1= 100".
                ppl_by_text[current] = float(re.findall(r"ppl= (.*?) ppl1=", line)[0])

    with open(result_file, 'w', encoding='utf-8') as fout:
        for text, ppl in sorted(ppl_by_text.items(), key=lambda kv: kv[1]):
            fout.write("%s\t%s\n" % (ppl, text))


def main():
    """Drive the pipeline: vocabulary -> trigram LM -> ppl -> sorted result."""
    args = get_args()
    text_file = args.input
    outdir = args.outdir
    use_segmentation = False  # True: jieba segmentation; False: whitespace split

    # Refuse to touch an existing output directory; otherwise create it.
    if os.path.isdir(outdir):
        print("The %s is exist" % outdir)
        sys.exit(1)
    os.mkdir(outdir)

    # Paths of every intermediate and final artifact inside `outdir`.
    vocab_dict = os.path.join(outdir, 'vocab.dict')      # vocabulary
    lm_path = os.path.join(outdir, 'trigram_lm.arpa')    # language model
    ppl_file = os.path.join(outdir, 'tmp.ppl')           # raw ppl report
    result_file = os.path.join(outdir, 'result.ppl')     # final sorted result

    # 1) Build the vocabulary (segmented or plain, per the flag above).
    build_vocab = generate_vocab if use_segmentation else normal_generate_vocab
    build_vocab(text_file, vocab_dict)

    # 2) Train the language model.
    generater_lm(vocab_dict, text_file, lm_path)

    # 3) Score every sentence.
    compute_ppl(lm_path, text_file, ppl_file)

    # 4) Extract each sentence's ppl and write the sorted result.
    post_process(text_file, ppl_file, result_file)

    print("Done")


# Script entry point: only run the pipeline when executed directly.
if __name__ == "__main__":
    main()

