import glob
import jieba
import os
from collections import defaultdict

def load_ngram_model(path):
    """Build forward/backward trigram continuation tables from an ARPA file.

    Reads only the ``\\3-grams:`` section of the ARPA language model at
    *path* (built e.g. with ``lmplz -o 3 --prune 0 0 1 --text ... --arpa ...``).
    For every bigram key it collects candidate next words (forward table)
    and candidate previous words (backward table), keeps the ``max_cnt``
    highest-log-probability candidates per key, and writes the tables to
    ``path + '.forward'`` and ``path + '.backward'`` via :func:`dump`.

    Args:
        path: Path to a UTF-8 ARPA-format n-gram model file.
    """
    ngram_forward = defaultdict(list)
    ngram_backward = defaultdict(list)
    max_cnt = 500             # keep at most this many candidates per key
    prune_at = max_cnt * 100  # periodic truncation threshold to bound memory
    in_trigrams = False
    with open(path, 'r', encoding='utf-8') as fin:
        for line in fin:
            # Skip everything until the 3-gram section header.
            if line.startswith('\\3-grams:'):
                in_trigrams = True
                continue
            if not in_trigrams:
                continue
            t = line.split()
            # A trigram entry is: log10prob w1 w2 w3 (highest order has no
            # backoff weight); shorter lines are headers/blank/\end\.
            if len(t) < 4:
                continue

            # Map ARPA sentence-boundary symbols to BERT-style tokens.
            if t[1] == '<s>':
                t[1] = '[CLS]'
            if t[-1] == '</s>':
                t[-1] = '[SEP]'

            # float() replaces eval(): equivalent for ARPA log-probs and
            # safe against arbitrary code in the input file.
            logprob = float(t[0])

            # forward: (w1, w2) -> candidate next word w3
            if t[3] != '[SEP]':
                k = t[1] + t[2]
                ngram_forward[k].append((t[3], logprob))
                if len(ngram_forward[k]) >= prune_at:
                    ngram_forward[k] = sorted(ngram_forward[k], key=lambda x: x[1], reverse=True)[:max_cnt]

            # backward: (w2, w3) -> candidate previous word w1
            if t[1] != '[CLS]':
                k = t[2] + t[3]
                ngram_backward[k].append((t[1], logprob))
                if len(ngram_backward[k]) >= prune_at:
                    ngram_backward[k] = sorted(ngram_backward[k], key=lambda x: x[1], reverse=True)[:max_cnt]

    # Final truncation: the periodic pruning above only fires when a list
    # reaches prune_at, so keys may still hold far more than max_cnt entries.
    for table in (ngram_forward, ngram_backward):
        for k, v in table.items():
            if len(v) > max_cnt:
                table[k] = sorted(v, key=lambda x: x[1], reverse=True)[:max_cnt]

    dump(ngram_forward, path + '.forward')
    dump(ngram_backward, path + '.backward')

def dump(kv, path):
    """Write one ``key<TAB>word1 word2 ...`` line per entry of *kv* to *path*.

    Args:
        kv: Mapping from string key to a list of ``(word, score)`` tuples;
            only the words are written, in list order.
        path: Output file path (UTF-8; overwritten if it exists).
    """
    # 'with' guarantees the handle is closed; the original only flushed it,
    # leaking the file descriptor.
    with open(path, 'w', encoding='utf-8') as f:
        for k, v in kv.items():
            words = ' '.join(item[0] for item in v)
            print(f'{k}\t{words}', file=f)

def seg_dir(datadir):
    """Word-segment every regular file under *datadir* (recursively) with jieba.

    Each input line longer than 6 characters is stripped, segmented with
    ``jieba.cut``, and written space-joined (one output line per input line)
    to the hard-coded output file.

    Args:
        datadir: Root of a directory tree whose regular files are read as
            UTF-8 text.
    """
    out_path = '/evafs/angzhao/chat_corpus/ime_202202.1.txt'
    # Context managers close both the output file and each input file;
    # the original leaked every handle it opened.
    with open(out_path, 'w', encoding='utf-8') as fw:
        for f in glob.iglob(datadir + '/**/*', recursive=True):
            if os.path.isdir(f):
                continue
            lines = []
            with open(f, 'r', encoding='utf-8') as fr:
                for line in fr:
                    # Drop very short lines (<= 6 chars including newline).
                    if len(line) <= 6:
                        continue
                    lines.append(' '.join(jieba.cut(line.strip())))
            print('\n'.join(lines), file=fw)
            fw.flush()



if __name__ == '__main__':
    # Earlier entry points, kept for reference:
    # analysis(filepath='/evafs/angzhao/Checkpoints/felix_output/patch2_train.raw.txt.pred',
    #         origin = '/evafs/angzhao/DisfDetection/origin/train/patch2_train.raw.txt')
    # seg_dir('/evafs/angzhao/chat_corpus/2022_02')

    # Current entry point: build forward/backward trigram tables.
    load_ngram_model(path='/evafs/angzhao/chat_corpus/sp_oral.1.arpa')