import fasttext
import re
import jieba
from numpy.ma.core import count
from setuptools import glob

# Silence fastText's load-time warning output.
fasttext.FastText.eprint = lambda x: None
train_text_path = "data/train.txt"
model_path = "../aiModel/autotune.bin"
model = fasttext.load_model(model_path)
# Load the stop-word set. A 'with' block closes the file handle deterministically;
# the original set comprehension leaked the open file object.
with open('../fastText模块/data/stopwords.txt', encoding='utf-8') as _stopword_file:
    stopwords = {word.strip() for word in _stopword_file}


def clean_text(text):
    """Normalize *text* for classification.

    Keeps only Chinese characters, segments the result with jieba, and
    drops stop words and tokens of length <= 1.

    Returns:
        list[str]: the surviving tokens, in original order.
    """
    # Replace every non-Chinese character with a space so jieba only
    # ever sees Chinese runs.
    chinese_only = re.sub(r'[^\u4e00-\u9fa5]', ' ', text)
    # Segment, strip each piece, then filter in a single pass.
    stripped = (piece.strip() for piece in jieba.cut(chinese_only))
    return [token for token in stripped
            if len(token) > 1 and token not in stopwords]


def preprocessing():
    """Build the fastText training corpus file.

    Walks ./data/corpus/<label>/*.txt, cleans each line with clean_text(),
    and appends one "__label__<label> <space-joined tokens>" line per input
    line to ./temp/corpus.txt.
    """
    import os  # local import: os is not imported at module level

    # 'with' guarantees the corpus file is flushed and closed even if
    # cleaning raises; the original relied on an explicit close() only.
    with open('./temp/corpus.txt', 'w', encoding='utf-8') as corpus:
        for folder in glob.glob('./data/corpus/*'):
            # os.path.basename handles both '/' and '\\' separators;
            # the previous split('\\')[-1] only worked on Windows and
            # produced the whole path (a wrong label) on POSIX.
            folder_name = os.path.basename(folder)

            f_names = glob.glob('./data/corpus/%s/*.txt' % folder_name)
            print(f_names)
            for f_name in f_names:
                print(f_name)
                with open(f_name, 'r', encoding='utf-8') as file:
                    for line in file:
                        tokens = clean_text(line.strip())
                        content = ' '.join(tokens)
                        corpus.write("__label__%s " % folder_name + content + '\n')

# Model training
def model_train():
    """Train a supervised fastText classifier on ./data/train.txt,
    evaluate it on ./data/test.txt, and save it to `model_path`.
    """
    # Hyper-parameters gathered in one place for readability.
    hyper_params = dict(
        input='./data/train.txt',
        epoch=1,
        lr=0.1,
        dim=300,
        loss='softmax',
        minCount=3,
        wordNgrams=1,
        pretrainedVectors='./data/cc.zh.300.vec',
        thread=16,
    )
    classifier = fasttext.train_supervised(**hyper_params)
    # test() returns (number of test samples, precision@1, recall@1).
    test_result = classifier.test('./data/test.txt', threshold=0.0)
    classifier.save_model(model_path)
    print(test_result)

def m_predict(content):
    """Return True when more than 80% of the cleaned tokens of *content*
    are individually labelled '__label__军事' by the loaded model.

    Returns False when cleaning leaves no tokens to classify.
    """
    # predict() on a list of tokens returns (labels, probabilities);
    # labels is one single-label list per token, e.g. [['__label__军事'], ...].
    word_labels = model.predict(clean_text(content))[0]
    if not word_labels:
        # Nothing survived cleaning — cannot classify.
        return False
    military_count = word_labels.count(['__label__军事'])
    # Fraction of tokens labelled military; return the comparison directly
    # instead of the original if/else returning True/False.
    return military_count / len(word_labels) > 0.8

if __name__ == '__main__':

    choice = input("训练模型输入1；预处理语料输入2，测试文本输入3：")
    if choice == '1':
        model_train()
    elif choice == '2':
        preprocessing()
    elif choice == '3':

        # Interactive loop: classify user-supplied text until interrupted.
        while True:
            to_word = input("请输入你想分类的文本：")
            # Raw whole-sentence prediction, printed for inspection.
            raw_label = model.predict([to_word])
            print("111",raw_label)

            if m_predict(to_word):
                print("是军事题材")