import os.path
import random

import pandas
import chatbot.lib.cut_sentence as cut_sentence
import chatbot.config as config
from tqdm import tqdm
import json

# Origin-corpus locations, relative to the project working directory:
xiaohuangji_path = "corpus/classify/origin_corpus/小黄鸡未分词.conv"  # "xiaohuangji" chat corpus, un-segmented .conv format
byhand_path = "corpus/classify/origin_corpus/手动构造的问题.json"  # hand-crafted questions, JSON mapping key -> list of line-lists
crawled_path = "corpus/classify/origin_corpus/爬虫抓取的问题.csv"  # crawled questions, one per line


def keywords_in_line(line):
    """Return True if *line* contains any blacklisted keyword.

    Lines mentioning IT-training brands/courses must not be labeled as
    chit-chat corpus, so callers drop any line this returns True for.

    :param line: sentence string (raw or already cut)
    :return: bool — True when any keyword occurs as a substring
    """
    # Duplicate "前端" removed from the original list (no behavior change).
    # Keywords are all lowercase, so match against the lowered line to also
    # catch mixed-case mentions such as "Python" or "Java".
    keywords = ("传智播客", "传智", "黑马程序员", "黑马", "python",
                "人工智能", "c语言", "c++", "java", "javaee", "前端",
                "移动开发", "ui", "ue", "大数据", "软件测试", "php",
                "h5", "产品经理", "linux", "运维", "go语言", "区块链",
                "影视制作", "pmp", "项目管理", "新媒体", "小程序")
    lowered = line.lower()
    return any(keyword in lowered for keyword in keywords)


def process_xiaohuangji(train_file, test_file):
    """Extract question lines from the xiaohuangji corpus as chat samples.

    The .conv format alternates "E" separator lines with "M <text>" lines;
    only the first M of each pair (the question) is kept.  Each kept line is
    cut into tokens and written as ``tokens\\t__label__chat``.  Roughly 2/5
    of the samples go to *test_file*, the rest to *train_file*.

    :param train_file: writable file object for training samples
    :param test_file: writable file object for test samples
    """
    flag = False  # True once the question (first M) of a pair was seen
    # `with` + streaming iteration: the original leaked the file handle and
    # materialized the whole corpus via readlines().
    with open(xiaohuangji_path, encoding='utf-8') as f:
        for line in tqdm(f, desc="小黄鸡"):
            if line.startswith("E"):
                flag = False
                continue
            elif line.startswith("M"):
                if not flag:  # first M of the pair: the question
                    flag = True
                    line = line[1:].strip()
                else:  # second M (the answer): skip it
                    continue
            line_cuted = cut_sentence.cut(line)
            if not keywords_in_line(line):
                # __label__ is fasttext's default label prefix; using another
                # prefix would require passing the label parameter to fasttext
                line_cut = " ".join(line_cuted) + "\t" + "__label__chat"
                # randint(1,5) % 4 == 1 holds for 1 and 5 -> ~40% test split
                if random.randint(1, 5) % 4 == 1:
                    test_file.write(line_cut + "\n")
                else:
                    train_file.write(line_cut + "\n")


def process_byhand_data(train_file, test_file):
    """Write the hand-crafted question corpus as QA-labeled samples.

    The JSON file maps each key to a list of line-lists; every individual
    line is cut into tokens and written as ``tokens\\t__label__QA``, split
    ~40% to *test_file* and the rest to *train_file*.

    :param train_file: writable file object for training samples
    :param test_file: writable file object for test samples
    """
    # `with` + json.load: the original leaked the handle from
    # json.loads(open(...).read()).
    with open(byhand_path, encoding='utf-8') as f:
        total_lines = json.load(f)
    for key in tqdm(total_lines, desc="处理数据"):
        for lines in total_lines[key]:
            for line in lines:
                line_cuted = cut_sentence.cut(line)
                line_cut = " ".join(line_cuted) + "\t" + "__label__QA"
                # randint(1,5) % 4 == 1 holds for 1 and 5 -> ~40% test split
                if random.randint(1, 5) % 4 == 1:
                    test_file.write(line_cut + "\n")
                else:
                    train_file.write(line_cut + "\n")


def process_crawled_data(train_file, test_file):
    """Write the crawled question corpus as QA-labeled samples.

    Every line of the CSV is cut into tokens and written as
    ``tokens\\t__label__QA``, split ~40% to *test_file* and the rest to
    *train_file*.

    :param train_file: writable file object for training samples
    :param test_file: writable file object for test samples
    """
    # `with` + streaming iteration: the original leaked the file handle and
    # materialized the whole file via readlines().
    with open(crawled_path, encoding='utf-8') as f:
        for line in tqdm(f, desc="爬虫"):
            line_cuted = cut_sentence.cut(line)
            line_cut = " ".join(line_cuted) + "\t" + "__label__QA"
            # randint(1,5) % 4 == 1 holds for 1 and 5 -> ~40% test split
            if random.randint(1, 5) % 4 == 1:
                test_file.write(line_cut + "\n")
            else:
                train_file.write(line_cut + "\n")


def process(by_word=False):
    """Build the fasttext classification corpus (train and test files).

    Truncates/creates the target files from config, then appends samples
    from all three origin corpora.

    :param by_word: when True, use the by-word corpus paths from config
    """
    train_file_path = config.classify_corpus_by_word_train_path if by_word else config.classify_corpus_train_path
    test_file_path = config.classify_corpus_by_word_test_path if by_word else config.classify_corpus_test_path
    # Mode "w" truncates the file, replacing the original's
    # os.remove-then-open("a") dance; `with` guarantees the handles are
    # closed even if a processing step raises.
    with open(train_file_path, "w", encoding='utf-8') as train_file, \
            open(test_file_path, "w", encoding='utf-8') as test_file:
        process_xiaohuangji(train_file, test_file)
        process_byhand_data(train_file, test_file)
        process_crawled_data(train_file, test_file)
