import os
import re

from sklearn.model_selection import train_test_split
# 显示进度条
from tqdm import tqdm
import pickle


class FourteenPfr:
    """PFR People's Daily tagged corpus, 2014 edition.

    Strips POS annotations from the raw corpus file and, via ``Until``,
    provides a reproducible train/test split, a dictionary built from the
    training lines, and unsegmented test sentences.
    """

    def __init__(self,
                 in_path='./docs/corpus/originalData/2014_pfr_corpus.txt',
                 out_path='./docs/corpus/tempData/2014_processed_text.txt',
                 test_size=0.2, random_state=10):
        # in_path: raw POS-tagged corpus; out_path: processed plain text.
        self.in_path = in_path
        self.out_path = out_path
        # Fraction of lines held out for testing, and the seed that makes
        # every split reproducible across calls.
        self.test_size = test_size
        self.random_state = random_state

    def data_processing(self):
        """Strip POS tags from the raw corpus (shortest-match deletion).

        Writes one space-separated segmented sentence per non-empty input
        line to ``self.out_path``.  Bug fix vs. the original: both file
        handles are now closed via ``with`` instead of being leaked.
        """
        with open(self.in_path, encoding='utf-8') as in_file:
            in_data = in_file.readlines()

        with open(self.out_path, 'w', encoding='utf-8') as corpus_file:
            for line in in_data:
                if line == '\n':
                    continue
                for word in line.strip().split(' '):
                    if word.strip() == '':
                        continue
                    # '[...]' wraps several tokens into one named entity;
                    # a bracket appears on the first/last inner token, so
                    # use if/elif rather than two independent checks.
                    if word.startswith('['):
                        word = word[1:]
                    elif ']' in word:
                        word = word[0:word.index(']')]
                    # Tokens look like WORD/POS -> keep only WORD.
                    # (No '{}' annotations exist in the 2014 corpus.)
                    corpus_file.write(word.split('/')[0] + ' ')
                corpus_file.write('\n')

    def data_split(self):
        """Return ``(train_lines, test_lines)``, preprocessing first if needed."""
        if not os.path.isfile(self.out_path):
            self.data_processing()
        return Until.data_split(self.out_path, self.test_size, self.random_state)

    def generate_dic(self, dic_path='./docs/corpus/tempData/2014_dic.txt'):
        """Build (or load the cached) word dictionary from the training split."""
        return Until.generate_dic(self.data_split()[0], dic_path)

    def generate_test_text(self, test_text_path='./docs/corpus/tempData/2014_test_text.txt'):
        """Build (or load the cached) unsegmented test sentences."""
        return Until.generate_test_text(self.data_split()[1], test_text_path)


class NinetyEightPfr:
    "PFR人民日报标注语料库1998"

    def __init__(self,
                 in_path='./docs/corpus/originalData/199801_PeopleDaily.txt',
                 out_path='./docs/corpus/tempData/199801_processed_text.txt',
                 test_size=0.2, random_state=10):
        self.in_path = in_path
        self.out_path = out_path
        self.test_size = test_size
        self.random_state = random_state

    # 这个方法与14的不同
    def data_processing(self):
        # 读取原始语料文件
        file = open(self.in_path, encoding='utf-8')
        in_data = file.readlines()

        # 预处理后的语料库
        corpus_file = open(self.out_path, 'w', encoding='utf-8')

        # 删除段前标号,[],{},词性标注(最短匹配)
        for sentence in in_data:
            if sentence != '\n':
                words = sentence.strip().split(' ')
                words.pop(0)

                for word in words:
                    if word.strip() != '':
                        if word.startswith('['):
                            word = word[1:]
                        elif ']' in word:
                            word = word[0:word.index(']')]

                        if '{' in word:
                            word = word[0:word.index('{')]

                        w_c = word.split('/')
                        # 生成语料库
                        corpus_file.write(w_c[0] + ' ')

                corpus_file.write('\n')

    def data_split(self):
        if not os.path.isfile(self.out_path):
            self.data_processing()
        return Until.data_split(self.out_path, self.test_size, self.random_state)

    def generate_dic(self, dic_path='./docs/corpus/tempData/199801_dic.txt'):
        return Until.generate_dic(self.data_split()[0], dic_path)

    def generate_test_text(self, test_text_path='./docs/corpus/tempData/199801_test_text.txt'):
        return Until.generate_test_text(self.data_split()[1], test_text_path)


class Until:
    """Shared helpers for corpus splitting, dictionary and test-text creation.

    NOTE(review): the class name looks like a typo for "Util"; it is kept
    as-is because the corpus classes above reference it.
    """

    @staticmethod
    def data_split(out_path, test_size, random_state):
        """Randomly split the processed corpus into train/test line lists.

        Whole lines are shuffled; line content is untouched.  Bug fix vs.
        the original: the corpus file is now closed via ``with`` instead of
        being leaked.
        """
        with open(out_path, encoding='utf-8') as corpus_file:
            corpus = corpus_file.readlines()

        train_data, test_data = train_test_split(
            corpus, test_size=test_size, random_state=random_state)
        return train_data, test_data

    @staticmethod
    def generate_dic(train_data, dic_path=''):
        """Build a word dictionary (a ``set``) from the training lines.

        Tokens are stripped of punctuation before insertion; a token that
        consists of exactly one punctuation character is kept verbatim so
        punctuation itself stays segmentable.  The result is cached with
        pickle at ``dic_path`` and reloaded on subsequent calls.
        """
        # Reuse the cached dictionary when present.
        # NOTE(review): pickle.load is unsafe on untrusted files; acceptable
        # here only because the cache is generated locally.
        if os.path.isfile(dic_path):
            with open(dic_path, 'rb') as cache:
                return pickle.load(cache)

        # A set gives O(1) insertion and deduplicates for free.
        dic = set()
        # Punctuation (ASCII + full-width) removed from each token.
        remove_chars = '[·’!"\#$%&\'()＃！（）*+,-./:;<=>?\@，：?￥★、…．＞【】［］《》？“”‘’\[\\]^_`{|}~]+'
        for sentence in tqdm(train_data):
            for word in sentence.strip().split(' '):
                w = re.sub(remove_chars, "", word)
                if w.strip() != '':
                    dic.add(w)  # set.add is already a no-op on duplicates
                elif len(word) == 1:
                    # The token was a single punctuation mark: keep it.
                    dic.add(word)

        with open(dic_path, 'wb') as cache:
            pickle.dump(dic, cache)
        return dic

    @staticmethod
    def generate_test_text(test_data, test_text_path=''):
        """Join each test line's words into one unsegmented sentence.

        Returns a list of sentences (one per test line) and caches it with
        pickle at ``test_text_path``; subsequent calls reload the cache.
        """
        if os.path.isfile(test_text_path):
            with open(test_text_path, 'rb') as cache:
                return pickle.load(cache)

        # Each line is an independent sentence; drop the spaces.
        test_text = [''.join(sentence.strip().split(' '))
                     for sentence in tqdm(test_data)]

        with open(test_text_path, 'wb') as cache:
            pickle.dump(test_text, cache)
        return test_text
