import json
import random
import re


class MyTokenizer:
    """Build a word/punctuation vocabulary from a sentence file and emit
    per-word training records as JSON lines and CSV.

    ``vocab_dir`` must end with a path separator: the constructor reads
    ``sentence.txt`` from it and immediately writes ``sentences_data.json``
    and ``sentences_data.csv`` beside it (import-time I/O).
    """

    def __init__(self, vocab_dir):
        # Paths are built by plain concatenation, so vocab_dir needs a
        # trailing "/" (kept as-is for backward compatibility with callers).
        self.vocab_file = vocab_dir + "sentence.txt"
        self.json_data_file = vocab_dir + 'sentences_data.json'
        self.csv_data_file = vocab_dir + 'sentences_data.csv'
        self.words2ids = {}
        self.ids2words = {}
        # Part-of-speech classes; currently unused by the pipeline below.
        self.word_classes = {
            0: "verbs1",  # verbs
            1: "verbs2",  # non-finite verbs
            2: "nouns",  # nouns
            3: "pronouns",  # pronouns
            4: "articles",  # articles
            5: "conjunctions",  # conjunctions
            6: "preposition",  # prepositions
            7: "numerals",  # numerals
            8: "quantifiers",  # quantifiers
            9: "adjectives",  # adjectives
            10: "adverbs",  # adverbs
            11: "interrogative_words",  # interrogative words
            12: "interjections"}  # interjections
        # Punctuation marks double as token separators AND vocabulary
        # entries; each dict key is the mark's fixed token id (0-17).
        self.punctuations = {
            0: " ",
            1: ".",
            2: ",",
            3: "?",
            4: "!",
            5: ":",
            6: "--",
            7: ";",
            8: "-",
            9: "\"",
            10: "\'",
            11: "(",
            12: ")",
            13: "[",
            14: "]",
            15: "{",
            16: "}",
            17: "`",
        }
        self.sentence_classes = {
        }
        # re.escape handles multi-character marks such as "--" correctly;
        # the previous single-backslash prefix only escaped the first char.
        self.separators = [re.escape(self.punctuations[i]) for i in self.punctuations]
        self.create_vocab()
        self.sentences_data()
        self.write_sentence_data_to_file()
        self.sentence_data_to_csv()

    def _read_sentences(self):
        """Return the vocab file's lines with newlines and edge whitespace removed."""
        with open(self.vocab_file, "r", encoding="utf-8") as f:
            return [line.replace("\n", "").strip() for line in f]

    def create_vocab(self):
        """Assign integer ids to every token in the vocab file.

        Punctuation keeps its fixed id from ``self.punctuations``; plain
        words receive fresh ids starting just past the punctuation range.

        Returns:
            tuple: ``(words2ids, ids2words)`` — the two mappings, which are
            also stored on the instance.
        """
        # Inverse map mark -> id, built once instead of a linear scan per word.
        punct_ids = {mark: pid for pid, mark in self.punctuations.items()}
        next_id = max(self.punctuations) + 1
        for sentence in self._read_sentences():
            for w in self.word_split(sentence):
                if w in self.words2ids:
                    continue
                if w in punct_ids:
                    self.words2ids[w] = punct_ids[w]
                else:
                    self.words2ids[w] = next_id
                    next_id += 1

        self.ids2words = {wid: w for w, wid in self.words2ids.items()}
        return self.words2ids, self.ids2words

    def sentences_data(self):
        """Build per-word records for every sentence.

        Each sentence yields one correct copy (``sentence_type`` 1) followed
        by nine randomly shuffled "error" copies (``sentence_type`` 0), each
        under its own ``session_id``.

        Returns:
            list[dict]: the records; also stored as ``self.sentence_datas``.
        """
        encoded = []
        for sentence in self._read_sentences():
            words = self.word_split(sentence)
            ids = [self.words2ids[w] for w in words]
            tokens = [self.ids2words[i] for i in ids]
            encoded.append((ids, tokens))

        final_info = []
        index = 0
        for ids, tokens in encoded:
            # One correct copy in original word order.
            final_info.extend(
                self._word_records(index, ids, tokens, 1, range(len(ids))))
            index += 1
            for _ in range(9):
                # Corrupted copy: same words, shuffled order.
                order = list(range(len(ids)))
                random.shuffle(order)
                final_info.extend(
                    self._word_records(index, ids, tokens, 0, order))
                index += 1

        self.sentence_datas = final_info
        return final_info

    def _word_records(self, session_id, ids, tokens, sentence_type, order):
        """One record dict per word position listed in *order*."""
        return [{
            "session_id": session_id,
            "word_id": ids[j],
            "word": tokens[j],
            "sentence_type": sentence_type,
            # Punctuation ids (0-17) are their own word_type; words get -1.
            "word_type": ids[j] if ids[j] in self.punctuations else -1,
        } for j in order]

    def sentence_data_to_csv(self):
        """Project the JSON-lines file to CSV columns:
        session_id, word_id, sentence_type, word_type."""
        with open(self.json_data_file, "r", encoding="utf-8") as src:
            # Skip blank lines defensively; each remaining line is one record.
            records = [json.loads(line) for line in src if line.strip()]
        with open(self.csv_data_file, "w", encoding="utf-8") as dst:
            for d in records:
                dst.write("{},{},{},{}\n".format(
                    d["session_id"], d["word_id"], d["sentence_type"], d["word_type"]))

    def write_sentence_data_to_file(self):
        """Write ``self.sentence_datas`` as JSON lines, one record per line.

        Records with ``word_id`` 0 (the space token) are dropped.
        """
        with open(self.json_data_file, "w", encoding="utf-8") as file:
            for data in self.sentence_datas:
                if data["word_id"] != 0:
                    json.dump(data, file)
                    file.write("\n")  # newline separates the JSON objects

    def word_split(self, text):
        """Split an English sentence into lowercase word and punctuation tokens.

        Splitting on a capturing group keeps the separators interleaved with
        the word pieces in match order; empty pieces are dropped.
        (Lowercasing the separators is a no-op, so everything is lowercased.)
        """
        pattern = "({})".format("|".join(self.separators))
        return [t.lower() for t in re.split(pattern, text.strip()) if t != ""]

    def predict_words(self, text):
        """Encode *text* into per-word records, growing the vocab on the fly.

        Unknown words are assigned the next id after the current maximum and
        added to both mappings (a deliberate side effect). Records for the
        space token (``word_id`` 0) are excluded from the result.
        """
        ids = []
        tokens = []
        for w in self.word_split(text):
            if w not in self.words2ids:
                new_id = max(self.ids2words) + 1
                self.words2ids[w] = new_id
                self.ids2words[new_id] = w
            wid = self.words2ids[w]
            ids.append(wid)
            tokens.append(self.ids2words[wid])

        # Single input sentence, so every record shares session_id 0.
        final_info = [{
            "session_id": 0,
            "word_id": wid,
            "word": tok,
            "sentence_type": 1,
            "word_type": wid if wid in self.punctuations else -1,
        } for wid, tok in zip(ids, tokens)]

        return [data for data in final_info if data["word_id"] != 0]


# NOTE(review): hard-coded absolute data path and import-time side effects —
# constructing MyTokenizer here reads sentence.txt and writes the JSON/CSV
# files the moment this module is imported. Consider a configurable path and
# an `if __name__ == "__main__":` guard; left unchanged because `Tokenizer`
# is a public module-level name that other modules may import.
base_dir ="/home/yitian001/code/gnn/EnglishLearning/data/"
Tokenizer = MyTokenizer(base_dir)
