#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import os, json, random
import re, sklearn
from functools import reduce
from pyltp import Segmentor
from pyltp import SentenceSplitter
from pyltp import Postagger
from functools import reduce
import jieba

# Characters that carry special meaning in regular expressions; kept so callers
# can escape them before building patterns from raw text.
special_char = set([".", "*", "|", "^", "+"])
LTP_DATA_DIR = 'E:/data/nlp/ltp_data_v3.4.0'  # root directory of the LTP models
cws_model_path = os.path.join(LTP_DATA_DIR, 'cws.model')  # word-segmentation model (`cws.model`)
pos_model_path = os.path.join(LTP_DATA_DIR, "pos.model")    # POS-tagging model
# LTP 3.4 (863 tagset) POS tag -> Chinese description.
# Bug fix: "u" is 助词 (auxiliary/particle) in the LTP tagset; it previously
# duplicated "d" (副词, adverb).
pos_dict = {
    "a": "形容词", "b": "其他修饰词", "c": "连词", "d": "副词", "e": "感叹词", "g": "形态素",
    "h": "前缀", "i": "成语", "j": "缩写", "k": "后缀", "m": "数词", "n": "一般名词", "nd": "方向名词",
    "nh": "人名", "ni": "组织名", "nl": "方位名词", "ns": "地理名词", "nt": "时间名词", "nz": "其他专有名词",
    "o": "拟声词", "p": "介词", "q": "量词", "r": "代词", "u": "助词", "v": "动词",
    "wp": "标点", "ws": "外语", "x": "非语义词", "z": "强化描述词"
}


def transferOperator(text):
    """Spell out arithmetic/comparison operators in Chinese for each line.

    Args:
        text: iterable of strings (lines of raw text).

    Returns:
        A list of strings where ">", "<", "=", "*", "/" are replaced by
        their Chinese readings.
    """
    # Bug fixes: "<" previously mapped to "大于" (greater than) instead of
    # "小于" (less than), and the pattern "\*" (a literal backslash + star)
    # could never match a plain "*" because str.replace is not regex-based.
    replacements = [(">", "大于"), ("<", "小于"), ("=", "等于"), ("*", "乘"), ("/", "除以")]
    new_text = []
    for line in text:
        new_text.append(reduce(lambda acc, pair: acc.replace(*pair), replacements, line))

    return new_text

def removeStopWords(text):
    """Remove stop words from tab-separated token lines.

    The stop-word list is read from ``<data_path>/stop_words.txt`` (GBK
    encoded); ``data_path`` is a module-level variable set in ``__main__``.

    Args:
        text: iterable of "\t"-joined token lines.

    Returns:
        A list of cleaned lines, each re-joined with "\t" and terminated
        by "\n".
    """
    # Use a set for O(1) membership tests (the original scanned a list for
    # every token) and a context manager so the file handle is closed.
    with open(os.path.join(data_path, "stop_words.txt"), "r", encoding="GBK") as fin:
        stop_words = {line.strip() for line in fin}

    text_no_stop_words = []
    for line in text:
        kept = [word for word in line.strip().split("\t") if word not in stop_words]
        text_no_stop_words.append("\t".join(kept) + "\n")
    return text_no_stop_words

def word_segmentor(text):
    """Segment each line into words with jieba, joining tokens with tabs.

    Args:
        text: iterable of raw text lines.

    Returns:
        A list of strings, one per input line, with tokens "\t"-separated.
    """
    # Load the project lexicon so domain terms survive as single tokens.
    jieba.load_userdict(os.path.join(".", "data", "lexicon"))
    return ["\t".join(jieba.cut(line.strip())) for line in text]

def sentence_splitter(text):
    """Split each input line into sentences using LTP's SentenceSplitter.

    Args:
        text: iterable of text lines.

    Returns:
        A flat list of non-empty sentences, in original order.
    """
    sentences = []
    for line in text:
        # SentenceSplitter yields the sentences of one line; drop empties.
        sentences.extend(sent for sent in SentenceSplitter.split(line) if sent != "")
    return sentences

def removeIrrelativeLetter(text):
    """Drop stray unit/letter tokens (e.g. "Hz", "kV") from token lines.

    Args:
        text: iterable of "\t"-joined token lines.

    Returns:
        A list of lines with the noise tokens removed, each re-joined with
        "\t" and terminated by "\n".
    """
    letters = {"r", "x", "Hz", "s", "kV"}
    cleaned = []
    for line in text:
        kept = [token for token in line.strip().split("\t") if token not in letters]
        cleaned.append("\t".join(kept) + "\n")
    return cleaned

def buildWordDict(embedding_len):
    """Parse a pretrained embedding file and write three derived artifacts.

    Reads ``./data/sgns.baidubaike.bigram-char/sgns.baidubaike.bigram-char``
    and produces, in the same directory:
      * ``words.txt``        — word count followed by one word per line,
      * ``word_vector.txt``  — tab-separated vectors, one row per word,
      * ``word_dict.json``   — word -> vector mapping.

    Args:
        embedding_len: dimensionality of the vectors; the last
            ``embedding_len`` whitespace-separated fields of each line are
            the vector, everything before them is the word (which may itself
            contain spaces).
    """
    words, embeddings, word_dict = [], [], {}
    with open("./data/sgns.baidubaike.bigram-char/sgns.baidubaike.bigram-char", "r", encoding="utf-8") as fin:
        fin.readline()  # skip the "<count> <dim>" header line
        for line in fin:
            elems = line.strip().split()
            # The word may contain spaces, so keep everything except the
            # trailing `embedding_len` numeric fields.
            word = "".join(elems[:-embedding_len])
            words.append(word)
            embedding = [float(elem) for elem in elems[-embedding_len:]]
            embeddings.append(embedding)
            # Bug fix: key by the full reconstructed word, not elems[0], so
            # word_dict stays consistent with the words list for entries
            # whose token contains whitespace.
            word_dict[word] = embedding

    with open(os.path.join(".", "data", "sgns.baidubaike.bigram-char", "words.txt"), "w", encoding="utf-8") as fout:
        fout.write(str(len(words)) + "\n")
        for word in words:
            fout.write(word + "\n")

    np.savetxt(os.path.join(".", "data", "sgns.baidubaike.bigram-char", "word_vector.txt"), embeddings, delimiter="\t", fmt="%f")
    with open(os.path.join(".", "data", "sgns.baidubaike.bigram-char", "word_dict.json"), "w", encoding="utf-8") as fout:
        json.dump(word_dict, fout)


class DataPreprocess(object):
    """Segmentation helper around jieba / LTP.

    Splits sentences into words with a primary segmenter, re-segments tokens
    missing from the embedding vocabulary with a secondary segmenter, and
    patches the embedding files with context-derived vectors for words that
    remain unknown.
    """

    def __init__(self, seg, sub_seg, base_path, words_path, window_size, embedding_dim):
        # seg / sub_seg: "jieba" or "ltp". sub_seg re-splits tokens that the
        # primary segmenter produced but the vocabulary lacks.
        self.seg = seg
        self.sub_seg = sub_seg
        self.base_path = base_path

        if seg == "jieba":
            # jieba segmentation (user lexicon keeps domain terms intact)
            jieba.load_userdict(os.path.join(base_path, "lexicon"))
            self.segmentor = jieba
        elif seg == "ltp":
            # LTP segmentation
            self.segmentor = Segmentor()  # initialize the instance
            self.segmentor.load_with_lexicon(cws_model_path, os.path.join(base_path, "lexicon"))
        else:
            raise ValueError("Segmentor must be set...")

        if sub_seg == "jieba":
            # jieba segmentation
            jieba.load_userdict(os.path.join(base_path, "lexicon"))
            self.sub_segmentor = jieba
        elif sub_seg == "ltp":
            # LTP segmentation
            self.sub_segmentor = Segmentor()  # initialize the instance
            self.sub_segmentor.load_with_lexicon(cws_model_path, os.path.join(base_path, "lexicon"))
        else:
            raise ValueError("Sub segmentor must be set...")

        # Vocabulary of words that already have embedding vectors.
        # NOTE(review): the file handle is never closed here — harmless for a
        # script, but worth a `with` block if this class is reused.
        self.words = set()
        fin = open(os.path.join(words_path), "r", encoding="utf-8")
        for word in fin:
            self.words.add(word.strip())

        # window_size: number of context tokens gathered around an unknown
        # word; embedding_dim: length of each embedding vector.
        self.window_size = window_size
        self.embedding_dim = embedding_dim


    def cutSentence(self, sentence):
        """Segment one sentence; return (words, unknown-word contexts).

        Tokens absent from ``self.words`` are re-split with the secondary
        segmenter; sub-tokens that are still unknown get a context window
        recorded in the returned dict, keyed by the ORIGINAL unsplit token.
        """
        ignore_words = ['\ufeff']  # drop a BOM that survived decoding
        unk_words_context = {}
        if self.seg == "jieba":
            words = list(jieba.cut(sentence))
        elif self.seg == "ltp":
            words = list(self.segmentor.segment(sentence))
        words = [word for word in words if len(word) > 0 and word not in ignore_words]
        idx = 0
        while idx < len(words):
            word = words[idx]
            if word in self.words:
                idx += 1
                continue
            # Unknown token: try splitting it further with the sub-segmenter.
            if self.sub_seg == "jieba":
                sub_words = list(jieba.cut(word))
            elif self.sub_seg == "ltp":
                sub_words = list(self.sub_segmentor.segment(word))
            sub_words = [sub_word for sub_word in sub_words if len(sub_word) > 0 and sub_word not in ignore_words]
            # Splice the sub-tokens into `words` in place of the unknown token.
            words = words[: idx] + sub_words + words[idx + 1:]
            sub_idx = 0
            while sub_idx < len(sub_words):
                sub_word = sub_words[sub_idx]
                if sub_word not in self.words:
                    # print(word)
                    # NOTE(review): the second slice starts at idx + 1 rather
                    # than idx + sub_idx + 1, so the window may not be centered
                    # on the unknown sub-token — confirm intended behavior.
                    unk_words_context[word] = words[max(idx + sub_idx - self.window_size//2, 0): idx] \
                                                           + words[idx + 1: min(idx + sub_idx + (self.window_size - self.window_size//2), len(words))]
                sub_idx += 1
            idx += len(sub_words)
        return words, unk_words_context


    def calculateVector(self, context):
        """Sum the embedding vectors of the given context words.

        Lazily loads ``word_dict.json`` on first use.  Context words without
        a known vector are skipped (best-effort).  Returns a plain list of
        length ``self.embedding_dim``.
        """
        if not hasattr(self, "word_vector_dict"):
            with open(os.path.join(".", "data", "sgns.baidubaike.bigram-char", "word_dict.json"), "r",
                      encoding="utf-8") as fin:
                self.word_vector_dict = json.load(fin)
        vector = np.zeros(self.embedding_dim)
        for word in context:
            try:
                vector += np.array(self.word_vector_dict[word])
            except: continue
        return list(vector)


    def addVector(self, unk_words_context):
        """Add in-memory vectors for unknown words from their contexts.

        Only updates ``self.word_vector_dict``; persisting the new vectors is
        the caller's responsibility.
        """
        if not hasattr(self, "word_vector_dict"):
            with open(os.path.join(".", "data", "sgns.baidubaike.bigram-char", "word_dict.json"), "r",
                      encoding="utf-8") as fin:
                self.word_vector_dict = json.load(fin)
        for word in unk_words_context:
            self.word_vector_dict[word] = self.calculateVector(unk_words_context[word])
        # with open(os.path.join(".", "data", "sgns.baidubaike.bigram-char", "word_vector.txt"), "a+",
        #           encoding="utf-8") as fout:
        #     for word in unk_words_context:
        #         fout.write(" ".join([str(num) for num in self.word_vector_dict[word]]) + "\n")


    def splitSentenceAndTranslatedSentence(self, filename, save_path):
        """Tokenize ``<base_path>/<filename>.txt``, patch the embedding files
        for any unknown words, and write tokens to ``<save_path>/<filename>.atok``.
        """
        sentences = []
        all_unk_words_context = []
        fin = open(os.path.join(self.base_path, filename + ".txt"), "r", encoding="utf-8")
        for line in fin:
            sentence = line.strip()
            sentence_words, unk_words_context = self.cutSentence(sentence)
            if len(unk_words_context):
                all_unk_words_context.append(unk_words_context)
            sentences.append(sentence_words)
        if len(all_unk_words_context):
            # Persist every newly derived vector: append to word_vector.txt and
            # words.txt per sentence, then rewrite the JSON dict once at the end.
            for unk_words_context in all_unk_words_context:
                self.addVector(unk_words_context)
                with open(os.path.join(".", "data", "sgns.baidubaike.bigram-char", "word_vector.txt"), "a+",
                          encoding="utf-8") as fout:
                    for word in unk_words_context:
                        fout.write(" ".join([str(num) for num in self.word_vector_dict[word]]) + "\n")
                with open(os.path.join(".", "data", "sgns.baidubaike.bigram-char", "words.txt"), "a+",
                          encoding="utf-8") as fout:
                    for word in unk_words_context:
                        fout.write(word + "\n")
            with open(os.path.join(".", "data", "sgns.baidubaike.bigram-char", "word_dict.json"), "w",
                          encoding="utf-8") as fout:
                json.dump(self.word_vector_dict, fout)
        with open(os.path.join(save_path, filename + ".atok"), "w", encoding="utf-8") as fout:
            for sentence_words in sentences:
                fout.write(" ".join(sentence_words) + "\n")

        # sentences = []
        # fin = open(os.path.join(data_path, "translate_data.txt"), "r", encoding="utf-8")
        # for line in fin:
        #     sentence = line.strip()
        #     sentence_words, unk_words_context = self.cutSentence(sentence)
        #     if len(unk_words_context):
        #         self.addVector(unk_words_context)
        #     sentences.append(sentence_words)
        # with open(os.path.join(data_path, "translate_sentences.txt"), "w", encoding="utf-8") as fout:
        #     for sentence_words in sentences:
        #         fout.write(" ".join(sentence_words) + "\n")


def splitSentenceandTriple(seg, sub_seg, window_size, embedding_len):
    """Segment dialogue sentences and their (head, relation, tail) triples,
    collecting context windows for out-of-vocabulary words and appending
    context-derived vectors to the embedding files.

    NOTE(review): when seg == "jieba", the local variable `segmentor` is only
    bound if sub_seg == "ltp"; with seg == sub_seg == "jieba" the later
    `segmentor.segment(...)` calls raise NameError.  Also, the lists below are
    mutated while being iterated with enumerate(), so indices can drift after
    a splice — confirm intended behavior before relying on the windows.
    """
    words = set()
    ignore_words = ['\ufeff']
    fin = open(os.path.join(".", "data", "sgns.baidubaike.bigram-char", "words.txt"), "r", encoding="utf-8")
    for word in fin.readlines():
        words.add(word.strip())

    if seg == "jieba":
        # jieba segmentation
        jieba.load_userdict(os.path.join(".", "data", "lexicon"))
    elif seg == "ltp":
        # LTP segmentation
        segmentor = Segmentor()  # initialize the instance
        segmentor.load_with_lexicon(cws_model_path, os.path.join("data", "lexicon"))
    else:
        raise ValueError("Segmentor must be set...")

    if sub_seg == "jieba":
        # jieba segmentation
        jieba.load_userdict(os.path.join(".", "data", "lexicon"))
    elif sub_seg == "ltp":
        # LTP segmentation
        segmentor = Segmentor()  # initialize the instance
        segmentor.load_with_lexicon(cws_model_path, os.path.join("data", "lexicon"))
    else:
        raise ValueError("Sub segmentor must be set...")

    # Each line of dialogue.txt is: sentence \t triple1 \t triple2 ...
    fin = open(os.path.join(".", "data", "dialogue.txt"), "r", encoding="utf-8")
    unk_words, unk_word_sentence_dict = set(), {}
    for line in fin:
        elems = line.strip().split("\t")
        elems = [elem for elem in elems if elem]
        sentence = elems[0]
        triples = elems[1:]
        sentences = []

        if seg == "jieba":
            sentence_words = list(jieba.cut(sentence))
        elif seg == "ltp":
            sentence_words = list(segmentor.segment(sentence))
        sentence_words = [word for word in sentence_words if word not in ignore_words]
        # Re-split unknown tokens; record a context window for any sub-token
        # that is still out of vocabulary (keyed by the original token).
        for idx, word in enumerate(sentence_words):
            if word not in words:
                sub_words = list(segmentor.segment(word))
                sentence_words = sentence_words[: idx] + sub_words + sentence_words[idx + 1:]
                for sub_word in sub_words:
                    if sub_word not in words:
                        print(word)
                        unk_words.add(word)
                        unk_word_sentence_dict[word] = sentence_words[max(idx - window_size//2, 0): idx] \
                                                       + sentence_words[idx + 1: min(idx + (window_size - window_size//2), len(sentence_words))]
        sentences.append(sentence_words)

        # NOTE(review): `entities`/`relations` are assigned but never used.
        entities, relations = [], []
        for triple in triples:
            try:
                head, relation, tail = triple.split(" ")
            except:
                print("error")
            if seg == "jieba":
                head_words = list(jieba.cut(head))
                relation_words = list(jieba.cut(relation))
                tail_words = list(jieba.cut(tail))
            elif seg == "ltp":
                head_words = list(segmentor.segment(head))
                relation_words = list(segmentor.segment(relation))
                tail_words = list(segmentor.segment(tail))

            # Same OOV handling for each part of the triple.
            for idx, word in enumerate(head_words):
                if word not in words:
                    sub_words = list(segmentor.segment(word))
                    head_words = head_words[: idx] + sub_words + head_words[idx+1:]
                    for sub_word in sub_words:
                        if sub_word not in words:
                            print(word)
                            unk_words.add(word)
                            unk_word_sentence_dict[word] = head_words[max(idx - window_size//2, 0): idx] \
                                                           + head_words[idx + 1: min(idx + (window_size - window_size//2), len(head_words))]
            for idx, word in enumerate(relation_words):
                if word not in words:
                    sub_words = list(segmentor.segment(word))
                    relation_words = relation_words[: idx] + sub_words + relation_words[idx+1:]
                    for sub_word in sub_words:
                        if sub_word not in words:
                            print(word)
                            unk_words.add(word)
                            unk_word_sentence_dict[word] = relation_words[max(idx - window_size//2, 0) :idx] \
                                                           + relation_words[idx + 1: min(idx + (window_size - window_size//2), len(relation_words))]
            for idx, word in enumerate(tail_words):
                if word not in words:
                    sub_words = list(segmentor.segment(word))
                    tail_words = tail_words[:idx] + sub_words + tail_words[idx+1:]
                    for sub_word in sub_words:
                        if sub_word not in words:
                            print(word)
                            unk_words.add(word)
                            unk_word_sentence_dict[word] = tail_words[max(idx - window_size//2, 0):idx] \
                                                           + tail_words[idx + 1: min(idx + (window_size - window_size//2), len(tail_words))]
    # Derive a vector for each unknown word by summing its context vectors
    # (context words without a vector are skipped).
    with open(os.path.join(data_path, "sgns.baidubaike.bigram-char", "word_dict.json"), "r", encoding="utf-8") as fin:
        word_vector_dict = json.load(fin)

    vectors = []
    for word in unk_word_sentence_dict:
        context = unk_word_sentence_dict[word]
        # vector = reduce(lambda x, y: x + y, [np.array(word_vector_dict[context_word]) for context_word in context]) / len(context)
        vector = np.zeros(embedding_len)
        for context_word in context:
            try:
                vector += np.array(word_vector_dict[context_word])
            except: continue
        word_vector_dict[word] = list(vector)
        vectors.append(vector)

    with open(os.path.join(data_path, "sgns.baidubaike.bigram-char", "word_dict.json"), "w", encoding="utf-8") as fout:
        json.dump(word_vector_dict, fout)

    # Append the new vectors/words to the flat embedding files.
    with open(os.path.join(data_path, "sgns.baidubaike.bigram-char", "word_vector.txt"), "a+", encoding="utf-8") as fout:
        for vector in vectors:
            fout.write("\t".join([str(num) for num in vector]) + "\n")
    with open(os.path.join(data_path, "sgns.baidubaike.bigram-char", "words.txt"), "a+", encoding="utf-8") as fout:
        for word in unk_word_sentence_dict:
            fout.write(word + "\n")


    print("finish")

def generateDialogData():
    """Build aligned (template, raw) sentence pairs for transformer training.

    Parses ``dialogue_supp.txt`` for "如果…，则…" style rules, expands every
    (condition, operator) pair into several paraphrased template sentences,
    and pairs each with a canonical raw sentence.  Templates go to
    ``transformer/src_data.txt`` and raw sentences to
    ``transformer/tgt_data.txt``.
    """
    source = open(os.path.join(data_path, "dialogue_supp.txt"), "r", encoding="utf-8")
    raw_sentence, template_sentence = [], []
    conditions, operators = [], []
    for record in source:
        head = record.strip().split("\t")[0]
        pieces = head.strip().split("，")
        cond = pieces[0]
        oper = "，".join(pieces[1:])
        try:
            # re.search returns None when the marker is absent; the attribute
            # error then skips the line.
            cond_span = re.search("如果是|如果", cond).span()
            oper_span = re.search("则", oper).span()
        except: continue
        cond = cond[cond_span[1]:]
        oper = oper[oper_span[1]:]

        if cond[:4] != "起始状态":
            conditions.append(cond)
        operators.append(oper)

    prepositions = ["则", "就", "那么", "应该", "就需要", "就应该", "则需要", "则应该"]
    # Every operator paired with the initial state condition.
    for oper in operators:
        generated = __generateSentence("起始状态", oper, prepositions)
        template_sentence += generated
        raw_sentence += ["如果是起始状态，则" + oper] * len(generated)
    # Full cross product of the remaining conditions and all operators.
    for cond in conditions:
        for oper in operators:
            generated = __generateSentence(cond, oper, prepositions)
            template_sentence += generated
            for _ in range(len(generated)):
                if cond[:2] == "起始":
                    # Randomly vary the canonical phrasing for 起始 conditions.
                    joiner = "，则" if np.random.rand() < 0.5 else "的状态，则"
                    raw_sentence.append("如果是" + cond + joiner + oper)
                else:
                    raw_sentence.append("如果" + cond + "，则" + oper)

    with open(os.path.join(data_path, "transformer", "tgt_data.txt"), "w", encoding="utf-8") as fout:
        for line in raw_sentence:
            fout.write(line + "\n")

    with open(os.path.join(data_path, "transformer", "src_data.txt"), "w", encoding="utf-8") as fout:
        for line in template_sentence:
            fout.write(line + "\n")

def __generateSentence(condition, operator, prepositions):
    """Expand one (condition, operator) pair into paraphrased sentences.

    One preposition is sampled from ``prepositions`` (without replacement)
    and combined with a fixed set of sentence frames; the literal condition
    "起始状态" gets its own, smaller frame set.
    """
    # num = random.randint(1, len(prepositions))
    picked = np.random.choice(prepositions, 1, replace=False)
    if condition != "起始状态":
        heads = [
            condition + "，",
            "如果处于" + condition + "的状态，",
            "若是" + condition + "，",
            "若" + condition + "，",
            "若处于" + condition + "的状态，",
            "当处于" + condition + "的状态，",
            "当" + condition + "的状态，",
            "一旦处于" + condition + "的状态，",
            "一旦" + condition + "的状态，",
        ]
    else:
        heads = [
            # condition alone was deliberately excluded here
            "如果处于" + condition + "，",
            "若是" + condition + "，",
            "若处于" + condition + "，",
            "当处于" + condition + "，",
            "一旦处于" + condition + "，",
        ]
    return [head + preposition + operator for preposition in picked for head in heads]

def splitTrainAndValid(data_path, src_file, tgt_file, rate):
    """Shuffle a parallel corpus and split it into train/valid files.

    Args:
        data_path: directory containing the inputs; outputs are written here
            as train.src.txt / train.tgt.txt / valid.src.txt / valid.tgt.txt.
        src_file: source-side file name (one example per line).
        tgt_file: target-side file name, aligned line-by-line with src_file.
        rate: fraction of examples assigned to the training split.

    Raises:
        AssertionError: if the two files have different line counts.
    """
    with open(os.path.join(data_path, src_file), "r", encoding="utf-8") as fin:
        src = fin.readlines()
    with open(os.path.join(data_path, tgt_file), "r", encoding="utf-8") as fin:
        tgt = fin.readlines()

    assert len(src) == len(tgt), "src/tgt line counts differ"
    train_len = int(len(src) * rate)

    # Shuffle src and tgt in tandem with the stdlib instead of
    # sklearn.utils.shuffle: `import sklearn` does not reliably expose the
    # `utils` submodule, and a paired shuffle needs no third-party code.
    pairs = list(zip(src, tgt))
    random.shuffle(pairs)
    src = [s for s, _ in pairs]
    tgt = [t for _, t in pairs]
    train_src, valid_src = src[:train_len], src[train_len:]
    train_tgt, valid_tgt = tgt[:train_len], tgt[train_len:]

    # writelines batches each split into one call (lines keep their "\n").
    with open(os.path.join(data_path, "train.src.txt"), "w", encoding="utf-8") as fout:
        fout.writelines(train_src)
    with open(os.path.join(data_path, "train.tgt.txt"), "w", encoding="utf-8") as fout:
        fout.writelines(train_tgt)
    with open(os.path.join(data_path, "valid.src.txt"), "w", encoding="utf-8") as fout:
        fout.writelines(valid_src)
    with open(os.path.join(data_path, "valid.tgt.txt"), "w", encoding="utf-8") as fout:
        fout.writelines(valid_tgt)


def postaggerText(data_path, min_times, max_times):
    """Augment the dialogue corpus by swapping common nouns.

    Segments ``dialogue.txt``, POS-tags every sentence with LTP, then for each
    sentence emits between min_times and max_times (exclusive) variants in
    which every common-noun position is replaced by a noun sampled from the
    corpus-wide noun pool.  Writes the cut corpus to ``dialogue_cut.txt`` and
    the augmented sentences to ``dialogue_supp.txt``.
    """
    segment_model = DataPreprocess("jieba", "ltp", data_path,
                                   os.path.join(".", "data", "sgns.baidubaike.bigram-char", "words.txt"), 5, 300)

    sentences_words = []
    fin = open(os.path.join(data_path, "dialogue.txt"), "r", encoding="utf-8")
    for line in fin:
        # cutSentence returns (words, unk_contexts); only the tokens are kept.
        sentences_words.append(segment_model.cutSentence(line.strip())[0])

    with open(os.path.join(data_path, "dialogue_cut.txt"), "w", encoding="utf-8") as fout:
        for words in sentences_words:
            fout.write(" ".join(words) + "\n")

    # all_postag_pos_dict: per-sentence {POS tag -> token indices}.
    # postag_word_dict: corpus-wide {POS tag -> words seen with that tag}.
    all_postag_pos_dict = []
    postag_word_dict = {}
    postagger = Postagger()
    postagger.load(pos_model_path)
    for words in sentences_words:
        postag_pos_dict = {}
        postags = list(postagger.postag(words))
        for idx, pos in enumerate(postags):
            try:
                postag_pos_dict[pos].append(idx)
            except:
                postag_pos_dict[pos] = [idx]
        all_postag_pos_dict.append(postag_pos_dict)
        for pos, word in zip(postags, words):
            try:
                postag_word_dict[pos].add(word)
            except:
                postag_word_dict[pos] = set([word])

    # np.random.choice needs a sequence, so freeze the sets into lists.
    for postag in postag_word_dict:
        postag_word_dict[postag] = list(postag_word_dict[postag])

    sentences = []
    for idx, words in enumerate(sentences_words):
        sentences.append("".join(words))
        postag_pos_dict = all_postag_pos_dict[idx]
        # NOTE(review): raises KeyError when a sentence contains no "n"-tagged
        # token — confirm every dialogue line is expected to have a noun.
        noun_poses = postag_pos_dict["n"]
        times = np.random.randint(min_times, max_times)
        for _ in range(times):
            # One random replacement noun per noun position in this sentence.
            choice_nouns = np.random.choice(postag_word_dict["n"], len(noun_poses))
            new_words = words.copy()
            for indice, pos in enumerate(noun_poses):
                new_words = new_words[:pos] + [choice_nouns[indice]] + new_words[pos + 1:]
            sentences.append("".join(new_words))

    with open(os.path.join(data_path, "dialogue_supp.txt"), "w", encoding="utf-8") as fout:
        for sentence in sentences:
            fout.write(sentence + "\n")



if __name__ == "__main__":
    # Pipeline: strip operators -> split sentences -> segment words -> remove stop words.
    # Earlier stages of the pipeline are kept below, commented out; only the
    # train/valid split and tokenization currently run.

    # NOTE(review): `template` appears unused — confirm before removing.
    template = [[""]]

    data_path = os.path.join(".", "data")

    # fin = open(os.path.join(data_path, "power_grid.txt"), "r", encoding="utf-8")
    # text = fin.readlines()
    # new_text = transferOperator(text)
    # with open(os.path.join(data_path, "power_grid.without_operator.txt"), "w", encoding="utf-8") as fout:
    #     for line in new_text:
    #         fout.write(line)
    #
    # fin = open(os.path.join(data_path, "power_grid.without_operator.txt"), "r", encoding="utf-8")
    # text = fin.readlines()
    # text_cut = sentence_splitter(text)
    # # print(text_cut)
    # with open(os.path.join(data_path, "data.sentence_cut.txt"), "w", encoding="utf-8") as fout:
    #     for line in text_cut:
    #         fout.write(line + "\n")
    #
    # fin = open(os.path.join(data_path, "data.sentence_cut.txt"), "r", encoding="utf-8")
    # text = fin.readlines()
    # text_cut = word_segmentor(text)
    # # print(text_cut)
    # with open(os.path.join(data_path, "data.cut.jieba.txt"), "w", encoding="utf-8") as fout:
    #     for line in text_cut:
    #         fout.write(line + "\n")
    #
    # fin = open(os.path.join(data_path, "data.cut.jieba.txt"), "r", encoding="utf-8")
    # text = fin.readlines()
    # text_no_stop_words = removeStopWords(text)
    # with open(os.path.join(data_path, "data.no_stop_words.txt"), "w", encoding="utf-8") as fout:
    #     for line in text_no_stop_words:
    #         fout.write(line)
    #
    # text_no_letter = removeIrrelativeLetter(text_no_stop_words)
    # with open(os.path.join(data_path, "data.no_irrelative_letter.txt"), "w", encoding="utf-8") as fout:
    #     for line in text_no_letter:
    #         fout.write(line)

    # buildWordDict(300)
    # embeddings = np.loadtxt(os.path.join(".", "data", "sgns.baidubaike.bigram-char", "word_vector.txt"), delimiter="\t")
    # np.savetxt(os.path.join(".", "data", "sgns.baidubaike.bigram-char", "word_vector.txt"), embeddings, delimiter="\t",
    #            fmt="%f")
    # print("ok")

    # splitSentenceandTriple("jieba", "ltp", 5, 300)

    # postaggerText(os.path.join(".", "data"), 1, 3)

    # generateDialogData()

    # Split the transformer parallel data 90/10, then tokenize all four splits.
    splitTrainAndValid(os.path.join(".", "data", "transformer"), "src_data.txt", "tgt_data.txt", 0.9)

    window_size, embedding_dim = 5, 300
    data_model = DataPreprocess("jieba", "ltp", os.path.join(".", "data", "transformer"),
                                os.path.join(".", "data", "sgns.baidubaike.bigram-char", "words.txt"), window_size, embedding_dim)
    data_model.splitSentenceAndTranslatedSentence("train.src", os.path.join(".", "data", "transformer"))
    print("finish train.src")
    data_model.splitSentenceAndTranslatedSentence("train.tgt", os.path.join(".", "data", "transformer"))
    print("finish train.tgt")
    data_model.splitSentenceAndTranslatedSentence("valid.src", os.path.join(".", "data", "transformer"))
    print("finish valid.src")
    data_model.splitSentenceAndTranslatedSentence("valid.tgt", os.path.join(".", "data", "transformer"))
    print("finish valid.tgt")

    # postaggerText(data_path, 1, 3)




