import numpy
import json
import torch

from util.data_generator import DataGenerator
from torch.utils.data import DataLoader

class XLoader():
    """Loader for CoNLL-style NER data.

    Expects ``config`` to provide:
      - file-path keys referenced by name (e.g. "train", "test", "dev"),
      - "schema": JSON file mapping label text -> index,
      - "vocab": JSON file mapping token -> id (must contain an "unk" entry),
      - "max_length": pad/truncate length,
      - "batch_size": batch size for the training DataLoader.
    """

    def __init__(self, config):
        self.config = config
        self.load_schema()
        self.load_vocab()

    def load_train_data(self, file_path):
        """Return a list of ``[word_ids, label_ids]`` pairs for training.

        Tokens are mapped through the vocab (unknowns -> "unk"); labels are
        mapped through the schema.  Both sequences are padded/truncated to
        ``max_length``; labels are padded with -100 so loss functions that
        honor ``ignore_index=-100`` skip the padding positions.
        """
        corpus = []
        corpus_word, corpus_label = self.load_file_data(file_path)
        for words, labels in zip(corpus_word, corpus_label):
            word_ids = [self.vocab.get(word, self.vocab["unk"]) for word in words]
            label_ids = [self.lable_2_index[label] for label in labels]
            corpus.append([self.padding(word_ids),
                           self.padding(label_ids, -100)])
        return corpus

    # Load evaluation data.
    def load_evaluate_data(self, file_path):
        """Return ``(texts, label_strings)`` for evaluation.

        Each text is the sentence's tokens joined with no separator; each
        label string is the concatenation of the labels' schema indices
        (as digits), unpadded.
        """
        corpus_word, corpus_label = self.load_file_data(file_path)
        texts = []
        new_labels = []
        for words, labels in zip(corpus_word, corpus_label):
            texts.append(''.join(words))
            # Build the index string in one pass instead of += in a loop.
            new_labels.append(''.join(str(self.lable_2_index[label])
                                      for label in labels))
        return texts, new_labels

    def load_file_data(self, file_path):
        """Parse the file at ``config[file_path]`` into parallel lists.

        Format: sentences separated by blank lines; one whitespace-separated
        ``token label`` pair per line.  Returns ``(corpus_word, corpus_label)``
        where element i of each is the token/label list of sentence i.
        """
        corpus_word = []   # token lists, one per sentence
        corpus_label = []  # label lists, aligned with corpus_word
        with open(self.config[file_path], "r", encoding="utf-8") as f:
            # Sentences are separated by blank lines.
            for sentence in f.read().split('\n\n'):
                sentence_word = []
                sentence_label = []
                for line in sentence.split('\n'):
                    parts = line.split()
                    # Skip malformed lines (anything but exactly token+label).
                    if len(parts) == 2:
                        sentence_word.append(parts[0])
                        sentence_label.append(parts[1])
                # NOTE(review): a trailing blank line yields an empty sentence
                # entry here, matching the original behavior — confirm whether
                # empty sentences should instead be filtered out.
                corpus_word.append(sentence_word)
                corpus_label.append(sentence_label)
        return corpus_word, corpus_label

    def get_data_loader_train(self, path, shuffle=True):
        """Build a shuffled (by default) DataLoader over the padded train data."""
        return DataGenerator(self.load_train_data(path)).build_data_loader(
            batch_size=self.config["batch_size"], shuffle=shuffle)

    def load_schema(self):
        """Load label<->index mappings from the schema JSON file."""
        with open(self.config["schema"], "r", encoding="utf-8") as f:
            self.lable_2_index = json.load(f)
        self.index_2_lable = {v: k for k, v in self.lable_2_index.items()}

    def load_vocab(self):
        """Load the token->id vocabulary from the vocab JSON file."""
        with open(self.config["vocab"], "r", encoding="utf-8") as f:
            self.vocab = json.load(f)

    def padding(self, input_id, pad_token=0):
        """Pad ``input_id`` with ``pad_token`` (or truncate) to ``max_length``.

        Bug fix: the pad value was hard-coded to 0, silently ignoring
        ``pad_token`` — label sequences lost their -100 ignore markers.
        """
        max_length = self.config["max_length"]
        if len(input_id) < max_length:
            return input_id + [pad_token] * (max_length - len(input_id))
        return input_id[:max_length]
          

# Smoke test: parse the dev split and dump the raw token/label corpus.
if __name__ == "__main__":
    config = {
        "train": "ner_data/train",
        "test": "ner_data/test",
        "dev": "ner_data/dev",
        "schema": "ner_data/schema.json",
        "vocab": "ner_data/vocab.json",
        "max_length": 128,
        "batch_size": 32,
    }

    loader = XLoader(config)
    corpus_word, corpus_label = loader.load_file_data("dev")

    print(corpus_word)
    print(corpus_label)
    print(1)




