from dataset.corpus_2014 import Corpus2014Dataset
from itertools import chain
from collections import Counter
from keras.preprocessing.sequence import pad_sequences
import numpy as np



class DataProcessor():
    def __init__(self,max_len,vocab_size):
        super(DataProcessor,self).__init__()
        self.max_len = max_len
        self.vocab_size = vocab_size
        self.word2id = {}
        self.tags = {}
        self.id2tag = {}
        self.class_nums = 0
        self.sample_nums = 0

    def read_data(self,is_training_data = True):
        ds = Corpus2014Dataset(nltk_style=True)
        train_data = ds.train_data
        test_data = ds.test_data
        train_x, trainy = Corpus2014Dataset.features_and_labels(train_data)
        test_x, testy = Corpus2014Dataset.features_and_labels(test_data)
        if is_training_data:
            self.tags = sorted(list(set(chain(*trainy))))
            self.tag2id = {tag: idx + 1 for idx, tag in enumerate(self.tags)}
            self.id2tag = {idx + 1: tag for idx, tag in enumerate(self.tags)}
            self.tag2id['padding'] = 0
            self.id2tag[0] = 'padding'
            self.class_nums = len(self.id2tag)
            self.sample_nums = len(train_x)
            #print(self.id2tag)
            #print(self.class_nums)

            vocab = list(chain(*train_x))
            # print("vocab lenth", len(set(vocab)))
            vocab = Counter(vocab).most_common(self.vocab_size - 2)
            vocab = [v[0] for v in vocab]
            for index, word in enumerate(vocab):
                self.word2id[word] = index + 2

            # OOV 为1，padding为0
            self.word2id['padding'] = 0
            self.word2id['OOV'] = 1

        return train_x, trainy,test_x, testy

    def encoder(self,X,y):
        X = [[self.word2id.get(word,1) for word in x]for x in X]
        X = pad_sequences(X,maxlen=self.max_len,value=0)
        y = [[self.tag2id.get(tag,0) for tag in t] for t in y]
        y = pad_sequences(y,maxlen=self.max_len,value=0)

        def label_to_one_hot(index: []):
            data = []
            for line in index:
                data_line = []
                for i, index in enumerate(line):
                    line_line = [0] * self.class_nums
                    line_line[index] = 1
                    data_line.append(line_line)
                data.append(data_line)
            return np.array(data)
        y = label_to_one_hot(index=y)
        # print(y.shape)
        print(X.shape)
        return X,y


# class data_generator(DataGenerator):




if __name__ == '__main__':
    # Smoke test: build the processor, load the corpus, and encode the
    # training split; print the first encoded sample.
    processor = DataProcessor(max_len=80, vocab_size=6000)
    train_x, train_y, test_x, test_y = processor.read_data()
    encoded_x, encoded_y = processor.encoder(train_x, train_y)
    print(encoded_x[0])