import numpy as np
import os,sys
import pickle
import random
from numpy.core.records import array
import pandas as pd
from keras.utils import to_categorical

def checkdirs(directory):
    """Create *directory* (including parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of an ``exists()`` pre-check, which avoids
    the race where another process creates the directory between the check
    and the ``makedirs`` call.
    """
    os.makedirs(directory, exist_ok=True)


def read_text(filename):
    """Read *filename* and return its whole contents parsed as an int.

    Used to restore a persisted best-epoch number written by write_text.
    """
    with open(filename, 'r') as handle:
        contents = handle.read()
    return int(contents)


def write_text(text, filename):
    """Overwrite *filename* with the string *text*."""
    with open(filename, 'w') as handle:
        handle.write(text)


def load_pickle_data(pickle_dir, dataset):
    """Unpickle and return the object stored at ``pickle_dir + dataset``.

    :param pickle_dir: directory prefix (must exist; callers pass it with a
        trailing separator since the filename is concatenated, not joined)
    :param dataset: pickle filename appended to pickle_dir
    :return: the unpickled object

    NOTE(review): ``pickle.load`` can execute arbitrary code — only use this
    on trusted local files.
    """
    assert os.path.exists(pickle_dir)
    # 'with' guarantees the file handle is closed (the original leaked it).
    with open(pickle_dir + dataset, "rb") as f:
        return pickle.load(f)


def get_idx_from_sent(sent, word_idx_map, max_length):
    """Convert a sentence into a fixed-length list of word indices.

    The sentence is whitespace-tokenized and truncated to *max_length*
    tokens; tokens absent from *word_idx_map* are dropped, and the result
    is right-padded with zeros up to *max_length*.
    """
    tokens = sent.split()[:max_length]
    indices = [word_idx_map[tok] for tok in tokens if tok in word_idx_map]
    return indices + [0] * (max_length - len(indices))


def make_idx_data(raw_datas, word_idx_map, len_train, max_length):
    """Build index arrays from raw records and split them into train/test.

    Each record's "text" is mapped to a fixed-length index row via
    get_idx_from_sent, with the record's label "y" appended as the last
    column. The first *len_train* rows form the training array, the rest
    form the test array.
    """
    rows = [
        get_idx_from_sent(record["text"], word_idx_map, max_length) + [record["y"]]
        for record in raw_datas
    ]
    train = np.array(rows[:len_train], dtype="int")
    test = np.array(rows[len_train:], dtype="int")
    return train, test


def preprocessing(data_path, dataset, long_sent=800):
    """Load a pickled corpus and convert it into padded index sequences.

    :param data_path: base directory containing the pickle file
    :param dataset: pickle filename, e.g. one of {'20news', 'mr', 'trec', 'mpqa'}
    :param long_sent: fixed sequence length used for long-document datasets
    :return: (sequence_length, num_class, vocab_size, x_train, y_train,
              x_test, y_test, pretrain_word, word_idx) where pretrain_word
              holds pre-trained word vectors (GloVe 840B per original notes)
              and word_idx maps word -> index
    """
    assert os.path.exists(data_path) is True

    x = load_pickle_data(data_path, dataset)
    # Each data_frame record carries: num_words, y (label), text.
    data_frame, pretrain_word, len_train, n_exist_word, vocab, word_idx = x

    max_l = int(np.max(pd.DataFrame(data_frame)["num_words"]))
    print(data_frame[0])
    print('--', len_train)
    if dataset in ["reuters", "20news", "imdb", 'mr']:
        # Long-document corpora: clip/pad every sequence to long_sent.
        train, test = make_idx_data(data_frame, word_idx, len_train, long_sent)
    else:
        # Otherwise pad to the longest sentence actually observed.
        train, test = make_idx_data(data_frame, word_idx, len_train, max_l)

    # Each row is [word indices..., label]: split features from labels.
    x_train = train[:, :-1]
    y_train = train[:, -1]

    x_test = test[:, :-1]
    y_test = test[:, -1]
    sequence_length = len(x_train[0])

    # One-hot encode labels via identity-matrix indexing.
    # NOTE(review): assumes labels are integers 0..K-1 — np.eye indexing
    # would misbehave for arbitrary label values; confirm upstream encoding.
    labels = sorted(set(y_train))
    num_class = len(labels)
    y_train = np.eye(num_class)[y_train]
    y_test = np.eye(num_class)[y_test]
    vocab_size = pretrain_word.shape[0]

    print("sequence length :", sequence_length)
    print("vocab size :", vocab_size)
    print("num classes :", num_class)
    print(type(pretrain_word))
    print(pretrain_word.shape)
    return sequence_length, num_class, vocab_size, x_train, y_train, x_test, y_test, pretrain_word, word_idx

UNK, PAD = 'UNK', 'PAD'  # unknown-word token, padding token

def preprocessing_sougo(data_path, dataset, long_sent=100):
    """Preprocess the Sogou corpus into shuffled train/dev/test splits.

    Reads 'split_all_sentence.pkl' (list of (label, space-tokenized text)
    pairs) and 'word2index.pkl' (word -> index dict) from *data_path*.

    :param data_path: directory containing the two pickle files
    :param dataset: unused; kept for signature compatibility with preprocessing()
    :param long_sent: fixed sequence length — shorter texts are padded with
        PAD tokens, longer ones truncated
    :return: (sequence_length, num_class, vocab_size, x_train, y_train,
              x_dev, y_dev, x_test, y_test, vocab)
    """
    print("这里是处理搜狗文件夹")
    assert os.path.exists(data_path) is True

    all_line = load_pickle_data(data_path, "split_all_sentence.pkl")
    vocab = load_pickle_data(data_path, 'word2index.pkl')
    contents = []
    for line in all_line:
        label, content = line[0], line[1]

        tokens = content.split(" ")
        # Pad with PAD tokens or truncate so every row has long_sent tokens.
        if long_sent:
            if len(tokens) < long_sent:
                tokens.extend([PAD] * (long_sent - len(tokens)))
            else:
                tokens = tokens[:long_sent]

        # Map tokens to indices; out-of-vocabulary words fall back to UNK.
        words_line = [vocab.get(word, vocab.get('UNK')) for word in tokens]

        # Label goes in the last column of the row.
        words_line.append(label)
        contents.append(words_line)

    # Shuffle before splitting so the splits share the label distribution.
    random.shuffle(contents)
    # Renamed local (was `dataset`, which shadowed the unused parameter).
    data_array = np.array(contents, dtype=int)  # shape: (n_samples, long_sent + 1)
    dataset_len = len(data_array)
    train_len = int(dataset_len * 0.8)
    dev_len = int((dataset_len - train_len) * 0.5)

    # 80% train / 10% dev / 10% test split.
    train = data_array[:train_len]
    dev = data_array[train_len: train_len + dev_len]
    test = data_array[train_len + dev_len:]
    print("语料数：", len(contents))

    # Each row is [word indices..., label]: split features from labels.
    x_train = train[:, :-1]
    y_train = train[:, -1]
    sequence_length = len(x_train[0])

    x_dev = dev[:, :-1]
    y_dev = dev[:, -1]

    x_test = test[:, :-1]
    y_test = test[:, -1]

    # One-hot encode labels over the distinct labels seen in training data.
    labels = sorted(set(y_train))
    y_train = to_categorical(y_train, num_classes=len(labels))
    y_dev = to_categorical(y_dev, num_classes=len(labels))
    y_test = to_categorical(y_test, num_classes=len(labels))

    num_class = y_train.shape[1]
    print("sequence length :", sequence_length)
    print("vocab size :", len(vocab))
    print("num classes :", num_class)
    return sequence_length, num_class, len(vocab), \
        x_train, y_train, x_dev, y_dev, x_test, y_test, \
        vocab