# coding:utf-8
import os

import pandas as pd
from keras import Input
from keras.models import Sequential,Model
from keras.layers import Dense, Flatten, MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.layers import Conv1D,MaxPool1D
from keras.preprocessing import sequence
from keras.layers import LSTM,Dropout
import numpy as np
from sklearn.model_selection import train_test_split
import keras
from keras.datasets import imdb


def emotion():
    """Train a dense (MLP) sentiment classifier on the IMDB review dataset.

    Loads the IMDB data, pads every review to a fixed length of 400 word
    indices, embeds them, and trains a stack of fully connected ReLU layers
    with a sigmoid output for binary sentiment classification.
    """
    (x_train, y_train), (x_test, y_test) = imdb.load_data()
    print(x_train.shape, x_test.shape)

    review_lengths = list(map(len, x_train))
    # BUG FIX: the original used max(map(len, x_train + x_test)); "+" on the
    # two object arrays concatenates paired reviews element-wise, so it
    # reported the maximum of summed lengths, not the true maximum length.
    max_text_length = max(max(map(len, x_train)), max(map(len, x_test)))
    print(np.mean(review_lengths))
    print(max_text_length, '\n')
    # The longest review (~2494 tokens) is an outlier while the average is
    # ~230, so cap the input length at 400: shorter reviews are padded,
    # longer ones truncated.
    max_word = 400
    x_train = sequence.pad_sequences(x_train, maxlen=max_word)
    print('train length', x_train[0])
    x_test = sequence.pad_sequences(x_test, maxlen=max_word)
    # Vocabulary size = largest word index + 1 (index 0 is the pad value).
    # BUG FIX: also scan x_test — a test-set index larger than every training
    # index would fall outside the embedding table and crash evaluation.
    vocab_size = int(max(np.max(x_train), np.max(x_test))) + 1
    model = Sequential()
    # Embedding(vocab_size, 64, input_length=max_word): the layer holds a
    # vocab_size x 64 matrix and maps each review to a (max_word, 64) block;
    # output shape is (batch_size, sequence_length, output_dim).
    model.add(Embedding(vocab_size, 64, input_length=max_word))
    model.add(Flatten())  # -> 1-D vector of length max_word * 64
    model.add(Dense(2000, activation="relu"))  # relu: f(x) = max(0, x)
    model.add(Dense(500, activation="relu"))
    model.add(Dense(200, activation="relu"))
    model.add(Dense(50, activation="relu"))
    model.add(Dense(1, activation='sigmoid'))
    # Binary cross-entropy measures how well the predicted probability
    # matches the 0/1 label; the smaller it is, the better the match.
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    print(model.summary())
    model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=2,
              batch_size=100, verbose=1)
    score = model.evaluate(x_test, y_test)
    print(score)
def emotioCnn():
    """Train a 1-D CNN sentiment classifier on the IMDB review dataset.

    In NLP, 1-D convolution exploits local (n-gram-like) features of the
    text; two Conv1D/MaxPool1D/Dropout stages feed dense layers with a
    sigmoid output for binary classification.
    """
    (x_train, y_train), (x_test, y_test) = imdb.load_data()
    print(x_train.shape, x_test.shape)

    review_lengths = list(map(len, x_train))
    # BUG FIX: the original used max(map(len, x_train + x_test)); "+" on the
    # two object arrays concatenates paired reviews element-wise, so it
    # reported the maximum of summed lengths, not the true maximum length.
    max_text_length = max(max(map(len, x_train)), max(map(len, x_test)))
    print(np.mean(review_lengths))
    print(max_text_length, '\n')
    # The longest review (~2494 tokens) is an outlier while the average is
    # ~230, so cap the input length at 400: shorter reviews are padded,
    # longer ones truncated.
    max_word = 400
    x_train = sequence.pad_sequences(x_train, maxlen=max_word)
    x_test = sequence.pad_sequences(x_test, maxlen=max_word)
    # Vocabulary size = largest word index + 1 (index 0 is the pad value).
    # BUG FIX: also scan x_test so no test index falls outside the embedding.
    vocab_size = int(max(np.max(x_train), np.max(x_test))) + 1

    model = Sequential()
    model.add(Embedding(vocab_size, 64, input_length=max_word))
    # Conv1D works on 1-D sequence data (vs. Conv2D for 2-D images).
    model.add(Conv1D(filters=64, kernel_size=3, padding='same', activation='relu'))
    model.add(MaxPool1D(pool_size=2))
    model.add(Dropout(0.25))
    model.add(Conv1D(filters=128, kernel_size=3, padding='same', activation='relu'))
    model.add(MaxPool1D(pool_size=2))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    print(model.summary())
    model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=20,
              batch_size=100, verbose=1)
    score = model.evaluate(x_test, y_test)
    print(score)
    # Optional: histogram of review lengths.
    # import matplotlib.pyplot as plt
    # plt.hist(review_lengths, bins=range(min(review_lengths), max(review_lengths) + 50, 50))
    # plt.show()
def emotionLSTM():
    """Train a stacked-LSTM sentiment classifier on the IMDB review dataset.

    Pads every review to 400 word indices, embeds them, and feeds three
    LSTM layers (with dropout between them) into a sigmoid output unit.
    """
    (x_train, y_train), (x_test, y_test) = imdb.load_data()
    print(x_train.shape, x_test.shape)

    review_lengths = list(map(len, x_train))
    # BUG FIX: the original used max(map(len, x_train + x_test)); "+" on the
    # two object arrays concatenates paired reviews element-wise, so it
    # reported the maximum of summed lengths, not the true maximum length.
    max_text_length = max(max(map(len, x_train)), max(map(len, x_test)))
    print(np.mean(review_lengths))
    print(max_text_length, '\n')

    max_word = 400
    x_train = sequence.pad_sequences(x_train, maxlen=max_word)
    x_test = sequence.pad_sequences(x_test, maxlen=max_word)
    # Vocabulary size = largest word index + 1 (index 0 is the pad value).
    # BUG FIX: also scan x_test so no test index falls outside the embedding.
    vocab_size = int(max(np.max(x_train), np.max(x_test))) + 1
    model = Sequential()
    model.add(Embedding(vocab_size, 64, input_length=max_word))
    # LSTM consumes a 3-D tensor shaped (samples, timesteps, features);
    # return_sequences=True keeps the per-timestep outputs so the next LSTM
    # layer receives a sequence, while the last LSTM returns a single vector.
    model.add(LSTM(128, return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(64, return_sequences=True))
    model.add(Dropout(0.2))
    model.add(LSTM(32))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    print(model.summary())
    model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=20,
              batch_size=100, verbose=1)
    score = model.evaluate(x_test, y_test)
    print(score)

# News classification with pretrained GloVe embeddings + a 1-D convnet.
def NewLSTM():
    """Classify 20-newsgroups articles with GloVe embeddings and a 1-D CNN.

    Reads the raw 20_newsgroups corpus from disk, tokenizes it, loads
    pretrained GloVe vectors into a frozen Embedding layer, and trains a
    stack of Conv1D/MaxPooling1D layers with a softmax output over the
    newsgroup categories.
    """
    TEXT_DATA_DIR = r'F:\Resources\Dataset\20_newsgroups'
    texts = []  # list of text samples
    labels_index = {}  # dictionary mapping label name to numeric id
    labels = []  # list of label ids
    for name in sorted(os.listdir(TEXT_DATA_DIR)):
        path = os.path.join(TEXT_DATA_DIR, name)
        if os.path.isdir(path):
            # Each sub-directory is one category; ids are assigned in
            # sorted directory order.
            label_id = len(labels_index)
            labels_index[name] = label_id
            for fname in sorted(os.listdir(path)):
                if fname.isdigit():
                    fpath = os.path.join(path, fname)
                    print(f'fpath:{fpath}')
                    # BUG FIX: use a context manager and an explicit
                    # encoding — the corpus contains non-ASCII bytes that
                    # can crash the platform-default codec.
                    with open(fpath, encoding='latin-1') as f:
                        texts.append(f.read())
                    labels.append(label_id)

    print('Found %s texts.' % len(texts))

    # Turn the news samples into padded integer tensors for training.
    from keras.preprocessing.text import Tokenizer
    from keras.preprocessing.sequence import pad_sequences
    MAX_NB_WORDS = 20000
    tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
    tokenizer.fit_on_texts(texts)
    sequences = tokenizer.texts_to_sequences(texts)

    word_index = tokenizer.word_index
    print('Found %s unique tokens.' % len(word_index))

    MAX_SEQUENCE_LENGTH = 1000
    data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)

    labels = keras.utils.to_categorical(np.asarray(labels))
    print('Shape of data tensor:', data.shape)
    print('Shape of label tensor:', labels.shape)
    EMBEDDING_DIM = 100
    VALIDATION_SPLIT = 0.2
    # Shuffle, then carve off the last 20% as a validation set.
    indices = np.arange(data.shape[0])
    np.random.shuffle(indices)
    data = data[indices]
    labels = labels[indices]
    nb_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
    x_train = data[:-nb_validation_samples]
    y_train = labels[:-nb_validation_samples]
    # Split a held-out test set from the remaining training portion.
    x_train, x_test, y_train, y_test = train_test_split(x_train,
                                                        y_train,
                                                        test_size=0.2,
                                                        random_state=1)
    x_val = data[-nb_validation_samples:]
    y_val = labels[-nb_validation_samples:]

    # Parse the GloVe file into a {word: vector} dictionary.
    GLOVE_DIR = r'F:\Resources\Dataset\glove.6B'
    embeddings_index = {}
    # BUG FIX: GloVe files are UTF-8; opening with the platform-default
    # codec raises UnicodeDecodeError on non-UTF-8 locales (e.g. Windows).
    with open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt'),
              encoding='utf-8') as f:
        for line in f:
            values = line.split()
            word = values[0]
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
    print('Found %s word vectors.' % len(embeddings_index))

    # Build the (num_words, EMBEDDING_DIM) matrix from the dictionary;
    # words missing from GloVe keep an all-zero row.
    embedding_matrix = np.zeros((len(word_index) + 1, EMBEDDING_DIM))
    for word, i in word_index.items():
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[i] = embedding_vector

    # Load the pretrained matrix into the Embedding layer via weights=[...]
    # and freeze it (trainable=False) so the GloVe vectors are not updated.
    embedding_layer = Embedding(len(word_index) + 1, EMBEDDING_DIM,
                                trainable=False,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH)
    # Solve the classification problem with a small 1-D convnet.
    sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
    embedded_sequences = embedding_layer(sequence_input)
    x = Conv1D(128, 5, activation='relu')(embedded_sequences)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(35)(x)  # global max pooling
    x = Flatten()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(len(labels_index), activation='softmax')(x)
    model = Model(sequence_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc'])
    # BUG FIX: Keras 2 renamed the nb_epoch argument to epochs.
    model.fit(x_train, y_train, validation_data=(x_val, y_val),
              epochs=5, batch_size=128)
    # BUG FIX: evaluate() requires data; the original called it with no
    # arguments, which raises a TypeError at runtime.
    print(model.evaluate(x_test, y_test))
    # NOTE(review): the layer below begins a comparison experiment (training
    # embeddings from scratch instead of using GloVe) but is never used.
    embedding_layer = Embedding(len(word_index) + 1,
                                EMBEDDING_DIM,
                                input_length=MAX_SEQUENCE_LENGTH)
if __name__ == "__main__":
    # Guard the entry point so importing this module does not immediately
    # trigger an IMDB download and a full training run.
    emotion()