import paddle
from paddle import nn
import pandas as pd
import joblib
import jieba
from paddle.io import Dataset, DataLoader
from paddle.optimizer import AdamW
import numpy as np
from sklearn.model_selection import train_test_split
import gensim


class Config():
    """Hyper-parameters and file paths for the LSTM intent-classification pipeline."""
    # Label list with 1-based ids.
    # NOTE(review): appears unused — training/prediction use label_k_v_vocab
    # below, whose ids differ. Confirm before relying on this mapping.
    labels = {'FilmTele-Play': 1,
              'Video-Play': 2,
              'Music-Play': 3,
              'Radio-Listen': 4,
              'Alarm-Update': 5,
              'Weather-Query': 6,
              'Travel-Query': 7,
              'HomeAppliance-Control': 8,
              'Calendar-Query': 9,
              'TVProgram-Play': 10,
              'Audio-Play': 11,
              'Other': 12}

    words_count = 0  # vocabulary size; populated by create_dataloader after loading the vocab
    embedding_size = 100  # word-vector dimension
    lstm_hidden_size = 256  # LSTM hidden-state size
    dropout = 0.2  # dropout rate

    train_data = 'data/train.csv'  # training data path (tab-separated: text, label)
    test_data = 'data/test.csv'  # test data path (tab-separated: text)
    vocab = 'vocab.pkl'  # joblib-pickled {word: id} dictionary
    wv_model = 'word2vec.model'  # gensim word2vec model path
    max_seq_size = 10  # maximum sentence length in tokens (pad/truncate to this)
    batch_size = 256  # batch size
    learning_rate = 5e-4
    # Label -> index mapping actually used for training (0-based).
    label_k_v_vocab = {'HomeAppliance-Control': 0, 'Calendar-Query': 1, 'Audio-Play': 2, 'Music-Play': 3,
                   'TVProgram-Play': 4, 'Video-Play': 5, 'Weather-Query': 6, 'Other': 7, 'Alarm-Update': 8,
                   'Travel-Query': 9, 'FilmTele-Play': 10, 'Radio-Listen': 11}
    # Inverse mapping: index -> label name (used when writing predictions).
    label_v_k_vocab = dict(zip(label_k_v_vocab.values(), label_k_v_vocab.keys()))

    mode = 'train'  # 'train' builds train/val loaders; anything else builds the test loader
    print_loss = 20  # print training metrics every N iterations
    epochs = 150


def tokenizer(seq, stop_words, vocab_dict, max_seq_size):
    '''
    Convert a sentence into a fixed-length sequence of vocabulary ids.

    :param seq: sentence (str)
    :param stop_words: stop-word collection; matching tokens are dropped
    :param vocab_dict: vocabulary {word: integer id}
    :param max_seq_size: fixed output length (pad with 0 / truncate)
    :return: np.ndarray of shape (max_seq_size,), dtype int; index 0 is the [pad] id
    '''
    # Initialise to the padding id (0).
    result = np.zeros(max_seq_size, dtype=int)

    # Segment the sentence with jieba.
    words = list(jieba.cut(seq))
    # Drop stop words; skip words missing from the vocabulary instead of
    # raising KeyError (OOV tokens are expected in unseen/test data).
    ids = [vocab_dict[w] for w in words if w not in stop_words and w in vocab_dict]
    # Copy into the fixed-length buffer: shorter sequences stay zero-padded,
    # longer ones are truncated. Always returns an ndarray (the original
    # returned a plain list on the truncation path).
    n = min(len(ids), max_seq_size)
    result[:n] = ids[:n]
    return result


class LSTMModel(nn.Layer):
    """Two-layer bidirectional LSTM classifier over frozen pretrained embeddings."""

    def __init__(self, config: Config, words2vec):
        '''
        :param config: pipeline configuration (sizes, dropout, label vocab)
        :param words2vec: pretrained embedding matrix used to initialise the
                          embedding layer (kept frozen during training)
        '''
        super(LSTMModel, self).__init__()
        # Embedding layer initialised from word2vec vectors; trainable=False
        # freezes it so the pretrained vectors are not updated.
        embed_attr = paddle.ParamAttr(name='embedding',
                                      initializer=paddle.nn.initializer.Assign(words2vec),
                                      trainable=False)
        self.embed = nn.Embedding(num_embeddings=config.words_count,
                                  embedding_dim=config.embedding_size,
                                  weight_attr=embed_attr)
        # Stacked (2-layer) bidirectional LSTM with inter-layer dropout.
        self.lstm = nn.LSTM(input_size=config.embedding_size,
                            hidden_size=config.lstm_hidden_size,
                            num_layers=2,
                            direction='bidirectional',
                            dropout=config.dropout)
        self.flatten = nn.Flatten()
        # Classification head: hidden*2 (both directions) -> 128 -> num labels.
        self.fc = nn.Linear(config.lstm_hidden_size * 2, 128)
        self.fc2 = nn.Linear(128, len(config.label_k_v_vocab))
        self.dropout = nn.Dropout(config.dropout)
        self.relu = nn.ReLU()

    def forward(self, x):
        '''
        Forward pass.

        :param x: integer token-id tensor, presumably [batch_size, seq_len]
        :return: unnormalised class logits
        '''
        # [batch, seq_len] -> [batch, seq_len, embedding_size], with dropout.
        emb = self.dropout(self.embed(x))
        # output: [batch, seq_len, lstm_hidden_size * 2]; h/c are final states.
        output, (h, c) = self.lstm(emb)
        # Mean-pool over the time axis, then run the two-layer head.
        pooled = paddle.mean(self.dropout(output), axis=1)
        return self.fc2(self.relu(self.fc(pooled)))


class LstmDataset(Dataset):
    """Wraps tokenized sequences (and optional labels) as a paddle Dataset.

    When labels are omitted (inference), samples contain only 'input_ids'.
    """

    def __init__(self, X, y=None):
        '''
        :param X: list of fixed-length token-id sequences
        :param y: optional list of integer labels (None for inference)
        '''
        super(LstmDataset, self).__init__()
        self.seqs = X
        self.labels = y
        self.len = len(X)

    def __getitem__(self, index):
        # Each sample is a dict so the DataLoader yields keyed mini-batches.
        sample = {'input_ids': np.array(self.seqs[index], dtype='int64')}
        if self.labels is not None:
            sample['label'] = np.array([self.labels[index]]).astype("int64")
        return sample

    def __len__(self):
        return self.len


def create_dataloader(config: Config):
    '''
    Build dataloaders from CSV files.

    In 'train' mode: reads the training CSV, tokenizes, maps labels via
    config.label_k_v_vocab, does a 90/10 split and returns
    (train_dataloader, val_dataloader).
    Otherwise: reads the test CSV and returns (test_df, test_dataloader).

    Side effect: sets config.words_count to the vocabulary size.

    :param config: pipeline configuration
    '''
    # Stop-word list, one word per line.
    stop_words = pd.read_csv('baidu_stopwords.txt', header=None)[0].tolist()
    # Vocabulary {word: id}. Loaded once here instead of duplicating the
    # load + words_count assignment in both branches (original duplication).
    vocab_dict = joblib.load(config.vocab)
    config.words_count = len(vocab_dict)

    if config.mode == 'train':
        # Read the tab-separated training data: column 0 = text, 1 = label.
        train_df = pd.read_csv(config.train_data, sep='\t', header=None)
        train_df = train_df.rename(columns={0: 'text', 1: 'label'})
        # Tokenize text and convert labels to 0-based indices.
        train_df['tokenizer'] = train_df['text'].apply(
            lambda x: tokenizer(x, stop_words, vocab_dict, config.max_seq_size))
        train_df['label_new'] = train_df['label'].apply(lambda x: config.label_k_v_vocab[x])
        # 90/10 train/validation split with a fixed seed for reproducibility.
        X_train, X_test, y_train, y_test = train_test_split(train_df['tokenizer'], train_df['label_new'], test_size=0.1,
                                                            random_state=42)
        train_dataset = LstmDataset(X_train.tolist(), y_train.tolist())
        val_dataset = LstmDataset(X_test.tolist(), y_test.tolist())
        train_dataloader = DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True, num_workers=0)
        val_dataloader = DataLoader(val_dataset, batch_size=config.batch_size, shuffle=True, num_workers=0)
        return train_dataloader, val_dataloader
    else:
        # Inference: text only, no labels, no shuffling.
        test_df = pd.read_csv(config.test_data, sep='\t', header=None)
        test_df = test_df.rename(columns={0: 'text'})
        test_df['tokenizer'] = test_df['text'].apply(
            lambda x: tokenizer(x, stop_words, vocab_dict, config.max_seq_size))
        test_dataset = LstmDataset(test_df['tokenizer'].tolist())
        test_dataloader = DataLoader(test_dataset, batch_size=config.batch_size, num_workers=0)
        return test_df, test_dataloader




@paddle.no_grad()
def evaluation(model, loss_fn, metric, val_dataloader):
    '''
    Evaluate the model on the validation set.

    :param model: model to evaluate (switched to eval() and back to train())
    :param loss_fn: loss function
    :param metric: paddle Accuracy metric (reset before and after use)
    :param val_dataloader: validation dataloader
    :return: (mean batch loss, accuracy over the whole validation set)
    '''
    model.eval()
    metric.reset()
    losses = []
    for iter_id, mini_batch in enumerate(val_dataloader):
        x = mini_batch['input_ids']
        y_true = mini_batch['label']
        y_pred = model(x)

        loss = loss_fn(y_pred, y_true)
        losses.append(loss.numpy())
        correct = metric.compute(y_pred, y_true)
        metric.update(correct)
    # Accuracy accumulated over all validation batches. The original averaged
    # the *running* cumulative accuracy recorded after every batch, which
    # over-weights early batches; the final accumulate() is the true value.
    acc = metric.accumulate()
    model.train()
    metric.reset()
    return np.mean(losses), acc
@paddle.no_grad()
def predict(model, test_dataloader):
    '''
    Run inference over the test set.

    :param model: trained model (switched to eval mode)
    :param test_dataloader: dataloader yielding {'input_ids': ...} batches
    :return: list of predicted label indices, in dataloader order
    '''
    model.eval()
    results = []
    for batch in test_dataloader:
        logits = model(batch['input_ids'])
        # Arg-max over the class axis gives the predicted label index.
        results.extend(paddle.argmax(logits, axis=1).numpy())
    return results



def train(train_dataloader, val_dataloader, config: Config):
    '''
    Train the LSTM classifier and validate after every epoch.

    :param train_dataloader: training dataloader
    :param val_dataloader: validation dataloader
    :param config: pipeline configuration
    :return: the trained model
    '''
    # Pretrained word2vec vectors initialise the (frozen) embedding layer.
    word2vec = gensim.models.word2vec.Word2Vec.load(config.wv_model).wv.vectors
    model = LSTMModel(config, word2vec)
    # AdamW optimiser, cross-entropy loss, running accuracy metric.
    opt = AdamW(learning_rate=config.learning_rate, parameters=model.parameters())
    loss_fn = nn.loss.CrossEntropyLoss()
    metric = paddle.metric.Accuracy()

    for epoch in range(config.epochs):
        model.train()
        for iter_id, batch in enumerate(train_dataloader):
            y_true = batch['label']
            y_pred = model(batch['input_ids'])
            loss = loss_fn(y_pred, y_true)
            # Track running accuracy for progress logging.
            metric.update(metric.compute(y_pred, y_true))
            acc = metric.accumulate()
            # Backprop and parameter update.
            loss.backward()
            opt.step()
            opt.clear_grad()
            if iter_id % config.print_loss == 0:
                print('epoch:{}, iter_id:{}, loss:{}, acc:{}'.format(epoch, iter_id, loss.item(), acc))
        # Validate once per epoch (evaluation() resets the shared metric).
        avg_val_loss, acc = evaluation(model, loss_fn, metric, val_dataloader)
        print('-' * 50)
        print('epoch: {}, val_loss: {}, val_acc: {}'.format(epoch, avg_val_loss, acc))
        print('-' * 50)
    return model


if __name__ == '__main__':
    config = Config()
    # Run on the first GPU; assumes a CUDA build of paddle is installed.
    paddle.device.set_device('gpu:0')
    '''
        训练
    '''
    # --- Training phase ---
    config.mode = 'train'
    # Build train/validation dataloaders (also sets config.words_count).
    train_dataloader, val_dataloader = create_dataloader(config) 
    # Train the model end to end.
    model = train(train_dataloader, val_dataloader, config)
    '''
        预测
    '''
    # --- Prediction phase ---
    config.mode = 'test'
    test_df, test_dataloader = create_dataloader(config)
    y_pred = predict(model, test_dataloader)
    print(y_pred)
    # Map predicted indices back to label names and write the submission file.
    test_df['Target'] = [config.label_v_k_vocab[y] for y in y_pred]
    test_df['ID'] = test_df.index
    test_df[['ID','Target']].to_csv('result/lstm.csv', index=False)



