import os

import numpy as np
import torch
from torch import nn
from d2l import torch as d2l

def read_imdb(data_dir, is_train):
    """Read the IMDb review dataset.

    :param data_dir: root of the extracted aclImdb dataset
    :param is_train: True to read the 'train' split, False for 'test'
    :return: (texts, labels) — review strings with newlines stripped, and
        integer labels (1 for 'pos', 0 for 'neg')
    """
    split = 'train' if is_train else 'test'
    texts, labels = [], []
    for sentiment in ('pos', 'neg'):
        folder = os.path.join(data_dir, split, sentiment)
        tag = 1 if sentiment == 'pos' else 0
        for fname in os.listdir(folder):
            # Files are read as bytes and decoded explicitly as UTF-8.
            with open(os.path.join(folder, fname), 'rb') as fh:
                texts.append(fh.read().decode('utf-8').replace('\n', ''))
            labels.append(tag)
    return texts, labels


def load_data_imdb(batch_size, num_steps=500):
    """Return data iterators and the vocabulary of the IMDb review dataset.

    :param batch_size: minibatch size for both iterators
    :param num_steps: every review is truncated/padded to this many tokens
    :return: (train_iter, test_iter, vocab)
    """
    data_dir = d2l.download_extract('aclImdb', 'aclImdb')
    train_texts, train_labels = read_imdb(data_dir, True)
    test_texts, test_labels = read_imdb(data_dir, False)
    train_tokens = d2l.tokenize(train_texts, token='word')
    test_tokens = d2l.tokenize(test_texts, token='word')
    # Vocabulary is built from the training split only (min_freq=5).
    vocab = d2l.Vocab(train_tokens, min_freq=5)

    def _to_features(token_lines):
        # Map tokens to indices and fix the length of every review.
        return torch.tensor([
            d2l.truncate_pad(vocab[line], num_steps, vocab['<pad>'])
            for line in token_lines])

    train_iter = d2l.load_array(
        (_to_features(train_tokens), torch.tensor(train_labels)), batch_size)
    test_iter = d2l.load_array(
        (_to_features(test_tokens), torch.tensor(test_labels)), batch_size,
        is_train=False)
    return train_iter, test_iter, vocab


def get_is_correct_arr(y_true, y_pred):
    """Elementwise comparison of predictions against ground truth.

    :param y_true: true labels (NumPy array, list/tuple, or PyTorch tensor)
    :param y_pred: predicted labels (NumPy array, list/tuple, or PyTorch tensor)
    :return: NumPy int array with 1 where the prediction is correct, else 0
    """
    def _as_numpy(arr):
        if isinstance(arr, torch.Tensor):
            # detach() first: .numpy() raises on tensors that require grad.
            return arr.detach().cpu().numpy()
        # np.asarray covers lists and tuples, and passes ndarrays through.
        return np.asarray(arr)

    return (_as_numpy(y_true) == _as_numpy(y_pred)).astype(int)

# Accuracy evaluation
def evaluate_accuracy_gpu(net, data_iter, device=None):
    """Evaluate accuracy on a dataset using a GPU, recording per-sample results.

    :param net: model to evaluate (moved to eval mode when it is an nn.Module)
    :param data_iter: iterable of (X, y) minibatches
    :param device: target device; defaults to the device of net's parameters
    :return: (accuracy, is_correct_arr) where is_correct_arr[i] is 1 when
        sample i was classified correctly
    """
    if isinstance(net, nn.Module):
        net.eval()  # evaluation mode (disables dropout etc.)
        if device is None:
            # Default to wherever the model parameters live.
            device = next(iter(net.parameters())).device

    num_correct, num_total = 0.0, 0.0
    all_preds = []   # predicted class indices, accumulated over batches
    all_labels = []  # true labels, accumulated over batches

    with torch.no_grad():
        for X, y in data_iter:
            # BERT fine-tuning passes a list of input tensors.
            X = [x.to(device) for x in X] if isinstance(X, list) else X.to(device)
            y = y.to(device)

            outputs = net(X)
            preds = outputs.argmax(dim=1)  # predicted class per sample
            all_preds.extend(preds.cpu().numpy())
            all_labels.extend(y.cpu().numpy())

            num_correct += d2l.accuracy(outputs, y)
            num_total += y.numel()

    accuracy = num_correct / num_total
    is_correct_arr = get_is_correct_arr(all_labels, all_preds)

    print(is_correct_arr)
    return accuracy, is_correct_arr


# Training loop
def train_net(net, train_iter, test_iter, loss, trainer, num_epochs, devices):
    """Train `net`, evaluating on the test set after every epoch.

    :param net: model (wrapped in nn.DataParallel across `devices`)
    :param train_iter: training minibatch iterator
    :param test_iter: test minibatch iterator
    :param loss: per-sample loss (reduction='none'); averaged here
    :param trainer: optimizer
    :param num_epochs: number of epochs to run
    :param devices: list of devices; devices[0] hosts the data
    :return: list of per-epoch is_correct arrays from the test set
    """
    predicted_result = []

    net = nn.DataParallel(net, device_ids=devices).to(devices[0])
    print(f"Training on {devices}")

    for epoch in range(num_epochs):
        net.train()
        # Running sums: weighted loss, correct-prediction count, sample count.
        loss_sum, acc_sum, sample_count = 0.0, 0.0, 0
        for X, y in train_iter:
            X, y = X.to(devices[0]), y.to(devices[0])
            y_hat = net(X)
            l = loss(y_hat, y).mean()
            trainer.zero_grad()
            l.backward()
            trainer.step()
            loss_sum += float(l * y.shape[0])
            acc_sum += d2l.accuracy(y_hat, y)
            sample_count += y.numel()

        train_l = loss_sum / sample_count
        train_acc = acc_sum / sample_count

        # Evaluate on the test set and keep the per-sample correctness array.
        test_acc, is_correct_arr = evaluate_accuracy_gpu(net, test_iter)
        predicted_result.append(is_correct_arr)
        print(f'epoch {epoch + 1}, loss {train_l:.3f}, '
              f'train acc {train_acc:.3f}, test acc {test_acc:.3f}')
    return predicted_result

class BiRNN(nn.Module):
    """Bidirectional LSTM text classifier with two output classes."""

    def __init__(self, vocab_size, embed_size, num_hiddens,
                 num_layers, **kwargs):
        super().__init__(**kwargs)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # bidirectional=True doubles the hidden state width per time step.
        self.encoder = nn.LSTM(embed_size, num_hiddens, num_layers=num_layers,
                               bidirectional=True)
        # Fed with the concatenated first and last step states:
        # 2 steps * 2 directions * num_hiddens = 4 * num_hiddens features.
        self.decoder = nn.Linear(4 * num_hiddens, 2)

    def forward(self, inputs):
        """Map (batch, steps) token indices to (batch, 2) class logits."""
        # nn.LSTM expects time-major input, so transpose before embedding;
        # embedded has shape (steps, batch, embed_size).
        embedded = self.embedding(inputs.T)
        self.encoder.flatten_parameters()
        # states: hidden states of the top layer at every time step,
        # shape (steps, batch, 2 * num_hiddens).
        states, _ = self.encoder(embedded)
        # Concatenate the first and last time-step states -> (batch, 4 * num_hiddens).
        features = torch.cat((states[0], states[-1]), dim=1)
        return self.decoder(features)

def init_weights(m):
    """Xavier-uniform initialize the weights of Linear and LSTM modules.

    Intended for use with ``net.apply(init_weights)``. Biases are left at
    their default initialization.

    :param m: a submodule passed in by ``Module.apply``
    """
    # isinstance instead of `type(m) ==` (idiomatic type check), and
    # elif since the two branches are mutually exclusive.
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)
    elif isinstance(m, nn.LSTM):
        # LSTM keeps its parameters flat; initialize each weight matrix
        # (weight_ih_l*, weight_hh_l*) and skip the bias vectors.
        for name in m._flat_weights_names:
            if "weight" in name:
                nn.init.xavier_uniform_(m._parameters[name])

def save_list_to_file(data_list, filename):
    """Write each element of a list to a text file, one element per line.

    :param data_list: list of items; each is formatted with str()
    :param filename: destination path (its directory must already exist)
    """
    # Pin the encoding: without it, open() uses the platform default and
    # non-ASCII items could raise UnicodeEncodeError on some systems.
    with open(filename, 'w', encoding='utf-8') as file:
        file.writelines(f"{item}\n" for item in data_list)

# Hyper-parameters and data iterators
batch_size = 64
# Use the load_data_imdb defined above — previously this called
# d2l.load_data_imdb, leaving the local implementation as dead code.
train_iter, test_iter, vocab = load_data_imdb(batch_size)

# Model configuration
embed_size, num_hiddens, num_layers = 100, 100, 2
devices = d2l.try_all_gpus()  # all available GPUs (CPU fallback)
net = BiRNN(len(vocab), embed_size, num_hiddens, num_layers)

# Xavier-initialize the Linear and LSTM weights
net.apply(init_weights)

# Load pretrained 100-dimensional GloVe vectors for every vocabulary token
glove_embedding = d2l.TokenEmbedding('glove.6b.100d')
embeds = glove_embedding[vocab.idx_to_token]

# Copy the pretrained vectors into the embedding layer and freeze it
net.embedding.weight.data.copy_(embeds)
net.embedding.weight.requires_grad = False  # embeddings are not trained

# Training configuration
epochs = 200
threshold = 30        # warm-up epochs before the ignore rate is computed
threshold_value = 30  # correct-count a sample needs to be counted as "ignorable"
lr = 0.01
trainer = torch.optim.Adam(net.parameters(), lr=lr)
# Per-sample losses (reduction="none"); train_net averages them per batch.
loss = nn.CrossEntropyLoss(reduction="none")

# Train the model; returns one per-sample correctness array per epoch
predicted_result = train_net(net, train_iter, test_iter, loss, trainer, epochs, devices)

# Stack into a (epochs, num_test_samples) int array.
# (numpy is already imported at the top of the file; the redundant
# mid-script import was dropped.)
predicted_result = np.array(predicted_result, dtype=np.int32)

def count_elements_above_threshold(array, threshold_value):
    """Count the elements of `array` that are at least `threshold_value`.

    Note the comparison is inclusive (>=).
    """
    mask = array >= threshold_value
    return mask.sum()


print(predicted_result)
# For each epoch >= threshold, look at the trailing window of `threshold`
# epochs and compute the percentage of test samples that were classified
# correctly at least `threshold_value` times in that window.
ignore_rate_list = []
for i in range(1, epochs + 1):
    if i < threshold:
        # Not enough history for a full window yet.
        ignore_rate_list.append(0.0)
        continue
    # Use `threshold` for the window size (was a hard-coded 30 that would
    # silently diverge if the variable above were changed).
    span_array = predicted_result[i - threshold:i]
    sum_along_columns = np.sum(span_array, axis=0)  # per-sample correct count
    ignore_num = count_elements_above_threshold(sum_along_columns, threshold_value)
    ignore_rate = ignore_num / sum_along_columns.size * 100
    ignore_rate_list.append(ignore_rate)

print(ignore_rate_list)
# Ensure the output directory exists — open(..., 'w') would fail otherwise.
os.makedirs("./data_result", exist_ok=True)
save_list_to_file(ignore_rate_list, "./data_result/lstm.txt")
