import os

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
from gensim.models import KeyedVectors
from nltk.tokenize import word_tokenize
from sklearn.metrics import accuracy_score
from torch.utils.data import DataLoader, Dataset

# 1. Load the pretrained Word2Vec model (binary word2vec format,
# presumably the GoogleNews vectors given the filename — confirm).
word2vec_path = "./save/word2vec/model/word2vec_google_news.bin"
word2vec_model = KeyedVectors.load_word2vec_format(word2vec_path, binary=True)


# 2. Sentence preprocessing: turn raw text into one fixed-size vector.
def preprocess_sentence(sentence):
    """Convert *sentence* into a single mean-pooled embedding vector.

    The sentence is lower-cased and word-tokenized; every token found in
    the global ``word2vec_model`` vocabulary contributes its embedding,
    and the embeddings are averaged into one float32 tensor of size
    ``word2vec_model.vector_size``.  If no token is in the vocabulary,
    an all-zero vector of that size is returned instead.
    """
    words = word_tokenize(sentence.lower())
    vectors = []
    for word in words:
        if word in word2vec_model:
            vectors.append(word2vec_model[word])
    if not vectors:
        # Out-of-vocabulary sentence: fall back to a zero vector.
        return torch.zeros(word2vec_model.vector_size, dtype=torch.float32)
    stacked = torch.tensor(np.array(vectors), dtype=torch.float32)
    return stacked.mean(dim=0)


# 6. Model hyperparameters.
input_size = word2vec_model.vector_size  # sentence vectors inherit the word-embedding width
hidden_size = 128  # GRU hidden-state size
output_size = 2  # two output classes (binary labels in the CSVs)
num_epochs = 10  # total epochs; the two models alternate, one epoch each


# 3. Dataset: (sentence embedding, label) pairs read from a CSV file.
class CustomDataset(Dataset):
    """Dataset of mean-pooled sentence embeddings with integer labels.

    The CSV at *data_path* must provide ``sentence`` and ``label``
    columns.  Each item is ``(embedding, label)`` where the embedding is
    produced by ``preprocess_sentence`` and the label is a scalar long
    tensor.
    """

    def __init__(self, data_path):
        self.data = pd.read_csv(data_path)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        row = self.data.iloc[idx]
        # Scalar long tensor: the DataLoader collates these into a (batch,)
        # tensor, which is exactly what nn.CrossEntropyLoss expects as a
        # class-index target.  (The previous .unsqueeze(0) produced
        # (batch, 1) labels and broke the loss computation.)
        label = torch.tensor(row["label"], dtype=torch.long)
        # preprocess_sentence always returns a tensor (zero vector for
        # out-of-vocabulary sentences), so no None-check is needed here.
        embedding = preprocess_sentence(row["sentence"])
        return embedding, label


# 4. Load the train/validation/test splits and wrap them in loaders.
train_dataset = CustomDataset("./SST-2/train.csv")
validation_dataset = CustomDataset("./SST-2/validation.csv")
test_dataset = CustomDataset("./SST-2/test.csv")

# drop_last=True discards each split's final partial batch.
# NOTE(review): shuffle=False on the *training* loader is unusual — batches
# are seen in file order every epoch; confirm this is intentional.
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=False, drop_last=True)
validation_loader = DataLoader(
    validation_dataset, batch_size=64, shuffle=False, drop_last=True
)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False, drop_last=True)

# 遍历几个批次数据并输出样本信息
# for inputs, labels in train_loader:
#     print("Batch Inputs:", inputs)
#     print("Batch Labels Shape:", labels.shape)  # 打印标签的形状
#     print("Batch Labels:", labels)
#     break  # 只输出第一个批次的数据


# 5. Model definition.
class GRUModel(nn.Module):
    """Single-layer GRU classifier over (pooled) sentence embeddings.

    ``forward`` returns raw logits of shape ``(batch, output_size)``.
    No Softmax is applied here: the training loss is
    nn.CrossEntropyLoss, which applies log-softmax internally, so
    emitting probabilities from the model would train on
    doubly-softmaxed outputs and squash the gradients.  Callers that
    need probabilities apply Softmax themselves (as the evaluation code
    in this file already does).
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(GRUModel, self).__init__()
        self.gru = nn.GRU(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Each sample from the dataset is one pooled vector, so batches
        # arrive as (batch, input_size).  Give them a sequence axis of
        # length 1 — without it, nn.GRU(batch_first=True) misreads a 2-D
        # batch as a single unbatched sequence.
        if x.dim() == 2:
            x = x.unsqueeze(1)
        _, hidden_state = self.gru(x)
        # Final hidden state of the last (only) layer: (batch, hidden_size).
        logits = self.fc(hidden_state[-1])
        return logits


# 7. Cross-training setup: two identical GRU classifiers, each with its
# own Adam optimizer; they take turns training, one epoch each (see the
# loop at the bottom of the file).
gru1 = GRUModel(input_size, hidden_size, output_size)
gru2 = GRUModel(input_size, hidden_size, output_size)
models = [gru1, gru2]
optimizers = [optim.Adam(model.parameters(), lr=0.001) for model in models]
# NOTE: nn.CrossEntropyLoss applies log-softmax internally, so the model
# outputs fed to it should be raw logits.
criterion = nn.CrossEntropyLoss()


def train_epoch(model, optimizer, criterion, dataloader):
    """Run one training pass over *dataloader* and return the mean loss.

    The return value is the sample-weighted average loss over the whole
    underlying dataset: each batch loss is scaled by its batch size
    before summation and the total is divided by ``len(dataloader.dataset)``.
    """
    model.train()
    total_loss = 0.0
    for batch_inputs, batch_labels in dataloader:
        optimizer.zero_grad()
        predictions = model(batch_inputs)
        batch_loss = criterion(predictions, batch_labels)
        batch_loss.backward()
        optimizer.step()
        # Weight by batch size so the final average is per-sample.
        total_loss += batch_loss.item() * batch_inputs.size(0)
    return total_loss / len(dataloader.dataset)


# # 示例输入数据
# example_input = torch.randn(1, 300, input_size)  # 假设输入形状为 (batch_size, sequence_length, input_size)
#
# # 执行前向传播
# with torch.no_grad():
#     output = gru1(example_input)
#
# # 打印输出结果
# print("Output shape:", output.shape)
# print("Output values:", output)


# Rank the dataset's samples by the model's prediction confidence.
def confidence_sorting(model, dataloader):
    """Return ``(confidences, labels)`` sorted by descending confidence.

    For each batch the model outputs are softmaxed into class
    probabilities and the maximum class probability is taken as that
    sample's confidence.  Confidences and their true labels are sorted
    together, most confident first, and returned as two parallel tuples.
    """
    model.eval()
    scored = []
    with torch.no_grad():
        for batch_inputs, batch_labels in dataloader:
            probabilities = nn.Softmax(dim=1)(model(batch_inputs))
            top_probs, _ = torch.max(probabilities, dim=1)
            scored.extend(zip(top_probs.tolist(), batch_labels.tolist()))
    # Stable sort, highest confidence first.
    scored.sort(key=lambda pair: pair[0], reverse=True)
    sorted_confidences, sorted_targets = zip(*scored)
    return sorted_confidences, sorted_targets


class SortedDataset(Dataset):
    """Dataset of (embedding, label) pairs ordered by descending confidence.

    The confidences are used only to rank the samples at construction
    time; ``__getitem__`` yields the (embedding, label) pair at the given
    rank, most confident first.
    """

    def __init__(self, confidences, embeddings, labels):
        triples = zip(confidences, embeddings, labels)
        # Stable sort, highest confidence first.
        self.data = sorted(triples, key=lambda t: t[0], reverse=True)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        _, embedding, label = self.data[idx]
        return embedding, label


# Ensemble evaluation: soft-vote across models and measure accuracy.
def evaluate_model_with_voting(models, dataloader):
    """Evaluate an ensemble of models by soft voting; return the accuracy.

    Each model's outputs are softmaxed into class probabilities, the
    probabilities are averaged across models, and the argmax of the
    average is the ensemble prediction.  The accuracy is printed (as
    before) and now also returned so callers can log or track it; an
    empty loader yields 0.0 instead of crashing.
    """
    for model in models:
        model.eval()

    predictions = []
    targets = []
    with torch.no_grad():
        for inputs, labels in dataloader:
            # Per-model class probabilities for this batch.
            all_probs = [nn.Softmax(dim=1)(model(inputs)) for model in models]
            # Soft vote: mean probability across the ensemble.
            avg_probs = torch.mean(torch.stack(all_probs), dim=0)
            predictions.extend(torch.argmax(avg_probs, dim=1).tolist())
            # Flatten in case labels arrive as (batch, 1).
            targets.extend(labels.view(-1).tolist())

    # Plain fraction-correct (equivalent to sklearn's accuracy_score for
    # integer labels) with an explicit empty-input guard.
    correct = sum(int(p == t) for p, t in zip(predictions, targets))
    accuracy = correct / len(targets) if targets else 0.0
    print(f"Test Accuracy with Voting: {accuracy}")
    return accuracy


# 8. Alternating ("cross") training loop: the two models take turns,
# training one epoch each, with per-epoch validation, ensemble testing,
# and checkpointing of the model that was just trained.
for epoch in range(num_epochs):
    # Even epochs train models[0] (gru1), odd epochs train models[1] (gru2).
    model_index = epoch % 2
    model = models[model_index]
    optimizer = optimizers[model_index]

    # Train the active model for one epoch.
    train_loss = train_epoch(model, optimizer, criterion, train_loader)
    print(f"Epoch {epoch + 1}, Train Loss: {train_loss}")

    # NOTE(review): the original code built a confidence-sorted DataLoader
    # here, but the call crashed with a TypeError (SortedDataset requires
    # the embeddings, which confidence_sorting does not return) and the
    # resulting loader was never used.  The dead, broken pipeline has been
    # removed; to reinstate curriculum-style reordering, extend
    # confidence_sorting to also return the inputs.

    # Evaluate the active model on the validation split.
    model.eval()  # leave training mode before inference
    val_predictions = []
    val_targets = []
    with torch.no_grad():
        for inputs, labels in validation_loader:
            outputs = model(inputs)
            probs = nn.Softmax(dim=1)(outputs)
            preds = torch.argmax(probs, dim=1)
            val_predictions.extend(preds.tolist())
            # Flatten in case labels arrive as (batch, 1).
            val_targets.extend(labels.view(-1).tolist())
    accuracy = accuracy_score(val_targets, val_predictions)
    print(f"Epoch {epoch + 1}, Validation Accuracy: {accuracy}")

    # Evaluate the two-model ensemble on the test split.
    evaluate_model_with_voting(models, test_loader)

    # Checkpoint the model that was trained this epoch.
    model_save_path = f"./save/2GRU/model/gru_model_epoch_{epoch + 1}.pt"
    os.makedirs(os.path.dirname(model_save_path), exist_ok=True)
    torch.save(model.state_dict(), model_save_path)
    print(f"Model saved at: {model_save_path}")
