import torch
import torch.nn as nn
from transformers import BertTokenizer, BertModel
from sklearn.metrics import roc_auc_score
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

# Use the GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# TextCNN feature extractor.
class TextCNN(nn.Module):
    """Multi-size convolutional text feature extractor (Kim-2014 style).

    Applies one Conv2d per filter size, each spanning the full embedding
    width, followed by max-over-time pooling, and concatenates the results.

    Args:
        input_channels: number of input channels (1 for a plain embedding map).
        num_filters: number of filters per filter size.
        filter_sizes: iterable of window heights (in tokens) for the convolutions.
        embed_dim: width of the token embeddings. Defaults to 768, the hidden
            size of bert-base models (previously hard-coded).
    """

    def __init__(self, input_channels, num_filters, filter_sizes, embed_dim=768):
        super(TextCNN, self).__init__()
        # Each kernel consumes the full embedding width, so conv output width is 1.
        self.convs = nn.ModuleList([
            nn.Conv2d(input_channels, num_filters, (fs, embed_dim))
            for fs in filter_sizes
        ])

    def forward(self, x):
        """Map (batch, seq_len, embed_dim) -> (batch, num_filters * len(filter_sizes)).

        Requires seq_len >= max(filter_sizes), otherwise the convolutions
        produce an empty time dimension.
        """
        x = x.unsqueeze(1)  # (batch, 1, seq_len, embed_dim)
        # Conv output: (batch, num_filters, seq_len - fs + 1, 1); drop the width dim.
        feature_maps = [torch.relu(conv(x)).squeeze(3) for conv in self.convs]
        # Max-over-time pooling collapses the variable-length time dimension.
        pooled = [torch.max_pool1d(fm, fm.size(2)).squeeze(2) for fm in feature_maps]
        return torch.cat(pooled, 1)  # (batch, num_filters * len(filter_sizes))

# Multi-head self-attention.
class MultiHeadAttention(nn.Module):
    """Standard scaled dot-product multi-head self-attention.

    Args:
        d_model: model (feature) dimension; must be divisible by num_heads.
        num_heads: number of attention heads.

    Raises:
        ValueError: if d_model is not divisible by num_heads. The original
            code silently truncated d_k via integer division, which makes the
            later view() either fail or reshape to a wrong sequence length.
    """

    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        if d_model % num_heads != 0:
            raise ValueError(
                f"d_model ({d_model}) must be divisible by num_heads ({num_heads})"
            )
        self.num_heads = num_heads
        self.d_model = d_model
        self.d_k = d_model // num_heads
        self.W_q = nn.Linear(d_model, d_model)
        self.W_k = nn.Linear(d_model, d_model)
        self.W_v = nn.Linear(d_model, d_model)
        self.W_o = nn.Linear(d_model, d_model)

    def forward(self, x):
        """Self-attention over x: (batch, seq_len, d_model) -> same shape.

        A 2-D input (batch, d_model) is also accepted; the view() below then
        treats it as a length-1 sequence and returns (batch, 1, d_model).
        """
        batch_size = x.size(0)
        # Project and split into heads: (batch, num_heads, seq_len, d_k).
        Q = self.W_q(x).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
        K = self.W_k(x).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
        V = self.W_v(x).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)

        # Scaled dot-product attention; plain float scale avoids building a tensor.
        attn_scores = torch.matmul(Q, K.transpose(-2, -1)) / (self.d_k ** 0.5)
        attn_probs = torch.softmax(attn_scores, dim=-1)
        # Merge heads back: (batch, seq_len, d_model).
        output = torch.matmul(attn_probs, V).transpose(1, 2).contiguous().view(batch_size, -1, self.d_model)
        return self.W_o(output)

# Complete recommendation model.
class RecommendationModel(nn.Module):
    """End-to-end scorer: TextCNN features -> self-attention -> sigmoid score.

    Args:
        num_filters: filters per convolution size in the TextCNN stage.
        filter_sizes: convolution window heights for the TextCNN stage.
            Default changed from a list to a tuple to avoid the mutable
            default-argument pitfall; callers may still pass lists.
        d_model: unused; kept only for backward compatibility. The attention
            dimension is actually num_filters * len(filter_sizes).
        num_heads: attention heads; must divide num_filters * len(filter_sizes).
    """

    def __init__(self, num_filters=100, filter_sizes=(3, 4, 5), d_model=768, num_heads=6):
        super(RecommendationModel, self).__init__()
        feature_dim = num_filters * len(filter_sizes)
        self.textcnn = TextCNN(1, num_filters, filter_sizes)
        # NOTE(review): TextCNN pools each sample down to a single vector, so
        # the attention below sees a length-1 sequence and its softmax is a
        # no-op — it effectively applies W_o(W_v(x)). Kept for compatibility.
        self.multi_head_attn = MultiHeadAttention(feature_dim, num_heads)
        self.fc = nn.Linear(feature_dim, 1)

    def forward(self, input_vectors):
        """Score BERT embeddings (batch, seq_len, 768) -> probabilities (batch, 1)."""
        textcnn_output = self.textcnn(input_vectors)        # (batch, feature_dim)
        attn_output = self.multi_head_attn(textcnn_output)  # (batch, 1, feature_dim)
        output = self.fc(attn_output)                       # (batch, 1, 1)
        return torch.sigmoid(output).squeeze(-1)            # (batch, 1)

# Compute DCG@k.
def dcg_score(y_true, y_score, k=5):
    """Discounted cumulative gain of the top-k items ranked by y_score."""
    top_k = np.argsort(y_score)[::-1][:k]
    relevances = np.take(y_true, top_k)
    gains = 2 ** relevances - 1
    positions = np.arange(len(relevances))
    return np.sum(gains / np.log2(positions + 2))

# Compute nDCG@k.
def ndcg_score(y_true, y_score, k=5):
    """Normalized DCG@k: DCG of the predicted ranking over the ideal DCG.

    Returns 0 when the ideal DCG is 0 (all relevances zero), avoiding a
    division by zero.
    """
    ideal = dcg_score(y_true, y_true, k)
    return 0 if ideal == 0 else dcg_score(y_true, y_score, k) / ideal

# Load the datasets.
# NOTE(review): assumes both CSVs have 'description' and 'label' columns — verify.
train_data = pd.read_csv('train_product_2.csv')
test_data = pd.read_csv('test_product_2.csv')

# Extract input texts and labels.
train_input_texts = train_data['description'].tolist()
train_labels = torch.tensor(train_data['label'].tolist())

test_input_texts = test_data['description'].tolist()
test_labels = torch.tensor(test_data['label'].tolist())

# Load the BERT model and tokenizer.
# NOTE(review): a comment in TextCNN mentions bert-base-chinese, but the
# English 'bert-base-uncased' checkpoint is loaded here — confirm intent.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
bert_model = BertModel.from_pretrained('bert-base-uncased')
bert_model = bert_model.to(device)
bert_model.eval()  # feature extraction only; BERT weights are never fine-tuned

# Generate BERT embeddings for a list of texts.
def generate_embeddings(texts):
    """Encode each text with BERT and return right-padded hidden states.

    Args:
        texts: list of strings.

    Returns:
        Tensor of shape (len(texts), max_seq_len, 768) on `device`; shorter
        sequences are zero-padded on the right.
    """
    all_embeddings = []
    for text in texts:
        # Truncate to BERT's 512-token position limit; without this, the
        # model raises on any text longer than 512 tokens.
        input_ids = tokenizer.encode(
            text, add_special_tokens=True, truncation=True, max_length=512
        )
        input_ids = torch.tensor([input_ids]).to(device)
        # Single un-padded sequence, so this mask is all ones; kept explicit
        # in case batched/padded encoding is introduced later.
        attention_mask = (input_ids != tokenizer.pad_token_id).long()
        with torch.no_grad():
            outputs = bert_model(input_ids=input_ids, attention_mask=attention_mask)
            last_hidden_states = outputs.last_hidden_state
        all_embeddings.append(last_hidden_states.squeeze(0).cpu())

    # Pad all sequences with zeros to the longest one in this set.
    max_length = max(embedding.size(0) for embedding in all_embeddings)
    padded_embeddings = []
    for embedding in all_embeddings:
        padding = torch.zeros((max_length - embedding.size(0), 768))
        padded_embeddings.append(torch.cat([embedding, padding], dim=0))
    return torch.stack(padded_embeddings).to(device)

# Pre-compute BERT embeddings once; the model below trains on these fixed features.
train_embeddings = generate_embeddings(train_input_texts)
test_embeddings = generate_embeddings(test_input_texts)

train_labels = train_labels.to(device)
test_labels = test_labels.to(device)

# Initialize the model, loss function and optimizer.
model = RecommendationModel()
model = model.to(device)
criterion = nn.BCELoss()  # expects sigmoid outputs in [0, 1] and float targets
criterion = criterion.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Per-epoch histories of the training loss and evaluation metrics.
train_losses = []
aucs = []
ndcg_5s = []
ndcg_10s = []

# Training loop: full-batch gradient descent (the whole training set is one batch).
num_epochs = 100
for epoch in range(num_epochs):
    model.train()
    optimizer.zero_grad()
    outputs = model(train_embeddings)
    outputs = outputs.squeeze()
    loss = criterion(outputs, train_labels.float())
    loss.backward()
    optimizer.step()
    train_loss = loss.item()
    train_losses.append(train_loss)
    print(f'Epoch {epoch + 1}, Train Loss: {train_loss}')

    # Evaluation on the held-out test set after every epoch.
    model.eval()
    with torch.no_grad():
        test_outputs = model(test_embeddings)
        # atleast_1d keeps the ranking metrics working when the test set has
        # a single sample (squeeze() would otherwise yield a 0-d array).
        test_outputs = np.atleast_1d(test_outputs.squeeze().cpu().numpy())
        test_labels_np = test_labels.cpu().numpy()

        # AUC is undefined with fewer than 2 samples OR a single label class;
        # roc_auc_score raises ValueError in the single-class case, which the
        # original sample-count check did not cover.
        if len(test_labels_np) < 2 or len(np.unique(test_labels_np)) < 2:
            print(f'Epoch {epoch + 1}, Not enough samples/classes in test set to calculate AUC.')
            auc = None
        else:
            auc = roc_auc_score(test_labels_np, test_outputs)

        # Ranking quality at two cutoffs.
        ndcg_5 = ndcg_score(test_labels_np, test_outputs, k=5)
        ndcg_10 = ndcg_score(test_labels_np, test_outputs, k=10)

        aucs.append(auc)
        ndcg_5s.append(ndcg_5)
        ndcg_10s.append(ndcg_10)

        if auc is not None:
            print(f'Epoch {epoch + 1}, AUC: {auc}, nDCG@5: {ndcg_5}, nDCG@10: {ndcg_10}')
        else:
            print(f'Epoch {epoch + 1}, nDCG@5: {ndcg_5}, nDCG@10: {ndcg_10}')

# Save the per-epoch evaluation metrics to a CSV file.
# NOTE(review): AUC entries may be None (written as empty cells) for epochs
# where AUC could not be computed.
data = {
    'Epoch': range(1, num_epochs + 1),
    'AUC': aucs,
    'nDCG@5': ndcg_5s,
    'nDCG@10': ndcg_10s
}
df = pd.DataFrame(data)
df.to_csv('ca2.csv', index=False)

# Plot the training-loss curve.
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.plot(range(1, num_epochs + 1), train_losses, marker='o')
plt.title('Train Loss over Epochs')
plt.xlabel('Epoch')
plt.ylabel('Train Loss')

# Plot the evaluation-metric (AUC) curve.
plt.subplot(1, 2, 2)
plt.plot(range(1, num_epochs + 1), aucs, marker='o', label='AUC')
plt.title('Evaluation Metrics over Epochs')
plt.xlabel('Epoch')
plt.ylabel('Metric Value')
plt.legend()

plt.tight_layout()
plt.show()

# Bar chart of the last epoch's ranking metrics.
last_epoch_metrics = {
    'nDCG@5': ndcg_5s[-1],
    'nDCG@10': ndcg_10s[-1]
}
metrics_names = list(last_epoch_metrics.keys())
metrics_values = list(last_epoch_metrics.values())

plt.figure(figsize=(8, 6))
plt.bar(metrics_names, metrics_values)
plt.title('Last Epoch Evaluation Metrics')
plt.xlabel('Metrics')
plt.ylabel('Value')
# Annotate each bar with its value rounded to 4 decimals.
for i, v in enumerate(metrics_values):
    plt.text(i, v, str(round(v, 4)), ha='center')
plt.show()