import torch
import torch.nn as nn
from transformers import BertTokenizer, BertModel
from sklearn.metrics import roc_auc_score
import numpy as np


# TextCNN feature extractor: parallel n-gram convolutions with 1-max pooling.
class TextCNN(nn.Module):
    """Extract fixed-size features from a sequence of token embeddings.

    Args:
        input_channels: number of input channels (1 for a plain embedding matrix).
        num_filters: number of convolution filters per filter size.
        filter_sizes: iterable of n-gram window heights.
        embed_dim: width of each token embedding. Defaults to 768, the hidden
            size of bert-base-chinese (previously hard-coded).
    """

    def __init__(self, input_channels, num_filters, filter_sizes, embed_dim=768):
        super(TextCNN, self).__init__()
        # One conv per window size; each kernel spans the full embedding width,
        # so the width dimension collapses to 1 after convolution.
        self.convs = nn.ModuleList([
            nn.Conv2d(input_channels, num_filters, (fs, embed_dim)) for fs in filter_sizes
        ])

    def forward(self, x):
        """Return concatenated max-pooled convolution features.

        Args:
            x: (batch_size, seq_len, embed_dim) embedding tensor. seq_len must
                be at least max(filter_sizes) for the convolutions to be valid.

        Returns:
            (batch_size, num_filters * len(filter_sizes)) feature tensor.
        """
        x = x.unsqueeze(1)  # add a channel dim: (batch, 1, seq_len, embed_dim)
        # Conv output width is 1 (kernel spans the full embedding); squeeze it,
        # leaving (batch, num_filters, seq_len - fs + 1) per filter size.
        conv_outs = [torch.relu(conv(x)).squeeze(3) for conv in self.convs]
        # Global max-pool over the remaining time dimension -> (batch, num_filters).
        pooled = [torch.max_pool1d(c, c.size(2)).squeeze(2) for c in conv_outs]
        return torch.cat(pooled, 1)  # (batch, num_filters * len(filter_sizes))


# Multi-head scaled dot-product self-attention.
class MultiHeadAttention(nn.Module):
    """Standard multi-head self-attention over a (batch, seq, d_model) input.

    Args:
        d_model: feature dimension of the input and output.
        num_heads: number of attention heads; must divide d_model evenly.

    Raises:
        ValueError: if d_model is not divisible by num_heads (previously this
            silently produced a wrong per-head split via integer division).
    """

    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        if d_model % num_heads != 0:
            raise ValueError(
                f'd_model ({d_model}) must be divisible by num_heads ({num_heads})'
            )
        self.num_heads = num_heads
        self.d_model = d_model
        self.d_k = d_model // num_heads  # per-head feature dimension
        self.W_q = nn.Linear(d_model, d_model)
        self.W_k = nn.Linear(d_model, d_model)
        self.W_v = nn.Linear(d_model, d_model)
        self.W_o = nn.Linear(d_model, d_model)

    def forward(self, x):
        """Self-attend over x.

        Args:
            x: (batch_size, seq_len, d_model) input tensor.

        Returns:
            (batch_size, seq_len, d_model) attended and output-projected tensor.
        """
        batch_size = x.size(0)
        # Project, then reshape to (batch, num_heads, seq_len, d_k).
        Q = self.W_q(x).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
        K = self.W_k(x).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)
        V = self.W_v(x).view(batch_size, -1, self.num_heads, self.d_k).transpose(1, 2)

        # Scale logits by sqrt(d_k) to keep the softmax well-conditioned.
        # (Plain Python arithmetic avoids allocating a scalar tensor per call.)
        attn_scores = torch.matmul(Q, K.transpose(-2, -1)) / (self.d_k ** 0.5)
        attn_probs = torch.softmax(attn_scores, dim=-1)
        # Merge heads back to (batch, seq_len, d_model), then output-project.
        output = torch.matmul(attn_probs, V).transpose(1, 2).contiguous().view(batch_size, -1, self.d_model)
        return self.W_o(output)


# End-to-end recommender: BERT encoder -> TextCNN -> self-attention -> sigmoid score.
class RecommendationModel(nn.Module):
    """Score texts with a BERT + TextCNN + multi-head-attention pipeline.

    Args:
        num_filters: convolution filters per filter size in the TextCNN stage.
        filter_sizes: n-gram window sizes. Default is an immutable tuple —
            the previous list literal was a shared mutable default argument.
        d_model: currently unused here (BERT fixes the hidden size at 768);
            kept for interface compatibility.
        num_heads: attention heads; must divide num_filters * len(filter_sizes).
    """

    def __init__(self, num_filters=100, filter_sizes=(3, 4, 5), d_model=768, num_heads=6):
        super(RecommendationModel, self).__init__()
        self.bert = BertModel.from_pretrained('bert-base-chinese')
        self.textcnn = TextCNN(1, num_filters, filter_sizes)
        # Attention operates on the concatenated TextCNN feature vector
        # (300 dims with the defaults), not on BERT's d_model.
        self.multi_head_attn = MultiHeadAttention(num_filters * len(filter_sizes), num_heads)
        self.fc = nn.Linear(num_filters * len(filter_sizes), 1)

    def forward(self, input_ids, attention_mask):
        """Return a per-example relevance probability.

        Args:
            input_ids: (batch, seq_len) token id tensor.
            attention_mask: (batch, seq_len) padding mask for BERT.

        Returns:
            (batch, 1) tensor of sigmoid probabilities in [0, 1].
        """
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        last_hidden_states = outputs.last_hidden_state  # (batch, seq_len, 768)
        textcnn_output = self.textcnn(last_hidden_states)  # (batch, 300)
        # NOTE(review): the 2-D feature vector is treated by the attention's
        # .view() as a length-1 sequence, so attention degenerates to a learned
        # projection here — confirm this is intended.
        attn_output = self.multi_head_attn(textcnn_output)  # (batch, 1, 300)
        output = self.fc(attn_output)  # (batch, 1, 1)
        return torch.sigmoid(output).squeeze(-1)  # (batch, 1)


# Discounted cumulative gain of the top-k items as ranked by y_score.
def dcg_score(y_true, y_score, k=5):
    """Return DCG@k using exponential gain (2^rel - 1) and log2 discounts.

    Args:
        y_true: relevance labels.
        y_score: predicted scores; items are ranked by descending score.
        k: ranking cutoff.

    Returns:
        DCG@k as a float.
    """
    # Descending ranking by predicted score, truncated to the top k.
    ranking = np.argsort(y_score)[::-1][:k]
    rels = np.take(y_true, ranking)
    # 1-indexed positions; discount for position p is log2(p + 1).
    positions = np.arange(1, len(rels) + 1)
    return np.sum((np.power(2.0, rels) - 1) / np.log2(positions + 1))


# Normalized DCG: actual DCG divided by the DCG of the ideal ranking.
def ndcg_score(y_true, y_score, k=5):
    """Return nDCG@k, or 0 when no ranking can earn any gain."""
    # Ideal DCG: rank items by their true relevance.
    ideal = dcg_score(y_true, y_true, k)
    if ideal == 0:
        # All-zero relevance — every ranking is equally (un)informative.
        return 0
    actual = dcg_score(y_true, y_score, k)
    return actual / ideal


# Mean reciprocal rank of the relevant items in the predicted ranking.
def mrr_score(y_true, y_score):
    """Return MRR for a single ranked list with binary relevance labels.

    Each relevant item contributes 1/rank (1-indexed position in the ranking
    induced by descending y_score); the sum is averaged over the number of
    relevant items.

    Bug fix: the previous version divided by the total number of items,
    which under-reports MRR whenever irrelevant items are present, and it
    divided by zero on an empty input.

    Args:
        y_true: binary relevance labels (0/1).
        y_score: predicted scores; higher score means ranked earlier.

    Returns:
        MRR in [0, 1] as a float; 0.0 when there are no relevant items.
    """
    y_true = np.asarray(y_true)
    num_relevant = np.sum(y_true)
    if num_relevant == 0:
        return 0.0  # no relevant items (or empty list): avoid division by zero
    order = np.argsort(y_score)[::-1]
    ranked = np.take(y_true, order)
    rr = ranked / (np.arange(len(ranked)) + 1)
    return np.sum(rr) / num_relevant


# Simulated data. (The item texts are intentionally Chinese — the tokenizer
# and model are bert-base-chinese.)
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')
# A slightly larger simulated corpus: ten "product description N" items.
input_texts = ["产品描述1", "产品描述2", "产品描述3", "产品描述4", "产品描述5", "产品描述6", "产品描述7", "产品描述8", "产品描述9", "产品描述10"]
input_ids = [tokenizer.encode(text, add_special_tokens=True) for text in input_texts]
# Pad every sequence to the batch maximum so the lists stack into one tensor.
max_length = max(len(ids) for ids in input_ids)
input_ids = [ids + [tokenizer.pad_token_id] * (max_length - len(ids)) for ids in input_ids]
input_ids = torch.tensor(input_ids)
# Mask is 1 for real tokens, 0 for padding — consumed by BERT's attention.
attention_mask = (input_ids != tokenizer.pad_token_id).long()
labels = torch.tensor([0, 1, 0, 1, 0, 1, 0, 1, 0, 1])  # simulated like/dislike labels

# Train/validation split: first 80% train, last 20% validation.
# NOTE(review): the data is not shuffled before splitting — with this toy
# alternating-label data that is fine, but confirm for real datasets.
train_size = int(0.8 * len(input_ids))
train_input_ids = input_ids[:train_size]
train_attention_mask = attention_mask[:train_size]
train_labels = labels[:train_size]

val_input_ids = input_ids[train_size:]
val_attention_mask = attention_mask[train_size:]
val_labels = labels[train_size:]

# Initialize model, loss (BCE over sigmoid probabilities — the model already
# applies sigmoid, so BCELoss rather than BCEWithLogitsLoss), and optimizer.
model = RecommendationModel()
criterion = nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

# Training loop: one full-batch gradient step per epoch, then ranking metrics
# on the held-out validation set.
for epoch in range(10):
    model.train()
    optimizer.zero_grad()
    outputs = model(train_input_ids, train_attention_mask)
    # NOTE(review): .squeeze() collapses (batch, 1) -> (batch,), but would also
    # collapse a batch of 1 to a 0-d scalar and break BCELoss. Safe here with a
    # fixed 8-sample training set; squeeze(-1) would be more robust.
    outputs = outputs.squeeze()
    loss = criterion(outputs, train_labels.float())
    loss.backward()
    optimizer.step()
    print(f'Epoch {epoch + 1}, Train Loss: {loss.item()}')

    # Validation pass (no gradients, eval-mode dropout/BN behavior).
    model.eval()
    with torch.no_grad():
        val_outputs = model(val_input_ids, val_attention_mask)
        val_outputs = val_outputs.squeeze().cpu().numpy()
        val_labels_np = val_labels.cpu().numpy()

        # AUC needs at least two validation samples.
        # NOTE(review): roc_auc_score also raises when y_true contains only one
        # class — the fixed split here has one 0 and one 1, but confirm for
        # other data before relying on this guard alone.
        if len(val_labels_np) < 2:
            print(f'Epoch {epoch + 1}, Not enough samples in validation set to calculate AUC.')
            auc = None
        else:
            # Compute AUC.
            auc = roc_auc_score(val_labels_np, val_outputs)

        # Compute MRR.
        mrr = mrr_score(val_labels_np, val_outputs)

        # Compute nDCG@5.
        ndcg_5 = ndcg_score(val_labels_np, val_outputs, k=5)

        # Compute nDCG@10.
        ndcg_10 = ndcg_score(val_labels_np, val_outputs, k=10)

        if auc is not None:
            print(f'Epoch {epoch + 1}, AUC: {auc}, MRR: {mrr}, nDCG@5: {ndcg_5}, nDCG@10: {ndcg_10}')
        else:
            print(f'Epoch {epoch + 1}, MRR: {mrr}, nDCG@5: {ndcg_5}, nDCG@10: {ndcg_10}')