import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
from transformers import BertTokenizer, BertModel
import numpy as np
import pandas as pd

# Use the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the behaviour log and the news metadata from their TSV files.
def load_data():
    """Read behaviors.tsv and news.tsv into DataFrames and return them."""
    behavior_cols = ['id', 'user_id', 'timestamp', 'history_news', 'candidate_news']
    news_cols = ['news_id', 'category', 'subcategory', 'title',
                 'description', 'url', 'entities', 'tags']
    behaviors = pd.read_csv('behaviors.tsv', sep='\t', names=behavior_cols)
    news = pd.read_csv('news.tsv', sep='\t', names=news_cols)
    return behaviors, news

# Preprocess behaviours: encode every news title with BERT and build padded
# user-history / candidate tensors plus (placeholder) click labels.
def preprocess_data(behaviors_df, news_df, tokenizer, bert_model, batch_size=10):
    """Convert behaviour rows into fixed-size tensors of news embeddings.

    Args:
        behaviors_df: DataFrame with 'history_news' and 'candidate_news'
            columns of space-separated news ids (history may be NaN).
        news_df: DataFrame mapping 'news_id' to 'title'.
        tokenizer: BERT tokenizer used to encode titles.
        bert_model: BERT model producing token embeddings (run under no_grad).
        batch_size: number of behaviour rows processed per batch.

    Returns:
        Tuple of tensors (user_news, candidate_news, labels) with shapes
        (rows, max_history, hidden), (rows, max_candidates, hidden) and
        (rows, max_candidates).
    """
    # Hidden size of the encoder; fall back to bert-base's 768 when the model
    # object exposes no config (e.g. a lightweight stub).
    hidden_size = getattr(getattr(bert_model, "config", None), "hidden_size", 768)

    # O(1) news_id -> title lookup instead of scanning news_df once per id.
    # setdefault keeps the FIRST occurrence, matching the old `.values[0]`.
    title_lookup = {}
    for nid, title in zip(news_df['news_id'], news_df['title']):
        title_lookup.setdefault(nid, title)

    def _split_ids(value):
        # The field may be NaN (e.g. users with no reading history).
        return [] if pd.isna(value) else value.split()

    def _encode_news(news_ids):
        """Return a (len(news_ids), hidden_size) tensor of title embeddings."""
        embeddings = []
        for news_id in news_ids:
            title = title_lookup.get(news_id, "")
            if not isinstance(title, str):  # guard against NaN titles
                title = ""
            input_ids = torch.tensor(
                [tokenizer.encode(title, add_special_tokens=True)]).to(device)
            with torch.no_grad():
                outputs = bert_model(input_ids)
                # Mean-pool the token vectors as the news representation.
                embeddings.append(outputs[0].mean(dim=1))
        if not embeddings:
            # Empty id list (previously crashed for empty candidate lists).
            return torch.zeros(0, hidden_size).to(device)
        return torch.cat(embeddings, dim=0)

    def _pad_rows(tensor, target_len):
        """Zero-pad a (n, hidden) tensor along dim 0 up to target_len rows."""
        missing = target_len - tensor.size(0)
        if missing > 0:
            padding = torch.zeros(missing, tensor.size(1)).to(device)
            tensor = torch.cat([tensor, padding], dim=0)
        return tensor

    num_rows = len(behaviors_df)

    # First pass: find the longest history / candidate list for padding.
    max_user_history_len = 0
    max_candidate_len = 0
    for _, row in behaviors_df.iterrows():
        max_user_history_len = max(max_user_history_len,
                                   len(_split_ids(row['history_news'])))
        max_candidate_len = max(max_candidate_len,
                                len(_split_ids(row['candidate_news'])))

    user_news_list = []
    candidate_news_list = []
    labels = []

    for start_idx in range(0, num_rows, batch_size):
        batch_behaviors = behaviors_df.iloc[start_idx:start_idx + batch_size]

        batch_user_news = []
        batch_candidate_news = []
        batch_labels = []

        for _, row in batch_behaviors.iterrows():
            user_history = _split_ids(row['history_news'])
            candidate = _split_ids(row['candidate_news'])

            batch_user_news.append(
                _pad_rows(_encode_news(user_history), max_user_history_len))
            batch_candidate_news.append(
                _pad_rows(_encode_news(candidate), max_candidate_len))

            # NOTE(review): placeholder labels — every candidate is marked
            # "not clicked" (all zeros, as in the original code). Real click
            # suffixes like 'Nxxxx-1', if present, are ignored here; confirm
            # before training for real.
            batch_labels.append([0] * max_candidate_len)

        user_news_list.append(torch.stack(batch_user_news))
        candidate_news_list.append(torch.stack(batch_candidate_news))
        labels.append(torch.tensor(batch_labels, dtype=torch.float32).to(device))

    user_news_tensor = torch.cat(user_news_list, dim=0)
    candidate_news_tensor = torch.cat(candidate_news_list, dim=0)
    labels_tensor = torch.cat(labels, dim=0)

    return user_news_tensor, candidate_news_tensor, labels_tensor

# 多头自注意力层
class MultiHeadSelfAttention(nn.Module):
    def __init__(self, embed_size, num_heads):
        super(MultiHeadSelfAttention, self).__init__()
        self.embed_size = embed_size
        self.num_heads = num_heads
        self.head_dim = embed_size // num_heads

        assert (
                self.head_dim * num_heads == embed_size
        ), "Embedding size needs to be divisible by num_heads"

        self.values = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.keys = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.queries = nn.Linear(self.head_dim, self.head_dim, bias=False)
        self.fc_out = nn.Linear(num_heads * self.head_dim, embed_size)

    def forward(self, values, keys, query, mask):
        N = query.shape[0]
        value_len, key_len, query_len = values.shape[1], keys.shape[1], query.shape[1]

        # Split the embedding into self.num_heads different pieces
        values = values.reshape(N, value_len, self.num_heads, self.head_dim)
        keys = keys.reshape(N, key_len, self.num_heads, self.head_dim)
        queries = query.reshape(N, query_len, self.num_heads, self.head_dim)

        values = self.values(values)
        keys = self.keys(keys)
        queries = self.queries(queries)

        # Scaled dot - product attention
        energy = torch.einsum("nqhd,nkhd->nhqk", [queries, keys])
        if mask is not None:
            energy = energy.masked_fill(mask == 0, float("-1e20"))

        attention = torch.softmax(energy / (self.embed_size ** (1 / 2)), dim=3)

        out = torch.einsum("nhql,nlhd->nqhd", [attention, values]).reshape(
            N, query_len, self.num_heads * self.head_dim
        )

        out = self.fc_out(out)
        return out

# News encoder: self-attention over a news item's vectors, mean-pool, project.
class NewsEncoder(nn.Module):
    """Map news embeddings (batch, seq, embed_size) to (batch, hidden_size).

    Also accepts a single pooled vector per item, i.e. (batch, embed_size) —
    the shape NRMS.forward actually supplies — by treating it as a length-1
    sequence. The original code passed such 2-D tensors straight into the
    attention layer, whose reshape to (batch, seq, heads, head_dim) then
    failed with an element-count mismatch.
    """

    def __init__(self, embed_size, num_heads, hidden_size):
        super(NewsEncoder, self).__init__()
        self.self_attention = MultiHeadSelfAttention(embed_size, num_heads)
        self.fc = nn.Linear(embed_size, hidden_size)
        self.dropout = nn.Dropout(0.1)

    def forward(self, news):
        # Promote (batch, embed_size) to (batch, 1, embed_size) so the
        # attention head split is well-formed. 3-D input is unchanged.
        if news.dim() == 2:
            news = news.unsqueeze(1)
        attn_output = self.self_attention(news, news, news, mask=None)
        news_repr = torch.mean(attn_output, dim=1)  # pool over the sequence
        news_repr = self.fc(news_repr)
        news_repr = self.dropout(news_repr)
        # Dropout before ReLU preserves the original ordering.
        return torch.relu(news_repr)

# User encoder: aggregate the encoded history into one user vector.
class UserEncoder(nn.Module):
    """Map a (batch, num_news, hidden_size) history to (batch, hidden_size)."""

    def __init__(self, hidden_size, num_heads):
        super(UserEncoder, self).__init__()
        self.self_attention = MultiHeadSelfAttention(hidden_size, num_heads)
        self.dropout = nn.Dropout(0.1)

    def forward(self, news_reprs):
        attended = self.self_attention(news_reprs, news_reprs, news_reprs, mask=None)
        # Average the attended news vectors, then apply dropout.
        return self.dropout(torch.mean(attended, dim=1))

# NRMS model: score candidate news against a user vector built from history.
class NRMS(nn.Module):
    """Neural news recommendation model with multi-head self-attention."""

    def __init__(self, embed_size, num_heads, hidden_size):
        super(NRMS, self).__init__()
        self.news_encoder = NewsEncoder(embed_size, num_heads, hidden_size)
        self.user_encoder = UserEncoder(hidden_size, num_heads)

    def _encode_news_batch(self, news):
        """Encode (batch, num_news, embed) -> (batch, num_news, hidden).

        Each news item is a single pooled vector, so it is fed to the news
        encoder as a length-1 sequence. The original loop iterated
        `news.transpose(0, 1)` and handed 2-D (batch, embed) slices to the
        attention layer, whose reshape to (batch, seq, heads, head_dim)
        failed; flattening to (batch * num_news, 1, embed) fixes that and
        encodes every item in one batched call instead of a Python loop.
        """
        batch, num_news, embed = news.shape
        flat = news.reshape(batch * num_news, 1, embed)
        reprs = self.news_encoder(flat)
        return reprs.view(batch, num_news, -1)

    def forward(self, user_news, candidate_news):
        # Encode the user's history and derive the user representation.
        user_repr = self.user_encoder(self._encode_news_batch(user_news))

        # Encode the candidate news.
        candidate_reprs = self._encode_news_batch(candidate_news)

        # Dot-product score of each candidate against the user vector,
        # shape (batch, num_candidates); raw logits for BCEWithLogitsLoss.
        return torch.bmm(candidate_reprs, user_repr.unsqueeze(-1)).squeeze(-1)

def padding_collate_fn(batch):
    """Stack (user_news, candidate_news, labels) samples into batch tensors."""
    users, candidates, targets = [], [], []
    for user_news, candidate_news, label in batch:
        users.append(user_news)
        candidates.append(candidate_news)
        targets.append(label)
    return torch.stack(users), torch.stack(candidates), torch.stack(targets)

if __name__ == "__main__":
    embed_size = 768      # BERT hidden size
    num_heads = 4
    hidden_size = 200
    num_epochs = 5
    learning_rate = 0.001
    batch_size = 10

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
    bert_model = BertModel.from_pretrained('bert-base-uncased').to(device)
    bert_model.eval()  # BERT is used as a frozen feature extractor

    behaviors_df, news_df = load_data()
    user_news, candidate_news, labels = preprocess_data(
        behaviors_df, news_df, tokenizer, bert_model, batch_size)

    # 80/20 train/validation split (rows kept in file order, no shuffling).
    train_size = int(0.8 * len(user_news))
    train_user_news = user_news[:train_size]
    train_candidate_news = candidate_news[:train_size]
    train_labels = labels[:train_size]

    val_user_news = user_news[train_size:]
    val_candidate_news = candidate_news[train_size:]
    val_labels = labels[train_size:]

    train_dataset = TensorDataset(train_user_news, train_candidate_news, train_labels)
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size,
                                  shuffle=True, collate_fn=padding_collate_fn)

    val_dataset = TensorDataset(val_user_news, val_candidate_news, val_labels)
    val_dataloader = DataLoader(val_dataset, batch_size=batch_size,
                                collate_fn=padding_collate_fn)

    model = NRMS(embed_size, num_heads, hidden_size).to(device)
    criterion = nn.BCEWithLogitsLoss()  # model outputs raw logits
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    for epoch in range(num_epochs):
        model.train()
        train_loss = 0.0
        for batch_user_news, batch_candidate_news, batch_labels in train_dataloader:
            scores = model(batch_user_news, batch_candidate_news)
            loss = criterion(scores, batch_labels)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
        # Average over batches. The original printed only the LAST batch's
        # loss (and raised NameError on an empty loader); `max(..., 1)`
        # also guards the division against an empty split.
        train_loss /= max(len(train_dataloader), 1)

        model.eval()
        val_loss = 0.0
        with torch.no_grad():
            for batch_user_news, batch_candidate_news, batch_labels in val_dataloader:
                scores = model(batch_user_news, batch_candidate_news)
                val_loss += criterion(scores, batch_labels).item()
            val_loss /= max(len(val_dataloader), 1)

        print(f'Epoch {epoch + 1}/{num_epochs}, Train Loss: {train_loss}, Val Loss: {val_loss}')