import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from transformers import BertConfig, BertModel
import pandas as pd
import numpy as np
from tqdm import tqdm  # progress bar for the training loop

 
# --------------------------
# 1. Data preprocessing and dataset definition
# --------------------------
class UserBehaviorDataset(Dataset):
    """Masked-item-prediction dataset for BERT4Rec (Cloze task over user behavior sequences)."""

    def __init__(self, ratings_df, max_seq_length=20, mask_prob=0.15):
        """
        Build BERT4Rec training data from raw rating rows.

        :param ratings_df: DataFrame with 'USER_MD5', 'MOVIE_ID' and 'RATING_TIME' columns
        :param max_seq_length: maximum sequence length (longer sequences truncated,
                               shorter ones padded)
        :param mask_prob: per-position probability of masking an item (BERT-style MLM)
        """
        self.max_seq_length = max_seq_length
        self.mask_prob = mask_prob

        # 1. Group interactions by user, ordered by timestamp.
        self.user_sequences = self._build_user_sequences(ratings_df)

        # 2. Map raw movie ids to contiguous integer indices; 0 is reserved for PAD.
        all_items = ratings_df['MOVIE_ID'].unique()
        self.item2idx = {item: idx + 1 for idx, item in enumerate(all_items)}
        self.idx2item = {v: k for k, v in self.item2idx.items()}
        self.vocab_size = len(self.item2idx) + 1  # item indices plus PAD

        # 3. Special tokens.
        self.PAD_TOKEN = 0
        self.MASK_TOKEN = self.vocab_size  # one past the largest item index, no collision

    def _build_user_sequences(self, ratings_df):
        """Return a list of per-user item-id sequences sorted by interaction time."""
        ratings_df = ratings_df.sort_values(by=['USER_MD5', 'RATING_TIME'])
        user_sequences = ratings_df.groupby('USER_MD5')['MOVIE_ID'].agg(list).reset_index()
        return user_sequences['MOVIE_ID'].tolist()

    def __len__(self):
        return len(self.user_sequences)

    def __getitem__(self, idx):
        """Return one sample: masked input ids plus labels for the masked positions."""
        seq = self.user_sequences[idx]

        # 1. Convert raw ids to indices and truncate/pad to max_seq_length.
        seq_idx = [self.item2idx[item] for item in seq]
        if len(seq_idx) > self.max_seq_length:
            seq_idx = seq_idx[-self.max_seq_length:]  # keep the most recent events
        else:
            seq_idx += [self.PAD_TOKEN] * (self.max_seq_length - len(seq_idx))

        # 2. Apply BERT-style random masking.
        input_ids = []
        labels = []
        last_real_pos = -1  # position of the last non-PAD item
        masked_any = False
        for pos, item_idx in enumerate(seq_idx):
            if item_idx == self.PAD_TOKEN:
                # Padding never participates in training.
                input_ids.append(self.PAD_TOKEN)
                labels.append(-100)  # -100 is ignored by CrossEntropyLoss
            else:
                last_real_pos = pos
                if np.random.random() < self.mask_prob:
                    input_ids.append(self.MASK_TOKEN)
                    labels.append(item_idx)  # supervise with the original item
                    masked_any = True
                else:
                    input_ids.append(item_idx)
                    labels.append(-100)  # unmasked positions contribute no loss

        # BUG FIX: if random masking hit no position, every label is -100 and the
        # sample produces a NaN loss. Force-mask the last real item instead
        # (this also matches BERT4Rec's inference setup of masking the last item).
        if not masked_any and last_real_pos >= 0:
            input_ids[last_real_pos] = self.MASK_TOKEN
            labels[last_real_pos] = seq_idx[last_real_pos]

        return {
            'input_ids': torch.tensor(input_ids, dtype=torch.long),
            'labels': torch.tensor(labels, dtype=torch.long)
        }


# --------------------------
# 2. BERT4Rec model definition (reuses the standard BERT encoder)
# --------------------------
class BERT4Rec(nn.Module):
    """BERT encoder over item-id sequences with a linear head predicting item indices."""

    def __init__(self, vocab_size, hidden_dim, num_layers, max_seq_length):
        """
        :param vocab_size: number of ids the embedding must cover (items + PAD + MASK)
        :param hidden_dim: transformer hidden size
        :param num_layers: number of transformer encoder layers
        :param max_seq_length: maximum input length (sizes the position embeddings)
        """
        super(BERT4Rec, self).__init__()
        self.config = BertConfig(
            vocab_size=vocab_size,
            hidden_size=hidden_dim,
            num_hidden_layers=num_layers,
            num_attention_heads=4,
            intermediate_size=hidden_dim * 2,
            max_position_embeddings=max_seq_length,
            type_vocab_size=1  # single-segment input; no sentence-type distinction
        )
        self.bert = BertModel(self.config)
        self.output_layer = nn.Linear(hidden_dim, vocab_size)  # per-position item logits

    def forward(self, input_ids, attention_mask=None):
        """
        :param input_ids: [batch_size, seq_len] item-index tensor (0 = PAD)
        :param attention_mask: optional [batch_size, seq_len] mask; if omitted it is
                               derived from input_ids so PAD positions are ignored
        :return: logits of shape [batch_size, seq_len, vocab_size]
        """
        if attention_mask is None:
            # BUG FIX: without an explicit mask, BERT attends to padding tokens.
            # Mask out every PAD (id 0) position by default.
            attention_mask = (input_ids != 0).long()
        outputs = self.bert(input_ids=input_ids, attention_mask=attention_mask)
        last_hidden_state = outputs.last_hidden_state  # [batch_size, seq_len, hidden_dim]
        logits = self.output_layer(last_hidden_state)  # [batch_size, seq_len, vocab_size]
        return logits


# --------------------------
# 3. Training function
# --------------------------
def train_bert4rec(
    ratings_path,
    max_seq_length=20,
    hidden_dim=128,
    num_layers=2,
    batch_size=32,
    learning_rate=1e-4,
    num_epochs=10,
    save_path='bert4rec_model.pth'
):
    """
    Train a BERT4Rec model on a ratings CSV and save a checkpoint.

    :param ratings_path: path to a CSV with 'USER_MD5', 'MOVIE_ID', 'RATING_TIME' columns
    :param max_seq_length: behavior-sequence length used for both data and model
    :param hidden_dim: transformer hidden size
    :param num_layers: number of transformer layers
    :param batch_size: training batch size
    :param learning_rate: AdamW learning rate
    :param num_epochs: number of training epochs
    :param save_path: file path for the saved checkpoint
    """
    # 1. Load data and build the dataset.
    ratings_df = pd.read_csv(ratings_path)
    dataset = UserBehaviorDataset(
        ratings_df,
        max_seq_length=max_seq_length,
        mask_prob=0.15
    )
    dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=True)

    # 2. Initialize model, loss and optimizer.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # The embedding table must cover item indices, PAD, and MASK_TOKEN
    # (MASK_TOKEN == dataset.vocab_size), hence the +1.
    model_vocab_size = dataset.vocab_size + 1
    model = BERT4Rec(
        vocab_size=model_vocab_size,
        hidden_dim=hidden_dim,
        num_layers=num_layers,
        max_seq_length=max_seq_length
    ).to(device)

    criterion = nn.CrossEntropyLoss(ignore_index=-100)  # skip non-masked positions
    optimizer = optim.AdamW(model.parameters(), lr=learning_rate)

    # 3. Training loop.
    model.train()
    for epoch in range(num_epochs):
        total_loss = 0.0
        progress_bar = tqdm(dataloader, desc=f"Epoch {epoch+1}/{num_epochs}")

        for batch in progress_bar:
            input_ids = batch['input_ids'].to(device)
            labels = batch['labels'].to(device)

            # Forward pass.
            logits = model(input_ids)  # [batch_size, seq_len, vocab_size]

            # CrossEntropyLoss expects [N, C] logits and [N] targets.
            loss = criterion(
                logits.view(-1, logits.size(-1)),
                labels.view(-1)
            )

            # Backward pass and parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            total_loss += loss.item()
            progress_bar.set_postfix(loss=loss.item())

        # Report the epoch's mean loss.
        avg_loss = total_loss / len(dataloader)
        print(f"Epoch {epoch+1} | 平均损失: {avg_loss:.4f}")

    # 4. Save the checkpoint.
    # BUG FIX: the model was constructed with dataset.vocab_size + 1, but only
    # dataset.vocab_size was saved before, so a reload with BERT4Rec(vocab_size=
    # checkpoint['vocab_size'], ...) could not load the state dict. Keep the old
    # key for compatibility and additionally store the actual model vocab size
    # and the mask token.
    torch.save({
        'model_state_dict': model.state_dict(),
        'vocab_size': dataset.vocab_size,
        'model_vocab_size': model_vocab_size,
        'mask_token': dataset.MASK_TOKEN,
        'item2idx': dataset.item2idx,
        'max_seq_length': max_seq_length
    }, save_path)
    print(f"模型已保存至 {save_path}")


# --------------------------
# 4. Run fine-tuning
# --------------------------
if __name__ == "__main__":
    # Fine-tune BERT4Rec on the filtered ratings export.
    training_config = {
        'ratings_path': 'filtered_data/ratings_filtered.csv',
        'max_seq_length': 20,
        'hidden_dim': 128,
        'num_layers': 2,
        'batch_size': 32,
        'num_epochs': 10,
        'save_path': 'bert4rec_recommender.pth',
    }
    train_bert4rec(**training_config)