import os
import random
from collections import Counter, defaultdict

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.serialization
from tqdm import tqdm
from transformers import BertConfig, BertModel

# --------------------------
# 设备配置（优先GPU）
# --------------------------
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"使用设备: {device}")


# --------------------------
# Changed: randomly pick 50 users (replaces the quality-user filtering logic)
# --------------------------
def load_and_random_select_users(ratings_path, sample_n=50, random_state=42):
    """从评分数据中随机抽取50名用户"""
    try:
        # 从评分数据中提取唯一用户ID
        ratings_df = pd.read_csv(ratings_path)
        all_users = ratings_df['USER_MD5'].unique().tolist()
        
        # 确保有足够的用户可供抽取
        if len(all_users) < sample_n:
            print(f"警告：用户总数不足{sample_n}名，将返回全部{len(all_users)}名用户")
            sample_n = len(all_users)
        
        # 固定随机种子，保证结果可复现
        random.seed(random_state)
        selected_users = random.sample(all_users, sample_n)
        
        print(f"成功随机抽取{len(selected_users)}名用户：")
        print(f"- 抽取依据：从{len(all_users)}名用户中随机选择")
        print(f"- 前5名用户ID：{selected_users[:5]}")
        
        return selected_users
    except FileNotFoundError:
        print(f"错误：未找到评分文件 {ratings_path}")
        return []
    except Exception as e:
        print(f"抽取用户时出错：{str(e)}")
        return []


# --------------------------
# 1. Model class definition (BERT4Rec retained)
# --------------------------
class BERT4Rec(nn.Module):
    def __init__(self, vocab_size, hidden_dim, num_layers, max_seq_length):
        super(BERT4Rec, self).__init__()
        self.config = BertConfig(
            vocab_size=vocab_size,
            hidden_size=hidden_dim,
            num_hidden_layers=num_layers,
            num_attention_heads=4,
            intermediate_size=hidden_dim*2,
            max_position_embeddings=max_seq_length,
            type_vocab_size=1
        )
        self.bert = BertModel(self.config)
        self.output_layer = nn.Linear(hidden_dim, vocab_size)
        self.max_seq_length = max_seq_length

    def forward(self, input_ids):
        outputs = self.bert(input_ids=input_ids)
        return self.output_layer(outputs.last_hidden_state)

    def predict(self, user_seq):
        input_ids = torch.tensor(user_seq, dtype=torch.long).unsqueeze(0).to(device)
        with torch.no_grad():
            logits = self.forward(input_ids)
            return logits[:, -1, :].squeeze(0).softmax(dim=0)


# --------------------------
# 2. Data and model loading functions (BERT4Rec related)
# --------------------------
def load_user_comments(file_path):
    """加载用户评论数据"""
    try:
        comments_df = pd.read_csv(file_path)
        user_comments = {}
        for _, row in comments_df.iterrows():
            user_id = row['USER_MD5']
            movie_id = row['MOVIE_ID']
            comment = row['CONTENT']
            user_comments[(user_id, movie_id)] = comment  # 键：(用户ID, 电影ID)，值：评论内容
        print(f"成功加载 {len(user_comments)} 条用户评论数据")
        return user_comments
    except FileNotFoundError:
        print(f"警告：未找到用户评论文件 {file_path}，将不使用评论数据")
        return {}
    except Exception as e:
        print(f"加载用户评论时出错: {str(e)}，将不使用评论数据")
        return {}


def load_bert4rec_model(model_path, ratings_df):
    try:
        # 允许加载numpy标量类型（仅信任的模型文件）
        torch.serialization.add_safe_globals([np.core.multiarray.scalar])
        
        # 加载模型 checkpoint
        checkpoint = torch.load(model_path, map_location=device, weights_only=False)
        
        model = BERT4Rec(
            vocab_size=checkpoint['vocab_size'] + 1,
            hidden_dim=128,
            num_layers=2,
            max_seq_length=checkpoint['max_seq_length']
        ).to(device)
        
        model.load_state_dict(checkpoint['model_state_dict'])
        model.eval()
        print("BERT4Rec模型加载成功")
        return {
            'model': model,
            'item2idx': checkpoint['item2idx'],
            'idx2item': {v: k for k, v in checkpoint['item2idx'].items()},
            'user_sequences': ratings_df.groupby('USER_MD5')['MOVIE_ID'].agg(list).to_dict(),
            'max_seq_length': checkpoint['max_seq_length']
        }
    except Exception as e:
        print(f"BERT4Rec加载失败: {e}")
        return None


# --------------------------
# 3. User preference extraction (incorporating comments)
# --------------------------
def load_user_preferences(ratings_df, movies_df, user_comments):
    """从评分、电影类型和用户评论中提取用户偏好"""
    # 1. 合并评分和电影数据，提取高评分电影的类型
    rated_movies = ratings_df.merge(movies_df, on='MOVIE_ID')
    liked_movies = rated_movies[rated_movies['RATING'] >= 4.0]  # 高评分视为喜欢

    user_preferences = defaultdict(list)

    # 2. 从高评分电影类型提取偏好
    for _, row in liked_movies.iterrows():
        genres = row['GENRES'].split('|')  # 电影类型（如Action|Adventure）
        user_preferences[row['USER_MD5']].extend(genres)

    # 3. 从用户评论提取关键词
    stop_words = {'the', 'is', 'and', 'in', 'to', 'of', 'a', 'i', 'it', 'this', 'that', 'with',
                  '了', '的', '是', '在', '我', '有', '就', '也', '和', '都', '但', '而', '为', '着'}  # 增加中文停用词
    for (user_id, movie_id), comment in user_comments.items():
        # 评论分词并过滤停用词
        comment_words = str(comment).lower().split()  # 确保为字符串
        filtered_words = [
            word for word in comment_words
            if word not in stop_words and len(word) > 2  # 过滤短词和停用词
        ]
        user_preferences[user_id].extend(filtered_words)  # 将评论关键词加入用户偏好

    # 4. 统计词频，保留前5个最常见偏好
    for user_id in user_preferences:
        pref_counts = defaultdict(int)
        for pref in user_preferences[user_id]:
            pref_counts[pref] += 1
        # 按频率排序，取前5
        top_prefs = [pref for pref, _ in sorted(pref_counts.items(), key=lambda x: x[1], reverse=True)[:5]]
        user_preferences[user_id] = top_prefs

    return user_preferences


# --------------------------
# 4. Recommendation generation (BERT4Rec logic only)
# --------------------------
def compute_click_indicator(user_id, movie_id, user_clicked):
    """计算点击指标（1=点击，0=未点击）"""
    if movie_id in user_clicked.get(user_id, set()):
        return 1
    return 1 if random.random() < 0.3 else 0  # 模拟点击


def generate_recommendations(user_id, model_info, all_movie_ids, movies_df, user_clicked, user_preferences):
    """生成BERT4Rec推荐并结合用户偏好优化结果"""
    model = model_info['model']
    predictions = []

    # BERT4Rec推荐逻辑
    user_seq = model_info['user_sequences'].get(user_id, [])
    if not user_seq:
        return []
    seq_idx = [model_info['item2idx'].get(item, 0) for item in user_seq]
    if len(seq_idx) > model_info['max_seq_length']:
        seq_idx = seq_idx[-model_info['max_seq_length']:]
    pred_probs = model.predict(seq_idx)
    top_indices = pred_probs.argsort(descending=True)[:len(all_movie_ids)]
    for idx in top_indices:
        movie_id = model_info['idx2item'].get(idx.item())
        if movie_id and movie_id in all_movie_ids:
            clicked = compute_click_indicator(user_id, movie_id, user_clicked)
            predictions.append({
                'iid': movie_id,
                'est': pred_probs[idx].item(),
                'clicked': clicked
            })

    # 结合用户偏好优化推荐排序
    user_prefs = user_preferences.get(user_id, [])
    if user_prefs:
        # 为每个推荐项计算与用户偏好的匹配度
        for pred in predictions:
            try:
                movie_id = pred['iid']
                # 获取电影类型（处理可能的空值）
                genre_row = movies_df[movies_df['MOVIE_ID'] == movie_id]['GENRES']
                if not genre_row.empty:
                    genres_str = str(genre_row.values[0]).strip()
                    movie_genres = genres_str.split('|') if genres_str not in ['', 'nan'] else []
                else:
                    movie_genres = []
                # 计算匹配度
                match_score = sum(1 for pref in user_prefs if pref in movie_genres)
                pred['match_score'] = match_score
            except Exception as e:
                pred['match_score'] = 0  # 匹配度计算失败时设为0

        # 排序：先按匹配度（降序），再按模型评分（降序）
        predictions.sort(key=lambda x: (x['match_score'], x['est']), reverse=True)

    # 取Top10
    return predictions[:10]


# --------------------------
# 5. Main function (core change: user selection logic)
# --------------------------
def main():
    # 配置
    MODEL_PATHS = {
        'BERT4Rec': 'bert4rec_recommender.pth'  # BERT4Rec模型路径
    }
    DATA_PATHS = {
        'ratings': 'filtered_data/ratings_filtered.csv',
        'movies': 'filtered_data/movies_filtered.csv',
        'comments': 'filtered_data/comments_filtered.csv',  # 用户评论数据
    }
    SAMPLE_N_USERS = 50  # 随机抽取50名用户
    TOP_N_RECS = 10      # 每个用户推荐10部电影

     #步骤1：随机抽取50名用户
    selected_user_ids = load_and_random_select_users(
        DATA_PATHS['ratings'],
        sample_n=SAMPLE_N_USERS,
        random_state=42
    )
    if not selected_user_ids:
        print("未抽取到有效用户，退出程序")
        return

    # 步骤2：加载基础数据（仅保留筛选出的用户数据）
    try:
        ratings_df = pd.read_csv(DATA_PATHS['ratings'])
        # 过滤评分数据，只保留筛选出的用户（减少计算量）
        ratings_df = ratings_df[ratings_df['USER_MD5'].isin(selected_user_ids)]
        
        movies_df = pd.read_csv(DATA_PATHS['movies'])
        # 确保GENRES列为字符串类型，空值填充为空字符串
        movies_df['GENRES'] = movies_df['GENRES'].fillna('').astype(str)
        
        user_comments = load_user_comments(DATA_PATHS['comments'])
        
        all_movie_ids = set(movies_df['MOVIE_ID'].tolist())
        movies_dict = dict(zip(movies_df['MOVIE_ID'], movies_df['NAME']))
        
        # 记录用户已点击电影
        user_clicked = defaultdict(set)
        for _, row in ratings_df.iterrows():
            if pd.notna(row['MOVIE_ID']) and pd.notna(row['USER_MD5']):
                user_clicked[row['USER_MD5']].add(row['MOVIE_ID'])
        
        # 提取用户偏好（结合评论）
        user_preferences = load_user_preferences(ratings_df, movies_df, user_comments)
        print("基础数据加载完成（已过滤为筛选出的用户）")
    except Exception as e:
        print(f"数据加载失败: {e}")
        return

    # 步骤3：加载BERT4Rec模型
    models = {}
    model_name = 'BERT4Rec'
    model_info = load_bert4rec_model(MODEL_PATHS[model_name], ratings_df)
    if model_info:
        models[model_name] = model_info
    else:
        print("BERT4Rec模型加载失败，退出")
        return

    # 步骤4：为筛选出的用户生成推荐
    detailed_results = []
    summary_results = []

    for user_id in tqdm(selected_user_ids, desc=f"为前{len(selected_user_ids)}名用户生成推荐"):
        model_info = models['BERT4Rec']
        # 生成推荐
        top_recs = generate_recommendations(
            user_id, model_info, all_movie_ids, movies_df,
            user_clicked, user_preferences
        )
        if not top_recs:
            continue

        # 整理详细结果
        total_clicked = 0
        for rank, rec in enumerate(top_recs, 1):
            total_clicked += rec['clicked']
            detailed_results.append({
                'user_id': user_id,
                'model': 'BERT4Rec',
                'rank': rank,
                'movie_id': rec['iid'],
                'title': movies_dict.get(rec['iid'], '未知电影'),
                'predicted_rating': round(rec['est'], 2),
                'clicked': rec['clicked']
            })

        # 计算汇总指标
        ctr = round(total_clicked / TOP_N_RECS, 4) if TOP_N_RECS > 0 else 0
        summary_results.append({
            'user_id': user_id,
            'model': 'BERT4Rec',
            'total_recommended': TOP_N_RECS,
            'total_clicked': total_clicked,
            'ctr': ctr
        })

    # 保存并输出结果
    os.makedirs('recommendation_results', exist_ok=True)
    detailed_df = pd.DataFrame(detailed_results)
    summary_df = pd.DataFrame(summary_results)
    detailed_df.to_csv('recommendation_results/top50_users_bert4rec_detailed.csv', index=False)
    summary_df.to_csv('recommendation_results/top50_users_bert4rec_summary.csv', index=False)

    # 打印示例结果
    print("\n===== 推荐结果示例 =====")
    print("\n详细结果（前5条）：")
    print(detailed_df.head())
    print(f"\n汇总结果（前5条用户）：")
    print(summary_df.head())


if __name__ == "__main__":
    main()