import os
import random
from collections import Counter, defaultdict

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch_geometric.nn import GCNConv
from tqdm import tqdm

# Run on GPU when available; every tensor/model below is moved to this device.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"使用设备: {device}")


def load_and_random_select_users(ratings_path, sample_n=50, random_state=42):
    """Randomly draw ``sample_n`` user IDs from the ratings CSV.

    Seeds the global ``random`` module with ``random_state`` so the sample is
    reproducible. When fewer than ``sample_n`` users exist, all of them are
    returned. Returns an empty list if the file is missing or unreadable.
    """
    try:
        df = pd.read_csv(ratings_path)
        df['USER_MD5'] = df['USER_MD5'].astype(str)
        candidates = df['USER_MD5'].unique().tolist()

        # Clamp the sample size to the available population.
        if len(candidates) < sample_n:
            print(f"警告：用户总数不足{sample_n}名，将返回全部{len(candidates)}名用户")
            sample_n = len(candidates)

        random.seed(random_state)
        chosen = random.sample(candidates, sample_n)

        print(f"成功随机抽取{len(chosen)}名用户：")
        print(f"- 抽取依据：从{len(candidates)}名用户中随机选择")
        print(f"- 前5名用户ID：{chosen[:5]}")
        return chosen
    except FileNotFoundError:
        print(f"错误：未找到评分文件 {ratings_path}")
        return []
    except Exception as e:
        print(f"抽取用户时出错：{str(e)}")
        return []


class LightGCN(nn.Module):
    """LightGCN-style recommender: user/item ID embeddings propagated through
    stacked GCN layers, with all layer outputs averaged into the final embedding.

    ``use_bias`` must mirror the architecture of the checkpoint being loaded;
    the saved weights were trained without GCNConv bias terms.
    """

    def __init__(self, num_users, num_items, embedding_dim, num_layers=2, use_bias=False):
        super(LightGCN, self).__init__()
        self.num_users = num_users
        self.num_items = num_items
        self.user_embedding = nn.Embedding(num_users, embedding_dim)
        self.item_embedding = nn.Embedding(num_items, embedding_dim)
        # Bias flag must match the training-time structure so load_state_dict()
        # sees exactly the expected parameter keys.
        self.convs = nn.ModuleList(
            GCNConv(embedding_dim, embedding_dim, bias=use_bias)
            for _ in range(num_layers)
        )
        nn.init.xavier_uniform_(self.user_embedding.weight)
        nn.init.xavier_uniform_(self.item_embedding.weight)

    def forward(self, edge_index):
        """Propagate and return (user_embeddings, item_embeddings)."""
        layer_input = torch.cat(
            [self.user_embedding.weight, self.item_embedding.weight], dim=0
        )
        per_layer = [layer_input]
        for conv in self.convs:
            layer_input = conv(layer_input, edge_index)
            per_layer.append(layer_input)
        # Average the initial embedding and every layer's output.
        fused = torch.mean(torch.stack(per_layer), dim=0)
        return fused[:self.num_users], fused[self.num_users:]

    def predict(self, user_idx, item_idx, edge_index):
        """Dot-product score between one user and one item embedding."""
        with torch.no_grad():
            users, items = self.forward(edge_index)
            return torch.sum(users[user_idx] * items[item_idx]).item()


def build_edge_index(ratings_df, user_id_map, item_id_map, num_users, target_device=None):
    """Build the undirected bipartite user-item edge index for GCN propagation.

    Parameters
    ----------
    ratings_df : DataFrame with 'USER_MD5' and 'MOVIE_ID' columns.
    user_id_map, item_id_map : id -> contiguous-index mappings; rows whose
        user or item is absent from either map are silently skipped.
    num_users : offset added to item indices so users occupy [0, num_users)
        and items occupy [num_users, num_users + num_items).
    target_device : optional device for the result; defaults to the
        module-level ``device`` (backward compatible with the old behavior).

    Returns
    -------
    torch.LongTensor of shape (2, 2*E): each kept rating contributes both
    the user->item edge and the mirrored item->user edge.
    """
    # Vectorized mapping instead of per-row iterrows(): same rows kept, same
    # order, but the lookup runs in C over whole columns.
    u_idx = ratings_df['USER_MD5'].map(user_id_map)
    i_idx = ratings_df['MOVIE_ID'].map(item_id_map)
    keep = u_idx.notna() & i_idx.notna()
    user_indices = u_idx[keep].astype(int).tolist()
    item_indices = [i + num_users for i in i_idx[keep].astype(int).tolist()]

    edges = np.array([
        user_indices + item_indices,
        item_indices + user_indices
    ])
    edge_index = torch.tensor(edges, dtype=torch.long)
    if target_device is None:
        # Preserve the original reliance on the script-level `device` global.
        target_device = device
    return edge_index.to(target_device)


def load_user_comments(file_path, chunksize=100000):
    """Load user comments into ``{(user_md5, movie_id): comment_text}``.

    Reads the CSV in chunks to bound memory. Later occurrences of the same
    (user, movie) pair overwrite earlier ones. Missing CONTENT becomes ''.
    Returns an empty dict when the file is missing or unreadable — the
    caller treats comments as optional.
    """
    user_comments = {}
    try:
        for chunk in pd.read_csv(file_path, chunksize=chunksize):
            chunk['USER_MD5'] = chunk['USER_MD5'].astype(str)
            chunk['MOVIE_ID'] = chunk['MOVIE_ID'].astype(str)
            chunk['CONTENT'] = chunk['CONTENT'].fillna('').astype(str)

            # zip over the columns is far cheaper than per-row iterrows()
            # and yields identical (user_id, movie_id, comment) triples.
            for user_id, movie_id, comment in zip(
                    chunk['USER_MD5'], chunk['MOVIE_ID'], chunk['CONTENT']):
                if user_id and movie_id:
                    user_comments[(user_id, movie_id)] = comment
        print(f"成功加载 {len(user_comments)} 条用户评论数据")
    except FileNotFoundError:
        print(f"警告：未找到用户评论文件 {file_path}，将不使用评论数据")
    except Exception as e:
        print(f"加载用户评论时出错: {e}，将不使用评论数据")
    return user_comments


def load_filtered_ratings(file_path, top_user_ids, chunksize=100000):
    """Load only the rating rows belonging to ``top_user_ids``.

    Returns an empty DataFrame when the user list is empty, when no rows
    match, or when reading fails.
    """
    if not top_user_ids:
        return pd.DataFrame()
    user_set = set(top_user_ids)  # O(1) membership tests for isin
    chunks = []
    try:
        for chunk in pd.read_csv(file_path, chunksize=chunksize):
            chunk['USER_MD5'] = chunk['USER_MD5'].astype(str)
            chunk['MOVIE_ID'] = chunk['MOVIE_ID'].astype(str)
            filtered_chunk = chunk[chunk['USER_MD5'].isin(user_set)]
            if not filtered_chunk.empty:
                chunks.append(filtered_chunk)
        # Bug fix: pd.concat([]) raises ValueError, which the except below
        # then misreported as a load failure; return an empty frame instead.
        ratings_df = pd.concat(chunks, ignore_index=True) if chunks else pd.DataFrame()
        print(f"成功加载筛选用户的评分数据，共{len(ratings_df)}条")
        return ratings_df
    except Exception as e:
        print(f"加载评分数据失败: {e}")
        return pd.DataFrame()


def load_full_ratings(file_path, chunksize=100000):
    """Load the whole ratings CSV in chunks, normalizing both ID columns to str.

    Returns an empty DataFrame on read failure or when the file yields no rows.
    """
    chunks = []
    try:
        for chunk in pd.read_csv(file_path, chunksize=chunksize):
            chunk['MOVIE_ID'] = chunk['MOVIE_ID'].astype(str)
            chunk['USER_MD5'] = chunk['USER_MD5'].astype(str)
            chunks.append(chunk)
        # Guard: pd.concat([]) raises ValueError on a file with no data rows.
        ratings_df = pd.concat(chunks, ignore_index=True) if chunks else pd.DataFrame()
        print(f"成功加载完整评分数据，共{len(ratings_df)}条")
        return ratings_df
    except Exception as e:
        print(f"加载完整评分数据失败: {e}")
        return pd.DataFrame()


def load_lightgcn_model(model_path, full_ratings_df, filtered_ratings_df, top_user_ids):
    """Load the trained LightGCN checkpoint and build the inference context.

    Parameters
    ----------
    model_path : path to the saved ``state_dict`` checkpoint.
    full_ratings_df : complete ratings table; its unique user/item counts
        must equal the sizes the checkpoint was trained with.
    filtered_ratings_df : unused here (kept for interface compatibility);
        the filtered view is recomputed below from ``top_user_ids``.
    top_user_ids : user IDs the inference graph is built for.

    Returns a dict with the model, the filtered id maps, the reverse item
    map, and the propagation edge_index.
    """
    # Derive the real user/item counts from the full data (replaces the
    # previously hard-coded values so embedding tables match the checkpoint).
    full_user_ids = full_ratings_df['USER_MD5'].unique()
    full_item_ids = full_ratings_df['MOVIE_ID'].unique()
    NUM_USERS = len(full_user_ids)  # must equal the training-time user count
    NUM_ITEMS = len(full_item_ids)  # must equal the training-time item count
    
    user_id_map_full = {uid: i for i, uid in enumerate(full_user_ids)}
    item_id_map_full = {iid: i for i, iid in enumerate(full_item_ids)}

    # Bias disabled to mirror the architecture used at training time,
    # otherwise load_state_dict() fails on missing/unexpected keys.
    model = LightGCN(NUM_USERS, NUM_ITEMS, 64, use_bias=False).to(device)
    
    # weights_only=True restricts unpickling to tensor data (safer, no warning).
    model.load_state_dict(torch.load(model_path, map_location=device, weights_only=True))
    model.eval()

    ratings_filtered = full_ratings_df[full_ratings_df['USER_MD5'].isin(top_user_ids)]

    user_ids_filtered = ratings_filtered['USER_MD5'].unique()
    item_ids_filtered = ratings_filtered['MOVIE_ID'].unique()
    user_id_map_filtered = {uid: i for i, uid in enumerate(user_ids_filtered)}
    item_id_map_filtered = {iid: i for i, iid in enumerate(item_ids_filtered)}

    # NOTE(review): the graph uses the *filtered* index space while the model
    # embeddings were trained over the *full* index space — verify this
    # mismatch is intentional before trusting the scores.
    edge_index = build_edge_index(ratings_filtered, user_id_map_filtered, item_id_map_filtered, len(user_ids_filtered))
    if edge_index is None:  # defensive only; build_edge_index never returns None
        raise RuntimeError("边索引构建失败")

    print("LightGCN模型加载成功")
    return {
        'model': model,
        'user_id_map': user_id_map_filtered,
        'item_id_map': item_id_map_filtered,
        # NOTE(review): 'idx2item' inverts the FULL item map, but downstream
        # lookups use the filtered 'item_id_map' — confirm which is intended.
        'idx2item': {v: k for k, v in item_id_map_full.items()},
        'edge_index': edge_index
    }


def load_user_preferences(ratings_df, movies_df, user_comments):
    """Derive up to 5 preference keywords per user.

    Combines the genres of movies the user rated >= 4.0 with stop-word-filtered
    tokens from the user's comments, then keeps each user's five most frequent
    terms (ties broken by first appearance, as before).

    Returns a defaultdict(list) mapping user_id -> list of keyword strings.
    """
    ratings_df = ratings_df.copy()
    movies_df = movies_df.copy()
    ratings_df['MOVIE_ID'] = ratings_df['MOVIE_ID'].astype(str)
    movies_df['MOVIE_ID'] = movies_df['MOVIE_ID'].astype(str)

    movies_df['GENRES'] = movies_df['GENRES'].fillna('').astype(str)

    rated_movies = pd.merge(
        ratings_df,
        movies_df,
        on='MOVIE_ID',
        how='inner'
    )
    # Only highly-rated movies contribute genre preferences.
    liked_movies = rated_movies[rated_movies['RATING'] >= 4.0]

    user_preferences = defaultdict(list)
    # zip over columns replaces iterrows(); rows are visited in the same order.
    for user_id, genres in zip(liked_movies['USER_MD5'], liked_movies['GENRES']):
        genres_str = str(genres).strip()
        if genres_str:
            user_preferences[user_id].extend(genres_str.split('|'))

    # Mixed English/Chinese stop-word list applied to comment tokens.
    stop_words = {'the', 'is', 'and', 'in', 'to', 'of', 'a', 'i', 'it', 'this', 'that', 'with',
                  '了', '的', '是', '在', '我', '有', '就', '也', '和', '都', '但', '而', '为', '着'}
    for (user_id, movie_id), comment in user_comments.items():
        comment_words = str(comment).lower().split()
        filtered_words = [w for w in comment_words if w not in stop_words and len(w) > 2]
        user_preferences[user_id].extend(filtered_words)

    # Counter.most_common replaces the hand-rolled count/sort; it is stable
    # for equal counts (insertion order), matching the original behavior.
    for user_id in user_preferences:
        user_preferences[user_id] = [w for w, _ in Counter(user_preferences[user_id]).most_common(5)]

    return user_preferences


def compute_click_indicator(user_id, movie_id, user_clicked):
    """Simulated click signal: 1 when the user already interacted with the
    movie, otherwise a 30%-probability random click (uses the shared
    ``random`` module state — one random draw per call)."""
    already_clicked = movie_id in user_clicked.get(user_id, set())
    if already_clicked:
        return 1
    return int(random.random() < 0.3)


def generate_recommendations(user_id, model_info, movies_df, user_clicked, user_preferences, top_n=10):
    """Score every candidate movie for one user and return the top ``top_n``.

    Ranking: by (genre match against the user's preference keywords, predicted
    score) when preferences exist, otherwise by predicted score alone.
    Returns [] for users absent from the trained id map.

    Each entry: {'iid', 'est', 'clicked'} plus 'match_score' when the user
    has preference keywords.
    """
    model = model_info['model']
    user_id_map = model_info['user_id_map']
    item_id_map = model_info['item_id_map']
    edge_index = model_info['edge_index']

    user_idx = user_id_map.get(user_id)
    if user_idx is None:
        return []

    # Hoist the MOVIE_ID -> GENRES lookup out of the scoring loop: the
    # original filtered the whole DataFrame once per prediction (O(n^2)).
    genres_by_movie = dict(zip(movies_df['MOVIE_ID'], movies_df['GENRES']))

    predictions = []
    for movie_id in movies_df['MOVIE_ID']:
        item_idx = item_id_map.get(movie_id)
        if item_idx is None:
            continue
        try:
            score = model.predict(user_idx, item_idx, edge_index)
            clicked = compute_click_indicator(user_id, movie_id, user_clicked)
            predictions.append({'iid': movie_id, 'est': score, 'clicked': clicked})
        except Exception as e:
            print(f"生成预测时出错: {e}，跳过该电影")
            continue

    user_prefs = user_preferences.get(user_id, [])
    if user_prefs:
        for pred in predictions:
            genres_str = str(genres_by_movie.get(pred['iid'], '')).strip()
            movie_genres = genres_str.split('|') if genres_str else []
            pred['match_score'] = sum(1 for pref in user_prefs if pref in movie_genres)
        predictions.sort(key=lambda x: (x['match_score'], x['est']), reverse=True)
    else:
        # Bug fix: without preference data the list was previously returned
        # unsorted, so the "top" slice was arbitrary; rank by score instead.
        predictions.sort(key=lambda x: x['est'], reverse=True)

    return predictions[:top_n]


def main():
    """End-to-end pipeline: sample 50 users, load data and the trained
    LightGCN checkpoint, produce top-10 recommendations per user with a
    simulated click signal, and write detailed + summary CSV reports."""
    MODEL_PATH = 'lightgcn_recommender.pth'
    DATA_PATHS = {
         'ratings': 'filtered_data/ratings_filtered.csv',
        'movies': 'filtered_data/movies_filtered.csv',
        'comments': 'filtered_data/comments_filtered.csv',
    }
    SAMPLE_N_USERS = 50
    TOP_N_RECS = 10

    # Step 1: reproducible random sample of users to evaluate.
    selected_user_ids = load_and_random_select_users(
        DATA_PATHS['ratings'],
        sample_n=SAMPLE_N_USERS,
        random_state=42
    )
    if not selected_user_ids:
        print("未抽取到有效用户，退出程序")
        return

    # Step 2: load ratings (full + filtered), movie metadata and comments.
    try:
        full_ratings_df = load_full_ratings(DATA_PATHS['ratings'])
        filtered_ratings_df = load_filtered_ratings(DATA_PATHS['ratings'], selected_user_ids)
        if filtered_ratings_df.empty:
            print("筛选用户无评分数据，退出")
            return

        movies_df = pd.read_csv(DATA_PATHS['movies'])
        movies_df['MOVIE_ID'] = movies_df['MOVIE_ID'].astype(str)
        movies_df['GENRES'] = movies_df['GENRES'].fillna('').astype(str)
        
        user_comments = load_user_comments(DATA_PATHS['comments'])

        # movie_id -> title lookup for the report; each user's rated-movie
        # set doubles as the "already clicked" ground truth.
        movies_dict = dict(zip(movies_df['MOVIE_ID'], movies_df['NAME']))
        user_clicked = defaultdict(set)
        for _, row in filtered_ratings_df.iterrows():
            user_id = str(row['USER_MD5'])
            movie_id = str(row['MOVIE_ID'])
            user_clicked[user_id].add(movie_id)

        user_preferences = load_user_preferences(filtered_ratings_df, movies_df, user_comments)
        print("数据加载成功（已过滤为筛选出的用户）")
    except Exception as e:
        print(f"数据加载失败: {e}")
        return

    # Step 3: restore the trained model and build the inference graph.
    try:
        model_info = load_lightgcn_model(MODEL_PATH, full_ratings_df, filtered_ratings_df, selected_user_ids)
    except Exception as e:
        print(f"模型加载失败: {e}")
        return

    detailed_results = []
    summary_results = []

    # Step 4: per-user recommendation + simulated-CTR bookkeeping.
    for user_id in tqdm(selected_user_ids, desc=f"为{len(selected_user_ids)}名用户生成推荐"):
        top_recs = generate_recommendations(user_id, model_info, movies_df, user_clicked, user_preferences)
        if not top_recs:
            continue

        total_clicked = 0
        for rank, rec in enumerate(top_recs, 1):
            total_clicked += rec['clicked']
            detailed_results.append({
                'user_id': user_id,
                'model': 'LightGCN',
                'rank': rank,
                'movie_id': rec['iid'],
                'title': movies_dict.get(rec['iid'], '未知电影'),
                'predicted_rating': round(rec['est'], 2),
                'clicked': rec['clicked']
            })

        # NOTE(review): CTR divides by TOP_N_RECS even if fewer than 10
        # recommendations were produced for this user — confirm intended.
        ctr = round(total_clicked / TOP_N_RECS, 4) if TOP_N_RECS > 0 else 0
        summary_results.append({
            'user_id': user_id,
            'model': 'LightGCN',
            'total_recommended': TOP_N_RECS,
            'total_clicked': total_clicked,
            'ctr': ctr
        })

    # Step 5: persist the detailed and per-user summary reports.
    os.makedirs('recommendation_results', exist_ok=True)
    detailed_df = pd.DataFrame(detailed_results)
    summary_df = pd.DataFrame(summary_results)
    detailed_df.to_csv('recommendation_results/random50_users_lightgcn_detailed.csv', index=False)
    summary_df.to_csv('recommendation_results/random50_users_lightgcn_summary.csv', index=False)

    print("\n===== 推荐结果示例 =====")
    print("详细结果（前5条）：")
    print(detailed_df.head())
    print("\n汇总结果（前5条用户）：")
    print(summary_df.head())


if __name__ == "__main__":
    main()
