import pandas as pd
import numpy as np
import re
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics.pairwise import cosine_distances
import os
from typing import Dict, List, Tuple
from transformers import AutoTokenizer, AutoModel
import torch
import matplotlib.pyplot as plt

# Global matplotlib font setup: force the sans-serif family and reset the
# candidate sans-serif font list to matplotlib's built-in defaults.
plt.rcParams["font.family"] = ["sans-serif"]
plt.rcParams["font.sans-serif"] = plt.rcParamsDefault["font.sans-serif"]

class MovieSemanticDiversity:
    """Computes semantic diversity of movie recommendation lists.

    Diversity is the mean weighted cosine distance between all pairs of
    recommended movies, combining a genre one-hot vector and a storyline
    embedding produced by a transformers sentence encoder.
    """

    def __init__(self):
        """Initialize the diversity calculator (transformers instead of SentenceTransformer)."""
        # Local model path (modelscope cache of the multilingual MiniLM sentence encoder).
        self.model_name = "/home/ps/.cache/modelscope/hub/models/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
        # Tokenizer and encoder model.
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModel.from_pretrained(self.model_name)
        self.model.eval()  # inference only; made explicit even though from_pretrained defaults to eval
        # Multi-label encoder for movie genres (one-hot per genre).
        self.genre_encoder = MultiLabelBinarizer()

    def _mean_pooling(self, model_output, attention_mask):
        """Mean-pool token embeddings into one sentence vector, ignoring padding.

        Args:
            model_output: transformers model output; index 0 is the last hidden
                state of shape (batch, seq_len, hidden).
            attention_mask: (batch, seq_len) mask, 1 for real tokens, 0 for padding.

        Returns:
            (batch, hidden) tensor of mask-weighted mean token embeddings.
        """
        token_embeddings = model_output[0]  # last hidden state
        # Cast to float so multiplication and clamp behave correctly when the
        # tokenizer returns an integer mask (canonical mean-pooling recipe).
        mask = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        # clamp avoids division by zero for an all-padding row
        return torch.sum(token_embeddings * mask, 1) / torch.clamp(mask.sum(1), min=1e-9)

    def load_data(self, 
                 recommendations_path: str, 
                 movies_path: str) -> Tuple[pd.DataFrame, pd.DataFrame]:
        """Load recommendation results and movie metadata.

        Args:
            recommendations_path: CSV of per-user ranked recommendations
                (expects user_id / movie_id / rank columns — confirm against file).
            movies_path: CSV of movie metadata (expects MOVIE_ID / GENRES / STORYLINE).

        Returns:
            (recommendations, movies); movies['GENRES'] is converted from a
            delimiter-separated string into a list of clean genre tokens.
        """
        print("加载数据...")
        recommendations = pd.read_csv(recommendations_path)
        movies = pd.read_csv(movies_path)

        # Split on common genre delimiters, then strip whitespace and drop empty
        # tokens so e.g. "剧情 / 爱情" does not produce the distinct labels
        # "剧情 " and " 爱情" in the genre vocabulary.
        def _parse_genres(raw):
            if pd.isna(raw):
                return []
            tokens = re.split(r'[;/,，]', str(raw).strip())
            return [t.strip() for t in tokens if t.strip()]

        movies['GENRES'] = movies['GENRES'].apply(_parse_genres)

        print(f"推荐结果：{len(recommendations)} 条记录")
        print(f"电影数据：{len(movies)} 部电影")
        return recommendations, movies

    def preprocess_semantic_features(self, movies: pd.DataFrame) -> pd.DataFrame:
        """Attach genre one-hot vectors and storyline embeddings to the movies frame.

        Args:
            movies: frame with 'GENRES' (list of tokens) and 'STORYLINE' columns.

        Returns:
            The same frame with 'genre_vector' and 'storyline_vector' columns added.
        """
        print("提取电影语义特征...")

        # 1. Genre one-hot encoding: fit on a single sample containing every
        #    observed genre so the encoder learns the full label set.
        all_genres = [genre for sublist in movies['GENRES'] for genre in sublist]
        self.genre_encoder.fit([all_genres])
        movies['genre_vector'] = movies['GENRES'].apply(
            lambda x: self.genre_encoder.transform([x])[0]
        )

        # 2. Storyline embeddings from the transformers encoder, in batches.
        valid_storyline = movies['STORYLINE'].fillna("").apply(lambda x: str(x).strip()).tolist()
        storyline_embeddings = []
        batch_size = 32  # batched inference to bound memory use

        for start in range(0, len(valid_storyline), batch_size):
            batch = valid_storyline[start:start + batch_size]
            encoded_input = self.tokenizer(
                batch,
                padding=True,
                truncation=True,
                return_tensors="pt"
            )
            with torch.no_grad():  # inference only, skip gradient bookkeeping
                model_output = self.model(**encoded_input)
            # Pool token vectors into one sentence vector per storyline.
            sentence_embeddings = self._mean_pooling(model_output, encoded_input['attention_mask'])
            storyline_embeddings.extend(sentence_embeddings.cpu().numpy())

        movies['storyline_vector'] = storyline_embeddings
        return movies

    def calculate_pairwise_diversity(self, vectors: List[np.ndarray], weight: float = 0.5) -> float:
        """Average weighted cosine distance over all unordered pairs.

        Args:
            vectors: list of (genre_vector, storyline_vector) tuples.
            weight: weight of the genre distance; (1 - weight) goes to storyline.

        Returns:
            Mean of weight*genre_dist + (1-weight)*story_dist over all pairs,
            or 0.0 when fewer than two vectors are given.
        """
        n = len(vectors)
        if n < 2:
            return 0.0

        # Vectorized: one pairwise-distance matrix per modality instead of
        # O(n^2) single-pair cosine_distances calls.
        genre_mat = np.asarray([v[0] for v in vectors])
        story_mat = np.asarray([v[1] for v in vectors])
        combined = weight * cosine_distances(genre_mat) + (1 - weight) * cosine_distances(story_mat)

        # Mean over the strict upper triangle: each unordered pair counted once.
        upper = np.triu_indices(n, k=1)
        return float(combined[upper].mean())

    def compute_diversity_for_users(self, 
                                   recommendations: pd.DataFrame, 
                                   movies: pd.DataFrame, 
                                   k: int = 10, 
                                   genre_weight: float = 0.5) -> Dict[str, float]:
        """Compute intra-list semantic diversity of each user's top-k recommendations.

        Args:
            recommendations: frame with user_id / movie_id / rank columns.
            movies: frame carrying 'genre_vector' and 'storyline_vector' per MOVIE_ID.
            k: number of top-ranked recommendations per user to score.
            genre_weight: weight of the genre distance in the combined metric.

        Returns:
            Mapping of user_id -> mean pairwise semantic diversity.
        """
        print(f"计算每个用户前 {k} 个推荐的语义多样性...")

        recommendations = recommendations.sort_values(['user_id', 'rank'])
        # String keys so lookups match the string-cast recommendation ids below.
        movie_id_to_vectors = {
            str(row['MOVIE_ID']): (row['genre_vector'], row['storyline_vector'])
            for _, row in movies.iterrows()
        }

        user_diversity = {}
        for user_id, group in recommendations.groupby('user_id'):
            top_k_movies = group.head(k)['movie_id'].astype(str).tolist()
            # Movies absent from the metadata table are silently skipped.
            valid_vectors = [movie_id_to_vectors[mid] for mid in top_k_movies
                             if mid in movie_id_to_vectors]

            user_diversity[user_id] = self.calculate_pairwise_diversity(
                valid_vectors, weight=genre_weight
            )

        print(f"完成 {len(user_diversity)} 个用户的多样性计算")
        return user_diversity

    def analyze_results(self, user_diversity: Dict[str, float], output_dir: str = "diversity_results"):
        """Persist per-user scores to CSV, print summary stats, and plot a histogram.

        Args:
            user_diversity: mapping of user_id -> semantic-diversity score.
            output_dir: directory for the CSV and PNG outputs (created if absent).
        """
        os.makedirs(output_dir, exist_ok=True)

        diversity_df = pd.DataFrame({
            'user_id': list(user_diversity.keys()),
            'semantic_diversity': list(user_diversity.values())
        })

        diversity_df.to_csv(os.path.join(output_dir, "lightgcn_user_semantic_diversity.csv"), index=False)
        print(f"用户语义多样性结果已保存到 {output_dir}/lightgcn_user_semantic_diversity.csv")

        avg_diversity = diversity_df['semantic_diversity'].mean()
        print(f"\n平均语义多样性：{avg_diversity:.4f}")
        print(f"多样性分布：最小值 {diversity_df['semantic_diversity'].min():.4f}, "
              f"最大值 {diversity_df['semantic_diversity'].max():.4f}, "
              f"中位数 {diversity_df['semantic_diversity'].median():.4f}")

        plt.figure(figsize=(10, 5))
        plt.hist(diversity_df['semantic_diversity'], bins=20, alpha=0.7)
        plt.axvline(avg_diversity, color='r', linestyle='dashed', linewidth=2, 
                   label=f"Average: {avg_diversity:.4f}")
        plt.title('Distribution of Semantic Diversity')
        plt.xlabel('Semantic Diversity Score')
        plt.ylabel('Number of Users')
        plt.legend()
        plt.tight_layout()
        plt.savefig(os.path.join(output_dir, "lightgcn_diversity_distribution.png"))
        print(f"多样性分布图已保存到 {output_dir}/lightgcn_diversity_distribution.png")
        plt.show()

def main():
    """Script entry point: load data, embed movies, then score and report per-user diversity."""
    # Input locations (replace with the actual paths as needed).
    RECOMMENDATIONS_PATH = "recommendation_results/random50_users_lightgcn_detailed.csv"
    MOVIES_PATH = "filtered_data/movies_filtered.csv"
    K = 10
    GENRE_WEIGHT = 0.5

    # Build the calculator and run the pipeline end to end.
    calculator = MovieSemanticDiversity()

    recs, movie_table = calculator.load_data(RECOMMENDATIONS_PATH, MOVIES_PATH)

    enriched_movies = calculator.preprocess_semantic_features(movie_table)

    per_user_scores = calculator.compute_diversity_for_users(
        recs,
        enriched_movies,
        k=K,
        genre_weight=GENRE_WEIGHT,
    )

    calculator.analyze_results(per_user_scores)

if __name__ == "__main__":
    main()