import pandas as pd
import numpy as np
import tensorflow as tf
import pickle
import logging
from typing import List, Dict, Optional, Tuple
import os
from tqdm import tqdm

class HybridRecommender:
    """Hybrid recommender serving top-K movie recommendations from a trained
    deep-learning hybrid model.

    Responsibilities:
      * load the Keras model, id maps, feature tables and rating history from
        disk once, at construction time;
      * score a known user's unseen movies with the model and return ranked,
        metadata-enriched recommendations;
      * fall back to a popularity-based cold-start recommender for unknown
        users or whenever model scoring fails.
    """

    def __init__(self, model_path: str, data_path: str = "data/processed/"):
        """
        Args:
            model_path: path to the saved Keras model.
            data_path: directory with the preprocessed pickle/feather files.
                All accesses go through os.path.join, so a trailing separator
                is optional (the path is stored as given, not normalized).
        """
        self.model_path = model_path
        self.data_path = data_path
        self.logger = self._setup_logging()
        self.model = None                  # tf.keras.Model once loaded
        self.user_id_map = None            # {original userId -> embedding index}
        self.movie_id_map = None           # {original movieId -> embedding index}
        self.movie_features = None         # per-movie numeric feature DataFrame
        self.user_features = None          # per-user feature DataFrame, indexed by userId
        self.movies_metadata = None        # title / genres / year metadata
        self.user_history = None           # {userId -> [movieId, ...] already rated}
        self.cold_start_recommender = None
        # Load everything up front so recommend() never lazily touches disk.
        self._load_resources()
        # The cold-start fallback is optional: keep the hybrid model usable
        # even when the fallback cannot be constructed.
        try:
            from .cold_start import ColdStartRecommender
            # Popularity fallback only; the content-similarity model is
            # deliberately NOT loaded here to avoid duplicating its memory.
            self.cold_start_recommender = ColdStartRecommender(self.data_path, content_recommender=None, enable_content=False)
        except Exception as e:
            self.logger.warning(f"冷启动推荐器初始化失败: {e}")

    def _setup_logging(self) -> logging.Logger:
        """Configure root logging once and return this module's logger."""
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )
        return logging.getLogger(__name__)

    def _load_resources(self):
        """Load model, id maps, feature tables, metadata and rating history.

        Raises:
            Exception: re-raised after logging when any mandatory resource
                fails to load — the recommender is unusable without them.
        """
        self.logger.info("加载推荐器资源...")

        try:
            # 1. Trained Keras model (LeakyReLU registered for deserialization).
            self.model = tf.keras.models.load_model(
                self.model_path,
                custom_objects={
                    'LeakyReLU': tf.keras.layers.LeakyReLU
                }
            )
            self.logger.info(f"✅ 模型加载成功: {self.model_path}")

            # 2. Id maps; the user map may have been pickled in either
            #    direction, so canonicalize it right after loading.
            with open(os.path.join(self.data_path, 'user_id_map.pkl'), 'rb') as f:
                self.user_id_map = pickle.load(f)
            self._normalize_user_id_map()
            with open(os.path.join(self.data_path, 'movie_id_map.pkl'), 'rb') as f:
                self.movie_id_map = pickle.load(f)

            # Reverse map: embedding index -> original movieId.
            self.id_to_movie = {v: k for k, v in self.movie_id_map.items()}

            # 3. Feature tables; user_features must end up indexed by userId.
            self.movie_features = pd.read_pickle(os.path.join(self.data_path, 'movie_features.pkl'))
            self.user_features = pd.read_pickle(os.path.join(self.data_path, 'user_features.pkl'))
            self._normalize_user_features_index()

            # 4. Movie metadata (title / genres / year).
            self.movies_metadata = pd.read_pickle(os.path.join(self.data_path, 'movies_cleaned.pkl'))

            # 5. Rating history, used to exclude already-watched movies.
            self.user_history = self._load_user_history()

            self.logger.info("✅ 所有资源加载完成")

        except Exception as e:
            self.logger.error(f"❌ 资源加载失败: {e}")
            raise

    def _normalize_user_id_map(self):
        """Canonicalize self.user_id_map to {original userId(int) -> index(int)}.

        Some artifacts were saved inverted ({index -> userId}). Heuristic: if
        the keys form a consecutive 0..N-1 range while the values exceed that
        range, the map is inverted and gets flipped. Best-effort: a malformed
        map is left untouched with a warning.
        """
        try:
            if isinstance(self.user_id_map, dict) and len(self.user_id_map) > 0:
                ks = list(self.user_id_map.keys())
                vs = list(self.user_id_map.values())

                def _is_consecutive_zero_based(arr):
                    # True when arr is a permutation of 0..len(arr)-1.
                    try:
                        if not arr:
                            return False
                        a = [int(x) for x in arr]
                        mn, mx = min(a), max(a)
                        return mn == 0 and mx == len(a) - 1 and len(set(a)) == len(a)
                    except Exception:
                        return False

                if _is_consecutive_zero_based(ks) and (max(vs) if vs else 0) > (max(ks) if ks else -1):
                    # Stored as {idx -> userId}: invert it.
                    self.user_id_map = {int(v): int(k) for k, v in self.user_id_map.items() if v is not None}
                else:
                    # Already the right way round; just coerce keys/values to int.
                    self.user_id_map = {int(k): int(v) for k, v in self.user_id_map.items()}
        except Exception as e:
            self.logger.warning(f"user_id_map 规范化失败: {e}")

    def _normalize_user_features_index(self):
        """Ensure self.user_features is indexed by original userId (int).

        If the index looks like embedding indices (heavy overlap with the
        values of user_id_map on a sample), rename it back to userIds.
        Best-effort: any failure leaves the frame as-is with a warning.
        """
        try:
            if hasattr(self.user_features, 'index') and self.user_features.index is not None and len(self.user_features) > 0:
                # Coerce the index to int where possible.
                try:
                    self.user_features.index = self.user_features.index.map(lambda x: int(x))
                except Exception:
                    pass
                if isinstance(self.user_id_map, dict) and self.user_id_map:
                    emb_indices = set(int(v) for v in self.user_id_map.values())
                    # Sample at most 1000 index values to keep the check cheap.
                    idx_values = set(int(x) for x in self.user_features.index[:min(1000, len(self.user_features))])
                    overlap = len(idx_values.intersection(emb_indices))
                    if overlap > max(10, len(idx_values) * 0.6):
                        # Index is embedding indices: map idx -> userId.
                        idx_to_user = {int(v): int(k) for k, v in self.user_id_map.items()}
                        self.user_features = self.user_features.rename(index=idx_to_user)
                        self.user_features.index = self.user_features.index.map(lambda x: int(x))
        except Exception as e:
            self.logger.warning(f"user_features 索引规范化失败: {e}")

    def _load_user_history(self) -> Dict[int, List[int]]:
        """Build {userId -> [movieId, ...]} from the cleaned ratings file.

        Prefers the feather file, falling back to the pickle. Returns an
        empty dict on any failure so the recommender still works (it merely
        stops excluding watched movies).
        """
        try:
            import gc
            # Only the two id columns are needed; downcast them to save RAM.
            ratings_path = os.path.join(self.data_path, 'ratings_cleaned.feather')
            ratings_df = None
            if os.path.exists(ratings_path):
                try:
                    ratings_df = pd.read_feather(ratings_path, columns=['userId', 'movieId'])
                except Exception:
                    ratings_df = pd.read_feather(ratings_path)[['userId', 'movieId']]
            else:
                ratings_path = os.path.join(self.data_path, 'ratings_cleaned.pkl')
                ratings_df = pd.read_pickle(ratings_path)
                if not set(['userId', 'movieId']).issubset(ratings_df.columns):
                    raise ValueError('ratings 数据缺少 userId/movieId 列')
                ratings_df = ratings_df[['userId', 'movieId']]

            # Downcast to int32; unparseable ids become -1.
            if ratings_df['userId'].dtype != 'int32':
                ratings_df['userId'] = pd.to_numeric(ratings_df['userId'], errors='coerce').fillna(-1).astype('int32')
            if ratings_df['movieId'].dtype != 'int32':
                ratings_df['movieId'] = pd.to_numeric(ratings_df['movieId'], errors='coerce').fillna(-1).astype('int32')

            # Stable-sort by user, then sweep once, flushing each user's
            # buffer — avoids a groupby's large intermediate objects.
            ratings_df = ratings_df.sort_values('userId', kind='mergesort')
            user_history: Dict[int, List[int]] = {}
            last_uid = None
            buf: List[int] = []
            for uid, mid in zip(ratings_df['userId'].to_numpy(), ratings_df['movieId'].to_numpy()):
                if last_uid is None:
                    last_uid = int(uid)
                if uid != last_uid:
                    user_history[int(last_uid)] = buf
                    buf = [int(mid)]
                    last_uid = int(uid)
                else:
                    buf.append(int(mid))
            if last_uid is not None:
                user_history[int(last_uid)] = buf

            # Drop the frame before returning to lower peak memory.
            del ratings_df
            gc.collect()

            self.logger.info(f"✅ 用户历史数据加载完成: {len(user_history)} 个用户")
            return user_history

        except Exception as e:
            self.logger.warning(f"⚠️ 用户历史数据加载失败: {e}")
            return {}

    def _get_user_watched_movies(self, user_id: int) -> List[int]:
        """Return the movieIds the user has already rated ([] if unknown)."""
        return self.user_history.get(user_id, [])

    def _prepare_candidate_movies(self, user_id: int) -> pd.DataFrame:
        """Build the candidate movie table for one user.

        Excludes already-watched movies and keeps only numeric feature
        columns — object columns (title, genres, ...) would break the model.

        Returns:
            DataFrame with 'movieId' first plus the numeric feature columns.
        """
        # Membership test against a set is O(1) per movie; the original
        # list-based `in` was O(|watched|) each, i.e. quadratic overall.
        watched_movies = set(self._get_user_watched_movies(user_id))
        all_movie_ids = list(self.movie_id_map.keys())
        candidate_movie_ids = [mid for mid in all_movie_ids if mid not in watched_movies]
        if len(candidate_movie_ids) == 0:
            self.logger.warning(f"用户 {user_id} 已看过所有电影，返回热门电影")
            candidate_movie_ids = all_movie_ids[:1000]
        self.logger.info(f"候选电影数量: {len(candidate_movie_ids)}")
        candidate_df = pd.DataFrame({'movieId': candidate_movie_ids})
        candidate_df = candidate_df.merge(self.movie_features, on='movieId', how='left')
        # Keep float/int columns only; drop object columns such as title/genres.
        num_cols = [col for col in candidate_df.columns if candidate_df[col].dtype in [np.float32, np.float64, np.int32, np.int64]]
        if 'movieId' not in num_cols:
            num_cols = ['movieId'] + num_cols  # movieId must stay first
        candidate_df_model = candidate_df[num_cols].copy()
        return candidate_df_model

    def _prepare_prediction_data(self, user_id: int, candidate_df: pd.DataFrame) -> Optional[Dict[str, np.ndarray]]:
        """Assemble the model's input dict for one user vs. all candidates.

        Returns:
            Dict with 'user_id_input', 'movie_id_input' and
            'user_feature_input' arrays, or None when the user is unknown or
            has no feature row (caller falls back to the popularity
            recommender). None replaces the original empty-ndarray sentinel,
            which contradicted the declared Dict return type.
        """
        num_candidates = len(candidate_df)
        try:
            uid = int(user_id)
        except Exception:
            uid = user_id
        user_idx = self.user_id_map.get(uid)
        if user_idx is None:
            self.logger.warning(f"用户ID {user_id} 不在映射表中，回退热门。")
            return None
        candidate_movie_ids = candidate_df['movieId'].tolist()
        movie_indices = [self.movie_id_map[mid] for mid in candidate_movie_ids]
        try:
            user_feature = self.user_features.loc[uid].values.astype(np.float32)
        except Exception:
            self.logger.warning(f"未找到用户 {user_id} 的特征，回退热门。")
            return None
        # Broadcast the single user-feature row to every candidate: (N, F).
        user_features_array = np.tile(user_feature, (num_candidates, 1))
        prediction_data = {
            'user_id_input': np.full(num_candidates, user_idx, dtype=np.int32),
            'movie_id_input': np.array(movie_indices, dtype=np.int32),
            'user_feature_input': user_features_array
        }
        self.logger.info(f"准备好的预测数据包含以下输入: {list(prediction_data.keys())}")
        return prediction_data

    def predict_ratings(self, user_id: int, candidate_df: pd.DataFrame,
                       batch_size: int = 1024) -> np.ndarray:
        """Predict the user's rating for every candidate movie.

        Returns:
            1-D float array aligned with candidate_df's rows; empty array
            when the model inputs could not be prepared.
        """
        self.logger.info(f"为用户 {user_id} 预测 {len(candidate_df)} 部电影的评分...")

        # _prepare_prediction_data logs its own reason and returns None on
        # failure (the original dead `except ValueError` is removed — the
        # callee never raised it).
        prediction_data = self._prepare_prediction_data(user_id, candidate_df)
        if not prediction_data:
            return np.array([])
        predictions = self.model.predict(prediction_data, batch_size=batch_size, verbose=0)

        return predictions.flatten()

    def recommend(self, user_id: int, top_k: int = 10,
                 diversity: bool = False) -> List[Dict]:
        """Generate top-k recommendations for a user (cold-start aware).

        Args:
            user_id: original user id.
            top_k: number of recommendations to return.
            diversity: when True, prefer genre-diverse titles among the
                highest-scored candidates.
        Returns:
            List of dicts with movie metadata and the predicted rating; falls
            back to the popularity recommender (or []) on any failure.
        """
        self.logger.info(f"为用户 {user_id} 生成 {top_k} 个推荐...")
        # Unknown users cannot be scored by the model: cold-start path.
        is_new_user = (self.user_id_map is None or user_id not in self.user_id_map)
        if is_new_user:
            self.logger.info(f"用户 {user_id} 为新用户，采用冷启动推荐策略")
            if self.cold_start_recommender:
                return self.cold_start_recommender.recommend_popular(top_k)
            else:
                self.logger.warning("冷启动推荐器不可用，无法为新用户推荐")
                return []
        try:
            # 1. Candidates = all movies minus the user's history.
            candidate_df = self._prepare_candidate_movies(user_id)
            if len(candidate_df) == 0:
                self.logger.warning("没有候选电影可用")
                return self.cold_start_recommender.recommend_popular(top_k) if self.cold_start_recommender else []
            # 2. Model scores.
            predictions = self.predict_ratings(user_id, candidate_df)
            if predictions is None or len(predictions) == 0:
                self.logger.info("预测为空，回退热门推荐")
                return self.cold_start_recommender.recommend_popular(top_k) if self.cold_start_recommender else []
            # 3. Rank; keep 2*top_k so the diversity pass has slack to drop
            #    genre-duplicates without running out of candidates.
            results_df = candidate_df[['movieId']].copy()
            results_df['predicted_rating'] = predictions
            results_df = results_df.nlargest(top_k * 2, 'predicted_rating')
            # 4. Optional genre-diversity re-selection.
            if diversity and len(results_df) > top_k:
                results_df = self._apply_diversity(results_df, top_k)
            else:
                results_df = results_df.head(top_k)
            # 5. Attach title / genres / year.
            recommendations = self._add_movie_metadata(results_df.head(top_k))
            self.logger.info(f"✅ 成功生成 {len(recommendations)} 个推荐")
            return recommendations
        except Exception as e:
            self.logger.error(f"❌ 推荐生成失败: {e}")
            return self.cold_start_recommender.recommend_popular(top_k) if self.cold_start_recommender else []

    def _apply_diversity(self, results_df: pd.DataFrame, top_k: int) -> pd.DataFrame:
        """Pick up to top_k rows, preferring movies with not-yet-seen genres.

        Bug fix: the original condition
        `len(diverse_results) < top_k or len(genre_overlap) == 0` accepted
        every row until top_k was reached, making the filter a no-op
        equivalent to head(top_k). Now rows whose genres overlap an
        already-selected movie are skipped in the first pass, and remaining
        slots are topped up with the highest-rated skipped rows.
        """
        selected = []
        skipped = []
        used_genres: set = set()

        for _, row in results_df.iterrows():
            if len(selected) >= top_k:
                break
            movie_genres = self._get_movie_genres(row['movieId'])
            if used_genres.isdisjoint(movie_genres):
                selected.append(row)
                used_genres.update(movie_genres)
            else:
                skipped.append(row)

        # Top up with skipped rows (still in descending-rating order).
        for row in skipped:
            if len(selected) >= top_k:
                break
            selected.append(row)

        return pd.DataFrame(selected)

    def _get_movie_genres(self, movie_id: int) -> set:
        """Return the movie's genre names as a set ('|'-separated in metadata)."""
        if self.movies_metadata is not None:
            movie_data = self.movies_metadata[self.movies_metadata['movieId'] == movie_id]
            if not movie_data.empty:
                genres = movie_data.iloc[0]['genres']
                if isinstance(genres, str):
                    return set(genres.split('|'))
        return set()

    def _add_movie_metadata(self, results_df: pd.DataFrame) -> List[Dict]:
        """Join predicted ratings with title/genres/year metadata.

        Movies missing from the metadata table are silently dropped, so the
        result may be shorter than results_df.
        """
        recommendations = []

        for _, row in results_df.iterrows():
            movie_id = row['movieId']
            predicted_rating = row['predicted_rating']

            movie_data = self.movies_metadata[
                self.movies_metadata['movieId'] == movie_id
            ]

            if not movie_data.empty:
                movie_info = movie_data.iloc[0]
                recommendation = {
                    'movieId': movie_id,
                    'title': movie_info['title'],
                    'genres': movie_info['genres'],
                    'predicted_rating': round(float(predicted_rating), 2),
                    'year': movie_info.get('year', '未知')
                }
                recommendations.append(recommendation)

        return recommendations

    def batch_recommend(self, user_ids: List[int], top_k: int = 10) -> Dict[int, List[Dict]]:
        """Run recommend() per user; a failure yields [] for that user only."""
        results = {}

        for user_id in user_ids:
            try:
                recommendations = self.recommend(user_id, top_k)
                results[user_id] = recommendations
                self.logger.info(f"用户 {user_id}: 生成 {len(recommendations)} 个推荐")
            except Exception as e:
                self.logger.error(f"用户 {user_id} 推荐失败: {e}")
                results[user_id] = []

        return results