import requests
import pandas as pd
import json
from collections import defaultdict
import pickle

# ===== 配置与参数 =====
TOP_N = 10  # 推荐数量
MODEL_PATHS = {
    'SVD': 'svd_recommender_model.pkl',
    'KNN': 'knn_recommender_model.pkl'
}
USER_TAGS_PATH = 'processed_user_comments.csv'  # 用户标签数据文件路径

# 工作流API配置
WORKFLOW_API_URL = "https://api.dify.ai/v1/workflows/run"
WORKFLOW_API_KEY = "app-AsjwuTlT4SVgzya3HSEO50BU"


# ===== 工作流API调用函数（修复后）=====
def get_workflow_scores(user_preferences, movie_info, user_tag):
    """调用工作流API获取语音匹配度和情感满意度分数"""
    try:
        # 确保输入参数不为None（避免JSON序列化问题）
        user_preferences = user_preferences or "无"
        movie_info = movie_info or "无"
        user_tag = user_tag or "无"

        # 准备请求数据
        payload = {
            "inputs": {
                "user_preferences": user_preferences,
                "movie_info": movie_info,
                "user_tag": user_tag
            },
            "response_mode": "blocking",
            "user": "default_user"
        }

        # 调用工作流API
        response = requests.post(
            WORKFLOW_API_URL,
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {WORKFLOW_API_KEY}"
            },
            data=json.dumps(payload)
        )

        # 处理响应
        if response.status_code == 200:
            if not response.text:
                print("工作流API返回空内容")
                return {"语音匹配度": 0.5, "情感满意度": 0.0}

            try:
                response_data = response.json()

                # 关键修复1：确保response_data是字典类型
                if not isinstance(response_data, dict):
                    print(f"API返回数据格式错误（非字典）: {response_data}")
                    return {"语音匹配度": 0.5, "情感满意度": 0.0}

                # 关键修复2：确保data是字典类型
                data = response_data.get('data', {})
                if not isinstance(data, dict):
                    print(f"API返回的data格式错误（非字典）: {data}")
                    return {"语音匹配度": 0.5, "情感满意度": 0.0}

                # 关键修复3：确保outputs是字典类型（避免None或其他类型）
                outputs = data.get('outputs', {})
                if not isinstance(outputs, dict):
                    print(f"API返回的outputs格式错误（非字典）: {outputs}")
                    outputs = {}  # 强制设为空字典

                # 验证必要字段
                if not all(key in outputs for key in ["score1", "score2"]):
                    print(f"API返回格式不正确，缺少必要字段: {outputs}")
                    return {"语音匹配度": 0.5, "情感满意度": 0.0}

                # 转换分数并验证范围
                score1 = max(0.0, min(1.0, float(outputs['score1'])))
                score2 = max(-1.0, min(1.0, float(outputs['score2'])))
                return {"语音匹配度": score1, "情感满意度": score2}

            except json.JSONDecodeError as e:
                print(f"解析JSON失败: {e}，内容: {response.text}")
                return {"语音匹配度": 0.5, "情感满意度": 0.0}
            except (TypeError, ValueError) as e:
                print(f"分数转换失败: {e}")
                return {"语音匹配度": 0.5, "情感满意度": 0.0}

        else:
            print(f"API请求失败: {response.status_code}, 内容: {response.text}")
            return {"语音匹配度": 0.5, "情感满意度": 0.0}

    except Exception as e:
        print(f"调用工作流API时发生错误: {str(e)}")
        return {"语音匹配度": 0.5, "情感满意度": 0.0}


# ===== 其他函数保持不变 =====
def build_genres_dict(movies_df):
    return dict(zip(movies_df['movieId'], movies_df['genres']))


def compute_click_indicator(user_id, movie_id, user_clicked_movies):
    return 1 if movie_id in user_clicked_movies.get(user_id, set()) else 0


def compute_ctr(user_click_results):
    if len(user_click_results) == 0:
        return 0.0
    return sum(user_click_results) / len(user_click_results)


def load_model(path):
    try:
        with open(path, 'rb') as f:
            return pickle.load(f)
    except FileNotFoundError:
        print(f"错误：未找到模型文件 {path}")
        return None


def load_user_tags(file_path):
    try:
        tags_df = pd.read_csv(file_path)
        user_tags = {}
        for _, row in tags_df.iterrows():
            user_id = row['userId']
            movie_id = row['movieId']
            tag = row['tag'] if pd.notna(row['tag']) else ""  # 处理NaN标签
            user_tags[(user_id, movie_id)] = tag
        print(f"成功加载 {len(user_tags)} 条用户标签数据")
        return user_tags
    except FileNotFoundError:
        print(f"错误：未找到用户标签文件 {file_path}")
        return {}


def load_user_preferences(ratings_df, movies_df, user_tags):
    rated_movies = ratings_df.merge(movies_df, on='movieId')
    liked_movies = rated_movies[rated_movies['rating'] >= 4.0]

    user_preferences = defaultdict(list)

    for _, row in liked_movies.iterrows():
        genres = row['genres'].split('|')
        user_preferences[row['userId']].extend(genres)

    for (user_id, movie_id), tag in user_tags.items():
        tag_words = tag.lower().split()
        user_preferences[user_id].extend(tag_words)

    for user_id in user_preferences:
        pref_counts = defaultdict(int)
        for pref in user_preferences[user_id]:
            pref_counts[pref] += 1
        top_prefs = [pref for pref, _ in sorted(pref_counts.items(),
                                                key=lambda x: x[1],
                                                reverse=True)[:5]]
        user_preferences[user_id] = top_prefs

    return user_preferences


def generate_user_recommendations(user_id, model, all_movie_ids):
    if not model:
        return []
    predictions = [model.predict(user_id, movie_id) for movie_id in all_movie_ids]
    predictions.sort(key=lambda x: x.est, reverse=True)
    return predictions[:TOP_N]


def process_multi_users(user_ids, models, movies_df, ratings_df, user_tags):
    movies_dict = dict(zip(movies_df['movieId'], movies_df['title']))
    genres_dict = build_genres_dict(movies_df)
    all_movie_ids = movies_df['movieId'].tolist()
    user_preferences = load_user_preferences(ratings_df, movies_df, user_tags)

    user_clicked_movies = defaultdict(set)
    for _, row in ratings_df.iterrows():
        user_clicked_movies[row['userId']].add(row['movieId'])

    results = []
    summary = []

    for user_id in user_ids:
        print(f"正在处理用户 {user_id}...")
        user_prefs = user_preferences.get(user_id, [])
        user_prefs_text = ", ".join(user_prefs) if user_prefs else "无明确偏好"

        for model_name, model in models.items():
            if not model:
                continue

            top_predictions = generate_user_recommendations(user_id, model, all_movie_ids)
            user_clicks = []

            for rank, pred in enumerate(top_predictions, 1):
                movie_id = pred.iid
                title = movies_dict.get(movie_id, "未知电影")
                genres = genres_dict.get(movie_id, "")
                movie_info = f"{title} (类型: {genres})"
                user_tag = user_tags.get((user_id, movie_id), "")

                # 调用工作流API获取分数
                workflow_scores = get_workflow_scores(
                    user_preferences=user_prefs_text,
                    movie_info=movie_info,
                    user_tag=user_tag
                )

                clicked = compute_click_indicator(user_id, movie_id, user_clicked_movies)
                user_clicks.append(clicked)

                results.append({
                    'user_id': user_id,
                    'model': model_name,
                    'rank': rank,
                    'movie_id': movie_id,
                    'title': title,
                    'predicted_rating': round(pred.est, 2),
                    'clicked': clicked,
                    '语音匹配度': round(workflow_scores['语音匹配度'], 4),
                    '情感满意度': round(workflow_scores['情感满意度'], 4)
                })

            ctr = compute_ctr(user_clicks)
            summary.append({
                'user_id': user_id,
                'model': model_name,
                'total_recommended': TOP_N,
                'total_clicked': sum(user_clicks),
                'ctr': round(ctr, 4)
            })

    return pd.DataFrame(results), pd.DataFrame(summary)


def main():
    try:
        ratings_df = pd.read_csv('processed_ratings.csv')
        movies_df = pd.read_csv('processed_movies.csv')
        user_tags = load_user_tags(USER_TAGS_PATH)
        print("数据加载成功！")
    except FileNotFoundError as e:
        print(f"数据加载错误：{e}")
        return

    models = {}
    for name, path in MODEL_PATHS.items():
        model = load_model(path)
        if model:
            models[name] = model

    if not models:
        print("没有可用的推荐模型，程序退出")
        return

    user_ids = ratings_df['userId'].unique()[:10]
    print(f"将为以下用户生成推荐：{user_ids}")

    detailed_results, summary_results = process_multi_users(
        user_ids, models, movies_df, ratings_df, user_tags
    )

    try:
        detailed_results.to_csv('results/detailed_results.csv', index=False)
        summary_results.to_csv('results/summary_results.csv', index=False)
        print("\n推荐结果已保存：")
        print(" - 详细推荐列表及指标：results/detailed_results.csv")
        print(" - 用户-模型汇总指标（含CTR）：results/summary_results.csv")
    except Exception as e:
        print(f"保存结果时出错：{e}")
        return

    print("\n===== 推荐结果示例 =====")
    print("\n详细结果（前5条）：")
    print(detailed_results.head())

    print("\n汇总结果：")
    print(summary_results)


if __name__ == "__main__":
    main()