from typing import Dict, List
import numpy as np
import pandas as pd
import json
import requests
from collections import defaultdict, Counter
import os
from tqdm import tqdm
import re
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModel, BertTokenizer, BertForSequenceClassification
from datetime import datetime, timedelta
import math

# ===== Configuration and Parameters =====
INPUT_CSV_PATH = 'recommendation_results/random50_users_lightgcn_detailed.csv'
OUTPUT_CSV_PATH = 'results/lightgcn_detailed_results.csv'
USER_TAGS_PATH = 'filtered_data/utf_comments.csv'
RATINGS_PATH = 'filtered_data/ratings_filtered.csv'
MOVIES_PATH = 'filtered_data/movies_filtered.csv'
PREDICTED_COMMENTS_PATH = "results/lightgcn_predicted_comments.csv" 

# LLM API configuration (Ollama-style /api/generate endpoint)
MODEL_API_URL = 'http://192.168.20.139:11434/api/generate'
MODEL_NAME = "qwen2.5:32b"
API_TIMEOUT = 30  # request timeout in seconds

# Weights of the semantic-matching dimensions (values sum to 1.0)
WEIGHTS = {
    "genre": 0.3,
    "theme": 0.3,
    "keywords": 0.1,
    "cast_director": 0.1,
    "other": 0.2
}

# Model configuration (semantic model only; sentiment comes from the LLM API)
SEMANTIC_MODEL = "/home/ps/.cache/modelscope/hub/models/sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"

# Max tokens per text chunk — presumably 512 minus 2 special tokens
# ([CLS]/[SEP]); get_embedding encodes with max_length=MAX_CHUNK_LENGTH + 2.
MAX_CHUNK_LENGTH = 510

# Initialize the semantic embedding model at import time; the script cannot
# run without it, so a load failure aborts immediately.
print("Loading models...")
try:
    # Multilingual sentence-transformer tokenizer + encoder
    tokenizer = AutoTokenizer.from_pretrained(SEMANTIC_MODEL)
    model = AutoModel.from_pretrained(SEMANTIC_MODEL)
    print("Models loaded successfully")
except Exception as e:
    print(f"Model loading failed: {str(e)}")
    exit(1)

# ===== API call: generate a review comment and a discrete sentiment value =====
def generate_comment_and_sentiment(user_prefs: Dict, movie_info: Dict) -> tuple[str, int]:
    """Ask the LLM to write a review in the user's voice and self-rate its sentiment.

    Args:
        user_prefs: per-dimension preference strings (keys used: genre, theme,
            keywords, cast_director).
        movie_info: per-dimension movie attribute strings (keys used: name,
            genre, theme, cast_director).

    Returns:
        (comment, sentiment) where sentiment is -1 (negative), 0 (neutral) or
        1 (positive). On any API/parse failure returns ("", 0).
    """
    prompt = f"""请完成以下两个任务，严格按照格式要求输出：
1. 以用户口吻写一段50-100字的电影评论，体现对电影的真实感受。
2. 根据评论情感，输出离散情感值（仅允许-1、0、1）：
   - 1：正面情感（喜欢、推荐等积极态度）
   - 0：中性情感（无明显倾向、客观描述）
   - -1：负面情感（不喜欢、失望等消极态度）

用户偏好：
- 喜欢的类型：{user_prefs.get('genre', '无')}
- 喜欢的主题：{user_prefs.get('theme', '无')}
- 关注的关键词：{user_prefs.get('keywords', '无')}
- 喜欢的演员/导演：{user_prefs.get('cast_director', '无')}

电影信息：
- 名称：{movie_info.get('name', '未知')}
- 类型：{movie_info.get('genre', '无')}
- 剧情：{movie_info.get('theme', '无')}
- 演员/导演：{movie_info.get('cast_director', '无')}

输出格式（必须严格遵守，先评论后情感值，情感值单独成行）：
评论：[你的评论内容]
情感值：[1/0/-1]"""

    payload = {
        "model": MODEL_NAME,
        "prompt": prompt,
        "stream": False,
        "options": {
            "temperature": 0.7,
            # BUG FIX: Ollama's generate API names this option "num_predict";
            # "max_tokens" is silently ignored, so the cap never applied.
            "num_predict": 300  # enough for the comment plus the sentiment line
        }
    }

    try:
        response = requests.post(
            MODEL_API_URL,
            headers={"Content-Type": "application/json"},
            json=payload,
            timeout=API_TIMEOUT
        )
        response.raise_for_status()
        result = response.json().get("response", "").strip()

        # Parse the reply: expect "评论：<text>" and "情感值：<-1|0|1>" lines.
        comment = ""
        sentiment = 0  # default to neutral if the model omits/garbles the value
        lines = [line.strip() for line in result.split('\n') if line.strip()]

        for line in lines:
            if line.startswith("评论："):
                comment = line[len("评论："):].strip()
            elif line.startswith("情感值："):
                value = line[len("情感值："):].strip()
                # Membership check guards int(); no blanket except needed.
                if value in ('1', '0', '-1'):
                    sentiment = int(value)

        return comment, sentiment
    except Exception as e:
        print(f"API调用失败：{str(e)}")
        return "", 0  # on failure: empty comment, neutral sentiment

# ===== Text-processing utilities =====
def chunk_text(text, tokenizer, max_chunk_length=MAX_CHUNK_LENGTH):
    """Split *text* into decoded chunks of at most *max_chunk_length* tokens.

    Empty or whitespace-only input yields [""] so callers always receive at
    least one chunk.
    """
    if not text or not text.strip():
        return [""]
    token_ids = tokenizer.encode(text, add_special_tokens=False)
    return [
        tokenizer.decode(token_ids[offset:offset + max_chunk_length],
                         skip_special_tokens=False)
        for offset in range(0, len(token_ids), max_chunk_length)
    ]

def get_embedding(text):
    """Embed *text* with the module-level semantic model.

    Long texts are split into token chunks; each chunk's first-token vector
    (position 0 of the last hidden state) is taken and the chunk vectors are
    averaged into a single embedding tensor.
    """
    pieces = chunk_text(text, tokenizer)
    if not pieces:
        # Defensive: chunk_text returns at least [""], but keep the guard.
        return torch.zeros(model.config.hidden_size)
    vectors = []
    for piece in pieces:
        encoded = tokenizer(
            piece,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=MAX_CHUNK_LENGTH + 2,  # room for the special tokens
        )
        with torch.no_grad():
            output = model(**encoded)
        vectors.append(output.last_hidden_state[:, 0, :].squeeze())
    return torch.stack(vectors).mean(dim=0)

def cosine_similarity(a, b):
    """Cosine similarity between two 1-D embedding tensors, as a Python float."""
    return F.cosine_similarity(a.unsqueeze(0), b.unsqueeze(0)).item()

# ===== Main scoring function (uses the discrete sentiment from the API) =====
def calculate_scores_with_llm(user_prefs: Dict, movie_info: Dict, user_id: str, movie_id: str, comments_list: List) -> Dict:
    """Generate a predicted comment via the LLM, then score semantic match.

    Side effect: appends [user_id, movie_id, comment, sentiment] to
    *comments_list*. Returns a dict with "match_score", "satisfaction" and
    "dimension_scores", or None when anything fails.
    """
    try:
        # 1. LLM call: predicted review text + discrete sentiment (-1/0/1).
        comment, sentiment = generate_comment_and_sentiment(user_prefs, movie_info)
        comments_list.append([user_id, movie_id, comment, sentiment])

        # 2. Per-dimension semantic similarity folded into a weighted total.
        per_dim = {}
        weighted_total = 0.0
        for dimension, weight in WEIGHTS.items():
            preference = user_prefs.get(dimension, f"无{dimension}偏好")
            attribute = movie_info.get(dimension, "")
            if not preference.strip() or preference == f"无{dimension}偏好":
                score = 0.5  # no stated preference: neutral credit
            elif not attribute.strip():
                score = 0.3  # movie lacks this attribute: low-but-nonzero credit
            else:
                u_vec = get_embedding(preference)
                m_vec = get_embedding(attribute)
                # Rescale similarity (≈[0.3, 1.0] for non-negative similarity).
                score = 0.3 + cosine_similarity(u_vec, m_vec) * 0.7
            per_dim[dimension] = score
            weighted_total += score * weight

        return {
            "match_score": round(weighted_total, 4),
            "satisfaction": sentiment,  # discrete value straight from the API
            "dimension_scores": per_dim
        }
    except Exception as e:
        print(f"评分计算失败（用户{user_id}，电影{movie_id}）: {e}")
        return None

# ===== Data loading and main-flow functions =====
def load_and_merge_data():
    """Load the three input CSVs (movies, recommendations, ratings).

    Returns:
        dict with keys "movies", "recommendations", "ratings", each a DataFrame.

    Raises:
        FileNotFoundError / ValueError: propagated from pandas when a file or a
        required column (listed in usecols) is missing — handled by the caller.
    """
    # Movie metadata; IDs read as strings so they join cleanly with other files.
    movies = pd.read_csv(
        MOVIES_PATH,
        usecols=["MOVIE_ID", "NAME", "GENRES", "STORYLINE", "TAGS", 
                "ACTORS", "DIRECTORS", "LANGUAGES", "REGIONS", "YEAR"],
        dtype={"MOVIE_ID": str}
    )
    # NOTE: YEAR is intentionally absent here; downstream code must handle NaN.
    movies = movies.fillna({
        "GENRES": "", "STORYLINE": "无剧情介绍", "TAGS": "", 
        "ACTORS": "", "DIRECTORS": "", "LANGUAGES": "未知", "REGIONS": "未知"
    })
    print(f"电影数据加载完成，共 {len(movies)} 部")
    
    # Recommendation pairs from the LightGCN run, renamed to match the
    # column names used by the ratings/movies files.
    recommendations = pd.read_csv(
        INPUT_CSV_PATH,
        usecols=["user_id", "movie_id"],
        dtype={"user_id": str, "movie_id": str}
    )
    recommendations = recommendations.rename(
        columns={"user_id": "USER_MD5", "movie_id": "MOVIE_ID"}
    )
    print(f"推荐结果加载完成，共 {len(recommendations)} 条")
    
    # Historical ratings. A missing "RATING" column already makes read_csv
    # raise (usecols mismatch), so the old post-hoc existence check was
    # unreachable and has been removed.
    ratings = pd.read_csv(
        RATINGS_PATH,
        usecols=["USER_MD5", "MOVIE_ID", "RATING"],
        dtype={"USER_MD5": str, "MOVIE_ID": str}
    )
    print(f"评分数据加载完成，共 {len(ratings)} 条")
    
    return {"movies": movies, "recommendations": recommendations, "ratings": ratings}

def extract_user_preferences(merged_data):
    """Build per-user preference strings from highly-rated movies.

    For every user, collects genres/themes/keywords/cast/other attributes of
    the movies the user rated >= 4.0, then keeps the 3 most frequent items per
    dimension, joined as a comma-separated string. Users with no signal in a
    dimension get the placeholder "无<dim>偏好".

    Args:
        merged_data: dict with "ratings" and "movies" DataFrames
            (as returned by load_and_merge_data).

    Returns:
        defaultdict mapping USER_MD5 -> {dimension: preference string}.
    """
    ratings = merged_data["ratings"]
    movies = merged_data["movies"]
    
    high_rating_threshold = 4.0
    rated_movies = ratings[ratings["RATING"] >= high_rating_threshold].merge(
        movies, on="MOVIE_ID", how="inner"
    )
    print(f"高评分电影数量: {len(rated_movies)}")
    
    user_prefs = defaultdict(lambda: {
        "genre": [], "theme": [], "keywords": [], "cast_director": [], "other": []
    })
    
    for _, row in rated_movies.iterrows():
        user_id = row["USER_MD5"]
        if row["GENRES"]:
            user_prefs[user_id]["genre"].extend(row["GENRES"].split("|"))
        if row["STORYLINE"] and row["STORYLINE"] != "无剧情介绍":
            user_prefs[user_id]["theme"].append(row["STORYLINE"])
        if row["TAGS"]:
            user_prefs[user_id]["keywords"].extend(row["TAGS"].split(","))
        cast_dir = []
        if row["ACTORS"]:
            cast_dir.extend(row["ACTORS"].split(","))
        if row["DIRECTORS"]:
            cast_dir.extend(row["DIRECTORS"].split(","))
        user_prefs[user_id]["cast_director"].extend(cast_dir)
        other_info = []
        if row["LANGUAGES"]:
            other_info.append(f"语言:{row['LANGUAGES']}")
        if row["REGIONS"]:
            other_info.append(f"地区:{row['REGIONS']}")
        # BUG FIX: YEAR is not covered by the fillna() in load_and_merge_data,
        # and NaN is truthy, so missing years used to produce bogus "年份:nan"
        # preference entries — guard with pd.notna.
        if pd.notna(row["YEAR"]) and row["YEAR"]:
            other_info.append(f"年份:{row['YEAR']}")
        user_prefs[user_id]["other"].extend(other_info)
    
    # Give every rater a placeholder for dimensions that collected no items.
    all_users = set(ratings["USER_MD5"].unique())
    for user_id in all_users:
        for dim in WEIGHTS.keys():
            if not user_prefs[user_id][dim]:
                user_prefs[user_id][dim] = [f"无{dim}偏好"]
    
    # Collapse each dimension to its 3 most frequent items, comma-joined.
    for user_id in user_prefs:
        for dim in WEIGHTS.keys():
            cnt = Counter(user_prefs[user_id][dim])
            user_prefs[user_id][dim] = ", ".join([item for item, _ in cnt.most_common(3)])
    
    return user_prefs

def process_recommendation_scores():
    """Main pipeline: load data, build user preferences, score every
    recommendation via the LLM + semantic model, and write the detailed
    results and predicted comments to CSV."""
    os.makedirs(os.path.dirname(OUTPUT_CSV_PATH), exist_ok=True)
    predicted_comments = []  # rows of [user_id, movie_id, comment, satisfaction]
    try:
        print("加载并合并数据...")
        merged_data = load_and_merge_data()
        movies = merged_data["movies"]
        recommendations = merged_data["recommendations"]
        
        # Index movie attributes by (stripped, string) movie id for O(1) lookup.
        print("构建电影信息索引...")
        movie_info_dict = {}
        for _, row in movies.iterrows():
            movie_id = str(row["MOVIE_ID"]).strip()
            movie_info_dict[movie_id] = {
                "genre": row["GENRES"],
                "theme": row["STORYLINE"],
                "keywords": row["TAGS"],
                "cast_director": f"演员:{row['ACTORS']}; 导演:{row['DIRECTORS']}",
                "other": f"语言:{row['LANGUAGES']}; 地区:{row['REGIONS']}; 年份:{row['YEAR']}",
                "name": row["NAME"]
            }
        
        # Build per-user preference strings from high-rated history.
        print("提取用户偏好...")
        user_preferences = extract_user_preferences(merged_data)
        
        # Score each recommended (user, movie) pair.
        print(f"处理 {len(recommendations)} 条推荐记录...")
        result_rows = []
        valid_count = 0
        for _, row in tqdm(recommendations.iterrows(), total=len(recommendations)):
            user_id = str(row["USER_MD5"]).strip()
            movie_id = str(row["MOVIE_ID"]).strip()
            
            if movie_id not in movie_info_dict:
                continue  # recommendation points at a movie with no metadata
            
            user_prefs = user_preferences.get(user_id, {})
            movie_info = movie_info_dict[movie_id]
            
            # Scores include the API-provided discrete sentiment value.
            scores = calculate_scores_with_llm(
                user_prefs, 
                movie_info, 
                user_id, 
                movie_id,
                predicted_comments
            )
            
            if scores is not None:
                valid_count += 1
                result_rows.append({
                    "user_id": user_id,
                    "movie_id": movie_id,
                    "movie_name": movie_info["name"],
                    "match_score": scores["match_score"],
                    "satisfaction": scores["satisfaction"],  # discrete: -1/0/1
                    # BUG FIX: this dict-unpack had been swallowed into the
                    # trailing comment above, silently dropping every
                    # per-dimension score column from the output CSV.
                    **{f"{dim}_score": round(scores["dimension_scores"][dim], 4)
                       for dim in WEIGHTS.keys()},
                })
        
        # Persist predicted comments (with sentiment values), if any.
        if predicted_comments:
            comments_df = pd.DataFrame(
                predicted_comments,
                columns=["user_id", "movie_id", "predicted_comment", "satisfaction"]
            )
            comments_df.to_csv(PREDICTED_COMMENTS_PATH, index=False)
            print(f"推测评论及情感值已保存至 {PREDICTED_COMMENTS_PATH}")
        
        # Persist the per-recommendation score table.
        result_df = pd.DataFrame(result_rows)
        result_df.to_csv(OUTPUT_CSV_PATH, index=False)
        print(f"\n处理完成！有效记录共 {valid_count} 条，已保存至 {OUTPUT_CSV_PATH}")
        if valid_count > 0:
            print(result_df[["user_id", "movie_id", "movie_name", "match_score", "satisfaction"]].head())
        else:
            print("警告：无有效记录")
        
    except FileNotFoundError as e:
        print(f"数据文件未找到: {e}")
    except Exception as e:
        print(f"处理出错: {str(e)}")

# Script entry point: run the full scoring pipeline.
if __name__ == "__main__":
    process_recommendation_scores()