import pandas as pd
import random
import os
from transformers import pipeline

# Directory where analysis result CSVs are written.
RESULT_DIR = "analysis_results"
os.makedirs(RESULT_DIR, exist_ok=True)  # ensure the output directory exists

# Load the sentiment-analysis model (binary POSITIVE/NEGATIVE SST-2 classifier).
sentiment_model = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")


# Compute a sentiment satisfaction score (1-5 scale).
def compute_sentiment_score(comment):
    """Map a free-text comment to a 1-5 satisfaction score via sentiment analysis.

    Args:
        comment: Text to score (e.g. a user tag). Non-string or blank input
            (such as a NaN tag read from CSV by pandas) yields the neutral score.

    Returns:
        float: Score in [1, 5]; higher means more positive, 3.0 is neutral.
    """
    # Guard against NaN/empty tags from the CSV: the HF pipeline raises on
    # non-string input, so fall back to the neutral midpoint instead.
    if not isinstance(comment, str) or not comment.strip():
        return 3.0
    result = sentiment_model(comment)[0]
    if result['label'] == 'POSITIVE':
        # Pipeline confidence is in (0.5, 1], so this maps into (4, 5].
        return round(3 + 2 * result['score'], 2)
    else:
        # NEGATIVE: high-confidence negatives map toward 1.
        return round(1 + 2 * (1 - result['score']), 2)


# Compute a semantic match score (1-5 scale).
def compute_semantic_match(user_history, item_id, movie_descriptions):
    """Score how well a candidate movie matches a user's viewing history.

    Args:
        user_history: Iterable of movie ids the user has already watched.
        item_id: Candidate movie id to score.
        movie_descriptions: Mapping of movie id -> text description.

    Returns:
        float: Match score in [1, 5]; 3.0 when information is insufficient.
    """
    # Concatenate descriptions of watched movies as the "user preference" text.
    user_preferences = " ".join(
        [movie_descriptions[movie_id] for movie_id in user_history if movie_id in movie_descriptions])
    item_description = movie_descriptions.get(item_id, '')

    if not user_preferences or not item_description:
        return 3.0  # insufficient information: return the midpoint

    # Deterministic word-overlap (Jaccard) similarity, replacing the old
    # random placeholder that ignored both texts. Swap in Sentence-BERT
    # cosine similarity here for a stronger semantic signal.
    pref_tokens = set(user_preferences.lower().split())
    item_tokens = set(item_description.lower().split())
    union = pref_tokens | item_tokens
    if not union:
        return 3.0  # both texts were whitespace-only
    jaccard = len(pref_tokens & item_tokens) / len(union)
    # Map similarity in [0, 1] linearly onto the [1, 5] score range.
    return round(1 + 4 * jaccard, 2)


def _load_datasets():
    """Load the three input CSVs; return (ratings, movies, tags) or None on failure."""
    try:
        ratings_df = pd.read_csv('processed_ratings.csv')
        movies_df = pd.read_csv('processed_movies.csv')
        tags_df = pd.read_csv('tags.csv')
        print("数据集加载成功！")
        return ratings_df, movies_df, tags_df
    except FileNotFoundError as e:
        print(f"文件未找到: {e}")
    except Exception as e:
        print(f"加载数据集出错: {e}")
    return None


def _build_movie_descriptions(movies_df):
    """Build a movie_id -> "title. Genres: ..." description mapping."""
    descriptions = {}
    for _, row in movies_df.iterrows():
        # str() guards against NaN genres, which pandas reads as float and
        # which would otherwise crash on .replace().
        genres = str(row['genres']).replace('|', ', ')
        descriptions[row['movieId']] = f"{row['title']}. Genres: {genres}"
    return descriptions


def _analyze_tag_sentiment(tags_df):
    """Run sentiment scoring over the first 10 tag rows; return result dicts."""
    results = []
    for _, row in tags_df.head(10).iterrows():
        tag = row['tag']
        user_id = row['userId']
        movie_id = row['movieId']
        sentiment = compute_sentiment_score(tag)
        results.append({
            'userId': user_id,
            'movieId': movie_id,
            'tag': tag,
            'timestamp': row['timestamp'],
            'sentiment_score': sentiment,
            # Scores >= 3 (the neutral midpoint) are labeled positive.
            'sentiment_label': 'POSITIVE' if sentiment >= 3 else 'NEGATIVE'
        })
        print(f"处理完成: 用户{user_id}对电影{movie_id}的标签'{tag}'")
    return results


def _compute_match_scores(ratings_df, movies_df, movie_descriptions):
    """Score 3 random movies against the first user's recent viewing history."""
    first_user_id = ratings_df['userId'].iloc[0]
    # The first five movies this user rated serve as the viewing history.
    user_history = ratings_df[ratings_df['userId'] == first_user_id]['movieId'].tolist()[:5]
    print(f"用户{first_user_id}的观影历史（电影ID）: {user_history}")

    results = []
    for movie_id in random.sample(list(movies_df['movieId'].unique()), 3):
        match_score = compute_semantic_match(user_history, movie_id, movie_descriptions)
        results.append({
            'userId': first_user_id,
            'movieId': movie_id,
            'movie_title': movies_df[movies_df['movieId'] == movie_id]['title'].iloc[0],
            'user_history': ','.join(map(str, user_history)),
            'match_score': match_score
        })
        print(f"计算完成: 电影{movie_id}与用户历史的匹配度")
    return results


def _save_results(rows, filename, label):
    """Write result rows to RESULT_DIR/filename as CSV and report the path."""
    save_path = os.path.join(RESULT_DIR, filename)
    pd.DataFrame(rows).to_csv(save_path, index=False)
    print(f"{label}: {save_path}")
    return save_path


def main():
    """Entry point: load data, run both analyses, save CSV outputs.

    Prints progress to stdout; returns early (after printing the error)
    if any input CSV fails to load.
    """
    datasets = _load_datasets()
    if datasets is None:
        return  # load failure already reported
    ratings_df, movies_df, tags_df = datasets

    movie_descriptions = _build_movie_descriptions(movies_df)

    # Tag sentiment analysis.
    print("\n=== 标签情感分析处理 ===")
    sentiment_results = _analyze_tag_sentiment(tags_df)
    _save_results(sentiment_results, "sentiment_analysis_results.csv", "情感分析结果已保存至")

    # Semantic match scoring.
    print("\n=== 语义匹配度计算 ===")
    match_results = _compute_match_scores(ratings_df, movies_df, movie_descriptions)
    _save_results(match_results, "semantic_match_results.csv", "语义匹配度结果已保存至")


if __name__ == "__main__":
    main()
