# ===== Startup phase: verify dependencies before doing any real work =====
import sys
# Import every dependency up front so a missing package is reported
# immediately (with an install hint) instead of failing mid-pipeline.
try:
    import pandas as pd
    import numpy as np
    import re
    import os
    import shutil
    from collections import defaultdict, Counter
    import matplotlib.pyplot as plt
    import matplotlib.font_manager as fm
    import seaborn as sns
    from wordcloud import WordCloud
    import jieba
    from sklearn.feature_extraction.text import TfidfVectorizer
    from transformers import BertTokenizer, BertForSequenceClassification
    import torch
    import torch.nn.functional as F
    from tqdm import tqdm
    print("所有依赖模块导入成功")
except ImportError as e:
    print(f"模块导入失败：{e}，请使用pip安装缺失模块（如 pip install 模块名）")
    sys.exit(1)  # abort: nothing below can run without these modules


# ===== Font configuration =====
# Resolve the bundled CJK font relative to this script so charts can render
# Chinese labels regardless of the current working directory.
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
FONT_ABSOLUTE_PATH = os.path.join(CURRENT_DIR, "ziti", "NotoSansCJKsc", "NotoSansCJKsc-Regular.otf")

# Fail fast if the font file is missing (plots and the word cloud need it).
if not os.path.exists(FONT_ABSOLUTE_PATH):
    print(f"字体文件不存在：{FONT_ABSOLUTE_PATH}")
    print(f"请确认路径是否正确，当前脚本目录：{CURRENT_DIR}")
    sys.exit(1)
print("✅ 字体文件检查通过")

# Clear matplotlib's on-disk cache so the newly configured font is picked up.
def clear_matplotlib_cache():
    """Best-effort removal of matplotlib's font cache directory.

    A stale cache can prevent the custom CJK font from being used. Failures
    are reported but never abort startup, since a missing cache only risks a
    fallback font.

    Note: the path assumes the default Linux cache location; on other
    platforms the cache may live elsewhere — TODO confirm if portability
    is ever needed.
    """
    cache_dir = os.path.expanduser('~/.cache/matplotlib')
    if os.path.exists(cache_dir):
        try:
            shutil.rmtree(cache_dir)
            print("Matplotlib缓存已清理")
        # rmtree can fail for reasons beyond permissions (files in use,
        # racing deletions, ...); the original `except PermissionError`
        # let any other OSError crash the whole script at startup.
        except OSError as e:
            print(f"清理缓存失败：{e}，可能影响字体加载")

clear_matplotlib_cache()

# Load the CJK font for matplotlib (abort if the file is unreadable/corrupt).
try:
    font_prop = fm.FontProperties(fname=FONT_ABSOLUTE_PATH)
    # Keep the ASCII minus sign so negative ticks render with a CJK font.
    plt.rcParams["axes.unicode_minus"] = False
    print("字体加载成功")
except Exception as e:
    print(f"字体加载失败：{e}，请检查字体文件是否损坏")
    sys.exit(1)


# ===== Configuration =====
USER_PATH = "filtered_data/user_filtered.csv"
RATINGS_PATH = "filtered_data/ratings_filtered.csv"
MOVIES_PATH = "filtered_data/movies_filtered.csv"
COMMENTS_PATH = "filtered_data/comments_filtered.csv"
OUTPUT_DIR = "user_profiles"
HIGH_RATING_THRESHOLD = 4.0  # ratings >= this count as "liked" for preferences
# Local checkpoint path of the Chinese MacBERT model used for sentiment scoring.
SENTIMENT_MODEL = "/home/ps/.cache/modelscope/hub/models/dienstag/chinese-macbert-large"

# Verify all four input CSVs exist before any expensive work starts.
data_files = {
    "用户数据": USER_PATH,
    "评分数据": RATINGS_PATH,
    "电影数据": MOVIES_PATH,
    "评论数据": COMMENTS_PATH
}
for name, path in data_files.items():
    if not os.path.exists(path):
        print(f"{name}不存在：{path}")
        print(f"请确认数据文件是否在该路径下，当前脚本目录：{CURRENT_DIR}")
        sys.exit(1)
print("所有数据文件检查通过")

# Verify the output directory is writable by round-tripping a probe file.
try:
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    test_file = os.path.join(OUTPUT_DIR, "test_write.txt")
    with open(test_file, "w") as f:
        f.write("test")
    os.remove(test_file)
    print("输出目录权限正常")
except PermissionError:
    print(f"无权限写入输出目录：{OUTPUT_DIR}，请修改目录权限")
    sys.exit(1)

# Verify the sentiment model directory exists on disk.
if not os.path.exists(SENTIMENT_MODEL):
    print(f" 情感模型路径不存在：{SENTIMENT_MODEL}")
    print("请确认模型已下载到该路径，或修改SENTIMENT_MODEL参数")
    sys.exit(1)
print("情感模型路径检查通过")


# ===== 工具函数 =====
def load_data():
    """Read the four filtered CSV files and return them as DataFrames.

    Returns:
        Dict with keys "users", "ratings", "movies", "comments". The movies
        frame has its GENRES/ACTORS/DIRECTORS/TAGS columns split into lists,
        and the comments frame keeps only rows with non-blank content.

    Exits the process with status 1 if any file fails to load — the pipeline
    is not meant to run on partial data.
    """
    print("\n开始加载数据...")

    # Users: keep the hash as a string; blank fields become "未知".
    try:
        users = pd.read_csv(USER_PATH, dtype={"USER_MD5": str}).fillna("未知")
        print(f"用户数据：{len(users)} 条记录")
    except Exception as e:
        print(f"加载用户数据失败：{e}")
        sys.exit(1)

    # Ratings: parse timestamps leniently (bad values become NaT).
    try:
        ratings = pd.read_csv(
            RATINGS_PATH,
            usecols=["USER_MD5", "MOVIE_ID", "RATING", "RATING_TIME"],
            dtype={"USER_MD5": str, "MOVIE_ID": str, "RATING": float},
        )
        ratings["RATING_TIME"] = pd.to_datetime(ratings["RATING_TIME"], errors="coerce")
        print(f"评分数据：{len(ratings)} 条记录")
    except Exception as e:
        print(f"加载评分数据失败：{e}")
        sys.exit(1)

    # Movies: split multi-valued text columns on common delimiters into lists.
    try:
        movies = pd.read_csv(
            MOVIES_PATH,
            usecols=["MOVIE_ID", "NAME", "GENRES", "ACTORS", "DIRECTORS", "STORYLINE", "TAGS"],
            dtype={"MOVIE_ID": str},
        ).fillna("")

        def _split_multivalue(raw):
            # Accept ;  /  ,  ，  | as separators; drop empty fragments.
            return [part.strip() for part in re.split(r"[;/,，|]", raw) if part.strip()]

        for col in ["GENRES", "ACTORS", "DIRECTORS", "TAGS"]:
            movies[col] = movies[col].apply(_split_multivalue)
        print(f"电影数据：{len(movies)} 条记录")
    except Exception as e:
        print(f"加载电影数据失败：{e}")
        sys.exit(1)

    # Comments: discard rows whose content is missing or whitespace-only.
    try:
        comments = pd.read_csv(
            COMMENTS_PATH,
            usecols=["USER_MD5", "MOVIE_ID", "CONTENT", "RATING"],
            dtype={"USER_MD5": str, "MOVIE_ID": str},
        )
        comments = comments[comments["CONTENT"].fillna("").str.strip() != ""].copy()
        print(f"评论数据：{len(comments)} 条记录")
    except Exception as e:
        print(f"加载评论数据失败：{e}")
        sys.exit(1)

    return {"users": users, "ratings": ratings, "movies": movies, "comments": comments}


def init_sentiment_model():
    """Load the sentiment tokenizer/classifier pair from SENTIMENT_MODEL.

    Returns:
        (tokenizer, model) — a 2-label sequence classifier, moved to the GPU
        when one is visible, otherwise left on the CPU.

    Exits the process with status 1 on any load failure.
    """
    print("\n开始加载情感分析模型...")
    try:
        tokenizer = BertTokenizer.from_pretrained(SENTIMENT_MODEL)
        model = BertForSequenceClassification.from_pretrained(SENTIMENT_MODEL, num_labels=2)
        on_gpu = torch.cuda.is_available()
        if on_gpu:
            model = model.to("cuda")
        print("情感模型已加载至GPU" if on_gpu else "情感模型已加载至CPU（未检测到可用GPU）")
        return tokenizer, model
    except Exception as e:
        print(f"加载情感模型失败：{e}，请检查模型路径或网络连接")
        sys.exit(1)


def analyze_sentiment(texts, tokenizer, model, batch_size=8):
    """Score each text with the binary sentiment classifier.

    Args:
        texts: list of raw comment strings.
        tokenizer: callable producing PyTorch model inputs
            (``return_tensors="pt"``).
        model: 2-label sequence-classification model; label index 0 is read
            as negative and index 1 as positive, matching the callers.
        batch_size: number of texts encoded per forward pass.

    Returns:
        A list parallel to ``texts`` of ``{"positive": p, "negative": q}``
        dicts from a softmax over the logits.
    """
    model.eval()

    # Send inputs to wherever the model actually lives. The original checked
    # torch.cuda.is_available() per batch, which assumes "CUDA present" means
    # "model is on CUDA" — a CPU-resident model on a GPU machine would then
    # receive CUDA tensors and crash with a device mismatch. Hoisting the
    # lookup also avoids re-checking inside the loop.
    try:
        device = next(model.parameters()).device
    except StopIteration:  # parameterless model: fall back to CPU
        device = torch.device("cpu")

    results = []
    for start in range(0, len(texts), batch_size):
        batch = texts[start:start + batch_size]
        inputs = tokenizer(
            batch,
            padding=True,
            truncation=True,
            return_tensors="pt",
            max_length=512,
        )
        inputs = {k: v.to(device) for k, v in inputs.items()}
        with torch.no_grad():
            outputs = model(**inputs)
            probs = F.softmax(outputs.logits, dim=1).cpu().numpy()
        results.extend({"positive": p[1], "negative": p[0]} for p in probs)
    return results


# Compiled once at import time — the original rebuilt this large character
# class on every call, and (critically) its ranges were wrong, see docstring.
_EMOJI_PATTERN = re.compile(
    "["
    "\U0001F1E0-\U0001F1FF"  # regional indicators (flags)
    "\U0001F300-\U0001F5FF"  # symbols & pictographs
    "\U0001F600-\U0001F64F"  # emoticons
    "\U0001F680-\U0001F6FF"  # transport & map symbols
    "\U0001F700-\U0001FAFF"  # alchemical .. extended pictographs (incl. 1F900-1F9FF)
    "\U00002600-\U000027BF"  # misc symbols & dingbats
    "\U00002B00-\U00002BFF"  # arrows / misc symbols (incl. U+2B55)
    "\U000024C2"             # circled M
    "\u200d"                 # zero-width joiner
    "\u23cf\u23e9\u231a"     # eject, fast-forward, watch
    "\ufe0f"                 # variation selector-16
    "\u3030"                 # wavy dash
    "\u2640-\u2642"          # gender signs
    "]+",
    flags=re.UNICODE,
)


def filter_special_chars(text):
    """Strip emoji and pictographic symbols from *text*.

    Fixes the original character class, whose ``\\U000024C2-\\U0001F251`` and
    ``\\U00010000-\\U0010ffff`` ranges also covered the CJK Unified Ideographs
    block (U+4E00-U+9FFF) and therefore deleted ALL Chinese characters —
    emptying every nickname/storyline and forcing the "无主题" fallback.

    Args:
        text: any value; non-strings (NaN, None, numbers) yield "".

    Returns:
        The input string with emoji/symbol code points removed.
    """
    if not isinstance(text, str):
        return ""
    return _EMOJI_PATTERN.sub("", text)


def extract_keywords(texts, top_n=10):
    """Pull the top-N TF-IDF keywords out of a collection of Chinese texts.

    Always returns a non-empty list: every failure mode (no usable text,
    empty vocabulary, vectorizer error) falls back to ["无主题"] so the
    downstream word cloud always has something to render.
    """
    fallback = ["无主题"]

    # Keep only non-empty strings, with emoji/symbols stripped.
    cleaned = [
        filter_special_chars(t.strip())
        for t in texts
        if isinstance(t, str) and t.strip()
    ]
    if not cleaned:
        return fallback

    def _segment(text):
        # jieba segmentation; single-character words are kept, and short
        # pure-digit tokens (fewer than 5 digits) are dropped.
        return [
            w for w in jieba.cut(text)
            if len(w) >= 1 and not (w.isdigit() and len(w) < 5)
        ]

    vectorizer = TfidfVectorizer(
        tokenizer=_segment,
        stop_words=["电影", "剧情", "故事", "一部", "没有", "什么"],
        token_pattern=None,
        min_df=1,   # keep low-frequency terms
        max_df=1.0,
    )

    try:
        matrix = vectorizer.fit_transform(cleaned)
    except ValueError:
        return fallback

    vocab = vectorizer.get_feature_names_out()
    if len(vocab) == 0:
        return fallback

    # Rank terms by their mean TF-IDF weight across all documents.
    mean_weights = np.mean(matrix.toarray(), axis=0)
    ranked = mean_weights.argsort()[-top_n:][::-1]
    keywords = [vocab[i] for i in ranked if vocab[i].strip()]
    return keywords or fallback


def build_user_profile(user_id, data, sentiment_model):
    """Build the profile dict for a single user.

    Args:
        user_id: USER_MD5 hash identifying the user.
        data: dict with "users"/"ratings"/"movies"/"comments" DataFrames,
            as produced by load_data().
        sentiment_model: (tokenizer, model) pair from init_sentiment_model().

    Returns:
        A flat dict of profile fields. Preference fields are always non-empty
        (placeholder values are filled in) so visualization never receives an
        empty chart.
    """
    users = data["users"]
    ratings = data["ratings"]
    movies = data["movies"]
    comments = data["comments"]
    tokenizer, model = sentiment_model

    # Single lookup — the original filtered the users frame twice just to
    # test for existence and then fetch the row.
    user_rows = users[users["USER_MD5"] == user_id]
    user_info = user_rows.iloc[0].to_dict() if not user_rows.empty else {}
    profile = {
        "user_id": user_id,
        "nickname": filter_special_chars(user_info.get("USER_NICKNAME", "未知")),
        "total_rated_movies": 0,
        "total_comments": 0,
    }

    # --- Rating statistics ---
    user_ratings = ratings[ratings["USER_MD5"] == user_id].copy()
    if len(user_ratings) > 0:
        profile["total_rated_movies"] = len(user_ratings)
        profile["avg_rating"] = round(user_ratings["RATING"].mean(), 2)
        profile["rating_distribution"] = dict(user_ratings["RATING"].value_counts().sort_index())
        # Compute the max once (the original evaluated it twice per user).
        latest = user_ratings["RATING_TIME"].max()
        profile["latest_rating_time"] = latest.strftime("%Y-%m-%d") if not pd.isna(latest) else "未知"
    else:
        profile["avg_rating"] = "无数据"
        profile["rating_distribution"] = {}
        profile["latest_rating_time"] = "无数据"

    # --- Preferences derived from highly rated movies ---
    high_rating_movies = user_ratings[user_ratings["RATING"] >= HIGH_RATING_THRESHOLD]["MOVIE_ID"].tolist()
    if high_rating_movies:
        liked_movies = movies[movies["MOVIE_ID"].isin(high_rating_movies)]

        def _top_counts(column, limit, placeholder):
            # Tally the list-valued cells of `column` and keep the most
            # frequent entries; fall back to a placeholder so the pie chart
            # is never empty. (Replaces three copy-pasted Counter loops.)
            counter = Counter()
            for items in liked_movies[column]:
                counter.update(items)
            return dict(counter.most_common(limit)) or {placeholder: 1}

        profile["preferred_genres"] = _top_counts("GENRES", 5, "无类型")
        profile["preferred_actors"] = _top_counts("ACTORS", 3, "无演员")
        profile["preferred_directors"] = _top_counts("DIRECTORS", 3, "无导演")

        # Theme keywords; extract_keywords guarantees at least one word.
        storylines = [s for s in liked_movies["STORYLINE"].tolist() if s.strip()]
        profile["preferred_themes"] = extract_keywords(storylines, top_n=10)
    else:
        # No highly rated movies: placeholder data so charts still render.
        profile["preferred_genres"] = {"无类型": 1}
        profile["preferred_actors"] = {"无演员": 1}
        profile["preferred_directors"] = {"无导演": 1}
        profile["preferred_themes"] = ["无主题"]

    # --- Comment sentiment ---
    user_comments = comments[comments["USER_MD5"] == user_id]["CONTENT"].tolist()
    profile["total_comments"] = len(user_comments)
    if user_comments:
        sentiments = analyze_sentiment(user_comments, tokenizer, model)
        avg_positive = round(np.mean([s["positive"] for s in sentiments]), 4)
        profile["comment_sentiment"] = {
            "positive_ratio": avg_positive,
            "negative_ratio": 1 - avg_positive,
            "sample_comments": user_comments[:2],
        }
    else:
        profile["comment_sentiment"] = {"positive_ratio": 0, "negative_ratio": 0, "sample_comments": []}

    return profile


def visualize_profile(profile, output_dir):
    """Render the three profile charts; return True only when all were drawn.

    Users whose cleaned theme list is empty are skipped entirely (returns
    False), because a word cloud cannot be generated without any words.
    """
    user_id = profile["user_id"]
    user_dir = os.path.join(output_dir, user_id)
    os.makedirs(user_dir, exist_ok=True)
    complete = True

    # --- Chart 1: rating histogram ---
    distribution = profile["rating_distribution"]
    if distribution:
        plt.figure(figsize=(8, 4))
        sns.barplot(x=list(distribution.keys()), y=list(distribution.values()))
        plt.title(f"{profile['nickname']}的评分分布", fontproperties=font_prop)
        plt.xlabel("评分", fontproperties=font_prop)
        plt.ylabel("电影数量", fontproperties=font_prop)
        plt.savefig(os.path.join(user_dir, "rating_distribution.png"))
        plt.close()
    else:
        complete = False

    # --- Chart 2: preferred-genre pie ---
    genres = profile["preferred_genres"]
    if genres:
        plt.figure(figsize=(8, 8))
        wedges, labels, pct_labels = plt.pie(
            list(genres.values()), labels=list(genres.keys()), autopct="%1.1f%%"
        )
        # Apply the CJK font to both slice labels and percentage labels.
        for label in labels + pct_labels:
            label.set_fontproperties(font_prop)
        plt.title(f"{profile['nickname']}的偏好电影类型", fontproperties=font_prop)
        plt.savefig(os.path.join(user_dir, "preferred_genres.png"))
        plt.close()
    else:
        complete = False

    # --- Chart 3: theme word cloud (mandatory) ---
    words = [t for t in profile["preferred_themes"] if isinstance(t, str) and t.strip()]
    if not words:
        # Nothing renderable: this user cannot count as "valid".
        print(f"用户{user_id}的preferred_themes为空，跳过该用户")
        return False

    cloud = WordCloud(
        font_path=FONT_ABSOLUTE_PATH,
        background_color="white",
        width=800,
        height=400,
    ).generate(" ".join(words))
    plt.figure(figsize=(10, 6))
    plt.imshow(cloud)
    plt.axis("off")
    plt.title(f"{profile['nickname']}的偏好主题关键词", fontproperties=font_prop)
    plt.savefig(os.path.join(user_dir, "preferred_themes.png"))
    plt.close()

    if complete:
        print(f"用户{user_id}的画像可视化完成，保存至 {user_dir}")
    return complete


def main():
    """End-to-end pipeline: load data, score sentiment, build & plot profiles."""
    print("\n===== 开始执行用户画像构建流程 =====")
    data = load_data()
    sentiment_model = init_sentiment_model()

    # Every user that rated or commented gets a profile.
    all_user_ids = set(data["ratings"]["USER_MD5"].unique()) | set(data["comments"]["USER_MD5"].unique())
    print(f"\n共发现 {len(all_user_ids)} 个用户，开始构建画像...")

    all_profiles, valid_profiles = [], []
    for user_id in tqdm(all_user_ids, desc="构建用户画像"):
        # Any per-user failure (profile build OR visualization) is logged
        # and skipped so one bad user never aborts the whole run.
        try:
            profile = build_user_profile(user_id, data, sentiment_model)
            all_profiles.append(profile)
            if visualize_profile(profile, OUTPUT_DIR):
                valid_profiles.append(profile)
        except Exception as e:
            print(f"\n 处理用户{user_id}时出错：{str(e)}，跳过该用户")

    # Persist both the full set and the "all three charts drawn" subset.
    try:
        all_path = os.path.join(OUTPUT_DIR, "all_user_profiles.csv")
        valid_path = os.path.join(OUTPUT_DIR, "valid_user_profiles.csv")
        pd.DataFrame(all_profiles).to_csv(all_path, index=False)
        pd.DataFrame(valid_profiles).to_csv(valid_path, index=False)
        print("\n===== 执行完成 =====")
        print(f"所有用户画像已保存至：{all_path}")
        print(f"有效用户画像（三图齐全）已保存至：{valid_path}")
    except Exception as e:
        print(f"\n 保存结果失败：{e}，请检查输出目录权限")


# Run the pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()