# -*- coding: utf-8 -*-
"""
IMDb电影AI分析脚本 - Web可视化兼容完整版（修正版：解决中文乱码 + 正确输出路径 + 防缓存机制 + 自动上传到后端）
"""

import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import json, os, sys, re, traceback, io, time, requests
from collections import Counter
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, roc_curve, confusion_matrix

# ===============================
# Force stdout/stderr to UTF-8 so the web frontend never receives mojibake.
# FIX: use reconfigure() (Python 3.7+) instead of re-wrapping
# sys.stdout.buffer in a fresh TextIOWrapper — re-wrapping leaves the
# original wrapper (and anything buffered in it) orphaned and can break
# flush-at-exit; reconfigure() changes the encoding of the existing
# stream object in place.
# ===============================
sys.stdout.reconfigure(encoding='utf-8')
sys.stderr.reconfigure(encoding='utf-8')

# ===============================
# Matplotlib CJK font configuration
# ===============================
# Prefer SimHei and fall back to Microsoft YaHei so Chinese chart titles
# and labels render; keep ASCII hyphen-minus so minus signs are not
# replaced by missing-glyph boxes.
plt.rcParams.update({
    "font.sans-serif": ["SimHei", "Microsoft YaHei"],
    "axes.unicode_minus": False,
})

# ===============================
# Path configuration
# ===============================
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_FILE = os.path.join(SCRIPT_DIR, "movies_data.csv")

# Spring Boot's actual runtime static directory. Overridable via the
# AI_OUTPUT_DIR environment variable so the script is not hard-wired to
# one developer's machine; the default preserves the original path.
OUTPUT_DIR = os.environ.get(
    "AI_OUTPUT_DIR",
    r"E:/java_qimo/media-analysis-system_1/target/classes/static/uploads/ai-analysis",
)

LOG_FILE = os.path.join(SCRIPT_DIR, "error_log.txt")
os.makedirs(OUTPUT_DIR, exist_ok=True)


# ===============================
# 上传函数定义
# ===============================
def upload_image_to_server(file_path):
    """Upload one chart image to the Spring Boot backend.

    Best-effort: returns the URL string the backend answers with on
    HTTP 200, otherwise logs to stderr and returns None. Never raises —
    an upload failure must not abort the analysis run.
    """
    try:
        upload_url = "http://localhost:8000/api/file/upload"  # backend upload endpoint
        with open(file_path, "rb") as image_fh:
            response = requests.post(upload_url, files={"file": image_fh}, timeout=15)
        # Guard clause: anything but 200 counts as a failed upload.
        if response.status_code != 200:
            print(f"[WARN] 上传失败 {file_path}: 状态码 {response.status_code}", file=sys.stderr)
            return None
        print(f"[INFO] 成功上传: {os.path.basename(file_path)} -> {response.text.strip()}", file=sys.stderr)
        return response.text.strip()
    except Exception as ex:
        print(f"[ERROR] 上传异常 {file_path}: {ex}", file=sys.stderr)
        return None


# ===============================
# 主函数
# ===============================
def main():
    """End-to-end IMDb analysis pipeline.

    Steps:
      1. Load the movie CSV and validate its schema.
      2. Build genre indicator features and a binary "high rating" label.
      3. Train several classifiers and compare them (accuracy / F1 / AUC).
      4. Save rating-distribution, ROC and confusion-matrix charts to
         OUTPUT_DIR and upload them to the Java backend.
      5. Print exactly one JSON document on stdout for the web frontend.

    All progress/diagnostic messages go to stderr so stdout stays pure
    JSON. On any failure an error JSON is printed and the process exits
    with status 1.
    """
    try:
        print("[INFO] 正在读取数据文件: {}".format(DATA_FILE), file=sys.stderr)
        if not os.path.exists(DATA_FILE):
            raise FileNotFoundError("未找到数据文件: {}".format(DATA_FILE))
        df = pd.read_csv(DATA_FILE, encoding="utf-8")
        print("[INFO] 数据集加载成功，共 {} 条记录".format(len(df)), file=sys.stderr)

        # Fail fast with an explicit message when the CSV schema is wrong.
        required_cols = ["电影名称", "类型", "简介", "上映年份", "IMDb评分", "导演姓名"]
        for col in required_cols:
            if col not in df.columns:
                raise KeyError("缺少必要列: {}".format(col))

        type_col = "类型"
        year_col = "上映年份"
        score_col = "IMDb评分"
        director_col = "导演姓名"

        def split_multi(x):
            # Split a multi-valued cell (e.g. "剧情/悬疑") on Chinese/Latin
            # commas, slashes, pipes and whitespace; NaN becomes [].
            # FIX: the class previously contained r"\\s" (a double escape
            # inside a raw string), which made it split on the literal letter
            # "s" and on backslashes, mangling genres such as "Mystery".
            if pd.isna(x):
                return []
            return [p.strip() for p in re.split(r"[、,，/|\s]+", str(x)) if p.strip()]

        # Binary indicator columns for the 10 most frequent genres.
        cnt = Counter([g for lst in df[type_col].dropna().apply(split_multi) for g in lst])
        top_genres = [g for g, _ in cnt.most_common(10)]
        for g in top_genres:
            # g=g binds the current genre explicitly (avoids any late-binding doubt).
            df["genre_" + g] = df[type_col].apply(lambda s, g=g: 1 if g in split_multi(s) else 0)

        # === Rating distribution chart ===
        plt.figure(figsize=(8, 5))
        # dropna() keeps plt.hist from failing on missing scores.
        plt.hist(df[score_col].dropna(), bins=10, color="steelblue", edgecolor="black")
        plt.title("IMDb评分分布")
        plt.xlabel("IMDb评分")
        plt.ylabel("电影数量")
        plt.tight_layout()
        plt.savefig(os.path.join(OUTPUT_DIR, "rating_distribution.png"))
        plt.close()

        # Binary label; if the fixed threshold leaves too few positives for a
        # stratified split, fall back to the median score.
        threshold = 8.9
        df["high_rating"] = (df[score_col] >= threshold).astype(int)
        class_counts = df["high_rating"].value_counts()
        if len(class_counts) < 2 or class_counts.min() < 5:
            threshold = df[score_col].median()
            df["high_rating"] = (df[score_col] >= threshold).astype(int)
            print("[WARN] 高评分样本过少，自动调整阈值为中位数 {:.2f}".format(threshold), file=sys.stderr)

        numeric_features = [year_col] + [c for c in df.columns if c.startswith("genre_")]
        categorical_features = [director_col]
        X = df[numeric_features + categorical_features]
        y = df["high_rating"]

        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.3, stratify=y, random_state=42
        )

        # Impute + scale numerics; impute + one-hot the director column
        # (directors unseen at fit time are ignored, not errors).
        numeric_transformer = Pipeline([
            ("imputer", SimpleImputer(strategy="median")),
            ("scaler", StandardScaler())
        ])
        categorical_transformer = Pipeline([
            ("imputer", SimpleImputer(strategy="most_frequent")),
            ("onehot", OneHotEncoder(handle_unknown="ignore", sparse_output=False))
        ])
        preprocess = ColumnTransformer([
            ("num", numeric_transformer, numeric_features),
            ("cat", categorical_transformer, categorical_features)
        ])

        models = {
            "LogisticRegression": LogisticRegression(max_iter=1000),
            "SVC": SVC(probability=True),
            "RandomForest": RandomForestClassifier(n_estimators=200, random_state=42),
            "GradientBoosting": GradientBoostingClassifier(random_state=42),
            "KNN": KNeighborsClassifier(n_neighbors=5)
        }

        results, roc_data = [], []
        best_auc, best_name, best_y_pred = 0, None, None

        plt.figure(figsize=(8, 6))
        for name, model in models.items():
            try:
                pipe = Pipeline([("preprocess", preprocess), ("clf", model)])
                pipe.fit(X_train, y_train)
                y_pred = pipe.predict(X_test)
                y_proba = pipe.predict_proba(X_test)[:, 1]

                acc = accuracy_score(y_test, y_pred)
                f1 = f1_score(y_test, y_pred)
                auc = roc_auc_score(y_test, y_proba)

                results.append({
                    "model": name,
                    "accuracy": round(acc, 3),
                    "f1": round(f1, 3),
                    "rocAuc": round(auc, 3)
                })

                fpr, tpr, _ = roc_curve(y_test, y_proba)
                plt.plot(fpr, tpr, label="{} (AUC={:.3f})".format(name, auc))
                # FIX: roc_data was declared but never filled, so the frontend
                # always received an empty "rocCurves" payload.
                roc_data.append({
                    "model": name,
                    "fpr": [round(float(v), 4) for v in fpr],
                    "tpr": [round(float(v), 4) for v in tpr]
                })

                if auc > best_auc:
                    best_auc, best_name, best_y_pred = auc, name, y_pred

            except Exception as model_err:
                # Best-effort: one failing model must not abort the comparison.
                print(f"[WARN] 模型 {name} 训练出错: {model_err}", file=sys.stderr)
                continue

        plt.plot([0, 1], [0, 1], "--", color="gray")
        plt.title("模型ROC曲线对比")
        plt.legend()
        plt.tight_layout()
        plt.savefig(os.path.join(OUTPUT_DIR, "roc_curves.png"))
        plt.close()

        # Guard against every model having failed (best_name still None).
        if best_name is None or best_y_pred is None:
            print("[WARN] 所有模型评估失败或AUC为0，使用默认LogisticRegression", file=sys.stderr)
            best_name = "LogisticRegression"
            pipe = Pipeline([("preprocess", preprocess), ("clf", LogisticRegression(max_iter=1000))])
            pipe.fit(X_train, y_train)
            best_y_pred = pipe.predict(X_test)

        # Confusion matrix for the best model. FIX: open an explicit figure so
        # the heatmap cannot land on a stale implicit one.
        cm = confusion_matrix(y_test, best_y_pred)
        plt.figure()
        sns.heatmap(cm, annot=True, fmt="d", cmap="Blues")
        plt.title("最佳模型 {} 混淆矩阵".format(best_name))
        plt.tight_layout()
        plt.savefig(os.path.join(OUTPUT_DIR, f"confusion_matrix_{best_name}.png"))
        plt.close()

        # ===============================
        # Upload the charts to the Java server (best-effort).
        # ===============================
        uploaded_urls = {}
        for img in [
            "rating_distribution.png",
            "roc_curves.png",
            f"confusion_matrix_{best_name}.png"
        ]:
            full_path = os.path.join(OUTPUT_DIR, img)
            if os.path.exists(full_path):
                url = upload_image_to_server(full_path)
                if url:
                    uploaded_urls[img] = url

        # Timestamp query string defeats browser caching of the images.
        ts = str(int(time.time()))

        output = {
            "movieStats": {
                "totalCount": len(df),
                "avgRating": round(float(df[score_col].mean()), 2),
                "highRatingCount": int(df["high_rating"].sum())
            },
            "ratingDistribution": None,
            "yearTrend": None,
            "correlationMatrix": None,
            "rocCurves": {"models": roc_data},
            "modelResults": {"bestModel": best_name, "results": results},
            "confusionMatrix": {"matrix": cm.tolist()},
            "images": []
        }

        # Prefer the uploaded URL; fall back to the local static path.
        for img_name, title in [
            ("rating_distribution.png", "评分分布"),
            ("roc_curves.png", "ROC曲线"),
            (f"confusion_matrix_{best_name}.png", f"混淆矩阵 - {best_name}")
        ]:
            final_url = uploaded_urls.get(img_name, f"/uploads/ai-analysis/{img_name}")
            output["images"].append({
                "title": title,
                "filename": final_url + "?ts=" + ts
            })

        # ensure_ascii=False keeps the Chinese text readable in the JSON.
        print(json.dumps(output, ensure_ascii=False))

    except Exception as e:
        print("=== Python脚本发生异常 ===", file=sys.stderr)
        traceback.print_exc(file=sys.stderr)
        with open(LOG_FILE, "w", encoding="utf-8") as f:
            traceback.print_exc(file=f)
        # Emit a schema-compatible error document so the frontend can tell
        # "script failed" apart from "empty results".
        error_output = {
            'movieStats': None, 'ratingDistribution': None,
            'yearTrend': None, 'correlationMatrix': None,
            'rocCurves': None, 'modelResults': None,
            'confusionMatrix': None, 'images': None,
            'error': str(e)
        }
        print(json.dumps(error_output, ensure_ascii=False))
        sys.exit(1)


# Script entry point: run the full analysis pipeline when executed directly
# (e.g. spawned by the Spring Boot backend); never on import.
if __name__ == "__main__":
    main()
