"""
豆瓣电影AI分析脚本 - Web可视化版本
生成JSON结果和可视化图片供前端展示
"""

import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg')  # 使用非GUI后端
import matplotlib.pyplot as plt
import re
import seaborn as sns
import logging
import json
import os
import sys

from collections import Counter
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.impute import SimpleImputer
from sklearn.pipeline import Pipeline

from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, roc_curve, confusion_matrix

# Silence all non-error Matplotlib log output (keeps stdout clean for the
# JSON payload this script prints for the Java caller).
logging.getLogger('matplotlib').setLevel(logging.ERROR)

# Configure fonts capable of rendering the Chinese labels used in the charts.
plt.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'SimSun']
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with CJK fonts

# ============================================================================
# Configuration
# ============================================================================

# Directory containing this script.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))

# Input data file, expected alongside the script.
DATA_FILE = os.path.join(SCRIPT_DIR, "douban_movies.csv")

# Output directory for the generated PNG charts. Overridable via environment
# variable so the script is portable beyond the original Windows deployment;
# the default preserves the previous hard-coded path.
OUTPUT_DIR = os.environ.get(
    "AI_ANALYSIS_OUTPUT_DIR",
    "D:/media-as/media-analysis-system_1/uploads/ai-analysis",
)
IMAGE_DIR = OUTPUT_DIR

# Ensure the output directories exist before any chart is saved.
os.makedirs(OUTPUT_DIR, exist_ok=True)
os.makedirs(IMAGE_DIR, exist_ok=True)

# ============================================================================
# 主程序
# ============================================================================

def main():
    """Run the full Douban-movie analysis pipeline.

    Steps: load the CSV at DATA_FILE, engineer year/runtime/genre features,
    save five PNG charts into IMAGE_DIR, train five classifiers to predict
    "high rating" movies, and print exactly one JSON document on stdout for
    the Java backend to parse.

    On any failure an all-null JSON document with the same schema is printed
    on stdout (so the caller can still parse it), the traceback is written to
    stderr, and the process exits with status 1.
    """
    try:
        # Load the Douban movie dataset.
        df = pd.read_csv(DATA_FILE)

        # Normalize column names: strip whitespace, lowercase latin letters.
        # The Chinese headers used below are unaffected by lower().
        df.columns = [c.strip().lower() for c in df.columns]

        # Key columns (Chinese headers as produced by the data export).
        rating_col = "豆瓣评分"        # Douban rating
        year_col = "上映日期"          # release date
        genre_col = "类型"             # genre(s)
        country_col = "制片国家/地区"  # production country/region
        duration_col = "片长"          # runtime

        def split_multi(x):
            """Split a multi-valued cell on common delimiters; [] for NaN."""
            if pd.isna(x):
                return []
            return [p.strip() for p in re.split(r"[,/|，、\s]+", str(x)) if p.strip()]

        # Extract a 4-digit year from the release-date string.
        df[year_col] = df[year_col].astype(str).str.extract(r"(\d{4})").astype(float)

        # Extract the leading number (minutes) from the runtime string.
        df[duration_col] = df[duration_col].astype(str).str.extract(r"(\d+)").astype(float)

        # Split each genre cell exactly once and reuse the result below; the
        # previous code re-split every row once per top genre (12x the work).
        genre_lists = df[genre_col].apply(split_multi)

        # Count how often each genre appears across the dataset.
        cnt = Counter(g for lst in genre_lists for g in lst)
        top_genres = [g for g, _ in cnt.most_common(12)]

        # Multi-hot encode the 12 most common genres (bind g eagerly so the
        # lambda is safe regardless of when it runs).
        for g in top_genres:
            df[f"genre_{g}"] = genre_lists.apply(lambda lst, g=g: 1 if g in lst else 0)

        # --------------------------------------------------------------------
        # Chart 1: rating distribution histogram
        # --------------------------------------------------------------------
        plt.figure(figsize=(10, 6))
        plt.hist(df[rating_col].dropna(), bins=30, edgecolor='black', alpha=0.7, color='steelblue')
        plt.xlabel("豆瓣评分", fontsize=12)
        plt.ylabel("电影数量", fontsize=12)
        plt.title("豆瓣评分分布", fontsize=14, fontweight='bold')
        plt.grid(axis='y', alpha=0.3)
        rating_dist_path = os.path.join(IMAGE_DIR, 'rating_distribution.png')
        plt.savefig(rating_dist_path, dpi=100, bbox_inches='tight')
        plt.close()

        # --------------------------------------------------------------------
        # Chart 2: mean rating per release year
        # --------------------------------------------------------------------
        # Computed once here and reused for the JSON payload further down.
        yearly = df.groupby(year_col)[rating_col].mean()
        plt.figure(figsize=(12, 6))
        yearly.plot(kind="line", linewidth=2, color='coral')
        plt.ylabel("平均评分", fontsize=12)
        plt.xlabel("年份", fontsize=12)
        plt.title("年份与平均评分变化趋势", fontsize=14, fontweight='bold')
        plt.grid(alpha=0.3)
        year_trend_path = os.path.join(IMAGE_DIR, 'year_trend.png')
        plt.savefig(year_trend_path, dpi=100, bbox_inches='tight')
        plt.close()

        # --------------------------------------------------------------------
        # Prepare the classification task
        # --------------------------------------------------------------------

        # Binary label: "high rating" means rating >= threshold.
        threshold = 9.1
        df["high_rating"] = (df[rating_col] >= threshold).astype(int)

        # If the threshold yields a single class, fall back to the median so
        # the classifiers have something to separate.
        class_counts = df["high_rating"].value_counts()
        if len(class_counts) < 2:
            threshold = df[rating_col].median()
            df["high_rating"] = (df[rating_col] >= threshold).astype(int)
            class_counts = df["high_rating"].value_counts()

        # Numeric vs. categorical feature lists.
        numeric_features = [year_col, duration_col] + [c for c in df.columns if c.startswith("genre_")]
        categorical_features = [country_col]

        # Feature matrix and label vector.
        X = df[numeric_features + categorical_features]
        y = df["high_rating"]

        # Stratified split when both classes are present; plain split otherwise.
        if len(y.unique()) >= 2:
            X_train, X_test, y_train, y_test = train_test_split(
                X, y, test_size=0.2, stratify=y, random_state=42
            )
        else:
            X_train, X_test, y_train, y_test = train_test_split(
                X, y, test_size=0.2, random_state=42
            )

        # Numeric preprocessing: median imputation + standardization.
        numeric_transformer = Pipeline([
            ("imputer", SimpleImputer(strategy="median")),
            ("scaler", StandardScaler())
        ])

        # Categorical preprocessing: mode imputation + one-hot encoding
        # (unknown test-time categories are ignored rather than raising).
        categorical_transformer = Pipeline([
            ("imputer", SimpleImputer(strategy="most_frequent")),
            ("onehot", OneHotEncoder(handle_unknown="ignore", sparse_output=False))
        ])

        # Combined column transformer applied inside every model pipeline.
        preprocess = ColumnTransformer([
            ("num", numeric_transformer, numeric_features),
            ("cat", categorical_transformer, categorical_features)
        ])

        # --------------------------------------------------------------------
        # Chart 3: feature correlation heatmap
        # --------------------------------------------------------------------
        # Computed once here and reused for the JSON payload further down
        # (df is not mutated again before that point).
        corr = df[[rating_col, duration_col] + [c for c in df.columns if c.startswith("genre_")]].corr()
        plt.figure(figsize=(12, 10))
        sns.heatmap(corr, cmap="coolwarm", annot=False, cbar_kws={'label': '相关系数'})
        plt.title("特征与豆瓣评分相关性热力图", fontsize=14, fontweight='bold')
        plt.tight_layout()
        corr_path = os.path.join(IMAGE_DIR, 'correlation_matrix.png')
        plt.savefig(corr_path, dpi=100, bbox_inches='tight')
        plt.close()

        # --------------------------------------------------------------------
        # Model training and comparison
        # --------------------------------------------------------------------

        models = {
            "LogisticRegression": LogisticRegression(max_iter=1000),
            "SVC(RBF)": SVC(probability=True),
            "RandomForest": RandomForestClassifier(n_estimators=300, random_state=42),
            "GradientBoosting": GradientBoostingClassifier(random_state=42),
            "KNN": KNeighborsClassifier(n_neighbors=15)
        }

        results = []
        roc_data = []
        best_model_name = None
        best_auc = 0
        best_y_pred = None

        # All ROC curves are drawn onto one shared figure inside the loop.
        plt.figure(figsize=(10, 8))

        for name, clf in models.items():
            pipe = Pipeline([("preprocess", preprocess), ("clf", clf)])
            pipe.fit(X_train, y_train)
            y_pred = pipe.predict(X_test)
            y_proba = pipe.predict_proba(X_test)[:, 1]

            acc = accuracy_score(y_test, y_pred)
            f1 = f1_score(y_test, y_pred)
            auc = roc_auc_score(y_test, y_proba)

            # Confusion-matrix cells (assumes both classes appear in y_test,
            # which the stratified split guarantees when two classes exist).
            tn, fp, fn, tp = confusion_matrix(y_test, y_pred).ravel()

            results.append({
                'model': name,
                'accuracy': round(float(acc), 3),
                'f1': round(float(f1), 3),
                'rocAuc': round(float(auc), 3),
                'tn': int(tn),
                'fp': int(fp),
                'fn': int(fn),
                'tp': int(tp)
            })

            # Downsample the ROC curve to at most 50 points for the frontend.
            fpr, tpr, _ = roc_curve(y_test, y_proba)
            indices = np.linspace(0, len(fpr)-1, min(50, len(fpr)), dtype=int)

            roc_data.append({
                'name': name,
                'auc': round(float(auc), 3),
                'fpr': [round(float(fpr[i]), 4) for i in indices],
                'tpr': [round(float(tpr[i]), 4) for i in indices]
            })

            plt.plot(fpr, tpr, linewidth=2, label=f"{name} (AUC={auc:.3f})")

            # Track the best model by ROC-AUC.
            if auc > best_auc:
                best_auc = auc
                best_model_name = name
                best_y_pred = y_pred

        # --------------------------------------------------------------------
        # Chart 4: ROC curve comparison
        # --------------------------------------------------------------------
        plt.plot([0, 1], [0, 1], '--', color='gray', label='Random Guess')
        plt.legend(loc='lower right', fontsize=10)
        plt.title("五种模型ROC曲线对比", fontsize=14, fontweight='bold')
        plt.xlabel("False Positive Rate", fontsize=12)
        plt.ylabel("True Positive Rate", fontsize=12)
        plt.grid(alpha=0.3)
        roc_path = os.path.join(IMAGE_DIR, 'roc_curves.png')
        plt.savefig(roc_path, dpi=100, bbox_inches='tight')
        plt.close()

        # --------------------------------------------------------------------
        # Chart 5: confusion matrix of the best model
        # --------------------------------------------------------------------
        # Computed once here and reused for the JSON payload below.
        cm = confusion_matrix(y_test, best_y_pred)
        plt.figure(figsize=(8, 6))
        sns.heatmap(cm, annot=True, fmt="d", cmap="Blues", cbar_kws={'label': '数量'})
        plt.title(f"最佳模型 {best_model_name} 混淆矩阵", fontsize=14, fontweight='bold')
        plt.xlabel("预测标签", fontsize=12)
        plt.ylabel("真实标签", fontsize=12)
        cm_path = os.path.join(IMAGE_DIR, f'confusion_matrix_{best_model_name}.png')
        plt.savefig(cm_path, dpi=100, bbox_inches='tight')
        plt.close()

        # --------------------------------------------------------------------
        # Assemble the JSON payload
        # --------------------------------------------------------------------

        # Summary statistics.
        high_rating_count = int(df["high_rating"].sum())

        # Rating histogram (20 bins) with human-readable bin labels.
        hist_counts, bin_edges = np.histogram(df[rating_col].dropna(), bins=20)
        bins_labels = [f"{bin_edges[i]:.1f}-{bin_edges[i+1]:.1f}" for i in range(len(bin_edges)-1)]

        # Year-trend series, reusing the groupby mean computed for chart 2.
        yearly_clean = yearly.dropna()
        # NaN guard kept for safety even though groupby drops NaN keys.
        years_list = [str(int(y)) if not np.isnan(y) else "" for y in yearly_clean.index]
        avg_ratings_list = [round(float(r), 2) for r in yearly_clean.values]

        # Correlation matrix, reusing `corr` computed for chart 3.
        corr_features = corr.columns.tolist()
        corr_values = [[round(float(v), 3) for v in row] for row in corr.values]

        # 2x2 confusion matrix of the best model, reusing `cm` from chart 5.
        cm_matrix = [[int(v) for v in row] for row in cm]

        # Structure mirrors the Java-side DTO classes that parse this output.
        output = {
            # Overall movie statistics
            'movieStats': {
                'totalCount': len(df),
                'avgRating': round(float(df[rating_col].mean()), 2),
                'highRatingCount': high_rating_count
            },

            # Rating distribution
            'ratingDistribution': {
                'bins': bins_labels,
                'counts': [int(c) for c in hist_counts]
            },

            # Yearly trend
            'yearTrend': {
                'years': years_list,
                'avgRatings': avg_ratings_list
            },

            # Correlation matrix
            'correlationMatrix': {
                'features': corr_features,
                'values': corr_values
            },

            # ROC curve data
            'rocCurves': {
                'models': roc_data
            },

            # Model comparison results
            'modelResults': {
                'bestModel': best_model_name,
                'results': results
            },

            # Confusion matrix
            'confusionMatrix': {
                'matrix': cm_matrix
            },

            # Image list (array format required by the frontend)
            'images': [
                {
                    'title': 'Rating Distribution',
                    'filename': 'rating_distribution.png'
                },
                {
                    'title': 'Year Trend',
                    'filename': 'year_trend.png'
                },
                {
                    'title': 'Correlation Matrix',
                    'filename': 'correlation_matrix.png'
                },
                {
                    'title': 'ROC Curves',
                    'filename': 'roc_curves.png'
                },
                {
                    'title': f'Confusion Matrix - {best_model_name}',
                    'filename': f'confusion_matrix_{best_model_name}.png'
                }
            ]
        }

        # stdout must contain ONLY this JSON document — the Java side parses it.
        print(json.dumps(output, ensure_ascii=False))

    except Exception:
        # Write full diagnostics to stderr (the previous version built the
        # message but never emitted it, so failures were undebuggable).
        # stdout is reserved for parseable JSON.
        import traceback
        traceback.print_exc(file=sys.stderr)
        # All-null JSON with the expected schema so the caller can still parse.
        error_output = {
            'movieStats': None,
            'ratingDistribution': None,
            'yearTrend': None,
            'correlationMatrix': None,
            'rocCurves': None,
            'modelResults': None,
            'confusionMatrix': None,
            'images': None
        }
        print(json.dumps(error_output, ensure_ascii=False))
        sys.exit(1)

# Entry point: run the pipeline only when executed directly (not on import).
if __name__ == "__main__":
    main()
