import random
import os, json
from ydata_profiling import ProfileReport
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import plotly.express as px
import seaborn as sns
import matplotlib.pyplot as plt
from flask import Flask, render_template, request, jsonify
import pandas as pd
import numpy as np
import matplotlib
import joblib
import pickle
matplotlib.use('Agg')  # 使用非交互式后端

# Flask application setup: uploaded artefacts go to static/uploads,
# generated CSV datasets to data/.
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'static/uploads'
app.config['GENERATED_DATA_FOLDER'] = 'data'

# Ensure the required folders exist before any request is handled.
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
os.makedirs(app.config['GENERATED_DATA_FOLDER'], exist_ok=True)

# Home page route


@app.route('/')
def index():
    """Render the landing page."""
    return render_template('index.html')

# Route: generate a fake student dataset


@app.route('/generate_data', methods=['POST'])
def generate_data():
    """Create a fake student dataset from the submitted form and return a preview.

    Reads the student count and the three grade-component weights (given as
    percentages) from the POST form, generates the dataset, persists it as
    CSV, records the generation parameters as JSON, and responds with the
    first ten rows plus the CSV path.
    """
    form = request.form
    num_students = int(form.get('num_students', 100))
    # The form supplies percentage weights; convert them to fractions.
    attendance_weight = float(form.get('attendance_weight', 20)) / 100
    homework_weight = float(form.get('homework_weight', 30)) / 100
    midterm_weight = float(form.get('midterm_weight', 50)) / 100

    data = generate_fake_student_data(
        num_students, attendance_weight, homework_weight, midterm_weight)

    # Persist the generated dataset.
    file_path = os.path.join(
        app.config['GENERATED_DATA_FOLDER'], 'student_data.csv')
    data.to_csv(file_path, index=False)

    # Record the generation parameters so other views can display them later.
    params_path = os.path.join(
        app.config['GENERATED_DATA_FOLDER'], 'generation_params.json')
    with open(params_path, 'w') as f:
        json.dump({
            'num_students': num_students,
            'attendance_weight': attendance_weight,
            'homework_weight': homework_weight,
            'midterm_weight': midterm_weight,
        }, f)

    # Respond with a ten-row preview and the location of the full CSV.
    return jsonify({
        'preview': data.head(10).to_dict(orient='records'),
        'file_path': file_path
    })

# Data analysis route (with result caching)


@app.route('/analyze_data', methods=['GET', 'POST'])
def analyze_data():
    """Run (or replay) the analysis pipeline and render the results page.

    POST, or GET with no cache file, re-runs cleaning, EDA, model training
    and insight generation, persists the fitted models and caches the
    results; GET with an existing cache simply replays the cached results.
    """
    cache_path = os.path.join(
        app.config['UPLOAD_FOLDER'], 'analysis_cache.pkl')

    # Re-analyse on POST, or whenever no cached result exists yet.
    if request.method == 'POST' or not os.path.exists(cache_path):
        file_path = os.path.join(
            app.config['GENERATED_DATA_FOLDER'], 'student_data.csv')
        if not os.path.exists(file_path):
            return "尚未生成数据，请先在首页生成数据"
        data = pd.read_csv(file_path)

        cleaned_data = clean_data(data)
        eda_results = perform_eda(cleaned_data)
        models = train_models(cleaned_data)

        # Persist the fitted models so /predict can reload them later.
        model_dir = os.path.join(app.config['UPLOAD_FOLDER'], 'models')
        os.makedirs(model_dir, exist_ok=True)
        joblib.dump(models['logistic_regression'],
                    os.path.join(model_dir, 'logistic_regression_model.pkl'))
        joblib.dump(models['decision_tree'],
                    os.path.join(model_dir, 'decision_tree_model.pkl'))

        # Expose the model results alongside the EDA output.
        eda_results['models'] = models
        insights = generate_insights(cleaned_data, models)

        # Cache the full result set for subsequent GET requests.
        with open(cache_path, 'wb') as f:
            pickle.dump({'eda': eda_results, 'insights': insights}, f)

        return render_template('analysis.html', eda=eda_results, insights=insights)

    # GET with an existing cache: replay the stored analysis.
    with open(cache_path, 'rb') as f:
        cached_results = pickle.load(f)
    return render_template('analysis.html',
                           eda=cached_results['eda'],
                           insights=cached_results['insights'])

# Prediction route


@app.route('/predict', methods=['GET', 'POST'])
def predict():
    """Render the prediction form (GET) or predict one student's results (POST).

    POST expects numeric attendance, homework and midterm form fields.
    Invalid or missing values render the result page with an error message
    instead of raising (the original crashed with a 500: a missing field
    makes form.get() return None and float(None) raises TypeError).
    """
    if request.method == 'POST':
        try:
            attendance = float(request.form.get('attendance'))
            homework = float(request.form.get('homework'))
            midterm = float(request.form.get('midterm'))
        except (TypeError, ValueError):
            return render_template('prediction_result.html',
                                   error="输入无效，请为出勤、作业和期中成绩填写数值")

        # Load the persisted models; they only exist after /analyze_data ran.
        models = load_models()
        if not models:
            return render_template('prediction_result.html', error="模型尚未训练，请先进行数据分析")

        # Single-row frame matching the training feature layout.
        input_data = pd.DataFrame({
            'attendance': [attendance],
            'homework': [homework],
            'midterm': [midterm]
        })

        # NOTE(review): the models were fitted on raw (unscaled) features in
        # train_models, so no scaler is applied here either.
        fail_prediction = models['logistic_regression'].predict(input_data)[0]
        score_prediction = models['decision_tree'].predict(input_data)[0]

        return render_template('prediction_result.html',
                               attendance=attendance,
                               homework=homework,
                               midterm=midterm,
                               fail_prediction='挂科' if fail_prediction else '不挂科',
                               score_prediction=score_prediction)

    return render_template('predict.html')

# Helper - generate fake student data


def generate_fake_student_data(num_students, attendance_weight, homework_weight, midterm_weight):
    """Generate a synthetic student dataset.

    Args:
        num_students: number of rows to generate.
        attendance_weight / homework_weight / midterm_weight: fractional
            weights combining the three component scores into final_grade
            (callers pass values that sum to 1).

    Returns:
        DataFrame with columns student_id, attendance, homework, midterm,
        final_grade (0-100, one decimal), failed (bool, final_grade < 60)
        and score_range (categorical 10-point bucket).
    """
    data = pd.DataFrame({
        'student_id': range(1, num_students + 1),
        'attendance': np.random.uniform(0, 100, num_students),
        'homework': np.random.uniform(0, 100, num_students),
        'midterm': np.random.uniform(0, 100, num_students)
    })

    # Inject ~5% outliers: extreme attendance for all of them, and unusually
    # low homework for the first half of the outlier rows.
    num_outliers = max(1, int(num_students * 0.05))
    outlier_indices = random.sample(range(num_students), num_outliers)
    data.loc[outlier_indices, 'attendance'] = np.random.choice(
        [0, 100], num_outliers)
    data.loc[outlier_indices[:num_outliers // 2],
             'homework'] = np.random.uniform(0, 30, num_outliers // 2)

    # Weighted final grade with ±5% random noise, clipped to [0, 100]
    # and rounded to one decimal place.
    data['final_grade'] = (
        data['attendance'] * attendance_weight +
        data['homework'] * homework_weight +
        data['midterm'] * midterm_weight
    )
    data['final_grade'] = np.clip(
        data['final_grade'] * (1 + np.random.uniform(-0.05, 0.05, size=len(data))),
        0, 100)
    data['final_grade'] = data['final_grade'].round(1)

    # Failure label.
    data['failed'] = data['final_grade'] < 60

    # 10-point score buckets. include_lowest=True: without it a grade of
    # exactly 0 falls outside the first (0, 10] interval and becomes NaN,
    # which would later poison the score-range classifier's training labels.
    data['score_range'] = pd.cut(
        data['final_grade'], bins=range(0, 101, 10),
        labels=[f'{i}-{i+9}' for i in range(0, 100, 10)],
        include_lowest=True)

    return data

# Helper - data cleaning


def clean_data(data):
    """Return a copy of *data* with outliers clipped by the IQR rule.

    Each score column is limited to [Q1 - 1.5*IQR, Q3 + 1.5*IQR]; the
    input DataFrame is left untouched.
    """
    result = data.copy()
    for column in ('attendance', 'homework', 'midterm', 'final_grade'):
        q1, q3 = result[column].quantile([0.25, 0.75])
        spread = q3 - q1
        low = q1 - 1.5 * spread
        high = q3 + 1.5 * spread
        result[column] = result[column].clip(low, high)
    return result

# Helper - exploratory data analysis


def perform_eda(data):
    """Run exploratory analysis and write all artefacts under UPLOAD_FOLDER.

    Produces a correlation matrix, a seaborn heatmap PNG, a 3-D K-Means
    cluster plot (plotly HTML) and a ydata-profiling HTML report.

    Args:
        data: cleaned student DataFrame (not modified by this function).

    Returns:
        dict with keys corr_matrix, heatmap_path, cluster_path, profile_path.
    """
    # Non-interactive backend: figures are rendered server-side to files.
    plt.switch_backend('Agg')
    results = {}

    # Correlation matrix of the score columns.
    corr_matrix = data[['attendance', 'homework',
                        'midterm', 'final_grade']].corr()
    results['corr_matrix'] = corr_matrix

    # Correlation heatmap.
    plt.figure(figsize=(10, 8))
    sns.heatmap(corr_matrix, annot=True, cmap='coolwarm', vmin=-1, vmax=1)
    heatmap_path = os.path.join(app.config['UPLOAD_FOLDER'], 'heatmap.png')
    plt.savefig(heatmap_path)
    plt.close()
    results['heatmap_path'] = heatmap_path

    # K-Means clustering. Work on a copy so the caller's DataFrame is not
    # mutated (the original version added a 'cluster' column to *data*).
    plot_data = data.copy()
    kmeans = KMeans(n_clusters=3, random_state=42)
    plot_data['cluster'] = kmeans.fit_predict(
        plot_data[['attendance', 'homework', 'midterm']])
    cluster_plot = px.scatter_3d(
        plot_data, x='attendance', y='homework', z='midterm', color='cluster',
        title='学生成绩K-Means聚类分析'
    )
    cluster_path = os.path.join(app.config['UPLOAD_FOLDER'], 'cluster.html')
    cluster_plot.write_html(cluster_path)
    results['cluster_path'] = cluster_path

    # Automated profiling report. Profiled on the copy (which includes the
    # cluster labels) so the report's contents match the original behaviour.
    profile = ProfileReport(plot_data, title='学生成绩数据分析报告')
    profile_path = os.path.join(
        app.config['UPLOAD_FOLDER'], 'profiling_report.html')
    profile.to_file(profile_path)
    results['profile_path'] = profile_path

    return results

# Helper - model training


def train_models(data):
    """Fit the pass/fail and score-range classifiers on the cleaned data.

    Args:
        data: DataFrame containing attendance, homework, midterm, failed
            and score_range columns.

    Returns:
        dict with the fitted models, their test-set accuracies, and a
        feature -> importance mapping for each model.
    """
    X = data[['attendance', 'homework', 'midterm']]
    y_fail = data['failed']
    y_score = data['score_range']

    # Both splits use the same random_state, so the row partition is
    # identical for the two targets.
    X_train, X_test, y_fail_train, y_fail_test = train_test_split(
        X, y_fail, test_size=0.2, random_state=42)
    _, _, y_score_train, y_score_test = train_test_split(
        X, y_score, test_size=0.2, random_state=42)

    # Logistic regression predicts pass/fail.
    logistic_reg = LogisticRegression()
    logistic_reg.fit(X_train, y_fail_train)

    # Decision tree predicts the 10-point score bucket.
    decision_tree = DecisionTreeClassifier()
    decision_tree.fit(X_train, y_score_train)

    # Pseudo feature importance for logistic regression: absolute
    # coefficients normalised to sum to 1.
    coef_abs = np.abs(logistic_reg.coef_[0])
    coef_normalized = coef_abs / coef_abs.sum()
    logistic_features = pd.DataFrame({
        'feature': X.columns,
        'importance': coef_normalized
    }).sort_values('importance', ascending=False)

    # Decision trees expose importances directly.
    tree_features = pd.DataFrame({
        'feature': X.columns,
        'importance': decision_tree.feature_importances_
    }).sort_values('importance', ascending=False)

    return {
        'logistic_regression': logistic_reg,
        'decision_tree': decision_tree,
        'accuracy': {
            'logistic_regression': logistic_reg.score(X_test, y_fail_test),
            'decision_tree': decision_tree.score(X_test, y_score_test)
        },
        'logistic_regression_features': dict(zip(logistic_features['feature'], logistic_features['importance'])),
        'decision_tree_features': dict(zip(tree_features['feature'], tree_features['importance']))
    }

# Helper - generate study insights


def generate_insights(data, models):
    """Build human-readable findings from the data and fitted models.

    Returns a dict with an HTML feature-importance table (when the tree
    model exposes importances), one correlation sentence per feature, and
    a fixed list of study suggestions.
    """
    insights = {}

    # Decision-tree feature importances rendered as an HTML table.
    tree = models['decision_tree']
    if hasattr(tree, 'feature_importances_'):
        importance_table = pd.DataFrame({
            'feature': ['attendance', 'homework', 'midterm'],
            'importance': tree.feature_importances_
        }).sort_values('importance', ascending=False)
        insights['feature_importance'] = importance_table.to_html(index=False)

    # One sentence per feature describing how strongly it correlates with
    # the final grade (thresholds: > 0.7 strong, > 0.3 moderate).
    corr = data[['attendance', 'homework', 'midterm', 'final_grade']].corr()[
        'final_grade']
    corr_insights = []
    for feature in ['attendance', 'homework', 'midterm']:
        value = corr[feature]
        if value > 0.7:
            message = f'{feature}与期末成绩高度相关（相关系数：{value:.2f}），对成绩影响显著。'
        elif value > 0.3:
            message = f'{feature}与期末成绩中度相关（相关系数：{value:.2f}），对成绩有一定影响。'
        else:
            message = f'{feature}与期末成绩相关性较低（相关系数：{value:.2f}），对成绩影响有限。'
        corr_insights.append(message)
    insights['correlation'] = corr_insights

    # Static study advice shown on the analysis page.
    insights['suggestions'] = [
        '根据分析结果，建议学生重点关注与期末成绩相关性最高的因素。',
        '定期完成作业并保持良好的出勤记录对提高期末成绩有积极影响。',
        '期中考试表现良好的学生往往期末成绩也较好，建议重视期中考试的复习。',
        '对于有挂科风险的学生，建议增加学习投入，特别是在薄弱环节上加强。'
    ]

    return insights


# Helper - load persisted models
def load_models():
    """Load the persisted classifiers; return None if any file is missing."""
    model_dir = os.path.join(app.config['UPLOAD_FOLDER'], 'models')
    paths = {
        'logistic_regression': os.path.join(model_dir, 'logistic_regression_model.pkl'),
        'decision_tree': os.path.join(model_dir, 'decision_tree_model.pkl'),
    }
    try:
        return {name: joblib.load(path) for name, path in paths.items()}
    except FileNotFoundError:
        return None

@app.route('/data_preview')
def data_preview():
    """Paginated table view of the generated dataset and its generation parameters."""
    file_path = os.path.join(
        app.config['GENERATED_DATA_FOLDER'], 'student_data.csv')
    if not os.path.exists(file_path):
        return "尚未生成数据，请先在首页生成数据"

    data = pd.read_csv(file_path)

    # Pagination: 50 rows per page. The requested page is clamped into the
    # valid range so an out-of-range ?page= no longer yields an empty table.
    per_page = 50
    total_pages = max(1, (len(data) + per_page - 1) // per_page)
    page = request.args.get('page', 1, type=int)
    page = min(max(page, 1), total_pages)
    start = (page - 1) * per_page
    paginated_data = data.iloc[start:start + per_page]

    # The parameter file may be absent (e.g. the CSV was produced before the
    # parameters were recorded); fall back to defaults instead of crashing
    # with FileNotFoundError.
    params_path = os.path.join(
        app.config['GENERATED_DATA_FOLDER'], 'generation_params.json')
    generation_params = {
        'num_students': len(data),
        'attendance_weight': 0.2,
        'homework_weight': 0.3,
        'midterm_weight': 0.5,
    }
    if os.path.exists(params_path):
        with open(params_path, 'r') as f:
            generation_params.update(json.load(f))

    return render_template('data_preview.html',
                           data=paginated_data.to_dict(orient='records'),
                           current_page=page,
                           total_pages=total_pages,
                           num_students=generation_params['num_students'],
                           attendance_weight=generation_params['attendance_weight'],
                           homework_weight=generation_params['homework_weight'],
                           midterm_weight=generation_params['midterm_weight'])


# Development entry point.
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug reloader/debugger — for
    # local development only, not for production deployment.
    app.run(debug=True)
