from typing import List, Optional, Tuple

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from dash import html
from matplotlib.figure import Figure

from src.common.descriptors.load_data import (
    load_consumption_data,
    load_grade_data,
    load_kaoqin_data,
    load_kaoqin_type_data,
    load_stu_data,
)

def process_core_data(
    selected_grade: Optional[str] = None,
    student_df: Optional[pd.DataFrame] = None,
    grade_df: Optional[pd.DataFrame] = None,
) -> Tuple[pd.DataFrame, pd.DataFrame, List[int]]:
    """Identify "biased" students whose per-subject performance varies most.

    A student's score in each (exam, subject) group is converted to a Z-score;
    the per-student standard deviation of mean subject Z-scores measures how
    uneven the student is across subjects. Students at or above the 90th
    percentile of that spread are flagged as biased.

    Args:
        selected_grade: Optional literal substring filter applied to the
            class name (``cla_Name``), e.g. a grade prefix.
        student_df: Optional pre-loaded student table; defaults to
            ``load_stu_data()``. Provided for callers/tests that want to
            inject data instead of hitting the data layer.
        grade_df: Optional pre-loaded score table; defaults to
            ``load_grade_data()``.

    Returns:
        Tuple of (student info merged with ``std_dev``/``is_biased`` columns,
        the filtered score rows with a ``Z_score`` column, and the list of
        biased student IDs).
    """
    if student_df is None:
        student_df = load_stu_data()
    # .copy() so the column assignment below never hits a view
    # (SettingWithCopyWarning) of the loader's frame.
    student_info = student_df.drop_duplicates('bf_StudentID').copy()
    student_info['bf_StudentID'] = student_info['bf_StudentID'].astype(int)

    if selected_grade:
        # regex=False: the grade is a literal substring, not a pattern;
        # na=False: rows with a missing class name are simply excluded
        # instead of raising on a non-boolean mask.
        mask = student_info['cla_Name'].str.contains(
            selected_grade, regex=False, na=False
        )
        student_info = student_info[mask]
    grade_students = student_info['bf_StudentID'].tolist()

    if grade_df is None:
        grade_df = load_grade_data()
    # Keep non-negative scores for students in the selected population only.
    valid = grade_df[
        (grade_df['mes_Score'] >= 0)
        & (grade_df['mes_StudentID'].isin(grade_students))
    ].copy()

    # Z-score within each (exam, subject) group; the small epsilon guards
    # against a zero standard deviation.
    valid['Z_score'] = valid.groupby(['exam_number', 'mes_sub_id'])['mes_Score'].transform(
        lambda s: (s - s.mean()) / (s.std() + 1e-6)
    )

    # Mean Z per student per subject, then the spread of those means.
    student_zscores = valid.groupby(['mes_StudentID', 'mes_sub_name'])['Z_score'].mean().reset_index()
    student_std = student_zscores.groupby('mes_StudentID')['Z_score'].std().reset_index(name='std_dev')

    # Top decile of cross-subject spread counts as "biased".
    threshold = student_std['std_dev'].quantile(0.9)
    biased_students = student_std[student_std['std_dev'] >= threshold]['mes_StudentID'].tolist()

    merged = pd.merge(
        student_info,
        student_std.rename(columns={'mes_StudentID': 'bf_StudentID'}),
        on='bf_StudentID',
        how='left',
    )
    merged['is_biased'] = merged['bf_StudentID'].isin(biased_students)

    return merged, valid, biased_students

def get_attendance_stats(
    kaoqin: Optional[pd.DataFrame] = None,
    kaoqin_type: Optional[pd.DataFrame] = None,
) -> pd.DataFrame:
    """Count negative attendance events per student.

    An event is negative when its type name contains 迟到 (late), 早退
    (leaving early) or 校服 (uniform violation).

    Args:
        kaoqin: Optional pre-loaded attendance table; defaults to
            ``load_kaoqin_data()``.
        kaoqin_type: Optional pre-loaded attendance-type lookup table;
            defaults to ``load_kaoqin_type_data()``.

    Returns:
        DataFrame with columns ``bf_StudentID`` and ``negative_count``.
    """
    if kaoqin is None:
        kaoqin = load_kaoqin_data()
    if kaoqin_type is None:
        kaoqin_type = load_kaoqin_type_data()
    merged = pd.merge(
        kaoqin,
        kaoqin_type[['controler_id', 'controler_name']],
        left_on='ControllerID',
        right_on='controler_id',
        how='left',
    )
    # The '_y' suffix only exists when kaoqin itself carries a
    # 'controler_name' column (merge suffixes both sides); fall back to the
    # unsuffixed column otherwise instead of raising a KeyError.
    name_col = 'controler_name_y' if 'controler_name_y' in merged.columns else 'controler_name'
    # na=False: left-join rows with no matching type count as non-negative
    # rather than producing NaN in the boolean column.
    merged['is_negative'] = merged[name_col].str.contains('迟到|早退|校服', na=False)
    return merged.groupby('bf_StudentID')['is_negative'].sum().reset_index(name='negative_count')

def get_consumption_stats(consumption: Optional[pd.DataFrame] = None) -> pd.DataFrame:
    """Aggregate total and mean spending per student.

    Transaction amounts are taken as absolute values, so refunds/debits
    recorded as negatives still count toward spending volume.

    Args:
        consumption: Optional pre-loaded transaction table; defaults to
            ``load_consumption_data()``.

    Returns:
        DataFrame with columns ``bf_StudentID``, ``total_spent`` and
        ``avg_spent``.
    """
    if consumption is None:
        consumption = load_consumption_data()
    df = consumption.copy()  # never mutate the caller's / loader's frame
    df['MonDeal'] = df['MonDeal'].abs()
    return df.groupby('bf_StudentID').agg(
        total_spent=('MonDeal', 'sum'),
        avg_spent=('MonDeal', 'mean'),
    ).reset_index()


def plot_subject_distribution(valid_scores: pd.DataFrame, biased_students: List[int]) -> Figure:
    """Pie chart of subject frequency among biased students' score rows.

    Args:
        valid_scores: Score rows with ``mes_StudentID`` and ``mes_sub_name``.
        biased_students: IDs of students flagged as biased.

    Returns:
        The matplotlib Figure containing the pie chart.
    """
    biased_rows = valid_scores[valid_scores['mes_StudentID'].isin(biased_students)]
    # Draw on an explicit Axes instead of relying on pyplot's implicit
    # "current figure" global — the implicit form breaks if anything else
    # touches pyplot state between figure creation and plotting.
    fig, ax = plt.subplots(figsize=(10, 6))
    biased_rows['mes_sub_name'].value_counts().plot.pie(autopct='%1.1f%%', ax=ax)
    return fig

def plot_combined_analysis(merged_data: pd.DataFrame) -> Figure:
    """Bar charts comparing biased vs non-biased students on spending and
    negative attendance.

    Args:
        merged_data: Per-student frame with ``is_biased``, ``total_spent``
            and ``negative_count`` columns.

    Returns:
        Figure with two side-by-side bar charts (group means).
    """
    fig, axes = plt.subplots(1, 2, figsize=(10, 8))
    group_labels = {False: '非偏科生', True: '偏科生'}

    panels = [
        (axes[0], 'total_spent', '消费金额对比', '人均消费金额（元）'),
        (axes[1], 'negative_count', '负面考勤对比', '人均负面考勤次数'),
    ]
    for ax, column, title, ylabel in panels:
        means = merged_data.groupby('is_biased')[column].mean()
        means.plot.bar(ax=ax)
        ax.set_title(title)
        # Label only the groups actually present: hard-coding two ticks and
        # two labels raises when one is_biased group is empty.
        ax.set_xticks(range(len(means)))
        ax.set_xticklabels(
            [group_labels.get(flag, str(flag)) for flag in means.index],
            rotation=0,
        )
        ax.set_xlabel('')
        ax.set_ylabel(ylabel)

    plt.tight_layout()
    return fig

def stats_table_div(merged_data: pd.DataFrame) -> html.Div:
    """Build a four-card summary strip: student count, biased ratio,
    maximum total spending, and mean negative-attendance count.

    Args:
        merged_data: Per-student frame with ``is_biased``, ``total_spent``
            and ``negative_count`` columns.

    Returns:
        A dash ``html.Div`` containing the flex row of stat cards.
    """
    label_style = {'fontSize': '16px', 'color': '#7f8c8d'}
    value_style = {'fontSize': '24px', 'fontWeight': 'bold'}
    card_style = {'padding': '15px', 'textAlign': 'center', 'flex': 1}

    def make_card(label: str, value: str, color: str) -> html.Div:
        # One stat card: muted label on top, colored bold value below.
        return html.Div([
            html.Div(label, style=label_style),
            html.Div(value, style={**value_style, 'color': color}),
        ], style=card_style)

    cards = [
        make_card('👥 学生总数', f"{len(merged_data)}", '#2c3e50'),
        make_card('📈 偏科比例', f"{merged_data['is_biased'].mean():.1%}", '#e74c3c'),
        make_card('💵 最高消费', f"¥{merged_data['total_spent'].max():.1f}", '#27ae60'),
        make_card('📅 平均考勤', f"{merged_data['negative_count'].mean():.1f}次", '#f39c12'),
    ]
    row_style = {'display': 'flex', 'justifyContent': 'space-around', 'flexWrap': 'wrap'}
    return html.Div([html.Div(cards, style=row_style)])