import inspect
import os
import pickle
import tempfile

import cv2
import gradio as gr
import joblib
import numpy as np
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, GlobalMaxPooling2D
from tensorflow.keras.utils import load_img, img_to_array
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (
    RandomForestClassifier, BaggingClassifier, AdaBoostClassifier,
    GradientBoostingClassifier, VotingClassifier, StackingClassifier
)
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier

# Module-level state shared by the Gradio callbacks.
trained_classifiers = None  # dict name -> fitted sklearn estimator; populated lazily by load_or_train_models()
vgg_feature_extractor = None  # Keras Model mapping a 224x224x3 image batch to a feature vector
label_map = {'猫': 0, '狗': 1}  # class name -> integer label (cat -> 0, dog -> 1)
reverse_label_map = {0: '猫', 1: '狗'}  # integer label -> class name, used for display
model_cache_dir = "trained_models"  # directory where fitted models are persisted
feature_cache_dir = "."  # directory holding the precomputed VGG feature cache (X_vgg.pkl / y_vgg.pkl)

# ---------------------- 核心函数（保持不变） ----------------------
def build_classifiers(random_state=2023):
    """Construct the ten (unfitted) classifiers that make up the ensemble.

    Args:
        random_state: Seed forwarded to every stochastic estimator so that
            repeated runs produce identical models.

    Returns:
        Dict mapping a snake_case model name to an unfitted scikit-learn
        estimator. The three base estimators (LR, RF, SVC) are shared by the
        voting and stacking ensembles.
    """
    base_lr = LogisticRegression(max_iter=2000, random_state=random_state)
    base_rf = RandomForestClassifier(n_estimators=100, random_state=random_state)
    base_svm = SVC(kernel='rbf', probability=True, random_state=random_state)
    base_tree = DecisionTreeClassifier(random_state=random_state)

    voters = [('lr', base_lr), ('rf', base_rf), ('svc', base_svm)]

    # scikit-learn renamed BaggingClassifier's `base_estimator` keyword to
    # `estimator` (deprecated in 1.2, removed in 1.4); detect which spelling
    # this installation accepts.
    if 'estimator' in inspect.signature(BaggingClassifier).parameters:
        tree_kwargs = {'estimator': base_tree}
    else:
        tree_kwargs = {'base_estimator': base_tree}

    return {
        'logistic_regression': base_lr,
        'random_forest': base_rf,
        'svm': base_svm,
        'hard_voting': VotingClassifier(estimators=voters, voting='hard'),
        'soft_voting': VotingClassifier(estimators=voters, voting='soft'),
        'bagging': BaggingClassifier(n_estimators=10, random_state=random_state, **tree_kwargs),
        # "Pasting" is bagging without bootstrap resampling.
        'pasting': BaggingClassifier(n_estimators=10, bootstrap=False,
                                     random_state=random_state, **tree_kwargs),
        'adaboost': AdaBoostClassifier(n_estimators=50, random_state=random_state),
        'gradient_boosting': GradientBoostingClassifier(n_estimators=100, random_state=random_state),
        'stacking': StackingClassifier(
            estimators=voters,
            final_estimator=LogisticRegression(),
            passthrough=False,
        ),
    }

def build_vgg_extractor():
    """Build the VGG16-style convolutional trunk used as a feature extractor.

    The network is constructed layer-by-layer (randomly initialized — no
    pretrained weights are loaded here) and ends in a GlobalMaxPooling2D,
    so a (224, 224, 3) image batch maps to one feature vector per image.

    Returns:
        A Keras Model from the image input to the pooled feature vector.
    """
    inputs = Input(shape=(224, 224, 3))
    x = inputs
    # (conv layer count, filter count) for each of the five VGG16 blocks;
    # every block ends with a 2x2/stride-2 max pool.
    for conv_count, filters in ((2, 64), (2, 128), (3, 256), (3, 512), (3, 512)):
        for _ in range(conv_count):
            x = Conv2D(filters, (3, 3), activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2), strides=(2, 2))(x)
    return Model(inputs=inputs, outputs=GlobalMaxPooling2D()(x))

def load_or_train_models():
    """Load cached classifiers from disk, training only the ones missing.

    Side effects:
      * rebuilds the VGG feature extractor into ``vgg_feature_extractor``
      * fills ``trained_classifiers`` with fitted estimators
      * persists every newly-trained model — and the feature StandardScaler —
        under ``model_cache_dir`` so later runs (and prediction) can reuse them

    Raises:
        Exception: if training is needed but the feature cache
            (X_vgg.pkl / y_vgg.pkl) is missing or fitting fails.
    """
    global trained_classifiers, vgg_feature_extractor

    vgg_feature_extractor = build_vgg_extractor()

    os.makedirs(model_cache_dir, exist_ok=True)

    trained_classifiers = build_classifiers()
    pending = []  # names of models that must be (re)trained

    for name in trained_classifiers:
        model_path = os.path.join(model_cache_dir, f"{name}_model.pkl")
        if os.path.exists(model_path):
            try:
                trained_classifiers[name] = joblib.load(model_path)
                print(f"已加载模型：{name}")
                continue
            except Exception as e:
                print(f"加载模型 {name} 失败，将重新训练：{e}")
        pending.append(name)

    if pending:
        print("部分或全部模型未找到，开始自动训练...")
        try:
            X_path = os.path.join(feature_cache_dir, "X_vgg.pkl")
            y_path = os.path.join(feature_cache_dir, "y_vgg.pkl")

            if not os.path.exists(X_path) or not os.path.exists(y_path):
                raise FileNotFoundError("未找到训练特征缓存，请先运行 train_ensemble.py 生成特征")

            with open(X_path, 'rb') as f:
                X = pickle.load(f)
            with open(y_path, 'rb') as f:
                y = pickle.load(f)

            from sklearn.model_selection import train_test_split
            from sklearn.preprocessing import StandardScaler
            # Same split and seed as the offline training script, so the
            # training partition is reproduced exactly.
            X_train, _, y_train, _ = train_test_split(X, y, test_size=0.25, random_state=2023)
            scaler = StandardScaler()
            X_train_scaled = scaler.fit_transform(X_train)
            # Persist the scaler: prediction must apply the *training*
            # statistics (fitting a new scaler on one sample zeroes it out).
            joblib.dump(scaler, os.path.join(model_cache_dir, "feature_scaler.pkl"))

            # Train only the models that were not loaded from cache,
            # instead of retraining (and overwriting) everything.
            for name in pending:
                clf = trained_classifiers[name]
                print(f"正在训练：{name}")
                # LR and SVC benefit from standardized features; the
                # tree-based ensembles are trained on the raw features.
                need_scale = hasattr(clf, 'kernel') or isinstance(clf, (LogisticRegression, SVC))
                clf.fit(X_train_scaled if need_scale else X_train, y_train)

                model_path = os.path.join(model_cache_dir, f"{name}_model.pkl")
                joblib.dump(clf, model_path)
                print(f"模型 {name} 已保存到：{model_path}")

            print("所有模型训练完成！")
        except Exception as e:
            raise Exception(f"模型训练失败：{e}\n请先运行 train_ensemble.py 生成特征缓存，或检查数据目录")

    print("模型加载/训练完成，可正常预测！")

def preprocess_image(image):
    """Convert an uploaded PIL image into a single VGG feature row.

    Mirrors standard VGG preprocessing: resize to 224x224 via Keras'
    load_img, flip RGB -> BGR, subtract the ImageNet per-channel means,
    then run the global feature extractor.

    Args:
        image: PIL.Image as delivered by the Gradio image widget.

    Returns:
        numpy array of shape (1, n_features) suitable for the sklearn models.
    """
    # Round-trip through a uniquely-named temp file (the previous fixed
    # "temp_upload.jpg" collided across concurrent requests) so load_img
    # performs the same decode/resize as at feature-extraction time.
    fd, temp_path = tempfile.mkstemp(suffix=".jpg")
    os.close(fd)
    try:
        image.save(temp_path)
        img = load_img(temp_path, target_size=(224, 224))
        img = img_to_array(img)
        img = img[:, :, ::-1]  # RGB -> BGR (VGG convention)
        img -= [103.939, 116.779, 123.68]  # BGR ImageNet channel means
        img = np.expand_dims(img, axis=0)
        feature = vgg_feature_extractor.predict(img, verbose=0)
    finally:
        # Previously the temp file leaked whenever save/predict raised.
        if os.path.exists(temp_path):
            os.remove(temp_path)
    return feature.flatten().reshape(1, -1)

def predict_image(image):
    """Classify one image with every model and aggregate by majority vote.

    Args:
        image: PIL.Image from the Gradio widget.

    Returns:
        Tuple ``(summary_text, rows)``: a summary string for the Textbox and
        a list of ``[model name, predicted class, confidence %]`` rows for
        the Dataframe.
    """
    global trained_classifiers
    if trained_classifiers is None:
        load_or_train_models()

    feature = preprocess_image(image)

    # BUG FIX: the previous code fit a *fresh* StandardScaler on the single
    # incoming sample, which centres it to an all-zero vector and feeds
    # degenerate input to LR/SVC. Reuse the scaler fitted on the training
    # data when it was persisted; otherwise fall back to the raw feature,
    # which is still better than a zero vector.
    scaler_path = os.path.join(model_cache_dir, "feature_scaler.pkl")
    feature_scaled = feature
    if os.path.exists(scaler_path):
        try:
            feature_scaled = joblib.load(scaler_path).transform(feature)
        except Exception as e:
            print(f"加载特征缩放器失败：{e}")

    results = []
    predictions = []
    confidences = []

    for name, clf in trained_classifiers.items():
        formatted_name = name.replace('_', ' ').title()
        try:
            # LR/SVC were trained on standardized features; the tree-based
            # models on raw features — match that here.
            need_scale = hasattr(clf, 'kernel') or isinstance(clf, (LogisticRegression, SVC))
            input_feature = feature_scaled if need_scale else feature

            pred_label = clf.predict(input_feature)[0]
            pred_name = reverse_label_map[pred_label]

            if hasattr(clf, 'predict_proba'):
                proba = clf.predict_proba(input_feature)[0]
                confidence = round(max(proba) * 100, 2)
            elif hasattr(clf, 'decision_function'):
                # Squash the decision margin through a sigmoid to get a
                # pseudo-probability in percent.
                score = clf.decision_function(input_feature)[0]
                confidence = round(100 / (1 + np.exp(-score)), 2)
            else:
                confidence = 0.0  # e.g. hard voting exposes neither API

            results.append([formatted_name, pred_name, confidence])
            predictions.append(pred_label)
            confidences.append(confidence)
        except Exception as e:
            # Log instead of swallowing silently, so failures are diagnosable.
            print(f"模型 {name} 预测失败：{e}")
            results.append([formatted_name, "预测失败", 0.0])

    from collections import Counter
    if predictions:
        # Majority vote across models; ties resolve to the label seen first.
        vote_result = Counter(predictions).most_common(1)[0][0]
        best_name = reverse_label_map[vote_result]
        best_confidence = max(confidences) if confidences else 0.0
    else:
        best_name = "无法预测"
        best_confidence = 0.0

    final_text = f"🐱🐶 最终预测结果：{best_name}\n📊 综合置信度：{best_confidence}%"
    return final_text, results

# ---------------------- 兼容低版本的 Gradio 界面 ----------------------
def main():
    """Entry point: warm up the models, then build and launch the Gradio UI."""
    try:
        load_or_train_models()
    except Exception as e:
        print(f"初始化失败：{e}")
        return

    # Stick to basic Gradio parameters for compatibility with old versions.
    with gr.Blocks(title="猫狗识别集成模型") as demo:
        gr.Markdown("""
        # 🐱🐶 猫狗识别集成模型
        基于10种机器学习模型的集成方案，支持上传图片快速识别
        """)

        with gr.Row():
            # Left column: upload widget, trigger button and clickable examples.
            with gr.Column(scale=1):
                uploaded_image = gr.Image(type="pil", label="上传图片")
                recognize_button = gr.Button("开始识别")
                gr.Markdown("### 示例图片（点击测试）")
                # cache_examples deliberately omitted (unsupported on old Gradio).
                gr.Examples(
                    examples=[["example_cat.jpg"], ["example_dog.jpg"]],
                    inputs=uploaded_image,
                    outputs=[gr.Textbox(), gr.Dataframe()],
                    fn=predict_image,
                )

            # Right column: aggregated verdict plus the per-model breakdown.
            with gr.Column(scale=2):
                verdict_box = gr.Textbox(
                    label="最终预测结果",
                    lines=3,
                )
                detail_table = gr.Dataframe(
                    label="所有模型预测详情",
                    headers=["模型名称", "预测结果", "置信度(%)"],
                    datatype=["str", "str", "number"],
                )

        recognize_button.click(
            fn=predict_image,
            inputs=uploaded_image,
            outputs=[verdict_box, detail_table],
        )

    demo.launch(
        server_port=7860,
        share=False,
    )

# Launch the web app only when executed as a script (not on import).
if __name__ == "__main__":
    main()