# Compare classical classifiers (logistic regression, decision tree, SVM)
# and a pre-trained CNN on MNIST: prints metrics, and saves comparison
# charts, confusion matrices, prediction examples, and timing plots.
import seaborn as sns
sns.set_theme(style='whitegrid', palette='Set2', font_scale=1.2)

from torchvision import datasets, transforms
import numpy as np
from models.logistic_regression import get_logistic_regression
from models.decision_tree import get_decision_tree
from models.svm import get_svm
from utils import evaluate
import matplotlib.pyplot as plt
import json
from sklearn.metrics import confusion_matrix, classification_report
import time
import random
# Optional GPU availability check (uncomment to verify CUDA):
# import torch
# print(torch.cuda.is_available())

# Download MNIST via torchvision (cached under ./data after the first run).
# NOTE(review): the ToTensor transform is never applied below — the raw
# `.data` tensors are read directly and normalized by hand.
transform = transforms.Compose([transforms.ToTensor()])
train_set = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_set = datasets.MNIST(root='./data', train=False, download=True, transform=transform)

# Flatten each 28x28 image into a 784-dim float vector scaled to [0, 1].
X_train = train_set.data.numpy().reshape(-1, 784) / 255.0
y_train = train_set.targets.numpy()
X_test = test_set.data.numpy().reshape(-1, 784) / 255.0
y_test = test_set.targets.numpy()

# Methods under comparison and per-method metric slots, filled in below
# (index order matches `methods`).
methods = ['Logistic Regression', 'Decision Tree', 'SVM', 'CNN']
accs = [0] * len(methods)  # accuracy per method
pres = [0] * len(methods)  # precision per method
recs = [0] * len(methods)  # recall per method
f1s = [0] * len(methods)   # F1-score per method

# Logistic regression: fit on the full training set, evaluate on the
# full test set; wall-clock fit time is kept for the timing chart.
t0 = time.time()
lr = get_logistic_regression()
lr.fit(X_train, y_train)
lr_time = time.time() - t0
y_pred_lr = lr.predict(X_test)
lr_metrics = evaluate(y_test, y_pred_lr)
acc, pre, rec, f1 = lr_metrics
accs[0], pres[0], recs[0], f1s[0] = lr_metrics
print(f"Logistic Regression: Acc={acc:.4f}, Precision={pre:.4f}, Recall={rec:.4f}, F1={f1:.4f}")

# Decision tree: same protocol as logistic regression (full train/test).
t0 = time.time()
dt = get_decision_tree()
dt.fit(X_train, y_train)
dt_time = time.time() - t0
y_pred_dt = dt.predict(X_test)
dt_metrics = evaluate(y_test, y_pred_dt)
acc, pre, rec, f1 = dt_metrics
accs[1], pres[1], recs[1], f1s[1] = dt_metrics
print(f"Decision Tree: Acc={acc:.4f}, Precision={pre:.4f}, Recall={rec:.4f}, F1={f1:.4f}")

# SVM: trained on the first 10k training samples and evaluated on the
# first 2k test samples to keep runtime manageable, so its metrics are
# not strictly comparable to the full-data methods above.
t0 = time.time()
svm = get_svm()
svm.fit(X_train[:10000], y_train[:10000])
svm_time = time.time() - t0
y_pred_svm = svm.predict(X_test[:2000])
svm_metrics = evaluate(y_test[:2000], y_pred_svm)
acc, pre, rec, f1 = svm_metrics
accs[2], pres[2], recs[2], f1s[2] = svm_metrics
print(f"SVM (partial data): Acc={acc:.4f}, Precision={pre:.4f}, Recall={rec:.4f}, F1={f1:.4f}")

# CNN metrics are precomputed by train.py and stored in cnn_metrics.json
# as a 4-element list: [accuracy, precision, recall, f1].
# The original bare `except:` swallowed *every* exception (including
# KeyboardInterrupt/SystemExit and genuine bugs such as NameError);
# catch only the expected failure modes: missing/unreadable file,
# malformed JSON, or a payload of the wrong shape/type.
try:
    with open('cnn_metrics.json', 'r') as f:
        cnn_metrics = json.load(f)
    accs[3], pres[3], recs[3], f1s[3] = cnn_metrics
    print(f"CNN: Acc={cnn_metrics[0]:.4f}, Precision={cnn_metrics[1]:.4f}, Recall={cnn_metrics[2]:.4f}, F1={cnn_metrics[3]:.4f}")
except (OSError, json.JSONDecodeError, ValueError, TypeError):
    print('请先运行train.py生成cnn_metrics.json')

# Grouped bar chart: four metric series side by side for each method.
plt.figure(figsize=(10, 6))
width = 0.18
positions = np.arange(len(methods))
metric_series = [
    ('Accuracy', accs),
    ('Precision', pres),
    ('Recall', recs),
    ('F1-score', f1s),
]
bar_groups = []
for offset, (label, values) in enumerate(metric_series):
    bar_groups.append(plt.bar(positions + offset * width, values, width=width, label=label))
plt.xticks(positions + 1.5 * width, methods)  # tick in the middle of each group
plt.ylim(0, 1.05)
plt.legend(loc='upper left', bbox_to_anchor=(1, 1))
plt.title('Comparison of Different Methods on MNIST', fontsize=16)
plt.ylabel('Score')
plt.tight_layout()
# Annotate each bar with its value.
for group in bar_groups:
    for rect in group:
        plt.text(rect.get_x() + rect.get_width() / 2, rect.get_height() + 0.01,
                 f'{rect.get_height():.2f}', ha='center', va='bottom', fontsize=10)
plt.savefig('method_comparison.png', dpi=300)
print('图已保存：method_comparison.png')
plt.show()

# 美化混淆矩阵

def plot_confusion(cm, title):
    """Render a confusion-matrix heatmap and save it to disk.

    Cell colors encode row-normalized rates (each row divided by its
    total, i.e. per-true-class fractions) while the annotations show the
    raw integer counts. The figure is saved as '<title>.png' (spaces
    replaced by underscores, lower-cased) and also displayed.
    """
    plt.figure(figsize=(5, 4))
    # Color by per-class rate; assumes every row of cm has a nonzero sum.
    row_rates = cm / cm.sum(axis=1, keepdims=True)
    sns.heatmap(row_rates, annot=cm, fmt='d', cmap='YlGnBu', cbar=False,
                annot_kws={'size': 10}, linewidths=0.5, linecolor='gray')
    plt.title(title, fontsize=14)
    plt.xlabel('Predicted')
    plt.ylabel('True')
    plt.tight_layout()
    out_name = f'{title.replace(" ", "_").lower()}.png'
    plt.savefig(out_name, dpi=300)
    print(f'图已保存：{out_name}')
    plt.show()

# Per-method evaluation pairs; the SVM was only evaluated on the first
# 2000 test samples, so its ground truth is sliced to match.
eval_pairs = [
    ('Logistic Regression', y_test, y_pred_lr),
    ('Decision Tree', y_test, y_pred_dt),
    ('SVM', y_test[:2000], y_pred_svm),
]

# Confusion-matrix heatmaps.
for name, truth, preds in eval_pairs:
    plot_confusion(confusion_matrix(truth, preds), f'Confusion Matrix - {name}')

# Per-class precision/recall/F1 breakdowns.
for name, truth, preds in eval_pairs:
    print(f"\n{name} Classification Report:")
    print(classification_report(truth, preds))

# Show 10 random test digits with each method's prediction. SVM labels
# exist only for the first 2000 test samples, so they are included only
# when the sampled index falls in that range.
sample_ids = random.sample(range(len(X_test)), 10)
plt.figure(figsize=(15, 4))
for pos, sample in enumerate(sample_ids, start=1):
    ax = plt.subplot(2, 5, pos)
    ax.imshow(X_test[sample].reshape(28, 28), cmap='gray', interpolation='nearest')
    caption = f'True:{y_test[sample]}\nLR:{y_pred_lr[sample]}, DT:{y_pred_dt[sample]}'
    if sample < 2000:
        caption += f', SVM:{y_pred_svm[sample]}'
    ax.set_title(caption, fontsize=9, color='navy')
    ax.axis('off')
    for side in ('top', 'right', 'bottom', 'left'):
        ax.spines[side].set_color('green')
plt.suptitle('Prediction Examples of Different Methods', fontsize=16)
plt.tight_layout(rect=[0, 0, 1, 0.93])  # leave room for the suptitle
plt.savefig('prediction_examples.png', dpi=300)
print('图已保存：prediction_examples.png')
plt.show()

# Training-time comparison for the three classical models (the CNN is
# trained separately in train.py, so it has no timing here).
plt.figure(figsize=(7, 5))
model_names = ['Logistic Regression', 'Decision Tree', 'SVM']
fit_times = [lr_time, dt_time, svm_time]
time_bars = plt.bar(model_names, fit_times, color=sns.color_palette('Set2'))
plt.ylabel('Training Time (s)')
plt.title('Training Time Comparison', fontsize=15)
# Label each bar with its time in seconds.
for rect in time_bars:
    plt.text(rect.get_x() + rect.get_width() / 2, rect.get_height() + 0.1,
             f'{rect.get_height():.2f}', ha='center', va='bottom', fontsize=11)
plt.tight_layout()
plt.savefig('training_time_comparison.png', dpi=300)
print('图已保存：training_time_comparison.png')
plt.show()