import torch
import numpy as np
from PIL import Image, ImageFilter, ImageEnhance
import matplotlib
import matplotlib.pyplot as plt
import cv2


plt.rcParams['font.sans-serif'] = ['SimHei']  # use the SimHei font so CJK text in titles renders correctly
plt.rcParams['axes.unicode_minus'] = False     # render minus signs correctly when a CJK font is active


def plt_show(epochs, train_losses, test_losses, train_accuracies, test_accuracies):
    """Plot train/test loss and accuracy curves side by side.

    Saves the figure to 'training_metrics.png' and displays it.
    """
    epoch_axis = list(range(1, epochs + 1))

    plt.figure(figsize=(12, 4))

    # Left panel: loss curves.
    plt.subplot(1, 2, 1)
    for series, fmt, label in (
        (train_losses, 'b-', 'Train Loss'),
        (test_losses, 'r-', 'Test Loss'),
    ):
        plt.plot(epoch_axis, series, fmt, label=label)
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.title('Training and Test Loss')
    plt.legend()

    # Right panel: accuracy curves.
    plt.subplot(1, 2, 2)
    for series, fmt, label in (
        (train_accuracies, 'b-', 'Train Accuracy'),
        (test_accuracies, 'r-', 'Test Accuracy'),
    ):
        plt.plot(epoch_axis, series, fmt, label=label)
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy (%)')
    plt.title('Training and Test Accuracy')
    plt.legend()

    plt.tight_layout()
    plt.savefig('training_metrics.png')
    plt.show()

# Visualize prediction results (unchanged)
def visualize_predictions(model, device, test_loader, num_samples=5):
    """Display the first `num_samples` test images with predicted vs. true labels.

    Saves the figure to 'predictions.png' and displays it.
    """
    model.eval()
    fig, axes = plt.subplots(1, num_samples, figsize=(15, 3))

    with torch.no_grad():
        # One batch from the loader is enough for a qualitative check.
        data, target = next(iter(test_loader))
        data = data.to(device)
        target = target.to(device)
        # Class index with the highest score for each sample.
        pred = model(data).argmax(dim=1, keepdim=True)

        for i, ax in enumerate(axes[:num_samples]):
            ax.imshow(data[i].cpu().squeeze().numpy(), cmap='gray')
            ax.set_title(f'Pred: {pred[i].item()}, True: {target[i].item()}')
            ax.axis('off')

    plt.tight_layout()
    plt.savefig('predictions.png')
    plt.show()


# Image preprocessing function
def preprocess_image(image_path, show_steps=True):
    """Preprocess a phone photo of a handwritten digit for the classifier.

    Pipeline: grayscale -> contrast/denoise -> threshold to a white-ink-on-
    black binary image -> crop the digit's bounding box (with 10% margin) ->
    center it on a square canvas -> resize to 128x128, sharpen, boost
    contrast -> normalize to [0, 1].

    Args:
        image_path: Path to the image file to load.
        show_steps: When True, plot every intermediate stage with matplotlib.

    Returns:
        torch.FloatTensor of shape [1, 1, 128, 128] with values in [0, 1],
        or None when no digit pixels are detected.
    """
    # Load as 8-bit grayscale.
    img = Image.open(image_path).convert('L')

    if show_steps:
        plt.figure(figsize=(15, 10))
        plt.subplot(231)
        plt.title('原始图像')
        plt.imshow(img, cmap='gray')

    # Contrast enhancement (a factor of 1 is a no-op; raise it to strengthen).
    enhancer = ImageEnhance.Contrast(img)
    img = enhancer.enhance(1)

    # Light Gaussian blur to suppress sensor noise before thresholding.
    img = img.filter(ImageFilter.GaussianBlur(radius=0.5))

    # Mean-based adaptive threshold (not Otsu, despite the original comment):
    # pixels darker than 80% of the image mean are treated as ink and mapped
    # to white (255) on a black (0) background — MNIST's convention.
    img_array = np.array(img)
    threshold = np.mean(img_array) * 0.8
    img_binary = (img_array < threshold).astype(np.uint8) * 255

    # Save the binarized image for debugging. PIL (already imported) produces
    # the same grayscale JPEG as cv2.imwrite, with one less dependency.
    Image.fromarray(img_binary).save('output.jpg')

    if show_steps:
        plt.subplot(232)
        plt.title('二值化图像')
        plt.imshow(img_binary, cmap='gray')

    # Locate the digit. After the threshold above, ink pixels are the white
    # (non-zero) ones. BUG FIX: the original searched for black pixels
    # (`img_binary == 0`), which selected the *background* and made the
    # bounding box span essentially the whole image.
    coords = np.where(img_binary > 0)
    if len(coords[0]) == 0:  # no ink pixels found at all
        print("警告：未检测到数字区域！")
        return None

    x_min, x_max = np.min(coords[1]), np.max(coords[1])
    y_min, y_max = np.min(coords[0]), np.max(coords[0])

    # Expand the bounding box by a 10% margin, clamped to the image bounds.
    width, height = x_max - x_min, y_max - y_min
    margin = int(max(width, height) * 0.1)

    x_min = max(0, x_min - margin)
    x_max = min(img_binary.shape[1], x_max + margin)
    y_min = max(0, y_min - margin)
    y_max = min(img_binary.shape[0], y_max + margin)

    # Crop the digit region.
    digit_region = img_binary[y_min:y_max, x_min:x_max]

    if show_steps:
        plt.subplot(233)
        plt.title('裁剪后的数字')
        plt.imshow(digit_region, cmap='gray')

    # Pad to a square canvas to preserve the aspect ratio. BUG FIX: the canvas
    # is black (0) to match the binary image's background — the original used
    # a white canvas around a black-background crop.
    size = max(digit_region.shape)
    square_img = np.zeros((size, size), dtype=np.uint8)

    # Center the digit on the canvas.
    x_offset = (size - digit_region.shape[1]) // 2
    y_offset = (size - digit_region.shape[0]) // 2
    square_img[y_offset:y_offset + digit_region.shape[0],
               x_offset:x_offset + digit_region.shape[1]] = digit_region

    if show_steps:
        plt.subplot(234)
        plt.title('居中的数字')
        plt.imshow(square_img, cmap='gray')

    # Resize to the network input size. BUG FIX: resize the *centered* square
    # — the original resized `digit_region` and silently discarded the
    # centering computed above. Note the image is 128x128, not 32x32 as the
    # original comments (and the plot title below) claimed.
    img_pil = Image.fromarray(square_img).resize((128, 128), Image.Resampling.BILINEAR)

    # Sharpen and boost contrast to restore crisp edges after interpolation.
    img_pil = img_pil.filter(ImageFilter.SHARPEN)
    enhancer = ImageEnhance.Contrast(img_pil)
    img_pil = enhancer.enhance(8)

    img_resized = np.array(img_pil)

    if show_steps:
        plt.subplot(235)
        plt.title('调整为32x32')
        plt.imshow(img_resized, cmap='gray')

    # Safety net: ensure black background / white ink (MNIST convention).
    if np.mean(img_resized) > 127:  # mostly bright => black-on-white, invert
        img_resized = 255 - img_resized

    # Normalize pixel values to [0, 1].
    img_normalized = img_resized / 255.0

    if show_steps:
        plt.subplot(236)
        plt.title('最终输入')
        plt.imshow(img_normalized, cmap='gray')
        plt.tight_layout()
        plt.show()

    # Add batch and channel dimensions: [1, 1, 128, 128].
    tensor = torch.tensor(img_normalized, dtype=torch.float32).unsqueeze(0).unsqueeze(0)
    return tensor

