import cv2
import numpy as np
from matplotlib import pyplot as plt
import os
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from PIL import Image
import os
import shutil
# Device selection and fixed seed for reproducible runs.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.manual_seed(42)

# Preprocessing: tensor conversion + normalization with the standard
# MNIST mean/std (0.1307, 0.3081) — the same stats are reused at
# inference time in predict_digit.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])

# Load the MNIST datasets (downloaded into ./data on first run).
train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=transform, download=True)
test_dataset = torchvision.datasets.MNIST(root='./data', train=False, transform=transform)

train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=1000)


# Model definition
class ConvNet(nn.Module):
    """Three-block CNN classifier for 28x28 single-channel MNIST digits.

    Each conv block (Conv -> BatchNorm -> ReLU -> MaxPool) halves the
    spatial resolution: 28 -> 14 -> 7 -> 3, so the classifier head sees
    128 * 3 * 3 features. Output is raw logits over the 10 digit classes.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 32, 3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(32, 64, 3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(64, 128, 3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.fc = nn.Sequential(
            nn.Linear(128 * 3 * 3, 512),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(512, 10)
        )

    def forward(self, x):
        """Return logits of shape (batch, 10) for input of shape (batch, 1, 28, 28)."""
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        # flatten(1) keeps the batch dimension intact; unlike view(-1, N) it
        # raises instead of silently re-batching if the feature size is wrong.
        x = x.flatten(1)
        x = self.fc(x)
        return x


# Model, loss, optimizer and LR schedule shared by the training loop below.
model = ConvNet().to(device)
criterion = nn.CrossEntropyLoss()  # expects raw logits + integer class targets
optimizer = optim.Adam(model.parameters(), lr=0.001)
# Reduce the learning rate when the monitored validation loss stops
# improving for `patience` consecutive epochs.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=2)


# Training function
def train(epochs):
    """Train the module-level `model` on `train_loader` for `epochs` epochs.

    After every epoch the model is evaluated on `test_loader`; the LR
    scheduler steps on the summed validation loss, and the weights with the
    best test accuracy so far are checkpointed to 'best_mnist_model.pth'.

    Args:
        epochs: number of full passes over the training set.

    Returns:
        Best test accuracy (percent) observed across all epochs.
    """
    best_acc = 0
    for epoch in range(epochs):
        model.train()
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()

            if batch_idx % 100 == 0:
                print(f'Epoch {epoch}, Batch {batch_idx}, Loss: {loss.item():.4f}')

        # Evaluate on the held-out test set.
        model.eval()
        correct = 0
        total = 0
        val_loss = 0
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                output = model(data)
                val_loss += criterion(output, target).item()
                _, predicted = output.max(1)
                total += target.size(0)
                correct += predicted.eq(target).sum().item()

        accuracy = 100. * correct / total
        print(f'Epoch {epoch}: Accuracy: {accuracy:.2f}%')

        # ReduceLROnPlateau monitors the summed per-batch validation loss.
        scheduler.step(val_loss)

        # Checkpoint only on improvement.
        if accuracy > best_acc:
            best_acc = accuracy
            torch.save(model.state_dict(), 'best_mnist_model.pth')

    return best_acc


def predict_digit(image_path, model_path='best_mnist_model.pth'):
    """Classify a single handwritten digit from an image file.

    Args:
        image_path: path to an image containing one digit.
        model_path: path to a saved ConvNet state_dict checkpoint.

    Returns:
        dict with 'digit' (predicted class, int 0-9) and 'confidence'
        (softmax probability of that class, in percent).
    """
    model = ConvNet()
    # map_location='cpu' lets a checkpoint saved on a GPU machine load on a
    # CPU-only machine; inference below runs on CPU either way.
    model.load_state_dict(torch.load(model_path, map_location='cpu'))
    model.eval()

    # Mirror the training-time preprocessing: grayscale 28x28, MNIST stats.
    transform = transforms.Compose([
        transforms.Grayscale(),
        transforms.Resize((28, 28)),
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])

    image = Image.open(image_path)
    image = transform(image).unsqueeze(0)  # add the batch dimension

    with torch.no_grad():
        output = model(image)
        _, predicted = output.max(1)
        probability = torch.nn.functional.softmax(output, dim=1)
        confidence = probability[0][predicted].item() * 100

    return {
        'digit': predicted.item(),
        'confidence': confidence
    }


def segment_digits(image_path, output_dir='segmented_digits'):
    """Split an image of handwritten digits into per-digit square crops.

    The image is binarized (Otsu, inverted so ink is non-zero), then cut at
    columns whose vertical projection is zero. Each digit is centered on a
    square canvas, padded up to at least 28x28, and written to
    `output_dir/digit_<i>.png` in left-to-right order.

    Args:
        image_path: path to the source image (read as grayscale).
        output_dir: directory for the per-digit PNGs (created if missing).

    Returns:
        (digits, binary): list of square uint8 digit images, and the full
        binarized image.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    img = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)

    # Otsu threshold, inverted: digit strokes become white (non-zero).
    _, binary = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    # Column-wise ink projection; zero columns are gaps between digits.
    projection = np.sum(binary, axis=0)

    spans = []
    in_digit = False
    start = 0

    for i, val in enumerate(projection):
        if val > 0 and not in_digit:
            in_digit = True
            start = i
        elif val == 0 and in_digit:
            in_digit = False
            spans.append((start, i))

    # Close a digit that runs to the right edge of the image.
    if in_digit:
        spans.append((start, len(projection) - 1))

    digits = []
    height = binary.shape[0]

    for i, (start, end) in enumerate(spans):
        digit = binary[:, start:end]
        digit_width = end - start
        max_dim = max(height, digit_width)

        # Center the crop on a square canvas.
        square_digit = np.zeros((max_dim, max_dim), dtype=np.uint8)
        pad_top = (max_dim - height) // 2
        pad_left = (max_dim - digit_width) // 2
        square_digit[pad_top:pad_top + height, pad_left:pad_left + digit_width] = digit

        # Pad small digits up to exactly min_size x min_size. Splitting the
        # padding into before/after amounts guarantees the target size even
        # when the difference is odd (the previous symmetric //2 padding
        # could come up one pixel short, e.g. 27 -> 27 instead of 28).
        min_size = 28
        if max_dim < min_size:
            before = (min_size - max_dim) // 2
            after = min_size - max_dim - before
            square_digit = np.pad(square_digit, ((before, after), (before, after)), mode='constant')

        digits.append(square_digit)

        output_path = os.path.join(output_dir, f'digit_{i}.png')
        cv2.imwrite(output_path, square_digit)

    return digits, binary


def count_files_os_listdir(directory):
    """Count the regular files directly inside *directory*.

    Subdirectories are not counted and the scan is not recursive. On a
    missing directory or any other error, a message is printed and 0 is
    returned instead of raising.
    """
    try:
        # Sum over directory entries, keeping only those that are files.
        return sum(
            1 for entry in os.listdir(directory)
            if os.path.isfile(os.path.join(directory, entry))
        )
    except FileNotFoundError:
        print(f"目录 '{directory}' 未找到。")
        return 0
    except Exception as e:
        print(f"发生错误: {e}")
        return 0


def shibie(filepath):
    """Segment the digits in *filepath* and classify each one.

    Recreates ./segmented_digits from scratch so stale crops from a
    previous run cannot leak into the result.

    Args:
        filepath: path to an image containing a sequence of digits.

    Returns:
        The recognized digits concatenated into a string, left to right.
    """
    # ignore_errors=True: without it, rmtree raises FileNotFoundError on the
    # very first run, before the directory has ever been created.
    shutil.rmtree("./segmented_digits", ignore_errors=True)
    digits, binary = segment_digits(filepath)
    num = count_files_os_listdir("./segmented_digits")
    recognized = ""
    for i in range(num):
        result = predict_digit(f"./segmented_digits/digit_{i}.png")
        recognized += str(result['digit'])
    return recognized


