import random

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from deap import base, creator, tools, algorithms
from torch.utils.data import DataLoader

# 定义简单的卷积神经网络模型
class SimpleCNN(nn.Module):
    def __init__(self, in_channels, out_channels, hidden_channels, kernel_size, learning_rate):
        super(SimpleCNN, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, hidden_channels, kernel_size=kernel_size, padding=kernel_size//2)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(hidden_channels*8*8, 128)
        self.fc2 = nn.Linear(128, out_channels)
        self.criterion = nn.CrossEntropyLoss()
        self.optimizer = optim.SGD(self.parameters(), lr=learning_rate)

    def forward(self, x):
        x = self.pool(self.relu(self.conv1(x)))
        x = x.view(-1, hidden_channels*8*8)
        x = self.relu(self.fc1(x))
        x = self.fc2(x)
        return x

# 定义训练函数
def train_model(model, device, train_loader, epochs=5):
    model.train()
    for epoch in range(epochs):
        for data, target in train_loader:
            data, target = data.to(device), target.to(device)
            model.optimizer.zero_grad()
            output = model(data)
            loss = model.criterion(output, target)
            loss.backward()
            model.optimizer.step()

# 定义评估函数
def evaluate_model(model, device, test_loader):
    model.eval()
    correct = 0
    total = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            outputs = model(data)
            _, predicted = torch.max(outputs.data, 1)
            total += target.size(0)
            correct += (predicted == target).sum().item()
    return 100. * correct / total

# 定义遗传算法个体
creator.create("FitnessMax", base.Fitness, weights=(1.0,))
creator.create("Individual", list, fitness=creator.FitnessMax)

# 定义遗传算法参数
toolbox = base.Toolbox()
toolbox.register("attr_float", lambda low, up: low + torch.rand(1) * (up - low))
toolbox.register("individual", tools.initRepeat, creator.Individual, 
                  toolbox.attr_float, n=4, low=[1, 16, 16, 0.001], up=[10, 128, 11, 0.1])
toolbox.register("population", tools.initRepeat, list, toolbox.individual)

# 定义遗传算法操作
toolbox.register("mate", tools.cxBlend, alpha=0.5)
toolbox.register("mutate", tools.mutPolynomialBounded, low=0, up=1, eta=20.0, indpb=0.2)
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("evaluate", evaluate_model)

# 加载数据
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
train_dataset = torchvision.datasets.MNIST(root='./data', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_dataset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, batch_size=1000, shuffle=False)

# 遗传算法搜索
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
population = toolbox.population(n=300)
hof = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("avg", lambda ind: sum(ind.fitness.values) / len(ind.fitness.values))
stats.register("std", lambda ind: torch.std(ind.fitness.values))
stats.register("min", min)
stats.register("max", max)

algorithms.eaSimple(population, toolbox, cxpb=0.5, mutpb=0.2, ngen=40, stats=stats, halloffame=hof, verbose=True)

# 输出最优超参数
best_ind = hof[0]
print(f"Best hyperparameters: in_channels={int(best_ind[0])}, hidden_channels={int(best_ind[1])}, kernel_size={int(best_ind[2])}, learning_rate={best_ind[3]:.5f}")

