import torch
import torch.nn as nn
from torchvision import transforms, datasets
from torch.utils.data import DataLoader
import torch.optim as optim
from tqdm import tqdm
import matplotlib.pyplot as plt
import os

# Data preprocessing: resize, force 3-channel grayscale, then quantize
# pixel intensities onto coarse integer levels.
input_size = 160
scale_factor = 4


def _quantize(img_tensor):
    """Quantize a [0, 1] image tensor: round(v * 255 / scale_factor) - 32.

    With scale_factor == 4 this maps pixel values onto integer levels in
    roughly [-32, 32]. A named function (rather than a lambda) keeps the
    pipeline picklable, so DataLoader workers (num_workers > 0) would work.
    """
    return torch.round(img_tensor * 255 / scale_factor) - 32


# The train and validation pipelines were identical, so build one Compose
# and share it under both names (the transform holds no state).
_preprocess = transforms.Compose([
    transforms.Resize((input_size, input_size)),
    transforms.Grayscale(num_output_channels=3),
    transforms.ToTensor(),
    transforms.Lambda(_quantize),
])

transform_train = _preprocess
transform_val = _preprocess

batch_size = 32

# Hand-rolled ReLU6 activation module.
class ReLU6(nn.Module):
    """Clamp activations to the range [0, 6] (ReLU capped at 6)."""

    def forward(self, x):
        return x.clamp(min=0, max=6)

class ResidualBlock(nn.Module):
    """Basic two-convolution residual block without batch normalization.

    conv3x3 -> ReLU6 -> conv3x3, plus a skip connection; a 1x1 projection
    is used on the skip path whenever the stride or the channel count
    changes. ReLU6 caps activations at 6 (quantization-friendly).
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False)
        # Built-in nn.ReLU6 (clamps to [0, 6]) replaces the hand-rolled class;
        # it is parameter-free, so checkpoints are unaffected.
        self.relu = nn.ReLU6()
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False)

        # 1x1 projection so the identity can be added element-wise when the
        # spatial size or channel count differs from the input.
        self.downsample = None
        if stride != 1 or in_channels != out_channels:
            self.downsample = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
            )

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.relu(out)

        out = self.conv2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out

class MyResNet(nn.Module):
    """ResNet-18-style classifier built from ResidualBlocks, no batch norm.

    Stem (7x7 stride-2 conv + stride-2 max pool) followed by four stages of
    two residual blocks each (64/128/256/512 channels), global average
    pooling, and a single linear head producing `num_classes` logits.
    """

    def __init__(self, num_classes=10):
        super(MyResNet, self).__init__()
        self.in_channels = 64
        # Stem: reduces spatial resolution to 1/4 of the input.
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        # Built-in nn.ReLU6 (clamps to [0, 6]) replaces the hand-rolled class.
        self.relu = nn.ReLU6()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self.make_layer(64, 2, stride=1)
        self.layer2 = self.make_layer(128, 2, stride=2)
        self.layer3 = self.make_layer(256, 2, stride=2)
        self.layer4 = self.make_layer(512, 2, stride=2)

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512, num_classes)

    def make_layer(self, out_channels, num_blocks, stride):
        """Stack `num_blocks` ResidualBlocks; only the first may downsample."""
        strides = [stride] + [1] * (num_blocks - 1)
        layers = []
        for s in strides:
            layers.append(ResidualBlock(self.in_channels, out_channels, s))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)

        return x

# Custom weight initialization (disabled): uniform values over an integer range.
# def init_weights(tensor, low=-32, high=31):
#     with torch.no_grad():
#         tensor.uniform_(low, high + 1)  # high + 1 because uniform_ excludes the upper bound

# Datasets in ImageFolder layout (one subdirectory per class).
train_set = datasets.ImageFolder(root='../data/sdfcar/train', transform=transform_train)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=0)

val_set = datasets.ImageFolder(root='../data/sdfcar/valid', transform=transform_val)
val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False, num_workers=0)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# 196 classes — presumably the Stanford Cars label set; confirm against the data.
model = MyResNet(num_classes=196)
model = model.to(device)

# (disabled) print the initial weight range of each layer
# model.init_weights_info()

# Loss and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = optim.AdamW(model.parameters(), lr=0.003)

# Training parameters.
num_epochs = 10
train_accuracy_history = []
val_accuracy_history = []

# Checkpoints are written to models/; create it up front so torch.save
# cannot fail on a missing directory.
os.makedirs('models', exist_ok=True)

# Training loop.
for epoch in range(num_epochs):
    print(f'Epoch {epoch + 1}/{num_epochs}')

    # Training phase.
    model.train()

    running_loss = 0.0
    correct_train = 0
    total_train = 0

    # Step decay: halve the learning rate every 5 epochs.
    # BUGFIX: the original code also re-created the AdamW optimizer every
    # epoch, which silently discarded its moment-estimate state; the same
    # optimizer is now kept for the whole run so its state persists.
    if epoch % 5 == 0 and epoch != 0:
        for param_group in optimizer.param_groups:
            param_group['lr'] *= 0.5
        print(f'Learning rate adjusted to: {optimizer.param_groups[0]["lr"]}')

    for inputs, labels in tqdm(train_loader):
        inputs = inputs.to(device)
        labels = labels.to(device)

        optimizer.zero_grad()

        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()

        # Accumulate top-1 accuracy over the epoch.
        _, predicted = torch.max(outputs, 1)
        total_train += labels.size(0)
        correct_train += (predicted == labels).sum().item()

    epoch_train_accuracy = correct_train / total_train
    train_accuracy_history.append(epoch_train_accuracy)

    print(f'Training Loss: {running_loss / len(train_loader):.4f}, Training Accuracy: {epoch_train_accuracy * 100:.2f}%')

    # Validation phase (no gradient tracking).
    model.eval()
    correct_val = 0
    total_val = 0

    with torch.no_grad():
        for inputs, labels in tqdm(val_loader):
            inputs = inputs.to(device)
            labels = labels.to(device)

            outputs = model(inputs)
            _, predicted = torch.max(outputs, 1)
            total_val += labels.size(0)
            correct_val += (predicted == labels).sum().item()

    epoch_val_accuracy = correct_val / total_val
    val_accuracy_history.append(epoch_val_accuracy)

    print(f'Validation Accuracy: {epoch_val_accuracy * 100:.2f}%')

    # Save a checkpoint after every epoch.
    state_dict = model.state_dict()
    torch.save(state_dict, f'models/model_epoch_{epoch + 1}.pth')

# Plot and save the per-epoch accuracy curves for both phases.
epochs = range(1, num_epochs + 1)
plt.figure(figsize=(10, 5))
for history, curve_label in ((train_accuracy_history, 'Training Accuracy'),
                             (val_accuracy_history, 'Validation Accuracy')):
    plt.plot(epochs, history, label=curve_label)
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.title('Training and Validation Accuracy over Epochs')
plt.legend()
plt.grid(True)
plt.savefig('accuracy_plot.png')
plt.show()
