"""
没有优化，执行epoch=3，得到良率25.6%
"""

import torch
from torch import nn
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader
from torch import optim
# Convert PIL images to tensors, then map each RGB channel from [0, 1]
# to [-1, 1] via (x - 0.5) / 0.5.
_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
])

# CIFAR-10 label index -> English / Chinese class names, in official order.
_CIFAR10_NAMES = [
    ("airplane", "飞机"),
    ("automobile", "汽车"),
    ("bird", "鸟"),
    ("cat", "猫"),
    ("deer", "鹿"),
    ("dog", "狗"),
    ("frog", "青蛙"),
    ("horse", "马"),
    ("ship", "轮船"),
    ("truck", "卡车"),
]
cifar10_classes_detail = {
    index: {"en": en, "zh": zh} for index, (en, zh) in enumerate(_CIFAR10_NAMES)
}


# 1. datas
def load_data(return_loader=True, batch_size=64):
    """Download CIFAR-10 (into ./) and return the train/test split.

    When return_loader is True, wrap both datasets in shuffling DataLoaders
    of the given batch_size; otherwise return the raw Dataset objects.
    """
    train_data = torchvision.datasets.CIFAR10(root='./', train=True, download=True, transform=_transform)
    test_data = torchvision.datasets.CIFAR10(root='./', train=False, download=True, transform=_transform)
    print(f"train data length: {len(train_data)}, test data length: {len(test_data)}")
    if not return_loader:
        return train_data, test_data
    train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=True)
    return train_loader, test_loader


# 2. network
class Network1(nn.Module):
    """Small CNN for CIFAR-10: three conv blocks plus a 3-layer MLP head.

    Input:  (N, 3, 32, 32) float tensor.
    Output: (N, 10) log-probabilities (LogSoftmax), the form nn.NLLLoss expects.
    """

    def __init__(self):
        super(Network1, self).__init__()
        # Convolutional feature extractor; each MaxPool halves the spatial
        # size: 32x32 -> 16x16 -> 8x8 -> 4x4.
        self.conv_layers = nn.Sequential(
            # Conv block 1: 3 -> 32 channels
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),

            # Conv block 2: 32 -> 64 channels
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),

            # Conv block 3: 64 -> 128 channels
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2)
        )

        # Fully-connected classifier head.
        self.fc_layers = nn.Sequential(
            nn.Linear(128 * 4 * 4, 512),
            nn.ReLU(),
            nn.Dropout(0.25),

            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Dropout(0.25),

            nn.Linear(256, 10),
            # BUG FIX: the original ended with nn.Softmax(dim=1) while the
            # training loss is nn.NLLLoss, which expects *log*-probabilities.
            # Feeding plain probabilities into NLLLoss yields a mis-scaled,
            # near-flat loss surface, which is why training stalled around
            # 25% accuracy. LogSoftmax pairs correctly with NLLLoss, and
            # argmax-based predictions are unchanged by the log.
            nn.LogSoftmax(dim=1)
        )

    def forward(self, x):
        """Run the forward pass; returns (N, 10) log-probabilities."""
        # Convolutional part: (N, 3, 32, 32) -> (N, 128, 4, 4).
        x = self.conv_layers(x)

        # Flatten to (N, 128 * 4 * 4) for the linear layers.
        x = x.view(x.size(0), -1)

        # Classifier head.
        x = self.fc_layers(x)
        return x


# 3. loss
def get_loss():
    """Return the NLL criterion (expects log-probabilities from the model)."""
    return nn.NLLLoss()

def show_img(img):
    """Display a single CHW image tensor with matplotlib.

    plt.imshow() expects (height, width, channels) — or (height, width) for
    grayscale — whereas the incoming tensor is (3, 32, 32), so the channel
    axis is moved last before plotting.

    FIX: the explanatory text was a misplaced bare string *after* the import
    (a no-op statement, not a docstring) and its comment wrongly claimed the
    rescaling targets [0, 255]; the min-max rescale below maps to [0, 1].
    """
    import matplotlib.pyplot as plt  # local import: only needed for visualization
    # PyTorch tensor: (C, H, W) -> (H, W, C), then convert to a numpy array.
    image = img.permute(1, 2, 0).numpy()
    # The Normalize transform maps pixels to [-1, 1]; matplotlib wants floats
    # in [0, 1], so min-max rescale to [0, 1] when values fall out of range.
    if image.min() < 0 or image.max() > 255:
        image = (image - image.min()) / (image.max() - image.min())
    plt.imshow(image)
    plt.show()

def check_the_data():
    """Visually inspect the training data: show each image with its label."""
    loader, _ = load_data()
    for images, labels in loader:
        show_img(images[0])
        _label = labels[0].item()  # e.g. tensor(6) -> plain Python int
        print(f"Label {_label}: {cifar10_classes_detail[_label]['en']} ({cifar10_classes_detail[_label]['zh']})")

def check_the_init_yield():
    """Measure the accuracy ("yield") of an untrained network.

    With 10 balanced classes and random weights this should land near 10%.
    """
    train_data_loader, _ = load_data()
    net = Network1()
    # BUG FIX: the original left the net in training mode, so Dropout fired
    # randomly during measurement and distorted the reported accuracy.
    net.eval()
    total_count = 0
    true_count = 0
    print("collecting init yield...")
    # no_grad hoisted around the whole loop — inference only, no autograd.
    with torch.no_grad():
        for images, labels in train_data_loader:
            outputs = net(images)
            # Vectorized accuracy over the batch instead of a per-sample loop.
            predictions = torch.argmax(outputs, dim=1)
            true_count += (predictions == labels).sum().item()
            total_count += labels.size(0)
    # FIX: "ture" typo in the original message.
    print(f"total count: {total_count}, true count: {true_count}, yield: {(true_count/total_count)*100}%")


# 4. train
def train(epochs):
    """Train Network1 on CIFAR-10 for `epochs` epochs.

    Prints per-batch progress and, after each epoch, that epoch's training
    accuracy ("yield").
    """
    train_data_loader, _ = load_data()
    net = Network1()
    # Hoisted out of the loop: one criterion instance for the whole run.
    loss_function = get_loss()
    # BUG FIX: the original constructed a fresh optim.Adam *inside* the batch
    # loop, wiping Adam's running first/second-moment estimates on every
    # step. The optimizer must be created once so its state persists.
    optimization = optim.Adam(net.parameters(), 0.001)
    for epoch in range(epochs):
        # BUG FIX: counters are now reset per epoch so the printed yield
        # reflects the current epoch rather than a running mixture that
        # includes the early, untrained epochs.
        total_count = 0
        true_count = 0
        for batch, (image, name) in enumerate(train_data_loader):
            optimization.zero_grad()  # clear gradients from the previous step
            outputs = net(image)
            _loss = loss_function(outputs, name)
            _loss.backward()
            optimization.step()
            print(f"epoch {epoch + 1}/{epochs}, batch {batch+1}/{len(train_data_loader)}...")
            # Vectorized accuracy bookkeeping instead of a per-sample loop.
            with torch.no_grad():
                predictions = torch.argmax(outputs, dim=1)
                true_count += (predictions == name).sum().item()
                total_count += name.size(0)
        # FIX: "ture" typo in the original message.
        print(f"total count: {total_count}, true count: {true_count}, yield: {(true_count / total_count) * 100}%")



def _main():
    """Script entry point: run a 3-epoch training session."""
    train(3)


if __name__ == '__main__':
    _main()