import jittor as jt  # Jittor deep-learning framework
from jittor import nn, Module  # network layers and the Module base class
import numpy as np
import sys, os
import random
import math
from jittor import init
from jittor import transform
if jt.has_cuda:
    jt.flags.use_cuda = 1 # jt.flags.use_cuda selects the training device:
# use_cuda = 1 trains on the GPU, use_cuda = 0 trains on the CPU.
from jittor.dataset.mnist import MNIST
# MNIST loading is bundled with Jittor so it can be used directly
# (unused below — left over from the original MNIST version of this script).
import matplotlib.pyplot as plt
import pylab as pl # for plotting the loss curve and sample images
from ljp.dataset.cifar import CIFAR100  # NOTE(review): 'ljp' looks like a local/personal package — confirm it is importable
from model.resnet import resnet18
from model.resnet_sub import resnet18_sub, resnet50_sub
class Model(Module):
    """Small CNN baseline for 32x32 RGB inputs (e.g. CIFAR-100).

    Two unpadded 3x3 convolutions, a 2x2 max pool, then two
    fully-connected layers producing 100 class logits.
    """

    def __init__(self):
        super(Model, self).__init__()
        # 32x32 -> 30x30 (3x3 conv, stride 1, no padding)
        self.conv1 = nn.Conv(3, 32, 3, 1)
        # 30x30 -> 28x28
        self.conv2 = nn.Conv(32, 64, 3, 1)
        self.bn = nn.BatchNorm(64)
        # 28x28 -> 14x14 after pooling; 64 * 14 * 14 = 12544
        self.max_pool = nn.Pool(2, 2)
        self.relu = nn.Relu()
        self.fc1 = nn.Linear(12544, 256)
        self.fc2 = nn.Linear(256, 100)

    def execute(self, x):
        """Forward pass (Jittor's equivalent of PyTorch's ``forward``)."""
        h = self.relu(self.conv1(x))
        h = self.relu(self.bn(self.conv2(h)))
        h = self.max_pool(h)
        # Flatten everything except the batch dimension for the FC head.
        h = jt.reshape(h, [h.shape[0], -1])
        return self.fc2(self.relu(self.fc1(h)))


def train(model, train_loader, optimizer, epoch, losses, losses_idx):
    """Run one training epoch, logging every 10th batch.

    Args:
        model: network to train; called as ``model(inputs)``.
        train_loader: iterable of ``(inputs, targets)`` batches with a ``len``.
        optimizer: Jittor optimizer — ``step(loss)`` backprops and updates.
        epoch: current epoch index (used for logging and loss indexing).
        losses: list extended in place with one loss value per batch.
        losses_idx: list extended in place with each batch's global index.
    """
    model.train()
    num_batches = len(train_loader)  # hoisted once; reused in the log line below
    for batch_idx, (inputs, targets) in enumerate(train_loader):
        outputs = model(inputs)
        loss = nn.cross_entropy_loss(outputs, targets)
        # In Jittor, optimizer.step(loss) performs both backward and update.
        optimizer.step(loss)
        loss_value = loss.numpy()[0]  # fetch once, reuse for record + log
        losses.append(loss_value)
        losses_idx.append(epoch * num_batches + batch_idx)

        if batch_idx % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx, num_batches,
                100. * batch_idx / num_batches, loss_value))


def val(model, val_loader, epoch):
    """Evaluate ``model`` on ``val_loader``, printing per-batch and overall accuracy.

    Args:
        model: network to evaluate; called as ``model(inputs)``.
        val_loader: iterable of ``(inputs, targets)`` batches with a ``len``.
        epoch: current epoch index (for logging only).
    """
    model.eval()

    total_correct = 0  # correctly classified samples so far
    total_num = 0      # samples seen so far
    num_batches = len(val_loader)  # hoisted out of the loop
    for batch_idx, (inputs, targets) in enumerate(val_loader):
        batch_size = inputs.shape[0]
        outputs = model(inputs)
        pred = np.argmax(outputs.numpy(), axis=1)
        correct = np.sum(targets.numpy() == pred)
        total_correct += correct
        total_num += batch_size
        print(f'Test Epoch: {epoch} [{batch_idx}/{num_batches}]\tAcc: {correct / batch_size:.6f}')
    # Report the overall accuracy once, after the whole validation set
    # (previously this printed inside the loop, once per batch).
    if total_num:
        print('Test Acc =', total_correct / total_num)


# ---- Hyperparameters ----
batch_size = 64
learning_rate = 0.1  # fairly high; originally paired with the SGD line further below
momentum = 0.9       # only consumed by the commented-out SGD optimizer below
weight_decay = 1e-4  # only consumed by the commented-out SGD optimizer below
epochs = 100
losses = []      # per-batch training losses, appended by train()
losses_idx = []  # global batch index for each entry in `losses`
# Previous MNIST data loaders, kept for reference:
# train_loader = MNIST(train=True, batch_size=batch_size, shuffle=True, data_root=r'D:/data/MNIST/raw/', download=False)
# val_loader = MNIST(train=False, batch_size=batch_size, shuffle=False, data_root=r'D:/data/MNIST/raw/', download=False)


def get_train_transforms():
    """Build the training pipeline: random crop/flip augmentation,
    tensor conversion, and ImageNet-statistics normalization."""
    steps = [
        transform.RandomCropAndResize((32, 32)),
        transform.RandomHorizontalFlip(),
        transform.ToTensor(),
        transform.ImageNormalize(mean=(0.485, 0.456, 0.406),
                                 std=(0.229, 0.224, 0.225)),
    ]
    return transform.Compose(steps)


def get_valid_transforms():
    """Build the deterministic validation pipeline: resize, center crop,
    tensor conversion, and ImageNet-statistics normalization."""
    steps = [
        transform.Resize(32),
        transform.CenterCrop(32),
        transform.ToTensor(),
        transform.ImageNormalize(mean=(0.485, 0.456, 0.406),
                                 std=(0.229, 0.224, 0.225)),
    ]
    return transform.Compose(steps)
# CIFAR-100 data loaders. The validation loader is NOT shuffled: shuffling
# eval data only reorders batches (cf. the MNIST reference above, which also
# used shuffle=False for validation).
train_loader = CIFAR100(train=True, batch_size=batch_size, shuffle=True, root=r'D:/data/cifar100/', download=False,
                        transform=get_train_transforms())
val_loader = CIFAR100(train=False, batch_size=batch_size, shuffle=False, root=r'D:/data/cifar100/', download=False,
                      transform=get_valid_transforms())

# Alternative models, kept for quick switching:
# model = Model()
# model = resnet18(num_classes=100)
model = resnet18_sub(num_classes=100)
print(model)

# optimizer = nn.SGD(model.parameters(), learning_rate, momentum, weight_decay)
optimizer = nn.Adan(model.parameters(), learning_rate)

# Main loop: one training pass then one validation pass per epoch.
for epoch in range(epochs):
    train(model, train_loader, optimizer, epoch, losses, losses_idx)
    val(model, val_loader, epoch)