import jittor
from ljp.dataset.cifar import CIFAR100
from model.resnet import resnet18, resnet50
from model.resnet_sub import resnet18_sub, resnet50_sub
from jittor import transform
from engine1 import train, val, train_sub

# Enable GPU execution when CUDA is available.
if jittor.has_cuda:
    jittor.flags.use_cuda = 1  # jt.flags.use_cuda controls whether training runs on the GPU.


# train_loader = MNIST(train=True, batch_size=batch_size, shuffle=True, data_root=r'D:/data/MNIST/raw/', download=False)
# val_loader = MNIST(train=False, batch_size=batch_size, shuffle=False, data_root=r'D:/data/MNIST/raw/', download=False)
# train_loader = DataLoader(CIFAR100(train=True, root=r'D:/data/cifar100/', download=False), batch_size=batch_size,
#                           shuffle=True, num_workers=0)
# val_loader = DataLoader(CIFAR100(train=False, root=r'D:/data/cifar100/', download=False), batch_size=batch_size, )
def get_train_transforms():
    """Build the augmentation pipeline applied to CIFAR-100 training images.

    Random crop-and-resize plus horizontal flip for augmentation, then
    conversion to tensor and per-channel normalization (ImageNet statistics).
    """
    normalize = transform.ImageNormalize(
        mean=(0.485, 0.456, 0.406),
        std=(0.229, 0.224, 0.225),
    )
    return transform.Compose([
        transform.RandomCropAndResize((32, 32)),
        transform.RandomHorizontalFlip(),
        transform.ToTensor(),
        normalize,
    ])


def get_valid_transforms():
    """Build the deterministic preprocessing pipeline for validation images.

    Resize + center-crop to 32x32 (no random augmentation), then tensor
    conversion and per-channel normalization (ImageNet statistics).
    """
    normalize = transform.ImageNormalize(
        mean=(0.485, 0.456, 0.406),
        std=(0.229, 0.224, 0.225),
    )
    return transform.Compose([
        transform.Resize(32),
        transform.CenterCrop(32),
        transform.ToTensor(),
        normalize,
    ])


# Training hyperparameters.
batch_size = 64
learning_rate = 0.1
momentum = 0.9  # NOTE(review): declared but not passed to the Adam optimizer below — presumably a leftover from an SGD setup; confirm.
weight_decay = 1e-4  # NOTE(review): declared but not passed to the optimizer below — confirm whether regularization is intended.
epochs = 20

# CIFAR-100 data pipelines. Jittor-style datasets act as their own loaders;
# batching/shuffling is configured via set_attrs.
# NOTE(review): data root is a hard-coded Windows path and download=False,
# so the dataset files must already exist at that location.
train_loader = CIFAR100(train=True, root=r'D:/data/cifar100/', download=False, transform=get_train_transforms())
train_loader.set_attrs(batch_size=batch_size, shuffle=True)
val_loader = CIFAR100(train=False, root=r'D:/data/cifar100/', download=False, transform=get_valid_transforms())
val_loader.set_attrs(batch_size=batch_size)  # shuffle left at its default (presumably False) — confirm


class Model(jittor.nn.Module):
    """Small plain-CNN baseline for CIFAR-100 classification.

    Two unpadded 3x3 conv layers (the second batch-normalized), a 2x2
    max-pool, then two fully-connected layers producing class logits.
    """

    def __init__(self, num_classes=100):
        super(Model, self).__init__()
        # Feature extractor: 3 -> 32 -> 64 channels, 3x3 kernels, stride 1, no padding.
        self.conv1 = jittor.nn.Conv(3, 32, 3, 1)
        self.conv2 = jittor.nn.Conv(32, 64, 3, 1)
        self.bn = jittor.nn.BatchNorm(64)
        self.max_pool = jittor.nn.Pool(2, 2)
        self.relu = jittor.nn.Relu()
        # For 32x32 inputs: 32 -> 30 -> 28 after the convs, -> 14 after the
        # pool, so the flattened feature size is 64 * 14 * 14 = 12544.
        self.fc1 = jittor.nn.Linear(12544, 256)
        self.fc2 = jittor.nn.Linear(256, num_classes)

    def execute(self, x):
        # Jittor's execute() plays the role of PyTorch's forward().
        x = self.relu(self.conv1(x))
        x = self.relu(self.bn(self.conv2(x)))
        x = self.max_pool(x)
        x = jittor.reshape(x, [x.shape[0], -1])  # flatten everything but the batch dim
        return self.fc2(self.relu(self.fc1(x)))


from ljp.cell import SubtractorConv2D, SubtractorLinear


class Modelaa(jittor.nn.Module):
    """Variant of Model using subtractor-based conv/linear layers.

    Same topology as Model, but the convolution and fully-connected layers
    are the project's SubtractorConv2D / SubtractorLinear cells.
    """

    def __init__(self, num_classes=100):
        super(Modelaa, self).__init__()
        # Feature extractor: 3 -> 32 -> 64 channels, 3x3 kernels, stride 1, no padding.
        self.conv1 = SubtractorConv2D(3, 32, 3, 1)
        self.conv2 = SubtractorConv2D(32, 64, 3, 1)
        self.bn = jittor.nn.BatchNorm(64)
        self.max_pool = jittor.nn.Pool(2, 2)
        self.relu = jittor.nn.Relu()
        # For 32x32 inputs the flattened feature size is 64 * 14 * 14 = 12544
        # (32 -> 30 -> 28 through the convs, halved to 14 by the pool).
        self.fc1 = SubtractorLinear(12544, 256)
        self.fc2 = SubtractorLinear(256, num_classes)

    def execute(self, x):
        # Jittor's execute() plays the role of PyTorch's forward().
        x = self.relu(self.conv1(x))
        x = self.relu(self.bn(self.conv2(x)))
        x = self.max_pool(x)
        x = jittor.reshape(x, [x.shape[0], -1])  # flatten everything but the batch dim
        return self.fc2(self.relu(self.fc1(x)))


# Model selection — swap the commented lines to train a different architecture.
# model = Model(num_classes=100)
# model = Modelaa(num_classes=100)
# model = resnet18(num_classes=100)
model = resnet18_sub(num_classes=100)

# Fix: weight_decay was declared above (and appears in the commented-out
# optimizer call) but was never passed to Adam, silently disabling
# regularization — pass it explicitly.
# NOTE(review): learning_rate=0.1 is unusually high for Adam, and the unused
# `momentum` hyperparameter suggests SGD may have been intended — confirm.
optimizer = jittor.nn.Adam(model.parameters(), learning_rate, weight_decay=weight_decay)
print(model)

# Main loop: one training pass and one validation pass per epoch.
for epoch in range(epochs):
    # train(model, train_loader, optimizer, desc=f'Epoch {epoch + 1}/{epochs}')
    train_sub(model, train_loader, optimizer, desc=f'Epoch {epoch + 1}/{epochs}')
    val(model, val_loader)
