#!/usr/bin/env python
# encoding: utf-8
'''
@author: wangjianrong
@software: pycharm
@file: train_lenet.py
@time: 2020/10/15 10:53
@desc:
'''

from torch_op.common import init_seed
from torchvision.datasets import MNIST
from torchvision import transforms
from torch.utils.data import DataLoader
from torchvision.models.resnet import resnet50
from torch_op.models.lenet import Lenet
from torch import nn,optim
import torch
import os
import torch
from torch_op.common import init_seed,init_weights
import torch.nn.utils.prune as prune

# Optional deformable-convolution support; gated because dcn_v2 is an
# external compiled extension that may not be installed.
with_dcn = False
if with_dcn:
    from dcn_v2 import DCN


# Fix RNG seeds for reproducibility — NOTE(review): exact scope (python/
# numpy/torch/cudnn) depends on torch_op.common.init_seed; confirm there.
init_seed(0)

# --- training hyper-parameters -------------------------------------------
batch_size =64        # samples per mini-batch
num_workers = 16      # DataLoader worker processes
epochs = 100          # total training epochs
lr = 0.01             # initial SGD learning rate
best_acc = 0          # best test accuracy seen so far (updated in test())
model_save_folder = './torch_op/mnist/models_lenet/'  # checkpoint directory
os.makedirs(model_save_folder,exist_ok=True)




# Train and test splits share the same preprocessing: PIL image -> float
# tensor in [0, 1].  (Normalization is intentionally left disabled,
# matching the original setup.)
_mnist_transform = transforms.Compose([
    transforms.ToTensor(),
    # transforms.Normalize((0.1307,),(0.3081))
])

train_dataset = MNIST('torch_op/mnist/data', train=True, download=True,
                      transform=_mnist_transform)

test_dataset = MNIST('torch_op/mnist/data', train=False, download=True,
                     transform=_mnist_transform)

# Shuffle only the training stream; evaluation order stays fixed.
train_loader = DataLoader(train_dataset, batch_size=batch_size,
                          shuffle=True, num_workers=num_workers)
test_loader = DataLoader(test_dataset, batch_size=batch_size,
                         shuffle=False, num_workers=num_workers)


# LeNet classifier; the whole script is CUDA-only (no CPU fallback).
model = Lenet()
model.cuda()


# Plain SGD — no momentum or weight decay.
optimizer = optim.SGD(model.parameters(),lr=lr)
# Reduce LR when test accuracy plateaus: mode='max' because the monitored
# metric is accuracy; an absolute improvement of at least 0.001 within
# `patience`(=4) epochs is required, with a 2-epoch cooldown after each drop.
lr_policy = optim.lr_scheduler.ReduceLROnPlateau(optimizer,mode='max',patience=4,cooldown=2,threshold=0.001,threshold_mode='abs')
# Cross-entropy over raw logits (model output is not softmaxed here).
criterion = nn.CrossEntropyLoss()

def train():
    """Run one full training epoch over ``train_loader`` on the GPU.

    Uses the module-level ``model``, ``optimizer`` and ``criterion``;
    prints the current learning rate before the epoch starts.
    """
    model.train()
    print(f"lr:{optimizer.param_groups[0]['lr']}")
    for inputs, labels in train_loader:
        inputs = inputs.cuda()
        labels = labels.cuda()

        optimizer.zero_grad()
        logits = model(inputs)
        batch_loss = criterion(logits, labels)
        batch_loss.backward()
        optimizer.step()


def test(e):
    """Evaluate on the test set, step the LR scheduler, and checkpoint.

    Args:
        e: current epoch index (stored in the checkpoint dict).

    Side effects:
        - updates the module-level ``best_acc``
        - steps ``lr_policy`` with the epoch accuracy
        - always writes ``latest.pt``; additionally writes ``best.pt``
          whenever accuracy improves.

    Bug fix: the original wrote *either* ``best.pt`` *or* ``latest.pt``,
    so ``latest.pt`` went stale on every epoch that set a new best.
    """
    global best_acc
    model.eval()
    total_right = 0
    total_wrong = 0
    with torch.no_grad():
        for batch_idx, (data, target) in enumerate(test_loader):
            data = data.cuda()
            target = target.cuda()
            output = model(data)
            # Predicted class = argmax over the class dimension.
            pred = output.max(dim=1)[1]
            right = torch.sum(pred == target).item()
            total_right += right
            total_wrong += len(data) - right
    total = total_right + total_wrong
    # Guard against an empty test loader instead of dividing by zero.
    acc = total_right / total if total else 0.0
    print('epoch-{} acc:{}/{}={}'.format(e, total_right, total, acc))
    state = {'net': model.state_dict(), 'epoch': e, 'acc': acc}
    lr_policy.step(acc)
    # Always keep the most recent checkpoint; keep a separate copy of the
    # best-so-far model when accuracy improves.
    torch.save(state, model_save_folder + 'latest.pt')
    if acc > best_acc:
        best_acc = acc
        torch.save(state, model_save_folder + 'best.pt')



# Main loop: one training pass followed by evaluation/checkpointing per epoch.
for epoch in range(epochs):
    train()
    test(epoch)











