import torch.nn as nn
import torch
import torchvision
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
import torch.optim as optim
import os
import time
from torchvision import models
import argparse
import random
import numpy as np
import logging
import datetime

def setup_logger(logger_name, save_dir, phase, level=logging.INFO, screen=False, to_file=False):
    """Configure and return the logger named ``logger_name``.

    Args:
        logger_name: name passed to ``logging.getLogger``.
        save_dir: directory the log file is written to (only used when ``to_file``).
        phase: log-file prefix, e.g. 'train' -> ``train_<timestamp>.log``.
        level: logging level applied to the logger.
        screen: if True, attach a ``StreamHandler`` (console output).
        to_file: if True, attach a ``FileHandler`` under ``save_dir``.

    Returns:
        The configured ``logging.Logger``.  (The original returned None
        implicitly; returning the logger is backward-compatible and saves a
        second ``getLogger`` call at the call site.)
    """
    lg = logging.getLogger(logger_name)
    formatter = logging.Formatter('%(asctime)s.%(msecs)03d - %(levelname)s: %(message)s',
                                  datefmt='%y-%m-%d %H:%M:%S')
    lg.setLevel(level)
    if to_file:
        # File name embeds a timestamp so repeated runs do not clobber logs.
        log_file = os.path.join(save_dir, phase + '_{}.log'.format(get_timestamp()))
        fh = logging.FileHandler(log_file, mode='w')
        fh.setFormatter(formatter)
        lg.addHandler(fh)
    if screen:
        sh = logging.StreamHandler()
        sh.setFormatter(formatter)
        lg.addHandler(sh)
    return lg


def get_timestamp():
    """Return the current UTC+8 wall-clock time formatted as 'YYMMDD-HHMMSS'.

    Uses timezone-aware ``datetime.now(timezone.utc)`` instead of
    ``datetime.utcnow()``, which returns a naive object and is deprecated
    since Python 3.12.  The +8h offset (original behavior) is preserved.
    """
    now_utc8 = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(hours=8)
    return now_utc8.strftime('%y%m%d-%H%M%S')

# Directory where training logs are written (created in __main__ if absent).
log_path = './log'

class BasicBlock(nn.Module):
    """3x3 + 3x3 residual block used by ResNet-18/34.

    ``ResNet`` reads ``block.expansion`` when sizing downsample layers and
    the fc head; the original class did not define it, so ``resnet34()``
    raised AttributeError.  BasicBlock does not widen its output, hence 1.

    ``**kwargs`` absorbs the ``groups``/``width_per_group`` arguments that
    ``ResNet._make_layer`` passes uniformly to every block type.
    """
    expansion = 1  # fix: required by ResNet (512 * block.expansion, etc.)

    def __init__(self, in_channel, out_channel, stride=1, downsample=None, **kwargs):
        super(BasicBlock, self).__init__()

        # First conv may downsample spatially via `stride`.
        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
                               kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channel)

        self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel,
                               kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channel)

        self.relu = nn.ReLU(inplace=True)
        # Optional projection applied to the identity path when the main
        # path changes resolution or channel count.
        self.downsample = downsample

    def forward(self, x):
        identity = x
        if self.downsample is not None:
            identity = self.downsample(x)

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        # Residual addition: out = F(x) + identity, then final ReLU.
        out += identity
        out = self.relu(out)

        return out



class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 residual bottleneck used by ResNet-50/101 and ResNeXt.

    Output channel count is ``out_channel * expansion``; the 3x3 conv carries
    the stride and (for ResNeXt) the grouped convolution.
    """

    expansion = 4

    def __init__(self, in_channel, out_channel, stride=1, downsample=None, groups=1, width_per_group=64):
        super(Bottleneck, self).__init__()

        # ResNeXt widening rule: for groups=1, width_per_group=64 this is
        # simply `out_channel` (plain ResNet behavior).
        width = int(out_channel * (width_per_group / 64.)) * groups

        # 1x1: squeeze channels down to `width`.
        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=width,
                               kernel_size=1, stride=1, bias=False)
        self.bn1 = nn.BatchNorm2d(width)
        # 3x3: spatial (possibly strided, possibly grouped) convolution.
        self.conv2 = nn.Conv2d(in_channels=width, out_channels=width, groups=groups,
                               kernel_size=3, stride=stride, bias=False, padding=1)
        self.bn2 = nn.BatchNorm2d(width)
        # 1x1: expand back up to `out_channel * expansion`.
        self.conv3 = nn.Conv2d(in_channels=width, out_channels=out_channel * self.expansion,
                               kernel_size=1, stride=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channel * self.expansion)

        self.relu = nn.ReLU(inplace=True)
        # Projection for the shortcut when shape changes; None = plain identity.
        self.downsample = downsample

    def forward(self, x):
        shortcut = x if self.downsample is None else self.downsample(x)

        y = self.relu(self.bn1(self.conv1(x)))
        y = self.relu(self.bn2(self.conv2(y)))
        y = self.bn3(self.conv3(y))

        # y = F(x) + shortcut, then the post-addition ReLU.
        y = y + shortcut
        return self.relu(y)

#ResNet(Bottleneck, [3, 4, 6, 3], num_classes=num_classes, include_top=include_top)
class ResNet(nn.Module):
    """Configurable ResNet/ResNeXt backbone (He et al.).

    Args:
        block: residual block class (exposing a ``.expansion`` attribute).
        blocks_num: number of blocks in each of the four stages.
        num_classes: size of the classification head (when ``include_top``).
        include_top: attach global-average-pool + fc head if True.
        groups / width_per_group: ResNeXt cardinality parameters, forwarded
            to every block.
    """

    def __init__(self,
                 block,
                 blocks_num,
                 num_classes=1000,
                 include_top=True,
                 groups=1,
                 width_per_group=64):

        super(ResNet, self).__init__()
        self.include_top = include_top
        # Running channel count; mutated by _make_layer as stages are built.
        self.in_channel = 64

        self.groups = groups
        self.width_per_group = width_per_group

        # Stem: 7x7 stride-2 conv + 3x3 stride-2 max-pool (4x downsampling).
        self.conv1 = nn.Conv2d(3, self.in_channel, kernel_size=7, stride=2,
                               padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(self.in_channel)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        # Four residual stages; stages 2-4 halve the spatial resolution.
        self.layer1 = self._make_layer(block, 64, blocks_num[0])
        self.layer2 = self._make_layer(block, 128, blocks_num[1], stride=2)
        self.layer3 = self._make_layer(block, 256, blocks_num[2], stride=2)
        self.layer4 = self._make_layer(block, 512, blocks_num[3], stride=2)

        if self.include_top:
            # Global average pool to 1x1, then a linear classifier.
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He initialization for every conv weight.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')

    def _make_layer(self, block, channel, block_num, stride=1):
        """Build one residual stage of ``block_num`` blocks.

        The first block may carry a stride and a 1x1 projection shortcut;
        the rest operate at constant shape.  Updates ``self.in_channel``.
        """
        out_channels = channel * block.expansion

        downsample = None
        if stride != 1 or self.in_channel != out_channels:
            downsample = nn.Sequential(
                nn.Conv2d(self.in_channel, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels))

        stage = [block(self.in_channel,
                       channel,
                       downsample=downsample,
                       stride=stride,
                       groups=self.groups,
                       width_per_group=self.width_per_group)]

        self.in_channel = out_channels

        stage.extend(block(self.in_channel,
                           channel,
                           groups=self.groups,
                           width_per_group=self.width_per_group)
                     for _ in range(1, block_num))

        return nn.Sequential(*stage)

    def forward(self, x):
        # Stem
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))

        # Residual stages
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            x = stage(x)

        # Optional classification head
        if self.include_top:
            x = self.avgpool(x)
            x = torch.flatten(x, 1)
            x = self.fc(x)

        return x



def resnet50(num_classes=1000, include_top=True):
    """Build a ResNet-50: Bottleneck blocks with stage depths 3-4-6-3."""
    # matching pretrained weights:
    # https://download.pytorch.org/models/resnet50-19c8e357.pth
    stage_depths = [3, 4, 6, 3]
    return ResNet(Bottleneck, stage_depths, num_classes=num_classes, include_top=include_top)


def resnet34(num_classes=1000, include_top=True):
    """Build a ResNet-34: BasicBlock with stage depths 3-4-6-3.

    NOTE(review): ResNet reads ``block.expansion``; verify that BasicBlock
    defines that attribute before relying on this constructor.
    """
    # matching pretrained weights:
    # https://download.pytorch.org/models/resnet34-333f7ec4.pth
    stage_depths = [3, 4, 6, 3]
    return ResNet(BasicBlock, stage_depths, num_classes=num_classes, include_top=include_top)


def resnet101(num_classes=1000, include_top=True):
    """Build a ResNet-101: Bottleneck blocks with stage depths 3-4-23-3."""
    # matching pretrained weights:
    # https://download.pytorch.org/models/resnet101-5d3b4d8f.pth
    stage_depths = [3, 4, 23, 3]
    return ResNet(Bottleneck, stage_depths, num_classes=num_classes, include_top=include_top)


def resnext50_32x4d(num_classes=1000, include_top=True):
    """Build a ResNeXt-50 32x4d: 32 groups, 4 channels per group."""
    # matching pretrained weights:
    # https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth
    return ResNet(Bottleneck, [3, 4, 6, 3],
                  num_classes=num_classes,
                  include_top=include_top,
                  groups=32,
                  width_per_group=4)


def resnext101_32x8d(num_classes=1000, include_top=True):
    """Build a ResNeXt-101 32x8d: 32 groups, 8 channels per group."""
    # matching pretrained weights:
    # https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth
    return ResNet(Bottleneck, [3, 4, 23, 3],
                  num_classes=num_classes,
                  include_top=include_top,
                  groups=32,
                  width_per_group=8)
def train(model, criterion, optimizer, args):
    """Run the full training loop, evaluating and checkpointing each epoch.

    Args:
        model: network to train (moved to `device` by the caller).
        criterion: loss function.
        optimizer: optimizer over `model.parameters()`.
        args: parsed CLI args; uses `args.batchsize` and `args.epoch`.

    Relies on module-level globals: `train_data`, `test_data`, `device`,
    and the sibling functions `test` and `save_model`.
    """
    train_loader = DataLoader(dataset=train_data, batch_size=args.batchsize,
                              shuffle=True, num_workers=2)
    # Evaluation order doesn't affect accuracy, so no need to shuffle.
    test_loader = DataLoader(dataset=test_data, batch_size=args.batchsize,
                             shuffle=False, num_workers=2)
    step_per_epoch = len(train_loader)
    step = 0  # fixed: was initialised to step_per_epoch instead of 0

    print("training")
    time_stamp = time.time()
    for epoch in range(args.epoch):
        # fixed: test() switches the model to eval mode; without this every
        # epoch after the first trained with BatchNorm/Dropout in eval mode.
        model.train()
        for i, data in enumerate(train_loader):
            data_time = time.time() - time_stamp
            time_stamp = time.time()

            inputs, labels = data
            inputs, labels = inputs.to(device), labels.to(device)

            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            train_time = time.time() - time_stamp
            time_stamp = time.time()
            # loss.item() avoids formatting a live tensor (removed the
            # per-iteration debug print of inputs.shape).
            print('epoch:{}  {}/{} time:{:.2f}+{:.2f} loss_avg:{:.4e}'.format(
                epoch, i, step_per_epoch, data_time, train_time, loss.item()))
            step += 1

        test(model, test_loader, epoch)
        save_model(model)

def test(model, test_loader, epoch):
    """Evaluate `model` on `test_loader` and print top-1 accuracy.

    Switches the model to eval mode (the caller is responsible for restoring
    train mode afterwards).  Uses the module-level `device`.  `epoch` is
    accepted for interface compatibility but not used.
    """
    model.eval()
    hits = 0
    seen = 0
    with torch.no_grad():
        for batch in test_loader:
            images, targets = batch
            images, targets = images.to(device), targets.to(device)
            logits = model(images)
            predictions = logits.argmax(dim=1)
            seen += targets.size(0)
            hits += (predictions == targets).sum().item()

    print('accuracy:{:.4f}%'.format(100.0 * hits / seen))
        
def save_model(model):
    """Serialize the entire model object to 'cifar10_densenet161.pt'.

    NOTE(review): the filename says densenet161 but the script builds a
    ResNet-50; kept unchanged because external tooling may expect this path.

    Removed dead code: the original immediately reloaded the checkpoint into
    a local variable that was discarded on return.
    """
    torch.save(model, 'cifar10_densenet161.pt')
    print('cifar10_densenet161.pt saved')
    
    

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='resnet on muxi')
    parser.add_argument('--batchsize', type=int, default=64, help='batchsize')
    parser.add_argument('--epoch', default=100, type=int)
    parser.add_argument('--learning_rate', default=0.001, type=float)
    parser.add_argument('--transform', action='store_true', default=True, help='dataset transform')
    args = parser.parse_args()

    # Best-effort reproducibility.  NOTE(review): cudnn.benchmark=True trades
    # determinism for speed, so GPU runs are still not bit-exact despite the
    # fixed seeds — set benchmark=False if exact repeatability matters.
    seed = 1234
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.benchmark = True

    if not os.path.exists(log_path):
        os.makedirs(log_path)
    setup_logger('base', log_path, 'train', level=logging.INFO, screen=True, to_file=True)
    logger = logging.getLogger('base')

    # CIFAR-10 pipelines; flip/grayscale augmentation on the train split only.
    transform_train = transforms.Compose([
        transforms.RandomHorizontalFlip(),
        transforms.RandomGrayscale(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) if args.transform else None
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) if args.transform else None
    train_data = datasets.CIFAR10(root=os.getcwd(), train=True, transform=transform_train, download=False)
    test_data = datasets.CIFAR10(root=os.getcwd(), train=False, transform=transform_test, download=False)

    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda:0' if use_cuda else 'cpu')
    model = resnet50(10).to(device)
    model.eval()
    logger.info(device)
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)

    # Fixed dummy batch for warm-up and profiling.
    # (renamed from `input`, which shadowed the builtin)
    dummy_input = torch.ones(64, 3, 32, 32).to(device)
    logger.info('inputsize:{}'.format(dummy_input.shape))
    for _ in range(5):
        start = time.time()
        output = model(dummy_input)
        if use_cuda:  # fixed: torch.cuda.synchronize() raises on CPU-only hosts
            torch.cuda.synchronize()
        warm_up_time = time.time() - start
        logger.info("Warm up time:{:.4f}".format(warm_up_time))

    #train(model, criterion, optimizer, args)
    # fixed: use_cuda was hard-coded True, which mis-profiles on CPU-only hosts.
    with torch.autograd.profiler.profile(enabled=True, use_cuda=use_cuda,
                                         record_shapes=False, profile_memory=False) as prof:
        outputs = model(dummy_input)
    logger.info(prof.table())
