import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torchvision
from torchvision import datasets,models,transforms
import matplotlib.pyplot as plt
import time
import os
import copy
# Report library versions up front for easier debugging of environment issues.
print("PyTorch Version:{},TorchVision Version:{}".format(torch.__version__,torchvision.__version__))
# Root of the dataset, laid out as <data_dir>/{train,val}/<class>/*.jpg so it
# can be loaded with torchvision's ImageFolder. (Presumably the hymenoptera
# ants/bees dataset from the PyTorch transfer-learning tutorial — per the path.)
data_dir = '/home/liushuai/下载/hymenoptera_data'
# Architecture name passed to initialize_model() below.
models_name = "squeezenet"
# Two output classes (one per class folder under train/ and val/).
num_classes = 2
batch_size = 8
num_epochs = 15
# True: freeze the pretrained backbone and train only the new classifier head.
# False: fine-tune every parameter.
feature_extract = True
def train_model(model, dataloaders, criterion, optimizer, num_epochs=25, is_inception=False):
    """Train and validate ``model``, keeping the best validation weights.

    Args:
        model: network to train (already moved to its target device).
        dataloaders: dict with 'train' and 'val' DataLoader entries.
        criterion: loss function.
        optimizer: optimizer over the trainable parameters.
        num_epochs: number of epochs to run.
        is_inception: True for Inception v3, whose training-mode forward
            returns (final_logits, aux_logits); the auxiliary loss is
            weighted by 0.4 as in the original paper.

    Returns:
        (model, val_acc_history): the model reloaded with the weights of the
        best validation epoch, and the per-epoch validation accuracies as
        plain Python floats.
    """
    since = time.time()

    # Run on whatever device the model already lives on instead of relying
    # on a module-level `device` global (keeps the function self-contained).
    device = next(model.parameters()).device

    val_acc_history = []
    # Snapshot of the best-performing weights seen so far.
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)

        # Each epoch runs a training phase followed by a validation phase.
        for phase in ['train', 'val']:
            if phase == 'train':
                model.train()  # enable dropout / batch-norm running-stat updates
            else:
                model.eval()

            running_loss = 0.0
            running_corrects = 0
            for inputs, labels in dataloaders[phase]:
                inputs = inputs.to(device)
                labels = labels.to(device)

                # Clear gradients accumulated by the previous step.
                optimizer.zero_grad()

                # Track gradients only during the training phase.
                with torch.set_grad_enabled(phase == 'train'):
                    if is_inception and phase == 'train':
                        # Inception v3 emits an auxiliary classifier output in
                        # training mode; combine both losses.
                        # https://discuss.pytorch.org/t/how-to-optimize-inception-model-with-auxiliary-classifiers/7958
                        outputs, aux_outputs = model(inputs)
                        loss1 = criterion(outputs, labels)
                        loss2 = criterion(aux_outputs, labels)
                        loss = loss1 + 0.4 * loss2
                    else:
                        outputs = model(inputs)
                        loss = criterion(outputs, labels)
                    # Predicted class = arg-max over the class logits.
                    _, preds = torch.max(outputs, 1)

                    # Backpropagate and step only while training.
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()

                # loss.item() is the batch mean; weight by the batch size so
                # the epoch loss below is an exact per-sample average (the
                # last batch may be smaller than batch_size).
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)

            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.double() / len(dataloaders[phase].dataset)

            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))

            if phase == 'val':
                # Remember the best validation weights seen so far.
                if epoch_acc > best_acc:
                    best_acc = epoch_acc
                    best_model_wts = copy.deepcopy(model.state_dict())
                # Store a plain float so the history does not keep 0-dim
                # (possibly GPU) tensors alive.
                val_acc_history.append(epoch_acc.item())

        print()

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best val Acc: {:4f}'.format(best_acc))

    # Restore the best validation weights before returning.
    model.load_state_dict(best_model_wts)
    return model, val_acc_history

# def train_model(model,dataloaders,criterion,optimizer,num_epochs = 25,is_inception=False):
#     since = time.time()
#     val_acc_history = []
#     best_model_wts = copy.deepcopy(model.state_dict())
#     best_acc = 0.0
#     for epoch in range(num_epochs):
#         print("Epoch {}/{}".format(epoch,num_epochs-1))
#         for phase in ['train','val']:
#             if phase == 'train':
#                 model.train()
#             else:
#                 model.eval()
#             running_loss = 0.0
#             running_corrects = 0
#             for inputs,labels in dataloaders[phase]:
#                 inputs = inputs.to(device)
#                 labels = labels.to(device)
#                 optimizer.zero_grad()
#                 with torch.set_grad_enable(phase == 'train'):
#                     if is_inception and phase == 'train':
#                         outputs,aux_outputs = model(inputs)
#                         loss1 = criterion(outputs,labels)
#                         loss2 = criterion(aux_outputs,labels)
#                         loss = loss1+0.4*loss2
#                     else:
#                         outputs = model(inputs)
#                         loss = criterion(outputs,labels)
#                     _,preds = torch.max(outputs,1)
#                     if phase == 'train':
#                         loss.backward()
#                         optimizer.step()
#                     running_loss += loss.item()*inputs.size(0)
#                     running_corrects += torch.sum(preds == labels.data)
#                 epoch_loss = running_loss/len(dataloaders[phase].dataset)
#                 epoch_acc = running_corrects.double()/len(dataloaders[phase].dataset)
#                 print("{} Loss:{:.4f} Acc:{:.4f}".format(phase,epoch_loss,epoch_acc))
#                 if phase == 'val' and epoch_acc >best_acc:
#                     best_acc = epoch_acc
#                     best_model_wts = copy.deepcopy(model.state_dict())
#                 if phase == 'val':
#                     val_acc_history.append(epoch_acc)
#             print()
#         time_elapse = time.time() - since
#         print("training complete in {:.0f}m {:.0f}s".format(time_elapse//60,time_elapse%60))
#         model.load_state_dict(best_model_wts)
#         return model,val_acc_history
# Freeze model parameters so they are not updated during training
def set_parameter_requires_grad(model,feature_extracting):
    """Freeze every parameter of ``model`` when ``feature_extracting`` is True.

    Frozen parameters receive no gradients, so the optimizer leaves the
    pretrained weights untouched. No-op when ``feature_extracting`` is False.
    """
    if not feature_extracting:
        return
    for weight in model.parameters():
        weight.requires_grad = False
# Build one of the supported torchvision architectures with a replaced head.
def initialize_model(model_name,num_classes,feature_extract,use_pretrained = True):
    """Create a torchvision model whose classifier head outputs ``num_classes``.

    Args:
        model_name: one of 'resnet', 'alexnet', 'vgg', 'squeezenet',
            'densenet', 'inception'.
        num_classes: number of output classes for the new head.
        feature_extract: when True, freeze the pretrained backbone so that
            only the newly created head remains trainable.
        use_pretrained: load ImageNet-pretrained weights.

    Returns:
        (model, input_size): the configured model and the square input
        resolution (in pixels) its preprocessing should produce.

    Raises:
        ValueError: if ``model_name`` is not one of the supported names.
    """
    model_ft = None
    input_size = 0
    if model_name == 'resnet':
        model_ft = models.resnet18(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft,feature_extract)
        # Bug fix: the attribute is `in_features` (was `in_feature`, which
        # raised AttributeError on this branch).
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs,num_classes)
        input_size = 224
    elif model_name == 'alexnet':
        model_ft = models.alexnet(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft,feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 224
    elif model_name == 'vgg':
        model_ft = models.vgg11_bn(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft,feature_extract)
        num_ftrs = model_ft.classifier[6].in_features
        model_ft.classifier[6] = nn.Linear(num_ftrs,num_classes)
        input_size = 224
    elif model_name == 'squeezenet':
        model_ft = models.squeezenet1_0(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft,feature_extract)
        # SqueezeNet classifies with a 1x1 conv rather than a Linear layer.
        model_ft.classifier[1] = nn.Conv2d(512,num_classes,kernel_size=(1,1),stride=(1,1))
        model_ft.num_classes = num_classes
        input_size = 224
    elif model_name == 'densenet':
        model_ft = models.densenet121(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft,feature_extract)
        num_ftrs = model_ft.classifier.in_features
        model_ft.classifier = nn.Linear(num_ftrs,num_classes)
        input_size = 224
    elif model_name == 'inception':
        model_ft = models.inception_v3(pretrained=use_pretrained)
        set_parameter_requires_grad(model_ft,feature_extract)
        # Replace the auxiliary classifier head.
        num_ftrs = model_ft.AuxLogits.fc.in_features
        model_ft.AuxLogits.fc = nn.Linear(num_ftrs,num_classes)
        # Bug fix: the main head has its own (larger) feature width; the
        # original code reused the AuxLogits width here, which would make
        # the new `fc` incompatible with the backbone's output.
        num_ftrs = model_ft.fc.in_features
        model_ft.fc = nn.Linear(num_ftrs,num_classes)
        # Inception v3 expects 299x299 inputs, unlike the other models.
        input_size = 299
    else:
        # Fail loudly instead of print()+exit(): callers get a catchable error.
        raise ValueError("invalid model name: {!r}".format(model_name))
    return model_ft,input_size
# Build the chosen model and learn the input resolution it expects.
model_ft,input_size = initialize_model(models_name,num_classes,feature_extract,use_pretrained=True)
print(model_ft)

# Per-phase preprocessing: random crop/flip augmentation for training,
# deterministic resize + center crop for validation.  The normalization
# constants are the standard ImageNet channel means/stds used by the
# pretrained torchvision models.
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(input_size),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
    'val': transforms.Compose([
        transforms.Resize(input_size),
        transforms.CenterCrop(input_size),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
# data_transforms = {
#     'train':transforms.Compose([transforms.RandomResizedCrop(input_size),
#                                 transforms.RandomHorizontalFlip,
#                                 transforms.ToTensor(),
#                                 transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])]),
#     'val':transforms.Compose([transforms.Resize(input_size),
#                               transforms.CenterCrop(input_size),
#                               transforms.ToTensor(),
#                               transforms.Normalize([0.485,0.456,0.406],[0.229,0.224,0.225])]
#
#     )
# }
print("Initializing Datasets and Dataloaders")
# One ImageFolder dataset per phase, each with its matching transform pipeline.
image_datasets = {x:datasets.ImageFolder(os.path.join(data_dir,x),data_transforms[x]) for x in ['train','val']}
dataloaders_dict = {x:torch.utils.data.DataLoader(image_datasets[x],batch_size = batch_size,
                                                  shuffle=True,num_workers = 4) for x in ['train','val']}
# Train on the first GPU when available, otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else 'cpu')

model_ft = model_ft.to(device)

# Collect the parameters the optimizer should update.  In feature-extract
# mode only the freshly created head has requires_grad=True, so we gather
# just those; otherwise all parameters are trainable and merely listed.
params_to_update = model_ft.parameters()
print("Params to learn:")
if feature_extract:
    params_to_update = []
    for name,param in model_ft.named_parameters():
        if param.requires_grad:
            params_to_update.append(param)
            print("\t",name)
else:
    for name,param in model_ft.named_parameters():
        if param.requires_grad:
            print("\t",name)

optimizer_ft = optim.SGD(params_to_update,lr=0.001,momentum=0.9)
# Setup the loss fxn
criterion = nn.CrossEntropyLoss()
# Reuse the architecture chosen at the top of the file instead of
# re-hard-coding "squeezenet" here, so the inception-specific training
# branch fires whenever the top-level choice is 'inception'.
model_name = models_name
model_ft, hist = train_model(model_ft, dataloaders_dict, criterion, optimizer_ft,
                             num_epochs=num_epochs, is_inception=(model_name=="inception"))


