import torch
import torchvision
from torchvision import datasets,models,transforms
import os
from torch.autograd import Variable
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
from d2l import torch as d2l
import gc
gc.collect()#清理内存
# ---- Data-loading helpers ----
def getmean_str(data_dir, name):
    """Return per-channel (mean, std) lists for all images under ``data_dir``.

    Images are resized to 224x224 and converted to tensors; statistics are
    accumulated one image at a time (batch_size=1) and averaged over the
    dataset. ``name`` is only used to label the printed summary.
    """
    preprocess = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
    ])
    dataset = datasets.ImageFolder(root=data_dir, transform=preprocess)
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=1, shuffle=False, num_workers=0)

    mean = torch.zeros(3)
    std = torch.zeros(3)
    for X, _ in loader:
        for channel in range(3):
            mean[channel] += X[:, channel, :, :].mean()
            std[channel] += X[:, channel, :, :].std()
    # One contribution per image (batch_size is 1), so divide by dataset size.
    mean.div_(len(dataset))
    std.div_(len(dataset))
    print(name, " mean:", mean, "std:", std)
    return list(mean.numpy()), list(std.numpy())

def load_data_cartoonvsnormal(data_dir, meanlist, stdlist, batch_size=32):
    """Build a shuffled DataLoader over the ImageFolder at ``data_dir``.

    Images are resized to 224x224, converted to tensors, and normalized with
    the supplied per-channel ``meanlist``/``stdlist``.

    Returns:
        (data_iter, image_datasets): the DataLoader and the underlying
        ImageFolder dataset.
    """
    preprocess = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(meanlist, stdlist),
    ])
    dataset = datasets.ImageFolder(root=data_dir, transform=preprocess)
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=True)
    return (loader, dataset)
# ---- Training: one epoch ----
def train_epoch_ch4_9(net, train_iter, loss, updater, Use_gpu, detector=None):
    """Train ``net`` for one epoch over ``train_iter``.

    Args:
        net: model to train.
        train_iter: iterable yielding (X, y) batches; must support len().
        loss: criterion, called as ``loss(y_hat, y)`` or, when ``detector``
            is given, ``loss(y_hat, y, beta)``.
        updater: optimizer driving ``zero_grad()``/``step()``.
        Use_gpu: when True, move batches (and beta) to CUDA.
        detector: optional model producing per-sample weights ``beta`` from
            the inputs (used for covariate-shift correction).

    Returns:
        (mean loss per batch, accuracy over all samples seen); the accuracy
        is a numpy scalar, matching the original return type.
    """
    # Bug fix: an earlier evaluation may have left the net in eval mode
    # (frozen batchnorm/dropout); make sure we train in train mode.
    net.train()
    train_acc = 0.0
    train_loss = 0.0
    sample_count = 0  # actual number of samples seen (handles partial batches)
    for batch, data in enumerate(train_iter, 1):
        X, y = data
        if Use_gpu:
            X, y = X.cuda(), y.cuda()
        y_hat = net(X)
        beta = None
        if detector is not None:
            # Per-sample weights from the shift detector; detach so no
            # gradient flows back into the inputs through the detector.
            beta = detector(X.detach())
            if Use_gpu:
                beta = beta.cuda()
        # Bug fix: use the passed-in ``updater``, not the module-level
        # global ``optimizer`` the original body referenced.
        updater.zero_grad()
        l = loss(y_hat, y) if beta is None else loss(y_hat, y, beta)
        l.backward()  # backpropagate
        updater.step()  # parameter update
        _, pred = torch.max(y_hat, 1)
        train_acc += torch.sum(pred == y)
        train_loss += l.item()
        sample_count += y.numel()

        if batch % 200 == 0:
            print("Batch:{},Train Loss:{:.4f},Train ACC:{:.4f}%".format(
                batch, train_loss / batch, 100 * train_acc / sample_count))
    # Bug fix: normalize by the true sample count instead of the global
    # ``batch_size`` — correct even when the last batch is smaller.
    return (train_loss / len(train_iter)), (train_acc / sample_count).cpu().numpy()
# ---- Evaluation helper ----
def evaluate_accuracy_ch4_9(net, data_iter, Use_gpu):
    """Compute the classification accuracy of ``net`` over ``data_iter``.

    Args:
        net: model to evaluate; moved to CUDA when ``Use_gpu`` is True.
        data_iter: iterable of (X, y) batches.
        Use_gpu: evaluate on GPU when True.

    Returns:
        Accuracy in [0, 1] as a numpy scalar.
    """
    if Use_gpu:
        net = net.cuda()
    was_training = net.training
    net.eval()  # evaluation mode: disable dropout, use running batchnorm stats
    correct = 0.0
    total = 0.0
    with torch.no_grad():
        for X, y in data_iter:
            if Use_gpu:
                X, y = X.cuda(), y.cuda()
            y_pred = net(X)
            _, pred = torch.max(y_pred, 1)
            correct += torch.sum(pred == y)
            total += y.numel()
    if was_training:
        # Bug fix: restore train mode. The original left the net in eval
        # mode permanently, so every epoch after the first trained with
        # batchnorm/dropout frozen.
        net.train()
    return (correct / total).cpu().numpy()
# ---- Full training loop ----
def train_ch4_9(net, train_iter, test_iter, loss, num_epochs, updater, Use_gpu,
                test_iter2=None,
                savename='../data/ch04-4-9-2and4-9-3/covariate_shift_detectormodel.pth',
                detector=None):
    """Train ``net`` for ``num_epochs`` epochs, plot curves, save weights.

    Args:
        net: model to train.
        train_iter / test_iter: DataLoaders for training and evaluation.
        loss: criterion forwarded to ``train_epoch_ch4_9``.
        num_epochs: number of epochs.
        updater: optimizer forwarded to ``train_epoch_ch4_9``.
        Use_gpu: run on GPU when True.
        test_iter2: optional second test set; when given, both test
            accuracies are tracked and plotted.
        savename: path where the trained state_dict is saved.
        detector: optional covariate-shift detector forwarded to the
            per-epoch trainer.
    """
    if test_iter2 is None:
        animator = d2l.Animator(xlabel='epoch', ylabel='Y', xlim=[1, num_epochs],
                                legend=['train loss', 'train acc', 'test acc'])
    else:
        animator = d2l.Animator(xlabel='epoch', ylabel='Y', xlim=[1, num_epochs],
                                legend=['train loss', 'train acc', 'test cartoon acc', 'test normal acc '])

    for epoch in range(num_epochs):
        train_metrics = train_epoch_ch4_9(net, train_iter, loss, updater, Use_gpu, detector)
        test_acc = evaluate_accuracy_ch4_9(net, test_iter, Use_gpu)
        if test_iter2 is not None:
            test_acc2 = evaluate_accuracy_ch4_9(net, test_iter2, Use_gpu)
            animator.add(epoch + 1, train_metrics + (test_acc, test_acc2,))
            print("epoch{} Loss:{:.4f} Train Acc:{:.4f}% Test Cartoon Acc:{:.4f}% Test Normal Acc:{:.4f}%".format(
                epoch + 1, train_metrics[0], 100 * train_metrics[1], 100 * test_acc, 100 * test_acc2))
        else:
            animator.add(epoch + 1, train_metrics + (test_acc,))
            print("epoch{} Loss:{:.4f} Train Acc:{:.4f}% Test  Acc:{:.4f}%".format(
                epoch + 1, train_metrics[0], 100 * train_metrics[1], 100 * test_acc))

    train_loss, train_acc = train_metrics

    torch.save(net.state_dict(), savename)

    torch.cuda.empty_cache()
    print("save", savename, "over")

    # d2l-style sanity checks on the final epoch's metrics.
    # NOTE(review): ``assert`` is stripped under ``python -O``; kept for
    # compatibility with the d2l book's convention.
    assert train_loss < 0.5, train_loss
    assert train_acc <= 1 and train_acc > 0.7, train_acc
    assert test_acc <= 1 and test_acc > 0.7, test_acc


# ---- Script entry: data loading ----

batch_size=32
data_dir = "../data/ch04-4-9-2and4-9-3/cartoonvsnormal"
# Per-channel normalization statistics, computed separately for train and test.
mean_train_list,std_train_list=getmean_str(os.path.join(data_dir, 'train'),'train')
mean_test_list,std_test_list=getmean_str(os.path.join(data_dir, 'test'),'test')
# Class counts (train + test combined), used below to weight the loss.
cartoon_num=1.0*(len(os.listdir(os.path.join(data_dir, 'train','cartoon')))+len(os.listdir(os.path.join(data_dir, 'test','cartoon'))))
normal_num=1.0*(len(os.listdir(os.path.join(data_dir, 'train','normal')))+len(os.listdir(os.path.join(data_dir, 'test','normal'))))
train_iter,image_train_datasets=load_data_cartoonvsnormal(os.path.join(data_dir, 'train'),mean_train_list,std_train_list,batch_size)
test_iter,image_test_datasets=load_data_cartoonvsnormal(os.path.join(data_dir, 'test'),mean_test_list,std_test_list,batch_size)
index_classes = list(image_test_datasets.class_to_idx.keys())
print("类别一 {}，数量{}，类别二 {} 数量{}".format(index_classes[0],cartoon_num,index_classes[1],normal_num))
# NOTE(review): exact duplicate of the line above — looks accidental; confirm
# it is intentional before removing.
print("类别一 {}，数量{}，类别二 {} 数量{}".format(index_classes[0],cartoon_num,index_classes[1],normal_num))
# ---- Model: ResNet-50 pretrained on ImageNet ----
model = models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V1)
Use_gpu = torch.cuda.is_available()

for parma in model.parameters():
    parma.requires_grad = False# freeze the pretrained backbone; only the new fc layer below is trained
model.fc = torch.nn.Linear(2048,2)
nn.init.xavier_uniform_(model.fc.weight)


if Use_gpu:
    model = model.cuda()

# ---- Loss function and optimizer ----
# Inverse-frequency class weights: the weight for each class is the *other*
# class's sample count, balancing the unequal class sizes.
# NOTE(review): assumes class index 0 is 'cartoon' and 1 is 'normal'
# (ImageFolder assigns indices by sorted folder name) — verify.
weight=torch.tensor([normal_num,cartoon_num])
weight=weight.cuda() if Use_gpu else weight
loss_f = torch.nn.CrossEntropyLoss(weight=weight)# weighted loss to balance the imbalanced classes
optimizer = torch.optim.SGD(model.fc.parameters(),lr = 1e-4)

num_epochs = 15
train_ch4_9(model, train_iter, test_iter, loss_f, num_epochs,optimizer,Use_gpu)
# Free memory
del model
torch.cuda.empty_cache()