import dataset
import models
import utils
import numpy as np
import torch  
import torch.nn as nn  
import torch.optim as optim
from torchvision import transforms, datasets, models
from dataset.MultiLabelImageDataset import MultiLabelImageDataset
from torch.utils.data import DataLoader
from models.MultiLabelBinaryClassifier import MultiLabelBinaryClassifier
import time
from utils.utils import get_accurary
from utils.utils import set_seed
import matplotlib.pyplot as plt
from config import opt
from datetime import datetime, timedelta


def criterion(loss_fn, preds, gts, device):
    """Average a per-head loss over every prediction head.

    Args:
        loss_fn: elementwise loss module, e.g. ``nn.BCELoss()``.
        preds: dict mapping head name -> prediction tensor of shape
            ``(B, 1)`` or ``(B,)``.  (assumed from usage — TODO confirm
            against MultiLabelBinaryClassifier's forward.)
        gts: dict with the same keys holding ground-truth tensors of
            shape ``(B,)``; cast to float and moved to ``device`` here.
        device: device the targets are moved to.

    Returns:
        Scalar tensor: mean of the individual head losses (keeps grad).
    """
    losses = [
        # squeeze only the trailing dim: a bare .squeeze() would also drop
        # the batch dim when the last batch holds a single sample, yielding
        # a 0-d prediction that BCELoss rejects against a (1,) target.
        loss_fn(pred.squeeze(-1), gts[key].float().to(device))
        for key, pred in preds.items()
    ]
    return torch.stack(losses).mean()
def train(net, optimizer, epochs, batch_size):
    """Train ``net`` and return it with the best validation weights loaded.

    Relies on module-level globals: ``train_loader``, ``test_loader``,
    ``DEVICE``, ``opt`` and the helpers ``criterion`` / ``get_accurary``.

    Args:
        net: model to train (already moved to ``DEVICE``).
        optimizer: optimizer bound to ``net``'s parameters.
        epochs: number of epochs to run.
        batch_size: unused — the loaders fix the actual batch size; kept
            only for interface compatibility with existing callers.

    Returns:
        ``net`` with the parameters that scored the highest validation
        accuracy loaded back in.
    """
    since = time.time()

    best_acc = 0.0  # highest validation accuracy seen so far
    # Snapshot of the best weights.  NOTE: the previous ``best_model = net``
    # only aliased the live model, so the "best" model silently tracked the
    # latest epoch; a detached copy of the state dict fixes that.
    best_state = {k: v.detach().clone() for k, v in net.state_dict().items()}
    epoch_losses = []
    val_losses = []
    log_interval = 2  # print running stats every `log_interval` steps
    loss_fn = nn.BCELoss()

    for epoch in range(epochs):
        print('-' * 30)
        print('Epoch {}/{}'.format(epoch + 1, epochs))

        # ---- training phase ----
        net.train()
        running_loss = 0.0
        epoch_loss = 0.0
        for i, data_info in enumerate(train_loader):
            inputs = data_info['image_tensors'].to(DEVICE)

            # Forward pass; extra returns (per-head features) are unused here.
            preds, trans_feature, features = net(inputs)
            # Use DEVICE rather than a hard-coded "cuda" so the script also
            # runs on CPU-only machines.
            loss = criterion(loss_fn, preds, data_info['labels'], DEVICE)

            # Backward pass + parameter update.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            epoch_loss += loss.item()
            # Average loss over the last `log_interval` steps.  (The old code
            # divided by 5 while logging every 2 steps.)
            if i != 0 and i % log_interval == 0:
                print('step: {:d},  loss: {:.3f}'.format(i, running_loss / log_interval))
                running_loss = 0.0

        epoch_losses.append(epoch_loss / len(train_loader))

        # ---- validation phase: score the whole test set each epoch ----
        net.eval()
        val_loss = 0.0
        acc = 0.0
        num_batches = 0
        with torch.no_grad():
            for data_info in test_loader:
                inputs = data_info['image_tensors'].to(DEVICE)
                labels = data_info['labels']

                preds, trans_feature, features = net(inputs)
                val_loss += criterion(loss_fn, preds, labels, DEVICE).item()
                acc += get_accurary(preds, labels, DEVICE, 0.5)
                num_batches += 1

        acc /= max(num_batches, 1)  # guard against an empty loader

        if acc > best_acc:  # keep a copy whenever accuracy improves
            best_acc = acc
            best_state = {k: v.detach().clone() for k, v in net.state_dict().items()}
        val_losses.append(val_loss / len(test_loader))

        # Periodic whole-model checkpoint every 5 epochs.
        if epoch % 5 == 0:
            torch.save(net, './checkpoints/model' + opt.log_time + '_' + str(epoch) + '.pt')
            print('保存模型参数完成1')

    time_elapsed = time.time() - since
    print('-' * 30)
    print('训练用时： {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('最高准确率: {}%'.format(100 * best_acc))

    # Plot and persist the train/validation loss curves plus a text summary.
    plt.plot(epoch_losses, label='Training loss')
    plt.plot(val_losses, label='Validation Loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.savefig("./picture/train/" + opt.log_time + "_losses.png")
    with open("./picture/train/" + opt.log_time + "_res.txt", "a", encoding="utf-8") as file:
        file.write('num_epochs： %d\n' % (opt.num_epochs))
        file.write('训练用时： %.0fm %.0fs\n' % (time_elapsed // 60, time_elapsed % 60))
        file.write('最高准确率: %.4f\n' % (100 * best_acc))

    # Restore the best-scoring weights before handing the model back.
    net.load_state_dict(best_state)
    return net


if __name__ == '__main__':
    set_seed(opt.seed)
    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # prefer GPU

    # Standard ImageNet-style preprocessing: resize, then center-crop to 224,
    # then normalize each channel to roughly [-1, 1].
    transformer = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5]),
    ])

    # DataLoader batches the dataset samples into tensors for the model.
    train_dataset = MultiLabelImageDataset(opt.path + 'train/', transform=transformer)
    test_dataset = MultiLabelImageDataset(opt.path + 'val/', transform=transformer)

    train_loader = DataLoader(train_dataset, batch_size=opt.train_batch_size, shuffle=True, num_workers=4)
    test_loader = DataLoader(test_dataset, batch_size=opt.val_batch_size, shuffle=False, num_workers=4)

    # 6 binary heads on a resnet50 backbone — TODO: confirm 6 matches the
    # dataset's label count; consider moving it into config.
    net = MultiLabelBinaryClassifier(opt.known_sig, 'resnet50', 6, DEVICE)
    net = net.to(DEVICE)

    # Adam converged better than SGD here (SGD plateaued around loss 0.37-0.46
    # by epoch 5 in earlier experiments).
    optimizer = optim.Adam(net.parameters(), lr=opt.lr)

    # train() returns the model carrying the best validation-accuracy weights.
    net = train(net, optimizer, opt.num_epochs, opt.train_batch_size)

    torch.save(net, './checkpoints/model_' + opt.log_time + '.pt')

    print('保存模型参数完成')