import torch
from torch.utils.data import DataLoader,Dataset
from Model import Mymodel,Siamese
from dataset import Mydataset
from Config import Config
import albumentations as A
from albumentations.pytorch import ToTensorV2
import time
import torch.nn as nn
from sklearn.metrics import roc_auc_score
import torch.optim as opt
from loss import *
from simdataset import Simdataset
# Transform for the first image of each pair: resize + normalize only
# (no augmentation), so the reference/anchor image stays deterministic.
img1_transformer = A.Compose([
    A.Resize(height=Config.img_size, width=Config.img_size),
    A.Normalize(max_pixel_value=255.0, p=1.0),
    ToTensorV2(p=1.0),
])
# Augmentation pipeline for the second image of each pair (training only).
train_transformer = A.Compose([
    A.Resize(height=Config.img_size, width=Config.img_size),
    # NOTE(review): OneOf with p=1 always applies one of its children; with a
    # single child the inner p=0.5 acts only as a selection weight, so the
    # brightness/contrast jitter is effectively applied on every sample —
    # confirm this is intended (a bare RandomBrightnessContrast(p=0.5) would
    # apply it half the time instead).
    A.OneOf([
        A.RandomBrightnessContrast(brightness_limit=0.1, contrast_limit=0.1, p=0.5),

    ], p=1),
    A.HorizontalFlip(p=0.5),
    A.VerticalFlip(p=0.5),
    A.RandomRotate90(p=0.5),
    # rotate_limit=1 restricts free rotation to +/-1 degree (small jitter);
    # larger rotations only come from RandomRotate90 above.
    A.ShiftScaleRotate(rotate_limit=1, p=0.5),
    # A.OneOf([
    #     A.ElasticTransform(p=0.5),
    #
    # ], p=1),
    #A.Normalize(mean=(0.5, 0.5,0.5),  max_pixel_value=255.0, p=1.0),
    A.Normalize(max_pixel_value=255.0,p=1.0),
    ToTensorV2(p=1.0),
])
# Use the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def exp_lr_scheduler(optimizer, epoch, init_lr=Config.lr, lr_decay_epoch=5):
    """Exponentially decay the learning rate by 0.8 every ``lr_decay_epoch`` epochs.

    Args:
        optimizer: torch optimizer whose param groups are updated in place.
        epoch: current epoch number (1-based as called from train_model).
        init_lr: starting learning rate (defaults to Config.lr).
        lr_decay_epoch: number of epochs per decay step.

    Returns:
        The same optimizer, with every param group's ``lr`` updated.
    """
    lr = init_lr * (0.8 ** (epoch / lr_decay_epoch))
    print('Learning Rate is {:.5f}'.format(lr))
    for param_group in optimizer.param_groups:
        # BUG FIX: torch optimizers read param_group['lr'] (lowercase).
        # The original wrote param_group['LR'], which added an unused key
        # and silently left the actual learning rate unchanged all run.
        param_group['lr'] = lr
    return optimizer
def train_model(model, criterion, optimizer, lr_sheduler, dataloader, data_size, model_name):
    """Train the model and save a checkpoint after every epoch.

    Args:
        model: network taking two image batches and returning a score per pair.
        criterion: loss function applied to (output, label).
        optimizer: torch optimizer over ``model.parameters()``.
        lr_sheduler: callable ``(optimizer, epoch) -> optimizer`` that adjusts
            the learning rate in place each epoch.
        dataloader: yields ``(input1, input2, label)`` batches.
        data_size: total number of training samples (for loss averaging / ETA).
        model_name: file stem of the checkpoint saved under ``Config.model_dir``.
    """
    train_loss = []  # loss history sampled every Config.printsize batches
    model = model.to(device)
    model.train(True)

    print('start training ...')
    for epoch in range(1, Config.epoch + 1):
        start_time = time.time()
        # BUG FIX: format arguments were swapped — the original printed
        # 'epoch{total}/{current-1}' instead of the current epoch / total.
        print('epoch {}/{}'.format(epoch, Config.epoch))
        print('-' * 10)
        optimizer = lr_sheduler(optimizer, epoch)
        running_loss = 0.0
        count = 0
        pred_all = []
        label_all = []
        for i, (input1, input2, label) in enumerate(dataloader):
            count += 1
            input1, input2, label = input1.to(device), input2.to(device), label.to(device)
            output = model(input1, input2)
            loss = criterion(output, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Collect per-sample scores and labels on CPU for the epoch AUC.
            pred_all.extend(output.data.cpu().numpy())
            label_all.extend(label.data.cpu().numpy())
            if i % Config.printsize == 0:
                spend_time = time.time() - start_time
                # Rough ETA: extrapolate elapsed time over the whole epoch.
                print(' Epoch:{}({}/{}) loss:{:.3f} epoch_Time:{}min'.format(
                    epoch, i,
                    data_size // Config.batch_size,
                    loss.item(),
                    spend_time / count * data_size / Config.batch_size // 60 - spend_time // 60))
                train_loss.append(loss.item())
            # Weight batch loss by batch size so epoch_loss is a true mean.
            running_loss += loss.item() * input1.size(0)
        # NOTE(review): roc_auc_score raises if an epoch sees only one class;
        # assumes labels contain both 0s and 1s — confirm for small datasets.
        print("AUC:{:.4f}".format(roc_auc_score(label_all, pred_all)))

        epoch_loss = running_loss / data_size
        print('Epoch:[{}/{}]\t Loss={:.5f}\t Acc={:.3f}'.format(
            epoch, Config.epoch, epoch_loss, roc_auc_score(label_all, pred_all)))
        # The checkpoint is overwritten every epoch (no best-model tracking).
        torch.save({'model': model.state_dict()}, Config.model_dir + model_name + '.pth')

if __name__ == '__main__':
    # Paired-image dataset: deterministic transform for the first image,
    # augmented transform for the second.
    train_set = Simdataset(Config.path,
                           img1_transformer=img1_transformer,
                           img2_transformer=train_transformer)
    loader = DataLoader(train_set,
                        batch_size=Config.batch_size,
                        shuffle=True,
                        num_workers=16)

    net = Siamese()
    # Binary similarity target -> BCE loss on the siamese output.
    loss_fn = nn.BCELoss().to(device)
    sgd = opt.SGD(net.parameters(),
                  lr=Config.lr,
                  momentum=Config.momentum,
                  weight_decay=0.0004)

    train_model(net, loss_fn, sgd, exp_lr_scheduler,
                loader, len(train_set), model_name=Config.model_name)