from SemanticNet import SemanticNet
import torch
import torch.nn as nn
from torch.autograd import Variable
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"                                       #change1
import argparse
from datetime import datetime
from utils.dataSetLoader import get_loader, test_dataset
from utils.utils import clip_gradient, AvgMeter, poly_lr
from utils.loss import structure_loss, dice_loss
import torch.nn.functional as F
import numpy as np


# ---- global training state & reproducibility setup ----
# Create the log directory first: open(..., "a") raises FileNotFoundError
# if "log/" does not exist yet.
os.makedirs("log", exist_ok=True)
log = open("log/train_detail.txt", "a")
# Seed every RNG source (CPU torch, CUDA torch, numpy) for reproducibility.
torch.manual_seed(23)
torch.cuda.manual_seed(23)
np.random.seed(23)
# benchmark=True lets cuDNN pick the fastest conv algorithms for fixed
# input sizes (trainsize is constant at 448, so this is safe).
torch.backends.cudnn.benchmark = True
# Best validation MAE seen so far; 2 is an impossible-high sentinel since
# MAE of a sigmoid-normalized map is bounded by 1.
best_mae = 2
best_epoch = 0
def get_trained_model(finished_epochs, checkpoint_dir="./checkpoints/SemanticNet"):
    """Resume a SemanticNet from the checkpoint of the last finished epoch.

    Args:
        finished_epochs: number of epochs already completed; the checkpoint
            named ``SemanticNet-{finished_epochs-1}.pth`` is loaded (epoch
            indices are 0-based at save time).
        checkpoint_dir: directory holding the snapshots (defaults to the
            path used by ``train``).

    Returns:
        The restored model, moved to the GPU.
    """
    pth_path = os.path.join(checkpoint_dir, "SemanticNet-{}.pth".format(finished_epochs - 1))
    model = SemanticNet()
    # map_location="cpu" makes the load robust to checkpoints saved from a
    # different GPU index; the model is moved to the GPU right after.
    model.load_state_dict(torch.load(pth_path, map_location="cpu"))
    model.cuda()
    return model

def train(train_loader, model, optimizer, epoch, all_epochs, batchsize, total_step, save_path="SemanticNet"):
    """Train the model for one epoch.

    Args:
        train_loader: yields ``(images, gts, category)`` batches; a category
            of -1 marks an unlabeled sample, which gets pseudo-labeled from
            the classifier's own prediction.
        model: network returning ``(class logits, detail map, output map)``.
        optimizer: optimizer stepped once per batch.
        epoch: current 0-based epoch index.
        all_epochs: total number of epochs (used for logging and the final
            checkpoint decision).
        batchsize: batch size, used to weight the running loss averages.
        total_step: number of batches per epoch (for logging).
        save_path: subdirectory of ``checkpoints/`` for snapshots.

    Side effects: prints progress, appends to the module-level ``log`` file,
    and saves a checkpoint every 20 epochs and on the final epoch.
    """
    model.train()
    criterion = nn.CrossEntropyLoss()

    loss_dice_record, loss_bce_iou_record = AvgMeter(), AvgMeter()
    for i, (images, gts, category) in enumerate(train_loader, start=1):
        optimizer.zero_grad()
        # ---- data prepare ---- (Variable is deprecated; plain tensors suffice)
        images = images.cuda()
        gts = gts.cuda()
        category = category.cuda()

        # ---- forward ----
        classfier, detail_map, out = model(images)
        # Upsample both prediction maps to the input resolution before the loss.
        detail_map = F.interpolate(detail_map, images.size()[2:], mode='bilinear', align_corners=False)
        out = F.interpolate(out, images.size()[2:], mode='bilinear', align_corners=False)

        # Pseudo-label unlabeled samples (category == -1) with the classifier's
        # argmax prediction — vectorized replacement for the per-sample loop.
        # argmax returns indices, so no gradient flows into the targets.
        unlabeled = category == -1
        if unlabeled.any():
            category[unlabeled] = classfier.argmax(dim=1)[unlabeled]

        loss_classfication = criterion(classfier, category)
        loss_dice = dice_loss(detail_map, gts)
        loss_BCE_IOU = structure_loss(out, gts)

        # Weighted multi-task objective; the structure (BCE+IOU) term dominates.
        loss = 0.5 * loss_classfication + loss_dice + 3 * loss_BCE_IOU
        loss.backward()

        optimizer.step()

        # ---- recording loss ---- (.item() instead of the deprecated .data)
        loss_dice_record.update(loss_dice.item(), batchsize)
        loss_bce_iou_record.update(loss_BCE_IOU.item(), batchsize)

        # ---- train visualization ----
        if i % 10 == 0 or i == total_step:
            msg = ('Time:{} Epoch [{:02d}/{:02d}], Step [{:04d}/{:04d}], '
                   '[loss_Classfication: {:.4f}], [loss_detail: {:.4f}], [loss_out: {:.4f}]'.
                   format(datetime.now().strftime('%Y-%m-%d %H:%M:%S'), epoch + 1, all_epochs, i, total_step,
                          loss_classfication.item(), loss_dice_record.avg, loss_bce_iou_record.avg))
            print(msg)
            log.write(msg + '\n')

    save_path = 'checkpoints/{}/'.format(save_path)
    os.makedirs(save_path, exist_ok=True)
    # Snapshot every 20 epochs and always on the very last epoch.
    if (epoch + 1) % 20 == 0 or (epoch + 1) == all_epochs:
        ckpt = save_path + 'SemanticNet-%d.pth' % epoch
        torch.save(model.state_dict(), ckpt)
        print('[Saving Snapshot:]', ckpt)
        log.write('[Saving Snapshot:]' + ckpt + ' \n')
        
# Validation helper: computes MAE on the validation set and tracks the best epoch.
def Val_Save(val_loader, model, epoch, save_path):
    """Validate the model and checkpoint the best-performing weights.

    Computes the mean absolute error (MAE) between the sigmoid + min-max
    normalized saliency prediction and the ground-truth mask, averaged over
    the loader. Updates the module-level ``best_mae`` / ``best_epoch`` and,
    from epoch 10 onwards, saves ``<save_path>SemanticNet-best.pth`` whenever
    the MAE improves.

    Note: the original body mixed a tab with spaces around the save call,
    which is a TabError under Python 3; re-indented consistently here.
    """
    global best_mae, best_epoch  # must be declared global to rebind them
    print("validing")
    model.eval()
    with torch.no_grad():
        mae_sum = 0
        for i, (images, gts, category) in enumerate(val_loader, start=1):
            images = images.cuda()
            gts = gts.cuda()
            _, _, res = model(images)
            res = F.interpolate(res, images.size()[2:], mode='bilinear', align_corners=False)
            res = res.sigmoid()
            # Min-max normalize to [0, 1] before comparing against the GT mask.
            res = (res - res.min()) / (res.max() - res.min() + 1e-8)
            mae_sum += torch.sum(torch.abs(res - gts)) * 1.0 / (gts.shape[0] * gts.shape[2] * gts.shape[3])
        mae = mae_sum / len(val_loader)

        log.write("Epoch: [{}], MAE: [{}], last_bestMAE: [{}] last_best_epoch: [{}]\n".format(epoch, mae, best_mae, best_epoch))
        print('Epoch: [{}], MAE: [{}], last_bestMAE: [{}] last_best_epoch: [{}].'.format(epoch, mae, best_mae, best_epoch))
        if epoch == 0:
            # First epoch establishes the baseline; nothing is saved yet.
            best_mae = mae
            best_epoch = 0
        elif mae < best_mae:
            best_mae = mae
            best_epoch = epoch
            # Skip saving during the first 10 warm-up epochs.
            if epoch >= 10:
                torch.save(model.state_dict(), save_path + 'SemanticNet-best.pth')
                print('Save state_dict successfully! Best epoch:{}.'.format(epoch))
                log.write('Save state_dict successfully! Best epoch:{}.'.format(epoch))


if __name__ == '__main__':

    lr = 1e-4
    train_path = "./Dataset/TrainDataset"
    epochs = 100
    trained = 0  # epochs already finished; set > 0 to resume from a checkpoint

    # ---- build models ----
    # Construct the network exactly once: resume from disk when requested,
    # otherwise start fresh (previously a fresh model was built and then
    # immediately discarded on resume).
    if trained > 0:
        model = get_trained_model(trained)
    else:
        model = SemanticNet().cuda()
    save_path = 'checkpoints/{}/'.format("SemanticNet")

    optimizer = torch.optim.Adam(model.parameters(), lr)

    image_root = '{}/Imgs/'.format(train_path)
    gt_root = '{}/GT/'.format(train_path)

    # NOTE(review): val_loader is never consumed below — Val_Save is defined
    # but not invoked anywhere; confirm whether per-epoch validation was
    # intended before relying on best_mae / best_epoch.
    train_loader, val_loader = get_loader(image_root, gt_root, batchsize=16, trainsize=448)
    totalstep = len(train_loader)

    print("Start Training")
    log.write("Start Training" + '\n')

    for epoch in range(trained, epochs):
        poly_lr(optimizer, lr, epoch, epochs)  # polynomial LR decay per epoch
        print("Time:{}           Start epoch {}".format(datetime.now(), epoch))
        log.write("Time:{}           Start epoch {} \n".format(datetime.now(), epoch))
        train(train_loader, model, optimizer, epoch, epochs, batchsize=16, total_step=totalstep)

    log.close()
