# coding = utf-8
import os
import datetime
import time
import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F

from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
import torch.optim as optim
import torchvision.transforms as standard_transforms
import random
import numpy as np
# import glob
import pandas as pd

from data_loader_SPARCS import ToTensorNorm, CloudDataset

from model import BoundaryNets
from joint_trainsform import RandomHorizontallyFlip # , RandomRotate, RandomGaussianBlur
# from utils import clip_gradient, adjust_lr

# for visualDL
from PIL import Image
# from visualdl import LogWriter

import pytorch_iou

from model.sync_batchnorm.replicate import patch_replication_callback

# ------- 0. define rely function --------
# enable cudnn accelerate
# NOTE: benchmark=True lets cuDNN autotune conv algorithms for fixed input
# sizes (faster), at the cost of run-to-run nondeterminism.
torch.backends.cudnn.benchmark = True
# set the GPU id
# Restrict training to GPUs 1 and 2; the DataParallel branch below keys off
# the number of ids listed here.
os.environ["CUDA_VISIBLE_DEVICES"] = "1,2"


# define the global seed
def set_seed(seed=20210313):
    """Seed every RNG used by this script for reproducible training.

    Args:
        seed: integer seed applied to Python's ``random``, NumPy, the hash
            randomization env var, and PyTorch (CPU and all CUDA devices).
    """
    random.seed(seed)
    np.random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    torch.manual_seed(seed)
    # Fix: also seed every visible CUDA device — without this, GPU-side
    # randomness (e.g. dropout, weight init on device) is not reproducible.
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

# make sure the path exists
def ensure_dir(dir_path):
    """Create *dir_path* (including parent directories) if it does not exist.

    Uses ``exist_ok=True`` instead of the previous ``isdir()`` check followed
    by ``makedirs()``, which had a check-then-create race: another process
    creating the directory between the two calls would crash this one.
    """
    os.makedirs(dir_path, exist_ok=True)


# ------- 1. define loss function --------

# BCE over sigmoid probability maps. `size_average` has been deprecated since
# PyTorch 0.4; `reduction='mean'` is the supported equivalent spelling.
bce_loss = nn.BCELoss(reduction='mean')
# IoU loss from the project-local pytorch_iou package, which keeps its own
# `size_average` argument.
iou_loss = pytorch_iou.IOU(size_average=True)

def dl_loss(pred, target):
    """Hybrid per-map loss: binary cross-entropy plus IoU.

    Args:
        pred: predicted probability map (post-sigmoid).
        target: ground-truth mask of the same shape.

    Returns:
        The scalar sum of the BCE and IoU losses.
    """
    return bce_loss(pred, target) + iou_loss(pred, target)
    

def muti_loss_fusion(d0, d1, d2, d3, d4, d5, d6, d7, labels_v):
    """Supervise all eight side-output maps against the same target.

    Args:
        d0..d7: the network's eight prediction maps (d0 is the final output).
        labels_v: ground-truth mask shared by every side output.

    Returns:
        Tuple ``(loss_d0, total)`` where ``loss_d0`` is the loss on the final
        map alone and ``total`` is the sum over all eight maps.
    """
    side_losses = [dl_loss(side, labels_v)
                   for side in (d0, d1, d2, d3, d4, d5, d6, d7)]
    return side_losses[0], sum(side_losses)


# ------- 2. set the train details --------
# NOTE(review): 2021130 differs from set_seed's default 20210313 — this looks
# like a typo; confirm which seed the published results actually used.
set_seed(2021130)

# train details
start_epoch = 0            # overwritten below when resuming from a checkpoint
epoch_num = 200            # epochs to run starting at start_epoch
batch_size_train = 6
batch_size_val = 6
learn_rate = 1e-05
save_interval_epoch = 10   # write a checkpoint every N epochs

is_resume_train = False
path_checkpoint = "REPLACE WITH YOUR RESUME TRAIN MODEL PATH"

model_dir = "./saved_models/"
ensure_dir(model_dir)


# ------- 3. load the train dataset --------
# Each line of the txt file identifies one training sample; the raw lines are
# handed to CloudDataset as-is (presumably file paths — verify against
# data_loader_SPARCS).
train_txt = "/home/data/wukang/datasets/l8/train.txt"
with open(train_txt, 'r',encoding='utf-8') as f:
    train_list = f.readlines()

# Validation is disabled for this run: the valid loader below iterates an
# empty list, so the valid_* accumulators in the loop never change.
valid_list = []

train_num = len(train_list)
val_num = len(valid_list)

print("---")
print("train images: ", len(train_list))
print("valid images: ", len(valid_list))
print("---")

## for train
salobj_dataset = CloudDataset(
    file_name_list=train_list,
    transform=transforms.Compose([
        ToTensorNorm(),
        # RandomHorizontallyFlip(),
    ]))
salobj_dataloader = DataLoader(salobj_dataset, batch_size=batch_size_train, shuffle=True, num_workers=4)

## for valid
salobj_dataset_valid = CloudDataset(
    file_name_list=valid_list,
    transform=transforms.Compose([
        ToTensorNorm(),
        # RandomHorizontallyFlip(),
    ]))
salobj_dataloader_valid = DataLoader(salobj_dataset_valid, batch_size=batch_size_val, shuffle=False, num_workers=4)


# ------- 4. define model --------
# define the net
# BoundaryNets(10, 1): presumably 10 input bands and 1 output mask channel —
# TODO confirm against the model definition.
net = BoundaryNets(10, 1)
if torch.cuda.is_available():
    # Wrap in DataParallel only when more than one GPU id is visible, and
    # patch the replication callback so synchronized BatchNorm works across
    # the replicas.
    if len(os.environ["CUDA_VISIBLE_DEVICES"].split(",")) >1:
        net = nn.DataParallel(net)
        patch_replication_callback(net)
    net = net.cuda()

# ------- 5. define optimizer --------
print("---define optimizer...")
optimizer = optim.Adam(net.parameters(), lr=learn_rate, betas=(0.9, 0.999), eps=1e-08, weight_decay=0)



# ------- 6. training process --------
# load the resume train param
if is_resume_train:
    checkpoint = torch.load(path_checkpoint)
    net.load_state_dict(checkpoint['model_state_dict'])
    optimizer.load_state_dict(checkpoint['optim_state_dict'])
    start_epoch = checkpoint['epoch'] + 1

print("---start training...")
print("Start time:" + str(datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S')))

# running totals for the console statistics
ite_num = 0             # total iterations since training began (never reset)
running_loss = 0.0      # sum of the fused 8-term loss in the current epoch
running_tar_loss = 0.0  # sum of the d0-only loss in the current epoch
ite_num4val = 0         # iterations within the current epoch (reset per epoch)

# NOTE(review): validation accumulators are never updated anywhere in this
# loop (valid_list is empty), so these stay at 0.0.
valid_loss = 0.0
valid_target_loss = 0.0

train_start_time = time.time()
time_for_each_epoch = []  # per-epoch wall-clock durations, written out at the end
for epoch in range(start_epoch, start_epoch+epoch_num):
    net.train()
    start_time = time.time()
    # optimizer.param_groups[0]['lr'] = 0.01 * (1 - epoch / (epoch_num + 1))
    for i, data in enumerate(salobj_dataloader):
        ite_num = ite_num + 1
        ite_num4val = ite_num4val + 1

        inputs, labels = data['image'], data['mask']

        inputs = inputs.type(torch.FloatTensor)
        labels = labels.type(torch.FloatTensor)

        # wrap them in Variable
        # NOTE(review): Variable has been a no-op since PyTorch 0.4; plain
        # tensors (with .cuda()) would behave identically here.
        if torch.cuda.is_available():
            inputs_v, labels_v = Variable(inputs.cuda(), requires_grad=False), Variable(labels.cuda(),
                                                                                        requires_grad=False)
        else:
            inputs_v, labels_v = Variable(inputs, requires_grad=False), Variable(labels, requires_grad=False)

        # y zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        # the net returns eight side-output maps (d0..d7); all are supervised
        d0, d1, d2, d3, d4, d5, d6, d7 = net(inputs_v)
        loss2, loss = muti_loss_fusion(d0, d1, d2, d3, d4, d5, d6, d7, labels_v)

        loss.backward()
        # clip_gradient(optimizer, clip)
        optimizer.step()

        # # print statistics
        running_loss += loss.data.item()
        running_tar_loss += loss2.data.item()

        print("[epoch: %3d/%3d, batch: %5d/%5d, ite: %d] train loss: %3f, tar: %3f, tar loss: %3f " % (
            epoch + 1, epoch_num, (i + 1) * batch_size_train, train_num, ite_num, running_loss / ite_num4val,
            running_tar_loss / ite_num4val, loss2.item()))

        # drop graph references so GPU memory is released before the next batch
        del d0, d1, d2, d3, d4, d5, d6, d7, loss2, loss


    # snapshot built every epoch but only written to disk on the interval below
    checkpoint_dict = {
        'epoch': epoch,
        'model_state_dict': net.state_dict(),
        'optim_state_dict': optimizer.state_dict()
    }

    if (epoch+1) % save_interval_epoch == 0:
        torch.save(checkpoint_dict, model_dir + "epoch%d_boundarynets_bsi_itr_%d_train_%3f_tar_%3f.pth" % (
            epoch+1, ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val))

    # reset the per-epoch accumulators and record this epoch's duration
    running_loss = 0.0
    running_tar_loss = 0.0
    valid_loss = 0.0
    valid_target_loss = 0.0
    ite_num4val = 0
    end_time = time.time()
    time_for_each_epoch.append(end_time - start_time)
    print(end_time - start_time)

print('-------------Congratulations! Training Done!!!-------------')
print('-'*20, "Now save the log file of the train processing...", '-'*20)

# BUG FIX: time_for_each_epoch holds floats, but file.writelines() requires
# strings — the original call raised TypeError at the very end of training
# (and writelines adds no newlines either way). Format one duration per line.
with open("time_recoder.txt", "w", encoding="utf-8") as f:
    f.writelines(f"{t}\n" for t in time_for_each_epoch)
print('-'*20, "Done!", '-'*20)
print("End time:" + str(datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S')))