import os
os.environ["CUDA_VISIBLE_DEVICES"] = '0'  # Must be set before any GPU access, e.g. at the very top of the program.
import torch
from torch.autograd import Variable
from datetime import datetime
from lib.MSU_Net import MSU_Net
from lib.MACUNet import MACUNet
from lib.TransFuse import TransFuse_S
from dataset.zunYi.dataloader import getTrainLoader,getTestLoader
from utils.loss import get_loss
from utils.EvaluateUtils import AvgMeter,evaluate

from config import zhongShanParameter as para


def train(train_loader, test_loader, model, optimizer, epoch, scheduler=None):
    """Run one training epoch; every 5th epoch, evaluate and save a checkpoint.

    Args:
        train_loader: iterable yielding (images, gts) batches for training.
        test_loader: loader handed to ``evaluate()`` for periodic validation.
        model: network whose forward returns three outputs; only the last
            one is used for the loss here.
        optimizer: optimizer stepping ``model.parameters()``.
        epoch: current 1-based epoch index (used for logging and checkpoint
            naming/cadence).
        scheduler: unused; kept for backward-compatible call signature.

    Returns:
        None.
    """
    model.train()
    loss_record = AvgMeter()
    lossf = get_loss()
    # Compute locally rather than reading the module-level global defined in
    # __main__ — makes the function usable when imported from elsewhere.
    total_step = len(train_loader)
    for i, pack in enumerate(train_loader, start=1):
        # ---- data prepare ----
        images, gts = pack
        # Variable is deprecated since PyTorch 0.4; tensors track grad directly.
        images = images.cuda()
        gts = gts.cuda()

        # ---- forward ----
        _, _, output = model(images)

        # ---- loss function ----
        loss = lossf(output, gts)

        # ---- backward ----
        # Zeroing before backward (vs. after step) is the conventional order;
        # per-iteration behavior is identical to the original loop.
        optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), para.grad_norm)
        optimizer.step()

        # ---- recording loss ----
        # .item() extracts the Python scalar; `loss.data` is a deprecated idiom.
        loss_record.update(loss.item(), para.train_batch_size)

        # ---- train visualization ----
        if i % 50 == 0 or i == total_step:
            print('{} Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], '
                  '[lateral-2: {:.4f}]'.
                  format(datetime.now(), epoch, para.epoch, i, total_step,
                         loss_record.show()))

    save_path = 'zhongShanSnapshots/{}/'.format(para.train_save)
    os.makedirs(save_path, exist_ok=True)
    # Validate and checkpoint every 5 epochs (epochs 4, 9, 14, ... given the
    # (epoch+1) % 5 test, matching the original cadence).
    if (epoch + 1) % 5 == 0:
        result = evaluate(model, test_loader)
        print('epoch {} result is:{}'.format(epoch, result))
        torch.save(model.state_dict(), save_path + 'TransFuse-%d.pth' % epoch)
    return None





if __name__ == '__main__':

    # ---- build models ----
    # Single-channel 512x512 inputs; no pretrained backbone weights.
    model = TransFuse_S(img_size=512, in_chans=1, pretrained=False).cuda()
    params = model.parameters()
    # NOTE(review): learning rate is 10x the configured value — presumably a
    # deliberate tuning choice for Adam here; confirm against experiments.
    optimizer = torch.optim.Adam(params, para.learning_rate * 10, betas=(0.5, 0.99))

    # Alternative optimizer/scheduler configuration for BST-ST (kept for reference):
    # optimizer = torch.optim.SGD(params, lr = para.learning_rate, momentum = 0.9,weight_decay = 1e-4)
    # scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor = 0.35, verbose = 1,
    #                                                        min_lr = 0.0001, patience = 125)

    train_loader = getTrainLoader(para.cut2d_save_path, para.train2d_choose_id_path, batchsize=para.train_batch_size)
    test_loader = getTestLoader(para.cut2d_save_path, id_path=para.test2d_id_path, batchsize=1)
    # train() reads this module-level name for its progress logging.
    total_step = len(train_loader)

    print("#" * 20, "Start Training", "#" * 20)

    for epoch in range(1, para.epoch + 1):
        train(train_loader, test_loader, model, optimizer, epoch, None)