import sys
import torch
import torch.nn as nn
import numpy as np
import time
import os
from torch.utils.data import DataLoader
from torch.optim.lr_scheduler import  CosineAnnealingLR
from tqdm import tqdm

from model import model_unet_res50
from dataset import MyDataset
from data_aug import aug_fun
from val import val_fun
# ---- Hyper-parameters -------------------------------------------------------
NumEpoch = 120
# NOTE(review): this constant was declared as 3e-4 but the optimizer below
# hard-coded lr=1e-4, so the config lied about the effective learning rate.
# Kept the value that was actually used (1e-4) and wired the constant through.
Lr = 1e-4
BatchSize = 8

# ---- Experiment bookkeeping -------------------------------------------------
ExpeName = 'unet_res50_baseline'
LogDir = 'log/' + ExpeName + '.txt'               # per-experiment text log
ModelSavePath = '../model_save/new/' + ExpeName   # checkpoint directory
DataPath = '../suichang_round1_train_210120/suichang_round1_train_210120'
TrainJsonDir = 'label_file/train.json'
ValJsonDir = 'label_file/val.json'


# ---- Loss, model, optimizer, LR schedule ------------------------------------
loss_fun = nn.CrossEntropyLoss().cuda()
model = model_unet_res50.cuda()

optimizer = torch.optim.AdamW(model.parameters(), lr=Lr, weight_decay=1e-3)
# Cosine decay of the learning rate over the whole run.
scheduler = CosineAnnealingLR(optimizer, T_max=NumEpoch)


# ---- Data pipelines ----------------------------------------------------------
train_dataset = MyDataset(data_path=DataPath, name_list_dir=TrainJsonDir, transform=aug_fun)
train_dataloader = DataLoader(dataset=train_dataset, batch_size=BatchSize, shuffle=True, num_workers=1)
val_dataset = MyDataset(data_path=DataPath, name_list_dir=ValJsonDir, transform=None)
val_dataloader = DataLoader(dataset=val_dataset, batch_size=BatchSize, shuffle=False, num_workers=1)

os.makedirs(ModelSavePath, exist_ok=True)

## Fixed layout for the per-epoch progress lines printed to console and log.
header = (
    '\n'
    '        Train | Valid\n'
    'Epoch |  Loss |  Loss | Time, m\n'
)
# One row per epoch: epoch index, mean train loss, mean val IoU, elapsed
# minutes, separated by box-drawing vertical bars (U+2502).
raw_line = '{:6d}\u2502{:7.3f}\u2502{:7.3f}\u2502{:6.2f}'
print(header)
# NOTE(review): 'farm','land' look like one class name ('farmland') split in
# two — this list has 11 entries; confirm against the dataset's class count.
class_name = ['farm', 'land', 'forest', 'grass', 'road', 'urban_area',
              'countryside', 'industrial_land', 'construction',
              'water', 'bareland']

def train():
    """Run the full training loop.

    For each of ``NumEpoch`` epochs: train on ``train_dataloader``, validate
    with ``val_fun``, append metrics to the ``LogDir`` text log and stdout,
    and checkpoint ``best.pth`` whenever the mean validation IoU improves.
    Uses the module-level ``model``, ``optimizer``, ``scheduler``,
    ``loss_fun`` and dataloaders; returns nothing.
    """
    best_iou = 0
    # Context manager guarantees the log file is closed even if training raises
    # (the original opened it and never closed it).
    with open(LogDir, 'a') as log_file:
        for epoch in range(NumEpoch):
            model.train()
            losses = []
            start_time = time.time()
            for batch_img, batch_mask in tqdm(train_dataloader):
                batch_img = batch_img.cuda()
                batch_mask = batch_mask.cuda()
                optimizer.zero_grad()
                output = model(batch_img)
                loss = loss_fun(output, batch_mask)
                loss.backward()
                optimizer.step()

                losses.append(loss.item())

            # BUGFIX: scheduler.step() must run *after* the epoch's optimizer
            # steps (PyTorch >= 1.1). Stepping at epoch start skipped the
            # initial learning rate and triggered an ordering warning.
            scheduler.step()

            # viou: per-class validation IoU (numpy array) — assumed from
            # its use with .astype/np.mean; TODO confirm against val_fun.
            viou = val_fun(model, val_dataloader)
            mean_loss = np.array(losses).mean() if losses else float('nan')
            mean_iou = np.mean(viou)
            elapsed_min = (time.time() - start_time) / 60

            # Mirror every report line to both the log file and stdout.
            for out in (log_file, sys.stdout):
                print('\t'.join(viou.astype(str)), file=out, flush=True)
                print(raw_line.format(epoch, mean_loss, mean_iou, elapsed_min),
                      file=out, flush=True)

            # Keep only the checkpoint with the best mean validation IoU.
            if best_iou < mean_iou:
                best_iou = mean_iou
                for out in (log_file, sys.stdout):
                    print('***********************save best model ******************',
                          file=out, flush=True)
                torch.save(model.state_dict(), ModelSavePath + '/best.pth')


if __name__ == '__main__':
    # To resume from a previous checkpoint, uncomment the line below:
    # model.load_state_dict(torch.load('../model_save/new/unet_res50_baseline/best.pth'))
    train()



