import os
from timm.models import hrnet
from losses import city_loss
# Pin training to GPU 1; must be set before torch initializes CUDA,
# hence before the `import torch` below.
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
import torch
from torch import optim
from model.edge_model import SegUNet
from dataset.cityspces import CityScapes
from torch.utils.data import DataLoader
from tqdm import tqdm
from torch import nn
import time
from utils.metrics import Metrics
from utils import clc
import mlflow
# Experiment tracking: remote mlflow server on the local network.
mlflow.set_tracking_uri('http://192.168.10.78:5000')
mlflow.set_experiment('dsl_net_seg')


def run():
    """Train SegUNet (19 classes) on CityScapes and log to mlflow.

    Runs 600 epochs of SGD with cosine warm restarts, gradient clipping
    and OHEM cross-entropy. Every second epoch the model is evaluated
    (mIoU / frequency-weighted mIoU) and a checkpoint is written to
    /data/check/.

    Side effects: creates an mlflow run, writes metrics to the tracking
    server, saves checkpoint files. Requires a CUDA device.
    """
    with mlflow.start_run(run_name='convnext_unet_r2block'):
        train_data = CityScapes('/home/dsl/dataset/cityspace', mode='train')
        test_data = CityScapes('/home/dsl/dataset/cityspace', mode='val')

        trainloader = DataLoader(train_data, batch_size=8, shuffle=True, num_workers=8)
        # No shuffling for evaluation: metrics are order-independent and
        # a fixed order makes runs easier to compare/debug.
        testloader = DataLoader(test_data, batch_size=4, shuffle=False, num_workers=4)

        model = SegUNet(19)
        # OHEM keeps only the hardest 70% of pixels in the loss.
        cre_fun1 = city_loss.OhemCELoss(0.7)

        model.cuda()
        optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
        sch = optim.lr_scheduler.CosineAnnealingWarmRestarts(
            optimizer, T_0=10, T_mult=2, eta_min=1e-6, verbose=True)

        for epoch in range(600):
            losses = []
            model.train()
            for img, target in tqdm(trainloader):
                # torch.autograd.Variable is a deprecated no-op since
                # PyTorch 0.4 — plain tensors carry autograd state.
                img = img.cuda()
                target = target.cuda()
                pred = model(img)
                loss = cre_fun1(pred, target)
                optimizer.zero_grad()
                loss.backward()
                # Clip to keep OHEM spikes from destabilizing SGD.
                nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0, norm_type=2)
                optimizer.step()
                losses.append(loss.item())

            if losses:  # guard against an empty loader (ZeroDivisionError)
                mlflow.log_metric('cross_loss', sum(losses) / len(losses), step=epoch)

            # Scheduler stepped once per epoch (restart period T_0=10 epochs).
            sch.step()

            if epoch % 2 == 0:
                model.eval()
                me = Metrics(n_classes=19)
                # no_grad: evaluation needs no autograd graph; without it
                # the forward passes accumulate graph state and GPU memory.
                with torch.no_grad():
                    for img, target in tqdm(testloader):
                        img = img.cuda()
                        target = target.cuda()
                        pred = model(img)
                        # argmax directly on logits — softmax is monotonic,
                        # so applying it first cannot change the argmax.
                        pred = torch.argmax(pred, dim=1)
                        me.update(pred, target)
                metr = me.compute_metrics()
                print(metr)
                mlflow.log_metric('miou', metr['miou'], step=epoch)
                mlflow.log_metric('fw_miou', metr['fw_miou'], step=epoch)
                # Checkpoint on the same cadence as evaluation.
                torch.save(model.state_dict(), '/data/check/xag_city_new' + str(epoch) + '.pth')




# Entry point: start the training loop only when executed as a script.
if __name__ == '__main__':
    run()
