from operator import mod
import torch
from alike import ALike, configs
from tqdm import tqdm
import cv2
import glob
import os
from data.kitti import loadkitti, KITTI_IMG
from data.cityscape import loadcityscape
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
import torch.nn as nn
from demo import SimpleTracker
import numpy as np
from utils.gen_color import create_pascal_label_colormap, to_color_img


# Dataset roots.
kitti_path = '/Dataset/KITTI_SEG/training/'
city_path = ''  # TODO: fill in the Cityscapes root before training on it

# All four ALike variants evaluate with the same fine-tuned
# keypoint+segmentation checkpoint; build the path once instead of
# repeating the expression in every entry.
_KPT_SEG_WEIGHTS = os.path.join(os.path.split(__file__)[0], 'models', 'kitti', 'kptAndSeg.pth')

# Per-variant architecture hyper-parameters (encoder channel widths c1..c4,
# descriptor dimension, head type, NMS radius) plus the checkpoint to load.
eval_configs = {
    'alike-t': {'c1': 8, 'c2': 16, 'c3': 32, 'c4': 64, 'dim': 64, 'single_head': True, 'radius': 2,
                'model_path': _KPT_SEG_WEIGHTS},
    'alike-s': {'c1': 8, 'c2': 16, 'c3': 48, 'c4': 96, 'dim': 96, 'single_head': True, 'radius': 2,
                'model_path': _KPT_SEG_WEIGHTS},
    'alike-n': {'c1': 16, 'c2': 32, 'c3': 64, 'c4': 128, 'dim': 128, 'single_head': True, 'radius': 2,
                'model_path': _KPT_SEG_WEIGHTS},
    'alike-l': {'c1': 32, 'c2': 64, 'c3': 128, 'c4': 128, 'dim': 128, 'single_head': False, 'radius': 2,
                'model_path': _KPT_SEG_WEIGHTS},
}

def train_seg(dataset: str):
    """Train the segmentation head of an ALike model.

    Args:
        dataset: dataset name, either ``"kitti"`` or ``"cityscape"``.

    Raises:
        ValueError: if *dataset* is not a supported name.

    Side effects: writes TensorBoard logs under ``./log/logs_kitti`` and
    saves the final weights to ``models/<dataset>/kptAndSeg-constlr.pth``.
    """
    # Training device: GPU if available, else CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Build the model (tiny variant).
    model = ALike(**configs['alike-t'],
                  device=device,
                  top_k=-1,
                  scores_th=0.2,
                  n_limit=5000)

    # Dataset selection.
    if dataset == "kitti":
        data = loadkitti(kitti_path)
        loader = DataLoader(data, batch_size=1)
    elif dataset == "cityscape":
        # TODO: city_path is still empty — set it before training on Cityscapes.
        data = loadcityscape(city_path)
        loader = DataLoader(data, batch_size=2)
    else:
        # Previously an unknown name fell through silently and crashed later
        # with a NameError on `data`; fail fast with a clear message instead.
        raise ValueError(f"unsupported dataset: {dataset!r}")

    # Pixel-wise classification loss for the segmentation logits.
    loss_fn = nn.CrossEntropyLoss().to(device)

    learning_rate = 0.01
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)

    # Number of training epochs.
    epoch = 100

    writer = SummaryWriter("./log/logs_kitti")

    for i in range(epoch):
        print("-------第 {} 轮训练开始-------".format(i+1))

        # Training step.
        model.train()
        total_loss = 0.0
        for batch in tqdm(loader):
            imgs, marks = batch
            imgs = imgs.to(device)
            marks = marks.to(device)
            # model returns (keypoint pred, segmentation logits, _); only the
            # segmentation head is supervised here.
            _, seg, _ = model(imgs)
            loss = loss_fn(seg, marks.long())

            # .item() detaches the scalar; accumulating the tensor itself
            # would keep every iteration's autograd graph alive (memory leak).
            total_loss += loss.item()

            optimizer.zero_grad()  # clear gradients from the previous step
            loss.backward()        # backpropagate
            optimizer.step()       # update parameters

        # Average loss per sample over the epoch.
        avg_loss = total_loss / len(data)
        print("Loss: {}".format(avg_loss))
        writer.add_scalar("train_loss", avg_loss, i)

        scheduler.step()  # decay the learning rate

    # Ensure the target directory exists; torch.save does not create it and
    # would otherwise raise FileNotFoundError after 100 epochs of training.
    save_dir = os.path.join("models", dataset)
    os.makedirs(save_dir, exist_ok=True)
    torch.save(model.state_dict(), os.path.join(save_dir, "kptAndSeg-constlr.pth"))
    print("模型已保存")

    writer.close()

def demo():
    """Run keypoint detection + semantic segmentation on KITTI images.

    Displays the colorized segmentation stacked on top of the tracked
    keypoints in an OpenCV window until 'q' is pressed or the image
    stream ends. Requires a CUDA device.
    """
    model = ALike(**eval_configs['alike-t'],
                  device="cuda",
                  top_k=-1,
                  scores_th=0.2,
                  n_limit=5000)
    # 34 segmentation classes (KITTI label set) -> fixed color map.
    color = create_pascal_label_colormap(34)
    model = model.to("cuda")
    model.eval()
    tracker = SimpleTracker()

    cv2.namedWindow("kpdetAndSeg")
    image_loader = KITTI_IMG()
    runtime = []
    progress_bar = tqdm(image_loader)
    try:
        # Inference only — disable autograd bookkeeping to save memory/time.
        with torch.no_grad():
            for image, img in progress_bar:
                img = img.to("cuda").unsqueeze(0)  # add batch dimension
                # pred: keypoint outputs; seg: segmentation logits.
                pred, seg, _ = model(img, sub_pixel=False)
                # Colorize the segmentation output for display.
                seg = to_color_img(seg, color)
                kpts = pred['keypoints']
                desc = pred['descriptors']
                runtime.append(pred['time'])
                # `out` is the keypoint-track visualization image.
                out, N_matches = tracker.update(image, kpts, desc)

                # Stack segmentation above keypoint view for a single window.
                result = np.concatenate((seg, out), axis=0)
                ave_fps = (1. / np.stack(runtime)).mean()
                status = f"Fps:{ave_fps:.1f}, Keypoints/Matches: {len(kpts)}/{N_matches}"
                progress_bar.set_description(status)
                cv2.imshow("kpdetAndSeg", result)
                if cv2.waitKey(24) == ord('q'):
                    break
    finally:
        # Release the window even if the loop exits via an exception.
        cv2.destroyAllWindows()

if __name__ == "__main__":
    # Entry point: run the live demo by default; uncomment the line below
    # (and comment out demo()) to train the segmentation head instead.
    demo()
    # train_seg("kitti")