import torch
import rospy
import yaml
from alike import ALike, configs
from tqdm import tqdm
from tqdm.auto import trange
import cv2
# import glob
import os
from data.kitti import KITTI_IMG
from data.loaddata import LoadData
from torch.utils.tensorboard import SummaryWriter
import wandb
from torch.utils.data import DataLoader
import torch.nn as nn
from demo import SimpleTracker
import numpy as np
from utils.gen_color import create_pascal_label_colormap, to_color_img, label_img_to_color
import argparse
from seg import SimpleSeg
from deeplabv3 import DeepLabV3
import sys
sys.path.append("..")
from rospub.pub import PerceptionPublisher
# pytorch-metric-learning for global descriptor training
# from pytorch_metric_learning import losses, samplers, miners

# Dataset root directories, keyed as '<dataset>_path'; the training
# functions look a path up via `datapath[dataset + "_path"]`.
# segmentation datasets
datapath = {'kitti_path' : '/workspace/wzj/dataset/slam/kitti/data_odometry_color/sequences/00',
            'cityscape_path' : '/workspace/wzj/dataset/cityscape',

            # loop-closure detection / image-retrieval datasets
            'newcollege_path' : '/workspace/wzj/dataset/image_retrieval/New_College/Images',
            'oxford_path' : '/workspace/wzj/revisitop/data/datasets/roxford5k',
            'paris_path' : '/workspace/wzj/revisitop/data/datasets/rparis6k',
            'mapillary_path': '/workspace/wzj/dataset/image_retrieval/Mapillary'
}

# The heads are not trained jointly: after one head finishes training,
# load the new configuration below to train the next head.
# Training order: first the seg head, then the global head.
# Backbone hyper-parameters per ALike variant for global-descriptor
# training; every variant loads the same trained checkpoint.
_GLOBAL_CKPT = os.path.join(os.path.split(__file__)[0],
                            'models', 'global', 'KptSegGlb-non-const-lr.pth')

global_configs = {
    name: {'c1': c1, 'c2': c2, 'c3': c3, 'c4': c4, 'dim': dim,
           'single_head': single_head, 'radius': 2, 'model_path': _GLOBAL_CKPT}
    for name, (c1, c2, c3, c4, dim, single_head) in {
        'alike-t': (8, 16, 32, 64, 64, True),
        'alike-s': (8, 16, 48, 96, 96, True),
        'alike-n': (16, 32, 64, 128, 128, True),
        'alike-l': (32, 64, 128, 128, 128, False),
    }.items()
}

# Backbone hyper-parameters per ALike variant for segmentation-head
# training; every variant loads the same trained checkpoint.
_SEG_CKPT = os.path.join(os.path.split(__file__)[0],
                         'models', 'semantic', 'kptAndSeg.pth')

seg_configs = {
    name: {'c1': c1, 'c2': c2, 'c3': c3, 'c4': c4, 'dim': dim,
           'single_head': single_head, 'radius': 2, 'model_path': _SEG_CKPT}
    for name, (c1, c2, c3, c4, dim, single_head) in {
        'alike-t': (8, 16, 32, 64, 64, True),
        'alike-s': (8, 16, 48, 96, 96, True),
        'alike-n': (16, 32, 64, 128, 128, True),
        'alike-l': (32, 64, 128, 128, 128, False),
    }.items()
}

# train seg head with cityscape dataset
def train_seg(config, dataset: str='cityscape'):
    """Train the segmentation head of ALike on the given dataset.

    Args:
        config: parsed yaml config with 'train', 'val' and 'model' sections.
        dataset: dataset name; must have a matching '<dataset>_path' entry
            in the module-level `datapath` dict.
    """
    train_config = config['train']
    model_config = config['model']

    # Training device.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Build the model. In training mode the forward pass returns the
    # segmentation logits only (all three task heads are produced at
    # inference time) — see the calls below.
    model = ALike(**configs['alike-t'],
                  device=device,
                  top_k=-1,
                  scores_th=0.2,
                  n_limit=5000,
                  global_des=model_config['output_dim'],
                  num_class=model_config['num_class'])

    if train_config['gpus']:
        # Multi-GPU: DataParallel expects the wrapped module to live on
        # device_ids[0]; the original wrapped the CPU-resident model.
        model = torch.nn.DataParallel(model.to(device), device_ids=[0, 1])
    else:
        # Single device.
        model = model.to(device)

    # Datasets / loaders. Shuffle the training split so batches are not
    # presented in the same order every epoch.
    batch_size = train_config['batch_size']
    path = dataset + "_path"
    train_data = LoadData(dataset, datapath[path], mode='train')
    val_data = LoadData(dataset, datapath[path], mode='val')
    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True,
                              num_workers=train_config['num_workers'])
    val_loader = DataLoader(val_data, batch_size=1, num_workers=train_config['num_workers'])

    # Loss (unweighted cross entropy over class logits).
    loss_fn = nn.CrossEntropyLoss().to(device)

    learning_rate = train_config['lr']
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

    epoches = train_config['epoch']

    # TensorBoard logger.
    writer = SummaryWriter("./log/logs_seg")

    for epoch in range(1, epoches + 1):
        # ---- training ----
        model.train()
        total_loss = 0.0
        train_loop = tqdm(train_loader, leave=False)
        train_loop.set_description(f'Epoch [{epoch}/{epoches}]')
        tcount = 0
        for batch in train_loop:
            imgs, marks = batch
            imgs = imgs.to(device)
            marks = marks.to(device)
            segs = model(imgs)
            # Average loss over the batch.
            loss = loss_fn(segs, marks.long())
            # Accumulate a python float, not the loss tensor: keeping the
            # tensor alive would retain every batch's autograd graph for
            # the whole epoch and leak GPU memory.
            total_loss += loss.item()
            tcount += 1

            optimizer.zero_grad()  # clear gradients from the previous step
            loss.backward()        # backpropagate
            optimizer.step()       # update the network parameters

            del imgs, marks, segs, batch
            train_loop.set_postfix(loss=loss.item())

        # ---- validation ----
        model.eval()
        with torch.no_grad():
            total_pixcel_acc = 0.0
            total_miou = 0.0
            vcount = 0
            for batch in val_loader:
                imgs, marks = batch
                imgs = imgs.to(device)
                marks = marks.to(device)  # ground truth labels
                seg = model(imgs)
                # Prediction and label spatial sizes must agree.
                _, _, hs, ws = seg.shape
                _, hm, wm = marks.shape
                assert hs == hm and ws == wm
                batch_pixcel_acc, batch_miou = eval_metrics(seg, marks)
                total_pixcel_acc += batch_pixcel_acc.item()
                total_miou += batch_miou.item()
                vcount += 1

                del imgs, marks, batch, seg
            writer.add_scalar('Val1129/pixcel_acc', total_pixcel_acc / vcount, epoch)
            writer.add_scalar('Val1129/miou', total_miou / vcount, epoch)

        # Log the epoch's average training loss.
        writer.add_scalar("Train1129/train_loss", total_loss / tcount, epoch)

    # Save the trained weights (unwrap DataParallel first when needed).
    save_path = os.path.join("models", "semantic", "kptAndSeg.pth")
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    if train_config["gpus"]:
        torch.save(model.module.state_dict(), save_path)
    else:
        torch.save(model.state_dict(), save_path)
    print("模型已保存")  # "model saved"

    writer.close()

# Uses mapillary as the default training set.
def train_globalDes(config, dataset: str='mapillary'):
    """Train the global-descriptor head of ALike with triplet loss and
    evaluate recall@N on the validation split every epoch.

    Args:
        config: parsed yaml config with 'train', 'val' and 'model' sections.
        dataset: dataset name; must have a matching '<dataset>_path' entry
            in the module-level `datapath` dict (Mapillary-SLS layout).
    """
    # Local imports: faiss builds the nearest-neighbour index used for
    # the recall@N evaluation below.
    import faiss
    from Mapillary.mapillary_sls.datasets.generic_dataset import ImagesFromList
    from Mapillary.mapillary_sls.utils.utils import configure_transform
    
    # Configuration sections.
    train_config = config['train']
    val_config = config['val']
    model_config = config['model']
    
    # Training device.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # Build the model.
    model = ALike(**configs['alike-t'], 
                  device=device,
                  top_k=-1,
                  scores_th=0.2,
                  n_limit=5000,
                  global_des=model_config['output_dim'],
                  num_class = model_config['num_class'])

    # Multi-GPU: wrap in DataParallel.
    # NOTE(review): this branch never calls .to(device) before wrapping —
    # DataParallel expects the module on device_ids[0]; confirm the model
    # actually ends up on GPU in the multi-GPU path.
    if train_config['gpus']:
        model = torch.nn.DataParallel(model, device_ids=[0, 1])
    else:
    # Single-device path.
        model = model.to(device)
    # Datasets.
    path = dataset + "_path"
    train_data = LoadData(dataset, datapath[path], mode='train')
    val_data = LoadData(dataset, datapath[path], mode='val')
    # loader = DataLoader(data, batch_size=2)
    # (The commented-out section below comes with the mapillary example code.)
    '''
        下面这部分是mapillary自带的
    '''
    # divides dataset into smaller cache sets
    # data.new_epoch()

    # creates triplets on the smaller cache set
    # data.update_subcache()

    # create data loader
    # opt = {'batch_size': 1, 'shuffle': False}
    # trainDataloader = DataLoader(data, **opt)

    # LOSS
    loss_fn = nn.TripletMarginLoss()
    loss_fn.to(device)

    # miner = miners.MultiSimilarityMiner()
    # miner = miner.to(device)

    # l2 = losses.MultiSimilarityLoss()
    # loss_fn = nn.TripletMarginLoss()
    # loss_fn = loss_fn.to(device)
    
    learning_rate = train_config['lr']
    # trainable = filter(lambda x: x.requires_grad, model.parameters())
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99)

    epoches = train_config['epoch']
    
    writer = SummaryWriter("./log/logs_globalDes")

    # Early-stopping bookkeeping (the early-stop logic further down is
    # currently commented out, so these are unused).
    not_improved = 0
    best_score = 0
    for epoch in range(1, epoches+1):   
        '''
            TODO: pack the func in the future
        '''
        # Training step
        model.train()
        epoch_loss = 0
        train_data.new_epoch()
        loop = trange(train_data.nCacheSubset, leave=False)
        loop.set_description(f'Epoch [{epoch}/{epoches}]')
        count = 0
        # for subIter in trange(data.nCacheSubset, desc='Cache refresh'.rjust(15), position=1, leave=False):
        for _ in loop:
            # Refresh the triplet cache before each sub-iteration.
            train_data.update_subcache()

            trainDataloader = DataLoader(dataset=train_data, num_workers=train_config['num_workers'],
                                            batch_size=1, shuffle=True,
                                             pin_memory=True)
            for batch in trainDataloader:
                # imgs: [1, nNeg * seq_length, 3, h, w]
                imgs, img_indices = batch
                n_size = train_data.nNeg
                imgs = imgs.squeeze(0)
                imgs = imgs.to(device)
                img_indices = img_indices.squeeze(0)
                img_indices = img_indices.to(device)

                # Training forward returns three outputs; the third is the
                # global descriptor embedding.
                _, _, embeddings = model(imgs)
                # [num, des_dim] split into 1 query, 1 positive, nNeg negatives.
                Query, Positive, Negative = torch.split(embeddings, [1, 1, n_size])
                # Accumulate triplet loss over each negative
                # (assumes nNeg >= 1, otherwise loss stays an int 0).
                loss = 0
                for neg in Negative:
                    neg = neg.unsqueeze(0)
                    loss += loss_fn(Query, Positive, neg)

                loss = loss.mean()
                optimizer.zero_grad() # clear gradients from the previous step
                loss.backward()       # backpropagate
                optimizer.step()      # update the network parameters
                
                del batch, imgs, img_indices, embeddings
                del Query, Positive, Negative
                
                batch_loss = loss.item()
                epoch_loss += batch_loss
                count += 1
                loop.set_postfix(loss = batch_loss)

        # Validation step
        '''
            TODO: pack the func in the future
        '''
        model.eval()
        meta = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
        transform = configure_transform(image_dim = (480, 640), meta = meta)
        # Build query/database evaluation loaders.
        qEval_set = ImagesFromList(val_data.qImages, transform)
        dbEval_set = ImagesFromList(val_data.dbImages, transform)
        qLoader = DataLoader(qEval_set, num_workers=val_config['num_workers'])
        dbLoader = DataLoader(dbEval_set, num_workers=val_config['num_workers'])
        
        with torch.no_grad():
            # Query and database feature arrays.
            qFeat = np.empty((len(qEval_set), model_config['output_dim']), dtype=np.float32)
            dbFeat = np.empty((len(dbEval_set), model_config['output_dim']), dtype=np.float32)

            # Extract embeddings for every query and database image.
            for feat, test_data_loader in zip([qFeat, dbFeat], [qLoader, dbLoader]):
                for input_data, indices in test_data_loader:
                    input_data = input_data.to(device)
                    # NOTE(review): here the model output is used directly as
                    # the embedding, while the training loop unpacked three
                    # outputs — presumably ALike returns only the global
                    # descriptor in eval mode; verify against ALike.forward.
                    embeddings = model(input_data)
                    feat[indices.detach().numpy(), :] = embeddings.detach().cpu().numpy()
                    del input_data, embeddings

        del qLoader, dbLoader

        # Building faiss index
        # NOTE(review): this index is immediately rebuilt per city in the
        # loop below; this whole-database add appears to be unused.
        faiss_index = faiss.IndexFlatL2(model_config['output_dim'])
        faiss_index.add(dbFeat)
        
        # Calculating recall @ N
        n_values = [1, 5, 10, 20, 50, 100]
        
        # for each query get those within threshold distance
        gt = val_data.all_pos_indices
        
        # Search per city: qFeat/dbFeat are concatenated city by city, so
        # running offsets slice out each city's rows.
        qEndPosTot = 0
        dbEndPosTot = 0
        for cityNum, (qEndPos, dbEndPos) in enumerate(zip(val_data.qEndPosList, val_data.dbEndPosList)):
            faiss_index = faiss.IndexFlatL2(model_config['output_dim'])
            faiss_index.add(dbFeat[dbEndPosTot:dbEndPosTot+dbEndPos, :])
            _, preds = faiss_index.search(qFeat[qEndPosTot:qEndPosTot+qEndPos, :], max(n_values))
            if cityNum == 0:
                predictions = preds
            else:
                predictions = np.vstack((predictions, preds))
            qEndPosTot += qEndPos
            dbEndPosTot += dbEndPos
        
        correct_at_n = np.zeros(len(n_values))
        # TODO can we do this on the matrix in one go?
        for qIx, pred in enumerate(predictions):
            for i, n in enumerate(n_values):
                # if in top N then also in top NN, where NN > N
                if np.any(np.in1d(pred[:n], gt[qIx])):
                    correct_at_n[i:] += 1
                    break
        recall_at_n = correct_at_n / len(val_data.qIdx)

        all_recalls = {}  # make dict for output
        for i, n in enumerate(n_values):
            all_recalls[n] = recall_at_n[i]
            tqdm.write("====> Recall@{}: {:.4f}".format(n, recall_at_n[i]))
            writer.add_scalar('Val/Recall@' + str(n), recall_at_n[i], epoch)

        # is_best = all_recalls[5] > best_score
        # if is_best:
        #     not_improved = 0
        #     best_score = all_recalls[5]
        # else:
        #     not_improved += 1
        # # if not improvement, means the model converged and stop training
        # if int(train_config['patience']) > 0 and not_improved > (int(train_config['patience']) / int(train_config['evalEvery'])):
        #     print('Performance did not improve for', train_config['patience'], 'epochs. Stopping.')
        #     break
        
        # Log metrics with TensorBoard.
        avg_loss = epoch_loss / count
        writer.add_scalar("training_loss", avg_loss, epoch)

        if epoch % 20 == 0: 
            scheduler.step()      # decay the learning rate
        #         print("Loss: {}".format(batch_loss))
        #         print("learning_rate:{}".format(optimizer.param_groups[-1]['lr']))
        writer.add_scalar("learning_rate", optimizer.param_groups[-1]['lr'], epoch)
    
    # Save the trained weights (unwrap DataParallel first when needed).
    if train_config["gpus"]:
        torch.save(model.module.state_dict(), os.path.join("models", dataset, "KptSegGlb-non-const-lr.pth"))
    else:
        torch.save(model.state_dict(), os.path.join("models", dataset, "KptSegGlb-non-const-lr.pth"))
    print("模型已保存")  # "model saved"
        
    writer.close()


def demo():
    """Run one validation image through the segmentation model and write
    the colorized prediction to ./pred.png."""
    # Load the trained segmentation weights into a DeepLabV3 backbone.
    net = DeepLabV3()
    weights = torch.load("./models/semantic/kptAndSeg.pth")
    net.load_state_dict(weights)
    net = net.to("cuda")
    net.eval()
    tracker = SimpleTracker()

    # Validation split of the cityscape dataset.
    dataset = "cityscape"
    path = dataset + "_path"
    image_loader = LoadData(dataset, datapath[path], mode="val")
    runtime = []
    progress_bar = tqdm(image_loader)
    for image, cvimg in progress_bar:
        # Add a batch dimension and move the tensor to the GPU.
        batched = image.unsqueeze(0).to("cuda")
        prediction = net(batched)
        # Convert the class-id map to an RGB visualization and save it.
        colored = label_img_to_color(prediction)
        cv2.imwrite("./pred.png", colored)
        break

def single_infer(img):
    """Segment a single image file and display the colorized result.

    Args:
        img: filesystem path to the input image.

    Raises:
        FileNotFoundError: if the image cannot be read from disk.
    """
    frame = cv2.imread(img)
    # cv2.imread returns None instead of raising on a bad path.
    if frame is None:
        raise FileNotFoundError(f"could not read image: {img}")
    color = create_pascal_label_colormap(10)
    # HWC uint8 BGR -> NCHW float in [0, 1].
    image = torch.from_numpy(frame).to(torch.float32).permute(2, 0, 1).unsqueeze(0) / 255.0

    model = ALike(**seg_configs['alike-t'],
                  device="cpu",
                  top_k=-1,
                  scores_th=0.2,
                  n_limit=5000)
    _, seg, _ = model(image, sub_pixel=False)
    seg = to_color_img(seg, color)
    cv2.imshow("Seg", seg)
    # A single blocking waitKey is enough; the original waited twice and
    # only closed the window when the second key pressed was 'q'.
    cv2.waitKey()
    cv2.destroyAllWindows()
   
    
def eval_metrics(predict, target, ignore_label=255):
    """Compute pixel accuracy and mean IoU for a batch of predictions.

    Args:
        predict: logits tensor of shape [batch, num_class, height, width].
        target: ground-truth label tensor of shape [batch, height, width].
        ignore_label: label value excluded from both metrics.

    Returns:
        Tuple of scalar tensors (pixel_acc, mIoU).
    """
    # Drop pixels carrying the ignore label so they affect neither metric.
    target_mask = (target != ignore_label)  # [batch, height, width]
    target = target[target_mask]            # [num_valid_pixels]
    _, num_class, _, _ = predict.size()
    predict = predict.permute(0, 2, 3, 1)   # [batch, height, width, num_class]

    # Pixel accuracy over the valid pixels only.
    predict = predict[target_mask.unsqueeze(-1).repeat(1, 1, 1, num_class)].view(-1, num_class)
    predict = predict.argmax(dim=1)
    num_pixels = target.numel()
    correct = (predict == target).sum()
    # Guard against an all-ignored image (num_pixels == 0 would give NaN).
    pixel_acc = correct / max(num_pixels, 1)

    # Shift labels to 1..num_class so histc bins line up and 0 can mark
    # "no match" in the intersection map (0 falls below histc's min=1).
    predict = predict + 1
    target = target + 1
    intersection = predict * (predict == target).long()
    area_inter = torch.histc(intersection.float(), bins=num_class, max=num_class, min=1)
    area_pred = torch.histc(predict.float(), bins=num_class, max=num_class, min=1)
    area_label = torch.histc(target.float(), bins=num_class, max=num_class, min=1)
    union = area_pred + area_label - area_inter

    # mIoU is the mean of per-class IoU over classes that actually appear.
    # The previous mean(inter)/mean(union) collapses to sum(inter)/sum(union),
    # which is a frequency-weighted ratio, not mIoU.
    present = union > 0
    if present.any():
        mIoU = (area_inter[present] / union[present]).mean()
    else:
        mIoU = torch.zeros_like(pixel_acc)
    return pixel_acc, mIoU

if __name__ == "__main__":
    # CLI entry point: either run the demo, or train one of the two heads
    # from a yaml config.
    cli = argparse.ArgumentParser()
    cli.add_argument('--demo', action='store_true', default=False, help="demo")
    cli.add_argument('--head', type=str, help="'seg' or 'des'")
    cli.add_argument('--config', type=str, help="yaml file in 'configs'")
    opts = cli.parse_args()

    if opts.demo:
        demo()
    else:
        # TODO: to be modified
        with open(opts.config, 'r') as cfg_file:
            cfg = yaml.safe_load(cfg_file)
            if opts.head == 'seg':
                train_seg(cfg)
            elif opts.head == 'des':
                train_globalDes(cfg)
            else:
                raise NotImplementedError("only support seg head and des head")
