import numpy as np

from model.fpn import FPN
from model.repvit import *
from model.fpn_head import FPNHead
from dataset.ADE20 import ADE20K
from model.SegMatch import SegMatch
from loss.Segmatchloss import *
from loss.sampler import NghSampler2DS
from loss.Descloss import PixelAPLoss
import yaml
import torch
from tqdm import tqdm
from abc import ABC, abstractmethod
from path import Path
from dataset import *
from tools.dataloader import *
import cv2
import dataset.data_utils as dutils
from PIL import Image as Im
import matplotlib.pyplot as plt

# ---------------------------------------------------------------------------
# Dataset "recipes": each value below is a Python expression kept as a string.
# Trainer.__init__ substitutes a selection of them into `default_dataloader`
# and eval()'s the result to build the training set.  Names referenced inside
# the strings (SyntheticPairDataset, web_images, aachen_db_images, ...) are
# presumably provided by the star imports from `dataset` -- verify there.
# NOTE(review): eval() is only acceptable because these strings are
# hard-coded here, never user-supplied.
# ---------------------------------------------------------------------------

# Small synthetic-pair dataset over a local 'imgs' folder, for debugging.
toy_db_debug = """SyntheticPairDataset(
    ImgFolder('imgs'), 
            'RandomScale(640,1024,can_upscale=True)', 
            'RandomTilting(0.5), PixelNoise(25)')"""

# Synthetic pairs generated from web images.
db_web_images = """SyntheticPairDataset(
    web_images, 
        'RandomScale(640,1024,can_upscale=True)',
        'RandomTilting(0.5), PixelNoise(25)')"""

# Synthetic pairs generated from the Aachen database images.
db_aachen_images = """SyntheticPairDataset(
    aachen_db_images, 
        'RandomScale(640,1024,can_upscale=True)', 
        'RandomTilting(0.5), PixelNoise(25)')"""

# Aachen style-transfer pairs with extra synthetic augmentation on top.
db_aachen_style_transfer = """TransformedPairs(
    aachen_style_transfer_pairs,
            'RandomScale(640,1024,can_upscale=True), RandomTilting(0.5), PixelNoise(25)')"""

# Aachen pairs with precomputed optical flow (a bare dataset name, no wrapper).
db_aachen_flow = "aachen_flow_pairs"


# One-letter keys so a selection string like "WASF" can pick and order sources.
data_sources = dict(
    D = toy_db_debug,
    W = db_web_images,
    A = db_aachen_images,
    F = db_aachen_flow,
    S = db_aachen_style_transfer,
    )

# Loader template; the `data` placeholder is replaced with the comma-joined
# selected recipes before eval() (see Trainer.__init__).
default_dataloader = """PairLoader(CatPairDataset(`data`),
    scale   = 'RandomScale(640,1024,can_upscale=True)',
    distort = 'ColorJitter(0.2,0.2,0.2,0.1)',
    crop    = 'RandomCrop((480,640))')"""

def generate_query_kpts(img, num_pts, h, w, mode='sift'):
    """Detect up to *num_pts* keypoints in *img*, returned in homogeneous coords.

    Args:
        img: RGB image as an HxWx3 numpy array.
        num_pts: maximum number of SIFT features to detect.
        h, w: image height/width (currently unused; kept for interface
            compatibility with callers).
        mode: keypoint detector to use; only 'sift' is implemented.

    Returns:
        (N, 3) float array whose rows are [x, y, 1].

    Raises:
        ValueError: if *mode* names an unsupported detector.
    """
    if mode == 'sift':
        gray1 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        sift = cv2.SIFT_create(nfeatures=num_pts)
        kp1 = sift.detect(gray1)
        # Homogeneous coordinates: append a constant 1 to each (x, y).
        coord = np.array([[kp.pt[0], kp.pt[1], 1] for kp in kp1])
    else:
        # FIX: previously an unknown mode fell through and raised an opaque
        # UnboundLocalError on `coord`; fail fast with a clear message instead.
        raise ValueError("unsupported keypoint mode: {!r}".format(mode))

    return coord
def mnn_matcher(descriptors_a, descriptors_b):
    """Match two descriptor sets by mutual nearest neighbour.

    Similarity is the plain dot product.  A pair (i, j) is kept only when
    b[j] is a's best match for a[i] *and* a[i] is b's best match for b[j].

    Returns:
        (N, 2) int numpy array of (index_in_a, index_in_b) pairs.
    """
    dev = descriptors_a.device
    similarity = descriptors_a @ descriptors_b.t()
    # Best neighbour in each direction.
    best_in_b = torch.max(similarity, dim=1)[1]
    best_in_a = torch.max(similarity, dim=0)[1]
    row_ids = torch.arange(0, similarity.shape[0], device=dev)
    # Keep only the pairs that agree both ways (cycle-consistency).
    mutual = (row_ids == best_in_a[best_in_b])
    pairs = torch.stack([row_ids[mutual], best_in_b[mutual]])
    return pairs.t().data.cpu().numpy()

def cycle(iterable):
    """Yield items from *iterable* forever, restarting iteration each pass.

    Unlike itertools.cycle, no items are cached: iter() is re-invoked on
    every pass, so a shuffling DataLoader produces a fresh order each time
    instead of replaying its first epoch.
    """
    while True:
        yield from iterable

class Trainer(ABC):
    """Training driver for SegMatch on synthetic image pairs.

    Reads a YAML config (``args.config``), builds the model, a pixel-AP
    descriptor loss, an optimizer with per-module learning rates, and an
    eval()-assembled pair dataloader; ``train()`` then runs the epoch loop,
    checkpointing and rendering match visualisations for one frozen
    validation batch.

    Expected config keys: checkpoint_name, optimal_modules, optimal_lrs,
    optimizer, lr_decay_step, lr_decay_factor, data_config_train, epoch,
    epoch_step, log_freq, val_config and (optionally) load_path.
    """

    def __init__(self, args):
        self.args = args
        with open(self.args.config, 'r') as f:
            self.config = yaml.load(f, Loader=yaml.FullLoader)
        self.save_root = Path('./ckpts_test/{}'.format(self.config['checkpoint_name']))

        # Optional checkpoint to resume from.
        ckpt_path = None
        if self.config.get('load_path') is not None:
            ckpt_path = Path(self.config['load_path'])

        # Model and per-module learning rates: optimal_modules[i] is trained
        # with optimal_lrs[i]; modules not listed are not optimised.
        self.model = SegMatch(self.config)
        parameters = []
        for module_name, module_lr in zip(self.config['optimal_modules'], self.config['optimal_lrs']):
            tmp_module = getattr(self.model, module_name)
            parameters.append({'params': tmp_module.parameters(), 'lr': module_lr})
        if ckpt_path is not None:
            # FIX: was self.logger.info(...), but no logger is ever created on
            # this class, so resuming from a checkpoint raised AttributeError.
            print('load checkpoint from {}'.format(ckpt_path))
            self.model.load_checkpoint(ckpt_path)

        # Descriptor loss: average-precision loss over sampled pixel
        # correspondences.
        sampler = NghSampler2DS(ngh=7, subq=-4, subd=1, pos_d=3, neg_d=5, border=8,
                                subd_neg=-4, maxpool_pos=True,
                                scaling_step=2)
        self.loss = PixelAPLoss(sampler).cuda()

        # Optimizer class is selected by name from torch.optim.
        tmp_optimizer = getattr(torch.optim, self.config['optimizer'])
        self.optimizer = tmp_optimizer(parameters)
        self.scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer,
                                                         step_size=self.config['lr_decay_step'],
                                                         gamma=self.config['lr_decay_factor'])

        # Training data: the dataset expression is assembled from the recipe
        # strings at module level and eval()'d.  NOTE(review): eval is only
        # acceptable because those strings are hard-coded, never user input.
        db = [data_sources[key] for key in "WASF"]
        dataset_train = eval(default_dataloader.replace('`data`', ','.join(db)).replace('\n', ''))
        self.train_loader = torch.utils.data.DataLoader(dataset_train,
                                                        batch_size=self.config['data_config_train']['batchsize'],
                                                        num_workers=self.config['data_config_train']['num_workers'],
                                                        shuffle=True)

        # Freeze one batch on the GPU for periodic visualisation.
        # NOTE(review): validation re-uses the training dataset; a dedicated
        # val split looks intended (see ADE20K mode="val") -- confirm.
        val_loader = torch.utils.data.DataLoader(dataset_train,
                                                 batch_size=self.config['data_config_train']['batchsize'],
                                                 num_workers=self.config['data_config_train']['num_workers'],
                                                 shuffle=True)
        val_iter = iter(cycle(val_loader))
        self.val_data = next(val_iter)
        for key, value in self.val_data.items():
            self.val_data[key] = value.to("cuda")
        del val_loader, val_iter

    def train(self):
        """Run the full training loop.

        Per epoch: iterate the train loader, periodically render validation
        visualisations, plot a live loss curve, checkpoint every 1000 steps
        and at epoch end, and step the LR scheduler.
        """
        batch_size_val = self.val_data['img1'].shape[0]
        # Save the untrained model as epoch 000 so later epochs can be
        # compared against the starting point.
        epoch_path = self.save_root / '{:>03d}'.format(0)
        epoch_path.makedirs_p()
        self.model.save_checkpoint(epoch_path)

        for epoch in range(self.config['epoch']):
            epoch += 1  # checkpoint folders are numbered from 1
            epoch_path = self.save_root / '{:>03d}'.format(epoch)
            epoch_path.makedirs_p()
            # One visualisation folder per element of the validation batch.
            batch_path_list = []
            for i in range(batch_size_val):
                batch_path = epoch_path / '{}'.format(i)
                batch_path.makedirs_p()
                batch_path_list.append(batch_path)

            # epoch_step > 0 caps the number of iterations per epoch.
            if self.config['epoch_step'] > 0:
                total_steps = self.config['epoch_step']
            else:
                total_steps = len(self.train_loader)
            bar = tqdm(self.train_loader, total=int(total_steps), ncols=80)
            bar.set_description(
                "{}/{}".format(epoch, self.config['epoch']))
            self.model.set_train()

            losses = np.zeros(total_steps)
            for idx, inputs in enumerate(bar):
                if idx >= total_steps:
                    # FIX: enforce the epoch_step cap; previously the loop ran
                    # over the whole loader and losses[idx] raised IndexError
                    # whenever epoch_step < len(train_loader).
                    break
                for key, value in inputs.items():
                    inputs[key] = value.to("cuda")

                self.model.set_eval()
                if idx % self.config['log_freq'] == 0:
                    self.val_and_vis(batch_path_list, idx)
                    torch.cuda.empty_cache()
                # val_and_vis leaves the model in eval mode; flip the trained
                # sub-modules back to train mode before the forward pass.
                for module in self.config['optimal_modules']:
                    tmp_module = getattr(self.model, module)
                    tmp_module.train()
                outputs = self.model(inputs)
                loss = self.loss(outputs, inputs)
                print("loss: {}".format(loss))

                # Live loss curve, refreshed every iteration.
                losses[idx] = loss.item()  # FIX: store a plain float, not the CUDA tensor
                plt.figure(1)
                plt.clf()
                plt.title('Training Loss')
                plt.xlabel('Iterations')
                plt.ylabel('Loss')
                plt.plot(losses[:idx + 1], 'b-')
                plt.xlim(0, total_steps)
                plt.ylim(0, 1.5)  # adjust the y-axis range to the actual loss scale
                plt.grid(True)
                plt.pause(0.01)  # brief pause so the figure can refresh

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                if idx % 1000 == 0:
                    self.model.save_checkpoint(epoch_path)
                torch.cuda.empty_cache()
            self.model.save_checkpoint(epoch_path)
            # FIX: the StepLR scheduler was created in __init__ but never
            # stepped, so the configured lr decay never took effect.
            self.scheduler.step()
            save_path = epoch_path + '/loss_plot.png'
            plt.savefig(save_path)
            plt.close()

    @torch.no_grad()
    def val_and_vis(self, batch_path_list, idx):
        """Render SIFT-keypoint matches for the frozen validation batch.

        For each validation pair: detect SIFT keypoints in both images,
        sample the predicted descriptor maps at those locations, match them
        with mutual nearest neighbour, colour each match by its ground-truth
        flow error, and save side-by-side visualisations to
        ``batch_path_list[i]/0_matches_less`` (top-k only) and
        ``.../1_matches_all`` (every match), named ``{idx}.jpg``.
        """
        from scipy.spatial.distance import cdist  # hoisted out of the per-image loop

        val_config = self.config['val_config']
        self.model.set_eval()
        outputs = self.model(self.val_data)
        mid_pad = 20  # blank gap (pixels) between the two images

        preds1 = outputs['preds1']
        preds2 = outputs['preds2']

        b, c, h, w = self.val_data['img1'].shape

        all_images = ['0_matches_less', '1_matches_all']

        for i, cur_path in enumerate(batch_path_list):
            for image_folder in all_images:
                tmp_path = cur_path / image_folder
                if not tmp_path.exists():
                    tmp_path.makedirs_p()
            # img{1,2}_meta are indexed as HxWxC below -- assumes channel-last
            # uint8-range images; TODO confirm against the dataloader.
            cur_img1 = self.val_data['img1_meta'][i, ...]
            cur_img2 = self.val_data['img2_meta'][i, ...]
            # Side-by-side canvas with a mid_pad-wide black separator.
            comb_img = torch.cat((cur_img1, torch.zeros_like(cur_img1)[:, :mid_pad, :], cur_img2), dim=1)

            # SIFT keypoints (x, y) per image, rounded to pixel centres.
            cur_kps1 = generate_query_kpts(cur_img1.cpu().numpy(), 1024, h, w)[:, :2].round()
            cur_kps2 = generate_query_kpts(cur_img2.cpu().numpy(), 1024, h, w)[:, :2].round()
            cur_kps1 = torch.tensor(cur_kps1).cuda().float()
            cur_kps2 = torch.tensor(cur_kps2).cuda().float()
            cur_score1 = torch.ones_like(cur_kps1)[..., 0:1]
            cur_score2 = torch.ones_like(cur_kps2)[..., 0:1]

            # Sample descriptors at the normalized keypoint locations.
            cur_kps1_n = normalize_coords(cur_kps1, h, w).unsqueeze(0)
            cur_kps2_n = normalize_coords(cur_kps2, h, w).unsqueeze(0)

            cur_desc1 = sample_feat_by_coord(preds1['xf'][i:i + 1, ...],
                                             cur_kps1_n, False).squeeze(0)
            cur_desc2 = sample_feat_by_coord(preds2['xf'][i:i + 1, ...],
                                             cur_kps2_n, False).squeeze(0)
            cur_matches = mnn_matcher(cur_desc1, cur_desc2)
            cur_matchkp1 = cur_kps1[cur_matches[:, 0], :2]
            cur_matchkp2 = cur_kps2[cur_matches[:, 1], :2]
            # Scores are all ones here, so topk effectively takes the first
            # vis_topk matches.
            cur_kpscore_m1 = cur_score1[cur_matches[:, 0], :1]
            cur_kpscore_m2 = cur_score2[cur_matches[:, 1], :1]
            cur_kpscore = cur_kpscore_m1 + cur_kpscore_m2.to(cur_score1)
            _, topk_idx = cur_kpscore.topk(min(val_config['vis_topk'], cur_kpscore.shape[0]), dim=0)

            # Ground-truth correspondence of each matched kp1, looked up in
            # the dense flow map.  FIX: was bound to the misspelled `gt_macth`.
            gt_match = self.val_data['aflow'][i, ...][:, cur_matchkp1[:, 1].long(), cur_matchkp1[:, 0].long()]
            gt_match = torch.concatenate((gt_match[0].unsqueeze(1), gt_match[1].unsqueeze(1)), axis=1)

            # Per-match pixel error = distance between predicted and GT point
            # (diagonal of the pairwise-distance matrix), clamped for display.
            dists = cdist(gt_match.cpu().numpy(), cur_matchkp2.cpu().numpy())
            dists = torch.diag(torch.tensor(dists)).unsqueeze(1)
            dists = dists.clamp(min=0, max=val_config['vis_err_thr']).repeat(1, 2)

            # Map error onto the RdYlGn colormap (green = low error).
            match_color = dutils.tensor2array(val_config['vis_err_thr'] - dists,
                                              max_value=val_config['vis_err_thr'], colormap='RdYlGn')[:3, :,
                          :1].transpose(1, 2, 0)
            match_color = (255 * match_color).astype(np.uint8)
            match_color = cv2.cvtColor(match_color, cv2.COLOR_RGB2BGR).squeeze(1)

            cur_matchkp1_less = cur_matchkp1[topk_idx, :2]
            cur_matchkp2_less = cur_matchkp2[topk_idx, :2]
            match_color_less = match_color[topk_idx.cpu().numpy()[:, 0], :3]

            # Convert to plain tuples/arrays for OpenCV drawing.
            cur_matchkp1 = list(map(tuple, cur_matchkp1.reshape(-1, 2).cpu().numpy()))
            cur_matchkp2 = list(map(tuple, cur_matchkp2.reshape(-1, 2).cpu().numpy()))
            match_color = list(map(tuple, match_color))
            cur_matchkp1_less = list(map(tuple, cur_matchkp1_less.reshape(-1, 2).cpu().numpy()))
            cur_matchkp2_less = list(map(tuple, cur_matchkp2_less.reshape(-1, 2).cpu().numpy()))
            match_color_less = list(map(tuple, match_color_less))
            comb_img = comb_img.cpu().numpy()

            # FIX: the two near-identical drawing loops are deduplicated into
            # one helper.
            self._draw_matches(comb_img, cur_matchkp1_less, cur_matchkp2_less, match_color_less,
                               w, mid_pad, cur_path / '0_matches_less/{}.jpg'.format(idx))
            self._draw_matches(comb_img, cur_matchkp1, cur_matchkp2, match_color,
                               w, mid_pad, cur_path / '1_matches_all/{}.jpg'.format(idx))

    @staticmethod
    def _draw_matches(comb_img, matchkp1, matchkp2, colors, w, mid_pad, save_path):
        """Draw coloured match lines and green endpoints on a copy of the
        combined RGB image, then save it as a JPEG at *save_path*."""
        canvas = cv2.cvtColor(comb_img, cv2.COLOR_RGB2BGR)  # cvtColor copies; comb_img untouched
        for kp1, kp2, color in zip(matchkp1, matchkp2, colors):
            # Shift kp2 into the right-hand image of the combined canvas.
            kp2_comb = (int(kp2[0] + w + mid_pad), int(kp2[1]))
            kp1_comb = (int(kp1[0]), int(kp1[1]))
            color = (int(color[0]), int(color[1]), int(color[2]))
            cv2.line(canvas, kp1_comb, kp2_comb, color, thickness=2)
            cv2.circle(canvas, kp1_comb, radius=2, color=(0, 255, 0), thickness=-1)
            cv2.circle(canvas, kp2_comb, radius=2, color=(0, 255, 0), thickness=-1)
        canvas = cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB)
        Im.fromarray(canvas.astype(np.uint8)).save(save_path)

