import numpy as np
import torch
import cv2

from models.solver import Solver
from models.model import SegmentModel, ClassifyModel, UnsupervisedModel
from utils.oss_file import upload_context2file, aliyun_prefix, upload_image
from utils.funcs import denorm
from utils.mysql_crud import set_predict_by_id_batch, create_connection, select_mask_color_by_dataset_id, select_data
import torch.nn.functional as F
from scipy.ndimage import gaussian_filter
from matplotlib import cm
from PIL import Image


def post_process(probability, threshold, min_size):
    """Binarize a predicted mask and discard small connected components.

    :param probability: predicted mask as a 2-D float array
    :param threshold: pixel threshold used for binarization
    :param min_size: components whose pixel count is <= min_size are dropped
    :return: (binary mask with surviving components set to 1, number of kept components)
    """
    binary = cv2.threshold(probability, threshold, 1, cv2.THRESH_BINARY)[1]
    n_labels, labels = cv2.connectedComponents(binary.astype(np.uint8))
    kept = np.zeros(probability.shape, np.float32)
    count = 0
    for label in range(1, n_labels):
        region = labels == label
        if region.sum() > min_size:
            kept[region] = 1
            count += 1
    return kept, count


def get_segment_results(predict_masks, process_flag=True, thresholds=None, minareas=None):
    """Optionally post-process a batch of segmentation predictions in place.

    :param predict_masks: predictions of shape [batch, class_num, height, width],
        values in [0, 1]
    :param process_flag: when True, apply the per-class pixel threshold and
        minimum-connected-area filter, producing binary masks
    :param thresholds: per-class pixel thresholds (required when process_flag=True)
    :param minareas: per-class minimum component areas (required when process_flag=True)
    :return: predict_masks — binarized when process_flag=True, otherwise returned untouched
    :raises ValueError: if process_flag is True but thresholds or minareas is missing
    """
    if not process_flag:
        return predict_masks
    # Fail loudly instead of the opaque TypeError the defaults used to trigger.
    if thresholds is None or minareas is None:
        raise ValueError("thresholds and minareas are required when process_flag=True")
    for index, sample_masks in enumerate(predict_masks):
        for each_class, pred in enumerate(sample_masks):
            pred_binary, _ = post_process(pred.detach().cpu().numpy(),
                                          thresholds[each_class],
                                          minareas[each_class])
            predict_masks[index, each_class] = torch.from_numpy(pred_binary)
    return predict_masks


def stack_mask(image, mask, class_color):
    """Overlay per-class prediction masks onto the original images.

    :param image: batch of images, shape [b, 3, h, w], torch.Tensor, values in [0, 1]
    :param mask: batch of per-class masks, shape [b, num_class, h, w], torch.Tensor
    :param class_color: per-class [r, g, b] colors, values in [0, 1]
    :return: blended images of shape [b, 3, h, w], clipped to [0, 1]
    """
    # (num_class, 3) color table on the same device/dtype as the images.
    colors = torch.as_tensor(np.asarray(class_color), dtype=image.dtype, device=image.device)
    # One broadcasted contraction over the class axis replaces the original
    # per-image, per-class Python loop: overlay[b, d, h, w] = sum_c mask[b, c, h, w] * color[c, d].
    overlay = torch.einsum('bchw,cd->bdhw',
                           mask.to(device=image.device, dtype=image.dtype),
                           colors)
    return torch.clip(image + overlay, 0, 1)


def cal_anomaly_map(ft_list, fs_list, out_size=(224, 224), uni_am=False, use_cos=True, amap_mode='add',
                    gaussian_sigma=4, weights=None):
    """Compute per-pixel anomaly maps from paired feature lists.

    :param ft_list: list of original/teacher feature tensors, each [b, c_i, h_i, w_i]
    :param fs_list: list of reconstructed/student feature tensors, shapes matching ft_list
    :param out_size: (height, width) of the returned map; tuple or list accepted
        (was a mutable list default; now an immutable tuple, converted internally)
    :param uni_am: when True, L2-normalize and resize all levels to the first level's
        spatial size and compare them as one concatenated feature map
    :param use_cos: use 1 - cosine similarity as the distance; otherwise L2 distance
    :param amap_mode: 'add' accumulates (weighted) per-level maps; any other value multiplies them
    :param gaussian_sigma: sigma of the final Gaussian smoothing; <= 0 disables it
    :param weights: optional per-level weights for 'add' mode; defaults to all ones
    :return: (anomaly_map np.ndarray of shape [b, *out_size], list of per-level maps)
    """
    out_size = list(out_size)  # list needed for the shape concatenations below
    bs = ft_list[0].shape[0]
    weights = weights if weights else [1] * len(ft_list)
    anomaly_map = np.ones([bs] + out_size) if amap_mode == 'mul' else np.zeros([bs] + out_size)
    a_map_list = []
    if uni_am:
        size = (ft_list[0].shape[2], ft_list[0].shape[3])
        # Build local resized lists instead of overwriting the caller's lists in place.
        ft_resized = [F.interpolate(F.normalize(ft, p=2), size=size, mode='bilinear', align_corners=True)
                      for ft in ft_list]
        fs_resized = [F.interpolate(F.normalize(fs, p=2), size=size, mode='bilinear', align_corners=True)
                      for fs in fs_list]
        ft_map, fs_map = torch.cat(ft_resized, dim=1), torch.cat(fs_resized, dim=1)
        if use_cos:
            a_map = 1 - F.cosine_similarity(ft_map, fs_map, dim=1)
            a_map = a_map.unsqueeze(dim=1)
        else:
            a_map = torch.sqrt(torch.sum((ft_map - fs_map) ** 2, dim=1, keepdim=True))
        a_map = F.interpolate(a_map, size=out_size, mode='bilinear', align_corners=True)
        a_map = a_map.squeeze(dim=1).cpu().detach().numpy()
        anomaly_map = a_map
        a_map_list.append(a_map)
    else:
        for i in range(len(ft_list)):
            ft = ft_list[i]
            fs = fs_list[i]
            if use_cos:
                a_map = 1 - F.cosine_similarity(ft, fs, dim=1)
                a_map = a_map.unsqueeze(dim=1)
            else:
                a_map = torch.sqrt(torch.sum((ft - fs) ** 2, dim=1, keepdim=True))
            a_map = F.interpolate(a_map, size=out_size, mode='bilinear', align_corners=True)
            a_map = a_map.squeeze(dim=1).cpu().detach().numpy()
            a_map_list.append(a_map)
            if amap_mode == 'add':
                anomaly_map += a_map * weights[i]
            else:
                anomaly_map *= a_map
        if amap_mode == 'add':
            # NOTE(review): with all weights equal to 1 this divides by n^2, not n —
            # a simple weighted mean would divide by sum(weights) alone. Kept as-is
            # to preserve existing behavior; confirm the intended scaling.
            anomaly_map /= (len(ft_list) * sum(weights))
    if gaussian_sigma > 0:
        for idx in range(anomaly_map.shape[0]):
            anomaly_map[idx] = gaussian_filter(anomaly_map[idx], sigma=gaussian_sigma)
    return anomaly_map, a_map_list


class ClassifyResult:
    """Run a trained classification model over a dataloader and persist predictions."""

    def __init__(self, config, device, dataloader):
        self.config = config
        self.device = device
        self.dataloader = dataloader
        # Build the model and restore its weights from the configured checkpoint.
        self.model = ClassifyModel(config.classify_model.name, pretrained=False,
                                   class_num=config.classify_model.class_num,
                                   **config.classify_model.parameters).to(device)

        self.solver = Solver(self.model, self.device)
        self.solver.load_checkpoint_from_oss(config.classify_model.checkpoint_link, config.classify_model.local_path)
        self.model.eval()

        self.connection = create_connection()
        self.res = []

    def get_result(self, image, threshold=0.5):
        """Predict per-class scores for a batch and binarize them at `threshold`."""
        if self.config.classify_model.tta_flag:
            pred = self.solver.tta(image, seg=False)
        else:
            pred = self.solver.forward(image)
        return pred > threshold

    def do_task(self):
        """Run inference over the whole dataloader, then upload (prediction, id) pairs."""
        for ids, images in self.dataloader:
            output = self.get_result(images.to(self.device))
            for sample_id, sample_pred in zip(ids, output):
                self.res.append((str(sample_pred.detach().cpu().int().tolist()), sample_id.item()))
        self.upload()

    def upload(self):
        """Persist collected predictions to the database and close the connection."""
        set_predict_by_id_batch(self.connection, self.res)
        self.connection.close()


class SegmentResult:
    """Run a trained segmentation model over a dataloader and persist rendered overlays."""

    def __init__(self, config, device, dataloader):
        self.config = config
        self.device = device
        self.dataloader = dataloader
        # Build the model and restore its weights from the configured checkpoint.
        self.model = SegmentModel(config.segment_model.name, pretrained=False, class_num=config.segment_model.class_num,
                                  **config.segment_model.parameters).to(device)
        self.solver = Solver(self.model, device)
        self.solver.load_checkpoint_from_oss(config.segment_model.checkpoint_link, config.segment_model.local_path)
        self.model.eval()

        self.connection = create_connection()
        self.res = []
        self.get_thresholds_minareas()

    def get_result(self, image, process_flag=True):
        """Predict masks for a batch; optionally binarize via threshold/min-area filtering.

        :param image: batch of images on the model device
        :param process_flag: when True, apply the per-class thresholds and minimum
            connected-component areas loaded from the database
        :return: predicted masks [batch, class_num, h, w]; binary when process_flag=True
        """
        if self.config.segment_model.tta_flag:
            preds = self.solver.tta(image, seg=True).detach()
        else:
            preds = self.solver.forward(image).detach()
        # Delegate to the module-level helper instead of duplicating its loop here.
        return get_segment_results(preds, process_flag, self.best_thresholds, self.best_minareas)

    def get_thresholds_minareas(self):
        """Load the per-class thresholds and min-areas stored for this training task."""
        # NOTE(review): train_task_id is interpolated directly into the SQL string.
        # Acceptable for a trusted integer from config, but parameterize the query
        # if this value can ever come from user input.
        query = f"SELECT other_res FROM train_task WHERE id = {self.config.train_task_id}"
        res = select_data(self.connection, query)
        # NOTE(review): eval() on database content executes arbitrary code if the row
        # is ever attacker-controlled — ast.literal_eval is the safe alternative for
        # plain dict literals; left unchanged pending confirmation of the stored format.
        res = [eval(item[0]) for item in res][0]
        self.best_thresholds = res['best_thresholds']
        self.best_minareas = res['best_minareas']

    def do_task(self):
        """Render predictions onto the source images and collect (upload-url, id) pairs."""
        class_color = np.array(self.config.dataset.mask_color) / 255.0
        for ids, images in self.dataloader:
            preds = self.get_result(images.to(self.device))
            images = denorm(images, self.config.dataset.mean, self.config.dataset.std)
            img_pred = stack_mask(images, preds, class_color)
            self.res.extend(
                (upload_image((ot.permute(1, 2, 0).detach().cpu().numpy() * 255).astype(np.uint8)), id.item()) for
                id, ot in zip(ids, img_pred))
        self.upload()

    def upload(self):
        """Persist collected results to the database and close the connection."""
        set_predict_by_id_batch(self.connection, self.res)
        self.connection.close()


class ClassifySegmentResult:
    """Combine a classifier and a segmenter: keep masks only for classes predicted present."""

    def __init__(self, config, device, dataloader):
        self.config = config
        self.device = device
        self.dataloader = dataloader
        self.classify_res = ClassifyResult(config, device, dataloader)
        self.segment_res = SegmentResult(config, device, dataloader)
        self.connection = create_connection()
        self.res = []

    def get_result(self, image):
        """Predict masks, then zero out every class the classifier rejected.

        :param image: batch of images on the model device
        :return: segmentation masks [batch, class_num, h, w] with rejected classes zeroed
        """
        class_pred = self.classify_res.get_result(image)
        segment_pred = self.segment_res.get_result(image)
        # Vectorized replacement for the per-sample/per-class loop: boolean indexing
        # with the (batch, class) mask zeroes the full (h, w) slice of each rejected class.
        segment_pred[~class_pred.bool()] = 0
        return segment_pred

    def do_task(self):
        """Render combined predictions onto the inputs and collect (upload-url, id) pairs."""
        class_color = np.array(self.config.dataset.mask_color) / 255.0
        for ids, images in self.dataloader:
            preds = self.get_result(images.to(self.device))
            images = denorm(images, self.config.dataset.mean, self.config.dataset.std)
            img_pred = stack_mask(images, preds, class_color)
            self.res.extend(
                (upload_image((ot.permute(1, 2, 0).detach().cpu().numpy() * 255).astype(np.uint8)), id.item()) for
                id, ot in zip(ids, img_pred))
        self.upload()

    def upload(self):
        """Persist collected results to the database and close the connection."""
        set_predict_by_id_batch(self.connection, self.res)
        self.connection.close()


class UnsupervisedSegmentResult:
    """Run an unsupervised anomaly-detection model and upload heatmap overlays."""

    def __init__(self, config, device, dataloader):
        self.config = config
        self.device = device
        self.dataloader = dataloader
        self.model = UnsupervisedModel(config.unsupervised_model.name, **config.unsupervised_model.parameters).to(
            device)
        self.solver = Solver(self.model, device)
        self.solver.load_checkpoint_from_oss(config.unsupervised_model.checkpoint_link,
                                             config.unsupervised_model.local_path)
        self.model.eval()

        self.connection = create_connection()
        self.res = []

    def get_result(self, image, threshold=0.5):
        """Compute anomaly maps for a batch, optionally averaging flip-TTA variants.

        :param image: batch of images [b, 3, h, w]
        :param threshold: unused here; kept for interface parity with the sibling classes
        :return: anomaly maps as np.ndarray of shape [b, h, w]
        """
        if self.config.unsupervised_model.tta_flag:
            image_hflp = torch.flip(image, dims=[3])  # horizontal flip
            image_vflp = torch.flip(image, dims=[2])  # vertical flip

            origin_feats, rec_feats = self.solver.forward(image)
            anomaly_map_1, _ = cal_anomaly_map(origin_feats, rec_feats, [image.shape[2], image.shape[3]], uni_am=False,
                                               amap_mode='add', gaussian_sigma=4)

            origin_feats, rec_feats = self.solver.forward(image_hflp)
            anomaly_map_2, _ = cal_anomaly_map(origin_feats, rec_feats, [image.shape[2], image.shape[3]], uni_am=False,
                                               amap_mode='add', gaussian_sigma=4)
            # Undo the horizontal flip: width is axis 2 of the [b, h, w] map.
            anomaly_map_2 = np.flip(anomaly_map_2, axis=2)

            origin_feats, rec_feats = self.solver.forward(image_vflp)
            anomaly_map_3, _ = cal_anomaly_map(origin_feats, rec_feats, [image.shape[2], image.shape[3]], uni_am=False,
                                               amap_mode='add', gaussian_sigma=4)
            # Undo the vertical flip: height is axis 1 of the [b, h, w] map.
            anomaly_map_3 = np.flip(anomaly_map_3, axis=1)

            anomaly_map = (anomaly_map_1 + anomaly_map_2 + anomaly_map_3) / 3.0

        else:
            origin_feats, rec_feats = self.solver.forward(image)
            anomaly_map, _ = cal_anomaly_map(origin_feats, rec_feats, [image.shape[2], image.shape[3]], uni_am=False,
                                             amap_mode='add', gaussian_sigma=4)
        return anomaly_map

    def do_task(self):
        """Blend anomaly heatmaps over the inputs and collect (upload-url, id) pairs."""
        for ids, images in self.dataloader:
            preds = self.get_result(images.to(self.device))
            images = denorm(images, self.config.dataset.mean, self.config.dataset.std)
            for id, pred, image in zip(ids, preds, images):
                # Normalize to [0, 1]; guard against an all-zero map (would divide by zero).
                peak = pred.max()
                if peak > 0:
                    pred = pred / peak
                # NOTE(review): cm.jet is deprecated in recent matplotlib versions in
                # favor of matplotlib.colormaps['jet'] — confirm the pinned version.
                pred = cm.jet(pred)
                image = Image.fromarray((image.detach().cpu().numpy().transpose(1, 2, 0) * 255).astype(np.uint8))
                pred = Image.fromarray((pred[:, :, :3] * 255).astype(np.uint8))
                image_pred = Image.blend(image, pred, alpha=0.4)
                self.res.append((upload_image(np.array(image_pred)), id.item()))
        self.upload()

    def upload(self):
        """Persist collected results to the database and close the connection."""
        set_predict_by_id_batch(self.connection, self.res)
        self.connection.close()


if __name__ == '__main__':
    import os
    from omegaconf import OmegaConf
    from dataset.data_factory import UnsupervisedDataset
    from torch.utils.data import DataLoader

    # Smoke-test the unsupervised pipeline against a local image folder.
    config = OmegaConf.load('../inference_config.yaml')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    root = '../dataset/temp/ko'
    image_paths = [os.path.join(root, name) for name in os.listdir(root)]
    dataset = UnsupervisedDataset(image_paths, config.dataset.height, config.dataset.width,
                                  config.dataset.mean, config.dataset.std)
    dataloader = DataLoader(dataset, batch_size=2, shuffle=False, num_workers=1)
    UnsupervisedSegmentResult(config, device, dataloader).do_task()