import os
import cv2
import numpy
import numpy as np
from glob import glob
import os.path as osp
from PyVGrid.vgrid import VGrid
from SegPython import *
from PyAutoWLWW import *
from tqdm import tqdm
import xml.etree.ElementTree as ET
import shutil

def makesure_dir(path_):
    """Create directory `path_` (including parents) if it does not already exist.

    Uses ``exist_ok=True`` instead of a separate existence check, which avoids
    the check-then-create race of the original implementation.
    """
    os.makedirs(path_, exist_ok=True)


def getSizeFormIQ(iq_path):
    """Read the frame dimensions recorded in an .iq XML sidecar file.

    Expects the document to contain ``<ImageParam><Height>..</Height>
    <Width>..</Width></ImageParam>`` under the root element.

    Returns:
        tuple[int, int]: (height, width) parsed from the file.
    """
    image_param = ET.parse(iq_path).getroot().find('ImageParam')
    h_text = image_param.find('Height').text
    w_text = image_param.find('Width').text
    return int(h_text), int(w_text)


def getDiffBrightnessSequence(sequence, bright_list=None, vgrid_handle=None, tqdm_bar=None):
    """Run VGrid enhancement over `sequence` once per target brightness level.

    Args:
        sequence: uint16 array of shape (frames, height, width).
        bright_list: brightness targets to render; defaults to four fixed levels.
        vgrid_handle: an initialized-on-demand VGrid instance; a fresh one is
            created when None.
        tqdm_bar: optional tqdm instance used only for progress display.

    Returns:
        np.ndarray of dtype uint16 with shape
        (len(bright_list), frames, height, width).
    """
    if bright_list is None:
        bright_list = [0.34, 0.44, 0.54, 0.64]
    if vgrid_handle is None:
        vgrid_handle = VGrid()

    _frame_count, _height, _width = sequence.shape
    vgrid_handle.Initialization(_width, _height, 16)
    result_list = []
    b_len = len(bright_list)
    for b_id, b in enumerate(bright_list):
        result_seq = []
        for _id, _img in enumerate(sequence):
            _img_out = np.zeros_like(_img)
            # BUG FIX: the original called module-level `vg` with the
            # module-level `width`/`height` globals, ignoring both the
            # `vgrid_handle` parameter and the shape derived from `sequence`
            # above (NameError when imported as a library). Use the local
            # handle and dimensions instead.
            # NOTE(review): the numeric Process arguments are opaque tuning
            # constants for VGrid — meaning not derivable from this file.
            vgrid_handle.Process(_img, _img_out, 3.5, 3.5, 3.5, 1.0, b, 0,
                                 True, True, True, _width, _height, False,
                                 1, 0, 1,
                                 20, 15, 30, -30, _id + 1, 0, False
                                 )
            # Clamp into uint16 range before the uint16 cast below.
            _img_out = np.clip(_img_out, 0, 65535)
            result_seq.append(_img_out)
            if tqdm_bar is not None:
                tqdm_bar.set_postfix_str(rf"B:{b} {b_id+1}/{b_len} {_id+1}/{_frame_count}")
        result_seq = np.array(result_seq, dtype=np.uint16)
        result_list.append(result_seq)
    return np.array(result_list, np.uint16)


def getForegroundMaskSequence(sequence, seg_handle=None, tqdm_bar=None):
    """Segment vessel + catheter foreground for every frame in `sequence`.

    Args:
        sequence: uint16 array of shape (frames, height, width).
        seg_handle: segmentation model wrapper; a default binary
            vessel/catheter model is created when None.
        tqdm_bar: optional tqdm instance used only for progress display.

    Returns:
        np.ndarray of dtype uint16, shape (frames, height, width), where the
        vessel mask is OR-ed with the catheter mask (catheter pixels forced to 1).
    """
    if seg_handle is None:
        # BUG FIX: the original assigned the fallback model to a throwaway
        # local `seg`, leaving seg_handle as None and crashing on the
        # `seg_handle.initialization(...)` call below.
        seg_handle = Segment(ModelsFlag.SegVesselCatheterBinary_FP16)
    _frame_count, _height, _width = sequence.shape
    seg_handle.initialization(_width, _height)
    result_sequence = []
    v_seg_id = seg_handle.getSegClass().vessel
    c_seg_id = seg_handle.getSegClass().catheter
    for _id, _img in enumerate(sequence):
        vessel_mask = seg_handle.process(_img, v_seg_id)
        catheter_mask = seg_handle.process(_img, c_seg_id)
        # Merge: any catheter pixel counts as foreground in the vessel mask.
        vessel_mask[catheter_mask > 0] = 1
        result_sequence.append(vessel_mask)
        if tqdm_bar is not None:
            tqdm_bar.set_postfix_str(f"segment vessel {_id+1}/{_frame_count}")
    result_sequence = np.array(result_sequence, dtype=np.uint16)
    return result_sequence


def exposure_fusion_withmask(sequence, mask, alphas=(1.0, 1.0, 1.0), best_illumination=0.5, sigma=0.2, layers_num=7):
    """Fuse a stack of differently-exposed uint16 frames into one frame.

    Classic Mertens-style exposure fusion driven only by a per-pixel
    "well-exposedness" weight, blended in a Laplacian pyramid. The mask marks
    foreground (vessel/catheter) pixels whose target illumination is derived
    from the foreground's own mean brightness.

    Args:
        sequence: iterable of S uint16 images, all (H, W). Assumes S >= 3
            because frame index 2 is used as the brightness reference —
            TODO confirm with callers.
        mask: (H, W) foreground mask, nonzero = foreground.
        alphas: weight exponents; only alphas[2] is applied here (and is then
            cancelled by the final normalization — kept for interface
            compatibility).
        best_illumination: base target brightness in [0, 1].
        sigma: width of the Gaussian well-exposedness curve.
        layers_num: number of pyramid levels.

    Returns:
        np.ndarray uint16 (H, W): the fused image.
    """
    # Work in float32 on a [0, 1] scale.
    sequence = np.stack([it.astype("float32") / 65535 for it in sequence], axis=0)
    S = len(sequence)
    H, W = sequence[0].shape
    mse = lambda l, r: (l - r) * (l - r)

    # Per-pixel target brightness map: darker source pixels in the reference
    # frame get a lower target.
    best_illumination = np.full((H, W), best_illumination, dtype='float32')
    gray_best_frame_id = 2
    best_illumination[sequence[gray_best_frame_id, :, :] < 1] = 0.65  # TODO tune
    best_illumination[sequence[gray_best_frame_id, :, :] < 0.5] = 0.45  # TODO tune

    # Grow the foreground slightly, then set its target from its own mean.
    mask = cv2.dilate(mask, np.ones((4, 4), np.uint16), iterations=1)
    foreground_count = np.sum(mask)
    if foreground_count > 0:
        foreground_val = np.sum(sequence[gray_best_frame_id, :, :] * mask) / foreground_count * 0.7
        best_illumination[mask > 0] = foreground_val
    else:
        # No foreground: only darken the very dark background regions.
        best_illumination[sequence[gray_best_frame_id, :, :] < 0.3] = 0.15

    normalize = lambda x: x / np.expand_dims(np.sum(x, axis=0), axis=0)

    # Well-exposedness weight per frame: Gaussian distance to the target map.
    illuminations = []
    for s in range(S):
        illumination = [np.exp(-0.5 * mse(sequence[s][:, :], best_illumination) / (sigma * sigma))]
        illumination = np.prod(illumination, axis=0)
        illuminations.append(illumination)

    illuminations = np.array(illuminations)
    illuminations = illuminations / np.sum(illuminations, axis=0)
    illuminations = illuminations * alphas[2]

    weights = illuminations
    # NOTE: re-normalizing here cancels the alphas[2] scaling above; kept to
    # preserve the original numeric path exactly.
    weights = normalize(weights)

    # Restore the sequence to the uint16 value range for reconstruction.
    sequence *= 65535

    # Gaussian pyramid of `layers` levels from the full-resolution image.
    def build_gaussi_pyramid(high_res, layers, sigma_inner=1.2):
        this_flash = [high_res]
        for i in range(1, layers):
            blurred = cv2.GaussianBlur(this_flash[i - 1], (5, 5), sigma_inner)
            blurred = blurred[::2, ::2]
            this_flash.append(blurred)
        return this_flash

    # Laplacian pyramid: upsample each coarser level and keep the residual.
    # Result is ordered high resolution -> low resolution.
    def build_laplace_pyramaid(gaussi_pyramid, layers):
        upsampled = gaussi_pyramid[layers - 1]
        pyramid = [upsampled]
        for i in range(layers - 1, 0, -1):
            size = (gaussi_pyramid[i - 1].shape[1], gaussi_pyramid[i - 1].shape[0])
            upsampled = cv2.resize(gaussi_pyramid[i], size)
            pyramid.append(gaussi_pyramid[i - 1] - upsampled)
        pyramid.reverse()
        return pyramid

    sequence_weights_pyramids = [build_gaussi_pyramid(weights[s], layers_num, 0.83) for s in range(S)]
    sequence_gaussi_pyramids = [build_gaussi_pyramid(sequence[s], layers_num, 0.83) for s in range(S)]
    sequence_laplace_pyramids = [build_laplace_pyramaid(sequence_gaussi_pyramids[s], layers_num) for s in range(S)]
    # Blend each pyramid level across frames with the matching weight level.
    fused_laplace_pyramid = [np.sum([sequence_laplace_pyramids[k][n] * sequence_weights_pyramids[k][n] for k in range(S)], axis=0) for n in range(layers_num)]

    # Collapse the fused pyramid from coarsest to finest.
    start = fused_laplace_pyramid[layers_num - 1]
    for i in range(layers_num - 2, -1, -1):
        upsampled = cv2.resize(start, (fused_laplace_pyramid[i].shape[1], fused_laplace_pyramid[i].shape[0]))
        start = fused_laplace_pyramid[i] + upsampled
    start = np.clip(start, 0, 65535).astype("uint16")
    return start


if __name__ == '__main__':
    # Batch pipeline: for every paired .raw/.iq in base_path, render multiple
    # VGrid brightness variants, exposure-fuse them (guided by a vessel mask),
    # apply auto window-level/width, and write the result to save_path.
    base_path = r"C:\Users\15519\Desktop\stent\org"
    save_path = r"C:\Users\15519\Desktop\stent\org\VGRID+SUB+SR+WLWW+HDR+WLWW"
    makesure_dir(save_path)
    name_list = [osp.basename(path).split(".raw")[0] for path in glob(osp.join(base_path, "*.raw"))]
    name_list.sort()

    tqdm_bar = tqdm(enumerate(name_list), total=len(name_list))
    # NOTE: these module-level globals (vg, width, height) are read directly
    # inside getDiffBrightnessSequence — do not rename or move them.
    vg = VGrid()
    seg = Segment(ModelsFlag.SegVesselCatheterBinary_FP16)
    wlww = AutoWLWW()
    next_step = 50
    pause_count = next_step
    for ii, name in tqdm_bar:
        if ii == pause_count:           # pause periodically so the machine doesn't overheat /(ToT)/~~
            # Prompt (Chinese): "enter how many items to process before the
            # next pause, to avoid overheating the host".
            next_step_str = input("请输入下次执行的个数，避免主机过热\r\n")
            try:
                next_step = int(next_step_str)
            except ValueError:
                next_step = 50          # fall back to the default batch size on bad input
            pause_count = ii + next_step
        iq_path = osp.join(base_path, name+".iq")
        raw_path = osp.join(base_path, name+".raw")
        if not osp.exists(raw_path) or not osp.exists(iq_path):
            print(f"have not paired raw or iq file, {name}")
            continue
        height, width = getSizeFormIQ(iq_path)

        # Raw file is a stream of uint16 frames; frame count inferred from size.
        imgs = np.fromfile(raw_path, dtype=np.uint16).reshape((-1, height, width))
        result_imgs = []
        frame_count = len(imgs)
        # NOTE(review): argument order here is (height, width) while the other
        # handles take (width, height) — presumably intentional; confirm
        # against the AutoWLWW API.
        wlww.Initialization(height, width, True)
        diff_bright_list = getDiffBrightnessSequence(imgs, vgrid_handle=vg, tqdm_bar=tqdm_bar)
        velsse_mask_img = getForegroundMaskSequence(imgs, seg_handle=seg, tqdm_bar=tqdm_bar)
        for frame_id in range(frame_count):
            # All brightness variants of this single frame: (B, H, W).
            b_sequence = diff_bright_list[:, frame_id, :, :]
            mask_img = velsse_mask_img[frame_id]
            fused_results = exposure_fusion_withmask(b_sequence, mask_img, alphas=(1, 1.0, 5), best_illumination=0.42, sigma=0.1, layers_num=7)
            fused_results = wlww.Process(fused_results, WLWWModeOptType().CardiacFluCine)
            result_imgs.append(fused_results)
            tqdm_bar.set_postfix_str(f"fused img {frame_id+1}/{frame_count}")
        result_imgs = np.array(result_imgs, np.uint16)
        # Write fused frames next to a copy of the original .iq metadata.
        result_imgs.tofile(osp.join(save_path, name+".raw"))
        shutil.copy(iq_path, save_path)









