# encoding=utf-8

"""CNFDForBatch
    - 本代码根据陆家懿代码进行效率优化得到
    - 本类对外提供常规Python处理Batch实现，以及使用了c++ cuda的Batch实现
    - 如果需要切换，需要设置Application.SystemConfig.USE_CUDA_CV选项
"""
import os
import cv2
import numpy as np
from PIL import Image
from seg_cuda.CudaMatrixTools import MatBuilder

from seg_system.vascular.service.VascularToolsForBatch.VascularEachProcessor.VascularForBatchBase import \
    VascularForBatchBase


class CNFDForBatch(VascularForBatchBase):
    """CNFD optimization summary (translated from the original Chinese notes):
        - This algorithm was mainly written to practice mixed C++/CUDA/Python programming
            - OpenCV's built-in algorithm optimizations and lower transfer overhead
              beat our own hand tuning
        - The C++/CUDA path should be used when the CPU is busy, handing work to the GPU
            -  overall test: 50 pics, 6 threads for pre-read
            -  cxx/cuda cost: 0.562080978999802, 16 pics per batch
            -  cxx/cuda cost: 0.5521126250000634, 25 pics per batch (-1.77% vs 16)
            -  cxx/cuda cost: 0.5422945119998985, 50 pics per batch (-1.77% vs 25, -3.52% vs 16)
                -  1111MiB (no suitable way to batch the work; only one 384 x 384 image
                   can be submitted at a time — judging by the memory cost, not much
                   more would fit anyway)
                -  second problem: no set/map-like structures available for counting
                   connected components on the GPU side
            -  python cv cost: 0.14673992799998814, 16 pics per batch
            -  python cv cost: 0.13923102899934747, 25 pics per batch (-5.12% vs 16)
            -  python cv cost: 0.13903011000002152, 50 pics per batch (-0.1% vs 25, -5.25% vs 16)
    """

    def process_with_python(self, big_matrix: np.ndarray, usable_matrix: np.ndarray, each_matrix_shape: tuple,
                            stride: int, **kwargs):
        """Pure Python/OpenCV CNFD pass; kept consistent with VascularTools/CNFD.

        Args:
            big_matrix: stitched BGR image containing all tiles.
            usable_matrix: 2-D flag array; a non-zero entry (i, j) marks tile
                (i, j) as valid for processing.
            each_matrix_shape: (height, width) of a single tile.
            stride: pixel gap between adjacent tiles inside ``big_matrix``.

        Returns:
            (tiles, CNFD_list): per-tile colorized component images (split back
            out via ``MatBuilder.split``) and, for each processed tile, its
            connected-component count with the background label excluded.
        """
        gray_img = cv2.cvtColor(big_matrix, cv2.COLOR_BGR2GRAY)
        tile_h, tile_w = each_matrix_shape[0], each_matrix_shape[1]
        CNFD_list = []  # no concurrent writers here, so plain-list appends are safe
        output = np.zeros((big_matrix.shape[0], big_matrix.shape[1], 3), np.uint8)

        # Visit only the tiles flagged usable; argwhere drops the zero entries up front.
        for i, j in np.argwhere(usable_matrix):
            h_start = i * (tile_h + stride)
            w_start = j * (tile_w + stride)
            h_slice = slice(h_start, h_start + tile_h)
            w_slice = slice(w_start, w_start + tile_w)

            each_mat = gray_img[h_slice, w_slice]
            num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(each_mat, connectivity=8)
            CNFD_list.append(num_labels - 1)  # label 0 is the background

            region = output[h_slice, w_slice]  # basic-slice view: writes land in `output`
            for k in range(1, num_labels):
                # One random BGR color per component. Upper bound is 256 so 255 is
                # reachable — the original randint(0, 255) silently excluded it.
                region[labels == k] = np.random.randint(0, 256, size=3, dtype=np.uint8)

        output = MatBuilder.split(output, usable_matrix, each_matrix_shape, stride)
        return output, CNFD_list

    def process_with_cxx(self, big_matrix: np.ndarray, usable_matrix: np.ndarray, each_matrix_shape: tuple, stride: int,
                         **kwargs):
        """C++/CUDA CNFD pass; same contract as :meth:`process_with_python`.

        Delegates the per-tile connected-component work to the native
        ``cxxPyVascular.processCNFD`` binding, then splits the stitched result
        back into per-tile images exactly like the Python path.
        """
        output, CNFD_list = self.cxxPyVascular.processCNFD(
            big_matrix, usable_matrix.tolist(), [], each_matrix_shape[0], each_matrix_shape[1], stride
        )
        output = MatBuilder.split(output, usable_matrix, each_matrix_shape, stride)
        return output, CNFD_list

    def batch_save(self, file_name: list, save_path: str, process_output, **kwargs):
        """Write every tile image into ``save_path`` and return the CNFD counts.

        Args:
            file_name: output file name for each tile, aligned with the tiles
                in ``process_output``.
            save_path: existing destination directory.
            process_output: (tiles, CNFD_list) as returned by a process_with_* method.

        Returns:
            The CNFD_list taken from ``process_output``.
        """
        output, CNFD_list = process_output

        for e_n, e_o in zip(file_name, output):
            each_path = os.path.join(save_path, e_n)
            cv2.imwrite(each_path, e_o)
            # BUG FIX: the original asserted that the *directory* (save_path) exists,
            # which can never detect a failed write; check the file that was written.
            assert os.path.exists(each_path)

        return CNFD_list


