# encoding=utf-8

"""VascularBatchLoaderImpl
    和Pytorch一致，实现两个类，一个是DataSet,一个是DataLoader
        - DataSet和其他BatchFactor一致进行声明
        - DataLoader在那时没有进行抽象，主要实现文件预读取、拼接、基础信息提供等方案
"""
import os
import cv2
from threading import Thread
from seg_cuda.CudaMatrixTools import MatBuilder
from seg_common.BatchLoader import BatchLoaderFactory
from seg_common.annotation import CommonAnnotation


class VascularBatchLoaderFactory(BatchLoaderFactory):
    """Dataset-style loader: maps integer indices to image files in a directory
    and reads/resizes each image on demand.
    """
    def __init__(self):
        self.from_path = ""             # directory currently being served
        self.resize_shape = (384, 384)  # (width, height) handed to cv2.resize
        self.idx_to_token = []          # index -> file name mapping

    @CommonAnnotation.override()
    def load_data(self, path: str, **kwargs):
        """Scan *path* and (re)build the index -> file-name mapping.

        Keyword Args:
            resize_shape (tuple): optional (width, height) override for the
                shape every image is resized to.
        """
        resize_shape = kwargs.get('resize_shape', None)
        if resize_shape and isinstance(resize_shape, tuple):
            self.resize_shape = resize_shape

        self.from_path = path
        self.idx_to_token.clear()
        # os.listdir order is arbitrary (platform/filesystem dependent);
        # sort so the index -> file mapping is deterministic across runs.
        self.idx_to_token.extend(sorted(os.listdir(path)))

    @CommonAnnotation.override()
    def prepare_data(self, index: int):
        """Read the image at *index* and return it resized to self.resize_shape
        (numpy array in OpenCV's BGR layout).

        Raises:
            IOError: if the file cannot be decoded as an image.
        """
        name = os.path.join(self.from_path, self.idx_to_token[index])
        img = cv2.imread(name)
        # cv2.imread signals failure by returning None; fail loudly here
        # instead of letting cv2.resize(None, ...) raise a cryptic cv2.error.
        if img is None:
            raise IOError("cannot read image file: {}".format(name))
        return cv2.resize(img, self.resize_shape)

    @CommonAnnotation.override()
    def get_data_lens(self):
        """Number of files indexed by the last load_data() call."""
        return len(self.idx_to_token)

    @CommonAnnotation.override()
    def get_index(self, tokens):
        # NOTE(review): left unimplemented in the original; kept as a no-op
        # (returns None) so existing callers see unchanged behavior.
        pass

    @CommonAnnotation.override()
    def get_tokens(self, indices):
        """Map one index (or a list/tuple of indices) back to file name(s)."""
        if not isinstance(indices, (list, tuple)):
            return self.idx_to_token[indices]
        return [self.idx_to_token[index] for index in indices]


class VascularDataLoader:
    """DataLoader-style iterator with threaded read-ahead.

    While the caller consumes the current batch, worker threads prefetch the
    next one.  Each ``__next__`` yields
    ``(big_matrix, usable_location_info, file_name_list)`` where the first two
    come from ``MatBuilder.build``.
    """
    def __init__(self, dataset: BatchLoaderFactory, batch_size: int, stride: int = 0, num_worker: int = 1):
        self.stride = stride
        self.dataset = dataset

        set_len = dataset.__len__()
        self.read_idx_last = 0                          # first index of the batch being prefetched
        self.read_idx_next = min(batch_size, set_len)   # one past its last index
        self.batch_size = batch_size
        self.batch_tmp_dict = {}  # worker_id -> (token_list, img_list)

        self.num_worker = num_worker
        self.num_worker_list = []  # threads of the in-flight prefetch

        # Kick off prefetching of the first batch immediately.
        self.start_num_worker()

    def load_task(self, slice_idx: list, worker_id: int):
        """Worker body: read the images for *slice_idx* and stash the result.

        Each worker writes a distinct key, so concurrent writes to the dict
        are safe without a lock.
        """
        imgs = [self.dataset.__getitem__(i) for i in slice_idx]
        tokens = list(self.dataset.get_tokens(slice_idx))
        self.batch_tmp_dict[worker_id] = (tokens, imgs)

    def start_num_worker(self):
        """Split the pending [read_idx_last, read_idx_next) window across
        worker threads and start them.  No-op when the window is empty.
        """
        load_size = self.read_idx_next - self.read_idx_last
        if load_size == 0:
            return
        # Conserve system resources: cap the worker count so each thread has
        # at least ~2 items to read (2 is a tuning constant for fast hosts).
        if self.num_worker > load_size // 2:
            use_num_worker = max(load_size // 2, 1)
        else:
            use_num_worker = self.num_worker

        # True ceiling division spreads items as evenly as possible
        # (the original (load + w) // w over-sized the chunks, leaving
        # trailing workers nearly idle).
        each_task_size = -(-load_size // use_num_worker)

        for worker_idx in range(use_num_worker):
            start = self.read_idx_last + worker_idx * each_task_size
            end = min(start + each_task_size, self.read_idx_last + load_size)
            if start >= end:
                break  # remaining workers would get empty slices
            t = Thread(target=self.load_task, name=str(worker_idx),
                       args=(list(range(start, end)), worker_idx))
            self.num_worker_list.append(t)
            t.start()

    def __iter__(self):
        return self

    def __next__(self):
        if self.read_idx_last >= self.dataset.__len__():
            raise StopIteration()

        # Wait for all prefetch workers.  join() on an already-finished
        # thread returns immediately, so no liveness check is needed
        # (Thread.isAlive() was removed in Python 3.9; is_alive/join are
        # the supported API).
        for each_thread in self.num_worker_list:
            each_thread.join()

        # Reassemble in worker-id order.  Dict insertion order follows
        # thread *completion* order, which is nondeterministic, so iterate
        # sorted keys to keep items in dataset-index order.
        file_name_list, numpy_list = [], []
        for worker_id in sorted(self.batch_tmp_dict):
            tokens, imgs = self.batch_tmp_dict[worker_id]
            file_name_list.extend(tokens)
            numpy_list.extend(imgs)

        # Build the batched matrix and its location metadata.
        big_matrix, usable_location_info = MatBuilder.build(numpy_list, self.stride, False)

        # Advance the window (clamped to the dataset size) and immediately
        # start prefetching the following batch.
        self.num_worker_list.clear()
        self.batch_tmp_dict.clear()
        set_len = self.dataset.__len__()
        self.read_idx_last = min(self.read_idx_last + self.batch_size, set_len)
        self.read_idx_next = min(self.read_idx_next + self.batch_size, set_len)
        self.start_num_worker()

        return big_matrix, usable_location_info, file_name_list
