import functools
import threading
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from queue import Queue

import numpy as np
import torch
from torch.utils.data import DataLoader

def get_loader(cfg, dataset, type, sampler):
    """Build a DataLoader for *dataset* configured from *cfg*.

    Args:
        cfg: config object; ``cfg[type].BATCH_SIZE`` supplies the batch size
            and ``cfg.DATASET`` holds the loader options (SHUFFLE, PIN_MEMORY,
            N_WORKERS, DROP_LAST).
        dataset: the torch ``Dataset`` to wrap.
        type: config section key (e.g. a train/val split name). NOTE: shadows
            the ``type`` builtin, but renaming would break keyword callers.
        sampler: optional ``Sampler``; mutually exclusive with shuffling.

    Returns:
        torch.utils.data.DataLoader
    """
    # DataLoader raises ValueError if shuffle=True is combined with an
    # explicit sampler, so shuffling is disabled whenever a sampler is given.
    return DataLoader(
        dataset=dataset,
        batch_size=cfg[type].BATCH_SIZE,
        shuffle=cfg.DATASET.SHUFFLE if sampler is None else False,
        pin_memory=cfg.DATASET.PIN_MEMORY,
        num_workers=cfg.DATASET.N_WORKERS,
        sampler=sampler,
        drop_last=cfg.DATASET.DROP_LAST
    )


def DBindex(cl_data_file):
    """Compute the Davies-Bouldin index over per-class feature collections.

    See https://en.wikipedia.org/wiki/Davies%E2%80%93Bouldin_index.
    The DB index measures intra-class variation relative to inter-class
    separation. Since baseline/baseline++ do not train a few-shot classifier
    during training, this serves as an alternative validation metric.
    Empirically this only works for the CUB dataset, not miniImagenet.

    Args:
        cl_data_file: mapping from class label to an array-like of feature
            vectors for that class (one row per sample).

    Returns:
        Mean Davies-Bouldin score across all classes.
    """
    labels = list(cl_data_file.keys())
    n_cls = len(labels)

    centroids = []
    spreads = []
    for label in labels:
        feats = cl_data_file[label]
        center = np.mean(feats, axis=0)
        centroids.append(center)
        # RMS distance of the class samples from their centroid.
        spreads.append(np.sqrt(np.mean(np.sum(np.square(feats - center), axis=1))))

    # Pairwise Euclidean distances between all class centroids (broadcasting).
    centers = np.array(centroids)
    deltas = centers[np.newaxis, :, :] - centers[:, np.newaxis, :]
    pair_dists = np.sqrt(np.sum(np.square(deltas), axis=2))

    scores = [
        max((spreads[i] + spreads[j]) / pair_dists[i, j]
            for j in range(n_cls) if j != i)
        for i in range(n_cls)
    ]
    return np.mean(scores)



def timer_decorator(func):
    """Decorator that prints the wall-clock execution time of each call.

    Args:
        func: the callable to time.

    Returns:
        A wrapper that forwards all arguments to *func*, prints the elapsed
        time, and returns *func*'s result unchanged.
    """
    # functools.wraps preserves func's __name__/__doc__ on the wrapper;
    # without it the decorated function is reported as "wrapper".
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        execution_time = end_time - start_time
        print(f"the cost of {func.__name__} is: {execution_time} seconds")
        return result
    return wrapper



# def multi_read_image(read_image, support_query_set):
#     with ThreadPoolExecutor() as executor:
#         # Submit the tasks and collect the Future objects
#         futures = [executor.submit(read_image, path) for path in support_query_set]
#
#         # Gather results in completion order (re-sorted to input order below)
#         results = [future.result() for future in as_completed(futures)]
#         results = sorted(results, key=lambda x: support_query_set.index(x[0]))
#         results = [x[1] for x in results]
#
#     return results

# calculate the euclidean distance
