import os
from dataclasses import dataclass, field

import cv2
import h5py as h5
import numpy as np
import torch
from tqdm import tqdm

from utils.superpoint import SuperPoint


"""
    建立h5特征库，描述符为superpoint
    --------input_path  影像所在根目录
        |
        |---basemap  基准图像所在根目录
            |--- 1.tif
            |--- 2.tif
            |--- ···
        |---test_image  查询影像所在目录，对于建库没有要求
            |--- ···
        |---database  h5特征库所在目录，没有将会新建
            |--- 1.h5
            |--- 2.h5
            |--- ···
"""


def get_corners(shape, crop_size, rate=0):  # 对于建库粗粒度搜索，重叠度可设为0
    overlap = int(crop_size * rate)

    result = []
    h, w, _ = shape
    y_coords = range(0, h, crop_size - overlap)
    x_coords = range(0, w, crop_size - overlap)
    for y in y_coords:
        for x in x_coords:
            # 确定子影像的结束位置
            x_end = x + crop_size
            y_end = y + crop_size
            # 裁剪子影像
            if(x + crop_size > w):
                x = w - crop_size
                x_end = w
            if(y + crop_size > h):
                y = h - crop_size
                y_end = h
            result.append([y, y_end, x, x_end])
    return result


def numpy_image_to_torch(image: np.ndarray) -> torch.Tensor:
    """Normalize the image tensor and reorder the dimensions."""
    if image.ndim == 3:
        image = image.transpose((2, 0, 1))  # HxWxC to CxHxW
    elif image.ndim == 2:
        image = image[None]  # add channel axis
    else:
        raise ValueError(f"Not an image: {image.shape}")
    return torch.tensor(image / 255.0, dtype=torch.float)


def zoom_img(img, zoom):
    h, w = img.shape[:2]
    nh, nw = int(h // zoom), int(w // zoom)
    zoomed_img = cv2.resize(img, (nw, nh), interpolation=cv2.INTER_LINEAR)
    return zoomed_img


def extract_features(img, num_pts=None):
    extractor = SuperPoint(max_num_keypoints=num_pts).eval().cuda()
    features = extractor.extract(img)
    return features


def build_database(img, device, zoom, file_name, group_name, num_pts=400):
    img_tensor = numpy_image_to_torch(img[..., ::-1]).to(device)
    img_features = extract_features(img_tensor, num_pts*zoom*zoom)
    with h5.File(file_name, 'a') as f:
        fg = f.create_group(group_name)
        for key, value in img_features.items():
            fg.create_dataset(key, data=np.squeeze(value.detach().cpu().numpy()))
        del key, value
    f.close()
    del img, img_tensor, img_features


def run(config):
    config.data_path = f'{config.input_path}/database'
    os.makedirs(config.data_path, exist_ok=True)
    img_list = os.listdir(f'{config.input_path}/basemap')
    for img_name in tqdm(img_list, total=len(img_list)):
        path = f'{config.input_path}/basemap/{img_name}'
        img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
        for zoom in config.zoom_list:
            zoomed_img = zoom_img(img, zoom)
            if (zoomed_img.shape[0] > 3000) & (zoomed_img.shape[1] > 3000):
                crop_list = get_corners(zoomed_img.shape, 2000)
                for item in crop_list:
                    y, y_end, x, x_end = item
                    croped_img = zoomed_img[y:y_end, x:x_end]
                    file_name = f'{config.data_path}/{zoom}.h5'
                    group_name = f'{img_name[:-4]}_{x*zoom}_{y*zoom}_{x_end*zoom}_{y_end*zoom}'
                    build_database(croped_img, config.device, zoom, file_name, group_name, config.num_ptsz*zoom*zoom)
                    del croped_img
            else:
                file_name = f'{config.data_path}/{zoom}.h5'
                group_name = f'{img_name[:-4]}_{0}_{0}_{img.shape[1]}_{img.shape[0]}'
                build_database(zoomed_img, config.device, zoom, file_name, group_name, config.num_pts*zoom*zoom)
            del zoomed_img
        del img


#  config文件中必需元素，可合并
@dataclass
class Configuration:
    zoom_list=[16, 8, 4, 2, 1]
    device: str = "cuda"
    input_path = r'D:\Temp'
    num_pts = 400


if __name__ == '__main__':
    config = Configuration()
    run(config)
