from tqdm import tqdm
import h5py
import cv2
import numpy as np
import torch
import pandas as pd
from PIL import  Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
import os
import glob
import torch.nn as nn
import torchvision.models as models


# Inference setup: read the test-set metadata; results are later saved to files.
df_sample_fileter = pd.read_csv('UBC-OCEAN/test.csv') 

def slect_image_id(df, path="UBC-OCEAN/test_thumbnails"):
    """Build the thumbnail file path for every image_id in *df*.

    Args:
        df: DataFrame with an ``image_id`` column (the test metadata).
        path: directory holding ``<image_id>_thumbnail.png`` files.

    Returns:
        List of paths, one per row, in row order.
    """
    # Iterate the column itself: the original `df['image_id'][id]` indexing
    # raised KeyError on any DataFrame whose index is not 0..n-1 (e.g. a
    # filtered frame), and shadowed the builtin `id`.
    return [
        os.path.join(path, f"{image_id}_thumbnail.png")
        for image_id in df["image_id"]
    ]


def crop_rect_from_png(png, idx, resize_to):
    """Cut the idx-th square tile out of *png* (column-major tile order).

    Returns the tile view and its [row, col, h, w] rectangle.
    """
    # Number of whole tiles that fit along axis 0; idx walks down each
    # column of tiles before moving to the next column.
    tiles_per_column = png.shape[0] // resize_to
    row = (idx % tiles_per_column) * resize_to
    col = (idx // tiles_per_column) * resize_to
    tile = png[row:row + resize_to, col:col + resize_to]
    return tile, [row, col, resize_to, resize_to]

def tile_is_not_empty(tile):
    """Return True when *tile* contains no zero-valued entries.

    A tile with any 0 pixel is treated as (partly) background and is later
    dropped by ``collate_features``.
    """
    # `False if cond else True` is just `not cond`.
    return not np.any(tile == 0)

class BagOfTiles(Dataset):
    """Expose one thumbnail image as a dataset of square tiles.

    Each item is ``(tensor, coords, is_tile_kept)`` where the tensor has a
    leading batch axis of 1 and ``is_tile_kept`` marks tiles without any
    zero pixels.
    """

    def __init__(self, png, resize_to=196):
        self.png = png
        self.resize_to = resize_to
        rows, cols = self.png.shape[:2]
        # Count only the whole tiles that fit inside the image grid.
        self.length_tiles = (rows // resize_to) * (cols // resize_to)
        self.roi_transforms = transforms.Compose([
            transforms.Resize(resize_to),
            transforms.ToTensor(),
        ])

    def __len__(self):
        return self.length_tiles

    def __getitem__(self, idx):
        tile, coords = crop_rect_from_png(self.png, idx, resize_to=self.resize_to)
        is_tile_kept = tile_is_not_empty(tile)
        height, width = tile.shape[:2]
        assert height == width, "input image is not a square"
        # PIL round-trip so torchvision transforms apply; NOTE(review): the
        # array comes from cv2.imread so channels are BGR — confirm the
        # encoder was trained with the same channel order.
        pil_tile = Image.fromarray(tile)
        tensor = self.roi_transforms(pil_tile).unsqueeze(0)
        return tensor, coords, is_tile_kept
    
def collate_features(batch):
    """Collate only the kept tiles: stack tensors and their coordinates.

    *batch* items are ``(tensor, coords, is_tile_kept)``; items whose flag
    is falsy are discarded.
    """
    kept = [item for item in batch if item[2]]
    images = torch.cat([tensor for tensor, _, _ in kept], dim=0)
    coords = np.vstack([coord for _, coord, _ in kept])
    return [images, coords]


def write_to_h5(file, asset_dict):
    """Append each array in *asset_dict* to a growable dataset in *file*.

    On the first write for a key, a dataset resizable along axis 0 is
    created; later writes extend it and copy the new rows to the end.
    """
    for key, val in asset_dict.items():
        if key in file:
            # Grow along axis 0 and write the new rows at the tail.
            dset = file[key]
            n_new = val.shape[0]
            dset.resize(len(dset) + n_new, axis=0)
            dset[-n_new:] = val
        else:
            dset = file.create_dataset(
                key,
                shape=val.shape,
                maxshape=(None,) + val.shape[1:],  # unbounded along axis 0
                dtype=val.dtype,
            )
            dset[:] = val


def load_encoder(backbone, checkpoint_file, use_imagenet_weights, device):
    """Build a headless ResNet encoder and load MoCo query-encoder weights.

    Args:
        backbone: torchvision model name, e.g. "resnet50".
        checkpoint_file: path to a checkpoint whose "state_dict" holds keys
            prefixed with "encoder_q." (MoCo-style).
        use_imagenet_weights: forwarded to torchvision as ``pretrained``.
        device: torch.device the model is moved to.

    Returns:
        The encoder in eval mode on ``device``; its forward pass returns the
        pooled, flattened feature vector (the fc head is never applied).
    """
    class DecapitatedResnet(nn.Module):
        # Wraps a torchvision ResNet but stops before the final fc layer.
        def __init__(self, base_encoder, pretrained):
            super(DecapitatedResnet, self).__init__()
            self.encoder = base_encoder(pretrained=pretrained)

        def forward(self, x):
            # Stem.
            x = self.encoder.conv1(x)
            x = self.encoder.bn1(x)
            x = self.encoder.relu(x)
            x = self.encoder.maxpool(x)

            # Residual stages.
            x = self.encoder.layer1(x)
            x = self.encoder.layer2(x)
            x = self.encoder.layer3(x)
            x = self.encoder.layer4(x)

            # Global pooling -> flat feature vector; fc is intentionally skipped.
            x = self.encoder.avgpool(x)
            x = torch.flatten(x, 1)

            return x

    model = DecapitatedResnet(models.__dict__[backbone], use_imagenet_weights)
    checkpoint = torch.load(checkpoint_file, map_location="cpu")
    state_dict = checkpoint["state_dict"]
    # Keep only the query-encoder weights, renamed without the "encoder_q."
    # prefix. The `del` is unconditional, so every original key (including
    # encoder_q.fc and any momentum-encoder keys) is removed from the dict.
    for k in list(state_dict.keys()):
        if k.startswith("encoder_q.") and not k.startswith("encoder_q.fc"):
            state_dict[k[len("encoder_q.") :]] = state_dict[k]
        del state_dict[k]

    # strict=False: the fc weights were dropped above, so the load reports
    # (expected) missing keys in `msg` rather than raising.
    msg = model.encoder.load_state_dict(state_dict, strict=False)
    model = model.to(device)
    model.eval()

    return model

def extract_features(model, device, png, workers, out_size, batch_size):
    """Yield ``(features, coords)`` numpy batches for the kept tiles of *png*.

    Tiles come from ``BagOfTiles``; empty tiles are filtered out by
    ``collate_features`` before encoding.
    """
    loader_kwargs = {}
    if device.type == "cuda":
        # Worker processes and pinned memory only help when feeding a GPU.
        loader_kwargs = {"num_workers": workers, "pin_memory": True}
    loader = DataLoader(
        dataset=BagOfTiles(png, resize_to=out_size),
        batch_size=batch_size,
        collate_fn=collate_features,
        **loader_kwargs,
    )
    with torch.no_grad():
        for tiles, coords in loader:
            tiles = tiles.to(device, non_blocking=True)
            yield model(tiles).cpu().numpy(), coords



# ---------------------------------------------------------------------------
# Inference driver: encode every test thumbnail's tiles and save the features
# to one HDF5 file per slide.
# NOTE(review): the original referenced an undefined name `args`
# (`args.output_dir`) and would crash with NameError on the first iteration;
# replaced with a module-level constant.
# ---------------------------------------------------------------------------
OUTPUT_DIR = "features"  # TODO confirm the intended output directory

device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = load_encoder(
    backbone="resnet50",
    checkpoint_file="exp/test08-part-data-01/checkpoint_4770.pth.tar",
    use_imagenet_weights=False,
    device=device,
)
filenames = slect_image_id(df_sample_fileter)
os.makedirs(OUTPUT_DIR, exist_ok=True)  # loop-invariant: create once
for png_path in tqdm(filenames):
    # "<image_id>_thumbnail.png" -> slide id "<image_id>".
    stem, _ = os.path.splitext(os.path.basename(png_path))  # was `id`, shadowed builtin
    slide_id = stem.split("_")[0]
    wip_file_path = os.path.join(OUTPUT_DIR, slide_id + "_wip.h5")
    output_file_path = os.path.join(OUTPUT_DIR, slide_id + "_features.h5")
    if os.path.exists(output_file_path):
        os.remove(output_file_path)

    png = cv2.imread(png_path)  # BGR uint8 array
    generator = extract_features(
        model,
        device,
        png,
        8,     # DataLoader workers
        196,   # tile size
        1024,  # batch size
    )
    with h5py.File(wip_file_path, "w") as file:
        for features, coords in generator:
            write_to_h5(file, {"features": features, "coords": coords})
    # Only a fully-written file gets the final name; a crash leaves "_wip".
    os.rename(wip_file_path, output_file_path)



