from tqdm import tqdm
import h5py
import cv2
import numpy as np
import torch
import pandas as pd
from PIL import  Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
import os
import glob

from moco.config import Config


# Module-level setup, executed at import time: preprocessing config (paths,
# tile size, batch size, ...) and the sampled/filtered training sheet.
args =Config('im4MEC/configjson/precess_config.json').get_config()
df_sample_fileter = pd.read_csv('UBC-OCEAN/train_sampled_filter.csv') # for testing



def slect_image_id(df, path="/home/fd_chen/kaggle/cancer/UBC-OCEAN/train_thumbnails"):
    """Build the thumbnail file path for every image_id in *df*.

    Args:
        df: DataFrame with an 'image_id' column.
        path: Directory containing '<image_id>_thumbnail.png' files.

    Returns:
        list[str]: One path per row, in row order.
    """
    # Iterate the column values directly instead of df['image_id'][i] with a
    # positional index: the original raised KeyError on a filtered DataFrame
    # whose index is no longer a contiguous 0..n-1 range.
    return [os.path.join(path, f"{image_id}_thumbnail.png") for image_id in df["image_id"]]


def crop_rect_from_png(png, idx, resize_to):
    """Cut the idx-th full square tile out of *png* (column-major order).

    Args:
        png: Image array, first two axes are (rows, cols).
        idx: Flat tile index; tiles are enumerated down each column first.
        resize_to: Tile side length in pixels.

    Returns:
        (tile, [x, y, w, h]) where (x, y) is the top-left corner.
    """
    tiles_per_column = png.shape[0] // resize_to
    x = (idx % tiles_per_column) * resize_to
    y = (idx // tiles_per_column) * resize_to
    tile = png[x : x + resize_to, y : y + resize_to]
    return tile, [x, y, resize_to, resize_to]

def tile_is_not_empty(tile):
    """Return True when *tile* contains no zero-valued entries.

    A tile with any zero pixel is treated as (partially) background and is
    dropped downstream by collate_features.
    """
    # `not np.any(...)` replaces the redundant `False if ... else True` form
    # and still yields a plain Python bool.
    return not np.any(tile == 0)

class BagOfTiles(Dataset):
    """Dataset over every full resize_to x resize_to tile of one image array."""

    def __init__(self, png, resize_to=args.out_size):
        self.png = png
        self.resize_to = resize_to
        height, width = self.png.shape[:2]
        # Only complete tiles count; partial tiles at the edges are ignored.
        self.length_tiles = (height // resize_to) * (width // resize_to)

        self.roi_transforms = transforms.Compose([
            transforms.Resize(resize_to),
            transforms.ToTensor(),
        ])

    def __len__(self):
        return self.length_tiles

    def __getitem__(self, idx):
        tile, coords = crop_rect_from_png(self.png, idx, resize_to=self.resize_to)
        is_tile_kept = tile_is_not_empty(tile)

        side_a, side_b = tile.shape[:2]
        assert side_a == side_b, "input image is not a square"

        pil_tile = Image.fromarray(tile)
        # Leading batch dim lets collate_features torch.cat the kept tiles.
        tensor_tile = self.roi_transforms(pil_tile).unsqueeze(0)
        return tensor_tile, coords, is_tile_kept
    
def collate_features(batch):
    """Collate (tile, coords, keep_flag) triples, dropping filtered tiles.

    Item 2 of each triple is the boolean from tile filtering; only items
    flagged True contribute to the batch.
    """
    kept = [item for item in batch if item[2]]
    img = torch.cat([tile for tile, _, _ in kept], dim=0)
    coords = np.vstack([xy for _, xy, _ in kept])
    return [img, coords]


def write_to_h5(file, asset_dict):
    """Append each array in *asset_dict* to the same-named dataset in *file*.

    Datasets are created on first use with an unbounded first axis
    (maxshape[0] = None) so later batches can be appended via resize.
    """
    for key, val in asset_dict.items():
        if key in file:
            # Grow along axis 0, then write the new rows at the tail.
            dset = file[key]
            n_new = val.shape[0]
            dset.resize(len(dset) + n_new, axis=0)
            dset[-n_new:] = val
        else:
            dset = file.create_dataset(
                key,
                shape=val.shape,
                maxshape=(None,) + val.shape[1:],
                dtype=val.dtype,
            )
            dset[:] = val


def load_encoder(backbone, checkpoint_file, use_imagenet_weights, device):
    """Build a headless (final-FC-removed) torchvision ResNet encoder.

    Args:
        backbone: Name of a torchvision model constructor, e.g. 'resnet50'.
        checkpoint_file: Path to a MoCo checkpoint; must be None when
            use_imagenet_weights is True.
        use_imagenet_weights: If True, use torchvision's pretrained weights
            instead of a checkpoint.
        device: Device to move the model to.

    Returns:
        The encoder in eval mode, wrapped in DataParallel when multiple GPUs
        are present and args.distributed is set.

    Raises:
        Exception: If both a checkpoint and the imagenet flag are provided.
    """
    import torch.nn as nn
    import torchvision.models as models

    class DecapitatedResnet(nn.Module):
        # Wraps a stock ResNet but bypasses its final FC layer in forward().
        def __init__(self, base_encoder, pretrained):
            super(DecapitatedResnet, self).__init__()
            self.encoder = base_encoder(pretrained=pretrained)

        def forward(self, x):
            # Same forward pass function as used in the torchvision 'stock' ResNet code
            # but with the final FC layer removed.
            x = self.encoder.conv1(x)
            x = self.encoder.bn1(x)
            x = self.encoder.relu(x)
            x = self.encoder.maxpool(x)

            x = self.encoder.layer1(x)
            x = self.encoder.layer2(x)
            x = self.encoder.layer3(x)
            x = self.encoder.layer4(x)

            x = self.encoder.avgpool(x)
            x = torch.flatten(x, 1)

            return x

    model = DecapitatedResnet(models.__dict__[backbone], use_imagenet_weights)

    if use_imagenet_weights:
        if checkpoint_file is not None:
            raise Exception(
                "Either provide a weights checkpoint or the --imagenet flag, not both."
            )
        print(f"Created encoder with Imagenet weights")
    else:
        checkpoint = torch.load(checkpoint_file, map_location="cpu")
        state_dict = checkpoint["state_dict"]
        for k in list(state_dict.keys()):
            # retain only encoder_q up to before the embedding layer
            # (training did not use distributed code, so the 'module.' prefix
            # has to be removed / is not present here)
            if k.startswith("encoder_q.") and not k.startswith("encoder_q.fc"):
                # remove prefix from key names
                state_dict[k[len("encoder_q.") :]] = state_dict[k]
            # delete renamed or unused k
            del state_dict[k]

        # Verify that the checkpoint did not contain data for the final FC layer
        msg = model.encoder.load_state_dict(state_dict, strict=False)
        assert set(msg.missing_keys) == {"fc.weight", "fc.bias"}
        print(f"Loaded checkpoint {checkpoint_file}")
        print(msg)

    model = model.to(device)
    # `args` is the module-level config loaded at import time.
    if torch.cuda.device_count() > 1 and args.distributed:
        model = torch.nn.DataParallel(model)
    model.eval()

    return model

def extract_features(model, device, png, workers, out_size, batch_size):
    """Yield (features, coords) arrays for the non-empty tiles of *png*.

    Args:
        model: Headless encoder returning one feature vector per tile.
        device: Device the model lives on.
        png: Image array (as returned by cv2.imread).
        workers: DataLoader worker count (used only on CUDA).
        out_size: Tile side length in pixels.
        batch_size: Number of tiles per forward pass.

    Yields:
        (features, coords): numpy feature matrix and the matching tile
        coordinates for each batch.
    """
    # Extra workers / pinned memory only help when copying to a CUDA device.
    kwargs = ({"num_workers": workers, "pin_memory": True} if device.type == "cuda" else {})
    loader = DataLoader(
        dataset=BagOfTiles(png, resize_to=out_size),
        batch_size=batch_size,
        collate_fn=collate_features,
        **kwargs,
    )
    i=0
    with torch.no_grad():
        for batch, coords in loader:
            # NOTE(review): debug dump — writes a test_<i>.png grid to the CWD
            # for every batch; looks like leftover debugging, consider removing
            # for production runs.
            save_batch(batch,i)
            i+=1
            batch = batch.to(device, non_blocking=True)
            features = model(batch).cpu().numpy()
            yield features, coords


def save_batch(batch, i):
    """Debug helper: save *batch* as an 8-wide tiled grid named test_<i>.png."""
    from torchvision.utils import make_grid, save_image

    save_image(
        make_grid(batch, nrow=8, padding=2, normalize=True),
        f'test_{i}.png',
    )

if __name__ == "__main__":

    # Pick the fastest available device and build the tile encoder once.
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model = load_encoder(
        backbone=args.backbone,
        checkpoint_file=args.checkpoint,
        use_imagenet_weights=args.imagenet,
        device=device,
    )
    failed_paths = []
    filenames = slect_image_id(df_sample_fileter)
    for png_path in tqdm(filenames):
        try:
            # e.g. '1234_thumbnail.png' -> stem '1234_thumbnail' -> slide id '1234'
            stem, _ = os.path.splitext(os.path.basename(png_path))
            slide_id = stem.split("_")[0]
            wip_file_path = os.path.join(args.output_dir, slide_id + "_wip.h5")
            output_file_path = os.path.join(args.output_dir, slide_id + "_features.h5")
            os.makedirs(args.output_dir, exist_ok=True)
            # Re-extract from scratch if a previous run already produced features.
            if os.path.exists(output_file_path):
                os.remove(output_file_path)

            png = cv2.imread(png_path)
            generator = extract_features(
                model,
                device,
                png,
                args.workers,
                args.out_size,
                args.batch_size,
            )
            count_features = 0
            # Write to a temporary *_wip.h5 first; renaming afterwards makes the
            # final *_features.h5 appear only when extraction fully succeeded.
            with h5py.File(wip_file_path, "w") as file:
                for features, coords in generator:
                    count_features += features.shape[0]
                    write_to_h5(file, {"features": features, "coords": coords})
            os.rename(wip_file_path, output_file_path)

        except Exception as e:
            print(f"Failed to process {png_path}: {e}")
            failed_paths.append(png_path)
            # BUG FIX: np.savetxt takes the filename first, then the array —
            # the original call had the arguments swapped and always raised.
            np.savetxt(f"{stem}_error.txt", np.array(failed_paths), fmt='%s')
            continue


