import os
os.environ["OPENCV_IO_MAX_IMAGE_PIXELS"] = pow(2,40).__str__()
import cv2
import numpy as np
import pandas as pd
from PIL import  Image
from tqdm import tqdm


import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import transforms
import torchvision.models as models
from torch.utils.data import DataLoader, Dataset


class Attn_Net_Gated(nn.Module):
    """Gated attention scoring head (attention-based MIL, Ilse et al. style).

    For each of the N input feature vectors, emits one attention logit per
    class; the caller is expected to softmax over N.
    """

    def __init__(self, L=1024, D=256, dropout=False, p_dropout_atn=0.25, n_classes=1):
        super(Attn_Net_Gated, self).__init__()

        # Two parallel branches: a tanh "content" branch and a sigmoid gate.
        branch_a = [nn.Linear(L, D), nn.Tanh()]
        branch_b = [nn.Linear(L, D), nn.Sigmoid()]

        if dropout:
            branch_a.append(nn.Dropout(p_dropout_atn))
            branch_b.append(nn.Dropout(p_dropout_atn))

        self.attention_a = nn.Sequential(*branch_a)
        self.attention_b = nn.Sequential(*branch_b)
        # Final projection of the gated D-dim embedding to one logit per class.
        self.attention_c = nn.Linear(D, n_classes)

    def forward(self, x):
        gated = self.attention_a(x) * self.attention_b(x)  # element-wise gate
        scores = self.attention_c(gated)                   # N x n_classes
        return scores, x


class AttentionNet(nn.Module):
    """Attention-based MIL classifier over a bag of tile features.

    Input is an N x input_feature_size matrix of tile embeddings; the gated
    attention head pools them into one slide embedding per class, and an
    independent linear classifier scores each class.
    """

    def __init__(
        self,
        model_size="small",
        input_feature_size=1024,
        dropout=True,
        p_dropout_fc=0.25,
        p_dropout_atn=0.25,
        n_classes=2,
    ):
        super(AttentionNet, self).__init__()
        size_dict = {
            "micro": [input_feature_size, 384, 128],
            "tiny": [input_feature_size, 384, 256],
            "small": [input_feature_size, 512, 256],
            "big": [input_feature_size, 512, 384],
        }
        dims = size_dict[model_size]

        # From experiments, a first FC layer that reduces dimension helps even
        # when dims[0] == dims[1].
        layers = [nn.Linear(dims[0], dims[1]), nn.ReLU()]
        if dropout:
            layers.append(nn.Dropout(p_dropout_fc))

        # The gated attention network has one head per class.
        layers.append(
            Attn_Net_Gated(
                L=dims[1],
                D=dims[2],
                dropout=dropout,
                p_dropout_atn=p_dropout_atn,
                n_classes=n_classes,
            )
        )
        self.attention_net = nn.Sequential(*layers)

        # An independent linear layer predicts each class from its own
        # attention-pooled embedding.
        self.classifiers = nn.ModuleList(
            nn.Linear(dims[1], 1) for _ in range(n_classes)
        )

        self.n_classes = n_classes

        # Xavier-normal weights and zero biases for every linear layer.
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_normal_(module.weight)
                module.bias.data.zero_()

    def forward(self, h):
        A_, h = self.attention_net(h)      # N x K attention logits
        A_raw = torch.transpose(A_, 1, 0)  # K x N
        A = F.softmax(A_raw, dim=1)        # normalize over the N tiles
        M = torch.mm(A, h)                 # K slide-level embeddings

        # One attention-pooled embedding and one classifier per class.
        per_class = [self.classifiers[c](M[c]) for c in range(self.n_classes)]
        logits = torch.cat(per_class).view(1, self.n_classes)
        Y_hat = torch.topk(logits, 1, dim=1)[1]
        Y_prob = F.softmax(logits, dim=1)

        # Note that we removed the clustering objective.
        return logits, Y_prob, Y_hat, A_raw, M

def slect_image_id(df, path):
    """Return the thumbnail file path for every row of *df*, in row order.

    Fix: the original indexed ``df['image_id'][id]`` with a positional
    counter, which is label-based indexing and raises KeyError whenever the
    DataFrame does not have a default 0..n-1 index (e.g. after filtering).
    Iterating the column's values is positional and index-safe.

    NOTE: the misspelled name ("slect") is kept for caller compatibility.

    :param df: DataFrame with an ``image_id`` column.
    :param path: directory containing ``<image_id>_thumbnail.png`` files.
    :return: list of absolute file paths, one per row.
    """
    return [
        os.path.join(path, f"{image_id}_thumbnail.png")
        for image_id in df["image_id"]
    ]

def crop_rect_from_png(png, idx, resize_to):
    """Return the idx-th square tile of side ``resize_to`` and its [x, y, w, h].

    Tiles are enumerated down the first image axis, then across: the number
    of tiles per column is ``png.shape[0] // resize_to``.
    """
    tiles_per_col = png.shape[0] // resize_to
    col, row = divmod(idx, tiles_per_col)
    x = row * resize_to
    y = col * resize_to
    tile = png[x:x + resize_to, y:y + resize_to]
    return tile, [x, y, resize_to, resize_to]

def tile_is_not_empty(tile):
    """Return True when the tile has enough non-background content to keep.

    A value of exactly 0 is treated as background (black). The tile is kept
    when at most 50% of its values are black.

    Fixes: the old inline comment claimed a 20% threshold while the code used
    0.5 — the comment now matches the actual 50% cutoff. The commented-out
    predecessor (reject on any black pixel) was removed.

    :param tile: numpy array of pixel values (any shape).
    :return: bool, True if the tile should be kept.
    """
    # Fraction of array entries equal to 0 (mean of a boolean mask).
    black_ratio = np.mean(tile == 0)
    return bool(black_ratio <= 0.5)



class BagOfTiles(Dataset):
    """Dataset that cuts a slide thumbnail into fixed-size square tiles.

    Tiles are enumerated via ``crop_rect_from_png``; each item is
    (transformed tile tensor with a leading batch dim, [x, y, w, h] coords,
    keep-flag from the black-pixel filter ``tile_is_not_empty``).
    """

    def __init__(self, png, resize_to=196):
        self.png = png
        height, width = self.png.shape[:2]
        # Number of complete tiles that fit inside the image.
        self.length_tiles = (height // resize_to) * (width // resize_to)
        self.resize_to = resize_to

        self.roi_transforms = transforms.Compose(
            [transforms.Resize(resize_to), transforms.ToTensor()]
        )

    def __len__(self):
        return self.length_tiles

    def __getitem__(self, idx):
        raw_tile, coords = crop_rect_from_png(self.png, idx, resize_to=self.resize_to)
        keep = tile_is_not_empty(raw_tile)
        side_a, side_b = raw_tile.shape[:2]
        assert side_a == side_b, "input image is not a square"
        pil_tile = Image.fromarray(raw_tile)
        transformed = self.roi_transforms(pil_tile).unsqueeze(0)
        return transformed, coords, keep
    
    
def collate_features(batch):
    """Collate for BagOfTiles: keep only tiles whose keep-flag is True.

    Returns [images, coords] where images is a (B, C, H, W) tensor and
    coords a (B, 4) ndarray. NOTE: raises if every tile was filtered out.
    """
    kept = [(img, xy) for img, xy, keep in batch if keep]
    images = torch.cat([img for img, _ in kept], dim=0)
    coords = np.vstack([xy for _, xy in kept])
    return [images, coords]


def load_encoder(backbone, checkpoint_file, use_imagenet_weights, device):
    """Build a headless ResNet feature extractor and load MoCo query weights.

    :param backbone: torchvision model name, looked up in ``models.__dict__``.
    :param checkpoint_file: MoCo checkpoint whose ``state_dict`` contains
        ``encoder_q.*`` keys.
    :param use_imagenet_weights: passed through as ``pretrained=`` to the
        torchvision constructor.
    :param device: device the returned model is moved to.
    :return: the model, in eval mode, on *device*.
    """

    class DecapitatedResnet(nn.Module):
        """ResNet with the classification head removed; outputs pooled features."""

        def __init__(self, base_encoder, pretrained):
            super(DecapitatedResnet, self).__init__()
            self.encoder = base_encoder(pretrained=pretrained)

        def forward(self, x):
            enc = self.encoder
            x = enc.maxpool(enc.relu(enc.bn1(enc.conv1(x))))
            for stage in (enc.layer1, enc.layer2, enc.layer3, enc.layer4):
                x = stage(x)
            x = enc.avgpool(x)
            return torch.flatten(x, 1)

    model = DecapitatedResnet(models.__dict__[backbone], use_imagenet_weights)

    # Keep only the MoCo query-encoder weights, stripped of their prefix and
    # without the projection head (fc); everything else is discarded.
    checkpoint = torch.load(checkpoint_file, map_location="cpu")
    prefix = "encoder_q."
    state_dict = {
        key[len(prefix):]: value
        for key, value in checkpoint["state_dict"].items()
        if key.startswith(prefix) and not key.startswith(prefix + "fc")
    }
    # strict=False: the decapitated model has no fc layer to fill.
    model.encoder.load_state_dict(state_dict, strict=False)

    model = model.to(device)
    model.eval()

    return model

def extract_features(model, device, png, workers, out_size, batch_size):
    """Yield (features, coords) batches for every non-empty tile of *png*.

    Tiles come from BagOfTiles / collate_features; *model* encodes each batch
    and the features are returned as a numpy array alongside the tile coords.
    """
    if device.type == "cuda":
        loader_kwargs = {"num_workers": workers, "pin_memory": True}
    else:
        loader_kwargs = {}
    loader = DataLoader(
        dataset=BagOfTiles(png, resize_to=out_size),
        batch_size=batch_size,
        collate_fn=collate_features,
        **loader_kwargs,
    )
    with torch.no_grad():
        for tiles, coords in loader:
            tiles = tiles.to(device, non_blocking=True)
            yield model(tiles).cpu().numpy(), coords
            
# ---- Run configuration ----
# NOTE(review): dry_run=True selects the Kaggle paths — the flag name reads
# inverted; confirm which environment it is meant to select.
dry_run = False

# ---- Path configuration (Kaggle vs. local workstation) ----
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
if dry_run:
    # Kaggle notebook paths.
    df_test = pd.read_csv('/kaggle/input/UBC-OCEAN/test.csv')
    moco_checkpoint_file_path = r'/kaggle/input/moco-depth-196/checkpoint_4770.pth.tar'
    attention_model_path = r'/kaggle/input/attention-70-checkpoint/attention_70_checkpoint.pt'
    testdatasetpath = r'/kaggle/input/UBC-OCEAN/test_thumbnails'
else:
    # Local workstation paths.
    df_test = pd.read_csv('/home/fd_chen/kaggle/cancer/UBC-OCEAN/test.csv')
    moco_checkpoint_file_path = r'/home/fd_chen/kaggle/cancer/exp/test08-part-data-01/checkpoint_4770.pth.tar'
    attention_model_path = r'/home/fd_chen/kaggle/cancer/runs/small_random_hp0_e883d17_20231115-1056/70_checkpoint.pt'
    testdatasetpath = r'/home/fd_chen/kaggle/cancer/UBC-OCEAN/test_thumbnails'

# Thumbnail file path for every test slide, in DataFrame row order.
filenames = slect_image_id(df_test, testdatasetpath)

# One image id per test slide.
slide_ids = df_test["image_id"]

# ---- Feature-extraction encoder (MoCo-pretrained ResNet-50) ----
model = load_encoder(
    backbone="resnet50",
    checkpoint_file=moco_checkpoint_file_path,
    use_imagenet_weights=False,
    device=device,
)


# ---- Attention-MIL classifier ----
attention_model = AttentionNet(
    model_size='small',
    input_feature_size=2048,  # ResNet-50 pooled feature dimension
    dropout=True,
    p_dropout_fc=0.5,
    p_dropout_atn=0.25,
    n_classes=5,
)

# Load the trained classifier weights and switch to inference mode.
# map_location=device: a checkpoint saved on GPU would otherwise fail to
# deserialize on a CPU-only host.
attention_model.load_state_dict(torch.load(attention_model_path, map_location=device))
attention_model.to(device)
attention_model.eval()


# ---- Predicted class index -> competition label string ----
labels_to_class = {
    0: 'CC',
    1: 'EC',
    2: 'HGSC',
    3: 'LGSC',
    4: 'MC',
}


# ---- Inference over all test thumbnails ----
with torch.no_grad():
    for png_path in tqdm(filenames):
        # "id" renamed to avoid shadowing the builtin.
        base_name, _ = os.path.splitext(os.path.basename(png_path))
        image_id = base_name.split("_")[0]
        # NOTE(review): cv2.imread returns BGR; confirm the encoder was
        # trained on BGR tiles, otherwise channels must be swapped here.
        png = cv2.imread(png_path)
        generator = extract_features(
            model,
            device,
            png,
            8,     # DataLoader workers (only used on CUDA)
            256,   # tile side in pixels
            1024,  # tiles per encoder batch
        )
        # BUG FIX: the previous version classified each feature batch
        # separately and overwrote the label every iteration, so slides with
        # more than one batch (> 1024 tiles) were labeled from the LAST batch
        # only. Aggregate the features of ALL tiles first, then classify the
        # whole bag once.
        feature_batches = [torch.from_numpy(features) for features, _ in generator]
        if not feature_batches:
            # No usable tiles for this slide; leave its label unset (NaN).
            continue
        features = torch.cat(feature_batches, dim=0).to(device)
        _, _, Y_hat, _, _ = attention_model(features)
        # Store a plain int (not an ndarray) so the label -> string map works.
        df_test.loc[df_test['image_id'] == int(image_id), 'label'] = int(Y_hat.item())

df_test['label'] = df_test['label'].map(labels_to_class)
submission_df = df_test[["image_id", "label"]]
submission_df.to_csv("submission.csv", index=False)





