'''
Extract features with the pretrained Image/Text Encoders.

- In this version, image_encoder and text_encoder were trained with RN50_PubmedBERT_200.
- Main difference from earlier versions: MoCo and the Fusion Module (which fused
  text and image features) were removed during training, so this can be viewed
  as MedCLIP-base.

P.S. Attention,
1. The resulting image_feature and text_feature have dimension 1024.
2. text_encoder's raw output has feature_dim = 768, because that is how the pretrained [PubmedBERT](https://huggingface.co/microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext) is configured.
3. But I accidentally set img_encoder: feature_dim = 1024; so `text_projection` is needed to map 768 -> 1024.
4. Final result: image_feature: [batch_size, 1024]; text_feature: [batch_size, 1024].
'''

# %%
import torch
from transformers import AutoTokenizer

# %% [markdown]

# Which pretraining corpus the checkpoints come from: 'PMC' or 'ROCO'.
DATASET = 'ROCO'

if DATASET == 'PMC':
    # Load Model pretrained on PMC
    PMC_DIR = '/remote-home/share/weixionglin/medclip/vlp/open_clip/src/extract_component/extracted/PMC'
    img_checkpoint_path = f"{PMC_DIR}/2023_02_01-12_24_48-model_RN50_PubmedBERT_200-p_amp/ImageEncoder.bin"
    text_checkpoint_path = f'{PMC_DIR}/2023_02_01-12_10_05-model_RN50_PubmedBERT_200-p_amp/TextEncoder.bin'
    text_proj_path = f'{PMC_DIR}/2023_02_01-12_10_05-model_RN50_PubmedBERT_200-p_amp/text_projection.bin'
elif DATASET == 'ROCO':
    # Load Model pretrained on ROCO
    ROCO_DIR = '/remote-home/share/weixionglin/medclip/vlp/open_clip/src/extract_component/extracted/ROCO'
    img_checkpoint_path = f"{ROCO_DIR}/2023_02_07-14_37_50-model_RN50_PubmedBERT_200-p_amp/ImageEncoder.bin"
    text_checkpoint_path = f'{ROCO_DIR}/2023_02_07-14_35_40-model_RN50_PubmedBERT_200-p_amp/TextEncoder.bin'
    text_proj_path = f'{ROCO_DIR}/2023_02_07-14_35_40-model_RN50_PubmedBERT_200-p_amp/text_projection.bin'
else:
    # Fail early with a clear message instead of a NameError at torch.load time.
    raise ValueError(f"Unknown DATASET {DATASET!r}; expected 'PMC' or 'ROCO'")


# NOTE(review): torch.load unpickles arbitrary objects — only load trusted checkpoints.
# map_location='cpu' makes loading independent of the GPU layout the checkpoints
# were saved on; the modules are moved to the target device later in the script.
image_encoder = torch.load(img_checkpoint_path, map_location='cpu')
image_encoder.eval()  # disable dropout/batchnorm updates for feature extraction

tokenizer = AutoTokenizer.from_pretrained('microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext')
text_encoder = torch.load(text_checkpoint_path, map_location='cpu')
text_encoder.eval()

# Projection matrix mapping the 768-d text features to the 1024-d image space
# (see module docstring).
text_projection = torch.load(text_proj_path, map_location='cpu')


# %%
# Hyper-parameters
batch_size = 32  # samples per forward pass
dataset_path = '/remote-home/share/medical/public/ROCO/test/radiology/processed_test.csv'  # ROCO radiology test split (CSV)
image_size = 224  # input resolution for the RN50 image encoder
device = 'cuda:1'  # target GPU for all encoder forward passes

# %%
# Prepare Data
from data import get_csv_dataset

from transform import image_transform

# Train-time transform (includes random augmentation); kept defined for parity
# with earlier versions of this script, but not used for extraction below.
preprocess_train = image_transform(
    image_size,
    is_train=True,
)
# Deterministic eval transform (resize / center-crop / normalize).
preprocess_val = image_transform(
    image_size,
    is_train=False,
)

# BUGFIX: feature extraction on the *test* split must use the deterministic
# eval transform. The original passed preprocess_train, so the extracted
# features were perturbed by random train-time augmentation.
dataloader = get_csv_dataset(dataset_path, batch_size, preprocess_val)

# %%
# Extract Image feature
# BUGFIX: Python 3 iterators have no .next() method (that was Python 2);
# use the builtin next() on the iterator instead.
batch_data = next(iter(dataloader))
images = batch_data['images'].to(device)  # [batch_size, 3, image_size, image_size]
image_encoder.to(device)
# Inference only — skip building the autograd graph to save memory.
with torch.no_grad():
    image_feature = image_encoder(images)['image_features']
print(f'image size: {images.shape}; image_feature: {image_feature.shape}')

# %%
# Encode the batch captions with the pretrained text encoder.
# text_output = text_encoder(batch_data)

# Tokenize to a fixed context length of 77 (pad and truncate).
encoded_input = tokenizer(batch_data["bert_input"], padding='max_length', truncation=True, max_length=77, return_tensors='pt')
# NOTE(review): only input_ids is moved to the device and passed to the encoder;
# encoded_input['attention_mask'] is never used, so padding tokens are attended
# to — confirm this matches how the text encoder was trained.
encoded_input['input_ids'] = encoded_input['input_ids'].to(device)  # [128, 77]

text_encoder.to(device)
text_output = text_encoder(encoded_input['input_ids'])

# Displayed by the notebook cell: the output keys of the encoder
# (expected to include 'last_hidden_state' and 'pooler_output').
text_output.keys()

# %%
# Per-token embeddings; presumably [batch_size, 77, 768] — see module docstring.
last_hidden_state = text_output['last_hidden_state'].to(device)
# Pooled sentence-level embedding; presumably [batch_size, 768].
pooler_output = text_output['pooler_output'].to(device)
print(last_hidden_state.shape)
print(pooler_output.shape)

# %%
# Project the 768-d pooled text feature into the 1024-d image feature space
# (the accidental img_encoder feature_dim mismatch noted in the module docstring).
text_projection = text_projection.to(device)
pooler_output = pooler_output @ text_projection
print(f"text_feature: {pooler_output.shape}")

# %%
