# coding=UTF-8
# %%
# Extract features with the pretrained Image/Text Encoders
import torch
from transformers import AutoTokenizer

# %%
# Load Pretrained Model
# NOTE(review): torch.load here un-pickles *entire* nn.Module objects, not just
# state dicts, so the classes that define the encoders must be importable in
# this environment; also consider map_location= if this runs on a machine with
# a different GPU layout than the one that saved the checkpoint — TODO confirm.
img_checkpoint_path = "/remote-home/share/weixionglin/medclip/vlp/open_clip/src/extract_component/extracted/CLIP-RN/2023_02_10-01_19_27-model_CLIP-RN50x16-p_amp/ImageEncoder.bin"
image_encoder = torch.load(img_checkpoint_path)
image_encoder.eval()  # inference mode: freeze dropout / batch-norm statistics

# Tokenizer matching the text encoder's pretraining vocabulary.
tokenizer = AutoTokenizer.from_pretrained('microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext')
text_checkpoint_path = '/remote-home/share/weixionglin/medclip/vlp/open_clip/src/extract_component/extracted/CLIP-RN/2023_02_10-01_12_27-model_CLIP-RN50x16-p_amp/TextEncoder.bin'
text_encoder = torch.load(text_checkpoint_path)
text_encoder.eval()  # inference mode for feature extraction

# %%
# Hyperparameters
batch_size = 8        # samples per dataloader batch
dataset_path = '/remote-home/share/medical/public/ROCO/test/radiology/processed_test.csv'  # ROCO radiology *test* split
image_size = 384      # side length the image transform resizes/crops to
device = 'cuda:0'     # all tensors and both encoders are moved here below

# %%
# Prepare Data
from data import get_csv_dataset

from transform import image_transform

# Build both transforms: the train-time one (is_train=True, presumably with
# random augmentation — open_clip convention) and the deterministic eval one.
preprocess_train = image_transform(
    image_size,
    is_train=True,
)
preprocess_val = image_transform(
    image_size,
    is_train=False,
)

# BUG FIX: the original passed `preprocess_train` here although `dataset_path`
# points at the *test* csv and `preprocess_val` was created but never used.
# Feature extraction must be deterministic, so use the eval transform.
dataloader = get_csv_dataset(dataset_path, batch_size, preprocess_val)

# %%
# Extract Image feature
# BUG FIX: `iterator.next()` was removed in Python 3 (renamed __next__);
# use the builtin next() on the dataloader iterator instead.
batch_data = next(iter(dataloader))
images = batch_data['images'].to(device)  # [batch_size, 3, image_size, image_size]
image_encoder.to(device)
# Encoder returns a dict; 'image_features' holds the pooled image embeddings.
image_feature = image_encoder(images)['image_features']
print(f'image size: {images.shape}; feature size: {image_feature.shape}')

# %%
# Tokenize the batch captions and run them through the text encoder.
# max_length=77 matches CLIP's context length.
encoded_input = tokenizer(batch_data["bert_input"], padding='max_length', truncation=True, max_length=77, return_tensors='pt')
encoded_input['input_ids'] = encoded_input['input_ids'].to(device)  # [batch_size, 77]

text_encoder.to(device)
# NOTE(review): only input_ids are passed — the attention_mask produced by the
# tokenizer is never forwarded, so padding positions may be attended to;
# confirm whether the encoder builds its own mask internally.
text_output = text_encoder(encoded_input['input_ids'])

# Notebook cell: display the keys of the encoder's output dict.
text_output.keys()

# %%
# Pull out the token-level and pooled text representations and show their
# shapes. (.to(device) is kept as in the original pipeline.)
last_hidden_state = text_output['last_hidden_state'].to(device)
pooler_output = text_output['pooler_output'].to(device)
for tensor in (last_hidden_state, pooler_output):
    print(tensor.shape)

# %%
