import torch.nn as nn
import torch.nn.functional as F

from transformers import ViTImageProcessor, ViTModel

from peft import get_peft_model, LoraConfig

__all__ = ['ImageEncoder']

# vit base model from https://huggingface.co/google/vit-base-patch16-224
# vit large model from https://huggingface.co/google/vit-large-patch16-224

class ImageEncoder(nn.Module):
    """Image encoder wrapping a locally stored pretrained ViT backbone.

    Loads both the ViT image processor (exposed via :meth:`get_tokenizer`)
    and the ViT model from ``pretrained_dir``, and returns the backbone's
    last hidden states from :meth:`forward`.
    """

    # Maps accepted encoder names to the checkpoint sub-directory expected
    # under ``pretrained_dir`` (see the model cards linked at file top).
    _CHECKPOINT_DIRS = {
        'vit_base': 'vit-base-patch16-224',
        'vit_large': 'vit_large_224',
    }

    def __init__(self, pretrained_dir, image_encoder='base'):
        """
        Args:
            pretrained_dir (str): root directory holding the checkpoint folders.
            image_encoder (str): 'vit_base' or 'vit_large'; the shorthands
                'base' / 'large' (as the original docstring advertised) are
                also accepted and normalized.

        Raises:
            ValueError: if ``image_encoder`` names an unknown variant.
        """
        super(ImageEncoder, self).__init__()

        # Bug fix: the default value 'base' previously failed the
        # ``assert image_encoder in ['vit_base', 'vit_large']`` check, so the
        # default constructor call always crashed.  Normalize the documented
        # shorthand and validate with a real exception (``assert`` statements
        # are stripped under ``python -O``).
        if image_encoder in ('base', 'large'):
            image_encoder = 'vit_' + image_encoder
        if image_encoder not in self._CHECKPOINT_DIRS:
            raise ValueError(
                "image_encoder must be one of %s, got %r"
                % (sorted(self._CHECKPOINT_DIRS), image_encoder)
            )

        checkpoint_path = pretrained_dir + '/' + self._CHECKPOINT_DIRS[image_encoder] + '/'
        self.tokenizer = ViTImageProcessor.from_pretrained(checkpoint_path)
        self.model = ViTModel.from_pretrained(checkpoint_path)

        # NOTE(review): this LoRA config was constructed but never applied in
        # the original code (``get_peft_model`` is imported yet unused).  If
        # LoRA fine-tuning is intended, wrap the backbone with
        #     self.model = get_peft_model(self.model, self.lora_config)
        # TODO: confirm intent before enabling.
        self.lora_config = LoraConfig(
            r=16,
            lora_alpha=16,
            target_modules=["query", "key", "value"],
            lora_dropout=0.1,
            bias="none",
        )

    def get_tokenizer(self):
        """Return the ViT image processor loaded in ``__init__``."""
        return self.tokenizer

    def forward(self, pixel_values):
        """Run the ViT backbone on preprocessed images.

        Args:
            pixel_values: preprocessed image tensor — presumably
                (batch, channels, height, width) as produced by the image
                processor; TODO confirm against callers.

        Returns:
            The backbone's ``last_hidden_state`` tensor.
        """
        return self.model(pixel_values=pixel_values).last_hidden_state


if __name__ == "__main__":
    # One-off helper: pull the ViT checkpoint from the Hugging Face Hub into
    # the local pretrained-model directory that ImageEncoder reads from.
    from huggingface_hub import snapshot_download

    snapshot_download(
        repo_id="google/vit-base-patch16-224-in21k",
        local_dir="/data/jzw/mmfl/pretrained/vit-base-patch16-224-in21k",
        # local proxy for the Hub download; adjust or drop outside this host
        proxies={"https": "http://localhost:7890"},
        max_workers=8,
    )
