import copy
import torch
from torch import nn
import torch.nn.functional as F

from models import load_pretrained
from models.xvlm import build_vision_encoder

class XVLM(nn.Module):  # for domain pretrain
    """Vision-only wrapper around the X-VLM vision encoder for domain pretraining.

    Only the vision tower is built here (`load_params=False`); pretrained
    weights are loaded afterwards via :meth:`load_pretrained`.
    """

    def __init__(self, config):
        super().__init__()
        # build_vision_encoder returns (encoder_module, feature_width);
        # the width is not used by this wrapper, only the encoder is kept.
        self.vision_encoder, _vision_width = build_vision_encoder(config, load_params=False)

    def load_pretrained(self, ckpt_rpath, config):
        """Load checkpoint weights from ``ckpt_rpath`` into this model (non-strict).

        Missing keys belonging to the vision encoder are filtered out of the
        report, since the vision tower built in ``__init__`` is expected to
        account for them.
        """
        # NOTE: this calls the module-level `load_pretrained` helper imported at
        # the top of the file, not this method (plain name -> global scope).
        state_dict = load_pretrained(ckpt_rpath, config, is_eval=True)

        # strict=False: tolerate partial overlap between checkpoint and model.
        msg = self.load_state_dict(state_dict, strict=False)
        print(f'load checkpoint from {ckpt_rpath}')
        print("missing_keys: ", [p for p in msg.missing_keys if 'vision_encoder' not in p])
        print("unexpected_keys: ", msg.unexpected_keys)

    def forward(self, image):
        """Return the vision-encoder embeddings for ``image``."""
        return self.vision_encoder(image)