import torch
import torch.nn as nn
from transformers import ViTModel, ViTConfig


class Classifier(nn.Module):
    """ViT-backed image classifier: a ``ViTModel`` encoder plus a linear head.

    When ``config`` is None, a pretrained ViT is loaded from a local
    checkpoint directory; otherwise a fresh (untrained) ``ViTModel`` is built
    from the given ``ViTConfig``.

    NOTE(review): the original docstring claimed the position embeddings are
    changed for 3*24*24 inputs, but no such resizing happens anywhere in this
    class — to use smaller images, pass a ``ViTConfig`` with the desired
    ``image_size`` (see ``getConfig``). Confirm intent with the author.
    """

    # Default local checkpoint location; override via ``vit_model_path``.
    DEFAULT_VIT_MODEL_PATH = r'E:/Python/cache/huggingface/transformers/vit-base-patch16-224-in21k/'

    def __init__(self, config=None, num_classes: int = 10,
                 vit_model_path: str = DEFAULT_VIT_MODEL_PATH) -> None:
        """Build the encoder and the classification head.

        Args:
            config: optional ``ViTConfig``. If None, pretrained weights are
                loaded from ``vit_model_path``; otherwise an untrained model
                with this config is created.
            num_classes: output dimension of the linear classifier head.
            vit_model_path: local directory of the pretrained checkpoint.
                Only used when ``config`` is None. (Parameterized so the
                previously hard-coded path can be overridden; the default
                preserves the old behavior.)
        """
        super().__init__()
        self.vit_model_path = vit_model_path
        if config is None:
            self.vit = ViTModel.from_pretrained(self.vit_model_path)
            self.config = self.vit.config
            print(f'the default vit in {self.vit_model_path} is :\n{self.config}')
        else:
            self.config = config
            self.vit = ViTModel(config)
            print('create a vit model with no pre-trained parameters')
        self.classifier = nn.Linear(self.config.hidden_size, num_classes)

    def forward(self, x):
        """Return class logits for a batch of images.

        Args:
            x: image tensor accepted by ``ViTModel`` — presumably
               (batch, channels, height, width) matching ``config.image_size``;
               verify against the caller.

        Returns:
            Logits of shape (batch, num_classes), computed from the ViT
            ``pooler_output``.
        """
        x = self.vit(x)['pooler_output']
        return self.classifier(x)

    def freeze_encoder(self):
        """Disable gradients for the ViT encoder layers.

        Note: only ``self.vit.encoder`` is frozen — the patch/position
        embeddings and the pooler remain trainable.
        """
        for p in self.vit.encoder.parameters():
            p.requires_grad = False

    def load(self, filepath):
        """Load a state dict previously written by :meth:`save`."""
        self.load_state_dict(torch.load(filepath))
        print('load model successfully')

    def save(self, filename):
        """Save the full state dict (encoder + head) to ``filename``."""
        torch.save(self.state_dict(), filename)
        # Bug fix: the old message was a typo'd f-string with no placeholder
        # ("model save sussceefully at (unknown)") — now reports the path.
        print(f"model saved successfully at {filename}")

def getConfig():
    """Return a ViTConfig for small (32x32) images with 10% hidden dropout."""
    cfg = ViTConfig(
        image_size=32,
        hidden_dropout_prob=0.1,
    )
    return cfg