from clip.simple_tokenizer import SimpleTokenizer as _Tokenizer
from clip import clip
from utils import weights_init_classifier,weights_init_kaiming

from torch import nn
import torch
from timm.models.layers import trunc_normal_


def load_clip_to_cpu(backbone_name, h_resolution, w_resolution, vision_stride_size):
    """Download a pretrained CLIP checkpoint and rebuild it on the CPU.

    The checkpoint may be either a TorchScript (JIT) archive or a plain
    state-dict file; both layouts are handled. The spatial resolution and
    patch stride are forwarded to ``clip.build_model`` so the visual branch
    can be re-shaped for non-square ReID inputs.
    """
    checkpoint_url = clip._MODELS[backbone_name]
    checkpoint_path = clip._download(checkpoint_url)

    state_dict = None
    try:
        # Preferred path: the file is a TorchScript archive.
        model = torch.jit.load(checkpoint_path, map_location="cpu").eval()
    except RuntimeError:
        # Fallback: the file is a raw state dict.
        state_dict = torch.load(checkpoint_path, map_location="cpu")

    # When the JIT load succeeded, pull the weights out of the scripted model.
    return clip.build_model(
        state_dict or model.state_dict(),
        h_resolution,
        w_resolution,
        vision_stride_size,
    )

class TextEncoder(nn.Module):
    """CLIP's text branch exposed as a standalone module.

    Borrows the transformer, positional embedding, final LayerNorm and text
    projection from an already-built CLIP model and runs pre-computed prompt
    embeddings through them.
    """

    def __init__(self, clip_model):
        super().__init__()
        self.transformer = clip_model.transformer
        self.positional_embedding = clip_model.positional_embedding
        self.ln_final = clip_model.ln_final
        self.text_projection = clip_model.text_projection
        self.dtype = clip_model.dtype

    def forward(self, prompts, tokenized_prompts):
        """Encode prompt embeddings into per-sequence text features.

        ``prompts`` holds token embeddings (batch, length, dim);
        ``tokenized_prompts`` holds the matching token ids, used to locate
        the end-of-text position.
        """
        seq = prompts + self.positional_embedding.type(self.dtype)
        # Transformer expects LND layout; restore NLD afterwards.
        seq = self.transformer(seq.permute(1, 0, 2)).permute(1, 0, 2)
        seq = self.ln_final(seq).type(self.dtype)

        # Take the feature at the EOT token (the highest token id per row)
        # and project it into the joint embedding space.
        eot_index = tokenized_prompts.argmax(dim=-1)
        rows = torch.arange(seq.shape[0])
        return seq[rows, eot_index] @ self.text_projection

class PromptLearner(nn.Module):
    """Learns per-identity context tokens spliced into a fixed prompt.

    The template "A photo of a X X X X person" is tokenized once; the four
    ``X`` placeholders are replaced at run time by learned class-specific
    vectors, while the surrounding words stay frozen as buffers.
    """

    def __init__(self, num_class, dtype, token_embedding):
        super().__init__()

        ctx_init = 'A photo of a X X X X person'
        ctx_dim = 512
        n_ctx = 4  # tokens preceding the placeholders ("photo of a" + start)

        tokenized_prompts = clip.tokenize(ctx_init).cpu()
        with torch.no_grad():
            template_embed = token_embedding(tokenized_prompts).type(dtype)

        self.tokenized_prompts = tokenized_prompts

        # One learnable context vector per placeholder, per identity.
        n_cls_ctx = 4
        cls_vectors = torch.empty(num_class, n_cls_ctx, ctx_dim, dtype=dtype)
        nn.init.normal_(cls_vectors, std=0.02)
        self.cls_ctx = nn.Parameter(cls_vectors)

        # Frozen embeddings before and after the placeholder slots.
        self.register_buffer('token_prefix', template_embed[:, :n_ctx + 1, :])
        self.register_buffer('token_suffix', template_embed[:, n_ctx + 1 + n_cls_ctx:, :])
        self.num_class = num_class
        self.n_cls_ctx = n_cls_ctx

    def forward(self, label):
        """Assemble full prompt embeddings for a batch of identity labels."""
        batch = label.shape[0]
        pieces = (
            self.token_prefix.expand(batch, -1, -1),
            self.cls_ctx[label],
            self.token_suffix.expand(batch, -1, -1),
        )
        return torch.cat(pieces, dim=1)
    
class ImageEncoder(nn.Module):
    """CLIP-backed person re-identification model (image + prompt branches).

    Wraps a CLIP visual encoder for image features, plus a PromptLearner and
    TextEncoder for identity-conditioned text features. Optionally adds
    camera/view (SIE) embeddings to the visual tokens for ViT backbones.

    Bug fixes vs. the previous revision:
      * ``self.neck_feat`` was read in eval-mode ``forward`` but never set,
        raising ``AttributeError`` at test time; it is now a constructor
        parameter with a backward-compatible default.
      * The SIE_VIEW-only branch logged the camera count as "camera number";
        it now logs the view count.
      * ``!= None`` tensor comparisons replaced by ``is not None``.
    """

    def __init__(self,
                 num_class,
                 camera_num=0,
                 view_num=0,
                 model_name='ViT-B-16',
                 SIE_CAMERA=False,
                 SIE_VIEW=False,
                 neck_feat='before',
                 ) -> None:
        super().__init__()

        assert model_name in ['ViT-B-16', 'RN50']
        self.model_name = model_name
        # Feature widths: raw backbone features vs. projected (CLIP space).
        if model_name == 'ViT-B-16':
            self.in_planes = 768
            self.in_planes_proj = 512
        elif model_name == 'RN50':
            self.in_planes = 2048
            self.in_planes_proj = 1024

        self.num_classes = num_class
        self.camera_num = camera_num
        self.view_num = view_num
        self.sie_coe = 3.0  # scale applied to the SIE embedding
        # Which eval feature to return: 'after' = post-BN, anything else = pre-BN.
        self.neck_feat = neck_feat

        # Identity classifiers over both feature spaces (training only).
        self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)
        self.classifier.apply(weights_init_classifier)
        self.classifier_proj = nn.Linear(self.in_planes_proj, self.num_classes, bias=False)
        self.classifier_proj.apply(weights_init_classifier)

        # BNNeck: batch-norm bottlenecks with frozen bias.
        self.bottleneck = nn.BatchNorm1d(self.in_planes)
        self.bottleneck.bias.requires_grad_(False)
        self.bottleneck.apply(weights_init_kaiming)
        self.bottleneck_proj = nn.BatchNorm1d(self.in_planes_proj)
        self.bottleneck_proj.bias.requires_grad_(False)
        self.bottleneck_proj.apply(weights_init_kaiming)

        # 256x128 input resolution divided into 16x16 patches.
        self.vision_stride_size = 16
        self.h_resolution = 256 // self.vision_stride_size
        self.w_resolution = 128 // self.vision_stride_size

        clip_model = load_clip_to_cpu(
            model_name,
            self.h_resolution,
            self.w_resolution,
            self.vision_stride_size
        ).to('cpu')

        self.image_encoder = clip_model.visual

        # Side Information Embedding (SIE): one learned vector per
        # camera/view (or camera-view pair), added to the visual tokens.
        if SIE_CAMERA and SIE_VIEW:
            self.cv_embed = nn.Parameter(torch.zeros(camera_num * view_num, self.in_planes))
            trunc_normal_(self.cv_embed, std=.02)
            print('camera number is : {}'.format(camera_num))
        elif SIE_CAMERA:
            self.cv_embed = nn.Parameter(torch.zeros(camera_num, self.in_planes))
            trunc_normal_(self.cv_embed, std=.02)
            print('camera number is : {}'.format(camera_num))
        elif SIE_VIEW:
            self.cv_embed = nn.Parameter(torch.zeros(view_num, self.in_planes))
            trunc_normal_(self.cv_embed, std=.02)
            print('view number is : {}'.format(view_num))

        self.prompt_learner = PromptLearner(
            num_class,
            clip_model.dtype,
            clip_model.token_embedding
        )
        self.text_encoder = TextEncoder(clip_model)

    def forward(self, x=None, label=None, get_image=False, get_text=False, cam_label=None, view_label=None):
        """Multi-mode forward pass.

        * ``get_text=True``  — return text features for identity ``label``.
        * ``get_image=True`` — return the projected global image feature.
        * otherwise — training: ([cls_score, cls_score_proj],
          [last, mid, proj] features, proj feature); eval: concatenated
          (mid, proj) features, post- or pre-BN per ``self.neck_feat``.
        """
        if get_text:
            prompts = self.prompt_learner(label)
            return self.text_encoder(prompts, self.prompt_learner.tokenized_prompts)

        if get_image:
            image_features_last, image_features, image_features_proj = self.image_encoder(x)
            if self.model_name == 'RN50':
                return image_features_proj[0]
            elif self.model_name == 'ViT-B-16':
                # ViT: token 0 is the class token.
                return image_features_proj[:, 0]

        if self.model_name == 'RN50':
            image_features_last, image_features, image_features_proj = self.image_encoder(x)
            # Global-average-pool the spatial maps into per-image vectors.
            img_feature_last = nn.functional.avg_pool2d(
                image_features_last, image_features_last.shape[2:4]).view(x.shape[0], -1)
            img_feature = nn.functional.avg_pool2d(
                image_features, image_features.shape[2:4]).view(x.shape[0], -1)
            img_feature_proj = image_features_proj[0]

        elif self.model_name == 'ViT-B-16':
            # Select the SIE embedding matching the available side information.
            if cam_label is not None and view_label is not None:
                cv_embed = self.sie_coe * self.cv_embed[cam_label * self.view_num + view_label]
            elif cam_label is not None:
                cv_embed = self.sie_coe * self.cv_embed[cam_label]
            elif view_label is not None:
                cv_embed = self.sie_coe * self.cv_embed[view_label]
            else:
                cv_embed = None
            image_features_last, image_features, image_features_proj = self.image_encoder(x, cv_embed)
            img_feature_last = image_features_last[:, 0]
            img_feature = image_features[:, 0]
            img_feature_proj = image_features_proj[:, 0]

        feat = self.bottleneck(img_feature)
        feat_proj = self.bottleneck_proj(img_feature_proj)

        if self.training:
            cls_score = self.classifier(feat)
            cls_score_proj = self.classifier_proj(feat_proj)
            return [cls_score, cls_score_proj], [img_feature_last, img_feature, img_feature_proj], img_feature_proj

        else:
            if self.neck_feat == 'after':
                # Test with features after the BNNeck.
                return torch.cat([feat, feat_proj], dim=1)
            else:
                # Test with features before the BNNeck.
                return torch.cat([img_feature, img_feature_proj], dim=1)

