import open_clip
from dataclasses import dataclass, field
from typing import Tuple, Type
from torch import nn
import torchvision
import torch

@dataclass
class OpenCLIPNetworkConfig:
    """Configuration for :class:`OpenCLIPNetwork`.

    Bundles the OpenCLIP architecture/weights selection, the embedding
    dimensionality, and the default positive/negative query phrases used
    for relevancy computation.
    """

    # Target class instantiated from this config (nerfstudio-style pattern).
    _target: Type = field(default_factory=lambda: OpenCLIPNetwork)
    # OpenCLIP architecture name, passed to open_clip.create_model_and_transforms.
    clip_model_type: str = "ViT-B-16"
    # NOTE(review): machine-specific checkpoint path — alternatives seen in history:
    # the "laion2b_s34b_b88k" pretrained tag or ./ckpts/open_clip_pytorch_model.bin.
    clip_model_pretrained: str = "/mnt/c/Users/cyt/Downloads/open_clip_pytorch_model.bin"
    # Output embedding dimension of the chosen model (512 for ViT-B-16).
    clip_n_dims: int = 512
    # Canonical negative phrases contrasted against each positive query.
    negatives: Tuple[str, ...] = ("object", "things", "stuff", "texture")
    # Initial positive query phrases (replaced at runtime via set_positives).
    positives: Tuple[str, ...] = ("",)

class OpenCLIPNetwork(nn.Module):
    """Wrapper around an OpenCLIP model.

    Embeds images and text with CLIP and computes per-pixel/per-ray relevancy
    of positive query phrases against a fixed set of negative phrases
    (LERF-style paired softmax).
    """

    def __init__(self, config: "OpenCLIPNetworkConfig"):
        super().__init__()
        self.config = config
        # CLIP's standard preprocessing: resize to the model's input resolution
        # and normalize with the OpenAI CLIP channel statistics.
        self.process = torchvision.transforms.Compose(
            [
                torchvision.transforms.Resize((224, 224)),
                torchvision.transforms.Normalize(
                    mean=[0.48145466, 0.4578275, 0.40821073],
                    std=[0.26862954, 0.26130258, 0.27577711],
                ),
            ]
        )
        self.device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
        self.model = self.init_clip_model()
        self.tokenizer = open_clip.get_tokenizer(self.config.clip_model_type)
        self.clip_n_dims = self.config.clip_n_dims
        self.positives = self.config.positives
        self.negatives = self.config.negatives

        # Pre-compute unit-normalized text embeddings for the configured phrases.
        self.pos_embeds, self.neg_embeds = self.init_pos_neg_embeds()

    @property
    def name(self) -> str:
        """Human-readable identifier: model type + pretrained weights source."""
        return "openclip_{}_{}".format(self.config.clip_model_type, self.config.clip_model_pretrained)

    @property
    def embedding_dim(self) -> int:
        """Dimensionality of the CLIP embedding space."""
        return self.config.clip_n_dims

    @torch.no_grad()
    def set_positives(self, text_list):
        """Replace the positive query phrases and refresh their embeddings.

        Args:
            text_list: iterable of query strings.
        """
        self.positives = text_list
        tokens = torch.cat([self.tokenizer(phrase) for phrase in self.positives]).to(self.device)
        self.pos_embeds = self.model.encode_text(tokens)
        # Unit-normalize so dot products in get_relevancy are cosine similarities.
        self.pos_embeds /= self.pos_embeds.norm(dim=-1, keepdim=True)

    @torch.no_grad()
    def get_relevancy(self, embed: torch.Tensor, positive_id: int) -> torch.Tensor:
        """Relevancy of one positive phrase vs. the hardest negative, per ray.

        Args:
            embed: (rays, clip_n_dims) unit-normalized CLIP embeddings.
            positive_id: index into ``self.positives`` selecting the query.

        Returns:
            (rays, 2) tensor; column 0 is the positive-class probability
            against the most distracting negative phrase.
        """
        phrases_embeds = torch.cat([self.pos_embeds, self.neg_embeds], dim=0)
        p = phrases_embeds.to(embed.dtype)  # phrases x clip_n_dims
        # Cosine similarity of every ray embedding with every phrase.
        output = torch.mm(embed, p.T)  # rays x phrases
        # Score of the selected positive phrase.
        positive_vals = output[..., positive_id : positive_id + 1]  # rays x 1
        # Scores of all negative phrases (they follow the positives in p).
        negative_vals = output[..., len(self.positives) :]  # rays x N_neg
        # Pair the positive score with each negative score.
        repeated_pos = positive_vals.repeat(1, len(self.negatives))  # rays x N_neg
        sims = torch.stack((repeated_pos, negative_vals), dim=-1)  # rays x N_neg x 2
        # Pairwise softmax with temperature 1/10.
        softmax = torch.softmax(10 * sims, dim=-1)  # rays x N_neg x 2
        # Most distracting negative = the pair with the lowest positive probability.
        best_id = softmax[..., 0].argmin(dim=1)  # rays
        # Gather that pair's [positive, negative] probabilities for each ray.
        return torch.gather(
            softmax, 1, best_id[..., None, None].expand(best_id.shape[0], len(self.negatives), 2)
        )[:, 0, :]

    def encode_image(self, input):
        """Preprocess an image batch and return its CLIP image embeddings.

        The input is cast to the model's parameter dtype instead of an
        unconditional ``.half()`` — the model is loaded with precision="fp32"
        (see init_clip_model), so forcing fp16 inputs raised a dtype mismatch.
        """
        processed_input = self.process(input)
        model_dtype = next(self.model.parameters()).dtype
        return self.model.encode_image(processed_input.to(model_dtype))

    def encode_text(self, text_list, device):
        """Tokenize *text_list* and return raw (unnormalized) text embeddings."""
        tokens = self.tokenizer(text_list).to(device)
        return self.model.encode_text(tokens)

    def init_clip_model(self):
        """Load the configured OpenCLIP model on ``self.device`` in eval mode."""
        model, _, _ = open_clip.create_model_and_transforms(
            self.config.clip_model_type,
            pretrained=self.config.clip_model_pretrained,
            # fp32 keeps CPU inference working; use "fp16" for GPU-only setups.
            precision="fp32",
        )
        model.eval().to(self.device)
        return model

    @torch.no_grad()
    def init_pos_neg_embeds(self):
        """Encode and unit-normalize the configured positive/negative phrases.

        Returns:
            (pos_embeds, neg_embeds) with shape (N_pos, dim) / (N_neg, dim).
        """
        pos_tokens = torch.cat([self.tokenizer(phrase) for phrase in self.positives]).to(self.device)
        pos_embeds = self.model.encode_text(pos_tokens)
        neg_tokens = torch.cat([self.tokenizer(phrase) for phrase in self.negatives]).to(self.device)
        neg_embeds = self.model.encode_text(neg_tokens)
        pos_embeds /= pos_embeds.norm(dim=-1, keepdim=True)
        neg_embeds /= neg_embeds.norm(dim=-1, keepdim=True)
        return pos_embeds, neg_embeds

    def get_max_across(self, sem_map):
        """Per-phrase relevancy over a dense semantic map.

        Args:
            sem_map: (h, w, clip_n_dims) tensor of CLIP embeddings —
                assumed unit-normalized like the phrase embeddings (TODO confirm
                against the caller).

        Returns:
            (1, n_phrases, h, w) tensor of positive-class probabilities.
        """
        n_phrases = len(self.positives)
        h, w, _ = sem_map.shape
        clip_output = sem_map.flatten(0, 1)  # (h*w, clip_n_dims)

        phrase_sims = []
        for j in range(n_phrases):
            # probs: (h*w, 2) with columns [positive prob, negative prob].
            probs = self.get_relevancy(clip_output, j)
            phrase_sims.append(probs[..., 0:1])  # keep last dim: (h*w, 1)
        # The singleton leading "level" dim is kept for interface compatibility.
        relev_map = torch.stack([torch.stack(phrase_sims)]).view(1, n_phrases, h, w)
        return relev_map