from PIL import Image
import requests
import torch
import pickle
from modelscope import CLIPProcessor, CLIPModel
from torch import nn
# Load the pretrained CLIP model and processor

# Chinese attribute prompts for zero-shot CLIP classification.
# Index-aligned with `text_inputs_processed_english` below: the classifier
# returns an index into whichever list was selected at construction time.
text_inputs_processed_chinese = [
    '一位女性',
    '一个拿着手提包的人',
    '一个背着单肩包的人',
    '一个背着双肩包的人',
    '一个穿着短袖衬衫的人',
    '一个穿着长袖衬衫的人',
    '一个穿着长裤的人',
    '一个穿着短裤的人',
    '一个拖着行李的人',
    '一个穿着白色上衣的人',
    '一个穿着黑色上衣的人',
    '一个穿着蓝色上衣的人',
    '一个穿着红色上衣的人',
    '一个穿着绿色上衣的人',
    '一个穿着黄色上衣的人',
    '一个穿着紫色上衣的人',
    '一个穿着粉色上衣的人',
    '一个穿着灰色上衣的人',
    '一个穿着白色下装的人',
    '一个穿着黑色下装的人',
    '一个穿着蓝色下装的人',
    '一个穿着红色下装的人',
    '一个穿着绿色下装的人',
    '一个穿着黄色下装的人',
    '一个穿着紫色下装的人',
    '一个穿着粉色下装的人',
    '一个穿着灰色下装的人',
    '一个拿着白色袋子的人',
    '一个拿着黑色袋子的人',
    '一个拿着蓝色袋子的人',
    '一个拿着红色袋子的人',
    '一个拿着绿色袋子的人',
    '一个拿着黄色袋子的人',
    '一个拿着紫色袋子的人',
    '一个拿着粉色袋子的人',
    '一个拿着灰色袋子的人',
    '一个高个子的人',
    '一个中等身高的人',
    '一个矮个子的人',
    '一个儿童',
    '一个成年人',
    '一个超重的人',
    '一个身材普通的人',
    '一个身材瘦削的人',
    '一个留着长发的人',
    '一个留着短发的人',
    '一个秃头的人',
    '一个戴着帽子的人',
    '一个穿着圆领上衣的人',
    '一个穿着有领上衣的人',
    '一个穿着长裙的人',
    '一个穿着短裙的人',
    '一个戴着太阳镜的人',
    '一个戴着口罩的人',
    '一个围着围巾的人',
    '一个拿着大袋子的人',
    '一个拿着小袋子的人'
]


# English attribute prompts for zero-shot CLIP classification.
# Index-aligned with `text_inputs_processed_chinese` above (same attribute at
# the same index), so prediction indices are comparable across languages.
text_inputs_processed_english =[
    'a female person',
    'a person carrying a handbag',
    'a person slinging a shoulder bag',
    'a person wearing a backpack',
    'a person wearing a short - sleeved shirt',
    'a person wearing a long - sleeved shirt',
    'a person wearing long pants',
    'a person wearing shorts',
    'a person pulling luggage',
    'a person in a white top',
    'a person in a black top',
    'a person in a blue top',
    'a person in a red top',
    'a person in a green top',
    'a person in a yellow top',
    'a person in a purple top',
    'a person in a pink top',
    'a person in a gray top',
    'a person wearing white bottoms',
    'a person wearing black bottoms',
    'a person wearing blue bottoms',
    'a person wearing red bottoms',
    'a person wearing green bottoms',
    'a person wearing yellow bottoms',
    'a person wearing purple bottoms',
    'a person wearing pink bottoms',
    'a person wearing gray bottoms',
    'a person holding a white bag',
    'a person holding a black bag',
    'a person holding a blue bag',
    'a person holding a red bag',
    'a person holding a green bag',
    'a person holding a yellow bag',
    'a person holding a purple bag',
    'a person holding a pink bag',
    'a person holding a gray bag',
    'a tall person',
    'a person of medium height',
    'a short person',
    'a child',
    'an adult',
    'an overweight person',
    'a person of average build',
    'a thin person',
    'a person with long hair',
    'a person with short hair',
    'a bald person',
    'a person wearing a hat',
    'a person wearing a round - neck top',
    'a person wearing a collared top',
    'a person wearing a long skirt',
    'a person wearing a short skirt',
    'a person wearing sunglasses',
    'a person wearing a mask',
    'a person wearing a scarf',
    'a person carrying a large bag',
    'a person carrying a small bag'
]





class HumanAttribute(nn.Module):
    """Zero-shot pedestrian-attribute classifier built on CLIP.

    Encodes a fixed prompt list (Chinese or English) into normalized text
    embeddings once, then classifies an image by cosine similarity between
    its embedding and the prompt embeddings.
    """

    def __init__(self, device='cpu', lan='cn', pickle_file='./human_attribute.pickle'):
        """
        Args:
            device: torch device string for the CLIP model ('cpu' or 'cuda').
                Bug fix: this argument was previously accepted but ignored.
            lan: 'cn' selects the Chinese CLIP checkpoint and prompts;
                any other value selects the English ones.
            pickle_file: path used by store_text_info/restore_text_info to
                cache the pre-computed text embeddings. Bug fix: previously
                those methods read self.pickle_file but __init__ never set it.
        """
        super().__init__()
        self.device = torch.device(device)
        self.pickle_file = pickle_file
        if lan == "cn":
            # NOTE(review): hard-coded local checkpoint paths — consider making
            # these configurable instead of machine-specific absolute paths.
            self.model = CLIPModel.from_pretrained(r"F:\temp2\TEMP\temp_tra\weights\clip_chinese\multi-modal_clip-vit-base-patch16_zh")
            self.processor = CLIPProcessor.from_pretrained(r"F:\temp2\TEMP\temp_tra\weights\clip_chinese\multi-modal_clip-vit-base-patch16_zh")
            self.text_inputs_processed = text_inputs_processed_chinese
        else:
            self.model = CLIPModel.from_pretrained(r"F:\temp2\TEMP\temp_tra\weights\clip_english\AI-ModelScope\clip-vit-large-patch14")
            self.processor = CLIPProcessor.from_pretrained(r"F:\temp2\TEMP\temp_tra\weights\clip_english\AI-ModelScope\clip-vit-large-patch14")
            self.text_inputs_processed = text_inputs_processed_english
        self.model.to(self.device)
        # Bug fix: run() reads self.text_features, which was never computed
        # anywhere. Pre-compute the prompt embeddings once at construction.
        self.text_features = self.text_process()

    def forward(self, x):
        """Return the L2-normalized image embedding for PIL image `x`."""
        return self.image_process(x)

    def store_text_info(self):
        """Cache the pre-computed text embeddings to `self.pickle_file`."""
        with open(self.pickle_file, 'wb') as f:
            pickle.dump(self.text_features, f)

    def restore_text_info(self):
        """Load cached text embeddings from `self.pickle_file`.

        WARNING: pickle deserialization executes arbitrary code — only load
        files written by this class, never untrusted input.
        """
        # Bug fix: the original leaked the file handle (pickle.load(open(...))).
        with open(self.pickle_file, 'rb') as f:
            return pickle.load(f)

    @torch.no_grad()
    def text_process(self):
        """Encode the prompt list; return L2-normalized text embeddings."""
        text_processed = self.processor(text=self.text_inputs_processed, return_tensors='pt', padding=True)
        # Move tokenized inputs to the model's device; assumes the processor
        # returns a dict-like batch of tensors — TODO confirm for modelscope.
        text_processed = {k: v.to(self.device) for k, v in text_processed.items()}
        text_features = self.model.get_text_features(**text_processed)
        return text_features / text_features.norm(dim=-1, keepdim=True)

    @torch.no_grad()
    def image_process(self, image):
        """Encode a PIL image; return its L2-normalized embedding.

        Raises:
            TypeError: if `image` is not a PIL.Image.Image.
        """
        # Bug fix: was an `assert`, which is silently stripped under `python -O`.
        if not isinstance(image, Image.Image):
            raise TypeError("image must be a PIL.Image.Image")
        image_processed = self.processor(images=image, return_tensors='pt')
        image_processed = {k: v.to(self.device) for k, v in image_processed.items()}
        image_features = self.model.get_image_features(**image_processed)
        return image_features / image_features.norm(dim=-1, keepdim=True)

    def run(self, image):
        """Return the index (as a tensor) of the best-matching prompt for `image`."""
        image_features = self.image_process(image)
        # Scale cosine similarities by CLIP's conventional logit factor (100)
        # before the softmax over prompts.
        logits_per_image = 100. * image_features @ self.text_features.T
        probs = logits_per_image.softmax(dim=1)
        return torch.argmax(probs, dim=1)
    
    
    
if __name__ == "__main__":
    model = HumanAttribute(pickle_file='./human_attribute.pickle',device='cuda')
    # 下载图像
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image.show()
    output = model.run(image)
    # print(text_inputs_processed[output])