import os, sys
from pyexpat import model
import re
import json
import torch
import numpy as np
import itertools
import tqdm
from torch.cuda.amp import autocast

sys.path.insert(0, './src')

from src.clip.model import build_model,CLIP
from src.clip.clip import tokenize

def load_attr_dict(file):
    """Load the attribute dictionary from a JSON file.

    Each JSON value is a list of strings of the form "a=b=c"; every string is
    split on '=' and all resulting pieces are flattened into one list per
    attribute key.
    """
    with open(file, 'r', encoding='utf-8') as f:
        raw = json.load(f)
    attr_dict = {
        attr: [piece for entry in vals for piece in entry.split('=')]
        for attr, vals in raw.items()
    }
    return attr_dict

def match_attrval(title, attr, attr_dict):
    """Find occurrences of `attr`'s known values inside `title`.

    Returns a Chinese prompt sentence of the form
    "这件商品的{attr}特点是{matched values concatenated}".

    Bug fix: each attribute value is passed through re.escape before being
    joined into the alternation, so values containing regex metacharacters
    ('+', '(', '.', ...) no longer corrupt the pattern.
    """
    attrvals = "|".join(re.escape(v) for v in attr_dict[attr])
    ret = re.findall(attrvals, title)
    return "这件商品的{}特点是{}".format(attr, ''.join(ret))

def load_model(checkpoint_path):
    """Load a trained CLIP checkpoint onto the GPU and return it in eval mode.

    The checkpoint is expected to hold a "state_dict" saved from a
    (possibly DataParallel-wrapped) model; the 'module.' prefix is stripped
    so the keys match a bare CLIP instance.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cuda")
    # Architecture hyper-parameters mirror those used at training time.
    model = CLIP(1024, 32, (3, 4, 6, 3), 64, None, 256, 49408, 512, 8, 12).cuda()
    # Strip DataParallel's 'module.' prefix so keys match the bare model.
    sd = {k.replace('module.', ''): v for k, v in checkpoint["state_dict"].items()}
    model.load_state_dict(sd)

    # Cast parameters (and any surviving gradients) back to fp32 for inference.
    for p in model.parameters():
        p.data = p.data.float()
        # Bug fix: `if p.grad:` triggers tensor truthiness, which raises
        # "Boolean value of Tensor with more than one element is ambiguous"
        # for any multi-element gradient. Test against None instead.
        if p.grad is not None:
            p.grad.data = p.grad.data.float()

    return model.eval()

checkpoint_path = "./logs/demo1/checkpoints/epoch_22.pt"
test_data = "./data/test.txt"
attr_dict_file = "./data/attr_to_attrvals.json"
out_file = "test_pred22.txt"

# build model
model = load_model(checkpoint_path)


# test: score each (image feature, text prompt) pair and threshold into 0/1.
attr_dict = load_attr_dict(attr_dict_file)
rets = []
with open(test_data, 'r', encoding='utf-8') as f:
    for i, data in enumerate(tqdm.tqdm(f)):

        data = json.loads(data)
        feature = np.array(data['feature']).astype(np.float32)
        # For the overall image-text query ('图文') use the raw title;
        # otherwise build an attribute-specific prompt from matched values.
        texts = [data['title'] if a == '图文' else match_attrval(data['title'], a, attr_dict)
                 for a in data['query']]
        # Replicate the single image feature once per text prompt.
        features = torch.from_numpy(feature)[None, ].repeat(len(texts), 1)
        tokens = tokenize(texts)

        features = features.cuda()
        tokens = tokens.cuda()
        with autocast():
            with torch.no_grad():
                image_features, text_features, _ = model(features, tokens)
                # Cosine-style score: elementwise product summed over the
                # embedding dimension (features are presumably normalized by
                # the model — confirm).
                similarities = (image_features * text_features).sum(dim=-1)
                similarities = similarities.cpu().tolist()

        if i < 10:
            # Debug output for the first 10 samples.
            # Bug fix: the prediction loop was nested inside the per-text
            # loop, printing the full prediction list once per text; print
            # each list exactly once instead.
            print(data['img_name'] + ':')
            for txt, sim in zip(texts, similarities):
                print(txt, sim)
            # NOTE(review): the debug threshold for '图文' here (0.4) differs
            # from the one used for the saved predictions below (0.6) —
            # confirm which is intended.
            for a, s in zip(data['query'], similarities):
                print(int(s > 0.4 if a == '图文' else s > 0.2), s)

        ret = {
            "img_name": data["img_name"],
            "match": {
                # Per-query binary decision: stricter threshold for the
                # overall image-text match than for single attributes.
                a: int(s > 0.6 if a == '图文' else s > 0.2)
                for a, s in zip(data['query'], similarities)
            }
        }
        rets.append(json.dumps(ret, ensure_ascii=False) + '\n')

with open(out_file, 'w', encoding='utf-8') as f:
    f.writelines(rets)
