import os
from concurrent.futures import ThreadPoolExecutor
from cn_clip.clip import load_from_name, available_models

from app.milvus.milvus_connection import create_connection
from app.conf.milvus_conn import milvus_conn_conf
from app.utll.word_dict_util import substitute_dict
import torch


## Global state initialised at import time.
# Open the Milvus connection using credentials from configuration.
_conn_args = (milvus_conn_conf["host"], milvus_conn_conf["port"],
              milvus_conn_conf["user"], milvus_conn_conf["password"])
create_connection(*_conn_args)

# Worker pool for background tasks (note: threads, not processes).
executor = ThreadPoolExecutor(max_workers=10)

# ---- Model loading ----
# Load the Chinese-CLIP models (ViT-B-16 / ViT-L-14 / ViT-H-14) once at
# import time; weights are downloaded to ./assets next to this file.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"device: {device}")

# BUGFIX: available_models is a function — it must be called, otherwise this
# printed the function object rather than the list of model names.
print("Available models:", available_models())
# Available models: ['ViT-B-16', 'ViT-L-14', 'ViT-L-14-336', 'ViT-H-14', 'RN50']
current_dir = os.path.dirname(os.path.abspath(__file__))
# Build the download directory once with os.path.join instead of repeating
# string concatenation for each model.
_assets_dir = os.path.join(current_dir, 'assets')

# .eval() switches each model to inference mode (disables dropout etc.).
model_B16, preprocess_B16 = load_from_name("ViT-B-16", device=device, download_root=_assets_dir)
model_B16.eval()

model_L14, preprocess_L14 = load_from_name("ViT-L-14", device=device, download_root=_assets_dir)
model_L14.eval()

model_H14, preprocess_H14 = load_from_name("ViT-H-14", device=device, download_root=_assets_dir)
model_H14.eval()

print(f"使用的device: {device}")
print(f"加载clip模型完毕")

# NOTE(review): this rebinds the imported name `substitute_dict` to the
# function's own return value, shadowing the callable. Presumably the result
# is a word-substitution mapping used elsewhere in the app — confirm against
# app.utll.word_dict_util. The name is kept because other modules may import
# `substitute_dict` from here.
substitute_dict = substitute_dict()
