import torch
import numpy as np
from transformers import ChineseCLIPProcessor, ChineseCLIPModel, AutoTokenizer
import time
from redis.commands.search.query import Query
from common.logger_config import setup_logger
from common.common import get_vector_index_name
# from modelscope.pipelines import pipeline
# from modelscope.utils.constant import Tasks
from util.redis_pool import RedisSingleton


logger = setup_logger(__name__)


# Path to the locally stored ChineseCLIP checkpoint (model weights + processor config).
model_name_or_local_path = "/data/model"
# Loaded once at import time and shared by every function in this module.
model = ChineseCLIPModel.from_pretrained(model_name_or_local_path)
processor = ChineseCLIPProcessor.from_pretrained(model_name_or_local_path)
# Prefer GPU when available; NOTE(review): downstream calls must move their
# inputs to this same device before calling the model — verify each call site.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
model.eval()

# general_recognition = pipeline(Tasks.general_recognition, model='/data/resnest/')


def get_text_embedding(text):
    """Encode a text query into a packed float32 byte string for Redis vector search.

    Args:
        text: The query string to embed.

    Returns:
        bytes: The text feature vector as little-endian float32 bytes,
        suitable for passing as a Redis search query parameter.
    """
    # Load the tokenizer lazily and cache it on the function object so we
    # don't hit the filesystem on every call (the original reloaded it each time).
    tokenizer = getattr(get_text_embedding, "_tokenizer", None)
    if tokenizer is None:
        tokenizer = AutoTokenizer.from_pretrained(model_name_or_local_path)
        get_text_embedding._tokenizer = tokenizer

    # Inference only: disable autograd, and move inputs to the model's device
    # (the module-level model was moved to `device`; the original would crash
    # on CUDA because it called .numpy() on a GPU tensor).
    with torch.no_grad():
        inputs = tokenizer(text, return_tensors="pt").to(device)
        embeddings = model.get_text_features(**inputs)

    # Bring the tensor back to CPU before converting to numpy/bytes.
    embeddings = embeddings.cpu().numpy().astype(np.float32).tobytes()
    return embeddings


def process_image(image, application_name, index_name, image_id):
    """Vectorize an image with ChineseCLIP and store the embedding in Redis.

    The embedding is written as a JSON document at key
    ``{vector_index_name}:{image_id}``.

    Args:
        image: A PIL image (or anything the CLIP processor accepts).
        application_name: Application namespace used to build the index name.
        index_name: Logical index name within the application.
        image_id: Unique identifier of the image; used in the Redis key.

    Returns:
        The Redis pipeline execution results (a list) on success,
        ``False`` if any step failed.
    """
    result = False
    vector_index_name = get_vector_index_name(application_name, index_name)
    try:
        start = time.time() * 1000

        # Inference only: no autograd graph needed.
        with torch.no_grad():
            inputs = processor(images=image, return_tensors="pt", padding=True)
            # Move pixel values to the model's device (the module-level model
            # lives on `device`; the original passed CPU tensors, which fails
            # on a CUDA host), then bring the features back to CPU for numpy.
            image_features = model.get_image_features(inputs.pixel_values.to(device))[0]
            embeddings = image_features.cpu().numpy().astype(np.float32).tolist()

        logger.info(f"【process_image】image vectorization finish. cost time = {time.time() * 1000 - start}, image_id = {image_id}, index_name = {index_name}")

        # Store the vector in Redis as a JSON document (pipeline kept outside
        # the no_grad block — it is pure I/O, not torch work).
        pipeline = RedisSingleton().get_redis().pipeline()
        pipeline.json().set(f"{vector_index_name}:{image_id}", "$", embeddings)
        result = pipeline.execute()
    except Exception as e:
        logger.error(f"【process_image】process image error. image_id = {image_id}, index_name = {index_name}, error = {e}")
    return result


def delete_image(application_name, index_name, image_id):
    """Remove a stored image embedding from Redis.

    Deletes the JSON document at key ``{vector_index_name}:{image_id}``.

    Args:
        application_name: Application namespace used to build the index name.
        index_name: Logical index name within the application.
        image_id: Unique identifier of the image; used in the Redis key.

    Returns:
        The Redis pipeline execution results (a list) on success,
        ``False`` if the deletion raised.
    """
    result = False
    vector_index_name = get_vector_index_name(application_name, index_name)
    try:
        # Pure Redis I/O — the original wrapped this in torch.no_grad(),
        # which is meaningless here and has been removed.
        pipeline = RedisSingleton().get_redis().pipeline()
        pipeline.json().delete(f"{vector_index_name}:{image_id}")
        result = pipeline.execute()
        logger.info(f"delete image finish. image_id = {image_id}, index_name = {index_name}, result = {result}, result_type = {type(result)}")
    except Exception as e:
        logger.error(f"delete image error. image_id = {image_id}, index_name = {index_name}, error = {e}")
    return result


# Candidate labels for zero-shot image classification via CLIP image-text
# similarity (see verify_image_category). Order matters: indices returned by
# the similarity filter are looked up in this list.
# category_list = ['人物', '动物', '小猫', '小狗', '西瓜', '水果', ]
category_list = [
    '风景', '山峰', '夜景', '建筑', '天空', '日出', '日落', '蓝天', '星空', '雪地',
    '雨天', '河流湖泊', '海洋', '街景', '寺庙', '瀑布', '峡谷/山谷', '教堂', '草原',
    '动物', '猫咪', '小狗', '鱼', '熊猫', '兔子', '飞禽', '植物', '花朵', '绿树',
    '绿植', '人物', '运动', '萌娃', '婚礼', '演出', '美食', '聚餐', '火锅', '汽车',
    '图书', '船舶', '火车', '飞机', '机器', '电脑', '手机', '眼镜', '水杯', '座椅',
    '文档', '证件', '截图', '动漫', '票据', '课表'
]


def get_image_category_threshold() -> float:
    """Read the image-category similarity threshold from the Flask app config.

    The value is compared against CLIP softmax probabilities, so it is
    expected to be a float in [0, 1] — the original ``-> int`` annotation
    was misleading. (The original comment wrongly said this loads Redis
    connection-pool parameters; it loads the category threshold.)

    Returns:
        The value of ``IMAGE_CATEGORY_THRESHOLD`` from the current app's
        config, or ``None`` if the key is absent (``config.get`` default).
    """
    # Imported lazily so this module can be loaded outside a Flask app context.
    from flask import current_app
    image_category_threshold = current_app.config.get("IMAGE_CATEGORY_THRESHOLD")
    return image_category_threshold


def verify_image_category(image):
    """Zero-shot classify an image against ``category_list`` using CLIP.

    Computes image-text similarity scores between the image and every
    candidate label, softmaxes them, and keeps labels whose probability
    exceeds the configured threshold.

    Args:
        image: A PIL image (or anything the CLIP processor accepts).

    Returns:
        list[str]: Labels from ``category_list`` whose softmax probability
        exceeds ``IMAGE_CATEGORY_THRESHOLD``. May be empty.
    """
    start_time = time.time() * 1000

    # Inference only: disable autograd, and move the batched inputs to the
    # model's device (the original ran on CPU tensors even when the model
    # had been moved to CUDA).
    with torch.no_grad():
        inputs = processor(text=category_list, images=image, return_tensors="pt", padding=True).to(device)
        outputs = model(**inputs, output_attentions=False, output_hidden_states=False)

    # logits_per_image: one similarity score per (image, label) pair;
    # softmax over labels yields a probability distribution.
    probs = outputs.logits_per_image.softmax(dim=1)
    logger.info(f"【verify_image_category】finish image-text similarity. cost time = {time.time() * 1000 - start_time}")

    # Keep the label indices whose probability exceeds the configured threshold.
    image_category_threshold = get_image_category_threshold()
    logger.info(f"【verify_image_category】image_category_threshold = {image_category_threshold}")
    filtered_indices = (probs > image_category_threshold).nonzero(as_tuple=True)[1]

    # Map indices back to human-readable labels.
    filtered_categories = [category_list[i] for i in filtered_indices]
    logger.info(f"filtered_categories = {filtered_categories}")
    return filtered_categories


def query_image_by_text(application_name, index_name, score, page_num, page_size, text, extra_params=None):
    """Search the Redis vector index for images similar to a text query.

    Embeds ``text`` with CLIP and runs a VECTOR_RANGE query against the
    application's index, returning documents within the given distance.

    Args:
        application_name: Application namespace used to build the index name.
        index_name: Logical index name within the application.
        score: Maximum vector distance (VECTOR_RANGE radius); coerced to float.
        page_num: Paging offset passed to ``Query.paging``.
        page_size: Page size passed to ``Query.paging``.
        text: The query text to embed.
        extra_params: Optional extra query parameters merged into the
            parameter dict. Defaults to ``None`` (the original used a mutable
            ``{}`` default, a classic shared-state pitfall).

    Returns:
        list: Matching documents sorted by ``vector_score``; ``[]`` on error.
    """
    # Never use a mutable default argument — normalize None to a fresh dict.
    if extra_params is None:
        extra_params = {}

    vector_index_name = get_vector_index_name(application_name, index_name)

    with torch.no_grad():
        query_vector = get_text_embedding(text)
        query_score = float(score)

        # Range query: all vectors within `query_score` distance, with the
        # distance yielded as the sortable field `vector_score`.
        query = (
            Query("@vector:[VECTOR_RANGE $query_score $query_vector]=>{$yield_distance_as: vector_score}")
            .sort_by("vector_score")
            .return_fields("id", "vector_score")
            .paging(page_num, page_size)
            .dialect(2)
        )

        try:
            result_docs = (
                RedisSingleton().get_redis().ft(vector_index_name).search(
                    query,
                    {
                        "query_vector": query_vector,
                        "query_score": query_score
                    }
                    | extra_params,
                ).docs
            )
            logger.info(f"query result = {result_docs}, index_name = {index_name}, text = {text}, result_type = {type(result_docs)}")

            return result_docs
        except Exception as e:
            logger.error(f"query image by text error. text = {text}, index_name = {index_name}, error = {e}")

        return []
