# Copyright (c) 2025 BytevalKit-Emb Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ast
import json
import os
import pickle
import traceback
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np
from tqdm import tqdm

from dataset.retrieval import RetrievalDataset
from utils import TASKS, EVALUATORS
from vector_db import HNSWVectorDB

from .base import BaseTask


@TASKS.register_module("retrieval")
class RetrievalTask(BaseTask):
    """Embedding retrieval evaluation task.

    Pipeline:
      1. Embed every (static and dynamic) target and index it in an HNSW
         vector DB, cached on disk under ``<work_dir>/vector_db``.
      2. Embed every query, cached under ``<work_dir>/query_emb``.
      3. For each ground-truth row, retrieve the top-k targets per query and
         score the results with the configured evaluators.
    """

    MODE = "retrieval"

    def __init__(self, work_dir, model_cfg, dataset_cfg, **kwargs):
        super().__init__(work_dir, model_cfg, dataset_cfg, **kwargs)

        self.vector_db = None
        self._build_vector_db()
        # emb_name -> {query_id (str) -> embedding}
        self.query_emb_dict = defaultdict(dict)
        self._build_query_emb_dict()

    def _load_dataset(self):
        """Build ``self.dataset`` (a RetrievalDataset) from ``self.dataset_cfg``."""
        dataset_dir = self.dataset_cfg["data_dir"]
        file_type = self.dataset_cfg["data_type"]
        shot = self.dataset_cfg.get("shot")
        if isinstance(shot, dict):
            # Shot-style configs keep each test split in its own sub-directory.
            dataset_dir = os.path.join(dataset_dir, shot["test_name"])
        query_cfg = self.dataset_cfg["query"]
        target_cfg = self.dataset_cfg["target"]
        ground_truth_cfg = self.dataset_cfg["ground_truth"]
        dynamic_target_cfg = self.dataset_cfg.get("dynamic_target")
        image_cfg = self.dataset_cfg.get("image")
        self.dataset = RetrievalDataset(self.dataset_name, file_type, dataset_dir, query_cfg,
                                        target_cfg, ground_truth_cfg, dynamic_target_cfg, image_cfg=image_cfg)

    def _dataset_len(self):
        """Return the total number of items that will be embedded."""
        dataset_len = len(self.dataset.query) + len(self.dataset.target)
        if self.dataset.dynamic_target:
            dataset_len += len(self.dataset.dynamic_target)
        return dataset_len

    def _build_vector_db(self):
        """Embed all targets (and dynamic targets, if any) into the vector DB.

        Tries a locally cached DB first; on a cache hit no inference runs.
        Individual inference failures are logged and counted, not fatal.
        """
        db_max_elements = len(self.dataset.target)
        if self.dataset.dynamic_target is not None:
            db_max_elements += len(self.dataset.dynamic_target)

        if self.vector_db is None:
            db_name = "target"
            db_save_dir = os.path.join(self.work_dir, "vector_db")
            os.makedirs(db_save_dir, exist_ok=True)
            # 2x headroom so the HNSW index never hits its element cap.
            self.vector_db = HNSWVectorDB(db_name, db_max_elements * 2, save_dir=db_save_dir)
            # Try the locally cached vector DB first; skip embedding on a hit.
            if self.vector_db.try_load_db():
                self.logger.info(f"[BuildVectorDB] load vector db from local cache")
                return

        def infer_func(id_value, data, is_dynamic_target=False):
            # Embed one item and add every embedding head to the DB.
            try:
                emb_dict = self.target_model.infer(data)
                for emb_name, emb in emb_dict.items():
                    if isinstance(emb, np.ndarray):
                        emb = emb.tolist()
                    self.vector_db.add_data(emb_name, id_value, emb, is_dynamic_target=is_dynamic_target)
            except Exception as e:
                # Best effort: a failed item is counted and skipped.
                self.logger.error(f"[BuildVectorDB] model infer failed, error: {e}")
                self.logger.error(traceback.format_exc())
                self._emit_infer_failed_count()

        with ThreadPoolExecutor(max_workers=self.target_model_worker_num) as pool:
            futures = []
            for target_id, target_data in tqdm(self.dataset.target, total=len(self.dataset.target), desc="Load Target"):
                futures.append(pool.submit(infer_func, target_id, target_data))
            if self.dataset.dynamic_target is not None:
                for dynamic_target_id, dynamic_target_data in tqdm(self.dataset.dynamic_target, total=len(self.dataset.dynamic_target), desc="Load Dynamic Target"):
                    futures.append(pool.submit(infer_func, dynamic_target_id,
                                               dynamic_target_data, is_dynamic_target=True))

            for future in tqdm(as_completed(futures), total=len(futures), desc="Build Vector DB"):
                future.result()
        self.vector_db.save_db()

    def _build_query_emb_dict(self):
        """Embed every query into ``self.query_emb_dict``, with a pickle cache.

        Cache lives at ``<work_dir>/query_emb/query_emb_dict.pkl``. A corrupt
        cache falls through to a fresh rebuild; save failures are non-fatal.
        """

        def infer_func(id_value, data):
            try:
                emb_dict = self.query_model.infer(data)
                return str(id_value), emb_dict
            except Exception as e:
                # A failed query simply has no embedding and will yield an
                # empty retrieval result later.
                self.logger.error(f"[BuildQueryEmbDict] model infer failed, error: {e}")
                self._emit_infer_failed_count()
                return None

        query_emb_dict_save_dir = os.path.join(self.work_dir, "query_emb")
        os.makedirs(query_emb_dict_save_dir, exist_ok=True)
        query_emb_dict_save_path = os.path.join(query_emb_dict_save_dir, "query_emb_dict.pkl")
        if os.path.exists(query_emb_dict_save_path):
            # NOTE: pickle cache is local output written by this task itself,
            # not untrusted input.
            try:
                with open(query_emb_dict_save_path, "rb") as f:
                    self.query_emb_dict = pickle.load(f)
                self.logger.info(f"[BuildQueryEmbDict] load query emb dict from local successfully")
                return
            except Exception as e:
                self.logger.error(
                    f"[BuildQueryEmbDict] load query emb dict from local failed, build new query emb dict. error: {e}")

        with ThreadPoolExecutor(max_workers=self.query_model_worker_num) as pool:
            futures = []
            for query_id, query_data in tqdm(self.dataset.query, total=len(self.dataset.query), desc="Load Query"):
                futures.append(pool.submit(infer_func, query_id, query_data))
            for future in tqdm(as_completed(futures), total=len(futures), desc="Build Query Emb"):
                result = future.result()
                if result is None:
                    continue
                query_id, query_emb = result
                for emb_name, emb in query_emb.items():
                    self.query_emb_dict[emb_name][query_id] = emb

        try:
            with open(query_emb_dict_save_path, "wb") as f:
                pickle.dump(self.query_emb_dict, f)
            self.logger.info(f"[BuildQueryEmbDict] save query emb dict to local successfully")
        except Exception as e:
            self.logger.error(f"[BuildQueryEmbDict] save query emb dict to local failed, error: {e}")

    def _single_query_retrieval(self, emb_name, query_id, gt_target_id_list, max_topk=10000, mask_dynamic_target=False):
        """Search the vector DB for one query; return its raw results.

        Returns an empty list when the query has no embedding (e.g. its
        inference failed earlier).
        """
        query_emb = self.query_emb_dict.get(emb_name, {}).get(query_id)
        if query_emb is None:
            return []
        vector_db_search_args = {}
        if mask_dynamic_target:
            # When masking dynamic targets, only this row's own ground-truth
            # dynamic targets remain searchable.
            gt_target_id_dedup_list = [str(x) for x in set(gt_target_id_list)]
            vector_db_search_args["mask_other_dynamic_target"] = True
            vector_db_search_args["keep_dynamic_target_ids"] = gt_target_id_dedup_list
        return self.vector_db.search(emb_name, query_emb, max_topk, **vector_db_search_args)

    @staticmethod
    def _normalize_id_list(value):
        """Coerce a ground-truth id cell into a deduplicated list.

        Handles scalar ids, numpy arrays, and stringified lists such as
        ``"['a', 'b']"``. Scalars are stringified; elements of an actual
        list are kept as-is (matching previous behavior).
        """
        if isinstance(value, str) and value.startswith("[") and value.endswith("]"):
            # ast.literal_eval replaces the former eval(): it only accepts
            # Python literals, so a malformed or malicious cell cannot
            # execute code. On parse failure the raw string is kept and
            # wrapped below instead of raising.
            try:
                value = ast.literal_eval(value)
            except (ValueError, SyntaxError):
                pass
        elif isinstance(value, np.ndarray):
            value = value.tolist()
        if not isinstance(value, list):
            value = [str(value)]
        return list(set(value))

    def _retrieval(self, emb_name, max_topk=500, mask_dynamic_target=False, thread_num=20):
        """Retrieve top-k targets for every ground-truth row, in parallel.

        Returns a list of ``(query_ids, target_ids, {q_id: [[t_id, score], ...]})``
        tuples. Each row's result is also stored as JSON in the ground-truth
        dataframe column ``{emb_name}_retrieval_result``.
        """

        def handler_row(idx, row, emb_name):
            query_id = self._normalize_id_list(row[self.dataset.gt_query_id_field_name])
            target_id = self._normalize_id_list(row[self.dataset.gt_target_id_field_name])
            query_retrieval_result = {}
            for q_id in query_id:
                result = self._single_query_retrieval(emb_name, q_id, target_id, max_topk, mask_dynamic_target)
                query_retrieval_result[q_id] = [[str(x[0]), x[1]] for x in result]
            return idx, query_id, target_id, query_retrieval_result

        retrieval_result = []
        with ThreadPoolExecutor(max_workers=thread_num) as pool:
            futures = []
            for idx, row in tqdm(self.dataset.ground_truth.iterrows(), total=len(self.dataset.ground_truth), desc=f"Retrieval {emb_name}"):
                futures.append(pool.submit(handler_row, idx, row, emb_name))
            for future in tqdm(as_completed(futures), total=len(futures), desc=f"Retrieval {emb_name}"):
                try:
                    idx, query_id, target_id, query_retrieval_result = future.result()
                    # Dataframe writes happen only on this (main) thread.
                    self.dataset.ground_truth.loc[idx, f"{emb_name}_retrieval_result"] = json.dumps(
                        query_retrieval_result, ensure_ascii=False)
                    retrieval_result.append((query_id, target_id, query_retrieval_result, ))
                except Exception as e:
                    # A failed row is logged and dropped from the result set.
                    self.logger.error(f"[Retrieval] retrieval failed, error: {e}")
                    self.logger.error(traceback.format_exc())
                    continue
        return retrieval_result

    def eval(self):
        """Run retrieval and scoring for every embedding head.

        Returns ``{emb_name: {metric_name: score}}``. The per-row retrieval
        results are also persisted to
        ``<work_dir>/retrieval_result/result.parquet`` (best effort).
        """
        retrieval_kwargs = self.dataset_cfg.get("retrieval_kwargs", {})
        assert isinstance(retrieval_kwargs, dict)
        evaluator_cfg_list = self.dataset_cfg["evaluator"]

        final_score = {}
        for emb_name in self.query_emb_dict.keys():
            retrieval_result = self._retrieval(emb_name, **retrieval_kwargs)
            emb_score = {}
            for evaluator_cfg in evaluator_cfg_list:
                evaluator_name = evaluator_cfg["name"]
                evaluator_kwargs = evaluator_cfg.get("kwargs", {})
                evaluator_build_cfg = {
                    "type": evaluator_name,
                    "retrieval_result": retrieval_result,
                }
                evaluator_build_cfg.update(evaluator_kwargs)
                evaluator = EVALUATORS.build(evaluator_build_cfg)
                emb_score.update(evaluator.score())
            final_score[emb_name] = emb_score
        # Persist the raw retrieval results alongside the scores.
        retrieval_result_save_dir = os.path.join(self.work_dir, "retrieval_result")
        os.makedirs(retrieval_result_save_dir, exist_ok=True)
        retrieval_result_save_path = os.path.join(retrieval_result_save_dir, "result.parquet")
        try:
            self.dataset.ground_truth.to_parquet(retrieval_result_save_path)
            self.logger.info(f"save retrieval result to local successfully")
        except Exception as e:
            self.logger.error(f"save retrieval result to local failed, error: {e}")

        return final_score
