# Copyright (c) 2025 BytevalKit-Emb Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ast
import json
import os
import pickle
import traceback
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np
from tqdm import tqdm

from dataset.retrieval import RetrievalDataset
from utils import TASKS, EVALUATORS
# from vector_db import HNSWVectorDB
from .base import BaseTask


@TASKS.register_module("mmeb")
class MMEBTask(BaseTask):
    """Retrieval evaluation task for the MMEB benchmark.

    The task embeds every query and every target exactly once (caching the
    embeddings on disk as a pickle under ``work_dir``), then, for each
    ground-truth row, ranks the candidate targets by cosine similarity and
    hands the retrieval results to the evaluators configured in
    ``dataset_cfg["evaluator"]``.
    """

    MODE = "mmeb"

    def __init__(self, work_dir, model_cfg, dataset_cfg, **kwargs):
        super().__init__(work_dir, model_cfg, dataset_cfg, **kwargs)

        # Build (or load from the on-disk cache) the embedding dictionaries
        # for both sides of the retrieval task.
        self.query_emb_dict = self._build_emb_dict("query")
        self.target_emb_dict = self._build_emb_dict("target")

    def _load_dataset(self):
        """Instantiate ``self.dataset`` from ``self.dataset_cfg``."""
        dataset_dir = self.dataset_cfg["data_dir"]
        file_type = self.dataset_cfg["data_type"]
        shot = self.dataset_cfg.get("shot")
        # Few-shot configurations keep each test split in its own
        # subdirectory named after the test.
        if isinstance(shot, dict):
            dataset_dir = os.path.join(dataset_dir, shot["test_name"])
        query_cfg = self.dataset_cfg["query"]
        target_cfg = self.dataset_cfg["target"]
        ground_truth_cfg = self.dataset_cfg["ground_truth"]
        self.dataset = RetrievalDataset(self.dataset_name, file_type, dataset_dir,
                                        query_cfg, target_cfg, ground_truth_cfg)

    def _dataset_len(self):
        """Return the total number of items to embed (queries + targets)."""
        return len(self.dataset.query) + len(self.dataset.target)

    def _build_emb_dict(self, split):
        """Embed every item of one dataset side.

        Args:
            split: "query" or "target"; selects both the sub-dataset and the
                model used for inference.

        Returns:
            dict: ``{emb_name: {data_id: embedding}}``. If a pickle cache
            exists at ``<work_dir>/<split>_emb/emb_dict.pkl`` it is loaded
            instead of re-running inference; a freshly built dictionary is
            written back to that path (best effort).

        Raises:
            ValueError: if ``split`` is neither "query" nor "target".
        """
        model_dict = {
            "query": self.query_model,
            "target": self.target_model,
        }
        model = model_dict.get(split)
        if model is None:
            # Explicit error instead of `assert`, which is stripped under -O.
            raise ValueError(f"split {split} not supported")

        def infer_func(id_value, data):
            # Run one inference; failures are logged and skipped so a single
            # bad sample does not abort the whole build.
            try:
                emb = model.infer(data)
                return str(id_value), emb
            except Exception as e:
                self.logger.error(f"[BuildEmbDict] model infer failed, split: {split}, error: {e}")
                return None

        emb_dict_save_dir = os.path.join(self.work_dir, f"{split}_emb")
        os.makedirs(emb_dict_save_dir, exist_ok=True)
        emb_dict_save_path = os.path.join(emb_dict_save_dir, "emb_dict.pkl")
        if os.path.exists(emb_dict_save_path):
            # NOTE: unpickling is only acceptable here because the cache is
            # produced by this task itself inside work_dir; never point it at
            # untrusted files.
            try:
                with open(emb_dict_save_path, "rb") as f:
                    emb_dict = pickle.load(f)
                self.logger.info(f"[BuildEmbDict] load {split} emb dict from local successfully")
                return emb_dict
            except Exception as e:
                self.logger.error(
                    f"[BuildEmbDict] load {split} emb dict from local failed, build new emb dict. error: {e}")

        workers_num = self.query_model_worker_num if split == "query" else self.target_model_worker_num
        sub_dataset = getattr(self.dataset, split)
        emb_dict = defaultdict(dict)
        with ThreadPoolExecutor(max_workers=workers_num) as pool:
            futures = []
            for data_id, data in tqdm(sub_dataset, total=len(sub_dataset), desc=f"Load {split}"):
                futures.append(pool.submit(infer_func, data_id, data))
            for future in tqdm(as_completed(futures), total=len(futures), desc=f"Build {split} Emb"):
                result = future.result()
                if result is None:
                    continue  # inference failed for this sample; already logged
                data_id, data_emb = result
                # model.infer returns {emb_name: embedding}; pivot into
                # {emb_name: {data_id: embedding}}.
                for emb_name, emb in data_emb.items():
                    emb_dict[emb_name][data_id] = emb

        # Best-effort cache write; evaluation proceeds even if it fails.
        # (Fixed typo: was "[BuildmbDict]".)
        try:
            with open(emb_dict_save_path, "wb") as f:
                pickle.dump(emb_dict, f)
            self.logger.info(f"[BuildEmbDict] save {split} emb dict to local successfully")
        except Exception as e:
            self.logger.error(f"[BuildEmbDict] save {split} emb dict to local failed, error: {e}")
        return emb_dict

    def _single_query_eval(self, emb_name, query_id, target_id_list):
        """Score one query against its candidate targets by cosine similarity.

        Returns:
            tuple: ``(scores, pred)`` — ``scores`` is an ndarray of cosine
            similarities (empty list when the query or all targets lack an
            embedding) and ``pred`` is the argmax index (-1 when no score
            could be computed).

        NOTE(review): targets with no embedding are silently dropped, so
        ``pred`` indexes the *filtered* candidate list, which can differ from
        positions in ``target_id_list`` — confirm downstream evaluators
        expect this.
        """
        scores = []
        pred = -1
        query_emb = self.query_emb_dict.get(emb_name, {}).get(query_id)
        target_embs = [
            emb for emb in (
                self.target_emb_dict.get(emb_name, {}).get(target_id)
                for target_id in target_id_list
            ) if emb is not None
        ]
        if query_emb is None or not target_embs:
            return scores, pred

        query_emb_norm = np.linalg.norm(query_emb)
        target_embs_norm = np.linalg.norm(target_embs, axis=1)
        scores = np.dot(target_embs, query_emb) / (target_embs_norm * query_emb_norm)
        pred = np.argmax(scores)
        return scores, pred

    def _eval(self, emb_name, thread_num=20):
        """Run retrieval for every ground-truth row under ``emb_name``.

        Writes a JSON-encoded per-row result into the
        ``{emb_name}_eval_result`` column of the ground-truth DataFrame and
        returns a list of ``(query_id, target_id_list, {query_id: [pred,
        scores]})`` tuples for the evaluators.
        """

        def handler_row(idx, row, emb_name):
            query_id = row[self.dataset.gt_query_id_field_name]
            target_id = row[self.dataset.gt_target_id_field_name]
            # Normalize the target-id cell to a list of ids. Stringified
            # lists are parsed with ast.literal_eval (literals only) instead
            # of eval(), which would execute arbitrary code from the data.
            if isinstance(target_id, str) and target_id.startswith("[") and target_id.endswith("]"):
                target_id = ast.literal_eval(target_id)
            elif isinstance(target_id, np.ndarray):
                target_id = target_id.tolist()
            if not isinstance(target_id, list):
                target_id = [str(target_id)]

            scores, pred = self._single_query_eval(emb_name, query_id, target_id)
            if isinstance(scores, np.ndarray):
                scores = scores.tolist()  # make JSON-serializable
            query_eval_result = {query_id: [int(pred), scores]}
            return idx, query_id, target_id, query_eval_result

        # Run retrieval for every ground-truth row in parallel.
        eval_result = []
        with ThreadPoolExecutor(max_workers=thread_num) as pool:
            futures = []
            for idx, row in tqdm(self.dataset.ground_truth.iterrows(), total=len(self.dataset.ground_truth), desc=f"Eval {emb_name}"):
                futures.append(pool.submit(handler_row, idx, row, emb_name))
            for future in tqdm(as_completed(futures), total=len(futures), desc=f"Retrieval {emb_name}"):
                try:
                    idx, query_id, target_id, query_eval_result = future.result()
                    # DataFrame writes happen only here on the main thread,
                    # so there are no concurrent mutations.
                    self.dataset.ground_truth.loc[idx, f"{emb_name}_eval_result"] = \
                        json.dumps(query_eval_result, ensure_ascii=False)
                    eval_result.append((query_id, target_id, query_eval_result))
                except Exception as e:
                    self.logger.error(f"[Eval] eval failed, error: {e}")
                    self.logger.error(traceback.format_exc())
                    continue
        return eval_result

    def eval(self):
        """Evaluate every embedding head.

        Returns:
            dict: ``{emb_name: {metric_name: score}}`` aggregated over all
            configured evaluators. Also dumps the annotated ground-truth
            DataFrame to ``<work_dir>/eval_result/result.parquet``
            (best effort).
        """
        evaluator_cfg_list = self.dataset_cfg["evaluator"]

        final_score = {}
        for emb_name in self.query_emb_dict.keys():
            eval_result = self._eval(emb_name)
            emb_score = {}
            for evaluator_cfg in evaluator_cfg_list:
                evaluator_build_cfg = {
                    "type": evaluator_cfg["name"],
                    "retrieval_result": eval_result,
                }
                evaluator_build_cfg.update(evaluator_cfg.get("kwargs", {}))
                evaluator = EVALUATORS.build(evaluator_build_cfg)
                emb_score.update(evaluator.score())
            final_score[emb_name] = emb_score

        eval_result_save_dir = os.path.join(self.work_dir, "eval_result")
        os.makedirs(eval_result_save_dir, exist_ok=True)
        eval_result_save_path = os.path.join(eval_result_save_dir, "result.parquet")
        try:
            self.dataset.ground_truth.to_parquet(eval_result_save_path)
            self.logger.info("save eval result to local successfully")
        except Exception as e:
            self.logger.error(f"save eval result to local failed, error: {e}")

        return final_score
