# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ast
import os
import pickle
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from tqdm import tqdm

from dataset.retrieval import RetrievalDataset
from utils import EVALUATORS, TASKS, auto_eval
from .base import BaseTask


@TASKS.register_module("similarity_classification")
class SimilarityClassificationTask(BaseTask):
    """Similarity-based pair classification task.

    Embeds every query and target item with the configured models (caching
    the embeddings on disk), then scores each ground-truth
    ``(query_id, target_id, label)`` row by the cosine similarity of the two
    embeddings and feeds the scores/labels to the configured evaluators.
    """

    MODE = "similarity_classification"

    def __init__(self, work_dir, model_cfg, dataset_cfg, **kwargs):
        super().__init__(work_dir, model_cfg, dataset_cfg, **kwargs)

        # Pre-compute (or load cached) embeddings for both sides of each pair.
        self.query_emb_dict = self._build_emb_dict("query")
        self.target_emb_dict = self._build_emb_dict("target")

    def _load_dataset(self):
        """Instantiate the ``RetrievalDataset`` described by ``self.dataset_cfg``."""
        dataset_dir = self.dataset_cfg["data_dir"]
        file_type = self.dataset_cfg["data_type"]
        shot = self.dataset_cfg.get("shot")
        # Shot configs point at a sub-directory named after the test split.
        if shot is not None and isinstance(shot, dict):
            dataset_dir = os.path.join(dataset_dir, shot["test_name"])
        query_cfg = self.dataset_cfg["query"]
        target_cfg = self.dataset_cfg["target"]
        ground_truth_cfg = self.dataset_cfg["ground_truth"]
        image_cfg = self.dataset_cfg.get("image")
        self.dataset = RetrievalDataset(self.dataset_name, file_type, dataset_dir, query_cfg,
                                        target_cfg, ground_truth_cfg, image_cfg=image_cfg)

    def _dataset_len(self):
        """Total number of items that will be embedded (queries + targets)."""
        return len(self.dataset.query) + len(self.dataset.target)

    @staticmethod
    def _normalize_id(raw_id):
        """Coerce a ground-truth id cell into a single string id.

        The id column may hold a scalar, a stringified list (e.g. ``"[1, 2]"``)
        or a numpy array. This task assumes each row ultimately refers to a
        single id, so after order-preserving de-duplication the first element
        is used.

        Args:
            raw_id: raw cell value from the ground-truth table.

        Returns:
            str: the id, stringified to match the embedding-dict keys.
        """
        # Defensive parsing: if the cell is a bracketed string, restore the
        # list. ast.literal_eval (not eval) so data cannot execute code.
        if isinstance(raw_id, str) and raw_id.startswith("[") and raw_id.endswith("]"):
            raw_id = ast.literal_eval(raw_id)
        elif isinstance(raw_id, np.ndarray):
            raw_id = raw_id.tolist()
        if not isinstance(raw_id, list):
            raw_id = [raw_id]
        # De-duplicate while keeping original order; list(set(...)) would pick
        # an arbitrary element when more than one id is present.
        raw_id = list(dict.fromkeys(raw_id))
        # Embedding dicts are keyed by str(id) (see _build_emb_dict), so the
        # chosen id must be stringified regardless of its original type.
        return str(raw_id[0])

    def _build_emb_dict(self, split):
        """Build (or load from cache) ``{emb_name: {str(id): embedding}}``.

        Args:
            split (str): ``"query"`` or ``"target"``; selects the dataset
                attribute, the inference model and the worker count.

        Returns:
            dict: nested mapping ``emb_name -> {str(id) -> embedding}``.
        """
        model = self.target_model if split == "target" else self.query_model

        def infer_func(id_value, data):
            # One inference per item; failures are logged and counted rather
            # than raised so a single bad sample doesn't abort the build.
            try:
                infer_emb_dict = model.infer(data)
                return str(id_value), infer_emb_dict
            except Exception as e:
                self.logger.error(f"[BuildEmbDict] model infer failed, error: {e}")
                self._emit_infer_failed_count()
                return None

        emb_dict = {}
        emb_dict_save_dir = os.path.join(self.work_dir, f"{split}_emb")
        os.makedirs(emb_dict_save_dir, exist_ok=True)
        emb_dict_save_path = os.path.join(emb_dict_save_dir, f"{split}_emb_dict.pkl")
        if os.path.exists(emb_dict_save_path):
            try:
                # NOTE: pickle.load trusts the cache file; only safe because the
                # file lives in this task's own work_dir.
                with open(emb_dict_save_path, "rb") as f:
                    emb_dict = pickle.load(f)
                self.logger.info(f"[BuildEmbDict] load {split} emb dict from local successfully")
                return emb_dict
            except Exception as e:
                self.logger.error(
                    f"[BuildEmbDict] load {split} emb dict from local failed, build new {split} emb dict. error: {e}")

        assert hasattr(self.dataset, split), f"dataset hasn't {split}"
        dataset_iter = getattr(self.dataset, split)
        worker_num = self.target_model_worker_num if split == "target" else self.query_model_worker_num
        with ThreadPoolExecutor(max_workers=worker_num) as pool:
            futures = [pool.submit(infer_func, data_id, data)
                       for data_id, data in tqdm(dataset_iter, total=len(dataset_iter), desc=f"Load {split}")]
            for future in tqdm(as_completed(futures), total=len(futures), desc=f"Build {split} Emb"):
                result = future.result()
                if result is None:
                    continue
                data_id, infer_emb_dict = result
                # A model may emit several named embeddings per item.
                for emb_name, emb in infer_emb_dict.items():
                    emb_dict.setdefault(emb_name, {})[data_id] = emb

        # Cache to disk; best-effort, evaluation proceeds even if saving fails.
        try:
            with open(emb_dict_save_path, "wb") as f:
                pickle.dump(emb_dict, f)
            self.logger.info(f"[BuildEmbDict] save {split} emb dict to local successfully")
        except Exception as e:
            self.logger.error(f"[BuildEmbDict] save {split} emb dict to local failed, error: {e}")
        return emb_dict

    def eval(self):
        """Score every ground-truth pair and run the configured evaluators.

        Returns:
            dict: ``{emb_name: {metric_name: score, ...}}`` for every
            embedding name produced by the target model.
        """

        def score_row(row, emb_name):
            # Returns (cosine_score, label), or None when either embedding is
            # missing (e.g. its inference failed and was skipped).
            query_id = self._normalize_id(row[self.dataset.gt_query_id_field_name])
            target_id = self._normalize_id(row[self.dataset.gt_target_id_field_name])
            query_emb = self.query_emb_dict[emb_name].get(query_id)
            target_emb = self.target_emb_dict[emb_name].get(target_id)
            if query_emb is None or target_emb is None:
                return None
            label = row[self.dataset.gt_label_field_name]
            cosine_similarity_score = cosine_similarity([query_emb], [target_emb]).tolist()[0][0]
            return cosine_similarity_score, label

        final_score = {}
        for emb_name in list(self.target_emb_dict.keys()):
            emb_final_score = {}
            y_truth = []
            y_pred_proba = []
            for _, row in tqdm(self.dataset.ground_truth.iterrows(),
                               total=len(self.dataset.ground_truth),
                               desc=f"Similarity Classification {emb_name}"):
                result = score_row(row, emb_name)
                if result is None:
                    continue
                score, label = result
                y_pred_proba.append(score)
                y_truth.append(label)
            # Run every configured evaluator over the collected scores/labels.
            for evaluator_cfg in self.dataset_cfg["evaluator"]:
                evaluator_build_cfg = {
                    "type": evaluator_cfg["name"],
                    "y_truth": y_truth,
                    "y_pred_proba": y_pred_proba,
                }
                evaluator_build_cfg.update(evaluator_cfg.get("kwargs", {}))
                evaluator = EVALUATORS.build(evaluator_build_cfg)
                emb_final_score.update(evaluator.score())
            final_score[emb_name] = emb_final_score
        return final_score
