# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import pickle
from .base import BaseTask
from utils import EVALUATORS, CLASSIFICATION_HEADS, TASKS
from dataset.classification import ClassificationDataset
from concurrent.futures import ThreadPoolExecutor, as_completed
from tqdm import tqdm
from collections import defaultdict


@TASKS.register_module("classification")
class ClassificationTask(BaseTask):
    """Embedding-based classification evaluation task.

    Workflow:
      1. Run the target model over the train/test splits to collect
         embeddings, cached to ``<work_dir>/<split>_emb/<split>_emb_dict.pkl``.
      2. Align the two embedding dicts so both splits share the same
         embedding names and label names, each with positive AND negative
         samples (binary heads need both classes in both splits).
      3. For every surviving ``(emb_name, label_name)`` pair, fit a
         classification head on the train embeddings, score it on the test
         embeddings, and average the metrics per embedding name.

    Embedding dict layout::

        emb_dict[emb_name][label_name]["positive"|"negative"][query_id] = emb
    """

    MODE = "classification"

    def __init__(self, work_dir, model_cfg, dataset_cfg, **kwargs):
        super().__init__(work_dir, model_cfg, dataset_cfg, **kwargs)

        self.train_emb_dict = self._build_emb_dict("train")
        self.test_emb_dict = self._build_emb_dict("test")
        self._align_emb_dict()

    def _load_dataset(self):
        """Instantiate ``self.dataset`` from ``self.dataset_cfg``."""
        dataset_dir = self.dataset_cfg["data_dir"]
        file_type = self.dataset_cfg["data_type"]
        shot = self.dataset_cfg.get("shot")
        # A dict-valued "shot" entry selects a named test subdirectory.
        # isinstance(None, dict) is False, so no separate None check is needed.
        if isinstance(shot, dict):
            dataset_dir = os.path.join(dataset_dir, shot["test_name"])
        train_cfg = self.dataset_cfg["train"]
        test_cfg = self.dataset_cfg["test"]
        self.dataset = ClassificationDataset(self.dataset_name, file_type, dataset_dir, train_cfg, test_cfg)

    def _dataset_len(self):
        """Return the total number of samples across both splits."""
        return len(self.dataset.train) + len(self.dataset.test)

    def _build_emb_dict(self, split):
        """Build (or load from the local cache) the embedding dict for ``split``.

        Samples with ``label == 0`` go into the "negative" bucket, all other
        labels into "positive". Inference failures are logged, counted, and
        skipped. The result is pickled to disk so reruns can skip inference.

        Returns:
            dict: ``{emb_name: {label_name: {"positive"|"negative": {id: emb}}}}``
        """

        def infer_func(id_value, data):
            # Runs in a worker thread; returns None on failure so the
            # consumer loop can simply skip the sample.
            try:
                label = data.get("label", 0)
                label_name = data.get("label_name", "none")
                infer_emb_dict = self.target_model.infer(data)
                return str(id_value), str(label_name), int(label), infer_emb_dict
            except Exception as e:
                self.logger.error(f"[BuildEmbDict] model infer failed, error: {e}")
                self._emit_infer_failed_count()
                return None

        emb_dict = {}
        emb_dict_save_dir = os.path.join(self.work_dir, f"{split}_emb")
        os.makedirs(emb_dict_save_dir, exist_ok=True)
        emb_dict_save_path = os.path.join(emb_dict_save_dir, f"{split}_emb_dict.pkl")
        if os.path.exists(emb_dict_save_path):
            # NOTE(review): pickle.load is only safe because this cache is
            # written by this task itself below; never point work_dir at
            # untrusted data.
            try:
                with open(emb_dict_save_path, "rb") as f:
                    emb_dict = pickle.load(f)
                self.logger.info(f"[BuildEmbDict] load {split} emb dict from local successfully")
                return emb_dict
            except Exception as e:
                self.logger.error(
                    f"[BuildEmbDict] load {split} emb dict from local failed, build new {split} emb dict. error: {e}")

        assert hasattr(self.dataset, split), f"dataset hasn't {split}"
        dataset_iter = getattr(self.dataset, split)
        with ThreadPoolExecutor(max_workers=self.target_model_worker_num) as pool:
            futures = [
                pool.submit(infer_func, query_id, query_data)
                for query_id, query_data in tqdm(dataset_iter, total=len(dataset_iter), desc=f"Load {split}")
            ]
            for future in tqdm(as_completed(futures), total=len(futures), desc=f"Build {split} Emb"):
                result = future.result()
                if result is None:
                    continue
                query_id, label_name, label, infer_emb_dict = result
                for emb_name, emb in infer_emb_dict.items():
                    # setdefault creates the nested buckets on first sight of
                    # each (emb_name, label_name) pair.
                    label_dict = emb_dict.setdefault(emb_name, {}).setdefault(
                        label_name, {"positive": {}, "negative": {}})
                    polarity = "negative" if label == 0 else "positive"
                    label_dict[polarity][query_id] = emb

        # Best-effort cache write: failure to persist is logged, not fatal.
        try:
            with open(emb_dict_save_path, "wb") as f:
                pickle.dump(emb_dict, f)
            self.logger.info(f"[BuildEmbDict] save {split} emb dict to local successfully")
        except Exception as e:
            self.logger.error(f"[BuildEmbDict] save {split} emb dict to local failed, error: {e}")
        return emb_dict

    def _align_emb_dict(self):
        """Align train/test embedding dicts in place.

        Keeps only embedding names present in both splits, then only label
        names present in both splits, and finally drops any label that lacks
        positive or negative samples in either split — guaranteeing every
        surviving pair can train and evaluate a binary classification head.
        """
        train_emb_names = set(self.train_emb_dict)
        test_emb_names = set(self.test_emb_dict)
        shared_emb_names = train_emb_names & test_emb_names
        for discard_name in train_emb_names - shared_emb_names:
            self.train_emb_dict.pop(discard_name)
        for discard_name in test_emb_names - shared_emb_names:
            self.test_emb_dict.pop(discard_name)
        for emb_name in shared_emb_names:
            train_labels = set(self.train_emb_dict[emb_name])
            test_labels = set(self.test_emb_dict[emb_name])
            shared_labels = train_labels & test_labels
            for discard_label in train_labels - shared_labels:
                self.train_emb_dict[emb_name].pop(discard_label)
            for discard_label in test_labels - shared_labels:
                self.test_emb_dict[emb_name].pop(discard_label)
            for label_name in shared_labels:
                buckets = (
                    self.train_emb_dict[emb_name][label_name],
                    self.test_emb_dict[emb_name][label_name],
                )
                # A binary head needs both classes in both splits.
                if any(not b["positive"] or not b["negative"] for b in buckets):
                    self.train_emb_dict[emb_name].pop(label_name)
                    self.test_emb_dict[emb_name].pop(label_name)

    def _single_label_eval(self, emb_name, label_name):
        """Train/score one classification head for ``(emb_name, label_name)``.

        A previously exported head is reused when it loads cleanly;
        otherwise a fresh head is trained and exported.

        Returns:
            tuple[list[int], list]: ``(y_truth, y_pred_proba)`` where
            ``y_truth`` is 1 for "positive"-bucket samples and 0 for
            "negative" ones; items absent from the head's inference result
            are skipped.
        """
        train_datasets = self.train_emb_dict[emb_name][label_name]
        test_datasets = self.test_emb_dict[emb_name][label_name]
        classification_kwargs = self.dataset_cfg.get("classification_kwargs", {})
        assert isinstance(classification_kwargs, dict)
        assert "head" in classification_kwargs
        classification_head_build_cfg = {
            "type": classification_kwargs["head"],
            "label_name": label_name,
            "head_params": classification_kwargs.get("head_params"),
        }
        classification_head = CLASSIFICATION_HEADS.build(classification_head_build_cfg)

        classification_head_save_dir = os.path.join(self.work_dir, "classification_head")
        os.makedirs(classification_head_save_dir, exist_ok=True)
        # NOTE(review): this save dir is shared across all emb_names. If the
        # head implementation namespaces its files only by label_name, heads
        # for the same label under different emb_names would overwrite each
        # other — confirm against the head implementations.
        try:
            # Try to reuse a previously exported head from the save dir.
            classification_head.load(classification_head_save_dir)
        except Exception as e:
            # Load failed: train a fresh head, then export it for reuse.
            self.logger.warning(f"load classification head failed, error: {e}")
            self.logger.info("train classification head")
            classification_head.train(train_datasets)
            classification_head.export(classification_head_save_dir)
        # Predict on the test embeddings.
        infer_result = classification_head.infer(test_datasets)
        y_truth = []
        y_pred_proba = []
        for bucket_name, bucket in test_datasets.items():
            if bucket_name not in infer_result:
                continue
            bucket_result = infer_result[bucket_name]
            truth = 1 if bucket_name == "positive" else 0
            for item_id in bucket:
                if item_id not in bucket_result:
                    continue
                y_pred_proba.append(bucket_result[item_id])
                y_truth.append(truth)
        return y_truth, y_pred_proba

    def eval(self):
        """Evaluate every (emb_name, label_name) pair and aggregate metrics.

        Returns:
            dict: ``{emb_name: {metric_name: score}}`` where each score is
            the unweighted mean of that metric over the embedding's labels.
        """
        final_score = {}
        # Evaluator configs do not depend on the label, so resolve them once
        # instead of on every label iteration.
        evaluator_cfg_list = self.dataset_cfg["evaluator"]
        for emb_name in list(self.test_emb_dict.keys()):
            per_metric_scores = defaultdict(list)
            for label_name in list(self.test_emb_dict[emb_name].keys()):
                y_truth, y_pred_proba = self._single_label_eval(emb_name, label_name)
                # Merge all evaluators' metrics for this label; as in the
                # original, a later evaluator's duplicate metric key wins.
                label_scores = {}
                for evaluator_cfg in evaluator_cfg_list:
                    evaluator_build_cfg = {
                        "type": evaluator_cfg["name"],
                        "y_truth": y_truth,
                        "y_pred_proba": y_pred_proba,
                    }
                    evaluator_build_cfg.update(evaluator_cfg.get("kwargs", {}))
                    evaluator = EVALUATORS.build(evaluator_build_cfg)
                    label_scores.update(evaluator.score())
                for metric_name, score in label_scores.items():
                    per_metric_scores[metric_name].append(score)
            final_score[emb_name] = {
                metric_name: sum(scores) / len(scores)
                for metric_name, scores in per_metric_scores.items()
            }
        return final_score
