# Copyright (c) 2025 BytevalKit-Emb Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import json
import pickle
import numpy as np
import pandas as pd
from copy import deepcopy
from tqdm import tqdm
from task.base import BaseTask
from utils import EVALUATORS, CLASSIFICATION_HEADS, TASKS
from dataset.classification import ClassificationDataset
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed


@TASKS.register_module("mteb_classification")
class MtebClassificationTask(BaseTask):
    """MTEB-style text classification evaluation task.

    Runs ``n_experiments`` independent rounds. Each round:
      1. embeds the full test split with the target model,
      2. undersamples the train split to at most ``samples_per_label``
         examples per label and embeds the sample,
      3. trains (or reloads) a classification head on the train embeddings
         and scores its predictions on the test embeddings.
    The per-round metric values are averaged into the final score.
    """

    MODE = "mteb_classification"

    # Canonical split names for the per-experiment dataset attributes that
    # ``eval`` attaches dynamically; used for cache paths and log messages.
    _SPLIT_ALIASES = {"train_simpled": "train", "test_all": "test"}

    def __init__(self, work_dir, model_cfg, dataset_cfg, **kwargs):
        super().__init__(work_dir, model_cfg, dataset_cfg, **kwargs)
        mteb_kwargs = dataset_cfg.get('mteb_kwargs', {})
        # Number of train/eval rounds whose metrics are averaged.
        self.n_experiments = int(mteb_kwargs.get('n_experiments', 8))
        # Per-label cap used when undersampling the train split.
        self.samples_per_label = int(mteb_kwargs.get('samples_per_label', 0))
        self.cur_experiment = 0      # index of the round currently running
        self.all_train_data = None   # lazily cached raw train rows
        self.all_test_data = None    # lazily cached (id, row) test pairs

    def _load_dataset(self):
        """Instantiate ``self.dataset`` from the task's dataset config."""
        dataset_dir = self.dataset_cfg["data_dir"]
        file_type = self.dataset_cfg["data_type"]
        shot = self.dataset_cfg.get("shot")
        # Few-shot configs nest the data one directory deeper.
        if shot is not None and isinstance(shot, dict):
            dataset_dir = os.path.join(dataset_dir, shot["test_name"])
        train_cfg = self.dataset_cfg["train"]
        test_cfg = self.dataset_cfg["test"]
        self.dataset = ClassificationDataset(self.dataset_name, file_type, dataset_dir,
                                             train_cfg, test_cfg, type=self.dataset_cfg['type'])

    def _dataset_len(self):
        """Total number of examples across the train and test splits."""
        return len(self.dataset.train) + len(self.dataset.test)

    def _build_emb_dict(self, split):
        """Embed one split and return ``{emb_name: {label: {id: emb}}}``.

        The result is cached as a pickle under the current experiment
        directory and reloaded on later calls when present.
        """
        original_split = split
        # BUGFIX: the original pair of chained conditionals reset the alias
        # for 'train_simpled' back to itself (the second conditional compared
        # original_split against 'test_all' and fell back to original_split);
        # resolve both aliases via a single lookup instead.
        split = self._SPLIT_ALIASES.get(original_split, original_split)

        def infer_func(id_value, data):
            # Runs inside the worker pool; returns None on failure so the
            # consumer loop can skip the item.
            try:
                label_name = data.get("label_name", "none")
                infer_emb_dict = self.target_model.infer(data)
                return str(id_value), label_name, infer_emb_dict
            except Exception as e:
                self.logger.error(f"[BuildEmbDict] model infer failed, error: {e}")
                self._emit_infer_failed_count()
                return

        emb_dict = {}
        emb_dict_save_dir = os.path.join(self.work_dir, f"experiment_{self.cur_experiment}", f"{split}_emb")
        os.makedirs(emb_dict_save_dir, exist_ok=True)
        emb_dict_save_path = os.path.join(emb_dict_save_dir, f"{split}_emb_dict.pkl")
        if os.path.exists(emb_dict_save_path):
            try:
                # NOTE: pickle.load of a cache file this task wrote itself.
                with open(emb_dict_save_path, "rb") as f:
                    emb_dict = pickle.load(f)
                self.logger.info(f"[BuildEmbDict] load {split} emb dict from local successfully")
                return emb_dict
            except Exception as e:
                self.logger.error(
                    f"[BuildEmbDict] load {split} emb dict from local failed, build new {split} emb dict. error: {e}")

        assert hasattr(self.dataset, original_split), f"dataset hasn't {original_split}"
        dataset_iter = getattr(self.dataset, original_split)
        with ThreadPoolExecutor(max_workers=self.target_model_worker_num) as pool:
            futures = []
            for query_id, query_data in tqdm(dataset_iter, total=len(dataset_iter), desc=f"Load {split}"):
                futures.append(pool.submit(infer_func, query_id, query_data))
            for future in tqdm(as_completed(futures), total=len(futures), desc=f"Build {split} Emb"):
                result = future.result()
                if result is None:
                    continue  # inference failed for this item; already logged
                query_id, label_name, infer_emb_dict = result
                for emb_name, emb in infer_emb_dict.items():
                    emb_dict.setdefault(emb_name, {}).setdefault(label_name, {})[query_id] = emb

        try:
            with open(emb_dict_save_path, "wb") as f:
                pickle.dump(emb_dict, f)
            self.logger.info(f"[BuildEmbDict] save {split} emb dict to local successfully")
        except Exception as e:
            # Cache write failures are non-fatal; embeddings stay in memory.
            self.logger.error(f"[BuildEmbDict] save {split} emb dict to local failed, error: {e}")
        return emb_dict

    def _undersample_data(self, X, y, samples_per_label: int, idxs=None):
        """Randomly keep at most ``samples_per_label`` items per label.

        ``idxs`` (defaults to positional indices) is shuffled in place and
        used both as the visiting order and to index ``X``/``y`` — callers
        passing explicit ids must pass values that are valid positions into
        ``X``/``y``.

        Returns ``(X_sampled, y_sampled, ids_sampled)`` where ``ids_sampled``
        holds the index of each kept item, aligned with ``X_sampled``.
        """
        X_sampled, y_sampled, ids_sampled = [], [], []
        if idxs is None:
            idxs = np.arange(len(y))
        np.random.shuffle(idxs)
        label_counter = defaultdict(int)
        for i in idxs:
            if label_counter[y[i]] < samples_per_label:
                X_sampled.append(X[i])
                y_sampled.append(y[i])
                ids_sampled.append(i)
                label_counter[y[i]] += 1
        # BUGFIX: return the indices of the *kept* items (aligned with
        # X_sampled) instead of the full shuffled index list, which paired
        # sampled rows with unrelated ids downstream whenever the per-label
        # cap skipped an index.
        return X_sampled, y_sampled, ids_sampled

    def _sampled_train_data(self):
        """Build a per-label undersampled train split as (id, row) tuples."""
        self.logger.info('Sampling train data...')
        if self.all_train_data is None:
            self.all_train_data = [row for _, row in self.dataset.train]
        assert len(self.all_train_data) > 0, "train data is empty"
        train_df = pd.DataFrame(self.all_train_data)
        train_id = train_df['item_id'].to_list()
        train_text = train_df['text'].to_list()
        train_label = train_df['label_name'].to_list()
        # Undersample the training set.
        # NOTE(review): item ids are used as positions into the text/label
        # lists inside _undersample_data — assumes item_id equals the row
        # position; confirm against the dataset loader.
        X_sampled, y_sampled, idxs = self._undersample_data(train_text, train_label, self.samples_per_label, train_id)
        sampled_train_data = []
        for item_id, text, label in zip(idxs, X_sampled, y_sampled):
            sampled_train_data.append((item_id, {
                'item_id': item_id,
                'text': text,
                'label_name': label,
            }))
        return sampled_train_data

    def _multi_label_eval(self, emb_name):
        """Train/load a classification head and predict on the test split.

        Returns ``(y_test, y_pred)`` lists aligned by position.
        """
        train_datasets = self.train_emb_dict[emb_name]
        test_datasets = self.test_emb_dict[emb_name]
        classification_kwargs = self.dataset_cfg.get("classification_kwargs", {})
        assert isinstance(classification_kwargs, dict)
        assert "head" in classification_kwargs
        classification_head_build_cfg = {
            "type": classification_kwargs["head"],
            "label_name": 'multi_label',
            "head_params": classification_kwargs.get("head_params"),
        }
        classification_head = CLASSIFICATION_HEADS.build(classification_head_build_cfg)
        classification_head_save_dir = os.path.join(
            self.work_dir, f"experiment_{self.cur_experiment}", "classification_head")
        os.makedirs(classification_head_save_dir, exist_ok=True)
        # Try to load a previously trained head from the save directory.
        try:
            classification_head.load(classification_head_save_dir)
        except Exception as e:
            # Loading failed: train a fresh head, then export it for reuse.
            self.logger.warning(f"load classification head failed, error: {e}")
            self.logger.info(f"train classification head")
            classification_head.train(train_datasets)
            classification_head.export(classification_head_save_dir)
        infer_result = classification_head.infer(test_datasets)
        y_test, y_pred = [], []
        for label, data in infer_result.items():
            y_pred += list(data.values())
            y_test += [label] * len(data)
        return y_test, y_pred

    def _build_test_data(self):
        """Materialize (id, row) pairs for the full test split (cached)."""
        self.logger.info(f'Building test data...')
        if self.all_test_data is None:
            self.all_test_data = [(idx, row) for idx, row in self.dataset.test]
        assert len(self.all_test_data) > 0, "test data is empty"
        return self.all_test_data

    def detail_score_df(self, detail_score):
        """Persist the per-experiment score breakdown as JSON (best effort)."""
        try:
            file_path = os.path.join(self.work_dir, "detail_score.json")
            with open(file_path, "w", encoding="utf-8") as file:
                json.dump(detail_score, file, ensure_ascii=False, indent=4)
            self.logger.info(f"save detail score to {file_path}")
        except Exception as e:
            self.logger.error(f"save detail score failed, error: {e}")

    def eval(self):
        """Run all experiment rounds and return metrics averaged across them."""
        detail_score = {}
        for i in range(self.n_experiments):
            self.logger.info(f"start {i}th experiment!")
            self.cur_experiment = i
            detail_score[i] = {}
            self.logger.info('Build test emb dict...')
            self.dataset.test_all = self._build_test_data()
            self.test_emb_dict = self._build_emb_dict("test_all")
            self.dataset.train_simpled = self._sampled_train_data()
            self.logger.info('Build train emb dict...')
            self.train_emb_dict = self._build_emb_dict("train_simpled")
            for emb_name in list(self.test_emb_dict.keys()):
                y_test, y_pred_proba = self._multi_label_eval(emb_name)
                label_final_score = {}
                for evaluator_cfg in self.dataset_cfg["evaluator"]:
                    evaluator_name = evaluator_cfg["name"]
                    evaluator_kwargs = evaluator_cfg.get("kwargs", {})
                    self.logger.info(f"start calculate {evaluator_name} metric!")
                    evaluator_build_cfg = {
                        "type": evaluator_name,
                        "y_truth": y_test,
                        "y_pred_proba": y_pred_proba,
                    }
                    evaluator_build_cfg.update(evaluator_kwargs)
                    evaluator = EVALUATORS.build(evaluator_build_cfg)
                    label_final_score.update(evaluator.score())
                detail_score[i][emb_name] = deepcopy(label_final_score)
        # Collect each metric's values across rounds, then average them.
        temp_score = defaultdict(lambda: defaultdict(list))
        for score_dict in detail_score.values():
            for emb_name, metric_dict in score_dict.items():
                for metric_key, metric_value in metric_dict.items():
                    temp_score[emb_name][metric_key].append(metric_value)
        final_score = {
            emb_name: {k: float(np.mean(v)) for k, v in metric_dict.items()}
            for emb_name, metric_dict in temp_score.items()
        }
        detail_score["final_score"] = final_score
        self.detail_score_df(detail_score)
        return final_score
