# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from utils import get_logger, MODELS
import pandas as pd
import threading
import time


def load_model_from_cfg(model_name, model_cfg):
    """Build a model instance from a config dict via the MODELS registry.

    The registry receives the config's ``type``, the given ``model_name``,
    the configured ``path_or_dir`` (defaulting to ``"-"``), plus any extra
    ``model_kwargs`` merged on top.
    """
    build_cfg = {
        "type": model_cfg["type"],
        "model_name": model_name,
        "model_path": model_cfg.get("path_or_dir", "-"),
        **model_cfg.get("model_kwargs", {}),
    }
    return MODELS.build(build_cfg)


class BaseTask:
    """Base class for one model-x-dataset evaluation task.

    Subclasses implement ``_load_dataset``, ``_dataset_len`` and ``eval``;
    ``run`` then evaluates and writes the formatted scores to
    ``<work_dir>/<model_name>/<dataset_name>/result.csv``.
    """

    # Value written into the "mode" column of the score dataframe.
    MODE = "base"

    def __init__(self, work_dir, model_cfg, dataset_cfg, eval_score_save_context=False, **kwargs):
        """Set up working directory, models, dataset and runtime counters.

        Args:
            work_dir: Root output directory; a per-model/per-dataset
                subdirectory is created underneath it.
            model_cfg: Model config dict; must contain ``name``. May contain
                ``query``/``target`` sub-configs for a two-model setup.
            dataset_cfg: Dataset config dict; must contain ``name``.
            eval_score_save_context: Whether to save evaluation context
                alongside scores (consumed by subclasses).
        """
        self.model_cfg = model_cfg
        self.dataset_cfg = dataset_cfg
        self.eval_score_save_context = eval_score_save_context
        assert "name" in model_cfg and "name" in dataset_cfg, "model_cfg and dataset_cfg must contain name"
        self.model_name = model_cfg["name"]
        self.dataset_name = dataset_cfg["name"]
        self.logger = get_logger(self.__class__.__name__)

        self.work_dir = os.path.join(work_dir, self.model_name, self.dataset_name)
        os.makedirs(self.work_dir, exist_ok=True)

        self.query_model = None
        self.target_model = None
        self.query_model_worker_num = 1
        self.target_model_worker_num = 1
        self._load_model()

        self.dataset = None
        self._load_dataset()

        self.logger.info(f"Running Task: {self.model_name} x {self.dataset_name}")
        # Holds evaluation scores; `eval` returns a dict stored here and
        # `score_df` turns it into the uniform output format.
        self.eval_score = {}

        # Runtime metrics.
        self._emit_runtime_info_lock = threading.Lock()
        self.infer_failed_count = 0
        self._start_time = time.time()
        self.duration = 0

    def _dataset_len(self):
        """Return the number of samples in the dataset. Subclass hook."""
        raise NotImplementedError()

    def _load_dataset(self):
        """Load ``self.dataset``. Subclass hook."""
        raise NotImplementedError()

    def _load_model(self):
        """Instantiate query/target models from ``self.model_cfg``.

        When the config provides both ``query`` and ``target`` sub-configs,
        two distinct models are built; otherwise a single model serves both
        roles and shares its worker count.
        """
        if "query" in self.model_cfg and "target" in self.model_cfg:
            self.query_model = load_model_from_cfg(f"{self.model_name}_query", self.model_cfg["query"])
            self.target_model = load_model_from_cfg(f"{self.model_name}_target", self.model_cfg["target"])
            self.query_model_worker_num = self.model_cfg["query"].get("worker_num", 1)
            self.target_model_worker_num = self.model_cfg["target"].get("worker_num", 1)
        else:
            self.query_model = load_model_from_cfg(self.model_name, self.model_cfg)
            self.target_model = self.query_model
            self.query_model_worker_num = self.model_cfg.get("worker_num", 1)
            self.target_model_worker_num = self.query_model_worker_num

    def eval(self):
        """Run the evaluation and return ``{emb_name: {metric: value}}``. Subclass hook."""
        raise NotImplementedError()

    def score_df(self):
        """Format ``self.eval_score`` into a DataFrame and save it as CSV.

        Each embedding's metrics become rows keyed by (dataset, version,
        metric, mode) with the metric value under a model-named column;
        multiple embeddings are outer-merged on those keys.

        Returns:
            The merged DataFrame, or None when ``self.eval_score`` is empty
            (in which case no CSV is written).
        """
        score_df_list = []
        for emb_name, metrics_dict in self.eval_score.items():
            # Column name is invariant per embedding — compute it once, not
            # once per metric. Suffix with the embedding name only when more
            # than one embedding is scored, to keep columns distinguishable.
            real_model_name = self.model_name
            if len(self.eval_score) > 1:
                real_model_name = f"{self.model_name}-{emb_name}"
            emb_score_data_list = [
                {
                    "dataset": self.dataset_name,
                    "version": "1",
                    "metric": metric_name,
                    "mode": self.MODE,
                    real_model_name: metric_value,
                }
                for metric_name, metric_value in metrics_dict.items()
            ]
            score_df_list.append(pd.DataFrame(emb_score_data_list))
        score_df = None
        for tmp_df in score_df_list:
            if score_df is None:
                score_df = tmp_df
            else:
                score_df = pd.merge(score_df, tmp_df, on=["dataset", "version", "metric", "mode"], how="outer")
        score_save_path = os.path.join(self.work_dir, "result.csv")
        if score_df is not None:
            score_df.to_csv(score_save_path, index=False)
        return score_df

    def run(self):
        """Evaluate, format and persist the scores; return the score DataFrame."""
        self.eval_score = self.eval()
        score_df = self.score_df()
        return score_df
