# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import pandas as pd
import yaml
from copy import deepcopy

# Define input/output paths.
# All generated configs are derived from the paraphrase-multilingual-MiniLM-L12-v2
# baseline templates; this script stamps out one config set per model in the CSV.
csv_path = "../model_list/cmteb_leaderboard_open_source_evaluation_models_list.csv"
# A single classification template, reused for every classification dataset below.
classification_template_path = "./mteb_classification/JDReview-classification.yaml"
classification_output_dir = "./mteb_classification"
# Directory of per-dataset retrieval templates (one YAML per dataset).
retrieval_template_dir = "./retrieval/paraphrase-multilingual-MiniLM-L12-v2"
retrieval_output_dir = "./retrieval"
# Directory of per-dataset similarity-classification templates.
similarity_template_dir = "./similarity_classification/paraphrase-multilingual-MiniLM-L12-v2"
similarity_output_dir = "./similarity_classification"

# Classification demo dataset names; each gets its own config generated
# from the shared classification template above.
classification_datasets = [
    "JDReview-classification",
    "MultilingualSentiment-classification",
    "OnlineShopping-classification"
]

df = pd.read_csv(csv_path)

# Load the single classification template shared by all classification datasets.
with open(classification_template_path, 'r', encoding='utf-8') as f:
    classification_template = yaml.safe_load(f)


def _load_yaml_templates(template_dir):
    """Load every ``.yaml`` file directly inside *template_dir*.

    Returns a dict mapping template file name -> parsed YAML content, so the
    generated per-model configs can reuse the original file names.
    (Deduplicates the previously copy-pasted retrieval / similarity loaders.)
    """
    templates = {}
    for file_name in os.listdir(template_dir):
        if file_name.endswith('.yaml'):
            template_path = os.path.join(template_dir, file_name)
            with open(template_path, 'r', encoding='utf-8') as f:
                templates[file_name] = yaml.safe_load(f)
    return templates


# Retrieval and similarity-classification templates, keyed by file name.
retrieval_templates = _load_yaml_templates(retrieval_template_dir)
similarity_templates = _load_yaml_templates(similarity_template_dir)

def _model_section(model_name, model_path):
    """Build the MODEL section shared by every generated config.

    Replaces three verbatim copies of this dict literal in the original loop.
    """
    return {
        f"model_{model_name}": {
            "type": "sentence_transformer",
            "name": model_name,
            "path_or_dir": model_path,
            "preprocessors": [],
            "worker_num": 20
        }
    }


def _dump_yaml(config, output_path):
    """Write *config* to *output_path*, preserving key insertion order."""
    with open(output_path, 'w', encoding='utf-8') as f:
        yaml.dump(config, f, default_flow_style=False, sort_keys=False)


for _, row in df.iterrows():
    model_name = row["model_name"]
    model_path = row["model_hf_path"]

    # Skip incomplete rows in the model list.
    if pd.isna(model_name) or pd.isna(model_path):
        continue

    # --- Classification configs: one per demo dataset, from the shared template.
    model_classification_dir = os.path.join(classification_output_dir, model_name)
    os.makedirs(model_classification_dir, exist_ok=True)

    for dataset_name in classification_datasets:
        config = deepcopy(classification_template)

        # Replace the template's (single) dataset entry with one for this
        # dataset, pointing at the demo dataset directory.
        dataset_key = next(iter(config["DATASET"]))
        dataset_value = config["DATASET"].pop(dataset_key)
        dataset_value["name"] = dataset_name
        dataset_value["data_dir"] = f"./demo/datasets/mteb_classification/{dataset_name}"
        config["DATASET"][f"dataset_{dataset_name}"] = dataset_value

        # Swap in this model's MODEL section (the template's entry is discarded).
        config["MODEL"] = _model_section(model_name, model_path)

        _dump_yaml(config, os.path.join(model_classification_dir, f"{dataset_name}.yaml"))

    # --- Retrieval configs: copy each template and swap in the model section.
    model_retrieval_dir = os.path.join(retrieval_output_dir, model_name)
    os.makedirs(model_retrieval_dir, exist_ok=True)

    for template_file, template in retrieval_templates.items():
        config = deepcopy(template)
        config["MODEL"] = _model_section(model_name, model_path)
        _dump_yaml(config, os.path.join(model_retrieval_dir, template_file))

    # --- Similarity-classification configs: same treatment as retrieval.
    model_similarity_dir = os.path.join(similarity_output_dir, model_name)
    os.makedirs(model_similarity_dir, exist_ok=True)

    for template_file, template in similarity_templates.items():
        config = deepcopy(template)
        config["MODEL"] = _model_section(model_name, model_path)
        _dump_yaml(config, os.path.join(model_similarity_dir, template_file))
