# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from .utils import DaskRowIterator, read_parquet, AUTO_INCREMENT_TOKEN
from utils import get_logger

# Dispatch table: file_type key -> reader callable returning a dataframe.
read_file_method_map = dict(
    parquet=read_parquet,
)


class ClassificationDataset:
    """Classification dataset backed by columnar files (currently parquet).

    Resolves the train/test (and optional image) directories under
    ``dataset_dir``, reads the configured columns from each split, and wraps
    each split in a ``DaskRowIterator`` exposed via the ``train``/``test``
    properties.
    """

    def __init__(self, dataset_name, file_type, dataset_dir, train_cfg, test_cfg, image_cfg=None, **kwargs):
        """Load both splits eagerly at construction time.

        Args:
            dataset_name: Human-readable name, used only for logging.
            file_type: Key into ``read_file_method_map`` (e.g. ``"parquet"``).
            dataset_dir: Root directory containing the split subdirectories.
            train_cfg: Dict with ``dir_name``, ``feature_columns`` (non-empty
                dict), ``id_column``; ``label_column`` is required unless
                ``type`` is ``'mteb_classification'``; ``label_name_column``
                is optional.
            test_cfg: Same schema as ``train_cfg``, for the test split.
            image_cfg: Optional dict with ``dir_name`` for an image directory.
            **kwargs: May carry ``type`` (e.g. ``'mteb_classification'``).

        Raises:
            AssertionError: On unsupported ``file_type``, malformed configs,
                or missing directories.
        """
        self.dataset_name = dataset_name
        self.file_type = file_type
        self.dataset_dir = dataset_dir
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
        self.image_cfg = image_cfg
        self.type = kwargs.get('type')

        self.logger = get_logger("ClassificationDataset")
        self.logger.info(f"Loading Dataset: {self.dataset_name}")

        assert self.file_type in read_file_method_map, f"file_type {self.file_type} not supported"
        self.read_file_method = read_file_method_map[self.file_type]

        self.train_dir = None
        self.test_dir = None
        self.image_dir = None
        self._check_path()

        self.dataset = {}
        self._load()

        self.logger.info(f"Load Dataset: {self.dataset_name} completed")

    def _check_path(self):
        """Resolve split directories from the configs and assert they exist."""
        assert "dir_name" in self.train_cfg, "train_cfg must contain dir_name"
        assert "dir_name" in self.test_cfg, "test_cfg must contain dir_name"
        if self.image_cfg:
            assert "dir_name" in self.image_cfg, "image_cfg must contain dir_name"

        self.train_dir = os.path.join(self.dataset_dir, self.train_cfg["dir_name"])
        self.test_dir = os.path.join(self.dataset_dir, self.test_cfg["dir_name"])
        if self.image_cfg:
            self.image_dir = os.path.join(self.dataset_dir, self.image_cfg["dir_name"])
        assert os.path.exists(self.train_dir), f"train_dir {self.train_dir} not exists"
        assert os.path.exists(self.test_dir), f"test_dir {self.test_dir} not exists"
        if self.image_dir:
            assert os.path.exists(self.image_dir), f"image_dir {self.image_dir} not exists"

    def _load(self):
        """Read each split's columns and wrap it in a ``DaskRowIterator``."""
        split = ["train", "test"]
        split_cfg_map = {
            "train": self.train_cfg,
            "test": self.test_cfg,
        }
        split_dir_map = {
            "train": self.train_dir,
            "test": self.test_dir,
        }
        for s in split:
            split_cfg = split_cfg_map[s]
            split_dir = split_dir_map[s]
            feature_columns = split_cfg.get("feature_columns", {})
            assert isinstance(feature_columns, dict) and len(
                feature_columns) > 0, "feature_columns must be a non-empty dict"
            # Copy so the synthetic "label"/"label_name" keys added below do
            # not leak back into the caller's train_cfg/test_cfg dicts.
            feature_columns = dict(feature_columns)
            id_column = split_cfg.get("id_column", "")
            assert isinstance(id_column, str) and len(id_column) > 0, "id_column must be a non-empty string"
            label_column = split_cfg.get("label_column")
            label_name_column = split_cfg.get("label_name_column")
            read_columns = [id_column] + list(feature_columns.values())
            if self.type != 'mteb_classification':
                assert isinstance(label_column, str) and len(
                    label_column) > 0, "label_column must be a non-empty string"
            # Fix: only export a label when one is actually configured
            # (previously feature_columns["label"] was set to None on the
            # mteb path), and make sure an mteb-provided label column is
            # also physically read (previously it was only added to
            # read_columns on the non-mteb branch).
            if isinstance(label_column, str) and len(label_column) > 0:
                if label_column not in read_columns:
                    read_columns.append(label_column)
                feature_columns["label"] = label_column
            if isinstance(label_name_column, str) and len(label_name_column) > 0:
                read_columns.append(label_name_column)
                feature_columns["label_name"] = label_name_column
            # AUTO_INCREMENT_TOKEN marks a synthetic id generated at read
            # time, not a physical column; drop it only when present —
            # the unconditional .remove() raised ValueError whenever
            # id_column was a real column.
            if AUTO_INCREMENT_TOKEN in read_columns:
                read_columns.remove(AUTO_INCREMENT_TOKEN)
            df = self.read_file_method(split_dir, columns=read_columns)
            self.dataset[s] = DaskRowIterator(
                df, id_column=id_column, export_features=feature_columns, image_dir=self.image_dir)

    @property
    def train(self):
        """Row iterator over the train split."""
        return self.dataset["train"]

    @property
    def test(self):
        """Row iterator over the test split."""
        return self.dataset["test"]
