# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ast
import gc
import glob
import os
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed
from queue import Queue

import dask.dataframe as dd
import numpy as np


# Sentinel for `id_column`: when a DaskRowIterator is constructed with this
# value, row ids are generated from a 0-based auto-incrementing counter
# instead of being read from a DataFrame column.
AUTO_INCREMENT_TOKEN = "<AUTO_INCREMENT>"


def read_parquet(path, columns=None):
    """Load parquet data as a Dask DataFrame.

    Accepts either a single ``.parquet`` file, or a directory containing at
    least one ``*.parquet`` file (non-recursive).

    Args:
        path (str): Path to a ``.parquet`` file, or to a directory that
            holds one or more ``*.parquet`` files.
        columns (list[str], optional): Column subset to read. Defaults to
            None (all columns).

    Returns:
        dd.DataFrame: The lazily-loaded Dask DataFrame.

    Raises:
        FileNotFoundError: If ``path`` does not exist, or the directory
            contains no parquet files.
        ValueError: If ``path`` ends in ``.parquet`` but is not a regular
            file, or does not end in ``.parquet`` and is not a directory.
    """
    # Explicit raises instead of `assert`: assertions are stripped under
    # `python -O`, which would silently disable this input validation.
    if not os.path.exists(path):
        raise FileNotFoundError(f"path {path} does not exist")
    if path.endswith(".parquet"):
        if not os.path.isfile(path):
            raise ValueError(f"path {path} is not a file")
        return dd.read_parquet(path, columns=columns)
    if not os.path.isdir(path):
        raise ValueError(f"path {path} is not a directory")
    parquet_files = glob.glob(os.path.join(path, "*.parquet"))
    if not parquet_files:
        raise FileNotFoundError(f"path {path} does not contain any parquet files")
    return dd.read_parquet(parquet_files, columns=columns)


class DaskRowIterator:
    """Row-wise iterator over a Dask DataFrame with background prefetching.

    Partitions are computed concurrently in a producer thread (up to 10 at
    a time) and streamed to the consumer through a queue, so row iteration
    overlaps with partition loading. ``__next__`` yields one
    ``(id_value, row_dict_or_features)`` pair per row.

    NOTE(review): the hand-off queue is unbounded, so in the worst case all
    computed partitions may be resident in memory before the consumer
    catches up — confirm this is acceptable for the target datasets.
    """

    def __init__(self, dask_df: "dd.DataFrame", id_column=None, export_features=None, image_dir=None):
        """
        Initialize the DaskRowIterator.

        Args:
            dask_df (dd.DataFrame): The Dask DataFrame to iterate over.
            id_column (str, optional): The column name containing the unique
                identifiers. Pass ``AUTO_INCREMENT_TOKEN`` to generate
                sequential integer ids instead. Defaults to None (yields
                ``None`` as the id).
            export_features (dict, optional): Mapping of exported feature
                name -> source column name. When provided, only these
                features (plus ``"item_id"``) are yielded instead of the
                full row dict. Defaults to None.
            image_dir (str, optional): Stored as-is for callers; not read by
                this class itself. Defaults to None.
        """
        self.dask_df = dask_df
        self.partitions_num = dask_df.npartitions
        self.current_partition_iterator = None
        # Unbounded hand-off queue between the producer thread and __next__.
        self._partition_queue = Queue()
        self._read_partition_started = False

        self.id_column = id_column
        self._increment_id = 0  # counter used when id_column is AUTO_INCREMENT_TOKEN
        self.export_features = export_features
        self.image_dir = image_dir

    def _read_partition(self):
        """Producer: compute all partitions concurrently, enqueue them in
        partition order, then enqueue ``None`` as the end-of-stream marker.
        """

        def compute_partition(idx):
            return self.dask_df.get_partition(idx).compute()

        with ThreadPoolExecutor(max_workers=10) as executor:
            futures = [executor.submit(compute_partition, i) for i in range(self.partitions_num)]
            # Enqueue in submission (partition) order rather than completion
            # order, so row order -- and any auto-increment ids -- are
            # deterministic across runs while computation stays concurrent.
            for future in futures:
                self._partition_queue.put(future.result())
        # Sentinel marking that no partitions remain.
        self._partition_queue.put(None)

    def __iter__(self):
        return self

    def __len__(self):
        return len(self.dask_df)

    def __next__(self):
        # Lazily start the producer thread on first use.
        if not self._read_partition_started:
            threading.Thread(target=self._read_partition, daemon=True).start()
            self._read_partition_started = True

        # Loop (instead of recursing) until a row is produced or the stream
        # ends; avoids unbounded recursion when many partitions are empty.
        while True:
            if self.current_partition_iterator is None:
                partition = self._partition_queue.get()
                if partition is None:
                    # End-of-stream sentinel: no partitions remain.
                    raise StopIteration
                self.current_partition_iterator = partition.iterrows()
            try:
                _, row = next(self.current_partition_iterator)
                break
            except StopIteration:
                # Current partition exhausted; drop it and fetch the next.
                self.current_partition_iterator = None
                gc.collect()  # encourage release of the finished partition

        row_dict = row.to_dict()
        for k, v in row_dict.items():
            if isinstance(v, np.ndarray):
                row_dict[k] = v.tolist()
            elif isinstance(v, str) and v.startswith("[") and v.endswith("]"):
                # Strings that look like list literals are parsed back into
                # Python lists. literal_eval (not eval) so untrusted cell
                # contents cannot execute arbitrary code.
                try:
                    row_dict[k] = ast.literal_eval(v)
                except (ValueError, SyntaxError):
                    row_dict[k] = v  # not a valid literal; keep the raw string
        id_value = None
        if self.id_column:
            if self.id_column == AUTO_INCREMENT_TOKEN:
                id_value = self._increment_id
                self._increment_id += 1
            elif self.id_column in row_dict:
                id_value = row_dict[self.id_column]
        if self.export_features and isinstance(self.export_features, dict):
            features = {k: row_dict.get(v) for k, v in self.export_features.items()}
            features["item_id"] = id_value
            return id_value, features
        return id_value, row_dict