import math
from contextlib import contextmanager
from typing import Any, Callable, Iterable, Iterator, List, Optional

from ray.data.block import Block, BlockAccessor, BlockMetadata
from ray.data.datasource.datasource import Datasource, ReadTask
from ray.data import read_datasource, ActorPoolStrategy

import boto3
import os

from modules.s3_utils import split_s3_path
from process_pdf_enroll import BatchDocInfer
import pyarrow as pa


# Module-level accumulators used by the (currently commented-out) driver in
# __main__: each iteration appends the prefix pair being processed so that
# worker-side code (e.g. the dead ProcArgs reference below) can read the
# latest entry via [-1]. Empty unless the driver loop is re-enabled.
source_prefix = []
target_prefix = []

def list_all_objects(bucket_name, prefix):
    """Return ``s3://bucket/key`` URLs of every ``.jsonl`` object under *prefix*.

    Paginates through ``list_objects_v2`` with continuation tokens until the
    listing is exhausted.

    Args:
        bucket_name: Name of the S3 bucket to list.
        prefix: Key prefix to list under.

    Returns:
        List of fully-qualified ``s3://`` URLs for matching ``.jsonl`` keys.
    """
    # SECURITY: the original embedded live access keys and an endpoint URL in
    # source code. Credentials are now taken from the environment; if the
    # variables are unset, boto3 falls back to its standard credential chain.
    s3 = boto3.client(
        "s3",
        aws_access_key_id=os.environ.get("S3_ACCESS_KEY_ID"),
        aws_secret_access_key=os.environ.get("S3_SECRET_ACCESS_KEY"),
        endpoint_url=os.environ.get("S3_ENDPOINT_URL"),
    )

    results = []
    continuation_token = None
    while True:
        # Note: the original passed MaxKeys=10 on the first request only,
        # which was clearly accidental; we use the service default page size
        # consistently (the final result set is identical).
        kwargs = {"Bucket": bucket_name, "Prefix": prefix}
        if continuation_token:
            kwargs["ContinuationToken"] = continuation_token
        response = s3.list_objects_v2(**kwargs)

        # 'Contents' is absent when a page (or the prefix) is empty.
        for obj in response.get("Contents", []):
            key = obj["Key"]
            if key.endswith(".jsonl"):
                results.append(f"s3://{bucket_name}/{key}")

        continuation_token = response.get("NextContinuationToken")
        if not continuation_token:
            break
    return results


class S3Datasource(Datasource):
    """Ray Datasource producing the S3 paths of ``.jsonl`` files still to process.

    A source file is considered pending when no object with the same basename
    exists under the target prefix (i.e. its output has not been written yet).
    Each read task yields a single-column Arrow table with column ``s3path``.
    """

    def __init__(self, source_s3_path: str, target_s3_path: str):
        """Store the source and target ``s3://`` prefixes to compare."""
        self.source_s3_path = source_s3_path
        self.target_s3_path = target_s3_path

    def estimate_inmemory_data_size(self) -> Optional[int]:
        # Size is unknown up front; None tells Ray to use its defaults.
        # (Bug fix: the original evaluated the bare expression `None` without
        # `return` — same value by accident, but clearly a missing return.)
        return None

    def _fetch_s3_files(self) -> List[str]:
        """List source ``.jsonl`` paths whose basenames are absent from target."""
        bucket, prefix = split_s3_path(self.target_s3_path)
        # Basenames already present on the target side — these are "done".
        done = {os.path.basename(fn) for fn in list_all_objects(bucket, prefix)}

        bucket, prefix = split_s3_path(self.source_s3_path)
        return [
            fn
            for fn in list_all_objects(bucket, prefix)
            if os.path.basename(fn) not in done
        ]

    def get_read_tasks(self, parallelism: int) -> List[ReadTask]:
        """Partition the pending file list into ``parallelism`` read tasks.

        The first ``total % parallelism`` tasks receive one extra row so the
        split is as even as possible; trailing tasks may be empty when there
        are fewer files than tasks.
        """
        pending = self._fetch_s3_files()
        base_rows = len(pending) // parallelism
        extra_blocks = len(pending) % parallelism

        # Crude per-row size estimate: byte length of one path string.
        bytes_per_row = len(pending[0]) if pending else 0
        schema = pa.schema([("s3path", pa.string())])

        tasks: List[ReadTask] = []
        offset = 0
        for i in range(parallelism):
            num_rows = base_rows + (1 if i < extra_blocks else 0)
            read_fn = self._create_read_fn(num_rows, offset, pending)
            metadata = BlockMetadata(
                num_rows,
                bytes_per_row * num_rows,
                schema,
                None,
                None,
            )
            tasks.append(ReadTask(read_fn, metadata))
            offset += num_rows

        return tasks

    def _create_read_fn(self, num_rows: int, offset: int, rows):
        """Build a zero-arg read function returning one Arrow-table block."""
        def read_fn() -> Iterable[Block]:
            table = pa.Table.from_arrays(
                [rows[offset:offset + num_rows]], names=["s3path"]
            )
            return [table]
        return read_fn


def dump_args(batch):
    """Debug helper: record the first s3path and the batch size to /tmp/dump.json.

    Writes ``batch["s3path"][0]``, a newline, then the batch length, and
    returns a dummy single-row batch so it can be plugged into map_batches.
    (Fix: removed the unused ``import json``.)
    """
    paths = batch["s3path"]
    with open("/tmp/dump.json", "w") as f:
        f.write(paths[0])
        f.write("\n")
        f.write(str(len(paths)))
    return {"haha": [1]}



"""
class ProcArgs:
    def __init__(self):
        self.infer = BatchDocInfer(True, True)

    def __call__(self, batch):
        for fn in batch["s3path"]:
            self.infer(fn, target_prefix[-1] + os.path.basename(fn))
        return {"results": []}
"""


if __name__ == "__main__":
    """
    import ray
    runtime_env = {"working_dir": "/cpfs01/user/xurui/doc-infer/ray-pipeline",}

    ray.init(runtime_env=runtime_env)

    
    sources = [
        "s3://llm-process-pperf/ebook_index_v4/en-ebook-physicsandmathstutor/v001/pdf/",
        "s3://llm-process-pperf/uf-en-ebook-2012bookproject/v001/pdf",
        "s3://llm-process-pperf/ebook_index_v4/intech_pdf/v001/",
        "s3://llm-process-pperf/zh-ebook-shuge/v001/pdf/"
    ]
    targets = [
        "s3://llm-pdf-text-1/pdf_gpu_output/en-ebook-physicsandmathstutor/v001/pdf/",
        "s3://llm-pdf-text-1/pdf_gpu_output/uf-en-ebook-2012bookproject/v001/pdf",
        "s3://llm-pdf-text-1/pdf_gpu_output/intech_pdf/v001/",
        "s3://llm-pdf-text-1/pdf_gpu_output/zh-ebook-shuge/v001/pdf"
    ]

    for s, t in zip(sources, targets):
        source_prefix.append(s)
        target_prefix.append(t)
        ds = S3Datasource(source_prefix[-1],
                    target_prefix[-1])
        dataset = read_datasource(ds, parallelism=240).map_batches(ProcArgs, num_gpus=1, batch_size=1, compute=ActorPoolStrategy(size=240)).take_all()
    """
    pass
