
import os
from ray.data import read_datasource, ActorPoolStrategy

from s3_datasource import S3Datasource
from process_pdf_enroll import BatchDocInfer


# Input S3 prefixes (raw PDF corpora) and their matching output prefixes,
# paired by index: sources[i] is processed into targets[i].
sources = [
    "s3://llm-process-pperf/ebook_index_v4/en-ebook-physicsandmathstutor/v001/pdf/",
    "s3://llm-process-pperf/uf-en-ebook-2012bookproject/v001/pdf",
    "s3://llm-process-pperf/ebook_index_v4/intech_pdf/v001/",
    "s3://llm-process-pperf/zh-ebook-shuge/v001/pdf/",
    "s3://llm-process-pperf/ebook_index_v4/zh-web-shanghaiyanfa/v002/pdf/",
]
# NOTE(review): entries 1 and 3 lack a trailing "/" while the others have one.
# ProcArgs builds output keys as `target_prefix + basename`, so selecting one
# of those indices would produce malformed keys (e.g. ".../pdfdoc.pdf") —
# confirm intent before using them.
targets = [
    "s3://llm-pdf-text-1/pdf_gpu_output/en-ebook-physicsandmathstutor/v001/pdf/",
    "s3://llm-pdf-text-1/pdf_gpu_output/uf-en-ebook-2012bookproject/v001/pdf",
    "s3://llm-pdf-text-1/pdf_gpu_output/intech_pdf/v001/",
    "s3://llm-pdf-text-1/pdf_gpu_output/zh-ebook-shuge/v001/pdf",
    "s3://llm-pdf-text-1/pdf_gpu_output/zh-web-shanghaiyanfa/v002/pdf/",
]


# Which corpus this run processes: hardcoded to index 4 (zh-web-shanghaiyanfa).
# Edit both indices together to switch datasets.
source_prefix = sources[4]
target_prefix = targets[4]


class ProcArgs:
    """Callable actor class used by Ray Data ``map_batches``.

    Each actor instantiates one ``BatchDocInfer`` (the heavyweight model
    init happens once per actor, not once per batch) and then processes
    every S3 path in each incoming batch, writing results under the
    module-level ``target_prefix``.
    """

    def __init__(self):
        # NOTE(review): BatchDocInfer takes two positional booleans whose
        # meaning isn't visible from this file — confirm against
        # process_pdf_enroll before changing them.
        self.infer = BatchDocInfer(True, True)

    @staticmethod
    def _dest_key(prefix: str, src_path: str) -> str:
        """Return the output key: *prefix* joined with the last path
        component of *src_path*.

        Inserts a "/" when *prefix* lacks a trailing one, so prefixes
        with or without the slash both yield a well-formed key (the
        original plain concatenation silently produced malformed keys
        for slash-less prefixes, e.g. ``.../pdfdoc.pdf``). Splits on
        "/" explicitly because S3 keys are POSIX-style regardless of
        the host OS.
        """
        name = src_path.rsplit("/", 1)[-1]
        sep = "" if prefix.endswith("/") else "/"
        return f"{prefix}{sep}{name}"

    def __call__(self, batch):
        """Run inference on every ``s3path`` entry of *batch*.

        Returns an empty ``results`` column — the useful work is the
        side effect of ``self.infer`` writing to S3.
        """
        for fn in batch["s3path"]:
            self.infer(fn, self._dest_key(target_prefix, fn))
        return {"results": []}


if __name__ == "__main__":
    import ray

    # Ship the local pipeline code to every Ray worker.
    runtime_env = {
        "working_dir": "/cpfs01/user/xurui/doc-infer/ray-pipeline",
    }
    ray.init(runtime_env=runtime_env)

    # Build the dataset of S3 PDF paths, fan it out over a fixed-size
    # pool of GPU actors (one GPU and one file per task), and block
    # until every batch has been processed.
    datasource = S3Datasource(source_prefix, target_prefix)
    pipeline = read_datasource(datasource, parallelism=120)
    pipeline = pipeline.map_batches(
        ProcArgs,
        num_gpus=1,
        batch_size=1,
        compute=ActorPoolStrategy(size=150),
    )
    dataset = pipeline.take_all()