import json
from datasets import load_dataset
import pandas as pd

from agentic_system.environments.env_package.swe.data_processor.arm64_instance import ARM64_INSTANCES

# from agentic_system.environments.env_package.swe.arm64_instance import ARM64_INSTANCES

# Dataset configuration.
# NOTE: the previous local snapshot path
# ("/home/tianye/hf_dir/datasets--princeton-nlp--SWE-Bench_Verified") was a
# dead assignment immediately overwritten below; point `dataset_path` at a
# local directory instead of the hub id to load offline.
dataset_path = "princeton-nlp/SWE-Bench_Verified"
split = "test"
# Keep only instances whose ids appear in ARM64_INSTANCES (i.e. instances
# with arm64-compatible Docker images) when True.
arm64_image_only = True

print(f"Loading dataset {dataset_path}, split {split}...")
# Materialize the HF dataset into a plain list of dicts so we can mutate rows.
instances = list(load_dataset(dataset_path, split=split))

if arm64_image_only:
    instances = [inst for inst in instances if inst["instance_id"] in ARM64_INSTANCES]
print(f" instance length : {len(instances)}")

# Attach the extra fields the downstream agent/reward pipeline expects.
# Each row carries its own original JSON serialization (computed BEFORE the
# extra keys are added) in both "prompt" and "inst".
for inst in instances:
    serialized = json.dumps(inst)
    inst.update(
        prompt=[serialized],
        inst=[serialized],
        agent_name="swe_agent",
        reward_model={"ground_truth": ""},
    )

# 80/20 train/test split, preserving dataset order.
split_idx = int(len(instances) * 0.8)
train_instances = instances[:split_idx]
test_instances = instances[split_idx:]

# Write each split as snappy-compressed parquet via pyarrow.
pd.DataFrame(train_instances).to_parquet(
    "swe_bench_verified_train.parquet", engine='pyarrow', compression='snappy'
)
pd.DataFrame(test_instances).to_parquet(
    "swe_bench_verified_test.parquet", engine='pyarrow', compression='snappy'
)
