import os
import glob
import argparse
from pod5 import Reader as Pod5Reader
from pod5 import  Writer as Pod5Writer
import pod5

def _write_batch(reads, output_dir, batch_num):
    """Write one batch of pod5.Read objects to <output_dir>/subset_<batch_num>.pod5."""
    output_file = os.path.join(output_dir, f"subset_{batch_num}.pod5")
    with Pod5Writer(output_file) as writer:
        for read in reads:
            writer.add_read(read)


def main():
    """Extract reads listed in a read-ID file from .pod5 files and split them
    into chunked output files.

    Command-line arguments:
        -i/--input_dir    directory searched recursively for .pod5 files
                          (a single .pod5 file path is also accepted)
        -o/--output_dir   directory receiving subset_<n>.pod5 output files
        -r/--read_id_file text file with one read ID per line
        -c/--chunk_size   maximum reads per output file (default: 4000)
    """
    parser = argparse.ArgumentParser(description="Extract reads from multiple pod5 files based on a read ID list and split them into chunks.")
    parser.add_argument("-i", "--input_dir", required=True, help="Input directory containing .pod5 files.")
    parser.add_argument("-o", "--output_dir", required=True, help="Output directory to store extracted pod5 files.")
    parser.add_argument("-r", "--read_id_file", required=True, help="File containing read IDs to extract, one per line.")
    parser.add_argument("-c", "--chunk_size", type=int, default=4000, help="Number of reads per output pod5 file (default: 4000).")

    args = parser.parse_args()

    input_dir = args.input_dir
    output_dir = args.output_dir
    read_id_file = args.read_id_file
    chunk_size = args.chunk_size

    # Create the output directory if it does not already exist.
    os.makedirs(output_dir, exist_ok=True)

    # Read the set of read IDs to extract. `with` closes the handle; the
    # original left the file object open.
    with open(read_id_file, "r") as fh:
        read_ids_to_extract = {line.strip() for line in fh if line.strip()}
    print(f"Read IDs to extract: {len(read_ids_to_extract)}")

    # Collect all .pod5 files under the input directory (recursively), or
    # treat the argument as a single .pod5 file path.
    if os.path.isdir(input_dir):
        pod5_files = glob.glob(os.path.join(input_dir, "**", "*.pod5"), recursive=True)
    else:
        pod5_files = [input_dir]
    if not pod5_files:
        print("No .pod5 files found in the input directory.")
        return

    current_batch = []
    batch_num = 1
    process_num = 0

    # Process each pod5 file in turn. (The original also tracked an unused
    # run_info_copied/run_info_example pair with an empty `if` body; that
    # dead code has been removed.)
    for pod5_file in pod5_files:
        try:
            with Pod5Reader(pod5_file) as reader:
                for read in reader.reads():
                    if str(read.read_id) not in read_ids_to_extract:
                        continue
                    process_num += 1
                    # Re-materialise the record as a writable pod5.Read,
                    # carrying calibration/pore/run_info over so the output
                    # files remain compatible with the source runs.
                    current_batch.append(pod5.Read(
                        read_id=read.read_id,
                        read_number=read.read_number,
                        start_sample=read.start_sample,
                        median_before=read.median_before,
                        end_reason=read.end_reason,
                        calibration=read.calibration,
                        pore=read.pore,
                        signal=read.signal,
                        run_info=read.run_info,
                    ))
                    # Flush a full chunk to its own output file.
                    if len(current_batch) == chunk_size:
                        _write_batch(current_batch, output_dir, batch_num)
                        batch_num += 1
                        current_batch = []
        except Exception as e:
            # Best-effort over many files: report and skip an unreadable or
            # corrupt file instead of silently swallowing the error.
            print(f"Warning: skipping {pod5_file}: {e}")
            continue

    # Write any remaining reads that did not fill a complete chunk.
    if current_batch:
        _write_batch(current_batch, output_dir, batch_num)
    print(f"Processed {process_num} reads.")

    print("Extraction and splitting completed successfully.")

# Run the extraction only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
