"""
call modifications from fast5 files or extracted features,
using tensorflow and the trained model.
output format: chromosome, pos, strand, pos_in_strand, read_name, read_loc,
prob_0, prob_1, called_label, seq
"""

from __future__ import absolute_import

import os
os.environ['OPENBLAS_NUM_THREADS'] = '1'

import torch
import argparse
import sys
import numpy as np
from sklearn import metrics
import gzip
import torch.multiprocessing as mp

try:
    mp.set_start_method("spawn")
except RuntimeError:
    pass

from torch.multiprocessing import Queue
from torch.utils.data import DataLoader, DistributedSampler
from torch.cuda.amp import autocast
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
import time
import pod5
import pyslow5

from .models import ModelBiLSTM, ModelDLinear,ModelCatch,ModelPatchBiLSTM,SharedEmbedding
from .mtms.mtm import MTM
from .softshape.SoftShapeModel import SoftShapeNet
from .utils.process_utils import base2code_dna, code2base_dna, str2bool, display_args, normalize_signals
from .utils.process_utils import nproc_to_call_mods_in_cpu_mode, get_refloc_of_methysite_in_motif
from .utils.process_utils import get_motif_seqs, get_files, fill_files_queue, read_position_file,detect_file_type,validate_path
from .extract_features import _extract_preprocess
from .utils.constants_torch import FloatTensor, use_cuda
from .extract_features import _get_read_sequened_strand, get_aligner
from .extract_features import _group_signals_by_movetable_v2, _get_signals_rect
from .utils_dataloader import SignalDataset, Fast5Dataset, TsvDataset
from .utils_dataloader import collate_fn_inference, worker_init_fn
from .utils.process_utils import get_logger, _get_gpus
from .utils import bam_reader
import mappy
import threading
import warnings
import uuid

from accelerate import Accelerator
from accelerate.utils import DataLoaderConfiguration

warnings.filterwarnings("ignore", category=FutureWarning)

LOGGER = get_logger(__name__)
os.environ["MKL_THREADING_LAYER"] = "GNU"

queue_size_border = 2000
qsize_size_border_p5batch = 40
queue_size_border_f5batch = 100
time_wait = 0.01
key_sep = "||"

def call_mods(args):
    """Top-level entry point: dispatch modification calling by input file type."""
    t_start = time.time()
    LOGGER.info("[call_mods] starts")

    model_path = validate_path(args.model_path, "--model_path")
    input_path = validate_path(args.input_path, "--input_path")
    success_file = prepare_success_file(input_path)

    file_type = detect_file_type(input_path, str2bool(args.recursively))
    if file_type not in ('pod5', 'slow5', 'fast5'):
        # plain extracted-feature (TSV) input
        handle_file_input(args, input_path, model_path, success_file)
    else:
        handle_signal_or_fast5_input(args, input_path, model_path, success_file, file_type)

    cleanup_success_file(success_file)
    LOGGER.info("[call_mods] costs %.2f seconds.." % (time.time() - t_start))


def prepare_success_file(input_path):
    """Build a unique ``.success`` marker path derived from *input_path* and
    remove any stale file already sitting at that path."""
    marker = "{}.{}.success".format(input_path.rstrip("/"), uuid.uuid1())
    if os.path.exists(marker):
        os.remove(marker)
    return marker

def handle_signal_or_fast5_input(args, input_path, model_path, success_file, file_type):
    """Dispatch modification calling for raw-signal (pod5/slow5) or fast5 input.

    Selects between distributed multi-GPU (DDP), single-process GPU, and CPU
    code paths based on CUDA availability and ``args.ddp``.

    Fix: errors were previously caught, print()-ed, and swallowed, so any
    failure still let call_mods() report success with no output. Errors are
    now logged with a traceback and re-raised.
    """
    try:
        validate_path(args.bam, "--bam")
        ref_path = validate_reference_path(args.reference_path) if args.reference_path else None
        is_dna = not args.rna
        is_recursive = str2bool(args.recursively)

        if file_type in ('pod5', 'slow5'):
            bam_index = bam_reader.ReadIndexedBam(args.bam)
            motif_seqs = get_motif_seqs(args.motifs, is_dna)
            positions = read_position_file(args.positions) if args.positions else None
            files_dr = get_files(input_path, is_recursive,
                                 ".pod5" if file_type == 'pod5' else (".slow5", ".blow5"))

            files_queue = Queue()
            fill_files_queue(files_queue, files_dr)

            if use_cuda and args.ddp:
                # One writer process drains the prediction queue filled by the DDP workers.
                pred_str_q = Queue()
                p_w = mp.Process(target=_write_predstr_to_file, args=(args.result_file, pred_str_q), name="writer")
                p_w.daemon = True
                p_w.start()
                world_size = torch.cuda.device_count()
                param = (files_dr, bam_index, success_file, model_path, motif_seqs, positions,
                         pred_str_q, files_queue, args, file_type)
                mp.spawn(_call_mods_from_signal_gpu_distributed, args=(world_size, param),
                         nprocs=world_size, join=True)
                pred_str_q.put("kill")
                p_w.join()
            elif use_cuda:
                _call_mods_from_signal_gpu(files_dr, bam_index, success_file, model_path,
                                           motif_seqs, positions, files_queue, args, file_type)
            else:
                _call_mods_from_signal_cpu(files_dr, bam_index, success_file, model_path,
                                           motif_seqs, positions, files_queue, args, file_type)
        elif file_type == 'fast5':
            read_strand = _get_read_sequened_strand(args.basecall_subgroup)
            motif_seqs, chrom2len, _, len_fast5s, positions, contigs = _extract_preprocess(
                input_path, is_recursive, args.motifs, is_dna, ref_path, args.r_batch_size, args.positions, args
            )
            aligner = get_aligner(ref_path, args.best_n) if args.mapping else None
            fast5s_q = get_files(input_path, is_recursive, ".fast5")

            if use_cuda:
                _call_mods_from_fast5s_gpu(ref_path, motif_seqs, chrom2len, fast5s_q, len_fast5s,
                                           positions, contigs, model_path, success_file, read_strand, args, aligner)
            else:
                _call_mods_from_fast5s_cpu(ref_path, motif_seqs, chrom2len, fast5s_q, len_fast5s,
                                           positions, contigs, model_path, success_file, read_strand, args, aligner)
    except Exception:
        # Log with full traceback, then propagate so callers see the failure.
        LOGGER.exception("Error while processing %s input", file_type)
        raise

def handle_directory_input(args, input_path, model_path, success_file):
    """Route a directory input to the signal (pod5/slow5) or fast5 pipeline."""
    is_recursive = str2bool(args.recursively)
    is_dna = not args.rna
    ref_path = validate_reference_path(args.reference_path) if args.reference_path else None
    file_type = detect_file_type(input_path, is_recursive)

    if file_type == 'fast5':
        handle_fast5_input(args, input_path, model_path, success_file, ref_path, is_dna, is_recursive)
    elif file_type in ('pod5', 'slow5'):
        handle_signal_input(args, input_path, model_path, success_file, is_dna, is_recursive, file_type)
    else:
        raise ValueError(f"No valid signal files (.pod5, .slow5, .blow5, .fast5) found in {input_path}")

def validate_reference_path(ref_path):
    # Thin wrapper: validate the reference genome path with a consistent flag label.
    return validate_path(ref_path, "--reference_path")

def handle_signal_input(args, input_path, model_path, success_file, is_dna, is_recursive, file_type):
    """Run modification calling on pod5/slow5 signal files (DDP-GPU or CPU)."""
    bam_index = bam_reader.ReadIndexedBam(args.bam)
    motif_seqs = get_motif_seqs(args.motifs, is_dna)
    positions = read_position_file(args.positions) if args.positions else None
    suffixes = ".pod5" if file_type == 'pod5' else (".slow5", ".blow5")
    files_dr = get_files(input_path, is_recursive, suffixes)

    files_queue = Queue()
    fill_files_queue(files_queue, files_dr)

    if not use_cuda:
        _call_mods_from_signal_cpu(files_dr, bam_index, success_file, model_path,
                                   motif_seqs, positions, files_queue, args, file_type)
        return

    # GPU path: one DDP worker per visible GPU plus a single writer process.
    pred_str_q = Queue()
    writer = mp.Process(target=_write_predstr_to_file, args=(args.result_file, pred_str_q), name="writer")
    writer.daemon = True
    writer.start()
    world_size = torch.cuda.device_count()
    param = (files_dr, bam_index, success_file, model_path, motif_seqs, positions,
             pred_str_q, files_queue, args, file_type)
    mp.spawn(_call_mods_from_signal_gpu_distributed, args=(world_size, param),
             nprocs=world_size, join=True)
    pred_str_q.put("kill")
    writer.join()

def handle_fast5_input(args, input_path, model_path, success_file, ref_path, is_dna, is_recursive):
    """Run modification calling on fast5 files via the GPU or CPU pipeline."""
    read_strand = _get_read_sequened_strand(args.basecall_subgroup)
    motif_seqs, chrom2len, _, len_fast5s, positions, contigs = _extract_preprocess(
        input_path, is_recursive, args.motifs, is_dna, ref_path, args.r_batch_size, args.positions, args
    )
    aligner = get_aligner(ref_path, args.best_n) if args.mapping else None
    fast5s_q = get_files(input_path, is_recursive, ".fast5")

    # Same argument list for both backends; only the callable differs.
    caller = _call_mods_from_fast5s_gpu if use_cuda else _call_mods_from_fast5s_cpu
    caller(ref_path, motif_seqs, chrom2len, fast5s_q, len_fast5s, positions, contigs,
           model_path, success_file, read_strand, args, aligner)

def handle_file_input(args, input_path, model_path, success_file):
    """Run modification calling on an extracted-feature (TSV) input file.

    Raises:
        NotImplementedError: when CUDA is unavailable. Only the GPU path is
            implemented for TSV input; previously the CPU case silently did
            nothing and the run still reported success.
    """
    if not use_cuda:
        raise NotImplementedError(
            "calling mods from an extracted-feature file requires CUDA; "
            "no CPU implementation is available")
    _call_mods_from_file_gpu(input_path, model_path, args)

def determine_process_count(args):
    """Return the number of DataLoader worker processes (at least 1).

    The original function had identical ``if use_cuda`` / ``else`` branches,
    so the redundant check was collapsed into a single return.
    """
    return max(1, args.nproc)

def cleanup_success_file(success_file):
    """Delete the success-marker file; a missing file is a silent no-op."""
    try:
        os.remove(success_file)
    except FileNotFoundError:
        pass
def check_model_devices(model):
    """Debug helper: print the device of every parameter and buffer in *model*."""
    print("\n--- 检查模型内部 Buffers/Parameters 的设备 ---")

    # Trainable parameters.
    for name, param in model.named_parameters():
        location = f"{param.device}" if param.is_cuda else "CPU"
        print(f"Parameter: {name} | Device: {location}")

    # Non-trainable tensors (buffers).
    for name, buf in model.named_buffers():
        location = f"{buf.device}" if buf.is_cuda else "CPU"
        print(f"Buffer: {name} | Device: {location}")
def load_model_distributed(model_path, device, args):
    """Build a ModelBiLSTM, load weights from *model_path*, and wrap it in DDP.

    Falls back to ``torch.jit.load`` when the checkpoint cannot be read as a
    plain state dict.
    """
    model = ModelBiLSTM(
        args.seq_len, args.signal_len, args.layernum1, args.layernum2, args.class_num,
        args.dropout_rate, args.hid_rnn, args.n_vocab, args.n_embed, str2bool(args.is_base),
        str2bool(args.is_signallen), str2bool(args.is_trace), args.model_type
    )
    try:
        state = torch.load(model_path, map_location=torch.device(device))
    except Exception:
        state = torch.jit.load(model_path)

    merged = model.state_dict()
    merged.update(state)
    model.load_state_dict(merged)
    del merged

    if use_cuda:
        _get_gpus()  # keep the original GPU-list probe
        model = model.to(device)
        model = DDP(model, device_ids=[device.index], output_device=device.index)

    return model

def load_model(model_path, device, args):
    """Build a ModelBiLSTM, load weights, and (on CUDA) wrap it in DataParallel."""
    model = ModelBiLSTM(
        args.seq_len, args.signal_len, args.layernum1, args.layernum2, args.class_num,
        args.dropout_rate, args.hid_rnn, args.n_vocab, args.n_embed, str2bool(args.is_base),
        str2bool(args.is_signallen), str2bool(args.is_trace), args.model_type
    )
    try:
        state = torch.load(model_path, map_location=torch.device(device))
    except Exception:
        # Checkpoint is not a plain state dict; try TorchScript.
        state = torch.jit.load(model_path)

    merged = model.state_dict()
    merged.update(state)
    model.load_state_dict(merged)
    del merged

    if use_cuda:
        gpulist = _get_gpus()
        # The primary replica must live on *device* before DataParallel wraps it.
        model = model.to(device)
        model = torch.nn.DataParallel(model, device_ids=gpulist)

    return model

def setup(rank, world_size):
    """Initialize the NCCL process group for DDP inference on this rank.

    NOTE(review): master address/port are hard-coded, so two concurrent runs
    on the same host would collide on port 12355 — confirm this is acceptable.
    """
    os.environ['MASTER_ADDR'] = 'localhost'
    os.environ['MASTER_PORT'] = '12355'
    dist.init_process_group("nccl", rank=rank, world_size=world_size)

def cleanup():
    """Tear down the distributed process group created by setup()."""
    dist.destroy_process_group()

def load_model_distributed_softshape(model_path, device, args):
    """Build a SoftShapeNet from CLI args, load its checkpoint, and wrap it in DDP.

    Cleanup: the original read several ``args.softshape_*`` values into locals
    that were never used (``warm_up_epoch``, ``moeloss_rate``) and carried
    large blocks of commented-out alternative models; both were removed.

    Raises:
        RuntimeError: if the checkpoint cannot be loaded into the model.
    """
    # Input channels: one raw-signal channel plus the kmer embedding channels.
    num_chn = 1 + args.n_embed
    model = SoftShapeNet(
        seq_len=args.seq_len * args.signal_len,  # total length after flattening
        shape_size=args.softshape_shape_size,
        num_channels=num_chn,
        emb_dim=args.softshape_emb_dim,
        sparse_rate=args.softshape_sparse_rate,
        depth=args.softshape_depth,
        num_classes=args.class_num,
        num_experts=args.softshape_num_experts,
        stride=args.softshape_stride,
        vocab_size=args.n_vocab,
        embedding_size=args.n_embed
    )
    try:
        checkpoint = torch.load(model_path, map_location=device)
        model.load_state_dict(checkpoint, strict=True)
    except Exception as e:
        print(f"Error loading model from {model_path}: {e}", flush=True)
        raise RuntimeError(f"Error loading model from {model_path}: {e}")
    # Move to the target GPU and enable DDP.
    model = model.to(device)
    model = DDP(model, device_ids=[device.index], output_device=device.index, find_unused_parameters=True)
    print(f"Model loaded on device {device}", flush=True)
    return model

def load_model_distributed_mtm(model_path, device, args):
    """Build an MTM model from CLI args, load its checkpoint, and wrap it in DDP.

    Raises:
        RuntimeError: if the checkpoint cannot be loaded into the model.
    """
    # Channels: one raw-signal channel plus the kmer embedding channels.
    channel_count = 1 + args.n_embed
    model = MTM(
        num_chn=channel_count,
        d_static=args.mtm_d_static,
        num_cls=args.class_num,
        ratios=args.mtm_ratios,   # down-sampling ratios
        d_model=args.hid_rnn,     # hidden width
        r_hid=args.mtm_r_hid,
        drop=args.dropout_rate,
        norm_first=args.mtm_norm_first,
        down_mode=args.mtm_down_mode,
        vocab_size=args.n_vocab,
        embedding_size=args.n_embed
    )
    try:
        checkpoint = torch.load(model_path, map_location=device)
        model.load_state_dict(checkpoint, strict=True)
    except Exception as e:
        print(f"Error loading model from {model_path}: {e}", flush=True)
        raise RuntimeError(f"Error loading model from {model_path}: {e}")
    # Move to the target GPU and enable DDP.
    model = model.to(device)
    model = DDP(model, device_ids=[device.index], output_device=device.index)
    print(f"Model loaded on device {device}", flush=True)
    return model
def load_model_mtm(model_path, device, args):
    """Build an MTM model, load its checkpoint, optionally ``torch.compile`` it,
    and wrap it in DataParallel for single-process multi-GPU inference.

    Fixes:
      * the ``torch.nn.DataParallel(...)`` result was discarded, so the
        multi-GPU wrapper was silently never used — it is now assigned back;
      * the bare ``except:`` around ``torch.compile`` now catches
        ``AttributeError`` only and chains the original exception.

    Raises:
        RuntimeError: if the checkpoint cannot be loaded into the model.
        ImportError: if ``args.use_compile`` is set but torch.compile is absent.
    """
    # Channels: one raw-signal channel plus the kmer embedding channels.
    num_chn = 1 + args.n_embed
    model = MTM(
        num_chn=num_chn,
        d_static=args.mtm_d_static,
        num_cls=args.class_num,
        ratios=args.mtm_ratios,
        d_model=args.hid_rnn,
        r_hid=args.mtm_r_hid,
        drop=args.dropout_rate,
        norm_first=args.mtm_norm_first,
        down_mode=args.mtm_down_mode,
        vocab_size=args.n_vocab,
        embedding_size=args.n_embed,
        use_channel_attn=args.mtm_use_channel,
        use_mixer=args.mtm_use_mixer,
        moe=args.mtm_moe
    )
    try:
        checkpoint = torch.load(model_path, map_location=device)
        model.load_state_dict(checkpoint, strict=True)
    except Exception as e:
        print(f"Error loading model from {model_path}: {e}", flush=True)
        raise RuntimeError(f"Error loading model from {model_path}: {e}")
    if str2bool(args.use_compile):
        try:
            model = torch.compile(model, mode="reduce-overhead")
        except AttributeError as e:
            raise ImportError('torch.compile does not exist in PyTorch<2.0.') from e
    gpulist = _get_gpus()
    model = model.to(device)
    # BUG FIX: keep the DataParallel wrapper instead of discarding it.
    model = torch.nn.DataParallel(model, device_ids=gpulist)
    print(f"Model loaded on device {device}", flush=True)
    return model

def _call_mods_from_signal_gpu_distributed(rank, world_size, param):
    """Per-rank DDP worker: stream signal batches through the model and queue results.

    Spawned once per GPU via ``mp.spawn``; ``rank`` selects the CUDA device
    and ``param`` bundles every shared argument. Prediction strings are pushed
    onto ``pred_str_q`` for the single writer process to persist.
    """
    print(f"Process {rank} started with world size {world_size}", flush=True)
    try:
        files_dr, bam_index, success_file, model_path, motif_seqs, positions, pred_str_q, files_queue, args, file_type = param
        # Join the NCCL process group before any model/device work.
        setup(rank, world_size)
        LOGGER.info(f"Process {rank} initialized")
        nproc = determine_process_count(args)
        device = torch.device(f"cuda:{rank}")
        # Pick the model family requested on the command line.
        if args.mtm:
            model = load_model_distributed_mtm(model_path, device, args)
        elif args.softshape:
            model = load_model_distributed_softshape(model_path, device, args)
        else:
            model = load_model_distributed(model_path, device, args)
        model.eval()
        #shared_embedding.eval()
        
        dataset = SignalDataset(files_dr, bam_index, motif_seqs, positions, device, files_queue, args, format_type=file_type)
        data_loader = DataLoader(
            dataset, batch_size=args.batch_size, num_workers=nproc, collate_fn=collate_fn_inference,
            worker_init_fn=worker_init_fn, pin_memory=True
        )
        print(f"Dataset loaded on device {device}", flush=True)
        with torch.no_grad():
            for batch in data_loader:
                if batch is None:
                    LOGGER.info(f"Rank {rank}: batch is None")
                    continue
                # Mixed-precision forward pass.
                with autocast():
                    #print(f"Rank {rank}: processing batch", flush=True)
                    if args.mtm:
                        pred_str, accuracy, batch_num = _call_mods_mtm(batch, model, args.batch_size,args)
                    elif args.softshape:
                        pred_str, accuracy, batch_num = _call_mods_softshape(batch, model, args.batch_size,args)
                    else:
                        pred_str, accuracy, batch_num = _call_mods(batch, model, args.batch_size)
                    #print(f"Rank {rank}: processed batch", flush=True)
                pred_str_q.put(pred_str)
    except Exception as e:
        print('error in gpu_distributed',flush=True)
        LOGGER.error(f"Error in process {rank}: {e}")
        raise
    finally:
        # Always leave the process group, even on failure.
        cleanup()

def _call_mods_from_signal_gpu(
    pod5_dr, bam_index, success_file, model_path, motif_seqs, positions, files_queue, args, file_type
):
    """Single-process GPU inference over pod5/slow5 signal files.

    Streams batches through a DataLoader, runs the model under no-grad +
    autocast, and hands prediction strings to a writer process via a queue.

    Fix: ``with torch.no_grad() and torch.cuda.amp.autocast():`` only entered
    the autocast context (``and`` evaluates to its second operand), so
    inference ran with gradient tracking enabled; both contexts are now
    entered with a compound ``with`` statement.
    """
    nproc = determine_process_count(args)
    gpus = _get_gpus()
    device = torch.device(f"cuda:{gpus[0]}" if torch.cuda.is_available() else "cpu")
    dataset = SignalDataset(pod5_dr, bam_index, motif_seqs, positions, device, files_queue, args, file_type)
    data_loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=nproc,
                             worker_init_fn=worker_init_fn, collate_fn=collate_fn_inference,
                             pin_memory=True)

    # Writer process drains prediction strings into the result file.
    pred_str_q = Queue()
    p_w = mp.Process(
        target=_write_predstr_to_file,
        args=(args.result_file, pred_str_q),
        name="writer",
    )
    p_w.daemon = True
    p_w.start()

    if args.mtm:
        model = load_model_mtm(model_path, device, args)
    else:
        model = load_model(model_path, device, args)
    model.eval()

    with torch.no_grad(), torch.cuda.amp.autocast():
        for batch in data_loader:
            if batch is None:
                print("batch is None")
                continue
            if args.mtm:
                pred_str, accuracy, batch_num = _call_mods_mtm(batch, model, args.batch_size, args)
            else:
                pred_str, accuracy, batch_num = _call_mods(batch, model, args.batch_size)
            if pred_str == []:
                print("pred_str is empty")
                continue
            pred_str_q.put(pred_str)

    # Signal the writer that no more predictions are coming.
    pred_str_q.put("kill")
    p_w.join()

def _call_mods_from_signal_cpu(files_dr, bam_index, success_file, model_path, motif_seqs, positions, files_queue, args, file_type):
    """CPU-only inference over pod5/slow5 signal files; predictions are sent
    through a queue to a writer process that persists them to ``args.result_file``."""
    worker_count = determine_process_count(args)
    dataset = SignalDataset(files_dr, bam_index, motif_seqs, positions, 'cpu', files_queue, args, format_type=file_type)
    loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=worker_count,
                        worker_init_fn=worker_init_fn, collate_fn=collate_fn_inference)

    pred_str_q = Queue()
    writer = mp.Process(target=_write_predstr_to_file, args=(args.result_file, pred_str_q), name="writer")
    writer.daemon = True
    writer.start()

    model = load_model(model_path, 'cpu', args)
    model.eval()

    with torch.no_grad():
        for batch in loader:
            if batch is None:
                print("batch is None")
                continue
            pred_str, _, _ = _call_mods(batch, model, args.batch_size)
            if not pred_str:
                print("pred_str is empty")
                continue
            pred_str_q.put(pred_str)

    pred_str_q.put("kill")
    writer.join()

def _call_mods_from_fast5s_gpu(ref_path, motif_seqs, chrom2len, fast5s_q, len_fast5s, positions, chrom2seqs, model_path, success_file, read_strand, args, aligner):
    """GPU inference over fast5 files; predictions are persisted by a separate
    writer process fed through a queue.

    Fix: ``with torch.no_grad() and torch.cuda.amp.autocast():`` only entered
    the autocast context (``and`` evaluates to its second operand), so
    inference ran with gradient tracking enabled; both contexts are now
    entered with a compound ``with`` statement.
    """
    nproc = determine_process_count(args)
    gpus = _get_gpus()
    device = torch.device(f"cuda:{gpus[0]}" if torch.cuda.is_available() else "cpu")
    dataset = Fast5Dataset(fast5s_q, motif_seqs, positions, device, args, chrom2len, read_strand, chrom2seqs, aligner)
    data_loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=nproc,
                             worker_init_fn=worker_init_fn, collate_fn=collate_fn_inference, pin_memory=True)

    pred_str_q = Queue()
    p_w = mp.Process(target=_write_predstr_to_file, args=(args.result_file, pred_str_q), name="writer")
    p_w.daemon = True
    p_w.start()
    model = load_model(model_path, device, args)
    model.eval()

    with torch.no_grad(), torch.cuda.amp.autocast():
        for batch in data_loader:
            if batch is None:
                print("batch is None")
                continue
            pred_str, accuracy, batch_num = _call_mods(batch, model, args.batch_size)
            if pred_str == []:
                print("pred_str is empty")
                continue
            pred_str_q.put(pred_str)

    pred_str_q.put("kill")
    p_w.join()

def _call_mods_from_fast5s_cpu(ref_path, motif_seqs, chrom2len, fast5s_q, len_fast5s, positions, chrom2seqs, model_path, success_file, read_strand, args, aligner=None):
    # NOTE(review): intentionally a no-op stub — the CPU fast5 path is not
    # implemented, so CPU runs on fast5 input currently produce no output.
    pass  # Implement if needed

def _call_mods_from_file_gpu(input_path, model_path, args):
    """GPU inference over an extracted-feature TSV file.

    Fix: ``with torch.no_grad() and torch.cuda.amp.autocast():`` only entered
    the autocast context (``and`` evaluates to its second operand), so
    inference ran with gradient tracking enabled; both contexts are now
    entered with a compound ``with`` statement.
    """
    nproc = determine_process_count(args)
    gpus = _get_gpus()
    device = torch.device(f"cuda:{gpus[0]}" if torch.cuda.is_available() else "cpu")
    dataset = TsvDataset(input_path, device)
    data_loader = DataLoader(dataset, batch_size=args.batch_size, num_workers=nproc,
                             worker_init_fn=worker_init_fn, collate_fn=collate_fn_inference, pin_memory=True)

    pred_str_q = Queue()
    p_w = mp.Process(target=_write_predstr_to_file, args=(args.result_file, pred_str_q), name="writer")
    p_w.daemon = True
    p_w.start()
    model = load_model(model_path, device, args)
    model.eval()

    with torch.no_grad(), torch.cuda.amp.autocast():
        for batch in data_loader:
            if batch is None:
                print("batch is None")
                continue
            pred_str, accuracy, batch_num = _call_mods(batch, model, args.batch_size)
            if pred_str == []:
                print("pred_str is empty")
                continue
            pred_str_q.put(pred_str)

    pred_str_q.put("kill")
    p_w.join()

def _call_mods(features_batch, model, batch_size, device=0):
    sampleinfo, kmers, base_means, base_stds, base_signal_lens, k_signals, labels, tags = features_batch
    pred_str = []
    accuracys = []
    batch_num = 0
    
    for i in np.arange(0, len(sampleinfo), batch_size):
        batch_s, batch_e = i, i + batch_size
        b_sampleinfo = sampleinfo[batch_s:batch_e]
        b_kmers = kmers[batch_s:batch_e].long()
        b_base_means = base_means[batch_s:batch_e].float()
        b_base_stds = base_stds[batch_s:batch_e].float()
        b_base_signal_lens = base_signal_lens[batch_s:batch_e].float()
        b_k_signals = k_signals[batch_s:batch_e].float()
        b_labels = labels[batch_s:batch_e].long()
        #print(f"Processing batch {batch_num}, size: {len(b_sampleinfo)}", flush=True)
        if len(b_sampleinfo) > 0:
            
            b_kmers=b_kmers.cuda(non_blocking=True)
            b_base_means=b_base_means.cuda(non_blocking=True)
            
            b_base_stds=b_base_stds.cuda(non_blocking=True)
            b_base_signal_lens=b_base_signal_lens.cuda(non_blocking=True)
            
            b_k_signals=b_k_signals.cuda(non_blocking=True)
            x_mask = torch.isnan(b_k_signals)
            b_k_signals[x_mask] = 0
           
            # print(f"DEBUG: Calling model. Main module on cuda:0.")
            # print(f"DEBUG: Input tensor devices are:")
            # print(f"  b_kmers: {b_kmers.device}", flush=True)
            # print(f"  b_base_means: {b_base_means.device}", flush=True)
            # print(f"  b_base_stds: {b_base_stds.device}", flush=True)
            # print(f"  b_base_signal_lens: {b_base_signal_lens.device}", flush=True)
            # print(f"  b_k_signals: {b_k_signals.device}", flush=True)           
            _,  vlogits = model(
                #b_kmers, b_base_means, b_base_stds, b_base_signal_lens, b_k_signals
                # b_kmers.cuda(device,non_blocking=True), b_base_means.cuda(device,non_blocking=True),
                # b_base_stds.cuda(device,non_blocking=True), b_base_signal_lens.cuda(device,non_blocking=True),
                # b_k_signals.cuda(device,non_blocking=True)
                b_kmers, b_base_means, b_base_stds, b_base_signal_lens, b_k_signals
            )
            _, vpredicted = torch.max(vlogits.data, 1)
            #print(f"batch {batch_num} processed, logits shape: {vlogits.shape}", flush=True)
           
            if use_cuda:
                vlogits = vlogits.cpu()
                vpredicted = vpredicted.cpu()
            
            predicted = vpredicted.numpy()
            logits = vlogits.data.numpy()
            acc_batch = metrics.accuracy_score(y_true=b_labels, y_pred=predicted)
            accuracys.append(acc_batch)
            
            for idx in range(len(b_sampleinfo)):
                prob_0, prob_1 = logits[idx][0], logits[idx][1]
                prob_0_norm = round(prob_0 / (prob_0 + prob_1), 6)
                prob_1_norm = round(1 - prob_0_norm, 6)
                b_idx_kmer = "".join([code2base_dna[int(x)] for x in b_kmers[idx]])
                center_idx = int(np.floor(len(b_idx_kmer) / 2))
                bkmer_start = center_idx - 2 if center_idx - 2 >= 0 else 0
                bkmer_end = center_idx + 3 if center_idx + 3 <= len(b_idx_kmer) else len(b_idx_kmer)
                
                pred_str.append(
                    "\t".join([b_sampleinfo[idx], str(prob_0_norm), str(prob_1_norm), str(predicted[idx]), b_idx_kmer[bkmer_start:bkmer_end]])
                )
            batch_num += 1
    
    accuracy = np.mean(accuracys) if len(accuracys) > 0 else 0
    return pred_str, accuracy, batch_num

def _call_mods_mtm(features_batch, model, batch_size, args, device=0):
    """Run inference with the MTM model on one extracted-features batch.

    Kmers/signals are truncated from their stored length (e.g. 41) down to
    ``args.seq_len`` (e.g. 21), then flattened to one token per raw signal
    point before being fed to MTM.

    Args:
        features_batch: tuple of (sampleinfo, kmers, base_means, base_stds,
            base_signal_lens, k_signals, labels, tag) tensors/lists.
        model: an MTM instance, already on the target GPU and in eval mode.
        batch_size: number of samples per forward pass.
        args: parsed CLI args (uses seq_len, signal_len, n_embed).
        device: CUDA device (index or torch.device) for the input tensors.

    Returns:
        (pred_str, None, batch_num): tab-separated prediction lines, a
        placeholder for the unused accuracy slot, and the number of batches
        actually processed.
    """
    sampleinfo, kmers, base_means, base_stds, base_signal_lens, k_signals, labels, tag = features_batch
    pred_str = []
    batch_num = 0

    with torch.no_grad():
        for i in np.arange(0, len(sampleinfo), batch_size):
            batch_s, batch_e = i, i + batch_size
            b_sampleinfo = sampleinfo[batch_s:batch_e]
            b_kmers = kmers[batch_s:batch_e].long()  # (B, stored_seq_len)
            b_k_signals = k_signals[batch_s:batch_e].float()  # (B, stored_seq_len, S)
            b_labels = labels[batch_s:batch_e].long()  # (B,)
            b_tag = tag[batch_s:batch_e].long()

            if len(b_sampleinfo) > 0:
                expected_seq_len = args.seq_len
                if b_k_signals.shape[1] < expected_seq_len or b_kmers.shape[1] < expected_seq_len:
                    print(f"警告：批次 {batch_num} 数据长度不足。k_signals.shape: {b_k_signals.shape}, b_kmers.shape: {b_kmers.shape}, 预期序列长度: {expected_seq_len}。跳过批次")
                    continue

                # Truncate to the first args.seq_len time steps.
                b_kmers = b_kmers[:, :args.seq_len]  # (B, seq_len)
                b_k_signals = b_k_signals[:, :args.seq_len, :]  # (B, seq_len, S)

                # Use a distinct local name: the original code rebound the
                # `batch_size` parameter here, shadowing it for the rest of
                # the loop body.
                cur_bs = b_k_signals.shape[0]
                seq_len = args.seq_len
                signal_len = args.signal_len

                b_kmers = b_kmers.cuda(device, non_blocking=True)
                b_k_signals = b_k_signals.cuda(device, non_blocking=True)
                b_labels = b_labels.cuda(device, non_blocking=True)
                b_tag = b_tag.cuda(device, non_blocking=True)

                signals = b_k_signals.view(cur_bs, -1, 1)  # (B, seq_len*S, 1)
                # Repeat each base's kmer code once per raw signal point.
                kmer_embed = b_kmers.repeat_interleave(signal_len, dim=1)  # (B, seq_len*S)
                # Mask NaN signal points; the kmer channels are never masked.
                x_mask = torch.isnan(signals)  # (..., signal_dim)
                false_mask = torch.zeros((*x_mask.shape[:-1], args.n_embed), dtype=torch.bool, device=x_mask.device)  # (..., kmer_dim), all False
                x_mask = torch.cat([x_mask, false_mask], dim=-1)
                t = torch.arange(seq_len * signal_len, device=signals.device).repeat(cur_bs, 1)  # (B, seq_len*S)
                x_static = b_tag.unsqueeze(-1)  # (B, 1) static feature

                vlogits = model(signals, kmer_embed, x_mask, t, x_static)  # (B, num_cls)
                probs = torch.softmax(vlogits, dim=-1)  # (B, num_cls)
                _, vpredicted = torch.max(vlogits.data, 1)
                probs = probs.cpu().numpy()  # (B, num_cls)
                predicted = vpredicted.cpu().numpy()

                for idx in range(len(b_sampleinfo)):
                    prob_0, prob_1 = probs[idx][0], probs[idx][1]
                    prob_0_norm = round(prob_0, 6)
                    prob_1_norm = round(prob_1, 6)
                    b_idx_kmer = "".join([code2base_dna[int(x)] for x in b_kmers[idx].cpu().numpy()])
                    center_idx = int(np.floor(len(b_idx_kmer) / 2))  # center of the seq_len-long kmer
                    bkmer_start = center_idx - 2 if center_idx - 2 >= 0 else 0
                    bkmer_end = center_idx + 3 if center_idx + 3 <= len(b_idx_kmer) else len(b_idx_kmer)
                    pred_str.append(
                        "\t".join([b_sampleinfo[idx], str(prob_0_norm), str(prob_1_norm), str(predicted[idx]), b_idx_kmer[bkmer_start:bkmer_end]])
                    )

                batch_num += 1
                torch.cuda.empty_cache()

    # BUGFIX: the original returned the loop-local `_`, which raised NameError
    # when no batch was processed and otherwise held a discarded tensor.
    # Callers unpack this slot into `_`, so None is backward-compatible.
    return pred_str, None, batch_num

def _call_mods_softshape(features_batch, model, batch_size, args, device=0):
    """Run inference with the SoftShape model on one extracted-features batch.

    Kmers/signals are truncated from their stored length (e.g. 41) down to
    ``args.seq_len`` (e.g. 21); signals are flattened to a single-channel
    sequence of shape (B, 1, seq_len*S) and NaN padding is zeroed out.

    Args:
        features_batch: tuple of (sampleinfo, kmers, base_means, base_stds,
            base_signal_lens, k_signals, labels, tags) tensors/lists.
        model: a SoftShapeNet instance returning (logits, moe_loss).
        batch_size: number of samples per forward pass.
        args: parsed CLI args (uses seq_len, signal_len).
        device: CUDA device (index or torch.device) for the input tensors.

    Returns:
        (pred_str, None, batch_num): tab-separated prediction lines, a
        placeholder for the unused accuracy slot, and the number of batches
        actually processed.
    """
    sampleinfo, kmers, base_means, base_stds, base_signal_lens, k_signals, labels, tags = features_batch
    pred_str = []
    batch_num = 0

    with torch.no_grad():
        for i in np.arange(0, len(sampleinfo), batch_size):
            batch_s, batch_e = i, i + batch_size
            b_sampleinfo = sampleinfo[batch_s:batch_e]
            b_kmers = kmers[batch_s:batch_e].long()  # (B, stored_seq_len)
            b_k_signals = k_signals[batch_s:batch_e].float()  # (B, stored_seq_len, S)
            b_labels = labels[batch_s:batch_e].long()  # (B,)

            if len(b_sampleinfo) > 0:
                # Truncate to the first args.seq_len time steps.
                b_kmers = b_kmers[:, :args.seq_len]  # (B, seq_len)
                b_k_signals = b_k_signals[:, :args.seq_len, :]  # (B, seq_len, S)

                # Use a distinct local name: the original code rebound the
                # `batch_size` parameter here, shadowing it for the rest of
                # the loop body.
                cur_bs = b_k_signals.shape[0]
                signal_len = args.signal_len

                b_kmers = b_kmers.cuda(device, non_blocking=True)
                b_k_signals = b_k_signals.cuda(device, non_blocking=True)
                b_labels = b_labels.cuda(device, non_blocking=True)

                # NOTE: single-channel layout -- (B, 1, seq_len*S), not
                # (B, seq_len*S, 1) as the original comment claimed.
                signals = b_k_signals.view(cur_bs, 1, -1)
                # Repeat each base's kmer code once per raw signal point.
                kmer_embed = b_kmers.repeat_interleave(signal_len, dim=1)  # (B, seq_len*S)
                # Zero out NaN padding before the model sees it.
                signals = torch.nan_to_num(signals, nan=0.0)

                vlogits, _ = model(signals, kmer_embed)  # (B, num_cls); second output (MoE loss) unused at inference
                probs = torch.softmax(vlogits, dim=-1)  # (B, num_cls)
                _, vpredicted = torch.max(vlogits.data, 1)
                probs = probs.cpu().numpy()  # (B, num_cls)
                predicted = vpredicted.cpu().numpy()

                for idx in range(len(b_sampleinfo)):
                    prob_0, prob_1 = probs[idx][0], probs[idx][1]
                    prob_0_norm = round(prob_0, 6)
                    prob_1_norm = round(prob_1, 6)
                    b_idx_kmer = "".join([code2base_dna[int(x)] for x in b_kmers[idx].cpu().numpy()])
                    center_idx = int(np.floor(len(b_idx_kmer) / 2))  # center of the seq_len-long kmer
                    bkmer_start = center_idx - 2 if center_idx - 2 >= 0 else 0
                    bkmer_end = center_idx + 3 if center_idx + 3 <= len(b_idx_kmer) else len(b_idx_kmer)
                    pred_str.append(
                        "\t".join([b_sampleinfo[idx], str(prob_0_norm), str(prob_1_norm), str(predicted[idx]), b_idx_kmer[bkmer_start:bkmer_end]])
                    )

                batch_num += 1

    # BUGFIX: the original returned the loop-local `_`, which raised NameError
    # when no batch was processed and otherwise held a discarded tensor.
    # Callers unpack this slot into `_`, so None is backward-compatible.
    return pred_str, None, batch_num

def _write_predstr_to_file(write_fp, predstr_q):
    """Writer-process loop: drain lists of prediction lines from predstr_q
    into write_fp until the sentinel string "kill" arrives.

    Args:
        write_fp: path of the output text file (truncated on open).
        predstr_q: multiprocessing queue yielding lists of strings, or the
            sentinel "kill" to terminate.
    """
    LOGGER.info("write_process-{} starts".format(os.getpid()))
    with open(write_fp, "w") as wf:
        while True:
            # BUGFIX: the original polled `empty()` + `time.sleep(time_wait)`,
            # but `time_wait` is undefined in this module (NameError on an
            # empty queue) and Queue.empty() is unreliable for multiprocessing
            # queues; a blocking get() is both correct and simpler.
            pred_str = predstr_q.get()
            if pred_str == "kill":
                LOGGER.info("write_process-{} finished".format(os.getpid()))
                break
            for one_pred_str in pred_str:
                wf.write(one_pred_str + "\n")
            # Flush per chunk so progress is visible while the job runs.
            wf.flush()

# def inference_mtm(args):
#     start = time.time()
#     LOGGER.info("[call_mods] starts")
#     dataLoaderConfiguration = DataLoaderConfiguration(
#         dispatch_batches=False
#     )
#     accelerator = Accelerator(
#         dataloader_config=dataLoaderConfiguration
#         )
#     model_path = validate_path(args.model_path, "--model_path")
#     input_path = validate_path(args.input_path, "--input_path")
#     #success_file = prepare_success_file(input_path)
    
#     file_type = detect_file_type(input_path, str2bool(args.recursively))
#     #ref_path = validate_reference_path(args.reference_path) if args.reference_path else None
#     is_dna = not args.rna
#     is_recursive = str2bool(args.recursively)
#     bam_index = bam_reader.ReadIndexedBam(args.bam)
#     motif_seqs = get_motif_seqs(args.motifs, is_dna)
#     positions = read_position_file(args.positions) if args.positions else None
#     files_dr = get_files(input_path, is_recursive, ".pod5" if file_type == 'pod5' else (".slow5", ".blow5"))
    
#     files_queue = Queue()
#     fill_files_queue(files_queue, files_dr)

#     pred_str_q = Queue()
#     device = accelerator.device
#     rank = accelerator.process_index
#     world_size = accelerator.num_processes

#     # ========== 1. 每个进程写独立临时文件 ==========
#     tmp_dir = os.path.dirname(args.result_file)
#     base_name = os.path.basename(args.result_file)
#     tmp_file = os.path.join(tmp_dir, f"{base_name}.tmp.rank{rank}")

#     # ---------- 1. 只在 rank 0 启动 writer ----------

#     # if accelerator.is_main_process:
#     #     p_w = mp.Process(
#     #         target=_write_predstr_to_file,
#     #         args=(args.result_file, pred_str_q),
#     #         name="writer"
#     #     )
#     #     p_w.daemon = True
#     #     p_w.start()
#     #device=''#这里实际没起作用，随便填
#     dataset = SignalDataset(files_dr, bam_index, motif_seqs, positions, device, files_queue, args, format_type=file_type)
#     data_loader = DataLoader(
#         dataset, batch_size=args.batch_size, num_workers=args.nproc, collate_fn=collate_fn_inference, pin_memory=True
             #，worker_init_fn=worker_init_fn #加不加这个可能都无法自动分片，还需要修改
#     )
#     dataloader = accelerator.prepare(data_loader)

#     num_chn = 1 + args.n_embed  # 通道数：signals (1) + base_means (1) + base_stds (1) + base_signal_lens (1) + kmer_embed
#     d_static = args.mtm_d_static  # 无静态特征
#     num_cls = args.class_num  # 分类任务类别数
#     ratios = args.mtm_ratios  # 下采样比率，可根据任务调整
#     d_model = args.hid_rnn  # 隐藏维度与嵌入维度一致
#     model = MTM(
#         num_chn=num_chn,
#         d_static=d_static,
#         num_cls=num_cls,
#         ratios=ratios,
#         d_model=d_model,
#         r_hid=args.mtm_r_hid,
#         drop=args.dropout_rate,
#         norm_first=args.mtm_norm_first,
#         down_mode=args.mtm_down_mode,
#         vocab_size=args.n_vocab, 
#         embedding_size=args.n_embed
#     )
#     model.load_state_dict(torch.load(model_path), strict=True)
#     model.eval()
#     model = accelerator.prepare(model)
#     with open(tmp_file, "w") as wf:
#         with torch.no_grad():
#             for batch in dataloader:                
#                 with autocast():
#                     pred_str, accuracy, batch_num = _call_mods_mtm(batch, model, args.batch_size,args)
#                     for line in pred_str:
#                         wf.write(line + "\n")
#             #gathered = accelerator.gather_for_metrics([pred_str])

#             # 只有 rank 0 写入队列
#             # if accelerator.is_main_process:
#             #     # gathered 已经是 [[pred_str_rank0], [pred_str_rank1], ...]
#             #     # 按进程顺序合并（保持 DataLoader 的分片顺序）
#             #     for pred_list in gathered:
#             #         pred_str_q.put(pred_list)
#     accelerator.wait_for_everyone()

#     # 只有在所有进程都到达这里后，才进入下面的 if
#     if accelerator.is_main_process:
#         # pred_str_q.put("kill")
#         # p_w.join()
#         final_file = args.result_file
#         with open(final_file, "w") as final_wf:
#             for r in range(world_size):
#                 tmp_r = os.path.join(tmp_dir, f"{base_name}.tmp.rank{r}")
#                 if os.path.exists(tmp_r):
#                     with open(tmp_r, "r") as f:
#                         for line in f:
#                             final_wf.write(line)
#                     os.remove(tmp_r)  # 清理
#     LOGGER.info("[call_mods] costs %.2f seconds.." % (time.time() - start))

def inference_mtm(args):
    """Multi-GPU (torchrun-style) MTM inference entry point.

    Each rank takes a disjoint shard of the input files and writes its
    predictions to ``<result_file>.tmp.rank<rank>``; merging the per-rank
    files is left to the caller.

    Args:
        args: parsed CLI arguments (model/input paths, batching, MTM hypers).
    """
    start = time.time()
    LOGGER.info("[call_mods] starts")

    # ========== 1. distributed environment (set by torchrun) ==========
    local_rank = int(os.environ.get("LOCAL_RANK", "0"))
    world_size = int(os.environ.get("WORLD_SIZE", "1"))

    if mp.get_start_method(allow_none=True) != "spawn":
        mp.set_start_method("spawn", force=True)

    torch.cuda.set_device(local_rank)
    device = torch.device(f"cuda:{local_rank}")
    print(f"[rank {local_rank}/{world_size}] Using GPU {local_rank}")

    # ========== 2. per-rank temporary output file ==========
    tmp_dir = os.path.dirname(args.result_file)
    base_name = os.path.basename(args.result_file)
    tmp_file = os.path.join(tmp_dir, f"{base_name}.tmp.rank{local_rank}")

    # ========== 3. data preparation ==========
    model_path = validate_path(args.model_path, "--model_path")
    input_path = validate_path(args.input_path, "--input_path")
    file_type = detect_file_type(input_path, str2bool(args.recursively))
    is_dna = not args.rna
    is_recursive = str2bool(args.recursively)
    bam_index = bam_reader.ReadIndexedBam(args.bam)
    motif_seqs = get_motif_seqs(args.motifs, is_dna)
    positions = read_position_file(args.positions) if args.positions else None
    files_dr = get_files(input_path, is_recursive, ".pod5" if file_type == 'pod5' else (".slow5", ".blow5"))

    # ========== shard the file list across ranks ==========
    import math
    total_files = len(files_dr)
    per_rank = math.ceil(total_files / world_size)
    start_idx = local_rank * per_rank
    end_idx = min(start_idx + per_rank, total_files)
    my_files = files_dr[start_idx:end_idx]

    print(f"[rank {local_rank}] Processing {len(my_files)} files: {start_idx} ~ {end_idx}")

    files_queue = Queue()
    fill_files_queue(files_queue, my_files)  # only this rank's files

    # ========== 4. Dataset & DataLoader ==========
    dataset = SignalDataset(
        input_dr=my_files,           # only this rank's shard
        bam_index=bam_index,
        motif_seqs=motif_seqs,
        positions=positions,
        device="",                   # placeholder; presumably ignored by the dataset -- confirm
        files_queue=files_queue,     # this rank's own queue
        args=args,
        format_type=file_type
    )

    dataloader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        num_workers=args.nproc,
        collate_fn=collate_fn_inference,
        pin_memory=True,
        shuffle=False,
        multiprocessing_context="spawn"
        # no worker_init_fn: file sharding is done above, per rank
    )

    # ========== 5. model ==========
    model = MTM(
        num_chn=1 + args.n_embed,
        d_static=args.mtm_d_static,
        num_cls=args.class_num,
        ratios=args.mtm_ratios,
        d_model=args.hid_rnn,
        r_hid=args.mtm_r_hid,
        drop=args.dropout_rate,
        norm_first=args.mtm_norm_first,
        down_mode=args.mtm_down_mode,
        vocab_size=args.n_vocab,
        embedding_size=args.n_embed
    )
    model.load_state_dict(torch.load(model_path, map_location=device), strict=True)
    model = model.to(device)
    model.eval()

    # ========== 6. inference ==========
    with open(tmp_file, "w") as wf:
        # BUGFIX: the original used `with torch.no_grad() and autocast():`.
        # Since a context-manager object is truthy, that expression evaluates
        # to autocast() only, so no_grad was never entered. A comma enters
        # both context managers.
        with torch.no_grad(), autocast():
            for batch in dataloader:
                pred_str, _, _ = _call_mods_mtm(batch, model, args.batch_size, args, device)
                for line in pred_str:
                    wf.write(line + "\n")

    print(f"[rank {local_rank}] Finished, saved to {tmp_file}")
    LOGGER.info("[call_mods] costs %.2f seconds.." % (time.time() - start))

def main():
    """Parse CLI arguments and dispatch to the requested calling mode.

    Dispatch: ``--infer --mtm`` runs inference_mtm; otherwise call_mods.
    """
    parser = argparse.ArgumentParser("call modifications")
    p_input = parser.add_argument_group("INPUT")
    p_input.add_argument("--input_path", "-i", type=str, required=True, help="the input path (signal_feature file or directory of fast5/pod5/slow5 files)")
    p_input.add_argument("--r_batch_size", type=int, default=50, help="number of files to process per batch, default 50")
    p_input.add_argument("--bam", type=str, help="the bam filepath")

    # MTM-model-specific hyperparameters
    p_mtm = parser.add_argument_group("MTM MODEL_HYPER")
    p_mtm.add_argument('--mtm_num_base_features', type=int, default=1,
                        help="Number of non-embedding features for MTM input channels. Final num_chn = num_base_features + n_embed. Default: 1")
    p_mtm.add_argument('--mtm_d_static', type=int, default=0,
                        help="Dimension of static features for MTM. Default: 0")
    p_mtm.add_argument('--mtm_ratios', nargs='+', type=int, default=[2, 2, 2],
                        help="Downsampling ratios for MTM blocks. e.g., --mtm_ratios 2 2 2. Default: [2, 2, 2]")
    p_mtm.add_argument('--mtm_r_hid', type=int, default=4,
                        help="Hidden dimension ratio for the feed-forward network in MTM blocks. Default: 4")
    p_mtm.add_argument('--mtm_norm_first', type=str2bool, default='True',
                        help="Whether to use pre-normalization (LayerNorm before attention/FFN) in MTM. Default: True")
    p_mtm.add_argument('--mtm_down_mode', type=str, default='concat', choices=['concat', 'pool'],
                        help="Downsampling mode for MTM. Default: 'concat'")
    p_mtm.add_argument('--mtm_use_channel', action="store_true", default=False,
                        help="Whether to use channel attention in MTM. Default: False")

    # SoftShape-model-specific hyperparameters
    p_softshape = parser.add_argument_group("SOFTSHAPE MODEL_HYPER")
    p_softshape.add_argument('--softshape_emb_dim', type=int, default=128,
                        help="Embedding dimension for SoftShapeNet. Default: 128")
    p_softshape.add_argument('--softshape_depth', type=int, default=2,
                        help="Number of layers for SoftShapeNet. Default: 2")
    p_softshape.add_argument('--softshape_sparse_rate', type=float, default=0.5,
                        help="Sparse rate for SoftShapeNet. Default: 0.5")
    p_softshape.add_argument('--softshape_moe_loss', type=float, default=0.001,
                        help="moe loss for SoftShapeNet. Default: 0.001")
    p_softshape.add_argument('--softshape_shape_size', type=int, default=8,
                        help="Shape size for SoftShapeNet. Default: 8")
    p_softshape.add_argument('--softshape_num_experts', type=int, default=8,
                        help="Number of experts for SoftShapeNet MoE. Default: 8")
    p_softshape.add_argument('--softshape_stride', type=int, default=4,
                        help="Stride for SoftShapeNet shape embedding. Default: 4")
    p_softshape.add_argument('--softshape_warm_up_epoch', type=int, default=20,
                        help="Warm up epoch for SoftShapeNet. Default: 20")

    p_call = parser.add_argument_group("CALL")
    # BUGFIX: help texts previously read "weather use ..."; fixed to "whether".
    p_call.add_argument(
        "--mtm",
        action="store_true",
        default=False,
        help="whether to use the mtm model",
    )
    p_call.add_argument(
        "--mpts",
        action="store_true",
        default=False,
        help="whether to use the mpts model",
    )
    p_call.add_argument(
        "--softshape",
        action="store_true",
        default=False,
        help="whether to use the softshape model",
    )
    p_call.add_argument(
        "--infer",
        action="store_true",
        default=False,
        help="whether to use infer mode",
    )
    p_call.add_argument(
        "--ddp",
        action="store_true",
        default=False,
        help="whether to use ddp mode",
    )
    p_call.add_argument("--model_path", "-m", type=str, required=True, help="path to the trained model (.ckpt)")
    p_call.add_argument("--model_type", type=str, default="both_bilstm", choices=["both_bilstm", "seq_bilstm", "signal_bilstm"], help="type of model, default: both_bilstm")
    p_call.add_argument("--seq_len", type=int, default=21, help="len of kmer, default 21")
    p_call.add_argument("--signal_len", type=int, default=15, help="signal num of one base, default 15")
    p_call.add_argument("--layernum1", type=int, default=3, help="lstm layer num for combined feature, default 3")
    p_call.add_argument("--layernum2", type=int, default=1, help="lstm layer num for seq feature, default 1")
    p_call.add_argument("--class_num", type=int, default=2)
    p_call.add_argument("--dropout_rate", type=float, default=0)
    p_call.add_argument("--n_vocab", type=int, default=16, help="base_seq vocab_size, default 16")
    p_call.add_argument("--n_embed", type=int, default=4, help="base_seq embedding_size")
    p_call.add_argument("--is_base", type=str, default="yes", help="use base features in seq model, default yes")
    p_call.add_argument("--is_signallen", type=str, default="yes", help="use signal length feature, default yes")
    p_call.add_argument("--is_trace", type=str, default="no", help="use trace feature, default no")
    p_call.add_argument("--batch_size", "-b", type=int, default=512, help="batch size, default 512")
    p_call.add_argument("--hid_rnn", type=int, default=256, help="BiLSTM hidden_size, default 256")
    p_call.add_argument('--use_compile', type=str, default="no", required=False,
                             help="[EXPERIMENTAL] if using torch.compile, yes or no, "
                                  "default no ('yes' only works in pytorch>=2.0)")

    p_output = parser.add_argument_group("OUTPUT")
    p_output.add_argument("--result_file", "-o", type=str, required=True, help="path to save the predicted result")

    p_f5 = parser.add_argument_group("EXTRACTION")
    p_f5.add_argument("--single", action="store_true", default=False, help="fast5 files are in single-read format")
    p_f5.add_argument("--recursively", "-r", type=str, default="yes", help="find files recursively, default yes")
    p_f5.add_argument("--rna", action="store_true", default=False, help="fast5 files are from RNA samples")
    p_f5.add_argument("--basecall_group", type=str, default=None, help="basecall group from Guppy")
    p_f5.add_argument("--basecall_subgroup", type=str, default="BaseCalled_template", help="basecall subgroup, default BaseCalled_template")
    p_f5.add_argument("--reference_path", type=str, help="reference file (.fa)")
    p_f5.add_argument("--normalize_method", type=str, choices=["mad", "zscore"], default="mad", help="signal normalization method, default mad")
    p_f5.add_argument("--methy_label", type=int, choices=[1, 0], default=1, help="label of modified bases, default 1")
    p_f5.add_argument("--motifs", type=str, default="CG", help="motif seq to extract, default CG")
    p_f5.add_argument("--mod_loc", type=int, default=0, help="0-based location of targeted base in motif, default 0")
    p_f5.add_argument("--pad_only_r", action="store_true", default=False, help="pad zeros only to right of signals")
    p_f5.add_argument("--positions", type=str, default=None, help="file with list of positions")
    p_f5.add_argument("--trace", action="store_true", default=False, help="use trace, default false")

    p_mape = parser.add_argument_group("MAPe")
    p_mape.add_argument("--corrected_group", type=str, default="RawGenomeCorrected_000", help="corrected_group of fast5 files")

    p_mapping = parser.add_argument_group("MAPPING")
    p_mapping.add_argument("--mapping", action="store_true", default=False, help="use mapping to get alignment")
    p_mapping.add_argument("--mapq", type=int, default=1, help="mapping quality cutoff, default 1")
    p_mapping.add_argument("--identity", type=float, default=0.0, help="identity cutoff, default 0.0")
    p_mapping.add_argument("--coverage_ratio", type=float, default=0.50, help="coverage percent, default 0.50")
    p_mapping.add_argument("--best_n", "-n", type=int, default=1, help="best_n arg in mappy, default 1")

    parser.add_argument("--nproc", "-p", type=int, default=10, help="number of processes, default 10")
    parser.add_argument("--nproc_gpu", type=int, default=2, help="number of processes to use gpu, default 2")

    args = parser.parse_args()
    display_args(args)
    # --infer without --mtm currently falls through and does nothing;
    # NOTE(review): confirm whether other --infer model flags should dispatch too.
    if args.infer:
        if args.mtm:
            inference_mtm(args)
    else:
        call_mods(args)

if __name__ == "__main__":
    sys.exit(main())