# -*- coding: utf-8 -*-
from math import e
from turtle import st
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau,OneCycleLR
from sklearn import metrics
import numpy as np
import argparse
import os
import sys
import time
import re

from .models import (
    ModelBiLSTM, 
    ModelDLinear,
    ModelCatch,
    ModelPatchBiLSTM,
    frequency_loss,
    MethylationHead,
    SharedBackbone,
    SignalPredictionHead,
    SharedEmbedding,
    #ModelFITS
)
from .mtms.mtm import MTM
from .mptsnet.MPTSNet import MPTSmodel
from .mptsnet.utils import fft_main_periods_wo_duplicates
from .softshape.SoftShapeModel import SoftShapeNet


from .dataloader import (
    SignalFeaData1s,
    generate_offsets,
)
from .dataloader import clear_linecache
from .utils.process_utils import display_args
from .utils.process_utils import str2bool
from .utils.process_utils import count_line_num

from .utils.constants_torch import use_cuda

import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP

from datetime import timedelta
from torch.cuda.amp import autocast, GradScaler # [added] mixed-precision training support

# add this export temporarily
# https://github.com/pytorch/pytorch/issues/37377
os.environ['MKL_THREADING_LAYER'] = 'GNU'
os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "2"

# https://zhuanlan.zhihu.com/p/350301395
# https://github.com/tczhangzhi/pytorch-distributed/blob/master/multiprocessing_distributed.py
def reduce_mean(tensor, nprocs):
    """Return the element-wise mean of ``tensor`` over all processes.

    Clones the input (so the caller's tensor is left untouched), sums the
    clone across the default process group, and divides by the number of
    processes.
    """
    summed = tensor.clone()
    dist.all_reduce(summed, op=dist.ReduceOp.SUM)
    return summed / nprocs


# https://github.com/dpoulopoulos/examples/blob/feature-group-shuffle-split/distributed/ranzcr/utils.py
def cleanup():
    """Tear down the default torch.distributed process group."""
    dist.destroy_process_group()


# https://github.com/dpoulopoulos/examples/blob/feature-group-shuffle-split/distributed/ranzcr/utils.py
# TODO: only for single node, or multi nodes in shared file system?
def checkpoint(model, gpu, model_save_path):
    """Save the DDP-wrapped model on the master rank and reload it everywhere.

    Rank 0 writes ``model.module``'s state dict to ``model_save_path``; all
    processes then wait on a barrier and reload the file, remapping storages
    saved from ``cuda:0`` onto their own device.

    Args:
        model: the DDP-wrapped model to synchronize.
        gpu: this process's device identifier.
        model_save_path: filesystem path for the checkpoint file.
    """
    if gpu == 0:
        # All ranks hold identical parameters (gradients are synchronized in
        # every backward pass), so writing one copy from the master suffices.
        torch.save(model.module.state_dict(), model_save_path)

    # Make sure no rank reads the file before rank 0 has finished writing it.
    dist.barrier()

    # Remap tensors stored on cuda:0 to this rank's GPU before loading.
    map_location = {'cuda:%d' % 0: 'cuda:%d' % gpu}
    state_dict = torch.load(model_save_path, map_location=map_location)
    model.module.load_state_dict(state_dict)

def check_grad_hook(module, grad_input, grad_output):
    """Backward hook that logs gradient status for every parameter.

    Debug aid for DDP "parameter received no gradient" issues: writes one
    stderr line per parameter of ``module`` reporting either the gradient
    norm or the absence of a gradient, plus the ``requires_grad`` flag.
    ``grad_input`` / ``grad_output`` satisfy the hook signature but are not
    inspected.
    """
    cls_name = module.__class__.__name__
    for pname, param in module.named_parameters():
        if param.grad is None:
            sys.stderr.write(f"Module {cls_name} parameter {pname} has no gradient! and requires_grad: {param.requires_grad}\n")
        else:
            sys.stderr.write(f"Module {cls_name} parameter {pname} has gradient: {param.grad.norm()} and requires_grad: {param.requires_grad}\n")
    sys.stderr.flush()
def train_worker(local_rank, global_world_size, args):
    """Per-GPU DDP training worker for ModelBiLSTM.

    Spawned once per local GPU. Joins the NCCL process group, builds the
    model and the distributed train/valid dataloaders, then runs up to
    args.max_epoch_num epochs of training with per-epoch validation,
    rank-0 checkpointing, LR scheduling and loss-based early stopping.

    Args:
        local_rank: GPU / process index on this node (used as cuda device id).
        global_world_size: total number of processes across all nodes.
        args: parsed command-line namespace (hyper-parameters and paths).
    """
    # Global rank = node offset + local GPU index.
    global_rank = args.node_rank * args.ngpus_per_node + local_rank

    dist.init_process_group(
        backend="nccl",
        init_method=args.dist_url,
        world_size=global_world_size,
        rank=global_rank,
    )

    # device = torch.device("cuda", local_rank)
    # torch.cuda.set_device(local_rank)

    sys.stderr.write("training_process-{} [init] == local rank: {}, global rank: {} ==\n".format(os.getpid(),
                                                                                                local_rank,
                                                                                                global_rank))
    
    # 1. define network
    # Only rank 0 (or every rank when epoch_sync is set) needs model_dir;
    # the node-local master additionally clears stale checkpoints of this
    # model_type left over from a previous run.
    if global_rank == 0 or args.epoch_sync:
        model_dir = args.model_dir
        if model_dir != "/":
            model_dir = os.path.abspath(model_dir).rstrip("/")
            if local_rank == 0:
                if not os.path.exists(model_dir):
                    os.makedirs(model_dir)
                else:
                    # NOTE(review): this pattern requires a "_p\d+_" segment,
                    # but the checkpoints saved below use ".b{}_s{}_epoch{}.ckpt"
                    # without one — stale files may never match; confirm.
                    model_regex = re.compile(
                        r"" + args.model_type + r"\.b\d+_s\d+_p\d+_epoch\d+\.ckpt*"
                    )
                    for mfile in os.listdir(model_dir):
                        if model_regex.match(mfile) is not None:
                            os.remove(model_dir + "/" + mfile)
            model_dir += "/"

    model = ModelBiLSTM(
        args.seq_len,
        args.signal_len,
        args.layernum1,
        args.layernum2,
        args.class_num,
        args.dropout_rate,
        args.hid_rnn,
        args.n_vocab,
        args.n_embed,
        str2bool(args.is_base),
        str2bool(args.is_signallen),
        str2bool(args.is_trace),
        args.model_type,
        #local_rank
    )

    # Optional warm start: dict.update keeps any parameter missing from the
    # checkpoint at its freshly initialized value (partial load).
    if args.init_model is not None:
        sys.stderr.write("training_process-{} loading pre-trained model: {}\n".format(os.getpid(), args.init_model))
        para_dict = torch.load(args.init_model, map_location=torch.device('cpu'))
        model_dict = model.state_dict()
        model_dict.update(para_dict)
        model.load_state_dict(model_dict)
    
    if str2bool(args.use_compile):
        try:
            model = torch.compile(model)
        except:
            raise ImportError('torch.compile does not exist in PyTorch<2.0.')

    dist.barrier()

    model = model.cuda(local_rank)
    # DistributedDataParallel
    model = DDP(model, device_ids=[local_rank], output_device=local_rank)
    
    # 2. define dataloader
    sys.stderr.write("training_process-{} reading data..\n".format(os.getpid()))
    
    train_linenum = count_line_num(args.train_file, False)
    train_offsets = generate_offsets(args.train_file)
    train_dataset = SignalFeaData1s(args.train_file, train_offsets, train_linenum)
    # The DistributedSampler shards and shuffles the data, so the DataLoader
    # itself must use shuffle=False.
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,
                                                                    shuffle=True)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.dl_num_workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    valid_linenum = count_line_num(args.valid_file, False)
    valid_offsets = generate_offsets(args.valid_file)
    valid_dataset = SignalFeaData1s(args.valid_file, valid_offsets, valid_linenum)
    valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset,
                                                                    shuffle=True)
    valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.dl_num_workers,
                                               pin_memory=True,
                                               sampler=valid_sampler)
    
    # Loss and optimizer
    # Class weights [1, pos_weight] up-weight the positive class.
    weight_rank = torch.from_numpy(np.array([1, args.pos_weight])).float()
    weight_rank = weight_rank.cuda(local_rank)
    criterion = nn.CrossEntropyLoss(weight=weight_rank)
    if args.optim_type == "Adam":
        optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    elif args.optim_type == "RMSprop":
        optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr)
    elif args.optim_type == "SGD":
        optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.8)
    else:
        raise ValueError("optim_type is not right!")
    if args.lr_scheduler == "StepLR":
        scheduler = StepLR(optimizer, step_size=args.lr_decay_step, gamma=args.lr_decay)
    elif args.lr_scheduler == "ReduceLROnPlateau":
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=args.lr_decay,
                                      patience=args.lr_patience, verbose=True)
    else:
        raise ValueError("--lr_scheduler is not right!")
    

    # Train the model
    total_step = len(train_loader)
    sys.stderr.write("training_process-{} total_step: {}\n".format(os.getpid(), total_step))
    curr_best_accuracy = 0
    curr_best_accuracy_loc = 0
    curr_lowest_loss = 10000
    v_accuracy_epoches = []

    early_stop_counter = 0
    early_stop_patience = 3  # early stop only after 3 consecutive epochs without improvement

    model.train()
    for epoch in range(args.max_epoch_num):
        # set train sampler (re-seeds the sampler's shuffle for this epoch)
        train_loader.sampler.set_epoch(epoch)

        no_best_model = True
        tlosses = []
        start = time.time()
        for i, sfeatures in enumerate(train_loader):
            _, kmer, base_means, base_stds, base_signal_lens, signals, labels,tags = (
                sfeatures
            )
            # Crop each per-base feature window to seq_len columns starting
            # at args.bias (sub-window of the wider stored window).
            kmer = kmer[:, args.bias:args.seq_len+args.bias] # (B, T), T=21
            base_means = base_means[:, args.bias:args.seq_len+args.bias]
            base_stds = base_stds[:, args.bias:args.seq_len+args.bias]
            base_signal_lens = base_signal_lens[:, args.bias:args.seq_len+args.bias]
            signals = signals[:, args.bias:args.seq_len+args.bias, :]

            kmer = kmer.cuda(local_rank, non_blocking=True)
            base_means = base_means.unsqueeze(-1).cuda(local_rank, non_blocking=True).float()
            base_stds = base_stds.unsqueeze(-1).cuda(local_rank, non_blocking=True).float()
            base_signal_lens = base_signal_lens.unsqueeze(-1).cuda(local_rank, non_blocking=True).float()
            # base_probs = base_probs.cuda(local_rank, non_blocking=True)
            signals = signals.cuda(local_rank, non_blocking=True)
            labels = labels.cuda(local_rank, non_blocking=True)

            # Handle NaNs
            x_mask = torch.isnan(signals)
            signals[x_mask] = 0  # Replace NaNs with 0

            # Forward pass
            outputs, _ = model(
                kmer, base_means, base_stds, base_signal_lens, signals
            )
            loss = criterion(outputs, labels)

            # TODO: reduce loss? - no need
            # TODO: maybe don't need barrier() either
            # dist.barrier()
            # loss = reduce_mean(loss, global_world_size)

            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            # Clip global grad norm to 0.5 to stabilize training.
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
            optimizer.step()

            tlosses.append(loss.detach().item())
            # Rank 0 logs the running mean train loss every step_interval
            # steps (and on the last step), then resets the window.
            if global_rank == 0 and ((i + 1) % args.step_interval == 0 or (i + 1) == total_step):
                time_cost = time.time() - start
                sys.stderr.write("Epoch [{}/{}], Step [{}/{}]; "
                                 "TrainLoss: {:.4f}; Time: {:.2f}s\n".format(epoch + 1,
                                                                             args.max_epoch_num, i + 1,
                                                                             total_step, np.mean(tlosses),
                                                                             time_cost))
                sys.stderr.flush()
                start = time.time()
                tlosses = []

        # ---- per-epoch validation ----
        model.eval()
        with torch.no_grad():
            vlosses, vlabels_total, vpredicted_total = [], [], []
            v_meanloss = 10000
            for vi, vsfeatures in enumerate(valid_loader):
                (
                    _,
                    vkmer,
                    vbase_means,
                    vbase_stds,
                    vbase_signal_lens,
                    vsignals,
                    vlabels,
                    vtags,
                ) = vsfeatures

                vkmer = vkmer[:, args.bias:args.seq_len+args.bias]
                vbase_means = vbase_means[:, args.bias:args.seq_len+args.bias]
                vbase_stds = vbase_stds[:, args.bias:args.seq_len+args.bias]
                vbase_signal_lens = vbase_signal_lens[:, args.bias:args.seq_len+args.bias]
                vsignals = vsignals[:, args.bias:args.seq_len+args.bias, :]

                vkmer = vkmer.cuda(local_rank, non_blocking=True)
                vbase_means = vbase_means.unsqueeze(-1).cuda(local_rank, non_blocking=True).float()
                vbase_stds = vbase_stds.unsqueeze(-1).cuda(local_rank, non_blocking=True).float()
                vbase_signal_lens = vbase_signal_lens.unsqueeze(-1).cuda(local_rank, non_blocking=True).float()
                # vbase_probs = vbase_probs.cuda(local_rank, non_blocking=True)
                vsignals = vsignals.cuda(local_rank, non_blocking=True)
                vlabels = vlabels.cuda(local_rank, non_blocking=True)

                # Handle NaNs
                vx_mask = torch.isnan(vsignals)
                vsignals[vx_mask] = 0  # Replace NaNs with 0
                voutputs, vlogits = model(
                    vkmer, vbase_means, vbase_stds, vbase_signal_lens, vsignals
                )
                vloss = criterion(voutputs, vlabels)

                # All-reduce the validation loss so every rank sees the same
                # mean loss (keeps early-stop/scheduler decisions consistent).
                dist.barrier()
                vloss = reduce_mean(vloss, global_world_size)

                _, vpredicted = torch.max(vlogits.data, 1)

                vlabels = vlabels.cpu()
                vpredicted = vpredicted.cpu()

                vlosses.append(vloss.item())
                vlabels_total += vlabels.tolist()
                vpredicted_total += vpredicted.tolist()

            # NOTE(review): accuracy/precision/recall are computed only on
            # this rank's validation shard (predictions are not gathered);
            # vloss above is the only all-reduced quantity.
            v_accuracy = metrics.accuracy_score(vlabels_total, vpredicted_total)
            v_precision = metrics.precision_score(vlabels_total, vpredicted_total)
            v_recall = metrics.recall_score(vlabels_total, vpredicted_total)
            v_meanloss = np.mean(vlosses)

            # -------------------- Model Save Logic --------------------
            # Save whenever accuracy is within 1e-4 of the best seen so far.
            if v_accuracy > curr_best_accuracy - 0.0001:
                if global_rank == 0:
                    torch.save(model.module.state_dict(),
                               model_dir + args.model_type +
                               '.b{}_s{}_epoch{}.ckpt'.format(args.seq_len, args.signal_len, epoch + 1))
                if v_accuracy > curr_best_accuracy:
                    curr_best_accuracy = v_accuracy
                    curr_best_accuracy_loc = epoch + 1

                # Extra checkpoint when accuracy improved over the last epoch.
                if len(v_accuracy_epoches) > 0 and v_accuracy > v_accuracy_epoches[-1]:
                    if global_rank == 0:
                        torch.save(model.module.state_dict(),
                                   model_dir + args.model_type +
                                   '.betterthanlast.b{}_s{}_epoch{}.ckpt'.format(
                                       args.seq_len, args.signal_len, epoch + 1))
            if v_meanloss < curr_lowest_loss:
                curr_lowest_loss = v_meanloss
                no_best_model = False  # this epoch improved the best validation loss

            v_accuracy_epoches.append(v_accuracy)

            # -------------------- Log --------------------
            time_cost = time.time() - start
            if global_rank == 0:
                try:
                    # ReduceLROnPlateau has no get_last_lr(); the except
                    # branch logs the same line without the LR field.
                    last_lr = scheduler.get_last_lr()
                    sys.stderr.write('Epoch [{}/{}]; LR: {:.4e}; '
                                     'ValidLoss: {:.4f}, Acc: {:.4f}, Prec: {:.4f}, Reca: {:.4f}, '
                                     'Best_acc: {:.4f}; Time: {:.2f}s\n'
                                     .format(epoch + 1, args.max_epoch_num, last_lr,
                                             v_meanloss, v_accuracy, v_precision, v_recall,
                                             curr_best_accuracy, time_cost))
                except Exception:
                    sys.stderr.write('Epoch [{}/{}]; ValidLoss: {:.4f}, '
                                     'Acc: {:.4f}, Prec: {:.4f}, Reca: {:.4f}, '
                                     'Best_acc: {:.4f}; Time: {:.2f}s\n'
                                     .format(epoch + 1, args.max_epoch_num,
                                             v_meanloss, v_accuracy, v_precision, v_recall,
                                             curr_best_accuracy, time_cost))
                sys.stderr.flush()

        model.train()

        # -------------------- Early Stop Logic --------------------
        if no_best_model:
            early_stop_counter += 1
            sys.stderr.write(f"training_process-{os.getpid()} no improvement ({early_stop_counter}/{early_stop_patience})\n")
        else:
            early_stop_counter = 0  # reset if improved

        # Only allow early stop once the minimum epoch count is reached.
        if early_stop_counter >= early_stop_patience and epoch >= args.min_epoch_num - 1:
            sys.stderr.write(f"training_process-{os.getpid()} early stop triggered after {early_stop_counter} stagnant epochs!\n")
            break

        # -------------------- Sync & Scheduler --------------------
        if args.epoch_sync:
            sync_ckpt = model_dir + args.model_type + \
                        '.epoch_sync_node{}.b{}_epoch{}.ckpt'.format(args.node_rank, args.seq_len, epoch + 1)
            checkpoint(model, local_rank, sync_ckpt)

        if args.lr_scheduler == "ReduceLROnPlateau":
            scheduler.step(v_meanloss)
        else:
            scheduler.step()
    if global_rank == 0:
        sys.stderr.write("best model is in epoch {} (Acc: {})\n".format(
            curr_best_accuracy_loc, curr_best_accuracy))
    clear_linecache()
    cleanup()

def train_worker_catch(local_rank, global_world_size, args):
    """Per-GPU DDP training worker for ModelCatch.

    Like train_worker, but trains ModelCatch with a composite objective
    (classification + reconstruction + dc + frequency auxiliary losses) and
    three optimizers over disjoint parameter groups: the channel-mask
    generator (stepped every 100 iterations), the classifier stack
    (scheduler-driven LR), and the remaining "main" parameters (manual
    per-epoch LR decay).

    Args:
        local_rank: GPU / process index on this node (used as cuda device id).
        global_world_size: total number of processes across all nodes.
        args: parsed command-line namespace (hyper-parameters and paths).
    """
    global_rank = args.node_rank * args.ngpus_per_node + local_rank

    dist.init_process_group(
        backend="nccl",
        init_method=args.dist_url,
        world_size=global_world_size,
        rank=global_rank,
    )

    sys.stderr.write("training_process-{} [init] == local rank: {}, global rank: {} ==\n".format(os.getpid(),
                                                                                                local_rank,
                                                                                                global_rank))
    
    # 1. build the network
    # Only rank 0 (or every rank when epoch_sync is set) needs model_dir;
    # the node-local master also removes stale checkpoints of this model_type.
    if global_rank == 0 or args.epoch_sync:
        model_dir = args.model_dir
        if model_dir != "/":
            model_dir = os.path.abspath(model_dir).rstrip("/")
            if local_rank == 0:
                if not os.path.exists(model_dir):
                    os.makedirs(model_dir)
                else:
                    model_regex = re.compile(
                        r"" + args.model_type + r"\.b\d+_s\d+_epoch\d+\.ckpt*"
                    )
                    for mfile in os.listdir(model_dir):
                        if model_regex.match(mfile) is not None:
                            os.remove(model_dir + "/" + mfile)
            model_dir += "/"

    model = ModelCatch(
        seq_len=args.seq_len,
        signal_len=args.signal_len,
        num_classes=args.class_num,
        vocab_size=args.n_vocab,
        embedding_size=args.n_embed,
    )

    # Optional warm start: dict.update keeps parameters absent from the
    # checkpoint at their freshly initialized values (partial load).
    if args.init_model is not None:
        sys.stderr.write("training_process-{} loading pre-trained model: {}\n".format(os.getpid(), args.init_model))
        para_dict = torch.load(args.init_model, map_location=torch.device('cpu'))
        model_dict = model.state_dict()
        model_dict.update(para_dict)
        model.load_state_dict(model_dict)
    
    if str2bool(args.use_compile):
        try:
            model = torch.compile(model)
        except:
            raise ImportError('torch.compile does not exist in PyTorch<2.0.')

    dist.barrier()

    model = model.cuda(local_rank)
    # # register backward hook to check gradients
    # for name, module in model.named_modules():
    #     module.register_backward_hook(check_grad_hook)
    model = DDP(model, device_ids=[local_rank], output_device=local_rank)
    
    # 2. build the dataloaders
    sys.stderr.write("training_process-{} reading data..\n".format(os.getpid()))
    
    train_linenum = count_line_num(args.train_file, False)
    train_offsets = generate_offsets(args.train_file)
    train_dataset = SignalFeaData1s(args.train_file, train_offsets, train_linenum)
    # The DistributedSampler shards and shuffles; DataLoader shuffle stays False.
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,
                                                                    shuffle=True)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.dl_num_workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    valid_linenum = count_line_num(args.valid_file, False)
    valid_offsets = generate_offsets(args.valid_file)
    valid_dataset = SignalFeaData1s(args.valid_file, valid_offsets, valid_linenum)
    valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset,
                                                                    shuffle=True)
    valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.dl_num_workers,
                                               pin_memory=True,
                                               sampler=valid_sampler)
    
    # loss function and optimizers
    # Class weights [1, pos_weight] up-weight the positive class.
    weight_rank = torch.from_numpy(np.array([1, args.pos_weight])).float()
    weight_rank = weight_rank.cuda(local_rank)
    criterion = nn.CrossEntropyLoss(weight=weight_rank)
    
    # Split parameters into three disjoint groups: channel-mask generator,
    # classifier stack (LSTM/fc/relu heads + classifier), and the rest.
    all_params = list(model.module.parameters())
    mask_params = list(model.module.channel_mask_gen.parameters())
    classifier_params = (
        #list(model.module.classifier.parameters()) +
        list(model.module.lstm_time.parameters()) +
        list(model.module.fc_time.parameters()) +
        list(model.module.relu_time.parameters()) +
        list(model.module.lstm_freq.parameters()) +
        list(model.module.fc_freq.parameters()) +
        list(model.module.relu_freq.parameters()) +
        list(model.module.classifier.parameters())
    )
    # Membership by id() — parameters are not hashable by value.
    mask_param_ids = {id(p) for p in mask_params}
    classifier_param_ids = {id(p) for p in classifier_params}
    main_params = [p for p in all_params if id(p) not in mask_param_ids and id(p) not in classifier_param_ids]
    
    # One optimizer per parameter group (mask LR is 10x smaller than main).
    if args.optim_type == "Adam":
        main_optimizer = torch.optim.Adam(main_params, lr=0.0001)
        mask_optimizer = torch.optim.Adam(mask_params, lr=0.00001)
        classifier_optimizer = torch.optim.Adam(classifier_params, lr=args.lr)
    elif args.optim_type == "RMSprop":
        main_optimizer = torch.optim.RMSprop(main_params, lr=0.0001)
        mask_optimizer = torch.optim.RMSprop(mask_params, lr=0.00001)
        classifier_optimizer = torch.optim.RMSprop(classifier_params, lr=args.lr)
    elif args.optim_type == "SGD":
        main_optimizer = torch.optim.SGD(main_params, lr=0.0001, momentum=0.8)
        mask_optimizer = torch.optim.SGD(mask_params, lr=0.00001, momentum=0.8)
        classifier_optimizer = torch.optim.SGD(classifier_params, lr=args.lr, momentum=0.8)
    else:
        raise ValueError("optim_type is not right!")
    
    frequencyloss = frequency_loss(model.module.catch_configs)
    recloss = nn.MSELoss()

    # LR scheduler drives the classifier group only; main/mask LRs are
    # decayed manually inside the epoch loop.
    #train_steps = len(train_loader)
    #total_batches = len(train_loader) * args.max_epoch_num
    # scheduler_main = OneCycleLR(optimizer=main_optimizer, 
    #                             steps_per_epoch=train_steps,
    #                             pct_start=0.3,
    #                             max_lr=0.0001,
    #                             epochs=args.max_epoch_num,
    #                             #total_steps=total_batches
    #                             )
    # scheduler_mask = OneCycleLR(optimizer=mask_optimizer, 
    #                             steps_per_epoch=train_steps// 100,
    #                             pct_start=0.3,
    #                             max_lr=0.00001,
    #                             epochs=args.max_epoch_num,
    #                             #total_steps=total_batches // 100
    #                             )
    if args.lr_scheduler == "StepLR":
        scheduler_classifier = StepLR(classifier_optimizer, step_size=args.lr_decay_step, gamma=args.lr_decay)
    elif args.lr_scheduler == "ReduceLROnPlateau":
        scheduler_classifier = ReduceLROnPlateau(classifier_optimizer, mode='min', factor=args.lr_decay,
                                                 patience=args.lr_patience, verbose=True)
    else:
        raise ValueError("--lr_scheduler is not right!")
    
    # global step counter (currently unused)
    #global_step = 0
    
    # train the model
    total_step = len(train_loader)
    sys.stderr.write("training_process-{} total_step: {}\n".format(os.getpid(), total_step))
    curr_best_accuracy = 0
    curr_best_accuracy_loc = 0
    curr_lowest_loss = 10000
    v_accuracy_epoches = []
    model.train()
    for epoch in range(args.max_epoch_num):
        train_loader.sampler.set_epoch(epoch)

        # Manually decay main/mask learning rates: halved every epoch
        # (note: epoch // 1 == epoch, so the divisor is effectively 1).
        if epoch >= 1:
            lr_adjust = 0.0001 * (0.5 ** (epoch // 1))
            lr_adjust_mask =  0.00001 * (0.5 ** (epoch // 1))
            for param_group in main_optimizer.param_groups:
                param_group['lr'] = lr_adjust
            for param_group in mask_optimizer.param_groups:
                param_group['lr'] = lr_adjust_mask
            if global_rank == 0:
                sys.stderr.write(f"Epoch [{epoch + 1}/{args.max_epoch_num}]; Main LR: {lr_adjust:.4e}; Mask LR: {lr_adjust_mask:.4e}\n")

        no_best_model = True
        tlosses = []
        methy_losses = []
        rec_losses = []
        dc_losses = []
        auxi_losses = []
        start = time.time()
        for i, sfeatures in enumerate(train_loader):
            # NOTE(review): unpacks 7 fields here, while train_worker unpacks
            # 8 (including tags) from the same dataset class — confirm that
            # SignalFeaData1s yields 7-tuples for this pipeline.
            _, kmer, base_means, base_stds, base_signal_lens, signals, labels = sfeatures
            kmer = kmer.cuda(local_rank, non_blocking=True)
            base_means = base_means.cuda(local_rank, non_blocking=True).float()
            base_stds = base_stds.cuda(local_rank, non_blocking=True).float()
            base_signal_lens = base_signal_lens.cuda(local_rank, non_blocking=True).float()
            signals = signals.cuda(local_rank, non_blocking=True).float()
            labels = labels.cuda(local_rank, non_blocking=True).long()

            # Zero gradients; the mask generator's gradients are only zeroed
            # (and stepped) every 100 iterations, so they accumulate between.
            main_optimizer.zero_grad()
            if i % 100 == 0:
                mask_optimizer.zero_grad()
            classifier_optimizer.zero_grad()

            # forward pass
            outputs, softmax_out, recon_signals, complex_z, dcloss, input_tensor = model(
                kmer, base_means, base_stds, base_signal_lens, signals, training=True
            )
            norm_input = model.module.revin_layer(input_tensor, 'norm')
            methy_loss = criterion(outputs, labels)
            rec_loss = recloss(recon_signals, input_tensor)
            auxi_loss = frequencyloss(complex_z, norm_input)
            # Composite objective: classification + weighted dc,
            # reconstruction and frequency-auxiliary terms.
            loss = methy_loss + 0.005 * dcloss + 0.5 * rec_loss + 0.005 * auxi_loss

            # backward pass
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
            
            # optimizer updates
            main_optimizer.step()
            classifier_optimizer.step()
            
            # step the mask optimizer only every 100 iterations
            if i % 100 == 0:
                mask_optimizer.step()

            tlosses.append(loss.detach().item())
            methy_losses.append(methy_loss.detach().item())
            rec_losses.append(rec_loss.detach().item())
            dc_losses.append(dcloss.detach().item())
            auxi_losses.append(auxi_loss.detach().item())
            if global_rank == 0 and ((i + 1) % args.step_interval == 0 or (i + 1) == total_step):
                time_cost = time.time() - start
                sys.stderr.write(
                    "Epoch [{}/{}], Step [{}/{}]; "
                    "TotalLoss: {:.4f}; MethyLoss: {:.4f}; "
                    "RecLoss: {:.4f}; DCLoss: {:.4f}; AuxiLoss: {:.4f}; Time: {:.2f}s\n".format(
                        epoch + 1, args.max_epoch_num, i + 1, len(train_loader),
                        np.mean(tlosses), np.mean(methy_losses),
                        np.mean(rec_losses), np.mean(dc_losses), np.mean(auxi_losses), time_cost
                    )
                )
                sys.stderr.flush()
                start = time.time()
                tlosses = []

        # ---- per-epoch validation ----
        model.eval()
        with torch.no_grad():
            vlosses, vmethy_losses, vrec_losses, vdc_losses, vauxi_losses = [], [], [], [], []
            vlabels_total, vpredicted_total = [], []
            v_meanloss = 10000
            for vi, vsfeatures in enumerate(valid_loader):
                _, vkmer, vbase_means, vbase_stds, vbase_signal_lens, vsignals, vlabels = vsfeatures
                vkmer = vkmer.cuda(local_rank, non_blocking=True)
                vbase_means = vbase_means.cuda(local_rank, non_blocking=True).float()
                vbase_stds = vbase_stds.cuda(local_rank, non_blocking=True).float()
                vbase_signal_lens = vbase_signal_lens.cuda(local_rank, non_blocking=True).float()
                vsignals = vsignals.cuda(local_rank, non_blocking=True).float()
                vlabels = vlabels.cuda(local_rank, non_blocking=True).long()
                # NOTE(review): training=True even during validation —
                # presumably needed to obtain recon/dc outputs; confirm.
                voutputs, vlogits, vrecon_signals, vcomplex_z, vdcloss, vinput_tensor = model(
                    vkmer, vbase_means, vbase_stds, vbase_signal_lens, vsignals, training=True
                )
                vnorm_input = model.module.revin_layer(vinput_tensor, 'norm')
                vmethy_loss = criterion(voutputs, vlabels)
                vrec_loss = recloss(vrecon_signals, vinput_tensor)
                vauxi_loss = frequencyloss(vcomplex_z, vnorm_input)
                vloss = vmethy_loss + 0.005 * vdcloss + 0.5 * vrec_loss + 0.005 * vauxi_loss

                # All-reduce the composite loss so every rank agrees on
                # v_meanloss (keeps early-stop/scheduler decisions in sync).
                dist.barrier()
                vloss = reduce_mean(vloss, global_world_size)

                _, vpredicted = torch.max(vlogits.data, 1)
                vlabels = vlabels.cpu()
                vpredicted = vpredicted.cpu()

                vlosses.append(vloss.item())
                vmethy_losses.append(vmethy_loss.item())
                vrec_losses.append(vrec_loss.item())
                vdc_losses.append(vdcloss.item())
                vauxi_losses.append(vauxi_loss.item())
                vlabels_total += vlabels.tolist()
                vpredicted_total += vpredicted.tolist()

            # NOTE(review): metrics are computed on this rank's validation
            # shard only (predictions are not gathered across ranks).
            v_accuracy = metrics.accuracy_score(vlabels_total, vpredicted_total)
            v_precision = metrics.precision_score(vlabels_total, vpredicted_total)
            v_recall = metrics.recall_score(vlabels_total, vpredicted_total)
            v_meanloss = np.mean(vlosses)

            # Save whenever accuracy is within 1e-4 of the best seen so far.
            if v_accuracy > curr_best_accuracy - 0.0001:
                if global_rank == 0:
                    torch.save(model.module.state_dict(),
                               model_dir + args.model_type +
                               '.b{}_s{}_epoch{}.ckpt'.format(args.seq_len, args.signal_len, epoch + 1))
                if v_accuracy > curr_best_accuracy:
                    curr_best_accuracy = v_accuracy
                    curr_best_accuracy_loc = epoch + 1

                # Extra checkpoint when accuracy improved over the last epoch.
                if len(v_accuracy_epoches) > 0 and v_accuracy > v_accuracy_epoches[-1]:
                    if global_rank == 0:
                        torch.save(model.module.state_dict(),
                                   model_dir + args.model_type +
                                   '.betterthanlast.b{}_s{}_epoch{}.ckpt'.format(args.seq_len,
                                                                                 args.signal_len,
                                                                                 epoch + 1))
            if v_meanloss < curr_lowest_loss:
                curr_lowest_loss = v_meanloss
                no_best_model = False

            v_accuracy_epoches.append(v_accuracy)

            time_cost = time.time() - start
            if global_rank == 0:
                try:
                    # ReduceLROnPlateau has no get_last_lr(); the except
                    # branch logs the same line without the LR field.
                    last_lr = scheduler_classifier.get_last_lr()
                    sys.stderr.write('Epoch [{}/{}]; LR: {:.4e}; '
                                     'ValidLoss: {:.4f}, '
                                     'Acc: {:.4f}, Prec: {:.4f}, Reca: {:.4f}, '
                                     'Best_acc: {:.4f}; Time: {:.2f}s\n'
                                     .format(epoch + 1, args.max_epoch_num, last_lr,
                                             v_meanloss, v_accuracy, v_precision, v_recall,
                                             curr_best_accuracy, time_cost))
                except Exception:
                    sys.stderr.write('Epoch [{}/{}]; '
                                    'ValidLoss: {:.4f}, '
                                    'Acc: {:.4f}, Prec: {:.4f}, Reca: {:.4f}, '
                                    'Best_acc: {:.4f}; Time: {:.2f}s\n'
                                    .format(epoch + 1, args.max_epoch_num,
                                            v_meanloss, v_accuracy, v_precision, v_recall,
                                            curr_best_accuracy, time_cost))

                sys.stderr.flush()
        model.train()

        # Early stop on the FIRST epoch without a new lowest loss (no
        # patience counter here, unlike train_worker), once min_epoch_num
        # epochs have run.
        if no_best_model and epoch >= args.min_epoch_num - 1:
            sys.stderr.write("training_process-{} early stop!\n".format(os.getpid()))
            break

        if args.epoch_sync:
            sync_ckpt = model_dir + args.model_type + \
                        '.epoch_sync_node{}.b{}_epoch{}.ckpt'.format(args.node_rank, args.seq_len, epoch + 1)
            checkpoint(model, local_rank, sync_ckpt)

        if args.lr_scheduler == "ReduceLROnPlateau":
            lr_reduce_metric = v_meanloss
            scheduler_classifier.step(lr_reduce_metric)
        else:
            scheduler_classifier.step()
        #scheduler_main.step()
        #scheduler_mask.step()

    if global_rank == 0:
        sys.stderr.write("best model is in epoch {} (Acc: {})\n".format(curr_best_accuracy_loc,
                                                                        curr_best_accuracy))
    clear_linecache()
    cleanup()


def normalize_channels(x, dim=1, eps=1e-5):
    """Normalize each channel of a tensor to zero mean / unit std, RevIN-style.

    Statistics (mean and variance) are computed independently per channel
    along ``dim``, so every channel is centered and scaled on its own.

    Args:
        x (torch.Tensor): input tensor; for the callers in this module the
            layout is (batch, time, channel) and normalization runs over time.
        dim (int): dimension along which mean/variance are computed.
            Defaults to 1, matching the original hard-coded behaviour.
        eps (float): small constant added to the variance for numerical
            stability. Defaults to 1e-5, matching the original behaviour.

    Returns:
        torch.Tensor: tensor with the same shape as ``x``, normalized
        along ``dim``.
    """
    mean = x.mean(dim=dim, keepdim=True)
    x_centered = x - mean
    # torch.var defaults to the unbiased (n-1) estimator, same as the
    # original implementation.
    var = torch.var(x_centered, dim=dim, keepdim=True) + eps
    return x_centered / torch.sqrt(var)

def train_worker_fits(local_rank, global_world_size, args):
    """DDP training worker for the FITS-style setup: a SharedBackbone plus a
    MethylationHead, trained jointly with a weighted cross-entropy
    classification loss and an auxiliary MSE loss on predicted future
    signal features.

    The two components use separate optimizers: the classifier head is
    updated only by the classification loss (computed on detached backbone
    features), while the backbone is updated only by the MSE reconstruction
    loss.

    Args:
        local_rank (int): GPU index on this node; also used as the CUDA
            device id for this process.
        global_world_size (int): total number of processes across all nodes.
        args: parsed CLI namespace providing file paths, model sizes,
            optimizer/scheduler settings, and epoch limits.

    Side effects:
        Saves checkpoints under ``args.model_dir`` (rank 0 only, plus
        optional per-epoch sync checkpoints), writes progress to stderr,
        and destroys the process group on exit.
    """
    # Global rank is derived from the node rank and the per-node GPU count.
    global_rank = args.node_rank * args.ngpus_per_node + local_rank

    dist.init_process_group(
        backend="nccl",
        init_method=args.dist_url,
        world_size=global_world_size,
        rank=global_rank,
    )

    sys.stderr.write("training_process-{} [init] == local rank: {}, global rank: {} ==\n".format(os.getpid(),
                                                                                                local_rank,
                                                                                                global_rank))

    # Rank 0 (and epoch_sync workers) prepare the model directory, deleting
    # stale checkpoints whose names match this model type's pattern.
    if global_rank == 0 or args.epoch_sync:
        model_dir = args.model_dir
        if model_dir != "/":
            model_dir = os.path.abspath(model_dir).rstrip("/")
            if local_rank == 0:
                if not os.path.exists(model_dir):
                    os.makedirs(model_dir)
                else:
                    model_regex = re.compile(
                        r"" + args.model_type + r"\.b\d+_s\d+_p\d+_epoch\d+\.ckpt*"
                    )
                    for mfile in os.listdir(model_dir):
                        if model_regex.match(mfile) is not None:
                            os.remove(model_dir + "/" + mfile)
            model_dir += "/"

    # Backbone consumes seq_len context positions and predicts args.offset
    # future positions; the head classifies from the backbone's features.
    backbone = SharedBackbone(
        seq_len=args.seq_len,
        signal_len=args.signal_len,
        pred_len=args.offset,
        vocab_size=args.n_vocab,
        embedding_size=args.n_embed,
    )
    methylation_head = MethylationHead(
        channels=4 + args.n_embed,
        num_classes=args.class_num,
    )

    nn.init.normal_(backbone.embedding.weight, mean=0, std=0.05)

    # Optionally warm-start both components from a pre-trained checkpoint;
    # only keys already present in each state dict are loaded.
    if args.init_model is not None:
        sys.stderr.write("training_process-{} loading pre-trained model: {}\n".format(os.getpid(), args.init_model))
        para_dict = torch.load(args.init_model, map_location=torch.device('cpu'))
        backbone_dict = backbone.state_dict()
        methylation_dict = methylation_head.state_dict()
        backbone_dict.update({k: v for k, v in para_dict.items() if k in backbone_dict})
        methylation_dict.update({k: v for k, v in para_dict.items() if k in methylation_dict})
        backbone.load_state_dict(backbone_dict)
        methylation_head.load_state_dict(methylation_dict)

    dist.barrier()

    backbone = backbone.cuda(local_rank)
    methylation_head = methylation_head.cuda(local_rank)
    # find_unused_parameters=True on the backbone: some parameters may not
    # receive gradients on every step (e.g. when the MSE branch is skipped).
    backbone = DDP(backbone, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
    methylation_head = DDP(methylation_head, device_ids=[local_rank], output_device=local_rank)

    sys.stderr.write("training_process-{} reading data..\n".format(os.getpid()))

    # Line-offset-indexed datasets; DistributedSampler shards per rank, so
    # the DataLoader itself must not shuffle.
    train_linenum = count_line_num(args.train_file, False)
    train_offsets = generate_offsets(args.train_file)
    train_dataset = SignalFeaData1s(args.train_file, train_offsets, train_linenum)
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.dl_num_workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    valid_linenum = count_line_num(args.valid_file, False)
    valid_offsets = generate_offsets(args.valid_file)
    valid_dataset = SignalFeaData1s(args.valid_file, valid_offsets, valid_linenum)
    valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset, shuffle=True)
    valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.dl_num_workers,
                                               pin_memory=True,
                                               sampler=valid_sampler)

    # Class-weighted CE to counter label imbalance (positive class weighted
    # by args.pos_weight); MSE for the signal-prediction auxiliary task.
    weight_rank = torch.from_numpy(np.array([1, args.pos_weight])).float().cuda(local_rank)
    criterion = nn.CrossEntropyLoss(weight=weight_rank)
    mse_loss_fn = nn.MSELoss()

    # Two disjoint parameter groups: backbone vs. classifier head.
    main_params = list(backbone.module.parameters())
    classifier_params = list(methylation_head.module.parameters())

    for name, param in backbone.module.named_parameters():
        sys.stderr.write(f"Parameter {name} is in main_params\n")
    for name, param in methylation_head.module.named_parameters():
        sys.stderr.write(f"Parameter {name} is in classifier_params\n")

    # Backbone LR is fixed at 2e-4 here (then manually decayed per epoch
    # below); the classifier starts at half of args.lr.
    if args.optim_type == "Adam":
        main_optimizer = torch.optim.Adam(main_params, lr=0.0002)
        classifier_optimizer = torch.optim.Adam(classifier_params, lr=args.lr / 2)
    elif args.optim_type == "RMSprop":
        main_optimizer = torch.optim.RMSprop(main_params, lr=0.0002)
        classifier_optimizer = torch.optim.RMSprop(classifier_params, lr=args.lr / 2)
    elif args.optim_type == "SGD":
        main_optimizer = torch.optim.SGD(main_params, lr=0.0002, momentum=0.8)
        classifier_optimizer = torch.optim.SGD(classifier_params, lr=args.lr / 2, momentum=0.8)
    else:
        raise ValueError("optim_type is not right!")

    # The LR scheduler only drives the classifier optimizer; the backbone LR
    # follows the manual decay schedule inside the epoch loop.
    if args.lr_scheduler == "StepLR":
        scheduler_classifier = StepLR(classifier_optimizer, step_size=args.lr_decay_step, gamma=args.lr_decay)
    elif args.lr_scheduler == "ReduceLROnPlateau":
        scheduler_classifier = ReduceLROnPlateau(classifier_optimizer, mode='min', factor=args.lr_decay,
                                                 patience=args.lr_patience, verbose=True)
    else:
        raise ValueError("--lr_scheduler is not right!")

    total_step = len(train_loader)
    sys.stderr.write("training_process-{} total_step: {}\n".format(os.getpid(), total_step))
    curr_best_accuracy = 0
    curr_best_accuracy_loc = 0
    curr_lowest_loss = 10000  # sentinel "infinity" for best-validation-loss tracking
    v_accuracy_epoches = []
    backbone.train()
    methylation_head.train()
    mse_weight = 0.5  # weight of the auxiliary MSE loss in the reported total loss

    for epoch in range(args.max_epoch_num):
        # Re-seed the sampler so each epoch sees a different shard ordering.
        train_loader.sampler.set_epoch(epoch)

        # Manual step decay for the backbone optimizer: halve every 3 epochs,
        # starting from epoch 2 (epoch index 1).
        if epoch >= 1:
            lr_adjust = 0.0002 * (0.5 ** (epoch // 3))
            for param_group in main_optimizer.param_groups:
                param_group['lr'] = lr_adjust
            if global_rank == 0:
                sys.stderr.write(f"Epoch [{epoch + 1}/{args.max_epoch_num}]; Main LR: {lr_adjust:.4e}\n")

        no_best_model = True
        tlosses = []
        methy_losses = []
        mse_losses = []
        start = time.time()
        for i, sfeatures in enumerate(train_loader):
            _, kmer, base_means, base_stds, base_signal_lens, signals, labels = sfeatures
            kmer = kmer.cuda(local_rank, non_blocking=True)
            base_means = base_means.cuda(local_rank, non_blocking=True).float()
            base_stds = base_stds.cuda(local_rank, non_blocking=True).float()
            base_signal_lens = base_signal_lens.cuda(local_rank, non_blocking=True).float()
            signals = signals.cuda(local_rank, non_blocking=True).float()
            labels = labels.cuda(local_rank, non_blocking=True).long()

            #sys.stderr.write(f"signals shape: {signals.shape}, range: min={signals.min().item()}, max={signals.max().item()}\n")
            #sys.stderr.write(f"base_means shape: {base_means.shape}, range: min={base_means.min().item()}, max={base_means.max().item()}\n")
            #sys.stderr.write(f"base_stds shape: {base_stds.shape}, range: min={base_stds.min().item()}, max={base_stds.max().item()}\n")
            #sys.stderr.write(f"base_signal_lens shape: {base_signal_lens.shape}, range: min={base_signal_lens.min().item()}, max={base_signal_lens.max().item()}\n")

            # Each batch must carry seq_len context positions plus args.offset
            # future positions; malformed batches are skipped entirely.
            expected_seq_len = args.seq_len + args.offset
            if signals.shape[1] != expected_seq_len or kmer.shape[1] != expected_seq_len:
                sys.stderr.write(f"Warning: Insufficient data length. signals.shape: {signals.shape}, kmer.shape: {kmer.shape}, expected seq_len: {expected_seq_len}. Skipping batch {i+1}\n")
                continue

            # Split the batch into the context window (first seq_len positions)
            # fed to the backbone ...
            train_kmer = kmer[:, :args.seq_len]
            train_base_means = base_means[:, :args.seq_len]
            train_base_stds = base_stds[:, :args.seq_len]
            train_base_signal_lens = base_signal_lens[:, :args.seq_len]
            train_signals = signals[:, :args.seq_len, :]

            # ... and the future window (last args.offset positions) used as
            # the MSE target.
            pred_kmer = kmer[:, args.seq_len:]
            pred_base_means = base_means[:, args.seq_len:]
            pred_base_stds = base_stds[:, args.seq_len:]
            pred_base_signal_lens = base_signal_lens[:, args.seq_len:]
            pred_signals = signals[:, args.seq_len:, :]

            if pred_kmer.shape[1] == 0:
                sys.stderr.write(f"Warning: Empty prediction data (offset={args.offset}). Skipping MSE loss for batch {i+1}\n")
                mse_loss = torch.tensor(0.0, device=signals.device, requires_grad=True)
            else:
                # Build the MSE ground truth at per-signal-point resolution:
                # base-level features and kmer embeddings are repeated
                # signal_len times so all channels share the time axis.
                pred_signals = pred_signals.reshape(pred_signals.shape[0], -1, 1)
                pred_kmer_embed = backbone.module.embedding(pred_kmer.long())
                pred_kmer_embed = pred_kmer_embed.repeat_interleave(args.signal_len, dim=1)
                #sys.stderr.write(f"pred_kmer_embed shape: {pred_kmer_embed.shape}, range: min={pred_kmer_embed.min().item()}, max={pred_kmer_embed.max().item()}\n")
                pred_base_means = pred_base_means.repeat_interleave(args.signal_len, dim=1).unsqueeze(-1)
                pred_base_stds = pred_base_stds.repeat_interleave(args.signal_len, dim=1).unsqueeze(-1)
                pred_base_signal_lens = pred_base_signal_lens.repeat_interleave(args.signal_len, dim=1).unsqueeze(-1)
                ground_truth_pred = torch.cat([pred_signals, pred_base_means, pred_base_stds, pred_base_signal_lens, pred_kmer_embed], dim=-1)
                #sys.stderr.write(f"ground_truth_pred shape: {ground_truth_pred.shape}\n")

            main_optimizer.zero_grad()
            classifier_optimizer.zero_grad()

            # Forward pass through backbone
            time_features, low_specxy_, xy, low_xy_adjusted = backbone(
                train_kmer, train_base_means, train_base_stds, train_base_signal_lens, train_signals
            )

            # Methylation loss with detached inputs: the classification loss
            # updates only the head, never the backbone.
            out, softmax_out = methylation_head(time_features.detach(), low_specxy_.detach())
            methy_loss = criterion(out, labels)
            methy_loss.backward(retain_graph=True)

            # NOTE(review): this prints one grad-norm line per backbone
            # parameter per batch — very verbose; consider gating behind a
            # debug flag.
            for name, param in backbone.module.named_parameters():
                grad_norm = param.grad.norm().item() if param.grad is not None else 0
                sys.stderr.write(f"Batch {i+1}, After methy_loss.backward, {name} grad norm: {grad_norm}\n")

            classifier_optimizer.step()
            # Clear any backbone grads before the MSE backward so the backbone
            # is updated by the reconstruction loss alone.
            main_optimizer.zero_grad()

            # MSE loss
            if pred_kmer.shape[1] > 0:
                # The backbone's xy output holds the flattened context first;
                # the prediction segment starts at flatten_len.
                pred_xy = xy[:, backbone.module.flatten_len:, :]
                mse_loss = 0
                # channel_weights = [1.0, 0.2, 0.2, 0.001, 0.1, 0.1, 0.1, 0.1]
                # for c in range(ground_truth_pred.shape[2]):
                #     channel_mse = mse_loss_fn(pred_xy[:, :, c], ground_truth_pred[:, :, c])
                #     #sys.stderr.write(f"Batch {i+1}, Channel {c} MSE (unnormalized): {channel_mse.item()}\n")
                #     channel_mse_norm = mse_loss_fn(normalize_channels(pred_xy[:, :, c:c+1]), normalize_channels(ground_truth_pred[:, :, c:c+1]))
                #     #sys.stderr.write(f"Batch {i+1}, Channel {c} MSE (normalized): {channel_mse_norm.item()}\n")
                #     mse_loss += channel_weights[c] * channel_mse
                # mse_loss /= sum(channel_weights)
                # NOTE(review): only the target is channel-normalized here, not
                # pred_xy — presumably the backbone output is already in
                # normalized space; confirm against SharedBackbone.
                mse_loss = mse_loss_fn(pred_xy, normalize_channels(ground_truth_pred))
                mse_loss.backward()

                for name, param in backbone.module.named_parameters():
                    grad_norm = param.grad.norm().item() if param.grad is not None else 0
                    #sys.stderr.write(f"Batch {i+1}, After mse_loss.backward, {name} grad norm: {grad_norm}\n")

                torch.nn.utils.clip_grad_norm_(backbone.module.parameters(), 2.0)
                main_optimizer.step()
            else:
                mse_loss = torch.tensor(0.0, device=signals.device, requires_grad=True)

            # Combined loss is only used for reporting; each component was
            # already backpropagated separately above.
            loss = methy_loss + mse_weight * mse_loss

            tlosses.append(loss.detach().item())
            methy_losses.append(methy_loss.detach().item())
            mse_losses.append(mse_loss.item())
            if global_rank == 0 and ((i + 1) % args.step_interval == 0 or (i + 1) == total_step):
                time_cost = time.time() - start
                sys.stderr.write(
                    "Epoch [{}/{}], Step [{}/{}]; "
                    "TotalLoss: {:.4f}; MethyLoss: {:.4f}; "
                    "MSELoss: {:.4f}; Time: {:.2f}s\n".format(
                        epoch + 1, args.max_epoch_num, i + 1, len(train_loader),
                        np.mean(tlosses), np.mean(methy_losses),
                        np.mean(mse_losses), time_cost
                    )
                )
                sys.stderr.flush()
                start = time.time()
                # NOTE(review): only tlosses is cleared here, so the reported
                # MethyLoss/MSELoss means accumulate since epoch start while
                # TotalLoss is per-interval — confirm this asymmetry is intended.
                tlosses = []

        # ---- per-epoch validation ----
        backbone.eval()
        methylation_head.eval()
        with torch.no_grad():
            vlosses, vmethy_losses, vmse_losses = [], [], []
            vlabels_total, vpredicted_total = [], []
            v_meanloss = 10000  # sentinel in case every validation batch is skipped
            for vi, vsfeatures in enumerate(valid_loader):
                _, vkmer, vbase_means, vbase_stds, vbase_signal_lens, vsignals, vlabels = vsfeatures
                vkmer = vkmer.cuda(local_rank, non_blocking=True)
                vbase_means = vbase_means.cuda(local_rank, non_blocking=True).float()
                vbase_stds = vbase_stds.cuda(local_rank, non_blocking=True).float()
                vbase_signal_lens = vbase_signal_lens.cuda(local_rank, non_blocking=True).float()
                vsignals = vsignals.cuda(local_rank, non_blocking=True).float()
                vlabels = vlabels.cuda(local_rank, non_blocking=True).long()

                if vsignals.shape[1] != expected_seq_len or vkmer.shape[1] != expected_seq_len:
                    sys.stderr.write(f"Warning: Insufficient validation data length. vsignals.shape: {vsignals.shape}, vkmer.shape: {vkmer.shape}, expected seq_len: {expected_seq_len}. Skipping batch {vi+1}\n")
                    continue

                # Same context / future split as in training.
                vtrain_kmer = vkmer[:, :args.seq_len]
                vtrain_base_means = vbase_means[:, :args.seq_len]
                vtrain_base_stds = vbase_stds[:, :args.seq_len]
                vtrain_base_signal_lens = vbase_signal_lens[:, :args.seq_len]
                vtrain_signals = vsignals[:, :args.seq_len, :]

                vpred_kmer = vkmer[:, args.seq_len:]
                vpred_base_means = vbase_means[:, args.seq_len:]
                vpred_base_stds = vbase_stds[:, args.seq_len:]
                vpred_base_signal_lens = vbase_signal_lens[:, args.seq_len:]
                vpred_signals = vsignals[:, args.seq_len:, :]

                if vpred_kmer.shape[1] == 0:
                    sys.stderr.write(f"Warning: Empty validation prediction data (offset={args.offset}). Skipping MSE loss for batch {vi+1}\n")
                    vmse_loss = torch.tensor(0.0, device=vsignals.device, requires_grad=True)
                else:
                    vpred_signals = vpred_signals.reshape(vpred_signals.shape[0], -1, 1)
                    vpred_kmer_embed = backbone.module.embedding(vpred_kmer.long())
                    vpred_kmer_embed = vpred_kmer_embed.repeat_interleave(args.signal_len, dim=1)
                    vpred_base_means = vpred_base_means.repeat_interleave(args.signal_len, dim=1).unsqueeze(-1)
                    vpred_base_stds = vpred_base_stds.repeat_interleave(args.signal_len, dim=1).unsqueeze(-1)
                    vpred_base_signal_lens = vpred_base_signal_lens.repeat_interleave(args.signal_len, dim=1).unsqueeze(-1)
                    vground_truth_pred = torch.cat([vpred_signals, vpred_base_means, vpred_base_stds, vpred_base_signal_lens, vpred_kmer_embed], dim=-1)

                # No detach needed here: we are under no_grad.
                vtime_features, vlow_specxy_, vxy, vlow_xy_adjusted = backbone(
                    vtrain_kmer, vtrain_base_means, vtrain_base_stds, vtrain_base_signal_lens, vtrain_signals
                )
                vout, vsoftmax_out = methylation_head(vtime_features, vlow_specxy_)
                vpred_xy = vxy[:, backbone.module.flatten_len:, :]
                vmethy_loss = criterion(vout, vlabels)

                if vpred_kmer.shape[1] > 0:
                    vmse_loss = 0
                    #channel_weights = [1.0, 0.2, 0.2, 0.001, 0.1, 0.1, 0.1, 0.1]
                    # for c in range(vground_truth_pred.shape[2]):
                    #     channel_mse = mse_loss_fn(vpred_xy[:, :, c], vground_truth_pred[:, :, c])
                    #     #sys.stderr.write(f"Validation Batch {vi+1}, Channel {c} MSE (unnormalized): {channel_mse.item()}\n")
                    #     channel_mse_norm = mse_loss_fn(normalize_channels(vpred_xy[:, :, c:c+1]), normalize_channels(vground_truth_pred[:, :, c:c+1]))
                    #     #sys.stderr.write(f"Validation Batch {vi+1}, Channel {c} MSE (normalized): {channel_mse_norm.item()}\n")
                    #     vmse_loss += channel_weights[c] #* channel_mse
                    # vmse_loss /= sum(channel_weights)
                    vmse_loss = mse_loss_fn(vpred_xy, normalize_channels(vground_truth_pred))

                vloss = vmethy_loss + mse_weight * vmse_loss

                # Average the validation loss across all workers so every rank
                # sees the same scheduler/early-stop metric.
                dist.barrier()
                vloss = reduce_mean(vloss, global_world_size)

                _, vpredicted = torch.max(vsoftmax_out.data, 1)
                vlabels = vlabels.cpu()
                vpredicted = vpredicted.cpu()

                vlosses.append(vloss.item())
                vmethy_losses.append(vmethy_loss.item())
                vmse_losses.append(vmse_loss.item())
                vlabels_total += vlabels.tolist()
                vpredicted_total += vpredicted.tolist()

            # NOTE(review): metrics are computed on this rank's shard only
            # (labels/predictions are not all-gathered) — confirm intended.
            v_accuracy = metrics.accuracy_score(vlabels_total, vpredicted_total)
            v_precision = metrics.precision_score(vlabels_total, vpredicted_total)
            v_recall = metrics.recall_score(vlabels_total, vpredicted_total)
            v_meanloss = np.mean(vlosses)

            # Save a checkpoint whenever accuracy is within 1e-4 of the best
            # so far (rank 0 only writes to disk).
            if v_accuracy > curr_best_accuracy - 0.0001:
                if global_rank == 0:
                    torch.save(
                        {'backbone': backbone.module.state_dict(), 'methylation_head': methylation_head.module.state_dict()},
                        model_dir + args.model_type +
                        f'.b{args.seq_len}_s{args.signal_len}_p{args.offset}_epoch{epoch + 1}.ckpt'
                    )
                if v_accuracy > curr_best_accuracy:
                    curr_best_accuracy = v_accuracy
                    curr_best_accuracy_loc = epoch + 1

                # Extra checkpoint when this epoch beat the previous epoch.
                if len(v_accuracy_epoches) > 0 and v_accuracy > v_accuracy_epoches[-1]:
                    if global_rank == 0:
                        torch.save(
                            {'backbone': backbone.module.state_dict(), 'methylation_head': methylation_head.module.state_dict()},
                            model_dir + args.model_type +
                            f'.betterthanlast.b{args.seq_len}_s{args.signal_len}_p{args.offset}_epoch{epoch + 1}.ckpt'
                        )
            # Early-stop bookkeeping is driven by validation loss, not accuracy.
            if v_meanloss < curr_lowest_loss:
                curr_lowest_loss = v_meanloss
                no_best_model = False

            v_accuracy_epoches.append(v_accuracy)

            time_cost = time.time() - start
            if global_rank == 0:
                try:
                    # ReduceLROnPlateau has no get_last_lr(); the except branch
                    # logs the same summary without the LR field.
                    last_lr = scheduler_classifier.get_last_lr()[0]
                    sys.stderr.write('Epoch [{}/{}]; LR: {:.4e}; '
                                     'ValidLoss: {:.4f}, '
                                     'Acc: {:.4f}, Prec: {:.4f}, Reca: {:.4f}, '
                                     'Best_acc: {:.4f}; Time: {:.2f}s\n'
                                     .format(epoch + 1, args.max_epoch_num, last_lr,
                                             v_meanloss, v_accuracy, v_precision, v_recall,
                                             curr_best_accuracy, time_cost))
                except Exception:
                    sys.stderr.write('Epoch [{}/{}]; '
                                    'ValidLoss: {:.4f}, '
                                    'Acc: {:.4f}, Prec: {:.4f}, Reca: {:.4f}, '
                                    'Best_acc: {:.4f}; Time: {:.2f}s\n'
                                    .format(epoch + 1, args.max_epoch_num,
                                            v_meanloss, v_accuracy, v_precision, v_recall,
                                            curr_best_accuracy, time_cost))
                sys.stderr.flush()
        backbone.train()
        methylation_head.train()

        # Early stop once validation loss stops improving, but never before
        # args.min_epoch_num epochs have completed.
        if no_best_model and epoch >= args.min_epoch_num - 1:
            sys.stderr.write("training_process-{} early stop!\n".format(os.getpid()))
            break

        if args.epoch_sync:
            sync_ckpt = model_dir + args.model_type + \
                        f'.epoch_sync_node{args.node_rank}.b{args.seq_len}_p{args.offset}_epoch{epoch + 1}.ckpt'
            checkpoint({'backbone': backbone, 'methylation_head': methylation_head}, local_rank, sync_ckpt)

        # ReduceLROnPlateau steps on the validation loss; StepLR steps per epoch.
        if args.lr_scheduler == "ReduceLROnPlateau":
            lr_reduce_metric = v_meanloss
            scheduler_classifier.step(lr_reduce_metric)
        else:
            scheduler_classifier.step()

    if global_rank == 0:
        sys.stderr.write("best model is in epoch {} (Acc: {})\n".format(curr_best_accuracy_loc,
                                                                        curr_best_accuracy))
    clear_linecache()
    cleanup()

def train_worker_fits_pretrain(local_rank, global_world_size, args):
    global_rank = args.node_rank * args.ngpus_per_node + local_rank

    dist.init_process_group(
        backend="nccl",
        init_method=args.dist_url,
        world_size=global_world_size,
        rank=global_rank,
    )

    sys.stderr.write(f"training_process-{{}} [init] == local rank: {{}}, global rank: {{}} ==\n".format(os.getpid(), local_rank, global_rank))
    
    # Initialize model directory
    model_dir = args.model_dir
    if global_rank == 0:
        if model_dir != "/":
            model_dir = os.path.abspath(model_dir).rstrip("/")
            if local_rank == 0:
                if not os.path.exists(model_dir):
                    os.makedirs(model_dir)
                else:
                    model_regex = re.compile(r"" + args.model_type + r"\.pretrain_best_mse\.b\d+_s\d+_p\d+\.ckpt*")
                    for mfile in os.listdir(model_dir):
                        if model_regex.match(mfile) is not None:
                            os.remove(model_dir + "/" + mfile)
            model_dir += "/"

    # Initialize shared embedding
    shared_embedding = SharedEmbedding(vocab_size=args.n_vocab, embedding_size=args.n_embed)
    nn.init.normal_(shared_embedding.embedding.weight, mean=0, std=0.05)

    # Initialize backbone and signal prediction head
    backbone = SharedBackbone(
        seq_len=args.seq_len,
        signal_len=args.signal_len,
        pred_len=args.offset,
        embedding_size=args.n_embed,
    )
    signal_prediction_head = SignalPredictionHead(
        embedding_size=args.n_embed,
        channels=4 + args.n_embed,
        pred_len=args.offset,
        signal_len=args.signal_len,
        seq_len=args.seq_len,
    )

    if args.init_model is not None:
        sys.stderr.write(f"training_process-{{}} loading pre-trained model: {{}}\n".format(os.getpid(), args.init_model))
        para_dict = torch.load(args.init_model, map_location=torch.device('cpu'))
        backbone_dict = backbone.state_dict()
        backbone_dict.update({k: v for k, v in para_dict.items() if k in backbone_dict})
        backbone.load_state_dict(backbone_dict)

    dist.barrier()

    shared_embedding = shared_embedding.cuda(local_rank)
    backbone = backbone.cuda(local_rank)
    signal_prediction_head = signal_prediction_head.cuda(local_rank)
    shared_embedding = DDP(shared_embedding, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
    backbone = DDP(backbone, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
    signal_prediction_head = DDP(signal_prediction_head, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)

    sys.stderr.write(f"training_process-{{}} reading data..\n".format(os.getpid()))
    
    # Data loading
    train_linenum = count_line_num(args.train_file, False)
    train_offsets = generate_offsets(args.train_file)
    train_dataset = SignalFeaData1s(args.train_file, train_offsets, train_linenum)
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.dl_num_workers,
        pin_memory=True,
        sampler=train_sampler
    )

    valid_linenum = count_line_num(args.valid_file, False)
    valid_offsets = generate_offsets(args.valid_file)
    valid_dataset = SignalFeaData1s(args.valid_file, valid_offsets, valid_linenum)
    valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset, shuffle=True)
    valid_loader = torch.utils.data.DataLoader(
        dataset=valid_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.dl_num_workers,
        pin_memory=True,
        sampler=valid_sampler
    )

    mse_loss_fn = nn.MSELoss()
    main_params = list(shared_embedding.module.parameters()) + list(backbone.module.parameters()) + list(signal_prediction_head.module.parameters())

    for name, param in shared_embedding.module.named_parameters():
        sys.stderr.write(f"Parameter {name} is in main_params\n")
    for name, param in backbone.module.named_parameters():
        sys.stderr.write(f"Parameter {name} is in main_params\n")
    for name, param in signal_prediction_head.module.named_parameters():
        sys.stderr.write(f"Parameter {name} is in main_params\n")

    if args.optim_type == "Adam":
        main_optimizer = torch.optim.Adam(main_params, lr=args.lr)
    elif args.optim_type == "RMSprop":
        main_optimizer = torch.optim.RMSprop(main_params, lr=args.lr)
    elif args.optim_type == "SGD":
        main_optimizer = torch.optim.SGD(main_params, lr=args.lr, momentum=0.8)
    else:
        raise ValueError("optim_type is not right!")

    if args.lr_scheduler == "StepLR":
        scheduler_main = StepLR(main_optimizer, step_size=args.lr_decay_step, gamma=args.lr_decay)
    elif args.lr_scheduler == "ReduceLROnPlateau":
        scheduler_main = ReduceLROnPlateau(main_optimizer, mode='min', factor=args.lr_decay, patience=args.lr_patience, verbose=True)
    else:
        raise ValueError("--lr_scheduler is not right!")

    total_step = len(train_loader)
    sys.stderr.write(f"training_process-{{}} total_step: {{}}\n".format(os.getpid(), total_step))
    
    curr_lowest_mse = float('inf')
    no_best_model = True
    min_epoch_num = args.min_epoch_num  # Default to 5 if not provided
    max_epoch_num = args.max_epoch_num

    shared_embedding.train()
    backbone.train()
    signal_prediction_head.train()

    # Pre-training loop
    for epoch in range(max_epoch_num):
        train_loader.sampler.set_epoch(epoch)
        if global_rank == 0:
            sys.stderr.write(f"Pretrain Epoch [{epoch + 1}/{max_epoch_num}]; Main LR: {main_optimizer.param_groups[0]['lr']:.4e}\n")

        mse_losses = []
        start = time.time()
        for i, sfeatures in enumerate(train_loader):
            _, kmer, base_means, base_stds, base_signal_lens, signals, labels = sfeatures
            kmer = kmer.cuda(local_rank, non_blocking=True)
            base_means = base_means.cuda(local_rank, non_blocking=True).float()
            base_stds = base_stds.cuda(local_rank, non_blocking=True).float()
            base_signal_lens = base_signal_lens.cuda(local_rank, non_blocking=True).float()
            signals = signals.cuda(local_rank, non_blocking=True).float()

            expected_seq_len = args.seq_len + args.offset
            if signals.shape[1] != expected_seq_len or kmer.shape[1] != expected_seq_len:
                sys.stderr.write(f"Warning: Insufficient data length. signals.shape: {signals.shape}, kmer.shape: {kmer.shape}, expected seq_len: {expected_seq_len}. Skipping batch {i+1}\n")
                continue

            train_kmer = kmer[:, :args.seq_len]
            train_base_means = base_means[:, :args.seq_len]
            train_base_stds = base_stds[:, :args.seq_len]
            train_base_signal_lens = base_signal_lens[:, :args.seq_len]
            train_signals = signals[:, :args.seq_len, :]

            pred_kmer = kmer[:, args.seq_len:]
            pred_base_means = base_means[:, args.seq_len:]
            pred_base_stds = base_stds[:, args.seq_len:]
            pred_base_signal_lens = base_signal_lens[:, args.seq_len:]
            pred_signals = signals[:, args.seq_len:, :]

            if pred_kmer.shape[1] == 0:
                sys.stderr.write(f"Warning: Empty prediction data (offset={args.offset}). Using pseudo MSE for batch {i+1}\n")
                time_features, low_specxy_, xy, low_xy_adjusted = backbone(
                    shared_embedding, train_kmer, train_base_means, train_base_stds, train_base_signal_lens, train_signals
                )
                mse_loss = mse_loss_fn(xy[:, backbone.module.flatten_len:, :], torch.zeros_like(xy[:, backbone.module.flatten_len:, :]))
            else:
                pred_signals = pred_signals.reshape(pred_signals.shape[0], -1, 1)
                pred_kmer_embed = shared_embedding(pred_kmer)
                pred_kmer_embed = pred_kmer_embed.repeat_interleave(args.signal_len, dim=1)
                pred_base_means = pred_base_means.repeat_interleave(args.signal_len, dim=1).unsqueeze(-1)
                pred_base_stds = pred_base_stds.repeat_interleave(args.signal_len, dim=1).unsqueeze(-1)
                pred_base_signal_lens = pred_base_signal_lens.repeat_interleave(args.signal_len, dim=1).unsqueeze(-1)
                ground_truth_pred = torch.cat([pred_signals, pred_base_means, pred_base_stds, pred_base_signal_lens, pred_kmer_embed], dim=-1)
                # sys.stderr.write(f"ground_truth_pred shape: {ground_truth_pred.shape}\n")

                main_optimizer.zero_grad()
                time_features, low_specxy_, xy, low_xy_adjusted = backbone(
                    shared_embedding, train_kmer, train_base_means, train_base_stds, train_base_signal_lens, train_signals
                )
                pred_xy = signal_prediction_head(shared_embedding, (time_features, low_specxy_), pred_kmer)[:, backbone.module.flatten_len:, :]
                # sys.stderr.write(f"pred_xy shape: {pred_xy.shape}\n")
                mse_loss = mse_loss_fn(pred_xy, ground_truth_pred)
            mse_loss.backward()
            torch.nn.utils.clip_grad_norm_(main_params, 5.0)
            main_optimizer.step()

            mse_losses.append(mse_loss.item())
            if global_rank == 0 and ((i + 1) % args.step_interval == 0 or (i + 1) == total_step):
                time_cost = time.time() - start
                sys.stderr.write(
                    f"Pretrain Epoch [{epoch + 1}/{max_epoch_num}], Step [{i + 1}/{total_step}]; "
                    f"MSELoss: {np.mean(mse_losses):.4f}; Time: {time_cost:.2f}s\n"
                )
                sys.stderr.flush()
                mse_losses = []
                start = time.time()

        # Validation for pre-training
        backbone.eval()
        signal_prediction_head.eval()
        shared_embedding.eval()
        with torch.no_grad():
            vmse_losses = []
            for vi, vsfeatures in enumerate(valid_loader):
                _, vkmer, vbase_means, vbase_stds, vbase_signal_lens, vsignals, vlabels = vsfeatures
                vkmer = vkmer.cuda(local_rank, non_blocking=True)
                vbase_means = vbase_means.cuda(local_rank, non_blocking=True).float()
                vbase_stds = vbase_stds.cuda(local_rank, non_blocking=True).float()
                vbase_signal_lens = vbase_signal_lens.cuda(local_rank, non_blocking=True).float()
                vsignals = vsignals.cuda(local_rank, non_blocking=True).float()

                if vsignals.shape[1] != expected_seq_len or vkmer.shape[1] != expected_seq_len:
                    sys.stderr.write(f"Warning: Insufficient validation data length. vsignals.shape: {vsignals.shape}, vkmer.shape: {vkmer.shape}, expected seq_len: {expected_seq_len}. Skipping batch {vi+1}\n")
                    continue

                vtrain_kmer = vkmer[:, :args.seq_len]
                vtrain_base_means = vbase_means[:, :args.seq_len]
                vtrain_base_stds = vbase_stds[:, :args.seq_len]
                vtrain_base_signal_lens = vbase_signal_lens[:, :args.seq_len]
                vtrain_signals = vsignals[:, :args.seq_len, :]

                vpred_kmer = vkmer[:, args.seq_len:]
                vpred_base_means = vbase_means[:, args.seq_len:]
                vpred_base_stds = vbase_stds[:, args.seq_len:]
                vpred_base_signal_lens = vbase_signal_lens[:, args.seq_len:]
                vpred_signals = vsignals[:, args.seq_len:, :]

                if vpred_kmer.shape[1] == 0:
                    sys.stderr.write(f"Warning: Empty validation prediction data (offset={args.offset}). Skipping MSE loss for batch {vi+1}\n")
                    vmse_loss = torch.tensor(0.0, device=vsignals.device)
                else:
                    vpred_signals = vpred_signals.reshape(vpred_signals.shape[0], -1, 1)
                    vpred_kmer_embed = shared_embedding(vpred_kmer)
                    vpred_kmer_embed = vpred_kmer_embed.repeat_interleave(args.signal_len, dim=1)
                    vpred_base_means = vpred_base_means.repeat_interleave(args.signal_len, dim=1).unsqueeze(-1)
                    vpred_base_stds = vpred_base_stds.repeat_interleave(args.signal_len, dim=1).unsqueeze(-1)
                    vpred_base_signal_lens = vpred_base_signal_lens.repeat_interleave(args.signal_len, dim=1).unsqueeze(-1)
                    vground_truth_pred = torch.cat([vpred_signals, vpred_base_means, vpred_base_stds, vpred_base_signal_lens, vpred_kmer_embed], dim=-1)

                    vtime_features, vlow_specxy_, vxy, vlow_xy_adjusted = backbone(
                        shared_embedding, vtrain_kmer, vtrain_base_means, vtrain_base_stds, vtrain_base_signal_lens, vtrain_signals
                    )
                    vpred_xy = signal_prediction_head(shared_embedding, (vtime_features, vlow_specxy_), vpred_kmer)[:, backbone.module.flatten_len:, :]
                    vmse_loss = mse_loss_fn(vpred_xy, vground_truth_pred)

                    # if (vi + 1) % 100 == 0:
                    #     for c in range(vground_truth_pred.shape[2]):
                    #         channel_mse = mse_loss_fn(vpred_xy[:, :, c], vground_truth_pred[:, :, c])
                    #         sys.stderr.write(f"Pretrain Validation Batch {vi+1}, Channel {c} MSE: {channel_mse.item()}\n")

                vmse_losses.append(vmse_loss.item())

            v_mean_mse = np.mean(vmse_losses)
            if global_rank == 0:
                no_best_model = True
                if v_mean_mse < curr_lowest_mse:
                    curr_lowest_mse = v_mean_mse
                    no_best_model = False
                    torch.save(
                        {
                            'shared_embedding': shared_embedding.module.state_dict(),
                            'backbone': backbone.module.state_dict(),
                            'signal_prediction_head': signal_prediction_head.module.state_dict()
                        },
                        model_dir + args.model_type + f'.pretrain_best_mse.b{args.seq_len}_s{args.signal_len}_p{args.offset}.ckpt'
                    )
                sys.stderr.write(f"Pretrain Epoch [{epoch + 1}/{max_epoch_num}]; ValidMSELoss: {v_mean_mse:.4f}, No Best Model: {no_best_model}\n")

        if args.lr_scheduler == "ReduceLROnPlateau":
            scheduler_main.step(v_mean_mse)
        else:
            scheduler_main.step()

        # Early stopping
        if no_best_model and epoch >= min_epoch_num - 1:
            if global_rank == 0:
                sys.stderr.write(f"training_process-{{}} early stop at epoch {epoch + 1} due to no improvement in ValidMSELoss\n".format(os.getpid()))
            dist.barrier()
            break

    clear_linecache()
    cleanup()
    
def train_worker_fits_finetune(local_rank, global_world_size, args):
    """DDP worker: fine-tune a pre-trained SharedEmbedding + SharedBackbone
    with a freshly initialized MethylationHead for methylation classification.

    Loads the best-MSE pretraining checkpoint when present, then trains the
    embedding/backbone (main optimizer, lr=args.lr) jointly with the head
    (classifier optimizer, lr=args.lr/2). Validates every epoch, checkpoints
    on accuracy improvements, and early-stops once validation loss stops
    improving after args.min_epoch_num epochs.

    Args:
        local_rank: GPU index of this process on its node.
        global_world_size: total number of processes across all nodes.
        args: parsed CLI namespace with model/data/optimizer/scheduler options.
    """
    global_rank = args.node_rank * args.ngpus_per_node + local_rank

    dist.init_process_group(
        backend="nccl",
        init_method=args.dist_url,
        world_size=global_world_size,
        rank=global_rank,
    )

    sys.stderr.write(f"training_process-{{}} [init] == local rank: {{}}, global rank: {{}} ==\n".format(os.getpid(), local_rank, global_rank))

    # Initialize model directory.
    # FIX: normalize model_dir on ALL ranks (previously only global_rank 0),
    # so every rank builds the same pretrain checkpoint path below. Only the
    # first process actually creates/cleans the directory.
    model_dir = args.model_dir
    if model_dir != "/":
        model_dir = os.path.abspath(model_dir).rstrip("/")
        if global_rank == 0 and local_rank == 0:
            if not os.path.exists(model_dir):
                os.makedirs(model_dir)
            else:
                # Remove stale per-epoch checkpoints left by a previous run.
                model_regex = re.compile(r"" + args.model_type + r"\.b\d+_s\d+_p\d+_epoch\d+\.ckpt*")
                for mfile in os.listdir(model_dir):
                    if model_regex.match(mfile) is not None:
                        os.remove(model_dir + "/" + mfile)
        model_dir += "/"

    # Initialize models
    shared_embedding = SharedEmbedding(vocab_size=args.n_vocab, embedding_size=args.n_embed)
    nn.init.normal_(shared_embedding.embedding.weight, mean=0, std=0.05)
    backbone = SharedBackbone(
        seq_len=args.seq_len,
        signal_len=args.signal_len,
        pred_len=args.offset,
        embedding_size=args.n_embed,
    )
    methylation_head = MethylationHead(
        channels=4 + args.n_embed,
        num_classes=args.class_num,
    )

    # Load pre-trained embedding/backbone weights if the pretraining stage
    # produced a checkpoint; otherwise keep the fresh initialization (DDP
    # construction below broadcasts rank 0's weights to all ranks anyway).
    pretrain_model_path = model_dir + args.model_type + f'.pretrain_best_mse.b{args.seq_len}_s{args.signal_len}_p{args.offset}.ckpt'
    if os.path.exists(pretrain_model_path):
        sys.stderr.write(f"training_process-{{}} loading pre-trained models: {{}}\n".format(os.getpid(), pretrain_model_path))
        para_dict = torch.load(pretrain_model_path, map_location=torch.device('cuda', local_rank))
        shared_embedding.load_state_dict(para_dict['shared_embedding'])
        backbone.load_state_dict(para_dict['backbone'])
    else:
        sys.stderr.write(f"Warning: Pre-trained model {pretrain_model_path} not found, using initialized models\n")

    # Optionally warm-start the methylation head from an external checkpoint,
    # copying only keys that exist in the head's own state dict.
    if args.init_model is not None:
        sys.stderr.write(f"training_process-{{}} loading pre-trained methylation_head: {{}}\n".format(os.getpid(), args.init_model))
        para_dict = torch.load(args.init_model, map_location=torch.device('cuda', local_rank))
        methylation_dict = methylation_head.state_dict()
        methylation_dict.update({k: v for k, v in para_dict.items() if k in methylation_dict})
        methylation_head.load_state_dict(methylation_dict)

    dist.barrier()

    shared_embedding = shared_embedding.cuda(local_rank)
    backbone = backbone.cuda(local_rank)
    methylation_head = methylation_head.cuda(local_rank)
    shared_embedding = DDP(shared_embedding, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
    backbone = DDP(backbone, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
    methylation_head = DDP(methylation_head, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)

    sys.stderr.write(f"training_process-{{}} reading data..\n".format(os.getpid()))

    # Data loading. DistributedSampler handles the shuffling, hence the
    # loaders themselves use shuffle=False.
    train_linenum = count_line_num(args.train_file, False)
    train_offsets = generate_offsets(args.train_file)
    train_dataset = SignalFeaData1s(args.train_file, train_offsets, train_linenum)
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.dl_num_workers,
        pin_memory=True,
        sampler=train_sampler
    )

    valid_linenum = count_line_num(args.valid_file, False)
    valid_offsets = generate_offsets(args.valid_file)
    valid_dataset = SignalFeaData1s(args.valid_file, valid_offsets, valid_linenum)
    valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset, shuffle=True)
    valid_loader = torch.utils.data.DataLoader(
        dataset=valid_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.dl_num_workers,
        pin_memory=True,
        sampler=valid_sampler
    )

    # Class-weighted cross-entropy: args.pos_weight up-weights the positive
    # (methylated) class.
    weight_rank = torch.from_numpy(np.array([1, args.pos_weight])).float().cuda(local_rank)
    criterion = nn.CrossEntropyLoss(weight=weight_rank)

    # Two parameter groups: embedding+backbone ("main") vs classification head.
    main_params = list(shared_embedding.module.parameters()) + list(backbone.module.parameters())
    classifier_params = list(methylation_head.module.parameters())

    for name, param in shared_embedding.module.named_parameters():
        sys.stderr.write(f"Parameter {name} is in main_params\n")
    for name, param in backbone.module.named_parameters():
        sys.stderr.write(f"Parameter {name} is in main_params\n")
    for name, param in methylation_head.module.named_parameters():
        sys.stderr.write(f"Parameter {name} is in classifier_params\n")

    # The head trains at half the main learning rate.
    if args.optim_type == "Adam":
        main_optimizer = torch.optim.Adam(main_params, lr=args.lr)
        classifier_optimizer = torch.optim.Adam(classifier_params, lr=args.lr / 2)
    elif args.optim_type == "RMSprop":
        main_optimizer = torch.optim.RMSprop(main_params, lr=args.lr)
        classifier_optimizer = torch.optim.RMSprop(classifier_params, lr=args.lr / 2)
    elif args.optim_type == "SGD":
        main_optimizer = torch.optim.SGD(main_params, lr=args.lr, momentum=0.8)
        classifier_optimizer = torch.optim.SGD(classifier_params, lr=args.lr / 2, momentum=0.8)
    else:
        raise ValueError("optim_type is not right!")

    if args.lr_scheduler == "StepLR":
        scheduler_main = StepLR(main_optimizer, step_size=args.lr_decay_step, gamma=args.lr_decay)
        scheduler_classifier = StepLR(classifier_optimizer, step_size=args.lr_decay_step, gamma=args.lr_decay)
    elif args.lr_scheduler == "ReduceLROnPlateau":
        scheduler_main = ReduceLROnPlateau(main_optimizer, mode='min', factor=args.lr_decay,
                                           patience=args.lr_patience, verbose=True)
        scheduler_classifier = ReduceLROnPlateau(classifier_optimizer, mode='min', factor=args.lr_decay,
                                                 patience=args.lr_patience, verbose=True)
    else:
        raise ValueError("--lr_scheduler is not right!")

    total_step = len(train_loader)
    sys.stderr.write(f"training_process-{{}} total_step: {{}}\n".format(os.getpid(), total_step))
    curr_best_accuracy = 0
    curr_best_accuracy_loc = 0
    curr_lowest_loss = 10000
    v_accuracy_epoches = []

    # FIX: hoisted out of the batch loop — it is constant, and the validation
    # loop also reads it (previously a NameError if the train loader yielded
    # no batches). Each sample carries seq_len bases for classification plus
    # `offset` extra bases used only by the pretraining objective.
    expected_seq_len = args.seq_len + args.offset

    shared_embedding.train()
    backbone.train()
    methylation_head.train()

    # Fine-tuning loop
    for epoch in range(args.max_epoch_num):
        train_loader.sampler.set_epoch(epoch)
        no_best_model = True
        tlosses = []
        start = time.time()
        for i, sfeatures in enumerate(train_loader):
            _, kmer, base_means, base_stds, base_signal_lens, signals, labels = sfeatures
            kmer = kmer.cuda(local_rank, non_blocking=True)
            base_means = base_means.cuda(local_rank, non_blocking=True).float()
            base_stds = base_stds.cuda(local_rank, non_blocking=True).float()
            base_signal_lens = base_signal_lens.cuda(local_rank, non_blocking=True).float()
            signals = signals.cuda(local_rank, non_blocking=True).float()
            labels = labels.cuda(local_rank, non_blocking=True).long()

            # Skip malformed batches rather than crashing mid-epoch.
            if signals.shape[1] != expected_seq_len or kmer.shape[1] != expected_seq_len:
                sys.stderr.write(f"Warning: Insufficient data length. signals.shape: {signals.shape}, kmer.shape: {kmer.shape}, expected seq_len: {expected_seq_len}. Skipping batch {i+1}\n")
                continue

            # Only the first seq_len positions feed the classifier; the
            # trailing `offset` positions belong to the pretraining task.
            train_kmer = kmer[:, :args.seq_len]
            train_base_means = base_means[:, :args.seq_len]
            train_base_stds = base_stds[:, :args.seq_len]
            train_base_signal_lens = base_signal_lens[:, :args.seq_len]
            train_signals = signals[:, :args.seq_len, :]

            main_optimizer.zero_grad()
            classifier_optimizer.zero_grad()

            time_features, low_specxy_, xy, low_xy_adjusted = backbone(
                shared_embedding, train_kmer, train_base_means, train_base_stds, train_base_signal_lens, train_signals
            )
            out, softmax_out = methylation_head(time_features, low_specxy_)
            loss = criterion(out, labels)

            loss.backward()
            torch.nn.utils.clip_grad_norm_(main_params, 2.0)
            torch.nn.utils.clip_grad_norm_(classifier_params, 2.0)
            main_optimizer.step()
            classifier_optimizer.step()

            tlosses.append(loss.item())
            if global_rank == 0 and ((i + 1) % args.step_interval == 0 or (i + 1) == total_step):
                time_cost = time.time() - start
                sys.stderr.write(
                    f"Finetune Epoch [{epoch + 1}/{args.max_epoch_num}], Step [{i + 1}/{total_step}]; "
                    f"Loss: {np.mean(tlosses):.4f}; Time: {time_cost:.2f}s\n"
                )
                sys.stderr.flush()
                start = time.time()
                tlosses = []

        # Validation for fine-tuning
        shared_embedding.eval()
        backbone.eval()
        methylation_head.eval()
        with torch.no_grad():
            vlosses = []
            vlabels_total, vpredicted_total = [], []
            v_meanloss = 10000
            for vi, vsfeatures in enumerate(valid_loader):
                _, vkmer, vbase_means, vbase_stds, vbase_signal_lens, vsignals, vlabels = vsfeatures
                vkmer = vkmer.cuda(local_rank, non_blocking=True)
                vbase_means = vbase_means.cuda(local_rank, non_blocking=True).float()
                vbase_stds = vbase_stds.cuda(local_rank, non_blocking=True).float()
                vbase_signal_lens = vbase_signal_lens.cuda(local_rank, non_blocking=True).float()
                vsignals = vsignals.cuda(local_rank, non_blocking=True).float()
                vlabels = vlabels.cuda(local_rank, non_blocking=True).long()

                if vsignals.shape[1] != expected_seq_len or vkmer.shape[1] != expected_seq_len:
                    sys.stderr.write(f"Warning: Insufficient validation data length. vsignals.shape: {vsignals.shape}, vkmer.shape: {vkmer.shape}, expected seq_len: {expected_seq_len}. Skipping batch {vi+1}\n")
                    continue

                vtrain_kmer = vkmer[:, :args.seq_len]
                vtrain_base_means = vbase_means[:, :args.seq_len]
                vtrain_base_stds = vbase_stds[:, :args.seq_len]
                vtrain_base_signal_lens = vbase_signal_lens[:, :args.seq_len]
                vtrain_signals = vsignals[:, :args.seq_len, :]

                vtime_features, vlow_specxy_, vxy, vlow_xy_adjusted = backbone(
                    shared_embedding, vtrain_kmer, vtrain_base_means, vtrain_base_stds, vtrain_base_signal_lens, vtrain_signals
                )
                vout, vsoftmax_out = methylation_head(vtime_features, vlow_specxy_)
                vloss = criterion(vout, vlabels)
                # Average the loss across ranks so the early-stop decision
                # below is identical on every process.
                vloss = reduce_mean(vloss, global_world_size)

                _, vpredicted = torch.max(vsoftmax_out.data, 1)
                vlabels = vlabels.cpu()
                vpredicted = vpredicted.cpu()

                vlosses.append(vloss.item())
                vlabels_total += vlabels.tolist()
                vpredicted_total += vpredicted.tolist()

            # NOTE(review): metrics here are computed on this rank's shard of
            # the validation set only (labels/predictions are not all-gathered).
            v_accuracy = metrics.accuracy_score(vlabels_total, vpredicted_total)
            v_precision = metrics.precision_score(vlabels_total, vpredicted_total, zero_division=0)
            v_recall = metrics.recall_score(vlabels_total, vpredicted_total, zero_division=0)
            v_meanloss = np.mean(vlosses)

            # Save whenever accuracy is within 1e-4 of the best so far
            # (tolerates tiny fluctuations around the current best).
            if v_accuracy > curr_best_accuracy - 0.0001 and global_rank == 0:
                torch.save(
                    {
                        'shared_embedding': shared_embedding.module.state_dict(),
                        'backbone': backbone.module.state_dict(),
                        'methylation_head': methylation_head.module.state_dict()
                    },
                    model_dir + args.model_type + f'.b{args.seq_len}_s{args.signal_len}_p{args.offset}_epoch{epoch + 1}.ckpt'
                )
                if v_accuracy > curr_best_accuracy:
                    curr_best_accuracy = v_accuracy
                    curr_best_accuracy_loc = epoch + 1

                # Additionally tag checkpoints that beat the previous epoch.
                if len(v_accuracy_epoches) > 0 and v_accuracy > v_accuracy_epoches[-1]:
                    torch.save(
                        {
                            'shared_embedding': shared_embedding.module.state_dict(),
                            'backbone': backbone.module.state_dict(),
                            'methylation_head': methylation_head.module.state_dict()
                        },
                        model_dir + args.model_type + f'.betterthanlast.b{args.seq_len}_s{args.signal_len}_p{args.offset}_epoch{epoch + 1}.ckpt'
                    )
            if v_meanloss < curr_lowest_loss:
                curr_lowest_loss = v_meanloss
                no_best_model = False

            v_accuracy_epoches.append(v_accuracy)

            time_cost = time.time() - start
            if global_rank == 0:
                # ReduceLROnPlateau has no get_last_lr() in older torch
                # versions, hence the fallback without the LR field.
                try:
                    last_lr = scheduler_classifier.get_last_lr()[0]
                    sys.stderr.write(
                        f"Finetune Epoch [{epoch + 1}/{args.max_epoch_num}]; LR: {last_lr:.4e}; "
                        f"ValidLoss: {v_meanloss:.4f}, "
                        f"Acc: {v_accuracy:.4f}, Prec: {v_precision:.4f}, Reca: {v_recall:.4f}, "
                        f"Best_acc: {curr_best_accuracy:.4f}; Time: {time_cost:.2f}s\n"
                    )
                except Exception:
                    sys.stderr.write(
                        f"Finetune Epoch [{epoch + 1}/{args.max_epoch_num}]; "
                        f"ValidLoss: {v_meanloss:.4f}, "
                        f"Acc: {v_accuracy:.4f}, Prec: {v_precision:.4f}, Reca: {v_recall:.4f}, "
                        f"Best_acc: {curr_best_accuracy:.4f}; Time: {time_cost:.2f}s\n"
                    )
                sys.stderr.flush()

        shared_embedding.train()
        backbone.train()
        methylation_head.train()

        # Early stop: every rank reaches the same decision because vloss was
        # all-reduced above.
        if no_best_model and epoch >= args.min_epoch_num - 1:
            if global_rank == 0:
                sys.stderr.write(f"training_process-{{}} early stop at epoch {epoch + 1} due to no improvement in ValidLoss\n".format(os.getpid()))
            dist.barrier()
            break

        if args.epoch_sync:
            sync_ckpt = model_dir + args.model_type + \
                        f'.epoch_sync_node{args.node_rank}.b{args.seq_len}_p{args.offset}_epoch{epoch + 1}.ckpt'
            # NOTE(review): checkpoint() is given a dict of DDP modules here,
            # but its signature (model, gpu, model_save_path) suggests a single
            # model — confirm it handles a dict.
            checkpoint(
                {
                    'shared_embedding': shared_embedding,
                    'backbone': backbone,
                    'methylation_head': methylation_head
                },
                local_rank,
                sync_ckpt
            )

        if args.lr_scheduler == "ReduceLROnPlateau":
            scheduler_main.step(v_meanloss)
            scheduler_classifier.step(v_meanloss)
        else:
            scheduler_main.step()
            scheduler_classifier.step()

    if global_rank == 0:
        sys.stderr.write(f"Best fine-tuned model is in epoch {curr_best_accuracy_loc} (Acc: {curr_best_accuracy})\n")
    clear_linecache()
    cleanup()

def train_worker_mtm_stack(local_rank, global_world_size, args):
    """DDP training worker adapted for the MTM model (stacked-channel layout).

    Feature layout: the S raw signal points of each base are treated as part
    of the per-step channel features instead of being flattened into a longer
    time series. The input sequence is cropped to
    [args.bias, args.bias + args.seq_len).

    Args:
        local_rank: GPU index of this process on its node.
        global_world_size: total number of processes across all nodes.
        args: parsed CLI namespace with model/data/optimizer/scheduler options.
    """
    global_rank = args.node_rank * args.ngpus_per_node + local_rank

    # Initialize distributed training.
    dist.init_process_group(
        backend="nccl",
        init_method=args.dist_url,
        world_size=global_world_size,
        rank=global_rank,
        timeout=timedelta(minutes=30)
    )

    sys.stderr.write("训练进程-{} [初始化] == 本地 rank: {}, 全局 rank: {} ==\n".format(os.getpid(), local_rank, global_rank))

    # Model directory initialization. Rank 0 creates/cleans the directory;
    # epoch_sync ranks also need the normalized path for their per-epoch
    # sync checkpoints.
    if global_rank == 0 or args.epoch_sync:
        model_dir = args.model_dir
        if model_dir != "/":
            model_dir = os.path.abspath(model_dir).rstrip("/")
            if local_rank == 0:
                if not os.path.exists(model_dir):
                    os.makedirs(model_dir)
                else:
                    # Remove stale per-epoch checkpoints from a previous run.
                    model_regex = re.compile(
                        r"" + args.model_type + r"\.b\d+_s\d+_p\d+_epoch\d+\.ckpt*"
                    )
                    for mfile in os.listdir(model_dir):
                        if model_regex.match(mfile) is not None:
                            os.remove(model_dir + "/" + mfile)
            model_dir += "/"

    # Initialize the MTM model.
    # Input channel count = signal length (S) + kmer embedding size (E);
    # the MTM model is assumed to embed the kmer indices internally.
    num_chn = args.signal_len + args.n_embed

    model = MTM(
        num_chn=num_chn,
        d_static=args.mtm_d_static,
        num_cls=args.class_num,
        ratios=args.mtm_ratios,
        d_model=args.hid_rnn,
        r_hid=args.mtm_r_hid,
        drop=args.dropout_rate,
        norm_first=args.mtm_norm_first,
        down_mode=args.mtm_down_mode,
        vocab_size=args.n_vocab,
        embedding_size=args.n_embed,
        use_channel_attn=args.mtm_use_channel,
        use_mixer=args.mtm_use_mixer,
        moe=args.mtm_moe
    )

    # Move to GPU and wrap with DDP.
    model = model.cuda(local_rank)
    model = DDP(model, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)

    sys.stderr.write("训练进程-{} 读取数据..\n".format(os.getpid()))

    # Data loading. DistributedSampler handles the shuffling, hence the
    # loaders themselves use shuffle=False.
    train_linenum = count_line_num(args.train_file, False)
    train_offsets = generate_offsets(args.train_file)
    train_dataset = SignalFeaData1s(args.train_file, train_offsets, train_linenum)
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.dl_num_workers,
                                             pin_memory=True,
                                             sampler=train_sampler)

    valid_linenum = count_line_num(args.valid_file, False)
    valid_offsets = generate_offsets(args.valid_file)
    valid_dataset = SignalFeaData1s(args.valid_file, valid_offsets, valid_linenum)
    valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset, shuffle=True)
    valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.dl_num_workers,
                                             pin_memory=True,
                                             sampler=valid_sampler)

    # Class-weighted cross-entropy: args.pos_weight up-weights the positive class.
    weight_rank = torch.from_numpy(np.array([1, args.pos_weight])).float().cuda(local_rank)
    criterion = nn.CrossEntropyLoss(weight=weight_rank)

    # Single optimizer over all model parameters.
    all_params = list(model.module.parameters())
    if args.optim_type == "Adam":
        optimizer = torch.optim.Adam(all_params, lr=args.lr)
    elif args.optim_type == "RMSprop":
        optimizer = torch.optim.RMSprop(all_params, lr=args.lr)
    elif args.optim_type == "SGD":
        optimizer = torch.optim.SGD(all_params, lr=args.lr, momentum=0.8)
    else:
        raise ValueError("优化器类型错误！")

    # Learning-rate scheduler.
    if args.lr_scheduler == "StepLR":
        scheduler = StepLR(optimizer, step_size=args.lr_decay_step, gamma=args.lr_decay)
    elif args.lr_scheduler == "ReduceLROnPlateau":
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=args.lr_decay,
                                     patience=args.lr_patience, verbose=True)
    else:
        raise ValueError("学习率调度器类型错误！")

    total_step = len(train_loader)
    sys.stderr.write("训练进程-{} 总步数: {}\n".format(os.getpid(), total_step))
    curr_best_accuracy = 0
    curr_best_accuracy_loc = 0
    curr_lowest_loss = 10000
    v_accuracy_epoches = []
    model.train()

    patience = 3
    no_improve_count = 0

    # FIX: hoisted out of the batch loops — the sequence length is constant,
    # and the validation loop also reads `seq_len` (previously it was only
    # bound inside the training loop: NameError if that loop yielded no
    # batches). The unused `signal_len` local was dropped.
    seq_len = args.seq_len  # T, e.g. 21

    # Training loop
    for epoch in range(args.max_epoch_num):
        train_loader.sampler.set_epoch(epoch)

        no_best_model = True
        tlosses = []
        start = time.time()
        for i, sfeatures in enumerate(train_loader):
            # kmer: (B, 41); signals: (B, 41, S)
            # NOTE(review): this loader yields 8 fields (incl. tags), while
            # other workers in this file unpack 7 from SignalFeaData1s —
            # confirm the dataset configuration matches.
            _, kmer, _, _, _, signals, labels,tags = sfeatures

            # Crop the sequence to args.seq_len positions starting at args.bias.
            train_kmer = kmer[:, args.bias:args.seq_len+args.bias] # (B, T)
            train_signals = signals[:, args.bias:args.seq_len+args.bias, :] # (B, T, S), S=args.signal_len

            # Move to CUDA.
            train_kmer = train_kmer.cuda(local_rank, non_blocking=True).long()
            train_signals = train_signals.cuda(local_rank, non_blocking=True).float()
            labels = labels.cuda(local_rank, non_blocking=True).long()
            tags = tags.cuda(local_rank, non_blocking=True).long()

            # Data preprocessing (stacked-channel layout).
            batch_size = train_signals.shape[0]

            # signals: (B, T, S) — the S signal points act as channel features.
            signals = train_signals
            # kmer: (B, T) — raw kmer indices, embedded inside the model.
            kmer = train_kmer

            # Build the feature mask x_mask: (B, T, F), F = S + E.
            # Only NaNs in the signals are masked; the kmer-embedding part of
            # the mask is all False.
            signal_mask = torch.isnan(signals) # (B, T, S)
            kmer_dim = args.n_embed
            false_mask = torch.zeros((batch_size, seq_len, kmer_dim), dtype=torch.bool, device=signals.device)
            x_mask = torch.cat([signal_mask, false_mask], dim=-1) # (B, T, S+E)

            # Time indices t: (B, T).
            t = torch.arange(seq_len, device=signals.device).repeat(batch_size, 1) # (B, T)

            # Static features x_static: (B, D_static).
            x_static = tags.unsqueeze(-1)

            # Forward pass.
            optimizer.zero_grad()
            outputs = model(signals, kmer, x_mask, t, x_static,args.mtm_use_channel,args.mtm_use_mixer,args.mtm_moe) # (B, num_cls)
            loss = criterion(outputs, labels)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(all_params, 2.0)
            optimizer.step()

            tlosses.append(loss.detach().item())
            if global_rank == 0 and ((i + 1) % args.step_interval == 0 or (i + 1) == total_step):
                time_cost = time.time() - start
                sys.stderr.write(
                    f"轮次 [{epoch + 1}/{args.max_epoch_num}]，步数 [{i + 1}/{total_step}];"
                    f"损失: {np.mean(tlosses):.4f}；时间: {time_cost:.2f}s\n"
                )
                sys.stderr.flush()
                start = time.time()
                tlosses = []

        # Validation loop
        model.eval()
        with torch.no_grad():
            vlosses, vlabels_total, vpredicted_total = [], [], []
            v_meanloss = 10000
            for vi, vsfeatures in enumerate(valid_loader):
                _, vkmer, _, _, _, vsignals, vlabels,vtags = vsfeatures

                # Crop the sequence to args.seq_len positions starting at args.bias.
                vtrain_kmer = vkmer[:, args.bias:args.seq_len+args.bias] # (B, T)
                vtrain_signals = vsignals[:, args.bias:args.seq_len+args.bias, :] # (B, T, S)

                # Move to CUDA.
                vtrain_kmer = vtrain_kmer.cuda(local_rank, non_blocking=True).long()
                vtrain_signals = vtrain_signals.cuda(local_rank, non_blocking=True).float()
                vlabels = vlabels.cuda(local_rank, non_blocking=True).long()
                vtags =vtags.cuda(local_rank, non_blocking=True).long()

                # Validation preprocessing (same stacked-channel layout).
                batch_size = vtrain_signals.shape[0]
                # vsignals: (B, T, S) — S as channel features.
                vsignals = vtrain_signals
                # vkmer: (B, T) — raw kmer indices.
                vkmer = vtrain_kmer

                # Build the mask vx_mask: (B, T, F), F = S + E.
                vsignal_mask = torch.isnan(vsignals) # (B, T, S)
                kmer_dim = args.n_embed
                vfalse_mask = torch.zeros((batch_size, seq_len, kmer_dim), dtype=torch.bool, device=vsignals.device)
                vx_mask = torch.cat([vsignal_mask, vfalse_mask], dim=-1) # (B, T, S+E)

                # Time indices vt: (B, T).
                vt = torch.arange(seq_len, device=vsignals.device).repeat(batch_size, 1) # (B, T)

                # Static features vx_static: (B, D_static).
                vx_static = vtags.unsqueeze(-1)

                # Validation forward pass.
                voutputs = model(vsignals, vkmer, vx_mask, vt, vx_static,args.mtm_use_channel,args.mtm_use_mixer,args.mtm_moe) # (B, num_cls)
                vloss = criterion(voutputs, vlabels)
                # All-reduce so every rank sees the same mean loss (keeps the
                # early-stop decision below consistent across ranks).
                vloss = reduce_mean(vloss, global_world_size)

                _, vpredicted = torch.max(voutputs.data, 1)
                vlabels = vlabels.cpu()
                vpredicted = vpredicted.cpu()

                vlosses.append(vloss.item())
                vlabels_total += vlabels.tolist()
                vpredicted_total += vpredicted.tolist()

            # NOTE(review): metrics are computed on this rank's validation
            # shard only (labels/predictions are not all-gathered).
            v_accuracy = metrics.accuracy_score(vlabels_total, vpredicted_total)
            v_precision = metrics.precision_score(vlabels_total, vpredicted_total, zero_division=0)
            v_recall = metrics.recall_score(vlabels_total, vpredicted_total, zero_division=0)
            v_meanloss = np.mean(vlosses)

            # Checkpointing: save whenever accuracy is within 1e-4 of the best.
            if v_accuracy > curr_best_accuracy - 0.0001 and global_rank == 0:
                torch.save(
                    model.module.state_dict(),
                    model_dir + args.model_type + f'.b{args.seq_len}_s{args.signal_len}_p{args.offset}_epoch{epoch + 1}.ckpt'
                )
                if v_accuracy > curr_best_accuracy:
                    curr_best_accuracy = v_accuracy
                    curr_best_accuracy_loc = epoch + 1

                # Additionally tag checkpoints that beat the previous epoch.
                if len(v_accuracy_epoches) > 0 and v_accuracy > v_accuracy_epoches[-1]:
                    torch.save(
                        model.module.state_dict(),
                        model_dir + args.model_type + f'.betterthanlast.b{args.seq_len}_s{args.signal_len}_p{args.offset}_epoch{epoch + 1}.ckpt'
                    )

            v_accuracy_epoches.append(v_accuracy)

            time_cost = time.time() - start
            if global_rank == 0:
                # ReduceLROnPlateau has no get_last_lr() in older torch
                # versions, hence the fallback without the LR field.
                try:
                    last_lr = scheduler.get_last_lr()[0]
                    sys.stderr.write(
                        f"轮次 [{epoch + 1}/{args.max_epoch_num}]；学习率: {last_lr:.4e};"
                        f"验证损失: {v_meanloss:.4f}, "
                        f"准确率: {v_accuracy:.4f}, 精确率: {v_precision:.4f}, 召回率: {v_recall:.4f}, "
                        f"最佳准确率: {curr_best_accuracy:.4f}；时间: {time_cost:.2f}s\n"
                    )
                except Exception:
                    sys.stderr.write(
                        f"轮次 [{epoch + 1}/{args.max_epoch_num}];"
                        f"验证损失: {v_meanloss:.4f}, "
                        f"准确率: {v_accuracy:.4f}, 精确率: {v_precision:.4f}, 召回率: {v_recall:.4f}, "
                        f"最佳准确率: {curr_best_accuracy:.4f}；时间: {time_cost:.2f}s\n"
                    )
                sys.stderr.flush()

        model.train()

        # Early-stop bookkeeping (runs on every rank; v_meanloss is identical
        # across ranks because each batch loss was all-reduced).
        if v_meanloss < curr_lowest_loss:
            no_improve_count = 0
            sys.stderr.write(f"训练进程-{os.getpid()} 轮次 {epoch + 1}: 验证损失改进至 {v_meanloss:.4f}\n")
            curr_lowest_loss = v_meanloss
        else:
            no_improve_count += 1
            sys.stderr.write(f"训练进程-{os.getpid()} 轮次 {epoch + 1}: 验证损失无改进，当前计数: {no_improve_count}/{patience}\n")

        if no_improve_count >= patience and epoch >= args.min_epoch_num - 1:
            if global_rank == 0:
                sys.stderr.write(f"训练进程-{os.getpid()} 在轮次 {epoch + 1} 因验证损失连续 {no_improve_count} 次无改进而提前停止\n")
            dist.barrier()
            break

        # Per-epoch cross-node checkpoint sync.
        if args.epoch_sync:
            sync_ckpt = model_dir + args.model_type + \
                        f'.epoch_sync_node{args.node_rank}.b{args.seq_len}_p{args.offset}_epoch{epoch + 1}.ckpt'
            checkpoint(
                model,
                local_rank,
                sync_ckpt
            )

        # Learning-rate scheduling.
        if args.lr_scheduler == "ReduceLROnPlateau":
            scheduler.step(v_meanloss)
        else:
            scheduler.step()

    if global_rank == 0:
        sys.stderr.write(f"最佳模型在轮次 {curr_best_accuracy_loc} (准确率: {curr_best_accuracy})\n")
    clear_linecache()
    cleanup()



def train_worker_mtm(local_rank, global_world_size, args):
    """
    Distributed (DDP) training worker for the MTM model.

    The k-mer input is embedded inside the model itself (the external
    SharedEmbedding path is commented out below); input sequences are sliced
    from their stored length (e.g. 41) down to ``args.seq_len`` (e.g. 21)
    starting at offset ``args.bias``.

    Args:
        local_rank: GPU index on this node; also used as the CUDA device id.
        global_world_size: total number of processes across all nodes.
        args: parsed command-line namespace carrying file paths, model
            hyperparameters (``mtm_*``), optimizer/scheduler settings, etc.

    Side effects: writes progress to stderr, saves checkpoints under
    ``args.model_dir`` (rank 0), and tears down the process group on exit.
    """
    global_rank = args.node_rank * args.ngpus_per_node + local_rank

    # Initialize the distributed process group (NCCL backend).
    dist.init_process_group(
        backend="nccl",
        init_method=args.dist_url,
        world_size=global_world_size,
        rank=global_rank,
        timeout=timedelta(minutes=30)
    )

    sys.stderr.write("训练进程-{} [初始化] == 本地 rank: {}, 全局 rank: {} ==\n".format(os.getpid(), local_rank, global_rank))

    # Model directory setup: rank 0 (or any rank when epoch_sync is on)
    # creates the directory, or clears checkpoints left by a previous run.
    if global_rank == 0 or args.epoch_sync:
        model_dir = args.model_dir
        if model_dir != "/":
            model_dir = os.path.abspath(model_dir).rstrip("/")
            if local_rank == 0:
                if not os.path.exists(model_dir):
                    os.makedirs(model_dir)
                else:
                    model_regex = re.compile(
                        r"" + args.model_type + r"\.b\d+_s\d+_p\d+_epoch\d+\.ckpt*"
                    )
                    for mfile in os.listdir(model_dir):
                        if model_regex.match(mfile) is not None:
                            os.remove(model_dir + "/" + mfile)
            model_dir += "/"

    # Initialize the MTM model (SharedEmbedding is handled inside MTM now).
    #shared_embedding = SharedEmbedding(vocab_size=args.n_vocab, embedding_size=args.n_embed)
    #nn.init.normal_(shared_embedding.embedding.weight, mean=0, std=0.05)  # init embedding weights

    num_chn = args.mtm_num_base_features + args.n_embed  # channels: signals (1) + base_means (1) + base_stds (1) + base_signal_lens (1) + kmer embedding
    # d_static = 0  # no static features
    # num_cls = args.class_num  # number of classification classes
    # ratios = [2, 2, 2]  # downsampling ratios, tunable per task
    model = MTM(
        num_chn=num_chn,
        d_static=args.mtm_d_static,
        num_cls=args.class_num,      # reuse existing arg
        ratios=args.mtm_ratios,
        d_model=args.hid_rnn,        # reuse existing arg
        r_hid=args.mtm_r_hid,
        drop=args.dropout_rate,      # reuse existing dropout_rate arg
        norm_first=args.mtm_norm_first,
        down_mode=args.mtm_down_mode,
        vocab_size=args.n_vocab, 
        embedding_size=args.n_embed,
        use_channel_attn=args.mtm_use_channel,
        use_mixer=args.mtm_use_mixer,
        moe=args.mtm_moe,
        use_swiglu=args.mtm_use_swiglu 
    )
    if str2bool(args.use_compile):
        try:
            model = torch.compile(model,mode="reduce-overhead")
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
        # consider narrowing to `except Exception`.
        except:
            raise ImportError('torch.compile does not exist in PyTorch<2.0.')

    # Move to GPU and wrap with DDP.
    #shared_embedding = shared_embedding.cuda(local_rank)
    model = model.cuda(local_rank)
    #shared_embedding = DDP(shared_embedding, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
    model = DDP(model, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=False)

    sys.stderr.write("训练进程-{} 读取数据..\n".format(os.getpid()))

    # Data loading: line-offset-indexed datasets with per-rank DistributedSampler.
    train_linenum = count_line_num(args.train_file, False)
    train_offsets = generate_offsets(args.train_file)
    train_dataset = SignalFeaData1s(args.train_file, train_offsets, train_linenum)
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset, shuffle=True)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.dl_num_workers,
                                               pin_memory=True,
                                               sampler=train_sampler)

    valid_linenum = count_line_num(args.valid_file, False)
    valid_offsets = generate_offsets(args.valid_file)
    valid_dataset = SignalFeaData1s(args.valid_file, valid_offsets, valid_linenum)
    valid_sampler = torch.utils.data.distributed.DistributedSampler(valid_dataset, shuffle=True)
    valid_loader = torch.utils.data.DataLoader(dataset=valid_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=False,
                                               num_workers=args.dl_num_workers,
                                               pin_memory=True,
                                               sampler=valid_sampler)

    # Loss: class-weighted cross entropy; pos_weight up-weights the positive class.
    weight_rank = torch.from_numpy(np.array([1, args.pos_weight])).float().cuda(local_rank)
    criterion = nn.CrossEntropyLoss(weight=weight_rank)

    # Single optimizer over all trainable parameters.
    #all_params = list(shared_embedding.module.parameters()) + list(model.module.parameters())
    all_params = list(model.module.parameters())
    if args.optim_type == "Adam":
        optimizer = torch.optim.Adam(all_params, lr=args.lr)
    elif args.optim_type == "RMSprop":
        optimizer = torch.optim.RMSprop(all_params, lr=args.lr)
    elif args.optim_type == "SGD":
        optimizer = torch.optim.SGD(all_params, lr=args.lr, momentum=0.8)
    else:
        raise ValueError("优化器类型错误！")

    # Learning-rate scheduler.
    if args.lr_scheduler == "StepLR":
        scheduler = StepLR(optimizer, step_size=args.lr_decay_step, gamma=args.lr_decay)
    elif args.lr_scheduler == "ReduceLROnPlateau":
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=args.lr_decay,
                                      patience=args.lr_patience, verbose=True)
    else:
        raise ValueError("学习率调度器类型错误！")

    total_step = len(train_loader)
    sys.stderr.write("训练进程-{} 总步数: {}\n".format(os.getpid(), total_step))
    curr_best_accuracy = 0
    curr_best_accuracy_loc = 0
    curr_lowest_loss = 10000  # sentinel "infinity" for best validation loss
    v_accuracy_epoches = []
    #shared_embedding.train()
    model.train()

    patience = 3  # early-stopping patience (epochs without val-loss improvement)
    no_improve_count = 0

    # Training loop.
    for epoch in range(args.max_epoch_num):
        # Reshuffle shards per epoch so each rank sees a new permutation.
        train_loader.sampler.set_epoch(epoch)

        
        no_best_model = True
        tlosses = []
        start = time.time()
        for i, sfeatures in enumerate(train_loader):
            _, kmer, base_means, base_stds, base_signal_lens, signals, labels,tags = sfeatures
            # Slice args.seq_len time steps starting at offset args.bias.
            # center_idx = kmer.shape[1] // 2
            # start_idx = center_idx - args.seq_len // 2
            # end_idx = center_idx + args.seq_len // 2+1
            train_kmer = kmer[:, args.bias:args.seq_len+args.bias]  # (B, 21)
            # if args.mtm_num_base_features > 1:
            #     train_base_means = base_means[:, :args.seq_len]  # (B, 21)
            #     train_base_stds = base_stds[:, :args.seq_len]  # (B, 21)
            #     train_base_signal_lens = base_signal_lens[:, :args.seq_len]  # (B, 21)
            train_signals = signals[:, args.bias:args.seq_len+args.bias, :]  # (B, 21, S)

            # Move to CUDA.
            train_kmer = train_kmer.cuda(local_rank, non_blocking=True).long()  # (B, 21)
            # if args.mtm_num_base_features > 1:
            #     train_base_means = train_base_means.cuda(local_rank, non_blocking=True).float()  # (B, 21)
            #     train_base_stds = train_base_stds.cuda(local_rank, non_blocking=True).float()  # (B, 21)
            #     train_base_signal_lens = train_base_signal_lens.cuda(local_rank, non_blocking=True).float()  # (B, 21)
            train_signals = train_signals.cuda(local_rank, non_blocking=True).float()  # (B, 21, S)
            labels = labels.cuda(local_rank, non_blocking=True).long()  # (B,)
            tags = tags.cuda(local_rank, non_blocking=True).long()

            # Data preprocessing.
            batch_size = train_signals.shape[0]
            seq_len = args.seq_len  # 21
            signal_len = args.signal_len
            #embedding_size = args.n_embed

            # signals: (B, 21, S) -> (B, 21*S, 1)
            signals = train_signals.view(batch_size, -1, 1)  # (B, 21*S, 1)

            

            # if args.mtm_num_base_features > 1:
            #     # kmer: (B, 21) -> (B, 21*S, embedding_size)
            #     kmer_embed = shared_embedding(train_kmer)  # (B, 21, embedding_size)
            #     kmer_embed = kmer_embed.repeat_interleave(signal_len, dim=1)  # (B, 21*S, embedding_size)

            #     #base_means, base_stds, base_signal_lens: (B, 21) -> (B, 21*S, 1)
            #     base_means = train_base_means.repeat_interleave(signal_len, dim=1).unsqueeze(-1)  # (B, 21*S, 1)
            #     base_stds = train_base_stds.repeat_interleave(signal_len, dim=1).unsqueeze(-1)  # (B, 21*S, 1)
            #     base_signal_lens = train_base_signal_lens.repeat_interleave(signal_len, dim=1).unsqueeze(-1)  # (B, 21*S, 1)
            # Repeat each base's k-mer id across its signal points: (B, 21*S).
            kmer = train_kmer.repeat_interleave(signal_len, dim=1)
            # # concat x: (B, 21*S, C), C = 1 + 1 + 1 + 1 + embedding_size
            # if args.mtm_num_base_features > 1:
            #     x = torch.cat([signals, base_means, base_stds, base_signal_lens,
            #                 kmer_embed], dim=-1)  # (B, 21*S, C)
            # else:
            #     x = torch.cat([signals, #base_means, base_stds, base_signal_lens,
            #                     kmer_embed], dim=-1)  # (B, 21*S, C)

            # Build mask x_mask: (B, 21*S, C).
            # NOTE: the rejected approach below detected NaN along the last
            # dim and broadcast it over the whole column, which also masked
            # positions that were not NaN.
            #x_mask = torch.isnan(x).any(dim=-1, keepdim=True).repeat(1, 1, x.shape[-1])  # (B, 21*S, C)
            # Rejected variant that masked zero-valued positions instead:
            #x_mask = (x == 0.0).any(dim=-1, keepdim=True).repeat(1, 1, x.shape[-1])
            #x_mask = torch.isnan(x)
            x_mask = torch.isnan(signals)  # shape: [..., signal_dim]
            # Embedding channels are never missing, so their mask is all False.
            false_mask = torch.zeros((*x_mask.shape[:-1], args.n_embed), dtype=torch.bool, device=x_mask.device)  # shape: [..., kmer_dim]
            x_mask = torch.cat([x_mask, false_mask], dim=-1)

            # Time index t: (B, 21*S).
            t = torch.arange(seq_len * signal_len, device=signals.device).repeat(batch_size, 1)  # (B, 21*S)

            # Static features x_static: (B, D_static) — built from read tags.
            x_static = tags.unsqueeze(-1)#torch.zeros(batch_size, d_static, device=x.device)  # placeholder

            # Forward / backward pass with gradient-norm clipping at 2.0.
            optimizer.zero_grad()
            outputs = model(signals, kmer, x_mask, t, x_static,args.mtm_use_channel,args.mtm_use_mixer,args.mtm_moe)  # (B, num_cls)
            loss = criterion(outputs, labels)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(all_params, 2.0)
            optimizer.step()

            tlosses.append(loss.detach().item())
            if global_rank == 0 and ((i + 1) % args.step_interval == 0 or (i + 1) == total_step):
                time_cost = time.time() - start
                sys.stderr.write(
                    f"轮次 [{epoch + 1}/{args.max_epoch_num}]，步数 [{i + 1}/{total_step}]；"
                    f"损失: {np.mean(tlosses):.4f}；时间: {time_cost:.2f}s\n"
                )
                sys.stderr.flush()
                start = time.time()
                tlosses = []

        # Validation loop.
        model.eval()
        #shared_embedding.eval()
        with torch.no_grad():
            vlosses, vlabels_total, vpredicted_total = [], [], []
            v_meanloss = 10000
            for vi, vsfeatures in enumerate(valid_loader):
                _, vkmer, vbase_means, vbase_stds, vbase_signal_lens, vsignals, vlabels,vtags = vsfeatures
                # Slice args.seq_len time steps starting at offset args.bias.
                vtrain_kmer = vkmer[:, args.bias:args.seq_len+args.bias]  # (B, 21)
                # if args.mtm_num_base_features > 1:
                #     vtrain_base_means = vbase_means[:, :args.seq_len]  # (B, 21)
                #     vtrain_base_stds = vbase_stds[:, :args.seq_len]  # (B, 21)
                #     vtrain_base_signal_lens = vbase_signal_lens[:, :args.seq_len]  # (B, 21)
                vtrain_signals = vsignals[:, args.bias:args.seq_len+args.bias, :]  # (B, 21, S)

                # Move to CUDA.
                vtrain_kmer = vtrain_kmer.cuda(local_rank, non_blocking=True).long()  # (B, 21)
                # if args.mtm_num_base_features > 1:
                #     vtrain_base_means = vtrain_base_means.cuda(local_rank, non_blocking=True).float()  # (B, 21)
                #     vtrain_base_stds = vtrain_base_stds.cuda(local_rank, non_blocking=True).float()  # (B, 21)
                #     vtrain_base_signal_lens = vtrain_base_signal_lens.cuda(local_rank, non_blocking=True).float()  # (B, 21)
                vtrain_signals = vtrain_signals.cuda(local_rank, non_blocking=True).float()  # (B, 21, S)
                vlabels = vlabels.cuda(local_rank, non_blocking=True).long()  # (B,)
                vtags =vtags.cuda(local_rank, non_blocking=True).long()

                # Validation preprocessing (mirrors the training path above).
                batch_size = vtrain_signals.shape[0]
                vsignals = vtrain_signals.view(batch_size, -1, 1)  # (B, 21*S, 1)
                # vkmer_embed = shared_embedding(vtrain_kmer)  # (B, 21, embedding_size)
                # vkmer_embed = vkmer_embed.repeat_interleave(signal_len, dim=1)  # (B, 21*S, embedding_size)
                # if args.mtm_num_base_features > 1:
                #     vbase_means = vtrain_base_means.repeat_interleave(signal_len, dim=1).unsqueeze(-1)  # (B, 21*S, 1)
                #     vbase_stds = vtrain_base_stds.repeat_interleave(signal_len, dim=1).unsqueeze(-1)  # (B, 21*S, 1)
                #     vbase_signal_lens = vtrain_base_signal_lens.repeat_interleave(signal_len, dim=1).unsqueeze(-1)  # (B, 21*S, 1)
                vkmer = vtrain_kmer.repeat_interleave(signal_len, dim=1)
                # if args.mtm_num_base_features > 1:
                #     vx = torch.cat([vsignals, vbase_means, vbase_stds, vbase_signal_lens, 
                #                     vkmer_embed], dim=-1)  # (B, 21*S, C)
                # else:
                #     vx = torch.cat([vsignals, #vbase_means, vbase_stds, vbase_signal_lens, 
                #                     vkmer_embed], dim=-1)  # (B, 21*S, C)
                #vx_mask = torch.isnan(vx).any(dim=-1, keepdim=True).repeat(1, 1, vx.shape[-1])  # (B, 21*S, C)
                # Rejected variant that masked zero-valued positions instead:
                #vx_mask = (vx == 0.0).any(dim=-1, keepdim=True).repeat(1, 1, vx.shape[-1])
                #vx_mask = torch.isnan(vx)
                vx_mask = torch.isnan(vsignals)  # shape: [..., signal_dim]
                # Embedding channels are never missing, so their mask is all False.
                vfalse_mask = torch.zeros((*vx_mask.shape[:-1], args.n_embed), dtype=torch.bool, device=vx_mask.device) # shape: [..., kmer_dim]
                vx_mask = torch.cat([vx_mask, vfalse_mask], dim=-1)
                vt = torch.arange(seq_len * signal_len, device=vsignals.device).repeat(batch_size, 1)  # (B, 21*S)
                vx_static = vtags.unsqueeze(-1)#torch.zeros(batch_size, d_static, device=vx.device)  # (B, D_static)

                # Validation forward pass; loss averaged across all ranks.
                voutputs = model(vsignals, vkmer, vx_mask, vt, vx_static,args.mtm_use_channel,args.mtm_use_mixer,args.mtm_moe)  # (B, num_cls)
                vloss = criterion(voutputs, vlabels)
                vloss = reduce_mean(vloss, global_world_size)

                _, vpredicted = torch.max(voutputs.data, 1)
                vlabels = vlabels.cpu()
                vpredicted = vpredicted.cpu()

                vlosses.append(vloss.item())
                vlabels_total += vlabels.tolist()
                vpredicted_total += vpredicted.tolist()

            v_accuracy = metrics.accuracy_score(vlabels_total, vpredicted_total)
            v_precision = metrics.precision_score(vlabels_total, vpredicted_total, zero_division=0)
            v_recall = metrics.recall_score(vlabels_total, vpredicted_total, zero_division=0)
            v_meanloss = np.mean(vlosses)

            # Save checkpoint on rank 0 when accuracy is within 1e-4 of the best.
            if v_accuracy > curr_best_accuracy - 0.0001 and global_rank == 0:
                torch.save(
                    model.module.state_dict(),
                    # {
                    #     'shared_embedding': shared_embedding.module.state_dict(),
                    #     'model': model.module.state_dict()
                    # },
                    model_dir + args.model_type + f'.b{args.seq_len}_s{args.signal_len}_p{args.offset}_epoch{epoch + 1}.ckpt'
                )
                if v_accuracy > curr_best_accuracy:
                    curr_best_accuracy = v_accuracy
                    curr_best_accuracy_loc = epoch + 1

                # Additionally mark checkpoints that beat the previous epoch.
                if len(v_accuracy_epoches) > 0 and v_accuracy > v_accuracy_epoches[-1]:
                    torch.save(
                        model.module.state_dict(),
                        # {
                        #     'shared_embedding': shared_embedding.module.state_dict(),
                        #     'model': model.module.state_dict()
                        # },
                        model_dir + args.model_type + f'.betterthanlast.b{args.seq_len}_s{args.signal_len}_p{args.offset}_epoch{epoch + 1}.ckpt'
                    )
            # if v_meanloss < curr_lowest_loss:
            #     curr_lowest_loss = v_meanloss
            #     no_best_model = False
            

            v_accuracy_epoches.append(v_accuracy)

            time_cost = time.time() - start
            if global_rank == 0:
                try:
                    # get_last_lr may not exist on older scheduler versions;
                    # fall back to logging without the learning rate.
                    last_lr = scheduler.get_last_lr()[0]
                    sys.stderr.write(
                        f"轮次 [{epoch + 1}/{args.max_epoch_num}]；学习率: {last_lr:.4e};"
                        f"验证损失: {v_meanloss:.4f}, "
                        f"准确率: {v_accuracy:.4f}, 精确率: {v_precision:.4f}, 召回率: {v_recall:.4f}, "
                        f"最佳准确率: {curr_best_accuracy:.4f}；时间: {time_cost:.2f}s\n"
                    )
                except Exception:
                    sys.stderr.write(
                        f"轮次 [{epoch + 1}/{args.max_epoch_num}];"
                        f"验证损失: {v_meanloss:.4f}, "
                        f"准确率: {v_accuracy:.4f}, 精确率: {v_precision:.4f}, 召回率: {v_recall:.4f}, "
                        f"最佳准确率: {curr_best_accuracy:.4f}；时间: {time_cost:.2f}s\n"
                    )
                sys.stderr.flush()

        model.train()
        #shared_embedding.train()

        # Early stopping: count epochs without validation-loss improvement.
        # v_meanloss is all-reduced above, so every rank takes the same branch.
        if v_meanloss < curr_lowest_loss:
            no_improve_count = 0
            sys.stderr.write(f"训练进程-{os.getpid()} 轮次 {epoch + 1}: 验证损失改进至 {v_meanloss:.4f}\n")
            curr_lowest_loss = v_meanloss
        else:
            no_improve_count += 1
            sys.stderr.write(f"训练进程-{os.getpid()} 轮次 {epoch + 1}: 验证损失无改进，当前计数: {no_improve_count}/{patience}\n")

        if no_improve_count >= patience and epoch >= args.min_epoch_num - 1:
            if global_rank == 0:
                sys.stderr.write(f"训练进程-{os.getpid()} 在轮次 {epoch + 1} 因验证损失连续 {no_improve_count} 次无改进而提前停止\n")
            # All ranks reach this barrier before breaking, keeping them in sync.
            dist.barrier()
            break

        # Per-epoch checkpoint sync across nodes (optional).
        if args.epoch_sync:
            sync_ckpt = model_dir + args.model_type + \
                        f'.epoch_sync_node{args.node_rank}.b{args.seq_len}_p{args.offset}_epoch{epoch + 1}.ckpt'
            checkpoint(
                model,
                # {
                #     'shared_embedding': shared_embedding,
                #     'model': model
                # },
                local_rank,
                sync_ckpt
            )

        # Learning-rate scheduling: only ReduceLROnPlateau takes the metric.
        if args.lr_scheduler == "ReduceLROnPlateau":
            scheduler.step(v_meanloss)
        else:
            scheduler.step()

    if global_rank == 0:
        sys.stderr.write(f"最佳模型在轮次 {curr_best_accuracy_loc} (准确率: {curr_best_accuracy})\n")
    clear_linecache()
    cleanup()

def train_worker_mpts(local_rank, global_world_size, args):
    """
    Distributed (DDP) training worker for MPTSNet using the SignalFeaData1s
    dataset loader.

    Rank 0 estimates the dominant FFT periods from a single sample batch and
    broadcasts them so every rank constructs an identical model.

    Args:
        local_rank: GPU index on this node; also used as the CUDA device id.
        global_world_size: total number of processes across all nodes.
        args: parsed command-line namespace carrying file paths, model
            hyperparameters (``mpts_*``), optimizer/scheduler settings, etc.

    Side effects: writes progress to stderr, saves checkpoints under
    ``args.model_dir`` (rank 0), and tears down the process group on exit.
    """
    # Fixed hyperparameters (originally from train.py, not in MTM args)
    num_heads = args.mpts_num_heads
    ff_dim = args.mpts_ff_dim
    num_layers = args.mpts_layers
    patience = 3  # early-stopping patience (epochs without val-loss improvement)
    max_periods = 5  # fixed number of FFT periods extracted and broadcast

    global_rank = args.node_rank * args.ngpus_per_node + local_rank

    # Initialize distributed training
    dist.init_process_group(
        backend="nccl",
        init_method=args.dist_url,
        world_size=global_world_size,
        rank=global_rank,
    )

    sys.stderr.write(f"Training process-{os.getpid()} [Initialization] == Local rank: {local_rank}, Global rank: {global_rank} ==\n")

    # Model directory initialization: create it, or clear stale checkpoints
    # from a previous run (one process per node does the filesystem work).
    if global_rank == 0 or args.epoch_sync:
        model_dir = args.model_dir
        if model_dir != "/":
            model_dir = os.path.abspath(model_dir).rstrip("/")
            if local_rank == 0:
                if not os.path.exists(model_dir):
                    os.makedirs(model_dir)
                else:
                    model_regex = re.compile(r"mptsnet\.b\d+_s\d+_epoch\d+\.ckpt*")
                    for mfile in os.listdir(model_dir):
                        if model_regex.match(mfile):
                            os.remove(os.path.join(model_dir, mfile))
            model_dir += "/"

    # Load dataset using SignalFeaData1s (line-offset-indexed text files).
    sys.stderr.write(f"Training process-{os.getpid()} Loading data...\n")
    train_linenum = count_line_num(args.train_file, False)
    train_offsets = generate_offsets(args.train_file)
    train_dataset = SignalFeaData1s(args.train_file, train_offsets, train_linenum)
    train_sampler = torch.utils.data.DistributedSampler(train_dataset, shuffle=True)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=args.batch_size // 2,  # halved: MPTSNet needs more memory per sample
        shuffle=False,
        num_workers=args.dl_num_workers,
        pin_memory=True,
        sampler=train_sampler
    )

    valid_linenum = count_line_num(args.valid_file, False)
    valid_offsets = generate_offsets(args.valid_file)
    valid_dataset = SignalFeaData1s(args.valid_file, valid_offsets, valid_linenum)
    valid_sampler = torch.utils.data.DistributedSampler(valid_dataset, shuffle=False)
    valid_loader = torch.utils.data.DataLoader(
        dataset=valid_dataset,
        batch_size=args.batch_size // 2,
        shuffle=False,
        num_workers=args.dl_num_workers,
        pin_memory=True,
        sampler=valid_sampler
    )

    # Data preprocessing (one sample batch determines tensor shapes).
    device = torch.device(f'cuda:{local_rank}' if torch.cuda.is_available() else 'cpu')
    sample_batch = next(iter(train_loader))
    _, kmer, base_means, base_stds, base_signal_lens, signals, labels, tags = sample_batch
    signals = signals[:, :args.seq_len, :].to(device)  # Truncate to seq_len (e.g., 21)
    kmer = kmer[:, :args.seq_len].to(device)  # Truncate to seq_len

    # Compute FFT periods on rank 0 only, then broadcast to all ranks.
    if global_rank == 0:
        batch_size = signals.shape[0]
        mask = torch.isnan(signals)
        signals[mask] = 0
        signals_fft = signals.view(batch_size, -1, 1).detach().cpu().numpy()  # (batch, signal_len*seq_len, 1)
        periods = fft_main_periods_wo_duplicates(signals_fft, max_periods, 'MPTSNet')
        periods = [int(round(p)) for p in periods if p > 0]  # round and drop non-positive values
        # BUGFIX: pad (or truncate) to a fixed length so the broadcast tensor
        # has the same shape on every rank. Previously, when filtering left
        # fewer than max_periods entries, rank 0's tensor mismatched the
        # [0]*5 tensor on the other ranks and dist.broadcast failed.
        periods = (periods + [0] * max_periods)[:max_periods]
    else:
        periods = [0] * max_periods  # placeholder, overwritten by broadcast

    # Broadcast periods to all processes, then strip the zero padding so
    # every rank ends up with the identical (non-zero) period list.
    periods_tensor = torch.tensor(periods, dtype=torch.int, device=device)
    dist.broadcast(periods_tensor, src=0)
    periods = [p for p in periods_tensor.cpu().numpy().tolist() if p > 0]
    sys.stderr.write(f"Synchronized periods: {periods}\n")

    # Model parameters
    seq_len = args.seq_len  # e.g., 21
    signal_len = signals.shape[2]  # Signal length per time step
    num_channels = 1 + args.n_embed  # Signals (1) + kmer embedding
    num_classes = args.class_num
    # Embedding widths scale with channel count, clamped to sane bounds.
    embed_dim = max(min(num_channels * 4, 256), 64)
    embed_dim_t = max(min(embed_dim * 4, 512), 256)

    #sys.stderr.write(f"Num channels: {num_channels}, Seq length: {seq_len}, Num classes: {num_classes}\n")
    sys.stderr.write(f"Adaptive embed_dim: {embed_dim}, embed_dim_t: {embed_dim_t}\n")

    # Initialize MPTSNet model
    flag = False
    model = MPTSmodel(
        periods=periods,
        flag=flag,
        num_channels=num_channels,
        seq_length=seq_len * signal_len,  # Total sequence length after flattening
        num_classes=num_classes,
        embed_dim=embed_dim,
        embed_dim_t=embed_dim_t,
        num_heads=num_heads,
        ff_dim=ff_dim,
        num_layers=num_layers
    ).to(device)

    total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    sys.stderr.write(f"Total parameters of model: {total_params}\n")

    # Wrap model with DDP
    model = DDP(model, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)

    # Loss: class-weighted cross entropy (pos_weight up-weights positives).
    weight_rank = torch.from_numpy(np.array([1, args.pos_weight])).float().to(device)
    criterion = nn.CrossEntropyLoss(weight=weight_rank)
    all_params = list(model.module.parameters())
    if args.optim_type == "Adam":
        optimizer = torch.optim.Adam(all_params, lr=args.lr)
    elif args.optim_type == "RMSprop":
        optimizer = torch.optim.RMSprop(all_params, lr=args.lr)
    elif args.optim_type == "SGD":
        optimizer = torch.optim.SGD(all_params, lr=args.lr, momentum=0.8)
    else:
        raise ValueError("优化器类型错误！")

    # Learning-rate scheduler.
    if args.lr_scheduler == "StepLR":
        scheduler = StepLR(optimizer, step_size=args.lr_decay_step, gamma=args.lr_decay)
    elif args.lr_scheduler == "ReduceLROnPlateau":
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=args.lr_decay,
                                      patience=args.lr_patience, verbose=True)
    else:
        raise ValueError("学习率调度器类型错误！")

    # Training loop
    total_step = len(train_loader)
    sys.stderr.write(f"Training process-{os.getpid()} Total steps: {total_step}\n")
    curr_best_accuracy = 0
    curr_best_accuracy_loc = 0
    curr_lowest_loss = 10000  # sentinel "infinity" for best validation loss
    no_improve_count = 0
    model.train()

    for epoch in range(args.max_epoch_num):
        # Reshuffle shards per epoch so each rank sees a new permutation.
        train_loader.sampler.set_epoch(epoch)
        tlosses = []
        start = time.time()

        for i, sfeatures in enumerate(train_loader):
            _, kmer, base_means, base_stds, base_signal_lens, signals, labels, tags = sfeatures
            # Truncate to seq_len (e.g., 21) and move to device.
            train_kmer = kmer[:, :args.seq_len].to(device, non_blocking=True).long()
            train_signals = signals[:, :args.seq_len, :].to(device, non_blocking=True).float()
            labels = labels.to(device, non_blocking=True).long()
            tags = tags.to(device, non_blocking=True).long()

            # Data preprocessing: flatten per-base signals along time, and
            # repeat each base's k-mer id across its signal points.
            batch_size = train_signals.shape[0]
            signals = train_signals.view(batch_size, -1, 1)  # (B, seq_len*signal_len, 1)
            kmer = train_kmer.repeat_interleave(signal_len, dim=1)  # (B, seq_len*signal_len)

            # Handle NaNs: zero-fill missing signal values in place.
            x_mask = torch.isnan(signals)
            signals[x_mask] = 0

            # Forward / backward pass with gradient-norm clipping at 2.0.
            optimizer.zero_grad()
            outputs = model(signals, kmer)  # (B, num_classes)
            loss = criterion(outputs, labels)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 2.0)
            optimizer.step()
            tlosses.append(loss.detach().item())

            if global_rank == 0 and ((i + 1) % args.step_interval == 0 or (i + 1) == total_step):
                time_cost = time.time() - start
                sys.stderr.write(
                    f"Epoch [{epoch + 1}/{args.max_epoch_num}], Step [{i + 1}/{total_step}]; "
                    f"Loss: {np.mean(tlosses):.4f}; Time: {time_cost:.2f}s\n"
                )
                sys.stderr.flush()
                start = time.time()
                tlosses = []

        # Validation loop
        model.eval()
        with torch.no_grad():
            vlosses, vlabels_total, vpredicted_total = [], [], []
            for vi, vsfeatures in enumerate(valid_loader):
                _, vkmer, vbase_means, vbase_stds, vbase_signal_lens, vsignals, vlabels, vtags = vsfeatures
                vtrain_kmer = vkmer[:, :args.seq_len].to(device, non_blocking=True).long()
                vtrain_signals = vsignals[:, :args.seq_len, :].to(device, non_blocking=True).float()
                vlabels = vlabels.to(device, non_blocking=True).long()

                # Validation preprocessing (mirrors the training path above).
                batch_size = vtrain_signals.shape[0]
                vsignals = vtrain_signals.view(batch_size, -1, 1)  # (B, seq_len*signal_len, 1)
                vkmer = vtrain_kmer.repeat_interleave(signal_len, dim=1)  # (B, seq_len*signal_len)

                # Handle NaNs
                vx_mask = torch.isnan(vsignals)
                vsignals[vx_mask] = 0

                voutputs = model(vsignals, vkmer)
                vloss = criterion(voutputs, vlabels)
                vloss = reduce_mean(vloss, global_world_size)  # average loss across ranks

                _, vpredicted = torch.max(voutputs.data, 1)
                vlabels = vlabels.cpu()
                vpredicted = vpredicted.cpu()

                vlosses.append(vloss.item())
                vlabels_total += vlabels.tolist()
                vpredicted_total += vpredicted.tolist()

            v_accuracy = metrics.accuracy_score(vlabels_total, vpredicted_total)
            v_precision = metrics.precision_score(vlabels_total, vpredicted_total, zero_division=0)
            v_recall = metrics.recall_score(vlabels_total, vpredicted_total, zero_division=0)
            v_meanloss = np.mean(vlosses)

            # Save checkpoint on rank 0 when accuracy is within 1e-4 of the best.
            if v_accuracy > curr_best_accuracy - 0.0001 and global_rank == 0:
                torch.save(
                    model.module.state_dict(),
                    os.path.join(model_dir, f"mptsnet.b{seq_len}_s{signal_len}_epoch{epoch + 1}.ckpt")
                )
                if v_accuracy > curr_best_accuracy:
                    curr_best_accuracy = v_accuracy
                    curr_best_accuracy_loc = epoch + 1
                    sys.stderr.write(
                        f"New best model saved with test_acc={v_accuracy:.4f}\n"
                    )

            time_cost = time.time() - start
            if global_rank == 0:
                try:
                    # get_last_lr may not exist on older scheduler versions;
                    # fall back to logging without the learning rate.
                    last_lr = scheduler.get_last_lr()[0]
                    sys.stderr.write(
                        f"Epoch [{epoch + 1}/{args.max_epoch_num}]; Learning rate: {last_lr:.4e}; "
                        f"Validation loss: {v_meanloss:.4f}, "
                        f"Accuracy: {v_accuracy:.4f}, Precision: {v_precision:.4f}, Recall: {v_recall:.4f}, "
                        f"Best accuracy: {curr_best_accuracy:.4f}; Time: {time_cost:.2f}s\n"
                    )
                except Exception:
                    sys.stderr.write(
                        f"Epoch [{epoch + 1}/{args.max_epoch_num}]; "
                        f"Validation loss: {v_meanloss:.4f}, "
                        f"Accuracy: {v_accuracy:.4f}, Precision: {v_precision:.4f}, Recall: {v_recall:.4f}, "
                        f"Best accuracy: {curr_best_accuracy:.4f}; Time: {time_cost:.2f}s\n"
                    )
                sys.stderr.flush()

        model.train()

        # Early stopping: count epochs without validation-loss improvement.
        # v_meanloss is all-reduced above, so every rank takes the same branch.
        if v_meanloss < curr_lowest_loss:
            no_improve_count = 0
            sys.stderr.write(f"Training process-{os.getpid()} Epoch {epoch + 1}: Validation loss improved to {v_meanloss:.4f}\n")
            curr_lowest_loss = v_meanloss
        else:
            no_improve_count += 1
            sys.stderr.write(f"Training process-{os.getpid()} Epoch {epoch + 1}: No improvement in validation loss, count: {no_improve_count}/{patience}\n")

        if no_improve_count >= patience and epoch >= args.min_epoch_num - 1:
            if global_rank == 0:
                sys.stderr.write(f"Training process-{os.getpid()} at epoch {epoch + 1} stopped early due to {no_improve_count} epochs without improvement\n")
                sys.stderr.write(f"Best test accuracy on {args.dataset_name}: {curr_best_accuracy:.4f}\n")
            # All ranks reach this barrier before breaking, keeping them in sync.
            dist.barrier()
            break

        # Per-epoch checkpoint sync across nodes (optional).
        if args.epoch_sync:
            sync_ckpt = os.path.join(model_dir, f"mptsnet.epoch_sync_node{args.node_rank}.b{seq_len}_s{signal_len}_epoch{epoch + 1}.ckpt")
            torch.save(model.module.state_dict(), sync_ckpt)

        # BUGFIX: only ReduceLROnPlateau takes the metric. StepLR.step()
        # previously received v_meanloss, which it interprets as an epoch
        # index and which corrupts the decay schedule.
        if args.lr_scheduler == "ReduceLROnPlateau":
            scheduler.step(v_meanloss)
        else:
            scheduler.step()

    if global_rank == 0:
        sys.stderr.write(f"Best model at epoch {curr_best_accuracy_loc} (accuracy: {curr_best_accuracy:.4f})\n")

    # Clean up
    torch.cuda.empty_cache()
    dist.destroy_process_group()

def train_worker_softshape(local_rank, global_world_size, args):
    """
    Distributed (DDP/NCCL) training worker for SoftShapeNet using the
    SignalFeaData1s dataset loader.

    Args:
        local_rank: GPU index on this node; also the CUDA device id.
        global_world_size: total number of processes across all nodes.
        args: parsed command-line namespace (see main()).

    Side effects:
        Saves checkpoints into args.model_dir (rank 0, plus per-node sync
        checkpoints when --epoch_sync), logs progress to stderr, and destroys
        the process group on exit.
    """
    # Fixed hyperparameters for SoftShapeNet
    shape_size = args.softshape_shape_size
    sparse_rate = args.softshape_sparse_rate
    depth = args.softshape_depth
    num_experts = args.softshape_num_experts
    stride = args.softshape_stride
    warm_up_epoch = args.softshape_warm_up_epoch
    moeloss_rate = args.softshape_moe_loss
    emb_dim = args.softshape_emb_dim
    patience = 3  # Early stopping patience

    global_rank = args.node_rank * args.ngpus_per_node + local_rank

    # Initialize distributed training
    dist.init_process_group(
        backend="nccl",
        init_method=args.dist_url,
        world_size=global_world_size,
        rank=global_rank,
    )

    sys.stderr.write(f"Training process-{os.getpid()} [Initialization] == Local rank: {local_rank}, Global rank: {global_rank} ==\n")

    # Model directory initialization: rank 0 (or every node under --epoch_sync)
    # prepares the checkpoint directory and removes stale checkpoints from a
    # previous run of this worker.
    if global_rank == 0 or args.epoch_sync:
        model_dir = args.model_dir
        if model_dir != "/":
            model_dir = os.path.abspath(model_dir).rstrip("/")
            if local_rank == 0:
                if not os.path.exists(model_dir):
                    os.makedirs(model_dir)
                else:
                    # NOTE(review): trailing 't*' makes the final 't' optional;
                    # with re.match (prefix matching) it behaves like '\.ckpt'.
                    model_regex = re.compile(r"softshapenet\.b\d+_s\d+_epoch\d+\.ckpt*")
                    for mfile in os.listdir(model_dir):
                        if model_regex.match(mfile):
                            os.remove(os.path.join(model_dir, mfile))
            model_dir += "/"

    # Load dataset using SignalFeaData1s; DistributedSampler does the
    # shuffling, so the DataLoader itself keeps shuffle=False.
    sys.stderr.write(f"Training process-{os.getpid()} Loading data...\n")
    train_linenum = count_line_num(args.train_file, False)
    train_offsets = generate_offsets(args.train_file)
    train_dataset = SignalFeaData1s(args.train_file, train_offsets, train_linenum)
    train_sampler = torch.utils.data.DistributedSampler(train_dataset, shuffle=True)
    train_loader = torch.utils.data.DataLoader(
        dataset=train_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.dl_num_workers,
        pin_memory=True,
        sampler=train_sampler
    )

    valid_linenum = count_line_num(args.valid_file, False)
    valid_offsets = generate_offsets(args.valid_file)
    valid_dataset = SignalFeaData1s(args.valid_file, valid_offsets, valid_linenum)
    valid_sampler = torch.utils.data.DistributedSampler(valid_dataset, shuffle=False)
    valid_loader = torch.utils.data.DataLoader(
        dataset=valid_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.dl_num_workers,
        pin_memory=True,
        sampler=valid_sampler
    )

    device = torch.device(f'cuda:{local_rank}' if torch.cuda.is_available() else 'cpu')
    # Model parameters
    seq_len = args.seq_len  # e.g., 21
    signal_len = args.signal_len  # Signal length per time step
    num_channels = 1 + args.n_embed  # raw signal channel + kmer embedding channels
    num_classes = args.class_num

    # Initialize SoftShapeNet model
    model = SoftShapeNet(
        seq_len=seq_len * signal_len,  # Total sequence length after flattening
        shape_size=shape_size,
        num_channels=num_channels,
        emb_dim=emb_dim,
        sparse_rate=sparse_rate,
        depth=depth,
        num_classes=num_classes,
        num_experts=num_experts,
        stride=stride,
        vocab_size=args.n_vocab,
        embedding_size=args.n_embed
    ).to(device)

    total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    sys.stderr.write(f"Total parameters of model: {total_params}\n")

    # Wrap model with DDP
    model = DDP(model, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)

    # Loss function and optimizer; pos_weight rebalances the positive class.
    weight_rank = torch.from_numpy(np.array([1, args.pos_weight])).float().to(device)
    criterion = nn.CrossEntropyLoss(weight=weight_rank)
    all_params = list(model.module.parameters())
    if args.optim_type == "Adam":
        optimizer = torch.optim.Adam(all_params, lr=args.lr)
    elif args.optim_type == "RMSprop":
        optimizer = torch.optim.RMSprop(all_params, lr=args.lr)
    elif args.optim_type == "SGD":
        # FIX: keyword was misspelled 'momentumtok', which raised TypeError.
        optimizer = torch.optim.SGD(all_params, lr=args.lr, momentum=0.8)
    else:
        raise ValueError("优化器类型错误！")

    # Learning rate scheduler
    if args.lr_scheduler == "StepLR":
        scheduler = StepLR(optimizer, step_size=args.lr_decay_step, gamma=args.lr_decay)
    elif args.lr_scheduler == "ReduceLROnPlateau":
        scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=args.lr_decay,
                                      patience=args.lr_patience, verbose=True)
    else:
        raise ValueError("学习率调度器类型错误！")

    # Training loop
    total_step = len(train_loader)
    sys.stderr.write(f"Training process-{os.getpid()} Total steps: {total_step}\n")
    curr_best_accuracy = 0
    curr_best_accuracy_loc = 0
    curr_lowest_loss = 10000
    no_improve_count = 0
    model.train()

    for epoch in range(args.max_epoch_num):
        # Re-seed the sampler so each epoch sees a different shard ordering.
        train_loader.sampler.set_epoch(epoch)
        tlosses = []
        tlosses_ce = []
        tlosses_moe = []
        start = time.time()

        for i, sfeatures in enumerate(train_loader):
            _, kmer, base_means, base_stds, base_signal_lens, signals, labels, tags = sfeatures

            train_kmer = kmer[:, :args.seq_len].to(device, non_blocking=True).long()
            # Expand each base id to cover its signal points: (B, seq_len*signal_len)
            kmer = train_kmer.repeat_interleave(signal_len, dim=1)

            train_signals = signals[:, :args.seq_len, :].to(device, non_blocking=True).float()
            labels = labels.to(device, non_blocking=True).long()
            tags = tags.to(device, non_blocking=True).long()

            # Flatten per-base signals into one channel: (B, 1, seq_len*signal_len)
            batch_size = train_signals.shape[0]
            signals = train_signals.view(batch_size, 1, -1)

            # Handle NaNs
            x_mask = torch.isnan(signals)
            signals[x_mask] = 0  # Replace NaNs with 0

            optimizer.zero_grad()
            outputs, moe_loss = model(signals, kmer, num_epoch_i=epoch, warm_up_epoch=warm_up_epoch)
            celoss = criterion(outputs, labels)
            loss = celoss + moeloss_rate * moe_loss
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 2.0)
            optimizer.step()
            tlosses.append(loss.detach().item())
            tlosses_ce.append(celoss.detach().item())
            # FIX: cast to float so we never keep a graph-attached tensor alive
            # in the logging buffer (moe_loss participates in backward above).
            tlosses_moe.append(float(moe_loss))

            if global_rank == 0 and ((i + 1) % args.step_interval == 0 or (i + 1) == total_step):
                time_cost = time.time() - start
                sys.stderr.write(
                    f"Epoch [{epoch + 1}/{args.max_epoch_num}], Step [{i + 1}/{total_step}]; "
                    f"CE Loss: {np.mean(tlosses_ce):.4f}; MoE Loss: {np.mean(tlosses_moe):.4f}; "
                    f"Loss: {np.mean(tlosses):.4f}; Time: {time_cost:.2f}s\n"
                )
                sys.stderr.flush()
                start = time.time()
                tlosses = []
                tlosses_ce = []
                tlosses_moe = []

        # Validation loop
        model.eval()
        with torch.no_grad():
            vlosses, vlabels_total, vpredicted_total = [], [], []
            vlosses_moe = []
            for vi, vsfeatures in enumerate(valid_loader):
                _, vkmer, vbase_means, vbase_stds, vbase_signal_lens, vsignals, vlabels, vtags = vsfeatures
                vtrain_kmer = vkmer[:, :args.seq_len].to(device, non_blocking=True).long()
                vkmer = vtrain_kmer.repeat_interleave(signal_len, dim=1)  # (B, seq_len*signal_len)

                vtrain_signals = vsignals[:, :args.seq_len, :].to(device, non_blocking=True).float()
                vlabels = vlabels.to(device, non_blocking=True).long()

                batch_size = vtrain_signals.shape[0]
                vsignals = vtrain_signals.view(batch_size, 1, -1)  # (B, 1, seq_len*signal_len)

                # Handle NaNs
                vx_mask = torch.isnan(vsignals)
                vsignals[vx_mask] = 0

                voutputs, vmoe_loss = model(vsignals, vkmer, num_epoch_i=epoch, warm_up_epoch=warm_up_epoch)
                vloss = criterion(voutputs, vlabels)
                # Average metrics across all ranks so early stopping is consistent.
                vloss = reduce_mean(vloss, global_world_size)

                # FIX: as_tensor avoids the copy/warning when vmoe_loss is
                # already a tensor, while still handling a plain float.
                vloss_moe_tensor = torch.as_tensor(vmoe_loss, device=device)
                vmoe_loss = reduce_mean(vloss_moe_tensor, global_world_size)

                _, vpredicted = torch.max(voutputs.data, 1)
                vlabels = vlabels.cpu()
                vpredicted = vpredicted.cpu()

                vlosses.append(vloss.item())
                vlabels_total += vlabels.tolist()
                vpredicted_total += vpredicted.tolist()
                vlosses_moe.append(vmoe_loss.cpu().item())

            # NOTE: metrics computed on this rank's validation shard only.
            v_accuracy = metrics.accuracy_score(vlabels_total, vpredicted_total)
            v_precision = metrics.precision_score(vlabels_total, vpredicted_total, zero_division=0)
            v_recall = metrics.recall_score(vlabels_total, vpredicted_total, zero_division=0)
            v_meanloss = np.mean(vlosses)

            vlosses_moe = np.mean(vlosses_moe)

            # Save model if (near-)improved; small epsilon keeps ties.
            if v_accuracy > curr_best_accuracy - 0.0001 and global_rank == 0:
                torch.save(
                    model.module.state_dict(),
                    os.path.join(model_dir, f"softshapenet.b{seq_len}_s{signal_len}_epoch{epoch + 1}.ckpt")
                )
                if v_accuracy > curr_best_accuracy:
                    curr_best_accuracy = v_accuracy
                    curr_best_accuracy_loc = epoch + 1
                    sys.stderr.write(
                        f"New best model saved with test_acc={v_accuracy:.4f}\n"
                    )

            time_cost = time.time() - start
            if global_rank == 0:
                # get_last_lr() is not available on every scheduler/version,
                # hence the fallback message without the learning rate.
                try:
                    last_lr = scheduler.get_last_lr()[0]
                    sys.stderr.write(
                        f"Epoch [{epoch + 1}/{args.max_epoch_num}]; Learning rate: {last_lr:.4e}; "
                        f"Validation loss: {v_meanloss:.4f}, "
                        f"Validation MoE loss: {vlosses_moe:.4f}, "
                        f"Accuracy: {v_accuracy:.4f}, Precision: {v_precision:.4f}, Recall: {v_recall:.4f}, "
                        f"Best accuracy: {curr_best_accuracy:.4f}; Time: {time_cost:.2f}s\n"
                    )
                except Exception:
                    sys.stderr.write(
                        f"Epoch [{epoch + 1}/{args.max_epoch_num}]; "
                        f"Validation loss: {v_meanloss:.4f}, "
                        f"Validation MoE loss: {vlosses_moe:.4f}, "
                        f"Accuracy: {v_accuracy:.4f}, Precision: {v_precision:.4f}, Recall: {v_recall:.4f}, "
                        f"Best accuracy: {curr_best_accuracy:.4f}; Time: {time_cost:.2f}s\n"
                    )
                sys.stderr.flush()

        model.train()

        # Early stopping bookkeeping (v_meanloss is all-reduced, so every
        # rank takes the same branch and the barrier below cannot deadlock).
        if v_meanloss < curr_lowest_loss:
            no_improve_count = 0
            sys.stderr.write(f"Training process-{os.getpid()} Epoch {epoch + 1}: Validation loss improved to {v_meanloss:.4f}\n")
            curr_lowest_loss = v_meanloss
        else:
            no_improve_count += 1
            sys.stderr.write(f"Training process-{os.getpid()} Epoch {epoch + 1}: No improvement in validation loss, count: {no_improve_count}/{patience}\n")

        if no_improve_count >= patience and epoch >= args.min_epoch_num - 1:
            if global_rank == 0:
                sys.stderr.write(f"Training process-{os.getpid()} at epoch {epoch + 1} stopped early due to {no_improve_count} epochs without improvement\n")
                sys.stderr.write(f"Best test accuracy: {curr_best_accuracy:.4f}\n")
            dist.barrier()
            break

        if args.epoch_sync:
            sync_ckpt = os.path.join(model_dir, f"softshapenet.epoch_sync_node{args.node_rank}.b{seq_len}_s{signal_len}_epoch{epoch + 1}.ckpt")
            torch.save(model.module.state_dict(), sync_ckpt)

        # FIX: only ReduceLROnPlateau.step() takes a metric; StepLR.step()
        # must be called without arguments (a float here would be interpreted
        # as the deprecated 'epoch' argument and corrupt the schedule).
        if args.lr_scheduler == "ReduceLROnPlateau":
            scheduler.step(v_meanloss)
        else:
            scheduler.step()

    if global_rank == 0:
        sys.stderr.write(f"Best model at epoch {curr_best_accuracy_loc} (accuracy: {curr_best_accuracy:.4f})\n")

    # Clean up
    torch.cuda.empty_cache()
    dist.destroy_process_group()

# def train_multigpu(args):
#     total_start = time.time()
#     torch.manual_seed(args.tseed)
    
#     if torch.cuda.is_available():
#         torch.cuda.manual_seed(args.tseed)
#         print("GPU is available!")
#     else:
#         raise RuntimeError("No GPU is available!")

#     if not dist.is_available():
#         raise RuntimeError("torch.distributed is not available!")

#     if torch.cuda.device_count() < args.ngpus_per_node:
#         raise RuntimeError(f"There are not enough GPUs, has {torch.cuda.device_count()}, request {args.ngpus_per_node}.")

#     global_world_size = args.ngpus_per_node * args.nodes

#     # Pre-training phase
#     print("Starting pre-training phase...")
#     #mp.spawn(train_worker_fits_pretrain, nprocs=args.ngpus_per_node, args=(global_world_size, args))

#     # Fine-tuning phase
#     print("Starting fine-tuning phase...")
#     mp.spawn(train_worker_fits_finetune, nprocs=args.ngpus_per_node, args=(global_world_size, args))

#     endtime = time.time()
#     clear_linecache()
#     print(f"[main] train_multigpu costs {endtime - total_start:.1f} seconds")
def train_multigpu(args):
    """
    Entry point for multi-GPU training: seed RNGs, sanity-check the CUDA /
    distributed environment, then spawn one worker process per local GPU
    for the model family selected by the --mtm/--mpts/--softshape flags.

    Raises:
        RuntimeError: if no GPU is available, torch.distributed is not
            available, or fewer GPUs exist than requested.
    """
    total_start = time.time()
    torch.manual_seed(args.tseed)

    # Merged the two previous 'if use_cuda' checks into one branch.
    if use_cuda:
        torch.cuda.manual_seed(args.tseed)
        print("GPU is available!")
    else:
        raise RuntimeError("No GPU is available!")

    if not dist.is_available():
        raise RuntimeError("torch.distributed is not available!")

    if torch.cuda.device_count() < args.ngpus_per_node:
        raise RuntimeError("There are not enough gpus, has {}, request {}.".format(torch.cuda.device_count(),
                                                                                   args.ngpus_per_node))

    global_world_size = args.ngpus_per_node * args.nodes

    # Dispatch to the worker matching the requested model family.
    if args.mtm:
        if args.mtm_stack:
            mp.spawn(train_worker_mtm_stack, nprocs=args.ngpus_per_node, args=(global_world_size, args))
        else:
            mp.spawn(train_worker_mtm, nprocs=args.ngpus_per_node, args=(global_world_size, args))
    elif args.mpts:
        mp.spawn(train_worker_mpts, nprocs=args.ngpus_per_node, args=(global_world_size, args))
    elif args.softshape:
        mp.spawn(train_worker_softshape, nprocs=args.ngpus_per_node, args=(global_world_size, args))
    else:
        mp.spawn(train_worker, nprocs=args.ngpus_per_node, args=(global_world_size, args))

    endtime = time.time()
    clear_linecache()
    print("[main]train_multigpu costs {:.1f} seconds".format(endtime - total_start))

def main():
    """Parse command-line arguments and launch distributed training."""
    parser = argparse.ArgumentParser("[EXPERIMENTAL]train a model, use torch.nn.parallel.DistributedDataParallel")
    st_input = parser.add_argument_group("INPUT")
    st_input.add_argument('--train_file', type=str, required=True)
    st_input.add_argument('--valid_file', type=str, required=True)

    # FIX: 'weather' -> 'whether' in the three model-selection help strings.
    st_input.add_argument(
        "--mtm",
        action="store_true",
        default=False,
        help="whether use mtm model",
    )
    st_input.add_argument(
        "--mpts",
        action="store_true",
        default=False,
        help="whether use mpts model",
    )
    st_input.add_argument(
        "--softshape",
        action="store_true",
        default=False,
        help="whether use softshape model",
    )

    st_output = parser.add_argument_group("OUTPUT")
    st_output.add_argument('--model_dir', type=str, required=True)

    # =================================================================
    # MTM model-specific hyperparameters
    # =================================================================
    st_mtm = parser.add_argument_group("MTM MODEL_HYPER")
    st_mtm.add_argument('--mtm_num_base_features', type=int, default=1, 
                        help="Number of non-embedding features for MTM input channels. Final num_chn = num_base_features + n_embed. Default: 1")
    st_mtm.add_argument('--mtm_d_static', type=int, default=0, 
                        help="Dimension of static features for MTM. Default: 0")
    st_mtm.add_argument('--mtm_ratios', nargs='+', type=int, default=[2, 2, 2], 
                        help="Downsampling ratios for MTM blocks. e.g., --mtm_ratios 2 2 2. Default: [2, 2, 2]")
    st_mtm.add_argument('--mtm_r_hid', type=int, default=4, 
                        help="Hidden dimension ratio for the feed-forward network in MTM blocks. Default: 4")
    st_mtm.add_argument('--mtm_norm_first', type=str2bool, default='True', 
                        help="Whether to use pre-normalization (LayerNorm before attention/FFN) in MTM. Default: True")
    st_mtm.add_argument('--mtm_down_mode', type=str, default='concat', choices=['concat', 'pool'], 
                        help="Downsampling mode for MTM. Default: 'concat'")
    st_mtm.add_argument('--mtm_use_channel', action="store_true", default=False, 
                        help="Whether to use channel attention in MTM. Default: False")
    st_mtm.add_argument('--mtm_use_mixer', action="store_false", default=True, 
                        help="Whether to use mixer in MTM. Default: True")
    st_mtm.add_argument('--mtm_moe', action="store_true", default=False, 
                        help="Whether to use moe in MTM TokenMixer. Default: False")
    st_mtm.add_argument('--mtm_stack', action="store_true", default=False, 
                        help="Whether to use stack mode in MTM. Default: False")  
    st_mtm.add_argument('--mtm_use_swiglu', action="store_true", default=False, 
                        help="Whether to use swiglu in MTM. Default: False")
    # =================================================================
    # MPTS model-specific hyperparameters
    # =================================================================
    st_mpts = parser.add_argument_group("MPTS MODEL_HYPER")
    st_mpts.add_argument('--mpts_num_heads', type=int, default=4, 
                        help="Number of heads for MPTS. Default: 4")
    st_mpts.add_argument('--mpts_ff_dim', type=int, default=256, 
                        help="Hidden dimension of feed-forward network for MPTS. Default: 256")
    st_mpts.add_argument('--mpts_layers', type=int, default=4, 
                        help="Number of layers for MPTS. Default: 4")

    # =================================================================
    # SoftShape model-specific hyperparameters
    # =================================================================
    st_softshape = parser.add_argument_group("SOFTSHAPE MODEL_HYPER")
    st_softshape.add_argument('--softshape_emb_dim', type=int, default=128,
                        help="Embedding dimension for SoftShapeNet. Default: 128")
    st_softshape.add_argument('--softshape_depth', type=int, default=2,
                        help="Number of layers for SoftShapeNet. Default: 2")
    st_softshape.add_argument('--softshape_sparse_rate', type=float, default=0.5,
                        help="Sparse rate for SoftShapeNet. Default: 0.5")
    st_softshape.add_argument('--softshape_moe_loss', type=float, default=0.001,
                        help="moe loss for SoftShapeNet. Default: 0.001")
    st_softshape.add_argument('--softshape_shape_size', type=int, default=8,
                        help="Shape size for SoftShapeNet. Default: 8")
    st_softshape.add_argument('--softshape_num_experts', type=int, default=8,
                        help="Number of experts for SoftShapeNet MoE. Default: 8")
    st_softshape.add_argument('--softshape_stride', type=int, default=4,
                        help="Stride for SoftShapeNet shape embedding. Default: 4")
    st_softshape.add_argument('--softshape_warm_up_epoch', type=int, default=20,
                        help="Warm up epoch for SoftShapeNet. Default: 20")

    st_train = parser.add_argument_group("TRAIN MODEL_HYPER")
    st_train.add_argument(
        "--model_type",
        type=str,
        default="both_bilstm",
        choices=["both_bilstm", "seq_bilstm", "signal_bilstm"],
        required=False,
        help="type of model to use, 'both_bilstm', 'seq_bilstm' or 'signal_bilstm', "
        "'both_bilstm' means to use both seq and signal bilstm, default: both_bilstm",
    )
    st_train.add_argument(
        "--seq_len",
        type=int,
        default=21,
        required=False,
        help="len of kmer. default 21",
    )
    st_train.add_argument(
        "--signal_len",
        type=int,
        default=15,
        required=False,
        help="the number of signals of one base to be used in deepsignal, default 15",
    )
    st_train.add_argument(
        "--bias",
        action="store",
        type=int,
        required=False,
        default=0,
        help="the number of bias to be used in deepsignal, default 0",
    )
    st_train.add_argument(
        "--offset",
        action="store",
        type=int,
        required=False,
        default=0,
        help="the number of kmer offset to be used in Time Series Forecasting, default 0",
    )
    # model param
    st_train.add_argument(
        "--layernum1",
        type=int,
        default=3,
        required=False,
        help="lstm layer num for combined feature, default 3",
    )
    st_train.add_argument(
        "--layernum2",
        type=int,
        default=1,
        required=False,
        help="lstm layer num for seq feature (and for signal feature too), default 1",
    )
    st_train.add_argument("--class_num", type=int, default=2, required=False)
    st_train.add_argument("--dropout_rate", type=float, default=0.5, required=False)
    st_train.add_argument(
        "--n_vocab",
        type=int,
        default=16,
        required=False,
        help="base_seq vocab_size (15 base kinds from iupac)",
    )
    st_train.add_argument(
        "--n_embed", type=int, default=4, required=False, help="base_seq embedding_size"
    )
    st_train.add_argument(
        "--is_base",
        type=str,
        default="yes",
        required=False,
        help="is using base features in seq model, default yes",
    )
    st_train.add_argument(
        "--is_signallen",
        type=str,
        default="yes",
        required=False,
        help="is using signal length feature of each base in seq model, default yes",
    )
    st_train.add_argument(
        "--is_trace",
        type=str,
        default="no",
        required=False,
        # FIX: help said "default yes" although the actual default is "no".
        help="is using trace (base prob) feature of each base in seq model, default no",
    )
    # BiLSTM model param
    st_train.add_argument(
        "--hid_rnn",
        type=int,
        default=256,
        required=False,
        help="BiLSTM hidden_size for combined feature",
    )

    st_training = parser.add_argument_group("TRAINING")
    # model training
    st_training.add_argument('--optim_type', type=str, default="Adam", choices=["Adam", "RMSprop", "SGD",
                                                                                "Ranger", "LookaheadAdam"],
                             required=False, help="type of optimizer to use, 'Adam', 'SGD', 'RMSprop', "
                                                  "'Ranger' or 'LookaheadAdam', default Adam")
    st_training.add_argument('--batch_size', type=int, default=512, required=False)
    st_training.add_argument('--lr_scheduler', type=str, default='StepLR', required=False,
                             choices=["StepLR", "ReduceLROnPlateau"],
                             help="StepLR or ReduceLROnPlateau, default StepLR")
    st_training.add_argument('--lr', type=float, default=0.001, required=False,
                             help="default 0.001. [lr should be lr*world_size when using multi gpus? "
                                  "or lower batch_size?]")
    st_training.add_argument('--lr_decay', type=float, default=0.1, required=False,
                             help="default 0.1")
    st_training.add_argument('--lr_decay_step', type=int, default=1, required=False,
                             help="effective in StepLR. default 1")
    st_training.add_argument('--lr_patience', type=int, default=0, required=False,
                             help="effective in ReduceLROnPlateau. default 0")
    st_training.add_argument("--max_epoch_num", action="store", default=40, type=int,
                             required=False, help="max epoch num, default 40")
    st_training.add_argument("--min_epoch_num", action="store", default=5, type=int,
                             required=False, help="min epoch num, default 5")
    st_training.add_argument('--pos_weight', type=float, default=1.0, required=False)
    st_training.add_argument('--step_interval', type=int, default=500, required=False)
    st_training.add_argument('--dl_num_workers', type=int, default=0, required=False,
                             help="default 0")

    st_training.add_argument('--init_model', type=str, default=None, required=False,
                             help="file path of pre-trained model parameters to load before training")
    st_training.add_argument('--tseed', type=int, default=1234,
                             help='random seed for pytorch')
    st_training.add_argument('--use_compile', type=str, default="no", required=False,
                             help="[EXPERIMENTAL] if using torch.compile, yes or no, "
                                  "default no ('yes' only works in pytorch>=2.0)")
    st_training.add_argument('--lambda_corr', '--a', type=float, default=0.1)
    st_trainingp = parser.add_argument_group("TRAINING PARALLEL")
    st_trainingp.add_argument("--nodes", default=1, type=int,
                              help="number of nodes for distributed training, default 1")
    st_trainingp.add_argument("--ngpus_per_node", default=2, type=int,
                              help="number of GPUs per node for distributed training, default 2")
    st_trainingp.add_argument("--dist-url", default="tcp://127.0.0.1:12315", type=str,
                              help="url used to set up distributed training")
    st_trainingp.add_argument("--node_rank", default=0, type=int,
                              help="node rank for distributed training, default 0")
    st_trainingp.add_argument("--epoch_sync", action="store_true", default=False,
                              help="if sync model params of gpu0 to other local gpus after per epoch")

    args = parser.parse_args()

    display_args(args)
    train_multigpu(args)


# Script entry point: only run training when executed directly.
if __name__ == "__main__":
    main()