#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import

import torch
#import pytorch_lightning as pl
import torch.nn as nn
import torch.autograd as autograd
import torch.nn.functional as F

import torch.utils
import torch.utils.checkpoint

from .utils.constants_torch import use_cuda
import numpy as np
import math

from einops import rearrange, einsum

import sys


# inner module ================================================
# https://github.com/kuangliu/pytorch-cifar/blob/master/models/resnet.py
"""ResNet in PyTorch.

For Pre-activation ResNet, see 'preact_resnet.py'.

Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
    Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""


class BasicBlock(nn.Module):
    """Residual basic block built from Conv1d/BatchNorm1d pairs.

    Two 3-wide convolutions with batch norm; a 1x1 projected shortcut is
    used whenever stride or channel count changes the tensor shape,
    otherwise the shortcut is the identity.
    """

    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv1d(in_planes, planes, kernel_size=3,
                               stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm1d(planes)
        self.conv2 = nn.Conv1d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm1d(planes)

        # Project the residual only when the main path changes shape.
        needs_projection = stride != 1 or in_planes != self.expansion * planes
        if needs_projection:
            self.shortcut = nn.Sequential(
                nn.Conv1d(in_planes, self.expansion * planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm1d(self.expansion * planes),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        residual = self.shortcut(x)
        y = F.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        return F.relu(y + residual)


class ResNet_3layers(nn.Module):
    """Three-stage 1D ResNet: a stem Conv1d followed by three block groups.

    Group widths are fixed at 16, 64 and `out_channels`; `strides` gives the
    downsampling stride of the first block of each group.
    """

    def __init__(
        self, block, num_blocks, strides, out_channels=128, init_channels=1, in_planes=4
    ):
        super(ResNet_3layers, self).__init__()
        self.in_planes = in_planes

        # Stem: lift the raw input channels up to `in_planes` without downsampling.
        self.conv1 = nn.Conv1d(init_channels, self.in_planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm1d(self.in_planes)
        # Three groups of residual blocks.
        self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=strides[0])
        self.layer2 = self._make_layer(block, 64, num_blocks[1], stride=strides[1])
        self.layer3 = self._make_layer(block, out_channels, num_blocks[2],
                                       stride=strides[2])

    def _make_layer(self, block, planes, num_blocks, stride):
        # Only the group's first block may downsample; the rest run at stride 1.
        stage = []
        for s in [stride] + [1] * (num_blocks - 1):
            stage.append(block(self.in_planes, planes, s))
            self.in_planes = planes * block.expansion
        return nn.Sequential(*stage)

    def forward(self, x):
        h = F.relu(self.bn1(self.conv1(x)))  # (N, init_channels, L) -> (N, in_planes, L)
        h = self.layer1(h)
        h = self.layer2(h)
        return self.layer3(h)


def get_lout(lin, strides):
    """Return the output length of a length-`lin` 1D input after a chain of
    strided layers, applying L_out = floor((L_in - 1) / s) + 1 per layer
    (matches the Conv1d(kernel=3, padding=1) layers used by ResNet_3layers).

    Args:
        lin: input sequence length (positive int).
        strides: iterable of positive int strides, one per layer.

    Returns:
        The resulting length as an int.
    """
    lout = lin
    for stride in strides:
        # Integer form of floor((lout - 1) / stride + 1); drops the float
        # round-trip and the redundant function-local `import math` of the
        # original (math is already imported at module level).
        lout = (lout - 1) // stride + 1
    return lout


def ResNet3(out_channels=128, strides=(1, 2, 2), init_channels=1, in_planes=4):
    """Build a three-stage ResNet with one BasicBlock per stage."""
    blocks_per_stage = [1, 1, 1]
    return ResNet_3layers(
        BasicBlock,
        blocks_per_stage,
        strides,
        out_channels,
        init_channels,
        in_planes,
    )


# model ===============================================
class ModelBiLSTM(nn.Module):
    """Methylation-call model: a per-base sequence Bi-LSTM branch plus a
    raw-signal Bi-LSTM branch, concatenated along the feature axis and fed
    through a combining Bi-LSTM and a two-layer MLP head.

    forward() returns ``(logits, softmax(logits))``, each of shape
    (batch_size, num_classes).
    """

    def __init__(
        self,
        seq_len=13,
        signal_len=16,
        num_layers1=3,
        num_layers2=1,
        num_classes=2,
        dropout_rate=0.5,
        hidden_size=256,
        vocab_size=16,
        embedding_size=4,
        is_base=True,
        is_signallen=True,
        is_trace=False,
        module="both_bilstm",
        #device=0,
    ):
        super(ModelBiLSTM, self).__init__()
        self.model_type = "BiLSTM"
        self.module = module
        #self.device = device

        self.seq_len = seq_len
        self.signal_len = signal_len
        self.num_layers1 = num_layers1  # for combined (seq+signal) feature
        self.num_layers2 = num_layers2  # for seq and signal feature separately
        self.num_classes = num_classes

        self.hidden_size = hidden_size

        # Split the combined hidden width between the two branches.
        self.nhid_seq = self.hidden_size // 2
        self.nhid_signal = self.hidden_size - self.nhid_seq

        # seq feature
        self.embed = nn.Embedding(vocab_size, embedding_size)  # for dna/rna base
        self.is_base = is_base
        self.is_signallen = is_signallen
        self.is_trace = is_trace
        # Number of per-base signal statistics appended to the embedding.
        self.sigfea_num = 3 if self.is_signallen else 2

        # Per-step input = base embedding + per-base signal statistics.
        self.lstm_seq = nn.LSTM(
            embedding_size + self.sigfea_num,
            self.nhid_seq,
            self.num_layers2,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        # Compact the LSTM weights into one contiguous chunk (cuDNN-friendly).
        self.lstm_seq.flatten_parameters()
        # (batch_size,seq_len,hidden_size*2)
        self.fc_seq = nn.Linear(self.nhid_seq * 2, self.nhid_seq)
        # self.dropout_seq = nn.Dropout(p=dropout_rate)
        self.relu_seq = nn.ReLU()

        # signal feature
        # self.convs = ResNet3(self.nhid_signal, (1, 1, 1), self.signal_len, self.signal_len)  # (N, C, L)
        self.lstm_signal = nn.LSTM(
            self.signal_len,
            self.nhid_signal,
            self.num_layers2,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        self.lstm_signal.flatten_parameters()
        self.fc_signal = nn.Linear(self.nhid_signal * 2, self.nhid_signal)
        # self.dropout_signal = nn.Dropout(p=dropout_rate)
        self.relu_signal = nn.ReLU()

        # combined
        self.lstm_comb = nn.LSTM(
            self.hidden_size,
            self.hidden_size,
            self.num_layers1,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        self.lstm_comb.flatten_parameters()
        self.dropout1 = nn.Dropout(p=dropout_rate)
        self.fc1 = nn.Linear(hidden_size * 2, hidden_size)  # 2 for bidirection
        self.dropout2 = nn.Dropout(p=dropout_rate)
        self.fc2 = nn.Linear(hidden_size, num_classes)

        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(1)
        #self.projection = nn.Linear(self.nhid_seq, self.nhid_signal )
        #self.save_hyperparameters()

    # def training_step(self, batch, batch_idx):
    #     kmer, base_means, base_stds, base_signal_lens, signals, labels = batch
    #     outputs, _ = self(kmer, base_means, base_stds, base_signal_lens, signals)
    #     loss = F.cross_entropy(outputs, labels)
    #     self.log('train_loss', loss)
    #     return loss

    # def validation_step(self, batch, batch_idx):
    #     kmer, base_means, base_stds, base_signal_lens, signals, labels = batch
    #     outputs, _ = self(kmer, base_means, base_stds, base_signal_lens, signals)
    #     loss = F.cross_entropy(outputs, labels)
    #     self.log('val_loss', loss)
    #     return loss

    # def configure_optimizers(self):
    #     optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
    #     scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
    #     return {"optimizer": optimizer, "lr_scheduler": scheduler}

    def get_model_type(self):
        """Return the model-type tag string ("BiLSTM")."""
        return self.model_type

    def init_hidden(self, batch_size, num_layers, hidden_size, device):
        """Build fresh (h0, c0) states for a bidirectional LSTM (num_layers * 2).

        NOTE(review): states are drawn from randn on every call rather than
        zeros — appears intentional, but verify. autograd.Variable is a
        deprecated no-op wrapper in modern torch.
        """
        # Set initial states
        h0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size,device=device))#.to(device)
        c0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size,device=device))#.to(device)
        # if use_cuda:
        #     h0 = h0.cuda(self.device)
        #     c0 = c0.cuda(self.device)
        return h0, c0

    def forward(self, kmer, base_means, base_stds, base_signal_lens, signals):
        """Run both branches and the combining LSTM.

        Args:
            kmer: (batch, seq_len) integer base indices.
            base_means / base_stds / base_signal_lens: per-base signal stats;
                assumed assignable into a (batch, seq_len, 1) slice — TODO
                confirm the caller passes (batch, seq_len, 1)-shaped floats.
            signals: (batch, seq_len, signal_len) raw-signal chunks.

        Returns:
            (logits, probs), each of shape (batch, num_classes).
        """
        # Sequence-branch features.
        kmer_embed = self.embed(kmer.long())  # (batch_size, seq_len, embedding_size)
        batch_size, seq_len, embedding_size = kmer_embed.shape

        # Pre-allocate out_seq and fill slices in place instead of torch.cat.
        # NOTE(review): the "+ 3" hardcodes sigfea_num == 3, i.e. assumes
        # is_signallen=True; lstm_seq was sized with self.sigfea_num — confirm.
        out_seq = torch.empty(batch_size, seq_len, embedding_size + 3, device=kmer_embed.device)
        out_seq[:, :, :embedding_size] = kmer_embed
        out_seq[:, :, embedding_size:embedding_size + 1] = base_means
        out_seq[:, :, embedding_size + 1:embedding_size + 2] = base_stds
        out_seq[:, :, embedding_size + 2:embedding_size + 3] = base_signal_lens

        out_seq, _ = self.lstm_seq(out_seq, self.init_hidden(batch_size, self.num_layers2, self.nhid_seq, kmer_embed.device))
        out_seq = self.fc_seq(out_seq)
        out_seq = self.relu_seq(out_seq)  # (batch_size, seq_len, nhid_seq)

        # Signal-branch features.
        out_signal = signals.float()  # (batch_size, seq_len, signal_len)
        out_signal, _ = self.lstm_signal(out_signal, self.init_hidden(batch_size, self.num_layers2, self.nhid_signal, kmer_embed.device))
        out_signal = self.fc_signal(out_signal)
        out_signal = self.relu_signal(out_signal)  # (batch_size, seq_len, nhid_signal)

        # Combine the two branches along the feature axis (pre-allocated).
        hidden_size = self.nhid_seq + self.nhid_signal
        out = torch.empty(batch_size, seq_len, hidden_size, device=kmer_embed.device)
        out[:, :, :self.nhid_seq] = out_seq
        out[:, :, self.nhid_seq:] = out_signal

        out, _ = self.lstm_comb(out, self.init_hidden(batch_size, self.num_layers1, hidden_size, kmer_embed.device))

        # Pre-allocate the final feature vector from the two LSTM directions.
        out_final = torch.empty(batch_size, 2 * hidden_size, device=kmer_embed.device)
        out_final[:, :hidden_size] = out[:, -1, :hidden_size]  # forward direction, last step
        out_final[:, hidden_size:] = out[:, 0, hidden_size:]  # backward direction, first step

        # Decode to class logits.
        out = self.dropout1(out_final)
        out = self.fc1(out)
        out = self.relu(out)
        out = self.dropout2(out)
        out = self.fc2(out)

        return out, self.softmax(out)


class ModelExtraction(nn.Module):
    """Feature extractor with the same two-branch Bi-LSTM architecture as
    ModelBiLSTM but no classifier head: forward() returns the combined
    feature vector of shape (batch_size, hidden_size * 2).

    `module` selects which branches are built and used: "both_bilstm",
    "seq_bilstm", or "signal_bilstm".
    """

    def __init__(
        self,
        seq_len=13,
        signal_len=16,
        num_layers1=3,
        num_layers2=1,
        num_classes=2,
        dropout_rate=0.5,
        hidden_size=256,
        vocab_size=16,
        embedding_size=4,
        is_base=True,
        is_signallen=True,
        is_trace=False,
        module="both_bilstm",
        device=0,
        lambd=1.0,
    ):
        super(ModelExtraction, self).__init__()
        self.model_type = "BiLSTM"
        self.module = module
        self.device = device

        self.seq_len = seq_len
        self.signal_len = signal_len
        self.num_layers1 = num_layers1  # for combined (seq+signal) feature
        self.num_layers2 = num_layers2  # for seq and signal feature separately
        self.num_classes = num_classes

        self.hidden_size = hidden_size
        # Gradient-reversal coefficient; stored but not used in this class.
        self.lambd = lambd

        # Split hidden width across whichever branches are enabled.
        if self.module == "both_bilstm":
            self.nhid_seq = self.hidden_size // 2
            self.nhid_signal = self.hidden_size - self.nhid_seq
        elif self.module == "seq_bilstm":
            self.nhid_seq = self.hidden_size
        elif self.module == "signal_bilstm":
            self.nhid_signal = self.hidden_size
        else:
            raise ValueError("--model_type is not right!")

        # seq feature
        if self.module != "signal_bilstm":
            self.embed = nn.Embedding(vocab_size, embedding_size)  # for dna/rna base
            self.is_base = is_base
            self.is_signallen = is_signallen
            self.is_trace = is_trace
            # Number of per-base signal statistics appended to the embedding.
            self.sigfea_num = 3 if self.is_signallen else 2
            if self.is_trace:
                self.sigfea_num += 1
            if self.is_base:
                self.lstm_seq = nn.LSTM(
                    embedding_size + self.sigfea_num,
                    self.nhid_seq,
                    self.num_layers2,
                    dropout=dropout_rate,
                    batch_first=True,
                    bidirectional=True,
                )
            else:
                # Without base embeddings the LSTM sees only the signal stats.
                self.lstm_seq = nn.LSTM(
                    self.sigfea_num,
                    self.nhid_seq,
                    self.num_layers2,
                    dropout=dropout_rate,
                    batch_first=True,
                    bidirectional=True,
                )
            self.fc_seq = nn.Linear(self.nhid_seq * 2, self.nhid_seq)
            # self.dropout_seq = nn.Dropout(p=dropout_rate)
            self.relu_seq = nn.ReLU()

        # signal feature
        if self.module != "seq_bilstm":
            # self.convs = ResNet3(self.nhid_signal, (1, 1, 1), self.signal_len, self.signal_len)  # (N, C, L)
            self.lstm_signal = nn.LSTM(
                self.signal_len,
                self.nhid_signal,
                self.num_layers2,
                dropout=dropout_rate,
                batch_first=True,
                bidirectional=True,
            )
            self.fc_signal = nn.Linear(self.nhid_signal * 2, self.nhid_signal)
            # self.dropout_signal = nn.Dropout(p=dropout_rate)
            self.relu_signal = nn.ReLU()

        # combined
        self.lstm_comb = nn.LSTM(
            self.hidden_size,
            self.hidden_size,
            self.num_layers1,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )

    def get_model_type(self):
        """Return the model-type tag string ("BiLSTM")."""
        return self.model_type

    def init_hidden(self, batch_size, num_layers, hidden_size):
        """Build fresh random (h0, c0) states for a bidirectional LSTM.

        NOTE(review): states come from randn (not zeros) on every call.
        """
        # Set initial states
        h0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size))
        c0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size))
        if use_cuda:
            h0 = h0.cuda(self.device)
            c0 = c0.cuda(self.device)
        return h0, c0

    # def backward(self, grad_output):
    #    # In the backward pass, multiply the gradient by -lambd.
    #    return -self.lambd * grad_output

    def forward(self, kmer, base_means, base_stds, base_signal_lens, signals):
        """Extract the combined (batch, hidden_size*2) feature vector.

        Args:
            kmer: (batch, seq_len) integer base indices.
            base_means / base_stds / base_signal_lens: per-base stats,
                reshaped here to (batch, seq_len, 1).
            signals: (batch, seq_len, signal_len) raw-signal chunks.
        """
        # seq feature ============================================
        if self.module != "signal_bilstm":
            base_means = torch.reshape(base_means, (-1, self.seq_len, 1)).float()
            base_stds = torch.reshape(base_stds, (-1, self.seq_len, 1)).float()
            base_signal_lens = torch.reshape(
                base_signal_lens, (-1, self.seq_len, 1)
            ).float()
            # base_probs = torch.reshape(base_probs, (-1, self.seq_len, 1)).float()
            # NOTE(review): each is_trace branch below is identical to its
            # non-trace counterpart — no trace feature is actually appended.
            if self.is_base:
                kmer_embed = self.embed(kmer.long())
                if self.is_signallen and self.is_trace:
                    out_seq = torch.cat(
                        (kmer_embed, base_means, base_stds, base_signal_lens), 2
                    )  # (N, L, C)
                elif self.is_signallen:
                    out_seq = torch.cat(
                        (kmer_embed, base_means, base_stds, base_signal_lens), 2
                    )  # (N, L, C)
                elif self.is_trace:
                    out_seq = torch.cat(
                        (kmer_embed, base_means, base_stds), 2
                    )  # (N, L, C)
                else:
                    out_seq = torch.cat(
                        (kmer_embed, base_means, base_stds), 2
                    )  # (N, L, C)
            else:
                if self.is_signallen and self.is_trace:
                    out_seq = torch.cat(
                        (base_means, base_stds, base_signal_lens), 2
                    )  # (N, L, C)
                elif self.is_signallen:
                    out_seq = torch.cat(
                        (base_means, base_stds, base_signal_lens), 2
                    )  # (N, L, C)
                elif self.is_trace:
                    out_seq = torch.cat((base_means, base_stds), 2)  # (N, L, C)
                else:
                    out_seq = torch.cat((base_means, base_stds), 2)  # (N, L, C)

            out_seq, _ = self.lstm_seq(
                out_seq,
                self.init_hidden(out_seq.size(0), self.num_layers2, self.nhid_seq),
            )  # (N, L, nhid_seq*2)
            out_seq = self.fc_seq(out_seq)  # (N, L, nhid_seq)
            # out_seq = self.dropout_seq(out_seq)
            out_seq = self.relu_seq(out_seq)

        # signal feature ==========================================
        if self.module != "seq_bilstm":
            out_signal = signals.float()
            # resnet ---
            # out_signal = out_signal.transpose(1, 2)  # (N, C, L)
            # out_signal = self.convs(out_signal)  # (N, nhid_signal, L)
            # out_signal = out_signal.transpose(1, 2)  # (N, L, nhid_signal)
            # lstm ---
            out_signal, _ = self.lstm_signal(
                out_signal,
                self.init_hidden(
                    out_signal.size(0), self.num_layers2, self.nhid_signal
                ),
            )
            out_signal = self.fc_signal(out_signal)  # (N, L, nhid_signal)
            # out_signal = self.dropout_signal(out_signal)
            out_signal = self.relu_signal(out_signal)

        # combined ================================================
        if self.module == "seq_bilstm":
            out = out_seq
        elif self.module == "signal_bilstm":
            out = out_signal
        elif self.module == "both_bilstm":
            out = torch.cat((out_seq, out_signal), 2)  # (N, L, hidden_size)
        out, _ = self.lstm_comb(
            out, self.init_hidden(out.size(0), self.num_layers1, self.hidden_size)
        )  # (N, L, hidden_size*2)
        # Take the forward direction's last step and the backward direction's
        # first step as the sequence summary.
        out_fwd_last = out[:, -1, : self.hidden_size]
        out_bwd_last = out[:, 0, self.hidden_size :]
        combine_out = torch.cat((out_fwd_last, out_bwd_last), 1)

        return combine_out


class ModelDomainExtraction(nn.Module):
    """Domain-aware variant of ModelExtraction: identical two-branch Bi-LSTM
    extractor, but a per-base `tags` channel is appended to the sequence
    features (hence sigfea_num is one larger than in ModelExtraction).

    forward() returns the combined feature vector of shape
    (batch_size, hidden_size * 2).
    """

    def __init__(
        self,
        seq_len=13,
        signal_len=16,
        num_layers1=3,
        num_layers2=1,
        num_classes=2,
        dropout_rate=0.5,
        hidden_size=256,
        vocab_size=16,
        embedding_size=4,
        is_base=True,
        is_signallen=True,
        is_trace=False,
        module="both_bilstm",
        device=0,
        lambd=1.0,
    ):
        super(ModelDomainExtraction, self).__init__()
        self.model_type = "BiLSTM"
        self.module = module
        self.device = device

        self.seq_len = seq_len
        self.signal_len = signal_len
        self.num_layers1 = num_layers1  # for combined (seq+signal) feature
        self.num_layers2 = num_layers2  # for seq and signal feature separately
        self.num_classes = num_classes

        self.hidden_size = hidden_size
        # Gradient-reversal coefficient; stored but not used in this class.
        self.lambd = lambd

        # Split hidden width across whichever branches are enabled.
        if self.module == "both_bilstm":
            self.nhid_seq = self.hidden_size // 2
            self.nhid_signal = self.hidden_size - self.nhid_seq
        elif self.module == "seq_bilstm":
            self.nhid_seq = self.hidden_size
        elif self.module == "signal_bilstm":
            self.nhid_signal = self.hidden_size
        else:
            raise ValueError("--model_type is not right!")

        # seq feature
        if self.module != "signal_bilstm":
            self.embed = nn.Embedding(vocab_size, embedding_size)  # for dna/rna base
            self.is_base = is_base
            self.is_signallen = is_signallen
            self.is_trace = is_trace
            # One more than ModelExtraction: the extra slot is the tags channel.
            self.sigfea_num = 4 if self.is_signallen else 3
            if self.is_trace:
                self.sigfea_num += 1
            if self.is_base:
                self.lstm_seq = nn.LSTM(
                    embedding_size + self.sigfea_num,
                    self.nhid_seq,
                    self.num_layers2,
                    dropout=dropout_rate,
                    batch_first=True,
                    bidirectional=True,
                )
            else:
                # Without base embeddings the LSTM sees only signal stats + tags.
                self.lstm_seq = nn.LSTM(
                    self.sigfea_num,
                    self.nhid_seq,
                    self.num_layers2,
                    dropout=dropout_rate,
                    batch_first=True,
                    bidirectional=True,
                )
            self.fc_seq = nn.Linear(self.nhid_seq * 2, self.nhid_seq)
            # self.dropout_seq = nn.Dropout(p=dropout_rate)
            self.relu_seq = nn.ReLU()

        # signal feature
        if self.module != "seq_bilstm":
            # self.convs = ResNet3(self.nhid_signal, (1, 1, 1), self.signal_len, self.signal_len)  # (N, C, L)
            self.lstm_signal = nn.LSTM(
                self.signal_len,
                self.nhid_signal,
                self.num_layers2,
                dropout=dropout_rate,
                batch_first=True,
                bidirectional=True,
            )
            self.fc_signal = nn.Linear(self.nhid_signal * 2, self.nhid_signal)
            # self.dropout_signal = nn.Dropout(p=dropout_rate)
            self.relu_signal = nn.ReLU()

        # combined
        self.lstm_comb = nn.LSTM(
            self.hidden_size,
            self.hidden_size,
            self.num_layers1,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )

    def get_model_type(self):
        """Return the model-type tag string ("BiLSTM")."""
        return self.model_type

    def init_hidden(self, batch_size, num_layers, hidden_size):
        """Build fresh random (h0, c0) states for a bidirectional LSTM.

        NOTE(review): states come from randn (not zeros) on every call.
        """
        # Set initial states
        h0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size))
        c0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size))
        if use_cuda:
            h0 = h0.cuda(self.device)
            c0 = c0.cuda(self.device)
        return h0, c0

    # def backward(self, grad_output):
    #    # In the backward pass, multiply the gradient by -lambd.
    #    return -self.lambd * grad_output

    def forward(self, kmer, base_means, base_stds, base_signal_lens, signals, tags):
        """Extract the combined (batch, hidden_size*2) feature vector.

        Args:
            kmer: (batch, seq_len) integer base indices.
            base_means / base_stds / base_signal_lens / tags: per-base values,
                each reshaped here to (batch, seq_len, 1).
            signals: (batch, seq_len, signal_len) raw-signal chunks.
        """
        # seq feature ============================================
        if self.module != "signal_bilstm":
            base_means = torch.reshape(base_means, (-1, self.seq_len, 1)).float()
            base_stds = torch.reshape(base_stds, (-1, self.seq_len, 1)).float()
            base_signal_lens = torch.reshape(
                base_signal_lens, (-1, self.seq_len, 1)
            ).float()
            # base_probs = torch.reshape(base_probs, (-1, self.seq_len, 1)).float()
            tags = torch.reshape(tags, (-1, self.seq_len, 1)).float()
            # NOTE(review): each is_trace branch below is identical to its
            # non-trace counterpart — no trace feature is actually appended.
            if self.is_base:
                kmer_embed = self.embed(kmer.long())
                if self.is_signallen and self.is_trace:
                    out_seq = torch.cat(
                        (kmer_embed, base_means, base_stds, base_signal_lens, tags), 2
                    )  # (N, L, C)
                elif self.is_signallen:
                    out_seq = torch.cat(
                        (kmer_embed, base_means, base_stds, base_signal_lens, tags), 2
                    )  # (N, L, C)
                elif self.is_trace:
                    out_seq = torch.cat(
                        (kmer_embed, base_means, base_stds, tags), 2
                    )  # (N, L, C)
                else:
                    out_seq = torch.cat(
                        (kmer_embed, base_means, base_stds, tags), 2
                    )  # (N, L, C)
            else:
                if self.is_signallen and self.is_trace:
                    out_seq = torch.cat(
                        (base_means, base_stds, base_signal_lens, tags), 2
                    )  # (N, L, C)
                elif self.is_signallen:
                    out_seq = torch.cat(
                        (base_means, base_stds, base_signal_lens, tags), 2
                    )  # (N, L, C)
                elif self.is_trace:
                    out_seq = torch.cat((base_means, base_stds, tags), 2)  # (N, L, C)
                else:
                    out_seq = torch.cat((base_means, base_stds, tags), 2)  # (N, L, C)

            out_seq, _ = self.lstm_seq(
                out_seq,
                self.init_hidden(out_seq.size(0), self.num_layers2, self.nhid_seq),
            )  # (N, L, nhid_seq*2)
            out_seq = self.fc_seq(out_seq)  # (N, L, nhid_seq)
            # out_seq = self.dropout_seq(out_seq)
            out_seq = self.relu_seq(out_seq)

        # signal feature ==========================================
        if self.module != "seq_bilstm":
            out_signal = signals.float()
            # resnet ---
            # out_signal = out_signal.transpose(1, 2)  # (N, C, L)
            # out_signal = self.convs(out_signal)  # (N, nhid_signal, L)
            # out_signal = out_signal.transpose(1, 2)  # (N, L, nhid_signal)
            # lstm ---
            out_signal, _ = self.lstm_signal(
                out_signal,
                self.init_hidden(
                    out_signal.size(0), self.num_layers2, self.nhid_signal
                ),
            )
            out_signal = self.fc_signal(out_signal)  # (N, L, nhid_signal)
            # out_signal = self.dropout_signal(out_signal)
            out_signal = self.relu_signal(out_signal)

        # combined ================================================
        if self.module == "seq_bilstm":
            out = out_seq
        elif self.module == "signal_bilstm":
            out = out_signal
        elif self.module == "both_bilstm":
            out = torch.cat((out_seq, out_signal), 2)  # (N, L, hidden_size)
        out, _ = self.lstm_comb(
            out, self.init_hidden(out.size(0), self.num_layers1, self.hidden_size)
        )  # (N, L, hidden_size*2)
        # Take the forward direction's last step and the backward direction's
        # first step as the sequence summary.
        out_fwd_last = out[:, -1, : self.hidden_size]
        out_bwd_last = out[:, 0, self.hidden_size :]
        combine_out = torch.cat((out_fwd_last, out_bwd_last), 1)

        return combine_out


class combineLoss(nn.Module):
    """Weighted cross-entropy over the 3 domain classes, scaled by β.

    Used as the adversarial domain-classification term; `β` balances it
    against the main task loss.

    Args:
        device: CUDA device index used when `use_cuda` is set.
        β: scale factor applied to the loss value.
    """

    def __init__(self, device=0, β=0.1):
        super(combineLoss, self).__init__()
        # Per-class weights for the 3 domain classes (currently uniform).
        weight_rank = torch.from_numpy(np.array([1, 1, 1.0])).float()
        self.device = device
        self.β = β
        if use_cuda:
            weight_rank = weight_rank.cuda(self.device)
        # NOTE: passing one-hot (probability) targets to CrossEntropyLoss
        # requires torch >= 1.10.
        self.loss = nn.CrossEntropyLoss(weight=weight_rank)
        self.project = nn.Sigmoid()  # kept for interface compatibility; unused here

    def forward(self, domain_classes, tags):
        """Return β-scaled cross-entropy of domain logits vs. domain tags.

        Args:
            domain_classes: (batch, 3) domain logits.
            tags: (batch,) integer domain labels in {0, 1, 2}.
        """
        # Build the one-hot targets directly on the logits' device.
        # Fix: the original unconditionally called .cuda(self.device) here,
        # which crashed on CPU-only runs even when use_cuda was False.
        onehot_tags = torch.eye(3, device=domain_classes.device)[tags.long(), :]
        return self.β * self.loss(domain_classes, onehot_tags)


class GradientReverseFunction(torch.autograd.Function):
    """Identity in the forward pass; scales gradients by -coeff going back.

    The core operation of a gradient-reversal layer for adversarial
    domain adaptation.
    """

    @staticmethod
    def forward(ctx, input, coeff=1.0):
        # Stash the scale factor for backward; pass the tensor through untouched.
        ctx.coeff = coeff
        return input.view_as(input)

    @staticmethod
    def backward(ctx, grad_output):
        # Negate and scale the incoming gradient; no gradient w.r.t. coeff.
        return -ctx.coeff * grad_output, None


class GradientReverseLayer(nn.Module):
    """nn.Module wrapper around GradientReverseFunction with a fixed coeff."""

    def __init__(self, coeff=1.0):
        super(GradientReverseLayer, self).__init__()
        # Gradient scale applied (negated) during backprop.
        self.coeff = coeff

    def forward(self, input):
        # Forward is the identity; the reversal happens only in backward.
        return GradientReverseFunction.apply(input, self.coeff)


class Classifier1(nn.Module):
    """Two-layer MLP head: (2*hidden_size) -> hidden_size -> num_classes.

    forward() returns both the raw logits and their softmax probabilities.
    """

    def __init__(self, dropout_rate=0.5, hidden_size=256, num_classes=2, device=0):
        super(Classifier1, self).__init__()
        self.device = device

        # First stage halves the bidirectional feature width.
        self.dropout = nn.Dropout(p=dropout_rate)
        self.fc = nn.Linear(hidden_size * 2, hidden_size)  # 2 for bidirection
        # Second stage maps down to class logits.
        self.dropout1 = nn.Dropout(p=dropout_rate)
        self.fc1 = nn.Linear(hidden_size, num_classes)

        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(1)

    def forward(self, combine_out):
        """Decode combined features into (logits, probabilities)."""
        hidden = self.relu(self.fc(self.dropout(combine_out)))
        logits = self.fc1(self.dropout1(hidden))
        return logits, self.softmax(logits)


class Classifier2(nn.Module):
    """Adversarial domain-classifier head behind a gradient-reversal layer.

    Same MLP shape as Classifier1, but prefixed with a GradientReverseLayer
    and returning raw logits only (no softmax).
    """

    def __init__(self, dropout_rate=0.5, hidden_size=256, num_classes=3, device=0):
        super(Classifier2, self).__init__()
        self.device = device
        # Reverses gradients flowing back into the shared feature extractor.
        self.grl = GradientReverseLayer()
        self.dropout = nn.Dropout(p=dropout_rate)
        self.fc = nn.Linear(hidden_size * 2, hidden_size)  # 2 for bidirection
        self.dropout1 = nn.Dropout(p=dropout_rate)
        self.fc1 = nn.Linear(hidden_size, num_classes)

        self.relu = nn.ReLU()
        # self.softmax = nn.Softmax(1)

    def forward(self, combine_out):
        """Decode gradient-reversed features into domain logits."""
        reversed_feats = self.grl(combine_out)
        hidden = self.relu(self.fc(self.dropout(reversed_feats)))
        return self.fc1(self.dropout1(hidden))


class ModelCNN(nn.Module):
    """CNN variant of the two-branch model: Conv1d layers replace the LSTMs.

    forward() returns ``(logits, softmax(logits))``.

    NOTE(review): the Conv1d layers treat seq_len as the channel dimension
    and the subsequent reshapes only line up for the default sizes
    (seq_len=13, signal_len=16, hidden_size=256, embedding_size=4,
    sigfea_num=3); other configurations will fail at runtime — verify
    before reuse.
    """

    def __init__(
        self,
        seq_len=13,
        signal_len=16,
        num_layers1=3,
        num_layers2=1,
        num_classes=2,
        dropout_rate=0.5,
        hidden_size=256,
        vocab_size=16,
        embedding_size=4,
        is_base=True,
        is_signallen=True,
        is_trace=False,
        module="both_bilstm",
        device=0,
    ):
        super(ModelCNN, self).__init__()
        self.device = device

        self.seq_len = seq_len
        self.signal_len = signal_len
        self.num_layers1 = num_layers1  # for combined (seq+signal) feature
        self.num_layers2 = num_layers2  # for seq and signal feature separately
        self.num_classes = num_classes

        self.hidden_size = hidden_size

        # Split the combined hidden width between the two branches.
        self.nhid_seq = self.hidden_size // 2
        self.nhid_signal = self.hidden_size - self.nhid_seq

        # seq feature
        self.embed = nn.Embedding(vocab_size, embedding_size)  # for dna/rna base
        self.is_base = is_base
        self.is_signallen = is_signallen
        self.is_trace = is_trace
        self.sigfea_num = 3 if self.is_signallen else 2
        # (batch_size,seq_len,embedding_size+sigfea_num)
        # Output-channel count chosen so the conv output has exactly
        # 2*nhid_seq*hidden_size elements per sample (see reshape in forward).
        cnn_seq_out = int((2 * self.nhid_seq * self.hidden_size) / 4)
        self.cnn_seq = nn.Conv1d(self.seq_len, cnn_seq_out, 4)
        self.fc_seq = nn.Linear(self.nhid_seq * 2, self.nhid_seq)
        # self.dropout_seq = nn.Dropout(p=dropout_rate)
        self.relu_seq = nn.ReLU()

        # signal feature

        # self.convs = ResNet3(self.nhid_signal, (1, 1, 1), self.signal_len, self.signal_len)  # (N, C, L)
        cnn_signal_out = int((2 * self.nhid_signal * self.hidden_size) / 8)
        self.cnn_signal = nn.Conv1d(self.seq_len, cnn_signal_out, 9)
        self.fc_signal = nn.Linear(self.nhid_signal * 2, self.nhid_signal)
        # self.dropout_signal = nn.Dropout(p=dropout_rate)
        self.relu_signal = nn.ReLU()

        # combined
        self.cnn_comb = nn.Conv1d(self.hidden_size, self.hidden_size * 2, 1)
        self.dropout1 = nn.Dropout(p=dropout_rate)
        self.fc1 = nn.Linear(hidden_size * 2, hidden_size)  # 2 for bidirection
        self.dropout2 = nn.Dropout(p=dropout_rate)
        self.fc2 = nn.Linear(hidden_size, num_classes)

        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(1)

    def forward(self, kmer, base_means, base_stds, base_signal_lens, signals):
        """Run both CNN branches and the combining conv, return (logits, probs)."""
        # seq feature ============================================
        base_means = torch.reshape(base_means, (-1, self.seq_len, 1)).float()
        base_stds = torch.reshape(base_stds, (-1, self.seq_len, 1)).float()
        base_signal_lens = torch.reshape(
            base_signal_lens, (-1, self.seq_len, 1)
        ).float()
        # base_probs = torch.reshape(base_probs, (-1, self.seq_len, 1)).float()
        kmer_embed = self.embed(kmer.long())
        out_seq = torch.cat(
            (kmer_embed, base_means, base_stds, base_signal_lens), 2
        )  # (N, L, C)

        out_seq = self.cnn_seq(out_seq)  # (N, L, nhid_seq*2)
        # NOTE(review): conv runs over the feature axis (seq_len as channels);
        # this reshape relies on element count matching for default dims only.
        out_seq = torch.reshape(
            out_seq, (-1, self.nhid_seq * 2, self.hidden_size)
        ).float()
        out_seq = self.fc_seq(out_seq)  # (N, L, nhid_seq)
        # out_seq = self.dropout_seq(out_seq)
        out_seq = self.relu_seq(out_seq)

        # signal feature ==========================================
        out_signal = signals.float()
        # resnet ---
        # out_signal = out_signal.transpose(1, 2)  # (N, C, L)
        # out_signal = self.convs(out_signal)  # (N, nhid_signal, L)
        # out_signal = out_signal.transpose(1, 2)  # (N, L, nhid_signal)
        # lstm ---
        out_signal = self.cnn_signal(out_signal)
        # NOTE(review): same element-count-only reshape as the seq branch.
        out_signal = torch.reshape(
            out_signal, (-1, self.nhid_signal * 2, self.hidden_size)
        ).float()
        out_signal = self.fc_signal(out_signal)  # (N, L, nhid_signal)
        # out_signal = self.dropout_signal(out_signal)
        out_signal = self.relu_signal(out_signal)

        # combined ================================================
        out = torch.cat((out_seq, out_signal), 2)  # (N, L, hidden_size)
        out = self.cnn_comb(out)  # (N, L, hidden_size*2)
        out = torch.reshape(out, (-1, self.hidden_size, self.hidden_size * 2)).float()
        # NOTE(review): indexing mimics the BiLSTM "forward last / backward
        # first" summary, but a conv output has no such directional semantics
        # — confirm this is the intended pooling.
        out_fwd_last = out[:, -1, : self.hidden_size]
        out_bwd_last = out[:, 0, self.hidden_size :]
        out = torch.cat((out_fwd_last, out_bwd_last), 1)

        # decode
        out = self.dropout1(out)
        out = self.fc1(out)
        out = self.relu(out)
        out = self.dropout2(out)
        out = self.fc2(out)

        return out, self.softmax(out)


class ModelCG(nn.Module):
    """BiLSTM modification caller whose sequence branch also consumes
    per-base ``tags`` and ``cg_contents`` channels.

    Branches (selected by ``module``): a sequence BiLSTM over per-base
    k-mer features, a signal BiLSTM over per-base raw-signal windows, and
    a combining BiLSTM whose last forward / first backward hidden states
    are decoded into ``num_classes`` logits by two FC layers.

    ``forward`` returns ``(logits, softmax(logits))``.
    """

    def __init__(
        self,
        seq_len=13,
        signal_len=16,
        num_layers1=3,
        num_layers2=1,
        num_classes=2,
        dropout_rate=0.5,
        hidden_size=256,
        vocab_size=16,
        embedding_size=4,
        is_base=True,
        is_signallen=True,
        is_trace=False,
        module="both_bilstm",
        device=0,
    ):
        super(ModelCG, self).__init__()
        self.model_type = "BiLSTM"
        self.module = module
        self.device = device

        self.seq_len = seq_len
        self.signal_len = signal_len
        self.num_layers1 = num_layers1  # for combined (seq+signal) feature
        self.num_layers2 = num_layers2  # for seq and signal feature separately
        self.num_classes = num_classes

        self.hidden_size = hidden_size

        # Split the hidden budget between the two branches when both are used.
        if self.module == "both_bilstm":
            self.nhid_seq = self.hidden_size // 2
            self.nhid_signal = self.hidden_size - self.nhid_seq
        elif self.module == "seq_bilstm":
            self.nhid_seq = self.hidden_size
        elif self.module == "signal_bilstm":
            self.nhid_signal = self.hidden_size
        else:
            raise ValueError("--model_type is not right!")

        # seq feature
        if self.module != "signal_bilstm":
            self.embed = nn.Embedding(vocab_size, embedding_size)  # for dna/rna base
            self.is_base = is_base
            self.is_signallen = is_signallen
            self.is_trace = is_trace
            # 5 = means + stds + signal_lens + tags + cg_contents; 4 drops signal_lens.
            self.sigfea_num = 5 if self.is_signallen else 4
            # NOTE(review): is_trace reserves one more input channel, but forward()
            # never concatenates a trace feature -- confirm before using is_trace=True.
            if self.is_trace:
                self.sigfea_num += 1
            if self.is_base:
                self.lstm_seq = nn.LSTM(
                    embedding_size + self.sigfea_num,
                    self.nhid_seq,
                    self.num_layers2,
                    dropout=dropout_rate,
                    batch_first=True,
                    bidirectional=True,
                )
                # (batch_size,seq_len,hidden_size*2)
            else:
                self.lstm_seq = nn.LSTM(
                    self.sigfea_num,
                    self.nhid_seq,
                    self.num_layers2,
                    dropout=dropout_rate,
                    batch_first=True,
                    bidirectional=True,
                )
            self.fc_seq = nn.Linear(self.nhid_seq * 2, self.nhid_seq)
            # self.dropout_seq = nn.Dropout(p=dropout_rate)
            self.relu_seq = nn.ReLU()

        # signal feature
        if self.module != "seq_bilstm":
            # self.convs = ResNet3(self.nhid_signal, (1, 1, 1), self.signal_len, self.signal_len)  # (N, C, L)
            self.lstm_signal = nn.LSTM(
                self.signal_len,
                self.nhid_signal,
                self.num_layers2,
                dropout=dropout_rate,
                batch_first=True,
                bidirectional=True,
            )
            self.fc_signal = nn.Linear(self.nhid_signal * 2, self.nhid_signal)
            # self.dropout_signal = nn.Dropout(p=dropout_rate)
            self.relu_signal = nn.ReLU()

        # combined
        self.lstm_comb = nn.LSTM(
            self.hidden_size,
            self.hidden_size,
            self.num_layers1,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        self.dropout1 = nn.Dropout(p=dropout_rate)
        self.fc1 = nn.Linear(hidden_size * 2, hidden_size)  # 2 for bidirection
        self.dropout2 = nn.Dropout(p=dropout_rate)
        self.fc2 = nn.Linear(hidden_size, num_classes)

        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(1)

    def get_model_type(self):
        """Return the model-type label ("BiLSTM")."""
        return self.model_type

    def init_hidden(self, batch_size, num_layers, hidden_size):
        """Random initial (h0, c0); first dim is num_layers*2 for bidirectional LSTMs."""
        # Set initial states
        h0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size))
        c0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size))
        if use_cuda:
            h0 = h0.cuda(self.device)
            c0 = c0.cuda(self.device)
        return h0, c0

    def forward(
        self, kmer, base_means, base_stds, base_signal_lens, signals, tags, cg_contents
    ):
        """Return (logits, softmax probabilities) for a batch.

        Per-base inputs are reshaped to (N, seq_len, 1); `signals` is fed to
        the signal LSTM as-is -- presumably (N, seq_len, signal_len), TODO confirm.
        """
        # seq feature ============================================
        if self.module != "signal_bilstm":
            base_means = torch.reshape(base_means, (-1, self.seq_len, 1)).float()
            base_stds = torch.reshape(base_stds, (-1, self.seq_len, 1)).float()
            base_signal_lens = torch.reshape(
                base_signal_lens, (-1, self.seq_len, 1)
            ).float()
            tags = torch.reshape(tags, (-1, self.seq_len, 1)).float()
            cg_contents = torch.reshape(cg_contents, (-1, self.seq_len, 1)).float()
            # base_probs = torch.reshape(base_probs, (-1, self.seq_len, 1)).float()
            # NOTE(review): in each pair below, the is_trace and non-trace
            # branches are byte-identical -- no trace channel is concatenated.
            if self.is_base:
                kmer_embed = self.embed(kmer.long())
                if self.is_signallen and self.is_trace:
                    out_seq = torch.cat(
                        (
                            kmer_embed,
                            base_means,
                            base_stds,
                            base_signal_lens,
                            tags,
                            cg_contents,
                        ),
                        2,
                    )  # (N, L, C)
                elif self.is_signallen:
                    out_seq = torch.cat(
                        (
                            kmer_embed,
                            base_means,
                            base_stds,
                            base_signal_lens,
                            tags,
                            cg_contents,
                        ),
                        2,
                    )  # (N, L, C)
                elif self.is_trace:
                    out_seq = torch.cat(
                        (kmer_embed, base_means, base_stds, tags, cg_contents), 2
                    )  # (N, L, C)
                else:
                    out_seq = torch.cat(
                        (kmer_embed, base_means, base_stds, tags, cg_contents), 2
                    )  # (N, L, C)
            else:
                if self.is_signallen and self.is_trace:
                    out_seq = torch.cat(
                        (base_means, base_stds, base_signal_lens, tags, cg_contents), 2
                    )  # (N, L, C)
                elif self.is_signallen:
                    out_seq = torch.cat(
                        (base_means, base_stds, base_signal_lens, tags, cg_contents), 2
                    )  # (N, L, C)
                elif self.is_trace:
                    out_seq = torch.cat(
                        (base_means, base_stds, tags, cg_contents), 2
                    )  # (N, L, C)
                else:
                    out_seq = torch.cat(
                        (base_means, base_stds, tags, cg_contents), 2
                    )  # (N, L, C)

            out_seq, _ = self.lstm_seq(
                out_seq,
                self.init_hidden(out_seq.size(0), self.num_layers2, self.nhid_seq),
            )  # (N, L, nhid_seq*2)
            out_seq = self.fc_seq(out_seq)  # (N, L, nhid_seq)
            # out_seq = self.dropout_seq(out_seq)
            out_seq = self.relu_seq(out_seq)

        # signal feature ==========================================
        if self.module != "seq_bilstm":
            out_signal = signals.float()
            # resnet ---
            # out_signal = out_signal.transpose(1, 2)  # (N, C, L)
            # out_signal = self.convs(out_signal)  # (N, nhid_signal, L)
            # out_signal = out_signal.transpose(1, 2)  # (N, L, nhid_signal)
            # lstm ---
            out_signal, _ = self.lstm_signal(
                out_signal,
                self.init_hidden(
                    out_signal.size(0), self.num_layers2, self.nhid_signal
                ),
            )
            out_signal = self.fc_signal(out_signal)  # (N, L, nhid_signal)
            # out_signal = self.dropout_signal(out_signal)
            out_signal = self.relu_signal(out_signal)

        # combined ================================================
        if self.module == "seq_bilstm":
            out = out_seq
        elif self.module == "signal_bilstm":
            out = out_signal
        elif self.module == "both_bilstm":
            out = torch.cat((out_seq, out_signal), 2)  # (N, L, hidden_size)
        out, _ = self.lstm_comb(
            out, self.init_hidden(out.size(0), self.num_layers1, self.hidden_size)
        )  # (N, L, hidden_size*2)
        # Last step of the forward direction + first step of the backward one.
        out_fwd_last = out[:, -1, : self.hidden_size]
        out_bwd_last = out[:, 0, self.hidden_size :]
        out = torch.cat((out_fwd_last, out_bwd_last), 1)

        # decode
        out = self.dropout1(out)
        out = self.fc1(out)
        out = self.relu(out)
        out = self.dropout2(out)
        out = self.fc2(out)

        return out, self.softmax(out)


class ModelCombine(nn.Module):
    """BiLSTM modification caller where ``tags`` and ``cg_contents`` are
    per-read scalar features appended AFTER the combining LSTM (hence
    ``fc1`` takes ``hidden_size * 2 + 2`` inputs), rather than per-base
    channels as in ModelCG.

    ``forward`` returns ``(logits, softmax(logits))``.
    """

    def __init__(
        self,
        seq_len=13,
        signal_len=16,
        num_layers1=3,
        num_layers2=1,
        num_classes=2,
        dropout_rate=0.5,
        hidden_size=256,
        vocab_size=16,
        embedding_size=4,
        is_base=True,
        is_signallen=True,
        is_trace=False,
        module="both_bilstm",
        device=0,
    ):
        super(ModelCombine, self).__init__()
        self.model_type = "BiLSTM"
        self.module = module
        self.device = device

        self.seq_len = seq_len
        self.signal_len = signal_len
        self.num_layers1 = num_layers1  # for combined (seq+signal) feature
        self.num_layers2 = num_layers2  # for seq and signal feature separately
        self.num_classes = num_classes

        self.hidden_size = hidden_size

        # Split the hidden budget between the two branches when both are used.
        if self.module == "both_bilstm":
            self.nhid_seq = self.hidden_size // 2
            self.nhid_signal = self.hidden_size - self.nhid_seq
        elif self.module == "seq_bilstm":
            self.nhid_seq = self.hidden_size
        elif self.module == "signal_bilstm":
            self.nhid_signal = self.hidden_size
        else:
            raise ValueError("--model_type is not right!")

        # seq feature
        if self.module != "signal_bilstm":
            self.embed = nn.Embedding(vocab_size, embedding_size)  # for dna/rna base
            self.is_base = is_base
            self.is_signallen = is_signallen
            self.is_trace = is_trace
            # 3 = means + stds + signal_lens; 2 drops signal_lens.
            self.sigfea_num = 3 if self.is_signallen else 2
            # NOTE(review): is_trace reserves one more input channel, but forward()
            # never concatenates a trace feature -- confirm before using is_trace=True.
            if self.is_trace:
                self.sigfea_num += 1
            if self.is_base:
                self.lstm_seq = nn.LSTM(
                    embedding_size + self.sigfea_num,
                    self.nhid_seq,
                    self.num_layers2,
                    dropout=dropout_rate,
                    batch_first=True,
                    bidirectional=True,
                )
                # (batch_size,seq_len,hidden_size*2)
            else:
                self.lstm_seq = nn.LSTM(
                    self.sigfea_num,
                    self.nhid_seq,
                    self.num_layers2,
                    dropout=dropout_rate,
                    batch_first=True,
                    bidirectional=True,
                )
            self.fc_seq = nn.Linear(self.nhid_seq * 2, self.nhid_seq)
            # self.dropout_seq = nn.Dropout(p=dropout_rate)
            self.relu_seq = nn.ReLU()

        # signal feature
        if self.module != "seq_bilstm":
            # self.convs = ResNet3(self.nhid_signal, (1, 1, 1), self.signal_len, self.signal_len)  # (N, C, L)
            self.lstm_signal = nn.LSTM(
                self.signal_len,
                self.nhid_signal,
                self.num_layers2,
                dropout=dropout_rate,
                batch_first=True,
                bidirectional=True,
            )
            self.fc_signal = nn.Linear(self.nhid_signal * 2, self.nhid_signal)
            # self.dropout_signal = nn.Dropout(p=dropout_rate)
            self.relu_signal = nn.ReLU()

        # combined
        self.lstm_comb = nn.LSTM(
            self.hidden_size,
            self.hidden_size,
            self.num_layers1,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        self.dropout1 = nn.Dropout(p=dropout_rate)
        # +2 for the appended per-read tags and cg_contents scalars.
        self.fc1 = nn.Linear(hidden_size * 2 + 2, hidden_size)  # 2 for bidirection
        self.dropout2 = nn.Dropout(p=dropout_rate)
        self.fc2 = nn.Linear(hidden_size, num_classes)

        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(1)

    def get_model_type(self):
        """Return the model-type label ("BiLSTM")."""
        return self.model_type

    def init_hidden(self, batch_size, num_layers, hidden_size):
        """Random initial (h0, c0); first dim is num_layers*2 for bidirectional LSTMs."""
        # Set initial states
        h0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size))
        c0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size))
        if use_cuda:
            h0 = h0.cuda(self.device)
            c0 = c0.cuda(self.device)
        return h0, c0

    def forward(
        self, kmer, base_means, base_stds, base_signal_lens, signals, tags, cg_contents
    ):
        """Return (logits, softmax probabilities) for a batch.

        `tags` / `cg_contents` are only cast to float and concatenated on
        dim 1 after the combining LSTM -- assumes shape (N, 1) each; TODO confirm.
        """
        # seq feature ============================================
        if self.module != "signal_bilstm":
            base_means = torch.reshape(base_means, (-1, self.seq_len, 1)).float()
            base_stds = torch.reshape(base_stds, (-1, self.seq_len, 1)).float()
            base_signal_lens = torch.reshape(
                base_signal_lens, (-1, self.seq_len, 1)
            ).float()
            tags = tags.float()
            cg_contents = cg_contents.float()

            # base_probs = torch.reshape(base_probs, (-1, self.seq_len, 1)).float()
            # NOTE(review): in each pair below, the is_trace and non-trace
            # branches are byte-identical -- no trace channel is concatenated.
            if self.is_base:
                kmer_embed = self.embed(kmer.long())
                if self.is_signallen and self.is_trace:
                    out_seq = torch.cat(
                        (kmer_embed, base_means, base_stds, base_signal_lens), 2
                    )  # (N, L, C)
                elif self.is_signallen:
                    out_seq = torch.cat(
                        (kmer_embed, base_means, base_stds, base_signal_lens), 2
                    )  # (N, L, C)
                elif self.is_trace:
                    out_seq = torch.cat(
                        (kmer_embed, base_means, base_stds), 2
                    )  # (N, L, C)
                else:
                    out_seq = torch.cat(
                        (kmer_embed, base_means, base_stds), 2
                    )  # (N, L, C)
            else:
                if self.is_signallen and self.is_trace:
                    out_seq = torch.cat(
                        (base_means, base_stds, base_signal_lens), 2
                    )  # (N, L, C)
                elif self.is_signallen:
                    out_seq = torch.cat(
                        (base_means, base_stds, base_signal_lens), 2
                    )  # (N, L, C)
                elif self.is_trace:
                    out_seq = torch.cat((base_means, base_stds), 2)  # (N, L, C)
                else:
                    out_seq = torch.cat((base_means, base_stds), 2)  # (N, L, C)

            out_seq, _ = self.lstm_seq(
                out_seq,
                self.init_hidden(out_seq.size(0), self.num_layers2, self.nhid_seq),
            )  # (N, L, nhid_seq*2)
            out_seq = self.fc_seq(out_seq)  # (N, L, nhid_seq)
            # out_seq = self.dropout_seq(out_seq)
            out_seq = self.relu_seq(out_seq)

        # signal feature ==========================================
        if self.module != "seq_bilstm":
            out_signal = signals.float()
            # resnet ---
            # out_signal = out_signal.transpose(1, 2)  # (N, C, L)
            # out_signal = self.convs(out_signal)  # (N, nhid_signal, L)
            # out_signal = out_signal.transpose(1, 2)  # (N, L, nhid_signal)
            # lstm ---
            out_signal, _ = self.lstm_signal(
                out_signal,
                self.init_hidden(
                    out_signal.size(0), self.num_layers2, self.nhid_signal
                ),
            )
            out_signal = self.fc_signal(out_signal)  # (N, L, nhid_signal)
            # out_signal = self.dropout_signal(out_signal)
            out_signal = self.relu_signal(out_signal)

        # combined ================================================
        if self.module == "seq_bilstm":
            out = out_seq
        elif self.module == "signal_bilstm":
            out = out_signal
        elif self.module == "both_bilstm":
            out = torch.cat((out_seq, out_signal), 2)  # (N, L, hidden_size)
        out, _ = self.lstm_comb(
            out, self.init_hidden(out.size(0), self.num_layers1, self.hidden_size)
        )  # (N, L, hidden_size*2)
        # Last step of the forward direction + first step of the backward one.
        out_fwd_last = out[:, -1, : self.hidden_size]
        out_bwd_last = out[:, 0, self.hidden_size :]
        out = torch.cat((out_fwd_last, out_bwd_last), 1)
        extrac_fea = torch.cat((tags, cg_contents), 1)
        out = torch.cat((out, extrac_fea), 1)

        # decode
        out = self.dropout1(out)
        out = self.fc1(out)
        out = self.relu(out)
        out = self.dropout2(out)
        out = self.fc2(out)

        return out, self.softmax(out)


class ModelFrequency(nn.Module):
    """BiLSTM modification caller with an extra branch over a
    frequency-domain representation of the raw signal.

    Three encoder branches -- sequence BiLSTM, signal BiLSTM and
    signal-frequency BiLSTM -- are concatenated per base and fed to a
    combining BiLSTM; the last forward / first backward hidden states are
    decoded into ``num_classes`` logits by two FC layers.

    ``forward`` returns ``(logits, softmax(logits))``.
    """

    def __init__(
        self,
        seq_len=13,
        signal_len=16,
        num_layers1=3,
        num_layers2=1,
        num_classes=2,
        dropout_rate=0.5,
        hidden_size=256,
        vocab_size=16,
        embedding_size=4,
        is_base=True,
        is_signallen=True,
        is_trace=False,
        module="both_bilstm",
        device=0,
    ):
        super(ModelFrequency, self).__init__()
        self.model_type = "BiLSTM"
        self.module = module
        self.device = device

        self.seq_len = seq_len
        self.signal_len = signal_len
        self.num_layers1 = num_layers1  # for combined (seq+signal) feature
        self.num_layers2 = num_layers2  # for seq and signal feature separately
        self.num_classes = num_classes

        self.hidden_size = hidden_size

        # Split the hidden budget between the branches when both are used.
        if self.module == "both_bilstm":
            self.nhid_seq = self.hidden_size // 2
            self.nhid_signal = self.hidden_size - self.nhid_seq
        elif self.module == "seq_bilstm":
            self.nhid_seq = self.hidden_size
        elif self.module == "signal_bilstm":
            self.nhid_signal = self.hidden_size
        else:
            raise ValueError("--model_type is not right!")

        # seq feature
        self.embed = nn.Embedding(vocab_size, embedding_size)  # for dna/rna base
        self.is_base = is_base
        self.is_signallen = is_signallen
        self.is_trace = is_trace
        # 3 = means + stds + signal_lens; 2 drops signal_lens.
        # NOTE(review): is_trace reserves one more input channel, but forward()
        # never concatenates a trace feature -- confirm before using is_trace=True.
        self.sigfea_num = 3 if self.is_signallen else 2
        if self.is_trace:
            self.sigfea_num += 1
        if self.is_base:
            self.lstm_seq = nn.LSTM(
                embedding_size + self.sigfea_num,
                self.nhid_seq,
                self.num_layers2,
                dropout=dropout_rate,
                batch_first=True,
                bidirectional=True,
            )
            # (batch_size,seq_len,hidden_size*2)
        else:
            self.lstm_seq = nn.LSTM(
                self.sigfea_num,
                self.nhid_seq,
                self.num_layers2,
                dropout=dropout_rate,
                batch_first=True,
                bidirectional=True,
            )
        self.fc_seq = nn.Linear(self.nhid_seq * 2, self.nhid_seq)
        # self.dropout_seq = nn.Dropout(p=dropout_rate)
        self.relu_seq = nn.ReLU()

        # signal feature
        self.lstm_signal = nn.LSTM(
            self.signal_len,
            self.nhid_signal,
            self.num_layers2,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        self.fc_signal = nn.Linear(self.nhid_signal * 2, self.nhid_signal)
        # self.dropout_signal = nn.Dropout(p=dropout_rate)
        self.relu_signal = nn.ReLU()

        # signal frequency feature
        self.lstm_signal_freq = nn.LSTM(
            self.signal_len,
            self.nhid_signal,
            self.num_layers2,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        self.fc_signal_freq = nn.Linear(self.nhid_signal * 2, self.nhid_signal)
        # self.dropout_signal = nn.Dropout(p=dropout_rate)
        self.relu_signal_freq = nn.ReLU()

        # combined: seq (nhid_seq) + signal (nhid_signal) + freq (nhid_signal)
        self.lstm_comb = nn.LSTM(
            self.hidden_size + self.nhid_signal,
            self.hidden_size,
            self.num_layers1,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        self.dropout1 = nn.Dropout(p=dropout_rate)
        self.fc1 = nn.Linear(hidden_size * 2, hidden_size)  # 2 for bidirection
        self.dropout2 = nn.Dropout(p=dropout_rate)
        self.fc2 = nn.Linear(hidden_size, num_classes)

        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(1)

    def get_model_type(self):
        """Return the model-type label ("BiLSTM")."""
        return self.model_type

    def init_hidden(self, batch_size, num_layers, hidden_size):
        """Random initial (h0, c0); first dim is num_layers*2 for bidirectional LSTMs."""
        # torch.autograd.Variable is a deprecated no-op wrapper since torch 0.4;
        # plain tensors behave identically.
        h0 = torch.randn(num_layers * 2, batch_size, hidden_size)
        c0 = torch.randn(num_layers * 2, batch_size, hidden_size)
        if use_cuda:
            h0 = h0.cuda(self.device)
            c0 = c0.cuda(self.device)
        return h0, c0

    def forward(
        self, kmer, base_means, base_stds, base_signal_lens, signals, signals_freq
    ):
        """Return (logits, softmax probabilities) for a batch.

        Per-base inputs are reshaped to (N, seq_len, 1); `signals` and
        `signals_freq` are fed to their LSTMs as-is -- presumably
        (N, seq_len, signal_len) each, TODO confirm against caller.
        """
        # seq feature ============================================
        base_means = torch.reshape(base_means, (-1, self.seq_len, 1)).float()
        base_stds = torch.reshape(base_stds, (-1, self.seq_len, 1)).float()
        base_signal_lens = torch.reshape(
            base_signal_lens, (-1, self.seq_len, 1)
        ).float()
        # The original four-way branching had byte-identical bodies for the
        # is_trace and non-trace cases, so only is_signallen matters here.
        if self.is_base:
            kmer_embed = self.embed(kmer.long())
            if self.is_signallen:
                out_seq = torch.cat(
                    (kmer_embed, base_means, base_stds, base_signal_lens), 2
                )  # (N, L, C)
            else:
                out_seq = torch.cat((kmer_embed, base_means, base_stds), 2)  # (N, L, C)
        else:
            if self.is_signallen:
                out_seq = torch.cat(
                    (base_means, base_stds, base_signal_lens), 2
                )  # (N, L, C)
            else:
                out_seq = torch.cat((base_means, base_stds), 2)  # (N, L, C)

        out_seq, _ = self.lstm_seq(
            out_seq, self.init_hidden(out_seq.size(0), self.num_layers2, self.nhid_seq)
        )  # (N, L, nhid_seq*2)
        out_seq = self.fc_seq(out_seq)  # (N, L, nhid_seq)
        out_seq = self.relu_seq(out_seq)

        # signal feature ==========================================
        out_signal = signals.float()
        out_signal, _ = self.lstm_signal(
            out_signal,
            self.init_hidden(out_signal.size(0), self.num_layers2, self.nhid_signal),
        )
        out_signal = self.fc_signal(out_signal)  # (N, L, nhid_signal)
        out_signal = self.relu_signal(out_signal)

        # signal frequency feature ================================
        out_signal_freq = signals_freq.float()
        out_signal_freq, _ = self.lstm_signal_freq(
            out_signal_freq,
            self.init_hidden(
                out_signal_freq.size(0), self.num_layers2, self.nhid_signal
            ),
        )
        out_signal_freq = self.fc_signal_freq(out_signal_freq)
        # Fix: use the frequency branch's own activation (was self.relu_signal;
        # behavior is unchanged since nn.ReLU is stateless, but the module
        # wiring now matches the other branches).
        out_signal_freq = self.relu_signal_freq(out_signal_freq)

        # combined ================================================
        out = torch.cat(
            (out_seq, out_signal, out_signal_freq), 2
        )  # (N, L, hidden_size + nhid_signal)
        out, _ = self.lstm_comb(
            out, self.init_hidden(out.size(0), self.num_layers1, self.hidden_size)
        )  # (N, L, hidden_size*2)
        # Last step of the forward direction + first step of the backward one.
        out_fwd_last = out[:, -1, : self.hidden_size]
        out_bwd_last = out[:, 0, self.hidden_size :]
        out = torch.cat((out_fwd_last, out_bwd_last), 1)

        # decode
        out = self.dropout1(out)
        out = self.fc1(out)
        out = self.relu(out)
        out = self.dropout2(out)
        out = self.fc2(out)

        return out, self.softmax(out)


class ModelFrequency_mp(nn.Module):
    """BiLSTM modification caller with separate frequency-domain branches
    for signal magnitude and phase.

    Four encoder branches -- sequence, raw signal, frequency magnitude and
    frequency phase -- are concatenated per base and fed to a combining
    BiLSTM; the last forward / first backward hidden states are decoded
    into ``num_classes`` logits by two FC layers.

    ``forward`` returns ``(logits, softmax(logits))``.
    """

    def __init__(
        self,
        seq_len=13,
        signal_len=16,
        num_layers1=3,
        num_layers2=1,
        num_classes=2,
        dropout_rate=0.5,
        hidden_size=256,
        vocab_size=16,
        embedding_size=4,
        is_base=True,
        is_signallen=True,
        is_trace=False,
        module="both_bilstm",
        device=0,
    ):
        super(ModelFrequency_mp, self).__init__()
        self.model_type = "BiLSTM"
        self.module = module
        self.device = device

        self.seq_len = seq_len
        self.signal_len = signal_len
        self.num_layers1 = num_layers1  # for combined (seq+signal) feature
        self.num_layers2 = num_layers2  # for seq and signal feature separately
        self.num_classes = num_classes

        self.hidden_size = hidden_size

        # Split the hidden budget between the branches when both are used.
        if self.module == "both_bilstm":
            self.nhid_seq = self.hidden_size // 2
            self.nhid_signal = self.hidden_size - self.nhid_seq
        elif self.module == "seq_bilstm":
            self.nhid_seq = self.hidden_size
        elif self.module == "signal_bilstm":
            self.nhid_signal = self.hidden_size
        else:
            raise ValueError("--model_type is not right!")

        # seq feature
        self.embed = nn.Embedding(vocab_size, embedding_size)  # for dna/rna base
        self.is_base = is_base
        self.is_signallen = is_signallen
        self.is_trace = is_trace
        # 3 = means + stds + signal_lens; 2 drops signal_lens.
        # NOTE(review): is_trace reserves one more input channel, but forward()
        # never concatenates a trace feature -- confirm before using is_trace=True.
        self.sigfea_num = 3 if self.is_signallen else 2
        if self.is_trace:
            self.sigfea_num += 1
        if self.is_base:
            self.lstm_seq = nn.LSTM(
                embedding_size + self.sigfea_num,
                self.nhid_seq,
                self.num_layers2,
                dropout=dropout_rate,
                batch_first=True,
                bidirectional=True,
            )
            # (batch_size,seq_len,hidden_size*2)
        else:
            self.lstm_seq = nn.LSTM(
                self.sigfea_num,
                self.nhid_seq,
                self.num_layers2,
                dropout=dropout_rate,
                batch_first=True,
                bidirectional=True,
            )
        self.fc_seq = nn.Linear(self.nhid_seq * 2, self.nhid_seq)
        # self.dropout_seq = nn.Dropout(p=dropout_rate)
        self.relu_seq = nn.ReLU()

        # signal feature
        self.lstm_signal = nn.LSTM(
            self.signal_len,
            self.nhid_signal,
            self.num_layers2,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        self.fc_signal = nn.Linear(self.nhid_signal * 2, self.nhid_signal)
        # self.dropout_signal = nn.Dropout(p=dropout_rate)
        self.relu_signal = nn.ReLU()

        # signal frequency Phase feature
        self.lstm_signal_freq_p = nn.LSTM(
            self.signal_len,
            self.nhid_signal,
            self.num_layers2,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        self.fc_signal_freq_p = nn.Linear(self.nhid_signal * 2, self.nhid_signal)
        # self.dropout_signal = nn.Dropout(p=dropout_rate)
        self.relu_signal_freq_p = nn.ReLU()

        # signal frequency Magnitude feature
        self.lstm_signal_freq_m = nn.LSTM(
            self.signal_len,
            self.nhid_signal,
            self.num_layers2,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        self.fc_signal_freq_m = nn.Linear(self.nhid_signal * 2, self.nhid_signal)
        # self.dropout_signal = nn.Dropout(p=dropout_rate)
        self.relu_signal_freq_m = nn.ReLU()

        # combined: nhid_seq + 3*nhid_signal == hidden_size*2 for both_bilstm
        self.lstm_comb = nn.LSTM(
            self.hidden_size * 2,
            self.hidden_size,
            self.num_layers1,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        self.dropout1 = nn.Dropout(p=dropout_rate)
        self.fc1 = nn.Linear(hidden_size * 2, hidden_size)  # 2 for bidirection
        self.dropout2 = nn.Dropout(p=dropout_rate)
        self.fc2 = nn.Linear(hidden_size, num_classes)

        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(1)

    def get_model_type(self):
        """Return the model-type label ("BiLSTM")."""
        return self.model_type

    def init_hidden(self, batch_size, num_layers, hidden_size):
        """Random initial (h0, c0); first dim is num_layers*2 for bidirectional LSTMs."""
        # Set initial states
        h0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size))
        c0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size))
        if use_cuda:
            h0 = h0.cuda(self.device)
            c0 = c0.cuda(self.device)
        return h0, c0

    def forward(
        self, kmer, base_means, base_stds, base_signal_lens, signals, magnitude, phase
    ):
        """Return (logits, softmax probabilities) for a batch.

        Per-base inputs are reshaped to (N, seq_len, 1); `signals`,
        `magnitude` and `phase` are fed to their LSTMs as-is -- presumably
        (N, seq_len, signal_len) each, TODO confirm against caller.
        """
        # seq feature ============================================
        base_means = torch.reshape(base_means, (-1, self.seq_len, 1)).float()
        base_stds = torch.reshape(base_stds, (-1, self.seq_len, 1)).float()
        base_signal_lens = torch.reshape(
            base_signal_lens, (-1, self.seq_len, 1)
        ).float()
        # base_probs = torch.reshape(base_probs, (-1, self.seq_len, 1)).float()
        # NOTE(review): in each pair below, the is_trace and non-trace
        # branches are byte-identical -- no trace channel is concatenated.
        if self.is_base:
            kmer_embed = self.embed(kmer.long())
            if self.is_signallen and self.is_trace:
                out_seq = torch.cat(
                    (kmer_embed, base_means, base_stds, base_signal_lens), 2
                )  # (N, L, C)
            elif self.is_signallen:
                out_seq = torch.cat(
                    (kmer_embed, base_means, base_stds, base_signal_lens), 2
                )  # (N, L, C)
            elif self.is_trace:
                out_seq = torch.cat((kmer_embed, base_means, base_stds), 2)  # (N, L, C)
            else:
                out_seq = torch.cat((kmer_embed, base_means, base_stds), 2)  # (N, L, C)
        else:
            if self.is_signallen and self.is_trace:
                out_seq = torch.cat(
                    (base_means, base_stds, base_signal_lens), 2
                )  # (N, L, C)
            elif self.is_signallen:
                out_seq = torch.cat(
                    (base_means, base_stds, base_signal_lens), 2
                )  # (N, L, C)
            elif self.is_trace:
                out_seq = torch.cat((base_means, base_stds), 2)  # (N, L, C)
            else:
                out_seq = torch.cat((base_means, base_stds), 2)  # (N, L, C)

        out_seq, _ = self.lstm_seq(
            out_seq, self.init_hidden(out_seq.size(0), self.num_layers2, self.nhid_seq)
        )  # (N, L, nhid_seq*2)
        out_seq = self.fc_seq(out_seq)  # (N, L, nhid_seq)
        # out_seq = self.dropout_seq(out_seq)
        out_seq = self.relu_seq(out_seq)

        # signal feature ==========================================
        out_signal = signals.float()
        # resnet ---
        # out_signal = out_signal.transpose(1, 2)  # (N, C, L)
        # out_signal = self.convs(out_signal)  # (N, nhid_signal, L)
        # out_signal = out_signal.transpose(1, 2)  # (N, L, nhid_signal)
        # lstm ---
        out_signal, _ = self.lstm_signal(
            out_signal,
            self.init_hidden(out_signal.size(0), self.num_layers2, self.nhid_signal),
        )
        out_signal = self.fc_signal(out_signal)  # (N, L, nhid_signal)
        # out_signal = self.dropout_signal(out_signal)
        out_signal = self.relu_signal(out_signal)

        # signal frequency magnitude feature ======================
        out_signal_freq_m = magnitude.float()
        out_signal_freq_m, _ = self.lstm_signal_freq_m(
            out_signal_freq_m,
            self.init_hidden(
                out_signal_freq_m.size(0), self.num_layers2, self.nhid_signal
            ),
        )
        out_signal_freq_m = self.fc_signal_freq_m(out_signal_freq_m)
        out_signal_freq_m = self.relu_signal_freq_m(out_signal_freq_m)

        # signal frequency phase feature ==========================
        out_signal_freq_p = phase.float()
        out_signal_freq_p, _ = self.lstm_signal_freq_p(
            out_signal_freq_p,
            self.init_hidden(
                out_signal_freq_p.size(0), self.num_layers2, self.nhid_signal
            ),
        )
        out_signal_freq_p = self.fc_signal_freq_p(out_signal_freq_p)
        out_signal_freq_p = self.relu_signal_freq_p(out_signal_freq_p)

        # combined ================================================
        out = torch.cat(
            (out_seq, out_signal, out_signal_freq_m, out_signal_freq_p), 2
        )  # (N, L, hidden_size*2)
        out, _ = self.lstm_comb(
            out, self.init_hidden(out.size(0), self.num_layers1, self.hidden_size)
        )  # (N, L, hidden_size*2)
        # Last step of the forward direction + first step of the backward one.
        out_fwd_last = out[:, -1, : self.hidden_size]
        out_bwd_last = out[:, 0, self.hidden_size :]
        out = torch.cat((out_fwd_last, out_bwd_last), 1)

        # decode
        out = self.dropout1(out)
        out = self.fc1(out)
        out = self.relu(out)
        out = self.dropout2(out)
        out = self.fc2(out)

        return out, self.softmax(out)

class moving_avg(nn.Module):
    """Moving average over the time axis with edge-replication padding.

    The first and last time steps are replicated (kernel_size - 1) // 2
    times on each side, so with stride == 1 the smoothed output keeps the
    input's sequence length.
    """

    def __init__(self, kernel_size, stride):
        super(moving_avg, self).__init__()
        self.kernel_size = kernel_size
        self.avg = nn.AvgPool1d(kernel_size=kernel_size, stride=stride, padding=0)

    def forward(self, x):
        # x: (N, L, C) — replicate the boundary steps to pad the sequence.
        pad = (self.kernel_size - 1) // 2
        head = x[:, 0:1, :].repeat(1, pad, 1)
        tail = x[:, -1:, :].repeat(1, pad, 1)
        padded = torch.cat([head, x, tail], dim=1)
        # AvgPool1d pools the last dim, so move time to the end and back.
        smoothed = self.avg(padded.permute(0, 2, 1))
        return smoothed.permute(0, 2, 1)

class series_decomp(nn.Module):
    """Split a series into (residual, trend) via a moving average."""

    def __init__(self, kernel_size):
        super(series_decomp, self).__init__()
        # stride 1 keeps the trend the same length as the input
        self.moving_avg = moving_avg(kernel_size, stride=1)

    def forward(self, x):
        trend = self.moving_avg(x)
        residual = x - trend
        return residual, trend
    
class ModelDLinear(nn.Module):
    """BiLSTM classifier with a DLinear-style signal branch.

    The k-mer branch runs embeddings plus three per-base statistics through
    a BiLSTM; the signal branch decomposes raw signals into residual +
    trend (series_decomp) and projects them linearly; the concatenated
    features go through a combining BiLSTM whose forward-last /
    backward-first states feed an MLP classification head.
    """

    def __init__(self, seq_len=13, signal_len=16, num_layers1=3, num_layers2=1, num_classes=2, dropout_rate=0.5, 
                 hidden_size=256, vocab_size=16, embedding_size=4, is_base=True, is_signallen=True, is_trace=False, 
                 module="both_bilstm"):
        super(ModelDLinear, self).__init__()
        self.model_type = "BiLSTM"
        self.module = module

        self.seq_len = seq_len
        self.signal_len = signal_len
        self.num_layers1 = num_layers1
        self.num_layers2 = num_layers2
        self.num_classes = num_classes

        self.hidden_size = hidden_size
        # Split the combined width between the two branches.
        self.nhid_seq = self.hidden_size // 2
        self.nhid_signal = self.hidden_size - self.nhid_seq

        # sequence features
        self.embed = nn.Embedding(vocab_size, embedding_size)
        self.is_base = is_base
        self.is_signallen = is_signallen
        self.is_trace = is_trace
        self.sigfea_num = 3 if self.is_signallen else 2

        # NOTE(review): with num_layers2 == 1 the LSTM `dropout` argument
        # has no effect (PyTorch warns) — confirm intended.
        self.lstm_seq = nn.LSTM(
            embedding_size + self.sigfea_num,
            self.nhid_seq,
            self.num_layers2,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        self.lstm_seq.flatten_parameters()
        self.fc_seq = nn.Linear(self.nhid_seq * 2, self.nhid_seq)
        self.relu_seq = nn.ReLU()

        # signal features
        self.decomp = series_decomp(kernel_size=3)
        # Projection: map decomposed features (2 * signal_len) to nhid_signal.
        self.signal_projection = nn.Linear(2 * self.signal_len, self.nhid_signal)
        self.relu_signal = nn.ReLU()

        # signal prediction head (only used when forward(..., training=True))
        self.signal_pred = nn.Linear(2 * self.signal_len, self.signal_len)

        # combined features
        self.lstm_comb = nn.LSTM(
            self.hidden_size,
            self.hidden_size,
            self.num_layers1,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        self.lstm_comb.flatten_parameters()
        self.dropout1 = nn.Dropout(p=dropout_rate)
        self.fc1 = nn.Linear(hidden_size * 2, hidden_size)
        self.dropout2 = nn.Dropout(p=dropout_rate)
        self.fc2 = nn.Linear(hidden_size, num_classes)

        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(dim=1)

    def get_model_type(self):
        """Return the model-type tag ("BiLSTM")."""
        return self.model_type

    def init_hidden(self, batch_size, num_layers, hidden_size, device):
        """Build (h0, c0) for a bidirectional LSTM (hence num_layers * 2).

        NOTE(review): random (not zero) initial states make inference
        non-deterministic — confirm intended.
        """
        h0 = torch.randn(num_layers * 2, batch_size, hidden_size, device=device, dtype=torch.float32)
        c0 = torch.randn(num_layers * 2, batch_size, hidden_size, device=device, dtype=torch.float32)
        return h0, c0

    def forward(self, kmer, base_means, base_stds, base_signal_lens, signals, training=False):
        """Return (logits, out_seq, out_signal, softmax(logits), pred_signal);
        pred_signal is None unless training=True."""
        # sequence feature processing
        kmer_embed = self.embed(kmer.long())
        batch_size, seq_len, embedding_size = kmer_embed.shape

        # Hard-codes 3 extra per-base features, so this path assumes
        # sigfea_num == 3 (is_signallen=True).  The stats are assumed to
        # broadcast into an (N, L, 1) slice — TODO confirm caller shapes.
        out_seq = torch.empty(batch_size, seq_len, embedding_size + 3, device=kmer_embed.device, dtype=torch.float32)
        out_seq[:, :, :embedding_size] = kmer_embed
        out_seq[:, :, embedding_size:embedding_size + 1] = base_means
        out_seq[:, :, embedding_size + 1:embedding_size + 2] = base_stds
        out_seq[:, :, embedding_size + 2:embedding_size + 3] = base_signal_lens

        out_seq, _ = self.lstm_seq(out_seq, self.init_hidden(batch_size, self.num_layers2, self.nhid_seq, kmer_embed.device))
        out_seq = self.fc_seq(out_seq)
        out_seq = self.relu_seq(out_seq)

        # signal feature processing: residual + trend, concatenated
        res, trend = self.decomp(signals.float())
        lstm_input = torch.cat([res, trend], dim=-1)  # [batch_size, seq_len, 2*signal_len]
        out_signal = self.signal_projection(lstm_input)  # [batch_size, seq_len, nhid_signal]
        out_signal = self.relu_signal(out_signal)

        # signal prediction (training only)
        pred_signal = None
        if training:
            split_idx = self.seq_len // 2
            pred_input = lstm_input[:, :split_idx, :]  # first seq_len // 2 positions
            pred_signal = self.signal_pred(pred_input)  # predict the remaining signals

        # combined features
        hidden_size = self.nhid_seq + self.nhid_signal
        out = torch.empty(batch_size, seq_len, hidden_size, device=kmer_embed.device, dtype=torch.float32)
        out[:, :, :self.nhid_seq] = out_seq
        out[:, :, self.nhid_seq:] = out_signal

        out, _ = self.lstm_comb(out, self.init_hidden(batch_size, self.num_layers1, hidden_size, kmer_embed.device))
        
        # Concatenate the forward direction's last step with the backward
        # direction's first step.
        out_final = torch.empty(batch_size, 2 * hidden_size, device=kmer_embed.device, dtype=torch.float32)
        out_final[:, :hidden_size] = out[:, -1, :hidden_size]
        out_final[:, hidden_size:] = out[:, 0, hidden_size:]

        # decode
        out = self.dropout1(out_final)
        out = self.fc1(out)
        out = self.relu(out)
        out = self.dropout2(out)
        out = self.fc2(out)

        return out, out_seq, out_signal, self.softmax(out), pred_signal
    










class CatchConfig:
    """Hyper-parameter container for the CATCH model.

    Every setting has a default; keyword arguments override them, and
    unrecognized keywords are silently ignored (same as the original
    kwargs.get behaviour).
    """

    def __init__(self, **kwargs):
        defaults = {
            'seq_len': 21,
            'c_in': 15,
            'patch_size': 4,
            'patch_stride': 1,
            'e_layers': 3,
            'n_heads': 2,
            'cf_dim': 64,
            'd_ff': 256,
            'd_model': 128,
            'head_dim': 64,
            'dropout': 0.2,
            'head_dropout': 0.1,
            'regular_lambda': 0.5,
            'temperature': 0.07,
            'individual': 0,
            'auxi_loss': 'MAE',
            'auxi_type': 'complex',
            'auxi_mode': 'fft',
            'auxi_lambda': 0.005,
            'dc_lambda': 0.005,
            'revin': 1,
            'affine': 0,
            'subtract_last': 0,
        }
        for key, default in defaults.items():
            setattr(self, key, kwargs.get(key, default))

# 动态对比损失
class DynamicalContrastiveLoss(torch.nn.Module):
    def __init__(self, temperature=0.5, k=0.3):
        super(DynamicalContrastiveLoss, self).__init__()
        self.temperature = temperature
        self.k = k

    def _stable_scores(self, scores):
        max_scores = torch.max(scores, dim=-1)[0].unsqueeze(-1)
        stable_scores = scores - max_scores
        return stable_scores

    def forward(self, scores, attn_mask, norm_matrix):
        b = scores.shape[0]
        n_vars = scores.shape[-1]

        cosine = (scores / norm_matrix).mean(1)
        pos_scores = torch.exp(cosine / self.temperature) * attn_mask

        all_scores = torch.exp(cosine / self.temperature)

        clustering_loss = -torch.log(pos_scores.sum(dim=-1) / all_scores.sum(dim=-1))

        eye = torch.eye(attn_mask.shape[-1]).unsqueeze(0).repeat(b, 1, 1).to(attn_mask.device)
        regular_loss = 1 / (n_vars * (n_vars - 1)) * torch.norm(eye.reshape(b, -1) - attn_mask.reshape((b, -1)),
                                                                p=1, dim=-1)
        loss = clustering_loss.mean(1) + self.k * regular_loss

        mean_loss = loss.mean()
        return mean_loss

# Frequency-domain auxiliary loss
class frequency_loss(nn.Module):
    """Penalize the gap between a predicted spectrum and the FFT of the
    ground-truth signals, compared on the real parts only."""

    def __init__(self, configs):
        super().__init__()
        # configs must expose auxi_loss ('MAE', anything else means MSE)
        # and auxi_lambda (loss weight).
        self.auxi_loss = configs.auxi_loss
        self.auxi_lambda = configs.auxi_lambda

    def forward(self, complex_z, signals):
        target = torch.fft.fft(signals, dim=1)
        residual = complex_z.real - target.real
        if self.auxi_loss == 'MAE':
            loss = residual.abs().mean()
        else:
            loss = residual.pow(2).mean()
        return loss * self.auxi_lambda

# Pre-layer normalization wrapper
class PreNorm(nn.Module):
    """Apply LayerNorm to the input before delegating to the wrapped module."""

    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn

    def forward(self, x, **kwargs):
        normed = self.norm(x)
        return self.fn(normed, **kwargs)

# Position-wise feed-forward network
class FeedForward(nn.Module):
    """Two-layer MLP: Linear -> GELU -> Dropout -> Linear -> Dropout."""

    def __init__(self, dim, hidden_dim, dropout=0.2):
        super().__init__()
        layers = [
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout),
        ]
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)

# Custom attention mechanism
class c_Attention(nn.Module):
    """Multi-head attention over channel tokens that additionally emits a
    dynamical contrastive loss computed from the raw similarity scores.

    forward returns (projected output, attention weights, dc-loss); the
    dc-loss is None whenever attn_mask is None.
    """

    def __init__(self, dim, heads, dim_head, dropout=0.8, regular_lambda=0.3, temperature=0.1):
        super().__init__()
        self.dim_head = dim_head
        self.heads = heads
        # sqrt(d_k); its reciprocal is used as the attention scale below.
        self.d_k = math.sqrt(self.dim_head)
        inner_dim = dim_head * heads
        self.attend = nn.Softmax(dim=-1)
        self.to_q = nn.Linear(dim, inner_dim)
        self.to_k = nn.Linear(dim, inner_dim)
        self.to_v = nn.Linear(dim, inner_dim)
        self.to_out = nn.Sequential(
            nn.Linear(inner_dim, dim),
            nn.Dropout(dropout)
        )
        self.dynamicalContranstiveLoss = DynamicalContrastiveLoss(k=regular_lambda, temperature=temperature)

    def forward(self, x, attn_mask=None):
        # x: (b, n, dim); attn_mask presumably a (b, n, n) 0/1 matrix —
        # verify against channel_mask_generator's output.
        h = self.heads
        q = self.to_q(x)
        k = self.to_k(x)
        v = self.to_v(x)
        scale = 1 / self.d_k

        q = rearrange(q, 'b n (h d) -> b h n d', h=h)
        k = rearrange(k, 'b n (h d) -> b h n d', h=h)
        v = rearrange(v, 'b n (h d) -> b h n d', h=h)

        dynamical_contrastive_loss = None

        # Raw (unscaled) dot-product scores; scaling happens at softmax time.
        scores = einsum(q, k,'b h i d, b h j d -> b h i j')

        # Outer product of per-row q/k norms; DynamicalContrastiveLoss uses
        # it to turn the scores into cosine similarities.
        q_norm = torch.norm(q, dim=-1, keepdim=True)
        k_norm = torch.norm(k, dim=-1, keepdim=True)
        norm_matrix = torch.einsum('bhid,bhjd->bhij', q_norm, k_norm)
        if attn_mask is not None:
            def _mask(scores, attn_mask):
                # NOTE(review): -log(1e10) ~= -23 is only moderately
                # negative, and it is later multiplied by `scale` — confirm
                # masked entries are suppressed strongly enough.
                large_negative = -math.log(1e10)
                attention_mask = torch.where(attn_mask == 0, large_negative, 0)
                scores = scores * attn_mask.unsqueeze(1) + attention_mask.unsqueeze(1)
                return scores

            masked_scores = _mask(scores, attn_mask)

            # The contrastive loss sees the *unmasked* scores.
            dynamical_contrastive_loss = self.dynamicalContranstiveLoss(scores, attn_mask, norm_matrix)
        else:
            masked_scores = scores

        # Scaled softmax attention, then the weighted sum of values.
        attn = self.attend(masked_scores * scale)
        out = einsum(attn, v,'b h i j, b h j d -> b h i d')
        out = rearrange(out, 'b h n d -> b n (h d)')

        return self.to_out(out), attn, dynamical_contrastive_loss

class c_Transformer(nn.Module):  ##Register the blocks into whole network
    """Stack of PreNorm(c_Attention) + PreNorm(FeedForward) residual blocks.

    forward returns (x, attn, dcloss): the transformed sequence, the last
    layer's attention weights, and the dynamical contrastive loss averaged
    over layers.  dcloss is None when attn_mask is None, because
    c_Attention produces no loss on that path (the original code crashed
    there with `0 + None`).
    """

    def __init__(self, dim, depth, heads, dim_head, mlp_dim, dropout=0.8, regular_lambda=0.3, temperature=0.1):
        super().__init__()
        self.layers = nn.ModuleList([])
        for _ in range(depth):
            self.layers.append(nn.ModuleList([
                PreNorm(dim,
                        c_Attention(dim, heads=heads, dim_head=dim_head, dropout=dropout, regular_lambda=regular_lambda,
                                    temperature=temperature)),
                PreNorm(dim, FeedForward(dim, mlp_dim, dropout=dropout))
            ]))

    def forward(self, x, attn_mask=None):
        total_loss = 0.0
        n_losses = 0
        attn = None
        for attn_block, ff in self.layers:
            x_n, attn, dcloss = attn_block(x, attn_mask=attn_mask)
            # Fix: c_Attention returns dcloss=None when attn_mask is None;
            # only accumulate real losses.
            if dcloss is not None:
                total_loss += dcloss
                n_losses += 1
            x = x_n + x    # residual around attention
            x = ff(x) + x  # residual around feed-forward
        # Identical to the original total/len(layers) when a mask is given.
        dcloss = total_loss / n_losses if n_losses else None
        return x, attn, dcloss

class Trans_C(nn.Module):
    """Patch-embed the input, run the channel transformer, project to d_model.

    forward returns (output, dcloss) where dcloss comes from c_Transformer.
    """

    def __init__(self, *, dim, depth, heads, mlp_dim, dim_head, dropout, patch_dim, d_model,
                 regular_lambda=0.3, temperature=0.1):
        super().__init__()

        self.dim = dim
        self.patch_dim = patch_dim
        self.to_patch_embedding = nn.Sequential(nn.Linear(patch_dim, dim), nn.Dropout(dropout))
        self.dropout = nn.Dropout(dropout)
        self.transformer = c_Transformer(dim, depth, heads, dim_head, mlp_dim, dropout,
                                         regular_lambda=regular_lambda, temperature=temperature)
        self.mlp_head = nn.Linear(dim, d_model)  # horizon)

    def forward(self, x, attn_mask=None):
        embedded = self.to_patch_embedding(x)
        encoded, _attn, dcloss = self.transformer(embedded, attn_mask)
        encoded = self.dropout(encoded)
        # NOTE(review): bare .squeeze() drops *every* size-1 dim, including
        # a batch of one — verify callers never hit that case.
        out = self.mlp_head(encoded).squeeze()
        return out, dcloss  # ,attn

# Reversible instance normalization
class RevIN(nn.Module):
    """Reversible instance normalization (RevIN).

    mode='norm' records per-instance statistics and normalizes the input;
    mode='denorm' undoes the transform using the statistics stored by the
    most recent 'norm' call.  With subtract_last=True the last time step
    is subtracted instead of the mean.
    """

    def __init__(self, num_features, eps=1e-5, affine=True, subtract_last=False):
        super().__init__()
        self.num_features = num_features
        self.eps = eps
        self.affine = affine
        self.subtract_last = subtract_last
        if self.affine:
            # Learnable per-feature scale and shift.
            self.affine_weight = nn.Parameter(torch.ones(1, 1, num_features))
            self.affine_bias = nn.Parameter(torch.zeros(1, 1, num_features))

    def _get_statistics(self, x):
        # Reduce over every dim except batch (first) and features (last).
        reduce_dims = tuple(range(1, x.ndim - 1))
        if self.subtract_last:
            self.last = x[:, -1, :].unsqueeze(1)
        else:
            self.mean = torch.mean(x, dim=reduce_dims, keepdim=True).detach()
        self.stdev = torch.sqrt(torch.var(x, dim=reduce_dims, keepdim=True, unbiased=False) + self.eps).detach()

    def _normalize(self, x):
        center = self.last if self.subtract_last else self.mean
        x = (x - center) / self.stdev
        if self.affine:
            x = x * self.affine_weight + self.affine_bias
        return x

    def _denormalize(self, x):
        if self.affine:
            x = (x - self.affine_bias) / (self.affine_weight + self.eps)
        x = x * self.stdev
        x = x + (self.last if self.subtract_last else self.mean)
        return x

    def forward(self, x, mode='norm'):
        if mode == 'norm':
            self._get_statistics(x)
            return self._normalize(x)
        if mode == 'denorm':
            return self._denormalize(x)
        raise NotImplementedError

# Flatten projection head
class Flatten_Head(nn.Module):
    """Flatten the trailing two dims and project to seq_len.

    individual truthy: one (Flatten, Linear, Dropout) stack per variable,
    applied to x[:, i, :, :] and re-stacked on dim 1.  Otherwise: a shared
    flatten followed by three residual ReLU-linear layers and a final
    projection.
    """

    def __init__(self, individual, n_vars, nf, seq_len, head_dropout=0):
        super().__init__()
        self.individual = individual
        self.n_vars = n_vars
        if self.individual:
            self.linears1 = nn.ModuleList()
            self.dropouts = nn.ModuleList()
            self.flattens = nn.ModuleList()
            for _ in range(self.n_vars):
                self.flattens.append(nn.Flatten(start_dim=-2))
                self.linears1.append(nn.Linear(nf, seq_len))
                self.dropouts.append(nn.Dropout(head_dropout))
        else:
            self.flatten = nn.Flatten(start_dim=-2)
            self.linear1 = nn.Linear(nf, nf)
            self.linear2 = nn.Linear(nf, nf)
            self.linear3 = nn.Linear(nf, nf)
            self.linear4 = nn.Linear(nf, seq_len)
            self.dropout = nn.Dropout(head_dropout)

    def forward(self, x):
        if self.individual:
            per_var = [
                self.dropouts[i](self.linears1[i](self.flattens[i](x[:, i, :, :])))
                for i in range(self.n_vars)
            ]
            return torch.stack(per_var, dim=1)
        z = self.flatten(x)
        z = F.relu(self.linear1(z)) + z
        z = F.relu(self.linear2(z)) + z
        z = F.relu(self.linear3(z)) + z
        return self.linear4(z)

# Channel mask generator
class channel_mask_generator(nn.Module):
    """Generate a per-sample hard 0/1 channel-dependency mask.

    A zero-initialized, bias-free linear layer + sigmoid yields 0.5
    probabilities at the start of training; Gumbel-softmax resampling
    turns them into hard samples, and the diagonal is forced to 1 so each
    channel always attends to itself.
    """

    def __init__(self, input_size, n_vars):
        super(channel_mask_generator, self).__init__()
        self.generator = nn.Sequential(nn.Linear(input_size * 2, n_vars, bias=False), nn.Sigmoid())
        with torch.no_grad():
            # Zero weights (no bias) -> logits 0 -> sigmoid outputs 0.5.
            self.generator[0].weight.zero_()
        self.n_vars = n_vars

    def forward(self, x):
        # x: (b, n_vars, input_size * 2) -> mask: (b, n_vars, n_vars).
        distribution_matrix = self.generator(x)
        # Stochastic: draws a fresh Gumbel-softmax sample on every call.
        resample_matrix = self._bernoulli_gumbel_rsample(distribution_matrix)
        # Zero out whatever was sampled on the diagonal, then set it to 1.
        inverse_eye = 1 - torch.eye(self.n_vars).to(x.device)
        diag = torch.eye(self.n_vars).to(x.device)
        resample_matrix = torch.einsum("bcd,cd->bcd", resample_matrix, inverse_eye) + diag
        return resample_matrix

    def _bernoulli_gumbel_rsample(self, distribution_matrix):
        """Reparameterized Bernoulli sampling via a two-class Gumbel-softmax."""
        b, c, d = distribution_matrix.shape
        flatten_matrix = rearrange(distribution_matrix, 'b c d -> (b c d) 1')
        r_flatten_matrix = 1 - flatten_matrix
        # Logits log(p/(1-p)) and log((1-p)/p); these are +/-inf when p is
        # exactly 0 or 1 — fine here since sigmoid never reaches either.
        log_flatten_matrix = torch.log(flatten_matrix / r_flatten_matrix)
        log_r_flatten_matrix = torch.log(r_flatten_matrix / flatten_matrix)
        new_matrix = torch.concat([log_flatten_matrix, log_r_flatten_matrix], dim=-1)
        resample_matrix = F.gumbel_softmax(new_matrix, hard=True)
        # Keep the "one" channel and restore the original (b, c, d) shape.
        resample_matrix = rearrange(resample_matrix[..., 0], '(b c d) -> b c d', b=b, c=c, d=d)
        return resample_matrix

# Main model (only the relevant parts are shown)
class ModelCatch(nn.Module):
    """Classifier combining a patched time-domain BiLSTM with a CATCH-style
    frequency-domain channel transformer.

    Five channels (signals, projected k-mer embedding, base means, base
    stds, base signal lengths) are stacked, RevIN-normalized and patched.
    Time- and frequency-domain features are each pooled by a BiLSTM and
    classified by a small MLP.  When forward(..., training=True), the
    model also returns signal-reconstruction and auxiliary-loss terms.
    """

    def __init__(
        self,
        seq_len=13,
        signal_len=16,
        num_classes=2,
        vocab_size=16,
        embedding_size=4,
    ):
        super(ModelCatch, self).__init__()
        self.model_type = "ModelCatch"
        self.seq_len = seq_len
        self.signal_len = signal_len
        self.flatten_len = seq_len * signal_len  # 21 * 15 
        self.n_vars = 5  # Number of channels

        # CatchConfig remains unchanged as per request
        self.catch_configs = CatchConfig(
            seq_len=self.seq_len,  # Default from CatchConfig
            c_in=self.signal_len,    # Default from CatchConfig
            patch_size=15,
            patch_stride=8,
            e_layers=3,
            n_heads=8,
            cf_dim=64,
            d_ff=256,
            d_model=32,
            head_dim=64,
            dropout=0.2,
            head_dropout=0.1,
            regular_lambda=0.5,
            temperature=0.07,
            individual=0,
            auxi_loss="MAE",
            auxi_type="complex",
            auxi_mode="fft",
            auxi_lambda=0.005,
            dc_lambda=0.005,
            revin=1,
            affine=0,
            subtract_last=0
        )
        self.revin_layer = RevIN(num_features=self.n_vars, affine=self.catch_configs.affine, subtract_last=self.catch_configs.subtract_last)
        self.patch_size = self.catch_configs.patch_size
        self.patch_stride = self.catch_configs.patch_stride
        
        # Number of sliding windows produced by unfold() below.
        self.patch_num = (self.flatten_len - self.patch_size) // self.patch_stride + 1
        self.embedding = nn.Embedding(vocab_size, embedding_size)
        self.kmer_proj = nn.Linear(embedding_size, 1)
        self.channel_mask_gen = channel_mask_generator(
            input_size=self.patch_size,
            n_vars=self.n_vars,
        )
        # Input per token is [real | imag] of one patch, hence patch_size * 2.
        self.frequency_transformer = Trans_C(
            patch_dim=self.patch_size * 2,
            dim=self.catch_configs.cf_dim,
            depth=self.catch_configs.e_layers,
            heads=self.catch_configs.n_heads,
            dim_head=self.catch_configs.head_dim,
            mlp_dim=self.catch_configs.d_ff,
            dropout=self.catch_configs.dropout,
            regular_lambda=self.catch_configs.regular_lambda,
            temperature=self.catch_configs.temperature,
            d_model=self.catch_configs.d_model * 2
        )
        self.head_nf_f = self.catch_configs.d_model * 2 * self.patch_num
        self.head_f1 = Flatten_Head(
            individual=self.catch_configs.individual, 
            n_vars=self.n_vars, 
            nf=self.head_nf_f, 
            seq_len=self.flatten_len, 
            head_dropout=self.catch_configs.head_dropout
        )
        self.head_f2 = Flatten_Head(
            individual=self.catch_configs.individual, 
            n_vars=self.n_vars,
            nf=self.head_nf_f, 
            seq_len=self.flatten_len, 
            head_dropout=self.catch_configs.head_dropout
        )
        self.ircom = nn.Linear(self.flatten_len * 2, self.flatten_len)
        self.get_r = nn.Linear(self.catch_configs.d_model * 2, self.catch_configs.d_model * 2)
        self.get_i = nn.Linear(self.catch_configs.d_model * 2, self.catch_configs.d_model * 2)

        # Time-domain LSTM, adjusted input_size to match flattened input
        # NOTE(review): dropout has no effect with num_layers=1 (PyTorch warns).
        self.nhid_time = 128
        self.lstm_time = nn.LSTM(
            input_size=self.patch_size,  # Patched input size
            hidden_size=self.nhid_time,
            num_layers=1,
            dropout=0.5,
            batch_first=True,
            bidirectional=True
        )
        self.fc_time = nn.Linear(self.nhid_time * 2, self.nhid_time)
        self.relu_time = nn.ReLU()

        # Frequency-domain LSTM
        self.lstm_freq = nn.LSTM(
            input_size=self.catch_configs.d_model * 2,  # Frequency feature dimension
            hidden_size=self.nhid_time,
            num_layers=1,
            dropout=0.5,
            batch_first=True,
            bidirectional=True
        )
        self.fc_freq = nn.Linear(self.nhid_time * 2, self.nhid_time)
        self.relu_freq = nn.ReLU()

        # Classifier, combining time and frequency features
        freq_dim = self.nhid_time
        time_dim = self.nhid_time
        feature_dim = freq_dim + time_dim
        self.classifier = nn.Sequential(
            nn.Linear(feature_dim, 256),
            nn.ReLU(),
            nn.Linear(256, num_classes)
        )
        self.softmax = nn.Softmax(dim=1)

    def get_model_type(self):
        """Return the model-type tag ("ModelCatch")."""
        return self.model_type

    def init_hidden(self, batch_size, num_layers, hidden_size, device):
        """Build random (h0, c0) for a bidirectional LSTM.

        NOTE(review): autograd.Variable is deprecated in modern PyTorch
        (plain tensors suffice), and random initial states make inference
        non-deterministic — confirm intended.
        """
        h0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size)).to(device)
        c0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size)).to(device)
        return h0, c0

    def forward(self, kmer, base_means, base_stds, base_signal_lens, signals, training=False):
        """Return (logits, probs) or, when training=True,
        (logits, probs, recon_signals, complex_z, dcloss, input_tensor)."""
        batch_size = signals.size(0)

        # Adjust signals to match CatchConfig dimensions
        signals = signals.view(batch_size, -1, 1)  # (batch_size, seq_len * signal_len, 1) = (batch_size, 315, 1)

        # Process kmer
        kmer_embed = self.embedding(kmer.long())  # (batch_size, seq_len, embedding_size)
        kmer_feat = self.kmer_proj(kmer_embed)  # (batch_size, seq_len, 1)
        kmer_feat = kmer_feat.unsqueeze(2).repeat(1, 1, self.catch_configs.c_in, 1).view(batch_size, -1, 1)  # (batch_size, 315, 1)

        # Process base_means, base_stds, base_signal_lens
        # NOTE(review): .repeat(1, signal_len) tiles the whole sequence,
        # whereas kmer_feat above repeats each position contiguously — the
        # two layouts differ; confirm the channel alignment is intended.
        base_means = base_means.repeat(1, self.signal_len).unsqueeze(-1)  # (batch_size, 315, 1)
        base_stds = base_stds.repeat(1, self.signal_len).unsqueeze(-1)  # (batch_size, 315, 1)
        base_signal_lens = base_signal_lens.repeat(1, self.signal_len).unsqueeze(-1)  # (batch_size, 315, 1)

        # Concatenate inputs
        input_tensor = torch.cat([signals, kmer_feat, base_means, base_stds, base_signal_lens], dim=-1)  # (batch_size, 315, 5)

        # Time-domain features with patching
        time_input = self.revin_layer(input_tensor, 'norm')  # (batch_size, 315, 5)
        time_input = time_input.permute(0, 2, 1)  # (batch_size, 5, 315)
        time_input = time_input.unfold(dimension=-1, size=self.patch_size, step=self.patch_stride)  # (batch_size, 5, patch_num, patch_size)
        time_input = time_input.permute(0, 2, 1, 3).reshape(batch_size * self.n_vars, self.patch_num, self.patch_size)  # (batch_size * n_vars, patch_num, patch_size)
        time_hidden = self.init_hidden(batch_size * self.n_vars, 1, self.nhid_time, time_input.device)
        time_out, _ = self.lstm_time(time_input, time_hidden)  # (batch_size * n_vars, patch_num, nhid_time * 2)
        time_out = time_out[:, -1, :]  # (batch_size * n_vars, nhid_time * 2)
        time_out = time_out.view(batch_size, self.n_vars, self.nhid_time * 2).mean(dim=1)  # (batch_size, nhid_time * 2)
        time_features = self.fc_time(time_out)  # (batch_size, nhid_time)
        time_features = self.relu_time(time_features)

        # Frequency-domain processing
        # NOTE(review): re-runs 'norm' on the same tensor, overwriting the
        # RevIN statistics computed above (same input, so same stats).
        z = self.revin_layer(input_tensor, 'norm')
        z = z.permute(0, 2, 1)  # (batch_size, 5, flatten_len)
        z = torch.fft.fft(z)
        z1 = z.real  # (batch_size, 5, flatten_len)
        z2 = z.imag  # (batch_size, 5, flatten_len)

        # Patching
        z1 = z1.unfold(dimension=-1, size=self.patch_size, step=self.patch_stride) 
        z2 = z2.unfold(dimension=-1, size=self.patch_size, step=self.patch_stride)  # (batch_size, 5, patch_num, patch_size)
        z1 = z1.permute(0, 2, 1, 3).reshape(batch_size * self.patch_num, self.n_vars, self.patch_size)
        z2 = z2.permute(0, 2, 1, 3).reshape(batch_size * self.patch_num, self.n_vars, self.patch_size)
        z_cat = torch.cat((z1, z2), dim=-1)  # (batch_size * patch_num, n_vars, patch_size * 2)

        # Channel mask
        channel_mask = self.channel_mask_gen(z_cat)  # (batch_size * patch_num, n_vars, n_vars)

        # Frequency Transformer
        z, dcloss = self.frequency_transformer(z_cat, channel_mask)  # (batch_size * patch_num, n_vars, d_model * 2)

        # Frequency-domain LSTM
        # NOTE(review): this permute+reshape regroups a (b*patch, feat, var)
        # layout into (b*var, patch, feat) without an intermediate view of
        # the batch axis — verify the element grouping is the intended one.
        z_freq = z.permute(0, 2, 1).reshape(batch_size * self.n_vars, self.patch_num, self.catch_configs.d_model * 2)  # (batch_size * n_vars, patch_num, d_model * 2)
        freq_hidden = self.init_hidden(batch_size * self.n_vars, 1, self.nhid_time, z.device)
        freq_out, _ = self.lstm_freq(z_freq, freq_hidden)  # (batch_size * n_vars, patch_num, nhid_time * 2)
        freq_out = freq_out[:, -1, :]  # (batch_size * n_vars, nhid_time * 2)
        freq_out = freq_out.view(batch_size, self.n_vars, self.nhid_time * 2).mean(dim=1)  # (batch_size, nhid_time * 2)
        freq_features = self.fc_freq(freq_out)  # (batch_size, nhid_time)
        freq_features = self.relu_freq(freq_features)

        # Combine time and frequency features
        combined_features = torch.cat([time_features, freq_features], dim=1)  # (batch_size, 2 * nhid_time)

        # Classification
        out = self.classifier(combined_features)

        if training:
            # Separate real and imaginary parts
            z1 = self.get_r(z)
            z2 = self.get_i(z)
            z1 = z1.view(batch_size, self.patch_num, self.n_vars, -1).permute(0, 2, 1, 3)
            z2 = z2.view(batch_size, self.patch_num, self.n_vars, -1).permute(0, 2, 1, 3)
            z1 = self.head_f1(z1)  # (batch_size, n_vars, flatten_len)
            z2 = self.head_f2(z2)  # (batch_size, n_vars, flatten_len)
            complex_z = torch.complex(z1, z2)
            # Reconstruct the time-domain signal from the predicted spectrum.
            z = torch.fft.ifft(complex_z)
            zr = z.real
            zi = z.imag
            z = self.ircom(torch.cat((zr, zi), dim=-1))
            recon_signals = z.permute(0, 2, 1)
            recon_signals = self.revin_layer(recon_signals, 'denorm')
            return out, self.softmax(out), recon_signals, complex_z.permute(0, 2, 1), dcloss, input_tensor
        else:
            return out, self.softmax(out)





class ModelPatchBiLSTM(nn.Module):
    """BiLSTM classifier that patches both the sequence-feature and signal
    tensors along the position axis before encoding.

    Each channel's patch sequence is encoded by a branch BiLSTM, mean-
    pooled into a fixed-size vector, tiled back over seq_len, and run
    through a combining BiLSTM plus an MLP classification head.
    """

    def __init__(self,
                 seq_len=13,
                 signal_len=16,
                 num_layers1=3,
                 num_layers2=1,
                 num_classes=2,
                 dropout_rate=0.5,
                 hidden_size=256,
                 vocab_size=16,
                 embedding_size=4,
                 is_base=True,
                 is_signallen=True,
                 is_trace=False,
                 module="both_bilstm",
                 patch_size=5,
                 patch_stride=3):
        super(ModelPatchBiLSTM, self).__init__()
        self.model_type = "BiLSTM"
        self.module = module
        self.seq_len = seq_len
        self.signal_len = signal_len
        self.num_layers1 = num_layers1
        self.num_layers2 = num_layers2
        self.num_classes = num_classes
        self.hidden_size = hidden_size
        # Split the combined width between the two branches.
        self.nhid_seq = self.hidden_size // 2
        self.nhid_signal = self.hidden_size - self.nhid_seq
        self.patch_size = patch_size
        self.patch_stride = patch_stride
        # Number of sliding windows produced by unfold() in forward.
        self.patch_num = (seq_len - self.patch_size) // self.patch_stride + 1

        # seq feature
        self.embed = nn.Embedding(vocab_size, embedding_size)
        self.is_base = is_base
        self.is_signallen = is_signallen
        self.is_trace = is_trace
        self.sigfea_num = 3 if self.is_signallen else 2
        #input_size_seq = embedding_size + (3 if is_base and is_signallen and is_trace else 0)
        # NOTE(review): with num_layers2 == 1 the LSTM `dropout` argument
        # has no effect (PyTorch warns) — confirm intended.
        self.lstm_seq = nn.LSTM(
            self.patch_size,
            self.nhid_seq,
            self.num_layers2,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        self.lstm_seq.flatten_parameters()
        self.fc_seq = nn.Linear(self.nhid_seq * 2, self.nhid_seq)
        self.relu_seq = nn.ReLU()

        # signal feature
        self.lstm_signal = nn.LSTM(
            self.patch_size,
            self.nhid_signal,
            self.num_layers2,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        self.lstm_signal.flatten_parameters()
        self.fc_signal = nn.Linear(self.nhid_signal * 2, self.nhid_signal)
        self.relu_signal = nn.ReLU()

        # combined
        self.lstm_comb = nn.LSTM(
            self.hidden_size,
            self.hidden_size,
            self.num_layers1,
            dropout=dropout_rate,
            batch_first=True,
            bidirectional=True,
        )
        self.lstm_comb.flatten_parameters()
        self.dropout1 = nn.Dropout(p=dropout_rate)
        self.fc1 = nn.Linear(hidden_size * 2, hidden_size)
        self.dropout2 = nn.Dropout(p=dropout_rate)
        self.fc2 = nn.Linear(hidden_size, num_classes)
        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(dim=1)
        #self.projection = nn.Linear(self.nhid_seq, self.nhid_signal)

    def get_model_type(self):
        """Return the model-type tag ("BiLSTM")."""
        return self.model_type

    def init_hidden(self, batch_size, num_layers, hidden_size, device):
        """Build random (h0, c0) for a bidirectional LSTM.

        NOTE(review): autograd.Variable is deprecated in modern PyTorch,
        and random initial states make inference non-deterministic —
        confirm intended.
        """
        h0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size)).to(device)
        c0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size)).to(device)
        return h0, c0

    def forward(self, kmer, base_means, base_stds, base_signal_lens, signals):
        """Return (logits, softmax(logits))."""
        # sequence feature processing
        kmer_embed = self.embed(kmer.long())  # (batch_size, seq_len, embedding_size)
        batch_size, seq_len, embedding_size = kmer_embed.shape
        # Assumes the three per-base stats broadcast into an (N, L, 1)
        # slice — TODO confirm caller shapes.
        out_seq = torch.empty(batch_size, seq_len, embedding_size + 3, device=kmer_embed.device)
        out_seq[:, :, :embedding_size] = kmer_embed
        out_seq[:, :, embedding_size:embedding_size + 1] = base_means
        out_seq[:, :, embedding_size + 1:embedding_size + 2] = base_stds
        out_seq[:, :, embedding_size + 2:embedding_size + 3] = base_signal_lens

        # Patch along the position axis; each feature channel becomes an
        # independent batch item of patch sequences.
        out_seq = out_seq.permute(0, 2, 1)  # (batch_size, embedding_size + 3, seq_len)
        out_seq = out_seq.unfold(dimension=-1, size=self.patch_size, step=self.patch_stride)  # (batch_size, embedding_size + 3, patch_num, patch_size)
        out_seq = out_seq.permute(0, 2, 1, 3).reshape(batch_size * (embedding_size + 3), self.patch_num, self.patch_size)
        h0_seq, c0_seq = self.init_hidden(batch_size * (embedding_size + 3), self.num_layers2, self.nhid_seq, kmer_embed.device)
        out_seq, _ = self.lstm_seq(out_seq, (h0_seq, c0_seq))  # (batch_size * (embedding_size + 3), patch_num, nhid_seq * 2)
        out_seq = self.fc_seq(out_seq)  # (batch_size * (embedding_size + 3), patch_num, nhid_seq)
        out_seq = self.relu_seq(out_seq)
        # Mean-pool over patches, then over feature channels.
        out_seq = out_seq.mean(dim=1)  # (batch_size * (embedding_size + 3), nhid_seq)
        out_seq = out_seq.view(batch_size, embedding_size + 3, self.nhid_seq).mean(dim=1)  # (batch_size, nhid_seq)

        # signal feature processing (same patching scheme per signal channel)
        out_signal = signals.float()  # (batch_size, seq_len, signal_len)
        out_signal = out_signal.permute(0, 2, 1)  # (batch_size, signal_len, seq_len)
        out_signal = out_signal.unfold(dimension=-1, size=self.patch_size, step=self.patch_stride)  # (batch_size, signal_len, patch_num, patch_size)
        out_signal = out_signal.permute(0, 2, 1, 3).reshape(batch_size * self.signal_len, self.patch_num, self.patch_size)
        h0_signal, c0_signal = self.init_hidden(batch_size * self.signal_len, self.num_layers2, self.nhid_signal, kmer_embed.device)
        out_signal, _ = self.lstm_signal(out_signal, (h0_signal, c0_signal))  # (batch_size * signal_len, patch_num, nhid_signal * 2)
        out_signal = self.fc_signal(out_signal)  # (batch_size * signal_len, patch_num, nhid_signal)
        out_signal = self.relu_signal(out_signal)
        out_signal = out_signal.mean(dim=1)  # (batch_size * signal_len, nhid_signal)
        out_signal = out_signal.view(batch_size, self.signal_len, self.nhid_signal).mean(dim=1)  # (batch_size, nhid_signal)

        # combined features: tile the pooled vector back over seq_len so it
        # can feed the combining BiLSTM.
        out = torch.cat((out_seq, out_signal), dim=1)  # (batch_size, nhid_seq + nhid_signal)
        out = out.unsqueeze(1).repeat(1, self.seq_len, 1)  # (batch_size, seq_len, nhid_seq + nhid_signal)
        h0_comb, c0_comb = self.init_hidden(batch_size, self.num_layers1, self.hidden_size, kmer_embed.device)
        out, _ = self.lstm_comb(out, (h0_comb, c0_comb))  # (batch_size, seq_len, hidden_size * 2)

        # pre-allocate the final output tensor
        out_final = torch.empty(batch_size, 2 * self.hidden_size, device=kmer_embed.device)
        out_final[:, :self.hidden_size] = out[:, -1, :self.hidden_size]  # forward direction's last output
        out_final[:, self.hidden_size:] = out[:, 0, self.hidden_size:]  # backward direction's first output

        # decode
        out = self.dropout1(out_final)
        out = self.fc1(out)
        out = self.relu(out)
        out = self.dropout2(out)
        out = self.fc2(out)

        return out,  self.softmax(out)

class ModelFITS(nn.Module):
    """FITS (Frequency Interpolation Time Series) model for methylation calling.

    Embeds k-mers, aligns all per-base features to per-signal-point resolution,
    instance-normalizes them (RIN), then branches into:
      (a) pooled time-domain features,
      (b) a low-pass spectrum linearly interpolated to the prediction length
          with one complex-valued linear layer per channel.
    Both feature sets feed a classifier; the interpolated spectrum is also
    inverted back to the time domain as a signal reconstruction/forecast.
    """

    def __init__(self, seq_len=21,
                 signal_len=15,
                 pred_len=20,
                 vocab_size=16,
                 embedding_size=4,
                 num_classes=2,
                 ):
        super(ModelFITS, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_size)
        self.seq_len = seq_len
        self.signal_len = signal_len
        self.pred_len = pred_len
        self.flatten_len = seq_len * signal_len  # e.g. 21 * 15 = 315
        self.pred_flatten_len = (seq_len + pred_len) * signal_len  # e.g. (21 + 20) * 15 = 615
        # Channels: signals, base_means, base_stds, base_signal_lens + kmer embedding.
        self.channels = 4 + embedding_size
        self.dominance_freq = 50  # number of low-frequency bins kept by the LPF
        self.length_ratio = self.pred_flatten_len / self.flatten_len  # e.g. 615 / 315 ≈ 1.952

        # One complex linear interpolator per channel: dominance_freq -> upsampled bins.
        self.freq_upsampler = nn.ModuleList()
        for _ in range(self.channels):
            self.freq_upsampler.append(
                nn.Linear(self.dominance_freq, int(self.dominance_freq * self.length_ratio)).to(torch.cfloat)
            )

        # Time-domain features: global average pool over time, then flatten.
        self.feature_extractor = nn.Sequential(
            nn.AdaptiveAvgPool1d(1),  # (batch, channels, flatten_len) -> (batch, channels, 1)
            nn.Flatten()              # (batch, channels, 1) -> (batch, channels)
        )
        # Frequency-domain features from the upsampled spectrum (real + imag stacked).
        self.freq_feature_extractor = nn.Sequential(
            nn.AdaptiveAvgPool1d(1),  # (batch, channels*2, new_freq) -> (batch, channels*2, 1)
            nn.Flatten()              # (batch, channels*2, 1) -> (batch, channels*2)
        )

        feature_dim = self.channels * 3  # time-domain: channels, freq-domain: channels*2 (real + imag)
        self.classifier = nn.Sequential(
            nn.Linear(feature_dim, 256),
            nn.ReLU(),
            nn.Linear(256, num_classes)
        )
        self.softmax = nn.Softmax(dim=1)

    def forward(self, kmer, base_means, base_stds, base_signal_lens, signals, training=False):
        """Run classification and signal reconstruction.

        Returns (logits, softmax probabilities, reconstructed signal `xy`,
        scale-only reconstruction `low_xy_adjusted`).
        `training` is accepted for interface compatibility but is unused.
        """
        batch_size = signals.size(0)

        signals = signals.view(batch_size, -1, 1)  # (batch, flatten_len, 1)

        # Expand per-base features so every signal point carries its base's feature.
        kmer_embed = self.embedding(kmer.long())  # (batch, seq_len, embedding_size)
        kmer_embed = kmer_embed.repeat_interleave(self.signal_len, dim=1)  # (batch, flatten_len, embedding_size)
        base_means = base_means.repeat_interleave(self.signal_len, dim=1).unsqueeze(-1)  # (batch, flatten_len, 1)
        base_stds = base_stds.repeat_interleave(self.signal_len, dim=1).unsqueeze(-1)  # (batch, flatten_len, 1)
        base_signal_lens = base_signal_lens.repeat_interleave(self.signal_len, dim=1).unsqueeze(-1)  # (batch, flatten_len, 1)

        rin_tensor = torch.cat([signals, base_means, base_stds, base_signal_lens, kmer_embed], dim=-1)  # (batch, flatten_len, channels)

        # RIN (reversible instance normalization): keep mean/var to undo it later.
        x_mean = torch.mean(rin_tensor, dim=1, keepdim=True)
        x = rin_tensor - x_mean
        x_var = torch.var(x, dim=1, keepdim=True) + 1e-5
        input_tensor = x / torch.sqrt(x_var)  # (batch, flatten_len, channels)

        # Time-domain features
        time_features = self.feature_extractor(input_tensor.permute(0, 2, 1))  # (batch, channels)

        # Low-pass filter: the slice keeps only the first `dominance_freq` bins,
        # which already discards the tail (no in-place zeroing needed).
        low_specx = torch.fft.rfft(input_tensor, dim=1)[:, 0:self.dominance_freq, :]  # (batch, dominance_freq, channels)

        # Per-channel complex interpolation to the upsampled frequency length.
        low_specxy_ = torch.zeros(
            [low_specx.size(0), int(self.dominance_freq * self.length_ratio), low_specx.size(2)],
            dtype=low_specx.dtype, device=low_specx.device
        )
        for i, upsampler in enumerate(self.freq_upsampler):
            low_specxy_[:, :, i] = upsampler(low_specx[:, :, i])

        # Frequency-domain features (real and imaginary parts stacked on channels).
        freq_cat = torch.cat([low_specxy_.real, low_specxy_.imag], dim=2)  # (batch, new_freq, channels*2)
        freq_features = self.freq_feature_extractor(freq_cat.permute(0, 2, 1))  # (batch, channels*2)

        combined_features = torch.cat([time_features, freq_features], dim=1)  # (batch, channels*3)
        out = self.classifier(combined_features)  # (batch, num_classes)

        # Zero-pad the interpolated spectrum to full length and invert the FFT.
        low_specxy = torch.zeros(
            [low_specxy_.size(0), int(self.pred_flatten_len / 2 + 1), low_specxy_.size(2)],
            dtype=low_specxy_.dtype, device=low_specxy_.device
        )
        low_specxy[:, 0:low_specxy_.size(1), :] = low_specxy_  # zero padding
        low_xy = torch.fft.irfft(low_specxy, n=self.pred_flatten_len, dim=1)  # (batch, pred_flatten_len, channels)
        low_xy = low_xy * self.length_ratio  # energy compensation for the longer sequence
        xy = low_xy * torch.sqrt(x_var) + x_mean  # reverse RIN
        low_xy_adjusted = low_xy * torch.sqrt(x_var)  # scale-only (mean not restored)

        return out, self.softmax(out), xy, low_xy_adjusted
        
class SharedEmbedding(nn.Module):
    """A single k-mer embedding table intended to be shared across model heads."""

    def __init__(self, vocab_size, embedding_size):
        super().__init__()
        # One learnable vector of length `embedding_size` per vocabulary entry.
        self.embedding = nn.Embedding(vocab_size, embedding_size)

    def forward(self, kmer):
        """Look up embeddings for integer k-mer ids (cast to long first)."""
        ids = kmer.long()
        return self.embedding(ids)

class SharedBackbone(nn.Module):
    """Shared FITS-style backbone.

    Normalizes the stacked per-signal-point features (RIN), extracts pooled
    time-domain features and a low-pass spectrum interpolated to the
    prediction length, and reconstructs the time-domain signal from it.
    """

    def __init__(self, seq_len=21, signal_len=15, pred_len=20, embedding_size=4):
        super().__init__()
        self.seq_len = seq_len
        self.signal_len = signal_len
        self.pred_len = pred_len
        self.flatten_len = seq_len * signal_len
        self.pred_flatten_len = (seq_len + pred_len) * signal_len
        # Channels: signals, base_means, base_stds, base_signal_lens + kmer embedding.
        self.channels = 4 + embedding_size
        self.dominance_freq = 50  # number of low-frequency bins kept by the LPF
        self.length_ratio = self.pred_flatten_len / self.flatten_len

        # One complex linear interpolator per channel: dominance_freq -> upsampled bins.
        self.freq_upsampler = nn.ModuleList()
        for _ in range(self.channels):
            self.freq_upsampler.append(
                nn.Linear(self.dominance_freq, int(self.dominance_freq * self.length_ratio)).to(torch.cfloat)
            )

        # Global average pool over time, then flatten to (batch, channels).
        self.feature_extractor = nn.Sequential(
            nn.AdaptiveAvgPool1d(1),
            nn.Flatten()
        )

    def forward(self, shared_embedding, kmer, base_means, base_stds, base_signal_lens, signals):
        """Return (time_features, interpolated complex spectrum, reconstructed
        signal `xy`, scale-only reconstruction `low_xy_adjusted`)."""
        batch_size = signals.size(0)

        signals = signals.view(batch_size, -1, 1)  # (batch, flatten_len, 1)

        # Expand per-base features so every signal point carries its base's feature.
        kmer_embed = shared_embedding(kmer)  # (batch, seq_len, embedding_size)
        kmer_embed = kmer_embed.repeat_interleave(self.signal_len, dim=1)  # (batch, flatten_len, embedding_size)
        base_means = base_means.repeat_interleave(self.signal_len, dim=1).unsqueeze(-1)
        base_stds = base_stds.repeat_interleave(self.signal_len, dim=1).unsqueeze(-1)
        base_signal_lens = base_signal_lens.repeat_interleave(self.signal_len, dim=1).unsqueeze(-1)

        rin_tensor = torch.cat([signals, base_means, base_stds, base_signal_lens, kmer_embed], dim=-1)

        # RIN (reversible instance normalization): keep mean/var to undo it later.
        x_mean = torch.mean(rin_tensor, dim=1, keepdim=True)
        x = rin_tensor - x_mean
        x_var = torch.var(x, dim=1, keepdim=True) + 1e-5
        input_tensor = x / torch.sqrt(x_var)

        # Time-domain features
        time_features = self.feature_extractor(input_tensor.permute(0, 2, 1))

        # Low-pass filter: the slice keeps only the first `dominance_freq` bins,
        # which already discards the tail (no in-place zeroing needed).
        low_specx = torch.fft.rfft(input_tensor, dim=1)[:, 0:self.dominance_freq, :]

        # Per-channel complex interpolation to the upsampled frequency length.
        low_specxy_ = torch.zeros(
            [low_specx.size(0), int(self.dominance_freq * self.length_ratio), low_specx.size(2)],
            dtype=low_specx.dtype, device=low_specx.device
        )
        for i, upsampler in enumerate(self.freq_upsampler):
            low_specxy_[:, :, i] = upsampler(low_specx[:, :, i])

        # Zero-pad the interpolated spectrum to full length and invert the FFT.
        low_specxy = torch.zeros(
            [low_specxy_.size(0), int(self.pred_flatten_len / 2 + 1), low_specxy_.size(2)],
            dtype=low_specxy_.dtype, device=low_specxy_.device
        )
        low_specxy[:, 0:low_specxy_.size(1), :] = low_specxy_
        low_xy = torch.fft.irfft(low_specxy, n=self.pred_flatten_len, dim=1)
        low_xy = low_xy * self.length_ratio  # energy compensation
        xy = low_xy * torch.sqrt(x_var) + x_mean  # reverse RIN
        low_xy_adjusted = low_xy * torch.sqrt(x_var)  # scale-only (mean not restored)

        return time_features, low_specxy_, xy, low_xy_adjusted

class SignalPredictionHead(nn.Module):
    """Predicts the future flattened signal from backbone features and future k-mers."""

    def __init__(self, embedding_size=4, channels=8, pred_len=20, signal_len=15, seq_len=21):
        super().__init__()
        self.embedding_size = embedding_size
        self.channels = channels
        self.pred_len = pred_len
        self.signal_len = signal_len
        self.flatten_len = seq_len * signal_len
        self.pred_flatten_len = (seq_len + pred_len) * signal_len
        self.dominance_freq = 50
        self.length_ratio = self.pred_flatten_len / self.flatten_len
        # Upsampled spectrum length produced by the backbone; with the defaults
        # this is int(50 * 615 / 315) = 97. (The previous "# 47" comment was wrong.)
        self.freq_len = int(self.dominance_freq * self.length_ratio)
        # Fuse: pooled time features (channels) + flattened complex spectrum
        # (freq_len * channels * 2 for real+imag) + pooled future embedding.
        self.fusion_layer = nn.Linear(channels + (self.freq_len * channels * 2) + embedding_size, 256)
        self.reduction_layer = nn.Linear(256, self.channels)
        self.output_layer = nn.Sequential(
            nn.ReLU(),
            nn.Linear(self.channels, self.pred_flatten_len * self.channels)
        )

    def forward(self, shared_embedding, past_features, pred_kmer):
        """Predict the future signal.

        past_features: tuple of (time_features (batch, channels),
                       low_specxy_ (batch, freq_len, channels), complex).
        Returns a tensor of shape (batch, pred_flatten_len, channels).
        """
        batch_size = pred_kmer.size(0)

        # Embed future k-mers and expand to per-signal-point resolution.
        pred_kmer_embed = shared_embedding(pred_kmer)  # (batch, pred_len, embedding_size)
        pred_kmer_embed = pred_kmer_embed.repeat_interleave(self.signal_len, dim=1)  # (batch, pred_flatten_len, embedding_size)

        past_time, past_freq = past_features
        # Flatten real and imaginary spectra into one vector per sample.
        past_freq_flat = torch.cat(
            [past_freq.real.reshape(batch_size, -1), past_freq.imag.reshape(batch_size, -1)],
            dim=1,
        )
        # The original repeated past_time along a time axis and immediately
        # averaged it back (a no-op round trip); use past_time directly.
        fused_past = torch.cat([past_time, past_freq_flat], dim=1)
        fused_input = torch.cat([fused_past, pred_kmer_embed.mean(dim=1)], dim=1)
        fused = self.fusion_layer(fused_input)
        reduced = self.reduction_layer(fused)
        pred_xy = self.output_layer(reduced).view(batch_size, self.pred_flatten_len, self.channels)

        return pred_xy


class MethylationHead(nn.Module):
    """Classification head fusing time-domain and frequency-domain features into class logits."""

    def __init__(self, channels=8, num_classes=2):
        super().__init__()
        # Global average pool over the frequency axis, then flatten to (batch, channels*2).
        self.freq_feature_extractor = nn.Sequential(
            nn.AdaptiveAvgPool1d(1),
            nn.Flatten()
        )
        # channels (time features) + channels*2 (real + imag frequency features).
        feature_dim = channels * 3
        self.classifier = nn.Sequential(
            nn.Linear(feature_dim, 256),
            nn.ReLU(),
            nn.Linear(256, num_classes)
        )
        self.softmax = nn.Softmax(dim=1)

    def forward(self, time_features, low_specxy_):
        """Return (logits, softmax probabilities) for the methylation classes."""
        # Stack real and imaginary spectra along the channel axis.
        spectrum = torch.cat((low_specxy_.real, low_specxy_.imag), dim=2)
        # Pool over frequency bins -> (batch, channels*2).
        pooled = self.freq_feature_extractor(spectrum.permute(0, 2, 1))
        logits = self.classifier(torch.cat((time_features, pooled), dim=1))
        return logits, self.softmax(logits)