from __future__ import absolute_import

import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from deepsignal3.utils.process_utils import str2bool
from deepsignal3.utils.constants_torch import FloatTensor
from deepsignal3.utils.constants_torch import use_cuda
from deepsignal3.utils.process_utils import base2code_dna
import torch
import argparse
import torch
import torch.nn as nn
import torch.autograd as autograd
import random
import umap

def parse_args():
    """Build and parse the command-line arguments for embedding visualization.

    Returns:
        argparse.Namespace with data paths, model hyper-parameters and
        plotting options.  Defaults point at the author's local files and
        are meant to be overridden on the command line.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--singleton",
        type=str,
        default="/home/xiaoyifu/data/HG002/Bisulfite/singleton.bisulfite.bed",
        help="singleton address.",
    )
    parser.add_argument('--model_type', type=str, default="both_bilstm",
                        choices=["both_bilstm", "seq_bilstm", "signal_bilstm"],
                        required=False,
                        help="type of model to use, 'both_bilstm', 'seq_bilstm' or 'signal_bilstm', "
                             "'both_bilstm' means to use both seq and signal bilstm, default: both_bilstm")
    # NOTE: help texts below were corrected to match the actual defaults.
    parser.add_argument('--seq_len', type=int, default=21, required=False,
                        help="len of kmer. default 21")
    parser.add_argument('--signal_len', type=int, default=16, required=False,
                        help="signal num of one base, default 16")

    # model param
    parser.add_argument('--layernum1', type=int, default=3,
                        required=False, help="lstm layer num for combined feature, default 3")
    parser.add_argument('--layernum2', type=int, default=1,
                        required=False, help="lstm layer num for seq feature (and for signal feature too), default 1")
    parser.add_argument('--class_num', type=int, default=2, required=False)
    parser.add_argument('--dropout_rate', type=float, default=0, required=False)
    parser.add_argument('--n_vocab', type=int, default=16, required=False,
                        help="base_seq vocab_size (15 base kinds from iupac)")
    parser.add_argument('--n_embed', type=int, default=4, required=False,
                        help="base_seq embedding_size")
    parser.add_argument('--is_base', type=str, default="yes", required=False,
                        help="is using base features in seq model, default yes")
    parser.add_argument('--is_signallen', type=str, default="yes", required=False,
                        help="is using signal length feature of each base in seq model, default yes")
    parser.add_argument('--is_trace', type=str, default="no", required=False,
                        help="is using trace (base prob) feature of each base in seq model, default no")

    parser.add_argument("--batch_size", "-b", default=512, type=int, required=False,
                        action="store", help="batch size, default 512")

    # BiLSTM model param
    parser.add_argument('--hid_rnn', type=int, default=256, required=False,
                        help="BiLSTM hidden_size for combined feature")
    parser.add_argument('--outputdir', type=str,
                        default="/home/xiaoyifu/data/HG002/R10.4/20221109_1654_5D_PAG68757_39c39833/train1/",
                        help='the picture output filepath')
    parser.add_argument('--pname', type=str,
                        default="valid",
                        help='the picture output name')
    parser.add_argument('--motif', type=str,
                        default="CG",
                        help='the motif to draw')
    parser.add_argument('--label', type=int,
                        help='the label to draw')
    parser.add_argument("--model_path", "-m", action="store", type=str,
                        default='/home/xiaoyifu/methylation/deepsignal3/model/21mer/both_bilstm.b21_s16_epoch6.ckpt',
                        help="file path of the trained model (.ckpt)")
    parser.add_argument("--input_path", "-i", action="store", type=str,
                         default='/home/xiaoyifu/data/HG002/R10.4/20221109_1654_5D_PAG68757_39c39833/train1/21mer.gc.100000.tsv',
                         help="the input path, can be a signal_feature file from extract_features.py, "
                              "or a directory of fast5 files. If a directory of fast5 files is provided, "
                              "args in FAST5_EXTRACTION and MAPPING should (reference_path must) be provided.")
    return parser.parse_args()

class ModelBiLSTM(nn.Module):
    """BiLSTM classifier over k-mer sequence features and raw signal features.

    Depending on ``module`` it runs a sequence-branch BiLSTM, a signal-branch
    BiLSTM, or both (branch outputs concatenated along the feature axis),
    followed by a combined BiLSTM whose last-forward / first-backward hidden
    states are decoded by two linear layers into ``num_classes`` logits.
    Every forward pass also appends the combined feature vector to
    ``self.feas`` so callers can retrieve embeddings for visualization.
    NOTE(review): ``feas`` grows without bound across forward calls — fine
    for this one-shot visualization script, but a leak if reused in training.
    """

    def __init__(self, seq_len=13, signal_len=16, num_layers1=3, num_layers2=1, num_classes=2,
                 dropout_rate=0.5, hidden_size=256,
                 vocab_size=16, embedding_size=4, is_base=True, is_signallen=True,
                 is_trace=False,
                 module="both_bilstm", device=0):
        """Configure the three BiLSTM stages and the decoding head.

        seq_len: number of bases in the k-mer window.
        signal_len: number of raw-signal samples per base.
        num_layers1 / num_layers2: LSTM depth for the combined stage and the
            per-branch stages respectively.
        is_base / is_signallen / is_trace: which per-base inputs feed the
            sequence branch (base embedding, signal-length, trace prob).
        module: 'both_bilstm', 'seq_bilstm' or 'signal_bilstm'.
        device: CUDA device index used when moving hidden states to GPU.
        """
        super(ModelBiLSTM, self).__init__()
        self.model_type = 'BiLSTM'
        self.module = module
        self.device = device

        self.seq_len = seq_len
        self.signal_len = signal_len
        self.num_layers1 = num_layers1  # for combined (seq+signal) feature
        self.num_layers2 = num_layers2  # for seq and signal feature separately
        self.num_classes = num_classes

        self.hidden_size = hidden_size

        # Split the hidden budget between the two branches when both are used,
        # so their concatenation is exactly hidden_size wide.
        if self.module == "both_bilstm":
            self.nhid_seq = self.hidden_size // 2
            self.nhid_signal = self.hidden_size - self.nhid_seq
        elif self.module == "seq_bilstm":
            self.nhid_seq = self.hidden_size
        elif self.module == "signal_bilstm":
            self.nhid_signal = self.hidden_size
        else:
            raise ValueError("--model_type is not right!")

        # seq feature
        if self.module != "signal_bilstm":
            self.embed = nn.Embedding(vocab_size, embedding_size)  # for dna/rna base
            self.is_base = is_base
            self.is_signallen = is_signallen
            self.is_trace = is_trace
            # Per-base scalar features: mean + std (2), plus optional
            # signal-length and trace-probability channels.
            self.sigfea_num = 3 if self.is_signallen else 2
            if self.is_trace:
                self.sigfea_num += 1
            if self.is_base:
                self.lstm_seq = nn.LSTM(embedding_size + self.sigfea_num, self.nhid_seq, self.num_layers2,
                                        dropout=dropout_rate, batch_first=True, bidirectional=True)
            else:
                self.lstm_seq = nn.LSTM(self.sigfea_num, self.nhid_seq, self.num_layers2,
                                        dropout=dropout_rate, batch_first=True, bidirectional=True)
            self.fc_seq = nn.Linear(self.nhid_seq * 2, self.nhid_seq)
            # self.dropout_seq = nn.Dropout(p=dropout_rate)
            self.relu_seq = nn.ReLU()

        # signal feature
        if self.module != "seq_bilstm":
            # self.convs = ResNet3(self.nhid_signal, (1, 1, 1), self.signal_len, self.signal_len)  # (N, C, L)
            self.lstm_signal = nn.LSTM(self.signal_len, self.nhid_signal, self.num_layers2,
                                       dropout=dropout_rate, batch_first=True, bidirectional=True)
            self.fc_signal = nn.Linear(self.nhid_signal * 2, self.nhid_signal)
            # self.dropout_signal = nn.Dropout(p=dropout_rate)
            self.relu_signal = nn.ReLU()

        # combined
        self.lstm_comb = nn.LSTM(self.hidden_size, self.hidden_size, self.num_layers1,
                                 dropout=dropout_rate, batch_first=True, bidirectional=True)
        self.dropout1 = nn.Dropout(p=dropout_rate)
        self.fc1 = nn.Linear(hidden_size * 2, hidden_size)  # 2 for bidirection
        self.dropout2 = nn.Dropout(p=dropout_rate)
        self.fc2 = nn.Linear(hidden_size, num_classes)

        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(1)
        # Collected combined-stage feature vectors, one entry per forward call.
        self.feas = []

    def get_model_type(self):
        """Return the model-type tag ('BiLSTM')."""
        return self.model_type

    def init_hidden(self, batch_size, num_layers, hidden_size):
        """Create random (h0, c0) initial states for a bidirectional LSTM.

        NOTE(review): random (not zero) init on every call, via the
        long-deprecated autograd.Variable wrapper — kept as-is.
        """
        # Set initial states
        h0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size))
        c0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size))
        if use_cuda:
            h0 = h0.cuda(self.device)
            c0 = c0.cuda(self.device)
        return h0, c0

    def forward(self, kmer, base_means, base_stds, base_signal_lens, base_probs, signals):
        """Classify each k-mer window; returns (logits, softmax probs).

        kmer: (N, seq_len) integer base codes.
        base_means/base_stds/base_signal_lens/base_probs: per-base scalars,
            reshaped here to (N, seq_len, 1).
        signals: raw signal chunks fed to the signal branch as
            (N, seq_len, signal_len) — assumed; confirm against caller.
        """
        # seq feature ============================================
        if self.module != "signal_bilstm":
            base_means = torch.reshape(base_means, (-1, self.seq_len, 1)).float()
            base_stds = torch.reshape(base_stds, (-1, self.seq_len, 1)).float()
            base_signal_lens = torch.reshape(base_signal_lens, (-1, self.seq_len, 1)).float()
            base_probs = torch.reshape(base_probs, (-1, self.seq_len, 1)).float()
            # Assemble the per-base channel stack according to the enabled
            # optional features; channel count must match lstm_seq's input.
            if self.is_base:
                kmer_embed = self.embed(kmer.long())
                if self.is_signallen and self.is_trace:
                    out_seq = torch.cat((kmer_embed, base_means, base_stds, base_signal_lens,
                                         base_probs), 2)  # (N, L, C)
                elif self.is_signallen:
                    out_seq = torch.cat((kmer_embed, base_means, base_stds, base_signal_lens), 2)  # (N, L, C)
                elif self.is_trace:
                    out_seq = torch.cat((kmer_embed, base_means, base_stds, base_probs), 2)  # (N, L, C)
                else:
                    out_seq = torch.cat((kmer_embed, base_means, base_stds), 2)  # (N, L, C)
            else:
                if self.is_signallen and self.is_trace:
                    out_seq = torch.cat((base_means, base_stds, base_signal_lens, base_probs), 2)  # (N, L, C)
                elif self.is_signallen:
                    out_seq = torch.cat((base_means, base_stds, base_signal_lens), 2)  # (N, L, C)
                elif self.is_trace:
                    out_seq = torch.cat((base_means, base_stds, base_probs), 2)  # (N, L, C)
                else:
                    out_seq = torch.cat((base_means, base_stds), 2)  # (N, L, C)

            out_seq, _ = self.lstm_seq(out_seq, self.init_hidden(out_seq.size(0),
                                                                 self.num_layers2,
                                                                 self.nhid_seq))  # (N, L, nhid_seq*2)
            out_seq = self.fc_seq(out_seq)  # (N, L, nhid_seq)
            # out_seq = self.dropout_seq(out_seq)
            out_seq = self.relu_seq(out_seq)

        # signal feature ==========================================
        if self.module != "seq_bilstm":
            out_signal = signals.float()
            # resnet ---
            # out_signal = out_signal.transpose(1, 2)  # (N, C, L)
            # out_signal = self.convs(out_signal)  # (N, nhid_signal, L)
            # out_signal = out_signal.transpose(1, 2)  # (N, L, nhid_signal)
            # lstm ---
            out_signal, _ = self.lstm_signal(out_signal, self.init_hidden(out_signal.size(0),
                                                                          self.num_layers2,
                                                                          self.nhid_signal))
            out_signal = self.fc_signal(out_signal)  # (N, L, nhid_signal)
            # out_signal = self.dropout_signal(out_signal)
            out_signal = self.relu_signal(out_signal)

        # combined ================================================
        if self.module == "seq_bilstm":
            out = out_seq
        elif self.module == "signal_bilstm":
            out = out_signal
        elif self.module == "both_bilstm":
            out = torch.cat((out_seq, out_signal), 2)  # (N, L, hidden_size)
        out, _ = self.lstm_comb(out, self.init_hidden(out.size(0),
                                                      self.num_layers1,
                                                      self.hidden_size))  # (N, L, hidden_size*2)
        # Forward direction's last step and backward direction's first step
        # summarize the whole window.
        out_fwd_last = out[:, -1, :self.hidden_size]
        out_bwd_last = out[:, 0, self.hidden_size:]
        out = torch.cat((out_fwd_last, out_bwd_last), 1)
        self.feas.append(out)

        # decode
        out = self.dropout1(out)
        out = self.fc1(out)
        out = self.relu(out)
        out = self.dropout2(out)
        out = self.fc2(out)
        #self.feas.append(out)

        return out, self.softmax(out)

class ModelBiLSTM_new(nn.Module):
    """Variant of ModelBiLSTM whose forward() omits the base_probs (trace)
    input.

    Architecture is identical to ModelBiLSTM: optional sequence and signal
    BiLSTM branches, a combined BiLSTM, and a two-layer decoding head.
    Combined features are appended to ``self.feas`` on each forward pass.
    NOTE(review): because base_probs was dropped, the ``is_trace`` branches
    in forward() are duplicates of their non-trace counterparts — ``is_trace``
    no longer changes the forward computation (though __init__ still widens
    lstm_seq's input by 1 when it is True, which would mismatch at runtime).
    """

    def __init__(self, seq_len=21, signal_len=16, num_layers1=3, num_layers2=1, num_classes=2,
                 dropout_rate=0.5, hidden_size=256,
                 vocab_size=16, embedding_size=4, is_base=True, is_signallen=True,
                 is_trace=False,
                 module="both_bilstm", device=0):
        """Configure the branch BiLSTMs, combined BiLSTM and decoding head.

        Parameters mirror ModelBiLSTM.__init__ (seq_len defaults to 21 here).
        """
        super(ModelBiLSTM_new, self).__init__()
        self.model_type = 'BiLSTM'
        self.module = module
        self.device = device

        self.seq_len = seq_len
        self.signal_len = signal_len
        self.num_layers1 = num_layers1  # for combined (seq+signal) feature
        self.num_layers2 = num_layers2  # for seq and signal feature separately
        self.num_classes = num_classes

        self.hidden_size = hidden_size

        # Split hidden budget between branches so concatenation == hidden_size.
        if self.module == "both_bilstm":
            self.nhid_seq = self.hidden_size // 2
            self.nhid_signal = self.hidden_size - self.nhid_seq
        elif self.module == "seq_bilstm":
            self.nhid_seq = self.hidden_size
        elif self.module == "signal_bilstm":
            self.nhid_signal = self.hidden_size
        else:
            raise ValueError("--model_type is not right!")

        # seq feature
        if self.module != "signal_bilstm":
            self.embed = nn.Embedding(vocab_size, embedding_size)  # for dna/rna base
            self.is_base = is_base
            self.is_signallen = is_signallen
            self.is_trace = is_trace
            # mean + std (2), plus optional signal-length channel.
            self.sigfea_num = 3 if self.is_signallen else 2
            if self.is_trace:
                self.sigfea_num += 1
            if self.is_base:
                self.lstm_seq = nn.LSTM(embedding_size + self.sigfea_num, self.nhid_seq, self.num_layers2,
                                        dropout=dropout_rate, batch_first=True, bidirectional=True)
            else:
                self.lstm_seq = nn.LSTM(self.sigfea_num, self.nhid_seq, self.num_layers2,
                                        dropout=dropout_rate, batch_first=True, bidirectional=True)
            self.fc_seq = nn.Linear(self.nhid_seq * 2, self.nhid_seq)
            # self.dropout_seq = nn.Dropout(p=dropout_rate)
            self.relu_seq = nn.ReLU()

        # signal feature
        if self.module != "seq_bilstm":
            # self.convs = ResNet3(self.nhid_signal, (1, 1, 1), self.signal_len, self.signal_len)  # (N, C, L)
            self.lstm_signal = nn.LSTM(self.signal_len, self.nhid_signal, self.num_layers2,
                                       dropout=dropout_rate, batch_first=True, bidirectional=True)
            self.fc_signal = nn.Linear(self.nhid_signal * 2, self.nhid_signal)
            # self.dropout_signal = nn.Dropout(p=dropout_rate)
            self.relu_signal = nn.ReLU()

        # combined
        self.lstm_comb = nn.LSTM(self.hidden_size, self.hidden_size, self.num_layers1,
                                 dropout=dropout_rate, batch_first=True, bidirectional=True)
        self.dropout1 = nn.Dropout(p=dropout_rate)
        self.fc1 = nn.Linear(hidden_size * 2, hidden_size)  # 2 for bidirection
        self.dropout2 = nn.Dropout(p=dropout_rate)
        self.fc2 = nn.Linear(hidden_size, num_classes)

        self.relu = nn.ReLU()
        self.softmax = nn.Softmax(1)
        # Collected combined-stage feature vectors, one entry per forward call.
        self.feas = []

    def get_model_type(self):
        """Return the model-type tag ('BiLSTM')."""
        return self.model_type

    def init_hidden(self, batch_size, num_layers, hidden_size):
        """Create random (h0, c0) initial states for a bidirectional LSTM.

        NOTE(review): random init each call via deprecated autograd.Variable,
        identical to ModelBiLSTM.init_hidden — kept as-is.
        """
        # Set initial states
        h0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size))
        c0 = autograd.Variable(torch.randn(num_layers * 2, batch_size, hidden_size))
        if use_cuda:
            h0 = h0.cuda(self.device)
            c0 = c0.cuda(self.device)
        return h0, c0

    def forward(self, kmer, base_means, base_stds, base_signal_lens, signals):
        """Classify each k-mer window (no trace input); returns
        (logits, softmax probs).

        kmer: (N, seq_len) integer base codes; the scalar per-base inputs
        are reshaped here to (N, seq_len, 1).
        """
        # seq feature ============================================
        if self.module != "signal_bilstm":
            base_means = torch.reshape(base_means, (-1, self.seq_len, 1)).float()
            base_stds = torch.reshape(base_stds, (-1, self.seq_len, 1)).float()
            base_signal_lens = torch.reshape(base_signal_lens, (-1, self.seq_len, 1)).float()
            #base_probs = torch.reshape(base_probs, (-1, self.seq_len, 1)).float()
            # NOTE(review): the is_trace branches below duplicate the
            # corresponding non-trace branches since base_probs was removed.
            if self.is_base:
                kmer_embed = self.embed(kmer.long())
                if self.is_signallen and self.is_trace:
                    out_seq = torch.cat((kmer_embed, base_means, base_stds, base_signal_lens), 2)  # (N, L, C)
                elif self.is_signallen:
                    out_seq = torch.cat((kmer_embed, base_means, base_stds, base_signal_lens), 2)  # (N, L, C)
                elif self.is_trace:
                    out_seq = torch.cat((kmer_embed, base_means, base_stds), 2)  # (N, L, C)
                else:
                    out_seq = torch.cat((kmer_embed, base_means, base_stds), 2)  # (N, L, C)
            else:
                if self.is_signallen and self.is_trace:
                    out_seq = torch.cat((base_means, base_stds, base_signal_lens), 2)  # (N, L, C)
                elif self.is_signallen:
                    out_seq = torch.cat((base_means, base_stds, base_signal_lens), 2)  # (N, L, C)
                elif self.is_trace:
                    out_seq = torch.cat((base_means, base_stds), 2)  # (N, L, C)
                else:
                    out_seq = torch.cat((base_means, base_stds), 2)  # (N, L, C)

            out_seq, _ = self.lstm_seq(out_seq, self.init_hidden(out_seq.size(0),
                                                                 self.num_layers2,
                                                                 self.nhid_seq))  # (N, L, nhid_seq*2)
            out_seq = self.fc_seq(out_seq)  # (N, L, nhid_seq)
            # out_seq = self.dropout_seq(out_seq)
            out_seq = self.relu_seq(out_seq)

        # signal feature ==========================================
        if self.module != "seq_bilstm":
            out_signal = signals.float()
            # resnet ---
            # out_signal = out_signal.transpose(1, 2)  # (N, C, L)
            # out_signal = self.convs(out_signal)  # (N, nhid_signal, L)
            # out_signal = out_signal.transpose(1, 2)  # (N, L, nhid_signal)
            # lstm ---
            out_signal, _ = self.lstm_signal(out_signal, self.init_hidden(out_signal.size(0),
                                                                          self.num_layers2,
                                                                          self.nhid_signal))
            out_signal = self.fc_signal(out_signal)  # (N, L, nhid_signal)
            # out_signal = self.dropout_signal(out_signal)
            out_signal = self.relu_signal(out_signal)

        # combined ================================================
        if self.module == "seq_bilstm":
            out = out_seq
        elif self.module == "signal_bilstm":
            out = out_signal
        elif self.module == "both_bilstm":
            out = torch.cat((out_seq, out_signal), 2)  # (N, L, hidden_size)
        out, _ = self.lstm_comb(out, self.init_hidden(out.size(0),
                                                      self.num_layers1,
                                                      self.hidden_size))  # (N, L, hidden_size*2)
        # Forward direction's last step + backward direction's first step.
        out_fwd_last = out[:, -1, :self.hidden_size]
        out_bwd_last = out[:, 0, self.hidden_size:]
        out = torch.cat((out_fwd_last, out_bwd_last), 1)
        self.feas.append(out)

        # decode
        out = self.dropout1(out)
        out = self.fc1(out)
        out = self.relu(out)
        out = self.dropout2(out)
        out = self.fc2(out)

        return out, self.softmax(out)

class HookTool:
    """Container for a torch forward hook that captures a module's output.

    After registration via ``module.register_forward_hook(tool.hook_fun)``,
    ``tool.fea`` holds the output of the module's most recent forward pass.
    """

    def __init__(self):
        # No output captured until the hooked module runs forward.
        self.fea = None

    def hook_fun(self, module, fea_in, fea_out):
        """Forward-hook callback: remember the module's output tensor."""
        self.fea = fea_out

def get_feas_by_hook(model):
    """Attach a forward hook to the model's 'lstm_comb' submodule.

    Returns a HookTool whose ``fea`` attribute will hold lstm_comb's output
    after the next forward pass.

    Raises:
        ValueError: if the model has no submodule named 'lstm_comb'.
            (The original code raised an UnboundLocalError in that case
            because the return variable was never assigned; it also printed
            a debug message on success, which has been removed.)
    """
    fea_hook = None
    for name, submodule in model.named_modules():
        if name == 'lstm_comb':
            fea_hook = HookTool()
            submodule.register_forward_hook(fea_hook.hook_fun)
    if fea_hook is None:
        raise ValueError("model has no 'lstm_comb' submodule")
    return fea_hook


def read_bed(input_file):
    """Read a BED file and return the set of (chrom, start, strand) tuples.

    Columns 0, 1 and 5 of each tab-separated line are kept; values stay as
    strings so they can be matched directly against other string records.
    """
    with open(input_file, 'r') as bed_handle:
        return {
            (fields[0], fields[1], fields[5])
            for fields in (line.strip().split("\t") for line in bed_handle)
        }

def read_feature_tag(args, device=0, max_num=10000):
    """Run a trained ModelBiLSTM over extracted features and save a 2-D
    t-SNE scatter plot, coloring singleton sites red and the rest blue.

    args: parsed CLI namespace (see parse_args) — provides input/model/output
        paths and model hyper-parameters.
    device: CUDA device index used when use_cuda is True.
    max_num: the counter is incremented before the check, so at most
        max_num - 1 lines are actually read (preserved for compatibility
        with the sibling readers).
    """
    sampleinfo = []  # chromosome, pos, strand, pos_in_strand, read_name, read_strand
    kmers = []
    base_means = []
    base_stds = []
    base_signal_lens = []
    base_probs = []
    k_signals = []
    labels = []
    tags = []
    color = []  # per-sample matplotlib color: 'r' singleton, 'b' otherwise

    singletons = read_bed(args.singleton)

    data_num = 0
    with open(args.input_path, 'r') as infile:
        for line in infile:
            data_num += 1
            if data_num >= max_num:
                break
            words = line.strip().split()

            sampleinfo.append("\t".join(words[0:6]))
            # (chrom, pos, strand) identifies the genomic site.
            key = (words[0], words[1], words[2])
            color.append('r' if key in singletons else 'b')

            kmers.append([base2code_dna[x] for x in words[6]])
            base_means.append([float(x) for x in words[7].split(",")])
            base_stds.append([float(x) for x in words[8].split(",")])
            base_signal_lens.append([int(x) for x in words[9].split(",")])
            # No trace probabilities in this input format; feed zeros sized
            # by seq_len.  (Was hard-coded np.zeros(13), which broke the
            # model's reshape to (-1, seq_len, 1) once seq_len became 21.)
            base_probs.append(np.zeros(args.seq_len))
            k_signals.append([[float(y) for y in x.split(",")]
                              for x in words[11].split(";")])
            labels.append(int(words[12]))
            tags.append(int(words[13]))

    model = ModelBiLSTM(args.seq_len, args.signal_len, args.layernum1, args.layernum2,
                        args.class_num, args.dropout_rate, args.hid_rnn,
                        args.n_vocab, args.n_embed, str2bool(args.is_base),
                        str2bool(args.is_signallen), str2bool(args.is_trace),
                        args.model_type, device=device)
    try:
        para_dict = torch.load(args.model_path, map_location=torch.device('cpu'))
    except Exception:
        # Fall back to TorchScript archives.
        para_dict = torch.jit.load(args.model_path)
    model_dict = model.state_dict()
    model_dict.update(para_dict)
    model.load_state_dict(model_dict)
    del model_dict

    if use_cuda:
        model = model.cuda(device)
    model.eval()
    voutputs, vlogits = model(FloatTensor(kmers, device), FloatTensor(base_means, device),
                              FloatTensor(base_stds, device),
                              FloatTensor(base_signal_lens, device),
                              FloatTensor(np.array(base_probs), device),
                              FloatTensor(k_signals, device))
    _, vpredicted = torch.max(vlogits.data, 1)
    if use_cuda:
        vpredicted = vpredicted.cpu()
    predicted = vpredicted.numpy()  # currently unused; kept for debugging

    # model.feas holds one combined-feature matrix per forward call; with a
    # single batch the loop simply picks up that one matrix.
    embeddings = None
    for fea in model.feas:
        embeddings = fea.cpu().detach().numpy()

    tsne = TSNE(n_components=2, perplexity=30, n_iter=300)
    reduced_embeddings = tsne.fit_transform(embeddings)
    plt.scatter(reduced_embeddings[:, 0], reduced_embeddings[:, 1], c=color)
    plt.title("2D Embedding Visualization")
    plt.savefig(args.outputdir + args.pname + ".png")

def read_feature_chr(args, device=0, max_num=10000):
    """Run a trained ModelBiLSTM over extracted features and save a 2-D
    t-SNE scatter plot, coloring each point by its chromosome.

    args: parsed CLI namespace (see parse_args) — provides input/model/output
        paths and model hyper-parameters.
    device: CUDA device index used when use_cuda is True.
    max_num: the counter is incremented before the check, so at most
        max_num - 1 lines are actually read (preserved for compatibility
        with the sibling readers).
    """
    # Fixed chromosome -> color mapping, built once up front (the original
    # rebuilt these tables on every input line).
    color_board = [
        "dimgray", "darkorange", "tan", "silver", "forestgreen",
        "darkgreen", "royalblue", "navy", "red", "darksalmon",
        "peru", "olive", "yellow", "cyan", "mediumaquamarine",
        "skyblue", "purple", "fuchsia", "indigo", "khaki",
        "maroon", "teal", "blueviolet", "coral", "pink"
        ]
    chromosomes = [
        "chr1", "chr2", "chr3", "chr4", "chr5", "chr6", "chr7", "chr8", "chr9", "chr10",
        "chr11", "chr12", "chr13", "chr14", "chr15", "chr16", "chr17", "chr18", "chr19", "chr20",
        "chr21", "chr22", "chrX", "chrY", "chrM"
        ]
    color_dict = dict(zip(chromosomes, color_board))

    sampleinfo = []  # chromosome, pos, strand, pos_in_strand, read_name, read_strand
    kmers = []
    base_means = []
    base_stds = []
    base_signal_lens = []
    base_probs = []
    k_signals = []
    labels = []
    tags = []
    color = []  # per-sample matplotlib color keyed on chromosome

    data_num = 0
    with open(args.input_path, 'r') as infile:
        for line in infile:
            data_num += 1
            if data_num >= max_num:
                break
            words = line.strip().split()

            sampleinfo.append("\t".join(words[0:6]))
            # Raises KeyError for chromosomes outside chr1-22/X/Y/M,
            # matching the original behavior.
            color.append(color_dict[words[0]])

            kmers.append([base2code_dna[x] for x in words[6]])
            base_means.append([float(x) for x in words[7].split(",")])
            base_stds.append([float(x) for x in words[8].split(",")])
            base_signal_lens.append([int(x) for x in words[9].split(",")])
            # No trace probabilities in this input; feed zeros with one
            # entry per base (same length as the signal-lens column).
            base_probs.append(np.zeros(len(base_signal_lens[-1])))
            k_signals.append([[float(y) for y in x.split(",")]
                              for x in words[11].split(";")])
            labels.append(int(words[12]))
            tags.append(int(words[13]))

    model = ModelBiLSTM(args.seq_len, args.signal_len, args.layernum1, args.layernum2,
                        args.class_num, args.dropout_rate, args.hid_rnn,
                        args.n_vocab, args.n_embed, str2bool(args.is_base),
                        str2bool(args.is_signallen), str2bool(args.is_trace),
                        args.model_type, device=device)
    try:
        para_dict = torch.load(args.model_path, map_location=torch.device('cpu'))
    except Exception:
        # Fall back to TorchScript archives.
        para_dict = torch.jit.load(args.model_path)
    model_dict = model.state_dict()
    model_dict.update(para_dict)
    model.load_state_dict(model_dict)
    del model_dict

    if use_cuda:
        model = model.cuda(device)
    model.eval()
    voutputs, vlogits = model(FloatTensor(kmers, device), FloatTensor(base_means, device),
                              FloatTensor(base_stds, device),
                              FloatTensor(base_signal_lens, device),
                              FloatTensor(np.array(base_probs), device),
                              FloatTensor(k_signals, device))
    _, vpredicted = torch.max(vlogits.data, 1)
    if use_cuda:
        vpredicted = vpredicted.cpu()
    predicted = vpredicted.numpy()  # currently unused; kept for debugging

    # model.feas holds one combined-feature matrix per forward call; with a
    # single batch the loop simply picks up that one matrix.
    embeddings = None
    for fea in model.feas:
        embeddings = fea.cpu().detach().numpy()

    tsne = TSNE(n_components=2, perplexity=30, n_iter=300)
    reduced_embeddings = tsne.fit_transform(embeddings)
    plt.scatter(reduced_embeddings[:, 0], reduced_embeddings[:, 1], c=color)
    plt.title("2D Embedding Visualization")
    plt.savefig(args.outputdir + args.pname + ".png")

def read_feature_gc(args, device=0, max_num=10000):
    """Visualize model feature embeddings in 2D, colored by GC content.

    Reads up to ``max_num`` tab-separated feature lines from
    ``args.input_path``, runs them through a pretrained ``ModelBiLSTM_new``,
    reduces the features captured in ``model.feas`` to two t-SNE components,
    and saves a scatter plot (green -> red ramp for low -> high GC content)
    to ``args.outputdir + args.pname + ".png"``.

    Args:
        args: parsed CLI namespace (model hyperparameters, input/model/output
            paths).
        device: CUDA device index, used only when ``use_cuda`` is True.
        max_num: maximum number of input lines to read.
    """
    sampleinfo = []  # chromosome, pos, strand, pos_in_strand, read_name, read_strand
    kmers = []
    base_means = []
    base_stds = []
    base_signal_lens = []
    k_signals = []
    labels = []
    tags = []
    gc_contents = []
    color = []

    with open(args.input_path, 'r') as infile:
        for data_num, line in enumerate(infile, start=1):
            # Off-by-one fixed: read exactly max_num lines, not max_num - 1.
            if data_num > max_num:
                break
            words = line.strip().split()

            sampleinfo.append("\t".join(words[0:6]))
            kmers.append([base2code_dna[x] for x in words[6]])
            base_means.append([float(x) for x in words[7].split(",")])
            base_stds.append([float(x) for x in words[8].split(",")])
            base_signal_lens.append([int(x) for x in words[9].split(",")])
            k_signals.append([[float(y) for y in x.split(",")] for x in words[10].split(";")])
            labels.append(int(words[11]))
            tags.append(int(words[12]))
            gc_content = float(words[13])
            gc_contents.append(gc_content)

            # Bin GC content in steps of 0.1 onto a green -> red ramp.
            # (elif already implies the lower bound; redundant checks removed.)
            if gc_content <= 0.1:
                color.append('darkgreen')
            elif gc_content <= 0.2:
                color.append('forestgreen')
            elif gc_content <= 0.3:
                color.append('seagreen')
            elif gc_content <= 0.4:
                color.append('springgreen')
            elif gc_content <= 0.5:
                color.append('lime')
            elif gc_content <= 0.6:
                color.append('rosybrown')
            elif gc_content <= 0.7:
                color.append('lightcoral')
            elif gc_content <= 0.8:
                color.append('tomato')
            elif gc_content <= 0.9:
                color.append('orangered')
            else:
                color.append('red')

    model = ModelBiLSTM_new(args.seq_len, args.signal_len, args.layernum1, args.layernum2, args.class_num,
                            args.dropout_rate, args.hid_rnn,
                            args.n_vocab, args.n_embed, str2bool(args.is_base), str2bool(args.is_signallen),
                            str2bool(args.is_trace),
                            args.model_type, device=device)
    # Prefer a plain state_dict checkpoint; fall back to a TorchScript archive.
    try:
        para_dict = torch.load(args.model_path, map_location=torch.device('cpu'))
    except Exception:
        para_dict = torch.jit.load(args.model_path)
    model_dict = model.state_dict()
    model_dict.update(para_dict)
    model.load_state_dict(model_dict)
    del model_dict

    if use_cuda:
        model = model.cuda(device)
    model.eval()
    # Forward pass is needed only for its side effect of populating
    # model.feas; no gradients are required.
    with torch.no_grad():
        voutputs, vlogits = model(FloatTensor(kmers, device), FloatTensor(base_means, device),
                                  FloatTensor(base_stds, device), FloatTensor(base_signal_lens, device),
                                  FloatTensor(k_signals, device))

    # model.feas holds feature tensors captured during forward(); only the
    # last captured tensor is visualized (matches the original behavior).
    embeddings = None
    for fea in model.feas:
        embeddings = fea.cpu().detach().numpy()

    tsne = TSNE(n_components=2, perplexity=30, n_iter=300)
    reduced_embeddings = tsne.fit_transform(embeddings)
    plt.scatter(reduced_embeddings[:, 0], reduced_embeddings[:, 1], c=color)
    plt.title("2D Embedding Visualization")
    plt.savefig(args.outputdir + args.pname + ".png")

def read_feature_motif(args, device=0, max_num=10000):
    """Visualize model feature embeddings in 2D, colored by cytosine motif.

    Reads up to ``max_num`` feature lines from ``args.input_path`` (optionally
    filtered by ``args.label``), classifies the center cytosine of each kmer
    as CpG / CHG / CHH, runs the features through a pretrained
    ``ModelBiLSTM_new``, reduces ``model.feas`` to two t-SNE components and
    saves a motif-colored scatter plot.

    Args:
        args: parsed CLI namespace (model hyperparameters, paths, optional
            ``label`` filter).
        device: CUDA device index, used only when ``use_cuda`` is True.
        max_num: maximum number of input lines to read.
    """
    sampleinfo = []  # chromosome, pos, strand, pos_in_strand, read_name, read_strand
    kmers = []
    base_means = []
    base_stds = []
    base_signal_lens = []
    k_signals = []
    color = []

    with open(args.input_path, 'r') as infile:
        for data_num, line in enumerate(infile, start=1):
            # Off-by-one fixed: read exactly max_num lines, not max_num - 1.
            if data_num > max_num:
                break
            words = line.strip().split()

            label = int(words[11])
            # NOTE(review): args.label must be an int for this filter to ever
            # match; if argparse leaves it a string the comparison is always
            # unequal — confirm the --label option's type.
            if args.label is not None:
                if label != args.label:
                    continue

            # Appended only after the label filter so sampleinfo stays in
            # sync with the other per-site lists (was appended before).
            sampleinfo.append("\t".join(words[0:6]))

            # Motif of the center cytosine, generalized from the hard-coded
            # 21-mer indices 11/12: base right after the center G -> CpG (0),
            # one further G -> CHG (1), otherwise CHH (2). For a 21-mer the
            # center is index 10, so this reproduces the original behavior.
            center = len(words[6]) // 2
            if words[6][center + 1] == 'G':
                motif = 0
            elif words[6][center + 2] == 'G':
                motif = 1
            else:
                motif = 2
            # CpG = blue, CHG = red, CHH = green.
            color.append(('b', 'r', 'g')[motif])

            kmers.append([base2code_dna[x] for x in words[6]])
            base_means.append([float(x) for x in words[7].split(",")])
            base_stds.append([float(x) for x in words[8].split(",")])
            base_signal_lens.append([int(x) for x in words[9].split(",")])
            k_signals.append([[float(y) for y in x.split(",")] for x in words[10].split(";")])

    model = ModelBiLSTM_new(args.seq_len, args.signal_len, args.layernum1, args.layernum2, args.class_num,
                            args.dropout_rate, args.hid_rnn,
                            args.n_vocab, args.n_embed, str2bool(args.is_base), str2bool(args.is_signallen),
                            str2bool(args.is_trace),
                            args.model_type, device=device)
    # Prefer a plain state_dict checkpoint; fall back to a TorchScript archive.
    try:
        para_dict = torch.load(args.model_path, map_location=torch.device('cpu'))
    except Exception:
        para_dict = torch.jit.load(args.model_path)
    model_dict = model.state_dict()
    model_dict.update(para_dict)
    model.load_state_dict(model_dict)
    del model_dict

    if use_cuda:
        model = model.cuda(device)
    model.eval()
    # Forward pass is needed only for its side effect of populating
    # model.feas; no gradients are required.
    with torch.no_grad():
        voutputs, vlogits = model(FloatTensor(kmers, device), FloatTensor(base_means, device),
                                  FloatTensor(base_stds, device), FloatTensor(base_signal_lens, device),
                                  FloatTensor(k_signals, device))

    # model.feas holds feature tensors captured during forward(); only the
    # last captured tensor is visualized (matches the original behavior).
    embeddings = None
    for fea in model.feas:
        embeddings = fea.cpu().detach().numpy()

    tsne = TSNE(n_components=2, perplexity=30, n_iter=300)
    reduced_embeddings = tsne.fit_transform(embeddings)
    plt.scatter(reduced_embeddings[:, 0], reduced_embeddings[:, 1], c=color)
    plt.title("2D Embedding Visualization")
    plt.savefig(args.outputdir + args.pname + ".png")


def read_feature_motif2(args, device=0, max_num=10000):
    """Visualize raw (model-free) per-site features in 3D, colored by motif.

    Builds one fixed-length vector per site directly from the input file —
    13 kmer codes + 13 means + 13 stds + 13 signal lengths + 13*5 randomly
    subsampled raw signal values — reduces the vectors to three t-SNE
    components and saves a 3D scatter plot colored by CpG/CHG/CHH motif.

    Args:
        args: parsed CLI namespace (paths, optional ``label`` filter).
        device: unused here (kept for signature parity with the other
            read_feature_* functions).
        max_num: maximum number of input lines to read.
    """
    color = []
    embeddings = []

    with open(args.input_path, 'r') as infile:
        for data_num, line in enumerate(infile, start=1):
            # Off-by-one fixed: read exactly max_num lines, not max_num - 1.
            if data_num > max_num:
                break
            words = line.strip().split()

            label = int(words[11])
            # NOTE(review): args.label must be an int for this filter to ever
            # match — confirm the --label option's argparse type.
            if args.label is not None:
                if label != args.label:
                    continue

            kmer_str = words[6]
            # Motif of the center cytosine: base right after the center is G
            # -> CpG (0); one further G -> CHG (1); otherwise CHH (2).
            # Generalized from the hard-coded 21-mer (11/12) and 13-mer (7/8)
            # index pairs, which both equal center+1/center+2; this also fixes
            # the NameError (`motif` unbound) on any other kmer length.
            center = len(kmer_str) // 2
            if kmer_str[center + 1] == 'G':
                motif = 0
            elif kmer_str[center + 2] == 'G':
                motif = 1
            else:
                motif = 2

            if motif == 1:
                color.append('r')
                print('have chg')
            elif motif == 2:
                color.append('g')
            else:
                color.append('b')

            kmers = [base2code_dna[x] for x in kmer_str]
            base_means = [float(x) for x in words[7].split(",")]
            base_stds = [float(x) for x in words[8].split(",")]
            base_signal_lens = [int(x) for x in words[9].split(",")]
            if motif == 0:
                # CpG sites come from 21-mers: crop to the central 13 bases
                # so every feature vector has the same length.
                kmers = kmers[4:17]
                base_stds = base_stds[4:17]
                base_means = base_means[4:17]
                base_signal_lens = base_signal_lens[4:17]

            signals_len = 5
            signal_group = []
            for i, signal in enumerate(words[10].split(";")):
                # Apply the same central-13 crop to the per-base signal lists.
                if motif == 0 and (i >= 17 or i < 4):
                    continue
                values = [float(y) for y in signal.split(",")]
                # Subsample signals_len points per base, preserving order.
                # NOTE(review): random.sample is unseeded (plot not
                # reproducible) and raises ValueError when a base has fewer
                # than signals_len raw signal values — confirm inputs.
                for idx in sorted(random.sample(range(len(values)), signals_len)):
                    signal_group.append(float(values[idx]))

            # Sanity checks: 13 bases and 13 * 5 sampled signal points.
            for actual, expected in ((len(kmers), 13), (len(base_means), 13),
                                     (len(base_stds), 13), (len(signal_group), 65)):
                if actual != expected:
                    print(motif)
                    print(actual)

            feature = []
            for part in (kmers, base_means, base_stds, base_signal_lens, signal_group):
                feature.extend(part)
            embeddings.append(feature)

    embeddings = np.array(embeddings)

    tsne = TSNE(n_components=3, perplexity=30, n_iter=300)
    reduced_embeddings = tsne.fit_transform(embeddings)

    fig = plt.figure(figsize=(20, 20))
    # Bug fix: plt.scatter(x, y, z) treats the third positional argument as
    # the marker *size* (s=), silently producing a 2D plot. A real 3D scatter
    # must go through the Axes3D returned by add_subplot(projection='3d').
    ax = fig.add_subplot(projection='3d')
    ax.scatter(reduced_embeddings[:, 0], reduced_embeddings[:, 1],
               reduced_embeddings[:, 2], c=color)
    ax.set_title("3D Embedding Visualization")
    plt.savefig(args.outputdir + args.pname + ".png")

if __name__ == '__main__':
    # Entry point: parse CLI options and render the motif-colored 3D feature
    # plot. The sibling read_feature_* functions (tag / chr / gc variants)
    # can be swapped in here for the other visualizations.
    cli_args = parse_args()
    read_feature_motif2(cli_args)