from networkx import union
import pysam
import argparse
import sys
import os
from multiprocessing import Manager,Value
import multiprocessing as mp
import gzip
import mmap
import pod5 as p5
import time
import datetime
import numpy as np
from ont_fast5_api.fast5_interface import get_fast5_file
from pathlib import Path
from deepsignal3.utils.process_utils import CIGAR2CODE
from deepsignal3.utils.process_utils import CIGAR_REGEX
from deepsignal3.utils import bam_reader
from deepsignal3.utils.process_utils import get_refloc_of_methysite_in_motif
from deepsignal3.extract_features_pod5 import _group_signals_by_movetable_v2
from deepsignal3.utils.process_utils import complement_seq
from deepsignal3.utils.process_utils import get_motif_seqs
from deepsignal3.utils.process_utils import fill_files_queue
import re
import traceback
from tqdm import tqdm
import resource

# Raise the address-space (virtual memory) limit to unlimited so large
# signal/feature buffers are not killed by a pre-set RLIMIT_AS.
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
print("Current memory limit:", soft)
try:
    resource.setrlimit(resource.RLIMIT_AS, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
except (ValueError, OSError) as _rlimit_err:
    # An unprivileged process cannot raise a finite hard limit; continue with
    # the current limits instead of crashing at import time.
    print("Could not lift memory limit:", _rlimit_err)
print(resource.getrlimit(resource.RLIMIT_AS))

class ExtractSignalWithKmerByUncalled4(object):
    """Split a read's raw signal into per-reference-kmer segments using
    Uncalled4-style alignment tags.

    ``ur`` is a flat list of reference coordinates interpreted as consecutive
    (start, end) pairs, one pair per aligned reference segment; ``ul`` holds
    one signal-segment length per reference position, where a negative entry
    means "advance the signal cursor by that many samples without emitting a
    segment"; ``k`` is the k-mer size.  NOTE(review): the 2-base/3-base
    padding in get_signal_with_kmers (and its ``+ 5`` length assert) only
    lines up when k == 6 -- confirm callers never pass a different k.
    """

    def __init__(self, ur, ul, k):
        # ur is normalized into a list of (start, end) coordinate pairs.
        self.ur = self.process_ur(ur)
        self.ul = ul
        self.k = k

    def process_ur(self, ur):
        """Pair up the flat ``ur`` coordinate list into (start, end) tuples."""
        assert len(ur) % 2 == 0, 'The length of ur(%d) must be an even number.' % (len(ur))
        return [(ur[i], ur[i + 1]) for i in range(0, len(ur) - 1, 2)]

    def get_clip_between_ur_ref(self, ref_range, ur_pair):
        """Compute how many bases to clip from each end of the reference
        sequence so that it matches one ``ur`` segment.

        Returns (start_clip, end_clip).  start_clip is set to -1 when the ur
        segment starts before the aligned reference start, which deliberately
        trips the caller's ``st >= 0`` assertion.
        """
        start = ref_range[0] - ur_pair[0]
        if start > 0:
            # NOTE(review): the message wording looks inverted relative to the
            # condition (start > 0 means ref starts after ur) -- confirm intent.
            print("ur({}) should not before ref start({})".format(ur_pair[0], ref_range[0]))
            start = -1
        else:
            start = abs(start)
        end = ref_range[1] - ur_pair[1]
        if end < 0:
            # Only warns here; the caller's ``en >= 0`` assertion does the rejecting.
            print("ur({}) end should not after ref start({})".format(ur_pair[1], ref_range[1]))
        return start, end

    def get_rseq_by_ur(self, rseq, ref_range):
        """Clip ``rseq`` down to the ur segments and splice them together.

        Side effect: stores the per-segment sequences in ``self.new_rseq_split``.
        """
        new_rseq_split = []
        rseq_splice = ""
        for ur_pair in self.ur:
            st, en = self.get_clip_between_ur_ref(ref_range, ur_pair)
            assert st >= 0 and en >= 0, "The starting positions of ur and ref do not correspond, please check the file!"
            # NOTE(review): each segment is clipped from the *full* rseq using
            # clips computed against the whole ref_range -- verify correctness
            # when self.ur contains more than one pair.
            rseq_ = rseq[st: len(rseq) - en]
            assert ur_pair[1] - ur_pair[0] == len(rseq_), 'Not expected rseq(%d) and ur(%d) to have different lengths.' % (
                len(rseq_), ur_pair[1] - ur_pair[0])
            rseq_splice = rseq_splice + rseq_
            new_rseq_split.append(rseq_)
        self.new_rseq_split = new_rseq_split
        return rseq_splice

    def generate_kmers_by_rseq(self, rseq, ref_range):
        """Return every overlapping k-mer of the ur-clipped reference sequence.

        Side effect: stores the spliced sequence in ``self.new_rseq``.
        Raises ValueError for non-positive k or k longer than the sequence.
        """
        self.new_rseq = self.get_rseq_by_ur(rseq, ref_range)
        if self.k <= 0:
            raise ValueError("k must be integer greater than 0.")
        if self.k > len(self.new_rseq):
            raise ValueError("k should be smaller than length of sequence.")
        kmers = [self.new_rseq[i: i + self.k] for i in range(len(self.new_rseq) - self.k + 1)]
        return kmers

    def get_signal_with_kmers(self, rseq, ref_range, signal):
        """Assign a slice of ``signal`` to every k-mer position.

        Walks ``self.ul`` to carve ``signal`` into per-position segments
        (negative lengths skip samples), then pads with empty segments for the
        leading 2 and trailing 3 bases that have no centered k-mer.

        Returns (kmer_table, kmer_signal): a list of per-position dicts
        (kmer, centre nucleotide, signal, signal/ref/read positions) and the
        list of raw signal segments, one per base of ``self.new_rseq``.
        """
        kmers = self.generate_kmers_by_rseq(rseq, ref_range)
        # ul entries < 0 consume signal without producing a k-mer segment.
        neg_count = len([x for x in self.ul if x < 0])
        assert len(self.ul) - neg_count == len(kmers), 'Not expected kmer len(%d) and ul(%d) to have different lengths.' % (
            len(kmers), len(self.ul) - neg_count)
        # Padding bases: the first 2 and last 3 positions of the sequence are
        # not the centre of any k-mer, so they reuse the first/last k-mer.
        first_2_nuc = kmers[0][:2]
        last_3_nuc = kmers[-1][3:]
        last_kmer = kmers[-1]
        first_kmer = kmers[0]
        signal_start = 0
        ref_start = self.ur[0][0]
        ref_end_by_ur = 0
        read_start = 0
        kmer_signal = []
        signal_start_pos = []
        refs_pos = []
        read_pos = []
        kmer_table = []
        first_kmer_signal = []
        first_signal_start_pos = []
        # Leading pad entries carry empty signal segments.
        for n in first_2_nuc:
            dic = {'kmer': first_kmer, 'nuc': n, 'signal': [], 'signal_pos': 0, 'ref_pos': ref_start,
                   'read_pos': read_start}
            kmer_table.append(dic)
            first_kmer_signal.append([])
            first_signal_start_pos.append(0)
            read_start += 1
            ref_start += 1
        # Main walk: one signal segment per non-negative ul entry.
        for length in self.ul:
            if length < 0:
                signal_start += abs(length)
                continue
            segment = signal[signal_start:signal_start + length]
            signal_start_pos.append(signal_start)
            kmer_signal.append(list(segment))
            refs_pos.append(ref_start)
            read_pos.append(read_start)
            # Jump to the next ur segment when this one is exhausted.
            if ref_start + 1 == self.ur[ref_end_by_ur][1]:
                ref_end_by_ur += 1
                ref_start = self.ur[ref_end_by_ur][0]
                read_start += 1
            else:
                ref_start += 1
                read_start += 1
            signal_start += length
        # One table row per k-mer; 'nuc' is the centre base (index 2 of k-mer).
        for i in range(len(kmers)):
            dic = {'kmer': kmers[i], 'nuc': kmers[i][2], 'signal': list(kmer_signal[i]), 'signal_pos': signal_start_pos[i],
                   'ref_pos': refs_pos[i], 'read_pos': read_pos[i]}
            kmer_table.append(dic)
        # Trailing pad entries reuse the last segment's start position.
        for n in last_3_nuc:
            dic = {'kmer': last_kmer, 'nuc': n, 'signal': [], 'signal_pos': signal_start_pos[-1], 'ref_pos': ref_start,
                   'read_pos': read_start}
            kmer_table.append(dic)
            kmer_signal.append([])
            signal_start_pos.append(signal_start_pos[-1])
            read_start += 1
            ref_start += 1
        kmer_signal = first_kmer_signal + kmer_signal
        signal_start_pos = first_signal_start_pos + signal_start_pos
        assert len(signal_start_pos) == len(kmers) + 5 and len(signal_start_pos) == len(kmer_signal), \
            'Not expected kmer len + 5(%d), signal split by kmer len(%d) and signal start pos(%d) to have different lengths.' % (
                len(kmers) + 5, len(kmer_signal), len(signal_start_pos))
        assert len(self.new_rseq) == len(kmer_signal), \
            'Not expected new ref seq len(%d) and signal split by kmers(%d) to have different lengths.' % (
                len(self.new_rseq), len(kmer_signal))
        return kmer_table, kmer_signal

def extract_signal_with_kmer(read_id, chr, rseq, ref_range, signal, uncalled4):
    """Run the uncalled4 kmer/signal split and serialize the signal segments.

    Returns (read_id, chr, ur pairs, ';'-joined per-kmer ','-joined signal
    string, kmer table).
    """
    kmer_table, per_kmer_signals = uncalled4.get_signal_with_kmers(rseq, ref_range, signal)
    serialized = ";".join(",".join(str(v) for v in seg) for seg in per_kmer_signals)
    return read_id, chr, uncalled4.ur, serialized, kmer_table

def extract_refs_signal_with_kmer(rseq, ref_range, signal, uncalled4):
    """Run the uncalled4 kmer/signal split and expose its reference-side results.

    Returns (ur pairs, spliced ref seq, per-segment ref seqs, per-kmer signals).
    """
    _, per_kmer_signals = uncalled4.get_signal_with_kmers(rseq, ref_range, signal)
    return (uncalled4.ur, uncalled4.new_rseq, uncalled4.new_rseq_split, per_kmer_signals)

def get_q2tloc_from_cigar(r_cigar_tuple, strand, seq_len):
    """Map each query position to its reference offset from a CIGAR.

    Returns an int32 array of length seq_len + 1 where index q holds the
    reference offset aligned to query position q, -1 marks insertions, and
    the final slot holds the CIGAR-implied reference length.  Raises
    ValueError when the CIGAR does not span seq_len query bases.
    """
    UNSET = -2
    mapping = np.full(seq_len + 1, UNSET, dtype=np.int32)
    ref_off = 0
    query_off = 0
    # Reverse-strand reads walk the CIGAR back to front.
    ops = r_cigar_tuple if strand == 1 else r_cigar_tuple[::-1]
    for code, span in ops:
        if code == 1:  # insertion: query advances with no reference anchor
            mapping[query_off:query_off + span] = -1
            query_off += span
        elif code in (2, 3):  # deletion / ref skip: reference advances only
            ref_off += span
        elif code in (0, 7, 8):  # (mis)match: both advance in lockstep
            mapping[query_off:query_off + span] = np.arange(ref_off, ref_off + span, dtype=np.int32)
            query_off += span
            ref_off += span
        elif code == 6:  # padding: no-op
            pass
    mapping[query_off] = ref_off
    if mapping[-1] == UNSET:
        raise ValueError(
            ("Invalid cigar string encountered. Reference length: {}  Cigar "
             + "implied reference length: {}").format(seq_len, ref_off)
        )
    return mapping

def _write_featurestr(write_fp, featurestr_q, time_wait=1, control=False):
    output = 0
    with open(write_fp, 'w') as wf:
        while True:
            try:
                features_str = featurestr_q.get(timeout=1000)
            except mp.queues.Empty:
                print("output_Q timeout, assuming all data processed", flush=True)
                break
            if features_str == "kill":
                break
            for one_features_str in features_str:
                output += 1
                wf.write(one_features_str + "\n")
                if control and output >= 4000:
                    sys.exit(0)
            wf.flush()
    sys.exit(0)
        
def statistic_read(features_file, signal_ids):
    """Scan a (possibly gzipped) feature tsv and count lines whose first
    column (read id) appears in *signal_ids*.

    Returns (set of matching read ids, total line count, matching line count).
    """
    if features_file.endswith(".gz"):
        infile = gzip.open(features_file, 'rt')
    else:
        infile = open(features_file, 'r')
    union_read_set = set()
    read_nums = 0
    union_read_nums = 0
    with infile:
        for row in infile:
            read_nums += 1
            rid = row.strip().split("\t")[0]
            if rid in signal_ids:
                union_read_nums += 1
                union_read_set.add(rid)
    print(f"Total reads: {read_nums}, Unique reads: {len(union_read_set)}, Filtered reads: {union_read_nums}", flush=True)
    return union_read_set, read_nums, union_read_nums

def process_tsv(features_file, feature_Q, signal_ids, read_nums, existing_keys, remove_indexes, time_wait=1, reads_per_chunk=2, qsize_limit=20):
    """Stream a (possibly gzipped) feature tsv into *feature_Q* as chunks of
    (read_id, chromosome, {position: prediction}) tuples.

    Lines whose "read||chrom" key appears in *existing_keys* (already written)
    or *remove_indexes* (blacklisted) are skipped.  Throttles on *qsize_limit*
    by sleeping *time_wait* seconds.  *signal_ids* is accepted for interface
    compatibility but not consulted here.
    """
    if features_file.endswith(".gz"):
        handle = gzip.open(features_file, 'rt')
    else:
        handle = open(features_file, 'r')

    def _enqueue(chunk):
        # Back off while the consumer side is saturated.
        while feature_Q.qsize() >= qsize_limit:
            time.sleep(time_wait)
        feature_Q.put(chunk)

    chunk = []
    for row in tqdm(handle, total=read_nums, ncols=100, desc="process rockfish"):
        fields = row.strip().split("\t")
        rid = fields[0]
        chrom = fields[1]
        key = '||'.join([rid, chrom])
        if key in existing_keys or key in remove_indexes:
            continue
        site_positions = [int(v) for v in fields[2].split(",")]
        site_preds = [float(v) for v in fields[3].split(",")]
        chunk.append((rid, chrom, dict(zip(site_positions, site_preds))))
        if len(chunk) >= reads_per_chunk:
            _enqueue(chunk)
            chunk = []
    if chunk:
        _enqueue(chunk)
    handle.close()
    print('finish read feature file', flush=True)

def determine_motif_at_pos(seq, pos):
    """Classify the cytosine context at *pos* in *seq*.

    Returns "CG", "CHG", or "CHH" (H = A/C/T), or None when the position is
    not a cytosine, has no classifiable context, or is too close to the end.
    """
    if pos >= len(seq) - 1:
        return None
    if seq[pos:pos + 2] == "CG":
        return "CG"
    if pos >= len(seq) - 2:
        return None
    ctx = seq[pos:pos + 3]
    if ctx[0] != 'C':
        return None
    if ctx[1] != 'G' and ctx[2] == 'G':
        return "CHG"
    if ctx[1] != 'G' and ctx[2] != 'G':
        return "CHH"
    return None

def filter_n_and_update_indices(seq, pred_pos):
    """Remove 'N' bases from *seq* and shift the positions in *pred_pos* left
    by the number of removed bases that precede each of them.

    Returns (seq without 'N', adjusted positions, indices of 'N' in the
    original seq).  The previous version also kept an ``n_count`` local that
    was never used; it has been dropped.
    """
    n_positions = [i for i, base in enumerate(seq) if base == 'N']
    # Each position moves left by the number of N's strictly before it.
    updated_pred_pos = [pos - sum(1 for n_pos in n_positions if n_pos < pos)
                        for pos in pred_pos]
    filtered_seq = seq.replace('N', '')
    return filtered_seq, updated_pred_pos, n_positions

def align_signals_and_extend_ref_seq(pos_pair, read_signal, read_seq, ref_seq, motif_seqs, methyloc, strand, ref_start, ref_end):
    """Build a reference-anchored sequence/signal by walking aligned
    (read_pos, ref_pos) pairs.

    Soft-clipped read ends (pairs before the first / after the last pair with
    a reference position) keep their read bases and signal; aligned positions
    take the reference base; read insertions fold their signal into the last
    aligned reference position.  Then motif sites are located in the new
    sequence and mapped back to genomic coordinates.

    Returns (new_ref_seq, new_ref_signal, {ref_pos: read_pos}, ref_poss,
    pred_pos); sites falling inside the clipped ends get ref_pos -1.
    """
    # Index of the first / last pair that actually has a reference position.
    first_valid_index = next((i for i, (_, ref_pos) in enumerate(pos_pair) if ref_pos is not None), len(pos_pair))
    last_valid_index = len(pos_pair) - 1 - next((i for i, (_, ref_pos) in enumerate(reversed(pos_pair)) if ref_pos is not None), len(pos_pair))
    new_ref_seq = []
    new_ref_signal = []
    # Leading clip: keep read bases/signal verbatim.
    for i in range(first_valid_index):
        read_pos, _ = pos_pair[i]
        if read_pos is not None:
            new_ref_seq.append(read_seq[read_pos])
            new_ref_signal.append(read_signal[read_pos])
    last_valid_ref_pos = len(new_ref_seq) - 1
    # Aligned core: reference bases; insertions merge signal into the
    # previous reference position.
    for i in range(first_valid_index, last_valid_index + 1):
        read_pos, ref_pos = pos_pair[i]
        if ref_pos is not None:
            new_ref_seq.append(ref_seq[ref_pos])
            # Deletions (read_pos None) contribute an empty signal segment.
            new_ref_signal.append(read_signal[read_pos] if read_pos is not None else [])
            last_valid_ref_pos = len(new_ref_seq) - 1
        elif ref_pos is None:
            # NOTE(review): last_valid_ref_pos is always an int here (it is
            # initialized to len-1, possibly -1 with no leading clip), so this
            # guard never filters; an insertion with read_pos None would also
            # raise on read_signal[read_pos] -- confirm pairs can't be (None, None).
            if last_valid_ref_pos is not None:
                new_ref_signal[last_valid_ref_pos].extend(read_signal[read_pos])
    # Trailing clip: keep read bases/signal verbatim.
    for i in range(last_valid_index + 1, len(pos_pair)):
        read_pos, _ = pos_pair[i]
        if read_pos is not None:
            new_ref_seq.append(read_seq[read_pos])
            new_ref_signal.append(read_signal[read_pos])
    new_ref_seq = ''.join(base for base in new_ref_seq)
    ref_readlocs = dict()
    ref_poss = []
    pred_pos = []
    tsite_locs = get_refloc_of_methysite_in_motif(new_ref_seq, set(motif_seqs), methyloc)
    for loc_in_read in tsite_locs:
        # Sites in the clipped (unaligned) ends have no genomic coordinate.
        if loc_in_read<first_valid_index:
            ref_pos = -1
            ref_poss.append(ref_pos)
            pred_pos.append(loc_in_read)
            continue
        if loc_in_read>last_valid_index:
            ref_pos = -1
            ref_poss.append(ref_pos)
            pred_pos.append(loc_in_read)
            continue
        # NOTE(review): this assumes sequence index == alignment-column index
        # (i.e. no indels between first_valid_index and the site) -- confirm.
        if strand == "-":
            ref_pos = ref_end - loc_in_read - 1 + first_valid_index
        else:
            ref_pos = ref_start + loc_in_read - first_valid_index
        ref_poss.append(ref_pos)
        pred_pos.append(loc_in_read)
    ref_readlocs = dict(zip(ref_poss, pred_pos))
    return new_ref_seq, new_ref_signal, ref_readlocs, ref_poss, pred_pos

def process_dorado(bam_index, pod5_dr, feature_Q, output_Q, hp_position, hn_position, bisulfite, use_ref, motif_seqs, methyloc, time_wait=1, d_batch_size=2, qsize_limit=22, mapq=10, kmer=6, progress_bar=None, lock=None, process_chr=None, no_reverse=False):
    """Worker: consume (read_id, chrom, {ref_pos: prediction}) tuples from
    *feature_Q*, join each read's pod5 signal with its bam alignment, and put
    tab-separated feature strings on *output_Q* in batches of *d_batch_size*.

    Stops when the "kill" sentinel is seen (and re-queues it for sibling
    workers).  *progress_bar* is a queue that receives 1 per processed read;
    NOTE(review): *lock* is accepted but never used here -- confirm it can be
    dropped at the call site.
    """
    fea_list = []
    while True:
        # Throttle while the writer side is saturated.
        while output_Q.qsize() >= qsize_limit:
            time.sleep(time_wait)
        read_data = feature_Q.get()
        if read_data == "kill":
            # Re-queue the sentinel so every sibling worker also terminates.
            feature_Q.put("kill")
            break
        for (read_name, chromosome, pred_deepsignal) in read_data:
            progress_bar.put(1)
            while output_Q.qsize() >= qsize_limit:
                time.sleep(time_wait)
            # Raw signal and pA calibration from the pod5 record.
            read = pod5_dr.get_read(read_name)
            if read is None:
                continue
            signal = read.signal
            shift_dacs_to_pa = read.calibration.offset
            scale_dacs_to_pa = read.calibration.scale
            try:
                for bam_read in bam_index.get_alignments(read_name):
                    # Keep only primary, mapped alignments above the MAPQ cutoff.
                    if bam_read.is_mapped == False:
                        continue
                    if bam_read.mapping_quality < mapq:
                        continue
                    if bam_read.is_supplementary or bam_read.is_secondary:
                        continue
                    if bam_read.is_reverse and no_reverse:
                        continue
                    reference_name = bam_read.reference_name
                    if reference_name != chromosome:
                        continue
                    # Optional chromosome allow-list / "noCHR" deny rule.
                    if process_chr is not None:
                        if isinstance(process_chr, list):
                            if reference_name not in process_chr:
                                continue
                        else:
                            if process_chr[:2] != 'no':
                                if reference_name != process_chr:
                                    continue
                            elif process_chr[:2] == 'no':
                                if reference_name == process_chr[2:]:
                                    continue
                    cigar_tuples = bam_read.cigartuples
                    seq = bam_read.get_forward_sequence()
                    if seq is None or signal is None:
                        continue
                    seq_len = len(seq)
                    if bam_read.infer_query_length() != seq_len:
                        print('infer read length is not same as seq length of readid {}'.format(read_name))
                        continue
                    if use_ref:
                        # Reference sequence is reconstructed from the MD tag.
                        if not bam_read.has_tag('MD'):
                            print('not have MD of readid {}'.format(read_name))
                            continue
                        try:
                            if bam_read.is_reverse:
                                ref_seq = complement_seq(bam_read.get_reference_sequence().upper())
                            else:
                                ref_seq = bam_read.get_reference_sequence().upper()
                        except:
                            print('MD length does not match CIGAR of readid {}'.format(read_name))
                            continue
                    
                    read_dict = dict(bam_read.tags)
                    strand_code = 0 if bam_read.is_reverse else 1
                    strand = "-" if strand_code == 0 else "+"
                    ref_start = bam_read.reference_start
                    ref_end = bam_read.reference_end
                    # Basecaller tags: ts/sp trim counts, sm/sd normalization,
                    # mv the move table.
                    num_trimmed = read_dict["ts"]
                    if bam_read.has_tag('sp'):
                        num_trimmed += bam_read.get_tag('sp')
                    shift_pa_to_norm = read_dict["sm"]
                    scale_pa_to_norm = read_dict["sd"]
                    mv_table = read_dict["mv"]
                    # NOTE(review): the negative branch slices from the start
                    # (signal[:num_trimmed]) -- confirm that is the intended
                    # handling of a negative trim count.
                    if num_trimmed >= 0:
                        signal_trimmed = signal[num_trimmed:]
                    else:
                        signal_trimmed = signal[:num_trimmed]
                    # mv_table[0] is the stride; the rest are per-sample moves.
                    signal_group = _group_signals_by_movetable_v2(signal_trimmed, np.asarray(mv_table[1:]), int(mv_table[0]))
                    if use_ref:
                        if bam_read.has_tag('ur') and bam_read.has_tag('ul'):
                            # Uncalled4 path: ur/ul tags carry the ref/signal alignment.
                            ur = bam_read.get_tag("ur")
                            ul = bam_read.get_tag("ul")
                            ref_range = [ref_start, ref_end]
                            uncalled4_obj = ExtractSignalWithKmerByUncalled4(ur, ul, kmer)
                            # NOTE(review): the *untrimmed* signal is passed here,
                            # unlike the move-table path above -- confirm ur/ul
                            # index the raw signal.
                            ref_range, seq, rseqs, signal_group = extract_refs_signal_with_kmer(ref_seq, ref_range, signal, uncalled4_obj)
                            ref_poss = []
                            pred_pos = []
                            ref_len = []
                            for i in range(len(rseqs)):
                                rseq = rseqs[i]
                                ref_start, ref_end = ref_range[i]
                                ref_len.append(len(rseq))
                                tsite_locs = get_refloc_of_methysite_in_motif(rseq, set(motif_seqs), methyloc)
                                for loc_in_read in tsite_locs:
                                    if strand == "-":
                                        ref_pos = ref_end - loc_in_read - 1
                                    else:
                                        ref_pos = ref_start + loc_in_read
                                    ref_poss.append(ref_pos)
                                    # Offset site locations by the preceding segments.
                                    pred_pos.append(loc_in_read + sum(ref_len[:i]))
                            ref_readlocs = dict(zip(pred_pos, ref_poss))
                        else:
                            # Aligned-pairs path: build (read, ref) offset pairs in
                            # forward-read orientation.
                            pos_pair = []
                            for read_pos, ref_pos in bam_read.get_aligned_pairs():
                                if read_pos is None:
                                    if bam_read.is_reverse:
                                        pos_pair.append((None, ref_end - ref_pos - 1))
                                    else:
                                        pos_pair.append((None, ref_pos - ref_start))
                                    continue
                                if ref_pos is None:
                                    if bam_read.is_reverse:
                                        pos_pair.append((len(seq) - read_pos - 1, None))
                                    else:
                                        pos_pair.append((read_pos, None))
                                    continue
                                if bam_read.is_reverse:
                                    pos_pair.append((len(seq) - read_pos - 1, ref_end - ref_pos - 1))
                                else:
                                    pos_pair.append((read_pos, ref_pos - ref_start))
                            if strand == "-":
                                pos_pair.reverse()
                            seq, signal_group, ref_readlocs, ref_poss, pred_pos = align_signals_and_extend_ref_seq(
                                pos_pair, signal_group, seq, ref_seq, motif_seqs, methyloc, strand, ref_start, ref_end)
                    # Every base must own exactly one signal segment.
                    if len(signal_group) != len(seq):
                        print('signal to seq error!', flush=True)
                        continue
                    if use_ref is False:
                        # Read-anchored path: map motif sites in the read to the
                        # reference through the CIGAR.
                        qalign_start = bam_read.query_alignment_start
                        qalign_end = bam_read.query_alignment_end
                        if bam_read.is_reverse:
                            seq_start = len(seq) - qalign_end
                            seq_end = len(seq) - qalign_start
                        else:
                            seq_start = qalign_start
                            seq_end = qalign_end
                        q_to_r_poss = get_q2tloc_from_cigar(cigar_tuples, strand_code, (seq_end - seq_start))
                        ref_readlocs = dict()
                        ref_poss = []
                        pred_pos = []
                        tsite_locs = get_refloc_of_methysite_in_motif(seq, set(motif_seqs), methyloc)
                        for loc_in_read in tsite_locs:
                            if seq_start <= loc_in_read < seq_end:
                                offset_idx = loc_in_read - seq_start
                                # -1 marks an insertion (no reference anchor).
                                if q_to_r_poss[offset_idx] != -1:
                                    if strand == "-":
                                        ref_pos = ref_end - 1 - q_to_r_poss[offset_idx]
                                    else:
                                        ref_pos = ref_start + q_to_r_poss[offset_idx]
                                    ref_readlocs[ref_pos] = loc_in_read
                                    ref_poss.append(ref_pos)
                                    pred_pos.append(loc_in_read)
                    if len(ref_poss) == 0:
                        print('no ref poss with readid {} and strand {}'.format(read_name, strand))
                        continue
                    # Sites without a tsv prediction get the -1 placeholder.
                    for key in ref_poss:
                        if key not in pred_deepsignal.keys():
                            pred_deepsignal[key] = -1
                    signal_group_new = []
                    for sig in signal_group:
                        signal_group_new.append(np.round(np.array(sig), decimals=6))
                    norm_signals_text = ';'.join([",".join([str(y) for y in x]) for x in signal_group_new])
                    # Per-site labels: high-confidence positions (hp/hn) win,
                    # then the model prediction thresholded at 0.5, else -1.
                    mean_pred = dict()
                    wgbs = dict()
                    bisulfite_ref = dict()
                    pred_label = dict()
                    for key in ref_poss:
                        deep_val = pred_deepsignal.get(key, -1)
                        mean_pred[key] = deep_val
                        
                        key_combine = f"{reference_name}||{key}"
                        bisulfite_ref[key] = bisulfite.get(key_combine, -1)
                        if hp_position and key_combine in hp_position:
                            wgbs[key] = 1
                        elif hn_position and key_combine in hn_position:
                            wgbs[key] = 0
                        else:
                            wgbs[key] = -1
                        if wgbs[key] != -1:
                            pred_label[key] = wgbs[key]
                        elif mean_pred[key] == -1:
                            pred_label[key] = -1
                        else:
                            pred_label[key] = 1 if mean_pred[key] >= 0.5 else 0
                    # Assemble the tab-separated output record.
                    pred_deepsignal_new = np.round(np.array([pred_deepsignal[x] for x in ref_poss]), decimals=6)
                    pred_deepsignal_text = ','.join([str(x) for x in pred_deepsignal_new])
                    pred_dorado_text = '.'
                    mean_pred_new = np.round(np.array([mean_pred[x] for x in ref_poss]), decimals=6)
                    mean_pred_text = ','.join([str(x) for x in mean_pred_new])
                    pred_label_text = ','.join([str(pred_label[x]) for x in ref_poss])
                    pred_pos_text = ','.join([str(x) for x in pred_pos])
                    sample_id = '\t'.join([read_name, reference_name, str(ref_start)])
                    bisulfite_text = ','.join([str(bisulfite_ref[x]) for x in ref_poss])
                    ref_pos_text = ','.join(
                        [str(x) for x in ref_poss])
                    fea_str = '\t'.join([sample_id, seq, norm_signals_text, pred_pos_text, pred_dorado_text, pred_deepsignal_text, mean_pred_text, pred_label_text, str(bam_read.mapping_quality),
                                         str(shift_dacs_to_pa), str(scale_dacs_to_pa), str(shift_pa_to_norm), str(scale_pa_to_norm), bisulfite_text, ref_pos_text])
                    fea_list.append(fea_str)
                    if len(fea_list) >= d_batch_size:
                        output_Q.put(fea_list)
                        fea_list = []
            except Exception as e:
                # NOTE(review): broad catch keeps the worker alive on any
                # per-read failure; the traceback is printed for diagnosis.
                traceback.print_exc()
                continue
    if len(fea_list) > 0:
        output_Q.put(fea_list)

def generate_key(line):
    """Build the '||'-joined key from the first two whitespace fields of *line*."""
    return '||'.join(line.split()[:2])

def remove_last_line(file_path):
    """Truncate *file_path* so its last line (everything after the final
    preceding newline) is removed; an empty file is left untouched.

    The previous implementation opened the file read-only, so ``truncate()``
    raised ``io.UnsupportedOperation``, and it read from EOF (``read(1)``
    always returned ''), so its scan always ran down to position 0 and would
    have truncated the whole file.  This version opens in binary read/write
    mode and scans backwards byte by byte.
    """
    with open(file_path, 'rb+') as f:
        f.seek(0, os.SEEK_END)
        file_size = f.tell()
        if file_size == 0:
            return
        # Start just before the final byte so a trailing newline does not
        # terminate the scan immediately.
        pos = file_size - 1
        while pos > 0:
            f.seek(pos - 1)
            if f.read(1) == b'\n':
                break
            pos -= 1
        # pos is 0 (single-line file) or just past the newline that ends the
        # second-to-last line.
        f.truncate(pos)

def process_file(output_file):
    """Collect the key of every line already present in *output_file*.

    A missing file yields an empty set, so a fresh run starts with nothing
    marked as done.
    """
    existing_keys = set()
    try:
        with open(output_file, 'r') as fh:
            for row in fh:
                existing_keys.add(generate_key(row))
    except FileNotFoundError:
        pass
    return existing_keys

def remove_key(key_input):
    """Read one key per line from *key_input* and return them as a set."""
    with open(key_input, 'r') as fh:
        return {row.strip() for row in fh}

def read_position_file(position_file):
    """Return a set of "chrom||pos" keys built from the first two whitespace
    fields of each line in *position_file*."""
    key_sep = "||"
    with open(position_file, "r") as rf:
        return {key_sep.join(row.strip().split()[:2]) for row in rf}

def read_bed(bisulfite_bed, strict=False, depth_threshold=5, label=1):
    """Parse a bisulfite BED file into {"chrom||pos": methylation_frequency}.

    A 6-column BED is treated as a label-only file: every site gets
    ``label * 100.0``.  Otherwise a bedMethyl-style layout is assumed with
    coverage in column 10 and percent methylation in column 11; sites below
    *depth_threshold* are dropped.  With *strict*, only sites whose opposite
    strand partner (pos+1 on '-', pos-1 on '+') agrees strongly are kept:
    both >= 95 maps to 100, both <= 5 maps to 0.

    The previous version also built a ``depth_info`` dict that was never
    read; it has been removed.
    """
    key_sep = "||"
    value_info = {}
    with open(bisulfite_bed, "r") as rf:
        for line in rf:
            words = line.strip().split()
            chrom = words[0]
            pos = int(words[1])
            if len(words) == 6:
                strand = words[5]
                value = label * 100.0
            else:
                # bedMethyl layout: strand in col 6, coverage col 10, percent col 11.
                strand = words[5]
                depth = int(words[9])
                value = float(words[10])
                if depth < depth_threshold:
                    continue
            value_info[key_sep.join([chrom, str(pos), strand])] = value
    freqinfo = {}
    if strict:
        for m_key, value_1 in value_info.items():
            chrom, pos, strand = m_key.split(key_sep)
            pos = int(pos)
            # The complementary cytosine of a CpG sits one base over on the
            # opposite strand.
            if strand == '+':
                pair_key = key_sep.join([chrom, str(pos + 1), '-'])
            else:
                pair_key = key_sep.join([chrom, str(pos - 1), '+'])
            if pair_key not in value_info:
                continue
            value_2 = value_info[pair_key]
            freq_key = key_sep.join([chrom, str(pos)])
            if value_1 >= 95 and value_2 >= 95:
                freqinfo[freq_key] = 100
            elif value_1 <= 5 and value_2 <= 5:
                freqinfo[freq_key] = 0
    else:
        for m_key, value in value_info.items():
            chrom, pos, strand = m_key.split(key_sep)
            # Strand is dropped from the output key; later entries overwrite
            # earlier ones for the same chrom/pos, as before.
            freqinfo[key_sep.join([chrom, pos])] = value
    return freqinfo

def read_tsv(key_input):
    """Return the stripped lines of *key_input* as a list (order preserved)."""
    with open(key_input, 'r') as fh:
        return [row.strip() for row in fh]

def read_id(pod5_dir):
    """Collect every read id (as str) from all *.pod5 files under *pod5_dir*,
    searching directories recursively, and print the total count."""
    collected = []
    for pod5 in Path(pod5_dir).rglob("*.pod5"):
        with p5.DatasetReader(pod5, recursive=True, max_cached_readers=10, threads=5) as dataset:
            collected.extend(str(read_record.read_id) for read_record in dataset)
    print(len(collected))
    return collected

def process_chr(chr_file):
    """Return the chromosome names listed one per line in *chr_file*."""
    with open(chr_file, 'r') as fh:
        return [row.strip() for row in fh]

def extract(args):
    """Pipeline driver: wire up the tsv reader, the process_dorado workers,
    and the feature writer via multiprocessing queues.

    One reader process streams the feature tsv into feature_Q, nproc-2
    worker processes join signal + alignment data and fill output_Q, and one
    writer process drains output_Q into args.write_path.  A separate queue
    (bar_q) feeds the tqdm progress bar from the workers.
    """
    manager = mp.Manager()
    feature_Q = manager.Queue()
    output_Q = manager.Queue()
    # NOTE(review): the lock is passed to workers but never used there.
    lock = manager.Lock()
    input_path = os.path.abspath(args.tsv)
    if not os.path.exists(input_path):
        raise ValueError("--tsv input_path does not exist!")
    bam_index = bam_reader.ReadIndexedBam(args.bam)
    signal_dir = os.path.abspath(args.signal)
    # A directory of pod5 files is read recursively; a single file directly.
    if os.path.isdir(signal_dir):
        pod5_dr = p5.DatasetReader(signal_dir, recursive=True, threads=args.pod5_proc)
    else:
        pod5_dr = p5.DatasetReader(signal_dir, threads=args.pod5_proc)
    # Read-id universe: explicit tsv list, or enumerated from the pod5 input.
    if args.tsv_read_ids:
        signal_ids = read_tsv(args.tsv_read_ids)
    else:
        if os.path.isdir(signal_dir):
            signal_ids = read_id(args.signal)
        else:
            signal_ids = list(pod5_dr.read_ids)
    if args.keyset is not None:
        remove_indexes = remove_key(args.keyset)
    else:
        remove_indexes = set()
    # Keys already in the output file are skipped (resume support).
    existing_keys = process_file(args.write_path)
    hp_position = None
    hn_position = None
    if args.hp is not None and args.hn is not None:
        hp_position = read_position_file(args.hp)
        hn_position = read_position_file(args.hn)
    bisulfite = {}
    is_dna = False if args.rna else True
    motif_seqs = get_motif_seqs(args.motifs, is_dna)
    if args.bed is not None:
        for bed in args.bed:
            # Entries may be plain paths or "motif,path" pairs.
            if ',' not in bed:
                bisulfite.update(read_bed(bed, args.strict, args.depth, args.label))
            else:
                motif, bed_address = bed.split(',', 1)
                bisulfite.update(read_bed(bed_address, args.strict, args.depth, args.label))
    timewait = args.timewait
    chr_indexes = args.chr
    if args.chr is not None:
        if os.path.isfile(args.chr):
            chr_indexes = process_chr(args.chr)
    union_read_set, read_nums,union_read_nums = statistic_read(input_path, signal_ids)

    bar_q= manager.Queue()

    with tqdm(total=union_read_nums, desc="Processing read", ncols=100) as progress_bar:
        # Reader process: tsv -> feature_Q.
        pd = mp.Process(target=process_tsv, args=(input_path, feature_Q, union_read_set,read_nums, existing_keys, remove_indexes, timewait, args.chunk, args.q_size_limit), name="fea_reader")
        pd.daemon = True
        pd.start()
        # Workers: one reader + one writer reserved out of nproc.
        ex_dp = args.nproc - 2
        ex_procs = []
        time.sleep(2)
        for i in range(ex_dp):
            pb = mp.Process(target=process_dorado, args=(bam_index, pod5_dr, feature_Q, output_Q, hp_position, hn_position, bisulfite, args.ref, motif_seqs, args.mod_loc, timewait, args.d_batch_size, args.q_size_limit, args.mapq, args.kmer, bar_q, lock, chr_indexes, args.no_reverse), name="pb_reader")
            pb.daemon = True
            pb.start()
            ex_procs.append(pb)
        # Writer process: output_Q -> args.write_path.
        p_w = mp.Process(target=_write_featurestr, args=(args.write_path, output_Q, timewait, args.control), name="writer")
        p_w.daemon = True
        p_w.start()
        count = 0
        while count < union_read_nums:
            bar_q.get()  # fetch an update signal from the queue
            progress_bar.update(1)
            count += 1
        pd.join()
        for _ in range(ex_dp):  # put a "kill" for every process_dorado worker, just in case
            feature_Q.put("kill")
        print("Waiting for process_dorado to finish...")
        for pb in ex_procs:
            pb.join()
        output_Q.put("kill")
        print("Waiting for writer to finish...")
        p_w.join()
        print("All processes finished.")

def parse_args():
    """Build and parse the command-line options for feature extraction.

    Returns:
        argparse.Namespace: parsed options (reads sys.argv).
    """
    parser = argparse.ArgumentParser("")
    # --- input files / position sets ---
    parser.add_argument("--keyset", type=str, required=False)
    parser.add_argument("--hp", type=str, required=False, help='high confidence positive position')
    parser.add_argument("--hn", type=str, required=False, help='high confidence negative position')
    parser.add_argument("--bam", type=str, required=True)
    parser.add_argument("--pod5", action="store_true", default=False, required=False, help='use pod5, default false')
    # repeatable: each value is either a BED path or "MOTIF,PATH" (split on first comma upstream)
    parser.add_argument("--bed", type=str, required=False, action="append")
    parser.add_argument("--signal", type=str, required=True)
    parser.add_argument("--tsv", type=str, required=True)
    parser.add_argument("--tsv_read_ids", type=str, required=False, help='TSV file with read IDs')
    parser.add_argument("--write_path", type=str, required=True)
    # --- process / queue tuning ---
    parser.add_argument("--nproc", "-p", type=int, required=True)
    parser.add_argument("--timewait", "-t", default=0.1, type=float, required=False)
    parser.add_argument("--d_batch_size", action="store", type=int, default=2, required=False)
    parser.add_argument("--chunk", '-c', default=1, type=int, required=False)
    parser.add_argument('--q_size_limit', '-q', type=int, default=80, required=False)
    parser.add_argument("--control", action="store_true", default=False, required=False, help='test')
    # --- filtering / feature options ---
    parser.add_argument('--mapq', type=int, default=10, required=False)
    parser.add_argument('--depth', type=int, default=5, required=False)
    parser.add_argument('--kmer', type=int, default=6, required=False)
    parser.add_argument('--pod5_proc', '-d', type=int, default=10, required=False)
    # NOTE(review): the original help text here was a copy-paste of --ref's help;
    # the flag is passed through to process_dorado — confirm exact semantics there.
    parser.add_argument("--no_reverse", action="store_true", default=False, required=False, help="whether to skip reverse-strand handling, default false.")
    parser.add_argument("--ref", action="store_true", default=False, required=False, help="whether use ref sequence instead of read sequence, default false.")
    parser.add_argument("--motifs", action="store", type=str, required=False, default="CG", help="motif seq to be extracted, default: CG.")
    parser.add_argument("--mod_loc", action="store", type=int, required=False, default=0, help="0-based location of the targeted base in the motif, default 0")
    parser.add_argument("--rna", action="store_true", default=False, required=False, help="the fast5 files are from RNA samples.")
    parser.add_argument('--strict', action="store_true", default=False, required=False)
    parser.add_argument('--test', action="store_true", default=False, required=False)
    parser.add_argument("--chr", type=str, required=False, help='only extract chr')
    parser.add_argument("--label", type=int, required=False, help="label for bed file")
    return parser.parse_args()

def main():
    """CLI entry point: parse command-line options and run the extraction pipeline."""
    extract(parse_args())

if __name__ == '__main__':
    # main() returns None, so sys.exit exits with status 0 on success.
    sys.exit(main())