#!/usr/bin/python
# coding=UTF-8
import argparse
from operator import ge
import sys
import os
from multiprocessing import Manager
import multiprocessing as mp
import gzip
import mmap
import pod5 as p5
import time
import datetime
import numpy as np
from ont_fast5_api.fast5_interface import get_fast5_file
from deepsignal3.utils.process_utils import fill_files_queue
from deepsignal3.utils.process_utils import get_refloc_of_methysite_in_motif
from deepsignal3.utils.process_utils import get_motif_seqs
from pathlib import Path
from deepsignal3.utils.process_utils import CIGAR2CODE
from deepsignal3.utils.process_utils import CIGAR_REGEX
from deepsignal3.utils import bam_reader
from deepsignal3.extract_features_pod5 import _group_signals_by_movetable_v2
import re
import traceback
from deepsignal3.utils.process_utils import complement_seq
from tqdm import tqdm
from typing import List, Tuple, Optional

class ExtractSignalWithKmerByUncalled4(object):
    """Re-segment a read's raw signal onto reference coordinates using the
    Uncalled4-style alignment tags.

    ur -- flat, even-length list of reference coordinates; consecutive pairs
          delimit aligned reference intervals (see process_ur).
    ul -- per-reference-position signal segment lengths; negative entries are
          stretches of raw signal to skip — presumably unaligned signal,
          TODO confirm against the Uncalled4 ur/ul tag specification.
    k  -- k-mer size used when pairing sequence context with signal.
    """

    def __init__(self, ur, ul, k):
        self.ur = self.process_ur(ur)
        self.ul = ul
        self.k = k

    def process_ur(self, ur):
        # Pair up consecutive entries: [a, b, c, d] -> [(a, b), (c, d)].
        assert len(ur) % 2 == 0, 'The length of ur(%d) must be an even number.' % (len(ur))
        return [(ur[i], ur[i + 1]) for i in range(0, len(ur) - 1, 2)]

    def get_clip_between_ur_ref(self, ref_range, ur_pair):
        """Compute how much to clip off the reference sequence so it matches
        one ur interval.

        ref_range -- (ref_start, ref_end) of the whole alignment.
        ur_pair   -- one (start, end) interval from self.ur.

        Returns (start, end) clip sizes. start is forced to -1 when the ur
        interval begins before the reference start; the caller asserts on
        that sentinel.
        """
        start = ref_range[0] - ur_pair[0]
        if start > 0:
            print("ur({}) should not before ref start({})".format(ur_pair[0], ref_range[0]))
            start = -1
        else:
            start = abs(start)
        end = ref_range[1] - ur_pair[1]
        # NOTE(review): unlike the start case, a bad end is only printed and
        # NOT flagged with a sentinel — confirm this asymmetry is intentional
        # (the caller's `en >= 0` assert will still fire on a negative end).
        if end < 0:
            print("ur({}) end should not after ref start({})".format(ur_pair[1], ref_range[1]))
        return start, end

    def get_rseq_by_ur(self, rseq, ref_range):
        """Splice the reference sequence down to the ur-covered intervals.

        Side effect: stores the per-interval pieces in self.new_rseq_split.
        Returns the concatenated spliced sequence.
        """
        new_rseq_split = []
        rseq_splice = ""
        for ur_pair in self.ur:
            st, en = self.get_clip_between_ur_ref(ref_range, ur_pair)
            assert st >= 0 and en >= 0, "The starting positions of ur and ref do not correspond, please check the file!"
            # Clip `st` bases from the front and `en` bases from the back of
            # the full reference slice to isolate this ur interval.
            rseq_ = rseq[st: len(rseq) - en]
            assert ur_pair[1] - ur_pair[0] == len(rseq_), 'Not expected rseq(%d) and ur(%d) to have different lengths.' % (
                len(rseq_), ur_pair[1] - ur_pair[0])
            rseq_splice = rseq_splice + rseq_
            new_rseq_split.append(rseq_)
        self.new_rseq_split = new_rseq_split
        return rseq_splice

    def generate_kmers_by_rseq(self, rseq, ref_range):
        """Return every overlapping k-mer of the ur-spliced reference seq.

        Side effect: stores the spliced sequence in self.new_rseq.
        Raises ValueError when k is non-positive or longer than the sequence.
        """
        self.new_rseq = self.get_rseq_by_ur(rseq, ref_range)
        if self.k <= 0:
            raise ValueError("k must be integer greater than 0.")
        if self.k > len(self.new_rseq):
            raise ValueError("k should be smaller than length of sequence.")
        kmers = [self.new_rseq[i: i + self.k] for i in range(len(self.new_rseq) - self.k + 1)]
        return kmers

    def get_signal_with_kmers(self, rseq, ref_range, signal):
        """Split `signal` into one segment per spliced reference base.

        Returns (kmer_table, kmer_signal):
          kmer_table  -- one dict per reference base: kmer, central nuc,
                         signal points, signal start offset, ref pos, read pos.
          kmer_signal -- list of per-base signal segments, padded with empty
                         segments for the k-mer edge bases.

        The padding is 2 leading + 3 trailing entries (hence the `+ 5` in the
        length assert below) — consistent with a 6-mer whose centre base is
        at index 2; presumably assumes self.k == 6, TODO confirm.
        """
        kmers = self.generate_kmers_by_rseq(rseq, ref_range)
        # Negative ul entries consume signal but produce no k-mer, so they
        # are excluded from the length check.
        neg_count = len([x for x in self.ul if x < 0])
        assert len(self.ul) - neg_count == len(kmers), 'Not expected kmer len(%d) and ul(%d) to have different lengths.' % (
            len(kmers), len(self.ul) - neg_count)
        first_2_nuc = kmers[0][:2]
        last_3_nuc = kmers[-1][3:]
        last_kmer = kmers[-1]
        first_kmer = kmers[0]
        signal_start = 0
        ref_start = self.ur[0][0]
        ref_end_by_ur = 0
        read_start = 0
        kmer_signal = []
        signal_start_pos = []
        refs_pos = []
        read_pos = []
        kmer_table = []
        first_kmer_signal = []
        first_signal_start_pos = []
        # Leading pad: the first two bases have no centred k-mer, so they get
        # empty signal segments and reuse the first k-mer.
        for n in first_2_nuc:
            dic = {'kmer': first_kmer, 'nuc': n, 'signal': [], 'signal_pos': 0, 'ref_pos': ref_start,
                   'read_pos': read_start}
            kmer_table.append(dic)
            first_kmer_signal.append([])
            first_signal_start_pos.append(0)
            read_start += 1
            ref_start += 1
        # Walk ul: each non-negative length claims the next `length` raw
        # samples for one reference base; negative lengths skip raw samples.
        for length in self.ul:
            if length < 0:
                signal_start += abs(length)
                continue
            segment = signal[signal_start:signal_start + length]
            signal_start_pos.append(signal_start)
            kmer_signal.append(list(segment))
            refs_pos.append(ref_start)
            read_pos.append(read_start)
            # When the current ur interval is exhausted, jump to the start of
            # the next interval (read position keeps advancing linearly).
            if ref_start + 1 == self.ur[ref_end_by_ur][1]:
                ref_end_by_ur += 1
                ref_start = self.ur[ref_end_by_ur][0]
                read_start += 1
            else:
                ref_start += 1
                read_start += 1
            signal_start += length
        for i in range(len(kmers)):
            dic = {'kmer': kmers[i], 'nuc': kmers[i][2], 'signal': list(kmer_signal[i]), 'signal_pos': signal_start_pos[i], 'ref_pos': refs_pos[i], 'read_pos': read_pos[i]}
            kmer_table.append(dic)
        # Trailing pad: last three bases mirror the leading pad.
        for n in last_3_nuc:
            dic = {'kmer': last_kmer, 'nuc': n, 'signal': [], 'signal_pos': signal_start_pos[-1], 'ref_pos': ref_start, 'read_pos': read_start}
            kmer_table.append(dic)
            kmer_signal.append([])
            signal_start_pos.append(signal_start_pos[-1])
            read_start += 1
            ref_start += 1
        kmer_signal = first_kmer_signal + kmer_signal
        signal_start_pos = first_signal_start_pos + signal_start_pos
        assert len(signal_start_pos) == len(kmers) + 5 and len(signal_start_pos) == len(kmer_signal), 'Not expected kmer len + 5(%d) , signal split by kmer len(%d) and signal start pos(%d) to have different lengths.' % (
            len(kmers) + 5, len(kmer_signal), len(signal_start_pos))
        assert len(self.new_rseq) == len(kmer_signal), 'Not expected new ref seq len(%d) and signal split by kmers(%d) to have different lengths.' % (
            len(self.new_rseq), len(kmer_signal))
        return kmer_table, kmer_signal

def extract_signal_with_kmer(read_id, chr, rseq, ref_range, signal, uncalled4):
    """Run the uncalled4 k-mer/signal extraction for one read and serialize
    the per-base signal segments.

    Returns (read_id, chr, ur intervals, serialized signal, kmer table);
    segments are comma-joined and separated by ';'.
    """
    kmer_table, kmer_signal = uncalled4.get_signal_with_kmers(rseq, ref_range, signal)
    total_signal = ";".join(
        ",".join(str(value) for value in segment) for segment in kmer_signal
    )
    return read_id, chr, uncalled4.ur, total_signal, kmer_table

def extract_refs_signal_with_kmer(rseq, ref_range, signal, uncalled4):
    """Run the uncalled4 extraction and expose its reference-side artifacts.

    Returns (ur intervals, spliced reference seq, per-interval seq pieces,
    per-base signal segments); the kmer table is discarded.
    """
    kmer_signal = uncalled4.get_signal_with_kmers(rseq, ref_range, signal)[1]
    return (
        uncalled4.ur,
        uncalled4.new_rseq,
        uncalled4.new_rseq_split,
        kmer_signal,
    )

def get_q2tloc_from_cigar(r_cigar_tuple, strand, seq_len):
    """Build a query-to-reference position map from CIGAR tuples.

    r_cigar_tuple -- list of (op, length) pairs using pysam numeric ops.
    strand        -- 1 walks the CIGAR forward; anything else walks reversed.
    seq_len       -- query span length; the result has seq_len + 1 entries.

    Returns an int32 array where entry q holds the reference offset of query
    position q (-1 for inserted bases); the final entry is one-past-the-end.
    Raises ValueError when the CIGAR does not cover the full query span.
    """
    FILL = -2
    mapping = np.full(seq_len + 1, FILL, dtype=np.int32)
    ops = r_cigar_tuple if strand == 1 else r_cigar_tuple[::-1]
    q_pos, r_pos = 0, 0
    for op, length in ops:
        if op == 1:  # insertion: query bases with no reference partner
            for q in range(q_pos, q_pos + length):
                mapping[q] = -1
            q_pos += length
        elif op in (2, 3):  # deletion / ref-skip: only reference advances
            r_pos += length
        elif op in (0, 7, 8):  # match / equal / mismatch: both advance
            for offset in range(length):
                mapping[q_pos + offset] = r_pos + offset
            q_pos += length
            r_pos += length
        elif op == 6:  # padding: consumes neither side
            pass
    mapping[q_pos] = r_pos  # sentinel entry one past the last query base
    if mapping[-1] == FILL:
        raise ValueError(
            (
                "Invalid cigar string encountered. Reference length: {}  Cigar "
                + "implied reference length: {}"
            ).format(seq_len, r_pos)
        )
    return mapping

def _write_featurestr(write_fp, featurestr_q, control, time_wait=1):
    """Writer-process loop: drain batches of feature strings from
    featurestr_q and append them line-by-line to write_fp until the
    "kill" sentinel arrives.

    write_fp     -- output text file path (opened with 'w', truncating).
    featurestr_q -- queue carrying lists of feature strings, or "kill".
    control      -- if not None, hard-exit this process once `control`
                    lines have been written (test/limit mode).
    time_wait    -- polling sleep in seconds while the queue is empty.
    """
    output = 0
    with open(write_fp, 'w') as wf:
        while True:
            # Busy-wait poll; mirrors the producers' polling style rather
            # than using a blocking get().
            if featurestr_q.empty():
                time.sleep(time_wait)
                continue
            features_str = featurestr_q.get()
            if features_str == "kill":
                break
            for one_features_str in features_str:
                output += 1
                wf.write(one_features_str + "\n")
                # NOTE(review): sys.exit terminates the writer mid-batch;
                # the `with` block still flushes/closes the file on exit.
                if control is not None and output >= control:
                    sys.exit(0)
            wf.flush()

def process_deepsignal(features_file, feature_Q, signal_ids, existing_keys, remove_indexes, time_wait=1, reads_per_chunk=2, qsize_limit=20):
    """Stream per-site deepsignal predictions from a (possibly gzipped) TSV
    and push them to feature_Q grouped by read.

    Each input line is tab-separated with chrom at column 0, position at
    column 1, read id at column 4 and prediction at column 7. Lines whose
    read id is not in `signal_ids`, or whose "read_id||chrom" key is in
    `existing_keys`/`remove_indexes`, are skipped.

    feature_Q receives lists of (read_id, chromosome, {pos: pred}) tuples,
    flushed every `reads_per_chunk` reads; the producer sleeps `time_wait`
    seconds whenever the queue holds `qsize_limit` or more items.
    """
    opener = gzip.open if features_file.endswith(".gz") else open
    # `with` guarantees the handle is closed even if a malformed line raises
    # (the original leaked the handle on error).
    with opener(features_file, 'rt') as infile:
        pred_deepsignal = {}
        fea_list = []
        pre_read_id = ''
        chromosome = ''
        for line in infile:
            words = line.strip().split("\t")
            read_id = words[4]
            key = '||'.join([read_id, words[0]])
            if read_id not in signal_ids:
                continue
            if key in existing_keys or key in remove_indexes:
                continue
            if pre_read_id == '':
                pre_read_id = read_id
            elif pre_read_id != read_id:
                # Read boundary: emit the accumulated predictions.
                fea_list.append((pre_read_id, chromosome, pred_deepsignal))
                pre_read_id = read_id
                pred_deepsignal = {}
                if len(fea_list) >= reads_per_chunk:
                    while feature_Q.qsize() >= qsize_limit:
                        time.sleep(time_wait)
                    feature_Q.put(fea_list)
                    fea_list = []
            chromosome = words[0]
            pred_deepsignal[int(words[1])] = float(words[7])
        # BUG FIX: flush the final read. The previous condition
        # (`pre_read_id != read_id`) was always False at EOF, so the last
        # read's predictions were silently dropped (and an empty
        # ('', '', {}) tuple could be emitted when every line was filtered).
        if pre_read_id != '':
            fea_list.append((pre_read_id, chromosome, pred_deepsignal))
        if fea_list:
            feature_Q.put(fea_list)

def align_signals_and_extend_ref_seq(pos_pair, read_signal, read_seq, ref_seq, motif_seqs, methyloc, strand, ref_start, ref_end):
    """Project per-base read signal onto the reference, keeping soft-clipped
    read flanks, then locate motif sites in the resulting sequence.

    pos_pair    -- list of (read_pos, ref_pos) pairs; either side may be None
                   (insertion/deletion). Positions are pre-adjusted by the
                   caller for strand orientation.
    read_signal -- per-read-base signal segments (indexable by read_pos).
    read_seq / ref_seq -- read and reference base strings.
    Returns (new_ref_seq, new_ref_signal, ref_readlocs, ref_poss, pred_pos).
    """
    # First/last pair indices that actually have a reference coordinate;
    # everything outside is clipped/unaligned read sequence.
    first_valid_index = next((i for i, (_, ref_pos) in enumerate(pos_pair) if ref_pos is not None), len(pos_pair))
    last_valid_index = len(pos_pair) - 1 - next((i for i, (_, ref_pos) in enumerate(reversed(pos_pair)) if ref_pos is not None), len(pos_pair))
    new_ref_seq = []
    new_ref_signal = []
    # Leading clip: take read bases/signal as-is.
    for i in range(first_valid_index):
        read_pos, _ = pos_pair[i]
        if read_pos is not None:
            new_ref_seq.append(read_seq[read_pos])
            new_ref_signal.append(read_signal[read_pos])
    last_valid_ref_pos = len(new_ref_seq) - 1
    # Aligned core: reference bases; deletions get empty signal, insertions
    # fold their signal into the previous reference base.
    for i in range(first_valid_index, last_valid_index + 1):
        read_pos, ref_pos = pos_pair[i]
        if ref_pos is not None:
            new_ref_seq.append(ref_seq[ref_pos])
            new_ref_signal.append(read_signal[read_pos] if read_pos is not None else [])
            last_valid_ref_pos = len(new_ref_seq) - 1
        elif ref_pos is None:
            # NOTE(review): last_valid_ref_pos is an int here, never None, so
            # this guard is always True; also read_pos could in principle be
            # None on this branch (read_signal[None] would raise) — presumably
            # pos_pair never holds (None, None); TODO confirm with caller.
            if last_valid_ref_pos is not None:
                new_ref_signal[last_valid_ref_pos].extend(read_signal[read_pos])
    # Trailing clip: read bases/signal as-is.
    for i in range(last_valid_index + 1, len(pos_pair)):
        read_pos, _ = pos_pair[i]
        if read_pos is not None:
            new_ref_seq.append(read_seq[read_pos])
            new_ref_signal.append(read_signal[read_pos])
    new_ref_seq = ''.join(base for base in new_ref_seq)
    ref_readlocs = dict()
    ref_poss = []
    pred_pos = []
    ref_pos = -1
    # Motif sites in the projected sequence; sites falling in the clipped
    # flanks get reference position -1.
    tsite_locs = get_refloc_of_methysite_in_motif(
        new_ref_seq, set(motif_seqs), methyloc)
    for loc_in_read in tsite_locs:
        if loc_in_read < first_valid_index:
            ref_pos = -1
            ref_poss.append(ref_pos)
            pred_pos.append(loc_in_read)
            continue
        if loc_in_read > last_valid_index:
            ref_pos = -1
            ref_poss.append(ref_pos)
            pred_pos.append(loc_in_read)
            continue
        # Map site index back to genome coordinates; '-' strand counts from
        # the reference end.
        if strand == "-":
            ref_pos = ref_end - loc_in_read - 1 + first_valid_index
        else:
            ref_pos = ref_start + loc_in_read - first_valid_index
        ref_poss.append(ref_pos)
        pred_pos.append(loc_in_read)
    ref_readlocs = dict(zip(pred_pos, ref_poss))
    return new_ref_seq, new_ref_signal, ref_readlocs, ref_poss, pred_pos

def get_md_reference_length(md_tag: str) -> int:
    """Return the reference span implied by a SAM MD tag.

    The MD grammar alternates match counts, mismatched bases, and
    '^'-prefixed deletion runs; each contributes its length in reference
    bases.
    """
    total = 0
    run = 0  # current numeric match-run being accumulated digit by digit
    i = 0
    n = len(md_tag)
    while i < n:
        ch = md_tag[i]
        if ch.isdigit():
            run = run * 10 + int(ch)
            i += 1
        elif ch == '^':
            # Deletion: flush the pending match run, then count the
            # uppercase deleted bases that follow.
            total += run
            run = 0
            i += 1
            while i < n and md_tag[i].isupper():
                i += 1
                total += 1
        else:
            # Single mismatched base: flush pending run plus one.
            total += run + 1
            run = 0
            i += 1
    return total + run

def get_alignment_length(cigartuples: List[Tuple[int, int]]) -> int:
    """Total reference bases consumed by the CIGAR.

    Counts ops M/D/N/=/X (numeric 0, 2, 3, 7, 8); insertions, clips and
    padding do not consume reference.
    """
    reference_consuming = {0, 2, 3, 7, 8}
    return sum(op_len for op, op_len in cigartuples if op in reference_consuming)

def count_insertions(alignment_sequence: str) -> int:
    """Count lowercase characters, which mark inserted bases in the
    alignment sequence convention used here."""
    return sum(ch.islower() for ch in alignment_sequence)

def check_md_cigar_length(md_tag: str, cigartuples: List[Tuple[int, int]], alignment_sequence: str) -> None:
    """Sanity-check MD/CIGAR consistency for one alignment.

    Raises AssertionError when the MD-implied reference length plus the
    number of inserted (lowercase) bases exceeds the CIGAR reference span.
    """
    md_len = get_md_reference_length(md_tag)
    insertions = count_insertions(alignment_sequence)
    cigar_ref_len = get_alignment_length(cigartuples)
    if md_len + insertions <= cigar_ref_len:
        return
    raise AssertionError(
        f"Invalid MD tag: MD length {md_len} + {insertions} insertions "
        f"exceeds CIGAR length {cigar_ref_len}"
    )

def process_dorado(bam_index, pod5_dr, bisulfite, read_ids_Q, output_Q, motif_seqs, progress_bar, lock, process_chr, use_ref, label, methyloc=0, time_wait=1, mapq=0, kmer=6, d_batch_size=2, qsize_limit=22, motif='A'):
    """Worker loop: pair pod5 raw signal with dorado BAM alignments and push
    per-read feature strings onto output_Q.

    Consumes batches of read names from read_ids_Q (the "kill" sentinel is
    re-queued so sibling workers also stop). For each primary alignment that
    passes the mapq/chromosome filters, the raw signal is trimmed, grouped
    per base via the move table, optionally projected onto the reference
    (ur/ul tags or aligned pairs), labelled from dorado's modified-base
    calls, and serialized as a tab-separated feature line.

    process_chr -- None, a list of chromosomes to keep, a chromosome name,
                   or a 'no<chrom>' exclusion string.
    use_ref     -- emit reference-projected sequence/signal instead of
                   read-space.
    motif       -- 'A' selects 6mA (A/a); anything else selects 5mC (C/m).
    Per-alignment failures are printed via traceback and skipped.
    """
    fea_list = []
    while True:
        # Poll for work; blocking get() is avoided by convention here.
        while read_ids_Q.empty():
            time.sleep(time_wait)
        read_data = read_ids_Q.get()
        if read_data == "kill":
            # Re-queue the sentinel so every sibling worker sees it.
            read_ids_Q.put("kill")
            break
        for read_name in read_data:
            # Back-pressure: wait for the writer to drain the output queue.
            while output_Q.qsize() >= qsize_limit:
                time.sleep(time_wait)
            with lock:
                progress_bar.update(1)
            read = pod5_dr.get_read(read_name)
            if read is None:
                continue
            signal = read.signal
            shift_dacs_to_pa = read.calibration.offset
            scale_dacs_to_pa = read.calibration.scale
            try:
                for bam_read in bam_index.get_alignments(read_name):
                    # Only primary alignments are used.
                    if bam_read.is_supplementary or bam_read.is_secondary:
                        continue
                    reference_name = bam_read.reference_name
                    if reference_name is None:
                        continue
                    if bam_read.mapping_quality < mapq:
                        continue
                    # Chromosome filter: list membership, exact match, or a
                    # 'no<chrom>' exclusion prefix.
                    if process_chr is not None:
                        if isinstance(process_chr, list):
                            if reference_name not in process_chr:
                                continue
                        else:
                            if process_chr[:2] != 'no':
                                if reference_name != process_chr:
                                    continue
                            elif process_chr[:2] == 'no':
                                if reference_name == process_chr[2:]:
                                    continue
                    # NOTE(review): .upper() is called before the None check
                    # below — a None forward sequence would raise here first
                    # (caught by the outer except).
                    seq = bam_read.get_forward_sequence().upper()
                    if seq is None or signal is None:
                        continue
                    if use_ref:
                        # Reference projection needs the MD tag to recover
                        # the reference sequence from the alignment.
                        if not bam_read.has_tag('MD'):
                            print('not have MD of readid {}'.format(read_name))
                            continue
                        try:
                            if bam_read.is_reverse:
                                ref_seq = complement_seq(bam_read.get_reference_sequence().upper())
                            else:
                                ref_seq = bam_read.get_reference_sequence().upper()
                        except Exception as e:
                            print(f"Read {bam_read.query_name} failed: {e}", flush=True)
                            continue
                    seq_len = len(seq)
                    if bam_read.infer_query_length() != len(seq):
                        print('infer read length is not same as seq length of readid {}'.format(read_name), flush=True)
                        continue
                    # Collect dorado's per-site modification probabilities,
                    # keyed by reference position. Raw ML values are 0..255;
                    # (v + 0.5)/256 maps them to (0, 1).
                    dorado_pred = dict()
                    if motif == 'A':
                        m0 = 'A'
                        m2 = 'a'
                    else:
                        m0 = 'C'
                        m2 = 'm'
                    if bam_read.modified_bases is not None:
                        ref_loc = bam_read.get_reference_positions(full_length=True)
                        for m, locs in bam_read.modified_bases_forward.items():
                            if m[0] == m0 and m[2] == m2:
                                for lc in locs:
                                    rloc = ref_loc[lc[0]] if bam_read.is_forward else ref_loc[seq_len - lc[0] - 1]
                                    if rloc is None:
                                        continue
                                    dorado_pred[rloc] = (lc[1] + 0.5) / 256
                    # Basecaller tags: ts/sp = trimmed samples, sm/sd =
                    # pA-to-normalized shift/scale, mv = move table.
                    read_dict = dict(bam_read.tags)
                    strand_code = 0 if bam_read.is_reverse else 1
                    strand = "-" if strand_code == 0 else "+"
                    ref_start = bam_read.reference_start
                    ref_end = bam_read.reference_end
                    cigar_tuples = bam_read.cigartuples
                    num_trimmed = read_dict["ts"]
                    if bam_read.has_tag('sp'):
                        num_trimmed += bam_read.get_tag('sp')
                    shift_pa_to_norm = read_dict["sm"]
                    scale_pa_to_norm = read_dict["sd"]
                    mv_table = read_dict["mv"]
                    # Positive trim drops samples from the front; a negative
                    # value trims from the end.
                    if num_trimmed >= 0:
                        signal_trimmed = signal[num_trimmed:]
                    else:
                        signal_trimmed = signal[:num_trimmed]
                    # mv_table[0] is the stride; the rest are per-stride
                    # move flags used to split signal per called base.
                    signal_group = _group_signals_by_movetable_v2(
                        signal_trimmed, np.asarray(mv_table[1:]), int(mv_table[0]))
                    if use_ref:
                        if bam_read.has_tag('ur') and bam_read.has_tag('ul'):
                            # Uncalled4 path: ur/ul tags give the exact
                            # signal-to-reference segmentation.
                            ur = bam_read.get_tag("ur")
                            ul = bam_read.get_tag("ul")
                            ref_range = [ref_start, ref_end]
                            uncalled4_obj = ExtractSignalWithKmerByUncalled4(ur, ul, kmer)
                            ref_range, seq, rseqs, signal_group = extract_refs_signal_with_kmer(ref_seq, ref_range, signal, uncalled4_obj)
                            ref_poss = []
                            pred_pos = []
                            ref_len = []
                            for i in range(len(rseqs)):
                                rseq = rseqs[i]
                                ref_start, ref_end = ref_range[i]
                                ref_len.append(len(rseq))
                                tsite_locs = get_refloc_of_methysite_in_motif(
                                    rseq, set(motif_seqs), methyloc)
                                for loc_in_read in tsite_locs:
                                    if strand == "-":
                                        ref_pos = ref_end - loc_in_read - 1
                                    else:
                                        ref_pos = ref_start + loc_in_read
                                    ref_poss.append(ref_pos)
                                    # Offset site into the concatenated seq.
                                    pred_pos.append(loc_in_read + sum(ref_len[:i]))
                                ref_readlocs = dict(zip(pred_pos, ref_poss))
                        else:
                            # Fallback path: derive read/ref pairing from the
                            # alignment's aligned pairs, flipping coordinates
                            # for reverse-strand reads.
                            pos_pair = []
                            for read_pos, ref_pos in bam_read.get_aligned_pairs():
                                if read_pos is None:
                                    if bam_read.is_reverse:
                                        pos_pair.append((None, ref_end - ref_pos - 1))
                                    else:
                                        pos_pair.append((None, ref_pos - ref_start))
                                    continue
                                if ref_pos is None:
                                    if bam_read.is_reverse:
                                        pos_pair.append((len(seq) - read_pos - 1, None))
                                    else:
                                        pos_pair.append((read_pos, None))
                                    continue
                                if bam_read.is_reverse:
                                    pos_pair.append((len(seq) - read_pos - 1, ref_end - ref_pos - 1))
                                else:
                                    pos_pair.append((read_pos, ref_pos - ref_start))
                            if strand == "-":
                                pos_pair.reverse()
                            seq, signal_group, ref_readlocs, ref_poss, pred_pos = align_signals_and_extend_ref_seq(pos_pair, signal_group, seq, ref_seq, motif_seqs, methyloc, strand, ref_start, ref_end)
                    if len(signal_group) != len(seq):
                        print('signal to seq error!', flush=True)
                        continue
                    if use_ref is False:
                        # Read-space path: map motif sites through the CIGAR
                        # to reference coordinates (-1 when unmapped).
                        qalign_start = bam_read.query_alignment_start
                        qalign_end = bam_read.query_alignment_end
                        if bam_read.is_reverse:
                            seq_start = len(seq) - qalign_end
                            seq_end = len(seq) - qalign_start
                        else:
                            seq_start = qalign_start
                            seq_end = qalign_end
                        q_to_r_poss = get_q2tloc_from_cigar(
                            cigar_tuples, strand_code, (seq_end - seq_start)
                        )
                        ref_readlocs = dict()
                        ref_poss = []
                        pred_pos = []
                        tsite_locs = get_refloc_of_methysite_in_motif(
                            seq, set(motif_seqs), methyloc)
                        for loc_in_read in tsite_locs:
                            if seq_start <= loc_in_read < seq_end:
                                offset_idx = loc_in_read - seq_start
                                if q_to_r_poss[offset_idx] != -1:
                                    if strand == "-":
                                        ref_pos = ref_end - 1 - q_to_r_poss[offset_idx]
                                    else:
                                        ref_pos = ref_start + q_to_r_poss[offset_idx]
                                    ref_readlocs[loc_in_read] = ref_pos
                                    ref_poss.append(ref_pos)
                                    pred_pos.append(loc_in_read)
                                else:
                                    ref_pos = -1
                                    ref_readlocs[loc_in_read] = ref_pos
                                    ref_poss.append(ref_pos)
                                    pred_pos.append(loc_in_read)
                            else:
                                ref_pos = -1
                                ref_readlocs[loc_in_read] = ref_pos
                                ref_poss.append(ref_pos)
                                pred_pos.append(loc_in_read)
                    if len(ref_poss) == 0:
                        continue
                    # Optional ns/nl tags: paired start/end lists describing
                    # nucleosome regions — presumably reference coordinates;
                    # TODO confirm tag semantics with the producer tool.
                    nucleosome_regions = []
                    if bam_read.has_tag('ns') and bam_read.has_tag('nl'):
                        ns_positions = [int(x) for x in bam_read.get_tag('ns').split(',')]
                        nl_positions = [int(x) for x in bam_read.get_tag('nl').split(',')]
                        if len(ns_positions) == len(nl_positions):
                            for ns, nl in zip(ns_positions, nl_positions):
                                if nl >= ns:
                                    nucleosome_regions.append((ns, nl))
                    # Label sites from dorado probabilities: >= 0.95 -> 1;
                    # <= 0.05 -> 0 only when inside a nucleosome region;
                    # everything else (or missing) -> -1 (unlabelled).
                    label_ref = dict()
                    dorado_ref = dict()
                    for pos in pred_pos:
                        ref_pos = ref_readlocs[pos]
                        if ref_pos in dorado_pred:
                            pred_value = dorado_pred[ref_pos]
                            if pred_value >= 0.95:
                                label_ref[pos] = 1
                                dorado_ref[pos] = pred_value
                            elif pred_value <= 0.05 and nucleosome_regions:
                                is_in_nucleosome = any(ns <= ref_pos <= nl for ns, nl in nucleosome_regions)
                                label_ref[pos] = 0 if is_in_nucleosome else -1
                                dorado_ref[pos] = pred_value if is_in_nucleosome else -1
                            else:
                                label_ref[pos] = -1
                                dorado_ref[pos] = -1
                        else:
                            label_ref[pos] = -1
                            dorado_ref[pos] = -1
                    if not any(label != -1 for label in label_ref.values()):
                        print('no label in dorado',flush=True)
                        continue
                    # Serialize: round signal to 6 decimals; per-base segments
                    # comma-joined, bases ';'-separated.
                    signal_group_new = []
                    for sig in signal_group:
                        signal_group_new.append(np.round(np.array(sig), decimals=6))
                    norm_signals_text = ';'.join([",".join([str(y) for y in x]) for x in signal_group_new])
                    pred_deepsignal_text = '.'
                    pred_dorado_text = ','.join([str(dorado_ref[x]) if dorado_ref[x] != -1 else '.' for x in pred_pos])
                    mean_pred_text = '.'
                    pred_label_text = ','.join([str(label_ref[x]) for x in pred_pos])
                    pred_pos_text = ','.join([str(x) for x in pred_pos])
                    sample_id = '\t'.join([bam_read.query_name, str(reference_name), str(ref_start)])
                    bisulfite_text = '.'#','.join([str(bisulfite_ref[x]) for x in pred_pos])
                    ref_pos_text = ','.join([str(ref_readlocs[x]) for x in pred_pos])
                    fea_str = '\t'.join([sample_id, seq, norm_signals_text, pred_pos_text, pred_dorado_text, pred_deepsignal_text, mean_pred_text, pred_label_text, str(bam_read.mapping_quality),
                                         str(shift_dacs_to_pa), str(scale_dacs_to_pa), str(shift_pa_to_norm), str(scale_pa_to_norm), bisulfite_text, ref_pos_text])
                    fea_list.append(fea_str)
                    if len(fea_list) >= d_batch_size:
                        output_Q.put(fea_list)
                        fea_list = []
            except Exception as e:
                # Best-effort: log the failure for this read and move on.
                traceback.print_exc()
                continue
    if len(fea_list) > 0:
        output_Q.put(fea_list)

def generate_key(line):
    """Dedup key for an output line: first two whitespace-separated columns
    joined with '||'."""
    return '||'.join(line.split()[:2])

def remove_last_line(file_path):
    """Truncate the file so its (possibly incomplete) last line is removed.

    BUG FIX: the previous version opened the file in text mode 'r', which
    (a) forbids truncate() on most platforms, and (b) never seeked before
    f.read(1) — at EOF read(1) returned '' forever, so pos walked to 0 and
    the whole file would have been truncated. We now open in 'rb+' and scan
    backwards byte-by-byte for the newline that ends the second-to-last
    line, keeping everything up to and including it.
    """
    with open(file_path, 'rb+') as f:
        f.seek(0, os.SEEK_END)
        file_size = f.tell()
        if file_size == 0:
            return  # nothing to remove
        pos = file_size - 1
        # Walk backwards; stop when the byte just before `pos` is a newline,
        # i.e. `pos` is the first byte of the last line.
        while pos > 0:
            f.seek(pos - 1, os.SEEK_SET)
            if f.read(1) == b'\n':
                break
            pos -= 1
        # pos == 0 means the file held a single line: truncate to empty.
        f.truncate(pos)

def process_file(output_file):
    """Collect the dedup keys already present in an existing output file.

    Returns an empty set when the file does not exist, enabling resume-style
    runs that skip already-written records.
    """
    existing_keys = set()
    try:
        with open(output_file, 'r') as f:
            for line in f:
                existing_keys.add(generate_key(line))
    except FileNotFoundError:
        pass
    return existing_keys

def remove_key(key_input):
    """Load the set of keys to exclude, one stripped key per line."""
    with open(key_input, 'r') as input_file:
        return {line.strip() for line in input_file}

def read_position_file(position_file):
    """Read genomic positions as a set of "chrom||pos" keys.

    Only the first two whitespace-separated columns of each line are used.
    """
    key_sep = "||"
    with open(position_file, "r") as rf:
        postions = {key_sep.join(line.strip().split()[:2]) for line in rf}
    return postions

def read_bed(bisulfite_bed, strict=False, depth_threshold=5, label=1):
    """Load per-site methylation frequencies from a bisulfite BED file.

    Lines with exactly 6 columns are treated as label-only sites and given
    label * 100.0; longer lines carry coverage (col 10) and frequency
    (col 11), and sites below depth_threshold are dropped.

    With strict=True, only CpG sites whose opposite-strand partner agrees
    (both >= 95 -> 100, both <= 5 -> 0) are kept. Returns a dict keyed by
    "chrom||pos".
    """
    key_sep = "||"
    value_info = {}
    with open(bisulfite_bed, "r") as rf:
        for line in rf:
            fields = line.strip().split()
            chrom = fields[0]
            pos = int(fields[1])
            strand = fields[5]
            if len(fields) == 6:
                value = label * 100.0
            else:
                if int(fields[9]) < depth_threshold:
                    continue
                value = float(fields[10])
            value_info[key_sep.join([chrom, str(pos), strand])] = value
    freqinfo = {}
    if strict:
        for m_key, value_1 in value_info.items():
            chrom, pos, strand = m_key.split(key_sep)
            pos = int(pos)
            # The CpG partner sits one base downstream on '+', upstream on '-'.
            if strand == '+':
                pair_key = key_sep.join([chrom, str(pos + 1), '-'])
            else:
                pair_key = key_sep.join([chrom, str(pos - 1), '+'])
            if pair_key not in value_info:
                continue
            value_2 = value_info[pair_key]
            freq_key = key_sep.join([chrom, str(pos)])
            if value_1 >= 95 and value_2 >= 95:
                freqinfo[freq_key] = 100
            elif value_1 <= 5 and value_2 <= 5:
                freqinfo[freq_key] = 0
    else:
        for m_key, value in value_info.items():
            chrom, pos, _strand = m_key.split(key_sep)
            freqinfo[key_sep.join([chrom, str(int(pos))])] = value
    return freqinfo

def read_tsv(key_input):
    """Read keys (e.g. read ids) from a file, one stripped value per line,
    preserving file order."""
    with open(key_input, 'r') as input_file:
        return [line.strip() for line in input_file]

def read_id(pod5_dir):
    """Collect read-id strings from every *.pod5 found recursively under
    pod5_dir; prints the total count before returning the list."""
    ids_set = []
    for pod5_path in Path(pod5_dir).rglob("*.pod5"):
        with p5.DatasetReader(pod5_path, recursive=True, max_cached_readers=1) as dataset:
            for read_record in dataset:
                ids_set.append(str(read_record.read_id))
    print(len(ids_set))
    return ids_set

def process_chr(chr_file):
    """Read chromosome names from a file, one stripped name per line,
    preserving file order."""
    with open(chr_file, 'r') as input_file:
        return [line.strip() for line in input_file]

def extract(args):
    """Top-level driver: start nproc-1 process_dorado reader workers and one
    writer process, streaming features from BAM + pod5 into args.write_path.

    Read ids come from --tsv when given, otherwise from the pod5 input.
    Workers stop when the "kill" sentinel propagates through read_ids_Q;
    the writer stops after the workers finish and "kill" is queued on
    output_Q.
    """
    manager = mp.Manager()
    output_Q = manager.Queue()
    bam_index = bam_reader.ReadIndexedBam(args.bam)
    signal_dir = os.path.abspath(args.signal)
    if os.path.isdir(signal_dir):
        pod5_dr = p5.DatasetReader(signal_dir, recursive=True, threads=args.pod5_proc)
    else:
        pod5_dr = p5.DatasetReader(signal_dir, threads=args.pod5_proc)
    # Restrict extraction to the ids listed in --tsv when given.
    if args.tsv is not None:
        signal_ids = read_tsv(args.tsv)
    else:
        if os.path.isdir(signal_dir):
            signal_ids = read_id(args.signal)
        else:
            signal_ids = list(pod5_dr.read_ids)
    read_ids_Q = manager.Queue()
    fill_files_queue(read_ids_Q, signal_ids, 20, True)
    bisulfite = None
    if args.bed is not None:
        bisulfite = read_bed(args.bed, args.strict, args.depth, args.label)
        print('read bisulfite')
    timewait = args.timewait
    ex_dp = args.nproc - 1  # one process is reserved for the writer
    ex_procs = []
    is_dna = False if args.rna else True
    motif_seqs = get_motif_seqs(args.motifs, is_dna)
    print('get motifs')
    lock = manager.Lock()
    chr_indexes = args.chr
    if args.chr is not None:
        if os.path.isfile(args.chr):
            chr_indexes = process_chr(args.chr)
    # NOTE(review): the tqdm context exits right after the workers start, so
    # the parent's bar closes while forked children update their own copies.
    with tqdm(total=len(signal_ids), desc="Processing", ncols=100) as progress_bar:
        for i in range(ex_dp):
            # BUG FIX: args.motifs (a str) was previously passed positionally
            # into process_dorado's d_batch_size parameter (an int), which
            # made `len(fea_list) >= d_batch_size` raise a TypeError on every
            # read. Pass the real batch size positionally and the motif by
            # keyword instead.
            pb = mp.Process(
                target=process_dorado,
                args=(bam_index, pod5_dr, bisulfite, read_ids_Q, output_Q,
                      motif_seqs, progress_bar, lock, chr_indexes, args.ref,
                      args.label, args.mod_loc, timewait, args.mapq,
                      args.kmer, args.d_batch_size),
                kwargs={"motif": args.motifs},
                name="pb_reader",
            )
            pb.daemon = True
            pb.start()
            ex_procs.append(pb)
    print('start process dorado')
    p_w = mp.Process(target=_write_featurestr, args=(args.write_path, output_Q, args.control, timewait),
                     name="writer")
    p_w.daemon = True
    p_w.start()
    print('start writer')
    # Sentinel: each worker re-queues it so all siblings terminate.
    read_ids_Q.put("kill")
    for pb in ex_procs:
        pb.join()
    output_Q.put("kill")
    print('finished')
    p_w.join()

def parse_args():
    """Build and parse the command-line interface for the extractor.

    Required: --bam, --signal, --write_path, --nproc. Everything else has a
    default or is optional filtering/labelling input.
    """
    parser = argparse.ArgumentParser("")
    # Resume / filtering inputs.
    parser.add_argument("--keyset", type=str, required=False)
    parser.add_argument("--hp", type=str, required=False,
                        help='high confidence positive position')
    parser.add_argument("--hn", type=str, required=False,
                        help='high confidence negative position')
    # Primary inputs: alignments, raw signal, optional bisulfite truth.
    parser.add_argument("--bam", type=str, required=True)
    parser.add_argument("--pod5", action="store_true", default=False, required=False,
                        help='use pod5, default false')
    parser.add_argument("--bed", type=str, required=False)
    parser.add_argument("--signal", type=str, required=True)
    parser.add_argument("--tsv", type=str, required=False)
    parser.add_argument("--write_path", type=str, required=True)
    # Parallelism / batching knobs.
    parser.add_argument("--nproc", "-p", type=int, required=True)
    parser.add_argument("--timewait", "-t", default=0.1,
                        type=float, required=False)
    parser.add_argument("--d_batch_size", action="store", type=int, default=2,
                        required=False)
    parser.add_argument("--control", type=int, required=False,
                        help='test')
    parser.add_argument("--label", type=int,
                        required=False)
    parser.add_argument('--mapq', type=int, default=0, required=False)
    parser.add_argument('--depth', type=int, default=5, required=False)
    parser.add_argument('--kmer', type=int, default=6, required=False)
    parser.add_argument('--pod5_proc', '-d', type=int, default=10, required=False)
    # Motif selection.
    parser.add_argument(
        "--motifs",
        action="store",
        type=str,
        required=False,
        default="CG",
        help="motif seq to be extracted, default: CG. "
        "can be multi motifs splited by comma "
        "(no space allowed in the input str), "
        "or use IUPAC alphabet, "
        "the mod_loc of all motifs must be "
        "the same",
    )
    parser.add_argument(
        "--mod_loc",
        action="store",
        type=int,
        required=False,
        default=0,
        help="0-based location of the targeted base in the motif, default 0",
    )
    parser.add_argument(
        "--rna",
        action="store_true",
        default=False,
        required=False,
        help="the fast5 files are from RNA samples. if is rna, the signals are reversed. "
        "NOTE: Currently no use, waiting for further extentsion",
    )
    parser.add_argument(
        "--ref",
        action="store_true",
        default=False,
        required=False,
        help="wetheter use ref sequence instead of read sequence, default false.",
    )
    parser.add_argument('--strict', action="store_true", default=False, required=False)
    parser.add_argument('--test', action="store_true", default=False, required=False)
    parser.add_argument("--chr", type=str, required=False, help='only extract chr')
    return parser.parse_args()

def main():
    """CLI entry point: parse arguments, then run the extraction pipeline."""
    extract(parse_args())

# Entry point guard: main() returns None, so sys.exit(None) exits with code 0.
if __name__ == '__main__':
    sys.exit(main())