import argparse
import sys
import os
import gzip
import threading
import queue
import concurrent.futures
import logging
import time
import numpy as np
from pathlib import Path
from tqdm import tqdm
import pysam
import pod5 as p5
from deepsignal3.utils import bam_reader
from deepsignal3.utils.process_utils import CIGAR2CODE, CIGAR_REGEX, get_refloc_of_methysite_in_motif, complement_seq, get_motif_seqs
from deepsignal3.extract_features_pod5 import _group_signals_by_movetable_v2
import resource
import traceback

# Set up logging to write to sys.stdout (redirected file)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler(sys.stdout)]
)

# Try to raise the address-space limit to unlimited. Raising the hard limit
# requires privilege: on restricted systems setrlimit raises ValueError (or
# OSError), which previously crashed the program at import time. Treat that
# as non-fatal and keep the current limit instead.
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
logging.info(f"Current memory limit: {soft}")
try:
    resource.setrlimit(resource.RLIMIT_AS, (resource.RLIM_INFINITY, resource.RLIM_INFINITY))
except (ValueError, OSError):
    logging.warning("Unable to raise memory limit to unlimited; keeping current limit")
logging.info(f"New memory limit: {resource.getrlimit(resource.RLIMIT_AS)}")

class ExtractSignalWithKmerByUncalled4:
    """Pair raw nanopore signal with reference k-mers using Uncalled4 BAM tags.

    ``ur`` is the flat list of reference coordinate pairs from the BAM ``ur``
    tag, ``ul`` the per-position signal segment lengths from the ``ul`` tag
    (negative entries denote raw signal to skip), and ``k`` the k-mer size.
    """

    def __init__(self, ur, ul, k):
        # Flat [s0, e0, s1, e1, ...] -> [(s0, e0), (s1, e1), ...]
        self.ur = self.process_ur(ur)
        # Per-base signal segment lengths; negative values mean "skip signal".
        self.ul = ul
        # K-mer size.
        self.k = k

    def process_ur(self, ur):
        """Group the flat ``ur`` coordinate list into (start, end) pairs."""
        assert len(ur) % 2 == 0, f'UR length ({len(ur)}) must be even.'
        return [(ur[i], ur[i + 1]) for i in range(0, len(ur) - 1, 2)]

    def get_clip_between_ur_ref(self, ref_range, ur_pair):
        """Return (start, end) clip amounts so the aligned reference sequence
        spans exactly the ``ur_pair`` interval within ``ref_range``."""
        start = ref_range[0] - ur_pair[0]
        if start > 0:
            # UR interval begins before the alignment's reference start.
            logging.warning(f"UR ({ur_pair[0]}) before ref start ({ref_range[0]})")
            # NOTE(review): -1 here will trip the `st >= 0` assertion in
            # get_rseq_by_ur -- presumably intended as a hard failure marker;
            # confirm.
            start = -1
        else:
            start = abs(start)
        end = ref_range[1] - ur_pair[1]
        if end < 0:
            # NOTE(review): message says "ref start" but the comparison is
            # against ref_range[1], i.e. the reference END -- confirm wording.
            logging.warning(f"UR end ({ur_pair[1]}) after ref start ({ref_range[1]})")
        return start, end

    def get_rseq_by_ur(self, rseq, ref_range):
        """Clip ``rseq`` to each UR interval and splice the pieces together.

        Side effect: stores the per-interval pieces in ``self.new_rseq_split``.
        """
        new_rseq_split = []
        rseq_splice = ""
        for ur_pair in self.ur:
            st, en = self.get_clip_between_ur_ref(ref_range, ur_pair)
            assert st >= 0 and en >= 0, "UR and ref positions do not correspond."
            rseq_ = rseq[st: len(rseq) - en]
            assert ur_pair[1] - ur_pair[0] == len(rseq_), f'Unexpected rseq ({len(rseq_)}) and UR ({ur_pair[1] - ur_pair[0]}) length mismatch.'
            rseq_splice += rseq_
            new_rseq_split.append(rseq_)
        self.new_rseq_split = new_rseq_split
        return rseq_splice

    def generate_kmers_by_rseq(self, rseq, ref_range):
        """Build all overlapping k-mers of the UR-clipped reference sequence.

        Side effect: stores the clipped/spliced sequence in ``self.new_rseq``.
        Raises ValueError for non-positive k or k longer than the sequence.
        """
        self.new_rseq = self.get_rseq_by_ur(rseq, ref_range)
        if self.k <= 0:
            raise ValueError("k must be positive integer.")
        if self.k > len(self.new_rseq):
            raise ValueError("k larger than sequence length.")
        kmers = [self.new_rseq[i: i + self.k] for i in range(len(self.new_rseq) - self.k + 1)]
        return kmers

    def get_signal_with_kmers(self, rseq, ref_range, signal):
        """Assign a signal segment (lengths from ``self.ul``) to every k-mer.

        Returns ``(kmer_table, kmer_signal)``: kmer_table is a list of dicts
        (kmer, centre nucleotide, signal segment, signal/ref/read positions);
        kmer_signal is the per-base signal list, padded with empty segments
        for the flanking bases not covered by a centred k-mer.
        """
        kmers = self.generate_kmers_by_rseq(rseq, ref_range)
        # Negative ul entries are skipped signal and produce no k-mer.
        neg_count = len([x for x in self.ul if x < 0])
        assert len(self.ul) - neg_count == len(kmers), f'Kmer len ({len(kmers)}) and UL ({len(self.ul) - neg_count}) length mismatch.'
        # Flanks: the first 2 and last 3 bases have no centred k-mer
        # (consistent with the "+ 5" length check below -- assumes k == 6
        # with the centre base at index 2; TODO confirm).
        first_2_nuc = kmers[0][:2]
        last_3_nuc = kmers[-1][3:]
        last_kmer = kmers[-1]
        first_kmer = kmers[0]
        signal_start = 0
        ref_start = self.ur[0][0]
        ref_end_by_ur = 0
        read_start = 0
        kmer_signal = []
        signal_start_pos = []
        refs_pos = []
        read_pos = []
        kmer_table = []
        first_kmer_signal = []
        first_signal_start_pos = []
        # Leading flank: empty signal entries for the first two bases.
        for n in first_2_nuc:
            dic = {'kmer': first_kmer, 'nuc': n, 'signal': [], 'signal_pos': 0, 'ref_pos': ref_start, 'read_pos': read_start}
            kmer_table.append(dic)
            first_kmer_signal.append([])
            first_signal_start_pos.append(0)
            read_start += 1
            ref_start += 1
        # Walk the raw signal: one ul entry per reference base.
        for length in self.ul:
            if length < 0:
                # Negative length: skip that much raw signal.
                signal_start += abs(length)
                continue
            segment = signal[signal_start:signal_start + length]
            signal_start_pos.append(signal_start)
            kmer_signal.append(list(segment))
            refs_pos.append(ref_start)
            read_pos.append(read_start)
            if ref_start + 1 == self.ur[ref_end_by_ur][1]:
                # End of the current UR interval: jump to the next one.
                ref_end_by_ur += 1
                ref_start = self.ur[ref_end_by_ur][0]
                read_start += 1
            else:
                ref_start += 1
                read_start += 1
            signal_start += length
        for i in range(len(kmers)):
            dic = {'kmer': kmers[i], 'nuc': kmers[i][2], 'signal': list(kmer_signal[i]), 'signal_pos': signal_start_pos[i], 'ref_pos': refs_pos[i], 'read_pos': read_pos[i]}
            kmer_table.append(dic)
        # Trailing flank: last three bases reuse the final k-mer, no signal.
        for n in last_3_nuc:
            dic = {'kmer': last_kmer, 'nuc': n, 'signal': [], 'signal_pos': signal_start_pos[-1], 'ref_pos': ref_start, 'read_pos': read_start}
            kmer_table.append(dic)
            kmer_signal.append([])
            signal_start_pos.append(signal_start_pos[-1])
            read_start += 1
            ref_start += 1
        kmer_signal = first_kmer_signal + kmer_signal
        signal_start_pos = first_signal_start_pos + signal_start_pos
        assert len(signal_start_pos) == len(kmers) + 5 and len(signal_start_pos) == len(kmer_signal), f'Length mismatch: kmers+5 ({len(kmers) + 5}), signals ({len(kmer_signal)}), positions ({len(signal_start_pos)}).'
        assert len(self.new_rseq) == len(kmer_signal), f'New ref seq len ({len(self.new_rseq)}) and signal len ({len(kmer_signal)}) mismatch.'
        return kmer_table, kmer_signal

def extract_signal_with_kmer(read_id, chr, rseq, ref_range, signal, uncalled4):
    """Run the uncalled4 k-mer/signal pairing and serialize the signal.

    Segments are comma-joined and segments themselves are semicolon-joined,
    e.g. "1,2;3". Returns (read_id, chr, ur pairs, signal text, kmer table).
    """
    kmer_table, kmer_signal = uncalled4.get_signal_with_kmers(rseq, ref_range, signal)
    total_signal = ";".join(",".join(str(v) for v in seg) for seg in kmer_signal)
    return read_id, chr, uncalled4.ur, total_signal, kmer_table

def extract_refs_signal_with_kmer(rseq, ref_range, signal, uncalled4):
    """Run the uncalled4 pairing and return the reference-level results.

    Returns (ur pairs, spliced ref seq, per-interval ref pieces, per-base
    signal segments). Relies on get_signal_with_kmers populating
    ``new_rseq`` / ``new_rseq_split`` on the uncalled4 object.
    """
    kmer_signal = uncalled4.get_signal_with_kmers(rseq, ref_range, signal)[1]
    return uncalled4.ur, uncalled4.new_rseq, uncalled4.new_rseq_split, kmer_signal

def get_q2tloc_from_cigar(r_cigar_tuple, strand, seq_len):
    """Map query positions to reference offsets by walking the CIGAR.

    Returns an int32 array of length seq_len + 1: aligned query bases get
    their reference offset, inserted bases get -1, and the final slot holds
    the total consumed reference length. Raises ValueError when the CIGAR
    does not account for the full query span.
    """
    INVALID = -2
    mapping = np.full(seq_len + 1, INVALID, dtype=np.int32)
    r_pos = q_pos = 0
    # Reverse-strand alignments walk the CIGAR back-to-front.
    ops = r_cigar_tuple if strand == 1 else r_cigar_tuple[::-1]
    for op, length in ops:
        if op == 1:
            # Insertion: query bases with no reference partner.
            mapping[q_pos:q_pos + length] = -1
            q_pos += length
        elif op in (2, 3):
            # Deletion / reference skip: reference advances only.
            r_pos += length
        elif op in (0, 7, 8):
            # (Mis)match: query and reference advance in lockstep.
            mapping[q_pos:q_pos + length] = np.arange(r_pos, r_pos + length, dtype=np.int32)
            q_pos += length
            r_pos += length
        elif op == 6:
            # Padding: no movement on either side.
            pass
    mapping[q_pos] = r_pos
    if mapping[-1] == INVALID:
        raise ValueError(f"Invalid CIGAR: ref length {seq_len}, implied {r_pos}")
    return mapping

def write_featurestr(write_fp, featurestr_q, time_wait, control, stop_event):
    """Drain batches of feature strings from featurestr_q into write_fp.

    Runs until a "kill" sentinel arrives or stop_event is set. In control
    (test) mode, sets stop_event after 4000 written lines.
    """
    written = 0
    wf = open(write_fp, 'w')
    try:
        while not stop_event.is_set():
            try:
                batch = featurestr_q.get(timeout=10)
            except queue.Empty:
                continue
            if batch == "kill":
                break
            for feature in batch:
                written += 1
                wf.write(feature + "\n")
                if control and written >= 4000:
                    logging.info("Control mode: exiting after 4000 outputs")
                    stop_event.set()
                    break
            # Flush after each batch so progress is visible on disk.
            wf.flush()
    finally:
        wf.close()
    logging.info(f"Written {written} feature strings")

def statistic_read(features_file, signal_ids):
    """Count TSV rows and rows whose read id (column 1) is in signal_ids.

    Returns (set of matched read ids, total row count, matched row count).
    Transparently handles gzip-compressed input.
    """
    is_gz = features_file.endswith(".gz")
    infile = gzip.open(features_file, 'rt') if is_gz else open(features_file, 'r')
    matched_ids = set()
    total = 0
    matched = 0
    try:
        for line in infile:
            total += 1
            rid = line.strip().split("\t")[0]
            if rid in signal_ids:
                matched += 1
                matched_ids.add(rid)
    finally:
        infile.close()
    return matched_ids, total, matched

def process_tsv(features_file, feature_q, signal_ids, read_nums, existing_keys, remove_indexes, time_wait, reads_per_chunk, qsize_limit, stop_event):
    """Stream deepsignal TSV rows into feature_q in reads_per_chunk batches.

    Each queued item is a list of (read_id, chromosome, {position: score})
    tuples. Rows whose "read||chrom" key is already written or explicitly
    excluded are skipped. A "kill" sentinel is always enqueued at the end.
    (``signal_ids`` is accepted for interface compatibility but unused.)
    """
    is_gz = features_file.endswith(".gz")
    infile = gzip.open(features_file, 'rt') if is_gz else open(features_file, 'r')
    chunk = []
    try:
        for line in tqdm(infile, total=read_nums, ncols=100, desc="Processing TSV", file=sys.stderr):
            if stop_event.is_set():
                break
            fields = line.strip().split("\t")
            rid, chrom = fields[0], fields[1]
            key = '||'.join([rid, chrom])
            if key in existing_keys or key in remove_indexes:
                continue
            # Columns 3/4: comma-separated positions and prediction scores.
            pred_by_pos = dict(zip((int(p) for p in fields[2].split(",")),
                                   (float(v) for v in fields[3].split(","))))
            chunk.append((rid, chrom, pred_by_pos))
            if len(chunk) >= reads_per_chunk:
                # Back-pressure: wait for queue space before enqueueing.
                while feature_q.qsize() >= qsize_limit and not stop_event.is_set():
                    time.sleep(time_wait)
                feature_q.put(chunk)
                chunk = []
        if chunk and not stop_event.is_set():
            while feature_q.qsize() >= qsize_limit and not stop_event.is_set():
                time.sleep(time_wait)
            feature_q.put(chunk)
    finally:
        infile.close()
    feature_q.put("kill")
    logging.info("TSV processing complete.")

def determine_motif_at_pos(seq, pos):
    """Classify the cytosine context at ``pos``: "CG", "CHG", "CHH", or None.

    Returns None when pos is too close to the end of seq for the context
    to be determined, or when seq[pos] is not a cytosine.
    """
    if pos >= len(seq) - 1:
        return None
    if seq[pos:pos + 2] == "CG":
        return "CG"
    if pos >= len(seq) - 2:
        return None
    triplet = seq[pos:pos + 3]
    if triplet[0] != 'C':
        return None
    if triplet[1] == 'G':
        # CG would have matched above; anything else here is unclassified.
        return None
    return "CHG" if triplet[2] == 'G' else "CHH"

def align_signals_and_extend_ref_seq(pos_pair, read_signal, read_seq, ref_seq, motif_seqs, methyloc, strand, ref_start, ref_end):
    """Project per-base read signal onto the reference sequence.

    ``pos_pair`` is a list of (read_pos, ref_pos) tuples (from aligned
    pairs; either side may be None). Builds a reference-space sequence that
    keeps unaligned read flanks, folds insertion signal into the previous
    reference base, and locates motif sites on the rebuilt sequence.

    Returns (new_ref_seq, new_ref_signal, ref_readlocs, ref_poss, pred_pos).
    """
    # Range of pos_pair entries that actually align to the reference.
    first_valid_index = next((i for i, (_, ref_pos) in enumerate(pos_pair) if ref_pos is not None), len(pos_pair))
    last_valid_index = len(pos_pair) - 1 - next((i for i, (_, ref_pos) in enumerate(reversed(pos_pair)) if ref_pos is not None), len(pos_pair))
    new_ref_seq = []
    new_ref_signal = []
    # Leading unaligned read bases are carried over verbatim.
    for i in range(first_valid_index):
        read_pos, _ = pos_pair[i]
        if read_pos is not None:
            new_ref_seq.append(read_seq[read_pos])
            new_ref_signal.append(read_signal[read_pos])
    last_valid_ref_pos = len(new_ref_seq) - 1
    # Aligned core: take the reference base; insertions (ref_pos is None)
    # merge their signal into the previous reference base.
    for i in range(first_valid_index, last_valid_index + 1):
        read_pos, ref_pos = pos_pair[i]
        if ref_pos is not None:
            new_ref_seq.append(ref_seq[ref_pos])
            new_ref_signal.append(read_signal[read_pos] if read_pos is not None else [])
            last_valid_ref_pos = len(new_ref_seq) - 1
        elif ref_pos is None:
            # NOTE(review): last_valid_ref_pos is always an int (never None),
            # and extend() mutates a list object shared with read_signal --
            # confirm this aliasing is intended.
            if last_valid_ref_pos is not None:
                new_ref_signal[last_valid_ref_pos].extend(read_signal[read_pos])
    # Trailing unaligned read bases are also carried over verbatim.
    for i in range(last_valid_index + 1, len(pos_pair)):
        read_pos, _ = pos_pair[i]
        if read_pos is not None:
            new_ref_seq.append(read_seq[read_pos])
            new_ref_signal.append(read_signal[read_pos])
    new_ref_seq = ''.join(new_ref_seq)
    ref_readlocs = {}
    ref_poss = []
    pred_pos = []
    # Motif hits on the rebuilt sequence; hits in the unaligned flanks get
    # reference position -1.
    tsite_locs = get_refloc_of_methysite_in_motif(new_ref_seq, set(motif_seqs), methyloc)
    for loc_in_read in tsite_locs:
        if loc_in_read < first_valid_index or loc_in_read > last_valid_index:
            ref_pos = -1
            ref_poss.append(ref_pos)
            pred_pos.append(loc_in_read)
            continue
        if strand == "-":
            ref_pos = ref_end - loc_in_read - 1 + first_valid_index
        else:
            ref_pos = ref_start + loc_in_read - first_valid_index
        ref_poss.append(ref_pos)
        pred_pos.append(loc_in_read)
    ref_readlocs = dict(zip(ref_poss, pred_pos))
    return new_ref_seq, new_ref_signal, ref_readlocs, ref_poss, pred_pos

def process_dorado(read_data, bam_index, pod5_dr, hp_position, hn_position, bisulfite, use_ref, motif_seqs, methyloc, d_batch_size, qsize_limit, mapq, kmer, progress_bar, lock, process_chr, output_q, time_wait, stop_event, args):
    """Worker: join deepsignal predictions with POD5 signal and BAM alignments.

    For each (read_id, chromosome, {ref_pos: score}) tuple in ``read_data``,
    looks up the raw POD5 signal and every passing BAM alignment, groups the
    signal per base via the move table, locates motif sites (on the
    reference when ``use_ref``, else on the read), merges deepsignal scores
    with Dorado MM/ML calls and bisulfite/WGBS labels, and pushes
    tab-separated feature strings onto ``output_q`` in batches of
    ``d_batch_size``. Per-read failures are logged and skipped.
    """
    fea_list = []
    for read_name, chromosome, pred_deepsignal in read_data:
        if stop_event.is_set():
            break
        try:
            # Progress bar is shared across worker threads.
            if progress_bar and lock:
                with lock:
                    progress_bar.update(1)
            # Back-pressure: wait while the writer queue is full.
            while output_q.qsize() >= qsize_limit and not stop_event.is_set():
                time.sleep(time_wait)
            start_time = time.time()
            read = pod5_dr.get_read(read_name)
            logging.debug(f"Read {read_name} POD5 took {time.time() - start_time:.2f}s")
            if read is None:
                continue
            signal = read.signal
            shift_dacs_to_pa = read.calibration.offset
            scale_dacs_to_pa = read.calibration.scale
            for bam_read in bam_index.get_alignments(read_name):
                start_time = time.time()
                # Keep only mapped, primary alignments above the MAPQ cutoff.
                if bam_read.is_mapped == False or bam_read.mapping_quality < mapq or bam_read.is_supplementary or bam_read.is_secondary:
                    continue
                reference_name = bam_read.reference_name
                if reference_name != chromosome:
                    continue
                # Optional chromosome filter: list of names, a single name,
                # or a 'no<CHR>' exclusion string.
                if process_chr:
                    if isinstance(process_chr, list) and reference_name not in process_chr:
                        continue
                    # NOTE(review): when process_chr is a list and the name IS
                    # in it, this elif still fires (list[:2] != 'no' is True
                    # and str != list is True), skipping every read in list
                    # mode -- confirm intended behavior.
                    elif process_chr[:2] != 'no' and reference_name != process_chr:
                        continue
                    elif process_chr[:2] == 'no' and reference_name == process_chr[2:]:
                        continue
                cigar_tuples = bam_read.cigartuples
                seq = bam_read.get_forward_sequence()
                if seq is None or signal is None:
                    continue
                seq_len = len(seq)
                if bam_read.infer_query_length() != seq_len:
                    logging.warning(f'Infer read length != seq length for read {read_name}')
                    continue
                # Dorado 5mC calls from MM/ML tags, keyed by reference
                # position; the ML byte is rescaled via (x + 0.5) / 256.
                dorado_pred = {}
                if bam_read.modified_bases:
                    ref_loc = bam_read.get_reference_positions(full_length=True)
                    for m, locs in bam_read.modified_bases_forward.items():
                        if m[0] == 'C' and m[2] == 'm':
                            for lc in locs:
                                rloc = ref_loc[lc[0]] if bam_read.is_forward else ref_loc[seq_len - lc[0] - 1]
                                if rloc is not None:
                                    dorado_pred[rloc] = (lc[1] + 0.5) / 256
                if use_ref:
                    # Reference mode needs the MD tag to reconstruct the
                    # reference sequence from the alignment.
                    if not bam_read.has_tag('MD'):
                        logging.warning(f'No MD tag for read {read_name}')
                        continue
                    try:
                        ref_seq = complement_seq(bam_read.get_reference_sequence().upper()) if bam_read.is_reverse else bam_read.get_reference_sequence().upper()
                    except:
                        # NOTE(review): bare except -- presumably guarding
                        # pysam MD/sequence-length errors; confirm and narrow.
                        logging.warning(f'MD length mismatch for read {read_name}')
                        continue
                read_dict = dict(bam_read.tags)
                strand_code = 0 if bam_read.is_reverse else 1
                strand = "-" if strand_code == 0 else "+"
                ref_start = bam_read.reference_start
                ref_end = bam_read.reference_end
                # ts = samples trimmed from signal start; sp = split offset.
                num_trimmed = read_dict["ts"]
                if bam_read.has_tag('sp'):
                    num_trimmed += bam_read.get_tag('sp')
                shift_pa_to_norm = read_dict["sm"]
                scale_pa_to_norm = read_dict["sd"]
                mv_table = read_dict["mv"]
                signal_trimmed = signal[num_trimmed:] if num_trimmed >= 0 else signal[:num_trimmed]
                # mv_table[0] is the stride; the rest is the move vector used
                # to split the signal into per-base groups.
                signal_group = _group_signals_by_movetable_v2(signal_trimmed, np.asarray(mv_table[1:]), int(mv_table[0]))
                if use_ref:
                    if bam_read.has_tag('ur') and bam_read.has_tag('ul'):
                        # Uncalled4 tags present: resegment the raw signal
                        # directly against the UR reference intervals.
                        ur = bam_read.get_tag("ur")
                        ul = bam_read.get_tag("ul")
                        ref_range = [ref_start, ref_end]
                        uncalled4_obj = ExtractSignalWithKmerByUncalled4(ur, ul, kmer)
                        # ref_range is rebound to the list of UR (start, end) pairs.
                        ref_range, seq, rseqs, signal_group = extract_refs_signal_with_kmer(ref_seq, ref_range, signal, uncalled4_obj)
                        ref_poss = []
                        pred_pos = []
                        ref_len = []
                        for i, rseq in enumerate(rseqs):
                            ref_start, ref_end = ref_range[i]
                            ref_len.append(len(rseq))
                            tsite_locs = get_refloc_of_methysite_in_motif(rseq, set(motif_seqs), methyloc)
                            for loc_in_read in tsite_locs:
                                ref_pos = ref_end - loc_in_read - 1 if strand == "-" else ref_start + loc_in_read
                                ref_poss.append(ref_pos)
                                # Offset within the spliced sequence.
                                pred_pos.append(loc_in_read + sum(ref_len[:i]))
                        ref_readlocs = dict(zip(pred_pos, ref_poss))
                    else:
                        # No UR/UL tags: project read-space signal onto the
                        # reference through the aligned pairs.
                        pos_pair = []
                        for read_pos, ref_pos in bam_read.get_aligned_pairs():
                            if read_pos is None:
                                pos_pair.append((None, ref_end - ref_pos - 1 if bam_read.is_reverse else ref_pos - ref_start))
                                continue
                            if ref_pos is None:
                                pos_pair.append((len(seq) - read_pos - 1 if bam_read.is_reverse else read_pos, None))
                                continue
                            pos_pair.append((len(seq) - read_pos - 1 if bam_read.is_reverse else read_pos, ref_end - ref_pos - 1 if bam_read.is_reverse else ref_pos - ref_start))
                        if strand == "-":
                            pos_pair.reverse()
                        seq, signal_group, ref_readlocs, ref_poss, pred_pos = align_signals_and_extend_ref_seq(
                            pos_pair, signal_group, seq, ref_seq, motif_seqs, methyloc, strand, ref_start, ref_end)
                if len(signal_group) != len(seq):
                    logging.warning(f'Signal to seq mismatch for read {read_name}')
                    continue
                if not use_ref:
                    # Read-space mode: map motif hits to the reference via CIGAR.
                    qalign_start = bam_read.query_alignment_start
                    qalign_end = bam_read.query_alignment_end
                    seq_start = len(seq) - qalign_end if bam_read.is_reverse else qalign_start
                    seq_end = len(seq) - qalign_start if bam_read.is_reverse else qalign_end
                    q_to_r_poss = get_q2tloc_from_cigar(cigar_tuples, strand_code, seq_end - seq_start)
                    ref_readlocs = {}
                    ref_poss = []
                    pred_pos = []
                    tsite_locs = get_refloc_of_methysite_in_motif(seq, set(motif_seqs), methyloc)
                    for loc_in_read in tsite_locs:
                        if seq_start <= loc_in_read < seq_end:
                            offset_idx = loc_in_read - seq_start
                            if q_to_r_poss[offset_idx] != -1:
                                ref_pos = ref_end - 1 - q_to_r_poss[offset_idx] if strand == "-" else ref_start + q_to_r_poss[offset_idx]
                                ref_readlocs[ref_pos] = loc_in_read
                                ref_poss.append(ref_pos)
                                pred_pos.append(loc_in_read)
                if len(ref_poss) == 0:
                    logging.warning(f'No ref positions for read {read_name}, strand {strand}')
                    continue
                # Fill missing predictions with -1 sentinels.
                for key in ref_poss:
                    if key not in pred_deepsignal:
                        pred_deepsignal[key] = -1
                    if key not in dorado_pred:
                        dorado_pred[key] = -1
                signal_group_new = [np.round(np.array(sig), decimals=6) for sig in signal_group]
                norm_signals_text = ';'.join([",".join(map(str, x)) for x in signal_group_new])
                mean_pred = {}
                wgbs = {}
                bisulfite_ref = {}
                pred_label = {}
                for key in ref_poss:
                    deep_val = pred_deepsignal.get(key, -1)
                    dorado_val = dorado_pred.get(key, -1)
                    # Average when both callers produced a value, otherwise
                    # take whichever exists (-1 if neither).
                    mean_pred[key] = (deep_val + dorado_val) / 2 if deep_val != -1 and dorado_val != -1 else dorado_val if dorado_val != -1 else deep_val
                    read_pos = ref_readlocs[key]
                    motif = determine_motif_at_pos(seq, read_pos)
                    if motif is None or motif not in bisulfite:
                        bisulfite_ref[key] = -1
                        wgbs[key] = -1
                    else:
                        key_combine = f"{reference_name}||{key}"
                        bisulfite_ref[key] = bisulfite[motif].get(key_combine, -1)
                        # WGBS label: 1 = high-confidence positive, 0 = negative.
                        wgbs[key] = 1 if key_combine in hp_position else 0 if key_combine in hn_position else -1
                    # Final label: WGBS wins; else threshold the merged score at 0.5.
                    pred_label[key] = wgbs[key] if wgbs[key] != -1 else -1 if mean_pred[key] == -1 else 1 if mean_pred[key] >= 0.5 else 0
                pred_deepsignal_new = np.round(np.array([pred_deepsignal[x] for x in ref_poss]), decimals=6)
                pred_deepsignal_text = ','.join(map(str, pred_deepsignal_new))
                dorado_pred_new = np.round(np.array([dorado_pred[x] for x in ref_poss]), decimals=6)
                pred_dorado_text = ','.join(map(str, dorado_pred_new))
                mean_pred_new = np.round(np.array([mean_pred[x] for x in ref_poss]), decimals=6)
                mean_pred_text = ','.join(map(str, mean_pred_new))
                pred_label_text = ','.join(map(str, [pred_label[x] for x in ref_poss]))
                pred_pos_text = ','.join(map(str, pred_pos))
                sample_id = '\t'.join([read_name, reference_name, str(ref_start)])
                bisulfite_text = ','.join(map(str, [bisulfite_ref[x] for x in ref_poss]))
                ref_pos_text = ','.join(map(str, ref_poss))
                fea_str = '\t'.join([
                    sample_id, seq, norm_signals_text, pred_pos_text, pred_dorado_text, pred_deepsignal_text,
                    mean_pred_text, pred_label_text, str(bam_read.mapping_quality), str(shift_dacs_to_pa),
                    str(scale_dacs_to_pa), str(shift_pa_to_norm), str(scale_pa_to_norm), bisulfite_text, ref_pos_text
                ])
                fea_list.append(fea_str)
                logging.debug(f"Read {read_name} data process took {time.time() - start_time:.2f}s")
                if len(fea_list) >= d_batch_size:
                    while output_q.qsize() >= qsize_limit and not stop_event.is_set():
                        time.sleep(time_wait)
                    output_q.put(fea_list)
                    fea_list = []
        except Exception as e:
            logging.error(f"Error processing read {read_name}: {e}")
            traceback.print_exc()
    # Flush any remaining features.
    if fea_list and not stop_event.is_set():
        while output_q.qsize() >= qsize_limit and not stop_event.is_set():
            time.sleep(time_wait)
        output_q.put(fea_list)

def generate_key(line):
    """Build a 'read_id||chromosome' key from the first two whitespace fields."""
    return '||'.join(line.split()[:2])

def process_file(output_file):
    """Collect 'read||chrom' keys already present in output_file.

    Returns an empty set when the file does not exist (fresh run).
    """
    try:
        with open(output_file, 'r') as f:
            return {'||'.join(line.split()[:2]) for line in f}
    except FileNotFoundError:
        return set()

def remove_key(key_input):
    """Read one exclusion key per line from key_input into a set."""
    with open(key_input, 'r') as fh:
        return {entry.strip() for entry in fh}

def read_position_file(position_file):
    """Load 'chrom||pos' keys from a whitespace-separated position file."""
    sep = "||"
    with open(position_file, "r") as rf:
        return {sep.join(line.strip().split()[:2]) for line in rf}

def read_bed(bisulfite_bed, strict=False, depth_threshold=5, label=1):
    """Parse a bisulfite BED into {'CG': {'chrom||pos': frequency}}.

    Six-column BEDs carry no value column, so every site gets label * 100.0.
    Longer records use column 10 (depth, filtered by depth_threshold) and
    column 11 (methylation value). In strict mode only symmetric CpG pairs
    (both strands >= 95 or both <= 5) are kept, collapsed to 100 / 0.
    """
    sep = "||"
    value_info = {}
    with open(bisulfite_bed, "r") as rf:
        for line in rf:
            fields = line.strip().split()
            chrom = fields[0]
            pos = int(fields[1])
            strand = fields[5]
            if len(fields) == 6:
                value = label * 100.0
            else:
                depth = int(fields[9])
                if depth < depth_threshold:
                    continue
                value = float(fields[10])
            value_info[sep.join([chrom, str(pos), strand])] = value
    freqinfo = {}
    if strict:
        for m_key in value_info:
            chrom, pos_s, strand = m_key.split(sep)
            pos = int(pos_s)
            # Partner cytosine on the opposite strand of the CpG dyad.
            if strand == '+':
                pair_key = sep.join([chrom, str(pos + 1), '-'])
            else:
                pair_key = sep.join([chrom, str(pos - 1), '+'])
            if pair_key not in value_info:
                continue
            v1, v2 = value_info[m_key], value_info[pair_key]
            freq_key = sep.join([chrom, str(pos)])
            if v1 >= 95 and v2 >= 95:
                freqinfo[freq_key] = 100
            elif v1 <= 5 and v2 <= 5:
                freqinfo[freq_key] = 0
            else:
                freqinfo[freq_key] = freqinfo.get(freq_key, 0)
    else:
        for m_key, val in value_info.items():
            chrom, pos_s, _ = m_key.split(sep)
            freqinfo[sep.join([chrom, pos_s])] = val
    return {'CG': freqinfo}

def read_tsv(key_input):
    """Read one key per line from key_input, preserving order."""
    with open(key_input, 'r') as fh:
        return [entry.strip() for entry in fh]

def read_id(pod5_dir):
    """Collect every read ID from all *.pod5 files under pod5_dir (recursive)."""
    ids_set = []
    for pod5_path in Path(pod5_dir).rglob("*.pod5"):
        with p5.DatasetReader(pod5_path, recursive=True, max_cached_readers=10, threads=5) as dataset:
            ids_set.extend(str(read_record.read_id) for read_record in dataset)
    logging.info(f"Found {len(ids_set)} read IDs in POD5")
    return ids_set

def process_chr(chr_file):
    """Read one chromosome name per line from chr_file, preserving order."""
    with open(chr_file, 'r') as fh:
        return [name.strip() for name in fh]

def extract(args):
    """Top-level pipeline driver: read TSV predictions, join them with
    BAM/POD5 data, and write merged feature strings to ``args.write_path``.

    Spawns a TSV-reader thread and a writer thread around a thread pool of
    process_dorado workers; ``stop_event`` coordinates shutdown.
    """
    feature_q = queue.Queue(maxsize=args.q_size_limit)
    output_q = queue.Queue(maxsize=args.q_size_limit)
    stop_event = threading.Event()
    lock = threading.Lock()
    input_path = os.path.abspath(args.tsv)
    if not os.path.exists(input_path):
        raise ValueError(f"TSV input path {input_path} does not exist!")
    bam_index = bam_reader.ReadIndexedBam(args.bam)
    signal_dir = os.path.abspath(args.signal)
    pod5_dr = p5.DatasetReader(signal_dir, recursive=True, threads=args.pod5_proc) if os.path.isdir(signal_dir) else p5.DatasetReader(signal_dir, threads=args.pod5_proc)
    # Read IDs present in the signal data: from an explicit file, a directory
    # scan, or the single POD5 reader.
    signal_ids = read_tsv(args.tsv_read_ids) if args.tsv_read_ids else read_id(args.signal) if os.path.isdir(signal_dir) else list(pod5_dr.read_ids)
    remove_indexes = remove_key(args.keyset) if args.keyset else set()
    # Keys already written to the output file (resume support).
    existing_keys = process_file(args.write_path)
    hp_position = read_position_file(args.hp) if args.hp and args.hn else set()
    hn_position = read_position_file(args.hn) if args.hp and args.hn else set()
    bisulfite = {}
    is_dna = not args.rna
    motif_seqs = get_motif_seqs(args.motifs, is_dna)
    if args.bed:
        for bed in args.bed:
            # Optional "MOTIF,path" form; the motif prefix is currently
            # unused here since read_bed always returns a 'CG' table.
            motif, bed_address = bed.split(',', 1) if ',' in bed else (None, bed)
            bisulfite.update(read_bed(bed_address, args.strict, args.depth, args.label))
    chr_indexes = process_chr(args.chr) if args.chr and os.path.isfile(args.chr) else args.chr
    union_read_set, read_nums, union_read_nums = statistic_read(input_path, signal_ids)
    with tqdm(total=union_read_nums, desc="Processing reads", ncols=100, file=sys.stderr) as progress_bar:
        tsv_thread = threading.Thread(
            target=process_tsv,
            args=(input_path, feature_q, union_read_set, read_nums, existing_keys, remove_indexes,
                  args.timewait, args.chunk, args.q_size_limit, stop_event),
            name="tsv_reader"
        )
        tsv_thread.daemon = True
        tsv_thread.start()
        writer_thread = threading.Thread(
            target=write_featurestr,
            args=(args.write_path, output_q, args.timewait, args.control, stop_event),
            name="writer"
        )
        writer_thread.daemon = True
        writer_thread.start()
        time.sleep(2)  # Allow TSV thread to start
        try:
            with concurrent.futures.ThreadPoolExecutor(max_workers=args.nproc - 2) as executor:
                futures = []
                while not stop_event.is_set():
                    try:
                        read_data = feature_q.get(timeout=10)
                        if read_data == "kill":
                            break
                        futures.append(executor.submit(
                            process_dorado,
                            read_data, bam_index, pod5_dr, hp_position, hn_position, bisulfite, args.ref, motif_seqs,
                            args.mod_loc, args.d_batch_size, args.q_size_limit, args.mapq, args.kmer, progress_bar,
                            lock, chr_indexes, output_q, args.timewait, stop_event, args
                        ))
                    except queue.Empty:
                        # Reader finished and the queue drained: no more work.
                        if not tsv_thread.is_alive() and feature_q.qsize() == 0:
                            break
                for future in concurrent.futures.as_completed(futures):
                    try:
                        future.result()
                    except Exception as e:
                        logging.error(f"Worker exception: {e}")
                        traceback.print_exc()
        except Exception as e:
            logging.error(f"Main loop exception: {e}")
            traceback.print_exc()
        finally:
            # Orderly shutdown: signal threads, unblock the writer, join, close.
            stop_event.set()
            output_q.put("kill")
            tsv_thread.join()
            writer_thread.join()
            pod5_dr.close()
            bam_index.close()
    logging.info("Processing complete.")

def parse_args():
    """Build and evaluate the command-line interface.

    Returns the parsed argparse.Namespace.

    Fix: ``--label`` previously defaulted to None, but read_bed computes
    ``label * 100.0`` for 6-column BED records, so an unset --label crashed
    with a TypeError. It now defaults to 1, matching read_bed's own default.
    """
    parser = argparse.ArgumentParser(description="Extract features from TSV, BAM, and POD5 files.")
    parser.add_argument("--keyset", type=str, help="File with keys to exclude")
    parser.add_argument("--hp", type=str, help="High confidence positive position file")
    parser.add_argument("--hn", type=str, help="High confidence negative position file")
    parser.add_argument("--bam", type=str, required=True, help="BAM file")
    parser.add_argument("--pod5", action="store_true", help="Use POD5 format")
    parser.add_argument("--bed", type=str, action="append", help="BED file(s)")
    parser.add_argument("--signal", type=str, required=True, help="POD5 file or directory")
    parser.add_argument("--tsv", type=str, required=True, help="Input TSV file")
    parser.add_argument("--tsv_read_ids", type=str, help="TSV file with read IDs")
    parser.add_argument("--write_path", type=str, required=True, help="Output TSV file")
    parser.add_argument("--nproc", "-p", type=int, required=True, help="Number of threads")
    parser.add_argument("--timewait", "-t", type=float, default=0.1, help="Wait time for queue operations")
    parser.add_argument("--d_batch_size", type=int, default=2, help="Batch size for Dorado processing")
    parser.add_argument("--chunk", "-c", type=int, default=1, help="TSV chunk size")
    parser.add_argument("--q_size_limit", "-q", type=int, default=80, help="Queue size limit")
    parser.add_argument("--control", action="store_true", help="Test mode")
    parser.add_argument("--mapq", type=int, default=10, help="Minimum mapping quality")
    parser.add_argument("--depth", type=int, default=5, help="Minimum depth for BED")
    parser.add_argument("--kmer", type=int, default=6, help="K-mer size")
    parser.add_argument("--pod5_proc", "-d", type=int, default=10, help="POD5 processing threads")
    parser.add_argument("--ref", action="store_true", help="Use reference sequence")
    parser.add_argument("--motifs", type=str, default="CG", help="Motif sequences")
    parser.add_argument("--mod_loc", type=int, default=0, help="Modification location in motif")
    parser.add_argument("--rna", action="store_true", help="RNA data")
    parser.add_argument("--strict", action="store_true", help="Strict BED parsing")
    parser.add_argument("--chr", type=str, help="Chromosome to process")
    # Default 1 keeps read_bed's `label * 100.0` well-defined for 6-column BEDs.
    parser.add_argument("--label", type=int, default=1, help="Label for BED file (default: 1)")
    parser.add_argument("--log-level", type=str, default="INFO", choices=["DEBUG", "INFO", "WARNING", "ERROR"],
                        help="Logging level (default: INFO)")
    return parser.parse_args()

def main():
    """CLI entry point: parse arguments, set log verbosity, run extraction."""
    cli_args = parse_args()
    # Apply the user-requested logging level to the root logger.
    root_logger = logging.getLogger()
    root_logger.setLevel(getattr(logging, cli_args.log_level))
    extract(cli_args)

if __name__ == '__main__':
    sys.exit(main())