#!/usr/bin/python
# coding=UTF-8
#import pysam
import argparse
import sys
import os
from multiprocessing import Manager
import multiprocessing as mp
import gzip
import mmap
import pod5 as p5
import time
import datetime
import numpy as np
from ont_fast5_api.fast5_interface import get_fast5_file
from deepsignal3.utils.process_utils import fill_files_queue
from deepsignal3.utils.process_utils import get_refloc_of_methysite_in_motif
from deepsignal3.utils.process_utils import get_motif_seqs
from pathlib import Path
# from memory_profiler import profile
from deepsignal3.utils.process_utils import CIGAR2CODE
from deepsignal3.utils.process_utils import CIGAR_REGEX
from deepsignal3.utils import bam_reader
from deepsignal3.extract_features_pod5 import _group_signals_by_movetable_v2
import re
import traceback
from deepsignal3.utils.process_utils import complement_seq
from tqdm import tqdm

def _compute_pct_identity(cigar):
    nalign, nmatch = 0, 0
    for op_len, op in cigar:
        if op not in (4, 5):
            nalign += op_len
        if op in (0, 7):
            nmatch += op_len
    return nmatch / float(nalign)




def get_q2tloc_from_cigar(r_cigar_tuple, strand, seq_len):
    """Map each query position to its reference position via the cigar.

    insertion: -1, deletion: -2, mismatch: -3
    :param r_cigar_tuple: pysam.alignmentSegment.cigartuples
    :param strand: 1/-1 for fwd/rev
    :param seq_len: read alignment length
    :return: query pos to ref pos
    """
    fill_invalid = -2
    # one slot per query base plus a sentinel for the end position
    q_to_r_poss = np.full(seq_len + 1, fill_invalid, dtype=np.int32)
    # walk cigar ops in read direction (reverse them for the - strand)
    ops = r_cigar_tuple if strand == 1 else r_cigar_tuple[::-1]
    q_pos, r_pos = 0, 0
    for op, op_len in ops:
        if op == 1:
            # insertion relative to the reference
            q_to_r_poss[q_pos:q_pos + op_len] = -1
            q_pos += op_len
        elif op in (2, 3):
            # deletion / reference skip: reference advances alone
            r_pos += op_len
        elif op in (0, 7, 8):
            # aligned bases: both coordinates advance in lockstep
            q_to_r_poss[q_pos:q_pos + op_len] = np.arange(
                r_pos, r_pos + op_len, dtype=np.int32)
            q_pos += op_len
            r_pos += op_len
        elif op == 6:
            # padding (shouldn't happen in mappy)
            pass
    q_to_r_poss[q_pos] = r_pos
    if q_to_r_poss[-1] == fill_invalid:
        raise ValueError(
            ("Invalid cigar string encountered. Reference length: {}  Cigar "
             + "implied reference length: {}").format(seq_len, r_pos)
        )
    return q_to_r_poss

def _write_featurestr(write_fp, featurestr_q, control, time_wait=1):
    # print('write_process-{} starts'.format(os.getpid()))
    output = 0
    # if os.path.exists(write_fp):
    #     with open(write_fp, 'a') as wf:
    #         while True:
    #             # during test, it's ok without the sleep(time_wait)
    #             if featurestr_q.empty():
    #                 time.sleep(time_wait)
    #                 continue
    #             features_str = featurestr_q.get()
    #             if features_str == "kill":
    #                 # print('output line {}'.format(output))
    #                 # print('write_process-{} finished'.format(os.getpid()))
    #                 break
    #             for one_features_str in features_str:
    #                 output += 1
    #                 wf.write(one_features_str + "\n")
    #             wf.flush()
    # else:
    with open(write_fp, 'w') as wf:
        while True:
            # during test, it's ok without the sleep(time_wait)
            if featurestr_q.empty():
                time.sleep(time_wait)
                continue
            features_str = featurestr_q.get()
            if features_str == "kill":
                # print('output line {}'.format(output))
                # print('write_process-{} finished'.format(os.getpid()))
                break
            for one_features_str in features_str:
                output += 1
                wf.write(one_features_str + "\n")
                if control is not None and output >= control:
                    # 设定终止标志
                    # event.set()
                    sys.exit(0)
            wf.flush()

##########
# process tsv and alignment to bam and pod5
##########


def align_signals(pos_pair, read_signal, ref_seq):
    """Redistribute per-base read signal onto reference coordinates.

    pos_pair is a sequence of (read_pos, ref_pos) alignment pairs where None
    marks a deletion (read_pos is None) or an insertion (ref_pos is None).
    Inserted signal is appended to the most recently seen reference position,
    or prepended to position 0 when no reference position has been seen yet.

    :param pos_pair: list of (read_pos, ref_pos) pairs
    :param read_signal: list of per-read-base signal chunks (list-like)
    :param ref_seq: reference sequence; its length sizes the output
    :return: list (len == len(ref_seq)) of signal chunks per reference base
    """
    per_ref = [[] for _ in ref_seq]
    anchor = None  # most recent reference index that received signal
    for read_pos, ref_pos in pos_pair:
        if read_pos is None:
            # deletion: no read signal to place for this reference base
            continue
        if ref_pos is None:
            # insertion relative to the reference
            if anchor is None:
                # no reference base seen yet: prepend to position 0
                per_ref[0] = read_signal[read_pos] + per_ref[0]
            else:
                per_ref[anchor] += read_signal[read_pos]
        else:
            per_ref[ref_pos] += read_signal[read_pos]
            anchor = ref_pos
    return per_ref

def filter_n_and_update_indices(seq, pred_pos):
    """Remove 'N' bases from seq and shift prediction positions accordingly.

    :param seq: base sequence, possibly containing 'N'
    :param pred_pos: iterable of 0-based positions into seq
    :return: (seq with all 'N's removed,
              positions remapped into the filtered sequence,
              ascending list of original 'N' positions)
    """
    # positions of every 'N' in the original sequence (ascending order)
    n_positions = [i for i, base in enumerate(seq) if base == 'N']

    # each position shifts left by the number of 'N's that precede it
    updated_pred_pos = [pos - sum(1 for n_pos in n_positions if n_pos < pos)
                        for pos in pred_pos]

    # str.replace does the filtering in one C-level pass
    filtered_seq = seq.replace('N', '')
    return filtered_seq, updated_pred_pos, n_positions

def align_signals_and_extend_ref_seq(pos_pair, read_signal, read_seq, ref_seq,motif_seqs,methyloc,strand,ref_start,ref_end):
    """Build a reference-anchored sequence/signal pair, keeping unaligned read ends.

    The aligned middle section uses reference bases; the unaligned read prefix
    and suffix (e.g. soft clips) are kept as read bases with their signal.
    Insertions have their signal appended to the previous reference base.
    Then motif sites are located in the combined sequence and mapped back to
    genomic coordinates (sites in the unaligned ends get ref position -1).

    :param pos_pair: (read_pos, ref_pos) pairs in read orientation; None marks
        insertion/deletion. Positions are assumed already flipped/offset by the
        caller (read-local and ref-local indices) — see process_dorado.
    :param read_signal: per-read-base signal chunks
    :param read_seq: read basecall sequence
    :param ref_seq: aligned reference sequence (read-oriented)
    :param motif_seqs: motif set to search for
    :param methyloc: 0-based offset of the target base within the motif
    :param strand: "+" or "-"
    :param ref_start: alignment reference start
    :param ref_end: alignment reference end
    :return: (new_ref_seq, new_ref_signal, ref_readlocs, ref_poss, pred_pos)
    """
    # indices of the first and last pairs that actually hit the reference
    first_valid_index = next((i for i, (_, ref_pos) in enumerate(pos_pair) if ref_pos is not None), len(pos_pair))
    last_valid_index = len(pos_pair) - 1 - next((i for i, (_, ref_pos) in enumerate(reversed(pos_pair)) if ref_pos is not None), len(pos_pair))

    # build the new reference sequence and its per-base signal in parallel
    new_ref_seq = []
    new_ref_signal = []

    # unaligned prefix: keep read bases and their signal as-is
    for i in range(first_valid_index):
        read_pos, _ = pos_pair[i]
        if read_pos is not None:
            new_ref_seq.append(read_seq[read_pos])
            new_ref_signal.append(read_signal[read_pos])

    # index (into new_ref_seq) of the last base that received signal;
    # -1 if the prefix was empty, but the first aligned pair below always
    # has a valid ref_pos, so the -1 is overwritten before it is used
    last_valid_ref_pos = len(new_ref_seq) - 1

    # aligned middle: reference bases carry the matching read signal
    for i in range(first_valid_index, last_valid_index + 1):
        read_pos, ref_pos = pos_pair[i]

        if ref_pos is not None:
            new_ref_seq.append(ref_seq[ref_pos])
            # deletions (read_pos None) get an empty signal chunk
            new_ref_signal.append(read_signal[read_pos] if read_pos is not None else [])
            last_valid_ref_pos = len(new_ref_seq) - 1

        elif ref_pos is None:
            # insertion: fold its signal into the previous reference base
            if last_valid_ref_pos is not None:
                new_ref_signal[last_valid_ref_pos].extend(read_signal[read_pos])

    # unaligned suffix: keep read bases and their signal as-is
    for i in range(last_valid_index + 1, len(pos_pair)):
        read_pos, _ = pos_pair[i]
        if read_pos is not None:
            new_ref_seq.append(read_seq[read_pos])
            new_ref_signal.append(read_signal[read_pos])
            

    new_ref_seq = ''.join(base  for base in new_ref_seq)
    ref_readlocs = dict()
    ref_poss = []
    pred_pos = []
    ref_pos = -1
    tsite_locs = get_refloc_of_methysite_in_motif(
        new_ref_seq, set(motif_seqs), methyloc)
    for loc_in_read in tsite_locs:
        # NOTE(review): loc_in_read indexes new_ref_seq, while
        # first_valid_index/last_valid_index index pos_pair; these coordinate
        # systems only coincide when indels/clips are absent — verify intent.
        if loc_in_read<first_valid_index:
            # site falls in the unaligned prefix: no genomic coordinate
            ref_pos = -1
            ref_poss.append(ref_pos)
            pred_pos.append(loc_in_read)
            continue
        if loc_in_read>last_valid_index:
            # site falls in the unaligned suffix: no genomic coordinate
            ref_pos = -1
            ref_poss.append(ref_pos)
            pred_pos.append(loc_in_read)
            continue
        if strand == "-":
            ref_pos = ref_end-loc_in_read-1+first_valid_index
        else:
            ref_pos = ref_start+loc_in_read-first_valid_index
        ref_poss.append(ref_pos)
        pred_pos.append(loc_in_read)
    ref_readlocs = dict(zip(pred_pos, ref_poss))
    return new_ref_seq, new_ref_signal, ref_readlocs, ref_poss, pred_pos

def process_dorado(bam_index, pod5_dr, bisulfite, read_ids_Q, output_Q, motif_seqs,
                   progress_bar, lock, process_chr, use_ref, label, methyloc=0,
                   time_wait=1, mapq=0, kmer=6, d_batch_size=2, qsize_limit=22):
    """Worker process: extract per-read feature strings from BAM + pod5 data.

    Pulls batches of read ids from read_ids_Q (terminated by a "kill"
    sentinel, which is re-queued for sibling workers), looks up each read's
    raw signal in pod5_dr and its primary alignment in bam_index, and pushes
    tab-separated feature strings to output_Q in batches of d_batch_size.

    :param process_chr: optional chromosome filter; "chrX" keeps only chrX,
        "no<chr>" excludes that chromosome
    :param use_ref: anchor sequence/signal to the reference (requires MD tag)
    :param bisulfite, label, kmer: accepted for interface compatibility;
        currently unused in this function
    """
    fea_list = []
    while True:
        while read_ids_Q.empty():
            time.sleep(time_wait)
        read_data = read_ids_Q.get()
        if read_data == "kill":
            # propagate the sentinel so the other workers also stop
            read_ids_Q.put("kill")
            break
        for read_name in read_data:
            # back-pressure: pause input while the output queue is full
            while output_Q.qsize() >= qsize_limit:
                time.sleep(time_wait)
            with lock:
                progress_bar.update(1)
            read = pod5_dr.get_read(read_name)
            if read is None:
                continue
            signal = read.signal
            shift_dacs_to_pa = read.calibration.offset
            scale_dacs_to_pa = read.calibration.scale
            try:
                for bam_read in bam_index.get_alignments(read_name):
                    if bam_read.is_supplementary or bam_read.is_secondary:
                        continue
                    reference_name = bam_read.reference_name
                    if reference_name is not None:
                        reference_name = reference_name.lower()
                    if bam_read.mapping_quality < mapq:
                        continue

                    if process_chr is not None:
                        if process_chr[:2] != 'no':
                            if reference_name != process_chr:
                                continue
                        elif process_chr[:2] == 'no':
                            if reference_name == process_chr[2:]:
                                continue

                    # check for None BEFORE .upper(): get_forward_sequence()
                    # may return None (the original called .upper() first,
                    # raising AttributeError instead of skipping cleanly)
                    seq = bam_read.get_forward_sequence()
                    if seq is None or signal is None:
                        continue
                    seq = seq.upper()
                    if reference_name is None:
                        continue
                    if use_ref:
                        if not bam_read.has_tag('MD'):
                            print('not have MD of readid {}'.format(read_name))
                            continue
                        if bam_read.is_reverse:
                            ref_seq = complement_seq(bam_read.get_reference_sequence().upper())
                        else:
                            ref_seq = bam_read.get_reference_sequence().upper()

                    if bam_read.infer_query_length() != len(seq):
                        print(
                            'infer read length is not same as seq length of readid {}'.format(read_name))
                        continue
                    read_dict = dict(bam_read.tags)

                    strand_code = 0 if bam_read.is_reverse else 1
                    strand = "-" if strand_code == 0 else "+"
                    ref_start = bam_read.reference_start
                    ref_end = bam_read.reference_end
                    cigar_tuples = bam_read.cigartuples
                    nm = bam_read.get_tag("NM")
                    query_alignment_length = bam_read.query_alignment_length
                    error_rate = nm / query_alignment_length * 100
                    # pysam cigartuples are (op, length) but
                    # _compute_pct_identity expects mappy-style (length, op);
                    # the original passed them through unswapped, producing a
                    # meaningless identity value
                    identity = _compute_pct_identity(
                        [(op_len, op) for op, op_len in cigar_tuples])

                    num_trimmed = read_dict["ts"]
                    shift_pa_to_norm = read_dict["sm"]
                    scale_pa_to_norm = read_dict["sd"]
                    mv_table = read_dict["mv"]
                    if num_trimmed >= 0:
                        signal_trimmed = signal[num_trimmed:]
                    else:
                        signal_trimmed = signal[:num_trimmed]
                    # split raw signal into per-base chunks via the move table
                    signal_group = _group_signals_by_movetable_v2(
                        signal_trimmed, np.asarray(mv_table[1:]), int(mv_table[0]))
                    if use_ref:
                        # build (read_pos, ref_pos) pairs; for reverse reads
                        # both coordinates are flipped so seq and ref both run
                        # in the read's 5'->3' direction
                        pos_pair = []
                        for read_pos, ref_pos in bam_read.get_aligned_pairs():
                            if read_pos is None:
                                if bam_read.is_reverse:
                                    pos_pair.append((None, ref_end - ref_pos - 1))
                                else:
                                    pos_pair.append((None, ref_pos - ref_start))
                                continue
                            if ref_pos is None:
                                if bam_read.is_reverse:
                                    pos_pair.append((len(seq) - read_pos - 1, None))
                                else:
                                    pos_pair.append((read_pos, None))
                                continue
                            if bam_read.is_reverse:
                                pos_pair.append((len(seq) - read_pos - 1, ref_end - ref_pos - 1))
                            else:
                                pos_pair.append((read_pos, ref_pos - ref_start))
                        if strand == "-":
                            pos_pair.reverse()
                        seq, signal_group, ref_readlocs, ref_poss, pred_pos = \
                            align_signals_and_extend_ref_seq(
                                pos_pair, signal_group, seq, ref_seq, motif_seqs,
                                methyloc, strand, ref_start, ref_end)
                    if len(signal_group) != len(seq):
                        print('signal to seq error!')
                        continue

                    base_signal_counts = [len(sig) for sig in signal_group]
                    read_signal_length = sum(base_signal_counts)
                    # total of all raw signal samples; the original
                    # sum(signal_group) raised TypeError on a list of
                    # per-base chunks
                    signal_values = sum(float(np.sum(sig)) for sig in signal_group)

                    sample_id = '\t'.join(
                        [read_name, str(reference_name), str(ref_start)])
                    # str(len(seq)): the original passed the bare int to
                    # '\t'.join, which raised TypeError for every read
                    fea_str = '\t'.join([sample_id, str(len(seq)), str(read_signal_length),
                                         str(signal_values), str(error_rate), str(identity),
                                         str(bam_read.mapping_quality),
                                         str(shift_dacs_to_pa), str(scale_dacs_to_pa),
                                         str(shift_pa_to_norm), str(scale_pa_to_norm)])
                    fea_list.append(fea_str)
                    if len(fea_list) >= d_batch_size:
                        output_Q.put(fea_list)
                        fea_list = []

            except Exception:
                # keep the worker alive on any per-read failure; log the trace
                traceback.print_exc()
                continue
    if len(fea_list) > 0:
        output_Q.put(fea_list)


def generate_key(line):
    """Build a '||'-joined key from the first two whitespace-separated columns."""
    return '||'.join(line.split()[:2])


def remove_last_line(file_path):
    """Truncate file_path in place, removing its final line.

    The file is left ending just after the newline that preceded the last
    line; a file containing a single line (no earlier newline) becomes empty.
    Useful for dropping a possibly-incomplete last line after an interrupted
    write.

    Fixes two defects in the original: it opened the file in 'r' mode, so
    truncate() raised io.UnsupportedOperation, and it never seeked before
    read(1), so the backwards scan only ever compared EOF reads ('') to '\\n'.
    """
    # 'rb+' allows both the backwards scan and the truncation; bytes mode
    # avoids newline translation shifting offsets on Windows
    with open(file_path, 'rb+') as f:
        f.seek(0, os.SEEK_END)
        file_size = f.tell()

        # nothing to remove from an empty file
        if file_size == 0:
            return

        pos = file_size - 1
        f.seek(pos)
        if f.read(1) == b'\n':
            # skip a trailing newline so the scan targets the line before it
            pos -= 1

        # scan backwards for the newline that ends the previous line
        while pos > 0:
            f.seek(pos)
            if f.read(1) == b'\n':
                pos += 1  # keep that newline; cut right after it
                break
            pos -= 1
        if pos < 0:
            pos = 0

        f.seek(pos, os.SEEK_SET)
        f.truncate()


def process_file(output_file):
    """Return the set of keys already present in output_file.

    Missing files yield an empty set, so the caller can resume cleanly.
    """
    keys = set()
    try:
        with open(output_file, 'r') as fh:
            for line in fh:
                keys.add(generate_key(line))
    except FileNotFoundError:
        pass
    return keys


def remove_key(key_input):
    """Read one key per line from key_input and return them as a set."""
    with open(key_input, 'r') as fh:
        return {line.strip() for line in fh}


def read_position_file(position_file):
    """Return a set of 'chrom||pos' keys built from each line's first two columns."""
    key_sep = "||"
    with open(position_file, "r") as rf:
        return {key_sep.join(line.strip().split()[:2]) for line in rf}


# def read_bed(bisulfite_bed):
#     key_sep = "||"
#     freqinfo = {}
#     with open(bisulfite_bed, "r") as rf:
#         for line in rf:
#             words = line.strip().split()
#             if int(words[9]) < 5:
#                 continue
#             m_key = key_sep.join([words[0], words[1]])
#             freqinfo[m_key] = float(words[10])
#     return freqinfo

def read_bed(bisulfite_bed, strict=False):
    """Load per-site methylation frequencies from a bisulfite BED file.

    Sites with coverage (column 10) below 5 are dropped. In strict mode a
    site is kept only if its opposite-strand partner (pos+1 on '-', pos-1
    on '+') exists and both values are >=95 (recorded as 100) or both are
    <=5 (recorded as 0). Otherwise every retained site's value is recorded
    directly.

    :param bisulfite_bed: path to a methylation BED file
    :param strict: apply the strand-pair high/low-confidence filter
    :return: dict mapping 'chrom||pos' -> frequency
    """
    key_sep = "||"

    # first pass: parse and coverage-filter every site, keyed by
    # (chrom, pos, strand) so both strands at a position stay distinct
    site_value = {}
    with open(bisulfite_bed, "r") as rf:
        for raw_line in rf:
            fields = raw_line.strip().split()
            chrom, pos, strand = fields[0], int(fields[1]), fields[5]
            depth = int(fields[9])
            value = float(fields[10])
            if depth < 5:
                continue
            site_value[(chrom, pos, strand)] = value

    freqinfo = {}
    if strict:
        # keep only sites whose strand pair agrees at the extremes
        for (chrom, pos, strand), value in site_value.items():
            mate = (chrom, pos + 1, '-') if strand == '+' else (chrom, pos - 1, '+')
            if mate not in site_value:
                continue
            mate_value = site_value[mate]
            if value >= 95 and mate_value >= 95:
                freqinfo[key_sep.join([chrom, str(pos)])] = 100
            elif value <= 5 and mate_value <= 5:
                freqinfo[key_sep.join([chrom, str(pos)])] = 0
    else:
        # record every coverage-filtered site as-is (strand dropped from key)
        for (chrom, pos, _strand), value in site_value.items():
            freqinfo[key_sep.join([chrom, str(pos)])] = value

    return freqinfo




def read_tsv(key_input):
    """Return the stripped lines of key_input as a list, preserving file order."""
    with open(key_input, 'r') as fh:
        return [line.strip() for line in fh]

def read_id(pod5_dir):
    """Collect every read id from all *.pod5 files under pod5_dir (recursive).

    Prints the total count and returns the ids as a list of strings.
    """
    read_ids = []
    # recurse into subdirectories for every pod5 file
    for pod5_path in Path(pod5_dir).rglob("*.pod5"):
        # one cached reader at a time keeps memory bounded
        with p5.DatasetReader(pod5_path, recursive=True, max_cached_readers=1) as reader:
            read_ids.extend(str(record.read_id) for record in reader)

    print(len(read_ids))
    return read_ids

def extract(args):
    """Top-level driver: index the BAM, enqueue read ids, and fan out workers.

    Starts nproc-1 process_dorado workers that pull read-id batches from a
    shared queue and push feature strings to an output queue, plus one writer
    process that drains the output queue into args.write_path. Terminates the
    pipeline with "kill" sentinels on both queues.
    """
    manager = mp.Manager()
    output_Q = manager.Queue()
    bam_index = bam_reader.ReadIndexedBam(args.bam)
    signal_dir = os.path.abspath(args.signal)
    pod5_dr = p5.DatasetReader(signal_dir, recursive=True,threads=args.pod5_proc)#, index=True #May cause the program to freeze
    # read ids come either from an explicit tsv or from scanning the pod5 dir
    if args.tsv is not None:
        signal_ids = read_tsv(args.tsv)
    else:
        signal_ids = read_id(args.signal)
    read_ids_Q = manager.Queue()
    # batches of 20 read ids per queue item
    fill_files_queue(read_ids_Q, signal_ids, 20, True)

    bisulfite = None
    if args.bed is not None:
        bisulfite = read_bed(args.bed,args.strict)
        print('read bisulfite')
    timewait = args.timewait
    # reserve one process slot for the writer
    ex_dp = args.nproc - 1
    ex_procs = []
    is_dna = False if args.rna else True
    motif_seqs = get_motif_seqs(args.motifs, is_dna)
    print('get motifs')
    lock = manager.Lock()  # serializes progress-bar updates across workers
    # NOTE(review): the tqdm context exits (closing the bar) right after the
    # workers are started, while they keep calling progress_bar.update —
    # confirm this renders as intended
    with tqdm(total=len(signal_ids), desc="Processing", ncols=100) as progress_bar:
        for i in range(ex_dp):
            pb = mp.Process(target=process_dorado, args=(bam_index, pod5_dr, bisulfite, read_ids_Q, output_Q, motif_seqs,progress_bar,lock,args.chr,args.ref,args.label, args.mod_loc, timewait,args.mapq,args.kmer),
                            name="pb_reader")

            pb.daemon = True
            pb.start()
            ex_procs.append(pb)
    print('start process dorado')
    p_w = mp.Process(target=_write_featurestr, args=(args.write_path, output_Q, args.control, timewait),
                     name="writer")
    p_w.daemon = True
    p_w.start()
    print('start writer')
    # sentinel: workers re-queue it so every worker eventually sees it
    read_ids_Q.put("kill")
    for pb in ex_procs:
        pb.join()
    # all workers done: tell the writer to finish
    output_Q.put("kill")
    print('finished')
    p_w.join()


def parse_args():
    """Build and parse the command-line arguments for feature extraction.

    Required: --bam, --signal (pod5 dir), --write_path, --nproc.
    Returns the parsed argparse.Namespace.
    """
    parser = argparse.ArgumentParser("")
    parser.add_argument("--keyset", type=str, required=False)
    parser.add_argument("--hp", type=str, required=False,
                        help='high confidence positive position')
    parser.add_argument("--hn", type=str, required=False,
                        help='high confidence negative position')
    parser.add_argument("--bam", type=str, required=True)
    parser.add_argument("--pod5", action="store_true", default=False, required=False,
                        help='use pod5, default false')
    parser.add_argument("--bed", type=str, required=False)
    parser.add_argument("--signal", type=str, required=True)
    parser.add_argument("--tsv", type=str, required=False)
    parser.add_argument("--write_path", type=str, required=True)
    parser.add_argument("--nproc", "-p", type=int, required=True)
    parser.add_argument("--timewait", "-t", default=0.1,
                        type=float, required=False)
    parser.add_argument("--d_batch_size", action="store", type=int, default=2,
                        required=False)
    parser.add_argument("--control", type=int, required=False,
                        help='test')
    parser.add_argument("--label",  type=int,
                        required=False)
    parser.add_argument('--mapq', type=int, default=0, required=False)
    parser.add_argument('--kmer', type=int, default=6, required=False)
    parser.add_argument('--pod5_proc','-d', type=int, default=10, required=False)
    parser.add_argument(
        "--motifs",
        action="store",
        type=str,
        required=False,
        default="CG",
        help="motif seq to be extracted, default: CG. "
        "can be multi motifs splited by comma "
        "(no space allowed in the input str), "
        "or use IUPAC alphabet, "
        "the mod_loc of all motifs must be "
        "the same",
    )
    parser.add_argument(
        "--mod_loc",
        action="store",
        type=int,
        required=False,
        default=0,
        help="0-based location of the targeted base in the motif, default 0",
    )
    parser.add_argument(
        "--rna",
        action="store_true",
        default=False,
        required=False,
        help="the fast5 files are from RNA samples. if is rna, the signals are reversed. "
        "NOTE: Currently no use, waiting for further extentsion",
    )
    parser.add_argument(
        "--ref",
        action="store_true",
        default=False,
        required=False,
        help="wetheter use ref sequence  instead of read sequence, default false.",
    )
    parser.add_argument('--strict', action="store_true", default=False, required=False)
    parser.add_argument("--chr", type=str, required=False,help='only extract chr')

    return parser.parse_args()


def main():
    """CLI entry point: parse arguments and run the extraction pipeline."""
    extract(parse_args())


if __name__ == '__main__':
    # Script entry: main() returns None, so the exit status is 0 on success.
    sys.exit(main())
