import pysam
import argparse
import sys
import os
from multiprocessing import Manager
import multiprocessing as mp
import gzip
import mmap
import pod5 as p5
import time
import datetime
import numpy as np
from ont_fast5_api.fast5_interface import get_fast5_file
from pathlib import Path
# from memory_profiler import profile
from deepsignal3.utils.process_utils import CIGAR2CODE
from deepsignal3.utils.process_utils import CIGAR_REGEX
from deepsignal3.utils import bam_reader
from deepsignal3.extract_features_pod5 import _group_signals_by_movetable_v2
from deepsignal3.utils.ref_reader import get_contig2len_n_seq
from deepsignal3.utils.process_utils import fill_files_queue
from deepsignal3.utils.process_utils import get_motif_seqs
from deepsignal3.utils.process_utils import get_refloc_of_methysite_in_motif
import re
import traceback

# import threading
# define the termination event (currently unused; kept for reference)
# event = threading.Event()


def _write_featurestr(write_fp, featurestr_q, time_wait=1, control=False):
    # print('write_process-{} starts'.format(os.getpid()))
    output = 0
    if os.path.exists(write_fp):
        with open(write_fp, 'a') as wf:
            while True:
                # during test, it's ok without the sleep(time_wait)
                if featurestr_q.empty():
                    time.sleep(time_wait)
                    continue
                features_str = featurestr_q.get()
                if features_str == "kill":
                    # print('output line {}'.format(output))
                    # print('write_process-{} finished'.format(os.getpid()))
                    break
                for one_features_str in features_str:
                    output += 1
                    wf.write(one_features_str + "\n")
                wf.flush()
    else:
        with open(write_fp, 'w') as wf:
            while True:
                # during test, it's ok without the sleep(time_wait)
                if featurestr_q.empty():
                    time.sleep(time_wait)
                    continue
                features_str = featurestr_q.get()
                if features_str == "kill":
                    # print('output line {}'.format(output))
                    # print('write_process-{} finished'.format(os.getpid()))
                    break
                for one_features_str in features_str:
                    output += 1
                    wf.write(one_features_str + "\n")
                    if control is True and output >= 4000:
                        # 设定终止标志
                        # event.set()
                        sys.exit(0)
                wf.flush()

##########
# process tsv and alignment to bam and pod5
##########


def get_q2tloc_from_cigar(r_cigar_tuple, strand, seq_len):
    """Map every aligned query (read) position to its reference position.

    Insertions relative to the reference are marked with -1; positions the
    cigar never visits keep the sentinel -2.

    :param r_cigar_tuple: pysam.AlignedSegment.cigartuples
    :param strand: 1 for forward, -1 for reverse (cigar ops are reversed)
    :param seq_len: length of the aligned portion of the read
    :return: numpy int32 array of length seq_len + 1; the last slot records
        the total reference length consumed by the alignment
    :raises ValueError: if the cigar does not account for seq_len query bases
    """
    unvisited = -2
    mapping = np.full(seq_len + 1, unvisited, dtype=np.int32)
    ref_idx = 0
    query_idx = 0
    # Walk cigar operations in read direction.
    ops = r_cigar_tuple if strand == 1 else r_cigar_tuple[::-1]
    for code, length in ops:
        if code == 1:
            # Insertion: query bases with no reference partner.
            mapping[query_idx:query_idx + length] = -1
            query_idx += length
        elif code in (2, 3):
            # Deletion / reference skip: only the reference advances.
            ref_idx += length
        elif code in (0, 7, 8):
            # Match, sequence-equal, or mismatch: both sides advance in step.
            mapping[query_idx:query_idx + length] = np.arange(
                ref_idx, ref_idx + length, dtype=np.int32)
            query_idx += length
            ref_idx += length
        elif code == 6:
            # Padding consumes neither query nor reference.
            pass
    mapping[query_idx] = ref_idx
    if mapping[-1] == unvisited:
        raise ValueError(
            (
                "Invalid cigar string encountered. Reference length: {}  Cigar "
                + "implied reference length: {}"
            ).format(seq_len, ref_idx)
        )
    return mapping


def process_dorado(bam_index, pod5_dr, read_ids_Q, output_Q, motif_seqs, contigs, label, methyloc, time_wait=1, d_batch_size=2, qsize_limit=22):
    """Worker loop: pair pod5 raw signal with dorado BAM alignments and push
    batches of tab-separated feature strings onto output_Q.

    :param bam_index: read-id indexed BAM reader (bam_reader.ReadIndexedBam)
    :param pod5_dr: pod5 DatasetReader used to fetch raw signal by read id
    :param read_ids_Q: queue of read-id batches; the string "kill" shuts down
    :param output_Q: queue receiving lists (batches) of feature strings
    :param motif_seqs: motif sequences to locate within each read
    :param contigs: dict mapping reference name -> reference sequence string
    :param label: integer class label attached to every extracted site
    :param methyloc: 0-based offset of the modified base within the motif
    :param time_wait: seconds to sleep when polling full/empty queues
    :param d_batch_size: feature strings accumulated before each output_Q.put
    :param qsize_limit: back-pressure threshold on output_Q size
    """
    fea_list = []
    while True:
        # Poll until a batch of read ids is available.
        while read_ids_Q.empty():
            time.sleep(time_wait)
        readids = read_ids_Q.get()
        if readids == "kill":
            # Re-queue the sentinel so sibling worker processes also stop.
            read_ids_Q.put("kill")
            break
        for readid in readids:
            # Back-pressure: pause input while the writer falls behind.
            while output_Q.qsize() >= qsize_limit:
                # print('Pausing bam and pod5 input due to OUTPUT queue size limit. Output_qsize=%d' %(output_Q.qsize()), flush=True)
                time.sleep(time_wait)
            read = pod5_dr.get_read(readid)
            if read is None:
                continue
            signal = read.signal
            # DAC->picoamp calibration; recorded in the output, not applied.
            shift_dacs_to_pa = read.calibration.offset
            scale_dacs_to_pa = read.calibration.scale
            # read_iter=bam_index.find(read_name)
            try:
                for bam_read in bam_index.get_alignments(readid):
                    # Keep only mapped, primary, non-supplementary alignments
                    # with mapping quality >= 10.
                    if bam_read.is_mapped == False:  # bam_read.is_supplementary or bam_read.is_secondary or
                        continue
                    if bam_read.is_supplementary or bam_read.is_secondary:
                        continue
                    if bam_read.mapping_quality < 10:
                        continue

                    reference_name = bam_read.reference_name
                    # Basecalled sequence in original sequencing orientation.
                    seq = bam_read.get_forward_sequence()
                    if seq is None or signal is None:
                        continue
                    if bam_read.infer_query_length() != len(seq):
                        print(
                            'infer read length is not same as seq length of readid {}'.format(readid))
                        continue
                    # Dorado tags: ts (trimmed samples), sm/sd (normalization
                    # shift/scale), mv (move table).
                    read_dict = dict(bam_read.tags)

                    strand_code = 0 if bam_read.is_reverse else 1

                    strand = "-" if strand_code == 0 else "+"
                    # find_key=(read_name,reference_name)
                    ref_start = bam_read.reference_start
                    # read_key='||'.join([read_name,reference_name,str(ref_start)])
                    # if read_key in remove_indexes:
                    #    continue
                    # Motif site positions within the forward-oriented read.
                    tsite_locs = get_refloc_of_methysite_in_motif(
                        seq, set(motif_seqs), methyloc)
                    q_to_r_poss = None
                    ref_end = bam_read.reference_end
                    cigar_tuples = bam_read.cigartuples
                    qalign_start = bam_read.query_alignment_start
                    qalign_end = bam_read.query_alignment_end
                    # Skip alignments covering less than half of the read.
                    if (qalign_end-qalign_start) / bam_read.query_length < 0.5:
                        continue
                    # Convert the aligned query interval into coordinates on
                    # the forward (sequencing-orientation) read.
                    if bam_read.is_reverse:
                        seq_start = len(seq) - qalign_end
                        seq_end = len(seq) - qalign_start
                    else:
                        seq_start = qalign_start
                        seq_end = qalign_end
                    # Per-base query -> reference map over the aligned
                    # interval (insertions map to -1).
                    q_to_r_poss = get_q2tloc_from_cigar(
                        cigar_tuples, strand_code, (seq_end - seq_start)
                    )

                    num_trimmed = read_dict["ts"]
                    shift_pa_to_norm = read_dict["sm"]
                    scale_pa_to_norm = read_dict["sd"]
                    mv_table = read_dict["mv"]
                    # Drop the samples trimmed by the basecaller.
                    # NOTE(review): the negative-ts branch trims from the end
                    # of the signal — confirm this matches dorado's semantics.
                    if num_trimmed >= 0:
                        # (signal[num_trimmed:] - norm_shift) / norm_scale
                        signal_trimmed = signal[num_trimmed:]
                    else:
                        # (signal[:num_trimmed] - norm_shift) / norm_scale
                        signal_trimmed = signal[:num_trimmed]
                    # sshift, sscale = np.mean(signal_trimmed), float(np.std(signal_trimmed))
                    # if sscale == 0.0:
                    #    norm_signals = signal_trimmed
                    # else:
                    #    norm_signals = (signal_trimmed - sshift) / sscale
                    # mv tag: first element is the stride, the rest the move
                    # vector (dorado convention); group raw samples per base.
                    signal_group = _group_signals_by_movetable_v2(
                        signal_trimmed, np.asarray(mv_table[1:]), int(mv_table[0]))
                    signal_group_new = []
                    for sig in signal_group:
                        signal_group_new.append(
                            np.round(np.array(sig), decimals=6))
                    ref_poss = []
                    pred_pos = []
                    # Map each motif site to a reference coordinate; -1 marks
                    # sites without a valid mapping.
                    for loc_in_read in tsite_locs:
                        pred_pos.append(loc_in_read)
                        ref_pos = -1
                        if loc_in_read < len(seq):
                            if seq_start <= loc_in_read < seq_end:
                                offset_idx = loc_in_read - seq_start
                                if q_to_r_poss[offset_idx] != -1:
                                    if strand == "-":
                                        # pos = '.'#loc_in_read
                                        ref_pos = ref_end - 1 - \
                                            q_to_r_poss[offset_idx]
                                        # NOTE(review): reference-side motif
                                        # check is hard-coded to 'CG' even
                                        # though motif_seqs is a parameter.
                                        if contigs[reference_name][ref_pos-1:ref_pos+1] != 'CG':
                                            ref_pos = -1

                                    else:
                                        # pos = loc_in_read
                                        ref_pos = ref_start + \
                                            q_to_r_poss[offset_idx]
                                        if contigs[reference_name][ref_pos:ref_pos+2] != 'CG':
                                            ref_pos = -1
                        ref_poss.append(ref_pos)

                    # Signal groups serialized ';'-separated per base,
                    # ','-separated per sample.
                    norm_signals_text = ';'.join(
                        [",".join([str(y) for y in x]) for x in signal_group_new])

                    pred_label = [label]*len(pred_pos)

                    # Placeholder columns for downstream prediction tools.
                    pred_deepsignal_text = '.'
                    pred_dorado_text = '.'
                    mean_pred_text = '.'
                    pred_label_text = ','.join(
                        [str(x) for x in pred_label])
                    pred_pos_text = ','.join([str(x)
                                              for x in pred_pos])
                    sample_id = '\t'.join(
                        [readid, reference_name, str(ref_start)])
                    # Pseudo bisulfite frequency: label scaled to percent.
                    bisulfite_text = ','.join(
                        [str(x*100) for x in pred_label])
                    ref_pos_text = ','.join([str(x) for x in ref_poss])
                    fea_str = '\t'.join([sample_id, seq, norm_signals_text, pred_pos_text, pred_dorado_text, pred_deepsignal_text, mean_pred_text, pred_label_text, str(bam_read.mapping_quality),
                                         str(shift_dacs_to_pa), str(scale_dacs_to_pa), str(shift_pa_to_norm), str(scale_pa_to_norm), bisulfite_text, ref_pos_text])
                    fea_list.append(fea_str)
                    if len(fea_list) >= d_batch_size:
                        output_Q.put(fea_list)
                        fea_list = []
            except Exception as e:
                # NOTE(review): every per-read error is swallowed silently;
                # re-enable the traceback below when debugging.
                # traceback.print_exc()
                continue
    # Flush any partial batch after the kill sentinel is seen.
    if len(fea_list) > 0:
        output_Q.put(fea_list)

def generate_key(line):
    """Build a '||'-joined key from the first two whitespace columns."""
    return '||'.join(line.split()[:2])


def remove_key(key_input):
    """Read one key per line from key_input and return them as a set."""
    with open(key_input, 'r') as input_file:
        return {entry.strip() for entry in input_file}


def read_position_file(position_file):
    """Return the set of 'col0||col1' keys found in position_file."""
    key_sep = "||"
    with open(position_file, "r") as rf:
        return {key_sep.join(ln.strip().split()[:2]) for ln in rf}


def read_bed(bisulfite_bed):
    """Parse a bisulfite bedMethyl file into {'chrom||pos': frequency}.

    Records whose coverage (column 10) is below 5 are skipped; column 11
    supplies the methylation frequency.
    """
    key_sep = "||"
    freqinfo = {}
    with open(bisulfite_bed, "r") as rf:
        for record in rf:
            fields = record.strip().split()
            if int(fields[9]) < 5:
                continue
            freqinfo[key_sep.join((fields[0], fields[1]))] = float(fields[10])
    return freqinfo


def read_id(pod5_dir):
    """Collect every read id (as str) from all *.pod5 files under pod5_dir.

    The directory tree is searched recursively; the total count of ids found
    is printed before returning.
    """
    read_ids = []
    # Recursive search of the whole tree for pod5 files.
    for pod5_path in Path(pod5_dir).rglob("*.pod5"):
        with p5.DatasetReader(pod5_path, recursive=True, max_cached_readers=1) as dataset:
            for record in dataset:
                read_ids.append(str(record.read_id))

    print(len(read_ids))
    return read_ids


def extract(args):
    """Orchestrate feature extraction: spawn dorado worker processes and a
    writer process, then block until all of them finish.

    Uses args.nproc - 1 process_dorado workers plus one _write_featurestr
    writer; communication goes through manager queues.

    :param args: parsed argparse namespace (see parse_args)
    """
    manager = mp.Manager()
    output_Q = manager.Queue()
    # BAM indexed by read id so workers can look up alignments directly.
    bam_index = bam_reader.ReadIndexedBam(args.bam)
    reference_path = args.reference
    chrom2len, contigs = get_contig2len_n_seq(reference_path)
    # bam_index.build()
    # print('%s: Finished building BAM index.' %str(datetime.datetime.now()), flush=True)
    signal_dir = os.path.abspath(args.signal)
    pod5_dr = p5.DatasetReader(signal_dir, recursive=True)
    print('interface')
    #############
    # Enqueue read ids (presumably in batches of 20 — confirm against
    # fill_files_queue), then append the shutdown sentinel.
    signal_ids = read_id(args.signal)
    read_ids_Q = manager.Queue()
    fill_files_queue(read_ids_Q, signal_ids, 20, True)
    print('queue')
    read_ids_Q.put("kill")
    timewait = args.timewait
    # One process slot is reserved for the writer.
    ex_dp = args.nproc - 1
    ex_procs = []
    is_dna = False if args.rna else True
    motif_seqs = get_motif_seqs(args.motifs, is_dna)
    for i in range(ex_dp):
        pb = mp.Process(target=process_dorado, args=(bam_index, pod5_dr, read_ids_Q, output_Q, motif_seqs, contigs, args.label, args.mod_loc, timewait),
                        name="pb_reader")

        pb.daemon = True
        pb.start()
        ex_procs.append(pb)
    p_w = mp.Process(target=_write_featurestr, args=(args.write_path, output_Q, timewait, args.control),
                     name="writer")
    p_w.daemon = True
    p_w.start()
    # while args.control and not event.is_set():
    #    sys.exit(0)
    # read_ids_Q.put("kill")
    for pb in ex_procs:
        pb.join()
    # All workers done: tell the writer to finish, then wait for it.
    output_Q.put("kill")
    p_w.join()


def parse_args():
    """Define and parse the command-line options for feature extraction."""
    ap = argparse.ArgumentParser("")
    ap.add_argument("--reference", type=str, required=True)
    ap.add_argument("--bam", type=str, required=True)
    ap.add_argument("--signal", type=str, required=True)
    ap.add_argument("--write_path", type=str, required=True)
    ap.add_argument("--nproc", "-p", type=int, required=True)
    ap.add_argument("--timewait", "-t", default=0.1, type=float, required=False)
    ap.add_argument("--label", type=int, required=True)
    ap.add_argument(
        "--rna",
        action="store_true",
        default=False,
        required=False,
        help="the fast5 files are from RNA samples. if is rna, the signals are reversed. NOTE: Currently no use, waiting for further extentsion",
    )
    ap.add_argument(
        "--mod_loc",
        action="store",
        type=int,
        required=False,
        default=0,
        help="0-based location of the targeted base in the motif, default 0",
    )
    ap.add_argument(
        "--motifs",
        action="store",
        type=str,
        required=False,
        default="CG",
        help="motif seq to be extracted, default: CG. can be multi motifs splited by comma (no space allowed in the input str), or use IUPAC alphabet, the mod_loc of all motifs must be the same",
    )
    ap.add_argument("--d_batch_size", action="store", type=int, default=2, required=False)
    ap.add_argument("--control", action="store_true", default=False, required=False, help='test')

    return ap.parse_args()


def main():
    """CLI entry point: parse arguments, then run extraction."""
    extract(parse_args())


if __name__ == '__main__':
    # main() returns None, so the process exit status is 0 on success.
    sys.exit(main())
