import argparse
import gzip
import sys
import mappy
from collections import namedtuple
import time
import threading
import os
import multiprocessing as mp
import numpy as np
from deepsignal3.extract_features import _write_featurestr
from deepsignal3.utils.process_utils import str2bool

# Result of mapping one read to the reference (0-based coordinates):
#   read_id : query read identifier (supplied by the caller)
#   ctg     : name of the reference sequence the query is mapped to
#   strand  : 1 for forward, -1 for reverse (see read_feature / get_q2tloc_from_cigar)
#   r_st/r_en : alignment start/end on the reference
#   q_st/q_en : alignment start/end on the query
#   cigar   : cigar as (op_len, op) tuples
#   mapq    : mapping quality reported by mappy
MAP_RES = namedtuple('MAP_RES', (
    'read_id', 'ctg', 'strand', 'r_st', 'r_en',
    'q_st', 'q_en', 'cigar', 'mapq'))
def parse_args():
    """Parse command-line options for the read-to-reference feature mapper."""
    parser = argparse.ArgumentParser("")
    # the three required path arguments share the same spec
    for long_opt, short_opt in (("--input", "-i"),
                                ("--output", "-o"),
                                ("--reference_path", "-r")):
        parser.add_argument(long_opt, short_opt, type=str, required=True)
    parser.add_argument("--number", "-n", type=int, required=False, default=-1)
    parser.add_argument("--w_batch_num", type=int, required=False, default=200)
    parser.add_argument("--w_is_dir", action="store", type=str,
                        required=False, default="no",
                        help='if using a dir to save features into multiple files')
    parser.add_argument("--best_n", type=int, default=1, required=False,
                        help="best_n arg in mappy(minimap2), default 1")
    return parser.parse_args()

def get_aligner(ref_path, best_n):
    """Build a mappy (minimap2) aligner over the given reference, map-ont preset."""
    print("get mappy(minimap2) Aligner")
    return mappy.Aligner(str(ref_path),
                         preset="map-ont",
                         best_n=best_n)

def align_read(q_seq, aligner, map_thr_buf, read_id=None):
    """Align one query sequence with mappy and return the first hit.

    Returns a MAP_RES (0-based coordinates) or None when no alignment
    is produced.
    """
    # materialize every alignment to avoid memory leak from mappy
    hits = list(aligner.map(str(q_seq), buf=map_thr_buf))
    if not hits:
        # alignment not produced
        return None
    best = hits[0]
    return MAP_RES(
        read_id=read_id,
        ctg=best.ctg,  # name of the reference sequence the query is mapped to
        strand=best.strand,
        r_st=best.r_st, r_en=best.r_en,
        q_st=best.q_st, q_en=best.q_en,
        cigar=best.cigar,
        mapq=best.mapq)

def _map_read_to_ref_process(aligner, map_conn):
    """Serve alignment requests arriving on map_conn until the pipe closes.

    For each (read_id, sequence) received, replies with (1, map_res_tuple)
    on success or (0, None) when no alignment was found.
    """
    thread_buf = mappy.ThreadBuffer()
    print("align process starts")
    while True:
        try:
            read_id, q_seq = map_conn.recv()
        except EOFError:
            # the other end of the pipe was closed -- shut down
            print("align process ending")
            break
        res = align_read(q_seq, aligner, thread_buf, read_id)
        reply = (0, None) if res is None else (1, tuple(res))
        map_conn.send(reply)

def start_map_threads(map_conn, aligner):
    """Start and return the daemon thread answering alignment requests on map_conn."""
    worker = threading.Thread(
        name='aligner',
        target=_map_read_to_ref_process,
        args=(aligner, map_conn),
        daemon=True)
    worker.start()
    return worker

def get_q2tloc_from_cigar(r_cigar_tuple, strand, seq_len):
    """Map each aligned query position to its reference offset.

    insertion: -1, fill/uncovered: -2
    :param r_cigar_tuple: (op_len, op) pairs, pysam/mappy cigar op codes
        (0/7/8 aligned, 1 insertion, 2/3 deletion or ref skip, 6 padding)
    :param strand: 1/-1 for fwd/rev (rev walks the cigar backwards)
    :param seq_len: read alignment length (query span, q_en - q_st)
    :return: np.int32 array of length seq_len + 1; entry i is the ref
        offset (relative to alignment start) of query position i, -1 for
        inserted bases; the final entry is the total ref span consumed
    :raises ValueError: if the cigar does not cover seq_len query bases
    """
    fill_invalid = -2
    # get each base call's genomic position
    q_to_r_poss = np.full(seq_len + 1, fill_invalid, dtype=np.int32)
    # process cigar ops in read direction
    curr_r_pos, curr_q_pos = 0, 0
    cigar_ops = r_cigar_tuple if strand == 1 else r_cigar_tuple[::-1]
    for op_len, op in cigar_ops:
        if op == 1:
            # inserted bases consume query only
            q_to_r_poss[curr_q_pos:curr_q_pos + op_len] = -1
            curr_q_pos += op_len
        elif op in (2, 3):
            # deleted/skipped ref bases consume reference only
            curr_r_pos += op_len
        elif op in (0, 7, 8):
            # aligned bases: vectorized assignment instead of a per-base loop
            q_to_r_poss[curr_q_pos:curr_q_pos + op_len] = np.arange(
                curr_r_pos, curr_r_pos + op_len, dtype=np.int32)
            curr_q_pos += op_len
            curr_r_pos += op_len
        elif op == 6:
            # padding (shouldn't happen in mappy)
            pass
    q_to_r_poss[curr_q_pos] = curr_r_pos
    if q_to_r_poss[-1] == fill_invalid:
        # NOTE: seq_len is the query span, not a reference length
        raise ValueError(('Invalid cigar string encountered. Query span length: {}  Cigar ' +
                          'implied reference length: {}').format(seq_len, curr_r_pos))
    return q_to_r_poss

def read_feature(input, feature_Q, extract_conn, number, batch_size=1, qsize_limit=20, time_wait=1):
    """Read per-read call lines, map each read to the reference via the
    aligner pipe, and push per-site feature strings onto feature_Q.

    Bug fix: the MAP_RES tuple is now constructed ONLY after checking the
    success flag -- previously MAP_RES(*map_res) ran first and crashed with
    TypeError whenever a read produced no alignment (map_res is None).

    :param input: path to a tab-separated call file; uses column 0 (read id),
        1 (contig), 3 (read sequence), 5 (comma-separated call positions),
        8 (mean predictions), 9 (predicted labels)
    :param feature_Q: mp.Queue receiving batches (lists) of feature strings
    :param extract_conn: pipe end to the aligner thread; send (read_id, seq),
        receive (success_flag, MAP_RES-tuple-or-None)
    :param number: stop after emitting this many features (-1 = unlimited)
    :param batch_size: number of feature strings per queue item
    :param qsize_limit: pause reading while feature_Q holds this many batches
    :param time_wait: seconds to sleep while the queue is over the limit
    """
    print('begin read feature', flush=True)
    fea_list = []
    count = 0
    with open(input, 'r') as f:
        flag = 0
        for line in f:
            # throttle the producer when the downstream writer falls behind
            while feature_Q.qsize() >= qsize_limit:
                time.sleep(time_wait)
            words = line.strip().split('\t')
            seq = words[3]
            pred_pos = np.array([int(x) for x in words[5].split(",")])
            extract_conn.send((words[0], seq))
            success, map_res = extract_conn.recv()
            if success == 0:
                # no alignment produced for this read; skip it
                continue
            map_res = MAP_RES(*map_res)
            q_start, q_end = map_res.q_st, map_res.q_en
            r_start, r_end = map_res.r_st, map_res.r_en
            cigar_tuple = map_res.cigar
            strand_code = map_res.strand
            strand = '+' if strand_code == 1 else '-'
            if strand_code == -1:
                # query coords are on the forward read; flip to read direction
                seq_start = len(seq) - q_end
                seq_end = len(seq) - q_start
            else:
                seq_start = q_start
                seq_end = q_end
            try:
                q_to_r_poss = get_q2tloc_from_cigar(cigar_tuple, strand_code, (seq_end - seq_start))
            except ValueError:
                print(strand_code, flush=True)
                print(cigar_tuple, flush=True)
                break
            mean_preds = np.array([float(x) for x in words[8].split(",")])
            pred_labels = np.array([int(x) for x in words[9].split(",")])
            for i, loc_in_read in enumerate(pred_pos):
                mean_pred = mean_preds[i]
                pred_label = pred_labels[i]
                ref_pos = -1
                if seq_start <= loc_in_read < seq_end:
                    offset_idx = loc_in_read - seq_start
                    if q_to_r_poss[offset_idx] != -1:
                        if strand_code == -1:
                            ref_pos = r_end - 1 - q_to_r_poss[offset_idx]
                        else:
                            ref_pos = r_start + q_to_r_poss[offset_idx]
                else:
                    # call position lies outside the aligned query span
                    continue
                # 5-mer of the read sequence centered on the call position
                bkmer_start = max(loc_in_read - 2, 0)
                bkmer_end = min(loc_in_read + 3, len(seq))
                fea_list.append('\t'.join([words[1], str(ref_pos), strand, '.', words[0], '.',
                                           str(1 - mean_pred), str(mean_pred), str(pred_label),
                                           seq[bkmer_start:bkmer_end]]))
                count += 1
                if number != -1 and count >= number:
                    # reached the requested feature count; the pending batch
                    # (including this feature) is flushed after the loops
                    flag = 1
                    break
                if len(fea_list) >= batch_size:
                    feature_Q.put(fea_list)
                    fea_list = []
            if flag == 1:
                break

    if len(fea_list) > 0:
        feature_Q.put(fea_list)



def main():
    """Wire up the aligner thread, the feature-reader process and the writer.

    Bug fix: the parent now closes its copy of extract_conn after handing it
    to the reader process. Previously the parent's open copy kept the pipe
    alive, so the aligner thread's recv() never raised EOFError after the
    reader exited and map_read_ts.join() blocked forever.
    """
    args = parse_args()
    ref_path = os.path.abspath(args.reference_path)
    if not os.path.exists(ref_path):
        raise ValueError("--reference_path not set right!")
    aligner = get_aligner(ref_path, args.best_n)
    map_conn, extract_conn = mp.Pipe()
    feature_Q = mp.Queue()
    map_read_ts = start_map_threads(map_conn, aligner)

    p = mp.Process(target=read_feature,
                   args=(args.input, feature_Q, extract_conn, args.number),
                   name='read_feature')
    p.daemon = True
    p.start()
    # drop the parent's reference to the child's pipe end so the aligner
    # thread sees EOF once the reader process terminates
    extract_conn.close()

    p_w = mp.Process(target=_write_featurestr,
                     args=(args.output, feature_Q, args.w_batch_num,
                           str2bool(args.w_is_dir)),
                     name="writer")
    p_w.daemon = True
    p_w.start()

    p.join()
    map_read_ts.join()
    feature_Q.put("kill")  # sentinel telling the writer process to stop
    p_w.join()

if __name__ == '__main__':
    sys.exit(main())