import pysam
import argparse
import sys
import os
from multiprocessing import Manager
import multiprocessing as mp
import gzip
import mmap
import pod5 as p5
import time
import datetime
import numpy as np
from ont_fast5_api.fast5_interface import get_fast5_file
from pathlib import Path
#from memory_profiler import profile
from deepsignal3.utils.process_utils import CIGAR2CODE
from deepsignal3.utils.process_utils import CIGAR_REGEX
from deepsignal3.utils.process_utils import fill_files_queue
from deepsignal3.utils import bam_reader
from deepsignal3.extract_features_pod5 import _group_signals_by_movetable_v2
from deepsignal3.extract_features import _convert_cigarstring2tuple
from deepsignal3.extract_features import parse_cigar
import re
import traceback
from tqdm import tqdm

#import threading
# Define the termination event (for the commented-out threading approach).
#event = threading.Event()

def _write_featurestr(write_fp, featurestr_q,time_wait = 1,control=False):
    #print('write_process-{} starts'.format(os.getpid()))
    output=0

    with open(write_fp, 'w') as wf:
        while True:
            # during test, it's ok without the sleep(time_wait)
            if featurestr_q.empty():
                time.sleep(time_wait)
                continue
            features_str = featurestr_q.get()
            if features_str == "kill":
                #print('output line {}'.format(output))
                #print('write_process-{} finished'.format(os.getpid()))
                break
            for one_features_str in features_str:
                output+=1                   
                wf.write(one_features_str + "\n")
                if control is True and output>=4000:
                    # 设定终止标志
                    #event.set()
                    sys.exit(0)
            wf.flush()

##########
#process tsv and alignment to bam and pod5
##########
def process_basecall(input_file,ref_fasta,basecall_Q,identity_limit,time_wait=1,reads_per_chunk=2,qsize_limit=20):
    """Producer: read alignment rows from a TSV and the matching reference
    sequences from a FASTA, and feed chunks of
    ``(read_info, strand, align_key, cigar, ref_seq)`` tuples into
    ``basecall_Q``.

    The two files are consumed in lockstep: the i-th kept TSV row is paired
    with the i-th FASTA record (rows below ``identity_limit`` are skipped
    without consuming a FASTA record — presumably the inputs are pre-filtered
    to match; TODO confirm with the producer of these files).

    Args:
        input_file: TSV path; columns are read_info ("id:start-end"), strand,
            align_key ("ref:start-end"), identity, cigar.
        ref_fasta: FASTA path with one record per kept TSV row.
        basecall_Q: output queue; receives lists of feature tuples.
        identity_limit: rows with identity below this value are skipped.
        time_wait: seconds to sleep while ``basecall_Q`` is over the limit.
        reads_per_chunk: number of tuples per queue item.
        qsize_limit: back-pressure threshold on ``basecall_Q``.
    """
    # tqdm is only cosmetic (progress bar); fall back to identity so the
    # function also runs where tqdm is not installed.
    try:
        from tqdm import tqdm as _progress
    except ImportError:
        def _progress(iterable):
            return iterable

    fea_list = []
    # 'with' guarantees both files are closed (the original leaked reffile).
    with open(input_file, 'r') as infile, open(ref_fasta, 'r') as reffile:
        # Consume the first FASTA header; sequence lines are read per row.
        ref_line = reffile.readline()
        ref_seq = ''
        for line in _progress(infile):
            words = line.strip().split("\t")
            identity = float(words[3])
            if identity < identity_limit:
                continue
            read_info = words[0]
            strand = words[1]
            align_key = words[2]
            cigar = words[4]

            # Accumulate sequence lines until the next header — or EOF.
            # BUGFIX: the empty-string guard is required; at end of file
            # readline() returns '' forever and the original loop never
            # terminated on the last FASTA record.
            ref_line = reffile.readline()
            while ref_line and not ref_line.startswith(">"):
                ref_seq = ref_seq + ref_line.strip().upper()
                ref_line = reffile.readline()

            fea_list.append((read_info, strand, align_key, cigar, ref_seq))
            ref_seq = ''
            if len(fea_list) >= reads_per_chunk:
                # Back-pressure: wait for consumers to drain the queue.
                while basecall_Q.qsize() >= qsize_limit:
                    time.sleep(time_wait)
                basecall_Q.put(fea_list)
                fea_list = []
    # Flush the final partial chunk.
    if len(fea_list) > 0:
        basecall_Q.put(fea_list)



def process_dorado(bam_index,pod5_dr,basecall_Q,output_Q,coverage_ratio_limit,time_wait=1,d_batch_size=2,qsize_limit=22):
    """Worker loop: consume alignment tuples from ``basecall_Q``, join each
    read with its primary BAM record and its pod5 signal, group the signal
    per reference position via the move table, and push tab-separated
    feature strings to ``output_Q`` in batches of ``d_batch_size``.

    Terminates on the "kill" sentinel, which it re-queues so sibling worker
    processes also shut down.

    Args:
        bam_index: random-access BAM index exposing get_alignments(read_name).
        pod5_dr: pod5 dataset reader exposing get_read(read_name).
        basecall_Q: input queue of lists of
            (read_info, strand, align_key, cigar, ref_seq) tuples, where
            read_info is "<read_id>:<align_start>-<align_end>" and align_key
            is "<ref_name>:<ref_start>-<ref_end>".
        output_Q: output queue receiving lists of feature strings.
        coverage_ratio_limit: minimum (aligned length / read length) ratio.
        time_wait: polling sleep in seconds for queue back-pressure.
        d_batch_size: number of feature strings per output batch.
        qsize_limit: back-pressure threshold on ``output_Q``.
    """
    fea_list=[]
    while True:
        # Poll for input; the single producer may be slower than the workers.
        while basecall_Q.empty():
            time.sleep(time_wait)
        read_data = basecall_Q.get()
        if read_data == "kill":
            # Re-queue the sentinel so the other workers also see it.
            basecall_Q.put("kill")
            break
        for read_info,strand,align_key,cigar,ref_seq in read_data:
            # Back-pressure: wait for the writer to drain the output queue.
            while output_Q.qsize()>=qsize_limit:
                #print('Pausing bam and pod5 input due to OUTPUT queue size limit. Output_qsize=%d' %(output_Q.qsize()), flush=True)
                time.sleep(time_wait)
            # read_info is "<read_id>:<start>-<end>"; keep only the read id.
            read_name=read_info.split(":")[0]
            read=pod5_dr.get_read(read_name)
            if read is None:
                print('read name: {} can\'t find in pod5'.format(read_name))
                #print(read_name)
                continue
            signal=read.signal
            # pod5 calibration for converting raw DAC values to picoamperes.
            shift_dacs_to_pa=read.calibration.offset
            scale_dacs_to_pa=read.calibration.scale
            #read_iter=bam_index.find(read_name)
            try:
                for bam_read in bam_index.get_alignments(read_name):
                    # Only the primary alignment of each read is used.
                    if bam_read.is_supplementary or bam_read.is_secondary:
                        continue
                    seq = bam_read.get_forward_sequence()
                    if seq is None or signal is None:
                        print("read id: {} 's seq or signal is None".format(read_name))
                        continue
                    read_dict=dict(bam_read.tags)
                    reference_name=align_key.split(":")[0].lower()
                    # NOTE(review): strand_code is computed but never used below.
                    strand_code = 0 if strand=="-" else 1
                    #find_key=(read_name,reference_name)
                    align_read_start=int(read_info.split(":")[1].split("-")[0])
                    align_read_end=int(read_info.split(":")[1].split("-")[1])
                    # Skip reads whose aligned span covers too little of the read.
                    if (align_read_end-align_read_start)/len(seq)<coverage_ratio_limit:
                        print("read id: {} 's coverage is low than limit".format(read_name))
                        continue

                    ref_start=align_key.split(":")[1].split("-")[0]
                    # Map each reference position to its query position.
                    cigartuple = _convert_cigarstring2tuple(cigar)
                    r_to_q_poss = parse_cigar(cigartuple, strand, len(ref_seq))

                    # Basecaller tags: "ts" is used as a sample trim count,
                    # "sm"/"sd" as pA->normalized shift/scale, "mv" as the
                    # move table with the stride in element 0 — presumably
                    # dorado's tag convention; TODO confirm against the
                    # basecaller documentation.
                    num_trimmed = read_dict["ts"]
                    shift_pa_to_norm = read_dict["sm"]
                    scale_pa_to_norm = read_dict["sd"]
                    mv_table=read_dict["mv"]

                    # Trim leading (ts >= 0) or trailing (ts < 0) samples.
                    if num_trimmed >= 0:
                        signal_trimmed = signal[num_trimmed:]#(signal[num_trimmed:] - norm_shift) / norm_scale
                    else:
                        signal_trimmed = signal[:num_trimmed]#(signal[:num_trimmed] - norm_shift) / norm_scale
                    # Group raw samples per basecalled position via the move table.
                    signal_group = _group_signals_by_movetable_v2(signal_trimmed, np.asarray(mv_table[1:]), int(mv_table[0]))

                    # Per-reference-position signal groups and query locations.
                    ref_signal_grp = [
                        None,
                    ] * len(ref_seq)
                    ref_readlocs = [
                        0,
                    ] * len(ref_seq)
                    flag=0
                    for ref_pos, q_pos in enumerate(r_to_q_poss[:-1]):
                        # signal groups
                        try:
                            ref_signal_grp[ref_pos] = signal_group[q_pos + align_read_start-1]
                        except:
                            print('query {} but read len is {}'.format((q_pos + align_read_start),len(signal_group)))
                            flag=1
                            break
                        ref_readlocs[ref_pos] = q_pos + align_read_start
                    # Give up on this read's alignments if signal lookup failed.
                    if flag==1:
                        break

                    signal_group_new=[]
                    for sig in ref_signal_grp:
                        signal_group_new.append(np.array(sig))
                    try:
                        norm_signals_text=';'.join([",".join([str(y) for y in x]) for x in signal_group_new])
                    except:
                        # NOTE(review): if this join fails, norm_signals_text
                        # is left unset and the fea_str line below raises
                        # NameError (caught by the outer handler).
                        print('signal error!!')
                        print(ref_signal_grp)
                    # Placeholder columns for downstream predictions/labels.
                    pred_deepsignal_text='.'

                    pred_dorado_text='.'
                    mean_pred_text='.'
                    pred_label_text='.'
                    pred_pos='.'
                    sample_id='\t'.join([read_name,str(reference_name),str(ref_start)])
                    bisulfite_text='.'
                    fea_str='\t'.join([sample_id,ref_seq,norm_signals_text,pred_pos,pred_dorado_text,pred_deepsignal_text,mean_pred_text,pred_label_text,str(bam_read.mapping_quality),
                                       str(shift_dacs_to_pa),str(scale_dacs_to_pa),str(shift_pa_to_norm),str(scale_pa_to_norm),bisulfite_text])
                    fea_list.append(fea_str)
                    if len(fea_list)>=d_batch_size:
                        output_Q.put(fea_list)
                        fea_list=[]
            except Exception as e:
                traceback.print_exc()
    # Flush the final partial batch after the kill sentinel.
    if len(fea_list)>0:
        output_Q.put(fea_list)

def read_tsv(key_input):
    """Return the stripped lines of ``key_input`` as a list of keys."""
    with open(key_input, 'r') as input_file:
        return [line.strip() for line in input_file]


def extract(args):
    """Wire up and run the extraction pipeline.

    One ``process_basecall`` producer feeds ``args.nproc - 2``
    ``process_dorado`` workers through a shared queue; a single
    ``_write_featurestr`` process serializes their output to
    ``args.write_path``.
    """
    manager = mp.Manager()
    basecall_q = manager.Queue()
    output_q = manager.Queue()

    # Random-access BAM index and a recursive, indexed pod5 dataset reader.
    bam_index = bam_reader.ReadIndexedBam(args.bam)
    pod5_dr = p5.DatasetReader(os.path.abspath(args.signal), recursive=True, index=True)

    timewait = args.timewait

    producer = mp.Process(
        target=process_basecall,
        args=(args.tsv, args.fna, basecall_q, args.identity, timewait),
        name="basecall_reader",
    )
    producer.daemon = True
    producer.start()

    workers = []
    for _ in range(args.nproc - 2):
        worker = mp.Process(
            target=process_dorado,
            args=(bam_index, pod5_dr, basecall_q, output_q, args.coverage_ratio, timewait),
            name="pb_reader",
        )
        worker.daemon = True
        worker.start()
        workers.append(worker)

    writer = mp.Process(
        target=_write_featurestr,
        args=(args.write_path, output_q, timewait, args.control),
        name="writer",
    )
    writer.daemon = True
    writer.start()

    # Shut down in dependency order: producer, then workers, then writer,
    # propagating a "kill" sentinel through each queue.
    producer.join()
    basecall_q.put("kill")
    for worker in workers:
        worker.join()
    output_q.put("kill")
    writer.join()

def parse_args():
    """Define and parse this script's command-line interface."""
    parser = argparse.ArgumentParser("")
    add = parser.add_argument

    # Input files.
    add("--tsv", type=str, required=False)
    add("--fna", type=str, required=False)
    add("--bam", type=str, required=True)
    add("--pod5", action="store_true", default=False, required=False,
        help='use pod5, default false')
    add("--bed", type=str, required=False)
    add("--signal", type=str, required=True)

    # Output and runtime knobs.
    add("--write_path", type=str, required=True)
    add("--nproc", "-p", type=int, required=True)
    add("--timewait", "-t", default=0.1, type=float, required=False)
    add("--d_batch_size", action="store", type=int, default=2, required=False)
    add("--control", action="store_true", default=False, required=False,
        help='test')

    # Filtering thresholds.
    add("--identity", type=float, default=0.9, required=False,
        help="identity cutoff for selecting alignment items, default 0.9")
    add("--coverage_ratio", type=float, default=0.95, required=False,
        help="percent of coverage, read alignment len against read len, default 0.95")

    return parser.parse_args()


def main():
    """Entry point: parse the CLI arguments and run the extraction pipeline."""
    extract(parse_args())


# Script entry point: main() returns None, so the process exit status is 0.
if __name__ == '__main__':
    sys.exit(main())