
import argparse
import sys
import os
from multiprocessing import Manager
import multiprocessing as mp
import gzip

import time
import datetime
import numpy as np

from pathlib import Path
#from memory_profiler import profile

import re
import traceback
from tqdm import tqdm

#import threading
# Define the termination event (for the commented-out threading control path)
#event = threading.Event()

def _write_featurestr(write_fp, featurestr_q,time_wait = 1,control=False):
    #print('write_process-{} starts'.format(os.getpid()))
    output=0

    with open(write_fp, 'w') as wf:
        while True:
            # during test, it's ok without the sleep(time_wait)
            if featurestr_q.empty():
                time.sleep(time_wait)
                continue
            features_str = featurestr_q.get()
            if features_str == "kill":
                #print('output line {}'.format(output))
                #print('write_process-{} finished'.format(os.getpid()))
                break
            for one_features_str in features_str:
                output+=1                   
                wf.write(one_features_str + "\n")
                if control is True and output>=4000:
                    # 设定终止标志
                    #event.set()
                    sys.exit(0)
            wf.flush()

##########
# Process the TSV input and its alignment against the BAM and POD5 files
##########
def process_tombo(input_file, tombo_Q, time_wait=1, reads_per_chunk=2, qsize_limit=20):
    """Producer: stream lines of ``input_file`` into ``tombo_Q`` in chunks.

    Lines are accumulated into lists of ``reads_per_chunk`` and put on the
    queue; any trailing partial chunk is flushed at the end.  Back-pressure:
    input pauses while the queue holds at least ``qsize_limit`` chunks.
    """
    fea_list = []
    # Use a context manager so the handle is closed even on error
    # (the original open()/close() pair leaked the file on exception).
    with open(input_file, 'r') as infile:
        for line in tqdm(infile):
            fea_list.append(line)
            if len(fea_list) >= reads_per_chunk:
                while tombo_Q.qsize() >= qsize_limit:
                    # Pause input while downstream consumers catch up.
                    time.sleep(time_wait)
                tombo_Q.put(fea_list)
                fea_list = []
    if len(fea_list) > 0:
        tombo_Q.put(fea_list)

def process_tsv(input, index, tombo_Q, output_Q, time_wait=1, d_batch_size=2, qsize_limit=22):
    """Worker: join read chunks from ``tombo_Q`` against the alignment file.

    Each queued chunk is a list of tab-separated lines
    (readid, chrom, chrom_start, seq, signals, scaling, offset).  For each
    read, the alignment line at ``index[readid]`` in ``input`` supplies
    mapping quality and normalization parameters; the combined feature
    string is batched onto ``output_Q`` in groups of ``d_batch_size``.
    Terminates on the "kill" sentinel, which it re-queues for sibling
    workers, then flushes any remaining partial batch.
    """
    from itertools import islice

    fea_list = []
    while True:
        while tombo_Q.empty():
            time.sleep(time_wait)
        read_data = tombo_Q.get()
        if read_data == "kill":
            # Re-queue the sentinel so other workers also terminate.
            tombo_Q.put("kill")
            break
        for line1 in read_data:
            while output_Q.qsize() >= qsize_limit:
                # Back-pressure: pause while the output queue is full.
                time.sleep(time_wait)
            readid, chrom, chrom_start, seq, signals, scaling, offset = line1.strip().split("\t")

            if readid not in index:
                print('read name: {} can\'t find in tsv'.format(readid))
                continue
            mapping_quality = '.'
            shift_pa_to_norm = '.'
            scale_pa_to_norm = '.'
            # Jump straight to the indexed line instead of counting lines
            # manually (islice skips at C speed; same O(line) semantics).
            # NOTE(review): re-opening and scanning the file per read is
            # still expensive for large inputs — consider a byte-offset index.
            with open(input, 'r') as input_file:
                target = next(islice(input_file, index[readid], index[readid] + 1), None)
                if target is not None:
                    words = target.strip().split("\t")
                    mapping_quality = words[10]
                    shift_pa_to_norm = words[13]
                    scale_pa_to_norm = words[14]
            pred_deepsignal_text = '.'

            pred_dorado_text = '.'
            mean_pred_text = '.'
            pred_label_text = '.'
            pred_pos = '.'
            sample_id = '\t'.join([readid, str(chrom), str(chrom_start)])
            bisulfite_text = '.'
            fea_str = '\t'.join([sample_id, seq, signals, pred_pos, pred_dorado_text, pred_deepsignal_text, mean_pred_text, pred_label_text, str(mapping_quality),
                                str(offset), str(scaling), str(shift_pa_to_norm), str(scale_pa_to_norm), bisulfite_text])
            fea_list.append(fea_str)
            if len(fea_list) >= d_batch_size:
                output_Q.put(fea_list)
                fea_list = []
    # Flush whatever is left after the kill sentinel arrives.
    if len(fea_list) > 0:
        output_Q.put(fea_list)

def read_tsv(key_input):
    """Return the stripped lines of ``key_input`` as a list of keys."""
    with open(key_input, 'r') as handle:
        return [row.strip() for row in handle]

def build_index(input):
    """Map the first tab-separated field of each line to its 0-based line number.

    Duplicate keys keep the last line number seen, matching plain
    dict-assignment semantics.
    """
    with open(input, 'r') as handle:
        return {row.strip().split("\t")[0]: lineno
                for lineno, row in enumerate(handle)}


def extract(args):
    """Orchestrate the producer / worker / writer multiprocessing pipeline.

    Topology: one ``process_tombo`` producer feeds ``tombo_Q``; a pool of
    ``process_tsv`` workers joins reads against the indexed alignment file
    and feeds ``output_Q``; one ``_write_featurestr`` writer drains it to
    ``args.write_path``.  Shutdown is driven by "kill" sentinels.
    """
    manager = mp.Manager()
    tombo_Q = manager.Queue()
    output_Q = manager.Queue()

    # readid -> 0-based line number in the alignment file.
    index = build_index(args.input)
    timewait = args.timewait

    pd = mp.Process(target=process_tombo, args=(args.tsv, tombo_Q, timewait),
                    name="basecall_reader")
    pd.daemon = True
    pd.start()

    # One slot each for producer and writer; clamp to at least one worker so
    # the pipeline cannot deadlock when nproc <= 2 (original spawned zero).
    ex_dp = max(1, args.nproc - 2)
    ex_procs = []
    for _ in range(ex_dp):
        # NOTE(review): args.d_batch_size is parsed but not forwarded here;
        # workers fall back to process_tsv's default batch size.
        pb = mp.Process(target=process_tsv, args=(args.input, index, tombo_Q, output_Q, timewait),
                        name="pb_reader")
        pb.daemon = True
        pb.start()
        ex_procs.append(pb)

    p_w = mp.Process(target=_write_featurestr, args=(args.write_path, output_Q, timewait, args.control),
                     name="writer")
    p_w.daemon = True
    p_w.start()

    pd.join()
    tombo_Q.put("kill")    # input exhausted: release the workers
    for pb in ex_procs:
        pb.join()
    output_Q.put("kill")   # workers done: release the writer
    p_w.join()

def parse_args():
    """Build and parse the command-line interface.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser("")
    # --tsv is consumed unconditionally by extract()/process_tombo, so make
    # it required: omitting it previously crashed later with open(None)
    # instead of producing a clear argparse error.
    parser.add_argument("--tsv", type=str, required=True)
    parser.add_argument("--input", type=str, required=True)
    parser.add_argument("--write_path", type=str, required=True)
    parser.add_argument("--nproc", "-p", type=int, required=True)
    parser.add_argument("--timewait", "-t", default=0.1, type=float, required=False)
    parser.add_argument("--d_batch_size", action="store", type=int, default=2,
                        required=False)
    parser.add_argument("--control", action="store_true", default=False, required=False,
                        help='test')
    parser.add_argument(
        "--identity",
        type=float,
        default=0.9,
        required=False,
        help="identity cutoff for selecting alignment items, default 0.9",
    )
    parser.add_argument(
        "--coverage_ratio",
        type=float,
        default=0.95,
        required=False,
        help="percent of coverage, read alignment len against read len, default 0.95",
    )

    return parser.parse_args()


def main():
    """CLI entry point: parse arguments and run the extraction pipeline."""
    extract(parse_args())


# Script entry guard; main() returns None, so sys.exit reports status 0.
if __name__ == '__main__':
    sys.exit(main())