# standard library
import argparse
import datetime
import gzip
import mmap
import multiprocessing as mp
import os
import queue
import re
import sys
import time
from multiprocessing import Manager
from pathlib import Path

# third-party
import numpy as np
import pod5 as p5
import pysam
from memory_profiler import profile
from ont_fast5_api.fast5_interface import get_fast5_file

# project-local
from deepsignal3.extract_features_pod5 import _group_signals_by_movetable_v2
from deepsignal3.utils.process_utils import CIGAR2CODE
from deepsignal3.utils.process_utils import CIGAR_REGEX

def get_object_size(obj):
    """Return the shallow size of ``obj`` in bytes (contained objects not followed)."""
    size_in_bytes = sys.getsizeof(obj)
    return size_in_bytes

def get_q2tloc_from_cigar(r_cigar_tuple, strand, seq_len):
    """Map each query (read) position to its reference position.

    insertion: -1, deletion: -2, mismatch: -3
    :param r_cigar_tuple: pysam.alignmentSegment.cigartuples
    :param strand: 1/-1 for fwd/rev
    :param seq_len: read alignment length
    :return: numpy int32 array of length seq_len + 1, query pos to ref pos
    """
    fill_invalid = -2
    # one extra slot so the position one-past-the-end is also recorded
    q2r = np.full(seq_len + 1, fill_invalid, dtype=np.int32)
    r_pos = 0
    q_pos = 0
    # walk the cigar in read direction (reverse it for the minus strand)
    ops = r_cigar_tuple if strand == 1 else r_cigar_tuple[::-1]
    for op, length in ops:
        if op == 1:
            # insertion: these query bases have no reference position
            q2r[q_pos:q_pos + length] = -1
            q_pos += length
        elif op in (2, 3):
            # deletion / ref-skip: consumes reference only
            r_pos += length
        elif op in (0, 7, 8):
            # match / = / X: consumes both query and reference in lockstep
            q2r[q_pos:q_pos + length] = np.arange(r_pos, r_pos + length, dtype=np.int32)
            q_pos += length
            r_pos += length
        elif op == 6:
            # padding (shouldn't happen in mappy)
            pass
    q2r[q_pos] = r_pos
    if q2r[-1] == fill_invalid:
        raise ValueError(('Invalid cigar string encountered. Reference length: {}  Cigar ' +
                          'implied reference length: {}').format(seq_len, r_pos))
    return q2r

def get_refloc_of_methysite_in_motif(seqstr, motifset, methyloc_in_motif=0):
    """Return the position of the methylated base for every motif hit in seqstr.

    :param seqstr: sequence string to scan
    :param motifset: iterable of same-length motif strings
    :param methyloc_in_motif: 0-based offset of the methylated base in the motif
    :return: list of 0-based positions
    """
    motifs = set(motifset)
    # all motifs are assumed to share one length; take it from any member
    motif_len = len(next(iter(motifs)))
    return [start + methyloc_in_motif
            for start in range(len(seqstr) - motif_len + 1)
            if seqstr[start:start + motif_len] in motifs]

def get_key2lines(feafile):
    """Build a line index over a deepsignal feature tsv.

    Maps key = (column 4, column 0) of each tab-separated row to the list of
    0-based line numbers carrying that key.

    :param feafile: path to the tsv; gzip-compressed if it ends with ".gz"
    :return: dict {(words[4], words[0]): [line numbers]}
    """
    if feafile.endswith(".gz"):
        infile = gzip.open(feafile, 'rt')
    else:
        infile = open(feafile, 'r')
    key2lines = {}
    # context manager fixes the file-handle leak of the original version
    with infile:
        for lcnt, line in enumerate(infile):
            words = line.strip().split("\t")
            key = (words[4], words[0])
            key2lines.setdefault(key, []).append(lcnt)
    print('build index for tsv')
    return key2lines

def _collect_dorado_calls(bam_read):
    """Collect dorado 5mC calls from a BAM record's MM/ML-derived modified_bases.

    Returns (query_pos, dorado_pred, dorado_pred_pos): query coordinates,
    raw 0-255 dorado probabilities, and the matching reference positions.
    Only calls that map onto a reference base are kept.
    """
    query_pos, dorado_pred, dorado_pred_pos = [], [], []
    if bam_read.modified_bases is not None:
        ref_loc = bam_read.get_reference_positions(full_length=True)
        for mod_key, locs in bam_read.modified_bases.items():
            # mod_key is (canonical base, strand, mod code); keep 5mC on C
            if mod_key[0] == 'C' and mod_key[2] == 'm':
                for q_off, prob in locs:
                    if ref_loc[q_off] is not None:
                        query_pos.append(q_off if bam_read.is_forward else q_off - 1)
                        dorado_pred.append(prob)
                        dorado_pred_pos.append(ref_loc[q_off])
    return query_pos, dorado_pred, dorado_pred_pos


def _iter_feature_lines(features_file):
    """Yield decoded text lines of the (possibly gzip-compressed) feature tsv.

    BUG FIX: the original mmap'ed the fileno of a gzip handle (exposing the
    *compressed* bytes) and iterated ``mm.readline()`` — the bytes of a single
    line — so decoding always failed. Plain files are still mmap'ed.
    """
    if features_file.endswith(".gz"):
        with gzip.open(features_file, 'rt') as infile:
            yield from infile
    else:
        with open(features_file, 'r') as infile, \
                mmap.mmap(infile.fileno(), 0, access=mmap.ACCESS_READ) as mm:
            # iter(mm.readline, b"") yields whole lines until EOF
            for raw in iter(mm.readline, b""):
                try:
                    yield raw.decode('cp1252')
                except UnicodeDecodeError:
                    yield raw.decode('latin-1')


def _scan_deepsignal_preds(features_file, key, wanted_pos):
    """Scan the feature tsv for rows matching ``key`` = (read_name, chrom).

    Returns {ref_pos: deepsignal_prob} for reference positions in
    ``wanted_pos``; positions with no tsv row are filled with -1.
    Assumes rows of one read are contiguous, so scanning stops after the
    first run of matching rows.
    """
    preds = {}
    wanted = set(wanted_pos)  # O(1) membership instead of list scans
    found_run = False
    for decoded_line in _iter_feature_lines(features_file):
        words = decoded_line.strip().split('\t')
        if (words[4], words[0]) != key:
            if found_run:
                break  # past the contiguous run for this read: done
            continue
        found_run = True
        pos = int(words[1])
        if pos in wanted:
            preds[pos] = float(words[7])
    for pos in wanted_pos:
        preds.setdefault(pos, -1)
    return preds


def _read_features_file(signal_Q, output_Q, key2lines, features_file, motif='C', mod_loc=0, d_batch_size=2):
    """Worker: merge dorado calls from BAM reads with deepsignal tsv rows.

    Pulls (read_name, signal, bam_read) chunks from ``signal_Q``, normalizes
    the raw signal via the BAM's ts/sm/sd/mv tags, averages dorado and
    deepsignal 5mC probabilities per site, and pushes batches of
    tab-separated feature lines to ``output_Q``.

    ``key2lines``, ``motif`` and ``mod_loc`` are currently unused; they are
    kept for interface compatibility with existing callers.
    """
    mm_pattern = re.compile(r'C[+-]m')  # hoisted: compiled once per worker
    fea_list = []
    while True:
        # NOTE(review): the worker exits as soon as the queue is momentarily
        # empty — it relies on the producer staying ahead; kept as designed.
        if signal_Q.empty():
            break
        try:
            # BUG FIX: block=False ignored the old timeout and could raise
            # queue.Empty when another worker drained the queue first.
            chunk = signal_Q.get(block=False)
        except queue.Empty:
            break
        for read_name, signal, bam_read in chunk:
            seq = bam_read.query_sequence
            read_dict = dict(bam_read.tags)
            if not mm_pattern.findall(read_dict["MM"]):
                print("skip methylation isn't 5mC")
                continue
            reference_name = bam_read.reference_name
            ref_start = bam_read.reference_start
            query_pos, dorado_pred, dorado_pred_pos = _collect_dorado_calls(bam_read)
            key = (read_name, reference_name)
            # BUG FIX: pred_deepsignal was a list but indexed by reference
            # position and probed with .keys(); it is now a dict.
            pred_deepsignal = _scan_deepsignal_preds(features_file, key, dorado_pred_pos)
            try:
                num_trimmed = read_dict["ts"]
                norm_shift = read_dict["sm"]
                norm_scale = read_dict["sd"]
                mv_table = read_dict["mv"]
                # undo basecaller scaling after trimming the adapter samples
                if num_trimmed >= 0:
                    signal_trimmed = (signal[num_trimmed:] - norm_shift) / norm_scale
                else:
                    signal_trimmed = (signal[:num_trimmed] - norm_shift) / norm_scale
                # z-score normalization (skip when the signal is constant)
                sshift, sscale = np.mean(signal_trimmed), float(np.std(signal_trimmed))
                if sscale == 0.0:
                    norm_signals = signal_trimmed
                else:
                    norm_signals = (signal_trimmed - sshift) / sscale
                signal_group = _group_signals_by_movetable_v2(
                    norm_signals, np.asarray(mv_table[1:]), int(mv_table[0]))
                norm_signals_text = ','.join(str(x) for x in signal_group)
                # dorado stores probabilities as 0-255 bins; map to (0, 1)
                dorado_probs = [(p + 0.5) / 256 for p in dorado_pred]
                pred_deepsignal_text = ','.join(str(pred_deepsignal[p]) for p in dorado_pred_pos)
                pred_dorado_text = ','.join(str(x) for x in dorado_probs)
                # per-site mean of both callers; -1 marks "deepsignal missing"
                mean_pred = []
                for prob, pos in zip(dorado_probs, dorado_pred_pos):
                    ds = pred_deepsignal[pos]
                    mean_pred.append(prob if ds == -1 else (prob + ds) / 2)
                pred_label = [1 if m >= 0.5 else 0 for m in mean_pred]
                mean_pred_text = ','.join(str(x) for x in mean_pred)
                pred_label_text = ','.join(str(x) for x in pred_label)
                pred_pos = ','.join(str(x) for x in query_pos)
                sample_id = '\t'.join([read_name, reference_name, str(ref_start)])
                fea_str = '\t'.join([sample_id, seq, norm_signals_text, pred_pos,
                                     pred_dorado_text, pred_deepsignal_text,
                                     mean_pred_text, pred_label_text,
                                     str(bam_read.mapping_quality)])
                fea_list.append(fea_str)
            except Exception as e:
                # best-effort: a malformed read must not kill the worker
                print('deal feature false.', flush=True)
                print(e, flush=True)

            if len(fea_list) > d_batch_size:
                output_Q.put(fea_list)
                fea_list = []

    if len(fea_list) > 0:
        output_Q.put(fea_list)

def _read_signal_bam(signal_Q, filenames, bam, is_pod5=False):
    """Producer: pair raw signals from pod5/fast5 files with their BAM records.

    Builds a read-name index over ``bam``, walks every signal file under
    ``filenames`` (a single file or a directory searched recursively) and
    puts chunks of (read_name, signal, bam_read) tuples onto ``signal_Q``.
    Secondary, supplementary and unmapped alignments are skipped.
    """
    bam_file = pysam.AlignmentFile(bam, 'rb', check_sq=False)
    print('%s: Building BAM index.' % str(datetime.datetime.now()), flush=True)
    bam_index = pysam.IndexedReads(bam_file)
    bam_index.build()
    print('%s: Finished building BAM index.' % str(datetime.datetime.now()), flush=True)

    chunk = []
    reads_per_chunk = 1

    def _throttle():
        # crude back-pressure: pause while the consumers catch up
        if signal_Q.qsize() > 2:
            time.sleep(20)
            print('Pausing input due to INPUT queue size limit. Signal_qsize=%d'
                  % (signal_Q.qsize()), flush=True)

    def _emit(read_name, get_signal):
        """Look up read_name in the BAM and queue (name, signal, record)."""
        nonlocal chunk
        try:
            read_iter = bam_index.find(read_name)
        except KeyError:
            return  # read has no alignment: skip silently as before
        for bam_read in read_iter:
            if bam_read.is_supplementary or bam_read.is_secondary or not bam_read.is_mapped:
                continue
            signal = get_signal()
            seq = bam_read.get_forward_sequence()
            if seq is None or signal is None:
                continue
            chunk.append((read_name, signal, bam_read))
            if len(chunk) >= reads_per_chunk:
                signal_Q.put(chunk)
                chunk = []

    file_type = 'pod5' if is_pod5 else 'fast5'
    signal_files = [filenames] if os.path.isfile(filenames) \
        else Path(filenames).rglob("*.%s" % file_type)
    if is_pod5:
        for filename in signal_files:
            with p5.Reader(filename) as reader:
                for read in reader.reads():
                    _throttle()
                    _emit(str(read.read_id), lambda r=read: r.signal)
    else:
        for filename in signal_files:
            with get_fast5_file(filename, mode="r") as f5:
                for read in f5.get_reads():
                    _throttle()
                    # BUG FIX: ont_fast5_api reads expose get_raw_data(), not
                    # a .signal attribute; the original raised AttributeError.
                    _emit(str(read.read_id), lambda r=read: r.get_raw_data())
    # flush any leftover reads (defensive; reads_per_chunk == 1 today)
    if chunk:
        signal_Q.put(chunk)

def _write_featurestr(write_fp, featurestr_q,time_wait = 3):
    print('write_process-{} starts'.format(os.getpid()))
    with open(write_fp, 'w') as wf:
        while True:
            # during test, it's ok without the sleep(time_wait)
            if featurestr_q.empty():
                time.sleep(time_wait)
                continue
            features_str = featurestr_q.get()
            if features_str == "kill":
                print('write_process-{} finished'.format(os.getpid()))
                break
            for one_features_str in features_str:
                wf.write(one_features_str + "\n")
            wf.flush()

def extract(args):
    """Run the merge pipeline: one signal/BAM reader, N workers, one writer.

    The reader pairs raw signals with BAM records, ``args.nproc - 2`` worker
    processes join them with the deepsignal tsv (``args.tsv``), and a writer
    process streams the merged feature lines to ``args.write_path``.

    :raises ValueError: if ``args.tsv`` does not exist
    """
    manager = mp.Manager()
    signal_Q = manager.Queue()
    output_Q = manager.Queue()
    input_path = os.path.abspath(args.tsv)
    if not os.path.exists(input_path):
        raise ValueError("--tsv input_path does not exist!")
    # keep at least one worker even for small --nproc values
    ex_dp = max(1, args.nproc - 2)
    key2lines = None  # tsv line index currently disabled; see get_key2lines
    sys.stdout.flush()
    pb = mp.Process(target=_read_signal_bam,
                    # BUG FIX: forward --pod5 (it was previously dropped, so
                    # pod5 input was always scanned as fast5)
                    args=(signal_Q, args.signal, args.bam, args.pod5),
                    name="signal_bam")
    pb.daemon = True
    pb.start()
    ex_procs = []
    for _ in range(ex_dp):
        p_rf = mp.Process(target=_read_features_file,
                          args=(signal_Q, output_Q, key2lines, input_path),
                          # BUG FIX: pass as keyword — positionally it landed
                          # in the ``motif`` parameter of _read_features_file
                          kwargs={"d_batch_size": args.d_batch_size},
                          name="reader")
        p_rf.daemon = True
        p_rf.start()
        ex_procs.append(p_rf)
    p_w = mp.Process(target=_write_featurestr,
                     args=(args.write_path, output_Q),
                     name="writer")
    p_w.daemon = True
    p_w.start()
    pb.join()
    for p in ex_procs:
        p.join()
    output_Q.put("kill")  # sentinel: tells the writer to finish
    p_w.join()



def parse_args():
    """Define and parse the command-line interface for the extractor."""
    parser = argparse.ArgumentParser("")
    parser.add_argument("--bam", required=True, type=str)
    parser.add_argument("--pod5", required=False, action="store_true",
                        default=False, help='use pod5, default false')
    parser.add_argument("--signal", required=True, type=str)
    parser.add_argument("--tsv", required=True, type=str)
    parser.add_argument("--write_path", required=True, type=str)
    parser.add_argument("--nproc", "-p", required=True, type=int)
    parser.add_argument("--d_batch_size", required=False, action="store",
                        type=int, default=2)
    return parser.parse_args()


def main():
    """CLI entry point: parse arguments and run the extraction pipeline."""
    extract(parse_args())


if __name__ == '__main__':
    # main() returns None, so sys.exit(None) exits with status 0 on success
    sys.exit(main())