import pysam
import argparse
import sys
import os
from multiprocessing import Manager
import multiprocessing as mp
from multiprocessing import Queue
import gzip
import mmap
import pod5 as p5
import time
import datetime
import numpy as np
from ont_fast5_api.fast5_interface import get_fast5_file
from pathlib import Path
from memory_profiler import profile
from deepsignal3.utils.process_utils import CIGAR2CODE
from deepsignal3.utils.process_utils import CIGAR_REGEX
from deepsignal3.utils.process_utils import get_files
from deepsignal3.utils.process_utils import fill_files_queue
from deepsignal3.utils import bam_reader
from deepsignal3.extract_features_pod5 import _group_signals_by_movetable_v2
import re
import traceback

#import threading
# termination event for the disabled --control early-exit path
#event = threading.Event()

def _write_featurestr(write_fp, featurestr_q,time_wait = 1,control=False):
    #print('write_process-{} starts'.format(os.getpid()))
    output=0
    if os.path.exists(write_fp):
        with open(write_fp, 'a') as wf:
            while True:
                # during test, it's ok without the sleep(time_wait)
                if featurestr_q.empty():
                    time.sleep(time_wait)
                    continue
                features_str = featurestr_q.get()
                if features_str == "kill":
                    #print('output line {}'.format(output))
                    #print('write_process-{} finished'.format(os.getpid()))
                    break
                for one_features_str in features_str:
                    output+=1
                    wf.write(one_features_str + "\n")
                wf.flush()
    else:
        with open(write_fp, 'w') as wf:
            while True:
                # during test, it's ok without the sleep(time_wait)
                if featurestr_q.empty():
                    time.sleep(time_wait)
                    continue
                features_str = featurestr_q.get()
                if features_str == "kill":
                    #print('output line {}'.format(output))
                    #print('write_process-{} finished'.format(os.getpid()))
                    break
                for one_features_str in features_str:
                    output+=1                   
                    wf.write(one_features_str + "\n")
                    if control is True and output>=4000:
                        # 设定终止标志
                        #event.set()
                        sys.exit(0)
                wf.flush()



def process_dorado(bam_index, pod5s_q, output_Q, hp_position, hn_position, bisulfite,
                   time_wait=1, d_batch_size=2, qsize_limit=22):
    """Worker process: pair pod5 signal reads with their BAM alignments and
    emit tab-separated feature strings onto *output_Q* in batches.

    Args:
        bam_index: read-name indexed BAM accessor (bam_reader.ReadIndexedBam).
        pod5s_q: queue of [pod5_path] items, terminated by the "kill" sentinel.
        output_Q: queue receiving batches (lists) of feature strings.
        hp_position: set of 'chrom||pos' high-confidence positive (WGBS)
            positions, or None when not supplied.
        hn_position: set of 'chrom||pos' high-confidence negative positions,
            or None.
        bisulfite: dict 'chrom||pos' -> bisulfite frequency, or None.
        time_wait: seconds to sleep while output_Q exceeds qsize_limit.
        d_batch_size: number of feature strings per output batch.
        qsize_limit: back-pressure threshold for output_Q.

    Bugs fixed vs. the original:
      * the per-file flush of a partial batch did not reset fea_list, so
        leftover features were re-emitted (duplicated) for every later file;
      * membership tests against hp_position/hn_position/bisulfite raised
        TypeError when those optional inputs were None (the broad except then
        discarded every read); None is now treated as "no truth data".
    """
    fea_list = []
    while True:
        pod5_file = pod5s_q.get()
        if pod5_file == "kill":
            # re-queue the sentinel so sibling workers terminate too
            pod5s_q.put("kill")
            break
        with p5.Reader(pod5_file[0]) as reader:
            for read in reader.reads():
                # back-pressure: pause while the writer queue is too full
                while output_Q.qsize() >= qsize_limit:
                    time.sleep(time_wait)

                signal = read.signal
                shift_dacs_to_pa = read.calibration.offset
                scale_dacs_to_pa = read.calibration.scale
                read_name = str(read.read_id)
                try:
                    for bam_read in bam_index.get_alignments(read_name):
                        # primary, mapped, MAPQ>=10 alignments only
                        if bam_read.is_supplementary or bam_read.is_secondary or not bam_read.is_mapped:
                            continue
                        if bam_read.mapping_quality < 10:
                            continue

                        reference_name = bam_read.reference_name
                        seq = bam_read.get_forward_sequence()
                        if seq is None or signal is None:
                            continue
                        read_dict = dict(bam_read.tags)

                        strand_code = 0 if bam_read.is_reverse else 1
                        strand = "-" if strand_code == 0 else "+"  # NOTE(review): unused downstream
                        ref_start = bam_read.reference_start

                        # collect dorado 5mC calls (MM/ML tags, forward strand),
                        # keyed by reference position
                        dorado_pred = dict()      # ref_pos -> dorado probability
                        dorado_pred_pos = dict()  # ref_pos -> query (read) position
                        query_pos = []
                        if bam_read.modified_bases is not None:
                            ref_loc = bam_read.get_reference_positions(full_length=True)
                            for m, locs in bam_read.modified_bases_forward.items():
                                if m[0] == 'C' and m[2] == 'm':
                                    for lc in locs:
                                        if ref_loc[lc[0]] is not None:
                                            query_pos.append(lc[0])
                                            rloc = ref_loc[lc[0]]
                                            dorado_pred[rloc] = lc[1]
                                            dorado_pred_pos[rloc] = lc[0]

                        # reference position -> read position for aligned (match) columns
                        ref_readlocs = dict()
                        for read_pos, ref_pos in bam_read.get_aligned_pairs(matches_only=True):
                            if ref_pos in ref_readlocs:
                                print('repeat')
                            ref_readlocs[ref_pos] = read_pos
                        if len(dorado_pred_pos) == 0:
                            continue
                        # rescale dorado's 0-255 byte probabilities into (0, 1)
                        for key in dorado_pred.keys():
                            dorado_pred[key] = (dorado_pred[key] + 0.5) / 256
                        # placeholder deepsignal predictions (-1 == missing)
                        pred_deepsignal = dict()
                        for key in dorado_pred.keys():
                            if key not in pred_deepsignal.keys():
                                pred_deepsignal[key] = -1

                        ref_pos = sorted(dorado_pred_pos.keys())
                        num_trimmed = read_dict["ts"]
                        shift_pa_to_norm = read_dict["sm"]
                        scale_pa_to_norm = read_dict["sd"]
                        mv_table = read_dict["mv"]
                        # drop the basecaller-trimmed samples from the raw signal
                        if num_trimmed >= 0:
                            signal_trimmed = signal[num_trimmed:]
                        else:
                            signal_trimmed = signal[:num_trimmed]
                        # split the signal per base using the move table (stride = mv_table[0])
                        signal_group = _group_signals_by_movetable_v2(
                            signal_trimmed, np.asarray(mv_table[1:]), int(mv_table[0]))
                        signal_group_new = []
                        for sig in signal_group:
                            signal_group_new.append(np.round(np.array(sig), decimals=6))

                        norm_signals_text = ';'.join([",".join([str(y) for y in x]) for x in signal_group_new])
                        pred_deepsignal_new = np.round(np.array([pred_deepsignal[x] for x in ref_pos]), decimals=6)
                        pred_deepsignal_text = ','.join([str(x) for x in pred_deepsignal_new])

                        dorado_pred_new = np.round(np.array([dorado_pred[x] for x in ref_pos]), decimals=6)
                        pred_dorado_text = ','.join([str(x) for x in dorado_pred_new])

                        # combine dorado/deepsignal calls and attach WGBS truth labels
                        mean_pred = dict()
                        wgbs = dict()
                        bisulfite_ref = dict()
                        del_pos = []
                        for key in dorado_pred_pos.keys():
                            if pred_deepsignal[key] != -1 and dorado_pred[key] != -1:
                                mean_pred[key] = (dorado_pred[key] + pred_deepsignal[key]) / 2
                            else:
                                if dorado_pred[key] != -1:
                                    mean_pred[key] = dorado_pred[key]
                                elif pred_deepsignal[key] != -1:
                                    mean_pred[key] = pred_deepsignal[key]
                                else:
                                    del_pos.append(key)  # no prediction from either source
                            key_combine = '||'.join([reference_name, str(key)])
                            # tolerate missing truth inputs (None when --bed/--hp/--hn absent)
                            if bisulfite is not None and key_combine in bisulfite:
                                bisulfite_ref[key] = bisulfite[key_combine]
                            else:
                                bisulfite_ref[key] = -1
                            if hp_position is not None and key_combine in hp_position:
                                wgbs[key] = 1
                            elif hn_position is not None and key_combine in hn_position:
                                wgbs[key] = 0
                            else:
                                wgbs[key] = -1
                        for key in del_pos:
                            del dorado_pred_pos[key]
                        # final label: WGBS truth where available, else 0.5-threshold on mean_pred
                        pred_label = dict()
                        for key in dorado_pred_pos.keys():
                            if wgbs[key] == -1:
                                if mean_pred[key] >= 0.5:
                                    pred_label[key] = 1
                                else:
                                    pred_label[key] = 0
                            else:
                                pred_label[key] = wgbs[key]
                                if wgbs[key] == 1 and mean_pred[key] < 0.5:
                                    print('WGBS correct a positive position in {}'.format(read_name))
                                elif wgbs[key] == 0 and mean_pred[key] >= 0.5:
                                    print('WGBS correct a negative position in {}'.format(read_name))
                        mean_pred_new = np.round(np.array([mean_pred[x] for x in ref_pos]), decimals=6)
                        mean_pred_text = ','.join([str(x) for x in np.round(np.array(mean_pred_new), decimals=6)])
                        pred_label_new = np.array([pred_label[x] for x in ref_pos])
                        pred_label_text = ','.join([str(x) for x in pred_label_new])
                        pred_pos = ','.join([str(dorado_pred_pos[x]) for x in ref_pos])
                        sample_id = '\t'.join([read_name, reference_name, str(ref_start)])
                        bisulfite_ref_new = np.array([bisulfite_ref[x] for x in ref_pos])
                        bisulfite_text = ','.join([str(x) for x in bisulfite_ref_new])
                        fea_str = '\t'.join([sample_id, seq, norm_signals_text, pred_pos, pred_dorado_text,
                                             pred_deepsignal_text, mean_pred_text, pred_label_text,
                                             str(bam_read.mapping_quality),
                                             str(shift_dacs_to_pa), str(scale_dacs_to_pa),
                                             str(shift_pa_to_norm), str(scale_pa_to_norm), bisulfite_text])
                        fea_list.append(fea_str)
                        if len(fea_list) >= d_batch_size:
                            output_Q.put(fea_list)
                            fea_list = []
                except Exception:
                    # a failure on one read must not kill the worker process
                    traceback.print_exc()
        if len(fea_list) > 0:
            output_Q.put(fea_list)
            fea_list = []  # bug fix: reset so leftovers aren't duplicated for the next file

def generate_key(line):
    """Join the first two whitespace-separated fields of *line* with '||'."""
    fields = line.split()
    return "||".join(fields[:2])

def remove_last_line(file_path):
    """Truncate *file_path* in place so that its last line is removed.

    Handles a missing trailing newline; an empty file is left untouched; a
    one-line file becomes empty.

    Bugs fixed: the original opened the file in 'r' mode, where truncate()
    raises io.UnsupportedOperation, and its backward scan read at EOF without
    seeking (read(1) always returned ''), so it would have truncated the
    whole file.  Binary mode keeps the byte-offset arithmetic exact.
    """
    with open(file_path, 'rb+') as f:
        f.seek(0, os.SEEK_END)
        file_size = f.tell()
        if file_size == 0:
            return
        pos = file_size - 1
        # skip the newline terminating the last line, if present
        f.seek(pos)
        if f.read(1) == b'\n':
            pos -= 1
        # scan backwards for the newline that ends the second-to-last line
        while pos > 0:
            f.seek(pos)
            if f.read(1) == b'\n':
                break
            pos -= 1
        if pos > 0:
            f.truncate(pos + 1)
        else:
            # only one line in the file: remove everything
            f.truncate(0)

def process_file(output_file):
    """Collect the keys of all lines already present in *output_file*.

    Returns an empty set when the file does not exist yet, allowing a run
    to be resumed against a fresh output path.
    """
    try:
        fh = open(output_file, 'r')
    except FileNotFoundError:
        return set()
    with fh:
        return {generate_key(entry) for entry in fh}

def remove_key(key_input):
    """Read one key per line from *key_input* and return them as a set."""
    with open(key_input, 'r') as fh:
        return {entry.strip() for entry in fh}

def read_position_file(position_file):
    """Load 'chrom||pos' keys from the first two columns of *position_file*."""
    positions = set()
    with open(position_file, "r") as fh:
        for record in fh:
            fields = record.strip().split()
            positions.add("||".join(fields[:2]))
    return positions

def read_bed(bisulfite_bed):
    """Parse a bisulfite frequency BED file into {'chrom||pos': frequency}.

    Rows whose coverage (column 10) is below 5 are discarded; the frequency
    is taken from column 11.
    """
    freqinfo = {}
    with open(bisulfite_bed, "r") as fh:
        for record in fh:
            fields = record.strip().split()
            if int(fields[9]) >= 5:
                freqinfo["||".join((fields[0], fields[1]))] = float(fields[10])
    return freqinfo

def extract(args):
    """Top-level driver: spawn pod5/BAM feature-extraction workers plus one writer.

    Builds a read-name index over args.bam, queues every .pod5 file found
    under args.signal (recursively), starts ``args.nproc - 1`` process_dorado
    workers, and one _write_featurestr process that drains their output to
    args.write_path.  Optional WGBS truth inputs (--hp/--hn/--bed) are loaded
    into sets/dicts and passed through to the workers (None when absent).
    """
    manager = mp.Manager()
    output_Q = manager.Queue()
    
    #bam_file=pysam.AlignmentFile(args.bam,'rb',check_sq=False,ignore_truncation=True)
    #print('%s: Building BAM index.' %str(datetime.datetime.now()), flush=True)
    bam_index=bam_reader.ReadIndexedBam(args.bam)#pysam.IndexedReads(bam_file,multiple_iterators=True)
    #bam_index.build()
    #print('%s: Finished building BAM index.' %str(datetime.datetime.now()), flush=True)
    signal_dir = os.path.abspath(args.signal)
    # pod5_dr=p5.DatasetReader(signal_dir, recursive=True,index=True)
    # signal_ids=set(pod5_dr.read_ids)
    pod5_dr = get_files(signal_dir, True, ".pod5")

    pod5s_q = Queue()
    fill_files_queue(pod5s_q, pod5_dr)
    # single sentinel; each worker re-queues it on exit so all workers stop
    pod5s_q.put("kill")
    
    #print('%s: Finished process file.' %str(datetime.datetime.now()), flush=True)
    #############
    # optional high-confidence positive/negative position sets (WGBS truth)
    hp_position=None
    hn_position=None
    if args.hp is not None and args.hn is not None:
        hp_position=read_position_file(args.hp)
        hn_position=read_position_file(args.hn)
    # optional bisulfite frequency map keyed by 'chrom||pos'
    bisulfite=None
    if args.bed is not None:
        bisulfite=read_bed(args.bed)
    timewait=args.timewait
    # one process is reserved for the writer
    ex_dp=args.nproc - 1
    ex_procs = []
    for i in range(ex_dp):
        pb = mp.Process(target=process_dorado, args=(bam_index, pod5s_q,output_Q,hp_position,hn_position,bisulfite,timewait),
                          name="pb_reader")
            
        pb.daemon = True
        pb.start()
        ex_procs.append(pb)
    p_w = mp.Process(target=_write_featurestr, args=(args.write_path, output_Q,timewait,args.control),
                     name="writer")
    p_w.daemon = True
    p_w.start()
    #while args.control and not event.is_set():
    #    sys.exit(0)
    # wait for all producers, then tell the writer to finish
    for pb in ex_procs:
        pb.join()
    output_Q.put("kill")
    p_w.join()

def parse_args():
    """Define and parse the command-line interface for the extractor."""
    cli = argparse.ArgumentParser("")
    # optional inputs
    cli.add_argument("--keyset", type=str, required=False)
    cli.add_argument("--hp", type=str, required=False,
                     help='high confidence positive position')
    cli.add_argument("--hn", type=str, required=False,
                     help='high confidence negative position')
    # required inputs
    cli.add_argument("--bam", type=str, required=True)
    cli.add_argument("--pod5", action="store_true", default=False, required=False,
                     help='use pod5, default false')
    cli.add_argument("--bed", type=str, required=False)
    cli.add_argument("--signal", type=str, required=True)
    cli.add_argument("--tsv", type=str, required=False)
    cli.add_argument("--write_path", type=str, required=True)
    cli.add_argument("--nproc", "-p", type=int, required=True)
    # tuning knobs
    cli.add_argument("--timewait", "-t", default=0.1, type=float, required=False)
    cli.add_argument("--d_batch_size", action="store", type=int, default=2,
                     required=False)
    cli.add_argument("--control", action="store_true", default=False, required=False,
                     help='test')
    return cli.parse_args()


def main():
    """CLI entry point: parse arguments and run the extraction pipeline."""
    extract(parse_args())


if __name__ == '__main__':
    # main() returns None, so sys.exit reports exit code 0 on success
    sys.exit(main())