# import pysam
# import argparse
# import time
# import os
# import gzip
# from multiprocessing import Manager
# import multiprocessing as mp
# import sys
# from deepsignal3.utils import bam_reader
# import traceback
# from tqdm import tqdm

# import chardet

# def detect_encoding(file_path):
#     # Read a portion of the file to detect its encoding
#     with open(file_path, 'rb') as file:
#         raw_data = file.read(10000)  # read the first 10000 bytes of the file
#     result = chardet.detect(raw_data)
#     encoding = result['encoding']
#     confidence = result['confidence']
    
#     return encoding, confidence

# def _write_featurestr(write_fp, featurestr_q,time_wait = 1,control=False):
#     #print('write_process-{} starts'.format(os.getpid()))
#     output=0
#     # if os.path.exists(write_fp):
#     #     with open(write_fp, 'a') as wf:
#     #         while True:
#     #             # during test, it's ok without the sleep(time_wait)
#     #             if featurestr_q.empty():
#     #                 time.sleep(time_wait)
#     #                 continue
#     #             features_str = featurestr_q.get()
#     #             if features_str == "kill":
#     #                 #print('output line {}'.format(output))
#     #                 #print('write_process-{} finished'.format(os.getpid()))
#     #                 break
#     #             for one_features_str in features_str:
#     #                 output+=1
#     #                 wf.write(one_features_str + "\n")
#     #             wf.flush()
#     # else:
#     with open(write_fp, 'w') as wf:
#         while True:
#             # during test, it's ok without the sleep(time_wait)
#             if featurestr_q.empty():
#                 time.sleep(time_wait)
#                 continue
#             features_str = featurestr_q.get()
#             if features_str == "kill":
#                 #print('output line {}'.format(output))
#                 #print('write_process-{} finished'.format(os.getpid()))
#                 break
#             for one_features_str in features_str:
#                 output+=1                   
#                 wf.write(one_features_str + "\n")
#                 if control is True and output>=4000:
                    
#                     #event.set()
#                     sys.exit(0)
#             wf.flush()

# def process_rockfish(features_file,feature_Q,time_wait=1,reads_per_chunk=2,qsize_limit=100):
#     if features_file.endswith(".gz"):
#         infile = gzip.open(features_file, 'rt')
#     else:
#         infile = open(features_file, 'r',encoding='ascii')

#     pred_rockfish={}
#     fea_list=[]
#     pre_read_id=''
#     read_id=''
#     next(infile)
#     #encodings = ['utf-8', 'gbk', 'latin-1']
#     for line in tqdm(infile, ncols=100,desc="process_rockfish"):
#         #if features_file.endswith(".gz"):
#         #    words = line.decode('gbk').strip().split("\t")
#         #else:
#         #    words=line.strip().split("\t")
#         words=line.strip().split("\t")
#         read_id=str(words[0]).strip()
#         if pre_read_id=='':
#             pre_read_id=read_id
#         elif pre_read_id!=read_id:          
#             #print(pre_read_id)
#             #sys.stdout.flush()
#             fea_list.append((pre_read_id,pred_rockfish))
#             pre_read_id=read_id
#             pred_rockfish={}
#             if len(fea_list)>=reads_per_chunk:
#                 while feature_Q.qsize()>=qsize_limit:
#                     #print('Pausing tsv input due to INPUT queue size limit. Signal_qsize=%d' %(feature_Q.qsize()), flush=True)
#                     time.sleep(time_wait)
#                 feature_Q.put(fea_list)
#                 fea_list=[]
                
#         pred_rockfish[int(words[1])]=float(words[2])
#     if pre_read_id!=read_id:
#         fea_list.append((pre_read_id,pred_rockfish))
#     if len(fea_list)>0:
#         feature_Q.put(fea_list)
#     infile.close()

# def process_bam(bam_index,feature_Q,output_Q,chr,time_wait=1,d_batch_size=20,qsize_limit=1000):
#     fea_list=[]
#     while True:
#         while feature_Q.empty():
#             time.sleep(time_wait)
#         read_data = feature_Q.get()
#         if read_data == "kill":
#             feature_Q.put("kill")
#             break
#         for (read_name,pred_rockfish) in read_data:
#             while output_Q.qsize()>=qsize_limit:
#                 #print('Pausing bam and pod5 input due to OUTPUT queue size limit. Output_qsize=%d' %(output_Q.qsize()), flush=True)
#                 time.sleep(time_wait)
#             try:
#                 #read_iter = bam_index.get_alignments(read_name)
#                 #read_iter=bam_index.find(read_name)

#                 #for bam_read in bam_index.get_alignments(read_name):   
#                 # if bam_read.is_mapped==False or bam_read.is_supplementary or bam_read.is_secondary:
#                 #     continue
#             # except Exception as e:
#             #     print("process_bam err of readid: {}".format(read_name))
#             #     sys.stdout.flush()
#             #     continue
#                 for bam_read in bam_index.get_alignments(read_name):
#                     if bam_read.reference_name is None:
#                         print("reference_name None,process_bam skip of readid: {}".format(read_name))
#                         sys.stdout.flush()
#                         continue
#                     else:
#                         reference_name = bam_read.reference_name
#                     if reference_name.lower() != chr:
#                         print("reference_name,process_bam skip of readid: {}".format(read_name))
#                         sys.stdout.flush()
#                         continue
#                     seq = bam_read.get_forward_sequence()
#                     if seq is None:
#                         print("seq,process_bam skip of readid: {}".format(read_name))
#                         sys.stdout.flush()
#                         continue
#                     print("process_bam true of readid: {}".format(read_name))
#                     sys.stdout.flush()
#                     ref_loc = bam_read.get_reference_positions(full_length=True)
#                     for pos in pred_rockfish.keys():
#                         if pos in ref_loc:
#                             rloc = ref_loc[pos] if bam_read.is_forward else ref_loc[len(seq)-pos-1]
#                             if rloc is None:
#                                 continue
#                             fea_str='\t'.join([read_name,reference_name,str(rloc),str(pred_rockfish[pos])])
#                             fea_list.append(fea_str)
#                 if len(fea_list)>=d_batch_size:
#                     output_Q.put(fea_list)
#                     fea_list=[]
#             except KeyError as e:
#                 #traceback.print_exc()
#                 print("process_bam keyerr of readid: {}".format(read_name))
#                 sys.stdout.flush()
#                 continue
#             except OSError as e:
#                 print("process_bam oserr of readid: {}".format(read_name))
#                 sys.stdout.flush()
#                 continue
#             except UnboundLocalError as e:
#                 print("process_bam UnboundLocalError of readid: {}".format(read_name))
#                 sys.stdout.flush()
#                 continue
            
#     if len(fea_list)>0:
#         output_Q.put(fea_list)

# def extract(args):
#     manager = mp.Manager()
#     feature_Q = manager.Queue()
#     output_Q = manager.Queue()  
#     timewait=args.timewait
#     encoding, confidence = detect_encoding(args.tsv)
#     print(f"Detected encoding: {encoding}")
#     print(f"Confidence: {confidence:.2f}")
#     sys.stdout.flush()
#     bam_index=bam_reader.ReadIndexedBam(args.bam)
#     #bamfile = pysam.AlignmentFile(args.bam, "rb", check_sq=False)
#     #bam_index=pysam.IndexedReads(bamfile)
#     #bam_index.build()
#     read_iter=bam_index.get_alignments('5e96c633-5b40-4e38-904e-cfb761fd2c23')
#     for bam_read in read_iter:
#         print(bam_read.reference_name)
#         sys.stdout.flush()
#     pd=mp.Process(target=process_rockfish,args=(args.tsv,feature_Q,timewait),name="fea_reader")
#     pd.daemon = True
#     pd.start()
#     ex_dp=args.nproc - 2
#     ex_procs = []    
#     for i in range(ex_dp):
#         pb = mp.Process(target=process_bam, args=(bam_index,feature_Q,output_Q,args.chr,timewait),
#                           name="pb_reader")
            
#         pb.daemon = True
#         pb.start()
#         ex_procs.append(pb)
#     p_w = mp.Process(target=_write_featurestr, args=(args.write_path, output_Q,timewait),
#                      name="writer")
#     p_w.daemon = True
#     p_w.start()
#     pd.join()
#     feature_Q.put("kill")
#     for pb in ex_procs:
#         pb.join()
#     output_Q.put("kill")
#     p_w.join()

# def parse_args():
#     parser = argparse.ArgumentParser("")
#     parser.add_argument("--tsv",type=str,required=True)
#     parser.add_argument("--write_path",type=str,required=True)
#     parser.add_argument("--bam", type=str,required=True)
#     parser.add_argument("--chr", type=str,required=False)
#     parser.add_argument("--nproc", "-p", type=int,required=True)
#     parser.add_argument("--timewait", "-t", default=0.001, type=float,required=False)
#     return parser.parse_args()

# def main():
#     args=parse_args()
#     extract(args)

# if __name__ == '__main__':
#     sys.exit(main())
            
# import pysam
# import argparse
# import time
# import os
# import gzip
# import sys
# from deepsignal3.utils import bam_reader
# import traceback

# def _write_featurestr(write_fp, feature_list, control=False):
#     output = 0
#     with open(write_fp, 'w') as wf:
#         for features_str in feature_list:
#             for one_features_str in features_str:
#                 output += 1
#                 wf.write(one_features_str + "\n")
#                 if control and output >= 4000:
#                     sys.exit(0)
#             wf.flush()

# def process_rockfish(features_file, reads_per_chunk=2):
#     feature_list = []
#     if features_file.endswith(".gz"):
#         infile = gzip.open(features_file, 'rt')
#     else:
#         infile = open(features_file, 'r')

#     pred_rockfish = {}
#     fea_list = []
#     pre_read_id = ''
#     read_id = ''
#     next(infile)

#     for line in infile:
#         words = line.strip().split("\t")
#         read_id = words[0]
#         if pre_read_id == '':
#             pre_read_id = read_id
#         elif pre_read_id != read_id:
#             fea_list.append((pre_read_id, pred_rockfish))
#             pre_read_id = read_id
#             pred_rockfish = {}
#             if len(fea_list) >= reads_per_chunk:
#                 feature_list.append(fea_list)
#                 fea_list = []
#         pred_rockfish[int(words[1])] = float(words[2])

#     if pre_read_id != read_id:
#         fea_list.append((pre_read_id, pred_rockfish))
#     if len(fea_list) > 0:
#         feature_list.append(fea_list)
#     infile.close()

#     return feature_list

# def process_bam(bam_index, feature_list, d_batch_size=2):
#     output_list = []
#     for read_data in feature_list:
#         fea_list = []
#         for (read_name, pred_rockfish) in read_data:
#             try:
#                 for bam_read in bam_index.get_alignments(read_name):
#                     if not bam_read.is_mapped or bam_read.is_supplementary or bam_read.is_secondary:
#                         continue

#                     reference_name = bam_read.reference_name if bam_read.reference_name else "."
#                     seq = bam_read.get_forward_sequence()
#                     ref_loc = bam_read.get_reference_positions(full_length=True)

#                     for pos in pred_rockfish.keys():
#                         if pos in ref_loc:
#                             rloc = ref_loc[pos] if bam_read.is_forward else ref_loc[len(seq) - pos - 1]
#                             if rloc is None:
#                                 continue
#                             fea_str = '\t'.join([read_name, reference_name, str(rloc), str(pred_rockfish[pos])])
#                             fea_list.append(fea_str)

#                 if len(fea_list) >= d_batch_size:
#                     output_list.append(fea_list)
#                     fea_list = []
#             except Exception as e:
#                 #traceback.print_exc()
#                 print("process_bam err of readid: {}".format(read_name))
#                 continue
#         if len(fea_list) > 0:
#             output_list.append(fea_list)

#     return output_list

# def extract(args):
#     bam_index = bam_reader.ReadIndexedBam(args.bam)
    
#     # Sequential execution
#     feature_list = process_rockfish(args.tsv)
#     output_list = process_bam(bam_index, feature_list)
    
#     _write_featurestr(args.write_path, output_list)

# def parse_args():
#     parser = argparse.ArgumentParser("")
#     parser.add_argument("--tsv", type=str, required=True)
#     parser.add_argument("--write_path", type=str, required=True)
#     parser.add_argument("--bam", type=str, required=True)
#     parser.add_argument("--nproc", "-p", type=int, required=True)
#     parser.add_argument("--timewait", "-t", default=0.01, type=float, required=False)
#     return parser.parse_args()

# def main():
#     args = parse_args()
#     extract(args)

# if __name__ == '__main__':
#     sys.exit(main())

import pysam
import argparse
import time
import os
import gzip
import sys
from deepsignal3.utils import bam_reader
import traceback

def _write_featurestr(write_fp, feature_str, control=False):
    with open(write_fp, 'a') as wf:
        wf.write(feature_str + "\n")
        wf.flush()

def process_rockfish_line(line):
    """Parse one rockfish TSV line into (read_id, position, value).

    Expects at least three tab-separated fields: read id (str),
    basecall position (int) and prediction value (float).
    """
    fields = line.strip().split("\t")
    return fields[0], int(fields[1]), float(fields[2])

def process_bam_read(bam_read, pred_rockfish):
    """Map per-read prediction positions onto reference coordinates.

    Args:
        bam_read: a pysam-style aligned segment exposing is_mapped,
            is_supplementary, is_secondary, reference_name, query_name,
            is_forward, get_forward_sequence() and
            get_reference_positions(full_length=...).
        pred_rockfish: dict mapping basecall position (forward-strand read
            coordinate, int) -> prediction value (float).

    Returns:
        A list of tab-separated lines
        "query_name<TAB>reference<TAB>ref_pos<TAB>value", one per position
        that aligns to the reference.  Empty for unmapped, secondary or
        supplementary reads, or reads with no stored sequence.
    """
    fea_list = []
    if not bam_read.is_mapped or bam_read.is_supplementary or bam_read.is_secondary:
        return fea_list

    reference_name = bam_read.reference_name if bam_read.reference_name else "."
    seq = bam_read.get_forward_sequence()
    if seq is None:
        # Reads without a stored sequence cannot be coordinate-flipped below
        # (earlier versions of this tool had the same guard).
        return fea_list
    # full_length=True yields one entry per query base, None where unaligned.
    ref_loc = bam_read.get_reference_positions(full_length=True)

    for pos in pred_rockfish.keys():
        # BUG FIX: the previous `pos in ref_loc` tested whether the value
        # `pos` occurs in the list of reference positions, while `pos` is
        # then used as an INDEX into ref_loc; use an index-range check.
        if not (0 <= pos < len(ref_loc)):
            continue
        # Prediction positions are in forward-read coordinates; mirror the
        # index for reverse-strand alignments.
        rloc = ref_loc[pos] if bam_read.is_forward else ref_loc[len(seq) - pos - 1]
        if rloc is None:
            continue
        fea_str = '\t'.join([bam_read.query_name, reference_name, str(rloc), str(pred_rockfish[pos])])
        fea_list.append(fea_str)
    return fea_list

def _flush_read(bam_index, read_id, pred_rockfish, output_fp):
    """Write feature lines for one finished read, logging known lookup errors.

    Errors are reported and swallowed so that a single bad read cannot
    abort the whole run (and, unlike before, cannot corrupt the caller's
    per-read state).
    """
    try:
        for bam_read in bam_index.get_alignments(read_id):
            for fea_str in process_bam_read(bam_read, pred_rockfish):
                _write_featurestr(output_fp, fea_str)
    except KeyError:
        print("process_bam keyerr of readid: {}".format(read_id))
        sys.stdout.flush()
    except OSError:
        print("process_bam oserr of readid: {}".format(read_id))
        sys.stdout.flush()
    except UnboundLocalError:
        print("process_bam UnboundLocalError of readid: {}".format(read_id))
        sys.stdout.flush()


def extract(args):
    """Stream the rockfish TSV, group predictions per read, map them onto the BAM.

    The TSV must be grouped by read id.  For each read id, a
    {position: value} dict is collected, the read is looked up in the
    indexed BAM, and one line per reference-mapped position is appended
    to args.write_path.

    Args:
        args: parsed CLI namespace providing .tsv, .bam and .write_path
            (.nproc/.timewait/.chr are accepted by the CLI but unused here).
    """
    if args.tsv.endswith(".gz"):
        infile = gzip.open(args.tsv, 'rt')
    else:
        infile = open(args.tsv, 'r')

    bam_index = bam_reader.ReadIndexedBam(args.bam)
    output_fp = args.write_path

    # _write_featurestr appends, so start from a clean output file.
    if os.path.exists(output_fp):
        os.remove(output_fp)

    pred_rockfish = {}
    pre_read_id = ''
    with infile:
        next(infile, None)  # skip the header line; tolerate an empty file
        for line in infile:
            read_id, pos, value = process_rockfish_line(line)

            if pre_read_id and pre_read_id != read_id:
                # BUG FIX: the previous version `continue`d on lookup errors,
                # skipping the state reset below — the failed read id was then
                # retried on every following line and the new read's data was
                # dropped/merged.  Error handling now lives in _flush_read so
                # the per-read state is always reset.
                _flush_read(bam_index, pre_read_id, pred_rockfish, output_fp)
                pred_rockfish = {}

            pred_rockfish[pos] = value
            pre_read_id = read_id

        # Flush the final read; previously this path had no error handling.
        if pre_read_id:
            _flush_read(bam_index, pre_read_id, pred_rockfish, output_fp)

def parse_args():
    """Build the CLI and parse sys.argv into a namespace."""
    parser = argparse.ArgumentParser("")
    # Required string inputs: predictions TSV, output path, indexed BAM.
    for flag in ("--tsv", "--write_path", "--bam"):
        parser.add_argument(flag, type=str, required=True)
    parser.add_argument("--nproc", "-p", type=int, required=False)
    parser.add_argument("--timewait", "-t", default=0.01, type=float, required=False)
    parser.add_argument("--chr", type=str, required=False)
    return parser.parse_args()

def main():
    """Script entry point: parse CLI arguments and run the extraction."""
    extract(parse_args())

# Run only when executed as a script; main() returns None, so sys.exit
# yields exit status 0 on success.
if __name__ == '__main__':
    sys.exit(main())

# import pysam
# import argparse
# import time
# import os
# import gzip
# from multiprocessing import Manager, Process, Queue
# import sys
# from deepsignal3.utils import bam_reader
# import traceback

# def _write_featurestr(write_fp, output_Q, control=False, time_wait=0.01):
#     output = 0
#     with open(write_fp, 'w') as wf:
#         while True:
#             try:
#                 features_str = output_Q.get(timeout=time_wait)
#                 if features_str == "kill":
#                     break
#                 for one_features_str in features_str:
#                     output += 1
#                     wf.write(one_features_str + "\n")
#                     if control and output >= 4000:
#                         sys.exit(0)
#                 wf.flush()
#             except:
#                 continue

# def process_rockfish(features_file, feature_Q, reads_per_chunk=2, time_wait=0.01, qsize_limit=40):
#     if features_file.endswith(".gz"):
#         infile = gzip.open(features_file, 'rt')
#     else:
#         infile = open(features_file, 'r')

#     pred_rockfish = {}
#     fea_list = []
#     pre_read_id = ''
#     read_id = ''
#     next(infile)

#     for line in infile:
#         words = line.strip().split("\t")
#         read_id = words[0]
#         if pre_read_id == '':
#             pre_read_id = read_id
#         elif pre_read_id != read_id:
#             fea_list.append((pre_read_id, pred_rockfish))
#             pre_read_id = read_id
#             pred_rockfish = {}
#             if len(fea_list) >= reads_per_chunk:
#                 while feature_Q.qsize() >= qsize_limit:
#                     time.sleep(time_wait)
#                 feature_Q.put(fea_list)
#                 fea_list = []
#         pred_rockfish[int(words[1])] = float(words[2])

#     if pre_read_id != read_id:
#         fea_list.append((pre_read_id, pred_rockfish))
#     if len(fea_list) > 0:
#         feature_Q.put(fea_list)
#     infile.close()

# def process_bam(bam_index, feature_Q, output_Q,chr, d_batch_size=2, time_wait=0.01, qsize_limit=40):
#     fea_list = []
#     while True:
#         try:
#             read_data = feature_Q.get(timeout=time_wait)
#             if read_data == "kill":
#                 break
#             for (read_name, pred_rockfish) in read_data:
#                 try:
#                     for bam_read in bam_index.get_alignments(read_name):
#                         if not bam_read.is_mapped or bam_read.is_supplementary or bam_read.is_secondary:
#                             continue
#                         reference_name = bam_read.reference_name if bam_read.reference_name else "."
#                         if reference_name != chr:
#                             continue
#                         seq = bam_read.get_forward_sequence()
#                         if seq is None:
#                             continue
#                         ref_loc = bam_read.get_reference_positions(full_length=True)

#                         for pos in pred_rockfish.keys():
#                             if pos in ref_loc:
#                                 rloc = ref_loc[pos] if bam_read.is_forward else ref_loc[len(seq) - pos - 1]
#                                 if rloc is None:
#                                     continue
#                                 fea_str = '\t'.join([read_name, reference_name, str(rloc), str(pred_rockfish[pos])])
#                                 fea_list.append(fea_str)
#                     if len(fea_list) >= d_batch_size:
#                         output_Q.put(fea_list)
#                         fea_list = []
#                 except Exception as e:
#                     traceback.print_exc()
#                     continue
#         except:
#             continue

#     if len(fea_list) > 0:
#         output_Q.put(fea_list)

# def extract(args):
#     manager = Manager()
#     feature_Q = manager.Queue(maxsize=1000)  # Increase queue size for better throughput
#     output_Q = manager.Queue(maxsize=1000)

#     timewait = args.timewait
#     bam_index = bam_reader.ReadIndexedBam(args.bam)

#     pd = Process(target=process_rockfish, args=(args.tsv, feature_Q, 2, timewait, 1000), name="fea_reader")
#     pd.start()

#     ex_procs = []
#     for _ in range(args.nproc - 2):
#         pb = Process(target=process_bam, args=(bam_index, feature_Q, output_Q,args.chr, 2, timewait, 1000), name="pb_reader")
#         pb.start()
#         ex_procs.append(pb)

#     p_w = Process(target=_write_featurestr, args=(args.write_path, output_Q, False, timewait), name="writer")
#     p_w.start()

#     pd.join()
#     feature_Q.put("kill")
#     for pb in ex_procs:
#         pb.join()

#     output_Q.put("kill")
#     p_w.join()

# def parse_args():
#     parser = argparse.ArgumentParser("")
#     parser.add_argument("--tsv", type=str, required=True)
#     parser.add_argument("--write_path", type=str, required=True)
#     parser.add_argument("--bam", type=str, required=True)
#     parser.add_argument("--nproc", "-p", type=int, required=True)
#     parser.add_argument("--timewait", "-t", default=0.01, type=float, required=False)
#     parser.add_argument("--chr", type=str, required=False)
#     return parser.parse_args()

# def main():
#     args = parse_args()
#     extract(args)

# if __name__ == '__main__':
#     sys.exit(main())
