import argparse
import gzip
import math
import multiprocessing as mp
import os
import re
import shlex
import shutil
import subprocess
import sys
import time
import traceback
from collections import defaultdict
from itertools import groupby
from multiprocessing import Queue

import pysam
import tabix

from generate_per_read_modscall import _generate_sorted_per_read_calls

# Queue back-pressure settings: producer processes sleep `time_wait` seconds
# while an inter-process queue holds more than `queue_size_border` batches.
queue_size_border = 1000
time_wait = 1

# Complement lookup for DNA bases, including IUPAC ambiguity codes.
basepairs = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'N': 'N',
             'W': 'W', 'S': 'S', 'M': 'K', 'K': 'M', 'R': 'Y',
             'Y': 'R', 'B': 'V', 'V': 'B', 'D': 'H', 'H': 'D',
             'Z': 'Z'}
# Complement lookup for RNA bases (A pairs with U instead of T).
basepairs_rna = {'A': 'U', 'C': 'G', 'G': 'C', 'U': 'A', 'N': 'N',
                 'W': 'W', 'S': 'S', 'M': 'K', 'K': 'M', 'R': 'Y',
                 'Y': 'R', 'B': 'V', 'V': 'B', 'D': 'H', 'H': 'D',
                 'Z': 'Z'}

def _alphabet(letter, dbasepairs):
    if letter in dbasepairs.keys():
        return dbasepairs[letter]
    return 'N'

def complement_seq(base_seq, seq_type="DNA"):
    """Return the reverse complement of `base_seq`.

    Args:
        base_seq: sequence string (upper-case bases / IUPAC codes).
        seq_type: "DNA" or "RNA", selecting the pairing table.

    Returns:
        The reverse-complemented sequence; unknown letters map to 'N'.

    Raises:
        ValueError: if `seq_type` is neither "DNA" nor "RNA".
        (The original code swallowed this error with a broad `except` and
        silently returned an empty string; `_alphabet` itself never raises,
        so the try/except served no other purpose.)
    """
    if seq_type == "DNA":
        pairing = basepairs
    elif seq_type == "RNA":
        pairing = basepairs_rna
    else:
        raise ValueError("the seq_type must be DNA or RNA")
    return ''.join(_alphabet(x, pairing) for x in reversed(base_seq))

def open_input_bamfile(bamfile, threads=1):
    """Open a BAM/SAM file for reading and return the pysam.AlignmentFile.

    Non-.bam paths are opened as SAM text.  BAM files are first opened
    normally; if that fails (e.g. an unaligned BAM without @SQ header
    lines), they are reopened with the header check disabled.
    """
    if not bamfile.endswith(".bam"):
        return pysam.AlignmentFile(bamfile, 'r', threads=threads)
    try:
        return pysam.AlignmentFile(bamfile, 'rb', threads=threads)
    except ValueError:
        # retry without requiring @SQ header entries
        return pysam.AlignmentFile(bamfile, 'rb', check_sq=False, threads=threads)

def _get_necessary_alignment_items(readitem):
    seq_name = readitem.query_name
    flag = readitem.flag
    ref_name = readitem.reference_name
    ref_start = readitem.reference_start
    mapq = readitem.mapping_quality
    cigartuples = readitem.cigartuples
    rnext = readitem.next_reference_name
    pnext = readitem.next_reference_start
    tlen = readitem.template_length
    seq_seq = readitem.query_sequence
    seq_qual = readitem.query_qualities
    all_tags = readitem.get_tags(with_value_type=True)
    is_reverse = readitem.is_reverse
    return (seq_name, flag, ref_name, ref_start, mapq, cigartuples,
            rnext, pnext, tlen, seq_seq, seq_qual, all_tags, is_reverse)

def _worker_reader(bamfile, batch_size, rreads_q, threads=1):
    """Producer process: stream alignments from `bamfile` and push them onto
    `rreads_q` as lists of `batch_size` picklable tuples.

    Pushes a final "kill" sentinel when the input is exhausted.  Sleeps
    whenever the queue grows past `queue_size_border` pending batches.
    On any failure it prints a traceback and hard-exits the process.
    """
    try:
        ori_bam = open_input_bamfile(bamfile, threads=threads)
        cnt_all = 0
        reads_batch = []
        for readitem in ori_bam.fetch(until_eof=True):
            readitem_info = _get_necessary_alignment_items(readitem)
            reads_batch.append(readitem_info)
            cnt_all += 1
            if cnt_all % batch_size == 0:
                rreads_q.put(reads_batch)
                reads_batch = []
                # throttle the producer if downstream workers fall behind
                while rreads_q.qsize() > queue_size_border:
                    time.sleep(time_wait)
        ori_bam.close()
        if len(reads_batch) > 0:
            rreads_q.put(reads_batch)
        rreads_q.put("kill")
        sys.stderr.write("read {} reads from input file\n".format(cnt_all))
    except Exception:  # catch any crash in this worker process
        sys.stderr.write(f"Process {os.getpid()} crashed!\n")
        traceback.print_exc() # print the full stack trace
        sys.stderr.flush()    # flush so the error is visible on the terminal
        os._exit(1)           # hard-exit so the main process does not hang waiting

def _convert_locstr(locstr):
    return [int(x) for x in locstr.split(",")]

def _convert_probstr(probstr):
    return [float(x) for x in probstr.split(",")]

def _fetch_locprobs_of_a_read_from_tabixobj2(readname, tabixobj):
    try:
        rows = tabixobj.query(readname, 0, 5000000)
        row_list = []
        for row in rows:
            row_list.append(row)
        if len(row_list) == 1:
            return _convert_locstr(row_list[0][4]), _convert_probstr(row_list[0][5])
        else:
            locs_0, probs_0 = _convert_locstr(row_list[0][4]), _convert_probstr(row_list[0][5])
            loc_probs = list(zip(locs_0, probs_0))
            locs_set = set(locs_0)
            for ridx in range(1, len(row_list)):
                locs_tmp, probs_tmp = _convert_locstr(row_list[ridx][4]), _convert_probstr(row_list[ridx][5])
                for lidx in range(len(locs_tmp)):
                    if locs_tmp[lidx] not in locs_set:
                        locs_set.add(locs_tmp[lidx])
                        loc_probs.append((locs_tmp[lidx], probs_tmp[lidx]))
            loc_probs = sorted(loc_probs, key=lambda x: x[0])
            loc_probs = list(zip(*loc_probs))
            return loc_probs[0], loc_probs[1]
    except tabix.TabixError:
        sys.stderr.write("TabixError: {}\n".format(readname))
        return None

def query_locs_probs_of_a_read(readname, tabixobj):
    """Return (locs, probs) for `readname`, or (None, None) if it has no
    entry in the per-read tabix file."""
    result = _fetch_locprobs_of_a_read_from_tabixobj2(readname, tabixobj)
    if result is None:
        return None, None
    return result[0], result[1]

def _convert_locs_to_mmtag(locs, seq_fwseq, base):
    assert len(locs) > 0
    base_alllocs = [i.start() for i in re.finditer(base, seq_fwseq)]
    base_orders = [-1] * len(locs)
    order_idx = 0
    for base_idx in range(0, len(base_alllocs)):
        try:
            if base_alllocs[base_idx] == locs[order_idx]:
                base_orders[order_idx] = base_idx
                order_idx += 1
        except IndexError:
            break
    assert base_orders[-1] != -1
    mm_idxes = [base_orders[0]]
    for i in range(1, len(base_orders)):
        mm_idxes.append(base_orders[i] - 1 - base_orders[i-1])
    return mm_idxes

def _convert_probs_to_mltag(probs):
    return [math.floor(prob * 256) if prob < 1 else 255 for prob in probs]

def _refill_tags(all_tags, mm_values, ml_values, rm_pulse=True, base="C"):
    mod_code = "m" if base == "C" else "a"
    new_tags = []
    for tagtuple in all_tags:
        if tagtuple[0] in {"MM", "ML"}:
            continue
        if rm_pulse and tagtuple[0] in {"fi", "fp", "ri", "rp"}:
            continue
        new_tags.append((tagtuple[0], tagtuple[1]))
    if mm_values is not None:
        mm_tag = f"{base}+{mod_code}," + ",".join(list(map(str, mm_values))) + ";"
        new_tags.append(('MM', mm_tag))
        new_tags.append(('ML', ml_values))
    return new_tags

def _worker_process_reads_batch(rreads_q, wreads_q, tabix_file, rm_pulse=True, base="C"):
    """Tag-worker process: take read batches from `rreads_q`, look up each
    read's per-read mod calls in `tabix_file`, compute MM/ML tag values,
    and push the re-tagged batches onto `wreads_q`.

    Terminates on the "kill" sentinel, which it re-queues so sibling
    workers also shut down.  On any failure it prints a traceback and
    hard-exits the process.
    """
    try:
        perread_tbx = tabix.open(tabix_file)
        while True:
            if rreads_q.empty():
                time.sleep(time_wait)
                continue
            reads_batch = rreads_q.get()
            if reads_batch == "kill":
                # put the sentinel back for the other tag workers
                rreads_q.put("kill")
                break
            wreads_tmp = []
            for rread in reads_batch:
                seq_name, flag, ref_name, ref_start, mapq, cigartuples, rnext, pnext, tlen, \
                    seq_seq, seq_qual, all_tags, is_reverse = rread
                mm_values = ml_values = None
                mm_flag = 0
                if seq_seq is not None:
                    # recover the read's original sequencing orientation for
                    # reverse-mapped reads; MM locations refer to that strand
                    seq_fwdseq = complement_seq(seq_seq) if is_reverse else seq_seq
                    locs, probs = query_locs_probs_of_a_read(seq_name, perread_tbx)
                    if locs is not None:
                        try:
                            mm_values = _convert_locs_to_mmtag(locs, seq_fwdseq, base)
                            ml_values = _convert_probs_to_mltag(probs)
                            mm_flag = 1
                        except AssertionError:
                            sys.stderr.write("AssertionError, processing without MM/ML tags-{}.\n".format(seq_name))
                else:
                    sys.stderr.write("No query sequence, processing without MM/ML tags-{}.\n".format(seq_name))
                new_tags = _refill_tags(all_tags, mm_values, ml_values, rm_pulse, base)
                wreads_tmp.append((seq_name, flag, ref_name, ref_start, mapq, cigartuples, rnext, pnext, tlen,
                                seq_seq, seq_qual, new_tags, mm_flag))
            if len(wreads_tmp) > 0:
                wreads_q.put(wreads_tmp)
                # throttle if the writer process falls behind
                while wreads_q.qsize() > queue_size_border:
                    time.sleep(time_wait)
    except Exception:  # catch any crash in this worker process
        sys.stderr.write(f"Process {os.getpid()} crashed!\n")
        traceback.print_exc() # print the full stack trace
        sys.stderr.flush()    # flush so the error is visible on the terminal
        os._exit(1)           # hard-exit so the main process does not hang waiting

def write_alignedsegment(readitem_info, output_bam):
    """Rebuild a pysam.AlignedSegment from a 13-field tuple (as produced by
    the tag workers, with refreshed tags) and write it to `output_bam`.
    """
    seq_name, flag, ref_name, ref_start, mapq, cigartuples, \
        rnext, pnext, tlen, seq_seq, seq_qual, all_tags, mm_flag = readitem_info
    out_read = pysam.AlignedSegment(output_bam.header)
    out_read.query_name = seq_name
    out_read.flag = flag
    out_read.reference_name = ref_name
    out_read.reference_start = ref_start
    out_read.mapping_quality = mapq
    out_read.cigar = cigartuples
    out_read.next_reference_name = rnext
    out_read.next_reference_start = pnext
    out_read.template_length = tlen
    out_read.query_sequence = seq_seq
    # qualities are set after the sequence: pysam clears query_qualities
    # whenever query_sequence is assigned
    out_read.query_qualities = seq_qual
    if len(all_tags) >= 1:
        out_read.set_tags(all_tags)
    output_bam.write(out_read)

def _worker_write_modbam(wreads_q, modbamfile, inputbamfile, threads=1):
    """Writer process: consume re-tagged read batches from `wreads_q` and
    write them to `modbamfile`, using the input BAM's header as template.

    Closes the output and reports counts when the "kill" sentinel arrives.
    On any failure it prints a traceback and hard-exits the process.
    """
    try:
        ori_bam = open_input_bamfile(inputbamfile)
        w_bam = pysam.AlignmentFile(modbamfile, "wb", template=ori_bam, threads=threads)
        ori_bam.close()
        cnt_w, cnt_mm = 0, 0
        while True:
            if wreads_q.empty():
                time.sleep(time_wait)
                continue
            wreads_batch = wreads_q.get()
            if wreads_batch == "kill":
                w_bam.close()
                sys.stderr.write("write {} reads, in which {} were added mm tags\n".format(cnt_w, cnt_mm))
                break
            for walignseg in wreads_batch:
                mm_flag = walignseg[-1]
                write_alignedsegment(walignseg, w_bam)
                cnt_w += 1
                cnt_mm += mm_flag
    except Exception:  # catch any crash in this worker process
        sys.stderr.write(f"Process {os.getpid()} crashed!\n")
        traceback.print_exc() # print the full stack trace
        sys.stderr.flush()    # flush so the error is visible on the terminal
        os._exit(1)           # hard-exit so the main process does not hang waiting

def parse_per_read_modcalls(per_readsite):
    """Parse a per-read modification-call file, supporting the old 10-column
    format, the new 11-column format (a record split across two physical
    lines), and .gz compressed input.

    Returns a dict mapping read name to a position-sorted list of
    (position, probability) tuples.
    """
    data = defaultdict(list)
    opener = gzip.open if per_readsite.endswith('.gz') else open
    mode = 'rt' if per_readsite.endswith('.gz') else 'r'
    with opener(per_readsite, mode) as f:
        current_line = []
        for line in f:
            fields = line.strip().split()
            if not current_line:
                current_line = fields
            else:
                # record continued on a following physical line
                current_line.extend(fields)
            if len(current_line) >= 10:
                read_name = current_line[4]
                position = int(current_line[5])
                # probability is in column 6 (old format) or column 7 (new)
                prob_idx = 6 if current_line[6].replace('.', '', 1).isdigit() else 7
                probability = float(current_line[prob_idx])
                data[read_name].append((position, probability))
                current_line = []
    for read_name in data:
        data[read_name].sort(key=lambda x: x[0])
    return data

def _raw_line_parser(per_readsite):
    """Generator: stream the raw per-read file and yield
    (read_name, position, probability) tuples.

    Handles the same formats as parse_per_read_modcalls (10- or 11-column
    records, possibly split across two physical lines, optionally gzipped),
    but skips malformed records instead of raising.
    """
    opener = gzip.open if per_readsite.endswith('.gz') else open
    mode = 'rt' if per_readsite.endswith('.gz') else 'r'
    
    with opener(per_readsite, mode) as f:
        current_line = []
        for line in f:
            fields = line.strip().split()
            if not current_line:
                current_line = fields
            else:
                current_line.extend(fields)
            
            # same multi-line accumulation logic as the original parser
            if len(current_line) >= 10:
                read_name = current_line[4]
                try:
                    position = int(current_line[5])
                    # probability is in column 6 (old format) or column 7 (new)
                    prob_idx = 6 if current_line[6].replace('.', '', 1).isdigit() else 7
                    probability = float(current_line[prob_idx])
                    yield read_name, position, probability
                except (ValueError, IndexError):
                    pass # skip malformed records
                
                current_line = []

def generate_per_read_modcalls_tabix(per_readsite, per_read_file):
    """Convert a per-site modification-call file into a tabix-indexed
    per-read BED file.

    Pipeline:
      1. awk extracts (read_name, pos, prob) columns and sort orders them by
         read name then numeric position, with sort's temporary files forced
         onto the output file's (large) disk.
      2. Python aggregates the sorted rows, one BED line per read.
      3. bgzip compresses and tabix indexes the result.

    All interpolated paths are shell-quoted so filenames containing spaces
    or shell metacharacters cannot break the pipeline (or inject commands)
    — the unquoted f-string interpolation was the original defect here.

    Args:
        per_readsite: input per-site call file (optionally .gz).
        per_read_file: output BED path (before compression).

    Returns:
        Path of the bgzipped, indexed file (per_read_file + ".gz").

    Raises:
        subprocess.CalledProcessError: if any external command fails.
    """
    sys.stderr.write(f"High-Performance Stream processing {per_readsite}...\n")

    # Use the output file's directory for sort's temp files: it is expected
    # to have enough free space (unlike a possibly small /tmp).
    big_disk_dir = os.path.dirname(os.path.abspath(per_read_file))
    # Also export TMPDIR as a second safeguard against spilling to /tmp.
    os.environ['TMPDIR'] = big_disk_dir
    if not os.path.exists(big_disk_dir):
        os.makedirs(big_disk_dir)

    temp_sorted = per_read_file + ".sorted.tmp"

    # Quote every path interpolated into the shell command line.
    q_in = shlex.quote(per_readsite)
    q_tmp = shlex.quote(temp_sorted)
    q_dir = shlex.quote(big_disk_dir)

    # Reader stage: transparently handle gzip-compressed input.
    if per_readsite.endswith('.gz'):
        cat_cmd = f"gunzip -c {q_in}"
    else:
        cat_cmd = f"cat {q_in}"

    # awk replaces the Python line parser (awk columns are 1-based, so
    # Python index [4] is $5): read_name=$5, pos=$6, and the probability is
    # $7 when it looks numeric (old 10-column format) else $8 (new format).
    awk_cmd = "awk '{if($7 ~ /^[0-9.]+$/) print $5\"\t\"$6\"\t\"$7; else print $5\"\t\"$6\"\t\"$8}'"

    # Sort by read name then numeric position; -T forces temp files onto the
    # big disk, --parallel/-S trade CPU cores and memory for speed.
    sort_cmd = f"sort -T {q_dir} --parallel=8 -S 20% -k1,1 -k2,2n"

    full_cmd = f"{cat_cmd} | {awk_cmd} | {sort_cmd} > {q_tmp}"

    sys.stderr.write(f"Executing pipeline (Temp dir: {big_disk_dir})...\n")
    try:
        subprocess.run(full_cmd, shell=True, check=True)
    except subprocess.CalledProcessError as e:
        sys.stderr.write(f"Pipeline failed: {e}\n")
        raise e

    # Aggregation: the sorted stream is clean "read\tpos\tprob" rows, so one
    # groupby pass collapses each read into a single BED record.
    sys.stderr.write("Aggregating sorted data...\n")
    with open(temp_sorted, 'r') as f_in, open(per_read_file, 'w') as f_out:
        def line_reader():
            for line in f_in:
                yield line.strip().split('\t')

        for read_name, group in groupby(line_reader(), key=lambda x: x[0]):
            locs = []
            probs = []
            for _, pos, prob in group:
                locs.append(pos)
                probs.append(prob)
            if locs:  # guard against empty groups
                loc_string = ",".join(locs)
                prob_string = ",".join(probs)
                f_out.write(f"{read_name}\t1\t1\t.\t{loc_string}\t{prob_string}\n")

    # Clean up the intermediate sorted file, then compress and index.
    if os.path.exists(temp_sorted):
        os.remove(temp_sorted)

    sys.stderr.write("Indexing...\n")
    subprocess.run(["bgzip", "-f", per_read_file], check=True)
    subprocess.run(["tabix", "-f", "-p", "bed", per_read_file + ".gz"], check=True)

    return per_read_file + ".gz"

def add_mm_ml_tags_to_bam(bamfile, per_readsite, modbamfile,
                          rm_pulse=True, threads=3,
                          reads_batch=100, mode="align", inter_dir=None,
                          base="C"):
    """Add MM/ML modification tags to a BAM file from per-read mod calls.

    Builds a tabix-indexed per-read call file, then runs a three-stage
    multiprocessing pipeline (reader -> tag workers -> writer) to rewrite
    the BAM with MM/ML tags.  In "align" mode the output is also sorted
    and indexed.

    Args:
        bamfile: input BAM/SAM path.
        per_readsite: per-read modification-call file (may be .gz).
        modbamfile: output path; defaults to <bamfile>.modbam.bam when None.
        rm_pulse: drop fi/fp/ri/rp pulse tags from output reads.
        threads: total worker budget (a minimum of 5 is enforced below).
        reads_batch: number of reads per queue batch.
        mode: "align" sorts+indexes the output; "denovo" leaves it as-is.
        inter_dir: directory for intermediate files (created if needed).
        base: target base, "C" (5mC) or "A" (6mA).
    """
    sys.stderr.write("[generate_modbam_file]starts\n")
    start = time.time()

    if inter_dir:
        os.makedirs(inter_dir, exist_ok=True)

    sys.stderr.write("generating per_read mod_calls..\n")
    per_read_filename = "per_read_mod_calls.per_read.bed"
    per_read_file = os.path.join(inter_dir, per_read_filename) if inter_dir else per_read_filename
    # returns the bgzipped+indexed path (original path + ".gz")
    per_read_file = generate_per_read_modcalls_tabix(per_readsite, per_read_file)

    sys.stderr.write("add per_read mod_calls to bam file..\n")
    rreads_q = Queue()
    wreads_q = Queue()

    # Split the thread budget: threads_r/threads_w go to BAM (de)compression
    # in the reader/writer; the remainder becomes tag-worker processes.
    nproc = threads
    if nproc < 5:
        nproc = 5
    if nproc > 8:
        threads_r, threads_w = 4, 4
    elif nproc > 6:
        threads_r, threads_w = 3, 3
    else:
        threads_r, threads_w = 2, 2

    p_read = mp.Process(target=_worker_reader,
                        args=(bamfile, reads_batch, rreads_q, threads_r))
    p_read.daemon = True
    p_read.start()

    ps_gen = []
    for _ in range(nproc - threads_r - threads_w):
        p_gen = mp.Process(target=_worker_process_reads_batch,
                           args=(rreads_q, wreads_q, per_read_file, rm_pulse, base))
        p_gen.daemon = True
        p_gen.start()
        ps_gen.append(p_gen)

    fname, fext = os.path.splitext(bamfile)
    if modbamfile is None:
        modbamfile = fname + ".modbam.bam"
    p_w = mp.Process(target=_worker_write_modbam,
                     args=(wreads_q, modbamfile, bamfile, threads_w))
    p_w.daemon = True
    p_w.start()

    # The writer's sentinel is sent only after all tag workers have drained
    # the read queue and exited.
    for p in ps_gen:
        p.join()
    p_read.join()
    wreads_q.put("kill")
    p_w.join()

    if modbamfile.endswith(".bam") and mode == "align":
        sys.stderr.write("sorting and indexing new bam file..\n")
        modbam_sorted = os.path.join(inter_dir, os.path.basename(modbamfile) + ".sorted.bam") if inter_dir else modbamfile + ".sorted.bam"
        pysam.sort("-o", modbam_sorted, "-@", str(threads), modbamfile)
        os.rename(modbam_sorted, modbamfile)
        pysam.index("-@", str(threads), modbamfile)

    # Remove the intermediate per-read file and its tabix index
    # (per_read_file ends in ".gz", so the index is "<file>.gz.tbi").
    if os.path.exists(per_read_file):
        os.remove(per_read_file)
    if os.path.exists(per_read_file + ".tbi"):
        os.remove(per_read_file + ".tbi")

    endtime = time.time()
    sys.stderr.write("[generate_modbam_file]costs {:.1f} seconds\n".format(endtime - start))

def main():
    """Command-line entry point: parse arguments and run add_mm_ml_tags_to_bam.

    NOTE(review): the --rm_pulse flag defaults to False here, while the
    library function add_mm_ml_tags_to_bam defaults rm_pulse=True -- confirm
    this difference is intended.
    """
    parser = argparse.ArgumentParser("add MM/ML tags to bam/sam for methylation analysis")
    parser.add_argument("--per_readsite", type=str, required=True, help="Per-read modification calls from call_mods module (can be .gz compressed)")
    parser.add_argument("--bam", type=str, required=True, help="Input BAM file")
    parser.add_argument("--base", type=str, choices=["C", "A"], default="C", help="Base to analyze (C for 5mC, A for 6mA), default: C")
    parser.add_argument("--mode", type=str, default="align", choices=["denovo", "align"],
                        help="Mode: denovo for unaligned hifi.bam, align for aligned hifi.bam, default: align")
    parser.add_argument("--modbam", type=str, required=False, help="Output modbam file")
    parser.add_argument("--rm_pulse", action="store_true", default=False,
                        help="Remove IPD/PW tags in the BAM file")
    parser.add_argument("--threads", "-p", type=int, default=10,
                        help="Number of threads to use, default: 10")
    parser.add_argument("--batch_size", type=int, default=100,
                        help="Batch size of reads to process at once, default: 100")
    parser.add_argument("--inter_dir", type=str, required=False,
                        help="Directory to store intermediate files")

    args = parser.parse_args()

    add_mm_ml_tags_to_bam(args.bam, args.per_readsite, args.modbam,
                          args.rm_pulse, args.threads, args.batch_size,
                          args.mode, args.inter_dir, args.base)

if __name__ == '__main__':
    main()