
#from datasets import IterableDataset
import pod5
import numpy as np
import sys
import argparse
import math
import time
import os

import torch
from torch.utils.data import DataLoader
from torch.utils.data import IterableDataset
from torch.multiprocessing import Queue
import torch.multiprocessing as mp

from deepsignal3.utils import bam_reader
from deepsignal3.utils.process_utils import str2bool
from deepsignal3.utils.process_utils import complement_seq
from deepsignal3.utils.process_utils import fill_files_queue
from deepsignal3.utils.process_utils import get_motif_seqs
from deepsignal3.utils.process_utils import get_files
from deepsignal3.utils.process_utils import get_refloc_of_methysite_in_motif
from deepsignal3.extract_features_pod5 import _group_signals_by_movetable_v2

time_wait = 0.01

def read_bed(bisulfite_bed, strict=False):
    """Load per-site methylation frequencies from a bisulfite BED file.

    Each BED line must carry at least 11 whitespace-separated fields:
    chrom (col 0), 0-based position (col 1), strand (col 5), read depth
    (col 9) and methylation frequency (col 10). Sites with depth < 5 are
    discarded.

    :param bisulfite_bed: path to the bisulfite BED file
    :param strict: if True, keep only sites whose opposite-strand partner
        (pos+1 on '-' for a '+' site, pos-1 on '+' for a '-' site) agrees:
        both frequencies >= 95 (recorded as 100) or both <= 5 (recorded as 0)
    :return: dict mapping "chrom||pos" -> frequency
    """
    key_sep = "||"
    depth_info = {}
    value_info = {}

    with open(bisulfite_bed, "r") as rf:
        for line in rf:
            fields = line.strip().split()
            chrom, pos, strand = fields[0], int(fields[1]), fields[5]
            depth, value = int(fields[9]), float(fields[10])
            if depth < 5:
                continue
            # chrom + position + strand uniquely identify a site
            site_key = key_sep.join([chrom, str(pos), strand])
            depth_info[site_key] = depth
            value_info[site_key] = value

    freqinfo = {}

    if strict:
        # Strict mode: require a matching opposite-strand measurement and
        # keep only concordant fully-methylated / fully-unmethylated sites.
        for site_key, fwd_value in value_info.items():
            chrom, pos, strand = site_key.split(key_sep)
            pos = int(pos)
            partner_pos = pos + 1 if strand == '+' else pos - 1
            partner_strand = '-' if strand == '+' else '+'
            pair_key = key_sep.join([chrom, str(partner_pos), partner_strand])

            if pair_key not in value_info:
                continue
            rev_value = value_info[pair_key]

            # strand-less output key
            freq_key = key_sep.join([chrom, str(pos)])
            if fwd_value >= 95 and rev_value >= 95:
                freqinfo[freq_key] = 100
            elif fwd_value <= 5 and rev_value <= 5:
                freqinfo[freq_key] = 0
    else:
        # Non-strict mode: record every site that passed the depth filter.
        for site_key, value in value_info.items():
            chrom, pos, _strand = site_key.split(key_sep)
            freqinfo[key_sep.join([chrom, str(int(pos))])] = value
    print("len(freqinfo): ", len(freqinfo))

    return freqinfo
def get_q2tloc_from_cigar(r_cigar_tuple, strand, seq_len):
    """Map each query (read) position of the aligned region to a reference offset.

    insertion: -1, deletion: -2, mismatch: -3
    :param r_cigar_tuple: pysam.alignmentSegment.cigartuples
    :param strand: 1/-1 for fwd/rev
    :param seq_len: read alignment length
    :return: int32 numpy array of length seq_len + 1; entry i holds the
        reference offset of query position i (-1 for inserted bases), the
        final entry holds the one-past-the-end reference offset
    """
    fill_invalid = -2
    # one slot per query base plus a terminal sentinel slot
    q_to_r_poss = np.full(seq_len + 1, fill_invalid, dtype=np.int32)
    r_off = 0
    q_off = 0
    # walk CIGAR ops in read direction
    ops = r_cigar_tuple if strand == 1 else list(reversed(r_cigar_tuple))
    for op, length in ops:
        if op == 1:
            # insertion: query bases with no reference counterpart
            q_to_r_poss[q_off:q_off + length] = -1
            q_off += length
        elif op in (2, 3):
            # deletion / reference skip: consumes reference only
            r_off += length
        elif op in (0, 7, 8):
            # match / equal / mismatch: consumes both sequences in lockstep
            for k in range(length):
                q_to_r_poss[q_off + k] = r_off + k
            q_off += length
            r_off += length
        elif op == 6:
            # padding (shouldn't happen in mappy)
            pass
    # terminal entry: one past the last aligned reference position
    q_to_r_poss[q_off] = r_off
    if q_to_r_poss[-1] == fill_invalid:
        raise ValueError(
            "Invalid cigar string encountered. Reference length: {}  Cigar "
            "implied reference length: {}".format(seq_len, r_off)
        )
    return q_to_r_poss
def align_signals_and_extend_ref_seq(pos_pair, read_signal, read_seq, ref_seq,motif_seqs,methyloc,strand,ref_start,ref_end,boundary=30):
    """Project per-base read signal groups onto the reference sequence and
    locate motif sites in the rebuilt sequence.

    :param pos_pair: list of (read_pos, ref_pos) tuples in read orientation;
        either member may be None (insertion/deletion)
    :param read_signal: signal chunks, one list per read base
    :param read_seq: read sequence (read orientation)
    :param ref_seq: aligned reference sequence from the caller
    :param motif_seqs: motif sequences to scan for target sites
    :param methyloc: 0-based offset of the target base within the motif
    :param strand: "+" or "-" mapping strand
    :param ref_start: reference alignment start (0-based)
    :param ref_end: reference alignment end (exclusive per pysam convention
        in the caller — confirm)
    :param boundary: sites within this many bases of either sequence end are
        dropped (no full feature window can be cut)
    :return: (new_ref_seq, new_ref_signal, ref_readlocs, ref_poss, pred_pos)
        where ref_readlocs maps site index in new_ref_seq -> genomic position
    """
    # Indices of the first and last pos_pair entries that map to the reference.
    first_valid_index = next((i for i, (_, ref_pos) in enumerate(pos_pair) if ref_pos is not None), len(pos_pair))
    last_valid_index = len(pos_pair) - 1 - next((i for i, (_, ref_pos) in enumerate(reversed(pos_pair)) if ref_pos is not None), len(pos_pair))

    # Build the reference-oriented sequence and its per-base signal groups.
    new_ref_seq = []
    new_ref_signal = []

    # Head: read bases before the first reference-aligned position.
    for i in range(first_valid_index):
        read_pos, _ = pos_pair[i]
        if read_pos is not None:
            new_ref_seq.append(read_seq[read_pos])
            new_ref_signal.append(read_signal[read_pos])

    # NOTE(review): this starts at -1 when the head is empty and is never
    # None, so the `is not None` guard below never skips — confirm intent.
    last_valid_ref_pos = len(new_ref_seq) - 1

    # Middle: the reference-aligned span.
    for i in range(first_valid_index, last_valid_index + 1):
        read_pos, ref_pos = pos_pair[i]

        if ref_pos is not None:
            # Aligned or deleted reference base; deletions (read_pos None)
            # get an empty signal chunk.
            new_ref_seq.append(ref_seq[ref_pos])
            new_ref_signal.append(read_signal[read_pos] if read_pos is not None else [])
            last_valid_ref_pos = len(new_ref_seq) - 1

        elif ref_pos is None:
            if last_valid_ref_pos is not None:
                # Insertion: fold its signal into the previous reference base.
                # NOTE(review): extend() mutates the list object taken directly
                # from read_signal, so the caller's signal groups are modified
                # in place — verify this aliasing is intended.
                new_ref_signal[last_valid_ref_pos].extend(read_signal[read_pos])

    # Tail: read bases after the last reference-aligned position.
    for i in range(last_valid_index + 1, len(pos_pair)):
        read_pos, _ = pos_pair[i]
        if read_pos is not None:
            new_ref_seq.append(read_seq[read_pos])
            new_ref_signal.append(read_signal[read_pos])
            

    new_ref_seq = ''.join(base  for base in new_ref_seq)
    ref_readlocs = dict()
    ref_poss = []
    pred_pos = []
    ref_pos = -1
    # 0-based motif-site locations within the rebuilt sequence.
    tsite_locs = get_refloc_of_methysite_in_motif(
        new_ref_seq, set(motif_seqs), methyloc)
    for loc_in_read in tsite_locs:
        # Too close to either end to cut a full feature window.
        if loc_in_read<boundary or len(new_ref_seq)-loc_in_read<boundary:
            continue
        # In the unaligned head/tail: no genomic coordinate available.
        if loc_in_read<first_valid_index:
            continue
        if loc_in_read>last_valid_index:
            continue
        # Map the site index back to a genomic coordinate.
        if strand == "-":
            ref_pos = ref_end-loc_in_read-1+first_valid_index
        else:
            ref_pos = ref_start+loc_in_read-first_valid_index
        ref_poss.append(ref_pos)
        pred_pos.append(loc_in_read)
    ref_readlocs = dict(zip(pred_pos, ref_poss))
    return new_ref_seq, new_ref_signal, ref_readlocs, ref_poss, pred_pos
class largeModalDataset(IterableDataset):
    """Streaming dataset pairing pod5 signal reads with their BAM alignments.

    Iterating the dataset yields one tab-separated feature string per
    callable motif site (see _features_to_str for the format).

    :param pod5_dr: list of pod5 file paths; DataLoader workers receive
        disjoint slices via worker_init_fn
    :param bam_index: read-id-indexed BAM reader (bam_reader.ReadIndexedBam)
    :param motif_seqs: motif sequences to scan for target sites
    :param bisulfite: optional {"chrom||pos": frequency} dict from read_bed(),
        or None when no bisulfite reference is available
    :param args: parsed command-line arguments (see parse_args)
    """

    def __init__(self, pod5_dr, bam_index, motif_seqs, bisulfite, args):
        # Fix: the original called super(largeModalDataset).__init__(), which
        # creates an unbound super object and never initializes the base class.
        super().__init__()
        self.files = pod5_dr
        self.bam_index = bam_index
        self.motif_seqs = motif_seqs
        self.args = args
        self.bisulfite = bisulfite

    def __iter__(self):
        """Yield feature strings, streaming over this worker's pod5 files."""
        pod5s_q = []
        fill_files_queue(pod5s_q, self.files)
        for files in pod5s_q:
            for file in files:
                with pod5.Reader(file) as reader:
                    for read_record in reader.reads():
                        data_list = self.process_sig_seq(read_record)
                        for item in data_list:
                            if len(item) == 0:
                                print("empty item", flush=True)
                                continue
                            yield item

    def process_sig_seq(self, read_record):
        """Look up the BAM alignment(s) for one pod5 read and extract features.

        Only the first alignment that passes the chromosome/sequence filters
        is used.

        :param read_record: a pod5 read record
        :return: list of feature strings (possibly empty)
        """
        read_name = str(read_record.read_id)
        signal = read_record.signal
        shift_dacs_to_pa = read_record.calibration.offset
        scale_dacs_to_pa = read_record.calibration.scale
        if signal is None:
            return []
        try:
            for seq_read in self.bam_index.get_alignments(read_name):
                # Optional single-chromosome filter.
                if self.args.chr is not None and seq_read.reference_name != self.args.chr:
                    continue
                seq = seq_read.get_forward_sequence()
                if seq is None:
                    continue
                data = (signal, shift_dacs_to_pa, scale_dacs_to_pa, seq_read)
                return self.process_data(data)
        except KeyError:
            # read id not present in the BAM index
            return []
        return []

    def process_data(self, data):
        """Normalize the raw signal, map it onto the read or reference
        sequence, and build feature strings for every callable motif site.

        :param data: (signal, shift_dacs_to_pa, scale_dacs_to_pa, seq_read)
        :return: list of feature strings (possibly empty)
        """
        features_list = []

        signal, shift_dacs_to_pa, scale_dacs_to_pa, seq_read = data
        if seq_read.mapping_quality < self.args.mapq:
            print("mapq < {} ".format(self.args.mapq), flush=True)
            return features_list
        read_dict = dict(seq_read.tags)
        strand_code = 0 if seq_read.is_reverse else 1
        strand = "-" if strand_code == 0 else "+"
        # Basecaller move table: first entry is the stride, the rest are moves.
        mv_table = np.asarray(read_dict["mv"][1:])
        stride = int(read_dict["mv"][0])
        num_trimmed = read_dict["ts"]
        signal_trimmed = signal[num_trimmed:] if num_trimmed >= 0 else signal[:num_trimmed]
        # Convert the basecaller's pA-space normalization (sm/sd tags) into
        # DAC space using the pod5 calibration, then normalize the raw signal.
        shift_pa_to_norm = read_dict["sm"]
        scale_pa_to_norm = read_dict["sd"]
        shift = (shift_pa_to_norm / scale_dacs_to_pa) - shift_dacs_to_pa
        scale = scale_pa_to_norm / scale_dacs_to_pa
        norm_signals = (signal_trimmed - shift) / scale
        read_name = seq_read.query_name
        reference_name = seq_read.reference_name
        seq = seq_read.get_forward_sequence()
        ref_start = seq_read.reference_start
        ref_end = seq_read.reference_end
        cigar_tuples = seq_read.cigartuples
        signal_group = _group_signals_by_movetable_v2(norm_signals, mv_table, stride)
        if self.args.ref:
            # Reference mode: needs the MD tag to reconstruct the ref sequence.
            if not seq_read.has_tag('MD'):
                print('not have MD of readid {}'.format(read_name), flush=True)
                return features_list
            if seq_read.is_reverse:
                ref_seq = complement_seq(seq_read.get_reference_sequence().upper())
            else:
                ref_seq = seq_read.get_reference_sequence().upper()

            # Re-express aligned pairs in read orientation with offsets
            # relative to the alignment start/end.
            pos_pair = []
            for read_pos, ref_pos in seq_read.get_aligned_pairs():
                if read_pos is None:
                    if seq_read.is_reverse:
                        pos_pair.append((None, ref_end - ref_pos - 1))
                    else:
                        pos_pair.append((None, ref_pos - ref_start))
                    continue
                if ref_pos is None:
                    if seq_read.is_reverse:
                        pos_pair.append((len(seq) - read_pos - 1, None))
                    else:
                        pos_pair.append((read_pos, None))
                    continue
                if seq_read.is_reverse:
                    pos_pair.append((len(seq) - read_pos - 1, ref_end - ref_pos - 1))
                else:
                    pos_pair.append((read_pos, ref_pos - ref_start))
            if strand == "-":
                pos_pair.reverse()
            seq, signal_group, ref_readlocs, ref_poss, pred_pos = align_signals_and_extend_ref_seq(
                pos_pair, signal_group, seq, ref_seq, self.motif_seqs,
                self.args.mod_loc, strand, ref_start, ref_end, self.args.boundary)

        if len(signal_group) != len(seq):
            print('signal to seq error!', flush=True)
            return features_list
        if self.args.ref is False:
            # Read mode: find motif sites on the read and map them to the
            # genome via the CIGAR string.
            qalign_start = seq_read.query_alignment_start
            qalign_end = seq_read.query_alignment_end
            if seq_read.is_reverse:
                seq_start = len(seq) - qalign_end
                seq_end = len(seq) - qalign_start
            else:
                seq_start = qalign_start
                seq_end = qalign_end
            q_to_r_poss = get_q2tloc_from_cigar(
                cigar_tuples, strand_code, (seq_end - seq_start)
            )
            ref_readlocs = dict()
            ref_poss = []
            pred_pos = []
            tsite_locs = get_refloc_of_methysite_in_motif(
                seq, set(self.motif_seqs), self.args.mod_loc)
            for loc_in_read in tsite_locs:
                # Too close to either read end to cut a full feature window.
                if loc_in_read < self.args.boundary or loc_in_read > len(seq) - self.args.boundary:
                    continue
                if seq_start <= loc_in_read < seq_end:
                    offset_idx = loc_in_read - seq_start
                    # -1 marks inserted read bases with no genomic coordinate.
                    if q_to_r_poss[offset_idx] != -1:
                        if strand == "-":
                            ref_pos = ref_end - 1 - q_to_r_poss[offset_idx]
                        else:
                            ref_pos = ref_start + q_to_r_poss[offset_idx]
                        ref_readlocs[loc_in_read] = ref_pos
                        ref_poss.append(ref_pos)
                        pred_pos.append(loc_in_read)
                    else:
                        continue
                else:
                    continue

        if len(ref_poss) == 0:
            print('ref_poss is empty!', flush=True)
            return features_list
        bisulfite_ref = dict()
        pred_pos_final = []
        if self.bisulfite is not None:
            for pos in pred_pos:
                ref_pos = ref_readlocs[pos]
                key_combine = '||'.join([reference_name, str(ref_pos)])
                if key_combine in self.bisulfite:
                    bisulfite_ref[pos] = self.bisulfite[key_combine]
                    pred_pos_final.append(pos)
                else:
                    # Sites without a bisulfite label are kept only in test
                    # mode, with a placeholder label of -1.
                    if self.args.test:
                        pred_pos_final.append(pos)
                    bisulfite_ref[pos] = -1
        else:
            # Fix: without a bisulfite reference (--bed omitted) the original
            # emitted no features at all; keep every site with label -1.
            for pos in pred_pos:
                bisulfite_ref[pos] = -1
                pred_pos_final.append(pos)
        for pos in pred_pos_final:
            ref_pos = ref_readlocs[pos]
            features_list.append(_features_to_str([
                reference_name, ref_pos, strand, bisulfite_ref[pos], read_name, pos,
                seq[pos - self.args.boundary:pos + self.args.boundary + 1],
                signal_group[pos - self.args.boundary:pos + self.args.boundary + 1]]))

        return features_list
def worker_init_fn(worker_id: int) -> None:
    """Give each DataLoader worker a disjoint slice of the dataset's files.

    Must run inside a worker process: torch.utils.data.get_worker_info()
    returns None in the main process.
    """
    info = torch.utils.data.get_worker_info()
    ds = info.dataset

    # ceil-divide so every file lands in exactly one worker's slice
    n_files = len(ds.files)
    chunk = int(math.ceil(n_files / float(info.num_workers)))

    lo = worker_id * chunk
    hi = min(lo + chunk, n_files)
    ds.files = ds.files[lo:hi]
def collate_fn_inference(batch):
    """Identity collate: pass the list of per-sample feature strings through unchanged."""
    return batch
def _features_to_str(feature):
    reference_name, ref_pos,strand, bisulfite, read_name, pos,seq, signal=feature
    signal_group_new = []
    for sig in signal:
        signal_group_new.append(
            np.round(np.array(sig), decimals=6))
    norm_signals_text = ';'.join(
                        [",".join([str(y) for y in x]) for x in signal_group_new])
    fea_str = '\t'.join([reference_name, str(ref_pos),strand, str(bisulfite), read_name, str(pos),seq, norm_signals_text])
    return fea_str
def _write_predstr_to_file(write_fp, predstr_q):
    with open(write_fp, "w") as wf:
        while True:
            # during test, it's ok without the sleep()
            if predstr_q.empty():
                time.sleep(time_wait)
                continue
            pred_str = predstr_q.get()
            if pred_str == "kill":
                break
            for one_pred_str in pred_str:
                wf.write(one_pred_str + "\n")
            wf.flush()
def parse_args():
    """Build and parse the command-line interface.

    Fixes several typos in user-facing help text ("wetheter", "splited",
    "extentsion", garbled --recursively help); all option names, types and
    defaults are unchanged.

    :return: argparse.Namespace with the parsed arguments
    """
    parser = argparse.ArgumentParser("")
    parser.add_argument("--write_path", type=str, required=True)
    parser.add_argument(
        "--ref",
        action="store_true",
        default=False,
        required=False,
        help="whether to use ref sequence instead of read sequence, default false.",
    )
    parser.add_argument('--strict', action="store_true", default=False, required=False)
    parser.add_argument(
        "--motifs",
        action="store",
        type=str,
        required=False,
        default="CG",
        help="motif seq to be extracted, default: CG. "
        "can be multi motifs split by comma "
        "(no space allowed in the input str), "
        "or use IUPAC alphabet, "
        "the mod_loc of all motifs must be "
        "the same",
    )
    parser.add_argument(
        "--mod_loc",
        action="store",
        type=int,
        required=False,
        default=0,
        help="0-based location of the targeted base in the motif, default 0",
    )
    parser.add_argument(
        "--nproc",
        '-p',
        type=int,
        required=False,
        default=0,
    )
    parser.add_argument(
        "--batch_size",
        action="store",
        type=int,
        default=50,
        required=False,
        help="number of files to be processed by each process one time, default 50",
    )
    parser.add_argument(
        "--recursively",
        "-r",
        action="store",
        type=str,
        required=False,
        default="yes",
        help="whether to find fast5 files from fast5 dir recursively. "
        "default true, t, yes, 1",
    )
    parser.add_argument(
        "--rna",
        action="store_true",
        default=False,
        required=False,
        help="the fast5 files are from RNA samples. if is rna, the signals are reversed. "
        "NOTE: Currently no use, waiting for further extension",
    )
    parser.add_argument(
        "--test",
        action="store_true",
        default=False,
        required=False
    )
    parser.add_argument('--boundary', type=int, default=30, required=False)
    parser.add_argument('--mapq', type=int, default=0, required=False)
    parser.add_argument("--bam", type=str, required=True)
    parser.add_argument("--pod5", type=str, required=True)
    parser.add_argument("--bed", type=str, required=False)
    parser.add_argument("--chr", type=str, required=False)
    return parser.parse_args()

def main():
    """CLI entry point: stream pod5 reads through a DataLoader and write
    extracted feature strings to disk via a dedicated writer process.
    """
    args = parse_args()
    bam_index = bam_reader.ReadIndexedBam(args.bam)
    is_recursive = str2bool(args.recursively)
    pod5_dr = get_files(args.pod5, is_recursive, ".pod5")
    is_dna = False if args.rna else True
    motif_seqs = get_motif_seqs(args.motifs, is_dna)
    # Fix: the original left `bisulfite` unbound (NameError) when --bed
    # was omitted; fall back to None so the dataset still runs.
    bisulfite = read_bed(args.bed, args.strict) if args.bed is not None else None
    dataset = largeModalDataset(pod5_dr, bam_index, motif_seqs, bisulfite, args)
    data_loader = DataLoader(
        dataset, batch_size=args.batch_size, num_workers=args.nproc,
        worker_init_fn=worker_init_fn, collate_fn=collate_fn_inference,
        pin_memory=True,
    )
    pred_str_q = Queue()

    # Start the writer process that drains predictions to the output file.
    p_w = mp.Process(
        target=_write_predstr_to_file,
        args=(args.write_path, pred_str_q),
        name="writer",
    )
    p_w.daemon = True
    p_w.start()
    for batch in data_loader:
        if batch is not None:
            pred_str_q.put(batch)
    # Sentinel tells the writer to finish and close the file.
    pred_str_q.put("kill")
    p_w.join()


if __name__ == '__main__':
    # Script entry point; propagate main()'s return value as the exit status.
    sys.exit(main())