#!/usr/bin/env python3
'''
Calculate fragment sizes for mapped bam file.
Author: liqiming@whu.edu.cn
'''
import os
import re
import sys
import gzip
import pysam
import argparse
import pandas as pd
from itertools import groupby
from collections import Counter
from .gtfparser import GTF, reader


def filter_transcript(gtffile, method, filtered_file=os.devnull):
    """Pick one representative transcript per gene from a GTF file.

    Parameters
    ----------
    gtffile : str
        Path to the GTF annotation, handed through to ``reader``.
    method : str
        Which transcript to keep per gene: ``first``, ``last``,
        ``max_len`` or ``min_len`` — or ``not`` to disable filtering.
    filtered_file : file-like or str
        Binary-mode writer that receives each kept transcript as one GTF
        line.  A path (e.g. the default ``os.devnull``) is opened and
        closed here; previously the string default crashed on ``.write``.

    Returns
    -------
    set or None
        The kept transcript ids, or ``None`` when ``method == "not"``.
    """
    assert method in (
        "first", "last", "max_len", "min_len", "not"), f"{method} not support"
    if method == "not":
        return None

    # Accept a filesystem path so the os.devnull default actually works.
    own_handle = isinstance(filtered_file, (str, os.PathLike))
    out = open(filtered_file, "wb") if own_handle else filtered_file

    transcript_idlist = set()
    try:
        records = reader(
            gtffile,
            header=GTF,
            # keep only non-comment "transcript" rows
            skip_while=lambda toks: toks[0].startswith("#") or toks[2] != "transcript",
        )
        # NOTE(review): groupby merges only *consecutive* rows sharing a
        # gene_id, so the GTF must be grouped by gene — confirm upstream.
        for _, group in groupby(records, lambda x: x.gene_id):
            if method == "first":
                transcript = next(group)
            elif method == "last":
                transcript = list(group)[-1]
            elif method == "max_len":
                transcript = max(group, key=lambda x: x.size)
            else:  # min_len
                transcript = min(group, key=lambda x: x.size)
            transcript_idlist.add(transcript.transcript_id)
            out.write(f"{transcript}\n".encode())
    finally:
        if own_handle:
            out.close()

    return transcript_idlist


def is_overlap(interval1, interval2):
    """Return True when the two (start, end) intervals share any position.

    Intervals that merely touch (one's end equals the other's start) are
    treated as non-overlapping.
    """
    start_a, end_a = interval1
    start_b, end_b = interval2
    # Overlap iff each interval starts before the other one ends.
    return start_a < end_b and start_b < end_a


def overlap_length(lst1, lst2):
    """Sum the inclusive overlap over every pair of intervals drawn from
    ``lst1`` x ``lst2``; non-overlapping pairs contribute nothing."""
    total = 0
    for left in lst1:
        for right in lst2:
            if not is_overlap(left, right):
                continue
            # Inclusive span shared by the two intervals, clamped at zero
            # (equivalent to len(range(max_start, min_end + 1))).
            span = min(left[-1], right[-1]) + 1 - max(left[0], right[0])
            total += max(0, span)
    return total


def _keep_alignment(read, qcut):
    """True for mapped, QC-passing, non-duplicate primary alignments at or
    above mapping quality ``qcut``."""
    if read.is_unmapped or read.is_qcfail or read.is_duplicate or read.is_secondary:
        return False
    return read.mapping_quality >= qcut


def _pair_length(read1, read2, exon_range):
    """Fragment length implied by a mate pair.

    Overlapping mates: sum of read lengths minus the double-counted
    aligned overlap.  Disjoint mates: sum of read lengths plus the exonic
    portion of the inner gap (introns in the gap are excluded).
    """
    read_st, read_ed = read1.reference_start, read1.reference_end
    mate_st, mate_ed = read2.reference_start, read2.reference_end
    read_len = read1.infer_query_length()
    mate_len = read2.infer_query_length()
    # Orient so that (read_st, read_ed) is the leftmost mate.
    if read_st > mate_st:
        read_st, mate_st = mate_st, read_st
        read_ed, mate_ed = mate_ed, read_ed
    if mate_st <= read_ed:
        # Mates overlap on the reference: subtract the shared aligned bases.
        return read_len + mate_len - overlap_length(read1.get_blocks(), read2.get_blocks())
    # Disjoint mates: add the exonic span of the gap between them.
    return read_len + mate_len + overlap_length(exon_range, [[read_ed, mate_st]])


def fragment_size(bedfile, samfile, transcript_idlist, qcut=30, ncut=1, filtered_bed=os.devnull, temp=os.devnull):
    '''Yield the fragment size of each read / read pair, per transcript.

    Parameters
    ----------
    bedfile : str
        12-column BED file of transcripts.
    samfile : pysam.AlignmentFile
        Open, indexed BAM file.
    transcript_idlist : set or None
        When truthy, only BED records whose name is in this set are used.
    qcut : int
        Minimum mapping quality for an alignment to count.
    ncut : int
        Minimum number of fragments a transcript must yield to be reported.
    filtered_bed, temp
        Writers for the kept BED lines (text) and per-fragment sizes
        (bytes).  NOTE(review): the ``os.devnull`` string defaults are not
        writable objects — callers must pass open handles, as run() does.
    '''
    with open(bedfile, 'r') as bed_handle:
        for line in bed_handle:
            if line.startswith(('#', 'track', 'browser')):
                continue
            fields = line.split()
            chrom = fields[0]
            tx_start = int(fields[1])
            tx_end = int(fields[2])
            geneName = fields[3]
            if transcript_idlist and geneName not in transcript_idlist:
                continue
            filtered_bed.write(line)

            # 12-column BED: field 10 holds exon sizes, field 11 exon starts
            # relative to tx_start; build absolute [start, end] exon intervals.
            exon_starts = [int(s) + tx_start
                           for s in fields[11].rstrip(',\n').split(',')]
            exon_sizes = [int(s) for s in fields[10].rstrip(',\n').split(',')]
            exon_range = [[st, st + size]
                          for st, size in zip(exon_starts, exon_sizes)]

            try:
                aligned_reads = samfile.fetch(chrom, tx_start, tx_end)
            except ValueError:
                # pysam raises ValueError for contigs/regions absent from the
                # BAM; previously a bare except also swallowed real errors.
                continue

            frag_sizes = []
            read1_by_name = {}
            read2_by_name = {}
            for read in aligned_reads:
                if not _keep_alignment(read, qcut):
                    continue
                if not read.is_paired:
                    # Single-end: count only reads fully inside the transcript.
                    if tx_start <= read.reference_start and read.reference_end <= tx_end:
                        frag_sizes.append(read.infer_query_length())
                    continue
                if read.mate_is_unmapped:
                    continue
                if read.is_read1:
                    read1_by_name[read.query_name] = read
                if read.is_read2:
                    read2_by_name[read.query_name] = read

            # Pair mates explicitly by query name.  The previous
            # sort-and-zip with asserts crashed whenever one mate of a pair
            # fell outside the fetched window; unmatched mates are now skipped.
            for name in sorted(read1_by_name.keys() & read2_by_name.keys()):
                frag_sizes.append(
                    _pair_length(read1_by_name[name], read2_by_name[name], exon_range))

            if len(frag_sizes) < ncut:
                continue
            for size in frag_sizes:
                temp.write(f"{size}\n".encode())
                yield size


def run():
    """Command-line entry point.

    Parses arguments, filters transcripts from the GTF, streams fragment
    sizes from the BAM over the BED transcripts, then writes (or prints)
    a tab-separated Length/Count table.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("-i", "--input", dest="input_file",
                        type=str, required=True, help="Input BAM file")
    parser.add_argument("-o", "--output", dest="output_file",
                        type=str, help="Output file")
    parser.add_argument("-r", "--refgene", dest="refgene_bed", type=str, required=True,
                        help="Reference gene model in BED format. Must be standard 12-column BED file. [required]")
    parser.add_argument("-g", "--gtf", dest="gtf_file", type=str, required=True,
                        help="Reference gene model in GTF format. Used to filter 12-column transcript BED. [required]")
    parser.add_argument("-m", "--method", dest="filter_method", type=str, default="first",
                        help="Method to maintain transcript when filtering 12-column transcript BED. [first|last|max_len|min_len|not]")
    parser.add_argument("-q", "--mapq", dest="map_qual", type=int, default=30,
                        help="Minimum mapping quality (phred scaled) for an alignment to be called \"uniquely mapped\"")
    parser.add_argument("-n", "--frag-num", dest="fragment_num", type=int, default=1,
                        help="Minimum number of fragment")
    parser.add_argument("-b", "--bed_filtered", dest="filtered_bed", type=str, default=os.devnull,
                        help="Output file to store filtered 12-column transcript BED")
    parser.add_argument("-f", "--filtered_gtf", dest="filtered_gtf", type=str, default=os.devnull,
                        help="Output file to store filtered GTF file, support *.gz")
    parser.add_argument("-t", "--temp", dest="temp_file", type=str, default=os.devnull,
                        help="Output file to store processing data, support *.gz")

    args = parser.parse_args()

    if not os.path.exists(args.input_file + '.bai'):
        print("cannot find index file of input BAM file", file=sys.stderr)
        print(args.input_file + '.bai' + " does not exist", file=sys.stderr)
        # Was sys.exit(0), which told the shell the run succeeded.
        sys.exit(1)

    # *.gz outputs are transparently gzip-compressed.
    gtf_opener = gzip.open if args.filtered_gtf.endswith(".gz") else open
    temp_opener = gzip.open if args.temp_file.endswith(".gz") else open

    with gtf_opener(args.filtered_gtf, "wb") as gtf_writer:
        transcript_idlist = filter_transcript(
            args.gtf_file, args.filter_method, gtf_writer)

    # Context managers guarantee the BAM and the output handles are closed
    # even if fragment counting raises; previously they leaked on error.
    with open(args.filtered_bed, "w") as bed_writer, \
            temp_opener(args.temp_file, "wb") as temp_writer, \
            pysam.AlignmentFile(args.input_file, "rb") as bam:
        fragment_sizes = Counter(fragment_size(
            args.refgene_bed,
            bam,
            transcript_idlist,
            args.map_qual,
            args.fragment_num,
            bed_writer,
            temp_writer
        ))

    frag_sizes_df = pd.DataFrame(
        {"Length": fragment_sizes.keys(), "Count": fragment_sizes.values()})
    frag_sizes_df = frag_sizes_df.sort_values(by="Length", ascending=True)

    if args.output_file:
        frag_sizes_df.to_csv(args.output_file, sep="\t",
                             header=True, index=False)
    else:
        print("Length\tCount")
        for _, row in frag_sizes_df.iterrows():
            print(row["Length"], row["Count"], sep="\t")


# Script entry point.  NOTE(review): this module uses a relative import
# (``from .gtfparser import ...``), so direct execution as a file likely
# fails outside the package context — run via ``python -m``; confirm.
if __name__ == "__main__":
    run()