#!/usr/bin/env python3
# -*- coding:utf-8 -*-
u"""
Created at 2020.01.23

A Python wrapper of PSI.sh
"""

import os

from datetime import datetime
from subprocess import check_output, CalledProcessError


class FileSuffix:
    """Derives and holds the output file paths for one PSI run.

    Every pipeline artefact is named by appending a fixed suffix to the
    user-supplied prefix.
    """

    def __init__(self, prefix: str):
        self.prefix = prefix
        # One attribute per pipeline stage output.
        self.INTRON_BED = f"{prefix}_intron.bed"
        self.JUNC_BED = f"{prefix}_junctions.bed"
        self.FILTERED_JUNC = f"{prefix}_filtered.junctions"
        self.INCLUSION = f"{prefix}_exonic_parts.inclusion"
        self.EXCLUSION = f"{prefix}_exonic_parts.exclusion"
        self.PSI = f"{prefix}_exonic_parts.psi"


class StartPSI(object):
    """
    Python port of the ``StartPSI`` branch of the PSI.sh / STAR pipeline.

    Pipeline steps (see :meth:`start`):
      1. format_junction_from_star - convert STAR SJ.out.tab to a BED junction file
      2. filter_junctions          - keep junctions whose 20 bp anchors overlap the annotation
      3. count_inclusion           - per-exon inclusion read counts (coverageBed)
      4. count_exclusion           - per-exon exclusion read counts (intersectBed)
      5. count_psi                 - per-exon PSI values

    The original shell fragments are preserved in the method docstrings for
    reference.
    """

    def __init__(
            self,
            gtf_file: str,
            reads: str,
            read_length: int,
            junctions: str,
            prefix: str,
            bedtools: str = ""
    ):
        """
        :param gtf_file: annotation passed to coverageBed / intersectBed.
            NOTE(review): the shell reference first derives a simplified
            ``*_exonic_parts.gff`` from the annotation; this port uses the
            file as-is, so it presumably must already be the exonic-parts
            GFF — confirm against the caller.
        :param reads: aligned reads (BAM) counted by coverageBed
        :param read_length: sequencing read length, used in PSI normalisation
        :param junctions: STAR ``SJ.out.tab`` junction file
        :param prefix: output prefix; all result paths derive from it
        :param bedtools: optional directory holding the bedtools binaries;
            empty string means they are looked up on ``PATH``
        """
        self.gtf_file = gtf_file
        self.junctions = junctions
        self.read_length = read_length
        self.FILE = FileSuffix(prefix)
        self.bedtools = bedtools
        self.reads = reads
        self.prefix = prefix

        # Resolve the two bedtools executables, honouring an explicit
        # installation directory when one was given.
        self.coverage_bed = "coverageBed"
        self.intersect_bed = "intersectBed"
        if self.bedtools:
            self.coverage_bed = os.path.join(self.bedtools, self.coverage_bed)
            self.intersect_bed = os.path.join(self.bedtools, self.intersect_bed)

    @staticmethod
    def call(cmd: str) -> list:
        """
        Run a command through the shell and return its stdout split on newlines.

        Note the result always ends with an empty string because the output's
        trailing newline is split as well; downstream parsers skip such lines.

        :param cmd: full shell command line
        :raises SystemExit: on a non-zero exit status, after printing the
            failing command (mirrors the shell script's abort-on-error).
        """
        try:
            return check_output(cmd, shell=True).decode("utf-8").split("\n")
        except CalledProcessError as err:
            print(cmd)
            print(err)
            # Fix: the original used the interactive-only ``exit`` builtin;
            # SystemExit(1) is the equivalent that works without ``site``.
            raise SystemExit(1)

    def format_junction_from_star(self):
        """
        Convert a STAR SJ.out.tab file into a BED-like junction file.

        Replaces the original combination of grep, sed and awk.  Each intron
        is extended by a 20 bp anchor on both sides (plus the 1 bp shift to
        0-based starts); the two anchor intervals per junction are collected
        under a "chrom#strand" key and handed to :meth:`filter_junctions`.

        Writes ``<prefix>_junctions.bed``.
        """
        data, junctions = {}, []
        with open(self.FILE.JUNC_BED, "w+") as w:
            with open(self.junctions) as r:
                for idx, line in enumerate(r):
                    if line.startswith("#"):
                        continue
                    lines = line.split()
                    # 20 bp anchor extension on each side of the intron.
                    start = int(lines[1]) - 21
                    end = int(lines[2]) + 20
                    # STAR strand column: "1" means '+', anything else '-'.
                    # BUG FIX: the original compared the string field against
                    # the integer 1, which is always False, so every junction
                    # came out as '-'.
                    strand = "+" if lines[3] == "1" else "-"

                    # BED12-like record; column 5 carries the number of
                    # uniquely mapping reads (SJ.out.tab column 7).
                    new_line = [
                        lines[0], start, end,
                        "JUNCBJ{}".format(idx + 1),
                        lines[6], strand, start,
                        end, "255,0,0", "20,20",
                        "30"
                    ]
                    junctions.append(new_line)
                    w.write("\t".join([str(x) for x in new_line]) + "\n")

                    # Collect the two 20 bp anchor intervals, keyed by
                    # chromosome and strand, for the annotation overlap scan.
                    key = "{}#{}".format(new_line[0], new_line[5])
                    temp = data.get(key, [])
                    temp.append([new_line[0], new_line[1], new_line[1] + 20, new_line[3]])
                    temp.append([new_line[0], new_line[2] - 20, new_line[2], new_line[3]])
                    data[key] = temp

        self.filter_junctions(junctions, data)

    @staticmethod
    def __is_upstream__(junc, ref):
        # True when interval ``junc`` ends strictly before ``ref`` starts.
        return junc[2] < ref[1]

    @staticmethod
    def __is_down_stream__(junc, ref):
        # True when interval ``junc`` starts strictly after ``ref`` ends.
        return junc[1] > ref[2]

    def filter_junctions(self, junctions: list, reference: dict):
        """
        Keep only junctions whose anchor intervals overlap an annotated
        feature on the same chromosome and strand.

        Uses a two-pointer sweep over both interval lists sorted by
        (chrom, start, end), per "chrom#strand" key.

        :param junctions: BED-like records from format_junction_from_star
        :param reference: anchor intervals keyed by "chrom#strand"

        Writes ``<prefix>_filtered.junctions`` and ``<prefix>_intron.bed``
        (the intron coordinates with the 20 bp anchors stripped again).
        """
        # Annotated intervals per "chrom#strand"; GTF/GFF columns 1/4/5/7.
        # (Attribute columns contain spaces but sit after the fields we use,
        # so a plain split() is safe here.)
        gtf = {}
        with open(self.gtf_file) as r:
            for line in r:
                if line.startswith("#"):
                    continue

                lines = line.split()
                key = "{}#{}".format(lines[0], lines[6])
                temp = gtf.get(key, [])
                temp.append([lines[0], int(lines[3]), int(lines[4])])
                gtf[key] = temp

        # Names of junctions with at least one anchor overlapping annotation.
        data = set()
        for key in set(gtf.keys()) & set(reference.keys()):
            curr_gtf = sorted(gtf[key], key=lambda x: [x[0], x[1], x[2]])
            curr_ref = sorted(reference[key], key=lambda x: [x[0], x[1], x[2]])

            i, j = 0, 0
            while i < len(curr_gtf) and j < len(curr_ref):
                if self.__is_upstream__(junc=curr_gtf[i], ref=curr_ref[j]):
                    i += 1
                elif self.__is_down_stream__(junc=curr_gtf[i], ref=curr_ref[j]):
                    j += 1
                else:
                    # Overlap: remember the junction name (anchor field 3).
                    data.add(curr_ref[j][3])
                    j += 1

        with open(self.FILE.INTRON_BED, "w+") as wi:
            with open(self.FILE.FILTERED_JUNC, "w+") as w:
                for line in sorted(junctions, key=lambda x: [x[0], x[1], x[2], x[5]]):
                    if line[3] not in data:
                        continue
                    w.write("\t".join([str(x) for x in line]) + "\n")

                    # Shrink back to the real intron by removing the anchors.
                    wi.write("\t".join([
                        line[0], str(line[1] + 20), str(line[2] - 20),
                        line[3], line[4], line[5]
                    ]) + "\n")

    def count_inclusion(self):
        """
        Count exon inclusion coverage for each exonic part via coverageBed.

        Skipped when ``<prefix>_exonic_parts.inclusion`` already exists and is
        non-empty (cheap resume support).

        Shell reference::

            ## Counting exon inclusion coverage for each exonic part.
            function CountInclusion {
                echo "Counting exon coverage...."

                ## extracting the exon annotation from input annotation file, and simplify the attribute field as "geneID:exonNumer" to track the exon
                [ -f ${prefix}_exonic_parts.gff ] || awk '{OFS="\t"}{if ($3 == "exonic_part") print  $1,$2,$3,$4,$5,$6,$7,$8,$14":"$12}' $annotation | sed 's@[";]@@g' > ${prefix}_exonic_parts.gff

                ## counting the exon coverage  sort based on exon ID
                coverageBed -split -abam $reads -b ${prefix}_exonic_parts.gff | awk 'BEGIN{OFS = "\t"}{ print $1,$4,$5,$5-$4+1,$9,$10 }' | sort -k 5 > ${prefix}_exonic_parts.inclusion

                echo "Exon coverage counting finished!"
                echo
            }
        """

        if not os.path.exists(self.FILE.INCLUSION) or \
                os.path.getsize(self.FILE.INCLUSION) == 0:

            out = self.call("{} -split -abam {} -b {}".format(self.coverage_bed, self.reads, self.gtf_file))

            data = []
            for line in out:
                lines = line.split()
                try:
                    # chrom, start, end, length, exon ID, read count.
                    new_lines = [
                        lines[0], lines[3], lines[4],
                        str(int(lines[4]) - int(lines[3]) + 1),
                        lines[8], lines[9]
                    ]

                    data.append(new_lines)
                except (IndexError, ValueError):
                    # Skip the trailing empty line from call() and any
                    # malformed row (ValueError was previously uncaught).
                    pass

            # Sort by exon ID, mirroring the shell's ``sort -k 5``.
            with open(self.FILE.INCLUSION, "w+") as w:
                for line in sorted(data, key=lambda x: x[4]):
                    w.write("\t".join(line) + "\n")

    def count_exclusion(self):
        """
        Sum exclusion (junction) read counts per exonic part via intersectBed.

        Writes ``<prefix>_exonic_parts.exclusion`` and removes the temporary
        intron BED afterwards.

        Shell reference::

            ## Counting exon exclusion
            function CountExclusion {
                echo "Counting exclusion...."
                echo "junctions file is: $junctions"
                [ -f ${prefix}_exonic_parts.gff ] || awk '{OFS="\t"}{if ($3 == "exonic_part") print  $1,$2,$3,$4,$5,$6,$7,$8,$14":"$12}' $annotation | sed 's@[";]@@g' > ${prefix}_exonic_parts.gff
                ##  We create an intron list from Tophat junctions
                grep -v description $junctions | sed 's/,/\t/g' | awk '{OFS="\t"}{print $1,$2+$13,$3-$14,$4,$5,$6}' > ${prefix}_intron.bed

                ##  Extract all Introns belonging to an exon and summarize read counts for each exon
                intersectBed -wao -f 1.0 -s -a ${prefix}_exonic_parts.gff -b ${prefix}_intron.bed | awk 'BEGIN{OFS = "\t"}{ $16 == 0? s[$9] += 0:s[$9] += $14 }END{ for (i in s) {print i,s[i]} }' | sort -k 1 > ${prefix}_exonic_parts.exclusion
                rm  ${prefix}_intron.bed
                echo "Exclusion counting finished! "
                echo
            }
        """

        data = {}
        for i in self.call(f"{self.intersect_bed} -wao -f 1.0 -s -a {self.gtf_file} -b {self.FILE.INTRON_BED}"):
            lines = i.split()

            try:
                # Field 16 ($16, overlap bp) == 0 means no overlap, which
                # contributes nothing; otherwise add the junction read count
                # (field 14, $14), keyed by the exon ID (field 9, $9).
                temp = data.get(lines[8], 0)
                temp += int(lines[13]) if int(lines[15]) != 0 else 0
                data[lines[8]] = temp
            except (IndexError, ValueError):
                # Skip the trailing empty line from call() and any malformed
                # row (ValueError was previously uncaught).
                pass

        # Sorted keys mirror the shell's ``sort -k 1``.
        with open(self.FILE.EXCLUSION, "w+") as w:
            for key in sorted(data.keys()):
                w.write("{}\t{}\n".format(key, data[key]))

        if os.path.exists(self.FILE.INTRON_BED):
            os.remove(self.FILE.INTRON_BED)

    def count_psi(self):
        """
        Compute PSI from the inclusion and exclusion count files.

        For every exon present in both files:
            NIR = inclusion / (exon_length + read_length - 1)
            NER = exclusion / (read_length - 1)
            PSI = NIR / (NIR + NER), or "NA" when both rates are zero.

        Writes ``<prefix>_exonic_parts.psi``.

        Shell reference::

            ## Calculating the PSI value based on inclusion and exclusion number
            function CountPSI {
                echo "Calculating PSI value..."

                ## checking the inclusion and exclusion input file, if the exonID are not sorted or different, then exit with status 9
                cut -f5 $inclusion > ${prefix}_exonID1.txt
                cut -f1 $exclusion > ${prefix}_exonID2.txt
                diff ${prefix}_exonID1.txt ${prefix}_exonID2.txt > /dev/null  ||( echo "Unsorted exonID exit" &&  return 9 )
                rm ${prefix}_exonID1.txt ${prefix}_exonID2.txt

                ## Calculating PSI
                paste  $inclusion $exclusion | awk -v "len=$readLength" -v "prefix=$prefix" 'BEGIN{OFS = "\t"; print "exon_ID",prefix"_length",prefix"_inclusion",prefix"_exclusion",prefix"_PSI" }{NIR=$6/($4+len-1);NER=$8/(len-1)}{print $5,$4,$6,$8,(NIR+NER==0)? "NA":NIR/(NIR + NER)}' > ${prefix}_exonic_parts.psi
                echo "PSI calculating finished!"
            }
        """
        # Inclusion rows keyed by exon ID (column 5 of the inclusion file).
        inclusion_cols = {}
        with open(self.FILE.INCLUSION) as r:
            for line in r:
                lines = line.split()
                inclusion_cols[lines[4]] = line.strip()

        # Exclusion rows keyed by exon ID (column 1 of the exclusion file).
        exclusion_cols = {}
        with open(self.FILE.EXCLUSION) as r:
            for line in r:
                lines = line.split()
                exclusion_cols[lines[0]] = line.strip()

        with open(self.FILE.PSI, "w+") as w:
            # Fix: the original header read "econ_ID"; the shell reference
            # above prints "exon_ID".
            w.write("exon_ID\tlength\tinclusion\texclusion\tPSI\n")

            # sorted() makes the row order deterministic (the shell pipeline
            # pastes two files already sorted by exon ID); a bare set
            # intersection iterates in hash order.
            for i in sorted(set(inclusion_cols.keys()) & set(exclusion_cols.keys())):
                line = "{}\t{}".format(inclusion_cols[i], exclusion_cols[i]).split()
                # line[3] = exon length, line[5] = inclusion count,
                # line[7] = exclusion count (pasted columns).
                val_nir = int(line[5]) / (int(line[3]) + self.read_length - 1)
                val_ner = int(line[7]) / (self.read_length - 1)
                val = "NA" if val_nir + val_ner == 0 else val_nir / (val_nir + val_ner)
                w.write(f"{line[4]}\t{line[3]}\t{line[5]}\t{line[7]}\t{val}\n")

    def start(self):
        """
        Run the full pipeline in order, timing each stage.

        Shell reference::

            StartPSI)
            [ $# -lt 6 ] && showHelp
            annotation=$2
            readLength=$3
            reads=$4
            junctions=$5
            prefix=$6
            CountInclusion
            junctionFilter
            junctions=${prefix}_filtered_junctions.bed
            CountExclusion
            inclusion=${prefix}_exonic_parts.inclusion
            exclusion=${prefix}_exonic_parts.exclusion
            CountPSI
            ;;
        """
        begin = datetime.now()
        self.format_junction_from_star()
        print("format junction:", datetime.now() - begin)

        begin = datetime.now()
        self.count_inclusion()
        print("count inclusion:", datetime.now() - begin)

        begin = datetime.now()
        self.count_exclusion()
        print("count exclusion:", datetime.now() - begin)

        begin = datetime.now()
        self.count_psi()
        print("count psi:", datetime.now() - begin)


if __name__ == "__main__":
    # Intentionally a no-op: this module is meant to be imported, not run.
    pass
