import os
import sys
from collections import defaultdict, namedtuple

import numpy as np

import matplotlib

if True:
    # Agg appears to be the most robust backend when only saving plots.
    matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sns

from megalodon import logging, mapping, megalodon_helper as mh, mods, validation
from ._extras_parsers import get_parser_validate_results


LOGGER = logging.get_logger()

# Lower x-axis limit (percent identity) for the accuracy KDE plots.
PLOT_MIN_BC_ACC = 80
# Bandwidth adjustment passed as `bw_adjust` (newer seaborn API) for
# accuracy KDE plots.
BC_BANDWIDTH = 0.7
# Bandwidth passed as `bw` (older seaborn API fallback) for accuracy plots.
BC_BANDWIDTH2 = 0.2
# Bandwidth adjustment (`bw_adjust`, newer seaborn) for aligned-length plots.
LEN_BANDWIDTH = 0.2
# Bandwidth (`bw`, older seaborn fallback) for aligned-length plots.
LEN_BANDWIDTH2 = 10
# Number of grid points at which each KDE curve is evaluated.
GRIDSIZE = 1000

# Legend title for per-sample plots.
BC_LEGEND_LABEL = "Sample"
# Valid-sites label used when no --valid-sites files are provided.
DEFAULT_VS_LABEL = "All Sites"

# Fixed-width header and row template for the basecall accuracy table
# written by report_acc_metrics.
ACC_METRICS_HEADER = (
    "{: <17}{: <15}{: <15}{: <11}{: <15}{: <15}{: <15}{}\n".format(
        "Median_Accuracy",
        "Mean_Accuracy",
        "Mode_Accuracy",
        "Num_Reads",
        "Longest_Aligned_Len",
        "Median_Aligned_Len",
        "Mean_Aligned_Len",
        "Sample_Label",
    )
)
ACC_METRICS_TMPLT = (
    "{: <17.4}{: <15.4f}{: <15.1f}{: <11d}{: <15.1f}{: <15.1f}{: <15.1f}{}\n"
)

# Warning template formatted with (mod_base, samp_lab, vs_lab, data set name)
# in report_mod_metrics.
MOD_MISSING_MSG = (
    '{0} not found in "{1}", "{2}", "{3}" sites. '
    + 'Skipping validation for "{0}" + "{1}" + "{2}".'
)

# Per-sample validation bundle: mapping accuracies, aligned lengths,
# per-valid-site-set mod stats, optional ground-truth control stats and the
# sample label.
VAL_MOD_DATA = namedtuple(
    "VAL_MOD_DATA",
    ("acc", "parsim_acc", "aligned_lens", "mod_data", "ctrl_data", "label"),
)


def plot_nmap_reads(pdf_fp, samp_labs, nmapped_reads):
    """Plot the number of mapped reads per sample as a bar plot.

    Previously this function only emitted the log message and produced no
    figure; it now saves a bar plot page into the open PDF.

    Args:
        pdf_fp (PdfPages): open PDF to save the figure into
        samp_labs (list): one label per sample (x-axis categories)
        nmapped_reads (list): number of mapped reads per sample
    """
    LOGGER.info("Plotting number of mapped reads")
    plt.figure(figsize=(8, 5))
    with sns.axes_style("whitegrid"):
        sns.barplot(x=samp_labs, y=nmapped_reads, hue=samp_labs, dodge=False)
    # one bar per sample; the hue legend would only duplicate x-axis labels
    plt.legend([], [], frameon=False)
    plt.xlabel("Samples")
    plt.ylabel("Number of Mapped Reads")
    plt.title("Number of Mapped Reads")
    pdf_fp.savefig(bbox_inches="tight")
    plt.close()


def report_mod_metrics(
    mod_samps_data, ctrl_samps_data, balance_classes, vs_labs, out_fp, pdf_fp
):
    """Compute modified base validation metrics and produce PR/ROC/KDE plots.

    Args:
        mod_samps_data (list): VAL_MOD_DATA for each modified sample
        ctrl_samps_data (list): VAL_MOD_DATA control sample(s), or None to
            use each sample's own ``ctrl_data`` (ground-truth control sites)
        balance_classes (bool): balance modified/control class sizes
        vs_labs (list): valid site set labels; None means all sites
        out_fp (file): text stream for the metrics table
        pdf_fp (PdfPages): open PDF for the plots
    """
    LOGGER.info("Computing modified base metrics")
    if vs_labs is None:
        vs_labs = [
            DEFAULT_VS_LABEL,
        ]
    all_mods_data = [msd.mod_data for msd in mod_samps_data]
    samp_labs = [msd.label for msd in mod_samps_data]
    if ctrl_samps_data is None:
        all_ctrl_data = [msd.ctrl_data for msd in mod_samps_data]
    else:
        # handle case where single control sample is provided for all mod
        # samples
        if len(ctrl_samps_data) == 1:
            all_ctrl_data = [
                ctrl_samps_data[0].mod_data for _ in mod_samps_data
            ]
        else:
            all_ctrl_data = [
                ctrl_samp_data.mod_data for ctrl_samp_data in ctrl_samps_data
            ]

    # extract all modified bases from all samples and all valid sites.
    # Samples without a per-read mods database have data of None (see
    # parse_mod_data) and must be skipped rather than crash the report.
    all_mod_bases = set(
        (
            mod_base
            for samp_data in all_mods_data + all_ctrl_data
            if samp_data is not None
            for vs_samp_data in samp_data
            for mod_base in vs_samp_data
        )
    )
    out_fp.write(validation.MOD_VAL_METRICS_HEADER)
    all_pr_data, all_roc_data = defaultdict(list), defaultdict(list)
    all_kde_data = []
    # loop over samples
    for mod_samp_data, ctrl_samp_data, samp_lab in zip(
        all_mods_data, all_ctrl_data, samp_labs
    ):
        # skip samples missing modified base statistics (no mods DB found)
        if mod_samp_data is None or ctrl_samp_data is None:
            LOGGER.warning(
                'Missing modified base statistics for sample "{}". '
                "Skipping.".format(samp_lab)
            )
            continue
        # loop over valid site sets
        for vs_samp_mod_data, vs_samp_ctrl_data, vs_lab in zip(
            mod_samp_data, ctrl_samp_data, vs_labs
        ):
            # loop over modified bases
            for mod_base in all_mod_bases:
                # check that mod_base exists in both data set
                if (
                    mod_base not in vs_samp_mod_data
                    or vs_samp_mod_data[mod_base].shape[0] == 0
                ):
                    LOGGER.warning(
                        MOD_MISSING_MSG.format(
                            mod_base, samp_lab, vs_lab, "modified"
                        )
                    )
                    continue
                if (
                    mod_base not in vs_samp_ctrl_data
                    or vs_samp_ctrl_data[mod_base].shape[0] == 0
                ):
                    LOGGER.warning(
                        MOD_MISSING_MSG.format(
                            mod_base, samp_lab, vs_lab, "control"
                        )
                    )
                    continue
                try:
                    # compute modified base metrics
                    (
                        pr_data,
                        roc_data,
                        kde_data,
                    ) = validation.compute_mod_sites_stats(
                        vs_samp_mod_data[mod_base],
                        vs_samp_ctrl_data[mod_base],
                        balance_classes,
                        mod_base,
                        samp_lab,
                        vs_lab,
                        out_fp,
                    )
                    all_pr_data[mod_base].append(pr_data)
                    all_roc_data[mod_base].append(roc_data)
                    all_kde_data.append(kde_data)
                except mh.MegaError as e:
                    LOGGER.warning(str(e))

    validation.plot_pr(pdf_fp, all_pr_data)
    validation.plot_roc(pdf_fp, all_roc_data)
    validation.plot_kde(pdf_fp, all_kde_data)


def _plot_sample_kdes(
    pdf_fp,
    samps_val_data,
    attr,
    title,
    xlabel,
    bw_adjust,
    bw_fallback,
    xlim=None,
    log_scale=False,
):
    """Save one KDE page with a curve per sample for a VAL_MOD_DATA field.

    Args:
        pdf_fp (PdfPages): open PDF to save the figure into
        samps_val_data (list): VAL_MOD_DATA per sample
        attr (str): VAL_MOD_DATA field to plot ("acc", "parsim_acc", ...)
        title (str): figure title
        xlabel (str): x-axis label
        bw_adjust (float): bandwidth adjustment for newer seaborn
        bw_fallback (float): `bw` value for the older seaborn interface
        xlim (tuple): optional (min, max) x-axis limits
        log_scale (bool): plot x-axis on a log10 scale
    """
    plt.figure(figsize=(8, 5))
    for samp_val_data in samps_val_data:
        samp_vals = getattr(samp_val_data, attr)
        if samp_vals is None:
            continue
        try:
            # newer seaborn interface (bw_adjust/log_scale keywords)
            new_api_kwargs = dict(log_scale=10) if log_scale else {}
            sns.kdeplot(
                samp_vals,
                shade=False,
                gridsize=GRIDSIZE,
                bw_adjust=bw_adjust,
                label=samp_val_data.label,
                **new_api_kwargs,
            )
        except AttributeError:
            # older seaborn interface takes `bw`; apply log scale via pyplot
            sns.kdeplot(
                samp_vals,
                shade=False,
                gridsize=GRIDSIZE,
                bw=bw_fallback,
                label=samp_val_data.label,
            )
            if log_scale:
                try:
                    plt.xscale("log", base=10)
                except ValueError:
                    # older matplotlib spelled the keyword `basex`
                    plt.xscale("log", basex=10)
    plt.legend(title=BC_LEGEND_LABEL)
    plt.xlabel(xlabel)
    plt.ylabel("Density")
    plt.title(title)
    if xlim is not None:
        plt.xlim(*xlim)
    pdf_fp.savefig(bbox_inches="tight")
    plt.close()


def plot_acc(pdf_fp, samps_val_data):
    """Plot mapping accuracy, parsimonious accuracy, aligned length KDEs
    and a mapped-read-count bar plot for all samples.

    Args:
        pdf_fp (PdfPages): open PDF to save figures into
        samps_val_data (list): VAL_MOD_DATA per sample
    """
    # check that there are accuracies to be plotted, else return
    if all(samp_val_data.acc is None for samp_val_data in samps_val_data):
        return

    LOGGER.info("Plotting mapping accuracy distribution(s)")
    _plot_sample_kdes(
        pdf_fp,
        samps_val_data,
        "acc",
        "Mapping Accuracy",
        "Mapping Accuracy",
        BC_BANDWIDTH,
        BC_BANDWIDTH2,
        xlim=(PLOT_MIN_BC_ACC, 100),
    )
    _plot_sample_kdes(
        pdf_fp,
        samps_val_data,
        "parsim_acc",
        "Mapping Accuracy (Parsimonious: match - ins / ref_len)",
        "Mapping Accuracy",
        BC_BANDWIDTH,
        BC_BANDWIDTH2,
        xlim=(PLOT_MIN_BC_ACC, 100),
    )
    _plot_sample_kdes(
        pdf_fp,
        samps_val_data,
        "aligned_lens",
        "Aligned Length (alignment_length - num_insertions)",
        "Aligned Length (Log10 scale)",
        LEN_BANDWIDTH,
        LEN_BANDWIDTH2,
        log_scale=True,
    )

    # bar plot of the number of mapped reads per sample
    samp_labs, nmapped_reads = [], []
    for samp_val_data in samps_val_data:
        if samp_val_data.acc is not None:
            samp_labs.append(samp_val_data.label)
            nmapped_reads.append(len(samp_val_data.acc))
    plt.figure(figsize=(8, 5))
    with sns.axes_style("whitegrid"):
        sns.barplot(x=samp_labs, y=nmapped_reads, hue=samp_labs, dodge=False)
    plt.legend([], [], frameon=False)
    plt.xlabel("Samples")
    plt.ylabel("Number of Mapped Reads")
    plt.title("Number of Mapped Reads")
    pdf_fp.savefig(bbox_inches="tight")
    plt.close()


def report_acc_metrics(res_dir, out_fp, samp_lab):
    """Compute basecall/mapping accuracy metrics for one results directory.

    Parses the mapping summary file from ``res_dir`` and writes one
    ACC_METRICS_TMPLT row to ``out_fp``.

    Args:
        res_dir (str): megalodon results directory
        out_fp (file): text stream for the metrics row
        samp_lab (str): sample label for the row

    Returns:
        Tuple of (pct identity, parsimonious accuracy, aligned length)
        numpy arrays, or (None, None, None) when mappings are missing or
        the summary contains no reads.
    """
    bc_acc = parsim_acc = aligned_lens = None
    try:
        bc_data = mapping.parse_map_summary_file(
            mh.get_megalodon_fn(res_dir, mh.MAP_SUMM_NAME)
        )
        if len(bc_data) == 0:
            # an empty summary would crash np.max below; treat as missing
            LOGGER.info("No mapped reads found for {}".format(res_dir))
            return bc_acc, parsim_acc, aligned_lens
        bc_acc = np.array([r_data.pct_identity for r_data in bc_data])
        parsim_acc = np.array(
            [
                100
                * (r_data.num_match - r_data.num_ins)
                / (r_data.num_align - r_data.num_ins)
                for r_data in bc_data
            ]
        )
        aligned_lens = np.array(
            [r_data.num_align - r_data.num_ins for r_data in bc_data]
        )
        # crude mode by rounding to 1 decimal
        uniq_acc, acc_counts = np.unique(
            np.around(bc_acc, 1), return_counts=True
        )
        mode_bc_acc = uniq_acc[np.argmax(acc_counts)]
        out_fp.write(
            ACC_METRICS_TMPLT.format(
                np.median(bc_acc),
                np.mean(bc_acc),
                mode_bc_acc,
                len(bc_data),
                np.max(aligned_lens),
                np.median(aligned_lens),
                np.mean(aligned_lens),
                samp_lab,
            )
        )
    except FileNotFoundError:
        LOGGER.info("Mappings not found for {}".format(res_dir))

    return bc_acc, parsim_acc, aligned_lens


def parse_mod_data(
    res_dir,
    out_fp,
    valid_sites,
    include_strand,
    samp_lab,
    max_stats,
    ctrl_sites=None,
):
    """Collect mapping accuracy and per-site modified base stats for a sample.

    Args:
        res_dir (str): megalodon results directory
        out_fp (file): text stream for the accuracy metrics row
        valid_sites (list): site sets to restrict stats to, or None for all
        include_strand (bool): site keys include strand
        samp_lab (str): sample label
        max_stats (int): maximum number of stats to extract
        ctrl_sites (list): ground-truth control site sets, or None

    Returns:
        VAL_MOD_DATA namedtuple; mod_data/ctrl_data are None when the
        corresponding data is unavailable.
    """
    samp_acc, samp_parsim_acc, samp_lens = report_acc_metrics(
        res_dir, out_fp, samp_lab
    )

    mods_data, ctrl_data = None, None
    mods_db_fn = mh.get_megalodon_fn(res_dir, mh.PR_MOD_NAME)
    if os.path.exists(mods_db_fn):
        if ctrl_sites is not None:
            # extract modified and control sites in a single pass, then
            # split the results back out by position in the request list
            all_site_stats = mods.extract_stats_at_valid_sites(
                mods_db_fn,
                valid_sites + ctrl_sites,
                include_strand=include_strand,
                max_stats=max_stats,
            )
            num_vs = len(valid_sites)
            mods_data = all_site_stats[:num_vs]
            ctrl_data = all_site_stats[num_vs:]
        elif valid_sites is not None:
            mods_data = mods.extract_stats_at_valid_sites(
                mods_db_fn,
                valid_sites,
                include_strand=include_strand,
                max_stats=max_stats,
            )
        else:
            mods_data = [
                mods.extract_all_stats(mods_db_fn, max_stats=max_stats),
            ]

    return VAL_MOD_DATA(
        samp_acc, samp_parsim_acc, samp_lens, mods_data, ctrl_data, samp_lab
    )


def parse_valid_sites(valid_sites_fns, gt_data_fn, include_strand):
    """Parse valid-sites BED files, optionally intersected with ground truth.

    Args:
        valid_sites_fns (list): (label, BED path) pairs, or None
        gt_data_fn (str): ground truth file path, or None
        include_strand (bool): represent sites with strand included

    Returns:
        Tuple of (valid_sites, vs_labs, ctrl_sites); each element may be
        None when the corresponding input was not provided or no valid
        sites files could be read.
    """
    if valid_sites_fns is None and gt_data_fn is None:
        return None, None, None

    # if ground truth file provided, parse first
    gt_mod_pos = gt_ctrl_pos = None
    if gt_data_fn is not None:
        LOGGER.info("Reading ground truth file")
        gt_mod_pos, gt_ctrl_pos = mh.parse_ground_truth_file(
            gt_data_fn, include_strand=include_strand
        )
        if valid_sites_fns is None:
            # ground truth without valid sites: return the parsed ground
            # truth sites directly
            return [gt_mod_pos], None, [gt_ctrl_pos]

    # parse valid sites files and intersect with ground truth (if provided)
    LOGGER.info("Reading valid sites data")
    valid_sites, vs_labs = [], []
    ctrl_sites = [] if gt_data_fn is not None else None
    for vs_lab, valid_sites_fn in valid_sites_fns:
        try:
            raw_bed_sites = mh.parse_beds([valid_sites_fn])
        except FileNotFoundError:
            LOGGER.warning(
                "Could not find valid sites file: {}".format(valid_sites_fn)
            )
            continue

        # flatten per-(chrm, strand) positions into a set of site keys
        file_sites = set()
        for (chrm, strand), cs_pos in raw_bed_sites.items():
            for pos in cs_pos:
                file_sites.add(
                    (chrm, strand, pos) if include_strand else (chrm, pos)
                )
        if gt_data_fn is None:
            valid_sites.append(file_sites)
        else:
            ctrl_sites.append(file_sites.intersection(gt_ctrl_pos))
            valid_sites.append(file_sites.intersection(gt_mod_pos))
        vs_labs.append(vs_lab)

    if not valid_sites:
        return None, None, None

    return valid_sites, vs_labs, ctrl_sites


def _main(args):
    """Validate megalodon results: write accuracy/mod metrics text output
    and a multi-page PDF of plots.
    """
    logging.init_logger(quiet=args.quiet)
    pdf_fp = PdfPages(args.out_pdf)
    # metrics table goes to stdout unless an output filename was given
    out_fp = (
        sys.stdout
        if args.out_filename is None
        else open(args.out_filename, "w")
    )
    # modified base metrics require a control: either control results
    # directories or a ground truth file
    do_report_mod_metrics = (
        args.control_megalodon_results_dirs is not None
        or args.ground_truth_data is not None
    )
    if (
        args.control_megalodon_results_dirs is not None
        and args.ground_truth_data is not None
    ):
        LOGGER.warning(
            "Cannot provide both control results and ground "
            + "truth file. Ignoring ground truth file."
        )
        args.ground_truth_data = None
    # control dirs must number either one (shared by all mod samples) or
    # exactly one per modified results dir
    if (
        args.control_megalodon_results_dirs is not None
        and len(args.control_megalodon_results_dirs) > 1
        and len(args.control_megalodon_results_dirs)
        != len(args.megalodon_results_dirs)
    ):
        LOGGER.error(
            "Must provide either one control results directory for all "
            + "modified results directories or a control directory for each "
            + "modified base results directory."
        )
        sys.exit(1)
    valid_sites, vs_labs, ctrl_sites = parse_valid_sites(
        args.valid_sites, args.ground_truth_data, args.strand_specific_sites
    )

    out_fp.write(ACC_METRICS_HEADER)
    LOGGER.info("Reading Megalodon results data")
    # default sample labels are "Sample 1", "Sample 2", ...
    if args.results_labels is None:
        samp_labs = [
            "Sample {}".format(samp_i + 1)
            for samp_i in range(len(args.megalodon_results_dirs))
        ]
    else:
        assert len(args.megalodon_results_dirs) == len(args.results_labels), (
            "Must be a label in --results-labels for each provided "
            + "megalodon_results_dir"
        )
        samp_labs = args.results_labels
    mod_samps_data = [
        parse_mod_data(
            mega_dir,
            out_fp,
            valid_sites,
            args.strand_specific_sites,
            samp_lab,
            args.max_stats,
            ctrl_sites,
        )
        for samp_lab, mega_dir in zip(samp_labs, args.megalodon_results_dirs)
    ]
    ctrl_samps_data = None
    # if control is not specified via ground truth file, and control results
    # dir was provided parse control data
    if args.control_megalodon_results_dirs is not None:
        LOGGER.info("Reading Megalodon control data results")
        if len(args.control_megalodon_results_dirs) > 1:
            ctrl_samps_data = [
                parse_mod_data(
                    mega_dir,
                    out_fp,
                    valid_sites,
                    args.strand_specific_sites,
                    "{} Control".format(samp_lab),
                    args.max_stats,
                )
                for samp_lab, mega_dir in zip(
                    samp_labs, args.control_megalodon_results_dirs
                )
            ]
        else:
            # handle case with a single control for all mod dirs
            ctrl_samps_data = [
                parse_mod_data(
                    args.control_megalodon_results_dirs[0],
                    out_fp,
                    valid_sites,
                    args.strand_specific_sites,
                    "Control",
                    args.max_stats,
                ),
            ]
        plot_acc(pdf_fp, mod_samps_data + ctrl_samps_data)
    else:
        plot_acc(pdf_fp, mod_samps_data)

    if do_report_mod_metrics:
        # enter newline between basecall accuracy and mod base results
        out_fp.write("\n")
        report_mod_metrics(
            mod_samps_data,
            ctrl_samps_data,
            not args.allow_unbalance_classes,
            vs_labs,
            out_fp,
            pdf_fp,
        )

    pdf_fp.close()
    if out_fp is not sys.stdout:
        out_fp.close()


# script entry point: parse command-line args and run validation
if __name__ == "__main__":
    _main(get_parser_validate_results().parse_args())
