"""
vis is a collection of functions which generate visualizations of statistical
information about posts from the `database.db`
"""

import sqlite3
from datetime import datetime, timezone
from enum import Enum
# from collections import Counter

import numpy as np
import matplotlib.pyplot as plt
from bs4 import BeautifulSoup


# FIXME
import pdb

class Scope(Enum):
    """
    Scope enumerates how much of the archive a graph covers, from the
    entire database down to a single post. Vis.__init__ maps the chosen
    member to the SQL WHERE clause used by every query.
    """

    GLOBAL = 0  # entire db, no WHERE clause
    BOARD = 1  # a single board: /v/, /tech/, etc
    THREAD = 2  # one thread id plus all responses to it
    POST = 3  # a single comment/response, selected by post number


class Vis:
    """
    Vis is the master class for graph generating methods. It shares state
    common to these functions such as scope (ie, the sqlite connection and
    scope of the graph).

    TODO:
    1. implement some filtering mechanism to prevent generating graphs
    in nonsensical conditions (ie, don't perform img vs comment-only
    comparison on a single post)
    2. pass pyplot kwargs to vis methods to allow better control of plot
    appearance
    3. more control over WHERE-clauses (like time)

    """

    def __init__(
        self,
        scope=Scope.GLOBAL,
        board="/v/",
        thread=None,
        post=None,
        target="../../graphs/",
        db="../../database.db",
    ):
        """
        Open the sqlite archive and build the WHERE clause shared by all
        graphing methods.

        scope  -- Scope member selecting how much data the graphs cover
        board  -- board name, used only when scope == Scope.BOARD
        thread -- thread id, used only when scope == Scope.THREAD
        post   -- post number, used only when scope == Scope.POST
        target -- directory the generated .png files are written to
        db     -- path to the sqlite archive

        Raises sqlite3.OperationalError when `db` has no `posts` table.
        """
        self.scope = scope
        # NOTE(review): these clauses are built by string interpolation and
        # are injectable if board/thread/post ever come from untrusted
        # input; sqlite3 parameter binding would be safer.
        if scope == Scope.GLOBAL:
            self.where = ""
        elif scope == Scope.BOARD:
            self.where = f"WHERE boardname LIKE '{board}'"
        elif scope == Scope.THREAD:
            self.where = f"WHERE resto = {thread} OR no = {thread}"
        else:
            # BUG FIX: the POST scope previously interpolated `thread`
            # (None by default) instead of the `post` argument.
            self.where = f"WHERE no = {post}"
        self.target = target
        # Keep the connection on the instance so callers can commit/close
        # it; previously it went out of scope immediately.
        self.conn = sqlite3.connect(db)
        self.cur = self.conn.cursor()
        try:
            # Fail fast with a helpful message when the archive was never
            # initialized. The original `assert` never fired (execute()
            # raises before the assert is evaluated) and asserts are
            # stripped under -O anyway.
            self.cur.execute("SELECT 1 FROM posts")
        except sqlite3.OperationalError as err:
            raise sqlite3.OperationalError(
                f"Database {db} has not been initialized! "
                f"Is {db} the correct path to your archive?"
            ) from err

    def save(self, fig, name):
        """
        save writes the plot as a png to the target directory as
        '{name}.png' and clears all axes and figures
        """
        fig.savefig(f"{self.target}{name}.png")
        plt.cla()
        plt.clf()
        return None

    @staticmethod
    def _tukey_bounds(data, k=1.5):
        """
        Return the (lower, upper) Tukey fences q1 - k*IQR and q3 + k*IQR
        for a numeric array. The lower fence is clamped at 0 because both
        callers pass non-negative data (time deltas, word counts).
        """
        q1 = np.quantile(data, 0.25)
        q3 = np.quantile(data, 0.75)
        iqr = q3 - q1
        return max(0.0, q1 - k * iqr), q3 + k * iqr

    def image_ratio(self, name="pie-chart"):
        """
        image_ratio makes a simple pie-chart showing posts with/without
        images included
        """
        # NOTE(review): rows whose image_data is NULL are not counted as
        # images (NULL NOT LIKE '' is NULL) -- presumably intended; verify
        # against how the archiver stores image-less posts.
        # COUNT(*) replaces the zero-argument COUNT(), which sqlite rejects.
        query = f"""
        SELECT COUNT(CASE WHEN `image_data` NOT LIKE '' THEN 1 END) AS images,
        COUNT(*) FROM posts {self.where}
        """
        self.cur.execute(query)
        img, total = self.cur.fetchone()  # renamed: `all` shadowed the builtin
        labels = ["posts with images", "text only"]
        fig, ax = plt.subplots()
        ax.set_title("Proportion of Posts With Images")
        ax.pie([img, total - img], labels=labels, autopct="%1.1f%%")
        ax.axis("equal")
        self.save(fig, name)
        return None

    def post_velocity(self, name="velocity", bins=25):
        """
        post_velocity generates a histogram of time between posts. This would
        give a quick visualization of how 'fast' a thread or a board moves.

        TODO: Deep dive this method to figure out why visualization is trash
        TODO: Bins should probably be dynamic so there aren't empty bins and
        the histogram doesn't look like a damn hair comb.
        """
        query = f"SELECT time FROM posts {self.where} ORDER BY time"
        self.cur.execute(query)
        times = np.array([t[0] for t in self.cur.fetchall()])
        deltas = np.diff(times)
        if deltas.size == 0:
            # fewer than two posts in scope: no differentials to plot
            # (np.quantile would raise on the empty array)
            return None
        # Use Tukey's Method (1.5 IQR) so we don't have outrageous skew.
        # BUG FIX: previously computed median +/- 1.5*IQR, which is not
        # Tukey's method; the fences now use q1/q3.
        lower_bound, upper_bound = self._tukey_bounds(deltas)
        # Exclude data we don't want to plot anyway to avoid skewing the bins
        deltas_filtered = deltas[(lower_bound < deltas) & (deltas < upper_bound)]
        fig, ax = plt.subplots()
        ax.set_title("Distribution of Time Between Posts")
        ax.set_ylabel("count")
        ax.set_xlabel("time differential (seconds)")
        ax.set_xlim(lower_bound, upper_bound)
        ax.hist(deltas_filtered, bins=bins)
        self.save(fig, name)
        return None

    def verbosity(self, name="words-per-post", bins=25):
        """
        verbosity generates a histogram of word counts per-post for the
        given scope.

        TODO: use markup tag remover already defined in the source. Should
        probably just be using std-lib instead of importing bs4 . . .
        """
        query = f"SELECT com FROM posts {self.where}"
        self.cur.execute(query)
        # `c[0] or ""` guards against NULL com columns, which would crash
        # BeautifulSoup.
        comments = [
            BeautifulSoup(c[0] or "", "html.parser").text
            for c in self.cur.fetchall()
        ]
        # BUG FIX: the old double comprehension iterated
        # `for char ... for com ...`, producing len(punct) * len(comments)
        # entries with only ONE character class replaced in each. A single
        # translate() pass strips every punctuation character from every
        # comment exactly once.
        punct = "-,.!?><\n"
        table = str.maketrans(punct, " " * len(punct))
        counts = np.array(
            [len(com.translate(table).lower().split()) for com in comments]
        )
        if counts.size == 0:
            # nothing in scope: np.quantile would raise on the empty array
            return None
        # Use Tukey's Method (1.5 IQR) so we don't have outrageous skew.
        # BUG FIX: previously computed median +/- 1.5*IQR, which is not
        # Tukey's method; the fences now use q1/q3.
        lower_bound, upper_bound = self._tukey_bounds(counts)
        # Exclude data we don't want to plot anyway to avoid skewing the bins
        counts_filtered = counts[(lower_bound < counts) & (counts < upper_bound)]
        fig, ax = plt.subplots()
        ax.set_title("Distribution of Words per Post")
        ax.set_ylabel("number of posts")
        ax.set_xlabel("words per post")
        ax.set_xlim(lower_bound, upper_bound)
        ax.hist(counts_filtered, bins=bins)
        self.save(fig, name)
        return None

    def heatmap(self, name="heat", norm=False):
        """
        heatmap generates a 2-dimensional heatmap to show the relationship
        between the day of the week and the time of day and frequency of
        posting.

        norm -- when True, each cell holds the fraction of all posts in
        scope rather than the raw count.

        TODO: Make this look a lot nicer
        NOTE: White annotations looked terrible. Way too busy. A colorbar
        would be a sane additon.
        """
        query = f"SELECT time FROM posts {self.where}"
        self.cur.execute(query)
        times = [t[0] for t in self.cur.fetchall()]
        # datetime.utcfromtimestamp is deprecated since 3.12; the aware
        # conversion yields identical .hour / weekday-name values.
        utc = [datetime.fromtimestamp(t, tz=timezone.utc) for t in times]
        total = len(utc)  # total number of posts, used with `norm`
        hours = np.array([d.hour for d in utc])
        days = np.array([d.strftime("%A") for d in utc])
        chart_days = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',
                      'Friday', 'Saturday', 'Sunday']
        chart_hours = [str(x) for x in range(24)]
        count = np.zeros((len(chart_hours), len(chart_days)))
        for i in range(24):
            for j, d in enumerate(chart_days):
                # BUG FIX: the old `len(day_hr[day_hr == [h, d]])` counted
                # matching *elements*, so a post matching only the hour or
                # only the day added 1 and a full match added 2. Require
                # both coordinates to match.
                count[i][j] = np.count_nonzero((hours == i) & (days == d))
        if norm and total:
            # `norm` was accepted but silently ignored before
            count /= total
        fig, ax = plt.subplots()
        ax.pcolor(count)
        ax.set_title("Weekly Post Heatmap")
        ax.set_ylabel("Hour")
        ax.set_yticks(np.arange(len(chart_hours)))
        ax.set_yticklabels(chart_hours)
        ax.set_xlabel("Day")
        ax.set_xticks(np.arange(len(chart_days)))
        ax.set_xticklabels(chart_days)
        # Rotate the tick labels and set their alignment.
        plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
            rotation_mode="anchor")
        self.save(fig, name)
        

        