import os
import os.path as osp
import numpy as np
import math as m
import matplotlib.pyplot as plt
from itertools import chain
from random import randint
from typing import *

import sys
sys.path.append(".")
from GNNSwitch.graph import sweep_dataset

# global matplotlib configuration for all figures produced by this script
plt.rcParams.update({
    'xtick.labelsize': 12,  # tick label font sizes
    'ytick.labelsize': 12,
    'savefig.dpi': 500,     # high-resolution saved PNGs
})

def custom_barwidth(group_size, x_tick, base_w):
    """
    Compute the bar width and per-bar x offsets for one group of bars.

    @param group_size: number of bars in the group
    @param x_tick: total width allotted to the group; must not exceed the
                   x-tick interval between neighboring groups
    @param base_w: combined width of all bars (<= x_tick); the remaining
                   space is distributed evenly between the bars
    @return: (bar_width, offsets) where offsets is an ndarray of length
             group_size centered around 0 (callers subtract an offset from
             each tick position)
    """
    bar_norm = base_w / x_tick   # fraction of the group width that is bar
    sp_norm = 1 - bar_norm       # fraction that is inter-bar spacing
    bar_w = (bar_norm / group_size) * x_tick
    sp_w = (sp_norm / (group_size-1)) * x_tick if group_size > 1 else 0

    if group_size == 1:
        # single bar sits exactly on the tick; return an ndarray so the
        # offset type is consistent across all group sizes (was a list [0])
        return bar_w, np.zeros(1)

    b_s = np.arange(0, group_size) * (bar_w + sp_w) + 0.5*bar_w - 0.5*x_tick

    return bar_w, b_s

def general_float(repr):
    """
    Parse a table cell into a float; the placeholder '/' maps to 0.0.

    @param repr: cell text from the markdown table
    @raises ValueError: for any non-numeric text other than '/'
    """
    try:
        return float(repr)
    except ValueError:
        if repr == '/':
            return 0.
        # bug fix: previously fell off the `if` and returned None, which
        # later poisons the numpy comparisons in process_tab_text
        raise

def process_tab_text(path, use_tags = 'full', truncate_zero=False):
    """
    Parse a markdown-style pipe table into (header, {tag-tuple: ndarray}).

    Rows whose first cell has fewer than two '-'-separated parts (e.g. the
    separator row) are skipped; '/' cells become 0.0 via general_float.

    @param path: text file containing a '|'-delimited table; any preamble
                 before the first '|' line is skipped
    @param use_tags: 'all' keys each entry by the tuple of '-'-split parts of
                     the first cell; 'full' keys by the raw cell string
                     (NOTE(review): tuple(tag) then yields a tuple of single
                     characters — confirm that is intended); a list of ints
                     selects those '-'-split parts
    @param truncate_zero: drop columns whose value is <= 0 in every row
    @return: (header, entries) — header is the list of value column names,
             entries maps tag tuples to float arrays
    @raises ValueError: on an unknown tagging scheme or a file without a table
    """
    entries = {}
    with open(path, "r") as f:
        # skip any preamble lines before the first table row
        pre_line = f.readline()
        while pre_line and pre_line[0] != '|':
            pre_line = f.readline()
        if not pre_line:
            # bug fix: hitting EOF used to raise a bare IndexError
            raise ValueError(f"no '|'-delimited table found in {path}")
        lines = [pre_line] + f.readlines()

        # [1:-2] strips the leading '|' and the trailing '|\n' of each row;
        # the first header cell (tag column) is dropped with [1:]
        header = [x.strip() for x in lines[0][1:-2].split("|")][1:]
        nonzeros = np.zeros(len(header), dtype=bool)
        for line in lines[2:]:  # lines[1] is the |---|---| separator
            parts = [x.strip() for x in line[1:-2].split("|")]
            raw_tag = parts[0].split("-")
            if len(raw_tag) < 2:
                continue
            if isinstance(use_tags, str):
                if use_tags == "all":
                    tag = raw_tag
                elif use_tags == "full":
                    tag = parts[0]
                else:
                    raise ValueError("Unknown tagging scheme (choices: all/full)")
            elif isinstance(use_tags, list):
                tag = [raw_tag[sel] for sel in use_tags]
            else:
                # bug fix: previously fell through and crashed later with
                # UnboundLocalError on `tag`
                raise ValueError("use_tags must be a str or a list of indices")
            vals = np.array([general_float(x.strip()) for x in parts[1:]])
            # remember which columns are ever positive (for truncate_zero)
            nonzeros = nonzeros | (vals > 0)
            entries[tuple(tag)] = vals
        if truncate_zero:
            for _k in entries.keys():
                entries[_k] = entries[_k][nonzeros]
            header = [header[i] for i in range(len(header)) if nonzeros[i]]
    return header, entries

def tab_select(tab, *sels, filter=None):
    """
    Select the entries of `tab` whose tag matches every selector.

    @param sels: (tag index, expected value) pairs; an entry is kept only if
                 all pairs match (no pairs keeps everything)
    @param filter: optional predicate over the entry value; entries failing
                   it are skipped before tag matching
    @return: dict of the matching {tag: entry} pairs
    """
    selected = {}
    for tag, entry in tab.items():
        if callable(filter) and not filter(entry):
            continue
        if all(tag[pos] == want for pos, want in sels):
            selected[tag] = entry
    return selected


def split_by_distinct_tag(tab, idx, filter=None):
    """
    Group a tag->entry table by the distinct values of tag[idx].

    @param tab: {tag-tuple: entry} mapping
    @param idx: tag position to group on
    @param filter: optional predicate over a whole group dict; only groups
                   passing it are returned (None keeps every group)
    @return: list of {tag: entry} sub-tables, in first-seen order
    """
    distinct = {}
    for tag, entry in tab.items():
        # accumulate entries sharing the same tag[idx] value
        distinct.setdefault(tag[idx], {})[tag] = entry

    # bug fix: the default filter=None used to crash with TypeError on
    # `filter(v)`; treat None as "accept every group"
    return [v for v in distinct.values() if filter is None or filter(v)]


def tab_transpose(header, entries, new_header_scheme = 'concat'):
    """
    Transpose a table: rows keyed by tag become columns keyed by header cell.

    @param header: list of column names, one per value in each entry
    @param entries: {tag-tuple: sequence of values}
    @param new_header_scheme: 'concat' joins each tag tuple with '-';
                              an int selects that single tag component
    @return: (new_header, transposed) — new_header has one name per original
             entry; transposed maps each original column name to the list of
             that column's values across entries
    """
    t_tab = {}
    t_header_raw = []
    for tag, dat in entries.items():
        t_header_raw.append(tag)
        for col, col_id in enumerate(header):
            # create the output row on first sight of this column
            if not col_id in t_tab.keys():
                t_tab[col_id] = []
            # append this entry's value for the column
            t_tab[col_id].append(dat[col])

    t_header = []
    for hr in t_header_raw:
        if new_header_scheme == 'concat':
            # bug fix: was "-".join(*hr), which unpacks the tuple into
            # multiple arguments and raises TypeError for multi-part tags
            t_header.append("-".join(hr))
        elif isinstance(new_header_scheme, int):
            t_header.append(hr[new_header_scheme])

    return t_header, t_tab


def plot_histogram_from_tab(ax, header, entries, title=None, 
                            use_as_legacy = -1):
    """
    Draw a grouped bar chart of `entries` on the given axes.

    @param ax: matplotlib axes to draw on
    @param header: condition names, one x-tick position per name
    @param entries: {entity name: value sequence}; each entity becomes one
                    bar series offset within the group
    @param title: optional subplot title
    @param use_as_legacy: legend index; when >= 0 a legend is drawn using the
                          entity name (str) or name[use_as_legacy] (tuple)
    """
    width, offsets = custom_barwidth(len(entries), 0.8, 0.6)
    base_x = np.arange(len(header))
    handles, labels = [], []
    for series_idx, (name, values) in enumerate(entries.items()):
        bars = ax.bar(base_x - offsets[series_idx], values, width)
        if use_as_legacy >= 0:
            handles.append(bars)
            labels.append(name if isinstance(name, str) else name[use_as_legacy])

    ax.set_xticks(base_x, header)
    if title:
        ax.set_title(title)
    if use_as_legacy >= 0:
        ax.legend(handles, labels)

def flattern(axes):
    """Flatten a 2-D collection (e.g. an axes grid) into one iterator."""
    return chain.from_iterable(axes)

def paint_for_dataset_format(header, tabs, save_path,
                             plot_grid: Tuple[int, int]=None, suptitle=None, ax_title=None, file_name="pic"):
    """
    Draw one histogram subplot per tab and save the figure as <file_name>.png.

    @param header: list of per-subplot header lists (one per tab), or a
                   single shared header applied to every subplot
    @param tabs: list of {tag: values} tables, one subplot each
    @param save_path: directory receiving the PNG
    @param plot_grid: optional (rows, cols); defaults to a near-square grid
    @param suptitle: figure-level title
    @param ax_title: optional list of per-subplot titles
    @param file_name: PNG base name (without extension)
    """
    if plot_grid is None:
        # near-square layout just large enough for all tabs
        cols = m.ceil(m.sqrt(len(tabs)))
        rows = m.ceil(len(tabs)/cols)
    else:
        rows, cols = plot_grid
    figure, axes = plt.subplots(nrows=rows, ncols=cols, squeeze=False, figsize=(20,10), layout='tight')

    for i, (ax, tab) in enumerate(zip(flattern(axes), tabs)):
        # leg_idx = 1 if i == 0 else -1
        # bug fix: `h` was only assigned when header is a list, raising
        # NameError for a shared (non-list) header
        h = header[i] if isinstance(header, list) else header
        ax_t = ax_title[i] if ax_title is not None else None
        plot_histogram_from_tab(ax, h, tab, title=ax_t)#, use_as_legacy=2)

    figure.suptitle(suptitle)
    figure.savefig(osp.join(save_path, f"{file_name}.png"))
    # release the figure so repeated calls don't accumulate open figures
    plt.close(figure)

def sample_tab(tab, num_samples, sample_scheme='max_var', distinct_on=None):
    """
    Pick up to `num_samples` representative entries from `tab`.

    @param tab: {tag: value-sequence} mapping
    @param num_samples: how many entries to keep (also the default iteration
                        range for the 'distinct' scheme)
    @param sample_scheme:
        'max_var'  - keep the entries with the largest value variance
        'random'   - keep a random subset (score collisions in the temporary
                     dict may return fewer than num_samples entries)
        'distinct' - for each column k (or each k in `distinct_on`), keep the
                     not-yet-chosen entry whose value at k most exceeds its
                     own average
    @param distinct_on: optional iterable of column indices for 'distinct'
    @return: {tag: value-sequence} of the chosen entries
    @raises ValueError: for an unknown sample_scheme
    """

    def _build_tmp(samp, other=None):
        # score every entry; NOTE: equal scores silently overwrite each other
        _tmp = {}
        for _k, _vals in tab.items():
            score = samp(_vals)
            _tmp[score] = (_k, _vals)
            if callable(other):
                other(score)
        return _tmp

    def _var_calc(vals):
        # higher variance -> more "interesting" profile
        return np.var(np.array(vals))

    def _rand_calc(vals):
        return randint(0, len(tab))

    def _best_k(vals, k):
        # how much vals[k] dominates the entry's own average (scaled by len)
        return vals[k]*len(vals) - sum(vals)

    if sample_scheme == 'max_var':
        samp = _var_calc
    elif sample_scheme == 'random':
        samp = _rand_calc
    elif sample_scheme == 'distinct':
        ret = {}
        rng = range(num_samples) if distinct_on is None else distinct_on
        for k in rng:
            # lambda is consumed immediately below, so the loop-variable
            # late-binding pitfall does not apply here
            samp = lambda vals: _best_k(vals, k%len(vals))
            tmp = _build_tmp(samp)
            ids = np.array(list(tmp.keys()))
            ids.sort()
            # walk scores best-to-worst; take the first unchosen tag
            for ki in range(len(ids)-1, -1, -1):
                if not tmp[ids[ki]][0] in ret.keys():
                    ret[tmp[ids[ki]][0]] = tmp[ids[ki]][1]
                    break
        return ret
    else:
        # bug fix: previously fell through to an UnboundLocalError on `samp`
        raise ValueError(f"unknown sample_scheme: {sample_scheme!r}")

    tmp = _build_tmp(samp)
    ids = np.array(list(tmp.keys()))
    ids.sort()
    topk = ids[-num_samples:]
    ret = {}
    for k in topk:
        ret[tmp[k][0]] = tmp[k][1]
    return ret


def _paint_format_performance(vlen):
    """
    Render a grid of bar charts ("formats performance diff") comparing the
    format benchmark numbers per (dataset, sampler) pair.

    Reads ./workspace/samples_format_benchmark.md (tags split on '-'), picks
    rows tagged with vector length 8, samples up to 5 distinct configurations
    per (dataset, sampler), and saves formats-<vlen>.png into ./workspace.

    @param vlen: vector-length tag value to plot; when != 8, keys sampled at
                 vlen=8 are remapped to the same config at `vlen`, keeping
                 only configs that exist for every length in 16..256
    """
    header, tab = process_tab_text("./workspace/samples_format_benchmark.md", 'all')

    # reddit_tab = tab_select(tab, (0, "Reddit"), (3, '8')) 
    Graph = ["PPI", "Flickr", "Reddit", "Yelp", "AmazonProducts"]
    Samplers = [ "ClusterGCN_sampler", "FastGCN_sampler", "Sage_sampler", 
                "VRGCN_sampler", "GraphSAINT_V_sampler", "GraphSAINT_E_sampler",
                "GraphSAINT_RW_sampler"]
    raw_split = []
    ax_title = []
    for ds in Graph:
        # tag layout from the 'all' scheme: index 0 = dataset, 1 = sampler,
        # 3 = vector length (as a string)
        ds_tab = tab_select(tab, (0, ds), (3, str(8)))
        for alg in Samplers:
            ds_alg_tab = tab_select(ds_tab, (1, alg))
            # skip sparse (dataset, sampler) pairs with too few rows to plot
            if len(ds_alg_tab) < 5:
                continue
            
            if vlen != 8:
                # keep only configs that also exist at every larger vlen, so
                # the plots stay comparable across vector lengths
                prefered_keys = []
                for _k in sample_tab(ds_alg_tab, 10, sample_scheme='distinct').keys():
                    good = True
                    for vl in [16, 32, 64, 128, 256]:
                        new_k = _k[:3] + (str(vl),)
                        if not new_k in tab.keys():
                            good = False
                            break
                    if good:
                        prefered_keys.append(_k[:3] + (str(vlen),))
                    if len(prefered_keys) == 5:
                        break
                    
                # re-key the surviving configs at the requested vlen
                higher = {}
                for _k in prefered_keys:
                    new_k = _k[:3] + (str(vlen),)
                    if new_k in tab.keys():
                        higher[new_k] = tab[new_k]
                    if len(higher) == 5:
                        break
                raw_split.append(higher)
            else:
                raw_split.append(sample_tab(ds_alg_tab, 5, sample_scheme='distinct'))
            ax_title.append(f"{ds}-{alg[:alg.rfind('_')]}")
        # raw_split.append(sample_tab(raw_tab, 5, sample_scheme='distinct'))
    
    # ==non_transpose
    ax_tabs = raw_split
    t_header = [header]*len(raw_split)

    # ==transpose
    # ax_tabs = []
    # t_header = []
    # for t in raw_split:
    #     t_h, t_t = tab_transpose(header, t, 1)
    #     t_header.append([x[:x.rfind("_")] for x in t_h])
    #     ax_tabs.append(t_t)
        
    paint_for_dataset_format(t_header, ax_tabs, "./workspace", 
                             plot_grid=(len(Graph), len(Samplers)),
                             suptitle="formats performance diff", 
                             ax_title=ax_title,
                             file_name=f"formats-{vlen}")

def _paint_schedule_performance(vlen):
    """
    Render a grid of bar charts ("schedule performance diff") comparing
    sampler-schedule numbers per (dataset, sampler) pair.

    Reads ./workspace/sampler_schedule_diff.md with all-zero columns removed
    (truncate_zero=True), samples distinct configurations per
    (dataset, sampler), and saves scheds-<vlen>.png into ./workspace.

    @param vlen: vector-length tag value; when != 8 the keys sampled at
                 vlen=8 are remapped to `vlen` without a membership check
    """
    header, tab = process_tab_text("./workspace/sampler_schedule_diff.md", 'all', truncate_zero=True)

    Graph = ["PPI", "Flickr", "Reddit", "Yelp", "AmazonProducts"]
    Samplers = [ "ClusterGCN_sampler", "FastGCN_sampler", "Sage_sampler", "VRGCN_sampler",
        "GraphSAINT_V_sampler", "GraphSAINT_E_sampler", "GraphSAINT_RW_sampler"]
    raw_split = []
    ax_title = []
    for ds in Graph:
        # tag layout from the 'all' scheme: 0 = dataset, 1 = sampler,
        # 3 = vector length (as a string)
        ds_tab = tab_select(tab, (0, ds), (3, str(8)))
        for alg in Samplers:
            ds_alg_tab = tab_select(ds_tab, (1, alg))
            # skip sparse (dataset, sampler) pairs with too few rows to plot
            if len(ds_alg_tab) < 5:
                continue
            
            if vlen != 8:
                prefered_keys = list(sample_tab(ds_alg_tab, len(header)-1, sample_scheme='distinct', 
                                                distinct_on=[1,2,3,4,5,6]).keys())
                higher = {}
                for _k in prefered_keys:
                    new_k = _k[:3] + (str(vlen),)
                    # NOTE(review): no membership check here (unlike the
                    # format plot) — a sampled config missing at this vlen
                    # raises KeyError; confirm the table is complete
                    higher[new_k] = tab[new_k]
                raw_split.append(higher)
            else:
                raw_split.append(sample_tab(ds_alg_tab, len(header)-1, sample_scheme='distinct',
                                            distinct_on=[1,2,3,4,5,6]))
            ax_title.append(f"{ds}-{alg[:alg.rfind('_')]}")
        # raw_split.append(sample_tab(raw_tab, 5, sample_scheme='distinct'))
    
    # ==non_transpose
    ax_tabs = raw_split
    # header cells ending in ']' are collapsed to their second-to-last
    # character — presumably the digit inside trailing brackets; verify
    t_header = [ [x[-2] if x[-1]==']' else x for x in header] ]*len(raw_split)

    # ==transpose
    # ax_tabs = []
    # t_header = []
    # for t in raw_split:
    #     t_h, t_t = tab_transpose(header, t, 1)
    #     t_header.append([x[:x.rfind("_")] for x in t_h])
    #     ax_tabs.append(t_t)
        
    paint_for_dataset_format(t_header, ax_tabs, "./workspace", 
                             plot_grid=(len(Graph), len(Samplers)),
                             suptitle="schedule performance diff", 
                             ax_title=ax_title,
                             file_name=f"scheds-{vlen}")


def dataset_profiler(dss):
    """Run sweep_dataset on each dataset name; only the first call gets True
    as its second argument."""
    first = True
    for ds in dss:
        sweep_dataset(ds, first)
        first = False

if __name__ == '__main__':
    # alternative entry point (toggled off): sweep the format plots across
    # vector lengths 8..256
    # i = 8
    # while i < 512:
    #     _paint_format_performance(i)
    #     i *= 2
    # dss = ["Reddit", "AmazonProducts", "Flickr", "FacebookPagePage", ]
    # dataset names forwarded to GNNSwitch's sweep_dataset via dataset_profiler
    dss = ["AIFB", "MUTAG", "BGS", "AM"]
    dataset_profiler(dss)

    