#!/usr/bin/python -O


import collections
import itertools
import math
import os
import random
#import scipy
import sys

sys.path.append('src/python/modules')

#from scipy.stats import normaltest
#from scipy.stats import f_oneway as anova

import utils
import wp_actives as wpa

SAMPLE_SIZE = 1000


def main(input_dir):
    """Sample grouped user activity from input_dir and report variation stats."""
    # g_fn => g_key => list of sampled (fraction, is_current, records) tuples
    samples = collections.defaultdict(lambda: collections.defaultdict(list))
    # g_fn => g_key => running fraction-weighted total of users seen
    counts = collections.defaultdict(lambda: collections.defaultdict(float))

    draw_samples(wpa.UserIterator(input_dir), samples, counts)

    summarize_samples(samples, counts)
    quantity_variation(samples, counts)
    proportion_variation(samples, counts)

def draw_samples(users, samples, counts):
    """Stream over all users and keep a bounded random sample per group.

    Each user's records are partitioned by every grouping function in
    GROUPINGS; each partition is considered for that group's reservoir of
    at most SAMPLE_SIZE entries.

    Args:
        users: iterable of (is_current, udata) pairs, where udata is a list
            of activity records for one user.
        samples: defaultdict, g_fn => g_key => list of sampled
            (fraction, is_current, records) tuples; mutated in place.
        counts: defaultdict, g_fn => g_key => running fraction-weighted
            total of users seen in that group; mutated in place.
    """
    n = 0        # total records currently held across all reservoirs
    n_iters = 0  # users processed so far (progress logging only)
    for is_current, udata in users:
        n_iters += 1
        for g_fn in GROUPINGS:
            # group the user data according to the grouping
            ug_fn = lambda record: g_fn(is_current, record)
            # NOTE(review): itertools.groupby only merges *consecutive*
            # records with equal keys -- assumes udata is ordered so each
            # group's records are contiguous; confirm upstream ordering.
            for (g, ug_data) in itertools.groupby(udata, ug_fn):
                if g == None:
                    continue  # grouping function opted this slice out
                ug_data = list(ug_data) # save it
                f = 1.0 * len(ug_data) / len(udata) # fraction of activity in this group
                counts[g_fn][g] += f
                s = samples[g_fn][g]
                # Reservoir-style sampling: always accept while under
                # SAMPLE_SIZE; afterwards accept with probability
                # SAMPLE_SIZE / count and evict a uniformly random entry.
                if ((len(s) < SAMPLE_SIZE)
                or  (random.random() < 1.0 * SAMPLE_SIZE / counts[g_fn][g])):
                    n += len(ug_data)
                    s.append((f, is_current, ug_data))
                    if len(s) > SAMPLE_SIZE:
                        i = random.randint(0, len(s) - 1)
                        n -= len(s[i][-1])  # s[i][-1] is the records list
                        del(s[i])
        if n_iters % 10000 == 0:
            utils.warn('number of sampled records: %d' % n)


def summarize_samples(samples, counts):
    for (g_fn, g_fn_sample) in samples.items():
        print 'sample for grouping function %s:' % g_fn.__name__
        for (g, g_sample) in g_fn_sample.items():
            print '\tgroup=%s, sampled %d of %d' % (g, len(g_sample), counts[g_fn][g])
        print '\n'

def quantity_variation(samples, counts):
    for (m_name, m_fn) in QUANTITY_METRICS:
        print 'calculating values for metric', m_name 

        for (g_fn, g_fn_sample) in samples.items():
            sum_mae = 0.0
            all_values = []
            all_errors = []
            for (g, g_sample) in g_fn_sample.items():
                fracs = [f for (f, is_current, udata) in g_sample]
                values = [m_fn(is_current, udata) for (f, is_current, udata) in g_sample]
                m = utils.mean(values)
                abs_errors = [abs(x - m) for x in values]
                mae = sum([f*e for (f, e) in zip(fracs, abs_errors)]) / sum(fracs)
                sum_mae += mae * counts[g_fn][g]
                all_values.append(values)
                all_errors.extend(abs_errors)
            #normality = -1.0 if (len(all_errors) < 8) else normaltest(all_errors)
            #aov = anova(*all_values)
            normality = 0.0
            aov = 0.0
            n = len(all_errors)
            sum_mae /= sum(counts[g_fn].values())
            print ('grouping function %s: normality=%s mae=%.4f anova=%s (n=%d)' %
                    (g_fn.__name__, normality, sum_mae, aov, n))

def proportion_variation(samples, counts):
    for (m_name, metric, dist_fn) in PROPORTION_METRICS:
        print 'calculating values for metric', m_name 

        for (g_fn, g_fn_sample) in samples.items():
            sum_distances = 0.0
            all_distances = []
            for (g, g_sample) in g_fn_sample.items():
                fracs = [f for (f, is_current, udata) in g_sample]
                uprops = [udata_to_prop(ud, metric) for (f, is_current, ud) in g_sample]
                mprop = mean_prop(uprops, metric)
                distances = [dist_fn(mprop, up) for up in uprops]
                mean_distance = sum([f*d for (f, d) in zip(fracs, distances)]) / sum(fracs)
                sum_distances += mean_distance * counts[g_fn][g]
                all_distances.extend(distances)
            #normality = -1.0 if (len(all_distances) < 8) else normaltest(all_distances)
            normality = 0.0
            n = len(all_distances)
            mean_dist = sum_distances / sum(counts[g_fn].values())
            print ('grouping function %s: normality=%s dist=%.4f (n=%d)' %
                    (g_fn.__name__, normality, mean_dist, n))

def udata_to_prop(udata, metric):
    """Return a mapping of worktype -> share of the user's total `metric`.

    Shares sum to 1.0 unless the total is zero, in which case every
    worktype maps to 0.0.
    """
    totals = collections.defaultdict(float)
    for rec in udata:
        totals[rec['worktype']] += rec.get_metric(metric)
    grand_total = sum(totals.values())
    for key in totals:
        totals[key] = 0.0 if grand_total == 0 else 1.0 * totals[key] / grand_total
    return totals

def mean_prop(uprops, metric):
    """Average a list of proportion dicts into one re-normalized dict.

    NOTE(review): `metric` is unused here; kept for signature parity with
    udata_to_prop -- confirm it can be dropped.
    """
    acc = collections.defaultdict(float)
    for up in uprops:
        for key, val in up.items():
            acc[key] += val
    total = sum(acc.values())
    for key in acc:
        acc[key] = 0.0 if total == 0 else 1.0 * acc[key] / total
    return acc


def g_all(is_current, record):
    """Place every record into the single group 1 (no grouping)."""
    return 1

def g_cohort(is_current, record):
    """Group records by their cohort field; current-status is ignored."""
    cohort = record['cohort']
    return cohort

def g_offset(is_current, record):
    """Group records by their offset field; current-status is ignored."""
    offset = record['offset']
    return offset

def g_lifespan(is_current, record):
    """Group non-current users by lifespan; current users map to None
    (None keys are skipped by draw_samples)."""
    if is_current:
        return None
    return record['lifespan']

def g_cohort_lifespan(is_current, record):
    """Group by current-status, cohort, and lifespan together."""
    cohort = record['cohort']
    lifespan = record['lifespan']
    return (is_current, cohort, lifespan)

def g_cohort_offset(is_current, record):
    """Group by (cohort, offset); current-status is ignored."""
    cohort = record['cohort']
    offset = record['offset']
    return (cohort, offset)

def g_lifespan_offset(is_current, record):
    """Group non-current users by (cohort, offset); current users map to
    None (skipped by draw_samples).

    NOTE(review): despite the name, this keys on cohort/offset rather than
    lifespan/offset -- confirm which was intended.
    """
    if is_current:
        return None
    return (record['cohort'], record['offset'])

def g_cohort_offset_lifespan(is_current, record):
    """Group by current-status, cohort, lifespan, and offset together."""
    key = (is_current, record['cohort'], record['lifespan'], record['offset'])
    return key

def euclidean_dist(p1, p2):
    """Return the *squared* Euclidean distance between two sparse vectors
    represented as dicts; missing keys count as 0.0.

    NOTE(review): no square root is applied -- confirm callers expect the
    squared distance.
    """
    keys = set(p1)
    keys.update(p2)
    return sum((p1.get(k, 0.0) - p2.get(k, 0.0)) ** 2 for k in keys)

def cosine_dist(p1, p2):
    """Return the cosine of the angle between two sparse vectors (dicts),
    or 0.0 when either vector has zero length.

    NOTE(review): despite the name this is a *similarity* (1.0 = identical
    direction), not a distance -- confirm callers expect that orientation.
    """
    norm1 = sum(v * v for v in p1.values()) ** 0.5
    norm2 = sum(v * v for v in p2.values()) ** 0.5
    if not (norm1 and norm2):
        return 0.0
    # keys present in only one vector contribute 0 to the dot product
    dot = sum(v * p2.get(k, 0.0) for (k, v) in p1.items())
    return dot / (norm1 * norm2)

def kl_dist(mprop, uprop):
    """Return the KL divergence D(uprop || mprop) over uprop's support.

    Reference probabilities are floored at 1e-9 to avoid division by zero
    and infinite logs when mprop lacks (or zeroes) a key.

    Fix: read mprop via .get() instead of indexing -- indexing raised
    KeyError for plain-dict arguments and silently inserted missing keys
    into defaultdict arguments (mutating the caller's mean-proportion dict).
    The returned value is unchanged.
    """
    x = 0.0
    for k in uprop:
        p = uprop[k]
        if p > 0:
            x += p * math.log(p / max(0.000000001, mprop.get(k, 0.0)))
    return x

# Grouping functions applied to every user in draw_samples. Each maps
# (is_current, record) to a hashable group key, or None to exclude the
# record from that grouping.
GROUPINGS = [
    g_all,
    g_cohort,
    g_lifespan,
    g_offset,
    g_cohort_offset,
    g_cohort_lifespan,
    g_lifespan_offset,
    g_cohort_offset_lifespan,
]

# (label, fn) pairs for quantity_variation. Each fn reduces one user's
# (is_current, udata) to a scalar total of the named metric; the log-*
# variants use log(1 + total) to compress large totals.
QUANTITY_METRICS = [
    ('hours', lambda is_current, udata: sum([r.get_metric('hours') for r in udata])),
    ('edits', lambda is_current, udata: sum([r.get_metric('edits') for r in udata])),
    ('bytes', lambda is_current, udata: sum([r.get_metric('bytes') for r in udata])),
    ('log-hours', lambda is_current, udata: math.log(1.0 + sum([r.get_metric('hours') for r in udata]))),
    ('log-edits', lambda is_current, udata: math.log(1.0 + sum([r.get_metric('edits') for r in udata]))),
    ('log-bytes', lambda is_current, udata: math.log(1.0 + sum([r.get_metric('bytes') for r in udata]))),
]

# (label, record metric key, distance fn) triples for proportion_variation.
# Fix: 'hours-kl' and 'hours-cosine' each appeared twice in this list,
# causing their variation reports to be computed and printed twice.
# Duplicates removed, keeping first-occurrence order.
PROPORTION_METRICS = [
    ( 'hours-kl', 'hours', kl_dist ),
    ( 'hours-cosine', 'hours', cosine_dist ),
    ( 'hours-euclid', 'hours', euclidean_dist ),
    ( 'edits-euclid', 'edits', euclidean_dist ),
    ( 'bytes-euclid', 'bytes', euclidean_dist ),
    ( 'edits-cosine', 'edits', cosine_dist ),
    ( 'bytes-cosine', 'bytes', cosine_dist ),
    ( 'edits-kl', 'edits', kl_dist ),
    ( 'bytes-kl', 'bytes', kl_dist ),
]


if __name__ == '__main__':
    # Fix: exit with a usage message instead of an IndexError traceback
    # when the input directory argument is missing.
    if len(sys.argv) < 2:
        sys.exit('usage: %s <input_dir>' % sys.argv[0])
    main(sys.argv[1])
