import random
from collections import Counter as Histogram
from aima.learning import learn_decision_tree, DataSet, test as predictor_test
from aima.utils import mean
    
from our_utils import max_k, random_split
import our_utils
from functools import partial
from itertools import product

# Number of trees grown for each committee (the bagging ensemble size).
N = 50
# Global resampling function used by learn_tree_list; the run_experiment_*
# drivers assign it a partial of our_utils.sample before any learning runs.
sample = None


class datasets:
    """Registry of UCI dataset configurations used by the experiments.

    Each attribute is a dict of keyword arguments for aima.learning.DataSet.
    """

    house_votes = {'name': "house-votes-84",
                   'values': [('y', 'n', '?')] * 16 + [('republican', 'democrat')]}
    balance = {'name': "balance-scale.data"}
    crx = {'name': "crx.data"}

    @staticmethod
    def all():
        """Yield a freshly constructed DataSet for every registered config."""
        configs = (datasets.house_votes, datasets.crx, datasets.balance)
        for conf in configs:
            yield DataSet(**conf)


def learn_tree_list(ds):
    """Steps 1-2: grow N decision trees, each on a fresh resample of *ds*.

    Resampling is done by ds.fork(sample), where *sample* is the module-level
    resampling function installed by the experiment drivers.
    """
    forest = []
    for _ in range(N):
        resampled = ds.fork(sample)
        forest.append(learn_decision_tree(resampled))
    return forest


def classify_by_committee(committee):
    """Build a majority-vote classifier from a list of tree classifiers.

    The returned predictor returns the most common label among the
    committee's votes; ties are broken toward the greater label (this
    mirrors tuple comparison on (count, label)).
    """
    def predict(value):
        votes = Histogram(tree(value) for tree in committee)
        # Highest vote count wins; on equal counts the larger label wins.
        return max(votes, key=lambda label: (votes[label], label))
    return predict


class Experiment:
    """Owns a train/validation split plus the tree scoring and committee
    selection policies compared in the experiments."""

    def __init__(self, ds, k):
        self.k = k
        train, self.validation = random_split(ds.examples)
        self.train = DataSet(examples=train, values=ds.values)

    # ---- evaluation functions for a single tree ----

    def weight_all(self, tree):
        """Accuracy of *tree* on the held-out validation examples."""
        return predictor_test(tree, self.train, self.validation)

    def ratio_all(self, tree):
        """Validation accuracy normalised by tree size (favours small trees)."""
        accuracy = predictor_test(tree, self.train, self.validation)
        return accuracy / float(tree.size)

    # ---- committee selection policies ----

    def uniform(self, trees):
        """k trees drawn uniformly at random, without replacement."""
        return list(random.sample(trees, self.k))

    def evaluate_all(self, trees):
        """The k trees with the best validation accuracy."""
        return max_k(trees, self.k, key=self.weight_all)

    def by_ratio_all(self, trees):
        """The k trees with the best accuracy-to-size ratio."""
        return max_k(trees, self.k, key=self.ratio_all)


def cross_validation(selectors, dataset, t=10):
    """t-fold cross-validation of each committee-selection strategy.

    Returns one mean accuracy (rounded to 4 places) per selector, in the
    order the selectors were given. NOTE: dataset.examples is shuffled in
    place, and temporarily rebound per fold (restored in ``finally``).
    """
    def train_and_test(start, end):
        all_examples = dataset.examples
        try:
            # Train on everything outside the [start, end) test window.
            dataset.examples = all_examples[:start] + all_examples[end:]
            forest = learn_tree_list(dataset)
            scores = []
            for select in selectors:
                committee = classify_by_committee(select(forest))
                scores.append(predictor_test(committee, dataset,
                                             all_examples[start:end]))
            return scores
        finally:
            dataset.examples = all_examples

    n = len(dataset.examples)
    random.shuffle(dataset.examples)
    fold = n / t  # Python 2 integer division; trailing remainder examples are never tested
    matrix = [train_and_test(i * fold, (i + 1) * fold) for i in range(t)]
    # Transpose: one row of per-fold scores per selector, then average.
    return [round(mean(column), 4) for column in zip(*matrix)]


def run_cross_validation(ds, k):
    """Cross-validate all three selection strategies on *ds* with committee
    size *k* and print the scores as a comma-separated row.

    Fix: parenthesise the print argument. Under Python 2 the parentheses
    around a single expression are a no-op, so output is unchanged, while
    the bare `print str(...)` form is a hard syntax error under Python 3.
    """
    data = Experiment(ds, k=k)
    selectors = [data.uniform, data.evaluate_all, data.by_ratio_all]
    res = cross_validation(selectors, data.train)
    # str(res)[1:-1] strips the list brackets, leaving "a, b, c".
    print(str(res)[1:-1])


def run_experiment_k(ks):
    global sample
    sample = partial(our_utils.sample, alpha=0.7)
    for ds, k in product(datasets.all(), ks):
        print k, ",",
        run_cross_validation(ds, k)


def run_experiment_alpha(alphas):
    k = 5
    for ds, alpha in product(datasets.all(), alphas):
        print alpha, ',', 
        global sample
        sample = partial(our_utils.sample, alpha=alpha)
        run_cross_validation(ds, k)
