import copy
import logging
import numpy as np
from dataset import DataSet
import helpers

log = logging.getLogger('golem.cv')

def rep_cv(d, node, reps=5, k=10):
  """
  Repeated cross-validation on shuffled, stratified subsets of d.

  For each repetition the dataset is reshuffled, split into k stratified
  folds, and cross-validated with node. Returns a flat list with the output
  of node on all test sets (reps * k entries).
  """
  predictions = []
  for _ in range(reps):
    # Reshuffle before each repetition so the folds differ between runs.
    d = d.shuffled()
    split_ids = strat_splits(d.Y, k)
    parts = subsets(d, split_ids)
    predictions.extend(cross_validate(folds(parts), node))
  return predictions


def cross_validate(folds, node):
  """
  Cross-validate on subsets using node. Returns a list with the output of node
  on the test sets. The list folds contains folds in the form of (tr, te),
  where tr and te are DataSets used for training and testing respectively.

  Cross validation can give an estimate of how well the analysis will work
  in practice [1].

  This methods has a few extra safety measures: 1) a fresh copy of node is used
  for training with each fold, and 2) the labels of the test sets are removed
  to prevent accidental cheating.

  [1] http://en.wikipedia.org/wiki/Cross-validation_(statistics)
  """
  predictions = []
  for tr, te in folds:
    # Train a pristine deep copy so no state leaks between folds.
    fresh_node = copy.deepcopy(node)

    # Blank out the test labels before applying the node, then restore
    # them on the prediction so downstream evaluation can use them.
    unlabeled = DataSet(Y=np.nan * te.Y, default=te)
    raw_pred = fresh_node.train_apply(tr, unlabeled)
    predictions.append(DataSet(Y=te.Y, default=raw_pred))
  return predictions


def subsets(d, folds):
  '''
  Get test subsets of d indicated by the folds array. Returns a list with
  DataSets, one per distinct fold id (in sorted order of fold id).
  '''
  folds = np.atleast_1d(folds)
  assert d.ninstances == folds.size
  parts = []
  for fold_id in np.unique(folds):
    parts.append(d[folds == fold_id])
  return parts


def folds(subsets):
  """
  Generate training and test sets from a list with DataSets. The training set
  is created from the subsets after isolating a test set.
  Returns a generator with tuples (tr, te).
  """
  k = len(subsets)
  for ki in range(k):
    te = subsets[ki]
    # Merge every other subset into the training set with an explicit
    # accumulation loop. The original used the bare `reduce` builtin, which
    # was removed in Python 3 (it now lives in functools).
    rest = [s for i, s in enumerate(subsets) if i != ki]
    tr = rest[0]
    for part in rest[1:]:
      tr = tr + part
    log.info('Generating training and test set %d of %d' % (ki + 1, k))
    yield (tr, te)
  

def strat_splits(Y, k=10):
  """
  Create indices for k stratified subsets for labels Y.

  Instances of each class are dealt out round-robin over the k subsets,
  so every subset gets a near-equal share of every class.

  >>> Y = helpers.to_one_of_n([0, 0, 0, 1, 1, 1, 2, 2, 2, 0, 0, 0])
  >>> strat_splits(Y, 4)
  array([0, 1, 2, 0, 1, 2, 0, 1, 2, 3, 0, 1])
  """
  Y = helpers.hard_max(Y)
  assert k <= max(np.sum(Y, axis=1)), 'Too many folds.'
  # One fold id per instance; -1 marks "not yet assigned".
  assignment = -1 * np.ones(Y.shape[1])

  # Assign the instances of each class round-robin to the folds.
  for ci in range(Y.shape[0]):
    members = Y[ci] == 1
    assignment[members] = np.arange(np.sum(Y[ci])) % k
  return assignment.astype(int)


def seq_splits(n, k=10):
  """
  Generate indices for k sequential subsets with for n instances.

  For data that is time-dependent, the cross-validation results on these folds
  will be more representative than strat_splits().

  >>> seq_splits(10, 4)
  array([0, 0, 0, 1, 1, 2, 2, 2, 3, 3])
  """
  assert k <= n, 'Too many folds'
  # Evenly spaced positions in [0, k); truncation yields k contiguous runs.
  positions = np.linspace(0, k, n, endpoint=False)
  return positions.astype(int)
