#coding: utf8
import logging, warnings
import numpy as np
import cvxopt.base as cvx
import cvxopt.solvers

from basenode import BaseNode
from ..kernel import build_kernel_matrix, kernel_cv_fold
from ..helpers import hard_max
from ..dataset import DataSet
from ..cv import strat_splits

ALPHA_RTOL = 1e-5

def cvxopt_dsvm(K, labels, c, D, implicit_bias):
  '''
  Train a (D-)SVM in the dual formulation by solving its quadratic program
  with CVXOPT.

  Parameters
  ----------
  K : (m, m) array-like
    Precomputed kernel (Gram) matrix of the training instances.
  labels : array-like of length m
    Class labels; must contain exactly the values -1 and 1.
  c : float
    Regularization parameter. It is divided by the number of examples m
    below, so callers supply c on a per-dataset scale.
  D : array-like, broadcast to 2-D
    Matrix that distributes the slack penalty over the examples
    (rows of D define the box constraints D^T a <= c). When every column
    sums to at most 1 the problem degenerates into a standard c-SVM;
    otherwise an implicit bias is required (asserted below).
  implicit_bias : bool
    When True, the equality constraint sum(a_i * y_i) = 0 is dropped and
    a zero explicit bias is returned (the bias is assumed to live in the
    kernel, e.g. via an inhomogeneous kernel or a constant feature).

  Returns
  -------
  (alphas, bias) : (1-D array of length m, float)
    Support-vector coefficients (small ones clipped to exactly 0) and the
    explicit bias term (0. when implicit_bias is True).
  '''
  log = logging.getLogger('golem.nodes.svm.cvxopt_dsvm')
  K = np.atleast_2d(K)
  assert K.shape[0] == K.shape[1]
  m = K.shape[0]
  c = float(c)/m # Note that we normalize c by the number of examples!
  D = np.atleast_2d(D)
  if np.any(np.sum(D, axis=0) > 1):
    # D-SVM does not degenerate into a c-SVM.
    assert implicit_bias == True, \
      'Use a implicit, regularized bias for the D-SVM.'

  labels = np.atleast_2d(labels).T # column vector
  assert np.all(np.unique(labels) == [-1, 1])

  log.debug('Creating QP-target')
  # (1) min W(a) = (1/2) * a^T * P * a - \vec{1}^T a
  # P is the Hessian of the dual objective: K_ij * y_i * y_j.
  label_matrix = np.dot(labels, labels.T)
  P = cvx.matrix(K * label_matrix)
  q = cvx.matrix([-1. for i in range(m)])

  log.debug('Creating QP-constraints')
  # (2) 0 < all alphas < c/m, using Ga <= h
  # is solved in two parts, first 0 < alphas, then alphas < c
  # (3) sum(a_i * y_i) = 0, using Aa = b (optional)
  # G1: -a <= 0 enforces non-negativity; G2: D^T a <= c enforces the
  # (possibly distributed) upper bound.
  G1 = cvx.spmatrix(-1, range(m), range(m))
  G2 = cvx.matrix(D.T)
  G = cvx.sparse([G1, G2])
  h = cvx.matrix([0. for i in range(m)] + [c for i in range(G2.size[0])])
  A = cvx.matrix(labels.T)
  r = cvx.matrix(0.)

  log.debug('Solving QP')
  cvxopt.solvers.options['show_progress'] = False
  if implicit_bias:
    # No equality constraint: the bias is absorbed in the kernel.
    sol = cvxopt.solvers.qp(P, q, G, h)
  else:
    sol = cvxopt.solvers.qp(P, q, G, h, A, r)
  if sol['status'] != 'optimal':
    log.warning('QP solution status: ' + sol['status'])
  log.debug('solver.status = ' + sol['status'])

  # Extract alphas. Coefficients that are tiny relative to the largest one
  # are numerical noise from the interior-point solver; clip them to 0 so
  # the model sparsifies cleanly.
  alphas = np.asarray(sol['x']) # column vector!
  alphas[alphas < np.max(alphas) * ALPHA_RTOL] = 0

  if implicit_bias:
    return alphas.flatten(), 0.

  # Calculate explicit bias using support vectors with slack \xi_i=0. That is,
  # where 0 < alpha[i] < c. This does not work for the D-SVM due to the
  # distribution of the errors over all support vectors --- use an implicit
  # bias (inhomogeneous kernel or constant feature) instead.
  # For a free SV the KKT conditions give y_i = sum_j a_j y_j K_ij + b,
  # hence b = y_i - (K (y * a))_i for every free SV i.
  bias = labels - np.dot(K, labels * alphas)

  freesv = np.flatnonzero(
    np.logical_and(0 < alphas.T, # support vector, and
    alphas.T / c < 1 - ALPHA_RTOL)) # not within margin
  log.debug('Free SVs: %s' % freesv)

  if freesv.size > 0:
    log.debug('Bias for freesv.T: %s' % bias[freesv].T)
    # Average over free SVs for numerical stability.
    bias = np.mean(bias[freesv])
  else:
    # The optimal b is an interval. However, for some b's SVs might fall
    # outside the margin again. To prevent this, we pick the points for each
    # class that are closest to the margin, and pretend these points are in
    # fact on the margin. A similar trick is used in LIBSVM.
    log.warning('All support vectors are within the margin.')
    bias = np.mean([np.max(bias[labels==-1]), np.min(bias[labels==1])])

  return alphas.flatten(), bias

class SVM(BaseNode):
  '''
  Two-class support vector machine node.

  The kernel matrix is built with build_kernel_matrix and the dual QP is
  solved by cvxopt_dsvm. When a range of c-values is supplied, the best
  one is selected by cross-validation during training.
  '''
  def __init__(self, c=np.logspace(-3, 5, 10), implicit_bias=False, Dfun=None, 
    fold_fun=lambda d: strat_splits(d.Y, 5), kernel=None, **kernel_params):
    '''
    Parameters
    ----------
    c : float or sequence of floats
      Regularization parameter(s). With more than one value, the best is
      selected by cross-validation in train_().
    implicit_bias : bool
      Forwarded to cvxopt_dsvm; when True no explicit bias is computed.
    Dfun : callable or None
      Maps a DataSet to the D-matrix of the D-SVM. Defaults to the
      identity, i.e. a plain c-SVM.
    fold_fun : callable
      Maps a DataSet to fold indices for the c cross-validation.
    kernel, **kernel_params :
      Kernel name and parameters for build_kernel_matrix.
    '''
    BaseNode.__init__(self)
    if 'C' in kernel_params:
      warnings.warn(
        "The SVM's C-parameter has been replaced with a lowercase c.",
        DeprecationWarning)
      # pop() removes the deprecated name so it is not forwarded to
      # build_kernel_matrix as a (bogus) kernel parameter later on.
      c = np.atleast_1d(kernel_params.pop('C'))

    # c_star holds the c selected during training; NaN until trained.
    self.c, self.c_star = np.atleast_1d(c), np.nan
    self.implicit_bias = implicit_bias
    self.Dfun = Dfun if Dfun else lambda d: np.eye(d.ninstances)
    self.fold_fun = fold_fun if fold_fun else lambda d: strat_splits(d.Y, 5)
    self.kernel, self.kernel_params = kernel, kernel_params

  def K(self, d, d_test=None):
    '''Kernel matrix between the instances of d and d_test (default: d).'''
    # Compare with None explicitly: an empty DataSet could evaluate as
    # falsy, and "not d_test" would then silently substitute d for it.
    if d_test is None:
      d_test = d
    return build_kernel_matrix(d.X, d_test.X, kernel=self.kernel, 
      **self.kernel_params)

  def labels(self, d):
    '''Convert the two-row indicator matrix d.Y into -1./+1. labels.'''
    return np.where(d.Y[1] > d.Y[0], 1, -1).astype(float)

  @classmethod
  def svm_crossval(cls, K, D, labels, c, implicit_bias, folds):
    ''' Low-level SVM cross-validation procedure '''
    # NaN marks instances not predicted yet; tune() asserts finiteness.
    preds = np.empty(K.shape[0])
    preds.fill(np.nan)
    for fi in np.unique(folds):
      # get kernels, D-rows and labels for this fold
      K_tr, K_te = kernel_cv_fold(K, folds, fi)
      D_tr = D[folds!=fi,:]
      tr_lab = labels[folds!=fi]

      # train on the remaining folds, predict the held-out fold
      alphas, b = cvxopt_dsvm(K_tr, tr_lab, c, D_tr, implicit_bias)
      preds[folds==fi] = np.dot(alphas * tr_lab, K_te) + b
    return preds

  @classmethod
  def tune(cls, K, D, labels, cs, implicit_bias, folds):
    ''' Find c-parameter by cross-validation '''
    # Cross-validated accuracy per candidate c. Dispatch through cls (not
    # SVM) so subclasses overriding svm_crossval are respected.
    accs = [np.mean(np.sign(
      cls.svm_crossval(K, D, labels, c, implicit_bias, folds))==labels) 
      for c in cs]
    assert np.all(np.isfinite(accs))
    # argmax returns the first maximum, i.e. the smallest optimal c.
    return cs[np.argmax(accs)]

  def train_(self, d):
    '''Train the SVM on the two-class DataSet d.'''
    assert d.nclasses == 2
    self.log.debug('Calculating kernel matrix, D and labels')
    K, D, labels = self.K(d), self.Dfun(d), self.labels(d)

    # find c-parameter by cross-validation when a range was given
    if self.c.size > 1:
      self.c_star = self.tune(K, D, labels, self.c, self.implicit_bias, 
        self.fold_fun(d))
      self.log.info('Selected c=%.3g' % self.c_star)
    else:
      self.c_star = self.c[0]

    # train final SVM on the full training set
    alphas, b = cvxopt_dsvm(K, labels, self.c_star, D, self.implicit_bias)

    # sparsify: keep only the support vectors (alpha != 0)
    svi = alphas != 0
    svs, alphas, y = d[svi], alphas[svi], labels[svi]
    self.log.info('Found %d SVs (%.2f%%)' % (np.sum(svi), 100. * np.mean(svi)))

    # store model
    self.alphas, self.svs, self.y, self.b = alphas, svs, y, b

  def apply_(self, d):
    '''Return a DataSet with the signed decision values for d.'''
    K = self.K(self.svs, d)
    preds = np.atleast_2d(np.dot(self.alphas * self.y, K) + self.b)
    # Stack [-f(x), f(x)] so that a hard_max over rows picks the positive
    # class exactly when f(x) > 0.
    return DataSet(X=np.vstack([-preds, preds]), default=d)

  def __str__(self):
    return 'SVM (c=%g, kernel=%s, params=%s)' % (self.c_star, self.kernel, 
      str(self.kernel_params))
