#!/usr/bin/env python3


"""Relative Error Linear Regression

Linear regression with relative error, esp. percentage error
solved by gradient descendent(GD).
"""

import pandas as pd
import numpy as np
from sklearn.linear_model import *


# error definitions

LOWER = 0.0001


def u(x):
    """Clamp ``x`` from below at LOWER (elementwise for arrays).

    Used to keep denominators of percentage errors away from zero.
    """
    shifted = x - LOWER
    return shifted * (shifted > 0) + LOWER


# dispatch table: error name -> f(pred, true) returning the signed error
relative_errors = {
    'absolute error':
        lambda pred, true: pred - true,
    'log error':
        lambda pred, true: np.log(pred + LOWER) - np.log(true + LOWER),
    'percentage error':
        lambda pred, true: (pred - true) / u(true),
    'symmetric percentage error':
        lambda pred, true: (pred - true) / u((pred + true) / 2),
}


# helper functions

def max0(x):
    """Clamp negative values to zero, elementwise."""
    return np.clip(x, 0, None)

def add_constant(X):
    """Return ``X`` with an intercept column of ones appended on the right.

    For a DataFrame the column is named ``'intercept'``; for array-likes a
    trailing column of ones is inserted.  Bug fix: the original version
    mutated the caller's DataFrame in place (``X['intercept'] = 1``); this
    version always leaves the input untouched.
    """
    if isinstance(X, pd.DataFrame):
        # assign() returns a copy, so the caller's frame is not modified
        return X.assign(intercept=1)
    X = np.asarray(X)
    return np.insert(X, X.shape[1], 1, axis=1)


class Max0Mixin:
    """Mixin: clamp the parent estimator's predictions at zero."""

    def predict(self, X):
        raw = super().predict(X)
        return max0(raw)

class LogMixin:
    """Mixin: fit and predict in log space.

    The parent estimator is fitted on (log X, log Y) instead of (X, Y)
    directly, and predictions are mapped back with exp.
    """

    def fit(self, X, Y):
        X_log, Y_log = np.log(X), np.log(Y)
        return super().fit(X_log, Y_log)

    def predict(self, X):
        log_pred = super().predict(np.log(X))
        return np.exp(log_pred)


class Max0LinearRegression(Max0Mixin, LinearRegression):
    """Ordinary least-squares linear regression whose predictions are
    clamped at zero (via Max0Mixin.predict)."""
    pass


class LogLinearRegression(LogMixin, LinearRegression):
    """Linear regression fitted in log space: models log Y against log X
    (via LogMixin.fit/predict)."""
    pass

def logarithm(cls):
    """Class decorator: return a subclass of ``cls`` that fits and predicts
    in log space (fit on log X / log Y, predict via exp), like LogMixin.

    Bug fix: the original returned a class literally named ``cls_``, so every
    decorated regressor lost its name in reprs/tracebacks; the name and
    qualname of the wrapped class are now preserved.
    """
    class cls_(cls):
        def fit(self, X, Y):
            return super().fit(np.log(X), np.log(Y))

        def predict(self, X):
            return np.exp(super().predict(np.log(X)))

    # keep the decorated class identifiable (repr, tracebacks, pickling)
    cls_.__name__ = cls.__name__
    cls_.__qualname__ = cls.__qualname__
    return cls_

def maxo(cls):
    """Class decorator: return a subclass of ``cls`` whose predictions are
    clamped at zero, like Max0Mixin.

    Bug fix: the original returned a class literally named ``cls_``; the
    decorated class's name and qualname are now preserved.
    """
    class cls_(cls):
        def predict(self, X):
            # same clamping as the module-level max0 helper
            return np.maximum(super().predict(X), 0)

    # keep the decorated class identifiable (repr, tracebacks, pickling)
    cls_.__name__ = cls.__name__
    cls_.__qualname__ = cls.__qualname__
    return cls_


class GDLinearRegression:
    """Linear regression fitted by gradient descent (abstract mixin class).

    Subclasses must define ``_gradient(wb, X, y)``: the gradient of the loss
    with respect to the stacked parameter vector ``[coef_..., intercept_]``.
    """

    # parameter initialization before running GD:
    # 'f' -> fit the parent estimator first; 'r' -> random initialization
    init_method = 'f'
    # when True, an already-fitted model keeps its coef_/intercept_ as start
    warm_start = True

    def fit(self, X, y):
        """(Re-)initialize the parameters if needed, then run GD steps.

        Raises:
            ValueError: if ``y`` is not 1-dimensional, or ``init_method``
                is not one of 'f' / 'r'.
        """
        if y.ndim > 1:
            raise ValueError('dim of y must ==1!')
        if not isinstance(X, np.ndarray):
            X = np.asarray(X)
        needs_init = (not self.warm_start
                      or not hasattr(self, 'coef_')
                      or not hasattr(self, 'intercept_'))
        if needs_init:
            if self.init_method == 'f':
                super().fit(X, y)
            elif self.init_method == 'r':
                self.coef_ = np.random.random(X.shape[1])
                self.intercept_ = np.random.random()
            else:
                # ValueError is more precise than the original bare Exception
                # and stays backward compatible for `except Exception` callers
                raise ValueError('No such initialization method')
        self.partial_fit(X, y)
        return self

    def partial_fit(self, X, y):
        """Run additional gradient-descent steps from the current parameters.

        Raises:
            ValueError: if ``y`` is not 1-dimensional.
            AttributeError: if the model has no coef_/intercept_ yet.
        """
        if y.ndim > 1:
            raise ValueError('dim of y must ==1!')
        try:
            w, b = self.coef_, self.intercept_
        except AttributeError as err:  # was a bare `except:` hiding real errors
            raise AttributeError(
                'the model does not have attributes coef_ or intercept_, '
                'might not be fitted.') from err
        wb = np.append(w, b)  # optimize over [coef, intercept] jointly
        wb = adam(wb, self._gradient, X, y)
        self.coef_ = wb[:-1]
        self.intercept_ = wb[-1]
        return self

class SMPERegressor(GDLinearRegression, Max0LinearRegression):
    """Linear regression fitted by GD on a symmetric-percentage-error loss;
    predictions are clamped at zero via Max0Mixin in the base class."""

    @staticmethod
    def _gradient(w, X, y):
        # Gradient of the loss w.r.t. the stacked parameters [coef, intercept]
        # for a raw mini-batch X, y (no log transform in this class — the
        # original "logX" comment was a copy-paste from the log variants).
        X = add_constant(X)  # append the intercept column
        y_ = max0(np.dot(X, w))  # clamped predictions
        m = (y_+y)/2  # symmetric mean of prediction and target
        d = y_ - y  # signed error
        # (y_>0) is the subgradient of the max0 clamp; the (m>LOWER)/(m<=LOWER)
        # pair caps the weight once the denominator m becomes tiny.
        # NOTE(review): y/m**3 is evaluated even where m == 0 and only masked
        # afterwards, so divide-by-zero warnings are possible — confirm intended.
        return np.dot((y/m**3 * (y_>0) * (m>LOWER)+2/LOWER**2 * (y_>0) * (m<=LOWER)) * d, X)

class SMPE1Regressor(GDLinearRegression, Max0LinearRegression):
    """Like SMPERegressor but the gradient uses sign(error) — presumably the
    L1 (absolute symmetric percentage error) variant; TODO confirm derivation."""

    @staticmethod
    def _gradient(w, X, y):
        # Gradient w.r.t. stacked [coef, intercept] for a raw mini-batch X, y
        # (no log transform here, despite the original "logX" comment).
        X = add_constant(X)  # append the intercept column
        y_ = max0(np.dot(X, w))  # clamped predictions
        m = (y_+y)/2  # symmetric mean of prediction and target
        d = np.sign(y_ - y) * y_  # sign of the error, scaled by the prediction
        # NOTE(review): unlike SMPERegressor there is no (y_>0) clamp mask,
        # though d vanishes anyway when y_ == 0 — confirm this is intended.
        return np.dot((y/m**2 * (m>LOWER)+2/LOWER * (m<=LOWER)) * d, X)

@logarithm
class SMPELogRegressor(GDLinearRegression, LinearRegression):
    """Symmetric-percentage-error regression fitted in log space: @logarithm
    makes fit/predict operate on log X / log Y, so _gradient receives
    log-transformed samples and maps them back with exp."""

    @staticmethod
    def _gradient(w, X, y):
        # Gradient for a mini-batch of log-transformed samples (logX, logy):
        # both np.dot(X, w) and y are in log space here.
        X = add_constant(X)  # append the intercept column
        y_ = np.exp(np.dot(X, w))  # prediction back on the original scale
        y = np.exp(y)  # target back on the original scale
        m = (y_ + y)/2  # symmetric mean of prediction and target
        d = (y_ - y) * y_  # extra y_ factor from the chain rule of exp
        # (m>LOWER)/(m<=LOWER) caps the weight for tiny denominators;
        # NOTE(review): y/m**3 is computed before masking — possible warnings.
        return np.dot((y/m**3 * (m>LOWER)+2/LOWER**2 * (m<=LOWER)) * d, X)

@logarithm
class SMPE1LogRegressor(GDLinearRegression,LinearRegression):
    """Log-space counterpart of SMPE1Regressor (sign-of-error gradient);
    @logarithm feeds _gradient log-transformed samples."""

    @staticmethod
    def _gradient(wb, X, y):
        # Gradient for a mini-batch of log-transformed samples (logX, logy).
        X = add_constant(X)  # append the intercept column
        y_ = np.exp(np.dot(X, wb))  # prediction back on the original scale
        y = np.exp(y)  # target back on the original scale
        m = (y_ + y)/2  # symmetric mean of prediction and target
        d = np.sign(y_ - y) * y_  # sign(error) scaled by y_ (exp chain rule)
        return np.dot((y/m**2 * (m>LOWER)+2/LOWER * (m<=LOWER)) * d, X)


# GD algorithms

EPSILON = 1e-8  # keeps the adaptive step finite when v is ~0

def adam(w, gradient, X, y, learning_rate=0.001, beta=0.9, gamma=0.9, tol=1e-8, batch=16, max_iter=20000):
    """Minimize a loss by Adam-style mini-batch gradient descent.

    Args:
        w: initial parameter vector (updated and returned).
        gradient: callable ``gradient(w, Xbatch, ybatch)`` -> gradient array.
        X, y: full training data, sliced into mini-batches of size ``batch``.
        learning_rate, beta, gamma: step size and EMA factors for the first
            and second moment estimates.
        tol: stop updating within an epoch once the (squared-gradient EMA)
            ``v`` is entirely below this threshold.
        batch: mini-batch size.
        max_iter: ``max_iter // batch`` epochs are run.

    Bug fixes vs the original:
        * ``B = N // batch`` was 0 when ``N < batch``, causing a
          ZeroDivisionError in the moment updates; B is now at least 1.
        * when ``batch`` divided ``N`` exactly, an extra empty tail batch
          was processed every epoch; batches are now sliced cleanly.
    """
    N = X.shape[0]
    # number of mini-batches used as the averaging factor; at least 1
    B = max(N // batch, 1)
    m = 0  # EMA of the gradient (first moment)
    v = 0  # EMA of the squared gradient (second moment)
    for _ in range(max_iter // batch):
        for start in range(0, N, batch):
            Xbatch = X[start:start + batch]
            ybatch = y[start:start + batch]
            g = gradient(w, Xbatch, ybatch)
            m = beta * m + (1 - beta) / B * g
            v = gamma * v + (1 - gamma) / B * g ** 2
            if np.all(v < tol):
                break  # gradients vanishingly small: stop this epoch
            w -= learning_rate / (np.sqrt(v) + EPSILON) * m
    return w

def mgd(w, gradient, X, y, learning_rate=0.002, gamma=0.9, tol=1e-8, batch=16, max_iter=5000):
    """Minimize a loss by mini-batch gradient descent with momentum.

    Args:
        w: initial parameter vector (updated and returned).
        gradient: callable ``gradient(w, Xbatch, ybatch)`` -> gradient array.
        X, y: full training data, sliced into mini-batches of size ``batch``.
        learning_rate, gamma: step size and momentum factor.
        tol: stop updating within an epoch once ``|v|`` is entirely below it.
        batch: mini-batch size; ``max_iter // batch`` epochs are run.

    Bug fixes vs the original:
        * ``B = N // batch`` was 0 when ``N < batch`` -> ZeroDivisionError.
        * the convergence check compared the SIGNED momentum ``v`` to ``tol``,
          so it fired whenever every component was negative (e.g. on the very
          first step when approaching the optimum from below); it now tests
          the magnitude ``|v|``.
        * no more empty tail batch when ``batch`` divides ``N`` exactly.
    """
    N = X.shape[0]
    B = max(N // batch, 1)  # averaging factor; at least 1
    v = 0  # momentum (EMA of the scaled gradient)
    for _ in range(max_iter // batch):
        for start in range(0, N, batch):
            Xbatch = X[start:start + batch]
            ybatch = y[start:start + batch]
            g = gradient(w, Xbatch, ybatch)
            v = gamma * v + learning_rate / B * g
            if np.all(np.abs(v) < tol):
                break  # momentum vanishingly small: stop this epoch
            w -= v
    return w
