#!/usr/bin/env python3

"""test linear model with relative error
esp. symmetric percentage error
"""

import numpy as np
import pandas as pd

# Small positive offset that keeps denominators and log arguments strictly
# positive (avoids division by zero / log(0) when a value is exactly 0).
# NOTE: the original code referenced an undefined name `LOWER` in the
# 'log error' lambda, which raised NameError on first use — defined here.
LOWER = 0.000001


def u(x):
    """Shift x by a tiny epsilon so it can safely appear in a denominator."""
    return x + LOWER


# Relative-error measures.  Each takes (pred, true) and returns the signed
# elementwise error; callers take abs()/mean() as needed.
errors = {
    'log error': lambda pred, true: np.log(pred + LOWER) - np.log(true + LOWER),
    'percentage error': lambda pred, true: (pred - true) / u(true),
    'symmetric percentage error': lambda pred, true: (pred - true) / u((pred + true) / 2),
}

# Error measure used throughout the experiment below.
e = errors['symmetric percentage error']

from keras.models import Sequential
from keras.layers import Dense, Activation

class ANN(Sequential):
    """Small feed-forward net trained with the |symmetric percentage error| loss.

    ``init()`` must be called once before ``fit``; the driver script calls it
    explicitly for every model whose name ends with 'ANN'.
    """

    def init(self):
        # Guard against repeated calls: the test loop calls init() once per
        # round on the same instance, which previously stacked a fresh
        # Dense/Activation pair onto the network every round.
        if not self.layers:
            self.add(Dense(3))
            self.add(Activation('relu'))
        # Keras invokes the loss as loss(y_true, y_pred); `e` expects
        # (pred, true), so map the arguments through explicitly instead of
        # relying on the (swapped) positional order.
        self.compile(loss=lambda y_true, y_pred: abs(e(y_pred, y_true)),
                     optimizer='adam')

    def fit(self, X, Y, *args, **kwargs):
        """Train silently for 2000 epochs; returns self for chaining."""
        # *args must precede keyword arguments in the call.
        super().fit(X, Y, *args, verbose=0, epochs=2000, **kwargs)
        return self


class LogANN(Sequential):
    """Linear network fitted in log space.

    Trains on (log X, log Y) with an absolute-error loss and exponentiates
    predictions back to the original scale, so errors are effectively
    relative on the original scale.
    """

    def init(self):
        # Guard against repeated calls (the test loop re-inits each round);
        # without it, a new Dense layer was stacked on every round.
        if not self.layers:
            self.add(Dense(3))
        # Keras invokes the loss as loss(y_true, y_pred); absolute error is
        # symmetric, so the argument order is immaterial here.
        self.compile(loss=lambda y_true, y_pred: abs(y_pred - y_true),
                     optimizer='adam')

    def fit(self, X, Y, *args, **kwargs):
        """Fit on log-transformed data; returns self for chaining.

        Extra positional/keyword arguments are now forwarded to
        ``Sequential.fit`` (the original accepted them but dropped them).
        """
        super().fit(np.log(X), np.log(Y), *args, verbose=0, epochs=2000, **kwargs)
        return self

    def predict(self, X):
        """Predict in log space and map back to the original scale with exp."""
        return np.exp(super().predict(np.log(X)))


if __name__ == '__main__':
    # ---- data -----------------------------------------------------------
    FILENAME = 'data-diff003.csv'

    data = pd.read_csv(FILENAME, index_col=0)
    # Column groups in the CSV.  X_keys/E_keys are kept as documentation of
    # the file layout even though this experiment uses S_keys -> B_keys.
    X_keys = ['RGB_R', 'RGB_G', 'RGB_B']
    E_keys = ['e500(相对差)', 'e510(相对差)', 'e520(相对差)']
    B_keys = ['b_500', 'b_510', 'b_520']
    S_keys = ['s_500', 's_510', 's_520']

    Y = data[B_keys]
    X = data[S_keys]

    # ---- models ---------------------------------------------------------
    from utils import *
    from sklearn.linear_model import *
    from sklearn.model_selection import *

    class Max0LinearRegression(Max0Mixin, LinearRegression):
        """LinearRegression whose predictions are floored at 0 via Max0Mixin."""
        pass

    from relinear import *
    from mars import *
    import mo

    models = {
        'SMPE-Linear': SMPE1Regressor(),
        'Linear': Max0LinearRegression(),
        # 'Ridge': maxo(Ridge)(),
        'Bayesian': maxo(BayesianRidge)(),
        'Huber': maxo(HuberRegressor)(),
        'Mars': maxo(Mars)(),
        'ANN': ANN(),

        'Log-SMPE-Linear': SMPE1LogRegressor(),
        'Log-Linear': log(LinearRegression)(),
        # 'Log-Ridge': log(Ridge)(),
        'Log-Bayesian': log(BayesianRidge)(),
        'Log-Huber': log(HuberRegressor)(),
        'Log-Mars': log(Mars)(),
        'Log-ANN': LogANN(),
    }
    print(f'''total number of data: {len(Y)}
    size of test data: 0.2
    ''')

    # ---- test -----------------------------------------------------------
    import time
    results = []        # per-round test errors (one inner list per round)
    train_results = []  # per-round training errors
    times = []          # per-round fit wall-clock times
    n_tests = 2
    for i in range(n_tests):
        _results = []
        _train_results = []
        _times = []
        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
        for mn, model in models.items():
            # The Keras models build their layers lazily via init().
            if mn.endswith('ANN'): model.init()

            time1 = time.perf_counter()
            try:
                model.fit(X_train, Y_train)
            except Exception:
                # Estimator only supports a single output column: wrap it so
                # each output gets its own fitted copy.  (Was a bare
                # `except:`, which also swallowed KeyboardInterrupt etc.)
                model = mo.MORegression(model=model)
                model.fit(X_train, Y_train)
            time2 = time.perf_counter()
            _times.append(time2 - time1)
            Y_pred = model.predict(X_test)
            E = np.mean(np.mean(np.abs(e(Y_pred, Y_test)), axis=0))
            Y_fit = model.predict(X_train)
            Ef = np.mean(np.mean(np.abs(e(Y_fit, Y_train)), axis=0))
            _results.append(E)
            _train_results.append(Ef)

        results.append(_results)
        train_results.append(_train_results)
        times.append(_times)

    # Median over rounds is more robust to one unlucky split than the mean.
    result = {}
    result['测试误差'] = np.median(results, axis=0)
    result['训练误差'] = np.median(train_results, axis=0)
    result['训练耗时'] = np.median(times, axis=0)

    result = pd.DataFrame(result, index=pd.Index(models.keys()))

    result = ftable(result)
    print(result)
    # result.to_csv('result-re.csv')