import time
from itertools import product

import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
# sklearn.cross_validation was removed in scikit-learn 0.20; KFold now lives
# in sklearn.model_selection.
from sklearn.model_selection import GridSearchCV, KFold, StratifiedKFold
from sklearn.preprocessing import MinMaxScaler, StandardScaler

class MeanEncoder:
    """K-fold mean (target) encoder for categorical features.

    For every categorical column, each category is replaced by a smoothed
    target mean learned out-of-fold: the per-category posterior mean is
    blended with the global prior, weighted by ``prior_weight_func`` of the
    category's observation count.
    """

    def __init__(self, categorical_features, n_splits=5, target_type='classification', prior_weight_func=None):
        """
        :param categorical_features: list of str, the names of the categorical columns to encode
        :param n_splits: the number of splits used in mean encoding
        :param target_type: str, 'regression' or 'classification'
        :param prior_weight_func:
            a function that takes in the number of observations and outputs the prior weight.
            When a dict ``{'k': ..., 'f': ...}`` is passed, the default exponential
            decay function is used:
            k: the number of observations needed for the posterior to be weighted equally as the prior
            f: larger f --> smaller slope
        """
        self.categorical_features = categorical_features
        self.n_splits = n_splits
        self.learned_stats = {}

        if target_type == 'classification':
            self.target_type = target_type
            self.target_values = []
        else:
            # Anything other than 'classification' is treated as regression.
            self.target_type = 'regression'
            self.target_values = None

        if isinstance(prior_weight_func, dict):
            # Build the decay closure directly instead of eval()-ing source text.
            k = prior_weight_func['k']
            f = prior_weight_func['f']
            self.prior_weight_func = lambda x, k=k, f=f: 1 / (1 + np.exp((x - k) / f))
        elif callable(prior_weight_func):
            self.prior_weight_func = prior_weight_func
        else:
            # Default decay: k=2, f=1.
            self.prior_weight_func = lambda x: 1 / (1 + np.exp((x - 2) / 1))

    @staticmethod
    def mean_encode_subroutine(X_train, y_train, X_test, variable, target, prior_weight_func):
        """Encode one column on one fold.

        :return: (train encodings, test encodings, global prior, per-category stats)
        """
        X_train = X_train[[variable]].copy()
        X_test = X_test[[variable]].copy()

        if target is not None:
            nf_name = '{}_pred_{}'.format(variable, target)
            X_train['pred_temp'] = (y_train == target).astype(int)  # classification
        else:
            nf_name = '{}_pred'.format(variable)
            X_train['pred_temp'] = y_train  # regression
        prior = X_train['pred_temp'].mean()

        # Per-category posterior mean and observation count. Dict aggregation
        # on a SeriesGroupBy was removed in pandas 1.0, so use named
        # aggregation instead of {'mean': 'mean', 'beta': 'size'}.
        col_avg_y = X_train.groupby(by=variable)['pred_temp'].agg(mean='mean', beta='size')
        col_avg_y['beta'] = prior_weight_func(col_avg_y['beta'])
        col_avg_y[nf_name] = col_avg_y['beta'] * prior + (1 - col_avg_y['beta']) * col_avg_y['mean']
        col_avg_y.drop(['beta', 'mean'], axis=1, inplace=True)

        nf_train = X_train.join(col_avg_y, on=variable)[nf_name].values
        # Categories unseen in the training fold fall back to the prior.
        nf_test = X_test.join(col_avg_y, on=variable).fillna(prior, inplace=False)[nf_name].values

        return nf_train, nf_test, prior, col_avg_y

    def fit_transform(self, X, y):
        """
        :param X: pandas DataFrame, n_samples * n_features
        :param y: pandas Series or numpy array, n_samples
        :return X_new: the transformed pandas DataFrame containing mean-encoded categorical features
        """
        X_new = X.copy()
        # Normalize y to a 1-D Series aligned with X so that positional
        # indexing (.iloc) and value iteration (set(y)) behave as documented
        # even when a numpy array or a single-column DataFrame is passed.
        if not isinstance(y, pd.Series):
            y = pd.Series(np.ravel(np.asarray(y)), index=X_new.index)

        if self.target_type == 'classification':
            skf = StratifiedKFold(self.n_splits)
        else:
            skf = KFold(self.n_splits)

        if self.target_type == 'classification':
            self.target_values = sorted(set(y))
            self.learned_stats = {'{}_pred_{}'.format(variable, target): [] for variable, target in
                                  product(self.categorical_features, self.target_values)}
            for variable, target in product(self.categorical_features, self.target_values):
                nf_name = '{}_pred_{}'.format(variable, target)
                X_new.loc[:, nf_name] = np.nan
                # Resolve the column position once instead of assuming the
                # new column is last (iloc[..., -1]).
                col_pos = X_new.columns.get_loc(nf_name)
                for large_ind, small_ind in skf.split(y, y):
                    nf_large, nf_small, prior, col_avg_y = MeanEncoder.mean_encode_subroutine(
                        X_new.iloc[large_ind], y.iloc[large_ind], X_new.iloc[small_ind],
                        variable, target, self.prior_weight_func)
                    X_new.iloc[small_ind, col_pos] = nf_small
                    self.learned_stats[nf_name].append((prior, col_avg_y))
        else:
            self.learned_stats = {'{}_pred'.format(variable): [] for variable in self.categorical_features}
            for variable in self.categorical_features:
                nf_name = '{}_pred'.format(variable)
                X_new.loc[:, nf_name] = np.nan
                col_pos = X_new.columns.get_loc(nf_name)
                for large_ind, small_ind in skf.split(y, y):
                    nf_large, nf_small, prior, col_avg_y = MeanEncoder.mean_encode_subroutine(
                        X_new.iloc[large_ind], y.iloc[large_ind], X_new.iloc[small_ind],
                        variable, None, self.prior_weight_func)
                    X_new.iloc[small_ind, col_pos] = nf_small
                    self.learned_stats[nf_name].append((prior, col_avg_y))
        return X_new

    def transform(self, X):
        """
        :param X: pandas DataFrame, n_samples * n_features
        :return X_new: the transformed pandas DataFrame containing mean-encoded categorical features
        """
        X_new = X.copy()

        if self.target_type == 'classification':
            for variable, target in product(self.categorical_features, self.target_values):
                nf_name = '{}_pred_{}'.format(variable, target)
                X_new[nf_name] = 0
                # Average the encodings learned on each training fold.
                for prior, col_avg_y in self.learned_stats[nf_name]:
                    X_new[nf_name] += X_new[[variable]].join(col_avg_y, on=variable).fillna(prior, inplace=False)[
                        nf_name]
                X_new[nf_name] /= self.n_splits
        else:
            for variable in self.categorical_features:
                nf_name = '{}_pred'.format(variable)
                X_new[nf_name] = 0
                for prior, col_avg_y in self.learned_stats[nf_name]:
                    X_new[nf_name] += X_new[[variable]].join(col_avg_y, on=variable).fillna(prior, inplace=False)[
                        nf_name]
                X_new[nf_name] /= self.n_splits

        return X_new

def logloss(act, pred):
    """Binary log-loss (cross-entropy), the competition's evaluation metric.

    :param act: array-like of 0/1 labels; any shape that ravels to n
    :param pred: predicted probabilities of the positive class; a 2-column
        ``predict_proba`` matrix is also accepted (its second column is used)
    :return: mean negative log-likelihood, a float
    """
    act = np.ravel(np.asarray(act, dtype=float))
    pred = np.asarray(pred, dtype=float)
    if pred.ndim == 2 and pred.shape[1] == 2:
        # predict_proba output: keep P(class == 1). Without this, act * log(pred)
        # would broadcast (n, 1) against (n, 2) and return an array, not a scalar.
        pred = pred[:, 1]
    pred = np.ravel(pred)
    # Clip away exact 0/1 so the logs stay finite.
    epsilon = 1e-15
    pred = np.clip(pred, epsilon, 1 - epsilon)
    ll = np.sum(act * np.log(pred) + (1 - act) * np.log(1 - pred))
    return -ll / len(act)

# Categorical columns to mean-encode.
feature_list = ['C1', 'banner_pos', 'site_id', 'site_domain',
                'site_category', 'app_id', 'app_domain', 'app_category',
                'device_id', 'device_ip', 'device_model', 'device_type',
                'device_conn_type', 'C14', 'C15', 'C16', 'C17', 'C18',
                'C19', 'C20', 'C21']

# Columns read from disk: the click label plus every encoded feature.
use_cols = ['click'] + feature_list

def toDataFrame(data):
    """Wrap *data* in a pandas DataFrame (a DataFrame input passes through)."""
    frame = pd.DataFrame(data)
    return frame

from sklearn.model_selection import train_test_split

# Read only the label plus the columns that need encoding.
X_train = pd.read_csv('data/train_1.csv', usecols=use_cols)
y_train = X_train.click.values
print("y_train:", y_train[:100])
# NOTE(review): train_size=0.33 keeps only a third of the rows for training
# (the remainder becomes validation) — kept as in the original run; confirm intent.
X_train_part, X_val, y_train_part, y_val = train_test_split(
    X_train, y_train, train_size=0.33, random_state=0)

X_train_part = toDataFrame(X_train_part)
X_train_part.drop(['click'], axis=1, inplace=True)
X_val = toDataFrame(X_val)
X_val.drop(['click'], axis=1, inplace=True)
# Keep the targets as 1-D Series aligned with the feature frames: MeanEncoder
# iterates distinct target values with set(y), which on a DataFrame would
# yield column names instead of class labels, and a (n, 1) column vector
# would also trigger a shape warning in lr.fit below.
y_train_part = pd.Series(y_train_part, index=X_train_part.index)
y_val = pd.Series(y_val, index=X_val.index)
print("X_train_part.shape", X_train_part.shape)
print("y_train_part.shape", y_train_part.shape)
print("X_val.shape", X_val.shape)
print("y_val.shape", y_val.shape)
print("Y val head:", y_val.head())

en_time = time.time()
print("{} start encoding".format(en_time))
mean_enc = MeanEncoder(categorical_features=feature_list, n_splits=3)

X_train_part = mean_enc.fit_transform(X_train_part, y_train_part)
X_val = mean_enc.transform(X_val)
fi_time = time.time()
print("{} finish encoding".format(fi_time))
print("encoding use time:{}".format(fi_time - en_time))
print('encoded x head:', X_train_part.head())
print('encoded x_val head:', X_val.head())

# Drop the raw categorical columns, keeping only their encodings.
X_train_part.drop(feature_list, axis=1, inplace=True)
X_val.drop(feature_list, axis=1, inplace=True)
print('encoded x head drop origin:', X_train_part.head())

predict_time = time.time()
print("{} start fitting".format(predict_time))
# penalty='l1' requires the liblinear (or saga) solver; the lbfgs default
# raises on L1. liblinear is single-threaded, so n_jobs is dropped.
lr = LogisticRegression(C=0.001, penalty='l1', solver='liblinear', verbose=10000)
lr.fit(X_train_part, y_train_part.values)
# Keep only P(click == 1): passing the full predict_proba matrix would
# silently broadcast against the labels inside logloss.
y_predict_prob = lr.predict_proba(X_val)[:, 1]

# (labels were previously swapped/mismatched here)
print("y_val.shape", y_val.shape)
print("y_predict_prob.shape", y_predict_prob.shape)

finish_predict_time = time.time()
print("{} start predicting".format(finish_predict_time))
print("prediction:", y_predict_prob)

loss = logloss(y_val.values, y_predict_prob)

print("logloss = ", loss)