import numpy as np
import pandas as pd
import xgboost as xgb
from gensim.models import Word2Vec
from sklearn import preprocessing
from sklearn.cluster import KMeans

from utils import BayesianSmoothing

def gen_onehot_feat(df, column_name):
    """One-hot encode `column_name` and return df with the original column
    replaced by the dummy columns (prefixed with the column name)."""
    dummies = pd.get_dummies(df[column_name], prefix=column_name)
    remainder = df.drop([column_name], axis=1)
    return pd.concat([remainder, dummies], axis=1)

def gen_label_encode_feat(df, column_name):
    """Label-encode `column_name` in place (integers 0..n_classes-1) and
    return the same dataframe."""
    encoder = preprocessing.LabelEncoder()
    values = list(df[column_name].values)
    encoder.fit(values)
    df[column_name] = encoder.transform(values)
    return df

# Merge group-by statistics computed from the same dataframe back onto it.
def merge_count(df, columns, value, cname):
    """Attach the per-group count of `value` (grouped by `columns`) as `cname`."""
    agg = df.groupby(columns)[value].count().reset_index()
    agg = agg.rename(columns={value: cname})
    return df.merge(agg, on=columns, how="left")

def merge_nunique(df, columns, value, cname):
    """Attach the per-group distinct count of `value` (grouped by `columns`) as `cname`."""
    agg = df.groupby(columns)[value].nunique().reset_index()
    agg = agg.rename(columns={value: cname})
    return df.merge(agg, on=columns, how="left")

def merge_median(df, columns, value, cname):
    """Attach the per-group median of `value` (grouped by `columns`) as `cname`."""
    agg = df.groupby(columns)[value].median().reset_index()
    agg = agg.rename(columns={value: cname})
    return df.merge(agg, on=columns, how="left")

def merge_mean(df, columns, value, cname):
    """Attach the per-group mean of `value` (grouped by `columns`) as `cname`."""
    agg = df.groupby(columns)[value].mean().reset_index()
    agg = agg.rename(columns={value: cname})
    return df.merge(agg, on=columns, how="left")

def merge_sum(df, columns, value, cname):
    """Attach the per-group sum of `value` (grouped by `columns`) as `cname`."""
    agg = df.groupby(columns)[value].sum().reset_index()
    agg = agg.rename(columns={value: cname})
    return df.merge(agg, on=columns, how="left")

def merge_max(df, columns, value, cname):
    """Attach the per-group maximum of `value` (grouped by `columns`) as `cname`."""
    agg = df.groupby(columns)[value].max().reset_index()
    agg = agg.rename(columns={value: cname})
    return df.merge(agg, on=columns, how="left")

def merge_min(df, columns, value, cname):
    """Attach the per-group minimum of `value` (grouped by `columns`) as `cname`."""
    agg = df.groupby(columns)[value].min().reset_index()
    agg = agg.rename(columns={value: cname})
    return df.merge(agg, on=columns, how="left")

def merge_std(df, columns, value, cname):
    """Attach the per-group (sample) standard deviation of `value` as `cname`.

    Note: single-element groups yield NaN (pandas sample std), which is NOT
    filled here — callers must handle it.
    """
    agg = df.groupby(columns)[value].std().reset_index()
    agg = agg.rename(columns={value: cname})
    return df.merge(agg, on=columns, how="left")
# NOTE(review): this is an exact duplicate of merge_count defined earlier in
# this file; at import time this second definition shadows the first.
# Consider removing one of the two.
def merge_count(df,columns,value,cname):
    # Per-group count of `value` (grouped by `columns`), merged back as `cname`.
    add = pd.DataFrame(df.groupby(columns)[value].count()).reset_index()
    add.columns=columns+[cname]
    df=df.merge(add,on=columns,how="left")
    return df

# Merge group-by statistics computed from a separate feature dataframe onto df.
def feat_count(df, df_feature, fe, value, name=""):
    """Merge the per-group count of `value` in df_feature (grouped by `fe`)
    onto df. New column is `name` if given, else "<value>_<fe...>_count".
    Unmatched keys are filled with 0 (fillna applies to the whole result)."""
    col = name if name else value + "_%s_count" % "_".join(fe)
    agg = df_feature.groupby(fe)[value].count().reset_index()
    agg.columns = fe + [col]
    return df.merge(agg, on=fe, how="left").fillna(0)

def feat_nunique(df, df_feature, fe, value, name=""):
    """Merge the per-group distinct count of `value` in df_feature (grouped by
    `fe`) onto df. New column is `name` if given, else
    "<value>_<fe...>_nunique". Unmatched keys are filled with 0."""
    col = name if name else value + "_%s_nunique" % "_".join(fe)
    agg = df_feature.groupby(fe)[value].nunique().reset_index()
    agg.columns = fe + [col]
    return df.merge(agg, on=fe, how="left").fillna(0)

def feat_mean(df, df_feature, fe, value, name=""):
    """Merge the per-group mean of `value` in df_feature (grouped by `fe`)
    onto df. New column is `name` if given, else "<value>_<fe...>_mean".
    Unmatched keys are filled with 0."""
    col = name if name else value + "_%s_mean" % "_".join(fe)
    agg = df_feature.groupby(fe)[value].mean().reset_index()
    agg.columns = fe + [col]
    return df.merge(agg, on=fe, how="left").fillna(0)

def feat_std(df, df_feature, fe, value, name=""):
    """Merge the per-group sample std of `value` in df_feature (grouped by
    `fe`) onto df. New column is `name` if given, else "<value>_<fe...>_std".
    Single-element groups (std NaN) and unmatched keys are filled with 0."""
    col = name if name else value + "_%s_std" % "_".join(fe)
    agg = df_feature.groupby(fe)[value].std().reset_index()
    agg.columns = fe + [col]
    return df.merge(agg, on=fe, how="left").fillna(0)

def feat_median(df, df_feature, fe, value, name=""):
    """Merge the per-group median of `value` in df_feature (grouped by `fe`)
    onto df. New column is `name` if given, else "<value>_<fe...>_median".
    Unmatched keys are filled with 0."""
    col = name if name else value + "_%s_median" % "_".join(fe)
    agg = df_feature.groupby(fe)[value].median().reset_index()
    agg.columns = fe + [col]
    return df.merge(agg, on=fe, how="left").fillna(0)

def feat_max(df, df_feature, fe, value, name=""):
    """Merge the per-group maximum of `value` in df_feature (grouped by `fe`)
    onto df. New column is `name` if given, else "<value>_<fe...>_max".
    Unmatched keys are filled with 0."""
    col = name if name else value + "_%s_max" % "_".join(fe)
    agg = df_feature.groupby(fe)[value].max().reset_index()
    agg.columns = fe + [col]
    return df.merge(agg, on=fe, how="left").fillna(0)

def feat_min(df, df_feature, fe, value, name=""):
    """Merge the per-group minimum of `value` in df_feature (grouped by `fe`)
    onto df. New column is `name` if given, else "<value>_<fe...>_min".
    Unmatched keys are filled with 0."""
    col = name if name else value + "_%s_min" % "_".join(fe)
    agg = df_feature.groupby(fe)[value].min().reset_index()
    agg.columns = fe + [col]
    return df.merge(agg, on=fe, how="left").fillna(0)

def feat_sum(df, df_feature, fe, value, name=""):
    """Merge the per-group sum of `value` in df_feature (grouped by `fe`)
    onto df. New column is `name` if given, else "<value>_<fe...>_sum".
    Unmatched keys are filled with 0."""
    col = name if name else value + "_%s_sum" % "_".join(fe)
    agg = df_feature.groupby(fe)[value].sum().reset_index()
    agg.columns = fe + [col]
    return df.merge(agg, on=fe, how="left").fillna(0)


def feat_var(df, df_feature, fe, value, name=""):
    """Merge the per-group sample variance of `value` in df_feature (grouped
    by `fe`) onto df. New column is `name` if given, else
    "<value>_<fe...>_var". NaN variances and unmatched keys are filled with 0."""
    col = name if name else value + "_%s_var" % "_".join(fe)
    agg = df_feature.groupby(fe)[value].var().reset_index()
    agg.columns = fe + [col]
    return df.merge(agg, on=fe, how="left").fillna(0)


def feat_quantile(df, df_feature, fe, value, n, name=""):
    """Merge the per-group n-quantile (0 <= n <= 1) of `value` in df_feature
    (grouped by `fe`) onto df. New column is `name` if given, else
    "<value>_<fe...>_quantile". Unmatched keys are filled with 0."""
    col = name if name else value + "_%s_quantile" % "_".join(fe)
    agg = df_feature.groupby(fe)[value].quantile(n).reset_index()
    agg.columns = fe + [col]
    return df.merge(agg, on=fe, how="left").fillna(0)

def feat_skew(df, df_feature, fe, value, name=""):
    """Merge the per-group skewness of `value` in df_feature (grouped by `fe`)
    onto df. New column is `name` if given, else "<value>_<fe...>_skew".
    Groups too small for skew (NaN) and unmatched keys are filled with 0."""
    col = name if name else value + "_%s_skew" % "_".join(fe)
    agg = df_feature.groupby(fe)[value].skew().reset_index()
    agg.columns = fe + [col]
    return df.merge(agg, on=fe, how="left").fillna(0)

def gen_smoothing_ctr_feat(shows, clks, steps, epsilon):
    '''
    Get the Bayes-smoothed CTR feature.

    Parameters
    ----------
    shows : array-like
        The show (impression) data.
    clks : array-like
        The click data.
    steps : int
        Iteration steps.
    epsilon : float
        The decreased threshold of parameters.

    Returns
    -------
    ctrs : list of float
        The smoothed ctr array.
    '''
    # Fit alpha/beta of the Beta prior starting from (1, 1).
    smoother = BayesianSmoothing(1, 1)
    smoother.update(shows, clks, steps, epsilon)
    a = smoother.alpha
    b = smoother.beta
    # Smoothed ctr: (clicks + alpha) / (shows + alpha + beta).
    return [(clk + a) * 1.0 / (show + a + b) for clk, show in zip(clks, shows)]

def gen_kmeans_feat(df, k):
    '''
    Get the cluster label feature by KMeans.

    Parameters
    ----------
    df : DataFrame, shape(n_samples, n_features)
        The features data.
    k : int
        The k parameter value of KMeans.

    Returns
    -------
    labels : array-like
        The cluster label list, one label per row of df.
    '''
    # Fixed random_state keeps the cluster assignment reproducible across runs.
    kmeans_model = KMeans(n_clusters=k, random_state=1).fit(df.values)
    labels = kmeans_model.labels_
    return labels

def gen_gbdt_feat(train_x, train_y, eval_x, eval_y, test_x, params=None, iter_num=1000, early_stop=10):
    '''
    Get the gbdt leaf-index features from an xgboost model.

    Parameters
    ----------
    train_x : train data
    train_y : train label
    eval_x : eval data
    eval_y : eval label
    test_x : test data
    params : the params of xgboost
    iter_num : iteration number of the xgboost model
    early_stop : early-stopping rounds of the xgboost model

    Returns
    -------
    new_train_feature : gbdt leaf-index features for train + eval rows
    new_test_feature : gbdt leaf-index features for test rows

    Examples
    --------
    params like as follows :
    params={
        'eta': 0.3,
        'max_depth':3,
        'min_child_weight':1,
        'gamma':0.3,
        'subsample':0.8,
        'colsample_bytree':0.8,
        'booster':'gbtree',
        'objective': 'binary:logistic',
        'nthread':12,
        'scale_pos_weight': 1,
        'lambda':1,
        'seed':27,
        'silent':0 ,
        'eval_metric': 'auc'
    }
    '''
    d_train = xgb.DMatrix(train_x, label=train_y)
    d_valid = xgb.DMatrix(eval_x, label=eval_y)
    d_test = xgb.DMatrix(test_x)
    watchlist = [(d_train, 'train'), (d_valid, 'valid')]

    model_bst = xgb.train(params, d_train, iter_num, watchlist, early_stopping_rounds=early_stop, verbose_eval=10)

    # Re-build a DMatrix over train + eval rows so leaf features cover both.
    merge_train = np.vstack((train_x, eval_x))
    # BUG FIX: np.vstack on 1-D label arrays produced a (2, n) matrix, which is
    # the wrong shape for DMatrix(label=...). np.concatenate joins along axis 0
    # and is correct for both 1-D labels and (n, 1) column vectors.
    merge_y = np.concatenate((train_y, eval_y))
    d_full = xgb.DMatrix(merge_train, label=merge_y)
    # pred_leaf=True returns the per-tree leaf index for each sample.
    new_train_feature = model_bst.predict(d_full, pred_leaf=True)
    new_test_feature = model_bst.predict(d_test, pred_leaf=True)
    return new_train_feature, new_test_feature


def gen_embedding_feat():
    # TODO: not implemented yet — intended to build embedding features
    # (Word2Vec is imported at the top of the file, presumably for this).
    pass

def gen_lad_feat():
    # TODO: not implemented yet — name suggests LDA features (likely a typo
    # for "lda"); see the TODO list at the bottom of the file.
    pass
    
# Feature-engineering TODO list:
# 1. k-means: convert selected numeric features into a categorical cluster label
# 2. xgboost leaf-node feature transformation
# 3. PCA dimensionality reduction
# 4. word2vec embeddings
# 5. Bayesian smoothing of click-through / conversion rates
# 6. LDA topic features
# 7. DNN intermediate-layer features
