# -*- coding: utf-8 -*-
"""
Spyder Editor

This is a temporary script file.
"""


# 首先 import 必要的模块
import pandas as pd 
import numpy as np

from sklearn.model_selection import GridSearchCV

#竞赛的评价指标为logloss
#from sklearn.metrics import log_loss  
#SVM并不能直接输出各类的概率，所以在这个例子中我们用正确率作为模型预测性能的度量
from sklearn.metrics import log_loss
from sklearn import metrics

from matplotlib import pyplot
# import seaborn as sns

from xgboost import XGBClassifier
import xgboost as xgb
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import recall_score
from pandas import DataFrame
from sklearn.metrics import log_loss
import tensorflow as tf
from sklearn.preprocessing import OneHotEncoder

# Load the pre-extracted feature tables; the first CSV column becomes the row
# index (presumably the source file name — the outlier filter below treats it
# that way; TODO confirm against the feature-extraction step).
# allPdata / allNdata look like positive / negative training samples and
# allTdata the unlabeled test set — inferred from names, verify upstream.
allPdata=pd.read_csv("tao/allPdata(1).csv",index_col=0)                   # load data
allNdata=pd.read_csv("tao/allNdata(1).csv",index_col=0)
allTdata=pd.read_csv("tao/allTdata.csv",index_col=0)

def handclf(flag, howbig, zhengfu, allTdata, handoutlineset):
    """Drop hand-picked outlier rows based on one feature column.

    Parameters
    ----------
    flag : str
        Name of the feature column to filter on.
    howbig : float
        Threshold for that feature.
    zhengfu : int
        Cut direction: 1 drops rows where the feature is > howbig,
        0 drops rows where the feature is < howbig.
    allTdata : pandas.DataFrame
        Data to clean. Not mutated; a filtered copy is returned.
    handoutlineset : set
        Accumulated set of dropped row labels (file names). Not mutated.

    Returns
    -------
    tuple(set, pandas.DataFrame)
        The input set united with the newly dropped row labels, and the
        DataFrame with the offending rows removed.

    Raises
    ------
    ValueError
        If ``zhengfu`` is not 0 or 1 (the original silently returned None).
    """
    if zhengfu == 1:
        mask = allTdata[flag] > howbig      # rows above the threshold are outliers
    elif zhengfu == 0:
        mask = allTdata[flag] < howbig      # rows below the threshold are outliers
    else:
        raise ValueError("zhengfu must be 0 or 1, got %r" % (zhengfu,))
    dropped = set(allTdata.index[mask])                     # row labels are file names
    # drop by label, matching the original .drop(...) semantics
    return handoutlineset | dropped, allTdata.drop(allTdata.index[mask], axis=0)

# Hand-tuned outlier cuts, applied in order: (feature, threshold, direction)
# where direction 1 drops rows above the threshold and 0 drops rows below it.
positive_cuts = [
    ('c0', 0.2, 1),
    ('c20', 1, 1),
    ('FA1dataMean', 4, 1),
]
negative_cuts = [
    ('c0', 0.2, 1),
    ('c2', 2, 1),
    ('c10', 1.1, 0),
    ('c15', -0.4, 0),
    ('c16', 2, 1),
    ('c17', 1.1, 1),
    ('c20', 1, 1),
    ('F_ai2_fft_frequency_max', 2000, 1),
    ('FA1dataVar', 10, 1),
    ('ai2_frequency_max_diff', -1000, 0),
]

handoutline1 = set()
for feat, thresh, direction in positive_cuts:
    handoutline1, allPdata = handclf(feat, thresh, direction, allPdata, handoutline1)

handoutline2 = set()
for feat, thresh, direction in negative_cuts:
    handoutline2, allNdata = handclf(feat, thresh, direction, allNdata, handoutline2)

print(allPdata.shape)
print(allNdata.shape)

# Label and assemble the training table. Positives are duplicated four times
# to rebalance the classes. NOTE: the column name 'lable' is misspelled but
# must stay — downstream code reads it.
allPdata['lable'] = 1
allNdata['lable'] = 0
newallPdata = pd.concat([allPdata] * 4, axis=0)   # oversample positives x4
traindata = pd.concat([newallPdata, allNdata], axis=0)
y = traindata['lable']                            # target labels
x = traindata.drop('lable', axis=1)               # feature matrix

# XGBoost base learner; later its per-tree leaf indices are one-hot encoded
# as extra features for the FTRL model.
xgb_params = {
    'learning_rate': 0.1,
    'n_estimators': 5,           # a large value would be fine: cv can return a suitable n_estimators
    'max_depth': 5,
    'min_child_weight': 1,
    'gamma': 0,
    'subsample': 0.8,
    'colsample_bytree': 0.6,
    'colsample_bylevel': 0.8,
    'objective': 'binary:logistic',
    'reg_alpha': 0.01,
    'reg_lambda': 0.02,
    'seed': 3,
}
testXGBClassifier = XGBClassifier(**xgb_params)

x = x.values
y = y.values
testXGBClassifier.fit(x, y)      # train the XGBoost model

# Use the fitted XGBoost model as a feature extractor: apply() maps every
# sample to the leaf index it falls into in each tree; those indices are
# then one-hot encoded into binary features.
X_test_leaves = testXGBClassifier.apply(allTdata.values)    # test-set leaf indices
X_train_leaves = testXGBClassifier.apply(x)                 # train-set leaf indices

# Fit the encoder on train and test leaves together so both share one code
# book (a leaf reached only by test samples would otherwise fail to encode).
X_leaves = np.concatenate((X_train_leaves, X_test_leaves), axis=0)
xgbenc = OneHotEncoder()
xgbenc.fit(X_leaves)    # fit only — the original stored an unused fit_transform result

x_train_xgb_onehot = xgbenc.transform(X_train_leaves).toarray()   # train leaves -> 0/1 features
x_test_xgb_onehot = xgbenc.transform(X_test_leaves).toarray()     # test leaves -> 0/1 features
xgb_onehotDfforTrain = pd.DataFrame(x_train_xgb_onehot, index=traindata.index)   # as DataFrame
xgb_onehotDfforTest = pd.DataFrame(x_test_xgb_onehot, index=allTdata.index)      # as DataFrame

# Standardize the raw features, then stack them column-wise with the XGBoost
# one-hot leaf features to build the final train/test matrices.
from sklearn.preprocessing import StandardScaler

ss_X = StandardScaler()     # scaler is fitted on the training set only

# scale and wrap back into DataFrames aligned with the one-hot frames' indices
x = pd.DataFrame(ss_X.fit_transform(x), xgb_onehotDfforTrain.index)
sstest = pd.DataFrame(ss_X.transform(allTdata), xgb_onehotDfforTest.index)

# final feature matrices: scaled raw features + one-hot leaf features
x_train = pd.concat([x, xgb_onehotDfforTrain], axis=1)
x_test = pd.concat([sstest, xgb_onehotDfforTest], axis=1)

# Build a TF1-style logistic-regression graph over the stacked features.
# The placeholder width 101 must equal x_train's column count — presumably
# raw-feature count + one-hot leaf count; TODO confirm it matches.
x_ = tf.placeholder(tf.float32, [None, 101])                # input features (batch, 101)
y_ = tf.placeholder(tf.float32,shape=(None,1))              # binary labels (batch, 1)
w = tf.Variable(tf.truncated_normal([101,1],stddev=0.1))    # weights, truncated-normal init
b = tf.Variable(tf.constant(0.1, shape=[1]))                # bias init

logits_ = tf.matmul(x_,w)+b                                 # linear model: x·w + b

## sigmoid cross-entropy (binary log-loss), averaged over the batch
cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_, labels=y_))        # sigmoid cross-entropy loss
y_val=tf.nn.sigmoid(logits_)                               # predicted probability (sigmoid of logits)

# Training op: FTRL optimizer with L1 regularization (encourages sparse weights)
train_step = tf.train.FtrlOptimizer(learning_rate=0.01,l1_regularization_strength=1,l2_regularization_strength=0).minimize(cross_entropy)  # optimizer is FTRL
#train_step = tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)

# NOTE(review): the session is never closed; a `with tf.Session()` block
# would be safer, but later code reads `sess`, so left as-is.
sess = tf.Session()
init_op = tf.global_variables_initializer()
sess.run(init_op)
# # Train
# Full-batch training: every step feeds the whole training set.
# 600k steps at full batch is slow — presumably tuned by eye; TODO confirm.
for i in range(600000):
    #example_batch, label_batch = tf.train.shuffle_batch([example,label], batch_size=1, capacity=200, min_after_dequeue=100, num_threads=2)
    #example_batch,label_batch = tf.train.batch([X_train,y_train.reshape(-1,1)],batch_size = 1,capacity=10)
    #example, l = sess.run([example_batch,label_batch])
    sess.run(train_step,feed_dict={x_:x_train, y_: y.reshape(-1,1)})                         # feed full training set and labels
    if (i+1) % 1000 == 0:
        train_logloss = sess.run(cross_entropy,feed_dict={x_:x_train, y_: y.reshape(-1,1)})   # report loss every 1000 steps
#         test_logloss = sess.run(cross_entropy, feed_dict={x_:X_test, y_: y_test.reshape(-1,1)})
        print('训练集的loss %f'%(train_logloss))

# Run the trained linear model: sigmoid probabilities for train and test sets.
output = sess.run([y_val], feed_dict={x_: x_train})     # train-set probabilities
output2 = sess.run([y_val], feed_dict={x_: x_test})     # test-set probabilities

# Round train-set probabilities to hard 0/1 labels. NOTE: this rebinds the
# module-level name y_val from the sigmoid tensor to a numpy array.
y_val = np.asarray(output).round()
print("Classification report for classifier xgboost:\n%s\n"
      % (metrics.classification_report(y, y_val[0])))
y_predict = np.asarray(output2).round()                 # rounded test predictions (unused below)

# Write test-set results: 'GBDT_FTRL' holds the sigmoid probability,
# 'result' the thresholded 0/1 decision.
TEST = pd.DataFrame(output2[0], index=xgb_onehotDfforTest.index, columns=['GBDT_FTRL'])
TEST['result'] = 0
above = TEST['GBDT_FTRL'] > 0.00005     # very low cutoff — anything above counts as '1'
index1 = TEST.index[above]
print(len(index1))                      # number of samples predicted '1'
TEST.loc[index1, 'result'] = 1
TEST.to_csv('GBDT_FTRL.csv')            # final output file

# TEST.describe()
# TEST=TEST.drop('GBDT_FTRL',axis=1)                                                  #丢掉概率值那一列

# TEST.columns=['GBDT_FTRL']
# TEST.to_csv('GBDT_FTRL.csv')

########################################################  第二步==LR  #######################################################################################
# 数据标准化
# from sklearn.preprocessing import StandardScaler
# # 初始化特征的标准化器
# ss_X = StandardScaler()
# # 分别对训练和测试数据的特征进行标准化处理
# x = ss_X.fit_transform(x)
#
# # from sklearn.model_selection import GridSearchCV
# # from sklearn.linear_model import LogisticRegression
# #
# # #需要调优的参数
# # # 请尝试将L1正则和L2正则分开，并配合合适的优化求解算法（slover）
# # #tuned_parameters = {'penalty':['l1','l2'],
# # #                   'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000]
# # #                   }
# # penaltys = ['l1','l2']
# # Cs = [0.001, 0.01, 0.1, 1, 10, 100, 1000]
# # tuned_parameters = dict(penalty = penaltys, C = Cs)
# # lr_penalty= LogisticRegression()                                                 #建立LR模型
# # grid= GridSearchCV(lr_penalty, tuned_parameters,cv=5, scoring='neg_log_loss')  #网格搜索
# # grid.fit(x,y)                                                                    #训练
# #
# #
# # # examine the best model
# # print(-grid.best_score_)                                                         #显示网格搜索结果
# # print(grid.best_params_)
#
#
# from sklearn.linear_model import LogisticRegressionCV
# Cs = [0.01,0.1,1, 10,100,1000]
#
# # 大量样本（6W+）、高维度（93），L1正则 --> 可选用saga优化求解器(0.19版本新功能)
# # LogisticRegressionCV比GridSearchCV快
# lrcv_L1 = LogisticRegressionCV(Cs=Cs, cv = 5, scoring='neg_log_loss', penalty='l1', solver='liblinear', multi_class='ovr')
# lrcv_L1.fit(x, y)
#
# lr_y_val=lrcv_L1.predict(x)
# print("Classification report for classifier xgboost:\n%s\n"
#       % (metrics.classification_report(y, lr_y_val)))
# lrtestdata=x=pd.concat([allTdata,xgb_onehotDfforTest],axis=1)                       #整合测试集
#
# # 数据标准化
# from sklearn.preprocessing import StandardScaler
# # 初始化特征的标准化器
# ss_X = StandardScaler()
# # 分别对训练和测试数据的特征进行标准化处理
# lrtestdata = ss_X.fit_transform(lrtestdata)
# lr_y_test_val=lrcv_L1.predict(lrtestdata)                                          #并没有设置阈值，先不管这个LR模型
# gbdt_lr_yDF=pd.DataFrame(lr_y_test_val,index=allTdata.index,columns=["gbdt_lr_y"])
# gbdt_lr_yDF.to_csv('gbdt_lr_y.csv')
# gbdt_lr_yDF.describe()

