import pandas as pd
import numpy as np
import get_rid_of_1
import os

from sklearn.model_selection import GridSearchCV

#竞赛的评价指标为logloss
#from sklearn.metrics import log_loss  
#SVM并不能直接输出各类的概率，所以在这个例子中我们用正确率作为模型预测性能的度量
from sklearn.metrics import log_loss
from sklearn import metrics

from matplotlib import pyplot
import seaborn as sns

from xgboost import XGBClassifier
import xgboost as xgb
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import recall_score
from pandas import DataFrame
from sklearn.metrics import log_loss
import tensorflow as tf
from sklearn.preprocessing import OneHotEncoder

# Load the pre-extracted feature tables (index column presumably holds the
# source file name — see handclf's usage of the index below).
allPdata=pd.read_csv("tao2_model/predate/allPdata_1.csv",index_col=0)                      # positive-class samples
allNdata=pd.read_csv("tao2_model/predate/allNdata_1.csv",index_col=0)                      # negative-class samples
allTdata=pd.read_csv("tao2_model/predate/allTdata_1.csv",index_col=0)                      # test samples

def handclf(flag, howbig, zhengfu, allTdata, handoutlineset):
    """Remove outlier rows from a feature table by a single-column threshold.

    Parameters
    ----------
    flag : str
        Column name to filter on.
    howbig : float
        Threshold value.
    zhengfu : int
        Filter direction: ``1`` drops rows where ``flag`` > ``howbig``,
        ``0`` drops rows where ``flag`` < ``howbig``.
    allTdata : pandas.DataFrame
        Data to clean; the index identifies each sample (file name).
    handoutlineset : set
        Accumulator of indices removed so far.

    Returns
    -------
    (set, pandas.DataFrame)
        The accumulator united with the newly removed indices, and the
        DataFrame with those rows dropped.

    Raises
    ------
    ValueError
        If ``zhengfu`` is neither 0 nor 1.  (The original implicitly
        returned ``None`` in that case, which would crash at the call
        site on tuple unpacking.)
    """
    # Build the outlier mask once instead of recomputing it per branch.
    if zhengfu == 1:
        mask = allTdata[flag] > howbig      # values above the threshold are outliers
    elif zhengfu == 0:
        mask = allTdata[flag] < howbig      # values below the threshold are outliers
    else:
        raise ValueError("zhengfu must be 0 or 1, got %r" % (zhengfu,))
    outliers = allTdata[mask].index
    handoutlineset = handoutlineset | set(outliers)         # record removed file names
    allTdata = allTdata.drop(outliers, axis=0)              # drop the flagged rows
    return handoutlineset, allTdata

# Manually tuned outlier removal: the thresholds below were chosen by hand
# (presumably by inspecting the feature distributions — TODO confirm).
# handoutline1/handoutline2 collect the indices of the removed rows so the
# same rows can later be dropped from the auxiliary ratio-feature tables.
handoutline1=set()
handoutline2=set()
handoutline1,allPdata=handclf('c0', 0.2, 1, allPdata,handoutline1)
handoutline1,allPdata=handclf('c20',1, 1,allPdata,handoutline1)
handoutline1,allPdata=handclf('FA1dataMean',4, 1,allPdata,handoutline1)

handoutline2,allNdata=handclf('c0',0.2, 1,allNdata,handoutline2)
handoutline2,allNdata=handclf('c2',2, 1,allNdata,handoutline2)
handoutline2,allNdata=handclf('c10',1.1, 0,allNdata,handoutline2)
handoutline2,allNdata=handclf('c15',-0.4, 0,allNdata,handoutline2)
handoutline2,allNdata=handclf('c16',2, 1,allNdata,handoutline2)
handoutline2,allNdata=handclf('c17',1.1, 1,allNdata,handoutline2)
handoutline2,allNdata=handclf('c20',1, 1,allNdata,handoutline2)
handoutline2,allNdata=handclf('F_ai2_fft_frequency_max',2000, 1,allNdata,handoutline2)
handoutline2,allNdata=handclf('FA1dataVar',10, 1,allNdata,handoutline2)
handoutline2,allNdata=handclf('ai2_frequency_max_diff',-1000, 0,allNdata,handoutline2)
# print(allPdata.shape)
# print(allNdata.shape)

# Attach class labels.  NOTE(review): 'lable' is a typo for 'label', but it is
# a runtime column name used consistently below, so it is kept as-is.
allPdata['lable']=1
allNdata['lable']=0
newallPdata=pd.concat([allPdata,allPdata,allPdata,allPdata],axis=0)         # balance the classes: replicate the positive ('1') samples four times
traindata=pd.concat([newallPdata,allNdata],axis=0)                          # positives first (originally ~120 '1' rows), then ~500 '0' rows
y=traindata['lable']                                                       # training labels
x=traindata.drop('lable',axis=1)                                           # training feature matrix

# Baseline XGBoost classifier; its per-tree leaf indices are later used as
# extra categorical features (GBDT feature encoding) for the linear model.
testXGBClassifier =  XGBClassifier(
        learning_rate =0.1,
        n_estimators=20,                                                    # a large value would also be fine: cv would return a suitable n_estimators
        max_depth=5,
        min_child_weight=1,
        gamma=0,
        subsample=0.8,
        colsample_bytree=0.6,
        colsample_bylevel=0.8,
        objective= 'binary:logistic',
        reg_alpha=0.01,
        reg_lambda=0.02,
        seed=3)

# x = x.values
# y = y.values
testXGBClassifier.fit(x,y)                                                 # train the XGBoost model

# GBDT feature encoding: map every sample to the index of the leaf it lands
# in for each tree, then one-hot encode those leaf indices.
X_train_leaves = testXGBClassifier.apply(x)                                 # leaf indices for the training set
X_test_leaves = testXGBClassifier.apply(allTdata)                           # leaf indices for the test set
# Fit the encoder on train+test stacked together so both sets share one
# leaf vocabulary.  (The original also did a redundant fit_transform on the
# training leaves alone whose result was immediately discarded; that dead
# fit has been removed.)
X_leaves = np.concatenate((X_train_leaves, X_test_leaves), axis=0)
xgbenc = OneHotEncoder()
xgbenc.fit(X_leaves)
x_train_xgb_onehot = xgbenc.transform(X_train_leaves).toarray()             # one-hot leaf features, training set
x_test_xgb_onehot = xgbenc.transform(X_test_leaves).toarray()               # one-hot leaf features, test set

# New features: FFT frequency-ratio tables, joined on the same file-name index.
rateP=pd.read_csv('tao2_model/predate/fftDataP2.csv',index_col=0)
rateN=pd.read_csv('tao2_model/predate/fftDataN2.csv',index_col=0)
rateTest=pd.read_csv('tao2_model/predate/fftDataT2.csv',index_col=0)

# NOTE(review): 'B_ai1_ratio_base2' appears twice in this list; the second
# occurrence looks like it was meant to be 'B_ai2_ratio_base2' — confirm
# against the CSV columns before changing it (it is a runtime column key).
ratiolist=['F_ai1_ratio_base','F_ai1_ratio_base2','F_ai1_ratio_base3','F_ai2_ratio_base','F_ai2_ratio_base2','F_ai2_ratio_base3','B_ai1_ratio_base','B_ai1_ratio_base2','B_ai1_ratio_base3','B_ai2_ratio_base','B_ai1_ratio_base2','B_ai2_ratio_base3']
rP=rateP.loc[:,['ratio_all','ratio_half']]
rN=rateN.loc[:,['ratio_all','ratio_half']]
rT=rateTest.loc[:,['ratio_all','ratio_half']]

ratePd=rateP.loc[:,ratiolist]
rateNd=rateN.loc[:,ratiolist]
# Drop the same outlier rows that were removed from the main feature tables.
ratePd=ratePd.drop(handoutline1,axis=0)
rateNd=rateNd.drop(handoutline2,axis=0)

rateTd=rateTest.loc[:,ratiolist]
ratePd=pd.concat([ratePd,ratePd,ratePd,ratePd],axis=0)      # same 4x positive-class replication as the main training data
rateTrain=pd.concat([ratePd,rateNd],axis=0)

rP=rP.drop(handoutline1,axis=0)
rN=rN.drop(handoutline2,axis=0)

# One-hot encode the ratio_all/ratio_half columns; the encoder is fitted on
# train+test stacked together so both share one category vocabulary.
rTtrain=pd.concat([rP,rP,rP,rP,rN],axis=0)
rAll=pd.concat([rTtrain,rT],axis=0)
ratioOneHot = OneHotEncoder()
ratioOneHot.fit_transform(rAll)

rTrainOne=ratioOneHot.transform(rTtrain).toarray()
rTrainOne=pd.DataFrame(rTrainOne,index=rTtrain.index)
rTestOne=ratioOneHot.transform(rT).toarray()
rTestOne=pd.DataFrame(rTestOne,index=rT.index)
rateTrain=pd.concat([rateTrain,rTrainOne],axis=1)           # ratio columns + their one-hot encoding, training side
rateTest=pd.concat([rateTd,rTestOne],axis=1)                # NOTE: rebinds rateTest, shadowing the CSV DataFrame loaded above

# Wrap the XGB leaf one-hot arrays as DataFrames carrying the sample index.
xgb_onehotDfforTrain=pd.DataFrame(x_train_xgb_onehot,index=traindata.index)
xgb_onehotDfforTest=pd.DataFrame(x_test_xgb_onehot,index=allTdata.index)


# X_train_leaves = testXGBClassifier.apply(x)
# X_leaves = np.concatenate((X_train_leaves, X_test_leaves), axis=0)        # stack train/test leaf indices (dead code, superseded above)
# xgbenc = OneHotEncoder()
# test=xgbenc.fit_transform(X_leaves)                                       # fit the one-hot encoder on the stacked leaves
# x_train_xgb_onehot=xgbenc.transform(X_train_leaves).toarray()             # one-hot leaf features for the training set
# x_test_xgb_onehot=xgbenc.transform(X_test_leaves).toarray()               # one-hot leaf features for the test set
# xgb_onehotDfforTrain=pd.DataFrame(x_train_xgb_onehot,index=traindata.index)   # as DataFrame
# xgb_onehotDfforTest=pd.DataFrame(x_test_xgb_onehot,index=allTdata.index)      # as DataFrame

# Standardise the raw features (the 0/1 leaf features are left untouched)
from sklearn.preprocessing import StandardScaler

# Initialise the feature scaler
ss_X = StandardScaler()

# Standardise train and test features; the scaler is fitted on the training set only
x = ss_X.fit_transform(x)                                 # standardise the training set
x=pd.DataFrame(x,xgb_onehotDfforTrain.index)              # back to DataFrame form, keeping the sample index
sstest=ss_X.transform(allTdata)                           # standardise the test set with the training statistics
sstest=pd.DataFrame(sstest,xgb_onehotDfforTest.index)     # back to DataFrame form

x_train=pd.concat([x,xgb_onehotDfforTrain],axis=1)        # final training set: standardised raw features + one-hot leaf features (extends the xgb features)
x_test=pd.concat([sstest,xgb_onehotDfforTest],axis=1)     # final test set, same layout

y = y.values

# Train an FTRL-optimised logistic regression on the stacked features
# (TensorFlow 1.x graph mode).
x_ = tf.placeholder(tf.float32, [None, x_train.shape[1]])                # input placeholder; second dim = number of features
y_ = tf.placeholder(tf.float32,shape=(None,1))
w = tf.Variable(tf.truncated_normal([x_train.shape[1], 1],stddev=0.1))    # weight initialisation
b = tf.Variable(tf.constant(0.1, shape=[1]))                # bias initialisation

logits_ = tf.matmul(x_,w)+b                                 # single matmul + bias, i.e. a plain linear model

## Cross-entropy loss
cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_, labels=y_))        # sigmoid cross-entropy
y_val=tf.nn.sigmoid(logits_)                               # sigmoid activation -> probability

# Build the train step with the FTRL optimizer (L1-regularised)
train_step = tf.train.FtrlOptimizer(learning_rate=0.01,l1_regularization_strength=1,l2_regularization_strength=0).minimize(cross_entropy)  # FTRL optimizer
#train_step = tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)

sess = tf.Session()
init_op = tf.global_variables_initializer()
sess.run(init_op)
# # Train
# NOTE(review): 400000 full-batch steps with no early stopping — very slow;
# confirm this iteration count is intentional.
for i in range(400000):
    #example_batch, label_batch = tf.train.shuffle_batch([example,label], batch_size=1, capacity=200, min_after_dequeue=100, num_threads=2)
    #example_batch,label_batch = tf.train.batch([X_train,y_train.reshape(-1,1)],batch_size = 1,capacity=10)
    #example, l = sess.run([example_batch,label_batch])
    sess.run(train_step, feed_dict={x_:x_train, y_: y.reshape(-1,1)})                         # feed the full training set and labels
    if (i+1) % 1000 == 0:
        train_logloss = sess.run(cross_entropy,feed_dict={x_:x_train, y_: y.reshape(-1,1)})   # report the training loss every 1000 steps
#         test_logloss = sess.run(cross_entropy, feed_dict={x_:X_test, y_: y_test.reshape(-1,1)})
        print('训练集的loss %f'%(train_logloss))

output = sess.run([y_val],feed_dict={x_: x_train})                                   # network output (probabilities) on the training set
output2 = sess.run([y_val],feed_dict={x_: x_test})                                   # network output (probabilities) on the test set

# Round the training-set probabilities to hard 0/1 labels and report fit
# quality.  (Renamed from 'y_val' so it no longer shadows the TF tensor of
# the same name defined above.)
y_pred_train = np.round(output)
print("Classification report for classifier xgboost:\n%s\n"
      % (metrics.classification_report(y, y_pred_train[0])))

# 'GBDT_FTRL' holds the sigmoid-activated probability output for the test set.
TEST=pd.DataFrame(output2[0],index=xgb_onehotDfforTest.index,columns=['GBDT_FTRL'])
TEST['result']=0

# Threshold at the 75th percentile of the predicted probabilities:
# the top quarter of samples is labelled '1'.
thread = TEST['GBDT_FTRL'].quantile(0.75)
print(thread)

index1=TEST[TEST['GBDT_FTRL']>thread].index
print(len(index1))                                                                    # number of samples predicted as '1'
TEST.loc[index1,'result']=1                                                          # 'result' holds the final hard prediction

# Drop the probability column and write only the hard predictions.
# (The original wrote GBDT_FTRL.csv twice; the first write — still containing
# the probability column — was immediately overwritten before any consumer
# ran, so that dead write has been removed.)
TEST=TEST.drop('GBDT_FTRL',axis=1)
TEST.to_csv('GBDT_FTRL.csv')

# Post-processing / filtering step, then remove the intermediate file.
get_rid_of_1.inhance()
os.remove('GBDT_FTRL.csv')
print("tao_model_2 sucessed!")
