from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Split the data into training and test sets
from sklearn.model_selection import train_test_split


import tensorflow as tf
import numpy as np
import pandas as pd

# Read the feature CSV and split it into features/labels, then train/test sets.
data = pd.read_csv("FE_GBDT.csv")

# 'click' is the binary target; everything else is a feature column.
y = np.array(data['click'])
X = np.array(data.drop(['click'], axis=1))

# Hold out a random 30% of the rows as the test set (fixed seed => reproducible).
X_train, X_test, y_train, y_test = train_test_split(
    X, y, random_state=33, test_size=0.3)
print('##############')
print('X_train shape', X_train.shape)

# Model definition: single-layer logistic regression (linear logits).
# NOTE(review): the hard-coded 1270 must equal the number of feature columns
# in FE_GBDT.csv after dropping 'click' — confirm against the data file.

x_ = tf.placeholder(tf.float32, [None, 1270])  # input features, shape (batch, 1270)
y_ = tf.placeholder(tf.float32,shape=(None,1))  # binary labels, shape (batch, 1)
w = tf.Variable(tf.truncated_normal([1270,1],stddev=0.1))  # weight vector, small random init
b = tf.Variable(tf.constant(0.1, shape=[1]))  # scalar bias

# Raw logits; the sigmoid is applied inside the loss function below.
logits_ = tf.matmul(x_,w)+b


# Cross-entropy (log) loss: sigmoid is applied to the logits inside the op,
# averaged over the batch.
cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits_, labels=y_))

# Training op: FTRL optimizer with L1 regularization (L2 disabled).
train_step = tf.train.FtrlOptimizer(learning_rate=0.8,l1_regularization_strength=2,l2_regularization_strength=0).minimize(cross_entropy)
# Alternative plain-SGD optimizer, kept for reference:
#train_step = tf.train.GradientDescentOptimizer(0.001).minimize(cross_entropy)

# Create the session and initialize all graph variables.
sess = tf.Session()
init_op = tf.global_variables_initializer()
sess.run(init_op)

# Train: mini-batch loop that wraps around the training set so that every
# optimization step feeds exactly `batch_size` rows.

batch_size = 100000
epoch = 1000
num_samples = X_train.shape[0]                    # number of training rows
num_batches = num_samples * epoch // batch_size   # total optimization steps
print('####range_num:', num_batches)

# Labels as a column vector, computed once instead of reshaping every step.
y_train_col = y_train.reshape(-1, 1)

end = 0
for i in range(num_batches):
    start = end
    end = start + batch_size

    if end < num_samples:
        # Batch fits entirely inside the remaining data.
        sess.run(train_step, feed_dict={x_: X_train[start:end],
                                        y_: y_train_col[start:end]})
    elif end == num_samples:
        # Batch ends exactly on the last row; restart from the top next step.
        sess.run(train_step, feed_dict={x_: X_train[start:end],
                                        y_: y_train_col[start:end]})
        end = 0
    else:
        # Batch overruns the end: take the tail, then wrap around to the head.
        # BUGFIX: the original used three independent `if` statements, so when
        # the wrapped `end` happened to land exactly on num_samples the
        # `end == num_samples` branch also fired, running a second training
        # step on a stale slice. The if/elif/else chain makes the three cases
        # mutually exclusive.
        # NOTE(review): assumes batch_size <= num_samples, otherwise the head
        # slice is silently truncated — confirm against the data size.
        tail = num_samples - start
        end = batch_size - tail                   # rows taken from the head
        X_new = np.concatenate((X_train[start:num_samples], X_train[0:end]), axis=0)
        y_new = np.concatenate((y_train_col[start:num_samples], y_train_col[0:end]), axis=0)
        sess.run(train_step, feed_dict={x_: X_new, y_: y_new})

    if (i + 1) % 10 == 0:
        # Periodic log-loss report on the FULL train/test sets (expensive for
        # large data, hence only every 10 steps).
        train_logloss = sess.run(cross_entropy, feed_dict={x_: X_train, y_: y_train_col})
        test_logloss = sess.run(cross_entropy, feed_dict={x_: X_test, y_: y_test.reshape(-1, 1)})
        print('训练集的loss %f, 测试集的loss %f' % (train_logloss, test_logloss))
