import pandas as pd
import os
import time
import tensorflow as tf
import sys


class Logger(object):
    """Tee for stdout: echoes every message to the real terminal and
    appends it to a log file, so console output is also persisted."""

    def __init__(self, fileN="Default.log"):
        # Keep a handle on the real stdout so output still reaches the console.
        self.terminal = sys.stdout
        self.log = open(fileN, "a")

    def write(self, message):
        """Write *message* to both the terminal and the log file."""
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # Bug fix: this was a no-op, so buffered log text could be lost
        # (e.g. at interpreter exit or when code calls sys.stdout.flush()).
        self.terminal.flush()
        self.log.flush()


# Tee all subsequent stdout (including TF's prints) into a log file as well.
# NOTE(review): hard-coded Windows path ("D:\...") -- this line will fail at
# startup on non-Windows machines or when drive D: is absent; confirm target.
sys.stdout = Logger("D:\\AI_wide_deep_log.txt")
print(tf.__version__)

# Enable GPU memory growth so TF allocates GPU memory on demand instead of
# reserving it all up front; this must be set before the GPUs are initialized.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
        # set_memory_growth raises RuntimeError if GPUs were already initialized.
        print(e)

# 13 continuous (numeric) feature column names: I1 .. I13.
CONTINUOUS_COLUMNS = [f"I{i}" for i in range(1, 14)]

# 26 categorical feature column names: C1 .. C26.
CATEGORICAL_COLUMNS = [f"C{i}" for i in range(1, 27)]

# Label column name.
LABEL_COLUMN = ["clicked"]

# All CSV columns in file order: label first, then the 39 features.
DATA_COLUMNS = LABEL_COLUMN + CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS

# Feature columns only (label excluded).
FEATURE_COLUMNS = CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS

print('Feature columns are: ', FEATURE_COLUMNS, '\n')
# Categorical features: hash each raw string into one of 1000 buckets
# (sparse one-hot-style input for the linear / "wide" side).
wide_columns = [
    tf.feature_column.categorical_column_with_hash_bucket(
        name, hash_bucket_size=1000)
    for name in CATEGORICAL_COLUMNS
]

# Continuous features: parsed as tf.float32 numeric columns
# (dense input for the DNN / "deep" side).
deep_columns = [
    tf.feature_column.numeric_column(name, dtype=tf.float32)
    for name in CONTINUOUS_COLUMNS
]

# Embed every hashed categorical column (dimension 4) and feed the dense
# embeddings to the deep side as well.
deep_columns.extend(
    tf.feature_column.embedding_column(col, dimension=4)
    for col in wide_columns
)

# A few hand-picked feature crosses, used as the wide input of the
# Wide & Deep model (chosen arbitrarily as an example).
C2_C10 = tf.feature_column.crossed_column(['C2', 'C10'], hash_bucket_size=int(1e4))
C4_C13 = tf.feature_column.crossed_column(['C4', 'C13'], hash_bucket_size=int(1e4))
C7_C21 = tf.feature_column.crossed_column(['C7', 'C21'], hash_bucket_size=int(1e4))
cross_columns = [C2_C10, C4_C13, C7_C21]


def input_fn(csv_file, batch_size=256):
    """Build the training dataset from *csv_file*.

    Yields (features_dict, label) batches of size *batch_size*. The CSV
    files carry no header row, so names come from DATA_COLUMNS.
    """
    return tf.data.experimental.make_csv_dataset(
        file_pattern=csv_file,
        batch_size=batch_size,
        shuffle_seed=1000,  # fixed seed -> reproducible shuffling
        column_names=DATA_COLUMNS,
        label_name="clicked",
        header=False)


def input_eval_fn(csv_file, batch_size=256):
    """Build the evaluation dataset from *csv_file*.

    One pass over the data (num_epochs=1), in file order (shuffle=False),
    yielding (features_dict, label) batches of size *batch_size*.
    """
    return tf.data.experimental.make_csv_dataset(
        file_pattern=csv_file,
        batch_size=batch_size,
        shuffle=False,
        num_epochs=1,
        column_names=DATA_COLUMNS,
        label_name="clicked",
        header=False)


def input_pred_fn(csv_file, batch_size=256):
    """Build a feature-only dataset for prediction from *csv_file*.

    Bug fix: make_csv_dataset shuffles by default, but the prediction loop
    below compares predictions row-by-row against labels read in file order
    with pd.read_csv -- so the dataset must preserve file order
    (shuffle=False), and make a single pass (num_epochs=1).
    """
    dataset = tf.data.experimental.make_csv_dataset(
        csv_file,
        batch_size=batch_size,
        column_names=DATA_COLUMNS,
        select_columns=FEATURE_COLUMNS,  # drop the label; predict() takes features only
        shuffle=False,
        num_epochs=1,
        header=False)

    return dataset


def get_model(model_type, model_dir):
    """Create an Estimator of the requested flavour.

    model_type: 'WIDE' (linear), 'DEEP' (DNN) or 'WIDE_AND_DEEP' (combined).
    model_dir:  directory where checkpoints are written.
    Returns None when model_type matches none of the three options.
    """
    # Show where checkpoints will be saved.
    print("Model directory = %s" % model_dir)

    # Checkpoint every 500 steps. save_checkpoints_secs must then be None:
    # the seconds-based and steps-based options are mutually exclusive.
    run_config = tf.estimator.RunConfig(
        save_checkpoints_secs=None,
        save_checkpoints_steps=500,
    )

    if model_type == 'WIDE':
        # Linear classifier over the hashed categorical columns.
        estimator = tf.estimator.LinearClassifier(
            model_dir=model_dir,
            feature_columns=wide_columns,
            config=run_config)
    elif model_type == 'DEEP':
        # Feed-forward DNN over numeric + embedded categorical columns.
        estimator = tf.estimator.DNNClassifier(
            model_dir=model_dir,
            feature_columns=deep_columns,
            hidden_units=[10, 2],  # two hidden layers
            config=run_config)
    elif model_type == 'WIDE_AND_DEEP':
        # Joint linear (crossed features) + DNN classifier.
        estimator = tf.estimator.DNNLinearCombinedClassifier(
            model_dir=model_dir,
            linear_feature_columns=cross_columns,
            dnn_feature_columns=deep_columns,
            dnn_hidden_units=[128, 64, 16],  # three hidden layers
            config=run_config)
    else:
        # Unknown model_type: fall through with no estimator.
        estimator = None

    print('estimator built')
    return estimator

# Build one estimator of each flavour. Every model gets its own
# timestamped checkpoint directory, e.g. 'models/model_WIDE_1234567890'.

# Linear classifier.
MODEL_TYPE = 'WIDE'
wide_model_dir = 'models/model_' + MODEL_TYPE + '_' + str(int(time.time()))
estimator_wide = get_model(model_type=MODEL_TYPE, model_dir=wide_model_dir)

# Deep neural-network classifier.
MODEL_TYPE = 'DEEP'
deep_model_dir = 'models/model_' + MODEL_TYPE + '_' + str(int(time.time()))
estimator_deep = get_model(model_type=MODEL_TYPE, model_dir=deep_model_dir)

# Combined wide & deep classifier.
MODEL_TYPE = 'WIDE_AND_DEEP'
wnd_model_dir = 'models/model_' + MODEL_TYPE + '_' + str(int(time.time()))
estimator_wnd = get_model(model_type=MODEL_TYPE, model_dir=wnd_model_dir)

# Train for 60000 steps. input_fn's default batch_size is 256 and the
# training set reportedly has 800000 rows, so this is roughly
# 60000 * 256 / 800000 = 19.2 epochs.
# NOTE(review): the original comment claimed batch_size=128 (9.6 epochs),
# which does not match input_fn's default of 256 -- confirm which is intended.
estimator_wide.train(input_fn = lambda : input_fn("data/train.csv"), steps=60000)
print('train done')

# Same 60000-step schedule for the DNN model.
estimator_deep.train(input_fn = lambda : input_fn("data/train.csv"), steps=60000)
print('train done')

# Same 60000-step schedule for the combined wide & deep model.
estimator_wnd.train(input_fn = lambda : input_fn("data/train.csv"), steps=60000)
print('train done')

# Evaluate each trained model on the held-out set and report its accuracy,
# in the same order the models were trained.
for _estimator in (estimator_wide, estimator_deep, estimator_wnd):
    eval_result = _estimator.evaluate(input_fn=lambda: input_eval_fn("data/eval.csv"))
    print('\nTest set accuracy: {accuracy:0.3f}\n'.format(**eval_result))

# predict() returns a generator of per-example prediction dicts.
predictions = estimator_wnd.predict(input_fn=lambda: input_pred_fn("data/eval.csv"))
print('predict done')

# Ground-truth labels, read in file order for row-by-row comparison.
eval_labels = pd.read_csv('data/eval.csv', names=DATA_COLUMNS, usecols=['clicked'])
print(eval_labels)

for i, pred_dict in enumerate(predictions, start=1):
    # Predicted class id and the probability assigned to that class.
    class_id = pred_dict['class_ids'][0]
    probability = pred_dict['probabilities'][class_id]

    print('id {} Prediction is "{}" ({:.1f}%), expected "{}"'.format(
        i, class_id, 100 * probability, eval_labels['clicked'][i - 1]))

# Export the linear model as a SavedModel for serving.
wide_export_folder = wide_model_dir + '/export'
# Create the export folder if it does not exist yet.
if not os.path.exists(wide_export_folder):
    os.mkdir(wide_export_folder)

# Bug fix: the serving input spec must match the columns the estimator was
# trained with. estimator_wide is a LinearClassifier built on wide_columns,
# but the original derived the parse spec from deep_columns, which also
# demands the numeric I* features the linear model never uses.
serving_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
    tf.feature_column.make_parse_example_spec(wide_columns))

# Export the model.
wide_export_path = estimator_wide.export_saved_model(wide_export_folder, serving_input_receiver_fn=serving_input_fn)

print('model exported successfully to {}'.format(wide_export_path))
# Export the DNN model as a SavedModel for serving.
deep_export_folder = deep_model_dir + '/export'
# Create the export folder if it does not exist yet.
if not os.path.exists(deep_export_folder):
    os.mkdir(deep_export_folder)

# Parse-Example serving receiver derived from the DNN's own feature columns.
deep_feature_spec = tf.feature_column.make_parse_example_spec(deep_columns)
deep_serving_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
    deep_feature_spec)

# Export the model.
deep_export_path = estimator_deep.export_saved_model(
    deep_export_folder, serving_input_receiver_fn=deep_serving_input_fn)

print('model exported successfully to {}'.format(deep_export_path))
# Export the combined wide & deep model as a SavedModel for serving.
wnd_export_folder = wnd_model_dir + '/export'
# Create the export folder if it does not exist yet.
if not os.path.exists(wnd_export_folder):
    os.mkdir(wnd_export_folder)

# NOTE(review): the parse spec is built from deep_columns only. Since
# deep_columns embeds every categorical column and includes all numeric
# columns, this spec appears to cover every raw feature the crossed linear
# columns also read -- confirm the export serves the crossed features
# correctly before relying on it.
serving_input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
    tf.feature_column.make_parse_example_spec(deep_columns))

# Export the model.
wnd_export_path = estimator_wnd.export_saved_model(
    wnd_export_folder, serving_input_receiver_fn=serving_input_fn)

print('model exported successfully to {}'.format(wnd_export_path))

