from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time

import tensorflow as tf
import numpy as np


tf.logging.set_verbosity(
    tf.logging.INFO)  # Set to INFO for tracking training, default is WARN. ERROR for least messages

print("Using TensorFlow version %s\n" % tf.__version__)

CONTINUOUS_COLUMNS = ["I" + str(i) for i in range(1, 14)]  # 1-13 inclusive
CATEGORICAL_COLUMNS = ["C" + str(i) for i in range(1, 27)]  # 1-26 inclusive
LABEL_COLUMN = ["clicked"]

TRAIN_DATA_COLUMNS = LABEL_COLUMN + CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS
# TEST_DATA_COLUMNS = CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS

FEATURE_COLUMNS = CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS

print('Feature columns are: ', FEATURE_COLUMNS, '\n')

# label is 1
# sample = [0, 2, 11, 5, 10262, 34, 2, 4, 5, 0, 1, 0, 5, "be589b51", "287130e0", "cd7a7a22", "fb7334df", "25c83c98", "0",
#           "6cdb3998", "361384ce", "a73ee510", "3ff10fb2", "5874c9c9", "976cbd4c", "740c210d", "1adce6ef", "310d155b",
#           "07eb8110", "07c540c4", "891589e7", "18259a83", "a458ea53", "a0ab60ca", "0", "32c7478e", "a052b1ed",
#           "9b3e8820", "8967c0d2"]

# label is 1
sample = [0, 127, 1, 3, 1683, 19, 26, 17, 475, 0, 9, 0, 3, "05db9164", "8947f767", "11c9d79e", "52a787c8", "4cf72387",
          "fbad5c96", "18671b18", "0b153874", "a73ee510", "ceb10289", "77212bd7", "79507c6b", "7203f04e", "07d13a8f",
          "2c14c412", "49013ffe", "8efede7f", "bd17c3da", "f6a3e43b", "a458ea53", "35cd95c9", "ad3062eb", "c7dc6720",
          "3fdb382b", "010f6491", "49d68486"]

print('Columns and data as a dict: ', dict(zip(FEATURE_COLUMNS, sample)), '\n')

BATCH_SIZE = 400


def generate_input_fn(filename, batch_size=BATCH_SIZE):
    """Return an input_fn that streams CSV rows from `filename` in batches.

    The returned callable yields (features, labels) where features is a dict
    mapping column name -> tensor and labels is the int tensor of clicks.
    Uses the TF1 queue-based reader pipeline.
    """

    def _input_fn():
        # Queue up the file and pull up to batch_size raw CSV lines per call.
        queue = tf.train.string_input_producer([filename])
        _, raw_rows = tf.TextLineReader().read_up_to(
            queue, num_records=batch_size)

        # Per-column defaults, label first: 1 int label, 13 ints, 26 strings.
        record_defaults = ([[0]]
                           + [[0]] * len(CONTINUOUS_COLUMNS)
                           + [[" "]] * len(CATEGORICAL_COLUMNS))

        # decode_csv yields a flat list of tensors (not a dict), so pair
        # each tensor with its header name.
        decoded = tf.decode_csv(raw_rows, record_defaults=record_defaults)
        features = dict(zip(TRAIN_DATA_COLUMNS, decoded))

        # Split the label tensor out of the feature dict.
        labels = features.pop(LABEL_COLUMN[0])

        # Sparse categorical features need an extra trailing dimension
        # (see tf.SparseTensor docs); continuous columns are used as-is.
        for name in CATEGORICAL_COLUMNS:
            features[name] = tf.expand_dims(features[name], -1)

        return features, labels

    return _input_fn


print('input function configured')

# Sparse base columns.
# C1 = tf.contrib.layers.sparse_column_with_hash_bucket('C1', hash_bucket_size=1000)
# C2 = tf.contrib.layers.sparse_column_with_hash_bucket('C2', hash_bucket_size=1000)
# C3 = tf.contrib.layers.sparse_column_with_hash_bucket('C3', hash_bucket_size=1000)
# ...
# Cn = tf.contrib.layers.sparse_column_with_hash_bucket('Cn', hash_bucket_size=1000)
# wide_columns = [C1, C2, C3, ... , Cn]

# One hashed sparse column per categorical feature (C1..C26), each with
# 1000 hash buckets; these feed the linear ("wide") side of the model.
wide_columns = [
    tf.contrib.layers.sparse_column_with_hash_bucket(
        name, hash_bucket_size=1000)
    for name in CATEGORICAL_COLUMNS
]

print('Wide/Sparse columns configured')

# One real-valued column per continuous feature (I1..I13); these feed the
# DNN ("deep") side of the model.
deep_columns = [
    tf.contrib.layers.real_valued_column(name)
    for name in CONTINUOUS_COLUMNS
]

print('deep/continuous columns configured')

# No transformed columns (bucketized_column / crossed_column) are used on
# this dataset; add them here if desired.

print('Transformations complete')

# Every sparse column is additionally embedded (dimension 8) and appended
# to the deep columns so the DNN can also consume categorical features.
deep_columns.extend(
    tf.contrib.layers.embedding_column(col, dimension=8)
    for col in wide_columns
)

print('wide and deep columns configured')


def create_model_dir(model_type):
    """Return a unique model directory path for `model_type`.

    Example: models/model_WIDE_AND_DEEP_1493043407 (suffix is epoch seconds).
    """
    return 'models/model_{}_{}'.format(model_type, int(time.time()))


# Specify the desired model_dir
def get_model(model_type, model_dir):
    """Build a canned tf.contrib.learn estimator rooted at `model_dir`.

    Args:
        model_type: one of 'WIDE' (linear), 'DEEP' (DNN), or
            'WIDE_AND_DEEP' (combined).
        model_dir: directory for checkpoints/summaries.

    Returns:
        The constructed estimator.

    Raises:
        ValueError: for an unrecognized model_type (the original silently
            returned None, deferring the failure to the first m.fit call).
    """
    print("Model directory = %s" % model_dir)

    # Checkpoint every 100 steps for illustrative purposes; a real system
    # with far more samples would likely checkpoint less frequently.
    runconfig = tf.contrib.learn.RunConfig(
        save_checkpoints_secs=None,
        save_checkpoints_steps=100,
    )

    # Linear Classifier
    if model_type == 'WIDE':
        m = tf.contrib.learn.LinearClassifier(
            model_dir=model_dir,
            feature_columns=wide_columns,
            # BUG FIX: runconfig was previously passed only to the
            # WIDE_AND_DEEP estimator, so its checkpoint settings were
            # silently ignored for WIDE and DEEP.
            config=runconfig)

    # Deep Neural Net Classifier
    elif model_type == 'DEEP':
        m = tf.contrib.learn.DNNClassifier(
            model_dir=model_dir,
            feature_columns=deep_columns,
            hidden_units=[100, 50, 25],
            config=runconfig)

    # Combined Linear and Deep Classifier
    elif model_type == 'WIDE_AND_DEEP':
        m = tf.contrib.learn.DNNLinearCombinedClassifier(
            model_dir=model_dir,
            linear_feature_columns=wide_columns,
            dnn_feature_columns=deep_columns,
            dnn_hidden_units=[100, 70, 50, 25],
            config=runconfig)

    else:
        raise ValueError("Unknown model_type: %r" % (model_type,))

    print('estimator built')

    return m


MODEL_TYPE = 'WIDE'
model_dir = create_model_dir(model_type=MODEL_TYPE)
m = get_model(model_type=MODEL_TYPE, model_dir=model_dir)

# Canned estimators implement the 'Evaluable' interface.
from tensorflow.contrib.learn.python.learn import evaluable

# BUG FIX: the bare `isinstance(...)` expression was a discarded no-op
# (a notebook artifact); assert it so the check actually does something.
assert isinstance(m, evaluable.Evaluable)

# Use the cloud or local depending on your preference

# CLOUD
# train_file = "gs://dataset-uploader/criteo-kaggle/medium_version/train.csv"
# eval_file  = "gs://dataset-uploader/criteo-kaggle/medium_version/eval.csv"

# LOCAL. Update these paths as appropriate
train_file = "data/train.csv"
eval_file = "data/eval.csv"

# Line count of the training CSV (find with `wc -l train.csv`).
train_sample_size = 800000
# BUG FIX: with `from __future__ import division`, `/` yields a float
# (2000.0); `steps` should be an integer, so use floor division. The old
# inline comment ("8000/40 = 200") was also numerically wrong.
train_steps = train_sample_size // BATCH_SIZE  # 800000 // 400 = 2000

m.fit(input_fn=generate_input_fn(train_file, BATCH_SIZE), steps=train_steps)

print('fit done')

eval_sample_size = 200000  # this can be found with a 'wc -l eval.csv'
eval_steps = eval_sample_size // BATCH_SIZE  # 200000 // 400 = 500

results = m.evaluate(input_fn=generate_input_fn(eval_file),
                     steps=eval_steps)
print('evaluate done')

print('Accuracy: %s' % results['accuracy'])
print("result:", results)
# NOTE(review): results['loss'] is already a mean log loss, so -log(loss)
# is not the conventional "negative log loss" metric — kept as-is to match
# the logged output below; confirm the intended metric.
neg_log_loss = -np.log(results["loss"])
print("neg_log_loss:", neg_log_loss)

"""
log
Accuracy: 0.766125
result: {'loss': 0.50244325, 'accuracy': 0.766125, 'labels/prediction_mean': 0.26990917, 'labels/actual_label_mean': 0.251165, 'accuracy/baseline_label_mean': 0.251165, 'auc': 0.72278744, 'auc_precision_recall': 0.47572494, 'accuracy/threshold_0.500000_mean': 0.766125, 'precision/positive_threshold_0.500000_mean': 0.6087969, 'recall/positive_threshold_0.500000_mean': 0.19260247, 'global_step': 2000}
neg_log_loss: 0.6882726
"""

# def pred_fn():
#     sample = [0, 127, 1, 3, 1683, 19, 26, 17, 475, 0, 9, 0, 3, "05db9164", "8947f767", "11c9d79e", "52a787c8",
#               "4cf72387", "fbad5c96", "18671b18", "0b153874", "a73ee510", "ceb10289", "77212bd7", "79507c6b",
#               "7203f04e", "07d13a8f", "2c14c412", "49013ffe", "8efede7f", "bd17c3da", "f6a3e43b", "a458ea53",
#               "35cd95c9", "ad3062eb", "c7dc6720", "3fdb382b", "010f6491", "49d68486"]
#     sample_dict = dict(zip(FEATURE_COLUMNS, sample))
#
#     for feature_name in CATEGORICAL_COLUMNS:
#         sample_dict[feature_name] = tf.expand_dims(sample_dict[feature_name], -1)
#
#     for feature_name in CONTINUOUS_COLUMNS:
#         sample_dict[feature_name] = tf.constant(sample_dict[feature_name], dtype=tf.int32)
#     print(sample_dict)
#
#     return sample_dict
#
#
# m.predict(input_fn=pred_fn)
#
# from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
#
#
# def column_to_dtype(column):
#     if column in CATEGORICAL_COLUMNS:
#         return tf.string
#     else:
#         return tf.float32
#
#
# def serving_input_fn():
#     feature_placeholders = {
#         column: tf.placeholder(column_to_dtype(column), [None])
#         for column in FEATURE_COLUMNS
#     }
#     # DNNCombinedLinearClassifier expects rank 2 Tensors, but inputs should be
#     # rank 1, so that we can provide scalars to the server
#     features = {
#         key: tf.expand_dims(tensor, -1)
#         for key, tensor in feature_placeholders.items()
#     }
#
#     return input_fn_utils.InputFnOps(
#         features,  # input into graph
#         None,
#         feature_placeholders  # tensor input converted from request
#     )
#
#
# # Manually export
# export_folder = m.export_savedmodel(
#     export_dir_base=model_dir + '/export',
#     input_fn=serving_input_fn
# )
#
# print('model exported successfully to {}'.format(export_folder))
#
# from tensorflow.contrib.learn.python.learn import learn_runner
# from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
#
#
# # get_model(model_type = 'WIDE_AND_DEEP', model_dir=model_dir)
#
# # output_dir is an arg passed in by the learn_runner.run() call.
# def experiment_fn(output_dir):
#     print(output_dir)
#
#     train_input_fn = generate_input_fn(train_file, BATCH_SIZE)
#     eval_input_fn = generate_input_fn(eval_file)
#     my_model = get_model(model_type=MODEL_TYPE,
#                          model_dir=output_dir)
#
#     experiment = tf.contrib.learn.Experiment(
#         my_model,
#         train_input_fn=train_input_fn,
#         eval_input_fn=eval_input_fn,
#         train_steps=1000
#         ,
#         export_strategies=[saved_model_export_utils.make_export_strategy(
#             serving_input_fn,
#             default_output_alternative_key=None,
#             exports_to_keep=1
#         )]
#     )
#     return experiment
#
#
# exp = experiment_fn(model_dir)
#
# exp.train_and_evaluate()
#
# # Run the experiment
#
# model_dir = create_model_dir(model_type=MODEL_TYPE)
# metrics, output_folder = learn_runner.run(experiment_fn, model_dir)
#
# print('Accuracy: {}'.format(metrics['accuracy']))
# print('Model exported to {}'.format(output_folder))
