# -*- coding: utf-8 -*-
from __future__ import print_function
import math
from IPython import display
from matplotlib import cm
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
import os

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

"""根据中国的国情分析房价相关因素
最大粒度：小区
小区均价其实就是由小区的经纬度决定的
采用神经网络构建模型
"""

tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
target_label = 'house_unit_price'
housing_dataframe = pd.read_csv('../data/data.csv')
housing_dataframe = housing_dataframe.reindex(
    np.random.permutation(housing_dataframe.index))


def preprocess_features(housing_dataframe):
    """Select the model's input features (latitude/longitude only for now).

    Returns an independent copy so later normalization cannot mutate the
    caller's frame.
    """
    feature_names = [
        "latitude",
        "longitude",
        # "housing_median_age" and "build_up_area" were tried and disabled.
    ]
    return housing_dataframe[feature_names].copy()


def preprocess_targets(housing_dataframe):
    """Build the regression target frame: unit price scaled down by 1000."""
    scaled_price = housing_dataframe[target_label] / 1000.0
    targets = pd.DataFrame()
    targets[target_label] = scaled_price
    return targets


def construct_feature_columns(input_features):
    """Create one numeric TF feature column per input feature name."""
    return {tf.feature_column.numeric_column(feature_name)
            for feature_name in input_features}


def linear_scale(series):
    """Linearly rescale ``series`` into the range [-1, 1].

    The series minimum maps to -1 and the maximum to +1. A constant
    series yields a zero half-range and therefore non-finite outputs,
    matching the original behavior.

    Args:
      series: numeric pandas Series.

    Returns:
      A new pandas Series with the rescaled values.
    """
    min_val = series.min()
    max_val = series.max()
    half_range = (max_val - min_val) / 2.0
    # Vectorized pandas arithmetic replaces the per-element apply()
    # (and drops the original's stray trailing semicolon).
    return (series - min_val) / half_range - 1.0


def log_normalize(series):
    """Apply log(x + 1) to every element to compress heavy right tails."""
    return series.map(lambda value: math.log(value + 1.0))


def clip(series, clip_to_min, clip_to_max):
    """Clamp every element of the series into [clip_to_min, clip_to_max]."""
    return series.clip(lower=clip_to_min, upper=clip_to_max)


def z_score_normalize(series):
    """Standardize the series to zero mean and unit (sample) std deviation."""
    center = series.mean()
    spread = series.std()
    return (series - center) / spread


def binary_threshold(series, threshold):
    """Map each element to 1 when strictly above ``threshold``, else 0."""
    return series.gt(threshold).astype(int)


def normalize_linear_scale(examples_dataframe):
    """Rescale each selected feature column into [-1, 1] via linear_scale()."""
    normalized = pd.DataFrame()
    for feature in ('latitude', 'longitude'):
        normalized[feature] = linear_scale(examples_dataframe[feature])
    return normalized


def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
    """Feed pandas features/targets to a TF Estimator.

    Returns a (features, labels) tensor pair for the next batch.
    """
    # Estimators expect a dict of column-name -> ndarray, not a DataFrame.
    feature_arrays = {name: np.array(column)
                      for name, column in dict(features).items()}

    # from_tensor_slices embeds the data as graph constants (2GB limit).
    dataset = Dataset.from_tensor_slices((feature_arrays, targets))
    dataset = dataset.batch(batch_size).repeat(num_epochs)

    if shuffle:
        # NOTE: shuffling is applied after batching here, so whole batches
        # are shuffled relative to each other.
        dataset = dataset.shuffle(buffer_size=10000)

    return dataset.make_one_shot_iterator().get_next()


# Normalize the features, then carve out training and validation splits.
# NOTE(review): head(24000)/tail(7000) assumes the CSV has at least 31000
# rows; otherwise the splits overlap -- confirm against the data file.
normalized_dataframe = normalize_linear_scale(preprocess_features(housing_dataframe))

training_examples = normalized_dataframe.head(24000)
training_targets = preprocess_targets(housing_dataframe.head(24000))
validation_examples = normalized_dataframe.tail(7000)
validation_targets = preprocess_targets(housing_dataframe.tail(7000))

# Sanity-check the splits by displaying their summary statistics.
for frame in (training_examples, training_targets,
              validation_examples, validation_targets):
    display.display(frame.describe())


# 调整模型超参数
def train_model(
        my_optimizer,
        steps,
        batch_size,
        hidden_units,
        training_examples,
        training_targets,
        validation_examples,
        validation_targets):
    periods = 10
    steps_per_period = steps / periods

    my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
    dnn_regressor = tf.estimator.DNNRegressor(
        feature_columns=construct_feature_columns(training_examples),
        hidden_units=hidden_units,
        optimizer=my_optimizer,
    )
    # Create input functions
    training_input_fn = lambda: my_input_fn(training_examples, training_targets[target_label],
                                            batch_size=batch_size)
    predict_training_input_fn = lambda: my_input_fn(training_examples, training_targets[target_label],
                                                    num_epochs=1, shuffle=False)
    predict_validation_input_fn = lambda: my_input_fn(validation_examples, validation_targets[target_label],
                                                      num_epochs=1, shuffle=False)

    # Create a linear regressor object.

    # Train the model, but do so inside a loop so that we can periodically assess
    # loss metrics.
    print("Training model...")
    print("RMSE (on training data):")
    training_rmse = []
    validation_rmse = []
    for period in range(0, periods):
        # Train the model, starting from the prior state.
        dnn_regressor.train(
            input_fn=training_input_fn,
            steps=steps_per_period,
        )
        # Take a break and compute predictions.
        training_predictions = dnn_regressor.predict(input_fn=predict_training_input_fn)
        training_predictions = np.array([item['predictions'][0] for item in training_predictions])

        validation_predictions = dnn_regressor.predict(input_fn=predict_validation_input_fn)
        validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])

        # Compute training and validation loss.
        training_root_mean_squared_error = math.sqrt(
            metrics.mean_squared_error(training_predictions, training_targets))
        validation_root_mean_squared_error = math.sqrt(
            metrics.mean_squared_error(validation_predictions, validation_targets))
        # Occasionally print the current loss.
        print("  period %02d : %0.2f" % (period, training_root_mean_squared_error))
        # Add the loss metrics from this period to our list.
        training_rmse.append(training_root_mean_squared_error)
        validation_rmse.append(validation_root_mean_squared_error)
    print("Model training finished.")

    return dnn_regressor, training_rmse, validation_rmse


# Train the same network architecture under three different optimizers
# so their convergence behavior can be compared on one plot below.
_HIDDEN_UNITS = [10, 10, 5, 5, 5]

gradient_model, gradient_training_rmse, gradient_validation_rmse = train_model(
    my_optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.05),
    steps=2000,
    batch_size=50,
    hidden_units=list(_HIDDEN_UNITS),
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)

adagrad_model, adagrad_training_rmse, adagrad_validation_rmse = train_model(
    my_optimizer=tf.train.AdagradOptimizer(learning_rate=0.5),
    steps=500,
    batch_size=100,
    hidden_units=list(_HIDDEN_UNITS),
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)

adam_model, adam_training_rmse, adam_validation_rmse = train_model(
    my_optimizer=tf.train.AdamOptimizer(learning_rate=0.009),
    steps=500,
    batch_size=100,
    hidden_units=list(_HIDDEN_UNITS),
    training_examples=training_examples,
    training_targets=training_targets,
    validation_examples=validation_examples,
    validation_targets=validation_targets)

plt.ylabel("RMSE")
plt.xlabel("Periods")
plt.title("Root Mean Squared Error vs. Periods")
plt.plot(gradient_training_rmse, label='Gradient training')
plt.plot(gradient_validation_rmse, label='Gradient validation')
plt.plot(adagrad_training_rmse, label='Adagrad training')
plt.plot(adagrad_validation_rmse, label='Adagrad validation')
plt.plot(adam_training_rmse, label='Adam training')
plt.plot(adam_validation_rmse, label='Adam validation')
_ = plt.legend()

test_data = housing_dataframe.sample(1)
print(test_data['house_unit_price'])
test_examples = preprocess_features(test_data)
predict_test_input_fn = lambda: my_input_fn(test_examples, test_data[target_label],
                                            num_epochs=1, shuffle=False)

test_predictions = adam_model.predict(
    input_fn=predict_test_input_fn)
for p in test_predictions:
    print(p['predictions'][0]*1000 - test_data['house_unit_price'])
