import os

import tensorflow as tf

from src.utils import (column_defaults, column_names, get_train_file_names,
                       parse_like_csv)


def get_train_dataset(num_epochs=1, shuffle=True, shuffle_buffer_size=99999):
    """Build the training ``tf.data.Dataset`` of parsed CSV-like records.

    Reads raw text lines from the training files, optionally shuffles
    them, parses each line in parallel, and prefetches so the input
    pipeline overlaps with training.

    Args:
        num_epochs: Number of passes over the data (default 1).
        shuffle: Whether to shuffle the line order before parsing.
        shuffle_buffer_size: Size of the shuffle buffer. Defaults to the
            previously hard-coded value of 99999, so existing callers see
            identical behavior.

    Returns:
        A ``tf.data.Dataset`` yielding records parsed by ``parse_like_csv``.
    """
    dataset = tf.data.TextLineDataset(get_train_file_names())
    dataset = dataset.repeat(num_epochs)
    if shuffle:
        # A large buffer gives a better approximation of a full shuffle.
        dataset = dataset.shuffle(shuffle_buffer_size)
    # Parse lines in parallel across all available CPU cores.
    dataset = dataset.map(parse_like_csv, num_parallel_calls=os.cpu_count())
    # Prefetch so upcoming elements are prepared while the model trains.
    dataset = dataset.prefetch(200)
    return dataset


def train_lr():
    """Train a logistic-regression "like" model with an FTRL LinearClassifier.

    Builds sparse categorical feature columns for the uid / item_id /
    author_id / item_city / channel fields and trains a
    ``tf.estimator.LinearClassifier``, checkpointing to
    ``./lr_like_checkpoints``.

    Observed per-column cardinalities in the raw data (in column order):
    [663011, 0, 31180492, 15595718, 410, 6]
    """
    # Hash buckets are sized at 10x the observed cardinality to keep the
    # collision rate low.
    bucket_scale = 10

    def get_lr_columns():
        # Build the sparse categorical columns used by the linear model.
        uid = tf.feature_column.categorical_column_with_hash_bucket(
            'uid', 663011 * bucket_scale)
        # The second raw column (user_city) has a single value and carries
        # no information, so it is intentionally dropped.
        item_id = tf.feature_column.categorical_column_with_hash_bucket(
            'item_id', 31180492 * bucket_scale)
        author_id = tf.feature_column.categorical_column_with_hash_bucket(
            'author_id', 15595718 * bucket_scale)
        item_city = tf.feature_column.categorical_column_with_hash_bucket(
            'item_city', 410 * bucket_scale)
        # channel has a small closed vocabulary, so an explicit list beats
        # hashing here.
        channel = tf.feature_column.categorical_column_with_vocabulary_list(
            'channel', ['0', '1', '2', '3'])
        return [uid, item_id, author_id, item_city, channel]

    lr_columns = get_lr_columns()
    lr_estimator = tf.estimator.LinearClassifier(
        feature_columns=lr_columns,
        model_dir='./lr_like_checkpoints',
        optimizer=tf.train.FtrlOptimizer(learning_rate=0.2))
    # The estimator calls the input_fn itself, so the function (not a
    # dataset instance) is passed.
    lr_estimator.train(get_train_dataset)


if __name__ == "__main__":
    # tf.logging is the TF 1.x logging API; INFO surfaces training progress
    # (loss, global step) from the estimator.
    tf.logging.set_verbosity(tf.logging.INFO)
    train_lr()
