# Copyright 2017 Yahoo Inc.
# Licensed under the terms of the Apache 2.0 license.
# Please see LICENSE file in the project root for terms.

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import logging
from datetime import datetime

from pyspark.sql import SparkSession
from yahoo.ml.tf import TFCluster

import ctr_lr_dist_mapfun

# Make logging.info(...) visible: the root logger defaults to WARNING, so
# without an explicit configuration every info-level message in this script
# (including the Start/Stop markers below) would be silently dropped.
logging.basicConfig(level=logging.INFO)

spark = SparkSession.builder.appName("ctr_tensorflow_spark") \
    .enableHiveSupport() \
    .getOrCreate()
sc = spark.sparkContext

# spark.executor.instances is unset in local mode / with dynamic allocation;
# getConf().get(...) then returns None and we fall back to a single worker.
executors = sc.getConf().get("spark.executor.instances")
logging.info("spark.executor.instances={0}".format(executors))
num_executors = int(executors) if executors is not None else 1
num_ps = 1  # number of TensorFlow parameter-server tasks in the cluster

def _parse_bool(text):
    """Interpret common truthy spellings ("1", "true", "yes", "y"); anything else is False."""
    return str(text).strip().lower() in ("1", "true", "yes", "y")


parser = argparse.ArgumentParser()
parser.add_argument("-e", "--epochs", help="number of epochs", type=int, default=1)
parser.add_argument("-f", "--format", help="example format: (csv|pickle|tfr)", choices=["csv", "pickle", "tfr"],
                    default="csv")
# Help texts previously said "MNIST" — copy-paste residue from the MNIST
# example; this script consumes CTR training records.
parser.add_argument("-p", "--path", help="HDFS path to CTR training data in parallelized format")
parser.add_argument("-m", "--model", help="HDFS path to save/load model during train/inference",
                    default="mnist/mnist_model/")
parser.add_argument("-n", "--cluster_size", help="number of nodes in the cluster", type=int, default=num_executors)
parser.add_argument("-o", "--output", help="HDFS path to save test/inference output", default="predictions")
parser.add_argument("-r", "--readers", help="number of reader/enqueue threads", type=int, default=1)
parser.add_argument("-s", "--steps", help="maximum number of steps", type=int, default=1000)
parser.add_argument("-tb", "--tensorboard", help="launch tensorboard process", action="store_true")
# choices= rejects typos up front; previously any non-"train" value silently
# selected the inference branch.
parser.add_argument("-X", "--mode", help="train|inference", choices=["train", "inference"], default="train")
# type=_parse_bool fixes the original behavior where "-c false" produced the
# truthy string "false"; "-c true" still yields True and the default is False.
parser.add_argument("-c", "--rdma", help="use rdma connection", type=_parse_bool, default=False)
args = parser.parse_args()
logging.info("args:{0}".format(args))

logging.info("{0} ===== Start".format(datetime.now().isoformat()))


# def mapRow(row):
#     clicked = [float(row.clicked)]
#     ad = [float(x) for x in row.ad.split(',')]
#     plan = [float(x) for x in row.plan.split(',')]
#     advertiser = [float(x) for x in row.advertiser.split(',')]
#     industry = [float(x) for x in row.industry.split(',')]
#     sec_industry = [float(x) for x in row.sec_industry.split(',')]
#     display_mode = [float(x) for x in row.display_mode.split(',')]
#     tags = [float(x) for x in row.tags.split(',')]
#     return clicked, ad, plan, advertiser, industry, sec_industry, display_mode, tags
#
# train_df = spark.sql("select * from adv.ctr_train_data")
# dataRDD = train_df.rdd.map(mapRow)

def mapLine(line):
    """Parse one tab-separated training record into per-group feature lists.

    Column layout (0-based after splitting on tabs):
      [1]   click label (single float),
      [2:9] seven comma-separated float feature groups:
            ad, plan, advertiser, industry, sec_industry, display_mode, tags.

    Returns an 8-tuple: (clicked, ad, plan, advertiser, industry,
    sec_industry, display_mode, tags), each a list of floats.
    """
    fields = line.split('\t')
    label = [float(fields[1])]
    # Columns 2..8 share the same "comma-separated floats" encoding, so
    # decode them uniformly instead of one hand-written line per group.
    feature_groups = tuple(
        [float(value) for value in fields[column].split(',')]
        for column in range(2, 9)
    )
    return (label,) + feature_groups

# Each HDFS text line becomes an 8-tuple of float lists (label + 7 feature
# groups) via mapLine; the RDD is fed to the TF cluster below (InputMode.SPARK).
dataRDD = sc.textFile(args.path).map(mapLine)

# Reserve cluster_size executors (num_ps of them as parameter servers), then
# launch the TF graph defined in ctr_lr_dist_mapfun on each node.
# InputMode.SPARK: training data is pushed from Spark RDD partitions rather
# than read directly by the TF workers.
cluster = TFCluster.reserve(sc, args.cluster_size, num_ps, args.tensorboard, TFCluster.InputMode.SPARK)
cluster.start(ctr_lr_dist_mapfun.map_func, args)
# cluster = TFCluster.run(sc, ctr_dist_mapfun.map_func, args, args.cluster_size, num_ps, args.tensorboard,
#                         TFCluster.InputMode.SPARK)

# Drive the reserved TF cluster: stream the RDD in for training, or run the
# saved model over it and persist predictions to HDFS for inference.
# NOTE(review): any mode other than "train" takes the inference branch.
if args.mode == "train":
    cluster.train(dataRDD, args.epochs)
else:
    cluster.inference(dataRDD).saveAsTextFile(args.output)

# Release the reserved executors before logging the stop marker.
cluster.shutdown()

logging.info("{0} ===== Stop".format(datetime.now().isoformat()))
