# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import nested_scopes
from __future__ import print_function


def map_func(args, ctx):
    """TensorFlowOnSpark map function: runs one TF node ("ps" or "worker").

    Builds a click-prediction (CTR-style) feed-forward network whose input
    batches are pulled from a Spark data-feeder job via ``TFNode``, trains it
    with Adam under a ``tf.train.Supervisor``, and in inference mode pushes
    per-example predictions back through ``ctx.mgr``.

    Args:
        args: parsed command-line arguments; this function reads
            ``args.rdma`` (RDMA flag for the cluster server), ``args.mode``
            ("train" or anything else for inference), ``args.model``
            (model dir, resolved to an HDFS path) and ``args.steps``
            (max global steps before stopping).
        ctx: TFNode context object providing ``worker_num``, ``job_name``,
            ``task_index``, ``cluster_spec`` and the ``mgr`` queue used for
            input batches and result output.
    """
    from yahoo.ml.tf import TFNode
    from datetime import datetime
    import math
    import numpy
    import time
    import tensorflow as tf
    import logging

    worker_num = ctx.worker_num
    job_name = ctx.job_name
    task_index = ctx.task_index
    cluster_spec = ctx.cluster_spec

    # Delay PS nodes a bit, since workers seem to reserve GPUs more quickly/reliably (w/o conflict)
    if job_name == "ps":
        time.sleep((worker_num + 1) * 5)

    # Parameters
    batch_size = 3000

    # Get TF cluster and server instances (1 GPU per node; RDMA per args.rdma)
    cluster, server = TFNode.start_cluster_server(ctx, 1, args.rdma)

    def print_log(worker_num, arg):
        # Print a log line prefixed with the worker number.
        # NOTE(review): defined but never called anywhere in this file.
        print("{0}: {1}".format(worker_num, arg))

    def weight_variable(shape):
        # Weight variable initialized from a truncated normal (stddev 0.1).
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial)

    def bias_variable(shape):
        # Bias variable initialized to the constant 0.1.
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    def feed_dict():
        # Get a batch of examples from spark data feeder job.
        # Each item is an 8-tuple:
        #   (clicked, ad, plan, advertiser, industry, sec_industry,
        #    display_mode, tags)
        # Column widths are fixed elsewhere by the placeholders below
        # (ad=1069, plan=322, advertiser=122, industry=21, sec_industry=63,
        # display_mode=3, tags=1334) — presumably one-hot/multi-hot encoded;
        # TODO confirm against the feeder job.
        batch = TFNode.next_batch(ctx.mgr, batch_size)

        clicked = []
        ad = []
        plan = []
        advertiser = []
        industry = []
        sec_industry = []
        display_mode = []
        tags = []
        # Split the row tuples into per-feature columns.
        for item in batch:
            clicked.append(item[0])
            ad.append(item[1])
            plan.append(item[2])
            advertiser.append(item[3])
            industry.append(item[4])
            sec_industry.append(item[5])
            display_mode.append(item[6])
            tags.append(item[7])
        # NOTE(review): dead commented-out code below (earlier concat-input
        # variant) — candidate for deletion.
        # xs1 = numpy.array(ad).astype(numpy.float32)
        # xs2 = numpy.array(plan).astype(numpy.float32)
        # xs3 = numpy.array(advertiser).astype(numpy.float32)
        # xs4 = numpy.array(industry).astype(numpy.float32)
        # xs5 = numpy.array(sec_industry).astype(numpy.float32)
        # xs6 = numpy.array(display_mode).astype(numpy.float32)
        # xs7 = numpy.array(tags).astype(numpy.float32)
        # xs = numpy.concatenate((xs1, xs2, xs3, xs4, xs5, xs6, xs7), axis=1)
        ad = numpy.array(ad).astype(numpy.float32)
        plan = numpy.array(plan).astype(numpy.float32)
        advertiser = numpy.array(advertiser).astype(numpy.float32)
        industry = numpy.array(industry).astype(numpy.float32)
        sec_industry = numpy.array(sec_industry).astype(numpy.float32)
        display_mode = numpy.array(display_mode).astype(numpy.float32)
        tags = numpy.array(tags).astype(numpy.float32)
        clicked = numpy.array(clicked).astype(numpy.float32)
        # Fixed dropout keep-probability for training feeds; evaluation runs
        # below override it with 1.0 in their own feed dicts.
        keep_prob = 0.5
        return ad, plan, advertiser, industry, sec_industry, display_mode, tags, clicked, keep_prob

    if job_name == "ps":
        # Parameter servers just host variables and block forever.
        server.join()
    elif job_name == "worker":
        # Assigns ops to the local worker by default.
        with tf.device(
                tf.train.replica_device_setter(worker_device="/job:worker/task:%d" % task_index, cluster=cluster)):
            # Design the neural network structure.
            # x = tf.placeholder(tf.float32, [None, 1354])
            # One placeholder per sparse-ish feature group; widths must match
            # the columns produced by feed_dict() above.
            ad_holder = tf.placeholder(tf.float32, [None, 1069])
            plan_holder = tf.placeholder(tf.float32, [None, 322])
            advertiser_holder = tf.placeholder(tf.float32, [None, 122])
            industry_holder = tf.placeholder(tf.float32, [None, 21])
            sec_industry_holder = tf.placeholder(tf.float32, [None, 63])
            display_mode_holder = tf.placeholder(tf.float32, [None, 3])
            tags_holder = tf.placeholder(tf.float32, [None, 1334])
            keep_prob_holder = tf.placeholder(tf.float32)

            # ad hidden layer: 1069 -> 128, ReLU
            ad_hiden_w = weight_variable([1069, 128])
            ad_hiden_b = bias_variable([128])
            ad_hiden_layer = tf.nn.relu(tf.matmul(ad_holder, ad_hiden_w) + ad_hiden_b)

            # plan hidden layer: 322 -> 128, ReLU
            plan_hiden_w = weight_variable([322, 128])
            plan_hiden_b = bias_variable([128])
            plan_hiden_layer = tf.nn.relu(tf.matmul(plan_holder, plan_hiden_w) + plan_hiden_b)

            # tags hidden layer: 1334 -> 216, ReLU + dropout
            tags_hiden_w = weight_variable([1334, 216])
            tags_hiden_b = bias_variable([216])
            tags_hiden_layer = tf.nn.relu(tf.matmul(tags_holder, tags_hiden_w) + tags_hiden_b)
            tags_hiden_layer_drop = tf.nn.dropout(tags_hiden_layer, keep_prob_holder)

            # keep_prob1 = tf.placeholder(tf.float32)  # drop out
            # h_fc1_drop = tf.nn.dropout(h_conv1, keep_prob1)

            # concat layer: embedded ad/plan/tags plus the raw remaining
            # features; total width 128 + 128 + 216 + 122 + 21 + 63 + 3 = 681,
            # which matches concat_layer_w below.
            concat_x = tf.concat([ad_hiden_layer, plan_hiden_layer, tags_hiden_layer_drop, advertiser_holder,
                                  industry_holder, sec_industry_holder, display_mode_holder], axis=1)
            concat_layer_w = weight_variable([681, 216])
            concat_layer_b = bias_variable([216])
            concat_layer = tf.nn.relu(tf.matmul(concat_x, concat_layer_w) + concat_layer_b)
            concat_layer_drop = tf.nn.dropout(concat_layer, keep_prob_holder)

            # hidden layer 1: 216 -> 216, ReLU + dropout
            hiden_1_w = weight_variable([216, 216])
            hiden_1_b = bias_variable([216])
            hiden_1 = tf.nn.relu(tf.matmul(concat_layer_drop, hiden_1_w) + hiden_1_b)
            hiden_1_drop = tf.nn.dropout(hiden_1, keep_prob_holder)


            # hidden layer 2: 216 -> 216, ReLU (no dropout)
            hiden_2_w = weight_variable([216, 216])
            hiden_2_b = bias_variable([216])
            hiden_2 = tf.nn.relu(tf.matmul(hiden_1_drop, hiden_2_w) + hiden_2_b)

            # output layer: 216 -> 1, sigmoid click probability
            out_layer_w = weight_variable([216, 1])
            out_layer_b = bias_variable([1])
            click_out = tf.nn.sigmoid(tf.matmul(hiden_2, out_layer_w) + out_layer_b)

            # Ground-truth click labels (0/1), shape [batch, 1].
            click = tf.placeholder(tf.float32, [None, 1])

            # NOTE(review): conventionally the global step is created as
            # tf.Variable(0, name="global_step", trainable=False); without
            # trainable=False it lands in TRAINABLE_VARIABLES — confirm this
            # is harmless here.
            global_step = tf.Variable(0)

            # Loss: binary cross-entropy, with clipping to avoid log(0).
            # NOTE(review): tf.reduce_sum here collapses the whole batch to a
            # scalar, so the outer tf.reduce_mean is a no-op — this is the
            # batch SUM of the cross-entropy, not the mean, and therefore
            # scales with batch_size. Confirm whether that is intended.
            cross_entropy = tf.reduce_mean(-tf.reduce_sum(
                click * tf.log(tf.clip_by_value(click_out, 1e-10, 1.0)) +
                (1 - click) * tf.log(1 - tf.clip_by_value(click_out, 1e-10, 1.0))))
            train_op = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy, global_step=global_step)  # Adam optimizer
            # correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))  # accuracy (dead code)
            # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

            # Compute AUC. streaming_auc returns an (auc_value, update_op)
            # pair (contrib metrics API); sess.run below evaluates both, so
            # the printed "auc" is that pair — presumably intentional, verify.
            auc_tensor = tf.contrib.metrics.streaming_auc(tf.reshape(click_out, [-1]),
                                                          tf.cast(tf.reshape(click, [-1]), tf.bool))

            # tf.summary.scalar("loss", cross_entropy)

            saver = tf.train.Saver()
            summary_op = tf.summary.merge_all()
            init_op = tf.global_variables_initializer()

        # Create a "supervisor", which oversees the training process and stores model state into HDFS
        logdir = TFNode.hdfs_path(ctx, args.model)
        print("tensorflow model path: {0}".format(logdir))
        summary_writer = tf.summary.FileWriter("tensorboard_%d" % worker_num, graph=tf.get_default_graph())

        if args.mode == "train":
            # Training: task 0 is chief; checkpoint every 300 s.
            sv = tf.train.Supervisor(is_chief=(task_index == 0),
                                     logdir=logdir,
                                     init_op=init_op,
                                     summary_op=summary_op,
                                     saver=saver,
                                     global_step=global_step,
                                     summary_writer=summary_writer,
                                     stop_grace_secs=300,
                                     save_model_secs=300)
        else:
            # Inference: restore from logdir, never save (save_model_secs=0).
            sv = tf.train.Supervisor(is_chief=(task_index == 0),
                                     logdir=logdir,
                                     saver=saver,
                                     global_step=global_step,
                                     stop_grace_secs=300,
                                     save_model_secs=0)

        # The supervisor takes care of session initialization, restoring from
        # a checkpoint, and closing when done or an error occurs.
        with sv.managed_session(server.target) as sess:
            print("{0} session ready".format(datetime.now().isoformat()))

            # Loop until the supervisor shuts down or args.steps steps have completed.
            step = 0
            while not sv.should_stop() and step < args.steps:
                # Run a training step asynchronously.
                # See `tf.train.SyncReplicasOptimizer` for additional details on how to
                # perform *synchronous* training.

                # using feed_dict
                (ad_batch, plan_batch, advertiser_batch, industry_batch, sec_industry_batch, display_mode_batch,
                 tags_batch, clicked_batch, kp_prob) = feed_dict()
                feed = {ad_holder: ad_batch, plan_holder: plan_batch, advertiser_holder: advertiser_batch,
                        industry_holder: industry_batch, sec_industry_holder: sec_industry_batch,
                        display_mode_holder: display_mode_batch, tags_holder: tags_batch, click: clicked_batch,
                        keep_prob_holder: kp_prob}

                # A short batch means the Spark feeder has been drained.
                if len(ad_batch) != batch_size:
                    print("done feeding")
                    break
                else:
                    if args.mode == "train":
                        if step % 100 == 0:
                            # Periodic evaluation on the current batch with
                            # dropout disabled (keep_prob 1.0).
                            loss, auc = sess.run([cross_entropy, auc_tensor],
                                                 feed_dict={ad_holder: ad_batch, plan_holder: plan_batch,
                                                            advertiser_holder: advertiser_batch,
                                                            industry_holder: industry_batch,
                                                            sec_industry_holder: sec_industry_batch,
                                                            display_mode_holder: display_mode_batch,
                                                            tags_holder: tags_batch, click: clicked_batch,
                                                            keep_prob_holder: 1.0})
                            print("{0} step: {1} loss:{2}, auc:{3}".format(datetime.now().isoformat(), step, loss, auc))
                        # Train step; `step` tracks the shared global_step.
                        _, step = sess.run([train_op, global_step], feed_dict=feed)
                    else:  # args.mode == "inference"
                        # Inference: run with dropout disabled and emit
                        # (label, prediction) pairs back through ctx.mgr.
                        click_t, preds, loss, auc = sess.run([click, click_out, cross_entropy, auc_tensor],
                                                             feed_dict={ad_holder: ad_batch, plan_holder: plan_batch,
                                                                        advertiser_holder: advertiser_batch,
                                                                        industry_holder: industry_batch,
                                                                        sec_industry_holder: sec_industry_batch,
                                                                        display_mode_holder: display_mode_batch,
                                                                        tags_holder: tags_batch, click: clicked_batch,
                                                                        keep_prob_holder: 1.0})

                        results = ["{0} Label: {1}, Prediction: {2}".format(datetime.now().isoformat(), l, p) for l, p
                                   in zip(click_t, preds)]
                        TFNode.batch_results(ctx.mgr, results)
                        print("loss: {0}, auc: {1} ".format(loss, auc))

            # Tell the Spark-side manager this node is finished.
            if sv.should_stop() or step >= args.steps:
                TFNode.terminate(ctx.mgr)

        # Ask for all the services to stop.
        print("{0} stopping supervisor".format(datetime.now().isoformat()))
        sv.stop()
