# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import time
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Command-line flag definitions for the distributed run.
# NOTE: the help strings below are runtime values shown by --help and are
# deliberately left byte-identical (they are in Chinese); English summaries
# are provided in the comments above each flag.
flags = tf.app.flags
# Directory holding the MNIST data files.
flags.DEFINE_string("data_dir", "data/",
                    "存储数据文件夹")
# Directory where model checkpoints and summaries are written.
flags.DEFINE_string("output_dir", "output/",
                    "存储model和summary文件夹")
# Index of this task within its job; worker task 0 acts as the chief.
flags.DEFINE_integer("task_index", None,"节点编号，woker节点编号0为主节点")
# Number of GPUs on this task's machine; 0 means CPU-only. GPUs are
# assigned per task (task_index modulo num_gpus) to avoid conflicts.
flags.DEFINE_integer("num_gpus", 1, "当前task_index 所在机器的GPU数目，若不适用gpu置为0,num_gpus应该等于task_index的最大数目(利用task管理gpu使用)")
# Sync mode only: number of batch replicas the ps aggregates before each
# parameter update; defaults to the number of workers when left unset.
flags.DEFINE_integer("replicas_to_aggregate", None,"参数节点更新前需要的batch副本数目,仅对同步模式有效,模式为worker节点数目")
# Width of the single hidden layer.
flags.DEFINE_integer("hidden_units", 100,"神经网络隐藏节点数目")
# Total number of global training steps before workers stop.
flags.DEFINE_integer("train_steps", 400,"全局迭代次数")
# Mini-batch size per training step.
flags.DEFINE_integer("batch_size", 100, "batch大小")
# Learning rate passed to AdamOptimizer.
flags.DEFINE_float("learning_rate", 0.01, "学习速率")
# Whether to train with synchronous replica aggregation.
flags.DEFINE_boolean("sync_replicas", True,"是否为同步模式,默认为True")
# Comma-separated host:port list of parameter-server nodes.
flags.DEFINE_string("ps_hosts", "localhost:2222", "ps节点集合,用逗号隔开")
# Comma-separated host:port list of worker nodes.
flags.DEFINE_string("worker_hosts", "localhost:2223,localhost:2224", "worker节点集合,用逗号隔开")
# Role of this process: "ps" or "worker".
flags.DEFINE_string("job_name", None, "job_name:ps or worker")
FLAGS = flags.FLAGS
# Side length of an MNIST image (28x28 pixels).
IMAGE_PIXELS = 28
def main(unused_argv):
  """Run one node of a distributed (between-graph replicated) MNIST trainer.

  Depending on FLAGS.job_name this process either acts as a parameter
  server -- it serves variables and blocks forever -- or as a worker that
  builds a one-hidden-layer softmax network and trains it, optionally with
  synchronous gradient aggregation (tf.train.SyncReplicasOptimizer).

  Raises:
    ValueError: if `job_name` / `task_index` are missing, or `num_gpus`
      is negative.
  """
  # Validate required flags before doing any expensive work (previously the
  # dataset was read before these checks could fire).
  if FLAGS.job_name is None or FLAGS.job_name == "":
    raise ValueError("Must specify an explicit `job_name`")
  if FLAGS.task_index is None or FLAGS.task_index == "":
    raise ValueError("Must specify an explicit `task_index`")
  print("job name = %s" % FLAGS.job_name)
  print("task index = %d" % FLAGS.task_index)
  # Build the cluster description and start this node's in-process server.
  ps_spec = FLAGS.ps_hosts.split(",")
  worker_spec = FLAGS.worker_hosts.split(",")
  # Number of worker nodes; the default replica count in sync mode.
  num_workers = len(worker_spec)
  cluster = tf.train.ClusterSpec({"ps": ps_spec, "worker": worker_spec})
  server = tf.train.Server(cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index)
  # Parameter-server nodes only host and update variables; they never return.
  if FLAGS.job_name == "ps":
    server.join()
    return
  # Fix: load the dataset on worker nodes only -- it used to be read
  # unconditionally, forcing ps nodes to fetch data they never use.
  print(FLAGS.data_dir)
  mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)
  is_chief = (FLAGS.task_index == 0)
  if FLAGS.num_gpus > 0:
    # Assign one GPU per task (round-robin) to avoid allocation conflicts.
    gpu = (FLAGS.task_index % FLAGS.num_gpus)
    worker_device = "/job:worker/task:%d/gpu:%d" % (FLAGS.task_index, gpu)
  elif FLAGS.num_gpus == 0:
    # CPU-only worker device.
    cpu = 0
    worker_device = "/job:worker/task:%d/cpu:%d" % (FLAGS.task_index, cpu)
  else:
    # Fix: a negative num_gpus previously fell through both branches and
    # crashed later with NameError on worker_device.
    raise ValueError("num_gpus must be >= 0, got %d" % FLAGS.num_gpus)
  # replica_device_setter pins variables to ps nodes (round-robin) and all
  # other ops to this worker; ps uses CPU, workers use GPU when available.
  with tf.device(
      tf.train.replica_device_setter(
          worker_device=worker_device,
          ps_device="/job:ps/cpu:0",
          cluster=cluster)):
    # Global step: number of aggregated updates in sync mode, or the sum of
    # every worker's local steps in async mode.
    global_step = tf.Variable(0, name="global_step", trainable=False)
    # Hidden-layer parameters.
    hid_w = tf.Variable(
        tf.truncated_normal(
            [IMAGE_PIXELS * IMAGE_PIXELS, FLAGS.hidden_units],
            stddev=1.0 / IMAGE_PIXELS),
        name="hid_w")
    hid_b = tf.Variable(tf.zeros([FLAGS.hidden_units]), name="hid_b")
    # Softmax-layer parameters.
    sm_w = tf.Variable(
        tf.truncated_normal(
            [FLAGS.hidden_units, 10],
            stddev=1.0 / math.sqrt(FLAGS.hidden_units)),
        name="sm_w")
    sm_b = tf.Variable(tf.zeros([10]), name="sm_b")
    # Non-variable ops are placed on the worker device.
    x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS * IMAGE_PIXELS])
    y_ = tf.placeholder(tf.float32, [None, 10])
    hid_lin = tf.nn.xw_plus_b(x, hid_w, hid_b)
    hid = tf.nn.relu(hid_lin)
    y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))
    # Clip predictions to avoid log(0).
    cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
    tf.summary.scalar('loss',cross_entropy)
    opt = tf.train.AdamOptimizer(FLAGS.learning_rate)
    # Sync mode: how many gradient copies the ps waits for per update.
    #  == num_workers: each worker contributes exactly one batch per update.
    #  >  num_workers: fast workers grab extra batches before aggregation.
    #  <  num_workers: spare workers stand by to replace failed ones.
    if FLAGS.sync_replicas:
      if FLAGS.replicas_to_aggregate is None:
        replicas_to_aggregate = num_workers
      else:
        replicas_to_aggregate = FLAGS.replicas_to_aggregate
      # Wrap the optimizer so gradients are aggregated before being applied.
      opt = tf.train.SyncReplicasOptimizer(opt,replicas_to_aggregate=replicas_to_aggregate,total_num_replicas=num_workers,name="mnist_sync_replicas")
    # Training op.
    train_step = opt.minimize(cross_entropy, global_step=global_step)
    if FLAGS.sync_replicas:
      # In sync mode the chief worker runs chief_init_op; the others run a
      # local initialization.
      local_init_op = opt.local_step_init_op
      if is_chief:
        local_init_op = opt.chief_init_op
      ready_for_local_init_op = opt.ready_for_local_init_op
      # Queue runner that feeds the token queue pacing sync updates.
      chief_queue_runner = opt.get_chief_queue_runner()
      sync_init_op = opt.get_init_tokens_op()
    init_op = tf.global_variables_initializer()
    merged_summary_op = tf.summary.merge_all()
    output_dir= FLAGS.output_dir
    # Supervisor setup: sync mode needs the extra local-init hooks.
    if FLAGS.sync_replicas:
      sv = tf.train.Supervisor(
          is_chief=is_chief,
          logdir=output_dir,
          init_op=init_op,
          summary_op=None,
          local_init_op=local_init_op,
          ready_for_local_init_op=ready_for_local_init_op,
          recovery_wait_secs=1,
          global_step=global_step)
    else:
      sv = tf.train.Supervisor(
          is_chief=is_chief,
          logdir=output_dir,
          init_op=init_op,
          summary_op=None,
          recovery_wait_secs=1,
          global_step=global_step)
    sess_config = tf.ConfigProto(
        allow_soft_placement=True,
        log_device_placement=False,
        device_filters=["/job:ps", "/job:worker/task:%d" % FLAGS.task_index])
    # The chief worker (task_index 0) initializes the session; other workers
    # wait for that initialization to complete.
    if is_chief:
      print("Worker %d: Initializing session..." % FLAGS.task_index)
    else:
      print("Worker %d: Waiting for session to be initialized..." %FLAGS.task_index)
    sess = sv.prepare_or_wait_for_session(server.target, config=sess_config)
    print("Worker %d: Session initialization complete." % FLAGS.task_index)
    if FLAGS.sync_replicas and is_chief:
      # The chief seeds the sync token queue and starts the queue runner.
      sess.run(sync_init_op)
      sv.start_queue_runners(sess, [chief_queue_runner])
    # Training loop: run until the global step reaches FLAGS.train_steps.
    time_begin = time.time()
    print("Training begins @ %f" % time_begin)
    local_step = 0
    while True:
      batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)
      train_feed = {x: batch_xs, y_: batch_ys}
      _, step = sess.run([train_step, global_step], feed_dict=train_feed)
      if is_chief:
          # Only the chief records summaries, one per local step.
          summary_str = sess.run(merged_summary_op,feed_dict=train_feed)
          sv.summary_computed(sess,summary_str,global_step=step)
      local_step += 1
      now = time.time()
      print("%f: Worker %d: training step %d done (global step: %d)" % (now, FLAGS.task_index, local_step, step))
      if step >= FLAGS.train_steps:
        break
    time_end = time.time()
    print("Training ends @ %f" % time_end)
    training_time = time_end - time_begin
    print("Training elapsed time: %f s" % training_time)
    # Report cross-entropy on the held-out validation set.
    val_feed = {x: mnist.validation.images, y_: mnist.validation.labels}
    val_xent = sess.run(cross_entropy, feed_dict=val_feed)
    print("After %d training step(s), validation cross entropy = %g" %
          (FLAGS.train_steps, val_xent))
    print(FLAGS.output_dir)
    # Only the chief writes the final checkpoint.
    if is_chief:
        sv.saver.save(sess,output_dir+"model.ckpt",global_step=step)
if __name__ == "__main__":
  # tf.app.run() parses the flags defined above and then calls main().
  tf.app.run()