from xml.etree.ElementTree import ElementTree,Element
import subprocess

def read_xml(in_path):
  """Parse the XML file at *in_path* and return the resulting ElementTree."""
  # ElementTree(file=...) parses the source in the constructor — same as
  # creating an empty tree and calling .parse() on it.
  return ElementTree(file=in_path)
  
def if_match(node, kv_map):
  """Return True iff every key/value pair in *kv_map* matches *node*'s attributes."""
  # node.get(key) is None for a missing attribute, so a key absent from the
  # node only matches if kv_map maps it to None as well.
  return all(node.get(key) == value for key, value in kv_map.items())
  
def find_nodes(tree, path):
  """Return a list of all elements matching *path* (ElementPath syntax) in *tree*."""
  # findall(path) is documented as list(iterfind(path)) in document order.
  return list(tree.iterfind(path))
  
def get_node_by_keyvalue(nodelist, kv_map):
  """Filter *nodelist*, keeping the elements whose attributes match every
  key/value pair in *kv_map* (empty kv_map matches everything)."""
  return [node for node in nodelist
          if all(node.get(key) == value for key, value in kv_map.items())]

def init_myid(node):
  """Run `myid.sh <id>` inside the node's docker container.

  Presumably writes the ZooKeeper myid for this server — TODO confirm
  what myid.sh does inside the image.

  node: XML element with "host" (container name) and "id" attributes.
  Returns the `docker exec` return code.
  """
  # Fix: `print x` is Python-2-only (SyntaxError on Python 3); print(x)
  # behaves identically on both. Also build argv as a literal instead of
  # twelve append() calls.
  # NOTE(review): -it allocates a TTY; docker exec fails with "the input
  # device is not a TTY" when run non-interactively — confirm usage.
  myid_cmd = [
    "docker", "exec", "-it",
    node.attrib["host"],
    "myid.sh",
    node.attrib["id"],
  ]
  print(myid_cmd)
  return subprocess.call(myid_cmd)

def start_zkserver(node):
  """Run `zkServer.sh start` inside the node's docker container.

  node: XML element whose "host" attribute names the container.
  Returns the `docker exec` return code.
  """
  # Fix: Python-2 print statement -> print() (valid and identical on
  # Python 2 and 3); argv built as a literal instead of repeated append().
  start_cmd = [
    "docker", "exec", "-it",
    node.attrib["host"],
    "zkServer.sh", "start",
  ]
  print(start_cmd)
  return subprocess.call(start_cmd)

def zkcluster_run(nodelist):
  """For each ZooKeeper node: write its myid, then start the server."""
  for zk_node in nodelist:
    init_myid(zk_node)
    start_zkserver(zk_node)

def start_journalnode(node):
  """Run `hadoop-daemon.sh start journalnode` inside the node's container.

  node: XML element whose "host" attribute names the container.
  Returns the `docker exec` return code.
  """
  # Fix: Python-2 print statement -> print() (valid and identical on
  # Python 2 and 3); argv built as a literal instead of repeated append().
  start_cmd = [
    "docker", "exec", "-it",
    node.attrib["host"],
    "hadoop-daemon.sh", "start", "journalnode",
  ]
  print(start_cmd)
  return subprocess.call(start_cmd)

def journalnode_run(nodelist):
  """Start the journalnode daemon on every node in *nodelist*."""
  for jn_node in nodelist:
    start_journalnode(jn_node)

def format_namenode(node):
  """Run `hdfs namenode -format` inside the node's container.

  Destructive on existing HDFS metadata — only intended for first-time
  cluster bringup (see namenode_run, which calls this on the first
  namenode only).

  node: XML element whose "host" attribute names the container.
  Returns the `docker exec` return code.
  """
  # Fix: Python-2 print statement -> print() (valid and identical on
  # Python 2 and 3); argv built as a literal instead of repeated append().
  format_cmd = [
    "docker", "exec", "-it",
    node.attrib["host"],
    "hdfs", "namenode", "-format",
  ]
  print(format_cmd)
  return subprocess.call(format_cmd)

def bootstrap_namenode(node):
  """Run `hdfs namenode -bootstrapStandby` inside the node's container
  (copies namespace metadata from the already-formatted active namenode).

  node: XML element whose "host" attribute names the container.
  Returns the `docker exec` return code.
  """
  # Fix: Python-2 print statement -> print() (valid and identical on
  # Python 2 and 3); argv built as a literal instead of repeated append().
  bootstrap_cmd = [
    "docker", "exec", "-it",
    node.attrib["host"],
    "hdfs", "namenode", "-bootstrapStandby",
  ]
  print(bootstrap_cmd)
  return subprocess.call(bootstrap_cmd)

def start_namenode(node):
  """Run `hadoop-daemon.sh start namenode` inside the node's container.

  node: XML element whose "host" attribute names the container.
  Returns the `docker exec` return code.
  """
  # Fix: Python-2 print statement -> print() (valid and identical on
  # Python 2 and 3); argv built as a literal instead of repeated append().
  start_cmd = [
    "docker", "exec", "-it",
    node.attrib["host"],
    "hadoop-daemon.sh", "start", "namenode",
  ]
  print(start_cmd)
  return subprocess.call(start_cmd)

def format_zkfc(node):
  """Run `hdfs zkfc -formatZK` inside the node's container (initializes the
  HA failover-controller state in ZooKeeper; done once per cluster).

  node: XML element whose "host" attribute names the container.
  Returns the `docker exec` return code.
  """
  # Fix: Python-2 print statement -> print() (valid and identical on
  # Python 2 and 3); argv built as a literal instead of repeated append().
  format_cmd = [
    "docker", "exec", "-it",
    node.attrib["host"],
    "hdfs", "zkfc", "-formatZK",
  ]
  print(format_cmd)
  return subprocess.call(format_cmd)

def start_zkfc(node):
  """Run `hadoop-daemon.sh start zkfc` inside the node's container.

  node: XML element whose "host" attribute names the container.
  Returns the `docker exec` return code.
  """
  # Fix: Python-2 print statement -> print() (valid and identical on
  # Python 2 and 3); argv built as a literal instead of repeated append().
  start_cmd = [
    "docker", "exec", "-it",
    node.attrib["host"],
    "hadoop-daemon.sh", "start", "zkfc",
  ]
  print(start_cmd)
  return subprocess.call(start_cmd)

def start_yarn(node):
  """Run `start-yarn.sh` inside the node's container.

  node: XML element whose "host" attribute names the container.
  Returns the `docker exec` return code.
  """
  # Fix: Python-2 print statement -> print() (valid and identical on
  # Python 2 and 3); argv built as a literal instead of repeated append().
  start_cmd = [
    "docker", "exec", "-it",
    node.attrib["host"],
    "start-yarn.sh",
  ]
  print(start_cmd)
  return subprocess.call(start_cmd)

def start_resourcemanager(node):
  """Run `yarn-daemon.sh start resourcemanager` inside the node's container.

  node: XML element whose "host" attribute names the container.
  Returns the `docker exec` return code.
  """
  # Fix: Python-2 print statement -> print() (valid and identical on
  # Python 2 and 3); argv built as a literal instead of repeated append().
  start_cmd = [
    "docker", "exec", "-it",
    node.attrib["host"],
    "yarn-daemon.sh", "start", "resourcemanager",
  ]
  print(start_cmd)
  return subprocess.call(start_cmd)

def zkfc_run(nodelist):
  """Start the ZKFC (failover controller) daemon on every node in *nodelist*."""
  for fc_node in nodelist:
    start_zkfc(fc_node)

def yarn_run(nodelist):
  """Start YARN from the first node; start a standalone resourcemanager
  daemon on each remaining node."""
  for idx, rm_node in enumerate(nodelist):
    if idx == 0:
      start_yarn(rm_node)
    else:
      start_resourcemanager(rm_node)

def namenode_run(nodelist):
  """Bring up each HA namenode, then initialize the ZKFC state once.

  The first node is freshly formatted; every later node bootstraps its
  metadata from the active one. After the final node is started,
  `hdfs zkfc -formatZK` runs on it (once per cluster).
  """
  last = len(nodelist) - 1
  for idx, nn_node in enumerate(nodelist):
    if idx == 0:
      format_namenode(nn_node)
    else:
      bootstrap_namenode(nn_node)
    start_namenode(nn_node)

    if idx == last:
      format_zkfc(nn_node)
      
def stop_hdfs(node, name):
  """Run `hadoop-daemon.sh stop <name>` inside the node's container.

  node: XML element whose "host" attribute names the container.
  name: daemon to stop (e.g. "journalnode", "namenode").
  Returns the `docker exec` return code.
  """
  # Fix: Python-2 print statement -> print() (valid and identical on
  # Python 2 and 3); argv built as a literal instead of repeated append().
  stop_cmd = [
    "docker", "exec", "-it",
    node.attrib["host"],
    "hadoop-daemon.sh", "stop", name,
  ]
  print(stop_cmd)
  return subprocess.call(stop_cmd)

def journalnode_stop(nodelist):
  """Stop the journalnode daemon on every node in *nodelist*."""
  for jn_node in nodelist:
    stop_hdfs(jn_node, "journalnode")

def namenode_stop(nodelist):
  """Stop the namenode daemon on every node in *nodelist*."""
  for nn_node in nodelist:
    stop_hdfs(nn_node, "namenode")

def start_dfs(node):
  """Run `start-dfs.sh` inside the node's container (starts the whole HDFS
  cluster from that node).

  node: XML element whose "host" attribute names the container.
  Returns the `docker exec` return code.
  """
  # Fix: Python-2 print statement -> print() (valid and identical on
  # Python 2 and 3); argv built as a literal instead of repeated append().
  start_cmd = [
    "docker", "exec", "-it",
    node.attrib["host"],
    "start-dfs.sh",
  ]
  print(start_cmd)
  return subprocess.call(start_cmd)

if __name__ == "__main__":
  # Bring up a dockerized HA Hadoop stack described by hacluster.xml:
  # ZooKeeper ensemble first, then HDFS (journalnodes -> HA namenodes ->
  # ZKFC), then YARN resourcemanagers.
  # NOTE(review): assumes hacluster.xml (in the CWD) holds <cluster type=...>
  # elements whose <node> children carry host/platform/type (and, for zk
  # nodes, id) attributes — verify against the actual config file.
  cluster = read_xml("hacluster.xml")
  clusterlist = find_nodes(cluster, "cluster")

  # 1> zk cluster: write each server's myid, then start zkServer

  zkcluster = get_node_by_keyvalue(clusterlist, {"type":"zk"})
  zknodes = zkcluster[0].findall("node")  
  zkdockers = get_node_by_keyvalue(zknodes, {"platform":"docker"})
  zkcluster_run(zkdockers)

  # 2> hdfs cluster

  hdfscluster = get_node_by_keyvalue(clusterlist, {"type":"hdfs"})
  hdfsnodes = hdfscluster[0].findall("node")  
  hdfsdockers = get_node_by_keyvalue(hdfsnodes, {"platform":"docker"})
  
  # 2.1> journal node: must be running before the namenodes are formatted

  journalnodes = get_node_by_keyvalue(hdfsdockers, {"type":"journalnode"})
  journalnode_run(journalnodes)

  # 2.2> name node: format first, bootstrap standbys, start all, formatZK
  namenodes = get_node_by_keyvalue(hdfsdockers, {"type":"namenode"})
  namenode_run(namenodes)

  # 2.3> start hdfs cluster: stop the individually started daemons, then
  # relaunch everything via start-dfs.sh from the first namenode
  journalnode_stop(journalnodes)
  namenode_stop(namenodes)
  start_dfs(namenodes[0])
  zkfc_run(namenodes)

  # 3> yarn cluster
  yarncluster = get_node_by_keyvalue(clusterlist, {"type":"yarn"})
  yarnnodes = yarncluster[0].findall("node")  
  yarndockers = get_node_by_keyvalue(yarnnodes, {"platform":"docker"}) 
  
  # 3.1> resource manager: start-yarn.sh on the first, standby RMs on the rest
  resourcemanagers = get_node_by_keyvalue(yarndockers, {"type":"resourcemanager"})
  yarn_run(resourcemanagers)
