from xml.etree.ElementTree import ElementTree,Element
import os
import subprocess

def read_xml(in_path):
  """Load and parse the XML document at in_path, returning its ElementTree."""
  # ElementTree(file=...) parses the file during construction.
  return ElementTree(file=in_path)

def write_xml(tree, out_path):
  """Serialize tree to out_path as UTF-8, prefixed with an XML declaration."""
  tree.write(out_path, xml_declaration=True, encoding="utf-8")

def if_match(node, kv_map):
  """Return True if node carries every attribute key/value pair in kv_map.

  An empty kv_map matches any node. A key absent from the node's attributes
  compares as None and therefore only matches a kv_map value of None.
  """
  return all(node.get(key) == value for key, value in kv_map.items())

def if_match_subnode(node, nodetag, text):
  """Return True if node has a direct child tagged nodetag whose text equals text.

  Returns False (instead of raising AttributeError, as the previous version
  did) when no child with that tag exists.
  """
  subnode = node.find(nodetag)
  return subnode is not None and subnode.text == text

#---------------search -----
def find_nodes(tree, path):
  """Return all elements in tree matching the given XPath-style path."""
  matches = tree.findall(path)
  return matches

def get_node_by_keyvalue(nodelist, kv_map):
  """Return the nodes from nodelist whose attributes match every pair in kv_map."""
  # Comprehension instead of the manual append loop.
  return [node for node in nodelist if if_match(node, kv_map)]

def get_node_by_subnodetext(nodelist, nodetag, text):
  """Return the nodes from nodelist having a nodetag child whose text equals text."""
  # Comprehension instead of the manual append loop.
  return [node for node in nodelist if if_match_subnode(node, nodetag, text)]

#---------------change -----
def change_node_properties(nodelist, kv_map, is_delete=False):
  """Set the kv_map attributes on every node; with is_delete, remove them instead.

  Deleting a key that a node does not carry is a silent no-op.
  """
  for node in nodelist:
    if is_delete:
      for key in kv_map:
        # pop with a default mirrors "del if present"
        node.attrib.pop(key, None)
    else:
      for key, value in kv_map.items():
        node.set(key, value)

def change_node_text(nodelist, text, is_add=False, is_delete=False):
  """Replace each node's text with text; append when is_add, blank when is_delete.

  is_add takes precedence over is_delete when both are passed. Appending now
  treats a missing text (None, the Element default) as "" instead of raising
  TypeError as the previous version did.
  """
  for node in nodelist:
    if is_add:
      node.text = (node.text or "") + text
    elif is_delete:
      node.text = ""
    else:
      node.text = text

def change_subnode_text(nodelist, subnode, text, is_add=False, is_delete=False):
  """Set/append/clear the text of each node's subnode child (same flags as
  change_node_text).

  The child lookup is done once per node, and nodes lacking the child are
  skipped instead of raising AttributeError as the previous version did.
  Appending treats a missing text (None) as "".
  """
  for node in nodelist:
    child = node.find(subnode)
    if child is None:
      continue  # no such child on this node; nothing to change
    if is_add:
      child.text = (child.text or "") + text
    elif is_delete:
      child.text = ""
    else:
      child.text = text

def create_node(tag, property_map, content):
  """Build and return a new Element with the given tag, attribute dict, and text."""
  node = Element(tag, property_map)
  node.text = content
  return node

def add_child_node(nodelist, element):
  """Append element as the last child of every node in nodelist.

  Note: the same Element object is shared by all parents, not copied.
  """
  for parent in nodelist:
    parent.append(element)

def add_property(root, name, value):
  """Append a Hadoop-style <property><name>..</name><value>..</value></property>
  entry under root."""
  # Locals renamed so they no longer shadow the name/value parameters.
  prop = create_node("property", {}, "")
  name_node = create_node("name", {}, name)
  value_node = create_node("value", {}, value)
  prop.append(name_node)
  prop.append(value_node)
  root.append(prop)

def del_node_by_tagkeyvalue(nodelist, tag, kv_map):
  """Remove from each parent in nodelist every direct child whose tag is tag
  and whose attributes match all pairs in kv_map."""
  for parent_node in nodelist:
    # Iterate over a snapshot: getchildren() was deprecated and removed in
    # Python 3.9, and we must not mutate while iterating the live child list.
    for child in list(parent_node):
      if child.tag == tag and if_match(child, kv_map):
        parent_node.remove(child)

if __name__ == "__main__":
  # Generate ZooKeeper/HDFS/YARN configuration files from the cluster layout
  # described in hacluster.xml. Output goes under config/zookeeper and
  # config/hadoop; those directories are assumed to exist — TODO confirm.
  cluster = read_xml("hacluster.xml")
  clusterlist = find_nodes(cluster, "cluster")

  # 1> zk cluster
  zkcluster = get_node_by_keyvalue(clusterlist, {"type": "zk"})
  zklist = zkcluster[0].findall("node")

  # 1.1> zookeeper nodes
  zookeepers = get_node_by_keyvalue(zklist, {"type": "zookeeper"})

  # The host:2181 quorum string is needed by core-site.xml and yarn-site.xml
  # as well, so build it once instead of three times.
  zk_quorum = ",".join(z.attrib["host"] + ":2181" for z in zookeepers)

  # 1.2> zoo.cfg
  zoocfg = [
    "tickTime=2000\n",
    "dataDir=/root/zookeeper\n",
    "clientPort=2181\n",
    "initLimit=5\n",
    "syncLimit=2\n",
  ]
  # server IDs are 1-based by ZooKeeper convention
  for i, zookeeper in enumerate(zookeepers, 1):
    zoocfg.append("server." + str(i) + "=" + zookeeper.attrib["host"] + ":2888:3888\n")
  # open() instead of the Python-2-only file() builtin; "with" guarantees close
  with open("config/zookeeper/zoo.cfg", "w") as zoo_file:
    zoo_file.writelines(zoocfg)

  # 2> hdfs cluster
  hdfscluster = get_node_by_keyvalue(clusterlist, {"type": "hdfs"})
  hdfsclustername = hdfscluster[0].attrib["name"]
  hdfslist = hdfscluster[0].findall("node")

  # 2.1-2.3> namenodes / journalnodes / datanodes
  namenodes = get_node_by_keyvalue(hdfslist, {"type": "namenode"})
  journalnodes = get_node_by_keyvalue(hdfslist, {"type": "journalnode"})
  datanodes = get_node_by_keyvalue(hdfslist, {"type": "datanode"})

  # 2.4> hdfs-site.xml
  hdfs = create_node("configuration", {}, "")
  # ElementTree(root) is the public way to set the root (avoids _setroot)
  hdfs_site = ElementTree(hdfs)

  # 2.4.1> dfs.nameservices
  add_property(hdfs, "dfs.nameservices", hdfsclustername)

  # 2.4.2> dfs.ha.namenodes.[nameservice ID]
  # NOTE(review): the cluster XML schema spells this attribute "invididual"
  # (sic); the misspelling is kept for compatibility with existing
  # hacluster.xml files.
  nameserviceids = ",".join(nn.attrib["invididual"] for nn in namenodes)
  add_property(hdfs, "dfs.ha.namenodes." + hdfsclustername, nameserviceids)

  for namenode in namenodes:
    nnid = namenode.attrib["invididual"]
    nnhost = namenode.attrib["host"]
    # 2.4.3> dfs.namenode.rpc-address.[nameservice ID].[name node ID]
    add_property(hdfs, "dfs.namenode.rpc-address." + hdfsclustername + "." + nnid, nnhost + ":8020")
    # 2.4.4> dfs.namenode.http-address.[nameservice ID].[name node ID]
    add_property(hdfs, "dfs.namenode.http-address." + hdfsclustername + "." + nnid, nnhost + ":50070")

  # 2.4.5> dfs.namenode.shared.edits.dir:
  # qjournal://host1:8485;host2:8485;.../<nameservice>
  qjournals = ("qjournal://"
               + ";".join(j.attrib["host"] + ":8485" for j in journalnodes)
               + "/" + hdfsclustername)
  add_property(hdfs, "dfs.namenode.shared.edits.dir", qjournals)

  # 2.4.6> dfs.client.failover.proxy.provider.[nameservice ID]
  add_property(hdfs, "dfs.client.failover.proxy.provider." + hdfsclustername,
               "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")

  # 2.4.7> dfs.ha.fencing.methods
  add_property(hdfs, "dfs.ha.fencing.methods", "sshfence")

  # 2.4.8> dfs.ha.fencing.ssh.private-key-files
  add_property(hdfs, "dfs.ha.fencing.ssh.private-key-files", "/root/.ssh/id_dsa")

  # 2.4.9> dfs.ha.automatic-failover.enabled
  add_property(hdfs, "dfs.ha.automatic-failover.enabled." + hdfsclustername, "true")

  # 2.4.10> dfs.journalnode.edits.dir
  add_property(hdfs, "dfs.journalnode.edits.dir", "/root/hdfs/journalnode")

  # 2.4.11> dfs.namenode.name.dir
  add_property(hdfs, "dfs.namenode.name.dir", "/root/hdfs/namenode")

  # 2.4.12> dfs.datanode.data.dir
  add_property(hdfs, "dfs.datanode.data.dir", "/root/hdfs/datanode")

  # 2.4.13> dfs.replication
  add_property(hdfs, "dfs.replication", "3")

  # 2.4.14> dfs.webhdfs.enabled (was emitted twice before; once is enough)
  add_property(hdfs, "dfs.webhdfs.enabled", "true")

  # 2.4.15> dfs.journalnode.http-address
  add_property(hdfs, "dfs.journalnode.http-address", "0.0.0.0:8480")

  # 2.4.16> dfs.journalnode.rpc-address
  add_property(hdfs, "dfs.journalnode.rpc-address", "0.0.0.0:8485")

  write_xml(hdfs_site, "config/hadoop/hdfs-site.xml")

  # 2.5> core-site.xml
  core = create_node("configuration", {}, "")
  core_site = ElementTree(core)

  # 2.5.1> fs.defaultFS
  add_property(core, "fs.defaultFS", "hdfs://" + hdfsclustername)

  # 2.5.2> hadoop.tmp.dir
  add_property(core, "hadoop.tmp.dir", "/root/hdfs/temp")

  # 2.5.3> ha.zookeeper.quorum
  add_property(core, "ha.zookeeper.quorum", zk_quorum)

  # 2.5.4> hadoop.proxyuser.root.hosts
  add_property(core, "hadoop.proxyuser.root.hosts", "*")

  # 2.5.5> hadoop.proxyuser.root.groups
  add_property(core, "hadoop.proxyuser.root.groups", "*")

  write_xml(core_site, "config/hadoop/core-site.xml")

  # 2.6> slaves: one datanode host per line
  with open("config/hadoop/slaves", "w") as slaves_file:
    slaves_file.writelines(dn.attrib["host"] + "\n" for dn in datanodes)

  # 3> yarn cluster
  yarncluster = get_node_by_keyvalue(clusterlist, {"type": "yarn"})
  yarnclustername = yarncluster[0].attrib["name"]
  yarnlist = yarncluster[0].findall("node")

  # 3.1> resourcemanager
  resourcemanagers = get_node_by_keyvalue(yarnlist, {"type": "resourcemanager"})

  # 3.2> yarn-site.xml
  yarn = create_node("configuration", {}, "")
  yarn_site = ElementTree(yarn)

  # 3.2.1> yarn.nodemanager.aux-services
  add_property(yarn, "yarn.nodemanager.aux-services", "mapreduce_shuffle")

  # 3.2.2> yarn.nodemanager.aux-services.mapreduce_shuffle.class
  add_property(yarn, "yarn.nodemanager.aux-services.mapreduce_shuffle.class",
               "org.apache.hadoop.mapred.ShuffleHandler")

  # 3.2.3> yarn.resourcemanager.ha.enabled
  add_property(yarn, "yarn.resourcemanager.ha.enabled", "true")

  # 3.2.4> yarn.resourcemanager.cluster-id
  add_property(yarn, "yarn.resourcemanager.cluster-id", yarnclustername)

  rmids = ",".join(rm.attrib["invididual"] for rm in resourcemanagers)
  for resourcemanager in resourcemanagers:
    rmid = resourcemanager.attrib["invididual"]
    rmhost = resourcemanager.attrib["host"]
    # 3.2.5> yarn.resourcemanager.hostname.rm-id
    add_property(yarn, "yarn.resourcemanager.hostname." + rmid, rmhost)
    # 3.2.6> yarn.resourcemanager.webapp.address.rm-id
    add_property(yarn, "yarn.resourcemanager.webapp.address." + rmid, rmhost + ":8088")
    # yarn.resourcemanager.ha.id would additionally be needed to launch more
    # than one RM on a single node:
    # add_property(yarn, "yarn.resourcemanager.ha.id", rmid)

  # 3.2.7> yarn.resourcemanager.ha.rm-ids
  add_property(yarn, "yarn.resourcemanager.ha.rm-ids", rmids)

  # 3.2.8> yarn.resourcemanager.zk-address
  add_property(yarn, "yarn.resourcemanager.zk-address", zk_quorum)

  # 3.2.9> yarn.resourcemanager.zk-state-store.address
  add_property(yarn, "yarn.resourcemanager.zk-state-store.address", zk_quorum)

  # yarn.resourcemanager.store.class
  add_property(yarn, "yarn.resourcemanager.store.class",
               "org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore")

  # 3.2.10> yarn.resourcemanager.connect.retry-interval.ms
  add_property(yarn, "yarn.resourcemanager.connect.retry-interval.ms", "2000")

  # 3.2.11> ha.zookeeper.quorum
  add_property(yarn, "ha.zookeeper.quorum", zk_quorum)

  # 3.2.12> yarn.resourcemanager.ha.automatic-failover.enabled
  add_property(yarn, "yarn.resourcemanager.ha.automatic-failover.enabled", "true")

  # 3.2.13> yarn.resourcemanager.recovery.enabled
  add_property(yarn, "yarn.resourcemanager.recovery.enabled", "true")

  # 3.2.14> yarn.app.mapreduce.am.scheduler.connection.wait.interval-ms
  add_property(yarn, "yarn.app.mapreduce.am.scheduler.connection.wait.interval-ms", "5000")

  # 3.2.15> yarn.client.failover-proxy-provider
  add_property(yarn, "yarn.client.failover-proxy-provider",
               "org.apache.hadoop.yarn.client.ConfiguredRMFailoverProxyProvider")

  write_xml(yarn_site, "config/hadoop/yarn-site.xml")
