package mammuthus.web.service

import net.csdn.common.path.Url
import net.liftweb.{json => SJSon}
import com.webdm.common.tools.Md5
import java.net.URLDecoder
import java.util.concurrent.atomic.AtomicInteger
import scala.Some
import mammuthus.web.bean.ShellResult


/**
 * 6/7/15 WilliamZhu(allwefantasy@gmail.com)
 */
trait HadoopConfigure {
  this: SlaveService =>

  def cleanHadoop(appName: String) = {
    // Tear down the whole Hadoop deployment: stop every daemon first,
    // then wipe the journal and data directories on all cluster nodes.
    val stopSteps: List[(String, String) => Any] =
      List(startDataNodes, startNameNodes, startZkfc, startQJM)
    stopSteps.foreach(step => step(appName, "stop"))

    haddoopC(appName, findHadoopNodes(appName), encode("rm -rf /data/1/journal/node/local/;rm -rf /data/2"))
  }

  def findHadoopNodes(appName: String) = {
    // Every host that plays any Hadoop role (journal, name or data node),
    // de-duplicated and returned as a comma-separated string.
    val roleKeys = List("qjm", "nameNodes", "dataNodes")
    roleKeys
      .flatMap(key => getAPPExtra(appName, key).split(","))
      .toSet
      .mkString(",")
  }

  def addSlaves(appName: String, slaves: String) = {
    // Grow an existing Hadoop cluster: install Hadoop on the comma-separated
    // `slaves`, push configuration, wait for them to register, persist the new
    // topology and synchronize /etc/hosts entries everywhere.
    // Returns a buffer of {"name" -> step description, "rs" -> step result} maps.

    val nameNode = getAPPExtra(appName, "nameNodes").split(",")(0)
    val oldNodes = findHadoopNodes(appName)
    // NOTE(review): the extra key "appName" looks odd here — confirm it is the
    // intended key for counting registered app items.
    val originSize = getAPPExtraItems(appName, "appName").size

    val sres = new scala.collection.mutable.ArrayBuffer[Map[String, AnyRef]]()

    sres += Map("name" -> "install hadoop in new slaves", "rs" -> installNewSlaves(appName, slaves))

    // Push the cluster configuration files to the new slaves.
    sres ++= configNewSlaves(appName, nameNode, slaves,
      List("hdfs-site.xml", "core-site.xml", "yarn-site.xml", "mapred-site.xml"), "etc/hadoop")

    // Block until every new slave has reported in.
    // (was: the call carried redundant type ascriptions, e.g. `appName: String`)
    waitUntilAllNewSlavesReady(appName, originSize, slaves)

    // Persist the updated topology: merge the new slaves into dataNodes…
    val dataNodesNow = getAPPExtra(appName, "dataNodes").split(",").toSet ++
      slaves.split(",").filterNot(f => f.isEmpty).toSet
    putAPPExtra(appName, Map("dataNodes" -> dataNodesNow.mkString(",")))
    // …and re-write nameNodes/qjm unchanged so all extras go through the same
    // persistence round. (was: `.map` used purely for side effects — foreach
    // states the intent and avoids building a throwaway list)
    List("nameNodes", "qjm").foreach { f =>
      putAPPExtra(appName, Map(f -> getAPPExtra(appName, f)))
    }

    sres += Map("name" -> "update hadoop app info", "rs" -> ShellResult(true, "", "", List()))

    // Add host-name entries on the new nodes…
    val newNodesHostNames = sycHosts(appName, slaves)
    sres += Map("name" -> s"add hostNames for new nodes $slaves", "rs" -> newNodesHostNames)
    // …and teach the existing nodes about the newcomers.
    val oldNodesHostNames = sycHosts(appName, oldNodes, slaves)
    sres += Map("name" -> s"add hostNames for old nodes $oldNodes", "rs" -> oldNodesHostNames)

    // The ZooKeeper hosts must be resolvable from the cluster nodes as well.
    val zookeeperHostNames = getAPPExtraItems("zookeeper", "appName").map(f => f.hostName).mkString(",")
    sycHosts(appName, oldNodes, zookeeperHostNames)

    sres
  }

  def hadoopRunningCheck(appName: String): Boolean = {
    // Ask a NameNode for `hdfs dfsadmin -report` and decide whether the cluster
    // is alive: the command must have completed on at least one node AND the
    // report must show at least one live datanode.
    // NOTE(review): the report command uses `${installPath(appName)}/bin/hdfs`
    // while every other command in this trait uses `${installPath(appName)}/$appName/bin`
    // — confirm which path layout is correct.
    val nodes = getAPPExtra(appName, "nameNodes")
    val res = haddoopC(appName, nodes, encode(s"${installPath(appName)}/bin/hdfs dfsadmin -report"))

    val anySucceeded = res.taskInfos.exists(f => f.status.finished && !f.status.isError && !f.status.isTimeout)
    if (!anySucceeded) false
    else {
      // BUGFIX: the original returned the result of `.filter` (an Array[String])
      // from this branch, widening the method's type to Any and never actually
      // answering the question. `.exists` yields the intended Boolean.
      val liveDataNodes = raw"Live\s+datanodes\s+\((\d+)\)".r
      res.taskInfos(0).message.split("\n").exists { line =>
        liveDataNodes.findFirstMatchIn(line) match {
          case Some(m) => m.group(1).toInt > 0
          case None => false
        }
      }
    }
  }



  def startQJM(appName: String, action: String = "start") = {
    // Start (or stop) the QJM journal-node daemons. The journal storage
    // directory must exist before the daemon is launched, so create it first.
    val nodes = getAPPExtra(appName, "qjm")
    haddoopC(appName, nodes, encode("mkdir -p /data/1/journal/node/local/data/mammuthus/current"))

    // e.g. /data/1/usr/local/applications/hadoop
    val daemonCmd = s"${installPath(appName)}/$appName/sbin/hadoop-daemon.sh --script hdfs ${action} journalnode"
    haddoopC(appName, nodes, encode(daemonCmd))
  }


  ///data/1/usr/local/applications/hadoop/bin/hdfs zkfc -formatZK
  // e.g. /data/1/usr/local/applications/hadoop/bin/hdfs zkfc -formatZK
  def formatZK(appName: String) = {
    // Initialize the HA state in ZooKeeper; runs on the first NameNode only.
    val firstNameNode = getAPPExtra(appName, "nameNodes").split(",").head
    haddoopC(appName, firstNameNode,
      encode(s"${installPath(appName)}/$appName/bin/hdfs zkfc -formatZK -force"))
  }

  def startZkfc(appName: String, action: String = "start") = {
    // Start (or stop) the ZKFC failover controller on every NameNode.
    // Consistency: reuse hadoopCommand instead of hand-building the daemon
    // invocation with `sbin/../etc/hadoop/` — both resolve to the same config dir.
    val nodes = getAPPExtra(appName, "nameNodes")
    val shellCommand = encode(s"${hadoopCommand(appName)} ${action} zkfc")
    haddoopC(appName, nodes, shellCommand)
  }

  def formatNameNodes(appName: String) = {
    // Format HDFS metadata on the first (primary) NameNode only; the standby
    // gets its copy later via copyNameNodes.
    // Robustness: the original destructured with `val Array(node1, node2)`,
    // which throws a MatchError unless there are exactly two name nodes and
    // never used node2 — take the first entry, as formatZK does.
    val node1 = getAPPExtra(appName, "nameNodes").split(",")(0)

    // e.g. /data/1/usr/local/applications/hadoop
    val shellCommand = encode(s"${installPath(appName)}/$appName/bin/hdfs namenode -format -force")
    haddoopC(appName, node1, shellCommand)
  }

  def copyNameNodes(appName: String) = {
    // Replicate the freshly-formatted NameNode metadata from the primary
    // (node1) to the standby (node2) by tar-ing it, uploading to the master,
    // having node2 download it, and untar-ing it in place.
    // NOTE(review): assumes exactly two comma-separated nameNodes; anything
    // else throws a MatchError here.
    val Array(node1, node2) = getAPPExtra(appName, "nameNodes").split(",")
    val nameNodeMetaBaseDir = "/data/2"
    // Random MD5 file name so concurrent copies cannot collide on the master.
    val dfsNameFile = Md5.MD5(Math.random() + "_" + System.currentTimeMillis()) + ".tar.gz"
    val uploadUrl = s"${masterUrl}/upload?${authTokenStr}fileName=${dfsNameFile}"
    val curlCommand = s"""curl -v -X POST -T "${nameNodeMetaBaseDir}/${appName}/dfs/${dfsNameFile}" "${uploadUrl}" """
    // On node1: tar the `name` metadata dir, then curl-upload it to the master.
    val uploadShellCommand = encode(s"cd ${nameNodeMetaBaseDir}/${appName}/dfs;tar czvf ${dfsNameFile} name;${curlCommand}")

    val shellRes = haddoopC(appName, node1, uploadShellCommand)
    logger.info("upload file:" + shellRes.tasks)

    // Create the target directory on the standby.
    haddoopC(appName, node2, encode(s"mkdir -p ${nameNodeMetaBaseDir}/${appName}/dfs"))

    // Ask the master to push the uploaded archive down to node2.
    val masterFileUrl = s"${masterUrl}/${dfsNameFile}"
    val downloadUrl = s"${masterUrl}/block/download"
    val filePath = encode(s"${nameNodeMetaBaseDir}/${appName}/dfs/${dfsNameFile}")
    val res = httpClient.get(new Url(downloadUrl).query(s"${authTokenStr}slaves=${node2}&url=${encode(masterFileUrl)}&filePath=$filePath"))
    logger.info("文件下载:" + res.getContent)

    // Extract the archive on the standby.
    val tarShellCommand = encode(s"cd ${nameNodeMetaBaseDir}/${appName}/dfs;tar xzvf ${dfsNameFile}")
    haddoopC(appName, node2, tarShellCommand)

    // Sanity check: list the restored metadata directory on node2.
    haddoopC(appName, node2, encode(s"ls ${nameNodeMetaBaseDir}/${appName}/dfs/name"))
  }

  ///data/1/usr/local/applications/hadoop/sbin/hadoop-daemon.sh --config /data/1/usr/local/applications/hadoop/sbin/../etc/hadoop/ start namenode
  // e.g. .../hadoop/sbin/hadoop-daemon.sh --config .../etc/hadoop/ start namenode
  def startNameNodes(appName: String, action: String = "start") = {
    // Apply `action` (start/stop) to the namenode daemon on every NameNode host.
    val command = s"${hadoopCommand(appName)} ${action} namenode"
    haddoopC(appName, getAPPExtra(appName, "nameNodes"), encode(command))
  }


  def startNodeManager(appName: String, action: String = "start") = {
    // Apply `action` to the YARN nodemanager daemon on every data node.
    val command = s"${yarnCommand(appName)} ${action} nodemanager"
    haddoopC(appName, getAPPExtra(appName, "dataNodes"), encode(command))
  }

  def startResourceManager(appName: String, action: String = "start") = {
    // Apply `action` to the YARN resourcemanager daemon on its configured hosts.
    val command = s"${yarnCommand(appName)} ${action} resourcemanager"
    haddoopC(appName, getAPPExtra(appName, "yarnResourceManager"), encode(command))
  }


  def startDataNodes(appName: String, action: String = "start") = {
    // Apply `action` to the datanode daemon on every registered data node.
    val command = s"${hadoopCommand(appName)} ${action} datanode"
    haddoopC(appName, getAPPExtra(appName, "dataNodes"), encode(command))
  }

  def startSpecificDataNodes(appName: String, slaves: String, action: String = "start") = {
    // Like startDataNodes, but targets an explicit comma-separated host list
    // instead of the registered "dataNodes" extra.
    haddoopC(appName, slaves, encode(s"${hadoopCommand(appName)} ${action} datanode"))
  }

  def hadoopCommand(appName: String) = {
    // Base hadoop-daemon.sh invocation with an explicit --config directory.
    val home = s"${installPath(appName)}/$appName"
    s"$home/sbin/hadoop-daemon.sh --config $home/etc/hadoop/"
  }

  def yarnCommand(appName: String) = {
    // Base yarn-daemon.sh invocation with an explicit --config directory.
    val home = s"${installPath(appName)}/$appName"
    s"$home/sbin/yarn-daemon.sh --config $home/etc/hadoop/"
  }



}
