package com.talkingdata.hadoop.service

import com.fasterxml.jackson.databind.{JsonNode, ObjectMapper}
import com.talkingdata.hadoop.bean._
import com.talkingdata.hadoop.util.{LoadUrl, ShowTime}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.security.UserGroupInformation
import org.apache.hadoop.util.StringUtils
import org.apache.log4j.Logger

import scala.collection.mutable
import scala.math._

class HadoopService(xc: XmlConfig) {
	@transient lazy val log = Logger.getLogger(this.getClass)
	val TIMESTAMP = ShowTime().getTimestamp()
	var activeNameNode = ""
	var VERSION:String = xc.version.toString
	var activeRM = ""
	var dfsParentUrl = ""
	var rpcPortUi = ""
	var nameNodeStatusUrl = ""
	var nameNodeInfoUrl = ""
	var FSNamesystemUrl = ""
	var yarnParentUrl = ""
	var yarnNameServices = ""
	var yarnClusterMetricsUrl = ""
	var yarnClusterSchedulerUrl = ""
	var yarnClusterAppsUrl = ""
	var schedulerType = xc.scheduler
	val conf = new Configuration()
	val coreFile = new Path(xc.confDir + "core-site.xml")
	val hdfsFile = new Path(xc.confDir + "hdfs-site.xml")
	val mapredFile = new Path(xc.confDir + "mapred-site.xml")
	val yarnFile = new Path(xc.confDir + "yarn-site.xml")
	conf.addResource(coreFile)
	conf.addResource(hdfsFile)
	conf.addResource(mapredFile)
	conf.addResource(yarnFile)
	if (xc.kerberos) {
		System.setProperty("java.security.krb5.conf", xc.confDir + "krb5.conf")
		System.setProperty("sun.security.krb5.debug", "true")
		UserGroupInformation.setConfiguration(conf)
		val keytabFile = xc.kerberosKeytab
		UserGroupInformation.loginUserFromKeytab(xc.kerberosUser, keytabFile)
	}
	yarnNameServices = conf.get("yarn.resourcemanager.cluster-id")
	println(UserGroupInformation.getLoginUser)

	def getRMState(): mutable.Set[RmState] = {
		val rmStateSet: mutable.Set[RmState] = mutable.Set()
		if (xc.serviceId == "bjxgapp") {
			var serviceState = "none"
			val httpAddress = conf.get("yarn.resourcemanager.webapp.address")
			yarnParentUrl = "http://" + httpAddress + "/ws/v1/cluster/"
			activeRM = httpAddress.split(":")(0)
			yarnNameServices = "bjxgappyarn01"
			try {
				val clusterInfo = LoadUrl().getOneMetricOnYarn(yarnParentUrl, "clusterInfo")
				serviceState = clusterInfo.get("haState").asText
			} catch {
				case e: Exception => print(e.getStackTrace)
			}
			val rmState = RmState(
				1,
				yarnNameServices,
				"single",
				activeRM,
				httpAddress,
				serviceState,
				TIMESTAMP
			)

			log.info(rmState.toString)
			rmStateSet.add(rmState)
		} else if (xc.serviceId == "bjdcnameservices01") {
			var serviceState = "none"
			val httpAddress = conf.get("yarn.resourcemanager.webapp.address")
			yarnParentUrl = "http://" + httpAddress + "/ws/v1/cluster/"
			activeRM = httpAddress.split(":")(0)
			yarnNameServices = "bjdchbase01yarn01"
			try {
				val clusterInfo = LoadUrl().getOneMetricOnYarn(yarnParentUrl, "clusterInfo")
				serviceState = clusterInfo.get("haState").asText
			} catch {
				case e: Exception => print(e.getStackTrace)
			}
			val rmState = RmState(
				1,
				yarnNameServices,
				"single",
				activeRM,
				httpAddress,
				serviceState,
				TIMESTAMP
			)
			log.info(rmState.toString)
			rmStateSet.add(rmState)
		}
		else {
			for (rmId <- conf.get("yarn.resourcemanager.ha.rm-ids").split(",")) {
				var serviceState = "none"
				val httpAddressMetric = "yarn.resourcemanager.webapp.address." + rmId
				val httpAddress = conf.get(httpAddressMetric)
				val hostname = httpAddress.split(":")(0)
				val RmUrl = "http://" + httpAddress + "/ws/v1/cluster/info"
				try {
					val clusterInfo = LoadUrl().getOneMetricOnYarn(RmUrl, "clusterInfo")
					serviceState = clusterInfo.get("haState").asText
					if ("ACTIVE".equals(serviceState)) {
						activeRM = hostname;
						yarnParentUrl = "http://" + httpAddress + "/ws/v1/cluster/"
					}
				} catch {
					case e: Exception => print(e.getStackTrace)
				}
				val rmState = RmState(
					1,
					yarnNameServices,
					rmId,
					hostname,
					httpAddress,
					serviceState,
					TIMESTAMP
				)
				log.info(rmState.toString)
				rmStateSet.add(rmState)
			}
		}
		yarnClusterMetricsUrl = yarnParentUrl + "metrics"
		yarnClusterSchedulerUrl = yarnParentUrl + "scheduler"
		yarnClusterAppsUrl = yarnParentUrl + "apps"
		rmStateSet
	}

	def getCapacityScheduler(): Unit = {
		val CapacitySchedulerSet: mutable.Set[SchedulerState] = mutable.Set()
		println("yarnClusterSchedulerUrl " + yarnClusterSchedulerUrl)
		val scheduler = LoadUrl().getOneMetricOnYarn(yarnClusterSchedulerUrl,"scheduler").path("schedulerInfo")
		println( scheduler.toString)
		val cm = scheduler.get("capacities").get("queueCapacitiesByPartition").get(0).get("configuredMaxResource")
		val rootMem = cm.get("memory").longValue
		val strMemory = StringUtils.byteDesc(rootMem * Math.pow(1024, 2).toLong)
		println(cm.toString+"\nrootMem ")
		/*println("queueName " + scheduler.get("queueName").asText +
			"\nstrMemory " + strMemory +
			"\nrootMem " + rootMem +
			"\nvCores " + cm.get("vCores").intValue +
			"\nmaxCapacity " + scheduler.get("maxCapacity").longValue +
			"\nusedCapacity " + rootMem +
			"\nschedulerType " + schedulerType +
			"\nTIMESTAMP " + TIMESTAMP
		)
		val schedulerRoot = SchedulerState(
			yarnNameServices,
			scheduler.get("queueName").asText, strMemory, rootMem, cm.get("vCores").intValue,
			scheduler.get("maxCapacity").longValue, scheduler.get("usedCapacity").doubleValue,
			scheduler.get("usedCapacity").doubleValue,schedulerType , TIMESTAMP)
	*/}
	def getYarnClusterStatus(): YarnClusterState = {
		var yarnClusterState: YarnClusterState = null
		val clusterMetrics = LoadUrl().getOneMetricOnYarn(yarnClusterMetricsUrl, "clusterMetrics")
		val availableMB = clusterMetrics.get("availableMB").asLong() * (pow(1024, 2).toLong)
		val allocatedMB = clusterMetrics.get("allocatedMB").longValue * Math.pow(1024, 2).toLong
		val totalMB = clusterMetrics.get("totalMB").longValue * Math.pow(1024, 2).toLong
		val availableByteDesc = StringUtils.byteDesc(availableMB)
		val allocatedMByteDesc = StringUtils.byteDesc(allocatedMB)
		val totalByteDesc = StringUtils.byteDesc(totalMB)
		val decommissioningNodes = 0
		val shutdownNodes = 0
		yarnClusterState = YarnClusterState(
			1,
			yarnNameServices,
			clusterMetrics.get("appsSubmitted").intValue(),
			clusterMetrics.get("appsCompleted").intValue(),
			clusterMetrics.get("appsFailed").intValue,
			clusterMetrics.get("availableMB").longValue,
			availableByteDesc,
			clusterMetrics.get("allocatedMB").longValue,
			allocatedMByteDesc,
			clusterMetrics.get("totalMB").longValue,
			totalByteDesc,
			clusterMetrics.get("totalVirtualCores").intValue,
			clusterMetrics.get("totalNodes").intValue,
			clusterMetrics.get("lostNodes").intValue,
			clusterMetrics.get("unhealthyNodes").intValue,
			decommissioningNodes,
			clusterMetrics.get("decommissionedNodes").intValue,
			clusterMetrics.get("activeNodes").intValue,
			shutdownNodes,
			VERSION,
			TIMESTAMP
		)
		log.info(yarnClusterState)
		yarnClusterState
	}

	def getNamenodeState(): mutable.Set[NameNodeState] = {
		val namenodeStateSet: mutable.Set[NameNodeState] = mutable.Set()
		val str = "dfs.ha.namenodes." + xc.serviceId
		for (nnId <- conf.get(str).split(",")) {
			val metric = "dfs.namenode.rpc-address." + xc.serviceId + "." + nnId
			val httpAddress = conf.get("dfs.namenode.http-address." + xc.serviceId + "." + nnId)
			val rpcAddress = conf.get(metric)
			val address = httpAddress.split(":")(0)
			rpcPortUi = httpAddress.split(":")(1)
			val nameNodeUrl = "http://" + address + ":" + rpcPortUi + "/jmx?qry=Hadoop:service=NameNode,name=NameNodeStatus"
			var ServiceState = "Error"
			try ServiceState = LoadUrl().getOneMetric(nameNodeUrl, "State").asText()
			if (ServiceState == "active") {
				activeNameNode = address
			}
			val nn = NameNodeState(1, xc.serviceId, nnId, address, ServiceState, rpcAddress, httpAddress, ShowTime().getTimestamp())
			log.info(nn)
			namenodeStateSet.add(nn)
		}
		dfsParentUrl = "http://" + activeNameNode + ":" + rpcPortUi + "/jmx?qry="
		nameNodeStatusUrl = dfsParentUrl + "Hadoop:service=NameNode,name=NameNodeStatus"
		nameNodeInfoUrl = dfsParentUrl + "Hadoop:service=NameNode,name=NameNodeInfo"
		FSNamesystemUrl = dfsParentUrl + "Hadoop:service=NameNode,name=FSNamesystem"
		namenodeStateSet
	}

	def getDfsClusterStatus(): DfsClusterState = {
		val om = new ObjectMapper
		var safemode = "true"
		try {
			if ("" == LoadUrl().getOneMetric(nameNodeInfoUrl, "Safemode").asText()) {
				safemode = "false"
			}
		}
		catch {
			case e: Exception => println(e.printStackTrace())
				safemode = "error"
		}
		// isSecurity 没有找到这个指标
		val security = "null"
		val capacity = LoadUrl().getOneMetric(nameNodeInfoUrl, "Total").asLong()
		val capacityByteDesc = StringUtils.byteDesc(capacity)
		val totalBlocks = LoadUrl().getOneMetric(nameNodeInfoUrl, "TotalBlocks").asLong()
		val totalFiles = LoadUrl().getOneMetric(nameNodeInfoUrl, "TotalFiles").asLong()
		val percentUsed = LoadUrl().getOneMetric(nameNodeInfoUrl, "PercentUsed").asDouble()
		val missingBlocks = LoadUrl().getOneMetric(FSNamesystemUrl, "MissingBlocks").asInt()
		val underReplicatedBlocks = LoadUrl().getOneMetric(FSNamesystemUrl, "UnderReplicatedBlocks").asLong()
		val liveNodes = LoadUrl().getOneMetric(nameNodeInfoUrl, "LiveNodes").asText()
		val DeadNodes = LoadUrl().getOneMetric(nameNodeInfoUrl, "DeadNodes").asText()
		val DecomNodes = LoadUrl().getOneMetric(nameNodeInfoUrl, "DecomNodes").asText()
		val numLiveNodes = om.readTree(liveNodes).size()
		val numDeadNodes = om.readTree(DeadNodes).size()
		val numDecomNodes = om.readTree(DecomNodes).size()
		val DecomDeadDataNodes = 0
		val cluster = DfsClusterState(
			1,
			xc.serviceId,
			safemode,
			security,
			activeNameNode,
			capacity,
			capacityByteDesc,
			totalBlocks,
			totalFiles,
			percentUsed,
			missingBlocks,
			underReplicatedBlocks,
			numLiveNodes,
			numDeadNodes,
			numDecomNodes,
			DecomDeadDataNodes,
			VERSION,
			TIMESTAMP
		)
		log.info(cluster.toString)
		cluster
	}
}
