package com.boco.blch.analysiser.compliance.HW.manager;

import com.boco.blch.analysiser.compliance.ComplianceResolved;
import com.boco.blch.analysiser.util.XmlUtil;

/**
 * NameNode与Secondary NameNode配置    控制节点
 * 【控制节点】
 * 1、找到配置文件 hdfs-site.xml配置项ha.zookeeper.quorum要有值；
 * 2、找到配置文件 core-site.xml的配置项：fs.defaultFS 要有值，如 hdfs://hacluster
 * 3、配置文件 hdfs-site.xml的配置项dfs.nameservices的值要与第二步的值相同（【】字体部分）；
 * 4、根据第三步的值，在hdfs-site.xml文件中查找 dfs.ha.namenodes.【hacluster】 节点，其值不为空；
 * 如 180，181
 * 5、查找配置项 dfs.namenode.https-address.hacluster.180
 * a)、dfs.namenode.rpc-address.hacluster.180
 * b)、dfs.namenode.https-address.hacluster.181
 * c)、dfs.namenode.rpc-address.hacluster.181值不为空；
 * 6、 hdfs-site.xml的配置项dfs.ha.automatic-failover.enabled的值 为 true;
 * 7、hdfs-site.xml的配置项dfs.client.failover.proxy.provider.hacluster的
 * 值为org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
 *
 * @author admin
 *
 */
public class BOCO_Hdfs_HAManage_MasterSlave implements ComplianceResolved {

	/*
	 * Compliance rule for HDFS NameNode HA on control nodes:
	 * 1. hdfs-site.xml: ha.zookeeper.quorum must be non-empty.
	 * 2. core-site.xml: fs.defaultFS must be non-empty, e.g. hdfs://hacluster;
	 *    the part after "//" (the nameservice id) is extracted.
	 * 3. hdfs-site.xml: dfs.ha.namenodes.<nameservice> must be non-empty
	 *    (comma-separated NameNode ids, e.g. "180,181").
	 * 4. For every NameNode id, both
	 *    dfs.namenode.https-address.hacluster.<id> and
	 *    dfs.namenode.rpc-address.hacluster.<id> must be non-empty.
	 * 5. hdfs-site.xml: dfs.ha.automatic-failover.enabled must be "true".
	 * 6. hdfs-site.xml: dfs.client.failover.proxy.provider.hacluster must be
	 *    org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider.
	 *
	 * NOTE(review): the original spec (step 3 of the class Javadoc) also requires
	 * dfs.nameservices to equal the fs.defaultFS nameservice; this check has never
	 * been implemented here — confirm whether it should be added.
	 */
	private static final String FS_DEFAULTFS = "fs.defaultFS"; // core-site.xml, e.g. hdfs://hacluster
	private static final String HA_ZOOKEEPER_QUORUM = "ha.zookeeper.quorum"; // hdfs-site.xml, must be non-empty
	private static final String DFS_HA_NAMENODES = "dfs.ha.namenodes."; // + nameservice; value e.g. "180,181"
	private static final String DFS_NAMENODE_HTTPS_ADDRESS_HACLUSTER = "dfs.namenode.https-address.hacluster.";
	private static final String DFS_NAMENODE_RPC_ADDRESS_HACLUSTER = "dfs.namenode.rpc-address.hacluster.";
	private static final String DFS_HA_AUTOMATIC_FAILOVER_ENABLED = "dfs.ha.automatic-failover.enabled"; // must be "true"
	private static final String DFS_CLIENT_PPH_NAME = "dfs.client.failover.proxy.provider.hacluster";
	private static final String DFS_CLIENT_PPH_VALUE = "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider";

	/**
	 * Evaluates the echoed command output. The output may contain a
	 * "ResourceManager-start/-end" and/or a "JobHistoryServer-start/-end"
	 * section (control nodes); the first section that passes all checks
	 * yields SUCCESS.
	 *
	 * @param echoResult raw echoed output; may be null or empty
	 * @return SUCCESS if any control-node section is compliant, else FAILED
	 */
	@Override
	public int realEchoResolved(String echoResult) {
		if (echoResult == null || echoResult.length() == 0) {
			return FAILED;
		}
		int result = FAILED;
		// Control-node section 1: ResourceManager.
		String section = between(echoResult, "ResourceManager-start", "ResourceManager-end");
		if (section != null) {
			result = getResult(section.trim());
		}
		// Control-node section 2: JobHistoryServer (only if the first failed).
		if (result == FAILED) {
			section = between(echoResult, "JobHistoryServer-start", "JobHistoryServer-end");
			if (section != null) {
				result = getResult(section.trim());
			}
		}
		return result;
	}

	/**
	 * Runs all compliance checks on one control-node section.
	 * Fails closed: any missing marker, missing file section or missing/empty
	 * configuration value yields FAILED.
	 *
	 * @param echoResult one control-node section of the echoed output
	 * @return SUCCESS if compliant, else FAILED
	 */
	private int getResult(String echoResult) {
		if (echoResult == null) {
			return FAILED;
		}
		// core-site.xml content.
		String coreXml = between(echoResult, "core-site-start", "core-site-end");
		if (coreXml == null || coreXml.length() == 0) {
			return FAILED;
		}
		// fs.defaultFS must be non-empty and of the form scheme://nameservice.
		String fsDefault = configValue(coreXml, FS_DEFAULTFS);
		int sep = fsDefault.indexOf("//");
		if (sep == -1) {
			return FAILED;
		}
		// Nameservice id, e.g. "hacluster" from "hdfs://hacluster".
		String nameService = fsDefault.substring(sep + 2).trim();
		if (nameService.length() == 0) {
			return FAILED;
		}
		// hdfs-site.xml content.
		String hdfsXml = between(echoResult, "hdfs-site-start", "hdfs-site-end");
		if (hdfsXml == null || hdfsXml.length() == 0) {
			return FAILED;
		}
		// ha.zookeeper.quorum must be non-empty.
		if (configValue(hdfsXml, HA_ZOOKEEPER_QUORUM).length() == 0) {
			return FAILED;
		}
		// Automatic failover must be enabled.
		if (!"true".equals(configValue(hdfsXml, DFS_HA_AUTOMATIC_FAILOVER_ENABLED))) {
			return FAILED;
		}
		// The client failover proxy provider must be the configured-failover one.
		if (!DFS_CLIENT_PPH_VALUE.equals(configValue(hdfsXml, DFS_CLIENT_PPH_NAME))) {
			return FAILED;
		}
		// dfs.ha.namenodes.<nameservice> must list the NameNode ids, e.g. "180,181".
		String nameNodesVal = configValue(hdfsXml, DFS_HA_NAMENODES + nameService);
		if (nameNodesVal.length() == 0) {
			return FAILED;
		}
		// Every listed NameNode id needs non-empty https and rpc addresses.
		// (split on "," also covers the single-id case with a one-element array)
		String[] nodeIds = nameNodesVal.split(",");
		for (int i = 0; i < nodeIds.length; i++) {
			String id = nodeIds[i].trim();
			String httpsAddr = configValue(hdfsXml, DFS_NAMENODE_HTTPS_ADDRESS_HACLUSTER + id);
			String rpcAddr = configValue(hdfsXml, DFS_NAMENODE_RPC_ADDRESS_HACLUSTER + id);
			if (httpsAddr.length() == 0 || rpcAddr.length() == 0) {
				return FAILED;
			}
		}
		return SUCCESS;
	}

	/**
	 * Returns the text between the first occurrence of {@code startMarker}
	 * and the first occurrence of {@code endMarker}, or null if either marker
	 * is absent or the end precedes the start. Guards against the
	 * StringIndexOutOfBoundsException the unguarded substring calls used to throw.
	 *
	 * @param text        text to search; must not be null
	 * @param startMarker opening marker
	 * @param endMarker   closing marker
	 * @return the enclosed text, or null if the markers are not properly present
	 */
	private static String between(String text, String startMarker, String endMarker) {
		int start = text.indexOf(startMarker);
		int end = text.indexOf(endMarker);
		if (start == -1 || end == -1) {
			return null;
		}
		start += startMarker.length();
		if (end < start) {
			return null;
		}
		return text.substring(start, end);
	}

	/**
	 * Null-safe lookup of a configuration value: returns the trimmed value, or
	 * "" when XmlUtil returns null (the original code trimmed before the null
	 * check and could throw NullPointerException).
	 *
	 * @param xml  XML content to search
	 * @param name configuration property name
	 * @return trimmed value, never null
	 */
	private static String configValue(String xml, String name) {
		String value = XmlUtil.getValueByName(xml, name);
		return value == null ? "" : value.trim();
	}

}
