package com.boco.blch.analysiser.compliance.HW.manager;

import com.boco.blch.analysiser.compliance.ComplianceResolved;

/**
 * HDFS audit-log record-completeness security-baseline check (applies to both
 * control nodes and data nodes).
 *
 * <p>The echoed result contains per-node sections delimited by
 * {@code <Node>-start} / {@code <Node>-end} markers (NameNode, JournalNode,
 * DataNode). A section is compliant when its {@code log4j.properties} content
 * contains all of the following entries (whitespace ignored):
 * <ul>
 *   <li>{@code log4j.logger.com.huawei.hadoop.datasight.hadoopCommonAuditLogger.audit=INFO,RFAAUDIT}</li>
 *   <li>{@code hdfs.audit.log.level=INFO}</li>
 *   <li>{@code log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=INFO,RFAAUDIT}</li>
 *   <li>{@code log4j.logger.org.apache.hadoop.hdfs.server.common.HadoopAuditLogger.audit=INFO,RFAAUDIT}</li>
 * </ul>
 *
 * @author admin
 */
public class BOCO_Hdfs_LogAudit_LogRecordComplete implements ComplianceResolved {

	// Required log4j.properties entries; compared after stripping all spaces
	// from the echoed configuration text.
	private static final String HADOOPCOMMONAUDITLOGGER_AUDIT = "log4j.logger.com.huawei.hadoop.datasight.hadoopCommonAuditLogger.audit=INFO,RFAAUDIT";
	private static final String LOG_LEVEL = "hdfs.audit.log.level=INFO";
	private static final String FSNAMESYSTEM_AUDIT = "log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=INFO,RFAAUDIT";
	private static final String HADOOPAUDITLOGGER_AUDIT = "log4j.logger.org.apache.hadoop.hdfs.server.common.HadoopAuditLogger.audit=INFO,RFAAUDIT";

	/**
	 * Evaluates the echoed configuration dump. The NameNode section is checked
	 * first; if it is absent or non-compliant, the JournalNode section is tried,
	 * then the DataNode section. SUCCESS is returned as soon as one section
	 * contains all required entries.
	 *
	 * @param echoResult raw command output containing marker-delimited sections
	 * @return {@code SUCCESS} when a compliant section is found, otherwise {@code FAILED}
	 */
	@Override
	public int realEchoResolved(String echoResult) {
		if (echoResult == null || echoResult.isEmpty()) {
			return FAILED;
		}
		int result = checkSection(echoResult, "NameNode");   // control node
		if (result == FAILED) {
			result = checkSection(echoResult, "JournalNode"); // control node
		}
		if (result == FAILED) {
			result = checkSection(echoResult, "DataNode");    // data node
		}
		return result;
	}

	/**
	 * Extracts the text between {@code <node>-start} and {@code <node>-end} and
	 * evaluates it. Returns FAILED when either marker is missing or the end
	 * marker does not follow the start marker (the previous implementation threw
	 * StringIndexOutOfBoundsException on such malformed input).
	 *
	 * @param echoResult full echoed output
	 * @param node marker prefix, e.g. {@code "NameNode"}
	 * @return SUCCESS when the section exists and is compliant, else FAILED
	 */
	private int checkSection(String echoResult, String node) {
		String startMarker = node + "-start";
		String endMarker = node + "-end";
		int start = echoResult.indexOf(startMarker);
		if (start == -1) {
			return FAILED;
		}
		int contentBegin = start + startMarker.length();
		// Search for the end marker only after the start marker so a stray
		// earlier occurrence cannot produce an invalid substring range.
		int end = echoResult.indexOf(endMarker, contentBegin);
		if (end == -1) {
			return FAILED; // malformed section: end marker missing
		}
		return getResult(echoResult.substring(contentBegin, end).trim());
	}

	/**
	 * Checks that a single section contains every required configuration entry.
	 * All spaces are removed first so formatting differences around '=' do not
	 * cause false negatives.
	 *
	 * @param strResult section content (may be null)
	 * @return SUCCESS when all four required entries are present, else FAILED
	 */
	private int getResult(String strResult) {
		if (strResult == null) {
			return FAILED;
		}
		String normalized = strResult.replace(" ", "");
		boolean compliant = normalized.contains(HADOOPCOMMONAUDITLOGGER_AUDIT)
				&& normalized.contains(LOG_LEVEL)
				&& normalized.contains(FSNAMESYSTEM_AUDIT)
				&& normalized.contains(HADOOPAUDITLOGGER_AUDIT);
		return compliant ? SUCCESS : FAILED;
	}
}
