<?xml version="1.0" encoding="UTF-8"?>
<!-- Site specific YARN configuration properties --><configuration>
	<property>
        <name>yarn.resourcemanager.ha.enabled</name>
        <value>true</value>
	</property>
	<!-- Whether to enable automatic failover. Enabled by default when HA is enabled. -->
	<property>
	    <name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
	    <value>true</value>
	</property>
	<!-- Use the built-in (embedded) automatic failover. Enabled by default when HA is enabled. -->
	<property>
	    <name>yarn.resourcemanager.ha.automatic-failover.embedded</name>
	    <value>true</value>
	</property>
	<property>
        <name>yarn.resourcemanager.cluster-id</name>
        <value>yarn-rm-cluster</value>
	</property>
	<property>
	    <name>yarn.resourcemanager.ha.rm-ids</name>
	    <value>rm1,rm2</value>
	</property>
	<property>
	    <name>yarn.resourcemanager.hostname.rm1</name>
	    <value>{{ hostvars[groups['hadoop'][0]]['hostname'] }}</value>
	</property>
	<property>
	    <name>yarn.resourcemanager.hostname.rm2</name>
	    <value>{{ hostvars[groups['hadoop'][1]]['hostname'] }}</value>
	</property>
<!-- NOTE(review): this is a CapacityScheduler setting; it has no effect while
     yarn.resourcemanager.scheduler.class below selects the FairScheduler. -->
<property>
    <name>yarn.scheduler.capacity.resource-calculator</name>
    <value>org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator</value>
  </property>

  <property>
    <name>yarn.resourcemanager.scheduler.class</name>
    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler</value>
  </property>
	<!-- Enable ResourceManager automatic recovery -->
	<property>
        <name>yarn.resourcemanager.recovery.enabled</name>
        <value>true</value>
	</property>
	<!-- Persist ResourceManager state in ZooKeeper so recovery works across failover.
	     Replaces the misspelled/deprecated key yarn.resourcemanager.zk.state-store.address;
	     the ZooKeeper quorum itself is given by yarn.resourcemanager.zk-address below. -->
	<property>
        <name>yarn.resourcemanager.store.class</name>
        <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
	</property>
	<property>
        <name>yarn.resourcemanager.zk-address</name>
        <value>{{ hadoop_zk_addr }}</value>
	</property>
        <property>
            <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
            <value>{{ hostvars[groups['hadoop'][0]]['hostname'] }}:8031</value>
        </property>
        <property>
            <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
            <value>{{ hostvars[groups['hadoop'][1]]['hostname'] }}:8031</value>
        </property>
	<property>
            <name>yarn.resourcemanager.address.rm1</name>
            <value>{{ hostvars[groups['hadoop'][0]]['hostname'] }}:8032</value>
	</property>
	<property>
            <name>yarn.resourcemanager.address.rm2</name>
            <value>{{ hostvars[groups['hadoop'][1]]['hostname'] }}:8032</value>
	</property>
	<property>
            <name>yarn.resourcemanager.scheduler.address.rm1</name>
            <value>{{ hostvars[groups['hadoop'][0]]['hostname'] }}:8034</value>
	</property>
	<property>
            <name>yarn.resourcemanager.webapp.address.rm1</name>
            <value>{{ hostvars[groups['hadoop'][0]]['ansible_host'] }}:8088</value>
	</property>
	<property>
            <name>yarn.resourcemanager.scheduler.address.rm2</name>
            <value>{{ hostvars[groups['hadoop'][1]]['hostname'] }}:8034</value>
	</property>
	<property>
            <name>yarn.resourcemanager.webapp.address.rm2</name>
            <value>{{ hostvars[groups['hadoop'][1]]['ansible_host'] }}:8088</value>
	</property>

	<!-- Configurations for ResourceManager and NodeManager -->
	<property>
		<name>yarn.acl.enable</name>
		<value>true</value>
		<description>Enable ACLs? Defaults to false.</description>
	</property>
	<property>
		<name>yarn.admin.acl</name>
		<value>*</value>
	</property>	
	<property>
		<name>yarn.log-aggregation-enable</name>
		<value>true</value>
		<description>Configuration to enable or disable log aggregation</description>
	</property>
	<!-- spark on yarn -->
	<property>
		<name>yarn.scheduler.minimum-allocation-mb</name>
		<value>1024</value>
		<description>spark调度时一个container能够申请的最小资源，默认值为1024MB</description>
	</property>
	<property>
		<name>yarn.scheduler.maximum-allocation-mb</name>
		<value>28672</value>
		<description>spark调度时一个container能够申请的最大资源，默认值为8192MB</description>
	</property>
	<property>
		<name>yarn.nodemanager.resource.memory-mb</name>
		<value>{{ hadoop_vmemory * 1024 }}</value>
		<description>nodemanager能够申请的最大内存，默认值为8192MB</description>
	</property>
	<property>
		<name>yarn.app.mapreduce.am.resource.mb</name>
		<value>28672</value>
		<description>AM能够申请的最大内存，默认值为1536MB</description>
	</property>
	<!-- Configurations for NodeManager -->
	<property>
		<name>yarn.nodemanager.log.retain-seconds</name>
		<value>10800</value>
	</property>
	<property>
		<name>yarn.nodemanager.log-dirs</name>
		<value>{{ hadoop_datadir }}/yarn-log/1,{{ hadoop_datadir }}/yarn-log/2,{{ hadoop_datadir }}/yarn-log/3</value>
	</property>
	<property>
		<name>yarn.nodemanager.aux-services</name>
		<value>mapreduce_shuffle</value>
		<description>Shuffle service that needs to be set for Map Reduce applications.</description>
	</property>
	<!-- Limit CPU usage -->
<property>
<name>yarn.nodemanager.resource.count-logical-processors-as-cores</name>
<value>true</value>
<description>是否将虚拟核当cpu 核心数</description>
</property>

         <property>
                <name>yarn.nodemanager.resource.cpu-vcores</name>
                <value>{{ hadoop_vcore * 3 }}</value>
                <description>容器虚拟 CPU 内核</description>
        </property>
<!--
<property>
     <name>yarn.nodemanager.container-executor.class</name>
     <value>org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor</value>
</property>
<property>
     <name>yarn.nodemanager.linux-container-executor.resources-handler.class</name>
     <value>org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler</value>
</property>
<property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.hierarchy</name>
     <value>/hadoop-yarn</value>
</property>
<property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount</name>
     <value>false</value>
</property>
<property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.mount-path</name>
     <value>/sys/fs/cgroup</value>
</property>
<property>
     <name>yarn.nodemanager.linux-container-executor.path</name>
     <value>/home/bigdata/hadoop/bin/container-executor</value>
</property>
<property>
     <name>yarn.nodemanager.linux-container-executor.group</name>
     <value>hadoop</value>
</property>

<property>
     <name>yarn.nodemanager.resource.percentage-physical-cpu-limit</name>
     <value>90</value>
</property>

<property>
     <name>yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage</name>
     <value>false</value>
</property> -->

	<!-- Limit CPU usage -->
	<!-- 
	     	<property>
		<name>yarn.nodemanager.env-whitelist</name>
		<value>Environment properties to be inherited by containers from NodeManagers</value>
		<description>For mapreduce application in addition to the default values HADOOP_MAPRED_HOME should to be added. Property value should JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</description>
	</property>	
	 -->
	<!-- Configurations for History Server (Needs to be moved elsewhere) -->
	<property>
		<name>yarn.log-aggregation.retain-seconds</name>
		<value>86400</value>
	</property>
        <property>
                <name>yarn.nodemanager.remote-app-log-dir</name>
                <value>{{ hadoop_datadir }}/logs</value>
        </property>
	<property>
		<name>yarn.log-aggregation.retain-check-interval-seconds</name>
		<value>-1</value>
	</property>
        <property>
                <name>yarn.app.mapreduce.am.staging-dir</name>
                <value>hdfs://ns1/tmp/hadoop-yarn/staging</value>
                <description>The staging dir used while submitting jobs.</description>
        </property>
        <property>
                <name>yarn.application.classpath</name>
                <value>{{ snc_home_path }}/hadoop/etc/hadoop:{{ snc_home_path }}/hadoop/share/hadoop/common/lib/*:{{ snc_home_path }}/hadoop/share/hadoop/common/*:{{ snc_home_path }}/hadoop/share/hadoop/hdfs:{{ snc_home_path }}/hadoop/share/hadoop/hdfs/lib/*:{{ snc_home_path }}/hadoop/share/hadoop/hdfs/*:{{ snc_home_path }}/hadoop/share/hadoop/mapreduce/*:{{ snc_home_path }}/hadoop/share/hadoop/yarn:{{ snc_home_path }}/hadoop/share/hadoop/yarn/lib/*:{{ snc_home_path }}/hadoop/share/hadoop/yarn/*</value>
                <description>Linux上打 hadoop classpath 找到的所有路径</description>
        </property>
        <!-- NOTE(review): dfs.safemode.threshold.pct is an HDFS property (and a
             deprecated key; the current name is dfs.namenode.safemode.threshold-pct).
             It is ignored here in yarn-site.xml and belongs in hdfs-site.xml. -->
        <property>
                <name>dfs.safemode.threshold.pct</name>
                <value>0</value>
        </property>
        <property>
                <name>yarn.scheduler.fair.assignmultiple</name>
                <value>false</value>
                <description>不会同时分配多个任务给同一个应用程序。如果该属性设置为 true，则 FairScheduler 会将多个任务同时分配给同一个应用程序，这可能会导致该应用程序占用过多的资源，影响其他应用程序的运行</description>
        </property>
        <!-- NOTE(review): CapacityScheduler property; ineffective with the FairScheduler
             configured above. The FairScheduler equivalent is maxAMShare in
             fair-scheduler.xml / the queue allocation file. -->
        <property>
                <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
                <value>0.5</value>
        </property>
        <property>
                <name>yarn.scheduler.fair.max.assign</name>
                <value>2</value>
                <description>最多可同时为每个应用程序分配 2 个容器</description>
        </property>
        <property>
                <name>yarn.scheduler.fair.continuous-scheduling-enabled</name>
                <value>true</value>
                <description>启用“流式调度”可以提高 YARN 的资源利用率，特别是在空闲时可以自动分配资源，从而避免资源浪费。然而，在大规模集群和应用程序数量众多的情况下，这可能会导致 FairScheduler 占用大量的 CPU 和内存资源，从而影响系统的性能</description>
        </property>
</configuration>

