4. YARN Configuration

  • yarn-env.sh
# Append the following:
source /etc/profile
JAVA=$JAVA_HOME/bin/java
JAVA_HEAP_MAX=-Xmx256m
YARN_HEAPSIZE=256
export YARN_RESOURCEMANAGER_HEAPSIZE=256
# Distribute yarn-env.sh to the other hosts
scp_all.sh /usr/local/hadoop/etc/hadoop/yarn-env.sh /usr/local/hadoop/etc/hadoop/
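To confirm the file landed on every node, a quick loop like the sketch below works; it assumes passwordless SSH to nn2 and nn3, as set up in the earlier chapters.

# optional sanity check: print the heap settings on each remote host
for h in nn2 nn3; do
   echo "== $h =="
   ssh $h "grep -E 'YARN_HEAPSIZE|YARN_RESOURCEMANAGER_HEAPSIZE' /usr/local/hadoop/etc/hadoop/yarn-env.sh"
done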
  • yarn-site.xml
<configuration>
   <!-- RM1 configs start -->
   <property>
      <name>yarn.resourcemanager.address.rm1</name>
      <value>nn1:8032</value>
      <description>Address the ResourceManager exposes to clients. Clients use it to submit applications, kill applications, and so on</description>
   </property>
   <property>
      <name>yarn.resourcemanager.hostname.rm1</name>
      <value>nn1</value>
      <description>ResourceManager hostname</description>
   </property>
   <property>
      <name>yarn.resourcemanager.scheduler.address.rm1</name>
      <value>nn1:8030</value>
      <description>Address the ResourceManager exposes to ApplicationMasters. ApplicationMasters use it to request and release resources.</description>
   </property>
   <property>
      <name>yarn.resourcemanager.webapp.https.address.rm1</name>
      <value>nn1:8089</value>
   </property>
   <property>
      <name>yarn.resourcemanager.webapp.address.rm1</name>
      <value>nn1:8088</value>
      <description>ResourceManager web UI address. Users can view cluster information here in a browser.</description>
   </property>
   <property>
      <name>yarn.resourcemanager.resource-tracker.address.rm1</name>
      <value>nn1:8031</value>
      <description>Address the ResourceManager exposes to NodeManagers. NodeManagers use it to report heartbeats and receive tasks.</description>
   </property>
   <property>
      <name>yarn.resourcemanager.admin.address.rm1</name>
      <value>nn1:8033</value>
      <description>Address the ResourceManager exposes to administrators. Administrators use it to send management commands</description>
   </property>
   <!-- RM1 configs end -->
   
   <!-- RM2 configs start -->
   <property>
      <name>yarn.resourcemanager.address.rm2</name>
      <value>nn2:8032</value>
      <description>Address the ResourceManager exposes to clients. Clients use it to submit applications, kill applications, and so on</description>
   </property>
   <property>
      <name>yarn.resourcemanager.hostname.rm2</name>
      <value>nn2</value>
      <description>ResourceManager hostname</description>
   </property>
   <property>
      <name>yarn.resourcemanager.scheduler.address.rm2</name>
      <value>nn2:8030</value>
      <description>Address the ResourceManager exposes to ApplicationMasters. ApplicationMasters use it to request and release resources.</description>
   </property>
   <property>
      <name>yarn.resourcemanager.webapp.https.address.rm2</name>
      <value>nn2:8089</value>
   </property>
   <property>
      <name>yarn.resourcemanager.webapp.address.rm2</name>
      <value>nn2:8088</value>
      <description>ResourceManager web UI address. Users can view cluster information here in a browser.</description>
   </property>
   <property>
      <name>yarn.resourcemanager.resource-tracker.address.rm2</name>
      <value>nn2:8031</value>
      <description>Address the ResourceManager exposes to NodeManagers. NodeManagers use it to report heartbeats and receive tasks.</description>
   </property>
   <property>
      <name>yarn.resourcemanager.admin.address.rm2</name>
      <value>nn2:8033</value>
      <description>Address the ResourceManager exposes to administrators. Administrators use it to send management commands</description>
   </property>
   <!-- RM2 configs end -->
   
   <!-- RM3 configs start -->
   <property>
      <name>yarn.resourcemanager.address.rm3</name>
      <value>nn3:8032</value>
      <description>Address the ResourceManager exposes to clients. Clients use it to submit applications, kill applications, and so on</description>
   </property>
   <property>
      <name>yarn.resourcemanager.hostname.rm3</name>
      <value>nn3</value>
      <description>ResourceManager hostname</description>
   </property>
   <property>
      <name>yarn.resourcemanager.scheduler.address.rm3</name>
      <value>nn3:8030</value>
      <description>Address the ResourceManager exposes to ApplicationMasters. ApplicationMasters use it to request and release resources.</description>
   </property>
   <property>
      <name>yarn.resourcemanager.webapp.https.address.rm3</name>
      <value>nn3:8089</value>
   </property>
   <property>
      <name>yarn.resourcemanager.webapp.address.rm3</name>
      <value>nn3:8088</value>
      <description>ResourceManager web UI address. Users can view cluster information here in a browser.</description>
   </property>
   <property>
      <name>yarn.resourcemanager.resource-tracker.address.rm3</name>
      <value>nn3:8031</value>
      <description>Address the ResourceManager exposes to NodeManagers. NodeManagers use it to report heartbeats and receive tasks.</description>
   </property>
   <property>
      <name>yarn.resourcemanager.admin.address.rm3</name>
      <value>nn3:8033</value>
      <description>Address the ResourceManager exposes to administrators. Administrators use it to send management commands</description>
   </property>
   <!-- RM3 configs end -->
   
   <!-- yarn ha start -->
   <property>
      <name>yarn.resourcemanager.ha.enabled</name>
      <value>true</value>
      <description>Whether to enable YARN HA</description>
   </property>
   <property>
      <name>yarn.resourcemanager.ha.automatic-failover.embedded</name>
      <value>true</value>
      <description>Use embedded automatic failover for HA state transitions</description>
   </property>
   <property>
      <name>yarn.resourcemanager.ha.rm-ids</name>
      <value>rm1,rm2,rm3</value>
      <description>List of logical IDs of the RMs</description>
   </property>
   <property>
      <name>yarn.resourcemanager.zk-address</name>
      <value>nn1:2181,nn2:2181,nn3:2181</value>
      <description>ZooKeeper quorum where HA state is stored</description>
   </property>
   <!-- yarn ha end -->
   
   <!-- shared metadata store start -->
   <property>
      <name>yarn.resourcemanager.cluster-id</name>
      <value>pseudo-yarn-rm-cluster</value>
      <description>Cluster ID</description>
   </property>
   <property>
      <name>yarn.resourcemanager.recovery.enabled</name>
      <value>true</value>
      <description>Defaults to false, meaning that if the ResourceManager dies, applications that were running cannot be restarted after the RM recovers</description>
   </property>
   <property>
      <name>yarn.resourcemanager.store.class</name>
      <value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
      <description>How RM state is stored. Options include FileSystemRMStateStore and MemoryRMStateStore; the ZooKeeper-based ZKRMStateStore is currently the mainstream choice</description>
   </property>
   <property>
      <name>yarn.resourcemanager.zk.state-store.address</name>
      <value>nn1:2181,nn2:2181,nn3:2181</value>
      <description>ZooKeeper address used when state is stored in ZK.</description>
   </property>
   <!-- shared metadata store end -->
   
   <!-- NodeManager basics start -->
   <property>
      <name>yarn.nodemanager.local-dirs</name>
      <value>/data/yarn/local</value>
      <description>Where intermediate data is kept: files needed to run a Container (executables, JAR packages, configuration files, etc.) and temporary data produced at runtime</description>
   </property>
   <property>
      <name>yarn.nodemanager.log-dirs</name>
      <value>/data/yarn/logs</value>
      <description>Where Container logs are written; multiple directories may be configured</description>
   </property>
   <property>
      <name>yarn.nodemanager.address</name>
      <value>0.0.0.0:9103</value>
   </property>
   <property>
      <name>yarn.nodemanager.aux-services</name>
      <value>mapreduce_shuffle</value>
      <description>Auxiliary services run on the NodeManager. Must be set to mapreduce_shuffle for MapReduce jobs to run</description>
   </property>
   <property>
      <name>yarn.nodemanager.webapp.address</name>
      <value>0.0.0.0:8042</value>
   </property>
   <property>
      <name>yarn.nodemanager.localizer.address</name>
      <value>0.0.0.0:8040</value>
   </property>
   <property>
      <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
      <value>org.apache.hadoop.mapred.ShuffleHandler</value>
   </property>
   <property>
      <name>mapreduce.shuffle.port</name>
      <value>23080</value>
   </property>
   <!-- NodeManager basics end -->
   
   <!-- NodeManager resource limits start -->
   <property>
      <name>yarn.scheduler.minimum-allocation-vcores</name>
      <value>1</value>
      <description>Minimum number of virtual cores a single task can request</description>
   </property>
   <property>
      <name>yarn.scheduler.maximum-allocation-vcores</name>
      <value>3</value>
      <description>Maximum number of virtual cores a single task can request. Pairs with yarn.nodemanager.resource.cpu-vcores; keeping it at or below the number of physical CPUs is recommended</description>
   </property>
   <property>
      <name>yarn.nodemanager.resource.memory-mb</name>
      <value>1536</value>
      <description>Total physical memory, in MB, that YARN may use on this node</description>
   </property>
   <property>
      <name>yarn.scheduler.maximum-allocation-mb</name>
      <value>1024</value>
      <description>Maximum physical memory a single task can request</description>
   </property>
   <property>
      <name>yarn.nodemanager.resource.cpu-vcores</name>
      <value>3</value>
      <description>Number of virtual cores YARN may use on this node; here one physical CPU maps to 3 vcores</description>
   </property>
   <!-- disable memory checks start -->
   <property>
      <name>yarn.nodemanager.vmem-check-enabled</name>
      <value>false</value>
      <description>Virtual memory check; defaults to true</description>
   </property>
   <property>
      <name>yarn.nodemanager.pmem-check-enabled</name>
      <value>false</value>
      <description>Physical memory check; defaults to true</description>
   </property>
   <!-- disable memory checks end -->
   <property>
      <name>yarn.application.classpath</name>
      <value>$HADOOP_CONF_DIR,
        $HADOOP_COMMON_HOME/share/hadoop/common/*,
        $HADOOP_COMMON_HOME/share/hadoop/common/lib/*,
        $HADOOP_COMMON_HOME/share/hadoop/hdfs/*,
        $HADOOP_COMMON_HOME/share/hadoop/hdfs/lib/*,
        $HADOOP_COMMON_HOME/share/hadoop/mapreduce/*,
        $HADOOP_COMMON_HOME/share/hadoop/mapreduce/lib/*,
        $HADOOP_COMMON_HOME/share/hadoop/yarn/*,
        $HADOOP_COMMON_HOME/share/hadoop/yarn/lib/*</value>
   </property>
   <!-- NodeManager resource limits end -->
   
   <!-- capacity scheduler application priority
    0 - 5, where 0 is the default and 5 is the highest
    start
    -->
   <property>
      <name>yarn.cluster.max-application-priority</name>
      <value>5</value>
   </property>
   <!-- capacity scheduler application priority
    0 - 5, where 0 is the default and 5 is the highest
    end
    -->
    
   <!-- configure in yarn-site.xml that submitted job files are uploaded under the user's directory in /user start -->
   <property>
      <name>yarn.app.mapreduce.am.staging-dir</name>
      <value>/user</value>
   </property>
   <property>
      <name>yarn.app.mapreduce.am.scheduler.connection.wait.interval-ms</name>
      <value>5000</value>
   </property>
   <!-- end -->
   
   <!-- log aggregation start-->
   <property>
      <name>yarn.log-aggregation-enable</name>
      <value>true</value>
      <description>Whether to enable log aggregation</description>
   </property>
   <property>
      <name>yarn.nodemanager.remote-app-log-dir</name>
      <value>/tmp/app-logs</value>
      <description>HDFS directory the logs are moved to after an application finishes; effective only when log aggregation is enabled</description>
   </property>
   <!-- directory settings end -->
   <!-- misc -->
   <property>
      <name>yarn.log-aggregation.retain-seconds</name>
      <value>1209600</value>
      <description>How long the run logs of all Containers on the NodeManagers are kept in HDFS; 1209600 seconds is two weeks</description>
   </property>
   <!-- log aggregation end -->
</configuration>
# Distribute yarn-site.xml to the other hosts
scp_all.sh /usr/local/hadoop/etc/hadoop/yarn-site.xml /usr/local/hadoop/etc/hadoop/
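A file this long is easy to break with a stray tag, and a malformed yarn-site.xml will fail the daemons at startup; a quick well-formedness check is worth running first. This is a minimal sketch, assuming xmllint (from libxml2) is installed:

# silent when the XML parses; prints the offending line otherwise
xmllint --noout /usr/local/hadoop/etc/hadoop/yarn-site.xml && echo "yarn-site.xml is well-formed"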

  • mapred-site.xml

<configuration>
   <!-- execution mode -->
   <property>
      <name>mapreduce.framework.name</name>
      <value>yarn</value>
      <description>Execution mode</description>
   </property>
   <!-- execution mode -->
   <!-- resource limits -->
   <property>
      <name>yarn.app.mapreduce.am.resource.mb</name>
      <value>1024</value>
      <description>Memory the MR ApplicationMaster requests from YARN</description>
   </property>
   <property>
      <name>yarn.app.mapreduce.am.command-opts</name>
      <value>-Xmx768m</value>
      <description>JVM heap used by the ApplicationMaster</description>
   </property>
   <property>
      <name>mapreduce.map.memory.mb</name>
      <value>1024</value>
      <description>Memory each Map Task requests from YARN</description>
   </property>
   <property>
      <name>mapreduce.reduce.memory.mb</name>
      <value>1024</value>
      <description>Memory each Reduce Task requests from YARN</description>
   </property>
   <property>
      <name>yarn.app.mapreduce.am.resource.cpu-vcores</name>
      <value>1</value>
      <description>Number of virtual cores the MR ApplicationMaster uses. Pairs with yarn.nodemanager.resource.cpu-vcores; keeping it at or below the number of physical CPUs is recommended</description>
   </property>
   <property>
      <name>mapreduce.reduce.java.opts</name>
      <value>-Xmx768m</value>
      <description>Actual JVM heap for each Reduce Task</description>
   </property>
   <property>
      <name>mapreduce.map.java.opts</name>
      <value>-Xmx768m</value>
      <description>Actual JVM heap for each Map Task</description>
   </property>
   <property>
      <name>mapreduce.map.cpu.vcores</name>
      <value>1</value>
      <description>Number of virtual cores each Map Task needs</description>
   </property>
   <property>
      <name>mapreduce.reduce.cpu.vcores</name>
      <value>1</value>
      <description>Number of virtual cores each Reduce Task needs</description>
   </property>
   <property>
      <name>mapreduce.application.classpath</name>
      <value>/usr/local/hadoop/etc/hadoop,/usr/local/hadoop/share/hadoop/common/*,/usr/local/hadoop/share/hadoop/common/lib/*,/usr/local/hadoop/share/hadoop/hdfs/*,/usr/local/hadoop/share/hadoop/hdfs/lib/*,/usr/local/hadoop/share/hadoop/mapreduce/*,/usr/local/hadoop/share/hadoop/mapreduce/lib/*,/usr/local/hadoop/share/hadoop/yarn/*,/usr/local/hadoop/share/hadoop/yarn/lib/*,/usr/local/hadoop/lib/*,/usr/local/hbase/lib/*</value>
      <description>Runtime classpath of the JVMs that run MR jobs</description>
   </property>
   
   <!-- enable the JobHistory Server on nn1 start-->
   <property>
      <name>mapreduce.jobhistory.address</name>
      <value>nn1:10020</value>
      <description>MapReduce JobHistory Server address</description>
   </property>
   <property>
      <name>mapreduce.jobhistory.webapp.address</name>
      <value>nn1:19888</value>
      <description>MapReduce JobHistory Server web UI address</description>
   </property>
   <property>
      <name>mapreduce.jobhistory.intermediate-done-dir</name>
      <value>/data/mapred/tmp</value>
      <description>Where logs produced by MapReduce jobs are stored</description>
   </property>
   <property>
      <name>mapreduce.jobhistory.done-dir</name>
      <value>/data/mapred/done</value>
      <description>Where logs managed by the MR JobHistory Server are stored</description>
   </property>
   <property>
      <name>mapreduce.job.userlog.retain.hours</name>
      <value>48</value>
      <description>How many hours user logs are retained</description>
   </property>
   <!-- enable the JobHistory Server on nn1 end-->

   <!-- enable map output compression so the reduce phase pulls less data from disk start-->
   <property>
      <name>mapreduce.map.output.compress</name>
      <value>true</value>
      <description>Whether to compress map output</description>
   </property>
   <property>
      <name>mapreduce.map.output.compress.codec</name>
      <value>org.apache.hadoop.io.compress.BZip2Codec</value>
      <description>Default codec for map output</description>
   </property>
   <!-- enable map output compression end-->
   
   <!-- 
   This enables reduce output compression. When it is on, the job output cannot be read directly and must be decompressed first.
   Uncomment if needed.
   <property>
      <name>mapreduce.output.fileoutputformat.compress</name>
      <value>true</value>
      <description>Whether to compress reduce output</description>
   </property>
   <property>
      <name>mapreduce.output.fileoutputformat.compress.codec</name>
      <value>org.apache.hadoop.io.compress.BZip2Codec</value>
      <description>Default codec for reduce output</description>
   </property>
   -->
</configuration>
# Distribute mapred-site.xml to the other hosts
scp_all.sh /usr/local/hadoop/etc/hadoop/mapred-site.xml /usr/local/hadoop/etc/hadoop/
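Because map output is compressed with BZip2Codec, it can be worth checking whether native bzip2 support is compiled into your Hadoop build; the pure-Java implementation still works, just more slowly. The standard CLI check:

# prints a table of native library support; look for the bzip2 line
hadoop checknative -a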

  • capacity-scheduler.xml

<!--  YARN uses the capacity scheduler to manage cluster resources.
	  Configure two child queues under root: hainiu with 80% capacity and default with 20%.
	  start
-->
<configuration>
   <property>
      <name>yarn.scheduler.capacity.root.queues</name>
      <value>hainiu,default</value>
   </property>
   <property>
      <name>yarn.scheduler.capacity.root.hainiu.capacity</name>
      <value>80</value>
   </property>
   <property>
      <name>yarn.scheduler.capacity.root.default.capacity</name>
      <value>20</value>
   </property>
   <property>
      <name>yarn.scheduler.capacity.root.hainiu.maximum-capacity</name>
      <value>100</value>
   </property>
   <property>
      <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
      <value>100</value>
   </property>
<!--  YARN uses the capacity scheduler to manage cluster resources.
	  Configure two child queues under root: hainiu with 80% capacity and default with 20%.
	  end
-->
</configuration>
# Distribute capacity-scheduler.xml to the other hosts
scp_all.sh /usr/local/hadoop/etc/hadoop/capacity-scheduler.xml /usr/local/hadoop/etc/hadoop/
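On a cluster that is already running, queue capacity changes do not require a restart; the capacity scheduler can reload this file in place with the standard rmadmin command:

# re-read capacity-scheduler.xml on the active ResourceManager
yarn rmadmin -refreshQueues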
# Start YARN; if it is already running, stop the cluster first with stop-yarn.sh
start-yarn.sh
# Start the JobHistory Server; run this on nn1
mapred --daemon start historyserver
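With everything up, a few checks confirm the HA roles, the daemons, and the queue setup. The job submission below is a sketch: the examples jar path depends on your Hadoop version, so adjust the wildcard if the shell finds no match.

# which RM is active? (rm1/rm2/rm3 are the logical IDs from yarn-site.xml)
yarn rmadmin -getServiceState rm1
yarn rmadmin -getServiceState rm2
yarn rmadmin -getServiceState rm3
# confirm the daemons on this node
jps | grep -E 'ResourceManager|NodeManager|JobHistoryServer'
# submit a sample job to the hainiu queue
yarn jar /usr/local/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar \
   pi -Dmapreduce.job.queuename=hainiu 2 10
# after it finishes, fetch the aggregated logs by application id
yarn logs -applicationId <application_... id printed by the job>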