<!DOCTYPE html><html lang="zh-CN" data-theme="light"><head><meta charset="UTF-8"><meta http-equiv="X-UA-Compatible" content="IE=edge"><meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no"><title>大数据之Hadoop06_系统调优手册 | Oct25-X</title><meta name="keywords" content="Hadoop"><meta name="author" content="Oct25-X"><meta name="copyright" content="Oct25-X"><meta name="format-detection" content="telephone=no"><meta name="theme-color" content="#ffffff"><meta name="description" content="HDFS —— 核心参数一、NameNode 内存生产配置NameNode 内存计算每个文件块大概占用 150byte，一台服务器 128G 内存为例，能存储多少文件块呢？ 12128 * 1024 * 1024 * 1024 &#x2F; 150Byte ≈ 9.1亿G      MB     KB    Byte  配置 NameNode 内存hadoop-env.sh 中描述 Hadoop 的内存是动">
<meta property="og:type" content="article">
<meta property="og:title" content="大数据之Hadoop06_系统调优手册">
<meta property="og:url" content="https://oct25-x.atomgit.net/oct25-xxxxx/2023/06/17/07%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8BHadoop06_%E7%B3%BB%E7%BB%9F%E8%B0%83%E4%BC%98%E6%89%8B%E5%86%8C/index.html">
<meta property="og:site_name" content="Oct25-X">
<meta property="og:description" content="HDFS —— 核心参数一、NameNode 内存生产配置NameNode 内存计算每个文件块大概占用 150byte，一台服务器 128G 内存为例，能存储多少文件块呢？ 12128 * 1024 * 1024 * 1024 &#x2F; 150Byte ≈ 9.1亿G      MB     KB    Byte  配置 NameNode 内存hadoop-env.sh 中描述 Hadoop 的内存是动">
<meta property="og:locale" content="zh_CN">
<meta property="og:image" content="https://oct25-x.atomgit.net/oct25-xxxxx/img/cover_img/ctt7.png">
<meta property="article:published_time" content="2023-06-17T07:30:29.000Z">
<meta property="article:modified_time" content="2024-02-17T14:14:38.996Z">
<meta property="article:author" content="Oct25-X">
<meta property="article:tag" content="Hadoop">
<meta name="twitter:card" content="summary">
<meta name="twitter:image" content="https://oct25-x.atomgit.net/oct25-xxxxx/img/cover_img/ctt7.png"><link rel="shortcut icon" href="/oct25-xxxxx/img/logo.jpg"><link rel="canonical" href="https://oct25-x.atomgit.net/oct25-xxxxx/2023/06/17/07%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8BHadoop06_%E7%B3%BB%E7%BB%9F%E8%B0%83%E4%BC%98%E6%89%8B%E5%86%8C/"><link rel="preconnect" href="//cdn.jsdelivr.net"/><link rel="preconnect" href="//busuanzi.ibruce.info"/><link rel="stylesheet" href="/oct25-xxxxx/css/index.css"><link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@fortawesome/fontawesome-free@6/css/all.min.css" media="print" onload="this.media='all'"><link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@fancyapps/ui/dist/fancybox.css" media="print" onload="this.media='all'"><script>const GLOBAL_CONFIG = { 
  // Site root path every asset/link URL is prefixed with.
  root: '/oct25-xxxxx/',
  // Algolia search is disabled; the local JSON index below is used instead.
  algolia: undefined,
  localSearch: {"path":"search.json","languages":{"hits_empty":"找不到您查询的内容：${query}"}},
  // Simplified/Traditional Chinese toggle labels and behaviour.
  translate: {"defaultEncoding":2,"translateDelay":0,"msgToTraditionalChinese":"繁","msgToSimplifiedChinese":"簡"},
  noticeOutdate: undefined,
  // Code-highlight behaviour. NOTE(review): "highlighjs" is the theme's own
  // key spelling — do not "correct" it; theme JS compares against this string.
  highlight: {"plugin":"highlighjs","highlightCopy":true,"highlightLang":true,"highlightHeightLimit":false},
  // Clipboard feedback messages shown by the code-copy button.
  copy: {
    success: '复制成功',
    error: '复制错误',
    noSupport: '浏览器不支持'
  },
  // Whether dates render as relative ("3 天前") on homepage / post pages.
  relativeDate: {
    homepage: false,
    post: false
  },
  runtime: '',
  // Suffixes used when rendering relative dates.
  date_suffix: {
    just: '刚刚',
    min: '分钟前',
    hour: '小时前',
    day: '天前',
    month: '个月前'
  },
  copyright: undefined,
  // Image lightbox implementation and the CDN assets lazy-loaded for it.
  lightbox: 'fancybox',
  Snackbar: undefined,
  source: {
    justifiedGallery: {
      js: 'https://cdn.jsdelivr.net/npm/flickr-justified-gallery@2/dist/fjGallery.min.js',
      css: 'https://cdn.jsdelivr.net/npm/flickr-justified-gallery@2/dist/fjGallery.min.css'
    }
  },
  isPhotoFigcaption: false,
  islazyload: false,
  isAnchor: false
}</script><script id="config-diff">var GLOBAL_CONFIG_SITE = {
  // Per-page values injected by the theme for this specific post.
  title: '大数据之Hadoop06_系统调优手册',
  isPost: true,
  isHome: false,
  isHighlightShrink: false,
  isToc: true,
  // Last-updated time; matches article:modified_time rendered in local (UTC+8) time.
  postUpdate: '2024-02-17 22:14:38'
}</script><noscript><style type="text/css">
  /* No-JS fallback: these elements are normally revealed by theme JS,
     so force them visible when JavaScript is disabled. */
  #nav {
    opacity: 1
  }
  .justified-gallery img {
    opacity: 1
  }

  /* Post/listing timestamps are normally shown after JS formats them;
     display them as-is without JS. */
  #recent-posts time,
  #post-meta time {
    display: inline !important
  }
</style></noscript><script>(win=>{
    // Small localStorage wrapper that stores JSON records with a time-to-live.
    win.saveToLocal = {
      // Persist `value` under `key`, expiring after `ttl` days.
      // A ttl of 0 means "do not persist at all".
      set: function setWithExpiry(key, value, ttl) {
        if (ttl === 0) return
        const record = {
          value: value,
          expiry: Date.now() + ttl * 86400000, // 86400000 ms = one day
        }
        localStorage.setItem(key, JSON.stringify(record))
      },

      // Return the stored value for `key`, or undefined when the key is
      // absent or its record has expired (expired records are removed).
      get: function getWithExpiry(key) {
        const raw = localStorage.getItem(key)
        if (!raw) return undefined

        const record = JSON.parse(raw)
        if (Date.now() > record.expiry) {
          localStorage.removeItem(key)
          return undefined
        }
        return record.value
      }
    }
  
    // Inject a <script> tag for `url` and resolve the promise once it loads;
    // rejects on network/load error.
    win.getScript = url => new Promise((resolve, reject) => {
      const tag = document.createElement('script')
      tag.src = url
      tag.async = true
      tag.onerror = reject
      tag.onload = tag.onreadystatechange = function () {
        // Legacy IE fires onreadystatechange; ignore intermediate states.
        const state = this.readyState
        if (state && state !== 'loaded' && state !== 'complete') return
        // Clear both handlers so resolve() runs only once.
        tag.onload = tag.onreadystatechange = null
        resolve()
      }
      document.head.appendChild(tag)
    })
  
      // Switch the page to the dark theme and sync the browser UI colour
      // via the theme-color meta tag (if present).
      win.activateDarkMode = function () {
        document.documentElement.setAttribute('data-theme', 'dark')
        const themeColorMeta = document.querySelector('meta[name="theme-color"]')
        if (themeColorMeta !== null) {
          themeColorMeta.setAttribute('content', '#0d0d0d')
        }
      }
      // Switch the page to the light theme and sync the browser UI colour.
      win.activateLightMode = function () {
        document.documentElement.setAttribute('data-theme', 'light')
        const themeColorMeta = document.querySelector('meta[name="theme-color"]')
        if (themeColorMeta !== null) {
          themeColorMeta.setAttribute('content', '#ffffff')
        }
      }
      // Restore the visitor's saved theme before first paint to avoid a
      // flash of the wrong theme.
      const t = saveToLocal.get('theme')
    
          if (t === 'dark') activateDarkMode()
          else if (t === 'light') activateLightMode()
        
      // Restore whether the sidebar (aside) was hidden on a previous visit.
      const asideStatus = saveToLocal.get('aside-status')
      if (asideStatus !== undefined) {
        if (asideStatus === 'hide') {
          document.documentElement.classList.add('hide-aside')
        } else {
          document.documentElement.classList.remove('hide-aside')
        }
      }
    
      // Tag the root element so CSS can target Apple devices
      // (user-agent sniffing; presumably for font/scroll tweaks — see theme CSS).
      const detectApple = () => {
        if(/iPad|iPhone|iPod|Macintosh/.test(navigator.userAgent)){
          document.documentElement.classList.add('apple')
        }
      }
      detectApple()
    })(window)</script><link rel="stylesheet" href="/css/rightMenu.css"><meta name="generator" content="Hexo 6.1.0"></head><body><div id="sidebar"><div id="menu-mask"></div><div id="sidebar-menus"><div class="avatar-img is-center"><img src="/oct25-xxxxx/img/avatar.jpg" onerror="onerror=null;src='/img/friend_404.gif'" alt="avatar"/></div><div class="site-data is-center"><div class="data-item"><a href="/oct25-xxxxx/archives/"><div class="headline">文章</div><div class="length-num">52</div></a></div><div class="data-item"><a href="/oct25-xxxxx/tags/"><div class="headline">标签</div><div class="length-num">17</div></a></div><div class="data-item"><a href="/oct25-xxxxx/categories/"><div class="headline">分类</div><div class="length-num">8</div></a></div></div><hr/><div class="menus_items"><div class="menus_item"><a class="site-page" href="/oct25-xxxxx/"><i class="fa-fw fas fa-home"></i><span> 首页</span></a></div><div class="menus_item"><a class="site-page" href="/oct25-xxxxx/archives/"><i class="fa-fw fas fa-archive"></i><span> 归档</span></a></div><div class="menus_item"><a class="site-page" href="/oct25-xxxxx/tags/"><i class="fa-fw fas fa-tags"></i><span> 标签</span></a></div><div class="menus_item"><a class="site-page" href="/oct25-xxxxx/categories/"><i class="fa-fw fas fa-folder-open"></i><span> 分类</span></a></div><div class="menus_item"><a class="site-page" href="/oct25-xxxxx/about/"><i class="fa-fw fas fa-heart"></i><span> 关于</span></a></div></div></div></div><div class="post" id="body-wrap"><header class="post-bg" id="page-header" style="background-image: url('/oct25-xxxxx/img/top_img/top_bg6.jpg')"><nav id="nav"><span id="blog_name"><a id="site-name" href="/oct25-xxxxx/">Oct25-X</a></span><div id="menus"><div id="search-button"><a class="site-page social-icon search"><i class="fas fa-search fa-fw"></i><span> 搜索</span></a></div><div class="menus_items"><div class="menus_item"><a class="site-page" href="/oct25-xxxxx/"><i class="fa-fw fas fa-home"></i><span> 
首页</span></a></div><div class="menus_item"><a class="site-page" href="/oct25-xxxxx/archives/"><i class="fa-fw fas fa-archive"></i><span> 归档</span></a></div><div class="menus_item"><a class="site-page" href="/oct25-xxxxx/tags/"><i class="fa-fw fas fa-tags"></i><span> 标签</span></a></div><div class="menus_item"><a class="site-page" href="/oct25-xxxxx/categories/"><i class="fa-fw fas fa-folder-open"></i><span> 分类</span></a></div><div class="menus_item"><a class="site-page" href="/oct25-xxxxx/about/"><i class="fa-fw fas fa-heart"></i><span> 关于</span></a></div></div><div id="toggle-menu"><a class="site-page"><i class="fas fa-bars fa-fw"></i></a></div></div></nav><div id="post-info"><h1 class="post-title">大数据之Hadoop06_系统调优手册</h1><div id="post-meta"><div class="meta-firstline"><span class="post-meta-date"><i class="far fa-calendar-alt fa-fw post-meta-icon"></i><span class="post-meta-label">发表于</span><time class="post-meta-date-created" datetime="2023-06-17T07:30:29.000Z" title="发表于 2023-06-17 15:30:29">2023-06-17</time><span class="post-meta-separator">|</span><i class="fas fa-history fa-fw post-meta-icon"></i><span class="post-meta-label">更新于</span><time class="post-meta-date-updated" datetime="2024-02-17T14:14:38.996Z" title="更新于 2024-02-17 22:14:38">2024-02-17</time></span><span class="post-meta-categories"><span class="post-meta-separator">|</span><i class="fas fa-inbox fa-fw post-meta-icon"></i><a class="post-meta-categories" href="/oct25-xxxxx/categories/%E5%A4%A7%E6%95%B0%E6%8D%AE%E7%BB%84%E4%BB%B6/">大数据组件</a></span></div><div class="meta-secondline"><span class="post-meta-separator">|</span><span class="post-meta-pv-cv" id="" data-flag-title="大数据之Hadoop06_系统调优手册"><i class="far fa-eye fa-fw post-meta-icon"></i><span class="post-meta-label">阅读量:</span><span id="busuanzi_value_page_pv"></span></span></div></div></div></header><main class="layout" id="content-inner"><div id="post"><article class="post-content" id="article-container"><h1 id="HDFS-——-核心参数"><a 
href="#HDFS-——-核心参数" class="headerlink" title="HDFS —— 核心参数"></a>HDFS —— 核心参数</h1><h2 id="一、NameNode-内存生产配置"><a href="#一、NameNode-内存生产配置" class="headerlink" title="一、NameNode 内存生产配置"></a>一、NameNode 内存生产配置</h2><h3 id="NameNode-内存计算"><a href="#NameNode-内存计算" class="headerlink" title="NameNode 内存计算"></a>NameNode 内存计算</h3><p>每个文件块大概占用 150byte，一台服务器 128G 内存为例，能存储多少文件块呢？</p>
<figure class="highlight plaintext"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">128 * 1024 * 1024 * 1024 / 150Byte ≈ 9.1亿</span><br><span class="line">G      MB     KB    Byte</span><br></pre></td></tr></table></figure>

<h3 id="配置-NameNode-内存"><a href="#配置-NameNode-内存" class="headerlink" title="配置 NameNode 内存"></a>配置 NameNode 内存</h3><p>hadoop-env.sh 中描述 Hadoop 的内存是动态分配的</p>
<figure class="highlight plaintext"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br></pre></td><td class="code"><pre><span class="line"># The maximum amount of heap to use (Java -Xmx). If no unit</span><br><span class="line"># is provided, it will be converted to MB. Daemons will</span><br><span class="line"># prefer any Xmx setting in their respective _OPT variable.</span><br><span class="line"># There is no default; the JVM will autoscale based upon machine</span><br><span class="line"># memory size.</span><br><span class="line"># export HADOOP_HEAPSIZE_MAX=</span><br><span class="line"># The minimum amount of heap to use (Java -Xms). If no unit</span><br><span class="line"># is provided, it will be converted to MB. Daemons will</span><br><span class="line"># prefer any Xms setting in their respective _OPT variable.</span><br><span class="line"># There is no default; the JVM will autoscale based upon machine</span><br><span class="line"># memory size.</span><br><span class="line"># export HADOOP_HEAPSIZE_MIN=</span><br><span class="line">HADOOP_NAMENODE_OPTS=-Xmx102400m</span><br></pre></td></tr></table></figure>

<p>查看 NameNode 占用内存</p>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line">jps</span><br><span class="line">2611 NameNode</span><br><span class="line">2744 DataNode</span><br><span class="line"></span><br><span class="line">jmap -heap 2611</span><br><span class="line"></span><br><span class="line">Heap Configuration:</span><br><span class="line"> MaxHeapSize = 1031798784 (984.0MB)</span><br></pre></td></tr></table></figure>

<p>查看 DataNode 占用内存</p>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line">jmap -heap 2744</span><br><span class="line"></span><br><span class="line">Heap Configuration:</span><br><span class="line"> MaxHeapSize = 1031798784 (984.0MB)</span><br></pre></td></tr></table></figure>

<p>默认 NameNode 和 DataNode 占用内存是自动分配的，且相等</p>
<p>这样做并不合理</p>
<p>经验参考：</p>
<ol>
<li>NameNode 最小值 1G，每增加 1000000 block，增加 1G 内存</li>
<li>DataNode 最小值 4G，block 数，或者副本数升高，都应该增加 DataNode 值</li>
<li>一个 DataNode 上的副本总数低于 4000000 调整为 4G，超过 4000000，每增加 1000000，增加 1G</li>
</ol>
<p>具体修改，配置文件：hadoop-env.sh</p>
<figure class="highlight plaintext"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">export HDFS_NAMENODE_OPTS=&quot;-Dhadoop.security.logger=INFO,RFAS - Xmx1024m&quot;</span><br><span class="line">export HDFS_DATANODE_OPTS=&quot;-Dhadoop.security.logger=ERROR,RFAS - Xmx1024m&quot;</span><br></pre></td></tr></table></figure>

<h2 id="二、NameNode-心跳并发配置"><a href="#二、NameNode-心跳并发配置" class="headerlink" title="二、NameNode 心跳并发配置"></a>二、NameNode 心跳并发配置</h2><p>hdfs-site.xml</p>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre></td><td class="code"><pre><span class="line">The number of Namenode RPC server threads that listen to requests </span><br><span class="line">from clients. If dfs.namenode.servicerpc-address is not </span><br><span class="line">configured then Namenode RPC server threads listen to requests </span><br><span class="line">from all nodes.</span><br><span class="line">NameNode 有一个工作线程池，用来处理不同 DataNode 的并发心跳以及客户端并发</span><br><span class="line">的元数据操作。</span><br><span class="line">对于大集群或者有大量客户端的集群来说，通常需要增大该参数。默认值是 10。</span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>dfs.namenode.handler.count<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>21<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<p>企业经验：<br>$$<br>dfs.namenode.handler.count &#x3D; 20 × log_e(ClusterSize)<br>$$<br>比如，集群规模 datanode 3台，20 × ln3 ≈ 21，此参数设置为21</p>
<h1 id="HDFS-——-集群压测"><a href="#HDFS-——-集群压测" class="headerlink" title="HDFS —— 集群压测"></a>HDFS —— 集群压测</h1><p>HDFS 读写性能主要受网络和磁盘影响较大，为了方便测试，将三台虚拟机网络都设置为 100mbps</p>
<p><img src="/oct25-xxxxx/img/hadoop/16_%E8%99%9A%E6%8B%9F%E6%9C%BA%E8%AE%BE%E7%BD%AE%E5%B8%A6%E5%AE%BD.png"></p>
<h2 id="一、测试-HDFS-写性能"><a href="#一、测试-HDFS-写性能" class="headerlink" title="一、测试 HDFS 写性能"></a>一、测试 HDFS 写性能</h2><h3 id="测试"><a href="#测试" class="headerlink" title="测试"></a>测试</h3><p>向 HDFS 集群写 10 个 128M 的文件</p>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line">hadoop jar /opt/module/hadoop-3.1.3/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-3.1.3-tests.jar TestDFSIO -write -nrFiles 10 -fileSize 128MB</span><br><span class="line"></span><br><span class="line">2023-05-29 23:34:07,927 INFO fs.TestDFSIO: ----- TestDFSIO ----- : write</span><br><span class="line">2023-05-29 23:34:07,928 INFO fs.TestDFSIO:             Date &amp; time: Mon May 29 23:34:07 CST 2023</span><br><span class="line">2023-05-29 23:34:07,928 INFO fs.TestDFSIO:         Number of files: 10</span><br><span class="line">2023-05-29 23:34:07,928 INFO fs.TestDFSIO:  Total MBytes processed: 1280</span><br><span class="line">2023-05-29 23:34:07,929 INFO fs.TestDFSIO:       Throughput mb/sec: 1.34</span><br><span class="line">2023-05-29 23:34:07,930 INFO fs.TestDFSIO:  Average IO rate mb/sec: 1.37</span><br><span class="line">2023-05-29 23:34:07,930 INFO fs.TestDFSIO:   IO rate std deviation: 0.18</span><br><span class="line">2023-05-29 23:34:07,930 INFO fs.TestDFSIO:      Test <span class="built_in">exec</span> time sec: 134.37</span><br></pre></td></tr></table></figure>

<p>注意：nrFiles n 为生成 MapTask 数量 </p>
<p><strong>Number of files</strong>：生成 mapTask 数量，一般是集群中（CPU 核数-1），我们测试虚拟机就按照实际的物理核数-1 分配即可</p>
<p><strong>Total MBytes processed</strong>：单个 map 处理的文件大小</p>
<p><strong>Throughput mb&#x2F;sec</strong>：单个 mapTak 的吞吐量</p>
<ul>
<li>计算方式：处理的总文件大小&#x2F;每一个 mapTask 写数据的时间累加</li>
<li>集群整体吞吐量：生成 mapTask 数量*单个 mapTak 的吞吐量</li>
</ul>
<p><strong>Average IO rate mb&#x2F;sec</strong>：平均 mapTak 的吞吐量</p>
<ul>
<li>计算方式：每个 mapTask 处理文件大小&#x2F;每一个 mapTask 写数据的时间全部相加除以 task 数量</li>
</ul>
<p><strong>IO rate std deviation</strong>：方差、反映各个 mapTask 处理的差值，越小越均衡</p>
<h3 id="测试异常"><a href="#测试异常" class="headerlink" title="测试异常"></a>测试异常</h3><p>测试出现异常，可以在 yarn-site.xml 中设置虚拟内存检测为 false</p>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">&lt;!--是否启动一个线程检查每个任务正使用的虚拟内存量，如果任务超出分配值，则直接将其杀掉，默认是 true --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>yarn.nodemanager.vmem-check-enabled<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>false<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<h3 id="测试结果分析"><a href="#测试结果分析" class="headerlink" title="测试结果分析"></a>测试结果分析</h3><p>因为有个副本在本地，所以不参与测试</p>
<p>集群配置副本数量为 3</p>
<p>参与测试的文件：<br>$$<br>10 个文件 × 2 个副本 &#x3D; 20 个<br>$$<br>压测后的速度：1.34 mb&#x2F;sec</p>
<p>实测速度：<br>$$<br>1.34 M&#x2F;s × 20 个文件 ≈ 26.8 M&#x2F;s<br>$$<br>三台服务器的带宽：<br>$$<br>12.5 + 12.5 + 12.5 ≈ 30 m&#x2F;s<br>$$<br><strong>如果实测速度远远小于网络，并且实测速度不能满足工作需求，可以考虑采用固态硬盘或者增加磁盘个数。</strong></p>
<blockquote>
<p>注意：如果客户端不在集群节点上，那三个副本都需要参与计算</p>
</blockquote>
<h2 id="二、测试-HDFS-读性能"><a href="#二、测试-HDFS-读性能" class="headerlink" title="二、测试 HDFS 读性能"></a>二、测试 HDFS 读性能</h2><h3 id="测试-1"><a href="#测试-1" class="headerlink" title="测试"></a>测试</h3><p>读取 HDFS 集群 10 个 128M 的文件</p>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre></td><td class="code"><pre><span class="line">hadoop jar /opt/module/hadoop-3.1.3/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-3.1.3-tests.jar TestDFSIO -<span class="built_in">read</span> -nrFiles 10 -fileSize 128MB</span><br><span class="line"></span><br><span class="line">2023-05-30 07:15:42,044 INFO fs.TestDFSIO: ----- TestDFSIO ----- : <span class="built_in">read</span></span><br><span class="line">2023-05-30 07:15:42,044 INFO fs.TestDFSIO:             Date &amp; time: Tue May 30 07:15:42 CST 2023</span><br><span class="line">2023-05-30 07:15:42,044 INFO fs.TestDFSIO:         Number of files: 10</span><br><span class="line">2023-05-30 07:15:42,044 INFO fs.TestDFSIO:  Total MBytes processed: 1280</span><br><span class="line">2023-05-30 07:15:42,044 INFO fs.TestDFSIO:       Throughput mb/sec: 46.63</span><br><span class="line">2023-05-30 07:15:42,044 INFO fs.TestDFSIO:  Average IO rate mb/sec: 55.56</span><br><span class="line">2023-05-30 07:15:42,044 INFO fs.TestDFSIO:   IO rate std deviation: 24.56</span><br><span class="line">2023-05-30 07:15:42,044 INFO fs.TestDFSIO:      Test <span class="built_in">exec</span> time sec: 33.43</span><br><span class="line">2023-05-30 07:15:42,044 INFO fs.TestDFSIO:</span><br></pre></td></tr></table></figure>

<h3 id="删除测试生成数据"><a href="#删除测试生成数据" class="headerlink" title="删除测试生成数据"></a>删除测试生成数据</h3><figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">hadoop jar /opt/module/hadoop-3.1.3/share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-3.1.3-tests.jar TestDFSIO -clean</span><br></pre></td></tr></table></figure>

<h1 id="HDFS-——-多目录"><a href="#HDFS-——-多目录" class="headerlink" title="HDFS —— 多目录"></a>HDFS —— 多目录</h1><h2 id="一、NameNode-多目录配置"><a href="#一、NameNode-多目录配置" class="headerlink" title="一、NameNode 多目录配置"></a>一、NameNode 多目录配置</h2><p>NameNode 的本地目录可以配置成多个，且每个目录存放内容相同，增加了可靠性</p>
<h3 id="具体配置"><a href="#具体配置" class="headerlink" title="具体配置"></a>具体配置</h3><p>hdfs-site.xml</p>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">&lt;!-- namenode多目录配置 --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>dfs.namenode.name.dir<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>file://$&#123;hadoop.tmp.dir&#125;/dfs/name1,file://$&#123;hadoop.tmp.dir&#125;/dfs/name2<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<p>注意：因为每台服务器节点的磁盘情况不同，所以这个配置添加后，可以选择不同步到其他节点，每个节点单独配置即可。</p>
<h3 id="停止集群删除data和log"><a href="#停止集群删除data和log" class="headerlink" title="停止集群删除data和log"></a>停止集群删除data和log</h3><figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">#三台节点分别执行</span></span><br><span class="line"><span class="built_in">rm</span> -rf ./data ./logs</span><br></pre></td></tr></table></figure>

<h3 id="重新格式化集群并启动"><a href="#重新格式化集群并启动" class="headerlink" title="重新格式化集群并启动"></a>重新格式化集群并启动</h3><figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">bin/hdfs namenode -format</span><br><span class="line"></span><br><span class="line">sbin/start-dfs.sh</span><br></pre></td></tr></table></figure>

<h3 id="查看"><a href="#查看" class="headerlink" title="查看"></a>查看</h3><figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line"><span class="built_in">pwd</span></span><br><span class="line">/opt/module/hadoop-3.1.3/data/tmp/dfs</span><br><span class="line"></span><br><span class="line">ll</span><br><span class="line">总用量 0</span><br><span class="line">drwxrwxr-x. 3 hadoop hadoop 21 5月  30 07:29 name1</span><br><span class="line">drwxrwxr-x. 3 hadoop hadoop 21 5月  30 07:29 name2</span><br></pre></td></tr></table></figure>

<h2 id="二、DataNode-多目录配置"><a href="#二、DataNode-多目录配置" class="headerlink" title="二、DataNode 多目录配置"></a>二、DataNode 多目录配置</h2><p>DataNode 可以配置成多个目录，每个目录存储的数据不一样（数据不是副本）</p>
<h3 id="具体配置-1"><a href="#具体配置-1" class="headerlink" title="具体配置"></a>具体配置</h3><p>hdfs-site.xml</p>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>dfs.datanode.data.dir<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>file://$&#123;hadoop.tmp.dir&#125;/dfs/data1,file://$&#123;hadoop.tmp.dir&#125;/dfs/data2<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<h3 id="停止集群删除data和log-1"><a href="#停止集群删除data和log-1" class="headerlink" title="停止集群删除data和log"></a>停止集群删除data和log</h3><figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">#三台节点分别执行</span></span><br><span class="line"><span class="built_in">rm</span> -rf ./data ./logs</span><br></pre></td></tr></table></figure>

<h3 id="重新格式化集群并启动-1"><a href="#重新格式化集群并启动-1" class="headerlink" title="重新格式化集群并启动"></a>重新格式化集群并启动</h3><figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">bin/hdfs namenode -format</span><br><span class="line"></span><br><span class="line">sbin/start-dfs.sh</span><br></pre></td></tr></table></figure>

<h3 id="查看结果"><a href="#查看结果" class="headerlink" title="查看结果"></a>查看结果</h3><figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="built_in">pwd</span></span><br><span class="line">/opt/module/hadoop-3.1.3/data/tmp/dfs</span><br><span class="line"></span><br><span class="line">ll</span><br><span class="line">总用量 0</span><br><span class="line">drwx------. 2 hadoop hadoop  6 5月  30 07:33 data1</span><br><span class="line">drwx------. 2 hadoop hadoop  6 5月  30 07:33 data2</span><br><span class="line">drwxrwxr-x. 3 hadoop hadoop 21 5月  30 07:34 name1</span><br><span class="line">drwxrwxr-x. 3 hadoop hadoop 21 5月  30 07:34 name2</span><br></pre></td></tr></table></figure>

<p>上传文件，然后查看data1&#x2F;data2中的文件内容</p>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">hadoop fs -put <span class="built_in">test</span>/input/word.txt /</span><br></pre></td></tr></table></figure>

<p>两个文件夹里面内容不一致，一个有数据，一个没有数据</p>
<h2 id="三、集群数据均衡之数据均衡"><a href="#三、集群数据均衡之数据均衡" class="headerlink" title="三、集群数据均衡之数据均衡"></a>三、集群数据均衡之数据均衡</h2><p>生产环境由于磁盘空间不足，往往会需要增加一块硬盘。</p>
<p>刚加载的硬盘没有数据，可以执行磁盘数据均衡命令（hadoop3.x新特性）</p>
<ol>
<li>生成均衡计划</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">hdfs diskbalancer -plan hadoop103</span><br></pre></td></tr></table></figure>

<ol start="2">
<li>执行均衡计划</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">hdfs diskbalancer -execute hadoop103.plan.json</span><br></pre></td></tr></table></figure>

<ol start="3">
<li>查看当前均衡任务的执行情况</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">hdfs diskbalancer -query hadoop103</span><br></pre></td></tr></table></figure>

<ol start="4">
<li>取消均衡任务</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">hdfs diskbalancer -cancel hadoop103.plan.json</span><br></pre></td></tr></table></figure>

<h1 id="HDFS-——-集群扩容缩容"><a href="#HDFS-——-集群扩容缩容" class="headerlink" title="HDFS —— 集群扩容缩容"></a>HDFS —— 集群扩容缩容</h1><h2 id="一、添加白名单"><a href="#一、添加白名单" class="headerlink" title="一、添加白名单"></a>一、添加白名单</h2><p>白名单：表示在白名单的主机 IP 地址可以用来存储数据。</p>
<p>在企业中，配置白名单，可以尽量防止黑客恶意访问攻击。</p>
<ol>
<li>在指定目录下，创建黑&#x2F;白名单</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line"><span class="built_in">pwd</span></span><br><span class="line">/opt/module/hadoop-3.1.3/etc/hadoop</span><br></pre></td></tr></table></figure>

<p>创建白名单</p>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line"><span class="built_in">touch</span> whitelist</span><br></pre></td></tr></table></figure>

<p>模拟示例，假如集群正常工作的节点为 hadoop102 和 hadoop103，将两个节点添加到白名单</p>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">hadoop102</span><br><span class="line">hadoop103</span><br></pre></td></tr></table></figure>

<p>创建黑名单</p>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line"><span class="built_in">touch</span> blacklist</span><br></pre></td></tr></table></figure>

<p>保持空文件即可</p>
<ol start="2">
<li>在 hdfs-site.xml 中，添加配置</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">&lt;!-- 白名单 --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>dfs.hosts<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>/opt/module/hadoop-3.1.3/etc/hadoop/whitelist<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br><span class="line"></span><br><span class="line"><span class="comment">&lt;!-- 黑名单 --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>dfs.hosts.exclude<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>/opt/module/hadoop-3.1.3/etc/hadoop/blacklist<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="3">
<li><p>将配置文件（hdfs-site.xml）和黑&#x2F;白名单（whitelist, blacklist）文件分发到集群其他节点</p>
</li>
<li><p>第一次添加白名单，必须重启集群，非第一次，只需要刷新 NameNode 节点即可</p>
</li>
<li><p>访问 NameNode 的 webui：<a target="_blank" rel="noopener" href="http://hadoop102:9870/">http://hadoop102:9870</a></p>
</li>
</ol>
<p><img src="/oct25-xxxxx/img/hadoop/17_%E9%85%8D%E7%BD%AE%E7%99%BD%E5%90%8D%E5%8D%95datanode.png"></p>
<ol start="6">
<li>在 hadoop104 执行上传数据命令失败</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">hadoop fs -put NOTICE.TXT /</span><br></pre></td></tr></table></figure>

<ol start="7">
<li>将 hadoop104 添加到白名单</li>
</ol>
<figure class="highlight plaintext"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">hadoop102</span><br><span class="line">hadoop103</span><br><span class="line">hadoop104</span><br></pre></td></tr></table></figure>

<ol start="8">
<li>刷新 NameNode</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">hdfs dfsadmin -refreshNodes</span><br><span class="line"></span><br><span class="line">Refresh nodes successful</span><br></pre></td></tr></table></figure>

<ol start="9">
<li>访问 NameNode 的 webui：<a target="_blank" rel="noopener" href="http://hadoop102:9870/">http://hadoop102:9870</a></li>
</ol>
<p><img src="/oct25-xxxxx/img/hadoop/18_%E7%99%BD%E5%90%8D%E5%8D%95%E6%B7%BB%E5%8A%A0hadoop104.png"></p>
<h2 id="二、服役新服务器"><a href="#二、服役新服务器" class="headerlink" title="二、服役新服务器"></a>二、服役新服务器</h2><h3 id="需求"><a href="#需求" class="headerlink" title="需求"></a>需求</h3><p>随着业务增长，数据量增大，原有数据节点的容量已不能满足存储需求，需要在原有集群基础上动态添加新的数据节点。</p>
<h3 id="环境准备"><a href="#环境准备" class="headerlink" title="环境准备"></a>环境准备</h3><p>需要另外准备一台虚拟机 hadoop105，确保安装 hadoop</p>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line">hadoop version</span><br><span class="line"></span><br><span class="line">Hadoop 3.1.3</span><br><span class="line">Source code repository https://gitbox.apache.org/repos/asf/hadoop.git -r ba631c436b806728f8ec2f54ab1e289526c90579</span><br><span class="line">Compiled by ztang on 2019-09-12T02:47Z</span><br><span class="line">Compiled with protoc 2.5.0</span><br><span class="line">From <span class="built_in">source</span> with checksum ec785077c385118ac91aadde5ec9799</span><br><span class="line">This <span class="built_in">command</span> was run using /opt/module/hadoop-3.1.3/share/hadoop/common/hadoop-common-3.1.3.jar</span><br></pre></td></tr></table></figure>

<p>配置 hadoop102 和 hadoop103 到 hadoop105 的 ssh 无密登录</p>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">#在hadoop102和hadoop103分别执行</span></span><br><span class="line">[xkee@hadoop102 ~]$ ssh-copy-id hadoop105</span><br></pre></td></tr></table></figure>

<h3 id="服役新节点具体步骤"><a href="#服役新节点具体步骤" class="headerlink" title="服役新节点具体步骤"></a>服役新节点具体步骤</h3><p>直接启动 DataNode 和 NodeManager 即可关联到集群</p>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">hdfs --daemon start datanode</span><br><span class="line">yarn --daemon start nodemanager</span><br></pre></td></tr></table></figure>

<p>查看 DataNode 节点</p>
<p><img src="/oct25-xxxxx/img/hadoop/19_%E6%B7%BB%E5%8A%A0%E6%96%B0%E8%8A%82%E7%82%B9datanode.png"></p>
<p>查看 NodeManager 节点</p>
<p><img src="/oct25-xxxxx/img/hadoop/20_%E6%B7%BB%E5%8A%A0%E6%96%B0%E8%8A%82%E7%82%B9nodemanager.png"></p>
<blockquote>
<p>思考：如果数据不均衡（hadoop105数据少，其他节点数据多），怎么处理？</p>
</blockquote>
<h2 id="三、服务器间数据均衡"><a href="#三、服务器间数据均衡" class="headerlink" title="三、服务器间数据均衡"></a>三、服务器间数据均衡</h2><p>在企业开发中，如果经常在 hadoop102 和 hadoop104 上提交任务，且副本数为 2，由于数据本地性原则，就会导致 hadoop102 和 hadoop104 数据过多，hadoop103 存储的数据量小。</p>
<p>另一种情况，就是新服役的服务器数据量比较少，需要执行集群均衡命令。</p>
<ol>
<li>开启数据均衡命令</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">sbin/start-balancer.sh -threshold 10</span><br></pre></td></tr></table></figure>

<p>参数10，代表的是集群中各个节点的磁盘空间利用率，相差不超过10%，可以根据实际情况调整</p>
<ol start="2">
<li>停止数据均衡</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">sbin/stop-balancer.sh</span><br></pre></td></tr></table></figure>

<p>注意：由于 HDFS 需要启动单独的 Rebalance Server 来执行 Rebalance 操作，所以尽量不要在 NameNode 上执行 start-balancer.sh，而是找一台比较空闲的机器执行。</p>
<h2 id="四、黑名单退役服务器"><a href="#四、黑名单退役服务器" class="headerlink" title="四、黑名单退役服务器"></a>四、黑名单退役服务器</h2><p>黑名单，表示在黑名单的主机 IP 地址不可以用来存储数据</p>
<p>在企业中，配置黑名单，用来退役服务器</p>
<ol>
<li>编辑 blacklist 文件，添加要退役的主机节点</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line"><span class="built_in">pwd</span></span><br><span class="line"></span><br><span class="line">/opt/module/hadoop-3.1.3/etc/hadoop</span><br></pre></td></tr></table></figure>

<p>添加 hadoop105，注意这里表示要将 hadoop105 退役，不再存储数据</p>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">hadoop105</span><br></pre></td></tr></table></figure>

<ol start="2">
<li>在 hdfs-site.xml 配置文件中，添加配置</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">&lt;!-- 黑名单 --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>dfs.hosts.exclude<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>/opt/module/hadoop-3.1.3/etc/hadoop/blacklist<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="3">
<li>将 hdfs-site.xml 和 blacklist 分发到其他节点</li>
<li>第一次添加黑名单，需要重启集群，非第一次，只需要刷新 NameNode 即可</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">hdfs dfsadmin -refreshNodes</span><br><span class="line"></span><br><span class="line">Refresh nodes successful</span><br></pre></td></tr></table></figure>

<ol start="5">
<li>查看浏览器</li>
</ol>
<p>decommission in progress 状态，就是退役中，说明数据节点正在复制到其他节点</p>
<p><img src="/oct25-xxxxx/img/hadoop/21_%E9%80%80%E5%BD%B9%E8%8A%82%E7%82%B9%E7%8A%B6%E6%80%81.png"></p>
<blockquote>
<p>注意：如果副本数为3，服役节点小于等于3，是不能退役成功的，需要修改副本数</p>
</blockquote>
<ol start="6">
<li><p>可以停止 hadoop105 的 DataNode 和 NodeManager</p>
</li>
<li><p>如果数据不均，可以使用命令实现集群数据均衡</p>
</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">sbin/start-balancer.sh -threshold 10</span><br></pre></td></tr></table></figure>

<h1 id="HDFS-——-存储优化"><a href="#HDFS-——-存储优化" class="headerlink" title="HDFS —— 存储优化"></a>HDFS —— 存储优化</h1><h2 id="一、纠删码"><a href="#一、纠删码" class="headerlink" title="一、纠删码"></a>一、纠删码</h2><h3 id="纠删码原理"><a href="#纠删码原理" class="headerlink" title="纠删码原理"></a>纠删码原理</h3><p>HDFS 默认情况下，一个文件有 3 个副本，这样提高了数据的可靠性，但也带来了 2 倍的冗余开销。Hadoop3.x 引入了纠删码，采用计算的方式，可以节省约 50％左右的存储空间。</p>
<ol>
<li>纠删码操作相关的命令</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br></pre></td><td class="code"><pre><span class="line">hdfs ec</span><br><span class="line"></span><br><span class="line">Usage: bin/hdfs ec [COMMAND]</span><br><span class="line">          [-listPolicies]</span><br><span class="line">          [-addPolicies -policyFile &lt;file&gt;]</span><br><span class="line">          [-getPolicy -path &lt;path&gt;]</span><br><span class="line">          [-removePolicy -policy &lt;policy&gt;]</span><br><span class="line">          [-setPolicy -path &lt;path&gt; [-policy &lt;policy&gt;] [-replicate]]</span><br><span class="line">          [-unsetPolicy -path &lt;path&gt;]</span><br><span class="line">          [-listCodecs]</span><br><span class="line">          [-enablePolicy -policy &lt;policy&gt;]</span><br><span class="line">          [-disablePolicy -policy &lt;policy&gt;]</span><br><span class="line">          [-<span class="built_in">help</span> &lt;command-name&gt;]</span><br></pre></td></tr></table></figure>

<ol start="2">
<li>查看当前支持的纠删码策略</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line">hdfs ec -listPolicies</span><br><span class="line"></span><br><span class="line">Erasure Coding Policies:</span><br><span class="line">ErasureCodingPolicy=[Name=RS-10-4-1024k, Schema=[ECSchema=[Codec=rs, numDataUnits=10, numParityUnits=4]], CellSize=1048576, Id=5], State=DISABLED</span><br><span class="line">ErasureCodingPolicy=[Name=RS-3-2-1024k, Schema=[ECSchema=[Codec=rs, numDataUnits=3, numParityUnits=2]], CellSize=1048576, Id=2], State=DISABLED</span><br><span class="line">ErasureCodingPolicy=[Name=RS-6-3-1024k, Schema=[ECSchema=[Codec=rs, numDataUnits=6, numParityUnits=3]], CellSize=1048576, Id=1], State=ENABLED</span><br><span class="line">ErasureCodingPolicy=[Name=RS-LEGACY-6-3-1024k, Schema=[ECSchema=[Codec=rs-legacy, numDataUnits=6, numParityUnits=3]], CellSize=1048576, Id=3], State=DISABLED</span><br><span class="line">ErasureCodingPolicy=[Name=XOR-2-1-1024k, Schema=[ECSchema=[Codec=xor, numDataUnits=2, numParityUnits=1]], CellSize=1048576, Id=4], State=DISABLED</span><br></pre></td></tr></table></figure>

<h3 id="纠删码策略解释"><a href="#纠删码策略解释" class="headerlink" title="纠删码策略解释"></a>纠删码策略解释</h3><p><strong>RS-3-2-1024k</strong>：使用 RS 编码，每 3 个数据单元，生成 2 个校验单元，共 5 个单元，也就是说：这 5 个单元中，只要有任意的 3 个单元存在（不管是数据单元还是校验单元，只要总数&#x3D;3），就可以得到原始数据。每个单元的大小是 1024k &#x3D; 1024 * 1024 &#x3D; 1048576。</p>
<p><strong>RS-10-4-1024k</strong>：使用 RS 编码，每 10 个数据单元（cell），生成 4 个校验单元，共 14个单元，也就是说：这 14 个单元中，只要有任意的 10 个单元存在（不管是数据单元还是校验单元，只要总数&#x3D;10），就可以得到原始数据。每个单元的大小是 1024k &#x3D; 1024 * 1024 &#x3D; 1048576。</p>
<p><strong>RS-6-3-1024k</strong>：使用 RS 编码，每 6 个数据单元，生成 3 个校验单元，共 9 个单元，也就是说：这 9 个单元中，只要有任意的 6 个单元存在（不管是数据单元还是校验单元，只要总数&#x3D;6），就可以得到原始数据。每个单元的大小是 1024k &#x3D; 1024 * 1024 &#x3D;1048576。</p>
<p><strong>RS-LEGACY-6-3-1024k</strong>：策略和上面的 RS-6-3-1024k 一样，只是编码的算法用的是 rs-legacy。</p>
<p><strong>XOR-2-1-1024k</strong>：使用 XOR 编码（速度比 RS 编码快），每 2 个数据单元，生成 1 个校验单元，共 3 个单元，也就是说：这 3 个单元中，只要有任意的 2 个单元存在（不管是数据单元还是校验单元，只要总数&#x3D; 2），就可以得到原始数据。每个单元的大小是 1024k &#x3D; 1024 * 1024 &#x3D; 1048576。</p>
<h3 id="纠删码案例实操"><a href="#纠删码案例实操" class="headerlink" title="纠删码案例实操"></a>纠删码案例实操</h3><p>纠删码策略是给具体一个路径设置。所有往此路径下存储的文件，都会执行此策略。</p>
<p>默认只开启对 RS-6-3-1024k 策略的支持，如要使用别的策略需要提前启用。</p>
<h4 id="ⅰ需求"><a href="#ⅰ需求" class="headerlink" title="ⅰ需求"></a>ⅰ需求</h4><p>将 &#x2F;input 目录设置为 RS-3-2-1024k 策略</p>
<h4 id="ⅱ具体步骤"><a href="#ⅱ具体步骤" class="headerlink" title="ⅱ具体步骤"></a>ⅱ具体步骤</h4><ol>
<li>开启对 RS-3-2-1024k 策略的支持</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br></pre></td><td class="code"><pre><span class="line">hdfs ec -enablePolicy -policy RS-3-2-1024k</span><br><span class="line"></span><br><span class="line">Erasure coding policy RS-3-2-1024k is enabled</span><br></pre></td></tr></table></figure>

<ol start="2">
<li>在 HDFS 创建目录，并设置 RS-3-2-1024k 策略</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">hdfs dfs -<span class="built_in">mkdir</span> /input</span><br><span class="line">hdfs ec -setPolicy -path /input -policy RS-3-2-1024k</span><br></pre></td></tr></table></figure>

<ol start="3">
<li>上传文件，并查看文件编码后的存储情况</li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">hdfs dfs -put web.log /input</span><br></pre></td></tr></table></figure>

<p>注意：上传的文件需要大于 2M 才可以看到效果（低于 2M，只有一个数据单元和两个校验单元）</p>
<ol start="4">
<li>查看存储路径的数据单元和校验单元，并删除数据测试</li>
</ol>
<h1 id="HDFS-——-故障排除"><a href="#HDFS-——-故障排除" class="headerlink" title="HDFS —— 故障排除"></a>HDFS —— 故障排除</h1><p>如果 NameNode 故障导致数据丢失，如何将数据恢复？</p>
<p>可以尝试使用 SecondaryNameNode 恢复部分数据</p>
<ol>
<li><strong>需求</strong>：模拟 NameNode 故障丢失数据，使用 SecondaryNameNode 恢复数据</li>
<li><strong>故障模拟</strong></li>
</ol>
<p>将 NameNode 进程杀死</p>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line"><span class="built_in">kill</span> -9 19886</span><br></pre></td></tr></table></figure>

<p>删除 NameNode 存储的数据</p>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line"><span class="built_in">rm</span> -rf /opt/module/hadoop-3.1.3/data/dfs/name/*</span><br></pre></td></tr></table></figure>

<ol start="3">
<li><strong>恢复数据</strong></li>
</ol>
<p>将 SecondaryNameNode 中数据拷贝到 NameNode 数据存储目录</p>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">scp -r xkee@hadoop104:/opt/module/hadoop-3.1.3/data/dfs/namesecondary/* /opt/module/hadoop-3.1.3/data/dfs/name/</span><br></pre></td></tr></table></figure>

<ol start="4">
<li><strong>重新启动 NameNode</strong></li>
</ol>
<figure class="highlight sh"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">hdfs --daemon start namenode</span><br></pre></td></tr></table></figure>

<ol start="5">
<li><strong>向集群上传文件测试</strong></li>
</ol>
<h1 id="HDFS-——-小文件归档"><a href="#HDFS-——-小文件归档" class="headerlink" title="HDFS —— 小文件归档"></a>HDFS —— 小文件归档</h1><h2 id="一、HDFS-小文件弊端"><a href="#一、HDFS-小文件弊端" class="headerlink" title="一、HDFS 小文件弊端"></a>一、HDFS 小文件弊端</h2><p>每个文件均按块存储，每个块的元数据存储在 NameNode 的内存中，因此 HDFS 存储小文件会非常低效。因为大量的小文件会耗尽 NameNode 中的大部分内存。但注意，存储小文件所需要的磁盘容量和数据块的大小无关。例如，一个 1MB 的文件设置为 128MB 的块存储，实际使用的是 1MB 的磁盘空间，而不是 128MB。</p>
<h2 id="二、HDFS-小文件解决方案"><a href="#二、HDFS-小文件解决方案" class="headerlink" title="二、HDFS 小文件解决方案"></a>二、HDFS 小文件解决方案</h2><h3 id="Hadoop-Archive"><a href="#Hadoop-Archive" class="headerlink" title="Hadoop Archive"></a>Hadoop Archive</h3><p>高效的将小文件放入 HDFS 块中的文件存档工具，能够将小文件打包成 HAR 文件，这样就减少了 NameNode 的内存使用；</p>
<p>Hadoop 归档操作，可以参考 <a href="HAR小文件归档章节">大数据之Hadoop03_分布式文件系统 HDFS 文档中 HAR小文件归档 章节</a></p>
<h3 id="Sequence-File"><a href="#Sequence-File" class="headerlink" title="Sequence File"></a>Sequence File</h3><p>Sequence File 由一系列的二进制 key&#x2F;value 组成，如果 key 为文件名，value 为文件内容，则可以将大批小文件合并成一个大文件；</p>
<h3 id="CombineFileInputFormat"><a href="#CombineFileInputFormat" class="headerlink" title="CombineFileInputFormat"></a>CombineFileInputFormat</h3><p>CombineFileInputFormat 是一种新的 InputFormat，用于将多个文件合并成一个单独的 Split，另外，它会考虑数据的存储位置；</p>
<h3 id="开启-JVM-重用"><a href="#开启-JVM-重用" class="headerlink" title="开启 JVM 重用"></a>开启 JVM 重用</h3><p>对于大量小文件 Job，可以开启 JVM 重用，会减少 45% 运行时间。</p>
<p>JVM 重用原理：一个 Map 运行在一个 JVM 上，开启重用的话，该 Map 在 JVM 上运行完毕后，JVM 继续运行其他 Map；</p>
<p>mapred-default.xml，参数设置在 10 - 20 之间</p>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">  <span class="tag">&lt;<span class="name">name</span>&gt;</span>mapreduce.job.jvm.numtasks<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">  <span class="tag">&lt;<span class="name">value</span>&gt;</span>1<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line">  <span class="tag">&lt;<span class="name">description</span>&gt;</span>How many tasks to run per jvm. If set to -1, there is</span><br><span class="line">  no limit. </span><br><span class="line">  <span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<h1 id="MapReduce-生产经验"><a href="#MapReduce-生产经验" class="headerlink" title="MapReduce 生产经验"></a>MapReduce 生产经验</h1><h2 id="一、MapReduce-运行慢的原因"><a href="#一、MapReduce-运行慢的原因" class="headerlink" title="一、MapReduce 运行慢的原因"></a>一、MapReduce 运行慢的原因</h2><p>MapReduce 程序效率的瓶颈在于两点：</p>
<ol>
<li>计算机性能<ul>
<li>CPU、内存、磁盘健康、网络</li>
</ul>
</li>
<li>I&#x2F;O 操作<ul>
<li>数据倾斜；</li>
<li>Map 和 Reduce 数设置不合理；</li>
<li>Map 运行时间太长，导致 Reduce 等待过久；</li>
<li>小文件过多；</li>
<li>大量的不可分块超大的文件；</li>
<li>spill 溢写次数过多；</li>
<li>Merge 次数过多等；</li>
</ul>
</li>
</ol>
<h2 id="二、MapReduce-常用调优参数"><a href="#二、MapReduce-常用调优参数" class="headerlink" title="二、MapReduce 常用调优参数"></a>二、MapReduce 常用调优参数</h2><ol>
<li>自定义分区，减少数据倾斜</li>
</ol>
<figure class="highlight plaintext"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">自定义分区类，继承 Partitioner 接口，重写 getPartition() 方法</span><br></pre></td></tr></table></figure>

<ol start="2">
<li>减少溢写次数</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br><span class="line">12</span><br><span class="line">13</span><br><span class="line">14</span><br><span class="line">15</span><br><span class="line">16</span><br><span class="line">17</span><br><span class="line">18</span><br><span class="line">19</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">&lt;!-- Shuffle的环形缓冲区大小，默认100M，可以提高到200M --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>mapreduce.task.io.sort.mb<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>100<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>The total amount of buffer memory to use while sorting </span><br><span class="line">        files, in megabytes.  
By default, gives each merge stream 1MB, which</span><br><span class="line">        should minimize seeks.<span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br><span class="line"></span><br><span class="line"><span class="comment">&lt;!-- 环形缓冲区溢出的阈值，默认80%，可以提高到90% --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>mapreduce.map.sort.spill.percent<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>0.80<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>The soft limit in the serialization buffer. Once reached, a</span><br><span class="line">        thread will begin to spill the contents to disk in the background. Note that</span><br><span class="line">        collection will not block if this threshold is exceeded while a spill is</span><br><span class="line">        already in progress, so spills may be larger than this threshold when it is</span><br><span class="line">        set to less than .5<span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="3">
<li>减少每次 Merge 合并次数</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">&lt;!-- merge合并数，默认10，可以提高到20（增加合并数，降低合并的次数） --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>mapreduce.task.io.sort.factor<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>10<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>The number of streams to merge at once while sorting</span><br><span class="line">        files.  This determines the number of open file handles.<span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="4">
<li>在不影响业务结果的前提下，使用 Combiner</li>
</ol>
<figure class="highlight java"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line">job.setCombinerClass(xxxReducer.class)</span><br></pre></td></tr></table></figure>

<ol start="5">
<li>采用 Snappy 或者 LZO 压缩</li>
</ol>
<figure class="highlight java"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br></pre></td><td class="code"><pre><span class="line">conf.setBoolean(<span class="string">&quot;mapreduce.map.output.compress&quot;</span>, <span class="literal">true</span>);</span><br><span class="line">conf.setClass(<span class="string">&quot;mapreduce.map.output.compress.codec&quot;</span>, SnappyCodec.class, CompressionCodec.class);</span><br></pre></td></tr></table></figure>

<ol start="6">
<li>设置 MapTask 内存上限</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">&lt;!-- MapTask默认内存上限为1024M，可以根据 128M 数据对应 1G 内存原则提高内存 --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>mapreduce.map.memory.mb<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>-1<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>The amount of memory to request from the scheduler for each</span><br><span class="line">        map task. If this is not specified or is non-positive, it is inferred from</span><br><span class="line">        mapreduce.map.java.opts and mapreduce.job.heap.memory-mb.ratio.</span><br><span class="line">        If java-opts are also not specified, we set it to 1024.</span><br><span class="line">    <span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="7">
<li>控制 MapTask 堆内存大小，内存不够，报 java.lang.OutOfMemoryError</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>mapreduce.map.java.opts<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span><span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>Java opts only for the child processes that are maps. If set,</span><br><span class="line">        this will be used instead of mapred.child.java.opts. If -Xmx is not set,</span><br><span class="line">        it is inferred from mapreduce.map.memory.mb and</span><br><span class="line">        mapreduce.job.heap.memory-mb.ratio.</span><br><span class="line">    <span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="8">
<li>修改 MapTask CPU 核数</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">&lt;!-- MapTask默认CPU核数为1，计算密集型任务，可以增加CPU核数 --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>mapreduce.map.cpu.vcores<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>1<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>The number of virtual cores to request from the scheduler for</span><br><span class="line">        each map task.</span><br><span class="line">    <span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="9">
<li>异常重试</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">&lt;!-- MapTask最大重试次数，重试超过该值，MapTask运行失败，默认值4，可以根据机器性能修改 --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>mapreduce.map.maxattempts<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>4<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>Expert: The maximum number of attempts per map task.</span><br><span class="line">        In other words, framework will try to execute a map task these many number</span><br><span class="line">        of times before giving up on it.</span><br><span class="line">    <span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="10">
<li>提高 Reduce 去 Map 中的拉取数据的并行数</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">&lt;!-- Reduce去Map中拉取数据的并行数，默认为5，可以提高到10 --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>mapreduce.reduce.shuffle.parallelcopies<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>5<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>The default number of parallel transfers run by reduce</span><br><span class="line">        during the copy(shuffle) phase.</span><br><span class="line">    <span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="11">
<li>调高 Reduce 的 Buffer 占比</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">&lt;!-- Buffer大小占Reduce可用内存的比例，默认为70%，可以提高到80% --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>mapreduce.reduce.shuffle.input.buffer.percent<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>0.70<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>The percentage of memory to be allocated from the maximum heap</span><br><span class="line">        size to storing map outputs during the shuffle.</span><br><span class="line">    <span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="12">
<li>Reduce数据达到多少，开始写入磁盘</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">&lt;!-- 默认66%，可以提高到70% --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>mapreduce.reduce.shuffle.merge.percent<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>0.66<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>The usage threshold at which an in-memory merge will be</span><br><span class="line">        initiated, expressed as a percentage of the total memory allocated to</span><br><span class="line">        storing in-memory map outputs, as defined by</span><br><span class="line">        mapreduce.reduce.shuffle.input.buffer.percent.</span><br><span class="line">    <span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="13">
<li>调整 ReduceTask 堆内存上限</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">&lt;!-- 调整ReduceTask堆内存上限，默认1024M，根据128M数据对应1G原则，适当调整 --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>mapreduce.reduce.memory.mb<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>-1<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>The amount of memory to request from the scheduler for each</span><br><span class="line">        reduce task. If this is not specified or is non-positive, it is inferred</span><br><span class="line">        from mapreduce.reduce.java.opts and mapreduce.job.heap.memory-mb.ratio.</span><br><span class="line">        If java-opts are also not specified, we set it to 1024.</span><br><span class="line">    <span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="14">
<li>控制 ReduceTask 堆内存大小，如果内存不够，报 java.lang.OutOfMemoryError</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>mapreduce.reduce.java.opts<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span><span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>Java opts only for the child processes that are reduces. If set,</span><br><span class="line">        this will be used instead of mapred.child.java.opts. If -Xmx is not set,</span><br><span class="line">        it is inferred from mapreduce.reduce.memory.mb and</span><br><span class="line">        mapreduce.job.heap.memory-mb.ratio.</span><br><span class="line">    <span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="15">
<li>修改 ReduceTask CPU 核数</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">&lt;!-- ReduceTask默认CPU核数为1，可以提高到2-4个 --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>mapreduce.reduce.cpu.vcores<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>1<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>The number of virtual cores to request from the scheduler for</span><br><span class="line">        each reduce task.</span><br><span class="line">    <span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="16">
<li>ReduceTask 最大重试次数</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">&lt;!-- ReduceTask最大重试次数，重试超过该值，ReduceTask运行失败，默认值4，可以根据机器性能修改 --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>mapreduce.reduce.maxattempts<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>4<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>Expert: The maximum number of attempts per reduce task.</span><br><span class="line">        In other words, framework will try to execute a reduce task these many number</span><br><span class="line">        of times before giving up on it.</span><br><span class="line">    <span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="17">
<li>Map、Reduce共存</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="comment">&lt;!-- 当MapTask完成比例达到当前值时，ReduceTask可以申请资源，默认是0.05 --&gt;</span></span><br><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>mapreduce.job.reduce.slowstart.completedmaps<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>0.05<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>Fraction of the number of maps in the job which should be </span><br><span class="line">        complete before reduces are scheduled for the job. </span><br><span class="line">    <span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="18">
<li>如果可以，尽量避免使用 Reduce</li>
</ol>
<h2 id="三、数据倾斜问题"><a href="#三、数据倾斜问题" class="headerlink" title="三、数据倾斜问题"></a>三、数据倾斜问题</h2><h3 id="数据倾斜现象"><a href="#数据倾斜现象" class="headerlink" title="数据倾斜现象"></a>数据倾斜现象</h3><p>数据频率倾斜——某一个区域的数据量要远远大于其他区域。</p>
<p>数据大小倾斜——部分记录的大小远远大于平均值。</p>
<h3 id="减少数据倾斜的方法"><a href="#减少数据倾斜的方法" class="headerlink" title="减少数据倾斜的方法"></a>减少数据倾斜的方法</h3><ol>
<li>检查是否是空值造成的数据倾斜<ul>
<li>生产环境，可以直接过滤掉空值。如果想保留空值，就自定义分区，将空值随便打散。最后再二次聚合</li>
</ul>
</li>
<li>能在 Map 阶段提前处理，最好先在 Map 阶段处理。如：Combine，MapJoin</li>
<li>设置多个 Reduce 个数</li>
</ol>
<h1 id="Yarn-生产经验"><a href="#Yarn-生产经验" class="headerlink" title="Yarn 生产经验"></a>Yarn 生产经验</h1><h2 id="一、常用参数调优"><a href="#一、常用参数调优" class="headerlink" title="一、常用参数调优"></a>一、常用参数调优</h2><h3 id="ResourceManager-相关"><a href="#ResourceManager-相关" class="headerlink" title="ResourceManager 相关"></a>ResourceManager 相关</h3><ol>
<li>处理调度器的线程数量</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line"></span><br></pre></td></tr></table></figure>

<ol start="2">
<li>配置调度器</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br></pre></td><td class="code"><pre><span class="line"></span><br></pre></td></tr></table></figure>

<h3 id="NodeManager-相关"><a href="#NodeManager-相关" class="headerlink" title="NodeManager 相关"></a>NodeManager 相关</h3><ol>
<li>NodeManager 使用内存数</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>Amount of physical memory, in MB, that can be allocated </span><br><span class="line">        for containers. If set to -1 and</span><br><span class="line">        yarn.nodemanager.resource.detect-hardware-capabilities is true, it is</span><br><span class="line">        automatically calculated(in case of Windows and Linux).</span><br><span class="line">        In other cases, the default is 8192MB.</span><br><span class="line">    <span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>yarn.nodemanager.resource.memory-mb<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>-1<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="2">
<li>NodeManager 使用 CPU 核数</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br><span class="line">10</span><br><span class="line">11</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>Number of vcores that can be allocated</span><br><span class="line">        for containers. This is used by the RM scheduler when allocating</span><br><span class="line">        resources for containers. This is not used to limit the number of</span><br><span class="line">        CPUs used by YARN containers. If it is set to -1 and</span><br><span class="line">        yarn.nodemanager.resource.detect-hardware-capabilities is true, it is</span><br><span class="line">        automatically determined from the hardware in case of Windows and Linux.</span><br><span class="line">        In other cases, number of vcores is 8 by default.<span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>yarn.nodemanager.resource.cpu-vcores<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>-1<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="3">
<li>是否将虚拟核数当作 CPU 核数</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>Flag to determine if logical processors(such as</span><br><span class="line">        hyperthreads) should be counted as cores. Only applicable on Linux</span><br><span class="line">        when yarn.nodemanager.resource.cpu-vcores is set to -1 and</span><br><span class="line">        yarn.nodemanager.resource.detect-hardware-capabilities is true.</span><br><span class="line">    <span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>yarn.nodemanager.resource.count-logical-processors-as-cores<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>false<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="4">
<li>是否开启物理内存检查限制</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>Whether physical memory limits will be enforced for</span><br><span class="line">        containers.<span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>yarn.nodemanager.pmem-check-enabled<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>true<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="5">
<li>是否开启虚拟内存检查限制</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>Whether virtual memory limits will be enforced for</span><br><span class="line">        containers.<span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>yarn.nodemanager.vmem-check-enabled<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>true<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="6">
<li>虚拟内存物理内存比例</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>Ratio between virtual memory to physical memory when</span><br><span class="line">        setting memory limits for containers. Container allocations are</span><br><span class="line">        expressed in terms of physical memory, and virtual memory usage</span><br><span class="line">        is allowed to exceed this allocation by this ratio.</span><br><span class="line">    <span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>yarn.nodemanager.vmem-pmem-ratio<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>2.1<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<h3 id="Container-相关"><a href="#Container-相关" class="headerlink" title="Container 相关"></a>Container 相关</h3><ol>
<li>容器最小内存</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>The minimum allocation for every container request at the RM</span><br><span class="line">        in MBs. Memory requests lower than this will be set to the value of this</span><br><span class="line">        property. Additionally, a node manager that is configured to have less memory</span><br><span class="line">        than this value will be shut down by the resource manager.<span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>yarn.scheduler.minimum-allocation-mb<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>1024<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="2">
<li>容器最大内存</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>The maximum allocation for every container request at the RM</span><br><span class="line">        in MBs. Memory requests higher than this will throw an</span><br><span class="line">        InvalidResourceRequestException.<span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>yarn.scheduler.maximum-allocation-mb<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>8192<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="3">
<li>容器最小核数</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br><span class="line">8</span><br><span class="line">9</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>The minimum allocation for every container request at the RM</span><br><span class="line">        in terms of virtual CPU cores. Requests lower than this will be set to the</span><br><span class="line">        value of this property. Additionally, a node manager that is configured to</span><br><span class="line">        have fewer virtual cores than this value will be shut down by the resource</span><br><span class="line">        manager.<span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>yarn.scheduler.minimum-allocation-vcores<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>1<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<ol start="4">
<li>容器最大核数</li>
</ol>
<figure class="highlight xml"><table><tr><td class="gutter"><pre><span class="line">1</span><br><span class="line">2</span><br><span class="line">3</span><br><span class="line">4</span><br><span class="line">5</span><br><span class="line">6</span><br><span class="line">7</span><br></pre></td><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">property</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">description</span>&gt;</span>The maximum allocation for every container request at the RM</span><br><span class="line">        in terms of virtual CPU cores. Requests higher than this will throw an</span><br><span class="line">        InvalidResourceRequestException.<span class="tag">&lt;/<span class="name">description</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">name</span>&gt;</span>yarn.scheduler.maximum-allocation-vcores<span class="tag">&lt;/<span class="name">name</span>&gt;</span></span><br><span class="line">    <span class="tag">&lt;<span class="name">value</span>&gt;</span>4<span class="tag">&lt;/<span class="name">value</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">property</span>&gt;</span></span><br></pre></td></tr></table></figure>

<h2 id="二、资源调度器使用"><a href="#二、资源调度器使用" class="headerlink" title="二、资源调度器使用"></a>二、资源调度器使用</h2><p> <a href="#Yarn-的资源调度器">大数据之Hadoop05_分布式资源调度框架 Yarn 文档中 Yarn的资源调度 章节</a></p>
</article><div class="post-copyright"><div class="post-copyright__author"><span class="post-copyright-meta">文章作者: </span><span class="post-copyright-info"><a href="mailto:undefined">Oct25-X</a></span></div><div class="post-copyright__type"><span class="post-copyright-meta">文章链接: </span><span class="post-copyright-info"><a href="https://oct25-x.atomgit.net/oct25-xxxxx/2023/06/17/07%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8BHadoop06_%E7%B3%BB%E7%BB%9F%E8%B0%83%E4%BC%98%E6%89%8B%E5%86%8C/">https://oct25-x.atomgit.net/oct25-xxxxx/2023/06/17/07%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8BHadoop06_%E7%B3%BB%E7%BB%9F%E8%B0%83%E4%BC%98%E6%89%8B%E5%86%8C/</a></span></div><div class="post-copyright__notice"><span class="post-copyright-meta">版权声明: </span><span class="post-copyright-info">本博客所有文章除特别声明外，均采用 <a href="https://creativecommons.org/licenses/by-nc-sa/4.0/" target="_blank">CC BY-NC-SA 4.0</a> 许可协议。转载请注明来自 <a href="https://oct25-x.atomgit.net/oct25-xxxxx" target="_blank">Oct25-X</a>！</span></div></div><div class="tag_share"><div class="post-meta__tag-list"><a class="post-meta__tags" href="/oct25-xxxxx/tags/Hadoop/">Hadoop</a></div><div class="post_share"><div class="social-share" data-image="/oct25-xxxxx/img/cover_img/ctt7.png" data-sites="facebook,twitter,wechat,weibo,qq"></div><link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/social-share.js/dist/css/share.min.css" media="print" onload="this.media='all'"><script src="https://cdn.jsdelivr.net/npm/social-share.js/dist/js/social-share.min.js" defer></script></div></div><nav class="pagination-post" id="pagination"><div class="prev-post pull-left"><a href="/oct25-xxxxx/2023/06/23/08%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8BHadoop07_Hadoop%20%E9%AB%98%E5%8F%AF%E7%94%A8%E9%83%A8%E7%BD%B2/"><img class="prev-cover" src="/oct25-xxxxx/img/cover_img/ctt8.png" onerror="onerror=null;src='/oct25-xxxxx/img/404.jpg'" alt="cover of previous post"><div class="pagination-info"><div class="label">上一篇</div><div class="prev_info">大数据之Hadoop07_Hadoop 
高可用部署</div></div></a></div><div class="next-post pull-right"><a href="/oct25-xxxxx/2023/06/15/06%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8BHadoop05_%E5%88%86%E5%B8%83%E5%BC%8F%E8%B5%84%E6%BA%90%E8%B0%83%E5%BA%A6%E6%A1%86%E6%9E%B6%20Yarn/"><img class="next-cover" src="/oct25-xxxxx/img/cover_img/ctt6.png" onerror="onerror=null;src='/oct25-xxxxx/img/404.jpg'" alt="cover of next post"><div class="pagination-info"><div class="label">下一篇</div><div class="next_info">大数据之Hadoop05_分布式资源调度框架 Yarn</div></div></a></div></nav><div class="relatedPosts"><div class="headline"><i class="fas fa-thumbs-up fa-fw"></i><span>相关推荐</span></div><div class="relatedPosts-list"><div><a href="/oct25-xxxxx/2023/05/28/02%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8BHadoop01_%E5%85%A5%E9%97%A8/" title="大数据之Hadoop01_入门"><img class="cover" src="/oct25-xxxxx/img/cover_img/ctt2.png" alt="cover"><div class="content is-center"><div class="date"><i class="far fa-calendar-alt fa-fw"></i> 2023-05-28</div><div class="title">大数据之Hadoop01_入门</div></div></a></div><div><a href="/oct25-xxxxx/2023/05/29/03%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8BHadoop02_%E9%9B%86%E7%BE%A4%E6%90%AD%E5%BB%BA/" title="大数据之Hadoop02_集群搭建"><img class="cover" src="/oct25-xxxxx/img/cover_img/ctt3.png" alt="cover"><div class="content is-center"><div class="date"><i class="far fa-calendar-alt fa-fw"></i> 2023-05-29</div><div class="title">大数据之Hadoop02_集群搭建</div></div></a></div><div><a href="/oct25-xxxxx/2023/06/02/04%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8BHadoop03_%E5%88%86%E5%B8%83%E5%BC%8F%E6%96%87%E4%BB%B6%E7%B3%BB%E7%BB%9F%20HDFS/" title="大数据之Hadoop03_分布式文件系统 HDFS"><img class="cover" src="/oct25-xxxxx/img/cover_img/ctt4.png" alt="cover"><div class="content is-center"><div class="date"><i class="far fa-calendar-alt fa-fw"></i> 2023-06-02</div><div class="title">大数据之Hadoop03_分布式文件系统 HDFS</div></div></a></div><div><a 
href="/oct25-xxxxx/2023/06/06/05%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8BHadoop04_%E5%88%86%E5%B8%83%E5%BC%8F%E8%AE%A1%E7%AE%97%E6%A1%86%E6%9E%B6%20MapReduce/" title="大数据之Hadoop04_分布式计算框架 MapReduce"><img class="cover" src="/oct25-xxxxx/img/cover_img/ctt5.png" alt="cover"><div class="content is-center"><div class="date"><i class="far fa-calendar-alt fa-fw"></i> 2023-06-06</div><div class="title">大数据之Hadoop04_分布式计算框架 MapReduce</div></div></a></div><div><a href="/oct25-xxxxx/2023/06/15/06%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8BHadoop05_%E5%88%86%E5%B8%83%E5%BC%8F%E8%B5%84%E6%BA%90%E8%B0%83%E5%BA%A6%E6%A1%86%E6%9E%B6%20Yarn/" title="大数据之Hadoop05_分布式资源调度框架 Yarn"><img class="cover" src="/oct25-xxxxx/img/cover_img/ctt6.png" alt="cover"><div class="content is-center"><div class="date"><i class="far fa-calendar-alt fa-fw"></i> 2023-06-15</div><div class="title">大数据之Hadoop05_分布式资源调度框架 Yarn</div></div></a></div><div><a href="/oct25-xxxxx/2023/06/23/08%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8BHadoop07_Hadoop%20%E9%AB%98%E5%8F%AF%E7%94%A8%E9%83%A8%E7%BD%B2/" title="大数据之Hadoop07_Hadoop 高可用部署"><img class="cover" src="/oct25-xxxxx/img/cover_img/ctt8.png" alt="cover"><div class="content is-center"><div class="date"><i class="far fa-calendar-alt fa-fw"></i> 2023-06-23</div><div class="title">大数据之Hadoop07_Hadoop 高可用部署</div></div></a></div></div></div></div><div class="aside-content" id="aside-content"><div class="card-widget card-info"><div class="is-center"><div class="avatar-img"><img src="/oct25-xxxxx/img/avatar.jpg" onerror="this.onerror=null;this.src='/oct25-xxxxx/img/friend_404.gif'" alt="avatar"/></div><div class="author-info__name">Oct25-X</div><div class="author-info__description"></div></div><div class="card-info-data is-center"><div class="card-info-data-item"><a href="/oct25-xxxxx/archives/"><div class="headline">文章</div><div class="length-num">52</div></a></div><div class="card-info-data-item"><a href="/oct25-xxxxx/tags/"><div class="headline">标签</div><div 
class="length-num">17</div></a></div><div class="card-info-data-item"><a href="/oct25-xxxxx/categories/"><div class="headline">分类</div><div class="length-num">8</div></a></div></div><a id="card-info-btn" target="_blank" rel="noopener" href="https://github.com/xxxxxx"><i class="fab fa-github"></i><span>Follow Me</span></a></div><div class="sticky_layout"><div class="card-widget" id="card-toc"><div class="item-headline"><i class="fas fa-stream"></i><span>目录</span><span class="toc-percentage"></span></div><div class="toc-content"><ol class="toc"><li class="toc-item toc-level-1"><a class="toc-link" href="#HDFS-%E2%80%94%E2%80%94-%E6%A0%B8%E5%BF%83%E5%8F%82%E6%95%B0"><span class="toc-number">1.</span> <span class="toc-text">HDFS —— 核心参数</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#%E4%B8%80%E3%80%81NameNode-%E5%86%85%E5%AD%98%E7%94%9F%E4%BA%A7%E9%85%8D%E7%BD%AE"><span class="toc-number">1.1.</span> <span class="toc-text">一、NameNode 内存生产配置</span></a><ol class="toc-child"><li class="toc-item toc-level-3"><a class="toc-link" href="#NameNode-%E5%86%85%E5%AD%98%E8%AE%A1%E7%AE%97"><span class="toc-number">1.1.1.</span> <span class="toc-text">NameNode 内存计算</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#%E9%85%8D%E7%BD%AE-NameNode-%E5%86%85%E5%AD%98"><span class="toc-number">1.1.2.</span> <span class="toc-text">配置 NameNode 内存</span></a></li></ol></li><li class="toc-item toc-level-2"><a class="toc-link" href="#%E4%BA%8C%E3%80%81NameNode-%E5%BF%83%E8%B7%B3%E5%B9%B6%E5%8F%91%E9%85%8D%E7%BD%AE"><span class="toc-number">1.2.</span> <span class="toc-text">二、NameNode 心跳并发配置</span></a></li></ol></li><li class="toc-item toc-level-1"><a class="toc-link" href="#HDFS-%E2%80%94%E2%80%94-%E9%9B%86%E7%BE%A4%E5%8E%8B%E6%B5%8B"><span class="toc-number">2.</span> <span class="toc-text">HDFS —— 集群压测</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" 
href="#%E4%B8%80%E3%80%81%E6%B5%8B%E8%AF%95-HDFS-%E5%86%99%E6%80%A7%E8%83%BD"><span class="toc-number">2.1.</span> <span class="toc-text">一、测试 HDFS 写性能</span></a><ol class="toc-child"><li class="toc-item toc-level-3"><a class="toc-link" href="#%E6%B5%8B%E8%AF%95"><span class="toc-number">2.1.1.</span> <span class="toc-text">测试</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#%E6%B5%8B%E8%AF%95%E5%BC%82%E5%B8%B8"><span class="toc-number">2.1.2.</span> <span class="toc-text">测试异常</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#%E6%B5%8B%E8%AF%95%E7%BB%93%E6%9E%9C%E5%88%86%E6%9E%90"><span class="toc-number">2.1.3.</span> <span class="toc-text">测试结果分析</span></a></li></ol></li><li class="toc-item toc-level-2"><a class="toc-link" href="#%E4%BA%8C%E3%80%81%E6%B5%8B%E8%AF%95-HDFS-%E8%AF%BB%E6%80%A7%E8%83%BD"><span class="toc-number">2.2.</span> <span class="toc-text">二、测试 HDFS 读性能</span></a><ol class="toc-child"><li class="toc-item toc-level-3"><a class="toc-link" href="#%E6%B5%8B%E8%AF%95-1"><span class="toc-number">2.2.1.</span> <span class="toc-text">测试</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#%E5%88%A0%E9%99%A4%E6%B5%8B%E8%AF%95%E7%94%9F%E6%88%90%E6%95%B0%E6%8D%AE"><span class="toc-number">2.2.2.</span> <span class="toc-text">删除测试生成数据</span></a></li></ol></li></ol></li><li class="toc-item toc-level-1"><a class="toc-link" href="#HDFS-%E2%80%94%E2%80%94-%E5%A4%9A%E7%9B%AE%E5%BD%95"><span class="toc-number">3.</span> <span class="toc-text">HDFS —— 多目录</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#%E4%B8%80%E3%80%81NameNode-%E5%A4%9A%E7%9B%AE%E5%BD%95%E9%85%8D%E7%BD%AE"><span class="toc-number">3.1.</span> <span class="toc-text">一、NameNode 多目录配置</span></a><ol class="toc-child"><li class="toc-item toc-level-3"><a class="toc-link" href="#%E5%85%B7%E4%BD%93%E9%85%8D%E7%BD%AE"><span class="toc-number">3.1.1.</span> <span 
class="toc-text">具体配置</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#%E5%81%9C%E6%AD%A2%E9%9B%86%E7%BE%A4%E5%88%A0%E9%99%A4data%E5%92%8Clog"><span class="toc-number">3.1.2.</span> <span class="toc-text">停止集群删除data和log</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#%E9%87%8D%E6%96%B0%E6%A0%BC%E5%BC%8F%E5%8C%96%E9%9B%86%E7%BE%A4%E5%B9%B6%E5%90%AF%E5%8A%A8"><span class="toc-number">3.1.3.</span> <span class="toc-text">重新格式化集群并启动</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#%E6%9F%A5%E7%9C%8B"><span class="toc-number">3.1.4.</span> <span class="toc-text">查看</span></a></li></ol></li><li class="toc-item toc-level-2"><a class="toc-link" href="#%E4%BA%8C%E3%80%81DataNode-%E5%A4%9A%E7%9B%AE%E5%BD%95%E9%85%8D%E7%BD%AE"><span class="toc-number">3.2.</span> <span class="toc-text">二、DataNode 多目录配置</span></a><ol class="toc-child"><li class="toc-item toc-level-3"><a class="toc-link" href="#%E5%85%B7%E4%BD%93%E9%85%8D%E7%BD%AE-1"><span class="toc-number">3.2.1.</span> <span class="toc-text">具体配置</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#%E5%81%9C%E6%AD%A2%E9%9B%86%E7%BE%A4%E5%88%A0%E9%99%A4data%E5%92%8Clog-1"><span class="toc-number">3.2.2.</span> <span class="toc-text">停止集群删除data和log</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#%E9%87%8D%E6%96%B0%E6%A0%BC%E5%BC%8F%E5%8C%96%E9%9B%86%E7%BE%A4%E5%B9%B6%E5%90%AF%E5%8A%A8-1"><span class="toc-number">3.2.3.</span> <span class="toc-text">重新格式化集群并启动</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#%E6%9F%A5%E7%9C%8B%E7%BB%93%E6%9E%9C"><span class="toc-number">3.2.4.</span> <span class="toc-text">查看结果</span></a></li></ol></li><li class="toc-item toc-level-2"><a class="toc-link" href="#%E4%B8%89%E3%80%81%E9%9B%86%E7%BE%A4%E6%95%B0%E6%8D%AE%E5%9D%87%E8%A1%A1%E4%B9%8B%E6%95%B0%E6%8D%AE%E5%9D%87%E8%A1%A1"><span class="toc-number">3.3.</span> <span 
class="toc-text">三、集群数据均衡之数据均衡</span></a></li></ol></li><li class="toc-item toc-level-1"><a class="toc-link" href="#HDFS-%E2%80%94%E2%80%94-%E9%9B%86%E7%BE%A4%E6%89%A9%E5%AE%B9%E7%BC%A9%E5%AE%B9"><span class="toc-number">4.</span> <span class="toc-text">HDFS —— 集群扩容缩容</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#%E4%B8%80%E3%80%81%E6%B7%BB%E5%8A%A0%E7%99%BD%E5%90%8D%E5%8D%95"><span class="toc-number">4.1.</span> <span class="toc-text">一、添加白名单</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#%E4%BA%8C%E3%80%81%E6%9C%8D%E5%BD%B9%E6%96%B0%E6%9C%8D%E5%8A%A1%E5%99%A8"><span class="toc-number">4.2.</span> <span class="toc-text">二、服役新服务器</span></a><ol class="toc-child"><li class="toc-item toc-level-3"><a class="toc-link" href="#%E9%9C%80%E6%B1%82"><span class="toc-number">4.2.1.</span> <span class="toc-text">需求</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#%E7%8E%AF%E5%A2%83%E5%87%86%E5%A4%87"><span class="toc-number">4.2.2.</span> <span class="toc-text">环境准备</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#%E6%9C%8D%E5%BD%B9%E6%96%B0%E8%8A%82%E7%82%B9%E5%85%B7%E4%BD%93%E6%AD%A5%E9%AA%A4"><span class="toc-number">4.2.3.</span> <span class="toc-text">服役新节点具体步骤</span></a></li></ol></li><li class="toc-item toc-level-2"><a class="toc-link" href="#%E4%B8%89%E3%80%81%E6%9C%8D%E5%8A%A1%E5%99%A8%E9%97%B4%E6%95%B0%E6%8D%AE%E5%9D%87%E8%A1%A1"><span class="toc-number">4.3.</span> <span class="toc-text">三、服务器间数据均衡</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#%E5%9B%9B%E3%80%81%E9%BB%91%E5%90%8D%E5%8D%95%E9%80%80%E5%BD%B9%E6%9C%8D%E5%8A%A1%E5%99%A8"><span class="toc-number">4.4.</span> <span class="toc-text">四、黑名单退役服务器</span></a></li></ol></li><li class="toc-item toc-level-1"><a class="toc-link" href="#HDFS-%E2%80%94%E2%80%94-%E5%AD%98%E5%82%A8%E4%BC%98%E5%8C%96"><span class="toc-number">5.</span> <span class="toc-text">HDFS —— 
存储优化</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#%E4%B8%80%E3%80%81%E7%BA%A0%E5%88%A0%E7%A0%81"><span class="toc-number">5.1.</span> <span class="toc-text">一、纠删码</span></a><ol class="toc-child"><li class="toc-item toc-level-3"><a class="toc-link" href="#%E7%BA%A0%E5%88%A0%E7%A0%81%E5%8E%9F%E7%90%86"><span class="toc-number">5.1.1.</span> <span class="toc-text">纠删码原理</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#%E7%BA%A0%E5%88%A0%E7%A0%81%E7%AD%96%E7%95%A5%E8%A7%A3%E9%87%8A"><span class="toc-number">5.1.2.</span> <span class="toc-text">纠删码策略解释</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#%E7%BA%A0%E5%88%A0%E7%A0%81%E6%A1%88%E4%BE%8B%E5%AE%9E%E6%93%8D"><span class="toc-number">5.1.3.</span> <span class="toc-text">纠删码案例实操</span></a><ol class="toc-child"><li class="toc-item toc-level-4"><a class="toc-link" href="#%E2%85%B0%E9%9C%80%E6%B1%82"><span class="toc-number">5.1.3.1.</span> <span class="toc-text">ⅰ需求</span></a></li><li class="toc-item toc-level-4"><a class="toc-link" href="#%E2%85%B1%E5%85%B7%E4%BD%93%E6%AD%A5%E9%AA%A4"><span class="toc-number">5.1.3.2.</span> <span class="toc-text">ⅱ具体步骤</span></a></li></ol></li></ol></li></ol></li><li class="toc-item toc-level-1"><a class="toc-link" href="#HDFS-%E2%80%94%E2%80%94-%E6%95%85%E9%9A%9C%E6%8E%92%E9%99%A4"><span class="toc-number">6.</span> <span class="toc-text">HDFS —— 故障排除</span></a></li><li class="toc-item toc-level-1"><a class="toc-link" href="#HDFS-%E2%80%94%E2%80%94-%E5%B0%8F%E6%96%87%E4%BB%B6%E5%BD%92%E6%A1%A3"><span class="toc-number">7.</span> <span class="toc-text">HDFS —— 小文件归档</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#%E4%B8%80%E3%80%81HDFS-%E5%B0%8F%E6%96%87%E4%BB%B6%E5%BC%8A%E7%AB%AF"><span class="toc-number">7.1.</span> <span class="toc-text">一、HDFS 小文件弊端</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" 
href="#%E4%BA%8C%E3%80%81HDFS-%E5%B0%8F%E6%96%87%E4%BB%B6%E8%A7%A3%E5%86%B3%E6%96%B9%E6%A1%88"><span class="toc-number">7.2.</span> <span class="toc-text">二、HDFS 小文件解决方案</span></a><ol class="toc-child"><li class="toc-item toc-level-3"><a class="toc-link" href="#Hadoop-Archive"><span class="toc-number">7.2.1.</span> <span class="toc-text">Hadoop Archive</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#Sequence-File"><span class="toc-number">7.2.2.</span> <span class="toc-text">Sequence File</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#CombineFileInputFormat"><span class="toc-number">7.2.3.</span> <span class="toc-text">CombineFileInputFormat</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#%E5%BC%80%E5%90%AF-JVM-%E9%87%8D%E7%94%A8"><span class="toc-number">7.2.4.</span> <span class="toc-text">开启 JVM 重用</span></a></li></ol></li></ol></li><li class="toc-item toc-level-1"><a class="toc-link" href="#MapReduce-%E7%94%9F%E4%BA%A7%E7%BB%8F%E9%AA%8C"><span class="toc-number">8.</span> <span class="toc-text">MapReduce 生产经验</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#%E4%B8%80%E3%80%81MapReduce-%E8%BF%90%E8%A1%8C%E6%85%A2%E7%9A%84%E5%8E%9F%E5%9B%A0"><span class="toc-number">8.1.</span> <span class="toc-text">一、MapReduce 运行慢的原因</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#%E4%BA%8C%E3%80%81MapReduce-%E5%B8%B8%E7%94%A8%E8%B0%83%E4%BC%98%E5%8F%82%E6%95%B0"><span class="toc-number">8.2.</span> <span class="toc-text">二、MapReduce 常用调优参数</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#%E4%B8%89%E3%80%81%E6%95%B0%E6%8D%AE%E5%80%BE%E6%96%9C%E9%97%AE%E9%A2%98"><span class="toc-number">8.3.</span> <span class="toc-text">三、数据倾斜问题</span></a><ol class="toc-child"><li class="toc-item toc-level-3"><a class="toc-link" href="#%E6%95%B0%E6%8D%AE%E5%80%BE%E6%96%9C%E7%8E%B0%E8%B1%A1"><span 
class="toc-number">8.3.1.</span> <span class="toc-text">数据倾斜现象</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#%E5%87%8F%E5%B0%91%E6%95%B0%E6%8D%AE%E5%80%BE%E6%96%9C%E7%9A%84%E6%96%B9%E6%B3%95"><span class="toc-number">8.3.2.</span> <span class="toc-text">减少数据倾斜的方法</span></a></li></ol></li></ol></li><li class="toc-item toc-level-1"><a class="toc-link" href="#Yarn-%E7%94%9F%E4%BA%A7%E7%BB%8F%E9%AA%8C"><span class="toc-number">9.</span> <span class="toc-text">Yarn 生产经验</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#%E4%B8%80%E3%80%81%E5%B8%B8%E7%94%A8%E5%8F%82%E6%95%B0%E8%B0%83%E4%BC%98"><span class="toc-number">9.1.</span> <span class="toc-text">一、常用参数调优</span></a><ol class="toc-child"><li class="toc-item toc-level-3"><a class="toc-link" href="#ResourceManager-%E7%9B%B8%E5%85%B3"><span class="toc-number">9.1.1.</span> <span class="toc-text">ResourceManager 相关</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#NodeManager-%E7%9B%B8%E5%85%B3"><span class="toc-number">9.1.2.</span> <span class="toc-text">NodeManager 相关</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#Container-%E7%9B%B8%E5%85%B3"><span class="toc-number">9.1.3.</span> <span class="toc-text">Container 相关</span></a></li></ol></li><li class="toc-item toc-level-2"><a class="toc-link" href="#%E4%BA%8C%E3%80%81%E8%B5%84%E6%BA%90%E8%B0%83%E5%BA%A6%E5%99%A8%E4%BD%BF%E7%94%A8"><span class="toc-number">9.2.</span> <span class="toc-text">二、资源调度器使用</span></a></li></ol></li></ol></div></div><div class="card-widget card-recent-post"><div class="item-headline"><i class="fas fa-history"></i><span>最新文章</span></div><div class="aside-list"><div class="aside-list-item"><a class="thumbnail" 
href="/oct25-xxxxx/2024/02/24/52%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8B%E5%A4%9A%E8%8C%83%E5%BC%8F%E7%BC%96%E7%A8%8B%E8%AF%AD%E8%A8%80Scala09_%E9%9A%90%E5%BC%8F%E8%BD%AC%E6%8D%A2%E3%80%81%E5%BC%82%E5%B8%B8%E5%92%8C%E6%B3%9B%E5%9E%8B/" title="大数据之多范式编程语言 Scala09_隐式转换、异常和泛型"><img src="/oct25-xxxxx/img/cover_img/ctt52.png" onerror="this.onerror=null;this.src='/oct25-xxxxx/img/404.jpg'" alt="大数据之多范式编程语言 Scala09_隐式转换、异常和泛型"/></a><div class="content"><a class="title" href="/oct25-xxxxx/2024/02/24/52%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8B%E5%A4%9A%E8%8C%83%E5%BC%8F%E7%BC%96%E7%A8%8B%E8%AF%AD%E8%A8%80Scala09_%E9%9A%90%E5%BC%8F%E8%BD%AC%E6%8D%A2%E3%80%81%E5%BC%82%E5%B8%B8%E5%92%8C%E6%B3%9B%E5%9E%8B/" title="大数据之多范式编程语言 Scala09_隐式转换、异常和泛型">大数据之多范式编程语言 Scala09_隐式转换、异常和泛型</a><time datetime="2024-02-24T04:55:34.000Z" title="发表于 2024-02-24 12:55:34">2024-02-24</time></div></div><div class="aside-list-item"><a class="thumbnail" href="/oct25-xxxxx/2024/02/24/51%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8B%E5%A4%9A%E8%8C%83%E5%BC%8F%E7%BC%96%E7%A8%8B%E8%AF%AD%E8%A8%80Scala08_%E6%A8%A1%E5%BC%8F%E5%8C%B9%E9%85%8D/" title="大数据之多范式编程语言 Scala08_模式匹配"><img src="/oct25-xxxxx/img/cover_img/ctt51.png" onerror="this.onerror=null;this.src='/oct25-xxxxx/img/404.jpg'" alt="大数据之多范式编程语言 Scala08_模式匹配"/></a><div class="content"><a class="title" href="/oct25-xxxxx/2024/02/24/51%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8B%E5%A4%9A%E8%8C%83%E5%BC%8F%E7%BC%96%E7%A8%8B%E8%AF%AD%E8%A8%80Scala08_%E6%A8%A1%E5%BC%8F%E5%8C%B9%E9%85%8D/" title="大数据之多范式编程语言 Scala08_模式匹配">大数据之多范式编程语言 Scala08_模式匹配</a><time datetime="2024-02-24T00:46:12.000Z" title="发表于 2024-02-24 08:46:12">2024-02-24</time></div></div><div class="aside-list-item"><a class="thumbnail" href="/oct25-xxxxx/2024/02/22/50%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8B%E5%A4%9A%E8%8C%83%E5%BC%8F%E7%BC%96%E7%A8%8B%E8%AF%AD%E8%A8%80Scala07_%E9%9B%86%E5%90%88/" title="大数据之多范式编程语言 Scala07_集合"><img src="/oct25-xxxxx/img/cover_img/ctt50.png" 
onerror="this.onerror=null;this.src='/oct25-xxxxx/img/404.jpg'" alt="大数据之多范式编程语言 Scala07_集合"/></a><div class="content"><a class="title" href="/oct25-xxxxx/2024/02/22/50%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8B%E5%A4%9A%E8%8C%83%E5%BC%8F%E7%BC%96%E7%A8%8B%E8%AF%AD%E8%A8%80Scala07_%E9%9B%86%E5%90%88/" title="大数据之多范式编程语言 Scala07_集合">大数据之多范式编程语言 Scala07_集合</a><time datetime="2024-02-22T08:16:10.000Z" title="发表于 2024-02-22 16:16:10">2024-02-22</time></div></div><div class="aside-list-item"><a class="thumbnail" href="/oct25-xxxxx/2024/02/21/49%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8B%E5%A4%9A%E8%8C%83%E5%BC%8F%E7%BC%96%E7%A8%8B%E8%AF%AD%E8%A8%80Scala06_%E9%9D%A2%E5%90%91%E5%AF%B9%E8%B1%A1/" title="大数据之多范式编程语言 Scala06_面向对象"><img src="/oct25-xxxxx/img/cover_img/ctt49.png" onerror="this.onerror=null;this.src='/oct25-xxxxx/img/404.jpg'" alt="大数据之多范式编程语言 Scala06_面向对象"/></a><div class="content"><a class="title" href="/oct25-xxxxx/2024/02/21/49%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8B%E5%A4%9A%E8%8C%83%E5%BC%8F%E7%BC%96%E7%A8%8B%E8%AF%AD%E8%A8%80Scala06_%E9%9D%A2%E5%90%91%E5%AF%B9%E8%B1%A1/" title="大数据之多范式编程语言 Scala06_面向对象">大数据之多范式编程语言 Scala06_面向对象</a><time datetime="2024-02-21T07:01:10.000Z" title="发表于 2024-02-21 15:01:10">2024-02-21</time></div></div><div class="aside-list-item"><a class="thumbnail" href="/oct25-xxxxx/2024/02/20/48%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8B%E5%A4%9A%E8%8C%83%E5%BC%8F%E7%BC%96%E7%A8%8B%E8%AF%AD%E8%A8%80Scala05_%E5%87%BD%E6%95%B0%E5%BC%8F%E7%BC%96%E7%A8%8B/" title="大数据之多范式编程语言 Scala05_函数式编程"><img src="/oct25-xxxxx/img/cover_img/ctt48.png" onerror="this.onerror=null;this.src='/oct25-xxxxx/img/404.jpg'" alt="大数据之多范式编程语言 Scala05_函数式编程"/></a><div class="content"><a class="title" href="/oct25-xxxxx/2024/02/20/48%E5%A4%A7%E6%95%B0%E6%8D%AE%E4%B9%8B%E5%A4%9A%E8%8C%83%E5%BC%8F%E7%BC%96%E7%A8%8B%E8%AF%AD%E8%A8%80Scala05_%E5%87%BD%E6%95%B0%E5%BC%8F%E7%BC%96%E7%A8%8B/" title="大数据之多范式编程语言 Scala05_函数式编程">大数据之多范式编程语言 Scala05_函数式编程</a><time datetime="2024-02-20T12:05:11.000Z" 
title="发表于 2024-02-20 20:05:11">2024-02-20</time></div></div></div></div></div></div></main><footer id="footer" style="background-image: url('/oct25-xxxxx/img/top_img/top_bg6.jpg')"><div id="footer-wrap"><div class="copyright">&copy;2020 - 2024 By Oct25-X</div><div class="framework-info"><span>框架 </span><a target="_blank" rel="noopener" href="https://hexo.io">Hexo</a><span class="footer-separator">|</span><span>主题 </span><a target="_blank" rel="noopener" href="https://github.com/jerryc127/hexo-theme-butterfly">Butterfly</a></div></div></footer></div><div id="rightside"><div id="rightside-config-hide"><button id="readmode" type="button" title="阅读模式"><i class="fas fa-book-open"></i></button><button id="translateLink" type="button" title="简繁转换">繁</button><button id="darkmode" type="button" title="浅色和深色模式转换"><i class="fas fa-adjust"></i></button><button id="hide-aside-btn" type="button" title="单栏和双栏切换"><i class="fas fa-arrows-alt-h"></i></button></div><div id="rightside-config-show"><button id="rightside_config" type="button" title="设置"><i class="fas fa-cog fa-spin"></i></button><button class="close" id="mobile-toc-button" type="button" title="目录"><i class="fas fa-list-ul"></i></button><button id="go-up" type="button" title="回到顶部"><i class="fas fa-arrow-up"></i></button></div></div><div id="local-search"><div class="search-dialog"><nav class="search-nav"><span class="search-dialog-title">本地搜索</span><span id="loading-status"></span><button class="search-close-button"><i class="fas fa-times"></i></button></nav><div class="is-center" id="loading-database"><i class="fas fa-spinner fa-pulse"></i><span>  数据库加载中</span></div><div class="search-wrap"><div id="local-search-input"><div class="local-search-box"><input class="local-search-box--input" placeholder="搜索文章" type="text"/></div></div><hr/><div id="local-search-results"></div></div></div><div id="search-mask"></div></div><div id="rightMenu"><div class="rightMenu-group rightMenu-small"><div class="rightMenu-item" 
id="menu-backward"><i class="fa-solid fa-arrow-left"></i></div><div class="rightMenu-item" id="menu-forward"><i class="fa-solid fa-arrow-right"></i></div><div class="rightMenu-item" id="menu-refresh"><i class="fa-solid fa-arrow-rotate-right"></i></div><div class="rightMenu-item" id="menu-home"><i class="fa-solid fa-house"></i></div></div><div class="rightMenu-group rightMenu-line rightMenuOther"><a class="rightMenu-item menu-link" href="/oct25-xxxxx/archives/"><i class="fa-solid fa-archive"></i><span>文章归档</span></a><a class="rightMenu-item menu-link" href="/oct25-xxxxx/categories/"><i class="fa-solid fa-folder-open"></i><span>文章分类</span></a><a class="rightMenu-item menu-link" href="/oct25-xxxxx/tags/"><i class="fa-solid fa-tags"></i><span>文章标签</span></a></div><div class="rightMenu-group rightMenu-line rightMenuNormal"><a class="rightMenu-item" id="menu-translate"><i class="fa-solid fa-earth-asia"></i><span>繁简切换</span></a><div class="rightMenu-item" id="menu-darkmode"><i class="fa-solid fa-moon"></i><span>切换模式</span></div></div></div><div id="rightmenu-mask"></div><div><script src="/oct25-xxxxx/js/utils.js"></script><script src="/oct25-xxxxx/js/main.js"></script><script src="/oct25-xxxxx/js/tw_cn.js"></script><script src="https://cdn.jsdelivr.net/npm/@fancyapps/ui/dist/fancybox.umd.js"></script><script src="/oct25-xxxxx/js/search/local-search.js"></script><div class="js-pjax"></div><script defer src="https://npm.elemecdn.com/jquery@latest/dist/jquery.min.js"></script><script defer data-pjax src="/oct25-xxxxx/js/rightMenu.js"></script><script defer id="ribbon" src="https://cdn.jsdelivr.net/npm/butterfly-extsrc@1/dist/canvas-ribbon.min.js" size="150" alpha="0.6" zIndex="-1" mobile="false" data-click="true"></script><script src="https://cdn.jsdelivr.net/npm/butterfly-extsrc@1/dist/activate-power-mode.min.js"></script><script>POWERMODE.colorful = true;
// Configure activate-power-mode (POWERMODE is the global installed by the CDN
// script loaded just above): shake effect on, disabled on mobile devices.
Object.assign(POWERMODE, { shake: true, mobile: false });
// POWERMODE itself is the event handler — trigger the effect on every input event.
document.body.addEventListener('input', POWERMODE);
</script><script id="click-heart" src="https://cdn.jsdelivr.net/npm/butterfly-extsrc@1/dist/click-heart.min.js" async mobile="false"></script><script async data-pjax src="https://busuanzi.ibruce.info/busuanzi/2.3/busuanzi.pure.mini.js"></script></div></body></html>