<!DOCTYPE html><html lang="zh-CN" data-theme="light"><head><meta charset="UTF-8"><meta http-equiv="X-UA-Compatible" content="IE=edge"><meta name="viewport" content="width=device-width,initial-scale=1"><title>深入浅出学习MapReduce | 全栈进阶那些事</title><meta name="keywords" content="Hadoop,MapReduce"><meta name="author" content="liuyuantao"><meta name="copyright" content="liuyuantao"><meta name="format-detection" content="telephone=no"><meta name="theme-color" content="#ffffff"><meta name="description" content="本文是基于CentOS 7.3系统环境，进行MapReduce的学习和使用本文是基于CentOS 7.3系统环境，进行MapReduce的学习和使用  1. MapReduce简介1.1 MapReduce定义MapReduce是一个分布式运算程序的编程框架，是基于Hadoop的数据分析计算的核心框架 1.2 MapReduce处理过程主要分为两个阶段：Map和Reduce  Map负责把一个任">
<meta property="og:type" content="article">
<meta property="og:title" content="深入浅出学习MapReduce">
<meta property="og:url" content="https://liuyuantao.gitee.io/2021/11/12/56ac102284d9.html">
<meta property="og:site_name" content="全栈进阶那些事">
<meta property="og:description" content="本文是基于CentOS 7.3系统环境，进行MapReduce的学习和使用本文是基于CentOS 7.3系统环境，进行MapReduce的学习和使用  1. MapReduce简介1.1 MapReduce定义MapReduce是一个分布式运算程序的编程框架，是基于Hadoop的数据分析计算的核心框架 1.2 MapReduce处理过程主要分为两个阶段：Map和Reduce  Map负责把一个任">
<meta property="og:locale" content="zh_CN">
<meta property="og:image" content="https://oscimg.oschina.net/oscnet/20200617165336828.png">
<meta property="article:published_time" content="2021-11-12T13:58:21.000Z">
<meta property="article:modified_time" content="2021-11-13T01:06:19.874Z">
<meta property="article:author" content="liuyuantao">
<meta property="article:tag" content="Hadoop">
<meta property="article:tag" content="MapReduce">
<meta name="twitter:card" content="summary">
<meta name="twitter:image" content="https://oscimg.oschina.net/oscnet/20200617165336828.png"><link rel="shortcut icon" href="/img/favicon.png"><link rel="canonical" href="https://liuyuantao.gitee.io/2021/11/12/56ac102284d9"><link rel="preconnect" href="//cdn.jsdelivr.net"/><link rel="preconnect" href="//hm.baidu.com"/><link rel="preconnect" href="//busuanzi.ibruce.info"/><link rel="stylesheet" href="/css/index.css"><link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@fortawesome/fontawesome-free/css/all.min.css" media="print" onload="this.media='all'"><link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/node-snackbar/dist/snackbar.min.css" media="print" onload="this.media='all'"><script>var _hmt = _hmt || [];
// Asynchronously inject the Baidu Analytics (百度统计) tracker script.
(function () {
  var tracker = document.createElement("script");
  tracker.src = "https://hm.baidu.com/hm.js?d43383137144f76a37f1074576bc674e";
  // Insert before the first existing <script> so it loads without blocking.
  var firstScript = document.getElementsByTagName("script")[0];
  firstScript.parentNode.insertBefore(tracker, firstScript);
})();
</script><script>const GLOBAL_CONFIG = { 
  // Root path of the site, used by theme scripts to build URLs.
  root: '/',
  // Algolia search is disabled; local search (below) is used instead.
  algolia: undefined,
  // Local search index file and its UI strings.
  localSearch: {"path":"search.xml","languages":{"hits_empty":"找不到您查询的内容：${query}"}},
  // Simplified/Traditional Chinese toggle settings.
  translate: {"defaultEncoding":1,"translateDelay":0,"msgToTraditionalChinese":"繁","msgToSimplifiedChinese":"简"},
  // Outdated-post notice is disabled.
  noticeOutdate: undefined,
  // Code-highlighting options.
  // NOTE(review): 'highlighjs' looks misspelled ('highlightjs'?) — confirm this
  // is the exact key the theme's JS compares against before changing it.
  highlight: {"plugin":"highlighjs","highlightCopy":true,"highlightLang":true,"highlightHeightLimit":200},
  // Messages shown by the copy-to-clipboard feature.
  copy: {
    success: '复制成功',
    error: '复制错误',
    noSupport: '浏览器不支持'
  },
  // Render relative dates ("x 天前") on the homepage and in posts.
  relativeDate: {
    homepage: true,
    post: true
  },
  // Unit label for the site-runtime counter.
  runtime: '天',
  // Suffix strings used when formatting relative dates.
  date_suffix: {
    just: '刚刚',
    min: '分钟前',
    hour: '小时前',
    day: '天前',
    month: '个月前'
  },
  // Copyright notice appended when copying more than limitCount characters.
  copyright: {"limitCount":500,"languages":{"author":"作者: liuyuantao","link":"链接: ","source":"来源: 全栈进阶那些事","info":"著作权归作者所有。商业转载请联系作者获得授权，非商业转载请注明出处。"}},
  // Image lightbox implementation to lazy-load.
  lightbox: 'mediumZoom',
  // Snackbar (toast) messages, colors and placement.
  Snackbar: {"chs_to_cht":"你已切换为繁体","cht_to_chs":"你已切换为简体","day_to_night":"你已切换为深色模式","night_to_day":"你已切换为浅色模式","bgLight":"#49b1f5","bgDark":"#2d3035","position":"top-right"},
  // CDN URLs for third-party libraries loaded on demand.
  source: {
    jQuery: 'https://cdn.jsdelivr.net/npm/jquery@latest/dist/jquery.min.js',
    justifiedGallery: {
      js: 'https://cdn.jsdelivr.net/npm/justifiedGallery/dist/js/jquery.justifiedGallery.min.js',
      css: 'https://cdn.jsdelivr.net/npm/justifiedGallery/dist/css/justifiedGallery.min.css'
    },
    fancybox: {
      js: 'https://cdn.jsdelivr.net/npm/@fancyapps/fancybox@latest/dist/jquery.fancybox.min.js',
      css: 'https://cdn.jsdelivr.net/npm/@fancyapps/fancybox@latest/dist/jquery.fancybox.min.css'
    }
  },
  // Misc feature flags consumed by the theme's scripts.
  isPhotoFigcaption: true,
  islazyload: false,
  isanchor: true
}</script><script id="config-diff">var GLOBAL_CONFIG_SITE = {
  // Page title (matches the document <title> prefix).
  title: '深入浅出学习MapReduce',
  // Page-type flags consumed by the theme's JS (e.g. detectApple below
  // only runs when isHome is true).
  isPost: true,
  isHome: false,
  isHighlightShrink: false,
  isToc: true,
  // Last-update timestamp of this post (local time).
  postUpdate: '2021-11-13 09:06:19'
}</script><noscript><style type="text/css">
  /* noscript fallback: with JS disabled, force elements that scripts
     normally reveal (nav, gallery images, post dates) to be visible. */
  #nav {
    opacity: 1
  }
  .justified-gallery img {
    opacity: 1
  }

  #recent-posts time,
  #post-meta time {
    display: inline !important
  }
</style></noscript><script>(win=>{
    // Tiny localStorage wrapper storing values with a per-entry expiry.
    win.saveToLocal = {
      // Persist `value` under `key`, expiring after `ttl` days.
      // A ttl of 0 means "do not persist at all".
      set: function setWithExpiry(key, value, ttl) {
        if (ttl === 0) return
        const now = new Date()
        const expiryDay = ttl * 86400000 // days -> milliseconds
        const item = {
          value: value,
          expiry: now.getTime() + expiryDay,
        }
        localStorage.setItem(key, JSON.stringify(item))
      },

      // Read the value stored under `key`.
      // Returns undefined when the entry is missing, expired, or unreadable;
      // expired/corrupt entries are removed.
      get: function getWithExpiry(key) {
        const itemStr = localStorage.getItem(key)

        if (!itemStr) {
          return undefined
        }
        let item
        try {
          item = JSON.parse(itemStr)
        } catch (e) {
          // BUGFIX: a corrupt entry (e.g. written by another script on the
          // same origin) used to throw here and abort this whole inline
          // script, breaking theme/aside/font restoration. Drop it instead.
          localStorage.removeItem(key)
          return undefined
        }
        if (!item || typeof item.expiry !== 'number') {
          // Not in our {value, expiry} shape — treat as corrupt.
          localStorage.removeItem(key)
          return undefined
        }
        const now = new Date()

        if (now.getTime() > item.expiry) {
          localStorage.removeItem(key)
          return undefined
        }
        return item.value
      }
    }
  
    // Dynamically load an external script; the returned Promise resolves
    // once the script has finished loading and rejects on a network error.
    win.getScript = url => new Promise((resolve, reject) => {
      const el = document.createElement('script')
      el.src = url
      el.async = true
      el.onerror = reject
      el.onload = el.onreadystatechange = function() {
        const state = this.readyState
        // Legacy IE fires readystatechange; ignore until fully loaded.
        if (state && state !== 'loaded' && state !== 'complete') return
        // Detach handlers so they cannot fire twice.
        el.onload = el.onreadystatechange = null
        resolve()
      }
      document.head.appendChild(el)
    })
  
      // Switch the page to dark mode and keep the browser-chrome color
      // (<meta name="theme-color">) in sync.
      win.activateDarkMode = function () {
        document.documentElement.setAttribute('data-theme', 'dark')
        if (document.querySelector('meta[name="theme-color"]') !== null) {
          document.querySelector('meta[name="theme-color"]').setAttribute('content', '#0d0d0d')
        }
      }
      // Switch the page back to light mode (counterpart of activateDarkMode).
      win.activateLightMode = function () {
        document.documentElement.setAttribute('data-theme', 'light')
        if (document.querySelector('meta[name="theme-color"]') !== null) {
          document.querySelector('meta[name="theme-color"]').setAttribute('content', '#ffffff')
        }
      }
      // Restore the theme the visitor chose on a previous visit
      // (undefined when no saved/unexpired preference exists).
      const t = saveToLocal.get('theme')

          if (t === 'dark') activateDarkMode()
          else if (t === 'light') activateLightMode()

      // Restore the sidebar (aside) visibility preference.
      const asideStatus = saveToLocal.get('aside-status')
      if (asideStatus !== undefined) {
        if (asideStatus === 'hide') {
          document.documentElement.classList.add('hide-aside')
        } else {
          document.documentElement.classList.remove('hide-aside')
        }
      }

    // Restore a custom global font size, if one was saved (value is in px).
    const fontSizeVal = saveToLocal.get('global-font-size')
    if (fontSizeVal !== undefined) {
      document.documentElement.style.setProperty('--global-font-size', fontSizeVal + 'px')
    }

    // On the home page, tag Apple devices with a CSS class — presumably for
    // platform-specific styling workarounds (TODO confirm in the stylesheet).
    const detectApple = () => {
      if (GLOBAL_CONFIG_SITE.isHome && /iPad|iPhone|iPod|Macintosh/.test(navigator.userAgent)){
        document.documentElement.classList.add('apple')
      }
    }
    detectApple()
    })(window)</script><meta name="generator" content="Hexo 5.4.0"><link rel="alternate" href="/atom.xml" title="全栈进阶那些事" type="application/atom+xml">
</head><body><div id="loading-box"><div class="loading-left-bg"></div><div class="loading-right-bg"></div><div class="spinner-box"><div class="configure-border-1"><div class="configure-core"></div></div><div class="configure-border-2"><div class="configure-core"></div></div><div class="loading-word">加载中...</div></div></div><div id="sidebar"><div id="menu-mask"></div><div id="sidebar-menus"><div class="avatar-img is-center"><img src="/img/zhiyao_avatar.jpg" onerror="onerror=null;src='/img/friend_404.gif'" alt="avatar"/></div><div class="site-data"><div class="data-item is-center"><div class="data-item-link"><a href="/archives/"><div class="headline">文章</div><div class="length-num">41</div></a></div></div><div class="data-item is-center"><div class="data-item-link"><a href="/tags/"><div class="headline">标签</div><div class="length-num">14</div></a></div></div><div class="data-item is-center"><div class="data-item-link"><a href="/categories/"><div class="headline">分类</div><div class="length-num">4</div></a></div></div></div><hr/><div class="menus_items"><div class="menus_item"><a class="site-page" href="/"><i class="fa-fw fas fa-home"></i><span> 首页</span></a></div><div class="menus_item"><a class="site-page" href="/archives/"><i class="fa-fw fas fa-archive"></i><span> 时间轴</span></a></div><div class="menus_item"><a class="site-page" href="/tags/"><i class="fa-fw fas fa-tags"></i><span> 标签</span></a></div><div class="menus_item"><a class="site-page" href="/categories/"><i class="fa-fw fas fa-folder-open"></i><span> 分类</span></a></div><div class="menus_item"><a class="site-page" href="/link/"><i class="fa-fw fas fa-link"></i><span> 友链</span></a></div><div class="menus_item"><a class="site-page" href="/about/"><i class="fa-fw fas fa-heart"></i><span> 关于</span></a></div></div></div></div><div class="post" id="body-wrap"><header class="not-top-img" id="page-header"><nav id="nav"><span id="blog_name"><a id="site-name" href="/">全栈进阶那些事</a></span><div id="menus"><div 
id="search-button"><a class="site-page social-icon search"><i class="fas fa-search fa-fw"></i><span> 搜索</span></a></div><div class="menus_items"><div class="menus_item"><a class="site-page" href="/"><i class="fa-fw fas fa-home"></i><span> 首页</span></a></div><div class="menus_item"><a class="site-page" href="/archives/"><i class="fa-fw fas fa-archive"></i><span> 时间轴</span></a></div><div class="menus_item"><a class="site-page" href="/tags/"><i class="fa-fw fas fa-tags"></i><span> 标签</span></a></div><div class="menus_item"><a class="site-page" href="/categories/"><i class="fa-fw fas fa-folder-open"></i><span> 分类</span></a></div><div class="menus_item"><a class="site-page" href="/link/"><i class="fa-fw fas fa-link"></i><span> 友链</span></a></div><div class="menus_item"><a class="site-page" href="/about/"><i class="fa-fw fas fa-heart"></i><span> 关于</span></a></div></div><div id="toggle-menu"><a class="site-page"><i class="fas fa-bars fa-fw"></i></a></div></div></nav></header><main class="layout" id="content-inner"><div id="post"><div id="post-info"><h1 class="post-title">深入浅出学习MapReduce</h1><div id="post-meta"><div class="meta-firstline"><span class="post-meta-date"><i class="fa-fw post-meta-icon far fa-calendar-alt"></i><span class="post-meta-label">发表于</span><time datetime="2021-11-12T13:58:21.000Z" title="undefined 2021-11-12 21:58:21">2021-11-12</time></span><span class="post-meta-categories"><span class="post-meta-separator">|</span><i class="fas fa-inbox fa-fw post-meta-icon"></i><a class="post-meta-categories" href="/categories/%E5%A4%A7%E6%95%B0%E6%8D%AE/">大数据</a></span></div><div class="meta-secondline"><span class="post-meta-separator">|</span><span class="post-meta-wordcount"><i class="far fa-file-word fa-fw post-meta-icon"></i><span class="post-meta-label">字数总计:</span><span class="word-count">5.2k</span><span class="post-meta-separator">|</span><i class="far fa-clock fa-fw post-meta-icon"></i><span 
class="post-meta-label">阅读时长:</span><span>17分钟</span></span><span class="post-meta-separator">|</span><span class="post-meta-pv-cv" id="" data-flag-title="深入浅出学习MapReduce"><i class="far fa-eye fa-fw post-meta-icon"></i><span class="post-meta-label">阅读量:</span><span id="busuanzi_value_page_pv"></span></span></div></div></div><article class="post-content" id="article-container"><blockquote>
<p>本文是基于CentOS 7.3系统环境，进行MapReduce的学习和使用本文是基于CentOS 7.3系统环境，进行MapReduce的学习和使用</p>
</blockquote>
<h1 id="1-MapReduce简介"><a href="#1-MapReduce简介" class="headerlink" title="1. MapReduce简介"></a>1. MapReduce简介</h1><h2 id="1-1-MapReduce定义"><a href="#1-1-MapReduce定义" class="headerlink" title="1.1 MapReduce定义"></a>1.1 MapReduce定义</h2><p>MapReduce是一个分布式运算程序的编程框架，是基于Hadoop的数据分析计算的核心框架</p>
<h2 id="1-2-MapReduce处理过程"><a href="#1-2-MapReduce处理过程" class="headerlink" title="1.2 MapReduce处理过程"></a>1.2 MapReduce处理过程</h2><p>主要分为两个阶段：Map和Reduce</p>
<ul>
<li>Map负责把一个任务分解成多个任务</li>
<li>Reduce负责把分解后多任务处理的结果进行汇总</li>
</ul>
<h2 id="1-3-MapReduce的优点"><a href="#1-3-MapReduce的优点" class="headerlink" title="1.3 MapReduce的优点"></a>1.3 MapReduce的优点</h2><ol>
<li><strong>MapReduce易于编程</strong><br>只需要实现一些简单接口，就可以完成一个分布式程序，这个分布式程序可以分布到大量廉价的PC机器上运行。也就是说你写一个分布式程序，就跟写一个简单的串行程序是一模一样的。 </li>
<li><strong>良好的扩展性（hadoop的特点）</strong><br>当你的计算资源不能满足的时候，你可以通过简单的增加机器（nodemanager）来扩展它的计算能力 </li>
<li><strong>高容错性</strong><br>MapReduce设计的初衷就是使程序能够部署在廉价的PC机器上，这就要求它具有很高的容错性，比如其中一台机器挂了，它可以把上面的计算任务转移到另外一个节点上运行，不至于整个任务运行失败。 </li>
<li><strong>适合PB级以上海量数据的离线处理</strong><br>可以实现上千台服务器集群并发工作，提供数据处理能力</li>
</ol>
<h2 id="1-4-MapReduce的缺点"><a href="#1-4-MapReduce的缺点" class="headerlink" title="1.4 MapReduce的缺点"></a>1.4 MapReduce的缺点</h2><ol>
<li><strong>不擅长实时计算</strong><br>MapReduce无法像MySQL一样，在毫秒或者秒级内返回结果 </li>
<li><strong>不擅长流式计算</strong><br>流式计算的输入数据是动态的，而MapReduce的输入数据集是静态的，不能动态变化。这是因为MapReduce自身的设计特点决定了数据源必须是静态的 </li>
<li><strong>不擅长DAG有向图计算</strong><br>多个应用程序之间存在依赖关系，后一个应用程序的输入为前一个程序的输出。在这种情况下，每个MapReduce作业的输出结果都会写入到磁盘，会造成大量的磁盘IO，导致性能非常低下</li>
</ol>
<h2 id="1-5-MapReduce核心编程思想"><a href="#1-5-MapReduce核心编程思想" class="headerlink" title="1.5 MapReduce核心编程思想"></a>1.5 MapReduce核心编程思想</h2><p><img src="https://img-blog.csdnimg.cn/20200523112141354.png"></p>
<p>分布式的运算程序往往需要分成至少2个阶段。<br>第一个阶段的MapTask并发实例，完全并行运行，互不相干。<br>第二个阶段的ReduceTask并发实例互不相干，但是他们的数据依赖于上一个阶段的所有MapTask并发实例的输出。<br>MapReduce编程模型只能包含一个Map阶段和一个Reduce阶段，如果用户的业务逻辑非常复杂，那就只能多个MapReduce程序，串行运行。</p>
<h2 id="1-6-MapReduce进程"><a href="#1-6-MapReduce进程" class="headerlink" title="1.6 MapReduce进程"></a>1.6 MapReduce进程</h2><ol>
<li><strong>MrAppMaster</strong><br>负责整个程序的过程调度及状态协调 </li>
<li><strong>MapTask</strong><br>负责Map阶段的整个数据处理流程 </li>
<li><strong>ReduceTask</strong><br>负责Reduce阶段的整个数据处理流程</li>
</ol>
<h2 id="1-7-数据切片与MapTask并行度机制"><a href="#1-7-数据切片与MapTask并行度机制" class="headerlink" title="1.7 数据切片与MapTask并行度机制"></a>1.7 数据切片与MapTask并行度机制</h2><p><img src="https://img-blog.csdnimg.cn/20200526152737847.png"></p>
<h2 id="1-8-FileInputFormat切片机制"><a href="#1-8-FileInputFormat切片机制" class="headerlink" title="1.8 FileInputFormat切片机制"></a>1.8 FileInputFormat切片机制</h2><ol>
<li><strong>切片机制</strong></li>
</ol>
<p>简单地按照文件的内容长度进行切片<br>切片大小，默认等于Block大小<br>切片时不考虑数据集整体，而是逐个针对每一个文件单独切片</p>
<ol start="3">
<li><strong>案例分析</strong></li>
</ol>
<p>输入数据有两个文件： </p>
<figure class="highlight plaintext"><table><tr><td class="code"><pre><span class="line">file1.txt 320M </span><br><span class="line">file2.txt 10M </span><br></pre></td></tr></table></figure>
<p>经过FileInputFormat的切片机制运算后，形成的切片信息如下： </p>
<figure class="highlight plaintext"><table><tr><td class="code"><pre><span class="line">file1.txt.split1 ---- 0~128M </span><br><span class="line">file1.txt.split2 ---- 128M~256M </span><br><span class="line">file1.txt.split3 ---- 256M~320M </span><br><span class="line">file2.txt.split1 ---- 0~10M</span><br></pre></td></tr></table></figure>
<h2 id="1-9-CombineTextInputFormat切片机制"><a href="#1-9-CombineTextInputFormat切片机制" class="headerlink" title="1.9 CombineTextInputFormat切片机制"></a><strong>1.9 CombineTextInputFormat切片机制</strong></h2><p>框架默认的TextInputFormat切片机制是对任务按文件规划切片，不管文件多小，都会是一个单独的切片，都会交给一个MapTask，这样如果有大量小文件，就会产生大量的MapTask，而创建MapTask的开销比较大，处理效率极其低下。</p>
<ol>
<li><strong>应用场景</strong><br>CombineTextInputFormat用于小文件过多的场景，它可以将多个小文件从逻辑上规划到一个切片中，这样，多个小文件就可以交给一个MapTask处理。 </li>
<li><strong>虚拟存储切片最大值设置</strong><br>CombineTextInputFormat.setMaxInputSplitSize(job, 4194304);// 4m </li>
<li><strong>切片机制</strong><br>生成切片过程包括：虚拟存储过程和切片过程两部分。</li>
</ol>
<p><strong>虚拟存储过程：</strong><br>将输入目录下所有文件大小，依次和设置的setMaxInputSplitSize值比较，如果不大于设置的最大值，逻辑上划分一个块。如果输入文件大于设置的最大值且大于两倍，那么以最大值切割一块；当剩余数据大小超过设置的最大值且不大于最大值2倍，此时将文件均分成2个虚拟存储块（防止出现太小切片）。<br><strong>切片过程：</strong> </p>
<ul>
<li>（a）判断虚拟存储的文件大小是否大于setMaxInputSplitSize值，大于等于则单独形成一个切片。 </li>
<li>（b）如果不大于则跟下一个虚拟存储文件进行合并，共同形成一个切片。</li>
</ul>
<p><strong>4. 案例分析</strong><br><img src="https://img-blog.csdnimg.cn/2020052617342823.png"></p>
<p>例如setMaxInputSplitSize值为4M，输入文件大小为8.02M，则先逻辑上分成一个4M。剩余的大小为4.02M，如果按照4M逻辑划分，就会出现0.02M的小的虚拟存储文件，所以将剩余的4.02M文件切分成（2.01M和2.01M）两个文件。<br>有4个小文件大小分别为1.7M、5.1M、3.4M以及6.8M这四个小文件，则虚拟存储之后形成6个文件块，大小分别为：<br><code>1.7M</code>，<code>（2.55M、2.55M）</code>，<code>3.4M</code>以及<code>（3.4M、3.4M） </code><br>最终会形成3个切片，大小分别为：<br><code>（1.7+2.55）M</code>，<code>（2.55+3.4）M</code>，<code>（3.4+3.4）M</code></p>
<h2 id="1-10-FileInputFormat实现类"><a href="#1-10-FileInputFormat实现类" class="headerlink" title="1.10 FileInputFormat实现类"></a>1.10 FileInputFormat实现类</h2><table>
<thead>
<tr>
<th>InputFormat</th>
<th>切片规则(getSplits)</th>
<th>把切片分解成KV(createRecordReader)</th>
</tr>
</thead>
<tbody><tr>
<td>FileInputFormat</td>
<td>按文件-&gt;块大小</td>
<td>没有实现</td>
</tr>
<tr>
<td>TextInputFormat</td>
<td>继承FileInputFormat</td>
<td>LineRecordReader&lt;偏移量，行数据&gt;</td>
</tr>
<tr>
<td>CombineTextInputFormat</td>
<td>重写了getSplit，小块合并切</td>
<td>CombineFileRecordReader（和LineRecordReader处理一样，只不过跨文件了）&lt;偏移量，行数据&gt;</td>
</tr>
<tr>
<td>KeyValueTextInputFormat</td>
<td>继承FileInputFormat</td>
<td>KeyValueLineRecordReader&lt;分隔符前，分隔符后&gt;</td>
</tr>
<tr>
<td>NLineInputFormat</td>
<td>重写了getSplit，按行切</td>
<td>LineRecordReader&lt;偏移量，行数据&gt;</td>
</tr>
<tr>
<td>自定义</td>
<td>继承FileInputFormat</td>
<td>自定义RecordReader</td>
</tr>
</tbody></table>
<h2 id="1-11-Shuffle机制"><a href="#1-11-Shuffle机制" class="headerlink" title="1.11 Shuffle机制"></a>1.11 Shuffle机制</h2><p><img src="https://img-blog.csdnimg.cn/2020052717205493.png"></p>
<p><img src="https://img-blog.csdnimg.cn/20200527172102742.png"> </p>
<h2 id="1-12-分区"><a href="#1-12-分区" class="headerlink" title="1.12 分区"></a>1.12 分区</h2><p>分区数量等于ReduceTask的进程数</p>
<p><strong>1. 分区机制</strong></p>
<ol>
<li>如果ReduceTask的数量&gt;getPartition的结果数，则会多产生几个空的输出文件part-r-000xxx；</li>
<li>如果1&lt;ReduceTask的数量&lt;getPartition的结果数，则有一部分分区数据无处安放，会报Exception；</li>
<li>如果ReduceTask的数量=1，则不管MapTask端输出多少个分区文件，最终结果都交给这一个ReduceTask，最终也就只会产生一个结果文件part-r-00000；</li>
<li>分区号必须从零开始，逐一累加。</li>
</ol>
<p><strong>2. 案例分析</strong><br>例如：假设自定义分区数为5，</p>
<ol>
<li>job.setNumReduceTasks(6)；大于5，程序会正常运行，则会多产生几个空的输出文件</li>
<li>job.setNumReduceTasks(2)；会报Exception；</li>
<li>job.setNumReduceTasks(1)，会正常运行，最终也就只会产生一个输出文件；</li>
</ol>
<h2 id="1-13-排序"><a href="#1-13-排序" class="headerlink" title="1.13 排序"></a>1.13 排序</h2><p><strong>1. 排序概述</strong></p>
<ol>
<li>排序是MapReduce框架中最重要的操作之一</li>
<li>MapTask和ReduceTask均会对数据按照key进行排序。该操作属于Hadoop的默认行为。任何应用程序中的数据均会被排序，而不管逻辑上是否需要。</li>
<li>默认排序是按照字典顺序排序，且实现该排序的方法是快速排序。</li>
</ol>
<p><strong>2. 排序分类</strong></p>
<ol>
<li>部分排序：MapReduce根据输入记录的键对数据集排序，保障输出的每个文件内部有序。</li>
<li>全排序：最终输出结果只有一个文件，且文件内部有序。</li>
<li>辅助排序（GroupingComparator）：在Reduce端对key进行分组</li>
<li>二次排序：在自定义排序过程中，如果compareTo中的判断条件为两个即为二次排序</li>
</ol>
<h2 id="1-14-Combiner合并"><a href="#1-14-Combiner合并" class="headerlink" title="1.14 Combiner合并"></a>1.14 Combiner合并</h2><ol>
<li>Combiner是MR程序中Mapper和Reducer之外的一种组件</li>
<li>Combiner组件的父类就是Reducer</li>
<li>Combiner和Reducer的区别在于运行的位置不同（Combiner是在每一个MapTask所在的节点运行；Reducer是接收全局所有Mapper的输出结果）</li>
<li>Combiner的意义就是对每一个MapTask的输出进行局部汇总，以减少网络传输量</li>
<li>Combiner能够应用的前提是不能影响最终的业务逻辑，而且Combiner的输出kv应该跟Reducer的输入kv类型要对应起来</li>
</ol>
<p><img src="https://img-blog.csdnimg.cn/2020052720084062.png"></p>
<h2 id="1-15-MapTask工作机制"><a href="#1-15-MapTask工作机制" class="headerlink" title="1.15 MapTask工作机制"></a>1.15 MapTask工作机制</h2><p><img src="https://img-blog.csdnimg.cn/20200527233708643.png"></p>
<ol>
<li><p>Read阶段：MapTask通过用户编写的RecordReader，从输入InputSplit中解析出一个个key/value。</p>
</li>
<li><p>Map阶段：该节点主要是将解析出的key/value交给用户编写map()函数处理，并产生一系列新的key/value。</p>
</li>
<li><p>Collect收集阶段：在用户编写map()函数中，当数据处理完成后，一般会调用OutputCollector.collect()输出结果。在该函数内部，它会将生成的key/value分区（调用Partitioner），并写入一个环形内存缓冲区中。</p>
</li>
<li><p>Spill阶段：即“溢写”，当环形缓冲区满后，MapReduce会将数据写到本地磁盘上，生成一个临时文件。需要注意的是，将数据写入本地磁盘之前，先要对数据进行一次本地排序，并在必要时对数据进行合并、压缩等操作。</p>
<p> <strong>溢写阶段详情</strong><br> <strong>步骤1：</strong>利用快速排序算法对缓存区内的数据进行排序，排序方式是，先按照分区编号Partition进行排序，然后按照key进行排序。这样，经过排序后，数据以分区为单位聚集在一起，且同一分区内所有数据按照key有序。<br> <strong>步骤2：</strong>按照分区编号由小到大依次将每个分区中的数据写入任务工作目录下的临时文件output/spillN.out（N表示当前溢写次数）中。如果用户设置了Combiner，则写入文件之前，对每个分区中的数据进行一次聚集操作。<br> <strong>步骤3：</strong>将分区数据的元信息写到内存索引数据结构SpillRecord中，其中每个分区的元信息包括在临时文件中的偏移量、压缩前数据大小和压缩后数据大小。如果当前内存索引大小超过1MB，则将内存索引写到文件output/spillN.out.index中。</p>
</li>
<li><p>Combine阶段：当所有数据处理完成后，MapTask对所有临时文件进行一次合并，以确保最终只会生成一个数据文件。</p>
</li>
</ol>
<p>当所有数据处理完后，MapTask会将所有临时文件合并成一个大文件，并保存到文件output/file.out中，同时生成相应的索引文件output/file.out.index。</p>
<p>在进行文件合并过程中，MapTask以分区为单位进行合并。对于某个分区，它将采用多轮递归合并的方式。每轮合并io.sort.factor（默认10）个文件，并将产生的文件重新加入待合并列表中，对文件排序后，重复以上过程，直到最终得到一个大文件。</p>
<p>让每个MapTask最终只生成一个数据文件，可避免同时打开大量文件和同时读取大量小文件产生的随机读取带来的开销。</p>
<h2 id="1-16-ReduceTask工作机制"><a href="#1-16-ReduceTask工作机制" class="headerlink" title="1.16 ReduceTask工作机制"></a>1.16 ReduceTask工作机制</h2><p><img src="https://img-blog.csdnimg.cn/20200528083022658.png"></p>
<ol>
<li>Copy阶段：ReduceTask从各个MapTask上远程拷贝一片数据，并针对某一片数据，如果其大小超过一定阈值，则写到磁盘上，否则直接放到内存中。</li>
<li>Merge阶段：在远程拷贝数据的同时，ReduceTask启动了两个后台线程对内存和磁盘上的文件进行合并，以防止内存使用过多或磁盘上文件过多。</li>
<li>Sort阶段：按照MapReduce语义，用户编写reduce()函数输入数据是按key进行聚集的一组数据。为了将key相同的数据聚在一起，Hadoop采用了基于排序的策略。由于各个MapTask已经实现对自己的处理结果进行了局部排序，因此，ReduceTask只需对所有数据进行一次归并排序即可。</li>
<li>Reduce阶段：reduce()函数将计算结果写到HDFS上。</li>
</ol>
<h2 id="1-17-ReduceTask并行度"><a href="#1-17-ReduceTask并行度" class="headerlink" title="1.17 ReduceTask并行度"></a>1.17 ReduceTask并行度</h2><p>ReduceTask的并行度影响整个job的执行并发度和执行效率，但与MapTask的并发数由切片数决定不同，ReduceTask数量的决定是可以直接手动设置的</p>
<ol>
<li>ReduceTask=0，表示没有Reduce阶段，输出文件个数和Map个数一致，在实际开发中，如果可以不用Reduce，可以将值设置为0，因为在整个MR阶段，比较耗时的shuffle，省掉了Reduce，就相当于省掉了shuffle；</li>
<li>ReduceTask默认值就是1，所以输出文件个数为1；</li>
<li>如果数据分布不均匀，就有可能在Reduce阶段产生数据倾斜；</li>
<li>ReduceTask数量并不是任意设置，还要考虑业务逻辑需求，有些情况下，需要计算全局汇总结果，就只能有1个ReduceTask；</li>
<li>具体多少个ReduceTask，需要根据集群性能而定；</li>
<li>如果分区数不是1，但是ReduceTask为1，是否执行分区过程。答案是：不执行分区过程，因为在MapTask的源码中，执行分区的前提就是先判断ReduceNum个数是否大于1，不大于1肯定不执行。</li>
</ol>
<h2 id="1-18-Reduce-Join工作原理"><a href="#1-18-Reduce-Join工作原理" class="headerlink" title="1.18 Reduce Join工作原理"></a>1.18 Reduce Join工作原理</h2><p><strong>Map端的主要工作：</strong>为来自不同表或文件的key/value对，打标签以区别不同来源的记录。然后用连接字段作为key，其余部分和新加的标志作为value，最后进行输出。<br><strong>Reduce端的主要工作：</strong>在Reduce端以连接字段作为key的分组已经完成，我们只需要在每一个分组当中将那些来源于不同文件的记录（在Map阶段已经打标志）分开，最后进行合并就OK了。<br><strong>缺点：</strong>这种方式，合并的操作是在Reduce阶段完成，Reduce端的处理压力太大，Map节点的运算负载则很低，资源利用率不高，且在Reduce阶段极易产生数据倾斜<br><strong>解决方案：</strong>Map端实现数据合并</p>
<h2 id="1-19-Map-Join工作原理"><a href="#1-19-Map-Join工作原理" class="headerlink" title="1.19 Map Join工作原理"></a>1.19 Map Join工作原理</h2><p><strong>适用场景：</strong>Map Join适用于一张表十分大，一张表十分小的场景<br><strong>优点：</strong>在Map端缓存多张表，提前处理业务逻辑，这样增加Map端业务，减少Reduce端数据的压力，尽可能的减少数据倾斜</p>
<h2 id="1-20-计数器应用"><a href="#1-20-计数器应用" class="headerlink" title="1.20 计数器应用"></a>1.20 计数器应用</h2><p><strong>定义：</strong>Hadoop为每个作业维护若干内置计数器，以描述多项指标。例如，某些计数器记录已处理的字节数和记录数，使用户可监控已处理的输入数据量和已产生的输出数据量</p>
<p><strong>计数器API：</strong></p>
<p>采用枚举的方式统计计数<br>采用计数器组、计数器名称的方式统计</p>
<h2 id="1-21-数据压缩"><a href="#1-21-数据压缩" class="headerlink" title="1.21 数据压缩"></a>1.21 数据压缩</h2><p><strong>定义：</strong>压缩技术能够有效减少底层存储系统（HDFS）读写字节数。压缩提高了网络带宽和磁盘空间的效率。在运行MR程序时，I/O操作、网络数据传输、shuffle和Merge要花大量的时间，尤其是数据规模很大和工作负载密集的情况下。因此，使用数据压缩显得非常重要。</p>
<p><strong>优点：</strong>鉴于磁盘I/O和网络带宽是Hadoop的宝贵资源，数据压缩对于节省资源、最小化磁盘I/O和网络传输非常有帮助。可以在任意MapReduce阶段启用压缩。</p>
<p><strong>缺点：</strong>不过，尽管压缩与解压操作的CPU开销不高，其性能的提升和资源的节省并非没有代价</p>
<p><strong>压缩策略：</strong>压缩是提高Hadoop运行效率的一种优化策略。<br>通过对mapper、reducer运行过程的数据进行压缩，以减少磁盘I/O，提高MR程序运行速度。</p>
<p><strong>压缩原则：</strong></p>
<ul>
<li>运算密集型的job，少用压缩</li>
<li>I/O密集型的job，多用压缩</li>
</ul>
<h2 id="1-22-MR支持的压缩编码"><a href="#1-22-MR支持的压缩编码" class="headerlink" title="1.22 MR支持的压缩编码"></a>1.22 MR支持的压缩编码</h2><table>
<thead>
<tr>
<th>压缩格式</th>
<th>hadoop自带</th>
<th>算法文件扩展名</th>
<th>是否可切分</th>
<th>压缩后，原来的程序是否需要修改</th>
</tr>
</thead>
<tbody><tr>
<td>DEFLATE</td>
<td>是，直接使用</td>
<td>DEFLATE.deflate</td>
<td>否</td>
<td>和文本处理一样，不需要修改</td>
</tr>
<tr>
<td>gzip</td>
<td>是，直接使用</td>
<td>DEFLATE.gz</td>
<td>否</td>
<td>和文本处理一样，不需要修改</td>
</tr>
<tr>
<td>bzip2</td>
<td>是，直接使用</td>
<td>bzip2.bz2</td>
<td>是</td>
<td>和文本处理一样，不需要修改</td>
</tr>
<tr>
<td>LZO</td>
<td>否，需要安装</td>
<td>LZO.lzo</td>
<td>是</td>
<td>需要建索引，还需要指定输入格式</td>
</tr>
<tr>
<td>Snappy</td>
<td>否，需要安装</td>
<td>Snappy.snappy</td>
<td>否</td>
<td>和文本处理一样，不需要修改</td>
</tr>
</tbody></table>
<table>
<thead>
<tr>
<th>压缩算法</th>
<th>原始文件大小</th>
<th>压缩文件大小</th>
<th>压缩速度</th>
<th>解压速度</th>
</tr>
</thead>
<tbody><tr>
<td>gzip</td>
<td>8.3G</td>
<td>1.8G</td>
<td>17.5MB/s</td>
<td>58MB/s</td>
</tr>
<tr>
<td>bzip2</td>
<td>8.3G</td>
<td>1.1G</td>
<td>2.4MB/s</td>
<td>9.5MB/s</td>
</tr>
<tr>
<td>LZO</td>
<td>8.3G</td>
<td>2.9G</td>
<td>49.3MB/s</td>
<td>74.6MB/s</td>
</tr>
<tr>
<td>snappy</td>
<td>8.3G</td>
<td>*</td>
<td>250MB/s</td>
<td>500MB/s</td>
</tr>
</tbody></table>
<h2 id="1-23-压缩方式选择"><a href="#1-23-压缩方式选择" class="headerlink" title="1.23 压缩方式选择"></a>1.23 压缩方式选择</h2><ul>
<li><strong>Gzip压缩</strong></li>
</ul>
<p><strong>优点：</strong>压缩率比较高，而且压缩/解压速度也比较快；Hadoop本身支持，在应用中处理gzip格式的文件就和直接处理文本一样；大部分Linux系统都自带gzip命令，使用方便<br><strong>缺点：</strong>不支持split<br><strong>应用场景：</strong>当每个文件压缩之后在130M以内的（1个块大小内），都可以考虑gzip压缩格式 </p>
<ul>
<li><strong>Bzip2压缩</strong></li>
</ul>
<p><strong>优点：</strong>支持split，具有很高压缩率，比Gzip压缩率高；Hadoop本身支持，使用方便<br><strong>缺点：</strong>压缩/解压速度比较慢<br><strong>应用场景：</strong>适合对速度要求不高，但需要较高的压缩率的时候；或者输出之后的数据比较大，处理之后的数据需要压缩存档减少磁盘空间并且以后数据用得比较少的情况；或者对单个很大的文本文件想压缩减少存储空间，同时又需要支持split，而且兼容之前的应用程序的情况 </p>
<ul>
<li><strong>Lzo压缩</strong></li>
</ul>
<p><strong>优点：</strong>压缩/解压速度也比较快，合理的压缩率；支持split，是Hadoop中最流行的压缩格式；可以在Linux系统下安装lzop命令，使用方便<br><strong>缺点：</strong>压缩率比Gzip要低一些；Hadoop本身不支持，需要安装；在应用中对Lzo格式的文件需要做一些特殊处理（为了支持split要建索引，还需要指定InputFormat为Lzo格式）<br><strong>应用场景：</strong>一个很大的文本文件，压缩之后还大于200M以上的可以考虑，而且单个文件越大，Lzo优点越明显 </p>
<ul>
<li><strong>snappy压缩</strong></li>
</ul>
<p><strong>优点：</strong>高速压缩/解压速度，合理的压缩率；<br><strong>缺点：</strong>不支持split；压缩率比Gzip要低；Hadoop本身不支持，需要安装<br><strong>应用场景：</strong>当MapReduce作业的map输出的数据比较大的时候，作为map到reduce的中间数据的压缩格式；或者作为一个MapReduce作业的输出和另一个MapReduce作业的输入。</p>
<h1 id="2-MapReduce编程规范"><a href="#2-MapReduce编程规范" class="headerlink" title="2. MapReduce编程规范"></a>2. MapReduce编程规范</h1><h2 id="2-1-编写Mapper类"><a href="#2-1-编写Mapper类" class="headerlink" title="2.1 编写Mapper类"></a>2.1 编写Mapper类</h2><ol>
<li>继承org.apache.hadoop.mapreduce.Mapper类</li>
<li>设置mapper类的输入类型&lt;LongWritable, Text&gt;</li>
<li>设置mapper类的输出类型&lt;Text, IntWritable&gt;</li>
<li>将输入类型中的Text转换成String类型，并按照指定分隔符进行分割</li>
<li>通过context.write()方法进行输出</li>
</ol>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="keyword">package</span>  com.lytdev.dw.mapr;</span><br><span class="line"></span><br><span class="line"><span class="keyword">import</span> org.apache.hadoop.io.IntWritable;</span><br><span class="line"><span class="keyword">import</span> org.apache.hadoop.io.LongWritable;</span><br><span class="line"><span class="keyword">import</span> org.apache.hadoop.io.Text;</span><br><span class="line"><span class="keyword">import</span> org.apache.hadoop.mapreduce.Mapper;</span><br><span class="line"></span><br><span class="line"><span class="keyword">import</span> java.io.IOException;</span><br><span class="line"></span><br><span class="line"><span class="keyword">public</span> <span class="class"><span class="keyword">class</span> <span class="title">WordCountMapper</span> <span class="keyword">extends</span> <span class="title">Mapper</span>&lt;<span class="title">LongWritable</span>, <span class="title">Text</span>, <span class="title">Text</span>, <span class="title">IntWritable</span>&gt; </span>&#123;</span><br><span class="line"><span class="keyword">private</span> Text k = <span class="keyword">new</span> Text();</span><br><span class="line"><span class="keyword">private</span> IntWritable v = <span class="keyword">new</span> IntWritable(<span class="number">1</span>);</span><br><span class="line"></span><br><span class="line">    <span class="meta">@Override</span></span><br><span class="line">    <span class="function"><span class="keyword">protected</span> <span class="keyword">void</span> <span class="title">map</span><span class="params">(LongWritable key, Text value, Context context)</span> <span class="keyword">throws</span> IOException, InterruptedException </span>&#123;</span><br><span class="line">        String line = value.toString();</span><br><span class="line">        String[] words = line.split(<span class="string">&quot; &quot;</span>);</span><br><span 
class="line">        <span class="keyword">for</span> (String word : words) &#123;</span><br><span class="line">            k.set(word);</span><br><span class="line">            context.write(k, v);</span><br><span class="line">        &#125;</span><br><span class="line">    &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<h2 id="2-2-编写Reducer类"><a href="#2-2-编写Reducer类" class="headerlink" title="2.2 编写Reducer类"></a>2.2 编写Reducer类</h2><ol>
<li>继承org.apache.hadoop.mapreduce.Reducer类</li>
<li>设置reducer类的输入类型&lt;Text, IntWritable&gt;</li>
<li>设置reducer类的输出类型&lt;Text, IntWritable&gt;</li>
<li>对values值进行汇总求和</li>
<li>通过context.write()方法进行输出</li>
</ol>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="keyword">package</span>  com.lytdev.dw.mapr.demo;</span><br><span class="line"></span><br><span class="line"><span class="keyword">import</span> org.apache.hadoop.io.IntWritable;</span><br><span class="line"><span class="keyword">import</span> org.apache.hadoop.io.Text;</span><br><span class="line"><span class="keyword">import</span> org.apache.hadoop.mapreduce.Reducer;</span><br><span class="line"></span><br><span class="line"><span class="keyword">import</span> java.io.IOException;</span><br><span class="line"></span><br><span class="line"><span class="keyword">public</span> <span class="class"><span class="keyword">class</span> <span class="title">WordcountReducer</span> <span class="keyword">extends</span> <span class="title">Reducer</span>&lt;<span class="title">Text</span>, <span class="title">IntWritable</span>, <span class="title">Text</span>, <span class="title">IntWritable</span>&gt; </span>&#123;</span><br><span class="line"><span class="keyword">private</span> <span class="keyword">int</span> sum;</span><br><span class="line"><span class="keyword">private</span> IntWritable v = <span class="keyword">new</span> IntWritable();</span><br><span class="line"></span><br><span class="line">    <span class="meta">@Override</span></span><br><span class="line">    <span class="function"><span class="keyword">protected</span> <span class="keyword">void</span> <span class="title">reduce</span><span class="params">(Text key, Iterable&lt;IntWritable&gt; values, Context context)</span> <span class="keyword">throws</span> IOException, InterruptedException </span>&#123;</span><br><span class="line">        sum = <span class="number">0</span>;</span><br><span class="line">        <span class="keyword">for</span> (IntWritable value : values) &#123;</span><br><span class="line">            sum += value.get();</span><br><span class="line">        &#125;</span><br><span 
class="line">        v.set(sum);</span><br><span class="line">        context.write(key, v);</span><br><span class="line">    &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<h2 id="2-3-编写Driver类"><a href="#2-3-编写Driver类" class="headerlink" title="2.3 编写Driver类"></a>2.3 编写Driver类</h2><ol>
<li>创建一个org.apache.hadoop.conf.Configuration类对象</li>
<li>通过Job.getInstance(conf)获得一个job对象</li>
<li>设置job的3个类，driver、mapper、reducer</li>
<li>设置job的2个输出类型，map输出和总体输出</li>
<li>设置输入和输出路径</li>
<li>调用job.waitForCompletion(true)进行提交任务</li>
</ol>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="keyword">package</span>  com.lytdev.dw.mapr.demo;</span><br><span class="line"></span><br><span class="line"><span class="keyword">import</span> org.apache.hadoop.conf.Configuration;</span><br><span class="line"><span class="keyword">import</span> org.apache.hadoop.fs.Path;</span><br><span class="line"><span class="keyword">import</span> org.apache.hadoop.io.IntWritable;</span><br><span class="line"><span class="keyword">import</span> org.apache.hadoop.io.Text;</span><br><span class="line"><span class="keyword">import</span> org.apache.hadoop.mapreduce.Job;</span><br><span class="line"><span class="keyword">import</span> org.apache.hadoop.mapreduce.lib.input.FileInputFormat;</span><br><span class="line"><span class="keyword">import</span> org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;</span><br><span class="line"></span><br><span class="line"><span class="keyword">import</span> java.io.IOException;</span><br><span class="line"></span><br><span class="line"><span class="keyword">public</span> <span class="class"><span class="keyword">class</span> <span class="title">WordcountDriver</span> </span>&#123;</span><br><span class="line">    <span class="function"><span class="keyword">public</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="title">main</span><span class="params">(String[] args)</span> <span class="keyword">throws</span> IOException, InterruptedException, ClassNotFoundException </span>&#123;</span><br><span class="line">        Configuration conf = <span class="keyword">new</span> Configuration();</span><br><span class="line">        Job job = Job.getInstance(conf);</span><br><span class="line"></span><br><span class="line">        job.setJarByClass(WordcountDriver.class);</span><br><span class="line">        job.setMapperClass(WordcountMapper.class);</span><br><span class="line">        
job.setReducerClass(WordcountReducer.class);</span><br><span class="line"></span><br><span class="line">        job.setMapOutputKeyClass(Text.class);</span><br><span class="line">        job.setMapOutputValueClass(IntWritable.class);</span><br><span class="line">        job.setOutputKeyClass(Text.class);</span><br><span class="line">        job.setOutputValueClass(IntWritable.class);</span><br><span class="line"></span><br><span class="line">        FileInputFormat.setInputPaths(job, <span class="keyword">new</span> Path(<span class="string">&quot;/input&quot;</span>));</span><br><span class="line">        FileOutputFormat.setOutputPath(job, <span class="keyword">new</span> Path(<span class="string">&quot;/output&quot;</span>));</span><br><span class="line">        <span class="keyword">boolean</span> result = job.waitForCompletion(<span class="keyword">true</span>);</span><br><span class="line">        System.out.println(result);</span><br><span class="line">    &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>
</article><div class="post-copyright"><div class="post-copyright__author"><span class="post-copyright-meta">文章作者: </span><span class="post-copyright-info"><a href="mailto:undefined" rel="external nofollow noreferrer">liuyuantao</a></span></div><div class="post-copyright__type"><span class="post-copyright-meta">文章链接: </span><span class="post-copyright-info"><a href="https://liuyuantao.gitee.io/2021/11/12/56ac102284d9.html">https://liuyuantao.gitee.io/2021/11/12/56ac102284d9.html</a></span></div><div class="post-copyright__notice"><span class="post-copyright-meta">版权声明: </span><span class="post-copyright-info">本博客所有文章除特别声明外，均采用 <a href="https://creativecommons.org/licenses/by-nc-sa/4.0/" rel="external nofollow noreferrer" target="_blank">CC BY-NC-SA 4.0</a> 许可协议。转载请注明来自 <a href="https://liuyuantao.gitee.io" target="_blank">全栈进阶那些事</a>！</span></div></div><div class="tag_share"><div class="post-meta__tag-list"><a class="post-meta__tags" href="/tags/Hadoop/">Hadoop</a><a class="post-meta__tags" href="/tags/MapReduce/">MapReduce</a></div><div class="post_share"><div class="social-share" data-image="https://oscimg.oschina.net/oscnet/20200617165336828.png" data-sites="facebook,twitter,wechat,weibo,qq"></div><link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/social-share.js/dist/css/share.min.css" media="print" onload="this.media='all'"><script src="https://cdn.jsdelivr.net/npm/social-share.js/dist/js/social-share.min.js" defer></script></div></div><div class="post-reward"><div class="reward-button button--animated"><i class="fas fa-qrcode"></i> 打赏</div><div class="reward-main"><ul class="reward-all"><li class="reward-item"><a href="/img/qr_code_wechat.jpg" target="_blank"><img class="post-qr-code-img" src="/img/qr_code_wechat.jpg" alt="微信"/></a><div class="post-qr-code-desc">微信</div></li><li class="reward-item"><a href="/img/qr_code_alipay.jpg" target="_blank"><img class="post-qr-code-img" src="/img/qr_code_alipay.jpg" alt="支付宝"/></a><div 
class="post-qr-code-desc">支付宝</div></li></ul></div></div><nav class="pagination-post" id="pagination"><div class="prev-post pull-left"><a href="/2021/11/13/3f0a31ee0039.html"><img class="prev-cover" src="https://img-blog.csdnimg.cn/20200529093005305.png" onerror="onerror=null;src='/img/404.jpg'" alt="cover of previous post"><div class="pagination-info"><div class="label">上一篇</div><div class="prev_info">深入浅出学习Hive</div></div></a></div><div class="next-post pull-right"><a href="/2021/11/12/2d8833eb4290.html"><img class="next-cover" src="https://img-blog.csdnimg.cn/20200715092910375.png" onerror="onerror=null;src='/img/404.jpg'" alt="cover of next post"><div class="pagination-info"><div class="label">下一篇</div><div class="next_info">一文彻底搞懂HBase</div></div></a></div></nav><div class="relatedPosts"><div class="headline"><i class="fas fa-thumbs-up fa-fw"></i><span>相关推荐</span></div><div class="relatedPosts-list"><div><a href="/2020/11/07/7d14a19c4606.html" title="DataNode工作机制"><img class="cover" src="https://oscimg.oschina.net/oscnet/up-0a831a2854d1859bc355a98a741ee74452b.png" alt="cover"><div class="content is-center"><div class="date"><i class="far fa-calendar-alt fa-fw"></i> 2020-11-07</div><div class="title">DataNode工作机制</div></div></a></div><div><a href="/2020/11/07/9cc4868ea813.html" title="Hadoop-Yarn常用的调优参数"><img class="cover" src="https://cdn.pixabay.com/photo/2021/08/08/10/34/ocean-6530523__340.jpg" alt="cover"><div class="content is-center"><div class="date"><i class="far fa-calendar-alt fa-fw"></i> 2020-11-07</div><div class="title">Hadoop-Yarn常用的调优参数</div></div></a></div><div><a href="/2020/11/07/5f9b1cc664c1.html" title="HDFS—集群扩容及缩容"><img class="cover" src="https://oscimg.oschina.net/oscnet/up-acb08d98e431429592cdb8802409bb29a19.png" alt="cover"><div class="content is-center"><div class="date"><i class="far fa-calendar-alt fa-fw"></i> 2020-11-07</div><div class="title">HDFS—集群扩容及缩容</div></div></a></div><div><a href="/2020/11/07/a8fda6be4c8d.html" 
title="HDFS—存储优化（纠删码）"><img class="cover" src="https://oscimg.oschina.net/oscnet/up-87ba27811580330155a29254c76223dd59f.png" alt="cover"><div class="content is-center"><div class="date"><i class="far fa-calendar-alt fa-fw"></i> 2020-11-07</div><div class="title">HDFS—存储优化（纠删码）</div></div></a></div><div><a href="/2020/11/07/3a181e232503.html" title="Hadoop小文件优化方法"><img class="cover" src="https://oscimg.oschina.net/oscnet/up-1681b15baa2d4bc66ceb813b9d6fcd4ad55.png" alt="cover"><div class="content is-center"><div class="date"><i class="far fa-calendar-alt fa-fw"></i> 2020-11-07</div><div class="title">Hadoop小文件优化方法</div></div></a></div><div><a href="/2020/11/07/fd53f751c2eb.html" title="Hadoop3.X分布式运行环境搭建手记"><img class="cover" src="https://oscimg.oschina.net/oscnet/c646ab2c9196c3eacc49b81b10d47f7a191.jpg" alt="cover"><div class="content is-center"><div class="date"><i class="far fa-calendar-alt fa-fw"></i> 2020-11-07</div><div class="title">Hadoop3.X分布式运行环境搭建手记</div></div></a></div></div></div></div><div class="aside-content" id="aside-content"><div class="sticky_layout"><div class="card-widget" id="card-toc"><div class="item-headline"><i class="fas fa-stream"></i><span>目录</span></div><div class="toc-content"><ol class="toc"><li class="toc-item toc-level-1"><a class="toc-link" href="#1-MapReduce%E7%AE%80%E4%BB%8B"><span class="toc-text">1. 
MapReduce简介</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#1-1-MapReduce%E5%AE%9A%E4%B9%89"><span class="toc-text">1.1 MapReduce定义</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-2-MapReduce%E5%A4%84%E7%90%86%E8%BF%87%E7%A8%8B"><span class="toc-text">1.2 MapReduce处理过程</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-3-MapReduce%E7%9A%84%E4%BC%98%E7%82%B9"><span class="toc-text">1.3 MapReduce的优点</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-4-MapReduce%E7%9A%84%E7%BC%BA%E7%82%B9"><span class="toc-text">1.4 MapReduce的缺点</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-5-MapReduce%E6%A0%B8%E5%BF%83%E7%BC%96%E7%A8%8B%E6%80%9D%E6%83%B3"><span class="toc-text">1.5 MapReduce核心编程思想</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-6-MapReduce%E8%BF%9B%E7%A8%8B"><span class="toc-text">1.6 MapReduce进程</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-7-%E6%95%B0%E6%8D%AE%E5%88%87%E7%89%87%E4%B8%8EMapTask%E5%B9%B6%E8%A1%8C%E5%BA%A6%E6%9C%BA%E5%88%B6"><span class="toc-text">1.7 数据切片与MapTask并行度机制</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-8-FileInputFormat%E5%88%87%E7%89%87%E6%9C%BA%E5%88%B6"><span class="toc-text">1.8 FileInputFormat切片机制</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-9-CombineTextInputFormat%E5%88%87%E7%89%87%E6%9C%BA%E5%88%B6"><span class="toc-text">1.9 CombineTextInputFormat切片机制</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-10-FileInputFormat%E5%AE%9E%E7%8E%B0%E7%B1%BB"><span class="toc-text">1.10 FileInputFormat实现类</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-11-Shuffle%E6%9C%BA%E5%88%B6"><span class="toc-text">1.11 Shuffle机制</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" 
href="#1-12-%E5%88%86%E5%8C%BA"><span class="toc-text">1.12 分区</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-13-%E6%8E%92%E5%BA%8F"><span class="toc-text">1.13 排序</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-14-Combiner%E5%90%88%E5%B9%B6"><span class="toc-text">1.14 Combiner合并</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-15-MapTask%E5%B7%A5%E4%BD%9C%E6%9C%BA%E5%88%B6"><span class="toc-text">1.15 MapTask工作机制</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-16-ReduceTask%E5%B7%A5%E4%BD%9C%E6%9C%BA%E5%88%B6"><span class="toc-text">1.16 ReduceTask工作机制</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-17-ReduceTask%E5%B9%B6%E8%A1%8C%E5%BA%A6"><span class="toc-text">1.17 ReduceTask并行度</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-18-Reduce-Join%E5%B7%A5%E4%BD%9C%E5%8E%9F%E7%90%86"><span class="toc-text">1.18 Reduce Join工作原理</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-19-Map-Join%E5%B7%A5%E4%BD%9C%E5%8E%9F%E7%90%86"><span class="toc-text">1.19 Map Join工作原理</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-20-%E8%AE%A1%E6%95%B0%E5%99%A8%E5%BA%94%E7%94%A8"><span class="toc-text">1.20 计数器应用</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-21-%E6%95%B0%E6%8D%AE%E5%8E%8B%E7%BC%A9"><span class="toc-text">1.21 数据压缩</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-22-MR%E6%94%AF%E6%8C%81%E7%9A%84%E5%8E%8B%E7%BC%A9%E7%BC%96%E7%A0%81"><span class="toc-text">1.22 MR支持的压缩编码</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#1-23-%E5%8E%8B%E7%BC%A9%E6%96%B9%E5%BC%8F%E9%80%89%E6%8B%A9"><span class="toc-text">1.23 压缩方式选择</span></a></li></ol></li><li class="toc-item toc-level-1"><a class="toc-link" href="#2-MapReduce%E7%BC%96%E7%A8%8B%E8%A7%84%E8%8C%83"><span 
class="toc-text">2. MapReduce编程规范</span></a><ol class="toc-child"><li class="toc-item toc-level-2"><a class="toc-link" href="#2-1-%E7%BC%96%E5%86%99Mapper%E7%B1%BB"><span class="toc-text">2.1 编写Mapper类</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#2-2-%E7%BC%96%E5%86%99Reducer%E7%B1%BB"><span class="toc-text">2.2 编写Reducer类</span></a></li><li class="toc-item toc-level-2"><a class="toc-link" href="#2-3-%E7%BC%96%E5%86%99Driver%E7%B1%BB"><span class="toc-text">2.3 编写Driver类</span></a></li></ol></li></ol></div></div></div></div></main><footer id="footer"><div id="footer-wrap"><div class="copyright">&copy;2020 - 2021 By liuyuantao</div><div class="framework-info"><span>框架 </span><a target="_blank" rel="noopener external nofollow noreferrer" href="https://hexo.io">Hexo</a><span class="footer-separator">|</span><span>主题 </span><a target="_blank" rel="noopener external nofollow noreferrer" href="https://github.com/jerryc127/hexo-theme-butterfly">Butterfly</a></div><div class="footer_custom_text">扫描二维码,关注我们!了解更多全栈技能!<br/><img style="width:100px;height:100px;" src="/img/qrcode_for_gh_da91cebd00cc_258.jpg" ></div></div></footer></div><div id="rightside"><div id="rightside-config-hide"><button id="readmode" type="button" title="阅读模式"><i class="fas fa-book-open"></i></button><button id="font-plus" type="button" title="放大字体"><i class="fas fa-plus"></i></button><button id="font-minus" type="button" title="缩小字体"><i class="fas fa-minus"></i></button><button id="translateLink" type="button" title="简繁转换">简</button><button id="darkmode" type="button" title="浅色和深色模式转换"><i class="fas fa-adjust"></i></button><button id="hide-aside-btn" type="button" title="单栏和双栏切换"><i class="fas fa-arrows-alt-h"></i></button></div><div id="rightside-config-show"><button id="rightside_config" type="button" title="设置"><i class="fas fa-cog fa-spin"></i></button><button class="close" id="mobile-toc-button" type="button" title="目录"><i class="fas fa-list-ul"></i></button><button 
id="go-up" type="button" title="回到顶部"><i class="fas fa-arrow-up"></i></button></div></div><div id="local-search"><div class="search-dialog"><div class="search-dialog__title" id="local-search-title">本地搜索</div><div id="local-input-panel"><div id="local-search-input"><div class="local-search-box"><input class="local-search-box--input" placeholder="搜索文章" type="text"/></div></div></div><hr/><div id="local-search-results"></div><span class="search-close-button"><i class="fas fa-times"></i></span></div><div id="search-mask"></div></div><div><script src="/js/utils.js"></script><script src="/js/main.js"></script><script src="/js/tw_cn.js"></script><script src="https://cdn.jsdelivr.net/npm/medium-zoom/dist/medium-zoom.min.js"></script><script src="https://cdn.jsdelivr.net/npm/instant.page/instantpage.min.js" type="module"></script><script src="https://cdn.jsdelivr.net/npm/node-snackbar/dist/snackbar.min.js"></script><script src="/js/search/local-search.js"></script><script>// Preloader control: toggles page scrolling and the "loaded" class on the
// #loading-box overlay (assumes #loading-box exists earlier in the DOM — TODO confirm).
var preloader = {
  // Hide the loading overlay and re-enable scrolling.
  endLoading: () => {
    document.body.style.overflow = 'auto';
    document.getElementById('loading-box').classList.add("loaded")
  },
  // Re-show the loading overlay and lock scrolling
  // (NOTE(review): presumably invoked by the theme before pjax navigation — confirm).
  initLoading: () => {
    document.body.style.overflow = '';
    document.getElementById('loading-box').classList.remove("loaded")
  }
}
// Bug fix: pass the function REFERENCE. The original wrote
// `addEventListener('load', preloader.endLoading())`, which invoked endLoading
// immediately during parsing and registered `undefined` as the listener, so
// nothing actually ran at the 'load' event.
window.addEventListener('load', preloader.endLoading)</script><div class="js-pjax"></div><script defer="defer" id="fluttering_ribbon" mobile="false" src="https://cdn.jsdelivr.net/npm/butterfly-extsrc@1/dist/canvas-fluttering-ribbon.min.js"></script><script async data-pjax src="//busuanzi.ibruce.info/busuanzi/2.3/busuanzi.pure.mini.js"></script></div></body></html>