<!DOCTYPE html><html lang="zh-CN" data-theme="light"><head><meta charset="UTF-8"><meta http-equiv="X-UA-Compatible" content="IE=edge"><meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=no"><title>Kafka基础知识 | 无人深空</title><meta name="keywords" content="Kafka"><meta name="author" content="zcm"><meta name="copyright" content="zcm"><meta name="format-detection" content="telephone=no"><meta name="theme-color" content="#ffffff"><meta name="description" content="Kafka 基础知识1.Kafka 概述 定义  传统定义  Kafka 是一个分布式的基于发布&#x2F;订阅模式的消息队列（MessageQueue），主要应用于大数据实时处理领域   最新定义  Kafka 是一个开源的分布式事件流平台（Event StreamingPlatform），被数千家公司用于高性能数据管道、流分析、数据集成和关键任务应用     消息队列  消息队列有哪些  目前">
<meta property="og:type" content="article">
<meta property="og:title" content="Kafka基础知识">
<meta property="og:url" content="https://gitee.com/zcmmmm/zcmmmm/2022/10/30/kafka%E5%9F%BA%E7%A1%80%E7%9F%A5%E8%AF%86/index.html">
<meta property="og:site_name" content="无人深空">
<meta property="og:description" content="Kafka 基础知识1.Kafka 概述 定义  传统定义  Kafka 是一个分布式的基于发布&#x2F;订阅模式的消息队列（MessageQueue），主要应用于大数据实时处理领域   最新定义  Kafka 是一个开源的分布式事件流平台（Event StreamingPlatform），被数千家公司用于高性能数据管道、流分析、数据集成和关键任务应用     消息队列  消息队列有哪些  目前">
<meta property="og:locale" content="zh_CN">
<meta property="og:image" content="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/kafka.png">
<meta property="article:published_time" content="2022-10-30T01:47:38.000Z">
<meta property="article:modified_time" content="2023-05-02T02:07:37.938Z">
<meta property="article:author" content="zcm">
<meta property="article:tag" content="Kafka">
<meta name="twitter:card" content="summary">
<meta name="twitter:image" content="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/kafka.png"><link rel="shortcut icon" href="/img/favicon.png"><link rel="canonical" href="https://gitee.com/zcmmmm/zcmmmm/2022/10/30/kafka%E5%9F%BA%E7%A1%80%E7%9F%A5%E8%AF%86/"><link rel="preconnect" href="//cdn.jsdelivr.net"/><link rel="preconnect" href="//busuanzi.ibruce.info"/><link rel="stylesheet" href="/css/index.css"><link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@fortawesome/fontawesome-free/css/all.min.css" media="print" onload="this.media='all'"><link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/node-snackbar/dist/snackbar.min.css" media="print" onload="this.media='all'"><link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@fancyapps/ui/dist/fancybox.min.css" media="print" onload="this.media='all'"><script>const GLOBAL_CONFIG = { 
  root: '/',                 // site root path
  algolia: undefined,        // Algolia search is not configured for this build
  localSearch: {"path":"/search.xml","preload":false,"languages":{"hits_empty":"找不到您查询的内容：${query}"}}, // built-in local search settings
  translate: undefined,
  noticeOutdate: undefined,
  // NOTE(review): 'highlighjs' looks misspelled, but it is presumably the exact
  // token the theme script compares against — confirm before "fixing" it.
  highlight: {"plugin":"highlighjs","highlightCopy":true,"highlightLang":true,"highlightHeightLimit":200},
  copy: {
    // Toast messages shown after a copy-to-clipboard attempt.
    success: '复制成功',
    error: '复制错误',
    noSupport: '浏览器不支持'
  },
  relativeDate: {
    // When true, dates render relatively ("x 天前") instead of absolutely.
    homepage: false,
    post: false
  },
  runtime: '天',             // unit label for the site runtime counter
  date_suffix: {
    // Suffixes used when relative dates are rendered.
    just: '刚刚',
    min: '分钟前',
    hour: '小时前',
    day: '天前',
    month: '个月前'
  },
  copyright: undefined,
  lightbox: 'fancybox',      // image lightbox implementation to use
  Snackbar: {"chs_to_cht":"你已切换为繁体","cht_to_chs":"你已切换为简体","day_to_night":"你已切换为深色模式","night_to_day":"你已切换为浅色模式","bgLight":"#49b1f5","bgDark":"#1f1f1f","position":"bottom-left"}, // snackbar texts, colors and position
  source: {
    // CDN locations for on-demand assets.
    justifiedGallery: {
      js: 'https://cdn.jsdelivr.net/npm/flickr-justified-gallery/dist/fjGallery.min.js',
      css: 'https://cdn.jsdelivr.net/npm/flickr-justified-gallery/dist/fjGallery.min.css'
    }
  },
  isPhotoFigcaption: false,
  islazyload: false,
  isAnchor: false
}</script><script id="config-diff">var GLOBAL_CONFIG_SITE = {
  title: 'Kafka基础知识',    // current page title
  isPost: true,              // this page is a single post
  isHome: false,
  isHighlightShrink: false,
  isToc: true,               // table of contents is enabled for this post
  postUpdate: '2023-05-02 10:07:37'
}</script><noscript><style type="text/css">
  /* Applied only inside <noscript>, i.e. when JavaScript is disabled:
     force elements that scripts would otherwise reveal to be visible. */
  #nav {
    opacity: 1
  }
  .justified-gallery img {
    opacity: 1
  }

  #recent-posts time,
  #post-meta time {
    display: inline !important
  }
</style></noscript><script>(win=>{
    // localStorage wrapper whose entries carry an expiry timestamp (ttl in days).
    win.saveToLocal = {
      set: function setWithExpiry(key, value, ttl) {
        // A ttl of 0 means "do not persist at all".
        if (ttl === 0) return
        const expiresAt = new Date().getTime() + ttl * 86400000
        localStorage.setItem(key, JSON.stringify({ value: value, expiry: expiresAt }))
      },

      get: function getWithExpiry(key) {
        const raw = localStorage.getItem(key)
        if (!raw) {
          return undefined
        }
        const record = JSON.parse(raw)
        // Expired entries are removed lazily, on first read after expiry.
        if (new Date().getTime() > record.expiry) {
          localStorage.removeItem(key)
          return undefined
        }
        return record.value
      }
    }
  
    // Dynamically load an external script; the promise resolves once the
    // script has loaded (or rejects on a network/parse error).
    win.getScript = url => new Promise((resolve, reject) => {
      const script = document.createElement('script')
      script.src = url
      script.async = true
      script.onerror = reject
      // Legacy IE fires onreadystatechange instead of onload, so both are wired
      // to the same handler.
      script.onload = script.onreadystatechange = function() {
        const loadState = this.readyState
        // In browsers that expose readyState, wait until 'loaded'/'complete'.
        if (loadState && loadState !== 'loaded' && loadState !== 'complete') return
        // Clear both handlers first so resolve() cannot fire twice.
        script.onload = script.onreadystatechange = null
        resolve()
      }
      document.head.appendChild(script)
    })
  
      // Switch the page to the dark theme and update the theme-color meta to match.
      win.activateDarkMode = function () {
        document.documentElement.setAttribute('data-theme', 'dark')
        const themeColorMeta = document.querySelector('meta[name="theme-color"]')
        if (themeColorMeta !== null) themeColorMeta.setAttribute('content', '#0d0d0d')
      }
      // Switch the page to the light theme and update the theme-color meta to match.
      win.activateLightMode = function () {
        document.documentElement.setAttribute('data-theme', 'light')
        const themeColorMeta = document.querySelector('meta[name="theme-color"]')
        if (themeColorMeta !== null) themeColorMeta.setAttribute('content', '#ffffff')
      }
      // Re-apply the visitor's persisted theme choice, if one was saved.
      const storedTheme = saveToLocal.get('theme')

      if (storedTheme === 'dark') activateDarkMode()
      else if (storedTheme === 'light') activateLightMode()

      // Restore the last-saved sidebar visibility ('hide' adds the class,
      // anything else removes it; undefined leaves the default untouched).
      const asideStatus = saveToLocal.get('aside-status')
      if (asideStatus !== undefined) {
        document.documentElement.classList.toggle('hide-aside', asideStatus === 'hide')
      }

      // Tag Apple devices with an 'apple' class — presumably consumed by the
      // theme's CSS; verify against the stylesheets.
      const detectApple = () => {
        if (/iPad|iPhone|iPod|Macintosh/.test(navigator.userAgent)) {
          document.documentElement.classList.add('apple')
        }
      }
      detectApple()
    })(window)</script><link rel="stylesheet" href="/css/categories.css?v1"><link rel="stylesheet" href="/css/navigation.css?v1"><link rel="stylesheet" href="/css/custom.css?v1"><meta name="generator" content="Hexo 6.3.0"></head><body><div id="sidebar"><div id="menu-mask"></div><div id="sidebar-menus"><div class="avatar-img is-center"><img src="https://s2.loli.net/2022/09/22/36LN4kcjYOd9HT8.jpg" onerror="onerror=null;src='/img/friend_404.gif'" alt="avatar"/></div><div class="sidebar-site-data site-data is-center"><a href="/archives/"><div class="headline">文章</div><div class="length-num">43</div></a><a href="/tags/"><div class="headline">标签</div><div class="length-num">25</div></a><a href="/categories/"><div class="headline">分类</div><div class="length-num">11</div></a></div><hr/><div class="menus_items"><div class="menus_item"><a class="site-page" href="/"><i class="fa-fw fas fa-home"></i><span> 首页</span></a></div><div class="menus_item"><a class="site-page" href="/archives/"><i class="fa-fw fas fa-archive"></i><span> 时间轴</span></a></div><div class="menus_item"><a class="site-page" href="/tags/"><i class="fa-fw fas fa-tags"></i><span> 标签</span></a></div><div class="menus_item"><a class="site-page" href="/categories/"><i class="fa-fw fas fa-folder-open"></i><span> 分类</span></a></div></div></div></div><div class="post" id="body-wrap"><header class="not-top-img" id="page-header"><nav id="nav"><span id="blog_name"><a id="site-name" href="/">无人深空</a></span><div id="menus"></div><div class="menus_items"><div class="menus_item"><a class="site-page" href="/"><i class="fa-fw fas fa-home"></i><span> 首页</span></a></div><div class="menus_item"><a class="site-page" href="/archives/"><i class="fa-fw fas fa-archive"></i><span> 时间轴</span></a></div><div class="menus_item"><a class="site-page" href="/tags/"><i class="fa-fw fas fa-tags"></i><span> 标签</span></a></div><div class="menus_item"><a class="site-page" href="/categories/"><i class="fa-fw fas fa-folder-open"></i><span> 
分类</span></a></div></div><div id="nav-right"><div id="search-button"><a class="site-page social-icon search"><i class="fas fa-search fa-fw"></i></a></div><div id="toggle-menu"><a class="site-page"><i class="fas fa-bars fa-fw"></i></a></div></div></nav></header><main class="layout" id="content-inner"><div id="post"><div id="post-info"><h1 class="post-title">Kafka基础知识</h1><div id="post-meta"><div class="meta-firstline"><span class="post-meta-date"><i class="far fa-calendar-alt fa-fw post-meta-icon"></i><span class="post-meta-label">发表于</span><time class="post-meta-date-created" datetime="2022-10-30T01:47:38.000Z" title="发表于 2022-10-30 09:47:38">2022-10-30</time><span class="post-meta-separator">|</span><i class="fas fa-history fa-fw post-meta-icon"></i><span class="post-meta-label">更新于</span><time class="post-meta-date-updated" datetime="2023-05-02T02:07:37.938Z" title="更新于 2023-05-02 10:07:37">2023-05-02</time></span><span class="post-meta-categories"><span class="post-meta-separator">|</span><i class="fas fa-inbox fa-fw post-meta-icon"></i><a class="post-meta-categories" href="/categories/%E4%B8%AD%E9%97%B4%E4%BB%B6/">中间件</a></span></div><div class="meta-secondline"><span class="post-meta-separator">|</span><span class="post-meta-wordcount"><i class="far fa-file-word fa-fw post-meta-icon"></i><span class="post-meta-label">字数总计:</span><span class="word-count">15.1k</span><span class="post-meta-separator">|</span><i class="far fa-clock fa-fw post-meta-icon"></i><span class="post-meta-label">阅读时长:</span><span>56分钟</span></span><span class="post-meta-separator">|</span><span class="post-meta-pv-cv" id="" data-flag-title="Kafka基础知识"><i class="far fa-eye fa-fw post-meta-icon"></i><span class="post-meta-label">阅读量:</span><span id="busuanzi_value_page_pv"><i class="fa-solid fa-spinner fa-spin"></i></span></span></div></div></div><article class="post-content" id="article-container"><h2 id="Kafka-基础知识"><a href="#Kafka-基础知识" class="headerlink" title="Kafka 基础知识"></a><a 
target="_blank" rel="noopener" href="https://www.bilibili.com/video/BV1vr4y1677k">Kafka 基础知识</a></h2><h3 id="1-Kafka-概述"><a href="#1-Kafka-概述" class="headerlink" title="1.Kafka 概述"></a>1.Kafka 概述</h3><ol>
<li><p>定义</p>
<ul>
<li><p>传统定义</p>
<blockquote>
<p>Kafka 是一个分布式的基于发布&#x2F;订阅模式的消息队列（MessageQueue），主要应用于大数据实时处理领域</p>
</blockquote>
</li>
<li><p>最新定义</p>
<blockquote>
<p>Kafka 是一个开源的分布式事件流平台（Event StreamingPlatform），被数千家公司用于高性能数据管道、流分析、数据集成和关键任务应用</p>
</blockquote>
</li>
</ul>
</li>
<li><p>消息队列</p>
<ul>
<li><p>消息队列有哪些</p>
<blockquote>
<p>目前企业中比较常见的消息队列产品主要有 Kafka、ActiveMQ 、RabbitMQ 、RocketMQ 等</p>
<p>在大数据场景主要采用 Kafka 作为消息队列</p>
<p>在 JavaEE 开发中主要采用 ActiveMQ、RabbitMQ、RocketMQ</p>
</blockquote>
</li>
<li><p>消息队列应用场景</p>
<blockquote>
<p>传统的消息队列的主要应用场景包括：<strong>缓存消峰、解耦、异步通信</strong></p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%B6%88%E5%B3%B0.png" alt="消峰"></p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E8%A7%A3%E8%80%A6.png" alt="解耦"></p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E5%BC%82%E6%AD%A5%E9%80%9A%E4%BF%A1.png" alt="异步通信"></p>
</li>
<li><p>消息队列的两种模式</p>
<ul>
<li>点对点模式：消息只能发布到一个主题， 消费完成就删除消息，且只有一个消费者</li>
<li>发布 &#x2F; 订阅模式：消息可以发布到多个主题， 消息一般保留七天，且有多个消费者</li>
</ul>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%B6%88%E6%81%AF%E9%98%9F%E5%88%97%E6%A8%A1%E5%BC%8F.png" alt="消息队列模式"></p>
</li>
</ul>
</li>
<li><p>基础架构</p>
<blockquote>
<p>在 Kafka2.8 版本前，Zookeeper 的 Consumer 文件中存放消息被消费的记录（offset）</p>
<p>在 Kafka2.8 版本后，消息被消费的记录（offset）存放在 Kafka 中</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E5%9F%BA%E7%A1%80%E6%9E%B6%E6%9E%84.png" alt="基础架构"></p>
<ul>
<li><p>Producer</p>
<blockquote>
<p>消息生产者，就是向 Kafka broker 发消息的客户端</p>
</blockquote>
</li>
<li><p>Consumer</p>
<blockquote>
<p>消息消费者，向 Kafka broker 取消息的客户端</p>
</blockquote>
</li>
<li><p>Consumer Group（CG）</p>
<blockquote>
<p>消费者组，由多个 Consumer 组成</p>
<p>消费者组内每个消费者负责消费不同分区的数据，<strong>一个分区只能由一个组内消费者消费</strong></p>
<p><strong>消费者组之间互不影响</strong></p>
<p>所有的消费者都属于某个消费者组，即消费者组是逻辑上的一个订阅者</p>
</blockquote>
</li>
<li><p>Broker</p>
<blockquote>
<p>一台 Kafka 服务器就是一个 broker</p>
<p>一个集群由多个 broker 组成</p>
<p>一个 broker 可以容纳多个 topic</p>
</blockquote>
</li>
<li><p>Topic</p>
<blockquote>
<p>可以理解为一个队列，生产者和消费者面向的都是一个 topic</p>
</blockquote>
</li>
<li><p>Partition</p>
<blockquote>
<p>为了实现扩展性，一个非常大的 topic 可以分布到多个 broker（即服务器）上</p>
<p>一个 topic 可以分为多个 partition，每个 partition 是一个有序的队列</p>
</blockquote>
</li>
<li><p>Replica</p>
<blockquote>
<p>副本</p>
<p>一个 topic 的每个分区都有若干个副本，一个 Leader 和若干个 Follower</p>
</blockquote>
</li>
<li><p>Leader</p>
<blockquote>
<p>每个分区多个副本的<code>主</code>，生产者发送数据的对象，以及消费者消费数据的对象都是 Leader</p>
</blockquote>
</li>
<li><p>Follower</p>
<blockquote>
<p>每个分区多个副本中的<code>从</code>，实时从 Leader 中同步数据，保持和 Leader 数据的同步</p>
<p>Leader 发生故障时，某个 Follower 会成为新的 Leader</p>
</blockquote>
</li>
</ul>
</li>
</ol>
<h3 id="2-Kafka-快速入门"><a href="#2-Kafka-快速入门" class="headerlink" title="2.Kafka 快速入门"></a>2.Kafka 快速入门</h3><ol>
<li><p>安装部署</p>
<ul>
<li><p>集群规划</p>
<blockquote>
<p>实际生产中不是每台都需要安装</p>
<p>此处是只有三台虚拟机，且为了演示集群</p>
</blockquote>
<table>
<thead>
<tr>
<th align="center">hadoop102</th>
<th align="center">hadoop103</th>
<th align="center">hadoop104</th>
</tr>
</thead>
<tbody><tr>
<td align="center">zk</td>
<td align="center">zk</td>
<td align="center">zk</td>
</tr>
<tr>
<td align="center">kafka</td>
<td align="center">kafka</td>
<td align="center">kafka</td>
</tr>
</tbody></table>
</li>
<li><p>集群部署</p>
<ul>
<li><p><a target="_blank" rel="noopener" href="http://kafka.apache.org/downloads.html">Kafka 官网下载地址</a></p>
</li>
<li><p>解压安装包并修改名称</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">tar -zxvf kafka_2.12-3.0.0.tgz -C /opt/module/</span><br><span class="line">mv kafka_2.12-3.0.0/ kafka</span><br></pre></td></tr></table></figure>
</li>
<li><p>修改配置文件 <code>server.properties</code></p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">cd config/</span><br><span class="line">vim server.properties</span><br></pre></td></tr></table></figure>

<figure class="highlight properties"><table><tr><td class="code"><pre><span class="line"><span class="comment">#broker 的全局唯一编号，不能重复，只能是数字。</span></span><br><span class="line"><span class="attr">broker.id</span>=<span class="string">0</span></span><br><span class="line"><span class="comment">#处理网络请求的线程数量</span></span><br><span class="line"><span class="attr">num.network.threads</span>=<span class="string">3</span></span><br><span class="line"><span class="comment">#用来处理磁盘 IO 的线程数量</span></span><br><span class="line"><span class="attr">num.io.threads</span>=<span class="string">8</span></span><br><span class="line"><span class="comment">#发送套接字的缓冲区大小</span></span><br><span class="line"><span class="attr">socket.send.buffer.bytes</span>=<span class="string">102400</span></span><br><span class="line"><span class="comment">#接收套接字的缓冲区大小</span></span><br><span class="line"><span class="attr">socket.receive.buffer.bytes</span>=<span class="string">102400</span></span><br><span class="line"><span class="comment">#请求套接字的缓冲区大小</span></span><br><span class="line"><span class="attr">socket.request.max.bytes</span>=<span class="string">104857600</span></span><br><span class="line"><span class="comment">#kafka 运行日志(数据)存放的路径，路径不需要提前创建，kafka 自动帮你创建，可以配置多个磁盘路径，路径与路径之间可以用&quot;，&quot;分隔</span></span><br><span class="line"><span class="attr">log.dirs</span>=<span class="string">/opt/module/kafka/datas</span></span><br><span class="line"><span class="comment">#topic 在当前 broker 上的分区个数</span></span><br><span class="line"><span class="attr">num.partitions</span>=<span class="string">1</span></span><br><span class="line"><span class="comment">#用来恢复和清理 data 下数据的线程数量</span></span><br><span class="line"><span class="attr">num.recovery.threads.per.data.dir</span>=<span class="string">1</span></span><br><span class="line"><span class="comment"># 每个 topic 创建时的副本数，默认时 1 个副本</span></span><br><span class="line"><span class="attr">offsets.topic.replication.factor</span>=<span 
class="string">1</span></span><br><span class="line"><span class="comment">#segment 文件保留的最长时间，超时将被删除</span></span><br><span class="line"><span class="attr">log.retention.hours</span>=<span class="string">168</span></span><br><span class="line"><span class="comment">#每个 segment 文件的大小，默认最大 1G</span></span><br><span class="line"><span class="attr">log.segment.bytes</span>=<span class="string">1073741824</span></span><br><span class="line"><span class="comment"></span></span><br><span class="line"><span class="comment"># 检查过期数据的时间，默认 5 分钟检查一次是否数据过期</span></span><br><span class="line"><span class="attr">log.retention.check.interval.ms</span>=<span class="string">300000</span></span><br><span class="line"><span class="comment">#配置连接 Zookeeper 集群地址（在 zk 根目录下创建/kafka，方便管理）</span></span><br><span class="line"><span class="attr">zookeeper.connect</span>=<span class="string">hadoop102:2181,hadoop103:2181,hadoop104:2181/kafka</span></span><br></pre></td></tr></table></figure>
</li>
<li><p>分发安装包</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">xsync kafka/</span><br></pre></td></tr></table></figure>

<blockquote>
<p>分别在 hadoop103 和 hadoop104 上修改配置文件 <code>/opt/module/kafka/config/server.properties</code> 中的 broker.id&#x3D;1、broker.id&#x3D;2</p>
<p><strong>broker.id 不得重复，整个集群中唯一</strong></p>
</blockquote>
</li>
<li><p>配置环境变量并刷新</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">sudo vim /etc/profile.d/my_env.sh</span><br></pre></td></tr></table></figure>

<figure class="highlight sh"><table><tr><td class="code"><pre><span class="line"><span class="comment">#KAFKA_HOME</span></span><br><span class="line"><span class="built_in">export</span> KAFKA_HOME=/opt/module/kafka</span><br><span class="line"><span class="built_in">export</span> PATH=<span class="variable">$PATH</span>:<span class="variable">$KAFKA_HOME</span>/bin</span><br></pre></td></tr></table></figure>

<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">source /etc/profile</span><br></pre></td></tr></table></figure></li>
</ul>
</li>
<li><p>启动集群</p>
<ul>
<li><p>启动 Zookeeper</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line"><span class="meta prompt_"># </span><span class="language-bash">启动 (默认守护进程)</span></span><br><span class="line">./zkServer.sh start</span><br><span class="line"><span class="meta prompt_"># </span><span class="language-bash">查看状态</span></span><br><span class="line">./zkServer.sh status</span><br><span class="line"><span class="meta prompt_"># </span><span class="language-bash">停止</span></span><br><span class="line">./zkServer.sh stop</span><br></pre></td></tr></table></figure>
</li>
<li><p>启动 Kafka</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line"><span class="meta prompt_"># </span><span class="language-bash">启动，守护方式 (环境变量配置前提下)</span></span><br><span class="line">kafka-server-start.sh -daemon /home/environment/kafka/config/server.properties</span><br><span class="line"><span class="meta prompt_"># </span><span class="language-bash">停止</span></span><br><span class="line">kafka-server-stop.sh </span><br></pre></td></tr></table></figure>

<blockquote>
<p>停止 Kafka 集群时，一定要等 Kafka 所有节点进程全部停止后再停止 Zookeeper 集群</p>
<p>因为 Zookeeper 集群当中记录着 Kafka 集群相关信息，Zookeeper 集群一旦先停止，Kafka 集群就没有办法再获取停止进程的信息，只能手动杀死 Kafka 进程了</p>
</blockquote>
</li>
</ul>
</li>
</ul>
</li>
<li><p>Kafka 命令行操作</p>
<ul>
<li><p>主题相关操作</p>
<ul>
<li><p>查看操作主题命令参数</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">bin/kafka-topics.sh</span><br></pre></td></tr></table></figure>

<table>
<thead>
<tr>
<th>参数</th>
<th>描述</th>
</tr>
</thead>
<tbody><tr>
<td>–bootstrap-server&lt;String: servertoconnect to&gt;</td>
<td>连接的KafkaBroker主机名称和端口号</td>
</tr>
<tr>
<td>–topic&lt;String: topic&gt;</td>
<td>操作的topic名称</td>
</tr>
<tr>
<td>–create</td>
<td>创建主题</td>
</tr>
<tr>
<td>–delete</td>
<td>删除主题</td>
</tr>
<tr>
<td>–alter</td>
<td>修改主题</td>
</tr>
<tr>
<td>–list</td>
<td>查看所有主题</td>
</tr>
<tr>
<td>-describe</td>
<td>查看主题详细描述</td>
</tr>
<tr>
<td>–partitions&lt;lnteger: # of partitions&gt;</td>
<td>设置分区数</td>
</tr>
<tr>
<td>–replication-factor&lt;Integer: replication factor&gt;</td>
<td>设置分区副本</td>
</tr>
<tr>
<td>–config&lt;Stringname&#x3D;value&gt;</td>
<td>更新系统默认的配置</td>
</tr>
</tbody></table>
</li>
<li><p>查看当前服务器中的所有 topic</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-topics.sh --bootstrap-server 47.106.86.64:9092 --list</span><br></pre></td></tr></table></figure>
</li>
<li><p>创建 first topic</p>
<blockquote>
<p>选项说明：</p>
<p>–topic：定义 topic 名</p>
<p>–replication-factor：定义副本数</p>
<p>–partitions：定义分区数</p>
</blockquote>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-topics.sh --bootstrap-server 47.106.86.64:9092 --create --partitions 1 --replication-factor 3 --topic first</span><br></pre></td></tr></table></figure>
</li>
<li><p>查看 first 主题详情</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-topics.sh --bootstrap-server 47.106.86.64:9092 --describe --topic first</span><br></pre></td></tr></table></figure>
</li>
<li><p>修改分区数<strong>（分区数只能增加，不能减少）</strong></p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-topics.sh --bootstrap-server 47.106.86.64:9092 --alter --topic first --partitions 3</span><br></pre></td></tr></table></figure>
</li>
<li><p>再次查看 first 主题的详情</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-topics.sh --bootstrap-server 47.106.86.64:9092 --describe --topic first</span><br></pre></td></tr></table></figure>
</li>
<li><p>删除 topic（需要配置信息）</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-topics.sh --bootstrap-server 47.106.86.64:9092 --delete --topic first</span><br></pre></td></tr></table></figure></li>
</ul>
</li>
<li><p>生产者相关操作</p>
<ul>
<li><p>查看操作生产者命令参数</p>
<blockquote>
<p>连接 Kafka 生产者</p>
<p>参数描述</p>
<p>–bootstrap-server &lt;String: server toconnect to&gt;：连接的 Kafka Broker 主机名称和端口号</p>
<p>–topic &lt;String: topic&gt;：操作的 topic 名称</p>
</blockquote>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-console-producer.sh --bootstrap-server 47.106.86.64:9092 --topic first</span><br></pre></td></tr></table></figure>
</li>
<li><p>发送消息</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">[atguigu@hadoop102 kafka]$ bin/kafka-console-producer.sh --</span><br><span class="line">bootstrap-server hadoop102:9092 --topic first</span><br><span class="line"><span class="meta prompt_">&gt;</span><span class="language-bash">hello world</span></span><br><span class="line"><span class="meta prompt_">&gt;</span><span class="language-bash">Hi HI</span></span><br></pre></td></tr></table></figure></li>
</ul>
</li>
<li><p>消费者相关操作</p>
<ul>
<li><p>查看操作消费者命令参数</p>
<blockquote>
<p>连接 Kafka 消费者</p>
<p>参数描述</p>
<p>–bootstrap-server &lt;String: server toconnect to&gt;：连接的 Kafka Broker 主机名称和端口号</p>
<p>–topic &lt;String: topic&gt;：操作的 topic 名称</p>
<p>–from-beginning：从头开始消费</p>
<p>–group &lt;String: consumer group id&gt;：指定消费者组名称</p>
</blockquote>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-console-consumer.sh</span><br></pre></td></tr></table></figure>
</li>
<li><p>消费消息</p>
<blockquote>
<p>消费 first 主题中的数据</p>
</blockquote>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-console-consumer.sh --bootstrap-server 47.106.86.64:9092 --topic first</span><br></pre></td></tr></table></figure>

<blockquote>
<p>把主题中所有的数据都读取出来（包括历史数据）</p>
</blockquote>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-console-consumer.sh --bootstrap-server 47.106.86.64:9092 --from-beginning --topic first</span><br></pre></td></tr></table></figure></li>
</ul>
</li>
</ul>
</li>
</ol>
<h3 id="3-Kafka-生产者"><a href="#3-Kafka-生产者" class="headerlink" title="3.Kafka 生产者"></a>3.Kafka 生产者</h3><ol>
<li><p>生产者消息发送流程</p>
<ul>
<li><p>发送原理</p>
<blockquote>
<p><strong>指将外部数据发送到双端队列</strong></p>
<p>在消息发送的过程中，涉及到两个线程，main 线程和 sender 线程，其中 main 线程是消息的生产线程，而 sender 线程是 jvm 单例的线程，专门用于消息的发送</p>
<p>在 jvm 的内存中开辟了一块缓存空间叫 RecordAccumulator（消息累加器），用于将多条消息合并成一个批次，然后由 sender 线程发送给 kafka 集群</p>
<p>一条消息在生产过程会调用 send 方法然后经过拦截器经过序列化器，再经过分区器确定消息发送在具体 topic 下的哪个分区，然后发送到对应的消息累加器中，消息累加器是多个双端队列，并且每个队列和主题分区都具有一一映射关系</p>
<p>消息在累加器中进行合并，达到了对应的 size（batch.size）或者等待超过对应的等待时间 <code>linger.ms</code>，都会触发 sender 线程的发送</p>
<p>sender 线程有一个请求池，默认缓存五个请求 <code>max.in.flight.requests.per.connection</code>，发送消息后，会等待服务端的 ack，如果没收到 ack 就会重试，默认重试次数为 int 最大值<code>retries </code></p>
<p>如果 ack 成功就会删除累加器中的消息批次，并响应到生产端</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E5%8F%91%E9%80%81%E5%8E%9F%E7%90%86.png" alt="发送原理"></p>
</li>
<li><p>生产者重要参数列表</p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E7%94%9F%E4%BA%A7%E8%80%85%E5%8F%82%E6%95%B0%E5%88%97%E8%A1%A8.png" alt="生产者参数列表"></p>
</li>
</ul>
</li>
<li><p>发送消息</p>
<ul>
<li><p>普通异步发送</p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E5%BC%82%E6%AD%A5%E6%99%AE%E9%80%9A%E5%8F%91%E9%80%811.png" alt="异步普通发送1"></p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E5%BC%82%E6%AD%A5%E6%99%AE%E9%80%9A%E5%8F%91%E9%80%812.png" alt="异步普通发送2"></p>
<ul>
<li><p>创建工程 Kafka 并导入相关依赖</p>
<figure class="highlight xml"><table><tr><td class="code"><pre><span class="line"><span class="tag">&lt;<span class="name">dependencies</span>&gt;</span></span><br><span class="line">     <span class="tag">&lt;<span class="name">dependency</span>&gt;</span></span><br><span class="line">         <span class="tag">&lt;<span class="name">groupId</span>&gt;</span>org.apache.kafka<span class="tag">&lt;/<span class="name">groupId</span>&gt;</span></span><br><span class="line">         <span class="tag">&lt;<span class="name">artifactId</span>&gt;</span>kafka-clients<span class="tag">&lt;/<span class="name">artifactId</span>&gt;</span></span><br><span class="line">         <span class="tag">&lt;<span class="name">version</span>&gt;</span>3.0.0<span class="tag">&lt;/<span class="name">version</span>&gt;</span></span><br><span class="line">     <span class="tag">&lt;/<span class="name">dependency</span>&gt;</span></span><br><span class="line"><span class="tag">&lt;/<span class="name">dependencies</span>&gt;</span></span><br><span class="line"></span><br></pre></td></tr></table></figure>
</li>
<li><p>编写代码</p>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="keyword">public</span> <span class="keyword">class</span> <span class="title class_">CustomProducer</span> &#123;</span><br><span class="line">    <span class="keyword">public</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="title function_">main</span><span class="params">(String[] args)</span> &#123;</span><br><span class="line">        <span class="comment">// 1. 给 kafka 配置对象添加配置信息：bootstrap.servers</span></span><br><span class="line">        <span class="type">Properties</span> <span class="variable">properties</span> <span class="operator">=</span> <span class="keyword">new</span> <span class="title class_">Properties</span>();</span><br><span class="line">        <span class="comment">//服务信息</span></span><br><span class="line">        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,<span class="string">&quot;47.106.86.64:9092&quot;</span>);</span><br><span class="line">        <span class="comment">//配置序列化</span></span><br><span class="line">        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());</span><br><span class="line">        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class.getName());</span><br><span class="line">        <span class="comment">// 2. 创建 kafka 生产者的配置对象</span></span><br><span class="line">        KafkaProducer&lt;String, String&gt; kafkaProducer = <span class="keyword">new</span> <span class="title class_">KafkaProducer</span>&lt;String,String&gt;(properties);</span><br><span class="line">        <span class="comment">// 3. 
创建 kafka 生产者对象</span></span><br><span class="line">        <span class="keyword">for</span> (<span class="type">int</span> <span class="variable">i</span> <span class="operator">=</span> <span class="number">0</span>; i &lt; <span class="number">5</span>; i++) &#123;</span><br><span class="line">            kafkaProducer.send(<span class="keyword">new</span> <span class="title class_">ProducerRecord</span>(<span class="string">&quot;first&quot;</span>, <span class="string">&quot;one&quot;</span> + i));</span><br><span class="line">        &#125;</span><br><span class="line">        kafkaProducer.close();</span><br><span class="line">    &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>
</li>
<li><p>测试</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-console-consumer.sh --bootstrap-server 47.106.86.64:9092 --topic first</span><br></pre></td></tr></table></figure></li>
</ul>
</li>
<li><p>带回调函数的异步发送</p>
<blockquote>
<p>回调函数会在 producer 收到 ack 时调用，为异步调用</p>
<p>该方法有两个参数，分别是<code>元数据信息（Record Metadata）</code>和<code>异常信息（Exception）</code></p>
<p>如果 Exception 为 null，说明消息发送成功，如果 Exception 不为 null，说明消息发送失败</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E5%BC%82%E6%AD%A5%E5%B8%A6%E5%9B%9E%E8%B0%83.png" alt="异步带回调"></p>
<blockquote>
<p>消息发送失败会自动重试，不需要在回调函数中手动重试</p>
<p>在普通异步调用的基础上添加回调函数 <code>new Callback()</code></p>
</blockquote>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"></span><br><span class="line"><span class="keyword">public</span> <span class="keyword">class</span> <span class="title class_">CustomProducer</span> &#123;</span><br><span class="line">    <span class="keyword">public</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="title function_">main</span><span class="params">(String[] args)</span> &#123;</span><br><span class="line">        <span class="comment">// 1. 给 kafka 配置对象添加配置信息：bootstrap.servers</span></span><br><span class="line">        <span class="type">Properties</span> <span class="variable">properties</span> <span class="operator">=</span> <span class="keyword">new</span> <span class="title class_">Properties</span>();</span><br><span class="line">        <span class="comment">//服务信息</span></span><br><span class="line">        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,<span class="string">&quot;47.106.86.64:9092&quot;</span>);</span><br><span class="line">        <span class="comment">//配置序列化</span></span><br><span class="line">        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());</span><br><span class="line">        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class.getName());</span><br><span class="line">        <span class="comment">// 2. 创建 kafka 生产者的配置对象</span></span><br><span class="line">        KafkaProducer&lt;String, String&gt; kafkaProducer = <span class="keyword">new</span> <span class="title class_">KafkaProducer</span>&lt;String,String&gt;(properties);</span><br><span class="line">        <span class="comment">// 3. 
创建 kafka 生产者对象</span></span><br><span class="line">        <span class="keyword">for</span> (<span class="type">int</span> <span class="variable">i</span> <span class="operator">=</span> <span class="number">0</span>; i &lt; <span class="number">5</span>; i++) &#123;</span><br><span class="line">            kafkaProducer.send(<span class="keyword">new</span> <span class="title class_">ProducerRecord</span>(<span class="string">&quot;first&quot;</span>, <span class="string">&quot;one&quot;</span> + i), <span class="keyword">new</span> <span class="title class_">Callback</span>() &#123;</span><br><span class="line">                <span class="meta">@Override</span></span><br><span class="line">                <span class="keyword">public</span> <span class="keyword">void</span> <span class="title function_">onCompletion</span><span class="params">(RecordMetadata recordMetadata, Exception e)</span> &#123;</span><br><span class="line">                    <span class="keyword">if</span> (e == <span class="literal">null</span>) &#123;</span><br><span class="line">                        System.out.println( <span class="string">&quot;分区 ： &quot;</span> + recordMetadata.partition() + <span class="string">&quot; 主题： &quot;</span> + recordMetadata.topic() );</span><br><span class="line">                    &#125;</span><br><span class="line">                &#125;</span><br><span class="line">            &#125;);</span><br><span class="line">        &#125;</span><br><span class="line">        kafkaProducer.close();</span><br><span class="line">    &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>
</li>
<li><p>同步发送</p>
<blockquote>
<p>先处理已经堆积在 DQueue 中的数据</p>
<p>RecordAccumulator 再处理外部数据</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E5%90%8C%E6%AD%A5%E5%8F%91%E9%80%81.png" alt="同步发送"></p>
<blockquote>
<p>在异步发送的基础上，调用一下 <code>get()</code> 方法即可</p>
</blockquote>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="keyword">public</span> <span class="keyword">class</span> <span class="title class_">CustomProducerSync</span> &#123;</span><br><span class="line">    <span class="keyword">public</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="title function_">main</span><span class="params">(String[] args)</span> <span class="keyword">throws</span> ExecutionException, InterruptedException &#123;</span><br><span class="line">        <span class="comment">// 1. 给 kafka 配置对象添加配置信息：bootstrap.servers</span></span><br><span class="line">        <span class="type">Properties</span> <span class="variable">properties</span> <span class="operator">=</span> <span class="keyword">new</span> <span class="title class_">Properties</span>();</span><br><span class="line">        <span class="comment">//服务信息</span></span><br><span class="line">        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,<span class="string">&quot;47.106.86.64:9092&quot;</span>);</span><br><span class="line">        <span class="comment">//配置序列化</span></span><br><span class="line">        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());</span><br><span class="line">        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,StringSerializer.class.getName());</span><br><span class="line">        <span class="comment">// 2. 创建 kafka 生产者的配置对象</span></span><br><span class="line">        KafkaProducer&lt;String, String&gt; kafkaProducer = <span class="keyword">new</span> <span class="title class_">KafkaProducer</span>&lt;String,String&gt;(properties);</span><br><span class="line">        <span class="comment">// 3. 
创建 kafka 生产者对象</span></span><br><span class="line">        <span class="keyword">for</span> (<span class="type">int</span> <span class="variable">i</span> <span class="operator">=</span> <span class="number">0</span>; i &lt; <span class="number">5</span>; i++) &#123;</span><br><span class="line">            kafkaProducer.send(<span class="keyword">new</span> <span class="title class_">ProducerRecord</span>(<span class="string">&quot;first&quot;</span>, <span class="string">&quot;one&quot;</span> + i), <span class="keyword">new</span> <span class="title class_">Callback</span>() &#123;</span><br><span class="line">                <span class="meta">@Override</span></span><br><span class="line">                <span class="keyword">public</span> <span class="keyword">void</span> <span class="title function_">onCompletion</span><span class="params">(RecordMetadata recordMetadata, Exception e)</span> &#123;</span><br><span class="line">                    <span class="keyword">if</span> (e == <span class="literal">null</span>) &#123;</span><br><span class="line">                        System.out.println( <span class="string">&quot;分区 ： &quot;</span> + recordMetadata.partition() + <span class="string">&quot; 主题： &quot;</span> + recordMetadata.topic() );</span><br><span class="line">                    &#125;</span><br><span class="line"></span><br><span class="line">                &#125;</span><br><span class="line">            &#125;).get();</span><br><span class="line">            Thread.sleep(<span class="number">100</span>);</span><br><span class="line">        &#125;</span><br><span class="line">        kafkaProducer.close();</span><br><span class="line">    &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>
</li>
<li><p>异步发送和同步发送的区别</p>
<blockquote>
<p>异步发送：发送完当前数据后不等待 kafka 集群的 acks 应答（acks &#x3D;&#x3D; 0），随时发送下一批数据</p>
<p>同步发送：发送完当前数据后等待 kafka 集群的 acks 应答（acks &#x3D;&#x3D; 1 &#x2F; -1），收到应答后才发送下一批数据</p>
</blockquote>
</li>
</ul>
</li>
<li><p>生产者拦截器 （ProducerInterceptor）</p>
<ul>
<li><p>拦截器接口</p>
<blockquote>
<p>共调用了三个方法</p>
<p>三个方法内的实现如果抛出异常，会被<code>ProducerInterceptors</code> 内部捕获，<strong>并不会抛到上层</strong></p>
</blockquote>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="keyword">public</span> <span class="keyword">interface</span> <span class="title class_">ProducerInterceptor</span>&lt;K, V&gt; <span class="keyword">extends</span> <span class="title class_">Configurable</span> &#123;</span><br><span class="line">    ProducerRecord&lt;K, V&gt; <span class="title function_">onSend</span><span class="params">(ProducerRecord&lt;K, V&gt; record)</span>;</span><br><span class="line">    <span class="keyword">void</span> <span class="title function_">onAcknowledgement</span><span class="params">(RecordMetadata metadata, Exception exception)</span>;</span><br><span class="line">    <span class="keyword">void</span> <span class="title function_">close</span><span class="params">()</span>;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<ul>
<li><p>onSend</p>
<blockquote>
<p>在消息分区之前，可以对消息进行一定的修改，比如给 key 添加前缀，甚至可以修改 topic</p>
<p>如果需要使用 kafka 实现延时队列高级应用，就可以通过拦截器对消息进行判断并修改，暂时放入延时主题中，等时间达到再放回普通主题队列</p>
</blockquote>
</li>
<li><p>onAcknowledgement</p>
<blockquote>
<p>该方法是在服务端对 sender 线程进行消息确认，或消息发送失败后的一个回调</p>
<p>优先于 send 方法的 callback 回调</p>
<p>可以对发送情况做一个统计</p>
<p><strong>但是该方法在 sender 线程也就是唯一的 IO 线程执行，逻辑越少越好</strong></p>
</blockquote>
</li>
<li><p>close</p>
<blockquote>
<p>该方法可以在关闭拦截器时，进行一些资源的释放</p>
</blockquote>
</li>
</ul>
</li>
<li><p>实现自定义拦截器</p>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="keyword">public</span> <span class="keyword">class</span> <span class="title class_">MyInterceptor</span> <span class="keyword">implements</span> <span class="title class_">ProducerInterceptor</span> &#123;</span><br><span class="line">    ProducerRecord&lt;K, V&gt; <span class="title function_">onSend</span><span class="params">(ProducerRecord&lt;K, V&gt; record)</span>;</span><br><span class="line">    <span class="keyword">void</span> <span class="title function_">onAcknowledgement</span><span class="params">(RecordMetadata metadata, Exception exception)</span>;</span><br><span class="line">    <span class="keyword">void</span> <span class="title function_">close</span><span class="params">()</span>;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>
</li>
<li><p>将自定义拦截器加入设置中</p>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line">properties.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG,MyInterceptor.class.getName());</span><br></pre></td></tr></table></figure></li>
</ul>
</li>
<li><p>生产者分区</p>
<ul>
<li><p>分区的好处</p>
<ul>
<li>从存储的角度：合理使用存储资源，实现负载均衡</li>
<li>从计算的角度：提高并行计算的可行性</li>
</ul>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E5%88%86%E5%8C%BA%E7%9A%84%E5%A5%BD%E5%A4%84.png" alt="分区的好处"></p>
</li>
<li><p>生产者发送消息分区策略</p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E5%88%86%E5%8C%BA%E7%AD%96%E7%95%A5.png" alt="分区策略"></p>
<ul>
<li><strong>指定分区</strong>：放入指定分区</li>
<li><strong>没有指定分区，指定 key</strong>：计算 hash 得到分区</li>
<li><strong>没有指定分区，也没有指定 key</strong>：随机粘性分区</li>
</ul>
</li>
<li><p>自定义分区器</p>
<ul>
<li><p>需求</p>
<blockquote>
<p>例如实现一个分区器，发送过来的数据中如果包含 Hi，就发往 0 号分区，不包含 Hi，就发往 1 号分区</p>
</blockquote>
</li>
<li><p>实现步骤</p>
<blockquote>
<p>定义类实现 <code>Partitioner</code> 接口</p>
<p>重写 <code>partition()</code> 方法</p>
</blockquote>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="keyword">public</span> <span class="keyword">class</span> <span class="title class_">MyPartitioner</span> <span class="keyword">implements</span> <span class="title class_">Partitioner</span> &#123;</span><br><span class="line">    <span class="comment">/**</span></span><br><span class="line"><span class="comment">     * <span class="doctag">@param</span> topic 主题</span></span><br><span class="line"><span class="comment">     * <span class="doctag">@param</span> key 消息的 key</span></span><br><span class="line"><span class="comment">     * <span class="doctag">@param</span> keyBytes 消息的 key 序列化后的字节数组</span></span><br><span class="line"><span class="comment">     * <span class="doctag">@param</span> value 消息的 value</span></span><br><span class="line"><span class="comment">     * <span class="doctag">@param</span> valueBytes 消息的 value 序列化后的字节数组</span></span><br><span class="line"><span class="comment">     * <span class="doctag">@param</span> cluster 集群元数据可以查看分区信息</span></span><br><span class="line"><span class="comment">     */</span></span><br><span class="line">    <span class="meta">@Override</span></span><br><span class="line">    <span class="keyword">public</span> <span class="type">int</span> <span class="title function_">partition</span><span class="params">(String topic, Object key, <span class="type">byte</span>[] keyBytes, Object value, <span class="type">byte</span>[] valueBytes, Cluster cluster)</span> &#123;</span><br><span class="line">        <span class="type">String</span> <span class="variable">string</span> <span class="operator">=</span> value.toString();</span><br><span class="line">        <span class="keyword">if</span> (string.contains(<span class="string">&quot;Hi&quot;</span>))&#123;</span><br><span class="line">            <span class="keyword">return</span> <span class="number">0</span>;</span><br><span class="line">        &#125;<span 
class="keyword">else</span>&#123;</span><br><span class="line">            <span class="keyword">return</span> <span class="number">1</span>;</span><br><span class="line">        &#125;</span><br><span class="line">    &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<blockquote>
<p>使用分区器的方法，在生产者的配置中添加分区器参数</p>
</blockquote>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="comment">//自定义分区规则 </span></span><br><span class="line">properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG,MyPartitioner.class.getName());</span><br></pre></td></tr></table></figure></li>
</ul>
</li>
</ul>
</li>
<li><p>生产者提高吞吐量</p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%8F%90%E9%AB%98%E5%90%9E%E5%90%90%E9%87%8F.png" alt="提高吞吐量"></p>
<blockquote>
<p>通过提高吞吐量达到低延迟的效果</p>
<p><strong>Batch.size 与 linger.ms 配合使用，根据生成数据的大小指定</strong></p>
<p><code>batch.size</code> 适当提高到 32k</p>
<p><code>linger.ms</code> 适当提高到 5-100ms</p>
<p><strong>RecordAccumlator：在异步发送并且分区很多的情况下，32M 的数据量容易被满足，进程交互加大，可以适当提高到 64M</strong></p>
</blockquote>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="comment">// batch.size：批次大小，默认 16K</span></span><br><span class="line">properties.put(ProducerConfig.BATCH_SIZE_CONFIG, <span class="number">16384</span>);</span><br><span class="line"><span class="comment">// linger.ms：等待时间，默认 0</span></span><br><span class="line">properties.put(ProducerConfig.LINGER_MS_CONFIG, <span class="number">1</span>);</span><br><span class="line"><span class="comment">// RecordAccumulator：缓冲区大小，默认 32M：buffer.memory</span></span><br><span class="line">properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG,<span class="number">33554432</span>);</span><br><span class="line"><span class="comment">// compression.type：压缩，默认 none，可配置值 gzip、snappy、lz4 和 zstd</span></span><br><span class="line">properties.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, <span class="string">&quot;snappy&quot;</span>);</span><br></pre></td></tr></table></figure>

<ul>
<li><p>消息累加器（RecordAccumulator）</p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%B6%88%E6%81%AF%E7%B4%AF%E5%8A%A0%E5%99%A8.png" alt="消息累加器"></p>
<blockquote>
<p>为了提高生产者的吞吐量，通过累加器将多条消息合并成一批统一发送，在 broker 中将消息批量存入，减少多次的网络 IO</p>
<p>消息累加器默认 32m，如果生产者的发送速率大于 sender 发送的速率，消息就会堆满累加器，生产者就会阻塞或者报错，报错取决于阻塞时间的配置</p>
<p><strong>累加器的存储形式为 <code>ConcurrentMap&lt;TopicPartition, Deque&lt;ProducerBatch&gt;&gt;</code>，可以看出来就是一个分区对应一个双端队列，队列中存储的是 <code>ProducerBatch</code> 一般大小是 16k（根据 batch.size 配置）</strong></p>
<p>新的消息会 append 到 ProducerBatch 中，满 16k 就会创建新的 ProducerBatch，并且触发 sender 线程进行发送</p>
<p>如果消息量非常大，生成了大量的 ProducerBatch，在发送后又需要 JVM 通过 GC 回收这些 ProducerBatch 就变得非常影响性能</p>
<p>所以 kafka 通过 BufferPool 作为内存池来管理 ProducerBatch 的创建和回收，需要申请一个新的 ProducerBatch 空间时，调用 <code>free.allocate(size, maxTimeToBlock)</code> 找内存池申请空间</p>
<p>如果单条消息大于 16k，那么就不会复用内存池了，会生成一个更大的 ProducerBatch 专门存放大消息，发送完后 GC 回收该内存空间</p>
<p>为了进一步减小网络中消息传输的带宽，也可以通过<strong>消息压缩</strong>的方式，在生产端将消息追加进 <code>ProducerBatch</code> 就对每一条消息进行压缩</p>
<p>常用的有 Gzip、Snappy、Lz4、Zstd，这是时间换空间的手段</p>
<p>压缩的消息会在消费端进行解压</p>
</blockquote>
</li>
<li><p>消息发送线程（Sender）</p>
<blockquote>
<p>消息保存在内存后，Sender 线程就会把符合条件的消息按照批次进行发送</p>
<p>除了发送消息，元数据的加载也是通过 Sender 线程来处理的</p>
<p> Sender 线程发送消息以及接收消息，都是基于 java NIO 的 Selector</p>
<p>通过 Selector 把消息发出去，并通过 Selector 接收消息</p>
<p>Sender 线程默认容纳 5 个未确认的消息，消息发送失败后会进行重试</p>
</blockquote>
</li>
</ul>
</li>
<li><p>生产经验—数据可靠性</p>
<ul>
<li><p>消息确认机制-ACK</p>
<blockquote>
<p>producer 提供了三种消息确认的模式，通过配置 <code>acks</code> 来实现</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/acks.png" alt="acks"></p>
<ul>
<li><p>acks &#x3D;&#x3D; 0 </p>
<blockquote>
<p>表示生产者将数据发送出去就不管了，不等待任何返回</p>
<p>这种情况下数据传输效率最高，但是数据可靠性最低，当 server 挂掉的时候就会丢数据</p>
</blockquote>
</li>
<li><p>acks &#x3D;&#x3D; 1（默认）</p>
<blockquote>
<p>表示数据发送到 Kafka 后，经过 leader 成功接收消息的确认，才算发送成功</p>
<p>如果 leader 宕机了，就会丢失数据</p>
</blockquote>
</li>
<li><p>acks &#x3D;&#x3D; -1&#x2F;all</p>
<blockquote>
<p>表示生产者需要等待 ISR 中的所有 follower 都确认接收到数据后才算发送完成</p>
<p>这样数据不会丢失，可靠性最高，性能最低</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/acks2.png" alt="acks2"></p>
</li>
</ul>
<blockquote>
<p><strong>数据完全可靠条件 &#x3D; ACK &#x3D;&#x3D; -1 + 分区副本 &gt;&#x3D; 2 + ISR 里应答的最小副本数量 &gt;&#x3D; 2</strong></p>
<p><strong>分区副本包括 leader，分区副本 &gt;&#x3D; 2 指至少有一个 leader、一个 follower</strong></p>
<p><strong>ISR 应答队列同分区副本，也包括 leader</strong></p>
<p>AR &#x3D; ISR + OSR</p>
<p>正常情况下，如果所有的 follower 副本都应该与 leader 副本保持一定程度的同步，则 AR &#x3D; ISR，OSR &#x3D; null</p>
<p>ISR 表示在指定时间内和 leader 保存数据同步的集合</p>
<p>OSR 表示不能在指定的时间内和 leader 保持数据同步的集合，称为 OSR（Out-of-Sync Replica set）</p>
</blockquote>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="comment">// Ack 设置</span></span><br><span class="line">properties.put(ProducerConfig.ACKS_CONFIG,<span class="string">&quot;1&quot;</span>);</span><br><span class="line"><span class="comment">// 重试次数, 默认的重试次数是 Max.Integer</span></span><br><span class="line">properties.put(ProducerConfig.RETRIES_CONFIG,<span class="number">3</span>);</span><br></pre></td></tr></table></figure>
</li>
<li><p>数据去重-幂等性</p>
<ul>
<li><p>幂等性原理</p>
<blockquote>
<p>幂等性，简单地说就是对接口的多次调用所产生的结果和调用一次是一致的</p>
<p>生产者在进行重试的时候有可能会重复写入消息，而使用 Kafka 的幂等性功能之后就可以避免这种情况（不产生重复数据）</p>
<p>在一般的 MQ 模型中，常有以下的消息通信概念</p>
</blockquote>
<ul>
<li><p>至少一次（At Least Once）</p>
<blockquote>
<p>ACK 级别设置为-1 + 分区副本 &gt;&#x3D; 2 + ISR里应答的最小副本数量 &gt;&#x3D; 2</p>
<p>可以保证数据不丢失，但是不能保证数据不重复</p>
</blockquote>
</li>
<li><p>最多一次（At Most Once）</p>
<blockquote>
<p>ACK 级别设置为 0 </p>
<p>可以保证数据不重复，但是不能保证数据不丢失</p>
</blockquote>
</li>
<li><p>精确一次（Exactly Once）</p>
<blockquote>
<p>至少一次 + 幂等性 </p>
<p> Kafka 0.11 版本引入一项重大特性：幂等性和事务</p>
</blockquote>
</li>
</ul>
<blockquote>
<p>重复数据的判断标准：具有 <code>&lt;PID, Partition, SeqNumber&gt;</code> 相同主键的消息提交时，Broker 只会持久化一条</p>
<p>broker 中会在内存维护一个 pid + 分区对应的序列号</p>
<p><strong>如果收到的序列号正好比内存序列号大一，才存储消息</strong></p>
<p><strong>如果小于内存序列号，意味着消息重复，那么会丢弃消息并应答</strong></p>
<p><strong>如果远大于内存序列号，意味着消息丢失，会抛出异常</strong></p>
<p><strong>所以幂等解决的是 sender 到 broker 间，由于网络波动可能造成的重发问题。用幂等来标识唯一消息</strong></p>
<p>并且幂等性只能保证的是在<strong>单分区单会话内不重复</strong></p>
</blockquote>
<ul>
<li><code>ProducerId（pid）</code>：生产者每次重启，Kafka 都会为其分配一个新的 PID</li>
<li>Partition 表示分区号</li>
<li>Sequence Number 序列号，是单调自增的</li>
</ul>
</li>
<li><p>如何使用幂等性</p>
<blockquote>
<p>开启幂等性功能的方式很简单，只需要显式地将生产者客户端参数 <code>enable.idempotence</code> 设置为 true 即可（默认值为true）</p>
<p>并且还需要确保生产者客户端的 <code>retries、acks、max.in.flight.requests.per.connection</code> 参数不被配置错（默认值就是对的）</p>
</blockquote>
</li>
</ul>
</li>
<li><p>消息事务</p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%B6%88%E6%81%AF%E4%BA%8B%E5%8A%A1.png" alt="消息事务"></p>
<blockquote>
<p>由于幂等性不能跨分区运作，为了保证同时发的多条消息，要么全成功要么全失败</p>
<p>kafka 引入了事务的概念</p>
<p>开启事务需要 <code>producer</code> 设置 <code>transactional.id</code> 的值并同时开启幂等性</p>
<p>通过事务协调器，来实现事务，工作流程如下</p>
</blockquote>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="comment">// 1 初始化事务</span></span><br><span class="line"><span class="keyword">void</span> <span class="title function_">initTransactions</span><span class="params">()</span>;</span><br><span class="line"><span class="comment">// 2 开启事务</span></span><br><span class="line"><span class="keyword">void</span> <span class="title function_">beginTransaction</span><span class="params">()</span> <span class="keyword">throws</span> ProducerFencedException;</span><br><span class="line"><span class="comment">// 3 在事务内提交已经消费的偏移量（主要用于消费者）</span></span><br><span class="line"><span class="keyword">void</span> <span class="title function_">sendOffsetsToTransaction</span><span class="params">(Map&lt;TopicPartition, OffsetAndMetadata&gt; offsets, String consumerGroupId)</span> <span class="keyword">throws</span> ProducerFencedException;</span><br><span class="line"><span class="comment">// 4 提交事务</span></span><br><span class="line"><span class="keyword">void</span> <span class="title function_">commitTransaction</span><span class="params">()</span> <span class="keyword">throws</span> ProducerFencedException;</span><br><span class="line"><span class="comment">// 5 放弃事务（类似于回滚事务的操作）</span></span><br><span class="line"><span class="keyword">void</span> <span class="title function_">abortTransaction</span><span class="params">()</span> <span class="keyword">throws</span> ProducerFencedException;</span><br></pre></td></tr></table></figure>

<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%B6%88%E6%81%AF%E4%BA%8B%E5%8A%A12.png" alt="消息事务2"></p>
</li>
</ul>
</li>
<li><p>消息顺序</p>
<blockquote>
<p><strong>消息在单分区内有序，多分区内无序</strong></p>
<p>如果对多分区进行排序，造成分区无法工作需要等待排序，浪费性能</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%B6%88%E6%81%AF%E9%A1%BA%E5%BA%8F.png" alt="消息顺序"></p>
<blockquote>
<p>kafka 只能保证单分区下的消息顺序性，为了保证消息的顺序性，需要做到如下几点</p>
</blockquote>
<ul>
<li><p>如果未开启幂等性</p>
<blockquote>
<p>需要 <code>max.in.flight.requests.per.connection</code> 设置为1（缓冲队列最多放置1个请求）</p>
</blockquote>
</li>
<li><p>如果开启幂等性</p>
<blockquote>
<p>需要 <code>max.in.flight.requests.per.connection</code> 设置为小于等于 5</p>
<p>因为 broker 端会缓存 producer 主题分区下的五个 request，保证最近 5 个 request 是有序的</p>
</blockquote>
</li>
</ul>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%B6%88%E6%81%AF%E9%A1%BA%E5%BA%8F2.png" alt="消息顺序2"></p>
<blockquote>
<p>如果 Request3 在失败重试后才发往到集群中，必然会导致乱序，但是集群会重新按照序列号进行排序（最多一次排序 5 个）</p>
</blockquote>
</li>
</ol>
<h3 id="4-Kafka-Broker"><a href="#4-Kafka-Broker" class="headerlink" title="4.Kafka Broker"></a>4.Kafka Broker</h3><ol>
<li><p>Broker 设计</p>
<blockquote>
<p>kafka 能堆积非常大的数据，一台服务器肯定是放不下的</p>
<p>由此出现了集群的概念，集群不仅可以让消息负载均衡，还能提高消息存取的吞吐量</p>
<p>kafka 集群中，会有多台 broker，每台 broker 分别在不同的机器上</p>
<p>为了提高吞吐量，每个 topic 也会有多个分区，同时为了保持可靠性，每个分区还会有多个副本</p>
<p>这些分区副本被均匀的散落在每个 broker 上，其中每个分区副本中有一个副本为 leader，其他的为 follower</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/Broker%E8%AE%BE%E8%AE%A1.png" alt="Broker设计"></p>
</li>
<li><p>Zookeeper</p>
<ul>
<li><p>Zookeeper 的作用</p>
<blockquote>
<p>Zookeeper 在 Kafka 中扮演了重要的角色</p>
<p>kafka 使用 zookeeper 进行元数据管理，保存 broker 注册信息，包括主题（Topic）、分区（Partition）信息等，选择分区 leader</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/Zookeeper%E4%BD%9C%E7%94%A8.png" alt="Zookeeper作用"></p>
</li>
<li><p>Broker 选举 Leader</p>
<blockquote>
<p>leader 选举</p>
<p>因为 kafka 中涉及多处选举机制，容易搞混</p>
<p>Kafka 有三个方面会涉及到选举</p>
</blockquote>
<ul>
<li>broker（控制器）选 Leader</li>
<li>分区多副本选 Leader</li>
<li>消费者选 Leader</li>
</ul>
<blockquote>
<p>在 kafka 集群中有很多的 broker（也叫做控制器），但是他们之间需要选举出一个 leader，其他的都是 follower</p>
<p>broker 的 leader 有很重要的作用，诸如创建、删除主题、增加分区并分配 leader 分区、集群 broker 管理，包括新增、关闭和故障处理、分区重分配 <code>auto.leader.rebalance.enable=true</code>、分区 leader 选举</p>
<p>每个 broker 都有唯一的 brokerId，他们在启动后会去竞争注册 zookeeper 上的 Controller 结点，谁先抢到谁就是 broker leader</p>
<p>而其他 broker 会监听该结点事件，以便后续 leader 下线后触发重新选举</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E9%80%89%E4%B8%BEleader1.png" alt="选举leader1"></p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E9%80%89%E4%B8%BEleader2.png" alt="选举leader2"></p>
</li>
<li><p>Broker 重要参数</p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/Broker%E9%87%8D%E8%A6%81%E5%8F%82%E6%95%B0.png" alt="Broker重要参数"></p>
</li>
</ul>
</li>
<li><p>节点服役和退役</p>
<ul>
<li><p>新节点服役</p>
<ul>
<li><p>启动一台新的 KafKa 服务端（加入原有的 Zookeeper 集群）</p>
</li>
<li><p>查看原有的分区信息 describe</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line"><span class="meta prompt_">$ </span><span class="language-bash">kafka-topics.sh --bootstrap-server 47.106.86.64:9092 --topic first --describe</span></span><br><span class="line"></span><br><span class="line">Topic: first	TopicId: 4DtkHPe4R1KyXNF7QyVqBA	PartitionCount: 3	ReplicationFactor: 3	Configs: segment.bytes=1073741824</span><br><span class="line">	Topic: first	Partition: 0	Leader: 1	Replicas: 2,1,0	Isr: 1,0</span><br><span class="line">	Topic: first	Partition: 1	Leader: 0	Replicas: 0,1,2	Isr: 0,1</span><br><span class="line">	Topic: first	Partition: 2	Leader: 1	Replicas: 1,2,0	Isr: 1,0</span><br></pre></td></tr></table></figure>
</li>
<li><p>指定需要均衡的主题</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line"><span class="meta prompt_">$ </span><span class="language-bash">vim topics-to-move.json</span></span><br></pre></td></tr></table></figure>

<figure class="highlight json"><table><tr><td class="code"><pre><span class="line"><span class="punctuation">&#123;</span></span><br><span class="line"> <span class="attr">&quot;topics&quot;</span><span class="punctuation">:</span> <span class="punctuation">[</span></span><br><span class="line">     	<span class="punctuation">&#123;</span><span class="attr">&quot;topic&quot;</span><span class="punctuation">:</span> <span class="string">&quot;first&quot;</span><span class="punctuation">&#125;</span></span><br><span class="line">     <span class="punctuation">]</span><span class="punctuation">,</span></span><br><span class="line"> <span class="attr">&quot;version&quot;</span><span class="punctuation">:</span> <span class="number">1</span></span><br><span class="line"><span class="punctuation">&#125;</span></span><br></pre></td></tr></table></figure>
</li>
<li><p>生成负载均衡计划（只是生成计划）</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">bin/kafka-reassign-partitions.sh --bootstrap-server 47.106.86.64:9092 --topics-to-move-json-file topics-to-move.json --broker-list &quot;0,1,2,3&quot; --generate</span><br></pre></td></tr></table></figure>

<figure class="highlight json"><table><tr><td class="code"><pre><span class="line">Current partition replica assignment</span><br><span class="line"><span class="punctuation">&#123;</span><span class="attr">&quot;version&quot;</span><span class="punctuation">:</span><span class="number">1</span><span class="punctuation">,</span><span class="attr">&quot;partitions&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="punctuation">&#123;</span><span class="attr">&quot;topic&quot;</span><span class="punctuation">:</span><span class="string">&quot;first&quot;</span><span class="punctuation">,</span><span class="attr">&quot;partition&quot;</span><span class="punctuation">:</span><span class="number">0</span><span class="punctuation">,</span><span class="string">&quot;replic</span></span><br><span class="line"><span class="string">as&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="number">0</span><span class="punctuation">,</span><span class="number">2</span><span class="punctuation">,</span><span class="number">1</span><span class="punctuation">]</span><span class="punctuation">,</span><span class="attr">&quot;log_dirs&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="string">&quot;any&quot;</span><span class="punctuation">,</span><span class="string">&quot;any&quot;</span><span class="punctuation">,</span><span class="string">&quot;any&quot;</span><span class="punctuation">]</span><span class="punctuation">&#125;</span><span class="punctuation">,</span><span class="punctuation">&#123;</span><span class="attr">&quot;topic&quot;</span><span class="punctuation">:</span><span class="string">&quot;first&quot;</span><span class="punctuation">,</span><span class="string">&quot;par</span></span><br><span class="line"><span class="string">tition&quot;</span><span class="punctuation">:</span><span class="number">1</span><span 
class="punctuation">,</span><span class="attr">&quot;replicas&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="number">2</span><span class="punctuation">,</span><span class="number">1</span><span class="punctuation">,</span><span class="number">0</span><span class="punctuation">]</span><span class="punctuation">,</span><span class="attr">&quot;log_dirs&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="string">&quot;any&quot;</span><span class="punctuation">,</span><span class="string">&quot;any&quot;</span><span class="punctuation">,</span><span class="string">&quot;any&quot;</span><span class="punctuation">]</span><span class="punctuation">&#125;</span><span class="punctuation">,</span><span class="punctuation">&#123;</span><span class="string">&quot;to</span></span><br><span class="line"><span class="string">pic&quot;</span><span class="punctuation">:</span><span class="string">&quot;first&quot;</span><span class="punctuation">,</span><span class="attr">&quot;partition&quot;</span><span class="punctuation">:</span><span class="number">2</span><span class="punctuation">,</span><span class="attr">&quot;replicas&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="number">1</span><span class="punctuation">,</span><span class="number">0</span><span class="punctuation">,</span><span class="number">2</span><span class="punctuation">]</span><span class="punctuation">,</span><span class="attr">&quot;log_dirs&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="string">&quot;any&quot;</span><span class="punctuation">,</span><span class="string">&quot;</span></span><br><span class="line"><span class="string">any&quot;</span><span class="punctuation">,</span><span class="string">&quot;any&quot;</span><span class="punctuation">]</span><span class="punctuation">&#125;</span><span 
class="punctuation">]</span><span class="punctuation">&#125;</span></span><br><span class="line">Proposed partition reassignment configuration</span><br><span class="line"><span class="punctuation">&#123;</span><span class="attr">&quot;version&quot;</span><span class="punctuation">:</span><span class="number">1</span><span class="punctuation">,</span><span class="attr">&quot;partitions&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="punctuation">&#123;</span><span class="attr">&quot;topic&quot;</span><span class="punctuation">:</span><span class="string">&quot;first&quot;</span><span class="punctuation">,</span><span class="attr">&quot;partition&quot;</span><span class="punctuation">:</span><span class="number">0</span><span class="punctuation">,</span><span class="string">&quot;replic</span></span><br><span class="line"><span class="string">as&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="number">2</span><span class="punctuation">,</span><span class="number">3</span><span class="punctuation">,</span><span class="number">0</span><span class="punctuation">]</span><span class="punctuation">,</span><span class="attr">&quot;log_dirs&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="string">&quot;any&quot;</span><span class="punctuation">,</span><span class="string">&quot;any&quot;</span><span class="punctuation">,</span><span class="string">&quot;any&quot;</span><span class="punctuation">]</span><span class="punctuation">&#125;</span><span class="punctuation">,</span><span class="punctuation">&#123;</span><span class="attr">&quot;topic&quot;</span><span class="punctuation">:</span><span class="string">&quot;first&quot;</span><span class="punctuation">,</span><span class="string">&quot;par</span></span><br><span class="line"><span class="string">tition&quot;</span><span class="punctuation">:</span><span class="number">1</span><span 
class="punctuation">,</span><span class="attr">&quot;replicas&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="number">3</span><span class="punctuation">,</span><span class="number">0</span><span class="punctuation">,</span><span class="number">1</span><span class="punctuation">]</span><span class="punctuation">,</span><span class="attr">&quot;log_dirs&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="string">&quot;any&quot;</span><span class="punctuation">,</span><span class="string">&quot;any&quot;</span><span class="punctuation">,</span><span class="string">&quot;any&quot;</span><span class="punctuation">]</span><span class="punctuation">&#125;</span><span class="punctuation">,</span><span class="punctuation">&#123;</span><span class="string">&quot;to</span></span><br><span class="line"><span class="string">pic&quot;</span><span class="punctuation">:</span><span class="string">&quot;first&quot;</span><span class="punctuation">,</span><span class="attr">&quot;partition&quot;</span><span class="punctuation">:</span><span class="number">2</span><span class="punctuation">,</span><span class="attr">&quot;replicas&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="number">0</span><span class="punctuation">,</span><span class="number">1</span><span class="punctuation">,</span><span class="number">2</span><span class="punctuation">]</span><span class="punctuation">,</span><span class="attr">&quot;log_dirs&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="string">&quot;any&quot;</span><span class="punctuation">,</span><span class="string">&quot;</span></span><br><span class="line"><span class="string">any&quot;</span><span class="punctuation">,</span><span class="string">&quot;any&quot;</span><span class="punctuation">]</span><span class="punctuation">&#125;</span><span 
class="punctuation">]</span><span class="punctuation">&#125;</span></span><br></pre></td></tr></table></figure>
</li>
<li><p>创建副本存储计划（所有副本存储在 broker0、broker1、broker2、broker3 中）</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">vim increase-replication-factor.json</span><br></pre></td></tr></table></figure>

<figure class="highlight json"><table><tr><td class="code"><pre><span class="line"><span class="punctuation">&#123;</span><span class="attr">&quot;version&quot;</span><span class="punctuation">:</span><span class="number">1</span><span class="punctuation">,</span><span class="attr">&quot;partitions&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="punctuation">&#123;</span><span class="attr">&quot;topic&quot;</span><span class="punctuation">:</span><span class="string">&quot;first&quot;</span><span class="punctuation">,</span><span class="attr">&quot;partition&quot;</span><span class="punctuation">:</span><span class="number">0</span><span class="punctuation">,</span><span class="string">&quot;replic</span></span><br><span class="line"><span class="string">as&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="number">2</span><span class="punctuation">,</span><span class="number">3</span><span class="punctuation">,</span><span class="number">0</span><span class="punctuation">]</span><span class="punctuation">,</span><span class="attr">&quot;log_dirs&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="string">&quot;any&quot;</span><span class="punctuation">,</span><span class="string">&quot;any&quot;</span><span class="punctuation">,</span><span class="string">&quot;any&quot;</span><span class="punctuation">]</span><span class="punctuation">&#125;</span><span class="punctuation">,</span><span class="punctuation">&#123;</span><span class="attr">&quot;topic&quot;</span><span class="punctuation">:</span><span class="string">&quot;first&quot;</span><span class="punctuation">,</span><span class="string">&quot;par</span></span><br><span class="line"><span class="string">tition&quot;</span><span class="punctuation">:</span><span class="number">1</span><span class="punctuation">,</span><span class="attr">&quot;replicas&quot;</span><span 
class="punctuation">:</span><span class="punctuation">[</span><span class="number">3</span><span class="punctuation">,</span><span class="number">0</span><span class="punctuation">,</span><span class="number">1</span><span class="punctuation">]</span><span class="punctuation">,</span><span class="attr">&quot;log_dirs&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="string">&quot;any&quot;</span><span class="punctuation">,</span><span class="string">&quot;any&quot;</span><span class="punctuation">,</span><span class="string">&quot;any&quot;</span><span class="punctuation">]</span><span class="punctuation">&#125;</span><span class="punctuation">,</span><span class="punctuation">&#123;</span><span class="string">&quot;to</span></span><br><span class="line"><span class="string">pic&quot;</span><span class="punctuation">:</span><span class="string">&quot;first&quot;</span><span class="punctuation">,</span><span class="attr">&quot;partition&quot;</span><span class="punctuation">:</span><span class="number">2</span><span class="punctuation">,</span><span class="attr">&quot;replicas&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="number">0</span><span class="punctuation">,</span><span class="number">1</span><span class="punctuation">,</span><span class="number">2</span><span class="punctuation">]</span><span class="punctuation">,</span><span class="attr">&quot;log_dirs&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="string">&quot;any&quot;</span><span class="punctuation">,</span><span class="string">&quot;</span></span><br><span class="line"><span class="string">any&quot;</span><span class="punctuation">,</span><span class="string">&quot;any&quot;</span><span class="punctuation">]</span><span class="punctuation">&#125;</span><span class="punctuation">]</span><span class="punctuation">&#125;</span></span><br></pre></td></tr></table></figure>
</li>
<li><p>执行副本计划</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-reassign-partitions.sh --bootstrap-server 47.106.86.64:9092 --reassignment-json-file increase-replication-factor.json --execute</span><br></pre></td></tr></table></figure>
</li>
<li><p>验证计划</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-reassign-partitions.sh --bootstrap-server 47.106.86.64:9092 --reassignment-json-file increase-replication-factor.json --verify</span><br></pre></td></tr></table></figure>

<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">Status of partition reassignment:</span><br><span class="line">Reassignment of partition first-0 is complete.</span><br><span class="line">Reassignment of partition first-1 is complete.</span><br><span class="line">Reassignment of partition first-2 is complete.</span><br><span class="line">Clearing broker-level throttles on brokers 0,1,2,3</span><br><span class="line">Clearing topic-level throttles on topic first</span><br></pre></td></tr></table></figure></li>
</ul>
</li>
<li><p>旧节点退役</p>
<blockquote>
<p>执行负载均衡操作</p>
<p>先退役一台节点，生成执行计划，然后按照服役时操作流程执行负载均衡</p>
<p>不同于服役计划的 <code>--broker-list &quot;0,1,2&quot;</code> 退役了 <code>Broker3</code></p>
</blockquote>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-reassign-partitions.sh --bootstrap-server 47.106.86.64:9092 --topics-to-move-json-file topics-to-move.json --broker-list &quot;0,1,2&quot; --generate</span><br></pre></td></tr></table></figure></li>
</ul>
</li>
<li><p>副本机制</p>
<ul>
<li><p>副本基本信息</p>
<ul>
<li><p>Replica</p>
<blockquote>
<p>副本，同一分区的不同副本保存的是相同的消息</p>
<p>为保证集群中的某个节点发生故障时，该节点上的 partition 数据不丢失 ，提高副本可靠性，且 kafka 仍然能够继续工作，kafka 提供了副本机制</p>
<p>一个 topic 的每个分区都有若干个副本，一个 leader 和若干个 follower</p>
<p>生产环境一般配置为 2 个副本</p>
</blockquote>
</li>
<li><p>Leader</p>
<blockquote>
<p>每个分区的多个副本中的<code>主副本</code>，生产者以及消费者只与 <code>Leader</code> 交互</p>
</blockquote>
</li>
<li><p>Follower </p>
<blockquote>
<p>每个分区的多个副本中的<code>从副本</code>，负责实时从 Leader 中同步数据，保持和 Leader 数据的同步</p>
<p>Leader 发生故障时，从 Follower 副本中重新选举新的 Leader 副本对外提供服务</p>
</blockquote>
</li>
</ul>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E5%89%AF%E6%9C%AC%E5%9F%BA%E6%9C%AC%E4%BF%A1%E6%81%AF.png" alt="副本基本信息"></p>
<ul>
<li><p>AR（Assigned Replicas 总的分配副本）</p>
<blockquote>
<p><strong>分区中的所有 Replica 统称为 AR &#x3D; ISR + OSR</strong></p>
</blockquote>
</li>
<li><p>ISR（in-sync-replica set 同步副本）</p>
<blockquote>
<p>所有与 Leader 副本保持一定程度同步的 Replica（包括 Leader 副本在内）组成 ISR</p>
</blockquote>
</li>
<li><p>OSR（Out-of-Sync Replicas 脱离同步副本）</p>
<blockquote>
<p>与 Leader 副本同步滞后过多的 Replica 组成了 OSR</p>
</blockquote>
</li>
<li><p>LEO（Log End Offset）</p>
<blockquote>
<p>每个副本都有内部的 LEO，代表当前队列消息的最后一条偏移量 offset + 1</p>
</blockquote>
</li>
<li><p>HW（High Watermark）</p>
<blockquote>
<p>高水位，代表所有 ISR 中的 LEO 最低的那个 offset，也是消费者可见的最大消息 offset</p>
</blockquote>
</li>
</ul>
</li>
<li><p>副本选举 Leader</p>
<blockquote>
<p>Kafka 集群中有一个 broker 的 Controller 会被选举为 Controller Leader (4.2.2) ，负责管理集群 Broker 的上下线、所有 topic 的分区副本分配和 Leader 选举等工作</p>
<p>Broker 中 Controller 的信息同步工作是依赖于 Zookeeper 的 <code>/brokers/topics</code> 目录下的信息</p>
<p> <strong>如果 leader 副本下线，会以在 ISR 队列中存活为前提，按照 AR 队列（Replicas 队列）中前面优先的原则选举新 leader</strong></p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E9%80%89%E4%B8%BEleader2.png" alt="选举leader2"></p>
<blockquote>
<p>举例如下</p>
</blockquote>
<ul>
<li><p>创建一个新的 topic，4 个分区，4 个副本</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-topics.sh --bootstrap-server 47.106.86.64:9092 --create --topic atguigu1 --partitions 4 --replication-factor 4</span><br></pre></td></tr></table></figure>
</li>
<li><p>查看 Leader 分布情况</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-topics.sh --bootstrap-server 47.106.86.64:9092 --describe --topic atguigu1</span><br></pre></td></tr></table></figure>

<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">Topic: atguigu1 TopicId: awpgX_7WR-OX3Vl6HE8sVg PartitionCount: 4 ReplicationFactor: 4</span><br><span class="line">Configs: segment.bytes=1073741824</span><br><span class="line">Topic: atguigu1 Partition: 0 Leader: 3 Replicas: 3,0,2,1 Isr: 3,0,2,1</span><br><span class="line">Topic: atguigu1 Partition: 1 Leader: 1 Replicas: 1,2,3,0 Isr: 1,2,3,0</span><br><span class="line">Topic: atguigu1 Partition: 2 Leader: 0 Replicas: 0,3,1,2 Isr: 0,3,1,2</span><br><span class="line">Topic: atguigu1 Partition: 3 Leader: 2 Replicas: 2,1,0,3 Isr: 2,1,0,3</span><br></pre></td></tr></table></figure>
</li>
<li><p>停止掉 hadoop105 的 kafka 进程（对应 broker 3 号节点），并查看 Leader 分区情况</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-server-stop.sh</span><br></pre></td></tr></table></figure>

<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-topics.sh --bootstrap-server 47.106.86.64:9092 --describe --topic atguigu1</span><br></pre></td></tr></table></figure>

<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">Topic: atguigu1 TopicId: awpgX_7WR-OX3Vl6HE8sVg PartitionCount: 4 ReplicationFactor: 4</span><br><span class="line">Configs: segment.bytes=1073741824</span><br><span class="line">Topic: atguigu1 Partition: 0 Leader: 0 Replicas: 3,0,2,1 Isr: 0,2,1</span><br><span class="line">Topic: atguigu1 Partition: 1 Leader: 1 Replicas: 1,2,3,0 Isr: 1,2,0</span><br><span class="line">Topic: atguigu1 Partition: 2 Leader: 0 Replicas: 0,3,1,2 Isr: 0,1,2</span><br><span class="line">Topic: atguigu1 Partition: 3 Leader: 2 Replicas: 2,1,0,3 Isr: 2,1,0</span><br></pre></td></tr></table></figure>
</li>
<li><p>停止掉 hadoop104 的 kafka 进程（对应 broker 2 号节点），并查看 Leader 分区情况</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-server-stop.sh</span><br><span class="line">kafka-topics.sh --bootstrap-server 47.106.86.64:9092 --describe  --topic atguigu1</span><br></pre></td></tr></table></figure>

<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">Topic: atguigu1 TopicId: awpgX_7WR-OX3Vl6HE8sVg PartitionCount: 4 ReplicationFactor: 4</span><br><span class="line">Configs: segment.bytes=1073741824</span><br><span class="line">Topic: atguigu1 Partition: 0 Leader: 0 Replicas: 3,0,2,1 Isr: 0,1</span><br><span class="line">Topic: atguigu1 Partition: 1 Leader: 1 Replicas: 1,2,3,0 Isr: 1,0</span><br><span class="line">Topic: atguigu1 Partition: 2 Leader: 0 Replicas: 0,3,1,2 Isr: 0,1</span><br><span class="line">Topic: atguigu1 Partition: 3 Leader: 1 Replicas: 2,1,0,3 Isr: 1,0</span><br></pre></td></tr></table></figure></li>
</ul>
</li>
<li><p>副本故障处理</p>
<ul>
<li><p>follower 故障流程</p>
<blockquote>
<p>如果 follower 落后 leader 过多，体现在落后时间 <code>replica.lag.time.max.ms</code>，或者落后偏移量 <code>replica.lag.max.messages</code>（由于 kafka 生产速度不好界定，后面取消了该参数）</p>
<p>follower 就会被移出 ISR 队列，等待该 follower 的 LEO 追上分区的 HW，才会重新加入 ISR 中</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/follower%E6%95%85%E9%9A%9C%E6%B5%81%E7%A8%8B.png" alt="follower故障流程"></p>
</li>
<li><p>leader 故障流程</p>
<blockquote>
<p>旧 Leader 先被从 ISR 队列中踢出，然后从 ISR 中选出一个新的 Leader 来</p>
<p>此时为了保证多个副本之间的数据一致性，其他的 follower 会先将各自的 log 文件中<strong>高于（右到左，比如 LEO 高于 HW）</strong> HW 的部分截取掉，然后从新的 leader 同步数据（由此可知这只能保证副本之间数据一致性，并不能保证数据不丢失或者不重复）</p>
<p>体现了设置 ACK-all 的重要性</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/leader%E6%95%85%E9%9A%9C%E6%B5%81%E7%A8%8B.png" alt="leader故障流程"></p>
</li>
</ul>
</li>
<li><p>分区副本分配</p>
<blockquote>
<p>如果 kafka 服务器只有 4 个节点，那么设置 kafka 的分区数大于服务器台数</p>
<p>在 kafka 底层如何分配存储副本呢</p>
</blockquote>
<ul>
<li><p>创建 16 分区，3 个副本</p>
</li>
<li><p>创建一个新的 topic，名称为 second</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-topics.sh --bootstrap-server 47.106.86.64:9092 --create --partitions 16 --replication-factor 3 --topic second</span><br></pre></td></tr></table></figure>
</li>
<li><p>查看分区和副本情况</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-topics.sh --bootstrap-server 47.106.86.64:9092  --describe --topic second</span><br><span class="line"><span class="meta prompt_"># </span><span class="language-bash">空 0 个</span></span><br><span class="line">Topic: second4 Partition: 0 Leader: 0 Replicas: 0,1,2 Isr: 0,1,2</span><br><span class="line">Topic: second4 Partition: 1 Leader: 1 Replicas: 1,2,3 Isr: 1,2,3</span><br><span class="line">Topic: second4 Partition: 2 Leader: 2 Replicas: 2,3,0 Isr: 2,3,0</span><br><span class="line">Topic: second4 Partition: 3 Leader: 3 Replicas: 3,0,1 Isr: 3,0,1</span><br><span class="line"><span class="meta prompt_"># </span><span class="language-bash">空 1 个</span></span><br><span class="line">Topic: second4 Partition: 4 Leader: 0 Replicas: 0,2,3 Isr: 0,2,3</span><br><span class="line">Topic: second4 Partition: 5 Leader: 1 Replicas: 1,3,0 Isr: 1,3,0</span><br><span class="line">Topic: second4 Partition: 6 Leader: 2 Replicas: 2,0,1 Isr: 2,0,1</span><br><span class="line">Topic: second4 Partition: 7 Leader: 3 Replicas: 3,1,2 Isr: 3,1,2</span><br><span class="line"><span class="meta prompt_"># </span><span class="language-bash">空 2 个</span></span><br><span class="line">Topic: second4 Partition: 8 Leader: 0 Replicas: 0,3,1 Isr: 0,3,1</span><br><span class="line">Topic: second4 Partition: 9 Leader: 1 Replicas: 1,0,2 Isr: 1,0,2</span><br><span class="line">Topic: second4 Partition: 10 Leader: 2 Replicas: 2,1,3 Isr: 2,1,3</span><br><span class="line">Topic: second4 Partition: 11 Leader: 3 Replicas: 3,2,0 Isr: 3,2,0</span><br><span class="line"><span class="meta prompt_"># </span><span class="language-bash">空 0 个</span></span><br><span class="line">Topic: second4 Partition: 12 Leader: 0 Replicas: 0,1,2 Isr: 0,1,2</span><br><span class="line">Topic: second4 Partition: 13 Leader: 1 Replicas: 1,2,3 Isr: 1,2,3</span><br><span class="line">Topic: second4 Partition: 14 Leader: 2 Replicas: 2,3,0 Isr: 
2,3,0</span><br><span class="line">Topic: second4 Partition: 15 Leader: 3 Replicas: 3,0,1 Isr: 3,0,1</span><br></pre></td></tr></table></figure>

<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E5%88%86%E5%8C%BA%E5%89%AF%E6%9C%AC%E5%88%86%E9%85%8D.png" alt="分区副本分配"></p>
</li>
</ul>
</li>
<li><p>手动调整分区副本</p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%89%8B%E5%8A%A8%E8%B0%83%E6%95%B4%E5%88%86%E5%8C%BA%E5%89%AF%E6%9C%AC.png" alt="手动调整分区副本"></p>
<blockquote>
<p>手动调整分区副本存储的步骤如下</p>
</blockquote>
<ul>
<li><p>创建一个新的 topic，名称为 three</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-topics.sh --bootstrap-server  47.106.86.64:9092  --create --partitions 4 --replication-factor 2 --topic three</span><br></pre></td></tr></table></figure>
</li>
<li><p>创建副本存储计划（所有副本都指定存储在 broker0、broker1 中）</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line"><span class="meta prompt_">$ </span><span class="language-bash">vim increase-replication-factor.json</span></span><br></pre></td></tr></table></figure>

<figure class="highlight json"><table><tr><td class="code"><pre><span class="line"><span class="punctuation">&#123;</span></span><br><span class="line">    <span class="attr">&quot;version&quot;</span><span class="punctuation">:</span><span class="number">1</span><span class="punctuation">,</span></span><br><span class="line">    <span class="attr">&quot;partitions&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span></span><br><span class="line">        <span class="punctuation">&#123;</span><span class="attr">&quot;topic&quot;</span><span class="punctuation">:</span><span class="string">&quot;three&quot;</span><span class="punctuation">,</span><span class="attr">&quot;partition&quot;</span><span class="punctuation">:</span><span class="number">0</span><span class="punctuation">,</span><span class="attr">&quot;replicas&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="number">0</span><span class="punctuation">,</span><span class="number">1</span><span class="punctuation">]</span><span class="punctuation">&#125;</span><span class="punctuation">,</span></span><br><span class="line">        <span class="punctuation">&#123;</span><span class="attr">&quot;topic&quot;</span><span class="punctuation">:</span><span class="string">&quot;three&quot;</span><span class="punctuation">,</span><span class="attr">&quot;partition&quot;</span><span class="punctuation">:</span><span class="number">1</span><span class="punctuation">,</span><span class="attr">&quot;replicas&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="number">0</span><span class="punctuation">,</span><span class="number">1</span><span class="punctuation">]</span><span class="punctuation">&#125;</span><span class="punctuation">,</span></span><br><span class="line">        <span class="punctuation">&#123;</span><span class="attr">&quot;topic&quot;</span><span class="punctuation">:</span><span 
class="string">&quot;three&quot;</span><span class="punctuation">,</span><span class="attr">&quot;partition&quot;</span><span class="punctuation">:</span><span class="number">2</span><span class="punctuation">,</span><span class="attr">&quot;replicas&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="number">1</span><span class="punctuation">,</span><span class="number">0</span><span class="punctuation">]</span><span class="punctuation">&#125;</span><span class="punctuation">,</span></span><br><span class="line">        <span class="punctuation">&#123;</span><span class="attr">&quot;topic&quot;</span><span class="punctuation">:</span><span class="string">&quot;three&quot;</span><span class="punctuation">,</span><span class="attr">&quot;partition&quot;</span><span class="punctuation">:</span><span class="number">3</span><span class="punctuation">,</span><span class="attr">&quot;replicas&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="number">1</span><span class="punctuation">,</span><span class="number">0</span><span class="punctuation">]</span><span class="punctuation">&#125;</span></span><br><span class="line">    <span class="punctuation">]</span></span><br><span class="line"><span class="punctuation">&#125;</span></span><br></pre></td></tr></table></figure>
</li>
<li><p>执行副本存储计划</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-reassign-partitions.sh --bootstrap-server  47.106.86.64:9092  --reassignment-json-file increase-replication-factor.json --execute</span><br></pre></td></tr></table></figure>
</li>
<li><p>验证副本存储计划</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-reassign-partitions.sh --bootstrap-server  47.106.86.64:9092  --reassignment-json-file increase-replication-factor.json --verify</span><br></pre></td></tr></table></figure></li>
</ul>
</li>
<li><p>分区自动调整</p>
<blockquote>
<p>一般情况下分区都是平衡散落在 broker 的，随着一些 broker 故障，会慢慢出现 leader 集中在某台 broker 上的情况，造成集群负载不均衡，这时候就需要分区平衡</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E5%88%86%E5%8C%BA%E8%87%AA%E5%8A%A8%E8%B0%83%E6%95%B4.png" alt="分区自动调整"></p>
<blockquote>
<p>为了解决上述问题 kafka 出现了自动平衡的机制</p>
<p>kafka 提供了下面几个参数进行控制</p>
</blockquote>
<ul>
<li><p><code>auto.leader.rebalance.enable</code></p>
<blockquote>
<p>自动 <code>leader parition</code> 平衡，默认是 true</p>
</blockquote>
</li>
<li><p><code>leader.imbalance.per.broker.percentage</code></p>
<blockquote>
<p>每个 broker 允许的不平衡的 leader 的比率，默认是10%</p>
<p>如果超过这个值，控制器将会触发 leader 的平衡</p>
</blockquote>
</li>
<li><p><code>leader.imbalance.check.interval.seconds</code></p>
<blockquote>
<p>检查 leader 负载是否平衡的时间间隔，默认是 300 秒</p>
</blockquote>
</li>
</ul>
<blockquote>
<p>但是在生产环境中是不开启这个自动平衡，因为触发 <code>leader partition</code> 的自动平衡会损耗性能</p>
<p>或者可以将触发自动平衡的参数 <code>leader.imbalance.per.broker.percentage</code> 的值调大点</p>
<p>也可以通过修改配置，然后手动触发分区的再平衡</p>
</blockquote>
</li>
<li><p>增加副本因子</p>
<blockquote>
<p>在生产环境当中，由于某个主题的重要等级需要提升，考虑增加副本</p>
<p>副本数的增加需要先制定计划，然后根据计划执行</p>
<p>不能通过命令行的方法添加副本</p>
</blockquote>
<ul>
<li><p>创建 topic</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">bin/kafka-topics.sh --bootstrap-server 47.106.86.64:9092 --create --partitions 3 --replication-factor 1 --topic four</span><br></pre></td></tr></table></figure>
</li>
<li><p>创建副本存储计划</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">vim increase-replication-factor.json</span><br></pre></td></tr></table></figure>

<figure class="highlight json"><table><tr><td class="code"><pre><span class="line"><span class="punctuation">&#123;</span></span><br><span class="line">    <span class="attr">&quot;version&quot;</span><span class="punctuation">:</span><span class="number">1</span><span class="punctuation">,</span></span><br><span class="line">    <span class="attr">&quot;partitions&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span></span><br><span class="line">        <span class="punctuation">&#123;</span><span class="attr">&quot;topic&quot;</span><span class="punctuation">:</span><span class="string">&quot;four&quot;</span><span class="punctuation">,</span><span class="attr">&quot;partition&quot;</span><span class="punctuation">:</span><span class="number">0</span><span class="punctuation">,</span><span class="attr">&quot;replicas&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="number">0</span><span class="punctuation">,</span><span class="number">1</span><span class="punctuation">,</span><span class="number">2</span><span class="punctuation">]</span><span class="punctuation">&#125;</span><span class="punctuation">,</span></span><br><span class="line">        <span class="punctuation">&#123;</span><span class="attr">&quot;topic&quot;</span><span class="punctuation">:</span><span class="string">&quot;four&quot;</span><span class="punctuation">,</span><span class="attr">&quot;partition&quot;</span><span class="punctuation">:</span><span class="number">1</span><span class="punctuation">,</span><span class="attr">&quot;replicas&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="number">0</span><span class="punctuation">,</span><span class="number">1</span><span class="punctuation">,</span><span class="number">2</span><span class="punctuation">]</span><span class="punctuation">&#125;</span><span class="punctuation">,</span></span><br><span class="line">        <span 
class="punctuation">&#123;</span><span class="attr">&quot;topic&quot;</span><span class="punctuation">:</span><span class="string">&quot;four&quot;</span><span class="punctuation">,</span><span class="attr">&quot;partition&quot;</span><span class="punctuation">:</span><span class="number">2</span><span class="punctuation">,</span><span class="attr">&quot;replicas&quot;</span><span class="punctuation">:</span><span class="punctuation">[</span><span class="number">0</span><span class="punctuation">,</span><span class="number">1</span><span class="punctuation">,</span><span class="number">2</span><span class="punctuation">]</span><span class="punctuation">&#125;</span></span><br><span class="line">    <span class="punctuation">]</span></span><br><span class="line"><span class="punctuation">&#125;</span></span><br></pre></td></tr></table></figure>
</li>
<li><p>执行副本存储计划</p>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-reassign-partitions.sh --bootstrap-server 47.106.86.64:9092 --reassignment-json-file increase-replication-factor.json --execute</span><br></pre></td></tr></table></figure></li>
</ul>
</li>
</ul>
</li>
<li><p>文件存储</p>
<ul>
<li><p>存储结构</p>
<blockquote>
<p><strong>在 Kafka 中主题（Topic）是一个逻辑上的概念，分区（partition）是物理上的存在的</strong></p>
<p>每个 partition 对应一个 log 文件，该 log 文件中存储的就是 Producer 生产的数据</p>
<p>Producer 生产的数据会被不断追加到该 log 文件末端</p>
<p>为防止 log 文件过大导致数据定位效率低下，Kafka 采用了分片和索引机制，将每个 partition 分为多个 segment，每个 segment 默认 1G <code>log.segment.bytes</code>， 每个 segment 包括 <code>.index</code> 文件、<code>.log</code> 文件和 <code>.timeindex</code> 等文件</p>
<p>这些文件位于文件夹下，该文件命名规则为：topic 名称 + 分区号</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E5%AD%98%E5%82%A8%E7%BB%93%E6%9E%84.png" alt="存储结构"></p>
<blockquote>
<p>Segment 的三个文件需要通过特定工具打开才能看到信息</p>
</blockquote>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line">kafka-run-class.sh kafka.tools.DumpLogSegments --files ./<span class="number">00000000000000000000.</span>index</span><br><span class="line">kafka-run-class.sh kafka.tools.DumpLogSegments --files ./<span class="number">00000000000000000000.</span>log</span><br></pre></td></tr></table></figure>

<blockquote>
<p>当 log 文件写入4k（可以通过 <code>log.index.interval.bytes</code> 设置）数据，就会写入一条索引信息到 index 文件中，这样的 index 索引文件就是一个稀疏索引，它并不会每条日志都建立索引信息</p>
<p>当 Kafka 查询一条 offset 对应实际消息时，可以通过 index 进行二分查找，获取最近的低位 offset，然后从低位 offset 对应的 position 开始，从实际的 log 文件中开始往后查找对应的消息</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E5%AD%98%E5%82%A8%E7%BB%93%E6%9E%842.png" alt="存储结构2"></p>
<blockquote>
<p>时间戳索引文件，它的作用是可以查询某一个时间段内的消息</p>
<p>数据结构为：时间戳（8 byte）+ 相对 offset（4 byte）</p>
<p>如果要使用这个索引文件，先要通过时间范围找到对应的 offset，然后再去找对应的 index 文件找到 position 信息，最后再遍历 log 文件，这个过程也需要用到 index 索引文件</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%97%B6%E9%97%B4%E6%88%B3%E7%B4%A2%E5%BC%95%E6%96%87%E4%BB%B6.png" alt="时间戳索引文件"></p>
</li>
<li><p>文件清理策略</p>
<blockquote>
<p>Kafka 将消息存储在磁盘中，为了控制磁盘占用空间的不断增加就需要对消息做一定的清理操作</p>
<p>Kafka 中每一个分区副本都对应一个 Log，而 Log 又可以分为多个日志分段，这样也便于日志的清理操作</p>
<p>Kafka 提供了两种日志清理策略</p>
</blockquote>
<ul>
<li><p>日志删除（delete）</p>
<blockquote>
<p>按照一定的<strong>保留策略</strong>直接删除不符合条件的日志分段</p>
<p>kafka 中默认的日志保存时间为 7 天，可以通过调整如下参数修改保存时间</p>
</blockquote>
<ul>
<li><code>log.retention.hours</code>：最低优先级小时，默认 7 天</li>
<li><code>log.retention.minutes</code>：分钟</li>
<li><code>log.retention.ms</code>：最高优先级毫秒</li>
<li><code>log.retention.check.interval.ms</code>：负责设置检查周期，默认 5 分钟</li>
<li><code>file.delete.delay.ms</code>：延迟执行删除时间</li>
<li><code>log.retention.bytes</code>：当设置为 -1 时表示不限制保留日志的大小（相当于关闭该策略）；当设置为 1G 时，表示日志文件大小上限为 1G</li>
</ul>
<blockquote>
<p>具体的保留日志策略有三种：</p>
</blockquote>
<ul>
<li><p><strong>基于时间策略</strong></p>
<blockquote>
<p>日志删除任务会周期检查当前日志文件中是否有保留时间超过设定的阈值来寻找可删除的日志段文件集合</p>
<p>需要注意 <code>log.retention</code> 参数的优先级：<code>log.retention.ms &gt; log.retention.minutes &gt; log.retention.hours</code>，默认只会配置 <code>log.retention.hours</code> 参数，值为 168 即为 7 天</p>
<p>删除过期的日志段文件，并不是简单的根据日志段文件的修改时间计算，而是要根据该日志段中最大的时间戳来计算的，首先要查询该日志分段所对应的时间戳索引文件，查找该时间戳索引文件的最后一条索引数据，如果时间戳大于 0 就取值，否则才会使用最近修改时间</p>
<p>在删除的时候先从 Log 对象所维护的日志段的跳跃表中移除要删除的日志段，用来确保已经没有线程来读取这些日志段</p>
<p>接着将日志段所对应的所有文件，包括索引文件都添加上 <code>.deleted</code> 的后缀</p>
<p>最后交给一个以 <code>delete-file</code> 命名的延迟任务来删除这些以 <code>.deleted</code> 为后缀的文件，默认是 1 分钟执行一次，可以通过 <code>file.delete.delay.ms</code> 来配置</p>
</blockquote>
</li>
<li><p><strong>基于日志大小策略</strong></p>
<blockquote>
<p>日志删除任务会周期性检查当前日志大小是否超过设定的阈值（<code>log.retention.bytes</code>，默认是-1，表示无穷大），就从第一个日志分段中寻找可删除的日志段文件集合</p>
<p>如果超过阈值则删除</p>
</blockquote>
</li>
<li><p><strong>基于日志起始偏移量</strong></p>
<blockquote>
<p>该策略判断依据是日志段的下一个日志段的起始偏移量 <code>baseOffset</code> 是否小于等于 <code>logStartOffset</code></p>
<p>如果是，则可以删除此日志分段</p>
<p>一般情况下，日志文件的起始偏移量 <code>logStartOffset</code> 等于第一个日志分段的 <code>baseOffset</code>，但这并不是绝对的，<code>logStartOffset</code> 的值可以通过 <code>DeleteRecordsRequest</code> 请求、使用 <code>kafka-delete-records.sh</code> 脚本、日志的清理和截断等操作进行修改</p>
</blockquote>
</li>
</ul>
</li>
<li><p>日志压缩（compact）</p>
<blockquote>
<p>针对每个消息的 key 进行整合，对于有相同 key 的不同 value 值，只保留最后一个版本</p>
<p>如果应用只关心 key 对应的最新 value 值，则可以开启 Kafka 相应的日志清理功能</p>
<p>Kafka 会定期将相同 key 的消息进行合并，只保留最新的 value 值</p>
<p><code>log.cleanup.policy = compact</code> 所有数据启用压缩策略</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%97%A5%E5%BF%97%E5%8E%8B%E7%BC%A9.png" alt="日志压缩"></p>
<blockquote>
<p>这种策略只适合特殊场景，比如消息的 key 是用户 ID，value 是用户的资料，通过这种压缩策略，整个消息集里就保存了所有用户最新的资料</p>
</blockquote>
</li>
</ul>
</li>
</ul>
</li>
<li><p>Kafka 高效读写数据</p>
<blockquote>
<p>kafka 之所以可以快速读写的原因如下</p>
</blockquote>
<ul>
<li><p>kafka 是分布式集群，采用分区方式，并行操作</p>
</li>
<li><p>读取数据采用稀疏索引，可以快速定位消费数据</p>
</li>
<li><p>顺序写磁盘</p>
<blockquote>
<p>Kafka 的 producer 生产数据，要写入到 log 文件中，写的过程是一直追加到文件末端，为顺序写</p>
<p>同样的磁盘，顺序写能到 600M&#x2F;s，而随机写只有 100K&#x2F;s</p>
<p>这与磁盘的机械机构有关，顺序写之所以快，是因为其省去了大量磁头寻址的时间</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E9%A1%BA%E5%BA%8F%E5%86%99%E7%A3%81%E7%9B%98.png" alt="顺序写磁盘"></p>
</li>
<li><p>页缓存与零拷贝</p>
<blockquote>
<p>kafka 高效读写的原因很大一部分取决于<strong>页缓存</strong>和<strong>零拷贝</strong></p>
</blockquote>
<ul>
<li><p>页缓存</p>
<blockquote>
<p>在 Kafka 中，大量使用了 PageCache， 这也是 Kafka 能实现高吞吐的重要因素之一</p>
<p>首先看一下读操作，当一个进程要去读取磁盘上的文件内容时，操作系统会先查看要读取的数据页是否缓冲在 PageCache 中，如果存在则直接返回要读取的数据，这就减少了对于磁盘 I&#x2F;O 的操作</p>
<p>但是如果没有查到，操作系统会向磁盘发起读取请求并将读取的数据页存入 PageCache 中，之后再将数据返回给进程，就和使用 redis 缓冲是一个道理</p>
<p>接着写操作和读操作是一样的，如果一个进程需要将数据写入磁盘，操作系统会检查数据页是否在 PageCache 中已经存在，如果不存在就在  PageCache 中添加相应的数据页，接着将数据写入对应的数据页</p>
<p><strong>另外被修改过后的数据页也就变成了脏页，操作系统会在适当时间将脏页中的数据写入磁盘，以保持数据的一致性</strong></p>
<p>具体的刷盘机制可以通过 <code>log.flush.interval.messages</code>、<code>log.flush.interval.ms</code> 等参数来控制</p>
<p>同步刷盘可以提高消息的可靠性，防止由于机器掉电等异常造成处于页缓存而没有及时写入磁盘的消息丢失</p>
<p>一般并不建议这么做，刷盘任务就应交由操作系统去调配，消息的可靠性应该由多副本机制来保障，而不是由同步刷盘这 种严重影响性能的行为来保障</p>
</blockquote>
</li>
<li><p>零拷贝</p>
<blockquote>
<p>零拷贝并不是不需要拷贝，而是减少不必要的拷贝次数</p>
<p>常规应用程序IO过程会经过四次拷贝</p>
</blockquote>
<ul>
<li>数据从磁盘经过 DMA（直接存储器访问）到内核的 Read Buffer</li>
<li>内核态的 Read Buffer 到用户态应用层的 Buffer</li>
<li>用户态的 Buffer 到内核态的 Socket Buffer</li>
<li>Socket Buffer 到网卡的 NIC Buffer</li>
</ul>
<blockquote>
<p>从上面的流程可以知道<strong>内核态和用户态之间的拷贝相当于执行两次无用的操作</strong>，之间切换也会花费很多资源</p>
<p><strong>Kafka Broker 应用层不关心存储的数据，所以不走应用层，传输效率高</strong></p>
<p>当数据从磁盘经过 DMA 拷贝到内核缓存（页缓存）后，为了减少 CPU 拷贝的性能损耗，操作系统会将该内核缓存与用户层进行共享，减少一次 CPU copy 过程，同时用户层的读写也会直接访问该共享存储，本身由用户层到 Socket 缓存的数据拷贝过程也变成了从内核到内核的 CPU 拷贝过程，更加的快速，这就是零拷贝，IO流程如下图</p>
<p>甚至如果消息存在页缓存 <code>PageCache</code> 中，还避免了硬盘到内核的拷贝过程，更加一步提升了消息的吞吐量（大概就理解成传输的数据只保存在内核空间，不需要再拷贝到用户态的应用层）</p>
<p>Java 的 JDK NIO 中的 <code>transferTo()</code> 方法就能够实现零拷贝操作，该实现依赖于操作系统底层的 <code>sendfile()</code> 系统调用</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E9%9B%B6%E6%8B%B7%E8%B4%9D.png" alt="零拷贝"></p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E9%9B%B6%E6%8B%B7%E8%B4%9D2.png" alt="零拷贝2"></p>
</li>
</ul>
</li>
</ul>
</li>
</ol>
<h3 id="5-Kafka-消费者"><a href="#5-Kafka-消费者" class="headerlink" title="5.Kafka 消费者"></a>5.Kafka 消费者</h3><ol>
<li><p>消费模式</p>
<blockquote>
<p>常见的消费模式有两种：poll、push</p>
<p>由于推模式很难考虑到每个客户端不同的消费速率，可能导致消费者来不及处理消息而宕机，因此 kafka 采用的是 poll 的模式</p>
<p>该模式有个缺点，如果服务端没有消息，消费端就会一直空轮询</p>
<p>为了避免过多不必要的空轮询，kafka 做了改进，如果没消息服务端就会暂时保持该请求，在一段时间内有消息再回应给客户端</p>
</blockquote>
<ul>
<li>poll（kafka 采用）：消费者主动向服务端拉取消息</li>
<li>push：服务端主动推送消息给消费者</li>
</ul>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%B6%88%E8%B4%B9%E6%96%B9%E5%BC%8F.png" alt="消费方式"></p>
</li>
<li><p>消费工作流程</p>
<ul>
<li><p>总体流程</p>
<blockquote>
<p>消费者对消息进行消费，并且将已经消费的消息加入 <code>__consumer_offsets</code> 中</p>
<p> <code>__consumer_offsets</code> 相当于一个 topic，有多个分区，存放在 broker 中而非存放在 zookeeper，是为了防止 kafka 与 zookeeper 大量交流而耗费过多网络 IO</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%80%BB%E4%BD%93%E5%B7%A5%E4%BD%9C%E6%B5%81%E7%A8%8B.png" alt="总体工作流程"></p>
</li>
<li><p>消费者组原理</p>
<blockquote>
<p>Consumer Group（CG）：消费者组</p>
<p>由多个 consumer 组成</p>
<p><strong>形成一个消费者组的条件，是所有消费者的 groupid 相同</strong></p>
<p><strong>消费者组内每个消费者负责消费不同分区的数据，一个分区只能由一个组内消费者消费</strong></p>
<p><strong>消费者组之间互不影响。所有的消费者都属于某个消费者组，即消费者组是逻辑上的一个订阅者</strong></p>
<p>如果消费者组中的消费者数 &gt; topic 分区数，则超出的部分消费者处于空闲状态</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%B6%88%E8%B4%B9%E8%80%85%E7%BB%84%E5%8E%9F%E7%90%86.png" alt="消费者组原理"></p>
<blockquote>
<p>对于消息中间件而言，一般有两种消息投递模式：</p>
</blockquote>
<ul>
<li><p>点对点（P2P，Point-to-Point）模式</p>
<blockquote>
<p>点对点模式是基于队列的，消息生产者发送消息到队列，消息消费者从队列中接收消息</p>
</blockquote>
</li>
<li><p>发布／订阅（Pub &#x2F; Sub）模式</p>
<blockquote>
<p>发布订阅模式定义了如何向一个内容节点发布和订阅消息，这个内容节点称为主题（Topic）,</p>
<p>主题可以认为是消息传递的中介，消息发布者将消息发布到某个主题， 而消息订阅者从主题中订阅消息</p>
<p>主题使得消息的订阅者和发布者互相保持独立，不需要进行接触即可保证消息的传递</p>
<p>发布／订阅模式在消息的一对多广播时采用</p>
</blockquote>
</li>
</ul>
<blockquote>
<p>Kafka 同时支持两种消息投递模式，而这正是得益于消费者与消费组模型的契合</p>
</blockquote>
<ul>
<li>如果所有的消费者都隶属于同一个消费组，那么所有的消息都会被均衡地投递给每一个消费者，即每条消息只会被一个消费者处理，这就相当于点对点模式的应用</li>
<li>如果所有的消费者都隶属于不同的消费组，那么所有的消息都会被广播给所有的消费者，即每条消息会被所有的消费者处理，这就相当于发布／订阅模式的应用</li>
</ul>
</li>
<li><p>消费者组选举 Leader</p>
<blockquote>
<p>具体的消费者组初始化流程</p>
<p>通过对 <code>GroupId</code> 进行 <code>Hash</code>（hashCode % 50）确定由哪个 broker 上的 <code>coordinator</code> 负责该消费者组</p>
<p><code>coordinator</code> 负责选出消费组中的 <code>Leader</code> ，并且协调信息</p>
<p>真正存储消费记录的是 <code>_consumer_offsets_partition</code></p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%B6%88%E8%B4%B9%E8%80%85%E7%BB%84%E9%80%89%E4%B8%BELeader1.png" alt="消费者组选举Leader1"></p>
<blockquote>
<p>消费者组详细消费流程</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%B6%88%E8%B4%B9%E8%80%85%E7%BB%84%E9%80%89%E4%B8%BELeader2.png" alt="消费者组选举Leader2"></p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%B6%88%E8%B4%B9%E8%80%85%E7%BB%84%E9%80%89%E4%B8%BELeader3.png" alt="消费者组选举Leader3"></p>
</li>
</ul>
</li>
<li><p>消费者 API</p>
<blockquote>
<p>消费者组单消费者</p>
<p>消费者组多消费者</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%B6%88%E8%B4%B9%E8%80%85API.png" alt="消费者API"></p>
<blockquote>
<p>在消费者 API 代码中必须配置消费者组 id</p>
<p>命令行启动消费者不填写消费者组id 会被自动填写随机的消费者组 id</p>
</blockquote>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="keyword">public</span> <span class="keyword">class</span> <span class="title class_">CustomConsumer</span> &#123;</span><br><span class="line">    <span class="keyword">public</span> <span class="keyword">static</span> <span class="keyword">void</span> <span class="title function_">main</span><span class="params">(String[] args)</span> &#123;</span><br><span class="line">        <span class="comment">//0.配置信息</span></span><br><span class="line">        <span class="type">Properties</span> <span class="variable">properties</span> <span class="operator">=</span> <span class="keyword">new</span> <span class="title class_">Properties</span>();</span><br><span class="line">        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, <span class="string">&quot;47.106.86.64:9092&quot;</span>);</span><br><span class="line">        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());</span><br><span class="line">        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());</span><br><span class="line">        properties.put(ConsumerConfig.GROUP_ID_CONFIG, <span class="string">&quot;test&quot;</span>);</span><br><span class="line">        <span class="comment">//1.创建消费者</span></span><br><span class="line">        KafkaConsumer&lt;String, String&gt; kafkaConsumer = <span class="keyword">new</span> <span class="title class_">KafkaConsumer</span>&lt;&gt;(properties);</span><br><span class="line">        ArrayList&lt;String&gt; topic = <span class="keyword">new</span> <span class="title class_">ArrayList</span>&lt;&gt;();</span><br><span class="line">        topic.add(<span class="string">&quot;first&quot;</span>);</span><br><span class="line">        kafkaConsumer.subscribe(topic);</span><br><span class="line">        <span class="comment">//2.消费信息</span></span><br><span class="line">        
<span class="keyword">while</span> (<span class="literal">true</span>) &#123;</span><br><span class="line">            ConsumerRecords&lt;String, String&gt; records = kafkaConsumer.poll(Duration.ofSeconds(<span class="number">1</span>));</span><br><span class="line">            records.forEach(record -&gt; &#123;</span><br><span class="line">                System.out.println(record);</span><br><span class="line">            &#125;);</span><br><span class="line">        &#125;</span><br><span class="line">        <span class="comment">//3.关闭</span></span><br><span class="line">    &#125;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>
</li>
<li><p>分区平衡以及再平衡</p>
<blockquote>
<p>参数名称描述</p>
</blockquote>
<ul>
<li><p><code>heartbeat.interval.ms</code></p>
<blockquote>
<p>Kafka 消费者和 coordinator 之间的心跳时间，默认 3s</p>
<p>该条目的值必须小于 <code>session.timeout.ms</code>，也不应该高于<code>session.timeout.ms</code> 的 1&#x2F;3</p>
</blockquote>
</li>
<li><p><code>session.timeout.ms</code></p>
<blockquote>
<p>Kafka 消费者和 coordinator 之间连接超时时间，默认 45s</p>
<p>超过该值，该消费者被移除，消费者组执行再平衡</p>
</blockquote>
</li>
<li><p><code>max.poll.interval.ms</code></p>
<blockquote>
<p>消费者处理消息的最大时长，默认是 5 分钟</p>
<p>超过该值，该消费者被移除，消费者组执行再平衡</p>
</blockquote>
</li>
<li><p><code>partition.assignment.strategy</code></p>
<blockquote>
<p>消费者分区分配策略，默认策略是 <code>Range + CooperativeSticky</code></p>
<p>Kafka 可以同时使用多个分区分配策略</p>
<p>可以选择的策略包括：Range、RoundRobin、Sticky、CooperativeSticky（协作者粘性）</p>
</blockquote>
</li>
</ul>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E5%88%86%E5%8C%BA%E5%B9%B3%E8%A1%A1%E4%BB%A5%E5%8F%8A%E5%86%8D%E5%B9%B3%E8%A1%A1.png" alt="分区平衡以及再平衡"></p>
<ul>
<li><p>分区分配策略</p>
<blockquote>
<p>一个 Consumer Group 中有多个 Consumer，一个 Topic 也有多个 Partition</p>
<p><strong>所以必然会涉及到 Partition 的分配问题：确定哪个 Partition 由哪个 Consumer 来消费的问题</strong></p>
<p>Kafka 客户端提供了3 种分区分配策略：RangeAssignor、RoundRobinAssignor、StickyAssignor，前两种分配方案相对简单一些，StickyAssignor 分配方案相对复杂一些</p>
</blockquote>
<ul>
<li><p>RangeAssignor</p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/Range.png" alt="Range"></p>
<blockquote>
<p>Range 分区分配再平衡案例</p>
<p><strong>停止掉 0 号消费者，快速重新发送消息观看结果（45s 以内，越快越好）</strong><br>1 号消费者：消费到 3、4 号分区数据<br>2 号消费者：消费到 5、6 号分区数据<br>0 号消费者的任务会整体被分配到 1 号消费者或者 2 号消费者<strong>（0、1、2 被整体分配）</strong><br>说明：0 号消费者挂掉后，消费者组需要按照超时时间 45s 来判断它是否退出，所以需要等待，时间到了 45s 后，判断它真的退出就会把任务分配给其他消费者执行</p>
<p><strong>再次重新发送消息观看结果（45s 以后）</strong><br>1 号消费者：消费到 0、1、2、3 号分区数据<br>2 号消费者：消费到 4、5、6 号分区数据<br>说明：消费者 0 已经被踢出消费者组，所以重新按照 range 方式分配</p>
</blockquote>
</li>
<li><p>RoundRobinAssignor</p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/RoundRobin.png" alt="RoundRobin"></p>
<blockquote>
<p>修改分区分配策略</p>
</blockquote>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line">properties.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, <span class="string">&quot;org.apache.kafka.clients.consumer.RoundRobinAssignor&quot;</span>);</span><br></pre></td></tr></table></figure>

<blockquote>
<p>RoundRobin 分区分配再平衡案例</p>
<p><strong>停止掉 1 号消费者，快速重新发送消息观看结果（45s 以内，越快越好）</strong><br>2 号消费者：消费到 2、5 号分区数据<br>3 号消费者：消费到 4、1 号分区数据<br>1 号消费者的任务会按照 RoundRobin 的方式，把数据轮询分成 0 、6 和 3 号分区数据，分别由 2 号消费者或者 3 号消费者消费<strong>（0、3、6 采用轮询分配）</strong><br>说明：1 号消费者挂掉后，消费者组需要按照超时时间 45s 来判断它是否退出，所以需要等待，时间到了 45s 后，判断它真的退出就会把任务分配给其他消费者执行</p>
<p><strong>再次重新发送消息观看结果（45s 以后）</strong><br>2 号消费者：消费到 0、2、4、6 号分区数据<br>3 号消费者：消费到 1、3、5 号分区数据<br>说明：消费者 1 已经被踢出消费者组，所以重新按照 RoundRobin 方式分配</p>
</blockquote>
</li>
<li><p>StickyAssignor</p>
<blockquote>
<p>StickyAssignor分区分配算法是 Kafka 客户端提供的分配策略中最复杂的一种，可以通过 <code>partition.assignment.strategy</code> 参数设置</p>
<p>从 0.11 版本开始引入，目的就是在执行新分配时，尽量在上一次分配结果上少做调整，其主要实现了以下 2 个目标：</p>
</blockquote>
<ul>
<li>Topic Partition 的分配要尽量均衡</li>
<li>当 Rebalance（重分配，后面会详细分析）发生时，尽量与上一次分配结果保持一致</li>
</ul>
<blockquote>
<p>该算法的精髓在于，重分配后还能尽量与上一次结果保持一致</p>
<p>进而达到消费者故障下线，故障恢复后的均衡问题</p>
</blockquote>
<blockquote>
<p>修改分区分配策略</p>
</blockquote>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="comment">// 修改分区分配策略</span></span><br><span class="line">properties.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, <span class="string">&quot;org.apache.kafka.clients.consumer.StickyAssignor&quot;</span>);</span><br></pre></td></tr></table></figure></li>
</ul>
</li>
</ul>
</li>
<li><p>offset 位移提交</p>
<ul>
<li><p>offset 的默认维护位置</p>
<blockquote>
<p>Kafka 0.9 版本之前 consumer 默认将 offset 保存在 Zookeeper 中</p>
<p>0.9 版本之后 consumer 默认保存在 Kafka 一个内置的 topic 中，该 topic 为 <code>__consumer_offsets</code></p>
<p>消费者提交的 offset 值维护在<strong><code>__consumer_offsets</code> 这个 Topic 中，具体维护在哪个分区中，是由消费者所在的消费者组 groupid</strong> 决定，计算方式是：groupid 的 hashCode % 50</p>
<p>当 kafka 环境正常而消费者不能消费时，有可能是对应的 <code>__consumer_offsets</code> 分区 leader 为 none 或 -1，或者分区中的日志文件损坏导致</p>
<p> <strong><code>__consumer_offsets</code> 主题里面采用 key 和 value 的方式存储数据</strong></p>
<p>key 是 <code>group.id + topic + 分区号</code>，value 就是当前 <code>offset</code> 的值</p>
<p>每隔一段时间，kafka 内部会对这个 topic 进行 compact，也就是每个 group.id + topic + 分区号就保留最新数据</p>
<p>一般情况下，当集群中第一次有消费者消费消息时会自动创建主题 <code>__consumer_offsets</code>，不过它的副本因子还受 <code>offsets.topic.replication.factor</code> 参数的约束，这个参数的默认值为 3，分区数可以通过 <code>offsets.topic.num.partitions</code> 参数设置，默认为 50</p>
<p>在配置文件 <code>config/consumer.properties</code> 中添加配置 <code>exclude.internal.topics=false</code>，默认是 true，表示不能消费系统主题</p>
<p>为了查看该系统主题数据，所以该参数修改为 false</p>
</blockquote>
<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">kafka-console-consumer.sh --topic __consumer_offsets --bootstrap-server 47.106.86.64:9092 --consumer.config config/consumer.properties --formatter &quot;kafka.coordinator.group.GroupMetadataManager\$OffsetsMessageFormatter&quot; --from-beginning</span><br></pre></td></tr></table></figure>

<figure class="highlight shell"><table><tr><td class="code"><pre><span class="line">[offset,atguigu,1]::OffsetAndMetadata(offset=7, </span><br><span class="line">leaderEpoch=Optional[0], metadata=, commitTimestamp=1622442520203, </span><br><span class="line">expireTimestamp=None)</span><br><span class="line">[offset,atguigu,0]::OffsetAndMetadata(offset=8, </span><br><span class="line">leaderEpoch=Optional[0], metadata=, commitTimestamp=1622442520203, </span><br><span class="line">expireTimestamp=None)</span><br></pre></td></tr></table></figure></li>
</ul>
<blockquote>
<p>消费者提交 offset 的方式有两种，<strong>自动提交</strong>、<strong>手动提交</strong></p>
<p>自动提交有可能出现消息消费失败，但是却提交了 offset 的情况，导致<strong>消息丢失</strong></p>
<p>为了能够实现消息消费 offset 的精确控制，更推荐手动提交</p>
</blockquote>
<ul>
<li><p>自动提交</p>
<blockquote>
<p>为了能够专注于业务逻辑，Kafka 提供了自动提交 offset 的功能</p>
<p>参数说明</p>
</blockquote>
<ul>
<li><code>enable.auto.commit</code>：是否开启自动提交 offset 功能，默认是 true</li>
<li><code>auto.commit.interval.ms</code>：自动提交 offset 的时间间隔，默认是 5s</li>
</ul>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="comment">// 自动提交</span></span><br><span class="line">properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, <span class="literal">true</span>);</span><br><span class="line"><span class="comment">// 提交时间间隔</span></span><br><span class="line">properties.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, <span class="number">1000</span>);</span><br></pre></td></tr></table></figure>

<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E8%87%AA%E5%8A%A8%E6%8F%90%E4%BA%A4.png" alt="自动提交"></p>
</li>
<li><p>手动提交</p>
<blockquote>
<p>虽然自动提交 offset 十分简单便利，但由于其是基于时间提交的，开发人员难以把握 offset 提交的时机</p>
</blockquote>
</li>
</ul>
</li>
</ol>
<blockquote>
<p>因此 Kafka 还提供了手动提交 offset 的 API</p>
<p>手动提交 offset 的方法有两种：commitSync（同步提交）和 commitAsync（异步提交）</p>
</blockquote>
<ul>
<li><p>commitSync（同步提交）</p>
<blockquote>
<p>必须等待 offset 提交完毕，再去消费下一批数据</p>
<p>阻塞线程，一直到提交成功，会进行失败重试</p>
</blockquote>
</li>
<li><p>commitAsync（异步提交）</p>
<blockquote>
<p>发送完提交 offset 请求后，就开始消费下一批数据了</p>
<p>没有失败重试机制，有可能会提交失败</p>
</blockquote>
</li>
</ul>
<blockquote>
<p>相同点：都会将本次提交的一批数据最高的偏移量提交</p>
</blockquote>
<blockquote>
<p>不同点：同步提交阻塞当前线程，一直到提交成功，并且会自动失败重试（由不可控因素导致，也会出现提交失败）；而异步提交则没有失败重试机制，故有可能提交失败</p>
</blockquote>
<ul>
<li><p>配置 offset 消费</p>
<blockquote>
<p>在 kafka 中当消费者查找不到所记录的消费位移时，会根据 <code>auto.offset.reset</code> 的配置，决定从何处消费</p>
</blockquote>
</li>
</ul>
<blockquote>
<p><code>auto.offset.reset = earliest | latest | none</code> 默认是 latest</p>
</blockquote>
<ul>
<li><code>earliest</code>：自动将偏移量重置为最早的偏移量，--from-beginning</li>
</ul>
<ul>
<li><p><code>latest（默认值）</code>：自动将偏移量重置为最新偏移量</p>
<ul>
<li><code>none</code>：如果未找到消费者组的先前偏移量，则向消费者抛出异常</li>
</ul>
  <figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="comment">// 配置信息</span></span><br><span class="line"><span class="type">Properties</span> <span class="variable">properties</span> <span class="operator">=</span> <span class="keyword">new</span> <span class="title class_">Properties</span>();</span><br><span class="line">properties.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, <span class="string">&quot;latest&quot;</span>);</span><br></pre></td></tr></table></figure>

<p>  <img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%8C%87%E5%AE%9A%E6%B6%88%E8%B4%B9%E4%BD%8D%E7%BD%AE.png" alt="指定消费位置"></p>
</li>
<li><p>指定 offset 消费</p>
<blockquote>
<p>Kafka 中的消费位移是存储在一个内部主题中的， 可以使用 <code>seek()</code> 方法可以突破这一限制：消费位移可以保存在任意的存储介质中， 例如数据库、 文件系统等</p>
</blockquote>
</li>
</ul>
<blockquote>
<p>以数据库为例，将消费位移保存在其中的一个表中，在下次消费的时候可以读取存储在数据表中的消费位移并通过 <code>seek()</code> 方法指向这个具体的位置</p>
</blockquote>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="comment">// 指定位置进行消费</span></span><br><span class="line">   Set&lt;TopicPartition&gt; assignment = kafkaConsumer.assignment();</span><br><span class="line">   <span class="comment">// 保证分区分配方案已经制定完毕</span></span><br><span class="line">   <span class="keyword">while</span> (assignment.size() == <span class="number">0</span>)&#123;</span><br><span class="line">       kafkaConsumer.poll(Duration.ofSeconds(<span class="number">1</span>));</span><br><span class="line">       assignment = kafkaConsumer.assignment();</span><br><span class="line">   &#125;</span><br><span class="line">   <span class="comment">// 指定消费的offset</span></span><br><span class="line">   <span class="keyword">for</span> (TopicPartition topicPartition : assignment) &#123;</span><br><span class="line">       kafkaConsumer.seek(topicPartition,<span class="number">600</span>);</span><br><span class="line">   &#125;</span><br><span class="line">   <span class="comment">// 消费数据</span></span><br><span class="line">   <span class="keyword">while</span> (<span class="literal">true</span>)&#123;</span><br><span class="line">       ConsumerRecords&lt;String, String&gt; consumerRecords = kafkaConsumer.poll(Duration.ofSeconds(<span class="number">1</span>));</span><br><span class="line">       <span class="keyword">for</span> (ConsumerRecord&lt;String, String&gt; consumerRecord : consumerRecords) &#123;</span><br><span class="line">           System.out.println(consumerRecord);</span><br><span class="line">       &#125;</span><br><span class="line">   &#125;</span><br></pre></td></tr></table></figure>
<ul>
<li><p>指定时间消费</p>
<blockquote>
<p>原理就是查到时间对应的 offset 再去指定位移消费，为了确保同步到分区信息，还需要确保能获取到分区再去查询分区时间</p>
</blockquote>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="comment">// 指定位置进行消费</span></span><br><span class="line"> Set&lt;TopicPartition&gt; assignment = kafkaConsumer.assignment();</span><br><span class="line"> <span class="comment">// 保证分区分配方案已经制定完毕</span></span><br><span class="line"> <span class="keyword">while</span> (assignment.size() == <span class="number">0</span>)&#123;</span><br><span class="line">     kafkaConsumer.poll(Duration.ofSeconds(<span class="number">1</span>));</span><br><span class="line">     assignment = kafkaConsumer.assignment();</span><br><span class="line"> &#125;</span><br><span class="line"> <span class="comment">// 希望把时间转换为对应的offset</span></span><br><span class="line"> HashMap&lt;TopicPartition, Long&gt; topicPartitionLongHashMap = <span class="keyword">new</span> <span class="title class_">HashMap</span>&lt;&gt;();</span><br><span class="line"> <span class="comment">// 封装对应集合</span></span><br><span class="line"> <span class="keyword">for</span> (TopicPartition topicPartition : assignment) &#123;</span><br><span class="line">     topicPartitionLongHashMap.put(topicPartition,System.currentTimeMillis() - <span class="number">1</span> * <span class="number">24</span> * <span class="number">3600</span> * <span class="number">1000</span>);</span><br><span class="line"> &#125;</span><br><span class="line"> Map&lt;TopicPartition, OffsetAndTimestamp&gt; topicPartitionOffsetAndTimestampMap = kafkaConsumer.offsetsForTimes(topicPartitionLongHashMap);</span><br><span class="line"> <span class="comment">// 指定消费的offset</span></span><br><span class="line"> <span class="keyword">for</span> (TopicPartition topicPartition : assignment) &#123;</span><br><span class="line">     <span class="type">OffsetAndTimestamp</span> <span class="variable">offsetAndTimestamp</span> <span class="operator">=</span> topicPartitionOffsetAndTimestampMap.get(topicPartition);</span><br><span class="line">     
kafkaConsumer.seek(topicPartition,offsetAndTimestamp.offset());</span><br><span class="line"> &#125;</span><br><span class="line"> <span class="comment">// 消费数据</span></span><br><span class="line"> <span class="keyword">while</span> (<span class="literal">true</span>)&#123;</span><br><span class="line">     ConsumerRecords&lt;String, String&gt; consumerRecords = kafkaConsumer.poll(Duration.ofSeconds(<span class="number">1</span>));</span><br><span class="line">     <span class="keyword">for</span> (ConsumerRecord&lt;String, String&gt; consumerRecord : consumerRecords) &#123;</span><br><span class="line">         System.out.println(consumerRecord);</span><br><span class="line">     &#125;</span><br><span class="line"> &#125;</span><br></pre></td></tr></table></figure>
</li>
<li><p>漏消费和重复消费</p>
<blockquote>
<p>重复消费：已经消费了数据，但是 offset 没提交</p>
</blockquote>
</li>
</ul>
<blockquote>
<p>漏消费：先提交 offset 后消费，有可能会造成数据的漏消费</p>
</blockquote>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%BC%8F%E6%B6%88%E8%B4%B9%E5%92%8C%E9%87%8D%E5%A4%8D%E6%B6%88%E8%B4%B9.png" alt="漏消费和重复消费"></p>
<ol start="6">
<li><p>消费者事务</p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%B6%88%E8%B4%B9%E8%80%85%E4%BA%8B%E5%8A%A1.png" alt="消费者事务"></p>
</li>
<li><p>数据积压（提高吞吐量）</p>
<p><img src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/%E6%95%B0%E6%8D%AE%E7%A7%AF%E5%8E%8B%EF%BC%88%E6%8F%90%E9%AB%98%E5%90%9E%E5%90%90%E9%87%8F%EF%BC%89.png" alt="数据积压（提高吞吐量）"></p>
<blockquote>
<p>参数说明</p>
</blockquote>
<ul>
<li><p><code>fetch.max.bytes</code></p>
<blockquote>
<p>默认 52428800（50 m）</p>
<p>消费者获取服务器端一批消息最大的字节数</p>
<p>如果服务器端一批次的数据大于该值（50m）仍然可以拉取回来这批数据，因此这不是一个绝对最大值</p>
<p>一批次的大小受 <code>message.max.bytes</code>（broker config）或 <code>max.message.bytes</code>（topic config）影响</p>
</blockquote>
</li>
<li><p><code>max.poll.records</code></p>
<blockquote>
<p>一次 poll 拉取数据返回消息的最大条数，默认是 500 条</p>
</blockquote>
</li>
</ul>
</li>
<li><p>拦截器</p>
<blockquote>
<p>与生产者对应，消费者也有拦截器</p>
</blockquote>
<figure class="highlight java"><table><tr><td class="code"><pre><span class="line"><span class="keyword">public</span> <span class="keyword">interface</span> <span class="title class_">ConsumerInterceptor</span>&lt;K, V&gt; <span class="keyword">extends</span> <span class="title class_">Configurable</span>, AutoCloseable &#123;</span><br><span class="line">    ConsumerRecords&lt;K, V&gt; <span class="title function_">onConsume</span><span class="params">(ConsumerRecords&lt;K, V&gt; records)</span>;</span><br><span class="line">    <span class="keyword">void</span> <span class="title function_">onCommit</span><span class="params">(Map&lt;TopicPartition, OffsetAndMetadata&gt; offsets)</span>;</span><br><span class="line">    <span class="keyword">void</span> <span class="title function_">close</span><span class="params">()</span>;</span><br><span class="line">&#125;</span><br></pre></td></tr></table></figure>

<blockquote>
<p>Kafka Consumer 会在 <code>poll()</code> 方法返回之前调用拦截器的 <code>onConsume()</code> 方法来对消息进行相应的定制化操作</p>
<p>比如修改返回的消息内容、按照某种规则过滤消息（可能会减少 <code>poll()</code> 方法返回的消息的个数）</p>
<p>如果 <code>onConsume()</code> 方法中抛出异常， 那么会被捕获并记录到日志中， 但是异常不会再向上传递</p>
<p>Kafka Consumer 会在提交完消费位移之后调用拦截器的 <code>onCommit()</code> 方法， 可以使用这个方法来记录跟踪所提交的位移信息，比如当消费者使用 <code>commitSync</code> 的无参方法时，不知道提交的消费位移的具体细节， 而使用拦截器的 <code>onCommit()</code> 方法却可以做到这 一点</p>
</blockquote>
</li>
</ol>
<h3 id="6-Kafka-整合-Spring-Boot"><a href="#6-Kafka-整合-Spring-Boot" class="headerlink" title="6.Kafka 整合 Spring Boot"></a>6.Kafka 整合 Spring Boot</h3></article><div class="post-copyright"><div class="post-copyright__author"><span class="post-copyright-meta">文章作者: </span><span class="post-copyright-info"><a href="https://gitee.com/zcmmmm/zcmmmm">zcm</a></span></div><div class="post-copyright__type"><span class="post-copyright-meta">文章链接: </span><span class="post-copyright-info"><a href="https://gitee.com/zcmmmm/zcmmmm/2022/10/30/kafka%E5%9F%BA%E7%A1%80%E7%9F%A5%E8%AF%86/">https://gitee.com/zcmmmm/zcmmmm/2022/10/30/kafka%E5%9F%BA%E7%A1%80%E7%9F%A5%E8%AF%86/</a></span></div><div class="post-copyright__notice"><span class="post-copyright-meta">版权声明: </span><span class="post-copyright-info">本博客所有文章除特别声明外，均采用 <a href="https://creativecommons.org/licenses/by-nc-sa/4.0/" target="_blank">CC BY-NC-SA 4.0</a> 许可协议。转载请注明来自 <a href="https://gitee.com/zcmmmm/zcmmmm" target="_blank">无人深空</a>！</span></div></div><div class="tag_share"><div class="post-meta__tag-list"><a class="post-meta__tags" href="/tags/Kafka/">Kafka</a></div><div class="post_share"><div class="social-share" data-image="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/kafka.png" data-sites="facebook,twitter,wechat,weibo,qq"></div><link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/butterfly-extsrc/sharejs/dist/css/share.min.css" media="print" onload="this.media='all'"><script src="https://cdn.jsdelivr.net/npm/butterfly-extsrc/sharejs/dist/js/social-share.min.js" defer></script></div></div><nav class="pagination-post" id="pagination"><div class="prev-post pull-left"><a href="/2022/10/30/kafka%E9%9D%A2%E7%BB%8F/"><img class="prev-cover" src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/kafka.png" onerror="onerror=null;src='/img/404.jpg'" alt="cover of previous post"><div class="pagination-info"><div class="label">上一篇</div><div class="prev_info">Kafka面经</div></div></a></div><div 
class="next-post pull-right"><a href="/2022/10/28/SpringCloud%E9%9D%A2%E7%BB%8F/"><img class="next-cover" src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Java/Spring/Spring.jpg" onerror="onerror=null;src='/img/404.jpg'" alt="cover of next post"><div class="pagination-info"><div class="label">下一篇</div><div class="next_info">SpringCloud面经</div></div></a></div></nav><div class="relatedPosts"><div class="headline"><i class="fas fa-thumbs-up fa-fw"></i><span>相关推荐</span></div><div class="relatedPosts-list"><div><a href="/2022/10/30/kafka%E9%9D%A2%E7%BB%8F/" title="Kafka面经"><img class="cover" src="https://study-record-images.oss-cn-beijing.aliyuncs.com/Kafka/kafka.png" alt="cover"><div class="content is-center"><div class="date"><i class="far fa-calendar-alt fa-fw"></i> 2022-10-30</div><div class="title">Kafka面经</div></div></a></div></div></div></div><div class="aside-content" id="aside-content"><div class="sticky_layout"><div class="card-widget" id="card-toc"><div class="item-headline"><i class="fas fa-stream"></i><span>目录</span><span class="toc-percentage"></span></div><div class="toc-content is-expand"><ol class="toc"><li class="toc-item toc-level-2"><a class="toc-link" href="#Kafka-%E5%9F%BA%E7%A1%80%E7%9F%A5%E8%AF%86"><span class="toc-text">Kafka 基础知识</span></a><ol class="toc-child"><li class="toc-item toc-level-3"><a class="toc-link" href="#1-Kafka-%E6%A6%82%E8%BF%B0"><span class="toc-text">1.Kafka 概述</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#2-Kafka-%E5%BF%AB%E9%80%9F%E5%85%A5%E9%97%A8"><span class="toc-text">2.Kafka 快速入门</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#3-Kafka-%E7%94%9F%E4%BA%A7%E8%80%85"><span class="toc-text">3.Kafka 生产者</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#4-Kafka-Broker"><span class="toc-text">4.Kafka Broker</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#5-Kafka-%E6%B6%88%E8%B4%B9%E8%80%85"><span 
class="toc-text">5.Kafka 消费者</span></a></li><li class="toc-item toc-level-3"><a class="toc-link" href="#6-Kafka-%E6%95%B4%E5%90%88-Spring-Boot"><span class="toc-text">6.Kafka 整合 Spring Boot</span></a></li></ol></li></ol></div></div></div></div></main><footer id="footer"><div id="footer-wrap"><div class="copyright">&copy;2022 - 2023 By zcm</div><div class="framework-info"><span>框架 </span><a target="_blank" rel="noopener" href="https://hexo.io">Hexo</a><span class="footer-separator">|</span><span>主题 </span><a target="_blank" rel="noopener" href="https://github.com/jerryc127/hexo-theme-butterfly">Butterfly</a></div></div></footer></div><div id="rightside"><div id="rightside-config-hide"><button id="readmode" type="button" title="阅读模式"><i class="fas fa-book-open"></i></button><button id="darkmode" type="button" title="浅色和深色模式转换"><i class="fas fa-adjust"></i></button><button id="hide-aside-btn" type="button" title="单栏和双栏切换"><i class="fas fa-arrows-alt-h"></i></button></div><div id="rightside-config-show"><button id="rightside_config" type="button" title="设置"><i class="fas fa-cog fa-spin"></i></button><button class="close" id="mobile-toc-button" type="button" title="目录"><i class="fas fa-list-ul"></i></button><button id="go-up" type="button" title="回到顶部"><i class="fas fa-arrow-up"></i></button></div></div><div id="local-search"><div class="search-dialog"><nav class="search-nav"><span class="search-dialog-title">搜索</span><span id="loading-status"></span><button class="search-close-button"><i class="fas fa-times"></i></button></nav><div class="is-center" id="loading-database"><i class="fas fa-spinner fa-pulse"></i><span>  数据库加载中</span></div><div class="search-wrap"><div id="local-search-input"><div class="local-search-box"><input class="local-search-box--input" placeholder="搜索文章" type="text"/></div></div><hr/><div id="local-search-results"></div></div></div><div id="search-mask"></div></div><div><script src="/js/utils.js"></script><script src="/js/main.js"></script><script 
src="https://cdn.jsdelivr.net/npm/@fancyapps/ui/dist/fancybox.umd.min.js"></script><script src="https://cdn.jsdelivr.net/npm/node-snackbar/dist/snackbar.min.js"></script><script src="/js/search/local-search.js"></script><div class="js-pjax"></div><script src="/js/categories.js?v1"></script><script src="/js/navigation.js?v1"></script><script async data-pjax src="//busuanzi.ibruce.info/busuanzi/2.3/busuanzi.pure.mini.js"></script></div></body></html>