<!DOCTYPE html>



  


<html class="theme-next gemini use-motion" lang="zh-Hans">
<head><meta name="generator" content="Hexo 3.8.0">
  <meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="theme-color" content="#222">









<meta http-equiv="Cache-Control" content="no-transform">
<meta http-equiv="Cache-Control" content="no-siteapp">
















  
  
  <link href="../../../../lib/fancybox/source/jquery.fancybox.css?v=2.1.5" rel="stylesheet" type="text/css">




  
  
  
  

  
    
    
  

  

  

  

  

  
    
    
    <link href="https://fonts.useso.com/css?family=Lato:300,300italic,400,400italic,700,700italic&subset=latin,latin-ext" rel="stylesheet" type="text/css">
  






<link href="../../../../lib/font-awesome/css/font-awesome.min.css?v=4.6.2" rel="stylesheet" type="text/css">

<link href="../../../../css/main.css?v=5.1.4" rel="stylesheet" type="text/css">


  <link rel="apple-touch-icon" sizes="180x180" href="../../../../images/apple-touch-icon-next.png?v=5.1.4">


  <link rel="icon" type="image/png" sizes="32x32" href="../../../../images/favicon-32x32-next.png?v=5.1.4">


  <link rel="icon" type="image/png" sizes="16x16" href="../../../../images/favicon-16x16-next.png?v=5.1.4">


  <link rel="mask-icon" href="../../../../images/logo.svg?v=5.1.4" color="#222">





  <meta name="keywords" content="ELK,kafka,zookeeper">










<meta name="description" content="背景：最近线上上了ELK，但是只用了一台Redis在中间作为消息队列，以减轻前端es集群的压力，Redis的集群解决方案暂时没有接触过，并且Redis作为消息队列并不是它的强项；所以最近将Redis换成了专业的消息信息发布订阅系统Kafka, Kafka的更多介绍大家可以看这里：传送门  ,关于ELK的知识网上有很多的哦， 此篇博客主要是总结一下目前线上这个平台的实施步骤，ELK是怎么跟Kafka">
<meta name="keywords" content="ELK,kafka,zookeeper">
<meta property="og:type" content="article">
<meta property="og:title" content="ELK+Kafka 企业日志收集平台(一)">
<meta property="og:url" content="http://demo_demo.gitee.io/2015/11/14/elkkafka-e4-bc-81-e4-b8-9a-e6-97-a5-e5-bf-97-e6-94-b6-e9-9b-86-e5-b9-b3-e5-8f-b0-e4-b8-80/index.html">
<meta property="og:site_name" content="运维人">
<meta property="og:description" content="背景：最近线上上了ELK，但是只用了一台Redis在中间作为消息队列，以减轻前端es集群的压力，Redis的集群解决方案暂时没有接触过，并且Redis作为消息队列并不是它的强项；所以最近将Redis换成了专业的消息信息发布订阅系统Kafka, Kafka的更多介绍大家可以看这里：传送门  ,关于ELK的知识网上有很多的哦， 此篇博客主要是总结一下目前线上这个平台的实施步骤，ELK是怎么跟Kafka">
<meta property="og:locale" content="zh-Hans">
<meta property="og:image" content="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/1.png">
<meta property="og:image" content="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/11111.png">
<meta property="og:image" content="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/2.png">
<meta property="og:image" content="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/3.png">
<meta property="og:image" content="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/4.png.png">
<meta property="og:image" content="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/5.png">
<meta property="og:image" content="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/6.png">
<meta property="og:image" content="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/7.png">
<meta property="og:image" content="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/9.png">
<meta property="og:image" content="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/8.png">
<meta property="og:image" content="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/10.png">
<meta property="og:image" content="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/111.png">
<meta property="og:updated_time" content="2018-12-10T10:28:44.000Z">
<meta name="twitter:card" content="summary">
<meta name="twitter:title" content="ELK+Kafka 企业日志收集平台(一)">
<meta name="twitter:description" content="背景：最近线上上了ELK，但是只用了一台Redis在中间作为消息队列，以减轻前端es集群的压力，Redis的集群解决方案暂时没有接触过，并且Redis作为消息队列并不是它的强项；所以最近将Redis换成了专业的消息信息发布订阅系统Kafka, Kafka的更多介绍大家可以看这里：传送门  ,关于ELK的知识网上有很多的哦， 此篇博客主要是总结一下目前线上这个平台的实施步骤，ELK是怎么跟Kafka">
<meta name="twitter:image" content="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/1.png">



<script type="text/javascript" id="hexo.configurations">
  // Client-side configuration consumed by the NexT theme's scripts.
  // Reuse an existing window.NexT namespace if one was already created.
  var NexT = window.NexT || {};
  var CONFIG = {
    root: '/',          // site root path used to resolve asset/page URLs
    scheme: 'Gemini',   // active NexT scheme variant
    version: '5.1.4',   // NexT theme version (matches the ?v= cache-busting query strings)
    // Sidebar placement and behavior (shown on posts, hidden on mobile).
    sidebar: {"position":"left","display":"post","offset":12,"b2t":false,"scrollpercent":false,"onmobile":false},
    fancybox: true,     // enable fancybox image lightbox
    tabs: true,         // enable tabbed content blocks
    // Page-load animation settings (per-section transition names).
    motion: {"enable":true,"async":false,"transition":{"post_block":"fadeIn","post_header":"slideDownIn","post_body":"slideDownIn","coll_header":"slideLeftIn","sidebar":"slideUpIn"}},
    // Duoshuo comment-system identity (userId '0' with a generic author name).
    duoshuo: {
      userId: '0',
      author: '博主'
    },
    // Algolia search settings; empty credentials mean search is effectively unconfigured.
    algolia: {
      applicationID: '',
      apiKey: '',
      indexName: '',
      hits: {"per_page":10},
      labels: {"input_placeholder":"Search for Posts","hits_empty":"We didn't find any results for the search: ${query}","hits_stats":"${hits} results found in ${time} ms"}
    }
  };
</script>



  <link rel="canonical" href="http://demo_demo.gitee.io/2015/11/14/elkkafka-e4-bc-81-e4-b8-9a-e6-97-a5-e5-bf-97-e6-94-b6-e9-9b-86-e5-b9-b3-e5-8f-b0-e4-b8-80/">





  <title>ELK+Kafka 企业日志收集平台(一) | 运维人</title>
  








</head>

<body itemscope="" itemtype="http://schema.org/WebPage" lang="zh-Hans">

  
  
    
  

  <div class="container sidebar-position-left page-post-detail">
    <div class="headband"></div>
<a href="https://github.com/guomaoqiu" class="github-corner" aria-label="View source on GitHub"><svg width="80" height="80" viewbox="0 0 250 250" style="fill:#FD6C6C; color:#fff; position: absolute; top: 0; border: 0; right: 0;" aria-hidden="true"><path d="M0,0 L115,115 L130,115 L142,142 L250,250 L250,0 Z"/><path d="M128.3,109.0 C113.8,99.7 119.0,89.6 119.0,89.6 C122.0,82.7 120.5,78.6 120.5,78.6 C119.2,72.0 123.4,76.3 123.4,76.3 C127.3,80.9 125.5,87.3 125.5,87.3 C122.9,97.6 130.6,101.9 134.4,103.2" fill="currentColor" style="transform-origin: 130px 106px;" class="octo-arm"/><path d="M115.0,115.0 C114.9,115.1 118.7,116.5 119.8,115.4 L133.7,101.6 C136.9,99.2 139.9,98.4 142.2,98.6 C133.8,88.0 127.5,74.4 143.8,58.0 C148.5,53.4 154.0,51.2 159.7,51.0 C160.3,49.4 163.2,43.6 171.4,40.1 C171.4,40.1 176.1,42.5 178.8,56.2 C183.1,58.6 187.2,61.8 190.9,65.4 C194.5,69.0 197.7,73.2 200.1,77.6 C213.8,80.2 216.3,84.9 216.3,84.9 C212.7,93.1 206.9,96.0 205.4,96.6 C205.1,102.4 203.0,107.8 198.3,112.5 C181.9,128.9 168.3,122.5 157.7,114.1 C157.9,116.9 156.7,120.9 152.7,124.9 L141.0,136.5 C139.8,137.7 141.6,141.9 141.8,141.8 Z" fill="currentColor" class="octo-body"/></svg></a><style>.github-corner:hover .octo-arm{animation:octocat-wave 560ms ease-in-out}@keyframes octocat-wave{0%,100%{transform:rotate(0)}20%,60%{transform:rotate(-25deg)}40%,80%{transform:rotate(10deg)}}@media (max-width:500px){.github-corner:hover .octo-arm{animation:none}.github-corner .octo-arm{animation:octocat-wave 560ms ease-in-out}}</style>
    <header id="header" class="header" itemscope="" itemtype="http://schema.org/WPHeader">
      <div class="header-inner"><div class="site-brand-wrapper">
  <div class="site-meta ">
    

    <div class="custom-logo-site-title">
      <a href="/" class="brand" rel="start">
        <span class="logo-line-before"><i></i></span>
        <span class="site-title">运维人</span>
        <span class="logo-line-after"><i></i></span>
      </a>
    </div>
      
        <p class="site-subtitle">生命在于折腾</p>
      
  </div>

  <div class="site-nav-toggle">
    <button>
      <span class="btn-bar"></span>
      <span class="btn-bar"></span>
      <span class="btn-bar"></span>
    </button>
  </div>
</div>

<nav class="site-nav">
  

  
    <ul id="menu" class="menu">
      
        
        <li class="menu-item menu-item-home">
          <a href="/" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-home"></i> <br>
            
            首页
          </a>
        </li>
      
        
        <li class="menu-item menu-item-about">
          <a href="" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-user"></i> <br>
            
            关于
          </a>
        </li>
      
        
        <li class="menu-item menu-item-tags">
          <a href="" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-tags"></i> <br>
            
            标签
          </a>
        </li>
      
        
        <li class="menu-item menu-item-categories">
          <a href="" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-th"></i> <br>
            
            分类
          </a>
        </li>
      
        
        <li class="menu-item menu-item-archives">
          <a href="" rel="section">
            
              <i class="menu-item-icon fa fa-fw fa-archive"></i> <br>
            
            归档
          </a>
        </li>
      

      
    </ul>
  

  
</nav>



 </div>
    </header>

    <main id="main" class="main">
      <div class="main-inner">
        <div class="content-wrap">
          <div id="content" class="content">
            

  <div id="posts" class="posts-expand">
    

  

  
  
  

  <article class="post post-type-normal" itemscope="" itemtype="http://schema.org/Article">
  
  
  
  <div class="post-block">
    <link itemprop="mainEntityOfPage" href="http://demo_demo.gitee.io">

    <span hidden itemprop="author" itemscope="" itemtype="http://schema.org/Person">
      <meta itemprop="name" content="OutMan">
      <meta itemprop="description" content="">
      <meta itemprop="image" content="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/myphoto.png">
    </span>

    <span hidden itemprop="publisher" itemscope="" itemtype="http://schema.org/Organization">
      <meta itemprop="name" content="运维人">
    </span>

    
      <header class="post-header">

        
        
          <h1 class="post-title" itemprop="name headline">ELK+Kafka 企业日志收集平台(一)</h1>
        

        <div class="post-meta">
          <span class="post-time">
            
              <span class="post-meta-item-icon">
                <i class="fa fa-calendar-o"></i>
              </span>
              
                <span class="post-meta-item-text">发表于</span>
              
              <time title="创建于" itemprop="dateCreated datePublished" datetime="2015-11-14T16:48:45+08:00">
                2015-11-14
              </time>
            

            

            
          </span>

          
            <span class="post-category">
            
              <span class="post-meta-divider">|</span>
            
              <span class="post-meta-item-icon">
                <i class="fa fa-folder-o"></i>
              </span>
              
                <span class="post-meta-item-text">分类于</span>
              
              
                <span itemprop="about" itemscope="" itemtype="http://schema.org/Thing">
                  <a href="../../../../categories/自动化运维/" itemprop="url" rel="index">
                    <span itemprop="name">自动化运维</span>
                  </a>
                </span>

                
                
              
            </span>
          

          
            
          

          
          

          

          

          

        </div>
      </header>
    

    
    
    
    <div class="post-body" itemprop="articleBody">

      
      

      
        <h3 id="背景："><a href="#背景：" class="headerlink" title="背景："></a><strong>背景：</strong></h3><p>最近线上上了ELK，但是只用了一台Redis在中间作为消息队列，以减轻前端es集群的压力，Redis的集群解决方案暂时没有接触过，并且Redis作为消息队列并不是它的强项；所以最近将Redis换成了专业的消息信息发布订阅系统Kafka, Kafka的更多介绍大家可以看这里：<a href="http://blog.csdn.net/lizhitao/article/details/39499283" target="_blank" rel="noopener">传送门</a>  ,关于ELK的知识网上有很多的哦， 此篇博客主要是总结一下目前线上这个平台的实施步骤，ELK是怎么跟Kafka结合起来的。好吧，动手！</p>
<h3 id="ELK架构拓扑："><a href="#ELK架构拓扑：" class="headerlink" title="ELK架构拓扑："></a><strong>ELK架构拓扑：</strong></h3><p>然而我这里的整个日志收集平台就是这样的拓扑： <a href="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/1.png" target="_blank" rel="noopener"><img src="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/1.png" alt="1"></a> 1，使用一台Nginx代理访问kibana的请求; 2，两台es组成es集群，并且在两台es上面都安装kibana;（以下对elasticsearch简称es） 3，中间三台服务器就是我的kafka(zookeeper)集群啦; 上面写的消费者/生产者这是kafka(zookeeper)中的概念; 4，最后面的就是一大堆的生产服务器啦，上面使用的是logstash，当然除了logstash也可以使用其他的工具来收集你的应用程序的日志，例如：Flume，Scribe，Rsyslog，Scripts…… <strong>角色：</strong> <a href="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/11111.png" target="_blank" rel="noopener"><img src="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/11111.png" alt="11111"></a> <strong>软件选用：</strong></p>
<p>elasticsearch-1.7.3.tar.gz #这里需要说明一下，前几天使用了最新的elasticsearch2.0，java-1.8.0报错，目前未找到原因，故这里使用1.7.3版本<br>Logstash-2.0.0.tar.gz<br>kibana-4.1.2-linux-x64.tar.gz<br>以上软件都可以从官网下载:<a href="https://www.elastic.co/downloads" target="_blank" rel="noopener">https://www.elastic.co/downloads</a></p>
<p>java-1.8.0，nginx采用yum安装</p>
<p><strong>部署步骤：</strong> 1.ES集群安装配置; 2.Logstash客户端配置(直接写入数据到ES集群，写入系统messages日志); 3.Kafka(zookeeper)集群配置;(Logstash写入数据到Kafka消息系统); 4.Kibana部署; 5.Nginx负载均衡Kibana请求; 6.案例：nginx日志收集以及MySQL慢日志收集; 7.Kibana报表基本使用;</p>
<h3 id="ES集群安装配置"><a href="#ES集群安装配置" class="headerlink" title="ES集群安装配置;"></a>ES集群安装配置;</h3><p>es1.example.com: 1.安装java-1.8.0以及依赖包</p>
<p>yum install -y epel-release<br>yum install -y java-1.8.0 git wget lrzsz</p>
<p>2.获取es软件包</p>
<p>wget <a href="https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-1.7.3.tar.gz" target="_blank" rel="noopener">https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-1.7.3.tar.gz</a><br>tar -xf elasticsearch-1.7.3.tar.gz -C /usr/local<br>ln -sv /usr/local/elasticsearch-1.7.3 /usr/local/elasticsearch</p>
<p>3.修改配置文件</p>
<p>[root@es1 ~]# vim /usr/local/elasticsearch/config/elasticsearch.yml<br>32 cluster.name: es-cluster                         #组播的名称地址<br>40 node.name: “es-node1 “                           #节点名称，不能和其他节点重复<br>47 node.master: true                                #节点能否被选举为master<br>51 node.data: true                                  #节点是否存储数据<br>107 index.number_of_shards: 5                       #索引分片的个数<br>111 index.number_of_replicas: 1                     #分片的副本个数<br>145 path.conf: /usr/local/elasticsearch/config/     #配置文件的路径<br>149 path.data: /data/es/data                        #数据目录路径<br>159 path.work: /data/es/worker                      #工作目录路径<br>163 path.logs:  /usr/local/elasticsearch/logs/      #日志文件路径<br>167 path.plugins:  /data/es/plugins                 #插件路径<br>184 bootstrap.mlockall: true                        #内存不向swap交换<br>232 http.enabled: true                              #启用http</p>
<p>4.创建相关目录</p>
<p>mkdir /data/es/{data,worker,plugins} -p</p>
<p>5.获取es服务管理脚本</p>
<p>​[root@es1 ~]# git clone <a href="https://github.com/elastic/elasticsearch-servicewrapper.git" target="_blank" rel="noopener">https://github.com/elastic/elasticsearch-servicewrapper.git</a><br>[root@es1 ~]# mv elasticsearch-servicewrapper/service /usr/local/elasticsearch/bin/<br>[root@es1 ~]# /usr/local/elasticsearch/bin/service/elasticsearch install<br>Detected RHEL or Fedora:<br>Installing the Elasticsearch daemon..<br>[root@es1 ~]# </p>
<p>#这时就会在/etc/init.d/目录下安装上es的管理脚本啦</p>
<p>#修改其配置:<br>[root@es1 ~]#<br>set.default.ES_HOME=/usr/local/elasticsearch   #安装路径<br>set.default.ES_HEAP_SIZE=1024                  #jvm内存大小，根据实际环境调整即可</p>
<p>6.启动es ，并检查其服务是否正常</p>
<p>[root@es1 ~]# netstat -nlpt | grep -E “9200|9300”<br>tcp        0      0 0.0.0.0:9200                0.0.0.0:<em>                   LISTEN      1684/java<br>tcp        0      0 0.0.0.0:9300                0.0.0.0:</em>                   LISTEN      1684/java</p>
<p>访问<a href="http://192.168.2.18:9200/" target="_blank" rel="noopener">http://192.168.2.18:9200/</a> 如果出现以下提示信息说明安装配置完成啦， <a href="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/2.png" target="_blank" rel="noopener"><img src="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/2.png" alt="2"></a> 7.es1节点好啦，我们直接把目录复制到es2</p>
<p>[root@es1 local]# scp -r elasticsearch-1.7.3  192.168.12.19:/usr/local/</p>
<p>[root@es2 local]# ln -sv elasticsearch-1.7.3 elasticsearch<br>[root@es2 local]# elasticsearch/bin/service/elasticsearch install</p>
<p>#es2只需要修改node.name即可，其他都与es1相同配置</p>
<p>8.安装es的管理插件 es官方提供一个用于管理es的插件，可清晰直观看到es集群的状态，以及对集群的操作管理，安装方法如下：</p>
<p>[root@es1 local]# /usr/local/elasticsearch/bin/plugin -i mobz/elasticsearch-head</p>
<p>安装好之后，访问方式为： <a href="http://192.168.2.18:9200/_plugin/head，由于集群中现在暂时没有数据，所以显示为空" target="_blank" rel="noopener">http://192.168.2.18:9200/_plugin/head，由于集群中现在暂时没有数据，所以显示为空</a>, <a href="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/3.png" target="_blank" rel="noopener"><img src="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/3.png" alt="3"></a>       <strong>此时，es集群的部署完成。</strong></p>
<h3 id="Logstash客户端安装配置"><a href="#Logstash客户端安装配置" class="headerlink" title="Logstash客户端安装配置;"></a>Logstash客户端安装配置;</h3><p>在webserver1上面安装Logstash 1.downloads  软件包 ，这里注意，Logstash是需要依赖java环境的，所以这里还是需要yum install -y java-1.8.0.</p>
<p>[root@webserver1 ~]# wget <a href="https://download.elastic.co/logstash/logstash/logstash-2.0.0.tar.gz" target="_blank" rel="noopener">https://download.elastic.co/logstash/logstash/logstash-2.0.0.tar.gz</a><br>[root@webserver1 ~]# tar -xf logstash-2.0.0.tar.gz -C /usr/local<br>[root@webserver1 ~]# cd /usr/local/<br>[root@webserver1 local]# ln -sv logstash-2.0.0 logstash<br>[root@webserver1 local]# mkdir logs etc</p>
<p>2.提供logstash管理脚本，其中里面的配置路径可根据实际情况修改</p>
<p>#!/bin/bash</p>
<p>#chkconfig: 2345 55 24</p>
<p>#description: logstash service manager</p>
<p>#auto: Maoqiu Guo<br>FILE=’/usr/local/logstash/etc/*.conf’    #logstash配置文件<br>LOGBIN=’/usr/local/logstash/bin/logstash agent –verbose –config’  #指定logstash配置文件的命令<br>LOCK=’/usr/local/logstash/locks’         #用锁文件配合服务启动与关闭<br>LOGLOG=’–log /usr/local/logstash/logs/stdou.log’  #日志</p>
<p>START() {<br>    if [ -f $LOCK ];then<br>        echo -e “Logstash is already \033[32mrunning\033[0m, do nothing.”<br>    else<br>        echo -e “Start logstash service.\033[32mdone\033[m”<br>        nohup ${LOGBIN} ${FILE} ${LOGLOG} &amp;<br>        touch $LOCK<br>    fi<br>}</p>
<p>STOP() {<br>    if [ ! -f $LOCK ];then<br>        echo -e “Logstash is already stop, do nothing.”<br>    else<br>        echo -e “Stop logstash serivce \033[32mdone\033[m”<br>        rm -rf $LOCK<br>        ps -ef | grep logstash | grep -v “grep” | awk ‘{print $2}’ | xargs kill -s 9 &gt;/dev/null<br>    fi<br>}</p>
<p>STATUS() {<br>    ps aux | grep logstash | grep -v “grep” &gt;/dev/null<br>    if [ -f $LOCK ] &amp;&amp; [ $? -eq 0 ]; then<br>        echo -e “Logstash is: \033[32mrunning\033[0m…”<br>    else<br>        echo -e “Logstash is: \033[31mstopped\033[0m…”<br>    fi<br>}</p>
<p>TEST(){<br>    ${LOGBIN} ${FILE} –configtest<br>}</p>
<p>case “$1” in<br>  start)<br>    START<br>    ;;<br>  stop)<br>    STOP<br>    ;;<br>  status)<br>    STATUS<br>    ;;<br>  restart)<br>    STOP<br>        sleep 2<br>        START<br>    ;;<br>  test)<br>    TEST<br>    ;;<br>  *)<br>    echo “Usage: /etc/init.d/logstash (test|start|stop|status|restart)”<br>    ;;<br>esac</p>
<p>3.Logstash 向es集群写数据 (1)编写一个logstash配置文件</p>
<p>[root@webserver1 etc]# cat logstash.conf<br>input {              #数据的输入从标准输入<br>  stdin {}<br>}</p>
<p>output {             #数据的输出我们指向了es集群<br>  elasticsearch {<br>    hosts =&gt; [“192.168.2.18:9200”,”192.168.2.19:9200”]　　　＃es主机的ip及端口<br>  }<br>}<br>[root@webserver1 etc]#</p>
<p>(2)检查配置文件是否有语法错</p>
<p>[root@webserver1 etc]# /usr/local/logstash/bin/logstash -f logstash.conf –configtest –verbose<br>Configuration OK<br>[root@webserver1 etc]# </p>
<p>(3)既然配置ok我们手动启动它，然后写点东西看能否写到es <a href="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/4.png.png" target="_blank" rel="noopener"><img src="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/4.png.png" alt="4.png"></a> ok.上图已经看到logstash已经可以正常的工作啦. ４.下面演示一下如何收集系统日志 将之前的配置文件修改如下所示内容，然后启动logstash服务就可以在web页面中看到messages的日志写入es，并且创建了一条索引</p>
<p>[root@webserver1 etc]# cat logstash.conf<br>input {　　　　　　　#这里的输入使用的文件，即日志文件messsages<br>  file {　　　<br>    path =&gt; “/var/log/messages”　　　＃这是日志文件的绝对路径<br>    start_position =&gt; “beginning”　＃这个表示从messages的第一行读取，即文件开始处<br>  }<br>}</p>
<p>output {　　　　＃输出到es<br>  elasticsearch {<br>    hosts =&gt; [“192.168.2.18:9200”,”192.168.2.19:9200”]<br>    index =&gt; “system-messages-%{+YYYY-MM}”　　＃这里将按照这个索引格式来创建索引<br>  }<br>}<br>[root@webserver1 etc]#</p>
<p>启动logstash后，我们来看head这个插件的web页面 <a href="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/5.png" target="_blank" rel="noopener"><img src="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/5.png" alt="5"></a> ok，系统日志我们已经成功的收集，并且已经写入到es集群中，那上面的演示是logstash直接将日志写入到es集群中的，这种场合我觉得如果量不是很大的话直接像上面已将将输出output定义到es集群即可，如果量大的话需要加上消息队列来缓解es集群的压力。前面已经提到了我这边之前使用的是单台redis作为消息队列，但是redis不能作为list类型的集群，也就是redis单点的问题没法解决，所以这里我选用了kafka ;下面就在三台server上面安装kafka集群</p>
<h3 id="Kafka集群安装配置"><a href="#Kafka集群安装配置" class="headerlink" title="Kafka集群安装配置;"></a>Kafka集群安装配置;</h3><p>在搭建kafka集群时，需要提前安装zookeeper集群，当然kafka已经自带zookeeper程序只需要解压并且安装配置就行了 kafka1上面的配置： 1.获取软件包.官网：<a href="http://kafka.apache.org/" target="_blank" rel="noopener">http://kafka.apache.org</a></p>
<p>[root@kafka1 ~]# wget <a href="http://mirror.rise.ph/apache/kafka/0.8.2.1/kafka_2.11-0.8.2.1.tgz" target="_blank" rel="noopener">http://mirror.rise.ph/apache/kafka/0.8.2.1/kafka_2.11-0.8.2.1.tgz</a><br>[root@kafka1 ~]# tar -xf kafka_2.11-0.8.2.1.tgz -C /usr/local/<br>[root@kafka1 ~]# cd /usr/local/<br>[root@kafka1 local]# ln -sv kafka_2.11-0.8.2.1 kafka</p>
<p>2.配置zookeeper集群，修改配置文件</p>
<p>[root@kafka1 ~]# vim /usr/local/kafka/config/zookeeper.properties<br>dataDir=/data/zookeeper<br>clientPort=2181<br>tickTime=2000<br>initLimit=20<br>syncLimit=10<br>server.2=192.168.2.22:2888:3888<br>server.3=192.168.2.23:2888:3888<br>server.4=192.168.2.24:2888:3888</p>
<p>＃说明：<br>tickTime: 这个时间是作为 Zookeeper 服务器之间或客户端与服务器之间维持心跳的时间间隔，也就是每个 tickTime 时间就会发送一个心跳。<br>2888端口：表示的是这个服务器与集群中的 Leader 服务器交换信息的端口；<br>3888端口：表示的是万一集群中的 Leader 服务器挂了，需要一个端口来重新进行选举，选出一个新的 Leader，而这个端口就是用来执行选举时服务器相互通信的端口。</p>
<p>3.创建zookeeper所需要的目录</p>
<p>[root@kafka1 ~]# mkdir /data/zookeeper</p>
<p>4.在/data/zookeeper目录下创建myid文件，里面的内容为数字，用于标识主机，如果这个文件没有的话，zookeeper是没法启动的哦</p>
<p>[root@kafka1 ~]# echo 2 &gt; /data/zookeeper/myid</p>
<p>以上就是zookeeper集群的配置，下面等我配置好kafka之后直接复制到其他两个节点即可 5.kafka配置</p>
<p>[root@kafka1 ~]# vim /usr/local/kafka/config/server.properties<br>broker.id=2    　　　　    ＃　唯一，填数字，本文中分别为2/3/4<br>port=9092　　　　　　　     ＃　这个broker监听的端口　<br>host.name=192.168.2.22　  ＃　唯一，填服务器IP<br>log.dir=/data/kafka-logs  #  该目录可以不用提前创建，在启动时自己会创建<br>zookeeper.connect=192.168.2.22:2181,192.168.2.23:2181,192.168.2.24:2181　　＃这个就是zookeeper的ip及端口<br>num.partitions=16         # 需要配置较大 分片影响读写速度<br>log.dirs=/data/kafka-logs # 数据目录也要单独配置磁盘较大的地方<br>log.retention.hours=168   # 时间按需求保留过期时间 避免磁盘满</p>
<p>6.将kafka(zookeeper)的程序目录全部拷贝至其他两个节点</p>
<p>[root@kafka1 ~]# scp -r /usr/local/kafka 192.168.2.23:/usr/local/<br>[root@kafka1 ~]# scp -r /usr/local/kafka 192.168.2.24:/usr/local/</p>
<p>7.修改两个节点的配置，注意这里除了以下两点不同外，都是相同的配置</p>
<p>（1）zookeeper的配置<br>mkdir /data/zookeeper<br>echo “x” &gt; /data/zookeeper/myid<br>（2）kafka的配置<br>broker.id=2<br>host.name=192.168.2.22</p>
<p>8.修改完毕配置之后我们就可以启动了，这里先要启动zookeeper集群，才能启动kafka 我们按照顺序来，kafka1 –&gt; kafka2 –&gt;kafka3</p>
<p>[root@kafka1 ~]# /usr/local/kafka/bin/zookeeper-server-start.sh /usr/local/kafka/config/zookeeper.properties &amp;   #zookeeper启动命令<br>[root@kafka1 ~]# /usr/local/kafka/bin/zookeeper-server-stop.sh                                                   #zookeeper停止的命令</p>
<p>注意，如果zookeeper有问题 nohup的日志文件会非常大，把磁盘占满，这个zookeeper服务可以通过自己写服务脚本来管理服务的启动与关闭。 后面两台执行相同操作，在启动过程当中会出现以下报错信息</p>
<p>[2015-11-13 19:18:04,225] WARN Cannot open channel to 3 at election address /192.168.2.23:3888 (org.apache.zookeeper.server.quorum.QuorumCnxManager)<br>java.net.ConnectException: Connection refused<br>    at java.net.PlainSocketImpl.socketConnect(Native Method)<br>    at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:350)<br>    at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:206)<br>    at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:188)<br>    at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)<br>    at java.net.Socket.connect(Socket.java:589)<br>    at org.apache.zookeeper.server.quorum.QuorumCnxManager.connectOne(QuorumCnxManager.java:368)<br>    at org.apache.zookeeper.server.quorum.QuorumCnxManager.connectAll(QuorumCnxManager.java:402)<br>    at org.apache.zookeeper.server.quorum.FastLeaderElection.lookForLeader(FastLeaderElection.java:840)<br>    at org.apache.zookeeper.server.quorum.QuorumPeer.run(QuorumPeer.java:762)<br>[2015-11-13 19:18:04,232] WARN Cannot open channel to 4 at election address /192.168.2.24:3888 (org.apache.zookeeper.server.quorum.QuorumCnxManager)<br>java.net.ConnectException: Connection refused<br>    at java.net.PlainSocketImpl.socketConnect(Native Method)<br>    at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:350)<br>    at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:206)<br>    at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:188)<br>    at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)<br>    at java.net.Socket.connect(Socket.java:589)<br>    at org.apache.zookeeper.server.quorum.QuorumCnxManager.connectOne(QuorumCnxManager.java:368)<br>    at org.apache.zookeeper.server.quorum.QuorumCnxManager.connectAll(QuorumCnxManager.java:402)<br>    at 
org.apache.zookeeper.server.quorum.FastLeaderElection.lookForLeader(FastLeaderElection.java:840)<br>    at org.apache.zookeeper.server.quorum.QuorumPeer.run(QuorumPeer.java:762)<br>[2015-11-13 19:18:04,233] INFO Notification time out: 6400 (org.apache.zookeeper.server.quorum.FastLeaderElection)</p>
<p>由于zookeeper集群在启动的时候，每个结点都试图去连接集群中的其它结点，先启动的肯定连不上后面还没启动的，所以上面日志前面部分的异常是可以忽略的。通过后面部分可以看到，集群在选出一个Leader后，最后稳定了。 其他节点也可能会出现类似的情况，属于正常。 9.zookeeper服务检查</p>
<p>[root@kafka1~]#  netstat -nlpt | grep -E “2181|2888|3888”<br>tcp        0      0 192.168.2.24:3888           0.0.0.0:<em>                   LISTEN      1959/java<br>tcp        0      0 0.0.0.0:2181                0.0.0.0:</em>                   LISTEN      1959/java                       </p>
<p>[root@kafka2 ~]#  netstat -nlpt | grep -E “2181|2888|3888”<br>tcp        0      0 192.168.2.23:3888           0.0.0.0:<em>                   LISTEN      1723/java<br>tcp        0      0 0.0.0.0:2181                0.0.0.0:</em>                   LISTEN      1723/java           </p>
<p>[root@kafka3 ~]#  netstat -nlpt | grep -E “2181|2888|3888”<br>tcp        0      0 192.168.2.24:3888           0.0.0.0:<em>                   LISTEN      950/java<br>tcp        0      0 0.0.0.0:2181                0.0.0.0:</em>                   LISTEN      950/java<br>tcp        0      0 192.168.2.24:2888           0.0.0.0:*                   LISTEN      950/java            </p>
<p>#可以看出，如果哪台是Leader,那么它就拥有2888这个端口</p>
<p>ok.  这时候zookeeper集群已经启动起来了，下面启动kafka，也是依次按照顺序启动</p>
<p>[root@kafka1 ~]# nohup /usr/local/kafka/bin/kafka-server-start.sh /usr/local/kafka/config/server.properties &amp;   #kafka启动的命令<br>[root@kafka1 ~]#  /usr/local/kafka/bin/kafka-server-stop.sh                                                         #kafka停止的命令</p>
<p>注意，跟zookeeper服务一样，如果kafka有问题 nohup的日志文件会非常大,把磁盘占满，这个kafka服务同样可以通过自己写服务脚本来管理服务的启动与关闭。 此时三台上面的zookeeper及kafka都已经启动完毕，来检测一下吧 (1)建立一个主题</p>
<p>[root@kafka1 ~]# /usr/local/kafka/bin/kafka-topics.sh –create –zookeeper localhost:2181 –replication-factor 3 –partitions 1 –topic summer</p>
<p>#注意：factor大小不能超过broker数</p>
<p>(2)查看有哪些主题已经创建</p>
<p>[root@kafka1 ~]# /usr/local/kafka/bin/kafka-topics.sh –list –zookeeper 192.168.2.22:2181   #列出集群中所有的topic<br>summer  #已经创建成功</p>
<p>(3)查看summer这个主题的详情</p>
<p>[root@kafka1 ~]# /usr/local/kafka/bin/kafka-topics.sh –describe –zookeeper 192.168.2.22:2181 –topic summer<br>Topic:summer    PartitionCount:1    ReplicationFactor:3    Configs:<br>    Topic: summer    Partition: 0    Leader: 2    Replicas: 2,4,3    Isr: 2,4,3</p>
<p>#主题名称：summer</p>
<p>#Partition:只有一个，从0开始</p>
<p>#leader ：id为2的broker</p>
<p>#Replicas 副本存在于broker id为2,3,4的上面</p>
<p>#Isr:活跃状态的broker</p>
<p>(4)发送消息，这里使用的是生产者角色</p>
<p>[root@kafka1 ~]# /bin/bash /usr/local/kafka/bin/kafka-console-producer.sh –broker-list 192.168.2.22:9092 –topic summer<br>This is a messages<br>welcome to kafka    </p>
<p>(5)接收消息，这里使用的是消费者角色</p>
<p>[root@kafka2 ~]# /usr/local/kafka/bin/kafka-console-consumer.sh –zookeeper  192.168.2.24:2181 –topic summer –from-beginning<br>This is a messages<br>welcome to kafka</p>
<p>如果能够像上面一样能够接收到生产者发过来的消息，那说明基于kafka的zookeeper集群就成功啦。 10，下面我们将webserver1上面的logstash的输出改到kafka上面，将数据写入到kafka中 (1)修改webserver1上面的logstash配置，如下所示：各个参数可以到<a href="https://www.elastic.co/" target="_blank" rel="noopener">官网</a>查询.</p>
<p>root@webserver1 etc]# cat logstash.conf<br>input {             #这里的输入还是定义的是从日志文件输入<br>  file {<br>    type =&gt; “system-message”<br>    path =&gt; “/var/log/messages”<br>    start_position =&gt; “beginning”<br>  }<br>}</p>
<p>output {</p>
<pre><code>#stdout { codec =&gt; rubydebug }   #这是标准输出到终端，可以用于调试看有没有输出，注意输出的方向可以有多个
kafka {   #输出到kafka
  bootstrap_servers =&gt; &quot;192.168.2.22:9092,192.168.2.23:9092,192.168.2.24:9092&quot;   #他们就是生产者
  topic_id =&gt; &quot;system-messages&quot;  #这个将作为主题的名称，将会自动创建
  compression_type =&gt; &quot;snappy&quot;   #压缩类型
}
</code></pre><p>}<br>[root@webserver1 etc]#</p>
<p>(2)配置检测</p>
<p>[root@webserver1 etc]# /usr/local/logstash/bin/logstash -f logstash.conf –configtest –verbose<br>Configuration OK<br>[root@webserver1 etc]# </p>
<p>(2)启动Logstash，这里我直接在命令行执行即可</p>
<p>[root@webserver1 etc]# /usr/local/logstash/bin/logstash -f logstash.conf</p>
<p>(3)验证数据是否写入到kafka，这里我们检查是否生成了一个叫system-messages的主题</p>
<p>[root@kafka1 ~]# /usr/local/kafka/bin/kafka-topics.sh –list –zookeeper 192.168.2.22:2181<br>summer<br>system-messages   #可以看到这个主题已经生成了</p>
<p>#再看看这个主题的详情:<br>[root@kafka1 ~]# /usr/local/kafka/bin/kafka-topics.sh –describe –zookeeper 192.168.2.22:2181 –topic system-messages<br>Topic:system-messages    PartitionCount:16    ReplicationFactor:1    Configs:<br>    Topic: system-messages    Partition: 0    Leader: 2    Replicas: 2    Isr: 2<br>    Topic: system-messages    Partition: 1    Leader: 3    Replicas: 3    Isr: 3<br>    Topic: system-messages    Partition: 2    Leader: 4    Replicas: 4    Isr: 4<br>    Topic: system-messages    Partition: 3    Leader: 2    Replicas: 2    Isr: 2<br>    Topic: system-messages    Partition: 4    Leader: 3    Replicas: 3    Isr: 3<br>    Topic: system-messages    Partition: 5    Leader: 4    Replicas: 4    Isr: 4<br>    Topic: system-messages    Partition: 6    Leader: 2    Replicas: 2    Isr: 2<br>    Topic: system-messages    Partition: 7    Leader: 3    Replicas: 3    Isr: 3<br>    Topic: system-messages    Partition: 8    Leader: 4    Replicas: 4    Isr: 4<br>    Topic: system-messages    Partition: 9    Leader: 2    Replicas: 2    Isr: 2<br>    Topic: system-messages    Partition: 10    Leader: 3    Replicas: 3    Isr: 3<br>    Topic: system-messages    Partition: 11    Leader: 4    Replicas: 4    Isr: 4<br>    Topic: system-messages    Partition: 12    Leader: 2    Replicas: 2    Isr: 2<br>    Topic: system-messages    Partition: 13    Leader: 3    Replicas: 3    Isr: 3<br>    Topic: system-messages    Partition: 14    Leader: 4    Replicas: 4    Isr: 4<br>    Topic: system-messages    Partition: 15    Leader: 2    Replicas: 2    Isr: 2<br>[root@kafka1 ~]# </p>
<p>可以看出，这个主题生成了16个分区，每个分区都有对应自己的Leader，但是我想要有10个分区，3个副本如何办？还是跟我们上面一样命令行来创建主题就行，当然对于logstash输出的我们也可以提前先定义主题，然后启动logstash 直接往定义好的主题写数据就行啦，命令如下：</p>
<p>[root@kafka1 ~]# /usr/local/kafka/bin/kafka-topics.sh --create --zookeeper 192.168.2.22:2181 --replication-factor 3 --partitions 10 --topic TOPIC_NAME</p>
<p>好了，我们将logstash收集到的数据写入到了kafka中了，在实验过程中我使用while脚本测试了如果不断的往kafka写数据的同时停掉两个节点，数据写入没有任何问题。 那如何将数据从kafka中读取然后给我们的es集群呢？那下面我们在kafka集群上安装Logstash，安装步骤不再赘述；三台上面的logstash 的配置如下，作用是将kafka集群的数据读取然后转交给es集群，这里为了测试我让他新建一个索引文件，注意这里的输入日志还是messages，主题名称还是“system-messages”</p>
<p>[root@kafka1 etc]# more logstash.conf<br>input {<br>    kafka {<br>        zk_connect =&gt; "192.168.2.22:2181,192.168.2.23:2181,192.168.2.24:2181"   #消费者们<br>        topic_id =&gt; "system-messages"<br>        codec =&gt; plain<br>        reset_beginning =&gt; false<br>        consumer_threads =&gt; 5<br>        decorate_events =&gt; true<br>    }<br>}</p>
<p>output {<br>    elasticsearch {<br>      hosts =&gt; ["192.168.2.18:9200","192.168.2.19:9200"]<br>      index =&gt; "test-system-messages-%{+YYYY-MM}"           #为了区分之前实验，我这里新生成的所以名字为"test-system-messages-%{+YYYY-MM}"<br>  }<br>  }</p>
<p>在三台kafka上面启动Logstash，注意我这里是在命令行启动的；</p>
<p>[root@kafka1 etc]# pwd<br>/usr/local/logstash/etc<br>[root@kafka1 etc]# /usr/local/logstash/bin/logstash -f logstash.conf<br>[root@kafka2 etc]# pwd<br>/usr/local/logstash/etc<br>[root@kafka2 etc]# /usr/local/logstash/bin/logstash -f logstash.conf<br>[root@kafka3 etc]# pwd<br>/usr/local/logstash/etc<br>[root@kafka3 etc]# /usr/local/logstash/bin/logstash -f logstash.conf </p>
<p>在webserver1上写入测试内容，即webserver1上面利用message这个文件来测试，我先将其清空，然后启动</p>
<p>[root@webserver1 etc]# &gt;/var/log/messages<br>[root@webserver1 etc]# echo "我将通过kafka集群达到es集群哦^0^" &gt;&gt; /var/log/messages</p>
<p>#启动logstash,让其读取messages中的内容</p>
<p>下图为我在客户端写入到kafka集群的同时也将其输入到终端，这里写入了三条内容 <a href="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/6.png" target="_blank" rel="noopener"><img src="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/6.png" alt="6"></a> 而下面三张图侧可以看出，三台Logstash 很平均的从kafka集群当中读取出来了日志内容 <a href="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/7.png" target="_blank" rel="noopener"><img src="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/7.png" alt="7"></a> <a href="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/9.png" target="_blank" rel="noopener"><img src="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/9.png" alt="9"></a> <a href="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/8.png" target="_blank" rel="noopener"><img src="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/8.png" alt="8"></a> 再来看看我们的es管理界面 <a href="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/10.png" target="_blank" rel="noopener"><img src="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/10.png" alt="10"></a> ok ,看到了吧， 流程差不多就是下面 酱紫咯 <a href="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/111.png" target="_blank" rel="noopener"><img src="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/2015/11/111.png" alt="111"></a> 由于篇幅较长，我将 4.Kibana部署; 5.Nginx负载均衡Kibana请求; 6.案例：nginx日志收集以及MySQL慢日志收集; 7.Kibana报表基本使用; 放到下一篇博客。</p>

      
    </div>
    
    
    
<div>
  
    <div>
    
        <div style="text-align:center;color: #ccc;font-size:14px;">-------------本文结束<i class="fa fa-paw"></i>感谢您的阅读-------------</div>
    
</div>


  
</div>
    

    

    

    <footer class="post-footer">
      
        <div class="post-tags">
          
            <a href="../../../../tags/ELK/" rel="tag"># ELK</a>
          
            <a href="../../../../tags/kafka/" rel="tag"># kafka</a>
          
            <a href="../../../../tags/zookeeper/" rel="tag"># zookeeper</a>
          
        </div>
      

      
      
      

      
        <div class="post-nav">
          <div class="post-nav-next post-nav-item">
            
              <a href="../../../10/23/e6-b8-85-e7-90-86elasticsearch-e7-9a-84-e7-b4-a2-e5-bc-95/" rel="next" title="清理Elasticsearch的索引">
                <i class="fa fa-chevron-left"></i> 清理Elasticsearch的索引
              </a>
            
          </div>

          <span class="post-nav-divider"></span>

          <div class="post-nav-prev post-nav-item">
            
              <a href="../elkkafka-e4-bc-81-e4-b8-9a-e6-97-a5-e5-bf-97-e6-94-b6-e9-9b-86-e5-b9-b3-e5-8f-b0-e4-ba-8c/" rel="prev" title="ELK+Kafka 企业日志收集平台(二)">
                ELK+Kafka 企业日志收集平台(二) <i class="fa fa-chevron-right"></i>
              </a>
            
          </div>
        </div>
      

      
      
    </footer>
  </div>
  
  
  
  </article>



    <div class="post-spread">
      
    </div>
  </div>


          </div>
          


          

  



        </div>
        
          
  
  <div class="sidebar-toggle">
    <div class="sidebar-toggle-line-wrap">
      <span class="sidebar-toggle-line sidebar-toggle-line-first"></span>
      <span class="sidebar-toggle-line sidebar-toggle-line-middle"></span>
      <span class="sidebar-toggle-line sidebar-toggle-line-last"></span>
    </div>
  </div>

  <aside id="sidebar" class="sidebar">
    
    <div class="sidebar-inner">

      

      
        <ul class="sidebar-nav motion-element">
          <li class="sidebar-nav-toc sidebar-nav-active" data-target="post-toc-wrap">
            文章目录
          </li>
          <li class="sidebar-nav-overview" data-target="site-overview-wrap">
            站点概览
          </li>
        </ul>
      

      <section class="site-overview-wrap sidebar-panel">
        <div class="site-overview">
          <div class="site-author motion-element" itemprop="author" itemscope="" itemtype="http://schema.org/Person">
            
              <img class="site-author-image" itemprop="image" src="https://raw.githubusercontent.com/guomaoqiu/myblog_backup/master/uploads/myphoto.png" alt="OutMan">
            
              <p class="site-author-name" itemprop="name">OutMan</p>
              <p class="site-description motion-element" itemprop="description">😊</p>
          </div>

          <nav class="site-state motion-element">

            
              <div class="site-state-item site-state-posts">
              
                <a href="../../../../archives/">
              
                  <span class="site-state-item-count">79</span>
                  <span class="site-state-item-name">日志</span>
                </a>
              </div>
            

            
              
              
              <div class="site-state-item site-state-categories">
                <a href="../../../../categories/index.html">
                  <span class="site-state-item-count">22</span>
                  <span class="site-state-item-name">分类</span>
                </a>
              </div>
            

            
              
              
              <div class="site-state-item site-state-tags">
                <a href="../../../../tags/index.html">
                  <span class="site-state-item-count">46</span>
                  <span class="site-state-item-name">标签</span>
                </a>
              </div>
            

          </nav>

          

          
            <div class="links-of-author motion-element">
                
                  <span class="links-of-author-item">
                    <a href="https://github.com/guomaoqiu" target="_blank" title="GitHub">
                      
                        <i class="fa fa-fw fa-github"></i></a>
                  </span>
                
                  <span class="links-of-author-item">
                    <a href="mailto:guomaoqiu@google.com" target="_blank" title="E-Mail">
                      
                        <i class="fa fa-fw fa-envelope"></i></a>
                  </span>
                
            </div>
          

          
          

          
          

          

        </div>
      </section>

      
      <!--noindex-->
        <section class="post-toc-wrap motion-element sidebar-panel sidebar-panel-active">
          <div class="post-toc">

            
              
            

            
              <div class="post-toc-content"><ol class="nav"><li class="nav-item nav-level-3"><a class="nav-link" href="#背景："><span class="nav-number">1.</span> <span class="nav-text">背景：</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#ELK架构拓扑："><span class="nav-number">2.</span> <span class="nav-text">ELK架构拓扑：</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#ES集群安装配置"><span class="nav-number">3.</span> <span class="nav-text">ES集群安装配置;</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#Logstash客户端安装配置"><span class="nav-number">4.</span> <span class="nav-text">Logstash客户端安装配置;</span></a></li><li class="nav-item nav-level-3"><a class="nav-link" href="#Kafka集群安装配置"><span class="nav-number">5.</span> <span class="nav-text">Kafka集群安装配置;</span></a></li></ol></div>
            

          </div>
        </section>
      <!--/noindex-->
      

      

    </div>
  </aside>


        
      </div>
    </main>

    <footer id="footer" class="footer">
      <div class="footer-inner">
        <div class="copyright">&copy; <span itemprop="copyrightYear">2018</span>
  <span class="with-love">
    <i class="fa fa-user"></i>
  </span>
  <span class="author" itemprop="copyrightHolder">OutMan</span>

  
</div>


  <div class="powered-by">由 <a class="theme-link" target="_blank" href="https://hexo.io">Hexo</a> 强力驱动</div>



  <span class="post-meta-divider">|</span>



  <div class="theme-info">主题 &mdash; <a class="theme-link" target="_blank" href="https://github.com/iissnan/hexo-theme-next">NexT.Gemini</a> v5.1.4</div>




        







        
      </div>
    </footer>

    
      <div class="back-to-top">
        <i class="fa fa-arrow-up"></i>
        
      </div>
    

    

  </div>

  

<script type="text/javascript">
  // Clear out non-callable Promise shims so downstream scripts can
  // feature-detect (`window.Promise`) and fall back correctly.
  var promiseTag = Object.prototype.toString.call(window.Promise);
  if (promiseTag !== '[object Function]') {
    window.Promise = null;
  }
</script>









  


  











  
  
    <script type="text/javascript" src="../../../../lib/jquery/index.js?v=2.1.3"></script>
  

  
  
    <script type="text/javascript" src="../../../../lib/fastclick/lib/fastclick.min.js?v=1.0.6"></script>
  

  
  
    <script type="text/javascript" src="../../../../lib/jquery_lazyload/jquery.lazyload.js?v=1.9.7"></script>
  

  
  
    <script type="text/javascript" src="../../../../lib/velocity/velocity.min.js?v=1.2.1"></script>
  

  
  
    <script type="text/javascript" src="../../../../lib/velocity/velocity.ui.min.js?v=1.2.1"></script>
  

  
  
    <script type="text/javascript" src="../../../../lib/fancybox/source/jquery.fancybox.pack.js?v=2.1.5"></script>
  

  
  
    <script type="text/javascript" src="../../../../lib/canvas-nest/canvas-nest.min.js"></script>
  


  


  <script type="text/javascript" src="../../../../js/src/utils.js?v=5.1.4"></script>

  <script type="text/javascript" src="../../../../js/src/motion.js?v=5.1.4"></script>



  
  


  <script type="text/javascript" src="../../../../js/src/affix.js?v=5.1.4"></script>

  <script type="text/javascript" src="../../../../js/src/schemes/pisces.js?v=5.1.4"></script>



  
  <script type="text/javascript" src="../../../../js/src/scrollspy.js?v=5.1.4"></script>
<script type="text/javascript" src="../../../../js/src/post-details.js?v=5.1.4"></script>



  


  <script type="text/javascript" src="../../../../js/src/bootstrap.js?v=5.1.4"></script>



  


  




	





  





  












  





  

  

  

  
  

  

  

  

</body>
</html>
