<!doctype html><html lang=zh class=no-js> <head><meta charset=utf-8><meta name=viewport content="width=device-width,initial-scale=1"><link rel="shortcut icon" href=../../assets/images/favicon.png><meta name=generator content="mkdocs-1.1.2, mkdocs-material-5.5.13"><title>Flume - Dayet</title><link rel=stylesheet href=../../assets/stylesheets/main.077507d7.min.css><link rel=stylesheet href=../../assets/stylesheets/palette.ff0a5ce4.min.css><meta name=theme-color content=#546d78><link href=https://fonts.gstatic.com rel=preconnect crossorigin><link rel=stylesheet href="https://fonts.googleapis.com/css?family=Roboto:300,400,400i,700%7CConsolas&display=fallback"><style>body,input{font-family:"Roboto",-apple-system,BlinkMacSystemFont,Helvetica,Arial,sans-serif}code,kbd,pre{font-family:"Consolas",SFMono-Regular,Consolas,Menlo,monospace}</style><script>window.ga=window.ga||function(){(ga.q=ga.q||[]).push(arguments)},ga.l=+new Date,ga("create","UA-XXXXXXXX-X","auto"),ga("set","anonymizeIp",!0),ga("send","pageview"),document.addEventListener("DOMContentLoaded",function(){document.forms.search&&document.forms.search.query.addEventListener("blur",function(){if(this.value){var e=document.location.pathname;ga("send","pageview",e+"?q="+this.value)}})}),document.addEventListener("DOMContentSwitch",function(){ga("send","pageview",document.location.pathname)})</script><script async src=https://www.google-analytics.com/analytics.js></script></head> <body dir=ltr data-md-color-scheme data-md-color-primary=blue-grey data-md-color-accent=blue-grey> <input class=md-toggle data-md-toggle=drawer type=checkbox id=__drawer autocomplete=off> <input class=md-toggle data-md-toggle=search type=checkbox id=__search autocomplete=off> <label class=md-overlay for=__drawer></label> <div data-md-component=skip> <a href=#1 class=md-skip> 跳转至 </a> </div> <div data-md-component=announce> </div> <header class=md-header data-md-component=header> <nav class="md-header-nav md-grid" aria-label=Header> <a 
href=../.. title=Dayet class="md-header-nav__button md-logo" aria-label=Dayet> <svg xmlns=http://www.w3.org/2000/svg viewbox="0 0 24 24"><path d="M12 8a3 3 0 003-3 3 3 0 00-3-3 3 3 0 00-3 3 3 3 0 003 3m0 3.54C9.64 9.35 6.5 8 3 8v11c3.5 0 6.64 1.35 9 3.54 2.36-2.19 5.5-3.54 9-3.54V8c-3.5 0-6.64 1.35-9 3.54z"/></svg> </a> <label class="md-header-nav__button md-icon" for=__drawer> <svg xmlns=http://www.w3.org/2000/svg viewbox="0 0 24 24"><path d="M3 6h18v2H3V6m0 5h18v2H3v-2m0 5h18v2H3v-2z"/></svg> </label> <div class=md-header-nav__title data-md-component=header-title> <div class=md-header-nav__ellipsis> <span class="md-header-nav__topic md-ellipsis"> Dayet </span> <span class="md-header-nav__topic md-ellipsis"> Flume </span> </div> </div> <label class="md-header-nav__button md-icon" for=__search> <svg xmlns=http://www.w3.org/2000/svg viewbox="0 0 24 24"><path d="M9.5 3A6.5 6.5 0 0116 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.516 6.516 0 019.5 16 6.5 6.5 0 013 9.5 6.5 6.5 0 019.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5z"/></svg> </label> <div class=md-search data-md-component=search role=dialog> <label class=md-search__overlay for=__search></label> <div class=md-search__inner role=search> <form class=md-search__form name=search> <input type=text class=md-search__input name=query aria-label=搜索 placeholder=搜索 autocapitalize=off autocorrect=off autocomplete=off spellcheck=false data-md-component=search-query data-md-state=active> <label class="md-search__icon md-icon" for=__search> <svg xmlns=http://www.w3.org/2000/svg viewbox="0 0 24 24"><path d="M9.5 3A6.5 6.5 0 0116 9.5c0 1.61-.59 3.09-1.56 4.23l.27.27h.79l5 5-1.5 1.5-5-5v-.79l-.27-.27A6.516 6.516 0 019.5 16 6.5 6.5 0 013 9.5 6.5 6.5 0 019.5 3m0 2C7 5 5 7 5 9.5S7 14 9.5 14 14 12 14 9.5 12 5 9.5 5z"/></svg> <svg xmlns=http://www.w3.org/2000/svg viewbox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12z"/></svg> </label> <button type=reset 
class="md-search__icon md-icon" aria-label=Clear data-md-component=search-reset tabindex=-1> <svg xmlns=http://www.w3.org/2000/svg viewbox="0 0 24 24"><path d="M19 6.41L17.59 5 12 10.59 6.41 5 5 6.41 10.59 12 5 17.59 6.41 19 12 13.41 17.59 19 19 17.59 13.41 12 19 6.41z"/></svg> </button> </form> <div class=md-search__output> <div class=md-search__scrollwrap data-md-scrollfix> <div class=md-search-result data-md-component=search-result> <div class=md-search-result__meta> Initializing search </div> <ol class=md-search-result__list></ol> </div> </div> </div> </div> </div> <div class=md-header-nav__source> <a href=https://chokgit.gitee.io/blog title="前往 GitHub 仓库" class=md-source> <div class="md-source__icon md-icon"> <svg xmlns=http://www.w3.org/2000/svg viewbox="0 0 448 512"><path d="M439.55 236.05L244 40.45a28.87 28.87 0 00-40.81 0l-40.66 40.63 51.52 51.52c27.06-9.14 52.68 16.77 43.39 43.68l49.66 49.66c34.23-11.8 61.18 31 35.47 56.69-26.49 26.49-70.21-2.87-56-37.34L240.22 199v121.85c25.3 12.54 22.26 41.85 9.08 55a34.34 34.34 0 01-48.55 0c-17.57-17.6-11.07-46.91 11.25-56v-123c-20.8-8.51-24.6-30.74-18.64-45L142.57 101 8.45 235.14a28.86 28.86 0 000 40.81l195.61 195.6a28.86 28.86 0 0040.8 0l194.69-194.69a28.86 28.86 0 000-40.81z"/></svg> </div> <div class=md-source__repository> 吾生也有涯，而知也无涯。 </div> </a> </div> </nav> </header> <div class=md-container data-md-component=container> <main class=md-main data-md-component=main> <div class="md-main__inner md-grid"> <div class="md-sidebar md-sidebar--primary" data-md-component=navigation> <div class=md-sidebar__scrollwrap> <div class=md-sidebar__inner> <nav class="md-nav md-nav--primary" aria-label=Navigation data-md-level=0> <label class=md-nav__title for=__drawer> <a href=../.. 
title=Dayet class="md-nav__button md-logo" aria-label=Dayet> <svg xmlns=http://www.w3.org/2000/svg viewbox="0 0 24 24"><path d="M12 8a3 3 0 003-3 3 3 0 00-3-3 3 3 0 00-3 3 3 3 0 003 3m0 3.54C9.64 9.35 6.5 8 3 8v11c3.5 0 6.64 1.35 9 3.54 2.36-2.19 5.5-3.54 9-3.54V8c-3.5 0-6.64 1.35-9 3.54z"/></svg> </a> Dayet </label> <div class=md-nav__source> <a href=https://chokgit.gitee.io/blog title="前往 GitHub 仓库" class=md-source> <div class="md-source__icon md-icon"> <svg xmlns=http://www.w3.org/2000/svg viewbox="0 0 448 512"><path d="M439.55 236.05L244 40.45a28.87 28.87 0 00-40.81 0l-40.66 40.63 51.52 51.52c27.06-9.14 52.68 16.77 43.39 43.68l49.66 49.66c34.23-11.8 61.18 31 35.47 56.69-26.49 26.49-70.21-2.87-56-37.34L240.22 199v121.85c25.3 12.54 22.26 41.85 9.08 55a34.34 34.34 0 01-48.55 0c-17.57-17.6-11.07-46.91 11.25-56v-123c-20.8-8.51-24.6-30.74-18.64-45L142.57 101 8.45 235.14a28.86 28.86 0 000 40.81l195.61 195.6a28.86 28.86 0 0040.8 0l194.69-194.69a28.86 28.86 0 000-40.81z"/></svg> </div> <div class=md-source__repository> 吾生也有涯，而知也无涯。 </div> </a> </div> <ul class=md-nav__list data-md-scrollfix> <li class=md-nav__item> <a href=../.. 
title=主页 class=md-nav__link> 主页 </a> </li> <li class="md-nav__item md-nav__item--nested"> <input class="md-nav__toggle md-toggle" data-md-toggle=nav-2 type=checkbox id=nav-2> <label class=md-nav__link for=nav-2> 系统基础 <span class="md-nav__icon md-icon"></span> </label> <nav class=md-nav aria-label=系统基础 data-md-level=1> <label class=md-nav__title for=nav-2> <span class="md-nav__icon md-icon"></span> 系统基础 </label> <ul class=md-nav__list data-md-scrollfix> <li class=md-nav__item> <a href=../../%E7%B3%BB%E7%BB%9F/Linux/ title=Linux class=md-nav__link> Linux </a> </li> <li class=md-nav__item> <a href=../../%E7%B3%BB%E7%BB%9F/Shell/ title=Shell class=md-nav__link> Shell </a> </li> </ul> </nav> </li> <li class="md-nav__item md-nav__item--nested"> <input class="md-nav__toggle md-toggle" data-md-toggle=nav-3 type=checkbox id=nav-3> <label class=md-nav__link for=nav-3> 编程语言 <span class="md-nav__icon md-icon"></span> </label> <nav class=md-nav aria-label=编程语言 data-md-level=1> <label class=md-nav__title for=nav-3> <span class="md-nav__icon md-icon"></span> 编程语言 </label> <ul class=md-nav__list data-md-scrollfix> <li class=md-nav__item> <a href=../../%E5%BF%85%E5%A4%87%E6%8A%80%E6%9C%AF/JavaSE/ title=Java class=md-nav__link> Java </a> </li> <li class=md-nav__item> <a href=../../%E5%BF%85%E5%A4%87%E6%8A%80%E6%9C%AF/Scala/ title=Scala class=md-nav__link> Scala </a> </li> <li class=md-nav__item> <a href=../../%E5%BF%85%E5%A4%87%E6%8A%80%E6%9C%AF/Python/ title=Python class=md-nav__link> Python </a> </li> <li class=md-nav__item> <a href=../../%E5%BF%85%E5%A4%87%E6%8A%80%E6%9C%AF/Kotlin/ title=Kotlin class=md-nav__link> Kotlin </a> </li> </ul> </nav> </li> <li class="md-nav__item md-nav__item--nested"> <input class="md-nav__toggle md-toggle" data-md-toggle=nav-4 type=checkbox id=nav-4> <label class=md-nav__link for=nav-4> 前端技术 <span class="md-nav__icon md-icon"></span> </label> <nav class=md-nav aria-label=前端技术 data-md-level=1> <label class=md-nav__title for=nav-4> <span 
class="md-nav__icon md-icon"></span> 前端技术 </label> <ul class=md-nav__list data-md-scrollfix> <li class=md-nav__item> <a href=../../%E5%89%8D%E7%AB%AF/Vue/ title=Vue class=md-nav__link> Vue </a> </li> <li class=md-nav__item> <a href=../../%E5%89%8D%E7%AB%AF/Element-UI/ title=Element-UI-admin class=md-nav__link> Element-UI-admin </a> </li> </ul> </nav> </li> <li class="md-nav__item md-nav__item--nested"> <input class="md-nav__toggle md-toggle" data-md-toggle=nav-5 type=checkbox id=nav-5> <label class=md-nav__link for=nav-5> 后端技术 <span class="md-nav__icon md-icon"></span> </label> <nav class=md-nav aria-label=后端技术 data-md-level=1> <label class=md-nav__title for=nav-5> <span class="md-nav__icon md-icon"></span> 后端技术 </label> <ul class=md-nav__list data-md-scrollfix> <li class=md-nav__item> <a href=../../%E5%90%8E%E7%AB%AF/SpringBoot/ title=SpringBoot class=md-nav__link> SpringBoot </a> </li> <li class=md-nav__item> <a href=../../%E5%90%8E%E7%AB%AF/Mybatis/ title=Mybatis class=md-nav__link> Mybatis </a> </li> <li class=md-nav__item> <a href=../../%E5%90%8E%E7%AB%AF/SpringData/ title=SpringData class=md-nav__link> SpringData </a> </li> </ul> </nav> </li> <li class="md-nav__item md-nav__item--nested"> <input class="md-nav__toggle md-toggle" data-md-toggle=nav-6 type=checkbox id=nav-6> <label class=md-nav__link for=nav-6> 中间件 <span class="md-nav__icon md-icon"></span> </label> <nav class=md-nav aria-label=中间件 data-md-level=1> <label class=md-nav__title for=nav-6> <span class="md-nav__icon md-icon"></span> 中间件 </label> <ul class=md-nav__list data-md-scrollfix> <li class=md-nav__item> <a href=../../%E4%B8%AD%E9%97%B4%E4%BB%B6/Nginx/ title=Nginx class=md-nav__link> Nginx </a> </li> <li class=md-nav__item> <a href=../../%E4%B8%AD%E9%97%B4%E4%BB%B6/Tomcat/ title=Tomcat class=md-nav__link> Tomcat </a> </li> <li class=md-nav__item> <a href=../../%E4%B8%AD%E9%97%B4%E4%BB%B6/RabbitMQ/ title=RabbitMQ class=md-nav__link> RabbitMQ </a> </li> <li class=md-nav__item> <a 
href=../../%E4%B8%AD%E9%97%B4%E4%BB%B6/RocketMQ/ title=RocketMQ class=md-nav__link> RocketMQ </a> </li> </ul> </nav> </li> <li class="md-nav__item md-nav__item--active md-nav__item--nested"> <input class="md-nav__toggle md-toggle" data-md-toggle=nav-7 type=checkbox id=nav-7 checked> <label class=md-nav__link for=nav-7> 大数据 <span class="md-nav__icon md-icon"></span> </label> <nav class=md-nav aria-label=大数据 data-md-level=1> <label class=md-nav__title for=nav-7> <span class="md-nav__icon md-icon"></span> 大数据 </label> <ul class=md-nav__list data-md-scrollfix> <li class="md-nav__item md-nav__item--nested"> <input class="md-nav__toggle md-toggle" data-md-toggle=nav-7-1 type=checkbox id=nav-7-1> <label class=md-nav__link for=nav-7-1> Hadoop <span class="md-nav__icon md-icon"></span> </label> <nav class=md-nav aria-label=Hadoop data-md-level=2> <label class=md-nav__title for=nav-7-1> <span class="md-nav__icon md-icon"></span> Hadoop </label> <ul class=md-nav__list data-md-scrollfix> <li class=md-nav__item> <a href=../Hadoop%E5%9F%BA%E7%A1%80/ title=入门 class=md-nav__link> 入门 </a> </li> <li class=md-nav__item> <a href=../Hadoop-HDFS/ title=HDFS class=md-nav__link> HDFS </a> </li> <li class=md-nav__item> <a href=../Hadoop-MapReduce/ title=MapReduce class=md-nav__link> MapReduce </a> </li> </ul> </nav> </li> <li class="md-nav__item md-nav__item--nested"> <input class="md-nav__toggle md-toggle" data-md-toggle=nav-7-2 type=checkbox id=nav-7-2> <label class=md-nav__link for=nav-7-2> Spark <span class="md-nav__icon md-icon"></span> </label> <nav class=md-nav aria-label=Spark data-md-level=2> <label class=md-nav__title for=nav-7-2> <span class="md-nav__icon md-icon"></span> Spark </label> <ul class=md-nav__list data-md-scrollfix> <li class=md-nav__item> <a href=../Spark%E5%9F%BA%E7%A1%80/ title=Spark基础 class=md-nav__link> Spark基础 </a> </li> <li class=md-nav__item> <a href=../SparkCore/ title=SparkCore class=md-nav__link> SparkCore </a> </li> <li class=md-nav__item> <a 
href=../SparkSql/ title=SparkSql class=md-nav__link> SparkSql </a> </li> <li class=md-nav__item> <a href=../SparkStreaming/ title=SparkStreaming class=md-nav__link> SparkStreaming </a> </li> </ul> </nav> </li> <li class=md-nav__item> <a href=../Flink/ title=Flink class=md-nav__link> Flink </a> </li> <li class=md-nav__item> <a href=../Zookeeper/ title=Zookeeper class=md-nav__link> Zookeeper </a> </li> <li class=md-nav__item> <a href=../Hbase/ title=HBase class=md-nav__link> HBase </a> </li> <li class=md-nav__item> <a href=../Hive/ title=Hive class=md-nav__link> Hive </a> </li> <li class=md-nav__item> <a href=../Kafka/ title=Kafka class=md-nav__link> Kafka </a> </li> <li class="md-nav__item md-nav__item--active"> <input class="md-nav__toggle md-toggle" data-md-toggle=toc type=checkbox id=__toc> <label class="md-nav__link md-nav__link--active" for=__toc> Flume <span class="md-nav__icon md-icon"></span> </label> <a href=./ title=Flume class="md-nav__link md-nav__link--active"> Flume </a> <nav class="md-nav md-nav--secondary" aria-label=目录> <label class=md-nav__title for=__toc> <span class="md-nav__icon md-icon"></span> 目录 </label> <ul class=md-nav__list data-md-scrollfix> <li class=md-nav__item> <a href=#11-flume class=md-nav__link> 1.1 Flume定义 </a> </li> <li class=md-nav__item> <a href=#12-flume class=md-nav__link> 1.2 Flume组成架构 </a> <nav class=md-nav aria-label="1.2 Flume组成架构"> <ul class=md-nav__list> <li class=md-nav__item> <a href=#121-agent class=md-nav__link> 1.2.1 Agent </a> </li> <li class=md-nav__item> <a href=#122-source class=md-nav__link> 1.2.2 Source </a> </li> <li class=md-nav__item> <a href=#123-channel class=md-nav__link> 1.2.3 Channel </a> </li> <li class=md-nav__item> <a href=#124-sink class=md-nav__link> 1.2.4 Sink </a> </li> <li class=md-nav__item> <a href=#125-event class=md-nav__link> 1.2.5 Event </a> </li> </ul> </nav> </li> <li class=md-nav__item> <a href=#13-flume class=md-nav__link> 1.3 Flume拓扑结构 </a> </li> <li class=md-nav__item> <a 
href=#14-flume-agent class=md-nav__link> 1.4 Flume Agent内部原理 </a> </li> </ul> </nav> </li> <li class=md-nav__item> <a href=../Sqoop/ title=Sqoop class=md-nav__link> Sqoop </a> </li> <li class=md-nav__item> <a href=../Oozie/ title=Oozie class=md-nav__link> Oozie </a> </li> <li class=md-nav__item> <a href=../Azkaban/ title=Azkaban class=md-nav__link> Azkaban </a> </li> <li class=md-nav__item> <a href=../Kylin/ title=Kylin class=md-nav__link> Kylin </a> </li> <li class=md-nav__item> <a href=../Presto/ title=Presto class=md-nav__link> Presto </a> </li> <li class=md-nav__item> <a href=../ELK/ title=ELK class=md-nav__link> ELK </a> </li> </ul> </nav> </li> <li class="md-nav__item md-nav__item--nested"> <input class="md-nav__toggle md-toggle" data-md-toggle=nav-8 type=checkbox id=nav-8> <label class=md-nav__link for=nav-8> 数据库 <span class="md-nav__icon md-icon"></span> </label> <nav class=md-nav aria-label=数据库 data-md-level=1> <label class=md-nav__title for=nav-8> <span class="md-nav__icon md-icon"></span> 数据库 </label> <ul class=md-nav__list data-md-scrollfix> <li class=md-nav__item> <a href=../../%E5%BF%85%E5%A4%87%E6%8A%80%E6%9C%AF/Mysql%E5%AE%89%E8%A3%85/ title=Mysql安装 class=md-nav__link> Mysql安装 </a> </li> <li class=md-nav__item> <a href=../../%E5%BF%85%E5%A4%87%E6%8A%80%E6%9C%AF/Oracle%E5%AE%89%E8%A3%85/ title=Oracle安装 class=md-nav__link> Oracle安装 </a> </li> <li class=md-nav__item> <a href=../../%E5%BF%85%E5%A4%87%E6%8A%80%E6%9C%AF/Redis%E5%AE%89%E8%A3%85/ title=Redis安装 class=md-nav__link> Redis安装 </a> </li> </ul> </nav> </li> <li class="md-nav__item md-nav__item--nested"> <input class="md-nav__toggle md-toggle" data-md-toggle=nav-9 type=checkbox id=nav-9> <label class=md-nav__link for=nav-9> 项目开发 <span class="md-nav__icon md-icon"></span> </label> <nav class=md-nav aria-label=项目开发 data-md-level=1> <label class=md-nav__title for=nav-9> <span class="md-nav__icon md-icon"></span> 项目开发 </label> <ul class=md-nav__list data-md-scrollfix> <li class=md-nav__item> <a 
href=../../%E9%A1%B9%E7%9B%AE%E5%BC%80%E5%8F%91/Web%E5%BC%80%E5%8F%91/ title=Web开发 class=md-nav__link> Web开发 </a> </li> <li class=md-nav__item> <a href=../../%E9%A1%B9%E7%9B%AE%E5%BC%80%E5%8F%91/%E5%AE%89%E5%8D%93%E5%BC%80%E5%8F%91/ title=安卓开发 class=md-nav__link> 安卓开发 </a> </li> <li class=md-nav__item> <a href=../../%E9%A1%B9%E7%9B%AE%E5%BC%80%E5%8F%91/PC%E7%AB%AF%E5%BC%80%E5%8F%91/ title=PC端开发 class=md-nav__link> PC端开发 </a> </li> <li class=md-nav__item> <a href=../../%E9%A1%B9%E7%9B%AE%E5%BC%80%E5%8F%91/%E5%A4%A7%E6%95%B0%E6%8D%AE%E5%BC%80%E5%8F%91/ title=大数据开发 class=md-nav__link> 大数据开发 </a> </li> </ul> </nav> </li> <li class="md-nav__item md-nav__item--nested"> <input class="md-nav__toggle md-toggle" data-md-toggle=nav-10 type=checkbox id=nav-10> <label class=md-nav__link for=nav-10> 必备技能 <span class="md-nav__icon md-icon"></span> </label> <nav class=md-nav aria-label=必备技能 data-md-level=1> <label class=md-nav__title for=nav-10> <span class="md-nav__icon md-icon"></span> 必备技能 </label> <ul class=md-nav__list data-md-scrollfix> <li class=md-nav__item> <a href=../../%E5%BF%85%E5%A4%87%E6%8A%80%E6%9C%AF/Git/ title=Git class=md-nav__link> Git </a> </li> <li class=md-nav__item> <a href=../../%E5%BF%85%E5%A4%87%E6%8A%80%E6%9C%AF/Zabbix5/ title=Zabbix class=md-nav__link> Zabbix </a> </li> </ul> </nav> </li> <li class="md-nav__item md-nav__item--nested"> <input class="md-nav__toggle md-toggle" data-md-toggle=nav-11 type=checkbox id=nav-11> <label class=md-nav__link for=nav-11> 常用工具 <span class="md-nav__icon md-icon"></span> </label> <nav class=md-nav aria-label=常用工具 data-md-level=1> <label class=md-nav__title for=nav-11> <span class="md-nav__icon md-icon"></span> 常用工具 </label> <ul class=md-nav__list data-md-scrollfix> <li class=md-nav__item> <a href=../../%E5%B8%B8%E7%94%A8%E5%B7%A5%E5%85%B7/IDEA/ title=IDEA class=md-nav__link> IDEA </a> </li> <li class=md-nav__item> <a href=../../%E5%B8%B8%E7%94%A8%E5%B7%A5%E5%85%B7/Wiki/ title=Wiki class=md-nav__link> Wiki </a> </li> 
</ul> </nav> </li> <li class="md-nav__item md-nav__item--nested"> <input class="md-nav__toggle md-toggle" data-md-toggle=nav-12 type=checkbox id=nav-12> <label class=md-nav__link for=nav-12> 面试题库 <span class="md-nav__icon md-icon"></span> </label> <nav class=md-nav aria-label=面试题库 data-md-level=1> <label class=md-nav__title for=nav-12> <span class="md-nav__icon md-icon"></span> 面试题库 </label> <ul class=md-nav__list data-md-scrollfix> <li class=md-nav__item> <a href=../../%E5%B8%B8%E7%94%A8%E5%B7%A5%E5%85%B7/IDEA/ title=编程语言 class=md-nav__link> 编程语言 </a> </li> <li class=md-nav__item> <a href=../../%E5%B8%B8%E7%94%A8%E5%B7%A5%E5%85%B7/IDEA/ title=系统操作 class=md-nav__link> 系统操作 </a> </li> <li class=md-nav__item> <a href=../../%E5%B8%B8%E7%94%A8%E5%B7%A5%E5%85%B7/IDEA/ title=数据库 class=md-nav__link> 数据库 </a> </li> <li class=md-nav__item> <a href=../../%E5%B8%B8%E7%94%A8%E5%B7%A5%E5%85%B7/IDEA/ title=大数据 class=md-nav__link> 大数据 </a> </li> <li class=md-nav__item> <a href=../../%E5%B8%B8%E7%94%A8%E5%B7%A5%E5%85%B7/IDEA/ title=项目 class=md-nav__link> 项目 </a> </li> </ul> </nav> </li> </ul> </nav> </div> </div> </div> <div class="md-sidebar md-sidebar--secondary" data-md-component=toc> <div class=md-sidebar__scrollwrap> <div class=md-sidebar__inner> <nav class="md-nav md-nav--secondary" aria-label=目录> <label class=md-nav__title for=__toc> <span class="md-nav__icon md-icon"></span> 目录 </label> <ul class=md-nav__list data-md-scrollfix> <li class=md-nav__item> <a href=#11-flume class=md-nav__link> 1.1 Flume定义 </a> </li> <li class=md-nav__item> <a href=#12-flume class=md-nav__link> 1.2 Flume组成架构 </a> <nav class=md-nav aria-label="1.2 Flume组成架构"> <ul class=md-nav__list> <li class=md-nav__item> <a href=#121-agent class=md-nav__link> 1.2.1 Agent </a> </li> <li class=md-nav__item> <a href=#122-source class=md-nav__link> 1.2.2 Source </a> </li> <li class=md-nav__item> <a href=#123-channel class=md-nav__link> 1.2.3 Channel </a> </li> <li class=md-nav__item> <a href=#124-sink 
class=md-nav__link> 1.2.4 Sink </a> </li> <li class=md-nav__item> <a href=#125-event class=md-nav__link> 1.2.5 Event </a> </li> </ul> </nav> </li> <li class=md-nav__item> <a href=#13-flume class=md-nav__link> 1.3 Flume拓扑结构 </a> </li> <li class=md-nav__item> <a href=#14-flume-agent class=md-nav__link> 1.4 Flume Agent内部原理 </a> </li> </ul> </nav> </div> </div> </div> <div class=md-content> <article class="md-content__inner md-typeset"> <p>大数据技术之Flume</p> <h1 id=1>第1章 概述</h1> <h2 id=11-flume>1.1 Flume定义</h2> <p>Flume是Cloudera提供的一个高可用的，高可靠的，分布式的海量日志采集、聚合和传输的系统。Flume基于流式架构，灵活简单。</p> <h2 id=12-flume>1.2 Flume组成架构</h2> <p>Flume组成架构如图1-1，图1-2所示：</p> <p><img alt=1528772665(1) src=5615e1bf5b09ae6c7f0b9887eca6df7b.png></p> <p>图1-1 Flume组成架构</p> <p>图1-2 Flume组成架构详解</p> <p>下面我们来详细介绍一下Flume架构中的组件。</p> <h3 id=121-agent>1.2.1 Agent</h3> <p>Agent是一个JVM进程，它以事件的形式将数据从源头送至目的地，是Flume数据传输的基本单元。</p> <p>Agent主要由3个部分组成，Source、Channel、Sink。</p> <h3 id=122-source>1.2.2 Source</h3> <p>Source是负责接收数据到Flume Agent的组件。Source组件可以处理各种类型、各种格式的日志数据，包括avro、thrift、exec、jms、spooling directory、netcat、sequence generator、syslog、http、legacy。</p> <h3 id=123-channel>1.2.3 Channel</h3> <p>Channel是位于Source和Sink之间的缓冲区。因此，Channel允许Source和Sink运作在不同的速率上。Channel是线程安全的，可以同时处理几个Source的写入操作和几个Sink的读取操作。</p> <p>Flume自带两种Channel：Memory Channel和File Channel。</p> <p>Memory Channel是内存中的队列。Memory Channel在不需要关心数据丢失的情景下适用。如果需要关心数据丢失，那么Memory Channel就不应该使用，因为程序死亡、机器宕机或者重启都会导致数据丢失。</p> <p>File Channel将所有事件写到磁盘。因此在程序关闭或机器宕机的情况下不会丢失数据。</p> <h3 id=124-sink>1.2.4 Sink</h3> <p>Sink不断地轮询Channel中的事件且批量地移除它们，并将这些事件批量写入到存储或索引系统、或者被发送到另一个Flume Agent。</p> <p>Sink是完全事务性的。在从Channel批量删除数据之前，每个Sink用Channel启动一个事务。批量事件一旦成功写出到存储系统或下一个Flume Agent，Sink就利用Channel提交事务。事务一旦被提交，该Channel从自己的内部缓冲区删除事件。</p> <p>Sink组件目的地包括hdfs、logger、avro、thrift、ipc、file、null、HBase、solr、自定义。</p> <h3 id=125-event>1.2.5 Event</h3> <p>传输单元，Flume数据传输的基本单元，以事件的形式将数据从源头送至目的地。</p> <h2 id=13-flume>1.3 Flume拓扑结构</h2> <p>Flume的拓扑结构如图1-3、1-4、1-5和1-6所示：</p> <p><img alt 
src=09e3c2d3795608d2e1c7f5b3e468c493.png></p> <p>图1-3 Flume Agent连接</p> <p><img alt src=67e9832ba792ea500cc2674a791f0575.png></p> <p>图1-4 单source，多channel、sink</p> <p><img alt src=1e2edc63b6e5a19e8eb5fcbdc6137d2b.png></p> <p>图1-5 Flume负载均衡</p> <p><img alt src=ce73dd725d6a5201fb94c56770ed1828.png></p> <p>图1-6 Flume Agent聚合</p> <h2 id=14-flume-agent>1.4 Flume Agent内部原理</h2> <h1 id=2>第2章 快速入门</h1> <h2 id=21-flume>2.1 Flume安装地址</h2> <p>1） Flume官网地址</p> <p><a href=http://flume.apache.org/ >http://flume.apache.org/</a></p> <p>2）文档查看地址</p> <p><a href=http://flume.apache.org/FlumeUserGuide.html>http://flume.apache.org/FlumeUserGuide.html</a></p> <p>3）下载地址</p> <p><a href=http://archive.apache.org/dist/flume/ >http://archive.apache.org/dist/flume/</a></p> <h2 id=22>2.2 安装部署</h2> <blockquote> <p>1）将apache-flume-1.7.0-bin.tar.gz上传到linux的/opt/software目录下</p> <p>2）解压apache-flume-1.7.0-bin.tar.gz到/opt/module/目录下</p> <p>[atguigu\@hadoop102 software]$ tar -zxf apache-flume-1.7.0-bin.tar.gz -C /opt/module/</p> <p>3）修改apache-flume-1.7.0-bin的名称为flume</p> <p>[atguigu\@hadoop102 module]$ mv apache-flume-1.7.0-bin flume</p> </blockquote> <ol> <li>将flume/conf下的flume-env.sh.template文件修改为flume-env.sh，并配置flume-env.sh文件</li> </ol> <blockquote> <p>[atguigu\@hadoop102 conf]$ mv flume-env.sh.template flume-env.sh</p> <p>[atguigu\@hadoop102 conf]$ vi flume-env.sh</p> <p>export JAVA_HOME=/opt/module/jdk1.8.0_144</p> </blockquote> <h1 id=3>第3章 企业开发案例</h1> <h2 id=31>3.1 监控端口数据官方案例</h2> <p>1）案例需求：首先，Flume监控本机44444端口，然后通过telnet工具向本机44444端口发送消息，最后Flume将监听的数据实时显示在控制台。</p> <p>2）需求分析：</p> <p>3）实现步骤：</p> <p>1．安装telnet工具</p> <p>将rpm软件包(xinetd-2.3.14-40.el6.x86_64.rpm、telnet-0.17-48.el6.x86_64.rpm和telnet-server-0.17-48.el6.x86_64.rpm)拷入/opt/software文件夹下面。执行RPM软件包安装命令：</p> <blockquote> <p>[atguigu\@hadoop102 software]$ sudo rpm -ivh xinetd-2.3.14-40.el6.x86_64.rpm</p> <p>[atguigu\@hadoop102 software]$ sudo rpm -ivh telnet-0.17-48.el6.x86_64.rpm</p> <p>[atguigu\@hadoop102 software]$ sudo rpm -ivh 
telnet-server-0.17-48.el6.x86_64.rpm</p> <p>2．判断44444端口是否被占用</p> <p>[atguigu\@hadoop102 flume-telnet]$ sudo netstat -tunlp | grep 44444</p> </blockquote> <p>功能描述：netstat命令是一个监控TCP/IP网络的非常有用的工具，它可以显示路由表、实际的网络连接以及每一个网络接口设备的状态信息。</p> <p>基本语法：netstat [选项]</p> <p>选项参数：</p> <p>-t或&ndash;tcp：显示TCP传输协议的连线状况；</p> <blockquote> <p>-u或&ndash;udp：显示UDP传输协议的连线状况；</p> </blockquote> <p>-n或&ndash;numeric：直接使用ip地址，而不通过域名服务器；</p> <p>-l或&ndash;listening：显示监控中的服务器的Socket；</p> <p>-p或&ndash;programs：显示正在使用Socket的程序识别码和程序名称；</p> <blockquote> <p>3．创建Flume Agent配置文件flume-telnet-logger.conf</p> </blockquote> <p>在flume目录下创建job文件夹并进入job文件夹。</p> <blockquote> <p>[atguigu\@hadoop102 flume]$ mkdir job</p> <p>[atguigu\@hadoop102 flume]$ cd job/</p> </blockquote> <p>在job文件夹下创建Flume Agent配置文件flume-telnet-logger.conf。</p> <blockquote> <p>[atguigu\@hadoop102 job]$ touch flume-telnet-logger.conf</p> </blockquote> <p>在flume-telnet-logger.conf文件中添加如下内容。</p> <blockquote> <p>[atguigu\@hadoop102 job]$ vim flume-telnet-logger.conf</p> </blockquote> <p>添加内容如下：</p> <blockquote> <p># Name the components on this agent</p> <p>a1.sources = r1</p> <p>a1.sinks = k1</p> <p>a1.channels = c1</p> <p># Describe/configure the source</p> <p>a1.sources.r1.type = netcat</p> <p>a1.sources.r1.bind = localhost</p> <p>a1.sources.r1.port = 44444</p> <p># Describe the sink</p> <p>a1.sinks.k1.type = logger</p> <p># Use a channel which buffers events in memory</p> <p>a1.channels.c1.type = memory</p> <p>a1.channels.c1.capacity = 1000</p> <p>a1.channels.c1.transactionCapacity = 100</p> <p># Bind the source and sink to the channel</p> <p>a1.sources.r1.channels = c1</p> <p>a1.sinks.k1.channel = c1</p> </blockquote> <p>注：配置文件来源于官方手册<a href=http://flume.apache.org/FlumeUserGuide.html>http://flume.apache.org/FlumeUserGuide.html</a></p> <ol> <li>先开启flume监听端口</li> </ol> <blockquote> <p>[atguigu\@hadoop102 flume]$ bin/flume-ng agent &ndash;conf conf/ &ndash;name a1 &ndash;conf-file job/flume-telnet-logger.conf 
-Dflume.root.logger=INFO,console</p> </blockquote> <p>参数说明：</p> <p>--conf conf/ ：表示配置文件存储在conf/目录</p> <p>--name a1 ：表示给agent起名为a1</p> <p>--conf-file job/flume-telnet.conf ：flume本次启动读取的配置文件是在job文件夹下的flume-telnet.conf文件。</p> <p>-Dflume.root.logger==INFO,console ：-D表示flume运行时动态修改flume.root.logger参数属性值，并将控制台日志打印级别设置为INFO级别。日志级别包括:log、info、warn、error。</p> <blockquote> <p>5．使用telnet工具向本机的44444端口发送内容</p> <p>[atguigu\@hadoop102 ~]$ telnet localhost 44444</p> </blockquote> <p><img alt src=02897b7a6adf6bb6303db71f70af95ca.png></p> <blockquote> <p>6．在Flume监听页面观察接收数据情况</p> </blockquote> <p><img alt src=00fbf0bdf2125c19b9ecf14e27b9a4f7.png></p> <h2 id=32-hdfs>3.2 实时读取本地文件到HDFS案例</h2> <p>1）案例需求：实时监控Hive日志，并上传到HDFS中</p> <p>2）需求分析：</p> <p>3）实现步骤：</p> <ol> <li>Flume要想将数据输出到HDFS，必须持有Hadoop相关jar包</li> </ol> <p>将commons-configuration-1.6.jar、</p> <p>hadoop-auth-2.7.2.jar、</p> <p>hadoop-common-2.7.2.jar、</p> <p>hadoop-hdfs-2.7.2.jar、</p> <p>commons-io-2.4.jar、</p> <p>htrace-core-3.1.0-incubating.jar</p> <p>拷贝到/opt/module/flume/lib文件夹下。</p> <ol> <li>创建flume-file-hdfs.conf文件</li> </ol> <p>创建文件</p> <blockquote> <p>[atguigu\@hadoop102 job]$ touch flume-file-hdfs.conf</p> </blockquote> <p>注：要想读取Linux系统中的文件，就得按照Linux命令的规则执行命令。由于Hive日志在Linux系统中所以读取文件的类型选择：exec即execute执行的意思。表示执行Linux命令来读取文件。</p> <blockquote> <p>[atguigu\@hadoop102 job]$ vim flume-file-hdfs.conf</p> </blockquote> <p>添加如下内容</p> <blockquote> <p># Name the components on this agent</p> <p>a2.sources = r2</p> <p>a2.sinks = k2</p> <p>a2.channels = c2</p> <p># Describe/configure the source</p> <p>a2.sources.r2.type = exec</p> <p>a2.sources.r2.command = tail -F /opt/module/hive/logs/hive.log</p> <p>a2.sources.r2.shell = /bin/bash -c</p> <p># Describe the sink</p> <p>a2.sinks.k2.type = hdfs</p> <p>a2.sinks.k2.hdfs.path = hdfs://hadoop102:9000/flume/%Y%m%d/%H</p> <p>#上传文件的前缀</p> <p>a2.sinks.k2.hdfs.filePrefix = logs-</p> <p>#是否按照时间滚动文件夹</p> <p>a2.sinks.k2.hdfs.round = true</p> <p>#多少时间单位创建一个新的文件夹</p> <p>a2.sinks.k2.hdfs.roundValue = 
1</p> <p>#重新定义时间单位</p> <p>a2.sinks.k2.hdfs.roundUnit = hour</p> <p>#是否使用本地时间戳</p> <p>a2.sinks.k2.hdfs.useLocalTimeStamp = true</p> <p>#积攒多少个Event才flush到HDFS一次</p> <p>a2.sinks.k2.hdfs.batchSize = 1000</p> <p>#设置文件类型，可支持压缩</p> <p>a2.sinks.k2.hdfs.fileType = DataStream</p> <p>#多久生成一个新的文件</p> <p>a2.sinks.k2.hdfs.rollInterval = 600</p> <p>#设置每个文件的滚动大小</p> <p>a2.sinks.k2.hdfs.rollSize = 134217700</p> <p>#文件的滚动与Event数量无关</p> <p>a2.sinks.k2.hdfs.rollCount = 0</p> <p>#最小冗余数</p> <p>a2.sinks.k2.hdfs.minBlockReplicas = 1</p> <p># Use a channel which buffers events in memory</p> <p>a2.channels.c2.type = memory</p> <p>a2.channels.c2.capacity = 1000</p> <p>a2.channels.c2.transactionCapacity = 100</p> <p># Bind the source and sink to the channel</p> <p>a2.sources.r2.channels = c2</p> <p>a2.sinks.k2.channel = c2</p> </blockquote> <ol> <li>执行监控配置</li> </ol> <blockquote> <p>[atguigu\@hadoop102 flume]$ bin/flume-ng agent &ndash;conf conf/ &ndash;name a2 &ndash;conf-file job/flume-file-hdfs.conf</p> </blockquote> <ol> <li>开启Hadoop和Hive并操作Hive产生日志</li> </ol> <blockquote> <p>[atguigu\@hadoop102 hadoop-2.7.2]$ sbin/start-dfs.sh</p> <p>[atguigu\@hadoop103 hadoop-2.7.2]$ sbin/start-yarn.sh</p> <p>[atguigu\@hadoop102 hive]$ bin/hive</p> <p>hive (default)></p> </blockquote> <ol> <li>在HDFS上查看文件。</li> </ol> <p><img alt src=d09560db3abd5d676ad0ada2afc48000.png></p> <h2 id=33-hdfs>3.3 实时读取目录文件到HDFS案例</h2> <p>1）案例需求：使用Flume监听整个目录的文件</p> <p>2）需求分析：</p> <p>3）实现步骤：</p> <p>1．创建配置文件flume-dir-hdfs.conf</p> <p>创建一个文件</p> <blockquote> <p>[atguigu\@hadoop102 job]$ touch flume-dir-hdfs.conf</p> </blockquote> <p>打开文件</p> <blockquote> <p>[atguigu\@hadoop102 job]$ vim flume-dir-hdfs.conf</p> </blockquote> <p>添加如下内容</p> <blockquote> <p>a3.sources = r3</p> <p>a3.sinks = k3</p> <p>a3.channels = c3</p> <p># Describe/configure the source</p> <p>a3.sources.r3.type = spooldir</p> <p>a3.sources.r3.spoolDir = /opt/module/flume/upload</p> <p>a3.sources.r3.fileSuffix = .COMPLETED</p> <p>a3.sources.r3.fileHeader = 
true</p> <p>#忽略所有以.tmp结尾的文件，不上传</p> <p>a3.sources.r3.ignorePattern = ([^ ]*\.tmp)</p> <p># Describe the sink</p> <p>a3.sinks.k3.type = hdfs</p> <p>a3.sinks.k3.hdfs.path = hdfs://hadoop102:9000/flume/upload/%Y%m%d/%H</p> <p>#上传文件的前缀</p> <p>a3.sinks.k3.hdfs.filePrefix = upload-</p> <p>#是否按照时间滚动文件夹</p> <p>a3.sinks.k3.hdfs.round = true</p> <p>#多少时间单位创建一个新的文件夹</p> <p>a3.sinks.k3.hdfs.roundValue = 1</p> <p>#重新定义时间单位</p> <p>a3.sinks.k3.hdfs.roundUnit = hour</p> <p>#是否使用本地时间戳</p> <p>a3.sinks.k3.hdfs.useLocalTimeStamp = true</p> <p>#积攒多少个Event才flush到HDFS一次</p> <p>a3.sinks.k3.hdfs.batchSize = 100</p> <p>#设置文件类型，可支持压缩</p> <p>a3.sinks.k3.hdfs.fileType = DataStream</p> <p>#多久生成一个新的文件</p> <p>a3.sinks.k3.hdfs.rollInterval = 600</p> <p>#设置每个文件的滚动大小大概是128M</p> <p>a3.sinks.k3.hdfs.rollSize = 134217700</p> <p>#文件的滚动与Event数量无关</p> <p>a3.sinks.k3.hdfs.rollCount = 0</p> <p>#最小冗余数</p> <p>a3.sinks.k3.hdfs.minBlockReplicas = 1</p> <p># Use a channel which buffers events in memory</p> <p>a3.channels.c3.type = memory</p> <p>a3.channels.c3.capacity = 1000</p> <p>a3.channels.c3.transactionCapacity = 100</p> <p># Bind the source and sink to the channel</p> <p>a3.sources.r3.channels = c3</p> <p>a3.sinks.k3.channel = c3</p> </blockquote> <ol> <li>启动监控文件夹命令</li> </ol> <blockquote> <p>[atguigu\@hadoop102 flume]$ bin/flume-ng agent &ndash;conf conf/ &ndash;name a3 &ndash;conf-file job/flume-dir-hdfs.conf</p> </blockquote> <p>说明： 在使用Spooling Directory Source时</p> <ol> <li> <p>不要在监控目录中创建并持续修改文件</p> </li> <li> <p>上传完成的文件会以.COMPLETED结尾</p> </li> <li> <p>被监控文件夹每500毫秒扫描一次文件变动</p> </li> </ol> <blockquote> <ol> <li>向upload文件夹中添加文件</li> </ol> </blockquote> <p>在/opt/module/flume目录下创建upload文件夹</p> <blockquote> <p>[atguigu\@hadoop102 flume]$ mkdir upload</p> </blockquote> <p>向upload文件夹中添加文件</p> <blockquote> <p>[atguigu\@hadoop102 upload]$ touch atguigu.txt</p> <p>[atguigu\@hadoop102 upload]$ touch atguigu.tmp</p> <p>[atguigu\@hadoop102 upload]$ touch atguigu.log</p> <ol> <li>查看HDFS上的数据</li> </ol> </blockquote> 
<p><img alt src=3060a8a53a4b4f63406f016f8119cd70.png></p> <blockquote> <ol> <li>等待1s，再次查询upload文件夹</li> </ol> <p>[atguigu\@hadoop102 upload]$ ll</p> <p>总用量 0</p> <p>-rw-rw-r&ndash;. 1 atguigu atguigu 0 5月 20 22:31 atguigu.log.COMPLETED</p> <p>-rw-rw-r&ndash;. 1 atguigu atguigu 0 5月 20 22:31 atguigu.tmp</p> <p>-rw-rw-r&ndash;. 1 atguigu atguigu 0 5月 20 22:31 atguigu.txt.COMPLETED</p> </blockquote> <h2 id=34>3.4 单数据源多出口案例(选择器)</h2> <p>单Source多Channel、Sink如图7-2所示。</p> <p><img alt=UserGuide_image01 src=13074f5aeb2bea0c81da5738e02f9924.png></p> <p>图7-2 单Source多Channel、Sink</p> <p>1）案例需求：使用Flume-1监控文件变动，Flume-1将变动内容传递给Flume-2，Flume-2负责存储到HDFS。同时Flume-1将变动内容传递给Flume-3，Flume-3负责输出到Local FileSystem。</p> <p>2）需求分析：</p> <p>3）实现步骤：</p> <p>0．准备工作</p> <p>在/opt/module/flume/job目录下创建group1文件夹</p> <blockquote> <p>[atguigu\@hadoop102 job]$ cd group1/</p> </blockquote> <p>在/opt/module/datas/目录下创建flume3文件夹</p> <blockquote> <p>[atguigu\@hadoop102 datas]$ mkdir flume3</p> </blockquote> <p>1．创建flume-file-flume.conf</p> <p>配置1个接收日志文件的source和两个channel、两个sink，分别输送给flume-flume-hdfs和flume-flume-dir。</p> <blockquote> <p>创建配置文件并打开</p> <p>[atguigu\@hadoop102 group1]$ touch flume-file-flume.conf</p> <p>[atguigu\@hadoop102 group1]$ vim flume-file-flume.conf</p> <p>添加如下内容</p> <p># Name the components on this agent</p> <p>a1.sources = r1</p> <p>a1.sinks = k1 k2</p> <p>a1.channels = c1 c2</p> <p># 将数据流复制给所有channel</p> <p>a1.sources.r1.selector.type = replicating</p> <p># Describe/configure the source</p> <p>a1.sources.r1.type = exec</p> <p>a1.sources.r1.command = tail -F /opt/module/hive/logs/hive.log</p> <p>a1.sources.r1.shell = /bin/bash -c</p> <p># Describe the sink</p> <p>a1.sinks.k1.type = avro</p> <p>a1.sinks.k1.hostname = hadoop102</p> <p>a1.sinks.k1.port = 4141</p> <p>a1.sinks.k2.type = avro</p> <p>a1.sinks.k2.hostname = hadoop102</p> <p>a1.sinks.k2.port = 4142</p> <p># Describe the channel</p> <p>a1.channels.c1.type = memory</p> <p>a1.channels.c1.capacity = 1000</p> 
<p>a1.channels.c1.transactionCapacity = 100</p> <p>a1.channels.c2.type = memory</p> <p>a1.channels.c2.capacity = 1000</p> <p>a1.channels.c2.transactionCapacity = 100</p> <p># Bind the source and sink to the channel</p> <p>a1.sources.r1.channels = c1 c2</p> <p>a1.sinks.k1.channel = c1</p> <p>a1.sinks.k2.channel = c2</p> </blockquote> <p>注：Avro是由Hadoop创始人Doug Cutting创建的一种语言无关的数据序列化和RPC框架。</p> <p>注：RPC（Remote Procedure Call）—远程过程调用，它是一种通过网络从远程计算机程序上请求服务，而不需要了解底层网络技术的协议。</p> <p>2．创建flume-flume-hdfs.conf</p> <p>配置上级Flume输出的Source，输出是到HDFS的Sink。</p> <blockquote> <p>创建配置文件并打开</p> <p>[atguigu\@hadoop102 group1]$ touch flume-flume-hdfs.conf</p> <p>[atguigu\@hadoop102 group1]$ vim flume-flume-hdfs.conf</p> <p>添加如下内容</p> <p># Name the components on this agent</p> <p>a2.sources = r1</p> <p>a2.sinks = k1</p> <p>a2.channels = c1</p> <p># Describe/configure the source</p> <p>a2.sources.r1.type = avro</p> <p>a2.sources.r1.bind = hadoop102</p> <p>a2.sources.r1.port = 4141</p> <p># Describe the sink</p> <p>a2.sinks.k1.type = hdfs</p> <p>a2.sinks.k1.hdfs.path = hdfs://hadoop102:9000/flume2/%Y%m%d/%H</p> <p>#上传文件的前缀</p> <p>a2.sinks.k1.hdfs.filePrefix = flume2-</p> <p>#是否按照时间滚动文件夹</p> <p>a2.sinks.k1.hdfs.round = true</p> <p>#多少时间单位创建一个新的文件夹</p> <p>a2.sinks.k1.hdfs.roundValue = 1</p> <p>#重新定义时间单位</p> <p>a2.sinks.k1.hdfs.roundUnit = hour</p> <p>#是否使用本地时间戳</p> <p>a2.sinks.k1.hdfs.useLocalTimeStamp = true</p> <p>#积攒多少个Event才flush到HDFS一次</p> <p>a2.sinks.k1.hdfs.batchSize = 100</p> <p>#设置文件类型，可支持压缩</p> <p>a2.sinks.k1.hdfs.fileType = DataStream</p> <p>#多久生成一个新的文件</p> <p>a2.sinks.k1.hdfs.rollInterval = 600</p> <p>#设置每个文件的滚动大小大概是128M</p> <p>a2.sinks.k1.hdfs.rollSize = 134217700</p> <p>#文件的滚动与Event数量无关</p> <p>a2.sinks.k1.hdfs.rollCount = 0</p> <p>#最小冗余数</p> <p>a2.sinks.k1.hdfs.minBlockReplicas = 1</p> <p># Describe the channel</p> <p>a2.channels.c1.type = memory</p> <p>a2.channels.c1.capacity = 1000</p> <p>a2.channels.c1.transactionCapacity = 100</p> <p># Bind the source and sink to the 
channel</p> <p>a2.sources.r1.channels = c1</p> <p>a2.sinks.k1.channel = c1</p> </blockquote> <p>3．创建flume-flume-dir.conf</p> <p>配置上级Flume输出的Source，输出是到本地目录的Sink。</p> <blockquote> <p>创建配置文件并打开</p> <p>[atguigu\@hadoop102 group1]$ touch flume-flume-dir.conf</p> <p>[atguigu\@hadoop102 group1]$ vim flume-flume-dir.conf</p> <p>添加如下内容</p> <p># Name the components on this agent</p> <p>a3.sources = r1</p> <p>a3.sinks = k1</p> <p>a3.channels = c2</p> <p># Describe/configure the source</p> <p>a3.sources.r1.type = avro</p> <p>a3.sources.r1.bind = hadoop102</p> <p>a3.sources.r1.port = 4142</p> <p># Describe the sink</p> <p>a3.sinks.k1.type = file_roll</p> <p>a3.sinks.k1.sink.directory = /opt/module/datas/flume3</p> <p># Describe the channel</p> <p>a3.channels.c2.type = memory</p> <p>a3.channels.c2.capacity = 1000</p> <p>a3.channels.c2.transactionCapacity = 100</p> <p># Bind the source and sink to the channel</p> <p>a3.sources.r1.channels = c2</p> <p>a3.sinks.k1.channel = c2</p> </blockquote> <p>提示：输出的本地目录必须是已经存在的目录，如果该目录不存在，并不会创建新的目录。</p> <p>4．执行配置文件</p> <p>分别开启对应配置文件：flume-flume-dir，flume-flume-hdfs，flume-file-flume。</p> <blockquote> <p>[atguigu\@hadoop102 flume]$ bin/flume-ng agent &ndash;conf conf/ &ndash;name a3 &ndash;conf-file job/group1/flume-flume-dir.conf</p> <p>[atguigu\@hadoop102 flume]$ bin/flume-ng agent &ndash;conf conf/ &ndash;name a2 &ndash;conf-file job/group1/flume-flume-hdfs.conf</p> <p>[atguigu\@hadoop102 flume]$ bin/flume-ng agent &ndash;conf conf/ &ndash;name a1 &ndash;conf-file job/group1/flume-file-flume.conf</p> </blockquote> <p>5．启动Hadoop和Hive</p> <blockquote> <p>[atguigu\@hadoop102 hadoop-2.7.2]$ sbin/start-dfs.sh</p> <p>[atguigu\@hadoop103 hadoop-2.7.2]$ sbin/start-yarn.sh</p> <p>[atguigu\@hadoop102 hive]$ bin/hive</p> <p>hive (default)></p> </blockquote> <p>6．检查HDFS上数据</p> <p><img alt src=dd1917b61773c0e56e39cd9f5543040d.png></p> <p>7检查/opt/module/datas/flume3目录中数据</p> <blockquote> <p>[atguigu\@hadoop102 flume3]$ ll</p> <p>总用量 8</p> 
<p>-rw-rw-r&ndash;. 1 atguigu atguigu 5942 5月 22 00:09 1526918887550-3</p> </blockquote> <h2 id=35-sink>3.5 单数据源多出口案例(Sink组)</h2> <p>单Source、Channel多Sink(负载均衡)如图7-3所示。</p> <p><img alt src=1e2edc63b6e5a19e8eb5fcbdc6137d2b.png></p> <p>图7-3 单Source、Channel多Sink</p> <p>1）案例需求：使用Flume-1监控文件变动，Flume-1将变动内容传递给Flume-2，Flume-2负责存储到HDFS。同时Flume-1将变动内容传递给Flume-3，Flume-3也负责存储到HDFS</p> <p>2）需求分析：</p> <p>3）实现步骤：</p> <p>0．准备工作</p> <p>在/opt/module/flume/job目录下创建group2文件夹</p> <blockquote> <p>[atguigu\@hadoop102 job]$ cd group2/</p> </blockquote> <p>1．创建flume-netcat-flume.conf</p> <p>配置1个接收日志文件的source和1个channel、两个sink，分别输送给flume-flume-console1和flume-flume-console2。</p> <blockquote> <p>创建配置文件并打开</p> <p>[atguigu\@hadoop102 group2]$ touch flume-netcat-flume.conf</p> <p>[atguigu\@hadoop102 group2]$ vim flume-netcat-flume.conf</p> <p>添加如下内容</p> <p># Name the components on this agent</p> <p>a1.sources = r1</p> <p>a1.channels = c1</p> <p>a1.sinkgroups = g1</p> <p>a1.sinks = k1 k2</p> <p># Describe/configure the source</p> <p>a1.sources.r1.type = netcat</p> <p>a1.sources.r1.bind = localhost</p> <p>a1.sources.r1.port = 44444</p> <p>a1.sinkgroups.g1.processor.type = load_balance</p> <p>a1.sinkgroups.g1.processor.backoff = true</p> <p>a1.sinkgroups.g1.processor.selector = round_robin</p> <p>a1.sinkgroups.g1.processor.selector.maxTimeOut=10000</p> <p># Describe the sink</p> <p>a1.sinks.k1.type = avro</p> <p>a1.sinks.k1.hostname = hadoop102</p> <p>a1.sinks.k1.port = 4141</p> <p>a1.sinks.k2.type = avro</p> <p>a1.sinks.k2.hostname = hadoop102</p> <p>a1.sinks.k2.port = 4142</p> <p># Describe the channel</p> <p>a1.channels.c1.type = memory</p> <p>a1.channels.c1.capacity = 1000</p> <p>a1.channels.c1.transactionCapacity = 100</p> <p># Bind the source and sink to the channel</p> <p>a1.sources.r1.channels = c1</p> <p>a1.sinkgroups.g1.sinks = k1 k2</p> <p>a1.sinks.k1.channel = c1</p> <p>a1.sinks.k2.channel = c1</p> </blockquote> <p>注：Avro是由Hadoop创始人Doug Cutting创建的一种语言无关的数据序列化和RPC框架。</p> <p>注：RPC（Remote 
Procedure Call）—远程过程调用，它是一种通过网络从远程计算机程序上请求服务，而不需要了解底层网络技术的协议。</p> <p>2．创建flume-flume-console1.conf</p> <p>配置上级Flume输出的Source，输出是到本地控制台。</p> <blockquote> <p>创建配置文件并打开</p> <p>[atguigu\@hadoop102 group2]$ touch flume-flume-console1.conf</p> <p>[atguigu\@hadoop102 group2]$ vim flume-flume-console1.conf</p> <p>添加如下内容</p> <p># Name the components on this agent</p> <p>a2.sources = r1</p> <p>a2.sinks = k1</p> <p>a2.channels = c1</p> <p># Describe/configure the source</p> <p>a2.sources.r1.type = avro</p> <p>a2.sources.r1.bind = hadoop102</p> <p>a2.sources.r1.port = 4141</p> <p># Describe the sink</p> <p>a2.sinks.k1.type = logger</p> <p># Describe the channel</p> <p>a2.channels.c1.type = memory</p> <p>a2.channels.c1.capacity = 1000</p> <p>a2.channels.c1.transactionCapacity = 100</p> <p># Bind the source and sink to the channel</p> <p>a2.sources.r1.channels = c1</p> <p>a2.sinks.k1.channel = c1</p> </blockquote> <p>3．创建flume-flume-console2.conf</p> <p>配置上级Flume输出的Source，输出是到本地控制台。</p> <blockquote> <p>创建配置文件并打开</p> <p>[atguigu\@hadoop102 group2]$ touch flume-flume-console2.conf</p> <p>[atguigu\@hadoop102 group2]$ vim flume-flume-console2.conf</p> <p>添加如下内容</p> <p># Name the components on this agent</p> <p>a3.sources = r1</p> <p>a3.sinks = k1</p> <p>a3.channels = c2</p> <p># Describe/configure the source</p> <p>a3.sources.r1.type = avro</p> <p>a3.sources.r1.bind = hadoop102</p> <p>a3.sources.r1.port = 4142</p> <p># Describe the sink</p> <p>a3.sinks.k1.type = logger</p> <p># Describe the channel</p> <p>a3.channels.c2.type = memory</p> <p>a3.channels.c2.capacity = 1000</p> <p>a3.channels.c2.transactionCapacity = 100</p> <p># Bind the source and sink to the channel</p> <p>a3.sources.r1.channels = c2</p> <p>a3.sinks.k1.channel = c2</p> </blockquote> <p>4．执行配置文件</p> <p>分别开启对应配置文件：flume-flume-console2，flume-flume-console1，flume-netcat-flume。</p> <blockquote> <p>[atguigu\@hadoop102 flume]$ bin/flume-ng agent &ndash;conf conf/ &ndash;name a3 &ndash;conf-file 
job/group2/flume-flume-console2.conf -Dflume.root.logger=INFO,console</p> <p>[atguigu\@hadoop102 flume]$ bin/flume-ng agent &ndash;conf conf/ &ndash;name a2 &ndash;conf-file job/group2/flume-flume-console1.conf -Dflume.root.logger=INFO,console</p> <p>[atguigu\@hadoop102 flume]$ bin/flume-ng agent &ndash;conf conf/ &ndash;name a1 &ndash;conf-file job/group2/flume-netcat-flume.conf</p> </blockquote> <ol> <li>使用telnet工具向本机的44444端口发送内容</li> </ol> <blockquote> <p>$ telnet localhost 44444</p> </blockquote> <ol> <li>查看Flume2及Flume3的控制台打印日志</li> </ol> <h2 id=36>3.6 多数据源汇总案例</h2> <p>多Source汇总数据到单Flume如图7-4所示。</p> <p><img alt=UserGuide_image02 src=399477b09a431c477f67a769ae65974a.png></p> <p>图7-4多Flume汇总数据到单Flume</p> <ol> <li>案例需求：</li> </ol> <blockquote> <p>hadoop103上的Flume-1监控文件/opt/module/group.log，</p> <p>hadoop102上的Flume-2监控某一个端口的数据流，</p> <p>Flume-1与Flume-2将数据发送给hadoop104上的Flume-3，Flume-3将最终数据打印到控制台。</p> </blockquote> <p>2）需求分析：</p> <p>3）实现步骤：</p> <p>0．准备工作</p> <p>分发Flume</p> <blockquote> <p>[atguigu\@hadoop102 module]$ xsync flume</p> </blockquote> <p>在hadoop102、hadoop103以及hadoop104的/opt/module/flume/job目录下创建一个group3文件夹。</p> <blockquote> <p>[atguigu\@hadoop102 job]$ mkdir group3</p> <p>[atguigu\@hadoop103 job]$ mkdir group3</p> <p>[atguigu\@hadoop104 job]$ mkdir group3</p> </blockquote> <p>1．创建flume1-logger-flume.conf</p> <blockquote> <p>配置Source用于监控hive.log文件，配置Sink输出数据到下一级Flume。</p> </blockquote> <p>在hadoop103上创建配置文件并打开</p> <blockquote> <p>[atguigu\@hadoop103 group3]$ touch flume1-logger-flume.conf</p> <p>[atguigu\@hadoop103 group3]$ vim flume1-logger-flume.conf</p> <p>添加如下内容</p> <p># Name the components on this agent</p> <p>a1.sources = r1</p> <p>a1.sinks = k1</p> <p>a1.channels = c1</p> <p># Describe/configure the source</p> <p>a1.sources.r1.type = exec</p> <p>a1.sources.r1.command = tail -F /opt/module/group.log</p> <p>a1.sources.r1.shell = /bin/bash -c</p> <p># Describe the sink</p> <p>a1.sinks.k1.type = avro</p> <p>a1.sinks.k1.hostname = hadoop104</p> 
<p>a1.sinks.k1.port = 4141</p> <p># Describe the channel</p> <p>a1.channels.c1.type = memory</p> <p>a1.channels.c1.capacity = 1000</p> <p>a1.channels.c1.transactionCapacity = 100</p> <p># Bind the source and sink to the channel</p> <p>a1.sources.r1.channels = c1</p> <p>a1.sinks.k1.channel = c1</p> </blockquote> <p>2．创建flume2-netcat-flume.conf</p> <p>配置Source监控端口44444数据流，配置Sink数据到下一级Flume：</p> <blockquote> <p>在hadoop102上创建配置文件并打开</p> <p>[atguigu\@hadoop102 group3]$ touch flume2-netcat-flume.conf</p> <p>[atguigu\@hadoop102 group3]$ vim flume2-netcat-flume.conf</p> </blockquote> <p>添加如下内容</p> <blockquote> <p># Name the components on this agent</p> <p>a2.sources = r1</p> <p>a2.sinks = k1</p> <p>a2.channels = c1</p> <p># Describe/configure the source</p> <p>a2.sources.r1.type = netcat</p> <p>a2.sources.r1.bind = hadoop102</p> <p>a2.sources.r1.port = 44444</p> <p># Describe the sink</p> <p>a2.sinks.k1.type = avro</p> <p>a2.sinks.k1.hostname = hadoop104</p> <p>a2.sinks.k1.port = 4141</p> <p># Use a channel which buffers events in memory</p> <p>a2.channels.c1.type = memory</p> <p>a2.channels.c1.capacity = 1000</p> <p>a2.channels.c1.transactionCapacity = 100</p> <p># Bind the source and sink to the channel</p> <p>a2.sources.r1.channels = c1</p> <p>a2.sinks.k1.channel = c1</p> </blockquote> <p>3．创建flume3-flume-logger.conf</p> <p>配置source用于接收flume1与flume2发送过来的数据流，最终合并后sink到控制台。</p> <blockquote> <p>在hadoop104上创建配置文件并打开</p> <p>[atguigu\@hadoop104 group3]$ touch flume3-flume-logger.conf</p> <p>[atguigu\@hadoop104 group3]$ vim flume3-flume-logger.conf</p> </blockquote> <p>添加如下内容</p> <blockquote> <p># Name the components on this agent</p> <p>a3.sources = r1</p> <p>a3.sinks = k1</p> <p>a3.channels = c1</p> <p># Describe/configure the source</p> <p>a3.sources.r1.type = avro</p> <p>a3.sources.r1.bind = hadoop104</p> <p>a3.sources.r1.port = 4141</p> <p># Describe the sink</p> <p># Describe the sink</p> <p>a3.sinks.k1.type = logger</p> <p># Describe the channel</p> 
<p>a3.channels.c1.type = memory</p> <p>a3.channels.c1.capacity = 1000</p> <p>a3.channels.c1.transactionCapacity = 100</p> <p># Bind the source and sink to the channel</p> <p>a3.sources.r1.channels = c1</p> <p>a3.sinks.k1.channel = c1</p> </blockquote> <p>4．执行配置文件</p> <p>分别开启对应配置文件：flume3-flume-logger.conf，flume2-netcat-flume.conf，flume1-logger-flume.conf。</p> <blockquote> <p>[atguigu\@hadoop104 flume]$ bin/flume-ng agent &ndash;conf conf/ &ndash;name a3 &ndash;conf-file job/group3/flume3-flume-logger.conf -Dflume.root.logger=INFO,console</p> <p>[atguigu\@hadoop102 flume]$ bin/flume-ng agent &ndash;conf conf/ &ndash;name a2 &ndash;conf-file job/group3/flume2-netcat-flume.conf</p> <p>[atguigu\@hadoop103 flume]$ bin/flume-ng agent &ndash;conf conf/ &ndash;name a1 &ndash;conf-file job/group3/flume1-logger-flume.conf</p> </blockquote> <p>5．在hadoop103上向/opt/module目录下的group.log追加内容</p> <blockquote> <p>[atguigu\@hadoop103 module]$ echo &lsquo;hello&rsquo; > group.log</p> </blockquote> <p>6．在hadoop102上向44444端口发送数据</p> <blockquote> <p>[atguigu\@hadoop102 flume]$ telnet hadoop102 44444</p> </blockquote> <p>7.检查hadoop104上数据</p> <p><img alt=1528770881(1) src=bba710a6b592eb54e30693f156d000ef.png></p> <h1 id=4-flumeganglia>第4章 Flume监控之Ganglia</h1> <h2 id=41-ganglia>4.1 Ganglia的安装与部署</h2> <p><strong>1) 安装httpd服务与php</strong></p> <blockquote> <p>[atguigu\@hadoop102 flume]$ sudo yum -y install httpd php</p> </blockquote> <p><strong>2) 安装其他依赖</strong></p> <blockquote> <p>[atguigu\@hadoop102 flume]$ sudo yum -y install rrdtool perl-rrdtool rrdtool-devel</p> <p>[atguigu\@hadoop102 flume]$ sudo yum -y install apr-devel</p> </blockquote> <p><strong>3) 安装ganglia</strong></p> <blockquote> <p>[atguigu\@hadoop102 flume]$ sudo rpm -Uvh <a href=http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm>http://dl.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm</a></p> <p>[atguigu\@hadoop102 flume]$ sudo yum -y install ganglia-gmetad</p> <p>[atguigu\@hadoop102 
flume]$ sudo yum -y install ganglia-web</p> <p>[atguigu\@hadoop102 flume]$ sudo yum install -y ganglia-gmond</p> </blockquote> <p><strong>4) 修改配置文件/etc/httpd/conf.d/ganglia.conf</strong></p> <blockquote> <p>[atguigu\@hadoop102 flume]$ sudo vim /etc/httpd/conf.d/ganglia.conf</p> </blockquote> <p><strong>修改为红颜色的配置：</strong></p> <blockquote> <p># Ganglia monitoring system php web frontend</p> <p>Alias /ganglia /usr/share/ganglia</p> <p>\&lt;Location /ganglia></p> <p>Order deny,allow</p> <p>Deny from all</p> <p>Allow from all</p> <p># Allow from 127.0.0.1</p> <p># Allow from ::1</p> <p># Allow from .example.com</p> <p>\&lt;/Location></p> </blockquote> <p><strong>5) 修改配置文件/etc/ganglia/gmetad.conf</strong></p> <blockquote> <p>[atguigu\@hadoop102 flume]$ sudo vim /etc/ganglia/gmetad.conf</p> </blockquote> <p><strong>修改为：</strong></p> <blockquote> <p>data_source &ldquo;hadoop102&rdquo; 192.168.1.102</p> </blockquote> <p><strong>6) 修改配置文件/etc/ganglia/gmond.conf</strong></p> <blockquote> <p>[atguigu\@hadoop102 flume]$ sudo vim /etc/ganglia/gmond.conf</p> </blockquote> <p><strong>修改为：</strong></p> <blockquote> <p>cluster {</p> <p>name = &ldquo;hadoop102&rdquo;</p> <p>owner = &ldquo;unspecified&rdquo;</p> <p>latlong = &ldquo;unspecified&rdquo;</p> <p>url = &ldquo;unspecified&rdquo;</p> <p>}</p> <p>udp_send_channel {</p> <p>#bind_hostname = yes # Highly recommended, soon to be default.</p> <p># This option tells gmond to use a source address</p> <p># that resolves to the machine&rsquo;s hostname. Without</p> <p># this, the metrics may appear to come from any</p> <p># interface and the DNS names associated with</p> <p># those IPs will be used to create the RRDs.</p> <p># mcast_join = 239.2.11.71</p> <p>host = 192.168.1.102</p> <p>port = 8649</p> <p>ttl = 1</p> <p>}</p> <p>udp_recv_channel {</p> <p># mcast_join = 239.2.11.71</p> <p>port = 8649</p> <p>bind = 192.168.1.102</p> <p>retry_bind = true</p> <p># Size of the UDP buffer. 
If you are handling lots of metrics you really</p> <p># should bump it up to e.g. 10MB or even higher.</p> <p># buffer = 10485760</p> <p>}</p> </blockquote> <p><strong>7) 修改配置文件/etc/selinux/config</strong></p> <blockquote> <p>[atguigu\@hadoop102 flume]$ sudo vim /etc/selinux/config</p> </blockquote> <p><strong>修改为：</strong></p> <blockquote> <p># This file controls the state of SELinux on the system.</p> <p># SELINUX= can take one of these three values:</p> <p># enforcing - SELinux security policy is enforced.</p> <p># permissive - SELinux prints warnings instead of enforcing.</p> <p># disabled - No SELinux policy is loaded.</p> <p>SELINUX=disabled</p> <p># SELINUXTYPE= can take one of these two values:</p> <p># targeted - Targeted processes are protected,</p> <p># mls - Multi Level Security protection.</p> <p>SELINUXTYPE=targeted</p> </blockquote> <p>尖叫提示：selinux本次生效关闭必须重启，如果此时不想重启，可以临时生效之：</p> <blockquote> <p>[atguigu\@hadoop102 flume]$ sudo setenforce 0</p> </blockquote> <p><strong>5) 启动ganglia</strong></p> <blockquote> <p>[atguigu\@hadoop102 flume]$ sudo service httpd start</p> <p>[atguigu\@hadoop102 flume]$ sudo service gmetad start</p> <p>[atguigu\@hadoop102 flume]$ sudo service gmond start</p> </blockquote> <p><strong>6) 打开网页浏览ganglia页面</strong></p> <p><a href=http://192.168.1.102/ganglia>http://192.168.1.102/ganglia</a></p> <p>尖叫提示：如果完成以上操作依然出现权限不足错误，请修改/var/lib/ganglia目录的权限：</p> <blockquote> <p>[atguigu\@hadoop102 flume]$ sudo chmod -R 777 /var/lib/ganglia</p> </blockquote> <h2 id=42-flume>4.2 操作Flume测试监控</h2> <p><strong>1) 修改/opt/module/flume/conf目录下的flume-env.sh配置：</strong></p> <blockquote> <p>JAVA_OPTS=&rdquo;-Dflume.monitoring.type=ganglia</p> <p>-Dflume.monitoring.hosts=192.168.1.102:8649</p> <p>-Xms100m</p> <p>-Xmx200m&rdquo;</p> </blockquote> <p><strong>2) 启动Flume任务</strong></p> <blockquote> <p>[atguigu\@hadoop102 flume]$ bin/flume-ng agent \</p> <p>--conf conf/ \</p> <p>--name a1 \</p> <p>--conf-file job/flume-telnet-logger.conf \</p> 
<p>-Dflume.root.logger=INFO,console \</p>
<p>process()//获取数据（从MySql获取数据，业务处理比较复杂，所以我们定义一个专门的类——SQLSourceHelper来处理跟MySql的交互），封装成Event并写入Channel，这个方法被循环调用</p> <p>stop()//关闭相关的资源</p> <h2 id=54>5.4 代码实现</h2> <h3 id=541-pom>5.4.1 导入Pom依赖</h3> <blockquote> <p>\&lt;dependencies></p> <p>\&lt;dependency></p> <p>\&lt;groupId>org.apache.flume\&lt;/groupId></p> <p>\&lt;artifactId>flume-ng-core\&lt;/artifactId></p> <p>\&lt;version>1.7.0\&lt;/version></p> <p>\&lt;/dependency></p> <p>\&lt;dependency></p> <p>\&lt;groupId>mysql\&lt;/groupId></p> <p>\&lt;artifactId>mysql-connector-java\&lt;/artifactId></p> <p>\&lt;version>5.1.27\&lt;/version></p> <p>\&lt;/dependency></p> <p>\&lt;/dependencies></p> </blockquote> <h3 id=542>5.4.2 添加配置信息</h3> <p>在ClassPath下添加jdbc.properties和log4j. properties</p> <p>jdbc.properties:</p> <blockquote> <p>dbDriver=com.mysql.jdbc.Driver</p> <p>dbUrl=jdbc:mysql://hadoop102:3306/mysqlsource?useUnicode=true&amp;characterEncoding=utf-8<br> dbUser=root<br> dbPassword=000000</p> </blockquote> <p>log4j. properties:</p> <blockquote> <p>#--------console-----------</p> <p>log4j.rootLogger=info,myconsole,myfile<br> log4j.appender.myconsole=org.apache.log4j.ConsoleAppender<br> log4j.appender.myconsole.layout=org.apache.log4j.SimpleLayout<br> #log4j.appender.myconsole.layout.ConversionPattern =%d [%t] %-5p [%c] - %m%n </p> <p>#log4j.rootLogger=error,myfile<br> log4j.appender.myfile=org.apache.log4j.DailyRollingFileAppender<br> log4j.appender.myfile.File=/tmp/flume.log<br> log4j.appender.myfile.layout=org.apache.log4j.PatternLayout<br> log4j.appender.myfile.layout.ConversionPattern =%d [%t] %-5p [%c] - %m%n</p> </blockquote> <h3 id=543-sqlsourcehelper>5.4.3 SQLSourceHelper</h3> <p><strong>1）属性说明：</strong></p> <table> <thead> <tr> <th>属性</th> <th>说明（括号中为默认值）</th> </tr> </thead> <tbody> <tr> <td>runQueryDelay</td> <td>查询时间间隔（10000）</td> </tr> <tr> <td>batchSize</td> <td>缓存大小（100）</td> </tr> <tr> <td>startFrom</td> <td>查询语句开始id（0）</td> </tr> <tr> <td>currentIndex</td> <td>查询语句当前id，每次查询之前需要查元数据表</td> </tr> <tr> 
<td>recordSixe</td> <td>查询返回条数</td> </tr> <tr> <td>table</td> <td>监控的表名</td> </tr> <tr> <td>columnsToSelect</td> <td>查询字段（*）</td> </tr> <tr> <td>customQuery</td> <td>用户传入的查询语句</td> </tr> <tr> <td>query</td> <td>查询语句</td> </tr> <tr> <td>defaultCharsetResultSet</td> <td>编码格式（UTF-8）</td> </tr> </tbody> </table> <p><strong>2）方法说明：</strong></p> <table> <thead> <tr> <th>方法</th> <th>说明</th> </tr> </thead> <tbody> <tr> <td>SQLSourceHelper(Context context)</td> <td>构造方法，初始化属性及获取JDBC连接</td> </tr> <tr> <td>InitConnection(String url, String user, String pw)</td> <td>获取JDBC连接</td> </tr> <tr> <td>checkMandatoryProperties()</td> <td>校验相关属性是否设置（实际开发中可增加内容）</td> </tr> <tr> <td>buildQuery()</td> <td>根据实际情况构建sql语句，返回值String</td> </tr> <tr> <td>executeQuery()</td> <td>执行sql语句的查询操作，返回值List\&lt;List\&lt;Object>></td> </tr> <tr> <td>getAllRows(List\&lt;List\&lt;Object>> queryResult)</td> <td>将查询结果转换为String，方便后续操作</td> </tr> <tr> <td>updateOffset2DB(int size)</td> <td>根据每次查询结果将offset写入元数据表</td> </tr> <tr> <td>execSql(String sql)</td> <td>具体执行sql语句方法</td> </tr> <tr> <td>getStatusDBIndex(int startFrom)</td> <td>获取元数据表中的offset</td> </tr> <tr> <td>queryOne(String sql)</td> <td>获取元数据表中的offset实际sql语句执行方法</td> </tr> <tr> <td>close()</td> <td>关闭资源</td> </tr> </tbody> </table> <p><strong>3）代码分析</strong></p> <p><strong>4）代码实现：</strong></p> <blockquote> <p>package com.atguigu.source;</p> <p>import org.apache.flume.Context;</p> <p>import org.apache.flume.conf.ConfigurationException;</p> <p>import org.slf4j.Logger;</p> <p>import org.slf4j.LoggerFactory;</p> <p>import java.io.IOException;</p> <p>import java.sql.*;</p> <p>import java.text.ParseException;</p> <p>import java.util.ArrayList;</p> <p>import java.util.List;</p> <p>import java.util.Properties;</p> <p>public class SQLSourceHelper {</p> <p>private static final Logger LOG = LoggerFactory.getLogger(SQLSourceHelper.class);</p> <p>private int runQueryDelay, //两次查询的时间间隔</p> <p>startFrom, //开始id</p> <p>currentIndex, //当前id</p> <p>recordSixe = 0, 
//每次查询返回结果的条数</p> <p>maxRow; //每次查询的最大条数</p> <p>private String table, //要操作的表</p> <p>columnsToSelect, //用户传入的查询的列</p> <p>customQuery, //用户传入的查询语句</p> <p>query, //构建的查询语句</p> <p>defaultCharsetResultSet;//编码集</p> <p>//上下文，用来获取配置文件</p> <p>private Context context;</p> <p>//为定义的变量赋值（默认值），可在flume任务的配置文件中修改</p> <p>private static final int DEFAULT_QUERY_DELAY = 10000;</p> <p>private static final int DEFAULT_START_VALUE = 0;</p> <p>private static final int DEFAULT_MAX_ROWS = 2000;</p> <p>private static final String DEFAULT_COLUMNS_SELECT = &ldquo;*&ldquo;;</p> <p>private static final String DEFAULT_CHARSET_RESULTSET = &ldquo;UTF-8&rdquo;;</p> <p>private static Connection conn = null;</p> <p>private static PreparedStatement ps = null;</p> <p>private static String connectionURL, connectionUserName, connectionPassword;</p> <p>//加载静态资源</p> <p>static {</p> <p>Properties p = new Properties();</p> <p>try {</p> <p>p.load(SQLSourceHelper.class.getClassLoader().getResourceAsStream(&ldquo;jdbc.properties&rdquo;));</p> <p>connectionURL = p.getProperty(&ldquo;dbUrl&rdquo;);</p> <p>connectionUserName = p.getProperty(&ldquo;dbUser&rdquo;);</p> <p>connectionPassword = p.getProperty(&ldquo;dbPassword&rdquo;);</p> <p>Class.forName(p.getProperty(&ldquo;dbDriver&rdquo;));</p> <p>} catch (IOException | ClassNotFoundException e) {</p> <p>LOG.error(e.toString());</p> <p>}</p> <p>}</p> <p>//获取JDBC连接</p> <p>private static Connection InitConnection(String url, String user, String pw) {</p> <p>try {</p> <p>Connection conn = DriverManager.getConnection(url, user, pw);</p> <p>if (conn == null)</p> <p>throw new SQLException();</p> <p>return conn;</p> <p>} catch (SQLException e) {</p> <p>e.printStackTrace();</p> <p>}</p> <p>return null;</p> <p>}</p> <p>//构造方法</p> <p>SQLSourceHelper(Context context) throws ParseException {</p> <p>//初始化上下文</p> <p>this.context = context;</p> <p>//有默认值参数：获取flume任务配置文件中的参数，读不到的采用默认值</p> <p>this.columnsToSelect = context.getString(&ldquo;columns.to.select&rdquo;, 
DEFAULT_COLUMNS_SELECT);</p> <p>this.runQueryDelay = context.getInteger(&ldquo;run.query.delay&rdquo;, DEFAULT_QUERY_DELAY);</p> <p>this.startFrom = context.getInteger(&ldquo;start.from&rdquo;, DEFAULT_START_VALUE);</p> <p>this.defaultCharsetResultSet = context.getString(&ldquo;default.charset.resultset&rdquo;, DEFAULT_CHARSET_RESULTSET);</p> <p>//无默认值参数：获取flume任务配置文件中的参数</p> <p>this.table = context.getString(&ldquo;table&rdquo;);</p> <p>this.customQuery = context.getString(&ldquo;custom.query&rdquo;);</p> <p>connectionURL = context.getString(&ldquo;connection.url&rdquo;);</p> <p>connectionUserName = context.getString(&ldquo;connection.user&rdquo;);</p> <p>connectionPassword = context.getString(&ldquo;connection.password&rdquo;);</p> <p>conn = InitConnection(connectionURL, connectionUserName, connectionPassword);</p> <p>//校验相应的配置信息，如果没有默认值的参数也没赋值，抛出异常</p> <p>checkMandatoryProperties();</p> <p>//获取当前的id</p> <p>currentIndex = getStatusDBIndex(startFrom);</p> <p>//构建查询语句</p> <p>query = buildQuery();</p> <p>}</p> <p>//校验相应的配置信息（表，查询语句以及数据库连接的参数）</p> <p>private void checkMandatoryProperties() {</p> <p>if (table == null) {</p> <p>throw new ConfigurationException(&ldquo;property table not set&rdquo;);</p> <p>}</p> <p>if (connectionURL == null) {</p> <p>throw new ConfigurationException(&ldquo;connection.url property not set&rdquo;);</p> <p>}</p> <p>if (connectionUserName == null) {</p> <p>throw new ConfigurationException(&ldquo;connection.user property not set&rdquo;);</p> <p>}</p> <p>if (connectionPassword == null) {</p> <p>throw new ConfigurationException(&ldquo;connection.password property not set&rdquo;);</p> <p>}</p> <p>}</p> <p>//构建sql语句</p> <p>private String buildQuery() {</p> <p>String sql = &ldquo;&rdquo;;</p> <p>//获取当前id</p> <p>currentIndex = getStatusDBIndex(startFrom);</p> <p>LOG.info(currentIndex + &ldquo;&rdquo;);</p> <p>if (customQuery == null) {</p> <p>sql = &ldquo;SELECT &rdquo; + columnsToSelect + &rdquo; FROM &rdquo; + table;</p> <p>} else {</p> <p>sql = 
customQuery;</p> <p>}</p> <p>StringBuilder execSql = new StringBuilder(sql);</p> <p>//以id作为offset</p> <p>if (!sql.contains(&ldquo;where&rdquo;)) {</p> <p>execSql.append(&rdquo; where &ldquo;);</p> <p>execSql.append(&ldquo;id&rdquo;).append(&ldquo;>&ldquo;).append(currentIndex);</p> <p>return execSql.toString();</p> <p>} else {</p> <p>int length = execSql.toString().length();</p> <p>return execSql.toString().substring(0, length - String.valueOf(currentIndex).length()) + currentIndex;</p> <p>}</p> <p>}</p> <p>//执行查询</p> <p>List\&lt;List\&lt;Object>> executeQuery() {</p> <p>try {</p> <p>//每次执行查询时都要重新生成sql，因为id不同</p> <p>customQuery = buildQuery();</p> <p>//存放结果的集合</p> <p>List\&lt;List\&lt;Object>> results = new ArrayList\&lt;>();</p> <p>if (ps == null) {</p> <p>//</p> <p>ps = conn.prepareStatement(customQuery);</p> <p>}</p> <p>ResultSet result = ps.executeQuery(customQuery);</p> <p>while (result.next()) {</p> <p>//存放一条数据的集合（多个列）</p> <p>List\&lt;Object> row = new ArrayList\&lt;>();</p> <p>//将返回结果放入集合</p> <p>for (int i = 1; i \&lt;= result.getMetaData().getColumnCount(); i++) {</p> <p>row.add(result.getObject(i));</p> <p>}</p> <p>results.add(row);</p> <p>}</p> <p>LOG.info(&ldquo;execSql:&rdquo; + customQuery + &ldquo;\nresultSize:&rdquo; + results.size());</p> <p>return results;</p> <p>} catch (SQLException e) {</p> <p>LOG.error(e.toString());</p> <p>// 重新连接</p> <p>conn = InitConnection(connectionURL, connectionUserName, connectionPassword);</p> <p>}</p> <p>return null;</p> <p>}</p> <p>//将结果集转化为字符串，每一条数据是一个list集合，将每一个小的list集合转化为字符串</p> <p>List\&lt;String> getAllRows(List\&lt;List\&lt;Object>> queryResult) {</p> <p>List\&lt;String> allRows = new ArrayList\&lt;>();</p> <p>if (queryResult == null || queryResult.isEmpty())</p> <p>return allRows;</p> <p>StringBuilder row = new StringBuilder();</p> <p>for (List\&lt;Object> rawRow : queryResult) {</p> <p>Object value = null;</p> <p>for (Object aRawRow : rawRow) {</p> <p>value = aRawRow;</p> <p>if (value == null) {</p> 
<p>row.append(&ldquo;,&rdquo;);</p> <p>} else {</p> <p>row.append(aRawRow.toString()).append(&ldquo;,&rdquo;);</p> <p>}</p> <p>}</p> <p>allRows.add(row.toString());</p> <p>row = new StringBuilder();</p> <p>}</p> <p>return allRows;</p> <p>}</p> <p>//更新offset元数据状态，每次返回结果集后调用。必须记录每次查询的offset值，为程序中断续跑数据时使用，以id为offset</p> <p>void updateOffset2DB(int size) {</p> <p>//以source_tab做为KEY，如果不存在则插入，存在则更新（每个源表对应一条记录）</p> <p>String sql = &ldquo;insert into flume_meta(source_tab,currentIndex) VALUES(&lsquo;&ldquo;</p> <p>+ this.table</p> <p>+ &ldquo;&rsquo;,&rsquo;&rdquo; + (recordSixe += size)</p> <p>+ &ldquo;&rsquo;) on DUPLICATE key update source_tab=values(source_tab),currentIndex=values(currentIndex)&rdquo;;</p> <p>LOG.info(&ldquo;updateStatus Sql:&rdquo; + sql);</p> <p>execSql(sql);</p> <p>}</p> <p>//执行sql语句</p> <p>private void execSql(String sql) {</p> <p>try {</p> <p>ps = conn.prepareStatement(sql);</p> <p>LOG.info(&ldquo;exec::&rdquo; + sql);</p> <p>ps.execute();</p> <p>} catch (SQLException e) {</p> <p>e.printStackTrace();</p> <p>}</p> <p>}</p> <p>//获取当前id的offset</p> <p>private Integer getStatusDBIndex(int startFrom) {</p> <p>//从flume_meta表中查询出当前的id是多少</p> <p>String dbIndex = queryOne(&ldquo;select currentIndex from flume_meta where source_tab=&rsquo;&rdquo; + table + &ldquo;&rsquo;&ldquo;);</p> <p>if (dbIndex != null) {</p> <p>return Integer.parseInt(dbIndex);</p> <p>}</p> <p>//如果没有数据，则说明是第一次查询或者数据表中还没有存入数据，返回最初传入的值</p> <p>return startFrom;</p> <p>}</p> <p>//查询一条数据的执行语句(当前id)</p> <p>private String queryOne(String sql) {</p> <p>ResultSet result = null;</p> <p>try {</p> <p>ps = conn.prepareStatement(sql);</p> <p>result = ps.executeQuery();</p> <p>while (result.next()) {</p> <p>return result.getString(1);</p> <p>}</p> <p>} catch (SQLException e) {</p> <p>e.printStackTrace();</p> <p>}</p> <p>return null;</p> <p>}</p> <p>//关闭相关资源</p> <p>void close() {</p> <p>try {</p> <p>ps.close();</p> <p>conn.close();</p> <p>} catch (SQLException e) {</p> <p>e.printStackTrace();</p> 
<p>}</p> <p>}</p> <p>int getCurrentIndex() {</p> <p>return currentIndex;</p> <p>}</p> <p>void setCurrentIndex(int newValue) {</p> <p>currentIndex = newValue;</p> <p>}</p> <p>int getRunQueryDelay() {</p> <p>return runQueryDelay;</p> <p>}</p> <p>String getQuery() {</p> <p>return query;</p> <p>}</p> <p>String getConnectionURL() {</p> <p>return connectionURL;</p> <p>}</p> <p>private boolean isCustomQuerySet() {</p> <p>return (customQuery != null);</p> <p>}</p> <p>Context getContext() {</p> <p>return context;</p> <p>}</p> <p>public String getConnectionUserName() {</p> <p>return connectionUserName;</p> <p>}</p> <p>public String getConnectionPassword() {</p> <p>return connectionPassword;</p> <p>}</p> <p>String getDefaultCharsetResultSet() {</p> <p>return defaultCharsetResultSet;</p> <p>}</p> <p>}</p> </blockquote> <h3 id=544-mysqlsource>5.4.4 MySQLSource</h3> <p>代码实现：</p> <blockquote> <p>package com.atguigu.source;</p> <p>import org.apache.flume.Context;</p> <p>import org.apache.flume.Event;</p> <p>import org.apache.flume.EventDeliveryException;</p> <p>import org.apache.flume.PollableSource;</p> <p>import org.apache.flume.conf.Configurable;</p> <p>import org.apache.flume.event.SimpleEvent;</p> <p>import org.apache.flume.source.AbstractSource;</p> <p>import org.slf4j.Logger;</p> <p>import org.slf4j.LoggerFactory;</p> <p>import java.text.ParseException;</p> <p>import java.util.ArrayList;</p> <p>import java.util.HashMap;</p> <p>import java.util.List;</p> <p>public class SQLSource extends AbstractSource implements Configurable, PollableSource {</p> <p>//打印日志</p> <p>private static final Logger LOG = LoggerFactory.getLogger(SQLSource.class);</p> <p>//定义sqlHelper</p> <p>private SQLSourceHelper sqlSourceHelper;</p> <p>\@Override</p> <p>public long getBackOffSleepIncrement() {</p> <p>return 0;</p> <p>}</p> <p>\@Override</p> <p>public long getMaxBackOffSleepInterval() {</p> <p>return 0;</p> <p>}</p> <p>\@Override</p> <p>public void configure(Context context) {</p> <p>try {</p> 
<p>//初始化</p> <p>sqlSourceHelper = new SQLSourceHelper(context);</p> <p>} catch (ParseException e) {</p> <p>e.printStackTrace();</p> <p>}</p> <p>}</p> <p>\@Override</p> <p>public Status process() throws EventDeliveryException {</p> <p>try {</p> <p>//查询数据表</p> <p>List\&lt;List\&lt;Object>> result = sqlSourceHelper.executeQuery();</p> <p>//存放event的集合</p> <p>List\&lt;Event> events = new ArrayList\&lt;>();</p> <p>//存放event头集合</p> <p>HashMap\&lt;String, String> header = new HashMap\&lt;>();</p> <p>//如果有返回数据，则将数据封装为event</p> <p>if (!result.isEmpty()) {</p> <p>List\&lt;String> allRows = sqlSourceHelper.getAllRows(result);</p> <p>Event event = null;</p> <p>for (String row : allRows) {</p> <p>event = new SimpleEvent();</p> <p>event.setBody(row.getBytes());</p> <p>event.setHeaders(header);</p> <p>events.add(event);</p> <p>}</p> <p>//将event写入channel</p> <p>this.getChannelProcessor().processEventBatch(events);</p> <p>//更新数据表中的offset信息</p> <p>sqlSourceHelper.updateOffset2DB(result.size());</p> <p>}</p> <p>//等待时长</p> <p>Thread.sleep(sqlSourceHelper.getRunQueryDelay());</p> <p>return Status.READY;</p> <p>} catch (InterruptedException e) {</p> <p>LOG.error(&ldquo;Error procesing row&rdquo;, e);</p> <p>return Status.BACKOFF;</p> <p>}</p> <p>}</p> <p>\@Override</p> <p>public synchronized void stop() {</p> <p>LOG.info(&ldquo;Stopping sql source {} &hellip;&rdquo;, getName());</p> <p>try {</p> <p>//关闭资源</p> <p>sqlSourceHelper.close();</p> <p>} finally {</p> <p>super.stop();</p> <p>}</p> <p>}</p> <p>}</p> </blockquote> <h2 id=55>5.5 测试</h2> <h3 id=551-jar>5.5.1 Jar包准备</h3> <p><strong>1) 将MySql驱动包放入Flume的lib目录下</strong></p> <blockquote> <p>[atguigu\@hadoop102 flume]$ cp \</p> <p>/opt/sorfware/mysql-libs/mysql-connector-java-5.1.27/mysql-connector-java-5.1.27-bin.jar \</p> <p>/opt/module/flume/lib/</p> </blockquote> <p><strong>2) 打包项目并将Jar包放入Flume的lib目录下</strong></p> <h3 id=552>5.5.2 配置文件准备</h3> <p>1）创建配置文件并打开</p> <blockquote> <p>[atguigu\@hadoop102 job]$ touch mysql.conf</p> 
<p>[atguigu\@hadoop102 job]$ vim mysql.conf</p> </blockquote> <p>2）添加如下内容</p> <blockquote> <p># Name the components on this agent</p> <p>a1.sources = r1</p> <p>a1.sinks = k1</p> <p>a1.channels = c1</p> <p># Describe/configure the source</p> <p>a1.sources.r1.type = com.atguigu.source.SQLSource</p> <p>a1.sources.r1.connection.url = jdbc:mysql://192.168.9.102:3306/mysqlsource</p> <p>a1.sources.r1.connection.user = root</p> <p>a1.sources.r1.connection.password = 000000</p> <p>a1.sources.r1.table = student</p> <p>a1.sources.r1.columns.to.select = *</p> <p>#a1.sources.r1.incremental.column.name = id</p> <p>#a1.sources.r1.incremental.value = 0</p> <p>a1.sources.r1.run.query.delay=5000</p> <p># Describe the sink</p> <p>a1.sinks.k1.type = logger</p> <p># Describe the channel</p> <p>a1.channels.c1.type = memory</p> <p>a1.channels.c1.capacity = 1000</p> <p>a1.channels.c1.transactionCapacity = 100</p> <p># Bind the source and sink to the channel</p> <p>a1.sources.r1.channels = c1</p> <p>a1.sinks.k1.channel = c1</p> </blockquote> <h3 id=553-mysql>5.5.3 MySql表准备</h3> <p><strong>1) 创建MySqlSource数据库</strong></p> <blockquote> <p>CREATE DATABASE mysqlsource；</p> </blockquote> <p><strong>2) 在MySqlSource数据库下创建数据表Student和元数据表Flume_meta</strong></p> <blockquote> <p>CREATE TABLE `student` (</p> <p>`id` int(11) NOT NULL AUTO_INCREMENT,</p> <p>`name` varchar(255) NOT NULL,</p> <p>PRIMARY KEY (`id`)</p> <p>);</p> <p>CREATE TABLE `flume_meta` (</p> <p>`source_tab` varchar(255) NOT NULL,</p> <p>`currentIndex` varchar(255) NOT NULL,</p> <p>PRIMARY KEY (`source_tab`)</p> <p>);</p> </blockquote> <ol> <li><strong>向数据表中添加数据</strong></li> </ol> <blockquote> <p>1 zhangsan</p> <p>2 lisi</p> <p>3 wangwu</p> <p>4 zhaoliu</p> </blockquote> <h3 id=_1>测试并查看结果</h3> <ol> <li><strong>任务执行</strong></li> </ol> <blockquote> <p>[atguigu\@hadoop102 flume]$ bin/flume-ng agent &ndash;conf conf/ &ndash;name a1 \</p> <p>--conf-file job/mysql.conf -Dflume.root.logger=INFO,console</p> </blockquote> <ol> 
<li><strong>结果展示，如图6-2所示：</strong></li> </ol> <p><img alt src=bb2028aaf0e250c56a63d781fa3db69e.jpg></p> <p>图6-2 结果展示</p> <h1 id=6>第6章 知识扩展</h1> <h2 id=61>6.1 常见正则表达式语法</h2> <table> <thead> <tr> <th>元字符</th> <th>描述</th> </tr> </thead> <tbody> <tr> <td>^</td> <td>匹配输入字符串的开始位置。如果设置了RegExp对象的Multiline属性，^也匹配“\n”或“\r”之后的位置。</td> </tr> <tr> <td>$</td> <td>匹配输入字符串的结束位置。如果设置了RegExp对象的Multiline属性，$也匹配“\n”或“\r”之前的位置。</td> </tr> <tr> <td>*</td> <td>匹配前面的子表达式任意次。例如，zo*能匹配“z”，“zo”以及“zoo”。*等价于{0,}。</td> </tr> <tr> <td>+</td> <td>匹配前面的子表达式一次或多次(大于等于1次）。例如，“zo+”能匹配“zo”以及“zoo”，但不能匹配“z”。+等价于{1,}。</td> </tr> <tr> <td>[a-z]</td> <td>字符范围。匹配指定范围内的任意字符。例如，“[a-z]”可以匹配“a”到“z”范围内的任意小写字母字符。 注意:只有连字符在字符组内部时,并且出现在两个字符之间时,才能表示字符的范围; 如果出字符组的开头,则只能表示连字符本身.</td> </tr> </tbody> </table> <h2 id=62>6.2 练习</h2> <p>案例需求：</p> <p>1）flume-1监控hive.log日志，flume-1的数据传送给flume-2，flume-2将数据追加到本地文件，同时将数据传输到flume-3。</p> <p>2）flume-4监控本地另一个自己创建的文件any.txt，并将数据传送给flume-3。</p> <p>3）flume-3将汇总数据写入到HDFS。</p> <p>请先画出结构图，再开始编写任务脚本。</p> <h1 id=7>第7章 企业真实面试题（重点）</h1> <h2 id=71-flume>7.1 你是如何实现Flume数据传输的监控的</h2> <p>使用第三方框架Ganglia实时监控Flume。</p> <h2 id=72-flumesourcesinkchannelsource>7.2 Flume的Source，Sink，Channel的作用？你们Source是什么类型？</h2> <p>1、作用</p> <p>（1）Source组件是专门用来收集数据的，可以处理各种类型、各种格式的日志数据，包括avro、thrift、exec、jms、spooling directory、netcat、sequence generator、syslog、http、legacy</p> <p>（2）Channel组件对采集到的数据进行缓存，可以存放在Memory或File中。</p> <p>（3）Sink组件是用于把数据发送到目的地的组件，目的地包括Hdfs、Logger、avro、thrift、ipc、file、Hbase、solr、自定义。</p> <p>2、我公司采用的Source类型为：</p> <p>（1）监控后台日志：exec</p> <p>（2）监控后台产生日志的端口：netcat</p> <p>Exec spooldir</p> <h2 id=73-flumechannel-selectors>7.3 Flume的Channel Selectors</h2> <h2 id=74-flume>7.4 Flume参数调优</h2> <p>1. 
Source</p> <p>增加Source个数（使用Taildir Source时可增加FileGroups个数）可以增大Source的读取数据的能力。例如：当某一个目录产生的文件过多时需要将这个文件目录拆分成多个文件目录，同时配置好多个Source 以保证Source有足够的能力获取到新产生的数据。</p> <p>batchSize参数决定Source一次批量运输到Channel的event条数，适当调大这个参数可以提高Source搬运Event到Channel时的性能。</p> <ol> <li>Channel </li> </ol> <p>type 选择memory时Channel的性能最好，但是如果Flume进程意外挂掉可能会丢失数据。type选择file时Channel的容错性更好，但是性能上会比memory channel差。</p> <p>使用file Channel时dataDirs配置多个不同盘下的目录可以提高性能。</p> <p>Capacity 参数决定Channel可容纳最大的event条数。transactionCapacity 参数决定每次Source往channel里面写的最大event条数和每次Sink从channel里面读的最大event条数。transactionCapacity需要大于Source和Sink的batchSize参数。</p> <ol> <li>Sink </li> </ol> <p>增加Sink的个数可以增加Sink消费event的能力。Sink也不是越多越好够用就行，过多的Sink会占用系统资源，造成系统资源不必要的浪费。</p> <p>batchSize参数决定Sink一次批量从Channel读取的event条数，适当调大这个参数可以提高Sink从Channel搬出event的性能。</p> <h2 id=75-flume>7.5 Flume的事务机制</h2> <p>Flume的事务机制（类似数据库的事务机制）：Flume使用两个独立的事务分别负责从Source到Channel，以及从Channel到Sink的事件传递。比如spooling directory source 为文件的每一行创建一个事件，一旦事务中所有的事件全部传递到Channel且提交成功，那么Source就将该文件标记为完成。同理，事务以类似的方式处理从Channel到Sink的传递过程，如果因为某种原因使得事件无法记录，那么事务将会回滚。且所有的事件都会保存到Channel中，等待重新传递。</p> <h2 id=76-flume>7.6 Flume采集数据会丢失吗?</h2> <p>不会，Channel存储可以存储在File中，数据传输自身有事务。</p> </article> </div> </div> </main> <footer class=md-footer> <div class=md-footer-nav> <nav class="md-footer-nav__inner md-grid" aria-label=Footer> <a href=../Kafka/ title=Kafka class="md-footer-nav__link md-footer-nav__link--prev" rel=prev> <div class="md-footer-nav__button md-icon"> <svg xmlns=http://www.w3.org/2000/svg viewbox="0 0 24 24"><path d="M20 11v2H8l5.5 5.5-1.42 1.42L4.16 12l7.92-7.92L13.5 5.5 8 11h12z"/></svg> </div> <div class=md-footer-nav__title> <div class=md-ellipsis> <span class=md-footer-nav__direction> 上一页 </span> Kafka </div> </div> </a> <a href=../Sqoop/ title=Sqoop class="md-footer-nav__link md-footer-nav__link--next" rel=next> <div class=md-footer-nav__title> <div class=md-ellipsis> <span class=md-footer-nav__direction> 下一页 </span> Sqoop </div> </div> <div class="md-footer-nav__button md-icon"> 
<svg xmlns=http://www.w3.org/2000/svg viewbox="0 0 24 24"><path d="M4 11v2h12l-5.5 5.5 1.42 1.42L19.84 12l-7.92-7.92L10.5 5.5 16 11H4z"/></svg> </div> </a> </nav> </div> <div class="md-footer-meta md-typeset"> <div class="md-footer-meta__inner md-grid"> <div class=md-footer-copyright> <div class=md-footer-copyright__highlight> Copyright &copy; 2018 - 2029 Dayet 296577630@qq.com </div> Made with <a href=https://squidfunk.github.io/mkdocs-material/ target=_blank rel=noopener> Material for MkDocs </a> </div> </div> </div> </footer> </div> <script src=../../assets/javascripts/vendor.2d1db4bd.min.js></script> <script src=../../assets/javascripts/bundle.6627ddf3.min.js></script><script id=__lang type=application/json>{"clipboard.copy": "\u590d\u5236", "clipboard.copied": "\u5df2\u590d\u5236", "search.config.lang": "ja", "search.config.pipeline": "trimmer, stemmer", "search.config.separator": "[\\uff0c\\u3002]+", "search.result.placeholder": "\u952e\u5165\u4ee5\u5f00\u59cb\u641c\u7d22", "search.result.none": "\u6ca1\u6709\u627e\u5230\u7b26\u5408\u6761\u4ef6\u7684\u7ed3\u679c", "search.result.one": "\u627e\u5230 1 \u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u7ed3\u679c", "search.result.other": "# \u4e2a\u7b26\u5408\u6761\u4ef6\u7684\u7ed3\u679c"}</script> <script>
        // Bootstrap the Material-for-MkDocs frontend. `initialize` is provided
        // by the bundle script loaded above; `search` is an optional global
        // override object that, when defined, is merged over the worker default.
        app = initialize({
          features: [],
          base: "../..",
          search: (function () {
            var cfg = { worker: "../../assets/javascripts/worker/search.5eca75d3.min.js" }
            return Object.assign(cfg, typeof search !== "undefined" && search)
          })()
        })
      </script> </body> </html>