 1.高可用案例
   
   1).配置环境
   在linux121、linux122上部署Flume、修改环境变量
   # Run on linux123: copy the Flume installation to both collector nodes.
   # (Original was missing the `cd`; a bare path is not a command.)
   cd /opt/lagou/servers
   scp -r flume-1.9.0/ linux121:$PWD
   scp -r flume-1.9.0/ linux122:$PWD
   # Propagate the environment variables (/etc/profile) as well
   cd /etc
   scp profile linux121:$PWD
   scp profile linux122:$PWD
   # Then run on linux121 and linux122 respectively:
   source /etc/profile
   2).conf文件
   linux123：flume-taildir-avro.conf
   # Agent a1 (linux123): taildir source -> memory channel -> two avro
   # sinks in a failover group (k1/linux121 preferred, k2/linux122 backup).
     a1.sources = r1
     a1.channels = c1
     a1.sinks = k1 k2
   # Taildir source: read offsets are persisted in the position file so
   # tailing resumes after a restart; watches /tmp/root/*.log.
     a1.sources.r1.type = TAILDIR
     a1.sources.r1.positionFile = /root/flume_log/taildir_position.json
     a1.sources.r1.filegroups = f1
     a1.sources.r1.filegroups.f1 = /tmp/root/.*log
     a1.sources.r1.fileHeader = true
     a1.sources.r1.channels = c1
   # Interceptors: i1 stamps every event header with Type=LOGIN;
   # i2 adds a timestamp header to the event header.
     a1.sources.r1.interceptors = i1 i2
     a1.sources.r1.interceptors.i1.type = static
     a1.sources.r1.interceptors.i1.key = Type
     a1.sources.r1.interceptors.i1.value = LOGIN
     a1.sources.r1.interceptors.i2.type = timestamp
   # In-memory channel: 10000-event capacity, 500 events per transaction.
     a1.channels.c1.type = memory
     a1.channels.c1.capacity = 10000
     a1.channels.c1.transactionCapacity = 500
   # Avro sink k1 -> collector agent on linux121.
     a1.sinks.k1.type = avro
     a1.sinks.k1.hostname = linux121
     a1.sinks.k1.port = 9999
     a1.sinks.k1.channel = c1
   # Avro sink k2 -> collector agent on linux122.
     a1.sinks.k2.type = avro
     a1.sinks.k2.hostname = linux122
     a1.sinks.k2.port = 9999
     a1.sinks.k2.channel = c1
   # Failover sink group: k1 (priority 100) is preferred over k2 (60);
   # a failed sink is penalised for up to 10s before being retried.
     a1.sinkgroups = g1
     a1.sinkgroups.g1.sinks = k1 k2
     a1.sinkgroups.g1.processor.type = failover
     a1.sinkgroups.g1.processor.priority.k1 = 100
     a1.sinkgroups.g1.processor.priority.k2 = 60
     a1.sinkgroups.g1.processor.maxpenalty = 10000
	 
   linux121：flume-avro-hdfs.conf
   # Agent a2 (linux121): avro source <- a1, memory channel, HDFS sink.
     a2.sources = r1
     a2.channels = c1
     a2.sinks = k1
   # Avro source listening for events forwarded by a1 (linux123).
     a2.sources.r1.type = avro
     a2.sources.r1.bind = linux121
     a2.sources.r1.port = 9999
     a2.sources.r1.channels = c1
   # Static interceptor tags each event with the collector that handled it.
     a2.sources.r1.interceptors = i1
     a2.sources.r1.interceptors.i1.type = static
     a2.sources.r1.interceptors.i1.key = Collector
     a2.sources.r1.interceptors.i1.value = linux121
   # In-memory channel: 10000-event capacity, 500 events per transaction.
     a2.channels.c1.type = memory
     a2.channels.c1.capacity = 10000
     a2.channels.c1.transactionCapacity = 500
   # HDFS sink: plain-text stream rolled every 60s (size/count/idle rolling
   # disabled); minBlockReplicas=1 avoids premature rolls on under-replicated
   # blocks. filePrefix %Y-%m-%d relies on the timestamp header added by
   # the timestamp interceptor on the upstream agent a1.
     a2.sinks.k1.type = hdfs
     a2.sinks.k1.hdfs.path = hdfs://linux121:8020/flume/failover/
     a2.sinks.k1.hdfs.fileType = DataStream
     # Fixed: documented values for hdfs.writeFormat are "Text"/"Writable" ("TEXT" is not valid).
     a2.sinks.k1.hdfs.writeFormat = Text
     a2.sinks.k1.hdfs.rollInterval = 60
     a2.sinks.k1.hdfs.filePrefix = %Y-%m-%d
     a2.sinks.k1.hdfs.minBlockReplicas = 1
     a2.sinks.k1.hdfs.rollSize = 0
     a2.sinks.k1.hdfs.rollCount = 0
     a2.sinks.k1.hdfs.idleTimeout = 0
     a2.sinks.k1.channel = c1

   linux122：flume-avro-hdfs.conf
   # Agent a3 (linux122): avro source <- a1, memory channel, HDFS sink.
   # Backup collector in the failover pair (mirrors a2 on linux121).
     a3.sources = r1
     a3.channels = c1
     a3.sinks = k1
   # Avro source listening for events forwarded by a1 (linux123).
     a3.sources.r1.type = avro
     a3.sources.r1.bind = linux122
     a3.sources.r1.port = 9999
     a3.sources.r1.channels = c1
   # Static interceptor tags each event with the collector that handled it.
     a3.sources.r1.interceptors = i1
     a3.sources.r1.interceptors.i1.type = static
     a3.sources.r1.interceptors.i1.key = Collector
     a3.sources.r1.interceptors.i1.value = linux122
   # In-memory channel: 10000-event capacity, 500 events per transaction.
     a3.channels.c1.type = memory
     a3.channels.c1.capacity = 10000
     a3.channels.c1.transactionCapacity = 500
   # HDFS sink: same target directory as a2 so output lands in one place
   # regardless of which collector is active; rolls every 60s only.
     a3.sinks.k1.type = hdfs
     a3.sinks.k1.hdfs.path = hdfs://linux121:8020/flume/failover/
     a3.sinks.k1.hdfs.fileType = DataStream
     # Fixed: documented values for hdfs.writeFormat are "Text"/"Writable" ("TEXT" is not valid).
     a3.sinks.k1.hdfs.writeFormat = Text
     a3.sinks.k1.hdfs.rollInterval = 60
     a3.sinks.k1.hdfs.filePrefix = %Y-%m-%d
     a3.sinks.k1.hdfs.minBlockReplicas = 1
     a3.sinks.k1.hdfs.rollSize = 0
     a3.sinks.k1.hdfs.rollCount = 0
     a3.sinks.k1.hdfs.idleTimeout = 0
     a3.sinks.k1.channel = c1
   3).分别在linux121、linux122、linux123上启动对应服务(先启动下游的agent)
   # linux121 (downstream collector a2 — start the downstream agents first)
   flume-ng agent --name a2 --conf-file ~/conf/flume-avro-hdfs.conf -Dflume.root.logger=INFO,console
   # linux122 (downstream collector a3)
   flume-ng agent --name a3 --conf-file ~/conf/flume-avro-hdfs.conf -Dflume.root.logger=INFO,console
   # linux123 (upstream agent a1 — start last)
   # -Dflume.root.logger=INFO,console prints agent activity to the console
   # so the failover behaviour can actually be observed.
   flume-ng agent --name a1 --conf-file ~/conf/flume-taildir-avro.conf -Dflume.root.logger=INFO,console
   4).向hive.log中写入数据，检查HDFS目录
   hdfs dfs -ls /flume
   hdfs dfs -ls /flume/failover
   
   5).杀掉linux121上优先级高的Agent(a2)，观察linux122上的备份Agent(a3)是否能接管数据传输并继续写入HDFS
 