 1.在会员分析中计算最近七天连续三天活跃会员数
   
   1).采集启动日志和事件日志
   
   创建文件
   mkdir -p /data/lagoudw/conf
   cd /data/lagoudw/conf
   vim flume-log2hdfs3.conf

# Flume agent "a1": tails the start/event log directories and ships them to
# HDFS, routed by a per-filegroup "logtype" header.
a1.sources = r1
a1.sinks = k1
a1.channels = c1

# taildir source: the position file records read offsets so collection can
# resume without loss or duplication after an agent restart
a1.sources.r1.type = TAILDIR
a1.sources.r1.positionFile =/data/lagoudw/conf/startlog_position.json
a1.sources.r1.filegroups = f1 f2
a1.sources.r1.filegroups.f1 = /data/lagoudw/logs/start/.*log
a1.sources.r1.headers.f1.logtype = start
a1.sources.r1.filegroups.f2 = /data/lagoudw/logs/event/.*log
a1.sources.r1.headers.f2.logtype = event

# custom interceptor: per the notes at the end of this document, it extracts
# the timestamp from the JSON body into the event header, which the HDFS sink
# consumes below as %{logtime}
a1.sources.r1.interceptors = i1
a1.sources.r1.interceptors.i1.type =cn.lagou.dw.flume.interceptor.LogTypeInterceptor$Builder

# memory channel
a1.channels.c1.type = memory
a1.channels.c1.capacity = 100000
a1.channels.c1.transactionCapacity = 2000

# hdfs sink: output path is driven entirely by event headers (logtype/logtime)
a1.sinks.k1.type = hdfs
a1.sinks.k1.hdfs.path = /user/data/logs/%{logtype}/dt=%{logtime}/
a1.sinks.k1.hdfs.filePrefix = startlog.
a1.sinks.k1.hdfs.fileType = DataStream

# rolling policy: roll on file size only (32 MB); count/interval/idle rolling
# are disabled to avoid producing many small HDFS files
a1.sinks.k1.hdfs.rollSize = 33554432
a1.sinks.k1.hdfs.rollCount = 0
a1.sinks.k1.hdfs.rollInterval = 0
a1.sinks.k1.hdfs.idleTimeout = 0
# keep replication at 1 while the file is open, preventing premature rolls
# when the pipeline cannot immediately reach full replication
a1.sinks.k1.hdfs.minBlockReplicas = 1

# number of events flushed to HDFS per batch
a1.sinks.k1.hdfs.batchSize = 1000

# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
   
   测试
   启动Agent，拷贝日志，检查HDFS文件
   # 清理环境
   rm -f /data/lagoudw/conf/startlog_position.json
   rm -f /data/lagoudw/logs/start/*.log
   rm -f /data/lagoudw/logs/event/*.log        
   hdfs dfs -rm -r -f /user/data/logs/start/* /user/data/logs/event/*
   
   # 启动 Agent
   flume-ng agent --conf /opt/lagou/servers/flume-1.9.0/conf --conf-file
 /data/lagoudw/conf/flume-log2hdfs3.conf -name a1 -Dflume.root.logger=INFO,console
   
   # 拷贝日志
   cd /data/lagoudw/logs/
   cp source/start0721.log start/start1.log
   cp source/start0722.log start/start2.log
   cp source/start0723.log start/start3.log
   cp source/start0724.log start/start4.log
   cp source/start0725.log start/start5.log
   cp source/start0726.log start/start6.log
   cp source/start0727.log start/start7.log
   # 检查HDFS文件
   hdfs dfs -ls /user/data/logs/start
   2).创建ODS层表：
   use ods;
-- ODS layer: raw start-log table. External table over the Flume HDFS output;
-- each row holds one raw log line, partitioned by day.
DROP TABLE IF EXISTS ods.ods_start_log;
CREATE EXTERNAL TABLE ods.ods_start_log (
    `str` string
)
COMMENT '用户启动日志信息'
PARTITIONED BY (`dt` string)
LOCATION '/user/data/logs/start';
     加载启动日志数据脚本
	 cd /data/lagoudw/script/member_active/
	 vim ods_load_log.sh
	 
#!/bin/bash
# Register one day's HDFS directory as a partition of the ODS start-log table.
# Usage: sh ods_load_log.sh [yyyy-mm-dd]
# If no date argument is given, yesterday's date is used.
# NOTE: the original file's shebang used a fullwidth '！' (#！/bin/bash),
# which is not a valid shebang; fixed here.

APP=ODS
hive=/opt/lagou/servers/hive-2.3.7/bin/hive

# Accept an explicit date; otherwise default to yesterday
if [ -n "$1" ]
then
    do_date=$1
else
    do_date=`date -d "-1 day" +%F`
fi

# SQL to execute: ADD PARTITION makes the day's data visible to queries
sql="alter table ${APP}.ods_start_log add partition(dt='$do_date');"

$hive -e "$sql"
   执行脚本
   sh ods_load_log.sh 2020-07-21
   sh ods_load_log.sh 2020-07-22
   sh ods_load_log.sh 2020-07-23
   sh ods_load_log.sh 2020-07-24
   sh ods_load_log.sh 2020-07-25
   sh ods_load_log.sh 2020-07-26
   sh ods_load_log.sh 2020-07-27
   查看ods表数据
   show partitions ods_start_log;
   OK
partition
dt=2020-07-21
dt=2020-07-22
dt=2020-07-23
dt=2020-07-24
dt=2020-07-25
dt=2020-07-26
dt=2020-07-27
   hive (ods)> select * from ods_start_log;
   3).创建DWD层表
   use dwd;
   drop table if exists dwd.dwd_start_log;
   CREATE TABLE dwd.dwd_start_log(
`device_id` string,
`area` string,
`uid` string,
`app_v` string,
`event_type` string,
`os_type` string,
`channel` string,
`language` string,
`brand` string,
`entry` string,
`action` string,
`error_code` string
)
PARTITIONED BY (dt string)
STORED AS parquet;
   表的格式：parquet、分区表
   加载DWD层数据
   script/member_active/dwd_load_start.sh
   vi dwd_load_start.sh
   
#!/bin/bash
# Parse one day of raw start logs from ODS into the DWD detail table.
# Usage: sh dwd_load_start.sh [yyyy-mm-dd]  (defaults to yesterday)
# NOTE: the original file's shebang used a fullwidth '！' (#！/bin/bash),
# which is not a valid shebang; fixed here.

source /etc/profile

# Accept an explicit date; otherwise default to yesterday
if [ -n "$1" ]
then
    do_date=$1
else
    do_date=`date -d "-1 day" +%F`
fi

# The raw line is space-delimited with the JSON payload in field 8
# (split index 7); get_json_object then flattens the JSON attributes.
# '$.attr.*' is not expanded by bash: '$' followed by '.' is literal.
sql="
with tmp as(
select split(str, ' ')[7] line
from ods.ods_start_log
where dt='$do_date'
)
insert overwrite table dwd.dwd_start_log
partition(dt='$do_date')
select get_json_object(line, '$.attr.device_id'),
get_json_object(line, '$.attr.area'),
get_json_object(line, '$.attr.uid'),
get_json_object(line, '$.attr.app_v'),
get_json_object(line, '$.attr.event_type'),
get_json_object(line, '$.attr.os_type'),
get_json_object(line, '$.attr.channel'),
get_json_object(line, '$.attr.language'),
get_json_object(line, '$.attr.brand'),
get_json_object(line, '$.app_active.json.entry'),
get_json_object(line, '$.app_active.json.action'),
get_json_object(line, '$.app_active.json.error_code')
from tmp;"

hive -e "$sql"
   
   加载数据执行脚本
   sh dwd_load_start.sh 2020-07-21
   sh dwd_load_start.sh 2020-07-22
   sh dwd_load_start.sh 2020-07-23
   sh dwd_load_start.sh 2020-07-24
   sh dwd_load_start.sh 2020-07-25
   sh dwd_load_start.sh 2020-07-26
   sh dwd_load_start.sh 2020-07-27
   4).创建DWS层表 
   use dws;
-- DWS layer: daily active-member summary, one row per device per day.
-- Multi-valued attributes are collapsed into '|'-joined distinct sets.
DROP TABLE IF EXISTS dws.dws_member_start_day;
CREATE TABLE dws.dws_member_start_day (
    `device_id` string,
    `uid`       string,
    `app_v`     string,
    `os_type`   string,
    `language`  string,
    `channel`   string,
    `area`      string,
    `brand`     string,
    `dt`        string
) COMMENT '会员日启动汇总'
STORED AS parquet;

    创建加载数据脚本
	script/member_active/dws_load_member_start1.sh
	vim dws_load_member_start1.sh

#!/bin/bash
# Summarize one day of DWD start logs into the DWS daily active-member table.
# Usage: sh dws_load_member_start1.sh [yyyy-mm-dd]  (defaults to yesterday)
# NOTE: the original file's shebang used a fullwidth '！' (#！/bin/bash),
# which is not a valid shebang; fixed here.

source /etc/profile

# Accept an explicit date; otherwise default to yesterday
if [ -n "$1" ]
then
    do_date=$1
else
    do_date=`date -d "-1 day" +%F`
fi

# One row per device per day; attribute sets collapsed with '|'.
# CAUTION: dws_member_start_day is not partitioned and this is "insert into",
# so re-running the same date appends duplicate rows for that day.
sql="
insert into table dws.dws_member_start_day
select device_id,
concat_ws('|', collect_set(uid)),
concat_ws('|', collect_set(app_v)),
concat_ws('|', collect_set(os_type)),
concat_ws('|', collect_set(language)),
concat_ws('|', collect_set(channel)),
concat_ws('|', collect_set(area)),
concat_ws('|', collect_set(brand)),
'$do_date' 
from dwd.dwd_start_log
where dt='$do_date'
group by device_id;
"

hive -e "$sql"
   加载数据执行脚本
   sh dws_load_member_start1.sh 2020-07-21
   sh dws_load_member_start1.sh 2020-07-22
   sh dws_load_member_start1.sh 2020-07-23
   sh dws_load_member_start1.sh 2020-07-24
   sh dws_load_member_start1.sh 2020-07-25
   sh dws_load_member_start1.sh 2020-07-26
   sh dws_load_member_start1.sh 2020-07-27
   
   5).创建ads层表，计算7天之内连续3天登陆的用户
   use ads;
-- ADS layer: members active on >= 3 consecutive days within the query window.
-- gid is the date_sub grouping key from the consecutive-day calculation;
-- login_count is the length of the streak.
-- Fixes vs original: the COMMENT was copy-pasted from the DWS table
-- ('会员日启动汇总'); storage format added for consistency with DWD/DWS.
drop table if exists ads.ads_continue_login_3_days;
create table ads.ads_continue_login_3_days
(
`device_id` string,
`uid` string,
`app_v` string,
`os_type` string,
`language` string,
`channel` string,
`area` string,
`brand` string,
`gid` string,
`login_count` int
) COMMENT '7天内连续3天登录会员'
partitioned by(dt string)
stored as parquet;
    创建加载数据脚本
	cd /data/lagoudw/script/member_active
	vim  ads_load_continue_login_3_days.sh

#!/bin/bash
# Compute members active on >= 3 consecutive days within [start_date, end_date]
# and load the result into the ADS partition dt=end_date.
# Usage: sh ads_load_continue_login_3_days.sh <start_date> <end_date>
# NOTE: the original file's shebang used a fullwidth '！' (#！/bin/bash),
# which is not a valid shebang; fixed here.

source /etc/profile

# Both dates are required: empty values would produce a broken partition
# spec and an unbounded scan, so fail fast with a usage message.
if [ $# -lt 2 ]; then
    echo "Usage: $0 <start_date> <end_date>"
    exit 1
fi
start_date=$1
end_date=$2

# Consecutive-day trick: date_sub(dt, row_number() over (partition by device
# order by dt)) yields the same value (gid) for every day in an unbroken run
# of consecutive dates, so grouping by gid and HAVING count >= 3 finds streaks.
sql="
insert overwrite table ads.ads_continue_login_3_days
partition(dt='$end_date')
SELECT
device_id,
uid,
app_v,
os_type,
LANGUAGE,
channel,
area,
brand,
gid,
count(1)
FROM
(
SELECT
device_id,
uid,
app_v,
os_type,
LANGUAGE,
channel,
area,
brand,
date_sub(dt, row_number () over (PARTITION BY device_id ORDER BY dt)) gid
FROM
dws.dws_member_start_day
WHERE dt >= '$start_date' AND dt <= '$end_date'
) tmp
GROUP BY device_id,uid,app_v,os_type,LANGUAGE,channel,area,brand,gid
HAVING count(1) >= 3;
"
hive -e "$sql"
   
   6)执行脚本并查看数据
   sh ads_load_continue_login_3_days.sh 2020-07-21 2020-07-27

 2.项目的数据采集过程中，有哪些地方能够优化，如何实现？
   
   使用taildir source 监控指定的多个目录，可以给不同目录的日志加上不同header
   在每个目录中可以使用正则匹配多个文件
   使用自定义拦截器，主要功能是从json串中获取时间戳，加到event的header中
   hdfs sink使用event header中的信息写数据（控制写文件的位置）
   hdfs文件的滚动方式（基于文件大小、基于event数量、基于时间）
   调节flume jvm内存的分配
   
   
  
   