


# Log collection: create the local directory layout for Flume configs and logs.
# (The original repeated the "source" mkdir three times; mkdir -p is idempotent,
# so each directory needs to be created only once, and -p creates parents.)
mkdir -p /data/lagoudw/conf/
mkdir -p /data/lagoudw/logs/source   # raw logs land here
mkdir -p /data/lagoudw/logs/start/   # app-start event logs
mkdir -p /data/lagoudw/logs/event/   # generic event logs

# Copy the raw log files onto linux123.
scp ./*log root@linux123:/data/lagoudw/logs/source

# On linux123 (can be triggered after flume-ng is up; in production, Flume
# monitors the log directory directly instead).
cp eventlog0721.small.log ../event/
cp start0721.small.log ../start/

# Push the Flume config to the target host (in production, use Jenkins for this).
scp flume-log2hdfs3.conf root@linux123:/data/lagoudw/conf/




# Check that the HDFS target directories received files from Flume.
hdfs dfs -ls /user/data/logs/event
hdfs dfs -ls /user/data/logs/start


# Deploy the custom Flume interceptor jar into Flume's lib directory on linux123.
scp lagouinterceptor-1.0-SNAPSHOT-jar-with-dependencies.jar root@linux123:/opt/lagou/servers/flume-1.9.0/lib/


# Debug-start the Flume agent in the foreground (log to console).
# Fixed: the official flume-ng spelling of the agent-name option is
# --name (or -n); "-name" is not a documented form.
flume-ng agent --conf /opt/lagou/servers/flume-1.9.0/conf \
      --conf-file /data/lagoudw/conf/flume-log2hdfs3.conf \
      --name a1 \
      -Dflume.root.logger=INFO,console


# Run the agent as a long-lived background daemon via nohup;
# logs go to LOGFILE, stdout/stderr are discarded.
nohup flume-ng agent --conf /opt/lagou/servers/flume-1.9.0/conf \
      --conf-file /data/lagoudw/conf/flume-log2hdfs3.conf \
      --name a1 \
      -Dflume.root.logger=INFO,LOGFILE \
      > /dev/null 2>&1 &






# Sync local project files to the server.
# All member_active artifacts go to the same remote directory, so loop over
# the filenames instead of repeating nine near-identical rsync commands.
for f in \
    ods_load_log.sh \
    dwd_load_start.sh \
    dws_load_member_start.sh \
    ads_load_member_active.sh \
    dws_load_member_add_day.sh \
    ads_load_member_add.sh \
    dws_load_member_retention_day.sh \
    ads_load_member_retention.sh \
    export_member_active_count.json; do
  rsync -rvl "code/lagoudw/script/member_active/${f}" \
    root@linux123:/data/lagoudw/script/member_active/
done

# Trade dimension load script goes to its own remote directory.
rsync -rvl code/lagoudw/script/trade/dim_load_shops.sh \
  root@linux123:/data/lagoudw/script/trade/

# DataX job definition for the shops import.
rsync -rvl code/lagoudw/json/shops_with_where.json \
  root@linux123:/data/lagoudw/json/






# Copy the DataX tarball to linux123.
scp datax.tar.gz root@linux123:/opt/lagou/software/

# Unpack into the servers directory. The original extracted the archive twice
# (once into the current directory, once into ../servers/); the in-place
# extraction was redundant and has been dropped.
tar zxvf datax.tar.gz -C ../servers/

# Run the DataX export job; -p "-Ddo_date=..." substitutes the date
# parameter into the job JSON. $DATAX_HOME is quoted in case it ever
# contains spaces.
python "$DATAX_HOME/bin/datax.py" -p "-Ddo_date=2020-07-21" /data/lagoudw/script/member_active/export_member_active_count.json


# Environment configuration for running Hive on the Tez execution engine.
# Order matters: TEZ_CONF_DIR derives from HADOOP_CONF_DIR, and
# HADOOP_CLASSPATH references both TEZ_CONF_DIR and TEZ_JARS.
export HADOOP_CONF_DIR=$HADOOP_HOME/etc/hadoop
export TEZ_CONF_DIR=$HADOOP_CONF_DIR
# The globs are stored literally here and expand later when the variable is
# used unquoted — the usual way Hadoop classpath entries are assembled.
export TEZ_JARS=/opt/lagou/servers/tez/*:/opt/lagou/servers/tez/lib/*
export HADOOP_CLASSPATH=$TEZ_CONF_DIR:$TEZ_JARS:$HADOOP_CLASSPATH




rm /opt/lagou/servers/hadoop-2.9.2/etc/hadoop/tez-size.xml