package com.atguigu.realtime.app.dwm;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.realtime.app.BaseAppV2;
import com.atguigu.realtime.bean.OrderDetail;
import com.atguigu.realtime.bean.OrderInfo;
import com.atguigu.realtime.bean.OrderWide;
import com.atguigu.realtime.util.MyDimUtil;
import com.atguigu.realtime.util.MyJdbcUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;

import java.sql.Connection;
import java.time.Duration;
import java.util.Map;

import static com.atguigu.realtime.common.Constant.PHOENIX_DRIVER;
import static com.atguigu.realtime.common.Constant.PHOENIX_URL;

/**
 * @Author lizhenchao@atguigu.cn
 * @Date 2021/4/20 14:22
 */
public class DWMOrderWideApp extends BaseAppV2 {
    public static void main(String[] args) {
        // Args: port, parallelism, consumer group id, job name, source topics.
        new DWMOrderWideApp().init(3003,
                                   2,
                                   "DWMOrderWideApp",
                                   "DWMOrderWideApp",
                                   "dwd_order_info", "dwd_order_detail");
    }
    
    @Override
    protected void run(StreamExecutionEnvironment env,
                       Map<String, DataStreamSource<String>> streams) {
        // 1. Join the two fact streams (order info + order detail) into OrderWide.
        SingleOutputStreamOperator<OrderWide> orderWideStreamWithoutDim = factTableJoin(streams);
        // 2. Enrich the joined stream with dimension attributes.
        joinDim(orderWideStreamWithoutDim);
    }
    
    /**
     * Enriches each {@link OrderWide} record with dimension attributes (user,
     * province, sku, spu, category3, trademark) looked up synchronously from
     * Phoenix via a per-task JDBC connection.
     *
     * @param orderWideStreamWithoutDim joined fact stream lacking dimension data
     */
    private void joinDim(SingleOutputStreamOperator<OrderWide> orderWideStreamWithoutDim) {
        
        /*
         Enrichment via a plain map operator (synchronous lookups).
         */
        orderWideStreamWithoutDim
            .map(new RichMapFunction<OrderWide, OrderWide>() {
                
                // One Phoenix connection per task instance; opened in open(),
                // released in close().
                private Connection conn;
                
                @Override
                public void open(Configuration parameters) throws Exception {
                    conn = MyJdbcUtil.getJdbcConnection(PHOENIX_URL, PHOENIX_DRIVER);
                    
                }
                
                @Override
                public void close() throws Exception {
                    // FIX: the original never closed the connection, leaking it
                    // on every task shutdown/restart.
                    if (conn != null && !conn.isClosed()) {
                        conn.close();
                    }
                }
                
                @Override
                public OrderWide map(OrderWide orderWide) throws Exception {
                    // NOTE(review): each readDim result is dereferenced without a
                    // null check — presumably dimension rows are guaranteed to
                    // exist; a missing row would throw NPE here. TODO confirm.
                    
                    // 1. User dimension: look up user info by user id.
                    JSONObject user = MyDimUtil.readDim(conn, "dim_user_info", orderWide.getUser_id());
                    orderWide.calcUserAge(user.getString("BIRTHDAY"));
                    orderWide.setUser_gender(user.getString("GENDER"));
                    // 2. Province dimension.
                   
                    final JSONObject provinceObj = MyDimUtil.readDim(conn, "dim_base_province", orderWide.getProvince_id());
                    orderWide.setProvince_3166_2_code(provinceObj.getString("ISO_3166_2"));
                    orderWide.setProvince_area_code(provinceObj.getString("AREA_CODE"));
                    orderWide.setProvince_name(provinceObj.getString("NAME"));
                    orderWide.setProvince_iso_code(provinceObj.getString("ISO_CODE"));
    
                    // 3. Sku dimension (also supplies spu/category3/tm ids for
                    //    the lookups below).
                    final JSONObject skuObj = MyDimUtil.readDim(conn, "dim_sku_info", orderWide.getSku_id());
                    orderWide.setSku_name(skuObj.getString("SKU_NAME"));
                    orderWide.setSpu_id(skuObj.getLong("SPU_ID"));
                    orderWide.setCategory3_id(skuObj.getLong("CATEGORY3_ID"));
                    orderWide.setTm_id(skuObj.getLong("TM_ID"));
    
                    // 4. Spu dimension.
                    final JSONObject spuObj = MyDimUtil.readDim(conn, "dim_spu_info", orderWide.getSpu_id());
                    orderWide.setSpu_name(spuObj.getString("SPU_NAME"));
    
                    // 5. Category3 dimension.
                    final JSONObject category3Obj = MyDimUtil.readDim(conn, "dim_base_category3", orderWide.getCategory3_id());
                    orderWide.setCategory3_name(category3Obj.getString("NAME"));
                    // 6. Trademark (brand) dimension.
                    final JSONObject tmObj = MyDimUtil.readDim(conn, "dim_base_trademark", orderWide.getTm_id());
                    orderWide.setTm_name(tmObj.getString("TM_NAME"));
    
    
                    return orderWide;
                }
            })
            .print();
    }
    
    /**
     * Interval-joins the order-info and order-detail streams on order id into
     * an {@link OrderWide} stream.
     *
     * @param streams topic-name -> raw JSON source stream map
     * @return joined stream without dimension attributes
     */
    private SingleOutputStreamOperator<OrderWide> factTableJoin(Map<String, DataStreamSource<String>> streams) {
        // Interval join requirements: 1) both sides keyed, 2) event time.
        KeyedStream<OrderInfo, Long> orderInfoStream = streams
            .get("dwd_order_info")
            .map(jsonString -> JSON.parseObject(jsonString, OrderInfo.class))
            .assignTimestampsAndWatermarks(
                WatermarkStrategy
                    .<OrderInfo>forBoundedOutOfOrderness(Duration.ofSeconds(2))
                    .withTimestampAssigner((orderInfo, ts) -> orderInfo.getCreate_ts())
            )
            .keyBy(OrderInfo::getId);
        
        KeyedStream<OrderDetail, Long> orderDetailStream = streams
            .get("dwd_order_detail")
            .map(jsonString -> JSON.parseObject(jsonString, OrderDetail.class))
            .assignTimestampsAndWatermarks(
                WatermarkStrategy
                    .<OrderDetail>forBoundedOutOfOrderness(Duration.ofSeconds(2))
                    .withTimestampAssigner((orderDetail, ts) -> orderDetail.getCreate_ts())
            )
            .keyBy(OrderDetail::getOrder_id);
        
        // Match detail records whose event time is within ±5s of the order info.
        return orderInfoStream
            .intervalJoin(orderDetailStream)
            .between(Time.seconds(-5), Time.seconds(5))
            .process(new ProcessJoinFunction<OrderInfo, OrderDetail, OrderWide>() {
                @Override
                public void processElement(OrderInfo orderInfo,
                                           OrderDetail orderDetail,
                                           Context ctx,
                                           Collector<OrderWide> out) throws Exception {
                    out.collect(new OrderWide(orderInfo, orderDetail));
                }
            });
        
    }
}
/*
需要频繁的读取Phoenix数据:
 问题:
    1. 频繁的读对hbase是一种压力
    2. 每读一次需要连接一次Phoenix, 网络连接太多, 读取的性能差

优化1: 缓存
    1. 使用flink的状态
        状态是本地读写方式, 所以效率比较高
        
        每次读取维度信息的时候, 先从状态读, 状态中有, 就直接使用, 没有再去Phoenix中读取, 然后把读到的数据存储到状态中
        
        缺点:
         1. 随着数据的增加, 状态大小会持续增长
         2. 如果维度数据发生变化, 则状态无法感知, 没有办法更新.
         
    2.  redis
        应该当维度发生变化的时候, 缓存能够自动的变化, 或者自动失效, 这些缓存还应该有淘汰机制
        
        a: orderWideApp负责读缓存, 和向缓存写数据
        b: 当维度变化的时候 DWDDbApp 负责更新缓存
        
优化2: 异步
    提高网络连接效率


 */