package com.atguigu.realtime.app.dwm;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.realtime.app.BaseAppV2;
import com.atguigu.realtime.bean.OrderDetail;
import com.atguigu.realtime.bean.OrderInfo;
import com.atguigu.realtime.bean.OrderWide;
import com.atguigu.realtime.common.Constant;
import com.atguigu.realtime.function.DimAsyncFunction;
import com.atguigu.realtime.util.DimUtil;
import com.atguigu.realtime.util.FlinkSinkUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.streaming.api.datastream.AsyncDataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.async.ResultFuture;
import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;
import redis.clients.jedis.Jedis;

import java.sql.Connection;
import java.time.Duration;
import java.util.Collections;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import static com.atguigu.realtime.common.Constant.TOPIC_DWD_ORDER_DETAIL;
import static com.atguigu.realtime.common.Constant.TOPIC_DWD_ORDER_INFO;

/**
 * @Author lizhenchao@atguigu.cn
 * @Date 2022/2/12 14:02
 */
public class DwmOrderWideApp_Cache_Async extends BaseAppV2 {
    public static void main(String[] args) {
        new DwmOrderWideApp_Cache_Async().init(
            3003,
            1,
            "DwmOrderWideApp_Cache_Async",
            "DwmOrderWideApp_Cache_Async",
            TOPIC_DWD_ORDER_INFO, TOPIC_DWD_ORDER_DETAIL
        );
    }
    
    @Override
    protected void run(StreamExecutionEnvironment env,
                       Map<String, DataStreamSource<String>> topicStreamMap) {
        
        // Step 1: fact-to-fact join (order_info x order_detail) via an interval join.
        SingleOutputStreamOperator<OrderWide> joinedFacts = factAndFactJoin(topicStreamMap);
        // Step 2: fact-to-dim join -- async lookups per dimension id (redis cache + jdbc query).
        SingleOutputStreamOperator<OrderWide> enrichedFacts = factAndDimJoin(joinedFacts);
        // Step 3: publish the enriched wide records to Kafka (dwm_order_wide).
        write2Kafka(enrichedFacts);
    }
    
    /** Serializes each OrderWide to JSON and sinks it to the dwm_order_wide topic. */
    private void write2Kafka(SingleOutputStreamOperator<OrderWide> stream) {
        stream
            .map(JSON::toJSONString)
            .addSink(FlinkSinkUtil.getKafkaSink(Constant.TOPIC_DWM_ORDER_WIDE));
    }
    
    /**
     * Enriches the fact stream with the six dimension tables using Flink async I/O.
     * Each lookup goes through DimUtil.readDim (redis cache in front of a jdbc
     * connection); an individual async request times out after 30 seconds.
     */
    private SingleOutputStreamOperator<OrderWide> factAndDimJoin(SingleOutputStreamOperator<OrderWide> factStream) {
        return AsyncDataStream.unorderedWait(
            factStream,
            new DimAsyncFunction<OrderWide>() {
                @Override
                protected void addDim(Jedis jedis,
                                      Connection phoenixConn,
                                      OrderWide wide,
                                      ResultFuture<OrderWide> future) throws Exception {
                    // 1. dim_user_info: gender plus age derived from birthday
                    JSONObject user = DimUtil.readDim(jedis, phoenixConn, "dim_user_info", wide.getUser_id());
                    wide.setUser_gender(user.getString("GENDER"));
                    wide.calculateUserAge(user.getString("BIRTHDAY"));
                    
                    // 2. dim_base_province
                    JSONObject province = DimUtil.readDim(jedis, phoenixConn, "dim_base_province", wide.getProvince_id());
                    wide.setProvince_name(province.getString("NAME"));
                    wide.setProvince_3166_2_code(province.getString("ISO_3166_2"));
                    wide.setProvince_area_code(province.getString("AREA_CODE"));
                    wide.setProvince_iso_code(province.getString("ISO_CODE"));
                    
                    // 3. dim_sku_info -- must be read before spu/trademark/category:
                    //    it supplies the spu_id, tm_id and category3_id those lookups key on.
                    JSONObject sku = DimUtil.readDim(jedis, phoenixConn, "dim_sku_info", wide.getSku_id());
                    wide.setSku_name(sku.getString("SKU_NAME"));
                    wide.setOrder_price(sku.getBigDecimal("PRICE"));
                    wide.setSpu_id(sku.getLong("SPU_ID"));
                    wide.setTm_id(sku.getLong("TM_ID"));
                    wide.setCategory3_id(sku.getLong("CATEGORY3_ID"));
                    
                    // 4. dim_spu_info (keyed by the spu_id just resolved from the sku)
                    JSONObject spu = DimUtil.readDim(jedis, phoenixConn, "dim_spu_info", wide.getSpu_id());
                    wide.setSpu_name(spu.getString("SPU_NAME"));
                    
                    // 5. dim_base_trademark
                    JSONObject trademark = DimUtil.readDim(jedis, phoenixConn, "dim_base_trademark", wide.getTm_id());
                    wide.setTm_name(trademark.getString("TM_NAME"));
                    
                    // 6. dim_base_category3
                    JSONObject category3 = DimUtil.readDim(jedis, phoenixConn, "dim_base_category3", wide.getCategory3_id());
                    wide.setCategory3_name(category3.getString("NAME"));
                    
                    // Emit the fully enriched record downstream.
                    future.complete(Collections.singletonList(wide));
                }
            },
            30,
            TimeUnit.SECONDS
        );
    }
    
    /**
     * Interval-joins the order_info and order_detail streams on the order id,
     * producing one OrderWide per (info, detail) pair. Both sides use a
     * 3-second bounded-out-of-orderness watermark; matches are accepted within
     * +/- 5 seconds of event time.
     */
    private SingleOutputStreamOperator<OrderWide> factAndFactJoin(Map<String, DataStreamSource<String>> topicStreamMap) {
        WatermarkStrategy<OrderInfo> infoWatermarks = WatermarkStrategy
            .<OrderInfo>forBoundedOutOfOrderness(Duration.ofSeconds(3))
            .withTimestampAssigner((info, ts) -> info.getCreate_ts());
        KeyedStream<OrderInfo, Long> infoStream = topicStreamMap
            .get(TOPIC_DWD_ORDER_INFO)
            .map(json -> JSON.parseObject(json, OrderInfo.class))
            .assignTimestampsAndWatermarks(infoWatermarks)
            .keyBy(OrderInfo::getId);
        
        WatermarkStrategy<OrderDetail> detailWatermarks = WatermarkStrategy
            .<OrderDetail>forBoundedOutOfOrderness(Duration.ofSeconds(3))
            .withTimestampAssigner((detail, ts) -> detail.getCreate_ts());
        KeyedStream<OrderDetail, Long> detailStream = topicStreamMap
            .get(TOPIC_DWD_ORDER_DETAIL)
            .map(json -> JSON.parseObject(json, OrderDetail.class))
            .assignTimestampsAndWatermarks(detailWatermarks)
            .keyBy(OrderDetail::getOrder_id);
        
        return infoStream
            .intervalJoin(detailStream)
            .between(Time.seconds(-5), Time.seconds(5))
            .process(new ProcessJoinFunction<OrderInfo, OrderDetail, OrderWide>() {
                @Override
                public void processElement(OrderInfo info,
                                           OrderDetail detail,
                                           Context ctx,
                                           Collector<OrderWide> out) throws Exception {
                    out.collect(new OrderWide(info, detail));
                }
            });
    }
}
/*
异步超时解决的问题步骤:
1. 确认用到的集群全部正常开启
    hbase和redis
    
2. 检查6张维度表是否都在
    保证6张表都在, 并且其中有数据
    尤其是dim_user_info 数据是4000
    6张表都用 maxwell bootstrap 功能初始化一次
    
3. 检查维度更新的时候有没有字段名设置为大写

4. 打包不要打包Phoenix, 把Phoenix依赖直接copy flink/lib

5. 找我



第一条数据
   通过网络连接数据库->数据库执行查询-> 通过网络把数据返回
第二条数据
   通过网络连接数据库->数据库执行查询-> 通过网络把数据返回


-------

2.14
同步:
 追女孩
给女神发信息-> 问是否同意做女朋友 -> 等待半年 -> 不同意
给下一个女神发信息-> 问是否同意做女朋友 -> 等待1年 -> 不同意
给下一个女神发信息-> 问是否同意做女朋友 -> 等待半年 -> 不同意
...

异步:
给女神发信息  给下一个女神发信息  给下一个女神发信息  给下一个女神发信息

...
----




*/
