package com.atguigu.gmall.realtime.app.dwm;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.gmall.realtime.app.BaseAppV2;
import com.atguigu.gmall.realtime.bean.OrderDetail;
import com.atguigu.gmall.realtime.bean.OrderInfo;
import com.atguigu.gmall.realtime.bean.OrderWide;
import com.atguigu.gmall.realtime.common.Constant;
import com.atguigu.gmall.realtime.function.DimAsyncFunction;
import com.atguigu.gmall.realtime.util.DimUtil;
import com.atguigu.gmall.realtime.util.FlinkSinkUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.streaming.api.datastream.AsyncDataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.async.ResultFuture;
import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;
import redis.clients.jedis.Jedis;

import java.sql.Connection;
import java.text.ParseException;
import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.concurrent.TimeUnit;

import static com.atguigu.gmall.realtime.common.Constant.TOPIC_DWD_ORDER_DETAIL;
import static com.atguigu.gmall.realtime.common.Constant.TOPIC_DWD_ORDER_INFO;

/**
 * DWM order-wide job: interval-joins the DWD order-info and order-detail
 * fact streams, asynchronously enriches every joined record with dimension
 * attributes (user, province, sku, spu, trademark, category3) looked up
 * through a Redis side cache backed by Phoenix/HBase, and writes the wide
 * records to Kafka as JSON.
 *
 * @Author lizhenchao@atguigu.cn
 * @Date 2021/9/30 14:15
 */
public class DwmOrderWide_Cache_Async extends BaseAppV2 {
    public static void main(String[] args) {
        new DwmOrderWide_Cache_Async().init(
            3003,
            1,
            "DwmOrderWide_Cache_Async",
            "DwmOrderWide_Cache_Async",
            TOPIC_DWD_ORDER_INFO, TOPIC_DWD_ORDER_DETAIL);
    }
    
    @Override
    public void run(StreamExecutionEnvironment env,
                    HashMap<String, DataStreamSource<String>> streams) {
        // 1. Join the two fact streams (order info + order detail).
        SingleOutputStreamOperator<OrderWide> orderWideWithoutDimStream = joinFact(streams);
        // 2. Enrich the joined records with dimension attributes.
        SingleOutputStreamOperator<OrderWide> orderWideWithDimStream = joinDim(orderWideWithoutDimStream);
        // 3. Write the enriched records to Kafka.
        writeToKafka(orderWideWithDimStream);
    }
    
    /** Serializes each OrderWide to JSON and sinks it to the DWM order-wide topic. */
    private void writeToKafka(SingleOutputStreamOperator<OrderWide> stream) {
        stream
            .map(JSON::toJSONString)
            .addSink(FlinkSinkUtil.getKafkaSink(Constant.TOPIC_DWM_ORDER_WIDE));
    }
    
    /**
     * Asynchronously fills in the dimension attributes of each OrderWide.
     * Every lookup goes through {@link DimUtil#getDim}, which reads the Redis
     * cache first and falls back to Phoenix on a miss. Results may complete
     * out of order (unorderedWait); each async call times out after 60 s.
     *
     * NOTE(review): if a dimension row is missing, getDim presumably returns
     * null and the following getString throws an NPE inside the async thread,
     * so the future never completes and the operator eventually times out —
     * see the troubleshooting notes at the end of this file (all six
     * dimension tables must be bootstrapped). Confirm getDim's null contract.
     */
    private SingleOutputStreamOperator<OrderWide> joinDim(SingleOutputStreamOperator<OrderWide> stream) {
        return AsyncDataStream.unorderedWait(
            stream,
            new DimAsyncFunction<OrderWide>() {
                @Override
                public void addDim(Connection conn,
                                   Jedis redisClient,
                                   OrderWide orderWide,
                                   ResultFuture<OrderWide> resultFuture) {
                    // 1. User dimension: gender, plus age derived from birthday.
                    JSONObject userInfo = DimUtil.getDim(redisClient, conn, "dim_user_info", orderWide.getUser_id());
                    orderWide.setUser_gender(userInfo.getString("GENDER"));
                    try {
                        orderWide.calcUser_Age(userInfo.getString("BIRTHDAY"));
                    } catch (ParseException e) {
                        // Best effort: an unparseable birthday leaves the age
                        // unset rather than failing the whole record.
                        e.printStackTrace();
                    }
                    
                    // 2. Province dimension.
                    JSONObject baseProvince = DimUtil.getDim(redisClient, conn, "dim_base_province", orderWide.getProvince_id());
                    orderWide.setProvince_name(baseProvince.getString("NAME"));
                    orderWide.setProvince_iso_code(baseProvince.getString("ISO_CODE"));
                    orderWide.setProvince_area_code(baseProvince.getString("AREA_CODE"));
                    orderWide.setProvince_3166_2_code(baseProvince.getString("ISO_3166_2"));
                    
                    // 3. Sku dimension; also yields the spu/tm/category3 ids
                    //    required by the lookups below, so it must run first.
                    JSONObject skuInfo = DimUtil.getDim(redisClient, conn, "dim_sku_info", orderWide.getSku_id());
                    orderWide.setSku_name(skuInfo.getString("SKU_NAME"));
                    orderWide.setSpu_id(skuInfo.getLong("SPU_ID"));
                    orderWide.setTm_id(skuInfo.getLong("TM_ID"));
                    orderWide.setCategory3_id(skuInfo.getLong("CATEGORY3_ID"));
                    
                    // 4. Spu dimension.
                    JSONObject spuInfo = DimUtil.getDim(redisClient, conn, "dim_spu_info", orderWide.getSpu_id());
                    orderWide.setSpu_name(spuInfo.getString("SPU_NAME"));
                    
                    // 5. Trademark dimension.
                    JSONObject tmInfo = DimUtil.getDim(redisClient, conn, "dim_base_trademark", orderWide.getTm_id());
                    orderWide.setTm_name(tmInfo.getString("TM_NAME"));
                    
                    // 6. Category3 dimension.
                    JSONObject c3Info = DimUtil.getDim(redisClient, conn, "dim_base_category3", orderWide.getCategory3_id());
                    orderWide.setCategory3_name(c3Info.getString("NAME"));
                    
                    // Hand the fully enriched record back to the async framework.
                    resultFuture.complete(Collections.singletonList(orderWide));
                }
            },
            60,
            TimeUnit.SECONDS
        );
    }
    
    /**
     * Interval-joins the order-info and order-detail streams on order id
     * (detail within ±10 s of the order-info event time) and emits one
     * OrderWide per matched pair. Both streams use a 3 s
     * bounded-out-of-orderness watermark with create_ts as the event time.
     */
    private SingleOutputStreamOperator<OrderWide> joinFact(HashMap<String, DataStreamSource<String>> streams) {
        KeyedStream<OrderInfo, Long> orderInfoStream = streams
            .get(TOPIC_DWD_ORDER_INFO)
            .map(data -> JSON.parseObject(data, OrderInfo.class))
            .assignTimestampsAndWatermarks(
                WatermarkStrategy
                    .<OrderInfo>forBoundedOutOfOrderness(Duration.ofSeconds(3))
                    .withTimestampAssigner((info, ts) -> info.getCreate_ts())
            )
            .keyBy(OrderInfo::getId);
        
        KeyedStream<OrderDetail, Long> orderDetailStream = streams
            .get(TOPIC_DWD_ORDER_DETAIL)
            .map(data -> JSON.parseObject(data, OrderDetail.class))
            .assignTimestampsAndWatermarks(
                WatermarkStrategy
                    .<OrderDetail>forBoundedOutOfOrderness(Duration.ofSeconds(3))
                    .withTimestampAssigner((detail, ts) -> detail.getCreate_ts())
            )
            .keyBy(OrderDetail::getOrder_id);
        
        return orderInfoStream
            .intervalJoin(orderDetailStream)
            .between(Time.seconds(-10), Time.seconds(10))
            .process(new ProcessJoinFunction<OrderInfo, OrderDetail, OrderWide>() {
                @Override
                public void processElement(OrderInfo left,
                                           OrderDetail right,
                                           Context ctx,
                                           Collector<OrderWide> out) throws Exception {
                    out.collect(new OrderWide(left, right));
                }
            });
    }
}
/*
去数据库查询, 执行sql, 效率比较低, 对数据库的压力也比较大

给查询加缓存, 同一条维度数据, 第一次从数据库, 以后再用, 应该从缓存查, 提高查询速度

缓存的位置:
1. 用flink的状态
    优点:
        本地内存, 速度特快

    缺点:
        1. 如果缓存的维度过多, 对flink的内存影响比较大
        2. 无法感知到维度数据的变化

2. 应外置缓存: redis   旁路缓存

    优点:
        如果维度发生变化, 可以实时的修改缓存中的数据

    缺点:
        1. 通过网络读写redis, 影响读写速度
        2. 如果数据量过大, 也会对内存造成压力

----

redis

Phoenix(hbase)

通过网络

1. 查询每次都是通过网络, 网络等待时间比较长

2. 通过同步的方式发送的网络请求

---
导致异步超时:
1. redis服务器要开启
2. 维度数据的大小写要一致
3. hbase要开启
4. 维度数据要完整
    务必要把6张维度表全部 bootstrap一遍
 */