package com.atgugu.realtime.app.dwm;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atgugu.realtime.app.BaseAppV2;
import com.atgugu.realtime.bean.OrderDetail;
import com.atgugu.realtime.bean.OrderInfo;
import com.atgugu.realtime.bean.OrderWide;
import com.atgugu.realtime.function.DimAsyncFunction;
import com.atgugu.realtime.util.DimUtil;
import com.atgugu.realtime.util.FlinkSinkUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.streaming.api.datastream.AsyncDataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.async.ResultFuture;
import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;
import redis.clients.jedis.Jedis;

import java.sql.Connection;
import java.text.ParseException;
import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.concurrent.TimeUnit;

import static com.atgugu.realtime.common.Constant.*;

/**
 * DWM layer: builds the order wide table.
 *
 * <p>Pipeline: interval-join the {@code dwd_order_info} and
 * {@code dwd_order_detail} fact streams, asynchronously enrich each joined
 * record with six dimension tables (Redis cache in front of Phoenix), then
 * publish the wide records to Kafka.
 *
 * @Author lizhenchao@atguigu.cn
 * @Date 2021/12/28 14:19
 */
public class DwmOrderWideApp_Cache_Async extends BaseAppV2 {
    public static void main(String[] args) {
        // port 3003, parallelism 1, group id / job name, then the two source topics
        new DwmOrderWideApp_Cache_Async().init(3003, 1, "DwmOrderWideApp_Cache_Async", "DwmOrderWideApp_Cache_Async",
                                               TOPIC_DWD_ORDER_INFO, TOPIC_DWD_ORDER_DETAIL);
    }
    
    @Override
    protected void run(StreamExecutionEnvironment env,
                       HashMap<String, DataStreamSource<String>> topicToStream) {
        // 1. join the two fact streams (order info + order detail)
        SingleOutputStreamOperator<OrderWide> orderWideStreamWithoutDims = factsJoin(topicToStream);
        
        // 2. enrich the joined records with dimension data
        SingleOutputStreamOperator<OrderWide> orderWideStreamWithDims = factDims(orderWideStreamWithoutDims);
        
        // 3. write the wide-table records to Kafka
        writeToKafka(orderWideStreamWithDims);
        
    }
    
    /** Serializes each {@link OrderWide} to JSON and sinks it to the DWM Kafka topic. */
    private void writeToKafka(SingleOutputStreamOperator<OrderWide> stream) {
        stream
            .map(JSON::toJSONString)
            .addSink(FlinkSinkUtil.getKafkaSink(TOPIC_DWM_ORDER_WIDE));
        
    }
    
    /**
     * Asynchronously joins six dimension tables onto every {@link OrderWide}.
     *
     * <p>Each lookup goes through {@code DimUtil.readDim} (Redis cache with a
     * Phoenix fallback — presumably; confirm against DimUtil). Results may
     * complete out of order, hence {@code unorderedWait}; each element gets a
     * 30-second timeout before {@code timeout()} is invoked.
     *
     * @param orderWideStreamWithoutDims joined fact records lacking dimension fields
     * @return the same records with user/province/sku/spu/trademark/category3 fields filled in
     */
    private SingleOutputStreamOperator<OrderWide> factDims(SingleOutputStreamOperator<OrderWide> orderWideStreamWithoutDims) {
        return AsyncDataStream.unorderedWait(
            orderWideStreamWithoutDims,
            new DimAsyncFunction<OrderWide>() {
                
                @Override
                public void timeout(OrderWide input,
                                    ResultFuture<OrderWide> resultFuture) throws Exception {
                    // Deliberately fail the job with a troubleshooting checklist for the
                    // operator. The (Chinese) message is runtime output and kept verbatim.
                    throw new RuntimeException(" 异步超时:\n" +
                                                   " 1. 检测你的集群是否全部开启: redis Phoenix hadoop ...\n" +
                                                   " 2. 检测在Phoenix中, 六张维度表数据是否都在\n" +
                                                   " 3. 检测dim_user_info的数据是否4000\n" +
                                                   " 4. 检测一些redis是否允许远程连接\n" +
                                                   "            redis-server /etc/redis.conf\n" +
                                                   " 5. 检测redis中维度的缓存数据中列名的大写问题\n" +
                                                   " 6. 找我");
                }
                
                @Override
                public void addDim(Jedis jedis,
                                   Connection phoenixConn,
                                   OrderWide orderWide,
                                   ResultFuture<OrderWide> resultFuture) {
                    // 1. user dimension: gender and age (derived from birthday)
                    JSONObject userInfo = DimUtil.readDim(phoenixConn, jedis, "dim_user_info", orderWide.getUser_id());
                    orderWide.setUser_gender(userInfo.getString("GENDER"));
                    try {
                        orderWide.calcUserAge(userInfo.getString("BIRTHDAY"));
                    } catch (ParseException e) {
                        // FIX: previously e.printStackTrace() — the exception was swallowed
                        // and records were emitted with no age. Fail the async operation
                        // through the ResultFuture contract so Flink surfaces the bad data.
                        resultFuture.completeExceptionally(new RuntimeException(
                            "Unparseable BIRTHDAY in dim_user_info for user_id=" + orderWide.getUser_id(), e));
                        return;
                    }
                    
                    // 2. province dimension
                    JSONObject provinceInfo = DimUtil.readDim(phoenixConn, jedis, "dim_base_province", orderWide.getProvince_id());
                    orderWide.setProvince_3166_2_code(provinceInfo.getString("ISO_3166_2"));
                    orderWide.setProvince_area_code(provinceInfo.getString("AREA_CODE"));
                    orderWide.setProvince_iso_code(provinceInfo.getString("ISO_CODE"));
                    orderWide.setProvince_name(provinceInfo.getString("NAME"));
                    
                    // 3. sku dimension (also yields the spu/tm/category3 foreign keys
                    //    used by the lookups below)
                    JSONObject skuInfo = DimUtil.readDim(phoenixConn, jedis, "dim_sku_info", orderWide.getSku_id());
                    orderWide.setSku_name(skuInfo.getString("SKU_NAME"));
                    orderWide.setOrder_price(skuInfo.getBigDecimal("PRICE"));
                    
                    orderWide.setSpu_id(skuInfo.getLong("SPU_ID"));
                    orderWide.setTm_id(skuInfo.getLong("TM_ID"));
                    orderWide.setCategory3_id(skuInfo.getLong("CATEGORY3_ID"));
                    
                    // 4. spu dimension
                    JSONObject spuInfo = DimUtil.readDim(phoenixConn, jedis, "dim_spu_info", orderWide.getSpu_id());
                    orderWide.setSpu_name(spuInfo.getString("SPU_NAME"));
                    // 5. trademark dimension
                    JSONObject tmInfo = DimUtil.readDim(phoenixConn, jedis, "dim_base_trademark", orderWide.getTm_id());
                    orderWide.setTm_name(tmInfo.getString("TM_NAME"));
                    // 6. category3 dimension (was mis-numbered "5." in the original)
                    JSONObject c3Info = DimUtil.readDim(phoenixConn, jedis, "dim_base_category3", orderWide.getCategory3_id());
                    orderWide.setCategory3_name(c3Info.getString("NAME"));
                    
                    // hand the fully-enriched record back to the stream
                    resultFuture.complete(Collections.singletonList(orderWide));
                    
                }
            },
            30,
            TimeUnit.SECONDS
        );
        
    }
    
    /**
     * Interval-joins order_info with order_detail, keyed by order id.
     *
     * <p>Interval join requires: 1. event time  2. keyed streams. Both sides
     * use a 3-second bounded-out-of-orderness watermark on {@code create_ts},
     * and details within ±5 seconds of their order are matched.
     *
     * @param topicToStream map from source topic name to its raw JSON stream
     * @return one {@link OrderWide} per matched (order, detail) pair
     */
    private SingleOutputStreamOperator<OrderWide> factsJoin(HashMap<String, DataStreamSource<String>> topicToStream) {
        KeyedStream<OrderInfo, Long> orderInfoStream = topicToStream
            .get(TOPIC_DWD_ORDER_INFO)
            .map(info -> JSON.parseObject(info, OrderInfo.class))
            .assignTimestampsAndWatermarks(
                WatermarkStrategy
                    .<OrderInfo>forBoundedOutOfOrderness(Duration.ofSeconds(3))
                    .withTimestampAssigner((info, ts) -> info.getCreate_ts())
            
            )
            .keyBy(OrderInfo::getId);
        
        KeyedStream<OrderDetail, Long> orderDetailStream = topicToStream
            .get(TOPIC_DWD_ORDER_DETAIL)
            .map(info -> JSON.parseObject(info, OrderDetail.class))
            .assignTimestampsAndWatermarks(
                WatermarkStrategy
                    .<OrderDetail>forBoundedOutOfOrderness(Duration.ofSeconds(3))
                    .withTimestampAssigner((detail, ts) -> detail.getCreate_ts())
            
            )
            .keyBy(OrderDetail::getOrder_id);
        
        return orderInfoStream
            .intervalJoin(orderDetailStream)
            .between(Time.seconds(-5), Time.seconds(5))
            .process(new ProcessJoinFunction<OrderInfo, OrderDetail, OrderWide>() {
                @Override
                public void processElement(OrderInfo left,
                                           OrderDetail right,
                                           Context ctx,
                                           Collector<OrderWide> out) throws Exception {
                    out.collect(new OrderWide(left, right));
                }
            });
        
    }
}
/*
 异步超时:
 1. 检测你的集群是否全部开启: redis Phoenix hadoop ...
 2. 检测在Phoenix中, 六张维度表数据是否都在
 3. 检测dim_user_info的数据是否4000
 4. 检测一些redis是否允许远程连接
            redis-server /etc/redis.conf
 5. 检测redis中维度的缓存数据中列名的大写问题
 6. 找我
 
 */