package com.atguigu.realtime.app.dwm;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.realtime.app.BaseAppV2;
import com.atguigu.realtime.bean.OrderDetail;
import com.atguigu.realtime.bean.OrderInfo;
import com.atguigu.realtime.bean.OrderWide;
import com.atguigu.realtime.function.DimAsyncFunction;
import com.atguigu.realtime.util.DimUtil;
import com.atguigu.realtime.util.FlinkSinkUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.streaming.api.datastream.AsyncDataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.async.ResultFuture;
import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;
import redis.clients.jedis.Jedis;

import java.sql.Connection;
import java.text.ParseException;
import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.concurrent.TimeUnit;

import static com.atguigu.realtime.common.Constant.*;

// Async dimension enrichment. Async I/O raises operator throughput when the
// bottleneck is network latency to external systems (Phoenix / Redis), which
// the default synchronous interaction would serialize.
// Two ways to go async:
//   1. a database client that natively supports async requests, or
//   2. several sync clients driven by a thread pool, giving bounded concurrency.
// Phoenix currently has no async client, so this app uses approach 2
// (implemented inside DimAsyncFunction).
public class DwmOrderWideApp_Cache_Async extends BaseAppV2 {
    public static void main(String[] args) {
        // port 3003, parallelism 1, checkpoint path / consumer group named after
        // the app; consumes the two DWD fact topics.
        new DwmOrderWideApp_Cache_Async().init(3003, 1, "DwmOrderWideApp_Cache_Async",
                "DwmOrderWideApp_Cache_Async", TOPIC_DWD_ORDER_INFO, TOPIC_DWD_ORDER_DETAIL);
    }

    /**
     * Pipeline: join the two fact streams, enrich with dimensions, sink to Kafka.
     *
     * @param env           the Flink execution environment (unused directly; the
     *                      topology hangs off the pre-built source streams)
     * @param topicToStream one source stream per consumed topic, keyed by topic name
     */
    @Override
    protected void run(StreamExecutionEnvironment env,
                       HashMap<String, DataStreamSource<String>> topicToStream) {
        // 1. join the fact tables (order info + order detail)
        SingleOutputStreamOperator<OrderWide> orderWideStreamWithoutDims = factsJoin(topicToStream);

        // 2. join the dimension data (Phoenix lookups, cached in Redis, async)
        SingleOutputStreamOperator<OrderWide> orderWideStreamWithDims = factDims(orderWideStreamWithoutDims);

        // 3. write the wide-table records to Kafka
        writeToKafka(orderWideStreamWithDims);
    }

    /** Serializes each OrderWide to JSON and sinks it to the DWM wide-table topic. */
    private void writeToKafka(SingleOutputStreamOperator<OrderWide> stream) {
        stream
                .map(JSON::toJSONString)
                .addSink(FlinkSinkUtil.getKafkaSink(TOPIC_DWM_ORDER_WIDE));
    }

    /**
     * Enriches each OrderWide with six dimension tables via Flink async I/O.
     * unorderedWait is used because downstream does not depend on record order,
     * so completed futures may be emitted as soon as they finish.
     */
    private SingleOutputStreamOperator<OrderWide> factDims(SingleOutputStreamOperator<OrderWide> orderWideStreamWithoutDims) {
        return AsyncDataStream
                .unorderedWait(
                        orderWideStreamWithoutDims,
                        new DimAsyncFunction<OrderWide>() {
                            @Override
                            public void timeout(OrderWide input, ResultFuture<OrderWide> resultFuture) throws Exception {
                                // FIX: report the timeout through the ResultFuture, as the
                                // Flink AsyncFunction contract recommends, instead of throwing
                                // from the callback. Either way the job fails, but
                                // completeExceptionally routes the error through the operator's
                                // normal failure path.
                                // FIX: the redis.conf directive is "bind", not "bond".
                                resultFuture.completeExceptionally(new RuntimeException(
                                        "异步超时:\n" +
                                                " 1. 检测你的集群是否全部开启: redis Phoenix hadoop ...\n" +
                                                " 2. 检测在Phoenix中, 六张维度表数据是否都在\n" +
                                                " 3. 检测dim_user_info的数据是否4000\n" +
                                                " 4. 检测一些redis是否允许远程连接\n" +
                                                "            redis-server /etc/redis.conf   bind 0.0.0.0\n" +
                                                " 5. 检测redis中维度的缓存数据中列名的大写问题\n" +
                                                " 6. 找我"
                                ));
                            }

                            @Override
                            public void addDim(Jedis jedis,
                                               Connection phoenixConn,
                                               OrderWide orderWide,
                                               ResultFuture<OrderWide> resultFuture) {
                                // Look up each dimension (Redis cache first, then Phoenix —
                                // see DimUtil.readDim) and copy the needed columns onto the
                                // wide record. Column names are upper-case because Phoenix
                                // upper-cases unquoted identifiers.

                                // 1. user info (gender + age derived from birthday)
                                JSONObject userInfo = DimUtil.readDim(phoenixConn, jedis, "dim_user_info", orderWide.getUser_id());
                                orderWide.setUser_gender(userInfo.getString("GENDER"));
                                try {
                                    orderWide.calcUserAge(userInfo.getString("BIRTHDAY"));
                                } catch (ParseException e) {
                                    // Best effort: an unparsable birthday leaves the age unset
                                    // rather than failing the whole record.
                                    e.printStackTrace();
                                }

                                // 2. province
                                JSONObject provinceInfo = DimUtil.readDim(phoenixConn, jedis, "dim_base_province", orderWide.getProvince_id());
                                orderWide.setProvince_3166_2_code(provinceInfo.getString("ISO_3166_2"));
                                orderWide.setProvince_area_code(provinceInfo.getString("AREA_CODE"));
                                orderWide.setProvince_iso_code(provinceInfo.getString("ISO_CODE"));
                                orderWide.setProvince_name(provinceInfo.getString("NAME"));

                                // 3. sku (also supplies the spu/tm/category3 ids used below)
                                JSONObject skuInfo = DimUtil.readDim(phoenixConn, jedis, "dim_sku_info", orderWide.getSku_id());
                                orderWide.setSku_name(skuInfo.getString("SKU_NAME"));
                                orderWide.setOrder_price(skuInfo.getBigDecimal("PRICE"));
                                orderWide.setSpu_id(skuInfo.getLong("SPU_ID"));
                                orderWide.setTm_id(skuInfo.getLong("TM_ID"));
                                orderWide.setCategory3_id(skuInfo.getLong("CATEGORY3_ID"));

                                // 4. spu
                                JSONObject spuInfo = DimUtil.readDim(phoenixConn, jedis, "dim_spu_info", orderWide.getSpu_id());
                                orderWide.setSpu_name(spuInfo.getString("SPU_NAME"));

                                // 5. trademark
                                JSONObject tmInfo = DimUtil.readDim(phoenixConn, jedis, "dim_base_trademark", orderWide.getTm_id());
                                orderWide.setTm_name(tmInfo.getString("TM_NAME"));

                                // 6. category3 (comment was mis-numbered "5" in the original)
                                JSONObject c3Info = DimUtil.readDim(phoenixConn, jedis, "dim_base_category3", orderWide.getCategory3_id());
                                orderWide.setCategory3_name(c3Info.getString("NAME"));

                                // Hand the enriched record (a one-element collection) back to
                                // the async operator for emission downstream.
                                resultFuture.complete(Collections.singletonList(orderWide));
                            }
                        },
                        30,
                        TimeUnit.SECONDS
                );
    }

    /**
     * Interval-joins the order-info and order-detail fact streams on order id.
     * Interval join requires: 1) event time, 2) keyed streams on both sides.
     */
    private SingleOutputStreamOperator<OrderWide> factsJoin(HashMap<String, DataStreamSource<String>> stream) {
        KeyedStream<OrderInfo, Long> orderInfoStream = stream
                .get(TOPIC_DWD_ORDER_INFO)
                .map(info -> JSON.parseObject(info, OrderInfo.class))
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy.<OrderInfo>forBoundedOutOfOrderness(Duration.ofSeconds(3))
                                .withTimestampAssigner((info, ts) -> info.getCreate_ts())
                )
                .keyBy(OrderInfo::getId); // the order's own id

        KeyedStream<OrderDetail, Long> orderDetailStream = stream
                .get(TOPIC_DWD_ORDER_DETAIL)
                .map(detail -> JSON.parseObject(detail, OrderDetail.class))
                .assignTimestampsAndWatermarks(
                        WatermarkStrategy.<OrderDetail>forBoundedOutOfOrderness(Duration.ofSeconds(3))
                                .withTimestampAssigner((detail, ts) -> detail.getCreate_ts())
                )
                // NOTE: key by order_id (the parent order), NOT the detail's own id —
                // it must match the other side's key for the join to work.
                .keyBy(OrderDetail::getOrder_id);

        return orderInfoStream
                .intervalJoin(orderDetailStream)
                // a detail may arrive up to 5s before or after its order info
                .between(Time.seconds(-5), Time.seconds(5))
                .process(new ProcessJoinFunction<OrderInfo, OrderDetail, OrderWide>() {
                    @Override
                    public void processElement(OrderInfo left,
                                               OrderDetail right,
                                               Context ctx,
                                               Collector<OrderWide> out) throws Exception {
                        out.collect(new OrderWide(left, right));
                    }
                });
    }
}
/*
 异步超时:
 1. 检测你的集群是否全部开启: redis Phoenix hadoop ...
 2. 检测在Phoenix中, 六张维度表数据是否都在
 3. 检测dim_user_info的数据是否4000
 4. 检测一些redis是否允许远程连接
            redis-server /etc/redis.conf
 5. 检测redis中维度的缓存数据中列名的大写问题
 6. 找我

 */