package com.hubiwei.gmall.realtime.app.dwm;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.hubiwei.gmall.realtime.app.BaseAppV2;
import com.hubiwei.gmall.realtime.bean.OrderDetail;
import com.hubiwei.gmall.realtime.bean.OrderInfo;
import com.hubiwei.gmall.realtime.bean.OrderWide;
import com.hubiwei.gmall.realtime.common.Constant;
import com.hubiwei.gmall.realtime.function.DimAsyncFunction;
import com.hubiwei.gmall.realtime.util.DimUtil;
import com.hubiwei.gmall.realtime.util.FlinkSinkUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.streaming.api.datastream.AsyncDataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.KeyedStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.async.ResultFuture;
import org.apache.flink.streaming.api.functions.co.ProcessJoinFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.util.Collector;
import redis.clients.jedis.Jedis;

import java.sql.Connection;
import java.text.ParseException;
import java.time.Duration;
import java.util.Collections;
import java.util.HashMap;
import java.util.concurrent.TimeUnit;

import static com.hubiwei.gmall.realtime.common.Constant.TOPIC_DWD_ORDER_DETAIL;
import static com.hubiwei.gmall.realtime.common.Constant.TOPIC_DWD_ORDER_INFO;


/**
 * DWM order-wide application.
 *
 * <p>Pipeline: interval-joins the order-info and order-detail fact streams read
 * from Kafka, enriches each joined record with six dimension tables
 * (user, province, sku, spu, trademark, category3) using Flink async I/O backed
 * by a Phoenix/HBase lookup with a Redis cache (see {@code DimAsyncFunction} /
 * {@code DimUtil}), then writes the wide records back to Kafka for the DWS layer.
 */
public class DwmOrderWideApp_Cache_Async extends BaseAppV2 {
    public static void main(String[] args) {
        // init(port, parallelism, checkpoint/group id, ..., input topics)
        new DwmOrderWideApp_Cache_Async().init(3003, 1, "DwmOrderWideApp_Cache_Async", "DwmOrderWideApp_Cache_Async",
                                               TOPIC_DWD_ORDER_INFO, TOPIC_DWD_ORDER_DETAIL);
    }

    @Override
    public void run(StreamExecutionEnvironment env,
                    HashMap<String, DataStreamSource<String>> topicAndStreamMap) {
        // 1. Join the two fact streams with an interval join.
        SingleOutputStreamOperator<OrderWide> orderWideStreamWithoutDims = joinFacts(topicAndStreamMap);
        // 2. Enrich with dimension data (async, Redis-cached).
        SingleOutputStreamOperator<OrderWide> orderWideStreamWithDims = joinDims(orderWideStreamWithoutDims);
        // 3. Write the result to Kafka so the DWS layer can consume it.
        write2Kafka(orderWideStreamWithDims);
    }

    /** Serializes each {@link OrderWide} to JSON and sinks it to the DWM Kafka topic. */
    private void write2Kafka(SingleOutputStreamOperator<OrderWide> stream) {
        stream
            .map(JSON::toJSONString)
            .addSink(FlinkSinkUtil.getKafkaSink(Constant.TOPIC_DWM_ORDER_WIDE));
    }

    /**
     * Enriches the joined stream with the six dimension tables via async I/O.
     *
     * <p>Each dimension row is fetched by table name + id through
     * {@code DimUtil.getDim}, which consults the Redis cache before Phoenix.
     * Requests time out after 60 seconds; see the notes at the bottom of this
     * file for common causes of async timeouts.
     */
    private SingleOutputStreamOperator<OrderWide> joinDims(SingleOutputStreamOperator<OrderWide> orderWideStreamWithoutDims) {
        return AsyncDataStream
            .unorderedWait(
                orderWideStreamWithoutDims,
                new DimAsyncFunction<OrderWide>() {
                    @Override
                    protected void addDim(Connection phoenixConnection,
                                          Jedis redisClient,
                                          OrderWide orderWide,
                                          ResultFuture<OrderWide> resultFuture) {
                        // NOTE(review): DimUtil.getDim is assumed to always return a
                        // non-null JSONObject here; a missing dimension row would NPE.

                        // 1. User dimension: gender + age derived from birthday.
                        JSONObject userInfo = DimUtil.getDim(phoenixConnection,
                                                             redisClient,
                                                             "dim_user_info",
                                                             orderWide.getUser_id());
                        orderWide.setUser_gender(userInfo.getString("GENDER"));
                        try {
                            orderWide.calcUserAge(userInfo.getString("BIRTHDAY"));
                        } catch (ParseException e) {
                            // Best-effort: an unparsable birthday leaves the age unset
                            // but must not fail the whole record. TODO: route through a
                            // proper logger instead of stderr.
                            e.printStackTrace();
                        }

                        // 2. Province dimension.
                        JSONObject provinceInfo = DimUtil.getDim(phoenixConnection,
                                                                 redisClient,
                                                                 "dim_base_province",
                                                                 orderWide.getProvince_id());
                        orderWide.setProvince_name(provinceInfo.getString("NAME"));
                        orderWide.setProvince_iso_code(provinceInfo.getString("ISO_CODE"));
                        orderWide.setProvince_area_code(provinceInfo.getString("AREA_CODE"));
                        orderWide.setProvince_3166_2_code(provinceInfo.getString("ISO_3166_2"));

                        // 3. SKU dimension (also yields the spu/tm/category3 ids
                        //    used by the lookups below).
                        JSONObject skuInfo = DimUtil.getDim(phoenixConnection,
                                                            redisClient,
                                                            "dim_sku_info",
                                                            orderWide.getSku_id());
                        orderWide.setSku_name(skuInfo.getString("SKU_NAME"));

                        orderWide.setSpu_id(skuInfo.getLong("SPU_ID"));
                        orderWide.setTm_id(skuInfo.getLong("TM_ID"));
                        orderWide.setCategory3_id(skuInfo.getLong("CATEGORY3_ID"));

                        // 4. SPU dimension.
                        JSONObject spuInfo = DimUtil.getDim(phoenixConnection,
                                                            redisClient,
                                                            "dim_spu_info",
                                                            orderWide.getSpu_id());
                        orderWide.setSpu_name(spuInfo.getString("SPU_NAME"));

                        // 5. Trademark dimension.
                        JSONObject tmInfo = DimUtil.getDim(phoenixConnection,
                                                           redisClient,
                                                           "dim_base_trademark",
                                                           orderWide.getTm_id());
                        orderWide.setTm_name(tmInfo.getString("TM_NAME"));

                        // 6. Category3 dimension.
                        JSONObject c3Info = DimUtil.getDim(phoenixConnection,
                                                           redisClient,
                                                           "dim_base_category3",
                                                           orderWide.getCategory3_id());
                        orderWide.setCategory3_name(c3Info.getString("NAME"));

                        // Emit the fully-enriched record exactly once.
                        resultFuture.complete(Collections.singletonList(orderWide));
                    }
                },
                60,
                TimeUnit.SECONDS
            );
    }

    /**
     * Interval-joins the order-info and order-detail streams.
     *
     * <p>Both sides use bounded-out-of-orderness watermarks (3 s) on
     * {@code create_ts}, are keyed by order id ({@code OrderInfo.id} vs
     * {@code OrderDetail.order_id}), and are joined within a ±10 s window —
     * wide enough because info and detail rows for one order are written in
     * the same upstream transaction.
     */
    private SingleOutputStreamOperator<OrderWide> joinFacts(HashMap<String, DataStreamSource<String>> topicAndStreamMap) {
        KeyedStream<OrderInfo, Long> orderInfoStream = topicAndStreamMap
            .get(TOPIC_DWD_ORDER_INFO)
            .map(info -> JSON.parseObject(info, OrderInfo.class))
            .assignTimestampsAndWatermarks(
                WatermarkStrategy
                    .<OrderInfo>forBoundedOutOfOrderness(Duration.ofSeconds(3))
                    .withTimestampAssigner((info, ts) -> info.getCreate_ts())
            )
            .keyBy(OrderInfo::getId);
        KeyedStream<OrderDetail, Long> orderDetailStream = topicAndStreamMap
            .get(TOPIC_DWD_ORDER_DETAIL)
            .map(info -> JSON.parseObject(info, OrderDetail.class))
            .assignTimestampsAndWatermarks(
                WatermarkStrategy
                    .<OrderDetail>forBoundedOutOfOrderness(Duration.ofSeconds(3))
                    .withTimestampAssigner((detail, ts) -> detail.getCreate_ts())
            )
            .keyBy(OrderDetail::getOrder_id);

        return orderInfoStream
            .intervalJoin(orderDetailStream)
            .between(Time.seconds(-10), Time.seconds(10))
            .process(new ProcessJoinFunction<OrderInfo, OrderDetail, OrderWide>() {
                @Override
                public void processElement(OrderInfo left,
                                           OrderDetail right,
                                           Context ctx,
                                           Collector<OrderWide> out) throws Exception {
                    out.collect(new OrderWide(left, right));
                }
            });
    }
}
/*
读取维度表的第一个优化: 缓存
第一次从数据库读, 这条以后应该从缓存(内存)

1. 把维度数据缓存到flink的状态中
    优点:
        本地内存, 快, 不需要网络, 数据结构也比较丰富
        
    缺点:
        1. 对 flink 的内存有压力
        2. 维度有变化, 缓存的数据没有办法收到这个变化
                缓存在dwm层的应用
                
                维度数据是dwddbapp负责写入到hbase的, 如果有变化这个app知道

2. 把维度数据缓存到外部专用的缓存: redis
    
    优点: 专用缓存, 容量比较大, 速度也快
    
        如果维度发生变化, dwddbapp可以直接访问redis去更新缓存
    
    缺点:
        需要通过网络


----------

redis的数据结构如何选?
string list set hash(map) zset(带分数可以排序)

根据表名和id查找对应的数据

string
   
   key                          value
   
   dwd_user_info:1              json格式字符串
   
   缺点:
        key比较多, 管理不方便
            专门放到一个数据库中
        
   优点:
        1. 读写方便
        2. 可以单独给每个key设置过期时间

list
    key                 value
    dwd_user_info       每个数据的json格式
    
    
   好处:
        一张一个key
        
    坏处:
        写方便
        读很难
    


set



hash
    
    key             field           value
    表名               1              json格式字符串
    
    
    一个表一个字段
    
    读写也方便
    
    没有办法单独给每条数据设置过期时间
   
   
   -----------
   异步超时可能的原因:
   1. kafka或者redis或者hbase等集群没有正确启动
   2. hbase中的维度表不全(6个)
     务必要通过maxwell 的 bootstrap功能把6张维度表全部初始化一遍
        注意user_info 4000条数据, 少一条
   3. 注意redis中缓冲数据的大小写
        字段名全部大写
        
        粗暴的去删除缓存
   
    


 */