package cn.doitedu.etl;

import cn.doitedu.pojo.SearchAggBean;
import cn.doitedu.pojo.SearchAggResultBean;
import com.alibaba.fastjson.JSONObject;
import org.apache.commons.io.Charsets;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.KeyedProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.http.HttpEntity;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClientBuilder;
import org.apache.http.util.EntityUtils;

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

import com.alibaba.fastjson.JSON;

/**
 * Flink streaming job: search-behavior OLAP wide-table aggregation.
 *
 * <p>Pipeline:
 * <ol>
 *   <li>Read DWD-layer events from Kafka topic {@code dwd-events} via a Flink SQL table.</li>
 *   <li>Aggregate search events (search / search_return / search_click) per 1-minute tumbling
 *       window, keyed by user/session/search_id/keyword/geo fields.</li>
 *   <li>Convert the result table to a DataStream and enrich each record by calling an external
 *       synonym/segmentation HTTP service per keyword.</li>
 *   <li>Print the enriched result (placeholder for a Doris sink).</li>
 * </ol>
 */
public class Job05_搜索行为分析olap主题聚合宽表 {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointStorage("file:///d:/ckpt");

        env.setParallelism(1);

        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);

        // Table mapping the Kafka DWD-layer topic: dwd-events
        tenv.executeSql(
                "create table dwd_events_kafka(\n" +
                        "     user_id bigint           \n" +
                        "    ,session_id         string\n" +
                        "    ,event_id           string\n" +
                        "    ,action_time        bigint\n" +
                        "    ,properties         map<string,string>\n" +
                        "    ,keyword as   properties['keyword'] \n" +
                        "    ,res_cnt as   cast(properties['res_cnt'] as int) \n" +
                        "    ,search_id as properties['search_id'] \n" +
                        "    ,province string          \n" +
                        "    ,city string              \n" +
                        "    ,region string            \n" +
                        "    ,rt as to_timestamp_ltz(action_time,3) \n" +
                        "    ,watermark for rt as rt  \n" +
                        ") WITH (\n" +
                        "    'connector' = 'kafka',\n" +
                        "    'topic' = 'dwd-events',\n" +
                        "    'properties.bootstrap.servers' = 'doitedu:9092',\n" +
                        "    'properties.group.id' = 'doit44_g1',\n" +
                        "    'scan.startup.mode' = 'latest-offset',\n" +
                        "    'value.format' = 'json',\n" +
                        "    'value.fields-include' = 'EXCEPT_KEY'\n" +
                        ")               ");

        // 1-minute tumbling-window aggregation of search events:
        // earliest action_time, max result count, and click count per search.
        tenv.executeSql(
                " create temporary view agg_res as                                            " +
                        " with tmp as (                                                               " +
                        " select *                                                                    " +
                        " from dwd_events_kafka                                                       " +
                        " where event_id in ('search','search_return','search_click')                 " +
                        " )                                                                           " +
                        "                                                                             " +
                        " select                                                                      " +
                        "   user_id,                                                                  " +
                        " 	session_id,                                                               " +
                        " 	search_id,                                                                " +
                        " 	keyword,                                                                  " +
                        " 	province,                                                                 " +
                        " 	city,                                                                     " +
                        " 	region,                                                                   " +
                        " 	min(action_time) as search_start_time,                                    " +
                        " 	max(res_cnt) as search_res_cnt,                                           " +
                        " 	count(1) filter(where event_id = 'search_click') as search_click_cnt      " +
                        " from table(                                                                 " +
                        "     tumble(table tmp, descriptor(rt), interval '1' minute)                  " +
                        " )                                                                           " +
                        " group by                                                                    " +
                        "   window_start,                                                             " +
                        " 	window_end,                                                               " +
                        "   user_id,                                                                  " +
                        " 	session_id,                                                               " +
                        " 	search_id,                                                                " +
                        " 	keyword,                                                                  " +
                        " 	province,                                                                 " +
                        " 	city,                                                                     " +
                        " 	region                                                                    "
        );

        // Convert the table to a DataStream so the enrichment can be done with the DataStream API.
        Table table = tenv.from("agg_res");
        DataStream<SearchAggBean> dataStream = tenv.toDataStream(table, SearchAggBean.class);

        // Call the synonym/segmentation service to obtain the split words and the
        // standardized synonym for each keyword. Keyed by keyword so identical
        // keywords are handled by the same subtask.
        SingleOutputStreamOperator<SearchAggResultBean> resultStream = dataStream.keyBy(SearchAggBean::getKeyword)
                .process(new KeyedProcessFunction<String, SearchAggBean, SearchAggResultBean>() {

                    // Runtime-only resources: created in open(), released in close().
                    // Marked transient because the function instance is serialized
                    // when the job graph is shipped to the cluster.
                    transient CloseableHttpClient client;
                    transient HttpPost post;
                    transient Map<String, String> requestParams;

                    @Override
                    public void open(Configuration parameters) throws Exception {
                        // Build the HTTP client and a reusable POST request template.
                        client = HttpClientBuilder.create().build();

                        post = new HttpPost("http://doitedu:8081/api/post/simwords");
                        post.addHeader("Content-type", "application/json; charset=utf-8");
                        post.addHeader("Accept", "application/json");

                        requestParams = new HashMap<String, String>();
                    }

                    @Override
                    public void processElement(SearchAggBean bean, KeyedProcessFunction<String, SearchAggBean, SearchAggResultBean>.Context ctx, Collector<SearchAggResultBean> out) throws Exception {

                        String keyword = bean.getKeyword();
                        requestParams.put("origin", keyword);
                        String paramJson = JSON.toJSONString(requestParams);

                        // Set the parameter JSON as the POST request body.
                        post.setEntity(new StringEntity(paramJson, StandardCharsets.UTF_8));

                        // Execute the request; try-with-resources guarantees the response
                        // (and its pooled connection) is released even on failure.
                        String resultJson;
                        try (CloseableHttpResponse response = client.execute(post)) {
                            HttpEntity responseEntity = response.getEntity();
                            resultJson = EntityUtils.toString(responseEntity, "utf-8");
                        }

                        // Parse the response and extract the synonym and the split words.
                        JSONObject jsonObject = JSON.parseObject(resultJson);
                        String split_word = jsonObject.getString("words");
                        String similar_word = jsonObject.getString("similarWord");

                        // Copy all input fields into a FRESH output bean (reusing one mutable
                        // bean across emissions can corrupt records buffered downstream),
                        // adding the synonym and split-word enrichment.
                        SearchAggResultBean result = new SearchAggResultBean();
                        result.set(bean.getUser_id(), bean.getSession_id(),
                                bean.getSearch_id(), bean.getKeyword(), similar_word, split_word,
                                bean.getProvince(), bean.getCity(), bean.getRegion(),
                                bean.getSearch_start_time(), bean.getSearch_res_cnt(),
                                bean.getSearch_click_cnt());

                        out.collect(result);
                    }

                    @Override
                    public void close() throws Exception {
                        // Release the HTTP client (and its connection pool) on task shutdown.
                        if (client != null) {
                            client.close();
                        }
                    }
                });

        // TODO: replace print() with a Doris sink; print is a placeholder for local debugging.
        resultStream.print();

        env.execute();
    }
}
