import com.alibaba.fastjson.JSONObject;
import com.lcy.bean.PageA;
import com.lcy.bean.PageB;
import com.lcy.bean.PageC;
import com.lcy.util.FlinkToClickHouse;
import com.lcy.util.FlinkToHbase;
import com.lcy.util.FlinkToRedis;
import com.lcy.utils.PhonexUtil;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;

import java.util.Properties;

/**
 * Flink streaming job: reads page-event JSON from the Kafka topic "practice",
 * keeps only valid records that contain a "page" object, flattens them into
 * {@code PageA} POJOs, aggregates page views per OS prefix with the Table API,
 * and sinks the retracting aggregate into ClickHouse.
 */
public class Kun {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        // Table environment bridged onto the streaming environment.
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

        // Kafka source configuration (topic "practice" on hadoop-single:9092).
        Properties properties = new Properties();
        properties.setProperty("bootstrap.servers", "hadoop-single:9092");
        properties.setProperty("group.id", "test");
        DataStream<String> stream = env
                .addSource(new FlinkKafkaConsumer<>("practice", new SimpleStringSchema(), properties));

        // Parse each record exactly once. The previous pipeline parsed twice
        // (once in a filter whose result was discarded, once in the process
        // function) and would NPE on null/empty input, because
        // JSONObject.parseObject returns null for those instead of throwing.
        SingleOutputStreamOperator<JSONObject> page = stream.process(new ProcessFunction<String, JSONObject>() {
            @Override
            public void processElement(String s, ProcessFunction<String, JSONObject>.Context context, Collector<JSONObject> collector) {
                try {
                    JSONObject jsonObject = JSONObject.parseObject(s);
                    // Forward only parseable records that carry a "page" object.
                    if (jsonObject != null && jsonObject.containsKey("page")) {
                        collector.collect(jsonObject);
                    }
                } catch (Exception ignored) {
                    // Malformed JSON is expected in the input topic; drop it silently.
                }
            }
        });

        // Flatten the nested JSON into a typed POJO for the Table API.
        // Assumes "common" is present with "mid"/"os" whenever "page" exists —
        // NOTE(review): confirm against the producer's schema.
        SingleOutputStreamOperator<PageA> mapPage = page.map(new MapFunction<JSONObject, PageA>() {
            @Override
            public PageA map(JSONObject jsonObject) throws Exception {
                String page_id = jsonObject.getJSONObject("page").getString("page_id");
                String mid = jsonObject.getJSONObject("common").getString("mid");
                String os = jsonObject.getJSONObject("common").getString("os");
                return new PageA(page_id, mid, os);
            }
        });
        tEnv.createTemporaryView("page", mapPage);
//        tEnv.sqlQuery(" select * from page").execute().print();

        // Page views per page from the filtered data:
        //tEnv.executeSql("select p.page_id,count(*) num from page p group by p.page_id").print();
        // Unique visitors per page (distinct mid):
        //tEnv.sqlQuery("select page_id,count(*) from (select distinct mid,page_id from page) group by page_id").execute().print();
        // 6) Page views split by Android vs. iOS (os prefix):
//        Table table = tEnv.sqlQuery("select substring(os,0,3) os_name,count(*) num from page group by substring(os,0,3)");

        // Side-output the iOS and Android counts to separate Kafka topics:
//        tEnv.executeSql("CREATE TABLE os_ios (\n" +
//                "  os_name STRING,\n" +
//                "  num BIGINT,\n" +
//                "  PRIMARY KEY (os_name) NOT ENFORCED\n" +
//                ") WITH (\n" +
//                "  'connector' = 'upsert-kafka',\n" +
//                "  'topic' = 'page_ios',\n" +
//                "  'properties.bootstrap.servers' = 'hadoop-single:9092',\n" +
//                "  'key.format' = 'csv',\n" +
//                "  'value.format' = 'csv'\n" +
//                ")").print();
//
//        tEnv.executeSql("CREATE TABLE os_and (\n" +
//                "  os_name STRING,\n" +
//                "  num BIGINT,\n" +
//                "  PRIMARY KEY (os_name) NOT ENFORCED\n" +
//                ") WITH (\n" +
//                "  'connector' = 'upsert-kafka',\n" +
//                "  'topic' = 'page_and',\n" +
//                "  'properties.bootstrap.servers' = 'hadoop-single:9092',\n" +
//                "  'key.format' = 'csv',\n" +
//                "  'value.format' = 'csv'\n" +
//                ")").print();
//        tEnv.executeSql("insert into os_ios (select * from (select substring(os,0,3) os_name,count(*) num from page group by substring(os,0,3)) where os_name='ios' )").print();
//        tEnv.executeSql("insert into os_ios (select * from (select substring(os,0,3) os_name,count(*) num from page group by substring(os,0,3)) where os_name='And' )").print();


        // 8) Persist the result of step 4) to MySQL:
//     tEnv.executeSql("" +
//             "CREATE TABLE kun (\n" +
//
//             " page_id STRING primary key,\n" +
//
//             " num bigint\n" +
//             ") WITH (\n" +
//             " 'connector' = 'jdbc',\n" +
//             " 'url' = 'jdbc:mysql://192.168.80.128:3306/lcy',\n" +
//             " 'username' = 'root',\n" +
//             " 'password' = 'root',\n" +
//             " 'table-name' = 'kun'\n" +
//             ")" +
//             "" +
//             "").print();
//
//     tEnv.executeSql("insert into kun (select p.page_id,count(*) num from page p group by p.page_id)").print();

        // 9) Persist the result of step 5) to Redis:
//        Table table = tEnv.sqlQuery("select page_id,count(*) num from (select distinct mid,page_id from page) group by page_id");
//        DataStream<Tuple2<Boolean, PageB>> tuple2DataStream = tEnv.toRetractStream(table, PageB.class);
//        SingleOutputStreamOperator<PageB> pageB = tuple2DataStream.map(new MapFunction<Tuple2<Boolean, PageB>, PageB>() {
//            @Override
//            public PageB map(Tuple2<Boolean, PageB> booleanPageBTuple2) throws Exception {
//                return booleanPageBTuple2.f1;
//            }
//        });

        //PhonexUtil.checkTable("page",PageB.class,"page_id","");

        // Sink to Redis:
        //pageB.addSink(new FlinkToRedis());
        // Sink to HBase:
        // Page views per OS prefix. SQL SUBSTRING is 1-indexed: a start index
        // of 0 yields only the first two characters ("iO"/"An") under
        // SQL-standard semantics, so the 'ios'/'And' comparisons in the
        // commented inserts above could never match. Start at 1 instead.
        Table table2 = tEnv.sqlQuery("select substring(os,1,3) os_name,count(*) num from page group by substring(os,1,3)");
        // toRetractStream emits (isUpsert, row); keep only the row payload.
        SingleOutputStreamOperator<PageC> pagec = tEnv.toRetractStream(table2, PageC.class).map(x -> x.f1);
        //pagec.addSink(new FlinkToHbase());
        // Sink the aggregate to ClickHouse.
        pagec.addSink(new FlinkToClickHouse());


        env.execute();
    }
}
