package com.tydic;

import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.io.jdbc.JDBCAppendTableSink;
import org.apache.flink.api.java.io.jdbc.JDBCInputFormat;
import org.apache.flink.api.java.typeutils.RowTypeInfo;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.Kafka010JsonTableSource;
import org.apache.flink.table.api.*;
import org.apache.flink.table.sources.TableSource;
import org.apache.flink.types.Row;

import java.util.Properties;

/**
 * Flink streaming job that joins a Kafka JSON topic with a MySQL reference table
 * and writes the join result back to MySQL via a JDBC append sink.
 *
 * <p>Pipeline: Kafka topic {@code test_001} (fields xm/age/address/time) is registered
 * as a table source; table {@code user01} is read once through {@link JDBCInputFormat}
 * and registered as a second stream; the two are joined on {@code age} with the Table
 * API and the result is appended into MySQL table {@code user}.
 *
 * <p>NOTE(review): the JDBC side is a bounded input consumed as a stream, so the join
 * only matches records that arrive while the JDBC rows are still in flight — confirm
 * this matches the intended semantics before relying on it.
 */
public class KafkaSourceJoinJDBCSourceToJDBCSink {

    /** JDBC connection settings shared by the lookup source and the sink (previously duplicated inline). */
    private static final String JDBC_DRIVER = "com.mysql.jdbc.Driver";
    private static final String JDBC_URL =
            "jdbc:mysql://68.61.64.39:3306/test3?useUnicode=true&characterEncoding=utf-8&allowMultiQueries=true&useSSL=false";
    private static final String JDBC_USER = "root";
    // SECURITY NOTE(review): credentials are hard-coded in source; move to args/env/secret store
    // before any non-test deployment.
    private static final String JDBC_PASSWORD = "tydic123456";

    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        StreamTableEnvironment tableEnv = TableEnvironment.getTableEnvironment(env);

        // Kafka consumer configuration; earliest offset so the job replays the topic on a fresh group.
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "68.61.64.102:9092,68.61.64.103:9092");
        properties.put("group.id", "group01");
        properties.put("auto.offset.reset", "earliest");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // --- Kafka JSON source: schema of the incoming topic records. ---
        String[] sourceFieldNames = {"xm", "age", "address", "time"};
        TypeInformation<?>[] sourceFieldTypes = {Types.STRING(), Types.INT(), Types.STRING(), Types.STRING()};
        TableSchema sourceTableSchema = new TableSchema(sourceFieldNames, sourceFieldTypes);
        TableSource<Row> kafkaSource = Kafka010JsonTableSource.builder()
                .forTopic("test_001")
                .withKafkaProperties(properties)
                .withSchema(sourceTableSchema)
                .build();
        tableEnv.registerTableSource("KafkaSource", kafkaSource);

        // --- JDBC reference data: bounded read of user01, exposed as a stream for the join. ---
        String[] joinSourceFieldNames = {"age", "address"};
        TypeInformation<?>[] joinSourceFieldTypes = {Types.INT(), Types.STRING()};
        RowTypeInfo rowTypeInfo = new RowTypeInfo(joinSourceFieldTypes, joinSourceFieldNames);
        DataStreamSource<Row> joinSource = env.createInput(
                JDBCInputFormat.buildJDBCInputFormat()
                        .setDrivername(JDBC_DRIVER)
                        .setDBUrl(JDBC_URL)
                        .setUsername(JDBC_USER)
                        .setPassword(JDBC_PASSWORD)
                        .setQuery("select age,address from user01")
                        .setRowTypeInfo(rowTypeInfo)
                        .finish()
        );
        // Public registration API (the original used the internal registerDataStreamInternal);
        // field names are derived from the RowTypeInfo above.
        tableEnv.registerDataStream("joinSource", joinSource);

        // --- JDBC sink: appends (age, address) rows into table `user`. Batch size 1 flushes per row. ---
        String[] sinkFieldNames = {"age", "address"};
        TypeInformation<?>[] sinkFieldTypes = {Types.INT(), Types.STRING()};
        JDBCAppendTableSink jdbcAppendTableSink = JDBCAppendTableSink.builder()
                .setDrivername(JDBC_DRIVER)
                .setDBUrl(JDBC_URL)
                .setUsername(JDBC_USER)
                .setPassword(JDBC_PASSWORD)
                .setQuery("insert into user(age,address) values (?,?)")
                .setBatchSize(1)
                .setParameterTypes(sinkFieldTypes)
                .build();
        // Registration kept for SQL-api usage; writeToSink below targets the sink instance directly.
        tableEnv.registerTableSink("JDBCAppendTableSink", sinkFieldNames, sinkFieldTypes, jdbcAppendTableSink);

        // Alias the Kafka fields so the join condition is unambiguous against joinSource's age/address.
        Table kafkaTable = tableEnv.scan("KafkaSource").select("age as t1Age,address as t1Address");
        tableEnv.registerTable("kafkaTable", kafkaTable);

        // 1. SQL api alternative:
        // Table result = tableEnv.sqlQuery("SELECT t1Age,t1Address FROM kafkaTable inner join joinSource on kafkaTable.t1Age = joinSource.age");
        // 2. Table api: inner join on age, keep the JDBC-side columns.
        Table joinTable = tableEnv.scan("joinSource").select("age,address");
        Table result = kafkaTable.join(joinTable).where("t1Age = age").select("age,address");

        result.writeToSink(jdbcAppendTableSink);

        env.execute();
    }
}
