package net.bwie.flink;

import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.TableConfig;
import org.apache.flink.table.api.TableEnvironment;

/**
 * 使用FlinkSQL对Kafka消息队列中交易订单数据进行实时统计
 * @author xuanyu
 * @date 2025/10/30
 */
public class _04SqlOrderDemo {

	public static void main(String[] args) {
		// 1. Create the table execution environment (streaming mode).
		EnvironmentSettings settings = EnvironmentSettings
			.newInstance()
			.inStreamingMode()
			.build();
		TableEnvironment tabEnv = TableEnvironment.create(settings);
		// Limit default parallelism to 1 so the demo output is easy to follow.
		TableConfig config = tabEnv.getConfig();
		config.set("table.exec.resource.default-parallelism", "1");

		// 2. Source table: Debezium-shaped CDC records (before/after/source/op)
		//    consumed from the Kafka topic 'dwd-order' as plain JSON.
		//    ignore-parse-errors / fail-on-missing-field are relaxed so one bad
		//    message does not kill the job.
		tabEnv.executeSql(
			"CREATE TABLE order_info_kafka_source (\n" +
				"    `before` MAP<STRING, STRING>,\n" +
				"    `after` MAP<STRING, STRING>,\n" +
				"    `source` MAP<STRING, STRING>,\n" +
				"    `op` STRING,\n" +
				"    `ts_ms` BIGINT,\n" +
				"    `transaction` STRING\n" +
				") WITH (\n" +
				"  'connector' = 'kafka',\n" +
				"  'topic' = 'dwd-order',\n" +
				"  'properties.bootstrap.servers' = 'node101:9092,node102:9092,node103:9092',\n" +
				"  'properties.group.id' = 'sql-order-g1',\n" +
				"  'scan.startup.mode' = 'earliest-offset',\n" +
				"  'format' = 'json',\n" +
				"  'json.fail-on-missing-field' = 'false',\n" +
				"  'json.ignore-parse-errors' = 'true'\n" +
				")"
		);
		// tabEnv.sqlQuery("SELECT * FROM order_info_kafka_source").execute().print();

		// todo 1 - order count and order amount per province.
		// count(after['id']) counts rows whose 'id' entry is non-null.
		Table table1 = tabEnv.sqlQuery(
			"SELECT\n" +
				"    after['province_id'] AS province_id,\n" +
				"    count(after['id']) AS order_count,\n" +
				"    sum(CAST(after['total_amount'] AS DECIMAL(16, 2))) AS order_amount\n" +
				"FROM order_info_kafka_source\n" +
				"WHERE after IS NOT NULL\n" +
				"GROUP BY after['province_id']"
		);
		// table1.execute().print();

		// todo 2 - total order amount per user.
		// Fixed copy-paste bug: the alias was 'province_id' although the
		// grouping key is after['user_id'].
		Table table2 = tabEnv.sqlQuery(
			"SELECT\n" +
				"    after['user_id'] AS user_id,\n" +
				"    sum(CAST(after['total_amount'] AS DECIMAL(16, 2))) AS order_amount\n" +
				"FROM order_info_kafka_source\n" +
				"WHERE after IS NOT NULL\n" +
				"GROUP BY after['user_id']"
		);
		// table2.execute().print();


		// todo 3 - order amount per payment method.
		Table table3 = tabEnv.sqlQuery(
			"SELECT\n" +
				"    after['payment_way'] AS payment_way,\n" +
				"    sum(CAST(after['total_amount'] AS DECIMAL(16, 2))) AS order_amount\n" +
				"FROM order_info_kafka_source\n" +
				"WHERE after IS NOT NULL\n" +
				"GROUP BY after['payment_way']"
		);
		// table3.execute().print();

		// todo 4 - maximum single-order amount per user.
		Table table4 = tabEnv.sqlQuery(
			"SELECT\n" +
				"    after['user_id'] AS user_id,\n" +
				"    max(CAST(after['total_amount'] AS DECIMAL(16, 2))) AS max_order_amount\n" +
				"FROM order_info_kafka_source\n" +
				"WHERE after IS NOT NULL\n" +
				"GROUP BY after['user_id']"
		);
//		table4.execute().print();


		// todo 5 - write the result of query 4 into a MySQL table.
/*
-- Create the database
CREATE DATABASE IF NOT EXISTS flink_day14;
-- Create the table
CREATE TABLE IF NOT EXISTS flink_day14.tbl_order_report(
    user_id VARCHAR(255) PRIMARY KEY ,
    max_order_amount DECIMAL(16,2)
);
 */
		// 4. Sink table: JDBC upsert sink keyed on user_id, so the changelog
		//    produced by the non-windowed aggregation is applied as updates.
		// NOTE(review): 'com.mysql.jdbc.Driver' is the Connector/J 5.x class;
		// for Connector/J 8.x use 'com.mysql.cj.jdbc.Driver' — confirm which
		// connector jar is on the classpath.
		tabEnv.executeSql(
			"CREATE TABLE order_report_mysql_sink (\n" +
				"    user_id STRING,\n" +
				"    max_order_amount DECIMAL(16,2),\n" +
				"    PRIMARY KEY (user_id) NOT ENFORCED\n" +
				") WITH (\n" +
				"   'connector' = 'jdbc',\n" +
				"   'url' = 'jdbc:mysql://node101:3306/flink_day14',\n" +
				"   'table-name' = 'tbl_order_report',\n" +
				"   'driver' = 'com.mysql.jdbc.Driver',\n" +
				"   'username' = 'root',\n" +
				"   'password' = '123456'\n" +
				")"
		);

		// 5. Submit the INSERT job (executeSql submits it asynchronously).
		tabEnv.createTemporaryView("report_table", table4);
		tabEnv.executeSql(
			"INSERT INTO order_report_mysql_sink\n" +
				"SELECT user_id, max_order_amount FROM report_table"
		);

	}

}
