-- https://blog.csdn.net/xianpanjia4616/article/details/116168585




-- Per-order sum of (price + tax), using an inline derived table
-- instead of a CTE; result is identical.
SELECT order_id, SUM(total)
FROM (
    SELECT order_id, price + tax AS total
    FROM Orders
) AS orders_with_total
GROUP BY order_id;


-- Project each order id with its computed total (price + tax).
SELECT order_id, price + tax FROM Orders


-- Same computed expression, filtered to one order by id.
SELECT price + tax FROM Orders WHERE id = 10

-- Unique order ids.
SELECT DISTINCT id FROM Orders;

-- Dump the Bid table; sample output is shown in the table below.
SELECT * FROM Bid;
+------------------+-------+------+-------------+
|          bidtime | price | item | supplier_id |
+------------------+-------+------+-------------+
| 2020-04-15 08:05 | 4.00  | C    | supplier1   |
| 2020-04-15 08:07 | 2.00  | A    | supplier1   |
| 2020-04-15 08:09 | 5.00  | D    | supplier2   |
| 2020-04-15 08:11 | 3.00  | B    | supplier2   |
| 2020-04-15 08:13 | 1.00  | E    | supplier1   |
| 2020-04-15 08:17 | 6.00  | F    | supplier2   |
+------------------+-------+------+-------------+

DATE
TIME
TIMESTAMP
TIMESTAMP_LTZ


-- Orders table with an event-time attribute: watermarks lag order_time
-- by 1 minute to tolerate out-of-order records.
CREATE TABLE Orders (
  user       BIGINT,
  product    STRING,
  amount     INT,
  order_time TIMESTAMP(3),
  WATERMARK FOR order_time AS order_time - INTERVAL '1' MINUTE
) WITH (...);

-- Daily tumbling-window sum of amount per user
-- (legacy grouped-window syntax: TUMBLE inside GROUP BY).
SELECT
  user,
  TUMBLE_START(order_time, INTERVAL '1' DAY) AS wStart,
  SUM(amount) FROM Orders
GROUP BY
  TUMBLE(order_time, INTERVAL '1' DAY),
  user


-- user_actions with a processing-time attribute.
CREATE TABLE user_actions (
  user_name STRING,
  data STRING,
  user_action_time AS PROCTIME() -- declare an extra column as the processing-time attribute
) WITH (
  ...
);

-- Distinct users per 10-minute processing-time tumbling window.
SELECT TUMBLE_START(user_action_time, INTERVAL '10' MINUTE), COUNT(DISTINCT user_name)
FROM user_actions
GROUP BY TUMBLE(user_action_time, INTERVAL '10' MINUTE);



-- user_actions with an event-time attribute on a TIMESTAMP(3) column.
CREATE TABLE user_actions (
  user_name STRING,
  data STRING,
  user_action_time TIMESTAMP(3),
  -- declare user_action_time as the event-time attribute and generate
  -- watermarks with a 5-second delay
  WATERMARK FOR user_action_time AS user_action_time - INTERVAL '5' SECOND
) WITH (
  ...
);

-- Same 10-minute tumbling-window count, now driven by event time.
SELECT TUMBLE_START(user_action_time, INTERVAL '10' MINUTE), COUNT(DISTINCT user_name)
FROM user_actions
GROUP BY TUMBLE(user_action_time, INTERVAL '10' MINUTE);


-- Event time derived from an epoch-millis BIGINT via TO_TIMESTAMP_LTZ.
CREATE TABLE user_actions (
 user_name STRING,
 data STRING,
 ts BIGINT,
 time_ltz AS TO_TIMESTAMP_LTZ(ts, 3),
 -- declare time_ltz as event time attribute and use 5 seconds delayed watermark strategy
 WATERMARK FOR time_ltz AS time_ltz - INTERVAL '5' SECOND
) WITH (
 ...
);

-- Distinct users per 10-minute event-time tumbling window.
SELECT TUMBLE_START(time_ltz, INTERVAL '10' MINUTE), COUNT(DISTINCT user_name)
FROM user_actions
GROUP BY TUMBLE(time_ltz, INTERVAL '10' MINUTE);


-- Tumbling windows via the window TVF syntax: assign each bid to one
-- fixed 10-minute bucket.
SELECT * FROM TABLE(
   TUMBLE(TABLE Bid, DESCRIPTOR(bidtime), INTERVAL '10' MINUTES));

-- Aggregate per tumbling window.
SELECT window_start, window_end, SUM(price)
  FROM TABLE(
    TUMBLE(TABLE Bid, DESCRIPTOR(bidtime), INTERVAL '10' MINUTES))
  GROUP BY window_start, window_end;

-- Hopping (sliding) windows: 10-minute windows advancing every 5 minutes,
-- so each row belongs to two windows.
SELECT * FROM TABLE(
    HOP(TABLE Bid, DESCRIPTOR(bidtime), INTERVAL '5' MINUTES, INTERVAL '10' MINUTES));

SELECT window_start, window_end, SUM(price)
  FROM TABLE(
    HOP(TABLE Bid, DESCRIPTOR(bidtime), INTERVAL '5' MINUTES, INTERVAL '10' MINUTES))
  GROUP BY window_start, window_end;

-- Cumulative windows: 10-minute max span, emitting partial results
-- every 2 minutes.
SELECT * FROM TABLE(
    CUMULATE(TABLE Bid, DESCRIPTOR(bidtime), INTERVAL '2' MINUTES, INTERVAL '10' MINUTES));

SELECT window_start, window_end, SUM(price)
  FROM TABLE(
    CUMULATE(TABLE Bid, DESCRIPTOR(bidtime), INTERVAL '2' MINUTES, INTERVAL '10' MINUTES))
  GROUP BY window_start, window_end;



-- Total row count over the whole table/stream.
SELECT COUNT(*) FROM Orders

-- Row count per order_id group.
SELECT COUNT(*)
FROM Orders
GROUP BY order_id

-- Number of distinct order ids.
SELECT COUNT(DISTINCT order_id) FROM Orders



-- GROUPING SETS over an inline VALUES relation: counts per
-- (supplier, rating), per supplier, and a grand total.
SELECT supplier_id, rating, COUNT(*) AS total
FROM (VALUES
    ('supplier1', 'product1', 4),
    ('supplier1', 'product2', 3),
    ('supplier2', 'product3', 3),
    ('supplier2', 'product4', 4))
AS Products(supplier_id, product_id, rating)
GROUP BY GROUPING SETS ((supplier_id, rating), (supplier_id), ())


-- Keep only groups whose total amount exceeds 50 (HAVING filters groups).
SELECT SUM(amount)
FROM Orders
GROUP BY users
HAVING SUM(amount) > 50


-- Running 1-hour sum of amount per product, ordered by event time.
SELECT order_id, order_time, amount,
  SUM(amount) OVER (
    PARTITION BY product
    ORDER BY order_time
    RANGE BETWEEN INTERVAL '1' HOUR PRECEDING AND CURRENT ROW
  ) AS one_hour_prod_amount_sum
FROM Orders

-- Same frame shared by several aggregates via a named WINDOW clause.
SELECT order_id, order_time, amount,
  SUM(amount) OVER w AS sum_amount,
  AVG(amount) OVER w AS avg_amount
FROM Orders
WINDOW w AS (
  PARTITION BY product
  ORDER BY order_time
  RANGE BETWEEN INTERVAL '1' HOUR PRECEDING AND CURRENT ROW)

-- Inner equi-join on product id.
-- NOTE(review): this snippet uses `productId` while the ones below use
-- `product_id` — confirm the actual column name.
SELECT * FROM Orders
INNER JOIN Product
ON Orders.productId = Product.id


SELECT *
FROM Orders
INNER JOIN Product
ON Orders.product_id = Product.id

-- Keep every order; Product columns are NULL when unmatched.
SELECT *
FROM Orders
LEFT JOIN Product
ON Orders.product_id = Product.id

-- Keep every product row instead.
SELECT *
FROM Orders
RIGHT JOIN Product
ON Orders.product_id = Product.id

-- Keep unmatched rows from both sides.
SELECT *
FROM Orders
FULL OUTER JOIN Product
ON Orders.product_id = Product.id

-- Interval join: pair each order with its shipment that occurred at most
-- 4 hours after the order. Rewritten from an implicit comma join to an
-- explicit ANSI INNER JOIN so the join condition cannot be dropped silently.
SELECT *
FROM Orders o
INNER JOIN Shipments s
  ON o.id = s.order_id
 AND o.order_time BETWEEN s.ship_time - INTERVAL '4' HOUR AND s.ship_time


-- Append-only orders stream; watermark equals order_time (no lateness slack).
CREATE TABLE orders (
    order_id    STRING,
    price       DECIMAL(32,2),
    currency    STRING,
    order_time  TIMESTAMP(3),
    WATERMARK FOR order_time AS order_time
) WITH (/* ... */);

-- Versioned (changelog) table of currency rates, keyed by currency;
-- update_time is read from the Debezium source metadata.
CREATE TABLE currency_rates (
    currency STRING,
    conversion_rate DECIMAL(32, 2),
    update_time TIMESTAMP(3) METADATA FROM `values.source.timestamp` VIRTUAL,
    WATERMARK FOR update_time AS update_time,
    PRIMARY KEY(currency) NOT ENFORCED
) WITH (
   'connector' = 'kafka',
   'value.format' = 'debezium-json',
   /* ... */
);

-- Event-time temporal join: enrich each order with the conversion rate
-- that was valid at the order's event time.
-- Fix: removed the trailing comma after the last SELECT column
-- (`order_time,` before FROM), which is a syntax error.
SELECT
     order_id,
     price,
     currency,
     conversion_rate,
     order_time
FROM orders
LEFT JOIN currency_rates FOR SYSTEM_TIME AS OF orders.order_time
ON orders.currency = currency_rates.currency;


-- Processing-time temporal join: always joins against the latest
-- version of LatestRates.
SELECT
  o.amount, o.currency, r.rate, o.amount * r.rate
FROM
  Orders AS o
  JOIN LatestRates FOR SYSTEM_TIME AS OF o.proctime AS r
  ON r.currency = o.currency


-- JDBC table used for lookup (processing-time temporal) joins.
-- Fix: the original WITH clause was missing commas between the
-- 'table-name', 'username' and 'password' options — a syntax error.
CREATE TEMPORARY TABLE Customers (
  id INT,
  name STRING,
  country STRING,
  zip STRING
) WITH (
  'connector' = 'jdbc',
  'url' = 'jdbc:mysql://localhost:3306/db58_zp_bi',
  'table-name' = 'customers',
  -- SECURITY: credentials are hard-coded in source; move them to a
  -- secured configuration/secret store instead of committing them.
  'username' = 'root',
  'password' = 'lixy0302'
);
-- Connector parameter reference: https://blog.csdn.net/zhengzaifeidelushang/article/details/135112269

-- Lookup join: enrich each order with customer data at processing time.
SELECT o.order_id, o.total, c.country, c.zip
FROM Orders AS o
  JOIN Customers FOR SYSTEM_TIME AS OF o.proc_time AS c
    ON o.customer_id = c.id;

-- Expand the array column `tags` into one row per element.
SELECT order_id, tag
FROM Orders CROSS JOIN UNNEST(tags) AS t (tag)

-- Inner lateral join: if the table-function call for a row of the left (outer) table returns an empty result, that row is dropped.
-- Inner lateral join with a table function: rows whose function call
-- returns an empty result are dropped.
SELECT order_id, res
FROM Orders,
LATERAL TABLE(table_func(order_id)) t(res)

-- Left lateral join: keeps rows with empty function results
-- (res is NULL); the literal ON TRUE condition is required.
SELECT order_id, res
FROM Orders
LEFT OUTER JOIN LATERAL TABLE(table_func(order_id)) t(res)
  ON TRUE



import org.apache.flink.table.functions.*;

/**
 * Scalar UDF intended to call an external service per input row.
 * Currently returns a fixed placeholder string.
 */
public class ExternalServiceFunction extends ScalarFunction {
    public String eval(String input) throws Exception {
        // Call the external service here and return its response.
        return "External service response";
    }
}

StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

// Register a temporary function named external_service_function.
// NOTE(review): registerFunction is deprecated in newer Flink versions in
// favor of createTemporarySystemFunction — confirm the target Flink version.
tableEnv.registerFunction("external_service_function", new ExternalServiceFunction());


-- Invoke the UDF from SQL.
SELECT external_service_function('input') AS result FROM source;


-- Semi join: orders whose product appears in NewProducts.
SELECT user, amount
FROM Orders
WHERE product IN (
    SELECT product FROM NewProducts
)

-- Semi join via EXISTS.
-- Fix: `WHERE product EXISTS (...)` is invalid SQL — EXISTS takes no
-- left-hand operand. Use a correlated EXISTS subquery instead.
SELECT user, amount
FROM Orders
WHERE EXISTS (
    SELECT 1 FROM NewProducts WHERE NewProducts.product = Orders.product
)


-- Sort by time then id.
SELECT *
FROM Orders
ORDER BY order_time, order_id

-- First 3 orders by time.
-- NOTE(review): `orderTime` is camelCase while other snippets use
-- `order_time` — confirm the actual column name.
SELECT *
FROM Orders
ORDER BY orderTime
LIMIT 3


-- Register the split UDTF from its fully-qualified class name.
create function SplitFunction AS 'udtf.SplitFunction';



create database zp_realtime_db;

-- source
-- ODS source table: each Kafka record is one raw string (format = raw),
-- consumed from committed group offsets.
create table zp_realtime_db.hdp_lbg_supin_ods_zp_n_star_app_log (
    content string comment '使用$$分割的字符串'
)
with(
'connector' = 'kafka',
'topic' = 'hdp_lbg_supin_star_app_log',
'properties.bootstrap.servers' = 'localhost:9092',
'properties.group.id' = 'hdp_lbg_supin-hdp_lbg_supin_star_app_log-BQxsW-www',
'scan.startup.mode' = 'group-offsets',
'properties.client.id' = 'hdp_lbg_supin-hdp_lbg_supin_star_app_log-BQxsW',
'format' = 'raw'
);


-- Same source table in the default database, but reading from the
-- earliest offset instead of committed group offsets.
create table hdp_lbg_supin_ods_zp_n_star_app_log (
    content string comment '使用$$分割的字符串'
)
with(
'connector' = 'kafka',
'topic' = 'hdp_lbg_supin_star_app_log',
'properties.bootstrap.servers' = 'localhost:9092',
'properties.group.id' = 'hdp_lbg_supin-hdp_lbg_supin_star_app_log-BQxsW-www',
'scan.startup.mode' = 'earliest-offset',
'properties.client.id' = 'hdp_lbg_supin-hdp_lbg_supin_star_app_log-BQxsW',
'format' = 'raw'
);

drop table hdp_lbg_supin_ods_zp_n_star_app_log;


-- Produce a test record into the topic from the command line:
$ echo "Hello Kafka!" | bin/kafka-console-producer.sh --broker-list localhost:9092 --topic hdp_lbg_supin_star_app_log


select * from zp_realtime_db.hdp_lbg_supin_ods_zp_n_star_app_log;



-- sink
-- DWD-layer Kafka sink table: one JSON record per parsed log line.
create table hdp_lbg_supin_dwd_zp_n_star_app_log (
    ver string comment 'v0.0.1',
    ts string comment '事件时间格式yyyy-MM-dd HH:mm:ss.SSS',
    host string comment '服务地址',
    log_level string comment '日志级别',
    service_name string comment '服务名称',
    thread_name string comment '线程名称',
    code_file string comment '代码文件',
    code_line string comment '代码行数',
    code_method string comment '代码方法',
    code_class string comment '代码类',
    trace_id string comment 'traceId',
    msg_content string comment '消息内容'
)
with(
'connector' = 'kafka',
'topic' = 'hdp_lbg_supin_dwd_zp_n_star_app_log',
'properties.bootstrap.servers' = '10.178.10.8:9092,10.178.10.10:9092,10.178.10.11:9092,10.178.10.12:9092,10.178.10.13:9092',
-- NOTE(review): 'scan.startup.mode' is a source (scan) option; it has no
-- effect when this table is used only as a sink — confirm and remove.
'scan.startup.mode' = 'group-offsets',
'properties.client.id' = 'hdp_lbg_supin-hdp_lbg_supin_dwd_zp_n_star_app_log-01hRX',
'format' = 'json'
);


-- sql
-- Split each raw record on '\n' and write one row per line; only
-- msg_content is populated at this stage (other fields left empty).
insert into hdp_lbg_supin_dwd_zp_n_star_app_log
select 'v0.0.1' as ver, '' as ts, '' as host, '' as log_level, '' as service_name, '' as thread_name,
    '' as code_file, '' as code_line, '' as code_method, '' as code_class, '' as trace_id, regex_str as msg_content
from hdp_lbg_supin_ods_zp_n_star_app_log,
lateral table(SplitFunction(content,'\n')) as t(regex_str);



CREATE TABLE kafka_table1 (id BIGINT, name STRING, age INT) WITH (...);
CREATE TABLE kafka_table2 (id BIGINT, name STRING, age INT) WITH (...);

-- Override source-table options inside a query with a dynamic-options hint.
select id, name from kafka_table1 /*+ OPTIONS('scan.startup.mode'='earliest-offset') */;

-- Override source-table options inside a join.
select * from
    kafka_table1 /*+ OPTIONS('scan.startup.mode'='earliest-offset') */ t1
    join
    kafka_table2 /*+ OPTIONS('scan.startup.mode'='earliest-offset') */ t2
    on t1.id = t2.id;

-- Override sink-table options in an INSERT.
insert into kafka_table1 /*+ OPTIONS('sink.partitioner'='round-robin') */ select * from kafka_table2;

-- Dynamic table options are disabled by default and must be enabled.
-- NOTE(review): this SET must run before the hinted queries take effect.
set table.dynamic-table-options.enabled=true;

select * from zp_realtime_db.hdp_lbg_supin_ods_zp_n_star_app_log /*+ OPTIONS('scan.startup.mode'='earliest-offset') */;






-- Kafka CSV source table.
-- Fix: 'format.type' / 'format.*' are legacy (pre-Flink-1.11) connector
-- options; the unified Kafka connector uses 'format' = 'csv' together
-- with 'csv.*'-prefixed format options.
CREATE TABLE kafka_table (
    id INT,
    name STRING,
    age INT
) WITH (
    'connector' = 'kafka',
    'topic' = '<your-topic>',
    'properties.bootstrap.servers' = '<your-broker-list>',
    'scan.startup.mode' = 'earliest-offset',  -- read from the earliest offset
    'format' = 'csv',                         -- CSV payload format
    'csv.field-delimiter' = ',',              -- field separator (adjust as needed)
    'csv.ignore-parse-errors' = 'true'        -- skip records that fail to parse
);



-- Read with a hint overriding the startup mode to the earliest offset.
select *
from hdp_lbg_supin_ods_zp_n_star_app_log /*+ OPTIONS('scan.startup.mode'='earliest-offset') */;

-- Read with the table's declared options (committed group offsets).
select *
from hdp_lbg_supin_ods_zp_n_star_app_log;






-- Recreate the ODS source table idempotently.
DROP TABLE IF EXISTS hdp_lbg_supin_ods_zp_n_star_app_log;
create table hdp_lbg_supin_ods_zp_n_star_app_log (
    content string comment '使用$$分割的字符串'
)
with(
'connector' = 'kafka',
'topic' = 'hdp_lbg_supin_star_app_log',
'properties.bootstrap.servers' = 'localhost:9092',
'scan.startup.mode' = 'group-offsets',
'format' = 'raw',
'properties.group.id' = 'hdp_lbg_supin-hdp_lbg_supin_star_app_log-BQxsW-www',
'properties.client.id' = 'hdp_lbg_supin-hdp_lbg_supin_star_app_log-BQxsW'
);


-- Load the UDF jar and register the split UDTF.
add jar '/Users/a58/xiyong.lxy/project/bigdata/bigdata_realtime_flink_sql_udf/target/bigdata_realtime_flink_sql_udf-1.2.1.jar';

create function SplitFunction AS 'udtf.SplitFunction';

-- Smoke test: split each raw record on '\n', reading from the earliest offset.
select regex_str
from hdp_lbg_supin_ods_zp_n_star_app_log /*+ OPTIONS('scan.startup.mode'='earliest-offset') */,
lateral table(SplitFunction(content,'\n')) as t(regex_str);




-- Write the ETL results to a new topic.
-- bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic hdp_lbg_supin_dwd_zp_n_star_app_log
-- sink
create table hdp_lbg_supin_dwd_zp_n_star_app_log (
    info_id string,
    content string,
    sink_time string,
    e_time string
)
with(
'connector' = 'kafka',
'topic' = 'hdp_lbg_supin_dwd_zp_n_star_app_log',
'properties.bootstrap.servers' = 'localhost:9092',
'scan.startup.mode' = 'group-offsets',
'properties.client.id' = 'hdp_lbg_supin-hdp_lbg_supin_dwd_zp_n_star_app_log-01hRX',
'format' = 'json'
);


-- sql-etl
-- Each split line is '$$'-delimited: info_id $$ content $$ sink_time $$ e_time.
insert into hdp_lbg_supin_dwd_zp_n_star_app_log
select
    split_index(regex_str,'$$',0) as info_id,
    split_index(regex_str,'$$',1) as content,
    split_index(regex_str,'$$',2) as sink_time,
    split_index(regex_str,'$$',3) as e_time
from (
    select regex_str
    from hdp_lbg_supin_ods_zp_n_star_app_log /*+ OPTIONS('scan.startup.mode'='earliest-offset') */,
    lateral table(SplitFunction(content,'\n')) as t(regex_str)
) t;


-- Inspect the sink topic with overridden consumer options.
select *
from hdp_lbg_supin_dwd_zp_n_star_app_log /*+ OPTIONS(
'scan.startup.mode'='earliest-offset',
'properties.group.id' = 'hdp_lbg_supin-hdp_lbg_supin_dwd_zp_n_star_app_log-BQxsW-www',
'properties.client.id' = 'hdp_lbg_supin-hdp_lbg_supin_dwd_zp_n_star_app_log-BQxsW') */ t;


