package com.study.flink.java.day08_tableAPI;

import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.operators.DataSource;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.java.BatchTableEnvironment;

/**
 * Batch word-count example using the Flink Table API / SQL.
 *
 * <p>Registers an in-memory {@code DataSet<WordCountEntity>} as a table, runs a
 * SQL aggregation that keeps only words whose total count exceeds 2, sorts the
 * result by count descending, and prints it to stdout.
 */
public class BatchSqlWordV2Count {

    public static void main(String[] args) throws Exception {

        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        BatchTableEnvironment tEnv = BatchTableEnvironment.create(env);

        // Sample input: one (word, 1) entity per word occurrence.
        DataSource<WordCountEntity> input = env.fromElements(
                new WordCountEntity("spark", 1L),
                new WordCountEntity("spark", 1L),
                new WordCountEntity("hadoop", 1L),
                new WordCountEntity("hadoop", 1L),
                new WordCountEntity("hadoop", 1L),
                new WordCountEntity("hadoop", 1L),
                new WordCountEntity("hue", 1L),
                new WordCountEntity("hadoop", 1L),
                new WordCountEntity("hadoop", 1L),
                new WordCountEntity("flink", 1L),
                new WordCountEntity("flink", 1L),
                new WordCountEntity("spark", 1L),
                new WordCountEntity("flink", 1L),
                new WordCountEntity("flink", 1L));

        // Register the DataSet as table "WordCount", mapping POJO fields to
        // the column names "word" and "counts".
        tEnv.registerDataSet("WordCount", input, "word, counts");

        // Aggregate per word, keep totals > 2, order by total descending.
        Table table = tEnv.sqlQuery("select word, sum(counts) as counts from WordCount group by word having sum(counts)>2 order by counts desc");

        DataSet<WordCountEntity> result = tEnv.toDataSet(table, WordCountEntity.class);

        // print() is an eager sink in the DataSet API: it triggers execution
        // itself, so no explicit env.execute() call must follow (it would fail
        // with "No new data sinks have been defined").
        result.print();
    }
}
