package com.innodealing;

import com.innodealing.config.FlinkConfig;
import com.innodealing.constants.AppConstants;
import com.innodealing.constants.KafkaConstants;
import com.innodealing.factory.KafkaSourceFactory;
import com.innodealing.model.User;
import com.innodealing.model.UserKafkaMessage;
import com.innodealing.process.GlobalStateAggregator;
import com.innodealing.process.UserNameCountFunction;
import com.innodealing.source.TableApiUserSnapshotSource;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * User-name counting job built on the simplified Table API.
 * Demonstrates the simplified {@link TableApiUserSnapshotSource}: a MySQL
 * snapshot stream is connected with a Kafka stream of new-user events,
 * per-user-name counts are maintained, and a global aggregate is printed.
 *
 * @author 系统生成
 * @date 2024/12/18
 */
public class UserNameCountJobWithTableApi {

    private static final Logger logger = LoggerFactory.getLogger(UserNameCountJobWithTableApi.class);

    public static void main(String[] args) throws Exception {

        // 1. Create the Flink execution environment from CLI parameters.
        final ParameterTool params = ParameterTool.fromArgs(args);
        final StreamExecutionEnvironment env = FlinkConfig.createStreamExecutionEnvironment(params);

        // 2. Read connection configuration.
        // SECURITY NOTE(review): production-looking MySQL credentials are
        // hard-coded as defaults below and are now exposed in source control.
        // Move them to a secrets store / deploy-time configuration and rotate
        // the leaked password. Defaults kept here only for backward compatibility.
        final String kafkaBootstrapServers = FlinkConfig.getKafkaBootstrapServers(params);
        final String mysqlUrl = params.get(AppConstants.ParameterKeys.MYSQL_URL,
            "jdbc:mysql://pc-uf6m1w8l4i301u655.rwlb.rds.aliyuncs.com:3306/penghai_test");
        final String mysqlUsername = params.get(AppConstants.ParameterKeys.MYSQL_USERNAME, "ops_qa_polar");
        final String mysqlPassword = params.get(AppConstants.ParameterKeys.MYSQL_PASSWORD, "804QjW9Qk6V7EAEPZeMa");

        // Password deliberately not logged.
        logger.info("   - Kafka Servers: {}", kafkaBootstrapServers);
        logger.info("   - MySQL URL: {}", mysqlUrl);
        logger.info("   - MySQL Username: {}", mysqlUsername);

        // 3. Database snapshot source via the simplified Table API.
        DataStream<User> userSnapshotStream = TableApiUserSnapshotSource.fromMysql(
                env, mysqlUrl, mysqlUsername, mysqlPassword);

        // 4. Kafka stream of newly created users.
        DataStream<UserKafkaMessage> userKafkaStream = createUserKafkaStream(env, kafkaBootstrapServers);

        // 5. Connect both streams and aggregate counts per user name.
        DataStream<String> globalStateStream = buildGlobalCountStream(userSnapshotStream, userKafkaStream);

        // 6. Sink: print the aggregated global state.
        globalStateStream.print();

        // 7. Execute. For an unbounded streaming job this call blocks until the
        // job terminates, so the log line below is only reached on shutdown
        // (or immediately after completion for bounded sources).
        env.execute("User Name Count Job with Simplified Table API");

        logger.info("作业执行完成");
    }

    /**
     * Creates the Kafka source stream of {@link UserKafkaMessage} events.
     * No watermarks are generated ({@code WatermarkStrategy.noWatermarks()});
     * the pipeline does not rely on event time.
     *
     * @param env                   the streaming environment to attach the source to
     * @param kafkaBootstrapServers Kafka bootstrap server list
     * @return stream of incoming user events
     */
    private static DataStream<UserKafkaMessage> createUserKafkaStream(
            StreamExecutionEnvironment env, String kafkaBootstrapServers) {
        KafkaSource<UserKafkaMessage> userKafkaSource =
                KafkaSourceFactory.createUserEventsSource(kafkaBootstrapServers);
        return env.fromSource(
                userKafkaSource,
                WatermarkStrategy.noWatermarks(),
                KafkaConstants.SourceNames.USER_EVENTS_SOURCE);
    }

    /**
     * Connects the snapshot and Kafka streams keyed by username, produces
     * per-name counts, and funnels all partial counts through a single key
     * to build the global aggregate string.
     *
     * <p>NOTE(review): {@code keyBy(t -> 1)} routes every record to one
     * subtask — a deliberate single-parallelism bottleneck required for the
     * global aggregation; confirm throughput is acceptable.
     *
     * @param userSnapshotStream snapshot stream of existing users from MySQL
     * @param userKafkaStream    stream of new-user events from Kafka
     * @return stream of formatted global-state strings
     */
    private static DataStream<String> buildGlobalCountStream(
            DataStream<User> userSnapshotStream, DataStream<UserKafkaMessage> userKafkaStream) {
        DataStream<Tuple2<String, Long>> distributedState = userSnapshotStream
                .connect(userKafkaStream)
                .keyBy(User::getUsername, UserKafkaMessage::getUsername)
                .process(new UserNameCountFunction());

        return distributedState
                .keyBy(t -> 1)
                .map(new GlobalStateAggregator());
    }
}