package com.gsm.projects.jobDw.dw.dwd; // relocated from former package dw.dwd
// NOTE(review): the entire DWDLayer class below is disabled (commented out). It uses
// APIs removed/deprecated in newer Flink versions (DataStream#timeWindow and
// streaming.util.serialization.SimpleStringSchema) — confirm target Flink version and
// migrate to window(TumblingEventTimeWindows.of(...)) before re-enabling.
//import dw.beans.EmploymentRecord;
//import org.apache.flink.api.common.eventtime.WatermarkStrategy;
//import org.apache.flink.api.common.functions.MapFunction;
//import org.apache.flink.api.java.tuple.Tuple4;
//import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
//import org.apache.flink.connector.kafka.source.KafkaSource;
//import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
//import org.apache.flink.connector.kafka.sink.KafkaSink;
//import org.apache.flink.streaming.api.datastream.DataStream;
//import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
//import org.apache.flink.streaming.api.windowing.time.Time;
//import org.apache.flink.streaming.api.windowing.windows.TimeWindow;
//import org.apache.flink.streaming.util.serialization.SimpleStringSchema;
//import com.fasterxml.jackson.databind.ObjectMapper;
//
//import java.util.Properties;
//
//public class DWDLayer {
//
//    public static void main(String[] args) throws Exception {
//
//        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
//
//        // Kafka source configuration
//        KafkaSource<String> kafkaSource = KafkaSource.<String>builder()
//                .setBootstrapServers("hadoop102:9092,hadoop103:9092,hadoop104:9092")
//                .setTopics("ods_employment_records")
//                .setGroupId("gsm")
//                .setValueOnlyDeserializer(new SimpleStringSchema())
//                .setStartingOffsets(OffsetsInitializer.latest())
//                .build();
//
//        // Create the Kafka producer (NOTE(review): producerProps is built but never
//        // used — KafkaSink takes its bootstrap servers via the builder below)
//        Properties producerProps = new Properties();
//        producerProps.setProperty("bootstrap.servers", "hadoop102:9092,hadoop103:9092,hadoop104:9092");
//        KafkaSink<String> kafkaSink = KafkaSink.<String>builder()
//                .setBootstrapServers("hadoop102:9092,hadoop103:9092,hadoop104:9092")
//                .setRecordSerializer(KafkaRecordSerializationSchema.builder()
//                        .setTopic("dwd_employment_records")
//                        .setValueSerializationSchema(new SimpleStringSchema())
//                        .build())
//                .build();
//
//        // Read records from Kafka
//        DataStream<String> stream = env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "Kafka Source");
//
//        // Transform records and perform the initial aggregation
//        DataStream<Tuple4<String, Integer, Long, Long>> aggregatedStream = stream
//                .map(new MapFunction<String, Tuple4<String, Integer, Long, Long>>() {
//                    @Override
//                    public Tuple4<String, Integer, Long, Long> map(String value) throws Exception {
//                        ObjectMapper mapper = new ObjectMapper();
//                        EmploymentRecord record = mapper.readValue(value, EmploymentRecord.class);
//                        // NOTE(review): city is hard-coded to a placeholder here, so every
//                        // record lands in one key group; a real implementation must extract
//                        // the city field from the parsed EmploymentRecord JSON.
//                        String city = "New York"; // placeholder value for the example only
//                        return new Tuple4<>(city, 1, record.getSendOfferTime().getTime(), record.getSendOfferTime().getTime());
//                    }
//                })
//                .keyBy(t -> t.f0) // group by city
//                .timeWindow(Time.hours(1)) // tumbling window, one hour (deprecated API — see note at top)
//                .reduce((t1, t2) -> new Tuple4<>(t1.f0, t1.f1 + t2.f1, Math.min(t1.f2, t2.f2), Math.max(t1.f3, t2.f3)));
//
//        // Write the aggregated results back to Kafka
//        aggregatedStream.map(new MapFunction<Tuple4<String, Integer, Long, Long>, String>() {
//            @Override
//            public String map(Tuple4<String, Integer, Long, Long> value) {
//                return value.toString();
//            }
//        }).sinkTo(kafkaSink);
//
//        // Submit and run the job
//        env.execute("DWD Layer Processing");
//    }
//}