package highlevel;

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.*;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KTable;
import org.apache.kafka.streams.kstream.Printed;
import org.apache.kafka.streams.kstream.Produced;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Properties;

/**
 * Demonstrates the stateless transformation operators of the Kafka Streams
 * high-level DSL (operators that do not keep any state about stream records):
 * branch, filter, filterNot, flatMap, flatMapValues, foreach, map, mapValues,
 * peek and selectKey.
 *
 * @author RenPu
 * @version 1.0
 * @since 2020/4/17
 */
public class HighLevelTransation {

    /**
     * Builds and starts a Kafka Streams topology that demonstrates the stateless
     * DSL operators. Reads String records from topic {@code "t9"}; each operator
     * example either runs against the live stream or is left commented out as a
     * reference alternative.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {
        // Configure the streams application: bootstrap brokers, default key/value
        // serdes (String), the application id (also the consumer group id), and
        // the number of stream processing threads.
        Properties properties = new Properties();
        properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "node1:9092,node2:9092,node3:9092");
        properties.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        properties.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass());
        properties.put(StreamsConfig.APPLICATION_ID_CONFIG, "word-dsl");
        properties.put(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 3);

        // StreamsBuilder assembles the processing topology.
        StreamsBuilder streamsBuilder = new StreamsBuilder();

        // Consume records from the source topic using the default String serdes.
        KStream<String, String> kStream = streamsBuilder.stream("t9");

        // branch: split one KStream into KStream[] by predicates. A record goes to
        // the FIRST predicate it matches, so the final (k, v) -> true predicate is
        // a catch-all for records that match neither of the first two.
        // (Generic array creation from varargs is safe here but unchecked.)
        @SuppressWarnings("unchecked")
        KStream<String, String>[] branch = kStream.branch(
                (k, v) -> v.startsWith("A"),
                (k, v) -> v.startsWith("B"),
                (k, v) -> true);
        // Only the first branch ("A"-prefixed values) is printed in this demo.
        branch[0].foreach((k, v) -> System.out.println(k + "\t" + v));

        // filter: keep only records whose value satisfies the predicate
        // (here: values starting with "hello").
        // kStream.filter((k,v)->v.startsWith("hello")).foreach((k,v)-> System.out.println(k+"\t"+v));

        // filterNot: inverse of filter — keep records that do NOT satisfy the predicate.
        // kStream.filterNot((k,v)->v.startsWith("hello")).foreach((k,v)-> System.out.println(k+"\t"+v));

        // flatMap: expand each input record into 0..n output records (may change
        // both key and value).
//            kStream.flatMap((k,v)->{
//                ArrayList<KeyValue<String,String>>  list= new ArrayList<>();
//                String[] words = v.split(" ");
//                for (String word : words) {
//                    KeyValue<String, String> keyValue = new KeyValue<>(k,word);
//                    list.add(keyValue);
//                }
//                return list;
//            });

        // flatMapValues: like flatMap, but only the value is expanded — the key of
        // each output record stays the same as the input's.
        kStream.flatMapValues(v -> Arrays.asList(v.split(" ")))
                .foreach((k, v) -> System.out.println(k + "\t" + v));

        // foreach: terminal operation — e.g. print each record (used above).
        //
        // groupByKey | groupBy: word-count example — re-key each word to itself,
        // group by key, count, and print the changelog stream.
//            kStream.flatMap((k,v)->{
//                ArrayList<KeyValue<String,String>>  list= new ArrayList<>();
//                String[] words = v.split(" ");
//                for (String word : words) {
//                    KeyValue<String, String> keyValue = new KeyValue<>(word,word);
//                    list.add(keyValue);
//                }
//                return list;
//            }).groupByKey().count().toStream().print(Printed.toSysOut());

        // map | mapValues: 1-to-1 record mapping (here: value -> its length).
        // kStream.map((k,v)->new KeyValue<String,Long>(k,(long)v.length())).foreach((k,v)-> System.out.println(k+""+v));

        // peek: pass-through probe — output equals input, does not alter the
        // stream; typically used to inspect records while debugging.
        // kStream.peek((k,v)-> System.out.println(k+"\t"+v));

        // selectKey: map every key to a new key ("hello" here); values unchanged.
        kStream.selectKey((k, v) -> "hello").peek((k, v) -> System.out.println(k + "\t" + v));

        // Build the topology from the assembled DSL operations.
        Topology topology = streamsBuilder.build();

        // Create the streams client that will execute the topology.
        KafkaStreams kafkaStreams = new KafkaStreams(topology, properties);

        // FIX: close the streams application on JVM shutdown so state is flushed
        // and the consumer group is left cleanly — the original never closed it.
        Runtime.getRuntime().addShutdownHook(
                new Thread(kafkaStreams::close, "streams-shutdown-hook"));

        // Start processing.
        kafkaStreams.start();
    }

}
