package com.burning.demo.flink.DataStreamAPI.demo1;

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.io.FilePathFilter;
import org.apache.flink.api.java.io.TextInputFormat;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.typeutils.TypeExtractor;
import org.apache.flink.core.fs.FileSystem.WriteMode;
import org.apache.flink.core.fs.Path;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.IterativeStream;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.FileProcessingMode;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

//import org.apache.flink.api.common.RuntimeExecutionMode;

public class BatchModeDemo {

    /** Element counter used by the (commented-out) debug print inside the iteration demo below. */
    public static int i = 0;

    /**
     * Sample 1: read a text file line by line, append a marker to each line and write the result
     * back out as text.
     *
     * @param  args unused
     * @throws Exception if building or executing the Flink job fails
     */
    public static void main1(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // readTextFile(path) - reads the file following the TextInputFormat specification,
        // line by line, emitting each line as a String.
        DataStream<String> text = env.readTextFile("file:///D:\\data\\a_b_c_d.txt");
        // Parameterized operator type (was a raw SingleOutputStreamOperator).
        SingleOutputStreamOperator<String> singleOutputStreamOperator = text.map(new MapFunction<String, String>() {
            @Override
            public String map(String value) {
                return value + "-test";
            }
        });
        // Alternative pipeline kept for reference:
        // flatMap(new Splitter())
        // .keyBy(value -> value.f0)
        // .window(TumblingProcessingTimeWindows.of(Time.seconds(5L))).sum(1);
        // env.readTextFile("file:///D:/data/a_b_c_d.txt").print();
        // singleOutputStreamOperator.print();
        singleOutputStreamOperator.writeAsText("D:\\data\\res.txt");
        env.execute("Window WordCount");
    }

    /**
     * File-based sources: {@code readFile} with a {@link TextInputFormat} and a path filter,
     * in both one-shot and continuous-monitoring modes.
     *
     * @param  args unused
     * @throws Exception if building or executing the Flink job fails
     */
    public static void main2(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        String filePath = "file:///D:\\data\\";
        // The input format's path is supplied later through readFile(...), hence null here.
        TextInputFormat textInputFormat = new TextInputFormat(null);
        // Optional file filtering. NOTE: filterPath(...) returning true EXCLUDES the path,
        // so this skips every file whose name starts with "f".
        textInputFormat.setFilesFilter(new FilePathFilter() {
            @Override
            public boolean filterPath(Path filePath) {
                return filePath.getName().startsWith("f"); // exclude files starting with "f"
            }
        });
        // readFile(fileInputFormat, path) - reads the path once with the given input format.
        env.readFile(textInputFormat, filePath).print();
        // -----------------
        // readFile(fileInputFormat, path, watchType, interval, typeInfo) is the method the shorter
        // overloads delegate to. With FileProcessingMode.PROCESS_CONTINUOUSLY the source re-scans
        // the path every `interval` ms for new data; with PROCESS_ONCE it scans once and exits
        // (readers still finish the files they already started).
        // Important notes:
        // - PROCESS_CONTINUOUSLY: when a file is modified, its ENTIRE content is reprocessed,
        //   which can break exactly-once semantics (appending causes a full re-read of the file).
        // - PROCESS_ONCE: once the source closes there are no further checkpoints, so recovery
        //   after a node failure can be slower because the job resumes from the last checkpoint.
        // env.readFile(textInputFormat, filePath, FileProcessingMode.PROCESS_CONTINUOUSLY, 1000).print(); // filter set on textInputFormat is not applied by this overload
        // env.readFile(textInputFormat, filePath, FileProcessingMode.PROCESS_CONTINUOUSLY, 1000, new FilePathFilter() {
        //     @Override
        //     public boolean filterPath(Path filePath) {
        //         return filePath.getName().startsWith("f"); // exclude files starting with "f"
        //     }
        // }).print();
        env.readFile(textInputFormat, filePath, FileProcessingMode.PROCESS_CONTINUOUSLY, 1000L, TypeExtractor.getInputFormatTypes(textInputFormat)).print();
        env.execute("Window WordCount");
    }

    /**
     * Collection-based sources and simple file sinks.
     *
     * @param  args unused
     * @throws Exception if building or executing the Flink job fails
     */
    public static void main3(String[] args) throws Exception {
        // No need to allocate an ArrayList first only to overwrite the reference.
        List<Integer> list = Arrays.asList(1, 2, 3, 4, 5, 6, 7, 8, 9);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // env.fromCollection(list).print();
        env.fromElements("q", "b", "c").print();
        env.generateSequence(1, 100).print("pp - ");
        // NumberSequenceIterator pitertor = new NumberSequenceIterator(0, 100);
        // env.fromParallelCollection(pitertor, Integer.class).print();
        // -----------sinks-----------
        // env.generateSequence(1, 100).writeAsText("d:\\data\\res1");
        // Parameterized tuple list (was raw List<Tuple2> / raw ArrayList / raw Tuple2).
        List<Tuple2<String, Integer>> tuplesList = new ArrayList<>();
        tuplesList.add(new Tuple2<>("a", 1));
        tuplesList.add(new Tuple2<>("a", 2));
        tuplesList.add(new Tuple2<>("a", 3));
        // Register the CSV sink once (the original registered the identical sink twice,
        // creating two sinks writing to the same path).
        env.fromCollection(tuplesList).writeAsCsv("d:\\data\\csv", WriteMode.OVERWRITE, "\r\n", ",");
        env.execute("Window WordCount");
    }

    /**
     * Iteration demo: a feedback loop that keeps subtracting 5 from each element until the value
     * drops to zero or below, at which point it leaves the loop and is printed.
     *
     * @param  args unused
     * @throws Exception if building or executing the Flink job fails
     */
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setBufferTimeout(100); // 100 ms is also the documented default
        // env.setRuntimeMode(RuntimeExecutionMode.BATCH);

        // Parameterized tuple list (was raw); only consumed by the commented-out JDBC sink below.
        List<Tuple2<String, Integer>> tuplesList = new ArrayList<>();
        tuplesList.add(new Tuple2<>("a", 1));
        tuplesList.add(new Tuple2<>("a", 2));
        tuplesList.add(new Tuple2<>("a", 3));
        // JDBC sink example kept for reference:
        // env.fromCollection(tuplesList).addSink(JdbcSink.sink("insert into websites (id, name, url, alexa, country) values (?,?,?,?,?)", (ps, t) -> {
        //     ps.setInt(1, (int) t.f1);
        //     ps.setString(2, (String) t.f0);
        //     ps.setString(3, (String) t.f0);
        //     ps.setInt(4, (int) t.f1 + 1000);
        //     ps.setString(5, (String) t.f0);
        // }, new JdbcConnectionOptions.JdbcConnectionOptionsBuilder().withDriverName("com.mysql.cj.jdbc.Driver")
        //     .withUrl("jdbc:mysql://localhost:3309/test?serverTimezone=UTC").withUsername("root").withPassword("root").build()));

        // env.generateSequence(1, 100).print("pp - ");
        // Iteration demo.
        DataStream<Long> someIntegers = env.generateSequence(0, 10);
        IterativeStream<Long> iteration = someIntegers.iterate();

        // Loop body: subtract 5 from every element on each pass.
        DataStream<Long> minusOne = iteration.map(new MapFunction<Long, Long>() {
            @Override
            public Long map(Long value) throws Exception {
                // System.out.println("-- index = " + (++i) + "\t" + value);
                return value - 5;
            }
        });
        // Elements still > 0 are fed back into the iteration head for another pass.
        DataStream<Long> stillGreaterThanZero = minusOne.filter(new FilterFunction<Long>() {
            @Override
            public boolean filter(Long value) throws Exception {
                return (value > 0);
            }
        });

        iteration.closeWith(stillGreaterThanZero);

        // Elements <= 0 exit the loop and flow downstream.
        DataStream<Long> lessThanZero = minusOne.filter(new FilterFunction<Long>() {
            @Override
            public boolean filter(Long value) throws Exception {
                return (value <= 0);
            }
        });
        lessThanZero.print("res");
        // Keep the result so runtime statistics are available; the original called
        // jobExecutionResult.toString() and discarded the value, which was a no-op.
        final JobExecutionResult jobExecutionResult = env.execute("Window WordCount");
    }
}
