package day02;

import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SerializationSchema;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.RichSinkFunction;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.File;
import java.io.FileOutputStream;
import java.io.PrintWriter;
import java.io.Writer;

/**
 * sink到HDFS
 */
/**
 * Streams whitespace-delimited key/value lines from a socket and appends them
 * to a single HDFS file via a custom {@link RichSinkFunction}.
 *
 * <p>NOTE(review): for production use, prefer Flink's StreamingFileSink/FileSink,
 * which provide exactly-once guarantees; this hand-rolled sink is at-least-once
 * at best and keeps one open append stream per subtask.
 */
public class TestSink {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.createLocalEnvironmentWithWebUI(new Configuration());
        DataStreamSource<String> ds = env.socketTextStream("192.168.21.160", 6667);
        ds.map(new MapFunction<String, Tuple2<String, String>>() {
            @Override
            public Tuple2<String, String> map(String value) throws Exception {
                // Split at most once so the value half may itself contain spaces,
                // and guard against lines with no separator: the original code
                // indexed arr[1] unconditionally and an input line without a
                // space threw ArrayIndexOutOfBoundsException, failing the job.
                String[] arr = value.split(" ", 2);
                return Tuple2.of(arr[0], arr.length > 1 ? arr[1] : "");
            }
        }).setParallelism(1).addSink(new RichSinkFunction<Tuple2<String, String>>() {
            // Target HDFS file; appended to if it already exists.
            private static final String URL = "/user/wangy33/flink/test_sink.txt";
            private FileSystem fs = null;
            private FSDataOutputStream os = null;
            private PrintWriter pw = null;

            @Override // open the HDFS output stream once per subtask
            public void open(Configuration parameters) throws Exception {
                fs = FileSystem.get(new org.apache.hadoop.conf.Configuration());
                Path path = new Path(URL);
                os = fs.exists(path) ? fs.append(path) : fs.create(path);
                pw = new PrintWriter(os, true); // autoFlush: flush after each println
            }

            @Override
            public void invoke(Tuple2<String, String> value, Context context) throws Exception {
                if (value != null) {
                    pw.println(value.f0 + " " + value.f1);
                }
            }

            @Override
            public void close() throws Exception {
                // Null-guard each resource: open() may have failed partway
                // through, leaving some of them unassigned (the original code
                // would then throw NPE here, masking the real failure).
                if (pw != null) {
                    pw.close(); // also closes the wrapped FSDataOutputStream
                }
                if (os != null) {
                    os.close(); // second close is a safe no-op; kept defensively
                }
                // Do NOT call fs.close(): FileSystem.get() returns a JVM-wide
                // cached instance shared with all other users of this config;
                // closing it here can break every other task using it.
            }
        });

        env.execute();
    }
}
