package com.burges.net.dataSet.api.output

import org.apache.flink.api.scala.{DataSet, ExecutionEnvironment}
import org.apache.hadoop.fs.Path
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapred.{JobConf, TextOutputFormat}
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat

/**
  * Author:  BurgessLee
  * Date:    2020/2/11
  * Purpose: Demonstrates the data-output (sink) options of the Flink DataSet API.
  */
object OutputDemo {

	/**
	  * Demonstrates the main output (sink) options of the Flink DataSet API:
	  * plain-text output to the local file system and HDFS, CSV output, and
	  * (commented out) the generic Hadoop OutputFormat bridge.
	  *
	  * NOTE: DataSet sinks are lazy — the job must be triggered with
	  * `environment.execute()`, otherwise nothing is ever written. The original
	  * version of this demo was missing that call.
	  */
	def main(args: Array[String]): Unit = {
		val environment = ExecutionEnvironment.getExecutionEnvironment
		val dataset: DataSet[(String, Int, Double)] = environment.fromElements(("a", 1, 152.1), ("a", 1, 152.1), ("a", 1, 152.1))

		// Write the DataSet as plain text to the local file system
		dataset.writeAsText("file:///my/result/on/localFS")
		// Write the DataSet as plain text to HDFS
		dataset.writeAsText("hdfs://nnHost:nnPort/my/result/on/localFS")

		// Write the DataSet as a CSV file: row delimiter "\n", field delimiter ","
		// (URI fixed: "file://path/file" treated "path" as a host — a file URI
		// with an empty authority needs three slashes)
		dataset.writeAsCsv("file:///path/file", "\n", ",")

		/**
		  * Generic output interface (Hadoop OutputFormat bridge)
		  */
		val ds: DataSet[(String, Int)] = environment.fromElements(("a", 1), ("a", 1), ("a", 1))
		// Convert the data set's element type to [Text, LongWritable] so it can
		// be handed to a Hadoop OutputFormat
		val words: DataSet[(Text, LongWritable)] = ds.map(t => (new Text(t._1), new LongWritable(t._2)))
		// Define the HadoopOutputFormat wrapper
//		var hadoopOutputFormat = new HadoopOutputFormat[Text, LongWritable](
//			new TextOutputFormat[Text, LongWritable],
//			new JobConf()
//		)
		// Set the output path on the wrapped JobConf
//		FileOutputFormat.setOutputPath(hadoopOutputFormat.getJobConf, new Path(resultPath))
		// Call output() to write the data through the Hadoop file system API
//		words.output(hadoopOutputFormat)

		// Sinks above are only registered, not run — trigger the job now.
		environment.execute("OutputDemo")
	}

}
