package com.weic.flink.dataset.source

import org.apache.flink.api.scala._

/**
 * @Author: BigData-weic
 * @ClassName: Demo03CsvSourceOps
 * @Date: 2020/12/16 22:34
 * @Description: Flink DataSet example — read student rows from a pipe-delimited
 *               CSV file and keep, for each student name, the record with the
 *               highest score.
 * @Version: 1.0
 */
object Demo03CsvSourceOps {

	/**
	 * Reads a pipe-delimited student CSV, projects each row to a
	 * [[Result]] record, and prints the highest-scoring record per
	 * student name.
	 *
	 * @param args unused command-line arguments
	 */
	def main(args: Array[String]): Unit = {

		val env = ExecutionEnvironment.getExecutionEnvironment

		// Read the CSV into Student records. The header row
		// (id|name|age|gender|course|score) is skipped.
		// FIX: the original path was "file:\F:\..." — a file URI must use
		// forward slashes, so backslashes made it a malformed URI.
		val lines: DataSet[Student] = env.readCsvFile[Student](
			filePath = "file:///F:/datas/spark/sql/student.csv",
			ignoreFirstLine = true, // skip the header line
			fieldDelimiter = "|"
		)

		// Project to Result(id, name, course, score), group by student name,
		// and keep the record with the maximum score within each group.
		// FIX: maxBy(2) compared on the `course` String field (index 2),
		// yielding a lexicographic "max" of course names; the score is at
		// index 3 of Result.
		val ret: DataSet[Result] = lines
			.map(stu => Result(stu.id, stu.name, stu.course, stu.score))
			.groupBy(result => result.name)
			.maxBy(3)

		ret.print()
	}
}

//id|name|age|gender|course|score
case class Student(id: Int, name: String, age: Int, gender: String, course: String, score: Double)

case class Result(id: Int, name: String,course:String ,score: Double)
