package com.cjy

import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.table.descriptors.FileSystem

/**
 *
 * @author cjy
 * @create 2020-07-22
 */
object FraudDetectionJob {

  /**
   * Batch word-count job: splits two hard-coded lines of text into words,
   * counts the occurrences of each word, prints the counts to the client
   * stdout and writes them to HDFS as a single text file.
   *
   * Note: despite the object name (left over from the Flink fraud-detection
   * tutorial), this is a plain word-count example.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    val env: ExecutionEnvironment = ExecutionEnvironment.getExecutionEnvironment


//    val transactions = env.addSource(new TransactionSource()).name("transactions")

    // Brings the implicit TypeInformation instances required by the
    // Scala DataSet transformations into scope.
    import org.apache.flink.api.scala._
    val lineDataSet: DataSet[String] = env.fromElements("The Apache Flink community released the third bugfix version of the Apache Flink 1.8 series.Running Apache Flink on Kubernetes with KUDO A common use case for Apache Flink is streaming data analytics together with Apache Kafka, which provides a pub/sub model and durability for data streams.","In this post, we demonstrate how to orchestrate Flink and Kafka with KUDO")

    // split(" ", -1) keeps trailing empty tokens; runs of consecutive
    // spaces therefore produce "" words that are counted as well.
    val wordDataSet: DataSet[String] = lineDataSet.flatMap(_.split(" ", -1))

    // Classic (word, 1) -> groupBy(word) -> sum(count) aggregation.
    val wordCounts = wordDataSet.map((_, 1)).groupBy(0).sum(1)

    // DataSet.print() is eager: it triggers an immediate job execution of
    // its own and prints the result set on the client.
    wordCounts.print()

    // Set parallelism on the SINK (not on the aggregation, and not after
    // print() has already run) so the HDFS output is a single file instead
    // of one file per parallel subtask.
    wordCounts.writeAsText("hdfs://node01:9000/workcount/output7").setParallelism(1)

    env.execute() // trigger execution of the file-sink job
//    wordDataSet.print()
  }

}
