package com.zc.study

import java.nio.charset.Charset

import org.apache.commons.lang3.CharSet
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.common.typeutils.CompositeType
import org.apache.flink.api.java.io.{CsvInputFormat, PojoCsvInputFormat, TextInputFormat, TextOutputFormat, TupleCsvInputFormat}
import org.apache.flink.api.java.operators.DataSource
import org.apache.flink.api.java.typeutils.{PojoTypeInfo, TupleTypeInfoBase}
import org.apache.flink.api.scala.{DataSet, ExecutionEnvironment, getCallLocationName, wrap}
import org.apache.flink.configuration.Configuration
import org.apache.flink.core.fs.Path
import org.apache.flink.util.Preconditions

import scala.reflect.ClassTag

/**
 * Demo of the Flink batch (DataSet API) built-in data sources:
 * plain text files, recursive directory reads, and CSV-to-case-class reads.
 */
object DataSetDataSource {

  /** Base directory containing the sample input files (Windows path). */
  val fileBasePath = "D:\\gitee\\imooc_study\\test-data\\flink\\"

  def main(args: Array[String]): Unit = {
    import org.apache.flink.api.scala._
    val executionEnvironment = ExecutionEnvironment.createLocalEnvironment()

    //    readTextFile(executionEnvironment, fileBasePath + "a1") // read a single file

    //    readTextFile(executionEnvironment, fileBasePath) // read a whole directory

    //    readRecursionPath(executionEnvironment, fileBasePath) // read a directory recursively

    // Read the CSV file and print each record.
    //
    // FIX(review): the original code mapped each record through
    //   new String(x.toString.getBytes(Charset.forName("utf-8")))
    // to work around garbled Chinese output. That round-trip encodes to UTF-8
    // bytes but decodes them with the *platform default* charset (GBK on a
    // Chinese-locale Windows machine), so for non-ASCII text it corrupts the
    // string further instead of repairing it. Re-encoding an already-decoded
    // String can never fix mojibake introduced when the file was read; ensure
    // instead that the source file is stored in the charset the CSV input
    // format expects (UTF-8 by default).
    val csvData = readCsvFile(executionEnvironment, fileBasePath)
    csvData.map(_.toString).print()

    //    executionEnvironment.readTextFile(fileBasePath + "a1.gz").print()

    //    executionEnvironment.fromCollection(1 to 10).print()

  }

  /** Reads every line under `filePath` (single file or directory, non-recursive) and prints it. */
  def readTextFile(executionEnvironment: ExecutionEnvironment, filePath: String): Unit = {
    executionEnvironment.readTextFile(filePath).print()
  }

  /** Reads `filePath` recursively (descends into nested sub-directories) and prints every line. */
  def readRecursionPath(executionEnvironment: ExecutionEnvironment, filePath: String): Unit = {
    val parameters = new Configuration()
    // Flink config key that makes the file input format enumerate nested directories.
    parameters.setBoolean("recursive.file.enumeration", true)
    executionEnvironment.readTextFile(filePath).withParameters(parameters).print()
  }

  /**
   * Reads "11.csv" under `filePath` into a case-class-typed DataSet,
   * skipping the header row.
   *
   * @return a DataSet of (name, work, salary) records.
   */
  def readCsvFile(executionEnvironment: ExecutionEnvironment, filePath: String) = {
    import org.apache.flink.api.scala._

    // Earlier experiments, kept for reference:
    //    executionEnvironment
    //      .readCsvFile[(String,String,Double)](filePath+"11.csv",
    //        ignoreFirstLine = true)
    //      .print()

    //    executionEnvironment
    //      .readCsvFile[(String,Double)](filePath+"11.csv",
    //        ignoreFirstLine = true, includedFields = Array(0, 2))
    //      .print()

    //    case class MyCaseClass1(name: String, sary: Double)
    //    executionEnvironment.readCsvFile[MyCaseClass1](filePath+"11.csv",
    //      ignoreFirstLine = true, includedFields = Array(0, 2))
    //      .print()

    // NOTE(review): "sary" looks like a typo for "salary"; kept as-is because the
    // field names of a case class participate in the CSV field mapping and in the
    // printed output format below — confirm before renaming.
    case class MyCaseClass2(name: String, work: String, sary: Double) {
      override def toString: String = name + "-" + work + "-" + sary
    }

    executionEnvironment.readCsvFile[MyCaseClass2](filePath + "11.csv",
      ignoreFirstLine = true)
  }

}


