package util

import org.apache.spark.sql.types.{StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

import scala.collection.mutable.ArrayBuffer

/**
  * Created by 6v on 2018/11/6.
  */
object SparkReadUtil {

  /**
    * Reads a text file into a [[DataFrame]] of string columns.
    *
    * Each file line is further split into records by `split_line`, and each
    * record into fields by `split_field`. Records with fewer fields than
    * `column_input` are dropped; records with more fields are truncated to the
    * schema width (previously an over-long record produced a `Row` wider than
    * the schema, which made `createDataFrame` fail at runtime).
    *
    * @param spark        active session used for reading and DataFrame creation
    * @param path         input path passed to `spark.read.textFile`
    * @param split_line   delimiter separating records within a physical line
    * @param split_field  delimiter separating fields within a record
    * @param column_input full input schema, one name per field, all StringType
    * @param column_output columns (subset/reordering of `column_input`) to select
    * @return DataFrame containing `column_output` columns; must be non-empty
    */
  def readFromFile(spark: SparkSession, path: String, split_line: Char, split_field: Char, column_input: Seq[String], column_output: Seq[String]): DataFrame = {
    val rdd = spark.read.textFile(path).rdd
      .flatMap(_.split(split_line))
      .map { record =>
        val values = record.split(split_field).toSeq
        if (values.length >= column_input.length) {
          // Truncate to the schema width so the Row always matches the schema.
          Row.fromSeq(values.take(column_input.length))
        } else {
          Row.empty // under-filled record; removed by the filter below
        }
      }
      .filter(_.size > 0)
    val schema = StructType(column_input.map(StructField(_, StringType)))
    spark.createDataFrame(rdd, schema)
      .select(column_output.head, column_output.tail: _*)
  }

  /**
    * Identical to [[readFromFile]]; kept for backward compatibility with
    * existing callers. Previously a verbatim copy of that method's body —
    * now delegates so the two cannot drift apart.
    */
  def readFromFileTest(spark: SparkSession, path: String, split_line: Char, split_field: Char, column_input: Seq[String], column_output: Seq[String]): DataFrame =
    readFromFile(spark, path, split_line, split_field, column_input, column_output)

  /**
    * Like [[readFromFile]], but for "Z log" records where every field is
    * prefixed with its column name, e.g. `time:2018-11-06`. The prefix
    * `"<column>:"` is stripped from each field; the first field may also
    * carry arbitrary log text before its prefix, which is discarded.
    *
    * Fixes over the previous version:
    *  - iterates `column_input.indices` (not `values.indices`), so records
    *    with extra trailing fields no longer throw IndexOutOfBoundsException
    *    and the emitted Row always matches the schema width;
    *  - guards `indexOf` returning -1 (prefix absent in the first field),
    *    which previously made `substring(-1)` throw.
    *
    * @param spark        active session used for reading and DataFrame creation
    * @param path         input path passed to `spark.read.textFile`
    * @param split_line   delimiter separating records within a physical line
    * @param split_field  delimiter separating fields within a record
    * @param column_input full input schema, one name per field, all StringType
    * @param column_output columns (subset/reordering of `column_input`) to select
    * @return DataFrame containing `column_output` columns; must be non-empty
    */
  def readFromFileZLog(spark: SparkSession, path: String, split_line: Char, split_field: Char, column_input: Seq[String], column_output: Seq[String]): DataFrame = {
    val rdd = spark.read.textFile(path).rdd
      .flatMap(_.split(split_line))
      .map { record =>
        val rawValues = record.split(split_field).toSeq
        if (rawValues.length >= column_input.length) {
          val values = column_input.indices.map { index =>
            val column = column_input(index)
            val raw = rawValues(index)
            // The first field can be preceded by log noise; drop everything
            // before the "<column>:" marker when it is present.
            val trimmed =
              if (index == 0) {
                val markerPos = raw.indexOf(column + ":")
                if (markerPos >= 0) raw.substring(markerPos) else raw
              } else raw
            trimmed.replace(column + ":", "")
          }
          Row.fromSeq(values)
        } else {
          Row.empty // under-filled record; removed by the filter below
        }
      }
      .filter(_.size > 0)
    val schema = StructType(column_input.map(StructField(_, StringType)))
    spark.createDataFrame(rdd, schema)
      .select(column_output.head, column_output.tail: _*)
  }

}
