package com.chenzhiling.study.datasource.LocalFile

import com.chenzhiling.study.datasource.FileSchema
import com.chenzhiling.study.util.FileUtil
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.{Partition, SparkContext, TaskContext}

import java.io.{File, FileInputStream}

/**
 * @Author: CHEN ZHI LING
 * @Date: 2021/8/30
 * @Description: 将一个文件按照schema转换成Rdd
 */
/** A partition of a [[LocalFileRdd]]: `index` is the Spark partition index, `path` the file/directory this partition should read. */
case class LocalFilePartition(index: Int,path:String) extends Partition
/**
 * An RDD that reads a local file (or every file under a local directory)
 * and converts each file's bytes into [[Row]]s according to [[FileSchema]].
 *
 * @param sc   the SparkContext this RDD belongs to
 * @param path a local file path, or a directory whose files are all read
 */
class LocalFileRdd(sc: SparkContext, path: String) extends RDD[Row](sc, Nil) with Serializable {

  /**
   * Computes the rows for one partition.
   *
   * Fixes over the previous version:
   *  - uses the path carried by the [[LocalFilePartition]] itself (falling back to
   *    the constructor `path` for any other Partition type), so that when
   *    `FileSchema.getPartition` returns several partitions each task reads only
   *    its own slice instead of every task re-reading the whole input;
   *  - the former shared `var file` field (mutated concurrently by every task and
   *    needlessly serialized with the RDD) is replaced by a task-local `val`;
   *  - rows are produced lazily via an Iterator chain instead of first
   *    materializing an Array[Row] of the entire input.
   */
  override def compute(split: Partition, context: TaskContext): Iterator[Row] = {
    val targetPath: String = split match {
      case LocalFilePartition(_, partitionPath) => partitionPath
      case _                                    => path
    }
    val check = new File(targetPath)
    val files: Array[File] =
      if (check.isFile) Array(check)
      else FileUtil.listDirectory(check)
    files.iterator.flatMap(fileToRow)
  }

  /** Delegates partition planning to [[FileSchema]] for the configured path. */
  override protected def getPartitions: Array[Partition] = {
    FileSchema.getPartition(path)
  }

  /**
   * Converts one file into an iterator of [[Row]]s: the file is split into
   * byte chunks and each chunk is wrapped with the file's metadata
   * (name, absolute path, suffix, length).
   *
   * NOTE(review): the FileInputStream is never closed here — presumably
   * `FileUtil.splitRemoteStream` consumes and closes it when the iterator is
   * exhausted; confirm, otherwise this leaks a file handle per file.
   */
  private def fileToRow(file: File): Iterator[Row] = {
    val absolutePath: String = file.getAbsolutePath
    val stream = new FileInputStream(file)
    val fileName: String = file.getName
    val length: Long = file.length()
    val suffix: String = FileUtil.getFileSuffix(fileName)
    val chunks: Iterator[Array[Byte]] = FileUtil.splitRemoteStream(stream, length)
    chunks.map((chunk: Array[Byte]) => FileSchema.filledSchema(fileName, absolutePath, suffix, length, chunk))
  }
}
