package demo.spark.stream.format

import java.io.{InputStream}

import org.apache.hadoop.fs.{FSDataInputStream, Path}
import org.apache.hadoop.mapreduce.{InputSplit, RecordReader, TaskAttemptContext}
import org.apache.hadoop.mapreduce.lib.input.FileSplit

/**
 * Reads the changed content of a file split in fixed-size segments.
 *
 * For every segment read, it exposes:
 *  - key:   the file path together with the offset *after* this read ([[PathAndOffsetWritable]]),
 *  - value: the raw bytes read in this segment ([[LengthUnfixedBytesWritable]]).
 */
class FileOffSetBytesRecordReader extends RecordReader[PathAndOffsetWritable, LengthUnfixedBytesWritable] {
  private var filePath: Path = null
  private var splitStart: Long = 0L          // first byte offset of this split (inclusive)
  private var splitEnd: Long = 0L            // end offset of this split (exclusive)
  private var currentPosition: Long = 0L     // offset of the next byte to read
  private var maxSegmentLength: Int = 0      // max bytes returned per nextKeyValue() call
  private var fileInputStream: FSDataInputStream = null
  private var recordKey: PathAndOffsetWritable = null
  private var recordValue: LengthUnfixedBytesWritable = null

  override def initialize(split: InputSplit, context: TaskAttemptContext): Unit = {
    val fileSplit = split.asInstanceOf[FileSplit]
    // 1. Path of the file backing this split.
    filePath = fileSplit.getPath
    // 2. Start offset of the changed region covered by this split.
    splitStart = fileSplit.getStart
    // 3. End offset (exclusive) of the changed region.
    splitEnd = splitStart + fileSplit.getLength
    // 4. Open the file from the (possibly distributed) file system.
    val conf = context.getConfiguration
    val fs = filePath.getFileSystem(conf)
    fileInputStream = fs.open(filePath)
    // 5. Segment size from configuration. A non-positive value would make
    //    nextKeyValue() loop forever on empty reads, so fail fast here.
    maxSegmentLength = conf.get(FileOffsetBytesFormat.MAX_SEGMENT_LENGTH_PROPERTY).toInt
    require(maxSegmentLength > 0,
      s"${FileOffsetBytesFormat.MAX_SEGMENT_LENGTH_PROPERTY} must be positive, got $maxSegmentLength")
    // 6. Position the stream at the start of the split.
    fileInputStream.seek(splitStart)
    // 7. Track our own read position; nextKeyValue() advances it.
    currentPosition = splitStart
    // 8. Lazily create key/value holders; they are mutated in place on each read.
    if (recordKey == null) {
      recordKey = new PathAndOffsetWritable(currentPosition, filePath)
    }
    if (recordValue == null) {
      recordValue = new LengthUnfixedBytesWritable()
    }
  }

  override def getCurrentKey: PathAndOffsetWritable = recordKey
  override def getCurrentValue: LengthUnfixedBytesWritable = recordValue

  /**
   * Reads the next segment of at most `maxSegmentLength` bytes, never past the
   * split end. Returns false when the split is exhausted or the file turns out
   * to be shorter than the split claims (premature EOF).
   *
   * NOTE(review): the previous implementation read one byte at a time and
   * detected EOF by comparing the *byte value* with -1, which truncated the
   * record at any legitimate 0xFF data byte, could loop forever on premature
   * EOF (empty read still returned true), and could overrun the split end.
   */
  override def nextKeyValue(): Boolean = {
    if (currentPosition >= splitEnd) {
      false
    } else {
      // Never read beyond the split boundary.
      val toRead = math.min(maxSegmentLength.toLong, splitEnd - currentPosition).toInt
      val buffer = new Array[Byte](toRead)
      var filled = 0
      var eof = false
      // read(buf, off, len) may return fewer bytes than requested; keep going
      // until the buffer is full or the stream signals EOF (returns -1).
      while (filled < toRead && !eof) {
        val n = fileInputStream.read(buffer, filled, toRead - filled)
        if (n < 0) eof = true else filled += n
      }
      if (filled == 0) {
        // Premature EOF: stop cleanly instead of emitting empty records forever.
        false
      } else {
        currentPosition += filled
        // Key carries the offset *after* this read; value carries the bytes.
        recordKey.set(currentPosition)
        val segment = if (filled == toRead) buffer else java.util.Arrays.copyOf(buffer, filled)
        recordValue.updateBytes(segment)
        true
      }
    }
  }

  /**
   * Fraction of this split already consumed.
   *
   * NOTE(review): the previous version divided two Longs before converting to
   * Float, so integer division reported 0.0 until the split was fully read.
   * @return a value in [0.0, 1.0]; 0.0 for an empty split.
   */
  override def getProgress: Float = {
    if (splitEnd == splitStart) {
      // Empty split: nothing was changed (or the change vanished within the
      // monitoring window) — report no progress to make.
      0.0f
    } else {
      math.min((currentPosition - splitStart).toFloat / (splitEnd - splitStart).toFloat, 1.0f)
    }
  }

  override def close(): Unit = {
    if (fileInputStream != null) {
      fileInputStream.close()
      fileInputStream = null // guard against double close
    }
  }

}
