package com.fwmagic.spark.core.cases

import java.io.{BufferedReader, InputStreamReader}
import java.net.{URI, URL}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FSDataInputStream, FSDataOutputStream, FileSystem, Path}
import scala.collection.mutable.ArrayBuffer

object HdfsFileUtils {

    /** Records of (id, name, age) parsed from HDFS; populated once at object initialization. */
    var buffer: ArrayBuffer[(Integer, String, Integer)] = new ArrayBuffer[(Integer, String, Integer)]()

    // Initializer block: runs exactly once, when the object is first referenced.
    // Reads /tmp/user.txt from HDFS and parses each CSV line into an (id, name, age) tuple.
    // All resources are released via try/finally so a read or parse failure cannot leak
    // the stream or the FileSystem handle (the original closed them only on the happy path).
    {
        val fs: FileSystem = FileSystem.get(URI.create("hdfs://hd1:9000"), new Configuration())
        try {
            val inputStream: FSDataInputStream = fs.open(new Path("/tmp/user.txt"))
            try {
                val bf = new BufferedReader(new InputStreamReader(inputStream))
                try {
                    // Iterator.continually + takeWhile replaces the mutable `line` var and
                    // hand-rolled do/while read loop.
                    Iterator
                        .continually(bf.readLine())
                        .takeWhile(_ != null)
                        .foreach { line =>
                            val fields: Array[String] = line.split(",")
                            // NOTE(review): assumes every line has >= 3 comma-separated fields and
                            // that fields 0 and 2 are valid integers — a malformed line throws and
                            // aborts object initialization. TODO confirm input file guarantees.
                            val id: Integer = Integer.valueOf(fields(0))
                            val name: String = String.valueOf(fields(1))
                            val age: Integer = Integer.valueOf(fields(2))
                            buffer += ((id, name, age))
                        }
                } finally bf.close()
            } finally inputStream.close()
        } finally fs.close()
    }

    /** Returns the records loaded during object initialization. */
    def getBuffer(): ArrayBuffer[(Integer, String, Integer)] = {
        buffer
    }

    /** Demo entry point: prints the loaded records. */
    def main(args: Array[String]): Unit = {
        // Renamed from `buffer` to avoid shadowing the object field of the same name.
        val records: ArrayBuffer[(Integer, String, Integer)] = getBuffer()
        println(records)
    }

}
