package chapter03
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.{SparkConf, SparkContext}
object Test02_FileOperator {

  /** Demonstrates basic file operations with Spark and the Hadoop FileSystem API:
    * reading a local file, checking existence of / reading a file on HDFS,
    * downloading an HDFS file to the local disk, and uploading a local file to HDFS.
    *
    * Expects an HDFS NameNode at hdfs://192.168.100.131:9000 and local files
    * under `input/`. Runs Spark in local mode (`local[*]`).
    */
  def main(args: Array[String]): Unit = {
    // Act as user "root" when talking to HDFS (assumes no Kerberos security).
    System.setProperty("HADOOP_USER_NAME", "root")

    // Create the Spark environment first.
    val conf = new SparkConf().setMaster("local[*]").setAppName("FileOperator")
    val sc = new SparkContext(conf)
    try {
      // Read a local file.
      val value = sc.textFile("input/word.txt")
      println(value.collect().toList)

      // Obtain a FileSystem handle for the HDFS cluster.
      // NOTE: this returns a JVM-cached instance; closing it at the end of this
      // standalone program is safe.
      val system = new Path("hdfs://192.168.100.131:9000")
        .getFileSystem(new Configuration())
      try {
        // Check whether the file exists on HDFS.
        val bool = system.exists(new Path("hdfs://192.168.100.131:9000/word.txt"))
        println(bool)

        // Read the file stored on HDFS through Spark.
        val value1 = sc.textFile("hdfs://192.168.100.131:9000/word.txt")
        println(value1.collect().mkString("Array(", ", ", ")"))

        // Download the HDFS file to the local file system.
        system.copyToLocalFile(new Path("hdfs://192.168.100.131:9000/word.txt"),
          new Path("input/ts.txt"))

        // Upload a local file to HDFS.
        system.copyFromLocalFile(new Path("input/wordlocal.txt"),
          new Path("hdfs://192.168.100.131:9000/"))
      } finally {
        // Release the HDFS connection even if an operation above failed.
        system.close()
      }
    } finally {
      // FIX: the original never stopped the SparkContext, leaking its resources
      // (executor threads, UI server, temp dirs). Always stop it on exit.
      sc.stop()
    }
  }
}
