package com.sdehualu.utils

import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}
import ucar.ma2.ArrayFloat
import org.apache.spark.sql.types._

object sparkhdfsnctest {

  /**
   * Smoke test: in parallel, extract single float values from the "QCHANR"
   * variable of a NetCDF file stored on HDFS, then write the results out as
   * one headered CSV file under ./letaw.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    // Create the SparkSession (and underlying SparkContext) for this app.
    val spark = SparkSession
      .builder()
      .appName("SparkSessionT")
      .master("local[2]")
      .getOrCreate()
    val sc = spark.sparkContext

    // Indices into the second dimension of the NetCDF variable; one element
    // per partition so each task reads one value.
    val data = List(1, 2, 3)
    val nameRDD = sc.parallelize(data, 3)

    nameRDD.foreach(println)

    val r4 = nameRDD.map { idx =>
      // NOTE(review): the dataset is re-opened inside every task. That is
      // acceptable for this smoke test but should be hoisted/broadcast for
      // real workloads.
      val ncf = NetCDFUtils.loadDFSNetCDFDataSet("hdfs://192.168.22.9:8020",
        "/par0/B.E.13.B1850C5.ne120_t12.sehires38.003.sunway_02.rtm.h0.0022-02.nc",
        4000, false)
      val uVar = ncf.findVariable("QCHANR")

      // Read element (0, idx, 0); assumes QCHANR is a 3-D float variable so
      // the cast to ArrayFloat.D3 holds — TODO confirm against the file schema.
      val value = uVar.read.asInstanceOf[ArrayFloat.D3].get(0, idx, 0)

      Row(idx.toString, value.toString)
    }

    // Schema: two nullable string columns — the index and the extracted value.
    val structType = StructType(Array(
      StructField("name", StringType, nullable = true),
      StructField("name2", StringType, nullable = true)
    ))
    val df = spark.createDataFrame(r4, structType)
    df.show()

    // Coalesce to a single partition so the output directory holds one CSV part.
    df.repartition(1).write
      .option("header", "true")
      .csv("./letaw")

    // Stop the whole SparkSession (this also stops the SparkContext).
    spark.stop()
  }
}