package com.hadwinling.alogriithm.projectforpso.onepso

import org.apache.spark.SparkConf
import org.apache.spark.sql.types.{DoubleType, StringType, StructField, StructType}
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}

/** Small driver that loads two CSV inputs for the PSO experiment and prints them.
  *
  * Runs Spark in local mode; intended as a manual smoke test for the input files,
  * not as part of the production pipeline.
  */
object Test {
  // Local-mode Spark application named "SparkPSO".
  val conf = new SparkConf().setAppName("SparkPSO").setMaster("local")
  val spark: SparkSession = SparkSession.builder().config(conf).getOrCreate()
  // `val` instead of `var`: the SparkContext reference is never reassigned.
  val sc = spark.sparkContext

  def main(args: Array[String]): Unit = {
    sss()
  }

  /** Reads the user-pair CSV and the pair/score matrix CSV and displays both.
    *
    * Paths default to the original hard-coded locations so existing callers of
    * `sss()` are unaffected; they can now be overridden for other environments.
    *
    * @param testPath   CSV with columns (user1_id, user2_id)
    * @param matrixPath CSV with columns (user1_id, user2_id, J, A, C, P, K)
    */
  def sss(testPath: String = "file:///home/hadoop/Downloads/psoInput/test.csv",
          matrixPath: String = "file:///home/hadoop/Downloads/psoInput/matrix.csv"): Unit = {

    // Explicit schema: both id columns read as strings (nullable).
    val myManualSchema2: StructType = StructType(Array(
      StructField("user1_id", StringType, true),
      StructField("user2_id", StringType, true)
    ))
    val testDF: DataFrame = spark.read.format("csv")
      .schema(myManualSchema2)
      .option("header", "true")
      .load(testPath)

    //    val incorrect: Dataset[Row] = testDF.except(predictDS)
    testDF.show()

    // Explicit schema: id columns as strings, the five score columns as doubles.
    val myManualSchema: StructType = StructType(Array(
      StructField("user1_id", StringType, true),
      StructField("user2_id", StringType, true),
      StructField("J", DoubleType, true),
      StructField("A", DoubleType, true),
      StructField("C", DoubleType, true),
      StructField("P", DoubleType, true),
      StructField("K", DoubleType, true)
    ))
    val matrixDF: DataFrame = spark.read.format("csv")
      .schema(myManualSchema)
      .option("header", "true")
      .load(matrixPath)
    matrixDF.show()
  }
}
