package com.lmq.sparkConcept

import org.apache.log4j.{Level, Logger}
import org.apache.parquet.format.IntType
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}


/**
 * Demonstrates three ways of creating Spark datasets with a shared
 * local-mode session: a DataFrame from a pipe-delimited file with an
 * explicit schema, an RDD from an in-memory collection, and a
 * DataFrame inferred from a JSON file.
 */
object sparkConp {

  // Quiet Spark's verbose INFO logging before the session is created.
  Logger.getLogger("org.apache.spark")
    .setLevel(Level.WARN)

  // Single local-mode session shared by all demo methods below.
  val spConfig = (new SparkConf).setMaster("local").setAppName("SparkApp")
  val spark = SparkSession
    .builder()
    .appName("SparkUserData").config(spConfig)
    .getOrCreate()

  /**
   * Loads the MovieLens 100k `u.user` file (pipe-delimited, no header)
   * into a DataFrame using an explicit schema, then prints the first row.
   */
  def createDf(): Unit = {
    val userDf: DataFrame = spark.read.format("com.databricks.spark.csv")
      .option("delimiter", "|")
      .schema(StructType(
        Array(
          StructField("user_id", StringType, nullable = true),
          StructField("age", IntegerType, nullable = true),
          // Fixed: the name previously contained a trailing space
          // ("occupation "), which silently creates a column that is
          // awkward to select by name later.
          StructField("occupation", StringType, nullable = true),
          StructField("zip_code", StringType, nullable = true)
        )
      ))
      .load("D:\\2019开题\\TGSRec-master\\TGSRec-master\\ML25M\\ml-100k\\u.user")
    val first: Row = userDf.first()
    println(first)
  }

  /** Parallelizes an in-memory collection into an RDD and prints each element. */
  def createRdd(): Unit = {
    val collection = List("a", "b", "c", "d", "e")
    val rddFromCollection: RDD[String] = spark.sparkContext.parallelize(collection)
    rddFromCollection.foreach(println)
  }

  /**
   * Reads a JSON file into a DataFrame with an inferred schema, then
   * shows its rows (untruncated) and prints the schema.
   */
  def createDF(): Unit = {
    // Removed the unused `import spark.implicits._` — nothing in this
    // method relies on implicit encoders or column syntax.
    val df = spark.read.json("D:\\javaproject\\MLWithSpark\\data\\example.json")
    df.show(false)
    df.printSchema()
  }

  /** Entry point: runs each demo in turn, then stops the session. */
  def main(args: Array[String]): Unit = {
    println(spark.sparkContext.appName)
    createDf()
    // Side-effecting 0-arity methods are invoked with explicit
    // parentheses, per Scala convention (and matching createDf() above).
    createRdd()
    createDF()
    spark.stop()
  }

}
