package com.xx.sparkdemo

import com.github.javafaker.Faker
import org.apache.commons.codec.digest.DigestUtils
import org.apache.spark.sql.SparkSession

/**
 * Generates fake student records with javafaker on Spark and counts them.
 *
 * @author tzp
 * @since 2021/8/12
 */
object FakerDataToHdfs {

  /** Synthetic student record: unique id, random first name, grade (= group index), age. */
  case class Student(id: Int, name: String, grade: Int, age: Int)

  /**
   * Builds a DataFrame of 200 fake [[Student]] rows (10 partitions x 20 rows each)
   * using javafaker, then prints the row count.
   *
   * @param args command-line arguments (unused)
   */
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .master("yarn")
      .appName("Foo")
      .config("spark.jars", "target/imeigenerator-1.0-SNAPSHOT.jar")
      .getOrCreate()
    val sc = spark.sparkContext
    import spark.implicits._

    try {
      // One group index per partition; each group expands to 20 students.
      val groups = sc.parallelize(1 to 10)
      val students = groups.repartition(10).mapPartitions { it =>
        // Faker is not serializable: construct one instance per partition,
        // inside the closure, rather than on the driver.
        val faker = new Faker
        for {
          j <- it
          i <- (1 to 20).iterator // .iterator: non-deprecated form of toIterator
        } yield Student(
          j * 1000 + i, // unique id: group * 1000 + offset within group
          faker.name().firstName(),
          j, // grade mirrors the group index
          faker.number().numberBetween(10 + j, 20 + j))
      }.toDF()
      // println (not print): emit the count with a trailing newline.
      println(students.count())
    } finally {
      // Always release the YARN application, even if the job fails.
      spark.stop()
    }
  }

}
