import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.{DecimalType, IntegerType}
import org.apache.spark.sql.{SaveMode, SparkSession}

object movie_ods {

  /** Wrapper for one raw JSON line read from HDFS. */
  case class Line(json: String)

  /**
   * ODS-layer ingestion job: reads raw Douban movie JSON lines from HDFS,
   * extracts the fields of interest via `json_tuple`, normalises the
   * multi-valued " / "-delimited fields into arrays, and writes the result
   * as a single ORC file into a Hive table.
   *
   * @param args optional overrides: args(0) = input path, args(1) = target
   *             Hive table. Defaults preserve the original hard-coded values.
   */
  def main(args: Array[String]): Unit = {
    // Allow path/table overrides from the command line; defaults keep the
    // job runnable exactly as before when no arguments are given.
    val inputPath   = if (args.length > 0) args(0) else "hdfs://single01:9000/tmp/douban"
    val targetTable = if (args.length > 1) args(1) else "movies.movie_ods"

    val conf = new SparkConf()
      .setAppName("movie_ods")
      .setMaster("local[*]") // NOTE(review): local master is for development; drop for cluster submit

    val spark = SparkSession
      .builder()
      .config(conf)
      .enableHiveSupport() // required for saveAsTable into a Hive metastore table
      .getOrCreate()

    val sc = spark.sparkContext

    import spark.implicits._
    import org.apache.spark.sql.functions._

    // mapPartitions(_.map(...)) was a redundant wrapper: RDD.map is already
    // applied element-wise per partition.
    val rddLines: RDD[Line] = sc
      .textFile(inputPath, 4)
      .map(Line(_))

    spark.createDataFrame(rddLines)
      .select(
        // json_tuple extracts the Chinese-keyed fields from the raw JSON;
        // keys absent from a record yield null columns.
        json_tuple($"json", "名称", "导演", "编剧", "主演", "类型", "制片国家/地区", "语言", "上映日期", "片长", "又名", "IMDb", "评分", "评价人数")
          .as(Seq("name", "directors", "writers", "actors", "genres", "production_countries", "languages", "release_date", "runtime", "alternative_titles", "imdb_id", "rating", "rating_count"))
      )
      .select(
        $"name",
        // Multi-valued fields are " / "-delimited in the source; split into arrays.
        split($"directors", " / ").as("directors"),
        split($"writers", " / ").as("writers"),
        split($"actors", " / ").as("actors"),
        split($"genres", " / ").as("genres"),
        split($"production_countries", " / ").as("production_countries"),
        split($"languages", " / ").as("languages"),
        split($"release_date", " / ").as("release_date"),
        split($"runtime", " / ").as("runtime"),
        split($"alternative_titles", " / ").as("alternative_titles"),
        $"imdb_id",
        // DecimalType(2, 1) tops out at 9.9, so a perfect 10.0 rating would
        // overflow and be cast to null; (3, 1) covers the full 0.0–10.0 range.
        $"rating".cast(DecimalType(3, 1)),
        $"rating_count".cast(IntegerType)
      )
      // coalesce avoids the full shuffle repartition(1) would trigger; the
      // goal is merely a single output file.
      .coalesce(1)
      .write
      .format("orc")
      .mode(SaveMode.Overwrite)
      .saveAsTable(targetTable)

    spark.stop()
  }
}
