package com.o2o.cleaning.month.platform.ebusiness_plat.kuaishou

import java.sql.{ResultSet, ResultSetMetaData}

import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}

object addDataframeMember {

  // NOTE(review): object name should be UpperCamelCase per Scala convention,
  // but renaming would break external callers, so it is kept as-is.

  // SECURITY(review): OBS access/secret keys are hard-coded below and committed
  // to source. Move them to external configuration (core-site.xml, environment
  // variables, or a credentials provider) and rotate the exposed keys.
  val spark = SparkSession.builder()
    .appName("Kuaishou_webcast")
    .config("spark.debug.maxToStringFields", "2000")
    .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    .config("spark.sql.caseSensitive", "true")
    .master("local[*]")
    .getOrCreate()

  val sc = spark.sparkContext
  sc.hadoopConfiguration.set("fs.s3a.access.key", "GAO7EO9FWKPJ8WFCQDME")
  sc.hadoopConfiguration.set("fs.s3a.secret.key", "LZ0xaHBSYKHaJ9ECDbX9f7zin79UZkXfGoNapRPL")
  sc.hadoopConfiguration.set("fs.s3a.endpoint", "https://obs.cn-north-1.myhuaweicloud.com")
  sc.setLogLevel("ERROR")

  /** View over a JDBC [[ResultSetMetaData]] exposing a Spark schema. */
  trait ResultSetMetaDataToSchema {
    def columnCount: Int

    def schema: StructType
  }

  /**
   * Implicitly enriches [[ResultSetMetaData]] so a Spark [[StructType]] can be
   * derived from a JDBC result set's metadata. Every column is currently
   * mapped to [[StringType]] regardless of its JDBC type.
   */
  implicit def wrapResultSetMetaData(rsmd: ResultSetMetaData): ResultSetMetaDataToSchema = {
    new ResultSetMetaDataToSchema {
      def columnCount: Int = rsmd.getColumnCount

      def schema: StructType = {
        // Maps a JDBC type name to a Spark DataType. All columns are read as
        // StringType for now; precision/scale/className are accepted so the
        // mapping can later be extended (e.g. DecimalType(precision, scale)).
        def tdConvert(typeName: String, precision: Int = 0, scale: Int = 0, className: String = ""): DataType =
          typeName match {
            case _ => StringType
          }

        // Builds the StructField for 1-based JDBC column index `i`.
        def col2StructField(i: Int): StructField = {
          // BUG FIX: isNullable returns one of three constants
          // (columnNoNulls = 0, columnNullable = 1, columnNullableUnknown = 2).
          // The previous match on {0, 1} threw MatchError when the driver
          // reported "unknown". Treat anything that is not columnNoNulls as
          // nullable — the safe default for Spark schemas.
          val nullable = rsmd.isNullable(i) != ResultSetMetaData.columnNoNulls
          StructField(
            rsmd.getColumnName(i),
            tdConvert(rsmd.getColumnTypeName(i), rsmd.getPrecision(i), rsmd.getScale(i), rsmd.getColumnClassName(i)),
            nullable
          ).withComment(rsmd.getColumnLabel(i))
        }

        // Fold all 1-based column indices into a StructType.
        (1 to columnCount).foldLeft(new StructType)((s, i) => s.add(col2StructField(i)))
      }
    }
  }

  /** View over a JDBC [[ResultSet]] exposing a Spark schema and DataFrame. */
  trait ResultSetToDF {
    def schema: StructType

    def DF: DataFrame
  }

  /**
   * Implicitly enriches [[ResultSet]] so it can be materialised as a Spark
   * [[DataFrame]]. Note: building `DF` consumes the ResultSet (the cursor is
   * advanced to the end) and collects every row into driver memory, so this
   * is intended for small result sets only.
   */
  implicit def wrapResultSet(rs: ResultSet): ResultSetToDF = {
    // Metadata is stable for the life of the ResultSet; fetch it once
    // instead of on every access (the original re-fetched it per call).
    val rsmd = rs.getMetaData

    // Drains the cursor, applying `retrieve` to each positioned row.
    def toList[T](retrieve: ResultSet => T): List[T] =
      Iterator.continually(rs).takeWhile(_.next()).map(retrieve).toList

    // Reads every column (1-based JDBC index) of the current row into a Row.
    def rsContent2Row(row: ResultSet): Row =
      Row.fromSeq(Array.tabulate[Object](rsmd.getColumnCount)(i => row.getObject(i + 1)).toSeq)

    new ResultSetToDF {
      def schema: StructType = rsmd.schema

      def DF: DataFrame = spark.createDataFrame(sc.parallelize(toList(rsContent2Row)), schema)
    }
  }

}