package com.edata.bigdata.utils

import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.apache.spark.sql.types.{DataType, DataTypes}

object DataFrameUtils {

  /** Reads column `colName` from `row` and renders it as a String.
    *
    * @param row      source row
    * @param colName  name of the column to read
    * @param dataType lower-case type tag selecting the typed accessor
    * @return string rendering of the column value (a null column renders as
    *         the literal "null" via string interpolation)
    *
    * For the geometry tags ("point"/"linestring") the coordinate list is
    * extracted from the WKT string via CommonUtils.findSubStrByRegex.
    */
  def getValueByCol(row: Row, colName: String, dataType: String): String = {
    dataType match {
      case "string" => s"${row.getAs[String](colName)}"
      case "integer" => s"${row.getAs[Integer](colName)}"
      case "float" => s"${row.getAs[Float](colName)}"
      case "double" => s"${row.getAs[Double](colName)}"
      case "boolean" => s"${row.getAs[Boolean](colName)}"
      // BUG FIX: previously read shorts with getAs[Boolean] (copy-paste
      // error), which would throw ClassCastException on a ShortType column.
      case "short" => s"${row.getAs[Short](colName)}"
      case "long" => s"${row.getAs[Long](colName)}"
      case "date" => s"${row.getAs[String](colName)}"
      case "timestamp" => s"${row.getAs[String](colName)}"
      case "point" => CommonUtils.findSubStrByRegex(row.getAs[String](colName), "POINT\\(([^\\)]+)\\)")
      case "linestring" => CommonUtils.findSubStrByRegex(row.getAs[String](colName), "LINESTRING\\(\\(([^\\)]+)\\)\\)")
      // Unknown tags fall back to the raw string value.
      case _ => row.getAs[String](colName)
    }
  }

  /** Builds a DataFrame from an RDD of delimited text lines.
    *
    * @param session   active SparkSession used to create the DataFrame
    * @param rdd       lines to parse, one row per line
    * @param separator field delimiter (NOTE(review): passed to String.split,
    *                  so it is interpreted as a regex — escape metacharacters
    *                  such as "|" at the call site)
    * @param colNames  column names, positionally aligned with colTypes
    * @param colTypes  Catalyst type of each column
    * @param nullables nullability flag of each column
    * @return DataFrame with schema built from colNames/colTypes/nullables
    */
  def createDataFrame(session: SparkSession, rdd: RDD[String], separator: String, colNames: Array[String], colTypes: Array[DataType], nullables: Array[Boolean]): DataFrame = {
    val data = rdd.map { line =>
      // BUG FIX: limit = -1 keeps trailing empty fields; the default split()
      // silently drops them, yielding rows whose arity no longer matches the
      // schema when the last field(s) of a line are empty.
      val subStrs = line.split(separator, -1)
      convertDataToRow(subStrs, colTypes)
    }
    val structFields = colNames.zipWithIndex.map {
      case (colname, index) =>
        DataTypes.createStructField(colname, colTypes(index), nullables(index))
    }
    val structType = DataTypes.createStructType(structFields)
    session.createDataFrame(data, structType)
  }

  /** Converts one line's raw string cells into a Row, coercing each cell to
    * the Catalyst type at the same index.
    *
    * @param data     raw cell values (must be at least colTypes.length long)
    * @param colTypes target type for each position
    * @return Row whose fields are the coerced values
    * @throws ClassCastException    for a type this converter does not support
    * @throws NumberFormatException when a cell cannot be parsed as its
    *                               numeric target type
    */
  def convertDataToRow(data: Array[String], colTypes: Array[DataType]): Row = {
    val cells = data.zipWithIndex.map {
      case (element, index) =>
        colTypes(index) match {
          case DataTypes.StringType => element
          case DataTypes.IntegerType => element.toInt
          case DataTypes.FloatType => element.toFloat
          case DataTypes.ShortType => element.toShort
          case DataTypes.DoubleType => element.toDouble
          // CONSISTENCY FIX: parseDataTypeByStr can produce the four types
          // below, but they were previously rejected here with a
          // ClassCastException.
          case DataTypes.BooleanType => element.toBoolean
          case DataTypes.LongType => element.toLong
          case DataTypes.DateType => java.sql.Date.valueOf(element) // expects "yyyy-[m]m-[d]d"
          case DataTypes.TimestampType => java.sql.Timestamp.valueOf(element) // expects "yyyy-[m]m-[d]d hh:mm:ss[.f...]"
          case dtype =>
            throw new ClassCastException(s"unexpected data type ${dtype.typeName}")
        }
    }.toSeq
    Row.fromSeq(cells)
  }

  /** Maps lower-cased type-name strings to Catalyst DataTypes.
    *
    * @param datas type names such as "string", "integer", "long", ...
    * @return the corresponding DataTypes, positionally aligned with the input
    * @throws ClassCastException for an unrecognized type name
    */
  def parseDataTypeByStr(datas: Array[String]): Array[DataType] = {
    datas.map { data =>
      data.toLowerCase match {
        case "string" => DataTypes.StringType
        case "integer" => DataTypes.IntegerType
        case "float" => DataTypes.FloatType
        // BUG FIX: "double" previously mapped to DataTypes.TimestampType.
        case "double" => DataTypes.DoubleType
        case "boolean" => DataTypes.BooleanType
        case "short" => DataTypes.ShortType
        case "long" => DataTypes.LongType
        case "date" => DataTypes.DateType
        case "timestamp" => DataTypes.TimestampType
        case _ =>
          throw new ClassCastException(s"unexpected data type ${data}")
      }
    }
  }

  /** Parses case-insensitive "true"/"false" strings into Booleans.
    *
    * @param datas boolean literals to parse
    * @return parsed flags, positionally aligned with the input
    * @throws ClassCastException for any value other than "true"/"false"
    */
  def parseBooleanByStr(datas: Array[String]): Array[Boolean] = {
    datas.map { data =>
      data.toLowerCase match {
        case "true" => true
        case "false" => false
        case _ =>
          throw new ClassCastException(s"unexpected boolean value ${data}")
      }
    }
  }

}