package com.edata.bigdata.util

import com.alibaba.fastjson.{JSON, JSONObject}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types.StructField
import org.apache.spark.sql.{DataFrame, Row, SaveMode}
import java.sql.{Connection, DriverManager, Statement}
import java.util.Properties
import scala.collection.mutable.ArrayBuffer


trait SQLExecutor extends Executor {


  /**
   * Reads a delimited text file at `path` as lines and materializes it into a
   * DataFrame whose schema is derived from `bean` by `createDataFrame`.
   *
   * @param path      input path readable by the Spark context
   * @param seperator field delimiter used when splitting each line
   * @param bean      entity manifest driving the target schema
   * @return DataFrame built from the file contents
   */
  override def findDataFromPath[T](path: String, seperator: String)(implicit bean: Manifest[T]): DataFrame = {
    LOGGER.info(s"finding data from ${path}")
    // textFile is lazy, so logging first does not change execution order of any work
    val lines = SESSION.sparkContext.textFile(path)
    createDataFrame(lines, seperator)
  }

  /**
   * Persists the raw string RDD to `path` as plain text files.
   *
   * @param data lines to write out
   * @param path destination directory for Spark's text output
   * @param bean entity manifest (unused here; kept for interface symmetry)
   */
  override def saveDataToPath[T](data: RDD[String], path: String)(implicit bean: Manifest[T]): Unit = {
    LOGGER.info(s"saving data to ${path}")
    data.saveAsTextFile(path)
  }

  //[{col:{}}]
  /**
   * Selects all rows of the table mapped by `bean`, optionally filtered.
   *
   * @param args optional SQL predicates; ANDed together into the WHERE clause
   * @param bean entity manifest; find_SCHEMA parses it into
   *             (table label, id column, "col:type" descriptors)
   * @return DataFrame holding the query result
   */
  override def findData[T](args: String*)(implicit bean: Manifest[T]): DataFrame = {
    val (label, _, col_types) = find_SCHEMA(bean.toString())
    // Build the SELECT list: each "col:type" descriptor becomes a
    // type-aware query expression.
    val cols = col_types.map { c_t =>
      val parts = c_t.split(":")
      EDataUtils.convertKToQueryStrByDataType(parts(0), parts(1))
    }

    // BUG FIX: mkString("and") glued predicates together as "aandb",
    // producing invalid SQL for more than one predicate; pad with spaces.
    val condition =
      if (args.isEmpty) ""
      else " where " + args.mkString(" and ")

    val sql = s"SELECT ${cols.mkString(",")} FROM ${label} ${condition}"
    LOGGER.info(s"Executing SQL: ${sql}")
    SESSION.read.format("jdbc")
      .option("url", JDBC_PREFIX + JDBC_IP + ":" + JDBC_PORT + "/" + JDBC_DATABASE)
      .option("user", JDBC_USER)
      .option("password", JDBC_PASSWORD)
      .option("query", sql).load()
  }

  /**
   * Appends the records in `data` to the table mapped by `bean` via JDBC.
   *
   * @param data lines of raw records to persist
   * @param args args(1) is the field separator used to split each line
   *             — NOTE(review): args(0) appears unused here; confirm callers'
   *             argument convention
   */
  override def saveData[T](data: RDD[String], args: String*)(implicit bean: Manifest[T]): Unit = {
    // BUG FIX: the separator is read from args(1), so at least two arguments
    // are required; checking only isEmpty let a single-element args slip past
    // the guard and fail with ArrayIndexOutOfBoundsException below.
    if (args.length < 2) {
      throw new Exception("seperator should not be empty")
    }
    val (label, _, _) = find_SCHEMA(bean.toString())
    LOGGER.info(s"saving data ${label}")
    createDataFrame(data, args(1)).write.format("jdbc").mode(SaveMode.Append)
      .option("url", JDBC_PREFIX + JDBC_IP + ":" + JDBC_PORT + "/" + JDBC_DATABASE)
      .option("user", JDBC_USER)
      .option("password", JDBC_PASSWORD)
      .option("dbtable", label)
      .save()
  }

  /**
   * Intended upsert of `data` into the table mapped by `bean` using JDBC batches.
   *
   * NOTE(review): the per-row batch building is commented out below, so each
   * partition currently only opens a connection and executes an empty batch —
   * confirm whether this is deliberately disabled before re-enabling.
   *
   * @param data lines of raw records
   * @param args args(1) is the field separator used to split each line
   */
  override def updateData[T](data: RDD[String], args: String*)(implicit bean: Manifest[T]): Unit = {
    // BUG FIX: the separator is read from args(1); require two arguments
    // instead of just non-empty to avoid ArrayIndexOutOfBoundsException.
    if (args.length < 2) {
      throw new Exception("seperator should not be empty")
    }
    val (label, id, _) = find_SCHEMA(bean.toString())
    val data_df = createDataFrame(data, args(1))
    data_df.foreachPartition((iter: Iterator[Row]) => {
      // BUG FIX: the connection var was declared on the driver and captured
      // by this closure; JDBC connections cannot be shipped to executors,
      // so each partition must create (and own) its connection locally.
      var connection: Connection = null
      var st: Statement = null
      try {
        connection = createConnection()
        st = connection.createStatement()
//        iter.foreach((row: Row) => {
//          val c_array = new ArrayBuffer[String]()
//          val v_array = new ArrayBuffer[String]()
//          val cv_array = new ArrayBuffer[String]()
//          row.schema.foreach((c: StructField) => {
//            c_array.append(c.name)
//            val value = convertColValToStrByDataType(findDataFrameColValue(row, c.name, c.dataType), convertDataTypeToStr(c.dataType))
//            v_array.append(value)
//            if (id != c.name) {
//              cv_array.append(s"${c.name}=${value}")
//            }
//          })
//          val sql = s"INSERT INTO ${label} (${c_array.toArray.mkString(",")}) VALUES (${v_array.toArray.mkString(",")}) ON CONFLICT(${id}) DO UPDATE SET ${cv_array.toArray.mkString(",")}"
//          LOGGER.info(s"Executing SQL:${sql}")
//          st.addBatch(sql)
//        })

        // executeBatch returns Array[Int], but foreachPartition expects Unit,
        // so the result is intentionally discarded.
        st.executeBatch()
      } catch {
        case e: Exception => e.printStackTrace()
      } finally {
        // BUG FIX: close the Statement as well as the Connection to avoid
        // leaking driver-side resources on long-lived pools.
        if (st != null) {
          st.close()
        }
        if (connection != null) {
          connection.close()
        }
      }
    })
  }

  /**
   * Selects rows of the table mapped by `bean` whose id column equals `value`,
   * optionally narrowed by extra predicates.
   *
   * @param value id value to match (converted per the id column's declared type)
   * @param args  optional additional SQL predicates, ANDed onto the id filter
   * @param bean  entity manifest; find_SCHEMA yields (label, "id:type", cols)
   * @return DataFrame holding the query result
   */
  override def findDataById[T](value: String, args: String*)(implicit bean: Manifest[T]): DataFrame = {
    val (label, id, col_types) = find_SCHEMA(bean.toString())
    val cols = col_types.map { c_t =>
      val parts = c_t.split(":")
      EDataUtils.convertKToQueryStrByDataType(parts(0), parts(1))
    }
    val idParts = id.split(":")
    var CONDITION = s"where ${idParts(0)}=${EDataUtils.convertVToQueryStrByDataType(value, idParts(1))}"
    if (!args.isEmpty) {
      // BUG FIX: mkString("and") glued predicates together as "aandb",
      // producing invalid SQL for more than one predicate; pad with spaces.
      CONDITION = CONDITION + " and " + args.mkString(" and ")
    }

    val sql = s"SELECT ${cols.mkString(",")} FROM ${label} ${CONDITION}"
    LOGGER.info(s"Executing SQL:${sql}")
    SESSION.read.format("jdbc")
      .option("url", JDBC_PREFIX + JDBC_IP + ":" + JDBC_PORT + "/" + JDBC_DATABASE)
      .option("user", JDBC_USER)
      .option("password", JDBC_PASSWORD)
      .option("query", sql).load()
  }

  /**
   * Selects rows of the table mapped by `bean` where column `key` equals `value`.
   *
   * @param key   column name to filter on; must exist in the mapped schema
   * @param value value to match, converted per the column's declared type
   * @param args  currently unused; kept for interface symmetry
   * @param bean  entity manifest parsed by find_SCHEMA
   * @return DataFrame holding the query result
   * @throws Exception if `key` is not a column of the mapped schema
   */
  override def findDataByProperty[T](key: String, value: String, args: String*)(implicit bean: Manifest[T]): DataFrame = {
    val (label, _, col_types) = find_SCHEMA(bean.toString())
    val cols = col_types.map { c_t =>
      val parts = c_t.split(":")
      EDataUtils.convertKToQueryStrByDataType(parts(0), parts(1))
    }
    // BUG FIX: startsWith(key) matched any column whose NAME merely begins
    // with `key` (e.g. key "id" matched "idx:..."), and indexing (0) on an
    // empty filter result threw an opaque IndexOutOfBoundsException.
    // Match the column name exactly and fail with a clear message.
    val key_type = col_types
      .find(c_t => c_t.split(":")(0) == key)
      .map(_.split(":")(1))
      .getOrElse(throw new Exception(s"unknown column ${key} for ${label}"))
    val sql = s"SELECT ${cols.mkString(",")} FROM ${label} where ${key}=${EDataUtils.convertVToQueryStrByDataType(value, key_type)}"
    // Consistency: log the statement like the other find* methods do.
    LOGGER.info(s"Executing SQL:${sql}")
    SESSION.read.format("jdbc")
      .option("url", JDBC_PREFIX + JDBC_IP + ":" + JDBC_PORT + "/" + JDBC_DATABASE)
      .option("user", JDBC_USER)
      .option("password", JDBC_PASSWORD)
      .option("query", sql).load()
  }


  /**
   * Opens a raw JDBC connection built from the trait's JDBC_* configuration.
   * SSL is disabled, unicode is enabled with UTF-8 encoding, and the batch
   * parameters are appended to the URL's query string.
   *
   * @return a freshly opened Connection; the caller is responsible for closing it
   */
  def createConnection(): Connection = {
    val connectionProps = new Properties()
    connectionProps.setProperty("user", JDBC_USER)
    connectionProps.setProperty("password", JDBC_PASSWORD)
    connectionProps.setProperty("useSSL", "false")
    connectionProps.setProperty("useUnicode", "true")
    connectionProps.setProperty("characterEncoding", "utf-8")
    val url = s"${JDBC_PREFIX}${JDBC_IP}:${JDBC_PORT}/${JDBC_DATABASE}?${JDBC_BATCHPARAMS}"
    DriverManager.getConnection(url, connectionProps)
  }


}
