package com.study.spark.scala.sql

import java.sql.DriverManager

/**
  * Spark/Hive integration demo: queries Hive through Spark's Thrift server via JDBC.
  * Start the Thrift server first:
  * start-thriftserver.sh  \
  * --master local[2]  \
  * --jars mysql-connector-java-5.1.25.jar \
  * --hiveconf hive.server2.thrift.port=10000
  */
object ThriftServerDemo {
  def main(args: Array[String]): Unit = {
    // Register the Hive JDBC driver.
    val driver = "org.apache.hive.jdbc.HiveDriver"
    Class.forName(driver)
    // Open the connection.
    val (url, username, password) = ("jdbc:hive2://localhost:10000", "stephen", "")
    val connection = DriverManager.getConnection(url, username, password)
    try {
      // The hive2 JDBC URL does not accept a database name, so switch
      // databases manually by executing a USE statement. Close it too —
      // the original leaked this statement even on the happy path.
      val useStmt = connection.prepareStatement("use hadoop")
      try useStmt.execute() finally useStmt.close()

      val sql = "SELECT region,fname,director FROM film"
      val statement = connection.prepareStatement(sql)
      try {
        // Execute the query and print each row as fname:director:region.
        val res = statement.executeQuery()
        try {
          while (res.next()) {
            println(s"${res.getString("fname")}:${res.getString("director")}:${res.getString("region")}")
          }
        } finally res.close()
      } finally statement.close()
    } finally {
      // Ensure the connection is released even if any statement above throws;
      // the original only closed resources on the success path.
      connection.close()
    }
  }
}
