package spark.core.scala

import java.sql.{DriverManager, ResultSet}

/**
 * Created by ibf on 2018/2/4.
 */
object SparkSQLThriftServerJdbc {

  /**
   * Demo JDBC client for a Spark SQL Thrift Server (HiveServer2 protocol).
   *
   * Connects to the server, runs two queries against the `class19` database —
   * an emp/dept join and a parameterized per-department average salary — and
   * prints the results to stdout.
   *
   * All JDBC resources are released in `finally` blocks so that a failing
   * query cannot leak the connection or statements (the original closed them
   * only on the happy path).
   */
  def main(args: Array[String]): Unit = {
    // 1. Load the Hive JDBC driver via reflection so DriverManager can locate it.
    //    (Required on pre-JDBC-4 driver jars without META-INF/services registration.)
    val driver = "org.apache.hive.jdbc.HiveDriver"
    Class.forName(driver)

    // 2. Connection settings for the Thrift Server.
    val (url, user, password) = ("jdbc:hive2://bigdata-01:10000", "beifeng", "123456")

    // 3. Open the connection; everything below runs inside try/finally so
    //    the connection is always closed, even when a query fails.
    val conn = DriverManager.getConnection(url, user, password)
    try {
      // 4. Query 1: join emp with dept; print each employee's name and salary.
      val sql = "select * from class19.emp a join class19.dept b on a.deptno = b.deptno"
      val pstmt = conn.prepareStatement(sql)
      try {
        val rs: ResultSet = pstmt.executeQuery()
        try {
          while (rs.next()) {
            println(s"${rs.getString("ename")} : ${rs.getDouble("sal")}")
          }
        } finally rs.close()
      } finally pstmt.close()

      println("=================================================")

      // 5. Query 2: average salary per department, filtered through a bind
      //    parameter instead of string concatenation (safe against injection).
      val sql2 =
        """select
          |deptno,AVG(sal) as avg_sal
          |from
          |class19.emp a
          |group by deptno
          |having avg_sal > ?""".stripMargin

      val pstmt2 = conn.prepareStatement(sql2)
      try {
        pstmt2.setInt(1, 2000) // bind the avg_sal threshold
        val rs2 = pstmt2.executeQuery()
        try {
          while (rs2.next()) {
            println(s"${rs2.getInt("deptno")} : ${rs2.getDouble("avg_sal")}")
          }
        } finally rs2.close()
      } finally pstmt2.close()
    } finally conn.close()
  }
}
