import org.apache.hadoop.hbase.HBaseConfiguration
import java.sql.Connection
import org.apache.spark.SparkContext
//import org.apache.phoenix.spark.ConfigurationUtil
import java.util.Properties
import org.apache.phoenix.util.PhoenixRuntime
import java.sql.DriverManager
import org.apache.spark.SparkConf
import org.junit.rules.TemporaryFolder
import org.apache.spark.sql.SQLContext
import scala.collection.mutable.ListBuffer
//import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.SaveMode


/**
 * Scratch helper for Phoenix + Spark integration experiments.
 *
 * Only [[PhoenixSparkITHelper.main]] is live: it opens a Phoenix JDBC
 * connection, queries TABLE1, and prints the second column of every row.
 * The commented-out methods below are earlier exploratory variants
 * (DataFrame reads via phoenixTableAsDataFrame, saveToPhoenix, etc.)
 * kept for reference.
 */
object PhoenixSparkITHelper{
 /*
  def select{
     val conf = new SparkConf()
      .setAppName("PhoenixSparkIT")
      .setMaster("local[2]") // 2 threads, some parallelism
      .set("spark.ui.showConsoleProgress", "false") // Disable printing stage progress
     val sc = new SparkContext(conf)
     val hbaseConfiguration=HBaseConfiguration.create()
     hbaseConfiguration.set("hbase.zookeeper.quorum", "192.168.154.180");
     hbaseConfiguration.set("hbase.zookeeper.property.clientPort", "2181");
     val sqlContext = new SQLContext(sc)
     import org.apache.phoenix.spark._
     val df1 = sqlContext.phoenixTableAsDataFrame("TABLE1", Array("ID", "COL1"), conf = hbaseConfiguration)
     //df1.sqlContext.sql(sqlText);
    df1.registerTempTable("sql_table_1")
    val df2 = sqlContext.phoenixTableAsDataFrame("TABLE2", Array("ID", "TABLE1_ID"),
      conf = hbaseConfiguration)
    //df2.registerTempTable("sql_table_2")
    // NOTE(review): `sqlRdd` is declared twice below — this would not compile
    // if the block were ever un-commented; remove one declaration first.
    val sqlRdd =sqlContext.sql("select * from sql_table_1");
    val sqlRdd = sqlContext.sql(
      """
        |SELECT t1.ID, t1.COL1, t2.ID, t2.TABLE1_ID FROM sql_table_1 AS t1
        |INNER JOIN sql_table_2 AS t2 ON (t2.TABLE1_ID = t1.ID)""".stripMargin
    )
     sqlRdd.foreach ( println(_))
     val count = sqlRdd.count()
     println(count)
   }
  def save(){
    val conf = new SparkConf()
      .setAppName("PhoenixSparkIT")
      .setMaster("local[2]") // 2 threads, some parallelism
      .set("spark.ui.showConsoleProgress", "false") // Disable printing stage progress
     val sc = new SparkContext(conf)
     val hbaseConfiguration=HBaseConfiguration.create()
     hbaseConfiguration.set("hbase.zookeeper.quorum", "192.168.154.180");
     hbaseConfiguration.set("hbase.zookeeper.property.clientPort", "2181");
     val sqlContext = new SQLContext(sc)
     import org.apache.phoenix.spark._
     val df1= sqlContext.phoenixTableAsDataFrame(
         "TABLE1",// table name
         Array("ID", "COL1"),// column names
         zkUrl = Some("192.168.154.180:2181")
         ,predicate = 
       Some(" ID = 1 or COL1 = 'test_row_2'"))
       df1.foreach ( println(_))
     df1.registerTempTable("table3")

    val sqlRdd = sqlContext.sql("SELECT * FROM table3")
     sqlRdd.foreach ( println(_))
     val count = sqlRdd.count()
     println(count)
  }
  def sacet(){
     val conf = new SparkConf()
      .setAppName("PhoenixSparkIT")
      .setMaster("local[2]") // 2 threads, some parallelism
      .set("spark.ui.showConsoleProgress", "false") // Disable printing stage progress
     val sc = new SparkContext(conf)
     val sqlContext = new SQLContext(sc)
   import org.apache.phoenix.spark._
    val dataSet = List((1L, "1", 1), (2L, "2", 2), (3L, "3", 3))
    sc.parallelize(dataSet).saveToPhoenix("OUTPUT_TEST_TABLE",
        Seq("ID", "COL1", "COL2"),  zkUrl=Some("node180:2181"));
     sqlContext.load("org.apache.phoenix.spark", Map("table" -> "TABLE1",
      "zkUrl" -> "node180:2181"))

    // Verify they match
      
  }
  */

  /**
   * Entry point. Connects to Phoenix over JDBC, runs
   * `SELECT * FROM TABLE1`, and prints the second column of each row.
   *
   * All JDBC resources (connection, statement, result set) are closed
   * in reverse acquisition order even if the query fails.
   *
   * @param args unused
   */
  def main(args: Array[String]): Unit = {
    /* val conf = new SparkConf()
      .setAppName("PhoenixSparkIT")
      .setMaster("local[2]") // 2 threads, some parallelism
      .set("spark.ui.showConsoleProgress", "false") // Disable printing stage progress
     val sc = new SparkContext(conf)
     //import org.apache.phoenix.spark._
     val dataSet = List((1L, "1", 1), (2L, "2", 2), (3L, "3", 3))
    // sc.parallelize(dataSet, 2).saveToPhoenix(tableName, cols, conf, zkUrl, tenantId);
     val sqlContext = new SQLContext(sc)
      val df = sqlContext
      .read
      .format("org.apache.phoenix.spark")
      .option("table", "TABLE1")
      .option("zkUrl", "node180:2181")
      .load()// load one table as a DataFrame
      //df.createTempView("tese")
      df.sqlContext.sql("select * from tese where id=1").foreach { println(_) };
      df
      .write
      .format("org.apache.phoenix.spark")
      .mode(SaveMode.Overwrite)
      .option("table", "TABLE1_COPY")
      .option("zkUrl", "node180:2181")
      .save()
      val props=new Properties
    val conn = DriverManager.getConnection("jdbc:phoenix:node180,node181,node181:2181", props)
    val stmt = conn.createStatement()
      
    val rs = stmt.executeQuery("SELECT * FROM TABLE1_COPY")

    val checkResults = List((1L, "test_row_1"), (2, "test_row_2"))
    val results = ListBuffer[(Long, String)]()
    while (rs.next()) {
      results.append((rs.getLong(1), rs.getString(2)))
    }*/

    val props = new Properties
    // NOTE(review): the ZooKeeper host list repeats "node181" — was
    // "node180,node181,node182" intended? Confirm against the cluster.
    val conn = DriverManager.getConnection("jdbc:phoenix:node180,node181,node181:2181", props)
    try {
      val stmt = conn.createStatement()
      try {
        //val rs = stmt.executeQuery("SELECT *  FROM applog")
        val rs2 = stmt.executeQuery("SELECT *  FROM TABLE1")
        try {
          // Print the second column of every row; column type is unknown
          // here, so getObject is used rather than a typed accessor.
          while (rs2.next()) {
            println(rs2.getObject(2))
          }
        } finally rs2.close()
      } finally stmt.close()
    } finally conn.close()
  }
}

