package tech.zhaoxin;

import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.DataFrameReader;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SQLContext;

import java.util.List;


public class PhoenixSparkRead {

    /**
     * Demo entry point: loads the Phoenix table {@code iteblog} through the
     * phoenix-spark connector, registers it as a temp view, re-selects it via
     * Spark SQL, and prints the collected rows to stdout.
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        SparkConf sparkConf = new SparkConf()
                .setMaster("spark://192.168.61.102:7077")
                .setAppName("phoenix-test");
        JavaSparkContext jsc = new JavaSparkContext(sparkConf);
        try {
            SQLContext sqlContext = new SQLContext(jsc);
            System.out.println("开始执行第一步");
            // Load data from the Phoenix table "iteblog"; zkUrl points at the
            // ZooKeeper quorum backing the Phoenix/HBase cluster.
            Dataset<Row> df = sqlContext
                    .read()
                    .format("org.apache.phoenix.spark")
                    .option("table", "iteblog")
                    .option("zkUrl", "192.168.61.102:2181")
                    .load();
            df.createOrReplaceTempView("iteblog");
            System.out.println("开始执行第二步");
            // Reuse the existing SQLContext for the query. The original code
            // constructed a second SQLContext here, which was redundant: the
            // temp view lives in the shared underlying SparkSession.
            df = sqlContext.sql("SELECT * FROM iteblog");
            System.out.println("开始执行第三步");
            // collectAsList() pulls every row to the driver — acceptable for a
            // demo, but be careful with large tables.
            List<Row> rows = df.collectAsList();
            System.out.println(rows);
        } finally {
            // Always release the SparkContext, even if loading or querying
            // throws (e.g. the cluster or ZooKeeper is unreachable).
            jsc.stop();
        }
        System.out.println("完成");
    }
}
