package com.chis.jmdatatimer.main;

import org.apache.hadoop.conf.Configuration;
import org.apache.phoenix.spark.SparkSqlContextFunctions;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SQLContext;
import scala.Option;
import scala.Some;
import scala.collection.JavaConverters;
import scala.collection.Seq;

import java.util.Arrays;
import java.util.List;

/**
 * Smoke test that reads the Phoenix table {@code JMLL.TD_DATA} into a Spark
 * DataFrame via the phoenix-spark integration and prints the first 10 rows.
 *
 * @Company zwx
 * @author wlj
 * @version 2019-01-27
 */
public class TestPhoniex {

    /**
     * Entry point: submits a Spark job (yarn-cluster mode) that loads the
     * {@code MONITORCODE} and {@code DATA1} columns of the Phoenix table
     * {@code JMLL.TD_DATA} as a DataFrame and shows the first 10 rows.
     *
     * <p>Failures are allowed to propagate so the JVM (and the YARN
     * application) exits with a non-zero status instead of silently
     * "succeeding" after printing a stack trace.
     *
     * @param args command-line arguments (unused)
     */
    public static void main(String[] args) {
        System.setProperty("SPARK_YARN_MODE", "true");

        SparkConf sparkConf = new SparkConf();
        sparkConf.setAppName("hive_convert_task");
        sparkConf.setMaster("yarn-cluster");
        // Ship the cluster-side YARN config and dependency jars from HDFS.
        sparkConf.set("spark.yarn.dist.files", "hdfs://10.88.88.241:9000/home/hadoop/yarn-site.xml");
        sparkConf.set("spark.yarn.jars", "hdfs://10.88.88.241:9000/home/hadoop/jars/*.jar");

        // Namespace mapping must match the server-side hbase-site.xml,
        // otherwise Phoenix cannot resolve schema-qualified tables like JMLL.TD_DATA.
        Configuration configuration = new Configuration();
        configuration.set("phoenix.schema.isNamespaceMappingEnabled", "true");
        configuration.set("phoenix.schema.mapSystemTablesToNamespace", "true");

        JavaSparkContext jsc = new JavaSparkContext(sparkConf);
        try {
            SQLContext sqlContext = new SQLContext(jsc);
            SparkSqlContextFunctions sparkSqlContextFunctions = new SparkSqlContextFunctions(sqlContext);

            // Columns to project from the Phoenix table, converted to a Scala Seq.
            String columns = "MONITORCODE,DATA1";
            List<String> columnList = Arrays.asList(columns.split(","));
            Seq<String> columnSeq =
                    JavaConverters.asScalaIteratorConverter(columnList.iterator()).asScala().toSeq();

            // Use Option.empty() rather than Some("") — an empty-string predicate
            // or tenant id is passed to Phoenix verbatim and can yield an invalid
            // query; empty() correctly means "not supplied".
            Option<String> predicate = Option.empty();
            Option<String> tenantId = Option.empty();

            String jdbcUrl = "jdbc:phoenix:zk1,zk2,zk3:2181";
            String table = "JMLL.TD_DATA";
            Option<String> zkUrl = new Some<>(jdbcUrl);

            Dataset<Row> rowDataset = sparkSqlContextFunctions.phoenixTableAsDataFrame(
                    table, columnSeq, predicate, zkUrl, tenantId, configuration);

            rowDataset.registerTempTable("td_data");
            sqlContext.sql("select * from td_data limit 10").show();
        } finally {
            // Always release the SparkContext, even when the job above fails;
            // the original code leaked it on any exception.
            jsc.close();
        }
    }

}
