package org.eking.bigdata.spark;

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.ForeachFunction;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.api.java.function.PairFunction;
import org.apache.spark.api.java.function.VoidFunction;
import org.apache.spark.sql.DataFrameReader;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SQLContext;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import scala.Tuple2;
import org.apache.spark.sql.SparkSession;
/**
 * Minimal Spark SQL smoke test: opens a Hive-enabled {@link SparkSession},
 * runs {@code select * from tsttab2}, and prints each row.
 *
 * <p>Assumes the table {@code tsttab2} exists in the Hive metastore with a
 * String column at index 0 and an int column at index 1 — TODO confirm schema.
 */
public class SparkSqlTest {

	/**
	 * Entry point. Builds the session, executes the query, prints rows, and
	 * stops the session.
	 *
	 * @param args command-line arguments (unused)
	 */
	public static void main(String[] args) {
		System.out.println("run spark sql begin");
		SparkSession spark = SparkSession.builder()
				.enableHiveSupport()
				.appName("UnderstandingSparkSession")
				.getOrCreate();
		try {
			Dataset<Row> ds = spark.sql("select * from tsttab2");
			// NOTE(review): foreach runs on the executors, so this println goes to
			// executor stdout, not the driver console. If driver-side output is the
			// intent, prefer ds.show() or iterating ds.collectAsList() instead.
			ds.foreach((ForeachFunction<Row>) t ->
					System.out.println("row: " + t.getString(0) + "  " + t.getInt(1)));
		} finally {
			// Release the session even if the query throws, so the application
			// does not leak the SparkContext.
			spark.stop();
		}
		System.out.println("run spark sql end");
	}

	/**
	 * Placeholder with no behavior. Name kept as-is ({@code RunTest}) for
	 * source compatibility, though Java convention would be {@code runTest}.
	 */
	public void RunTest() {
	}
}
