package org.apache.spark.examples.sql.streaming;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.spark.examples.utils.Utils;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;

/**
 * Spark SQL data-source examples: reading JSON, writing/reading Parquet on HDFS
 * (with an existence check via the Hadoop FileSystem API), querying a Parquet-backed
 * temporary view, and loading/saving through JDBC.
 */
public class JavaSQLDataSourceExample {

    // Canonical HDFS namenode URI and the single location of the people Parquet data.
    // The original code checked, wrote, and re-read three DIFFERENT paths
    // ("people.parquet" relative vs. an absolute hdfs:// URI), so the existence
    // check never guarded the write and the read could fail on a fresh cluster.
    private static final String HDFS_URI = "hdfs://172.30.17.164:8020";
    private static final String PEOPLE_PARQUET = HDFS_URI + "/streamsets/people.parquet";

    /**
     * Entry point: reads people JSON, materializes it as Parquet on HDFS if not
     * already present, then queries the Parquet data through a temporary SQL view.
     *
     * @param args unused command-line arguments
     */
    public static void main(String[] args) {
        // Required on Windows so Hadoop can locate winutils.exe; harmless elsewhere.
        System.setProperty("hadoop.home.dir", "C:/hadoop-2.6.0");
        SparkSession spark = SparkSession
                .builder()
                .appName("sql DataSource Example")
                .getOrCreate();

        Dataset<Row> peopleDF = spark.read().json(Utils.filePathPeopleJson);
        peopleDF.show();

        // DataFrames can be saved as Parquet files, maintaining the schema information.
        // Only write if the file is not already on HDFS.
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", HDFS_URI);
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        try {
            // NOTE: FileSystem.get returns a JVM-wide cached instance; do not close
            // it here, or subsequent HDFS access in this process may fail.
            FileSystem fs = FileSystem.get(conf);
            Path parquetPath = new Path(PEOPLE_PARQUET);
            if (fs.exists(parquetPath)) {
                System.out.println(PEOPLE_PARQUET + " 文件存在");
            } else {
                System.out.println(PEOPLE_PARQUET + " 文件不存在");
                peopleDF.write().parquet(PEOPLE_PARQUET);
            }
        } catch (IOException e) {
            // Best-effort example code: report and continue; the read below will
            // surface a hard failure if the data truly is unavailable.
            e.printStackTrace();
        }

        // Read back the Parquet file created above. Parquet files are
        // self-describing, so the schema is preserved and the result of
        // loading one is again a DataFrame.
        Dataset<Row> parquetFileDF = spark.read().parquet(PEOPLE_PARQUET);

        // Parquet files can also back a temporary view used in SQL statements.
        parquetFileDF.createOrReplaceTempView("parquetFile");
        Dataset<Row> namesDF = spark.sql("SELECT name FROM parquetFile WHERE age BETWEEN 13 AND 19");
        // Map each Row to a formatted string (column 0 is "name" per the SELECT).
        Dataset<String> namesDS = namesDF.map(
                (MapFunction<Row, String>) row -> "Name: " + row.getString(0),
                Encoders.STRING());
        namesDS.show();

        // Release the session's resources before exiting.
        spark.stop();
    }

    /**
     * Demonstrates creating a DataFrame from an in-memory JSON-string Dataset
     * (each element is one JSON document) and displaying it.
     *
     * @param spark the active session used to create and read the Dataset
     */
    public static void represented(SparkSession spark) {
        List<String> jsonData = Arrays.asList(
                "{\"name\":\"Yin\",\"address\":{\"city\":\"Columbus\",\"state\":\"Ohio\"}}");
        Dataset<String> anotherPeopleDataset = spark.createDataset(jsonData, Encoders.STRING());
        Dataset<Row> anotherPeople = spark.read().json(anotherPeopleDataset);
        anotherPeople.show();
    }

    /**
     * Demonstrates JDBC loading and saving via both the generic load/save
     * options API and the dedicated {@code jdbc} convenience methods.
     * Connection details here are placeholders, not a live database.
     *
     * @param spark the active session used for all JDBC reads and writes
     */
    private static void runJdbcDatasetExample(SparkSession spark) {
        // $example on:jdbc_dataset$
        // Note: JDBC loading and saving can be achieved via either the load/save or jdbc methods
        // Loading data from a JDBC source
        Dataset<Row> jdbcDF = spark.read()
                .format("jdbc")
                .option("url", "jdbc:postgresql:dbserver")
                .option("dbtable", "schema.tablename")
                .option("user", "username")
                .option("password", "password")
                .load();

        Properties connectionProperties = new Properties();
        connectionProperties.put("user", "username");
        connectionProperties.put("password", "password");
        Dataset<Row> jdbcDF2 = spark.read()
                .jdbc("jdbc:postgresql:dbserver", "schema.tablename", connectionProperties);

        // Saving data to a JDBC source
        jdbcDF.write()
                .format("jdbc")
                .option("url", "jdbc:postgresql:dbserver")
                .option("dbtable", "schema.tablename")
                .option("user", "username")
                .option("password", "password")
                .save();

        jdbcDF2.write()
                .jdbc("jdbc:postgresql:dbserver", "schema.tablename", connectionProperties);

        // Specifying create table column data types on write
        jdbcDF.write()
                .option("createTableColumnTypes", "name CHAR(64), comments VARCHAR(1024)")
                .jdbc("jdbc:postgresql:dbserver", "schema.tablename", connectionProperties);
        // $example off:jdbc_dataset$
    }
}
