package cn.hfuu.spark;

import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.functions;

/**
 * @author oldliu
 * @since 1.0
 */
/**
 * Reads the retail_db customers CSV, filters rows whose last name contains
 * "Smith", shows a sample, and prints the match count.
 */
public class zy1182 {
    public static void main(String[] args) {
        // Build a local SparkSession for this batch job.
        SparkSession spark = SparkSession.builder()
                .appName("SparkApp")
                .master("local")
                .getOrCreate();

        try {
            // Load the customers CSV. The file has NO header row, so column
            // names are assigned explicitly via toDF(...) below.
            Dataset<Row> df = spark.read()
                    .option("header", "false")      // file has no header row
                    .option("inferSchema", "true")  // infer column data types
                    .csv("file:///home/fan/retail_db-csv/customers.csv")
                    .toDF(
                            "customer_id",
                            "customer_fname",
                            "customer_lname",
                            "customer_email",
                            "customer_password",
                            "customer_street",
                            "customer_city",
                            "customer_state",
                            "customer_zipcode");

            // Keep customers whose last name contains "Smith". Cache the
            // result because it is consumed twice (show + count) and caching
            // avoids re-reading and re-filtering the CSV.
            Dataset<Row> smithCustomers =
                    df.filter(functions.col("customer_lname").contains("Smith"));
            smithCustomers.persist();

            smithCustomers.limit(20).show();
            System.out.println("lname中包含'Smith'的客户数量为:" + smithCustomers.count() + " 人");

            smithCustomers.unpersist();
        } finally {
            // Always release Spark resources, even if the job fails.
            spark.stop();
        }
    }
}
