import model.Info;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.Function;
import org.apache.spark.sql.*;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructType;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * Word-count pipeline: reads space-separated {@code "<word> <count>"} lines,
 * aggregates the counts per word with Spark SQL, and writes the result as CSV.
 */
public class App {

    /**
     * Entry point. Pipeline stages: source (text file) -> transform
     * (parse + group/sum) -> sink (CSV directory).
     *
     * @param args unused
     */
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
                .appName("RDDToDataset")
                .master("local[*]")
                .getOrCreate();

        // 1. Source: one record per input line.
        JavaRDD<String> lines = spark.read().textFile("src/main/resources/input").javaRDD();

        // 2. Transform: parse each "<word> <count>" line into a Row(word, cnt).
        //    NOTE(review): a line with fewer than 2 tokens or a non-numeric count
        //    fails the whole job — confirm the input is always well-formed.
        JavaRDD<Row> rows = lines.map(line -> {
            String[] fields = line.split(" ");
            return RowFactory.create(fields[0], Integer.parseInt(fields[1]));
        });

        // Typed schema (the original built it through a raw List, which produced
        // unchecked-conversion warnings).
        StructType schema = DataTypes.createStructType(Arrays.asList(
                DataTypes.createStructField("word", DataTypes.StringType, true),
                DataTypes.createStructField("cnt", DataTypes.IntegerType, true)));

        Dataset<Row> wordsDf = spark.createDataFrame(rows, schema);
        wordsDf.printSchema();
        wordsDf.createOrReplaceTempView("info");

        // Aggregate: total count per word.
        Dataset<Row> countsDf = spark.sql("select word,sum(cnt) as cnt from info group by word");
        countsDf.show();

        // 3. Sink: csv(path) already implies format("csv"), so the original's
        //    extra .format("csv") call was redundant.
        countsDf.coalesce(2).write().mode(SaveMode.Overwrite).csv("src/main/resources/output");

        spark.stop();
    }
}

