package com.fudan.run.dataset;

import java.util.HashMap;
import java.util.Map;

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoder;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.execution.datasources.csv.CSVDataSource;

import com.fudan.cfg.dataset.CSVFileDataset;
import com.fudan.cfg.dataset.JsonFileDataset;
import com.fudan.run.JobRunner;
import com.fudan.run.ctx.annotation.DatasetAdapter;

@DatasetAdapter("csv")
public class CSVRdd extends DatasetHandler<CSVFileDataset> {

	/**
	 * Reads the CSV file referenced by the dataset node (first line treated as
	 * the header row) and converts each data row into a map keyed by the header
	 * column names.
	 *
	 * @param runContext  job context supplying the {@code SparkSession} used to read the file
	 * @param datasetNode CSV dataset configuration; only {@code getPath()} is read here
	 * @return an RDD with one {@code Map<columnName, cellValue>} per CSV row
	 */
	@Override
	public JavaRDD<Map<String, Object>> rdd(JobRunner runContext, CSVFileDataset datasetNode) {
		Dataset<Row> csvDataset = runContext.getSparkSession().read()
				.option("header", true)
				.csv(datasetNode.getPath());
		return csvDataset.javaRDD().map(row -> {
			// Hoist the field-name lookup out of the loop: StructType.fieldNames()
			// builds a fresh String[] on every call, so invoking it per column
			// made each row O(columns^2).
			String[] fieldNames = row.schema().fieldNames();
			Map<String, Object> map = new HashMap<>(fieldNames.length);
			for (int i = 0; i < fieldNames.length; i++) {
				map.put(fieldNames[i], row.get(i));
			}
			return map;
		});
	}

}
