package com.fudan.run.dataset;

import java.util.HashMap;
import java.util.Map;

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;

import com.fudan.cfg.base.DatasetDef;
import com.fudan.cfg.dataset.OracleDataset;
import com.fudan.run.JobRunner;
import com.fudan.run.ctx.annotation.DatasetAdapter;

@DatasetAdapter("oracle")
public class OracleRdd extends DatasetHandler<OracleDataset> {

	/**
	 * Loads an Oracle table through the Spark JDBC reader and converts every
	 * row into a {@code Map} keyed by column name.
	 *
	 * @param runContext    job context supplying the shared {@code SparkSession}
	 * @param oracleDataset dataset definition holding the JDBC url, table name,
	 *                      credentials and an optional filter condition
	 * @return an RDD with one column-name-to-value map per source row
	 */
	@Override
	public JavaRDD<Map<String, Object>> rdd(JobRunner runContext, OracleDataset oracleDataset) {
		Dataset<Row> ds = runContext.getSparkSession().read()
				.format("jdbc")
				.option("driver", "oracle.jdbc.OracleDriver")
				.option("url", oracleDataset.getSource())
				.option("dbtable", oracleDataset.getTable())
				.option("user", oracleDataset.getUsername())
				.option("password", oracleDataset.getPassword())
				.load();
		// Optional WHERE-style filter; applied only when configured.
		if (oracleDataset.getCondition() != null) {
			ds = ds.where(oracleDataset.getCondition());
		}
		// Resolve the column names once on the driver instead of calling
		// Row#schema() for every record inside the map lambda; the String[]
		// is serializable and captured by the closure.
		final String[] columns = ds.schema().fieldNames();
		return ds.toJavaRDD().map(row -> {
			// Presize: the row width is known, so avoid rehashing.
			Map<String, Object> record = new HashMap<>(columns.length);
			for (int i = 0; i < columns.length; i++) {
				record.put(columns[i], row.get(i));
			}
			return record;
		});
	}

}
