package com.fudan.run.dataset;

import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.sql.Dataset;

import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fudan.cfg.dataset.JsonFileDataset;
import com.fudan.run.JobRunner;
import com.fudan.run.ctx.annotation.DatasetAdapter;

@DatasetAdapter("json")
@DatasetAdapter("json")
public class JsonFileRdd extends DatasetHandler<JsonFileDataset> {

	/**
	 * Builds an RDD of record maps from a line-oriented JSON file.
	 *
	 * <p>Each text line is parsed with Jackson into zero or more
	 * {@code Map<String, Object>} records. Lines that fail to parse are
	 * skipped rather than failing the job (best-effort ingestion).
	 *
	 * @param runContext  job context providing the active {@code SparkSession}
	 * @param datasetNode dataset config whose {@code getPath()} locates the file
	 * @return an RDD with one element per successfully parsed JSON object
	 */
	@Override
	public JavaRDD<Map<String, Object>> rdd(JobRunner runContext, JsonFileDataset datasetNode) {
		Dataset<String> textFile = runContext.getSparkSession().read().textFile(datasetNode.getPath());
		return textFile.javaRDD().mapPartitions((it) -> {
			// ObjectMapper is not Serializable, so it must be created inside the
			// partition lambda on the executor rather than captured from the driver.
			ObjectMapper mapper = new ObjectMapper();
			List<Map<String, Object>> records = new ArrayList<>();
			while (it.hasNext()) {
				String line = it.next();
				// Wrap the line in brackets so both a single object ({...}) and a
				// comma-separated run of objects ({...},{...}) parse as a JSON array.
				String wrapped = "[" + line + "]";
				try {
					List<Map<String, Object>> parsed = mapper.readValue(wrapped,
							new TypeReference<List<Map<String, Object>>>() {
					});
					if (parsed != null) {
						records.addAll(parsed);
					}
				} catch (Exception ignored) {
					// Deliberate best-effort: a single malformed line must not fail
					// the whole partition, so parse errors are skipped silently.
				}
			}
			return records.iterator();
		});
	}

}
