package org.apache.spark.rdd;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapred.SplitLocationInfo;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.spark.Partition;
import org.apache.spark.SparkContext;
import scala.collection.Seq;

public class NewHadoopRDD2<K,V> extends NewHadoopRDD<K,V> {
    public NewHadoopRDD2(SparkContext sc, Class<? extends InputFormat<K,V>> inputFormatClass, Class<K> keyClass, Class<V> valueClass, Configuration _conf) {
        super(sc, inputFormatClass, keyClass, valueClass, _conf);
    }

    public Seq<String> getPreferredLocations(Partition hsplit){
        try {
            NewHadoopPartition partition = (NewHadoopPartition) hsplit;
            InputSplit split = partition.serializableHadoopSplit().value();
            String locs[]=split.getLocations();
            SplitLocationInfo infos[]=new SplitLocationInfo[locs.length];
            for(int i=0;i<locs.length;i++){
                infos[i]=new SplitLocationInfo(locs[i],false);
            }
            //noinspection unchecked
            return HadoopRDD$.MODULE$.convertSplitLocationInfo(infos).get();
        }catch (RuntimeException ex){
            throw ex;
        }catch (Exception e){
            throw new RuntimeException(e);
        }
    }
}
