package skewreduce.framework.physical;

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import java.util.Set;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.OutputFormat;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import skewreduce.framework.EaggPlan;
import skewreduce.framework.OperatorSchedule;
import skewreduce.framework.Schedule;
import skewreduce.framework.SchedulerEvent;
import skewreduce.framework.planner.PartitionNode;
import skewreduce.framework.planner.partition.PartitionStrategy;
import skewreduce.lib.Cube2;
import skewreduce.lib.IPoint3D;
import skewreduce.lib.ITuple;
import skewreduce.lib.Partition;
import skewreduce.lib.Point3D;



/**
 * Abstract physical operator that draws a uniform random sample of the
 * operator's input via a MapReduce job (one reducer) and derives the initial
 * partition plan and schedule from the sampled records.
 *
 * Concrete subclasses supply the input/output formats and output key/value
 * classes for the sampling job.
 */
public abstract class RandomSample extends PSampleOp {
    private static final Logger LOG = LoggerFactory.getLogger("PLAN");

    // Default target sample size in bytes (4 MB); used when no explicit
    // expected size or sample rate is configured.
    private static final long DEFAULT_SAMPLE_SIZE = 4*1024*1024;

    // Configuration keys shared between setup(), SampleMapper and SampleReducer.
    private final static String TOTAL_SIZE_ATTR = "sampler.size.total";        // total input size in bytes
    private final static String EXPECTED_SIZE_ATTR = "sampler.size.expected";  // desired sample size in bytes
    private final static String PARTITION_SIZE_ATTR = "sampler.size.partition";
    private final static String RECORD_SIZE_ATTR = "sampler.size.record";
    private final static String ROOT_CUBE_SPEC_ATTR = "sampler.cube.root";     // spec string of the root bounding cube
    private final static String NUM_PARTITION_ATTR = "sampler.partition.num";
    private final static String WRITE_SAMPLE_ATTR = "sampler.writeSample";     // if true, reducer also emits sampled records
    private final static String SAMPLE_RATE_ATTR = "sampler.sampleRate";       // explicit per-record sampling probability

//    protected abstract Class<? extends SampleMapper<?,?>> getMapperClass();
//    protected abstract Class<? extends SpatialSampleReducer<?,?>> getReducerClass();
    // Subclass hooks: job I/O formats and output types for the sampling job.
    protected abstract Class<? extends InputFormat<?,?>> getInputFormatClass();
    protected abstract Class<? extends OutputFormat<?,?>> getOutputFormatClass();
    protected abstract Class<?> getOutputKeyClass();
    protected abstract Class<?> getOutputValueClass();
//    protected abstract int getRecordSize();
    
    /**
     * Returns the target sample size in bytes: 5% of the JVM's max heap,
     * but never less than {@link #DEFAULT_SAMPLE_SIZE}.
     */
    protected long getSampleSize() {
    	return Math.max((long)(Runtime.getRuntime().maxMemory()*0.05),DEFAULT_SAMPLE_SIZE);
    }

    /**
     * Builds the sampling MapReduce job: subclass-supplied I/O formats and
     * output types, the framework's sampler mapper/reducer, and a single
     * reduce task so all samples land in one repartitioner.
     *
     * @param conf base configuration to clone into the job
     * @return the fully configured (but not yet submitted) job
     * @throws IOException if the job cannot be created
     */
    protected Job createJob(Configuration conf) throws IOException {
        Job sampleJob = new Job(conf);

        sampleJob.setJarByClass(RandomSample.class);

        // Fixed framework mapper/reducer; a single reducer gathers every sample.
        sampleJob.setMapperClass(SampleMapper.class);
        sampleJob.setReducerClass(SampleReducer.class);
        sampleJob.setNumReduceTasks(1);

        // Subclass-provided I/O configuration.
        sampleJob.setInputFormatClass(getInputFormatClass());
        sampleJob.setOutputFormatClass(getOutputFormatClass());
        sampleJob.setOutputKeyClass(getOutputKeyClass());
        sampleJob.setOutputValueClass(getOutputValueClass());

        // Use the framework's serialization for intermediate/output records.
        sampleJob.getConfiguration().set("io.serializations", "skewreduce.framework.WritableSerialization");

        return sampleJob;
    }

    /**
     * Computes the total size in bytes of all input files matching the
     * operator's input path glob.
     *
     * @param plan plan providing the file system to query
     * @return sum of matched file lengths, or 0 if nothing matches
     * @throws IOException if the file system cannot be queried
     */
    private long getInputSize(EaggPlan plan) throws IOException {
        FileStatus[] status = plan.getFileSystem().globStatus(getInputPath());
        long sz = 0;
        // globStatus() returns null (not an empty array) when the pattern
        // matches no path; guard against the NPE in that case.
        if ( status != null ) {
            for ( FileStatus stat : status ) {
                sz += stat.getLen();
            }
        }
        return sz;
    }

    /**
     * Prepares the sampling job's configuration: resolves the sampling rate
     * (deriving one from the target sample size if none was configured) and
     * publishes the total input size, root cube spec, and expected sample
     * size for the mapper and reducer.
     *
     * @param plan the execution plan providing file system and I/O paths
     * @throws IOException if the input size cannot be determined
     */
    @Override
    public void setup(EaggPlan plan) throws IOException {
        setupInputOutput(plan);

        Configuration conf = job.getConfiguration();

        // Scan the input exactly once; the previous code called
        // getInputSize() twice, globbing the file system a second time for
        // the same value.
        long totalSize = getInputSize(plan);

        float sampleRate = conf.getFloat(SAMPLE_RATE_ATTR,Float.NaN);
        if ( Float.isNaN(sampleRate) ) {
            // No explicit rate configured: pick one so that roughly
            // getSampleSize() bytes of input are expected to be sampled.
            sampleRate = (float)( (double)getSampleSize() / totalSize );
            conf.setFloat(SAMPLE_RATE_ATTR, sampleRate);
        }

        conf.setLong(TOTAL_SIZE_ATTR, totalSize);
        conf.set(ROOT_CUBE_SPEC_ATTR, getBound().toSpec());
        conf.set(PARTITION_SPEC_ATTR, getBound().toSpec());
        if ( conf.getLong(EXPECTED_SIZE_ATTR,0) == 0 ) {
            conf.setLong(EXPECTED_SIZE_ATTR, getSampleSize());
        }

//        if ( getBound().equals(plan.getPartitionInfo()) ) {
//            conf.setInt(NUM_PARTITION_ATTR, conf.getInt("skewreduce.partition.initialpartitions",0));
//        }
    }
    
    @Override
	public void setConf(Configuration conf) {
		// Pure delegation; kept as an explicit override point for subclasses.
		super.setConf(conf);
	}
    
    /**
     * Identity mapper that forwards each input record with probability
     * {@code sampler.sampleRate} (Bernoulli sampling). If no rate is
     * configured, one is derived in {@link #setup} so that roughly
     * {@code sampler.size.expected} bytes of the input are sampled.
     */
    public static class SampleMapper<K,V>
    extends Mapper<K,V,K,V> {
        // Per-record emission probability, resolved in setup().
        private float sampleRate;
        // Count of records emitted so far; reported once at the end of run().
        private int numSamples;
        private Random rng;

        @Override
        protected void setup(Context context)
        throws InterruptedException,IOException {
            Configuration conf = context.getConfiguration();
            sampleRate = conf.getFloat(SAMPLE_RATE_ATTR,Float.NaN);
            if ( Float.isNaN(sampleRate) ) {
                // No explicit rate: derive one from expected/total bytes.
                long totalSize = conf.getLong(TOTAL_SIZE_ATTR,0L);
                if ( totalSize == 0 ) {
                    throw new IllegalArgumentException(TOTAL_SIZE_ATTR);
                }

                // by default, 4MB
                long expectedSize = conf.getLong(EXPECTED_SIZE_ATTR,DEFAULT_SAMPLE_SIZE);

                sampleRate = (float)((double)expectedSize / totalSize);
                LOG.info("Overriding sample rate = {}",sampleRate);
            }
            rng = new Random();
        }

        @Override
        public void run(Context context)
        throws IOException,InterruptedException {
            setup(context);

            // setup() always resolves sampleRate to a concrete value (or
            // throws), so the NaN guard is defensive; reservoir sampling for
            // the size-bounded case was never implemented.
            if ( !Float.isNaN(sampleRate) ) {
                while ( context.nextKeyValue() ) {
                    if ( rng.nextFloat() < sampleRate ) {
                        context.write(context.getCurrentKey(), context.getCurrentValue());
                        ++numSamples;
                    }
                }
                LOG.info("{} items have been sampled",numSamples);
            }

            cleanup(context);
        }
    }

    /**
     * Single reducer that feeds every sampled record to a
     * {@link Repartitioner}, then writes the resulting partition plan
     * ("partitionInfo") and per-partition schedules ("scheduleInfo") as side
     * files in the task's work output directory. Sampled records are echoed
     * to the regular output only when {@code sampler.writeSample} is set.
     */
    public static class SampleReducer<K,V extends ITuple>
    extends Reducer<K,V,K,V> {
        // Echo sampled records to regular reducer output when true.
        boolean writeSample;
        // Reserved for one-phase partitioning mode; read but currently unused.
        boolean onePhase;
        float sampleRate;
        Repartitioner<K,V> repartitioner;

        // load all particles and build partition information
        @Override
        protected void setup(Context context)
        throws InterruptedException,IOException {
            Configuration conf = context.getConfiguration();

            writeSample = conf.getBoolean(WRITE_SAMPLE_ATTR,false);
            sampleRate = conf.getFloat(SAMPLE_RATE_ATTR, Float.NaN);
            if ( Float.isNaN(sampleRate) ) {
                // Mirror of the mapper's fallback: derive the rate from
                // expected/total bytes so the planner sees the same value.
                long totalSize    = conf.getLong(TOTAL_SIZE_ATTR,0);
                if ( totalSize == 0 ) {
                    throw new IllegalArgumentException(TOTAL_SIZE_ATTR);
                }

                long expectedSize = conf.getLong(EXPECTED_SIZE_ATTR,DEFAULT_SAMPLE_SIZE);
                sampleRate = (float)((double)expectedSize / totalSize);
                conf.setFloat(SAMPLE_RATE_ATTR, sampleRate);
            }
            conf.setFloat("skewreduce.planner.sampleRate",sampleRate); // override sample rate
            onePhase = conf.getBoolean("sampler.planner.onephase", false);

            repartitioner = new Repartitioner<K,V>();
            repartitioner.setConf(context.getConfiguration());
        }

        @Override
        public void run(Context context)
        throws IOException,InterruptedException {
            setup(context);

            // Drain the (single) reduce input into the repartitioner.
            while (context.nextKey()) {
                K key = context.getCurrentKey();
                for ( V val : context.getValues() ) {
                    if ( writeSample )
                        context.write(key,val);
                    repartitioner.add(key,val);
                }
            }

            Set<PartitionNode> nodes;
            nodes = repartitioner.partitionPhase1(); // FIXME should return partition plan as well as the schedule

            // write partition information
            Path p = FileOutputFormat.getWorkOutputPath(context);
            FileSystem fs = FileSystem.get(context.getConfiguration());
            FSDataOutputStream out = fs.create(new Path(p,"partitionInfo"),context);
            try {
                repartitioner.getRootPartition().write(out);
            } finally {
                // Close even if write() throws, so the stream is not leaked.
                out.close();
            }

            // write schedule information
            out = fs.create(new Path(p,"scheduleInfo"),context);
            try {
                out.writeInt(nodes.size());
                for ( PartitionNode node : nodes ) {
                    node.getSchedule().write(out);
                }
            } finally {
                out.close();
            }

            // wrote nothing.
            cleanup(context);
        }
    }
    
    /**
     * Submits the sampling MapReduce job asynchronously and reports the
     * outcome as a {@link SchedulerEvent}: Noop on successful submission,
     * Killed if the submitting thread was interrupted, Error otherwise.
     *
     * @return the scheduler event describing the submission outcome
     */
    @Override
    public SchedulerEvent call() throws Exception {
        SchedulerEvent result = null;

        if ( LOG.isInfoEnabled() ) {
        	LOG.info("EXECUTE {}",getID());
        }

        try {
        	setup();

        	scheduledAt = System.currentTimeMillis();

            job.submit();

            //plan.registerRunningPhysicalOp(this);

            setStatus(Status.RUNNING);
        } catch ( InterruptedException x ) {
            // Throwing InterruptedException clears the thread's interrupt
            // status, so the previous "if (Thread.interrupted())" check was
            // almost always false and interruption was misreported as a
            // generic error with the job left running. Treat the exception
            // itself as the kill request and restore the interrupt flag for
            // upstream code.
            Thread.currentThread().interrupt();
            LOG.info("Killing MapReduce task : "+job.getJobID());
            try { job.killJob(); } catch ( IOException ignore ) {}
            result = new SchedulerEvent.Killed(this);
        } catch ( Exception x ) {
            result = new SchedulerEvent.Error(this,x);
        }
        
        return ( result == null ) ? new SchedulerEvent.Noop(this) : result;
    }

    /**
     * Post-job hook: loads the partition plan (and, for the initial run, the
     * schedule) produced by the reducer, or overrides it from a local
     * partition file when one is configured and present.
     */
    @Override
    public void onSuccess() throws Exception {
        // Optional local file that can override the computed partitioning.
        String pf = job.getConfiguration().get("skewreduce.partition.file");
        if ( pf == null || ! getBound().equals(plan.getPartitionInfo()) ) {
            // No override file, or this operator's bound is not the plan's
            // root partition: read both side files the reducer wrote.
            FileSystem fs = getPlan().getDriver().getFileSystem();
            FSDataInputStream in = fs.open(new Path(getOutputPath(),"partitionInfo"));
            getBound().readFields(in);
            in.close();
            
            in = fs.open(new Path(getOutputPath(),"scheduleInfo"));
            plan.getSchedule().readFields(in);
            in.close();
            
            // Dump the freshly loaded schedule for offline inspection.
        	plan.getSchedule().dumpSchedule(String.format("%s-%d.init.schedule",getPlan().getDriver().getAppName(),System.currentTimeMillis()) );
        } else {
            File f = new File(pf);
            if ( f.exists() ) {
                // Override file exists: load the manual partitioning from the
                // local file system instead of the job output.
                // FIXME: do not prune. accept manual plan.
                int numPart = job.getConfiguration().getInt(NUM_PARTITION_ATTR,0);
                LOG.info("Overriding partitioning information from "+pf+" and prune to have "+numPart+" partitions");

                DataInputStream in = new DataInputStream(new FileInputStream(f));
                getBound().readFields(in);
                in.close();

                // Optionally reduce the plan to the configured partition count.
                if ( numPart > 0 )
                    getBound().prune(numPart);

                if ( LOG.isDebugEnabled() ) {
                    LOG.debug(getBound().toLongString());
                }
            } else {
                // Override file configured but missing: load the computed
                // partitioning from the job output, then persist it to the
                // local file so subsequent runs can reuse it.
                FileSystem fs = getPlan().getDriver().getFileSystem();
                FSDataInputStream in = fs.open(new Path(getOutputPath(),"partitionInfo"));
                getBound().readFields(in);
                in.close();

                DataOutputStream out = new DataOutputStream(new FileOutputStream(pf));
                getBound().write(out);
                out.close();
            }
        }
    }
}
