package skewreduce.framework.physical;

import java.io.IOException;
import java.net.URISyntaxException;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.HttpMethod;
import org.apache.commons.httpclient.URI;
import org.apache.commons.httpclient.methods.ByteArrayRequestEntity;
import org.apache.commons.httpclient.methods.GetMethod;
import org.apache.commons.httpclient.methods.PutMethod;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.mapred.JobPriority;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.RecordWriter;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.util.ReflectionUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import skewreduce.framework.EaggPlan;
import skewreduce.framework.PartitionOutputs;
import skewreduce.framework.Schedule;
import skewreduce.framework.logical.LogicalOp;
import skewreduce.framework.planner.PartitionNode;
import skewreduce.lib.ITuple;
import skewreduce.lib.Partition;
import skewreduce.lib.PartitionInfo;



public abstract class PPartitionOp extends BlockingPhysicalOp {
    private static final Logger LOG = LoggerFactory.getLogger("PLAN");

    private int numPartition;
    private PartitionInfo partitions;
    private boolean plainReducer;
    private boolean reduceSerializer;

    /**
     * Resolves the output file that holds the sub-partition consumed by
     * {@code op}. MERGE/FINALIZE operators are rejected: they do not read a
     * single sub-partition file.
     */
    @Override
    protected Path getOutputByContext(PhysicalOp op) {
        LogicalOp.Type opType = op.getType();
        if ( opType == LogicalOp.Type.MERGE || opType == LogicalOp.Type.FINALIZE ) {
            throw new IllegalArgumentException(String.format("operator %s",op.getID()));
        }

        if ( !plainReducer ) {
            // custom reducers name files by partition level/id directly
            Partition bound = op.getBound();
            return new Path( getOutputPath(), String.format("part-%d-%d",bound.getLevel(),bound.getID()));
        }

        // plain Hadoop reducer was used: map the bound onto a reduce-task index
        // and use Hadoop's standard part-file naming
        int reduceIndex = partitions.getPartition(op.getBound()) % numPartition;
        return new Path( getOutputPath(), String.format("part-r-%05d",reduceIndex));
    }

    /**
     * Configures the partitioning job.
     *
     * <p>For a level-0 bound this is the initial PARTITION phase: the reducer
     * class is picked from the plan's initial partition strategy, and the
     * serialized partition tree is shipped to tasks via the distributed cache
     * (symlinked as "partitionInfo"). For deeper levels it configures a
     * map-only repartitioning job driven by the partition spec string instead.
     */
    @Override
    public void setup(EaggPlan plan) throws IOException {
        setupInputOutput(plan);

        if ( LOG.isDebugEnabled() ) {
        	LOG.debug("partition bound = {}",this.getBound());
        }
        
        job.setPartitionerClass(TuplePartitioner.class);
        
        // raise priority so partitioning does not queue behind regular jobs
        getConf().set("mapred.job.priority",JobPriority.HIGH.toString());
        // concrete Partition subclass, so tasks can reflectively instantiate it
        getConf().set(PARTITION_CLASS_ATTR, bound.getClass().getCanonicalName());
        
        if ( bound.getLevel() == 0 ) {
        	// identity mapper -- all partitioning work happens reduce-side
        	job.setMapperClass(Mapper.class);
        	
        	// this is PARTITION phase
        	switch ( plan.getInitialPartitionStrategy() ) {
	        	case FULLY_DYNAMIC:
		        	{
		        		// reducers recompute the partition tree from the data
		        		job.setReducerClass(RepartitionReducer.class);
		        		reduceSerializer = true;
		        	}
		        	break;
	        	case PARTIAL_DYNAMIC:
		        	{
		        		// identity pass-through that also measures partition cost
		        		job.setReducerClass(CostEvalReducer.class);
		        		plainReducer = true;
		        		reduceSerializer = true;
		        	}
		        	break;
	        	case STATIC:
	    		default:
		        	{
		        		// plain Hadoop identity reducer: partitions fixed up front
		        		job.setReducerClass(Reducer.class);
		        		plainReducer = true;
		        	}
		        	break;	
        	}
        	
	        partitions = new PartitionInfo(bound);
	        if ( LOG.isDebugEnabled() ) {
	            LOG.debug(partitions.toString());
	        }
	
	        numPartition = partitions.size();
            
            job.setNumReduceTasks(numPartition);    // depending on number of leaf nodes?
            
            // write out partition info next to the job output so reduce tasks
            // can pull it through the distributed cache
            Path infoDir = getOutputPath().suffix(".info");
            Path p = new Path( infoDir, "partitionInfo");
            FileSystem fs = plan.getDriver().getFileSystem();
            FSDataOutputStream out = fs.create(p,true);
            bound.write(out);
            out.close();

            try {
    	        java.net.URI fsUri = fs.getUri();
    	        java.net.URI fileUri = p.toUri();
    	        // the URI fragment ("partitionInfo") becomes the symlink name in
    	        // each task's working directory once createSymlink() is enabled
    	        java.net.URI cacheUri = new java.net.URI(fsUri.getScheme(),null,fsUri.getHost(),fsUri.getPort(),fileUri.getPath(),null,"partitionInfo");
    	
    	        Configuration jobConf = job.getConfiguration();
    	
    	        DistributedCache.addCacheFile(cacheUri,jobConf);
    	        DistributedCache.createSymlink(jobConf);
            } catch ( URISyntaxException x) {
            	throw new IOException(x);
            }
        } else {
        	// this is repartitioning during actual execution
        	// we only need to specify the partition spec, which is already set
            job.getConfiguration().set(PARTITION_SPEC_ATTR,bound.toSpec());
        	job.setMapperClass(RepartitionMapper.class);
        	job.setNumReduceTasks(0);
        }

        if ( reduceSerializer ) {
        	// override serializer so that the key and value objects do not overwrite
        	job.getConfiguration().set("io.serializations", "skewreduce.framework.WritableSerialization");
        }
    }
  
    /**
     * Routes each value tuple to a reduce task using the partition tree that
     * {@code setup()} shipped through the distributed cache (symlinked as
     * "partitionInfo" in the task working directory, unless overridden by
     * {@code PARTITION_SPEC_FILE_ATTR}).
     */
    public static class TuplePartitioner<K,V extends ITuple>
    extends Partitioner<K,V>
    implements Configurable {
        private Configuration conf;
        private Partition root;            // deserialized partition tree, null if loading failed
        private PartitionInfo partitions;  // flattened lookup over root

        @Override public Configuration getConf() { return conf; }

        @Override
        public void setConf(Configuration jobconf) {
            this.conf = jobconf;
            // load partitioning information from the cache-symlinked local file
            try {
                LocalFileSystem fs = FileSystem.getLocal(conf);
                String pFileName = conf.get(PhysicalOp.PARTITION_SPEC_FILE_ATTR,"partitionInfo");
                final Path pPath = new Path(pFileName);

                Class<? extends Partition> pClass = conf.getClass(PhysicalOp.PARTITION_CLASS_ATTR, null, Partition.class);
                root = ReflectionUtils.newInstance(pClass, conf);

                FSDataInputStream fileIn = fs.open(pPath);
                try {
                    root.readFields(fileIn);
                } finally {
                    // close even when deserialization throws
                    fileIn.close();
                }
                partitions = new PartitionInfo(root);
            } catch ( Throwable ex ) {
                // keep the task alive; getPartition() will fail fast on a null
                // partitions field if this ever triggers
                LOG.error("Can't load partition information",ex);
            }
            // BUG FIX: root stays null when loading fails above -- the old code
            // dereferenced it unconditionally and NPE'd with debug logging on
            if ( LOG.isDebugEnabled() && root != null ) {
                root.print(System.out);
            }
        }

        @Override
        public int getPartition(K key,V value,int n) {
            // map the tuple to a leaf partition, folded into n reduce tasks
            return partitions.getPartition(value) % n;
        }
    }
    
    /**
     * Sends the first {@code len} bytes of {@code data} as an HTTP PUT with
     * content type application/octet-stream.
     *
     * @return the HTTP response status code
     * @throws IOException on transport failure
     */
    private static int httpPutRequest(HttpClient client,String uri,byte[] data,int len) throws IOException {
    	ByteArrayRequestEntity content = new ByteArrayRequestEntity(Arrays.copyOf(data, len),"application/octet-stream");

    	URI url = new URI(uri, false);
    	PutMethod method = new PutMethod(url.getEscapedURI());
    	method.setRequestEntity(content);
    	method.setRequestHeader("Accept", "*/*");
    	try {
    		return client.executeMethod(method);
    	} finally {
    		// BUG FIX: return the connection to the client's connection manager;
    		// commons-httpclient 3.x leaks it otherwise
    		method.releaseConnection();
    	}
    }
    
    /**
     * Serializes the partition tree into {@code buf} and PUTs it to {@code uri}.
     * Returns -1 without doing anything when no notification URI is configured.
     */
    private static int notifyPartitionPlan(HttpClient client,String uri,Partition root,DataOutputBuffer buf) throws IOException {
    	if ( uri == null || uri.length() == 0 ) {
    		return -1;
    	}
    	buf.reset();
    	root.write(buf);
    	return httpPutRequest(client, uri, buf.getData(), buf.getLength());
    }
    
    /**
     * Serializes the schedule list (length-prefixed) into {@code buf} and PUTs
     * it to {@code uri}. Returns -1 when no notification URI is configured.
     */
    private static int notifyPartitionSchedule(HttpClient client,String uri,List<Schedule> schedule,DataOutputBuffer buf) throws IOException {
    	if ( uri == null || uri.length() == 0 ) {
    		return -1;
    	}
    	buf.reset();
    	buf.writeInt(schedule.size());
    	for ( Schedule entry : schedule ) {
    		entry.write(buf);
    	}
    	return httpPutRequest(client, uri, buf.getData(), buf.getLength());
    }
    
    /**
     * Pushes the recomputed partition plan ({@code /p/<id>}) and schedule
     * ({@code /s/<id>}) of {@code repart} to the coordinator via HTTP PUT.
     * Returns the response code of the schedule notification.
     */
    private static int postPartitionResult(String urlPrefix,Repartitioner<?,?> repart) throws IOException {
    	Partition.ID prevId = repart.getRootPartition().getPartitionID();
    	String planUrl = String.format("%s/p/%s",urlPrefix,prevId);
    	String scheduleUrl = String.format("%s/s/%s",urlPrefix,prevId);

    	if ( LOG.isInfoEnabled() ) {
    		LOG.info("Partition notification URL = {}",planUrl);
    		LOG.info("Schedule notification URL = {}",scheduleUrl);
    	}

    	HttpClient client = new HttpClient();
    	DataOutputBuffer buffer = new DataOutputBuffer(16384);
    	// buffer is reused by both notifications; each resets it first
    	int planRc = notifyPartitionPlan(client,planUrl,repart.getRootPartition(),buffer);
    	int scheduleRc = notifyPartitionSchedule(client,scheduleUrl,repart.getPartitionSchedule(),buffer);
    	if ( LOG.isInfoEnabled() ) {
    		LOG.info("Response codes = {}, {}",planRc,scheduleRc);
    	}
    	return scheduleRc;
    }

    /**
     * Reports the measured runtime {@code t} and cost {@code c} for the
     * partition handled by {@code repart} to the coordinator (HTTP GET on
     * {@code /t/<id>?<t>&<c>}).
     *
     * @return the HTTP response status code
     * @throws IOException on transport failure
     */
    private static int postTimingResult(String urlPrefix,Repartitioner<?,?> repart,double t, double c) throws IOException {
    	Partition.ID prevId = repart.getRootPartition().getPartitionID();
    	String uri = String.format("%s/t/%s?%e&%e",urlPrefix,prevId,t,c);
    	
    	if ( LOG.isInfoEnabled() ) {
    		LOG.info("Partition timing notification URL = {} / Timing = {}",uri,t);
    	}
        URI url = new URI(uri, false);
        HttpClient client = new HttpClient();
        HttpMethod method = new GetMethod(url.getEscapedURI());
        method.setRequestHeader("Accept", "*/*");
        try {
            return client.executeMethod(method);
        } finally {
            // BUG FIX: return the connection to the connection manager;
            // commons-httpclient 3.x leaks it otherwise
            method.releaseConnection();
        }
    }
    
	/**
	 * After the initial PARTITION job succeeds, dumps the schedule and the
	 * partition tree to timestamped local files for diagnostics. Dump failures
	 * are logged but never fail the job.
	 */
	@Override
	public void onSuccess() throws Exception {
		if ( getBound().getLevel() != 0 ) {
			return; // only the initial partition phase produces these dumps
		}
		long now = System.currentTimeMillis();
		String appName = plan.getDriver().getAppName();
		try {
			plan.getSchedule().dumpSchedule(String.format("%s-%d.schedule",appName,now));
			plan.getPartitionInfo().dump(String.format("%s-%d.partition",appName,now));
		} catch ( IOException ioex ) {
			// diagnostics only -- swallow and log
			LOG.error("Failed to dump partition/schedule information",ioex);
		}
	}

	/**
	 * Map-only repartitioning task: buffers every input tuple into a
	 * {@code Repartitioner}, computes a fresh partition tree, writes each leaf
	 * partition to its own side-output file, and reports the resulting plan and
	 * schedule back to the coordinator over HTTP.
	 */
	public static class RepartitionMapper<K,V extends ITuple> extends Mapper<K,V,K,V> {
    	PartitionOutputs<K,V> partitionOut;  // side-output writers, one per leaf partition
    	Repartitioner<K,V> repartitioner;    // buffers tuples and computes the split

		@Override
		protected void setup(Context context) throws IOException,
				InterruptedException {
			// force full retention: repartitioning must see every tuple, not a sample
			context.getConfiguration().setFloat("skewreduce.planner.sampleRate",1.0f);

			partitionOut = new PartitionOutputs<K,V>(context);
			repartitioner = new Repartitioner<K,V>();
			repartitioner.setConf(context.getConfiguration());
		}

		@Override
		public void run(Context context) throws IOException,
				InterruptedException {
			setup(context);

			// buffer the whole input split before partitioning
		    while (context.nextKeyValue()) {
		    	repartitioner.add(context.getCurrentKey(), context.getCurrentValue());
		    }
		    
		    repartitioner.partition();
		    
		    // one output file per leaf of the newly computed partition tree;
		    // getSample() here holds the buffered tuples (sampleRate == 1.0)
		    for ( PartitionNode p : repartitioner.getLeafPartitions() ) {
		    	RecordWriter<K,V> writer = partitionOut.createRecordWriter(context, p.getPartition());
		    	for ( ITuple vv : p.getSample() ) {
		    		Pair<K,V> pair = (Pair<K,V>)vv;
		    		writer.write(pair.getKey(), pair.getValue());
		    	}
		    	writer.close(context);
		    }
		    
		    // notify the coordinator of the new partition plan and schedule
		    postPartitionResult(context.getConfiguration().get(EaggPlan.EAGG_HTTP_RPC_URI_ATTR),repartitioner);
		    
			cleanup(context);
		}

		@Override
		protected void cleanup(Context context) throws IOException,
				InterruptedException {
			partitionOut.close(context);
			super.cleanup(context);
		}
    }
    
    /**
     * Reduce-side repartitioning (FULLY_DYNAMIC strategy): buffers all values,
     * recomputes the partition tree for this reduce task's bound, writes each
     * leaf partition to its own side output, and reports the new plan/schedule
     * to the coordinator over HTTP.
     */
    public static class RepartitionReducer<K,V extends ITuple> extends Reducer<K,V,K,V> {
    	PartitionOutputs<K,V> partitionOut;  // side-output writers, one per leaf partition
    	Repartitioner<K,V> repartitioner;    // buffers tuples and computes the split

		@Override
		protected void setup(Context context) throws IOException,
				InterruptedException {
			// force full retention: repartitioning must see every tuple, not a sample
			context.getConfiguration().setFloat("skewreduce.planner.sampleRate",1.0f);

			partitionOut = new PartitionOutputs<K,V>(context);
			repartitioner = new Repartitioner<K,V>();
			repartitioner.setConf(context.getConfiguration());

			String s = context.getConfiguration().get(PhysicalOp.PARTITION_SPEC_ATTR);
			if ( s == null ) {
				// no inline spec: load the partition tree from the distributed
				// cache symlink and select this task's subtree by reduce index
	            try {
	                LocalFileSystem fs = FileSystem.getLocal(context.getConfiguration());
	                final Path pPath = new Path("partitionInfo");

	                System.err.println(pPath + " exists? " + fs.exists(pPath) );
	                FSDataInputStream fileIn = fs.open(pPath);
	                
	                Partition root = repartitioner.getRootPartition().read(fileIn);
	                fileIn.close();

	                PartitionInfo partitions = new PartitionInfo(root);
	                // mapred.task.partition is this reduce task's index
	                int partition = context.getConfiguration().getInt("mapred.task.partition", 0);
	                Partition node = partitions.get( partition );
	                repartitioner.getRootPartition().set(node);
	            } catch ( Throwable ex ) {
	                // NOTE(review): swallowing leaves the repartitioner with its
	                // default root bound -- run() then partitions against the
	                // wrong region; confirm this best-effort behavior is intended
	                ex.printStackTrace(System.err);
	                LOG.error("Can't load partition information",ex);
	            }
			}
		}

		@Override
		public void run(Context context) throws IOException,
				InterruptedException {
			setup(context);

			// buffer every value of every key before partitioning
		    while (context.nextKey()) {
		    	K key = context.getCurrentKey();
		    	for ( V val : context.getValues() ) {
		    		repartitioner.add(key,val);
		    	}
	        }
		    
		    repartitioner.partition();
		    
		    // one output file per leaf of the newly computed partition tree;
		    // getSample() holds the buffered tuples (sampleRate == 1.0)
		    for ( PartitionNode p : repartitioner.getLeafPartitions() ) {
		    	RecordWriter<K,V> writer = partitionOut.createRecordWriter(context, p.getPartition());
		    	for ( ITuple vv : p.getSample() ) {
		    		Pair<K,V> pair = (Pair<K,V>)vv;
		    		writer.write(pair.getKey(), pair.getValue());
		    	}
		    	writer.close(context);
		    }
		    
		    // notify the coordinator of the new partition plan and schedule
		    postPartitionResult(context.getConfiguration().get(EaggPlan.EAGG_HTTP_RPC_URI_ATTR),repartitioner);
			
			cleanup(context);
		}

		@Override
		protected void cleanup(Context context) throws IOException,
				InterruptedException {
			partitionOut.close(context);
			super.cleanup(context);
		}
    }

    /**
     * Cost-evaluating identity reducer (PARTIAL_DYNAMIC strategy): passes every
     * tuple through unchanged while also feeding it to a {@code Repartitioner},
     * then evaluates the partition's runtime/cost and reports the timing to the
     * coordinator over HTTP GET.
     *
     * NOTE(review): setup() duplicates RepartitionReducer.setup()'s
     * partition-loading logic; consider extracting a shared helper.
     */
    public static class CostEvalReducer<K,V extends ITuple> extends Reducer<K,V,K,V> {
    	Repartitioner<K,V> repartitioner;  // accumulates tuples for cost evaluation

		@Override
		protected void setup(Context context) throws IOException,
				InterruptedException {
			// force full retention: cost evaluation should see every tuple
			context.getConfiguration().setFloat("skewreduce.planner.sampleRate",1.0f);

			repartitioner = new Repartitioner<K,V>();
			repartitioner.setConf(context.getConfiguration());

			String s = context.getConfiguration().get(PhysicalOp.PARTITION_SPEC_ATTR);
			if ( s == null ) {
				// no inline spec: load the partition tree from the distributed
				// cache symlink and select this task's subtree by reduce index
	            try {
	                LocalFileSystem fs = FileSystem.getLocal(context.getConfiguration());
	                final Path pPath = new Path("partitionInfo");

	                System.err.println(pPath + " exists? " + fs.exists(pPath) );
	                FSDataInputStream fileIn = fs.open(pPath);
	                
	                Partition root = repartitioner.getRootPartition().read(fileIn);
	                fileIn.close();

	                PartitionInfo partitions = new PartitionInfo(root);
	                // mapred.task.partition is this reduce task's index
	                int partition = context.getConfiguration().getInt("mapred.task.partition", 0);
	                Partition node = partitions.get( partition );
	                if ( LOG.isInfoEnabled() ) {
	                	LOG.info("Setting partition {} to {}",partition,node.toSpec());
	                }
	                repartitioner.getRootPartition().set(node);
	            } catch ( Throwable ex ) {
	                // NOTE(review): swallowing leaves the repartitioner with its
	                // default root bound; confirm best-effort is intended here
	                ex.printStackTrace(System.err);
	                LOG.error("Can't load partition information",ex);
	            }
			}
		}

		@Override
		public void run(Context context) throws IOException,
				InterruptedException {
			setup(context);

			// identity pass-through, duplicating each tuple into the repartitioner
		    while (context.nextKey()) {
		    	K key = context.getCurrentKey();
		    	for ( V val : context.getValues() ) {
		    		context.write(key, val);
		    		repartitioner.add(key,val);
		    	}
	        }
		    
		    // evaluate actual time/cost of this partition's workload
		    PartitionNode node = repartitioner.evalTime();
		    double realExpTime = node.getWorkSecs();
		    double realExpCost = node.getWorkCost();
		    
		    // report timing to the coordinator
		    postTimingResult(context.getConfiguration().get(EaggPlan.EAGG_HTTP_RPC_URI_ATTR),repartitioner,realExpTime,realExpCost);
			
			cleanup(context);
		}
    }
}
