
import java.io.File;
import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.hadoop.dfs.DistributedFileSystem;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;



/**
 * Task IV: runs a boolean keyword query (one word, or "w1 OP w2" with
 * OP in {AND, OR, ANDNOT}) against an inverted index stored on the DFS.
 *
 * The driver ({@link #main}) copies the index into the DFS, writes the query
 * string to {@code /sdf/query}, and launches a map-reduce job. The mapper
 * inverts postings of query words into {@code <DocID:offset, word>} pairs;
 * the reducer keeps the positions that satisfy the query.
 */
public class Query {

    /** Shared DFS handle; initialized once when the class loads. */
    static final DistributedFileSystem dfs = getaDFS();

    /**
     * Connects to the distributed file system via {@code ProjectMain} using a
     * fresh {@link JobConf} for this class.
     *
     * @return the connected file system
     * @throws RuntimeException wrapping any {@link IOException} raised while
     *         connecting (cause is preserved so the stack trace survives)
     */
    public static DistributedFileSystem getaDFS() {
        try {
            return ProjectMain.getDFS(new JobConf(Query.class));
        } catch (IOException e) {
            // Keep the original exception as the cause instead of flattening
            // it into the message only.
            throw new RuntimeException("I/O: " + e, e);
        }
    }

    /**
     * Reads the query from {@code /sdf/query} on the DFS and splits it into at
     * most three whitespace-separated tokens. Missing trailing tokens stay
     * {@code null}, which is how callers detect a one-word query.
     *
     * @return a 3-element array {@code {input1, operator-or-null, input2-or-null}}
     * @throws IllegalArgumentException if the query file contains no tokens
     */
    private static String[] getQueryString() {
        String query = ProjectMain.fileToString(new Path("/sdf/query"), dfs);
        StringTokenizer queryTok = new StringTokenizer(query);
        if (!queryTok.hasMoreTokens()) {
            // Fail with a clear message rather than NoSuchElementException.
            throw new IllegalArgumentException("query file /sdf/query is empty");
        }
        String[] tmp = new String[3]; // elements default to null
        tmp[0] = queryTok.nextToken();
        if (queryTok.hasMoreTokens()) {
            tmp[1] = queryTok.nextToken();
            if (queryTok.hasMoreTokens()) {
                tmp[2] = queryTok.nextToken();
            }
        }
        return tmp;
    }

    /**
     * Mapper. Input records are inverted-index lines
     * {@code <word \t DocID:offset^DocID:offset...>}; for each word that takes
     * part in the query it emits one {@code <DocID:offset, word>} pair per
     * posting.
     */
    public static class QueryMapper extends MapReduceBase implements Mapper {

        private final static String[] query = getQueryString();
        // Reused output key; an instance field (not static) so it is never
        // shared across mapper instances. map() itself runs single-threaded.
        private final Text position = new Text();

        public void map(WritableComparable key, Writable val, OutputCollector output, Reporter reporter)
                throws IOException {

            String input1 = query[0];
            String input2 = query[2]; // null for a one-word query

            // Split "word \t postings"; skip malformed records instead of
            // killing the task with NoSuchElementException.
            StringTokenizer itr1 = new StringTokenizer(val.toString(), "\t\n");
            if (!itr1.hasMoreTokens()) {
                return; // blank line
            }
            String word = itr1.nextToken();
            if (!itr1.hasMoreTokens()) {
                return; // word with no postings list
            }
            String line = itr1.nextToken();

            // Emit only for words that appear in the query.
            boolean relevant = (input2 == null)
                    ? input1.equals(word)
                    : (word.equals(input1) || word.equals(input2));
            if (!relevant) {
                return;
            }

            Text theWord = new Text(word);
            StringTokenizer itr = new StringTokenizer(line, "^\n");
            while (itr.hasMoreTokens()) {
                position.set(itr.nextToken());
                output.collect(position, theWord);
            }
        }
    }

    /**
     * Reducer. Input is {@code <DocID:offset, [words seen at that position]>};
     * emits {@code <"results", DocID:offset>} when the words collected at that
     * position satisfy the boolean query.
     */
    public static class QueryReducer extends MapReduceBase implements Reducer {

        private final static Text results = new Text("results");
        private final static String[] query = getQueryString();

        @SuppressWarnings("unchecked")
        public void reduce(WritableComparable key, Iterator values, OutputCollector output, Reporter reporter)
                throws IOException {
            String input1 = query[0];
            String op = query[1];     // operator token; null for one-word query
            String input2 = query[2];

            if (input2 != null) {
                // Two-word query: delegate the boolean check to ProjectMain.
                if (ProjectMain.satisfiesQuery(input1, op, input2, values)) {
                    output.collect(results, key);
                }
            } else {
                // One-word query: every position the mapper emitted is a hit.
                output.collect(results, key);
            }
        }
    }

    /**
     * Driver. Usage: {@code Query <index-file> <local-output-file> <query>}.
     * Stages the inverted index and the query string into the DFS, runs the
     * job, then copies {@code out/part-00000} back to the local file system
     * and cleans up the DFS working files.
     *
     * @param args [0] local path of the inverted index, [1] local output path,
     *             [2] the query string
     * @throws IOException on any DFS or job failure
     */
    public static void main(String[] args) throws IOException {
        if (args.length < 3) {
            // Fail fast with usage instead of ArrayIndexOutOfBoundsException.
            System.err.println("usage: Query <index-file> <local-output> <query>");
            return;
        }

        JobConf conf = new JobConf(Query.class);
        DistributedFileSystem fs = ProjectMain.getDFS(conf);

        // Stage the inverted index into the DFS root and clear old output.
        Path root = new Path("/");
        Path out = new Path("out");
        Path dfsInput = new Path(args[0]); // file name of inverted index
        Path localOut = new Path(args[1]);
        String query = args[2];

        conf.setWorkingDirectory(root);
        fs.setWorkingDirectory(root);
        fs.delete(dfsInput);
        fs.copyFromLocalFile(dfsInput, root);
        fs.delete(out);
        conf.setInputPath(dfsInput);
        conf.setOutputPath(out);

        // Publish the query string where the mapper/reducer static
        // initializers (getQueryString) will read it.
        Path queryPath = new Path("/sdf/query");
        fs.setWorkingDirectory(root);
        fs.delete(queryPath);
        FSDataOutputStream file = fs.create(queryPath);
        try {
            file.writeBytes(query);
        } finally {
            file.close(); // release the DFS stream even if the write fails
        }

        conf.setJobName("Task IV: Query");
        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(Text.class);
        conf.setMapperClass(Query.QueryMapper.class);
        conf.setReducerClass(Query.QueryReducer.class);
        JobClient.runJob(conf);

        // Move the job output back to the local FS, then clean up DFS state.
        File outFile = new File(args[1]);
        outFile.delete();
        Path part = new Path("out/part-00000");
        if (!fs.exists(part)) {
            fs.createNewFile(part); // ensure an (empty) result file exists
        }
        fs.copyToLocalFile(part, localOut);
        fs.delete(out);
        fs.delete(dfsInput);
        fs.delete(new Path("/xxx"));
    }
}