package ru.mipt.victator.pagerank.index;

import java.io.IOException;
import java.util.Iterator;
import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

import ru.mipt.victator.pagerank.Value;


/**
 * Mapper for the link-indexing phase of PageRank. For each input key naming a
 * wiki page file, it reads the page, extracts the outgoing links and emits one
 * record {@code <link, <source, N>>} for every link whose target file exists,
 * where N is the total number of links found on the source page.
 *
 * Raw {@code Mapper}/{@code OutputCollector} types are kept deliberately: the
 * erased {@code map} signature must stay identical for the framework.
 */
public class IndexMap extends MapReduceBase implements Mapper
{
	// Filesystem the wiki pages live on; resolved once in configure().
	private FileSystem fs = null;
	private JobConf jobConf;
	// Base directory containing the wiki files (the job's working directory).
	private Path wikiPath;

	/**
	 * Caches the job configuration, the working directory and the
	 * corresponding {@link FileSystem} for use in {@link #map}.
	 *
	 * @param job the job configuration supplied by the framework
	 * @throws RuntimeException if the filesystem cannot be obtained
	 *         (configure() cannot declare a checked IOException)
	 */
	@Override
	public void configure(JobConf job)
	{
		jobConf = job;
		wikiPath = jobConf.getWorkingDirectory();
		try
		{
			fs = FileSystem.get(jobConf);
		}
		catch (IOException e)
		{
			// Wrap with context; the cause is preserved for diagnostics.
			throw new RuntimeException("Unable to obtain FileSystem from job configuration", e);
		}
	}

	/**
	 * Reads the wiki page named by {@code key} and emits
	 * {@code <link, <source, N>>} pairs for its outgoing links.
	 *
	 * @param key      page path relative to the working directory (a {@link Text})
	 * @param value    unused
	 * @param output   collector receiving {@code <Text link, Value(source, N)>} pairs
	 * @param reporter unused
	 * @throws IOException if the page cannot be read or an existence check fails
	 */
	@Override
	public void map(WritableComparable key, Writable value,
			OutputCollector output, Reporter reporter) throws IOException
	{
		// Hoisted: the original cast/toString'd the key twice.
		String pageName = ((Text) key).toString();
		// Read the whole page into memory.
		String file = FSFile2String.Read(fs, new Path(wikiPath, pageName));
		// Links on the page are relative to the page's own directory.
		Path base = new Path(pageName).getParent();
		// Extract all outgoing links from the page text.
		List<String> links = LinkParser.FindLinks(file);
		// Shared output value <source page, total link count>: collect()
		// serializes immediately, so reusing one instance is safe.
		Value rValue = new Value((Text) key, new IntWritable(links.size()));
		// Emit <link, <source, N>> only for links whose target file exists.
		for (String target : links)
		{
			Path link = new Path(base, target);
			if (fs.exists(new Path(wikiPath, link)))
			{
				output.collect(new Text(link.toString()), rValue);
			}
		}
	}
}
