

# Default help target: this project has no single "run" action, so just
# point the user at the real targets. Declared phony so a file named
# "run" can never shadow it.
.PHONY: run
run:
	@echo "Run what?"
	@echo "Don't forget about HADOOP_HOME"

# Remove a half-downloaded text if its recipe fails, so a truncated file
# never looks up to date.
.DELETE_ON_ERROR:

# Sample corpus: two Project Gutenberg e-texts used as word-count input.
# Each text is its own file target, so a failed run can be resumed with
# `make input` without re-fetching files that already finished.
input: input/pg5000.txt input/pg4300.txt

# Fetch one Gutenberg text by its book id (the pattern stem `%`).
# Downloading straight into input/ replaces the old `mv pg*.txt input`,
# which could sweep up unrelated pg*.txt files from the working directory.
input/pg%.txt:
	mkdir -p $(@D)
	wget -O $@ http://www.gutenberg.org/cache/epub/$*/pg$*.txt
 
# Pipeline smoke test: run the mapper/reducer as a plain shell pipeline
# (no Hadoop needed), then spot-check that "the" appears in the counts.
# Plain `sort` groups equal word keys for the reducer; the keys are
# words, not numbers, so the old `sort -n` added nothing (it only worked
# via sort's last-resort byte comparison).
# `grep -E` replaces the deprecated `egrep` alias.
.PHONY: run-test
run-test: input
	cat input/* | ./mapper.py | sort | ./reducer.py > output.txt
	grep -E "^the\s" output.txt

# Run the streaming job through Hadoop in local (standalone) mode and
# verify the count for "the". Requires HADOOP_HOME; the guard stops us
# from invoking /bin/hadoop when the variable is empty.
# NOTE(review): the streaming jar version (2.6.0) is hard-coded — update
# it alongside any Hadoop upgrade.
.PHONY: run-standalone
run-standalone: input
	@test -n "$(HADOOP_HOME)" || { echo "HADOOP_HOME is not set" >&2; exit 1; }
	rm -rf output
	$(HADOOP_HOME)/bin/hadoop jar $(HADOOP_HOME)/share/hadoop/tools/lib/hadoop-streaming-2.6.0.jar -input input -output output -mapper ./mapper.py -reducer ./reducer.py
	grep -E "^the\s" output/part-00000

# Run the streaming job on a running HDFS cluster: clear stale output
# (both local and in HDFS), upload the corpus to the user's HDFS home
# directory, run the job shipping the scripts via -files, then pull the
# result back and spot-check the count for "the".
.PHONY: run-cluster
run-cluster: input
	@test -n "$(HADOOP_HOME)" || { echo "HADOOP_HOME is not set" >&2; exit 1; }
	rm -rf output
	$(HADOOP_HOME)/bin/hdfs dfs -rm -r -f output
	$(HADOOP_HOME)/bin/hdfs dfs -copyFromLocal -f input
	$(HADOOP_HOME)/bin/hadoop jar $(HADOOP_HOME)/share/hadoop/tools/lib/hadoop-streaming-2.6.0.jar -files mapper.py,reducer.py -input input -output output -mapper mapper.py -reducer reducer.py
	$(HADOOP_HOME)/bin/hdfs dfs -get output
	grep -E "^the\s" output/part-00000

# Bootstrap a single-node HDFS cluster. DESTRUCTIVE: `namenode -format`
# wipes any existing HDFS metadata. `-mkdir -p` creates /user and the
# per-user home in one step and no longer fails when they already exist.
.PHONY: start-cluster
start-cluster:
	@test -n "$(HADOOP_HOME)" || { echo "HADOOP_HOME is not set" >&2; exit 1; }
	$(HADOOP_HOME)/bin/hdfs namenode -format
	$(HADOOP_HOME)/sbin/start-dfs.sh
	$(HADOOP_HOME)/bin/hdfs dfs -mkdir -p /user/`whoami`

# Shut down the HDFS daemons started by start-cluster.
.PHONY: stop-cluster
stop-cluster:
	@test -n "$(HADOOP_HOME)" || { echo "HADOOP_HOME is not set" >&2; exit 1; }
	$(HADOOP_HOME)/sbin/stop-dfs.sh

# Remove everything the other targets create: job output (local dir and
# output.txt), the downloaded corpus, and editor backup files. $(RM) is
# make's built-in `rm -f`; -r is only needed for the directories.
.PHONY: clean
clean:
	rm -rf output input
	$(RM) output.txt *~


       	

