
# Default target: print usage hints rather than silently doing nothing.
# Declared .PHONY so a stray file named "run" can never mask it.
.PHONY: run
run:
	@echo "Run what?"
	@echo "Don't forget about HADOOP_HOME"

# Generated test input. Depends on the generator script so that editing
# gen.py correctly triggers a regeneration (previously it had no
# prerequisites and would never be rebuilt once it existed).
input.txt: gen.py
	python gen.py

# Local smoke test: run the streaming pipeline (mapper | sort | reducer)
# entirely in the shell, without Hadoop, and show the result.
.PHONY: run-test
run-test: input.txt
	./mapper.py < input.txt | sort -n | ./reducer.py > output.txt
	cat output.txt

# Run the job via Hadoop streaming in local/standalone mode.
# Requires HADOOP_HOME to be set; the streaming jar version (2.6.0) is
# hard-coded and must match the installed distribution.
.PHONY: run-standalone
run-standalone: input.txt
	rm -rf output
	$(HADOOP_HOME)/bin/hadoop jar $(HADOOP_HOME)/share/hadoop/tools/lib/hadoop-streaming-2.6.0.jar -input input.txt -output output -mapper ./mapper.py -reducer ./reducer.py
	cat output/part-*

# Run the job on a (pseudo-)distributed cluster via HDFS.
# Ships mapper/reducer/solver to the workers with -files; -cmdenv forwards
# HOME so the scripts see the caller's environment on the task nodes.
.PHONY: run-cluster
run-cluster: input.txt
	rm -rf output
	$(HADOOP_HOME)/bin/hdfs dfs -rm -r -f output
	$(HADOOP_HOME)/bin/hdfs dfs -put -f input.txt
	$(HADOOP_HOME)/bin/hadoop jar $(HADOOP_HOME)/share/hadoop/tools/lib/hadoop-streaming-2.6.0.jar -files mapper.py,reducer.py,solver.py -input input.txt -output output -mapper mapper.py -reducer reducer.py -cmdenv HOME=$(HOME)
	$(HADOOP_HOME)/bin/hdfs dfs -get output
	cat output/part-*


# One-time cluster bring-up: format the namenode (DESTRUCTIVE — wipes any
# existing HDFS metadata), start DFS, and create the user's HDFS home dir.
# -mkdir -p makes the target safe to re-run when the dirs already exist.
.PHONY: start-cluster
start-cluster:
	$(HADOOP_HOME)/bin/hdfs namenode -format
	$(HADOOP_HOME)/sbin/start-dfs.sh
	$(HADOOP_HOME)/bin/hdfs dfs -mkdir -p /user
	$(HADOOP_HOME)/bin/hdfs dfs -mkdir -p /user/`whoami`

# Shut down the HDFS daemons started by start-cluster.
.PHONY: stop-cluster
stop-cluster:
	$(HADOOP_HOME)/sbin/stop-dfs.sh

# Remove everything this Makefile generates (local only; HDFS state is
# untouched). rm -f suffices for plain files; -rf is reserved for the
# output directory fetched from HDFS.
.PHONY: clean
clean:
	rm -rf output
	rm -f output.txt
	rm -f input.txt
	rm -f *~


