
# Default target: this Makefile has no single "run" action, so just remind
# the user which targets exist and that HADOOP_HOME must be set.
# .PHONY guards against a stray file named "run" shadowing the target.
.PHONY: run
run:
	@echo "Run what?"
	@echo "Don't forget about HADOOP_HOME"

# Generate the input data set. Depends on gen.py so the data is
# regenerated whenever the generator script changes.
random.txt: gen.py
	python gen.py

# Run the map/sort/reduce pipeline locally, without Hadoop, to check the
# mapper and reducer scripts. Result lands in output.txt.
# Uses input redirection instead of a useless `cat`, and $< for the prereq.
.PHONY: run-test
run-test: random.txt
	./mapper.py < $< | sort -n | ./reducer.py > output.txt

# Run the job through Hadoop streaming in local (standalone) mode, then
# plot the result for 3 seconds with gnuplot.
# The streaming jar is globbed so this works with any Hadoop version,
# not only 2.6.0 (the shell expands the * at recipe time).
.PHONY: run-standalone
run-standalone: random.txt
	rm -rf output
	$(HADOOP_HOME)/bin/hadoop jar $(HADOOP_HOME)/share/hadoop/tools/lib/hadoop-streaming-*.jar -input random.txt -output output -mapper ./mapper.py -reducer ./reducer.py
	gnuplot -e 'plot "output/part-00000"; pause 3'

# Run the job on a (pseudo-)distributed HDFS cluster:
#   1. clear local and HDFS output, 2. upload the input,
#   3. run the streaming job (-files ships the scripts to the nodes),
#   4. fetch the result and plot it for 3 seconds.
# The streaming jar is globbed so this works with any Hadoop version,
# not only 2.6.0.
.PHONY: run-cluster
run-cluster: random.txt
	rm -rf output
	$(HADOOP_HOME)/bin/hdfs dfs -rm -r -f output
	$(HADOOP_HOME)/bin/hdfs dfs -put -f random.txt
	$(HADOOP_HOME)/bin/hadoop jar $(HADOOP_HOME)/share/hadoop/tools/lib/hadoop-streaming-*.jar -files mapper.py,reducer.py -input random.txt -output output -mapper mapper.py -reducer reducer.py
	$(HADOOP_HOME)/bin/hdfs dfs -get output
	gnuplot -e 'plot "output/part-00000"; pause 3'

# Format the namenode, start HDFS, and create the per-user home directory.
# NOTE(review): `namenode -format` wipes existing HDFS data — this target is
# meant for first-time setup.
# `mkdir -p` creates /user and /user/<name> in one idempotent step, so
# re-running the target does not fail on already-existing directories.
.PHONY: start-cluster
start-cluster:
	$(HADOOP_HOME)/bin/hdfs namenode -format
	$(HADOOP_HOME)/sbin/start-dfs.sh
	$(HADOOP_HOME)/bin/hdfs dfs -mkdir -p /user/`whoami`

# Shut down the HDFS daemons started by start-cluster.
.PHONY: stop-cluster
stop-cluster:
	$(HADOOP_HOME)/sbin/stop-dfs.sh

# Remove everything this Makefile generates: the Hadoop output directory,
# the local pipeline result, the generated input, and editor backup files.
# Plain files only need `rm -f`; `-rf` is reserved for the directory.
.PHONY: clean
clean:
	rm -rf output
	rm -f output.txt random.txt
	rm -f *~




