.SUFFIXES:
.PHONY: all bin java clean clobber backup public test test_java
# (SECONDARY keeps all intermediate targets)
.SECONDARY:
# Remove a half-written target when its recipe fails, so a corrupt file
# never looks "up to date" on the next run.
.DELETE_ON_ERROR:

all: bin java

#
# Machine-specific settings.
#
# Note the trailing slashes on N and JAVA_BIN.
#
# Absolute path of this directory, WITH a trailing slash; the MATLAB recipes
# below cd into src/ and build paths like '$(N)num_ratings_per_movie.txt'.
# (This was `$(shell,pwd)`, a typo that expanded to nothing because make read
# the function name as "shell,pwd".)
N := $(CURDIR)/
BZ2DAT := data/enwiki-20080103-pages-articles.xml.bz2
JAVA_BIN := # use the one on the PATH
JAVA_CP := "bin:lib/trove-2.0.2.jar:lib/commons-lang-2.4.jar:lib/junit-4.1.jar"
DOT := "dot"
JAVA_BIG_HEAP := -Xmx3g
CFLAGS := -O3 -Wall
#CFLAGS := -g -Wall # for debugging

# 
# Utility Functions (use with $(call ...,args))
#

# Time command $1 and save to target.time.
# The subshell keeps the shell's `time` report separate from $1's own stderr.
FTIME = (time $1) 2> $@.time

# Run java program (and args) $1 and save error to target.err.
# -Dfile.encoding=utf8 keeps wiki text intact regardless of the host locale.
FJAVA = $(JAVA_BIN)java -cp $(JAVA_CP) -Dfile.encoding=utf8 $1 2>$@.err

# Rule for CPP compilation.
# $1: the program name (eg cluster for bin/cluster)
# $2: source files that we have to compile (eg. u1 for src/u1.cpp)
# $3: source files from netflix directory
# Expanded with $(eval $(call TCXX,...)) below: records the program in
# PROGRAMS/SOURCES and attaches its object files as prerequisites of the
# bin/$1 link rule (the recipe itself comes from the generic bin/% rule).
define TCXX
PROGRAMS += $1
SOURCES += $1 $2
bin/$1 : $(1:%=objs/%.o) $(2:%=objs/%.o) $(3:%=objs/%.o)
endef

#
# Java compilation. Javac will determine what it has to recompile without any
# help from make. Unfortunately, it won't tell make what the dependencies are.
# So, we can either rebuild every data target that depends on any java program,
# or we can leave the java dependencies out of the rest of the make file. I've
# opted for the latter, because it seems like the lesser of two evils.
#
# You should run "make java" whenever you want to recompile any java.
#
# A data target may depend on some .java source files directly, but this
# Makefile is not smart enough to compile the resulting .class files
# automatically. These .java dependencies are a convenience only: a change to
# this file is a sufficient but not a necessary condition for the target
# actually being out of date.
#
# This sucks.
#

# Compile all java sources in one javac run (see caveats above).
java : $(wildcard src/u1/*.java) $(wildcard src/u1/**/*.java)
	mkdir -p bin
	$(JAVA_BIN)javac -d bin -cp $(JAVA_CP) $^

# Map src/u1/test/Foo.java -> u1.test.Foo class names for JUnit.
JAVA_TESTS := $(patsubst src/u1/test/%.java,u1.test.%,$(wildcard src/u1/test/*.java))
test_java: java
	$(call FJAVA, org.junit.runner.JUnitCore $(JAVA_TESTS)) 

#
# C++ compilation. 
# Not terribly pretty or extendible, but I did my best!

# define programs
PROGRAMS :=
SOURCES :=

# Each eval registers one program: first the program name, then the extra
# src/*.cpp modules it links against (u1 = common utilities, io = I/O code).
$(eval $(call TCXX,add_pred_files))
$(eval $(call TCXX,pagerank,u1))
$(eval $(call TCXX,adj_list_to_sparse_undirected_adj_matrix,u1))
$(eval $(call TCXX,pagerank_similarity,u1))
$(eval $(call TCXX,pagerank_movie_ids,u1))
$(eval $(call TCXX,filter_similarity_matrix,u1))
$(eval $(call TCXX,matrix_stats,u1))
$(eval $(call TCXX,neighbours,u1))
$(eval $(call TCXX,neighbour_intersection,u1))
$(eval $(call TCXX,remove_zeros,u1))
$(eval $(call TCXX,to_symmetric_binary_matrix,u1))
$(eval $(call TCXX,from_symmetric_binary_matrix,u1))
$(eval $(call TCXX,similarity_bin_at,u1))
$(eval $(call TCXX,symmetric_matrix_op,u1))
$(eval $(call TCXX,symmetric_matrix_top,u1))
$(eval $(call TCXX,bellkor_Abar,u1 io))
$(eval $(call TCXX,bellkor_Ahat,u1))
$(eval $(call TCXX,bellkor_knn,u1 io))
$(eval $(call TCXX,bellkor_knn_pred,u1 io))
$(eval $(call TCXX,coratings_pearson,u1 io))
$(eval $(call TCXX,coratings_count,u1 io))
$(eval $(call TCXX,test_prediction))
$(eval $(call TCXX,term_doc_freq,u1))
$(eval $(call TCXX,term_movies,u1))
$(eval $(call TCXX,term_weights,u1 io))
$(eval $(call TCXX,smooth_movies,u1 io))
$(eval $(call TCXX,smooth_users,u1 io))
$(eval $(call TCXX,similarity_movie_ids,u1))
$(eval $(call TCXX,test,u1 io))

# Build every registered program.
bin: $(PROGRAMS:%=bin/%)

# everything else required for proper compilation of C++

# Link rule: the .o prerequisites are attached per-program by the TCXX
# template above, so $^ is the full object list for this binary.
bin/% :
	@mkdir -p bin
	g++ $(CFLAGS) -Iobjs $^ -o $@

# Generate a dependency fragment mf/X.d for src/X.cpp using g++ -MM.
# The sed rewrites "X.o:" into "objs/X.o mf/X.d :" so the .d file is itself
# regenerated whenever any listed source/header changes; the leading "objs/"
# prefix comes from the printf. $$$$ is the shell PID, giving a unique temp
# file per process (parallel-make safe).
# BUGFIX: was `echo -n`, which prints a literal "-n" under strict POSIX sh
# and would corrupt the fragment; printf is portable.
mf/%.d : src/%.cpp
	@mkdir -p mf
	@mkdir -p objs
	@set -e; rm -f $@; printf "objs/" > $@; \
	g++ -MM $(CFLAGS) $< > $@.$$$$; \
	sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ >> $@; \
	rm -f $@.$$$$

# One fragment per program; -include tolerates their absence on the first
# run (make regenerates them via the rule above and restarts).
CPPDEPS := $(addprefix mf/,$(addsuffix .d,$(PROGRAMS)))
-include $(CPPDEPS)

objs/%.o: src/%.cpp
	@mkdir -p objs
	g++ $(CFLAGS) -c -o $@ $<

# Uncompress big data file for (somewhat) faster access.
big.xml : $(BZ2DAT)
	bunzip2 -c $< >$@

# Take every 100th page for the first 1M pages (a more manageable dataset)
# (args to u1.Slice: start offset, stride, page limit).
slice1h1M.xml : big.xml
	$(call FTIME,$(call FJAVA,u1.Slice 0 100 1000000 <$< >$@))

#
# The Real Work
#

# Look for shared data files in shared directory.
# JLM: don't use: vpath %.xml $S/
#      there appears to be a bug in make wrt circular dependencies
#      strangely, this one works fine:
#VPATH := $S

# Files to be cleaned (added later).
CLEAN_FILES :=

# Compute some stats on the wiki.
%.stats : %.xml src/u1/Stats.java
	$(call FTIME,$(call FJAVA,u1.Stats <$< >$@))

# Strip out titles.
%.titles : %.xml
	$(call FTIME,$(call FJAVA,u1.Titles <$< >$@))

# Get ids for titles.
# big.titles supplies the full title->ordinal mapping to look $*.titles up in.
%.ids : %.titles big.titles 
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.TitleNumbers $^ >$@))

# Sort the titles list.
%.titles.sorted : %.titles
	$(call FTIME,sort <$< >$@)

# Compute adjacency list.
# Multiple % targets in one pattern rule are grouped in GNU make: a single
# ForwardLinks run produces all three files.
%.adj.dead %.adj.redirect %.adj : %.xml %.titles 
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.ForwardLinks $*.adj $*.titles <$<))

# Compute reversed adjacency list.
%.rev.adj : %.adj
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.BackwardLinks <$< >$@))

# Build sparse undirected adjacency matrix from adjacency list.
%.adjmat : %.adj
	$(call FTIME,bin/adj_list_to_sparse_undirected_adj_matrix <$< >$@ 2>$@.err)

# Follow redirects in adj lists.
%.noredir.adj : %.adj %.adj.redirect
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.FollowRedirects <$^ >$@))

# Pretty print adjacency lists for debugging.
%.adj.html : %.adj %.titles
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.PrettyAdjList $^ >$@))
%.rev.adj.html : %.rev.adj %.titles
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.PrettyAdjList $^ >$@))

# Find example pages for adjacency lists for debugging.
%.adj.examples : %.titles %.adj.hist %.adj 
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.LinkHistogramExamples $*.titles $*.adj.hist 10 <$*.adj >$@))
%.rev.adj.examples : %.titles %.rev.adj.hist %.rev.adj 
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.LinkHistogramExamples $*.titles $*.rev.adj.hist 10 <$*.rev.adj >$@))

# Link frequency histograms.
%.adj.hist : %.adj
	$(call FTIME,$(call FJAVA, u1.LinkHistogram <$< >$@))

# Page rank with uniform E.
%.rank : %.adj
	$(call FTIME,bin/pagerank <$< >$@ 2>$@.err)

# Page rank with E starting at movies.
# $(join < -i,...) zips the flag list onto the two prerequisites, yielding
# "<X.adj -iX.movies": stdin from the adj list, movies file via -i.
%.movie.rank : %.adj %.movies
	$(call FTIME,bin/pagerank $(join < -i,$(wordlist 1,2,$^)) -n0.1 >$@ 2>$@.err)

# Sometimes we want to sort ranks by page number instead of rank.
%.rank.page_number_sorted : %.rank
	sort $< > $@

# Sometimes we want just the page numbers.
%.rank.page_numbers : %.rank
	cut -f 1 -d " " $< >$@

# Get tiles for rank file, for testing. HACK: just use big.titles
# ($(join <,$^) prefixes "<" onto the first prerequisite only.)
%.rank.titles : big.titles %.rank.page_numbers
	$(call FJAVA,$(JAVA_BIG_HEAP) u1.NumberTitles $(join <,$^) >$@)

# Dot (graphviz) files for visualizing (small!) graphs.
%.adj.dot : %.adj %.titles %.adj.redirect %.adj.rank
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.PrettyDot $(join < -t -r -w,$^) >$@))

# Render a Dot file to svg. Other formats broken; SVG slow; render in GIMP OK.
%.dot.svg : %.dot
	$(DOT) -T svg <$< >$@

# We can't visualize everything, but we can do a subset of the top pages.
%.rank.top1h : %.rank
	head -n 100 $< | sed "s/\([0-9]*\).*/\1/" >$@
%.rank.top1h.dot : %.adj %.titles %.adj.redirect %.rank %.rank.top1h
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.PrettyDot $(join < -t -r -w -f,$^) >$@))
%.movie.rank.top1h.dot : %.adj %.titles %.adj.redirect %.movie.rank %.movie.rank.top1h
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.PrettyDot $(join < -t -r -w -f,$^) >$@))

# There was once a file called small.titles; just use small.titles.new now.
small.titles : small.titles.new
	cp $< $@

# Number titles with their netflix movie ids.
# (FNR is the 1-based line number, i.e. the netflix id; unmatched titles
# are marked NO_WIKI_TITLE upstream and dropped here.)
small.titles%.with_ids : small.titles%
	awk '{print FNR " " $$0}' $< | grep -v NO_WIKI_TITLE >$@

small.titles%.pages.xml : big.xml small.titles%
	$(call FJAVA,$(JAVA_BIG_HEAP) u1.PullPages $(join <,$^) >$@)

# Find actors.
# Pull every page whose body matches the "infobox actor" template (Java
# regex, case-insensitive, dot-matches-newline).
actors.xml : big.xml
	$(call FJAVA,$(JAVA_BIG_HEAP) u1.PullPattern <$^ \
	  actors "(?i)(?s).*\\{\\{infobox actor.*" >$@)

# Remove "many movie to one article" matchings.
# One pattern rule replaces three copy-pasted explicit rules; the stem $*
# (1, 2 or 3) is the max number of movies allowed to share one article and
# is passed straight through to the ruby script.
one_to_%.titles.new.with_ids : small.titles.new.with_ids num_movie_ratings_training_set.txt
	ruby src/one_to_one_titles.rb <$^ $* >$@

# Find ordinals of movies for use with PageRank.
# (One page number per line, in the same order as small.titles.)
big.movies : small.titles big.titles
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.TitleNumbers $^ >$@))
# Duplicates indicate several movies matched to the same wiki page.
big.movies.dup : big.movies
	sort $< | uniq -d > $@
big.movies.dup.titles : big.titles big.movies.dup 
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.NumberTitles $(join <,$^) >$@))

# Same as the three rules above, for the "new" title matching.
big.new.movies : small.titles.new big.titles
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.TitleNumbers $^ >$@))
big.new.movies.dup : big.new.movies
	sort $< | uniq -d > $@
big.new.movies.dup.titles : big.titles big.new.movies.dup 
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.NumberTitles $(join <,$^) >$@))

# We also need the movie ids to build similarity files.
big.movies.with_ids : small.titles.with_ids big.titles
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.MovieIdNumbers $^ >$@))
big.new.movies.with_ids : small.titles.new.with_ids big.titles
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.MovieIdNumbers $^ >$@))
big.one_to_1.new.movies.with_ids : one_to_1.titles.new.with_ids big.titles
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.MovieIdNumbers $^ >$@))
big.one_to_2.new.movies.with_ids : one_to_2.titles.new.with_ids big.titles
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.MovieIdNumbers $^ >$@))
big.one_to_3.new.movies.with_ids : one_to_3.titles.new.with_ids big.titles
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.MovieIdNumbers $^ >$@))

# Just keep the ids.
big.movies.ids : small.titles.with_ids
	cut -f 1 -d " " $< >$@
big.new.movies.ids : small.titles.new.with_ids
	cut -f 1 -d " " $< >$@
big.infobox.movies.ids : sf.infobox.big.similarity.bin
	bin/similarity_movie_ids 17770 <$< >$@

# Look up netflix titles corresponding to matched movies.
big.new.movies.netflix_titles : netflixMovies.txt big.new.movies.ids
	$(call FJAVA,$(JAVA_BIG_HEAP) u1.MovieIdTitles $(join <,$^) >$@)

# Count forward links per page for matched movies.
big.movies.link_counts : big.adj big.movies.with_ids
	perl src/count_movie_links.pl $(join <,$^) >$@
big.new.movies.link_counts : big.adj big.new.movies.with_ids
	perl src/count_movie_links.pl $(join <,$^) >$@

# Plot link count and article length correlations.
#function plot_correl(map_file_1, map_file_2, max_ratings, title, x_label, y_label, outfile)
# NOTE: MATLAB runs from src/, so the second path must be absolute ($(N)
# carries a trailing slash).
doc/big.movies.link_count_v_raters.eps : big.movies.link_counts 
	cd src; matlab -nojvm -nodisplay -r \
	  "plot_correl('../$<','$(N)num_ratings_per_movie.txt',1000,'','ratings','links','$@');exit"; cd ..
doc/big.new.movies.link_count_v_raters.eps : big.new.movies.link_counts 
	cd src; matlab -nojvm -nodisplay -r \
	  "plot_correl('../$<','$(N)num_ratings_per_movie.txt',1000,'','ratings','links','$@');exit"; cd ..
doc/big.new.movies.word_count_v_raters.eps : articleVcount.txt
	cd src; matlab -nojvm -nodisplay -r \
	  "plot_correl('$<','$(N)num_ratings_per_movie.txt',1000,'','ratings','words','$@');exit"; cd ..

# Sanity check on graph growth, below.
big.movies.hist : big.adj big.movies
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.LinkHistogram $(join <,$^) >$@))
big.movies.hist.examples : big.adj big.titles big.movies.hist
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.LinkHistogramExamples $(join <, $^) 20 big.movies >$@))

# Grow graph around movies.
# Each step expands the previous page set by one hop, so %.movies.N is the
# N-hop neighbourhood of the matched movies.
%.movies.1 : %.adj %.movies %.adj.redirect
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.Grow $(join <,$^) >$@))
%.movies.2 : %.adj %.movies.1 %.adj.redirect
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.Grow $(join <,$^) >$@))
%.movies.3 : %.adj %.movies.2 %.adj.redirect
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.Grow $(join <,$^) >$@))
%.movies.4 : %.adj %.movies.3 %.adj.redirect
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.Grow $(join <,$^) >$@))

# Grow subgraphs induced around movies.
%.movies.1.adj : %.adj %.movies.1
	$(call FTIME,$(call FJAVA, u1.Subgraph $(join <,$^) >$@))
%.movies.2.adj : %.adj %.movies.2
	$(call FTIME,$(call FJAVA, u1.Subgraph $(join <,$^) >$@))
%.movies.3.adj : %.adj %.movies.3
	$(call FTIME,$(call FJAVA, u1.Subgraph $(join <,$^) >$@))
%.movies.4.adj : %.adj %.movies.4
	$(call FTIME,$(call FJAVA, u1.Subgraph $(join <,$^) >$@))

# Do this with noredir as well.
%.noredir.movies.1 : %.adj %.movies %.adj.redirect
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.Grow $(join <,$^) >$@))

# Look up movie ids in global rank (as opposed to movie rank) order.
%.movies.ranked : %.movies.with_ids %.rank
	$(call FTIME,bin/pagerank_movie_ids $(join -i -r, $^) >$@ 2>$@.err)

# Look up movie ids in movie rank (as opposed to global rank) order.
%.movies.movie_ranked : %.movies.with_ids %.movie.rank
	$(call FTIME,bin/pagerank_movie_ids $(join -i -r, $^) >$@ 2>$@.err)

# Strip out the movie_ids for other lookups.
%.ranked.page_numbers : %.ranked
	cat $< | sed "s/^[0-9]* //" >$@

# Strip out the page_numbers for lookups on movie ids.
%.ranked.movie_ids : %.ranked
	cat $< | sed "s/ [0-9]*$$//" >$@

# Look up movie (actually page) titles for ranked movies.
%.movies.ranked.titles : %.titles %.movies.ranked.page_numbers
	$(call FJAVA,$(JAVA_BIG_HEAP) u1.NumberTitles $(join <,$^) >$@)

# Look up Netflix movie titles for ranked movies.
%.movies.ranked.netflix_titles : netflixMovies.txt %.movies.ranked.movie_ids
	$(call FJAVA,$(JAVA_BIG_HEAP) u1.MovieIdTitles $(join <,$^) >$@)

# Run simple pagerank similarity measure.
%.rank.similarity : %.movies.ranked
	$(call FTIME,bin/pagerank_similarity <$< >$@ 2>$@.err)

# Run link similarity measure on new matches.
# The trailing number is the damping/threshold argument to LinkSimilarity.
link_0.new.similarity : big.adj big.new.movies.with_ids big.rank
	$(call FJAVA,$(JAVA_BIG_HEAP) u1.LinkSimilarity $(join <,$^) 0 >$@)
link_0.1.new.similarity : big.adj big.new.movies.with_ids big.rank
	$(call FJAVA,$(JAVA_BIG_HEAP) u1.LinkSimilarity $(join <,$^) 0.1 >$@)
link_noredir_0.new.similarity : big.noredir.adj big.new.movies.with_ids big.noredir.rank
	$(call FJAVA,$(JAVA_BIG_HEAP) u1.LinkSimilarity $(join <,$^) 0 >$@)
link_noredir_0.1.new.similarity : big.noredir.adj big.new.movies.with_ids big.noredir.rank
	$(call FJAVA,$(JAVA_BIG_HEAP) u1.LinkSimilarity $(join <,$^) 0.1 >$@)

# Run simple pagerank similarity measure w/ movie bias.
%.movie.rank.similarity : %.movies.movie_ranked
	$(call FTIME,bin/pagerank_similarity <$< >$@ 2>$@.err)

# More efficient storage for similarity matrices.
%.similarity.bin : %.similarity
	bin/to_symmetric_binary_matrix 17770 <$< >$@

# Get principal components for similarity matrix.
# 17770 is the number of movies used by the pseudo SVD method.
# 64 is the number of features used by the pseudo SVD method.
%.similarity.eigvals %.similarity.eigvecs : %.similarity
	cd src; matlab -nojvm -nodisplay -r "similarity_eigs('../$<',17770,64);exit"; cd ..

# Get principal components for similarity matrix with zeros removed.
# 17770 is the number of movies used by the pseudo SVD method.
# 64 is the number of features used by the pseudo SVD method.
# 0.1 is the default feature value for a zeroed movie.
%.similarity.nonzero.eigvals %.similarity.nonzero.eigvecs : %.similarity.nonzero
	cd src; matlab -nojvm -nodisplay -r "similarity_nonzero_eigs('../$<',17770,64,0.1);exit"; cd ..

# Baseline hybrid pseudo SVD prediction. Defaults from file were initial feature
# weights of 0.01 and initial feature values of 0.1 for all movies.
base.eigvals :
	yes '0.01' | head -n 64 > $@
base.eigvecs :
	yes '0.1'  | head -n `expr 17770 "*" 64` > $@

# Test a prediction. This doesn't count predictions < 0.
%.pred.res : probe_answers.txt %.pred 
	bin/test_prediction $^ >$@

# Take a prediction that has been printed in tuple form
# movie_id user_id rating
# ...
# where there is no sort order assumed and turn it into block form
# ordered like the probe set.
%.pred.residuals : %.pred.residuals.tuples probe_answers.txt
	perl src/tuple_with_order.pl \
	  "perl src/block_to_tuple.pl <probe_answers.txt |" <$< | \
	perl src/tuple_to_block.pl | \
	sed "s/^.*,//" >$@

# Add prediction files. Currently we always use 26 effects.
%.pred : %.pred.residuals probe_residuals.txt
	bin/add_pred_files $^ $@

# Aggregate errors.
# (Reads the .errors side file that add_pred_files/test_prediction leave
# next to the prediction — TODO confirm which tool writes it.)
%.pred.errors.by_movie : %.pred
	perl src/errors_by.pl movie <$<.errors >$@
%.pred.errors.by_user : %.pred
	perl src/errors_by.pl user <$<.errors >$@

# Get training set as single text file, sorted by movie and then user.
training_set.txt: data/training_set.tar
	tar -xOf $^ | \
	perl src/block_to_tuple.pl | \
	sort -t ',' -k 1n,1 -k 2n,2 | \
	perl src/tuple_to_block.pl >$@

# Number of ratings including probe set.
# I think this requires the bash shell.
# Pipeline: line numbers of the "N:" block headers plus the total line
# count; the ruby one-liner differences consecutive numbers to get the
# ratings count per movie.
num_ratings_per_movie.txt: training_set.txt
	{ grep $< -n -e ":" | cut -d ":" -f 1; \
	  wc -l $< | cut -d " " -f 1; } | \
	ruby -e 'movie = 0; \
          STDIN.readlines.drop(1).inject(1) {|n,l| l = l.to_i; \
	  puts "#{movie += 1} #{l-n}"; l}' >$@

# Number of ratings without probe set.
num_movie_ratings_training_set.txt: training_set_without_probe.txt
	{ grep $< -n -e ":" | cut -d ":" -f 1; \
	  expr `wc -l $< | cut -d " " -f 1` + 1; } | \
	ruby -e 'movie = 0; \
          STDIN.readlines.drop(1).inject(1) {|n,l| l = l.to_i; \
	  puts "#{movie += 1} #{l-n-1}"; l}' >$@

# Get num ratings for all probe movies for correlation with errors.
probe_movies.txt: data/probe.txt
	grep $< -e ":" | tr -d ":" | sort >$@
num_ratings_per_probe_movie.txt: num_ratings_per_movie.txt probe_movies.txt
	cat $< | tr -d ',' | sort | join probe_movies.txt - | sort -n >$@ 

# Probe set categorization by which movies we've matched.
probe_matched_%_categories.txt : %.movies.ids
	perl src/blend_build_match_categorized_probe.pl $< <probe_answers.txt >$@

# Stats on the probe set that we have movie matches for.
%.movies.ids.probe_stats : probe_answers.txt %.movies.ids 
	perl src/probe_stats.pl $(join <,$^) >$@

# Dump the coratings so we can get some stats.
movie_coratings.bin: training_set_without_probe.txt
	bin/coratings_count <$< >$@
movie_coratings.matrixstats : movie_coratings.bin
	bin/from_symmetric_binary_matrix 17770 <$< | \
	  bin/matrix_stats 1 nodiag >$@

#
# Ratings residuals for the probe set, in the same order as data/probe.txt.
# Format:
# 1:
# 3.923228
# 3.819071
# 3.735523
# ...
# 10:
# ...
# This input must be supplied externally; fail loudly if it is missing.
probe_residuals.txt:
	@echo SORRY, we don\'t yet have the code to compute residuals.
	@echo You have to supply probe_residuals.txt yourself - see Makefile.
	@echo Contributions welcome!
	exit 1

# Rating residuals WITHOUT the probe set, in blocks ordered by user, with
# movies ordered by id within blocks.
# Format:
# 6:
# 30,-0.80209
# 157,-0.462986
# 173,0.28933
# 7:
# ...
# This input must be supplied externally; fail loudly if it is missing.
user_residuals.txt:
	@echo SORRY, we don\'t yet have the code to compute residuals.
	@echo You have to supply user_residuals.txt yourself - see Makefile.
	@echo Contributions welcome!
	exit 1

user_residuals_by_movie.txt: user_residuals.txt
	$(call FTIME,bash src/invert_ratings.sh <$^ >$@)

# Build BellKor's Abar and U matrices.
# NOTE: one run writes both bellkor_Abar.bin and bellkor_U.bin, but only
# Abar is the declared target; bellkor_U.bin has no rule of its own.
bellkor_Abar.bin: user_residuals_by_movie.txt
	$(call FTIME,bin/bellkor_Abar bellkor_Abar.bin bellkor_U.bin <$< 2>$@.err)
bellkor_Abar.txt: bellkor_Abar.bin
	bin/from_symmetric_binary_matrix 17770 <$< >$@
bellkor_U.txt: bellkor_U.bin
	bin/from_symmetric_binary_matrix 17770 <$< >$@
bellkor_pearson.similarity.bin: user_residuals_by_movie.txt
	$(call FTIME,bin/coratings_pearson <$< >$@ 2>$@.err)

# Build BellKor's Ahat matrix.
bellkor_Ahat.bin: bellkor_Abar.bin
	$(call FTIME,bin/bellkor_Ahat $^ bellkor_U.bin 500 >$@ 2>$@.err)

# Compute stats on similarity matrices.
%.similarity.matrixstats : %.similarity
	bin/matrix_stats 0.05 nodiag <$< >$@
%.similarity.bin.matrixstats : %.similarity.bin
	bin/from_symmetric_binary_matrix 17770 <$< | \
	  bin/matrix_stats 0.01 nodiag >$@

# As above, but customize bin sizes.
# BUGFIX: these explicit rules override the generic %.similarity.matrixstats
# pattern above, so they must use the same prerequisites the pattern would
# have chosen (link_0.new.similarity / link_0.1.new.similarity, which are
# built above; the previous prereqs link_0.similarity / link_0.1.similarity
# have no rule and made these targets unbuildable).
link_0.new.similarity.matrixstats : link_0.new.similarity
	bin/matrix_stats 0.0005 nodiag <$< >$@
link_0.1.new.similarity.matrixstats : link_0.1.new.similarity
	bin/matrix_stats 0.0005 nodiag <$< >$@

# Extract per-movie means and stddevs.
# The format is 1:min=0,max=0,mean=0,stddev=0,bins=0 ...
# So we can just cut them out and lose the summary row (17771).
%.matrixstats.mean: %.matrixstats
	head -n 17770 $< | cut -f 4 -d "=" | cut -f 1 -d "," >$@
%.matrixstats.stddev: %.matrixstats
	head -n 17770 $< | cut -f 5 -d "=" | cut -f 1 -d "," >$@

# Netflix title list, in descending order by ID (first line is netflix movie
# id 1). Non-ASCII characters removed to avoid MATLAB issues.
# (tr keeps only tab, newline and printable ASCII.)
netflix_titles : data/movie_titles.txt
	cut -f 3- -d "," $< | tr -cd '\11\12\40-\176' >$@

# Render and title the graphs. It takes a long time to produce all 18000
# graphs, so we taken every 10th one (third parameter).
%.matrixstats.html: %.matrixstats netflix_titles num_movie_ratings_training_set.txt
	rm -rf $<_files
	mkdir -p $<_files
	cd src; matlab -nodisplay -r "matrix_stats_plot('$<','netflix_titles','$(N)num_movie_ratings_training_set.txt',1000);exit"; cd ..

# Render an example for the paper.
cosine_stddev_figure: movie_cosine.similarity.matrixstats netflix_titles
	cd src; matlab -nojvm -nodisplay -r "matrix_stats_plot3('$<',5401,'Dodgeball',5701,'No_Contest',6001,'Tom_and_Jerry');exit"; cd ..

# So I can download the graphs easily.
%.matrixstats.html.tgz : %.matrixstats.html
	tar -czf $@ $^ $*.matrixstats $*.matrixstats_files

# Shrink matrix with respect to coratings.
# A shrinkage factor of 200 is about the mean # of coratings (S*U / (U + d))
# 500 is the factor used by BellKor.
%.shrink_500_coratings.similarity.bin : %.similarity.bin movie_coratings.bin
	bin/symmetric_matrix_op 17770 \
	  $(word 1,$^) shrink_with $(word 2,$^) by 500 >$@

# Nearest neighbours (just gets neighbours; doesn't make any predictions).
# This should be done on a similarity matrix.
%.neighbours_50 : %.bin
	$(call FTIME,bin/from_symmetric_binary_matrix 17770 <$< | \
	               bin/neighbours 50 >$@)
%.neighbours_200 : %.bin
	$(call FTIME,bin/from_symmetric_binary_matrix 17770 <$< | \
	               bin/neighbours 200 >$@)

%.similarity.nonzero : %.similarity
	bin/remove_zeros <$< >$@ 2>$@.zeros

# Look up titles. I usually view this in Excel, so they're CSV.
%.npretty.csv : % netflix_titles
	perl src/neighbours_pretty.pl $(join <,$^) >$@

# Run all neighbourhood intersections.
# (Aggregate target: no recipe, just builds both intersections.)
neighbour_intersections: \
  neighbours_50_cosine_pearson.neighbours \
  neighbours_200_cosine_pearson.neighbours

# Intersections of nearest neighbours. 
neighbours_%_cosine_pearson.neighbours: movie_cosine.similarity.neighbours_% movie_pearson.similarity.neighbours_%
	$(call FTIME,bin/neighbour_intersection $^ >$@ 2>$@.info)

# Get the probe answers from the training set. We need to keep them in the same
# order as the probe.txt file for testing predictions.
# tuple_select keeps only probe pairs; tuple_with_order re-sorts them into
# probe.txt order before converting back to block form.
probe_answers.txt : training_set.txt data/probe.txt
	perl src/block_to_tuple.pl <$< | \
	perl src/tuple_select.pl \
	  "perl src/block_to_tuple.pl <$(word 2,$^) |" | \
	perl src/tuple_with_order.pl \
	  "perl src/block_to_tuple.pl <$(word 2,$^) |" | \
	perl src/tuple_to_block.pl >$@

# Remove the probe set from the training set.
training_set_without_probe.txt : training_set.txt probe_answers.txt
	perl src/block_to_tuple.pl <$< | perl src/tuple_reject.pl \
	  "perl src/block_to_tuple.pl <$(word 2,$^) |" | \
	  perl src/tuple_to_block.pl >$@

# Training set without probe, sorted by user instead of movie.
training_set_users_without_probe.txt : training_set_without_probe.txt
	bash src/invert_ratings.sh <$< >$@

# Probe set sorted by users then movies (instead of other way around).
# The probe set input is sorted by neither.
probe_by_user.txt : data/probe.txt
	bash src/invert_ratings.sh <$< >$@

# Simple stats on user and movie ratings.
raw_movie_ratings_stats.txt : training_set_without_probe.txt
	perl src/ratings_stats.pl <$^ >$@
raw_user_ratings_stats.txt : training_set_users_without_probe.txt
	perl src/ratings_stats.pl <$^ >$@

# Find term (link) weightings based on ratings similarity
# See expt/make_term_weights.pl for more.
big.new.links_by_movie : big.noredir.adj big.new.movies.with_ids
	bin/term_movies $(join <,$^) >$@
big.one_to_1.new.links_by_movie : big.noredir.adj big.one_to_1.new.movies.with_ids
	bin/term_movies $(join <,$^) >$@
big.one_to_2.new.links_by_movie : big.noredir.adj big.one_to_2.new.movies.with_ids
	bin/term_movies $(join <,$^) >$@
big.one_to_3.new.links_by_movie : big.noredir.adj big.one_to_3.new.movies.with_ids
	bin/term_movies $(join <,$^) >$@

# Find number of ratings for each term.  
%.term_ratings : %.links_by_movie num_ratings_per_movie.txt
	ruby src/term_ratings.rb $(join <,$^) >$@

# NOTE(review): blank-separated field lists for cut -f are POSIX-permitted
# but not universally supported — verify "1 2" works with the local cut.
%.links_by_movie.term_counts : %.links_by_movie
	cut -f "1 2" -d " " $< >$@

# Find document frequencies for terms using whole Wikipedia corpus.
%.adj.doc_freq: %.adj
	bin/term_doc_freq $(join <,$^) >$@

# Find document frequencies for terms using only movie articles.
%.adj.new.movies.doc_freq: %.adj big.new.movies.ids
	bin/term_doc_freq $(join <,$^) >$@

# Density plot of term weights.
# (Feeds variable definitions plus the R script to R on stdin.)
rep/%.term_weights.density.png : %.term_weights
	echo "in_file <- '$<'; out_file <- '$@';" | \
	cat - src/term_weights_density.R | R --slave

# Summarize term weights from various different settings.
rep/term_weights.html: big.new.movies.with_ids actors.titles
	mkdir -p rep
	ruby expt/report_term_weights.rb >$@

# List of terms (wiki article ordinals), heaviest first.
%.term_weights.terms_by_weight : %.term_weights
	sort -k 2 -g -r <$< | cut -f 1 -d " " >$@

# Look up page titles corresponding to terms.
%.term_weights.titles : big.titles %.term_weights.terms_by_weight
	$(call FJAVA,$(JAVA_BIG_HEAP) u1.NumberTitles $(join <,$^) >$@)

# See which movies we have similarity data for.
%.similarity.bin.movies.ids : %.similarity.bin
	bin/similarity_movie_ids 17770 <$< >$@

# The Brad Pitt example. Grow graph around Brad. Brad is page 34687.
brad :
	echo 34687 >$@
brad.1 : big.adj brad big.adj.redirect
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.Grow $(join <,$^) >$@))
brad.2 : big.adj brad.1 big.adj.redirect
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.Grow $(join <,$^) >$@))
brad.3 : big.adj brad.2 big.adj.redirect
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.Grow $(join <,$^) >$@))
brad.%.adj : big.adj brad.%
	$(call FTIME,$(call FJAVA, u1.Subgraph $(join <,$^) >$@))
# Filter file for PrettyDot: the matched movies plus Brad himself.
brad.filter: big.new.movies
	cat $< > $@
	echo 34687 >> $@
brad.%.dot : brad.%.adj big.titles big.adj.redirect big.movie.rank brad.filter
	$(call FTIME,$(call FJAVA,$(JAVA_BIG_HEAP) u1.PrettyDot $(join < -t -r -w -f,$^) >$@))

#brad.%.sorted : brad.%
#	sort $< > $@
#brad.%.top1h : big.movie.rank.page_number_sorted brad.%.sorted
#	join $^ | sort --key=2 | head -n 100 | sed "s/ [0-9.e-]*$$//" > $@

# Include experiments.
# Each expt/make_X.pl script generates a makefile fragment mf/X.expt which
# is then included; -include tolerates fragments that don't exist yet.
mf/%.expt: expt/make_%.pl
	@mkdir -p mf
	@perl $^ >$@
PL_EXPT = $(wildcard expt/make_*.pl)
-include $(PL_EXPT:expt/make_%.pl=mf/%.expt)

#
# Utility Targets
#

# Make the whole tree group/world accessible (shared machine convenience).
public:
	chmod -R a+rwX .

# Delete compiled files.
clean:
	rm -rf bin
	rm -rf objs
	rm -rf mf
	rm -rf $(CLEAN_FILES)

# Delete more files...
# (Also removes the .time/.err side files written by FTIME/FJAVA.)
clobber: clean
	rm -rf ./*.time ./*.err
	rm -rf ./test

