(ns webcrawler.core
(:require [clojure.contrib.http.agent :as http])
(:require [clojure.contrib.duck-streams :as duck])                                  	     
(:require [clojure.contrib.java-utils :as java-uts])
(:require [clojure.contrib.str-utils :as str-uts]))

;; File paths used to persist crawl state between runs.
(def been-there-loc "been-there-file.txt")     ; urls already visited
(def should-visit-loc "should-visit-file.txt") ; url queue still to crawl
(def found-pdfs-loc "pdfs-file.txt")           ; pdf links found so far
(def serie-size 28)   ; number of agents launched per serie (see visit-serie)
(def counter (ref 0)) ; count of successfully processed agents
(def stop 40)         ; crawl halts once counter reaches this many successes


(defn crawlable?
  "Returns true when url looks like a crawlable page (.html/.htm,
  case-insensitive, or .php), false otherwise."
  [url]
  (cond
    (re-matches #"(?i).*?\.html$" url) true
    (re-matches #"(?i).*?\.htm$" url) true
    (re-matches #".*?\.php$" url) true
    (re-matches #".*?\.\);$" url) false
    ;; :else is the idiomatic cond default; the original used #(true),
    ;; a function literal that only worked because a fn object is truthy.
    :else false))

 
(defn pdf?
  "When url is an http link ending in .pdf, returns the url itself;
  otherwise returns nil."
  [url]
  (when-let [match (re-matches #"http://.+?\.pdf$" url)]
    match))

 
(defn request
  "Starts an http agent fetching url; the handler slurps the response
  body into a string. Returns the agent immediately (fetch is async)."
  [url]
  (http/http-agent url
                   :handler (fn [agt] (duck/slurp* (http/stream agt)))))


(defn read-seq-from-file
  "Returns the lines of filename as a seq, or an empty list when the
  file does not exist."
  [filename]
  (if (.exists (java.io.File. filename))
    (duck/read-lines filename)
    ;; Was (list ()) — a ONE-element list containing an empty list, which
    ;; is not empty and injected a bogus () \"url\" into the crawl queue.
    ()))

  
(defn write-seq-to-file
  "Appends the strings in xs to filename, one per line. When rm-past is
  true, deletes the file first so only xs remains."
  [xs filename rm-past]
  ;; Renamed the first parameter: `seq` shadowed clojure.core/seq.
  ;; `when` replaces the one-armed `if`s.
  (when (true? rm-past)
    (java-uts/delete-file filename true))
  (when-not (empty? xs)
    (duck/append-spit filename (println-str (str-uts/str-join "\n" xs)))))

		
(defn rm-junk
  "Drops every element of seq already contained in have, then keeps only
  the elements satisfying predicate f."
  [f seq have]
  ;; let instead of def: the original def created/overwrote a GLOBAL var
  ;; `unique` on every call — a classic Clojure anti-pattern and a data
  ;; race if calls ever interleave.
  (let [unique (remove #(.contains have %) seq)]
    (filter f unique)))

(defn commit-to-file
  "Persists crawl progress: appends next-url to the been-there file,
  appends found pdf links, and rewrites the should-visit queue."
  [next-url save-pdf-links next-should-visit]
  (doseq [[data file rm-past] [[(list next-url) been-there-loc false]
                               [save-pdf-links found-pdfs-loc false]
                               [next-should-visit should-visit-loc true]]]
    (write-seq-to-file data file rm-past)))


(defn visit-serie
  "Launches an http agent for every url in should-visit, waits up to 15s
  for all of them, then returns the agents."
  [should-visit]
  ;; let instead of def: the original leaked `agents` as a global var.
  (let [agents (map request should-visit)]
    (apply await-for 15000 agents)
    agents))

(defn parse-urls
  "Extracts every http:// url from the html the agent fetched."
  [agent]
  ;; let instead of def: the original leaked `html` as a global var.
  (let [html (http/result agent)]
    (re-seq #"http://[^;\"' \t\n\r]+" html)))


(defn failed-agt
  "Returns true when agent failed (no http status, or an agent error),
  false otherwise."
  [agent]
  (cond
    (not (http/status agent)) true
    (http/error? agent) true
    ;; :else replaces #(true), a truthy-fn-literal accident (same defect
    ;; as in crawlable?).
    :else false))


(defn start-webcrawler
  "Series of agents (28 agents in each serie) visit crawlable urls in
  effort to find links that lead to .pdf. Results are parsed and saved
  in three .txt files. It stops when 40 agents succeed. Usually finishes
  in less than a min."
  [agents been-there should-visit]
  (if (empty? agents)

    ;; no agents left to process: launch a new serie of requests
    (let [serie (take serie-size should-visit)
          next-agents (visit-serie serie)
          next-should-visit (drop serie-size should-visit)]
      (recur next-agents been-there next-should-visit))

    (let [next-agent (first agents)]
      (if (failed-agt next-agent)

        ;; agent failed: skip it
        (recur (rest agents) been-there should-visit)

        ;; agent succeeded: harvest its links and persist progress.
        ;; All bindings are now `let`-locals — the original used def,
        ;; silently creating six global vars on every iteration.
        (let [next-url (http/request-uri next-agent)
              all-urls (seq (into #{} (parse-urls next-agent)))
              next-been-there (cons next-url been-there)
              save-pdf-links (rm-junk pdf? all-urls ())
              new-should-visit (rm-junk crawlable? all-urls been-there)
              next-should-visit (concat should-visit new-should-visit)]
          (println  " Still working, for immediate stop press Ctrl + D ")
          (commit-to-file next-url save-pdf-links next-should-visit) ; save important data
          (dosync (alter counter inc))
          ;; recur instead of a direct self-call: every recursive call is
          ;; in tail position, so this avoids growing the JVM stack.
          (if (< @counter stop)
            (recur (rest agents) next-been-there next-should-visit)))))))


;; Seed the queue with a start url if no should-visit file exists yet.
(when-not (.exists (java.io.File. should-visit-loc))
  (write-seq-to-file (list "http://www.oracle.com/index.html") should-visit-loc true))

;; Restore persisted crawl state (empty seqs on a fresh run).
(def been-there (read-seq-from-file been-there-loc))
(def should-visit (read-seq-from-file should-visit-loc))

;; Kick off the crawl with no in-flight agents.
(start-webcrawler () been-there should-visit)
