require 'net/http'
require 'shellwords'
require 'uri'
#require 'rubygems'
#require 'libxml'

class BiblioBot
	LOGIN_URL = 'http://bibliotik.org/login'

	def initialize
	end

	# Logs in with the given credentials.  The session cookie from the
	# response (and from any redirect hop that sets one) is stored in
	# @cookie for use by #request / #scrape.
	#
	# Returns the body of the final, non-redirect response.
	def login (username, password)
		uri = URI.parse(LOGIN_URL)
		req = Net::HTTP::Post.new(uri.path)
		req.set_form_data({ 'username' => username, 'password' => password }, '&')

		res = Net::HTTP.start(uri.host, uri.port) { |http| http.request(req) }

		@cookie = res['Set-Cookie']

		puts @cookie

		# Follow the redirect chain by hand so every Set-Cookie header
		# along the way can be captured.
		while res['Location']
			puts res['Location']

			uri = URI.parse(res['Location'])
			# request_uri keeps the query string; uri.path alone would drop it.
			req = Net::HTTP::Get.new(uri.request_uri)
			req['Cookie'] = @cookie

			res = Net::HTTP.start(uri.host, uri.port) { |http| http.request(req) }

			if (res['Set-Cookie'])
				@cookie = res['Set-Cookie']
			end
		end

		res.body
	end

	# Fetches url with the stored session cookie and returns the response
	# body.  When use_curl is true the fetch shells out to curl; otherwise
	# Net::HTTP is used.
	def request (url, use_curl = true)
		if (use_curl)
			# -g turns off curl's []/{} globbing (replacing the old manual
			# bracket escaping), and Shellwords.escape prevents shell
			# injection through the cookie or the url.
			`curl -g -b #{Shellwords.escape(@cookie.to_s)} #{Shellwords.escape(url)}`
		else
			uri = URI.parse(url)
			# request_uri keeps any query string (uri.path would drop it).
			req = Net::HTTP::Get.new(uri.request_uri)
			if (@cookie)
				req['Cookie'] = @cookie
			end
			res = Net::HTTP.start(uri.host, uri.port) { |http| http.request(req) }

			res.body
		end
	end

	# Walks the torrent listing pages (optionally filtered by keyword),
	# saving each listing page as pageN.html and downloading every
	# .torrent file not already present in output_dir.  Stops when a page
	# yields no download links, or -- for keyword searches -- when an
	# already-downloaded torrent is encountered ("all caught up").
	def scrape (keyword = 'all', output_dir = '', initial_page = 1)
		# A login session is required first.
		return if !@cookie

		keyword ||= 'all'   # tolerate nil from callers (e.g. missing ARGV)
		output_dir ||= ''
		# nil/0 would skip the page-1 URL forms below; clamp to >= 1.
		page = initial_page.to_i < 1 ? 1 : initial_page.to_i

		enabled = true

		pattern = Regexp.new('http://bibliotik.org/torrents/\d+/download')

		while (enabled)
			# NOTE(review): keyword is interpolated unescaped -- keywords with
			# spaces or '&' will produce a malformed URL.  Left as-is to
			# preserve existing behavior for plain keywords.
			if (page == 1)
				if (keyword == 'all')
					page_url = 'http://bibliotik.org/torrents/'
				else
					page_url = "http://bibliotik.org/torrents/?search=#{keyword}&cat[]=5&orderby=added&order=desc"
				end
			else
				if (keyword == 'all')
					page_url = "http://bibliotik.org/torrents/?orderby=added&order=desc&page=#{page}"
				else
					page_url = "http://bibliotik.org/torrents/?search=#{keyword}&cat[0]=5&orderby=added&order=desc&page=#{page}"
				end
			end

			puts "Page #{page} (#{page_url})"

			body = request(page_url)

			File.open("page#{page}.html", 'w') { |file| file.write(body) }

			urls = body.scan(pattern)

			if (urls.length == 0)
				enabled = false
			end

			urls.each do |url|
				name = /\d+/.match(url)[0] + '.torrent'
				# Avoid writing to '/NNN.torrent' (filesystem root) when
				# output_dir is empty.
				path = output_dir.empty? ? name : File.join(output_dir, name)

				puts path

				if (File.size?(path))
					if (keyword != 'all')
						enabled = false # all caught up
					end
				else
					# 'wb': torrent files are binary; avoid newline/encoding munging.
					File.open(path, 'wb') do |file|
						file.write(request(url, false))
					end
				end
			end

			page = page + 1

			sleep 5 # be polite to the server between listing pages
		end
	end
end

# CLI entry point: ruby bibliobot.rb USERNAME PASSWORD [KEYWORD] [DIR] [PAGE]
if __FILE__ == $0
	bot = BiblioBot.new
	bot.login(ARGV[0], ARGV[1])

	# Fall back to scrape's intended defaults when arguments are omitted;
	# previously nil keyword/dir were passed through and nil.to_i made
	# the start page 0, skipping the page-1 URL forms.
	keyword = ARGV[2] || 'all'
	dir = ARGV[3] || ''
	page = (ARGV[4] || 1).to_i

	bot.scrape(keyword, dir, page)
end
