"""
Download the (metadata of) all the datasets of opendata.antwerpen.be.
The datasets are stored as JSON files.
The script waits 4 seconds between HTTP requests.
"""

__author__ = "Bart Hanssens <bart.hanssens@fedict.be>"

import os, errno, sys, time, argparse, logging
import urllib2
from bs4 import BeautifulSoup

logging.basicConfig(filename='antwerpen-harvest.log', level=logging.DEBUG)


def getdata(url):
	"""Make an HTTP GET request and return the response body.

	url -- absolute URL to fetch

	Returns an empty string on failure so callers can simply test
	truthiness instead of handling exceptions themselves.
	"""
	logging.info('Getting %s', url)

	try:
		resp = urllib2.urlopen(urllib2.Request(url))
		try:
			return resp.read()
		finally:
			# Close the connection explicitly; urlopen() responses
			# are not context managers in Python 2.
			resp.close()
	except urllib2.HTTPError as e:
		logging.error('HTTP Error : %s', e.code)
		return ''
	except urllib2.URLError as e:
		# DNS failure, refused connection, ... -- degrade to an
		# empty result just like HTTP errors instead of crashing.
		logging.error('URL Error : %s', e.reason)
		return ''
		
def htmlset(url, dataset, path):
	"""Download a single dataset page and store it as a file.

	url     -- base URL of the portal
	dataset -- relative link to the dataset page
	path    -- output directory

	Files that already exist are skipped, which makes a harvest
	resumable. Sleeps 4 seconds after each download to rate-limit.
	"""
	# basename() gives the same result as rsplit('/', 1)[1] but does
	# not raise when the link happens to contain no slash at all.
	outpath = os.path.join(path, os.path.basename(dataset))

	if os.path.exists(outpath):
		logging.info('Skipping %s', outpath)
		return

	data = getdata(url + dataset)
	if data:
		with open(outpath, "w") as outfile:
			outfile.write(data)
		# Be polite to the server between successive downloads
		time.sleep(4)


def antwerplistpage(data):
	"""Extract the dataset links from one page of the paginated list.

	data -- HTML of a datasets overview page

	Returns a list of (relative) hrefs, one per dataset.
	"""
	soup = BeautifulSoup(data)
	div = soup.find('div', class_='view-datasets-faceted')

	# Each dataset is rendered as an <h3> whose anchor links to the set
	return [row.a['href'] for row in div.find_all('h3')]


def antwerplist(url):
	"""Get the list of all datasets available on opendata.antwerpen.be.

	url -- base URL of the portal

	Walks the paginated overview (20 sets per page) and returns all
	collected dataset links. Sleeps 4 seconds between page requests.
	"""
	# Fetch the first page only to find out how many pages there are
	data = getdata(url + '/datasets-faceted')
	soup = BeautifulSoup(data)
	pager = soup.find('ul', class_='pager')
	lastpage = pager.find('li', class_='pager-last')
	href = lastpage.find('a')['href']
	# href looks like '<param>=<lastpage>'; split into name and count
	(param, count) = href.split('=', 1)

	pages = [url + param + '=' + str(i) for i in range(int(count) + 1)]

	# Now collect the links from all the pages
	result = []
	for page in pages:
		result.extend(antwerplistpage(getdata(page)))
		time.sleep(4)

	return result


def main():
	"""Parse the command line and harvest every dataset page."""
	parser = argparse.ArgumentParser(description='Download Antwerpen metadata to HTML files')
	parser.add_argument('--url', help='Data.antwerpen.be url', required=True)
	parser.add_argument('--outdir', help='Output directory', required=True)
	args = parser.parse_args()

	# Fetch the full list first, then download each set one by one
	datasets = antwerplist(args.url)
	for dataset in datasets:
		htmlset(args.url, dataset, args.outdir)


if __name__ == "__main__":
	main()
