# Server script: configures and launches a snowcrawl crawl using the test3 processor.
import snowcrawl
import test3_processor as myP

if __name__ == '__main__':
    # Where crawl output lands; seeds span a political spectrum for the classifier.
    output_dir = 'results3/'
    seeds = ['www.whitehouse.gov', 'lowlywonk.blogspot.com', 'dailykos.com']

    # Per-row fields recorded for each processed URL.
    csv_columns = [
        "download_start_time", "download_end_time", "length",
        "classify_start_time", "classify_end_time", "stem_count", "pol_z", "pol_p", "pol_guess",
        "edge_start_time", "edge_end_time", "out_degree", "self_loops",
    ]

    # Crawl configuration: short time slices, edge output only (no state/file dumps),
    # with download/classification handled by the test3 processor module.
    crawl_params = snowcrawl.Parameters(
        time_inc=5,
        time_limit=10,
        wave_size=1000,
        save_states=False,
        save_files=False,
        save_edges=True,
        prioritize_urls=False,
        csv_header=csv_columns,
        process_url_function=myP.downloadAndClassifyUrl,
        decide_terminate_function=myP.fiveWaves,
        processing_params=myP.processing_params,
    )

    # Launch the crawl server and block until the termination condition fires.
    server = snowcrawl.ServerCrawler(myP.port, myP.pw, batch_size=100)
    server.runUntilDone(output_dir, crawl_params, seed_list=seeds, overwrite_existing_files=True)
