#!/usr/bin/env python
#coding=utf-8
import urllib2
def download(url, user_agent='wswp', num_retries=2):
	print 'Downloading:', url
	headers = {'User-agent':user_agent}
	request = urllib2.Request(url, headers=headers)
	try:
		html = urllib2.urlopen(request).read()
		print html
	except urllib2.URLError as e:
		print('Download error:', e.reason)
		html = None
		if num_retries > 0:
			if hasattr(e, 'code') and 500 <= e.code < 600:
				#retyr 5XX HTTP errors
				return download(url, user_agent, num_retries-1)
	return html
# Full set of country attributes available on each place page.
# NOTE(review): FIELDSS (double "S") appears unused in this file —
# presumably kept for reference; only FIELDS below drives the scrapers.
FIELDSS = ('area', 'population', 'iso', 'country', 'capital', 'continent', 
	'tld', 'currency_code', 'currency_name', 'phone', 'postal_code_format', 'postal_code_regex', 'languages', 'neighbours')
# Subset of fields actually scraped and benchmarked below.
FIELDS = ( 'population',)

import re

def re_scraper(html):
	"""Extract every FIELDS value from *html* using regular expressions.

	Returns a dict mapping each field name to the text found inside the
	`w2p_fw` table cell of the matching `places_<field>__row` row.
	Raises AttributeError if a field's row is not present in the page.
	"""
	template = '<tr id="places_%s__row">.*?<td class="w2p_fw">(.*?)</td>'
	return {
		field: re.search(template % field, html).group(1)
		for field in FIELDS
	}

from bs4 import BeautifulSoup
def bs_scraper(html):
	"""Extract every FIELDS value from *html* with BeautifulSoup.

	Returns a dict mapping each field name to the text of the `w2p_fw`
	cell inside the `places_<field>__row` table row.
	"""
	soup = BeautifulSoup(html, 'html.parser')
	extracted = {}
	for name in FIELDS:
		row = soup.find('table').find('tr', id='places_%s__row' % name)
		extracted[name] = row.find('td', class_='w2p_fw').text
	return extracted

import lxml.html
def lxml_scraper(html):
	"""Extract every FIELDS value from *html* using lxml CSS selectors.

	Returns a dict mapping each field name to the text content of the
	`w2p_fw` cell in the `places_<field>__row` table row.
	Raises IndexError if a field's row is not present in the page.
	"""
	tree = lxml.html.fromstring(html)
	selector = 'table > tr#places_%s__row > td.w2p_fw'
	return {
		name: tree.cssselect(selector % name)[0].text_content()
		for name in FIELDS
	}
# Timing comparison: benchmark the three scraper implementations.
import time
NUM_ITERATIONS = 1000 # number of times to test
# Fetch the Cuba page once; all scraper iterations reuse the same HTML.
# NOTE(review): if the download fails, html is None and every scraper
# call below will raise — confirm the URL is still reachable.
html = download ('http://example.webscraping.com/places/default/view/Cuba-55')
for name, scraper in [('Regular expressions', re_scraper), ('BeautifulSoup', bs_scraper), ('Lxml', lxml_scraper)]:
	start = time.time()
	for i in range(NUM_ITERATIONS):
		if scraper == re_scraper:
			re.purge() # clear re's internal pattern cache so each iteration pays the compile cost
		result = scraper(html)
		# sanity-check that the scraper extracted the expected value
		assert(result['population'] == '11,423,000')
	end = time.time()
	print '%s: %.2f seconds' % (name, end - start)


