import logging
import urllib2
from lxml.html import fromstring
from lxml import etree
import sys
sys.path.append("..")
from oj_data.problem_info import ProblemInfo

class POJSpider:
	"""Crawler for poj.org.

	Downloads problem statements and a user's status page over HTTP
	(via urllib2 -- this module targets Python 2) and parses them with
	lxml.
	"""

	# URL templates; the problem id / user id is appended to form the page URL.
	problem_base_url = "http://poj.org/problem?id="
	solved_pro_list_base_url = "http://poj.org/userstatus?user_id="

	def crawl_problem (self, id):
		"""
		Download and parse one problem page.

		id -- problem number (anything int() accepts; note: shadows the
		      builtin `id`, kept for caller compatibility).
		return None or ProblemInfo instance
		"""
		try:
			id = int(id)
		except (TypeError, ValueError):
			# BUG FIX: `except TypeError, ValueError:` bound the caught
			# TypeError to the name ValueError and never caught a real
			# ValueError; `Logger` was also an undefined name (NameError).
			logging.getLogger(__name__).error("id is not number!")
			return None
		url = self.problem_base_url + str(id)
		content = urllib2.urlopen(url).read()
		content = unicode(content, 'utf-8')
		doc = fromstring(content)
		doc.make_links_absolute(url)

		try:
			title = doc.xpath("//div[@class='ptt']")[0].text_content()
			# The slices strip the "Time Limit: ...MS" / "Memory Limit: ...K"
			# label text and unit suffix, leaving only the numeric value.
			time_limit = doc.xpath("//div[@class='plm']/table/tr[1]/td[1]")[0].text_content()[12:-2]
			mem_limit = doc.xpath("//div[@class='plm']/table/tr[1]/td[3]")[0].text_content()[14:-1]
			ptx = doc.xpath("//div[@class='ptx']")
			description = etree.tostring(ptx[0])
			pro_input = etree.tostring(ptx[1])
			pro_output = etree.tostring(ptx[2])
			# Some problems carry an extra "Hint" section before "Source".
			# NOTE(review): pro_hint is parsed but currently discarded --
			# persist it once ProblemInfo is confirmed to accept a 'hint' key.
			if len(ptx) >= 5:
				pro_hint = etree.tostring(ptx[3])
				pro_source = ptx[4].text_content()
			else:
				pro_source = ptx[3].text_content()
			sio = doc.xpath("//pre[@class='sio']")
			pro_sample_in = etree.tostring(sio[0])
			pro_sample_out = etree.tostring(sio[1])
		except IndexError:
			# BUG FIX: was a bare `except:` that swallowed everything
			# (including KeyboardInterrupt).  An IndexError here means the
			# page lacked the expected layout, e.g. a nonexistent problem id.
			logging.getLogger(__name__).error(
				"unexpected page layout for problem %s", id)
			return None

		problem_dict = {
			'title': title,
			'time_limit': time_limit,
			'mem_limit': mem_limit,
			'description': description,
			'input': pro_input,
			# BUG FIX: pro_output was parsed but never stored in the dict.
			'output': pro_output,
			'sample_in': pro_sample_in,
			'sample_out': pro_sample_out,
			'source': pro_source,
		}

		return ProblemInfo(problem_dict)

######################################################################
	def crawl_user_pro_list (self, usr_id):
		"""
		Download the user-status page for usr_id.

		usr_id -- POJ user name (string, appended to the status URL).
		return list of problems that user solved
		"""
		url = self.solved_pro_list_base_url + usr_id
		content = urllib2.urlopen(url).read()
		content = unicode(content, 'utf-8')
		doc = fromstring(content)
		doc.make_links_absolute(url)

		# NOTE(review): this absolute path matches a single table cell that
		# presumably holds the solved-problem links -- verify against the
		# live page layout.
		path = "/html/body/center[1]/table[1]/tr[3]/td[3]"
		results = doc.xpath(path)
		# BUG FIX: removed the blocking raw_input() debugging pause, the
		# debug prints, and dead commented-out code; return the parsed text
		# as the docstring promises (the function previously returned None,
		# which is backward-compatible for callers that ignored the result).
		return [r.text_content() for r in results]
		
		

if __name__ == '__main__':
	# Ad-hoc manual check: crawl one known user's solved-problem page.
	POJSpider().crawl_user_pro_list('tatc')
