# coding:utf8

from url_manager import UrlManager
from html_downloader import HtmlDownloader
from html_outputer import HtmlOutputer
from html_parser import HtmlParser


class SpiderMain(object):
	"""Crawl coordinator: wires together the URL frontier, downloader,
	parser and outputer, and drives the main crawl loop."""

	def __init__(self):
		# One collaborator per pipeline stage.
		self.urlManager = UrlManager()          # frontier: tracks new vs. visited URLs
		self.htmlDownloader = HtmlDownloader()  # fetches raw HTML for a URL
		self.htmlOutputer = HtmlOutputer()      # accumulates parsed data, renders report
		self.htmlParser = HtmlParser()          # extracts (links, data) from a page

	def craw(self, root_url, max_count=100):
		"""Crawl pages starting from root_url until the frontier is empty
		or max_count pages have been attempted.

		Args:
			root_url: seed URL added to the frontier before the loop starts.
			max_count: maximum number of pages to attempt (default 100,
				matching the previously hard-coded limit).
		"""
		count = 1
		self.urlManager.add_new_url(root_url)
		while self.urlManager.has_new_url():
			try:
				new_url = self.urlManager.get_new_url()
				print(f"{count}:{new_url}")
				html_content = self.htmlDownloader.download(new_url)
				new_urls, new_data = self.htmlParser.parse(new_url, html_content)
				self.urlManager.add_new_urls(new_urls)
				self.htmlOutputer.collect_data(new_data)
			except Exception as e:
				# Deliberate best-effort: one bad page must not stop the crawl.
				print(f"craw failed:{e}")

			# Failed attempts count toward the limit too (preserves original behavior).
			if count == max_count:
				break
			count += 1

		# Emit everything collected so far, even if the loop ended early.
		self.htmlOutputer.show_datas()
			



if __name__ == "__main__":
	# Script entry point: announce start, then crawl from the seed article.
	print("这里是程序入口函数")
	seed_url = r"https://baike.baidu.com/item/AV%E5%A5%B3%E4%BC%98/416320"
	SpiderMain().craw(seed_url)