import html_download
import html_outputer
import html_parser
import url_manager
import time

class SpiderMain(object):
    """Coordinates a crawl: URL bookkeeping, page download, parsing and output.

    Collaborators (project-local modules, injected at construction):
    url_manager, html_download, html_parser, html_outputer.
    """

    def __init__(self):
        self.urls = url_manager.UrlManger()
        self.downloade = html_download.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()
        # Per-page crawl durations in seconds, appended on each success.
        self.times = []

    def craw(self, root_url, max_pages=1000):
        """Breadth-crawl starting from root_url.

        Stops once successes + failures reach max_pages (default 1000,
        matching the original hard-coded limit), then writes the collected
        data via the outputer. Failures are counted and logged, not raised.
        """
        count = 0    # successfully crawled pages
        wcount = 0   # pages that raised during download/parse
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                sta = time.time()
                new_url = self.urls.get_new_url()

                html_cont = self.downloade.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)

                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                end = time.time()
                count += 1
                self.times.append(end - sta)
                print("craw %d : %s %f" % (count, new_url, end - sta))
                if wcount + count >= max_pages:
                    print("错误页面数： ", wcount)
                    break
            # Narrowed from a bare `except:` so Ctrl-C / SystemExit still
            # propagate; per-page errors remain best-effort (count and go on).
            except Exception:
                wcount += 1
                if wcount + count >= max_pages:
                    # NOTE(review): original behavior kept — hitting the limit
                    # on a failure returns WITHOUT writing output_html (the
                    # success path above does write it). Confirm if intended.
                    return
                print("错误页面数： ", wcount)
                print("craw failed...")

        self.outputer.output_html()

    def timecal(self):
        """Print and return per-page timing stats: (min, max, mean) seconds.

        Returns None (and prints a notice) when no page was timed — the
        original crashed with IndexError/ZeroDivisionError in that case.
        Does not mutate self.times (the original sorted it in place).
        """
        if not self.times:
            print("no timing data")
            return None
        fastest = min(self.times)
        slowest = max(self.times)
        average = sum(self.times) / len(self.times)
        print("最短时间: ", fastest)
        print("最长时间: ", slowest)
        print("平均时间: ", average)
        return fastest, slowest, average

if __name__ == "__main__":
    # Seed URLs tried during development (kept for reference):
    #   Baidu Baike:  "https://baike.baidu.com/item/Python/407313"
    #   Sogou Baike:  "http://baike.sogou.com/v65403902.htm"
    #   ifeng news:   "http://news.ifeng.com/a/20171024/52774540_0.shtml"
    # Current seed: a Douban movie subject page.
    root_url = "https://movie.douban.com/subject/27038183/"

    spider = SpiderMain()
    spider.craw(root_url)
    # Report min/max/mean per-page crawl time.
    spider.timecal()