from PolicyData.Policy_Craw import html_parser
from PolicyData.Policy_Craw import sql_connect
from PolicyData.Policy_Craw import html_downloader

class SpiderMain(object):
    """Scheduler for the chinajob.gov.cn "latest policy" spider.

    Wires together the page downloader, the HTML parser and the SQL
    writer, walks every listing page to collect article links, then
    downloads, parses and persists each article.
    """

    def __init__(self):
        # Collaborators: HTTP fetcher, HTML parser, and database writer.
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.sql = sql_connect.Sql()

    def craw(self, root_url, page_count=14):
        """Crawl the listing pages, then scrape and store every article.

        Args:
            root_url: URL prefix for listing pages 2..page_count; the page
                number and ".htm" are appended to it. Page 1 lives at a
                fixed, differently-shaped URL.
            page_count: number of listing pages to visit (default 14 —
                the site's page count at the time of writing).
        """
        root_links = []  # article-page links gathered from all listing pages
        for page in range(1, page_count + 1):
            print(page)
            # The first listing page has its own URL; the rest are numbered.
            if page == 1:
                page_url = 'http://www.chinajob.gov.cn/EmploymentServices/node_424.htm'
            else:
                page_url = root_url + str(page) + '.htm'
            listing_html = self.downloader.download(page_url)
            # get_all_state returns the list of article links on this page.
            root_links.extend(self.parser.get_all_state(listing_html))
        print('最新政策所有链接 : %s %d' % (root_links, len(root_links)))

        # Download, parse and persist each article. One broken page must
        # not abort the whole crawl, so failures are reported and skipped
        # (but no longer swallowed by a bare except, which also hid
        # KeyboardInterrupt/SystemExit and the actual error message).
        for index, new_url in enumerate(root_links):
            try:
                html_cont = self.downloader.download(new_url)
                # The parser receives the index so records stay ordered.
                new_data = self.parser.parse(new_url, html_cont, index)
                print('A dict of data,main@craw?1', new_data)
                self.sql.output_sql(new_data)
            except Exception as exc:
                print('craw failed', exc)

    def get_all_url(self):
        """Convenience entry point: run a full crawl from the default root.

        NOTE(review): this spawns a fresh SpiderMain instead of reusing
        ``self`` — redundant, but kept as-is for backward compatibility.
        """
        root_url = "http://www.chinajob.gov.cn/EmploymentServices/node_424_"
        obj_spider = SpiderMain()
        obj_spider.craw(root_url)
if __name__ == "__main__":
    # Script entry point: crawl the full numbered listing from scratch.
    listing_prefix = "http://www.chinajob.gov.cn/EmploymentServices/node_424_"
    spider = SpiderMain()
    spider.craw(listing_prefix)
