# -*- coding:utf-8 -*-
import html_downloader
import html_outputer
import html_parser
import url_manager


class SpiderMain(object):
    """Crawler driver for aqistudy.cn historical AQI data.

    Wires together the four project components: URL manager, HTML
    downloader, HTML parser and the outputer that persists results.
    """

    def __init__(self):
        # Initialize the URL manager, downloader, parser and outputer.
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    # Crawl the city list from http://www.aqistudy.cn/historydata/index.php
    def crawcity(self, root_url):
        """Download the index page, extract the per-city links and write
        them out via the outputer."""
        count = 1
        self.urls.add_new_url(root_url)
        if self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print('抓取城市 %d : %s' % (count, new_url))
                html_cont = self.downloader.download(new_url)
                cities = self.parser._get_cities_urls(new_url, html_cont)
                self.outputer.collect_citydata(cities)
                count += 1
            except Exception as e:
                # Best-effort: report the failure and still emit whatever
                # was collected so far.
                print(e)
                print('抓取城市失败，请重试')
        self.outputer.output_cities()

    def crawtime(self, cityUrl):
        """Read "name,url" lines from city.txt and crawl each city's
        month index.

        NOTE: the ``cityUrl`` argument is ignored (kept only for
        backward compatibility with existing callers); the city list
        always comes from city.txt.
        """
        # ``with`` guarantees the file is closed even if a crawl raises.
        with open("city.txt", "r") as f:
            for line in f:
                line = line.strip()
                if not line:
                    # Skip blank lines instead of crashing on data[1].
                    continue
                data = line.split(",")
                if len(data) < 2:
                    # Malformed line without a comma: skip it.
                    continue
                cityname = data[0]
                cityUrl = data[1]
                print("正准备抓取 %s 链接 %s" % (cityname, cityUrl))
                self.crawcityhistoryUrl(cityname, cityUrl)

    # Crawl the month-index page of a single city.
    def crawcityhistoryUrl(self, cityname, cityurl):
        """Fetch ``cityurl``, extract the per-month links and crawl each
        month's AQI table."""
        count = 1
        self.urls.add_new_url(cityurl)
        if self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print('抓取 %s月 %d: %s' % (cityname, count, new_url))
                html_cont = self.downloader.download(new_url)
                new_data = self.parser._get_citytimes_urls(new_url, html_cont)
                # Month-level links are not written out; each month page
                # is crawled in turn for its daily AQI table.
                for month in new_data:
                    monthname = month["name"]
                    monthurl = month["url"]
                    self.crawcityaqitable(cityname, monthname, monthurl)
                count += 1
            except Exception as e:
                print(e)
                print('抓取城市失败，请重试')

    def crawcityaqitable(self, cityname, monthname, monthurl):
        """Download one month page; the parser writes the AQI table rows
        straight to a txt file, so nothing is collected here."""
        count = 1
        self.urls.add_new_url(monthurl)
        if self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print('抓取%s的 %s %d: %s' % (cityname, monthname, count, new_url))
                html_cont = self.downloader.download(new_url)
                # The table is scraped row by row and persisted directly
                # by the parser, hence no outputer call.
                self.parser._get_aqitable(new_url, html_cont, cityname)
                count += 1
            except Exception as e:
                print(e)
                print('抓取城市失败，请重试')


if __name__ == "__main__":
    # Entry point: start from the historical-data index page.
    root_url = "http://www.aqistudy.cn/historydata/index.php"
    spider = SpiderMain()
    # Step 1 (building the city list) is disabled; step 2 reads the
    # already-generated city.txt and crawls each city's history.
    # spider.crawcity(root_url)
    spider.crawtime(root_url)
