# -*- coding:utf-8 -*-
import urlparse
import re

from bs4 import BeautifulSoup


class HtmlParser(object):
    """Parse aqistudy-style HTML pages into city/month link lists and
    append scraped AQI table rows to data.txt as CSV lines."""

    def _get_cities_urls(self, page_url, html_cont):
        """Extract city links (anchors pointing at monthdata.php).

        page_url: URL the page was fetched from; base for relative hrefs.
        html_cont: raw HTML content of the page.
        Returns a list of {'name': city_text, 'url': absolute_url} dicts,
        or None when either argument is missing.
        """
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        # [^xyz] mainly requires at least one character after "city=",
        # i.e. it skips anchors with an empty city parameter.
        links = soup.find_all('a', href=re.compile(r"monthdata\.php\?city=[^xyz]"))
        cities = []
        for link in links:
            # urljoin resolves relative hrefs against page_url, so the
            # domain never needs to be extracted separately.
            full_url = urlparse.urljoin(page_url, link['href'])
            cities.append({'name': link.get_text(), 'url': full_url})
        print("共计%d个城市" % len(cities))
        return cities

    def _get_citytimes_urls(self, page_url, html_cont):
        """Extract per-month links (anchors pointing at daydata.php) from a
        city page, de-duplicated while preserving first-seen order.

        Returns a list of {'name': month_text, 'url': absolute_url} dicts,
        or None when either argument is missing.
        """
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        links = soup.find_all('a', href=re.compile(r"daydata\.php\?city=[^xyz]"))
        times = []
        for link in links:
            full_url = urlparse.urljoin(page_url, link['href'])
            times.append({'name': link.get_text(), 'url': full_url})
        print("共计%d个月" % len(times))
        # De-duplicate in O(n) with a seen-set of hashable (name, url)
        # pairs instead of an O(n^2) "not in list" scan; first-seen order
        # and the resulting list are identical.
        seen = set()
        distinct_data = []
        for item in times:
            key = (item['name'], item['url'])
            if key not in seen:
                seen.add(key)
                distinct_data.append(item)
        print("月份数据去重处理后有%d月" % len(distinct_data))
        return distinct_data

    def _get_aqitable(self, page_url, html_cont, cityname):
        """Scrape the first AQI table on the page and append one CSV line
        per data row (prefixed with cityname) to data.txt.

        Rows containing <th> cells (header rows) are skipped. Returns None;
        the output is the data.txt side effect.
        """
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        tables = soup.findAll('table')
        if not tables:
            # Unexpected page layout with no table: nothing to record
            # (previously this raised IndexError on tables[0]).
            return
        for tr in tables[0].findAll('tr'):
            # Skip the header row.
            if tr.findAll('th'):
                continue
            self.output_datas(cityname + ",")
            for td in tr.findAll('td'):
                self.output_datas(td.getText().strip() + ",")
            self.output_datas("\n")

    def output_datas(self, value):
        """Append value to data.txt.

        Uses a `with` block so the handle is flushed and closed on every
        call; a bare open() here leaked the file descriptor and could
        lose buffered data.
        """
        with open('data.txt', 'a') as fout:
            fout.write(value)
