# coding:utf8
'html parser'
from bs4 import BeautifulSoup
import urllib.parse as urlparse


class HtmlParser(object):
    """Parse a Zhihu Daily index page.

    Extracts the 'previous issue' link (for further crawling) and the
    issue's headline plus article list (title / summary / href).
    """

    def parse(self, page_url, html_cont):
        """Parse *html_cont* that was fetched from *page_url*.

        Returns a ``(new_urls, new_data)`` tuple — a set of follow-up URLs
        and a dict of scraped issue data — or ``None`` when either input
        is missing.
        """
        if page_url is None or html_cont is None:
            return None

        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        """Return the set of new URLs to crawl (the 'previous issue' link).

        Always returns a set; it is empty when no ``go-prev`` anchor exists
        (i.e. this is the most recent page), so callers can iterate safely.
        """
        new_urls = set()

        # e.g. <a class="go-prev" href="/daily/?period=1629&fr=daily-index-periodnav">
        link = soup.find(name='a', class_='go-prev')
        if not link:
            print("may be this page is first")
            # BUGFIX: was a bare `return` (None) — callers iterating the
            # result would crash; an empty set is the safe empty answer.
            return new_urls

        # Join the relative href onto the page URL to get an absolute URL.
        new_full_url = urlparse.urljoin(page_url, link['href'])
        new_urls.add(new_full_url)

        print('add new new_full_url = %s ' % new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        """Scrape one issue page into a dict.

        Result shape::

            {
                'periodical': '1633',
                'date': '2018...',
                'content': {
                    'headline': {'title': ..., 'summer': '', 'href': ...},
                    'intell_0': {'title': ..., 'summer': ..., 'href': ...},
                    ...
                }
            }
        """
        res_data = {}

        # Issue number: <div id="num-wp" class="num-wp" data-num="1633">
        res_data['periodical'] = soup.find('div', class_='num-wp').get('data-num')
        # Publication date: <span class="time">...</span>
        res_data['date'] = soup.find('span', class_='time').get_text()

        content_dict = {}

        # Headline: <span class="banner-title">...</span> — has no summary;
        # its link lives on the grandparent element of the title span.
        banner_title = soup.find('span', class_='banner-title')
        content_dict['headline'] = {
            'title': banner_title.get_text(),
            'summer': '',
            'href': banner_title.find_parent().find_parent().get('href'),
        }

        # Article list: <ul class="daily-list" id="daily-list">
        ul_soup = soup.find('ul', id='daily-list')
        i = 0
        for item in ul_soup.find_all('h2'):
            it_title = item.find('a').get_text()
            # BUGFIX: was `return`, which aborted the whole parse and threw
            # away every item already collected; skip only this item.
            # The check also runs BEFORE the summary lookup, so a titleless
            # stray <h2> cannot crash the lookup below.
            if not it_title:
                continue

            # Hoisted: the summary anchor was previously looked up twice.
            summer_a = item.find_parent().find('div', class_='summer').find('a')
            content_dict['intell_' + str(i)] = {
                'title': it_title,
                'summer': summer_a.get_text(),
                'href': summer_a.get('href'),
            }
            i = i + 1

        print('i = ', i)
        res_data['content'] = content_dict
        print(res_data)

        return res_data
