# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2014 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
对抓取网页的解析。

Authors: caopeirui(caopeirui@baidu.com)
Date:	2018/04/19 20:10:41
"""
import logging
import re
import ssl
import urllib
import urllib.parse
import urllib.request

import bs4

import log
import webpage_save as ws
from config_load import config_dict
from url_table import url_bloom_filter

log.init_log("./log/webpage_parse")

# 解决urllib.error.URLError: <urlopen error [SSL: CERTIFICATE_VERIFY_FAILED]
ssl._create_default_https_context = ssl._create_unverified_context

def get_beautifulsoup(url):
    '''Fetch the page at *url* and return it parsed as a BeautifulSoup object.

    Side effects:
        - Marks *url* in the module-level bloom filter (even when the fetch
          later fails, so a broken URL is not retried).
        - If *url* matches the configured ``target_url`` regex, the raw HTML
          is persisted via ``webpage_save``.

    Args:
        url: absolute URL to fetch.

    Returns:
        bs4.BeautifulSoup on success, None when the request or parse fails.
    '''
    # Record that this url has been crawled (before fetching, so failures
    # are not re-queued either).
    url_bloom_filter.set(url)
    try:
        response = urllib.request.urlopen(
            url, timeout=int(config_dict.get('crawl_timeout')))
        # errors="ignore" works around pages that can't be decoded as UTF-8
        # (e.g. byte 0x8b from gzipped/binary responses).
        html = response.read().decode('utf-8', "ignore")
        soup = bs4.BeautifulSoup(html, 'lxml')
    except Exception as e:
        # Network/parse failure: log lazily (no eager %-formatting) and echo
        # to stdout for operator visibility.
        logging.warning('请求%s时出错：%s', url, e)
        print('[WARNING] 请求%s时出错：%s' % (url, e))
        return None
    else:
        # Persist pages whose url matches the configured target pattern.
        target_url = config_dict.get('target_url')
        if re.search(target_url, url):
            ws.save_webpage(url, html)
        return soup


def get_href(url):
    '''Fetch *url*, collect every <a> tag, and return its hyperlinks.

    Note: this also covers requesting the page and (via get_beautifulsoup)
    saving pages that match the configured target pattern.

    Args:
        url: absolute URL to fetch and scan for links.

    Returns:
        list of absolute URLs (relative hrefs are resolved against *url*),
        or None when the page could not be fetched.
    '''
    soup = get_beautifulsoup(url)
    if not soup:
        return None
    # find_all returns [] when there are no <a> tags, so no try/except is
    # needed here (the original guarded a branch that could never fire).
    hrefs = [anchor.get('href') for anchor in soup.find_all('a')]
    # <a> tags without an href attribute yield None; urljoin(url, None)
    # raises TypeError, so filter those out before resolving.
    return [urllib.parse.urljoin(url, href)
            for href in hrefs if href is not None]

if __name__ == '__main__':
    # Smoke test: crawl the demo site root and print the links it contains.
    links = get_href('http://pycm.baidu.com:8081')
    print(links)
    # NOTE(review): this assignment is never used afterwards — likely a
    # leftover from manual testing.
    url = 'http://pycm.baidu.com:8081/page1.html'

    
