#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date    : 2018-08-21 11:28:21
# @Author  : wanglele (18911756410@163.com)
# @Link    : None
# @Version : "Python 3.7"

import os
import re
import sys
from urllib import parse

from bs4 import BeautifulSoup


class HtmlParser:
    """Parse a downloaded Baidu Baike page: extract follow-up entry links
    and the person-data fields we care about.

    NOTE: the CSS classes and info-box labels used below are specific to
    baike.baidu.com; adapt them when crawling another site.
    """

    # Links to other encyclopedia entries all live under /item/.
    # Compiled once here instead of inside the per-<dd> loop.
    _ITEM_HREF = re.compile(r"/item/[\s\S]*?")

    def parser(self, page_url, html_cont):
        """Parse one downloaded page.

        page_url:  URL the page was downloaded from (used to resolve
                   relative hrefs).
        html_cont: raw HTML content returned by the downloader.

        Returns (new_urls, new_data) — a set of absolute URLs to crawl
        next and a dict of extracted fields — or None when there is
        nothing to parse.
        """
        # Bail out on a missing URL or empty content.  The previous
        # `len(html_cont) == 0` raised TypeError when the downloader
        # returned None; a truthiness check covers both None and "".
        if page_url is None or not html_cont:
            return None

        soup = BeautifulSoup(html_cont, "lxml")

        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        """Collect the absolute URLs of all entry links (<a href="/item/...">)
        found inside <dd> elements of <div class="info"> blocks.

        Returns a set, so duplicates are removed automatically.
        """
        anchors = [
            a
            for info in soup.find_all("div", class_="info")
            for dd in info.find_all("dd")
            for a in dd.find_all("a", href=self._ITEM_HREF)
        ]
        # Resolve relative hrefs against the page URL and deduplicate.
        return {parse.urljoin(page_url, a["href"]) for a in anchors}

    def _get_new_data(self, page_url, soup):
        """Extract the fields of interest from a person's entry page.

        Returned dict keys (kept as-is for backward compatibility,
        including the historical 'bridth' spelling): url, name, infos,
        bridth, bridth_addr, zuopins.  Any field missing from the page
        is set to None.
        """
        data = {"url": page_url}

        # ---- Baidu-Baike-specific selectors below; change per site ----

        # Entry title (the person's name) sits in an <h1> inside the
        # lemma-title <dd>.
        name = soup.find("dd", class_="lemmaWgt-lemmaTitle-title")
        data["name"] = name.h1.get_text() if name else None

        # Summary paragraph; \xa0 (non-breaking space) is layout noise.
        infos = soup.find("div", class_="lemma-summary")
        data["infos"] = infos.get_text().replace("\xa0", "") if infos else None

        # Date of birth / place of birth share the same info-box layout,
        # so both go through one helper.
        data["bridth"] = self._info_value(soup, "出生日期")
        data["bridth_addr"] = self._info_value(soup, "出生地")

        # Major works: each <b class="title"> wraps an <a> with the title.
        zuopins = soup.find_all("b", class_="title")
        if zuopins:
            data["zuopins"] = [z.a.get_text().strip() for z in zuopins]
        else:
            data["zuopins"] = None

        return data

    @staticmethod
    def _info_value(soup, label):
        """Return the stripped text of the info-box value cell following
        the label `label` (the sibling element after the label's parent),
        or None when the label is absent from the page.
        """
        # `string=` is the modern name of the old `text=` argument
        # (BeautifulSoup >= 4.4).
        hits = soup.find_all(string=label)
        if not hits:
            return None
        # The label is a text node: step up to its containing tag, then
        # over to the next sibling, which holds the value.
        return hits[0].find_previous().find_next_sibling().get_text().strip()
