import re
from bs4 import BeautifulSoup


class CxHTMLParser(object):
    """Parser for www.woniuxy.com pages.

    List pages (URL contains "page") yield article/next-page links;
    any other URL is treated as an article detail page and yields a
    dict with title, publish date and read count.
    """

    def Cxparser(self, page_url, html_content):
        """Dispatch parsing based on the URL type.

        Returns a list of URLs for list pages, a data dict for detail
        pages, or None if parsing fails.
        """
        try:
            # List pages contain "page" in their URL; everything else
            # is assumed to be an article detail page.
            if "page" in page_url:
                return self.Cx_get_new_urls(html_content)
            return self.Cx_get_new_data(html_content)
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; parse failures are logged and swallowed.
        except Exception:
            print("URL：" + page_url + " 解析失败！！！！----------------------------")
            return None

    # Extract all links from a list page
    def Cx_get_new_urls(self, html):
        """Return the detail-page URLs on a list page plus the next-page URL."""
        bs = BeautifulSoup(html, 'lxml')
        base_url = 'http://www.woniuxy.com'
        content = bs.find_all(class_="col-lg-12 col-md-12 col-sm-12 col-xs-12 article-list box")
        # The first <a> inside each article card holds the relative detail link.
        content_url = [base_url + data('a')[0].attrs.get('href') for data in content]
        # The "下一页" (next page) text node's parent <a> carries the href.
        next_url = base_url + bs.find_all(text="下一页")[0].find_parent().attrs.get('href')
        content_url.append(next_url)
        return content_url

    # Extract data from a detail page
    def Cx_get_new_data(self, html):
        """Return a dict with the article's title, publish date and read count."""
        bs = BeautifulSoup(html, 'lxml')
        title = bs.find_all(class_="col-lg-10 col-md-10 col-sm-10 col-xs-10 title")[0].string.strip()
        info = bs.find_all(class_="col-lg-12 col-md-12 col-sm-12 col-xs-12 info")
        # Hoist the repeated .string.strip() — the original recomputed it
        # five times on the same element.
        info_text = info[0].string.strip()
        date_num = info_text.find("日期：")
        read_num = info_text.find("阅读：")
        end_num = info_text.find("消耗积分：")
        # len(marker) skips past the label (== the original "+ 3").
        # `end_num - 4` trims the 4 characters before "消耗积分：" —
        # preserved exactly from the original offsets; assumes the site's
        # fixed info-line layout (TODO confirm against a live page).
        date = info_text[date_num + len("日期："):read_num].strip()
        read = info_text[read_num + len("阅读："):end_num - 4].strip()
        # Keys are the Chinese field names consumed by callers — unchanged.
        return {
            "标题": title,
            "发布时间": date,
            "阅读次数": read,
        }
