from bs4 import BeautifulSoup
from datasave import DataSave
import re
import requests

class HtmlParse:
    """Extracts tech-article URLs and article metadata from cnBeta HTML pages."""

    def parse_data(self, page_url, data):
        """Parse one page and return ``(urls, article_data)``.

        page_url: URL of the page being parsed (stored into the result dict).
        data: raw HTML text of the page.
        Returns ``None`` when either argument is missing.
        """
        print("现在开始分析提取数据")
        # Nothing to do without both a URL and page content.
        if page_url is None or data is None:
            return None
        soup = BeautifulSoup(data, 'lxml')
        # Collect outgoing tech-article links and this page's own data.
        urls = self.get_urls(soup)
        page_data = self.get_data(page_url, soup)
        return urls, page_data

    def get_urls(self, soup):
        """Return the list of tech-article hrefs found in *soup*."""
        # Anchors whose href contains "/tech/" point at tech-category articles.
        return [link['href'] for link in soup.select('a[href*="/tech/"]')]

    def get_data(self, page_url, soup):
        """Return a dict with the article's url, title and release date.

        Missing title/date elements yield ``None`` values instead of raising
        (the original crashed with AttributeError on pages lacking the
        expected markup, because select_one returns None on no match).
        """
        data = {'url': page_url}
        # select_one picks the first matching element, or None.
        title = soup.select_one('.cnbeta-article > header > h1')
        release_date = soup.select_one('.cnbeta-article > header > .meta > span')
        data['title'] = title.get_text() if title is not None else None
        data['release_date'] = release_date.get_text() if release_date is not None else None
        print("文章url：{0}".format(page_url))
        print("数据url：{0}".format(data))
        return data


if __name__ == "__main__":
    # Demo run: fetch one cnBeta tech article, parse it, and save the result.
    url = 'http://www.cnbeta.com/articles/tech/811395.htm'
    save = DataSave('C:/Users/lenovo/Desktop/file.txt')
    # A timeout keeps the script from hanging forever on an unresponsive host.
    response = requests.get(url, timeout=10)
    # Fail fast on HTTP errors rather than parsing an error page.
    response.raise_for_status()
    response.encoding = 'utf-8'
    parse = HtmlParse()
    result = parse.parse_data(url, response.text)
    # parse_data returns None when either argument is missing; guard the
    # unpack so a failed parse does not raise a confusing TypeError.
    if result is not None:
        u, d = result
        save.save(d)
        print(u, d)
