# Import libraries
import urllib.request
import re
import json
from bs4 import BeautifulSoup

class Spider(object):
    """Scrape job listings from www.0315.cc and dump them to ./work3.json."""

    def __init__(self, begin_page=None, end_page=None):
        """
            @brief Record the inclusive page range to crawl.
            @param begin_page first page number; prompted on stdin when None
            @param end_page   last page number; prompted on stdin when None
        """
        # Optional parameters make the class scriptable/testable while
        # preserving the original interactive behaviour.
        if begin_page is None:
            begin_page = input("请输入起始页：")
        if end_page is None:
            end_page = input("请输入终止页：")
        self.begin_page = int(begin_page)
        self.end_page = int(end_page)
        # URL prefix; the page number and a fixed query tail are appended.
        self.base_url = "https://www.0315.cc/job/fulllist_20_"
        # Prefix used to turn relative detail links into absolute URLs.
        # (Was assigned inside the parse loop; hoisted here as plain state.)
        self.base_url_update = "https://www.0315.cc/job/"

    def load_page(self):
        """
            @brief Fetch every page in the configured [begin, end] range.
            @return the concatenated, decoded HTML of all requested pages
        """
        user_agent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36"
        headers = {"User-Agent": user_agent}
        html = ''
        for page in range(self.begin_page, self.end_page + 1):
            url = self.base_url + str(page) + "_0_-1_0_0_0_0_0_-1_-1_-1_0.html?search=-1"
            request = urllib.request.Request(url, headers=headers)
            # Context manager guarantees the connection is closed
            # (the original leaked the response object).
            with urllib.request.urlopen(request) as response:
                # Decode and strip non-breaking spaces.
                html += response.read().decode("utf-8").replace('\xa0', '')
        return html

    # Parse the page data with BeautifulSoup.
    def parse_page(self, html):
        """
            @brief Extract job items from the fetched HTML and save them.
            @param html concatenated HTML returned by load_page()
        """
        # Each fetched page is a complete <html> document; split them apart.
        html_list = re.findall('<html>.*?</html>', html, re.S)
        print(len(html_list))
        # Accumulate items across ALL pages before saving: the original
        # called save_file() once per page, and since save_file truncates
        # the output file, every page but the last was lost.
        items = []
        for doc in html_list:
            # lxml parser for speed/leniency.
            soup = BeautifulSoup(doc, 'lxml')
            # CSS selector: every <li> inside the #work_list container.
            for site in soup.select('#work_list li'):
                item = {}
                item['职位名称'] = site.select('.left > h3 > a')[0].get_text()           # job title
                welfare = site.select('.g-tip02')[0].get_text()                          # benefits
                # Collapse the multi-line benefits text into one line.
                item['福利待遇'] = welfare.strip().replace('\n', '，')
                detail_link = site.select('.inner > a')[0].attrs['href']                 # relative detail link
                item['详情链接'] = self.base_url_update + detail_link
                # The category text sits between </em> and <em> in the raw markup.
                item['职位类别'] = re.findall(r'</em>(.*?)<em>', str(site.select('.g-desc01 > p')[0]))[0]
                item['公司名称'] = site.select('.right > h3 > a')[0].get_text()          # company name
                item['工作地点'] = site.select('div.g-desc01 > p > span')[0].get_text()  # work location
                item['发布时间'] = site.select('span.time')[0].get_text()                # publish time
                items.append(item)
        print(items)
        self.save_file(items)

    # Persist the scraped data.
    def save_file(self, items):
        """
            @brief Write the collected items to ./work3.json (overwrites the file).
            @param items list of job dicts produced by parse_page()
        """
        # Explicit utf-8 so the Chinese keys survive regardless of the
        # platform's default encoding (the original relied on the locale).
        with open('./work3.json', 'w', encoding='utf-8') as fp:
            json.dump(items, fp, ensure_ascii=False, indent=4)

if __name__ == '__main__':
    # Crawl the configured page range, then parse and persist the results.
    crawler = Spider()
    raw_html = crawler.load_page()
    # Strip the heavy-plus-sign character (U+2795) before parsing.
    crawler.parse_page(raw_html.replace('\u2795', ''))
