# Web scraper (crawler)

import os
import random
import re
import time

import requests


# Pool of User-Agent strings to rotate through.
# Stored as a tuple rather than a set: a pool is meant to be sampled, and
# random.choice() requires an indexable sequence (it raises TypeError on a
# set). Iteration and membership tests behave the same as before.
header_pool = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0",
)


def save(title, txt, base_dir="E:\\dianxin23\\dianxin2301\\teacher\\lesson11\\data\\"):
    """
    Storage helper: clean an article's HTML and write it to a .txt file.

    :param title: article title or id; used as the file-name stem
    :param txt: raw HTML of the article; passed through ``cleaner`` first
    :param base_dir: target directory (must end with a path separator);
        defaults to the original hard-coded path so existing callers are
        unaffected, but can now be overridden for other environments
    """
    # Create the target directory up front so the open() below cannot
    # fail with FileNotFoundError on a fresh machine.
    os.makedirs(base_dir, exist_ok=True)
    file_name = base_dir + title + ".txt"

    txt = cleaner(data=txt)
    with open(file_name, 'w', encoding='utf-8') as wf:
        wf.write(txt)
    print(f"{file_name} 已保存！")


def cleaner(data):
    """
    Strip an HTML document down to plain article text.

    :param data: raw HTML string
    :return: cleaned text — tags removed, whitespace runs collapsed to
        single newlines, everything before the "正文" (body) marker and
        from the "关闭" (close) marker onward dropped
    """
    # Remove whole <style> blocks *including* their CSS content.
    # re.IGNORECASE also matches <STYLE>...</STYLE>; the original pattern
    # was case-sensitive, so uppercase blocks had only their tags stripped
    # and their CSS text leaked into the output.
    data = re.sub(r'<style[\s\S]+?</style>', '', data, flags=re.IGNORECASE)

    # Remove whole <script> blocks including their JS content (same
    # case-insensitivity fix as above).
    data = re.sub(r'<script[\s\S]+?</script>', '', data, flags=re.IGNORECASE)

    # Remove the remaining HTML tags themselves (non-greedy, so each
    # <...> is matched individually).
    data = re.sub(r'<[\s\S]+?>', '', data)

    # Collapse every run of whitespace into a single newline.
    data = re.sub(r'\s+', '\n', data)

    # Drop page header boilerplate: everything up to and including the
    # first "正文" marker (requires at least one character before it).
    data = re.sub(r'[\s\S]+?正文', '', data)

    # Drop page footer boilerplate: "关闭" and everything after it.
    data = re.sub(r'关闭[\s\S]+', '', data)

    return data


def start_req(url, url_id):
    """
    Requester: fetch one page and save it unless it is the site's 404 page.

    :param url: full URL to fetch
    :param url_id: identifier passed to ``save`` as the file-name stem
    """
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        # "Host": "www.baidu.com",
        "Referer": "https://www.gxnzd.edu.cn/",
        "Cookie": "JSESSIONID=4F53315F8473CEE701D1AAE001F1B697",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    }

    print(f"正在请求网址：{url}")

    # (10, 30) = (connect timeout, read timeout) in seconds.
    res = requests.get(url, timeout=(10, 30), headers=headers)

    # Declare the body as UTF-8 and let requests decode it once.
    # The original additionally ran res.content.decode('UTF-8'), which
    # duplicated the decode and raised UnicodeDecodeError on any malformed
    # byte; res.text substitutes replacement characters instead of crashing.
    res.encoding = "UTF-8"
    txt = res.text
    # NOTE(review): the marker check suggests the server serves its error
    # page with a 200 status rather than an HTTP 404 — confirm before
    # adding raise_for_status().
    if "404错误提示" not in txt:
        save(title=url_id, txt=txt)


def main2():
    """
    Fetch a zhipin.com job-listing page 100 times.

    A random pause (up to 10 s) is inserted between requests, matching the
    throttling pattern used by ``main``; the original fired all 100
    requests back-to-back with no delay.
    """
    url = "https://www.zhipin.com/web/geek/job?query=&city=101010100&position=100109&experience=104"

    for _ in range(100):
        start_req(url=url, url_id="01")
        # Polite random delay, same pattern as main().
        time.sleep(round(random.random() * 10, 3))


def main():
    """
    Program entry point: crawl article pages with ids 1000-1412
    and save each one via ``start_req``.
    """
    url_base = "https://www.gxnzd.edu.cn/xg/info/1052/{}.htm"

    for page_id in range(1000, 1413):
        page_id = str(page_id)
        start_req(url=url_base.format(page_id), url_id=page_id)

        # Random pause of up to 10 seconds between requests.
        time.sleep(round(random.random() * 10, 3))


if __name__ == '__main__':
    # main() crawls gxnzd.edu.cn article pages; currently disabled in
    # favour of the zhipin.com loop in main2().
    # main()
    main2()
