import os
import re

# 爬虫

import requests
from twisted.conch.insults.insults import const


def saver(content, content_id,
          dir_path="D:\\pythonDev\\dianxin2302\\teacher\\lesson11\\data\\"):
    """
    Save scraped text to a .txt file.

    content: the (already cleaned) page text to write
    content_id: number or title used as the file-name stem
    dir_path: target directory; defaults to the original hard-coded
        location for backward compatibility, but can now be overridden
        so the function is reusable outside that one machine

    Raises OSError if the directory does not exist or is not writable.
    """
    # os.path.join handles the separator correctly whether or not
    # dir_path ends with one.
    file_name = os.path.join(dir_path, content_id + ".txt")

    with open(file_name, 'w', encoding='utf-8') as wf:
        wf.write(content)

    print(f"保存成功：{file_name}")


def data_cleaner(content):
    """
    Reduce raw HTML page source to plain article text.

    content: the page's raw HTML source
    Returns the cleaned text with scripts, styles, tags, and the
    site's header/footer noise stripped out.
    """
    # Ordered cleanup pipeline: (pattern, replacement, count).
    # count=0 replaces every match; count=1 only the first.
    cleanup_steps = (
        # drop embedded JavaScript blocks
        (r'<script[\s\S]+?</script>', '', 0),
        # drop CSS style sheets
        (r'<style[\s\S]+?</style>', '', 0),
        # drop the remaining HTML tags
        (r'<[\s\S]+?>', '', 0),
        # collapse runs of whitespace into single newlines
        (r'[\s]{2,}', '\n', 0),
        # cut everything before the first "正文" marker (header/nav)
        (r'[\s\S]+?正文', '', 1),
        # cut the trailing "关闭 / 上一篇 / 下一篇" footer links
        (r'关闭\s+[上下]一篇：[\s\S]+', '', 0),
    )

    for pattern, replacement, max_subs in cleanup_steps:
        content = re.sub(pattern, replacement, content, count=max_subs)

    # trim any blank space left at either end
    return content.strip()



def req():
    """
    Request loop for gxnzd.edu.cn article pages.

    Iterates article ids 1026-1499, fetches each page, skips the ones
    that come back as the site's 404 page, and saves the cleaned text
    of the rest via saver().
    """
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        # "Host": "www.baidu.com",
        "Referer": "https://www.gxnzd.edu.cn/",
        "Cookie": "JSESSIONID=4F53315F8473CEE701D1AAE001F1B697",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    }
    url_base = "https://www.gxnzd.edu.cn/xg/info/1052/{}.htm"
    for i in range(1026, 1500):
        url = url_base.format(str(i))

        print(f"正在请求：{url}")
        try:
            res = requests.get(url, timeout=(10, 30), headers=headers)
        except requests.RequestException as e:
            # One failed or timed-out request must not abort the whole
            # 474-page crawl; report it and continue with the next id.
            print(f"请求失败：{url} ({e})")
            continue

        # Decode the raw bytes once. (The original also set
        # res.encoding, but that only affects res.text, not
        # res.content.decode — it was dead code.) errors='replace'
        # keeps a stray non-UTF-8 byte from killing the loop.
        txt = res.content.decode('utf-8', errors='replace')
        if "404错误提示" not in txt:
            txt = data_cleaner(content=txt)
            saver(content=txt, content_id=str(i))


def req2():
    """
    Request loop for zhipin.com job-listing pages.

    Fetches the search-result page 100 times and saves each cleaned
    response as a numbered .txt file via saver().
    """
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Encoding": "gzip, deflate, br, zstd",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Connection": "keep-alive",
        # "Host": "www.baidu.com",
        # "Referer": "https://www.gxnzd.edu.cn/",
        "Cookie": "JSESSIONID=4F53315F8473CEE701D1AAE001F1B697",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
    }

    # NOTE(review): every iteration requests the exact same URL, so all
    # 100 saved files will hold identical content — presumably a page
    # parameter is missing from the query string; confirm the intended
    # pagination before relying on this output.
    for i in range(0, 100):
        url = "https://www.zhipin.com/web/geek/job?query=Python%E7%88%AC%E8%99%AB%E5%B7%A5%E7%A8%8B%E5%B8%88&city=101270100"

        print(f"正在请求：{url}")
        try:
            res = requests.get(url, timeout=(10, 30), headers=headers)
        except requests.RequestException as e:
            # Keep crawling even if one request fails or times out.
            print(f"请求失败：{url} ({e})")
            continue

        # Decode the raw bytes once. (Setting res.encoding only affects
        # res.text, so the original assignment was redundant.)
        txt = res.content.decode('utf-8', errors='replace')
        txt = data_cleaner(content=txt)
        saver(content=txt, content_id=str(i))




# Script entry point: run the zhipin.com request loop when executed
# directly (the gxnzd.edu.cn crawl in req() is not invoked here).
if __name__ == '__main__':
    req2()
