import requests
import time
import random
import csv
import re
import json
from lxml import etree
from datetime import datetime

# CSV column order: job title, company name, company ID, company size,
# salary range, city, education requirement, company tags, benefits,
# job description, job URL. Keys must match the dicts built in save_to_csv().
chart_headers = ['职位名称', '公司名称', '公司ID', '公司规模', '薪资范围', '城市', '学历要求', '公司标签','公司福利', '岗位职责', '职位链接']

def init_csv():
    """Initialize the CSV output file and write the header row.

    The file is opened in append mode, so the header is written only when
    the file does not exist yet or is empty — otherwise re-running the
    scraper would append a duplicate header row in the middle of the data
    (the previous version wrote the header unconditionally).
    """
    import os  # local import: os is not imported at module level

    path = "拉钩招聘网技术岗C++相关的在招信息.csv"
    # Header is only needed for a brand-new (or still empty) file.
    needs_header = not os.path.exists(path) or os.path.getsize(path) == 0
    with open(path, mode="a", encoding='utf-8', newline="") as f:
        if needs_header:
            csv.DictWriter(f, chart_headers).writeheader()
    print("csv文件初始化完成")

def save_to_csv(data_dict):
    """Append every job posting from one page's parsed payload to the CSV file.

    Args:
        data_dict: parsed __NEXT_DATA__ JSON returned by crawl_page(); the job
            records live under props.pageProps.initData.content.positionResult.result.

    Fixes vs. the previous version: the file handle is now closed
    deterministically via a context manager (it previously leaked), and the
    trailing ';' that stripping the bare '&nbsp' used to leave behind in the
    job description is removed as well.
    """
    jobs = data_dict['props']['pageProps']['initData']['content']['positionResult']['result']
    # One compiled regex pass removes the simple HTML markup Lagou embeds in
    # the job description: <br>, <br/>, <br /> as well as <p>, </p>, &nbsp(;).
    tag_pattern = re.compile(r'<br\s*/?>|</?p>|&nbsp;?')
    with open("拉钩招聘网技术岗C++相关的在招信息.csv", mode="a", encoding='utf-8', newline="") as f:
        f_csv = csv.DictWriter(f, chart_headers)
        for job in jobs:
            # NOTE(review): companyLabelList / hitags appear to be lists and
            # are written as their Python repr, matching the original output.
            f_csv.writerow({
                "职位名称": job["positionName"],
                '公司名称': job["companyFullName"],
                '公司ID': job["companyId"],
                '公司规模': job["companySize"],
                '薪资范围': job["salary"],
                '城市': job["city"],
                '学历要求': job["education"],
                '公司标签': job["companyLabelList"],
                '公司福利': job["hitags"],
                '岗位职责': tag_pattern.sub("", job["positionDetail"]),
                '职位链接': "https://m.lagou.com/wn/jobs/{}.html".format(job["positionId"])
            })



def crawl_page(session, page):
    """Fetch one page of Lagou C++ search results.

    Args:
        session: a requests.Session (or any object exposing .get(url, headers=, timeout=)).
        page: 1-based page number of the search results.

    Returns:
        The parsed __NEXT_DATA__ JSON payload as a dict, or None on any
        failure (non-200 status, missing embedded data, network error,
        malformed JSON).
    """
    url = "https://www.lagou.com/wn/zhaopin?kd=C%2B%2B&pn={}".format(page)

    headers = {
        # Referer: tells the server which URL we navigated from (unused here)
        # 'Referer': '',
        # User-Agent: identifies the client as a mobile browser
        'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Mobile Safari/537.36 Edg/137.0.0.0',
        # Cookie: carries the logged-in session; the site serves a login wall
        # without it. NOTE(review): these tokens expire — refresh when the
        # scraper starts returning "no data" pages.
        'Cookie': "index_location_city=%E5%85%A8%E5%9B%BD; RECOMMEND_TIP=1; _putrc=38D560406F0B5953123F89F2B170EADC; gate_login_token=v1####882f6213c98f0cf5a74d6b3bf89aa3d148a952c69dd16435448abc0472b8d639; JSESSIONID=ABAACCCABCHACFF7CF796665FB01F6D93A74D4B6A477529; login=true; unick=%E9%BE%9A%E5%B0%9A%E8%BE%89; showExpriedIndex=1; showExpriedCompanyHome=1; showExpriedMyPublish=1; hasDeliver=0; privacyPolicyPopup=false; WEBTJ-ID=20250619231727-19788c42e27aa6-04074123eccc798-4c657b58-1821369-19788c42e28f28; sajssdk_2015_cross_new_user=1; sensorsdata2015session=%7B%7D; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2219788c42e778f8-027c4114ac6ae08-4c657b58-1821369-19788c42e78be9%22%2C%22first_id%22%3A%22%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24os%22%3A%22Windows%22%2C%22%24browser%22%3A%22Chrome%22%2C%22%24browser_version%22%3A%22137.0.0.0%22%7D%2C%22%24device_id%22%3A%2219788c42e778f8-027c4114ac6ae08-4c657b58-1821369-19788c42e78be9%22%7D"
    }

    try:
        # Hard 15-second cap so a stalled connection cannot hang the crawl.
        response = session.get(url, headers=headers, timeout=15)

        if response.status_code != 200:
            print(f"第{page}页请求失败，状态码: {response.status_code}")
            return None

        # The page embeds its data as JSON inside a <script id="__NEXT_DATA__"> tag.
        match = re.search(r'<script id="__NEXT_DATA__" type="application/json">(.*?)</script>', response.text)
        if match is None:
            # Previously this raised IndexError via findall(...)[0] and surfaced
            # as a generic "exception" message; report the real cause instead.
            print(f"第{page}页未找到__NEXT_DATA__数据，可能触发了反爬或登录失效")
            return None

        return json.loads(match.group(1))

    except Exception as e:
        # Boundary catch-all: network errors and malformed JSON both end here;
        # the caller treats None as "stop crawling".
        print(f"爬取第{page}页时发生异常: {str(e)}")
        return None


def crawl_lagou():
    """Top-level crawl loop: fetch up to 30 result pages and append each to the CSV.

    Stops early as soon as a page yields no data (end of results, or the
    site started serving a block/login page). Sleeps a random 10-20 seconds
    between pages to reduce the chance of an IP ban.
    """
    print("开始爬取拉勾网c++岗位数据")
    init_csv()  # make sure the CSV file and its header exist

    session = requests.Session()  # reuse one connection/cookie jar for all pages
    page_limit = 30  # hard cap on the number of result pages to fetch

    for current in range(1, page_limit + 1):
        payload = crawl_page(session, current)

        # None / empty payload means the end of results or a crawl block.
        if not payload:
            print(f"第{current}页无数据，可能已达末尾或遇到限制")
            break

        save_to_csv(payload)

        # Random 10-20s pause between requests to avoid triggering rate limits.
        pause = random.uniform(10, 20)
        print(f"已保存第{current}页数据，等待{pause:.1f}秒后继续...")
        time.sleep(pause)

    print("爬取完成！已保存到:拉钩招聘网技术岗C++相关的在招信息.csv")


if __name__ == '__main__':
    # Entry point: run the scraper only when this file is executed as a script.
    crawl_lagou()