import requests
import time
import random
import json
from bs4 import BeautifulSoup
import csv
import os
from urllib.parse import urlencode

class BossSpider:
    """Scraper for job listings on zhipin.com (Boss直聘).

    Queries the geek-search joblist JSON endpoint page by page, extracts a
    fixed set of fields per job, and saves the results as CSV and/or JSON
    under ``self.output_dir``. A valid logged-in Cookie must be supplied in
    the headers or the endpoint rejects requests.
    """

    def __init__(self):
        self.base_url = 'https://www.zhipin.com/wapi/zpgeek/search/joblist.json'
        # Pool of common User-Agent strings; one is chosen at random per request
        # to make traffic look less uniform.
        self.user_agents = [
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.107 Safari/537.36',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.2 Safari/605.1.15',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:90.0) Gecko/20100101 Firefox/90.0',
            'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36'
        ]
        self.headers = {
            'User-Agent': random.choice(self.user_agents),
            'Accept': 'application/json, text/plain, */*',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Referer': 'https://www.zhipin.com/web/geek/job?query=%E5%89%8D%E7%AB%AF&city=101220100',
            'Cookie': '__zp_seo_uuid__=e238fae3-4689-4f59-98e3-e0e50bef0e24; __g=-; wt2=DAj-NY-tRD3Z6UMbH715kTys5-AD9Zm4XgVsy0GnjiveRPIX5fw2gpZPjtmBmeMAXvnZmgF9CT8VmyiJC-CMcIQ~~; wbg=0; zp_at=mXp48LZY6OF_YWcYfYQZc7-o18Q8O2pp8VuRy0giYI4~; ab_guid=e0769d42-e87f-4194-a223-8217aa096601; lastCity=101220100; Hm_lvt_194df3105ad7148dcf2b98a91b5e727a=1741675287; HMACCOUNT=92356EB2A996262F; __c=1741673666; __l=r=https%3A%2F%2Fwww.google.com%2F&l=%2Fwww.zhipin.com%2Fhefei%2F%3Fka%3Dheader-home&s=3&friend_source=0&s=3&friend_source=0; __a=51502859.1741673666..1741673666.24.1.24.24; Hm_lpvt_194df3105ad7148dcf2b98a91b5e727a=1742439054; bst=V2RtouEeD80ldgXdJtzB8fLSOw7DLUww~~|RtouEeD80ldgXdJtzB8fLSOw7D_Qww~~; __zp_stoken__=4e5dfw5fDin4kRiJ1JHQkIMKKVsOMYHXDjHB9w4%2FCg2DDh8OOw5DCu2fCvmvCs13CvnDCssOQwrx8wqLCu8SMfsKnw4bCqcOEw4V%2BxJPChcKtbMKYYsKtxKjDu8S9w7XEksKow4zCs0o%2FGRcXGhcZFxcaFxcZGB0YGhwcGRwWGBgdGEdAxIzCsRpJRkZJOmNlZRdecm9cclocaV1WTEYjJRxnRkVMw5DDjEpMw5XDicOIUsOVw4zDjVTDk8OMw4ghTFJMTMOLGTdRw4d2GDUYGx1OFlYYw5l1w7%2FDhsK4w4zCgT5GRsOJxY1VRypPR0lVR0pUSUc6SkDDmnXEgcOLwrjDh8K4N0knUUdHU0lJR0dVR1M7R1LChTtHTDlTJSUZIB84UsOIwr7DjMOyR0c%3D'  # Fill in your own Cookie here
        }
        self.city_code = '101220100'  # City code for Hefei
        self.query = '前端'
        self.page = 1
        self.jobs = []
        self.output_dir = 'output'

        # Create the output directory; exist_ok avoids the check-then-create race.
        os.makedirs(self.output_dir, exist_ok=True)

    def get_params(self, page):
        """Build the query-string parameters for one page request.

        Args:
            page: 1-based page number.

        Returns:
            dict of request parameters for the joblist endpoint.
        """
        return {
            'city': self.city_code,
            'query': self.query,
            'page': page,
            'pageSize': 30
        }

    def update_headers(self):
        """Refresh request headers for the current city code and query.

        Keeps the Referer consistent with what a browser would send for the
        current search, and rotates the User-Agent at random.
        """
        query_encoded = urlencode({'query': self.query})
        self.headers['Referer'] = f'https://www.zhipin.com/web/geek/job?{query_encoded}&city={self.city_code}'
        self.headers['User-Agent'] = random.choice(self.user_agents)

    def fetch_jobs(self, page):
        """Fetch one page of job listings from the API.

        Args:
            page: 1-based page number to request.

        Returns:
            The decoded JSON payload (dict) on success, or None on any
            network error, non-200 status, invalid JSON, or API-level error.
        """
        params = self.get_params(page)
        self.update_headers()
        # Random delay between requests to reduce the chance of being blocked.
        time.sleep(random.uniform(1, 3))
        try:
            # timeout is essential: without it a stalled connection hangs forever.
            response = requests.get(self.base_url, headers=self.headers,
                                    params=params, timeout=10)
        except requests.RequestException as e:
            print(f"请求异常: {e}")
            return None

        if response.status_code != 200:
            print(f"请求失败，状态码: {response.status_code}")
            self.debug_response(response, params)
            return None

        try:
            data = response.json()
        except json.JSONDecodeError:
            print("响应内容不是有效的JSON格式")
            self.debug_response(response, params)
            return None

        # A non-zero 'code' signals an API-level error (e.g. expired Cookie).
        if data.get('code', 0) != 0:
            print(f"请求返回错误: {data.get('message', '未知错误')}")
            self.debug_response(response, params)
            return None
        return data

    def parse_jobs(self, data):
        """Extract the fields of interest from an API payload.

        Args:
            data: decoded JSON dict as returned by fetch_jobs.

        Returns:
            List of per-job dicts (Chinese keys double as the CSV header).
            Empty list when the payload is missing or malformed.
        """
        if not data or 'zpData' not in data or 'jobList' not in data['zpData']:
            print("数据格式不正确或为空")
            return []

        parsed_jobs = []
        for job in data['zpData']['jobList']:
            job_info = {
                '职位名称': job.get('jobName', ''),
                '薪资': job.get('salaryDesc', ''),
                '公司名称': job.get('brandName', '') or job.get('encCompanyName', ''),
                # `or ''` guards against keys present with a None value, which
                # would otherwise break the string concatenation.
                '工作地点': (job.get('cityName') or '') + ' ' + (job.get('areaDistrict') or ''),
                '工作经验': job.get('jobExperience', ''),
                '学历要求': job.get('jobDegree', ''),
                '公司规模': job.get('scaleName', ''),
                '公司行业': job.get('industryName', ''),
                '职位详情链接': f"https://www.zhipin.com/job_detail/{job.get('encryptJobId', '')}.html",
                '发布时间': job.get('createTime', '')
            }
            parsed_jobs.append(job_info)

        return parsed_jobs

    def crawl(self, max_pages=10):
        """Crawl up to max_pages of listings and store them on self.jobs.

        Stops early when a page fails, parses to nothing, or the API reports
        there are no more pages.

        Args:
            max_pages: maximum number of pages to fetch.

        Returns:
            The list of all parsed job dicts (also assigned to self.jobs).
        """
        total_jobs = []

        for page in range(1, max_pages + 1):
            print(f"正在爬取第 {page} 页...")
            data = self.fetch_jobs(page)

            if not data:
                print(f"第 {page} 页数据获取失败，停止爬取")
                break

            jobs = self.parse_jobs(data)
            if not jobs:
                print(f"第 {page} 页没有职位数据，停止爬取")
                break

            total_jobs.extend(jobs)
            print(f"第 {page} 页爬取完成，获取到 {len(jobs)} 个职位信息")

            # Stop when the API says there are no further pages.
            if 'zpData' in data and 'hasMore' in data['zpData'] and not data['zpData']['hasMore']:
                print("已到达最后一页，停止爬取")
                break

        self.jobs = total_jobs
        print(f"爬取完成，共获取到 {len(total_jobs)} 个职位信息")
        return total_jobs

    def save_to_csv(self, filename='jobs.csv'):
        """Save the collected jobs to a CSV file in the output directory.

        Args:
            filename: target file name inside self.output_dir.

        Returns:
            True on success, False when there is nothing to save or on error.
        """
        if not self.jobs:
            print("没有职位信息可保存")
            return False

        filepath = os.path.join(self.output_dir, filename)
        try:
            # utf-8-sig adds a BOM so Excel opens the Chinese headers correctly.
            with open(filepath, 'w', newline='', encoding='utf-8-sig') as f:
                writer = csv.DictWriter(f, fieldnames=self.jobs[0].keys())
                writer.writeheader()
                writer.writerows(self.jobs)
            print(f"职位信息已保存至 {filepath}")
            return True
        except OSError as e:
            print(f"保存CSV文件时出错: {e}")
            return False

    def save_to_json(self, filename='jobs.json'):
        """Save the collected jobs to a JSON file in the output directory.

        Args:
            filename: target file name inside self.output_dir.

        Returns:
            True on success, False when there is nothing to save or on error.
        """
        if not self.jobs:
            print("没有职位信息可保存")
            return False

        filepath = os.path.join(self.output_dir, filename)
        try:
            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(self.jobs, f, ensure_ascii=False, indent=4)
            print(f"职位信息已保存至 {filepath}")
            return True
        except OSError as e:
            print(f"保存JSON文件时出错: {e}")
            return False

    def debug_response(self, response, params):
        """Print diagnostic details about a failed request.

        Args:
            response: the requests.Response object that failed.
            params: the query parameters that were sent.
        """
        print("\n===== 调试信息 =====")
        print(f"请求URL: {response.url}")
        print(f"请求参数: {params}")
        print(f"请求头: {self.headers}")
        print(f"响应状态码: {response.status_code}")
        print("响应头:")
        for key, value in response.headers.items():
            print(f"  {key}: {value}")

        # Show at most the first 200 characters of the body.
        try:
            content_preview = response.text[:200] + '...' if len(response.text) > 200 else response.text
            print(f"响应内容预览: {content_preview}")
        except Exception as e:
            print(f"无法打印响应内容: {e}")

        print("可能的问题:")
        print("1. Cookie已过期或无效，请更新Cookie")
        print("2. IP被临时封禁，请稍后再试或使用代理IP")
        print("3. 请求参数格式不正确")
        print("4. 网站接口已更改，需要更新爬虫代码")
        print("===== 调试信息结束 =====\n")

if __name__ == '__main__':
    # Crawl up to 10 pages of listings, then persist them in both formats.
    spider = BossSpider()
    spider.crawl(max_pages=10)
    for save in (spider.save_to_csv, spider.save_to_json):
        save()