import scrapy
import json
from myspider.items import WangYiItem
import datetime


class WangyiSpider(scrapy.Spider):
    """Crawl NetEase (163) job postings from the hr.163.com JSON API.

    Issues a POST to ``/api/hr163/position/queryPage`` with a page number,
    yields one :class:`WangYiItem` per job, and keeps requesting the next
    page until the API returns an empty ``data.list``.
    """

    name = "wangyi"
    allowed_domains = ["hr.163.com"]
    start_urls = ["https://hr.163.com/api/hr163/position/queryPage"]

    def __init__(self, *args, **kwargs):
        # FIX: the base initializer was never called; scrapy.Spider.__init__
        # wires up self.name and keyword-argument handling the framework
        # relies on. *args/**kwargs keep the signature backward-compatible.
        super().__init__(*args, **kwargs)
        self.table_name = 'wangyijob'
        # NOTE(review): these names do not match the keys parse_job actually
        # sets ('product'/'updateTime' are set but absent here, while
        # 'postTypeFullName', 'requirement', etc. are listed but never set).
        # Confirm against the pipeline that consumes table_fields.
        self.table_fields = ['name', 'productName', 'postTypeFullName', 'description', 'recruitNum','reqEducationName', 'reqWorkYearsName', 'requirement', 'firstPostTypeName','workPlaceNameList']

    def start_requests(self):
        """Send the first POST request (page 1) to the job-listing API."""
        # Starting URL of the API endpoint.
        url = self.start_urls[0]
        # Browser-like headers captured from the site; the API expects a
        # JSON content type and the custom x-ehr-uuid header.
        headers = {
            "accept": "application/json, text/plain, */*",
            "accept-language": "zh-CN,zh;q=0.9",
            "authtype": "ursAuth",
            "cache-control": "no-cache",
            "content-type": "application/json;charset=UTF-8",
            "language": "zh",
            "origin": "https://hr.163.com",
            "pragma": "no-cache",
            "priority": "u=1, i",
            "referer": "https://hr.163.com/job-list.html",
            "sec-ch-ua": "\"Chromium\";v=\"130\", \"Google Chrome\";v=\"130\", \"Not?A_Brand\";v=\"99\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"Windows\"",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36",
            "x-ehr-uuid": "a0740b64-be4b-4148-838e-26dcd0e212"
        }
        # Session cookies captured from a browser visit.
        # NOTE(review): JSESSIONID and the analytics cookies are hard-coded
        # and will expire — confirm whether the API actually requires them.
        cookies = {
            "_ga": "GA1.1.737302981.1731993936",
            "_ga_Z0JVTF6WF2": "GS1.1.1731993936.1.0.1731993939.0.0.0",
            "_ntes_nnid": "b1d44c6ce6f80a8e9ce6132563457a8b,1731993946454",
            "_ntes_nuid": "b1d44c6ce6f80a8e9ce6132563457a8b",
            "__root_domain_v": ".163.com",
            "_qddaz": "QD.362045337014545",
            "userName": "",
            "accountType": "",
            "hb_MA-8E16-605C3AFFE11F_source": "www.baidu.com",
            "JSESSIONID": "6F3186F1E50AA63C6383D6029E9F5D7D"
        }

        # JSON body: request the first page, 10 jobs per page.
        data = {
            "currentPage": 1,
            "pageSize": 10
        }
        # Build the POST request; current page number travels in meta so
        # parse_job can compute the next page.
        yield scrapy.Request(
            url=url,
            body=json.dumps(data),
            method='POST',
            headers=headers,
            cookies=cookies,
            callback=self.parse_job,
            meta={'page': 1},
        )

    def parse_job(self, response):
        """Parse one page of job results; yield items and the next-page request.

        Stops recursing when the API returns an empty ``data.list``.
        """
        try:
            data = response.json()

            # Empty list means we've paged past the last result — stop.
            if not data.get('data', {}).get('list'):
                self.logger.info(f"没有更多数据，停止在页码 {response.meta['page']}")
                return

            for job in data['data']['list']:
                try:
                    item = WangYiItem()

                    # Direct field mapping; strip() trims surrounding whitespace.
                    item['name'] = job.get('name', '').strip()
                    item['product'] = job.get('product', '').strip()
                    item['productName'] = job.get('productName', '').strip()
                    item['description'] = job.get('description', '').strip()
                    item['recruitNum'] = job.get('recruitNum', 0)

                    # Convert updateTime from a millisecond epoch timestamp
                    # to YYYY-MM-DD; fall back to '' on any malformed value.
                    update_time = job.get('updateTime')
                    if update_time:
                        try:
                            item['updateTime'] = datetime.datetime.fromtimestamp(update_time / 1000).strftime('%Y-%m-%d')
                        # FIX: was a bare `except:` which also swallowed
                        # KeyboardInterrupt/SystemExit; narrowed to what
                        # fromtimestamp / the division can actually raise.
                        except (TypeError, ValueError, OverflowError, OSError):
                            item['updateTime'] = ''
                    else:
                        item['updateTime'] = ''

                    yield item

                except Exception as e:
                    # Log and skip the bad record; keep processing the page.
                    self.logger.error(f"Error processing job {job.get('id')}: {str(e)}")
                    continue

            # Request the next page with the same headers/cookies.
            next_page = response.meta['page'] + 1
            yield scrapy.Request(
                url=response.url,
                body=json.dumps({"currentPage": next_page, "pageSize": 10}),
                method='POST',
                # FIX: response.headers are the SERVER's headers (Content-Type,
                # Set-Cookie, ...), not what the API expects from a client.
                # Reuse the original request's headers instead.
                headers=response.request.headers,
                cookies=response.request.cookies,
                callback=self.parse_job,
                meta={'page': next_page}
            )

        except ValueError as e:
            # response.json() raises ValueError on a non-JSON body.
            self.logger.error(f"JSON decode error: {str(e)}")
        except Exception as e:
            # Last-resort guard so one bad page doesn't kill the crawl.
            self.logger.error(f"Unexpected error: {str(e)}")







