import scrapy
import json
from tencent_spider.items import TencentSpiderItem
from copy import deepcopy


class TencentSpider(scrapy.Spider):
    """Crawl Tencent career postings (sales keyword) via the public JSON API.

    Flow: listing pages -> one detail request per posting -> yield a
    fully-populated TencentSpiderItem from the detail callback.
    """

    name = "tencent"
    allowed_domains = ["tencent.com"]
    # Listing API; {} is the 1-based page index (pageSize fixed at 10).
    url_page = "https://careers.tencent.com/tencentcareer/api/post/Query?timestamp=1696392031715&countryId=&cityId=&bgIds=&productId=&categoryId=&parentCategoryId=&attrId=&keyword=%25E9%2594%2580%25E5%2594%25AE&pageIndex={}&pageSize=10&language=zh-cn&area=cn"
    # Detail API; {} is the PostId taken from a listing entry.
    detail_url = "https://careers.tencent.com/tencentcareer/api/post/ByPostId?timestamp=1696403296447&postId={}&language=zh-cn"
    start_urls = [url_page.format(1)]

    def parse(self, response, **kwargs):
        """Consume page 1 and schedule the remaining listing pages.

        The start request already fetched page 1, so parse its payload
        directly. Re-requesting the identical page-1 URL (as the original
        code did) is silently dropped by Scrapy's duplicate filter, losing
        every posting on page 1.
        """
        yield from self.parse_data_index(response)
        for num in range(2, 4):
            yield scrapy.Request(
                url=self.url_page.format(num),
                callback=self.parse_data_index,
            )

    def parse_data_index(self, response):
        """Extract summary fields from a listing page and request details.

        Yields one scrapy.Request per posting, carrying the partially
        filled item in ``meta`` for the detail callback to complete.
        """
        posts = json.loads(response.text)["Data"]["Posts"]
        for data in posts:
            item = TencentSpiderItem()
            item["RecruitPostName"] = data["RecruitPostName"]
            item["CategoryName"] = data["CategoryName"]
            item["RequireWorkYearsName"] = data["RequireWorkYearsName"]
            item["LastUpdateTime"] = data["LastUpdateTime"]

            # Build the detail-page API request for this posting. A fresh
            # item is created every iteration and never touched again here,
            # so no deepcopy is needed when handing it off via meta.
            yield scrapy.Request(
                url=self.detail_url.format(data["PostId"]),
                callback=self.parse_detail_data,
                meta={"item": item},
            )

    def parse_detail_data(self, response):
        """Fill responsibility/requirement from the detail API and emit."""
        item = response.meta["item"]
        detail = json.loads(response.text)["Data"]
        item["Responsibility"] = detail["Responsibility"].replace("\r\n", "")
        item["Requirement"] = detail["Requirement"].replace("\r\n", "")
        yield item
