import scrapy
from urllib import parse
import json
from ..items import TencentItem


class TencentSpider(scrapy.Spider):
    """Scrape Tencent job postings (careers.tencent.com) for a keyword.

    Flow:
      parse()        -- read total result count, schedule one request per list page
      parse_detail() -- pull each posting's PostId, schedule its detail request
      get_job()      -- fill a TencentItem from the detail JSON and yield it
    """
    name = "tencent"
    allowed_domains = ["careers.tencent.com"]
    # Prompted once at import (class-definition) time.
    # BUG FIX: the original called parse.quote(keyword) and discarded the
    # result, so the raw (unencoded) keyword was sent in the URL.
    keyword = parse.quote(input("请输入职位类别："))
    # One shared template for the list endpoint; the two {} slots are
    # keyword and pageIndex (10 results per page).
    base_url = ('https://careers.tencent.com/tencentcareer/api/post/Query'
                '?timestamp=1709016543320&countryId=&cityId=&bgIds=&productId='
                '&categoryId=&parentCategoryId=&attrId=&keyword={}'
                '&pageIndex={}&pageSize=10&language=zh-cn&area=cn')
    # The first request bootstraps pagination; scrapy does not de-duplicate
    # start_urls (dont_filter=True by default for start requests).
    first_url = base_url.format(keyword, 1)
    start_urls = [first_url]

    def parse(self, response):
        """Compute the total page count from Data.Count and enqueue every list page."""
        # response.text is the raw JSON body of the first list page.
        html = json.loads(response.text)
        count = html['Data']['Count']
        # Ceiling division: 10 results per page.
        total = (count + 9) // 10
        for index in range(1, total + 1):
            # BUG FIX: the original hard-coded pageIndex=1 and passed `index`
            # to .format() as an ignored extra argument, so every request
            # re-fetched the first page. Use the real page number.
            page_url = self.base_url.format(self.keyword, index)
            # Hand all list-page URLs to the scheduler at once.
            yield scrapy.Request(url=page_url, callback=self.parse_detail)

    def parse_detail(self, response):
        """Extract each posting's PostId from a list page and request its detail page."""
        one_html = json.loads(response.text)
        for one_job in one_html['Data']['Posts']:
            post_id = one_job['PostId']
            # Build the second-level (detail) page URL for this posting.
            two_url = ('https://careers.tencent.com/tencentcareer/api/post/ByPostId'
                       '?timestamp=1709016852328&postId={}&language=zh-cn').format(post_id)
            yield scrapy.Request(url=two_url, callback=self.get_job)

    def get_job(self, response):
        """Fill a TencentItem with the concrete fields of one job posting."""
        two_html = json.loads(response.text)
        data = two_html['Data']
        item = TencentItem()
        item['job_name'] = data['RecruitPostName']
        item['job_address'] = data['LocationName']
        item['job_type'] = data['CategoryName']
        item['job_time'] = data['LastUpdateTime']
        item['job_responsibility'] = data['Responsibility']
        item['job_requirement'] = data['Requirement']
        yield item
