# -*- coding: utf-8 -*-
import scrapy
from ..items import TenxunItem
import json
import time
from urllib import parse


class TenxunjobSpider(scrapy.Spider):
    """Scrape Tencent job postings (careers.tencent.com) for a user-supplied keyword.

    Flow: the start URL fetches page 1 of the search API; ``parse`` reads the
    total hit count, extracts page 1's posts, and schedules the remaining
    pages; ``first_page`` schedules one detail request per post; ``page_info``
    fills a TenxunItem from the detail JSON and yields it.
    """

    name = 'tenxunjob'
    # Search/list endpoint: paginated job summaries (10 per page) for a keyword.
    url1 = 'https://careers.tencent.com/tencentcareer/api/post/Query?timestamp={}&countryId=&cityId=&bgIds=&productId=&categoryId=&parentCategoryId=&attrId=&keyword={}&pageIndex={}&pageSize=10&language=zh-cn&area=cn'
    # Detail endpoint: full description for a single post id.
    url2 = 'https://careers.tencent.com/tencentcareer/api/post/ByPostId?timestamp={}&postId={}&language=zh-cn'
    allowed_domains = ['careers.tencent.com']
    timestamp = int(time.time() * 1000)
    # NOTE(review): input() in the class body runs at import time; acceptable
    # for a standalone crawl, but a `-a keyword=...` spider argument would be
    # the idiomatic Scrapy way.
    keyword = input("请输入职位：")
    keyword = parse.quote(keyword)  # URL-encode before interpolating into url1
    start_urls = [url1.format(timestamp, keyword, 1)]

    def parse(self, response):
        """Handle page 1: read the total count, yield page 1's detail requests,
        and schedule the remaining result pages.

        Bug fixes vs. the original:
        - Page count was ``count//10 if count//10 else count+1`` iterated with
          ``range(1, page)``, which dropped the last page and produced nonsense
          for count < 10. Use ceil(count / 10) and an inclusive range.
        - Re-requesting page 1 hit Scrapy's duplicate filter (same URL as the
          start URL), so page 1's posts were never scraped. Extract them from
          the response we already have instead.
        """
        data = json.loads(response.text)
        count = data['Data']['Count']
        pages = (count + 9) // 10  # ceil(count / 10); API serves 10 per page

        # Page 1 is this very response — process its posts directly rather
        # than re-requesting the start URL (which the dupefilter would drop).
        for request in self.first_page(response):
            yield request

        for index in range(2, pages + 1):
            page_url = self.url1.format(self.timestamp, self.keyword, index)
            yield scrapy.Request(url=page_url, callback=self.first_page)

    def first_page(self, response):
        """Yield one detail-page request per post on a listing page.

        Bug fix: the original created a single TenxunItem before the loop and
        shared it (via meta) across every scheduled request; concurrent
        callbacks then overwrote each other's ``job_id``. Build a fresh item
        per post.
        """
        data = json.loads(response.text)
        for post in data['Data']['Posts']:
            item = TenxunItem()
            item["job_id"] = post['PostId']
            detail_url = self.url2.format(self.timestamp, item["job_id"])
            yield scrapy.Request(url=detail_url, meta={'item': item},
                                 callback=self.page_info)

    def page_info(self, response):
        """Populate the item carried in meta with the job's detail fields and yield it."""
        data = json.loads(response.text)['Data']
        item = response.meta['item']
        item["job_name"] = data['RecruitPostName']
        item["job_city"] = data['LocationName']
        item["job_type"] = data['CategoryName']
        item["job_time"] = data['LastUpdateTime']
        # Strip all whitespace in one pass. The original's intermediate
        # "\n".join(...) was immediately undone by a second .split(), so a
        # single join produces the identical string.
        item["job_duty"] = "".join(data['Responsibility'].split())
        item["job_require"] = "".join(data['Requirement'].split())
        yield item