# -*- coding: utf-8 -*-
import scrapy
import time
from www_job_com.items import WwwJobComItem


class ZhaopinSpider(scrapy.Spider):
    """Crawl sou.zhaopin.com search results for python jobs in city 682
    (Xiamen) and yield one ``WwwJobComItem`` per matching posting.

    Pagination is driven by :meth:`next_request`, which stops after page 5.
    """

    name = 'zhaopin'
    allowed_domains = ['sou.zhaopin.com']
    start_urls = ['http://sou.zhaopin.com/']

    # URL of the page currently being fetched (set by next_request) and the
    # 1-based page counter; the crawl stops after MAX_PAGE pages.
    positionUrl = ''
    curPage = 0
    MAX_PAGE = 5

    # Session headers captured from a logged-in browser session.
    # NOTE(review): the cookie is tied to one account/session and will expire;
    # it must be refreshed manually when the site starts rejecting requests.
    # (The cookie value was previously split across two physical lines, which
    # made the module a SyntaxError — rejoined via implicit concatenation.)
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.5735.289 Safari/537.36',
        'cookie': (
            'x-zp-client-id=826a5d96-67ac-46cf-abec-13a5c7088eb2; FSSBBIl1UgzbN7NO=5X.aLFinbn5ODbAPn_PHXq1w_q92Km7xVvPTCxnVHFg.bBp8TQAMfEiGXPFJ6zSJmVoG_WWQfl1KKQNzsO0ISvG; locationInfo_search={%22code%22:%22682%22%2C%22name%22:%22%E5%8E%A6%E9%97%A8%22%2C%22message%22:%22%E5%8C%B9%E9%85%8D%E5%88%B0%E5%B8%82%E7%BA%A7%E7%BC%96%E7%A0%81%22}; _uab_collina=171755699005139934110655; LastCity=%E5%8E%A6%E9%97%A8; LastCity%5Fid=682; selectCity_search=682; Hm_lvt_7fa4effa4233f03d11c7e2c710749600=1717572508,1717637895; at=7b8f3f753e774147ba6cdb919f9d3ffe; rt=d70a577d1eae4ea8a5c0f6a2ec2421b6; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%221201071576%22%2C%22first_id%22%3A%2218fe65fcd28117-00c5eaca21924bb8-3c2a7345-1327104-18fe65fcd2a29f%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%7D%2C%22identities%22%3A%22eyIkaWRlbnRpdHlfY29va2llX2lkIjoiMThmZTY1ZmNkMjgxMTctMDBjNWVhY2EyMTkyNGJiOC0zYzJhNzM0NS0xMzI3MTA0LTE4ZmU2NWZjZDJhMjlmIiwiJGlkZW50aXR5X2xvZ2luX2lkIjoiMTIwMTA3MTU3NiJ9%22%2C%22history_login_id%22%3A%7B%22name%22%3A%22%24identity_login_id%22%2C%22value%22%3A%221201071576%22%7D%2C%22%24device_id%22%3A%2218fe65fcd28117-00c5eaca21924bb8-3c2a7345-1327104-18fe65fcd2a29f%22%7D; sts_deviceid=18feb3e955b520-0f9589d47acc6d-3c2a7345-1327104-18feb3e955c40c; acw_tc=276077d217180650381758728eae3f0b3c6d787e4da6f4df396f7f9e80765d; Hm_lvt_21a348fada873bdc2f7f75015beeefeb=1717556990,1717632636,1718065041; Hm_lpvt_21a348fada873bdc2f7f75015beeefeb=1718065041; '
            'FSSBBIl1UgzbN7NP=5Rb8UOCRv2g3qqqDArtOhEAhCwTthF5CyTGc.vukhtMzJW1mOU8wkRBhVLON82MttW9vAkORk0X7XsP1JUjH6kp5CDmNeWkUceD5mffMrLtChWKr6_4xGr6HuLtUh6kJwQEcRZvo2Kkm1ZEqqcDUVPJjdghlUKWAveoYTakTt0mYyBfUV8Y1EdRM3XJm7ilE_CpmnaRVr7A4SWdgufzM5tAsz46lPCVFCNDKqtpdCG2KLpDv1i_nDST0z71HkdswHTOF1HT4F6D0mZMLgIIJIMQnQAiWPs7phebJ4bfGPboayHcbDJIdNH1OMHg_wjgM_I9laaVq.CKpvF98eySY5KH'
        ),
    }

    @staticmethod
    def _first_text(selector, query):
        """Return the stripped first text match for *query*, or '' if absent.

        Guards against ``extract_first()`` returning None, which previously
        crashed on ``.strip()``.
        """
        value = selector.css(query).extract_first()
        return value.strip() if value else ''

    @staticmethod
    def _nth_text(selector, query, index):
        """Return the stripped *index*-th text match for *query*, or ''.

        Guards against the IndexError the bare ``extract()[1]`` calls risked.
        """
        texts = selector.css(query).extract()
        return texts[index].strip() if len(texts) > index else ''

    def start_requests(self):
        """Kick off the crawl with the first search-results page."""
        return [self.next_request()]

    def parse(self, response):
        """Parse one search-result page, yield matching job items, paginate.

        Yields a ``WwwJobComItem`` for every job whose lower-cased title
        contains a tracked language keyword, then the request for the next
        page (if any remain).
        """
        print("request -> " + response.url)
        job_list = response.css('div.joblist-box__item')
        # Was `len(job_list) > 1`, which silently skipped pages with exactly
        # one result.
        if job_list:
            print("zhaopin Nums:" + str(len(job_list)))
            for position_id, job in enumerate(job_list, start=1):
                # Title was previously extracted twice; once is enough.
                name = self._first_text(job, 'a.jobinfo__name::text').lower()
                print("request -> job_name:")
                print(name)
                # NOTE(review): the bare "c" keyword matches any title merely
                # containing the letter c (e.g. "account manager"); it was
                # probably meant to be "c++"/"c#" — confirm before tightening.
                if not any(kw in name for kw in ("php", "python", "c", "java")):
                    continue
                item = WwwJobComItem()
                item['position_id'] = position_id  # positional index, not a site id
                item['position_name'] = name
                item['salary'] = self._first_text(job, 'p.jobinfo__salary::text')
                item['avg_salary'] = ''  # not derivable from this listing page
                item['city'] = self._first_text(job, 'div.jobinfo__other-info-item > span::text')
                item['work_year'] = self._nth_text(job, 'div.jobinfo__other-info-item::text', 1)
                item['education'] = ''
                item['company_name'] = self._first_text(job, 'a.companyinfo__name::text')
                item['industry_field'] = ''
                item['finance_stage'] = ''
                item['company_size'] = self._nth_text(job, 'div.joblist-box__item-tag::text', 1)
                item['position_lables'] = ''
                item['time'] = ''
                item['updated_at'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
                item['platform'] = "zhaopin"
                yield item
            # next_request() returns None once all pages are fetched; yielding
            # None from a callback is an error in Scrapy, so guard it.
            next_page = self.next_request()
            if next_page is not None:
                yield next_page

    def next_request(self):
        """Build the request for the next result page, or None after MAX_PAGE."""
        self.curPage += 1
        if self.curPage > self.MAX_PAGE:
            return None
        self.positionUrl = ("https://sou.zhaopin.com/?jl=682&kw=python&et=2&p="
                            + str(self.curPage))
        print("zhaopin page:" + str(self.curPage))
        # NOTE(review): time.sleep blocks Scrapy's event loop; the
        # DOWNLOAD_DELAY setting is the proper throttle. Kept so the crawl
        # pacing is unchanged.
        time.sleep(10)
        # No form data is posted, so this FormRequest is effectively a GET.
        return scrapy.http.FormRequest(self.positionUrl,
                                       headers=self.headers,
                                       callback=self.parse)