# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request,Selector
from zwspider.items import ZwspiderItem
import json
import re
from scrapy.exceptions import DropItem
from urllib.parse import quote

class ZpSpider(scrapy.Spider):
    """Crawl job listings from zhaopin.com.

    Flow: home page -> job-category links (``parse``) -> paginated JSON
    search API on fe-api.zhaopin.com (``parse_list``) -> per-job detail page
    (``parse_info``), which yields the fully populated ``ZwspiderItem``.
    """
    name = 'zp'
    allowed_domains = ['zhaopin.com', 'fe-api.zhaopin.com', 'jobs.zhaopin.com']  # domains the spider may visit
    start_urls = ['https://www.zhaopin.com/']  # entry link

    # Matches salary strings of the form "10K-15K"; compiled once here instead
    # of inside the per-result loop.
    SALARY_RE = re.compile(r'(\d+)K-(\d+)K')

    # Search-API URL template shared by the first request (parse) and the
    # pagination requests (parse_list) so both stay consistent.
    SEARCH_URL = ('https://fe-api.zhaopin.com/c/i/sou?start={start}&pageSize={size}'
                  '&cityId=489&workExperience=-1&education=-1&companyType=-1'
                  '&employmentType=-1&jobWelfareTag=-1&kw={kw}&kt=3')

    # Headers for the fe-api.zhaopin.com JSON endpoint.  NOTE(review): the
    # Cookie below was captured from a logged-in browser session and will
    # expire -- refresh it when requests start returning non-JSON bodies.
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Cookie': 'acw_tc=2760829d15541744984095511e0e94cb9762e9d9ddb5395366fab281d5eb9b; CNZZDATA1256793290=331662787-1554180895-%7C1554341753; sts_deviceid=16a01f14c8f193-0cd879ac3cd7ab-2464751f-2073600-16a01f14c9017e; jobRiskWarning=true; sts_sg=1; sts_chnlsid=Unknown; zp_src_url=https%3A%2F%2Fwww.zhaopin.com%2F; sajssdk_2015_cross_new_user=1; Hm_lvt_38ba284938d5eddca645bb5e02a02006=1554297868,1554687045; Hm_lpvt_38ba284938d5eddca645bb5e02a02006=1554810757; dywez=95841923.1554810862.1.1.dywecsr=sou.zhaopin.com|dyweccn=(referral)|dywecmd=referral|dywectr=undefined|dywecct=/; __utmt=1; firstchannelurl=https%3A//passport.zhaopin.com/login%3FbkUrl%3Dhttps%3A//i.zhaopin.com/blank%3Fhttps%253A%252F%252Fsou.zhaopin.com%252F%253Fjl%253D538%2526kw%253D%2525E6%25258A%252595%2525E8%2525B5%252584%2525E7%2525BB%25258F%2525E7%252590%252586%2526kt%253D3; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2216a01f15d2b9d-0c6dce2ee0ea14-2464751f-2073600-16a01f15d2c230%22%2C%22%24device_id%22%3A%2216a01f15d2b9d-0c6dce2ee0ea14-2464751f-2073600-16a01f15d2c230%22%2C%22props%22%3A%7B%7D%7D; lastchannelurl=https%3A//sou.zhaopin.com/%3Fjl%3D538%26kw%3D%25E6%258A%2595%25E8%25B5%2584%25E7%25BB%258F%25E7%2590%2586%26kt%3D3; dywea=95841923.3285671991617382000.1554810862.1554810862.1554810862.1; dywec=95841923; dyweb=95841923.10.6.1554811080993; __utma=269921210.728871691.1554810862.1554810862.1554810862.1; __utmb=269921210.10.6.1554811080998; __utmc=269921210; __utmz=269921210.1554810862.1.1.utmcsr=sou.zhaopin.com|utmccn=(referral)|utmcmd=referral|utmcct=/; JsNewlogin=1953673919; JSloginnamecookie=1009137312%40qq%2Ecom; JSShowname=%E8%8C%83%E5%BF%97%E4%BF%8A; at=b8eb2bbdd91c4ebab89b3ccb2eb88896; Token=b8eb2bbdd91c4ebab89b3ccb2eb88896; rt=46d949726a634a729ce3126ad94e6b42; JSpUserInfo=3d753d6857645e754c685b645a754b685e645e754a685364537535682464557548685a64587540685b645b754e68596459754b682a641975086844640b751668076453752c68246455757aebbd3ba23a42682f64257544685b6458754968536459754a685d645b7548685864287508681b6446751a6805640575426839643c7544685b64537538683e6455754a6847645a7559685b64517543685f64507542682b64247544685b6453752c682b645575336827645d75486858645a754d685c645b7540685e6453752c683e645575486851643b7530685764587542683; uiioit=3b622a6459640e644264466a5a6e506e5064523857775d7751682c622a64596408644c646; ZL_REPORT_GLOBAL={%22sou%22:{%22actionid%22:%22aee4c3cc-0b4d-4540-9eeb-d7d4bb0a35ce-sou%22%2C%22funczone%22:%22smart_matching%22}}; ZP_OLD_FLAG=false; sts_evtseq=8; sts_sid=16a01f154444f-0445f8903b4c77-2464751f-2073600-16a01f154453ba; LastCity=%E4%B8%8A%E6%B5%B7; LastCity%5Fid=538',
        'Host': 'fe-api.zhaopin.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Maxthon/5.0 Chrome/47.0.2526.73 Safari/537.36'
    }

    # Headers for the jobs.zhaopin.com detail pages (separate Host/cookies).
    job_headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Host': 'jobs.zhaopin.com',
        'cookie': 'sts_deviceid=169dc04dfd2109-0722b41ec879a-2464751f-2073600-169dc04dfd464f; UM_distinctid=169dc04e3ea41a-07b3e7c28f3d0b-2464751f-1fa400-169dc04e3eb5ed; acw_tc=2760823015543419018593018e8d0ac0f2c4e8f95991d6d82442ec5e863bf4; dywez=95841923.1554341905.1.1.dywecsr=(direct)|dyweccn=(direct)|dywecmd=(none)|dywectr=undefined; jobRiskWarning=true; sts_sg=1; sts_chnlsid=Unknown; CNZZDATA1256793290=1940995170-1554337501-%7C1554683132; zp_src_url=https%3A%2F%2Fwww.zhaopin.com%2F; JsNewlogin=1953673919; JSloginnamecookie=15900792449; JSShowname=%E8%8C%83%E5%BF%97%E4%BF%8A; Token=a46006531ea64429aa305bbac39254b4; rt=bef3de3eb35043baabf8253e3704d456; JSsUserInfo=376436655a665266517754754b6f4b754d69546441655e665f6628772b75446f48754c695b644265576653665d7757754d6f4d754069596427652966596667f4b22ab32042753c692f644f655666546654775c75486f4a754e69516443655566246615771475576f1a7516690f6449653466306659775475426f38752d695f6441654a66556644775c75436f4a754869596433652b66596654775e752c6f3875446928643f6552665566567757754d6f4f754a695b6446655c66316630775875486f42752a692b644f6557665f660; uiioit=3b622a6459640e644264466a5a6e506e5064523857775d7751682c622a64596408644c646; ZP_OLD_FLAG=false; urlfrom=121126445; urlfrom2=121126445; adfcid=none; adfcid2=none; adfbid=0; adfbid2=0; sts_evtseq=1; sts_sid=16a01e8b937388-09d81d9b78c141-2464751f-2073600-16a01e8b93a6d8; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%22651224639%22%2C%22%24device_id%22%3A%22169dc04e0a71d3-00b37f0e3f6cae-2464751f-2073600-169dc04e0a89fe%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%2C%22first_id%22%3A%22169dc04e0a71d3-00b37f0e3f6cae-2464751f-2073600-169dc04e0a89fe%22%7D; dywea=95841923.885615016855425900.1554341905.1554687045.1554810190.3; dywec=95841923; dywem=95841923.y; dyweb=95841923.1.10.1554810190; __utmt=1; __utma=269921210.1635137474.1554341905.1554687045.1554810190.3; __utmb=269921210.1.10.1554810190; __utmc=269921210; __utmz=269921210.1554341905.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); Hm_lvt_38ba284938d5eddca645bb5e02a02006=1554297868,1554687045; Hm_lpvt_38ba284938d5eddca645bb5e02a02006=1554810190; LastCity=%E4%B8%8A%E6%B5%B7; LastCity%5Fid=538; referrerUrl=; stayTimeCookie=1554810191650',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.63 Safari/537.36 Qiyu/2.1.1.1'
    }

    def parse(self, response):
        """Collect job-category names from the home page and issue the first
        search-API request (results 0-59) for each category."""
        for div_item in response.xpath('//div[@class="zp-jobNavigater__pop--container"]'):
            zwlb_big = div_item.xpath('div[@class="zp-jobNavigater__pop--title"]/text()').extract_first()
            for zwlb in div_item.xpath('div[@class="zp-jobNavigater__pop--list"]/a/text()').extract():
                url = self.SEARCH_URL.format(start=0, size=60, kw=quote(zwlb))
                # Send a per-request copy of the headers instead of mutating
                # self.headers: Scrapy schedules requests concurrently, so
                # writes to the shared dict would race between keywords.
                req_headers = dict(self.headers,
                                   Referer='https://sou.zhaopin.com/?kt=3&kw={}&jl=489&kt=3'.format(quote(zwlb)))
                yield scrapy.Request(url=url, callback=self.parse_list, dont_filter=True,
                                     meta={'zwlb_big': zwlb_big, 'zwlb': zwlb, 'p': 1, 'size': 60, 'start': 60},
                                     headers=req_headers)

    # Parse one page of the JSON search API and handle pagination.
    def parse_list(self, response):
        """Yield one detail-page request per job result, plus a request for
        the next API page while ``numFound`` says more results remain."""
        p = response.meta['p'] + 1  # next page number, used only in the Referer
        try:
            # body_as_unicode() is deprecated; response.text is the modern form.
            js = json.loads(response.text)
        except json.JSONDecodeError:
            # Non-JSON body (e.g. an anti-bot or login page): dump it for
            # offline inspection instead of crashing the crawl.
            import os
            os.makedirs('zhaopin', exist_ok=True)  # the dir may not exist yet
            with open('zhaopin/' + response.meta['zwlb'] + '.html', 'wb') as f:
                f.write(response.body)
            return

        res_zwlb = response.meta['zwlb']
        data = js.get('data', '')
        if not data:
            return

        referer = 'https://sou.zhaopin.com/?p={}&jl=489&kw={}&kt=3&sf=0&st=0'.format(p, quote(res_zwlb))
        for i in js['data'].get('results', []):
            zwyx = i['salary']  # e.g. "10K-15K"; may also be free text
            match = self.SALARY_RE.findall(zwyx)
            if match:
                min_zwyx = match[0][0] + '000'  # "10K-15K" -> "10000"/"15000"
                max_zwyx = match[0][1] + '000'
            else:
                min_zwyx = max_zwyx = 0  # unparseable salary (e.g. negotiable)

            # jobType['items'] may have fewer than 2 entries; fall back to the
            # keyword we searched with rather than raising IndexError.
            jt_items = i['jobType'].get('items') or []

            item_one = ZwspiderItem()
            item_one['zwmc'] = i['jobName']                      # job title
            item_one['gsmc'] = i['company']['name']              # company name
            item_one['flxx'] = i['welfare']                      # welfare/benefits
            item_one['zwyx'] = zwyx                              # raw salary text
            item_one['min_zwyx'] = min_zwyx
            item_one['max_zwyx'] = max_zwyx
            item_one['dd'] = i['city']['items'][0]['name']       # work location
            item_one['fbrq'] = i['updateDate']                   # publish date
            item_one['gsxz'] = i['company']['type']['name']      # company type
            item_one['gzjy'] = i['workingExp']['name']           # work experience
            item_one['xl'] = i['eduLevel']['name']               # education level
            item_one['zwlb'] = (jt_items[1].get('name', res_zwlb)
                                if len(jt_items) > 1 else res_zwlb)
            item_one['gsgm'] = i['company']['size']['name']      # company size
            item_one['zwlb_big'] = (jt_items[0].get('name', i['jobType'].get('display', res_zwlb))
                                    if jt_items else res_zwlb)
            # renamed local (was `type`, shadowing the builtin)
            item_one['type'] = i['emplType']                     # employment type

            url = i['positionURL']
            if url:
                # The detail request sends job_headers; the original also
                # mutated self.headers['Referer'] here to no effect.
                yield scrapy.Request(url=url, callback=self.parse_info, dont_filter=True,
                                     meta={'item': item_one}, headers=self.job_headers)

        num = js['data']['numFound']
        if num > response.meta['start']:
            start = response.meta['start'] + 60
            # kw must be URL-quoted here too -- the first request in parse()
            # quotes it, and an unquoted Chinese keyword breaks the API call.
            url2 = self.SEARCH_URL.format(start=start, size=response.meta['size'], kw=quote(res_zwlb))
            yield scrapy.Request(url=url2, callback=self.parse_list, dont_filter=True,
                                 meta={'zwlb_big': response.meta['zwlb_big'], 'zwlb': res_zwlb,
                                       'p': p, 'size': response.meta['size'], 'start': start},
                                 headers=dict(self.headers, Referer=referer))

    def parse_info(self, response):
        """Fill in the detail-page fields and emit the finished item.

        Bug fix: the original printed the item but never yielded it, so no
        item ever reached the item pipelines.
        """
        item = response.meta['item']
        item['gshy'] = response.xpath('//button[@class="company__industry"]/text()').extract_first()  # company industry
        item['rzyq'] = response.xpath('string(//div[@class="describtion__detail-content"])').extract_first()  # job description/requirements
        item['source'] = '智联招聘'  # data source label (Zhaopin)
        yield item
