# -*- coding: utf-8 -*-
import scrapy
from liepin.items import LiepinItem
from urllib.parse import urlparse, parse_qs
import pandas as pd 
from requests_html import HTMLSession
import requests
# Seed search URL: liepin.com job search (key=%E6%8A%95%E8%B5%84 is "投资"
# URL-encoded), industry 150, region 050090, page 0, 40 results per page.
# Its query string is parsed below to serve as the parameter template.
url="https://www.liepin.com/zhaopin/?industries=150&subIndustry=&dqs=050090&salary=&jobKind=&pubTime=&compkind=&compscale=&searchType=1&isAnalysis=&sortFlag=15&d_headId=29ad48338b62368ba7537c9cdb34d6ff&d_ckId=9716154c414583db320e90530af762d5&d_sfrom=search_prime&d_curPage=0&d_pageSize=40&siTag=bFGQTbwE_AAQSb-u11jrBw%7ECo2BjAbKTuYdbouBrhQntw&key=%E6%8A%95%E8%B5%84"
# NOTE(review): urlparse/parse_qs and pandas are re-imports (already imported
# above); kept as-is since only part of the file may be visible here.
from urllib.parse import urlparse, parse_qs,urlencode
import pandas as pd
def parse_url_qs_for_curPage(url):
    """Return the query string of *url* parsed into a dict.

    Each value is a list of strings, exactly as produced by
    urllib.parse.parse_qs (repeated parameters yield multi-element lists).
    """
    parts = urlparse(url)        # scheme / netloc / path / params / query / fragment
    return parse_qs(parts.query)  # only the query component is of interest here
# Parse the seed URL once; this dict of query parameters is the template
# from which each per-page request's parameters are derived.
参数模板 = parse_url_qs_for_curPage(url)
参数模板  # notebook-style echo; no effect when run as a script
#TODO: the function below and the seed url above still need updating
def 参数修改(key, curPage):
    """Return a copy of 参数模板 with the search keyword and page overridden.

    NOTE(review): the template parsed from the seed URL carries 'd_curPage',
    while this sets 'curPage' (a new key) -- confirm which name the site
    actually pages on before relying on pagination.
    """
    修改后 = dict(参数模板)   # shallow copy; values are replaced, never mutated
    修改后["key"] = key
    修改后["curPage"] = curPage
    return 修改后
#change the search keyword here
# One parameter dict per results page (pages 0-9), all searching "投资".
# Values are single-element lists to match the parse_qs template shape.
参数修改后列表 = [参数修改(curPage=[i], key=["投资"]) for i in range(10)]
参数修改后列表  # notebook-style echo; no effect as a script

# Re-encode each parameter dict into a query string and prepend the search
# endpoint to obtain the spider's start URLs.
starts_url = []
for 参数 in 参数修改后列表:
    query_string = urlencode(参数, doseq=True)  # doseq=True expands list values
    url = "https://www.liepin.com/zhaopin/?" + query_string
    starts_url.append(url)
starts_url  # notebook-style echo

class LiepinspiderSpider(scrapy.Spider):
    """Spider that scrapes job listings from liepin.com search-result pages.

    Each page yields one item per <li> row of the result list, with fields
    matching those declared on the project's LiepinItem.
    """
    name = 'liepinSpider'
    allowed_domains = ['www.liepin.com']
    start_urls = starts_url

    def parse(self, response):
        """Parse one search-result page; yield one LiepinItem per job row.

        Fixes vs. the original:
        - Per-row XPaths now start with './/' so they are scoped to the
          current <li>; the original '//...' searched the whole page on
          every iteration, giving identical full-page lists for each row.
        - The item is built and yielded inside the loop, so every row
          produces an item (the original yielded at most one per page and
          raised NameError on a page with no rows).
        """
        for row in response.xpath('//ul[@class="sojob-list"]/li'):
            job_xueli = row.xpath('.//div[contains(@class,"job-info")]/p/span[@class="edu"]/text()').extract()
            job_jingyan = row.xpath('.//div[contains(@class,"job-info")]/p/span[@class="edu"]/following-sibling::span/text()').extract()
            job_xinshui = row.xpath('.//div[contains(@class,"job-info")]/p/span[@class="text-warning"]/text()').extract()
            # 'time/@title' selects the attribute value directly; the original
            # appended '/text()' to an attribute node, which matches nothing.
            # Extracted but not stored: the item may not declare a field for it.
            job_shijian = row.xpath('.//div[contains(@class,"job-info")]/p/time/@title').extract()
            job_zhicheng = [x.strip() for x in row.xpath('.//div[contains(@class,"job-info")]/h3/a/text()').extract()]
            job_company_name = row.xpath('.//div[contains(@class,"sojob-item-main")]//p[@class="company-name"]/a/text()').extract()
            job_url = row.xpath('.//div[contains(@class,"job-info")]/h3/a/@href').extract()
            job_company_url = row.xpath('.//div[contains(@class,"sojob-item-main")]//p[@class="company-name"]/a/@href').extract()

            item = LiepinItem()
            item["liepin_jingyan"] = job_jingyan
            item["liepin_xueli"] = job_xueli
            item["job_xinshui"] = job_xinshui
            item["job_zhicheng"] = job_zhicheng
            item["job_company_name"] = job_company_name
            item["job_url"] = job_url
            item["job_company_url"] = job_company_url
            yield item

