import scrapy


from lp.items import LpItem
from fake_useragent import UserAgent

import os, time, requests
import copy

# def gethttpdaili():
# 	url = ''
# 	t1 = time.time()
# 	r = requests.get(url)
# 	t2 = time.time()
# 	ipport = r.text.strip()
# 	#print(ipport)
# 	#print("时间差:" , (t2 - t1))
# 	ret

class LiepinSpider(scrapy.Spider):
    """Crawl Liepin search-result pages for 'big data development' jobs.

    ``parse`` extracts one :class:`LpItem` per listing on a result page and
    follows the posting's detail link; ``detailed`` fills in the job
    description and yields the finished item.
    """
    name = 'liepin'
    allowed_domains = ['liepin.com']

    # Debug aid: show which working directory the spider was launched from.
    print(os.getcwd())

    # NOTE(review): hard-coded absolute path to the user-agent data file —
    # consider moving this into Scrapy settings so the project is portable.
    ua = UserAgent(path=r'E:/陈夏——1/1/lp/lp/spiders/ua.json')

    # NOTE: named 'start_urlss' (double s) rather than Scrapy's built-in
    # 'start_urls'; start_requests() below iterates this list explicitly.
    # Pre-built result-page URLs for pages 0-24 (40 jobs per page).
    start_urlss = ['https://www.liepin.com/zhaopin/?compkind=&dqs=&pubTime=&pageSize=40&salary=&'
                  'compTag=&sortFlag=15&degradeFlag=0&compIds=&subIndustry=&jobKind=&industries=&compscale=&'
                  'key=%E5%A4%A7%E6%95%B0%E6%8D%AE%E5%BC%80%E5%8F%91'
                  '&siTag=6bVho5nBxgRkeqpwjrE2YQ%7EfA9rXquZc5IkJpXC-Ycixw&d_sfrom=search_prime&d_ckId='
                  '57dd1f65172b6c704a51d6e962741b92&d_curPage=0&d_pageSize=40&d_headId=57dd1f65172b6c704a51d6e962741b92'
                  '&curPage={}'.format(i) for i in range(0, 25)]
    # start_urlss = ['https://www.liepin.com/zhaopin/?init=1&imscid=R000000058&d_sfrom=search_fp_bar&key=算法'.format(i) for i in range(0, 31)]

    def start_requests(self):
        """Issue one request per pre-built result page, each with a
        freshly randomized User-Agent header."""
        for url in self.start_urlss:
            yield scrapy.Request(url, callback=self.parse,
                                 headers={'User-Agent': self.ua.random})

    @staticmethod
    def _text(sel, xp):
        """Safe text extraction: '' instead of None for a missing node,
        so a partially filled listing cannot raise AttributeError."""
        return (sel.xpath(xp).get() or '').strip()

    def parse(self, response):
        """Extract one LpItem per job listing, then follow its detail link.

        Fixes vs. the original:
        * ``.get()`` can return None for a missing field — guarded via
          ``_text`` so one incomplete listing no longer aborts the page.
        * Detail hrefs may be relative; resolved with ``response.urljoin``.
        * Listings without a detail link are skipped instead of crashing
          ``scrapy.Request`` with a None URL.
        * The detail request now routes to ``self.detailed`` (previously it
          had no callback, so detail pages were re-parsed as list pages and
          no item was ever yielded).
        """
        for obj in response.xpath("//ul[@class='sojob-list']/li"):
            item = LpItem()
            url = obj.xpath(".//h3/a/@href").get()
            item['job'] = self._text(obj, ".//div[@class='job-info']/h3/a/text()")                 # job title
            item['salary'] = self._text(obj, ".//div[@class='job-info']/p/span[@class='text-warning']/text()")   # salary
            item['company'] = self._text(obj, ".//div[@class='company-info nohover']/p/a/text()")  # company name
            item['address'] = self._text(obj, ".//div[@class='job-info']/p/span[@class='edu']/preceding-sibling::*[1]/text()")   # location
            item['experience'] = self._text(obj, ".//div[@class='job-info']/p/span[@class='edu']/following-sibling::*[1]/text()")  # work experience
            item['education'] = self._text(obj, ".//div[@class='job-info']/p/span[@class='edu']/text()")          # education level

            if not url:
                # No detail link to follow — skip this listing.
                continue

            # Jump to the detail page, passing the partial item along via meta.
            yield scrapy.Request(response.urljoin(url), callback=self.detailed,
                                 meta={'item': item}, dont_filter=True,
                                 headers={'User-Agent': self.ua.random})

    def detailed(self, response):
        """Detail-page callback: attach the job description and yield
        the completed item (restored from previously commented-out code)."""
        item = response.meta['item']
        item['position_info'] = " ".join(
            response.xpath("//div[contains(@class,'job-description')]/div[1]/text()").getall())
        yield item

