# -*- coding: utf-8 -*-
# @Date     :5/9/21 4:00 PM
# @Author   :xuhe
import random
import re
from time import sleep
import parsel
import scrapy
from bossPro.items import BossproItem
from fake_useragent import UserAgent


class BossSpider(scrapy.Spider):
    """Crawl job postings for the keyword 美术 ("art") from liepin.com.

    Walks up to 10 search-result pages, follows every posting link found
    on each page, and yields one ``BossproItem`` per posting.

    NOTE(review): despite the project/spider name "boss", the target site
    is liepin.com — presumably intentional; confirm with the author.
    """

    name = 'boss'
    # allowed_domains intentionally unset so detail-page requests to
    # www.liepin.com are not filtered by the offsite middleware.
    start_urls = ['https://www.liepin.com/zhaopin/?key=美术']
    # %d is substituted with the 1-based page number during pagination.
    url = 'https://www.liepin.com/zhaopin/?key=美术&curPage=%d'
    page_num = 1  # next result page to request; crawl stops after page 10
    ua = UserAgent()  # rotating User-Agent source, one fresh value per request
    headers = {"User-Agent": ua.random}

    # Characters illegal in Windows file names; used by change_name() so the
    # sanitized fields can safely become file names downstream.
    _ILLEGAL_NAME_CHARS = re.compile(r'[\\/:?*"<>|]')

    @staticmethod
    def _first_text(selector, css):
        """Return the stripped text of the first CSS match, or '' if absent.

        Guards against AttributeError when a page layout variant is missing
        the element (.get() returning None).
        """
        text = selector.css(css).get()
        return text.strip() if text else ''

    def parse_detail(self, response):
        """Extract one job posting's fields and yield a populated BossproItem.

        Each field is queried with two comma-separated CSS alternatives
        because liepin serves (at least) two detail-page layouts.
        """
        # A scrapy Response is already a parsel-backed selector, so query it
        # directly instead of re-parsing response.text with parsel.Selector.
        job_name = response.css('h1::text').get() or ''
        job_company = self._first_text(response, ".title-info h3 a::text,.title h3::text")
        wage = self._first_text(response, '.job-item-title::text,.job-main-title::text')
        position = self._first_text(response, '.job-main p span::text,.job-item span a::text')
        job_list = response.css('.resume span::text,.job-qualifications span::text').getall()
        job_info = '、'.join(job_list)  # requirement tags -> single string

        job_desc_list = response.css('.job-description .content::text').getall()
        job_desc = '\n'.join(job_desc_list)

        item = BossproItem()
        # Name fields are sanitized so pipelines can use them as file names.
        item['job_name'] = self.change_name(job_name)
        item['job_company'] = self.change_name(job_company)
        item['wage'] = wage
        item['position'] = position
        item['job_info'] = job_info
        item['job_desc'] = job_desc
        yield item
        print(job_name, job_company, wage, position, job_info, sep=' | ')
        print(job_desc)

    def parse(self, response):
        """Parse one search-result page: follow each posting, then paginate."""
        if self.page_num > 10:
            return  # crawl at most 10 result pages

        li_list = response.xpath('//div[@class="container"]/div/div[1]/div[@class="sojob-result "]/ul/li')
        new_url = self.url % self.page_num
        print(f'====================================正在爬取第{self.page_num}页内容====================================')
        self.page_num += 1

        for li in li_list:
            job_link = li.xpath('./div/div/h3/a/@href').extract_first()
            if not job_link:
                continue  # malformed listing entry: nothing to follow
            # Site-relative links need the scheme+host prepended.
            if job_link.startswith('/'):
                job_link = 'https://www.liepin.com' + job_link
            # HACK: time.sleep() blocks Scrapy's event loop (Twisted reactor);
            # prefer the DOWNLOAD_DELAY / RANDOMIZE_DOWNLOAD_DELAY settings.
            sleep(random.uniform(2, 5))
            yield scrapy.Request(url=job_link, callback=self.parse_detail,
                                 headers={"User-Agent": self.ua.random})

        # Recurse into the next result page with a fresh User-Agent.
        yield scrapy.Request(url=new_url, callback=self.parse,
                             headers={"User-Agent": self.ua.random})

    def change_name(self, name):
        """Replace characters illegal in Windows file names with '_'."""
        return self._ILLEGAL_NAME_CHARS.sub('_', name)
