# -*- coding: utf-8 -*-
import scrapy
# from selenium import webdriver
from boss.items import BossItem
from bs4 import BeautifulSoup


class ZhipinSpider(scrapy.Spider):
    """Crawl python job listings on zhipin.com (city 101210100) and yield
    one ``BossItem`` per job detail page."""

    name = 'zhipin'
    allowed_domains = ['www.zhipin.com']

    def __init__(self, *args, **kwargs):
        # Forward everything to Spider.__init__ — skipping this breaks
        # Scrapy's spider contract (e.g. `scrapy crawl zhipin -a key=val`).
        super().__init__(*args, **kwargs)
        # Browser-like headers sent with every request (anti-bot measure).
        self.headers = {
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2844.4 Safari/537.36',
        }

    def start_requests(self):
        """Yield one request per listing page (pages 1-10)."""
        # Listing pages are 1-based (ka=page-1 marks the first page); the old
        # range(10) started at a nonexistent page 0.
        start_urls = [
            'https://www.zhipin.com/c101210100/h_101210100/?query=python&page={0}&ka=page-{0}'.format(i)
            for i in range(1, 11)
        ]
        for url in start_urls:
            # Attach the headers built in __init__ — they were previously
            # defined but never used on any request.
            yield scrapy.Request(url=url, headers=self.headers, callback=self.parse)

    def parse(self, response):
        """Extract each job link on a listing page and follow it to the
        detail page."""
        for link in response.css('div.info-primary > h3 > a'):
            href = link.css('::attr(href)').extract_first()
            lid = link.css('::attr(data-lid)').extract_first()
            ka = link.css('::attr(ka)').extract_first()
            if not href:
                continue  # anchor without a detail-page href — nothing to follow
            # NOTE(review): the old code also concatenated the anchor's
            # `target` attribute (e.g. "_blank") into the `ka` query value,
            # corrupting the URL; `target` is a browser hint, not a parameter.
            url = 'https://www.zhipin.com{}?ka={}&lid={}'.format(href, ka, lid)
            yield scrapy.Request(url=url, headers=self.headers, callback=self.detail_parse)

    def detail_parse(self, response):
        """Scrape one job detail page into a BossItem and yield it."""
        item = BossItem()
        # extract_first() returns None (instead of raising IndexError) when a
        # selector matches nothing, so one malformed page can't kill the crawl.
        item['name'] = response.css('div.info-primary > div.name::text').extract_first()
        item['baseinfo'] = response.css('div.info-primary > p::text').extract_first()
        item['desc'] = response.css('div.detail-content > div:nth-child(1) > div').extract_first()
        item['space'] = response.css('div.location-address::text').extract_first()
        self.logger.debug('scraped item: %s', item)  # logger instead of print
        yield item


