# -*- coding: utf-8 -*-
import scrapy
from bs4 import Tag
from scrapy import Request
import bs4
from zhilian_homework.items import ZhilianHomeworkItem


class ZhilianSpider(scrapy.Spider):
    """Scrape python job listings (name, salary, required years) from sou.zhaopin.com."""

    name = 'zhilian'
    allowed_domains = ['zhaopin.com']
    # start_urls = ['https://sou.zhaopin.com/']

    def start_requests(self):
        """Yield the single seeded search request (kw=python, city jl=822)."""
        start_url = 'https://sou.zhaopin.com/?jl=822&in=10100&kw=python&kt=3&sf=0&st=0'
        yield Request(start_url, callback=self.parse)

    def parse(self, response):
        """Parse one search-results page and yield one item per job posting.

        Yields:
            ZhilianHomeworkItem with ``name``, ``salary`` and ``years`` fields.
        """
        # Pin the parser explicitly: omitting ``features`` lets bs4 pick
        # whichever parser happens to be installed, which is
        # environment-dependent and raises a GuessedAtParserWarning.
        soup = bs4.BeautifulSoup(response.text, "html.parser")

        container = soup.find("div", attrs={"class": "contentpile__content"})
        if not isinstance(container, Tag):
            # Layout changed or an anti-bot page was served; skip this page
            # instead of crashing the crawl (asserts are also stripped by -O).
            self.logger.warning("result container not found on %s", response.url)
            return

        for job in container.find_all(
                "div",
                attrs={"class": "contentpilr__content__wrapper clearfix"}):
            item = ZhilianHomeworkItem()

            # Job title lives in the ``title`` attribute of the first child
            # of the job-name <span>.
            jobname_div = job.find(
                "div",
                attrs={"class": "contentpilr__content__wrapper__item__info__box__jobname jobName"})
            title_span = jobname_div.find(
                "span",
                attrs={"class": "contentpilr__content__wrapper__item__info__box__jobname__title"})
            item["name"] = list(title_span)[0].attrs['title']

            # Hoisted: the description <div> is needed for both salary and
            # years; the original looked it up twice per job.
            desc_div = job.find(
                "div",
                attrs={"class": "contentpilr__content__wrapper__item__info__box__job jobDesc"})
            # NOTE: "saray" is the site's own (misspelled) CSS class name.
            item["salary"] = desc_div.find(
                "p",
                attrs={"class": "contentpilr__content__wrapper__item__info__box__saray"}).text

            # Required experience is the second child of the demand <li>.
            demand_li = desc_div.find(
                "li",
                attrs={"class": "contentpilr__content__wrapper__item__info__box__job__demand__item"})
            item["years"] = list(demand_li)[1].string

            yield item






