# -*- coding: utf-8 -*-
import scrapy


class StdSpiderSpider(scrapy.Spider):
    """Scrape Java job listings in Beijing from lagou.com.

    For each listing page, yields one dict ``{"datas": [...]}`` holding the
    title/salary/company of every posting on that page, then follows the
    "next page" link until pagination ends.
    """

    name = 'std_spider'
    # Only requests to lagou.com are issued; everything else is filtered out.
    allowed_domains = ['lagou.com']
    start_urls = ['https://www.lagou.com/beijing-zhaopin/Java/?labelWords=label']

    def parse(self, response):
        """Parse one listing page and schedule the next one.

        Args:
            response: scrapy Response for a lagou.com listing page.

        Yields:
            dict: ``{"datas": [{"title", "salary", "company"}, ...]}``.
            scrapy.Request: request for the next page, when one exists.
        """
        datas = []
        # Each job posting is an <li class="con_list_item default_list">.
        # Note: do NOT call .extract() here — we need SelectorList to keep
        # querying inside each item.
        for item in response.css("li[class='con_list_item default_list']"):
            # extract_first() returns None instead of raising IndexError
            # when a field is missing (extract()[0] would crash the spider
            # on a single malformed listing).
            title = item.css("h3::text").extract_first()
            salary = item.css("span[class='money']::text").extract_first()
            company_name = item.css("div[class='company_name'] a ::text").extract_first()
            datas.append({"title": title, "salary": salary, "company": company_name})

        yield {"datas": datas}

        # Pagination: the last a.page_no link is the "next page" arrow.
        pages = response.css("a[class='page_no']")
        if not pages:
            # No pagination controls on this page — nothing more to crawl.
            return
        next_page_url = pages[-1].attrib.get("href")
        # Lagou renders the disabled "next" link with a javascript: pseudo-href
        # on the last page; following it would loop or error out.
        if not next_page_url or next_page_url.startswith("javascript"):
            return
        # urljoin resolves relative hrefs against the current page URL.
        yield scrapy.Request(response.urljoin(next_page_url), callback=self.parse)