#They must subclass Spider and define the initial requests to make, optionally how to follow links in the pages, and how to parse the downloaded page content to extract data.
import scrapy
import bs4
from ..items import ScrapyTestItem
# from scrapy.utils.response import open_in_browser
# from scrapy.crawler import CrawlerProcess

# 创建爬虫类，继承自scrapy.Spider --> 最基础的类
class JobuiSpider(scrapy.Spider):
    """Crawl jobui.com's company-ranking page and scrape each company's job listings.

    Flow: ``parse`` extracts the company links from the ranking page and yields
    one follow-up request per company; ``parse_job`` parses that company's jobs
    page and yields ``ScrapyTestItem`` instances for the item pipeline.
    """

    name = 'jobui'                            # spider name -- must be unique; need not match the filename
    allowed_domains = ['www.jobui.com']       # crawl scope (scheme stripped)
    start_urls = ['http://www.jobui.com/rank/company/']   # seed page to start crawling from

    def parse(self, response):
        """Parse the ranking page and schedule a request for each company's jobs page.

        Yields ``scrapy.Request`` objects; the engine hands them to the
        downloader and calls ``parse_job`` on each result.
        """
        bs = bs4.BeautifulSoup(response.text, 'html.parser')
        ul_list = bs.find_all('ul', class_="textList flsty cfix")
        for ul in ul_list:
            for a in ul.find_all('a'):
                company_id = a.get('href')
                if not company_id:
                    # Anchor without an href -- nothing to follow.
                    continue
                # href looks like "/company/<id>/"; the trailing slash before
                # "jobs/" is required, otherwise the page returns no data.
                real_url = 'https://www.jobui.com{id}jobs/'.format(id=company_id)
                # dont_filter=True bypasses the duplicate filter so every
                # company page is fetched even if a URL repeats.
                yield scrapy.Request(real_url, callback=self.parse_job, dont_filter=True)

    def parse_job(self, response):
        """Parse one company's jobs page and yield a ScrapyTestItem per posting.

        Debugging aids (commented out at module level):
          * ``open_in_browser(response)`` -- check whether a captcha page came back
          * ``from scrapy.shell import inspect_response; inspect_response(response, self)``
        """
        bs = bs4.BeautifulSoup(response.text, 'html.parser')
        company_tag = bs.find(id="companyH1")
        if company_tag is None:
            # No company header -- likely a captcha / anti-bot page; skip
            # instead of crashing with AttributeError.
            return
        company = "".join(company_tag.text.split())   # collapse whitespace into one string
        for data in bs.find_all('div', class_="c-job-list"):
            position_tag = data.find('h3')
            spans = data.find_all('span')
            if position_tag is None or len(spans) < 2:
                # Malformed listing block -- skip rather than raise
                # AttributeError/IndexError and abort the whole page.
                continue
            item = ScrapyTestItem()
            item['company'] = company
            item['position'] = position_tag.text
            item['address'] = spans[0].text
            item['detail'] = spans[1].text
            yield item    # hand the item to the engine, which runs process_item

# process = CrawlerProcess()
# process.crawl(JobuiSpider)
# process.start()
# 调试用

        #pass 
        # 正文代码  selectors = response.xpath('从Chrome开发者工具中获得的表达式')
        # for 循环遍历
        # for selector in selectors:
        #     a = selector.xpath('./text()').get()    # 从当前节点位置提取
        #     b = selector.xpath('./text()').get()
        #     print(a, b)
        
        # 爬取链接：跟踪所有链接到下一页，直到找不到一个为止
        # next_page = response.css('li.next a::attr(href)').get()
        # if next_page is not None:
        #     next_page = response.urljoin(next_page)
        #     yield scrapy.Request(next_page, callback=self.parse)
        # callback的应用：when you yield a Request in a callback method, Scrapy will schedule that request to be sent and register a callback method to be executed when that request finishes.
        # yield的原理： 函数转为生成器 https://www.runoob.com/w3cnote/python-yield-used-analysis.html