import scrapy


class QuotesSpider(scrapy.Spider):
	"""Scrape quote text, author and tags from quotes.toscrape.com.

	Run with:
		scrapy crawl quotes -O quotes-humor.json -a tag=humor

	where ``-O quotes-humor.json`` selects the export file/format and
	``-a tag=humor`` passes a spider argument (exposed as ``self.tag``).
	"""
	
	name = 'quotes'
	
	def start_requests(self):
		"""Yield the initial request(s) that seed the crawl.

		Spider arguments supplied via ``-a name=value`` become instance
		attributes; ``tag`` is None when not given on the command line.
		"""
		tag = getattr(self, 'tag', None)
		if tag is not None:
			# Route through Scrapy's logging rather than bare print().
			self.logger.info('tag argument: %s', tag)
		
		urls = [
			'http://quotes.toscrape.com/page/1/',
		]
		for url in urls:
			yield scrapy.Request(url=url, callback=self.parse)
	
	def parse(self, response, **kwargs):
		"""Extract quote items from a listing page and follow pagination.

		Yields one dict per quote (keys: text, author, tags), then a
		Request for the next page when a "next" link is present.
		"""
		for quote in response.css('div.quote'):
			yield {
				'text': quote.css('span.text::text').get(),
				'author': quote.css('small.author::text').get(),
				'tags': quote.css('div.tags a.tag::text').getall()
			}
		
		next_page = response.css('li.next a::attr(href)').get()
		if next_page is not None:
			# Pagination hrefs are relative; resolve against response.url.
			next_page = response.urljoin(next_page)
			yield scrapy.Request(next_page, callback=self.parse)
