# -*- coding: utf-8 -*-
import scrapy
import os

# 继承scrapy.Spider类
class TestSpider(scrapy.Spider):
    """Scrape quotes from lab.scrapyd.cn and append each quote to a
    per-author text file inside the output directory, following the
    "next page" link until pagination ends.
    """

    name = 'test'  # unique spider name, used by `scrapy crawl test`
    # allowed_domains = ['test.com']
    start_urls = [
            "http://lab.scrapyd.cn",
        ]

    # Directory that collects every output file; created lazily in parse().
    OUTPUT_DIR = '名言名句'

    def parse(self, response):
        """Handle one listing page.

        Extracts every quote on the page, appends it (with its tags) to
        the author's file under OUTPUT_DIR, then yields a Request for
        the next page when one exists.

        :param response: scrapy Response for a quote listing page.
        :yields: scrapy.Request for the following page, if any.
        """
        # Create the output directory once per page; exist_ok avoids the
        # check-then-create race of the original os.path.exists + os.mkdir.
        os.makedirs(self.OUTPUT_DIR, exist_ok=True)

        for quote in response.css('div.quote'):
            content = quote.css('.text::text').extract_first()
            if not content:
                # extract_first() returns None when the selector matches
                # nothing; the original crashed with TypeError here.
                continue
            # Strip the trailing site watermark when present.
            if 'scrapy中文网' in content:
                content = content.split('scrapy中文网')[-2]
            author = quote.css('.author::text').extract_first()
            tags = ','.join(quote.css('.tags .tag::text').extract())
            # Build the target path directly instead of os.chdir():
            # mutating the process-wide cwd per item is fragile and
            # affects every other piece of code in the process.
            filepath = os.path.join(self.OUTPUT_DIR, f'{author}语录.txt')
            try:
                # Explicit UTF-8 so the Chinese text round-trips on any
                # platform regardless of the locale's default encoding.
                with open(filepath, 'a+', encoding='utf-8') as f:
                    f.write(content)
                    f.write('\n')
                    f.write(f'标签：{tags}')
                    f.write('\n--------\n')
            except OSError as e:
                print('写入失败：', e)

        # Follow pagination: yield a Request for the next page, routed
        # back into this same callback.
        next_page = response.css('.next a::attr(href)').extract_first()
        if next_page is not None:
            next_page = response.urljoin(next_page)
            yield scrapy.Request(next_page, callback=self.parse)