import scrapy


class AuthorSpider(scrapy.Spider):
    """Crawl poem listing pages on gushiwen365.com, follow each poem's
    detail page, and yield one item per poem.

    Side effect: every extracted poem is also appended to ``poem.txt``
    (consider moving that into an item pipeline).
    """

    name = "author"

    start_urls = ["http://www.gushiwen365.com/poetry/"]
    # Kept for reference; not currently used by the parse callbacks.
    base_url = 'http://www.gushiwen365.com'

    def parse(self, response):
        """Parse a listing page: follow every poem link, then paginate.

        Yields requests for each poem detail page (handled by
        ``parse_author``) and for the next listing page (handled by
        this same callback).
        """
        poem_page_links = response.css('.gs-cont h3 a::attr(href)').getall()
        self.logger.info('********************************\n'*3)
        self.logger.info(poem_page_links)
        yield from response.follow_all(poem_page_links, self.parse_author)
        # Follow pagination so the whole listing gets crawled.
        pagination_links = response.css(".next-page a")
        yield from response.follow_all(pagination_links, self.parse)

    def parse_author(self, response):
        """Parse a poem detail page and yield one item dict.

        The item carries title, author, dynasty, poem body and the
        source URL; the joined text is also appended to ``poem.txt``.
        """
        def extract_with_css(query):
            # Join all text fragments matched by the CSS query.
            return ''.join(response.css(query).getall())

        title = extract_with_css('h1::text')
        dynasty = extract_with_css('.gs-works .gs-service-dynsty::text')
        author = extract_with_css('.gs-works .gs-service-author::text')
        body = extract_with_css('.gs-works .gs-conview-def *::text')
        self.logger.info('***************************\n'*3)
        content = '\n'.join((title, dynasty, author, body))
        self.logger.info(content)
        with open('poem.txt', 'a', encoding='utf-8') as f:
            f.write(content)
        # NOTE(review): the 'dynsty' key is a typo for "dynasty" but is
        # kept as-is so downstream pipelines/exports keep working.
        yield {
            'title': title,
            'author': author,
            'dynsty': dynasty,
            'content': body,
            'url': response.url,
        }