import scrapy
from FontWorks.items import FontworksItem
class FontworksSpider(scrapy.Spider):
    """Crawl the Fontworks anime showcase and yield one item per work.

    Each card on https://fontworks.co.jp/case/anime/ lists an anime title,
    the Fontworks fonts it uses, a production credit, and a thumbnail image.
    Items are handed to the configured pipelines one card at a time.
    """

    name = 'fontworks'
    allowed_domains = ['fontworks.co.jp']
    start_urls = ['https://fontworks.co.jp/case/anime/']

    # Listing pages 2..4 share the same card layout; page 1 is the start URL.
    # NOTE(review): the page count is hard-coded — confirm the site still has
    # exactly 4 pages, or switch to following the site's "next" link instead.
    PAGE_RANGE = range(2, 5)

    def parse(self, response):
        """Extract every showcase card on the page, then queue pages 2-4.

        :param response: a listing-page Response (start URL or a paginated URL).
        :yields: ``FontworksItem`` objects whose fields are lists of strings
            (convenient for building a dict and then a DataFrame downstream;
            use ``",".join(...)`` on a field if a flat string is preferred),
            followed by follow-up ``scrapy.Request`` objects handled by this
            same method.
        """
        # Each card lives in its own "p-indexSection" container; scope all
        # field XPaths to the card so fields from different cards line up.
        for anime in response.xpath('//div[@class="p-indexSection"]'):
            yield FontworksItem(
                title=anime.xpath('.//p[@class="c-card__title"]/text()').extract(),
                font=anime.xpath('.//p[@class="c-card__fonts"]/text()').extract(),
                credit=anime.xpath('.//p[@class="c-card__credit"]/small/text()').extract(),
                photo=anime.xpath('.//img/@src').extract(),
            )

        # Queue the remaining listing pages. scrapy.Request issues a GET;
        # `callback` takes the function object itself, uncalled.
        # (The original code also stuffed the *last* card's title into meta —
        # it was never read back and raised NameError on an empty page, and a
        # trailing debug print() had the same crash risk; both were removed.)
        for page in self.PAGE_RANGE:
            next_url = 'https://fontworks.co.jp/case/anime/page/{}'.format(page)
            yield scrapy.Request(url=next_url, callback=self.parse)