from urllib.parse import urljoin

import scrapy
from scrapy import cmdline, Selector
from ff import items
from icecream import ic

def maps(x):
    """Return the first element of *x*, or '' when *x* is empty/None.

    Replaces the original ``maps = lambda x: ...`` assignment (PEP 8 E731:
    don't bind a lambda to a name — use ``def`` so tracebacks show the name).
    Behavior is unchanged.
    """
    return x[0] if x else ''

class XlSpider(scrapy.Spider):
    """Scrape the Douban Top-250 movie chart.

    For each movie on a listing page it yields one ``items.FfItem`` with:
    ``title`` (whitespace-stripped title parts), ``movieinfo``
    (';'-joined info lines, whitespace stripped), ``star`` (rating text),
    ``qu`` (the quote line, '' when absent).  It then follows the "next
    page" link until the last page.
    """
    name = 'xl'
    # allowed_domains = ['www.douban.com']
    start_urls = ['https://movie.douban.com/top250']

    def parse(self, response):
        """Parse one listing page: yield one item per movie, then paginate."""
        ic(response)
        selecter = Selector(response)
        tags = selecter.xpath('//div[@class="info"]')
        for i in tags:
            # BUG FIX: build a fresh item per movie.  The original created a
            # single FfItem before the loop and mutated/re-yielded it, so any
            # pipeline/exporter that keeps references sees every earlier item
            # overwritten with the last movie's fields.
            item = items.FfItem()
            title = i.xpath('div[@class="hd"]/a/span/text()').extract()
            titles = ''.join(title)
            movieinfo = i.xpath('div[@class="bd"]/p/text()').extract()
            star = i.xpath('div[@class="bd"]/div/span[@class="rating_num"]/text()').extract_first()
            qu = maps(i.xpath('div[@class="bd"]/p[@class="quote"]/span/text()').extract())
            ic(qu)
            # Strip newlines and all whitespace from the concatenated parts.
            item['title'] = ''.join(titles.replace('\n', '').replace(' ', '').split())
            item['movieinfo'] = ''.join(';'.join(movieinfo).replace('\n', '').replace(' ', '').split())
            item['star'] = star
            item['qu'] = qu
            yield item
        # Pagination: the "next" link is absent on the final page.
        # Renamed from `next` to avoid shadowing the builtin.
        next_page = selecter.xpath('//span[@class="next"]/a/@href').extract_first()
        if next_page:
            yield scrapy.Request(urljoin(response.url, next_page), callback=self.parse)



if __name__ == '__main__':
    # Launch this spider directly; equivalent to running `scrapy crawl xl`.
    cmdline.execute(['scrapy', 'crawl', 'xl'])
