import scrapy
import pymongo
from scrapy import cmdline
from scrapy.http import HtmlResponse


class QingTingSpider(scrapy.Spider):
    """Spider for the QingTing FM mobile ranking page.

    Yields two kinds of items to the item pipeline:
      - ``type == "info"``: metadata for one ranked program
        (rank number, image URL, title, description, play count).
      - ``type == "image"``: the downloaded cover-image bytes plus a
        file name derived from the program title.
    """

    name = "qingting"
    allowed_domains = ["m.qingting.fm", "pic.qtfm.cn"]
    start_urls = ["http://m.qingting.fm/rank"]

    def parse(self, response: HtmlResponse, **kwargs):
        """Parse the rank page.

        For every entry in the rank list, yield an info dict for the
        pipeline and a follow-up Request that downloads the entry's
        cover image.
        """
        a_list = response.xpath('//div[@class="rank-list"]/a')
        for a_temp in a_list:
            rank_number = a_temp.xpath('./div[@class="badge"]/text()').extract_first()  # rank position
            img_url = a_temp.xpath('./img/@src').extract_first()  # cover image URL
            title = a_temp.xpath('./div[@class="content"]/div[@class="title"]/text()').extract_first()  # title
            desc = a_temp.xpath('./div[@class="content"]/div[@class="desc"]/text()').extract_first()  # description
            play_number = a_temp.xpath('.//div[@class="info-item"][1]/span/text()').extract_first()  # play count

            # yield hands the parsed data to the pipeline module
            yield {
                "type": "info",
                'rank_number': rank_number,
                'img_url': img_url,
                'title': title,
                'description': desc,
                'play_number': play_number
            }

            # Build a new Request by hand and give it to the downloader.
            # callback: the method that will parse the response
            # cb_kwargs: extra keyword args for the callback; must be a dict
            #            whose keys match the callback's parameter names.
            #
            # Guard: extract_first() returns None when the node is missing,
            # and scrapy.Request(None) raises ValueError — which would abort
            # this whole generator and drop every remaining rank entry.
            # Skip the image request instead; the info item above still
            # records img_url as None.
            if img_url:
                # Fall back to the rank number so parse_image never
                # concatenates None + '.png' (TypeError).
                yield scrapy.Request(
                    img_url,
                    callback=self.parse_image,
                    cb_kwargs={'image_name': title or rank_number or 'unknown'},
                )

    def parse_image(self, response, image_name):
        """Yield the downloaded image bytes with a .png file name."""
        yield {
            'type': 'image',
            'image_name': image_name + '.png',
            'image_content': response.body
        }


if __name__ == '__main__':
    # Launch the spider programmatically through Scrapy's CLI helper,
    # passing the argv as an explicit list instead of splitting a string.
    cmdline.execute(['scrapy', 'crawl', 'qingting'])
