import scrapy
from ..items import PptItem


class PptSpider(scrapy.Spider):
    """Three-level crawl of www.1ppt.com.

    Flow: category index -> category listing -> PPT detail page ->
    download-entry page, accumulating one ``PptItem`` per PPT along the way.
    """

    name = "ppt"
    allowed_domains = ["www.1ppt.com"]
    start_urls = ["https://www.1ppt.com/xiazai/"]

    def parse(self, response):
        """Level 1: extract category names and follow each category link."""
        for link in response.xpath('//div[@class="col_nav clearfix"]/ul/li/a'):
            href = link.xpath('./@href').get()
            if not href:
                # Guard: without this, ``"..." + None`` raises TypeError.
                continue
            item = PptItem()
            item['class_name'] = link.xpath('./text()').get()
            # urljoin handles relative and absolute hrefs; avoids a
            # hard-coded host prefix that breaks on absolute links.
            yield scrapy.Request(
                url=response.urljoin(href),
                meta={'meta1': item},
                callback=self.parse_two,
            )

    def parse_two(self, response):
        """Level 2: extract PPT names on a listing page, follow detail links."""
        meta1 = response.meta['meta1']
        for link in response.xpath('//ul[@class="tplist"]/li/h2/a'):
            href = link.xpath('./@href').get()
            if not href:
                continue  # skip anchors without a target instead of crashing
            item = PptItem()
            item['ppt_name'] = link.xpath('./text()').get()
            item['class_name'] = meta1['class_name']
            yield scrapy.Request(
                url=response.urljoin(href),
                meta={'meta2': item},
                callback=self.parse_three,
            )

    def parse_three(self, response):
        """Level 3: follow the first download-entry link on the detail page."""
        meta2 = response.meta['meta2']
        href = response.xpath('//ul[@class="downurllist"]/li[1]/a/@href').get()
        if not href:
            return  # page has no download entry; nothing to yield

        # NOTE(original author): the site added slider-CAPTCHA verification,
        # so the scheduler may no longer fetch this URL successfully.
        yield scrapy.Request(
            url=response.urljoin(href),
            meta={'item': meta2},
            callback=self.parse_forth,
        )

    def parse_forth(self, response):
        """Level 4: extract the final download URL and emit the finished item."""
        item = response.meta['item']
        # May be None if the page lacks a download list; downstream pipelines
        # should treat a missing ppt_down_url as "link unavailable".
        item['ppt_down_url'] = response.xpath('//ul[@class="downloadlist"]/li[1]/a/@href').get()
        yield item
