import scrapy
from picWish.items import PicwishItem

class PicwishSpider(scrapy.Spider):
    """Spider that scrapes image ``srcset`` URLs and their captions from
    the www.picwish.cn landing page and yields them as ``PicwishItem``s
    for pipeline-based persistence.
    """

    name = "picwish"
    allowed_domains = ["www.picwish.cn"]
    start_urls = ["https://www.picwish.cn"]

    # Relative XPath from a swiper slide <div> to the card container that
    # holds the <img srcset> / <p> caption pairs.
    _CARD_PATH = './a/div[@class="cardClass flex flex-col h-full rounded-[30px] overflow-hidden"]'

    def parse(self, response):
        """Parse the landing page.

        Locates every slide in the swiper carousel, pairs each image's
        ``srcset`` with its caption text, and yields one ``PicwishItem``
        per pair to the item pipeline for persistent storage.

        :param response: the ``scrapy.http.Response`` for ``start_urls[0]``.
        :yields: ``PicwishItem`` with ``img_url`` and ``img_msg`` fields set.
        """
        # extract_first() returns None instead of raising IndexError when
        # the page has no <title> (extract()[0] would crash).
        title = response.xpath('//title/text()').extract_first()
        self.logger.info('Page title: %s', title)

        div_list = response.xpath(
            '//section[@class="lg:pt-0 lg:pb-[160px] | pb-[84px] pt-10"]'
            '//div[@class="swiper mySwiper relative !py-25 !2xl:overflow-inherit"]'
            '/div[@class="swiper-wrapper"]/div'
        )
        self.logger.info('Found %d slides', len(div_list))

        for div in div_list:
            # Evaluate each XPath once per slide instead of once per index.
            # zip() pairs URL with caption and naturally handles slides that
            # carry fewer (or more) than the two pairs the old hard-coded
            # `while i < 2` / extract()[i] indexing assumed — that version
            # raised IndexError on any slide with a single pair.
            urls = div.xpath(self._CARD_PATH + '//img/@srcset').extract()
            msgs = div.xpath(self._CARD_PATH + '//p/text()').extract()
            for img_url, img_msg in zip(urls, msgs):
                self.logger.debug('%s : %s', img_msg, img_url)

                item = PicwishItem()
                item['img_url'] = img_url
                item['img_msg'] = img_msg

                # Hand the item to the configured pipelines for storage.
                yield item



