# -*- coding: utf-8 -*-
import random
from pprint import pprint

import scrapy
from copy import deepcopy

import time

from ..items import BobopicItem


class BbpSpider(scrapy.Spider):
    """Crawl bobopic.com: read article links from the front page, then
    emit one item per gallery image found on each article page."""

    name = 'bbp'
    allowed_domains = ['bobopic.com']
    start_urls = ['https://bobopic.com/']
    # Throttle via Scrapy's scheduler instead of time.sleep() in the
    # callback, which would block the whole Twisted reactor and stall
    # every concurrent request.
    custom_settings = {"DOWNLOAD_DELAY": 5}

    def parse(self, response):
        """Parse the listing page and yield one Request per article.

        Each request carries a partially-filled, deep-copied item in
        ``meta`` so the detail callback can finish populating it without
        sharing state with sibling requests.
        """
        articles = response.xpath(".//div[@class='row posts-wrapper']/article")
        for article in articles:
            item = BobopicItem()
            title_link = article.xpath('./div/div[2]//h2[@class="entry-title"]/a')
            item["file_name"] = title_link.xpath('./text()').extract_first()
            item["imgs_href"] = title_link.xpath('./@href').extract_first()
            if not item["imgs_href"]:
                # Guard: scrapy.Request raises on a None/empty URL, so
                # skip articles whose title link could not be extracted.
                continue
            self.logger.debug("article link: %s", item["imgs_href"])
            yield scrapy.Request(
                item["imgs_href"],
                callback=self.detail_imgs,
                # deepcopy: each request must own its item; callbacks run
                # concurrently and would otherwise overwrite shared state.
                meta={"item": deepcopy(item)},
                dont_filter=True,
            )

    def detail_imgs(self, response):
        """Parse an article page and yield one item per gallery image.

        ``title`` is the 1-based position of the image on the page.
        enumerate() replaces the original ``images.index(img)``, which
        was O(n) per lookup and returned the wrong position whenever the
        same image URL appeared twice.
        """
        base_item = response.meta["item"]
        images = response.xpath(
            './/div[@class="entry-content u-clearfix"]/p[position()>2]/img/@src'
        ).extract()
        for position, img_url in enumerate(images, start=1):
            item = deepcopy(base_item)
            item["image_urls"] = [img_url]
            item["title"] = position
            yield item