from scrapy.spider import Spider
from scrapy.selector import Selector
from tutorial.items import CookItem
from tutorial.lib import html2text
from tutorial.lib import readUrl


class CookSpider(Spider):
    """Spider that scrapes recipe/project pages from instructables.com.

    Start URLs are loaded from a JSON link file; each matched page is
    parsed into a single ``CookItem`` carrying the page title, a
    thumbnail image URL, and the plain-text content of every step.
    """

    name = "cook"
    allowed_domains = ["instructables.com"]
    # NOTE(review): user-specific absolute path — consider moving this to a
    # spider argument or settings entry so the spider is portable.
    start_urls = readUrl.readUrl("/home/kenshin/scrapyproject/tutorial/link.json")

    def parse(self, response):
        """Parse one instructable page into a ``CookItem``.

        Populates:
          - ``title``: list of text nodes extracted from the page ``<h1>``
          - ``imgthumb``: ``src`` of the first photo in the first step
          - ``content``: newline-joined plain text of every step
            (photoset images followed by the step body, each converted
            from HTML via ``html2text``)

        Raises IndexError if the page has no step containers or a step
        lacks a photo/body (same behavior as before — TODO confirm that
        all crawled pages are guaranteed to have at least one step).
        """
        sel = Selector(response)
        item = CookItem()
        cook_title = sel.xpath("//h1/text()").extract()
        step_containers = sel.xpath("//div[@class='step-container']")
        # Thumbnail: the first photo of the first step.
        imgthum = step_containers[0].xpath(
            ".//div[@class='photo-container']/img/@src")[0].extract()
        parts = []
        for container in step_containers:
            imgs = container.xpath(
                ".//div[@class='photo-container']/img[contains(@class,'photoset')]"
            ).extract()
            for img_html in imgs:
                # Skip 1x1 tracking/placeholder pixel images.
                if "pixel.png" not in img_html:
                    parts.append(html2text.html2text(img_html))
            parts.append(html2text.html2text(
                container.xpath(".//div[@class='txt step-body']").extract()[0]))
        item["title"] = cook_title
        item["imgthumb"] = imgthum
        item["content"] = "\n".join(parts)
        return item

