import scrapy
from leiciSpider.items import LeicispiderItem
import re

class LeiciSpider(scrapy.Spider):
    """Three-level crawler for lei-ci.com product pages.

    Flow: category navigation (``parse``) -> per-category product list
    (``parse_category``) -> per-product detail page (``parse_detail``),
    which yields the completed item.
    """

    name = 'leici'
    allowed_domains = ['lei-ci.com']
    start_urls = ['http://www.lei-ci.com/product/9/']

    def parse(self, response):
        """Extract category name/link from the nav bar and follow each one.

        Yields one Request per category; the partially-filled item travels
        to ``parse_category`` via ``meta``.
        """
        li_list = response.xpath("//ul[contains(@class, 'nav_03')]/li")
        for li in li_list:
            item = LeicispiderItem()
            item["category"] = li.xpath("./a/text()").extract_first()
            # urljoin handles both relative and absolute hrefs safely,
            # unlike hardcoded string concatenation.
            item['src'] = response.urljoin(li.xpath("./a/@href").extract_first())
            yield scrapy.Request(
                url=item["src"],
                callback=self.parse_category,
                meta={"item": item},
            )

    def parse_category(self, response):
        """Extract every product on a category page and follow its detail link.

        BUG FIX: the original mutated the single shared ``item`` from
        ``response.meta`` inside the loop before scheduling each Request.
        Scrapy requests run asynchronously, so every ``parse_detail``
        callback saw only the LAST product's ``content``. Each detail
        request now gets its own copy of the item.
        """
        base_item = response.meta["item"]
        li_list = response.xpath("//ul[@class='product-list clearfix']/li")
        for li in li_list:
            content = {}
            content["name"] = li.xpath("./a/@title").extract_first()
            content["href"] = response.urljoin(li.xpath("./a/@href").extract_first())
            # Copy per request so concurrent callbacks don't share state.
            # Shallow copy is sufficient: ``content`` is a fresh dict each
            # iteration and is the only mutable value we touch downstream.
            item = base_item.copy()
            item["content"] = content
            yield scrapy.Request(
                url=content["href"],
                callback=self.parse_detail,
                meta={"item": item},
            )

    def parse_detail(self, response):
        """Scrape the product description text and yield the finished item.

        Joins all non-blank text nodes under the description div into a
        single whitespace-stripped string.
        """
        item = response.meta["item"]
        item["content"]["desc"] = "".join(
            x.strip()
            for x in response.xpath(
                "//div[contains(@class, 'description')]//text()"
            ).extract()
            if x.strip() != ""
        )
        yield item