
from scrapy_redis.spiders import RedisSpider
from b2bCategory.items import B2BMaincategoryLoader
from redis import Redis
from scrapy import log
from time import sleep
from bs4 import BeautifulSoup

class Myspider(RedisSpider):
    """Spider that reads start URLs from the Redis queue ``myspider:taobaoh5_urls``.

    For each response it finds the ``div.search-list`` element, walks its
    ``<li>`` entries, prints each anchor's ``href``, and returns the item
    built by ``B2BMaincategoryLoader`` (no fields are populated here).
    """
    name = 'b2bH5Category'
    redis_key = 'myspider:taobaoh5_urls'

    def __init__(self, *args, **kwargs):
        """Accept an optional ``domain`` kwarg: a comma-separated domain list.

        The domains are stored on ``allowed_domains`` so Scrapy's offsite
        middleware can use them.
        """
        domain = kwargs.pop('domain', '')
        # Bug fix: the attribute was misspelled 'allowed_domans', so Scrapy
        # never saw the configured domains. Also use a list instead of
        # filter(): on Python 3, filter() returns a one-shot iterator, which
        # would be exhausted after the first iteration by the framework.
        self.allowed_domains = [d for d in domain.split(',') if d]
        super(Myspider, self).__init__(*args, **kwargs)
        # Base URL; presumably used to absolutize relative category paths
        # elsewhere — TODO confirm, nothing in this chunk uses it.
        self.url = 'http://bj.58.com'

    def parse(self, response):
        """Parse a category page: print every link found in the search list.

        Returns the (currently empty) item from the loader so the pipeline
        still receives an item per response.
        """
        el = B2BMaincategoryLoader(response=response)
        html = BeautifulSoup(response.body, 'lxml')
        divs = html.find_all('div', class_="search-list")
        if divs:
            for li in divs[0].find_all('li'):
                a = li.find('a')
                # Guard: an <li> without an <a> (find returns None) or an
                # anchor without an href raised TypeError/KeyError before.
                if a is not None and a.has_attr('href'):
                    print(a['href'])
        return el.load_item()