#!/usr/bin/python
# -*- coding:utf-8 -*-

from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy import log
import time
import sys
from tutorial.items import SMTXItem
# Python 2-only hack: force the process-wide default string encoding to
# UTF-8 so implicit str<->unicode conversions of the scraped Chinese text
# do not raise UnicodeDecodeError.  reload(sys) is required because
# site.py deletes sys.setdefaultencoding at interpreter startup.
# NOTE(review): this is a global side effect and will break on Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')
from items import SXBMMItem
class SHMZXSpider(Spider):
    """Crawl the "request to buy" (qiugouxinxi) listing tag on xbmiaomu.com."""
    #log.start("log",loglevel='INFO')
    name = "xbmiaomu"
    # Route this spider's items to its dedicated pipeline only.
    custom_settings = {'ITEM_PIPELINES':{'tutorial.pipelines.xbmiaomuPipeline':700,}}
    allowed_domains = ["xbmiaomu.com"]
    start_urls = []
    # Switch: whether to crawl ALL published request-to-buy pages.
    # False = no (daily incremental), True = everything.  A full crawl
    # only needs to be done once and takes a long time.  (original note by cq)
    GetthisAll = False
    tcount =1
    if GetthisAll:
        for  num in range(1,1000): # 1000 pages is more than the site can have
            start_urls.append("http://www.xbmiaomu.com/qiugouxinxi/index-htm-page-{0}.html".format(str(num)))
    else:
        # Crude heuristic: the site posts at most ~3 pages of new requests a
        # day, so page 1 is enough for a daily run (crawl first 10 pages if
        # you only want today's entries).
        for  num in range(1,2):
            start_urls.append("http://www.xbmiaomu.com/qiugouxinxi/index-htm-page-{0}.html".format(str(num)))
    def parse(self, response):
        """Parse one request-to-buy index page.

        Each listing entry on the page is rendered as its own <table>
        inside a td with bgcolor "#cccccc"; the description, title and
        publish-date cells are extracted from each one.

        NOTE(review): as visible here the extracted values (desc, title,
        pubdate) are never assembled into an item or yielded -- presumably
        the method body continues beyond this chunk of the file.  Verify
        against the full source before changing.
        """
        sel = Selector(response)
        # One <table> per listing entry on the index page.
        ctables = sel.xpath(u'//td[@bgcolor="#cccccc"]/table')
        for tab in ctables:
            # Cell positions are tied to the site's fixed two-row table
            # layout; these paths break if the markup changes.
            desc = tab.xpath(u'tr[2]/td[1]/text()').extract()
            title = tab.xpath(u'tr[1]/td[2]/u/a/font/text()').extract()
            pubdate = tab.xpath(u'tr[1]/td[3]/text()').extract()
