#!/usr/bin/python
# -*- coding:utf-8 -*-

# Python 2 / old-Scrapy import paths (scrapy.spider was renamed to
# scrapy.spiders in later releases).
from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy import log
import time
import sys
# NOTE(review): hard-coded Windows project path so the local `items`
# module below can be imported — breaks on any other machine; consider
# a proper package layout or PYTHONPATH instead.
sys.path.append('D:\\PYTHON\\tutorial\\tutorial')
# Python-2-only hack: force the process-wide default encoding to UTF-8
# so implicit str/unicode conversions of Chinese text don't raise
# UnicodeDecodeError. Has no equivalent (and no need) in Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')
from items import SHMZXItem
class SHMZXSpider(Spider):
    """Spider that crawls buy-request listings from 5858168.com.

    Builds `start_urls` at class-definition time: either the full
    paginated history (GetthisAll=True, slow, only needed once) or
    just the first page (roughly one day's worth of new posts).
    """
    #log.start("log",loglevel='INFO')
    name = "5858168_2"
    custom_settings = {'ITEM_PIPELINES':{'tutorial.pipelines.SHMZX_2_Pipeline':300,}}
    allowed_domains = ["5858168.com"]
    start_urls = []
    # Switch: True = crawl every listing page (a few hundred, takes a long
    # time; doing it once is enough), False = only the most recent page.
    GetthisAll = True

    # The original if/else duplicated the URL-building loop; only the
    # page count differed (280 vs 2, both exclusive upper bounds).
    _last_page = 280 if GetthisAll else 2
    for num in range(1, _last_page):
        start_urls.append("http://www.5858168.com/com/index.asp?page="+str(num))

    def parse(self, response):
        """Extract one SHMZXItem per listing entry on the page.

        Returns a list of items. Every xpath extraction is guarded:
        an entry missing a field yields '' for that field instead of
        aborting the whole page with an IndexError (the original only
        guarded title/desc, not people/tel/link).
        """
        sel = Selector(response)
        sites = sel.xpath('//div[@class="PLRgiht"]')
        items = []
        for site in sites:
            item = SHMZXItem()
            people = site.xpath('p/span/a/text()').extract()
            item['people'] = people[0] if people else ''
            tel_raw = site.xpath('p/text()').extract()
            # Drop the first two characters — a label prefix before the
            # number (matches the original tel[2:] slice).
            item['tel'] = str(tel_raw[0])[2:] if tel_raw else ''
            entname = site.xpath('p[@class="entname"]/text()').extract()
            item['title'] = entname[0] if entname else ''
            entproduct = site.xpath('p[@class="entproduct"]/text()').extract()
            item['desc'] = entproduct[0] if entproduct else ''
            href = site.xpath('p/span/a/@href').extract()
            item['link'] = ("www.5858168.com" + href[0]) if href else ''
            items.append(item)
        return items
