#!/usr/bin/python
# -*- coding:utf-8 -*-

from scrapy.spider import Spider
from scrapy.selector import Selector
from scrapy import log
import time
import sys
import codecs
# Make the local project package importable when the spider is run from
# elsewhere (hard-coded Windows path).
sys.path.append('D:\\PYTHON\\tutorial\\tutorial')
# Python 2 hack: re-expose sys.setdefaultencoding (hidden by site.py) and
# force UTF-8 as the default codec for implicit str<->unicode conversions.
# The string handling below (str(...encode('utf-8')), writing unicode to
# files) relies on this being set.
reload(sys)
sys.setdefaultencoding('utf-8')
from items import SHMZXItem

class SHMZXSpider(Spider):
    """Scrapy spider for the sell-listing pages of 312green.com.

    At class-definition time (guarded by ``GetthisAll``) it loads the set of
    already-crawled page URLs from a plain-text history file and queues every
    listing-index page in the configured range that is not in that set.
    ``parse`` extracts one SHMZXItem per listing block on a page and appends
    the page URL to the history file so later runs skip it.
    """



    existslist=[] # URLs already crawled in previous runs (loaded from the history file below)
    currentSpiderHaveDone =[]  # NOTE(review): appears unused in this file -- confirm before removing
    name = "312green2"
    # Route items through the dedicated pipeline for this site only.
    custom_settings = {'ITEM_PIPELINES':{'tutorial.pipelines.S312greenPipeline':300,}}
    allowed_domains = ["312green.com"]
    start_urls = []
    GetthisAll = True # switch: crawl ALL posted listing pages. False = skip, True = everything. A full pass only needs to run once but takes a long time. by cq

    if GetthisAll:
        # Load the crawl history so already-seen pages are not queued again.
        # 'a+' creates the file on first run.  NOTE(review): in Python 2 the
        # initial read position of an 'a+' file is platform-dependent --
        # readlines() may return nothing if it starts at EOF; confirm on the
        # target platform.
        Historyfile = codecs.open("312greenCompanyinfohaveSpidered.txt",'a+',encoding="utf-8")
        s=Historyfile.readlines()
        Historyfile.flush()
        Historyfile.close()
        for fileLine in s:
            existslist.append(fileLine.strip('\n'))
        #print existslist
        for  num in range(120,13820): # original note said "at most 1000 pages"; the range here is wider
            productaddress=u"http://www.312green.com/sell/view-c10-s-t-v-p{0}.html".format(str(num))
            if productaddress in  existslist:
                #print 666666666666
                continue
            else:
                start_urls.append(productaddress)


    def parse(self, response):
        """Parse one listing-index page.

        Records response.url in the history file (if new), then returns a
        list of SHMZXItem -- one per <div class="box2"> listing block --
        with title, link, description, contact, company name and phone.
        """
        #time.sleep(2)
        if response.url not in self.existslist:
            # Persist this URL so a future run skips the page.
            # NOTE(review): writelines() on a str writes it char-by-char;
            # output is identical to write(), just unidiomatic.
            Historyfile = codecs.open('312greenCompanyinfohaveSpidered.txt','a+',encoding="utf-8")
            Historyfile.writelines(str(response.url)+"\n")
            Historyfile.close()

        sel = Selector(response)
        divs = sel.xpath('//div[@class="box2"]')
        items = []
        for site in divs:
            item = SHMZXItem()

            #if currentDate in self.today or self.GetthisAll:
            # Each field: take the first xpath match, UTF-8 encode it,
            # fall back to '' when the element is absent.
            Xtitle =site.xpath('div/h2/a/text()').extract()
            title = str(Xtitle[0].encode('utf-8')) if Xtitle else ''
            Xlink =site.xpath('div/h2/a/@href').extract()
            link =  str(Xlink[0].encode('utf-8')) if Xlink else ''
            Xdesc =site.xpath('div[@class="content"]/text()').extract()
            desc = str(Xdesc[0].encode('utf-8')) if Xdesc else ''
            Xpeople = site.xpath('div[@class="linkmode"]/text()').extract()
            people = str(Xpeople[0].encode('utf-8')) if Xpeople else ''
            Xcname = site.xpath('div[@class="linkmode"]/a[1]/text()').extract()
            cname = str(Xcname[0]) if Xcname else ''
            # Full text of the contact block; the phone number is whatever
            # follows the "telephone" label (the Chinese u"..." literal
            # below).  The '12345 ' fallback keeps the slice from raising
            # when the block is missing.
            Xacontent = site.xpath('div[@class="linkmode"]').xpath('string(.)').extract()
            acontext =  Xacontent[0] if Xacontent else '12345 '
            tels = acontext[acontext.find(u"电  话")+5:]
            item['title'] = title
            item['link'] =  link
            item['desc'] =  desc
            item['tel'] =   tels
            item['people'] = people
            item['cname'] = cname
            print item
            items.append(item)
        return items
