#!/usr/bin/python
# -*- coding:utf-8 -*-
import scrapy
from scrapy import FormRequest,Request
from scrapy.selector import Selector
import sys
import time
import codecs
from tutorial.items import SMTXItem
reload(sys)
sys.setdefaultencoding('utf8')

class yunlin356pider(scrapy.Spider):

    def __init__(self):
        """Load URLs crawled in previous runs from today's history file.

        The history file is named ``YYYY-MM-DDSpiderHaveDoneUrls.txt`` and is
        shared with GetDetailInfo, which appends each newly visited URL.
        Every non-empty line is added to ``self.existslist`` so parse() can
        skip already-crawled product pages.
        """
        history_path = time.strftime("%Y-%m-%d", time.localtime()) + 'SpiderHaveDoneUrls.txt'
        # 'a+' creates the file if it does not exist but, unlike the original
        # 'w+', does NOT truncate it -- 'w+' wiped the whole crawl history on
        # every start, so readlines() always came back empty.
        history_file = codecs.open(history_path, 'a+', encoding="utf-8")
        try:
            # 'a+' positions at end-of-file; rewind before reading.
            history_file.seek(0)
            for line in history_file:
                # Strip the trailing newline so membership tests against raw
                # URLs in parse() actually match (the original appended the
                # line verbatim, newline included, via the bare name
                # `existslist` -- a latent NameError as well).
                url = line.strip()
                if url:
                    self.existslist.append(url)
        finally:
            history_file.close()

    # Crawl-history bookkeeping.
    # NOTE(review): these are class-level mutable attributes, so they are
    # shared across every instance of this spider.
    existslist=[] # set of URLs already crawled (loaded from the history file in __init__)
    currentSpiderHaveDone =[]
    name = "yuanlin365"
    # Route this spider's items through the dedicated yunlin356 pipeline only.
    custom_settings = {'ITEM_PIPELINES':{'tutorial.pipelines.yunlin356Pipeline':400,}}
    allowed_domains = ["yuanlin365.com"]
    start_urls = []

    # Seed the crawl with supply-search listing pages 188..399.
    # '%B9%A9%D3%A6' is a percent-encoded (non-UTF-8, presumably GBK) search
    # keyword baked into the site's URL scheme -- TODO confirm against the site.
    for num in range(188,400):
        start_urls.append('http://www.yuanlin365.com/supply/search_%B9%A9%D3%A6____{0}.html'.format(str(num)))

    def parse(self, response):
        """Extract product-detail links from a listing page and schedule them.

        For each ``div.main`` block, follows the second ``<a>``'s href with
        GetDetailInfo as the callback, skipping any URL already recorded in
        ``self.existslist`` (the crawl history loaded in __init__).
        """
        # Feed the page body straight to a Selector; the original
        # str(response.text.encode('utf-8')) round-trip was a no-op under
        # Python 2 and would corrupt the markup under Python 3.
        sel = Selector(text=response.text)
        for block in sel.xpath('//div[@class="main"]'):
            hrefs = block.xpath('a[2]/@href').extract()
            if not hrefs:
                # No second <a> in this block -- the original extract()[0]
                # raised IndexError here.
                continue
            productaddress = str(hrefs[0])
            # Parenthesized print works on both Python 2 and 3.
            print(productaddress)
            # Skip URLs already crawled in a previous run.
            if productaddress in self.existslist:
                continue
            yield scrapy.Request(productaddress, callback=self.GetDetailInfo)

    def GetDetailInfo(self,response):
        """Record the visited detail-page URL in the history file, then scrape it.

        NOTE(review): the method body appears to continue beyond this view;
        the `item` built below is presumably populated and yielded further down.
        """
        # If this URL has not been seen yet, remember it in memory and on disk
        # so a later run can skip it.
        if response.url not in self.existslist:
            # BUG(review): 'w+' truncates the file on every open, so each write
            # discards all previously recorded URLs, and writelines() adds no
            # newline separator -- the history file never accumulates a proper
            # one-URL-per-line log. Should be mode 'a' with an explicit '\n'.
            Historyfile = codecs.open(time.strftime("%Y-%m-%d",time.localtime()) +'SpiderHaveDoneUrls.txt','w+',encoding="utf-8")
            self.existslist.append(response.url)
            #currentSpiderHaveDone.append(response.url)
            Historyfile.writelines(response.url)
            Historyfile.close()

        items=[]
        item = SMTXItem()
        sel = Selector(response)
        # Debug dump of the 9th text node inside the #dtl table row.
        print sel.xpath('//*[@id="dtl"]/tbody/tr/td/text()[9]').extract()
        #titlevalues = sel.xpath('//table[@id="gsxx_infotb"]/tr[3]/td/table/tr[2]/td/div/strong')
