#!/usr/bin/python
# -*- coding:utf-8 -*-
import scrapy
from scrapy import FormRequest,Request
from scrapy.selector import Selector
import sys
import time
import codecs
import os
from tutorial.items import SMTXItem
# Python 2 hack: re-expose sys.setdefaultencoding (deleted by site.py) and
# force the process default encoding to UTF-8 so the many str() calls on
# unicode scraped text below do not raise UnicodeEncodeError.
# NOTE(review): this is global, import-order-sensitive, and Python 2 only.
reload(sys)
sys.setdefaultencoding('utf8')

class s597mmpider(scrapy.Spider):
    currentSpiderHaveDone =[]
    name = "597mm"
    custom_settings = {'ITEM_PIPELINES':{'tutorial.pipelines.S597mmPipeline':400,}}
    allowed_domains = ["597mm.com"]
    start_urls = []
    GetthisAll =True
    existslist=[]

    if GetthisAll:
        Historyfile = codecs.open(os.getcwd()+"\\"+'S597mmSpiderHaveDoneUrls.txt','r',encoding="utf-8")

        s = Historyfile.readlines()
        Historyfile.flush()
        Historyfile.close()
        for fileLine in s:
            existslist.append(fileLine.strip('\n'))

        #for num in range(1,1992):
        for num in range(1,1993):
            waitingtoscrapy ='http://www.597mm.com/company/index.php?page={0}'.format(str(num))
            if waitingtoscrapy not in existslist:
                print ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
                print waitingtoscrapy
                print ">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>"
                start_urls.append(waitingtoscrapy)

    def parse(self, response):
        if response.url not in self.existslist:
            Historyfile = codecs.open(os.getcwd()+"\\"+'S597mmSpiderHaveDoneUrls.txt','a+',encoding="utf-8")
            Historyfile.writelines(str(response.url)+"\n")
            Historyfile.close()

        strdata =str( response.text.encode('utf-8'))
        sel = Selector(text=strdata)
        trs = sel.xpath('//div[@class="list"]')
        for tr in trs:
            productaddress = str(tr.xpath('table/tr/td[3]/h3/a/@href').extract()[0])
            desc =str(tr.xpath('table/tr/td[3]/span/text()').extract()[0])
            cname = str(tr.xpath('table/tr/td[3]/h3/a/text()').extract()[0])
            item = SMTXItem()
            item['desc'] =desc
            item["link"]=productaddress
            item['cname'] =cname
            if productaddress in self.existslist:
                print ">>>>>>>>>>>>>>have done before >>>>>>>>>>>>>>"
                continue
            else:
                time.sleep(1)
                print ">>>>>>>>>>>>>>go to the url : "+productaddress+" >>>>>>>>>>>>>>"
                yield scrapy.Request(productaddress,callback=self.GetDetailInfo,meta={'item':item})

    def GetDetailInfo(self,response):
        if response.url not in self.existslist:
            Historyfile = codecs.open(os.getcwd()+"\\"+'S597mmSpiderHaveDoneUrls.txt','a+',encoding="utf-8")
            Historyfile.writelines(str(response.url)+"\n")
            Historyfile.close()
        
        sel = Selector(response)
        item = response.meta['item']
        item['people'] = ''
        item['QQ'] = ''
        item['tel'] = ''
        item['mobilephone'] = ''
        item['fax'] = ''
        item['email'] = ''
        sheaddivs = sel.xpath('//div[@class="side_head"]')
        sbodydivs = sel.xpath('//div[@class="side_body"]')
        for i in range(0,len(sheaddivs)):
            htitle = sheaddivs[i].xpath('div/strong/text()').extract()[0]
            if htitle=='联系方式':
                lis = len(sbodydivs[i].xpath('ul/li'))
                for j in range(1,lis+1):
                    text = sbodydivs[i].xpath('ul/li['+str(j)+']/text()').extract()[0]
                    self.buildItem(item,text)
        yield item

    def buildItem(self,item,text):
        print ">>>>>>>>>>>>>> building data  >>>>>>>>>>>>>>"
        if '联系人' in text:
            item['people'] = text
        elif 'QQ' in text:
            item['QQ'] = text
        elif '电话' in text:
            item['tel'] = text
        elif '手机' in text:
            item['mobilephone'] = text
        elif '传真' in text:
            item['fax'] = text
        elif '邮件' in text:
            item['email'] = text
