#coding:utf-8  
import sys  
reload(sys)
import md5
import time  
sys.setdefaultencoding('utf-8') 
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from newsmth.items import NewsmthItem
from scrapy.http import Request

class NewsmthSpider(BaseSpider):
    """Crawl the Shopping board of www.newsmth.net.

    Flow:
      parse           -> discover how many board pages exist, request each one
      parseEveryPage  -> extract every thread link on a board page
      parseNotePage   -> save a thread's first page, request its other pages
      parseNextPage   -> save a thread's follow-up pages

    Every fetched thread page is dumped to ./data/Newsmth<N> as the repr of a
    dict holding the url, the body recoded GBK->UTF-8, an md5 of the raw body,
    and the fetch timestamp.  The ./data directory must already exist.
    """

    fileNameIndex = 1  # suffix of the next output file (class-level counter)
    name = "newsmth"
    allowed_domains = ["www.newsmth.net"]
    start_urls = [
        "http://www.newsmth.net/nForum/board/Shopping"
    ]

    def parse(self, response):
        """Read the board index page and schedule one request per board page.

        The highest page number sits in the next-to-last <li> of the
        pagination widget (the last <li> is the ">>" navigation arrow).
        """
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('/html/body/div[@id="main_wrap"]/div[@id="main"]/div[@id="body"]/div[@id="body"]/div[@class="t-pre"]/div[@class="page"]/ul[@class="pagination"]/li/ol/li')

        # Fall back to 0 (schedules nothing) when the pagination widget is
        # missing or has fewer than two entries, matching the original loop.
        lastPageNum = 0
        if len(sites) >= 2:
            lastPageNum = int(sites[len(sites) - 2].select('a/text()').extract()[0])

        pageUrlPrefix = "http://www.newsmth.net/nForum/board/Shopping?p="
        for pageNo in range(1, lastPageNum + 1):
            pageUrl = pageUrlPrefix + str(pageNo)
            print(pageUrl)
            yield Request(pageUrl, callback=self.parseEveryPage)

    def parseEveryPage(self, response):
        """Extract every thread link on one board page and request it."""
        hxs = HtmlXPathSelector(response)
        sites = hxs.select('/html/body/div[@id="main_wrap"]/div[@id="main"]/div[@id="body"]/div[@id="body"]/div[@class="b-content corner"]/table[@class="board-list tiz"]/tr/td[@class="title_9"]')
        for site in sites:
            noteLink = "http://www.newsmth.net" + site.select('a/@href').extract()[0]
            # Carry the url in meta so the callback can record it verbatim.
            yield Request(noteLink, meta={'url': noteLink}, callback=self.parseNotePage)

    def _saveResponse(self, response):
        """Persist one fetched thread page to ./data/Newsmth<N>.

        Returns the url taken from the request meta so callers can reuse it.
        Shared by parseNotePage and parseNextPage (was duplicated verbatim).
        """
        url = response.meta['url']
        content = response.body
        print(url)

        key = md5.new()
        key.update(content)  # fingerprint the raw (undecoded) body

        resultDict = {}
        resultDict['url'] = url
        # Pages are served as GBK; recode to UTF-8, dropping undecodable bytes.
        resultDict['content'] = content.decode('gbk', 'ignore').encode('utf-8')
        resultDict['md5'] = key.hexdigest()
        resultDict['time'] = time.time()

        # 'with' guarantees the handle is closed (the original leaked it).
        with open("./data/Newsmth" + str(self.fileNameIndex), 'wb') as outFile:
            outFile.write(str(resultDict))
        self.fileNameIndex = self.fileNameIndex + 1
        return url

    def parseNotePage(self, response):
        """Save a thread's first page, then request its remaining pages.

        Page numbers come from the pagination list; the first and last <li>
        are navigation arrows, so only the interior entries are used.
        """
        url = self._saveResponse(response)

        hxs = HtmlXPathSelector(response)
        sites = hxs.select('/html/body/div[@id="main_wrap"]/div[@id="main"]/div[@id="body"]/div[@class="t-pre"]/div[@class="page"]/ul[@class="pagination"]/li/ol/li')
        total = len(sites)
        if total > 1:
            for pos, site in enumerate(sites):
                # 0-based positions 1 .. total-2 hold plain page-number links.
                if 0 < pos < total - 1:
                    pageNum = int(site.select('a/text()').extract()[0])
                    pageUrl = url + "?p=" + str(pageNum)
                    yield Request(pageUrl, meta={'url': pageUrl}, callback=self.parseNextPage)

    def parseNextPage(self, response):
        """Save one follow-up page of a thread (no further requests)."""
        self._saveResponse(response)


