# -*- coding: utf-8 -*-
import scrapy
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from zhuaqu.items import DmozItem
import re
from pymongo import MongoClient
# Count of pages processed so far; the crawl stops once it reaches 300000.
__SUCCESS__  = 0
# Numeric page id appended to sameUrl; advanced by one on every response.
__INDEX__ = 116958
# Accumulator yielded when the crawl limit is hit (never populated in this file).
items = {}
# Base URL prefix shared by start_urls and makeNextRequst.
sameUrl = "http://db.yaozh.com/instruct/19700101/"
class TestSpider(BaseSpider):
    """Sequentially crawl drug-instruction pages from db.yaozh.com.

    Pages live at ``sameUrl + <id>.html``; the module-level ``__INDEX__``
    supplies the numeric id and is advanced once per response.  Each page's
    title, id and meta description are stored in the local MongoDB
    collection ``test.mydb``, skipping titles already present.  The crawl
    stops after ``__SUCCESS__`` reaches 300000 processed pages.
    """
    name = "testSpider"
    # BUG FIX: Scrapy only honours `handle_httpstatus_list`; the original
    # `http_handle` attribute is never read by the framework, so 400/404/500
    # responses were being filtered out before reaching parse().  Keep the
    # old attribute for any external reader and add the effective setting.
    http_handle = [400, 404, 500]
    handle_httpstatus_list = [400, 404, 500]
    allowed_domains = ["yaozh.com"]
    start_urls = [
        "http://db.yaozh.com/instruct/19700101/" + str(__INDEX__) + ".html",
    ]

    def _collection(self):
        """Return the target MongoDB collection, creating the client once.

        BUG FIX: the original opened a fresh MongoClient inside every
        parse() call, leaking one connection pool per crawled page.  The
        collection handle is now cached on the spider instance.
        """
        if not hasattr(self, '_mydb'):
            client = MongoClient("127.0.0.1", 27017)
            self._mydb = client.test.mydb
        return self._mydb

    def parse(self, response):
        """Extract one page, persist it, and schedule the next id.

        Yields a follow-up Request for the next sequential id until the
        success limit is reached, then yields the (empty) `items` dict.
        """
        global __SUCCESS__
        global __INDEX__
        global items
        __INDEX__ += 1
        mydb = self._collection()
        if __SUCCESS__ < 300000:
            hxs = HtmlXPathSelector(response)
            # yaozh.com redirects missing ids to a shared 404 page.
            if response.url == 'http://db.yaozh.com/404.html':
                print('\t'+'************************************未抓取'+'\t' + str(response.status) + '******************************\t')
                yield scrapy.Request(self.makeNextRequst(), callback=self.parse)
            else:
                item = DmozItem()
                # BUG FIX: the original indexed [0] unconditionally on both
                # extract() and findall() results and raised IndexError on
                # pages missing a <title> or description meta tag.
                titles = hxs.xpath("//title/text()").extract()
                item['title'] = titles[0].split("、")[0] if titles else ''
                # Last URL path component; its first 6 chars are the page id.
                item['yaoZhiID'] = response.url.split("/")[-1][0:6]
                intros = re.findall('<META name="description" content="(.*?)">', response.body)
                item['intro'] = intros[0] if intros else ''
                cout = self.writeToDB(item, mydb)
                if cout == 0:
                    print('\t'+'************************************成功插入'+'\n' + str(response.status) + '******************************\n')
                else:
                    print('\t'+'************************************找到重复'+'\n' + str(response.status) + '******************************\n')
                yield scrapy.Request(self.makeNextRequst(), callback=self.parse)
                __SUCCESS__ += 1
        else:
            # BUG FIX: message said "20000" but the limit tested above is 300000.
            print("test success 300000")
            yield items

    def makeNextRequst(self):
        """Build the URL of the next sequential page from the shared counter."""
        return sameUrl + str(__INDEX__) + '.html'

    def writeToDB(self, data, handle):
        """Insert `data` into `handle` unless a same-title document exists.

        Returns the number of pre-existing documents with that title
        (0 means the item was inserted).  Uses the legacy pymongo
        cursor.count()/save() API to match this Python 2 codebase.
        """
        # Check for a duplicate first (original note: 先查询数据库中是否存在).
        exsited_h = handle.find({'title': data['title']}).count()
        if exsited_h == 0:
            handle.save(data)
        return exsited_h


