# -*- coding: utf-8 -*-
import scrapy,sys,os,json,pymongo
reload(sys).setdefaultencoding('UTF-8')
from njupt.items import bItem
import logging
from pyquery import PyQuery as pq


class crcaSpider(scrapy.Spider):
    """Spider that enumerates trademark registration numbers against the
    shangdun.org search API and stores the detail record of every hit in
    a local MongoDB collection (db ``shangbiao``, collection ``shangdun``).
    """
    name = "b"
    allowed_domains = ["www.shangdun.org", 'api.shangdun.org']

    def __init__(self, *args, **kwargs):
        # Fix: the original skipped Spider.__init__, which Scrapy relies on
        # for its own setup (name/start_urls handling).  Forwarding
        # *args/**kwargs keeps the constructor backward-compatible.
        super(crcaSpider, self).__init__(*args, **kwargs)
        # NOTE(review): credentials hard-coded in source — move them to
        # Scrapy settings or environment variables.
        self.APIkey = 'UYT856h09TQurw8rsW'
        self.APIpassword = 'Tdwh58dkP04w'
        connection = pymongo.MongoClient('localhost', 27017)
        db = connection['shangbiao']
        # Target collection for scraped trademark records.
        self.collection = db['shangdun']

    def start_requests(self):
        # Probe one million consecutive registration numbers; 17000000 was
        # noted as an eventual upper bound in the original source.
        for i in range(10000000, 11000000):
            next_page_url = (
                "http://api.shangdun.org/search/?APIkey=%s&APIpassword=%s"
                "&SearchWord=%d&SearchKind=7"
                % (self.APIkey, self.APIpassword, i)
            )
            request = scrapy.Request(next_page_url, callback=self.parse)
            # Carry the probed number along so parse() can log misses.
            request.meta['regNO'] = i
            yield request

    def parse(self, response):
        """Parse a search result; when the registration number exists,
        follow up with a detail ("show") request."""
        if response.status == 200:
            body = json.loads(response.body)
            regNO = response.meta['regNO']
            if body['Datas']:
                # Only the first match is followed, as in the original.
                hit = body['Datas'][0]
                RegNo = hit['RegNo']
                TMclass = hit['TMclass']
                url = (
                    'http://api.shangdun.org/show/?APIkey=%s&APIpassword=%s'
                    '&RegNo=%s&ClassNo=%s'
                    % (self.APIkey, self.APIpassword, RegNo, TMclass)
                )
                yield scrapy.Request(url, callback=self.parse_content)
            else:
                logging.info("商标%s不存在" % (regNO))

    def parse_content(self, response):
        """Persist the detail payload to MongoDB as-is."""
        if response.status == 200:
            body = json.loads(response.body)
            # Fix: Collection.insert() is deprecated in pymongo 3.x;
            # insert_one() is the supported call for a single document.
            self.collection.insert_one(dict(body))
            logging.info("添加成功!")
