# NOTE(review): This entire file is commented-out legacy code — a Scrapy spider
# ("sichuan") that scraped the Sichuan GSXT business-registry site
# (gsxt.scaic.gov.cn) by fetching a captcha image, solving it through the UUcode
# OCR service, and then POSTing paginated search/detail form requests.
# It is written in Python 2 syntax (print statements, `from urllib import
# unquote`, `urllib.quote`) and depends on project modules that may no longer
# exist (`scrapyluke.uuCode`, `test1.items`). Uncommenting it as-is would fail
# on Python 3 and on the missing imports — it is preserved here for reference
# only. The commented-out code below is unchanged; only annotation comments
# have been translated to English.
# # -*- coding: utf-8 -*-
# import scrapy
# from scrapy import Request, FormRequest
# from scrapyluke.uuCode import UUcode
# import datetime
# import time
# from test1.items import GsxtGovItem
# import re
# import urllib
# from urllib import unquote
#
#
# class SiChuanSpider(scrapy.Spider):
#     name = 'sichuan'
#     start_urls = ['http://gsxt.scaic.gov.cn/ztxy.do?method=index&random=1459308086057']
#
#     def __init__(self):
#         super(SiChuanSpider, self).__init__()
#         self.uu = UUcode('109521', '3c8906d3666e44bb9c961e1647126dbc')
#         self.word_ori = '哈哈'
#         self.word = unquote(self.strencode(self.word_ori))
#         self.base_path = '/home/li-x/imgs'
#         self.time = self.gettime()
#         self.code = 'http://gsxt.scaic.gov.cn/ztxy.do?method=createYzm&dt=%s&random=%s'%(self.time,self.time)  # captcha image request URL
#         self.search = 'http://gsxt.scaic.gov.cn/ztxy.do?method=list&djjg=&random=%s' %self.gettime()  # search request URL
#
#     def parse(self, response):
#
#         yield Request(url=self.code,callback=self.code_ocr)
#
#
#     def code_ocr(self, response):
#         img_abspath = self.uu.store_code(response.body, str(int(time.time()))+'.jpg', self.base_path)
#         result = self.uu.check_code(img_abspath, 8001, 30)
#         if result :
#             code = result[1]
#             # form data for the captcha-verified search request
#             Formdata={
#                             'currentPageNo':'1',
#                             'yzm':code,
#                             'maent.entname':self.word
#
#                         }
#             yield FormRequest(url=self.search,
#                             formdata=Formdata,
#                                callback=self.check_page)
#     def check_page(self,response):
#         # print response.body
#         # print response.request.body
#
#
#         page_nums = response.xpath("//div[@class = 'list-a']/a/text()").extract()
#         for page in page_nums:
#             datas = {
#                 'currentPageNo':'',
#                 'yzm':'',
#                 'cyzm':'cxlist',
#                 'maent.entname':self.word
#             }
#             next_url = 'http://gsxt.scaic.gov.cn/ztxy.do?method=list&djjg=&yzmYesOrNo=no&random=1459390538348&pageNum=2'
#             yield scrapy.FormRequest("")
#
#
#         id_list = response.xpath("//div[@class='center-1']/div//li[@class = 'font16']/a/@onclick").extract()
#         for id in id_list:
#             if re.findall(u'([a-zA-Z0-9]+)',id):
#                 id_curr = re.findall(u'([a-zA-Z0-9]+)',id)[1]
#                 type_curr = re.findall(u'([a-zA-Z0-9]+)',id)[2]
#                 print id_curr,type_curr
#                 info_list = ['qyInfo','baInfo','cfInfo','gqczxxInfo','dcdyInfo','jyycInfo','ccjcInfo','qygsInfo','qtgsInfo','qygsForXzxkInfo','qygsForZzcqInfo','qygsForTzrxxInfo','qygsForXzcfInfo',
#                             'yzwfInfo','qygsForTzrbgxxInfo','qtgsForCfInfo','sfgsInfo','sfgsbgInfo','spyzInfo','qtgsScaqsgInfo','qtgsForCfInfo']
#                 for info in info_list:
#                     data = {
#                         'method':info,
#                         'maent.pripid':id_curr,
#                         # 'maent.entbigtype':type_curr,
#                         'czmk':'czmk1',
#                         'random':self.gettime()
#                     }
#                     yield scrapy.FormRequest( url= 'http://gsxt.scaic.gov.cn/ztxy.do',formdata=data,meta={'id':id_curr,'info':info},callback=self.parse_info)
#
#
#     def parse_info(self,response):
#         if response.xpath("//table/tr/td").extract():
#             response_name = ''
#             name_ori = response.xpath("//div[@id='details']/h2/text()").extract_first()
#             response_name_list = response.xpath("//table/tr[1]/th/text()").extract()
#             for response_name_i in response_name_list:
#                 if response_name_i:
#                     if u'红色为修改过的信息项' in response_name_i:
#                         name = re.findall(u'([\s\S]+)红色为修改过的信息项',response_name_i)[0].strip()
#                         response_name = '|'+ name
#                         break
#                     response_name = response_name + '|' + response_name_i.strip()
#
#
#             company_name = re.findall(u'([\s\S]+)注册',name_ori.strip())[0].strip() if name_ori else None
#             response_name =  response.request.meta['info'] + ':' + response_name[1:]
#             print company_name,response_name
#
#             url_list = response.xpath("//table//td/a/@onclick").extract()
#             if url_list:
#                 for url_id in url_list:
#                     if u'showRyxx' in url_id:
#                         ids = re.findall(u'(\d+)',url_id)
#                         if ids:
#                             id1 = ids[0]
#                             id2 = ids[1]
#                             data = {
#                                 'method':'tzrCzxxDetial',
#                                 'maent.xh':str(id1),
#                                 'maent.pripid':str(id2),
#                                 'random':self.gettime()
#                             }
#                             yield scrapy.FormRequest('http://gsxt.scaic.gov.cn/ztxy.do',formdata=data,callback=self.parse_info,meta={'id':response.request.meta['id'], 'info':'tzrCzxxDetial'})
#                     elif u'doNdbg' in url_id:
#                         ids = re.findall(u'(\d+)',url_id)
#                         id1 = ids[0] if ids else None
#                         data = {
#                                 'method':'ndbgDetail',
#                                 'maent.nd':str(id1),
#                                 'maent.pripid':str(response.request.meta['id']),
#                                 'random':self.gettime()
#                             }
#                         yield scrapy.FormRequest('http://gsxt.scaic.gov.cn/ztxy.do',formdata=data,callback=self.parse_info,meta={'id':response.request.meta['id'], 'info':'ndbgDetail'})
#
#
#
#     def gettime(self):
#         return str(int(time.time()))
#
#     def strencode(self,str):
#         haha = str.encode('gb2312')
#         return urllib.quote(haha)