# -*- coding: utf-8 -*-

import scrapy
from scrapy import Request, FormRequest
from scrapyluke.uuCode import UUcode
import datetime
import time
from test1.items import GsxtGovItems
import re
from scrapyluke.items import MongoItem
from scrapyluke.processors import html_to_dict, html_to_list_dict, tables_names

class YunNanSpider(scrapy.Spider):
    """Spider for the Yunnan enterprise credit publicity site (gsxt.ynaic.gov.cn).

    Flow: landing page -> scrape session token -> fetch captcha image ->
    OCR it through the UUcode service -> POST the search form -> paginate
    through the result list -> follow each company link and scrape the
    four detail tabs (URL suffixes 01, 02, 03, 06), yielding one
    GsxtGovItems and one MongoItem per tab.
    """

    name = 'yunnan'
    start_urls = ['http://gsxt.ynaic.gov.cn/notice/']

    # Absolute http/https/ftp URL matcher; compiled once at class level so
    # isurl() does not recompile the pattern on every call.
    _URL_RE = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https:// (or ftp/ftps)
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  # domain...
        r'localhost|'  # localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or IPv4 address
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)

    def __init__(self):
        super(YunNanSpider, self).__init__()
        # UUcode captcha-OCR client (account id, API key).
        self.uu = UUcode('109521', '3c8906d3666e44bb9c961e1647126dbc')
        self.word = '哈哈'                  # search keyword
        self.base_path = '/home/li-x/imgs'  # directory for saved captcha images
        # Captcha image endpoint.
        self.code = 'http://gsxt.ynaic.gov.cn/notice/captcha?preset=&ra=0.12810330255888402'
        # Search form endpoint.
        self.search = 'http://gsxt.ynaic.gov.cn/notice/search/ent_info_list'

    def code_right(self, res):
        """Return True when the search response contains result links,
        i.e. the captcha was accepted by the server."""
        return bool(res.xpath("//div[@class='link']/a/text()").extract())

    def _search_form(self, code, page_no, session):
        """Build the POST body for the entity search endpoint.

        :param code: OCR'd captcha text
        :param page_no: result page number, as a string
        :param session: session token scraped from the landing page
        """
        return {
            'captcha': code,
            'condition.pageNo': page_no,
            'condition.insType': '',
            'session.token': session,
            'condition.keyword': self.word,
        }

    # Request the captcha image
    def parse(self, response):
        """Extract the session token from the landing page, then request
        the captcha image (token travels along in meta)."""
        found = re.findall(u'code:[\s\S]*"([\s\S]*)"[\s\S]*data:', response.body)
        if found:
            sessiontoken = found[0]
        else:
            sessiontoken = None
            self.logger.error(u'sessiontoken 获取错误')
        yield Request(url=self.code,
                      meta={'session': sessiontoken},
                      callback=self.code_ocr)

    # Captcha recognition
    def code_ocr(self, response):
        """Store the captcha image on disk, OCR it via UUcode, and fire
        the first search request."""
        img_abspath = self.uu.store_code(response.body,
                                         str(int(time.time())) + '.jpg',
                                         self.base_path)
        result = self.uu.check_code(img_abspath, 8001, 30)
        if result:
            code_id = result[0]  # UUcode ticket id, needed to report bad OCR
            code = result[1]     # recognized captcha text
            session = response.request.meta['session']
            yield FormRequest(url=self.search,
                              formdata=self._search_form(code, '1', session),
                              meta={'pages': '1', 'code': code,
                                    'session': session, 'code_id': code_id},
                              callback=self.check_page)

    # Fan every search result page out to link_lists
    def check_page(self, response):
        """Emit one request per result page to link_lists and keep
        paginating until the last page (detected when the 10th list item
        is missing or identical to the previous page's)."""
        if not self.code_right(response):
            # Captcha rejected: report the bad OCR result back to UUcode.
            self.uu.report_error(response.meta['code_id'])
            return
        pages = int(response.request.meta['pages'])
        code = response.request.meta['code']
        session = response.request.meta['session']
        self.logger.info('page=%s code=%s session=%s', pages, code, session)
        # Re-request the current page for item extraction.
        yield FormRequest(url=self.search,
                          formdata=self._search_form(code, str(pages), session),
                          callback=self.link_lists,
                          dont_filter=True)
        check_word = response.xpath("//div[@class='list-item'][10]//text()").extract()
        if check_word and check_word != response.request.meta.get('check_response'):
            pages += 1
            yield FormRequest(
                url=self.search,
                formdata=self._search_form(code, str(pages), session),
                # Bug fix: propagate code_id so a captcha rejection on a
                # later page can still be reported (the original omitted it,
                # causing a KeyError in the rejection branch above).
                meta={'pages': str(pages),
                      'code': code,
                      'session': session,
                      'code_id': response.request.meta.get('code_id'),
                      'check_response': check_word},
                callback=self.check_page)

    # On each result page, find the URL leading to the detail page
    def link_lists(self, response):
        """Follow the detail link of every company entry on a result page."""
        for entry in response.xpath("//div[@class='list-item']"):
            link = entry.xpath("div[@class='link']/a/@href").extract_first()
            if link:  # guard: scrapy.Request(None) would raise
                yield scrapy.Request(link, callback=self.parse_links)

    # Request each tab of the detail page
    def parse_links(self, response):
        """Request the four detail tabs of a company page by replacing the
        two-character tab suffix at the end of the URL."""
        base = response.url[:-2]
        tab_callbacks = (('01', self.parse_link_info1),
                         ('02', self.parse_link_info2),
                         ('03', self.parse_link_info3),
                         ('06', self.parse_link_info6))
        for suffix, callback in tab_callbacks:
            yield scrapy.Request(url=base + suffix,
                                 callback=callback,
                                 dont_filter=True)

    def _emit_items(self, response, startends):
        """Yield the two items produced for every detail tab: a raw
        GsxtGovItems, and a MongoItem whose document is built from the
        HTML tables delimited by each (start, end, name) triple."""
        company_name = response.xpath(
            "//div[@class='main']/div[@class='notice']/ul/li[1]/text()"
        ).extract_first()
        gs = GsxtGovItems(response)
        gs['company_name'] = company_name
        gs['response_name'] = tables_names(response.xpath('//table').extract())
        gs['content'] = response.body
        yield gs
        mo = MongoItem(response)
        mo['_id'] = company_name
        mo['document'] = html_to_list_dict(response.body, startends)
        yield mo

    def parse_link_info1(self, response):
        """Tab 01: registration information tables."""
        startends = [['<th colspan="4">基本信息</th>', '</table>', '基本信息'],
                     ['<th colspan="5">抽查检查信息</th>', '</table>', '抽查检查信息'],
                     ['<th colspan="10">股权出质登记信息</th>', '</table>', '股权出质登记信息'],
                     ['<th colspan="6">主要人员信息</th>', '</table>', '主要人员信息'],
                     ['<th colspan="7">动产抵押登记信息</th>', '</table>', '动产抵押登记信息'],
                     ['<th colspan="6">经营异常信息</th>', '</table>', '经营异常信息'],
                     ['<th colspan="6">严重违法信息</th>', '</table>', '严重违法信息'],
                     ['<th colspan="7">行政处罚信息</th>', '</table>', '行政处罚信息'],
                     ['<tr><th colspan="4">分支机构信息</th></tr>', '</table>', '分支机构信息'],
                     ['股东信息<br />', '</table>', '股东信息'],
                     ['<th colspan="4">变更信息</th>', '</table>', '变更信息'],
                     ['<th colspan="5">清算信息</th>', '</table>', '清算信息'],
                     ['<th colspan="2">投资人信息</th>', '</table>', '投资人信息']]
        for item in self._emit_items(response, startends):
            yield item

    def parse_link_info2(self, response):
        """Tab 02: enterprise self-published information tables."""
        startends = [['<th colspan="7">行政处罚信息</th>', '</table>', '企业公示-行政处罚信息'],
                     ['<th colspan="9" align="center">股东及出资信息（币种与注册资本一致）</th>', '</table>', '企业公示-股东及出资信息'],
                     ['<th colspan="5" align="center">变更信息</th>', '</table>', '企业公示-变更信息'],
                     ['<th colspan="9">行政许可信息</th>', '</table>', '企业公示-行政许可信息'],
                     ['<th colspan="3">企业年报</th>', '</table>', '企业公示-企业年报'],
                     ['<th colspan="5">股权变更信息</th>', '</table>', '企业公示-股权变更信息'],
                     ['<th colspan="9" align="center">知识产权出质登记信息</th>', '</table>', '企业公示-知识产权出质登记信息']]
        for item in self._emit_items(response, startends):
            yield item

    def parse_link_info3(self, response):
        """Tab 03: other-department publicity tables."""
        startends = [['<th colspan="9">行政许可信息</th>', '</table>', '其他部门公示-行政许可信息'],
                     ['<th colspan="7">行政处罚信息</th>', '</table>', '其他部门公示-行政处罚信息']]
        for item in self._emit_items(response, startends):
            yield item

    def parse_link_info6(self, response):
        """Tab 06: judicial assistance publicity tables."""
        startends = [['<th colspan="6">司法股东变更登记信息</th>', '</table>', '司法协助公示-司法股东变更登记信息'],
                     ['<th colspan="7">司法股权冻结信息</th>', '</table>', '司法协助公示-司法股权冻结信息']]
        for item in self._emit_items(response, startends):
            yield item

    def isurl(self, checkurl):
        """Return True if *checkurl* is an absolute http/https/ftp URL."""
        return bool(self._URL_RE.match(checkurl))