# -*- coding: utf-8 -*-
#!/usr/bin/python
__author__ = 'bitfeng'


from scrapy import Spider
from scrapy import Request, FormRequest
import datetime
from scrapyluke.uuCode import UUcode
from scrapyluke.commonfun import to_str
from scrapyluke.processors import html_to_dict, html_to_list_dict, tables_names
from scrapyluke.items import MongoItem
from gsxt_gov.items import GsxtGovItem
import re


class HeiLongJiang(Spider):
    """Spider for the Heilongjiang enterprise credit publicity site (gsxt.hljaic.gov.cn).

    Flow: fetch a captcha image -> OCR it via the UUcode service ->
    ask the site to validate the OCR answer -> run a keyword search ->
    follow each company link and store raw pages plus structured data.
    """

    name = 'heilongjiang'
    start_urls = ['http://gsxt.hljaic.gov.cn/']

    def __init__(self):
        super(HeiLongJiang, self).__init__()
        # UUcode OCR client (account id, API key).
        self.uu = UUcode('109521', '3c8906d3666e44bb9c961e1647126dbc')
        self.code_type = 6001    # captcha type id passed to the OCR service
        self.code_getMax = 20    # max length/lookup limit for the OCR call
        # Search keywords still pending; planned to move into redis later.
        self.words = ['哈哈科技', '京蓝']
        self.host = 'http://gsxt.hljaic.gov.cn'

    def code_right(self, res):
        """Return True iff the server's captcha-check response contains 'true'.

        The original fell through to a bare ``return`` (i.e. ``None``);
        returning the boolean directly is explicit and backward-compatible.
        """
        return 'true' in res

    # Request the captcha image.
    def parse(self, response):
        """Kick off the captcha fetch; the image body goes to code_ocr."""
        yield Request(url=self.host + '/validateCode.jspx?type=0',
                      callback=self.code_ocr)

    # Captcha recognition.
    def code_ocr(self, response):
        """Send the captcha image bytes to the OCR service and verify the answer."""
        result = self.uu.check_code_cache(response.body, self.code_type, self.code_getMax)
        if result:
            # Renamed from ``id`` to avoid shadowing the builtin.
            code_id, code = result
            # Ask the site whether the recognized captcha is correct.
            yield FormRequest(url=self.host + '/checkCheckNo.jspx', method='POST',
                              callback=self.check_code,
                              formdata={'checkNo': code},
                              meta={'code': code, 'code_id': code_id})

    # Verify the captcha recognition result, then search.
    def check_code(self, response):
        """On a correct captcha, search the next pending keyword; otherwise
        report the OCR error (fee is refunded) and fetch a fresh captcha."""
        if self.code_right(response.body):
            if self.words:
                keyword = self.words.pop()
                data = {'checkNo': response.meta['code'], 'entName': keyword}
                yield FormRequest(url=self.host + '/searchList.jspx', formdata=data,
                                  callback=self.parse_search, meta={'keyword': keyword})
        else:
            # Report the mis-recognized captcha (the OCR fee is refunded)...
            self.uu.report_error(response.meta['code_id'])
            # ...and retry with a fresh captcha instead of dead-ending the crawl.
            yield Request(url=self.host + '/validateCode.jspx?type=0',
                          callback=self.code_ocr, dont_filter=True)

    def parse_search(self, response):
        """Store the raw search-result page, then follow each company link."""
        gs = GsxtGovItem(response)
        gs['company_name'] = 'search'
        gs['response_name'] = response.meta['keyword']
        gs['content'] = response.body
        yield gs

        # Raw strings fix the invalid ``\<`` escape of the original patterns.
        links = re.findall(r'<a href="(/businessPublicity\.jspx\?id=.*?)">', response.body)
        com_names = re.findall(r'<a href="/businessPublicity\.jspx\?id=.*?">(.*?)</a>', response.body)
        for link, com_name in zip(links, com_names):
            # Drop matched names from the pending list; self.words is planned to
            # live in a redis store later for easier management.
            if com_name in self.words:
                self.words.remove(com_name)
            # Company id extracted from the link; kept for the endpoints below.
            com_id = re.findall(r'id=(.*)', link)
            meta = {'company_name': com_name}
            # Business (gongshang) publicity page.
            yield FormRequest(url=self.host + link, callback=self.parse_gongshang, meta=meta)
            # Enterprise publicity page.
            # yield FormRequest(url='http://gsxt.hljaic.gov.cn/enterprisePublicity.jspx', formdata={'id': com_id}, meta=meta)
            # # Other-department publicity page.
            # yield FormRequest(url='http://gsxt.hljaic.gov.cn/otherDepartment.jspx', formdata={'id': com_id})
            # # Judicial-assistance publicity page.
            # yield FormRequest(url='http://gsxt.hljaic.gov.cn/justiceAssistance.jspx', formdata={'id': com_id})

    def parse_gongshang(self, response):
        """Persist the raw page (postgresql item) and structured tables (mongodb item)."""
        # Raw page content -> postgresql.
        gs = GsxtGovItem(response)
        gs['company_name'] = response.meta['company_name']
        gs['response_name'] = tables_names(response.xpath('//table').extract())
        gs['content'] = response.body
        yield gs

        # Structured data -> mongodb.
        mo = MongoItem()
        mo['_id'] = response.meta['company_name']
        # (start marker, end marker) pairs delimiting the HTML sections to parse:
        # basic info, investors, change records.
        startends = [['id="jibenxinxi"', '</table>'],
                     ['id="touziren"', '</div>'],
                     ['变更信息</th>', '</div>']]
        mo['document'] = html_to_list_dict(response.body, startends)
        yield mo