# -*- coding: utf-8 -*-


import scrapy
from scrapy import Spider
from scrapy import Request, FormRequest
import datetime
from scrapyluke.uuCode import UUcode
from scrapyluke.commonfun import to_str
from scrapyluke.processors import html_to_dict, html_to_list_dict, tables_names
from scrapyluke.items import MongoItem
from gsxt_gov.spiders.report_format import tables_to_dict
from gsxt_gov.items import GsxtGovItem
import re
import hashlib
import time

class qinghaiSpider(scrapy.Spider):
    """Spider for the Qinghai GSXT (enterprise credit publicity) site.

    Workflow per keyword: open the search page (one cookie jar per keyword),
    fetch the captcha image, solve it through the UUcode OCR service, submit
    the answer, run the company search, then crawl the four public-information
    sections of every company in the result list and yield the parsed tables
    as structured ``MongoItem`` documents keyed by company name.
    """

    name = 'qinghai'

    def __init__(self):
        super(qinghaiSpider, self).__init__()
        # Captcha OCR client (UUcode account id / secret).
        self.uu = UUcode('109521', '3c8906d3666e44bb9c961e1647126dbc')
        self.code_type = 8001     # UUcode captcha type id for this site
        self.code_getMax = 30     # max wait passed to the OCR service
        self.host = 'http://218.95.241.36'
        # Path of this province's keyword (word-segmentation) file.
        self.ppl_path = '/home/li/working/fenci'
        # Real-time log of keywords processed during this run.
        self.realtime_record_path = '/home/li/working/finished_fenci_qinghai'
        # Log of keywords skipped this run (already done in a previous run).
        self.skip_record_path = '/home/li/working/finished_already_qinghai'

        self.already = self.wordcount()
        self.words = self.creatlist(1, 10)

    def start_requests(self):
        """Open the search page once per keyword, each in its own cookie jar."""
        for word in self.words:
            yield Request(url=self.host + '/search.jspx',
                          meta={'cookiejar': word, 'keyword': word},
                          dont_filter=True)

    def code_right(self, res):
        """Return True when the captcha-check response marks the code valid."""
        return 'true' in res

    def parse(self, response):
        """Search page loaded: fetch the captcha image for this session."""
        keyword = response.meta['keyword']
        yield Request(url=self.host + '/validateCode.jspx?type=0',
                      meta={'cookiejar': keyword, 'keyword': keyword},
                      callback=self.code_ocr, dont_filter=True)

    def code_ocr(self, response):
        """OCR the captcha image via UUcode and submit the decoded text."""
        result = self.uu.check_code_cache(response.body, self.code_type, self.code_getMax)
        if result:
            code_id, code = result
            keyword = response.meta['keyword']
            # Server-side verification of the decoded captcha.
            yield scrapy.FormRequest(url=self.host + '/checkCheckNo.jspx', method='POST',
                                     formdata={'checkNo': code},
                                     meta={'cookiejar': keyword, 'code': code,
                                           'code_id': code_id, 'keyword': keyword},
                                     callback=self.check_code, dont_filter=True)

    def check_code(self, response):
        """Run the company search if the captcha was accepted; otherwise
        report the wrong answer back to UUcode (the fee is refunded)."""
        if self.code_right(response.body):
            keyword = response.meta['keyword']
            yield scrapy.FormRequest(url=self.host + '/searchList.jspx', method='POST',
                                     formdata={'checkNo': response.meta['code'],
                                               'entName': keyword},
                                     meta={'cookiejar': keyword, 'keyword': keyword},
                                     callback=self.get_infolist, dont_filter=True)
        else:
            self.uu.report_error(response.meta['code_id'])

    def get_infolist(self, response):
        """Parse the search-result list, log it, and crawl the four
        public-information sections of every company found."""
        url_list = response.xpath("//div[@class='list']/ul/li/a/@href").extract()
        name_list = response.xpath("//div/ul/li[@class='font16']//text()").extract()
        keyword = response.request.meta['keyword']

        # Record the keyword together with the company names it returned.
        with open(self.realtime_record_path, 'a') as op:
            op.write(keyword + ':' + ''.join([n + '-' for n in name_list]) +
                     ',' + str(datetime.datetime.now()) + ',' + response.url + '\n')

        # The detail URL from the list points at the business-publicity page;
        # the other three sections share the URL with one path segment swapped.
        sections = [('businessPublicity', self.get_info_gsgs),    # business publicity
                    ('enterprisePublicity', self.get_info_qygs),  # enterprise publicity
                    ('otherDepartment', self.get_info_qtbm),      # other departments
                    ('justiceAssistance', self.get_info_sfxz)]    # justice assistance
        for url, name in zip(url_list, name_list):
            base = self.host + url
            for segment, callback in sections:
                yield scrapy.Request(url=base.replace('businessPublicity', segment),
                                     meta={'cookiejar': keyword, 'company_name': name,
                                           'keyword': keyword},
                                     callback=callback, dont_filter=True)

    def get_info_gsgs(self, response):
        """Business-publicity page: store each section as a structured document
        and follow the window.open() detail links found in its tables."""
        mo = MongoItem()
        mo['_id'] = response.meta['company_name']
        startends = [['id="jibenxinxi"', '</table>', '工商公示-基本信息'],
                     ['id="touziren"', '</div>', '工商公示-股东信息'],
                     ['变更信息</th>', '</div>', '工商公示-变更信息'],
                     ['主要人员信息</th>', '</div>', '工商公示-主要人员信息'],
                     ['分支机构信息</th>', '</div>', '工商公示-分支机构信息'],
                     ['清算信息</th>', '</div>', '工商公示-清算信息'],
                     ['股权出质登记信息</th>', '</div>', '工商公示-股权出质登记信息'],
                     ['id="dongchandiya"', '</div>', '工商公示-动产抵押登记信息'],
                     ['id="jingyingyichangminglu"', '</div>', '工商公示-经营异常信息'],
                     ['id="yanzhongweifaqiye"', '</div>', '工商公示-严重违法信息'],
                     ['id="xingzhengchufa"', '</div>', '工商公示-行政处罚信息'],
                     ['id="chouchaxinxi"', '</div>', '工商公示-抽查检查信息'],
                     ['参加经营的家庭成员姓名</th>', '</div>', '工商公示-参加经营的家庭成员姓名'],
                     ['年报信息</th>', '</div>', '工商公示-年报信息']]
        mo['document'] = html_to_list_dict(response.body, startends)
        yield mo

        # Follow onclick="window.open('...')" links (shareholder details).
        for onclick in response.xpath("//table//a/@onclick").extract():
            found = re.findall(u"window.open\(\'(.*?)\'\)", onclick)
            if found:
                yield scrapy.Request(url=self.host + found[0],
                                     meta={'cookiejar': response.meta['keyword'],
                                           'company_name': response.meta['company_name'],
                                           'keyword': response.meta['keyword']},
                                     callback=self.parse_info,
                                     dont_filter=True)

    def parse_info(self, response):
        """Shareholder / capital-contribution detail page."""
        mo = MongoItem()
        mo['_id'] = response.meta['company_name']
        startends = [['class="detailsList"', '</table>', '工商公示-股东及出资信息']]
        mo['document'] = html_to_list_dict(response.body, startends)
        yield mo

    def get_info_qygs(self, response):
        """Enterprise-publicity page: follow per-year annual-report links,
        then store the page's sections as a structured document."""
        for url in response.xpath("//table//a/@href").extract():
            if u'QueryYear' in url:
                yield Request(url=self.host + url,
                              meta={'cookiejar': response.meta['keyword'],
                                    'company_name': response.meta['company_name'],
                                    'keyword': response.meta['keyword']},
                              callback=self.annual_info,
                              dont_filter=True)

        mo = MongoItem()
        mo['_id'] = response.meta['company_name']
        startends = [['id="qiyenianbao"', '</div>', '企业公示-企业年报'],
                     ['id="xingzhengxuke"', '</div>', '企业公示-行政许可信息'],
                     ['id="zscqDiv"', '</div>', '企业公示-知识产权出质登记信息'],
                     ['id="gdDiv"', '</div>', '企业公示-股东及出资信息'],
                     ['id="altInv"', '</div>', '企业公示-变更信息'],
                     ['id="xingzhengchufa"', '</div>', '企业公示-行政处罚信息'],
                     ['id="gqbg"', '</div>', '企业公示-股权变更信息']]
        mo['document'] = html_to_list_dict(response.body, startends)
        yield mo

    def annual_info(self, response):
        """Annual-report page: the report title becomes the document key."""
        name = response.xpath("//div/div/div[1]/table[1]/tr[1]/th/text()").extract_first()
        # Guard against a missing title, then strip the trailing
        # "items in red were modified" notice from it.
        if name and u"红色为修改过的信息项" in name:
            name = re.findall(u'([\s\S]+)红色为修改过的信息项', name)[0].strip()

        mo = MongoItem()
        mo['_id'] = response.meta['company_name']
        mo['document'] = tables_to_dict(response.body, name, 'detailsList')
        yield mo

    def get_info_qtbm(self, response):
        """Other-departments publicity page."""
        mo = MongoItem()
        mo['_id'] = response.meta['company_name']
        startends = [['id="jibenxinxi"', '</div>', '其他公示-行政许可信息'],
                     ['id="xzcfDiv"', '</div>', '其他公示-行政处罚信息']]
        mo['document'] = html_to_list_dict(response.body, startends)
        yield mo

    def get_info_sfxz(self, response):
        """Justice-assistance publicity page."""
        mo = MongoItem()
        mo['_id'] = response.meta['company_name']
        startends = [['id="EquityFreezeDiv"', '</div>', '司法公示-司法股权冻结信息'],
                     ['id="xzcfDiv"', '</div>', '司法公示-司法股东变更登记信息']]
        mo['document'] = html_to_list_dict(response.body, startends)
        yield mo

    def creatlist(self, startnum, endnum):
        """Build the keyword list from lines [startnum, endnum) of the word
        file (1-based, endnum exclusive — matching the original semantics),
        skipping words already finished in a previous run; skipped words are
        appended to the skip log instead.
        """
        # Truncate the skip log for this run.
        open(self.skip_record_path, 'w').close()

        words = []
        with open(self.ppl_path, 'rb') as f:
            lineno = 1
            while lineno < endnum:
                line = f.readline()
                if not line:
                    break
                if startnum <= lineno:
                    word = line.replace('\'', '').replace('\n', '')
                    if word not in self.already:
                        words.append(word)
                    else:
                        with open(self.skip_record_path, 'a') as op:
                            op.write(word + '\n')
                lineno += 1
        return words

    def wordcount(self):
        """Return the keywords already recorded in the real-time log
        (order-preserving, deduplicated); empty on the very first run
        when the log file does not exist yet."""
        already = []
        try:
            f = open(self.realtime_record_path, 'rb')
        except IOError:
            return already
        with f:
            for line in f:
                if line.strip():
                    # Log format: "<keyword>:<names>,<timestamp>,<url>".
                    word = line.split(':', 2)[0]
                    if word not in already:
                        already.append(word)
        return already