# -*- coding: utf-8 -*-


import scrapy
from scrapy import Request, FormRequest
from scrapyluke.uuCode import UUcode
import datetime
import time
from gsxt_gov.items import GsxtGovItem
import re
from scrapyluke.items import MongoItem
from scrapyluke.processors import html_to_dict, html_to_list_dict, tables_names
from formatneimenggu import tables_to_dict
import urllib
from urllib import unquote
import json
from scrapy.selector import Selector

class NeiMengGuSpider(scrapy.Spider):
    """Spider for the Inner Mongolia enterprise credit publicity site.

    Flow per keyword: request the home page, fetch a CAPTCHA image, solve it
    through the external UUcode service, submit keyword + CAPTCHA answer,
    follow each company link in the result list, and yield MongoItem
    documents built from the company's publicity sub-pages.

    Each keyword gets its own cookiejar (meta['cookiejar']=check_word) so the
    CAPTCHA session stays isolated per keyword.
    """
    name = 'neimenggu'
    # start_urls = ['http://www.nmgs.gov.cn:7001/aiccips/']



    def __init__(self):
        super(NeiMengGuSpider, self).__init__()
        # Client for the UUcode CAPTCHA-recognition service (account id / key).
        self.uu = UUcode('109521', '3c8906d3666e44bb9c961e1647126dbc')
        self.word = '集团'
        # Directory where downloaded CAPTCHA images are saved.
        self.base_path = '/home/li-x/imgs'
        # URL of the CAPTCHA image.
        self.code = 'http://www.nmgs.gov.cn:7001/aiccips/verify.html?random=0.5151826862711459'
        # Site root; all form/detail URLs are built from this.
        self.oriurl = 'http://www.nmgs.gov.cn:7001/aiccips/'

        # Start URL.


        self.surl = 'http://www.nmgs.gov.cn:7001/aiccips/'
        # Path to the file of keywords (word segments) to query.


        self.fenci_path = '/home/li-x/provinces/finished/内蒙古_fenci'
        # Path to the file recording finished keywords: company name and insert time.


        self.finished_fenci_path = '/home/li-x/provinces/finished_fenci_内蒙古'
        # List of word segments to query.


        # self.words = self.creatlist(1,5)


        self.words = ['哈哈']

    # Request the start URL once per keyword.


    def start_requests(self):
        """Yield one start-page request per keyword, each with its own cookiejar."""
        for check_word in self.words:
            yield  Request(url=self.surl,callback=self.parse,dont_filter=True, meta={'check_word':check_word,'cookiejar':check_word})
    # Request a CAPTCHA image.


    def parse(self, response):
        """Request the CAPTCHA image within this keyword's session."""
        check_word = response.request.meta['check_word']
        yield Request(url=self.code,callback=self.code_ocr, meta={'check_word':check_word,'cookiejar':check_word},dont_filter=True)

    # Submit the CAPTCHA answer together with the keyword.


    def code_ocr(self, response):
        """Save the CAPTCHA image, solve it via UUcode, and post keyword + answer."""
        img_abspath = self.uu.store_code(response.body, str(int(time.time()))+'.jpg', self.base_path)
        # 8001 and 30 are presumably the UUcode captcha-type code and a
        # timeout — TODO confirm against the UUcode client.
        result = self.uu.check_code(img_abspath, 8001, 30)

        if result :
            check_word = response.request.meta['check_word']
            # result is (code_id, answer); code_id is forwarded so a wrong
            # answer can later be reported back to the service (see page_list).
            code = result[1]
            code_id = result[0]
            # code = '-3'


            data = {
                'textfield':check_word,
                'code':code
            }
            yield scrapy.FormRequest(url=self.oriurl+'CheckEntContext/checkCode.html',formdata=data,meta={'code':code,'code_id':code_id,'check_word':check_word,'cookiejar':check_word},callback=self.check_ent, dont_filter=True)

    # Extract the textfield returned by checkCode.html.


    def check_ent(self,response):
        """Parse the checkCode JSON reply and post its textfield to get the result list."""
        text = json.loads(response.body_as_unicode())
        if text:
            textfield = text.get('textfield')
            if textfield:
                data = {
                    'textfield':textfield,
                    'code':response.request.meta['code']
                }
                check_word = response.request.meta['check_word']
                yield scrapy.FormRequest(url = self.oriurl+'CheckEntContext/showInfo.html',formdata=data,callback=self.page_list,meta = {'code_id':response.request.meta['code_id'],'check_word':check_word,'cookiejar':check_word}, dont_filter=True)

    # From the textfield, get the page of company links, and judge whether the
    # CAPTCHA was answered correctly (an empty list means it was wrong).


    def page_list(self,response):
        """Follow every company link in the result list; report a failed CAPTCHA otherwise."""
        if Selector(text=response.body).xpath("//div[@class='list']/ul/li/a").extract_first():
            check_word = response.request.meta['check_word']
            page_list = Selector(text=response.body).xpath("//div[@class = 'list']//a/@href").extract()
            for page_url in page_list:
                # Links are relative ("../xxx"); rebase them onto the site root.
                url = self.oriurl + re.findall(u'\.\./([\s\S]+)',page_url)[0] if re.findall(u'\.\./([\s\S]+)',page_url) else None
                yield scrapy.Request(url = url,callback=self.page_jump,meta={'check_word':check_word,'cookiejar':check_word})
        else:
            # No result list: assume the CAPTCHA answer was wrong and report it.
            self.uu.report_error(response.request.meta['code_id'])

    # Fetch the company's detailed information pages.


    def page_jump(self,response):
        """Record the company hit, then request every publicity sub-page for it."""
        com_name = Selector(text=response.body).xpath("//div[@id='details']/div[1]/h2/text()").extract_first()
        # NOTE(review): com_name.strip() is evaluated before the `and com_name`
        # guard, so a missing <h2> (com_name is None) raises AttributeError
        # here rather than falling through — confirm whether that is intended.
        company_name= re.findall(u'([\s\S]+)注册',com_name.strip())[0].strip() if (re.findall(u'([\s\S]+)注册',com_name) and com_name) else com_name
        check_word = response.request.meta['check_word']
        # Append "keyword:company,timestamp,url" to the finished-keywords file.
        op = open(self.finished_fenci_path,'a')
        op.write(response.request.meta['check_word']+':'+company_name+','+str(datetime.datetime.now())+','+response.url+'\n')
        op.close()

        # Business-registration publicity pages (工商公示) — handled by page_info1.
        url_list1 = [
            "http://www.nmgs.gov.cn:7001/aiccips/GSpublicity/GSpublicityList.html?service=entInfo",
            "http://www.nmgs.gov.cn:7001/aiccips/GSpublicity/GSpublicityList.html?service=entChaInfo",
            "http://www.nmgs.gov.cn:7001/aiccips/GSpublicity/GSpublicityList.html?service=entCheckInfo",
            "http://www.nmgs.gov.cn:7001/aiccips/GSpublicity/GSpublicityList.html?service=curStoPleInfo",
            "http://www.nmgs.gov.cn:7001/aiccips/GSpublicity/GSpublicityList.html?service=pleInfo",
            "http://www.nmgs.gov.cn:7001/aiccips/GSpublicity/GSpublicityList.html?service=cipUnuDirInfo",
            "http://www.nmgs.gov.cn:7001/aiccips/GSpublicity/GSpublicityList.html?service=cipBlackInfo",
            "http://www.nmgs.gov.cn:7001/aiccips/GSpublicity/GSpublicityList.html?service=cipPenaltyInfo",
            "http://www.nmgs.gov.cn:7001/aiccips/GSpublicity/GSpublicityList.html?service=cipSpotCheInfo",
            "http://www.nmgs.gov.cn:7001/aiccips/GSpublicity/GSpublicityList.html?service=twadm",
            "http://www.nmgs.gov.cn:7001/aiccips/GSpublicity/GSpublicityList.html?service=twcredit",
        ]

        # Enterprise self-publicity pages (企业公示) — handled by page_info2.
        url_list2 = [
            "http://www.nmgs.gov.cn:7001/aiccips/BusinessAnnals/BusinessAnnalsList.html",
            "http://www.nmgs.gov.cn:7001/aiccips/QualificationMsg.html",
            "http://www.nmgs.gov.cn:7001/aiccips/PropertyRightsMsg.html",
            "http://www.nmgs.gov.cn:7001/aiccips/ContributionCapitalMsg.html",
            "http://www.nmgs.gov.cn:7001/aiccips/XZPunishmentMsg.html",
            "http://www.nmgs.gov.cn:7001/aiccips/AppPerInformation.html",
            "http://www.nmgs.gov.cn:7001/aiccips/CreditInformation.html",
            "http://www.nmgs.gov.cn:7001/aiccips/intPropertyMsg.html",
            'http://www.nmgs.gov.cn:7001/aiccips/GDGQTransferMsg/shareholderTransferMsg.html',
        ]

        # Other-department publicity (其他公示) — handled by page_info3.
        url_list3 = [
            "http://www.nmgs.gov.cn:7001/aiccips/OtherPublicity/otherDeptInfo.html",
        ]

        # Judicial publicity (司法公示) — handled by page_info4.
        url_list4 = [
            "http://www.nmgs.gov.cn:7001/aiccips/OtherPublicity/highCourt.html"
        ]

        # Hidden form fields identifying the company; every sub-page needs them.
        entno = Selector(text=response.body).xpath("//input[@id='entNo']/@value").extract_first()
        enttype = Selector(text=response.body).xpath("//input[@id='entType']/@value").extract_first()
        regorg = Selector(text=response.body).xpath("//input[@id='regOrg']/@value").extract_first()
        data = {
            'entNo':entno,
            'entType':enttype,
            "regOrg":regorg
        }
        for url in url_list1:
            yield scrapy.FormRequest(url = url,formdata=data,callback=self.page_info1, meta={'check_word':check_word,'cookiejar':check_word})
        for url in url_list2:
            yield scrapy.FormRequest(url = url,formdata=data,callback=self.page_info2, meta={'check_word':check_word,'cookiejar':check_word})
        for url in url_list3:
            yield scrapy.FormRequest(url = url,formdata=data,callback=self.page_info3, meta={'check_word':check_word,'cookiejar':check_word})
        for url in url_list4:
            yield scrapy.FormRequest(url = url,formdata=data,callback=self.page_info4, meta={'check_word':check_word,'cookiejar':check_word})

    # Handle the responses of the detail-information requests.


    def page_info1(self,response):
        """Scrape business-registration publicity tables; follow shareholder pop-ups on entInfo."""
        if Selector(text=response.body).xpath("//table/tr//td[2]").extract():
            check_word = response.request.meta['check_word']
            name_ori = Selector(text=response.body).xpath("//div[@id='details']/div[1]/h2/text()").extract_first()
            # Company name is the <h2> text up to "注册" (registration).
            company_name = re.findall(u'([\s\S]+)注册',name_ori.strip())[0].strip() if name_ori else name_ori

            if 'entInfo' in response.url:
                # Shareholder details open via window.open(...) — pull the URL
                # out of the onclick attribute.
                url_list = Selector(text=response.body).xpath("//table//a/@onclick").extract()
                for url in url_list:
                    url_re = re.findall('window.open\(\'([\s\S]*?)\'',url)[0] if re.findall('window.open\(\'([\s\S]*?)\'',url) else None
                    yield Request(url = url_re ,callback= self.gdxx , meta={'com_name':company_name, 'check_word':check_word,'cookiejar':check_word}, dont_filter=True)

            mo = MongoItem(response)
            mo['_id'] = company_name
            # (start marker, end marker, section label) triples fed to
            # html_to_list_dict to slice each publicity section out of the HTML.
            startends = [
                ['基本信息 </th>', '</table>','工商公示-基本信息'],
                ['基本信息</th>', '</table>','工商公示-基本信息'],
                ['id="touzirentop"','</div>','工商公示-投资人信息'],
                ['变更信息</th></tr>','</div>','工商公示-变更信息'],
                ['主要人员信息</th>','</div>','工商公示-主要人员信息'],
                ['参加经营的家庭成员姓名</th>','</div>','工商公示-参与经营的家庭成员姓名'],
                ['分支机构信息</th>','</div>','工商公示-分支机构信息'],
                # NOTE(review): this entry has no label element, unlike its
                # siblings — confirm html_to_list_dict accepts 2-element entries.
                ['清算信息</th>','</div>',],
                ['动产抵押信息</th>','</div>','工商公示-动产抵押登记信息'],
                ['行政处罚信息</th>','</div>','工商公示-行政处罚信息'],
                ["经营异常信息</th>",'</div>', '工商公示-经营异常信息'],
                ['严重违法信息</th>','</div>','工商公示-严重违法信息'],
                ['抽查检查信息</th>','</div>','工商公示-抽查检查信息'],
                     ]
            mo['document'] = html_to_list_dict(response.body, startends)
            yield mo

    def gdxx(self,response):
        """Scrape the shareholder-and-contribution detail pop-up page."""
        company_name = response.request.meta['com_name']
        mo = MongoItem()
        mo['_id'] = company_name
        startends = [
            ['股东及出资信息</th>', '</div>', '股东及出资详细信息']
        ]
        mo['document'] = html_to_list_dict(response.body, startends)
        yield mo

    def page_info2(self,response):
        """Scrape enterprise self-publicity tables; follow annual-report and licence links."""
        check_word = response.request.meta['check_word']
        if Selector(text=response.body).xpath("//table//tr//td[2]").extract():
            name_ori = Selector(text=response.body).xpath("//div[@id='details']/div[1]/h2/text()").extract_first()
            company_name = re.findall(u'([\s\S]+)注册',name_ori.strip())[0].strip() if name_ori else name_ori

            if 'Annals' in response.url:
                # Annual-report pages are plain links.
                url_list = Selector(text=response.body).xpath("//table//a//@href").extract()
                for url in url_list:
                    yield Request(url = url , callback=self.annals_info,meta={'com_name':company_name,'check_word':check_word,'cookiejar':check_word})
                    # print url


            if 'AppPerInformation' in response.url:
                # Administrative-licence details: extract the id from the
                # onclick handler toDetail('<id>', ...).
                id_list = Selector(text=response.body).xpath("//table//a/@onclick").extract()
                for id in id_list:
                    id_re = re.findall(r'toDetail\(\'([\s\S]*?)\'\,',id)[0] if re.findall(r'toDetail\(\'([\s\S]*?)\'\,',id) else None
                    yield Request(url = 'http://www.nmgs.gov.cn:7001/aiccips/detailedness?status=%E6%9C%89%E6%95%88&id=' + id_re , callback=self.xzxk_info,meta={'com_name':company_name,'check_word':check_word,'cookiejar':check_word})


            mo = MongoItem(response)
            mo['_id'] = company_name
            startends = [
                ['id="qiyenianbao"','</div>','企业公示-企业年报'],
                ['股东及出资信息</th>','</div>','企业公示-股东及出资信息'],
                ['股东及出资信息 </th>','</div>','企业公示-股东及出资信息'],
                ['变更信息</th>','</table>','企业公示-变更信息'],
                ['股权变更信息</th>','</div>','企业公示-股权变更信息'],
                ['id="appPer"','</div>','企业公示-行政许可信息'],
                ['id="inproper"','</div>','企业公示-知识产权出质登记信息'],
                ['id="xzpun"','</div>','企业公示-行政处罚信息'],
                     ]
            mo['document'] = html_to_list_dict(response.body, startends)
            yield mo

    def annals_info(self,response):
        """Scrape one annual-report detail page into a MongoItem."""
        company_name = response.request.meta['com_name']
        # print company_name


        # The report title is the first table header cell.
        name = response.xpath("//table//tr[1]/th/text()").extract_first()
        # print name


        # NOTE(review): if the page has no <th>, name is None and the `in`
        # test below raises TypeError — confirm such pages cannot occur.
        # Strip the trailing "red marks edited fields" hint from the title.
        if u"红色为修改过的信息项" in name:
            name = re.findall(u'([\s\S]+)红色为修改过的信息项',name)[0].strip()
        # print name


        mo = MongoItem()
        mo['_id'] = company_name
        mo['document'] = tables_to_dict(response.body, name, 'detailsList')
        yield mo

    def xzxk_info(self,response):
        """Scrape one administrative-licence detail page into a MongoItem."""
        company_name = response.request.meta['com_name']
        mo = MongoItem()
        mo['_id'] = company_name
        startends = [
            ['变更信息</th>', '</div>', '行政许可详细信息']
        ]
        mo['document'] = html_to_list_dict(response.body, startends)
        yield mo


    def page_info3(self,response):
        """Scrape the other-department publicity page into a MongoItem."""
        if Selector(text=response.body).xpath("//table//tr//td[2]").extract():
            name_ori = Selector(text=response.body).xpath("//div[@id='details']/div[1]/h2/text()").extract_first()
            company_name = re.findall(u'([\s\S]+)注册',name_ori.strip())[0].strip() if name_ori else name_ori
            mo = MongoItem(response)
            mo['_id'] = company_name
            startends = [
                ['行政许可信息</th>', '</div>', '其他公示-行政许可信息'],
                ['行政处罚信息</th>', '</div', '其他公示-行政处罚信息']
                     ]
            mo['document'] = html_to_list_dict(response.body, startends)
            yield mo

    def page_info4(self,response):
        """Scrape the judicial publicity page into a MongoItem."""
        if response.xpath("//table//tr//td[2]").extract():
            name_ori = Selector(text=response.body).xpath("//div[@id='details']/div[1]/h2/text()").extract_first()
            company_name = re.findall(u'([\s\S]+)注册',name_ori.strip())[0].strip() if name_ori else name_ori
            mo = MongoItem(response)
            mo['_id'] = company_name
            startends = [
                ['司法股权冻结信息</th>', '</div>', '司法公示-司法股权冻结信息'],
                ['司法股东变更登记信息</th>', '</div>', '司法公示-司法股东变更登记表']
                     ]
            mo['document'] = html_to_list_dict(response.body, startends)
            yield mo

    # Build the keyword list and de-duplicate against already-finished keywords.


    def creatlist(self,startnum,endnum):
        """Read keywords [startnum, endnum) from fenci_path, skipping ones
        already recorded in finished_fenci_path, and return them as a list.

        NOTE(review): shadows the builtin `list`, and neither file handle is
        closed explicitly — consider `with open(...)` in a future cleanup.
        """
        a = 1

        list = []
        list_fin = []
        f = open(self.fenci_path,'rb')

        # Keywords already processed: everything before the ':' on each line
        # of the finished file.
        for line2 in open(self.finished_fenci_path, 'rb'):
            fenci = re.findall(r'([\s\S]*?):',line2)[0] if re.findall(r'([\s\S]*?):',line2) else None
            if fenci not in list_fin:
                list_fin.append(fenci)
        while a < endnum:
            line = f.readline()
            if not line:
                    break
            if startnum <= a :
                line = line.replace('\'','')
                line = line.replace('\n','')
                # Keyword is the first comma-separated field.
                hehe = line.split(',',2)
                if hehe[0]:
                    # Keep only keywords of length >= 2 not already finished.
                    if len(hehe[0]) >= 2:
                        if hehe[0] not in list_fin:
                    # if hehe[0]


                            list.append(hehe[0])
            a += 1
        return list

#


# if __name__ == '__main__':


#     f = open('test.html', 'rb')


#     content = f.read()


#     f.close()


#     startends1 = [


#                 ['基本信息 </th>', '</table>','基本信息'],


#                 ['基本信息</th>', '</table>','基本信息'],


#                 ['id="touzirentop"','</div>','投资人信息'],


#                 ['变更信息</th>','</div>','变更信息'],


#                 ['主要人员信息</th>','</div>','主要人员信息'],


#                 ['参加经营的家庭成员姓名</th>','</div>','参与经营的家庭成员姓名'],


#                 ['分支机构信息</th>','</div>','分支机构信息'],


#                 ['清算信息</th>','</div>',],


#                 ['动产抵押信息</th>','</div>','动产抵押登记信息'],


#                 ['行政处罚信息</th>','</div>','行政处罚信息'],


#                 ["经营异常信息</th>",'</div>', '经营异常信息'],


#                 ['严重违法信息</th>','</div>','严重违法信息'],


#                 ['抽查检查信息</th>','</div>','抽查检查信息'],


#                 ]


#


#     startends2 = [


#         ['id="qiyenianbao"','</div>','企业公示-年报'],


#


#         ['股东及出资信息 </th>','</div>','企业公示-股东及出资信息'],


#         ['股东及出资信息</th>','</div>','企业公示-股东及出资信息'],


#         ['变更信息</th>','</table>','企业公示-变更信息'],


#         ['股权变更信息</th>','</div>','企业公示-股权变更信息'],


#         ['id="appPer"','</div>','企业公示-行政许可信息'],


#         ['id="inproper"','</div>','企业公示-知识产权出质登记信息'],


#         ['id="xzpun"','</div>','企业公示-行政处罚信息'],


#     ]


#


#     startends3 = [


#         ['行政许可信息</th>', '</div>', '其他部门公示-行政许可信息'],


#         ['行政处罚信息</th>', '</div', '其他部门公示-行政处罚信息']


#     ]


#


#     startends4 = [


#         ['司法股权冻结信息</th>', '</div>', '司法协助公示-司法股权冻结信息'],


#         ['司法股东变更登记信息</th>', '</div>', '司法协助公示-司法股东变更登记表']


#     ]


#     con = html_to_list_dict(content, startends4)


#     print json.dumps(con)

