# -*- coding: utf-8 -*-
import scrapy
from scrapy import Request, FormRequest
from scrapyluke.uuCode import UUcode
import datetime
import time
from test1.items import GsxtGovItem
import re
import urllib
from urllib import unquote
import json

class NeiMengGuSpider(scrapy.Spider):
    name = 'neimenggu'
    start_urls = ['http://www.nmgs.gov.cn:7001/aiccips/']

    def __init__(self):
        """Configure the captcha OCR client and the site endpoints used by this spider."""
        super(NeiMengGuSpider, self).__init__()
        # Root of the registration-bureau site; all POST endpoints hang off it.
        self.oriurl = 'http://www.nmgs.gov.cn:7001/aiccips/'
        # Captcha image endpoint (fixed "random" token kept from the recorded session).
        self.code = 'http://www.nmgs.gov.cn:7001/aiccips/verify.html?random=0.5151826862711459'
        # Local directory where captcha images get written before OCR.
        self.base_path = '/home/li-x/imgs'
        # Search keyword submitted together with the solved captcha.
        self.word = '哈哈'
        # UU captcha-recognition client (account id, API key).
        self.uu = UUcode('109521', '3c8906d3666e44bb9c961e1647126dbc')

    def parse(self, response):
        """Entry point: ignore the landing page and request a fresh captcha image."""
        yield scrapy.Request(url=self.code, callback=self.code_ocr)

    def code_ocr(self, response):
        """Persist the captcha image, OCR it via UU, then submit keyword + code.

        If OCR fails (falsy result) the chain simply stops; a new crawl
        run will fetch a fresh captcha.
        """
        filename = str(int(time.time())) + '.jpg'
        img_abspath = self.uu.store_code(response.body, filename, self.base_path)
        result = self.uu.check_code(img_abspath, 8001, 30)
        if not result:
            return
        code = result[1]
        payload = {
            'textfield': self.word,
            'code': code,
        }
        # The code is carried in meta so check_ent can re-submit it.
        yield scrapy.FormRequest(
            url=self.oriurl + 'CheckEntContext/checkCode.html',
            formdata=payload,
            meta={'code': code},
            callback=self.check_ent,
        )

    def check_ent(self, response):
        """Replay the server-echoed keyword plus the captcha to the search endpoint."""
        text = json.loads(response.body_as_unicode())
        if not text:
            return
        payload = {
            'textfield': text['textfield'],
            'code': response.request.meta['code'],
        }
        yield scrapy.FormRequest(
            url=self.oriurl + 'CheckEntContext/showInfo.html',
            formdata=payload,
            callback=self.page_list,
        )

    def page_list(self, response):
        """Follow every company link on the search-result listing.

        The hrefs are site-relative ('../xxx'); they are rewritten against
        ``self.oriurl``.  Hrefs that do not match the expected pattern are
        skipped — the original code yielded ``Request(url=None)`` for them,
        which raises inside Scrapy.  The regex is also evaluated only once
        per href instead of twice.
        """
        for page_url in response.xpath("//div[@class = 'list']//a/@href").extract():
            match = re.findall(u'\.\./([\s\S]+)', page_url)
            if not match:
                continue  # unexpected href shape; previously became url=None
            yield scrapy.Request(url=self.oriurl + match[0], callback=self.page_jump)

    def page_jump(self, response):
        """From one company's detail page, POST its identifiers to every publicity endpoint.

        Fixes:
        - The ``entType`` XPath was ``//input[@id=entType']/@value`` (missing
          opening quote), so it never matched and ``entType`` was always None.
        - ``com_name`` was passed to ``re.findall`` before being None-checked,
          crashing whenever the <h2> heading was absent.
        """
        com_name = response.xpath("//div[@id='details']/div[1]/h2/text()").extract_first()
        # Trim the trailing '注册...' suffix from the heading to get the bare
        # company name; tolerate a missing heading (extract_first -> None).
        if com_name:
            match = re.findall(u'([\s\S]+)注册', com_name.strip())
            company_name = match[0].strip() if match else com_name
        else:
            company_name = com_name

        # Eleven publicity categories share one endpoint, parameterised by
        # ?service=...; the remainder are standalone pages.
        services = ['entInfo', 'entChaInfo', 'entCheckInfo', 'curStoPleInfo',
                    'pleInfo', 'cipUnuDirInfo', 'cipBlackInfo', 'cipPenaltyInfo',
                    'cipSpotCheInfo', 'twadm', 'twcredit']
        url_list = ['http://www.nmgs.gov.cn:7001/aiccips/GSpublicity/GSpublicityList.html?service=' + service
                    for service in services]
        url_list += [
            "http://www.nmgs.gov.cn:7001/aiccips/BusinessAnnals/BusinessAnnalsList.html",
            "http://www.nmgs.gov.cn:7001/aiccips/QualificationMsg.html",
            "http://www.nmgs.gov.cn:7001/aiccips/PropertyRightsMsg.html",
            "http://www.nmgs.gov.cn:7001/aiccips/ContributionCapitalMsg.html",
            "http://www.nmgs.gov.cn:7001/aiccips/XZPunishmentMsg.html",
            "http://www.nmgs.gov.cn:7001/aiccips/AppPerInformation.html",
            "http://www.nmgs.gov.cn:7001/aiccips/CreditInformation.html",
            "http://www.nmgs.gov.cn:7001/aiccips/intPropertyMsg.html",
            "http://www.nmgs.gov.cn:7001/aiccips/GDGQTransferMsg/shareholderTransferMsg.html",
            "http://www.nmgs.gov.cn:7001/aiccips/OtherPublicity/otherDeptInfo.html",
            "http://www.nmgs.gov.cn:7001/aiccips/OtherPublicity/highCourt.html",
        ]

        # Hidden-input identifiers every endpoint expects in its POST body.
        data = {
            'entNo': response.xpath("//input[@id='entNo']/@value").extract_first(),
            # Fixed XPath: was //input[@id=entType']/@value (missing quote).
            'entType': response.xpath("//input[@id='entType']/@value").extract_first(),
            'regOrg': response.xpath("//input[@id='regOrg']/@value").extract_first(),
        }
        for url in url_list:
            yield scrapy.FormRequest(url=url, formdata=data, callback=self.page_info)

    def page_info(self,response):
        if response.xpath("//table/tr/td[2]").extract():
            name = ''
            response_name_list = response.xpath("//div[@class='dConBox']/div/table/tr[1]/th/text()").extract()
            for res_name in response_name_list:
                if not ('<<' in res_name):
                    name = name + '|' + res_name
            if response.url == 'http://www.nmgs.gov.cn:7001/aiccips/BusinessAnnals/BusinessAnnalsList.html':
                name = '+'  +'企业年报'

            response_name = name[1:0]