# -*- coding: utf-8 -*-

import datetime
import random
import re
import time

import scrapy
from scrapy import Request, FormRequest
from scrapy.loader import ItemLoader

from scrapyluke.processors import *
from scrapyluke.uuCode import UUcode

from test1.items import hc360cominfo
from test1.items import GsxtGovItem

class AnHuiSpider(scrapy.Spider):
    """Spider for the Anhui enterprise-credit registry (ahcredit.gov.cn).

    Flow: fetch the search page -> request a captcha image -> OCR it through
    the UUcode service -> POST the search form -> for each hit, follow the
    three per-company detail pages and scrape each into a GsxtGovItem.
    """

    name = 'anhui'
    start_urls = ['http://www.ahcredit.gov.cn/search.jspx']

    # Pulls the registry id out of a result link such as
    # "businessPublicity.jspx?id=XYZ".  Compiled once instead of per URL;
    # raw string avoids the invalid '\=' escape of the original.
    _ID_RE = re.compile(r'businessPublicity\.jspx\?id=([\s\S]*)')

    def __init__(self):
        super(AnHuiSpider, self).__init__()
        # Account / key for the UUcode captcha-recognition service.
        self.uu = UUcode('109521', '3c8906d3666e44bb9c961e1647126dbc')
        # Company-name keyword submitted in the search form.
        self.word = '哈哈'
        # Directory where captcha images are written before OCR.
        self.base_path = '/home/li-x/imgs'

    def parse(self, response):
        """Request a fresh captcha image for the search form.

        The `id` query parameter is a cache-buster; the original hard-coded
        one value (0.07181660505011678), which risks the server/proxy
        returning a stale image.  Generate a new random value per request.
        """
        captcha_url = ('http://www.ahcredit.gov.cn/validateCode.jspx'
                       '?type=1&id=%s' % random.random())
        yield scrapy.Request(captcha_url, callback=self.code_ocr)

    def code_ocr(self, response):
        """Save the captcha image, OCR it, then submit the search form.

        UUcode.check_code returns -3 on failure, otherwise a sequence whose
        second element is the recognised text (as used by the original code).
        On failure we now log and retry with a new captcha instead of
        silently ending the crawl.
        """
        filename = str(int(time.time())) + '.jpg'
        img_abspath = self.uu.store_code(response.body, filename, self.base_path)
        result = self.uu.check_code(img_abspath, 8001, 30)
        if result == -3:
            self.logger.warning('captcha OCR failed, requesting a new image')
            for retry_request in self.parse(response):
                yield retry_request
            return
        code = result[1]
        self.logger.info('captcha code: %s', code)
        yield scrapy.FormRequest(
            'http://www.ahcredit.gov.cn/searchList.jspx',
            formdata={'checkNo': code, 'entName': self.word},
            callback=self.url_list)

    def url_list(self, response):
        """Extract search-result links and follow the three detail pages
        (business / enterprise / other-department publicity) per company."""
        links = response.xpath(
            "/html/body/div[@class='da']/div[@class='center-1']/div[2]"
            "/div[@class='list']/ul/li[@class='font16']/a/@href").extract()
        self.logger.info('urllist %s', links)
        for link in links:
            match = self._ID_RE.search(link)
            if match is None:
                continue
            ent_id = match.group(1)  # renamed from `id`: don't shadow the builtin
            base = 'http://www.ahcredit.gov.cn/'
            for page in ('businessPublicity.jspx?id=',
                         'enterprisePublicity.jspx?id=',
                         'otherDepartment.jspx?id='):
                yield Request(base + page + ent_id, callback=self.parse_info)

    def parse_info(self, response):
        """Scrape one detail page into a GsxtGovItem and yield it.

        Fixes two defects of the original: the built item was never yielded
        (so it never reached the item pipeline), and `extract_first()` could
        return None, making `re.findall(..., None)` raise TypeError on pages
        without the <h2> title.
        """
        anhui = GsxtGovItem()
        headers = response.xpath("//table/tr[1]/th/text()").extract()
        # '|'.join is equivalent to the original '|'-prefix-then-slice loop.
        anhui['response_name'] = '|'.join(headers).strip()
        anhui['content'] = response.body
        anhui['url'] = response.url
        title = response.xpath("//div[@id='details']/h2/text()").extract_first()
        company_name = None
        if title:  # guard: extract_first() may return None
            found = re.findall(u'([\s\S]+)注册号', title)
            if found:
                company_name = found[0].strip()
        anhui['company_name'] = company_name
        anhui['insert_time'] = datetime.datetime.now()
        self.logger.info('%s %s', anhui['company_name'], anhui['response_name'])
        yield anhui