# -*- coding: utf-8 -*-


import scrapy
from scrapy import Request, FormRequest
from scrapyluke.uuCode import UUcode
import datetime
import time
from gsxt_gov.items import GsxtGovItem
import re
from scrapyluke.items import MongoItem
from scrapyluke.processors import html_to_dict, html_to_list_dict, tables_names
from report_format import tables_to_dict
import urllib
from urllib import unquote
import json

class SiChuanSpider(scrapy.Spider):
    """Spider for the Sichuan enterprise-credit-information site (gsxt.scaic.gov.cn).

    Per search keyword the flow is:
      index page (sets the session cookie) -> captcha image -> UUcode OCR ->
      search form POST -> every result page -> per-company detail sections,
    yielding MongoItem documents parsed from the GB2312-encoded HTML.
    Written for Python 2 / legacy Scrapy (print statements, urllib.quote).
    """
    name = 'sichuan'
    # start_urls = ['http://gsxt.scaic.gov.cn/ztxy.do?method=index&random=1459308086057']



    def __init__(self):
        super(SiChuanSpider, self).__init__()
        # Captcha-OCR client (UUcode account id / secret key).
        self.uu = UUcode('109521', '3c8906d3666e44bb9c961e1647126dbc')
        # self.word_ori = '哈哈'


        # self.word = unquote(self.strencode(self.word_ori))


        # Directory where downloaded captcha images are saved.
        self.base_path = '/home/li-x/imgs'
        # Current timestamp, used as the site's cache-busting "random" parameter.


        self.time = self.gettime()
        # Captcha image URL.
        # NOTE(review): the string still holds a literal 'dt=%s' placeholder, but
        # parse() appends the timestamp by plain concatenation, so '%s' is sent
        # verbatim — confirm the server ignores the malformed 'dt' parameter.


        self.code = 'http://gsxt.scaic.gov.cn/ztxy.do?method=createYzm&dt=%s&random='
        self.search = 'http://gsxt.scaic.gov.cn/ztxy.do?method=list&djjg=&random='
        self.ori_url = 'http://gsxt.scaic.gov.cn/ztxy.do'
        self.surl = 'http://gsxt.scaic.gov.cn/ztxy.do?method=index&random='
        # self.words = ['便利','哈哈']




        # Path of the file listing the search keywords (word segments) to query.


        self.fenci_path = '/home/li-x/provinces/finished/四川_fenci'
        # Path of the log of already-queried keywords ("keyword:company,timestamp,url" per line).


        self.finished_fenci_path = '/home/li-x/provinces/finished_fenci_四川'
        self.words = self.creatlist(1,30)

    def start_requests(self):
        """Open the index page once per keyword; one cookiejar per keyword."""
        for word in self.words:
            check_word = word
            yield scrapy.Request(url = self.surl + self.gettime() , meta = {'check_word':check_word,'cookiejar':check_word}, callback=self.parse,dont_filter=True)

    def parse(self, response):
        """Index page loaded (session established); fetch the captcha image."""
        check_word = response.request.meta['check_word']
        yield Request(url=self.code + self.gettime(), callback=self.code_ocr, meta={'check_word':check_word,'cookiejar':check_word},dont_filter=True)

    def code_ocr(self, response):
        """Save the captcha image, OCR it through UUcode, then POST the search form.

        Silently drops the keyword when OCR returns nothing.
        """
        img_abspath = self.uu.store_code(response.body, str(int(time.time()))+'.jpg', self.base_path)
        # 8001 / 30 — presumably UUcode's captcha-type code and a timeout; TODO confirm
        # against the UUcode API.
        result = self.uu.check_code(img_abspath, 8001, 30)
        check_word = response.request.meta['check_word']
        if result :
            code = result[1]
            yzmid = result[0]
            # Submit the search request together with the recognized captcha text.


            Formdata={
                            'currentPageNo':'1',
                            'yzm':code,
                            'maent.entname':unquote(self.strencode(check_word))
                        }
            yield FormRequest(url=self.search + self.gettime(),
                            formdata=Formdata,
                               callback=self.page_jump,meta={'check_word':check_word,'cookiejar':check_word,'yzmid':yzmid})


    # Pagination over the search-result list.


    def page_jump(self,response):
        """Re-POST the list form once for every page number shown in the pager."""
        page_nums = response.xpath("//div[@class = 'list-a']/a/text()").extract()
        check_word = response.request.meta['check_word']
        print page_nums
        for page in page_nums:
            datas = {
                'currentPageNo':'',
                'yzm':'',
                'cyzm':'cxlist',
                'maent.entname':unquote(self.strencode(check_word))
            }
            next_url = self.ori_url + '?method=list&djjg=&yzmYesOrNo=no&random=%s&pageNum=%s' % (self.gettime(), page)
            yield scrapy.FormRequest(next_url,formdata=datas,callback=self.parse_next,dont_filter=True,meta={'pages':page,'cookiejar':check_word,'check_word':check_word,'yzmid':response.request.meta['yzmid']})

    def parse_next(self,response):
        """For every company hit on a result page, request its detail sections.

        The four info_listN groups are routed to parse_info1..parse_info4
        respectively (registration / enterprise / other / judicial publicity,
        matching the section labels used in those parsers).
        """
        print response.request.meta['pages']
        check_word = response.request.meta['check_word']
        id_list = response.xpath("//div[@class='center-1']/div//li[@class = 'font16']/a/@onclick").extract()
        if id_list:
            for id in id_list:
                if re.findall(u'([a-zA-Z0-9]+)',id):
                    # onclick matches: [0] is the JS function name itself,
                    # [1] the company pripid, [2] the entity-type code.
                    id_curr = re.findall(u'([a-zA-Z0-9]+)',id)[1]
                    type_curr = re.findall(u'([a-zA-Z0-9]+)',id)[2]
                    print id_curr,type_curr
                    # info_list itself is never used; the four sub-lists below
                    # drive the requests.
                    info_list = ['qyInfo','baInfo','cfInfo','gqczxxInfo','dcdyInfo','jyycInfo','ccjcInfo','qygsInfo','qtgsInfo','qygsForXzxkInfo','qygsForZzcqInfo','qygsForTzrxxInfo','qygsForXzcfInfo',
                                'yzwfInfo','qygsForTzrbgxxInfo','qtgsForCfInfo','sfgsInfo','sfgsbgInfo','spyzInfo','qtgsScaqsgInfo','qtgsForCfInfo']
                    info_list1 = ['qyInfo','baInfo','dcdyInfo','gqczxxInfo','cfInfo','jyycInfo','yzwfInfo','ccjcInfo',]
                    info_list2 = ['qygsInfo','qygsForTzrxxInfo','qygsForTzrbgxxInfo','qygsForXzxkInfo','qygsForZzcqInfo','qygsForXzcfInfo']
                    info_list3 = ['qtgsForCfInfo','qtgsInfo']
                    info_list4 = ['sfgsInfo','sfgsbgInfo']
                    for info1 in info_list1:
                        data = {
                            'method':info1,
                            'maent.pripid':id_curr,
                            # 'maent.entbigtype':type_curr,


                            'czmk':'czmk1',
                            'random':self.gettime()
                        }
                        yield scrapy.FormRequest( url= self.ori_url,formdata=data,meta={'id':id_curr,'info':info1,'cookiejar':check_word, 'check_word':check_word},callback=self.parse_info1)

                    for info2 in info_list2:
                        data = {
                            'method':info2,
                            'maent.pripid':id_curr,
                            # 'maent.entbigtype':type_curr,


                            'czmk':'czmk1',
                            'random':self.gettime()
                        }
                        yield scrapy.FormRequest( url= self.ori_url,formdata=data,meta={'id':id_curr,'info':info2,'cookiejar':check_word, 'check_word':check_word},callback=self.parse_info2)

                    for info3 in info_list3:
                        data = {
                            'method':info3,
                            'maent.pripid':id_curr,
                            # 'maent.entbigtype':type_curr,


                            'czmk':'czmk1',
                            'random':self.gettime()
                        }
                        yield scrapy.FormRequest( url= self.ori_url,formdata=data,meta={'id':id_curr,'info':info3,'cookiejar':check_word, 'check_word':check_word},callback=self.parse_info3)

                    for info4 in info_list4:
                        data = {
                            'method':info4,
                            'maent.pripid':id_curr,
                            # 'maent.entbigtype':type_curr,


                            'czmk':'czmk1',
                            'random':self.gettime()
                        }
                        yield scrapy.FormRequest( url= self.ori_url,formdata=data,meta={'id':id_curr,'info':info4,'cookiejar':check_word, 'check_word':check_word},callback=self.parse_info4)
        # No result rows: treated as a mis-recognized captcha and reported to
        # UUcode (refund). NOTE(review): a genuinely empty search result is
        # indistinguishable from a captcha failure here.
        else:self.uu.report_error(response.meta['yzmid'])

    def parse_info1(self,response):
        """Registration-publicity section: log the finished keyword, request each
        investor's contribution-detail page, and yield the parsed tables."""

        if response.xpath("//table/tr/td").extract():

            check_word = response.request.meta['check_word']
            name_ori = response.xpath("//div[@id='details']/h2/text()").extract_first()
            # Company name = heading text before the registered-capital label.
            company_name = re.findall(u'([\s\S]+)注册',name_ori.strip())[0].strip() if name_ori else '无'
            # Append "keyword:company,timestamp,url" to the finished-keywords log.
            op = open(self.finished_fenci_path,'a')
            op.write(response.request.meta['check_word']+':'+company_name+','+str(datetime.datetime.now())+','+response.url+'\n')
            op.close()


            url_list = response.xpath("//table//td/a/@onclick").extract()
            if url_list:
                for url_id in url_list:
                    if u'showRyxx' in url_id:
                        # The two digit groups feed maent.xh and maent.pripid
                        # of the investor-detail (tzrCzxxDetial) request.
                        ids = re.findall(u'(\d+)',url_id)
                        if ids:
                            id1 = ids[0]
                            id2 = ids[1]
                            data = {
                                'method':'tzrCzxxDetial',
                                'maent.xh':str(id1),
                                'maent.pripid':str(id2),
                                'random':self.gettime()
                            }
                            yield scrapy.FormRequest(url = self.ori_url,formdata=data,callback=self.inv_info,meta={'id':response.request.meta['id'], 'info':'tzrCzxxDetial','cookiejar':check_word})

            mo = MongoItem(response)
            mo['_id'] = company_name
            # [start marker, end marker, output key] triples delimiting each
            # HTML table to extract.
            startends = [
                [u'基本信息 </th>', '</table>','工商公示-基本信息'],
                [u'股东信息<br>','</table>','工商公示-股东信息'],
                [u'变更信息</th></tr>','</table>','工商公示-变更信息'],
                [u'主要人员信息</th>','</table>','工商公示-主要人员信息'],
                [u'参加经营的家庭成员姓名</th>','</table>','工商公示-参与经营的家庭成员姓名'],
                [u'分支机构信息</th>','</table>','工商公示-分支机构信息'],
                [u'清算信息</th>','</table>','工商公示-清算信息'],
                [u'动产抵押登记信息</th>','</table>','工商公示-动产抵押登记信息'],
                [u'行政处罚信息</th>','</table>','工商公示-行政处罚信息'],
                [u"经营异常信息</th>",'</table>', '工商公示-经营异常信息'],
                [u'严重违法信息</th>','</table>','工商公示-严重违法信息'],
                [u'抽查检查信息</th>','</table>','工商公示-抽查检查信息'],

                     ]
            # The site serves pages as GB2312.
            mo['document'] = html_to_list_dict(response.body.decode('gb2312'), startends)
            yield mo
    def inv_info(self,response):
        """Investor contribution-detail page -> MongoItem keyed by company name."""
        name_ori = response.xpath("//div[@id='details']/h2/text()").extract_first()
        company_name = re.findall(u'([\s\S]+)注册',name_ori.strip())[0].strip() if name_ori else name_ori

        startends = [
            [u'class="detailsList','</table>'],
                 ]

        mo = MongoItem()
        mo['_id'] = company_name
        mo['document'] = html_to_list_dict(response.body.decode('gb2312'), startends)
        yield mo

    def parse_info2(self,response):
        """Enterprise-publicity section; also requests each annual-report detail page."""
        if response.xpath("//table/tr/td").extract():
            name_ori = response.xpath("//div[@id='details']/h2/text()").extract_first()
            company_name = re.findall(u'([\s\S]+)注册',name_ori.strip())[0].strip() if name_ori else name_ori
            check_word = response.request.meta['check_word']
            url_list = response.xpath("//table//a/@onclick").extract()
            if url_list:
                for url_id in url_list:
                    if u'doNdbg' in url_id:
                        # First digit group feeds maent.nd of the annual-report
                        # (ndbgDetail) request — presumably the report year;
                        # TODO confirm against the page's onclick JS.
                        ids = re.findall(u'(\d+)',url_id)
                        id1 = ids[0] if ids else None
                        # print id1


                        data = {
                                'method':'ndbgDetail',
                                'maent.nd':str(id1),
                                'maent.pripid':str(response.request.meta['id']),
                                'random':self.gettime()
                            }
                        yield scrapy.FormRequest(url =self.ori_url,formdata=data,callback=self.annual_info,meta={'id':response.request.meta['id'], 'info':'ndbgDetail','cookiejar':check_word})


            mo = MongoItem(response)
            mo['_id'] = company_name
            # [start marker, end marker, output key] triples delimiting each table.
            startends = [
                [u'企业年报 </th>','</table>','企业公示-企业年报'],
                [u'个体工商户年报</th>','</table>','企业公示-个体工商户年报'],
                [u'股东及出资信息</th>','</table>','企业公示-股东及出资信息'],
                [u'变更信息</th>','</table>','企业公示-变更信息'],
                [u'股权变更信息</th>','</table>','企业公示-股权变更信息'],
                [u'行政许可信息</th>','</table>','企业公示-行政许可信息'],
                [u'知识产权出质登记信息</th>','</table>','企业公示-知识产权出质登记信息'],

                     ]
            mo['document'] = html_to_list_dict(response.body.decode('gb2312'), startends)
            yield mo

    def annual_info(self,response):
        """Annual-report detail page -> MongoItem of the report's tables."""
        name_ori = response.xpath("//div[@id='details']/h2/text()").extract_first()

        company_name = re.findall(u'([\s\S]+)注册',name_ori.strip())[0].strip() if name_ori else name_ori
        # print company_name


        # Report title from the first table header; strip the trailing
        # "red marks modified items" hint if present.
        name = response.xpath("//table[@class='detailsList'][1]/tr[1]/th/text()").extract_first()

        if u"红色为修改过的信息项" in name:
            name = re.findall(u'([\s\S]+)红色为修改过的信息项',name)[0].strip()
        # print name


        mo = MongoItem()
        mo['_id'] = company_name
        mo['document'] = tables_to_dict(response.body.decode('gb2312'), name, 'detailsList')
        yield mo


    def parse_info3(self,response):
        """Other-publicity section tables -> MongoItem."""
        if response.xpath("//table/tr/td").extract():
            name_ori = response.xpath("//div[@id='details']/h2/text()").extract_first()
            company_name = re.findall(u'([\s\S]+)注册',name_ori.strip())[0].strip() if name_ori else name_ori

            mo = MongoItem(response)
            mo['_id'] = company_name
            startends = [
                [u'行政处罚信息</th>','</table>','其他公示-行政处罚信息'],
                [u'行政许可信息</th>','</table>','其他公示-行政许可信息']

                     ]
            mo['document'] = html_to_list_dict(response.body.decode('gb2312'), startends)
            yield mo

    def parse_info4(self,response):
        """Judicial-publicity section tables -> MongoItem."""
        if response.xpath("//table/tr/td").extract():
            name_ori = response.xpath("//div[@id='details']/h2/text()").extract_first()
            company_name = re.findall(u'([\s\S]+)注册',name_ori.strip())[0].strip() if name_ori else name_ori

            mo = MongoItem(response)
            mo['_id'] = company_name
            startends = [
                [u'司法股权冻结信息</th>','</table>','司法公示-司法股权冻结信息'],
                [u'司法股东变更登记信息</th>','</table>','司法公示-司法股东变更登记信息']

                     ]
            mo['document'] = html_to_list_dict(response.body.decode('gb2312'), startends)
            yield mo


    def gettime(self):
        """Current unix time in whole seconds, as a string (for 'random' params)."""
        return str(int(time.time()))

    def strencode(self,str):
        """GB2312-encode then percent-quote a keyword for the search form.

        NOTE(review): the parameter shadows the builtin str.
        """
        haha = str.encode('gb2312')
        return urllib.quote(haha)

    def clean_dict(self, data_dict):
        """Recursively drop falsy values and rewrite '.' in keys to '~'
        (presumably to make keys MongoDB-safe); returns the structure in place.
        """
        if isinstance(data_dict, dict):
            # Popping while iterating is safe on Python 2, where items()
            # returns a list snapshot of the dict.
            for k, v in data_dict.items():
                if not v:
                    data_dict.pop(k)
                else:
                    if '.' in k:
                        data_dict[re.sub('\.', '~', k)] = data_dict.pop(k)
                    self.clean_dict(v)
        elif isinstance(data_dict, list):
            for var in data_dict:
                self.clean_dict(var)
        # else:


        #     print 'clean_dict error: %s not a list or dict' % to_str(data_dict)


        return data_dict

    def creatlist(self,startnum,endnum):
        """Return keywords numbered [startnum, endnum) from the fenci file,
        skipping any already present in the finished-keywords log.

        NOTE(review): the local name 'list' shadows the builtin.
        """
        a = 1

        list = []
        list_fin = []
        f = open(self.fenci_path,'rb')

        # Collect the keywords that were already queried (text before ':').
        for line2 in open(self.finished_fenci_path, 'rb'):
            fenci = re.findall(r'([\s\S]*?):',line2)[0] if re.findall(r'([\s\S]*?):',line2) else None
            if fenci not in list_fin:
                list_fin.append(fenci)
        while a < endnum:
            line = f.readline()
            if not line:
                    break
            if startnum <= a :
                line = line.replace('\'','')
                line = line.replace('\n','')
                hehe = line.split(',',2)
                # Keep only non-empty keywords of length >= 2 not yet finished.
                if hehe[0]:
                    if len(hehe[0]) >= 2:
                        if hehe[0] not in list_fin:
                    # if hehe[0]


                            list.append(hehe[0])
            a += 1
        return list



if __name__ == '__main__':
    f = open('test.html', 'rb')
    content = f.read()
    f.close()
    haha = content.decode('gb2312')
    # print haha


    startends = [
                [u'基本信息 </th>', '</table>','基本信息'],
                [u'股东信息<br>','</table>','股东信息'],
                [u'变更信息</th></tr>','</table>','变更信息'],
                [u'主要人员信息</th>','</table>','主要人员信息'],
                [u'参加经营的家庭成员姓名</th>','</table>','参与经营的家庭成员姓名'],
                [u'分支机构信息</th>','</table>','分支机构信息'],
                [u'清算信息</th>','</table>','清算信息'],
                [u'动产抵押登记信息</th>','</table>','动产抵押登记信息'],
                [u'行政处罚信息</th>','</table>','行政处罚信息'],
                [u"经营异常信息</th>",'</table>', '经营异常信息'],
                [u'严重违法信息</th>','</table>','严重违法信息'],
                [u'抽查检查信息</th>','</table>','抽查检查信息'],
                ]
    startends2 =[
        [u'企业年报 </th>','</table>','企业年报'],
        [u'个体工商户年报</th>','</table>','个体工商户年报'],
        [u'股东及出资信息</th>','</table>','股东及出资信息'],
        [u'变更信息</th>','</table>','变更信息'],
        [u'股权变更信息</th>','</table>','股权变更信息'],
        [u'行政许可信息</th>','</table>','行政许可信息'],
        [u'知识产权出质登记信息</th>','</table>','知识产权出质登记信息'],
                 ]
    startends3 = [

        [u'行政处罚信息</th>','</table>','行政处罚信息'],
        [u'行政许可信息</th>','</table>','行政许可信息']

    ]

    startends4 = [
        [u'司法股权冻结信息</th>','</table>','司法股权冻结信息'],
        [u'司法股东变更登记信息</th>','</table>','司法股东变更登记信息']

    ]
    sta = [
        [u'class="detailsList','</table>'],

    ]
    con = html_to_list_dict(haha, sta)
    print json.dumps(con,encoding="utf-8")