# -*- coding: utf-8 -*-


import scrapy
from scrapy import Request, FormRequest
from scrapyluke.uuCode import UUcode
import datetime
import time
from gsxt_gov.items import GsxtGovItem
import re
from scrapyluke.items import MongoItem
from scrapyluke.processors import html_to_dict, html_to_list_dict, tables_names
from report_format import tables_to_dict
import jieba
from scrapy.selector import Selector

class ShangHaiSpider(scrapy.Spider):
    """Scrapy spider for the Shanghai company publicity site (www.sgs.gov.cn).

    Flow: grab a session token from the home page, request the captcha,
    POST keyword searches, follow each result link, then fetch the four
    detail tabs of every company page (suffixes 01/02/03/06) and yield
    GsxtGovItem / MongoItem records.

    NOTE(review): Python 2 code (print statements); keyword lists and
    progress logs live in hard-coded local file paths.
    """

    name = 'shanghai_qygs'
    start_urls = ['https://www.sgs.gov.cn/notice/home']

    def __init__(self):
        super(ShangHaiSpider, self).__init__()
        # UUcode captcha-solving client (account id, API key) — currently
        # unused because code_ocr short-circuits with a preset captcha value.
        self.uu = UUcode('109521', '3c8906d3666e44bb9c961e1647126dbc')
        self.word = '哈哈'
        # Directory where captcha images would be stored for OCR.
        self.base_path = '/home/li-x/imgs'
        self.code = 'https://www.sgs.gov.cn/notice/captcha?preset=&ra=0.04232474394857644'    # captcha request URL

        self.search = 'https://www.sgs.gov.cn/notice/search/ent_info_list'    # search request URL


        # Path of the file holding the (pre-segmented) keywords to query.


        self.fenci_path = '/home/li-x/provinces/finished/上海_fenci'
        # Path of the file recording already-queried keyword, company name
        # and insert time (used to skip finished keywords on restart).


        self.finished_fenci_path = '/home/li-x/provinces/finished_fenci_上海'
        # Remaining keywords: entries 1..1999 of fenci_path minus finished ones.
        self.words = self.creatlist(1,2000)
        # 360020


    def code_right(self, res):
        """Return True when the page contains result links, i.e. the
        captcha/search request was accepted; False otherwise."""
        a = res.xpath("//div[@class='link']/a/text()").extract()
        if a:
            return True
        else:
            return False

    # Request the captcha image.


    def parse(self, response):
        """Extract the session token embedded in the home page script and
        chain into the captcha request (code_ocr)."""
        b = re.findall(u'code:[\s\S]*"([\s\S]*)"[\s\S]*data:', response.body)
        if b:
            sessiontoken = b[0]
        else:
            sessiontoken = None
            print 'sessiontoken 获取错误'
        yield Request(url=self.code, meta={'session':sessiontoken}, callback=self.code_ocr)

    # Captcha recognition.


    def code_ocr(self, response):
        """Fire one search FormRequest per remaining keyword.

        The UUcode OCR path is commented out; a hard-coded captcha value of
        '-3' is sent instead (presumably a server-side bypass for the
        `preset` captcha URL — TODO confirm it is still accepted).
        """
        # img_abspath = self.uu.store_code(response.body, str(int(time.time()))+'.jpg', self.base_path)


        # result = self.uu.check_code(img_abspath, 8001, 30)


        # if result :


        for check_word in self.words:
            # check_word = self.subword(check_word)


            # NOTE(review): `id` shadows the builtin; kept byte-identical here.
            id = '1'
            code = '-3'
            # print check_word


            # Build the captcha-validated search request for this keyword.


            session = response.request.meta['session']
            Formdata = {
                        'captcha':code,
                        'condition.pageNo':'1',
                        'condition.insType':'',
                        'session.token':session,
                        'condition.keyword':check_word
                        }
            yield FormRequest(url=self.search,
                              formdata=Formdata,meta={'pages':'1','code':code,'session':session,'code_id': id,'check_word':check_word,'chword':''},
                              callback=self.check_page)

    # Walk every page of the search results (originally delegated to link_lists).


    def check_page(self,response):
        """Follow each result link on the current search-result page and,
        while a 10th list item exists and differs from the previous page's
        (i.e. new rows keep appearing), request the next page."""
        if self.code_right(response):
            check_word = response.request.meta['check_word']
            pages = int(response.request.meta['pages'])
            code = response.request.meta['code']
            session = response.request.meta['session']
            # print pages,code,session


            # Formdata2={


            #                 'captcha':code,


            #                 'condition.pageNo':str(pages),


            #                 'condition.insType':'',


            #                 'session.token':session,


            #                 'condition.keyword':check_word}


            # yield FormRequest(  url=self.search,


            #                     formdata = Formdata2 ,


            #                     callback =self.link_lists)


            class_list = Selector(text=response.body).xpath("//div[@class='list-item']")
            for url in class_list:
                link = url.xpath("div[@class='link']/a/@href").extract_first()
                yield scrapy.Request(link,callback=self.parse_links,meta={'check_word':check_word})

            # A 10th item means the page is full, so there may be another page;
            # comparing its text against the previous page's guards against the
            # site serving the same last page forever.
            if Selector(text=response.body).xpath("//div[@class='list-item'][10]//text()").extract():
                chword = response.xpath("//div[@class='list-item'][10]//text()").extract()
                if chword != response.request.meta.get('chword'):
                    pages += 1
                    Formdata3={
                                'captcha':code,
                                'condition.pageNo':str(pages),
                                'condition.insType':'',
                                'session.token':session,
                                'condition.keyword':check_word}
                    yield FormRequest(  url=self.search,
                                        formdata=Formdata3,
                                        meta={'pages':str(pages),'code':code, 'session':session,
                                                'check_word':check_word, 'chword':chword},
                                        callback=self.check_page)
        # else:


        #         self.uu.report_error(response.meta['code_id'])



    # Find the detail-page URL inside each search-result page.


    # def link_lists(self, response):


    #     class_list = response.xpath("//div[@class='list-item']")


    #     for url in class_list:


    #         link = url.xpath("div[@class='link']/a/@href").extract_first()


    #         yield scrapy.Request(link,callback=self.parse_links)



    # Find the URLs of the left-hand tab column and request them.


    def parse_links(self,response):
        """Log the keyword→company hit to the progress file, then request the
        four detail tabs by swapping the URL's last two characters for
        01/02/03/06 (the site's tab suffixes — TODO confirm still valid)."""
        company_name = Selector(text=response.body).xpath("//div[@class='main']/div[@class='notice']/ul/li[1]/text()").extract_first()
        # Append "keyword:company,timestamp,url" so creatlist can skip this
        # keyword on the next run.
        op = open(self.finished_fenci_path,'a')
        op.write(response.request.meta['check_word']+':'+company_name+','+str(datetime.datetime.now())+','+response.url+'\n')
        op.close()

        delLastlink = response.url[:-2]
        # onclick = response.xpath("//div[@class='main']/div[@class='notice']/div[@class='cont clearfix']/div[@class='cont-l']/ul/li/@onclick").extract()


        # for urlid in onclick:


        #     linkid = re.findall(u'(\d+)', urlid)


        #     if linkid:


        #         link = DelLastlink + str(linkid[0])


        #         yield scrapy.Request( link , callback= self.parse_link_info,dont_filter=True)


        url1 = delLastlink+'01'
        url2 = delLastlink+'02'
        url3 = delLastlink+'03'
        url6 = delLastlink+'06'
        yield scrapy.Request(url=url1 , callback= self.parse_link_info1, dont_filter=True)
        yield scrapy.Request(url=url2 , callback= self.parse_link_info2, dont_filter=True)
        yield scrapy.Request(url=url3 , callback= self.parse_link_info3, dont_filter=True)
        yield scrapy.Request(url=url6 , callback= self.parse_link_info6, dont_filter=True)

    # Parse every incoming URL; if the page contains further links,
    # send those URLs back through the relevant handler.


    def parse_link_info1(self,response):
        """Tab 01 (工商公示): follow investor detail links, then yield a raw
        GsxtGovItem plus a MongoItem with the tables split per section."""

        # hebei = GsxtGovItem()


        # url_list = response.xpath("//div[@class='main']/div[@class='notice']/div[@class='cont clearfix']/div[@class='cont-r']/div[@class='cont-r-b']/div[@class='hide']//tr//a/@href").extract()


        # if url_list:


        #     for next_url in url_list:


        #         if self.isurl(next_url):


        #             list_index = url_list.index(next_url) + 1


        #             yield scrapy.Request(next_url,callback=self.parse_link_info1,meta = {'index':list_index})


        # name_list = response.xpath("//div/div/table/tr[1]/th/text()").extract()


        # name = ''


        # for i in name_list:


        #     if u'红色为修改过的信息项' in i:


        #         # index = name_list.index(i)


        #         # i = response.xpath('//div/div/table[%s]/tr[2]/th/text()'%str(index+1)).extract_first()


        #         name = re.findall(u'([\s\S]+)红色为修改过的信息项',i)[0].strip()


        #         name = '|'+ name


        #         break


        #     if (u'股东及出资信息' in i and response.request.meta.has_key('index')):


        #         name = '股东及出http://www.hebscztxyxx.gov.cn/notice/notice/view?uuid=dLdkOHGnIk0DNrPBOHnd.8MFr4dvZDts&tab=01资信息' + '_' + str(response.request.meta['index'])


        #         name = '|' + name


        #         break


        #     while ' ' in i:


        #         i = i.replace(' ','')


        #     name = name + '|' + i



        # hebei['response_name'] = name[1:]


        # hebei['content'] = str(response.body)


        # hebei['insert_time']  = datetime.datetime.now()


        # hebei['url'] = response.url


        # hebei['company_name'] = response.xpath("//div[@class='main']/div[@class='notice']/ul/li[1]/text()").extract_first()


        # yield hebei


        company_name = Selector(text=response.body).xpath("//div[@class='main']/div[@class='notice']/ul/li[1]/text()").extract_first()
        # if company_name in self.ori_words:


        #     self.count+=1


        #     print self.count


        # Paginated shareholder/investor links get their own requests, routed
        # through a fixed proxy (presumably to dodge rate limiting — TODO confirm).
        investorlist = Selector(text=response.body).xpath("//table[@id='investorTable']//tr[@class='page-item']//a/@href").extract()
        for invest in investorlist:

            # op = open('/home/li-x/provinces/未完成_上海','a')


            # op.write(company_name+':'+ invest +','+str(datetime.datetime.now())+','+response.url+'\n')


            # op.close()


            yield scrapy.Request(url = invest, callback= self.invest_info, meta={'com_name':company_name, 'proxy':'http://192.168.51.32:8008'})
        # Only yield items when the page actually contains table data.
        if Selector(text=response.body).xpath("//table/tr/td[1]").extract():

            gs = GsxtGovItem(response)
            gs['company_name'] = company_name
            gs['response_name'] = tables_names(response.xpath('//table').extract())
            gs['content'] = response.body
            yield gs

            mo = MongoItem(response)
            mo['_id'] = company_name
            # [start marker, end marker, section label] triples delimiting each
            # table in the raw HTML for html_to_list_dict.
            startends = [['<th colspan="4">基本信息</th>', '</table>','工商公示-基本信息'],
                         ['<th colspan="5">抽查检查信息</th>', '</table>','工商公示-抽查检查信息'],
                         ['<th colspan="10">股权出质登记信息</th>', '</table>','工商公示-股权出质登记信息'],
                     ['<th colspan="6">主要人员信息</th>','</table>','工商公示-主要人员信息'],
                        ['<th colspan="7">动产抵押登记信息</th>','</table>','工商公示-动产抵押登记信息'],
                     ['<th colspan="6">经营异常信息</th>','</table>','工商公示-经营异常信息'],
                     ['<th colspan="6">严重违法信息</th>','</table>','工商公示-严重违法信息'],
                     ['<th colspan="7">行政处罚信息</th>','</table>','工商公示-行政处罚信息'],
                     ['<tr><th colspan="4">分支机构信息</th></tr>','</table>','工商公示-分支机构信息'],
                     ['股东信息<br />','</table>','工商公示-股东信息'],
                     ['<th colspan="4">变更信息</th>','</table>','工商公示-变更信息'],
                     ['<th colspan="5">清算信息</th>','</table>','工商公示-清算信息'],
                         ['<th colspan="2">投资人信息</th>','</table>','工商公示-投资人信息'],
                         ['<th colspan="4">参加经营的家庭成员姓名</th>','</table>','工商公示-参加经营的家庭成员姓名'],
                     ]
            mo['document'] = html_to_list_dict(response.body, startends)
            yield mo

    def invest_info(self,response):
        """Shareholder/investor detail page: yield a MongoItem keyed by the
        originating company's name (passed via request meta)."""
        # print type(response),dir(response)


        # print response.body


        mo = MongoItem(response)
        mo['_id'] = response.request.meta['com_name']
        # Two marker variants: the site renders the header with and without
        # a trailing space before </th>.
        startends = [['股东及出资信息 </th>', '</table>','工商公示-股东及出资详细信息'],
                     ['股东及出资信息</th>', '</table>','工商公示-股东及出资详细信息']
                 ]
        mo['document'] = html_to_list_dict(response.body, startends)
        yield mo

    def parse_link_info2(self,response):
        """Tab 02 (企业公示): follow annual-report links, then yield the raw
        GsxtGovItem and the per-section MongoItem."""
        if Selector(text=response.body).xpath("//table/tr/td[1]").extract():
            company_name = Selector(text=response.body).xpath("//div[@class='main']/div[@class='notice']/ul/li[1]/text()").extract_first()
            annual_link = Selector(text=response.body).xpath("//table//a/@href").extract()
            for url in annual_link:
                if u'view_annual' in url:

                    yield Request(url = url, callback= self.annual_info)
            gs = GsxtGovItem(response)
            gs['company_name'] = company_name
            gs['response_name'] = tables_names(response.xpath('//table').extract())
            gs['content'] = response.body
            yield gs

            mo = MongoItem(response)
            mo['_id'] = company_name
            startends = [['<th colspan="7">行政处罚信息</th>', '</table>','企业公示-行政处罚信息'],
                         ['<th colspan="9" align="center">股东及出资信息（币种与注册资本一致）</th>', '</table>', '企业公示-股东及出资信息'],
                         ['<th colspan="5" align="center">变更信息</th>','</table>','企业公示-变更信息'],
                         ['<th colspan="9">行政许可信息</th>','</table>','企业公示-行政许可信息'],
                         ['<th colspan="3">企业年报</th>','</table>','企业公示-企业年报'],
                         ['<th colspan="3">个体工商户年报</th>','</table>','企业公示-个体工商年报'],
                         ['<th colspan="5">股权变更信息</th>','</table>','企业公示-股权变更信息'],
                         ['<th colspan="9" align="center">知识产权出质登记信息</th>','</table>','企业公示-知识产权出质登记信息']
                     ]
            mo['document'] = html_to_list_dict(response.body, startends)
            yield mo

    def annual_info(self,response):
        """Annual-report detail page: derive the report title (stripping the
        '红色为修改过的信息项' legend if present) and yield both item types."""
        if Selector(text=response.body).xpath("//table/tr/td[1]").extract():

            company_name = Selector(text=response.body).xpath("//div[@class='main']/div[@class='notice']/ul/li[1]/text()").extract_first()
            name = Selector(text=response.body).xpath("//div[@class='detail-cont'][1]/div[@class='detail-info']/table[@class='info m-bottom m-top'][1]/tr[1]/th/text()").extract_first()
            if u"红色为修改过的信息项" in name:
                name = re.findall(u'([\s\S]+)红色为修改过的信息项',name)[0].strip()
            # print name , company_name,response.url



            mo = MongoItem(response)
            mo['_id'] = company_name
            mo['document'] = tables_to_dict(response.body, name, 'info m-bottom m-top')
            yield mo

            gs = GsxtGovItem(response)
            gs['company_name'] = company_name
            gs['response_name'] = name
            gs['content'] = response.body
            yield gs

    def parse_link_info3(self,response):
        """Tab 03 (其他公示): yield the raw GsxtGovItem and per-section MongoItem."""
        if Selector(text=response.body).xpath("//table/tr/td[1]").extract():

            company_name = Selector(text=response.body).xpath("//div[@class='main']/div[@class='notice']/ul/li[1]/text()").extract_first()

            gs = GsxtGovItem(response)
            gs['company_name'] = company_name
            gs['response_name'] = tables_names(response.xpath('//table').extract())
            gs['content'] = response.body
            yield gs

            mo = MongoItem(response)
            mo['_id'] = company_name
            startends = [['<th colspan="9">行政许可信息</th>', '</table>', '其他公示-行政许可信息'],
                        ['<th colspan="7">行政处罚信息</th>', '</table>', '其他公示-行政处罚信息']
                     ]
            mo['document'] = html_to_list_dict(response.body, startends)
            yield mo

    def parse_link_info6(self,response):
        """Tab 06 (司法公示): yield the raw GsxtGovItem and per-section MongoItem."""
        if Selector(text=response.body).xpath("//table/tr/td[1]").extract():
            company_name = Selector(text=response.body).xpath("//div[@class='main']/div[@class='notice']/ul/li[1]/text()").extract_first()

            gs = GsxtGovItem(response)
            gs['company_name'] = company_name
            gs['response_name'] = tables_names(Selector(text=response.body).xpath('//table').extract())
            gs['content'] = response.body
            yield gs

            mo = MongoItem(response)
            mo['_id'] = company_name
            startends = [['<th colspan="6">司法股东变更登记信息</th>', '</table>', '司法公示-司法股东变更登记信息'],
                         ['<th colspan="7">司法股权冻结信息</th>', '</table>', '司法公示-司法股权冻结信息']]

            mo['document'] = html_to_list_dict(response.body, startends)
            yield mo

    def isurl(self,checkurl):
        """Return True when checkurl looks like an absolute http/ftp URL
        (Django-style URL regex)."""
        regex = re.compile(
            r'^(?:http|ftp)s?://' # http:// or https://

            r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...

            r'localhost|' #localhost...

            r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip

            r'(?::\d+)?' # optional port

            r'(?:/?|[/?]\S+)$', re.IGNORECASE)
        b = re.findall(regex,checkurl)
        if b:
            return True
        else:
            return False

    def creatlist(self,startnum,endnum):
        """Return keywords number startnum..endnum-1 (1-based, endnum
        exclusive — NOTE(review): likely meant `a <= endnum`; confirm) from
        fenci_path, skipping those already logged in finished_fenci_path.

        Each fenci_path line is "word,..."; only the first comma-field is
        used, and single-character words are dropped.
        """
        a = 1

        # NOTE(review): `list` shadows the builtin; kept byte-identical here.
        list = []
        list_fin = []
        f = open(self.fenci_path,'rb')

        # Finished-log lines look like "keyword:company,...": keep the part
        # before the first ':'.
        for line2 in open(self.finished_fenci_path, 'rb'):
            fenci = re.findall(r'([\s\S]*?):',line2)[0] if re.findall(r'([\s\S]*?):',line2) else None
            if fenci not in list_fin:
                list_fin.append(fenci)
        while a < endnum:
            line = f.readline()
            if not line:
                    break
            if startnum <= a :
                line = line.replace('\'','')
                line = line.replace('\n','')
                hehe = line.split(',',2)
                if hehe[0]:
                    if len(hehe[0]) >= 2:
                        if hehe[0] not in list_fin:
                    # if hehe[0]

                            list.append(hehe[0])
            a += 1
        return list

    def subword(self,str):
        """Strip Hebei place/organization names from a keyword.
        NOTE(review): `str` shadows the builtin; kept byte-identical here."""
        clean_list = ["石家庄市", "唐山市", "河北省", "委员会", "长安区", "人民政府", "桥西区", "桥东区", "中共", "新华区", "桥东区",]
        for clean_word in clean_list:
            while clean_word in str:
                str = str.replace(clean_word,'')
        return str

    def jiebafenci(self):
        """Cut a company-name corpus into words with jieba (full mode) and
        return the deduplicated word list (order not preserved)."""
        lis = []
        f = open('/home/li-x/provinces/hebei1000','rb')
        content = f.read()
        seg_list = jieba.cut(content, cut_all=True)
        for i in seg_list:
            if (i and ('\n' not in i)):
                # print i



                lis.append(i)
        # print type(seg_list)


        list = set(lis)

        c = [h for h in list]
        return c

from scrapyluke.processors import html_to_list_dict
from scrapyluke.commonfun import to_str
import json


if __name__ == '__main__':
    f = open('test.html', 'rb')
    content = f.read()
    f.close()
    startends = [['股东及出资信息 </th>', '</table>','股东及出资信息'],
                 ['股东及出资信息</th>', '</table>','股东及出资信息'],
                 ]
    con = html_to_list_dict(content, startends)
    print json.dumps(con)
