# -*- coding: utf-8 -*-
"""
根据data_4_post.xls里的待查找银监会处罚文号，查询该处罚所属省份

@author: 伍钱居士UIBE
"""

import re
import urllib
import urllib.parse

import requests
import xlrd

import pandas as pd
from bs4 import BeautifulSoup
#searchurl='http://www.cbrc.gov.cn/search/index.jsp'
base_headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.71 Safari/537.36',
    'Accept-Encoding': 'gzip, deflate, sdch',
    'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7'
}
"""
此处excel内是待查询归属省份的文号字段
"""
workbook = xlrd.open_workbook(r'C:\Users\琪仁\Desktop\data_4_post.xlsx')
sheet1 = workbook.sheet_by_index(1)
num = sheet1.col_values(0, 1)

#content='揭银监罚决字〔2017〕6号'

def url_parse(content):
    """Search the CBRC site for a penalty document number.

    Parameters
    ----------
    content : str
        The penalty document number to search for (already normalized
        by the caller: spaces stripped, full-width brackets).

    Returns
    -------
    str or None
        Absolute URL of the first search hit's detail page, or None
        when the results page contains no links at all.
    """
    url = ('http://www.cbrc.gov.cn/search/search.jsp?searchword='
           + urllib.parse.quote(content))
    # timeout keeps one dead/slow server from hanging the whole batch run
    res1 = requests.get(url, headers=base_headers, timeout=30)
    soup = BeautifulSoup(res1.text, 'html.parser')
    anchors = soup.find_all('a')
    if not anchors:
        return None
    # NOTE(review): assumes the first <a> on the results page is the top
    # search hit -- confirm against the live page markup.
    return 'http://www.cbrc.gov.cn' + anchors[0].get('href')



def cbrcparse(html):
    """Fetch one penalty detail page and extract its province and number.

    Parameters
    ----------
    html : str
        Absolute URL of the penalty detail page.

    Returns
    -------
    tuple of (str, str)
        ``(province, document_number)``; either element is ``'NA'`` when
        the page does not carry that piece of information.
    """
    req = requests.get(html, headers=base_headers, timeout=30)
    sp = BeautifulSoup(req.text, 'html.parser')
    # Province appears in a line shaped like "文章来源 : XXX&nbsp"; guard
    # against a missing marker instead of crashing on .group(1).
    m = re.search('文章来源 : (.*?)&nbsp', req.text, re.DOTALL)
    province = m.group(1).replace(' ', '') if m else 'NA'
    # The document number sits in the last cell of the first multi-cell
    # row of the Mso*-classed table; pages without such a table get 'NA'.
    table = sp.find('table', {'class': re.compile('Mso.*')})
    if table is None:
        return province, 'NA'
    rows = table.select('tr')
    # Skip leading single-cell (heading) rows; bail out if nothing remains.
    while rows and len(rows[0].find_all('td')) == 1:
        rows.pop(0)
    if not rows:
        return province, 'NA'
    number = rows[0].find_all('td')[-1].text.replace('\n', '')
    return province, number


if __name__ == '__main__':
    bug = []  # indices of entries that failed to resolve
    origin, title, url = [], [], []
    province = []
    start = 0
    end = len(num)
    # enumerate instead of num.index(): .index() returns the FIRST match,
    # so duplicate document numbers would report the wrong position.
    for i, content in enumerate(num[start:end], start):
        print('正在分析第' + str(i) + '个')
        origin.append(content)
        try:
            # Normalize: drop spaces and swap half-width brackets for the
            # full-width ones the CBRC site uses.
            content = content.replace(' ', '').replace('[', '（').replace(']', '）')
            link = url_parse(content)
            if link is None:
                bug.append(i)
                url.append('NA')
                province.append('NA')
                title.append('NA')
                continue
            url.append(link)
            pro, art = cbrcparse(link)
            province.append(pro)
            title.append(art)
        except Exception:
            bug.append(i)
            # Pad every result list back up to len(origin): without this,
            # a failure mid-iteration leaves the lists at unequal lengths
            # and the DataFrame construction below raises, losing all rows.
            for lst in (url, province, title):
                while len(lst) < len(origin):
                    lst.append('NA')

    df = pd.DataFrame({'原始文号': origin,
                       '新文号': title,
                       '省份': province,
                       '链接': url})
    # Save the results to an Excel file named after the processed range.
    df.to_excel('C:\\Users\\琪仁\\Desktop\\文号更新\\'
                + '文号' + str(start) + '-' + str(end) + '.xls')
    print('文号' + str(start) + '-' + str(end) + '.xls 保存成功!')









