import json
import time
import urllib.parse
import urllib.request

import pandas as pd
from bs4 import BeautifulSoup
def catList(x, titles):
    """Fetch the JSON list endpoint at *x* and parse its "tb" HTML table.

    The endpoint returns ``{"tb": "<tr>...</tr>..."}``; each ``<tr>`` becomes
    a list of cell texts.  If a cell carries a ``title`` attribute its full
    value is preferred over the (possibly truncated) cell text.  If the row
    contains a link, the value after ``=`` in its href (the record id) is
    appended as an extra column.

    :param x: URL of the ``.ashx`` list handler, query string included.
    :param titles: header row, prepended unchanged as the first list item.
    :returns: ``[titles, row1, row2, ...]`` — header plus one list per row.

    Retries forever on network errors (the remote host is flaky), so this
    only returns once a request succeeds.
    """
    while True:
        try:
            f = urllib.request.urlopen(x, timeout=120)
        except OSError:  # URLError/timeout are OSError subclasses; don't swallow everything
            print(f"{x}超时")
            continue  # retry with a loop — the original recursed, risking RecursionError
        try:
            soup = BeautifulSoup(json.load(f)["tb"], features="html.parser")
        finally:
            f.close()  # don't leak the socket
        trs = [titles]
        for tr in soup.findAll("tr"):
            tds = []
            for td in tr.findAll("td"):
                if td.has_attr("title"):
                    tds.append(td["title"])
                else:
                    tds.append(td.getText().strip())
            a = tr.a
            if a is not None:
                # record id is whatever follows "=" in the link's query string
                allText = a["href"].split("=")
                tds.append(allText[1] if len(allText) > 1 else "")
            trs.append(tds)
        return trs
def catUrl(x):
    """Fetch the company-detail page at *x* and scrape two sections.

    :param x: URL of ``CorpDetailInfo.aspx`` with the query string attached.
    :returns: ``(detaildic, zizhis)`` where

        * ``detaildic`` — the basic-info table (``.cpd_basic_table``) as a
          label -> value dict (cells alternate label/value);
        * ``zizhis`` — one dict per qualification block inside
          ``.details_infor_content_01``, each holding a ``"type"`` key (the
          category heading) plus the certificate's label -> value pairs.

        On a network error returns ``("", "")`` — callers only test
        ``len(...)`` on both members, which works for either shape.
    """
    try:
        f = urllib.request.urlopen(x, timeout=15)
    except OSError:  # narrow: URLError/timeout only, not every exception
        print(f"{x}超时")
        return "", ""
    try:
        html = f.read().decode('utf-8')
    finally:
        f.close()  # don't leak the socket
    soup = BeautifulSoup(html, features="html.parser")
    detaildic = {}
    zizhis = []

    detail = soup.select_one(".cpd_basic_table")
    if detail is not None:
        detailArr = [td.getText().strip()
                     for tr in detail.findAll("tr")
                     for td in tr.findAll("td")]
        # Only trust the table when it starts with the expected first label.
        if len(detailArr) > 0 and detailArr[0] == "企业名称：":
            for i in range(len(detailArr) // 2):
                detaildic[detailArr[i * 2]] = detailArr[i * 2 + 1]

    zizhiDetails = soup.select_one(".details_infor_content_01")
    if zizhiDetails is not None:
        leibies = zizhiDetails.select(".leibie")
        tables = zizhiDetails.select(".no_list_table")
        if len(leibies) == len(tables) and len(tables) > 0:
            for leibie, zizhiDetail in zip(leibies, tables):
                zizhiDetaildic = {"type": leibie.getText().strip()}
                zizhiDetailArr = [td.getText().strip()
                                  for tr in zizhiDetail.findAll("tr")
                                  for td in tr.findAll("td")]
                if len(zizhiDetailArr) > 0 and zizhiDetailArr[0] == "资质证书编号：":
                    # j, not i: the original shadowed the outer loop variable here
                    for j in range(len(zizhiDetailArr) // 2):
                        zizhiDetaildic[zizhiDetailArr[j * 2]] = zizhiDetailArr[j * 2 + 1]
                # BUG FIX: append inside the loop — the original appended only
                # the last certificate block, dropping all the others.
                zizhis.append(zizhiDetaildic)
    return detaildic, zizhis

def detail(uid, code, name):
    """Collect everything the site publishes about one company.

    Issues five requests against the 218.60.144.163 portal: the detail page
    plus four ``.ashx`` list handlers (certified engineers by two type
    filters, projects, and credit/penalty records).

    :param uid: company rowGuid used by the detail page.
    :param code: the company's CorpCode (unified social-credit code).
    :param name: the company's CorpName.
    :returns: a dict with keys ``detail``, ``cers``, ``pers``, ``pers2``,
        ``projects`` and ``losts`` when every fetch produced data, otherwise
        ``None`` (e.g. when the detail page timed out and came back empty).
    """
    data = {"rowGuid": uid,
            "CorpCode": code,
            "CorpName": name,
            "VType": 1}
    data1 = {"CorpCode": code,
             "CorpName": name}
    data2 = {"CorpCode": code,
             "CorpName": name,
             "type": 3}
    # nPageSize large enough to fetch all projects in one page.
    data3 = {"CorpCode": code,
             "CorpName": name,
             "nPageCount": 0,
             "nPageIndex": 1,
             "nRecordSetCount": 0,
             "nPageSize": 10000}
    data4 = {"CorpCode": code,
             "CorpName": name}
    data_string = urllib.parse.urlencode(data)
    data_string1 = urllib.parse.urlencode(data1)
    data_string2 = urllib.parse.urlencode(data2)
    data_string3 = urllib.parse.urlencode(data3)
    data_string4 = urllib.parse.urlencode(data4)
    ddetail = catUrl(f"http://218.60.144.163/LNJGPublisher/corpinfo/CorpDetailInfo.aspx?{data_string}")
    pers = catList(f"http://218.60.144.163/LNJGPublisher/handle/Company_Details_CertifiedEngineers.ashx?{data_string1}", ["序号", "人员姓名", "证件号码", "人员类型", "注册证书编号", "发证日期", "有效期至", "id"])
    pers2 = catList(f"http://218.60.144.163/LNJGPublisher/handle/Company_Details_CertifiedEngineers.ashx?{data_string2}", ["序号", "人员姓名", "证件号码", "人员类型", "注册证书编号", "发证日期", "有效期至", "id"])
    projects = catList(f"http://218.60.144.163/LNJGPublisher/handle/Corp_Project.ashx?{data_string3}", ["序号", "项目名称", "所在市州", "项目编号", "项目分类", "建设单位名称", "id"])
    losts = catList(f"http://218.60.144.163/LNJGPublisher/handle/Corp_Credit.ashx?{data_string4}", ["序号", "类别", "决定内容", "行为描述", "发布有效期至", "实施部门", "文号"])

    if len(ddetail[0]) > 0 and len(pers) > 0 and len(pers2) > 0 and len(projects) > 0 and len(losts) > 0:
        return {"detail": ddetail[0], "cers": ddetail[1], "pers": pers, "pers2": pers2, "projects": projects, "losts": losts}
    # Explicit: any empty section means "no usable record" for this company.
    return None
# 项目例子
# print(detail("91210102769558708Q","91210102769558708Q","沈阳电能建设集团有限公司"))
#处罚例子
# detail("912102007644045901","912102007644045901","大连泓源建设有限公司")

# Source spreadsheet: one row per company; the LAST column holds a
# comma-separated "'rowGuid','CorpCode','CorpName'" string.
df = pd.read_excel("/Users/wangshuguan/Desktop/辽宁列表.xls")

def _save(filename, contents):
    """Write *contents* to *filename* as UTF-8 text (overwrites)."""
    # with-statement guarantees the handle is closed even if write() raises
    with open(filename, 'w', encoding='utf-8') as fh:
        fh.write(contents)

def startLoad(x):
    """Scrape every company from spreadsheet row *x* onward.

    For each row whose last column splits into exactly three fields
    (rowGuid, CorpCode, CorpName), fetch the full detail via ``detail()``
    and dump it as one JSON text file per company.

    :param x: 0-based row index to resume from (lets a crashed run restart
        without re-downloading earlier rows).
    """
    contentInfos = df.iloc[:, -1].to_numpy()
    for i in range(x, len(df)):
        datas = contentInfos[i].replace("'", "").split(",")
        if len(datas) == 3:
            readData = detail(datas[0], datas[1], datas[2])
            _save(f"/Users/wangshuguan/Desktop/辽宁企业2/辽宁企业_{i}_{datas[2]}_{datas[1]}.txt",
                  json.dumps(readData, ensure_ascii=False))
            print(i)
            time.sleep(1)  # polite delay so we don't hammer the server

startLoad(5689)