# -*- coding: utf-8 -*-
import json
import re
import sys
import time
import urllib.parse
import urllib.request

from bs4 import BeautifulSoup
def analysis1(table):
    """Parse the "企业基本信息" (basic company info) key/value table.

    The <td> texts are laid out flat as
    [section title, key1, value1, key2, value2, ...].

    Returns a dict mapping each key to its value, or an empty dict when
    the first cell is not the expected section title.
    """
    cells = [td.getText().strip() for td in table.findAll("td")]
    if not cells or cells[0] != "企业基本信息":
        return {}
    # Only (len-1)//2 complete key/value pairs follow the title cell.
    # The original used len//2, which raised IndexError whenever a
    # trailing key had no matching value (even-length cell list).
    pairs = (len(cells) - 1) // 2
    return {cells[2 * i + 1]: cells[2 * i + 2] for i in range(pairs)}

def analysis2(table):
    """Return the table body as a row-major list of stripped cell texts."""
    return [
        [cell.getText().strip() for cell in row.findAll("td")]
        for row in table.findAll("tr")
    ]

def analysis3(table):
    """Parse the "安全许可信息" (safety permit info) key/value table.

    The <td> texts are laid out flat as
    [section title, key1, value1, key2, value2, ...].

    Returns a dict mapping each key to its value, or an empty dict when
    the first cell is not the expected section title.
    """
    cells = [td.getText().strip() for td in table.findAll("td")]
    if not cells or cells[0] != "安全许可信息":
        return {}
    # Only (len-1)//2 complete key/value pairs follow the title cell.
    # The original used len//2, which raised IndexError whenever a
    # trailing key had no matching value (even-length cell list).
    pairs = (len(cells) - 1) // 2
    return {cells[2 * i + 1]: cells[2 * i + 2] for i in range(pairs)}

def analysis4(table):
    """Collect every row of the table as a list of stripped cell strings."""
    rows = []
    for row in table.findAll("tr"):
        rows.append([c.getText().strip() for c in row.findAll("td")])
    return rows


def analysis5(table):
    """Parse the project-list table.

    For each row: collect the visible cell texts (cells styled with
    "display: none" are skipped, and a cell's `title` attribute wins over
    its text), then append the URL extracted from the first anchor's
    `window.open('<url>', ...)` onclick handler — or "" when the row has
    no such anchor.

    Returns a list of rows, each a list of strings ending with the URL.
    """
    # The dot in "window.open" is escaped so the pattern matches only the
    # literal call; the original pattern let "." match any character.
    open_url = re.compile(r"window\.open\('([^']*)'")
    rows = []
    for tr in table.findAll("tr"):
        tds = []
        for td in tr.findAll("td"):
            if td.has_attr("style") and td["style"].find("display: none") != -1:
                continue  # hidden cell — not part of the visible record
            if td.has_attr("title"):
                tds.append(td["title"])
            else:
                tds.append(td.getText().strip())

        url = ""
        a = tr.select_one("td a")
        if a is not None and a.has_attr("onclick") and a["onclick"].startswith("window.open"):
            m = open_url.search(a["onclick"])
            if m is not None:
                url = m.group(1)
        tds.append(url)
        rows.append(tds)
    return rows

def analysis6(table):
    """Extract every table row as a list of its stripped <td> texts."""
    def row_texts(tr):
        return [td.getText().strip() for td in tr.findAll("td")]
    return list(map(row_texts, table.findAll("tr")))

def analysis7(table):
    """Flatten the table into a list of rows of stripped cell texts."""
    all_rows = table.findAll("tr")
    return [[entry.getText().strip() for entry in r.findAll("td")] for r in all_rows]

def getData(enid, name, org_code):
    """Fetch and parse one enterprise detail page.

    Parameters mirror the site's query string: `enid` (internal id),
    `name` (company name), `org_code` (organization code).

    Returns a dict keyed by section name (基本信息 / 资质 / 安全许可 /
    人员 / 项目 / 良好信用 / 不良信用), or an empty dict when the HTTP
    request fails.
    """
    query = urllib.parse.urlencode({
        "enid": enid,
        "name": name,
        "org_code": org_code,
        "type": "",
    })
    url = f"http://jzscyth.shaanxi.gov.cn:7001/PDR/network/Enterprise/Informations/view?{query}"
    try:
        # `with` guarantees the HTTP response is closed on every path;
        # the original bare `except:` also swallowed KeyboardInterrupt —
        # URLError and socket.timeout are both OSError subclasses.
        with urllib.request.urlopen(url, timeout=20) as f:
            fileContent = f.read().decode('utf-8')
    except OSError:
        print("超时")
        return {}
    # The page's HTML is malformed: the first row of each detailTable is
    # missing its opening <tr>, so patch the raw markup before parsing.
    soup = BeautifulSoup(
        fileContent.replace(
            'class="detailTable">\r\n                    <td',
            'class="detailTable">\r\n                    <tr><td'),
        features="html.parser")
    tables = soup.find_all("table", class_="detailTable")
    return {
        "企业基本信息": analysis1(tables[0]),
        "企业资质信息": analysis2(tables[1]),
        "安全许可信息": analysis3(tables[2]),
        "人员信息": analysis4(tables[3]),
        "项目信息": analysis5(tables[4]),
        "良好信用": analysis6(tables[5]),
        "不良信用": analysis7(tables[6]),
    }
#     print(analysis2(tables[1]))
#     print(analysis3(tables[2]))
#     print(analysis4(tables[3]))
#     print(analysis5(tables[4]))
#     print(analysis6(tables[5]))
#     print(tables[6])
#     print(dic)
    
#     datas = []
#     for tr in soup.select("#enterpriseLibraryIsHides tr"):
#         cols = []
#         for td in tr.findAll("td"):
#             if td.has_attr("style") and td["style"].find("display: none") != -1:
#                 pass
#             else:
#                 if td.has_attr("title"):
#                     cols.append(td["title"])
#                 else:
#                     cols.append(td.getText().strip())
#         a = tr.select_one("td a")
#         if a is not None and a.has_attr("onclick") and a["onclick"].startswith("vie1"):
#             allText = re.findall(re.compile(r'vie1\((.*)\)'), a["onclick"])
#             if len(allText) > 0:
#                 cols.append(allText[0])
#             else:
#                 cols.append("")
#         else:
#             cols.append("")
#         
#         datas.append(cols)


def saveDatas(start):
    """Scrape every company from index `start` onward and persist each one.

    Loads the master company list, fetches the detail page for each
    4-field entry ("name,enid,org_code,…") and writes the result as JSON
    to one file per company. Failed fetches (getData returns {}) are
    retried in place — the original recursed into saveDatas(x), which
    re-processed the remaining entries twice and risked RecursionError.
    """
    alldata = readAllFile()
    for x in range(start, len(alldata)):
        company = alldata[x].replace("'", "").replace(" ", "").split(",")
        if len(company) != 4:
            continue
        # Retry the same index until the fetch succeeds.
        while True:
            record = getData(company[1], company[0], company[2])
            if record:
                break
            time.sleep(1)  # brief back-off before retrying a timeout
        path = f'/Users/wangshuguan/陕西省企业数据/企业_{x}_{company[0]}_{company[2]}.txt'
        # `with` closes the file even if the write raises.
        with open(path, 'w', encoding='utf-8') as fh:
            fh.write(json.dumps(record))
        time.sleep(1)  # throttle requests to the server
        print(x)
        
def readAllFile():
    """Load the master company list (a JSON array of strings) from disk."""
    # Explicit encoding — the file contains Chinese text; the original
    # relied on the platform default and shadowed the builtin `str`.
    with open('/Users/wangshuguan/陕西省全部数据.txt', encoding='utf-8') as f:
        return json.load(f)
    
if __name__ == "__main__":
    saveDatas(20434)