import glob
import json
import re
import sys
import urllib.parse
import urllib.request

sys.path.append("..")

# from 查找没用的图片 import listDir,printList
from 江苏省资质信息 import readAllData
from bs4 import BeautifulSoup

# allSourceFiles = listDir('/Users/wangshuguan/江苏省',extensions=["txt"],reuslts = [])
def readData(x):
    """Load page file aa{x}.txt from the 江苏省 directory.

    Returns the parsed JSON value stored in the file (a list of records).
    """
    # Read as UTF-8 explicitly instead of relying on the platform default,
    # and don't shadow the builtin `str` like the original did.
    with open(f'/Users/wangshuguan/江苏省/aa{x}.txt', encoding='utf-8') as f:
        return json.loads(f.read())
def saveAllData():
    """Merge every aa{n}.txt page file into one combined JSON array file.

    The original referenced ``allSourceFiles``, whose definition was
    commented out at module level (NameError at runtime); the file count is
    now derived directly from the directory contents with ``glob``.
    """
    source_files = glob.glob('/Users/wangshuguan/江苏省/aa*.txt')
    arr = []
    # Page files are numbered aa1.txt .. aaN.txt, so iterate by index.
    for x in range(1, len(source_files) + 1):
        arr.extend(readData(x))
    # Context manager guarantees the handle is closed even on write errors.
    with open(f'/Users/wangshuguan/江苏省全部数据.txt', 'w', encoding='utf-8') as fh:
        fh.write(json.dumps(arr))
    
    
#监理企业
#监理企业
def saveJianliQiye():
    """Extract the supervision-company (监理企业) table from the saved HTML
    page and dump each row (cell texts + detail-link URL) as a JSON array.
    """
    # Compile once; the original recompiled the pattern on every row.
    link_re = re.compile(r'javascript:newWindow\(\'([^\']*)\'')

    def save(filename, contents):
        with open(filename, 'w', encoding='utf-8') as fh:
            fh.write(contents)

    with open('/Users/wangshuguan/Desktop/companyies.html') as f:
        soup = BeautifulSoup(f.read(), features="html.parser")

    arr = []
    for tr in soup.select("#SimpleRichTableBody090299ceb0844faf9a747388e09009ae table tr"):
        trarr = [td.getText() for td in tr.findAll("td")]
        # Rows without an <a> (header/spacer rows) would crash the original
        # with AttributeError on tr.a["href"]; guard and fall back to "".
        href = tr.a["href"] if tr.a is not None else ""
        allText = link_re.findall(href)
        trarr.append(allText[0] if allText else "")
        arr.append(trarr)

    save(f'/Users/wangshuguan/江苏省监理企业.txt', json.dumps(arr))
#勘察企业
def saveKanchaQiye():
    """Extract the survey-company (勘察企业) table from the saved HTML page
    and dump each row (cell texts + detail-link URL) as a JSON array.
    """
    # Compile once; the original recompiled the pattern on every row.
    link_re = re.compile(r'javascript:newWindow\(\'([^\']*)\'')

    def save(filename, contents):
        with open(filename, 'w', encoding='utf-8') as fh:
            fh.write(contents)

    with open('/Users/wangshuguan/Desktop/companyies.html') as f:
        soup = BeautifulSoup(f.read(), features="html.parser")

    arr = []
    for tr in soup.select("#SimpleRichTableBody28d429b8ab8f4c1c9a53a77c45897171 table tr"):
        trarr = [td.getText() for td in tr.findAll("td")]
        # Rows without an <a> (header/spacer rows) would crash the original
        # with AttributeError on tr.a["href"]; guard and fall back to "".
        href = tr.a["href"] if tr.a is not None else ""
        allText = link_re.findall(href)
        trarr.append(allText[0] if allText else "")
        arr.append(trarr)

    save(f'/Users/wangshuguan/江苏省勘察企业.txt', json.dumps(arr))
    
#设计企业
def saveShejiQiye():
    """Extract the design-company (设计企业) table from the saved HTML page
    and dump each row (cell texts + detail-link URL) as a JSON array.
    """
    # Compile once; the original recompiled the pattern on every row.
    link_re = re.compile(r'javascript:newWindow\(\'([^\']*)\'')

    def save(filename, contents):
        with open(filename, 'w', encoding='utf-8') as fh:
            fh.write(contents)

    with open('/Users/wangshuguan/Desktop/companyies.html') as f:
        soup = BeautifulSoup(f.read(), features="html.parser")

    arr = []
    for tr in soup.select("#SimpleRichTableBody48e3ce42ff4e4499ae27fca09251d78c table tr"):
        trarr = [td.getText() for td in tr.findAll("td")]
        # Rows without an <a> (header/spacer rows) would crash the original
        # with AttributeError on tr.a["href"]; guard and fall back to "".
        href = tr.a["href"] if tr.a is not None else ""
        allText = link_re.findall(href)
        trarr.append(allText[0] if allText else "")
        arr.append(trarr)

    save(f'/Users/wangshuguan/江苏省设计企业.txt', json.dumps(arr))
        
def getData(entpcode, certcode):
    """POST a paged qualification query for one company/certificate pair.

    Returns the response body as a UTF-8 string, or "" on any network
    failure (the original treated every error as a timeout).
    """
    payload = urllib.parse.urlencode({"page": 1, "rows": 1000}).encode('utf-8')
    url = f"http://221.226.118.170:8080/entpcertlist/queryQualType/{entpcode}/{certcode}"
    try:
        # `with` closes the response; the original leaked the connection.
        with urllib.request.urlopen(url, data=payload, timeout=10) as resp:
            return resp.read().decode('utf-8')
    except OSError:
        # urllib.error.URLError and socket.timeout both derive from OSError;
        # the original used a bare except, which also swallowed KeyboardInterrupt.
        print("超时")
        return ""
def readAllData2():
    """Return the saved supervision-company (监理企业) row list from disk.

    The 勘察/设计 lists were deliberately disabled in the original
    (commented out) and remain excluded here.
    """
    def read_json_file(path):
        # Explicit UTF-8, and no shadowing of the builtin `str`.
        with open(path, encoding='utf-8') as f:
            return json.loads(f.read())

    # Return a fresh list so callers may mutate it freely.
    return list(read_json_file('/Users/wangshuguan/江苏省监理企业.txt'))

def catLists(x):
    """Fetch URL *x* and return all table rows as lists of stripped cell
    texts; [] when the request fails.

    The server normally serves GBK. If the body does not decode as GBK,
    the raw bytes are handed to BeautifulSoup so it can sniff the charset
    itself. (The original re-fetched the URL in that case, which was a
    wasted round trip since the bytes were already in hand, and its bare
    ``except:`` clauses hid unrelated errors.)
    """
    def parse_rows(markup):
        # One shared parser for both decode paths; the original duplicated
        # this loop verbatim.
        soup = BeautifulSoup(markup, features="html.parser")
        return [[td.getText().strip() for td in tr.findAll("td")]
                for tr in soup.findAll("tr")]

    try:
        f = urllib.request.urlopen(x, timeout=10)
    except OSError:
        print(x)
        print("超时")
        return []

    raw = f.read()
    try:
        return parse_rows(raw.decode("gbk"))
    except UnicodeDecodeError:
        return parse_rows(raw)

def catDatail(x):
    """Fetch a GBK-encoded company detail page and return
    ``(detail_dict, qualification_rows)``; ``("", "")`` on failure or when
    the page doesn't match the expected layout.

    NOTE(review): the misspelled name 'catDatail' is kept because callers
    depend on it.
    """
    try:
        f = urllib.request.urlopen(x, timeout=5)
    except OSError:
        # Narrowed from a bare except: all urllib/socket errors are OSError.
        print(f"{x}超时")
        return "", ""

    soup = BeautifulSoup(f.read().decode('gbk'), features="html.parser")
    tables = soup.select("#page1 table")
    if not tables:
        # Original crashed with IndexError on tables[0] for unexpected pages.
        return "", ""

    # The first four highlighted rows hold alternating label/value cells.
    detailArr = []
    detaildic = {}
    trs = list(tables[0].select("tr[align],tr[bgcolor]"))
    for tr in trs[:4]:
        for td in tr.findAll("td"):
            detailArr.append(td.getText().strip())
    if len(detailArr) > 0 and detailArr[0] == "企业名称":
        # Cells alternate label, value, label, value, ...
        for i in range(0, int(len(detailArr) / 2)):
            detaildic[detailArr[i * 2]] = detailArr[i * 2 + 1]
    if "企业名称" in detaildic:
        # The company-name cell carries trailing layout junk after the
        # first line; keep only the leading part.
        detaildic["企业名称"] = detaildic["企业名称"].split("\n\r\n\t")[0]

    # The nested table lists qualification certificates, one row each.
    zizhiArr = []
    for tr in tables[0].select("tr td table tr"):
        zizhiArr.append([td.getText().strip() for td in tr.findAll("td")])

    if len(zizhiArr) > 0 and len(trs) > 0:
        return detaildic, zizhiArr
    return "", ""
#             if len(trs) == 7:
#                 for tr in trs[:6]:
#                     for td in tr.findAll("td"):
#                         detailArr.append(td.getText().strip())
#                 if len(detailArr) > 0 and detailArr[0]=="企业名称":
#                     for i in range(0,int(len(detailArr)/2)):
#                         detaildic[detailArr[i*2]] = detailArr[i*2+1]
#                         
#                 zizhiArr = []
#                 for tr in trs[6].select("td table tr"):
#                     tds = []
#                     for td in tr.findAll("td"):
#                         tds.append(td.getText().strip())
#                     zizhiArr.append(tds)
#                 return detaildic,zizhiArr
#             else:
#                 print(len(trs))
#                 print("不符合规则")
#                 return "",""
            
def getData2(code, para):
    """Fetch the detail page plus the four list pages (personnel, projects,
    awards, penalties) for one company code.

    Returns a combined dict when every section came back non-empty,
    otherwise None.
    """
    detaildic, cers = catDatail(f"http://58.213.147.230:7001/Jsjzyxyglpt/faces/public/{para}/basicInfoView.jsp?action=viewKcsjqyJbxx&corpCode={code}")
    pers = catLists(f"http://58.213.147.230:7001/Jsjzyxyglpt/faces/public/{para}/content.jsp?action=getZyryList&corpCode={code}")
    projects = catLists(f"http://58.213.147.230:7001/Jsjzyxyglpt/faces/public/{para}/content.jsp?action=getProjectList&corpCode={code}")
    wins = catLists(f"http://58.213.147.230:7001/Jsjzyxyglpt/faces/public/{para}/content.jsp?action=getHjqkList&corpCode={code}")
    losts = catLists(f"http://58.213.147.230:7001/Jsjzyxyglpt/faces/public/{para}/content.jsp?action=getBljlList&corpCode={code}")

    sections = (detaildic, pers, projects, wins, losts)
    if all(len(section) > 0 for section in sections):
        return {"detail": detaildic, "cers": cers, "pers": pers,
                "projects": projects, "wins": wins, "losts": losts}
    return None
def saveDatas(start):
    """Download and save supervision-company details for every record from
    index *start* onward, one JSON file per company.

    A failed fetch is retried with a loop. The original recursed into
    ``saveDatas(x)`` on failure, which grew the call stack without bound
    and, once the recursive call returned, fell back into the outer loop
    and re-downloaded every index from *x* again.
    """
    def save(filename, contents):
        with open(filename, 'w', encoding='utf-8') as fh:
            fh.write(contents)

    alldatas = readAllData2()
    for x in range(start, len(alldatas)):
        data = alldatas[x]
        # The last cell is the detail URL; the corpCode is its 3rd '=' field.
        code = data[-1].split("=")[2]
        result = getData2(code, "jlqy")
        while result is None:
            # Same retry-forever intent as the original recursion, but flat.
            result = getData2(code, "jlqy")
        save(f'/Users/wangshuguan/江苏省特别企业详情/监理企业/监理企业_{x}_{data[1]}_{data[3]}.txt', json.dumps(result))
        print(x)
if __name__ == "__main__":
    # 1029 is a manual resume checkpoint: the crawl restarts at this record
    # index after a previous interrupted run.
    saveDatas(1029)
#     print(readAllData2()[1031])
#     code = readAllData2()[1031][7].split("=")[2]
#     data = getData2(code,"jlqy")
#     print(data)
#     print(data)
#     print(data["detail"])
#     print(readAllData2()[0])
    





# print(len(readAllData()))


# printList([len(readData(x)) for x in range(1,len(allSourceFiles)+1)])
# dataFrames = []
# for x in range(1,len(allSourceFiles)):
#     numpArr = np.array(readFile(x))
#     if len(numpArr)>0:
#         pdArr = pd.DataFrame(numpArr,columns=list(numpArr[0]))
#         dataFrames.append(pdArr)
#     else:
#         print(x)
# allframes = pd.concat(dataFrames)
# print(readData(1)[0])