# import requests
# # from bs4 import BeautifulSoup
 
 
# url='https://www.zhihuishu.com/supportService-new/page/tch/qaDetail/zhucedenglu.html?questionId=2105'
# response=requests.get(url)
# # response.encoding = 'utf-8'
# # soup = BeautifulSoup(response.text, 'html.parser')
# # print(soup.prettify())

# print(response);




import urllib.request 

def getHTML(url):
    """Fetch *url* and return the raw response body as bytes.

    Raises urllib.error.URLError (including HTTPError) on network failure.
    """
    # Use a context manager so the HTTP connection is always closed,
    # even if read() raises (original leaked the response object).
    with urllib.request.urlopen(url) as response:
        return response.read()
  
def saveHTML(file_name, file_content):
    """Write raw page bytes to '<sanitized file_name>.html' in the CWD.

    Args:
        file_name: desired base name; characters Windows forbids in file
            names are replaced with '_' before use.
        file_content: bytes to write (files are opened in binary mode).
    """
    # Windows forbids \ / : * ? " < > | in file names; map each to '_'.
    # (Original only handled '/'; this is a superset, so existing names
    # like 'zhucedenglu2105' are unaffected.)
    safe_name = file_name
    for ch in '\\/:*?"<>|':
        safe_name = safe_name.replace(ch, '_')
    # Content is bytes, not str, so open the file in binary mode.
    with open(safe_name + ".html", "wb") as f:
        f.write(file_content)


# aurl = "https://www.zhihuishu.com/supportService-new/page/tch/qaDetail/zhucedenglu.html?questionId=2105"

def baseUrl(filename, id):
    """Build the zhihuishu FAQ detail-page URL for one question.

    Args:
        filename: category page name, e.g. 'zhucedenglu'.
        id: question id; str or int (the original raised TypeError for
            ints because it used '+' concatenation — f-strings accept both).

    Returns:
        The fully-qualified qaDetail URL as a str.
    """
    return (
        "https://www.zhihuishu.com/supportService-new/page/tch/qaDetail/"
        f"{filename}.html?questionId={id}"
    )

# FAQ categories to scrape. Each entry maps a category page name ('name',
# used as the qaDetail path segment) to the question ids listed under it
# ('children', used as the questionId query parameter).
parentList = [
    {
        'name': 'zhucedenglu',
        'children': ['2105','2101','2102','2103','2104']
    },
    {
        'name': 'zaixiandaxue',
        'children': ['2201','2202','2203','2204','2206','2207','2208']
    },
    {
        'name': 'laoshiduan',
        'children': ['2301','2302','2303','2304','2305','2306','2307','2308','2309','2310','2311','2312','2313','2314','2315']
    }
]


# Crawl every question page and save each as '<name><id>.html' in the CWD.
# (Original used enumerate but never used the indices; dropped them.)
for parent in parentList:
    category = parent["name"]
    for question_id in parent["children"]:
        saveHTML(category + question_id, getHTML(baseUrl(category, question_id)))
        # Report success only after the page was actually fetched and
        # saved (original printed "crawled" before fetching).
        print("网页已爬取", category, question_id)







# html = getHTML(aurl)
# print("网页已爬取")

# saveHTML("sina", html)
# print("网页已存储至本地")




