# import requests
# from lxml import etree
#
# domain = "https://desk.zol.com.cn"
# # # 1. Fetch the page source HTML
# # # url = "https://desk.zol.com.cn/dongman/"
# url = "https://desk.zol.com.cn/pc/"
# #
# #
# headers = {
#    "User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36 Edg/130.0.0.0"
# }
# #
# response = requests.get(url, headers=headers)
# response.encoding = response.apparent_encoding
# # # print(response.text)
# #
# et = etree.HTML(response.text)
# result = et.xpath("//ul[@class='pic-list2  clearfix']/li/a/@href")
# # print(result)
#
# # print(result[0])
# # print("https:" + result[1])
#
#
# for item in result[2:]:
#     url = domain + item
# print(url)


# wuyu = et.xpath("//dd[@class='brand-sel-box clearfix']/a")               # grab the <a> tags
#
# dic = {}
# for i in wuyu[1:]:
#     if not i.xpath("@href"):
#         break
#
#     # print(domain + i.xpath("@href")[0])
#
#     name = i.xpath("@href")[0]
#     url = domain + i.xpath("@href")[0]
#     response = requests.get(url, headers=headers)
#     response.encoding = response.apparent_encoding
#
#     et = etree.HTML(response.text)
#     result = et.xpath("//ul[@class='pic-list2  clearfix']/li/a/@href")
#     # print(result)
#
#     for item in result:
#         if item[0:5] == "https":
#             dic[name] = ["https://down10.zol.com.cn/desktoptools/XZDesktop_5018_3.1.3.6.exe"]
#         elif item[0:6] == "//desk":
#             dic[name] += ["https:" + item]
#         else:
#             dic[name] += [domain + item]
#
#         # print(urls)
#
# print(dic)
#
# with open("bizhi.txt", "w", encoding="utf-8") as f:
#     f.write(str(dic).strip("{}"))
import csv