#coding=utf-8

import os
import re
import time
from bs4 import BeautifulSoup
from SqliteTool import SqliteTool
from HttpTool import httpTool 

#保存《系统之家》最新镜像信息www.xitongzhijia.net
def FetchNewestISOInfos__www_xitongzhijia_net():
	"""Scrape the newest ISO listing on www.xitongzhijia.net and store
	every entry whose download link is not yet in the database."""
	site_url = "http://www.xitongzhijia.net/"
	site_dir = "www.xitongzhijia.net"
	md5 = ""  # MD5 digest is not scraped here; stored empty
	db = SqliteTool()
	front = BeautifulSoup(httpTool.FetchUrlData(site_url, "gbk"), "html.parser")
	for item in front.find("div", 'newdown_cnt').find_all("p"):
		page_link = item.a.get("href")
		iso_name = item.a.get("title")
		detail = BeautifulSoup(httpTool.FetchUrlData(page_link, "gbk"), "html.parser")
		info_li = detail.find("div", "fg nrupr").ul.li
		soft_size = info_li.contents[0][5:]  # drop the leading label text
		update_time = info_li.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.contents[0][5:]
		for dl_div in detail.find_all("div", "nyxz_div"):
			if not dl_div.span.contents:
				continue
			if dl_div.span.contents[0] == "本地下载:":  # prefer the local-download mirror
				download_link = dl_div.div.a.get("o_href")
				if not db.IsISOInfoExist(download_link):
					db.SaveISOInfo(md5, site_dir, iso_name, page_link, download_link, soft_size, update_time)
				break
	print("www.xitongzhijia.net镜像信息保存完成")

#保存《Windows7en》最新镜像信息www.windows7en.com
def FetchNewestISOInfos_www_windows7en_com():
	"""Scrape www.windows7en.com's front-page list for the newest ISO
	images and store each entry missing from the database."""
	site_url = "http://www.windows7en.com/"
	site_dir = "www.windows7en.com"
	md5 = ""  # MD5 digest is not scraped here; stored empty
	db = SqliteTool()
	front = BeautifulSoup(httpTool.FetchUrlData(site_url, "utf-8"), "html.parser")
	for item in front.find("div", "middle_bottom_list").ul.find_all("li"):
		page_link = site_url + item.a.get("href")
		iso_name = item.a.get("title")
		detail = BeautifulSoup(httpTool.FetchUrlData(page_link, "utf-8"), "html.parser")
		info_li = detail.find("div", "con_info").ul.li
		soft_size = info_li.next_sibling.next_sibling.contents[0][5:]
		update_time = info_li.contents[0][5:]
		anchors = detail.find("div", "con_download").find_all("a")
		if not anchors:
			continue
		for anchor in anchors:
			if not anchor.contents:
				continue
			if anchor.contents[0] == "本地下载":  # prefer the local-download mirror
				download_link = anchor.get("href")
				if not db.IsISOInfoExist(download_link):
					db.SaveISOInfo(md5, site_dir, iso_name, page_link, download_link, soft_size, update_time)
				break
	print("www.windows7en.com镜像信息保存完成")

#保存《XP系统下载》最新镜像信息www.ghostxpsp3.net
def FetchNewestISOInfos_www_ghostxpsp3_net():
	"""Scrape the top-news listing on www.ghostxpsp3.net and store each
	newest ISO image that the database does not yet contain."""
	site_url = "http://www.ghostxpsp3.net/"
	site_dir = "www.ghostxpsp3.net"
	md5 = ""  # MD5 digest is not scraped here; stored empty
	# download link for "local high-speed download #1" embedded in a script tag
	link_pattern = r'thunder_url01 = "(http://.*?)"'
	db = SqliteTool()
	front = BeautifulSoup(httpTool.FetchUrlData(site_url, "gbk"), "html.parser")
	for item in front.find("div", "fl ml8 topnews").ul.find_all("li"):
		page_link = item.a.get("href")
		iso_name = item.a.get("title")
		detail = BeautifulSoup(httpTool.FetchUrlData(page_link, "gbk"), "html.parser")
		info_list = detail.find("ul", "clearfix software-infolist")
		soft_size = info_list.li.contents[1]
		update_time = info_list.li.next_sibling.next_sibling.contents[1]
		script_text = detail.find("script", language="javascript").contents[0]
		download_link = re.findall(link_pattern, script_text)[0]
		if not db.IsISOInfoExist(download_link):
			db.SaveISOInfo(md5, site_dir, iso_name, page_link, download_link, soft_size, update_time)
	print("www.ghostxpsp3.net镜像信息保存完成")

#保存《Win8系统之家》最新镜像信息www.win8.net
def FetchNewestISOInfos_www_win8_net():
	"""Scrape www.win8.net's front page for the 32-bit and 64-bit Win8
	GHOST images and store whichever is not yet in the database."""
	site_url = "http://www.win8.net/"
	site_dir = "www.win8.net"
	md5 = ""  # MD5 digest is not scraped here; stored empty
	page_link = site_url  # details live on the front page itself
	db = SqliteTool()
	soup = BeautifulSoup(httpTool.FetchUrlData(site_url, "utf-8"), "html.parser")
	link_pattern = r'thunder_url = "(.*?)"'
	year_prefix = time.strftime("%Y") + "-"  # the page omits the year

	# 32-bit image
	iso_name = "Win8 GHOST系统32位"
	download_link = re.findall(link_pattern, soup.find("div", "dowm bite").script.contents[0])[0]
	info_span = soup.find("div", "other").find("div", "mr").span
	soft_size = info_span.contents[0][3:]
	update_time = year_prefix + info_span.next_sibling.next_sibling.next_sibling.next_sibling.contents[0][3:]
	if not db.IsISOInfoExist(download_link):
		db.SaveISOInfo(md5, site_dir, iso_name, page_link, download_link, soft_size, update_time)

	# 64-bit image (info lives in the second "other" div)
	iso_name = "Win8 GHOST系统64位"
	download_link = re.findall(link_pattern, soup.find("div", "dowm bite64").script.contents[0])[0]
	info_span = soup.find("div", "other").find_next("div", "other").find("div", "mr").span
	soft_size = info_span.contents[0][3:]
	update_time = year_prefix + info_span.next_sibling.next_sibling.next_sibling.next_sibling.contents[0][3:]
	if not db.IsISOInfoExist(download_link):
		db.SaveISOInfo(md5, site_dir, iso_name, page_link, download_link, soft_size, update_time)
	print("www.win8.net镜像信息保存完成")

#保存《系统城》最新镜像信息www.xitongcheng.com
def FetchNewestISOInfos_www_xitongcheng_com():
	"""Scrape the newest-image list on www.xitongcheng.com and store every
	download link that is not yet in the database."""
	site_url = "http://www.xitongcheng.com/"
	site_dir = "www.xitongcheng.com"
	md5 = ""  # MD5 digest is not scraped here; stored empty
	link_pattern = r'fUrl= "(.*?)"'  # download URL embedded in inline scripts
	db = SqliteTool()
	front = BeautifulSoup(httpTool.FetchUrlData(site_url, "utf-8"), "html.parser")
	for item in front.find("ul", "new_list").find_all("li"):
		iso_name = item.div.a.contents[0]
		page_link = site_url + item.div.a.get("href")
		detail = BeautifulSoup(httpTool.FetchUrlData(page_link, "utf-8"), "html.parser")
		info_li = detail.find("div", "softinfo").ul.li
		soft_size = info_li.next_sibling.next_sibling.contents[0][5:]
		update_time = info_li.contents[0][6:]
		for script in detail.find_all("script"):
			if not script.contents:
				continue
			matches = re.findall(link_pattern, script.contents[0])
			if not matches:
				continue
			download_link = matches[0]
			if not db.IsISOInfoExist(download_link):
				db.SaveISOInfo(md5, site_dir, iso_name, page_link, download_link, soft_size, update_time)
	print("www.xitongcheng.com镜像信息保存完成")

#保存《Win10系统之家》最新镜像信息www.ghost580.com
def FetchNewestISOInfos_www_ghost580_com():
	"""Scrape www.ghost580.com and store the required newest images.

	Per requirements: 2 Win10 images, 2 Win8 images, 1 XP image and
	2 Win7 images are taken from the category lists on the front page.

	Improvement: the original built the entry list with seven almost
	identical copy/paste stanzas (plus an ``isoInfo[:] = []`` /
	``list(isoInfo)`` dance); these are collapsed into one helper.
	"""
	url = "http://www.ghost580.com/"
	dirName = "www.ghost580.com"
	md5 = ""  # MD5 digest is not scraped here; stored empty
	sqliteTool = SqliteTool()
	html = httpTool.FetchUrlData(url, "gbk")
	soup = BeautifulSoup(html, "html.parser")

	def _entry(ul_id, second):
		# Return [title, absolute page link] for the first (or, when
		# `second` is True, the second) item of <ul id=ul_id>.
		li = soup.find("ul", id=ul_id).li
		if second:
			li = li.next_sibling.next_sibling
		a = li.find_next("a").find_next("a")
		return [a.get("title"), url + a.get("href")]

	isoInfos = [
		_entry("list_1_1", False), _entry("list_1_1", True),  # Win10 x2
		_entry("list_1_2", False), _entry("list_1_2", True),  # Win8 x2
		_entry("list_1_3", False),                            # XP x1
		_entry("list_1_4", False), _entry("list_1_4", True),  # Win7 x2
	]

	# Open each detail page and collect the download information.
	for isoName, pageLink in isoInfos:
		childSoup = BeautifulSoup(httpTool.FetchUrlData(pageLink, "gbk"), "html.parser")
		softSize = childSoup.find("div", "sinfo").ul.li.contents[0][5:]
		# NOTE(review): single next_sibling here while sibling scrapers use
		# two — kept as in the original; verify against the live markup.
		updateTime = childSoup.find("div", "sinfo").ul.li.next_sibling.contents[0][5:]
		downloadLink = url + childSoup.find("div", "downs").ul.li.a.get("href")
		if not sqliteTool.IsISOInfoExist(downloadLink):
			sqliteTool.SaveISOInfo(md5, dirName, isoName, pageLink, downloadLink, softSize, updateTime)
	print("www.ghost580.com镜像信息保存完成")

#保存《系统之家》最新镜像信息www.xp5.com
def FetchNewestISOInfos_www_xp5_com():
	"""Scrape the listing on www.xp5.com and store each newest ISO image
	that is missing from the database."""
	site_url = "http://www.xp5.com/"
	site_dir = "www.xp5.com"
	md5 = ""  # MD5 digest is not scraped here; stored empty
	db = SqliteTool()
	front = BeautifulSoup(httpTool.FetchUrlData(site_url, "gbk"), "html.parser")
	for item in front.find("ul", "cong_ul").find_all("li"):
		iso_name = item.a.get("title")
		page_link = site_url + item.a.get("href")
		detail = BeautifulSoup(httpTool.FetchUrlData(page_link, "gbk"), "html.parser")
		info_p = detail.find("div", "infolist").p
		soft_size = info_p.span.contents[0].lstrip()[5:]
		update_time = info_p.next_sibling.next_sibling.span.next_sibling.next_sibling.contents[0].lstrip()[5:]
		# the site only publishes thunder:// links, so convert to a real URL
		download_link = httpTool.thunder2Real(detail.find("div", "dowmnfs").ul.li.a.get("href"))
		if not db.IsISOInfoExist(download_link):
			db.SaveISOInfo(md5, site_dir, iso_name, page_link, download_link, soft_size, update_time)
	print("www.xp5.com镜像信息保存完成")

#保存《系统之家》最新镜像信息www.xp61.com
def FetchNewestISOInfos_www_xp61_com():
	"""Scrape the listing on www.xp61.com and store each newest ISO image
	that is missing from the database."""
	site_url = "http://www.xp61.com/"
	site_dir = "www.xp61.com"
	md5 = ""  # MD5 digest is not scraped here; stored empty
	db = SqliteTool()
	front = BeautifulSoup(httpTool.FetchUrlData(site_url, "gbk"), "html.parser")
	for item in front.find("ul", "cong_ul").find_all("li"):
		iso_name = item.a.get("title")
		page_link = site_url + item.a.get("href")
		detail = BeautifulSoup(httpTool.FetchUrlData(page_link, "gbk"), "html.parser")
		info_p = detail.find("div", "infolist").p
		soft_size = info_p.span.contents[0][5:]
		update_time = info_p.next_sibling.next_sibling.span.next_sibling.next_sibling.contents[0][5:]
		for row in detail.find("ul", "downlistbox").find_all("li"):
			if not row.a:
				continue
			if row.a.contents[0] == "电信下载地址":  # prefer the China-Telecom mirror
				download_link = row.a.get("href")
				if not db.IsISOInfoExist(download_link):
					db.SaveISOInfo(md5, site_dir, iso_name, page_link, download_link, soft_size, update_time)
				break
	print("www.xp61.com镜像信息保存完成")

#保存《Win7纯净版》最新镜像信息www.win7cjb.com
def FetchNewestISOInfos_www_win7cjb_com():
	"""Scrape www.win7cjb.com: per requirements store one newest image each
	from the Win7 64-bit, Win7 32-bit and pure-XP categories."""
	site_url = "http://www.win7cjb.com/"
	site_dir = "www.win7cjb.com"
	md5 = ""  # MD5 digest is not scraped here; stored empty
	wanted_titles = ("Win7 64位纯净版", "win7 32位纯净版", "纯净版XP系统")
	link_pattern = r'thunder_url01 = "(.*?)"'
	db = SqliteTool()
	front = BeautifulSoup(httpTool.FetchUrlData(site_url, "utf-8"), "html.parser")
	for nav_item in front.find("div", id="nav").find_all("li"):
		if nav_item.a.get("title") not in wanted_titles:
			continue
		category = BeautifulSoup(httpTool.FetchUrlData(nav_item.a.get("href"), "utf-8"), "html.parser")
		newest = category.find("div", id="soft-new").find("li").a
		iso_name = newest.get("title")
		page_link = newest.get("href")
		detail = BeautifulSoup(httpTool.FetchUrlData(page_link, "utf-8"), "html.parser")
		info_li = detail.find("div", "soft-detail").ul.li
		soft_size = info_li.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.contents[0][5:]
		update_time = info_li.next_sibling.next_sibling.next_sibling.next_sibling.contents[0][5:]
		download_link = re.findall(link_pattern, detail.find("script", language="javascript").contents[0])[0]
		if not db.IsISOInfoExist(download_link):
			db.SaveISOInfo(md5, site_dir, iso_name, page_link, download_link, soft_size, update_time)
	print("www.win7cjb.com镜像信息保存完成")

#保存《系统114》最新镜像信息www.xitong114.com
def FetchNewestISOInfos_www_xitong114_com():
	"""Scrape www.xitong114.com and store the single newest Win7 ISO image
	(per requirements) if it is not already in the database.

	Fixes: the completion message previously reported "www.win7cjb.com";
	the download-link loop now guards against <div>s without an anchor,
	consistent with the other scrapers.
	"""
	url = "http://www.xitong114.com/"
	dirName = "www.xitong114.com"
	md5 = ""  # MD5 digest is not scraped here; stored empty
	sqliteTool = SqliteTool()
	html = httpTool.FetchUrlData(url, "utf-8")
	soup = BeautifulSoup(html, "html.parser")
	# Follow the second menu entry (presumably the Win7 category — verify
	# against the live page) to its listing.
	link = soup.find("ul", "menu_ul").li.next_sibling.next_sibling.find("a").get("href")
	html = httpTool.FetchUrlData(link, "utf-8")
	childSoup = BeautifulSoup(html, "html.parser")
	isoName = childSoup.find("ul", "list-arc").li.find("a").get("title")
	pageLink = childSoup.find("ul", "list-arc").li.find("a").get("href")
	html = httpTool.FetchUrlData(pageLink, "utf-8")
	grandChildSoup = BeautifulSoup(html, "html.parser")
	softSize = grandChildSoup.find("ul", "list-parxt fix").li.contents[2]
	updateTime = grandChildSoup.find("ul", "list-parxt fix").li.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.contents[2]
	for info in grandChildSoup.find("div", "dfscntxz").find_all("div"):
		# skip <div>s with no anchor / empty anchor to avoid AttributeError
		if not info.a or not info.a.contents:
			continue
		if info.a.contents[0] == "本地下载":  # prefer the local-download mirror
			downloadLink = info.a.get("href")
			if not sqliteTool.IsISOInfoExist(downloadLink):
				sqliteTool.SaveISOInfo(md5, dirName, isoName, pageLink, downloadLink, softSize, updateTime)
			break
	# BUG FIX: this message previously named www.win7cjb.com
	print("www.xitong114.com镜像信息保存完成")

def Log(html):
	"""Dump *html* to data.txt (overwriting) for offline inspection.

	The file is opened with an explicit utf-8 encoding because on Windows
	the default open() encoding is GBK, which cannot represent all page
	content.

	Fixes: uses a ``with`` block so the handle is closed even if write()
	raises, instead of a manual open()/close() pair.
	"""
	with open("data.txt", "w", encoding='utf-8') as out:
		out.write(html + "\n")

#--------------------------->
'''
#保存《Win7旗舰版》最新镜像信息
def FetchNewestISOInfos_www_win7qjb_com():
	url = "http://www.win7qjb.com/"
	downloadLink = ""#镜像下载地址
	md5 = ""#镜像MD5值
	isoName = ""#镜像名称
	pageLink = ""#镜像地址
	html = httpTool.FetchUrlData(url,"utf-8")
	Log(html)
	soup = BeautifulSoup(html,"html.parser")
	print(soup.find("div",id="main"))#.div.div.next_sibling)
	
	#for info in soup.find("div","soft-new").find_all("li"):
	#	pageLink = info.a.get("href")
	#	isoName = info.a.get("title")
	#	print(pageLink)
	#	print(isoName)

#保存《Win7之家》最新镜像信息
def FetchNewestISOInfos_www_win7zhijia_cn():
	url = "http://www.win7zhijia.cn/"
	downloadLink = ""#镜像下载地址
	md5 = ""#镜像MD5值
	isoName = ""#镜像名称
	pageLink = ""#镜像地址
	html = httpTool.FetchUrlData(url,"utf-8")
	soup = BeautifulSoup(html,"html.parser")
	for info in soup.find("div","new_all").find_all("li"):
		isoName = info.a.get("title")
		pageLink = info.a.get("href")
		html = httpTool.FetchUrlData(pageLink,"utf-8")
		Log(html)
		childSoup = BeautifulSoup(html,"html.parser")

def Download__win7_hfhdo_cn():
	downloadPath = os.getcwd()+"\\isoFile\\win7.hfhdo.cn\\"
	if not os.path.exists(downloadPath):
		os.mkdir(downloadPath)
	url = "http://win7.hfhdo.cn/"
	html = httpTool.GetUrlData(url)
	soup = BeautifulSoup(html,"html.parser")
	for iosinfo in soup.find_all("div","box"):
		downloadLink = ""#镜像下载地址
		filename = ""#镜像文件名称
		md5 = ""#镜像MD5值
		link = url + iosinfo.h2.a.get("href")#镜像地址
		isoName = iosinfo.h2.a.contents[0]#iso镜像名称
		i = 0
		for info in iosinfo.find_all('p'):
			if i == 1:
				isoSystemType = info.contents[0]#系统类型
			elif i == 2:
				isoSystemLanguage = info.contents[0]#系统语言
			i += 1

		html = httpTool.GetUrlData(link)
		childSoup = BeautifulSoup(html,"html.parser")
		isGetDownLoadLink = False
		for info in childSoup.find_all("div","ls"):
			title = info.find("div","title").contents[0]
			if title=="下载地址":
				if isGetDownLoadLink:
					continue
				downloadInfos = info.find_all("li")
				for i in downloadInfos:
					link = i.a.get("href")
					downloadType = i.a.contents[0]
					if downloadType=="本地下载1":#选择本地下载1作为默认下载链接
						downloadLink = url+link

				isGetDownLoadLink = True
			elif title=="文件信息":
				filename = str(info.find("div","description").p.contents[0])[4:]#文件名称
				md5 = str(info.find("div","description").p.br.br.contents[0])[11:]#MD5值	
		if not httpTool.IsISODownloaded(md5):
			httpTool.DownLoad(downloadLink,downloadPath,filename)
#系统之家镜像信息检测保存
def FetchISOInfos__www_xitongzhijia_net():
	downloadPath = os.getcwd()+"\\isoFile\\www.xitongzhijia.net\\"
	if not os.path.exists(downloadPath):
		os.mkdir(downloadPath)
	url = "http://www.xitongzhijia.net/"
	html = httpTool.FetchUrlData(url,"gbk")
	soup = BeautifulSoup(html,"html.parser")
	xpUrl = ""
	win7Url = ""
	for info in soup.find("ul",'clearfix').find_all("li"):
		title = info.a.contents[0]
		if title=="XP系统":
			xpUrl = info.a.get("href")
		if title=="Win7 系统":
			win7Url = info.a.get("href")
	
	#xp镜像下载
	html = httpTool.FetchUrlData(xpUrl,"gbk")
	soup = BeautifulSoup(html,"html.parser")
	downloadLink = ""#镜像下载地址
	md5 = ""#镜像MD5值
	isoName = ""#镜像名称
	link = ""#镜像地址
	fileInfo = ""#文件信息
	for info in soup.find("div","sf_list_cnt").ul.find_all("li"):
		isoName = info.h3.a.get("title")
		link = url + info.h3.a.get("href")
		html = httpTool.FetchUrlData(link,"gbk")
		childSoup = BeautifulSoup(html,"html.parser")
		#filename = str(childSoup.find("div","xtfile").p.next_sibling.next_sibling.contents[0])[6:]#文件名称
		md5 = str(childSoup.find("div","xtfile").p.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.contents[0])[4:]#MD5值 
		for childinfo in childSoup.find_all("div","nyxz_div"):
			if childinfo.span.contents[0] == "本地下载:":#默认使用本地下载
				downloadLink = childinfo.div.a.get("o_href")
				httpTool.SaveISOInfo(md5,isoName,link,downloadLink)

	#翻页继续下载
	while True:
		isEnd = True
		for page in soup.find("div","page clearfix").find_all("a"):
			if page.contents[0]=="下一页":
				isEnd = False
				html = httpTool.FetchUrlData(xpUrl + page.get("href"),"gbk")
				soup = BeautifulSoup(html,"html.parser")
				for info in soup.find("div","sf_list_cnt").ul.find_all("li"):
					isoName = info.h3.a.get("title")#镜像名称
					link = url + info.h3.a.get("href")#镜像地址
					html = httpTool.FetchUrlData(link,"gbk")
					childSoup = BeautifulSoup(html,"html.parser")
					md5 = str(childSoup.find("div","xtfile").p.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.contents[0])[4:]#MD5值
					for childinfo in childSoup.find_all("div","nyxz_div"):
						if childinfo.span.contents[0] == "本地下载:":#默认使用本地下载
							downloadLink = childinfo.div.a.get("o_href")
							httpTool.SaveISOInfo(md5,isoName,link,downloadLink)			
		if isEnd:
			break

'''
#------------------------------->

