# 第二阶段：获取所有目标数据页面的url并测试其是否可被爬取，全部验证通过后才可开始第三阶段；
# 否则剔除无法爬取的url。
import utils
import copy
from rich.progress import track
# Listing pages that passed stage 1 validation (pickled by the previous step).
success_pages = utils.get_pkl_data("step1ed.pkl")
# urls: every target URL discovered; success_urls: the subset accepted as usable.
urls = []
success_urls = []
# ---------------------------------------------------------------------------------------------------------


# Walk every stage-1 page and collect the detail-page URLs it links to.
for page in track(success_pages,"[\033[32m获取"+str(len(success_pages))+"个页面的目的url中...\033[0m"+"]:"):
    # Each element with class "textlist" wraps one anchor pointing at a detail page.
    elements = utils.get_elements(page, {"class": "textlist"})
    for element in elements:
        # Prefix the relative href with the local mirror's base address.
        # (find_all("a")[0] deliberately raises IndexError if the anchor is missing.)
        urls.append("http://127.0.0.1:8000" + element.find_all("a")[0].attrs['href'])
# NOTE(review): crawlability validation is disabled — every URL is accepted as-is.
# success_urls=utils.check_pages(urls,"bookCover",0)
# The URLs are immutable strings, so a shallow copy is exactly equivalent to the
# original copy.deepcopy(urls) while avoiding a pointless recursive traversal.
success_urls = list(urls)



# # ---------------------------------------------------------------------------------------------------------
# Report how many usable target URLs were obtained, then persist them for stage 3.
utils.log(f"获得可用目的url数:{len(success_urls)}")
utils.save_file("step2ed.pkl", success_urls)