# Stage 3: fetch the data (cover, title, intro) for every target URL.
# URLs that raise an error are logged and excluded from the result.
import utils
import copy
from rich.progress import track
# Load the URLs that survived stage 2 — these are the crawl targets for this stage.
success_urls=utils.get_pkl_data("step2ed.pkl")
# Fields extracted per URL; `data` maps each field to a list that grows
# in parallel across URLs. `fail_num` counts URLs that raised while scraping.
attrs = ["cover", "title", "intro"]
data = {field: [] for field in attrs}
fail_num = 0
# ---------------------------------------------------------------------------------------------------------
# for index in track(range(len(success_urls)),"正在获取"+str(len(success_urls))+"个目标url的数据"):
#     url=success_urls[index]
#     soup=utils.get_soup(success_urls[index])
#     cover=soup.find_all(attrs = {"class":"detail"})[0].find("img").attrs['src']
#     title=soup.find_all(attrs = {"class":"detail"})[0].find_all("div")[0].text
#     intro=soup.find_all(attrs = {"class":"detail"})[0].find_all("div")[1].text
#     data["cover"].append(cover)
#     data["title"].append(title)
#     data["intro"].append(intro)
# print(data)
# Scrape cover/title/intro from each target URL, checkpointing the
# accumulated data to step3ed.pkl every 20 URLs and once at the end.
for index in track(range(len(success_urls)),"正在获取"+str(len(success_urls))+"个目标url的数据"):
    url = success_urls[index]
    try:
        soup = utils.get_soup(url)
        # Hoist the ".detail" container lookup — the original repeated it 3x.
        detail = soup.find_all(attrs={"class": "detail"})[0]
        # Extract all three fields BEFORE appending, so a mid-item failure
        # cannot leave the three parallel lists with mismatched lengths.
        cover = detail.find("img").attrs['src']
        divs = detail.find_all("div")
        title = divs[0].text
        intro = divs[1].text
    except Exception:
        # BUG FIX: the original bare `except:` ended with `break`, which
        # aborted the entire run on the first bad URL and made the final
        # success/failure tally wrong. Log, count, and move on instead.
        utils.log("目标地址出现错误："+success_urls[index])
        fail_num += 1
        continue
    data["cover"].append(cover)
    data["title"].append(title)
    data["intro"].append(intro)
    # BUG FIX: original condition `index % 20` was truthy on 19 of every
    # 20 iterations (checkpointing almost every URL), and
    # `index == len(success_urls)` was never true (max index is len-1).
    if index != 0 and index % 20 == 0:
        utils.save_file("step3ed.pkl", data)
# Final save guarantees the tail of the data (and runs shorter than 20
# URLs) is persisted — the original could drop up to 19 items.
utils.save_file("step3ed.pkl", data)
utils.log("目标url数："+str(len(success_urls))+"；成功："+str(len(success_urls)-fail_num)+"；失败："+str(fail_num))





# ---------------------------------------------------------------------------------------------------------
# utils.log("获得可用目的url数:"+str(len(success_urls)))
# utils.save_file("step3ed.pkl",success_urls)