import requests
from bs4 import BeautifulSoup
import time
import yaml
from rich.progress import track
import pickle
# Shared HTTP request headers: a mobile Chrome/Edge user-agent string so that
# target sites serve normal pages instead of rejecting the default requests UA.
headers={
    "user-agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Mobile Safari/537.36 Edg/112.0.1722.34"
}
# Check crawlability of a single URL: test whether a key string appears in the fetched HTML.
def check_page(url, val_code="", save=False, show=True, timeout=10.0):
    """Check whether a single URL is crawlable by testing that *val_code*
    occurs in the fetched HTML.

    Args:
        url: Page to fetch (uses the module-level ``headers``).
        val_code: Key string expected in the response body. The empty-string
            default is a substring of any text, so it matches every page.
        save: If True, dump the fetched HTML to "html1.html" via save_file.
        show: If True, log the pass/fail result.
        timeout: Seconds to wait for the HTTP response. Added so a stalled
            server cannot hang the whole crawl indefinitely; the default
            keeps the call backward-compatible.

    Returns:
        True when val_code is found in the page text, otherwise False.
    """
    requests_result = requests.get(url, headers=headers, timeout=timeout)
    # Force UTF-8 rather than trusting the (often wrong) declared charset.
    requests_result.encoding = 'utf-8'
    if save:
        save_file("html1.html", requests_result.text)
    passed = val_code in requests_result.text
    if show:
        log("该网站可爬性测试：通过" if passed else "该网站可爬性测试：不通过")
    return passed
    
# Check crawlability of multiple URLs: test whether a key string appears in each fetched HTML.
def check_pages(all_pages: list, val_code: str, interval: float = 1.0):
    """Probe every URL in *all_pages* for crawlability.

    Sleeps *interval* seconds before each request to throttle the crawl,
    logs every page that fails the check, then logs a summary line.

    Returns:
        The subset of *all_pages* whose fetched HTML contains *val_code*.
    """
    crawlable = []
    banner = "[\033[32m验证" + str(len(all_pages)) + "个url的可爬性中...\033[0m" + "]:"
    for page in track(all_pages, banner):
        time.sleep(interval)
        if not check_page(page, val_code, show=False):
            log("不可爬页面:" + page, "warn")
            continue
        crawlable.append(page)
    total = len(all_pages)
    good = len(crawlable)
    log("目标url数：" + str(total) + "；成功：" + str(good) + "；失败：" + str(total - good))
    return crawlable

def get_soup(url: str, timeout: float = 10.0):
    """Fetch *url* and return its body parsed as a BeautifulSoup tree.

    Args:
        url: Page to fetch (uses the module-level ``headers``).
        timeout: Seconds to wait for the HTTP response. Added so a stalled
            server cannot hang the caller; the default keeps existing
            call sites working unchanged.

    Returns:
        A BeautifulSoup object built with the 'html.parser' backend.
    """
    requests_result = requests.get(url, headers=headers, timeout=timeout)
    # Force UTF-8 rather than trusting the (often wrong) declared charset.
    requests_result.encoding = 'utf-8'
    return BeautifulSoup(requests_result.text, 'html.parser')

def get_elements(url, search_by):
    """Fetch *url* and return all elements whose attributes match *search_by*.

    Args:
        url: Page to fetch.
        search_by: Attribute mapping forwarded to
            ``BeautifulSoup.find_all(attrs=...)``, e.g. {"class": "item"}.

    Returns:
        A list of matching bs4 Tag objects (empty if none match).
    """
    # Delegate to get_soup so the fetch/encoding/parse logic lives in one
    # place instead of being duplicated here.
    return get_soup(url).find_all(attrs=search_by)
    
def save_file(name: str, file):
    """Persist *file* to disk, dispatching on the extension of *name*.

    txt / html: *file* is text, written as UTF-8.
    pkl:        *file* is any picklable object, dumped at highest protocol.
    jpg / png:  *file* is an image URL; the image is downloaded and the
                raw bytes are written.
    Any other extension is silently ignored.
    """
    cate = name.split(".")[-1]
    if cate in ("txt", "html"):
        # The two text branches were identical; merged into one.
        with open(name, "w", encoding='utf-8') as f:
            f.write(file)
    elif cate == "pkl":
        with open(name, 'wb') as f:
            pickle.dump(file, f, -1)
    # BUG FIX: the original condition was `cate=="jpg" or "png"`, which is
    # always truthy, so EVERY unrecognized extension fell into this
    # image-download branch and tried to treat *file* as a URL.
    elif cate in ("jpg", "png"):
        img_data = requests.get(file, headers=headers)
        with open(name, "wb") as f:  # "wb" = write binary
            f.write(img_data.content)
def get_pkl_data(name: str):
    """Load and return the pickled object stored in the file *name*."""
    with open(name, 'rb') as handle:
        return pickle.load(handle)
def log(msg, cate="info"):
    """Print *msg* prefixed with a colored severity tag.

    *cate* selects the tag: "info" (green), "error" (red) or "warn"
    (yellow); any other value prints nothing.
    """
    palette = {"info": "32mINFO", "error": "31mERROR", "warn": "33mWARN"}
    tag = palette.get(cate)
    if tag is not None:
        print("[\033[" + tag + "\033[0m]:" + msg)

