

import requests #先导入爬虫的库，不然调用不了爬虫的函数
from bs4 import BeautifulSoup
class hostsCrawler:
    """
    Resolve a host's IP information by scraping ip.tool.chinaz.com.

    Adapted from a CSDN blog post by "TT图图" (original article, CC 4.0 BY-SA
    license; keep attribution when redistributing):
    https://blog.csdn.net/m0_73720982/article/details/126990696
    """

    # Browser-like User-Agent so the lookup site serves its normal HTML page
    # instead of blocking the request as a bot.
    headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
        }

    @staticmethod
    def getResponse(url, timeout=10):
        """Issue a GET request for *url* using the shared browser headers.

        :param url: the URL to fetch.
        :param timeout: seconds before the request is aborted. Added with a
            default so existing callers are unaffected; without it a stalled
            server would make ``requests.get`` block indefinitely.
        :returns: the ``requests.Response`` object.
        """
        return requests.get(url, headers=hostsCrawler.headers, timeout=timeout)

    @staticmethod
    def url_crawler(craw_url="https://ip.tool.chinaz.com/github.com"):
        """Scrape *craw_url* and return the host's resolved address info.

        :param craw_url: a chinaz.com IP-lookup page for the target host.
        :returns: dict with keys:
            ``"name"`` — the queried hostname,
            ``"ip"``   — the resolved IP address,
            ``"dig"``  — the physical address / location text,
            ``"dns"``  — DNS / location info (two page layouts are tried).
        :raises IndexError: if the page does not contain the expected spans.
        """
        print("尝试爬取:" + craw_url)
        response = hostsCrawler.getResponse(craw_url)
        soup = BeautifulSoup(response.text, 'html.parser')

        # The result table renders hostname / IP / physical address in
        # consecutive spans sharing this CSS class, in that order.
        scr_arr = soup.find_all("span", class_="Whwtdhalf w15-0 lh45")
        dnsname = scr_arr[0].contents[0]
        ip_addr = str(scr_arr[1].contents[0]).strip()
        dig_addr = str(scr_arr[2].contents[0]).strip()
        try:
            # Primary layout: DNS info sits inside a wider span column.
            dns_addr = str(soup.find_all("span", class_="Whwtdhalf w45-0 lh45")[0].contents[1].text).strip()
        except Exception as e:
            # Fallback layout: the site sometimes renders the location under
            # an element with id="infoLocation" instead; best-effort retry.
            print("错误", e, "\n尝试另一种方式获取")
            dns_addr = str(soup.find(id="infoLocation").contents[0].text).strip()
        return {"ip": ip_addr, "dig": dig_addr, "dns": dns_addr, "name": dnsname}



