from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
# Sample site records; get_favicon_url() below resolves each site's favicon URL.

# Sample website records consumed by the (commented-out) demo loop at the
# bottom of the file. Fields per record:
#   'id'        - record identifier (stored as a string)
#   'name'      - display name of the site
#   'address'   - homepage URL, passed to get_favicon_url()
#   'tip'       - human-readable description
#   'icon'      - favicon URL; empty until resolved
#   'join_time' - string counter; exact semantics unclear from this file — TODO confirm
#   'users'     - string counter; presumably a user count — verify against caller
web_list = [
    {
        'id': '1',
        'name': '百度',
        'address': 'https://www.baidu.com',
        'tip': '这是百度地址',
        'icon': '',
        'join_time': '2',
        'users': '2',
    }, {
        'id': '2',
        'name': 'bilibili',
        'address': 'https://www.bilibili.com',
        'tip': '这是bilibili地址',
        'icon': '',
        'join_time': '2',
        'users': '1',
    }
]


def get_favicon_url(url):
    """Fetch *url* and return the absolute URL of its favicon.

    Returns:
        str: the resolved favicon URL when the page declares a
            ``<link rel="icon">`` / ``<link rel="shortcut icon">``;
            the fallback ``'default_icon.png'`` when no icon link exists.
        None: when the HTTP request fails (error is printed, not raised).
    """
    headers = {
        # Browser-like UA: some sites serve different markup to bots.
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0',
    }
    try:
        # timeout so a stalled server cannot hang the caller forever
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()  # raise on 4xx/5xx
    except requests.RequestException as e:
        print(f"请求 {url} 时出错: {e}")
        return None

    soup = BeautifulSoup(response.text, 'html.parser')

    for link in soup.find_all('link'):
        # BeautifulSoup returns 'rel' as a list of tokens, e.g. ['shortcut', 'icon']
        rel = link.get('rel')
        if rel and ('icon' in rel or 'shortcut' in rel):
            href = link.get('href')
            if not href:
                # <link rel="icon"> with no/empty href — skip instead of
                # crashing on None.startswith() as the original code did.
                continue
            # urljoin correctly resolves scheme-relative (//host/...),
            # root-relative (/...) and page-relative hrefs against the
            # final response URL (honors redirects), unlike manual string
            # concatenation which breaks when the base URL has a path.
            return urljoin(response.url, href)

    # No favicon declared in the markup — return the default icon.
    return 'default_icon.png'


# Fetch and print each website's favicon URL
# for website in web_list:
#     favicon_url = get_favicon_url(website['address'])
#     if favicon_url:
#         print(f"favicon URL of {website['address']} is: {favicon_url}")
#     else:
#         print(f"could not get favicon URL for {website['address']}")