import re
from urllib.parse import urljoin, urlparse

import chardet
import favicon
import requests
from bs4 import BeautifulSoup

def get_website_business_card(url):
    """Scrape basic "business card" metadata from a website.

    Collects the page title, a logo URL (favicon first, first <img> as a
    fallback), the meta description, meta keywords, and the host name.

    Args:
        url: Full website URL including scheme, e.g. "https://example.com".

    Returns:
        dict with keys 'title', 'logo', 'imageformat', 'description',
        'keywords', 'domain'; or None when the HTTP request fails.
    """
    try:
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36'}
        # Timeout so a dead host cannot hang the call forever.
        response = requests.get(url, headers=headers, timeout=10)

        # Detect the real encoding from the raw bytes; servers often
        # mislabel it in their headers.
        raw_html = response.content
        encoding = chardet.detect(raw_html)['encoding']
        response.encoding = encoding
        # from_encoding is ignored (with a warning) when BeautifulSoup is
        # given already-decoded str input, so it is not passed here.
        soup = BeautifulSoup(response.text, 'html.parser')
        business_card = {}

        # 1. Page title
        title = soup.title.string if soup.title else None
        business_card['title'] = title

        # 2. Logo: prefer the favicon; favicon.get() performs its own HTTP
        # requests, so it belongs inside the try and not a cosmetic one.
        logo_link = None
        business_card['imageformat'] = None
        try:
            icons = favicon.get(url)
        except Exception as e:
            print(f"下载图标时出现错误: {e}")
            icons = []
        if icons:
            # The first icon is usually the primary favicon.
            main_icon = icons[0]
            logo_link = main_icon.url
            business_card['imageformat'] = main_icon.format
        else:
            print("未找到该网站的图标。")
        # Fallback: first <img> on the page (heuristic, may not be a logo).
        if not logo_link:
            img_tags = soup.find_all('img')
            if img_tags:
                src = img_tags[0].get('src')
                # urljoin resolves relative paths, "/abs" and "//host/..."
                # forms correctly, unlike naive string concatenation.
                logo_link = urljoin(url, src) if src else None
        business_card['logo'] = logo_link

        # 3. Description from <meta name="description">
        description = None
        meta_tags = soup.find_all('meta')
        for meta in meta_tags:
            if meta.get('name') == 'description':
                description = meta.get('content')
                break
        business_card['description'] = description

        # 4. Keywords from <meta name="keywords">; guard against a tag
        # with no content attribute (meta.get returns None).
        keywords = []
        for meta in meta_tags:
            if meta.get('name') == 'keywords':
                content = meta.get('content')
                keywords = content.split(',') if content else []
                break
        business_card['keywords'] = keywords

        # 5. Host name of the site (includes port/userinfo if present).
        business_card['domain'] = urlparse(url).netloc

        return business_card
    except requests.RequestException as e:
        print(f"请求网页时出错: {e}")
        return None

def randon_image():
    """Fetch a random Bing wallpaper image (300 px) from the vvhan API.

    NOTE(review): the name "randon" is a typo for "random", but it is kept
    so existing callers do not break.

    Returns:
        Raw image bytes, or None when the request fails or the server
        answers with an HTTP error status.
    """
    try:
        url = "https://api.vvhan.com/api/bing?rand=sj&size=300"
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.119 Safari/537.36'}

        # Timeout prevents hanging forever on a dead host.
        data = requests.get(url, headers=headers, timeout=10)
        # Without this, a 4xx/5xx error page body would be returned as if
        # it were image data. HTTPError is a RequestException, so callers
        # still just see None on failure.
        data.raise_for_status()
        return data.content
    except requests.RequestException as e:
        print(f"请求网页时出错: {e}")
        return None

if __name__ == '__main__':
    # Module is intended for import; no command-line behavior yet.
    pass

