import requests
from bs4 import BeautifulSoup
import json
from urllib.parse import urljoin
import os

def get_page_info(url):
    """
    Fetch a web page and extract basic information from it.

    :param url: absolute URL of the page to fetch.
    :return: dict with keys 'url', 'status_code', 'title',
             'meta_description', 'content_length', 'links' and 'images',
             or None if the request or parsing failed.
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        # Let requests guess the charset from the body so non-UTF-8 pages decode correctly.
        response.encoding = response.apparent_encoding

        soup = BeautifulSoup(response.text, 'html.parser')

        # Look the description meta tag up once (the original queried it twice)
        # and tolerate a tag that is present but has no 'content' attribute.
        meta_tag = soup.find('meta', attrs={'name': 'description'})

        page_info = {
            'url': url,
            'status_code': response.status_code,
            # .string is None when <title> is empty or contains nested markup;
            # get_text() handles both and never yields None.
            'title': (soup.title.get_text(strip=True) or '无标题') if soup.title else '无标题',
            'meta_description': meta_tag.get('content', '无描述') if meta_tag else '无描述',
            'content_length': len(response.text),
            'links': [],
            'images': []
        }

        # Collect every anchor, resolving relative hrefs against the page URL.
        for link in soup.find_all('a', href=True):
            absolute_url = urljoin(url, link['href'])
            page_info['links'].append({
                'text': link.get_text(strip=True),
                'url': absolute_url
            })

        # Collect every image, resolving relative src values the same way.
        for img in soup.find_all('img', src=True):
            absolute_src = urljoin(url, img['src'])
            page_info['images'].append({
                'alt': img.get('alt', '无alt文本'),
                'src': absolute_src
            })

        return page_info

    except Exception as e:
        # Broad catch is deliberate: any network or parse failure is reported
        # once and converted into a None return for the caller to handle.
        print(f"爬取 {url} 时出错: {type(e).__name__} - {e}")
        return None

def save_results(data, format='both'):
    """
    Persist the scraped page info to files under ./output and report the paths.

    :param data: page-info dict as produced by get_page_info (keys 'url',
                 'status_code', 'title', 'meta_description', 'content_length',
                 'links', 'images').
    :param format: 'json', 'txt' or 'both' — which output file(s) to write.
    """
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs('output', exist_ok=True)

    # Save as JSON
    if format in ('json', 'both'):
        json_path = os.path.join('output', 'result.json')
        with open(json_path, 'w', encoding='utf-8') as f:
            # ensure_ascii=False keeps Chinese text readable in the file.
            json.dump(data, f, ensure_ascii=False, indent=2)
        print(f"JSON结果已保存到 {json_path}")

    # Save as plain text (summary plus a capped sample of links/images)
    if format in ('txt', 'both'):
        txt_path = os.path.join('output', 'result.txt')
        with open(txt_path, 'w', encoding='utf-8') as f:
            f.write("=== 页面分析结果 ===\n")
            f.write(f"URL: {data['url']}\n")
            f.write(f"状态码: {data['status_code']}\n")
            f.write(f"标题: {data['title']}\n")
            f.write(f"描述: {data['meta_description']}\n")
            f.write(f"内容长度: {data['content_length']} 字符\n\n")

            f.write("=== 前10个链接 ===\n")
            for link in data['links'][:10]:
                f.write(f"{link['text']} -> {link['url']}\n")

            f.write("\n=== 前5张图片 ===\n")
            for img in data['images'][:5]:
                f.write(f"{img['alt']} -> {img['src']}\n")
        print(f"文本结果已保存到 {txt_path}")

def print_summary(data):
    """
    Print a short summary of the scraped page info to the console.

    :param data: page-info dict as produced by get_page_info.
    """
    separator = "=" * 50
    print("\n" + separator)
    # Scalar fields first, driven by a label/key table.
    for label, key in (
        ("URL", 'url'),
        ("状态码", 'status_code'),
        ("标题", 'title'),
        ("描述", 'meta_description'),
    ):
        print(f"{label}: {data[key]}")
    # Then the collection counts.
    print(f"发现链接数: {len(data['links'])}")
    print(f"发现图片数: {len(data['images'])}")
    print(separator + "\n")

def main():
    """
    Interactive entry point: prompt for a URL, scrape it, print a summary,
    save the results, and optionally show a sample of the extracted data.
    """
    # Ask the user for the target URL.
    target_url = input("请输入要爬取的URL（例如 http://example.com）: ").strip()

    # Fetch and parse the page.
    print(f"\n正在爬取 {target_url} ...")
    page_data = get_page_info(target_url)

    if page_data:
        # Console summary.
        print_summary(page_data)

        # Persist the full result set.
        save_results(page_data, format='both')

        # Optionally show a short sample of the extracted data.
        # strip() tolerates stray whitespace such as "y " or "y\t".
        view_details = input("是否查看详细数据？(y/n): ").strip().lower()
        if view_details == 'y':
            print("\n=== 前5个链接 ===")
            for i, link in enumerate(page_data['links'][:5], 1):
                print(f"{i}. {link['text']} -> {link['url']}")

            print("\n=== 前3张图片 ===")
            for i, img in enumerate(page_data['images'][:3], 1):
                print(f"{i}. [{img['alt']}] -> {img['src']}")
    else:
        print("未能获取页面信息，请检查URL是否正确或网络连接")


if __name__ == "__main__":
    # Keep the script logic in main() so nothing leaks into module globals.
    main()