"""
网络数据爬取程序
包含：网页爬取、API 调用、数据解析、文件存储等功能
"""

import requests
from bs4 import BeautifulSoup
import json
import csv
import time
import os
from urllib.parse import urljoin, urlparse
import pandas as pd


class WebScraper:
    """Small HTTP scraping helper around a persistent ``requests.Session``.

    Provides throttled GET helpers for HTML and JSON plus a file-persistence
    utility used by the example functions below.
    """

    def __init__(self, delay=1):
        """
        Initialize the scraper.

        delay: seconds to sleep after each successful request so we do not
               hit the target server too frequently.
        """
        self.delay = delay
        self.session = requests.Session()
        # Pretend to be a regular desktop browser; some sites reject the
        # default python-requests User-Agent.
        self.session.headers.update({
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        })

    def get_html(self, url, params=None):
        """Fetch *url* and return the response body as text, or None on any request error."""
        try:
            print(f"正在获取: {url}")
            response = self.session.get(url, params=params, timeout=10)
            response.raise_for_status()  # raise on non-2xx status codes
            # NOTE(review): forcing UTF-8 assumes the target site serves UTF-8;
            # for other sites consider response.apparent_encoding instead.
            response.encoding = 'utf-8'

            # Throttle so consecutive requests are spaced out.
            time.sleep(self.delay)

            return response.text
        except requests.exceptions.RequestException as e:
            print(f"请求失败: {e}")
            return None

    def get_json(self, url, params=None):
        """Fetch *url* and return the decoded JSON payload, or None on any request error."""
        try:
            print(f"正在获取 JSON 数据: {url}")
            response = self.session.get(url, params=params, timeout=10)
            response.raise_for_status()

            time.sleep(self.delay)

            return response.json()
        except requests.exceptions.RequestException as e:
            print(f"JSON 请求失败: {e}")
            return None

    def save_to_file(self, data, filename, format='json'):
        """Persist *data* to data/<filename>.

        format: 'json' (any JSON-serializable object), 'txt' (a string),
                or 'csv' (a non-empty list of flat dicts).
        Returns True on success, False on failure or unsupported input.
        """
        os.makedirs('data', exist_ok=True)  # ensure the output directory exists
        filepath = os.path.join('data', filename)

        try:
            if format == 'json':
                with open(filepath, 'w', encoding='utf-8') as f:
                    json.dump(data, f, ensure_ascii=False, indent=2)
            elif format == 'txt':
                with open(filepath, 'w', encoding='utf-8') as f:
                    f.write(data)
            elif format == 'csv' and isinstance(data, list) and data and isinstance(data[0], dict):
                # Column order is taken from the first record's keys.
                keys = data[0].keys()
                with open(filepath, 'w', encoding='utf-8', newline='') as f:
                    writer = csv.DictWriter(f, fieldnames=keys)
                    writer.writeheader()
                    writer.writerows(data)
            else:
                # Bug fix: unsupported formats (or CSV input that is not a
                # non-empty list of dicts) previously fell through, wrote
                # nothing, yet still reported success and returned True.
                print(f"保存文件失败: 不支持的格式或数据类型: {format}")
                return False

            print(f"数据已保存到: {filepath}")
            return True
        except Exception as e:
            print(f"保存文件失败: {e}")
            return False


def example_1_scrape_quotes():
    """
    Example 1: scrape quote data from http://quotes.toscrape.com.

    Fetches the first two pages, extracts text/author/tags for each quote,
    and saves the results to data/quotes.json and data/quotes.csv.
    Returns the list of scraped quote dicts (possibly empty).
    """
    print("=== 示例1：爬取名言网站数据 ===")

    scraper = WebScraper(delay=1)
    base_url = "http://quotes.toscrape.com"

    quotes_data = []

    # Crawl only the first two pages as a demonstration.
    for page in range(1, 3):
        url = f"{base_url}/page/{page}/"
        html = scraper.get_html(url)

        if not html:
            continue

        soup = BeautifulSoup(html, 'html.parser')
        quotes = soup.find_all('div', class_='quote')

        for quote in quotes:
            # Bug fix: guard each lookup — a missing element would otherwise
            # raise AttributeError on .get_text() and abort the whole crawl.
            text_el = quote.find('span', class_='text')
            author_el = quote.find('small', class_='author')
            if text_el is None or author_el is None:
                continue
            text = text_el.get_text(strip=True)
            author = author_el.get_text(strip=True)
            tags = [tag.get_text(strip=True) for tag in quote.find_all('a', class_='tag')]

            quotes_data.append({
                'text': text,
                'author': author,
                'tags': ', '.join(tags),
                'page': page
            })

            print(f"爬取名言: {text[:50]}... - {author}")

    # Persist only when something was actually scraped.
    if quotes_data:
        scraper.save_to_file(quotes_data, 'quotes.json', 'json')
        scraper.save_to_file(quotes_data, 'quotes.csv', 'csv')

    return quotes_data


def example_2_api_data():
    """
    Example 2: fetch data from a public REST API.

    Pulls user records from the JSONPlaceholder fake-data API, keeps a
    handful of fields per user, stores them as JSON and CSV, and returns
    the simplified list (or None when nothing was retrieved).
    """
    print("\n=== 示例2：调用公开 API 获取数据 ===")

    scraper = WebScraper(delay=0.5)
    base_url = "https://jsonplaceholder.typicode.com"

    # Fetch the full user list from the API.
    users_data = scraper.get_json(f"{base_url}/users")

    if not users_data:
        return None

    print(f"获取到 {len(users_data)} 个用户")

    # Keep only the fields we care about for each user record.
    simplified_users = [
        {
            'id': user['id'],
            'name': user['name'],
            'username': user['username'],
            'email': user['email'],
            'city': user['address']['city'],
            'company': user['company']['name'],
        }
        for user in users_data
    ]

    scraper.save_to_file(simplified_users, 'users.json', 'json')
    scraper.save_to_file(simplified_users, 'users.csv', 'csv')

    return simplified_users


def example_3_scrape_news():
    """
    Example 3: scrape news headlines (demo HTML).

    Note: always respect a site's robots.txt and terms of service.
    Parses an inlined mock news page, prints each headline, saves the
    parsed records to data/news.json, and returns them.
    """
    print("\n=== 示例3：爬取新闻标题（示例） ===")

    # This example parses a canned news page.  For real use, replace it
    # with a real URL and follow the site's rules.

    html_content = """
    <html>
    <head><title>新闻网站</title></head>
    <body>
        <div class="news-container">
            <article class="news-item">
                <h2 class="title">Python 3.11 发布，性能大幅提升</h2>
                <p class="summary">最新版本的 Python 在性能方面有显著改进...</p>
                <span class="date">2023-10-01</span>
            </article>
            <article class="news-item">
                <h2 class="title">人工智能助力科学研究</h2>
                <p class="summary">AI 技术在各个科学领域发挥重要作用...</p>
                <span class="date">2023-10-02</span>
            </article>
            <article class="news-item">
                <h2 class="title">Web 开发新趋势</h2>
                <p class="summary">现代 Web 开发框架不断演进...</p>
                <span class="date">2023-10-03</span>
            </article>
        </div>
    </body>
    </html>
    """

    # Parse the mock page.
    soup = BeautifulSoup(html_content, 'html.parser')

    # Field name, containing tag, and fallback value when the tag is absent.
    field_specs = (
        ('title', 'h2', '无标题'),
        ('summary', 'p', '无摘要'),
        ('date', 'span', '无日期'),
    )

    news_data = []
    for item in soup.find_all('article', class_='news-item'):
        record = {}
        for field, tag_name, fallback in field_specs:
            node = item.find(tag_name, class_=field)
            record[field] = node.get_text(strip=True) if node else fallback
        news_data.append(record)

    # A real crawl would instead look like:
    #   scraper = WebScraper()
    #   html = scraper.get_html('https://real-news-site.example')
    #   ... then parse it with BeautifulSoup.

    for news in news_data:
        print(f"新闻: {news['title']} - {news['date']}")

    scraper = WebScraper()
    scraper.save_to_file(news_data, 'news.json', 'json')

    return news_data


def example_4_advanced_scraping():
    """
    Example 4: advanced scraping techniques — query params, links, URL parsing.

    Purely illustrative: no network request is made.  Returns a dict listing
    the techniques demonstrated or mentioned.
    """
    print("\n=== 示例4：高级爬取技巧 ===")

    # Bug fix: the original also instantiated an unused WebScraper here,
    # creating a requests.Session that was never used or closed.

    # 1. Example query parameters for a parameterized request
    #    (would be passed as scraper.get_html(url, params=params)).
    params = {
        'page': 1,
        'limit': 10,
        'sort': 'newest'
    }
    print(f"请求参数示例: {params}")

    # 2. Resolving a relative link against a base URL.
    base_url = "https://example.com"
    relative_link = "/articles/123"
    absolute_url = urljoin(base_url, relative_link)
    print(f"相对链接转绝对链接: {absolute_url}")

    # 3. Decomposing a URL into its components.
    parsed_url = urlparse(absolute_url)
    print(f"URL 组件: 协议={parsed_url.scheme}, 域名={parsed_url.netloc}, 路径={parsed_url.path}")

    # Real scraping code would be written against the target site's structure.
    print("高级爬取技巧演示完成")

    return {
        'techniques': [
            '带参数请求',
            '相对链接处理',
            'URL 解析',
            '动态内容处理（需要 Selenium）',
            '登录会话保持',
            '反爬虫绕过'
        ]
    }


def analyze_data():
    """
    Simple analysis of previously scraped quote data.

    Loads data/quotes.json, prints per-author quote counts and a pandas
    preview, and returns the resulting DataFrame — or None when the data
    file has not been generated yet.
    """
    print("\n=== 数据分析示例 ===")

    # Guard clause: bail out early if the crawl examples have not run yet.
    try:
        with open('data/quotes.json', 'r', encoding='utf-8') as fh:
            quotes = json.load(fh)
    except FileNotFoundError:
        print("数据文件不存在，请先运行爬取示例")
        return None

    print(f"总共爬取 {len(quotes)} 条名言")

    # Tally how many quotes each author contributed.
    tally = {}
    for entry in quotes:
        name = entry['author']
        tally[name] = tally.get(name, 0) + 1

    print("\n作者统计:")
    for name, count in sorted(tally.items(), key=lambda pair: pair[1], reverse=True):
        print(f"  {name}: {count} 条名言")

    # Hand the records to pandas for a quick structural overview.
    df = pd.DataFrame(quotes)
    print(f"\n数据框形状: {df.shape}")
    print(f"\n数据预览:")
    print(df.head())

    return df


def main():
    """Entry point: run every demo in sequence and list the generated files."""
    print("Python 网络数据爬取程序")
    print("=" * 50)

    # Make sure the output directory exists before any example runs.
    os.makedirs('data', exist_ok=True)

    # Run each demo; return values are kept for inspection/debugging.
    quotes_data = example_1_scrape_quotes()
    users_data = example_2_api_data()
    news_data = example_3_scrape_news()
    advanced_data = example_4_advanced_scraping()

    # Analyze whatever the crawl produced.
    df = analyze_data()

    print("\n" + "=" * 50)
    print("爬取程序执行完成！")
    print("生成的文件:")
    for entry in os.listdir('data'):
        print(f"  - data/{entry}")


if __name__ == "__main__":
    main()