#

import hashlib
import os
import re
import sys
from urllib.parse import urljoin, urlparse

import requests

# Add the project root (three directory levels above this file) to Python's
# module search path so the `core.*` imports below resolve when this script
# is run directly rather than as part of the package.
project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
if project_root not in sys.path:
    sys.path.insert(0, project_root)

from core.web_spider.WebAIExtracto.core.website_extract import WebInfo
from newspaper import Article, build
import time
import traceback
from bs4 import BeautifulSoup
import requests
from core.web_spider.language_detact import get_language_code
import argparse
import json
from datetime import datetime


# Path of the JSON file that records which article URLs have already been scraped
CACHE_FILE = 'db/spider_cache.json'

def load_cache(cache_file=None):
    """Load the scrape cache from disk.

    cache_file: path of the JSON cache; defaults to the module-level
        CACHE_FILE when None (backward compatible with the old signature).

    Returns the cached mapping (url -> article data), or an empty dict when
    the file is missing, unreadable, or contains invalid JSON.
    """
    path = cache_file if cache_file is not None else CACHE_FILE
    if os.path.exists(path):
        try:
            with open(path, 'r', encoding='utf-8') as f:
                return json.load(f)
        # ValueError covers json.JSONDecodeError and bad encodings; a corrupt
        # cache is treated as empty rather than aborting the crawl.
        except (OSError, ValueError):
            return {}
    return {}

def save_cache(cache, cache_file=None):
    """Persist the scrape cache as UTF-8 JSON.

    cache: mapping of url -> article data; values may contain datetime
        objects, which are serialized to ISO-8601 strings.
    cache_file: destination path; defaults to the module-level CACHE_FILE
        when None (backward compatible with the old signature).
    """
    path = cache_file if cache_file is not None else CACHE_FILE
    # Only create the parent directory when the path actually has one:
    # os.makedirs('') raises FileNotFoundError for a bare filename.
    parent = os.path.dirname(path)
    if parent:
        os.makedirs(parent, exist_ok=True)

    def _json_default(obj):
        # datetime -> ISO string; anything else falls back to str() so one
        # odd value cannot abort the whole cache write.
        if isinstance(obj, datetime):
            return obj.isoformat()
        return str(obj)

    with open(path, 'w', encoding='utf-8') as f:
        json.dump(cache, f, ensure_ascii=False, indent=2, default=_json_default)

def download_image(img_url, save_dir='db/images'):
    """Download an image and return its local path (None on failure).

    img_url: absolute URL of the image.
    save_dir: directory to store downloads in; created if missing.

    The file name is the MD5 of the URL plus the URL path's extension
    ('.jpg' when the URL has none).  An already-downloaded file is reused
    without re-fetching.
    """
    try:
        os.makedirs(save_dir, exist_ok=True)

        # Derive the extension from the URL *path* so query strings such as
        # '?v=2' do not leak into the file name.
        img_hash = hashlib.md5(img_url.encode()).hexdigest()
        img_ext = os.path.splitext(urlparse(img_url).path)[1] or '.jpg'
        local_filename = os.path.join(save_dir, f"{img_hash}{img_ext}")

        # Already cached on disk: reuse it.
        if os.path.exists(local_filename):
            return local_filename

        # Stream the download; the with-block closes the connection even on
        # partial failures, and the timeout prevents hanging indefinitely.
        with requests.get(img_url, stream=True, timeout=30) as response:
            if response.status_code == 200:
                with open(local_filename, 'wb') as f:
                    for chunk in response.iter_content(1024):
                        f.write(chunk)
                return local_filename
        return None
    except Exception as e:
        print(f"下载图片失败: {e}")
        return None


def extract_article_content(link, media_name):
    """Fetch one article page and extract its content.

    link: absolute URL of the article page.
    media_name: media outlet name, used to choose the parsing language via
        get_language_code.

    Returns a dict with keys '标题' (title), '正文' (plain text),
    'html_content' (raw page HTML), 'html_text' (extracted article HTML
    with images localized, or plain text fallback), '时间' (publish time as
    a string) and '来源' (source URL) — or None when the page has no title
    or any step fails (best-effort scraping).
    """
    try:
        # Choose the language newspaper3k should parse with.
        language = get_language_code(media_name)

        # Fetch the raw HTML ourselves so the original markup (and its
        # images) can be preserved; newspaper3k only exposes plain text.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        response = requests.get(link, headers=headers, timeout=30)
        response.encoding = response.apparent_encoding  # auto-detect charset
        soup = BeautifulSoup(response.text, 'html.parser')
        html_content = response.text

        # Let newspaper3k extract the title, text and metadata.
        article = Article(link, language=language)
        article.download()
        article.parse()
        title = article.title
        text = article.text
        # Named publish_time so it no longer shadows the module-level
        # `import time` (the original bound a local called `time` here).
        publish_time = article.publish_date if article.publish_date else datetime.now()
        source = article.source_url

        # A page without a title is not a usable article.
        if not title:
            return None

        # Strip scripts/styles so the saved HTML is inert.
        for script in soup(['script', 'style']):
            script.decompose()

        # Heuristically locate the article body in the original markup.
        content = soup.find('div', class_=lambda x: x and ('article' in x.lower() or 'content' in x.lower()))
        if not content:
            content = soup.find('article')

        if content is not None:
            # FIX: the original tested isinstance(content, BeautifulSoup),
            # but soup.find returns a Tag, so the branch never ran and a raw
            # Tag object leaked into the result. Localize images, then
            # serialize the tag to HTML.
            for img in content.find_all('img'):
                if img.get('src'):
                    img_url = urljoin(link, img['src'])
                    local_path = download_image(img_url)
                    if local_path:
                        img['src'] = local_path
            html_text = content.prettify(formatter='html')
        else:
            # Fall back to newspaper3k's plain-text extraction.
            html_text = article.text

    except Exception as e:
        # Best-effort: report and skip this article.
        print(e)
        return None

    result = {
        '标题': title,
        '正文': text,
        'html_content': html_content,
        'html_text': html_text,
        '时间': str(publish_time) if publish_time else None,
        '来源': source
    }
    return result


def execute_task(url, media_name, column_name):
    """Crawl every 'owner' article link found on *url*, skipping cached ones.

    column_name is accepted for interface compatibility but is not used here.
    Each successfully scraped article is written to the cache immediately, so
    an interrupted run loses at most the article in flight.
    """
    cache = load_cache()

    spider = WebInfo(url)
    # Supported tool_type values: 'request', 'selenium', 'playwright'.
    spider.tool_type = 'playwright'
    page_info = spider.get_page_info(url=url, detail=True)

    for article_url in page_info['categorize_url']['owner']:
        print(article_url)

        # Skip links that were scraped in a previous run.
        if article_url in cache:
            print(f"跳过已爬取的链接: {article_url}")
            continue

        article = extract_article_content(article_url, media_name)
        if not article:
            continue
        print("爬取成功", article)
        cache[article_url] = article
        save_cache(cache)


def main():
    """CLI entry point: parse arguments and launch the quick news spider."""
    parser = argparse.ArgumentParser(description="快速新闻爬虫，用于抓取指定网站的最新文章。")
    # (flag, default, help) triples for the three string options.
    option_specs = (
        ('--url', "https://www.cena.com.cn/", "要抓取的目标网站URL"),
        ('--media-name', "示例媒体", "媒体名称"),
        ('--column-name', "新闻", "栏目名称"),
    )
    for flag, default, help_text in option_specs:
        parser.add_argument(flag, type=str, default=default, help=help_text)

    args = parser.parse_args()

    print(f"[*] 开始快速抓取任务: {args.url}")
    print(f"    - 媒体: {args.media_name}")
    print(f"    - 栏目: {args.column_name}")

    try:
        execute_task(args.url, args.media_name, args.column_name)
        print("[+] 任务执行完毕。")
    except Exception as e:
        print(traceback.format_exc())
        print(f"[!] 执行任务时发生错误: {e}")


# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
