import requests
from bs4 import BeautifulSoup
import time
import os
import json
import uuid
from database_operations import insert_news_data
from sina_news_api_crawler import call_java_interface


def crawl_sina_finance_article(url):
    """Fetch a Sina Finance article page and extract its main fields.

    Parameters
    ----------
    url : str
        Absolute URL of the article page.

    Returns
    -------
    dict | None
        Dict with keys title/description/publish_time/source/content/url/
        crawl_time on success; None when the request or the parsing fails.
    """
    # Browser-like headers so the site serves the regular HTML page.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Connection': 'keep-alive'
    }

    try:
        # Bug fix: `proxies=None` is the requests default and does NOT disable
        # proxies -- environment proxies (HTTP_PROXY etc.) were still honored.
        # Explicit per-scheme None entries actually bypass them.
        response = requests.get(url, headers=headers, timeout=10,
                                proxies={'http': None, 'https': None})
        response.raise_for_status()  # raise on 4xx/5xx status codes
        response.encoding = 'utf-8'  # Sina pages are UTF-8; skip autodetection

        soup = BeautifulSoup(response.text, 'html.parser')

        # Title: new layout uses h1.main-title, older pages use h1.l_tit.
        title = soup.select_one('h1.main-title') or soup.select_one('h1.l_tit')
        title_text = title.text.strip() if title else "标题未找到"

        # Body container: div.article (new layout) or div#artibody (legacy).
        article_content = soup.select_one('div.article') or soup.select_one('div#artibody')

        # Collect paragraph texts, dropping empties and the editor credit line.
        paragraphs = []
        if article_content:
            for p in article_content.select('p'):
                text = p.text.strip()
                if text and not text.startswith('责任编辑'):
                    paragraphs.append(text)

        # Publish time and source sit in dedicated <span> elements.
        publish_time = soup.select_one('span.date')
        time_text = publish_time.text.strip() if publish_time else "时间未找到"

        source = soup.select_one('span.source')
        source_text = source.text.strip() if source else "来源未找到"

        result = {
            'title': title_text,
            'description': title_text,  # page has no separate summary; reuse title
            'publish_time': time_text,
            'source': source_text,
            'content': '\n\n'.join(paragraphs),
            'url': url,
            'crawl_time': time.strftime('%Y-%m-%d %H:%M:%S')
        }

        return result

    except requests.exceptions.RequestException as e:
        print(f"请求错误: {e}")
        return None
    except Exception as e:
        print(f"解析错误: {e}")
        return None


def save_to_file(data, filename):
    """Persist a crawled article both as JSON and as a readable text file.

    Parameters
    ----------
    data : dict
        Article record with at least the keys 'title', 'publish_time',
        'source', 'url', 'crawl_time' and 'content'.
    filename : str
        Target path for the JSON file; a .txt twin is written next to it.

    Returns
    -------
    tuple[str, str]
        (txt_filename, json_filename) of the files that were written.
    """
    # The absolute path's dirname is never empty, so the original special-case
    # for bare filenames ("... if os.path.dirname(filename) else '.'") was
    # redundant; one unconditional makedirs covers both cases.
    os.makedirs(os.path.dirname(os.path.abspath(filename)), exist_ok=True)

    # JSON copy: keep non-ASCII (Chinese) text readable on disk.
    with open(filename, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=4)

    # Human-readable text copy next to the JSON file.
    txt_filename = os.path.splitext(filename)[0] + '.txt'
    with open(txt_filename, 'w', encoding='utf-8') as f:
        f.write(f"标题: {data['title']}\n")
        f.write(f"发布时间: {data['publish_time']}\n")
        f.write(f"来源: {data['source']}\n")
        f.write(f"URL: {data['url']}\n")
        f.write(f"爬取时间: {data['crawl_time']}\n\n")
        f.write("正文内容:\n")
        f.write(data['content'])

    return txt_filename, filename



def CATEGORY_MAP():
    """Return the mapping from Chinese category names to Sina roll lid codes.

    The lid values are the category ids expected by the Sina roll-feed API
    (see crawlArticles).
    """
    pairs = (
        ("国内", "2510"),
        ("国际", "2511"),
        ("社会", "2669"),
        ("体育", "2512"),
        ("娱乐", "2513"),
        ("军事", "2514"),
        ("科技", "2515"),
        ("财经", "2516"),
        ("股市", "2517"),
        ("美股", "2518"),
    )
    return dict(pairs)


def crawlArticles(lid, page):
    """Crawl one page of a Sina news roll feed and store each article.

    Generator: for each article that was crawled and inserted successfully it
    yields two SSE-formatted lines ("data: <title>" and "data: <local url>").

    Parameters
    ----------
    lid : int | str
        Sina roll category id (see CATEGORY_MAP).
    page : int
        1-based page number of the roll listing.
    """
    from sina_news_api_crawler import crawl_sina_news_api

    # The r/callback/_ query params mimic the browser's JSONP cache-busting.
    api_url = f"https://feed.mix.sina.com.cn/api/roll/get?pageid=153&lid={lid}&k=&num=50&page={page}&r={str(time.time()).replace('.', '')}&callback=jQuery111205078716479550882_{int(time.time()*1000)}&_={int(time.time()*1000+1)}"
    news_data = crawl_sina_news_api(api_url)

    if news_data and 'news_list' in news_data:
        for news in news_data['news_list']:
            url = news['url']
            print(f"开始爬取文章: {url}")
            article_data = crawl_sina_finance_article(url)
            # Bug fix: test for a failed crawl BEFORE touching the dict.
            # The old code assigned article_data['id'] first, so a None
            # result raised TypeError instead of reaching the failure branch.
            if article_data:
                article_data['id'] = str(uuid.uuid4())
                # Upload the body via the Java service; it returns a content URL.
                contentUrl = call_java_interface(article_data['id'], article_data['content'])
                article_data['contentUrl'] = contentUrl
                article_data['categoryId'] = lid
                article_data['coverImage'] = 'cover_image.jpg'
                insert_news_data(article_data)

                yield f"data: {article_data['title']}\n\n"
                yield f"data: http://localhost/news?id={article_data['id']}\n\n"
            else:
                print("爬取失败!")
    else:
        print("未能获取新闻数据!")


if __name__ == "__main__":
    # Bug fix: crawlArticles is a generator, so the original bare call
    # `crawlArticles(2511, 1)` only created the generator object and never
    # executed its body. Iterating it actually runs the crawl.
    for message in crawlArticles(2511, 1):
        print(message, end="")