from flask import Flask, request, jsonify
import asyncio
from crawl4ai import AsyncWebCrawler, BrowserConfig, CrawlerRunConfig,CacheMode
import aiohttp
import urllib.parse
from bs4 import BeautifulSoup
from langchain_deepseek.chat_models import ChatDeepSeek
import time
from langchain.schema import HumanMessage, SystemMessage
import os
from flask_cors import CORS
from wscn_api import fetch_wscn_news
from us_stock_api import fetch_us_stock_news
from hk_stock_api import fetch_hk_stock_news
from ws_cn_stock_api import fetch_cn_stock_news
import logging
from utils import async_retry, get_token_count
import tiktoken

# Logging filter that stamps every record with a mutable "region" attribute,
# so the log format's %(region)s placeholder always resolves.
class RegionFilter(logging.Filter):
    """Attach the currently-processed region name to each log record."""

    def __init__(self):
        # BUG FIX: initialize the base Filter as well (sets up the
        # name-based filtering state logging.Filter relies on).
        super().__init__()
        # Mutated by callers (e.g. fetch_url_content) before logging.
        self.region = ''

    def filter(self, record):
        # Tag the record with the current region; never suppress records.
        record.region = self.region
        return True

# Configure root logging: INFO level, console + UTF-8 file output.
# NOTE: the format string requires a "region" attribute on every record;
# records emitted through this module's `logger` get it from `region_filter`.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - [%(region)s] - %(message)s',
    handlers=[
        logging.StreamHandler(),  # console output
        logging.FileHandler('news_analyzer.log', encoding='utf-8')  # file output
    ]
)
logger = logging.getLogger(__name__)
region_filter = RegionFilter()  # shared filter; its .region is mutated per request
logger.addFilter(region_filter)

app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the frontend

# Response headers: ensure returned JSON is UTF-8 and keeps non-ASCII
# characters (e.g. Chinese text) unescaped in the payload.
app.config['JSON_AS_ASCII'] = False
app.config['JSONIFY_MIMETYPE'] = 'application/json; charset=utf-8'

# Load application configuration (API keys, etc.) from config.yaml.
import yaml

# BUG FIX: open with explicit UTF-8 so the config parses identically
# regardless of the platform's default locale encoding (the file may
# contain non-ASCII text).
with open('config.yaml', 'r', encoding='utf-8') as f:
    config = yaml.safe_load(f)
    logger.info("成功加载配置文件")

# Expose the DeepSeek API key through the environment variable the SDK reads.
os.environ['DEEPSEEK_API_KEY'] = config['api']['deepseek']['key']

# Initialize the DeepSeek chat model used for all news-analysis calls.
chat_model = ChatDeepSeek(model="deepseek-chat")
logger.info("DeepSeek模型初始化完成")

# Thin wrapper around the DeepSeek chat model that adds timing and
# token-usage logging on top of automatic retries.
@async_retry(retries=3, delay=2.0, backoff=2.0)
async def call_llm_with_logging(messages):
    """Invoke the chat model on `messages` and return the generated text.

    Logs input/output character lengths, token counts and latency; retried
    up to 3 times with exponential backoff by the @async_retry decorator.
    """
    started = time.time()

    # Token accounting for the prompt side.
    prompt_text = "\n".join(str(m.content) for m in messages)
    prompt_tokens = get_token_count(prompt_text)
    logger.info(
        "开始调用DeepSeek模型，输入内容长度：%d字符，输入token数：%d",
        len(prompt_text), prompt_tokens
    )

    # agenerate expects a batch of message lists; we send a batch of one
    # and take the first candidate of the first generation.
    result = await chat_model.agenerate(messages=[messages])
    answer = result.generations[0][0].text

    # Token accounting for the completion side.
    answer_tokens = get_token_count(answer)
    logger.info(
        "DeepSeek模型调用完成，处理耗时：%.2f秒，输出长度：%d字符，"
        "输出token数：%d，总token消耗：%d",
        time.time() - started, len(answer), answer_tokens,
        prompt_tokens + answer_tokens
    )

    return answer

async def fetch_url_content(region):
    """Fetch raw news content for the given region.

    The regions 'global', 'us', 'hk' and 'china' are served by dedicated
    fetcher modules; any other configured region falls through to a generic
    AsyncWebCrawler pass over NEWS_SOURCES[region] plus region-specific
    trimming of the resulting markdown.

    Args:
        region: Region key; expected to exist in NEWS_SOURCES (the
            /analyze/<region> route validates this before calling).

    Returns:
        The news content as a string (markdown for crawled pages).

    Raises:
        Exception: wrapping any underlying fetch or parsing failure.
    """
    # BUG FIX: resolve the URL before the try-block. The except clause logs
    # `url`; a bare NEWS_SOURCES[region] lookup inside the try would leave
    # it unbound on KeyError and mask the real error with a NameError.
    url = NEWS_SOURCES.get(region)
    try:
        region_filter.region = region  # tag subsequent log records with this region
        logger.info("开始抓取%s地区新闻，URL: %s", region, url)

        # Regions with dedicated API helpers bypass the generic crawler.
        if region == 'global':
            logger.debug("使用特殊方法抓取全球新闻")
            return fetch_wscn_news()
        elif region == 'us':
            logger.debug("使用特殊方法抓取美股新闻")
            return fetch_us_stock_news()
        elif region == 'hk':
            logger.debug("使用特殊方法抓取港股新闻")
            return fetch_hk_stock_news()
        elif region == 'china':
            # BUG FIX: this debug message previously said "港股" (HK stocks)
            # although the branch fetches mainland-China stock news.
            logger.debug("使用特殊方法抓取A股新闻")
            return fetch_cn_stock_news()

        # NOTE(review): with the current NEWS_SOURCES keys every region
        # returns above, so this crawler path only runs for newly added
        # regions that lack a dedicated fetcher.
        async with AsyncWebCrawler(verbose=True) as crawler:
            logger.info("开始使用AsyncWebCrawler抓取内容")
            # First crawl attempt with noise-reduction settings.
            result = await crawler.arun(
                url=url,
                cache_mode=CacheMode.BYPASS,
                magic=True,
                word_count_threshold=10,
                exclude_external_links=True,
                exclude_external_images=True,
                excluded_tags=['form', 'nav'],
                remove_overlay_elements=True,
                use_selenium=True,  # always use Selenium
                selenium_wait=10,  # longer wait so dynamic content can load
                selenium_scroll=True,  # enable page scrolling
                selenium_scroll_interval=1.0,  # seconds between scrolls
                selenium_scroll_count=5  # number of scrolls
            )
            content = result.markdown
            if isinstance(content, bytes):
                content = content.decode('utf-8')

            logger.info("抓取到的内容长度：%d 字符", len(content))
            # Region-specific trimming of the crawled markdown.
            logger.info("[%s]开始处理新闻内容", region)
            if region == 'china':
                logger.debug("开始处理国内新闻内容")
                try:
                    if "国内经济" in content:
                        # Keep only the text after the "国内经济" section header...
                        start_index = content.find("国内经济") + len("国内经济")
                        extracted_content = content[start_index:].strip()
                        # ...and cut everything from the first trailing navigation marker.
                        for delimiter in ["[查看更多]", "下一页", "共"]:
                            if delimiter in extracted_content:
                                end_index = extracted_content.find(delimiter)
                                extracted_content = extracted_content[:end_index].strip()
                        content = extracted_content
                        logger.info("成功提取国内经济新闻内容")
                    if not content.strip():
                        logger.warning("无法从%s新闻源提取内容，使用原始内容", region)
                except Exception as e:
                    # BUG FIX: the format string had three %s placeholders but
                    # only two arguments, which made logging drop this record.
                    logger.error("[%s]处理新闻源时发生错误：%s，使用原始内容", region, str(e))
            elif region == 'us':
                try:
                    if "美股要闻" in content:
                        # Same marker/delimiter trimming as the china branch.
                        start_index = content.find("美股要闻") + len("美股要闻")
                        extracted_content = content[start_index:].strip()
                        for delimiter in ["[查看更多]", "下一页", "共"]:
                            if delimiter in extracted_content:
                                end_index = extracted_content.find(delimiter)
                                extracted_content = extracted_content[:end_index].strip()
                        content = extracted_content
                        logger.info("成功提取美股要闻内容")
                    if not content.strip():
                        logger.warning("无法从%s新闻源提取内容，使用原始内容", region)
                except Exception as e:
                    logger.error("[%s]处理新闻源时发生错误：%s，使用原始内容", region, str(e))
            elif region == 'hk':
                if "## 港股要闻" in content:
                    start_index = content.find("## 港股要闻") + len("## 港股要闻")
                    extracted_content = content[start_index:].strip()
                    if "[查看更多]" in extracted_content:
                        end_index = extracted_content.find("[查看更多]")
                        extracted_content = extracted_content[:end_index].strip()
                    content = extracted_content
            logger.info("[%s]成功提取要闻内容", region)
            return content
    except Exception as e:
        logger.error("从%s获取内容失败：%s", url, str(e))
        raise Exception(f"从{url}获取内容失败：{str(e)}")

# News-source configuration: supported region keys mapped to listing URLs.
# NOTE: 'global', 'us', 'hk' and 'china' are currently served by dedicated
# fetcher modules in fetch_url_content; these URLs back the crawler fallback.
NEWS_SOURCES = {
    'china': 'https://finance.eastmoney.com/a/cgnjj.html',
    'us': 'https://stock.eastmoney.com/a/cmgyw.html',  # TODO: later merge https://finance.sina.com.cn/other/hsnews/detail_index.d.html
    'hk': 'https://finance.sina.com.cn/stock/hkstock/',
    'global': 'https://wallstreetcn.com/live/global'
}

# Standalone retry helper used when a normal crawl comes back empty or broken.
async def retry_crawl(url, existing_crawler=None):
    """Re-crawl `url` aggressively (scrolling + clicking "load more").

    Args:
        url: Page to fetch.
        existing_crawler: Optional already-entered AsyncWebCrawler to reuse;
            when omitted a temporary crawler is created and closed here.

    Returns:
        The page content as markdown, or a fallback error string on failure.
    """
    logger.info("开始重试抓取: %s", url)
    try:
        # More aggressive retry configuration: headful browser, long timeout.
        browser_config = BrowserConfig(
            headless=False,
            user_agent="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36",
            timeout=90,
            window_size=(1920, 1080)
        )

        run_config = CrawlerRunConfig(
            js_code=[
                """
                // 更激进的滚动策略
                async function aggressiveScroll() {
                    for(let i = 0; i < 10; i++) {
                        window.scrollTo(0, document.body.scrollHeight);
                        await new Promise(r => setTimeout(r, 2000));
                        
                        // 尝试点击所有可能的"加载更多
                        const buttons = Array.from(document.querySelectorAll('button, a'));
                        for(const btn of buttons) {
                            if(btn.innerText && btn.innerText.includes('更多')) {
                                try {
                                    btn.click();
                                    console.log('点击了加载更多按钮');
                                    await new Promise(r => setTimeout(r, 2000));
                                } catch(e) {}
                            }
                        }
                    }
                }
                await aggressiveScroll();
                """
            ],
            cache_mode=CacheMode.BYPASS,
            screenshot=True,
            check_robots_txt=False,
            wait_after_execution=15 
        )

        if existing_crawler:
            crawler = existing_crawler
        else:
            # Enter the crawler context manually so we can conditionally
            # tear it down in the finally block below.
            crawler = AsyncWebCrawler(config=browser_config)
            await crawler.__aenter__()

        try:
            result = await crawler.arun(
                url=url,
                config=run_config,
                magic=True,
                word_count_threshold=10,
                page_load_strategy="eager",
                wait_for_selector=".live-list-item",  # wait for live-feed items to render
                wait_for_timeout=30  # seconds before giving up on the selector
            )

            content = result.markdown
            # FIX: use the module logger (lazy %-args) instead of print() so
            # retry diagnostics land in news_analyzer.log like everything else.
            logger.info("重试抓取内容长度：%d", len(content))
            return content
        finally:
            # Only close a crawler we created ourselves; a caller-supplied
            # crawler stays open for the caller to manage.
            if not existing_crawler:
                await crawler.__aexit__(None, None, None)

    except Exception as e:
        logger.error("重试抓取失败：%s", str(e))
        return "抓取失败，请稍后再试。"


@app.route('/analyze/<region>', methods=['GET'])
async def analyze_news(region):
    """GET endpoint: fetch and LLM-analyze the news feed for `region`.

    Returns 400 for unknown regions, 500 on network or other failures,
    otherwise 200 with the analysis payload.
    """
    # Guard clause: only regions declared in NEWS_SOURCES are valid.
    if region not in NEWS_SOURCES:
        return jsonify({'error': '无效的地域参数'}), 400

    try:
        raw_content = await fetch_url_content(region)
        # Ask the LLM to assess importance and market impact of the content.
        report = await analyze_content(raw_content, region)
        return jsonify(report), 200
    except aiohttp.ClientError as e:
        return jsonify({'error': f'网络错误：{str(e)}'}), 500
    except Exception as e:
        return jsonify({'error': f'发生错误：{str(e)}'}), 500

async def analyze_content(content, region=None):
    """Run the LLM analysis over `content`, emphasizing the region's market.

    Returns a dict with 'analysis' and 'status' ('success'), or on failure
    'analysis' empty, 'status' == 'error' and an 'error' message.
    """
    # Per-region analysis emphasis; unknown regions fall back to the
    # whole-market view.
    region_focus = {
        'china': '国内A股市场和政策导向',
        'us': '美股市场和全球影响',
        'hk': '港股市场表现',
        'global': '全球经济形势'
    }
    focus = region_focus.get(region, '整体市场')

    system_prompt = f"""你是一个专业的财经新闻分析专家。请分析给定的新闻内容，重点关注{focus}，并按照以下标准评估：
    1. 重要性：评估新闻的整体重要程度
    2. 影响力：评估新闻可能对股票产生的影响
    3. 时效性：评估新闻的时间敏感度
    
    请将分析结果组织为结构化数据，包含以下字段：
    ### 重要新闻要点
    - 请使用无序列表列出重要新闻要点，10至15条（按重要性排序）
    - 每个要点应该简明扼要,如果有提及具体的公司需要展示
    
    ### 潜在影响分析
    > 请使用引用块格式提供整体影响分析
    - 分点说明(1. 市场影响2. 行业影响3. 股市影响)具体影响：
    """

    try:
        # System prompt sets the analysis contract; the human message
        # carries the raw news text.
        msgs = [
            SystemMessage(content=system_prompt),
            HumanMessage(content=f"请分析以下新闻内容：\n{content}")
        ]
        logger.info("[%s]新闻分析开始，输入内容长度：%d字符", region, len(content))

        analysis_text = await call_llm_with_logging(msgs)

        logger.info("[%s]新闻分析完成，输出内容长度：%d字符", region, len(analysis_text))
        return {
            'analysis': analysis_text,
            'status': 'success'
        }
    except Exception as e:
        error_msg = f"新闻分析失败：{str(e)}"
        logger.error(error_msg)
        return {
            'analysis': '',
            'status': 'error',
            'error': error_msg
        }

if __name__ == '__main__':
    # Development server only: debug=True plus binding 0.0.0.0 exposes the
    # Werkzeug debugger to the whole network — use a production WSGI/ASGI
    # server (and flask[async] support) for deployment.
    app.run(debug=True, host='0.0.0.0', port=20010)