import json
import os
import random
import time

import requests
import uvicorn
from bs4 import BeautifulSoup
from openai import OpenAI

from api.routes import app
from config.settings import API_HOST, API_PORT

def fetch_webpage(url, max_retries=3):
    """Fetch a web page's text, retrying on transient request failures.

    Args:
        url: The URL to download.
        max_retries: Maximum number of attempts before giving up.

    Returns:
        The decoded page text on success, otherwise None.
    """
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Connection': 'keep-alive',
    }

    attempt = 0
    while attempt < max_retries:
        try:
            # Randomized politeness delay before every request.
            time.sleep(random.uniform(2, 5))
            resp = requests.get(url, headers=request_headers, timeout=30)
            resp.raise_for_status()
            # Let requests guess the charset (PBC pages are not always UTF-8).
            resp.encoding = resp.apparent_encoding
            return resp.text
        except requests.exceptions.RequestException as exc:
            print(f"第 {attempt + 1} 次尝试失败: {exc}")
            if attempt == max_retries - 1:
                print(f"已达到最大重试次数 ({max_retries})")
                return None
            print("等待后重试...")
            # Back off longer after a failure.
            time.sleep(random.uniform(5, 10))
        attempt += 1

def parse_html(html_content):
    """Parse the LPR listing page and collect announcement entries.

    Scans every width-100% table for anchors whose href contains
    '3876551', keeping only those whose title mentions 'LPR'.

    Args:
        html_content: Raw HTML of the listing page, or None/empty.

    Returns:
        A list of dicts with 'title', 'url' and 'date' keys, or None
        when no HTML was supplied.
    """
    if not html_content:
        return None

    document = BeautifulSoup(html_content, 'html.parser')
    results = []

    for table in document.find_all('table', {'width': '100%'}):
        anchors = table.find_all('a', href=lambda h: h and '3876551' in h)
        for anchor in anchors:
            title = anchor.get('title', '').strip()
            href = anchor.get('href', '').strip()

            # The publication date, if present, sits in the next hui12 span.
            date_span = anchor.find_next('span', class_='hui12')
            date = date_span.text.strip() if date_span else ''

            if not (title and href and 'LPR' in title):
                continue

            full_url = f"http://www.pbc.gov.cn{href}"
            results.append({
                'title': title,
                'url': full_url,
                'date': date,
            })
            print(f"标题: {title}")
            print(f"链接: {full_url}")
            print(f"日期: {date}")
            print("-" * 50)

    return results

def get_lpr_info_from_deepseek(content):
    """Extract LPR figures from announcement text via the Deepseek API.

    Args:
        content: Title plus body text of an LPR announcement.

    Returns:
        A dict with 'publish_date', 'one_year_lpr' and 'five_year_lpr'
        keys parsed from the model's JSON reply, or None on any failure
        (missing credentials, API error, unparseable reply).
    """
    # SECURITY FIX: the API key was hard-coded in source (and is therefore
    # leaked — it should be revoked). Read it from the environment instead.
    api_key = os.getenv("DEEPSEEK_API_KEY")
    if not api_key:
        print("未设置 DEEPSEEK_API_KEY 环境变量")
        return None

    client = OpenAI(
        api_key=api_key,
        base_url="https://api.deepseek.com"
    )

    try:
        print("\n发送给Deepseek的内容：")
        print(content)  # debug output

        response = client.chat.completions.create(
            model="deepseek-chat",
            messages=[
                {
                    "role": "system",
                    "content": """
                    你是金融数据分析师，请仔细分析文本内容，提取以下信息：
                    1. 文章发布日期
                    2. 1年期LPR数值（以百分比形式）
                    3. 5年期LPR数值（以百分比形式）
                    
                    请以下面的JSON格式返回，只返回JSON数据：
                    {
                        "publish_date": "YYYY-MM-DD",
                        "one_year_lpr": "X.XX%",
                        "five_year_lpr": "X.XX%"
                    }
                    """
                },
                {
                    "role": "user",
                    "content": content
                }
            ],
            stream=False,
            temperature=0.1  # low temperature for deterministic output
        )

        result = response.choices[0].message.content
        print("\nDeepseek返回的原始结果：")
        print(result)  # debug output

        # Robustly strip an optional ```json ... ``` Markdown fence.
        # BUG FIX: the old `result[7:-3]` always chopped the last three
        # characters even when no closing fence was present.
        result = result.strip()
        if result.startswith("```"):
            if result.startswith("```json"):
                result = result[len("```json"):]
            else:
                result = result[3:]
            if result.endswith("```"):
                result = result[:-3]
            result = result.strip()

        return json.loads(result)
    except Exception as e:
        # Best-effort: any API/parse failure is reported and swallowed so
        # the caller can continue without LPR data.
        print(f"Deepseek API调用失败: {e}")
        return None

def parse_lpr_detail(html_content):
    """Parse an LPR announcement detail page and extract its content.

    Args:
        html_content: Raw HTML of the detail page, or None/empty.

    Returns:
        A dict with 'title', 'content' and 'lpr_info' keys ('lpr_info'
        is the Deepseek parse and may be None), or None when the page
        could not be parsed.
    """
    if not html_content:
        return None

    try:
        soup = BeautifulSoup(html_content, 'html.parser')
        content_area = soup.find('div', attrs={'aria-label': '内容文本区'})
        if not content_area:
            print("未找到内容文本区")
            return None

        title = content_area.find('h2')
        title_text = title.text.strip() if title else ""

        # BUG FIX: content_text was previously left undefined when the
        # 'zoom' div was missing, raising UnboundLocalError below (masked
        # by the broad except). Initialize it so that case falls through
        # to the "incomplete info" branch instead.
        content_text = ""
        zoom_div = content_area.find('div', id='zoom')
        if zoom_div:
            # Join the non-empty paragraph texts of the article body.
            paragraphs = zoom_div.find_all('p')
            content_text = '\n'.join(p.text.strip() for p in paragraphs if p.text.strip())

        if title_text and content_text:
            print("\n提取到的原始内容：")
            print(f"标题: {title_text}")
            print(f"内容: {content_text}")

            # Delegate numeric extraction to the Deepseek API.
            lpr_info = get_lpr_info_from_deepseek(f"{title_text}\n{content_text}")

            result = {
                'title': title_text,
                'content': content_text,
                'lpr_info': lpr_info
            }

            if lpr_info:
                print("\nLPR信息：")
                print(f"发布日期: {lpr_info.get('publish_date')}")
                print(f"1年期LPR: {lpr_info.get('one_year_lpr')}")
                print(f"5年期LPR: {lpr_info.get('five_year_lpr')}")

            return result
        else:
            print("未能提取到完整信息")
            return None

    except Exception as e:
        # Best-effort boundary: report and return None rather than crash.
        print(f"解析详情页时发生错误: {e}")
        return None

def main():
    """Scrape the PBC LPR listing, then fetch and parse the newest item."""
    listing_url = "http://www.pbc.gov.cn/zhengcehuobisi/125207/125213/125440/index.html"
    listing_html = fetch_webpage(listing_url)

    if not listing_html:
        return

    items = parse_html(listing_html)
    if not items:
        print("未找到LPR相关信息")
        return

    # The first entry on the listing page is the most recent announcement.
    latest = items[0]
    print(f"\n正在获取最新LPR详情: {latest['title']}")
    print(f"链接: {latest['url']}")

    detail_html = fetch_webpage(latest['url'])
    if not detail_html:
        return

    detail = parse_lpr_detail(detail_html)
    if detail:
        print("\n成功获取LPR详细信息")
    else:
        print("\n获取LPR详细信息失败")
        
if __name__ == "__main__":
    # NOTE(review): main() above (the one-shot scraper) is defined but never
    # invoked; this entry point starts the API server instead — confirm that
    # is intended, or wire main() into a CLI flag.
    uvicorn.run(app, host=API_HOST, port=API_PORT)
