import os
import pickle
import re
from datetime import datetime, timedelta

import requests
from bs4 import BeautifulSoup
from openai import OpenAI
from tqdm import tqdm

# OpenAI-compatible client for the Volcengine Ark service.
client = OpenAI(
    # Default endpoint (Beijing region); adjust for your account's region.
    base_url="https://ark.cn-beijing.volces.com/api/v3",
    # Read the API key from the environment instead of hard-coding it in
    # source (the original left an empty literal here, which both leaked the
    # intent and never worked). Set ARK_API_KEY in your shell/CI environment;
    # the "" fallback preserves the original default when it is unset.
    api_key=os.environ.get("ARK_API_KEY", ""),
)


# Fetch candidate news titles and links from Sina Finance's rolling-news pages.
def get_sina_news(start_t=None, end_t=None, page_max=10):
    """Scrape Sina Finance roll pages and return news dated within [start_t, end_t].

    Args:
        start_t: inclusive start date "YYYY-MM-DD"; defaults to yesterday,
            computed at call time (the original computed it at import time,
            which goes stale in a long-lived process).
        end_t: inclusive end date "YYYY-MM-DD"; defaults to today.
        page_max: maximum number of roll pages to scan.

    Returns:
        list[dict]: items of the form {'title': ..., 'link': ...}, deduplicated
        by link.
    """
    if start_t is None:
        start_t = (datetime.today() - timedelta(1)).strftime("%Y-%m-%d")
    if end_t is None:
        end_t = datetime.today().strftime("%Y-%m-%d")

    def is_valid_link(link):
        # Sina article URLs contain a "doc-i..." slug; use it to skip nav links.
        return 'doc-i' in link or '/doc-' in link

    def extract_date_from_link(link):
        # Article URLs embed the publication date as YYYY-MM-DD.
        match = re.search(r'(\d{4}-\d{2}-\d{2})', link)
        return match.group(0) if match else None

    news_data = []
    seen_links = set()  # dedupe across pages, which can overlap
    time_trigger = 0    # counts articles older than start_t -> stop paging early

    for page in range(1, page_max + 1):
        # NOTE(review): everything after '#' is a URL *fragment* and is never
        # sent to the server, so every request here likely returns the same
        # page regardless of `page`. Pagination probably needs Sina's JSON
        # feed API instead -- verify before relying on page_max > 1.
        url = f"https://finance.sina.com.cn/roll/#pageid=384&lid=2519&k=&num=50&page={page}"
        print(f"正在抓取第 {page} 页：{url}")

        try:
            # Timeout so a stalled connection cannot hang the whole run.
            response = requests.get(url, timeout=10)
        except requests.RequestException as exc:
            print("Failed to retrieve the webpage:", page)
            print("Error: ", exc)
            continue

        # Force UTF-8: Sina serves UTF-8 but does not always declare it.
        response.encoding = 'utf-8'

        if response.status_code == 200:
            soup = BeautifulSoup(response.text, 'html.parser')
            last_len = len(news_data)

            for item in soup.find_all('a', href=True):
                title = item.get_text(strip=True)
                link = item['href']
                if not (title and link and is_valid_link(link)):
                    continue
                if link in seen_links:
                    continue
                date_str = extract_date_from_link(link)
                if date_str is None:
                    continue
                if start_t <= date_str <= end_t:
                    seen_links.add(link)
                    news_data.append({'title': title, 'link': link})
                elif date_str < start_t:
                    time_trigger += 1

            print("Successfully get " + str(len(news_data) - last_len) + " news from page " + str(page))
            response.close()
        else:
            print("Failed to retrieve the webpage:", page)
            print("Status code: ", response.status_code)

        if time_trigger > 0:
            # Saw articles older than the window; later pages are older still.
            break

    return news_data


# Thin wrapper around the chat-completions endpoint.
def call_openai(prompt):
    """Send a single-turn chat prompt to the model and return its reply text."""
    messages = [{"role": "user", "content": prompt}]
    completion = client.chat.completions.create(
        model="deepseek-v3-241226",  # choose the model as needed
        messages=messages,
    )
    return completion.choices[0].message.content


# Filter news down to items the model deems relevant.
def filter_relevant_news(news_data):
    """Keep only items the LLM judges related to quant investing, financial
    markets, or economic policy (model answers '是'/'否' per title)."""
    kept = []
    for item in tqdm(news_data):
        # Yes/no prompt: the model is asked to answer with a single word.
        prompt = f"请判断以下新闻标题是否与量化投资、金融市场或经济政策相关。只需回答'是'或'否'。标题：{item['title']}"
        if "是" in call_openai(prompt):
            kept.append(item)
    return kept


# Score each news item's market importance via the LLM.
def score_news_importance(news_data):
    """Ask the model to rate each title's market impact (1-10).

    Mutates each dict in place by adding an integer 'score' key (0 when no
    number can be parsed from the reply) and returns the same list.
    """
    scored_news = []
    for news in tqdm(news_data):
        title = news["title"]
        # Prompt steers the model toward index moves, CN/US policy, and
        # official sources, and asks for a bare score.
        prompt = f"""请对以下新闻标题对股市的市场表现和风险影响的重要性进行评分（1-10分，10分最重要）。
        重点关注A股、港股指数、美股指数行情，特别是隔夜中概股表现。
        重点关注中美货币政策、政治新闻。
        更偏好来自官方的信息。
        只需返回分数。
        标题：{title}"""
        reply = call_openai(prompt)
        # LLMs often wrap the number in extra text ("评分：8", "8分"); the
        # original int(reply.strip()) silently scored all of those as 0,
        # wrecking the ranking. Extract the first integer instead.
        match = re.search(r'\d+', reply)
        news["score"] = int(match.group(0)) if match else 0
        scored_news.append(news)
    return scored_news


# Download the body text of a news article.
def fetch_news_content(url):
    """Fetch *url* and return the concatenated text of its <p> tags, or None.

    Best-effort: any network/parse error is printed and swallowed so one bad
    article does not abort the whole pipeline.
    """
    try:
        # Timeout so one slow article cannot hang the run.
        response = requests.get(url, timeout=10)
        # Force UTF-8 for consistency with get_sina_news; without it
        # requests guesses the encoding and can mis-decode Chinese text.
        response.encoding = 'utf-8'
        soup = BeautifulSoup(response.text, "html.parser")
        # Assumes article body text lives in <p> tags -- TODO confirm this
        # holds for all linked Sina page layouts.
        paragraphs = soup.find_all("p")
        return " ".join(p.get_text() for p in paragraphs)
    except Exception as e:
        print(f"抓取新闻内容失败：{e}")
        return None


# Summarize article text with the LLM.
def generate_summary(content):
    """Return a model-written summary (<=100 chars) of *content*.

    Returns None when there is nothing to summarize (empty/None input).
    """
    if not content:
        return None
    return call_openai(f"请对以下新闻内容进行简要总结，不超过100字：\n{content}")


def main(search_page_max=1, summary_news_max=10):
    """End-to-end pipeline: scrape, score, summarize, print, and cache news."""
    news_data = get_sina_news(page_max=search_page_max)

    print("新闻相关度打分")
    # Rank by model-assigned importance and keep the top slice.
    ranked = sorted(
        score_news_importance(news_data),
        key=lambda item: item["score"],
        reverse=True,
    )[:summary_news_max]

    print("对相关新闻生成摘要")
    for news in tqdm(ranked):
        content = fetch_news_content(news["link"])
        news["summary"] = generate_summary(content) if content else "无法获取内容"

    # Human-readable report.
    for news in ranked:
        print(f"标题: {news['title']}")
        print(f"打分: {news['score']}")
        print(f"链接: {news['link']}")
        print(f"摘要: {news['summary']}")
        print("-" * 80)

    # Cache today's results for later reuse.
    with open(datetime.today().strftime("%Y%m%d") + '_data.pkl', 'wb') as file:
        pickle.dump(ranked, file)


# Run the full pipeline when executed as a script.
if __name__ == "__main__":
    main()