import os
import re
import sys
import time
import random
import logging
import datetime
import requests
import traceback
from tqdm import tqdm
from bs4 import BeautifulSoup, Comment
from selenium import webdriver
from selenium.webdriver.edge.service import Service
from selenium.webdriver.edge.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from fpdf import FPDF
import openai

# python-docx imports used for generating the Word briefing document
from docx import Document
from docx.shared import Pt, RGBColor
from docx.oxml.ns import qn
from docx.enum.text import WD_ALIGN_PARAGRAPH

# Module-wide logging: timestamped INFO-level messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Paths resolved relative to this script: the Edge WebDriver binary and two
# bundled Chinese font files (not referenced in this chunk — TODO confirm usage).
CUR_DIR = os.path.dirname(os.path.abspath(__file__))
EDGE_DRIVER_PATH = os.path.join(CUR_DIR, 'msedgedriver.exe')
SIMHEI_FONT_PATH = os.path.join(CUR_DIR, 'fonts', 'simhei.ttf')
SIMFANG_FONT_PATH = os.path.join(CUR_DIR, 'fonts', 'simfang.ttf')

# Desktop-browser User-Agent so plain requests.get() calls look like a browser.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0"
}

# --------------------------------------------------
# 1. Domestic/international news (verbatim from hebingyuzhoapian.py)
# --------------------------------------------------
# SECURITY: an API key was hard-coded in source control. Prefer the environment
# variable; the original literal is kept as a fallback for compatibility.
KIMI_YC = os.environ.get("MOONSHOT_API_KEY", "sk-SzytvBrxyPfHOFZ1qXxWGahZceRFoyeoVas6IOk341lDLcuz")
MODEL_YC = "kimi-latest"


def yc_driver(headless=True):
    """Build a configured Edge WebDriver.

    Args:
        headless: run the browser without a visible window when True.
    """
    options = Options()
    if headless:
        options.add_argument('--headless')
    for flag in ('--disable-gpu', '--start-maximized'):
        options.add_argument(flag)
    service = Service(EDGE_DRIVER_PATH)
    return webdriver.Edge(service=service, options=options)


def get_oil_info():
    """Scrape yicai.com for the latest international crude-oil paragraph.

    Searches Yicai for “国际油价”, opens the first /news/ article found and
    returns the first paragraph mentioning oil-price keywords.

    Returns:
        str | None: the matching paragraph text, or None when nothing
        suitable is found or any step fails.
    """
    driver = yc_driver(headless=True)
    try:
        logger.info("正在获取国际原油信息...")
        # Search for "international oil price" (URL-encoded Chinese query).
        driver.get("https://www.yicai.com/search?keys=%E5%9B%BD%E9%99%85%E6%B2%B9%E4%BB%B7")
        logger.info("正在加载搜索页面...")

        # Wait for the document itself rather than a specific element; the
        # search-page layout changes often.
        WebDriverWait(driver, 30).until(
            lambda d: d.execute_script("return document.readyState") == "complete"
        )

        # Best-effort wait for the news-list container.
        try:
            WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.CLASS_NAME, "f-main-news-list"))
            )
        except Exception:  # BUGFIX: was bare `except:` (also trapped KeyboardInterrupt/SystemExit)
            logger.warning("未找到预期的新闻列表容器，继续处理...")

        soup = BeautifulSoup(driver.page_source, 'html.parser')

        # Pick the first link that looks like an article page.
        news_links = soup.find_all('a', href=True)

        article_url = None
        for link in news_links:
            href = link.get('href')
            if href and '/news/' in href and href.endswith('.html'):
                article_url = href if href.startswith('http') else "https://www.yicai.com" + href
                logger.info(f"找到文章链接: {article_url}")
                break

        if not article_url:
            logger.warning("未找到任何文章链接")
            return None

        # Open the article itself and extract the oil paragraph.
        logger.info(f"正在访问文章页面: {article_url}")
        driver.get(article_url)

        WebDriverWait(driver, 30).until(
            lambda d: d.execute_script("return document.readyState") == "complete"
        )

        # Best-effort wait for the article body container.
        try:
            WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.ID, "multi-text"))
            )
        except Exception:  # BUGFIX: was bare `except:`
            logger.warning("未找到 multi-text 容器，继续处理...")

        soup = BeautifulSoup(driver.page_source, "html.parser")

        # Fall back through several possible article containers.
        content = soup.select_one('div#multi-text') or soup.find('div', class_='m-txt') or soup.body

        if not content:
            logger.warning("未找到正文容器")
            return None

        paragraphs = content.find_all(['p', 'div'])
        oil_info = None

        # First pass: explicit oil-price keywords.
        for p in paragraphs:
            text = p.get_text(strip=True)
            if ('国际油价' in text or '原油期货' in text or 'WTI' in text or '布伦特' in text) and len(text) > 20:
                oil_info = text
                break

        # Second pass: generic commodities/futures wording.
        if not oil_info:
            for p in paragraphs:
                text = p.get_text(strip=True)
                if ('商品' in text and '期货' in text) and len(text) > 20:
                    oil_info = text
                    break

        if oil_info:
            logger.info(f"找到原油信息: {oil_info}")
            return oil_info
        else:
            logger.warning("未找到原油相关信息")
            return None

    except Exception as e:
        logger.error(f"获取原油信息时出错: {str(e)}")
        return None
    finally:
        driver.quit()


def get_first_article_url():
    """Find the URL of Yicai's morning-briefing (早报) article, or None."""
    driver = yc_driver()
    try:
        driver.get("https://www.yicai.com/news/")
        WebDriverWait(driver, 30).until(
            EC.presence_of_element_located(
                (By.CSS_SELECTOR, '.news-item, .f-title, .m-list, .m-con, .m-content, .m-feedcard')
            )
        )
        soup = BeautifulSoup(driver.page_source, 'html.parser')

        # Scan every anchor; accept the first whose nested heading, visible
        # text, or title attribute mentions “早报”.
        for anchor in soup.find_all('a', href=True):
            heading = anchor.find(['h2', 'h3', 'div', 'span'], string=re.compile(r'早报'))
            if heading is None:
                visible_text = anchor.get_text(strip=True)
                title_attr = anchor.get('title', '')
                if '早报' not in visible_text and '早报' not in title_attr:
                    continue

            href = anchor.get('href')
            if not href:
                continue
            article_url = href if href.startswith('http') else "https://www.yicai.com" + href
            logger.info(f"找到早报文章: {article_url}")
            return article_url

        logger.warning("未找到任何早报文章链接")
        return None
    finally:
        driver.quit()


def parse_sections(article_url):
    """Parse a Yicai morning-briefing article into named sections.

    Walks the top-level <p> elements of the #multi-text container and groups
    the bold (<strong>) item lines under the wanted section headers
    今日推荐 / 观国内 / 览海外.

    Args:
        article_url: URL of the briefing article.

    Returns:
        dict: {section_name: [item_text, ...]}, or {} on failure.
    """
    driver = yc_driver(headless=True)
    try:
        driver.get(article_url)
        WebDriverWait(driver, 20).until(
            EC.presence_of_element_located((By.ID, "multi-text"))
        )
        soup = BeautifulSoup(driver.page_source, "html.parser")
        content = soup.select_one('div#multi-text')
        if not content:
            logger.warning("未找到正文容器")
            return {}
        # Markers that open each section we want to keep.
        section_titles = {
            "今日推荐": "【今日推荐】",
            "观国内": "【观国内】",
            "览海外": "【览海外】"
        }
        # Any of these markers closes the current section (either another
        # rubric of the briefing or the next wanted header).
        end_markers = [
            "【大公司】", "【掘金圈】", "【一财精选】",
            "【今日推荐】", "【观国内】", "【览海外】"
        ]
        sections = {}
        current_section = None  # section currently being collected, if any
        items = []              # bold lines gathered for current_section

        # Only direct children of the container are scanned.
        for element in content.find_all(recursive=False):
            if element.name != "p":
                continue

            strong_elements = element.find_all("strong")
            if strong_elements:
                strong_text = "".join([s.get_text(strip=True) for s in strong_elements])

                # Does this paragraph open one of the wanted sections?
                for sec_name, sec_title in section_titles.items():
                    if sec_title in strong_text:
                        if current_section and items:
                            sections[current_section] = items.copy()
                        current_section = sec_name
                        items = []
                        break

                if not current_section:
                    continue

                # Does this paragraph close the current section?
                for marker in end_markers:
                    if marker in strong_text and marker != section_titles[current_section]:
                        if items:
                            sections[current_section] = items.copy()
                        current_section = None
                        items = []
                        break

            # Collect the bold text as an item (the header line itself is
            # filtered out by the `not in item_text` check).
            if current_section and strong_elements:
                item_text = "".join([s.get_text(strip=True) for s in strong_elements])
                if section_titles[current_section] not in item_text:
                    clean_text = re.sub(r'\s{2,}', ' ', item_text)
                    if clean_text:
                        items.append(clean_text)

        # Flush the final section if the article ended while still inside one.
        if current_section and items:
            sections[current_section] = items

        return sections
    except Exception as e:
        logger.error(f"解析内容时出错: {str(e)}")
        traceback.print_exc()
        return {}
    finally:
        driver.quit()


def simplify_content(content):
    """Trim a news item to its first clause when it runs long.

    Text of 50 chars or less, or text already ending in 。/！/？, is returned
    unchanged. Otherwise the text is cut at the first sentence mark found
    after position 10; with no such mark, it is truncated to 80 chars + "...".
    """
    if len(content) <= 50 or content.endswith(('。', '！', '？')):
        return content
    for mark in ('。', '！', '？', '；', '，'):
        cut = content.find(mark)
        if cut > 10:  # find() returns -1 when absent, which fails this test
            return content[:cut + 1]
    return content[:80] + "..."


def merge_and_deduplicate(yicai_content, eastmoney_content):
    """Merge Yicai and Eastmoney sections into one dict of simplified items.

    Yicai items are taken as-is (simplified); Eastmoney items are routed to
    热点题材 / 商品期货 / 每日精选 and skipped when a substring-duplicate of an
    already-kept item exists in the target section.
    """
    combined = {}

    for section, entries in yicai_content.items():
        bucket = combined.setdefault(section, [])
        for entry in entries:
            bucket.append(simplify_content(entry))

    for section, entries in eastmoney_content.items():
        # Keep the two named sections; everything else folds into 每日精选.
        target = section if section in ("热点题材", "商品期货") else "每日精选"
        bucket = combined.setdefault(target, [])

        for entry in entries:
            shortened = simplify_content(entry)
            is_duplicate = any(
                shortened in kept or kept in shortened for kept in bucket
            )
            if not is_duplicate:
                bucket.append(shortened)
    return combined


def call_ai(messages, max_tokens=4000, temperature=0.2):
    """POST a chat-completion request to the Moonshot API.

    Args:
        messages: OpenAI-style message dicts.
        max_tokens: completion budget.
        temperature: sampling temperature.

    Returns:
        str: the assistant reply, stripped of surrounding whitespace.

    Raises:
        requests.HTTPError: on a non-2xx API response.
    """
    request_headers = {
        "Authorization": f"Bearer {KIMI_YC}",
        "Content-Type": "application/json"
    }
    body = {
        "model": MODEL_YC,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
    }
    response = requests.post("https://api.moonshot.cn/v1/chat/completions",
                             headers=request_headers, json=body, timeout=180)
    response.raise_for_status()
    reply = response.json()["choices"][0]["message"]["content"]
    return reply.strip()


def summarize_with_ai(raw_file_path):
    """Pre-split colon-separated items in the raw file and summarize via AI.

    Lines under 【今日推荐】/【观国内】/【览海外】 that contain a full-width
    colon are split into two lines before being sent to the model.

    Returns:
        str: the model's summary text.
    """
    with open(raw_file_path, 'r', encoding='utf-8') as fh:
        raw_lines = fh.readlines()

    split_sections = {"【今日推荐】", "【观国内】", "【览海外】"}
    output_lines = []
    active_section = None

    for raw_line in raw_lines:
        text = raw_line.strip()
        if text.startswith("【") and text.endswith("】"):
            # Section header: remember it and pass it through.
            active_section = text
            output_lines.append(text)
            continue

        if active_section in split_sections and "：" in text:
            # Drop any leading "N. " numbering, then split at the first colon.
            unnumbered = re.sub(r'^\d+\.\s*', '', text)
            head, tail = unnumbered.split("：", 1)
            output_lines.append(f"{head}。")
            output_lines.append(f"{tail}")
        else:
            output_lines.append(text)

    processed_text = "\n".join(output_lines)

    prompt = (
        "你是资深财经编辑请对以下事件进行总结，要求如下。\n"
        "1. 事件粒度：\n"
        "   - 【今日推荐】【观国内】【览海外】板块已按冒号拆成两事件，无需改动。\n"
        "   - 【每日精选】【热点题材】【商品期货】逐条一句总结，保留日期/数字/机构/人名。\n"
        "2. 去重：语义完全重复只保留首次。\n"
        "3. 格式：\n国内新闻\n1. 事件1\n2. 事件2\n…\n\n国际新闻\n1. 事件1\n2. 事件2\n…\n"
        "4. 不要序号以外的任何文字、空行或标题。"
        "请严格按上述格式输出。只输出这些内容，无关内容不再输出。"
        f"{processed_text}"
    )
    return call_ai([{"role": "user", "content": prompt}]).strip()


def get_national_international_news():
    """Build the domestic/international news sections.

    Pipeline: scrape the Yicai morning briefing, scrape today's Eastmoney
    market-preview article, merge and deduplicate, write the raw file, then
    have the AI produce the final numbered lists.

    Returns:
        dict: {"国内新闻": [...], "国际新闻": [...]} (empty lists on failure).
    """
    yicai_url = get_first_article_url()
    if not yicai_url:
        logger.error("未能获取第一财经晨报链接")
        return {"国内新闻": [], "国际新闻": []}
    logger.info(f"第一财经晨报链接: {yicai_url}")
    yicai_content = parse_sections(yicai_url)

    # Eastmoney: find today's article on the market-preview listing page.
    driver = yc_driver()
    try:
        LIST_URL = "https://stock.eastmoney.com/a/czpnc.html"
        TODAY_STR = f"{datetime.date.today().month}月{datetime.date.today().day}日"
        driver.get(LIST_URL)
        time.sleep(3)
        em_url = ""
        for a in driver.find_elements(By.CSS_SELECTOR,
                                      ".repeatList a, .Zt a, .newsList a"):
            if TODAY_STR in a.text:
                href = a.get_attribute("href")
                em_url = href if href.startswith("http") else "https:" + href
                break
        eastmoney_content = {}
        if em_url:
            print(f"\n东方财富文章URL: {em_url}")
            driver.get(em_url)
            time.sleep(3)
            soup = BeautifulSoup(driver.page_source, "lxml")
            body = soup.select_one(".newsContent, #ContentBody, .article-body") or soup
            # Split the article into h3-titled sections.
            sections, cur = {}, "正文"
            sections[cur] = []
            for tag in body.children:
                if tag.name == "h3":
                    cur = tag.get_text(strip=True)
                    sections[cur] = []
                elif tag.name == "p":
                    txt = re.sub(r'<[^>]+>', '', str(tag))
                    txt = re.sub(r'\s+', ' ', txt).strip()
                    # Drop bare "N. 标题：" lines with no content after them.
                    if txt and not re.match(r'^\d+\.\s*[\u4e00-\u9fa5]+：$', txt):
                        sections[cur].append(txt)
            keep_titles = ["每日精选", "热点题材", "商品期货"]
            sections = {k: v for k, v in sections.items() if k in keep_titles}
            if "商品期货" in sections:
                sections["商品期货"] = [l for l in sections["商品期货"] if "国内期货" in l]
            eastmoney_content = sections
    finally:
        driver.quit()

    merged = merge_and_deduplicate(yicai_content, eastmoney_content)
    today_date = datetime.date.today().strftime("%Y%m%d")
    raw_filename = os.path.join(CUR_DIR, f"原始内容---{today_date}.txt")
    with open(raw_filename, 'w', encoding='utf-8') as f:
        for title, plist in merged.items():
            f.write(f"\n【{title}】\n")
            merged_items = []
            i = 0
            while i < len(plist):
                # Join a short headline-like line with the following line.
                if i + 1 < len(plist) and len(plist[i]) < 50 and not re.search(r'[。？！]$', plist[i]):
                    # BUGFIX: this used to rebind the name `merged` — the very
                    # dict being iterated above — shadowing it; use a
                    # dedicated local instead.
                    if not plist[i].endswith(('：', ':', '。', '!', '?')):
                        joined = f"{plist[i]}：{plist[i + 1]}"
                    else:
                        joined = f"{plist[i]}{plist[i + 1]}"
                    merged_items.append(joined)
                    i += 2
                else:
                    merged_items.append(plist[i])
                    i += 1
            for idx, item in enumerate(merged_items, 1):
                f.write(f"{idx}. {item}\n")
            f.write("\n")
    summary = summarize_with_ai(raw_filename)

    # Parse the AI output back into a dict keyed by section header lines.
    final = {"国内新闻": [], "国际新闻": []}
    cur = None
    for line in summary.splitlines():
        line = line.strip()
        if line == "国内新闻":
            cur = "国内新闻"
        elif line == "国际新闻":
            cur = "国际新闻"
        elif cur and line:
            final[cur].append(line)
    return final


# --------------------------------------------------
# 4. Bingtuan news (verbatim from test_btnews.py, yesterday's articles)
# --------------------------------------------------
# SECURITY: an API key was hard-coded in source control. Prefer the environment
# variable; the original literal is kept as a fallback for compatibility.
KIMI_BT = os.environ.get("MOONSHOT_API_KEY", "sk-SzytvBrxyPfHOFZ1qXxWGahZceRFoyeoVas6IOk341lDLcuz")
KIMI_MODEL_BT = "moonshot-v1-8k"


def bt_parse_date_arg():
    """Return the target date for Bingtuan news: always yesterday."""
    one_day = datetime.timedelta(days=1)
    return datetime.date.today() - one_day


def bt_fetch(url, encoding=None):
    """GET *url* and return the decoded HTML text.

    Args:
        url: page to fetch.
        encoding: force a charset; when falsy, use requests' detected one.
    """
    response = requests.get(url, headers=HEADERS, timeout=15)
    if encoding:
        response.encoding = encoding
    else:
        response.encoding = response.apparent_encoding
    return response.text


def bt_parse(url, target_date, encoding=None):
    """Return article links published on *target_date* from a listing page.

    Supports two site layouts (huyangnet.cn and bt.chinanews.com.cn); each
    malformed list entry is skipped silently.
    """
    soup = BeautifulSoup(bt_fetch(url, encoding), "lxml")
    found = []
    if "huyangnet.cn" in url:
        for item in soup.select("div.center_page_in li"):
            span = item.find("span")
            stamp = span.get_text(strip=True)[:10] if span else ""
            try:
                # Skip entries whose date doesn't match, then grab the link;
                # a missing <a> raises TypeError and is skipped too.
                if datetime.datetime.strptime(stamp, "%Y-%m-%d").date() != target_date:
                    continue
                href = item.a["href"]
                found.append("https://www.huyangnet.cn" + href if href.startswith("/") else href)
            except (ValueError, TypeError):
                continue
    elif "bt.chinanews.com.cn" in url:
        for item in soup.select("div#LB li"):
            date_span = item.find(class_="date")
            # Date text looks like "(YYYY.MM.DD ...)" — strip the leading char.
            stamp = date_span.get_text(strip=True)[1:11] if date_span else ""
            try:
                if datetime.datetime.strptime(stamp, "%Y.%m.%d").date() != target_date:
                    continue
                href = item.a["href"]
                found.append("https://bt.chinanews.com.cn" + href if href.startswith("/") else href)
            except (ValueError, TypeError):
                continue
    return found


def bt_extract_body(url):
    """Best-effort extraction of an article's summary text.

    Prefers the meta-description tag, falls back to the first <p>, and
    returns "" on any failure whatsoever.
    """
    try:
        soup = BeautifulSoup(bt_fetch(url), "lxml")
        meta = soup.find("meta", attrs={"name": "description"})
        if meta and meta.get("content"):
            return meta["content"].strip()
        first_paragraph = soup.find("p")
        if first_paragraph is None:
            return ""
        return first_paragraph.get_text(strip=True)
    except Exception:
        return ""


def bt_summarize(texts):
    """Summarize Bingtuan news bodies with Kimi.

    Returns the non-empty summary lines, or a single error marker on failure.
    """
    openai.api_key = KIMI_BT
    openai.base_url = "https://api.moonshot.cn/v1/"
    numbered = "\n\n".join(f"{idx}. {body}" for idx, body in enumerate(texts, 1))
    prompt = (
        "请用简洁的语言逐条总结以下新闻内容，并去除重复事件，每条一句话以内，突出核心事实，"
        "按“1. ... 2. ...”格式输出，不要添加额外说明。\n\n"
        f"{numbered}"
    )
    try:
        response = openai.chat.completions.create(
            model=KIMI_MODEL_BT,
            messages=[
                {"role": "system", "content": "你是一个专业的新闻简报助手。"},
                {"role": "user", "content": prompt}
            ],
            temperature=0.3,
            max_tokens=600
        )
        reply = response.choices[0].message.content.strip()
        return [part.strip() for part in reply.splitlines() if part.strip()]
    except Exception as e:
        return [f"[总结失败: {e}]"]


def get_bt_news():
    """Fetch yesterday's Bingtuan news from two index pages and summarize.

    Returns:
        list[str]: numbered summary lines, or ["无"] when nothing is found.
    """
    target_date = bt_parse_date_arg()
    index_pages = {
        "胡杨网": "http://www.huyangnet.cn/node_60259.html",
        "中新网兵团": "https://www.bt.chinanews.com.cn/bingtuan/index.shtml"
    }
    collected_links = []
    for site_name, index_url in index_pages.items():
        # The chinanews site serves GB2312; the other auto-detects.
        encoding = "gb2312" if "bt.chinanews.com.cn" in index_url else None
        page_links = bt_parse(index_url, target_date, encoding)
        collected_links.extend(page_links)
        logger.info(f"{site_name} 找到 {len(page_links)} 篇新闻")
    if not collected_links:
        logger.info(f"未找到 {target_date} 的兵团文章")
        return ["无"]
    # Keep each distinct non-empty body once, in first-seen order.
    seen_bodies = set()
    bodies = []
    for link in collected_links:
        body = bt_extract_body(link)
        if body and body not in seen_bodies:
            seen_bodies.add(body)
            bodies.append(body)
    if not bodies:
        return ["无"]
    return bt_summarize(bodies)


# --------------------------------------------------
# 5. Magnesium-industry news (verbatim from meiye.py, yesterday + today)
# --------------------------------------------------
# SECURITY: an API key was hard-coded in source control. Prefer the environment
# variable; the original literal is kept as a fallback for compatibility.
KIMI_ME = os.environ.get("MOONSHOT_API_KEY", "sk-SzytvBrxyPfHOFZ1qXxWGahZceRFoyeoVas6IOk341lDLcuz")
KIMI_MODEL_ME = "moonshot-v1-8k"


def me_clean_text(text: str) -> str:
    """Normalize article whitespace.

    Newlines, carriage returns, tabs and ideographic spaces collapse to a
    single space, runs of spaces are squeezed, and the ends are stripped.
    """
    if not text:
        return ""
    collapsed = re.sub(r'[\n\r\t\u3000]+', ' ', text)
    collapsed = re.sub(r' +', ' ', collapsed)
    return collapsed.strip()


def me_fetch_article(url: str) -> str:
    """Download a WeChat article and return its cleaned body text.

    Tries UTF-8 first, then falls back to the response's declared charset
    (or one sniffed from the HTML head, defaulting to GBK). Errors are
    returned as bracketed marker strings rather than raised.
    """
    try:
        resp = requests.get(url, headers=HEADERS, timeout=15)
        resp.raise_for_status()
        try:
            page = resp.content.decode('utf-8', errors='strict')
        except UnicodeDecodeError:
            charset = resp.encoding
            # iso-8859-1 is requests' meaningless default — sniff instead.
            if not charset or charset.lower() == 'iso-8859-1':
                match = re.search(r'charset=["\']?([\w-]+)', resp.text[:1024], re.I)
                charset = match.group(1) if match else 'gbk'
            page = resp.content.decode(charset, errors='ignore')
        soup = BeautifulSoup(page, 'lxml')
        body = (
                soup.find("div", class_="rich_media_content") or
                soup.find("div", id="js_content") or
                soup.find("div", class_="article-content") or
                soup.body
        )
        if not body:
            return "[错误] 未找到正文区域"
        # Strip non-text elements and HTML comments before extracting.
        for junk in body(["script", "style", "noscript", "button", "img", "svg", "iframe", "video"]):
            junk.decompose()
        for note in body.find_all(string=lambda t: isinstance(t, Comment)):
            note.extract()
        return me_clean_text(body.get_text(separator=" ", strip=True))
    except requests.exceptions.RequestException as e:
        return f"[网络错误] {str(e)}"
    except Exception as e:
        return f"[解析错误] {str(e)}"


def me_login():
    """Interactively log into the WeChat MP platform via QR scan.

    Opens Edge, waits up to two minutes for the URL to reach the home page,
    then returns the session cookies as a {name: value} dict.

    Raises:
        RuntimeError: when the login does not complete in time.
    """
    options = Options()
    for flag in ('--disable-gpu', '--no-sandbox',
                 '--disable-dev-shm-usage', '--disable-extensions'):
        options.add_argument(flag)
    driver = webdriver.Edge(service=Service(EDGE_DRIVER_PATH), options=options)
    try:
        driver.get('https://mp.weixin.qq.com/')
        logger.info('请在浏览器中扫码登录微信公众号平台...')
        deadline = time.time() + 120
        logged_in = False
        while time.time() < deadline:
            if '/cgi-bin/home' in driver.current_url:
                logged_in = True
                break
            time.sleep(3)
        if not logged_in:
            raise RuntimeError('登录超时，请重试')
        time.sleep(2)  # let post-login cookies settle
        return {cookie['name']: cookie['value'] for cookie in driver.get_cookies()}
    except Exception as e:
        logger.error(f'登录过程中出错: {str(e)}')
        raise
    finally:
        driver.quit()


def me_get_token(cookies: dict) -> str:
    """Extract the MP-platform session token from the login redirect URL.

    Raises:
        RuntimeError: when the cookies are invalid or no token is present.
    """
    try:
        rsp = requests.get('https://mp.weixin.qq.com', cookies=cookies, allow_redirects=False, timeout=10)
        if rsp.status_code != 302:
            raise RuntimeError('未触发重定向，可能Cookie无效')
        location = rsp.headers.get('Location', '')
        if not location:
            raise RuntimeError('重定向地址为空')
        match = re.search(r'token=(\d+)', location)
        if match is None:
            raise RuntimeError('重定向地址中未找到token参数')
        return match.group(1)
    except Exception as e:
        logger.error(f'获取token失败: {str(e)}')
        raise


def me_get_fakeid(token: str, cookies: dict, nickname: str) -> str:
    """Look up a WeChat public account by nickname and return its fakeid.

    Raises:
        RuntimeError: when the account cannot be found.
    """
    try:
        query = {
            'action': 'search_biz',
            'token': token,
            'query': nickname,
            'begin': '0',
            'count': '5',
            'lang': 'zh_CN',
            'f': 'json',
            'ajax': '1',
            'random': random.random()  # cache-buster expected by the endpoint
        }
        rsp = requests.get('https://mp.weixin.qq.com/cgi-bin/searchbiz',
                           params=query,
                           cookies=cookies,
                           timeout=15)
        rsp.raise_for_status()
        payload = rsp.json()
        matches = payload.get('list')
        if not matches:
            raise RuntimeError(f'公众号 "{nickname}" 未找到')
        return matches[0]['fakeid']
    except Exception as e:
        logger.error(f'获取公众号 {nickname} 的fakeid失败: {str(e)}')
        raise


def me_get_wechat_articles(token: str, fakeid: str, cookies: dict, days=2) -> list:
    """List an account's recent posts from the last *days* days.

    Args:
        token: MP-platform session token from me_get_token().
        fakeid: account id from me_get_fakeid().
        cookies: logged-in session cookies.
        days: keep posts whose update date is within the last *days* days
            (default 2 = yesterday and today).

    Returns:
        list[dict]: {'title', 'url', 'pub_date'} per kept post.

    Raises:
        Exception: re-raised after logging on any network/format error.
    """
    try:
        # Oldest date (inclusive) still accepted. Hoisted out of the loop
        # (it was recomputed per item), and the unused `start_date` string
        # the old code built here has been removed.
        cutoff = (datetime.datetime.now() - datetime.timedelta(days=days - 1)).date()
        params = {
            'token': token,
            'fakeid': fakeid,
            'action': 'list_ex',
            'begin': '0',
            'count': '20',
            'type': '9',
            'lang': 'zh_CN',
            'f': 'json',
            'ajax': '1',
            'random': random.random()  # cache-buster expected by the endpoint
        }
        rsp = requests.get('https://mp.weixin.qq.com/cgi-bin/appmsg',
                           params=params,
                           cookies=cookies,
                           timeout=15)
        rsp.raise_for_status()
        data = rsp.json()
        if 'app_msg_list' not in data:
            raise RuntimeError('响应中未找到文章列表')
        articles = []
        for item in data['app_msg_list']:
            try:
                pub_time = datetime.datetime.fromtimestamp(item['update_time'])
                if pub_time.date() >= cutoff:
                    articles.append({
                        'title': item['title'],
                        'url': item['link'],
                        'pub_date': pub_time.strftime('%Y-%m-%d %H:%M')
                    })
            except KeyError:
                # Skip malformed entries missing expected keys.
                continue
        return articles
    except Exception as e:
        logger.error(f'获取文章列表失败: {str(e)}')
        raise


def me_summarize_single_article(title, content):
    """Produce a one-sentence AI summary of a single article.

    Returns the summary text, or a bracketed error marker when the API key
    is missing or the call fails.
    """
    if not KIMI_ME:
        logger.error("未配置Kimi API密钥，无法进行总结")
        return "[未配置API密钥]"

    openai.api_key = KIMI_ME
    openai.base_url = "https://api.moonshot.cn/v1/"

    prompt = (
        "总结这篇文章主要内容，一句话讲清，什么事件什么单位或者什么人干了什么事，"
        "产生了什么影响，有什么意义。只输出总结内容其余不输出。\n\n"
        f"文章标题: {title}\n"
        f"文章内容: {content[:4000]}"  # cap article length to fit the context window
    )

    try:
        response = openai.chat.completions.create(
            model=KIMI_MODEL_ME,
            messages=[
                {"role": "system", "content": "你是一个专业的内容总结助手。"},
                {"role": "user", "content": prompt}
            ],
            temperature=0.3,
            max_tokens=150
        )
        return response.choices[0].message.content.strip()
    except Exception as e:
        logger.error(f"总结文章失败: {title} - {str(e)}")
        return f"[总结失败: {str(e)}]"


def get_me_news():
    """Collect and summarize magnesium-industry news from WeChat accounts.

    Logs into the WeChat MP platform interactively (QR scan), pulls recent
    posts from a fixed list of accounts, appends the raw article text plus a
    per-article AI summary to today's raw-content file, and returns the
    numbered summary lines (or ["无"] on failure).
    """
    try:
        logger.info("开始获取镁业新闻...")
        cookies = me_login()
        logger.info('登录成功，获取cookies完成')
        token = me_get_token(cookies)
        logger.info(f'获取到token: {token}')
        nicknames = ['元镁体', '尚镁网', '府谷镁']
        all_articles = []  # NOTE(review): never used — candidate for removal
        summaries = []  # per-article AI summary lines to return
        summary_counter = 1  # running number prefixed to each summary

        # Append to the shared raw-content file created by main().
        today_date = datetime.date.today().strftime("%Y%m%d")
        raw_filename = os.path.join(CUR_DIR, f"原始内容_{today_date}.txt")
        with open(raw_filename, 'a', encoding='utf-8') as raw_file:
            raw_file.write("\n\n======= 镁业新闻原始内容 =======\n\n")

            for nickname in nicknames:
                try:
                    logger.info(f'处理公众号: {nickname}')
                    fakeid = me_get_fakeid(token, cookies, nickname)
                    articles = me_get_wechat_articles(token, fakeid, cookies, days=2)
                    logger.info(f'找到{len(articles)}篇相关文章')
                    if not articles:
                        logger.warning(f'公众号 {nickname} 没有相关文章')
                        continue

                    for article in tqdm(articles, desc=f'抓取{nickname}内容'):
                        content = me_fetch_article(article['url'])
                        # Record the raw article for later auditing.
                        raw_file.write(f"【公众号】: {nickname}\n")
                        raw_file.write(f"【标题】: {article['title']}\n")
                        raw_file.write(f"【发布时间】: {article['pub_date']}\n")
                        raw_file.write(f"【URL】: {article['url']}\n")
                        raw_file.write(f"【内容】:\n{content}\n")
                        raw_file.write("-" * 80 + "\n\n")

                        # Each article is summarized individually by the AI.
                        logger.info(f"发送第 {summary_counter} 篇镁业文章给AI总结...")
                        summary = me_summarize_single_article(article['title'], content)

                        # Keep the numbered summary for the final briefing.
                        summaries.append(f"{summary_counter}. {summary}")
                        summary_counter += 1

                        # Persist the summary next to the raw text as well.
                        raw_file.write(f"【AI总结】: {summary}\n")
                        raw_file.write("=" * 80 + "\n\n")

                        # Random delay — presumably to avoid rate limiting.
                        time.sleep(random.uniform(1, 3))
                except Exception as e:
                    logger.error(f'处理公众号{nickname}时出错: {str(e)}')
                    raw_file.write(f"处理公众号{nickname}时出错: {str(e)}\n")

        return summaries
    except Exception as e:
        logger.error(f'获取镁业新闻失败: {str(e)}')
        traceback.print_exc()
        return ["无"]


# ---------- Main program ----------
def main():
    """End-to-end pipeline: gather all news sections, write the txt draft,
    pause for manual editing, then build the Word document."""
    today = datetime.date.today()
    logger.info("开始生成班超要闻")

    # Create (truncate) today's raw-content file; later steps append to it.
    today_date = today.strftime("%Y%m%d")
    raw_filename = os.path.join(CUR_DIR, f"原始内容_{today_date}.txt")
    with open(raw_filename, 'w', encoding='utf-8') as raw_file:
        raw_file.write(f"原始内容 - {today_date}\n")
        raw_file.write("=" * 50 + "\n\n")

    ni = get_national_international_news()
    # Append the domestic/international raw content to the combined raw file.
    ni_raw_filename = os.path.join(CUR_DIR, f"原始内容---{today_date}.txt")
    if os.path.exists(ni_raw_filename):
        with open(ni_raw_filename, 'r', encoding='utf-8') as ni_file:
            ni_raw_content = ni_file.read()
        with open(raw_filename, 'a', encoding='utf-8') as raw_file:
            raw_file.write("\n\n======= 国内国际新闻原始内容 =======\n\n")
            raw_file.write(ni_raw_content)

    # Pause between stages — presumably to ease API/site rate limits (TODO confirm).
    time.sleep(20)
    bt = get_bt_news()
    # Append the Bingtuan raw content to the combined raw file.
    with open(raw_filename, 'a', encoding='utf-8') as raw_file:
        raw_file.write("\n\n======= 兵团新闻原始内容 =======\n\n")
        raw_file.write("\n".join(bt) + "\n")

    time.sleep(20)
    me = get_me_news()  # magnesium news; writes its own raw content

    # International crude-oil snapshot.
    time.sleep(20)
    oil_info = get_oil_info()
    oil_news = [oil_info] if oil_info else ["未获取到国际原油信息"]

    # Aggregate every section for output.
    all_news = {
        "国内新闻": ni.get("国内新闻", ["无"]),
        "国际新闻": ni.get("国际新闻", ["无"]),
        "兵团新闻": bt or ["无"],
        "镁业新闻": me or ["无"],
        "国际原油": oil_news
    }

    # 1. Write the txt draft first.
    # NOTE(review): written to the current working directory, unlike the raw
    # files which go to CUR_DIR — confirm this is intentional.
    txt_filename = f"班超要闻-{today:%Y%m%d}.txt"
    with open(txt_filename, 'w', encoding='utf-8') as f:
        for sec, items in all_news.items():
            f.write(f"{sec}：\n")
            for it in items:
                f.write(it + "\n")
            f.write("\n")
    logger.info(f"txt 已保存: {txt_filename}")
    print(f"\n已生成 {txt_filename}，请修改后保存")
    # Block until the operator has hand-edited the txt and pressed Enter.
    # NOTE(review): the Word doc below is built from the in-memory all_news,
    # so manual edits to the txt are NOT re-read — confirm this is intended.
    input()
    
    # 2. Generate the Word document.
    generate_word_document(all_news, today)


def generate_word_document(news_data, today):
    """Fill the banchao.docx template with today's news and save a dated copy.

    Args:
        news_data (dict): section name -> list of news strings.
        today (datetime.date): date used for the header and output filename.
    """
    template_path = os.path.join(CUR_DIR, "banchao.docx")
    output_path = os.path.join(CUR_DIR, f"班超要闻-{today:%Y%m%d}.docx")

    try:
        document = Document(template_path)
        logger.info(f"成功加载模板文档: {template_path}")

        # Rewrite the date line, then refill each news section in place.
        update_date_in_document(document, today)
        update_news_content_in_document(document, news_data)

        document.save(output_path)
        logger.info(f"Word文档已保存: {output_path}")

    except Exception as e:
        logger.error(f"生成Word文档失败: {str(e)}")
        traceback.print_exc()


def update_date_in_document(doc, today):
    """Replace the first 'X月Y日' date found in the document with today's.

    Only the first matching paragraph is rewritten; when that paragraph is
    the title line (contains “班超”), it is restyled as centered SimHei 22pt.
    """
    try:
        date_str = f"{today.month}月{today.day}日"

        for paragraph in doc.paragraphs:
            before = paragraph.text
            after = re.sub(r'\d+月\s*\d+日', date_str, before)
            if after == before:
                continue

            paragraph.clear()
            run = paragraph.add_run(after)
            if "班超" in before:
                # Setting font.name first creates the rPr/rFonts elements
                # that the east-Asian font override below relies on.
                run.font.name = 'SimHei'
                run._element.rPr.rFonts.set(qn('w:eastAsia'), 'SimHei')
                run.font.size = Pt(22)  # Chinese size-2 type
                paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
            break  # only the first dated paragraph is updated

        logger.info("日期更新完成")
    except Exception as e:
        logger.error(f"更新日期失败: {str(e)}")


def update_news_content_in_document(doc, news_data):
    """Rewrite every known news section of the document from *news_data*.

    Args:
        doc: python-docx Document.
        news_data (dict): {"section name": ["item1", "item2", ...]}.
    """
    try:
        # Sections are processed in this fixed order; the keyword searched
        # for in the document is identical to the section name.
        section_names = ("国际新闻", "国内新闻", "兵团新闻", "镁业新闻", "国际原油")

        for name in section_names:
            if name in news_data:
                replace_section_content(doc, name, news_data[name])

        logger.info("新闻内容更新完成")
    except Exception as e:
        logger.error(f"更新新闻内容失败: {str(e)}")


def replace_section_content(doc, section_keyword, content_list):
    """Replace the body of one named section in the Word document.

    Finds the paragraph whose text exactly equals *section_keyword*, restyles
    it as a SimHei 16pt heading, clears the paragraphs below it up to the
    next section heading, then writes *content_list* into those paragraph
    slots in SimFang 15pt (adding paragraphs at the end if needed).

    Args:
        doc: python-docx Document.
        section_keyword (str): exact heading text of the section.
        content_list (list): replacement item strings.
    """
    try:
        # Locate the section heading paragraph.
        section_start_index = -1

        for i, paragraph in enumerate(doc.paragraphs):
            text = paragraph.text.strip()
            # Exact match only — avoids touching body text mentioning the name.
            if section_keyword == text:
                section_start_index = i
                # Restyle the heading (SimHei, Chinese size-3 type).
                paragraph.clear()
                run = paragraph.add_run(section_keyword)
                run.font.name = 'SimHei'
                run._element.rPr.rFonts.set(qn('w:eastAsia'), 'SimHei')
                run.font.size = Pt(16)  # size-3 type is about 16pt
                break

        # Proceed only if the heading was found.
        if section_start_index != -1:
            # Collect the indices of body paragraphs to blank out, starting
            # just below the heading.
            paragraphs_to_clear = []
            i = section_start_index + 1

            # Walk until the next section heading or the end of the document.
            while i < len(doc.paragraphs):
                paragraph = doc.paragraphs[i]
                text = paragraph.text.strip()

                # Is this line another section's heading?
                other_sections = ["国际新闻", "国内新闻", "兵团新闻", "镁业新闻", "国际原油"]
                is_other_section = text in other_sections and text != section_keyword

                # Stop at another heading, or at a trailing blank line near
                # the end of the document.
                if is_other_section or (not text and i >= len(doc.paragraphs) - 2):
                    break

                paragraphs_to_clear.append(i)
                i += 1

            # Blank out the collected body paragraphs.
            for idx in paragraphs_to_clear:
                doc.paragraphs[idx].clear()

            # Write the new items into the slots just below the heading.
            insert_position = section_start_index + 1
            for item_text in content_list:
                # Skip placeholder/error markers.
                if item_text.strip() and item_text.strip() not in ["无", "获取失败", "未获取到国际原油信息"]:
                    # Grow the document if we run out of paragraph slots.
                    while len(doc.paragraphs) <= insert_position:
                        doc.add_paragraph()

                    # Write the item in SimFang (body typeface).
                    paragraph = doc.paragraphs[insert_position]
                    paragraph.clear()
                    run = paragraph.add_run(item_text.strip())
                    run.font.name = 'SimFang'
                    run._element.rPr.rFonts.set(qn('w:eastAsia'), 'SimFang')
                    run.font.size = Pt(15)  # ~size-3 type; 15pt suits SimFang
                    insert_position += 1

    except Exception as e:
        logger.error(f"替换栏目 {section_keyword} 内容时出错: {str(e)}")


# Script entry point: build today's briefing end-to-end.
if __name__ == '__main__':
    main()