import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
from datetime import date
import calendar
import time
import logging
import re # Ensure re is imported

# --- Configuration (constants) ---
# Root of the e-paper site; daily editions live under "<YYYY-MM>/<DD>/".
BASE_URL = "https://paper.cntheory.com/cntheory/"
# Politeness delay (seconds) slept between consecutive HTTP requests.
REQUEST_DELAY = 1
# Desktop Chrome User-Agent so the server treats us like a normal browser.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}

# --- Logging setup ---
# INFO level keeps the per-article DEBUG traces hidden.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# --- 函数定义 ---

def get_soup(url):
    """Fetch *url* and return a BeautifulSoup of its HTML, or None on failure.

    Returns None (after logging) for HTTP errors, network errors, parse
    errors, and for pages that return 200 but whose body contains the
    site's Chinese "page does not exist" markers.
    """
    try:
        response = requests.get(url, headers=HEADERS, timeout=15)
        response.raise_for_status()
        # BUG FIX: the encoding must be set BEFORE inspecting response.text.
        # Previously the soft-404 marker check ran first, so on pages served
        # without a charset header (requests then assumes ISO-8859-1) the
        # Chinese markers were mojibake and never matched.
        response.encoding = response.apparent_encoding
        if "您要查看的页面不存在" in response.text or "资源不存在" in response.text:
            logging.warning(f"页面返回成功状态码，但内容指示页面不存在: {url}")
            return None
        return BeautifulSoup(response.text, 'html.parser')
    except requests.exceptions.HTTPError as e:
        # 404s are expected for dates without an edition; log them softly.
        if e.response.status_code == 404:
            logging.warning(f"页面未找到 (404): {url}")
        else:
            logging.error(f"请求URL时发生HTTP错误 {url}: {e}")
        return None
    except requests.exceptions.RequestException as e:
        logging.error(f"请求URL时发生网络错误 {url}: {e}")
        return None
    except Exception as e:
        # Catch-all boundary: never let a parse/encoding surprise kill the crawl.
        logging.error(f"解析URL时发生未知错误 {url}: {e}")
        return None

# --- scrape_article_details FUNCTION (Unchanged from your provided version) ---
def scrape_article_details(article_url):
    """Scrape one article page for its title, author line and position.

    Returns a ``(title, author, position)`` tuple. ``title`` is None when
    the page cannot be fetched or has no <h1>; ``author``/``position``
    default to "N/A" and become "Error" if parsing raises.
    """
    logging.debug(f"--- 开始处理文章详情: {article_url} ---") # Hidden
    soup = get_soup(article_url)
    if not soup:
        return None, None, None

    title, author, position = None, "N/A", "N/A"
    try:
        # 1. Title: prefer the main-content heading, fall back to any <h1>.
        heading = soup.select_one('.main-content h1') or soup.select_one('h1')
        if heading:
            title = heading.get_text(strip=True)
        else:
            logging.error(f"未找到标题 in {article_url}")

        if title:
            # 2. Author: text of the <p> directly following the first <h3>.
            subheading = soup.select_one('h3')
            if subheading:
                byline = subheading.find_next_sibling('p')
                if byline:
                    byline_text = byline.get_text(strip=True)
                    if byline_text:
                        author = byline_text

            # 3. Position: <strong> inside the last paragraph of the body.
            body = soup.select_one('div.text') or soup.select_one('.main-content')
            if body:
                body_paragraphs = body.select('p')
                if body_paragraphs:
                    strong_tag = body_paragraphs[-1].find('strong')
                    if strong_tag:
                        strong_text = strong_tag.get_text(strip=True)
                        if strong_text:
                            position = strong_text
            else:
                logging.warning(f"未找到文章主体容器来搜索职务 for {article_url}")

    except Exception as e:
        logging.error(f"解析文章详情 {article_url} 时发生异常: {e}", exc_info=False)
        title = title or None
        author = "Error"
        position = "Error"

    logging.debug(f"返回: Title='{title}', Author='{author}', Position='{position}'") # Hidden
    return title, author, position

# --- Function to scrape a specific month (不变) ---
def scrape_month_data(target_year, target_month):
    """Scrape every daily edition of the given year/month.

    Returns a list of dicts with keys ``date``, ``title``, ``author``
    (raw, possibly still carrying a word-count suffix), ``position``
    and ``url``.  Days whose front page is missing are skipped.
    """
    month_data = []
    try:
        num_days = calendar.monthrange(target_year, target_month)[1]
        logging.info(f"{target_year}-{target_month:02d} 月份共有 {num_days} 天。开始处理...")
    except Exception as e:
        logging.error(f"无法确定月份 {target_year}-{target_month:02d} 的天数: {e}")
        return month_data

    for day_num in range(1, num_days + 1):
        try:
            current_date = date(target_year, target_month, day_num)
        except ValueError:
            logging.error(f"无法创建日期: {target_year}-{target_month}-{day_num}")
            continue

        # Front page of the day's edition, e.g. <BASE>/2024-01/15/node_1.html
        front_page_url = f"{BASE_URL}{current_date.strftime('%Y-%m/%d')}/node_1.html"
        logging.info(f"--- 处理日期: {current_date.strftime('%Y-%m-%d')} ---")

        front_page_soup = get_soup(front_page_url)
        if not front_page_soup:
            logging.info(f"跳过日期 {current_date.strftime('%Y-%m-%d')} (无首页)")
            time.sleep(REQUEST_DELAY)
            continue

        article_links = front_page_soup.select('.news-item a[href^="content_"]')
        logging.info(f"找到 {len(article_links)} 个文章链接。")

        seen_urls = set()
        articles_found_today = 0
        for anchor in article_links:
            href = anchor.get('href')
            article_url = urljoin(front_page_url, href)
            # Skip missing hrefs and links already visited today.
            if not href or article_url in seen_urls:
                continue
            seen_urls.add(article_url)

            logging.info(f"-> 抓取: {article_url}")
            # May return an author still carrying the "字数：" suffix.
            title, author, position = scrape_article_details(article_url)

            if title:
                articles_found_today += 1
                month_data.append({
                    "date": current_date,
                    "title": title,
                    "author": author,  # stored raw; cleaned at print time
                    "position": position,
                    "url": article_url,
                })
                logging.info(f"  => 获取成功: '{title[:40]}...'")
            else:
                logging.warning(f"  => 未获取到标题，跳过: {article_url}")
            time.sleep(REQUEST_DELAY)

        logging.info(f"日期 {current_date.strftime('%Y-%m-%d')} 处理完成，获取 {articles_found_today} 篇详情。")

    return month_data

# --- MODIFIED Function to print results ---
def print_results(data_list, month_str):
    """Print scraped articles sorted by date, cleaning the author field.

    The raw author line often ends with a word-count suffix ("字数：1234");
    it is stripped here, just before printing, so the stored data stays raw.
    ``data_list`` is sorted in place by date as a side effect.
    """
    print("\n" + "="*30)
    print(f"--- {month_str} 爬取结果 (按日期排序) ---")
    print("="*30 + "\n")
    if not data_list:
        print("未能抓取到任何文章信息。")
        logging.warning(f"月份 {month_str} 未抓取到数据。")
        return

    data_list.sort(key=lambda x: x['date'])
    for article in data_list:
        author_raw = article.get('author', 'N/A')  # tolerate missing key
        if isinstance(author_raw, str):
            # BUG FIX: the site's suffix uses the fullwidth colon ("字数："),
            # but the old pattern only matched the ASCII ":" and therefore
            # never stripped anything.  Accept both colon variants.
            cleaned_author = re.sub(r'\s*字数[:：].*$', '', author_raw).strip()
        else:
            cleaned_author = str(author_raw)  # defensive: non-string authors

        print(f"日期: {article['date'].strftime('%Y-%m-%d')}")
        print(f"  标题: {article['title']}")
        print(f"  作者: {cleaned_author}")
        print(f"  职务: {article['position']}")
        print("-" * 20)

# --- 主逻辑 (不变) ---

if __name__ == "__main__":
    logging.info("理论中国网文章爬虫启动。")
    logging.info(f"日志级别设置为 INFO。")

    while True:
        month_input = input("\n请输入目标月份 (格式 YYYY-MM)，或输入 'quit' 退出: ")
        if month_input.lower() == 'quit': logging.info("收到退出指令。"); break
        if not re.match(r"^\d{4}-\d{2}$", month_input):
            print("错误: 无效的月份格式。请使用 YYYY-MM 格式。"); logging.warning(f"用户输入无效格式: {month_input}"); continue
        try:
            year_str, month_str = month_input.split('-')
            target_year = int(year_str); target_month = int(month_str)
            if not (1 <= target_month <= 12): raise ValueError("月份必须在 01 和 12 之间")
        except ValueError as e:
            print(f"错误: 无效的月份值。 {e}"); logging.warning(f"用户输入无效月份值: {month_input}"); continue

        start_time = time.time()
        logging.info(f"======== 开始处理月份: {month_input} ========")
        scraped_data = scrape_month_data(target_year, target_month)
        end_time = time.time(); total_time = end_time - start_time
        logging.info(f"======== 月份 {month_input} 处理完毕 ========"); logging.info(f"共获取 {len(scraped_data)} 篇文章数据。"); logging.info(f"耗时: {total_time:.2f} 秒。")
        # Call the modified print_results function
        print_results(scraped_data, month_input)
        print("\n" + "*"*40) # Separator

    logging.info("程序执行完毕。")