import os
import shutil
import time

import requests
import re
from util.logger import get_looger

# Logger for this crawler run (get_looger is a project-local helper; the
# misspelled name matches its definition in util.logger).
logger = get_looger("csdn文章.log", __file__)

logger.info("爬虫启动......")
# Output directory for scraped articles; wiped on every run so each crawl
# starts from a clean slate.
root = 'static/csdn文章/'
if os.path.exists(root):
    shutil.rmtree(root)
os.makedirs(root)

# CSDN homepage — article links are harvested from this page.
url = "https://www.csdn.net/"

# Browser-like headers so requests are less likely to be blocked.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36',
    'referer': 'https://www.csdn.net/'
}


def extract_real_title(text):
    """Extract the real article title from text that may carry summary noise.

    The input is typically the inner HTML of an ``<a>`` tag: HTML tags are
    stripped and whitespace collapsed first, then a series of heuristics
    tries to cut the title off before any trailing summary text.

    Args:
        text: Candidate title text, possibly containing HTML tags and a
            concatenated article summary.

    Returns:
        The cleaned title string; when no heuristic matches, the raw text
        capped at 80 characters.
    """
    # Strip HTML tags and collapse runs of whitespace.
    text = re.sub(r'<[^>]+>', '', text)
    text = re.sub(r'\s+', ' ', text).strip()

    # Heuristics tried in order; a match must capture at least 5 characters.
    patterns = [
        # Cut at the first sentence-ending punctuation mark.
        r'^([^。！？.!?]{5,}?)[。！？.!?]',
        # Cut at an ellipsis ("..." or "…").
        # BUGFIX: the original pattern also allowed a "\s*$" alternative
        # here, which matched the WHOLE string for any text of 10+ chars,
        # making the next heuristic and the 80-char cap below unreachable.
        r'^(.{10,}?)(?:\s*\.\.\.|\s*…)',
        # Cut right before common summary lead-in words.
        r'^(.{10,}?)(?:本文|主要|介绍了|探讨|摘要|内容将覆盖)',
    ]

    for pattern in patterns:
        match = re.match(pattern, text)
        if match and len(match.group(1).strip()) >= 5:
            return match.group(1).strip()

    # Fallback: return the text as-is, capped at 80 characters.
    if len(text) > 80:
        return text[:80].strip()

    return text


# Fetch the CSDN homepage and decode it as UTF-8.
response = requests.get(url, headers=headers)
response.encoding = 'utf-8'
html_content = response.text

# Match anchor tags whose href is a blog article URL, capturing both the
# link and the anchor's inner HTML (candidate title text).
title_pattern = r'<a[^>]*href="(https?://blog\.csdn\.net/[^/]+/article/details/\d+)"[^>]*>(.*?)</a>'
items = re.findall(title_pattern, html_content, re.S)

# Deduplicate by link and clean up titles.
filtered_items = []
seen_links = set()

for link, title in items:
    if link in seen_links:
        continue
    seen_links.add(link)

    # Reduce the anchor text to a plausible title.
    clean_title = extract_real_title(title)

    # Keep only titles of reasonable length that neither start with common
    # summary lead-in words nor contain obvious navigation/UI words.
    if (5 <= len(clean_title) <= 80 and
            not re.search(r'^(本文|主要|介绍了|摘要|内容将覆盖|据相关统计|说到环境搭建)', clean_title) and
            not re.search(r'(注册|登录|下载|更多|查看|点击|readmore)', clean_title, re.I)):
        filtered_items.append((link, clean_title))

logger.info(f"找到 {len(filtered_items)} 个有效的文章链接")

success_count = 0

for i, (link, clean_title) in enumerate(filtered_items, 1):
    logger.info(f"开始爬取 ({i}/{len(filtered_items)}): {clean_title}")

    try:
        # Fetch the article page.
        article_response = requests.get(link, headers=headers, timeout=10)
        article_response.encoding = 'utf-8'
        article_html = article_response.text

        # Candidate containers for the article body, tried in order.
        # NOTE(review): the non-greedy (.*?)</div> stops at the FIRST closing
        # </div>, so pages with nested divs may yield truncated content; the
        # len > 100 check below only partially guards against this — verify
        # against real article pages.
        content_patterns = [
            r'<article[^>]*>(.*?)</article>',
            r'<div[^>]*id="content_views"[^>]*>(.*?)</div>',
            r'<div[^>]*class="blog-content-box"[^>]*>(.*?)</div>',
            r'<div[^>]*class="article_content"[^>]*>(.*?)</div>'
        ]

        content_text = ""
        for pattern in content_patterns:
            content_match = re.search(pattern, article_html, re.S)
            if content_match:
                content_html = content_match.group(1)

                # Remove code samples, scripts, styles and copy buttons
                # entirely (tags AND their contents) before text extraction.
                content_html = re.sub(r'<pre[^>]*>.*?</pre>', '', content_html, flags=re.S)
                content_html = re.sub(r'<code[^>]*>.*?</code>', '', content_html, flags=re.S)
                content_html = re.sub(r'<script[^>]*>.*?</script>', '', content_html, flags=re.S)
                content_html = re.sub(r'<style[^>]*>.*?</style>', '', content_html, flags=re.S)
                content_html = re.sub(r'<div[^>]*class="hljs-button"[^>]*>.*?</div>', '', content_html, flags=re.S)

                # Convert to plain text while keeping paragraph structure.
                content_text = re.sub(r'</p>', '\n\n', content_html)  # paragraph ends -> blank line
                content_text = re.sub(r'<br\s*/?>', '\n', content_text)  # <br> -> newline
                content_text = re.sub(r'<[^>]+>', '', content_text)  # drop remaining HTML tags
                content_text = re.sub(r'\n\s*\n', '\n\n', content_text)  # normalize blank lines
                content_text = re.sub(r'[ \t]+', ' ', content_text)  # collapse spaces/tabs
                content_text = content_text.strip()

                if len(content_text) > 100:  # accept only if there is enough content
                    break

        if content_text and len(content_text) > 100:
            # Build a filesystem-safe filename from the title.
            safe_title = re.sub(r'[\\/*?:"<>|\n\r\t]', '', clean_title)
            if not safe_title.strip():
                safe_title = f"未命名文章_{i}"

            filename = f"{safe_title}.txt"
            filepath = os.path.join(root, filename)

            # Append a numeric suffix if the target file already exists.
            counter = 1
            original_filepath = filepath
            while os.path.exists(filepath):
                name, ext = os.path.splitext(original_filepath)
                filepath = f"{name}_{counter}{ext}"
                counter += 1

            # Write a metadata header followed by the article text.
            with open(filepath, 'w', encoding='utf-8') as f:
                f.write(f"标题: {clean_title}\n")
                f.write(f"链接: {link}\n")
                f.write(f"爬取时间: {time.strftime('%Y-%m-%d %H:%M:%S')}\n")
                f.write("=" * 50 + "\n\n")
                f.write(content_text)

            logger.info(f"文章内容长度{len(content_text)}，保存到{filepath}")
            success_count += 1
        else:
            logger.warning(f"未找到有效文章内容或内容过短: {clean_title}")

        # Be polite: pause between requests.
        time.sleep(1)

    except requests.exceptions.RequestException as e:
        logger.error(f"访问文章页面失败 {clean_title}: {e}")
    except OSError as e:
        logger.error(f"文件操作失败 {clean_title}: {e}")
    except Exception as e:
        logger.error(f"爬取文章失败 {clean_title}: {e}")

logger.info(f"爬取完成！成功爬取 {success_count}/{len(filtered_items)} 篇文章")