import requests
from bs4 import BeautifulSoup
import json
import time
from datetime import datetime
import os
import re
from urllib.parse import urljoin
import logging

# Logging configuration: INFO-level messages with a timestamp and level
# prefix, emitted via the root handler (stderr by default).
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

class EducationAICrawler:
    """Crawl education/AI articles from Zhihu, Baidu and Sogou-WeChat search.

    Each ``search_*`` method scrapes one search-result page best-effort and
    appends ``{'source', 'title', 'content'}`` records to ``self.results``;
    :meth:`save_results` writes them out as a timestamped Markdown report.
    """

    # Maximum articles kept per source per keyword.
    MAX_ARTICLES = 5
    # Characters of each article body kept in the report snippet.
    SNIPPET_LEN = 200
    # Seconds before a stalled HTTP request is abandoned.
    REQUEST_TIMEOUT = 10

    def __init__(self):
        # Desktop Chrome UA so the search pages serve regular HTML.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        self.results = []
        # Per-instance logger keeps the class self-contained (no reliance on
        # a module-level ``logger`` global).
        self.logger = logging.getLogger(__name__)

    def _fetch_soup(self, url, params=None):
        """GET *url* (query in *params*, URL-encoded by requests) and parse it.

        Raises on HTTP errors and network failures; callers handle those.
        The timeout fixes the original code, which could block indefinitely
        on a hung server. *params* also fixes raw, unencoded keywords being
        interpolated straight into the URL.
        """
        response = requests.get(
            url,
            headers=self.headers,
            params=params,
            timeout=self.REQUEST_TIMEOUT,
        )
        response.raise_for_status()  # don't silently parse 4xx/5xx pages
        return BeautifulSoup(response.text, 'html.parser')

    def _collect(self, source, cards, find_title, find_content):
        """Append up to MAX_ARTICLES records extracted from *cards*.

        *find_title* / *find_content* map a card element to a node (or None).
        Malformed cards are skipped instead of raising AttributeError — the
        original called ``.text`` on a possibly-None ``find()`` result, which
        aborted all remaining cards via the broad except.
        """
        for card in cards[:self.MAX_ARTICLES]:
            title_node = find_title(card)
            content_node = find_content(card)
            if title_node is None or content_node is None:
                continue  # search pages mix in ads / other card types
            self.results.append({
                'source': source,
                'title': title_node.text.strip(),
                # Keep only a short snippet of the body.
                'content': content_node.text.strip()[:self.SNIPPET_LEN] + '...',
            })

    def search_zhihu(self, keyword):
        """Search Zhihu for *keyword* and record the top results (best-effort)."""
        try:
            soup = self._fetch_soup(
                "https://www.zhihu.com/search",
                params={'type': 'content', 'q': keyword},
            )
            self._collect(
                '知乎',
                soup.find_all('div', class_='SearchResult-Card'),
                lambda card: card.find('h2'),
                lambda card: card.find('div', class_='RichText'),
            )
        except Exception as e:
            # Best-effort: one failing source must not abort the whole crawl.
            self.logger.error(f"知乎搜索出错: {str(e)}")

    def search_baidu(self, keyword):
        """Search Baidu for *keyword* and record the top results (best-effort)."""
        try:
            soup = self._fetch_soup(
                "https://www.baidu.com/s",
                params={'wd': keyword},
            )
            self._collect(
                '百度',
                soup.find_all('div', class_='result'),
                lambda card: card.find('h3'),
                lambda card: card.find('div', class_='c-abstract'),
            )
        except Exception as e:
            self.logger.error(f"百度搜索出错: {str(e)}")

    def search_wechat(self, keyword):
        """Search WeChat articles (via Sogou) for *keyword* (best-effort)."""
        try:
            soup = self._fetch_soup(
                "https://weixin.sogou.com/weixin",
                params={'type': 2, 'query': keyword},
            )
            self._collect(
                '微信',
                soup.find_all('div', class_='txt-box'),
                lambda card: card.find('h3'),
                lambda card: card.find('p', class_='txt-info'),
            )
        except Exception as e:
            self.logger.error(f"微信搜索出错: {str(e)}")

    def save_results(self):
        """Write collected results to a timestamped Markdown file.

        Returns the filename of the report that was written.
        """
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        filename = f'crawler_results_{timestamp}.md'

        with open(filename, 'w', encoding='utf-8') as f:
            f.write('# 智能体教学相关信息收集\n\n')
            f.write(f'收集时间：{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n\n')

            for result in self.results:
                f.write(f'## {result["source"]} - {result["title"]}\n\n')
                f.write(f'{result["content"]}\n\n')
                f.write('---\n\n')

        # Bug fix: the original logged the literal placeholder "(unknown)"
        # instead of the actual filename.
        self.logger.info(f"结果已保存到文件: {filename}")
        return filename

    def run(self, keywords, delay=1):
        """Crawl every keyword across all three sources and save the report.

        *delay* seconds are slept after each request to stay polite
        (default 1, matching the original behavior). Returns the report
        filename from :meth:`save_results`.
        """
        for keyword in keywords:
            self.logger.info(f"正在搜索关键词: {keyword}")
            for search in (self.search_zhihu, self.search_baidu, self.search_wechat):
                search(keyword)
                time.sleep(delay)

        return self.save_results()

def main():
    """Entry point: crawl a fixed set of education-AI keywords and print the report path."""
    search_terms = [
        "AI教学助手",
        "智能体教育",
        "AI课堂互动",
        "教育智能体应用",
        "AI教学案例",
    ]

    result_file = EducationAICrawler().run(search_terms)
    print(f"爬取完成，结果已保存到: {result_file}")


if __name__ == "__main__":
    main()