from services.crawler.zhihu_crawler import ZhihuCrawler
from datetime import datetime
import time
import random
from services.crawler.address_crawler import WebScraper
import traceback

def zhihu_crawler():
    """Crawl high-vote answers from university-related Zhihu topics.

    For each topic: crawl its answers, filter for high-vote ones, format
    them, sort by vote count (descending), and save to a dated JSON file.

    Returns:
        dict: mapping of topic name -> list of formatted answers,
        each list sorted by ``vote_count`` in descending order.
    """
    # Zhihu topic IDs for university-related topics
    university_topics = {
        '大学生活': '19552234',
        '大学': '19556353',
        '大学专业': '19560372'
    }

    crawler = ZhihuCrawler()
    all_results = {}

    for topic_name, topic_id in university_topics.items():
        print(f"\n开始爬取「{topic_name}」话题...")

        # Crawl the raw answers for this topic
        answers = crawler.crawl_topic(topic_id)

        # Keep only high-vote answers
        filtered_answers = crawler.filter_answers(answers)

        # Normalize each answer into the output schema
        formatted_answers = [crawler.format_answer(ans) for ans in filtered_answers]

        # Sort by vote count, highest first
        formatted_answers.sort(key=lambda x: x['vote_count'], reverse=True)

        all_results[topic_name] = formatted_answers

        # Persist this topic's results to a dated JSON file
        filename = f'zhihu_{topic_name}_{datetime.now().strftime("%Y%m%d")}.json'
        crawler.save_to_file(formatted_answers, filename)

        print(f"已保存 {len(formatted_answers)} 条「{topic_name}」话题的高赞回答")
        # Random delay between topics to avoid hammering the site
        time.sleep(random.uniform(2, 4))

    # Fix: results were accumulated but never returned to the caller
    return all_results


def address_crawler():
    """Crawl address data, save it to CSV, then derive level info and save it as JSON.

    Save failures are logged and re-raised; any error is reported with a full
    traceback and propagated to the caller. A completion message is always printed.
    """
    try:
        print("开始爬取地址数据...")
        web_scraper = WebScraper()
        raw_addresses = web_scraper.get_address_info()

        # Persist the raw records to CSV; log and surface any save failure.
        try:
            web_scraper.save_to_csv(raw_addresses, "地址数据.csv")
        except Exception as save_err:
            print(f"保存数据失败: {save_err}")
            raise

        structured_levels = web_scraper.handle_address_info(raw_addresses)

        # Persist the level-structured records to JSON; log and surface any save failure.
        try:
            web_scraper.save_to_json_list(structured_levels, "地址数据.json")
        except Exception as save_err:
            print(f"保存数据失败: {save_err}")
            raise

    except Exception as err:
        # Top-level boundary: report with full traceback, then propagate.
        print(f"地址爬取错误: {err}")
        print(f"详细错误信息:\n{traceback.format_exc()}")
        raise
    finally:
        # Always announce completion, success or failure.
        print("爬取任务结束")