#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
知乎爬虫脚本
用于测试与Tauri GUI的集成
"""

import argparse
import json
import time
import sys
import os
from datetime import datetime
import sqlite3

class ZhihuCrawler:
    """Simulated Zhihu crawler used to test integration with the Tauri GUI.

    Generates fake "answer" records for the configured keywords, writes
    timestamped progress logs to stdout (which the GUI tails), and persists
    every record into a SQLite database.
    """

    def __init__(self, keywords, count=100, enable_comments=False,
                 enable_media=False, db_path='crawler_data.db'):
        """Initialize counters and open the database.

        Args:
            keywords: Search keywords the simulated records are built from.
            count: Target number of items to crawl.
            enable_comments: Whether comment crawling is enabled (only logged).
            enable_media: Whether media download is enabled (only logged).
            db_path: SQLite database file. Defaults to the previously
                hard-coded ``'crawler_data.db'`` so existing callers are
                unaffected; pass ``':memory:'`` for throwaway runs.
        """
        self.keywords = keywords
        self.count = count
        self.enable_comments = enable_comments
        self.enable_media = enable_media
        self.db_path = db_path
        self.crawled_count = 0
        self.success_count = 0
        self.failed_count = 0

        # Fail fast: a broken database path aborts before any crawling starts.
        self.init_database()

    def init_database(self):
        """Open the SQLite database and create the data table if missing.

        Exits the process with status 1 on failure — the fail-fast behavior
        the GUI wrapper relies on.
        """
        try:
            self.conn = sqlite3.connect(self.db_path)
            cursor = self.conn.cursor()

            # One flat table holding every crawled record.
            cursor.execute('''
                CREATE TABLE IF NOT EXISTS crawled_data (
                    id TEXT PRIMARY KEY,
                    platform TEXT NOT NULL,
                    data_type TEXT NOT NULL,
                    title TEXT NOT NULL,
                    content TEXT,
                    author TEXT NOT NULL,
                    author_id TEXT NOT NULL,
                    url TEXT NOT NULL,
                    like_count INTEGER DEFAULT 0,
                    comment_count INTEGER DEFAULT 0,
                    share_count INTEGER DEFAULT 0,
                    view_count INTEGER DEFAULT 0,
                    created_at TEXT NOT NULL,
                    crawled_at TEXT NOT NULL,
                    keywords TEXT NOT NULL
                )
            ''')

            self.conn.commit()
            print("[INFO] 数据库初始化成功")

        except Exception as e:
            print(f"[ERROR] 数据库初始化失败: {e}")
            sys.exit(1)

    def log_message(self, level, message):
        """Print a timestamped log line and flush so the GUI sees it live."""
        timestamp = datetime.now().strftime("%H:%M:%S")
        print(f"[{timestamp}] [{level}] {message}")
        sys.stdout.flush()

    def save_data(self, data):
        """Insert (or replace) one record in the database.

        Args:
            data: Dict carrying every column of the ``crawled_data`` table.

        Returns:
            True on success, False if the insert failed (logged as ERROR).
        """
        try:
            cursor = self.conn.cursor()
            # Parameterized statement; INSERT OR REPLACE makes re-crawls
            # idempotent on the primary key.
            cursor.execute('''
                INSERT OR REPLACE INTO crawled_data 
                (id, platform, data_type, title, content, author, author_id, url,
                 like_count, comment_count, share_count, view_count, 
                 created_at, crawled_at, keywords)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            ''', (
                data['id'], data['platform'], data['data_type'], data['title'],
                data['content'], data['author'], data['author_id'], data['url'],
                data['like_count'], data['comment_count'], data['share_count'],
                data['view_count'], data['created_at'], data['crawled_at'],
                data['keywords']
            ))
            self.conn.commit()
            return True
        except Exception as e:
            self.log_message("ERROR", f"保存数据失败: {e}")
            return False

    def simulate_crawl_item(self, index):
        """Simulate crawling one item: sleep, maybe fail, else save fake data.

        Args:
            index: Zero-based item index.

        Returns:
            True if the item was "crawled" and saved, False on a simulated
            failure or a database error.
        """
        # Simulated network latency (0.5–0.9 s depending on the index).
        time.sleep(0.5 + (index % 3) * 0.2)

        # Simulate an occasional failure. Note index 0 matches too, so the
        # very first item always "fails" — intentional in this simulation.
        if index % 25 == 0:
            self.failed_count += 1
            self.log_message("WARN", f"爬取第 {index + 1} 条数据失败")
            return False

        # Build a deterministic fake record from the index.
        data = {
            'id': f"zhihu_{int(time.time())}_{index}",
            'platform': 'zhihu',
            'data_type': 'answer',
            'title': f"知乎问答 {index + 1} - {self.keywords}",
            'content': f"这是关于'{self.keywords}'的第 {index + 1} 个回答内容。这里包含了详细的分析和见解...",
            'author': f"知乎用户{(index % 100) + 1}",
            'author_id': f"zhihu_user_{(index % 100) + 1}",
            'url': f"https://www.zhihu.com/question/12345/answer/{index + 1}",
            'like_count': (index % 500) + 10,
            'comment_count': (index % 50) + 1,
            'share_count': index % 20,
            'view_count': (index % 2000) + 100,
            'created_at': datetime.now().isoformat(),
            'crawled_at': datetime.now().isoformat(),
            'keywords': self.keywords
        }

        if self.save_data(data):
            self.success_count += 1
            self.log_message("INFO", f"成功爬取: {data['title']}")
            return True
        else:
            self.failed_count += 1
            return False

    def run(self):
        """Crawl up to ``self.count`` items, logging progress every 10 items.

        A file named ``stop_crawler.signal`` in the working directory stops
        the run early (the GUI creates it to request cancellation). The
        database connection is closed and a summary is logged on every exit
        path, including KeyboardInterrupt and unexpected errors.
        """
        self.log_message("INFO", "开始爬取知乎数据")
        self.log_message("INFO", f"关键词: {self.keywords}")
        self.log_message("INFO", f"目标数量: {self.count}")
        self.log_message("INFO", f"评论爬取: {'启用' if self.enable_comments else '禁用'}")
        self.log_message("INFO", f"媒体下载: {'启用' if self.enable_media else '禁用'}")

        start_time = time.time()

        try:
            for i in range(self.count):
                # Cooperative cancellation via a signal file dropped by the GUI.
                if os.path.exists('stop_crawler.signal'):
                    self.log_message("WARN", "收到停止信号，正在停止爬虫...")
                    os.remove('stop_crawler.signal')
                    break

                self.crawled_count = i + 1
                progress = (self.crawled_count / self.count) * 100

                self.simulate_crawl_item(i)

                # Report progress every 10 items and on the final item.
                if i % 10 == 0 or i == self.count - 1:
                    elapsed = time.time() - start_time
                    speed = self.crawled_count / (elapsed / 60) if elapsed > 0 else 0
                    self.log_message("INFO", 
                        f"进度: {progress:.1f}% ({self.crawled_count}/{self.count}) "
                        f"成功: {self.success_count} 失败: {self.failed_count} "
                        f"速度: {speed:.1f}/分钟"
                    )

        except KeyboardInterrupt:
            self.log_message("WARN", "用户中断爬虫")
        except Exception as e:
            self.log_message("ERROR", f"爬虫运行出错: {e}")
        finally:
            # Release the database handle even on abnormal exit.
            if hasattr(self, 'conn'):
                self.conn.close()

            elapsed = time.time() - start_time
            self.log_message("INFO", "爬虫结束")
            self.log_message("INFO", f"总耗时: {elapsed:.1f}秒")
            self.log_message("INFO", f"成功: {self.success_count} 失败: {self.failed_count}")

def main():
    """Parse command-line options, build a ZhihuCrawler, and run it."""
    arg_parser = argparse.ArgumentParser(description='知乎爬虫')
    arg_parser.add_argument('--platform', default='zhihu', help='平台名称')
    arg_parser.add_argument('--keywords', required=True, help='搜索关键词')
    arg_parser.add_argument('--count', type=int, default=100, help='爬取数量')
    arg_parser.add_argument('--type', default='search', help='爬取类型')
    arg_parser.add_argument('--enable-comments', action='store_true', help='启用评论爬取')
    arg_parser.add_argument('--enable-media', action='store_true', help='启用媒体下载')

    options = arg_parser.parse_args()

    # NOTE: --platform and --type are accepted for CLI compatibility with the
    # GUI wrapper, but this simulated crawler does not consume them.
    ZhihuCrawler(
        keywords=options.keywords,
        count=options.count,
        enable_comments=options.enable_comments,
        enable_media=options.enable_media,
    ).run()

# Run only when executed as a script (not when imported by the GUI/tests).
if __name__ == '__main__':
    main()