"""
Django管理命令：运行爬虫
使用方法: python manage.py crawl [options]
"""
import logging
import sys
from django.core.management.base import BaseCommand, CommandError
from spider.core.spider_manager import SpiderManager
from spider.config.settings import LIST_API_URL

logger = logging.getLogger(__name__)


class Command(BaseCommand):
    help = '运行爬虫，从公共资源交易平台采集数据'

    def add_arguments(self, parser):
        """Register CLI options for the crawl command."""
        parser.add_argument(
            '--list-url',
            type=str,
            default=None,
            help='列表页URL（已废弃，实际使用API URL）',
        )
        parser.add_argument(
            '--list-only',
            action='store_true',
            help='仅爬取列表页',
        )
        parser.add_argument(
            '--detail-only',
            action='store_true',
            help='仅爬取待处理的详情页',
        )
        parser.add_argument(
            '--sub-detail-only',
            action='store_true',
            help='仅爬取待处理的子详情页',
        )
        parser.add_argument(
            '--limit',
            type=int,
            default=None,
            help='限制爬取数量（仅对详情页和子详情页有效）',
        )
        parser.add_argument(
            '--time-begin',
            type=str,
            default=None,
            help='开始时间，格式：YYYY-MM-DD（例如：2025-01-01）',
        )
        parser.add_argument(
            '--time-end',
            type=str,
            default=None,
            help='结束时间，格式：YYYY-MM-DD（例如：2025-11-06）',
        )
        parser.add_argument(
            '--page-number',
            type=int,
            default=1,
            help='起始页码，默认从第1页开始',
        )
        parser.add_argument(
            '--max-pages',
            type=int,
            default=None,
            help='最大爬取页数，None表示爬取所有页',
        )
        parser.add_argument(
            '--workers',
            type=int,
            default=None,
            help='并发抓取的工作线程数（仅详情页与子详情页）',
        )

    def _prompt_max_pages(self, max_pages):
        """Interactively ask for a page limit when ``--max-pages`` was omitted.

        Args:
            max_pages: Value from the command line, or ``None`` if not given.

        Returns:
            A ``(max_pages, cancelled)`` tuple. ``max_pages`` is a positive
            int or ``None`` (crawl all pages); ``cancelled`` is ``True`` when
            the user aborted the prompt with Ctrl-C.
        """
        if max_pages is not None:
            return max_pages, False
        self.stdout.write('')
        self.stdout.write(self.style.WARNING('未指定最大页数，请输入最大爬取页数（直接回车表示爬取所有页）：'))
        try:
            user_input = input('最大页数（留空表示爬取所有页）: ').strip()
            if user_input:
                max_pages = int(user_input)
                if max_pages <= 0:
                    raise ValueError('最大页数必须大于0')
                self.stdout.write(self.style.SUCCESS(f'将爬取最多 {max_pages} 页'))
            else:
                self.stdout.write(self.style.SUCCESS('将爬取所有页'))
        except ValueError as e:
            # Invalid input degrades to "crawl everything" rather than abort.
            self.stdout.write(
                self.style.ERROR(f'输入无效: {e}，将爬取所有页')
            )
            max_pages = None
        except KeyboardInterrupt:
            self.stdout.write(self.style.WARNING('\n用户取消操作'))
            return None, True
        return max_pages, False

    @staticmethod
    def _progress_bar(processed, success, failure, total):
        """Render an in-place text progress bar on stdout.

        Written with ``sys.stdout`` (not ``self.stdout``) so the ``\\r``
        carriage return overwrites the same terminal line.
        """
        bar_len = 30
        pct = 0 if total == 0 else int(processed * 100 / total)
        filled = int(bar_len * pct / 100)
        bar = '#' * filled + '-' * (bar_len - filled)
        sys.stdout.write(f"\r[{bar}] {processed}/{total} 成功 {success} 失败 {failure}")
        sys.stdout.flush()

    def _report_batch(self, label, success_count, failure_count, failures):
        """Print the success/failure summary for a detail crawl batch.

        Args:
            label: Item label used in the messages (e.g. '详情页').
            success_count: Number of items fetched successfully.
            failure_count: Number of items that failed.
            failures: Iterable of ``(url, reason)`` pairs, possibly empty.
        """
        # Terminate the progress-bar line before printing the summary.
        sys.stdout.write('\n')
        self.stdout.write(self.style.SUCCESS(f'成功 {success_count} 个{label}'))
        self.stdout.write(self.style.WARNING(f'失败 {failure_count} 个{label}'))
        if failures:
            for url, reason in failures:
                self.stdout.write(f'失败原因: {url} - {reason}')

    def handle(self, *args, **options):
        """Dispatch to the requested crawl mode and always clean up.

        Modes (mutually exclusive flags): ``--list-only``, ``--detail-only``,
        ``--sub-detail-only``; with no flag the full pipeline runs.

        Raises:
            CommandError: If any step of the crawl fails.
        """
        # Configure logging for the spider run.
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        )

        manager = SpiderManager()

        # Detail batches silence the 'spider' logger so the progress bar stays
        # readable; capture the previous level once so it can be restored in
        # `finally` even when an exception interrupts the batch.
        batch_mode = options['detail_only'] or options['sub_detail_only']
        spider_logger = logging.getLogger('spider')
        prev_level = spider_logger.level

        try:
            if options['list_only']:
                # List pages only.
                max_pages, cancelled = self._prompt_max_pages(options['max_pages'])
                if cancelled:
                    return
                self.stdout.write('开始爬取列表页...')
                count = manager.crawl_list_page(
                    url=options['list_url'],
                    page_number=options['page_number'],
                    max_pages=max_pages,
                    time_begin=options['time_begin'],
                    time_end=options['time_end'],
                )
                self.stdout.write(
                    self.style.SUCCESS(f'成功爬取 {count} 个列表项')
                )

            elif options['detail_only']:
                spider_logger.setLevel(logging.CRITICAL)
                success_count, failure_count, failures = manager.crawl_pending_details(
                    options['limit'],
                    workers=options.get('workers'),
                    quiet=True,
                    progress_cb=self._progress_bar,
                )
                self._report_batch('详情页', success_count, failure_count, failures)

            elif options['sub_detail_only']:
                spider_logger.setLevel(logging.CRITICAL)
                success_count, failure_count, failures = manager.crawl_pending_sub_details(
                    options['limit'],
                    workers=options.get('workers'),
                    quiet=True,
                    progress_cb=self._progress_bar,
                )
                self._report_batch('子详情页', success_count, failure_count, failures)

            else:
                # Full pipeline: list pages, then details, then sub-details.
                max_pages, cancelled = self._prompt_max_pages(options['max_pages'])
                if cancelled:
                    return
                self.stdout.write('开始完整爬取流程...')
                manager.crawl_all(
                    list_url=options['list_url'],
                    page_number=options['page_number'],
                    max_pages=max_pages,
                    time_begin=options['time_begin'],
                    time_end=options['time_end'],
                )
                self.stdout.write(
                    self.style.SUCCESS('完整爬取流程完成')
                )

        except Exception as e:
            self.stdout.write(
                self.style.ERROR(f'爬取过程中发生错误: {e}')
            )
            # Chain the original exception so tracebacks keep the root cause.
            raise CommandError(f'爬取失败: {e}') from e

        finally:
            if batch_mode:
                manager.close(quiet=True)
                # BUGFIX: restore the level captured above instead of forcing
                # logging.INFO, so a caller-configured level is not clobbered.
                spider_logger.setLevel(prev_level)
            else:
                manager.close()

