#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
文章爬取与发布主程序
使用重构后的类实现
"""

import argparse
import json
import os
import random
import sys
import logging
from pathlib import Path
from typing import Dict, Any, List

# Make the script's directory importable regardless of the caller's CWD.
# NOTE: this must run BEFORE the project-local imports below — previously it
# was executed after them, where it had no effect on those imports.
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

# Configure the logging system before any project module requests a logger.
from utils.logger_config import setup_logging
setup_logging(
    level=logging.INFO,
    log_format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    log_file=os.path.join(os.path.dirname(__file__), 'logs', 'app.log')
)

# Refactored pipeline classes.
from crawlers import ArticleCrawler
from publishers import WordPressPublisher
from adapters import WordPressDataAdapter
from utils.logger_config import get_logger


class ArticleCrawlerPublisher:
    """Crawl articles and publish them to a WordPress site."""

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize the crawl-and-publish pipeline.

        Args:
            config: Configuration dictionary, passed through to the crawler.
        """
        self.config = config

        # Logging was configured at module import time; just grab a logger.
        self.logger = get_logger(__name__)

        self.crawler = ArticleCrawler(config)
        self.publisher = WordPressPublisher()
        self.data_adapter = WordPressDataAdapter()

        # Fetch the site's category map once, so each publish can pick from it.
        self.categories = self._get_wordpress_categories()

    def _get_wordpress_categories(self) -> Dict[int, Any]:
        """
        Fetch the categories defined on the WordPress site.

        Returns:
            Mapping of category id -> category name. Falls back to the
            default category {1: '未分类'} on any failure, so startup
            never aborts because of a category lookup problem.
        """
        try:
            result = self.publisher.get_categories()
            if result['success']:
                categories = result['categories']
                self.logger.info(f"获取到 {len(categories)} 个分类")
                # Map category id -> category name.
                return {cat['id']: cat['name'] for cat in categories}
            else:
                self.logger.warning(f"获取分类失败: {result.get('error', '未知错误')}")
                return {1: '未分类'}  # default category
        except Exception as e:
            # Network/auth errors should not abort startup; use the default.
            self.logger.error(f"获取分类时出错: {e}")
            return {1: '未分类'}  # default category

    def crawl_and_publish_single(self, url: str) -> Dict[str, Any]:
        """
        Crawl a single article and publish it to WordPress.

        Args:
            url: URL of the article to crawl.

        Returns:
            On success: {'success': True, 'url', 'post_id', 'post_data'}.
            On failure: {'success': False, 'error', 'url'}.
        """
        self.logger.info(f"开始爬取文章: {url}")

        # Crawl the article content.
        article_data = self.crawler.crawl_article(url)

        if not article_data:
            return {
                'success': False,
                'error': '文章爬取失败',
                'url': url
            }

        self.logger.info(f"文章爬取成功: {article_data['title']}")

        # Download the article's images, remembering the remote->local mapping.
        if article_data.get('images'):
            self.logger.info(f"开始下载 {len(article_data['images'])} 张图片")
            image_mapping = self.crawler.download_images(article_data)
            article_data['image_mapping'] = image_mapping
            # Flat list of local paths, kept for backward compatibility.
            local_images = list(image_mapping.values())
            article_data['local_images'] = local_images
            self.logger.info(f"图片下载完成，本地路径: {local_images}")

        # Convert the crawled dict into the WordPress article model.
        wordpress_article = self.data_adapter.convert(article_data)

        # Assign a random category from the site's category map.
        category_ids = list(self.categories.keys())
        if category_ids:
            random_category_id = random.choice(category_ids)
            category_name = self.categories[random_category_id]
            self.logger.info(f"随机选择分类: {category_name} (ID: {random_category_id})")
            wordpress_article.category_id = random_category_id
        else:
            # No categories available: fall back to WordPress default ID 1.
            self.logger.warning("没有可用分类，使用默认分类ID 1")
            wordpress_article.category_id = 1

        # Publish the article. (Fixed: these three messages were f-strings
        # without placeholders — plain string literals now.)
        self.logger.info("开始发布文章到WordPress")
        result = self.publisher.publish(wordpress_article)

        if result['success']:
            self.logger.info("文章发布成功")
            return {
                'success': True,
                'url': url,
                'post_id': result.get('post_id'),
                'post_data': result.get('post_data')
            }
        else:
            self.logger.error("文章发布失败")
            return {
                'success': False,
                'error': result.get('error', '文章发布失败'),
                'url': url
            }

    def crawl_and_publish_batch(self, urls: List[str]) -> List[Dict[str, Any]]:
        """
        Crawl and publish a batch of articles.

        Args:
            urls: List of article URLs.

        Returns:
            One result dict per URL, in input order (see
            crawl_and_publish_single for the dict shape).
        """
        results = []

        for url in urls:
            result = self.crawl_and_publish_single(url)
            results.append(result)

            # Log failures but keep processing the remaining articles.
            if not result['success']:
                self.logger.error(f"处理文章失败: {url}, 错误: {result.get('error', '未知错误')}")

        return results


def load_config(config_path: str) -> Dict[str, Any]:
    """
    Load a JSON configuration file located relative to this script.

    Args:
        config_path: Config file path, relative to this file's directory.

    Returns:
        Parsed configuration dictionary.

    Exits:
        Terminates the process with status 1 if the file is missing,
        unreadable, or not valid JSON.
    """
    logger = get_logger(__name__)

    full_config_path = os.path.join(os.path.dirname(__file__), config_path)
    logger.info(f"加载配置文件: {full_config_path}")

    try:
        # Keep the try body minimal: only the file read and JSON parse can
        # legitimately fail here. OSError covers missing/unreadable files;
        # ValueError covers json.JSONDecodeError and bad text encodings.
        with open(full_config_path, 'r', encoding='utf-8') as f:
            config = json.load(f)
    except (OSError, ValueError) as e:
        logger.error(f"加载配置文件失败: {e}")
        sys.exit(1)

    logger.info("配置文件加载成功")
    return config


def main():
    """CLI entry point: parse arguments, then crawl and publish articles."""
    # Parse command-line arguments.
    parser = argparse.ArgumentParser(description='文章爬取与发布工具')
    parser.add_argument('--config', '-c', default='configs/config.json',
                       help='配置文件路径')
    parser.add_argument('--mode', '-m', choices=['single', 'batch'],
                       default='single', help='运行模式')
    parser.add_argument('--url', '-u', help='文章URL（单篇模式）')
    parser.add_argument('--urls', nargs='+', help='文章URL列表（批量模式）')

    args = parser.parse_args()

    # Fixed: honor the --config argument (previously the path was hard-coded,
    # silently ignoring the user's value).
    config = load_config(args.config)

    # Create the crawl-and-publish pipeline.
    # (Removed leftover debug code that unconditionally crawled and published
    # a hard-coded URL before mode dispatch.)
    crawler_publisher = ArticleCrawlerPublisher(config)

    # Dispatch on the selected mode.
    if args.mode == 'single':
        if not args.url:
            print("单篇模式需要指定 --url 参数")
            sys.exit(1)

        result = crawler_publisher.crawl_and_publish_single(args.url)

        if result['success']:
            post_id = result.get('post_id', '未知')
            print(f"文章发布成功，ID: {post_id}")
        else:
            print(f"文章发布失败: {result.get('error', '未知错误')}")
            sys.exit(1)

    elif args.mode == 'batch':
        if not args.urls:
            print("批量模式需要指定 --urls 参数")
            sys.exit(1)

        results = crawler_publisher.crawl_and_publish_batch(args.urls)

        # Summarize the batch outcome.
        success_count = sum(1 for r in results if r['success'])
        total_count = len(results)

        print(f"批量处理完成: {success_count}/{total_count} 成功")

        # Report each failed article.
        for result in results:
            if not result['success']:
                print(f"失败: {result.get('url', '未知URL')} - {result.get('error', '未知错误')}")

if __name__ == '__main__':
    main()