#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
搜狐号自动保存草稿程序 v3.0
新增功能：
1. 自动获取搜狐号素材库图片
2. 随机选择素材插入到文章正文
3. 自动设置封面图片
4. 保持v2.0的所有功能
"""

import requests
import json
import time
import os
import shutil
import re
import random
from pathlib import Path
from urllib.parse import urlencode
from typing import Dict, Any, Optional, List
import logging
import markdown
from datetime import datetime

# Configure logging: INFO level, mirrored to a UTF-8 log file and the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        # UTF-8 encoding so the Chinese log messages are stored correctly
        logging.FileHandler('sohu_draft_v3.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
# Module-level logger shared by every function/method in this file
logger = logging.getLogger(__name__)

class SohuDraftPublisherV3:
    """搜狐号草稿发布器 v3.0"""
    
    def __init__(self, config_file: str = 'sohu_config.json'):
        """Create a publisher backed by the given JSON configuration file.

        Args:
            config_file: path to the JSON configuration file.
        """
        self.config = self._load_config(config_file)
        self.session = requests.Session()
        self._setup_session()

        # Markdown -> HTML converter ('extra' adds tables/fenced code,
        # 'codehilite' adds syntax highlighting)
        self.md = markdown.Markdown(extensions=['extra', 'codehilite'])

        # Material-library cache state
        self.materials_cache = []   # most recently fetched page of materials
        self.cache_timestamp = 0    # epoch seconds of the last fetch
        self.cache_duration = 300   # cache lifetime: five minutes
    
    def _load_config(self, config_file: str) -> Dict[str, Any]:
        """Read and parse the JSON configuration file.

        Args:
            config_file: path to the config file.

        Returns:
            The parsed configuration dictionary.

        Raises:
            FileNotFoundError: when the file does not exist.
            json.JSONDecodeError: when the file is not valid JSON.
        """
        try:
            with open(config_file, encoding='utf-8') as fp:
                return json.load(fp)
        except FileNotFoundError:
            logger.error(f"配置文件 {config_file} 不存在")
            raise
        except json.JSONDecodeError:
            logger.error(f"配置文件 {config_file} 格式错误")
            raise
    
    def _setup_session(self):
        """Install browser-like default headers (and the cookie) on the session."""
        # Headers mimicking a desktop Chrome browser talking to mp.sohu.com
        headers = {
            'accept': '*/*',
            'accept-encoding': 'gzip, deflate, br, zstd',
            'accept-language': 'zh-CN,zh;q=0.9',
            'connection': 'keep-alive',
            'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'host': 'mp.sohu.com',
            'origin': 'https://mp.sohu.com',
            'referer': 'https://mp.sohu.com/mpfe/v4/contentManagement/news/addarticle?contentStatus=1',
            'sec-ch-ua': '"Google Chrome";v="137", "Chromium";v="137", "Not/A)Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36',
            'x-requested-with': 'XMLHttpRequest'
        }

        # The authentication cookie comes from the config file, when present
        if 'cookie' in self.config:
            headers['cookie'] = self.config['cookie']

        self.session.headers.update(headers)
    
    def get_materials(self, page_num: Optional[int] = None, page_size: int = 15) -> List[Dict[str, Any]]:
        """Fetch a page of images from the material library (short-lived cache).

        Args:
            page_num: page number; a random page in 1-50 is chosen when None.
            page_size: number of items per page.

        Returns:
            List of material dicts; an empty list on any failure.
        """
        # Serve the cached page while it is still fresh
        current_time = time.time()
        if (self.materials_cache and 
            current_time - self.cache_timestamp < self.cache_duration):
            logger.info("使用缓存的素材数据")
            return self.materials_cache
        
        # No page requested: sample one at random for variety
        if page_num is None:
            page_num = random.randint(1, 50)
        
        account_id = self.config.get('account_id', '121905676')
        url = "https://mp.sohu.com/mpbp/bp/user/resource/page"
        
        params = {
            'accountId': account_id,
            'pno': page_num,
            'psize': page_size
        }
        
        # Headers specific to the material-library endpoint
        material_headers = {
            'accept': 'application/json, text/plain, */*',
            'referer': 'https://mp.sohu.com/mpfe/v4/materialLibrary/library'
        }
        
        # Snapshot the session headers BEFORE mutating them so they can be
        # restored unconditionally in the finally block below.
        original_headers = self.session.headers.copy()
        try:
            logger.info(f"获取第{page_num}页素材，每页{page_size}条")
            
            self.session.headers.update(material_headers)
            response = self.session.get(url, params=params, timeout=30)
            response.raise_for_status()
            
            result = response.json()
            
            # NOTE(review): 'success' defaults to True when absent; the
            # code == 2000000 check is the effective success criterion.
            if result.get('success', True) and result.get('code') == 2000000:
                materials = result.get('data', {}).get('data', [])
                logger.info(f"成功获取{len(materials)}个素材")
                
                # Refresh the cache
                self.materials_cache = materials
                self.cache_timestamp = current_time
                
                return materials
            else:
                logger.error(f"获取素材失败: {result}")
                return []
                
        except requests.exceptions.RequestException as e:
            logger.error(f"获取素材网络请求错误: {e}")
            return []
        except json.JSONDecodeError as e:
            logger.error(f"素材响应解析错误: {e}")
            return []
        except Exception as e:
            logger.error(f"获取素材未知错误: {e}")
            return []
        finally:
            # Bug fix: the original restored the headers only on the success
            # path, so any exception leaked the material-endpoint headers
            # into every subsequent request made on this session.
            self.session.headers = original_headers
    
    def select_random_material(self, materials: List[Dict[str, Any]]) -> Optional[Dict[str, Any]]:
        """Pick one material uniformly at random.

        Args:
            materials: list of material dicts.

        Returns:
            A randomly chosen material, or None when the list is empty.
        """
        if materials:
            chosen = random.choice(materials)
            logger.info(f"随机选择素材: {chosen.get('name', 'Unknown')}")
            return chosen

        logger.warning("素材列表为空，无法选择")
        return None
    
    def format_material_url(self, url: str) -> str:
        """Normalise a material URL to an absolute URL.

        Args:
            url: raw URL, possibly protocol-relative (``//host/...``) or
                missing the scheme entirely.

        Returns:
            An absolute ``http(s)`` URL.
        """
        if url.startswith('http'):
            return url
        prefix = 'https:' if url.startswith('//') else 'https://'
        return prefix + url
    
    def insert_material_to_content(self, content: str, material: Dict[str, Any]) -> str:
        """Insert the material image into the HTML, right below the title.

        Args:
            content: original HTML body.
            material: material record; must carry a 'content' entry.

        Returns:
            HTML with the image inserted; the HTML unchanged when the
            material record is unusable.
        """
        if not material or 'content' not in material:
            logger.warning("素材信息无效，跳过插入")
            return content

        img_src = self.format_material_url(material['content']['url'])
        img_tag = f'<p class="ql-align-center"><img src="{img_src}"></p>'

        # Place the image immediately after the first closing </h1>;
        # with no <h1> present, prepend it to the whole body instead.
        closing = content.find('</h1>')
        if closing == -1:
            logger.info(f"未找到h1标签，在内容开头插入素材: {material.get('name', 'Unknown')}")
            return img_tag + content

        cut = closing + len('</h1>')
        logger.info(f"成功插入素材到标题下方: {material.get('name', 'Unknown')}")
        return content[:cut] + img_tag + content[cut:]
    
    def _extract_title_from_filename(self, filename: str) -> str:
        """Derive the article title from a markdown file name.

        Args:
            filename: base name of the file, e.g. 'my-post.md'.

        Returns:
            The name with a trailing '.md' extension removed.
        """
        # Bug fix: str.replace removed EVERY '.md' occurrence anywhere in the
        # name (e.g. 'a.md.backup.md' -> 'a.backup'); only strip the suffix.
        if filename.endswith('.md'):
            return filename[:-len('.md')]
        return filename
    
    def _extract_brief_from_content(self, content: str, max_length: int = 100) -> str:
        """Build a short abstract from the markdown body.

        Args:
            content: raw markdown text.
            max_length: maximum abstract length in characters.

        Returns:
            Whole leading sentences (split on the Chinese full stop) up to
            *max_length* chars; falls back to a plain prefix (optionally with
            '...') when the sentence-based abstract would be under 20 chars.
        """
        # Strip common markdown punctuation, then collapse whitespace
        text = re.sub(r'[#*`\[\]()_~]', '', content)
        text = re.sub(r'\s+', ' ', text).strip()

        # Accumulate whole sentences while they fit into max_length
        brief = ''
        for sentence in text.split('。'):
            if not sentence:
                # Bug fix: the empty fragment after a trailing '。' used to
                # append a stray extra '。' to the abstract.
                continue
            if len(brief + sentence + '。') > max_length:
                break
            brief += sentence + '。'

        # Too little sentence material: fall back to a raw prefix
        if len(brief) < 20:
            brief = text[:max_length] + '...' if len(text) > max_length else text

        return brief
    
    def _markdown_to_html(self, markdown_content: str) -> str:
        """Render markdown text to HTML with the shared converter.

        Args:
            markdown_content: raw markdown source.

        Returns:
            The rendered HTML string.
        """
        rendered = self.md.convert(markdown_content)
        # Reset the converter so per-document state (e.g. footnotes) does
        # not leak into the next conversion.
        self.md.reset()
        return rendered
    
    def _read_markdown_file(self, file_path: str) -> Optional[Dict[str, str]]:
        """Read a markdown file and derive the draft fields from it.

        Args:
            file_path: path to the markdown file.

        Returns:
            Dict with 'title', 'brief', 'content' (HTML) and 'original_file',
            or None when the file cannot be read or converted.
            (Bug fix: the annotation previously claimed Dict[str, str] even
            though the failure path returns None.)
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                content = f.read()

            # Title comes from the file name, abstract from the body text
            filename = os.path.basename(file_path)
            title = self._extract_title_from_filename(filename)
            brief = self._extract_brief_from_content(content)

            # Convert the body to HTML for the draft API
            html_content = self._markdown_to_html(content)

            return {
                'title': title,
                'brief': brief,
                'content': html_content,
                'original_file': file_path
            }

        except Exception as e:
            # Broad catch is deliberate: any failure just skips this file
            logger.error(f"读取文件 {file_path} 失败: {e}")
            return None
    
    def save_draft(self, title: str, brief: str, content: str,
                   cover: str = '', draft_id: Optional[str] = None) -> Dict[str, Any]:
        """Save (create or update) an article draft via the Sohu MP API.

        Args:
            title: article title
            brief: article abstract
            content: article body (HTML)
            cover: cover image URL
            draft_id: existing draft id (optional; when given, that draft
                is updated instead of creating a new one)

        Returns:
            Dict with 'success' (bool), 'message' (str) and 'data' (parsed
            API response, or None on request/parse errors).
        """
        # Build the request URL
        account_id = self.config.get('account_id', '121905676')
        url = f"https://mp.sohu.com/mpbp/bp/news/v4/news/draft?accountId={account_id}"
        
        # Build the form payload; unspecified fields fall back to config defaults
        form_data = {
            'title': title,
            'brief': brief,
            'content': content,
            'channelId': self.config.get('channel_id', 31),
            'categoryId': self.config.get('category_id', -1),
            'userColumnId': self.config.get('user_column_id', 0),
            'columnNewsIds': self.config.get('column_news_ids', ''),
            'businessCode': self.config.get('business_code', 0),
            'isOriginal': self.config.get('is_original', False),
            'cover': cover,  # cover URL supplied by the caller
            'attrIds': self.config.get('attr_ids', ''),
            'topicIds': self.config.get('topic_ids', ''),
            'userLabels': json.dumps(self.config.get('user_labels', []), ensure_ascii=False),
            'isAd': self.config.get('is_ad', 0),
            'reprint': self.config.get('reprint', False),
            'customTags': self.config.get('custom_tags', ''),
            'infoResource': self.config.get('info_resource', 0),
            'sourceUrl': self.config.get('source_url', ''),
            'visibleToLoginedUsers': self.config.get('visible_to_logined_users', 0),
            'accountId': account_id
        }
        
        # Updating an existing draft: include its id in the payload
        if draft_id:
            form_data['id'] = draft_id
        
        try:
            logger.info(f"开始保存草稿: {title}")
            
            # Send the POST request (form-encoded, per the session headers)
            response = self.session.post(url, data=form_data, timeout=30)
            response.raise_for_status()
            
            # Parse the JSON response
            result = response.json()
            
            # NOTE(review): raise_for_status() above guarantees status_code < 400
            # here, so the `or response.status_code == 200` arm makes this branch
            # effectively always taken — API-level failures reported only in the
            # JSON body may be counted as success. Confirm the API's real success
            # field (get_materials checks code == 2000000) before tightening.
            if result.get('status') == 'success' or response.status_code == 200:
                logger.info(f"草稿保存成功: {title}")
                return {
                    'success': True,
                    'message': '草稿保存成功',
                    'data': result
                }
            else:
                logger.error(f"草稿保存失败: {result}")
                return {
                    'success': False,
                    'message': f"草稿保存失败: {result.get('message', '未知错误')}",
                    'data': result
                }
                
        except requests.exceptions.RequestException as e:
            logger.error(f"网络请求错误: {e}")
            return {
                'success': False,
                'message': f"网络请求错误: {e}",
                'data': None
            }
        except json.JSONDecodeError as e:
            logger.error(f"响应解析错误: {e}")
            return {
                'success': False,
                'message': f"响应解析错误: {e}",
                'data': None
            }
        except Exception as e:
            logger.error(f"未知错误: {e}")
            return {
                'success': False,
                'message': f"未知错误: {e}",
                'data': None
            }
    
    def move_file_to_published(self, source_file: str, published_dir: str) -> bool:
        """Move a processed file into the 'published' directory.

        A timestamp suffix is appended to the name when a file with the same
        name already exists at the destination.

        Args:
            source_file: file to move.
            published_dir: destination directory (created when missing).

        Returns:
            True when the move succeeded, False otherwise.
        """
        try:
            os.makedirs(published_dir, exist_ok=True)

            basename = os.path.basename(source_file)
            destination = os.path.join(published_dir, basename)

            # Avoid clobbering an existing file: append a timestamp suffix
            if os.path.exists(destination):
                stem, suffix = os.path.splitext(basename)
                stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
                destination = os.path.join(published_dir, f"{stem}_{stamp}{suffix}")

            shutil.move(source_file, destination)
            logger.info(f"文件已移动: {source_file} -> {destination}")
            return True

        except Exception as e:
            logger.error(f"移动文件失败: {e}")
            return False
    
    def process_single_file(self, file_path: str, published_dir: str = None) -> Dict[str, Any]:
        """Turn one markdown file into a saved draft with a random image.

        Args:
            file_path: path to the markdown file.
            published_dir: when given, the source file is moved there after a
                successful save.

        Returns:
            The save_draft() result, extended with 'file', 'material_used'
            and (when a move was attempted) 'file_moved'.
        """
        logger.info(f"开始处理文件: {file_path}")

        file_data = self._read_markdown_file(file_path)
        if not file_data:
            return {
                'success': False,
                'message': '文件读取失败',
                'file': file_path
            }

        # Pick a random library image for both the body and the cover
        selected_material = self.select_random_material(self.get_materials())

        if selected_material:
            file_data['content'] = self.insert_material_to_content(
                file_data['content'], selected_material
            )
            cover_url = self.format_material_url(selected_material['content']['url'])
        else:
            logger.warning("未能获取素材，使用默认封面")
            cover_url = self.config.get('cover', '')

        result = self.save_draft(
            title=file_data['title'],
            brief=file_data['brief'],
            content=file_data['content'],
            cover=cover_url
        )

        # Move the source file out of the queue once the draft is saved
        if result['success'] and published_dir:
            result['file_moved'] = self.move_file_to_published(file_path, published_dir)

        result['file'] = file_path
        result['material_used'] = selected_material.get('name', 'None') if selected_material else 'None'

        return result
    
    def process_directory(self, source_dir: str, published_dir: str = None,
                         delay: float = 2.0) -> Dict[str, Any]:
        """Process every markdown file found directly inside *source_dir*.

        Args:
            source_dir: directory to scan (non-recursive).
            published_dir: optional directory to receive saved files.
            delay: pause in seconds between files, to throttle requests.

        Returns:
            Summary dict: total_files, success_count, failed_count, results.
        """
        logger.info(f"开始批量处理目录: {source_dir}")

        # Collect top-level *.md files only (no recursion)
        md_files = [
            os.path.join(source_dir, name)
            for name in os.listdir(source_dir)
            if name.endswith('.md') and os.path.isfile(os.path.join(source_dir, name))
        ]

        if not md_files:
            logger.warning(f"目录 {source_dir} 中没有找到markdown文件")
            return {
                'total_files': 0,
                'success_count': 0,
                'failed_count': 0,
                'results': []
            }

        logger.info(f"找到 {len(md_files)} 个markdown文件")

        results = []
        success_count = 0
        failed_count = 0

        for index, file_path in enumerate(md_files):
            logger.info(f"处理进度: {index + 1}/{len(md_files)}")

            outcome = self.process_single_file(file_path, published_dir)
            results.append(outcome)

            if outcome['success']:
                success_count += 1
            else:
                failed_count += 1

            # Throttle between files; no pause needed after the last one
            if index < len(md_files) - 1:
                time.sleep(delay)

        report = {
            'total_files': len(md_files),
            'success_count': success_count,
            'failed_count': failed_count,
            'results': results
        }

        logger.info(f"批量处理完成: 总计{len(md_files)}个文件，成功{success_count}个，失败{failed_count}个")

        return report
    
    def generate_report(self, report_data: Dict[str, Any], output_file: str = None) -> str:
        """Write a human-readable text report of a batch run.

        Args:
            report_data: summary produced by process_directory().
            output_file: target path; a timestamped name is generated when None.

        Returns:
            Path of the written report, or None when writing failed.
        """
        if output_file is None:
            stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
            output_file = f'sohu_publish_report_v3_{stamp}.txt'

        try:
            with open(output_file, 'w', encoding='utf-8') as out:
                # Header and overall counters
                out.write("搜狐号草稿发布报告 v3.0\n")
                out.write("=" * 50 + "\n")
                out.write(f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
                out.write(f"总文件数: {report_data['total_files']}\n")
                out.write(f"成功数量: {report_data['success_count']}\n")
                out.write(f"失败数量: {report_data['failed_count']}\n")
                out.write("\n详细结果:\n")
                out.write("-" * 50 + "\n")

                # One numbered section per processed file
                for position, item in enumerate(report_data['results'], 1):
                    out.write(f"{position}. 文件: {os.path.basename(item['file'])}\n")
                    out.write(f"   状态: {'成功' if item['success'] else '失败'}\n")
                    out.write(f"   使用素材: {item.get('material_used', 'None')}\n")
                    if not item['success']:
                        out.write(f"   错误信息: {item['message']}\n")
                    if item.get('file_moved'):
                        out.write(f"   文件已移动: 是\n")
                    out.write("\n")

            logger.info(f"报告已生成: {output_file}")
            return output_file

        except Exception as e:
            logger.error(f"生成报告失败: {e}")
            return None


def main():
    """CLI entry point: batch-process a directory of markdown files and
    write a text report. Returns 0 on success, 1 on failure."""
    import argparse

    parser = argparse.ArgumentParser(description='搜狐号草稿发布器 v3.0')
    parser.add_argument('--source', '-s', required=True, help='源目录路径')
    parser.add_argument('--published', '-p', help='已发目录路径')
    parser.add_argument('--config', '-c', default='sohu_config.json', help='配置文件路径')
    parser.add_argument('--delay', '-d', type=float, default=10.0, help='处理间隔（秒）')

    args = parser.parse_args()

    try:
        publisher = SohuDraftPublisherV3(args.config)

        summary = publisher.process_directory(
            source_dir=args.source,
            published_dir=args.published,
            delay=args.delay
        )

        report_file = publisher.generate_report(summary)
        print(f"处理完成！报告文件: {report_file}")
    except Exception as e:
        logger.error(f"程序执行失败: {e}")
        return 1

    return 0


if __name__ == '__main__':
    exit(main())