#!/usr/bin/env python3
"""
Readwise API客户端
获取用户的阅读内容并处理数据导出
"""

import requests
from datetime import datetime, timedelta
import os
import json
import time
import re
from bs4 import BeautifulSoup
from logger import LOG

class ReadwiseClient:
    """
    Readwise API client - reading-content data source.

    Design pattern: adapter (kept consistent with the project's other clients).
    Responsibilities:
    - Readwise API authentication and requests
    - Document fetching and processing
    - Content cleaning and formatting
    - Data export and persistence
    """

    def __init__(self, config=None):
        """
        Initialize the Readwise client.

        Args:
            config: optional configuration object carrying the API token
                and proxy settings.
        """
        self.config = config
        self.base_url = 'https://readwise.io/api/v3'
        # Resolve the token before building the session: the session's
        # Authorization header depends on it.
        self.api_token = self._get_api_token()
        self.session = self._create_session()
    
    def _get_api_token(self):
        """获取API Token"""
        if self.config and hasattr(self.config, 'readwise_api_token'):
            return self.config.readwise_api_token
        
        # 从环境变量获取
        token = os.getenv('READWISE_API_TOKEN')
        if not token:
            LOG.error("Readwise API Token未配置")
            raise ValueError("Readwise API Token未配置，请在.env文件中设置READWISE_API_TOKEN")
        
        return token
    
    def _create_session(self):
        """Build a requests session pre-configured with auth headers and proxies."""
        session = requests.Session()

        # Every request carries the token-based auth header.
        session.headers.update({
            'Authorization': f'Token {self.api_token}',
            'User-Agent': 'IntelligentAnalyzer/1.0 Readwise Client',
            'Content-Type': 'application/json'
        })

        # Proxy configuration mirrors the handling used by the other clients.
        # getattr with a default also covers config=None / missing attributes.
        if getattr(self.config, 'proxy_enabled', None):
            proxies = {}
            http_proxy = getattr(self.config, 'proxy_http', None)
            if http_proxy:
                proxies['http'] = http_proxy
            https_proxy = getattr(self.config, 'proxy_https', None)
            if https_proxy:
                proxies['https'] = https_proxy

            if proxies:
                session.proxies.update(proxies)
                LOG.info("Readwise客户端代理配置已应用")

        return session
    
    def fetch_documents(self, location='feed', category=None, limit=None, days_back=None):
        """
        Fetch a list of Readwise documents.

        Args:
            location: document location ('feed', 'archive', 'later')
            category: document category ('rss', 'article', 'pdf', 'epub', 'tweet', 'email')
            limit: maximum number of documents (None -> config default)
            days_back: how many days back to fetch (None -> config default)

        Returns:
            list: documents; empty list on unrecoverable errors.
        """
        # Fill in defaults from config (getattr handles config=None too).
        if limit is None:
            limit = getattr(self.config, 'readwise_documents_limit', 15)
        if days_back is None:
            days_back = getattr(self.config, 'readwise_days_back', 5)

        LOG.debug(f"准备获取Readwise文档，位置: {location}, 类别: {category}, 限制: {limit}, 天数: {days_back}")

        try:
            params = self._build_list_params(location, category, limit, days_back)

            url = f"{self.base_url}/list/"
            response = self.session.get(url, params=params, timeout=30)
            response.raise_for_status()

            data = response.json()
            documents = data.get('results', [])

            LOG.info(f"成功获取 {len(documents)} 个Readwise文档")
            return documents

        except requests.exceptions.ProxyError as e:
            LOG.error(f"代理连接失败：{str(e)}")
            return self._fallback_fetch(location, category, limit, days_back)
        except requests.exceptions.Timeout as e:
            LOG.error(f"请求超时：{str(e)}")
            return self._fallback_fetch(location, category, limit, days_back)
        except requests.exceptions.ConnectionError as e:
            LOG.error(f"连接错误：{str(e)}")
            return self._fallback_fetch(location, category, limit, days_back)
        except requests.exceptions.HTTPError as e:
            # Read the status from the exception's own response object: this
            # is the canonical source and avoids relying on the local name.
            status = e.response.status_code if e.response is not None else None
            if status == 401:
                LOG.error("Readwise API认证失败，请检查API Token")
            elif status == 429:
                LOG.error("API请求频率限制，请稍后重试")
            else:
                LOG.error(f"HTTP错误 {status}: {str(e)}")
            return []
        except Exception as e:
            LOG.error(f"获取Readwise文档失败：{str(e)}")
            return []

    def _build_list_params(self, location, category, limit, days_back):
        """Assemble the query parameters for the Reader list endpoint."""
        params = {
            'location': location,
            'withHtmlContent': 'true',
            'limit': limit
        }
        if category:
            params['category'] = category
        # Only filter by date when a positive window is requested.
        if days_back > 0:
            since_date = datetime.now() - timedelta(days=days_back)
            params['updatedAfter'] = since_date.isoformat()
        return params
    
    def _fallback_fetch(self, location, category, limit, days_back):
        """Retry the list request on a proxy-free session after a proxy failure."""
        LOG.warning("尝试不使用代理直接连接Readwise...")
        try:
            # A fresh session inherits no proxy settings from self.session.
            direct = requests.Session()
            direct.headers.update({
                'Authorization': f'Token {self.api_token}',
                'User-Agent': 'IntelligentAnalyzer/1.0 Readwise Client',
                'Content-Type': 'application/json'
            })

            query = {
                'location': location,
                'withHtmlContent': 'true',
                'limit': limit
            }
            if category:
                query['category'] = category
            if days_back > 0:
                cutoff = datetime.now() - timedelta(days=days_back)
                query['updatedAfter'] = cutoff.isoformat()

            # Shorter timeout than the primary path: this is a best-effort retry.
            resp = direct.get(f"{self.base_url}/list/", params=query, timeout=15)
            resp.raise_for_status()

            results = resp.json().get('results', [])
            LOG.info("直接连接成功，已获取Readwise数据")
            return results
        except Exception as e:
            LOG.error(f"直接连接也失败：{str(e)}")
            return []
    
    def get_document_content(self, document_id):
        """
        Fetch the detailed content of a single document.

        Args:
            document_id: the Readwise document id.

        Returns:
            dict: parsed document payload, or None on any failure.
        """
        try:
            resp = self.session.get(f"{self.base_url}/get/{document_id}/", timeout=30)
            resp.raise_for_status()
            return resp.json()
        except Exception as e:
            LOG.error(f"获取文档 {document_id} 内容失败：{str(e)}")
            return None
    
    def _clean_content(self, content, max_length=3000):
        """
        Strip HTML and normalize a document's content into plain text.

        Args:
            content: raw content, possibly HTML.
            max_length: upper bound on the returned text length.

        Returns:
            str: cleaned plain-text content (possibly truncated).
        """
        if not content:
            return ""

        try:
            soup = BeautifulSoup(content, 'html.parser')

            # Drop non-content tags outright.
            for junk in soup(["script", "style", "meta", "link"]):
                junk.decompose()

            # Annotate structural tags so the extracted text keeps some layout.
            self._process_html_structure(soup)

            # Extract and normalize the plain text.
            cleaned = self._clean_text(soup.get_text())

            # Enforce the length budget.
            if len(cleaned) > max_length:
                cleaned = self._truncate_text(cleaned, max_length)

            return cleaned

        except Exception as e:
            # Any parser failure degrades to the regex-only cleanup.
            LOG.warning(f"HTML解析失败，使用简单清理: {e}")
            return self._simple_clean_content(content, max_length)

    def _process_html_structure(self, soup):
        """Mutate the soup in place so plain-text extraction keeps readable structure."""
        # Newline after block-level elements (paragraphs, headings, breaks).
        for block in soup.find_all(['p', 'div', 'br', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6']):
            block.append('\n')

        # Bullet marker and trailing newline around list items.
        for item in soup.find_all('li'):
            item.insert(0, '• ')
            item.append('\n')

        # Markdown-style quote marker for blockquotes.
        for quote in soup.find_all('blockquote'):
            quote.insert(0, '> ')
            quote.append('\n')

        # Wrap code spans and blocks in backticks.
        for snippet in soup.find_all(['code', 'pre']):
            snippet.insert(0, '`')
            snippet.append('`')

    def _clean_text(self, text):
        """清理提取的文本"""
        if not text:
            return ""

        # 规范化空白字符
        text = re.sub(r'[ \t]+', ' ', text)  # 多个空格/制表符合并为一个空格
        text = re.sub(r'\n\s*\n\s*\n+', '\n\n', text)  # 多个连续换行合并为两个
        text = re.sub(r'^\s+|\s+$', '', text, flags=re.MULTILINE)  # 移除行首行尾空格

        # 清理特殊字符
        text = re.sub(r'[^\w\s\u4e00-\u9fff\u3400-\u4dbf\u20000-\u2a6df\u2a700-\u2b73f\u2b740-\u2b81f\u2b820-\u2ceaf\uf900-\ufaff\u3300-\u33ff\ufe30-\ufe4f\uf900-\ufaff\u2f800-\u2fa1f.,!?;:()\[\]{}"\'-/\\@#$%^&*+=<>|~`]', ' ', text)

        # 再次清理多余空格
        text = re.sub(r'\s+', ' ', text).strip()

        return text

    def _truncate_text(self, text, max_length):
        """Truncate text at a sentence or word boundary when one is close enough."""
        if len(text) <= max_length:
            return text

        clipped = text[:max_length]

        # Last sentence terminator (ASCII or CJK) inside the clipped window.
        sentence_end = max(clipped.rfind(ch) for ch in '.!?。！？')

        # Accept the sentence boundary only if it is not too early.
        if sentence_end > max_length * 0.7:
            return clipped[:sentence_end + 1] + "\n\n[内容已截断...]"

        # Otherwise fall back to the last word boundary, if late enough.
        space_at = clipped.rfind(' ')
        if space_at > max_length * 0.8:
            return clipped[:space_at] + "...\n\n[内容已截断...]"

        # Hard cut as a last resort.
        return clipped + "...\n\n[内容已截断...]"

    def _simple_clean_content(self, content, max_length):
        """Regex-only content cleanup, used when HTML parsing fails."""
        if not content:
            return ""

        # Tags out, HTML entities to spaces, whitespace collapsed.
        stripped = re.sub(r'<[^>]+>', '', content)
        stripped = re.sub(r'&[a-zA-Z0-9#]+;', ' ', stripped)
        stripped = re.sub(r'\s+', ' ', stripped).strip()

        # Enforce the length budget with a hard cut.
        if len(stripped) > max_length:
            return stripped[:max_length] + "...\n\n[内容已截断...]"
        return stripped

    def _analyze_content_quality(self, doc):
        """
        Score the document's available content fields and pick the best one.

        Args:
            doc: raw document dict from the API.

        Returns:
            tuple: (best content, content-type label, quality score)
        """
        candidates = []

        html_content = doc.get('html_content', '')
        if html_content:
            candidates.append(
                (html_content, 'HTML内容',
                 self._calculate_content_score(html_content, is_html=True)))

        plain = doc.get('content', '')
        if plain:
            candidates.append(
                (plain, '文本内容',
                 self._calculate_content_score(plain, is_html=False)))

        summary = doc.get('summary', '')
        if summary:
            # Summaries are down-weighted: they carry less detail.
            candidates.append(
                (summary, '摘要',
                 self._calculate_content_score(summary, is_html=False) * 0.8))

        if not candidates:
            return "", "无内容", 0

        # Highest score wins; ties resolve to the earliest candidate.
        return max(candidates, key=lambda c: c[2])

    def _calculate_content_score(self, content, is_html=False):
        """
        Score content quality on a 0-100 scale.

        Args:
            content: the content text.
            is_html: whether the content should be parsed as HTML.

        Returns:
            float: quality score (0-100).
        """
        if not content:
            return 0

        score = 0

        # Raw length (up to 30 points).
        size = len(content)
        if size > 500:
            score += 30
        elif size > 200:
            score += 20
        elif size > 50:
            score += 10

        # HTML structure (up to 20 points): paragraphs, headings, lists.
        if is_html:
            soup = BeautifulSoup(content, 'html.parser')
            if soup.find_all(['p', 'div', 'article']):
                score += 10
            if soup.find_all(['h1', 'h2', 'h3', 'h4', 'h5', 'h6']):
                score += 5
            if soup.find_all(['ul', 'ol', 'li']):
                score += 5

        # Richness (up to 25 points): sentence count...
        fragments = [s for s in re.split(r'[.!?。！？]', content) if s.strip()]
        if len(fragments) > 10:
            score += 15
        elif len(fragments) > 5:
            score += 10
        elif len(fragments) > 2:
            score += 5

        # ...plus vocabulary diversity (unique/total token ratio).
        tokens = re.findall(r'\b\w+\b', content.lower())
        if tokens:
            score += min(10, len(set(tokens)) / len(tokens) * 20)

        # Completeness (up to 25 points): ends on a sentence terminator...
        if re.search(r'[.!?。！？]\s*$', content.strip()):
            score += 10

        # ...and is mostly visible text rather than markup/whitespace.
        visible = re.sub(r'<[^>]+>', '', content)
        visible = re.sub(r'\s+', ' ', visible).strip()
        if len(visible) > len(content) * 0.3:
            score += 15

        return min(100, score)
    
    def export_reading_summary(self, location='feed', category=None, days_back=7, date=None, hour=None):
        """
        Export a reading summary to a Markdown file.

        Args:
            location: document location ('feed', 'archive', 'later').
            category: optional document category filter.
            days_back: how many days back to fetch.
            date: date string (YYYY-MM-DD); defaults to today.
            hour: hour string (HH); defaults to the current hour.

        Returns:
            str: path of the generated file, or None when no documents exist.
        """
        LOG.debug(f"准备导出Readwise阅读摘要，位置: {location}, 类别: {category}")

        documents = self.fetch_documents(location, category, limit=50, days_back=days_back)

        if not documents:
            LOG.warning("未找到任何Readwise文档")
            return None

        # Default to the current date/hour when not provided by the caller.
        if date is None:
            date = datetime.now().strftime('%Y-%m-%d')
        if hour is None:
            hour = datetime.now().strftime('%H')

        # Storage layout: readwise_data/<location>[_<category>]/<date>/<hour>.md
        category_suffix = f"_{category}" if category else ""
        dir_path = os.path.join('readwise_data', f'{location}{category_suffix}', date)
        os.makedirs(dir_path, exist_ok=True)

        file_path = os.path.join(dir_path, f'{hour}.md')

        # Write out the summary (UTF-8 so CJK content survives on any platform).
        with open(file_path, 'w', encoding='utf-8') as file:
            file.write(f"# Readwise阅读摘要 - {location.title()}")
            if category:
                file.write(f" ({category.upper()})")
            file.write(f" ({date} {hour}:00)\n\n")

            file.write(f"## 📊 数据概览\n")
            file.write(f"- 获取时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n")
            file.write(f"- 文档位置: {location}\n")
            if category:
                file.write(f"- 文档类别: {category}\n")
            file.write(f"- 文档数量: {len(documents)}\n")
            file.write(f"- 时间范围: 最近{days_back}天\n\n")

            file.write(f"## 📚 阅读内容\n\n")

            for idx, doc in enumerate(documents, start=1):
                self._write_document_entry(file, idx, doc)

        LOG.info(f"Readwise阅读摘要文件生成：{file_path}")
        return file_path

    def _write_document_entry(self, file, idx, doc):
        """Write one document's Markdown section to the open summary file."""
        # API fields may be present but explicitly null; guard before .strip().
        title = (doc.get('title') or '无标题').strip()
        author = (doc.get('author') or '未知作者').strip()
        source = (doc.get('source') or '未知来源').strip()
        url = doc.get('source_url', '')
        summary = doc.get('summary', '')
        content = doc.get('content', '')
        html_content = doc.get('html_content', '')

        time_str = self._format_doc_time(doc.get('created_at', ''))

        file.write(f"### {idx}. {title}\n\n")
        file.write(f"**作者**: {author}  \n")
        file.write(f"**来源**: {source}  \n")
        file.write(f"**时间**: {time_str}  \n")
        if url:
            file.write(f"**链接**: {url}  \n")

        # Pick the richest content field (HTML / plain / summary) by score.
        best_content, content_type, quality_score = self._analyze_content_quality(doc)

        max_length = getattr(self.config, 'readwise_max_content_length', 1500)

        # Pre-initialize so the summary comparison below cannot hit an
        # unbound local when best_content is empty (bug in the original).
        main_content = ""
        if best_content:
            main_content = self._clean_content(best_content, max_length=max_length)
            if main_content:
                file.write(f"\n**{content_type}** (质量分数: {quality_score:.1f}):\n{main_content}\n")

        # Also emit the summary when it differs from the chosen content.
        if summary and summary != content and summary != html_content:
            clean_summary = self._clean_content(summary, max_length=800)
            if clean_summary and clean_summary != main_content[:len(clean_summary)]:
                file.write(f"\n**摘要**: {clean_summary}\n")

        file.write("\n---\n\n")

    def _format_doc_time(self, created_at):
        """Render an ISO timestamp as 'YYYY-MM-DD HH:MM'; pass through on failure."""
        if not created_at:
            return '未知时间'
        try:
            created_time = datetime.fromisoformat(created_at.replace('Z', '+00:00'))
            return created_time.strftime('%Y-%m-%d %H:%M')
        except (ValueError, AttributeError, TypeError):
            # Unparseable timestamps are shown verbatim rather than crashing
            # (the original bare except: also hid real bugs).
            return created_at

# Usage example
if __name__ == "__main__":
    try:
        from config import Config
        client = ReadwiseClient(Config())
    except ImportError:
        # No project config module available; rely on environment variables.
        client = ReadwiseClient()

    # Export the reading content from the last 7 days.
    client.export_reading_summary(location='feed', days_back=7)
