import requests
import json
import logging
from datetime import datetime
from bs4 import BeautifulSoup
import time
import random
import os
from app.models import Article, db

class CrawlerService:
    """WeChat Official Account article crawler.

    Pages through an account's publish list via the mp.weixin.qq.com
    admin endpoint, downloads each article's HTML body, and returns the
    collected article dicts; persisting them is left to the caller.
    """

    def __init__(self, token, cookie, fakeid):
        """Initialize the crawler.

        Args:
            token (str): WeChat admin token.
            cookie (str): WeChat login cookie (sent verbatim in headers).
            fakeid (str): fakeid of the target official account.
        """
        self.token = token
        self.cookie = cookie
        self.fakeid = fakeid
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
            'Connection': 'keep-alive',
            'Cookie': cookie
        }

        # One session for every request; trust_env=False makes the session
        # ignore proxy settings inherited from the environment.
        self.session = requests.Session()
        self.session.trust_env = False

        # Also scrub the process-wide proxy variables.  NOTE(review): this
        # mutates os.environ for the entire process — confirm nothing else
        # in the application depends on these variables.
        for key in ('http_proxy', 'https_proxy', 'HTTP_PROXY', 'HTTPS_PROXY'):
            os.environ.pop(key, None)

        self.logger = logging.getLogger(__name__)

        # Directory where _save_html writes standalone HTML copies
        # (<project root>/data/html, three levels up from this file).
        self.html_dir = os.path.join(
            os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
            'data', 'html')
        os.makedirs(self.html_dir, exist_ok=True)

    def parse_nested_json(self, data):
        """Recursively decode JSON strings nested inside *data*.

        The WeChat API returns JSON documents whose values are themselves
        JSON-encoded strings; this walks dicts and lists and decodes every
        string that parses as JSON, leaving non-JSON strings untouched.

        Args:
            data: A JSON string, dict, list, or any scalar.

        Returns:
            The fully decoded structure (scalars are returned unchanged).
        """
        if isinstance(data, str):
            try:
                data = json.loads(data)
            except json.JSONDecodeError:
                return data  # plain string, not JSON — keep as-is

        if isinstance(data, dict):
            return {k: self.parse_nested_json(v) for k, v in data.items()}
        if isinstance(data, list):
            return [self.parse_nested_json(item) for item in data]
        return data

    def collect_articles(self, max_articles=5, page_size=5):
        """Collect up to *max_articles* articles from the account.

        Pages through the publish list and fetches the HTML body of each
        article; entries whose body cannot be fetched are skipped.  A
        random 1-3 s delay is inserted between page requests.

        Args:
            max_articles (int): Stop after this many articles (default 5,
                matching the previous hard-coded limit).
            page_size (int): Articles requested per list page (default 5).

        Returns:
            list[dict]: Article dicts with title/digest/content_url/
            content/cover/create_time/update_time/author/copyright_stat.

        Raises:
            Exception: If the list endpoint reports a non-zero `ret`.
        """
        try:
            articles = []
            page = 0

            while len(articles) < max_articles:
                self.logger.info(f"正在获取第 {page + 1} 页文章...")

                url = 'https://mp.weixin.qq.com/cgi-bin/appmsgpublish'
                params = {
                    'sub': 'list',
                    'search_field': 'null',
                    'begin': page * page_size,
                    'count': page_size,
                    'query': '',
                    'fakeid': self.fakeid,
                    'type': '101_1',
                    'free_publish_type': '1',
                    'sub_action': 'list_ex',
                    'fingerprint': '9efe936eecdc83855597396c66aafdf7',
                    'token': self.token,
                    'lang': 'zh_CN',
                    'f': 'json',
                    'ajax': '1'
                }

                response = self.session.get(url, params=params, headers=self.headers, timeout=10)
                data = response.json()

                # Top-level payload embeds further JSON-encoded strings.
                parsed_data = self.parse_nested_json(data)

                # Guard clause: surface API errors immediately.
                if parsed_data.get('base_resp', {}).get('ret') != 0:
                    error_msg = parsed_data.get('base_resp', {}).get('err_msg', '未知错误')
                    raise Exception(f"获取文章列表失败: {error_msg}")

                publish_page = parsed_data.get('publish_page', {})
                if isinstance(publish_page, str):
                    publish_page = json.loads(publish_page)

                publish_list = publish_page.get('publish_list', [])
                if not publish_list:
                    break  # no more pages

                for publish_item in publish_list:
                    try:
                        publish_info = publish_item.get('publish_info', {})
                        if isinstance(publish_info, str):
                            publish_info = json.loads(publish_info)

                        appmsgex = publish_info.get('appmsgex', [])
                        if not appmsgex:
                            continue

                        # Only the lead article of each push is collected.
                        article = appmsgex[0]

                        self.logger.info(f"正在获取文章: {article['title']}")
                        content = self.get_article_content(article['link'])
                        if not content:
                            continue  # body fetch failed — skip this entry

                        articles.append({
                            'title': article['title'],
                            'digest': article.get('digest', ''),
                            'content_url': article['link'],
                            'content': content,  # full HTML body
                            # Optional fields use .get so one malformed entry
                            # doesn't abort the whole item with a KeyError.
                            'cover': article.get('cover', ''),
                            'create_time': datetime.fromtimestamp(article['create_time']),
                            'update_time': datetime.now(),
                            'author': article.get('author_name', ''),
                            'copyright_stat': article.get('copyright_stat', 0)
                        })
                        self.logger.info(f"已获取 {len(articles)}/{max_articles} 篇文章")

                        if len(articles) >= max_articles:
                            break

                    except Exception as e:
                        self.logger.error(f"处理文章出错: {str(e)}")
                        continue

                if len(articles) >= max_articles:
                    break

                page += 1
                time.sleep(random.uniform(1, 3))  # polite random delay between pages

            self.logger.info(f"文章采集完成，共获取 {len(articles)} 篇文章")
            return articles

        except Exception as e:
            self.logger.error(f"采集文章失败: {str(e)}")
            raise

    def get_article_content(self, url):
        """Download an article page and extract its body HTML.

        Promotes lazy-load ``data-src`` attributes on <img>/<video> tags
        to plain ``src`` so the saved HTML renders outside WeChat.

        Args:
            url (str): Article URL.

        Returns:
            str | None: HTML of the ``rich_media_content`` div, or None
            if the div is missing or the request fails.
        """
        try:
            response = self.session.get(url, headers=self.headers, timeout=10)
            response.encoding = 'utf-8'
            soup = BeautifulSoup(response.text, 'html.parser')

            content_div = soup.find('div', class_='rich_media_content')
            if content_div is None:
                return None

            # WeChat lazy-loads media via data-src; copy it into src.
            for tag in content_div.find_all(['img', 'video']):
                if tag.get('data-src'):
                    tag['src'] = tag['data-src']

            return str(content_div)

        except Exception as e:
            self.logger.error(f"获取文章内容失败: {str(e)}")
            return None

    def _save_html(self, title, content):
        """Write *content* into a standalone, styled HTML file.

        Args:
            title (str): Article title (first 30 chars used in the name).
            content (str): Article body HTML.

        Returns:
            str | None: Path of the written file, or None on failure.
        """
        try:
            # Timestamped file name; strip characters unsafe for file systems
            # (CJK characters are alphanumeric and therefore preserved).
            filename = f"{datetime.now().strftime('%Y%m%d_%H%M%S')}_{title[:30]}.html"
            filename = "".join(c for c in filename if c.isalnum() or c in (' ', '_', '-', '.'))
            filepath = os.path.join(self.html_dir, filename)

            # Wrap the body in a minimal responsive HTML shell.
            html_content = f'''<!DOCTYPE html>
<html lang="zh-CN">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>{title}</title>
    <style>
        body {{
            font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif;
            line-height: 1.6;
            color: #333;
            max-width: 800px;
            margin: 0 auto;
            padding: 20px;
        }}
        .article-content {{
            font-size: 16px;
        }}
        .article-content img {{
            max-width: 100%;
            height: auto;
            margin: 10px 0;
        }}
        .article-content p {{
            margin-bottom: 1em;
        }}
    </style>
</head>
<body>
    <div class="article-content">
        {content}
    </div>
</body>
</html>'''

            with open(filepath, 'w', encoding='utf-8') as f:
                f.write(html_content)

            return filepath

        except Exception as e:
            self.logger.error(f"保存HTML文件失败: {str(e)}")
            return None