#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import argparse
import json
import logging
import os
import re
import sys
import time
import urllib.parse
from datetime import datetime

import pymysql
import requests
from bs4 import BeautifulSoup
from dotenv import load_dotenv

# Configure module-wide logging: INFO level, timestamped single-line records.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)
logger = logging.getLogger(__name__)

class ZsxqSpider:
    """Crawler for a single Zhishixingqiu (zsxq.com) group.

    Fetches the group's topics via the official web API, downloads attached
    images and files, writes one JSON + one Markdown file per topic under
    ``output_dir``, and mirrors the data into a MySQL database.

    Configuration comes from environment variables (optionally a ``.env``
    file): ZSXQ_COOKIE, GROUP_ID, MAX_RETRIES, REQUEST_DELAY, OUTPUT_DIR and
    the MYSQL_* connection settings.
    """

    def __init__(self):
        # Load configuration from a .env file, if one exists.
        load_dotenv()

        # Crawler parameters (env vars override the fallback defaults).
        # NOTE(review): the fallback cookie below embeds what looks like a
        # real access token; it should be rotated and removed from source.
        self.cookie = os.getenv('ZSXQ_COOKIE', 'abtest_env=product; sajssdk_2015_cross_new_user=1; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%221995bc97b09925-0d7777777777778-4c657b58-3686400-1995bc97b0a198d%22%2C%22first_id%22%3A%22%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24latest_referrer%22%3A%22%22%7D%2C%22identities%22%3A%22eyIkaWRlbnRpdHlfY29va2llX2lkIjoiMTk5NWJjOTdiMDk5MjUtMGQ3Nzc3Nzc3Nzc3Nzc4LTRjNjU3YjU4LTM2ODY0MDAtMTk5NWJjOTdiMGExOThkIn0%3D%22%2C%22history_login_id%22%3A%7B%22name%22%3A%22%22%2C%22value%22%3A%22%22%7D%7D; zsxq_access_token=79411321-476B-4973-8189-2338D030190A_0EFD5A720EAE963B')
        self.group_id = os.getenv('GROUP_ID', '88851142822152')
        self.max_retries = int(os.getenv('MAX_RETRIES', 3))
        self.request_delay = float(os.getenv('REQUEST_DELAY', 1))
        self.output_dir = os.getenv('OUTPUT_DIR', './output')

        # MySQL connection settings.
        self.mysql_host = os.getenv('MYSQL_HOST', 'localhost')
        self.mysql_port = int(os.getenv('MYSQL_PORT', 3306))
        self.mysql_user = os.getenv('MYSQL_USER', 'root')
        self.mysql_password = os.getenv('MYSQL_PASSWORD', 'root')
        self.mysql_database = os.getenv('MYSQL_DATABASE', 'zsxq_group')
        self.db_conn = None

        # Ensure the output directory tree exists before any download runs.
        os.makedirs(self.output_dir, exist_ok=True)
        os.makedirs(os.path.join(self.output_dir, 'images'), exist_ok=True)
        os.makedirs(os.path.join(self.output_dir, 'files'), exist_ok=True)

        # Connect to MySQL and create schema (non-fatal on failure).
        self.init_database()

        # Request headers mimicking the official web client.
        self.headers = {
            'authority': 'api.zsxq.com',
            'accept': 'application/json, text/plain, */*',
            'accept-language': 'zh-CN,zh;q=0.9',
            'origin': 'https://wx.zsxq.com',
            'referer': f'https://wx.zsxq.com/group/{self.group_id}',
            'sec-ch-ua': '"Google Chrome";v="107", "Chromium";v="107", "Not=A?Brand";v="24"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"Windows"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-site',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/140.0.0.0 Safari/537.36',
            'x-version': '2.28.0',
            'cookie': self.cookie  # auth is cookie-based; sent on every request
        }

        # Fail fast when no authentication cookie is available at all.
        if not self.cookie:
            logger.error("请配置ZSXQ_COOKIE环境变量")
            raise ValueError("缺少必要的cookie认证信息")

    def init_database(self):
        """Connect to MySQL and create the database/tables if missing.

        On any failure, logs the cause and leaves ``self.db_conn`` as None so
        the crawler keeps running with file-only output.
        """
        logger.info(f"尝试连接MySQL数据库: host={self.mysql_host}, port={self.mysql_port}, user={self.mysql_user}, database={self.mysql_database}")
        try:
            self.db_conn = pymysql.connect(
                host=self.mysql_host,
                port=self.mysql_port,
                user=self.mysql_user,
                password=self.mysql_password,
                charset='utf8mb4',
                connect_timeout=10
            )

            logger.info(f"MySQL服务器连接成功")

            # `with` guarantees the cursor is closed even when a DDL
            # statement below raises (the original leaked it on error).
            with self.db_conn.cursor() as cursor:
                # Identifiers cannot be bound as query parameters; backtick-
                # quote the database name instead of interpolating it bare.
                cursor.execute(f"CREATE DATABASE IF NOT EXISTS `{self.mysql_database}`")
                cursor.execute(f"USE `{self.mysql_database}`")

                # Topic metadata plus paths of the exported JSON/Markdown.
                create_topics_table_sql = """
                CREATE TABLE IF NOT EXISTS topics (
                    id INT AUTO_INCREMENT PRIMARY KEY,
                    topic_id VARCHAR(100) NOT NULL UNIQUE,
                    type VARCHAR(50),
                    create_time DATETIME,
                    owner JSON,
                    text TEXT,
                    likes_count INT DEFAULT 0,
                    comments_count INT DEFAULT 0,
                    json_file_path VARCHAR(255),
                    md_file_path VARCHAR(255),
                    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
                )
                """
                cursor.execute(create_topics_table_sql)

                # Images attached to a topic; cascade-deleted with the topic.
                create_images_table_sql = """
                CREATE TABLE IF NOT EXISTS images (
                    id INT AUTO_INCREMENT PRIMARY KEY,
                    topic_id VARCHAR(100) NOT NULL,
                    image_id VARCHAR(100) NOT NULL,
                    url VARCHAR(255),
                    local_path VARCHAR(255),
                    FOREIGN KEY (topic_id) REFERENCES topics(topic_id) ON DELETE CASCADE
                )
                """
                cursor.execute(create_images_table_sql)

                # Files attached to a topic; cascade-deleted with the topic.
                create_files_table_sql = """
                CREATE TABLE IF NOT EXISTS files (
                    id INT AUTO_INCREMENT PRIMARY KEY,
                    topic_id VARCHAR(100) NOT NULL,
                    file_id VARCHAR(100) NOT NULL,
                    name VARCHAR(255),
                    download_url VARCHAR(255),
                    local_path VARCHAR(255),
                    FOREIGN KEY (topic_id) REFERENCES topics(topic_id) ON DELETE CASCADE
                )
                """
                cursor.execute(create_files_table_sql)

            self.db_conn.commit()

            logger.info(f"MySQL数据库初始化成功: {self.mysql_database}")
        except pymysql.err.OperationalError as e:
            error_code, error_message = e.args
            logger.error(f"MySQL连接操作错误 (code: {error_code}): {error_message}")
            if error_code == 1045:
                # ER_ACCESS_DENIED_ERROR
                logger.error("访问被拒绝，请检查MySQL用户名和密码是否正确")
            elif error_code == 2003:
                # CR_CONN_HOST_ERROR
                logger.error("无法连接到MySQL服务器，请检查服务器是否运行以及主机和端口是否正确")
            else:
                logger.error(f"MySQL操作错误: {str(e)}")
            self.db_conn = None
        except Exception as e:
            logger.error(f"数据库初始化失败: {type(e).__name__}: {str(e)}")
            # Keep running without persistence when the DB is unavailable.
            self.db_conn = None

    def save_to_database(self, topic_data, json_file, md_file):
        """Upsert one topic (plus its images/files) into MySQL.

        Args:
            topic_data: dict built by ``save_topic`` with topic fields,
                ``images`` and ``files`` lists.
            json_file: path of the exported JSON file.
            md_file: path of the exported Markdown file.

        Returns:
            True on success, False when the DB is unavailable or the write
            fails (the transaction is rolled back in that case).
        """
        if not self.db_conn:
            logger.warning("数据库连接未初始化，跳过数据库保存")
            return False

        try:
            # `with` closes the cursor even if an INSERT below raises.
            with self.db_conn.cursor() as cursor:
                cursor.execute(f"USE `{self.mysql_database}`")

                topic_id = topic_data['topic_id']
                type_ = topic_data['type']

                # Normalize the API timestamp (e.g. 2025-09-20T14:14:35.595+0800)
                # to a MySQL DATETIME string (2025-09-20 14:14:35).
                create_time_str = topic_data['create_time'] if topic_data['create_time'] else None
                create_time = None
                if create_time_str:
                    try:
                        if 'T' in create_time_str:
                            # Drop the UTC offset (API uses '+HHMM' offsets;
                            # negative offsets are not expected here).
                            if '+' in create_time_str:
                                create_time_str = create_time_str.split('+')[0]
                            # Drop fractional seconds.
                            if '.' in create_time_str:
                                create_time_str = create_time_str.split('.')[0]
                            # ISO 'T' separator -> space.
                            create_time_str = create_time_str.replace('T', ' ')
                        create_time = create_time_str
                    except Exception as e:
                        logger.warning(f"日期时间格式转换失败: {str(e)}")
                        create_time = None

                owner = json.dumps(topic_data['owner'], ensure_ascii=False)
                text = topic_data['text']
                likes_count = topic_data['likes_count']
                comments_count = topic_data['comments_count']

                # Upsert keyed on the UNIQUE topic_id column.
                insert_topic_sql = """
                INSERT INTO topics (topic_id, type, create_time, owner, text, likes_count, comments_count, json_file_path, md_file_path)
                VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
                ON DUPLICATE KEY UPDATE
                    type = VALUES(type),
                    create_time = VALUES(create_time),
                    owner = VALUES(owner),
                    text = VALUES(text),
                    likes_count = VALUES(likes_count),
                    comments_count = VALUES(comments_count),
                    json_file_path = VALUES(json_file_path),
                    md_file_path = VALUES(md_file_path)
                """
                cursor.execute(insert_topic_sql, (
                    topic_id, type_, create_time, owner, text, likes_count, comments_count, json_file, md_file
                ))

                # Replace (not merge) attachment rows for this topic.
                cursor.execute("DELETE FROM images WHERE topic_id = %s", (topic_id,))
                cursor.execute("DELETE FROM files WHERE topic_id = %s", (topic_id,))

                insert_image_sql = """
                INSERT INTO images (topic_id, image_id, url, local_path)
                VALUES (%s, %s, %s, %s)
                """
                for img in topic_data['images']:
                    cursor.execute(insert_image_sql, (
                        topic_id, img['image_id'], img['url'], img['local_path']
                    ))

                insert_file_sql = """
                INSERT INTO files (topic_id, file_id, name, download_url, local_path)
                VALUES (%s, %s, %s, %s, %s)
                """
                for file in topic_data['files']:
                    cursor.execute(insert_file_sql, (
                        topic_id, file['file_id'], file['name'], file['download_url'], file['local_path']
                    ))

            self.db_conn.commit()

            logger.info(f"帖子数据已保存到数据库: {topic_id}")
            return True
        except Exception as e:
            logger.error(f"保存数据到数据库失败: {str(e)}")
            # Undo the partial topic/images/files writes.
            if self.db_conn:
                self.db_conn.rollback()
            return False

    def make_request(self, url, params=None, method='GET', data=None, is_file=False):
        """Send an HTTP request with retries.

        Args:
            url: target URL.
            params: optional query parameters.
            method: 'GET' or anything else for POST.
            data: JSON body for POST requests.
            is_file: when True, return the raw (streamed) Response instead
                of parsing JSON.

        Returns:
            Parsed JSON dict (when ``succeeded`` is true), the raw Response
            for file downloads, or None after ``max_retries`` failures.
        """
        retries = 0
        while retries < self.max_retries:
            try:
                if method == 'GET':
                    # NOTE(review): verify=False disables TLS certificate
                    # checking — confirm this is intentional.
                    response = requests.get(
                        url,
                        params=params,
                        headers=self.headers,
                        verify=False,
                        timeout=10,
                        stream=is_file  # stream file bodies instead of buffering them fully in memory
                    )
                else:
                    response = requests.post(
                        url,
                        params=params,
                        headers=self.headers,
                        json=data,
                        verify=False,
                        timeout=10
                    )

                response.raise_for_status()

                if is_file:
                    return response

                result = response.json()
                if result.get('succeeded', False):
                    return result
                else:
                    logger.error(f"请求失败: {result.get('error_msg', '未知错误')}")
                    retries += 1
                    if retries < self.max_retries:
                        # Log the actual sleep duration (the original
                        # mistakenly printed the retry count as seconds).
                        logger.info(f"{self.request_delay}秒后重试...")
                        time.sleep(self.request_delay)
            except requests.exceptions.RequestException as e:
                logger.error(f"请求异常: {str(e)}")
                retries += 1
                if retries < self.max_retries:
                    logger.info(f"{self.request_delay}秒后重试...")
                    time.sleep(self.request_delay)

        logger.error(f"达到最大重试次数({self.max_retries})，请求失败")
        return None

    def download_file(self, url, save_path):
        """Download ``url`` to ``save_path`` in chunks.

        Returns True on success; on failure logs the error, removes any
        partially written file, and returns False.
        """
        try:
            response = self.make_request(url, is_file=True)
            if response and response.status_code == 200:
                with open(save_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        if chunk:
                            f.write(chunk)
                logger.info(f"文件下载成功: {save_path}")
                return True
        except Exception as e:
            logger.error(f"文件下载失败: {str(e)}")
            # Remove a partially written file so a failed download is not
            # mistaken for a complete one on a later run.
            try:
                if os.path.exists(save_path):
                    os.remove(save_path)
            except OSError:
                pass
        return False

    def get_topic_detail(self, topic_id):
        """Fetch a single topic's detail; returns resp_data dict or None."""
        url = f"https://api.zsxq.com/v2/topics/{topic_id}/info"
        result = self.make_request(url)
        if result:
            return result.get('resp_data', {})
        return None

    def get_topic_list(self, end_time=None):
        """Fetch one page (20) of the group owner's topics.

        Args:
            end_time: pagination cursor — the create_time of the last topic
                from the previous page; None for the first page.

        Returns:
            resp_data dict (contains a 'topics' list) or None on failure.
        """
        url = f"https://api.zsxq.com/v2/groups/{self.group_id}/topics"
        params = {
            'scope': 'by_owner',
            'count': '20',
        }

        if end_time:
            params['end_time'] = end_time

        result = self.make_request(url, params=params)
        if result:
            return result.get('resp_data', {})
        return None

    def extract_content(self, html_content):
        """Strip HTML tags and surrounding whitespace, returning plain text."""
        soup = BeautifulSoup(html_content, 'html.parser')
        return soup.get_text().strip()

    def save_topic(self, topic_data):
        """Persist one topic: download attachments, write JSON + Markdown,
        and mirror the record into MySQL.

        Args:
            topic_data: raw topic dict from the list API; must contain a
                'type' key naming the sub-dict with the actual content.

        Returns:
            True on success, False on invalid input or any error.
        """
        try:
            # The content lives under a key named by the topic's type
            # (e.g. 'talk'); reject entries that lack it.
            topic_type = topic_data.get('type')
            if not topic_type or topic_type not in topic_data:
                logger.warning("无效的帖子数据")
                return False

            talk = topic_data[topic_type]
            topic_id = topic_data.get('topic_id', '')
            create_time = topic_data.get('create_time', '')

            save_data = {
                'topic_id': topic_id,
                'type': topic_type,
                'create_time': create_time,
                'owner': topic_data.get('owner', {}),
                'text': talk.get('text', ''),
                'likes_count': talk.get('likes_count', 0),
                'comments_count': talk.get('comments_count', 0),
                'images': [],
                'files': []
            }

            # Download images, preferring the highest-resolution variant.
            if 'images' in talk:
                for img in talk['images']:
                    img_url = None
                    if 'original' in img:
                        img_url = img['original'].get('url')
                    elif 'large' in img:
                        img_url = img['large'].get('url')
                    elif 'thumbnail' in img:
                        img_url = img['thumbnail'].get('url')

                    if img_url:
                        img_id = img.get('image_id', '')
                        # Derive the extension from the URL *path* only, so
                        # query strings like '?sign=a.b' cannot corrupt it
                        # (the original split on '.' before stripping '?').
                        url_path = urllib.parse.urlparse(img_url).path
                        img_ext = url_path.rsplit('.', 1)[-1] if '.' in url_path else 'jpg'
                        img_path = os.path.join(self.output_dir, 'images', f"{img_id}.{img_ext}")

                        if self.download_file(img_url, img_path):
                            save_data['images'].append({
                                'image_id': img_id,
                                'url': img_url,
                                'local_path': img_path
                            })

            # Resolve each attachment's short-lived download URL, then fetch.
            if 'files' in talk:
                for file in talk['files']:
                    file_id = file.get('file_id', '')
                    file_name = file.get('name', '')

                    # NOTE(review): file_name comes from the API and is used
                    # verbatim as a path component — consider sanitizing.
                    file_url = f"https://api.zsxq.com/v2/files/{file_id}/download_url"
                    file_result = self.make_request(file_url)

                    if file_result:
                        download_url = file_result.get('resp_data', {}).get('download_url', '')
                        if download_url:
                            file_path = os.path.join(self.output_dir, 'files', file_name)

                            if self.download_file(download_url, file_path):
                                save_data['files'].append({
                                    'file_id': file_id,
                                    'name': file_name,
                                    'download_url': download_url,
                                    'local_path': file_path
                                })

            # Build a filesystem-safe file stem: timestamp + sanitized title.
            timestamp = create_time.replace('T', '_').replace(':', '-').split('+')[0] if create_time else str(int(time.time()))
            safe_title = re.sub(r'[\\/:*?"<>|]', '_', talk.get('text', '').split('\n')[0][:30] or f"topic_{topic_id}")
            json_file = os.path.join(self.output_dir, f"{timestamp}_{safe_title}.json")

            with open(json_file, 'w', encoding='utf-8') as f:
                json.dump(save_data, f, ensure_ascii=False, indent=2)

            # Also emit a human-readable Markdown rendering.
            md_file = os.path.join(self.output_dir, f"{timestamp}_{safe_title}.md")
            with open(md_file, 'w', encoding='utf-8') as f:
                # Title: first line of the text, or a fallback id.
                title = talk.get('text', '').split('\n')[0] or f"主题 {topic_id}"
                f.write(f"# {title}\n\n")

                # Author and publish time.
                owner_name = save_data['owner'].get('name', '未知作者')
                f.write(f"**作者**: {owner_name}\n")
                f.write(f"**发布时间**: {create_time}\n\n")

                # Body: remaining non-empty lines.
                for line in talk.get('text', '').split('\n')[1:]:
                    if line.strip():
                        f.write(f"{line}\n\n")

                # Inline image references (relative to the output dir).
                for img in save_data['images']:
                    img_name = os.path.basename(img['local_path'])
                    f.write(f"![图片](images/{img_name})\n\n")

                # Attachment links.
                for file in save_data['files']:
                    file_name = file['name']
                    f.write(f"[文件: {file_name}](files/{file_name})\n\n")

                # Footer with engagement counters.
                f.write(f"---\n**点赞数**: {save_data['likes_count']} | **评论数**: {save_data['comments_count']}")

            # Mirror into MySQL (best-effort; failure is logged, not raised).
            self.save_to_database(save_data, json_file, md_file)

            logger.info(f"帖子保存成功: {json_file}")
            return True
        except Exception as e:
            logger.error(f"保存帖子失败: {str(e)}")
        return False

    def crawl(self, max_topics=None):
        """Crawl the group's topics page by page.

        Args:
            max_topics: optional cap on how many topics to save.

        Returns:
            Number of topics processed.
        """
        logger.info(f"开始爬取知识星球群组: {self.group_id}")

        end_time = None
        topics_count = 0

        try:
            while True:
                data = self.get_topic_list(end_time)

                if not data or 'topics' not in data or len(data['topics']) == 0:
                    logger.info("没有更多帖子了，爬取结束")
                    break

                for topic in data['topics']:
                    # Advance the pagination cursor for EVERY topic first;
                    # the original only advanced it for non-sticky topics,
                    # so a page of sticky posts could stall paging forever.
                    end_time = topic.get('create_time')

                    # Skip pinned topics.
                    if topic.get('sticky', False):
                        continue

                    self.save_topic(topic)
                    topics_count += 1

                    # Stop once the requested cap is reached.
                    if max_topics and topics_count >= max_topics:
                        logger.info(f"已达到最大爬取数量: {max_topics}")
                        return topics_count

                    # Throttle to avoid hammering the API.
                    time.sleep(self.request_delay)

                logger.info(f"已爬取 {topics_count} 个帖子，继续下一页...")

        except KeyboardInterrupt:
            logger.info("用户中断爬取")
        except Exception as e:
            logger.error(f"爬取过程中发生错误: {str(e)}")

        logger.info(f"爬取完成，共获取 {topics_count} 个帖子")
        return topics_count

    def export_summary(self):
        """Write a summary.json describing everything crawled so far.

        Returns the summary dict, or None on failure.
        """
        summary_file = os.path.join(self.output_dir, 'summary.json')

        try:
            # Count exported artifacts currently on disk.
            json_files = [f for f in os.listdir(self.output_dir) if f.endswith('.json') and not f == 'summary.json']
            md_files = [f for f in os.listdir(self.output_dir) if f.endswith('.md')]
            images_count = len([f for f in os.listdir(os.path.join(self.output_dir, 'images')) if os.path.isfile(os.path.join(self.output_dir, 'images', f))])
            files_count = len([f for f in os.listdir(os.path.join(self.output_dir, 'files')) if os.path.isfile(os.path.join(self.output_dir, 'files', f))])

            summary = {
                'crawl_time': datetime.now().isoformat(),
                'group_id': self.group_id,
                'total_topics': len(json_files),
                'total_images': images_count,
                'total_files': files_count,
                'json_files': json_files,
                'md_files': md_files
            }

            with open(summary_file, 'w', encoding='utf-8') as f:
                json.dump(summary, f, ensure_ascii=False, indent=2)

            logger.info(f"爬取摘要已导出: {summary_file}")
            return summary
        except Exception as e:
            logger.error(f"导出摘要失败: {str(e)}")
        return None


    def __del__(self):
        # Best-effort cleanup of the MySQL connection; hasattr guards
        # against a failure partway through __init__.
        if hasattr(self, 'db_conn') and self.db_conn:
            try:
                self.db_conn.close()
                logger.info("MySQL数据库连接已关闭")
            except Exception as e:
                logger.error(f"关闭数据库连接时出错: {str(e)}")


def main():
    """CLI entry point: parse arguments, then crawl and/or export a summary."""
    parser = argparse.ArgumentParser(description='知识星球群组爬虫')
    parser.add_argument('--max-topics', type=int, help='最大爬取帖子数量')
    parser.add_argument('--export-summary', action='store_true', help='仅导出爬取摘要')
    args = parser.parse_args()

    try:
        spider = ZsxqSpider()

        if args.export_summary:
            # Summary-only mode: no crawling.
            spider.export_summary()
        else:
            # Crawl (optionally capped), then export the summary.
            spider.crawl(args.max_topics)
            spider.export_summary()

    except Exception as e:
        # logger.exception records the traceback, unlike the original
        # logger.error; sys.exit replaces the site-builtin exit().
        logger.exception(f"程序异常终止: {str(e)}")
        sys.exit(1)


# Script entry point: only run the crawler when executed directly.
if __name__ == '__main__':
    main()