"""
微信公众号文章采集主程序（优化重构版）
"""
import time
import datetime
import redis
import json
import requests
import hashlib
from bs4 import BeautifulSoup
from elasticsearch import Elasticsearch
from loguru import logger
from minio import Minio
from io import BytesIO
import re
from typing import Any, Dict, Optional, List

# ================= Configuration =================
# NOTE(review): credentials are hardcoded below; consider loading them from
# environment variables or a secrets manager before sharing this file.
REDIS_CONFIG = {
    'host': '192.168.1.112',
    'port': 6379,
    'db': 15,
    'password': 'qqqAAA0130',
    'decode_responses': True
}
ES_CONFIG = {
    'hosts': ['http://192.168.1.92:9200'],
    'basic_auth': ('elastic', 'qqqAAA0130'),
    'request_timeout': 3600
}
MINIO_CONFIG = {
    'endpoint': '192.168.1.104:9002',
    'access_key': 'H53CPN3BAVPTBUTDVW33',
    'secret_key': 'KzgUpTuVjtzdnLBD35DxoZF99MexmT9PNgW23n0A',
    'secure': False
}
OSS_DOMAIN = 'http://oss.nei.47466.com:9002'

# ================= Collector class =================
class WxGzhCaiji:
    """WeChat Official Account article collector.

    Reads per-account configuration and article metadata from Redis, fetches
    each article's HTML body, mirrors its images into MinIO (rewriting the
    ``src`` attributes), and indexes the cleaned document into Elasticsearch.
    """

    def __init__(self, redis_conf: dict, es_conf: dict, minio_conf: dict):
        """Initialize the Redis, Elasticsearch and MinIO client connections.

        Args:
            redis_conf: kwargs passed straight to ``redis.Redis``.
            es_conf: kwargs passed straight to ``Elasticsearch``.
            minio_conf: dict with ``endpoint``/``access_key``/``secret_key``/``secure``.
        """
        self.r = redis.Redis(**redis_conf)
        self.es = Elasticsearch(**es_conf)
        self.minio = Minio(
            endpoint=minio_conf['endpoint'],
            access_key=minio_conf['access_key'],
            secret_key=minio_conf['secret_key'],
            secure=minio_conf['secure']
        )

    def fetch_article_html(self, url: str) -> Optional[Any]:
        """Fetch the article page and return its ``<div id="js_content">`` Tag.

        Returns ``None`` when the HTTP request fails, the status is not 200,
        or the page has no ``js_content`` div.
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36 Edg/138.0.0.0',
        }
        try:
            resp = requests.get(url, headers=headers, allow_redirects=True, timeout=10)
        except requests.RequestException as e:
            # BUG FIX: network/timeout errors used to propagate out of this
            # method and abort the caller's whole processing cycle.
            logger.warning(f"请求文章失败: {url} - {e}")
            return None
        if resp.status_code != 200:
            return None
        soup = BeautifulSoup(resp.text, 'lxml')
        # May be None when the article was deleted or the page layout changed.
        return soup.find('div', id='js_content')

    def upload_image(self, img_url: str, bucket: str, img_dir: str) -> Optional[str]:
        """Download one image and upload it to MinIO.

        Returns the public OSS URL of the stored object, or ``None`` on any
        failure (download or upload) — callers drop the image in that case.
        """
        try:
            # MinIO bucket names may not contain underscores.
            bucket = bucket.replace('_', '-').strip()
            if not self.minio.bucket_exists(bucket):
                self.minio.make_bucket(bucket)
                logger.info(f"存储桶 '{bucket}' 创建成功")
            res = requests.get(img_url, timeout=10)
            res.raise_for_status()
            # Content-addressed name: identical images dedupe to one object.
            img_name = f"{img_dir}/{hashlib.md5(res.content).hexdigest()}.jpeg"
            self.minio.put_object(
                bucket,
                img_name,
                BytesIO(res.content),
                len(res.content),
                # FIX: without an explicit content type objects are stored as
                # application/octet-stream and browsers download instead of render.
                content_type='image/jpeg'
            )
            logger.info(f"图片 {img_name} 上传成功")
            return f"{OSS_DOMAIN}/{bucket}/{img_name}"
        except Exception as e:
            logger.warning(f"图片上传失败: {img_url} - {e}")
            return None

    def process_images(self, soup: BeautifulSoup, save_img_num: int, bucket: str, img_dir: str) -> None:
        """Rewrite the article's images in place.

        The first ``save_img_num`` images are mirrored to MinIO and their
        ``src`` rewritten to the OSS URL; all remaining images (and any image
        that cannot be mirrored) are removed from the tree.
        """
        # Hoisted out of the loop: the default directory is date-based and
        # identical for every image in one article.
        if not img_dir:
            img_dir = f"default/{datetime.datetime.now():%Y/%m/%d}"
        for idx, img in enumerate(soup.select('img')):
            if idx >= save_img_num:
                img.decompose()
                continue
            # WeChat lazy-loads images: the real URL lives in data-src.
            img_src = img.get('data-src') or img.get('src')
            if not img_src:
                img.decompose()
                continue
            new_url = self.upload_image(img_src, bucket, img_dir)
            if new_url:
                img['src'] = new_url
            else:
                img.decompose()
            if 'data-src' in img.attrs:
                del img['data-src']

    def save_to_es(self, doc: dict, index: str, doc_id: str) -> None:
        """Index ``doc`` into Elasticsearch under the given id; log on failure."""
        try:
            self.es.index(index=index, id=doc_id, body=doc)
            logger.info(f"文章 {doc.get('title', '')} 入库成功")
        except Exception as e:
            # Best effort: one failed document must not stop the crawl.
            logger.error(f"ES 入库失败: {e}")

    def process_article(self, article: dict, config: dict) -> None:
        """Process one article end-to-end: fetch, clean, mirror images, index.

        Skips articles that are already indexed, that cannot be fetched, or
        whose body contains no CJK characters.
        """
        name = config['name']
        article_index = config['es_index'].strip()
        # Keep the first save_img_num images; the rest are stripped.
        save_img_num = config['save_img_num']
        minio_bucket = config['minio_bucket']
        img_dir = config['img_dir']
        url = article['link']
        aid = article.get('aid', article.get('id', ''))
        # Stable document id: md5 of "<account>:<article id>" for dedup.
        es_doc_id = hashlib.md5(f"{name}:{aid}".encode()).hexdigest()
        title = article['title']
        if self.es.exists(index=article_index, id=es_doc_id):
            logger.warning(f"【{name}】 文章 '{title}' 已入库，跳过")
            return
        content_div = self.fetch_article_html(url)
        # BUG FIX: was `if not content_div` — a bs4 Tag with no children is
        # falsy, so an empty (but present) body was misreported as a fetch
        # failure. Only None means the fetch/parse actually failed.
        if content_div is None:
            logger.warning(f"【{name}】 获取文章 '{title}' 内容失败")
            return
        # Drop WeChat's inline styling on the container.
        if 'style' in content_div.attrs:
            del content_div['style']
        # Require at least one CJK character; otherwise treat as junk and skip.
        if not re.search(r'[\u4e00-\u9fff\u3400-\u4dbf\U00020000-\U0002a6df]', content_div.text):
            logger.warning(f"【{name}】 文章 {title} 内容无中文，跳过")
            return
        self.process_images(content_div, save_img_num, minio_bucket, img_dir)
        doc = {
            # NOTE(review): naive local time with a hand-appended +0800 offset;
            # kept as-is because the ES mapping presumably expects this format.
            "@timestamp": datetime.datetime.now().isoformat() + "+0800",
            "title": title,
            "content": str(content_div),
            "source_url": url,
            "gzh_name": name,
            "pubnum": 0,
        }
        # Per-account extra fields override/extend the defaults above.
        doc.update(config.get('other_map', {}))
        self.save_to_es(doc, article_index, es_doc_id)
        # Rate-limit requests against WeChat.
        time.sleep(3)

    def run(self, sleep_min: int = 1, sleep_all: int = 5) -> None:
        """Main loop: read every account config from Redis and process its articles.

        Args:
            sleep_min: minutes to sleep after finishing each account.
            sleep_all: minutes to sleep after a full pass over all accounts.
        """
        while True:
            try:
                config_keys = self.r.keys("wx_caiji:*:config")
                logger.info(f"获取所有公众号配置: {config_keys}")
                for config_key in config_keys:
                    raw_config = self.r.get(config_key)
                    if not raw_config:
                        # Key expired between KEYS and GET; skip this account.
                        continue
                    config = json.loads(raw_config)
                    name = config['name']
                    if config.get('once', False):
                        # One-shot mode: drain the whole pending list.
                        logger.info(f"【{name}】一次性采集所有文章")
                        article_key = f"wx_caiji:{name}:article_list"
                        while True:
                            article_json = self.r.lpop(article_key)
                            if not article_json:
                                break
                            self.process_article(json.loads(article_json), config)
                    else:
                        # Incremental mode: only articles scored within the last day.
                        article_key = f"wx_caiji:{name}:list"
                        now = datetime.datetime.now()
                        now_ts = time.mktime(now.timetuple())
                        yest_ts = time.mktime((now - datetime.timedelta(days=1)).timetuple())
                        article_ids = self.r.zrangebyscore(article_key, yest_ts, now_ts)
                        if not article_ids:
                            logger.info(f"【{name}】最近一天无新文章，跳过")
                            continue
                        logger.info(f"【{name}】 最近一天文章数量：{len(article_ids)}")
                        for article_id in article_ids:
                            raw_article = self.r.get(f"wx_caiji:{name}:article:{article_id}")
                            if not raw_article:
                                # BUG FIX: a missing/expired article key used to
                                # crash json.loads(None) and abort the whole
                                # cycle; now only this article is skipped.
                                continue
                            self.process_article(json.loads(raw_article), config)
                    logger.info(f"【{name}】 运行完成，休眠 {sleep_min} 分钟")
                    time.sleep(sleep_min * 60)
                logger.info(f"所有公众号已完成采集，休眠 {sleep_all} 分钟")
                time.sleep(sleep_all * 60)
            except Exception as e:
                # Top-level boundary: log, back off, keep the daemon alive.
                logger.error(f"主循环异常: {e}")
                time.sleep(60)

if __name__ == "__main__":
    caiji = WxGzhCaiji(REDIS_CONFIG, ES_CONFIG, MINIO_CONFIG)
    caiji.run()