#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
cron: 15 4 * * *
new Env('rss订阅推送')
依赖两个json配置文件，一个是`订阅配置.json`文件，一个是`rss配置.json`文件
这两个配置文件需要你根据自己的订阅网站编写，教程在https://gitee.com/cao15110/rsssend
"""

import json
import sqlite3
import requests
from datetime import datetime, timedelta, timezone
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import re
import cn2an
from dateutil import parser
import feedparser
import os
# ---------- Aggregated-push configuration ----------
# File that collects all notifications for the day when IMMEDIATE is False;
# lives next to this script.
AGGRE_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'dailylog')

IMMEDIATE = False          # True: push each run immediately via notify; False: append to AGGRE_FILE
def push(title, text=""):
    """Deliver a notification.

    When IMMEDIATE is True, send it right away through the project's
    notify module; otherwise append it to the daily aggregate file so
    one combined message can be sent later.
    """
    if not IMMEDIATE:
        separator = '-' * 40
        with open(AGGRE_FILE, "a", encoding="utf-8") as log:
            log.write(f"{title}\n{text}\n{separator}\n")
        return
    # Imported lazily so the aggregate-only path never needs notify.
    from notify import send
    send(title, text)
# ---------- Path constants ----------
BASE_DIR = os.path.dirname(os.path.abspath(__file__))  # directory containing this script
data_dir = os.path.join(BASE_DIR, 'data')  # per-subscription SQLite databases live here
os.makedirs(data_dir, exist_ok=True)
# ---------- 通用工具 ----------
def parse_chinese_date(text):
    """Normalize Chinese date expressions to Arabic-digit form.

    Converts full-width separators, 4-character Chinese years
    (二〇二四年 -> 2024年), Chinese-numeral months/days (via cn2an),
    and the lunar aliases 腊月/正月.  Returns the normalized string;
    any exception from cn2an propagates to the caller.
    """
    # Normalize full-width separators to their ASCII equivalents.
    text = text.replace("－", "-").replace("／", "/").replace("．", ".")

    # 4-character Chinese year: both '〇' and '零' mean 0.
    # (The original character class repeated 一-九 twice; deduplicated here.)
    digits = "零一二三四五六七八九"
    year_pattern = r"([〇零一二三四五六七八九]{4})年"
    text = re.sub(
        year_pattern,
        lambda m: "".join("0" if c == "〇" else str(digits.index(c)) for c in m.group(1)) + "年",
        text,
    )

    # Chinese-numeral month/day, e.g. 十二月, 廿三日; '号' is normalized to '日'.
    # Bind the loop variable as a default arg so the lambda cannot pick up a
    # stale value (late-binding closure pitfall).
    for unit in ("月", "日", "号"):
        pattern = fr"([零一二三四五六七八九十百廿卅]+){unit}"
        text = re.sub(
            pattern,
            lambda m, u=unit: f"{cn2an.cn2an(m.group(1), 'smart')}{u.replace('号', '日')}",
            text,
        )

    # Lunar month aliases (plain substring replacement — no regex needed).
    text = text.replace("腊月", "12月").replace("正月", "1月")
    return text

def parse_relative_time(text):
    """Parse relative Chinese time expressions.

    Supports 昨天/今天/明天/后天 (optionally combined with HH:MM),
    "N天后", "N周后", and 上个月/下个月 (first day of that month).
    Returns a datetime, or None when the text is not a relative expression.
    """
    now = datetime.now()
    text = text.replace(" ", "")

    # Extract an optional HH:MM component and strip it from the text.
    time_pattern = r'(\d{1,2}):(\d{1,2})'
    time_match = re.search(time_pattern, text)
    time_part = None
    if time_match:
        hour = int(time_match.group(1))
        minute = int(time_match.group(2))
        time_part = timedelta(hours=hour, minutes=minute)
        text = re.sub(time_pattern, '', text)

    # Day-name expressions relative to today.
    if text == "昨天":
        result = now - timedelta(days=1)
    elif text == "今天":
        result = now
    elif text == "明天":
        result = now + timedelta(days=1)
    elif text == "后天":
        result = now + timedelta(days=2)
    else:
        result = None

    if result is not None:
        # BUGFIX: a bare 昨天/今天/明天/后天 (no HH:MM) previously fell
        # through this block and the function returned None, discarding
        # the already-computed result.  Now it is returned directly.
        if time_part is not None:
            # With an explicit time: anchor at that day's midnight, then add HH:MM.
            result = result.replace(hour=0, minute=0, second=0, microsecond=0) + time_part
        return result

    # "N天后": N days from now (an HH:MM component, if present, is ignored
    # here — unchanged from the original behavior).
    day_match = re.match(r"([+-]?\d+)天后", text)
    if day_match:
        return now + timedelta(days=int(day_match.group(1)))

    # "N周后": N weeks from now.
    week_match = re.match(r"([+-]?\d+)周后", text)
    if week_match:
        return now + timedelta(weeks=int(week_match.group(1)))

    # First day of the next / previous month.
    if text == "下个月":
        return (now.replace(day=1) + timedelta(days=32)).replace(day=1)
    if text == "上个月":
        return (now.replace(day=1) - timedelta(days=1)).replace(day=1)

    return None

def unified_date_parser(date_str):
    """Unified date-parsing entry point.

    Normalizes Chinese date text, tries relative expressions
    (今天/明天/N天后/…) first, then dateutil, then a handful of explicit
    fallback formats.  Returns an ISO-8601 string (naive, no tzinfo);
    raises ValueError (chained to the original cause) when nothing matches.
    """
    original_str = date_str
    try:
        # Pre-processing: trim and normalize '号' to '日'.
        date_str = date_str.strip().replace("号", "日")
        # Default value so the except-clause message below is well-defined
        # even if parse_chinese_date raises.
        processed_str = date_str
        processed_str = parse_chinese_date(date_str)
        
        # Relative expressions win outright.
        relative_result = parse_relative_time(processed_str)
        if relative_result:
            return relative_result.isoformat()
        
        # Main parse via dateutil (day-first for ambiguous DD/MM strings).
        try:
            dt = parser.parse(processed_str, dayfirst=True)
        except parser.ParserError:
            # Fallbacks for formats dateutil rejects.
            if re.match(r"^\d{8}$", processed_str):  # YYYYMMDD
                dt = datetime.strptime(processed_str, "%Y%m%d")
            elif re.match(r"^\d{4}[./-]\d{1,2}[./-]\d{1,2}$", processed_str):  # year-first with separator
                normalized = re.sub(r"[./-]", "-", processed_str)
                dt = datetime.strptime(normalized, "%Y-%m-%d")
            elif re.match(r"^\d{2}/\d{2}/\d{4}$", processed_str):  # MM/DD/YYYY or DD/MM/YYYY
                # NOTE(review): dateutil already failed on this exact input with
                # dayfirst=True above, so this retry likely re-raises — confirm intent.
                dt = parser.parse(processed_str, dayfirst=True)
            elif re.match(r"^\d{4}年\d{1,2}月\d{1,2}日$", processed_str):  # YYYY年MM月DD日
                dt = datetime.strptime(processed_str, "%Y年%m月%d日")
            else:
                raise ValueError("未匹配到已知格式")
        
        # Sanity check; datetime construction already guarantees valid
        # month/day values, so this is a belt-and-braces guard.
        if not (1 <= dt.month <= 12 and 1 <= dt.day <= 31):
            raise ValueError(f"非法日期值: {dt.month}月{dt.day}日")
        
        return dt.isoformat()
    
    except Exception as e:
        # Wrap every failure in one ValueError, preserving the cause chain.
        raise ValueError(f"无法解析的日期格式: {original_str} (预处理: {processed_str})") from e

# ---------- SQLite 封装 ----------
class SddDB:
    """Tiny SQLite wrapper that records already-seen feed items for one subscription."""

    def __init__(self, sdd_name: str):
        # One database file per subscription, kept under data_dir.
        self.conn = sqlite3.connect(os.path.join(data_dir, f"{sdd_name}.db"))
        self.cur = self.conn.cursor()
        # guid is the primary key, so re-inserting a seen item is a no-op.
        self.cur.execute(
            """CREATE TABLE IF NOT EXISTS content_items (
                guid TEXT PRIMARY KEY,
                title TEXT,
                link TEXT,
                pub_date TEXT,
                created_at TEXT
            )"""
        )
        self.conn.commit()

    def existing_guids(self) -> set:
        """Return the set of guids already recorded in this database."""
        rows = self.cur.execute("SELECT guid FROM content_items").fetchall()
        return {guid for (guid,) in rows}

    def insert_items(self, items: list):
        """Insert items in one batch; duplicate guids are silently ignored."""
        stamp = datetime.now(timezone.utc).isoformat()
        rows = [
            (entry['guid'], entry['title'], entry['link'], entry['pub_date'], stamp)
            for entry in items
        ]
        self.cur.executemany(
            "INSERT OR IGNORE INTO content_items(guid, title, link, pub_date, created_at) VALUES (?,?,?,?,?)",
            rows,
        )
        self.conn.commit()

    def close(self):
        """Close the underlying SQLite connection."""
        self.conn.close()

# ---------- 核心逻辑 ----------
def get_page_html(url: str, ua: str = "") -> str:
    """Fetch *url* and return its decoded HTML text.

    Sends the given User-Agent (falling back to a generic Mozilla string)
    and lets requests guess the charset from the response body.
    """
    agent = ua if ua else "Mozilla/5.0"
    response = requests.get(url, headers={"User-Agent": agent}, timeout=15)
    response.encoding = response.apparent_encoding
    return response.text

def extract_content_items(sdd: dict) -> list:
    """Scrape one HTML subscription described by the *sdd* config dict.

    Returns a list of dicts with keys 'title', 'link', 'guid', 'pub_date'.
    Expects sdd to provide 'url', 'data_list', 'data_list_elements' and
    'rss' (optionally 'meta', 'user_agent'); the schema is defined by the
    config files referenced in the module docstring.
    """
    html = get_page_html(sdd["url"], sdd.get("user_agent", ""))
    soup = BeautifulSoup(html, "html.parser")

    # Select the repeating item nodes, then strip unwanted sub-elements.
    items = soup.select(sdd["data_list"]["selector"]["css"])
    if unsel := sdd["data_list"].get("un_selectors"):
        for item in items:
            for sel in unsel:
                for e in item.select(sel):
                    e.decompose()

    content_items = []
    rss_items = sdd["rss"]["items"]
    
    for item in items:
        entry = {}
        for field, cfg in sdd["data_list_elements"].items():
            try:
                if cfg["type"] == "var":
                    # "meta.<key>" reference: value comes from a page-level
                    # element described in sdd["meta"], not from this item node.
                    _, k = cfg["value"].split(".")
                    cfg2 = sdd["meta"][k]
                    elem = soup.select_one(cfg2["selector"]["css"])
                    entry[field] = elem.get_text(strip=True) if cfg2["type"] == "text" else elem.get(cfg2["value"], "")
                else:
                    elem = item.select_one(cfg["selector"]["css"])
                    if elem:
                        # Remove excluded sub-elements before extracting.
                        for sel in cfg.get("un_selectors", []):
                            for e in elem.select(sel):
                                e.decompose()
                        if cfg["type"] == "attr":
                            entry[field] = elem.get(cfg["value"], "")
                        elif cfg["type"] == "text":
                            entry[field] = elem.get_text(strip=True)
                        elif cfg["type"] == "image":
                            entry[field] = elem.get("src", "")
            except Exception:
                # Best-effort: a broken selector yields an empty field,
                # never an aborted scrape.
                entry[field] = ""

        # Resolve relative links against the page URL.
        if rss_items.get("link") and entry.get(rss_items["link"]):
            entry[rss_items["link"]] = urljoin(sdd["url"], entry[rss_items["link"]])

        # Map the scraped fields onto the output schema.
        title = entry.get(rss_items["title"], "Untitled")
        link = entry.get(rss_items["link"], sdd["url"])
        guid = entry.get(rss_items.get("guid", "")) or link  # fall back to link as guid
        pub_date = ""
        
        # Publish date: parse if configured, otherwise stamp with "now" (UTC+8).
        if date_str := entry.get(rss_items.get("date", "")):
            try:
                dt = datetime.fromisoformat(unified_date_parser(date_str))
                # NOTE(review): replace() assumes the parsed datetime is naive
                # (unified_date_parser returns naive values) — confirm; on an
                # aware value this would relabel rather than convert the zone.
                dt = dt.replace(tzinfo=timezone(timedelta(hours=8)))
                pub_date = dt.isoformat()
            except Exception:
                pub_date = datetime.now(timezone(timedelta(hours=8))).isoformat()
        else:
            pub_date = datetime.now(timezone(timedelta(hours=8))).isoformat()

        content_items.append({
            'title': title,
            'link': link,
            'guid': guid,
            'pub_date': pub_date
        })

    return content_items

# ---------- RSS解析功能 ----------
def load_rss_config(config_file):
    """Read the RSS subscription config file and return its 'subscriptions' list.

    Every failure mode (missing file, malformed JSON, anything else) is
    reported on stdout and an empty list is returned so the caller can
    carry on with whatever subscriptions did load.
    """
    try:
        with open(config_file, 'r', encoding='utf-8') as fp:
            # Kept inside the try: a non-dict top level (no .get) must also
            # fall into the generic error branch.
            return json.load(fp).get('subscriptions', [])
    except FileNotFoundError:
        print(f"错误: RSS配置文件 '{config_file}' 不存在")
    except json.JSONDecodeError:
        print(f"错误: RSS配置文件 '{config_file}' 格式不正确")
    except Exception as e:
        print(f"加载RSS配置文件时出错: {str(e)}")
    return []

def parse_rss_feed(url):
    """Parse one RSS/Atom feed and return its title plus a normalized article list.

    Returns {'feed_title': ..., 'articles': [{'title','link','guid','pub_date'}, ...]},
    with every pub_date expressed in UTC+8, or None when the feed cannot be
    fetched or parsed.
    """
    cst = timezone(timedelta(hours=8))  # UTC+8, the push audience's zone
    try:
        feed = feedparser.parse(url)
        if feed.bozo != 0:
            print(f"解析RSS时出错: {feed.bozo_exception}")
            return None
        
        articles = []
        for entry in feed.entries:
            # Normalize the publish date to UTC+8; fall back to "now" when
            # the entry has no date or it cannot be parsed.
            pub_date = ""
            if 'published' in entry:
                try:
                    dt = parser.parse(entry.published)
                    # BUGFIX: when the published date is timezone-aware,
                    # convert the instant with astimezone(); replace() merely
                    # relabels the zone and silently shifts the real time.
                    # Naive datetimes are assumed to already be UTC+8.
                    dt = dt.astimezone(cst) if dt.tzinfo else dt.replace(tzinfo=cst)
                    pub_date = dt.isoformat()
                except Exception:
                    pub_date = datetime.now(cst).isoformat()
            else:
                pub_date = datetime.now(cst).isoformat()
            
            article = {
                'title': entry.get('title', '无标题'),
                'link': entry.get('link', '无链接'),
                'guid': entry.get('guid', entry.get('link', '无链接')),
                'pub_date': pub_date
            }
            articles.append(article)
            
        return {
            'feed_title': feed.feed.get('title', '未知源'),
            'articles': articles
        }
    except Exception as e:
        print(f"解析RSS订阅源时出错: {str(e)}")
        return None

# ---------- 核心调度 ----------
def run():
    """Entry point: load both config files, check every subscription for new
    items, record them in per-subscription SQLite databases, and push one
    aggregated notification for everything that changed."""
    # Load HTML-scraping subscriptions (订阅配置.json).
    subscription_file = os.path.join(BASE_DIR, "订阅配置.json")
    html_subscriptions = []
    try:
        with open(subscription_file, encoding="utf-8") as f:
            html_subscriptions = json.load(f)
    except Exception as e:
        print(f"读取订阅配置文件失败：{e}")
    
    # Load RSS subscriptions (rss配置.json).
    rss_config_file = os.path.join(BASE_DIR, "rss配置.json")
    rss_subscriptions = load_rss_config(rss_config_file)
    
    # Merge both kinds, tagging each entry with the handler it needs.
    all_subscriptions = []
    for sdd in html_subscriptions:
        sdd['type'] = 'html'
        all_subscriptions.append(sdd)
    for sdd in rss_subscriptions:
        sdd['type'] = 'rss'
        all_subscriptions.append(sdd)

    # Accumulated notification text, one entry per site with updates.
    all_updates = []

    for idx, sdd in enumerate(all_subscriptions):
        name = sdd.get("title", f"subscription_{idx}")
        # BUGFIX: reset per iteration. Previously a failed RSS fetch left
        # content_items unbound on the first pass (NameError, swallowed
        # below) or still holding the *previous* subscription's items on
        # later passes, attributing stale updates to the wrong feed.
        content_items = []
        try:
            if sdd.get('type') == 'rss':
                # RSS subscription.
                url = sdd.get('url')
                if not url:
                    print(f"订阅源 '{name}' 缺少URL，跳过")
                    continue
                
                feed_data = parse_rss_feed(url)
                if feed_data and feed_data['articles']:
                    content_items = feed_data['articles']
                    # Prefer the feed's own title when it supplied one.
                    if 'feed_title' in feed_data and feed_data['feed_title'] != '未知源':
                        name = feed_data['feed_title']
            else:
                # HTML-scraping subscription.
                content_items = extract_content_items(sdd)
            
            if content_items:
                # Sanitize the name so it is a valid SQLite filename.
                db_name = re.sub(r'[/:*?"<>|]', '_', name)
                db = SddDB(db_name)
                try:
                    # Keep only items whose guid has not been seen before.
                    exist_guids = db.existing_guids()
                    new_items = [it for it in content_items if it['guid'] not in exist_guids]

                    if new_items:
                        db.insert_items(new_items)
                        notify_content = f"📰 发现 {len(new_items)} 条更新\n\n"
                        for i, item in enumerate(new_items, 1):
                            notify_content += f"{i}. {item['title']}\n{item['link']}\n\n"
                        print(f"网站更新：{name}")
                        print(notify_content)

                        # Collect for the single aggregated push below.
                        all_updates.append(f"====🔽【{name}】====\n{notify_content}")
                    else:
                        print(f"无新内容：[{name}] ")
                finally:
                    # Always release the connection, even when an item blows up
                    # (previously leaked on any exception after SddDB()).
                    db.close()
        except Exception as e:
            print(f"[{name}] 处理失败：{e}")

    # One combined push for everything that changed this run.
    if all_updates:
        final_content = "\n".join(all_updates)
        push("订阅网站更新", final_content)

if __name__ == "__main__":
    run()