import requests
import json
import time
import random
from typing import Dict, List, Optional, Tuple
import os
import re
import sys
import traceback
from datetime import datetime
import redis
from urllib.parse import urlparse, parse_qs, unquote
from util.tools import *
import urllib3
from bs4 import BeautifulSoup, NavigableString, Tag
from setting import *
import myglobal

# Suppress urllib3 insecure-request warnings (requests below use verify=False)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Module-level Redis connection pool (created lazily; set back to None to force a reconnect)
_redis_pool = None

def get_redis_connection():
    """Return a pinged Redis client backed by the module-level pool.

    The connection pool is created lazily on first use.  On any failure
    the pool is discarded (so the next call rebuilds it) and None is
    returned instead of a client.
    """
    global _redis_pool
    try:
        if _redis_pool is None:
            _redis_pool = redis.ConnectionPool(
                host='127.0.0.1',
                port=6379,
                db=3,
                socket_keepalive=True,
                retry_on_timeout=True,
                health_check_interval=30,
            )

        client = redis.Redis(connection_pool=_redis_pool)
        client.ping()  # verify the connection is actually usable
        return client
    except Exception as e:
        print(f"Redis连接失败: {e}")
        # Drop the pool so the next caller rebuilds it from scratch.
        _redis_pool = None
        return None

def safe_redis_operation(operation_func, default_return=None, max_retries=2):
    """Run a Redis operation with automatic reconnect and retry.

    Args:
        operation_func: callable that receives a live Redis client.
        default_return: value returned when every attempt fails.
        max_retries: maximum number of attempts.

    Returns:
        Whatever ``operation_func`` returns, or ``default_return`` when
        all attempts fail.
    """
    # BUG FIX: without this declaration the `_redis_pool = None` below
    # only created a function-local variable, so the shared pool was
    # never actually reset after a dropped connection.
    global _redis_pool

    for attempt in range(max_retries):
        r = get_redis_connection()
        if r is None:
            time.sleep(1)
            continue

        try:
            return operation_func(r)
        except redis.exceptions.ConnectionError:
            print(f"Redis连接断开，第{attempt+1}次重试...")
            _redis_pool = None  # force a fresh pool on the next attempt
            time.sleep(1)
        except Exception as e:
            # Non-connection errors are not retried.
            print(f"Redis操作失败: {e}")
            break

    return default_return

def load_cookies_from_file(filename: str = "weibo_cookies.json") -> Dict:
    """Load cookies from a JSON file.

    Args:
        filename: path to the cookies JSON file.

    Returns:
        The parsed cookies dict, or {} when the file is missing or
        contains invalid JSON.
    """
    try:
        with open(filename, 'r', encoding='utf-8') as f:
            return json.load(f)
    except FileNotFoundError:
        # BUG FIX: the messages previously printed a literal placeholder
        # instead of interpolating the actual filename.
        print(f"找不到cookies文件: {filename}")
        return {}
    except json.JSONDecodeError:
        print(f"cookies文件格式错误: {filename}")
        return {}

def cookies_dict_to_string(cookies_dict: Dict) -> str:
    """Serialize a cookies dict into a single "name=value; name=value" header string."""
    pairs = (f"{name}={value}" for name, value in cookies_dict.items())
    return "; ".join(pairs)

def create_weibo_headers(cookies_dict: Dict) -> Dict:
    """Assemble the browser-like request headers weibo's AJAX endpoints expect.

    The cookie header is serialized from *cookies_dict*; when an
    XSRF-TOKEN cookie is present it is mirrored into the dedicated
    x-xsrf-token header.
    """
    headers = {
        'authority': 'www.weibo.com',
        'accept': 'application/json, text/plain, */*',
        'accept-language': 'zh-CN,zh;q=0.9',
        'client-version': 'v2.47.132',
        'cookie': cookies_dict_to_string(cookies_dict),
        'priority': 'u=1, i',
        'referer': 'https://www.weibo.com/u/5069029750',
        'sec-ch-ua': '"Chromium";v="128", "Not;A=Brand";v="24", "Google Chrome";v="128"',
        'sec-ch-ua-mobile': '?0',
        'sec-ch-ua-platform': '"Windows"',
        'sec-fetch-dest': 'empty',
        'sec-fetch-mode': 'cors',
        'sec-fetch-site': 'same-origin',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36',
        'x-requested-with': 'XMLHttpRequest',
    }

    # Mirror the anti-CSRF token into its own header when present.
    if 'XSRF-TOKEN' in cookies_dict:
        headers['x-xsrf-token'] = cookies_dict['XSRF-TOKEN']

    return headers

def make_weibo_request(url: str, headers: Dict, params: Dict = None, method: str = 'GET') -> Optional[requests.Response]:
    """Issue a throttled GET/POST request to weibo.

    Sleeps 5-8 seconds before every request to mimic human pacing.
    GET sends *params* as the query string; any other method POSTs
    *params* as a JSON body.  Returns the Response, or None on any
    network-level error.
    """
    # Random pre-request pause to avoid triggering rate limits.
    time.sleep(random.uniform(5, 8))
    common = {'headers': headers, 'verify': False, 'timeout': 15}
    try:
        if method.upper() == 'GET':
            response = requests.get(url, params=params, **common)
        else:
            response = requests.post(url, json=params, **common)

        print(f"请求状态码: {response.status_code}, URL: {response.url}")
        return response

    except requests.exceptions.RequestException as e:
        print(f"请求异常: {e}")
        return None

def get_user_weibo_list(uid: str, headers: Dict, page: int = 1, feature: int = 0) -> Optional[Dict]:
    """Fetch one page of a user's weibo timeline as parsed JSON.

    Returns the decoded JSON payload, or None when the request failed or
    came back with a non-200 status.
    """
    response = make_weibo_request(
        "https://www.weibo.com/ajax/statuses/mymblog",
        headers,
        {'uid': uid, 'page': page, 'feature': feature},
    )
    if not response:
        return None

    if response.status_code != 200:
        print(f"微博列表请求失败: {response.status_code}, 响应: {response.text[:200]}")
        return None

    time.sleep(random.uniform(1, 3))  # pacing between successive page fetches
    return response.json()

def convert_to_target_format(weibo_data: Dict) -> Dict:
    """Convert a raw weibo API status dict into the project's target schema.

    Recursively converts the retweeted status when one is present.
    Returns {} if anything goes wrong during conversion.
    """
    try:
        print(f"开始转换微博数据，ID: {weibo_data.get('id', 'unknown')}")

        author = weibo_data.get('user', {})
        retweet = weibo_data.get('retweeted_status')

        # Content items are parsed first so products can be counted.
        content = parse_weibo_content(weibo_data)

        formatted = {
            "avatar": author.get('avatar_hd') or author.get('profile_image_url', ''),
            "user_id": author.get('id', 0),
            "nickname": author.get('screen_name', ''),
            "publish_time": parse_weibo_time(weibo_data.get('created_at', '')),
            "content": content,
            "images": parse_images_from_weibo(weibo_data),
            "has_retweet": bool(retweet),
            "article_id": weibo_data.get('idstr', str(weibo_data.get('id', ''))),
            "product_num": count_products_in_content(content),
            "article_detail_url": f"https://m.weibo.cn/detail/{weibo_data.get('id', '')}",
            "is_weibo_chaohua": False,
        }

        print(f"基础数据构建完成，文章ID: {formatted['article_id']}")

        # Embedded retweet: converted recursively with the same schema.
        if retweet:
            print("处理转发内容...")
            nested = convert_to_target_format(retweet)
            if nested:
                formatted["retweet_article_id"] = retweet.get('idstr', str(retweet.get('id', '')))
                formatted["retweeted_content"] = nested
                print(f"转发内容处理完成: {formatted['retweet_article_id']}")

        return formatted

    except Exception as e:
        print(f"数据转换失败: {e}")
        traceback.print_exc()
        return {}
def get_multiple_pages_weibo(uid: str, headers: Dict, start_page: int = 1, end_page: int = 3, delay: float = 2.0) -> List[Dict]:
    """Fetch pages start_page..end_page of a user's timeline and flatten the results.

    A randomized delay is inserted between pages to avoid hammering the
    API.  Failed pages are skipped (logged) rather than aborting.
    """
    collected: List[Dict] = []

    for page in range(start_page, end_page + 1):
        print(f"正在获取第 {page} 页数据...")

        payload = get_user_weibo_list(uid, headers, page)
        if payload:
            page_items = parse_weibo_data(payload)
            collected.extend(page_items)
            print(f"第 {page} 页获取到 {len(page_items)} 条微博")
        else:
            print(f"第 {page} 页获取失败")

        # Random jitter between pages; skipped after the final page.
        if page < end_page:
            sleep_time = delay + random.uniform(0.5, 1.5)
            print(f"等待 {sleep_time:.2f} 秒后继续...")
            time.sleep(sleep_time)

    print(f"总共获取到 {len(collected)} 条微博")
    return collected

def extract_weibo_id_from_url(weibo_url: str) -> Optional[str]:
    """Extract the status ID from a weibo URL, supporting all common formats.

    Tries a series of regex patterns against the URL with its query
    string and fragment stripped, then falls back to taking the longest
    numeric path segment for mobile URLs.  Returns None when nothing
    matches.
    """
    # Strip query string and fragment before pattern matching.
    base = weibo_url.split('?')[0].split('#')[0]

    id_patterns = (
        # Desktop alphanumeric IDs.
        (r'weibo\.com/\d+/([A-Za-z0-9]{9})$', "PC端字母数字ID"),
        # Mobile numeric IDs (detail / status pages).
        (r'm\.weibo\.cn/detail/(\d+)$', "移动端detail数字ID"),
        (r'm\.weibo\.cn/status/(\d+)$', "移动端status数字ID"),
        (r'm\.weibo\.cn/detail/(\d+)/?', "移动端detail带斜杠"),
        (r'm\.weibo\.cn/status/(\d+)/?', "移动端status带斜杠"),
        # Desktop URLs with trailing query / anchor.
        (r'weibo\.com/\d+/([A-Za-z0-9]{9})\?', "PC端带参数"),
        (r'weibo\.com/\d+/([A-Za-z0-9]{9})#', "PC端带锚点"),
        # Catch-all: any-length alphanumeric desktop ID.
        (r'weibo\.com/\d+/([A-Za-z0-9]+)', "PC端任意长度ID"),
    )

    for regex, label in id_patterns:
        hit = re.search(regex, base)
        if hit:
            extracted = hit.group(1)
            print(f"从{label}提取微博ID: {extracted}")
            return extracted

    # Mobile fallback: the longest run of digits in the path is usually the ID.
    if 'm.weibo.cn' in weibo_url:
        digit_runs = re.findall(r'/(\d+)', weibo_url)
        if digit_runs:
            candidate = max(digit_runs, key=len)
            if len(candidate) >= 9:  # real status IDs are long digit strings
                print(f"从移动端URL提取数字ID: {candidate}")
                return candidate

    print(f"无法识别的微博URL格式: {weibo_url}")
    return None

def get_single_weibo_detail(weibo_url: str, headers: Dict) -> Optional[Dict]:
    """Fetch the detail JSON for a single weibo identified by its URL.

    Returns the parsed payload, or None when the ID cannot be extracted
    or the request fails.
    """
    weibo_id = extract_weibo_id_flexible(weibo_url)
    if not weibo_id:
        print(f"无法从URL提取微博ID: {weibo_url}")
        return None

    response = make_weibo_request(
        "https://www.weibo.com/ajax/statuses/show",
        headers,
        {'id': weibo_id},
    )
    if response and response.status_code == 200:
        data = response.json()
        print(f"成功获取微博详情: {weibo_id}")
        return data

    print(f"获取微博详情失败: {weibo_url}, 状态码: {response.status_code if response else '无响应'}")
    return None

# Additional helper functions
def parse_html_content(html_content: str) -> List[Dict]:
    """Parse weibo HTML content into a flat list of content items.

    Two passes over the parsed tree: first every anchor is classified
    (weibo product card, shop link, t.cn short link, or generic link),
    then the remaining text nodes are appended.  Duplicate text items
    are removed afterwards (links are always kept), so all link items
    precede all text items in the result.

    Returns a list of dicts, each with at least a "type" key
    ("link" or "text").  Returns whatever was collected so far if
    parsing raises.
    """
    result = []
    
    try:
        soup = BeautifulSoup(html_content, 'html.parser')
        
        # Dump the full plain text once for debugging.
        all_text = soup.get_text()
        print(f"完整文本内容: {all_text}")
        
        # Collect every anchor, including weibo's special link cards.
        links = soup.find_all('a', href=True)
        print(f"找到 {len(links)} 个链接")
        
        for i, link in enumerate(links):
            href = link.get('href', '')
            link_text = clean_text(link.get_text(strip=True))
            
            print(f"链接 {i+1}: 文本='{link_text}', URL='{href}'")
            
            # Weibo product-card links (linkcard service).
            if 'apps.weibo.com/linkcard' in href:
                product_info = extract_product_from_linkcard(href, link_text)
                if product_info:
                    result.append(product_info)
                    print(f"  识别为微博商品卡片: {product_info}")
                else:
                    result.append({
                        "type": "link",
                        "text": href,
                        "platform": "微博商品",
                        "link_text": link_text
                    })
            # Weibo shop redirect links.
            elif 'shop.sc.weibo.com' in href:
                product_info = extract_product_from_weibo_shop(href, link_text)
                if product_info:
                    result.append(product_info)
                    print(f"  识别为微博商品链接: {product_info}")
                else:
                    add_link_to_result(href, link_text, result)
            # t.cn short links: try to recover the real target first.
            elif 't.cn' in href:
                real_url = extract_real_url_from_weibo(href)
                if real_url:
                    add_link_to_result(real_url, link_text, result)
                else:
                    add_link_to_result(href, link_text, result)
            # Any other absolute link.
            elif href.startswith('http'):
                add_link_to_result(href, link_text, result)
        
        # Second pass: plain text nodes (text inside anchors was already
        # handled above, so it is skipped here).
        for element in soup.descendants:
            if isinstance(element, NavigableString):
                parent = element.parent
                if parent and parent.name == 'a':
                    continue
                    
                text = clean_text(str(element))
                # Drop boilerplate captions and single-character fragments.
                if (text and text.strip() and 
                    text not in ['网页链接', '查看图片', '展开', '全文', '微博正文'] and
                    len(text.strip()) > 1):
                    result.append({"type": "text", "text": text.strip()})
                        
    except Exception as e:
        print(f"HTML解析失败: {e}")
        traceback.print_exc()
    
    # Drop duplicate text items while keeping every link item.
    unique_result = []
    seen_texts = set()
    
    for item in result:
        if item['type'] == 'text':
            if item['text'] not in seen_texts:
                seen_texts.add(item['text'])
                unique_result.append(item)
        else:
            unique_result.append(item)
    
    print(f"解析完成，共 {len(unique_result)} 个内容项")
    return unique_result

def extract_product_from_linkcard(linkcard_url: str, link_text: str) -> Optional[Dict]:
    """Parse a weibo linkcard URL into a product item.

    Returns a product-link dict when the URL embeds a
    ``shop_sc_weibo_<digits>`` product ID, otherwise None.
    """
    try:
        found = re.search(r'shop_sc_weibo_(\d+)', linkcard_url)
        if not found:
            return None

        # Guess the selling platform from the anchor caption.
        platform = infer_platform_from_text(link_text)
        return {
            "type": "link",
            "text": linkcard_url,
            "product_id": found.group(1),
            "platform_name": platform,
            "link_text": link_text,
            "platform": platform,
        }
    except Exception as e:
        print(f"从linkcard提取商品信息失败: {e}")

    return None

def add_link_to_result(url: str, link_text: str, result: List[Dict]):
    """Append a generic link item to *result*, normalizing useless captions."""
    print(f"处理普通链接: {url}")

    # Replace empty or boilerplate captions with the generic label.
    caption = link_text if link_text and link_text not in ['网页链接', '查看图片'] else "网页链接"

    result.append({
        "type": "link",
        "text": url,
        "platform": get_platform(url),
        "link_text": caption,
    })

def parse_plain_text(text: str) -> List[Dict]:
    """Split plain text into a list of text/link content items, one per line."""
    items: List[Dict] = []

    for raw_line in text.split('\n'):
        stripped = clean_text(raw_line)
        # Skip blanks and boilerplate captions.
        if not stripped or stripped in ['网页链接', '查看图片', '展开']:
            continue

        if stripped.startswith(('http://', 'https://')):
            add_link_to_result(stripped, "网页链接", items)
        else:
            items.append({"type": "text", "text": stripped})

    return items

def parse_html_content_ordered(html_content: str) -> List[Dict]:
    """Parse weibo HTML into content items, preserving document order.

    Walks the parse tree depth-first so text and link items come out in
    the order they appear in the HTML.  Weibo detail links are expanded
    into embedded "extra" items; product cards and shop links become
    product/link items.  On a parse failure, degrades to plain-text
    parsing of the stripped content.
    """
    result = []
    
    try:
        soup = BeautifulSoup(html_content, 'html.parser')
        
        # Depth-first traversal keeps the original ordering.
        for element in soup.descendants:
            if isinstance(element, NavigableString):
                # Plain text node.
                text = clean_text(str(element))
                if (text and text.strip() and 
                    text not in ['网页链接', '查看图片', '展开', '全文'] and
                    len(text.strip()) > 1):
                    
                    # Skip a text node identical to the previous item
                    # (avoids adjacent duplicates from nested tags).
                    if not result or not isinstance(result[-1], dict) or result[-1].get('text') != text.strip():
                        result.append({"type": "text", "text": text.strip()})
                        
            elif isinstance(element, Tag) and element.name == 'a':
                # Anchor node.
                href = element.get('href', '')
                link_text = clean_text(element.get_text(strip=True))
                
                # Ignore boilerplate captions entirely.
                if link_text in ['网页链接', '查看图片', '展开', '全文']:
                    continue
                
                print(f"处理链接: 文本='{link_text}', URL='{href}'")
                
                # Weibo detail links expand into embedded "extra" items.
                if is_weibo_detail_url(href):
                    weibo_data = parse_weibo_link_to_extra(href, link_text)
                    if weibo_data:
                        result.append(weibo_data)
                        continue
                
                # Weibo product-card links.
                if 'apps.weibo.com/linkcard' in href:
                    product_info = extract_product_from_linkcard(href, link_text)
                    if product_info:
                        result.append(product_info)
                    else:
                        result.append({
                            "type": "link",
                            "text": href,
                            "platform": "微博商品",
                            "link_text": link_text
                        })
                # Weibo shop links.
                elif 'shop.sc.weibo.com' in href:
                    product_info = extract_product_from_weibo_shop(href, link_text)
                    if product_info:
                        result.append(product_info)
                    else:
                        result.append({
                            "type": "link", 
                            "text": href,
                            "platform": "微博商品",
                            "link_text": link_text
                        })
                # Any other absolute link.
                elif href.startswith('http'):
                    # Ordinary weibo links (not a detail page, not shop).
                    if 'weibo.com' in href and not href.startswith('https://shop.sc.weibo.com'):
                        result.append({
                            "type": "link",
                            "text": href,
                            "platform": "其他",
                            "link_text": "微博正文" if link_text in ['网页链接', ''] else link_text
                        })
                    else:
                        result.append({
                            "type": "link",
                            "text": href,
                            "platform": get_platform(href),
                            "link_text": link_text if link_text else "网页链接"
                        })
                        
    except Exception as e:
        print(f"HTML解析失败: {e}")
        traceback.print_exc()
        # Degrade gracefully to plain-text parsing.
        try:
            text_only = BeautifulSoup(html_content, 'html.parser').get_text()
            return parse_plain_text_ordered(text_only)
        except:
            return []
    
    return result

def parse_plain_text_ordered(text: str) -> List[Dict]:
    """Parse plain text line by line, preserving order.

    Weibo detail URLs are expanded into embedded "extra" items; shop
    URLs become product items; other URLs become plain link items; any
    remaining line becomes a text item.
    """
    items: List[Dict] = []

    for raw_line in text.split('\n'):
        line = clean_text(raw_line)
        if not line or line in ['网页链接', '查看图片', '展开']:
            continue

        if not line.startswith(('http://', 'https://')):
            # Ordinary text line.
            items.append({"type": "text", "text": line})
            continue

        # Weibo detail URLs become embedded "extra" items when resolvable.
        if is_weibo_detail_url(line):
            embedded = parse_weibo_link_to_extra(line, "微博正文")
            if embedded:
                items.append(embedded)
                continue

        if 'shop.sc.weibo.com' in line:
            product_info = extract_product_from_weibo_shop(line, "商品链接")
            if product_info:
                items.append(product_info)
            else:
                items.append({
                    "type": "link",
                    "text": line,
                    "platform": "微博商品", 
                    "link_text": "商品链接"
                })
        elif 'weibo.com' in line:
            items.append({
                "type": "link",
                "text": line,
                "platform": "其他",
                "link_text": "微博正文"
            })
        else:
            items.append({
                "type": "link",
                "text": line,
                "platform": get_platform(line),
                "link_text": "网页链接"
            })

    return items
def parse_images_from_weibo(weibo_data: Dict) -> List[str]:
    """Collect and de-duplicate optimized image URLs from a status dict.

    Reads both the ``pic_ids`` list (IDs turned into large-size CDN
    URLs) and the ``pic_infos`` mapping (plain URLs or nested dicts).
    Returns [] on any error.
    """
    collected: List[str] = []

    try:
        # Source 1: pic_ids -> construct the large-size CDN URL per ID.
        pic_ids = weibo_data.get('pic_ids', [])
        if isinstance(pic_ids, list):
            for pic_id in pic_ids:
                if pic_id and isinstance(pic_id, str):
                    raw_url = f"https://wx1.sinaimg.cn/large/{pic_id}.jpg"
                    collected.append(optimize_image_url(raw_url))

        # Source 2: pic_infos -> values may be plain URLs or nested dicts.
        pic_infos = weibo_data.get('pic_infos', {})
        if isinstance(pic_infos, dict):
            for info in pic_infos.values():
                if isinstance(info, str):
                    collected.append(optimize_image_url(info))
                elif isinstance(info, dict):
                    url = None
                    # Prefer the largest rendition available.
                    for size_key in ['largest', 'large', 'url']:
                        if size_key in info:
                            value = info[size_key]
                            if isinstance(value, str):
                                url = value
                                break
                            elif isinstance(value, dict) and 'url' in value:
                                url = value['url']
                                break

                    if url:
                        collected.append(optimize_image_url(url))

        # De-duplicate while keeping first-seen order.
        deduped: List[str] = []
        for img in collected:
            if img and img not in deduped:
                deduped.append(img)

        print(f"解析到 {len(deduped)} 张图片")
        return deduped

    except Exception as e:
        print(f"图片解析失败: {e}")
        return []

def count_products_in_content(content: List[Dict]) -> int:
    """Count product entries: link items that carry a product_id key.

    Non-dict items are ignored; returns 0 on any error.
    """
    try:
        return sum(
            1
            for item in content
            if isinstance(item, dict)
            and item.get('type') == 'link'
            and 'product_id' in item
        )
    except Exception as e:
        print(f"商品数量统计失败: {e}")
        return 0

def parse_weibo_content(weibo_data: Dict) -> List[Dict]:
    """Parse a status into ordered content items (text / link / extra).

    Walks ``text_raw`` line by line so line breaks are preserved, and
    cross-references anchors extracted from the HTML ``text`` field to
    recover link captions.  HTML-only links that never appeared in the
    raw text are appended at the end.  Returns [] on error.
    """
    try:
        result = []
        
        # Raw text drives the ordering; the HTML variant supplies anchors.
        text_raw = weibo_data.get('text_raw', '')
        text_html = weibo_data.get('text', '')
        
        print(f"原始文本: {repr(text_raw)}")
        print(f"HTML内容长度: {len(text_html) if text_html else 0}")
        
        # Pull every anchor out of the HTML up front.
        html_links = extract_links_from_html(text_html) if text_html else []
        print(f"从HTML提取到 {len(html_links)} 个链接")
        
        # Process the raw text line by line, keeping the line breaks.
        if text_raw:
            lines = text_raw.split('\n')
            for line in lines:
                line_clean = clean_text(line)
                if not line_clean or line_clean in ['网页链接', '查看图片', '展开', '全文']:
                    continue
                
                # Does this line embed any URLs?
                urls_in_line = extract_urls_from_text(line_clean)
                
                if urls_in_line:
                    # Emit the surrounding text first, then each link.
                    text_without_urls = remove_urls_from_text(line_clean)
                    if text_without_urls:
                        result.append({"type": "text", "text": text_without_urls})
                    
                    # Links may expand into "extra" items (embedded weibo).
                    for url in urls_in_line:
                        link_item = create_link_item_with_extra(url, html_links)
                        if link_item:
                            result.append(link_item)
                else:
                    # Plain text line.
                    result.append({"type": "text", "text": line_clean})
        
        # Append HTML anchors that never appeared in the raw text.
        for link_data in html_links:
            if not any(item.get('text') == link_data['url'] for item in result if item.get('type') in ['link', 'extra']):
                link_item = create_link_item_with_extra(link_data['url'], html_links)
                if link_item:
                    result.append(link_item)
        
        print(f"解析结果: {len(result)} 项")
        for i, item in enumerate(result):
            print(f"  {i+1}. {item}")
            
        return result
        
    except Exception as e:
        print(f"微博内容解析失败: {e}")
        traceback.print_exc()
        return []

def create_link_item_with_extra(url: str, html_links: List[Dict]) -> Dict:
    """Build a content item for *url*.

    Resolution order: embedded weibo ("extra" item), weibo shop product,
    generic weibo link, then a plain link classified by platform.  The
    caption is recovered from the matching HTML anchor when available.
    """
    # Recover a meaningful caption from the extracted HTML anchors.
    link_text = "网页链接"
    for anchor in html_links:
        if anchor['url'] == url and anchor['text'] and anchor['text'] not in ['网页链接', '查看图片']:
            link_text = anchor['text']
            break

    # Weibo detail links are expanded into embedded "extra" items.
    if is_weibo_detail_url(url):
        print(f"检测到微博链接，尝试解析为extra: {url}")
        extra_data = parse_weibo_link_to_extra(url, link_text)
        if extra_data:
            print(f"成功解析为extra格式: {url}")
            return extra_data
        print(f"微博链接解析失败，降级为普通链接: {url}")

    if 'shop.sc.weibo.com' in url:
        product_info = extract_product_from_weibo_shop(url, link_text)
        if product_info:
            return product_info
        return {
            "type": "link",
            "text": url,
            "platform": "微博商品",
            "link_text": link_text,
        }

    if 'weibo.com' in url and len(url.split('/')) >= 5:
        return {
            "type": "link",
            "text": url,
            "platform": "其他",
            "link_text": "微博正文",
        }

    # Anything else: classify by platform (covers short links too).
    return {
        "type": "link",
        "text": url,
        "platform": get_platform(url),
        "link_text": link_text,
    }

def is_weibo_detail_url(url: str) -> bool:
    """Return True when *url* points at a weibo status detail page.

    t.cn short links are first resolved to their target and the check is
    re-applied to the resolved URL.
    """
    # Short links must be resolved before classification.
    if 't.cn' in url:
        resolved = extract_real_url_from_weibo(url)
        if resolved and resolved != url:
            return is_weibo_detail_url(resolved)

    detail_patterns = (
        r'https?://weibo\.com/\d+/[A-Za-z0-9]{9}',
        r'https?://m\.weibo\.cn/detail/\d+',
        r'https?://m\.weibo\.cn/status/\d+',
        r'https?://weibo\.com/\d+/([A-Za-z0-9]+)',
        r'https?://www\.weibo\.com/\d+/([A-Za-z0-9]+)',
    )
    if any(re.match(pattern, url) for pattern in detail_patterns):
        print(f"识别为微博详情链接: {url}")
        return True

    return False

def parse_weibo_link_to_extra(weibo_url: str, link_text: str) -> Optional[Dict]:
    """Resolve a weibo link into an embedded "extra" content item.

    Pipeline: load cookies -> build headers -> extract the status ID ->
    fetch the status -> convert it to the target schema -> wrap it as
    {"type": "extra", "text": <formatted>, "url": <original url>}.
    Returns None whenever any step fails.
    """
    try:
        print(f"开始解析微博链接: {weibo_url}")

        # Authentication is required to fetch the linked status.
        cookies_dict = load_cookies_from_file()
        if not cookies_dict:
            print("无法加载cookies，跳过微博链接解析")
            return None

        headers = create_weibo_headers(cookies_dict)

        weibo_id = extract_weibo_id_flexible(weibo_url)
        if not weibo_id:
            print(f"无法从URL提取微博ID: {weibo_url}")
            return None
        print(f"提取到微博ID: {weibo_id}")

        weibo_data = get_weibo_by_id(weibo_id, headers)
        if not weibo_data:
            print(f"无法获取微博详情数据: {weibo_id}")
            return None

        print(f"成功获取微博数据，开始转换格式...")

        formatted = convert_to_target_format_safe(weibo_data)
        if not formatted:
            print(f"微博数据转换失败: {weibo_url}")
            return None

        print(f"成功解析微博链接为extra格式: {weibo_url}")
        return {
            "type": "extra",
            "text": formatted,
            "url": weibo_url,
        }

    except Exception as e:
        print(f"解析微博链接失败: {e}")
        traceback.print_exc()
        return None

def extract_weibo_id_flexible(weibo_url: str) -> Optional[str]:
    """Extract a weibo status ID from any of the known URL shapes.

    Resolution order: resolve t.cn short links, try regex patterns on
    the URL with query/fragment stripped, then the longest numeric path
    segment for mobile URLs, and finally well-known query-string
    parameters.  Returns None when every strategy fails.
    """
    # Resolve short links first so the patterns below see the real URL.
    if 't.cn' in weibo_url:
        resolved = extract_real_url_from_weibo(weibo_url)
        if resolved and resolved != weibo_url:
            print(f"短链接解析为: {resolved}")
            return extract_weibo_id_flexible(resolved)

    # Strip query string and fragment before pattern matching.
    base = weibo_url.split('?')[0].split('#')[0]

    id_patterns = (
        # Desktop: https://weibo.com/2794284831/Qd34tAnjj (+ params/anchor).
        (r'weibo\.com/\d+/([A-Za-z0-9]{9})$', "PC端字母数字ID"),
        (r'weibo\.com/\d+/([A-Za-z0-9]{9})\?', "PC端带参数"),
        (r'weibo\.com/\d+/([A-Za-z0-9]{9})#', "PC端带锚点"),
        (r'weibo\.com/\d+/([A-Za-z0-9]+)', "PC端任意长度ID"),
        # Mobile: https://m.weibo.cn/detail/5231436089262703 etc.
        (r'm\.weibo\.cn/detail/(\d+)$', "移动端detail数字ID"),
        (r'm\.weibo\.cn/status/(\d+)$', "移动端status数字ID"),
        (r'm\.weibo\.cn/detail/(\d+)/?', "移动端detail带斜杠"),
        (r'm\.weibo\.cn/status/(\d+)/?', "移动端status带斜杠"),
        # Explicit www subdomain.
        (r'www\.weibo\.com/\d+/([A-Za-z0-9]+)', "www子域名"),
    )

    for regex, label in id_patterns:
        hit = re.search(regex, base)
        if hit:
            extracted = hit.group(1)
            print(f"从{label}提取微博ID: {extracted}")
            return extracted

    # Mobile fallback: the longest run of digits in the path is usually the ID.
    if 'm.weibo.cn' in weibo_url:
        digit_runs = re.findall(r'/(\d+)', weibo_url)
        if digit_runs:
            candidate = max(digit_runs, key=len)
            if len(candidate) >= 9:  # status IDs are long digit strings
                print(f"从移动端URL提取数字ID: {candidate}")
                return candidate

    # Last resort: common ID-bearing query-string parameters.
    try:
        query = parse_qs(urlparse(weibo_url).query)
        for param in ['id', 'mid', 'weibo_id', 'status_id']:
            if param in query:
                extracted = query[param][0]
                print(f"从URL参数{param}提取微博ID: {extracted}")
                return extracted
    except Exception as e:
        print(f"从URL参数提取ID失败: {e}")

    print(f"无法识别的微博URL格式: {weibo_url}")
    return None

def extract_real_url_from_weibo(weibo_url: str) -> Optional[str]:
    """Extract the real destination URL from a weibo redirect link.

    t.cn short links are returned unchanged (resolving them would need a
    live request).  Otherwise the target is read from the query string
    (url/u/link/target parameters) or, for weibo.cn/sinaurl redirects,
    from the path.  Returns None when no target is found.
    """
    try:
        # t.cn short links cannot be resolved offline; hand them back as-is.
        if 't.cn' in weibo_url:
            return weibo_url

        parsed = urlparse(weibo_url)
        query = parse_qs(parsed.query)

        # Redirect targets appear under several parameter names.
        for key in ['url', 'u', 'link', 'target']:
            if key in query:
                target = unquote(query[key][0])
                print(f"从微博链接提取真实URL: {weibo_url} -> {target}")
                return target

        # sinaurl redirects sometimes embed the target in the path itself.
        if 'weibo.cn/sinaurl' in weibo_url:
            segments = parsed.path.split('/')
            if len(segments) > 2:
                for segment in segments:
                    if segment.startswith('http'):
                        return unquote(segment)

    except Exception as e:
        print(f"解析微博URL失败: {e}")

    return None

def get_weibo_by_id(weibo_id: str, headers: Dict) -> Optional[Dict]:
    """Fetch one weibo's detail payload by ID.

    Tries the desktop AJAX endpoint first and falls back to the mobile
    endpoint. Returns the parsed JSON dict, or None when both fail.
    """
    try:
        # Primary attempt: desktop AJAX API.
        response = make_weibo_request(
            "https://www.weibo.com/ajax/statuses/show",
            headers,
            {'id': weibo_id},
        )
        if response and response.status_code == 200:
            data = response.json()
            print(f"成功获取微博详情: {weibo_id}")
            return data

        # Fallback attempt: mobile API.
        print(f"标准API失败，尝试移动端API...")
        response = make_weibo_request(
            f"https://m.weibo.cn/statuses/show?id={weibo_id}", headers
        )
        if response and response.status_code == 200:
            data = response.json()
            print(f"通过移动端API获取微博详情: {weibo_id}")
            return data

        print(f"获取微博详情失败，状态码: {response.status_code if response else '无响应'}")
        return None

    except Exception as e:
        print(f"通过ID获取微博失败: {e}")
        return None

def convert_to_target_format_safe(weibo_data: Dict) -> Dict:
    """Convert a raw weibo payload into the target output schema.

    Recurses into `retweeted_status` when present so retweets carry the
    formatted original post. Returns {} on any conversion failure.
    """
    try:
        user = weibo_data.get('user', {})
        body = parse_weibo_content_safe(weibo_data)
        retweet = weibo_data.get('retweeted_status')

        result = {
            "avatar": user.get('avatar_hd') or user.get('profile_image_url', ''),
            "user_id": user.get('id', 0),
            "nickname": user.get('screen_name', ''),
            "publish_time": parse_weibo_time(weibo_data.get('created_at', '')),
            "content": body,
            "images": parse_images_from_weibo_safe(weibo_data),
            "has_retweet": bool(retweet),
            "article_id": weibo_data.get('idstr', str(weibo_data.get('id', ''))),
            "product_num": count_products_in_content(body),
            "article_detail_url": f"https://m.weibo.cn/detail/{weibo_data.get('id', '')}",
            "is_weibo_chaohua": False,
        }

        # Attach the recursively formatted original post for retweets.
        if retweet:
            nested = convert_to_target_format_safe(retweet)
            if nested:
                result["retweeted_content"] = nested

        return result

    except Exception as e:
        print(f"安全转换微博数据失败: {e}")
        return {}

def parse_weibo_content_safe(weibo_data: Dict) -> List[Dict]:
    """Parse weibo text into ordered {type: "text"|"link", ...} segments.

    Splits `text_raw` by line, drops boilerplate fragments, and pulls
    embedded URLs out into separate link entries. Returns [] on failure.
    """
    boilerplate = ['网页链接', '查看图片', '展开', '全文']
    segments: List[Dict] = []
    try:
        for raw_line in weibo_data.get('text_raw', '').split('\n'):
            line = clean_text(raw_line)
            if not line or line in boilerplate:
                continue

            found_urls = extract_urls_from_text(line)
            if not found_urls:
                segments.append({"type": "text", "text": line})
                continue

            # Emit the surrounding text (if any) first, then each link.
            remainder = remove_urls_from_text(line)
            if remainder:
                segments.append({"type": "text", "text": remainder})
            for link in found_urls:
                segments.append({
                    "type": "link",
                    "text": link,
                    "platform": get_platform(link),
                    "link_text": "网页链接",
                })

        return segments

    except Exception as e:
        print(f"安全解析微博内容失败: {e}")
        return []

def parse_images_from_weibo_safe(weibo_data: Dict) -> List[str]:
    """Safely build high-quality image URLs from a weibo's pic_ids.

    Each pic_id maps to the lz.sinaimg.cn/oslarge host. Duplicates are
    removed while preserving the original pic_ids order — the previous
    `list(set(...))` dedup returned images in arbitrary (nondeterministic)
    order. Returns [] on any failure.
    """
    try:
        images = [
            f"https://lz.sinaimg.cn/oslarge/{pic_id}.jpg"
            for pic_id in weibo_data.get('pic_ids', [])
            if pic_id
        ]
        # Order-preserving dedup (dict keys keep insertion order).
        return list(dict.fromkeys(images))
    except Exception as e:
        print(f"安全解析图片失败: {e}")
        return []

def extract_urls_from_text(text: str) -> List[str]:
    """Return all http(s) URLs found in *text*.

    Trailing punctuation (ASCII and full-width Chinese) that the regex
    swallows is stripped from each match before it is returned.
    """
    candidates = re.findall(r'https?://[^\s<>"\'{}|\\^`\[\]]+', text)
    punctuation = '.,!?;:，。！？；：'

    cleaned = []
    for candidate in candidates:
        trimmed = candidate.rstrip(punctuation)
        if trimmed:
            cleaned.append(trimmed)
    return cleaned

def remove_urls_from_text(text: str) -> str:
    """Strip every URL (as found by extract_urls_from_text) out of *text*.

    Whitespace runs left behind by the removals are collapsed to single
    spaces and the result is trimmed.

    Fix: dropped the unused local `url_pattern` — the extraction is fully
    delegated to extract_urls_from_text, so the regex was dead code.
    """
    result_text = text
    for url in extract_urls_from_text(text):
        result_text = result_text.replace(url, '')

    # Collapse whitespace created by the removals.
    return re.sub(r'\s+', ' ', result_text).strip()

def extract_links_from_html(html_content: str) -> List[Dict]:
    """Collect anchor links from an HTML fragment.

    Skips boilerplate anchors ("网页链接" etc.), resolves Weibo redirect /
    t.cn links to their real targets, and returns dicts carrying the url,
    the anchor text, and the underlying bs4 element. Returns [] on failure.
    """
    collected: List[Dict] = []
    try:
        # Quick bail-out for empty or plainly non-HTML input.
        if not html_content or '<' not in html_content:
            return collected

        boilerplate = ['网页链接', '查看图片', '展开', '全文']
        soup = BeautifulSoup(html_content, 'html.parser')

        for anchor in soup.find_all('a', href=True):
            target = anchor.get('href', '')
            text = clean_text(anchor.get_text(strip=True))

            if text in boilerplate:
                continue

            # Resolve Weibo redirect / short links to the real destination.
            if 'weibo.cn/sinaurl' in target or 't.cn' in target:
                resolved = extract_real_url_from_weibo(target)
                if resolved:
                    target = resolved

            collected.append({
                'url': target,
                'text': text,
                'element': anchor,
            })

    except Exception as e:
        print(f"HTML链接提取失败: {e}")

    return collected

def get_platform(url: str) -> str:
    """Map a URL's host to a Chinese e-commerce platform label."""
    host = urlparse(url).netloc.lower()

    # (domain fragment, label) pairs, checked in order.
    known_platforms = (
        ('jd.com', "京东"),
        ('jd.hk', "京东"),
        ('taobao.com', "淘宝"),
        ('tmall.com', "淘宝"),
        ('pinduoduo.com', "拼多多"),
        ('douyin.com', "抖音"),
    )

    return next(
        (label for fragment, label in known_platforms if fragment in host),
        "其他",
    )

def extract_product_from_weibo_shop(weibo_url: str, link_text: str) -> Optional[Dict]:
    """Build a product record from a Weibo shop link.

    The product id comes from the `iid` query parameter; when *link_text*
    is a generic placeholder, the URL's own `link_text` parameter is used
    instead. Returns None when no product id is present or parsing fails.
    """
    try:
        params = parse_qs(urlparse(weibo_url).query)

        product_id = params['iid'][0] if 'iid' in params else ""

        # Prefer the URL-supplied label over generic placeholder text.
        if link_text in ["网页链接", "商品链接"] and 'link_text' in params:
            link_text = unquote(params['link_text'][0])

        if not product_id:
            return None

        return {
            "type": "link",
            "text": weibo_url,
            "product_id": product_id,
            "platform_name": "微博商品",
            "link_text": link_text,
            "platform": "微博商品",
        }

    except Exception as e:
        print(f"从微博商品链接提取商品信息失败: {e}")
        return None

def parse_weibo_time(time_str: str) -> str:
    """Normalize a weibo created_at timestamp to 'YYYY-MM-DD HH:MM:SS'.

    Tries the known strptime formats first (with and without timezone)
    and falls back to the regex-based manual parser. Non-string or blank
    input yields "".
    """
    if not time_str or not isinstance(time_str, str):
        return ""

    candidate = time_str.strip()

    # Weibo emits e.g. "Mon Nov 10 00:00:04 +0800 2025"; tz is optional.
    for fmt in ("%a %b %d %H:%M:%S %z %Y", "%a %b %d %H:%M:%S %Y"):
        try:
            return datetime.strptime(candidate, fmt).strftime('%Y-%m-%d %H:%M:%S')
        except ValueError:
            pass

    # strptime could not handle it; try the manual fallback.
    return manual_parse_weibo_time(candidate)

def manual_parse_weibo_time(time_str: str) -> str:
    """Regex fallback for timestamps like 'Mon Nov 10 00:00:04 +0800 2025'.

    Returns 'YYYY-MM-DD HH:MM:SS' on success; otherwise logs and returns
    the input unchanged.
    """
    months = {
        'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
        'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12,
    }

    m = re.match(
        r'(\w{3})\s+(\w{3})\s+(\d{1,2})\s+(\d{2}):(\d{2}):(\d{2})\s+\+\d{4}\s+(\d{4})',
        time_str,
    )
    if m:
        # Weekday (group 1) is parsed but intentionally unused.
        _, month_name, day, hour, minute, second, year = m.groups()
        try:
            parsed = datetime(
                int(year), months.get(month_name, 1), int(day),
                int(hour), int(minute), int(second),
            )
            return parsed.strftime('%Y-%m-%d %H:%M:%S')
        except ValueError:
            pass

    print(f"时间解析失败，返回原始值: {time_str}")
    return time_str

def clean_text(text: str) -> str:
    """Remove zero-width and control characters, then collapse whitespace."""
    if not text:
        return ""

    # Drop zero-width / BOM / directional marks in one translate pass.
    invisible = '\u200b\u200c\u200d\ufeff\u200e\u200f'
    text = text.translate(dict.fromkeys(map(ord, invisible)))

    # Remove control characters; newline and tab are spared here but get
    # folded into single spaces by the whitespace normalization below.
    text = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f]', '', text)

    # Normalize every whitespace run to a single space.
    return re.sub(r'\s+', ' ', text).strip()

def optimize_image_url(image_url: str) -> str:
    """
    Upgrade a Weibo CDN image URL to the high-quality variant.

    Any https://wxN.sinaimg.cn/<size>/ prefix (large, bmiddle, or any
    other size directory) is rewritten to https://lz.sinaimg.cn/oslarge/.
    URLs that do not match are returned unchanged; empty input passes
    through as-is.

    Fix: the previous rule list had separate entries for large/ and
    bmiddle/, but its generic third rule (\\w+) already matched both, so
    those entries were dead code — collapsed into the single rule below
    with identical behavior.
    """
    if not image_url:
        return image_url

    # One rule covers every wxN.sinaimg.cn size directory.
    return re.sub(
        r'https://wx\d\.sinaimg\.cn/\w+/',
        'https://lz.sinaimg.cn/oslarge/',
        image_url,
    )

def is_article_processed(article_id: str) -> bool:
    """Return True when *article_id* is already in the Redis processed set."""
    return safe_redis_operation(
        lambda r: r.sismember("weibo_processed_ids", article_id),
        False,
    )

def mark_article_processed(article_id: str):
    """Record *article_id* in the Redis processed-IDs set (best effort)."""
    safe_redis_operation(lambda r: r.sadd("weibo_processed_ids", article_id))

def parse_weibo_data(data: Dict) -> List[Dict]:
    """Convert an API response's weibo list into target-format records.

    Entries already recorded as processed in Redis are skipped; a failure
    converting one entry is logged without aborting the batch.
    """
    if not data or 'data' not in data or 'list' not in data['data']:
        return []

    converted: List[Dict] = []
    for entry in data['data']['list']:
        try:
            article_id = entry.get('idstr', entry.get('id', ''))

            # Dedup against previously pushed articles.
            if is_article_processed(article_id):
                print(f"跳过已处理文章: {article_id}")
                continue

            record = convert_to_target_format(entry)
            if record:
                converted.append(record)

        except Exception as e:
            print(f"解析微博数据错误: {e}")

    return converted

def toDatabase(data: Dict) -> bool:
    """Push one formatted article to every remote endpoint in URLS.

    Payloads carrying error placeholders are filtered out first. Returns
    True when at least one endpoint accepted the data.

    Fix: `article_detail_url` / `is_weibo_chaohua` were assigned twice in
    two back-to-back duplicated `if 'article_id' in data` blocks; they are
    now set exactly once.
    """
    # Drop payloads carrying error placeholders from earlier stages.
    if any(keyword in str(data) for keyword in ["__获取内容为空__", "__获取内容出错__", "__循环递归__"]):
        print(f"过滤无效数据: {data.get('article_id', 'unknown')}")
        return False

    # Normalize wording everywhere in the payload via a JSON round-trip.
    data_str = json.dumps(data, ensure_ascii=False).replace("微博正文", "内容正文")
    data = json.loads(data_str)

    if 'article_id' in data:
        article_id = data['article_id']
        data['article_detail_url'] = f"https://m.weibo.cn/detail/{article_id}"
        data['is_weibo_chaohua'] = myglobal.IS_WEIBO_SUBOR

        print('推送到远端数据库')
        print(f"文章ID: {article_id}")
        write_log(data, article_id)
        print('准备推送数据到远端数据库')

    # Push to every configured endpoint and count successes.
    success_count = 0
    for url in URLS:
        # Jitter between requests to avoid hammering the endpoints.
        time.sleep(random.uniform(3, 6))

        try:
            response = requests.post(url, json=data, verify=False, timeout=10)

            if response.status_code == 200 and "success" in response.text:
                success_count += 1
                mark_article_processed(data['article_id'])
                print(f"推送成功: {data['article_id']}")
            else:
                print(f"推送失败: {data['article_id']}, 响应: {response.text[:100]}")

        except Exception as e:
            print(f"推送异常: {e}")

    return success_count > 0
def extract_weibo_id_from_url(weibo_url: str) -> Optional[str]:
    """Extract the weibo ID from a weibo URL, supporting common formats.

    NOTE(review): this function is redefined with an identical body later
    in this file, so this first definition is shadowed at import time.
    Handles PC URLs (weibo.com/<uid>/<alphanumeric id>) and mobile URLs
    (m.weibo.cn/detail|status/<numeric id>), with a numeric-segment
    fallback for other m.weibo.cn paths. Returns None when nothing matches.
    """
    
    # Normalize: drop the query string and fragment before matching.
    clean_url = weibo_url.split('?')[0].split('#')[0]
    
    # Candidate (regex, label) pairs, tried in order.
    patterns = [
        # PC alphanumeric ID (exactly 9 chars at end of string)
        (r'weibo\.com/\d+/([A-Za-z0-9]{9})$', "PC端字母数字ID"),
        
        # Mobile numeric ID - /detail/
        (r'm\.weibo\.cn/detail/(\d+)$', "移动端detail数字ID"),
        
        # Mobile numeric ID - /status/
        (r'm\.weibo\.cn/status/(\d+)$', "移动端status数字ID"),
        
        # Mobile variants tolerating a trailing slash
        (r'm\.weibo\.cn/detail/(\d+)/?', "移动端detail带斜杠"),
        (r'm\.weibo\.cn/status/(\d+)/?', "移动端status带斜杠"),
        
        # PC with query string — NOTE(review): '?' is stripped from
        # clean_url above, so this pattern can never match (dead).
        (r'weibo\.com/\d+/([A-Za-z0-9]{9})\?', "PC端带参数"),
        
        # PC with fragment — NOTE(review): '#' is stripped too (dead).
        (r'weibo\.com/\d+/([A-Za-z0-9]{9})#', "PC端带锚点"),
        
        # Fallback: alphanumeric ID of any length
        (r'weibo\.com/\d+/([A-Za-z0-9]+)', "PC端任意长度ID"),
    ]
    
    for pattern, pattern_type in patterns:
        match = re.search(pattern, clean_url)
        if match:
            weibo_id = match.group(1)
            print(f"从{pattern_type}提取微博ID: {weibo_id}")
            return weibo_id
    
    # Fallback for mobile URLs that matched none of the patterns above.
    if 'm.weibo.cn' in weibo_url:
        # Collect every numeric path segment from the mobile URL.
        numbers = re.findall(r'/(\d+)', weibo_url)
        if numbers:
            # Pick the longest number — most likely the weibo ID.
            potential_id = max(numbers, key=len)
            if len(potential_id) >= 9:  # NOTE(review): comment said 16+, check uses 9 — confirm intent
                print(f"从移动端URL提取数字ID: {potential_id}")
                return potential_id
    
    print(f"无法识别的微博URL格式: {weibo_url}")
    return None
def extract_weibo_id_from_url(weibo_url: str) -> Optional[str]:
    """Extract the weibo ID from a weibo URL, supporting common formats.

    Handles PC URLs (weibo.com/<uid>/<alphanumeric id>) and mobile URLs
    (m.weibo.cn/detail|status/<numeric id>), with a numeric-segment
    fallback for other m.weibo.cn paths. Returns None when nothing matches.

    Fix: the previous pattern list also contained "...\\?" and "...#"
    variants, but the query string and fragment are stripped from
    clean_url before matching, so those patterns could never match and
    have been removed (behavior unchanged).
    """

    # Normalize: drop the query string and fragment before matching.
    clean_url = weibo_url.split('?')[0].split('#')[0]

    # Candidate (regex, label) pairs, tried in order.
    patterns = [
        # PC alphanumeric ID (exactly 9 chars at end of string)
        (r'weibo\.com/\d+/([A-Za-z0-9]{9})$', "PC端字母数字ID"),

        # Mobile numeric ID - /detail/
        (r'm\.weibo\.cn/detail/(\d+)$', "移动端detail数字ID"),

        # Mobile numeric ID - /status/
        (r'm\.weibo\.cn/status/(\d+)$', "移动端status数字ID"),

        # Mobile variants tolerating a trailing slash
        (r'm\.weibo\.cn/detail/(\d+)/?', "移动端detail带斜杠"),
        (r'm\.weibo\.cn/status/(\d+)/?', "移动端status带斜杠"),

        # Fallback: alphanumeric ID of any length
        (r'weibo\.com/\d+/([A-Za-z0-9]+)', "PC端任意长度ID"),
    ]

    for pattern, pattern_type in patterns:
        match = re.search(pattern, clean_url)
        if match:
            weibo_id = match.group(1)
            print(f"从{pattern_type}提取微博ID: {weibo_id}")
            return weibo_id

    # Fallback for mobile URLs that matched none of the patterns above.
    if 'm.weibo.cn' in weibo_url:
        numbers = re.findall(r'/(\d+)', weibo_url)
        if numbers:
            # Pick the longest numeric segment — most likely the weibo ID.
            potential_id = max(numbers, key=len)
            # Require at least 9 digits to avoid short path numbers.
            if len(potential_id) >= 9:
                print(f"从移动端URL提取数字ID: {potential_id}")
                return potential_id

    print(f"无法识别的微博URL格式: {weibo_url}")
    return None
def get_single_weibo_detail(weibo_url: str, headers: Dict) -> Optional[Dict]:
    """Fetch the detail payload for a single weibo identified by URL.

    Returns the parsed JSON dict, or None when the ID cannot be extracted
    or the API request fails.
    """
    weibo_id = extract_weibo_id_flexible(weibo_url)
    if not weibo_id:
        print(f"无法从URL提取微博ID: {weibo_url}")
        return None

    response = make_weibo_request(
        "https://www.weibo.com/ajax/statuses/show",
        headers,
        {'id': weibo_id},
    )
    if response and response.status_code == 200:
        data = response.json()
        print(f"成功获取微博详情: {weibo_id}")
        return data

    print(f"获取微博详情失败: {weibo_url}, 状态码: {response.status_code if response else '无响应'}")
    return None
def test_single_weibo_url(weibo_url: str, push_to_db: bool = False) -> Optional[Dict]:
    """Fetch, convert and optionally push a single weibo (debug helper).

    Returns the formatted record, or None when cookies are missing or the
    fetch fails.
    """
    print(f"测试单条微博: {weibo_url}")

    cookies_dict = load_cookies_from_file()
    if not cookies_dict:
        print("无法加载cookies")
        return None

    headers = create_weibo_headers(cookies_dict)
    weibo_data = get_single_weibo_detail(weibo_url, headers)
    if not weibo_data:
        return None

    formatted_data = convert_to_target_format(weibo_data)
    # Only push when conversion produced something and the caller asked.
    if formatted_data and push_to_db:
        toDatabase(formatted_data)

    return formatted_data

def main():
    """Entry point: run a single-weibo smoke test and pretty-print it."""
    # Hard-coded example URL; push_to_db stays off for a dry run.
    # Command-line argument parsing could be added here later.
    result = test_single_weibo_url(
        "https://weibo.com/2794284831/Qd34tAnjj", push_to_db=False
    )
    if result:
        print(json.dumps(result, ensure_ascii=False, indent=2))


if __name__ == "__main__":
    main()
