import requests
import base64
import cv2
import os
import re
import uuid
import json
from urllib.parse import unquote
from PIL import Image
import numpy as np
import markdown
from markdown.extensions import tables
from bs4 import BeautifulSoup
from uploadAliyunOss import upload_local_file_to_oss

# Base domain used to build public web URLs for generated frames/tables.
# NOTE(review): points at a vicp.fun address — presumably a dev tunnel/reverse
# proxy; confirm before production use.
domain = 'https://oj5371580yb.vicp.fun'
def download_video(url, save_path):
    """Download the video at `url` to `save_path` on disk.

    Streams the response in 1 KiB chunks. Returns True on HTTP 200,
    False on any other status or on exception; progress is print()-ed.
    """
    request_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36',
        'Referer': 'https://www.douyin.com/',
        'Accept': '*/*',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        'Connection': 'keep-alive'
    }
    try:
        print(f"开始下载视频: {url}")
        # NOTE(review): verify=False disables TLS certificate checks — confirm
        # this is intentional for the target host.
        resp = requests.get(url, headers=request_headers, stream=True, verify=False)

        if resp.status_code != 200:
            print(f"下载失败，状态码: {resp.status_code}")
            return False

        with open(save_path, 'wb') as out_file:
            for piece in resp.iter_content(chunk_size=1024):
                if piece:
                    out_file.write(piece)
        print("视频下载成功")
        return True
    except Exception as err:
        print(f"下载出错: {str(err)}")
        return False

def get_video_url_from_share(share_url):
    """Resolve a Douyin share link to a playable, watermark-free video URL.

    Follows redirects, extracts the numeric video id from the landing URL,
    queries the iesdouyin item-info API, and rewrites 'playwm' -> 'play'.
    Returns the URL string, or None when any step fails.
    """
    try:
        ua_headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/122.0.0.0 Safari/537.36',
            'Referer': 'https://www.douyin.com/'
        }

        # Follow redirects to land on the canonical video page URL.
        landing = requests.get(share_url, headers=ua_headers, allow_redirects=True)
        landing_url = landing.url

        id_match = re.search(r'/video/(\d+)', landing_url)
        if not id_match:
            print("无法从URL中提取视频ID")
            return None

        item_id = id_match.group(1)
        api_url = f'https://www.iesdouyin.com/web/api/v2/aweme/iteminfo/?item_ids={item_id}'

        # Ask the public item-info API for the video metadata.
        info_resp = requests.get(api_url, headers=ua_headers)
        if info_resp.status_code != 200:
            print(f"获取视频信息失败: {info_resp.status_code}")
            return None

        payload = info_resp.json()
        if 'item_list' not in payload or not payload['item_list']:
            print("未找到视频信息")
            return None

        play_url = payload['item_list'][0]['video']['play_addr']['url_list'][0]
        # Swapping 'playwm' for 'play' yields the no-watermark stream.
        return play_url.replace('playwm', 'play')
    except Exception as err:
        print(f"解析分享链接出错: {str(err)}")
        return None

def extract_frames(video_source, isBase64=False, fps=30, oss=True):
    """Sample one frame per second from a video and return the frames.

    :param video_source: path to a video file, or an already-opened
                         cv2.VideoCapture object
    :param isBase64: when True, return frames as base64 data URLs instead of
                     writing image files
    :param fps: assumed frame rate used to convert seconds to frame positions
                (NOT read from the video — see extract_frames_v2 for that)
    :param oss: when True (and not isBase64), upload each frame to OSS and
                delete the local copy; otherwise keep frames in a local folder
    :return: dict with the collected frame data, or None if the video
             cannot be opened
    """
    # Accept either a file path or an existing capture object.
    if isinstance(video_source, str):
        video_path = video_source
        cap = cv2.VideoCapture(video_path)
        video_name = os.path.splitext(os.path.basename(video_path))[0]
    else:
        cap = video_source
        video_name = "video"

    # Output folders are only needed when frames are written to disk.
    if not isBase64:
        frames_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "frames")
        if not os.path.exists(frames_dir):
            os.makedirs(frames_dir)
            print(f"创建主文件夹: {frames_dir}")

        random_folder_name = f"{video_name}_{uuid.uuid4().hex[:8]}"
        local_images_dir = os.path.join(frames_dir, random_folder_name)
        if not os.path.exists(local_images_dir) and not oss:
            os.makedirs(local_images_dir)
            print(f"创建子文件夹: {local_images_dir}")

        web_images_dir = f"{domain}/frames/{random_folder_name}"

    if not cap.isOpened():
        print("无法打开视频")
        return None

    # Duration derived from the *assumed* fps parameter, not CAP_PROP_FPS.
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    duration = int(total_frames / fps)  # video length in (assumed) seconds

    frame_count = 0
    current_second = 0

    # Collected web paths or base64 data URLs, in sampling order.
    image_paths = []

    while current_second <= duration:
        # Jump to the first frame of the current second.
        cap.set(cv2.CAP_PROP_POS_FRAMES, current_second * fps)
        ret, frame = cap.read()

        if not ret:
            break

        # BGR -> RGB -> PIL -> ndarray -> BGR round trip (identity for uint8
        # frames; kept to preserve the original pipeline exactly).
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = Image.fromarray(np.uint8(frame))
        frame = np.array(frame)
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

        frame_count += 1
        path = f'image{frame_count-1}.jpg'
        if isBase64:
            _, buffer = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 100])
            base64_str = base64.b64encode(buffer).decode('utf-8')
            image_paths.append(f"data:image/jpeg;base64,{base64_str}")
        else:
            if oss:
                # Write a temp file, push it to OSS, then remove the local copy.
                local_file_path = os.path.join(frames_dir, path)
                cv2.imwrite(local_file_path, frame, [cv2.IMWRITE_JPEG_QUALITY, 100])
                # OSS key has no leading slash.
                web_file_path = upload_local_file_to_oss(local_file_path, f'frames/{random_folder_name}/{path}')
                if os.path.exists(local_file_path):
                    os.remove(local_file_path)
            else:
                local_file_path = os.path.join(local_images_dir, path)
                cv2.imwrite(local_file_path, frame, [cv2.IMWRITE_JPEG_QUALITY, 100])
                web_file_path = f"{web_images_dir}/{path}"

            print(f"{web_file_path} successfully write in")
            image_paths.append(web_file_path)

        current_second += 1

    print('finish!')
    # Only release captures we opened ourselves.
    if isinstance(video_source, str):
        cap.release()

    if isBase64:
        # BUG FIX: the original returned {image_paths: image_paths}, using the
        # list itself as a dict key -> TypeError (unhashable type: 'list').
        return {
            'image_paths': image_paths
        }
    else:
        return {
            'output_directory': web_images_dir,
            'image_paths': image_paths,
            'folder_name': random_folder_name
        }

def extract_frames_v2(video_source, isBase64=False, fps=30, oss=True):
    """Sample frames from a video at a target rate, with per-frame timing info.

    Unlike extract_frames, this reads the video's real frame rate
    (CAP_PROP_FPS) and treats `fps` as the desired number of samples per
    second, capped at the real rate.

    :param video_source: path to a video file, or an already-opened
                         cv2.VideoCapture object
    :param isBase64: when True, return frames as base64 data URLs instead of
                     writing image files
    :param fps: target samples per second
    :param oss: when True (and not isBase64), upload each frame to OSS and
                delete the local copy; otherwise keep frames in a local folder
    :return: dict with frame records (each carrying second / frame_in_second /
             exact_time), or None if the video cannot be opened
    """
    # Accept either a file path or an existing capture object.
    if isinstance(video_source, str):
        video_path = video_source
        cap = cv2.VideoCapture(video_path)
        video_name = os.path.splitext(os.path.basename(video_path))[0]
    else:
        cap = video_source
        video_name = "video"

    # Output folders are only needed when frames are written to disk.
    if not isBase64:
        frames_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "frames")
        if not os.path.exists(frames_dir):
            os.makedirs(frames_dir)
            print(f"创建主文件夹: {frames_dir}")

        random_folder_name = f"{video_name}_{uuid.uuid4().hex[:8]}"
        local_images_dir = os.path.join(frames_dir, random_folder_name)
        if not os.path.exists(local_images_dir) and not oss:
            os.makedirs(local_images_dir)
            print(f"创建子文件夹: {local_images_dir}")

        web_images_dir = f"{domain}/frames/{random_folder_name}"

    if not cap.isOpened():
        print("无法打开视频")
        return None

    # Real video properties, read from the container.
    video_fps = cap.get(cv2.CAP_PROP_FPS)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    real_duration = total_frames / video_fps  # seconds

    # Sampling cannot be denser than the video's own frame rate.
    if fps > video_fps:
        print(f"警告：指定的采样帧率({fps})大于视频实际帧率({video_fps})，将使用视频实际帧率")
        fps = video_fps

    # Sample every `sample_interval` frames to approximate `fps` samples/sec.
    sample_interval = max(1, int(video_fps / fps))
    estimated_sample_count = int(total_frames / sample_interval) + 1

    print(f"视频实际帧率: {video_fps} fps, 总时长: {real_duration:.2f} 秒, 总帧数: {total_frames}")
    print(f"采样设置: 每秒{fps}帧, 每{sample_interval}帧采样一次, 预计采样{estimated_sample_count}帧")

    # Collected frame records (path/base64 plus timing), in sampling order.
    image_paths = []

    for frame_position in range(0, total_frames, sample_interval):
        cap.set(cv2.CAP_PROP_POS_FRAMES, frame_position)
        ret, frame = cap.read()

        if not ret:
            break

        # BGR -> RGB -> PIL -> ndarray -> BGR round trip (identity for uint8
        # frames; kept to preserve the original pipeline exactly).
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = Image.fromarray(np.uint8(frame))
        frame = np.array(frame)
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

        # Exact timestamp of this frame, split into whole seconds plus the
        # frame offset within that second.
        exact_second = frame_position / video_fps
        second = int(exact_second)
        frame_in_second = int((exact_second - second) * video_fps)

        if isBase64:
            _, buffer = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 100])
            base64_str = base64.b64encode(buffer).decode('utf-8')
            image_paths.append({
                "data": f"data:image/jpeg;base64,{base64_str}",
                "second": second,
                "frame_in_second": frame_in_second,
                "exact_time": exact_second
            })
        else:
            path = f'image_{second}s_{frame_in_second}f.jpg'

            if oss:
                # Write a temp file, push it to OSS, then remove the local copy.
                local_file_path = os.path.join(frames_dir, path)
                cv2.imwrite(local_file_path, frame, [cv2.IMWRITE_JPEG_QUALITY, 100])
                # OSS key has no leading slash.
                web_file_path = upload_local_file_to_oss(local_file_path, f'frames/{random_folder_name}/{path}')
                if os.path.exists(local_file_path):
                    os.remove(local_file_path)
            else:
                local_file_path = os.path.join(local_images_dir, path)
                cv2.imwrite(local_file_path, frame, [cv2.IMWRITE_JPEG_QUALITY, 100])
                web_file_path = f"{web_images_dir}/{path}"

            image_paths.append({
                "path": web_file_path,
                "second": second,
                "frame_in_second": frame_in_second,
                "exact_time": exact_second
            })

    print('finish!')
    # Only release captures we opened ourselves.
    if isinstance(video_source, str):
        cap.release()

    if isBase64:
        # BUG FIX: the original returned {image_paths: image_paths}, using the
        # list itself as a dict key -> TypeError (unhashable type: 'list').
        return {
            'image_paths': image_paths
        }
    else:
        return {
            'output_directory': web_images_dir,
            'image_paths': image_paths,
            'folder_name': random_folder_name
        }


def get_frame_images(folder_name, image_count=None):
    """Load jpg frames from frames/<folder_name> as base64 data URLs.

    :param folder_name: sub-folder name under the local "frames" directory
    :param image_count: maximum number of images to return; None means all
    :return: list of "data:image/jpeg;base64,..." strings
    :raises FileNotFoundError: when the folder is missing or contains no jpgs
    """
    target_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "frames", folder_name)

    if not os.path.exists(target_dir):
        raise FileNotFoundError(f"文件夹不存在: {target_dir}")

    # Sorted for a stable, filename-ordered result.
    jpg_names = sorted(name for name in os.listdir(target_dir) if name.endswith('.jpg'))
    if not jpg_names:
        raise FileNotFoundError(f"文件夹中没有jpg图片: {target_dir}")

    if image_count is not None:
        jpg_names = jpg_names[:image_count]

    encoded = []
    for name in jpg_names:
        full_path = os.path.join(target_dir, name)
        try:
            encoded.append(f"data:image/jpeg;base64,{encode_image(full_path)}")
        except Exception as exc:
            # Skip unreadable images rather than failing the whole batch.
            print(f"转换图片 {name} 失败: {str(exc)}")
            continue

    return encoded



#  base 64 编码格式
def encode_image(image_path):
    """Return the base64 encoding of the file at `image_path` as a UTF-8 string."""
    with open(image_path, "rb") as image_file:
        raw_bytes = image_file.read()
    return base64.b64encode(raw_bytes).decode("utf-8")


def extract_json_from_markdown(markdown_text):
    """Extract JSON from a ```py fenced code block and parse it.

    Collapses newlines and runs of whitespace inside the fenced text before
    parsing. Returns the parsed object, or None when no fenced block is
    found or the content is not valid JSON.
    """
    fenced = re.search(r"```py\n(.*?)\n```", markdown_text, re.DOTALL)
    if not fenced:
        return None

    # Flatten the fenced content onto one line before parsing.
    candidate = fenced.group(1).strip()
    candidate = candidate.replace('\n', ' ').replace('    ', '')
    candidate = re.sub(r'\s+', ' ', candidate)

    try:
        return json.loads(candidate)
    except json.JSONDecodeError as err:
        print(f"JSON 解析错误: {err}")
        return None

def extract_markdown_table(markdown_str):
    """Extract the '| 时段 |' table embedded in a Markdown string.

    Looks for a table whose header row starts with the '时段' column, stops at
    the next '###' heading, and parses pipe-delimited rows.

    :param markdown_str: Markdown text possibly containing the table
    :return: (headers, rows) where rows is a list of dicts keyed by header,
             or (None, None) when no table is found or parsing fails
    """
    # Locate the table by its expected first header cell.
    table_start = markdown_str.find('| 时段 |')
    if table_start == -1:
        return None, None

    table_text = markdown_str[table_start:]

    # Anything from the next '###' heading onward is not part of the table.
    table_end = table_text.find('###')
    if table_end != -1:
        table_text = table_text[:table_end].strip()

    # NOTE: the original version had an unreachable pandas-based fallback after
    # this try/except that referenced undefined names (pd, io); it was removed.
    try:
        lines = [line.strip() for line in table_text.split('\n') if line.strip()]

        # Header row: drop the empty leading/trailing cells from outer pipes.
        headers = [col.strip() for col in lines[0].split('|')[1:-1]]

        # Skip the alignment/separator row (second line); keep aligned rows only.
        data_rows = []
        for line in lines[2:]:
            if '|' in line:
                cols = [col.strip() for col in line.split('|')[1:-1]]
                if len(cols) == len(headers):
                    data_rows.append(dict(zip(headers, cols)))

        return headers, data_rows

    except Exception as e:
        print(f"解析表格时出错: {e}")
        return None, None

def extract_markdown_table_fallback(table_text):
    """Parse a pipe-delimited Markdown table line-by-line.

    Expects the header on the first line and a separator on the second;
    returns (headers, rows) or (None, None) on failure.
    """
    try:
        all_lines = table_text.split('\n')

        # First line is the header row; strip the outer-pipe empty cells.
        headers = [h.strip() for h in all_lines[0].split('|')[1:-1]]

        # Skip the separator line; keep only lines that still contain pipes.
        body_lines = [ln for ln in all_lines[2:] if '|' in ln]

        rows = []
        for ln in body_lines:
            cells = [c.strip() for c in ln.split('|')[1:-1]]
            if len(cells) == len(headers):
                rows.append({hdr: cells[idx] for idx, hdr in enumerate(headers)})

        return headers, rows
    except Exception as e:
        print(f"备用解析方法也失败: {e}")
        return None, None

def process_table_data(table_data, image_paths):
    """Attach matching frame images ('切片') to each row based on its '时段' range.

    Image paths are matched by the integer in their 'imageN' filename falling
    inside the row's start-end second range. Rows that fail to parse get an
    empty '切片' list.
    """
    processed = []
    for row in table_data:
        try:
            # Keep only digits and the range dash from the time-span string.
            span_digits = ''.join(ch for ch in row['时段'] if ch.isdigit() or ch == '-')
            bounds = span_digits.replace('s', '').split('-')

            # Fall back to safe defaults when the bounds aren't numeric.
            begin = int(bounds[0]) if bounds[0].isdigit() else 0
            finish = int(bounds[1]) if len(bounds) > 1 and bounds[1].isdigit() else begin

            # Select images whose index falls inside [begin, finish].
            row['切片'] = [
                p for p in image_paths
                if begin <= int(re.search(r'image(\d+)', os.path.basename(p)).group(1)) <= finish
            ]
            processed.append(row)

        except Exception as e:
            print(f"处理时段数据出错: {e}")
            # On any parse/match failure, keep the row with no slices.
            row['切片'] = []
            processed.append(row)

    return processed

def image_ins_table(table_data, image_list):
    """Attach frame images to table rows by matching each row's '时段' range.

    Args:
        table_data: rows containing a '时段' field such as "0-3s" or "4s"
        image_list: frame paths named like 'image_<sec>s_<frame>f.jpg'

    Returns:
        table_data with a '切片' list of matching image paths on each row.
    """
    name_re = re.compile(r'image_(\d+)s_(\d+)f\.jpg')

    # Map each image path to the (second, frame) parsed from its file name.
    timing = {}
    for img_path in image_list:
        m = name_re.match(os.path.basename(img_path))
        if m:
            timing[img_path] = {"second": int(m.group(1)), "frame": int(m.group(2))}

    for row in table_data:
        span = row["时段"]
        # '时段' is either a range like "0-3s" or a single second like "4s".
        if "-" in span:
            lo, hi = span.replace("s", "").split("-")
            first_sec, last_sec = int(lo), int(hi)
        else:
            first_sec = last_sec = int(span.replace("s", ""))

        # Collect images whose second falls inside the row's range.
        hits = [p for p, t in timing.items() if first_sec <= t["second"] <= last_sec]
        row["切片"] = hits if hits else []

    return table_data




def convert_to_markdown_table(headers, data_list):
    """Render `data_list` as a Markdown table string with the given headers.

    The '切片' column is condensed to its first image (plus a count); pipe
    characters in other values are escaped so rows stay well-formed.
    """
    out_lines = [
        '| ' + ' | '.join(headers) + ' |',
        '| ' + ' | '.join(':-:' for _ in headers) + ' |',
    ]

    for entry in data_list:
        cells = []
        for col in headers:
            if col == '切片':
                # Show only the first image to keep the cell compact.
                imgs = entry.get(col, [])
                if not imgs:
                    cell = "无图片"
                else:
                    cell = f"![首图]({imgs[0]})"
                    if len(imgs) > 1:
                        cell += f" (共{len(imgs)}张)"
            else:
                # Tolerate keys with stray surrounding spaces.
                raw = entry.get(col, '') or entry.get(f' {col}', '') or entry.get(f'{col} ', '')
                # Escape pipes so the value can't break the table layout.
                cell = str(raw).replace('|', '\\|')
            cells.append(cell)

        out_lines.append('| ' + ' | '.join(cells) + ' |')

    return '\n'.join(out_lines)

# 直接从原始数据生成更简洁的Markdown表格
def generate_clean_markdown_table(headers, data_list):
    """Build a titled Markdown table where '切片' cells embed images via HTML tags.

    Every image in a row's '切片' list is rendered as an <img> tag (200px high),
    stacked with <br>; other cells fall back to '无' when empty.
    """
    parts = ["# 视频内容分析表格\n"]
    parts.append("| " + " | ".join(headers) + " |")
    parts.append("| " + " | ".join(":-:" for _ in headers) + " |")

    for entry in data_list:
        cells = []
        for col in headers:
            if col == '切片':
                # Tolerate keys with stray surrounding spaces.
                imgs = entry.get(col, []) or entry.get(f' {col}', []) or entry.get(f'{col} ', [])
                if imgs:
                    # Render every image inline; <br> stacks them in the cell.
                    cells.append("<br>".join(
                        f'<img src="{img_url}" height="200px" style="margin:5px">' for img_url in imgs
                    ))
                else:
                    cells.append("无图片")
            else:
                raw = entry.get(col, '') or entry.get(f' {col}', '') or entry.get(f'{col} ', '') or '无'
                # Escape pipes and flatten newlines so the row stays well-formed.
                cells.append(str(raw).replace('|', '\\|').replace('\n', ' '))

        parts.append("| " + " | ".join(cells) + " |")

    return "\n".join(parts)


def save_html_table(headers, data_list, filename=None):
    """Render (headers, data_list) as an HTML table and save it under ./tables.

    :param headers: column names for the table
    :param data_list: list of row dicts
    :param filename: optional output file name; a random one is generated when
                     omitted, and '.html' is appended when missing
    :return: the public web URL of the saved file
    """
    # Generate the HTML document first so nothing is written on render failure.
    html_content = convert_to_html_table(headers, data_list)

    # Make sure the tables folder exists next to this script.
    tables_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "tables")
    if not os.path.exists(tables_dir):
        os.makedirs(tables_dir)
        print(f"创建tables文件夹: {tables_dir}")

    if filename is None:
        random_name = f"video_table_{uuid.uuid4().hex[:8]}"
        filename = f"{random_name}.html"
    elif not filename.endswith('.html'):
        # BUG FIX: the original produced the literal name "(unknown).html",
        # discarding the caller-supplied name; append the extension instead.
        filename = f"{filename}.html"

    # Local disk path vs the public web path served under `domain`.
    local_file_path = os.path.join(tables_dir, filename)
    # BUG FIX: the original web URL ended in the literal "(unknown)".
    web_file_path = f"{domain}/tables/{filename}"

    with open(local_file_path, 'w', encoding='utf-8') as f:
        f.write(html_content)

    print(f"HTML表格已保存到: {local_file_path}")
    print(f"Web访问地址: {web_file_path}")
    return web_file_path


def convert_to_html_table(headers, data_list):
    """Render headers and row dicts as a standalone HTML document with a table.

    The image column ('图片' or '切片') is rendered as <img> tags; all other
    columns are rendered as plain text cells.

    :param headers: header/column name list
    :param data_list: list of row dicts keyed by header name
    :return: complete HTML document as a string
    """
    # Document skeleton with inline styling for the table and images.
    html = """
    <!DOCTYPE html>
    <html lang="zh-CN">
    <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <title>视频分析表格</title>
        <style>
            body {
                font-family: Arial, sans-serif;
                margin: 20px;
            }
            table {
                border-collapse: collapse;
                width: 100%;
                margin-bottom: 20px;
            }
            th, td {
                border: 1px solid #ddd;
                padding: 8px;
                text-align: center;
                vertical-align: middle;
            }
            th {
                background-color: #f2f2f2;
                font-weight: bold;
            }
            tr:nth-child(even) {
                background-color: #f9f9f9;
            }
            img {
                max-width: 200px;
                max-height: 150px;
                margin: 5px;
                border: 1px solid #ddd;
            }
            .image-container {
                display: flex;
                flex-wrap: wrap;
                justify-content: center;
            }
        </style>
    </head>
    <body>
        <h1>视频内容分析表格</h1>
        <table>
            <thead>
                <tr>
    """

    # Header cells.
    for header in headers:
        html += f"                <th>{header}</th>\n"

    html += """
                </tr>
            </thead>
            <tbody>
    """

    # Data rows.
    for item in data_list:
        html += "                <tr>\n"

        for header in headers:
            # BUG FIX: the original only recognized '图片' as the image column,
            # but the rest of this module stores images under '切片'; accept both.
            if header in ('图片', '切片'):
                images = item.get(header, [])
                html += "                    <td>\n"
                html += "                        <div class='image-container'>\n"

                for img_url in images:
                    html += f"                            <img style='height:200px' src='{img_url}' alt='视频截图'>\n"

                html += "                        </div>\n"
                html += "                    </td>\n"
            else:
                # Tolerate keys with stray surrounding spaces; default to '无'.
                value = item.get(header, '') or item.get(f' {header}', '') or item.get(f'{header} ', '') or '无'
                # Normalize line breaks to self-closing form.
                value = str(value).replace('<br>', '<br/>')
                html += f"                    <td>{value}</td>\n"

        html += "                </tr>\n"

    # Close out the document.
    html += """
            </tbody>
        </table>
    </body>
    </html>
    """

    return html


def md2html_table(md_str):
    """Normalize a Markdown table, convert it to HTML, and parse out its data.

    Keeps only lines that start with '|', re-joining their non-empty cells,
    then renders with the markdown 'tables' extension and scrapes the result
    with BeautifulSoup. Returns (headers, rows) or ([], []) on failure.
    """
    if md_str:
        # Rebuild each pipe line from its trimmed, non-empty cells so that
        # mangled spacing doesn't break the markdown table parser.
        cleaned = []
        for raw_line in md_str.split('\n'):
            raw_line = raw_line.strip()
            if not raw_line:
                continue
            if raw_line.startswith('|'):
                pieces = [p.strip() for p in raw_line.split('|')]
                cleaned.append('|' + '|'.join(p for p in pieces if p) + '|')

        md_str = '\n'.join(cleaned)
        print("处理后的表格文本:", md_str)

    html_content = markdown.markdown(md_str, extensions=['tables'])
    print("HTML表格内容:", html_content)
    try:
        soup = BeautifulSoup(html_content, 'html.parser')

        table = soup.find('table')
        if not table:
            return [], []

        # First <tr> supplies the headers (th or td cells).
        headers = []
        first_row = table.find('tr')
        if first_row:
            headers = [cell.get_text().strip() for cell in first_row.find_all(['th', 'td'])]

        # Remaining rows become dicts keyed by header; skip ragged rows.
        rows = []
        for tr in table.find_all('tr')[1:]:
            cells = tr.find_all(['td', 'th'])
            if len(cells) == len(headers):
                rows.append({headers[i]: c.get_text().strip() for i, c in enumerate(cells)})

        return headers, rows

    except Exception as e:
        print(f"解析HTML表格时出错: {e}")
        return [], []