import logging
import tempfile
import time
import uuid
from pathlib import Path

import requests
from langchain_core.runnables import RunnableConfig
from langgraph.runtime import Runtime

from video_summarize_tools.data_models import State, RuntimeContext, SourceUrlType, VideoSummarizeResult, MarkItem
from video_summarize_tools.tools import video_web_page_parser, ffmpeg_tools, image_tools

log = logging.getLogger(__name__)

def source_url_preprocess(state: State, config: RunnableConfig, runtime: Runtime[RuntimeContext]) -> State:
    """Classify the source URL as a direct video file or a web page.

    Issues a HEAD request (following redirects) and inspects the
    Content-Type header: a ``video/*`` type means a directly downloadable
    video file, anything else is treated as a web page embedding the video.

    :param state: graph state; reads ``source_url``
    :param config: unused
    :param runtime: unused
    :return: state update with ``source_url_type``
    """
    url = state["source_url"]
    resp = requests.head(url, allow_redirects=True, timeout=3)
    # .get() instead of [] — servers are not required to send Content-Type,
    # and the subscript lookup raised KeyError when the header was absent.
    content_type = resp.headers.get('content-type', '')
    if content_type.startswith('video'):
        return {"source_url_type": SourceUrlType.VIDEO_FILE}
    return {"source_url_type": SourceUrlType.DOMAIN_WEB_PAGE}

def download_from_video_url(state: State, config: RunnableConfig, runtime: Runtime[RuntimeContext]) -> State:
    """Stream the video at ``state["source_url"]`` into a unique temp file.

    :param state: graph state; reads ``source_url``
    :param config: unused
    :param runtime: unused
    :return: state update with ``video_file_path`` (a Path in the temp dir)
    :raises requests.HTTPError: on a non-2xx response, instead of silently
        saving an error page to disk as the "video"
    """
    download_file_path = Path(tempfile.gettempdir()) / str(uuid.uuid4())
    # timeout guards against a server that accepts the connection but never
    # sends data; for a streaming request it applies per read, not overall.
    with requests.get(state["source_url"], stream=True, timeout=30) as resp, \
            open(download_file_path, 'wb') as download_file:
        resp.raise_for_status()  # fail fast on 4xx/5xx
        for chunk in resp.iter_content(chunk_size=8192):
            if chunk:  # skip keep-alive heartbeat chunks
                download_file.write(chunk)

    return {"video_file_path": download_file_path}

def download_from_domain_url(state: State, config: RunnableConfig, runtime: Runtime[RuntimeContext]) -> State:
    """Let the web-page parser locate and download the video embedded in the page."""
    return {"video_file_path": video_web_page_parser.download_page_video(state["source_url"])}

def video_file_post_process(state: State, config: RunnableConfig, runtime: Runtime[RuntimeContext]) -> State:
    """Split the downloaded video file into separate video and audio stream files."""
    # The ffmpeg parameters for extracting the audio and video streams are
    # worked out from the source file inside separate_stream itself.
    separated = ffmpeg_tools.separate_stream(state["video_file_path"])
    return {
        "video_stream_file_path": separated['video_stream'],
        "audio_stream_file_path": separated['audio_stream'],
    }

def audio_transcript(state: State, config: RunnableConfig, runtime: Runtime[RuntimeContext]) -> State:
    """Transcribe the extracted audio stream to text via the ModelScope service."""
    audio_path = state["audio_stream_file_path"]
    text = runtime.context.modelscope_service.transcript_audio(audio_path)
    return {"audio_transcript_text": text}

# NOTE: the video-analysis steps would probably fit better as a sub-graph — there is a lot of coupled logic here.
def extract_video_keyframe(state: State, config: RunnableConfig, runtime: Runtime[RuntimeContext]) -> State:
    """Sample key frames from the video stream at the configured FPS.

    Frames are written into a fresh, uniquely named temp directory. The
    extraction rate is echoed back into state so downstream nodes can map
    a timestamp to a frame index.

    :param state: graph state; reads ``video_stream_file_path``
    :param config: unused
    :param runtime: supplies ``video_frame_extract_fps``
    :return: state update with ``video_stream_key_frames`` and
        ``video_frame_extract_fps``
    """
    frame_dir = Path(tempfile.gettempdir()) / str(uuid.uuid4())
    # mkdir(exist_ok=True) already tolerates an existing directory, so the
    # original is_dir() pre-check was redundant (and race-prone).
    frame_dir.mkdir(parents=True, exist_ok=True)

    frame_img_list = ffmpeg_tools.extract_frame_by_fps(
        state["video_stream_file_path"],
        frame_dir,
        fps=runtime.context.video_frame_extract_fps,
    )
    return {
        "video_stream_key_frames": frame_img_list,
        "video_frame_extract_fps": runtime.context.video_frame_extract_fps,
    }

def video_content_understanding(state: State, config: RunnableConfig, runtime: Runtime[RuntimeContext]):
    """Run LLM-based content understanding over the extracted video key frames."""
    # Batching and uploading of oversized frame sets is handled inside
    # modelscope_service.video_understanding itself.
    summary, _ = runtime.context.modelscope_service.video_understanding(
        video_frame_list=state["video_stream_key_frames"],
        fps=runtime.context.video_frame_extract_fps,
    )
    return {"video_summarize_text": summary}


def merge_result(state: State, config: RunnableConfig, runtime: Runtime[RuntimeContext]):
    """
    Merge the audio-stream transcript and the video-stream understanding
    result into one VideoSummarizeResult via a single LLM chat call.
    """
    # The prompt below is runtime model input (in Chinese) and is left
    # untouched. It asks the model to merge the transcript and the video
    # understanding JSON, and to answer strictly in the embedded JSON schema
    # (understanding_result markdown + mark_items with name/start_second/
    # end_second/bbox).
    prompt = f"""
    现有一份视频的音频流转写出的文本，以及视频流的的视频理解结果的JSON数据，请将这两个结果进行合并。
    音频流转写结果为：
    {state["audio_transcript_text"]}
    视频流理解结果为：
    {state["video_summarize_text"]}
    
    最终输出结果的格式需严格遵循以下JSON Schema
    {{
      "understanding_result": "结合视频理解的结果和音频流转写的结果中的信息，生成一份完整的视频理解结果，以markdown格式对内容进行编排",
      "mark_items": [
        {{
          "name": "标注的元素的名称或者介绍信息",
          "start_second": "该元素出现的起始时间点，单位是秒，数值是该元素在视频里开始出现时距离整个视频开始的第n秒",
          "end_second": "元素出现的结束时间点， 单位是秒，数值是该元素在视频里消失时距离整个视频开始的第n秒",
          "bbox": ["int数组类型,[x_min, y_min, x_max, y_max]格式的坐标"]
        }}
      ]
    }}
    """

    result = runtime.context.modelscope_service.chat(prompt = prompt, temperature = 0.1, use_json_format_response=True) # low temperature to make the generated JSON more deterministic
    return {"merge_result": VideoSummarizeResult.model_validate_json(result)}

def write_markdown_result_file(state: State, config: RunnableConfig, runtime: Runtime[RuntimeContext]):
    """
    Render the merged analysis result to a markdown file.

    Each mark item is resolved to the key frame it first appears in, cropped
    to its bounding box, and embedded into the markdown as a base64 image.
    :param state: reads merge_result, video_stream_key_frames,
        video_frame_extract_fps and optionally markdown_result_file_path
    :param config: unused
    :param runtime: provides the modelscope service for bbox translation
    :return: state update with markdown_result_file_path
    """
    if "markdown_result_file_path" not in state:
        md_file_path = Path(tempfile.gettempdir()) / f"video_summarize_{time.time_ns()}.md"
    else:
        # NOTE(review): assumes the caller stored a Path here, not a str —
        # .exists()/.unlink() below would fail on a str; confirm upstream.
        md_file_path = state["markdown_result_file_path"]

    if md_file_path.exists():
        md_file_path.unlink()

    result = state["merge_result"]

    mark_item_markdown_text = []
    # First turn each mark-item screenshot into a markdown image snippet
    for mark_item in result.mark_items:
        source_img = _find_mark_item_img(mark_item, state["video_stream_key_frames"], state["video_frame_extract_fps"])
        # translate the model-space bbox into pixel coordinates of the frame
        mark_item_coord = runtime.context.modelscope_service.translate_bbox_to_image_coord(mark_item.bbox,
                                                                                           image_tools.get_image_size(source_img))
        mark_item_parts_img = image_tools.image_cut(source_img,
                                                (mark_item_coord[0], mark_item_coord[1]),
                                                (mark_item_coord[2], mark_item_coord[3]))
        mark_item_markdown_text.append(f"""
\n
\n
**{mark_item.name}**
![{mark_item.name}]({image_tools.page_image_to_base64(mark_item_parts_img, "png")})
        """)

    with open(md_file_path, "w", encoding="utf-8") as md_file:
        md_file.write(result.understanding_result)
        md_file.write("\n")
        md_file.write("\n")
        md_file.write("".join(mark_item_markdown_text))

    return {"markdown_result_file_path": md_file_path}

def _find_mark_item_img(mark_item: MarkItem, frame_img_list: list[Path], fps: float):
    """
    根据llm返回的元素标记信息，查找对应时间点的视频截图
    :param mark_item:
    :param frame_img_list:
    :param fps:
    :return:
    """

    img_idx = int(mark_item.start_second * fps)
    return frame_img_list[img_idx]
