# -*- coding: utf-8 -*-
import requests
from flask import Flask, render_template, request, jsonify
import json
import oss2
import cv2
import os
import numpy as np
import shutil
from openai import OpenAI
from auth import auth_login, require_login

app = Flask(__name__)

# OpenAI-compatible client pointed at Alibaba Cloud Bailian (DashScope).
client = OpenAI(
    # If the environment variable is not configured, replace the next line
    # with your Bailian API key: api_key="sk-xxx",
    api_key=os.getenv("DASHSCOPE_API_KEY"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

# OSS bucket used to host the extracted key-frame images; endpoint and
# credentials are required environment variables (KeyError if missing).
endpoint = f"https://{os.environ['OSS_ENDPOINT']}"
auth = oss2.Auth(os.environ['ALIBABA_CLOUD_ACCESS_KEY_ID'], os.environ['ALIBABA_CLOUD_ACCESS_KEY_SECRET'])
bucket = oss2.Bucket(auth, endpoint, os.environ['OSS_BUCKET'])

# Uploads are saved into the working directory; only .mp4 files are accepted.
app.config['UPLOAD_FOLDER'] = './'
app.config['ALLOWED_EXTENSIONS'] = {'mp4'}


@app.route('/login', methods=['POST', 'GET'])
def login():
    """Login endpoint; delegates entirely to the auth module."""
    result = auth_login()
    return result


@app.route('/', methods=['GET'])
@require_login
def index():
    """Serve the single-page frontend (requires an authenticated session)."""
    page = render_template('index.html')
    return page


@app.route('/api/upload', methods=['POST'])
@require_login
def handle_upload():
    """Video upload endpoint.

    Saves the uploaded .mp4, extracts key frames from it, uploads the
    frames to OSS, and returns the signed image URLs as JSON.

    Returns:
        200 with {'code': 200, 'data': {'keyframes': [...]}} on success,
        400 for a missing/empty file part, 409 for a disallowed extension,
        500 for any processing failure.
    """
    if 'file' not in request.files:
        return jsonify({"error": "No file part"}), 400
    file = request.files['file']
    if file.filename == '':
        return jsonify({"error": "No selected file"}), 400
    if not (file and allowed_file(file.filename)):
        return jsonify({"error": "File type not allowed"}), 409
    file_path = None
    output_folder = 'output_keyframes'
    try:
        # basename() strips any directory components from the client-supplied
        # name, preventing path traversal (e.g. "../../evil.mp4").
        filename = os.path.basename(file.filename)
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        file.save(file_path)
        extract_key_frames(filename, file_path, output_folder, 0, None, 10, 30)
        image_urls = upload_images_to_oss(output_folder)
        data = {'keyframes': image_urls}
        return jsonify({'code': 200, 'data': data})
    except Exception as e:
        return jsonify({"error": str(e)}), 500
    finally:
        # Clean up the temp video and keyframe directory even when the
        # pipeline failed part-way (the old code leaked them on error).
        if file_path and os.path.exists(file_path):
            os.remove(file_path)
        shutil.rmtree(output_folder, ignore_errors=True)


def allowed_file(filename):
    """Return True if *filename* has an extension listed in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in app.config['ALLOWED_EXTENSIONS']


def download_from_oss(url, destination, timeout=30):
    """Download *url* to the local path *destination*.

    Args:
        url: HTTP(S) URL to fetch.
        destination: Local file path to write.
        timeout: Connect/read timeout in seconds (default 30). The original
            code had no timeout and could hang forever on a stalled server.

    Returns:
        True if the server responded 200 and the file was written,
        False for any other status code.
    """
    # stream=True avoids buffering a potentially large video fully in memory.
    response = requests.get(url, timeout=timeout, stream=True)
    if response.status_code == 200:
        with open(destination, 'wb') as f:
            for chunk in response.iter_content(chunk_size=65536):
                f.write(chunk)
        return True
    return False


@app.route('/api/completions', methods=['POST'])
@require_login
def handle_completions():
    """Information-extraction endpoint.

    Runs the vision model over the provided keyframes, then feeds its
    analysis into the text model to reconstruct the video's storyline.
    """
    try:
        payload = request.json
        frames = payload.get('keyframes')
        extra_info = payload.get('prompt', '')
        vl_result = json.loads(call_multimodal_conversation(frames, extra_info))
        if 'error' in vl_result:
            return jsonify({'code': 400, 'data': vl_result['error']['message']})
        analysis_text = vl_result['choices'][0]['message']['content']
        text_result = json.loads(call_text_completions(analysis_text))
        story = text_result['choices'][0]['message']['content']
        return jsonify({'code': 200, 'data': story})
    except Exception as e:
        return jsonify({'error': str(e)}), 500


def extract_key_frames(filename, video_path, output_folder, start_time=0, end_time=None, frames_per_second=10,
                       similarity_threshold=30):
    """Extract key frames from a video within an optional time window.

    A frame is sampled every ``fps / frames_per_second`` frames; a sampled
    frame is saved as a key frame when its mean absolute grayscale difference
    from the previously sampled frame exceeds ``similarity_threshold``.

    Args:
        filename (str): Prefix for the output image file names.
        video_path (str): Path of the input video file.
        output_folder (str): Directory where key-frame JPEGs are written.
        start_time (float): Processing start time in seconds (default 0).
        end_time (float or None): Processing end time in seconds; None means
            process to the end of the video.
        frames_per_second (int): Frames sampled per second (default 10).
        similarity_threshold (int): Mean-difference threshold above which a
            sampled frame counts as a key frame (default 30).

    Returns:
        int: Number of key frames written (0 if the video cannot be opened).
    """
    os.makedirs(output_folder, exist_ok=True)

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        print("Error: Could not open video.")
        return 0

    key_frame_count = 0
    try:
        total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        fps = cap.get(cv2.CAP_PROP_FPS)

        # Convert the time window to frame indices.
        start_frame = int(start_time * fps)
        end_frame = total_frames if end_time is None else min(int(end_time * fps), total_frames)

        # max(1, ...) guarantees a positive sampling interval even when
        # frames_per_second exceeds the video's fps.
        frame_interval = max(1, int(fps / frames_per_second))

        frame_count = 0
        prev_frame = None

        while True:
            ret, frame = cap.read()
            if not ret or frame_count >= end_frame:
                break

            # Skip frames before the requested window.
            if frame_count < start_frame:
                frame_count += 1
                continue

            if (frame_count - start_frame) % frame_interval == 0:
                # Grayscale conversion only for sampled frames — the diff
                # below ignores color, and converting every frame was wasted
                # work in the original implementation.
                gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                if prev_frame is not None:
                    avg_diff = np.mean(cv2.absdiff(gray_frame, prev_frame))
                    if avg_diff > similarity_threshold:
                        # Bug fix: the name template hard-coded a literal
                        # prefix and ignored the `filename` parameter, so
                        # different uploads produced colliding names.
                        key_frame_filename = os.path.join(
                            output_folder, f"{filename}_keyframe_{key_frame_count}.jpg")
                        cv2.imwrite(key_frame_filename, frame)
                        key_frame_count += 1
                prev_frame = gray_frame

            # Progress display (guard against a zero-length window).
            progress = ((frame_count - start_frame) / (end_frame - start_frame)) * 100 \
                if end_frame != start_frame else 100
            print(f"\rProcessing: {progress:.2f}% [{frame_count - start_frame}/{end_frame - start_frame}]", end="")

            frame_count += 1
    finally:
        # Release the capture even if decoding raised part-way through.
        cap.release()

    print(f"\nExtracted {key_frame_count} key frames.")
    return key_frame_count


def upload_images_to_oss(directory):
    """Upload the extracted frame images under *directory* to OSS.

    Args:
        directory (str): Local directory to walk for .jpg/.jpeg files.

    Returns:
        list[str]: Pre-signed GET URLs (valid for 3600 s), one per upload.
    """
    image_urls = []
    for root, _dirs, files in os.walk(directory):
        for name in files:
            if name.lower().endswith(('.jpg', '.jpeg')):
                local_file_path = os.path.join(root, name)
                # The OSS object key is the path relative to *directory*.
                object_name = os.path.relpath(local_file_path, directory)
                bucket.put_object_from_file(object_name, local_file_path)
                # Bug fix: sign the SAME key that was uploaded. The old code
                # signed the bare basename, which yields a URL to a
                # nonexistent object for files inside subdirectories.
                oss_sign_url = bucket.sign_url('GET', object_name, 3600)
                image_urls.append(oss_sign_url)
    return image_urls


def call_multimodal_conversation(keyframes, video_extra_info):
    """Call the Bailian vision model (qwen-vl-max-latest) on the key frames.

    Sends the key-frame image URLs together with an analysis prompt and
    returns the raw completion serialized as a JSON string.
    """
    prompt_parts = [
        "# 角色\n",
        "你是一名视频分析师，擅长对各种视频片段进行理解。\n\n",
        "# 任务描述\n",
        "给你一个视频片段的多张关键帧图片，请你完成以下任务。\n",
        "- 输出每张图片的画面信息，包括人物、物体、动作、文字、字幕、镜头语言、一句话总结等。\n",
        "- 把每张图片的信息串联起来，生成视频的详细概述，还原该片段的剧情。\n\n",
        "# 限制\n",
        "- 分析范围严格限定于提供的视频子片段，不涉及视频之外的任何推测或背景信息。\n",
        "- 总结时需严格依据视频内容，不可添加个人臆测或创意性内容。\n",
        "- 保持对所有视频元素（尤其是文字和字幕）的高保真还原，避免信息遗漏或误解。\n\n",
        "# 输入数据\n",
        "## 视频补充信息 (可能对你理解该片段有帮助，如果输入为空则忽略补充信息)\n",
        f"{video_extra_info}\n\n",
        "# 输出格式\n",
        "直接按照任务目标里即可，先输出每张图片的描述，再串联起来输出整个视频片段的剧情。\n",
    ]
    prompt = "".join(prompt_parts)

    # Multimodal payload: the video (as a list of frame URLs) plus the prompt.
    video_part = {"type": "video", "video": keyframes}
    text_part = {"type": "text", "text": prompt}
    user_message = {"role": "user", "content": [video_part, text_part]}

    completion = client.chat.completions.create(
        model="qwen-vl-max-latest",
        messages=[user_message],
    )
    return completion.model_dump_json()


def call_text_completions(video_analysis_text):
    """Call the Bailian text model (qwen-max-latest) to reconstruct the plot.

    Takes the per-shot visual analysis produced by the vision model and asks
    the text model to stitch it into a coherent storyline; returns the raw
    completion serialized as a JSON string.
    """
    prompt_parts = [
        "# 角色\n",
        "你是一个专业的视频标注专员，擅长结合视频镜头信息来分析处理各种视频任务。\n\n",
        "# 任务目标\n",
        "请你结合输入数据串联、还原出整个视频的详细剧情。\n\n",
        "# 限制\n",
        "1. 如出现语法上错误，或逻辑不通，请直接修改\n",
        "2. 在视频分镜中，如果包含台词，可能会出现说话者与其所说内容不匹配的情况。因此，必须根据剧情的进展，准确判断每段台词的真实说话者\n",
        "3. 如果视频分镜中无台词，请根据视频音频文字为其匹配台词\n",
        "4. 修改后的故事请适当保留视频分镜中对人物、场景的描写\n",
        "5. 帮忙润色一下故事，使其更具逻辑性\n",
        "6. 结合视频分镜中的人物外观特点，如果有外观相近的人物是同一个角色。因此，需要将不同分镜中的人物角色统一。\n\n",
        "# 输入数据\n",
        "## 资料一：视频分镜信息 (视频各镜头的视觉描述信息)\n",
        f"{video_analysis_text}\n\n",
        "# 输出格式\n",
        "直接输出视频剧情，不要输出其他信息。\n",
    ]
    prompt = "".join(prompt_parts)

    user_message = {'role': 'user', 'content': prompt}
    completion = client.chat.completions.create(
        model="qwen-max-latest",
        messages=[user_message],
    )
    return completion.model_dump_json()


if __name__ == "__main__":
    # Development server entry point; listens on all interfaces, port 9000.
    app.run(host="0.0.0.0", port=9000)
