import os
import random
import tempfile
import uuid
from typing import List, Optional, Tuple

import ffmpeg
from PyQt5.QtCore import Qt, QTimer, QThread, pyqtSignal
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import (
    QWidget, QLabel, QLineEdit, QTextEdit, QPushButton, QComboBox, QGridLayout,
    QVBoxLayout, QColorDialog, QFontComboBox, QSpinBox, QHBoxLayout, QMessageBox,
    QProgressBar, QFileDialog, QCheckBox
)

from app.core.ffmpeg_utils import get_ffmpeg_exe, get_ffprobe_exe


class TitleContentSubtitlesThread(QThread):
    """Worker thread that burns a title and content text into a video via ffmpeg.

    The content text is wrapped and split into timed SRT entries rendered with
    the caller-selected alignment; the title is rendered as a *separate* SRT
    track forced to top-center (ASS Alignment=8) so both can be on screen at
    once.  An optional background image can be overlaid (top-center) behind
    the text, optionally only while subtitles are visible.

    Emits ``finished(ok, message)`` where ``message`` is the output path on
    success or the error text on failure.
    """

    # NOTE(review): this shadows QThread's built-in no-argument `finished`
    # signal; anything connecting to `finished` gets this (bool, str) version.
    finished = pyqtSignal(bool, str)

    def __init__(
        self,
        video_path: str,
        output_path: str,
        title_text: str,
        content_text: str,
        font_name: str,
        font_size: int,
        color: QColor,
        outline: int,
        shadow: int,
        alignment: int,
        margin_v: int,
        title_duration_s: int,
        content_duration_s: int,
        max_chars_per_line: int,
        max_lines_per_entry: int,
        start_offset_s: int,
        bg_image_path: Optional[str] = None,
        bg_opacity_pct: int = 60,
        bg_scale_pct: int = 100,
        bg_only_during_subs: bool = True,
    ):
        """Store and sanitize all rendering parameters.

        Durations are clamped to >= 1 s, wrapping width to >= 8 chars,
        start offset to >= 0; background opacity is clamped to 0-100 % and
        background scale to 10-300 %.  ``alignment`` is an ASS numpad
        alignment value (1-9).
        """
        super().__init__()
        self.video_path = video_path
        self.output_path = output_path
        self.title_text = title_text or ""
        self.content_text = content_text or ""
        self.font_name = font_name
        self.font_size = font_size
        self.color = color
        self.outline = outline
        self.shadow = shadow
        self.alignment = alignment
        self.margin_v = margin_v
        self.title_duration_s = max(1, int(title_duration_s))
        self.content_duration_s = max(1, int(content_duration_s))
        self.max_chars_per_line = max(8, int(max_chars_per_line))
        self.max_lines_per_entry = max(1, int(max_lines_per_entry))
        self.start_offset_s = max(0, int(start_offset_s))
        # Background overlay settings
        self.bg_image_path = (bg_image_path or '').strip() or None
        self.bg_opacity_pct = max(0, min(100, int(bg_opacity_pct)))
        self.bg_scale_pct = max(10, min(300, int(bg_scale_pct)))
        self.bg_only_during_subs = bool(bg_only_during_subs)

    @staticmethod
    def _qcolor_to_ass_primary_colour(color: QColor) -> str:
        """Convert a QColor to an ASS PrimaryColour string.

        ASS encodes colour as &H AA BB GG RR (note the reversed byte order
        vs. RGB); alpha 00 means fully opaque.
        """
        r, g, b = color.red(), color.green(), color.blue()
        return f"&H00{b:02X}{g:02X}{r:02X}&"

    @staticmethod
    def _ms_to_hmsms(total_ms: int) -> Tuple[int, int, int, int]:
        """Split a millisecond total into (hours, minutes, seconds, millis)."""
        hours = total_ms // 3_600_000
        total_ms %= 3_600_000
        minutes = total_ms // 60_000
        total_ms %= 60_000
        seconds = total_ms // 1_000
        ms = total_ms % 1_000
        return hours, minutes, seconds, ms

    @staticmethod
    def _wrap_text_to_lines(text: str, max_chars: int) -> List[str]:
        """Greedily word-wrap ``text`` to at most ``max_chars`` per line.

        Explicit newlines in the input are kept as hard breaks; a blank input
        line becomes an empty string in the result (used downstream as a
        paragraph separator).  A single word longer than ``max_chars`` is
        emitted on its own over-long line rather than being split.
        """
        # Tokenize into words, using a literal '\n' token as a hard-break
        # sentinel between input lines / for blank lines.
        words = []
        for part in text.replace('\r', '').split('\n'):
            if not part.strip():
                words.append('\n')
            else:
                words.extend(part.split(' '))
                words.append('\n')
        # Drop the trailing sentinel so we don't emit a spurious final break.
        if words and words[-1] == '\n':
            words.pop()

        lines: List[str] = []
        current: List[str] = []
        current_len = 0
        for w in words:
            if w == '\n':
                if current:
                    lines.append(' '.join(current))
                    current = []
                    current_len = 0
                else:
                    # Blank source line -> empty output line (paragraph break).
                    lines.append("")
                continue
            # +1 accounts for the joining space when the line already has words.
            add_len = (1 if current else 0) + len(w)
            if current_len + add_len <= max_chars:
                current.append(w)
                current_len += add_len
            else:
                if current:
                    lines.append(' '.join(current))
                current = [w]
                current_len = len(w)
        if current:
            lines.append(' '.join(current))
        return [l.strip() for l in lines]

    def _lines_to_entries(self, lines: List[str], max_lines_per_entry: int) -> List[str]:
        """Group wrapped lines into subtitle entries of at most N lines each.

        An empty line (paragraph break from ``_wrap_text_to_lines``) flushes
        the current group early so paragraphs never share an entry.
        """
        entries: List[str] = []
        bucket: List[str] = []
        for ln in lines:
            if ln == "" and bucket:
                # paragraph break -> flush current bucket
                entries.append('\n'.join(bucket))
                bucket = []
                continue
            bucket.append(ln)
            if len(bucket) >= max_lines_per_entry:
                entries.append('\n'.join(bucket))
                bucket = []
        if bucket:
            entries.append('\n'.join(bucket))
        # Normalize whitespace-only entries to "" (nothing is actually removed;
        # the caller substitutes a single space for empty entries).
        return [e if e.strip() else "" for e in entries]

    def _build_content_srt_and_duration(self) -> Tuple[str, int]:
        """Build the content SRT text and return (srt_text, total_duration_ms).

        Entries run back-to-back starting at ``start_offset_s``, each lasting
        ``content_duration_s``.  Raises RuntimeError if pysrt is missing.
        """
        try:
            import pysrt  # noqa: F401
        except Exception as exc:  # pragma: no cover
            raise RuntimeError(f"未安装 pysrt: {exc}")

        from pysrt import SubRipFile, SubRipItem, SubRipTime

        srt = SubRipFile()

        current_ms = self.start_offset_s * 1000
        index = 1

        def add_entry(text: str, duration_s: int) -> None:
            # Appends one entry starting where the previous one ended.
            nonlocal current_ms, index
            start_h, start_m, start_s, start_ms = self._ms_to_hmsms(current_ms)
            end_ms_total = current_ms + duration_s * 1000
            end_h, end_m, end_s, end_ms = self._ms_to_hmsms(end_ms_total)
            item = SubRipItem(
                index=index,
                start=SubRipTime(start_h, start_m, start_s, start_ms),
                end=SubRipTime(end_h, end_m, end_s, end_ms),
                text=text,
            )
            srt.append(item)
            index += 1
            current_ms = end_ms_total

        # Content entries only (we will render title as a separate track so they can overlap)
        content_text = self.content_text.strip()
        if content_text:
            lines = self._wrap_text_to_lines(content_text, self.max_chars_per_line)
            entries = self._lines_to_entries(lines, self.max_lines_per_entry)
            for e in entries:
                # Ensure at least one visible character; empty keeps a blank frame otherwise
                add_entry(e if e.strip() else " ", max(self.content_duration_s, 1))

        # Save to text and also compute total content duration (from first to last entry)
        # We use the library to guarantee proper SRT formatting
        tmp_dir = tempfile.gettempdir()
        tmp_path = os.path.join(tmp_dir, f"subtitle_{os.getpid()}_{id(self)}.srt")
        srt.save(tmp_path, encoding='utf-8')
        with open(tmp_path, 'r', encoding='utf-8', errors='ignore') as f:
            text = f.read()
        try:
            os.remove(tmp_path)
        except Exception:
            # Best effort: a leftover temp file is harmless.
            pass
        # total content duration in ms
        total_content_ms = 0
        if len(srt) > 0:
            first = srt[0]
            last = srt[-1]
            start_total = (first.start.hours * 3600000 + first.start.minutes * 60000 +
                           first.start.seconds * 1000 + first.start.milliseconds)
            end_total = (last.end.hours * 3600000 + last.end.minutes * 60000 +
                         last.end.seconds * 1000 + last.end.milliseconds)
            total_content_ms = max(0, end_total - start_total)
        return text, total_content_ms

    def _build_title_srt(self, content_total_ms: int) -> str:
        """Build a separate SRT for the title so it can be placed at the top
        and displayed concurrently with content.

        The title is a single event: it starts at ``start_offset_s`` and lasts
        ``max(title_duration_s, content_total_ms)`` so it never disappears
        before the content does.  Returns "" when there is no title text.
        Raises RuntimeError if pysrt is missing.
        """
        try:
            import pysrt  # noqa: F401
        except Exception as exc:  # pragma: no cover
            raise RuntimeError(f"未安装 pysrt: {exc}")

        from pysrt import SubRipFile, SubRipItem, SubRipTime

        title_text = self.title_text.strip()
        if not title_text:
            return ""

        lines = self._wrap_text_to_lines(title_text, self.max_chars_per_line)
        # Keep title as a single event with up to max_lines_per_entry lines
        entries = self._lines_to_entries(lines, self.max_lines_per_entry)
        if not entries:
            return ""

        srt = SubRipFile()
        start_ms = self.start_offset_s * 1000
        duration_ms = max(self.title_duration_s * 1000, content_total_ms)
        if duration_ms <= 0:
            duration_ms = self.title_duration_s * 1000 if self.title_duration_s > 0 else 1000

        start_h, start_m, start_s, start_ms_part = self._ms_to_hmsms(start_ms)
        end_h, end_m, end_s, end_ms_part = self._ms_to_hmsms(start_ms + duration_ms)

        item = SubRipItem(
            index=1,
            start=SubRipTime(start_h, start_m, start_s, start_ms_part),
            end=SubRipTime(end_h, end_m, end_s, end_ms_part),
            text='\n'.join(entries),
        )
        srt.append(item)

        tmp_dir = tempfile.gettempdir()
        tmp_path = os.path.join(tmp_dir, f"title_{os.getpid()}_{id(self)}.srt")
        srt.save(tmp_path, encoding='utf-8')
        with open(tmp_path, 'r', encoding='utf-8', errors='ignore') as f:
            text = f.read()
        try:
            os.remove(tmp_path)
        except Exception:
            # Best effort: a leftover temp file is harmless.
            pass
        return text

    def _write_temp_srt(self, text: str) -> str:
        """Write ``text`` to a uniquely-named temp .srt and return its path."""
        # Create a temp file path next to output for better FFmpeg access on Windows
        base_dir = os.path.dirname(self.output_path) or tempfile.gettempdir()
        # Use a unique suffix each call to avoid accidental overwrites when creating multiple tracks
        unique = uuid.uuid4().hex
        tmp_path = os.path.join(base_dir, f"_temp_{os.getpid()}_{id(self)}_{unique}.srt")
        with open(tmp_path, 'w', encoding='utf-8') as f:
            f.write(text)
        return tmp_path

    def run(self) -> None:
        """Build the filter graph and run ffmpeg; emit ``finished`` either way.

        Pipeline: optional background overlay -> content subtitles filter ->
        title subtitles filter -> libx264 encode (audio stream copied when
        present).  Temp SRT files are always cleaned up in ``finally``.
        """
        tmp_content_srt_path: Optional[str] = None
        tmp_title_srt_path: Optional[str] = None
        try:
            exe = get_ffmpeg_exe()
            if not exe:
                raise FileNotFoundError("未找到 ffmpeg，可安装 imageio-ffmpeg 或将 ffmpeg 加入 PATH")
            if not os.path.exists(self.video_path):
                raise FileNotFoundError("视频文件不存在")

            content_srt_text, content_total_ms = self._build_content_srt_and_duration()
            title_srt_text = self._build_title_srt(content_total_ms)
            if content_srt_text.strip():
                tmp_content_srt_path = self._write_temp_srt(content_srt_text)
            if title_srt_text.strip():
                tmp_title_srt_path = self._write_temp_srt(title_srt_text)

            # style for content (uses user-selected alignment)
            style_content = ",".join([
                f"Fontname={self.font_name}",
                f"Fontsize={self.font_size}",
                f"PrimaryColour={self._qcolor_to_ass_primary_colour(self.color)}",
                f"Outline={self.outline}",
                f"Shadow={self.shadow}",
                f"Alignment={self.alignment}",
                f"MarginV={self.margin_v}",
                "BorderStyle=1",
            ])
            # style for title (force to top center alignment 8)
            style_title = ",".join([
                f"Fontname={self.font_name}",
                f"Fontsize={self.font_size}",
                f"PrimaryColour={self._qcolor_to_ass_primary_colour(self.color)}",
                f"Outline={self.outline}",
                f"Shadow={self.shadow}",
                "Alignment=8",
                f"MarginV={self.margin_v}",
                "BorderStyle=1",
            ])

            # Probe for an audio stream so we only map audio when it exists.
            ffprobe_exe = get_ffprobe_exe() or 'ffprobe'
            probe = ffmpeg.probe(self.video_path, cmd=ffprobe_exe)
            has_audio = any(s.get('codec_type') == 'audio' for s in probe.get('streams', []))

            inp = ffmpeg.input(self.video_path)
            v = inp.video
            # Optional background image overlay (top-center)
            if self.bg_image_path and os.path.exists(self.bg_image_path):
                bg = ffmpeg.input(self.bg_image_path)
                # Scale background by percentage while keeping aspect ratio, then pad to video width
                # Use width of video stream via 'iw' is not available directly in ffmpeg-python graph here;
                # so scale by percentage and overlay centered at top.
                scale_factor = max(10, min(300, int(self.bg_scale_pct))) / 100.0
                # Compute overlay alpha via colorchannelmixer
                alpha = max(0, min(100, int(self.bg_opacity_pct))) / 100.0
                bg_v = bg.filter('scale', f"iw*{scale_factor}", f"ih*{scale_factor}")
                # Add alpha
                bg_v = bg_v.filter('format', 'rgba').filter('colorchannelmixer', aa=alpha)
                # X to center, Y = 0 (top). Enable only during subtitle period when requested.
                # Build enable expression from subtitle timings (start_offset to end based on content/title)
                enable_expr = None
                if self.bg_only_during_subs:
                    # approximate: from start_offset to end of last subtitle among both tracks
                    total_ms = max(self.start_offset_s*1000 + (content_total_ms or 0), self.start_offset_s*1000 + max(self.title_duration_s*1000, content_total_ms or 0))
                    total_s = max(0, total_ms/1000.0)
                    enable_expr = f"between(t,{self.start_offset_s},{total_s})"
                if enable_expr:
                    v = ffmpeg.overlay(v, bg_v, x='(W-w)/2', y=0, enable=enable_expr)
                else:
                    v = ffmpeg.overlay(v, bg_v, x='(W-w)/2', y=0)

            # Burn subtitles after the overlay so the text renders on top of it.
            if tmp_content_srt_path:
                v = v.filter('subtitles', filename=tmp_content_srt_path, force_style=style_content)
            if tmp_title_srt_path:
                v = v.filter('subtitles', filename=tmp_title_srt_path, force_style=style_title)

            if has_audio:
                out = ffmpeg.output(
                    v, inp.audio, self.output_path,
                    vcodec='libx264', acodec='copy', preset='veryfast', movflags='+faststart'
                )
            else:
                out = ffmpeg.output(
                    v, self.output_path,
                    vcodec='libx264', preset='veryfast', movflags='+faststart'
                )

            ffmpeg.run(out, overwrite_output=True, cmd=exe)
            self.finished.emit(True, self.output_path)
        except Exception as exc:
            # Report any failure (missing tools, bad input, ffmpeg error) via the signal.
            self.finished.emit(False, str(exc))
        finally:
            # Always clean up the temp SRT files, success or failure.
            if tmp_content_srt_path and os.path.exists(tmp_content_srt_path):
                try:
                    os.remove(tmp_content_srt_path)
                except Exception:
                    pass
            if tmp_title_srt_path and os.path.exists(tmp_title_srt_path):
                try:
                    os.remove(tmp_title_srt_path)
                except Exception:
                    pass


class TitleContentSubtitlesWidget(QWidget):
    """Form widget that drives :class:`TitleContentSubtitlesThread`.

    Collects the video/output paths, title/content text, and styling options,
    then runs the render thread in the background while a 150 ms timer
    animates a simulated progress bar (ffmpeg provides no cheap progress
    feedback here, so progress is cosmetic until the thread reports back).
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self._build_ui()
        self._wire()
        self.thread: Optional[TitleContentSubtitlesThread] = None
        # Simulated-progress machinery: ticks while `simulating` is True and
        # jumps to 100% once `actual_done` is set by the worker callback.
        self.sim_timer = QTimer(self)
        self.sim_timer.setInterval(150)
        self.sim_timer.timeout.connect(self._on_sim)
        self.simulating = False
        self.actual_done = False
        self.current_color = QColor(255, 255, 255)  # default subtitle colour: white
        self._apply_color_btn()

    def _build_ui(self):
        """Create all child widgets and lay them out in a grid."""
        self.video_edit = QLineEdit(self)
        self.video_btn = QPushButton("浏览视频", self)
        self.output_edit = QLineEdit(self)
        self.output_btn = QPushButton("保存到", self)

        self.title_edit = QLineEdit(self)
        self.content_edit = QTextEdit(self)
        # Background image controls
        self.bg_edit = QLineEdit(self)
        self.bg_btn = QPushButton("选择图片", self)
        self.bg_opacity_spin = QSpinBox(self); self.bg_opacity_spin.setRange(0, 100); self.bg_opacity_spin.setValue(60)
        self.bg_scale_spin = QSpinBox(self); self.bg_scale_spin.setRange(10, 300); self.bg_scale_spin.setValue(100)
        self.bg_only_during_cb = QCheckBox("仅在字幕期间显示", self); self.bg_only_during_cb.setChecked(True)

        # Font / style controls
        self.font_combo = QFontComboBox(self)
        self.font_size = QSpinBox(self)
        self.font_size.setRange(8, 96)
        self.font_size.setValue(32)
        self.color_btn = QPushButton("选择颜色", self)
        self.outline_spin = QSpinBox(self)
        self.outline_spin.setRange(0, 10)
        self.outline_spin.setValue(2)
        self.shadow_spin = QSpinBox(self)
        self.shadow_spin.setRange(0, 10)
        self.shadow_spin.setValue(1)
        # Items are ordered to match ASS numpad alignment values 1..9
        # (see _align_idx_to_ass).
        self.alignment_combo = QComboBox(self)
        self.alignment_combo.addItems(["左下","中下","右下","左中","正中","右中","左上","中上","右上"])
        self.alignment_combo.setCurrentIndex(1)
        self.margin_v_spin = QSpinBox(self)
        self.margin_v_spin.setRange(0, 200)
        self.margin_v_spin.setValue(20)

        # Timing / wrapping controls
        self.title_dur_spin = QSpinBox(self)
        self.title_dur_spin.setRange(1, 15)
        self.title_dur_spin.setValue(3)
        self.content_dur_spin = QSpinBox(self)
        self.content_dur_spin.setRange(1, 20)
        self.content_dur_spin.setValue(3)
        self.max_chars_spin = QSpinBox(self)
        self.max_chars_spin.setRange(8, 80)
        self.max_chars_spin.setValue(28)
        self.max_lines_spin = QSpinBox(self)
        self.max_lines_spin.setRange(1, 4)
        self.max_lines_spin.setValue(2)
        self.offset_spin = QSpinBox(self)
        self.offset_spin.setRange(0, 60)
        self.offset_spin.setValue(0)

        self.progress_bar = QProgressBar(self)
        self.progress_bar.setRange(0, 100)
        self.progress_bar.setValue(0)
        self.start_btn = QPushButton("开始生成", self)

        grid = QGridLayout()
        r = 0
        grid.addWidget(QLabel("视频文件:"), r, 0); grid.addWidget(self.video_edit, r, 1); grid.addWidget(self.video_btn, r, 2); r += 1
        grid.addWidget(QLabel("输出视频:"), r, 0); grid.addWidget(self.output_edit, r, 1); grid.addWidget(self.output_btn, r, 2); r += 1
        grid.addWidget(QLabel("标题:"), r, 0); grid.addWidget(self.title_edit, r, 1, 1, 2); r += 1
        grid.addWidget(QLabel("内容:"), r, 0); grid.addWidget(self.content_edit, r, 1, 3, 2); r += 3

        grid.addWidget(QLabel("背景图片:"), r, 0); grid.addWidget(self.bg_edit, r, 1); grid.addWidget(self.bg_btn, r, 2); r += 1
        grid.addWidget(QLabel("背景透明度/缩放(%):"), r, 0)
        rowbg = QHBoxLayout(); rowbg.addWidget(self.bg_opacity_spin); rowbg.addWidget(self.bg_scale_spin); rowbg.addWidget(self.bg_only_during_cb)
        grid.addLayout(rowbg, r, 1, 1, 2); r += 1

        grid.addWidget(QLabel("字体/大小:"), r, 0)
        row2 = QHBoxLayout(); row2.addWidget(self.font_combo); row2.addWidget(self.font_size); grid.addLayout(row2, r, 1, 1, 2); r += 1
        grid.addWidget(QLabel("颜色:"), r, 0); grid.addWidget(self.color_btn, r, 1); r += 1
        grid.addWidget(QLabel("描边/阴影/对齐/下边距:"), r, 0)
        row3 = QHBoxLayout()
        for w in (QLabel("描边"), self.outline_spin, QLabel("阴影"), self.shadow_spin, QLabel("对齐"), self.alignment_combo, QLabel("MarginV"), self.margin_v_spin):
            row3.addWidget(w)
        grid.addLayout(row3, r, 1, 1, 2); r += 1

        grid.addWidget(QLabel("标题时长(s)/内容时长(s):"), r, 0)
        row4 = QHBoxLayout()
        for w in (self.title_dur_spin, self.content_dur_spin):
            row4.addWidget(w)
        grid.addLayout(row4, r, 1, 1, 2); r += 1

        grid.addWidget(QLabel("每行字数/每条最大行数/起始延迟(s):"), r, 0)
        row5 = QHBoxLayout()
        for w in (self.max_chars_spin, self.max_lines_spin, self.offset_spin):
            row5.addWidget(w)
        grid.addLayout(row5, r, 1, 1, 2); r += 1

        lay = QVBoxLayout(self)
        lay.addLayout(grid)
        lay.addWidget(self.progress_bar)
        lay.addWidget(self.start_btn, 0, Qt.AlignRight)
        self.setLayout(lay)

    def _wire(self):
        """Connect button signals to their handlers."""
        self.video_btn.clicked.connect(self._pick_video)
        self.output_btn.clicked.connect(self._pick_out)
        self.color_btn.clicked.connect(self._pick_color)
        self.start_btn.clicked.connect(self._start)
        self.bg_btn.clicked.connect(self._pick_bg)

    def _apply_color_btn(self):
        """Paint the colour-picker button with the currently selected colour."""
        c = self.current_color
        self.color_btn.setStyleSheet(f"background-color: rgb({c.red()},{c.green()},{c.blue()}); color:#000;")

    def _pick_video(self):
        """Let the user choose the input video file."""
        p, _ = QFileDialog.getOpenFileName(self, "选择视频", "", "视频文件 (*.mp4 *.mkv *.mov *.avi *.flv *.wmv *.webm)")
        if p:
            self.video_edit.setText(p)

    def _pick_out(self):
        """Let the user choose the output file path."""
        p, _ = QFileDialog.getSaveFileName(self, "保存输出", "", "视频文件 (*.mp4)")
        if p:
            self.output_edit.setText(p)

    def _pick_color(self):
        """Open a colour dialog and apply the chosen subtitle colour."""
        color = QColorDialog.getColor(self.current_color, self, "选择字幕颜色")
        if color.isValid():
            self.current_color = color
            self._apply_color_btn()

    def _pick_bg(self):
        """Let the user choose an optional background image."""
        p, _ = QFileDialog.getOpenFileName(self, "选择背景图片", "", "图片文件 (*.png *.jpg *.jpeg *.bmp)")
        if p:
            self.bg_edit.setText(p)

    @staticmethod
    def _align_idx_to_ass(idx: int) -> int:
        """Map a combo index (0-8) to the matching ASS numpad alignment (1-9).

        The combo items are listed in ASS numpad order, so the mapping is a
        simple +1 offset; the clamp guards against an out-of-range index
        (e.g. currentIndex() == -1 on an empty combo).
        """
        return max(1, min(9, idx + 1))

    def _start(self):
        """Validate inputs, start the simulated progress, and launch the thread."""
        if self.thread is not None and self.thread.isRunning():
            return  # a job is already in flight
        v = self.video_edit.text().strip()
        o = self.output_edit.text().strip()
        t = self.title_edit.text().strip()
        c = self.content_edit.toPlainText().strip()
        if not v:
            QMessageBox.warning(self, "提示", "请选择视频文件"); return
        if not o:
            QMessageBox.warning(self, "提示", "请选择输出位置"); return
        if not (t or c):
            QMessageBox.warning(self, "提示", "请输入标题或内容"); return

        self.progress_bar.setValue(0)
        self.simulating = True
        self.actual_done = False
        self.sim_timer.start()
        self.start_btn.setEnabled(False)

        self.thread = TitleContentSubtitlesThread(
            video_path=v,
            output_path=o,
            title_text=t,
            content_text=c,
            font_name=self.font_combo.currentFont().family(),
            font_size=int(self.font_size.value()),
            color=self.current_color,
            outline=int(self.outline_spin.value()),
            shadow=int(self.shadow_spin.value()),
            alignment=self._align_idx_to_ass(self.alignment_combo.currentIndex()),
            margin_v=int(self.margin_v_spin.value()),
            title_duration_s=int(self.title_dur_spin.value()),
            content_duration_s=int(self.content_dur_spin.value()),
            max_chars_per_line=int(self.max_chars_spin.value()),
            max_lines_per_entry=int(self.max_lines_spin.value()),
            start_offset_s=int(self.offset_spin.value()),
            bg_image_path=self.bg_edit.text().strip(),
            bg_opacity_pct=int(self.bg_opacity_spin.value()),
            bg_scale_pct=int(self.bg_scale_spin.value()),
            bg_only_during_subs=self.bg_only_during_cb.isChecked(),
        )
        self.thread.finished.connect(self._done)
        self.thread.start()

    def _on_sim(self):
        """Advance the simulated progress bar by a small random step.

        Caps at 97% so the bar never claims completion before the worker
        actually finishes; _done() finalizes it at 100%.
        """
        if not self.simulating:
            return
        v = self.progress_bar.value()
        if self.actual_done:
            # Worker finished between ticks; close out the animation.
            self.progress_bar.setValue(100)
            self.sim_timer.stop(); self.simulating = False; return
        if v < 97:
            self.progress_bar.setValue(min(97, v + random.randint(1, 3)))

    def _done(self, ok: bool, msg: str):
        """Worker callback: ``msg`` is the output path on success, else the error.

        Finalizes the progress animation immediately (rather than waiting for
        the next simulated tick to fire behind the modal dialog) and re-enables
        the start button.
        """
        self.actual_done = True
        if ok:
            self.sim_timer.stop(); self.simulating = False
            self.progress_bar.setValue(100)
            QMessageBox.information(self, "成功", f"生成完成！\n输出文件: {msg}")
        else:
            self.simulating = False; self.sim_timer.stop(); self.progress_bar.setValue(0)
            QMessageBox.critical(self, "错误", msg)
        self.start_btn.setEnabled(True)


