# file: streaming_translate.py
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
import difflib
import time


def translate_streaming(
    input_stream,
    src_lang="zh",
    tgt_lang="en",
    *,
    model_name="facebook/m2m100_1.2B",
    delay=0.3,
):
    """Incrementally translate a stream of text segments, printing revisions.

    Each incoming segment is appended to a growing buffer and the *whole*
    buffer is re-translated, so earlier output can be revised as more
    context arrives.  Whenever the translation changes, the new text and a
    character-level diff against the previous translation are printed.

    Args:
        input_stream: Iterable of text segments (e.g. single characters).
        src_lang: Source language code set on the M2M100 tokenizer.
        tgt_lang: Target language code used as the forced BOS token.
        model_name: Hugging Face model id to load (keyword-only; defaults
            to the previously hard-coded "facebook/m2m100_1.2B").
        delay: Seconds to sleep after each segment to simulate typing
            (keyword-only; defaults to the previously hard-coded 0.3).
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

    tokenizer.src_lang = src_lang

    buffer = ""
    prev_translation = ""

    for segment in input_stream:
        buffer += segment

        # Re-translate the entire buffer so the model sees full context.
        encoded = tokenizer(buffer, return_tensors="pt")
        # Inference only: no_grad avoids building an autograd graph,
        # saving memory without changing the generated output.
        with torch.no_grad():
            generated_tokens = model.generate(
                **encoded, forced_bos_token_id=tokenizer.get_lang_id(tgt_lang)
            )
        translated = tokenizer.batch_decode(
            generated_tokens, skip_special_tokens=True
        )[0]

        # Only report when the translation actually changed.
        if translated != prev_translation:
            print("\033[92m\n📝 新翻译:", translated, "\033[0m")
            print("🔁 变化:")
            # ndiff over two strings yields a character-level diff; show
            # only insertions ("+ ") and deletions ("- "), skipping
            # unchanged characters and "? " hint lines.
            for diff in difflib.ndiff(prev_translation, translated):
                if diff.startswith("+ ") or diff.startswith("- "):
                    print(diff)
            prev_translation = translated

        time.sleep(delay)  # simulate typing delay


if __name__ == '__main__':
    print("💬 输入模拟 (中文 -> 英文):")
    # Split the demo sentence into single characters to mimic a user
    # typing one character at a time.
    simulated_input = list("生活就像一盒巧克力，你永远不知道你会得到什么")
    translate_streaming(simulated_input, src_lang="zh", tgt_lang="en")
