import os
import time
from typing import Union

from fastapi import FastAPI, File, UploadFile
from transformers import AutoTokenizer, TextStreamer
import whisper
import xfastertransformer

# FastAPI application instance; exposes the /uploadfile/ endpoint defined below.
app = FastAPI()

# Model/tokenizer locations for the GLM-4 chat model (xFasterTransformer weights
# and the original HF checkpoint for the tokenizer).
_GLM4_MODEL_PATH = "/mnt/data/glm-4-9b-chat-xft"
_GLM4_TOKEN_PATH = "/mnt/data/glm-4-9b-chat"

# Lazily-initialized singletons: loading a 9B model per request is prohibitively
# slow, so load once and reuse across calls.
_glm4_tokenizer = None
_glm4_model = None


def _load_glm4():
    """Load (once) and return the cached (tokenizer, model) pair."""
    global _glm4_tokenizer, _glm4_model
    if _glm4_tokenizer is None:
        _glm4_tokenizer = AutoTokenizer.from_pretrained(
            _GLM4_TOKEN_PATH, use_fast=False, padding_side="left", trust_remote_code=True
        )
    if _glm4_model is None:
        _glm4_model = xfastertransformer.AutoModel.from_pretrained(_GLM4_MODEL_PATH, dtype="bf16")
    return _glm4_tokenizer, _glm4_model


def query_glm4(prompt="你好，你是谁？"):
    """Run one generation with GLM-4 and return the completion text.

    Args:
        prompt: User prompt fed verbatim to the model.

    Returns:
        The generated text with the echoed prompt stripped off. If the model
        output does not contain the prompt verbatim, the full decoded output
        is returned instead of raising (the old ``split(prompt)[1]`` crashed
        with IndexError in that case).
    """
    tokenizer, model = _load_glm4()
    # Streams tokens to stdout as they are generated (skip_prompt=False keeps
    # the echoed prompt in the stream, matching previous behavior).
    streamer = TextStreamer(tokenizer, skip_special_tokens=True, skip_prompt=False)
    input_ids = tokenizer(prompt, return_tensors="pt", padding=False).input_ids
    generated_ids = model.generate(input_ids, max_length=2500, streamer=streamer)
    generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    # Keep only the completion that follows the first occurrence of the prompt.
    _, found, completion = generated_text.partition(prompt)
    return (completion if found else generated_text).strip("\n")

def summary(text):
    """Polish a lecture transcript and return a point-by-point summary.

    Builds a Chinese instruction prompt around the transcript and delegates
    generation to :func:`query_glm4`.
    """
    instruction = f"""# 指令
    我将给您提供一节课程的逐字稿，请你首先对这些内容做润色，使得这些内容可读性更好，最后帮我梳理和总结这些内容。要求分点总结。

    ## 逐字稿内容：
    {text}"""
    return query_glm4(instruction)

# Lazily-initialized Whisper model: loading the "large" checkpoint per request
# is very expensive, so load once and reuse it for every transcription.
_whisper_model = None


def _get_whisper_model():
    """Load (once) and return the cached Whisper 'large' model."""
    global _whisper_model
    if _whisper_model is None:
        _whisper_model = whisper.load_model("large")
    return _whisper_model


def mp32text(mp3_path):
    """Transcribe an audio file to text with Whisper.

    Args:
        mp3_path: Path to the audio file to transcribe.

    Returns:
        The transcription text (``result["text"]``).
    """
    # initial_prompt biases decoding toward "a complete classroom recording".
    result = _get_whisper_model().transcribe(mp3_path, initial_prompt="这是一段完整的老师讲课录音。")
    return result["text"]

@app.post("/uploadfile/")
async def create_upload_file(file: UploadFile = File(...)):
    """Accept an uploaded audio file, transcribe it, and return a summary.

    The upload is spooled to ./temp, transcribed with Whisper, summarized with
    GLM-4, and the temporary copy is removed afterwards — even when
    transcription or summarization raises.
    """
    # exist_ok avoids the exists()/makedirs() race of the previous version.
    os.makedirs('temp', exist_ok=True)
    # basename() strips any client-supplied directory components (path
    # traversal guard); filename may be None per the FastAPI/Starlette API.
    file_location = f"temp/{os.path.basename(file.filename or 'upload')}"

    with open(file_location, "wb") as f:
        f.write(await file.read())

    try:
        text = mp32text(file_location)
        print(f"语音转文本结果: {text}\n\n\n")

        content = summary(text)
        print(f"内容: {content}\n\n\n")
    finally:
        # Always delete the temp file, even if transcription/summary failed.
        os.remove(file_location)

    return {"content": content}

