#!/usr/bin/env python
# -*- coding: UTF-8 -*-  
import datetime # date/time handling

# Timestamp taken at process start; used below to report model-load time
time_start = datetime.datetime.now()

# About/credits dictionary, printed as a banner by version()
dic_note = {
"版权":["源码：丽垚人工智能工作室，第三方使用应遵循GPL协议。"],
"作者":["吉更"],
"初创时间":["2025年05月"],
"功能":["基于whisper模型fastapi框架的多态文件大模型语义处理web服务"],
}

# Version banner
def version(dic_p=None):
    """Print *dic_p* (label -> list of strings) as a banner to stdout.

    List punctuation ([ ] ' ,) is stripped from the repr of each value
    before printing. Returns None.
    """
    if dic_p is None:  # avoid the mutable-default-argument pitfall
        dic_p = {}

    print("-----------------------------")
    for key in dic_p:
        # Strip the list repr's brackets/quotes/commas (same char class as before,
        # written as a raw string).
        cleaned = re.sub(r"[\[\]',]", "", str(dic_p[key]))
        print("\n", key, " --- ", cleaned, "\n")
    print("-----------------------------")

from fastapi import FastAPI, File, Form, UploadFile
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles

import os
import sys
import re # 正则表达式
import json # json模块

# Local helper functions
# Elapsed time
def time_cost(start_time_c):
    """Return the wall-clock seconds elapsed since *start_time_c* as a float.

    Uses timedelta.total_seconds() instead of the old str(timedelta) parsing,
    which silently returned 0 for any delta of a day or more (the "1 day,"
    prefix broke float()) and swallowed every exception with a bare except.
    """
    return (datetime.datetime.now() - start_time_c).total_seconds()

import whisper

import torch

# Prefer GPU when available, otherwise CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# fp16 only makes sense on CUDA hardware.
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# Default model configuration
model_is = "medium"

model_is = "tiny" # debug override — TODO(review): remove to load the configured default

# os.path.join picks the correct separator on every platform, replacing the
# previous sys.platform branching (os.path.abspath("") is the current dir).
model_path = os.path.join(os.path.abspath(""), "model", model_is + ".pt")

model = whisper.load_model(model_path)

print("\n" + model_is + "模型加载耗时：",time_cost(time_start),"秒\n")


# Whisper main processing routine
def whisper_main(**kwargs):
    """Transcribe one media file with a Whisper model and persist the result.

    Keyword args:
        model:         model object to transcribe with (defaults to the module-level ``model``).
        model_is:      model name; for "tiny"/"base" the text is converted Traditional->Simplified.
        input_is:      input file name, used only when path_input is empty.
        output_is:     output file name; defaults to a timestamp-based ``.json`` name.
        path_input:    explicit input path (overrides input_is).
        path_output:   explicit output path (overrides output_is).
        output_txt_if: when True (default), write the full result dict to path_output.

    Returns:
        The transcript text (str).
    """
    time_begin = datetime.datetime.now()

    result_p = ""

    model_is = kwargs.get("model_is", "medium").strip()
    input_is = kwargs.get("input_is", "test.wav").strip()
    output_is = kwargs.get("output_is", "").strip()
    path_input = kwargs.get("path_input", "").strip()
    path_output = kwargs.get("path_output", "").strip()
    output_txt_if = kwargs.get("output_txt_if", True)

    # BUG FIX: the "model" kwarg used to be accepted but silently ignored
    # (the global model was always used). Fall back to the module-level model.
    model_obj = kwargs.get("model", model)

    # Resolve the input path; os.path.join replaces the old per-platform branching.
    if path_input == "":
        path_input = os.path.join(os.path.abspath(""), "input", input_is)

    print("\n输入路径：", path_input)

    # Main transcription call *important
    time_run = datetime.datetime.now()

    data_last = whisper.transcribe(model_obj, path_input)

    print("\n调用模型后处理耗时：", time_cost(time_run), "秒\n")

    # Final result handling: small models tend to emit Traditional Chinese.
    if model_is in ("tiny", "base"):
        from opencc import OpenCC

        # "t2s" converts Traditional -> Simplified Chinese.
        cc = OpenCC("t2s")
        result_p = cc.convert(data_last["text"])
    else:
        result_p = data_last["text"]

    print("\n大模型最后处理结果：", result_p, "\n")

    # BUG FIX: output_is used to be unconditionally overwritten with a
    # timestamp, discarding the caller's value; the timestamp is now only
    # the default when no name was supplied.
    if output_is == "":
        output_is = datetime.datetime.now().strftime("%Y%m%d%H%M%S") + ".json"

    if path_output == "":
        path_output = os.path.join(os.path.abspath(""), "output", output_is)

    print("\n结果输出路径：", path_output)

    # Persist the full result. json.dump (instead of str()) makes the .json
    # file actually parseable as JSON.
    if output_txt_if is True:
        with open(path_output, "w", encoding="utf-8", errors="ignore") as file:
            json.dump(data_last, file, ensure_ascii=False)

    print("\n大模型处理总计耗时：", time_cost(time_begin), "秒\n")

    return result_p

app = FastAPI()

# Serve static assets (page image, download results) under /statics.
app.mount("/statics", StaticFiles(directory="statics"), name="statics")

# Directory where upload chunks are staged before merging
UPLOAD_DIR = "statics/upload"

# exist_ok=True avoids the check-then-create race of the old
# os.path.exists() guard.
os.makedirs(UPLOAD_DIR, exist_ok=True)

@app.get("/", response_class=HTMLResponse)
async def main():
    """Serve the upload page: static header HTML, a <select> of available
    model files found in ./model, and the chunked-upload form + JS."""

    result_p = """
    
    <!DOCTYPE html>
    <html lang="zh-CN">
    
    <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <title>多态文件大模型语义处理</title>
    </head>
    
    <body>
    
    <center>
    
        <img src="./statics/img/main.jpg" border="0" alert="上传文件" style="margin-top: -15px; /* 将元素向上移动20px */">
        <br><div id="content_result" style=\"width:80%;\"></div>
        <form id="uploadForm">
            <br>
            

    
    """

    # Discover available *.pt model files to populate the <select>.
    files_list = []

    str_t = ""

    try:
        files_list = os.listdir("model")
    except OSError as e:
        # Missing/unreadable model dir: render the page without a model picker.
        # (Was `except BaseException` that also discarded the exception.)
        print("获取模型名错误：", e)

    if files_list:
        result_p += "模型：<select name=\"model_aim\" size=\"1\" id=\"id_model_is\"\n>"
        result_p += "<option value=\"\">选择</option>\n"

        for x in files_list:
            # endswith() replaces the old x[len(x)-3:] slice comparison.
            if x.endswith(".pt"):
                str_t += "<option value=\"" + x[:-3] + "\">" + x[:-3] + "</option>\n"

        if str_t.strip() != "":
            result_p += str_t
            result_p += "</select><br><br>\n"

    result_p += """
        
            (*.mp4,*.m4a,*.wav)
            <br><br>
            <input type="file" id="fileInput" name="file" required>
            <br><br>
            <button type="submit">开始上传</button>
            
        </form>

        <script>
        document.getElementById('uploadForm').addEventListener('submit', function(event) {
            
            event.preventDefault();

            const file = document.getElementById('fileInput').files[0];
            const chunkSize = 10 * 1024 * 1024;  // 1MB per chunk
            const totalChunks = Math.ceil(file.size / chunkSize);
            const filename = file.name;
            const model_aim = document.getElementById('id_model_is').value;
            
            let currentChunk = 0;

            function uploadChunk() {
            
                const chunkStart = currentChunk * chunkSize;
                const chunkEnd = Math.min(chunkStart + chunkSize, file.size);
                const chunk = file.slice(chunkStart, chunkEnd);

                const formData = new FormData();
                formData.append("chunk", chunk);
                formData.append("chunk_index", currentChunk);
                formData.append("total_chunks", totalChunks);
                formData.append("filename", filename);
                formData.append("model_aim", model_aim);

                fetch("/upload_chunk/", {
                    method: "POST",
                    body: formData
                })
                .then(response => response.json())
                .then(data => {
                    
                    //消息处理
                    document.getElementById("content_result").innerHTML = data.message;
                   
                    console.log(data.message);
                    
                    currentChunk++;
                    if (currentChunk < totalChunks + 1) {
                        uploadChunk(); // 递归上传下一个分片
                    } else {
                        console.log("所有分片上传完成！");
                    }
                })
                .catch(error => console.error("上传失败:", error));
            }

            uploadChunk(); // 开始上传第一个分片
        });
        
        </script>
        
        </center>
        
    </body>
    
    </html>
    
    """

    return result_p
    
@app.post("/upload_chunk/")
async def upload_chunk(

    chunk: UploadFile = File(...),
    chunk_index: int = Form(...),  # 0-based index of this chunk
    total_chunks: int = Form(...),  # number of real data chunks
    filename: str = Form(...),  # client-supplied file name
    model_aim: str = Form(...),  # model to (re)load, "" = keep current

):
    """Receive one upload chunk; merge on the last chunk; on the client's
    extra sentinel request (chunk_index == total_chunks) run transcription
    and return the result as HTML inside a JSON message."""
    print(" --- ", chunk_index, total_chunks, model_aim)

    message_last = ""

    # The loaded model object and its name are module-level state.
    global model

    global model_is

    # basename() strips any client-supplied directory components
    # (path-traversal guard for untrusted filenames).
    safe_name = os.path.basename(filename)

    # BUG FIX: part files were previously named with a fixed literal prefix,
    # so concurrent uploads of different files collided; namespace them by
    # the upload's own file name instead.
    chunk_path = os.path.join(UPLOAD_DIR, f"{safe_name}_part_{chunk_index}")

    with open(chunk_path, "wb") as f:

        f.write(await chunk.read())

    # All real chunks received: merge them into the final file.
    if chunk_index == total_chunks - 1:

        full_file_path = os.path.join(UPLOAD_DIR, safe_name)

        with open(full_file_path, "wb") as full_file:

            for i in range(total_chunks):

                part_path = os.path.join(UPLOAD_DIR, f"{safe_name}_part_{i}")

                with open(part_path, "rb") as part_file:

                    full_file.write(part_file.read())

                os.remove(part_path)  # delete the temp part once appended

            print("message : 文件上传成功！")

        message_last += "文件 " + filename + "上传成功！正在进行大模型处理..."
        return JSONResponse(content={"message": message_last})

    # Sentinel request after the merge: run the model on the merged file.
    if chunk_index == total_chunks:

        # BUG FIX: this request's (empty) part file was written above and
        # previously leaked on disk; remove it.
        if os.path.exists(chunk_path):
            os.remove(chunk_path)

        full_file_path = os.path.join(UPLOAD_DIR, safe_name)

        output_is = datetime.datetime.now().strftime("%Y%m%d%H%M%S") + ".json"

        # If a model was explicitly selected, reload it.
        if model_aim.strip() != "":

            reload_start = datetime.datetime.now()

            model_path = os.path.join(os.path.abspath(""), "model", model_aim + ".pt")

            # BUG FIX: load_model() was previously only executed inside the
            # non-linux branch, so Linux never actually reloaded the model.
            model = whisper.load_model(model_path)
            # Keep the global model name in sync with the loaded weights.
            model_is = model_aim.strip()

            # Time the reload itself (previously measured from server start).
            print("\n" + model_aim + "模型加载耗时：", time_cost(reload_start), "秒\n")

        # **Important: main transcription call.
        result_p = whisper_main(
            model=model,
            model_is=model_is,
            input_is=safe_name,
            output_is=output_is,
            path_input=full_file_path,
            path_output="statics/download/" + output_is,
            output_txt_if=True,
        )

        message_last += "<br><br><div style='width:512px;text-align:left;'>" + result_p.replace("\"", "") + "</div>"
        message_last += "<br><br><a href='./statics/download/" + output_is + "' target='_blank'>完整处理结果</a>"

        return JSONResponse(content={"message": message_last})

    print("i", chunk_index, total_chunks, "message : 分片上传成功，等待更多分片...")

    return JSONResponse(content={"message": "分片上传成功，等待更多分片..."})

if __name__ == "__main__":
    
    version(dic_p=dic_note) # print the version/credits banner
    
    # uvicorn imported lazily: only needed when run as a script
    import uvicorn
    
    # Serve on localhost only; change host to 0.0.0.0 to expose externally
    uvicorn.run(app, host="127.0.0.1", port=8000)
