#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import datetime # date/time module

# Record the moment the script starts (used for model-load timing report below)
time_start = datetime.datetime.now()

# Metadata notes printed by version() at startup (keys/values are user-facing strings)
dic_note = {
"版权":["源码：丽垚人工智能工作室，第三方使用应遵循GPL协议。"],
"作者":["吉更"],
"初创时间":["2025年05月"],
"功能":["whisper本地部署"],
}

#--------- 外部模块处理<<开始>> ---------#

#-----系统自带必备模块引用-----

import sys # 操作系统模块1
import os # 操作系统模块2
import re # 正则表达式
import json # json模块

#-----系统外部需安装库模块引用-----
#import torch
#from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
#from datasets import load_dataset

#-----DIY自定义库模块引用-----
sys.path.append("..")


#--------- 外部模块处理<<结束>> ---------#


#--------- 内部模块处理<<开始>> ---------#

# 命令行格式下，参数字典的生成。
# Build an argument dict from command-line tokens of the form "key<sep>value".
def args2dic_chat(list_p=None, fenge_p="="):
    """Parse shell-style ``key=value`` tokens into a dict.

    Args:
        list_p: list of argument strings (e.g. ``sys.argv[1:]``); ``None``
            is treated as an empty list (avoids the mutable-default pitfall).
        fenge_p: separator between key and value (previously ignored —
            the split was hard-coded to "=").

    Returns:
        dict mapping key -> value. Tokens without the separator are
        skipped (the original raised IndexError on them). Only the FIRST
        separator splits, so values may themselves contain it
        (e.g. "p=a=b" -> {"p": "a=b"}).
    """
    if list_p is None:
        list_p = []

    dic_p = {}

    for x in list_p:
        # maxsplit=1 keeps any further separators inside the value
        parts = x.split(fenge_p, 1)
        if len(parts) == 2:
            dic_p[parts[0]] = parts[1]

    return dic_p

# ---External parameter handling---

# Shell-mode argument parsing
dic_args = {} # argument dict
#print (type(sys.argv),sys.argv)
# Slice instead of `del sys.argv[0]` through an alias: the original
# mutated sys.argv globally, which can surprise other code/tools.
list_t = sys.argv[1:]
dic_args = args2dic_chat(list_p=list_t,fenge_p="=")

# Operating parameters (every CLI-supplied value arrives as a string)

action = dic_args.get("action","default").strip()
to_do = dic_args.get("to_do","default").strip()
model_is = dic_args.get("model_is","medium").strip()
input_is = dic_args.get("input_is","test.wav").strip()
output_is = dic_args.get("output_is","test.json").strip()
path_input = dic_args.get("path_input","").strip()
path_output = dic_args.get("path_output","").strip()
# NOTE(review): a CLI-supplied value is a string (e.g. "True"), never the
# bool True, so run_it's `output_txt_if is True` check then skips the file
# write; only the default (key absent) enables it — confirm intent.
output_txt_if = dic_args.get("output_txt_if",True)
model_path = dic_args.get("model_path","").strip()


# ---全局变量处理---


# ---本模块内部类或函数定义区

# 耗时
# Elapsed-time helper
def time_cost(start_time_c):
    """Return the elapsed wall-clock time since *start_time_c* in seconds.

    Args:
        start_time_c: a ``datetime.datetime`` marking the start of the span.

    Returns:
        float seconds. Uses ``timedelta.total_seconds()`` directly — the
        original parsed ``str(timedelta)`` by ":" which broke (returned 0
        via a bare except) for any span of one day or more, because the
        string then starts with "N day(s), ...".
    """
    end_time_c = datetime.datetime.now()  # end-of-span timestamp
    return (end_time_c - start_time_c).total_seconds()

# 版本说明
# Version/metadata banner
def version(dic_p=None):
    """Print each key of *dic_p* with its value, stripped of list punctuation.

    Args:
        dic_p: metadata dict such as ``dic_note`` (values are one-element
            lists of strings). ``None`` means empty (fixes the
            mutable-default-argument pitfall of ``dic_p={}``).

    Returns:
        None — output goes to stdout only.
    """
    if dic_p is None:
        dic_p = {}
    print ("-----------------------------")
    # Plain loop instead of a side-effect-only list comprehension.
    # The character class removes [ ] ' , left over from str(list).
    for x in dic_p:
        print("\n", x, " --- ", re.sub(r"[\[\]',]", "", str(dic_p[x])), "\n")
    print ("-----------------------------")

#*IMPORTANT: load the model before anything else so it stays resident in memory.

import whisper

import torch

# Pick GPU if available; dtype follows the same choice.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# Default model path: <cwd>/model/<model_is>.pt (separator per platform).
if (model_path == ""):

    part_t = "/model/" if sys.platform.startswith('linux') else "\\model\\"

    model_path = os.path.abspath("") + part_t + model_is + ".pt"

print("\n模型路径：" + model_path + "\n")

# Load the whisper checkpoint unless explicitly disabled via "nothing".
if (model_path not in ("", "nothing")):

    model = whisper.load_model(model_path)

else:

    model = "n/a"

print("\n模型加载耗时：",time_cost(time_start),"秒\n")
    
# Main worker subroutine
def run_it(**kwargs):
    """Run one processing pass selected by ``action``.

    Keyword Args:
        model: loaded whisper model object; "" aborts with an error string.
        action: "default" (transcribe one audio file) or "test"
            (experimental wav2vec2 loading path); anything else is a no-op.
        to_do: accepted but unused in this function.
        model_is: model size name; for "tiny"/"base" the transcript is
            converted Traditional -> Simplified Chinese via OpenCC.
        input_is: input file name used when path_input is "".
        output_is: accepted, but the default flow overwrites it with a
            timestamped name (see NOTE below).
        path_input / path_output: full paths; derived from cwd when "".
        output_txt_if: the raw result dict is written to path_output only
            when this is exactly the bool True.

    Returns:
        A status string ("默认处理结束", "测试结束", an error message, or ""
        for an unrecognized action).
    """
    
    result_p = ""
    
    model = kwargs.get("model","")
    
    # Guard: a real model instance must be supplied.
    if (model == ""):
    
        return "模型对象实例不能为空！"
     
    action = kwargs.get("action","default").strip()
    to_do = kwargs.get("to_do","default").strip()
    model_is = kwargs.get("model_is","medium").strip()
    input_is = kwargs.get("input_is","test.wav").strip()
    output_is = kwargs.get("output_is","test.json").strip()
    path_input = kwargs.get("path_input","").strip()
    path_output = kwargs.get("path_output","").strip()
    output_txt_if = kwargs.get("output_txt_if",True)
    
    print ("\n处理方式：",action)

    # Default flow: transcribe one audio file with whisper
    if (action == "default"):
    
        time_start = datetime.datetime.now()
        
        result_p = ""
        
        # Resolve the input path
        
        if (path_input == ""):
        
            if (sys.platform.startswith('linux')):
        
                path_input = os.path.abspath('') + "/input/" + input_is  # default: <cwd>/input/<input_is>
        
            else:
        
                path_input = os.path.abspath('') + "\\input\\" + input_is  # Windows variant of the same default
        
        print("\n输入路径：",path_input)
        

        # Invoke the model, then post-process
        
        time_run = datetime.datetime.now()
        
        # Main processing *important
        
        import whisper  # NOTE(review): already imported at module level; re-import is a cached no-op
        
        data_last = whisper.transcribe(model, path_input)
        
        print("\n调用模型后处理耗时：",time_cost(time_run),"秒\n")
        
        # Final result handling
        
        # Special case: small models emit Traditional Chinese characters
        if (model_is == "tiny" or model_is == "base"):
        
            from opencc import OpenCC
            
            # Build an OpenCC converter ("t2s" = Traditional -> Simplified; "s2t" would be the reverse)
            cc = OpenCC("t2s")
 
            # Convert the transcript to Simplified Chinese
            result_p = cc.convert(data_last["text"])
            
        else:
        
            result_p = data_last["text"]
        
        print("\n最后处理结果：",result_p,"\n")
        
        
        # Write the result (text dump of the whisper result dict, .json name)
        
        # NOTE(review): this overwrites the caller-supplied output_is with a
        # timestamped name unconditionally — confirm that is intended.
        output_is = datetime.datetime.now().strftime("%Y%m%d%H%M%S") + ".json"
        
        if (path_output == ""):
        
            if (sys.platform.startswith('linux')):
        
                path_output = os.path.abspath('') + "/output/" + output_is  # default: <cwd>/output/<timestamp>.json
        
            else:
        
                path_output = os.path.abspath('') + "\\output\\" + output_is  # Windows variant of the same default
        
        print("\n输出路径：",path_output)
        
        # Dump the raw result dict; only runs for the exact bool True
        if (output_txt_if is True):
            
            with open(path_output, 'w', encoding="utf-8", errors="ignore") as file:
            
                file.write(str(data_last))
        
        print("\n总计耗时：",time_cost(time_start),"秒")
        
        result_p = "默认处理结束"
        
    # Test flow (experimental; several stages are still placeholders)
    elif (action == "test"):
        
        time_start = datetime.datetime.now()
        
        result_p = ""
        
        import whisper
        
        import torch
        
        from transformers import Wav2Vec2ForCTC, Wav2Vec2Config
 
        # Load the model configuration
        # NOTE(review): hard-coded "\\" separators make this branch Windows-only
        config = Wav2Vec2Config.from_pretrained(os.path.abspath("") + "\\model_test\\config.json")
 
        # Build the model skeleton from the config
        model = Wav2Vec2ForCTC(config)
 
        # Load the weights into the skeleton
        model.load_state_dict(torch.load(os.path.abspath("") + "\\model_test\\model.bin", weights_only=False))
        
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        
        torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
        
        
        print("\n模型加载耗时：",time_cost(time_start),"秒")
        
        # Load input (placeholder — not implemented yet)
        
        time_input= datetime.datetime.now()
        

        
        print("\n加载输入耗时：",time_cost(time_input),"秒")
        
        # Invoke model / post-process (placeholder — not implemented yet)
        
        time_run = datetime.datetime.now()
        
        
        
        print("\n调用模型后处理耗时：",time_cost(time_run),"秒\n")
              
        print("\n最后处理结果：",result_p,"\n")
        
        print("\n总计耗时：",time_cost(time_start),"秒")
        
        result_p = "测试结束"
    
    return result_p # status string for the caller

#--------- 内部模块处理<<结束>> ---------#

#---------- 过程<<开始>> -----------#

# Procedure function
def main():
    """Top-level procedure: print the version banner, then run the pipeline."""

    # Step 1: show the version/metadata banner.
    version(dic_p=dic_note)

    # Step 2: hand all module-level CLI parameters to the worker.
    outcome = run_it(
        model=model,
        action=action,
        to_do=to_do,
        model_is=model_is,
        input_is=input_is,
        output_is=output_is,
        path_input=path_input,
        path_output=path_output,
        output_txt_if=output_txt_if,
    )

    print("\n独立模块调用结果：",outcome)

    # Step 3: wrap-up (nothing to release yet).

    # rs_basedata_mysql.close() # close database object
    
# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':

    main()
    
#---------- main procedure <<end>> -----------#