from flask import Flask,render_template,request,url_for,redirect,jsonify
from pathlib import Path
import os
from pyaudio import PyAudio,paInt16
import numpy as np
import time
import wave
import subprocess
from threading import Thread,Lock
from pydub import AudioSegment
from pydub.playback import play
import json
import requests
import whisper
import zhconv
import json
import pyperclip
from vosk import Model,KaldiRecognizer
# import sys
# sys.path.append('../ASRT_SpeechRecognition')
# from speech_model import ModelSpeech
# from model_zoo.speech_model.keras_backend import SpeechModel251BN
# from speech_features import Spectrogram
# from language_model3 import ModelLanguage

# Working directories: 'trans' holds transcripts, 'wavs' holds recordings.
# makedirs(exist_ok=True) is race-free compared to exists()+mkdir().
os.makedirs('trans',exist_ok=True)
os.makedirs('wavs',exist_ok=True)

app=Flask(__name__)
print('本进程pid',os.getpid())

# True while a transcription is running; guarded by `lock`.
bTranscribed=False
lock=Lock()

# The active ASR backend name ('whisper' or 'vosk') is persisted in
# backend.txt so it survives restarts; default to whisper on first run.
if not os.path.exists('backend.txt'):
    backend='whisper'
    with open('backend.txt','w') as f:
        f.write('whisper')
else:
    with open('backend.txt','r') as f:
        # strip() guards against a trailing newline/whitespace in the file,
        # which would otherwise match neither backend and leave model=None.
        backend=f.read().strip()
model=None
if backend=='whisper':
    model=whisper.load_model('/home/tellw/models/whisper/large-v3-turbo.pt')
elif backend=='vosk':
    model=Model("../models/vosk-model-cn-0.22")

# old_cwd=os.getcwd()
# os.chdir('../ASRT_SpeechRecognition')

# os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# AUDIO_LENGTH = 1600
# AUDIO_FEATURE_LENGTH = 200
# CHANNELS = 1
# # 默认输出的拼音的表示大小是1428，即1427个拼音+1个空白块
# OUTPUT_SIZE = 1429
# sm251bn = SpeechModel251BN(
#     input_shape=(AUDIO_LENGTH, AUDIO_FEATURE_LENGTH, CHANNELS),
#     output_size=OUTPUT_SIZE
# )
# feat = Spectrogram()
# ms = ModelSpeech(sm251bn, feat, max_label_length=64)

# ms.load_model('save_models/' + sm251bn.get_model_name() + '.model.h5')

# ml = ModelLanguage('model_language')
# ml.load_model()

# os.chdir(old_cwd)

# def asrt_infer(ms,ml,wav_id):
#     old_cwd=os.getcwd()
#     os.chdir('../../ASRT_SpeechRecognition')
#     res = ms.recognize_speech_from_file(f'../speech_input/wavs/{wav_id}.wav')
#     print('*[提示] 声学模型语音识别结果：\n', res)
#     res = ml.pinyin_to_text(res)
#     os.chdir(old_cwd)
#     with open(f'{wav_id}.txt','w',encoding='utf8') as f:
#         f.write(res)
#     return res

class recorder:
    """Captures mono 16 kHz, 16-bit PCM audio from the default input device."""

    NUM_SAMPLES=2000      # frames read per buffer
    SAMPLING_RATE=16000   # Hz; must match what the ASR models expect
    voice_string=[]       # raw PCM chunks captured so far
    does=False            # True while the capture loop should keep running

    def start(self):
        """Capture audio in a blocking loop until finish() clears self.does.

        Intended to run on a background thread; appends raw 16-bit PCM
        byte chunks to self.voice_string.
        """
        self.does=True
        self.voice_string=[]
        pa=PyAudio()
        stream=pa.open(format=paInt16,channels=1,rate=self.SAMPLING_RATE,input=True,frames_per_buffer=self.NUM_SAMPLES)
        try:
            while self.does:
                self.voice_string.append(stream.read(self.NUM_SAMPLES))
        finally:
            # Release the audio device even if a read raises; the original
            # code leaked the stream and the PyAudio instance.
            stream.stop_stream()
            stream.close()
            pa.terminate()

    def finish(self):
        """Stop recording and write the captured audio to wavs/<id>.wav.

        Returns:
            (wav_id, wav_duration): timestamp-based file id and the clip
            length in seconds.

        NOTE(review): this only flips the flag; it does not join the
        recording thread, so a final chunk may still be appended while the
        file is written — confirm whether that race matters here.
        """
        self.does=False
        wav_id=time.strftime('%y%m%d%H%M%S')
        with wave.open(os.path.join('wavs',f'{wav_id}.wav'),'wb') as wf:
            wf.setnchannels(1)
            wf.setsampwidth(2)   # 16-bit samples
            wf.setframerate(self.SAMPLING_RATE)
            # b''.join concatenates chunks of any length; the previous
            # np.array(...).tobytes() relied on every chunk having the same
            # fixed byte width.
            wf.writeframes(b''.join(self.voice_string))
        with wave.open(f'wavs/{wav_id}.wav','rb') as wav:
            wav_duration=wav.getnframes()/self.SAMPLING_RATE
        return wav_id,wav_duration

rec=recorder()  # single shared recorder instance used by the /record and /end routes

@app.route('/')
def index():
    """Render the paginated list of recordings with transcripts and durations.

    Query args: page (0-based), notice (status banner text), wav_id and
    wav_duration (highlight a just-finished recording).
    """
    page=int(request.args.get('page',0))
    notice=request.args.get('notice',default='')
    wav_id=request.args.get('wav_id')
    wav_duration=float(request.args.get('wav_duration',default=-1))
    all_entries=sorted(Path('wavs').iterdir(),key=os.path.getmtime,reverse=True)
    per_page=10
    total_pages=(len(all_entries)+per_page-1)//per_page
    transcript_filenames=os.listdir('trans')
    # Window of up to 5 page links centred on the current page.
    if page>=2:
        page_range=list(range(total_pages))[page-2:min(page+3,total_pages)]
    else:
        page_range=list(range(min(page+3,total_pages)))
    # Always include the first and last page links — but only when pages
    # exist at all; the original appended a bogus page -1 for an empty dir.
    if total_pages>0:
        if 0 not in page_range:
            page_range.insert(0,0)
        if total_pages-1 not in page_range:
            page_range.append(total_pages-1)
    entries=all_entries[page*per_page:(page+1)*per_page]
    names=[]
    transcripts=[]
    wav_durations=[]
    # data.json caches wav durations so headers are parsed only once.
    if not os.path.exists('data.json'):
        with open('data.json','w',encoding='utf8') as f:
            json.dump({},f,ensure_ascii=False,indent=4)
    with open('data.json','r',encoding='utf8') as f:
        data=json.load(f)
    for entry in entries:
        name=entry.stem
        names.append(name)
        if name not in data:
            # Write straight into the cache; the original reused the
            # `wav_duration` request variable here, clobbering the value
            # passed to the template for the just-recorded clip.
            with wave.open(f'wavs/{name}.wav','rb') as wav:
                data[name]=wav.getnframes()/wav.getframerate()
        wav_durations.append(data[name])
        if f'{name}.txt' in transcript_filenames:
            with open(os.path.join('trans',f'{name}.txt'),'r',encoding='utf8') as f:
                transcripts.append(f.read())
        else:
            transcripts.append('暂未转录')
    with open('data.json','w',encoding='utf8') as f:
        json.dump(data,f,ensure_ascii=False,indent=4)
    return render_template('index.html',page=page,entries=entries,names=names,page_range=page_range,transcripts=transcripts,total_pages=total_pages,notice=notice,wav_id=wav_id,wav_duration=wav_duration,wav_durations=wav_durations,backend=backend)

@app.route('/record')
def record():
    """Kick off background audio capture and show the recording page."""
    Thread(target=rec.start,daemon=True).start()
    return render_template('record.html')

@app.route('/end')
def end():
    """Stop the recorder and return to the index with the new clip's id."""
    new_id,new_duration=rec.finish()
    return redirect(url_for('.index',wav_id=new_id,wav_duration=new_duration))

@app.route('/play_wav/<wav_id>')
def play_wav(wav_id):
    """Play the requested recording through the server's speakers, then go home."""
    clip=AudioSegment.from_wav(os.path.join('wavs',f'{wav_id}.wav'))
    play(clip)
    return redirect(url_for('.index'))

def transcri(wav_id):
    """Transcribe wavs/<wav_id>.wav with the currently loaded backend.

    Writes the transcript to <wav_id>.txt in the current directory and
    returns it.  NOTE(review): all paths are relative ('../wavs/...'), so
    the caller is expected to chdir into 'trans' first — confirm before
    reusing elsewhere.  Returns None for an unrecognized backend.
    """
    # Other engines that were tried and kept for reference:
    # 1. whisper CLI — high latency:
    #    subprocess.run(f'whisper ../wavs/{wav_id}.wav --language Chinese --output_format txt')
    # 3. whisper.cpp via HTTP:
    #    json.loads(requests.get(f'http://localhost:5004/index/{wav_id}').text)['data']
    # 4. ASRT_SpeechRecognition: asrt_infer(ms,ml,wav_id)
    if backend=='whisper':
        # 2. whisper python API — best quality, only practical on a GPU.
        result=model.transcribe(f'../wavs/{wav_id}.wav',language='Chinese')
        # Normalize traditional characters to simplified Chinese.
        result=zhconv.convert(result['text'],'zh-hans')
        with open(f'{wav_id}.txt','w',encoding='utf8') as f:
            f.write(result)
        return result
    elif backend=='vosk':
        # 5. vosk — comparatively fast on a CPU.
        trans=''
        # Context manager closes the wav even if recognition raises;
        # 'recognizer' deliberately avoids reusing the name 'rec', which is
        # the module-level recorder instance.
        with wave.open(f'../wavs/{wav_id}.wav','rb') as wf:
            recognizer=KaldiRecognizer(model,wf.getframerate())
            recognizer.SetWords(True)
            while True:
                data=wf.readframes(4000)
                if len(data)==0:
                    break
                if recognizer.AcceptWaveform(data):
                    piece=json.loads(recognizer.Result())
                    if 'text' in piece:
                        trans+=piece['text']
            final=json.loads(recognizer.FinalResult())
            if 'text' in final:
                trans+=final['text']
        # vosk separates tokens with spaces; collapse them for Chinese text.
        trans=''.join(trans.strip().split())
        with open(f'{wav_id}.txt','w',encoding='utf8') as f:
            f.write(trans)
        return trans

@app.route('/transcribe/<wav_id>')
def transcribe(wav_id):
    """Transcribe a recording, copy the text to the clipboard, and redirect.

    NOTE(review): the busy flag fetched over HTTP is immediately overridden
    with False below, so the 'transcribing' branch is currently dead —
    confirm whether the remote check should be honoured or removed.
    """
    bt=json.loads(requests.get('http://localhost:5003/b_transcribed').text)['data']
    print(bt)
    bt=False
    wav_duration=float(request.args.get('wav_duration',default=-1))
    if not bt:
        # Serialize transcriptions: transcri() chdirs and uses the shared model.
        with lock:
            global bTranscribed
            bTranscribed=True
            st=time.time()
            os.chdir('trans')
            trans=transcri(wav_id)
            pyperclip.copy(trans)
            os.chdir('..')
            duration=time.time()-st
            if wav_duration==-1:
                # Duration wasn't passed in; read it from the wav header.
                with wave.open(f'wavs/{wav_id}.wav','rb') as wav:
                    wav_duration=wav.getnframes()/wav.getframerate()
            duration_str=f'wavs/{wav_id}.wav {duration} {wav_duration}'
            print(duration_str)
            with open('logs','a',encoding='utf8') as f:
                # Trailing newline: entries previously ran together on one line.
                f.write(duration_str+'\n')
            bTranscribed=False
    else:
        return 'transcribing'
    return redirect(url_for('.index',notice=duration_str))

@app.route('/transcribe_json/<wav_id>')
def transcribe_json(wav_id):
    """JSON variant of /transcribe: returns {'notice': ..., 'trans': ...}.

    NOTE(review): as in /transcribe, the busy flag fetched over HTTP is
    immediately overridden with False, so the 'transcribing' branch is
    currently dead — confirm the intended behaviour.
    """
    bt=json.loads(requests.get('http://localhost:5003/b_transcribed').text)['data']
    print(bt)
    bt=False
    wav_duration=float(request.args.get('wav_duration',default=-1))
    if not bt:
        # Serialize transcriptions: transcri() chdirs and uses the shared model.
        with lock:
            global bTranscribed
            bTranscribed=True
            st=time.time()
            os.chdir('trans')
            trans=transcri(wav_id)
            pyperclip.copy(trans)
            os.chdir('..')
            duration=time.time()-st
            if wav_duration==-1:
                # Duration wasn't passed in; read it from the wav header.
                with wave.open(f'wavs/{wav_id}.wav','rb') as wav:
                    wav_duration=wav.getnframes()/wav.getframerate()
            duration_str=f'wavs/{wav_id}.wav {duration} {wav_duration}'
            print(duration_str)
            with open('logs','a',encoding='utf8') as f:
                # Trailing newline: entries previously ran together on one line.
                f.write(duration_str+'\n')
            bTranscribed=False
    else:
        return jsonify({'notice':'transcribing','trans':'暂未转录'})
    return jsonify({'notice':duration_str,'trans':trans})

@app.route('/delete/<wav_id>')
def dele(wav_id):
    """Delete a recording, its transcript, and its cached-duration entry.

    Guards make deletion idempotent: a repeated request (double click) or a
    missing data.json no longer raises FileNotFoundError.
    """
    wav_path=f'wavs/{wav_id}.wav'
    if os.path.exists(wav_path):
        os.remove(wav_path)
    trans_path=f'trans/{wav_id}.txt'
    if os.path.exists(trans_path):
        os.remove(trans_path)
    if os.path.exists('data.json'):
        with open('data.json','r',encoding='utf8') as f:
            data=json.load(f)
        if wav_id in data:
            del data[wav_id]
            with open('data.json','w',encoding='utf8') as f:
                json.dump(data,f,ensure_ascii=False,indent=4)
    return redirect(url_for('.index'))

@app.route('/b_transcribed')
def b_transcribed():
    """Report whether a transcription is currently in progress."""
    # Reading a module-level name needs no `global` declaration.
    return jsonify({'data':bTranscribed})

@app.route('/set_backend/<backen>')
def set_backend(backen):
    """Switch the ASR backend ('whisper' or 'vosk') and reload its model.

    Persists the choice to backend.txt so it survives restarts.  Unknown
    names are rejected: previously they were persisted while the old model
    stayed loaded, leaving `backend` and `model` inconsistent.
    """
    global backend,model
    if backen not in ('whisper','vosk'):
        return 'unknown backend'
    backend=backen
    with open('backend.txt','w') as f:
        f.write(backend)
    if backend=='whisper':
        model=whisper.load_model('/home/tellw/models/whisper/large-v3-turbo.pt')
    elif backend=='vosk':
        model=Model("../models/vosk-model-cn-0.22")
    return 'ok'

# Listen on all interfaces; port 5003 is also hard-coded in this file's
# self-requests to /b_transcribed, so keep them in sync.
app.run(host='0.0.0.0',port=5003,debug=False)