import time
from datetime import datetime
import json
import os
import uuid

import websockets
import numpy as np
import argparse
import ssl
from funasr.utils.postprocess_utils import rich_transcription_postprocess
from modelscope import pipeline
from whisper_live.transcriber import WhisperModel
from whisper_live.server import ServeClientFasterWhisper
from whisper_live.client import TranscriptionClient
from aiohttp import web
import ffmpeg

# ---------------------------------------------------------------------------
# Command-line configuration for the realtime ASR server.
# NOTE(review): --httpport is parsed but never used below; only --port is bound.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("--host",
                    type=str,
                    default="0.0.0.0",
                    required=False,
                    help="host ip, localhost, 0.0.0.0")
# NOTE(review): help text says "grpc" but this port serves the aiohttp websocket/HTTP app.
parser.add_argument("--port",
                    type=int,
                    default=10095,
                    required=False,
                    help="grpc server port")
parser.add_argument("--httpport",
                    type=int,
                    default=10094,
                    required=False,
                    help="http server port")
# Spoken-language-identification model (modelscope pipeline id).
parser.add_argument("--lang_model",
                    type=str,
                    default="damo/speech_campplus_five_lre_16k",
                    help="model from modelscope")
parser.add_argument("--lang_model_revision",
                    type=str,
                    default="v1.0.1",
                    help="")
# NOTE(review): this is a local faster-whisper checkpoint path, not a modelscope id,
# despite the help text; the default is machine-specific.
parser.add_argument("--whisper_model",
                    type=str,
                    default="C:/Users/HP/.cache/huggingface/hub/faster_whisper",
                    help="model from modelscope")
# Offline (second-pass) Paraformer ASR model.
parser.add_argument("--asr_model",
                    type=str,
                    default="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch",
                    help="model from modelscope")
parser.add_argument("--asr_model_revision",
                    type=str,
                    default="v2.0.4",
                    help="")
# Streaming Paraformer model used for low-latency partial results.
parser.add_argument("--asr_model_online",
                    type=str,
                    default="damo/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-online",
                    help="model from modelscope")
parser.add_argument("--asr_model_online_revision",
                    type=str,
                    default="v2.0.4",
                    help="")
# FSMN voice-activity-detection model for utterance segmentation.
parser.add_argument("--vad_model",
                    type=str,
                    default="damo/speech_fsmn_vad_zh-cn-16k-common-pytorch",
                    help="model from modelscope")
parser.add_argument("--vad_model_revision",
                    type=str,
                    default="v2.0.4",
                    help="")
# Punctuation-restoration model; pass an empty string to disable.
parser.add_argument("--punc_model",
                    type=str,
                    default="damo/punc_ct-transformer_zh-cn-common-vad_realtime-vocab272727",
                    help="model from modelscope")
parser.add_argument("--punc_model_revision",
                    type=str,
                    default="v2.0.4",
                    help="")
# Hardware selection for the FunASR models.
parser.add_argument("--ngpu",
                    type=int,
                    default=1,
                    help="0 for cpu, 1 for gpu")
parser.add_argument("--device",
                    type=str,
                    default="cuda",
                    help="cuda, cpu")
parser.add_argument("--ncpu",
                    type=int,
                    default=4,
                    help="cpu cores")
# TLS: the app is served over HTTPS/WSS whenever certfile is non-empty (see bottom of file).
parser.add_argument("--certfile",
                    type=str,
                    default="../../ssl_key/server.crt",
                    required=False,
                    help="certfile for ssl")

parser.add_argument("--keyfile",
                    type=str,
                    default="../../ssl_key/server.key",
                    required=False,
                    help="keyfile for ssl")
# Working directory for uploaded audio files and upload chunks.
parser.add_argument("--temp_dir",
                    type=str,
                    default="temp_dir",
                    required=False,
                    help="temp dir")
args = parser.parse_args()


# Currently-connected websocket clients (module-global).  Model caches are
# shared, which is why the banner below warns about single-client use.
websocket_users = set()

print("model loading")
from funasr import AutoModel

# Offline (second-pass) ASR model: non-streaming Paraformer.
model_asr = AutoModel(model=args.asr_model,
                      model_revision=args.asr_model_revision,
                      ngpu=args.ngpu,
                      ncpu=args.ncpu,
                      device=args.device,
                      disable_pbar=True,
                      disable_log=True,
                      disable_update=True
                      )
# Streaming ASR model for low-latency partial results (incremental cache).
model_asr_streaming = AutoModel(model=args.asr_model_online,
                                model_revision=args.asr_model_online_revision,
                                ngpu=args.ngpu,
                                ncpu=args.ncpu,
                                device=args.device,
                                disable_pbar=True,
                                disable_log=True,
                                disable_update=True
                                )
# Voice-activity-detection model used to find utterance boundaries.
model_vad = AutoModel(model=args.vad_model,
                      model_revision=args.vad_model_revision,
                      ngpu=args.ngpu,
                      ncpu=args.ncpu,
                      device=args.device,
                      disable_pbar=True,
                      disable_log=True,
                      # chunk_size=60,
                      disable_update=True
                      )

# Optional punctuation-restoration model (skipped when --punc_model is "").
if args.punc_model != "":
	model_punc = AutoModel(model=args.punc_model,
	                       model_revision=args.punc_model_revision,
	                       ngpu=args.ngpu,
	                       ncpu=args.ncpu,
	                       device=args.device,
	                       disable_pbar=True,
	                       disable_log=True,
	                       disable_update=True
	                       )
else:
	model_punc = None

# Spoken-language-identification pipeline (loaded but currently only used in
# commented-out code below).
model_lang = pipeline(
					task='speech-language-recognition',
					model=args.lang_model,
					model_revision=args.lang_model_revision,
					disable_update=True
				)

# faster-whisper model handling all non-Chinese transcription.
transcriber = WhisperModel(
            model_size_or_path=args.whisper_model,
            device="auto",
            compute_type="default",
            local_files_only=False,
        )

print("model loaded! only support one client at the same time now!!!!")

async def ws_reset(websocket):
    """Flush a client's streaming recognizer caches and close its socket."""
    print("ws reset now, total num is ", len(websocket_users))

    # Mark both streaming models as finished and drop their incremental caches.
    for state in (websocket.status_dict_asr_online, websocket.status_dict_vad):
        state["cache"] = {}
        state["is_final"] = True
    websocket.status_dict_punc["cache"] = {}

    await websocket.close()


async def clear_websocket():
    """Close and unregister every client whose online-ASR stream has finished."""
    for ws in list(websocket_users):
        if ws.status_dict_asr_online["is_final"]:
            await ws_reset(ws)
            websocket_users.discard(ws)



async def ws_serve(request):
    """aiohttp websocket handler implementing the funasr "2pass" protocol.

    Clients interleave JSON config frames (mode / chunk_size / is_speaking / ...)
    with binary PCM frames (16 kHz, 16-bit mono — assumed from the 32 bytes/ms
    maths below; TODO confirm against the client).  Streaming partials are pushed
    via async_asr_online(); when VAD detects the end of an utterance the whole
    segment is re-decoded via async_asr() for the final result.

    Fixes vs. the previous version:
    - websocket.is_speaking initialized (was read before any config message arrived);
    - speech_start_i initialized and reset when VAD fails (was undefined/stale);
    - clients are always removed from websocket_users on disconnect (aiohttp does
      not raise websockets.ConnectionClosed, so the old removal path never ran);
    - vad chunk_size is only derived once the client has configured chunk_size.
    """
    websocket = web.WebSocketResponse()
    await websocket.prepare(request)

    frames = []             # rolling buffer of recent PCM chunks (pre-roll source)
    frames_asr = []         # chunks of the current VAD speech segment (offline pass)
    frames_asr_online = []  # chunks accumulated for the next streaming inference
    global websocket_users
    await clear_websocket()
    websocket_users.add(websocket)
    # Per-connection recognizer state lives on the socket object.
    websocket.status_dict_asr = {}
    websocket.status_dict_asr_online = {"cache": {}, "is_final": False}
    websocket.status_dict_vad = {'cache': {}, "is_final": False}
    websocket.status_dict_punc = {'cache': {}}
    websocket.chunk_interval = 10
    websocket.vad_pre_idx = 0
    speech_start = False
    speech_start_i = -1  # fix: was undefined until the first successful VAD call
    speech_end_i = -1
    websocket.wav_name = "microphone"
    websocket.mode = "2pass"
    websocket.lang = "zh"
    websocket.is_speaking = True  # fix: read below before any config frame arrives
    print(f"new user connected, nums={len(websocket_users)}", flush=True)

    try:
        async for msg in websocket:
            message = msg.data
            if isinstance(message, str):
                # JSON configuration / control frame.
                messagejson = json.loads(message)

                if "is_speaking" in messagejson:
                    websocket.is_speaking = messagejson["is_speaking"]
                    websocket.status_dict_asr_online["is_final"] = not websocket.is_speaking
                if "chunk_interval" in messagejson:
                    websocket.chunk_interval = messagejson["chunk_interval"]
                if "wav_name" in messagejson:
                    websocket.wav_name = messagejson.get("wav_name")
                if "chunk_size" in messagejson:
                    websocket.status_dict_asr_online["chunk_size"] = messagejson["chunk_size"]
                if "encoder_chunk_look_back" in messagejson:
                    websocket.status_dict_asr_online["encoder_chunk_look_back"] = messagejson["encoder_chunk_look_back"]
                if "decoder_chunk_look_back" in messagejson:
                    websocket.status_dict_asr_online["decoder_chunk_look_back"] = messagejson["decoder_chunk_look_back"]
                if "hotword" in messagejson:
                    websocket.status_dict_asr["hotword"] = messagejson["hotword"]
                if "mode" in messagejson:
                    websocket.mode = messagejson["mode"]
                if "lang" in messagejson:
                    websocket.lang = messagejson["lang"]
            # chunk_size is the streaming-latency configuration: [0,10,5] means a
            # display granularity of 10*60=600ms with 5*60=300ms lookahead; each
            # inference input is 600ms.  is_final=True must be set on the last
            # segment to force out the final word.
            if "chunk_size" in websocket.status_dict_asr_online:  # fix: KeyError before config
                websocket.status_dict_vad["chunk_size"] = int(
                    websocket.status_dict_asr_online["chunk_size"][1] * 60 / websocket.chunk_interval)
            if len(frames_asr_online) > 0 or len(frames_asr) > 0 or not isinstance(message, str):
                if not isinstance(message, str):
                    frames.append(message)
                    duration_ms = len(message) // 32  # 16 kHz * 16-bit mono => 32 bytes/ms
                    websocket.vad_pre_idx += duration_ms

                    # Streaming (online) pass.
                    frames_asr_online.append(message)
                    websocket.status_dict_asr_online["is_final"] = speech_end_i != -1
                    if len(frames_asr_online) % websocket.chunk_interval == 0 or websocket.status_dict_asr_online["is_final"]:
                        if websocket.mode == "2pass" or websocket.mode == "online":
                            audio_in = b"".join(frames_asr_online)
                            try:
                                await async_asr_online(websocket, audio_in)
                            except Exception as e:
                                print("error in async_asr_online, Exception:", e)
                        frames_asr_online = []
                    if speech_start:
                        frames_asr.append(message)
                    # VAD: locate utterance start/end points in the stream (ms).
                    try:
                        speech_start_i, speech_end_i = await async_vad(websocket, message)
                    except Exception:
                        print("error in vad")
                        speech_start_i, speech_end_i = -1, -1  # fix: don't reuse stale points
                    if speech_start_i != -1:
                        speech_start = True
                        # Back-fill the pre-roll frames recorded before VAD fired.
                        beg_bias = (websocket.vad_pre_idx - speech_start_i) // duration_ms
                        frames_pre = frames[-beg_bias:]
                        frames_asr = []
                        frames_asr.extend(frames_pre)
                # Offline pass at utterance end, or when the client stops speaking.
                if speech_end_i != -1 or not websocket.is_speaking:
                    if websocket.mode == "2pass" or websocket.mode == "offline":
                        audio_in = b"".join(frames_asr)
                        try:
                            await async_asr(websocket, audio_in)
                        except Exception as e:
                            print("error in asr offline, Exception:", e)
                    frames_asr = []
                    speech_start = False
                    frames_asr_online = []
                    websocket.status_dict_asr_online["cache"] = {}
                    if not websocket.is_speaking:
                        websocket.vad_pre_idx = 0
                        frames = []
                        websocket.status_dict_vad["cache"] = {}
                    else:
                        frames = frames[-20:]  # keep a short pre-roll for the next segment

    except websockets.ConnectionClosed:
        # NOTE(review): aiohttp never raises websockets.* exceptions — a normal
        # disconnect simply ends the async-for loop.  Kept for compatibility.
        print("ConnectionClosed...", websocket_users, flush=True)
        await ws_reset(websocket)
    except websockets.InvalidState:
        print("InvalidState...")
    except Exception as e:
        print("Exception:", e)
    finally:
        websocket_users.discard(websocket)  # fix: drop the client on every exit path
    return websocket


async def async_vad(websocket, audio_in):
    """Feed one PCM chunk to the streaming VAD model.

    Returns a (speech_start, speech_end) pair as reported by the model; -1 in
    either slot means that boundary was not detected in this chunk.  Chunks
    yielding zero or multiple segments are treated as "no boundary".
    """
    segments_result = model_vad.generate(input=audio_in, **websocket.status_dict_vad)[0]["value"]

    # Only a single unambiguous segment per chunk is usable.
    if len(segments_result) != 1:
        return -1, -1
    segment = segments_result[0]
    return segment[0], segment[1]

def bytes_to_float_array(audio_bytes):
    """Decode 16-bit signed PCM bytes into normalized float32 samples.

    Args:
        audio_bytes (bytes): raw audio, 16-bit PCM.

    Returns:
        np.ndarray: float32 samples in [-1, 1), each value divided by 32768.
    """
    # Multiplying by the exact power-of-two reciprocal equals dividing by 32768.
    scale = np.float32(1.0 / 32768.0)
    return np.frombuffer(audio_bytes, dtype=np.int16).astype(np.float32) * scale

def segment_to_dict(segment):
    """Project a transcription segment onto a JSON-serializable dict."""
    fields = ("id", "start", "end", "text")
    return {name: getattr(segment, name) for name in fields}

def process_segments(segments):
    """Collect recognized text from an iterable of faster-whisper segments.

    Segments whose no-speech probability exceeds 0.45 are treated as silence
    and skipped.

    Fix: faster-whisper's ``transcribe()`` returns a *lazy generator*, and the
    previous ``len(segments) > 0`` check raised TypeError on it, breaking every
    whisper code path.  Iterating directly works for generators and lists alike
    (an empty iterable simply yields no items).

    Args:
        segments: iterable of segment objects (or None), each with
            ``no_speech_prob``, ``id``, ``start``, ``end`` and ``text``.

    Returns:
        dict: {"text": concatenated text, "segments": list of segment dicts}.
    """
    text_parts = []
    segs = []
    if segments is not None:
        for seg in segments:
            if seg.no_speech_prob > 0.45:
                continue  # likely non-speech; drop it
            text_parts.append(seg.text)
            segs.append(segment_to_dict(seg))
    return {'text': ''.join(text_parts), 'segments': segs}

async def async_asr(websocket, audio_in):
    """Run the offline (second-pass) recognizer on a completed speech segment.

    Chinese (lang in {None, '', 'zh'}) goes through the Paraformer model plus
    optional punctuation restoration; other languages go through faster-whisper.
    When any text is produced, a JSON frame {"mode", "text", "wav_name",
    "is_final"} is pushed to the client.
    """
    if len(audio_in) == 0:
        return

    use_paraformer = websocket.lang is None or websocket.lang in ['', "zh"]
    if use_paraformer:
        rec_result = model_asr.generate(input=audio_in, **websocket.status_dict_asr)[0]
        if model_punc is not None and len(rec_result["text"]) > 0:
            # Restore punctuation on the raw transcript.
            rec_result = model_punc.generate(input=rec_result['text'], **websocket.status_dict_punc)[0]
    else:
        segments, info = transcriber.transcribe(
            bytes_to_float_array(audio_in),
            initial_prompt=None,
            language=websocket.lang,
            task="transcribe",
            vad_filter=True,
            vad_parameters={"threshold": 0.5})
        rec_result = process_segments(segments)

    print("offline_asr, ", rec_result)
    if len(rec_result["text"]) == 0:
        return
    mode = "2pass-offline" if "2pass" in websocket.mode else websocket.mode
    payload = {"mode": mode, "text": rich_transcription_postprocess(rec_result["text"]), "wav_name": websocket.wav_name, "is_final": websocket.is_speaking}
    await websocket.send_str(json.dumps(payload))


async def async_asr_online(websocket, audio_in):
	"""Run one streaming-recognition step and push the partial result.

	For Chinese (lang in {None, '', 'zh'}) the funasr streaming Paraformer is
	used with its incremental cache held in ``websocket.status_dict_asr_online``;
	otherwise the chunk is transcribed standalone with faster-whisper.  Any
	produced text is sent to the client as a JSON frame
	{"mode", "text", "wav_name", "is_final"}.
	"""
	# print(len(audio_in))
	if len(audio_in) > 0:
		# print(websocket.status_dict_asr_online.get("is_final", False))
		if websocket.lang is None or websocket.lang in ['', "zh"]:
			rec_result = model_asr_streaming.generate(input=audio_in, **websocket.status_dict_asr_online)[0]
		else:
			audioBytes = bytes_to_float_array(audio_in)
			print(f'audio shape={len(audio_in)}, audioBytes shape={audioBytes.shape}')
			# if audioBytes.shape[0] / 16000 < 1.0:
			# 	time.sleep(0.1)  # wait for audio chunks to arrive
			# 	return

			# NOTE(review): transcribe() returns a lazy generator of segments —
			# confirm process_segments handles a generator input.
			segments, info = transcriber.transcribe(
				audioBytes,
				initial_prompt=None,
				language=websocket.lang,
				task="transcribe",
				vad_filter=False,
				vad_parameters=None)
			# print(f"segments={segments}, info={info}")
			rec_result = process_segments(segments)

		print("online, ", rec_result)
		# In 2pass mode the final chunk is skipped here: the offline pass will
		# deliver the authoritative result for the finished utterance.
		if websocket.mode == "2pass" and websocket.status_dict_asr_online.get("is_final", False):
			return
			#     websocket.status_dict_asr_online["cache"] = dict()
		if len(rec_result["text"]):
			mode = "2pass-online" if "2pass" in websocket.mode else websocket.mode
			message = json.dumps({"mode": mode, "text": rec_result["text"], "wav_name": websocket.wav_name,"is_final":websocket.is_speaking})
			await websocket.send_str(message)

def offline_whisper(lang, audio_file):
    """Transcribe a recorded audio file with faster-whisper.

    The file is first transcoded by ffmpeg to the 16 kHz mono 16-bit PCM stream
    the recognizer expects, then decoded with word timestamps and VAD filtering.
    For Chinese results the punctuation model is applied afterwards (when loaded).

    Args:
        lang: language code for whisper (must not be None).
        audio_file: path to the uploaded audio file (must not be None).

    Returns:
        dict: {"text": ..., "segments": [...]} as produced by process_segments.
    """
    assert lang is not None
    assert audio_file is not None
    # Decode/resample the upload into raw s16le PCM via an ffmpeg pipe.
    audio_bytes, _ = (
        ffmpeg.input(audio_file, threads=0)
        .output("-", format="s16le", acodec="pcm_s16le", ac=1, ar=16000)
        .run(cmd=["ffmpeg", "-nostdin"], capture_stdout=True, capture_stderr=True)
    )
    segments, info = transcriber.transcribe(
        bytes_to_float_array(audio_bytes),
        initial_prompt=None,
        language=lang,
        task="transcribe",
        word_timestamps=True,
        vad_filter=True,
        vad_parameters={"threshold": 0.5})
    rec_result = process_segments(segments)
    needs_punc = lang in ['', "zh"] and model_punc is not None and len(rec_result["text"]) > 0
    if needs_punc:
        rec_result['text'] = model_punc.generate(input=rec_result['text'])[0]['text']
    return rec_result

async def api_recognition(request):
	"""HTTP POST /recognition: one-shot file transcription.

	Expects multipart/form-data with a "lang" text field and an "audio" file
	field.  The upload is stored under args.temp_dir/<date>/<uuid>.<ext> and
	transcribed via offline_whisper().  Responds {"results": ..., "code": 0}.

	NOTE(review): inputs are not validated — a missing "lang" field raises
	KeyError and a missing "audio" part trips the assert in offline_whisper.
	"""
	reader = await request.multipart()
	text_params = {}
	audio_file = None
	while True:
		part = await reader.next()
		if not part:
			break

		# Plain text form field (no filename attached), e.g. "lang".
		if part.filename is None:
			text_params[part.name] = await part.text()  # read the field's text body

		if part.name == 'audio':
			filename = part.filename
			suffix = filename.split('.')[-1]
			size = 0
			# Store uploads under a per-day folder with a unique file name.
			audio_path = f'{args.temp_dir}/{datetime.now().strftime("%Y-%m-%d")}'
			os.makedirs(audio_path, exist_ok=True)
			audio_file = f'{audio_path}/{str(uuid.uuid1())}.{suffix}'
			with open(audio_file, 'wb') as f:
				while True:
					chunk = await part.read_chunk()  # 8192 bytes by default.
					if not chunk:
						break
					size += len(chunk)
					f.write(chunk)
	lang = text_params['lang']
	rec_result = offline_whisper(lang, audio_file)
	ret = {"results": rec_result, "code": 0}
	return web.json_response(ret)

# Chunked-upload bookkeeping, keyed by file name:
# {file_name: {"received_parts": set[int], "total_parts": int}}
file_parts = {}
async def api_recognition_chunk(request):
	"""HTTP POST /recognition_chunk: chunked upload plus transcription.

	Each request carries one part of a file (File-Name, File-Size, Part-Number,
	Chunk-Size form fields plus the "file" payload).  Once every part has been
	received the parts are merged and the whole file is transcribed.

	NOTE(review): the module-level file_parts dict is unsynchronized and never
	expired — concurrent uploads sharing a file name will collide, and aborted
	uploads leak their entries and .part files.
	"""
	# Parse the multipart/form-data request body.
	post_data = await request.post()
	file_name = post_data["File-Name"]
	total_file_size = int(post_data["File-Size"])
	part_number = int(post_data["Part-Number"])
	chunk_size = int(post_data["Chunk-Size"])

	# Extra form fields.
	lang = post_data.get("lang")
	# Raw bytes of this chunk.
	chunk_data = post_data["file"].file.read()  # .file.read() yields the uploaded payload
	part_path = os.path.join(f'{args.temp_dir}', f"{file_name}.part{part_number}")
	with open(part_path, "wb") as f:
		f.write(chunk_data)

	# Create or update this file's progress record.
	if file_name not in file_parts:
		total_parts = (total_file_size + chunk_size - 1) // chunk_size  # ceil division
		file_parts[file_name] = {"received_parts": set(), "total_parts": total_parts}

	file_parts[file_name]["received_parts"].add(part_number)

	# When all parts have arrived, merge them and run recognition.
	if len(file_parts[file_name]["received_parts"]) == file_parts[file_name]["total_parts"]:
		file_path = await merge_file(file_name, file_parts[file_name]["total_parts"])
		del file_parts[file_name]  # drop bookkeeping

		rec_result = offline_whisper(lang, file_path)
		ret = {"results": rec_result, "code": 0}
		return web.json_response(ret)

	# Otherwise just acknowledge receipt of this part.
	return web.json_response({
		"status": "part_received",
		"message": f"Part {part_number} received",
		"file_name": file_name,
		"part_number": part_number
	})


async def merge_file(file_name, total_parts):
    """Stitch the numbered ``.partN`` files back into one file.

    Parts are concatenated in part-number order (1..total_parts) into
    args.temp_dir/<file_name>; each part file is deleted once copied.

    Returns:
        str: path of the merged file.
    """
    file_path = os.path.join(f'{args.temp_dir}', file_name)
    part_paths = [f"{file_path}.part{n}" for n in range(1, total_parts + 1)]
    with open(file_path, "wb") as merged:
        for part_path in part_paths:
            with open(part_path, "rb") as chunk:
                merged.write(chunk.read())
            os.remove(part_path)  # part no longer needed once copied
    print(f"{file_path} merged successfully")
    return file_path

# aiohttp application: websocket streaming endpoint at "" plus two HTTP upload
# APIs.  client_max_size caps request bodies at 20 MiB (relevant for uploads).
app = web.Application(client_max_size=20 * 1024 * 1024)
app.add_routes([web.get('', ws_serve),
                web.post('/recognition', api_recognition),
				web.post('/recognition_chunk', api_recognition_chunk)])

# Serve over TLS (HTTPS/WSS) whenever a certificate path is configured;
# otherwise fall back to plain HTTP/WS on the same port.
# NOTE(review): args.httpport is never used — everything binds to args.port.
if len(args.certfile)>0:
	ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
	
	# Generate with Lets Encrypt, copied to this location, chown to current user and 400 permissions
	ssl_cert = args.certfile
	ssl_key = args.keyfile
	
	ssl_context.load_cert_chain(ssl_cert, keyfile=ssl_key)
	web.run_app(app, host=args.host, port=args.port, ssl_context=ssl_context)
else:
	web.run_app(app, host=args.host, port=args.port)

