import grpc
from concurrent import futures
# import torch
import numpy as np
from silero_vad.model import load_silero_vad
from silero_vad.utils_vad import VADIterator
import vad_pb2
import vad_pb2_grpc
import logging
from threading import Lock

# Configure process-wide logging: timestamped INFO-level records.
logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger = logging.getLogger("VADServer")


class VADProcessor:
    """Owns the shared Silero VAD model and per-session streaming state.

    One model instance is shared by every session; each session gets its own
    VADIterator so streaming state never leaks between concurrent callers.
    All access to the session table is serialized through a lock because the
    gRPC server dispatches requests from a thread pool.
    """

    def __init__(self):
        # Single PyTorch (non-ONNX) Silero model shared across sessions.
        self.model = load_silero_vad(onnx=False)
        self.session_states = {}  # session_id -> {"vad_iterator": VADIterator}
        self.lock = Lock()        # guards session_states against concurrent access

    def get_vad_iterator(self, session_id):
        """Return the state dict for *session_id*, creating it on first use."""
        with self.lock:
            state = self.session_states.get(session_id)
            if state is None:
                iterator = VADIterator(
                    model=self.model,
                    threshold=0.6,
                    sampling_rate=16000,
                    min_silence_duration_ms=600,  # silence needed to close a segment
                    speech_pad_ms=60,             # padding added around detected speech
                )
                state = {"vad_iterator": iterator}
                self.session_states[session_id] = state
            return state

    def cleanup_session(self, session_id):
        """Drop any stored state for *session_id*; no-op if unknown."""
        with self.lock:
            self.session_states.pop(session_id, None)


class VADService(vad_pb2_grpc.VADServiceServicer):
    """gRPC servicer that streams speech start/end timestamps for audio chunks."""

    def __init__(self):
        self.vad_processor = VADProcessor()

    def StreamVAD(self, request_iterator, context):
        """Bidirectional stream: consume int16 PCM chunks, emit boundary events.

        Protocol (as established by the branches below): a speech 'start' at
        time t is sent as Timestamp(start=t, end=0.0); a speech 'end' at time
        t is sent as Timestamp(start=0.0, end=t).  The client pairs them up.
        Assumes 16 kHz mono int16 audio — matches the VADIterator sampling
        rate configured in VADProcessor.  Per-session state is always released
        when the stream terminates.
        """
        session_id = None
        try:
            for request in request_iterator:
                session_id = request.session_id
                session_state = self.vad_processor.get_vad_iterator(session_id)

                # int16 PCM bytes -> float32 samples in [-1.0, 1.0)
                audio_data = np.frombuffer(request.audio_data, dtype=np.int16)
                audio_float32 = (audio_data / 32768.0).astype(np.float32)

                # VADIterator returns {'start': t} or {'end': t} in seconds,
                # or None when no boundary was crossed in this chunk.
                result = session_state["vad_iterator"](audio_float32, return_seconds=True)
                if not result:
                    continue

                if 'start' in result:
                    response = vad_pb2.SpeechTimestamps(
                        timestamps=[vad_pb2.Timestamp(start=result['start'], end=0.0)])
                elif 'end' in result:
                    response = vad_pb2.SpeechTimestamps(
                        timestamps=[vad_pb2.Timestamp(start=0.0, end=result['end'])])
                else:
                    continue

                # Build the message once and use it for both logging and the
                # reply (the previous code constructed the protobuf twice per
                # event and logged/yielded in a different order per branch).
                logger.info("Session %s - Result: %s", session_id, response)
                yield response

        except Exception as e:
            logger.error(f"Error processing stream for session {session_id}: {e}")
            context.set_code(grpc.StatusCode.INTERNAL)
            context.set_details("Error processing stream")
        finally:
            if session_id:
                # Free the per-session VAD iterator once the stream ends.
                self.vad_processor.cleanup_session(session_id)


def serve():
    """Create the gRPC server, bind port 50052, and block until termination."""
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    vad_pb2_grpc.add_VADServiceServicer_to_server(VADService(), grpc_server)
    grpc_server.add_insecure_port("[::]:50052")
    logger.info("gRPC 服务器已启动，监听端口: 50052")
    grpc_server.start()
    grpc_server.wait_for_termination()


# Run the server only when executed as a script (not on import).
if __name__ == "__main__":
    serve()

               # else:
                    # logger.info(f"No speech detected in session: {request.session_id}")
                    # yield vad_pb2.SpeechTimestamps(timestamps=[])
                # if result:
                    # # 处理返回的 start 和 end
                    # if 'start' in result:
                    #     session_state["current_start"] = result['start'] + session_state["time_offset"]
                    # elif 'end' in result and session_state["current_start"] is not None:
                    #     # 如果有 end 且之前有 start，则组装成一个完整的语音片段
                    #     start_time = session_state["current_start"]
                    #     end_time = result['end'] + session_state["time_offset"]
                    #     yield vad_pb2.SpeechTimestamps(timestamps=[
                    #         vad_pb2.Timestamp(start=start_time, end=end_time)
                    #     ])
                    #     session_state["current_start"] = None  # 重置 current_start
                    # elif 'end' in result and session_state["current_start"] is None:
                    #     # 如果只有 end，假设 start 为 0
                    #     start_time = session_state["time_offset"]
                    #     end_time = result['end'] + session_state["time_offset"]
                    #     yield vad_pb2.SpeechTimestamps(timestamps=[
                    #         vad_pb2.Timestamp(start=start_time, end=end_time)
                    #     ])

                # # 更新时间偏移量
                # session_state["time_offset"] += len(audio_data) / 16000  # 假设采样率为 16000 Hz

# import grpc
# from concurrent import futures
# import torch
# import numpy as np
# from silero_vad.model import load_silero_vad
# from silero_vad.utils_vad import (get_speech_timestamps, VADIterator,read_pcm)  # 导入 get_speech_timestamps
# import vad_pb2
# import vad_pb2_grpc

# # 配置日志
# import logging
# logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# logger = logging.getLogger("VADServer")


# class VADService(vad_pb2_grpc.VADServiceServicer):
#     def __init__(self):
#         self.model = load_silero_vad(onnx=False)
#         self.vad_iterator = VADIterator(model=self.model,
#                                          threshold=0.5,
#                                          sampling_rate=16000,
#                                          min_silence_duration_ms=60,
#                                          speech_pad_ms=30)
#         self.session_offsets = {}  # 为每个会话维护一个时间偏移量

#     def StreamVAD(self, request_iterator, context):
#         try:
#             current_start = None  # 用于存储当前语音片段的开始时间
#             session_id  = None  #当前会话ID

#             for request in request_iterator:
#                 # logger.info(f"Received audio chunk size: {len(request.audio_data)} for session: {request.session_id}")
#                 # 将字节数据转换为 NumPy 数组
#                 session_id = request.session_id

#                 audio_data=np.frombuffer(request.audio_data, dtype=np.int16)
#                 audio_float32 = (audio_data / 32768.0).astype(np.float32)

#                 # 转换为 PyTorch 张量
#                 audio_tensor = torch.from_numpy(audio_float32).unsqueeze(0)

#                 # 使用 VADIterator 检测语音
#                 result = self.vad_iterator(audio_tensor, return_seconds=True)

#                 logger.info(f"result: {result}")
                
#                 if result:
#                     # 处理返回的 start 和 end
#                     if 'start' in result:
#                         current_start = result['start']  # 保存当前语音片段的开始时间
#                         if session_id not in self.session_offsets:
#                             self.session_offsets[session_id] = current_start
#                     elif 'end' in result and current_start is not None:
#                         # 如果有 end 且之前有 start，则组装成一个完整的语音片段
#                         start_time = self.session_offsets[session_id]
#                         end_time = result['end'] - self.session_offsets[session_id]
#                         yield vad_pb2.SpeechTimestamps(timestamps=[
#                             vad_pb2.Timestamp(start=start_time, end=end_time)
#                         ])
#                         current_start = None  # 重置 current_start
#                     elif 'end' in result and current_start is None:
#                         # 如果只有 end，假设 start 为 0
#                         start_time = 0.0
#                         end_time = result['end'] - self.session_offsets.get(session_id, 0)
#                         yield vad_pb2.SpeechTimestamps(timestamps=[
#                             vad_pb2.Timestamp(start=start_time, end=end_time)
#                         ])
#                 # else:
#                     # logger.info(f"No speech detected in session: {request.session_id}")
#                     # yield vad_pb2.SpeechTimestamps(timestamps=[])
#         except Exception as e:
#             logger.error(f"Error processing stream: {e}")
#             context.set_code(grpc.StatusCode.INTERNAL)
#             context.set_details("Error processing stream")        


# def serve():
#     server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#     vad_pb2_grpc.add_VADServiceServicer_to_server(VADService(), server)
#     server.add_insecure_port("[::]:50052")
#     logger.info("gRPC 服务器已启动，监听端口: 50052")
#     server.start()
#     server.wait_for_termination()


# if __name__ == "__main__":
#     serve()
    
    
    
                    # return vad_pb2.SpeechTimestamps(timestamps=result)
        #         if result:
        #                 timestamps = [vad_pb2.Timestamp(start=float(ts['start']), end=float(ts['end'])) for ts in result]
        #                 yield vad_pb2.SpeechTimestamps(timestamps=timestamps)
        #         else:
        #                 logger.info(f"No speech detected in session: {request.session_id}")
        #                 yield vad_pb2.SpeechTimestamps(timestamps=[])
        # except Exception as e:
        #     logger.error(f"Error processing stream: {e}")
        #     context.set_code(grpc.StatusCode.INTERNAL)
        #     context.set_details("Error processing stream")

# import grpc
# from concurrent import futures
# import torch
# import numpy as np
# from silero_vad.model import load_silero_vad
# from silero_vad.utils_vad import (
#                           VADIterator, 
#                           read_pcm,
#                           get_speech_timestamps)
# import vad_pb2
# import vad_pb2_grpc

# # 配置日志
# import logging
# logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# logger = logging.getLogger("VADServer")


# class VADService(vad_pb2_grpc.VADServiceServicer):
#     def __init__(self):
#         self.model = load_silero_vad(onnx=False)
#         self.vad_iterator=VADIterator(model=self.model,
#                            threshold=0.5,
#                            sampling_rate=16000,
#                            min_silence_duration_ms=100,
#                            speech_pad_ms=30)

#     def StreamVAD(self, request_iterator, context):
#         try:
#             for request in request_iterator:
#                 # 将字节数据转换为 NumPy 数组
#                 audio_data = np.frombuffer(request.audio_data, dtype=np.int16)
#                 audio_float32 = (audio_data / 32768.0).astype(np.float32)

#                 # 转换为 PyTorch 张量
#                 audio_tensor = torch.from_numpy(audio_float32).unsqueeze(0)

#                 # 使用 VADIterator 检测语音
#                 result = self.vad_iterator(audio_tensor, return_seconds=True)
#                 if result:
#                     # 如果检测到语音片段的开始或结束
#                     if 'start' in result:
#                         yield vad_pb2.SpeechTimestamps(timestamps=[vad_pb2.Timestamp(start=result['start'], end=0.0)])
#                     elif 'end' in result:
#                         yield vad_pb2.SpeechTimestamps(timestamps=[vad_pb2.Timestamp(start=0.0, end=result['end'])])
#                 else:
#                     logger.info(f"No speech detected in session: {request.session_id}")
#                     yield vad_pb2.SpeechTimestamps(timestamps=[])
#         except Exception as e:
#             logger.error(f"Error processing stream: {e}")
#             context.set_code(grpc.StatusCode.INTERNAL)
#             context.set_details(str(e))

#     # def StreamVAD(self, request_iterator, context):
#     #     try:
#     #         for request in request_iterator:
#     #             # 将字节数据转换为 NumPy 数组
#     #             audio_data = np.frombuffer(request.audio_data, dtype=np.int16).astype(np.float32)
#     #             sample_rate = request.sample_rate

#     #             # 获取语音时间戳
#     #             speech_timestamps = get_speech_timestamps(audio_data, self.model, sampling_rate=sample_rate)
#     #             if speech_timestamps:
#     #                 timestamps = [vad_pb2.Timestamp(start=ts['start'], end=ts['end']) for ts in speech_timestamps]
#     #                 yield vad_pb2.SpeechTimestamps(timestamps=timestamps)
#     #             else:
#     #                 logger.info(f"No speech detected in session: {request.session_id}")
#     #                 yield vad_pb2.SpeechTimestamps(timestamps=[])
#     #     except Exception as e:
#     #         logger.error(f"Error processing stream: {e}")
#     #         context.set_code(grpc.StatusCode.INTERNAL)
#     #         context.set_details(str(e))


# def serve():
#     server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
#     vad_pb2_grpc.add_VADServiceServicer_to_server(VADService(), server)
#     server.add_insecure_port("[::]:50052")
#     logger.info("gRPC 服务器已启动，监听端口: 50052")
#     server.start()
#     server.wait_for_termination()


# if __name__ == "__main__":
#     serve()
    
    
    # def Int2Float(self, sound):
    #     _sound = np.copy(sound)  #
    #     abs_max = np.abs(_sound).max()
    #     _sound = _sound.astype('float32')
    #     if abs_max > 0:
    #         _sound *= 1/abs_max
    #     audio_float32 = torch.from_numpy(_sound.squeeze())
    #     return audio_float32

# import grpc
# from concurrent import futures
# import torch
# import multiprocessing
# from concurrent.futures import ProcessPoolExecutor
# import vad_pb2
# import vad_pb2_grpc
# from collections import defaultdict
# import logging
# import numpy as np
# from grpc_reflection.v1alpha import reflection

# from silero_vad.model import load_silero_vad
# from silero_vad.utils_vad import (
#                           read_pcm,
#                           get_speech_timestamps)

# # 配置日志
# logging.basicConfig(
#     level=logging.INFO,
#     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
# )
# logger = logging.getLogger("VADServer")


# class VADProcessor:
#     def __init__(self):
#         self.executor = ProcessPoolExecutor(max_workers=multiprocessing.cpu_count(),
#                                            initializer=self._init_model)
#         self.vad_models = defaultdict(dict)  # 按 session_id 存储模型

#     def _init_model(self):
#         pid = multiprocessing.current_process().pid
#         model = load_silero_vad(onnx=False, opset_version=16)
#         self.vad_models[pid] = model

#     def vad_process(self, pcm_data, sample_rate, session_id):
#         pid = multiprocessing.current_process().pid
#         with torch.no_grad():
#             # 将音频数据转换为 float32
#             # wav = torch.frombuffer(audio_data, dtype=torch.float32)
#             # tensors= read_pcm(audio_data, sampling_rate=16000)
#             # 将字节数据转换为 numpy 数组
#             pcm_data = np.frombuffer(pcm_data, dtype=np.int16)

#             # 如果是多声道，转换为单声道
#             # if num_channels > 1:
#             #     audio_data = audio_data.reshape(-1, num_channels).mean(axis=1)

#             # audio_data.setflags(write=True)  # 设置为可写
#             pcm_data = np.frombuffer(pcm_data, dtype=np.int16).copy()  # 复制数组

#             # 将 numpy 数组转换为 PyTorch 张量
#             tensors= torch.from_numpy(pcm_data).float()
#             speech_timestamps = get_speech_timestamps(
#                 tensors,
#                 self.vad_models[pid],
#                 threshold=0.46,  # 语音概率阈值
#                 sampling_rate=sample_rate,
#                 min_speech_duration_ms=300,  # 最小语音持续时间
#                 max_speech_duration_s=20,  # 最大语音持续时间
#                 min_silence_duration_ms=600,  # 最小静音持续时间
#                 window_size_samples=512,  # 窗口大小
#                 speech_pad_ms=200,  # 语音填充时间
#             )
#             return speech_timestamps


# class VADService(vad_pb2_grpc.VADServiceServicer):
#     def __init__(self):
#         self.vad_processor = VADProcessor()
#         self.sessions = defaultdict(list)  # 按 session_id 存储音频数据

# def StreamVAD(self, request_iterator, context):
#     for _ in request_iterator:
#         yield vad_pb2.SpeechTimestamps(timestamps=[vad_pb2.Timestamp(start=0, end=100)])
# # def StreamVAD(self, request_iterator, context):
# #     try:
# #         for audio_chunk in request_iterator:
# #             # 处理音频数据
# #             # result = self.vad_processor.vad_process(
# #             #     audio_chunk.audio_data,
# #             #     audio_chunk.sample_rate,
# #             #     audio_chunk.session_id
# #             # )
# #             future = self.vad_processor.executor.submit(
# #                 self.vad_processor.vad_process,
# #                 audio_chunk.audio_data,
# #                 audio_chunk.sample_rate,
# #                 audio_chunk.session_id
# #             )
# #             result = future.result()

# #             # 返回结果
# #             if result:
# #                 timestamps = [vad_pb2.Timestamp(start=ts['start'], end=ts['end']) for ts in result]
# #                 yield vad_pb2.SpeechTimestamps(timestamps=timestamps)
# #             else:
# #                 logger.info(f"No speech detected in session: {audio_chunk.session_id}")
# #                 yield vad_pb2.SpeechTimestamps(timestamps=[])  # 返回空的时间戳列表
# #     except grpc.RpcError as e:
# #         logger.error(f"RPC error: {e}")
# #         context.set_code(grpc.StatusCode.INTERNAL)
# #         context.set_details("Internal error")
# #     except Exception as e:
# #         logger.error(f"Error processing stream: {e}")
# #         context.set_code(grpc.StatusCode.INTERNAL)
# #         context.set_details("Internal error")
# #         raise
# # 启动 gRPC 服务器
# def serve():
#     try:
#         # 创建 gRPC 服务器
#         server = grpc.server(
#             futures.ThreadPoolExecutor(max_workers=10),
#             options=[
#                 ("grpc.max_send_message_length", 1024 * 1024 * 100),  # 100MB
#                 ("grpc.max_receive_message_length", 1024 * 1024 * 100),  # 100MB
#             ],
#         )

#         # 注册服务
#         vad_pb2_grpc.add_VADServiceServicer_to_server(VADService(), server)
       
#         # 注册反射服务
#         SERVICE_NAMES = (
#             vad_pb2.DESCRIPTOR.services_by_name['VADService'].full_name,
#             reflection.SERVICE_NAME,
#         )
#         reflection.enable_server_reflection(SERVICE_NAMES, server)

#         # 绑定端口
#         port = 50052
#         server.add_insecure_port(f"[::]:{port}")

#         # 启动服务器
#         server.start()
#         logger.info(f"gRPC 服务器已启动，监听端口: {port}")

#         # 等待服务器终止
#         server.wait_for_termination()
#     except Exception as e:
#         logger.error(f"服务器启动失败: {e}")
#         raise
#     finally:
#         logger.info("gRPC 服务器已关闭")


# if __name__ == '__main__':
#     serve()




    # def StreamVAD(self, request_iterator, context):
    #     for audio_chunk in request_iterator:
    #         session_id = audio_chunk.session_id
    #         audio_data = audio_chunk.audio_data
    #         sample_rate = audio_chunk.sample_rate

    #         # 提交任务到进程池
    #         future = self.vad_processor.executor.submit(
    #             self.vad_processor.vad_process,
    #             audio_data,
    #             sample_rate,
    #             session_id
    #         )
    #         result = future.result()

    #         # 返回结果
    #         if result:  # 如果有语音时间戳
    #             timestamps = [vad_pb2.Timestamp(start=ts['start'], end=ts['end']) for ts in result]
    #             yield vad_pb2.SpeechTimestamps(timestamps=timestamps)
    #         else:
    #             logger.info(f"No speech detected in session: {session_id}")

# import grpc
# from concurrent import futures
# import torch
# import multiprocessing
# from concurrent.futures import ProcessPoolExecutor
# import vad_pb2
# import vad_pb2_grpc
# from collections import defaultdict
# import logging

# from silero_vad.model import load_silero_vad
# # 配置日志
# logging.basicConfig(
#     level=logging.INFO,
#     format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
# )
# logger = logging.getLogger("VADServer")


# class VADProcessor:
#     def __init__(self):
#         self.executor = ProcessPoolExecutor(max_workers=multiprocessing.cpu_count(),
#                                            initializer=self._init_model)
#         self.vad_models = defaultdict(dict)  # 按 session_id 存储模型

#     def _init_model(self):
#         pid = multiprocessing.current_process().pid
#         # model, _ = torch.hub.load(repo_or_dir='snakers4/silero-vad',
#         #                           model='silero_vad',
#         #                           force_reload=False,
#         #                           onnx=False)
#         model= load_silero_vad(onnx=False, opset_version=16)
#         self.vad_models[pid] = model

#     def vad_process(self, audio_data, sample_rate, session_id):
#         pid = multiprocessing.current_process().pid
#         with torch.no_grad():
#             pcm = torch.frombuffer(audio_data, dtype=torch.float32)
#             speech_timestamps = get_speech_timestamps(
#                 pcm,
#                 self.vad_models[pid],
#                 threshold=0.46,  # 语音概率阈值
#                 sampling_rate=sample_rate,
#                 min_speech_duration_ms=300,  # 最小语音持续时间
#                 max_speech_duration_s=20,  # 最大语音持续时间
#                 min_silence_duration_ms=600,  # 最小静音持续时间
#                 window_size_samples=512,  # 窗口大小
#                 speech_pad_ms=200,  # 语音填充时间
#             )
#             return speech_timestamps
        


# class VADService(vad_pb2_grpc.VADServiceServicer):
#     def __init__(self):
#         self.vad_processor = VADProcessor()
#         self.sessions = defaultdict(list)  # 按 session_id 存储音频数据

# def StreamVAD(self, request_iterator, context):
#     for audio_chunk in request_iterator:
#         session_id = audio_chunk.session_id
#         audio_data = audio_chunk.audio_data
#         sample_rate = audio_chunk.sample_rate

#         # 提交任务到进程池
#         future = self.vad_processor.executor.submit(
#             self.vad_processor.vad_process,
#             audio_data,
#             sample_rate,
#             session_id
#         )
#         result = future.result()

#         # 返回结果
#         if result["result"] == "speech":
#             timestamps = [vad_pb2.Timestamp(start=ts['start'], end=ts['end']) for ts in result["timestamps"]]
#             yield vad_pb2.SpeechTimestamps(timestamps=timestamps)
#         # elif result["result"] == "silence":
#         #     yield vad_pb2.SpeechTimestamps(end_marker="[END]")



# # 启动 gRPC 服务器
# def serve():
#     try:
#         # 创建 gRPC 服务器
#         server = grpc.server(
#             futures.ThreadPoolExecutor(max_workers=10),
#             options=[
#                 ("grpc.max_send_message_length", 1024 * 1024 * 100),  # 100MB
#                 ("grpc.max_receive_message_length", 1024 * 1024 * 100),  # 100MB
#             ],
#         )

#         # 注册服务
#         vad_pb2_grpc.add_VADServiceServicer_to_server(VADService(), server)

#         # 绑定端口
#         port = 50052
#         server.add_insecure_port(f"[::]:{port}")

#         # 启动服务器
#         server.start()
#         logger.info(f"gRPC 服务器已启动，监听端口: {port}")

#         # 等待服务器终止
#         server.wait_for_termination()
#     except Exception as e:
#         logger.error(f"服务器启动失败: {e}")
#         raise
#     finally:
#         logger.info("gRPC 服务器已关闭")

# if __name__ == '__main__':
#     serve()