import socket,time,pyttsx4,threading,queue ,sys 
import tkinter as tk
from tkinter import font

import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torch.nn.functional as F

class GestureRecognitionNet(nn.Module):
    """MLP gesture classifier.

    Layer widths: num_features -> 128 -> 256 -> 128 -> 64 -> 32 -> 16 -> 8
    -> num_classes.  The two dropout layers are declared (so existing
    checkpoints load cleanly) but are NOT applied in forward() — the calls
    were disabled in the original implementation.
    """

    def __init__(self, num_features, num_classes):
        super().__init__()
        # Attribute names must stay exactly as-is: a saved state_dict is
        # loaded against them elsewhere in this file.
        self.fc1 = nn.Linear(num_features, 128)
        self.dropout1 = nn.Dropout(0.2)  # declared only; unused in forward()
        self.fc2 = nn.Linear(128, 256)
        self.dropout2 = nn.Dropout(0.3)  # declared only; unused in forward()
        self.fc3 = nn.Linear(256, 128)
        self.fc4 = nn.Linear(128, 64)
        self.fc5 = nn.Linear(64, 32)
        self.fc6 = nn.Linear(32, 16)
        self.fc7 = nn.Linear(16, 8)
        self.fc8 = nn.Linear(8, num_classes)

    def forward(self, x):
        """Return raw logits (no final activation; meant for CrossEntropyLoss)."""
        for layer in (self.fc1, self.fc2, self.fc3):
            x = F.relu(layer(x))
        # NOTE(review): fc4 carries no activation in the original network;
        # kept byte-for-byte equivalent here — confirm it was intentional.
        x = self.fc4(x)
        for layer in (self.fc5, self.fc6, self.fc7):
            x = F.relu(layer(x))
        # Output layer: logits only, no softmax.
        return self.fc8(x)
    
    
    
def split_hand_data(hand_data):
    """Parse one glove's underscore-delimited packet.

    Expected layout: "<finger>_<sign>_<value>_<sign>_<value>_<sign>_<value>",
    where a sign token of 0 marks the following value as negative.

    Returns:
        (finger_code, value1, value2, value3) — exactly the first three
        signed readings; raises IndexError on malformed/short packets.
    """
    tokens = hand_data.split('_')
    finger_code = int(tokens[0])
    readings = []
    # Remaining tokens come in (sign, magnitude) pairs.
    for sign_token, value_token in zip(tokens[1::2], tokens[2::2]):
        sign = int(sign_token)
        if sign == 0:
            sign = -1  # 0 encodes "negative"
        readings.append(sign * int(value_token))
    return finger_code, readings[0], readings[1], readings[2]
# FIFO shared by the receiving thread (producer) and processing thread (consumer).
data_queue = queue.Queue()


# Connect to the glove data server over TCP.
# NOTE(review): address/port are hard-coded; original note said "124 vs 138"
# (alternate host endings?) — confirm which host is current.
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect(('192.168.1.107', 8899))
# Text-to-speech engine used to announce recognized gestures.
engine = pyttsx4.init()


def receive_data(client_socket):
    """Producer loop: pull raw packets off the socket into ``data_queue``.

    Runs on a daemon thread.  Stops when the peer closes the connection
    (an empty read).
    """
    while True:
        payload, _addr = client_socket.recvfrom(1024)
        if not payload:
            return
        # Hand the raw bytes to the processing thread.
        data_queue.put(payload)


def process_data(engine,compiled_model): 
    """Consumer loop: decode queued glove packets, classify and announce them.

    Runs on a daemon thread.  Reads module-level globals: data_queue, scaler,
    label_encoder, label, root.

    Args:
        engine: pyttsx4 TTS engine used to speak the recognized label.
        compiled_model: OpenVINO compiled model, called with a (1, 6)
            float32 tensor of scaled features.
    """
    # While repeat < 6, packets are drained and discarded.  repeat is reset
    # to 0 after each new recognition below, so this skips the stale packets
    # that queued up while the TTS engine was speaking.
    repeat=4
    # Last announced label; used to avoid repeating the same announcement.
    rec_for=''
    while True:  
        # Debounce branch: consume a packet without processing it.
        if(repeat<6):
            print(repeat)
            repeat=repeat+1
            data = data_queue.get()
            data_queue.task_done()
            continue
        data = data_queue.get().decode() 
        # Keep only the text before the first '.' terminator.
        data=data.split('.') 
        # NOTE(review): str.split() always returns at least one element, so
        # this guard can never trigger.
        if(len(data)<1):
            continue
        print(f"process data : {data[0]}")  
        # One packet carries both gloves, comma-separated: "<left>,<right>".
        glove_data=data[0].split(',')
        if(len(glove_data)<2):
          continue
        left_glove=glove_data[0]
        righ_glove=glove_data[1]
        
        # Each glove yields (finger_code, hx, fa, fg); hx is parsed but not
        # used as a model feature.
        dac_l,hx_l,fa_l,fg_l=split_hand_data(left_glove)
        dac_r,hx_r,fa_r,fg_r=split_hand_data(righ_glove)
        print(f"left: {dac_l,hx_l,fa_l,fg_l}  right: {dac_r,hx_r,fa_r,fg_r}") 
        
        # 6-element feature vector: per hand, the finger code plus fa/fg.
        new_sample_features = np.array([dac_l,fa_l,fg_l,dac_r,fa_r,fg_r] )    

        # Apply the training-time scaling (scaler is restored from the checkpoint).
        new_sample_features = scaler.transform(new_sample_features.reshape(1, -1))  
          
        # Convert the preprocessed features to a PyTorch tensor.
        new_sample_features = torch.tensor(new_sample_features, dtype=torch.float32)  
          
    
        # Run inference and convert the logits to class probabilities.
        prediction=torch.tensor(compiled_model(new_sample_features)[0]).squeeze(0).softmax(0)

        class_id = prediction.argmax().item()
        score = prediction[class_id].item()
        print(score)
        # Accept the prediction only above a 0.9 confidence threshold.
        if(score>0.9):
            data = label_encoder.inverse_transform([class_id])[0] 
        else:
            data =''  
        if(data):
            
            # Announce only when the label changed since last time.
            if(data != rec_for ):
                repeat=0
                rec_for=data
                print(f"识别为：{data}")
                # Show the recognized text in the Tk window.
                # NOTE(review): Tk is updated from this worker thread;
                # Tkinter is not thread-safe — confirm this is reliable.
                label['text']=data
                root.update()
                engine.say(data)    
                engine.runAndWait()  # block until speech finishes
                print('--------------')
        # Mark the queue item as handled.
        data_queue.task_done()  
    #engine=None
    
    
    
# Load the checkpoint; besides the weights it bundles the feature/class
# counts, the fitted label encoder and the feature scaler.
# NOTE(review): weights_only=False unpickles arbitrary objects — only safe
# for a trusted local checkpoint.
model_with_info = torch.load('./model/model_with_info.pth',map_location=torch.device('cpu'),weights_only=False)

# Rebuild the network from the stored metadata and restore its weights.
model = GestureRecognitionNet(model_with_info['num_features'], model_with_info['num_classes'])
model.load_state_dict(model_with_info['state_dict'])
label_encoder=model_with_info['label_encoder']
scaler = model_with_info['scale']
model.eval()  # evaluation mode
# NOTE(review): input_names/output_names are defined but never passed to
# torch.onnx.export below — apparently unused.
input_names = ["input_1"]
output_names = ["output_1"]

# Example feature vector, used only to trace the model for ONNX export.
new_sample_features = np.array([2,-2,155,0,5,-119] )

# Preprocess with the same scaler used at training time.
new_sample_features = scaler.transform(new_sample_features.reshape(1, -1))  
  
# Convert the preprocessed features to a PyTorch tensor.
new_sample_features = torch.tensor(new_sample_features, dtype=torch.float32)  
torch.onnx.export(model, new_sample_features,"./model/rec.onnx", verbose=True)

# Convert the ONNX model to OpenVINO IR and compile it for CPU inference.
import openvino as ov
newmodel=ov.convert_model('./model/rec.onnx')
compiled_model = ov.compile_model(newmodel,'CPU')

    
# Build the Tk window that displays the recognized gesture text.
root = tk.Tk()
root.geometry("1920x600") 
my_font = font.Font(family='Arial', size=50, weight='bold')
# Main display label; process_data() writes the recognized text into it.
label = tk.Label(root, text="欢迎使用翻译设备", font=my_font, fg="blue")  

label.pack(pady=20)  # pady adds vertical spacing  
  

# NOTE(review): this button has no command handler — it appears unused.
button = tk.Button(root, text="点击我")  

button.pack(pady=20)  # pady adds vertical spacing  
  
# 运行主循环  




# 创建并启动接收数据的线程  
# Start the socket-receiving (producer) thread.
receive_thread = threading.Thread(target=receive_data, args=(client_socket,))  
receive_thread.daemon = True  # daemon: dies with the main program
receive_thread.start()


# Start the packet-processing / recognition (consumer) thread.
process_thread = threading.Thread(target=process_data,args=(engine,compiled_model))  
process_thread.daemon = True  # daemon: dies with the main program
process_thread.start()


def on_close():  
    """Window-close handler: tear down the UI, TTS engine and socket, then exit."""
    root.destroy() 
    engine.stop()
    client_socket.close()
    # Force the process to exit (daemon threads are still running).
    sys.exit()
root.protocol("WM_DELETE_WINDOW", on_close)
root.title("识别客户端") 
root.mainloop()

 
  
