import socket
import threading
import time
from queue import Queue
import train_diffusion_val
import torch
from torch.utils.data import Dataset, DataLoader, TensorDataset
import torch.nn as nn
import numpy as np
import pickle
import random
class EdgeNode:
    """Edge node that replays a dataset as a stream, partially noises each
    window with a local diffusion model, and offloads the remaining
    denoising to a central server over TCP (falling back to purely local
    computation when a simulated network failure occurs).
    """

    def __init__(self, server_address, data_set, model):
        # Fraction of denoise steps executed locally before offloading.
        self.ratio = 0.6
        self.model = model                    # diffusion prediction net; must expose .denoise_steps
        self.server_address = server_address  # (host, port) of the central server
        self.data_set = data_set              # ordered tensor of windows to stream
        self.data_index = 0                   # index of the next window to emit
        self.lock = threading.Lock()
        self.is_running = True                # controls the stream-reading loop
        self.results = []                     # per-window results (local or server reply)
        self.time = 0                         # accumulated processing time, seconds

    def read_data_stream(self):
        """Consume the dataset point by point until it is exhausted.

        A new point is emitted every 2 seconds to simulate a live stream;
        each point is processed synchronously while holding the lock.
        """
        while self.is_running:
            time.sleep(2)
            new_data_point = self.generate_data_point()
            with self.lock:
                print(f"Processing data: ")
                self.process_data(new_data_point)
                self.data_index += 1

    def generate_data_point(self):
        """Return the next window with a leading batch dimension of 1.

        When the last index is reached, `is_running` is cleared so the
        reader loop terminates after this point has been processed.
        """
        if self.data_index == np.shape(self.data_set)[0] - 1:
            self.is_running = False
        return self.data_set[self.data_index].unsqueeze(0)

    def process_data(self, data_fragment):
        """Noise one fragment, then either keep the local result (simulated
        network failure, ~9% of calls) or ship it to the server and store
        the server's reply. Elapsed time is accumulated in `self.time`.

        Raises ConnectionError if the server closes before sending the
        full reply payload.
        """
        start_time = time.time()
        noised_series = self.noising(data_fragment)

        # Simulated network failure: fall back to the local (noised) result.
        if random.randint(1, 100) < 10:
            print("网络故障，本地计算")
            self.results.append(noised_series)
            elapsed = time.time() - start_time
            print("耗时", elapsed)
            self.time += elapsed
            return

        print(f"将加噪后的时序传输给服务器：")
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client_socket:
            client_socket.connect(self.server_address)

            send_dict = {"ratio": self.ratio, "data": noised_series}
            serialized_data = pickle.dumps(send_dict)
            chunk_size = 1024

            # Ad-hoc length-prefixed protocol: announce the payload size,
            # wait for the server's go-ahead, then stream the pickle.
            client_socket.sendall(str(len(serialized_data)).encode())
            point = client_socket.recv(1024)
            print("收到信号", point.decode())
            print(len(serialized_data))
            for i in range(0, len(serialized_data), chunk_size):
                client_socket.sendall(serialized_data[i:i + chunk_size])

            print("等待接受服务端数据：")
            length = int(client_socket.recv(1024).decode())
            print("收到长度", length)
            client_socket.sendall('go'.encode())

            # Receive exactly `length` bytes. Using `<` instead of the
            # original `==` check avoids an infinite loop if a chunk
            # boundary overshoots, and an empty recv() means the peer
            # closed the connection early — fail loudly instead of spinning.
            serialized_data = b''
            while len(serialized_data) < length:
                chunk = client_socket.recv(1024)
                if not chunk:
                    raise ConnectionError(
                        "server closed connection before full payload arrived")
                serialized_data += chunk

            # SECURITY: pickle.loads on data received over the network is
            # only safe if the server is fully trusted.
            deserialized_data = pickle.loads(serialized_data)
            print(len(serialized_data))
            self.results.append(deserialized_data)
            elapsed = time.time() - start_time
            print("耗时", elapsed)
            self.time += elapsed

    def noising(self, data):
        """Run the first `ratio` fraction of denoise steps locally and
        return the partially processed series.

        NOTE(review): assumes model(data, start, end) returns a tuple whose
        [1] element is a (B, C, L) tensor that is transposed to (B, L, C)
        — confirm against the diffusion net's forward signature.
        """
        return self.model(data, 0, int(self.model.denoise_steps * self.ratio))[1].transpose(2, 1)


if __name__ == "__main__":
    # Experiment driver: load a trained diffusion model and the test split,
    # replay the test windows through an EdgeNode (which offloads denoising
    # to a central server), then score reconstruction error for anomaly
    # detection (ROC/AUC, F1) and report per-window processing time.
    server_address = ('localhost', 8892)  # central server host and port

    # Hyper-parameters; these must match the checkpoint selected by
    # `experiment` below for load_from_checkpoint to succeed.
    training_mode = "diffusion"
    lr = 1e-3
    window_size = 128
    p1 = 1
    p2 = 1
    dataset_name = "point_global"
    batch_size = 32
    noise_steps = 100
    denoise_steps = 50
    diff_lambda = 0.1
    part = None
    device = "cuda"

    experiment = f'diffv4_{dataset_name}_{noise_steps}-{denoise_steps}_{diff_lambda}_1e-3_{batch_size}_{window_size}'
    
    train_loader, test_loader, validation_loader, labels, validation_labels = train_diffusion_val.load_dataset(dataset_name, part)

    model, diffusion_training_net, diffusion_prediction_net, optimizer, scheduler = \
                        train_diffusion_val.load_model(training_mode ,lr, window_size, p1, p2, labels.shape[1], batch_size, noise_steps, denoise_steps)
    model, diffusion_training_net = train_diffusion_val.load_from_checkpoint(training_mode, experiment, model, diffusion_training_net)
    diffusion_training_net = diffusion_training_net.to(device)
    diffusion_prediction_net = diffusion_prediction_net.to(device)
    
    # Copy the trained weights into the prediction net; both nets run in
    # eval mode from here on (inference only).
    diffusion_prediction_net.load_state_dict(diffusion_training_net.state_dict())
    diffusion_prediction_net.eval()
    diffusion_training_net.eval()
    
    # NOTE(review): only the first batch of each loader is consumed —
    # presumably each loader yields its whole split in a single batch;
    # verify against load_dataset.
    trainD, testD, validationD = next(iter(train_loader)), next(iter(test_loader)), next(iter(validation_loader))
    testD = train_diffusion_val.convert_to_windows(testD, window_size)
    print(np.shape(testD))
    data_x = torch.tensor(testD, dtype=torch.float32)
    data_x = data_x.to(device)
    # dataset = TensorDataset(data_x, data_x)
    # dataloader = DataLoader(dataset, batch_size = batch_size)

    # STime = time.time()
    # l1s = []
    # feats=labels.shape[1]   
    # for window, _ in dataloader:
    #     window = window.to(device)
    #     _, x_recon = diffusion_prediction_net(window,0,45)
    #     _, x_recon = diffusion_prediction_net(window,45,50)
    #     x_recon = x_recon.transpose(2,1)
    #     l = nn.MSELoss(reduction = 'none')
    #     loss = l(x_recon, window)
    #     l1s.append(loss)
    # ETime = time.time()
    # loss0 = torch.cat(l1s).detach().cpu().numpy()
    # loss0 = loss0.reshape(-1,feats)
    
    # lossFinal = np.mean(np.array(loss0), axis=1)
    # labelsFinal = (np.sum(labels, axis=1) >= 1) + 0
    # validation_thresh = 0
    # result, fprs, tprs = train_diffusion_val.evaluate(lossFinal, labelsFinal, validation_thresh=validation_thresh)
    # result_roc = result["ROC/AUC"]
    # result_f1 = result["f1"]
    
    # print(result, ETime - STime)
    
    # Stream every test window through the edge node; this call blocks
    # until the whole dataset has been processed.
    edge_node = EdgeNode(server_address=server_address, data_set=data_x, model = diffusion_prediction_net)
    edge_node.read_data_stream()
    # print(torch.stack(edge_node.results))
    denoised_data = torch.stack(edge_node.results).squeeze(1)
    print(edge_node.time)
    # After the edge-node loop ends, per-request sockets are already closed
    # (they are context-managed inside process_data), so the client is done.
    print(f"Client closed.")
    # print(np.shape(data_x))
    dataset = TensorDataset(data_x, denoised_data)
    dataloader = DataLoader(dataset, batch_size = batch_size)

    # Per-element MSE between the raw windows and the (server-)denoised
    # reconstructions, accumulated over all batches.
    l1s = []
    feats=labels.shape[1]   
    for raw, window in dataloader:
        window = window.to(device)
        l = nn.MSELoss(reduction = 'none')
        loss = l(raw, window)
        l1s.append(loss)
    loss0 = torch.cat(l1s).detach().cpu().numpy()
    loss0 = loss0.reshape(-1,feats)
    
    print(np.shape(loss0))
    # Mean error per timestep; a timestep is labeled anomalous when any
    # feature is labeled.
    lossFinal = np.mean(np.array(loss0), axis=1)
    labelsFinal = (np.sum(labels, axis=1) >= 1) + 0
    validation_thresh = 0
    result, fprs, tprs = train_diffusion_val.evaluate(lossFinal, labelsFinal, validation_thresh=validation_thresh)
    result_roc = result["ROC/AUC"]
    result_f1 = result["f1"]
    print("ROC",result_roc)
    print("f1", result_f1)
    print("time",edge_node.time / np.shape(denoised_data)[0]) 