import socket
import threading
import os
import ast
import time
import csv
import diffusion_module2
import train_diffusion_val
import torch
from torch.utils.data import Dataset, DataLoader, TensorDataset
import torch.nn as nn
import numpy as np
import time
import pickle
# Torch device used by Server.denoising.
# NOTE(review): hard-coded to CUDA — assumes a GPU is available; confirm.
device = "cuda"
class Server:
    """TCP server that receives noisy data fragments from clients and returns
    denoised reconstructions produced by a diffusion model.

    Per-connection protocol (mirrored by the client):
      1. client sends the decimal byte length of a pickled payload,
         server replies ``'go'``;
      2. client streams the pickled payload, a dict with keys
         ``'ratio'`` (float) and ``'data'`` (tensor);
      3. server denoises the data and streams back a pickled tensor using
         the same length-handshake.
    """

    def __init__(self, address, model):
        # (host, port) tuple to bind the listening socket to.
        self.address = address
        # One handler thread per accepted client connection.
        self.clients = []
        self.real_anomalies = set()
        # Diffusion prediction network used for denoising; must expose a
        # `denoise_steps` attribute and be callable as model(x, start, end).
        self.model = model

    def start_server(self):
        """Accept connections forever, spawning one handler thread each."""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_socket:
            server_socket.bind(self.address)
            server_socket.listen()

            print(f"Server listening on {self.address}")

            while True:
                client_socket, client_address = server_socket.accept()
                client_thread = threading.Thread(
                    target=self.handle_client,
                    args=(client_socket, client_address),
                )
                client_thread.start()
                self.clients.append(client_thread)

    def handle_client(self, client_socket, client_address):
        """Serve one client: receive a payload, denoise it, send the result.

        Closes the client socket in all cases (including errors).
        """
        print(f"Accepted connection from {client_address}")
        try:
            # First message: decimal byte length of the pickled payload.
            length = int(client_socket.recv(1024).decode())
            print("收到长度", length)
            client_socket.sendall('go'.encode())

            # Receive exactly `length` bytes. BUG FIX: the original compared
            # `len(serialized_data) == length` and never checked for an empty
            # chunk, so a dropped connection (or any overshoot past `length`)
            # looped forever. Accumulate chunks and stop on either condition.
            chunks = []
            received = 0
            while received < length:
                chunk = client_socket.recv(1024)
                if not chunk:  # peer closed the connection early
                    break
                chunks.append(chunk)
                received += len(chunk)
            serialized_data = b''.join(chunks)

            # SECURITY: pickle.loads on bytes from the network can execute
            # arbitrary code if the peer is untrusted. Acceptable here only
            # because the clients are part of the same controlled experiment.
            deserialized_data = pickle.loads(serialized_data)
            ratio = deserialized_data["ratio"]
            data_fragment = deserialized_data["data"]
            print(f"Received ratio: {ratio}")

            # Denoise the fragment with the diffusion model.
            data = self.denoising(data_fragment, ratio)

            serialized_data = pickle.dumps(data)
            chunk_size = 1024

            # Same length-handshake on the way back, then stream the result.
            client_socket.sendall(str(len(serialized_data)).encode())
            point = client_socket.recv(1024)
            print("收到信号", point.decode())
            print(np.shape(data))
            print(len(serialized_data))
            for i in range(0, len(serialized_data), chunk_size):
                client_socket.sendall(serialized_data[i:i + chunk_size])
            print(f"Connection from {client_address} closed")
        finally:
            # Release the socket even when receive/deserialize/denoise fails
            # (the original leaked the fd on any exception in this handler).
            client_socket.close()

    def denoising(self, data, ratio):
        """Denoise `data`, starting from a step scaled by `ratio`.

        Returns the model's reconstruction (output index 1) transposed with
        .transpose(2, 1) — presumably back to (batch, time, features);
        TODO(review): confirm axis meaning against the model definition.
        """
        # BUG FIX: Tensor.to() is not in-place. The original `data.to(device)`
        # discarded the returned tensor, so the model ran on whatever device
        # `data` arrived on (crashing for CPU tensors with a CUDA model).
        data = data.to(device)
        start_step = int(self.model.denoise_steps * ratio)
        return self.model(data, start_step, self.model.denoise_steps)[1].transpose(2, 1)
    

if __name__ == "__main__":
    server_address = ('localhost', 8892)  # central server bind address/port

    # Experiment hyper-parameters. These must match the checkpoint being
    # loaded below — `experiment` encodes them into the checkpoint name.
    training_mode = "diffusion"
    lr = 1e-3
    window_size = 128
    p1 = 1
    p2 = 1
    dataset_name = "point_global"
    batch_size = 32
    noise_steps = 100
    denoise_steps = 50
    diff_lambda = 0.1
    part = None
    device = "cuda"

    experiment = f'diffv4_{dataset_name}_{noise_steps}-{denoise_steps}_{diff_lambda}_1e-3_{batch_size}_{window_size}'

    # The loaders are unused here; the dataset is loaded to obtain `labels`,
    # whose second dimension gives the feature count passed to load_model.
    train_loader, test_loader, validation_loader, labels, validation_labels = \
        train_diffusion_val.load_dataset(dataset_name, part)

    model, diffusion_training_net, diffusion_prediction_net, optimizer, scheduler = \
        train_diffusion_val.load_model(training_mode, lr, window_size, p1, p2,
                                       labels.shape[1], batch_size,
                                       noise_steps, denoise_steps)
    model, diffusion_training_net = train_diffusion_val.load_from_checkpoint(
        training_mode, experiment, model, diffusion_training_net)
    diffusion_training_net = diffusion_training_net.to(device)
    diffusion_prediction_net = diffusion_prediction_net.to(device)

    # Copy the trained weights into the prediction net and switch it to
    # eval mode — the server only runs inference.
    diffusion_prediction_net.load_state_dict(diffusion_training_net.state_dict())
    diffusion_prediction_net.eval()

    server = Server(server_address, diffusion_prediction_net)
    server.start_server()
