import time
import torch
from torch import nn
from torch.utils.data import DataLoader, Dataset
from spikingjelly.clock_driven import neuron, encoding, functional
from tqdm import tqdm
import torch.nn.functional as F
class DatasetSplit(Dataset):
	"""Pytorch Dataset view restricted to a given subset of indices.

	Wraps an existing dataset and exposes only the samples whose indices
	appear in ``idxs``, re-indexed from 0.
	"""

	def __init__(self, dataset, idxs):
		# Keep a reference to the full dataset; normalise indices to int.
		self.dataset = dataset
		self.idxs = list(map(int, idxs))

	def __len__(self):
		return len(self.idxs)

	def __getitem__(self, item):
		# Translate the local position into the underlying dataset's index.
		sample, target = self.dataset[self.idxs[item]]
		return torch.tensor(sample), torch.tensor(target)

class LocalUpdate(object):
	"""Per-client local training helper for federated learning.

	Holds one client's index subset of the global dataset, builds its
	train/val/test loaders, and performs local training of either a spiking
	neural network (``update_weights``, via spikingjelly) or a conventional
	CNN (``update_weights_cnn``) before the weights go back to the server.
	"""

	def __init__(self, args, dataset, idxs):
		# args is a configuration namespace; this class reads args.device,
		# args.lr, args.local_epoch and args.T (SNN simulation steps).
		self.args = args
		self.idxs = idxs
		self.trainloader, self.validloader, self.testloader = self.train_val_test(dataset, list(idxs))
		self.device = args.device
		# Default criterion set to NLL loss function
		self.criterion = nn.NLLLoss().to(self.device)
	def train_val_test(self, dataset, idxs):
		"""
		Returns train, validation and test dataloaders for a given dataset
		and user indexes.
		"""
		# split indexes for train, validation, and test (80, 10, 10)
		# allocate this client's image indices across the three splits
		idxs_train = idxs[:int(0.8 * len(idxs))]
		idxs_val = idxs[int(0.8 * len(idxs)):int(0.9 * len(idxs))]
		idxs_test = idxs[int(0.9 * len(idxs)):]
		# NOTE(review): the commented-out loaders derived batch sizes from the
		# split sizes; the active loaders below use hard-coded batch sizes,
		# which assumes each client holds enough samples — confirm.
		# trainloader = DataLoader(DatasetSplit(dataset, idxs_train), batch_size=self.args.local_bs, shuffle=True)
		# validloader = DataLoader(DatasetSplit(dataset, idxs_val), batch_size=int(min(1, len(idxs_val) / 10)), shuffle=False)
		# testloader = DataLoader(DatasetSplit(dataset, idxs_test), batch_size=int(len(idxs_test) / 10), shuffle=False)
		trainloader = DataLoader(DatasetSplit(dataset, idxs_train), batch_size=1200, shuffle=True)
		validloader = DataLoader(DatasetSplit(dataset, idxs_val), batch_size=300, shuffle=False)
		testloader = DataLoader(DatasetSplit(dataset, idxs_test), batch_size=800, shuffle=False)
		return trainloader, validloader, testloader

	def update_weights(self, model, global_round):  # local parameter update for the SNN model
		"""Train the SNN locally with Poisson-encoded inputs and rate coding.

		Runs ``args.local_epoch`` epochs over this client's train split and
		evaluates on its test split after each epoch.

		Returns a 7-tuple: (state_dict, mean epoch loss, mean train accuracy,
		best test accuracy, completion timestamp, train sample count, mean
		output firing frequency of the last epoch).
		"""
		# NOTE(review): hard-coded .cuda() ignores self.device — this breaks
		# on CPU-only runs; presumably should be model.to(self.device).
		model.cuda()
		model.train()# Set mode to train model
		epoch_loss = []
		optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr)# Set optimizer for the local updates
		# Poisson encoder turns static images into stochastic spike trains.
		encoder = encoding.PoissonEncoder()
		train_times = 0
		max_test_accuracy = 0
		test_accs = []
		train_accs = []
		epoch_accuracy = []
		for iter in range(self.args.local_epoch):#train this client device for local_epoch epochs
			print("Training...")
			out_spikes_counter_frequency_sum = 0.0
			out_spikes_counter_frequency_avg = 0.0
			train_correct_sum = 0
			train_sum = 0
			batch_loss = [] #record per-batch loss
			index = 0
			for img, label in tqdm(self.trainloader):
				img = img.to(self.device)
				label = label.to(self.device)
				# Output layer must have 10 neurons (one per class); see model.py
				label_one_hot = F.one_hot(label, 10).float()
				optimizer.zero_grad()
				# Run for T time steps; out_spikes_counter is a [batch_size, 10]
				# tensor counting each output neuron's spikes over the whole
				# simulation window (encoder re-samples spikes at every step).
				for t in range(self.args.T):
					if t == 0:
						out_spikes_counter = model(encoder(img).float())
					else:
						out_spikes_counter += model(encoder(img).float())
				# out_spikes_counter / T gives each output neuron's firing rate
				index = index+1
				out_spikes_counter_frequency = out_spikes_counter / self.args.T
				out_spikes_counter_frequency_sum = out_spikes_counter_frequency_sum + out_spikes_counter_frequency.mean()
				# Loss is the MSE between firing rates and the one-hot target:
				# it drives neuron i's rate toward 1 when class i is fed in,
				# and the other neurons' rates toward 0.
				loss = F.mse_loss(out_spikes_counter_frequency, label_one_hot)
				loss.backward()
				optimizer.step()
				# After each optimizer step the network state must be reset,
				# because SNN neurons carry membrane-potential "memory".
				functional.reset_net(model)
				# Accuracy: the output neuron with the highest firing rate is
				# taken as the predicted class.
				train_correct_sum += (out_spikes_counter_frequency.max(1)[1] == label.to(self.args.device)).float().sum().item()
				train_sum += label.numel()
				train_batch_accuracy = (out_spikes_counter_frequency.max(1)[1] == label.to(self.args.device)).float().mean().item()
				# writer.add_scalar('train_batch_accuracy', train_batch_accuracy, train_times, loss.item())####
				train_accs.append(train_batch_accuracy)
				train_times += 1
				#self.logger.add_scalar('loss',loss.item)
				batch_loss.append(loss.item())
			out_spikes_counter_frequency_avg = out_spikes_counter_frequency_sum / index
			train_accuracy = train_correct_sum / train_sum
			epoch_loss.append(sum(batch_loss)/len(batch_loss))
			epoch_accuracy.append(train_accuracy)

			print("Testing")
			model.eval()
			with torch.no_grad():
				# After each full pass over the training data, evaluate once
				# on the test split.
				test_correct_sum = 0
				test_sum = 0
				for img, label in tqdm(self.testloader):
					img = img.to(self.args.device)
					for t in range(self.args.T):
						if t == 0:
							out_spikes_counter = model(encoder(img).float())
						else:
							out_spikes_counter += model(encoder(img).float())

					test_correct_sum += (out_spikes_counter.max(1)[1] == label.to(self.args.device)).float().sum().item()
					test_sum += label.numel()
					functional.reset_net(model)
				test_accuracy = test_correct_sum / test_sum
				# writer.add_scalar('test_accuracy', test_accuracy, self.args.epoch)
				max_test_accuracy = max(max_test_accuracy, test_accuracy)
				test_accs.append(max_test_accuracy)
			print("Local Epoch {}: train_acc = {:.2f}%,Test_acc = {:.2f}%, train_times={}".format(iter + 1, train_accuracy * 100,max_test_accuracy * 100, train_times))
		avg_accuracy = sum(epoch_accuracy)/len(epoch_accuracy)
		# Mean output firing rate of the last epoch. NOTE(review): if the
		# train loader were empty this would already have failed above
		# (division by index == 0).
		fre = out_spikes_counter_frequency_avg.item()
		# Return the updated weights plus the local training statistics
		return model.state_dict(), sum(epoch_loss) / len(epoch_loss), avg_accuracy, max_test_accuracy, time.time(), train_sum, fre
	def update_weights_cnn(self, model, global_round):
		"""Train a conventional CNN locally.

		Returns (state_dict, loss). NOTE(review): ``loss`` is the loss tensor
		of the LAST batch only — likely intended to be an epoch average, and
		it is undefined if the train loader is empty. Also note inputs/labels
		are never moved to self.device here, so this assumes a CPU model —
		confirm against callers.
		"""
		# Define loss function and optimizer:
		# cross-entropy loss, SGD with momentum
		criterion = nn.CrossEntropyLoss()
		optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
		# Train the network
		# iterate over local epochs
		for epoch in range(self.args.local_epoch):

			running_loss = 0.0
			for i, data in enumerate(self.trainloader, 0):
				# get the input
				inputs, labels = data

				# zero the parameter gradients
				optimizer.zero_grad()  #

				# forward + backward + optimize
				outputs = model(inputs)
				loss = criterion(outputs, labels)  # compute the loss
				loss.backward()  # backpropagate
				optimizer.step()  # update the parameters

				# print statistics
				running_loss += loss.item()  # tensor.item() extracts the python number
				if i % 2000 == 1999:
					print('[%d, %5d] loss: %.3f' %
						  (epoch + 1, i + 1, running_loss / 2000))  # report the mean loss every 2000 iterations
					running_loss = 0.0

		print('Finished Training')
		return  model.state_dict(), loss
	def inference(self, model):
		""" Returns the inference accuracy and loss.
		"""
		model.eval()
		loss, total, correct = 0.0, 0.0, 0.0
		# NOTE(review): no torch.no_grad() here, so an autograd graph is built
		# during evaluation — results are unaffected but memory is wasted.
		for batch_idx, (images, labels) in enumerate(self.testloader):
			images, labels = images.to(self.device), labels.to(self.device)
			# Inference
			# self.criterion is NLLLoss, so `model` must output log-probabilities
			outputs = model(images)
			batch_loss = self.criterion(outputs, labels)
			loss += batch_loss.item()
			# Prediction: highest-scoring class per sample
			_, pred_labels = torch.max(outputs, 1)
			pred_labels = pred_labels.view(-1)
			correct += torch.sum(torch.eq(pred_labels, labels)).item()
			total += len(labels)

		accuracy = correct / total
		return accuracy, loss

def test_inference(args, model, test_dataset):
    """ Returns the test accuracy and loss.
    """
    model.eval()
    loss, total, correct = 0.0, 0.0, 0.0

    device = 'cuda' if args.gpu else 'cpu'
    criterion = nn.NLLLoss().to(device)
    testloader = DataLoader(test_dataset, batch_size=128, shuffle=False)

    for batch_idx, (images, labels) in enumerate(testloader):
        images, labels = images.to(device), labels.to(device)

        # Inference
        outputs = model(images)
        batch_loss = criterion(outputs, labels)
        loss += batch_loss.item()

        # Prediction
        _, pred_labels = torch.max(outputs, 1)
        pred_labels = pred_labels.view(-1)
        correct += torch.sum(torch.eq(pred_labels, labels)).item()
        total += len(labels)

    accuracy = correct/total
    return accuracy, loss