import torch
import torch.nn as nn
import numpy as np
from spikingjelly.clock_driven import neuron, encoding, functional
from options import args_parser
from tqdm import tqdm
import matplotlib.pyplot as plt
import copy
from update import LocalUpdate, test_inference
from utils import average_weights, get_dataset, transfer_weights
from model import Snet, Net, CNN
import time
import torch.nn.functional as F
import torchvision
import random
import torch.utils.data as data
def train(train_dataset, test_dataset, user_groups):
    """Run federated (FedAvg-style) training of the global CNN model.

    Each global communication round, the selected clients train a local copy
    of the global model on their own data shard; the resulting local weights
    are averaged and loaded back into the global model.

    Args:
        train_dataset: training dataset shared by all clients (indexed by
            the sample ids in ``user_groups``).
        test_dataset: held-out dataset used for global evaluation.
        user_groups: mapping from client index to the sample indices of
            ``train_dataset`` owned by that client.

    Returns:
        Tuple ``(global_model_acc, train_loss)``: per-round global test
        accuracies (currently left empty — the evaluation loop below is an
        unfinished stub) and per-round average local training losses.
    """
    args = args_parser()
    device = args.device
    train_epoch = args.epoch

    # Initialize the global model.
    global_model = CNN()
    global_model.to(device)
    global_model.train()

    train_loss = []        # average local training loss per global round
    global_model_acc = []  # global test accuracy per round (stub: never filled)

    for epoch in range(train_epoch):
        print("Global Epoch(全局通信轮次) {}:".format(epoch + 1))
        local_weights, local_losses = [], []
        global_model.train()

        # Number of participating clients this round (at least one).
        m = max(int(args.frac * args.num_users), 1)
        # NOTE(review): client selection is hard-coded to client 1 instead of
        # sampling m clients at random, e.g.:
        #   idxs_users = np.random.choice(range(args.num_users), m, replace=False)
        idxs_users = [1]

        for idx in idxs_users:
            print("设备号 {}:".format(idx))
            # Local trainer bound to this client's data shard.
            local_model = LocalUpdate(args=args, dataset=train_dataset,
                                      idxs=user_groups[idx])
            # Use a fresh model instance as a carrier for the global weights so
            # local training never mutates global_model in place.
            new_model = CNN()  # fixed: was CN(), an undefined name (NameError)
            transfer_weights(global_model, new_model)  # (model_from, model_to)
            # Train locally; returns the updated weights and the training loss.
            w, loss = local_model.update_weights_cnn(model=new_model,
                                                     global_round=epoch)
            local_weights.append(copy.deepcopy(w))
            local_losses.append(copy.deepcopy(loss))

        # FedAvg: average client weights and load them into the global model.
        global_weights = average_weights(local_weights)
        global_model.load_state_dict(global_weights)
        train_loss.append(sum(local_losses) / len(local_losses))

        # Evaluate the global model on the test set after every round.
        # NOTE(review): this evaluation is an unfinished stub — it builds the
        # loader but computes no accuracy, so global_model_acc stays empty.
        global_model.eval()
        test_data_loader = data.DataLoader(
            dataset=test_dataset,
            batch_size=256,
            shuffle=False,
            drop_last=False,
        )
        with torch.no_grad():
            print("Global Testing.....")

    return global_model_acc, train_loss





