import os
import numpy as np
import random
import socket
import itertools
import csv
from collections import deque
import torch
from torch.nn import Sequential, Linear, MSELoss
import torch.optim as optim

from util.argparser import AgentParser
from util.comm import ServerSocket
from util.plot import AgentPloter
from vqc import QuantumLayer

os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"

class DQNAgent:
	'''DQN agent with a hybrid quantum-classical Q-network.

	state  : tuple of length `state_size`
	         (cpu_little_freq, cpu_big_freq, temp, fps) in the main script
	actions: indices into `action_space`, the cartesian product of
	         little/big CPU frequency levels (3 levels each)
	'''
	def __init__(self, state_size, action_size):
		self.state_size  = state_size
		self.action_size = action_size
		self.actions = list(range(9))
		# All (little-core, big-core) frequency-level pairs
		self.action_space = list(itertools.product(range(3), repeat=2))
		print(self.action_space)

		# Hyperparameters
		self.learning_rate = 0.05 # 0.01
		self.discount_factor = 0.99
		self.epsilon = 1
		self.epsilon_decay = 0.08 # 0.99
		self.epsilon_min = 0 # 0.1
		self.batch_size = 32
		self.train_start = 50 #200
		self.q_max = 0
		self.avg_q_max = 0
		self.currentLoss = 0

		# Replay memory
		self.memory = deque(maxlen=500)

		# Online and target networks
		self.model = self.build_model()
		self.target_model = self.build_model()
		self.update_target_model()

	def build_model(self):
		'''Build the Q-network: a variational quantum circuit followed by a
		linear read-out mapping state_size features to action_size Q-values.'''
		model = Sequential()
		model.add_module('vqc', QuantumLayer())
		model.add_module('fc', Linear(self.state_size, self.action_size))
		# TODO: consider adding an activation function
		return model

	def load_model(self, model_path = 'model/model.pth'):
		'''Load pre-trained parameters into the online network.

		Bug fix: the original called torch.load(self.model.state_dict(), path),
		passing the state dict where a file path is expected; the correct
		sequence is torch.load(path) followed by load_state_dict.
		'''
		self.model.load_state_dict(torch.load(model_path))
		self.update_target_model() # keep the target net consistent with the loaded weights
		self.epsilon = 0.1

	def save_model(self, model_path = 'model/model.pth'):
		'''Save model parameters '''
		torch.save(self.model.state_dict(), model_path)

	def update_target_model(self):
		'''Copy the online network's weights into the target network.'''
		self.target_model.load_state_dict(self.model.state_dict())

	def get_action(self, state):
		'''Select an action for `state` with an epsilon-greedy policy.'''
		print('--- GET ACTION ---')
		# Bug fix: force float32; an int tuple would otherwise produce a
		# LongTensor the float network parameters cannot consume.
		state = torch.tensor([state], dtype=torch.float32)
		q_value = self.model(state)
		print(f'state: {state[0]}, qvalue: {q_value[0]}, epsilon: {self.epsilon}')
		# Exploration
		if np.random.rand() <= self.epsilon:
			action = random.randrange(self.action_size)
			print(f'action(exploration)={action}\n-----')
			return action
		# Exploitation
		else:
			q_array = q_value.detach().numpy()
			action = np.argmax(q_array)
			print(f'action={action}\n-----')
			return action

	def append_sample(self, state, action, reward, next_state, done):
		'''Append one (s, a, r, s', done) transition to replay memory.'''
		self.memory.append((state, action, reward, next_state, done))

	def train_model(self):
		'''Train the online network on one random mini-batch.'''
		print('--- TRAIN MODEL ---')
		# Epsilon decay (multiplicative, floored at epsilon_min)
		if self.epsilon > self.epsilon_min:
			self.epsilon *= self.epsilon_decay
		else:
			self.epsilon = self.epsilon_min

		# Sample random mini-batch from replay memory
		mini_batch = random.sample(self.memory, self.batch_size)

		# float32 so the tensors match the network's parameter dtype
		states = np.zeros((self.batch_size, self.state_size), dtype=np.float32)
		next_states = np.zeros((self.batch_size, self.state_size), dtype=np.float32)
		actions, rewards, dones = [], [], []

		for i in range(self.batch_size):
			states[i] = mini_batch[i][0]
			actions.append(mini_batch[i][1])
			rewards.append(mini_batch[i][2])
			next_states[i] = mini_batch[i][3]
			dones.append(mini_batch[i][4])

		states_t = torch.tensor(states)

		# Build the TD target without tracking gradients. The original wrote
		# in-place into a grad-tracking tensor and called np.amax on a tensor
		# requiring grad — both raise at runtime; only the prediction made
		# below for the loss must carry gradients.
		with torch.no_grad():
			target = self.model(states_t).clone()
			target_val = self.target_model(torch.tensor(next_states))
			for i in range(self.batch_size):
				if dones[i]:
					target[i][actions[i]] = rewards[i]
				else:
					target[i][actions[i]] = rewards[i] + self.discount_factor * torch.max(target_val[i]).item()

		# Define loss function and optimizer. Use the configured learning
		# rate instead of the hard-coded lr=0.1 the original passed
		# (self.learning_rate was silently ignored).
		criterion = MSELoss()
		optimizer = optim.Adam(self.model.parameters(), lr=self.learning_rate)

		# Forward
		output = self.model(states_t)
		loss = criterion(output, target)

		# Back propagation
		optimizer.zero_grad() # Clear previously accumulated gradients
		loss.backward()
		optimizer.step()

		self.currentLoss = loss.item()

		print(f'loss = {self.currentLoss}')

		return

def get_reward(fps, target_fps, temp, threshold_temp, lamb):
	'''
	Compute the reward for one environment step.

	The reward is the sum of a QoE (frame-rate) term — floored at 1 and
	growing when fps exceeds the target — and a temperature term: staying
	below the thermal threshold earns a tanh-shaped bonus scaled by lamb,
	while reaching or exceeding it incurs a flat -10 * lamb penalty.
	'''
	# QoE term
	qoe = max(1, fps/target_fps)

	# Thermal term
	# TODO: use temperature in place of power
	headroom = threshold_temp - temp
	thermal = lamb * np.tanh(headroom) if headroom > 0 else -10 * lamb
	return qoe + thermal
	
if __name__=="__main__":
	#################
	# Get arguments #
	#################
	args = AgentParser()

	experiment_time = args.exp_time
	port = args.port
	target_fps = args.target_fps
	threshold_temp = args.threshold_temp

	# Create an agent: 4-dim state (little freq, big freq, temp, fps), 9 actions
	agent = DQNAgent(4, 9)

	# Set up
	step = 1
	copy = 0 # repeated samples in replay memory (0 disables the duplication loop below)
	time_step = []
	fps_data = []
	avg_q_max_data = []
	loss_data = []
	avg_reward = []
	reward_tmp = [] # sliding window of recent rewards (capped at 300 entries)

	# Initial state (dummy values until the first real sensing arrives)
	action = 0
	# CPU frequencies
	(cpu_little_freq, cpu_big_freq) = agent.action_space[action]
	# Temperature
	temp = 37
	# FPS
	fps = 30
	state = (cpu_little_freq, cpu_big_freq, temp, fps)

	# Create a server socket
	# NOTE(review): the name 'socket' shadows the imported stdlib module
	socket = ServerSocket(port)
	print('Waiting for client ...')
	socket.accept()

	# Create a figure ploter
	agent_ploter = AgentPloter(2, 1, 2, ['FPS', 'Loss'])

	try:
		while step <= experiment_time:
			print(f'=== STEP {step} ===')

			# Get data from the device
			# assumes the message unpacks to exactly 4 fields — TODO confirm against client
			state_msg = socket.receive()
			[cpu_little_freq, cpu_big_freq, temp, fps] = state_msg
			
			# Record data
			time_step.append(step)
			fps_data.append(fps)
			# TODO: record power data

			next_state = (cpu_little_freq, cpu_big_freq, temp, fps)
			print(f'[{step}] state: {state}, next_state: {next_state}')

			# Track the running average of the max Q-value for logging
			print('GET Q MAX')
			q_value = agent.model(torch.tensor([next_state]))
			agent.q_max += torch.max(q_value).item()
			agent.avg_q_max = agent.q_max / step
			avg_q_max_data.append(agent.avg_q_max)
			loss_data.append(agent.currentLoss)

			# Get reward
			# TODO: Handle dummy value at the first sensing
			reward = get_reward(fps, target_fps, temp, threshold_temp, lamb=0.2)
			reward_tmp.append(reward)
			if (len(reward_tmp) >= 300):
				reward_tmp.pop(0)

			# Update replay memory
			# NOTE(review): done is always 1 — the episode never terminates,
			# so the target-model update below runs every step
			done = 1
			agent.append_sample(state, action, reward, next_state, done)
			for i in range(copy): # Repeat critical samples for 'copy' times
				if (reward < 0 or reward > 1):
					agent.append_sample(state, action, reward, next_state, done)
			print(f'[{step}] state: {state} action: {action} reward: {reward:.2f} next_state: {next_state}')

			# Train the model once the replay memory holds enough samples
			if (len(agent.memory) >= agent.train_start):
				agent.train_model()
			avg_reward.append(sum(reward_tmp) / len(reward_tmp))

			# Decide the next action
			state = next_state

			# TODO: handle special cases, e.g. overheating
			# if temp >= threshold_temp:
			# 	cpu_little_freq -= 1
			# 	cpu_big_freq -= 1
			action = agent.get_action(state)
			cpu_little_freq = agent.action_space[action][0]
			cpu_big_freq = agent.action_space[action][1]

			# Send the action to the device
			socket.send(f'{cpu_little_freq},{cpu_big_freq}')

			# Update the target model
			if done:
				agent.update_target_model()

			step += 1

			# Redraw
			agent_ploter.update(fps_data, loss_data)
			# NOTE(review): only effective if train_model reads self.learning_rate
			if (step % 60 == 0):
				agent.learning_rate = 0.1
			# Save a model checkpoint periodically
			if (step % 500 == 0):
				agent.save_model(f'model/model_{step}.pth')

	finally:
		# Stop training
		print(f'=== STOP TRAINING ===\nreward_tmp: {reward_tmp}')
		agent.save_model(f'model/model_{step-1}.pth')

		# Output data to a log file: one row each of fps, loss, avg-q-max
		csv_file = f'./output/231106-agent_{step-1}.csv'
		with open(csv_file, mode = 'w', newline = '') as file:
			writer = csv.writer(file)
			# Record loss
			writer.writerow(fps_data)
			writer.writerow(loss_data)
			writer.writerow(avg_q_max_data)

		# Close server socket once the client signals goodbye
		bye = socket.receive()
		if (bye[0] == 1):
			socket.close()