from torch.optim import SGD, Adam
from pytorch3d.transforms import Rotate, Translate,euler_angles_to_matrix
import torch
from torch import nn
import os
import sys

# Make the working directory importable so sibling modules resolve when
# this file is run as a script (assumes it is launched from the repo root).
current_path = os.getcwd()
sys.path.append(current_path)

print(current_path)

def euler_to_rotation_matrix_zyx(angle_z, angle_y, angle_x):
    """Build a 3x3 rotation matrix from Z-Y-X (yaw, pitch, roll) Euler angles.

    The result equals Rz(angle_z) @ Ry(angle_y) @ Rx(angle_x), assembled
    element-wise so autograd can flow through the input angle tensors.

    Args:
        angle_z, angle_y, angle_x: scalar tensors (radians).

    Returns:
        A (3, 3) rotation matrix tensor.
    """
    sz, cz = torch.sin(angle_z), torch.cos(angle_z)
    sy, cy = torch.sin(angle_y), torch.cos(angle_y)
    sx, cx = torch.sin(angle_x), torch.cos(angle_x)

    # Rows of the composed matrix, written out explicitly.
    row0 = torch.stack([cz * cy, cz * sy * sx - sz * cx, cz * sy * cx + sz * sx])
    row1 = torch.stack([sz * cy, sz * sy * sx + cz * cx, sz * sy * cx - cz * sx])
    row2 = torch.stack([-sy, cy * sx, cy * cx])

    return torch.stack([row0, row1, row2])

class PolicyNetAtt(nn.Module):

    def __init__(self,
                 input_dim: int,
                 policy_dim: int = 2):

        super(PolicyNetAtt, self).__init__()

        self.num_landmark = int((input_dim - 4) / 3)

        self.agent_pos_fc1_pi = nn.Linear(4, 32)
        self.agent_pos_fc2_pi = nn.Linear(32, 32)
        self.landmark_fc1_pi = nn.Linear(3, 64)
        self.landmark_fc2_pi = nn.Linear(64, 32)
        self.info_fc1_pi = nn.Linear(64, 64)
        self.action_fc1_pi = nn.Linear(64, 64)
        self.action_fc2_pi = nn.Linear(64, policy_dim)

        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
        self.softmax = nn.Softmax(dim=2)
        
    def forward(self, observation: torch.Tensor) -> torch.Tensor:
        if len(observation.size()) == 1:
            observation = observation[None, :]

        # compute the policy
        # embeddings of agent's position  
        agent_pos_embedding = self.relu(self.agent_pos_fc1_pi(observation[:, :4]))
        agent_pos_embedding = self.relu(self.agent_pos_fc2_pi(agent_pos_embedding))

        # embeddings of landmarkss
        estimated_landmark_pos = observation[:, 4: 4 + 3 * self.num_landmark]
        
        #landmark_info = torch.cat((estimated_landmark_pos.reshape(observation.size()[0], self.num_landmark, 2),
        #                        info_vector.reshape(observation.size()[0], self.num_landmark, 2)), 2)
        
        landmark_reshape=estimated_landmark_pos.reshape(observation.size()[0], self.num_landmark, 3)
        #landmark_embedding = self.relu(self.landmark_fc1_pi(landmark_info))
        
        landmark_embedding = self.relu(self.landmark_fc1_pi(landmark_reshape))
        landmark_embedding = self.relu(self.landmark_fc2_pi(landmark_embedding))

        # attention
        landmark_embedding_tr = torch.transpose(landmark_embedding, 1, 2)

        # mask
        mask = observation[:, - self.num_landmark:].unsqueeze(1)
        attention = torch.matmul(agent_pos_embedding.unsqueeze(1), landmark_embedding_tr) / 4
        attention = attention.masked_fill(mask == 0, -1e10)

        att = self.softmax(attention)
        landmark_embedding_att = self.relu((torch.matmul(att, torch.transpose(landmark_embedding_tr, 1, 2)).squeeze(1)))

        info_embedding = self.relu(self.info_fc1_pi(torch.cat((agent_pos_embedding, landmark_embedding_att), 1)))
        action = self.tanh(self.action_fc1_pi(info_embedding))
        action = self.tanh(self.action_fc2_pi(action))

        if action.size()[0] == 1:
            action = action.flatten()

        #scaled_action = torch.hstack(((1 + action[0]) * 2.0, action[1] * torch.pi/3))
        scaled_action=action

        return scaled_action


class Agent:
    """Camera agent that plans view actions with an attention policy.

    Attribute layout (inferred from usage — confirm against callers):
      _s      : 4-vector agent state; [0:2] look like (z, y) Euler angles,
                [2:4] their rates (the integrator in the training loop
                treats them that way).
      _p_b    : 3-D body position (world frame).
      _p_T    : 3-D target position (world frame).
      _p_f_W  : (3, N) landmark/feature positions (world frame).
    """

    def __init__(self, s=None, p_b=None, p_T=None, max_num_landmarks=128,
                 fov=torch.pi / 2, lr=1e-5):
        # Fresh tensors per instance: tensor defaults written in the
        # signature would be evaluated once and shared (and mutable)
        # across every Agent instance.
        self._s = torch.zeros(4) if s is None else s
        self._p_b = torch.zeros(3) if p_b is None else p_b
        self._p_T = torch.ones(3) if p_T is None else p_T
        self._max_num_landmarks = max_num_landmarks
        # Random landmark count in [1, max_num_landmarks).
        self._num_landmarks = torch.randint(low=1, high=self._max_num_landmarks, size=(1,)).item()
        self._p_f_W = torch.ones([3, self._num_landmarks])  # feature position in world frame

        # Measurement noise: large variance along the optical (x) axis.
        self._V_flatten = torch.tensor([100., 1., 1.])
        self._V = torch.diag(self._V_flatten)
        self._V_inv = torch.diag(1 / self._V_flatten)

        # Observation = s(4) + p_b(3) + p_T(3) + 3*max landmarks (padded).
        self._input_dim = max_num_landmarks * 3 + 3 + 3 + 4
        self._fov = fov

        self._policy = PolicyNetAtt(input_dim=self._input_dim)
        self._policy_optimizer = Adam(self._policy.parameters(), lr=lr)
        self.loss = 0.

    def update_rotation_matrix(self):
        """Refresh the camera-in-world rotation from the state angles.

        Uses ZYX Euler angles with zero roll: s[0] = z (yaw), s[1] = y (pitch).
        """
        self._R_W_C = euler_to_rotation_matrix_zyx(self._s[0], self._s[1], torch.tensor(0.0))

    def calculate_visibility(self, a, bearing):
        """Smooth visibility score in [0, 1] as a function of bearing.

        A sigmoid of cos(bearing * pi / fov) with sharpness ``a``, rescaled
        so bearing 0 maps to 1 and bearing == fov maps to 0.

        Args:
            a: sharpness (float or scalar tensor).
            bearing: angle between the optical axis and the point (radians).
        """
        # as_tensor accepts floats and tensors alike (torch.tensor would
        # warn/copy when handed an existing tensor).
        a_vis = torch.as_tensor(a)
        v = 1 / (1 + torch.exp(-a_vis * torch.cos(bearing * torch.pi / self._fov)))
        v_max = 1 / (1 + torch.exp(-a_vis))  # value at bearing 0
        v_min = 1 / (1 + torch.exp(a_vis))   # value at bearing == fov

        return (v - v_min) / (v_max - v_min)

    def calculate_target_visiblity(self):
        """Visibility of the target from the current camera orientation."""
        # Target in camera coordinates; bearing is the angle off the x axis.
        p_T_C = self._R_W_C.T @ self._p_T
        bear_T = torch.acos(p_T_C[0] / torch.norm(p_T_C))
        return self.calculate_visibility(1., bear_T)

    def calculate_FIM(self):
        """Accumulate the visibility-weighted Fisher information matrix
        over all landmarks, in the camera frame.

        NOTE(review): ``rodrigues_rotation`` is neither defined nor imported
        in this file — calling this method raises NameError. It presumably
        builds the rotation aligning the camera x axis with the landmark
        direction; restore or import the helper before using this method.
        """
        p_f_C = self._R_W_C.T @ self._p_f_W

        unitX = torch.tensor([1., 0., 0.])  # loop-invariant optical axis
        M_total = torch.zeros([3, 3])
        for i in range(self._num_landmarks):
            v_f_C = p_f_C[:, i]
            dis_f = torch.norm(v_f_C)
            bear_f = torch.acos(v_f_C[0] / dis_f)
            vis_f = self.calculate_visibility(44., bear_f)

            R = rodrigues_rotation(unitX, v_f_C)

            # Information falls off with squared distance, rotated into
            # the landmark-aligned frame and weighted by visibility.
            M_total += vis_f * (R.T @ self._V_inv @ R) / dis_f / dis_f

        return M_total

    def plan(self, s, p_b, p_T, p_f):
        """Assemble the padded observation vector and query the policy.

        Args:
            s: 4-vector agent state.
            p_b: 3-D body position.
            p_T: 3-D target position.
            p_f: (3, N) landmark positions, N <= max_num_landmarks.

        Returns:
            The policy's action tensor.
        """
        # Zero-pad the landmark block up to the fixed maximum, and mark
        # which slots are real with a boolean mask.
        padding = torch.zeros(3 * (self._max_num_landmarks - p_f.shape[1]))
        mask = torch.tensor([True] * p_f.shape[1] + [False] * (self._max_num_landmarks - p_f.shape[1]))

        net_input = torch.hstack([s, p_b, p_T, p_f.flatten(), padding, mask])
        return self._policy.forward(net_input)
    
    
    
# Fine-tune the policy: sample random states/targets, roll the dynamics one
# RK4 step under the policy action, and descend on negative target visibility.
ag = Agent()
ag._policy.train()
ag._policy.load_state_dict(torch.load(current_path+'/checkpoints/model_info_5_moving_landmarks_2.pth'))

batch_size = 1000
r_list = []
for i in range(300):
    print(i)
    r_sum = torch.tensor(0.0)
    for j in range(batch_size):
        # Random agent state and target for this rollout.
        ag._s = torch.rand(4)
        ag._p_T = torch.rand(3)
        action = ag.plan(ag._s, ag._p_b, ag._p_T, ag._p_f_W)

        # RK4 step of the double-integrator state s = [angles, rates]:
        # the action is a constant acceleration over the step, so only the
        # rate half of xdot changes between stages.
        dt = 5.0
        xdot = torch.hstack([ag._s[2:], action])
        k1 = xdot
        k2 = xdot + torch.hstack([k1[2:] * dt / 2, torch.zeros(2)])
        k3 = xdot + torch.hstack([k2[2:] * dt / 2, torch.zeros(2)])
        k4 = xdot + torch.hstack([k3[2:] * dt, torch.zeros(2)])
        ag._s = ag._s + (k1 + 2 * k2 + 2 * k3 + k4) * dt / 6

        ag.update_rotation_matrix()

        # Loss: negative target visibility (minimized -> visibility maximized).
        r = -(ag.calculate_target_visiblity())

        # BUG FIX: gradients were never cleared, so each step applied the
        # accumulated sum of all previous gradients; zero them first.
        ag._policy_optimizer.zero_grad()
        r.backward()
        ag._policy_optimizer.step()

        # Detach so the running sum does not retain the autograd graph.
        r_sum += r.detach()
    print(r_sum)
    torch.save(ag._policy.state_dict(), current_path+'/checkpoints/model_info_5_moving_landmarks_2.pth')
    r_list.append(r_sum)
