repo_name
stringlengths 6
97
| path
stringlengths 3
341
| text
stringlengths 8
1.02M
|
---|---|---|
nikhilc149/e-utran-features-bug-fixes | cp/gx_app/include/gx_def.h | <filename>cp/gx_app/include/gx_def.h
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __GX_DEF_H__
#define __GX_DEF_H__
#define GX_TDF_DESTINATION_HOST_LEN 255
#define GX_PACKET_FILTER_CONTENT_LEN 255
#define GX_PHYSICAL_ACCESS_ID_LEN 255
#define GX_3GPP_RAT_TYPE_LEN 255
#define GX_TRACKING_AREA_IDENTITY_LEN 255
#define GX_OMC_ID_LEN 255
#define GX_RAI_LEN 255
#define GX_SECONDARY_EVENT_CHARGING_FUNCTION_NAME_LEN 255
#define GX_ORIGIN_HOST_LEN 255
#define GX_SERVICE_CONTEXT_ID_LEN 255
#define GX_LOGICAL_ACCESS_ID_LEN 255
#define GX_3GPP_SGSN_MCC_MNC_LEN 255
#define GX_TUNNEL_HEADER_FILTER_LEN 255
#define GX_ACCESS_NETWORK_CHARGING_IDENTIFIER_VALUE_LEN 255
#define GX_SECONDARY_CHARGING_COLLECTION_FUNCTION_NAME_LEN 255
#define GX_DESTINATION_HOST_LEN 255
#define GX_3GPP_SELECTION_MODE_LEN 255
#define GX_LOCATION_AREA_IDENTITY_LEN 255
#define GX_TDF_APPLICATION_IDENTIFIER_LEN 255
#define GX_FRAMED_IPV6_PREFIX_LEN 255
#define GX_3GPP_CHARGING_CHARACTERISTICS_LEN 255
#define GX_MDT_ALLOWED_PLMN_ID_LEN 255
#define GX_ORIGIN_REALM_LEN 255
#define GX_TWAN_IDENTIFIER_LEN 255
#define GX_FLOW_LABEL_LEN 255
#define GX_3GPP_GGSN_IPV6_ADDRESS_LEN 255
#define GX_RESTRICTION_FILTER_RULE_LEN 255
#define GX_3GPP_SGSN_ADDRESS_LEN 255
#define GX_TDF_DESTINATION_REALM_LEN 255
#define GX_SUBSCRIPTION_ID_DATA_LEN 255
#define GX_REDIRECT_SERVER_ADDRESS_LEN 255
#define GX_3GPP_SGSN_IPV6_ADDRESS_LEN 255
#define GX_3GPP2_BSID_LEN 255
#define GX_CHARGING_RULE_BASE_NAME_LEN 255
#define GX_USER_EQUIPMENT_INFO_VALUE_LEN 255
#define GX_ROUTE_RECORD_LEN 255
#define GX_PRESENCE_REPORTING_AREA_IDENTIFIER_LEN 255
#define GX_FILTER_ID_LEN 255
#define GX_SSID_LEN 255
#define GX_FLOW_DESCRIPTION_LEN 255
#define GX_POSITIONING_METHOD_LEN 255
#define GX_SOURCEID_LEN 255
#define GX_BEARER_IDENTIFIER_LEN 255
#define GX_SPONSOR_IDENTITY_LEN 255
#define GX_DEFAULT_QOS_NAME_LEN 255
#define GX_TRAFFIC_STEERING_POLICY_IDENTIFIER_UL_LEN 255
#define GX_ERROR_REPORTING_HOST_LEN 255
#define GX_CELL_GLOBAL_IDENTITY_LEN 255
#define GX_APPLICATION_SERVICE_PROVIDER_IDENTITY_LEN 255
#define GX_TRACE_NE_TYPE_LIST_LEN 255
#define GX_REDIRECT_HOST_LEN 255
#define GX_RAN_NAS_RELEASE_CAUSE_LEN 255
#define GX_TRACE_EVENT_LIST_LEN 255
#define GX_3GPP_USER_LOCATION_INFO_LEN 255
#define GX_SECURITY_PARAMETER_INDEX_LEN 255
#define GX_TRACE_INTERFACE_LIST_LEN 255
#define GX_TRAFFIC_STEERING_POLICY_IDENTIFIER_DL_LEN 255
#define GX_3GPP_GGSN_ADDRESS_LEN 255
#define GX_E_UTRAN_CELL_GLOBAL_IDENTITY_LEN 255
#define GX_CALLED_STATION_ID_LEN 255
#define GX_FRAMED_IP_ADDRESS_LEN 255
#define GX_PACKET_FILTER_IDENTIFIER_LEN 255
#define GX_TDF_APPLICATION_INSTANCE_IDENTIFIER_LEN 255
#define GX_PROXY_HOST_LEN 255
#define GX_PDN_CONNECTION_ID_LEN 255
#define GX_PRESENCE_REPORTING_AREA_ELEMENTS_LIST_LEN 255
#define GX_MONITORING_KEY_LEN 255
#define GX_3GPP_MS_TIMEZONE_LEN 255
#define GX_CHARGING_RULE_NAME_LEN 255
#define GX_ERROR_MESSAGE_LEN 255
#define GX_ROUTING_AREA_IDENTITY_LEN 255
#define GX_TFT_FILTER_LEN 255
#define GX_TRACE_REFERENCE_LEN 255
#define GX_MEASUREMENT_QUANTITY_LEN 255
#define GX_PROXY_STATE_LEN 255
#define GX_AF_CHARGING_IDENTIFIER_LEN 255
#define GX_ROUTING_RULE_IDENTIFIER_LEN 255
#define GX_DESTINATION_REALM_LEN 255
#define GX_SESSION_ID_LEN 255
#define GX_TOS_TRAFFIC_CLASS_LEN 255
#define GX_BSSID_LEN 255
#define GX_PRIMARY_EVENT_CHARGING_FUNCTION_NAME_LEN 255
#define GX_PRIMARY_CHARGING_COLLECTION_FUNCTION_NAME_LEN 255
/* Generic on/off flags.
 * NOTE(review): DISABLED is 1 and ENABLED is 0 -- the opposite of the
 * usual convention. Code in this repo compares against these macros by
 * name, so the inversion appears deliberate; confirm before testing
 * these values for truthiness. */
#define DISABLED 1
#define ENABLED 0
/* Presence flags for optional fields/AVPs. */
#define NOT_PRESENT 0
#define PRESENT 1
/* Default bearer identity (presumably the first valid EPS bearer id, 5
 * -- TODO confirm against 3GPP usage in the callers). */
#define DEFAULT_BEARER_ID 5
/*rfc4006 8.3*/
enum cc_request_type_value{
INITIAL_REQUEST = 1,
UPDATE_REQUEST,
TERMINATION_REQUEST,
EVENT_REQUEST,
};
enum network_request_support_value{
NETWORK_REQUEST_NOT_SUPPORTED = 0,
NETWORK_REQUEST_SUPPORTED,
};
enum ip_can_type_value{
TGPP_GPRS = 0,
DOCSIS,
XDSL,
WIMAX,
TGPP2,
TGPP_EPS,
NON_3GPP_EPS,
FBA,
};
enum metering_method_value{
DURATION = 0,
VOLUME,
DURATION_VOLUME,
EVENT,
};
enum mute_notif_value{
MUTE_REQUIRED = 0,
};
enum online_value{
DISABLE_ONLINE = 0,
ENABLE_ONLINE,
};
enum offline_value{
DISABLE_OFFLINE = 0,
ENABLE_OFFLINE,
};
enum packet_filter_usage_value{
SEND_TO_UE = 0,
};
/* Packet-Filter-Operation AVP values.
 * NOTE(review): MDIFICAITON is a typo for MODIFICATION. It is kept as-is
 * because renaming the enumerator would break every existing caller;
 * fix it repo-wide in a dedicated change. */
enum packet_filter_operation_value{
	DELETION = 0,
	ADDITION,
	MDIFICAITON,
};
enum pre_emption_capability_value{
PRE_EMPTION_CAPABILITY_ENABLED = 0,
PRE_EMPTION_CAPABILITY_DISABLED,
};
enum pre_emption_vulnerability_value{
PRE_EMPTION_VULNERABILITY_ENABLED = 0,
PRE_EMPTION_VULNERABILITY_DISABLED,
};
enum pcc_rule_status_value{
ACTIVE = 0,
INACTIVE,
TEMPORARILY_INACTIVE,
};
enum ps_to_cs_session_conitnuity_value{
VIDEO_PS2CS_CONT_CANDIDATE = 0,
};
enum qos_class_identifier_value{
QCI_1 = 1,
QCI_2,
QCI_3,
QCI_4,
QCI_5,
QCI_6,
QCI_7,
QCI_8,
QCI_9,
QCI_65 = 65,
QCI_66,
QCI_69 = 69,
QCI_70,
};
enum qos_negotiation_value{
NO_QOS_NEGOTIATION = 0,
QOS_NEGOTIATION_SUPPORTED,
};
enum qos_upgrade_value{
QOS_UPGRADE_NOT_SUPPORTED = 0,
QOS_UPGRADE_SUPPORTED,
};
enum rat_type_value{
GX_WLAN = 0,
GX_VIRTUAL,
GX_UTRAN = 1000,
GX_GERAN,
GX_GAN,
GX_HSPA_EVOLUTION,
GX_EUTRAN,
GX_CDMA2000_1X = 2000,
GX_HRPD,
GX_UMB,
GX_EHRPD,
};
enum redirect_support_value{
REDIRECTION_DISABLED = 0,
REDIRECTION_ENABLED ,
};
enum repoting_level_value{
SERVICE_IDENTIFIER_LEVEL = 0,
RATING_GROUP_LEVEL,
SPONSORED_CONNECTIVITY_LEVEL,
};
enum resource_alloc_notif_value{
ENABLE_NOTIFICATION = 0,
};
enum an_gw_status_value{
AN_GW_FAILED = 0,
};
enum bearer_control_mode_value{
UE_ONLY = 0,
RESERVED,
UE_NW,
};
enum bearer_operation_value{
TERMINATION = 0,
ESTABLISHMENT,
MODIFICATION,
};
enum bearer_usage_value{
GENERAL = 0,
IMS_SIGNALLING,
};
enum charging_correl_ind_value{
CHARGING_IDENTIFIER_REQUIRED = 0,
};
enum csg_info_reporting_value{
CHANGE_CSG_CELL = 0,
CHANGE_CSG_SUBSCRIBED_HYBRID_CELL,
CHANGE_CSG_UNSUBSCRIBED_HYBRID_CELL,
};
enum event_trigger_value{
SGSN_CHANGE = 0,
QOS_CHANGE,
RAT_CHANGE,
TFT_CHANGE,
PLMN_CHANGE,
LOSS_OF_BEARER,
RECOVERY_OF_BEARER,
IP_CAN_CHANGE,
QOS_CHANGE_EXCEEDING_AUTHORIZATION = 11,
RAI_CHANGE,
USER_LOCATION_CHANGE,
NO_EVENT_TRIGGERS,
OUT_OF_CREDIT,
REALLOCATION_OF_CREDIT,
REVALIDATION_TIMEOUT,
UE_IP_ADDRESS_ALLOCATE = 18,
UE_IP_ADDRESS_RELEASE,
DEFAULT_EPS_BEARER_QOS_CHANGE,
AN_GW_CHANGE,
SUCCESSFUL_RESOURCE_ALLOCATION = 22,
RESOURCE_MODIFICATION_REQUEST = 23,
PGW_TRACE_CONTROL,
UE_TIME_ZONE_CHANGE,
TAI_CHANGE,
ECGI_CHANGE,
CHARGING_CORRELATION_EXCHANGE,
APN_AMBR_MODIFICATION_FAILURE = 29,
USER_CSG_INFORMATION_CHANGE,
USAGE_REPORT = 33,
DEFAULT_EPS_BEARER_QOS_MODIFICATION_FAILURE = 34,
USER_CSG_HYBRID_SUBSCRIBED_INFORMATION_CHANGE,
USER_CSG_HYBRID_UNSUBSCRIBED_INFORMATION_CHANGE,
ROUTING_RULE_CHANGE,
APPLICATION_START = 39,
APPLICATION_STOP,
CS_TO_PS_HANDOVER = 42,
UE_LOCAL_IP_ADDRESS_CHANGE,
HENB_LOCAL_IP_ADDRESS_CHANGE,
ACCESS_NETWORK_INFO_REPORT,
CREDIT_MANAGEMENT_SESSION_FAILURE,
DEFAULT_QOS_CHANGE,
CHANGE_OF_UE_PRESENCE_IN_PRESENCE_REPORTING_AREA_REPORT,
RESOURCE_RELEASE = 53,
};
enum flow_direction_value{
GX_UNSPECIFIED = 0,
GX_DOWNLINK,
GX_UPLINK,
GX_BIDIRECTIONAL,
};
enum rule_failure_code{
NO_FAIL = 0,
UNKNOWN_RULE_NAME = 1,
RATING_GROUP_ERROR,
SERVICE_IDENTIFIER_ERROR,
GW_PCEF_MALFUNCTION,
RESOURCES_LIMITATION,
MAX_NR_BEARERS_REACHED,
UNKNOWN_BEARER_ID,
MISSING_BEARER_ID,
MISSING_FLOW_INFORMATION,
RESOURCE_ALLOCATION_FAILURE,
UNSUCCESSFUL_QOS_VALIDATION,
INCORRECT_FLOW_INFORMATION,
PS_TO_CS_HANDOVER,
TDF_APPLICATION_IDENTIFIER_ERROR,
NO_BEARER_BOUND = 15,
FILTER_RESTRICTIONS,
AN_GW_RULE_FAILED,
MISSING_REDIRECT_SERVER_ADDRESS,
CM_END_USER_SERVICE_DENIED,
CM_CREDIT_CONTROL_NOT_APPLICABLE,
CM_AUTHORIZATION_REJECTED,
CM_USER_UNKNOWN,
CM_RATING_FAILED = 23,
};
enum session_release_cause_value{
UNSPECIFIED_REASON = 0,
UE_SUBSCRIPTION_REASON,
INSUFFICIENT_SERVER_RESOURCES,
IP_CAN_SESSION_TERMINATION,
UE_IP_ADDRESS_SESS_RELEASE,
};
enum usage_monitoring_level_value{
SESSION_LEVEL = 0,
PCC_RULE_LEVEL,
ADC_RULE_LEVEL,
};
enum usage_monitoring_report_value{
USAGE_MONITORING_REPORT_REQUIRED = 0,
};
enum usage_monitoring_support_value{
USAGE_MONITORING_DISABLED = 0 ,
};
enum user_equipment_info_type{
IMEISV = 0,
};
enum subscription_id_type{
END_USER_E164 = 0,
END_USER_IMSI,
END_USER_SIP_URI,
END_USER_NAI,
END_USER_PRIVATE,
};
#endif /* __GX_DEF_H__ */
|
nikhilc149/e-utran-features-bug-fixes | dp/ipv6.c | <reponame>nikhilc149/e-utran-features-bug-fixes<filename>dp/ipv6.c
/*
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ipv6.h"
/**
* @brief : Function to construct ipv6 header.
* @param : m, mbuf pointer
* @param : len, len of header
* @param : protocol, next protocol id
* @param : src_ip, Source ip address
* @param : dst_ip, destination ip address
* @return : Returns nothing
*/
void
construct_ipv6_hdr(struct rte_mbuf *m, uint16_t len, uint8_t protocol,
		struct in6_addr *src_ip, struct in6_addr *dst_ip)
{
	/* Presumably writes a default/template IPv6 header into the mbuf
	 * (declared in ipv6.h) -- confirm in the helper's definition. */
	build_ipv6_default_hdr(m);
	/* Then fills in payload length, next-header protocol and the
	 * source/destination addresses supplied by the caller. */
	set_ipv6_hdr(m, len, protocol, src_ip, dst_ip);
}
|
nikhilc149/e-utran-features-bug-fixes | cp_dp_api/tcp_client.c | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tcp_client.h"
#include "gw_adapter.h"
#ifdef CP_BUILD
extern pfcp_config_t config;
#endif
extern int clSystemLog;
#ifdef DP_BUILD
extern struct app_params app;
#endif
/**
 * @brief : Insert a socket fd into the tracking array if not already present.
 * @param : sock_arr, array of socket fds; caller must guarantee capacity
 *          for at least one more entry (no bound is available here).
 * @param : arr_size, in/out count of valid entries in sock_arr
 * @param : fd, socket fd to insert
 * @return : Returns nothing
 */
void
insert_fd(int *sock_arr, uint32_t *arr_size, int fd){

	/* Already tracked? Then nothing to do. An empty array simply skips
	 * the loop, so no separate zero-size fast path is needed (the
	 * original duplicated the append logic for that case). */
	for(uint32_t i = 0; i < *arr_size; i++){
		if(sock_arr[i] == fd)
			return;
	}

	/* NOTE(review): no capacity parameter exists, so an overfull array
	 * would overflow here -- callers must size sock_arr appropriately. */
	sock_arr[*arr_size] = fd;
	*arr_size = *arr_size + 1;
	return;
}
|
nikhilc149/e-utran-features-bug-fixes | ulpc/legacy_df_interface/include/LegacyInterface.h | <filename>ulpc/legacy_df_interface/include/LegacyInterface.h
/*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
#ifndef _LEGACY_INTERFACE_H_
#define _LEGACY_INTERFACE_H_
#include <stdint.h>
#include <iostream>
#include "BaseLegacyInterface.h"
class LegacyClient;
/* Concrete legacy-DF (delivery function) interface built on
 * BaseLegacyInterface; delegates the transport work to a LegacyClient. */
class LegacyInterface : public BaseLegacyInterface
{
	public:
		/*
		 * @brief : Constructor of class LegacyInterface
		 */
		LegacyInterface();

		/*
		 * @brief : Destructor of class LegacyInterface
		 */
		~LegacyInterface();

		/*
		 * @brief : Function to assign the ELogger instance used by this
		 *          interface (stored in the static `logger` member)
		 * @param : log, ELogger object
		 * @return : Returns void
		 */
		void ConfigureLogger(ELogger &log)
		{
			logger = &log;
			logger->debug("LegacyInterface ELogger has been initilized");
		}

		/*
		 * @brief : Function to initialise legacy interface
		 * @param : strCommMode, mode of communication
		 * @return : Returns int8_t
		 */
		int8_t InitializeLegacyInterface(const std::string& strCommMode);

		/*
		 * @brief : Function to connect with legacy DF
		 * @param : strRemoteIp, legacy DF IP
		 * @param : uiRemotePort, legacy DF port
		 * @return : Returns int8_t
		 */
		int8_t ConnectWithLegacyInterface(const std::string& strRemoteIp,
				uint16_t uiRemotePort);

		/*
		 * @brief : Function to send information/packet to legacy DF
		 * @param : pkt, packet to be sent
		 * @param : packetLen, size of packet
		 * @return : Returns int8_t
		 */
		int8_t SendMessageToLegacyInterface(uint8_t *pkt, uint32_t packetLen);

		/*
		 * @brief : Function to disconnect from legacy DF
		 * @param : No arguments
		 * @return : Returns int8_t
		 */
		int8_t DisconnectWithLegacyInterface();

		/*
		 * @brief : Function to de-initialise legacy DF
		 * @param : No arguments
		 * @return : Returns int8_t
		 */
		int8_t DeinitalizeLegacyInterface();

		/* Accessor for the shared logger configured via ConfigureLogger(). */
		static ELogger &log() { return *logger; }

	private:
		static ELogger *logger;      // shared logger; set by ConfigureLogger()
		LegacyClient *legacyClient;  // underlying transport to the legacy DF
};
#endif /* _LEGACY_INTERFACE_H_ */
|
nikhilc149/e-utran-features-bug-fixes | cp/prdef_rules.c |
/*
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cp.h"
#include "gw_adapter.h"
#include "pfcp_util.h"
#include "pfcp_set_ie.h"
#include "packet_filters.h"
#include "vepc_cp_dp_api.h"
#include "predef_rule_init.h"
#include "pfcp_messages_encoder.h"
#include "pfcp_session.h"
/* Maximum Rule counts */
#define MAX_RULE_CNT 16
/* Human-readable names for TFT packet-filter directions, indexed by the
 * TFT_DIRECTION_* enumerators; used in debug logs (see build_sdf_rules). */
const char *TFT_direction_str[] = {
	[TFT_DIRECTION_DOWNLINK_ONLY] = "DOWNLINK_ONLY ",
	[TFT_DIRECTION_UPLINK_ONLY] = "UPLINK_ONLY ",
	[TFT_DIRECTION_BIDIRECTIONAL] = "BIDIRECTIONAL " };
extern int pfcp_fd;
extern int pfcp_fd_v6;
extern pfcp_config_t config;
extern peer_addr_t upf_pfcp_sockaddr;
extern int clSystemLog;
/* Validate the index already not in the list*/
/* Return PRESENT when 'indx' already occurs within the first num_cnt
 * entries of rules_arr, 0 otherwise. Used to de-duplicate rule indexes
 * before dumping them on the user plane. */
static int8_t
check_exsting_indx_val(uint32_t indx, uint32_t num_cnt, uint32_t *rules_arr)
{
	uint32_t pos = 0;

	while (pos < num_cnt) {
		if (indx == rules_arr[pos])
			return PRESENT;
		pos++;
	}

	return 0;
}
/*
 * @brief : Build a pkt_filter entry (rule-string form) from a predefined
 *          SDF packet filter, for shipping to the user plane.
 * @param : index, SDF rule index; stored as the rule id
 * @param : sdf_filter, parsed SDF filter; v4/v6 flags select which
 *          address fields are rendered
 * @return : Returns a newly rte_zmalloc'ed pkt_filter on success, NULL on
 *           missing/invalid direction or allocation failure. Caller owns
 *           the returned memory (release with rte_free).
 */
static struct pkt_filter *
build_sdf_rules(uint16_t index, pkt_fltr *sdf_filter)
{
	char local_ip[IPV6_STR_LEN];
	char remote_ip[IPV6_STR_LEN];

	/* Render the endpoint addresses as strings. inet_ntoa() returns a
	 * static buffer, but each result is copied out immediately, so the
	 * two calls do not clobber each other. */
	if(PRESENT == sdf_filter->v4){
		snprintf(local_ip, sizeof(local_ip), "%s",
				inet_ntoa(sdf_filter->local_ip_addr));
		snprintf(remote_ip, sizeof(remote_ip), "%s",
				inet_ntoa(sdf_filter->remote_ip_addr));
	} else {
		inet_ntop(AF_INET6, sdf_filter->local_ip6_addr.s6_addr, local_ip, IPV6_STR_LEN);
		inet_ntop(AF_INET6, sdf_filter->remote_ip6_addr.s6_addr, remote_ip, IPV6_STR_LEN);
	}

	/* Build the entry on the stack first; only heap-allocate on success. */
	struct pkt_filter pktf = {
		.rule_id = index
	};
	pktf.direction = sdf_filter->direction;

	/* CP always send the SDF rule in Downlink Format */
	if(sdf_filter->v4){
		if ((sdf_filter->direction == TFT_DIRECTION_DOWNLINK_ONLY)
			|| (sdf_filter->direction == TFT_DIRECTION_UPLINK_ONLY)
			|| (sdf_filter->direction == TFT_DIRECTION_BIDIRECTIONAL)) {
			/* Downlink Format:
			 * "<local_ip>/<mask> <remote_ip>/<mask> <lport_lo> : <lport_hi>
			 *  <rport_lo> : <rport_hi> 0x<proto>/0x<proto_mask>" */
			snprintf(pktf.u.rule_str, MAX_LEN, "%s/%"PRIu8" %s/%"PRIu8
				" %"PRIu16" : %"PRIu16" %"PRIu16" : %"PRIu16
				" 0x%"PRIx8"/0x%"PRIx8"\n",
				local_ip, sdf_filter->local_ip_mask, remote_ip,
				sdf_filter->remote_ip_mask,
				ntohs(sdf_filter->local_port_low),
				ntohs(sdf_filter->local_port_high),
				ntohs(sdf_filter->remote_port_low),
				ntohs(sdf_filter->remote_port_high),
				sdf_filter->proto, sdf_filter->proto_mask);
		}else{
			clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT "SDF flow direction not present for ipv4\n",
				LOG_VALUE);
			return NULL;
		}
	}else if(sdf_filter->v6){
		if ((sdf_filter->direction == TFT_DIRECTION_DOWNLINK_ONLY)
			|| (sdf_filter->direction == TFT_DIRECTION_UPLINK_ONLY)
			|| (sdf_filter->direction == TFT_DIRECTION_BIDIRECTIONAL)) {
			/* Downlink Format -- IPv6 carries no port ranges, only
			 * prefixes and the protocol/mask pair. */
			snprintf(pktf.u.rule_str, MAX_LEN, "%s/%"PRIu8" %s/%"PRIu8
				" 0x%"PRIx8"/0x%"PRIx8"\n",
				local_ip, sdf_filter->local_ip_mask, remote_ip,
				sdf_filter->remote_ip_mask,
				sdf_filter->proto, sdf_filter->proto_mask);
		}else{
			clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT "SDF flow direction not present for ipv6\n",
				LOG_VALUE);
			return NULL;
		}
	} else {
		/* Neither v4 nor v6 flagged: nothing to render. */
		clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT "SDF flow direction not present\n",
			LOG_VALUE);
		return NULL;
	}

	clLog(clSystemLog, eCLSeverityDebug,LOG_FORMAT "Installing %s pkt_filter #%"PRIu16" : %s",
			LOG_VALUE,TFT_direction_str[sdf_filter->direction], index,
			pktf.u.rule_str);

	struct pkt_filter *pktf_t = NULL;
	/* allocate memory for rule entry*/
	pktf_t = rte_zmalloc("SDF_rule_Infos", sizeof(struct pkt_filter), RTE_CACHE_LINE_SIZE);
	if (pktf_t == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Failed to allocate memory for rule entry.\n",
			LOG_VALUE);
		return NULL;
	}
	memcpy(pktf_t, &pktf, sizeof(struct pkt_filter));
	return pktf_t;
}
/* Send the Rules on user-plane */
/*
 * @brief : Send one rule (already packed into msg_payload) to the given
 *          user-plane node inside a PFCP PFD Management Request.
 * @param : upf_ip, destination user-plane node address
 * @param : msg_payload, rule payload to embed as PFD contents
 * @return : Returns 0 on success, -1 on allocation or send failure
 */
static int8_t
dump_rule_on_up(node_address_t upf_ip, struct msgbuf *msg_payload)
{
	int ret = 0;
	/* Fill the PFD MGMT Request and send to UP */
	pfcp_pfd_mgmt_req_t *pfd_mgmt_req = NULL;

	/* calloc zero-initializes the request in one step; the original
	 * malloc+memset dereferenced the pointer without a NULL check. */
	pfd_mgmt_req = calloc(1, sizeof(pfcp_pfd_mgmt_req_t));
	if (pfd_mgmt_req == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Error: Failed to allocate memory for PFD MGMT Request\n",
				LOG_VALUE);
		return -1;
	}

	/* Fill the rule in pfd content custom ie as rule string */
	set_pfd_contents(&pfd_mgmt_req->app_ids_pfds[0].pfd_context[0].pfd_contents[0],
			msg_payload);
	/*Fill/Set the pfd request header */
	fill_pfcp_pfd_mgmt_req(pfd_mgmt_req, 0);

	/* Encode the PFD MGMT Request */
	uint8_t pfd_msg[PFCP_MSG_LEN] = {0};
	uint16_t pfd_msg_len = encode_pfcp_pfd_mgmt_req_t(pfd_mgmt_req, pfd_msg);

	/* Set the destination UPF IP Address.
	 * NOTE(review): failure here is only logged, not returned -- the
	 * send below is still attempted, matching the original behavior. */
	ret = set_dest_address(upf_ip, &upf_pfcp_sockaddr);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
	}

	/* Send the PFD MGMT Request to UPF */
	if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfd_msg, pfd_msg_len,
				upf_pfcp_sockaddr, SENT) < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error: pfcp_send(): %i\n",
				LOG_VALUE, errno);
		free(pfd_mgmt_req);
		return -1;
	}

	free(pfd_mgmt_req);
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"pfcp_send() sent rule to UP\n",
			LOG_VALUE);
	return 0;
}
/* Send the predefined rules SDF, MTR, ADC, and PCC on UP.*/
/*
 * @brief : Push all predefined rules to the given user-plane node: each
 *          PCC rule is sent first, while the unique SDF/ADC/MTR indexes
 *          it references are collected; those indexed rules are then
 *          fetched and sent in three follow-up passes.
 *          Per-rule failures are logged and skipped (the walk continues).
 * @param : upf_ip, user-plane node address
 * @return : Returns 0 on success, -1 when no PCC rule list exists for
 *           this CP's PFCP address
 */
int8_t
dump_predefined_rules_on_up(node_address_t upf_ip)
{
	int ret = 0;
	/* Unique rule indexes gathered across all PCC rules; capacity is
	 * MAX_RULE_CNT each -- assumes the predefined set fits (TODO confirm
	 * no overflow check is needed upstream). */
	uint32_t mtr_rule_cnt = 0;
	uint32_t mtr_rule_indx[MAX_RULE_CNT] = {0};
	uint32_t adc_rule_cnt = 0;
	uint32_t adc_rule_indx[MAX_RULE_CNT] = {0};
	uint32_t sdf_rule_cnt = 0;
	uint32_t sdf_rule_indx[MAX_RULE_CNT] = {0};

	clLog(clSystemLog, eCLSeverityInfo,
			LOG_FORMAT"Started UP_Addr:"IPV4_ADDR"\n",
			LOG_VALUE, IPV4_ADDR_HOST_FORMAT(upf_ip.ipv4_addr));

	/* Get PCC rule name entry from centralized location to dump rules on UP*/
	rules_struct *rule = NULL;
	rule = get_map_rule_entry(config.pfcp_ip.s_addr, GET_RULE);
	if (rule != NULL) {
		rules_struct *current = NULL;
		current = rule;
		/* Walk the linked list of rule names; every error path below
		 * advances to the next node and continues. */
		while (current != NULL) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"PCC Rule retrive from the internal table and map,"
					"Rule_Name: %s, Node_Count:%u\n", LOG_VALUE, current->rule_name.rname,
					current->rule_cnt);

			/* Retrive the PCC rule based on the rule name */
			struct pcc_rules *pcc = NULL;
			pcc = get_predef_pcc_rule_entry(&current->rule_name, GET_RULE);
			if (pcc == NULL) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: Failed to PCC Rule from the centralized map table"
						" for Rule_Name: %s\n", LOG_VALUE, current->rule_name.rname);
				/* Assign Next node address */
				rule = current->next;
				/* Get the next node */
				current = rule;
				continue;
			}

			/* Parse and dump the PCC rule on UP */
			struct msgbuf msg_payload = {0};
			if (build_rules_up_msg(MSG_PCC_TBL_ADD, (void *)pcc, &msg_payload) < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: Failed to build PCC Rule struct to dump on UP"
						" for Rule_Name: %s\n", LOG_VALUE, pcc->rule_name);
				/* Assign Next node address */
				rule = current->next;
				/* Get the next node */
				current = rule;
				continue;
			}

			/* Dump PCC rule on UPF*/
			if (dump_rule_on_up(upf_ip, &msg_payload) < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: Failed to send PCC rule on UP"
						" for Rule_Name: %s\n", LOG_VALUE, pcc->rule_name);
				/* Assign Next node address */
				rule = current->next;
				/* Get the next node */
				current = rule;
				continue;
			}
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Sent the PCC rule '%s' on the UP:"IPV4_ADDR"\n",
					LOG_VALUE, pcc->rule_name, IPV4_ADDR_HOST_FORMAT(upf_ip.ipv4_addr));

			/* Get Attached SDF Rule Index */
			if (pcc->sdf_idx_cnt) {
				for (uint32_t indx = 0; indx < pcc->sdf_idx_cnt; indx++) {
					if(!check_exsting_indx_val(pcc->sdf_idx[indx], sdf_rule_cnt, sdf_rule_indx)) {
						sdf_rule_indx[sdf_rule_cnt++] = pcc->sdf_idx[indx];
						clLog(clSystemLog, eCLSeverityDebug,
								LOG_FORMAT"Get the unique attached SDF Indx: %u from pcc\n",
								LOG_VALUE, pcc->sdf_idx[indx]);
					}
				}
			}

			/* Get Attached ADC Rule Index */
			if (pcc->adc_idx_cnt) {
				for (uint32_t indx = 0; indx < pcc->adc_idx_cnt; indx++) {
					if(!check_exsting_indx_val(pcc->adc_idx[indx], adc_rule_cnt, adc_rule_indx)) {
						adc_rule_indx[adc_rule_cnt++] = pcc->adc_idx[indx];
						clLog(clSystemLog, eCLSeverityDebug,
								LOG_FORMAT"Get the unique attached ADC Indx: %u from pcc\n",
								LOG_VALUE, pcc->adc_idx[indx]);
					}
				}
			}

			/* Get Attached MTR Rule Index */
			if (pcc->qos.mtr_profile_index) {
				if(!check_exsting_indx_val(pcc->qos.mtr_profile_index,
							mtr_rule_cnt, mtr_rule_indx)) {
					mtr_rule_indx[mtr_rule_cnt++] = pcc->qos.mtr_profile_index;
					clLog(clSystemLog, eCLSeverityDebug,
							LOG_FORMAT"Get the unique attached UL MTR Profile Indx: %u from pcc\n",
							LOG_VALUE, pcc->qos.mtr_profile_index);
				}
			}

			/* Assign Next node address */
			rule = current->next;
			/* Get the next node */
			current = rule;
		}

		/* Retrive the MTR rule based on the Meter Index */
		for (uint32_t idx = 0; idx < mtr_rule_cnt; idx++) {
			void *mtr_rule = NULL;
			ret = get_predef_rule_entry(mtr_rule_indx[idx], MTR_HASH, GET_RULE, &mtr_rule);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: Failed to Get MTR Rule from the internal table"
						"for Mtr_Indx: %u\n", LOG_VALUE, mtr_rule_indx[idx]);
				continue;
			}

			/* Parse and dump the MTR rule on UP */
			struct msgbuf msg_payload = {0};
			if (build_rules_up_msg(MSG_MTR_ADD, mtr_rule, &msg_payload) < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: Failed to build MTR Rule struct to dump on UP"
						" for MTR_Index: %u\n", LOG_VALUE, mtr_rule_indx[idx]);
				continue;
			}

			/* Dump MTR rule on UPF*/
			if (dump_rule_on_up(upf_ip, &msg_payload) < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: Failed to send MTR rule on UP"
						" for MTR_Indx: %u\n", LOG_VALUE, mtr_rule_indx[idx]);
				continue;
			}
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Sent the MTR rule Index '%u' on the UP:"IPV4_ADDR"\n",
					LOG_VALUE, mtr_rule_indx[idx], IPV4_ADDR_HOST_FORMAT(upf_ip.ipv4_addr));
		}

		/* Retrive the ADC rule based on the ADC Index */
		for (uint32_t idx1 = 0; idx1 < adc_rule_cnt; idx1++) {
			void *adc_rule = NULL;
			ret = get_predef_rule_entry(adc_rule_indx[idx1], ADC_HASH, GET_RULE, &adc_rule);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: Failed to Get ADC Rule from the internal table"
						"for ADC_Indx: %u\n", LOG_VALUE, adc_rule_indx[idx1]);
				continue;
			}

			/* Parse and dump the ADC rule on UP */
			struct msgbuf msg_payload = {0};
			if (build_rules_up_msg(MSG_ADC_TBL_ADD, adc_rule, &msg_payload) < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: Failed to build ADC Rule struct to dump on UP"
						" for ADC_Indx: %u\n", LOG_VALUE, adc_rule_indx[idx1]);
				continue;
			}

			/* Dump ADC rule on UPF*/
			if (dump_rule_on_up(upf_ip, &msg_payload) < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: Failed to send ADC rule on UP"
						" for ADC_Indx: %u\n", LOG_VALUE, adc_rule_indx[idx1]);
				continue;
			}
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Sent the ADC rule Index '%u' on the UP:"IPV4_ADDR"\n",
					LOG_VALUE, adc_rule_indx[idx1], IPV4_ADDR_HOST_FORMAT(upf_ip.ipv4_addr));
		}

		/* Retrive the SDF rule based on the SDF Index */
		for (uint32_t idx2 = 0; idx2 < sdf_rule_cnt; idx2++) {
			void *sdf_rule_t = NULL;
			pkt_fltr *tmp_sdf = NULL;
			ret = get_predef_rule_entry(sdf_rule_indx[idx2], SDF_HASH, GET_RULE, &sdf_rule_t);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: Failed to Get SDF Rule from the internal table"
						"for SDF_Indx: %u\n", LOG_VALUE, sdf_rule_indx[idx2]);
				continue;
			}

			/* Typecast sdf rule */
			tmp_sdf = (pkt_fltr *)sdf_rule_t;
			if (tmp_sdf == NULL) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: Failed not found the sdf rule"
						"for SDF_Indx: %u\n", LOG_VALUE, sdf_rule_indx[idx2]);
				continue;
			}

			/* build_sdf_rules returns rte_zmalloc'ed memory owned here;
			 * released with rte_free after a successful send. */
			struct pkt_filter *sdf_rule = NULL;
			sdf_rule = build_sdf_rules(sdf_rule_indx[idx2], tmp_sdf);
			if (sdf_rule == NULL) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: Failed to create the sdf rule"
						"for SDF_Indx: %u\n", LOG_VALUE, sdf_rule_indx[idx2]);
				continue;
			}

			/* Parse and dump the SDF rule on UP */
			struct msgbuf msg_payload = {0};
			if (build_rules_up_msg(MSG_SDF_ADD, (void *)sdf_rule, &msg_payload) < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: Failed to build SDF Rule struct to dump on UP"
						" for SDF_Indx: %u\n", LOG_VALUE, sdf_rule_indx[idx2]);
				continue;
			}

			/* Dump SDF rule on UPF*/
			if (dump_rule_on_up(upf_ip, &msg_payload) < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: Failed to send SDF rule on UP"
						" for SDF_Indx: %u\n", LOG_VALUE, sdf_rule_indx[idx2]);
				continue;
			}
			rte_free(sdf_rule);
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Sent the SDF rule Index '%u' on the UP:"IPV4_ADDR"\n",
					LOG_VALUE, sdf_rule_indx[idx2], IPV4_ADDR_HOST_FORMAT(upf_ip.ipv4_addr));
		}
	} else {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Error: Failed to Get PCC Rule from centralized map table\n",
				LOG_VALUE);
		return -1;
	}

	clLog(clSystemLog, eCLSeverityInfo,
			LOG_FORMAT"END UP_Addr:"IPV4_ADDR"\n",
			LOG_VALUE, IPV4_ADDR_HOST_FORMAT(upf_ip.ipv4_addr));
	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | pfcp_messages/pfcp_util.h | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PFCP_UTIL_H
#define PFCP_UTIL_H
#include <sys/sysinfo.h>
#include <stdint.h>
#include <arpa/inet.h>
#include "gw_adapter.h"
#include "interface.h"
#ifdef CP_BUILD
#include "ue.h"
#include "gtp_messages.h"
#include "sm_struct.h"
#else
#define LDB_ENTRIES_DEFAULT (1024 * 512)
#endif /* CP_BUILD */
#ifdef CP_BUILD
#define S11_INTFC_IN 1
#define S11_INTFC_OUT 2
#define S5S8_C_INTFC_IN 3
#define S5S8_C_INTFC_OUT 4
#define SX_INTFC_IN 5
#define SX_INTFC_OUT 6
#endif /* CP_BUILD */
#define COPY_SIG_MSG_ON 2
#define SX_COPY_CP_MSG 1
#define SX_COPY_DP_MSG 2
#define SX_COPY_CP_DP_MSG 3
#define FRWDING_PLCY_SX 0
#define FRWDING_PLCY_WEST_DIRECTION 1
#define FRWDING_PLCY_WEST_CONTENT 2
#define FRWDING_PLCY_EAST_DIRECTION 3
#define FRWDING_PLCY_EAST_CONTENT 4
#define FRWDING_PLCY_FORWARD 5
#define FRWDING_PLCY_ID 6
extern uint32_t start_time;
extern struct rte_hash *heartbeat_recovery_hash;
#define QUERY_RESULT_COUNT 16
#define MAX_ENODEB_LEN 16
#define PFCP_MSG_LEN 4096
/*VS: Define the IPv6 Format Specifier to print IPv6 Address */
#define IPv6_CAST *(struct in6_addr *)
#define IPv6_FMT "%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x:%02x%02x"
#define IPv6_PRINT(addr)\
(unsigned)((addr).s6_addr[0]),\
(unsigned)((addr).s6_addr[1]),\
(unsigned)((addr).s6_addr[2]),\
(unsigned)((addr).s6_addr[3]),\
(unsigned)((addr).s6_addr[4]),\
(unsigned)((addr).s6_addr[5]),\
(unsigned)((addr).s6_addr[6]),\
(unsigned)((addr).s6_addr[7]),\
(unsigned)((addr).s6_addr[8]),\
(unsigned)((addr).s6_addr[9]),\
(unsigned)((addr).s6_addr[10]),\
(unsigned)((addr).s6_addr[11]),\
(unsigned)((addr).s6_addr[12]),\
(unsigned)((addr).s6_addr[13]),\
(unsigned)((addr).s6_addr[14]),\
(unsigned)((addr).s6_addr[15])
/*This macro is used to print IPv6 address in string if IPv6 address is stored in uint8_t array*/
#define PRINT_IPV6_ADDR(addr)\
(unsigned)(addr[0]),\
(unsigned)(addr[1]),\
(unsigned)(addr[2]),\
(unsigned)(addr[3]),\
(unsigned)(addr[4]),\
(unsigned)(addr[5]),\
(unsigned)(addr[6]),\
(unsigned)(addr[7]),\
(unsigned)(addr[8]),\
(unsigned)(addr[9]),\
(unsigned)(addr[10]),\
(unsigned)(addr[11]),\
(unsigned)(addr[12]),\
(unsigned)(addr[13]),\
(unsigned)(addr[14]),\
(unsigned)(addr[15])
#ifdef CP_BUILD
#define FAILED_ENB_FILE "logs/failed_enb_queries.log"
typedef enum {
NO_DNS_QUERY,
ENODEB_BASE_QUERY,
APN_BASE_QUERY,
TAC_BASE_QUERY = 4
}dns_domain;
/**
* @brief : send DNS query
* @param : pdn, pdn connection context information
* @return : Returns 0 in case of success , -1 otherwise
*/
int
push_dns_query(pdn_connection *pdn);
/**
* @brief : DNS callback.
* @param : node_sel, node selector information
* @param : data, contain callback information
* @return : Returns 0 in case of success , -1 otherwise
*/
int dns_callback(void *node_sel, void *data, void *user_data);
#endif /* CP_BUILD */
/**
* @brief : Read data from peer node
* @param : msg_payload, buffer to store received data
* @param : size, max size to read data
* @param : peer_addr, peer node address
* @return : Returns received number of bytes
*/
int
pfcp_recv(void *msg_payload, uint32_t size, peer_addr_t *peer_addr, bool is_ipv6);
/**
* @brief : Send data to peer node
* @param : fd_v4, IPv4 socket or file descriptor to use to send data
* @param : fd_v6, IPv6 socket or file descriptor to use to send data
* @param : msg_payload, buffer to store data to be send
* @param : size, max size to send data
* @param : peer_addr, peer node address
* @return : Returns sent number of bytes
*/
int
pfcp_send(int fd_v4 , int fd_v6, void *msg_payload, uint32_t size,
peer_addr_t peer_addr, Dir dir);
/**
* @brief : Returns system seconds since boot
* @param : No param
* @return : Returns number of system seconds since boot
*/
long
uptime(void);
/**
* @brief : creates associated upf hash
* @param : No param
* @return : Returns nothing
*/
void
create_associated_upf_hash(void );
/**
* @brief : Checks current ntp timestamp
* @param : No param
* @return : Returns timestamp value
*/
uint32_t
current_ntp_timestamp(void);
/**
* @brief : Converts timeval to ntp format
* @param : tv, input timeval
* @param : ntp, converted ntp time
* @return : Returns nothing
*/
void
time_to_ntp(struct timeval *tv, uint8_t *ntp);
/**
* @brief : Converts ntp time to unix/epoch(UTC) format
* @param : ntp, input ntp timeval
* @param : unix_tm, converted unix time
* @return : Returns nothing
*/
void
ntp_to_unix_time(uint32_t *ntp, struct timeval *unix_tm);
/* VS: */
/**
* @brief : Validate the IP Address is in the subnet or not
* @param : addr, IP address for search
* @param : net_init, Starting value of the subnet
* @param : net_end, End value of the subnet
* @return : Returns 1 if addr within the range, 0 not in the range
* */
int
validate_Subnet(uint32_t addr, uint32_t net_init, uint32_t net_end);
/* VS: Validate the IPv6 Address is in the subnet or not */
/**
* @brief : Validate the IPv6 Address is in the network or not
* @param : addr, IP address for search
* @param : local_addr, Compare Network ID
* @param : local_prefix, Network bits
* @return : Returns 1 if addr within the range, 0 not in the range
* */
int
validate_ipv6_network(struct in6_addr addr,
struct in6_addr local_addr, uint8_t local_prefix);
/**
 * @brief : Retrieve the IPv6 Network Prefix Address
 * @param : addr, input IPv6 address from which the prefix is derived
 * @param : local_prefix, Network bits (prefix length)
 * @return : Returns the IPv6 network prefix
 * */
struct in6_addr
retrieve_ipv6_prefix(struct in6_addr addr, uint8_t local_prefix);
/**
 * @brief : Retrieve UE database from SEID and, if required, copy the message to the LI server
 * @param : sess_id, key for search
 * @param : buf_tx, message to copy to the LI server
 * @param : buf_tx_size, message size
 * @return : Returns 0 in case of success , -1 otherwise
 * NOTE(review): no prototype immediately follows this comment — confirm which declaration it documents
 */
#ifdef CP_BUILD
/**
* @brief : Check LI is enabled or not
* @param : li_data, li_data information from context
* @param : intfc_name, interface name
* @return : Returns 1 if yes, 0 otherwise
*/
uint8_t
is_li_enabled(li_data_t *li_data, uint8_t intfc_name, uint8_t cp_type);
/**
* @brief : Check LI is enabled or not using imsi
* @param : uiImsi, IMSI of UE
* @param : intfc_name, interface name
* @return : Returns 1 if yes, 0 otherwise
*/
uint8_t
is_li_enabled_using_imsi(uint64_t uiImsi, uint8_t intfc_name, uint8_t cp_type);
/**
* @brief : Process li message
* @param : sess_id, session id
* @param : intfc_name, interface name
* @param : buf_tx
* @param : buf_tx_size, size of buf_tx
* @param : srcIp, source ip address
* @param : dstIp, destination ip address
* @param : uiSrcPort, source port number
* @param : uiDstPort, destination port number
* @return : Returns 0 on success, -1 otherwise
*/
int
process_cp_li_msg(uint64_t sess_id, uint8_t intfc_name, uint8_t *buf_tx,
int buf_tx_size, struct ip_addr srcIp, struct ip_addr dstIp, uint16_t uiSrcPort,
uint16_t uiDstPort);
/**
* @brief : Process messages for li
* @param : context, ue context details
* @param : intfc_name, interface name
* @param : msg, msg_info structure
* @param : srcIp, source ip address
* @param : dstIp, destination ip address
* @param : uiSrcPort, source port number
* @param : uiDstPort, destination port number
* @return : Returns 0 on success, -1 otherwise
*/
int
process_msg_for_li(ue_context *context, uint8_t intfc_name, msg_info *msg,
struct ip_addr srcIp, struct ip_addr dstIp, uint16_t uiSrcPort, uint16_t uiDstPort);
/**
* @brief : Process li message. Sender must check li is enabled or not
* @param : li_data, configurations for li
* @param : uiLiDataCntr, Number of li entries for single ue
* @param : intfc_name, interface name
* @param : buf_tx
* @param : buf_tx_size, size of buf_tx
* @param : srcIp, source ip address
* @param : dstIp, destination ip address
* @param : uiSrcPort, source port number
* @param : uiDstPort, destination port number
* @param : uiCpMode, control plane mode
* @param : uiImsi, imsi of ue
* @return : Returns 0 on success, -1 otherwise
*/
int
process_cp_li_msg_for_cleanup(li_data_t *li_data, uint8_t li_data_cntr, uint8_t intfc_name,
uint8_t *buf_tx, int buf_tx_size, struct ip_addr srcIp, struct ip_addr dstIp,
uint16_t uiSrcPort, uint16_t uiDstPort, uint8_t uiCpMode, uint64_t uiImsi);
/**
* @brief : Process packet for li.
* @param : context, context of ue
* @param : intfc_name, interface name
* @param : buf_tx, packet
* @param : buf_tx_size, size of buf_tx
* @param : srcIp, source ip address
* @param : dstIp, destination ip address
* @param : uiSrcPort, source port number
* @param : uiDstPort, destination port number
* @param : uiForward, forward to df2 or not
* @return : Returns 0 on success, -1 otherwise
*/
int
process_pkt_for_li(ue_context *context, uint8_t intfc_name, uint8_t *buf_tx,
int buf_tx_size, struct ip_addr srcIp, struct ip_addr dstIp, uint16_t uiSrcPort,
uint16_t uiDstPort);
#endif /* CP_BUILD */
#endif /* PFCP_UTIL_H */
|
nikhilc149/e-utran-features-bug-fixes | cp_dp_api/vepc_cp_dp_api.h | <filename>cp_dp_api/vepc_cp_dp_api.h
/*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/***************************CP-DP-Structures**************************/
#ifndef _CP_DP_API_H_
#define _CP_DP_API_H_
/**
* @file
* This file contains macros, data structure definitions and function
* prototypes to describe CP DP APIs.
*/
#include <time.h>
#include <rte_ether.h>
#include "pfcp_ies.h"
#include "pfcp_struct.h"
/**
* IPv6 address length
*/
#define IPV6_ADDR_LEN 16
/**
* Maximum CDR services.
*/
#define MAX_SERVICE 1
/**
* Maximum PCC rules per session.
*/
#define MAX_PCC_RULES 12
/**
* Maximum PCC rules per session.
*/
#define MAX_ADC_RULES 16
#define MAX_ADC_IDX_COUNT 16
/**
* Maximum BAR per session.
*/
#define MAX_BAR_PER_SESSION 255
/**
 * Maximum number of SDF indices that can be referred in a PCC rule.
 * Max length of the sdf rules string that will be received as part of add
 * pcc entry from FPC. String is a list of SDF indices.
 * TODO: Revisit this count
 */
#define MAX_SDF_IDX_COUNT 16
#define MAX_SDF_STR_LEN 4096
#define MAX_LI_HDR_SIZE 2048
/**
* Maximum buffer/name length
*/
#define MAX_LEN 128
/**
* @brief : Defines number of entries in local database.
*
* Recommended local table size to remain within L2 cache: 64000 entries.
* See README for detailed calculations.
*/
#define LDB_ENTRIES_DEFAULT (1024 * 512)
#define DEFAULT_DN_NUM 512
/**
* Gate closed
*/
#define CLOSE 1
/**
* Gate opened
*/
#define OPEN 0
/**
* Maximum rating groups per bearer session.
*/
#define MAX_RATING_GRP 6
#define NUM_EBI_RESERVED 1
/**
 * Get ebi_index from bearer id.
 * Evaluates to (a - 1) when 1 <= a <= MAX_BEARERS*2, otherwise -1.
 * The upper limit is MAX_BEARERS*2 because dedicated bearer creation
 * uses temporary bearer_id indexes which can go up to 30
 * (normally the range is up to 15).
 * NOTE: 'a' is evaluated more than once — do not pass expressions
 * with side effects.
 */
#define GET_EBI_INDEX(a) ((((a) >= 1) && ((a) <= MAX_BEARERS*2)) ? ((a) - 1) : -1)
/**
 * Get pdn from context and ebi_index; NULL when the context is NULL,
 * the index is out of range, or the bearer slot is empty.
 * Uses double the MAX_BEARERS limit because, during dedicated bearer
 * creation, dedicated bearers are stored in a temporary location until
 * a bearer id is assigned to them by the MME.
 * NOTE: 'x' and 'i' are evaluated more than once — do not pass
 * expressions with side effects.
 */
#define GET_PDN(x, i) \
	( \
	  (((x) != NULL) \
	   && ((i) >= 0 && (i) < MAX_BEARERS*2) \
	   && ((x)->eps_bearers[(i)] != NULL)) ? (x)->eps_bearers[(i)]->pdn : NULL \
	)
/**
* default bearer session.
*/
#define DEFAULT_BEARER 5
/**
* get dupl flag (apply action) status
*/
#define GET_DUP_STATUS(context) (context->dupl)
/**
* ip type for lawful interception
*/
#define IPTYPE_IPV4_LI 1
#define IPTYPE_IPV6_LI 2
/**
 * Get the unique UE session id: bits [27:4] of the low word of the
 * 64-bit session id. The argument is fully parenthesized so that
 * expression arguments (e.g. containing '|') expand correctly.
 */
#define UE_SESS_ID(x) (((x) & 0xfffffff) >> 4)
/**
 * Get the bearer id: the low 4 bits of the session id. The argument
 * is parenthesized so that expression arguments (e.g. containing '|')
 * expand correctly.
 */
#define UE_BEAR_ID(x) ((x) & 0xf)
/**
* set session id from the combination of
* unique UE id and Bearer id
*/
#define SESS_ID(ue_id, br_id) ({ \
time_t epoch = time(NULL); \
( (uint64_t)(epoch) << 32 | (0xfffffff & ( ( (uint64_t) (ue_id) << 4) | (0xf & (br_id)) ))); \
})
/**
 * MAX DNS Sponsor ID name length
 */
#define MAX_DNS_SPON_ID_LEN 16
/**
* @brief : Select IPv4 or IPv6.
*/
enum iptype {
IPTYPE_IPV4 = 0, /* IPv4. */
IPTYPE_IPV6, /* IPv6. */
};
/**
* @brief : SDF Rule type field.
*/
enum rule_type {
RULE_STRING = 0,
FIVE_TUPLE,
};
/**
* @brief : Packet action field.
*/
enum sess_pkt_action {
ACTION_NONE = 0,
ACTION_DROP,
ACTION_FORWARD,
ACTION_BUFFER,
ACTION_NOTIFY_CP,
ACTION_DUPLICATE,
};
/**
* @brief : IPv4 or IPv6 address configuration structure.
*/
struct ip_addr {
enum iptype iptype; /* IP type: IPv4 or IPv6. */
union {
uint32_t ipv4_addr; /* IPv4 address*/
uint8_t ipv6_addr[IPV6_ADDR_LEN]; /* IPv6 address*/
} u;
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : IPv4 5 tuple rule configuration structure.
*/
struct ipv4_5tuple_rule {
uint32_t ip_src; /* Src IP address*/
uint32_t ip_dst; /* Dst IP address*/
uint32_t src_mask; /* Src Mask*/
uint32_t dst_mask; /* Dst Mask*/
uint16_t sport_s; /* Range start Src Port */
uint16_t sport_e; /* Range end Src Port */
uint16_t dport_s; /* Range start Dst Port */
uint16_t dport_e; /* Range end Dst Port */
uint8_t proto_s; /* Range start Protocol*/
uint8_t proto_e; /* Range end Protocol*/
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : IPv6 5 tuple rule configuration structure.
*/
struct ipv6_5tuple_rule {
uint8_t ip_src[IPV6_ADDR_LEN]; /* Src IP address*/
uint8_t ip_dst[IPV6_ADDR_LEN]; /* Dst IP address*/
uint32_t src_mask; /* Src Mask*/
uint32_t dst_mask; /* Dst Mask*/
uint16_t sport_s; /* Range start Src Port */
uint16_t sport_e; /* Range end Src Port */
uint16_t dport_s; /* Range start Dst Port */
uint16_t dport_e; /* Range end Dst Port */
uint8_t proto_s; /* Range start Protocol*/
uint8_t proto_e; /* Range end Protocol*/
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : 5 tuple rule configuration structure.
*/
struct five_tuple_rule {
enum iptype iptype; /* IP type: IPv4 or IPv6. */
union {
struct ipv4_5tuple_rule ipv4;
struct ipv6_5tuple_rule ipv6;
} u;
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : Packet filter configuration structure.
*/
struct service_data_list {
uint32_t service[MAX_SERVICE]; /* list of service id*/
/* TODO: add other members*/
} ;
/**
* @brief : SDF Packet filter configuration structure.
*/
struct pkt_filter {
uint8_t direction;
uint32_t rule_id; /* PCC rule id*/
uint32_t precedence;
union {
char rule_str[MAX_LEN]; /* string of rule, please refer
* cp/main.c for example
* TODO: rule should be in struct five_tuple_rule*/
struct five_tuple_rule rule_5tp; /* 5 Tuple rule.
* This field is currently not used*/
} u;
enum rule_type sel_rule_type;
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : DNS selector type.
*/
enum selector_type {
DOMAIN_NAME = 0, /* Domain name. */
DOMAIN_IP_ADDR, /* Domain IP address */
DOMAIN_IP_ADDR_PREFIX, /* Domain IP prefix */
DOMAIN_NONE
};
/**
* @brief : IPv4 or IPv6 address configuration structure.
*/
struct ip_prefix {
struct ip_addr ip_addr; /* IP address*/
uint16_t prefix; /* Prefix*/
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : Redirect configuration structure.
*/
struct redirect_info {
uint32_t info;
};
/*Allocation and Retention Priority*/
struct arp_pdef {
uint8_t priority_level;
uint8_t pre_emption_capability;
uint8_t pre_emption_vulnerability;
};
/**
* @brief : QoS parameters structure for DP
*/
struct qos_info {
uint16_t mtr_profile_index; /* mtr profile index 0 */
uint8_t qci; /*QoS Class Identifier*/
struct arp_pdef arp; /*Allocation and Retention Priority*/
};
/**
* @brief : Application Detection and Control Rule Filter config structure.
*/
struct adc_rules {
enum selector_type sel_type; /* domain name, IP addr
* or IP addr prefix*/
union {
char domain_name[MAX_LEN]; /* Domain name. */
struct ip_addr domain_ip; /* Domain IP address */
struct ip_prefix domain_prefix; /* Domain IP prefix */
} u;
uint32_t rule_id; /* Rule ID*/
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : Metering Methods.
*/
enum mtr_mthds {
SRTCM_COLOR_BLIND = 0, /* Single Rate Three Color Marker - Color blind*/
SRTCM_COLOR_AWARE, /* Single Rate Three Color Marker - Color aware*/
TRTCM_COLOR_BLIND, /* Two Rate Three Color Marker - Color blind*/
TRTCM_COLOR_AWARE, /* Two Rate Three Color Marker - Color aware*/
};
/**
* @brief : Meter profile parameters
*/
struct mtr_params {
/* Committed Information Rate (CIR). Measured in bytes per second.*/
uint64_t cir;
/* Committed Burst Size (CBS). Measured in bytes.*/
uint64_t cbs;
/* Excess Burst Size (EBS). Measured in bytes.*/
uint64_t ebs;
/* TODO: add TRTCM params */
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : Meter Profile entry config structure.
*/
struct mtr_entry {
uint16_t mtr_profile_index; /* Meter profile index*/
struct mtr_params mtr_param; /* Meter params*/
/** Uplink Maximum Bit Rate in kilobits (1000bps) - for non-GBR
* Bearers this field to be set to zero*/
uint64_t ul_mbr;
/** Downlink Maximum Bit Rate in kilobits (1000bps) - for non-GBR
* Bearers this field to be set to zero*/
uint64_t dl_mbr;
/** Uplink Guaranteed Bit Rate in kilobits (1000bps) - for non-GBR
* Bearers this field to be set to zero*/
uint64_t ul_gbr;
/** Downlink Guaranteed Bit Rate in kilobits (1000bps) - for non-GBR
* Bearers this field to be set to zero*/
uint64_t dl_gbr;
/** APN Aggregate Max Bitrate (AMBR) for Uplink */
uint64_t ul_ambr;
/** APN Aggregate Max Bitrate (AMBR) for Downlink */
uint64_t dl_ambr;
uint8_t metering_method; /* Metering Methods
* -fwd, srtcm, trtcm*/
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : Direction on which the session is applicable.
*/
enum sess_direction {
SESS_UPLINK = 0,/* rule applicable for Uplink. */
SESS_DOWNLINK, /* rule applicable for Downlink*/
};
/**
* @brief : UpLink S1u interface config structure.
*/
struct ul_s1_info {
uint32_t sgw_teid; /* SGW teid*/
uint32_t s5s8_pgw_teid; /* PGW teid */
struct ip_addr enb_addr; /* eNodeB address*/
struct ip_addr sgw_addr; /* Serving Gateway address*/
struct ip_addr s5s8_pgwu_addr; /* S5S8_PGWU address*/
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : DownLink S1u interface config structure.
*/
struct dl_s1_info {
uint32_t enb_teid; /* eNodeB teid*/
struct ip_addr enb_addr; /* eNodeB address*/
struct ip_addr sgw_addr; /* Serving Gateway address*/
struct ip_addr s5s8_sgwu_addr; /* S5S8_SGWU address*/
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : Policy and Charging Control structure for DP
*/
struct pcc_rules {
uint8_t rule_status; /* Rule Status*/
uint8_t ul_gate_status;
uint8_t dl_gate_status; /* gate status indicates whether the service data flow,
* detected by the service data flow filter(s),
* may pass or shall be discarded*/
uint8_t session_cont; /* Total Session Count*/
uint8_t report_level; /* Level of report*/
uint8_t online; /* Online : 1*/
uint8_t offline; /* Offline : 0*/
uint8_t flow_status; /* Flow Status Enable = 2*/
uint32_t rule_id; /* Rule ID*/
char rule_name[MAX_LEN]; /* Rule Name*/
uint32_t precedence; /* Precedence*/
uint32_t rating_group; /* Group rating*/
uint32_t service_id; /* identifier for the service or the service component
* the service data flow relates to.*/
uint32_t monitoring_key; /* key to identify monitor control instance that shall
* be used for usage monitoring control of the service
* data flows controlled*/
char sponsor_id[MAX_LEN]; /* to identify the 3rd party organization (the
* sponsor) willing to pay for the operator's charge*/
struct redirect_info redirect_info; /* Redirect info*/
uint64_t drop_pkt_count; /* Drop count*/
struct qos_info qos; /* QoS Parameters*/
uint8_t charging_mode; /* online and offline charging*/
uint8_t metering_method; /* Metering Methods * -fwd, srtcm, trtcm*/
uint8_t mute_notify; /* Mute on/off*/
uint32_t adc_idx_cnt;
uint32_t adc_idx[MAX_ADC_IDX_COUNT]; //GCC_Security flag
uint32_t sdf_idx_cnt;
uint32_t sdf_idx[MAX_SDF_IDX_COUNT];
#ifdef DP_BUILD
uint32_t qer_id; /*store the qer_id*/
#endif
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : Maintains cdr details
*/
struct cdr {
uint64_t bytes;
uint64_t pkt_count;
};
/**
* @brief : Volume based Charging
*/
struct chrg_data_vol {
struct cdr ul_cdr; /* Uplink cdr*/
struct cdr dl_cdr; /* Downlink cdr*/
struct cdr ul_drop; /* Uplink dropped cdr*/
struct cdr dl_drop; /* Downlink dropped cdr*/
};
/**
* @brief : Rating group index mapping Data structure.
*/
struct rating_group_index_map {
uint32_t rg_val; /* Rating group*/
uint8_t rg_idx; /* Rating group index*/
};
/**
* @brief : IP-CAN Bearer Charging Data Records
*/
struct ipcan_dp_bearer_cdr {
uint32_t charging_id; /* Bearer Charging id*/
uint32_t pdn_conn_charging_id; /* PDN connection charging id*/
struct tm record_open_time; /* Record time*/
uint64_t duration_time; /* duration (sec)*/
uint8_t record_closure_cause; /* Record closure cause*/
uint64_t record_seq_number; /* Sequence no.*/
uint8_t charging_behavior_index; /* Charging index*/
uint32_t service_id; /* to identify the service
* or the service component
* the bearer relates to*/
char sponsor_id[MAX_DNS_SPON_ID_LEN]; /* to identify the 3rd party organization (the
* sponsor) willing to pay for the operator's charge*/
struct service_data_list service_data_list; /* List of service*/
uint32_t rating_group; /* rating group of this bearer*/
uint64_t vol_threshold; /* volume threshold in MBytes*/
struct chrg_data_vol data_vol; /* charing per UE by volume*/
uint32_t charging_rule_id; /* Charging Rule ID*/
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : Bearer Session information structure
*/
struct session_info {
struct ip_addr ue_addr; /* UE ip address*/
struct ul_s1_info ul_s1_info; /* UpLink S1u info*/
struct dl_s1_info dl_s1_info; /* DownLink S1u info*/
uint8_t bearer_id; /* Bearer ID*/
/* PCC rules related params*/
uint32_t num_ul_pcc_rules; /* No. of UL PCC rule*/
uint32_t ul_pcc_rule_id[MAX_PCC_RULES]; /* PCC rule id supported in UL*/
uint32_t num_dl_pcc_rules; /* No. of PCC rule*/
uint32_t dl_pcc_rule_id[MAX_PCC_RULES]; /* PCC rule id*/
/* ADC rules related params*/
uint32_t num_adc_rules; /* No. of ADC rule*/
uint32_t adc_rule_id[MAX_ADC_RULES]; /* List of ADC rule id*/
/* Charging Data Records*/
struct ipcan_dp_bearer_cdr ipcan_dp_bearer_cdr; /* Charging Data Records*/
uint32_t client_id;
uint64_t sess_id; /* session id of this bearer
* last 4 bits of sess_id
* maps to bearer id*/
uint64_t cp_sess_id;
uint32_t service_id; /* Type of service given
* given to this session like
* Internet, Management, CIPA etc
*/
uint32_t ul_apn_mtr_idx; /* UL APN meter profile index*/
uint32_t dl_apn_mtr_idx; /* DL APN meter profile index*/
enum sess_pkt_action action;
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : DataPlane identifier information structure.
*/
struct dp_id {
uint64_t id; /* table identifier.*/
char name[MAX_LEN]; /* name string of identifier*/
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : Type of CDR record to be flushed.
*/
enum cdr_type {
CDR_TYPE_BEARER,
CDR_TYPE_ADC,
CDR_TYPE_FLOW,
CDR_TYPE_RG,
CDR_TYPE_ALL
};
/**
* @brief : Structure to flush different types of UE CDRs into file.
*/
struct msg_ue_cdr {
uint64_t session_id; /* session id of the bearer, this field
* should have same value as set in sess_id
* in struct session_info during session create.*/
enum cdr_type type; /* type of cdrs to flush. It can be
* either Bearer, ADC, FLOW, Rating group
* or all. Please refer enum cdr_type for values*/
uint8_t action; /* 0 to append and 1 to clear old logs and
* write new logs into cdr log file.*/
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
typedef struct li_header_t {
uint32_t packet_len;
uint8_t type_of_payload;
uint64_t id;
uint64_t imsi;
uint8_t src_ip_type;
uint32_t src_ipv4;
uint8_t src_ipv6[IPV6_ADDRESS_LEN];
uint16_t src_port;
uint8_t dst_ip_type;
uint32_t dst_ipv4;
uint8_t dst_ipv6[IPV6_ADDRESS_LEN];
uint16_t dst_port;
uint8_t operation_mode;
uint32_t seq_no;
uint32_t len;
} li_header_t;
#ifdef DP_BUILD
/**
 * @brief : SDF Packet filter configuration structure.
 */
struct sdf_pkt_filter {
	uint8_t direction; /* Rule Direction */
	uint32_t rule_indx; /* SDF Rule Index*/
	uint32_t precedence; /* Precedence */
	uint8_t rule_ip_type; /* Rule for which IP type (v4 or v6) */
	union {
		char rule_str[MAX_LEN]; /* string of rule, please refer
					 * cp/main.c for example
					 * TODO: rule should be in struct five_tuple_rule*/
		struct five_tuple_rule rule_5tp; /* 5 Tuple rule.
					 * This field is currently not used*/
	} u;
	enum rule_type sel_rule_type;
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : Structure to downlink data notification ack information struct.
*/
struct downlink_data_notification_ack_t {
/* todo! more to implement... see table 7.2.11.2-1
* 'recovery: this ie shall be included if contacting the peer
* for the first time'
*/
/* */
uint64_t dl_buff_cnt;
uint64_t dl_buff_duration;
};
/*
* @brief : Structure to store information for sending End Marker
*/
struct sess_info_endmark {
uint32_t teid;
uint8_t dst_port;
uint8_t src_port;
node_address_t dst_ip;
node_address_t src_ip;
struct ether_addr source_MAC;
struct ether_addr destination_MAC;
}__attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : Create and send endmarker packet
* @param : edmk, holds information to fill in packet
* @return : Returns nothing
*/
void
build_endmarker_and_send(struct sess_info_endmark *edmk);
#endif /* DP_BUILD */
#define MAX_NB_DPN 64
#define PRESENT 1
#define NOT_PRESENT 0
/********************* SDF Pkt filter table ****************/
/**
* @brief : Function to create Service Data Flow (SDF) filter
* table. This table is used to detect SDFs that each packet belongs to.
* It allows to configure 5 tuple rules to classify
* incomming traffic.
* @param : dp_id
* table identifier.
* @param : max_elements
* max number of rules that can be configured
* in this table.
* @return : Returns 0 in case of success , -1 otherwise
*/
int
sdf_filter_table_create(struct dp_id dp_id, uint32_t max_elements);
/**
* @brief : Add SDF filter entry. This api allows to configure SDF filter.
* Each filters are 5 tuple based and should be configured with unique pcc_rule_id
* and precedence.
* Please refer test/simu_cp/simu_cp.c for an example.
* @param : dp_id
* table identifier.
* @param : pkt_filter_entry
* sdf packet filter entry structure
* @return : Returns 0 in case of success , -1 otherwise
*/
int
sdf_filter_entry_add(struct dp_id dp_id, struct pkt_filter pkt_filter_entry);
/********************* ADC Rule Table ****************/
/**
* @brief : Function to create Application Detection and
* Control (ADC) table.
* This table allow to configure ADC rules. Each rules
* will have unique ADC id.
* @param : dp_id
* table identifier.
* @param : max_elements
* max number of elements in this table.
* @return : Returns 0 in case of success , -1 otherwise
*/
int adc_table_create(struct dp_id dp_id, uint32_t max_elements);
/**
* @brief : Destroy ADC table. For deleting this table,
* make sure dp_id match with the one used when table created.
* @param : dp_id
* table identifier.
* @return : Returns 0 in case of success , -1 otherwise
*/
int adc_table_delete(struct dp_id dp_id);
/**
* @brief : Add entry in Application Detection and Control (ADC) table.
* This API allows to add an ADC rule. Each entry should have unique ADC rule_id.
* Please refer "struct adc_rules" for detailed information about the
* variabled that can be configured.
* @param : dp_id
* table identifier.
* @param : entry
* element to be added in this table.
* @return : Returns 0 in case of success , -1 otherwise
*/
int adc_entry_add(struct dp_id dp_id, struct adc_rules entry);
/**
* @brief : Delete entry in ADC table. For deleting an entry,
* only ADC id is necessary. All other field can be left NULL.
* @param : dp_id
* table identifier.
* @param : entry
* element to be deleted in this table.
* @return : Returns 0 in case of success , -1 otherwise
*/
int adc_entry_delete(struct dp_id dp_id, struct adc_rules entry);
/********************* PCC Table ****************/
/**
* @brief : Function to create Policy and Charging Control
* (PCC) table. This table allow to configure PCC rules.
* Each rules must have unique PCC id.
* @param : dp_id
* table identifier.
* @param : max_elements
* max number of elements in this table.
* @return : Returns 0 in case of success , -1 otherwise
*/
int pcc_table_create(struct dp_id dp_id, uint32_t max_elements);
/**
* @brief : Add entry in Policy and Charging Control
* (PCC) table. Each entry should have unique PCC ruleid.
* The purpose of the PCC rule is to identify the service the Service
* Data Flow (SDF) contributes to, provide applicable charging parameters
* for the SDF and provide policy control for the SDF.
* @param : dp_id
* table identifier.
* @param : entry
* element to be added in this table.
* @return : Returns 0 in case of success , -1 otherwise
*/
int
pcc_entry_add(struct dp_id dp_id, struct pcc_rules entry);
/********************* Bearer Session ****************/
/**
* @brief : Function to create Bearer Session table.
* This table allow to configure Bearer Sessions per UEs.
* Please refer "struct session_info" for the
* configurable parameters.
* @param : dp_id
* table identifier.
* @param : max_element
* max number of elements in this table.
* @return : Returns 0 in case of success , -1 otherwise
*/
int session_table_create(struct dp_id dp_id, uint32_t max_elements);
/**
* @brief : Create UE Session.
* This API allows to create Bearer sessions of UEs.
* Bearer session can be either per UE or per Bearer per UE based.
* In case of per bearer per UE, the last 3 bits of sess_id
* maps to bearer id.
* To update downlink related params please refer session_modify().
* @param : dp_id
* table identifier.
* @param : session
* Session information
* @return : Returns 0 in case of success , -1 otherwise
*/
int
session_create(struct dp_id dp_id, struct session_info session);
/**
* @brief : Modify Bearer Session per user.
* This API allows to modify Bearer sessions of UEs.
* The information regarding uplink and downlink should
* be updated when passing session.
* If there is mismatch in ul_s1_info this API overwrites
* the old rules which were set by session_create().
* @param : dp_id
* table identifier.
* @param : session
* Session information
* @return : Returns 0 in case of success , -1 otherwise
*/
int
session_modify(struct dp_id dp_id, struct session_info session);
#ifdef DP_BUILD
/**
* @brief : Downlink data notification ack information. The information
* regarding downlink should be updated bearer info.
* @param : dp_id
* table identifier.
* @param : ddn_ack
* Downlink data notification ack information
* @return : Returns 0 in case of success , -1 otherwise
*/
int
send_ddn_ack(struct dp_id dp_id,
struct downlink_data_notification_ack_t ddn_ack);
#endif /* DP_BUILD */
/**
* @brief : To Delete Bearer Session of user. For deleting session,
* sess_id must be updated and all other fields can be left NULL.
* @param : dp_id
* table identifier.
* @param : session
* Session information
* @return : Returns 0 in case of success , -1 otherwise
*/
int
session_delete(struct dp_id dp_id, struct session_info session);
/********************* Meter Table ****************/
/**
* @brief : Create Meter profile table.
* This API allows to create a standard meter profile table,
* The entries in this table can be used to configure metering
* across all UEs.
* @param : dp_id
* dp_id - table identifier.
* @param : max_element
* max_element - max number of elements in this table.
* @return : Returns 0 in case of success , -1 otherwise
*/
int
meter_profile_table_create(struct dp_id dp_id, uint32_t max_elements);
/**
* @brief : Add Meter profile entry. Each entry should be configured
* with unique id i.e. mtr_profile_index and with configurable mtr_params.
* This meter profile index can be used for PCC metering and APN metering.
* When creating PCC rule, the mtr_profile_index has
* to be set as per requirement. And when creating Bearer Session
* with APN metering, apn_mtr_idx has to be set as per requirement.
* @param : dp_id
* table identifier.
* @param : mtr_entry
* meter entry
* @return : Returns 0 in case of success , -1 otherwise
*/
int
meter_profile_entry_add(struct dp_id dp_id, struct mtr_entry mtr_entry);
/**
* @brief : Delete ADC filter entry.
* @param : dp_id, identifier which is unique across DataPlanes.
* @param : adc_filter_entry, element to be added in this table.
* @return : Returns 0 in case of success , -1 otherwise
*/
int
dp_adc_entry_delete(struct dp_id dp_id, struct adc_rules *adc_filter_entry);
/**
 * @brief : Encode an li_header_t into a byte buffer for transmission
 * @param : header, LI header to encode
 * @param : buf, output buffer (caller-allocated; must be large enough for the encoded header)
 * @return : presumably the number of encoded bytes — TODO confirm against the definition
 */
int
encode_li_header(li_header_t *header, uint8_t *buf);
/**
 * @brief : Build the LI header for the given payload
 * @param : uiPayload, payload buffer
 * @param : uiPayloadLen, pointer to payload length — presumably updated to include the header; verify against the definition
 * @param : type, type of payload
 * @param : id, LI identifier
 * @param : uiImsi, IMSI of the UE
 * @param : srcIp, source ip address
 * @param : dstIp, destination ip address
 * @param : uiSrcPort, source port number
 * @param : uiDstPort, destination port number
 * @param : uiOprMode, operation mode
 * @return : NOTE(review): return convention not visible here — confirm against the definition
 */
int8_t
create_li_header(uint8_t *uiPayload, int *uiPayloadLen, uint8_t type,
uint64_t id, uint64_t uiImsi, struct ip_addr srcIp, struct ip_addr dstIp,
uint16_t uiSrcPort, uint16_t uiDstPort, uint8_t uiOprMode);
/**
 * @brief : Build a struct ip_addr from raw address fields
 * @param : ip_type, address family selector — presumably IPTYPE_IPV4_LI/IPTYPE_IPV6_LI; confirm against the definition
 * @param : ipv4, IPv4 address (used when ip_type selects IPv4)
 * @param : ipv6, IPv6 address bytes (used when ip_type selects IPv6)
 * @return : Returns the populated ip_addr structure
 */
struct ip_addr
fill_ip_info(uint8_t ip_type, uint32_t ipv4, uint8_t *ipv6);
#endif /* _CP_DP_API_H_ */
|
nikhilc149/e-utran-features-bug-fixes | cp_dp_api/teid_upf.h | <reponame>nikhilc149/e-utran-features-bug-fixes<filename>cp_dp_api/teid_upf.h
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TEID_UPF_H
#define TEID_UPF_H
/**
* @file
*
* Contains all data structures and functions to manage and/or
* obtain value for teid assignement.
*
*/
#include <stdint.h>
#include <pfcp_struct.h>
/* File name of TEIDRI and peer node address */
#define TEIDRI_FILENAME "../config/upf_teid_range_data.csv"
/* dataplane rte logs */
#define RTE_LOGTYPE_DP RTE_LOGTYPE_USER1
#define TEID_NAME "TEIDRI"
#define TEID_LEN 10
/**
 * @brief : Collection of assigned TEID range and connected CP node address
 */
typedef struct teidri_info_t {
	/* IP address of connected CP */
	node_address_t node_addr;
	/* TEID range assigned to CP */
	uint8_t teid_range;
	/* Next entry in the singly-linked list */
	struct teidri_info_t *next;
}teidri_info;
/**
 * @brief : Reads assigned teid ranges and CP node addresses, adds this data to
 * the teidri blocked list, and initializes the free teid range list.
 * @param : filename
 * filepath to store teid related information
 * @param : blocked_list_head
 * teidri_info linked list head of blocked teid ranges
 * @param : free_list_head
 * teidri_info linked list head of free teid ranges
 * @param : teidri_val
 * configured teid range indicator value
 * @return : Returns 0 on success -1 otherwise
 */
int
read_teidri_data (char *filename, teidri_info **blocked_list_head, teidri_info **free_list_head, uint8_t teidri_val);
/**
* @brief : search and get node TEIDRI value if available in stored data.
* @param : teid_range, TEIDRI value.
* @param : node_addr, node address of CP .
* @param : head
* teidri_info linked list head
* @return : Returns
* 1 - on success , node address and teidri found.
* 0 - node address not found.
*/
int
get_teidri_from_list(uint8_t *teid_range, node_address_t node_addr, teidri_info **head);
/**
* @brief : Write TEIDRI value and node address into file in csv format.
* @param : teid_range, TEIDRI value.
* @param : node_addr, node address of CP .
* @param : allocated_list_head
* teidri_info allocated linked list head
* @param : free_list_head
* teidri_info free linked list head
* @return : Returns 0 on success , -1 otherwise
*/
int
add_teidri_node_entry(uint8_t teid_range, node_address_t node_addr,
char *filename, teidri_info **allocated_list_head,
teidri_info **free_list_head);
/**
 * @brief : delete all content from the file.
* @param : filename, file name,
 * @param : blocked_list_head
 *          teidri_info blocked linked list head
 * @param : allocated_list_head
 *          teidri_info allocated linked list head
 * @param : free_list_head
 *          teidri_info free linked list head
* @param : teidri_val
* configured teid range indicator value
* @return : Returns 0 on success , -1 otherwise
*/
int
flush_inactive_teidri_data(char *filename, teidri_info **blocked_list_head, teidri_info **allocated_list_head,
teidri_info **free_list_head, uint8_t teidri_val);
/**
* @brief : Delete TEIDRI value and node address from file.
* @param : filename, file name.
* @param : node_addr, node address of CP .
* @param : head
* pointer to teidri_info list
* @param : free_list_head
* teidri_info free linked list head
* @param : teidri_val
* configured teid range indicator value
* @return : Returns
* 0 - on success.
* -1 - on fail.
*/
int
delete_teidri_node_entry(char *filename, node_address_t node_addr, teidri_info **head, teidri_info **free_list_head,
uint8_t teidri_val);
/**
* @brief : Assign teid range from next available teid ranges
* @param : val , teidri value , must be between 0 to 7
* @param : free_list_head
* linked list head of free teid ranges
* @return : Returns teid range in case of success, -1 otherwise
*/
int8_t
assign_teid_range(uint8_t val, teidri_info **free_list_head);
/**
 * @brief : Retrieves node from list for given ip
* @param : head
* teidri_info linked list head
* @param : ip
* ip address of CP
* @return : Returns pointer to node in case of success, NULL otherwise
*/
teidri_info *
get_teidri_info(teidri_info **head, node_address_t upf_ip);
/**
* @brief : Adds new node to the list
* @param : head
* teidri_info linked list head
* @param : newNode
 *          new node to be added in list
* @return : Returns 0 in case of success, -1 otherwise
*/
int8_t
add_teidri_info(teidri_info **head, teidri_info *newNode);
/**
* @brief : Deletes node from list for given ip
* @param : ip
* ip address of DP
* @param : head
* pointer to teidri_info list
* @return : Returns nothing
*/
void
delete_entry_from_teidri_list_for_ip(node_address_t node_value, teidri_info **head);
/**
* @brief : Deletes node from list for given ip
* @param : head
* pointer to teidri_info list
* @param : teid_range
* teid range for which entry to be deleted
* @return : Returns nothing
*/
void
delete_entry_from_list_for_teid_range(teidri_info **head, uint8_t teid_range);
/**
* @brief : Searches for given teid range value in given list
* @param : head
* teidri_info linked list head
* @param : teidri_range
* teid range value to be searched
* @return : Returns 0 on success -1 otherwise
*/
int
search_list_for_teid_range(teidri_info **head, uint8_t teid_range);
/**
* @brief : Create list of free teid ranges
* @param : blocked_list_head
* teidri_info linked list head of blocked teid ranges
* @param : free_list_head
* teidri_info linked list head free teid ranges
* @param : teidri_val
* configured teid range indicator value
* @param : num_cp
* number of cp's in blocked list
* @return : Returns nothing
*/
void
create_teid_range_free_list(teidri_info **blocked_list_head, teidri_info **free_list_head, uint8_t teidri_val, uint8_t num_cp);
/**
* @brief : Compares IP types of both addresses
* @param : node
* temporary stored structure for ip address
* @param : addr
* ip address of node
* @return : Returns 1 if ip types are matching, otherwise 0
*/
uint8_t compare_ip_address(node_address_t node, node_address_t addr);
#endif /* TEID_UPF_H */
|
nikhilc149/e-utran-features-bug-fixes | cp/redis_client.c | <reponame>nikhilc149/e-utran-features-bug-fixes
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "redis_client.h"
extern int clSystemLog;
redisContext *ctx = NULL;
redisSSLContext *ssl = NULL;
/**
 * @brief : Establish a connection to the redis server over plain TCP or
 *          TLS (per cfg->type) and register this CP's IP address in the
 *          "connected_cp" set on the server.
 * @param : cfg, redis connection configuration (transport type, host,
 *          port, timeout and, for TLS, certificate/key paths)
 * @return : Returns a connected redisContext on success, NULL otherwise
 */
redisContext* redis_connect(redis_config_t* cfg)
{
	/* Local connection handle; named distinctly so it does not shadow
	 * the file-scope ctx. Returned to the caller on success. */
	redisContext *conn = NULL;
	redisReply *reply = NULL;
	redisSSLContextError ssl_error = 0;

	if ( redisInitOpenSSL() != REDIS_OK ) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to initialize SSL connection "
				"with redis server", LOG_VALUE);
		return NULL;
	}

	if (cfg->type == REDIS_TCP) {

		conn = redisConnectWithTimeout(cfg->conf.tcp.host,
				cfg->conf.tcp.port, cfg->conf.tcp.timeout);

	} else if (cfg->type == REDIS_TLS) {

		redisOptions options = {0};
		REDIS_OPTIONS_SET_TCP(&options, cfg->conf.tls.host,
				cfg->conf.tls.port);
		options.timeout = &cfg->conf.tls.timeout;
		options.endpoint.tcp.source_addr = cfg->cp_ip;

		conn = redisConnectWithOptions(&options);
		if (conn == NULL || conn->err) {
			if (conn) {
				clLog(clSystemLog, eCLSeverityCritical,
						"Connection error: %s\n", conn->errstr);
				redisFree(conn);
			} else {
				clLog(clSystemLog, eCLSeverityCritical,
						"Connection error: can't allocate"
						"redis context\n");
			}
			return NULL;
		}

		/* Build the SSL context from the configured CA/cert/key and
		 * wrap the established TCP connection with it. */
		ssl = redisCreateSSLContext(cfg->conf.tls.ca_cert_path, NULL,
				cfg->conf.tls.cert_path, cfg->conf.tls.key_path, NULL, &ssl_error);
		if (!ssl) {
			clLog(clSystemLog, eCLSeverityCritical,
					"Error: %s\n", redisSSLContextGetError(ssl_error));
			redisFree(conn);
			return NULL;
		}

		if (redisInitiateSSLWithContext(conn, ssl) != REDIS_OK) {
			clLog(clSystemLog, eCLSeverityCritical,
					"Couldn't initialize SSL!\n");
			clLog(clSystemLog, eCLSeverityCritical,
					"Error: %s\n", conn->errstr);
			redisFree(conn);
			redisFreeSSLContext(ssl);
			ssl = NULL;
			return NULL;
		}

	} else {
		clLog(clSystemLog, eCLSeverityCritical,"Invalid"
				"Connection Type.only TCP and"
				"TLS is supported");
		return NULL;
	}

	if (conn == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,"Connection"
				"Failed\n");
		return NULL;
	} else if (conn->err) {
		clLog(clSystemLog, eCLSeverityCritical,
				"Connection error: %s\n", conn->errstr);
		redisFree(conn);
		redisFreeSSLContext(ssl);
		ssl = NULL;
		return NULL;
	}

	/* Register this CP as connected. The reply object returned by
	 * redisCommand() must be released, otherwise it is leaked. */
	reply = redisCommand(conn, "SADD connected_cp %s", cfg->cp_ip);
	if (reply != NULL)
		freeReplyObject(reply);

	return conn;
}
/**
 * @brief : Push one CDR string onto the per-CP list (keyed by cp_ip)
 *          on the redis server.
 * @param : ctx, connected redis context
 * @param : cp_ip, list key (this CP's IP address)
 * @param : cdr, CDR record to store
 * @return : Returns 0 on success, -1 on bad arguments or command failure
 */
int redis_save_cdr(redisContext* ctx, char *cp_ip, char* cdr)
{
	redisReply *reply = NULL;

	if (ctx == NULL || cp_ip == NULL || cdr == NULL)
		return -1;

	reply = redisCommand(ctx, "LPUSH %s %s", cp_ip, cdr);
	if (reply == NULL) {
		/* NULL reply indicates a context-level error (e.g. broken
		 * connection); the command was not queued. */
		return -1;
	}

	/* The reply object is heap-allocated by hiredis and was previously
	 * leaked on every call; release it. */
	freeReplyObject(reply);
	return 0;
}
/**
 * @brief : Tear down the redis connection and release the module-level
 *          SSL context.
 * @param : ctx, redis context to free (may be NULL)
 * @return : Returns 0 always
 */
int redis_disconnect(redisContext* ctx)
{
	/* Free the connection first; the SSL context must outlive it. */
	redisFree(ctx);

	/* Clear the file-scope ssl pointer after freeing so a second
	 * disconnect (or a later redis_connect failure path) cannot
	 * double-free it. */
	redisFreeSSLContext(ssl);
	ssl = NULL;
	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | ulpc/legacy_admf_interface/include/LegacyAdmfInterfaceTalker.h | <gh_stars>0
/*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __LEGACY_ADMF_INTERFACE_TALKER_H_
#define __LEGACY_ADMF_INTERFACE_TALKER_H_
#include <iostream>
#include <cstdlib>
#include "epctools.h"
#include "etevent.h"
#include "esocket.h"
#include "LegacyAdmfInterfaceThread.h"
class LegacyAdmfInterfaceThread;
/**
 * @brief : TCP talker (per-connection socket handler) for the legacy
 *          ADMF interface; receives the connect/receive/close/error
 *          callbacks raised by the ESocket framework.
 */
class LegacyAdmfInterfaceTalker : public ESocket::TCP::TalkerPrivate
{
	public:
		/* Constructs a talker owned by the given interface thread */
		LegacyAdmfInterfaceTalker(LegacyAdmfInterfaceThread &thread);
		virtual ~LegacyAdmfInterfaceTalker();

		/* Socket event callbacks (invoked by the ESocket framework) */
		Void onConnect();
		Void onReceive();
		Void onClose();
		Void onError();

		/* Sends an acknowledgement carrying the given sequence number */
		Void sendAck(uint32_t seqNum);

	private:
		/* Default construction disallowed; a talker needs its thread */
		LegacyAdmfInterfaceTalker();
};
#endif /* endif __LEGACY_ADMF_INTERFACE_TALKER_H_ */
|
nikhilc149/e-utran-features-bug-fixes | cp/state_machine/sm_struct.h | <gh_stars>0
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SM_STRUCT_H
#define SM_STRUCT_H
#include "stdio.h"
#include "sm_enum.h"
#include "sm_hand.h"
#include "pfcp_set_ie.h"
#include "pfcp_messages.h"
#include "gtp_messages.h"
#define PROC_NAME_LEN 128
#define STATE_NAME_LEN 128
#define EVNT_NAME_LEN 128
#define MAX_TFT_LEN 257
struct rte_hash *sm_hash;
extern char state_name[STATE_NAME_LEN];
extern char event_name[EVNT_NAME_LEN];
extern struct rte_hash *li_info_by_id_hash;
extern struct rte_hash *li_id_by_imsi_hash;
/* Interface on which a message was received by the CP. */
enum source_interface {
	GX_IFACE = 1,	/* Gx (PCRF) interface */
	S11_IFACE = 2,	/* S11 (MME <-> SGW) interface */
	S5S8_IFACE = 3,	/* S5/S8 (SGW <-> PGW) interface */
	PFCP_IFACE = 4,	/* PFCP (Sx) interface */
};
/* F-TEID interface type values (see 3GPP TS 29.274, F-TEID IE). */
enum fteid_interface {
	S1_U_eNodeB_GTP_U = 0,
	S1_U_SGW_GTP_U = 1,
	S12_RNC_GTP_U = 2,
	S12_SGW_GTP_U = 3,
	S5_S8_SGW_GTP_U = 4,
	S5_S8_PGW_GTP_U = 5,
	S5_S8_SGW_GTP_C = 6,
	S5_S8_PGW_GTP_C = 7,
	S5_S8_SGW_PMIPv6 = 8,
	S5_S8_PGW_PMIPv6 = 9,
	S11_MME_GTP_C = 10,
};
//extern enum source_interface iface;
/**
 * @brief : Maintains a UE context's bearer identifiers and TEIDs; used
 *          as a lookup key when correlating responses to a session.
 */
typedef struct ue_context_key {
	/* default bearer identifier index */
	int32_t ebi_index;
	/* UE Context key == teid */
	uint32_t teid;
	/* UE Context key == sender teid */
	uint32_t sender_teid;
	/* UE Context key == sequence number */
	uint32_t sequence;
	/* list of bearer identifiers */
	uint32_t bearer_ids[MAX_BEARERS];
	/* IMSI value */
	uint64_t imsi;
} context_key;
/* TODO: Need to optimized generic structure. */
/**
 * @brief : Holds one decoded incoming message (GTPv2-C, PFCP or Gx)
 *          together with the dispatch metadata (procedure, state,
 *          event) consumed by the state machine. Exactly one member of
 *          the union matching msg_type is valid at a time.
 */
typedef struct msg_info{
	uint8_t msg_type;       /* type code of the decoded message */
	uint8_t state;          /* current state-machine state */
	uint8_t event;          /* event derived from this message */
	uint8_t proc;           /* procedure this message belongs to */
	uint8_t cp_mode;        /* CP role for this session */
	uint8_t interface_type; /* interface the message arrived on */

	/* VS: GX Msg retrieve teid of key for UE Context */
	uint8_t eps_bearer_id;
	uint32_t teid;

	char sgwu_fqdn[MAX_HOSTNAME_LENGTH]; /* SGW-U FQDN, when signalled */
	// struct in_addr upf_ipv4;
	node_address_t upf_ip;  /* selected UPF node address */
	//enum source_interface iface;

	/* copy create bearer response for negative scenario (MBResp wrong teid) attach with dedicated flow */
	create_bearer_rsp_t cb_rsp;

	/* Decoded GTPv2-C message bodies */
	union gtpc_msg_info {
		create_sess_req_t csr;
		create_sess_rsp_t cs_rsp;
		mod_bearer_req_t mbr;
		mod_bearer_rsp_t mb_rsp;
		del_sess_req_t dsr;
		del_sess_rsp_t ds_rsp;
		rel_acc_ber_req_t rel_acc_ber_req;
		dnlnk_data_notif_ack_t ddn_ack;
		dnlnk_data_notif_fail_indctn_t ddn_fail_ind;
		create_bearer_req_t cb_req;
		create_bearer_rsp_t cb_rsp;
		del_bearer_req_t db_req;
		del_bearer_rsp_t db_rsp;
		upd_bearer_req_t ub_req;
		upd_bearer_rsp_t ub_rsp;
		pgw_rstrt_notif_ack_t pgw_rstrt_notif_ack;
		upd_pdn_conn_set_req_t upd_pdn_req;
		upd_pdn_conn_set_rsp_t upd_pdn_rsp;
		del_pdn_conn_set_req_t del_pdn_req;
		del_pdn_conn_set_rsp_t del_pdn_rsp;
		del_bearer_cmd_t del_ber_cmd;
		del_bearer_fail_indctn_t del_fail_ind;
		bearer_rsrc_cmd_t bearer_rsrc_cmd;
		bearer_rsrc_fail_indctn_t ber_rsrc_fail_ind;
		mod_bearer_cmd_t mod_bearer_cmd;
		mod_bearer_fail_indctn_t mod_fail_ind;
		change_noti_req_t change_not_req;
		change_noti_rsp_t change_not_rsp;
		create_indir_data_fwdng_tunn_req_t crt_indr_tun_req;
		create_indir_data_fwdng_tunn_rsp_t crt_indr_tun_rsp;
		del_indir_data_fwdng_tunn_req_t dlt_indr_tun_req;
		del_indir_data_fwdng_tunn_resp_t dlt_indr_tun_resp;
		mod_acc_bearers_req_t mod_acc_req;
		mod_acc_bearers_rsp_t mod_acc_resp;
	}gtpc_msg;

	/* Decoded PFCP message bodies */
	union pfcp_msg_info_t {
		pfcp_pfd_mgmt_rsp_t pfcp_pfd_resp;
		pfcp_assn_setup_rsp_t pfcp_ass_resp;
		pfcp_sess_estab_rsp_t pfcp_sess_est_resp;
		pfcp_sess_mod_rsp_t pfcp_sess_mod_resp;
		pfcp_sess_del_rsp_t pfcp_sess_del_resp;
		pfcp_sess_rpt_req_t pfcp_sess_rep_req;
		pfcp_sess_set_del_req_t pfcp_sess_set_del_req;
		pfcp_sess_set_del_rsp_t pfcp_sess_set_del_rsp;
	}pfcp_msg;

	/* Decoded Gx message bodies */
	union gx_msg_info_t {
		GxCCA cca;
		GxRAR rar;
	}gx_msg;
}msg_info;
/**
 * @brief : Stores the Create Bearer Response contents; used only for
 *          the attach-with-dedicated-bearer flow.
 */
struct cb_rsp_info {
	uint8_t cause_value;                     /* top-level cause value */
	uint8_t bearer_cnt;                      /* number of bearer contexts */
	uint8_t bearer_cause_value[MAX_BEARERS]; /* per-bearer cause values */
	uint8_t ebi_ebi[MAX_BEARERS];            /* per-bearer EPS bearer ids */
	uint32_t seq ;                           /* GTP sequence number */
}__attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
 * @brief : Structure for handling CS/MB/DS requests synchronously;
 *          one entry per session, stored in the session hash and
 *          consulted when the matching response arrives.
 */
struct resp_info {
	uint8_t proc;                  /* procedure in progress */
	uint8_t state;                 /* state-machine state */
	uint8_t msg_type;              /* message type awaiting a response */
	uint8_t cp_mode;               /* CP role for this session */
	uint8_t pfcp_seq;              /* PFCP sequence number */

	/* attach with dedicated flow */
	uint8_t cbr_seq;

	/* Default Bearer Id */
	uint8_t linked_eps_bearer_id;
	uint8_t eps_bearer_id;

	/* Dedicated bearers: count and their EBIs */
	uint8_t bearer_count;
	uint8_t eps_bearer_ids[MAX_BEARERS];

	/* Store the GX session ID for error scenario */
	char gx_sess_id[GX_SESS_ID_LEN];

	uint32_t s5s8_sgw_gtpc_teid;
	uint32_t teid;

	/* Per-bearer TFTs (pointers into bearer-level TFT storage) and
	 * their header lengths */
	uint8_t *eps_bearer_lvl_tft[MAX_BEARERS];
	uint8_t tft_header_len[MAX_BEARERS];

	/* Store the Create Bearer Response only for attach with dedicated flow */
	struct cb_rsp_info cb_rsp_attach;

	/* Copy of the request being handled; the member matching msg_type
	 * is the valid one. */
	union gtpc_msg {
		create_sess_req_t csr;
		create_sess_rsp_t cs_rsp;
		mod_bearer_req_t mbr;
		create_bearer_rsp_t cb_rsp;
		create_bearer_req_t cb_req;
		del_sess_req_t dsr;
		rel_acc_ber_req_t rel_acc_ber_req;
		del_bearer_cmd_t del_bearer_cmd;
		bearer_rsrc_cmd_t bearer_rsrc_cmd;
		mod_bearer_cmd_t mod_bearer_cmd;
		change_noti_req_t change_not_req;
		del_bearer_req_t db_req;
		upd_bearer_req_t ub_req;
		upd_bearer_rsp_t ub_rsp;
		upd_pdn_conn_set_req_t upd_req;
		upd_pdn_conn_set_rsp_t upd_rsp;
		del_indir_data_fwdng_tunn_req_t dlt_indr_tun_req;
		mod_acc_bearers_req_t mod_acc_req;
		mod_acc_bearers_rsp_t mod_acc_resp;
	}gtpc_msg;
}__attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/* Declaration of state machine 3D array */
typedef int (*const EventHandler[END_PROC+1][END_STATE+1][END_EVNT+1])(void *t1, void *t2);
/**
* @brief : Create a session hash table to maintain the session information.
* @param : No param
* @return : Returns nothing
*/
void
init_sm_hash(void);
/**
* @brief : Add session entry in session table.
* @param : sess_id, session id
* @param : resp, structure to store session info
* @return : Returns 0 in case of success , -1 otherwise
*/
uint8_t
add_sess_entry(uint64_t sess_id, struct resp_info *resp);
/**
 * @brief : Retrieve session entry from session table.
* @param : sess_id, session id
* @param : resp, structure to store session info
* @return : Returns 0 in case of success , -1 otherwise
*/
uint8_t
get_sess_entry(uint64_t sess_id, struct resp_info **resp);
/**
 * @brief : Retrieve session state from session table.
* @param : sess_id, session id
* @return : Returns 0 in case of success , -1 otherwise
*/
uint8_t
get_sess_state(uint64_t sess_id);
/**
* @brief : Update session state in session table.
* @param : sess_id, session id
* @param : state, new state to be updated
* @return : Returns 0 in case of success , -1 otherwise
*/
uint8_t
update_sess_state(uint64_t sess_id, uint8_t state);
/**
* @brief : Delete session entry from session table.
* @param : sess_id, session id
* @return : Returns 0 in case of success , Cause value otherwise
*/
uint8_t
del_sess_entry(uint64_t sess_id);
/**
* @brief : Update UE state in UE Context.
* @param : context, structure for context information
* @param : state, new state to be updated
* @param : ebi_index, index of bearer id stored in array
* @return : Returns 0 in case of success , -1 otherwise
*/
uint8_t
update_ue_state(ue_context *context, uint8_t state, int ebi_index);
/**
 * @brief : Retrieve UE state from UE Context.
* @param : teid_key, key for search
* @param : ebi_index, index of bearer id stored in array
* @return : Returns 0 in case of success , -1 otherwise
*/
uint8_t
get_ue_state(uint32_t teid_key ,int ebi_index);
/**
 * Retrieve Bearer entry from Bearer table.
*/
int8_t
get_bearer_by_teid(uint32_t teid_key, struct eps_bearer_t **bearer);
/**
 * Retrieve ue context entry from Bearer table, using sgwc s5s8 teid.
*/
int8_t
get_ue_context_by_sgw_s5s8_teid(uint32_t teid_key, ue_context **context);
/**
 * @brief : Retrieve UE Context entry from Indirect Tunnel Sender Hash
* @param : teid_key, key to search context
* @param : context, structure to store retrived context
* @return : Returns 0 in case of success , -1 otherwise
*/
int8_t
get_sender_teid_context(uint32_t teid_key, ue_context **context);
/**
 * @brief : Retrieve UE Context entry from UE Context table.
* @param : teid_key, key to search context
* @param : context, structure to store retrived context
* @return : Returns 0 in case of success , -1 otherwise
*/
int8_t
get_ue_context(uint32_t teid_key, ue_context **context);
/**
* @brief : This function use only in clean up while error.
* @param : teid_key, key to search context
* @param : context, structure to store retrived context
* @return : Returns 0 in case of success , -1 otherwise
*/
int8_t
get_ue_context_while_error(uint32_t teid_key, ue_context **context);
/**
 * @brief : Retrieve PDN entry from PDN table.
* @param : teid_key, key for search
* @param : pdn, structure to store retrived pdn
* @return : Returns 0 in case of success , -1 otherwise
*/
int
get_pdn(ue_context **context, apn *apn_requested, pdn_connection **pdn);
/**
* @brief : Get proc name from enum
* @param : value , enum value of procedure
* @return : Returns procedure name
*/
const char * get_proc_string(int value);
/**
* @brief : Get state name from enum
* @param : value , enum value of state
* @return : Returns state name
*/
const char * get_state_string(int value);
/**
* @brief : Get event name from enum
* @param : value , enum value of event
* @return : Returns event name
*/
const char * get_event_string(int value);
/**
* @brief : Update UE proc in UE Context.
* @param : context, structure for context information
* @param : proc, procedure
* @param : ebi_index, index of bearer id stored in array
* @return : Returns 0 in case of success , -1 otherwise
*/
uint8_t
update_ue_proc(ue_context *context, uint8_t proc, int ebi_index);
/**
* @brief : Update Procedure according to indication flags
* @param : msg, message data
* @return : Returns 0 in case of success , -1 otherwise
*/
uint8_t
get_procedure(msg_info *msg);
/**
* @brief : Find Pending CSR Procedure according to indication flags
* @param : csr, csr data
* @return : Returns 0 in case of success , -1 otherwise
*/
uint8_t
get_csr_proc(create_sess_req_t *csr);
#endif
|
nikhilc149/e-utran-features-bug-fixes | ulpc/admf/include/AdmfApp.h | /*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __ADMF_APP_H_
#define __ADMF_APP_H_
#include <iostream>
#include <signal.h>
#include <limits>
#include "etevent.h"
#include "epctools.h"
#include "efd.h"
#include "BaseLegacyAdmfInterface.h"
#include "UeEntry.h"
#define LOG_SYSTEM 1
#define LOG_AUDIT 2
#define LOG_ADMF 3
#define LOG_SYSTEM_LEGACY 4
#define OPERATION_DEBUG 1
#define OPERATION_LI 2
#define OPERATION_BOTH 3
#define D_ADMF_IP "DADMF_IP"
#define D_ADMF_PORT "DADMF_PORT"
#define D_ADMF_REQUEST 0
#define ADMF_REQUEST 1
#define ZERO 0
#define ONE 1
#define REQUEST_SOURCE_KEY "requestSource"
#define RET_SUCCESS 0
#define RET_FAILURE -1
#define INVALID_IMSI -1
#define EMPTY_STRING ""
#define TCP_PROT "tcp"
#define UDP_PROT "udp"
#define REST_PROT "rest"
#define ADD_REQUEST 1
#define UPDATE_REQUEST 3
#define START_UE 5
#define STOP_UE 7
#define DELETE_REQUEST 9
#define ADMF_PACKET 10
#define SAFE_DELETE(p) { if (p) { delete(p); (p) = NULL; } }
#define __file__ (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
class AdmfInterface;
class DAdmfInterface;
class AdmfController;
class LegacyAdmfInterface;
class ConfigParser;
/**
 * @brief : Maintains ADMF configurations read from the config file.
 */
typedef struct configurations {
	std::string dadmfIp;          /* D-ADMF host address */
	std::string legacyInterfaceIp;/* legacy ADMF interface host address */
	uint16_t dadmfPort;           /* D-ADMF listen port */
	uint16_t admfPort;            /* ADMF listen port */
	std::string admfIp;           /* ADMF host address */
} configurations_t;
/* Connection settings for the legacy ADMF interface. */
typedef struct legacyAdmfIntfcConfig {
	std::string admfIp;           /* ADMF host address */
	std::string legacyAdmfIp;     /* legacy ADMF host address */
	uint16_t admfPort;            /* ADMF port */
	uint16_t legacyAdmfPort;      /* legacy ADMF port */
	uint16_t legacyAdmfIntfcPort; /* legacy ADMF interface port */
	/* Transport used to reach the legacy ADMF */
	enum protocol {tcp, udp, rest};
	protocol interfaceProtocol;
} legacy_admf_intfc_config_t;
/* Wire format of a UE-entry packet exchanged with the legacy ADMF.
 * Packed to 1-byte alignment because it is sent/parsed as raw bytes. */
#pragma pack(push, 1)
typedef struct admfPacket {
	uint32_t packetLength;     /* total length of the serialized packet */
	struct ueEntry {
		uint64_t seqId;        /* request sequence identifier */
		uint64_t imsi;         /* target UE IMSI */
		uint16_t packetType;   /* e.g. ADMF_PACKET */
		uint16_t requestType;  /* e.g. ADD/UPDATE/START/STOP/DELETE */
		UChar startTime[21];   /* interception start time (string) */
		UChar stopTime[21];    /* interception stop time (string) */
	} ue_entry_t;
} admf_packet_t;
#pragma pack(pop)
std::string
ConvertIpForRest(const std::string &strIp);
class AdmfApplication
{
public:
AdmfApplication() : mpAdmfInterface(NULL), mpLegacyAdmfInterface(NULL),
mpAdmfWorker(NULL)
{
}
~AdmfApplication()
{
}
/**
* @brief : Initializes all required objects
* @param : opt, command-line parameter
* @return : Returns nothing
*/
void startup(EGetOpt &opt, BaseLegacyAdmfInterface *legacyAdmfIntfc);
/**
* @brief : Deletes all the initialized objects before exiting the process
* @param : No param
* @return : Returns nothing
*/
void shutdown();
/**
* @brief : Sets shutdown event of EpcTools on handling the signal
* @param : No param
* @return : Returns nothing
*/
void setShutdownEvent() { mShutdown.set(); }
/**
* @brief : Waits until process is killed or shutdown event is set
* @param : No param
* @return : Returns nothing
*/
void waitForShutdown() { mShutdown.wait(); }
/**
* @brief : Getter method to fetch AdmfController reference
* @param : No param
* @return : Returns reference to admfController
*/
AdmfController &getAdmfController() { return *mpAdmfWorker; }
/**
* @brief : Getter method to fetch DadmfInterface reference
* @param : No param
* @return : Returns reference to dadmfInterface
*/
DAdmfInterface &getDadmfInterface() { return *mpDadmfInterface; }
/**
* @brief : Getter method to fetch AdmfInterface reference
* @param : No param
* @return : Returns reference to admfInterface
*/
AdmfInterface &getAdmfInterface() { return *mpAdmfInterface; }
/**
* @brief : Getter method to fetch LegacyAdmfInterface reference
* @param : No param
* @return : Returns reference to legacyAdmfInterface
*/
BaseLegacyAdmfInterface &getLegacyAdmfInterface() { return *mpLegacyAdmfInterface; }
/**
* @brief : Setter method to set LegacyAdmfInterface reference created
by dynamic loading of library.
* @param : ptr, pointer pointing to class object in library
* @return : Returns nothing
*/
void setLegacyAdmfInterface(BaseLegacyAdmfInterface *ptr)
{ mpLegacyAdmfInterface = ptr; }
/**
* @brief : Getter method to fetch Ue entries which has not received ACK
* @param : No param
* @return : Returns reference to map containing Ue entries.
*/
std::map<uint64_t, ack_t> &getMapPendingAck()
{
return mapPendingAck;
}
/**
* @brief : Setter method to add Ue entry which has not received ACK
* @param : ackMap, map containing Ue entries
* @return : Returns nothing
*/
void setMapPendingAck(const std::map<uint64_t, ack_t> ackMap)
{
mapPendingAck = ackMap;
}
private:
AdmfInterface *mpAdmfInterface;
DAdmfInterface *mpDadmfInterface;
BaseLegacyAdmfInterface *mpLegacyAdmfInterface;
EEvent mShutdown;
AdmfController *mpAdmfWorker;
std::map<uint64_t, ack_t> mapPendingAck;
};
#endif /* __ADMF_APP_H_ */
|
nikhilc149/e-utran-features-bug-fixes | dp/ipv6_rs.c | <gh_stars>0
/*
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rte_log.h>
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include "gw_adapter.h"
#include "ipv4.h"
#include "ipv6.h"
#include "gtpu.h"
#include "util.h"
extern int clSystemLog;
uint16_t remove_hdr_len = 0;
/**
* @brief : Function to set router solicitation request as router advertisement response
* @param : pkt rte_mbuf pointer
* @return : Returns nothing
*/
static void reset_req_pkt_as_resp(struct rte_mbuf *pkt) {
uint16_t len = 0;
struct udp_hdr *udphdr = NULL;
/* Get the Pkt Len */
len = rte_pktmbuf_data_len(pkt);
/* Swap src and destination mac addresses */
struct ether_hdr *eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
struct ether_addr tmp_mac;
ether_addr_copy(ð->d_addr, &tmp_mac);
ether_addr_copy(ð->s_addr, ð->d_addr);
ether_addr_copy(&tmp_mac, ð->s_addr);
/* Swap src and dst IP addresses */
if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
struct ipv4_hdr *ip_hdr = get_mtoip(pkt);
uint32_t tmp_ip = ip_hdr->dst_addr;
ip_hdr->dst_addr = ip_hdr->src_addr;
ip_hdr->src_addr = tmp_ip;
len = len - ETH_HDR_SIZE;
ip_hdr->total_length = htons(len);
/* Update len for UDP Header */
len = len - IPv4_HDR_SIZE;
udphdr = get_mtoudp(pkt);
} else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
struct ipv6_hdr *ip_hdr = get_mtoip_v6(pkt);
uint8_t tmp_ip[IPV6_ADDR_LEN] = {0};
memcpy(&tmp_ip, &ip_hdr->dst_addr, IPV6_ADDR_LEN);
memcpy(&ip_hdr->dst_addr, &ip_hdr->src_addr, IPV6_ADDR_LEN);
memcpy(&ip_hdr->src_addr, &tmp_ip, IPV6_ADDR_LEN);
len = len - (IPv6_HDR_SIZE + ETH_HDR_SIZE);
ip_hdr->payload_len = htons(len);
udphdr = get_mtoudp_v6(pkt);
}
/* Swap src and dst UDP ports */
uint16_t tmp_port = udphdr->dst_port;
udphdr->dst_port = udphdr->src_port;
udphdr->src_port = tmp_port;
udphdr->dgram_len = htons(len);
}
/**
 * @brief : Function to set ipv6 router advertisement IE: rewrites the
 *          inner (GTP-U encapsulated) IPv6 payload into an ICMPv6
 *          router advertisement and updates the GTP-U header.
 * @param : pkt rte_mbuf pointer
 * @param : teid tunnel identifier to place in the GTP-U header
 * @return : Returns 0 in case of success , -1 otherwise
 */
static int set_ipv6_ra(struct rte_mbuf *pkt, uint32_t teid)
{
	uint16_t len = 0, total_len = 0;
	struct ether_hdr *eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
	struct gtpu_hdr *gtpu_hdr = NULL;
	struct ipv6_hdr *ipv6_hdr = NULL;
	struct icmp6_hdr_ra *router_advert = NULL;

	/* Locate the GTP-U header behind the outer IPv4/IPv6 transport and
	 * accumulate outer header sizes in total_len.
	 * NOTE(review): for any other ether_type gtpu_hdr stays NULL and is
	 * dereferenced below — assumes callers only pass IP packets; confirm. */
	if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		gtpu_hdr = get_mtogtpu(pkt);
		total_len += IPV4_HDR_LEN;
	} else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
		gtpu_hdr = get_mtogtpu_v6(pkt);
		total_len += IPv6_HDR_SIZE;
	}
	total_len += (ETHER_HDR_LEN + UDP_HDR_SIZE + GTPU_HDR_SIZE + IPv6_HDR_SIZE);

	/* Update Inner IPv6 header */
	ipv6_hdr = (struct ipv6_hdr*)((char*)gtpu_hdr + GTPU_HDR_SIZE);
	/* Bytes to append: RA body size minus the current RS payload size.
	 * NOTE(review): this uint16_t subtraction wraps if payload_len
	 * exceeds sizeof(struct icmp6_hdr_ra) — confirm upstream guarantees
	 * a small router-solicitation payload. */
	len = sizeof(struct icmp6_hdr_ra) - htons(ipv6_hdr->payload_len);
	/* Remember replaced payload size in the file-scope remove_hdr_len
	 * for the caller's datapath. */
	remove_hdr_len = htons(ipv6_hdr->payload_len);
	router_advert = (struct icmp6_hdr_ra *)rte_pktmbuf_append(pkt, len);
	/* Validate the packet now has room for the full RA message (the
	 * append() return value itself is re-pointed below). */
	if ((pkt->pkt_len - total_len) < sizeof(struct icmp6_hdr_ra)) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"RA:Couldn't "
			"append %u bytes to memory buffer, pkt_len:%u, total_len:%u, ipv6_payload_len:%u\n",
			LOG_VALUE, len, pkt->pkt_len, total_len, htons(ipv6_hdr->payload_len));
		return -1;
	}

	/* Update the payload entry */
	ipv6_hdr->payload_len = htons(sizeof(struct icmp6_hdr_ra));
	/* Swap Src Addr to DST Address */
	memcpy(&ipv6_hdr->dst_addr, &ipv6_hdr->src_addr, IPV6_ADDR_LEN);

	/* Point to the current location of the router advertisement ie.
	 * NOTE(review): this pointer arithmetic can never yield NULL, so
	 * the check below is effectively dead code. */
	router_advert = (struct icmp6_hdr_ra*)((char*)gtpu_hdr + GTPU_HDR_SIZE + IPv6_HDR_SIZE);
	if (router_advert == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"RA:Couldn't "
			"append %u bytes to memory buffer",
			LOG_VALUE, len);
		return -1;
	}

	/* Setting the GTPU Header Info */
	gtpu_hdr->msgtype = GTP_GPDU;
	gtpu_hdr->teid = teid;
	gtpu_hdr->msglen = htons(IPv6_HDR_SIZE +
			sizeof(struct icmp6_hdr_ra));

	memset(router_advert, 0, sizeof(struct icmp6_hdr_ra));

	/* Fill the router advertisement message information */
	router_advert->icmp.icmp6_type = ICMPv6_ROUTER_ADVERTISEMENT;
	router_advert->icmp.icmp6_code = 0;
	/* Cur hop limit / flags / router lifetime fields of the RA */
	router_advert->icmp.icmp6_data.icmp6_data8[0] = 64;
	router_advert->icmp.icmp6_data.icmp6_data8[1] = 0;
	router_advert->icmp.icmp6_data.icmp6_data16[1] = 65535;
	router_advert->icmp6_reachable_time = 0;
	router_advert->icmp6_retrans_time = 0;

	/* Prefix-information option (zeroed prefix, infinite lifetimes) */
	router_advert->opt.type = PREFIX_INFORMATION;
	router_advert->opt.flags = 0;
	router_advert->opt.valid_lifetime = 0xffffffff;
	router_advert->opt.preferred_lifetime = 0xffffffff;
	router_advert->opt.reserved = 0;
	router_advert->opt.length = 4;
	router_advert->opt.prefix_length = 0;
	memset(&router_advert->opt.prefix_addr, 0, IPV6_ADDR_LEN);

	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"RA: Added Router Advert IE in the pkt. \n", LOG_VALUE);
	return 0;
}
/**
* @brief : Function to set checksum of IPv4 and UDP header
* @param : pkt rte_mbuf pointer
* @return : Returns nothing
*/
void ra_set_checksum(struct rte_mbuf *pkt)
{
struct udp_hdr *udphdr = NULL;
struct ether_hdr *eth = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
struct ipv4_hdr *ipv4hdr = get_mtoip(pkt);
ipv4hdr->hdr_checksum = 0;
udphdr = get_mtoudp(pkt);
udphdr->dgram_cksum = 0;
udphdr->dgram_cksum = rte_ipv4_udptcp_cksum(ipv4hdr, udphdr);
ipv4hdr->hdr_checksum = rte_ipv4_cksum(ipv4hdr);
}else if (eth->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
/* Note: IPv6 header not contain the checksum */
struct ipv6_hdr *ipv6hdr = get_mtoip_v6(pkt);
udphdr = get_mtoudp_v6(pkt);
udphdr->dgram_cksum = 0;
udphdr->dgram_cksum = rte_ipv6_udptcp_cksum(ipv6hdr, udphdr);
}
}
/**
 * @brief : Handle an incoming router solicitation: build the router
 *          advertisement payload in place, then flip the packet's
 *          addressing so it can be sent back as the response.
 * @param : pkt rte_mbuf pointer
 * @param : teid tunnel identifier for the GTP-U header
 * @return : Returns nothing
 */
void process_router_solicitation_request(struct rte_mbuf *pkt, uint32_t teid)
{
	remove_hdr_len = 0;

	if (set_ipv6_ra(pkt, teid) < 0) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Failed to create router advert resp msg\n", LOG_VALUE);
		return;
	}

	reset_req_pkt_as_resp(pkt);
	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"RS: Resp packet created for router advert resp msg\n", LOG_VALUE);
}
|
nikhilc149/e-utran-features-bug-fixes | cp/gx_app/src/main.c | <gh_stars>0
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <unistd.h>
#include <signal.h>
#include "gx.h"
#include "ipc_api.h"
extern int gx_app_sock;
extern int g_gx_client_sock;
/* Shutdown flag set by signal_handler() and polled by the main loop.
 * volatile sig_atomic_t is the only object type the C standard
 * guarantees safe to write from an async signal handler (CERT SIG31-C). */
volatile sig_atomic_t done = 0;

/**
 * @brief : SIGINT handler; requests a graceful shutdown.
 * @param : sig, signal number (unused)
 * @return : Returns nothing
 */
void signal_handler(int sig)
{
	(void)sig;	/* signature mandated by signal(); value unused */
	done = 1;
}
/**
 * @brief : Initialize the freeDiameter core library and parse its
 *          configuration file.
 * @param : fdcfg, path to the freeDiameter configuration file
 * @return : FD_REASON_OK on success; FD_REASON_CORE_INIT_FAIL or
 *           FD_REASON_PARSECONF_FAIL on failure
 */
int fdinit(const char *fdcfg)
{
	/* Initialize the core freeDiameter library */
	CHECK_FCT_DO( fd_core_initialize(), return FD_REASON_CORE_INIT_FAIL );
	/* Parse the configuration file */
	CHECK_FCT_DO( fd_core_parseconf(fdcfg), return FD_REASON_PARSECONF_FAIL );

	return FD_REASON_OK;
}
/**
 * @brief : Start the freeDiameter peer machinery (connections, threads).
 * @param : No param
 * @return : FD_REASON_OK on success, FD_REASON_PARSECONF_FAIL otherwise.
 *           NOTE(review): returning FD_REASON_PARSECONF_FAIL for a
 *           start failure looks copy-pasted from fdinit() — confirm
 *           whether a dedicated "core start" reason code exists.
 */
int fdstart()
{
	/* Start freeDiameter */
	CHECK_FCT_DO( fd_core_start(), return FD_REASON_PARSECONF_FAIL );

	return FD_REASON_OK;
}
/**
 * @brief : Parse fd configuration and extract the peer node name
 *          (the quoted value on the ConnectPeer line).
 * @param : filename , config file name
 * @param : peer_name , output buffer (at least MAX_PEER_NAME_LEN bytes,
 *          zero-initialized by the caller) receiving the peer node name
 * @return : Returns 0 in case of success , -1 otherwise
 */
static int
parse_fd_config(const char *filename, char *peer_name)
{
	FILE *gx_fd = NULL;
	char data[1024] = {0};
	char *token = NULL;
	char *token1 = NULL;
	size_t str_len = 0;

	/* fopen() returns NULL on failure; the pointer must be tested against
	 * NULL, not compared with <= 0 */
	if ((gx_fd = fopen(filename, "r")) == NULL) {
		fprintf(stderr, "ERROR :[ %s ] unable to read [ %s ] file\n" ,__func__ ,filename);
		return -1;
	}

	/* Read the whole buffer, not a hard-coded 256 bytes */
	while ((fgets(data, sizeof(data), gx_fd)) != NULL) {
		/* Skip comment lines */
		if (data[0] == '#') {
			continue;
		}

		if (strstr(data, CONNECTPEER) != NULL) {
			/* Peer name is the text between the first pair of quotes */
			token = strchr(data, '"');
			if (token != NULL) {
				token1 = strchr(token + 1, '"');
				/* Guard against a missing closing quote: the original
				 * computed token1 - token with token1 == NULL (UB) */
				if (token1 != NULL) {
					str_len = token1 - token;
					if (str_len > 0) {
						memcpy(peer_name, token + 1, str_len - 1);
					}
				}
			}
			fclose(gx_fd);
			return 0;
		}
	}

	fclose(gx_fd);
	return -1;
}
/**
 * @brief : Entry point of the Gx application. Installs a SIGINT handler,
 *          initializes and starts freeDiameter, registers the Gx
 *          application, waits until the configured peer connection is
 *          OPEN, opens the IPC unix socket, then idles until SIGINT.
 * @param : argc, argument count (unused)
 * @param : argv, argument vector (unused)
 * @return : 0 on clean shutdown, non-zero on initialization failure,
 *           -1 when interrupted before the peer connected
 */
int main(int argc, char **argv)
{
	int rval = 0;
	const char *fdcfg = "gx.conf";
	char peer_name[MAX_PEER_NAME_LEN] = {0};

	printf("Registering signal handler...");
	if ( signal(SIGINT, signal_handler) == SIG_ERR )
	{
		printf("Cannot catch SIGINT\n");
		return 1;
	}
	printf("complete\n");

	printf("Initializing freeDiameter...");
	if ( (rval = fdinit(fdcfg)) != FD_REASON_OK )
	{
		printf("Failure (%d) in fdinit()\n", rval);
		return 1;
	}
	printf("complete\n");

	printf("Calling gxInit()...");
	if ( (rval = gxInit()) != FD_REASON_OK )
	{
		printf("Failure (%d) in gxInit()\n", rval);
		return 1;
	}
	printf("complete\n");

	printf("Calling gxRegistger()...");
	if ( (rval = gxRegister()) != FD_REASON_OK )
	{
		printf("Failure (%d) in gxRegister()\n", rval);
		return 1;
	}
	printf("complete\n");

	printf("Starting freeDiameter...");
	if ( (rval = fdstart()) != FD_REASON_OK )
	{
		printf("Failure (%d) in fdstart()\n", rval);
		return 1;
	}
	printf("complete\n");

	/* Pull the peer node name out of the freeDiameter config file */
	if(parse_fd_config(fdcfg, peer_name) < 0 ) {
		fprintf(stderr, "unable to read [ %s ] file \n",fdcfg);
		return -1;
	}

	printf("Waiting to connect to [%s] \n", peer_name);
	/* Poll once a second until the peer connection reaches an OPEN state;
	 * bail out and shut down if SIGINT arrives while waiting */
	while(1) {
		struct peer_hdr *peer;
		sleep(1);
		if ( ! fd_peer_getbyid(peer_name, strnlen(peer_name,MAX_PEER_NAME_LEN), 1, &peer ) ){
			int state = fd_peer_get_state(peer);
			if ( state == STATE_OPEN || state == STATE_OPEN_NEW ) {
				break;
			}
		}
		if(done == 1) {
			close_ipc_channel(g_gx_client_sock);
			fd_core_shutdown();
			fd_core_wait_shutdown_complete();
			return -1;
		}
	}
	printf("complete\n");

	/* Open the unix-domain IPC socket towards the control plane */
	if ( (rval = unixsock()) != FD_REASON_OK )
	{
		printf("Failure (%d) in unixsock()\n", rval);
		return 1;
	}
	printf("complete\n");

	/* Idle until SIGINT flips 'done', then shut everything down cleanly */
	while (!done)
		sleep(1);

	close_ipc_channel(g_gx_client_sock);
	fd_core_shutdown();
	fd_core_wait_shutdown_complete();
	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | ulpc/d_admf/include/UeTimer.h | /*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __UETIMER_H_
#define __UETIMER_H_
#include "emgmt.h"
#include "etevent.h"
#include "elogger.h"
#include "Common.h"
/* Wraps an EThreadEventTimer together with the UE data, the action to run
 * on expiry, and the (optional) JSON request body it was created for. */
class EUeTimer
{
	public:
		/**
		 * @brief : Constructs a timer wrapper for one UE
		 * @param : ueData, UE data this timer is associated with
		 * @param : action, action identifier to perform when the timer fires
		 */
		EUeTimer(const ue_data_t &ueData, uint8_t action);

		/**
		 * @brief : Getter method to fetch request body
		 * @param : No param
		 * @return : Returns strJsonRequest if it is set, empty string otherwise
		 */
		std::string getStrJsonRequest() { return strJsonRequest; }

		/**
		 * @brief : Setter method to set request body
		 * @param : request, request body received with request
		 * @return : Returns nothing
		 */
		void setStrJsonRequest(const std::string &request)
		{ strJsonRequest = request; }

		/**
		 * @brief : Getter method to fetch the scheduled elapse time
		 * @return : Returns the stored timeToElapse value
		 */
		uint64_t getTimeToElapse() { return timeToElapse; }

		/**
		 * @brief : Setter method to set the scheduled elapse time
		 * @param : time, time value after which the timer should elapse
		 * @return : Returns nothing
		 */
		void setTimeToElapse(const uint64_t time)
		{ timeToElapse = time; }

		/**
		 * @brief : Accessor for the underlying event timer
		 * @return : Reference to the EThreadEventTimer member
		 */
		EThreadEventTimer& getTimer() {
			return timer;
		}

		/**
		 * @brief : Accessor for the UE data (returned by value)
		 * @return : Copy of the stored ue_data_t
		 */
		ue_data_t getUeData() {
			return ueData;
		}

		/**
		 * @brief : Accessor for the configured timer action
		 * @return : timerAction widened to uint64_t
		 */
		uint64_t getTimerAction() {
			return timerAction;
		}

	private:
		ue_data_t ueData;            // UE data this timer belongs to
		uint8_t timerAction;         // action to run on expiry
		std::string strJsonRequest;  // request body associated with the UE
		uint64_t timeToElapse;       // scheduled elapse time
		EThreadEventTimer timer;     // underlying event timer
};
#endif /* __UETIMER_H_ */
|
nikhilc149/e-utran-features-bug-fixes | pfcp_messages/csid_up_cleanup.c | <reponame>nikhilc149/e-utran-features-bug-fixes
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "up_main.h"
#include "teid_upf.h"
#include "pfcp_util.h"
#include "pfcp_enum.h"
#include "csid_struct.h"
#include "pfcp_set_ie.h"
#include "pfcp_up_sess.h"
#include "gw_adapter.h"
#include "seid_llist.h"
#include "pfcp_messages_encoder.h"
extern bool assoc_available;
extern int clSystemLog;
extern uint16_t dp_comm_port;
/**
 * @brief : Cleanup csid using csid entry. For every peer CSID, removes all
 *          of the given local CSIDs from the peer's mapped CSID list and
 *          deletes the mapping entirely once its list is empty.
 * @param : peer_fqcsid, peer FQ-CSID whose mappings are being cleaned
 * @param : local_fqcsid, local CSIDs to strip from each mapping
 * @param : iface, interface the mapping hash is keyed on
 * @return : Returns 0 in case of success, -1 otherwise
 */
static int8_t
cleanup_csid_by_csid_entry(fqcsid_t *peer_fqcsid, fqcsid_t *local_fqcsid, uint8_t iface)
{
	for (uint8_t itr = 0; itr < peer_fqcsid->num_csid; itr++) {
		csid_t *local_csids = NULL;
		csid_key_t key_t = {0};

		key_t.local_csid = peer_fqcsid->local_csid[itr];
		key_t.node_addr = peer_fqcsid->node_addr;

		local_csids = get_peer_csid_entry(&key_t, iface, REMOVE_NODE);
		if (local_csids == NULL) {
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
					"Failed to Get Local CSID entry:"
					" Peer Node addr : %u , CSID : %d \n",
					LOG_VALUE, key_t.node_addr, key_t.local_csid);
			continue;
		}

		/* Strip every matching local CSID out of the mapped list */
		for (uint8_t itr1 = 0; itr1 < local_fqcsid->num_csid; itr1++) {
			for (uint8_t itr2 = 0; itr2 < local_csids->num_csid; itr2++) {
				if (local_fqcsid->local_csid[itr1] == local_csids->local_csid[itr2]) {
					/* Shift the tail left to fill the removed slot */
					for(uint32_t pos = itr2; pos < (local_csids->num_csid - 1); pos++ ) {
						local_csids->local_csid[pos] = local_csids->local_csid[pos + 1];
					}
					local_csids->num_csid--;
					/* Re-examine this slot: it now holds the shifted-in
					 * CSID. Without this decrement the element that moved
					 * into position itr2 was skipped. (uint8_t wraparound
					 * at itr2 == 0 is undone by the loop increment.) */
					itr2--;
				}
			}
		}

		/* Mapping list exhausted: remove the peer CSID entry itself */
		if (!local_csids->num_csid) {
			if (del_peer_csid_entry(&key_t, iface)) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to delete "
						" CSID entry: %s \n", LOG_VALUE, strerror(errno));
				return -1;
			}
		}
	}
	return 0;
}
/**
 * @brief : Merge the CSIDs of a peer FQ-CSID into an accumulator FQ-CSID,
 *          skipping values already present, and record the peer node
 *          address on the accumulator.
 * @param : peer_fqcsid, source FQ-CSID to merge from
 * @param : local_fqcsid, accumulator FQ-CSID, updated in place
 * @return : Returns 0 in case of success, -1 otherwise
 */
static int8_t
get_peer_assoc_csids(fqcsid_t *peer_fqcsid, fqcsid_t *local_fqcsid)
{
	for (uint8_t idx = 0; idx < peer_fqcsid->num_csid; idx++) {
		uint8_t found = 0;

		/* Deduplicate against what is already accumulated */
		for (uint8_t pos = 0; pos < local_fqcsid->num_csid; pos++) {
			if (local_fqcsid->local_csid[pos] == peer_fqcsid->local_csid[idx]) {
				found = 1;
				break;
			}
		}

		if (found)
			continue;

		local_fqcsid->local_csid[local_fqcsid->num_csid++] =
			peer_fqcsid->local_csid[idx];
	}

	/* Node Addr */
	local_fqcsid->node_addr = peer_fqcsid->node_addr;
	return 0;
}
/**
* @brief : Cleanup session using csid entry
* @param : csids
* @return : Returns 0 in case of success, -1 otherwise
*/
static int8_t
cleanup_sess_by_csid_entry(fqcsid_t *csids)
{
	/* NOTE(review): rte_hash_add_key_data() returns an int (negative on
	 * error); storing it in a uint8_t keeps the truth value but loses the
	 * sign — confirm intended */
	uint8_t ret = 0;
	/* Accumulators for every distinct peer FQ-CSID seen while tearing
	 * down the sessions; used for the per-peer cleanup at the end */
	fqcsid_t mme_fqcsid = {0};
	fqcsid_t sgwc_fqcsid = {0};
	fqcsid_t pgwc_fqcsid = {0};
	fqcsid_t wb_fqcsid = {0};
	fqcsid_t eb_fqcsid = {0};

	/* Get the session ID by csid */
	for (uint16_t itr = 0; itr < csids->num_csid; itr++) {
		sess_csid *tmp_t = NULL;
		sess_csid *current = NULL;

		tmp_t = get_sess_csid_entry(csids->local_csid[itr], REMOVE_NODE);
		if (tmp_t == NULL) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Entry not found, CSID: %u\n", LOG_VALUE, csids->local_csid[itr]);
			continue;
		}

		/* TODO: Temp handling the corner scenarios for temp allocated CSIDs */
		/* Check SEID is not ZERO */
		if ((tmp_t->up_seid == 0) && (tmp_t->next == 0)) {
			continue;
		}

		current = tmp_t;
		/* Walk the session list linked to this CSID, destroying each
		 * session node as we go */
		while(current != NULL) {
			sess_csid *tmp = NULL;
			pfcp_session_t *sess = NULL;
			/* Get the session information from session table based on UP_SESSION_ID*/
			sess = get_sess_info_entry(current->up_seid,
					SESS_DEL);

			if (sess == NULL) {
				/* Stale list node with no backing session: unlink,
				 * free it, and advance */
				tmp = current->next;
				current->next = NULL;

				/* free node */
				if(current != NULL) {
					rte_free(current);
					current = NULL;
				}
				current = tmp;
				continue;
			}

			/* Collect this session's peer FQ-CSIDs into the accumulators */
			/* MME FQ-CSID */
			if(sess->mme_fqcsid != NULL) {
				if (get_peer_assoc_csids(sess->mme_fqcsid, &mme_fqcsid) < 0) {
					/* TODO: ERR Handling */
				}
			}

			/* SGWC FQ-CSID */
			if (sess->sgw_fqcsid != NULL) {
				if (get_peer_assoc_csids(sess->sgw_fqcsid, &sgwc_fqcsid) < 0) {
					/* TODO: ERR Handling */
				}
			}

			/* PGWC FQ-CSID */
			if (sess->pgw_fqcsid != NULL) {
				if (get_peer_assoc_csids(sess->pgw_fqcsid, &pgwc_fqcsid) < 0) {
					/* TODO: ERR Handling */
				}
			}

			/* West Bound/eNB/SGWU FQ-CSID */
			if(sess->wb_peer_fqcsid != NULL) {
				if (get_peer_assoc_csids(sess->wb_peer_fqcsid, &wb_fqcsid) < 0) {
					/* TODO: ERR Handling */
				}
			}

			/* East Bound/PGWU FQ-CSID */
			if(sess->eb_peer_fqcsid != NULL) {
				if (get_peer_assoc_csids(sess->eb_peer_fqcsid, &eb_fqcsid) < 0) {
					/* TODO: ERR Handling */
				}
			}

			/* Cleanup Session dependant information such as PDR, QER and FAR */
			/* NOTE(review): on failure this 'continue' re-enters the loop
			 * without advancing 'current' — potential infinite loop if
			 * up_delete_session_entry() keeps failing; confirm */
			if (up_delete_session_entry(sess, NULL))
				continue;

			/* Cleanup the session */
			if (sess != NULL) {
				rte_free(sess);
			}
			sess = NULL;

			tmp = current->next;
			current->next = NULL;

			/* free node */
			if(current != NULL) {
				rte_free(current);
			}
			current = NULL;
			current = tmp;
		}

		/* Update CSID Entry in table */
		/* 'current' is NULL here, which clears the list for this CSID */
		ret = rte_hash_add_key_data(seids_by_csid_hash,
				&csids->local_csid[itr], current);
		if (ret) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Failed to update Session IDs entry for CSID = %u"
					"\n\tError= %s\n",
					LOG_VALUE, csids->local_csid[itr],
					rte_strerror(abs(ret)));
		}
	}

	/* Cleanup MME FQ-CSID */
	if (mme_fqcsid.num_csid != 0) {
		if(cleanup_csid_by_csid_entry(&mme_fqcsid, csids, SX_PORT_ID) < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to delete "
					"MME FQ-CSID entry while cleanup session by CSID entry, "
					"Error: %s \n", LOG_VALUE, strerror(errno));
			return -1;
		}
	}

	/* Cleanup SGWC FQ-CSID associte with peer CSID */
	if (sgwc_fqcsid.num_csid != 0) {
		if(cleanup_csid_by_csid_entry(&sgwc_fqcsid, csids, SX_PORT_ID) < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to delete "
					"SGW-C FQ-CSID entry while cleanup session by CSID entry, "
					"Error: %s \n", LOG_VALUE, strerror(errno));
			return -1;
		}
	}

	/* Cleanup PGWC FQ-CSID associte with peer CSID */
	if (pgwc_fqcsid.num_csid != 0) {
		if(cleanup_csid_by_csid_entry(&pgwc_fqcsid, csids, SX_PORT_ID) < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to delete "
					"PGW-C FQ-CSID entry while cleanup session by CSID entry, "
					"Error: %s \n", LOG_VALUE, strerror(errno));
			return -1;
		}
	}

	/* Cleanup West_Bound/eNB/SGWU FQ-CSID */
	if (wb_fqcsid.num_csid != 0) {
		if(cleanup_csid_by_csid_entry(&wb_fqcsid, csids, S1U_PORT_ID) < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to delete "
					"eNB/SGWU/WB FQ-CSID entry while cleanup session by CSID entry, "
					"Error: %s \n", LOG_VALUE, strerror(errno));
			return -1;
		}
	}

	/* Cleanup East_Bound/PGWU FQ-CSID */
	if (eb_fqcsid.num_csid != 0) {
		if(cleanup_csid_by_csid_entry(&eb_fqcsid, csids, SGI_PORT_ID) < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to delete "
					"PGW-U/EB FQ-CSID entry while cleanup session by CSID entry, "
					"Error: %s \n", LOG_VALUE, strerror(errno));
			return -1;
		}
	}
	return 0;
}
/**
 * @brief : Cleanup peer node address entry for fqcsid IE address.
 *          Drops the address mapping once no peer CSID of the node has a
 *          session list left behind it.
 * @param : key, key of the peer node address hash.
 * @return : In success return 0, otherwise -1.
 */
static int
cleanup_peer_node_addr_entry(peer_node_addr_key_t *key) {
	fqcsid_ie_node_addr_t *addr_entry = NULL;
	fqcsid_t *peer_csids = NULL;

	addr_entry = get_peer_node_addr_entry(key, REMOVE_NODE);
	if (addr_entry == NULL) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
				"Entry not found for Peer Node Addrres : %u\n",
				LOG_VALUE, key->peer_node_addr.ipv4_addr);
		return -1;
	}

	/* Get peer CSID associated with node */
	peer_csids = get_peer_addr_csids_entry(&addr_entry->fqcsid_node_addr, UPDATE_NODE);
	if (peer_csids == NULL) {
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Peer CSIDs are already cleanup, Node_Addr:"IPV4_ADDR"\n",
				LOG_VALUE, IPV4_ADDR_HOST_FORMAT(addr_entry->fqcsid_node_addr.ipv4_addr));
		del_peer_node_addr_entry(key);
		return 0;
	}

	/* As soon as one peer CSID has no mapped session list, the address
	 * mapping can be deleted */
	for (int8_t itr = 0; itr < peer_csids->num_csid; itr++) {
		peer_csid_key_t peer_key = {0};

		peer_key.iface = SX_PORT_ID;
		peer_key.peer_local_csid = peer_csids->local_csid[itr];
		memcpy(&peer_key.peer_node_addr,
				&peer_csids->node_addr, sizeof(node_address_t));

		if (get_sess_peer_csid_entry(&peer_key, REMOVE_NODE) == NULL) {
			del_peer_node_addr_entry(key);
			return 0;
		}
	}
	return 0;
}
/**
 * @brief : Remove session from linked list entry mapped to a peer CSID and
 *          refresh (or delete) the corresponding hash entry.
 * @param : head, head of the session list for this peer CSID (may be NULL)
 * @param : seid, UP SEID of the session to unlink
 * @param : key, peer csid hash key.
 * @return : Returns 0 in case of success, -1 otherwise
 */
static int
remove_sess_entry(sess_csid *head, uint64_t seid, peer_csid_key_t *key) {
	int ret = 0;

	if (head == NULL) {
		/* Nothing is linked to this CSID: just drop the hash entry.
		 * Returning here avoids the original fall-through, which
		 * re-inserted the deleted key with a NULL list and then deleted
		 * it a second time. */
		del_sess_peer_csid_entry(key);
		return 0;
	}

	/* Remove node from csid linked list */
	head = remove_sess_csid_data_node(head, seid);

	/* Update CSID Entry in table */
	ret = rte_hash_add_key_data(seid_by_peer_csid_hash,
			key, head);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to add Session IDs entry"
				" for CSID = %u \n", LOG_VALUE, key->peer_local_csid);
		return -1;
	}

	if (head == NULL) {
		/* List became empty: delete the CSID entry itself */
		del_sess_peer_csid_entry(key);
	}
	return 0;
}
/**
* @brief : Cleanup session using csid entry
* @param : peer_csids,
* @oaram : iface,
* @return : Returns 0 in case of success, -1 otherwise
*/
static int8_t
cleanup_sess_by_peer_csid_entry(fqcsid_t *peer_csids, uint8_t iface)
{
	/* TODO : add cl log for ipv6 */
	uint8_t ret = 0;
	/* num_csid stays 0 throughout: only index [0] of each CP FQ-CSID is
	 * unlinked below */
	uint8_t num_csid = 0;
	/* First CP node address seen; used for the peer-node-address cleanup
	 * at the end */
	node_address_t cp_ip = {0};
	/* NOTE(review): 'key' is reused both as the loop's lookup key and,
	 * inside the session walk, as the key for the CP FQ-CSID unlinks —
	 * it is re-filled at the top of each outer iteration, confirm the
	 * final rte_hash_add_key_data() below uses the intended key value */
	peer_csid_key_t key = {0};
	fqcsid_t *csids = NULL;

	/* Get the session ID by csid */
	for (uint16_t itr = 0; itr < peer_csids->num_csid; itr++) {
		sess_csid *tmp_t = NULL;
		sess_csid *current = NULL;

		key.iface = iface;
		key.peer_local_csid = peer_csids->local_csid[itr];
		memcpy(&key.peer_node_addr,
				&peer_csids->node_addr, sizeof(node_address_t));

		tmp_t = get_sess_peer_csid_entry(&key, REMOVE_NODE);
		if (tmp_t == NULL) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Entry not found, CSID: %u\n", LOG_VALUE, peer_csids->local_csid[itr]);
			continue;
		}

		/* Check SEID is not ZERO */
		if ((tmp_t->up_seid == 0) && (tmp_t->next == 0)) {
			continue;
		}

		current = tmp_t;
		/* Walk the session list behind this peer CSID and tear each
		 * session down */
		while(current != NULL) {
			sess_csid *tmp = NULL;
			pfcp_session_t *sess = NULL;
			/* Get the session information from session table based on UP_SESSION_ID*/
			sess = get_sess_info_entry(current->up_seid,
					SESS_DEL);

			if (sess == NULL) {
				/* Stale node: unlink, free, advance */
				tmp = current->next;
				current->next = NULL;

				/* free node */
				if(current != NULL) {
					rte_free(current);
					current = NULL;
				}
				current = tmp;
				continue;
			}

			sess_csid *head = NULL;

			/* Delete Session for peer CSID hash */
			/* Unlink this session from the other CP peers' CSID lists
			 * (skipping the peer the cleanup was triggered for) */
			if ((sess->mme_fqcsid != NULL) && (((sess->mme_fqcsid)->num_csid) &&
						(memcmp(&peer_csids->node_addr,
							&(sess->mme_fqcsid)->node_addr, sizeof(node_address_t))))) {
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
						"Removing Session from MME CSID Link List \n", LOG_VALUE);

				/* Remove the session link from MME CSID */
				key.iface = SX_PORT_ID;
				key.peer_local_csid = (sess->mme_fqcsid)->local_csid[num_csid];
				key.peer_node_addr = (sess->mme_fqcsid)->node_addr;

				head = get_sess_peer_csid_entry(&key, REMOVE_NODE);
				remove_sess_entry(head, sess->up_seid, &key);
			}

			if ((sess->pgw_fqcsid != NULL) && (((sess->pgw_fqcsid)->num_csid) &&
						(memcmp(&peer_csids->node_addr,
							&(sess->pgw_fqcsid)->node_addr, sizeof(node_address_t))))) {
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
						"Removing Session from PGWC CSID Link List \n", LOG_VALUE);

				/* Remove the session link from UP CSID */
				key.iface = SX_PORT_ID;
				key.peer_local_csid = (sess->pgw_fqcsid)->local_csid[num_csid];
				key.peer_node_addr = (sess->pgw_fqcsid)->node_addr;

				head = get_sess_peer_csid_entry(&key, REMOVE_NODE);
				remove_sess_entry(head, sess->up_seid, &key);
			}

			if ((sess->sgw_fqcsid != NULL) && (((sess->sgw_fqcsid)->num_csid) &&
						(memcmp(&peer_csids->node_addr,
							&(sess->sgw_fqcsid)->node_addr, sizeof(node_address_t))))) {
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
						"Removing Session from SGWC CSID Link List \n", LOG_VALUE);

				/* Remove the session link from UP CSID */
				key.iface = SX_PORT_ID;
				key.peer_local_csid = (sess->sgw_fqcsid)->local_csid[num_csid];
				key.peer_node_addr = (sess->sgw_fqcsid)->node_addr;

				head = get_sess_peer_csid_entry(&key, REMOVE_NODE);
				remove_sess_entry(head, sess->up_seid, &key);
			}

			csids = sess->up_fqcsid;

			/* Get the session ID by csid */
			/* NOTE(review): this inner 'itr' shadows the outer loop's
			 * 'itr' — intentional but fragile */
			for (uint16_t itr = 0; itr < csids->num_csid; itr++) {
				sess_csid *seids = NULL;
				seids = get_sess_csid_entry(csids->local_csid[itr], REMOVE_NODE);
				if (seids == NULL) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get Session "
							"ID by CSID entry, Error: %s \n", LOG_VALUE, strerror(errno));
					return -1;
				}

				seids = remove_sess_csid_data_node(seids, sess->up_seid);

				/* Update CSID Entry in table */
				ret = rte_hash_add_key_data(seids_by_csid_hash,
						&csids->local_csid[itr], seids);
				if (ret) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Failed to add Session IDs entry for CSID = %u"
							"\n\tError= %s\n",
							LOG_VALUE, csids->local_csid[itr],
							rte_strerror(abs(ret)));
					return -1;
				}

				/* Last session behind this local CSID is gone: remove the
				 * CSID from every peer's mapping and drop its entry */
				if (seids == NULL) {
					if (sess->sgw_fqcsid != NULL) {
						clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
								"SGWC/SAEGWC Node Addr:"IPV4_ADDR"\n",
								LOG_VALUE,
								IPV4_ADDR_HOST_FORMAT((sess->sgw_fqcsid)->node_addr.ipv4_addr));

						if (del_csid_entry_hash(sess->sgw_fqcsid, csids,
									SX_PORT_ID)) {
							clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
									"Failed to delete SGW CSID "
									"entry from hash , Error: %s \n",
									LOG_VALUE, strerror(errno));
							return -1;
						}
					}

					if (sess->pgw_fqcsid != NULL) {
						clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
								"PGWC Node Addr:"IPV4_ADDR"\n",
								LOG_VALUE,
								IPV4_ADDR_HOST_FORMAT((sess->pgw_fqcsid)->node_addr.ipv4_addr));

						if (del_csid_entry_hash(sess->pgw_fqcsid, csids,
									SX_PORT_ID)) {
							clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
									"Error: %s \n", LOG_VALUE, strerror(errno));
							return -1;
						}
					}

					if (sess->up_fqcsid != NULL) {
						clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
								"UP Node Addr:"IPV4_ADDR"\n",
								LOG_VALUE,
								IPV4_ADDR_HOST_FORMAT((sess->up_fqcsid)->node_addr.ipv4_addr));

						if (del_csid_entry_hash(sess->up_fqcsid, csids,
									SGI_PORT_ID)) {
							clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
									"Error: %s \n", LOG_VALUE, strerror(errno));
							return -1;
						}
					}

					if (sess->mme_fqcsid != NULL) {
						clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
								"MME Node Addr:"IPV4_ADDR"\n",
								LOG_VALUE, IPV4_ADDR_HOST_FORMAT((sess->mme_fqcsid)->node_addr.ipv4_addr));

						if (del_csid_entry_hash(sess->mme_fqcsid, csids,
									SX_PORT_ID)) {
							clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
									"Failed to delete MME CSID "
									"entry from hash , Error: %s \n",
									LOG_VALUE, strerror(errno));
							return -1;
						}
					}

					if (sess->wb_peer_fqcsid != NULL) {
						clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
								"eNB/SGWU/West Bound Node Addr:"IPV4_ADDR"\n",
								LOG_VALUE,
								IPV4_ADDR_HOST_FORMAT((sess->wb_peer_fqcsid)->node_addr.ipv4_addr));

						if (del_csid_entry_hash(sess->wb_peer_fqcsid, csids,
									S1U_PORT_ID)) {
							clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
									"Error: %s \n", LOG_VALUE, strerror(errno));
							return -1;
						}
					}

					if (sess->eb_peer_fqcsid != NULL) {
						clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
								"PGWU/East Bound Node Addr:"IPV4_ADDR"\n",
								LOG_VALUE,
								IPV4_ADDR_HOST_FORMAT((sess->eb_peer_fqcsid)->node_addr.ipv4_addr));

						if (del_csid_entry_hash(sess->eb_peer_fqcsid, csids,
									SGI_PORT_ID)) {
							clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
									"Error: %s \n", LOG_VALUE,
									strerror(errno));
							return -1;
						}
					}

					if (del_sess_csid_entry(csids->local_csid[itr])) {
						clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
								"Failed to delete session by CSID "
								"entry , Error: %s \n", LOG_VALUE, strerror(errno));
						return -1;
					}
					/* Decrement the csid counters */
					csids->num_csid--;
				}
			}

			/* Remember the first session's CP address for the final
			 * peer-node-address cleanup */
			if (!(is_present(&cp_ip))) {
				memcpy(&cp_ip, &sess->cp_node_addr, sizeof(node_address_t));
			}

			/* Cleanup Session dependant information such as PDR, QER and FAR */
			/* NOTE(review): on failure this 'continue' does not advance
			 * 'current' — potential infinite loop if the delete keeps
			 * failing; confirm */
			if (up_delete_session_entry(sess, NULL))
				continue;

			/* Cleanup the session */
			if (sess != NULL) {
				rte_free(sess);
			}
			sess = NULL;

			tmp = current->next;
			current->next = NULL;
			/* free node */
			if(current != NULL) {
				rte_free(current);
				current = NULL;
			}
			current = tmp;
		}

		/* Update CSID Entry in table */
		/* 'current' is NULL here: clears the session list for this key */
		rte_hash_add_key_data(seid_by_peer_csid_hash, &key, current);
	}

	/* Delete Peer CSID entry */
	for (uint8_t itr = 0; itr < peer_csids->num_csid; ++itr) {
		key.iface = iface;
		key.peer_local_csid = peer_csids->local_csid[itr];
		memcpy(&key.peer_node_addr,
				&peer_csids->node_addr, sizeof(node_address_t));

		if (del_sess_peer_csid_entry(&key) < 0) {
			clLog(clSystemLog, eCLSeverityDebug,LOG_FORMAT"Error on "
					"Deleting peer CSID entry\n", LOG_VALUE);
		}
	}

	/* Drop the CP node-address mapping recorded above */
	peer_node_addr_key_t key_t = {0};
	key_t.iface = SX_PORT_ID;
	memcpy(&key_t.peer_node_addr, &cp_ip, sizeof(node_address_t));

	cleanup_peer_node_addr_entry(&key_t);

	return 0;
}
/**
 * @brief : Send pfcp sess. del. set req. to peer node's
 * @param : csids, list of local CSIDs whose sessions select the peers.
 * @param : iface, interface the CSID session lists are keyed on.
 * @return : Returns 0 in case of success,
 */
static int8_t
send_pfcp_sess_del_set_req(fqcsid_t *csids, uint8_t iface)
{
	uint8_t match = 0;
	uint8_t num_node_addr = 0;
	/* Distinct CP node addresses collected from the affected sessions */
	node_address_t node_addrs[MAX_CSID] = {0};
	pfcp_sess_set_del_req_t del_set_req_t = {0};

	/* Get the session ID by csid */
	for (uint16_t itr = 0; itr < csids->num_csid; itr++) {
		sess_csid *tmp_t = NULL;
		sess_csid *current = NULL;

		tmp_t = get_sess_csid_entry(csids->local_csid[itr], REMOVE_NODE);
		if (tmp_t == NULL) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Entry not found, CSID: %u\n",
					LOG_VALUE, csids->local_csid[itr]);
			continue;
		}

		/* Check SEID is not ZERO */
		if ((tmp_t->up_seid == 0) && (tmp_t->next == 0)) {
			continue;
		}

		current = tmp_t;
		/* Walk the session list and gather each session's CP address,
		 * deduplicated, into node_addrs[] */
		while(current != NULL) {
			sess_csid *tmp = NULL;
			pfcp_session_t *sess = NULL;
			/* Get the session information from session table based on UP_SESSION_ID*/
			sess = get_sess_info_entry(current->up_seid, SESS_DEL);

			if (sess == NULL) {
				tmp = current->next;
				current->next = NULL;

				/* free node */
				if(current != NULL) {
					rte_free(current);
					current = NULL;
				}
				current = tmp;
				continue;
			}

			if(sess->cp_ip.type == IPV6_TYPE){
				match = 0;
				for (uint8_t itr1 = 0; itr1 < num_node_addr; itr1++) {
					if (!memcmp(sess->cp_ip.ipv6.sin6_addr.s6_addr,
								node_addrs[itr1].ipv6_addr, IPV6_ADDR_LEN)) {
						match = 1;
						break;
					}
				}

				if (!match) {
					node_addrs[num_node_addr].ip_type = IPV6_TYPE;
					memcpy(node_addrs[num_node_addr++].ipv6_addr,
							sess->cp_ip.ipv6.sin6_addr.s6_addr, IPV6_ADDR_LEN);
				}
			} else if(sess->cp_ip.type == IPV4_TYPE) {
				match = 0;
				for (uint8_t itr1 = 0; itr1 < num_node_addr; itr1++) {
					if (sess->cp_ip.ipv4.sin_addr.s_addr == node_addrs[itr1].ipv4_addr) {
						match = 1;
						break;
					}
				}

				if (!match) {
					node_addrs[num_node_addr].ip_type = IPV4_TYPE;
					node_addrs[num_node_addr++].ipv4_addr = sess->cp_ip.ipv4.sin_addr.s_addr;
				}
			}

			/* Assign Next node address */
			tmp = current->next;
			current = tmp;
		}
	}

	/* Build the Session Set Deletion Request carrying our CSIDs */
	fill_pfcp_sess_set_del_req_t(&del_set_req_t, csids, iface);

	/* Send the Delete set Request to peer node */
	uint8_t pfcp_msg[1024]={0};
	int encoded = encode_pfcp_sess_set_del_req_t(&del_set_req_t, pfcp_msg);

	pfcp_header_t *header = (pfcp_header_t *) pfcp_msg;

	if (encoded) {
		/* One datagram per distinct CP address, over the matching
		 * address-family socket */
		for (uint8_t itr2 = 0; itr2 < num_node_addr; itr2++) {
			int bytes = 0;
			peer_address_t addr = {0};
			addr.type = node_addrs[itr2].ip_type;
			if (node_addrs[itr2].ip_type == IPV4_TYPE) {
				dest_addr_t.ipv4.sin_family = AF_INET;
				dest_addr_t.ipv4.sin_port = dp_comm_port;
				dest_addr_t.ipv4.sin_addr.s_addr = node_addrs[itr2].ipv4_addr;
				addr.ipv4.sin_addr.s_addr = node_addrs[itr2].ipv4_addr;
				if(my_sock.sock_fd <= 0) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"IPv4:PFCP send is "
							"not possible due to incompatiable IP Type at "
							"Source and Destination\n", LOG_VALUE);
					continue;
				}
				bytes = sendto(my_sock.sock_fd, (uint8_t *) pfcp_msg, encoded, MSG_DONTWAIT,
						(struct sockaddr *) &dest_addr_t.ipv4, sizeof(struct sockaddr_in));
			} else if (node_addrs[itr2].ip_type == IPV6_TYPE) {
				dest_addr_t.ipv6.sin6_family = AF_INET6;
				dest_addr_t.ipv6.sin6_port = dp_comm_port;
				memcpy(dest_addr_t.ipv6.sin6_addr.s6_addr,
						node_addrs[itr2].ipv6_addr, IPV6_ADDR_LEN);
				memcpy(addr.ipv6.sin6_addr.s6_addr,
						node_addrs[itr2].ipv6_addr, IPV6_ADDR_LEN);
				if(my_sock.sock_fd_v6 <= 0) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"IPv6:PFCP send is "
							"not possible due to incompatiable IP Type at "
							"Source and Destination\n", LOG_VALUE);
					continue;
				}
				bytes = sendto(my_sock.sock_fd_v6, (uint8_t *) pfcp_msg, encoded, MSG_DONTWAIT,
						(struct sockaddr *) &dest_addr_t.ipv6, sizeof(struct sockaddr_in6));
			}

			if (bytes > 0) {
				update_cli_stats(&addr, header->message_type, SENT, SX);
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"UP: Send the PFCP Session "
						"Set Deletion Request \n", LOG_VALUE);
			} else {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to Send PFCP "
						"Session Set Deletion Request, Error: %s \n",
						LOG_VALUE, strerror(errno));
			}
		}
	}
	/* Reset the Number CP Address */
	num_node_addr = 0;
	return 0;
}
/* Cleanup Session information by local csid*/
/**
 * @brief : Tear down all UP state associated with a peer node: TEIDRI
 *          bookkeeping (Sx only), the peer's CSID mappings, the sessions
 *          behind those CSIDs, and related hash entries.
 * @param : node_addr, address of the peer node being removed
 * @param : iface, interface the peer was known on (SX/S1U/SGI port id)
 * @return : 0 on success (or nothing to clean), -1 on lookup/cleanup error
 */
int8_t
up_del_pfcp_peer_node_sess(node_address_t *node_addr, uint8_t iface)
{
	int ret = 0;
	fqcsid_t csids = {0};
	peer_node_addr_key_t key = {0};
	fqcsid_ie_node_addr_t *fqcsid_node_id = NULL;
	fqcsid_t *peer_csids = NULL;

	clLog(clSystemLog, eCLSeverityDebug,LOG_FORMAT
			"UP Cleanup Internal data Structures For peer node \n", LOG_VALUE);
	if (iface == SX_PORT_ID) {
		if (app.teidri_val != 0) {
			/* TODO: TEID scenario handling for IPv6 */
			/* cleanup teidri entry for node*/
			ret = delete_teidri_node_entry(TEIDRI_FILENAME, *node_addr, &upf_teidri_allocated_list,
					&upf_teidri_free_list, app.teidri_val);
			if (ret) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to delete TEIDRI "
						"node entry, Error: %s \n", LOG_VALUE, strerror(errno));
			}
		} else {
			/* No TEIDRI in use: free the association slot */
			assoc_available = true;
		}
	}

	/* Get peer CSID associated with node */
	peer_csids = get_peer_addr_csids_entry(node_addr, UPDATE_NODE);
	if (peer_csids == NULL) {
		/* Fall back to the FQ-CSID IE address mapping for this peer */
		key.iface = iface;
		key.peer_node_addr = *node_addr;

		fqcsid_node_id = get_peer_node_addr_entry(&key, UPDATE_NODE);
		if (fqcsid_node_id == NULL) {
			(node_addr->ip_type == IPV6_TYPE) ?
				clLog(clSystemLog, eCLSeverityDebug,
						LOG_FORMAT"Peer CSIDs are already cleanup, Node IPv6 Addr:"IPv6_FMT"\n",
						LOG_VALUE, IPv6_PRINT(IPv6_CAST(node_addr->ipv6_addr))):
				clLog(clSystemLog, eCLSeverityDebug,
						LOG_FORMAT"Peer CSIDs are already cleanup, Node IPv4 Addr:"IPV4_ADDR"\n",
						LOG_VALUE, IPV4_ADDR_HOST_FORMAT(node_addr->ipv4_addr));
			return 0;
		}

		peer_csids = get_peer_addr_csids_entry(&fqcsid_node_id->fqcsid_node_addr,
				UPDATE_NODE);
		if (peer_csids == NULL) {
			(node_addr->ip_type == IPV6_TYPE) ?
				clLog(clSystemLog, eCLSeverityDebug,
						LOG_FORMAT"Peer CSIDs are already cleanup, Node IPv6 Addr:"IPv6_FMT"\n",
						LOG_VALUE, IPv6_PRINT(IPv6_CAST(node_addr->ipv6_addr))):
				clLog(clSystemLog, eCLSeverityDebug,
						LOG_FORMAT"Peer CSIDs are already cleanup, Node IPv4 Addr:"IPV4_ADDR"\n",
						LOG_VALUE, IPV4_ADDR_HOST_FORMAT(node_addr->ipv4_addr));
			return 0;
		}
	}

	/* Get the mapped local CSID */
	for (int8_t itr = 0; itr < peer_csids->num_csid; itr++) {
		csid_t *tmp = NULL;
		/* NOTE(review): this inner 'key' (csid_key_t) shadows the outer
		 * peer_node_addr_key_t 'key' declared above — confirm intended */
		csid_key_t key = {0};
		key.local_csid = peer_csids->local_csid[itr];
		key.node_addr = peer_csids->node_addr;

		tmp = get_peer_csid_entry(&key, iface, REMOVE_NODE);
		if (tmp == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get peer "
					"CSID entry, Error: %s \n", LOG_VALUE, strerror(errno));
			return -1;
		}

		for (int8_t itr1 = 0; itr1 < tmp->num_csid; itr1++) {
			csids.local_csid[csids.num_csid++] = tmp->local_csid[itr1];
		}
		csids.node_addr = tmp->node_addr;

		(csids.node_addr.ip_type == IPV6_TYPE) ?
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Found Local CSIDs, Node IPv6 Addr:"IPv6_FMT", Num_CSID:%u\n",
					LOG_VALUE, IPv6_PRINT(IPv6_CAST(csids.node_addr.ipv6_addr)), csids.num_csid):
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Found Local CSIDs, Node IPv4 Addr:"IPV4_ADDR", Num_CSID:%u\n",
					LOG_VALUE, IPV4_ADDR_HOST_FORMAT(csids.node_addr.ipv4_addr), csids.num_csid);
	}

	if (!csids.num_csid) {
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"CSIDs are already cleanup\n", LOG_VALUE);
		return 0;
	}

	/* For data-plane peers, tell the CPs first */
	if (iface != SX_PORT_ID) {
		send_pfcp_sess_del_set_req(&csids, iface);
	}

	/* Cleanup Internal data structures */
	ret = cleanup_sess_by_csid_entry(&csids);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to cleanup session "
				"CSID entry, Error: %s \n", LOG_VALUE, strerror(errno));
		return 0;
	}

	ret = del_csid_entry_hash(peer_csids, &csids, iface);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to delete CSID "
				"hash, Error: %s \n", LOG_VALUE, strerror(errno));
		return -1;
	}

	/* key.peer_node_addr was only filled in the fallback path above */
	if (key.peer_node_addr.ip_type) {
		ret = del_peer_node_addr_entry(&key);
		if (ret) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to delete CSID "
					"hash, Error: %s \n", LOG_VALUE, strerror(errno));
		}
	}

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" UP Cleanup completed for peer "
			"node \n", LOG_VALUE);
	return 0;
}
/**
 * @brief : Handle an incoming PFCP Session Set Deletion Request: for each
 *          FQ-CSID IE present (MME, SGW-C, PGW-C) collect the mapped local
 *          CSIDs and the peer CSIDs, then tear down the affected sessions
 *          and mapping hashes.
 * @param : pfcp_sess_set_del_req, decoded request message
 * @return : 0 on success, -1 when no CSIDs matched or cleanup failed
 */
int8_t
process_up_sess_set_del_req(pfcp_sess_set_del_req_t *pfcp_sess_set_del_req)
{
	int ret = 0;
	fqcsid_t csids = {0};
	fqcsid_t peer_csids = {0};

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"PFCP Session Set Deletion Request :: START \n", LOG_VALUE);
	/* MME FQ-CSID */
	if (pfcp_sess_set_del_req->mme_fqcsid.header.len) {
		if (pfcp_sess_set_del_req->mme_fqcsid.number_of_csids) {
			for (uint8_t itr = 0; itr < pfcp_sess_set_del_req->mme_fqcsid.number_of_csids; itr++) {
				/* Get linked local csid */
				csid_t *tmp = NULL;
				csid_key_t key = {0};
				key.local_csid = pfcp_sess_set_del_req->mme_fqcsid.pdn_conn_set_ident[itr];
				/* Node address in the IE is either IPv4 or IPv6 */
				if (pfcp_sess_set_del_req->mme_fqcsid.fqcsid_node_id_type == IPV4_GLOBAL_UNICAST) {
					key.node_addr.ip_type = PDN_TYPE_IPV4;
					memcpy(&key.node_addr.ipv4_addr,
							&(pfcp_sess_set_del_req->mme_fqcsid.node_address), IPV4_SIZE);
				} else {
					key.node_addr.ip_type = PDN_TYPE_IPV6;
					memcpy(&key.node_addr.ipv6_addr,
							&(pfcp_sess_set_del_req->mme_fqcsid.node_address), IPV6_ADDRESS_LEN);
				}

				tmp = get_peer_csid_entry(&key, SX_PORT_ID, REMOVE_NODE);
				if (tmp == NULL) {
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Failed to get peer "
							"MME CSID entry, Error: %s \n", LOG_VALUE, strerror(errno));
					continue;
				}

				/* TODO: Handle Multiple CSID with single MME CSID */
				for (int8_t itr1 = 0; itr1 < tmp->num_csid; itr1++) {
					csids.local_csid[csids.num_csid++] = tmp->local_csid[itr1];
				}

				peer_csids.local_csid[peer_csids.num_csid++] =
					pfcp_sess_set_del_req->mme_fqcsid.pdn_conn_set_ident[itr];
				memcpy(&(peer_csids.node_addr), &(key.node_addr), sizeof(node_address_t));
			}
		}
	}

	/* SGW FQ-CSID */
	if (pfcp_sess_set_del_req->sgw_c_fqcsid.header.len) {
		if (pfcp_sess_set_del_req->sgw_c_fqcsid.number_of_csids) {
			for (uint8_t itr = 0; itr < pfcp_sess_set_del_req->sgw_c_fqcsid.number_of_csids; itr++) {
				/* Get linked local csid */
				csid_t *tmp = NULL;
				csid_key_t key = {0};
				key.local_csid = pfcp_sess_set_del_req->sgw_c_fqcsid.pdn_conn_set_ident[itr];
				if (pfcp_sess_set_del_req->sgw_c_fqcsid.fqcsid_node_id_type == IPV4_GLOBAL_UNICAST) {
					key.node_addr.ip_type = PDN_TYPE_IPV4;
					memcpy(&key.node_addr.ipv4_addr,
							&(pfcp_sess_set_del_req->sgw_c_fqcsid.node_address), IPV4_SIZE);
				} else {
					key.node_addr.ip_type = PDN_TYPE_IPV6;
					memcpy(&key.node_addr.ipv6_addr,
							&(pfcp_sess_set_del_req->sgw_c_fqcsid.node_address), IPV6_ADDRESS_LEN);
				}

				tmp = get_peer_csid_entry(&key, SX_PORT_ID, REMOVE_NODE);
				if (tmp == NULL) {
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Failed to get peer "
							"SGW-C CSID entry, Error: %s \n", LOG_VALUE, strerror(errno));
					continue;
				}

				/* TODO: Handle Multiple CSID with single SGW-C CSID */
				for (int8_t itr1 = 0; itr1 < tmp->num_csid; itr1++) {
					csids.local_csid[csids.num_csid++] = tmp->local_csid[itr1];
				}

				peer_csids.local_csid[peer_csids.num_csid++] =
					pfcp_sess_set_del_req->sgw_c_fqcsid.pdn_conn_set_ident[itr];
				memcpy(&(peer_csids.node_addr), &(key.node_addr), sizeof(node_address_t));
			}
		}
	}

	/* PGW FQ-CSID */
	if (pfcp_sess_set_del_req->pgw_c_fqcsid.header.len) {
		if (pfcp_sess_set_del_req->pgw_c_fqcsid.number_of_csids) {
			for (uint8_t itr = 0; itr < pfcp_sess_set_del_req->pgw_c_fqcsid.number_of_csids; itr++) {
				/* Get linked local csid */
				csid_t *tmp = NULL;
				csid_key_t key = {0};
				key.local_csid = pfcp_sess_set_del_req->pgw_c_fqcsid.pdn_conn_set_ident[itr];
				if (pfcp_sess_set_del_req->pgw_c_fqcsid.fqcsid_node_id_type == IPV4_GLOBAL_UNICAST) {
					key.node_addr.ip_type = PDN_TYPE_IPV4;
					memcpy(&key.node_addr.ipv4_addr,
							&(pfcp_sess_set_del_req->pgw_c_fqcsid.node_address), IPV4_SIZE);
				} else {
					key.node_addr.ip_type = PDN_TYPE_IPV6;
					memcpy(&key.node_addr.ipv6_addr,
							&(pfcp_sess_set_del_req->pgw_c_fqcsid.node_address), IPV6_ADDRESS_LEN);
				}

				tmp = get_peer_csid_entry(&key, SX_PORT_ID, REMOVE_NODE);
				if (tmp == NULL) {
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Failed to get peer "
							"PGW-C CSID entry, Error: %s \n", LOG_VALUE, strerror(errno));
					continue;
				}

				/* TODO: Handle Multiple CSID with single PGW-C CSID */
				for (int8_t itr1 = 0; itr1 < tmp->num_csid; itr1++) {
					csids.local_csid[csids.num_csid++] = tmp->local_csid[itr1];
				}

				peer_csids.local_csid[peer_csids.num_csid++] =
					pfcp_sess_set_del_req->pgw_c_fqcsid.pdn_conn_set_ident[itr];
				memcpy(&(peer_csids.node_addr), &(key.node_addr), sizeof(node_address_t));
			}
		}
	}

	if (csids.num_csid == 0) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Not found peer CSIDs \n", LOG_VALUE);
		return -1;
	}

	/* Cleanup Internal data structures */
	ret = cleanup_sess_by_peer_csid_entry(&peer_csids, SX_PORT_ID);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to cleanup session "
				"CSID entry, Error: %s \n", LOG_VALUE, strerror(errno));
		return -1;
	}

	ret = del_csid_entry_hash(&peer_csids, &csids, SX_PORT_ID);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to delete CSID "
				"hash while Processing UP Session Set Delete Request, "
				"Error: %s \n", LOG_VALUE, strerror(errno));
		return -1;
	}
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"PFCP Session Set Deletion Request :: END \n", LOG_VALUE);
	return 0;
}
/**
 * @brief : Unlink a session from the peer CSID session lists
 *          (MME, SGW and PGW FQ-CSIDs, all keyed on the Sx interface).
 * @param : sess, session info.
 * @return : Returns 0 in case of success, cause value otherwise.
 */
static int
del_dp_session_csid_entry(pfcp_session_t *sess) {

	sess_csid *sess_list = NULL;
	peer_csid_key_t key = {0};

	/* Remove the session link from the MME CSID list.
	 * Only the first CSID of the FQ-CSID is handled here. */
	if ((sess->mme_fqcsid != NULL) && (sess->mme_fqcsid)->num_csid) {
		key.iface = SX_PORT_ID;
		key.peer_local_csid = (sess->mme_fqcsid)->local_csid[0];
		memcpy(&key.peer_node_addr,
				&(sess->mme_fqcsid)->node_addr, sizeof(node_address_t));

		sess_list = get_sess_peer_csid_entry(&key, REMOVE_NODE);
		remove_sess_entry(sess_list, sess->up_seid, &key);
	}

	/* Remove the session link from the SGW CSID list */
	if ((sess->sgw_fqcsid != NULL) && (sess->sgw_fqcsid)->num_csid) {
		key.iface = SX_PORT_ID;
		key.peer_local_csid = (sess->sgw_fqcsid)->local_csid[0];
		memcpy(&key.peer_node_addr,
				&(sess->sgw_fqcsid)->node_addr, sizeof(node_address_t));

		sess_list = get_sess_peer_csid_entry(&key, REMOVE_NODE);
		remove_sess_entry(sess_list, sess->up_seid, &key);
	}

	/* Remove the session link from the PGW CSID list */
	if ((sess->pgw_fqcsid != NULL) && (sess->pgw_fqcsid)->num_csid) {
		key.iface = SX_PORT_ID;
		key.peer_local_csid = (sess->pgw_fqcsid)->local_csid[0];
		memcpy(&key.peer_node_addr,
				&(sess->pgw_fqcsid)->node_addr, sizeof(node_address_t));

		sess_list = get_sess_peer_csid_entry(&key, REMOVE_NODE);
		remove_sess_entry(sess_list, sess->up_seid, &key);
	}

	return 0;
}
/**
 * Tear down all CSID bookkeeping for a session.
 *
 * For every local CSID in `csids`, the session (`sess->up_seid`) is removed
 * from the per-CSID session list. When that removal empties the list, every
 * FQ-CSID association the session holds (SGW/PGW control plane, UP, MME,
 * west/east bound peers) is deleted from the CSID hash, the CSID-to-session
 * entry itself is dropped, and the peer-node-address entry is cleaned up.
 *
 * @param sess  : session whose CSID linkage is being torn down.
 * @param csids : set of local CSIDs to process; num_csid is decremented
 *                for each CSID whose session list becomes empty.
 * @param iface : interface identifier (currently unused in the body;
 *                NOTE(review): ports are hard-coded per peer type below —
 *                confirm whether `iface` was meant to be used).
 * @return 0 on success, -1 on any lookup/update failure.
 */
int8_t
del_sess_by_csid_entry(pfcp_session_t *sess, fqcsid_t *csids, uint8_t iface)
{
	int ret = 0;
	/* Snapshot the count: csids->num_csid is decremented inside the loop,
	 * so iterate over the original value. */
	uint16_t num_csid = csids->num_csid;
	peer_node_addr_key_t key = {0};

	/* Get the session ID by csid */
	for (uint16_t itr = 0; itr < num_csid; itr++) {
		sess_csid *seids = NULL;
		seids = get_sess_csid_entry(csids->local_csid[itr], REMOVE_NODE);
		if (seids == NULL) {
			/* NOTE: strerror(errno) here may be stale; the lookup API does
			 * not necessarily set errno. */
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get Session "
					"ID by CSID entry, Error: %s \n", LOG_VALUE, strerror(errno));
			return -1;
		}

		/* Unlink this session from the CSID's session list; the returned
		 * pointer is the (possibly new, possibly NULL) list head. */
		seids = remove_sess_csid_data_node(seids, sess->up_seid);

		/* Update CSID Entry in table */
		ret = rte_hash_add_key_data(seids_by_csid_hash,
				&csids->local_csid[itr], seids);
		if (ret) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to add Session IDs entry for CSID = %u"
					"\n\tError= %s\n",
					LOG_VALUE, csids->local_csid[itr],
					rte_strerror(abs(ret)));
			return -1;
		}

		/* List is now empty: this was the last session on the CSID, so
		 * remove every FQ-CSID association and the CSID entry itself. */
		if (seids == NULL) {
			if (sess->sgw_fqcsid != NULL) {
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
						"SGWC/SAEGWC Node Addr:"IPV4_ADDR"\n",
						LOG_VALUE,
						IPV4_ADDR_HOST_FORMAT((sess->sgw_fqcsid)->node_addr.ipv4_addr));

				if (del_csid_entry_hash(sess->sgw_fqcsid, csids,
							SX_PORT_ID)) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
							"Failed to delete SGW CSID "
							"entry from hash , Error: %s \n", LOG_VALUE, strerror(errno));
					return -1;
				}
			}

			if (sess->pgw_fqcsid != NULL) {
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
						"PGWC Node Addr:"IPV4_ADDR"\n",
						LOG_VALUE,
						IPV4_ADDR_HOST_FORMAT((sess->pgw_fqcsid)->node_addr.ipv4_addr));

				if (del_csid_entry_hash(sess->pgw_fqcsid, csids,
							SX_PORT_ID)) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error: %s \n", LOG_VALUE,
							strerror(errno));
					return -1;
				}
			}

			/* TODO: VISHAL */
			if (sess->up_fqcsid != NULL) {
				/* Log the UP node address in whichever family it uses. */
				((sess->up_fqcsid)->node_addr.ip_type == IPV6_TYPE) ?
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
							"UP Node IPv6 Addr:"IPv6_FMT"\n", LOG_VALUE,
							IPv6_PRINT(IPv6_CAST((sess->up_fqcsid)->node_addr.ipv6_addr))):
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
							"UP Node IPv4 Addr:"IPV4_ADDR"\n", LOG_VALUE,
							IPV4_ADDR_HOST_FORMAT((sess->up_fqcsid)->node_addr.ipv4_addr));

				if (del_csid_entry_hash(sess->up_fqcsid, csids,
							SGI_PORT_ID)) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error: %s \n", LOG_VALUE,
							strerror(errno));
					return -1;
				}
			}

			if (sess->mme_fqcsid != NULL) {
				((sess->mme_fqcsid)->node_addr.ip_type == IPV6_TYPE) ?
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
							"MME Node IPv6 Addr:"IPv6_FMT"\n", LOG_VALUE,
							IPv6_PRINT(IPv6_CAST((sess->mme_fqcsid)->node_addr.ipv6_addr))):
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
							"MME Node IPv4 Addr:"IPV4_ADDR"\n", LOG_VALUE,
							IPV4_ADDR_HOST_FORMAT((sess->mme_fqcsid)->node_addr.ipv4_addr));

				if (del_csid_entry_hash(sess->mme_fqcsid, csids,
							SX_PORT_ID)) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
							"Failed to delete MME CSID "
							"entry from hash , Error: %s \n", LOG_VALUE, strerror(errno));
					return -1;
				}
			}

			if (sess->wb_peer_fqcsid != NULL) {
				((sess->wb_peer_fqcsid)->node_addr.ip_type == IPV6_TYPE) ?
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
							"eNB/SGWU/West Bound Node IPv6 Addr:"IPv6_FMT"\n", LOG_VALUE,
							IPv6_PRINT(IPv6_CAST((sess->wb_peer_fqcsid)->node_addr.ipv6_addr))):
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
							"eNB/SGWU/West Bound Node IPv4 Addr:"IPV4_ADDR"\n",
							LOG_VALUE,
							IPV4_ADDR_HOST_FORMAT((sess->wb_peer_fqcsid)->node_addr.ipv4_addr));

				if (del_csid_entry_hash(sess->wb_peer_fqcsid, csids,
							S1U_PORT_ID)) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error: %s \n", LOG_VALUE,
							strerror(errno));
					return -1;
				}
			}

			if (sess->eb_peer_fqcsid != NULL) {
				((sess->eb_peer_fqcsid)->node_addr.ip_type == IPV6_TYPE) ?
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
							"PGWU/East Bound Node IPv6 Addr:"IPv6_FMT"\n", LOG_VALUE,
							IPv6_PRINT(IPv6_CAST((sess->eb_peer_fqcsid)->node_addr.ipv6_addr))):
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
							"PGWU/East Bound Node IPv4 Addr:"IPV4_ADDR"\n",
							LOG_VALUE,
							IPV4_ADDR_HOST_FORMAT((sess->eb_peer_fqcsid)->node_addr.ipv4_addr));

				if (del_csid_entry_hash(sess->eb_peer_fqcsid, csids,
							SGI_PORT_ID)) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error: %s \n", LOG_VALUE,
							strerror(errno));
					return -1;
				}
			}

			/* Drop the (now empty) CSID-to-session-list entry itself. */
			if (del_sess_csid_entry(csids->local_csid[itr])) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"Failed to delete session by CSID "
						"entry , Error: %s \n", LOG_VALUE, strerror(errno));
				return -1;
			}
			/* Decrement the csid counters */
			csids->num_csid--;

			key.iface = SX_PORT_ID;
			memcpy(&key.peer_node_addr,
					&sess->cp_node_addr, sizeof(node_address_t));

			/* Best-effort: the peer node address entry may already be gone. */
			if ((cleanup_peer_node_addr_entry(&key)) < 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"Peer node address entry already deleted \n",
						LOG_VALUE);
			}
		}
	}
	/* Finally unlink the session from all peer CSID session lists. */
	del_dp_session_csid_entry(sess);
	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | oss_adapter/libepcadapter/include/cdadmfapi.h | /*
* Copyright (c) 2019 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CDADMFAPI_H
#define __CDADMFAPI_H
#include <curl/curl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "gw_structs.h"
/* HTTP header constants used for the curl requests towards D-ADMF. */
#define CONTENT_TYPE_JSON "Content-Type: application/json"
#define X_USER_NAME "X-User-Name: YOUR_NAME"
#define USER_AGENT "curl/7.47.0"

/* JSON keys of the D-ADMF UE-database request/response body. */
#define UE_DB_KEY "uedatabase"
#define LI_ID_KEY "sequenceId"
#define IMSI_KEY "imsi"
#define S11_KEY "s11"
#define SGW_S5S8_C_KEY "sgw-s5s8c"
/* NOTE(review): value restored from a scrubbed "<KEY>" placeholder to match
 * the sibling key naming pattern ("sgw-s5s8c") — confirm against the D-ADMF
 * JSON schema. */
#define PGW_S5S8_C_KEY "pgw-s5s8c"
#define SXA_KEY "sxa"
#define SXB_KEY "sxb"
#define SXA_SXB_KEY "sxasxb"
#define S1U_CONTENT_KEY "s1u_content"
#define SGW_S5S8U_CONTENT_KEY "sgw_s5s8u_content"
#define PGW_S5S8U_CONTENT_KEY "pgw_s5s8u_content"
#define SGI_CONTENT_KEY "sgi_content"
#define S1U_KEY "s1u"
#define SGW_S5S8_U_KEY "sgw-s5s8u"
/* NOTE(review): value restored from a scrubbed "<KEY>" placeholder to match
 * the sibling key naming pattern ("sgw-s5s8u") — confirm against the D-ADMF
 * JSON schema. */
#define PGW_S5S8_U_KEY "pgw-s5s8u"
#define SGI_KEY "sgi"
#define FORWARD_KEY "forward"

/* Conversion factor: seconds to milliseconds (used for curl timeouts). */
#define MILLISECONDS 1000
#ifdef __cplusplus
extern "C" {
#endif
int registerCpOnDadmf(char *dadmf_ip,
uint16_t dadmf_port, char *pfcp_ip,
struct li_df_config_t *li_df_config, uint16_t *uiCntr);
int parseJsonReqFillStruct(const char *request,
char **response, struct li_df_config_t *li_config, uint16_t *uiCntr);
int parseJsonReqForId(const char *request,
char **response, uint64_t *uiIds, uint16_t *uiCntr);
int ConvertAsciiIpToNumeric(const char *ipAddr);
#ifdef __cplusplus
}
#endif
#endif /* #ifndef __CDADMFAPI_H */
|
nikhilc149/e-utran-features-bug-fixes | ulpc/legacy_df/include/TCPListener.h | <reponame>nikhilc149/e-utran-features-bug-fixes<filename>ulpc/legacy_df/include/TCPListener.h
/*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
#ifndef __TCPListener_H_
#define __TCPListener_H_
#include "Common.h"
#include "TCPDataProcessor.h"
class LegacyDfListener;
class TCPDataProcessor;
/*
 * @brief : Private-thread listener that accepts legacy-DF TCP connections,
 *          spawns a TCPDataProcessor per connection, and manages the pcap
 *          dumpers used to persist intercepted packets per IMSI/LI-identifier.
 */
class TCPListener : public ESocket::ThreadPrivate {

	public:
		/*
		 *	@brief	:	Constructor of class TCPListener
		 */
		TCPListener();

		/*
		 *	@brief	:	Destructor of class TCPListener
		 */
		~TCPListener();

		/*
		 *	@brief	:	Library function of EPCTool, invoked on thread start
		 */
		Void onInit();

		/*
		 *	@brief	:	Library function of EPCTool, invoked on thread quit
		 */
		Void onQuit();

		/*
		 *	@brief	:	Library function of EPCTool, invoked on close
		 */
		Void onClose();

		/*
		 *	@brief	:	Function to indicate socket exception
		 *	@param	:	err, error type
		 *	@param	:	psocket, socket
		 *	@return	:	Returns void
		 */
		Void errorHandler(EError &err, ESocket::BasePrivate *psocket);

		/*
		 *	@brief	:	Function creates instance of TCPDataProcessor
		 *	@param	:	No function arguments
		 *	@return	:	Returns TCPDataProcessor instance
		 */
		TCPDataProcessor *createDdfTalker();

		/* Void sendAck(DdfAckPacket_t &ackPacket); */

		/*
		 *	@brief	:	Function to fetch/create name of file to dump packet
		 *	@param	:	uiImsi, contains IMSI
		 *	@param	:	uiId, li identifier
		 *	@return	:	Returns pcap_dumper_t
		 */
		pcap_dumper_t * getPcapDumper(const uint64_t uiImsi, const uint64_t uiId);

		/*
		 *	@brief	:	Function to free pcap dumper map
		 *	@param	:	No function arguments
		 *	@return	:	Returns void
		 */
		void freePcapDumper();

		/*
		 *	@brief	:	Function to delete instance of TCPDataProcessor
		 *				on socket close, also tries re-connect to DF
		 *	@param	:	psocket, socket
		 *	@return	:	Returns void
		 */
		Void onSocketClosed(ESocket::BasePrivate *psocket);

		/*
		 *	@brief	:	Function to delete instance of TCPDataProcessor
		 *				on socket error, also tries re-connect to DF
		 *	@param	:	psocket, socket
		 *	@return	:	Returns void
		 */
		Void onSocketError(ESocket::BasePrivate *psocket);

	private:
		// Listening socket accepting legacy-DF connections.
		LegacyDfListener *m_ptrListener = NULL;
		// Active per-connection data processors.
		std::list<TCPDataProcessor *> m_ptrDataProcessor;
		// Open pcap dumpers keyed by a string built from IMSI/LI id.
		std::map<std::string, pcap_dumper_t *> mapPcapDumper;
};
#endif /* __TCPListener_H_ */
|
nikhilc149/e-utran-features-bug-fixes | ulpc/d_admf/include/CpConfigAsCSV.h | /*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CP_CONFIG_AS_CSV_H_
#define __CP_CONFIG_AS_CSV_H_
#include <iostream>
#include <stdint.h>
#include <fstream>
#include "CpConfig.h"
/*
 * @brief : CpConfig implementation that persists the registered-CP list
 *          in a CSV file on disk.
 */
class CpConfigAsCSV : public CpConfig
{
	private:
		// Filesystem path of the backing CSV file.
		std::string strCSVPath;

	public:
		/*
		 * @brief : Default constructor; CSV path must be set elsewhere.
		 */
		CpConfigAsCSV();

		/*
		 * @brief : Constructs the store backed by the CSV at strPath.
		 */
		CpConfigAsCSV(const std::string &strPath);

		/**
		 * @brief : Reads csv file containing entries of all registered Cp's
				and stores it in vector
		 * @param : No param
		 * @return : Returns 0 in case of Success, -1 otherwise
		 */
		int8_t ReadCpConfig(void);

		/**
		 * @brief : Adds/Updates/Deletes Cp configuration entry from csv file
		 * @param : uiAction, action can be add(1)/update(2)/delete(3)
		 * @param : strIpAddr, Ip-address of Cp
		 * @return : Returns 0 in case of Success, -1 otherwise
		 */
		int8_t UpdateCpConfig(uint8_t uiAction, const std::string &strIpAddr);

		/*
		 * @brief : Destructor.
		 */
		~CpConfigAsCSV();
};
#endif /* __CP_CONFIG_AS_CSV_H_ */
|
nikhilc149/e-utran-features-bug-fixes | cp/gx_app/src/gx_cca.c | <gh_stars>0
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include "gx.h"
#include "cp_app.h"
#include "ipc_api.h"
extern int g_gx_client_sock;
/*
*
*       Fun:    gx_send_cca
*
*       Desc:   Build a Credit-Control-Answer from the received request and
*               send it to the peer. The FDCHECK_* macros jump to `err` on
*               any freeDiameter failure, where the message is released.
*
*       Ret:    FD_REASON_OK on success, a freeDiameter error reason otherwise.
*
*       Notes:  `data` is currently unused.
*
*       File:   gx_cca.c
*
*/
int gx_send_cca(struct msg *rqst, void *data)
{
   int ret = FD_REASON_OK;
   struct msg *ans = rqst;

   /* construct the message: turn the request in-place into its answer */
   FDCHECK_MSG_NEW_ANSWER_FROM_REQ( fd_g_config->cnf_dict, ans, ret, goto err );
   FDCHECK_MSG_ADD_ORIGIN( ans, ret, goto err );
   /* NOTE(review): Destination-Host/Realm are filled with our own diameter
    * identity/realm here — confirm this is intentional for the answer. */
   FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_destination_host, ans, MSG_BRW_LAST_CHILD, fd_g_config->cnf_diamid, fd_g_config->cnf_diamid_len, ret, goto err );
   FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_destination_realm, ans, MSG_BRW_LAST_CHILD, fd_g_config->cnf_diamrlm, fd_g_config->cnf_diamrlm_len, ret, goto err );

   //TODO - FILL IN HERE
#if 0
   FD_DUMP_MESSAGE(msg);
#endif

   /* send the message */
   FDCHECK_MSG_SEND( ans, NULL, NULL, ret, goto err );

   goto fini;

err:
   /* free the message since an error occurred */
   FDCHECK_MSG_FREE(ans);

fini:

   return ret;
}
/*
*
* Fun: gx_cca_cb
*
* Desc: CMDNAME call back
*
* Ret: 0
*
* File: gx_cca.c
*
The Credit-Control-Answer (CCA) command, indicated by
the Command-Code field set to 272 and the 'R'
bit cleared in the Command Flags field, is sent to/from MME or SGSN.
*
Credit-Control-Answer ::= <Diameter Header: 272, PXY, 16777238>
< Session-Id >
[ DRMP ]
{ Auth-Application-Id }
{ Origin-Host }
{ Origin-Realm }
[ Result-Code ]
[ Experimental-Result ]
{ CC-Request-Type }
{ CC-Request-Number }
[ OC-Supported-Features ]
[ OC-OLR ]
* [ Supported-Features ]
[ Bearer-Control-Mode ]
* [ Event-Trigger ]
[ Event-Report-Indication ]
[ Origin-State-Id ]
* [ Redirect-Host ]
[ Redirect-Host-Usage ]
[ Redirect-Max-Cache-Time ]
* [ Charging-Rule-Remove ]
* [ Charging-Rule-Install ]
[ Charging-Information ]
[ Online ]
[ Offline ]
* [ QoS-Information ]
[ Revalidation-Time ]
[ Default-EPS-Bearer-QoS ]
[ Default-QoS-Information ]
[ Bearer-Usage ]
* [ Usage-Monitoring-Information ]
* [ CSG-Information-Reporting ]
[ User-CSG-Information ]
[ PRA-Install ]
[ PRA-Remove ]
[ Presence-Reporting-Area-Information ]
[ Session-Release-Cause ]
[ NBIFOM-Support ]
[ NBIFOM-Mode ]
[ Default-Access ]
[ RAN-Rule-Support ]
* [ Routing-Rule-Report ]
* 4 [ Conditional-Policy-Information ]
[ Removal-Of-Access ]
[ IP-CAN-Type ]
[ Error-Message ]
[ Error-Reporting-Host ]
[ Failed-AVP ]
* [ Proxy-Info ]
* [ Route-Record ]
* [ Load ]
* [ AVP ]
*/
/*
 * CCA dispatch callback: parse the received Credit-Control-Answer, encode it
 * into the internal gx_msg wire format and forward it over the IPC channel
 * to the control plane.
 *
 * Fixes over the previous revision:
 *  - the gx_cca_pack failure path freed send_buf and then fell through to
 *    the common cleanup which freed it again (double free);
 *  - the malloc failure path returned without releasing *msg (message leak).
 * All exits now funnel through a single cleanup label; free(NULL) is a no-op.
 *
 * Returns 0 (the message is always consumed and *msg set to NULL).
 */
int gx_cca_cb
(
   struct msg ** msg,
   struct avp * pavp,
   struct session * sess,
   void * data,
   enum disp_action * act
)
{
   int ret = FD_REASON_OK;
   struct msg *ans = *msg;
   struct msg *qry = NULL;
   uint8_t *send_buf = NULL;
   gx_msg gx_resp = {0};
   uint32_t buflen;

#ifdef GX_DEBUG
   FD_DUMP_MESSAGE(ans);
#endif

   gx_resp.msg_type = GX_CCA_MSG;

   /* retrieve the original query associated with the answer */
   CHECK_FCT(fd_msg_answ_getq (ans, &qry));

   ret = gx_cca_parse(*msg, &(gx_resp.data.cp_cca));
   if (ret != FD_REASON_OK)
   {
      goto cleanup;
   }

   /* Calculate the length of buffer needed: payload + internal header */
   buflen = gx_cca_calc_length (&gx_resp.data.cp_cca);
   gx_resp.msg_len = buflen + GX_HEADER_LEN;

   send_buf = malloc(buflen + GX_HEADER_LEN);
   if( send_buf == NULL){
      printf("SendBuff memory fails\n");
      goto cleanup;
   }
   memset(send_buf, 0, buflen + GX_HEADER_LEN);

   /* encoding the cca header value to buffer */
   memcpy( send_buf, &gx_resp.msg_type, sizeof(gx_resp.msg_type));
   memcpy( send_buf + sizeof(gx_resp.msg_type), &gx_resp.msg_len,
         sizeof(gx_resp.msg_len));

   if ( gx_cca_pack( &(gx_resp.data.cp_cca),
            (unsigned char *)(send_buf + GX_HEADER_LEN),
            buflen ) == 0 ) {
      printf("CCA Packing failure \n");
      /* do NOT free send_buf here — the cleanup label frees it exactly once
       * (the previous code freed it both here and below: double free) */
      goto cleanup;
   }

   send_to_ipc_channel(g_gx_client_sock, send_buf, buflen + GX_HEADER_LEN);

cleanup:
   free(send_buf);            /* free(NULL) is a harmless no-op */
   FDCHECK_MSG_FREE(*msg);    /* always consume the answer */
   *msg = NULL;
   return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | cp/gx_app/src/gx_ccr.c | <gh_stars>0
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include "gx.h"
#include "cp_app.h"
#include "ipc_api.h"
extern int g_gx_client_sock;
int
add_fd_msg(union avp_value *val, struct dict_object * obj,
struct msg **msg_buf);
/*
*
* Fun: gx_send_ccr
*
* Desc:
*
* Ret:
*
* Notes: None
*
* File: gx_ccr.c
*
*/
int gx_send_ccr(void *data)
{
int rval = FD_REASON_OK;
struct msg *msg = NULL;
struct avp *avp_ptr = NULL;;
union avp_value val;
GxCCR gx_ccr = {0};
gx_ccr_unpack((unsigned char *)data, &gx_ccr );
/* construct the Diameter CCR message */
FDCHECK_MSG_NEW_APPL( gxDict.cmdCCR, gxDict.appGX, msg, rval, goto err);
if( gx_ccr.presence.session_id )
FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_session_id, msg, MSG_BRW_LAST_CHILD,
gx_ccr.session_id.val, gx_ccr.session_id.len, rval,goto err );
FDCHECK_MSG_ADD_ORIGIN( msg, rval, goto err );
//FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_destination_host, msg, MSG_BRW_LAST_CHILD,
// "dstest3.test3gpp.net", strlen("dstest3.test3gpp.net"), rval, goto err );
FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_destination_realm, msg, MSG_BRW_LAST_CHILD,
fd_g_config->cnf_diamrlm, fd_g_config->cnf_diamrlm_len, rval, goto err );
FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_auth_application_id, msg, MSG_BRW_LAST_CHILD,
gxDict.appGX, sizeof(gxDict.appGX), rval, goto err );
if( gx_ccr.presence.cc_request_number )
FDCHECK_MSG_ADD_AVP_U32( gxDict.avp_cc_request_number, msg, MSG_BRW_LAST_CHILD,
gx_ccr.cc_request_number, rval, goto err );
if( gx_ccr.presence.cc_request_type )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_cc_request_type, msg, MSG_BRW_LAST_CHILD,
gx_ccr.cc_request_type, rval, goto err );
if( gx_ccr.presence.credit_management_status )
FDCHECK_MSG_ADD_AVP_U32( gxDict.avp_credit_management_status, msg, MSG_BRW_LAST_CHILD,
gx_ccr.credit_management_status, rval, goto err );
if( gx_ccr.presence.origin_state_id )
FDCHECK_MSG_ADD_AVP_U32( gxDict.avp_origin_state_id, msg, MSG_BRW_LAST_CHILD,
fd_g_config->cnf_orstateid, rval, goto err );
if( gx_ccr.presence.network_request_support )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_network_request_support, msg, MSG_BRW_LAST_CHILD,
gx_ccr.network_request_support, rval, goto err );
if( gx_ccr.presence.packet_filter_operation )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_packet_filter_operation, msg, MSG_BRW_LAST_CHILD,
gx_ccr.packet_filter_operation, rval, goto err );
if( gx_ccr.presence.bearer_operation )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_bearer_operation, msg, MSG_BRW_LAST_CHILD,
gx_ccr.bearer_operation, rval, goto err );
if( gx_ccr.presence.dynamic_address_flag )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_dynamic_address_flag, msg, MSG_BRW_LAST_CHILD,
gx_ccr.dynamic_address_flag, rval, goto err );
if( gx_ccr.presence.dynamic_address_flag_extension )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_dynamic_address_flag_extension, msg, MSG_BRW_LAST_CHILD,
gx_ccr.dynamic_address_flag_extension, rval, goto err );
if( gx_ccr.presence.pdn_connection_charging_id )
FDCHECK_MSG_ADD_AVP_U32( gxDict.avp_pdn_connection_charging_id, msg, MSG_BRW_LAST_CHILD,
gx_ccr.pdn_connection_charging_id, rval, goto err );
if( gx_ccr.presence.ip_can_type )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_ip_can_type, msg, MSG_BRW_LAST_CHILD,
gx_ccr.ip_can_type, rval, goto err );
if( gx_ccr.presence.an_trusted )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_an_trusted, msg, MSG_BRW_LAST_CHILD,
gx_ccr.an_trusted, rval, goto err );
if( gx_ccr.presence.rat_type )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_rat_type, msg, MSG_BRW_LAST_CHILD,
gx_ccr.rat_type, rval, goto err );
if( gx_ccr.presence.termination_cause )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_termination_cause, msg, MSG_BRW_LAST_CHILD,
gx_ccr.termination_cause, rval, goto err );
if( gx_ccr.presence.qos_negotiation )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_qos_negotiation, msg, MSG_BRW_LAST_CHILD,
gx_ccr.qos_negotiation, rval, goto err );
if( gx_ccr.presence.qos_upgrade )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_qos_upgrade, msg, MSG_BRW_LAST_CHILD,
gx_ccr.qos_upgrade, rval, goto err );
if( gx_ccr.presence.an_gw_status )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_an_gw_status, msg, MSG_BRW_LAST_CHILD,
gx_ccr.an_gw_status, rval, goto err );
if( gx_ccr.presence.rai )
FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_rai, msg, MSG_BRW_LAST_CHILD, gx_ccr.rai.val,
gx_ccr.rai.len, rval, goto err );
if( gx_ccr.presence.bearer_usage )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_bearer_usage, msg, MSG_BRW_LAST_CHILD,
gx_ccr.bearer_usage, rval, goto err );
if( gx_ccr.presence.online )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_online, msg, MSG_BRW_LAST_CHILD,
gx_ccr.online, rval, goto err );
if( gx_ccr.presence.offline )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_offline, msg, MSG_BRW_LAST_CHILD,
gx_ccr.offline, rval, goto err );
if( gx_ccr.presence.nbifom_support )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_nbifom_support, msg, MSG_BRW_LAST_CHILD,
gx_ccr.nbifom_support, rval, goto err );
if( gx_ccr.presence.nbifom_mode )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_nbifom_mode, msg, MSG_BRW_LAST_CHILD,
gx_ccr.nbifom_mode, rval, goto err );
if( gx_ccr.presence.default_access )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_default_access, msg, MSG_BRW_LAST_CHILD,
gx_ccr.default_access, rval, goto err );
if( gx_ccr.presence.origination_time_stamp )
FDCHECK_MSG_ADD_AVP_U32( gxDict.avp_origination_time_stamp, msg, MSG_BRW_LAST_CHILD,
gx_ccr.origination_time_stamp, rval, goto err );
if( gx_ccr.presence.maximum_wait_time )
FDCHECK_MSG_ADD_AVP_U32( gxDict.avp_maximum_wait_time, msg, MSG_BRW_LAST_CHILD,
gx_ccr.maximum_wait_time, rval, goto err );
if( gx_ccr.presence.access_availability_change_reason )
FDCHECK_MSG_ADD_AVP_U32( gxDict.avp_access_availability_change_reason, msg, MSG_BRW_LAST_CHILD,
gx_ccr.access_availability_change_reason, rval, goto err );
if( gx_ccr.presence.user_location_info_time )
FDCHECK_MSG_ADD_AVP_U32( gxDict.avp_user_location_info_time, msg, MSG_BRW_LAST_CHILD,
gx_ccr.user_location_info_time, rval, goto err );
if( gx_ccr.presence.udp_source_port )
FDCHECK_MSG_ADD_AVP_U32( gxDict.avp_udp_source_port, msg, MSG_BRW_LAST_CHILD,
gx_ccr.udp_source_port, rval, goto err );
if( gx_ccr.presence.tcp_source_port )
FDCHECK_MSG_ADD_AVP_U32( gxDict.avp_tcp_source_port, msg, MSG_BRW_LAST_CHILD,
gx_ccr.tcp_source_port, rval, goto err );
if( gx_ccr.presence.access_network_charging_address)
FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_access_network_charging_address, msg, MSG_BRW_LAST_CHILD,
gx_ccr.access_network_charging_address.address,
strnlen((char *)gx_ccr.access_network_charging_address.address,MAX_FD_ADDRESS_LEN), rval, goto err );
if( gx_ccr.presence.bearer_identifier)
FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_bearer_identifier, msg, MSG_BRW_LAST_CHILD, gx_ccr.bearer_identifier.val,
gx_ccr.bearer_identifier.len, rval, goto err );
if( gx_ccr.presence.tgpp_charging_characteristics)
FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_3gpp_charging_characteristics, msg,
MSG_BRW_LAST_CHILD, gx_ccr.tgpp_charging_characteristics.val,
gx_ccr.tgpp_charging_characteristics.len, rval, goto err );
if( gx_ccr.presence.called_station_id)
FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_called_station_id, msg, MSG_BRW_LAST_CHILD, gx_ccr.called_station_id.val,
gx_ccr.called_station_id.len, rval, goto err );
if( gx_ccr.presence.pdn_connection_id)
FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_pdn_connection_id, msg, MSG_BRW_LAST_CHILD, gx_ccr.pdn_connection_id.val,
gx_ccr.pdn_connection_id.len, rval, goto err );
if( gx_ccr.presence.framed_ip_address)
FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_framed_ip_address, msg, MSG_BRW_LAST_CHILD, gx_ccr.framed_ip_address.val,
gx_ccr.framed_ip_address.len, rval, goto err );
if( gx_ccr.presence.framed_ipv6_prefix)
FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_framed_ipv6_prefix, msg, MSG_BRW_LAST_CHILD, gx_ccr.framed_ipv6_prefix.val,
gx_ccr.framed_ipv6_prefix.len, rval, goto err );
if( gx_ccr.presence.tgpp_rat_type)
FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_3gpp_rat_type, msg, MSG_BRW_LAST_CHILD, gx_ccr.tgpp_rat_type.val,
gx_ccr.tgpp_rat_type.len, rval, goto err );
if( gx_ccr.presence.twan_identifier)
FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_twan_identifier, msg, MSG_BRW_LAST_CHILD, gx_ccr.twan_identifier.val,
gx_ccr.twan_identifier.len, rval, goto err );
if( gx_ccr.presence.tgpp_ms_timezone)
FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_3gpp_ms_timezone, msg, MSG_BRW_LAST_CHILD, gx_ccr.tgpp_ms_timezone.val,
gx_ccr.tgpp_ms_timezone.len, rval, goto err );
if( gx_ccr.presence.tgpp_user_location_info)
FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_3gpp_user_location_info, msg,
MSG_BRW_LAST_CHILD, gx_ccr.tgpp_user_location_info.val,
gx_ccr.tgpp_user_location_info.len, rval, goto err );
if( gx_ccr.presence.event_trigger ){
for( int k = 0 ; k < gx_ccr.event_trigger.count; k++ ){
val.u32 = gx_ccr.event_trigger.list[k];
FDCHECK_MSG_ADD_AVP_U32( gxDict.avp_event_trigger, msg, MSG_BRW_LAST_CHILD,
gx_ccr.event_trigger.list[k], rval, goto err )
}
}
if(gx_ccr.presence.fixed_user_location_info ){
if(gx_ccr.fixed_user_location_info.presence.ssid){
val.os.len = gx_ccr.fixed_user_location_info.ssid.len;
val.os.data = gx_ccr.fixed_user_location_info.ssid.val;
add_fd_msg(&val, gxDict.avp_ssid, (struct msg**)&avp_ptr);
}
if(gx_ccr.fixed_user_location_info.presence.bssid){
val.os.len = gx_ccr.fixed_user_location_info.bssid.len;
val.os.data = gx_ccr.fixed_user_location_info.bssid.val;
add_fd_msg(&val, gxDict.avp_bssid, (struct msg**)&avp_ptr);
}
if(gx_ccr.fixed_user_location_info.presence.logical_access_id){
val.os.len = gx_ccr.fixed_user_location_info.logical_access_id.len;
val.os.data = gx_ccr.fixed_user_location_info.logical_access_id.val;
add_fd_msg(&val, gxDict.avp_logical_access_id, (struct msg**)&avp_ptr);
}
if(gx_ccr.fixed_user_location_info.presence.physical_access_id){
val.os.len = gx_ccr.fixed_user_location_info.physical_access_id.len;
val.os.data = gx_ccr.fixed_user_location_info.physical_access_id.val;
add_fd_msg(&val, gxDict.avp_physical_access_id, (struct msg**)&avp_ptr);
}
}
if( gx_ccr.presence.user_csg_information ) {
if( gx_ccr.user_csg_information.presence.csg_id ){
val.u32 = gx_ccr.user_csg_information.csg_id;
add_fd_msg(&val, gxDict.avp_csg_id, (struct msg**)&avp_ptr);
}
if( gx_ccr.user_csg_information.presence.csg_access_mode ){
val.i32 = gx_ccr.user_csg_information.csg_access_mode;
add_fd_msg(&val, gxDict.avp_csg_access_mode, (struct msg**)&avp_ptr);
}
if( gx_ccr.user_csg_information.presence.csg_membership_indication ){
val.i32 = gx_ccr.user_csg_information.csg_membership_indication;
add_fd_msg(&val, gxDict.avp_csg_membership_indication, (struct msg**)&avp_ptr);
}
}
if(gx_ccr.presence.oc_supported_features &&
gx_ccr.oc_supported_features.presence.oc_feature_vector ){
CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_oc_supported_features,0, &avp_ptr), return -1);
CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
val.u64 = gx_ccr.oc_supported_features.oc_feature_vector;
add_fd_msg(&val, gxDict.avp_oc_feature_vector, (struct msg**)&avp_ptr);
}
if( gx_ccr.presence.tdf_information){
CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_tdf_information, 0, &avp_ptr), return -1);
CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
if( gx_ccr.tdf_information.presence.tdf_destination_realm ){
val.os.len = gx_ccr.tdf_information.tdf_destination_realm.len;
val.os.data = gx_ccr.tdf_information.tdf_destination_realm.val;
add_fd_msg(&val, gxDict.avp_tdf_destination_realm, (struct msg**)&avp_ptr);
}
if( gx_ccr.tdf_information.presence.tdf_destination_host ){
val.os.len = gx_ccr.tdf_information.tdf_destination_host.len;
val.os.data = gx_ccr.tdf_information.tdf_destination_host.val;
add_fd_msg(&val, gxDict.avp_tdf_destination_host, (struct msg**)&avp_ptr);
}
if( gx_ccr.tdf_information.presence.tdf_ip_address){
/* need to fill address on the basis of type*/
val.os.data = gx_ccr.tdf_information.tdf_ip_address.address;
add_fd_msg(&val, gxDict.avp_tdf_ip_address, (struct msg**)&avp_ptr);
}
}
if( gx_ccr.presence.user_equipment_info){
CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_user_equipment_info,0, &avp_ptr), return -1);
CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
if( gx_ccr.user_equipment_info.presence.user_equipment_info_type){
val.i32 = gx_ccr.user_equipment_info.user_equipment_info_type;
add_fd_msg(&val, gxDict.avp_user_equipment_info_type, (struct msg**)&avp_ptr);
}
if( gx_ccr.user_equipment_info.presence.user_equipment_info_value){
val.os.len = gx_ccr.user_equipment_info.user_equipment_info_value.len;
val.os.data = gx_ccr.user_equipment_info.user_equipment_info_value.val;
add_fd_msg(&val, gxDict.avp_user_equipment_info_value, (struct msg**)&avp_ptr);
}
}
/* Adding Subscription Id list params */
if( gx_ccr.presence.subscription_id ){
CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_subscription_id,0, &avp_ptr), return -1);
CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
for( int i=0; i < gx_ccr.subscription_id.count; i++){
if( gx_ccr.subscription_id.list[i].presence.subscription_id_type ){
val.i32 = gx_ccr.subscription_id.list[i].subscription_id_type;
add_fd_msg(&val, gxDict.avp_subscription_id_type, (struct msg**)&avp_ptr);
}
if( gx_ccr.subscription_id.list[i].presence.subscription_id_data ){
val.os.len = gx_ccr.subscription_id.list[i].subscription_id_data.len;
val.os.data = gx_ccr.subscription_id.list[i].subscription_id_data.val;
add_fd_msg(&val, gxDict.avp_subscription_id_data, (struct msg**)&avp_ptr);
}
}
}
/* Adding Supported feature list params */
if( gx_ccr.presence.supported_features ){
CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_supported_features,0, &avp_ptr), return -1);
CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
for(int i=0; i < gx_ccr.supported_features.count; i++){
if( gx_ccr.supported_features.list[i].presence.vendor_id ){
val.u32 = gx_ccr.supported_features.list[i].vendor_id;
add_fd_msg(&val, gxDict.avp_vendor_id, (struct msg**)&avp_ptr);
}
if( gx_ccr.supported_features.list[i].presence.feature_list_id ){
val.u32 = gx_ccr.supported_features.list[i].feature_list_id;
add_fd_msg(&val, gxDict.avp_feature_list_id, (struct msg**)&avp_ptr);
}
if( gx_ccr.supported_features.list[i].presence.feature_list ){
val.u32 = gx_ccr.supported_features.list[i].feature_list;
add_fd_msg(&val, gxDict.avp_feature_list, (struct msg**)&avp_ptr);
}
}
}
/* Adding avp_packet_filter_information list params */
if( gx_ccr.presence.packet_filter_information ){
	/* Packet-Filter-Information: one grouped AVP instance is created per
	 * list entry, and each optional member present in the entry is then
	 * appended as a sub-AVP of that instance via add_fd_msg(). */
	for(int i=0; i < gx_ccr.packet_filter_information.count; i++){
		CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_packet_filter_information, 0, &avp_ptr), return -1);
		CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
		if( gx_ccr.packet_filter_information.list[i].presence.packet_filter_identifier ){
			val.os.len = gx_ccr.packet_filter_information.list[i].packet_filter_identifier.len;
			val.os.data = gx_ccr.packet_filter_information.list[i].packet_filter_identifier.val;
			add_fd_msg(&val, gxDict.avp_packet_filter_identifier, (struct msg**)&avp_ptr);
		}
		if( gx_ccr.packet_filter_information.list[i].presence.precedence ){
			val.u32 = gx_ccr.packet_filter_information.list[i].precedence;
			add_fd_msg(&val, gxDict.avp_precedence, (struct msg**)&avp_ptr);
		}
		if( gx_ccr.packet_filter_information.list[i].presence.packet_filter_content ){
			val.os.len = gx_ccr.packet_filter_information.list[i].packet_filter_content.len;
			val.os.data = gx_ccr.packet_filter_information.list[i].packet_filter_content.val;
			add_fd_msg(&val, gxDict.avp_packet_filter_content, (struct msg**)&avp_ptr);
		}
		if( gx_ccr.packet_filter_information.list[i].presence.tos_traffic_class){
			val.os.len = gx_ccr.packet_filter_information.list[i].tos_traffic_class.len;
			val.os.data = gx_ccr.packet_filter_information.list[i].tos_traffic_class.val;
			add_fd_msg(&val, gxDict.avp_tos_traffic_class, (struct msg**)&avp_ptr);
		}
		if( gx_ccr.packet_filter_information.list[i].presence.security_parameter_index ){
			val.os.len = gx_ccr.packet_filter_information.list[i].security_parameter_index.len;
			val.os.data = gx_ccr.packet_filter_information.list[i].security_parameter_index.val;
			add_fd_msg(&val, gxDict.avp_security_parameter_index, (struct msg**)&avp_ptr);
		}
		if( gx_ccr.packet_filter_information.list[i].presence.flow_label ){
			val.os.len = gx_ccr.packet_filter_information.list[i].flow_label.len;
			val.os.data = gx_ccr.packet_filter_information.list[i].flow_label.val;
			add_fd_msg(&val, gxDict.avp_flow_label, (struct msg**)&avp_ptr);
		}
		/* Flow-Direction is an enumerated value, hence i32 (not u32). */
		if( gx_ccr.packet_filter_information.list[i].presence.flow_direction ){
			val.i32 = gx_ccr.packet_filter_information.list[i].flow_direction;
			add_fd_msg(&val, gxDict.avp_flow_direction, (struct msg**)&avp_ptr);
		}
	}
}
/* Adding Qos info */
if( gx_ccr.presence.qos_information ){
	/* QoS-Information (grouped AVP): created once, then every optional
	 * member present in gx_ccr.qos_information is appended as a sub-AVP.
	 * Statement order determines AVP order inside the group. */
	CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_qos_information, 0, &avp_ptr), return -1);
	CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
	if( gx_ccr.qos_information.presence.qos_class_identifier ){
		val.i32 = gx_ccr.qos_information.qos_class_identifier;
		add_fd_msg(&val,gxDict.avp_qos_class_identifier ,(struct msg**)&avp_ptr);
	}
	if( gx_ccr.qos_information.presence.max_requested_bandwidth_ul ){
		val.u32 = gx_ccr.qos_information.max_requested_bandwidth_ul;
		add_fd_msg(&val,gxDict.avp_max_requested_bandwidth_ul,(struct msg**)&avp_ptr);
	}
	if( gx_ccr.qos_information.presence.max_requested_bandwidth_dl ){
		val.u32 = gx_ccr.qos_information.max_requested_bandwidth_dl;
		add_fd_msg(&val,gxDict.avp_max_requested_bandwidth_dl,(struct msg**)&avp_ptr);
	}
	/* Extended-* variants carry bitrates above the 32-bit AVP range. */
	if( gx_ccr.qos_information.presence.extended_max_requested_bw_ul ){
		val.u32 = gx_ccr.qos_information.extended_max_requested_bw_ul;
		add_fd_msg(&val,gxDict.avp_extended_max_requested_bw_ul,(struct msg**)&avp_ptr);
	}
	if( gx_ccr.qos_information.presence.extended_max_requested_bw_dl ){
		val.u32 = gx_ccr.qos_information.extended_max_requested_bw_dl;
		add_fd_msg(&val,gxDict.avp_extended_max_requested_bw_dl,(struct msg**)&avp_ptr);
	}
	if( gx_ccr.qos_information.presence.guaranteed_bitrate_ul ){
		val.u32 = gx_ccr.qos_information.guaranteed_bitrate_ul;
		add_fd_msg(&val,gxDict.avp_guaranteed_bitrate_ul,(struct msg**)&avp_ptr);
	}
	if( gx_ccr.qos_information.presence.guaranteed_bitrate_dl ){
		val.u32 = gx_ccr.qos_information.guaranteed_bitrate_dl;
		add_fd_msg(&val,gxDict.avp_guaranteed_bitrate_dl,(struct msg**)&avp_ptr);
	}
	if( gx_ccr.qos_information.presence.extended_gbr_ul ){
		val.u32 = gx_ccr.qos_information.extended_gbr_ul;
		add_fd_msg(&val,gxDict.avp_extended_gbr_ul,(struct msg**)&avp_ptr);
	}
	if( gx_ccr.qos_information.presence.extended_gbr_dl ){
		val.u32 = gx_ccr.qos_information.extended_gbr_dl;
		add_fd_msg(&val,gxDict.avp_extended_gbr_dl,(struct msg**)&avp_ptr);
	}
	if( gx_ccr.qos_information.presence.bearer_identifier ){
		val.os.len = gx_ccr.qos_information.bearer_identifier.len;
		val.os.data = gx_ccr.qos_information.bearer_identifier.val;
		add_fd_msg(&val, gxDict.avp_bearer_identifier, (struct msg**)&avp_ptr);
	}
	/* NOTE(review): the Allocation-Retention-Priority members below are
	 * emitted without first checking
	 * presence.allocation_retention_priority itself -- presumably the
	 * nested presence flags are zeroed when ARP is absent; confirm. */
	if( gx_ccr.qos_information.allocation_retention_priority.presence.priority_level ){
		val.u32 = gx_ccr.qos_information.allocation_retention_priority.priority_level;
		add_fd_msg(&val,gxDict.avp_priority_level,(struct msg**)&avp_ptr);
	}
	if( gx_ccr.qos_information.allocation_retention_priority.presence.pre_emption_capability ){
		val.i32 = gx_ccr.qos_information.allocation_retention_priority.pre_emption_capability;
		add_fd_msg(&val,gxDict.avp_pre_emption_capability,(struct msg**)&avp_ptr);
	}
	if( gx_ccr.qos_information.allocation_retention_priority.presence.pre_emption_vulnerability ){
		val.i32 = gx_ccr.qos_information.allocation_retention_priority.pre_emption_vulnerability;
		add_fd_msg(&val,gxDict.avp_pre_emption_vulnerability,(struct msg**)&avp_ptr);
	}
	if( gx_ccr.qos_information.presence.apn_aggregate_max_bitrate_ul ){
		val.u32 = gx_ccr.qos_information.apn_aggregate_max_bitrate_ul;
		add_fd_msg(&val,gxDict.avp_apn_aggregate_max_bitrate_ul,(struct msg**)&avp_ptr);
	}
	if( gx_ccr.qos_information.presence.apn_aggregate_max_bitrate_dl ){
		val.u32 = gx_ccr.qos_information.apn_aggregate_max_bitrate_dl;
		add_fd_msg(&val,gxDict.avp_apn_aggregate_max_bitrate_dl,(struct msg**)&avp_ptr);
	}
	if( gx_ccr.qos_information.presence.extended_apn_ambr_ul){
		val.u32 = gx_ccr.qos_information.extended_apn_ambr_ul;
		add_fd_msg(&val,gxDict.avp_extended_apn_ambr_ul,(struct msg**)&avp_ptr);
	}
	if( gx_ccr.qos_information.presence.extended_apn_ambr_dl){
		val.u32 = gx_ccr.qos_information.extended_apn_ambr_dl;
		add_fd_msg(&val,gxDict.avp_extended_apn_ambr_dl,(struct msg**)&avp_ptr);
	}
	/* Conditional-APN-Aggregate-Max-Bitrate entries: per-entry bitrates
	 * plus the IP-CAN-Type / RAT-Type lists the limits apply to. */
	if( gx_ccr.qos_information.presence.conditional_apn_aggregate_max_bitrate ){
		for(int i=0; i < gx_ccr.qos_information.conditional_apn_aggregate_max_bitrate.count; i++){
			if( gx_ccr.qos_information.conditional_apn_aggregate_max_bitrate.list[i].
					presence.apn_aggregate_max_bitrate_ul ){
				val.u32 = gx_ccr.qos_information.conditional_apn_aggregate_max_bitrate.
					list[i].apn_aggregate_max_bitrate_ul;
				add_fd_msg(&val,gxDict.avp_apn_aggregate_max_bitrate_ul,(struct msg**)&avp_ptr);
			}
			if( gx_ccr.qos_information.conditional_apn_aggregate_max_bitrate.
					list[i].presence.apn_aggregate_max_bitrate_dl ){
				val.u32 = gx_ccr.qos_information.conditional_apn_aggregate_max_bitrate.
					list[i].apn_aggregate_max_bitrate_dl;
				add_fd_msg(&val,gxDict.avp_apn_aggregate_max_bitrate_dl,(struct msg**)&avp_ptr);
			}
			if( gx_ccr.qos_information.conditional_apn_aggregate_max_bitrate.list[i].
					presence.extended_apn_ambr_ul ){
				val.u32 = gx_ccr.qos_information.conditional_apn_aggregate_max_bitrate.
					list[i].extended_apn_ambr_ul;
				add_fd_msg(&val,gxDict.avp_extended_apn_ambr_ul,(struct msg**)&avp_ptr);
			}
			if( gx_ccr.qos_information.conditional_apn_aggregate_max_bitrate.list[i].
					presence.extended_apn_ambr_dl ){
				val.u32 = gx_ccr.qos_information.conditional_apn_aggregate_max_bitrate.
					list[i].extended_apn_ambr_dl;
				add_fd_msg(&val,gxDict.avp_extended_apn_ambr_dl,(struct msg**)&avp_ptr);
			}
			if( gx_ccr.qos_information.conditional_apn_aggregate_max_bitrate.
					list[i].presence.ip_can_type ){
				for(int k = 0; k < gx_ccr.qos_information.conditional_apn_aggregate_max_bitrate.
						list[i].ip_can_type.count; k++){
					val.u32 = gx_ccr.qos_information.conditional_apn_aggregate_max_bitrate.
						list[i].ip_can_type.list[k];
					add_fd_msg(&val,gxDict.avp_ip_can_type,(struct msg**)&avp_ptr);
				}
			}
			if( gx_ccr.qos_information.conditional_apn_aggregate_max_bitrate.
					list[i].presence.rat_type ){
				for(int k = 0; k < gx_ccr.qos_information.conditional_apn_aggregate_max_bitrate.
						list[i].rat_type.count; k++){
					val.u32 = gx_ccr.qos_information.conditional_apn_aggregate_max_bitrate.list[i].rat_type.list[k];
					add_fd_msg(&val,gxDict.avp_rat_type,(struct msg**)&avp_ptr);
				}
			}
		}
	}
}
/* Adding an gw address params */
if( gx_ccr.presence.an_gw_address ){
	/* Emit one AN-GW-Address AVP per list entry. */
	for(int r=0; r < gx_ccr.an_gw_address.count; r++){
		/* NOTE(review): os.len is sized for the WHOLE list
		 * (count * sizeof(FdAddress)) while os.data points at a single
		 * entry's address buffer -- for count > 1 this over-reads past
		 * the entry. The per-entry length presumably depends on the
		 * address family (see TODO below); confirm against the
		 * FdAddress layout before changing. */
		val.os.len = (gx_ccr.an_gw_address.count) * sizeof(FdAddress) ;
		val.os.data = gx_ccr.an_gw_address.list[r].address;
		/*TODO : Need to fill an_gw_address on the basis of type */
		add_fd_msg(&val,gxDict.avp_an_gw_address,(struct msg**)&avp_ptr);
	}
}
/* Adding ran_nas_release_cause params*/
/* Append one RAN-NAS-Release-Cause AVP for every entry in the list. */
if( gx_ccr.presence.ran_nas_release_cause ){
	int cause_cnt = gx_ccr.ran_nas_release_cause.count;
	for(int idx = 0; idx < cause_cnt; idx++){
		val.os.data = gx_ccr.ran_nas_release_cause.list[idx].val;
		val.os.len = gx_ccr.ran_nas_release_cause.list[idx].len;
		add_fd_msg(&val, gxDict.avp_ran_nas_release_cause, (struct msg**)&avp_ptr);
	}
}
/* Adding packet filter info params */
if( gx_ccr.presence.tft_packet_filter_information ){
	/* TFT-Packet-Filter-Information is a grouped AVP that may occur
	 * multiple times. Create one grouped instance per list entry
	 * (previously a single group was created before the loop, merging the
	 * sub-AVPs of every entry into one group; this now matches the
	 * per-entry pattern used for Packet-Filter-Information above). */
	for(int i=0; i < gx_ccr.tft_packet_filter_information.count; i++){
		CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_tft_packet_filter_information ,0, &avp_ptr), return -1);
		CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
		if( gx_ccr.tft_packet_filter_information.list[i].presence.precedence ){
			val.u32 = gx_ccr.tft_packet_filter_information.list[i].precedence;
			add_fd_msg(&val, gxDict.avp_precedence, (struct msg**)&avp_ptr);
		}
		if( gx_ccr.tft_packet_filter_information.list[i].presence.tft_filter ){
			val.os.len = gx_ccr.tft_packet_filter_information.list[i].tft_filter.len;
			val.os.data = gx_ccr.tft_packet_filter_information.list[i].tft_filter.val;
			add_fd_msg(&val, gxDict.avp_tft_filter, (struct msg**)&avp_ptr);
		}
		if( gx_ccr.tft_packet_filter_information.list[i].presence.tos_traffic_class){
			val.os.len = gx_ccr.tft_packet_filter_information.list[i].tos_traffic_class.len;
			val.os.data = gx_ccr.tft_packet_filter_information.list[i].tos_traffic_class.val;
			add_fd_msg(&val, gxDict.avp_tos_traffic_class, (struct msg**)&avp_ptr);
		}
		if( gx_ccr.tft_packet_filter_information.list[i].presence.security_parameter_index ){
			val.os.len = gx_ccr.tft_packet_filter_information.list[i].security_parameter_index.len;
			val.os.data = gx_ccr.tft_packet_filter_information.list[i].security_parameter_index.val;
			add_fd_msg(&val, gxDict.avp_security_parameter_index, (struct msg**)&avp_ptr);
		}
		if( gx_ccr.tft_packet_filter_information.list[i].presence.flow_label ){
			val.os.len = gx_ccr.tft_packet_filter_information.list[i].flow_label.len;
			val.os.data = gx_ccr.tft_packet_filter_information.list[i].flow_label.val;
			add_fd_msg(&val, gxDict.avp_flow_label, (struct msg**)&avp_ptr);
		}
		if( gx_ccr.tft_packet_filter_information.list[i].presence.flow_direction ){
			val.i32 = gx_ccr.tft_packet_filter_information.list[i].flow_direction;
			add_fd_msg(&val, gxDict.avp_flow_direction, (struct msg**)&avp_ptr);
		}
	}
}
/* Adding charging rule report params */
if( gx_ccr.presence.charging_rule_report ){
	/* Charging-Rule-Report: one grouped AVP instance per list entry.
	 * Each entry may carry rule names, status/failure codes, a nested
	 * Final-Unit-Indication (with optional Redirect-Server), and lists
	 * of RAN/NAS release causes and content versions. */
	for(int i = 0; i < gx_ccr.charging_rule_report.count; i++){
		CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_charging_rule_report ,0, &avp_ptr), return -1);
		CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
		if( gx_ccr.charging_rule_report.list[i].presence.charging_rule_name ){
			for(int k = 0; k < gx_ccr.charging_rule_report.list[i].charging_rule_name.count; k++){
				val.os.len = gx_ccr.charging_rule_report.list[i].
					charging_rule_name.list[k].len ;
				val.os.data = gx_ccr.charging_rule_report.list[i].
					charging_rule_name.list[k].val;
				add_fd_msg(&val,gxDict.avp_charging_rule_name,(struct msg**)&avp_ptr);
			}
		}
		if( gx_ccr.charging_rule_report.list[i].presence.charging_rule_base_name ){
			for(int k = 0; k < gx_ccr.charging_rule_report.list[i].
					charging_rule_base_name.count; k++){
				val.os.len = gx_ccr.charging_rule_report.list[i].
					charging_rule_base_name.list[k].len ;
				val.os.data = gx_ccr.charging_rule_report.list[i].
					charging_rule_base_name.list[k].val;
				add_fd_msg(&val,gxDict.avp_charging_rule_base_name,(struct msg**)&avp_ptr);
			}
		}
		if( gx_ccr.charging_rule_report.list[i].presence.bearer_identifier ){
			val.os.len = gx_ccr.charging_rule_report.list[i].
				bearer_identifier.len;
			val.os.data = gx_ccr.charging_rule_report.list[i].bearer_identifier.val;
			add_fd_msg(&val, gxDict.avp_bearer_identifier, (struct msg**)&avp_ptr);
		}
		if( gx_ccr.charging_rule_report.list[i].presence.pcc_rule_status ){
			val.i32 = gx_ccr.charging_rule_report.list[i].pcc_rule_status;
			add_fd_msg(&val, gxDict.avp_pcc_rule_status, (struct msg**)&avp_ptr);
		}
		if( gx_ccr.charging_rule_report.list[i].presence.rule_failure_code ){
			val.i32 = gx_ccr.charging_rule_report.list[i].rule_failure_code;
			add_fd_msg(&val, gxDict.avp_rule_failure_code, (struct msg**)&avp_ptr);
		}
		/* Final-Unit-Indication members (action, restriction filters,
		 * filter ids, redirect server) for this rule report. */
		if( gx_ccr.charging_rule_report.list[i].presence.final_unit_indication){
			if( gx_ccr.charging_rule_report.list[i].final_unit_indication.
					presence.final_unit_action){
				val.i32 = gx_ccr.charging_rule_report.list[i].
					final_unit_indication.final_unit_action;
				add_fd_msg(&val, gxDict.avp_final_unit_action, (struct msg**)&avp_ptr);
			}
			if( gx_ccr.charging_rule_report.list[i].final_unit_indication.
					presence.restriction_filter_rule ){
				for(int k = 0; k < gx_ccr.charging_rule_report.list[i].final_unit_indication.
						restriction_filter_rule.count; k++){
					val.os.len = gx_ccr.charging_rule_report.list[i].final_unit_indication.
						restriction_filter_rule.list[k].len ;
					val.os.data = gx_ccr.charging_rule_report.list[i].final_unit_indication.
						restriction_filter_rule.list[k].val;
					add_fd_msg(&val,gxDict.avp_restriction_filter_rule, (struct msg**)&avp_ptr);
				}
			}
			if( gx_ccr.charging_rule_report.list[i].final_unit_indication.presence.filter_id ){
				for(int k = 0; k < gx_ccr.charging_rule_report.list[i].
						final_unit_indication.filter_id.count; k++ ){
					val.os.len = gx_ccr.charging_rule_report.list[i].
						final_unit_indication.filter_id.list[k].len ;
					val.os.data = gx_ccr.charging_rule_report.list[i].
						final_unit_indication.filter_id.list[k].val;
					add_fd_msg(&val,gxDict.avp_filter_id, (struct msg**)&avp_ptr);
				}
			}
			if( gx_ccr.charging_rule_report.list[i].final_unit_indication.presence.redirect_server ){
				if( gx_ccr.charging_rule_report.list[i].final_unit_indication.
						redirect_server.presence.redirect_address_type){
					val.i32 = gx_ccr.charging_rule_report.list[i].final_unit_indication.
						redirect_server.redirect_address_type;
					add_fd_msg(&val, gxDict.avp_redirect_address_type, (struct msg**)&avp_ptr);
				}
				if(gx_ccr.charging_rule_report.list[i].final_unit_indication.
						redirect_server.presence.redirect_server_address ){
					val.os.len = gx_ccr.charging_rule_report.list[i].
						final_unit_indication.redirect_server.redirect_server_address.len;
					val.os.data = gx_ccr.charging_rule_report.list[i].
						final_unit_indication.redirect_server.redirect_server_address.val;
					add_fd_msg(&val, gxDict.avp_redirect_server_address, (struct msg**)&avp_ptr);
				}
			}
		}
		if( gx_ccr.charging_rule_report.list[i].presence.ran_nas_release_cause){
			for(int k = 0; k < gx_ccr.charging_rule_report.list[i].ran_nas_release_cause.count; k++){
				val.os.len = gx_ccr.charging_rule_report.list[i].ran_nas_release_cause.list[k].len;
				val.os.data = gx_ccr.charging_rule_report.list[i].ran_nas_release_cause.list[k].val;
				add_fd_msg(&val,gxDict.avp_ran_nas_release_cause, (struct msg**)&avp_ptr);
			}
		}
		/* Content-Version is a 64-bit value, hence u64. */
		if( gx_ccr.charging_rule_report.list[i].presence.content_version){
			for(int k = 0; k < gx_ccr.charging_rule_report.list[i].content_version.count; k++ ){
				val.u64 = gx_ccr.charging_rule_report.list[i].content_version.list[k] ;
				add_fd_msg(&val,gxDict.avp_content_version, (struct msg**)&avp_ptr);
			}
		}
	}
}
/* Adding application detection info params */
if( gx_ccr.presence.application_detection_information ){
	/* Application-Detection-Information is a grouped AVP that may occur
	 * multiple times. Create one grouped instance per list entry
	 * (previously a single group was created before the loop, merging the
	 * sub-AVPs of every entry into one group; this now matches the
	 * per-entry pattern used for Charging-Rule-Report). Each entry
	 * carries the TDF application ids plus an optional list of
	 * Flow-Information members. */
	for( int i = 0; i < gx_ccr.application_detection_information.count; i++){
		CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_application_detection_information ,0, &avp_ptr), return -1);
		CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
		if( gx_ccr.application_detection_information.list[i].presence.
				tdf_application_identifier){
			val.os.len = gx_ccr.application_detection_information.list[i].
				tdf_application_identifier.len;
			val.os.data = gx_ccr.application_detection_information.list[i].
				tdf_application_identifier.val;
			add_fd_msg(&val,gxDict.avp_tdf_application_identifier, (struct msg**)&avp_ptr);
		}
		if( gx_ccr.application_detection_information.list[i].presence.
				tdf_application_instance_identifier ){
			val.os.len = gx_ccr.application_detection_information.list[i].
				tdf_application_instance_identifier.len;
			val.os.data = gx_ccr.application_detection_information.list[i].
				tdf_application_instance_identifier.val;
			add_fd_msg(&val,gxDict.avp_tdf_application_instance_identifier, (struct msg**)&avp_ptr);
		}
		if( gx_ccr.application_detection_information.list[i].presence.flow_information ){
			for( int j = 0; j < gx_ccr.application_detection_information.list[i].
					flow_information.count; j++){
				if( gx_ccr.application_detection_information.list[i].flow_information.
						list[j].presence.flow_description ){
					val.os.len = gx_ccr.application_detection_information.list[i].
						flow_information.list[j].flow_description.len;
					val.os.data = gx_ccr.application_detection_information.list[i].
						flow_information.list[j].flow_description.val;
					add_fd_msg(&val,gxDict.avp_flow_description, (struct msg**)&avp_ptr);
				}
				if( gx_ccr.application_detection_information.list[i].flow_information.
						list[j].presence.packet_filter_identifier){
					val.os.len = gx_ccr.application_detection_information.list[i].
						flow_information.list[j].packet_filter_identifier.len;
					val.os.data = gx_ccr.application_detection_information.list[i].
						flow_information.list[j].packet_filter_identifier.val;
					add_fd_msg(&val,gxDict.avp_packet_filter_identifier, (struct msg**)&avp_ptr);
				}
				if( gx_ccr.application_detection_information.list[i].flow_information.
						list[j].presence.packet_filter_usage ){
					val.i32 = gx_ccr.application_detection_information.list[i].
						flow_information.list[j].packet_filter_usage;
					add_fd_msg(&val, gxDict.avp_packet_filter_usage, (struct msg**)&avp_ptr);
				}
				if( gx_ccr.application_detection_information.list[i].flow_information.
						list[j].presence.tos_traffic_class ){
					val.os.len = gx_ccr.application_detection_information.list[i].
						flow_information.list[j].tos_traffic_class.len;
					val.os.data = gx_ccr.application_detection_information.list[i].
						flow_information.list[j].tos_traffic_class.val;
					add_fd_msg(&val,gxDict.avp_tos_traffic_class, (struct msg**)&avp_ptr);
				}
				if( gx_ccr.application_detection_information.list[i].flow_information.
						list[j].presence.security_parameter_index){
					val.os.len = gx_ccr.application_detection_information.list[i].
						flow_information.list[j].security_parameter_index.len;
					val.os.data = gx_ccr.application_detection_information.list[i].
						flow_information.list[j].security_parameter_index.val;
					add_fd_msg(&val,gxDict.avp_security_parameter_index, (struct msg**)&avp_ptr);
				}
				if( gx_ccr.application_detection_information.list[i].flow_information.
						list[j].presence.flow_label ){
					val.os.len = gx_ccr.application_detection_information.list[i].flow_information.
						list[j].flow_label.len;
					val.os.data = gx_ccr.application_detection_information.list[i].flow_information.
						list[j].flow_label.val;
					add_fd_msg(&val,gxDict.avp_flow_label, (struct msg**)&avp_ptr);
				}
				if( gx_ccr.application_detection_information.list[i].flow_information.
						list[j].presence.flow_direction){
					val.i32 = gx_ccr.application_detection_information.list[i].
						flow_information.list[j].flow_direction;
					add_fd_msg(&val, gxDict.avp_flow_direction, (struct msg**)&avp_ptr);
				}
				if( gx_ccr.application_detection_information.list[i].flow_information.
						list[j].presence.routing_rule_identifier){
					val.os.len = gx_ccr.application_detection_information.list[i].
						flow_information.list[j].routing_rule_identifier.len;
					val.os.data = gx_ccr.application_detection_information.list[i].
						flow_information.list[j].routing_rule_identifier.val;
					add_fd_msg(&val,gxDict.avp_routing_rule_identifier, (struct msg**)&avp_ptr);
				}
			}
		}
	}
}
#if 0
/* NOTE(review): this Event-Trigger emission is compiled out and the reason
 * is not recorded here -- Event-Trigger appears to be emitted only inside
 * Event-Report-Indication; confirm before re-enabling or deleting. */
/* Adding trigger list info params */
if( gx_ccr.presence.event_trigger ){
	for( int k = 0 ; k < gx_ccr.event_trigger.count; k++ ){
		val.u32 = gx_ccr.event_trigger.list[k];
		add_fd_msg(&val,gxDict.avp_event_trigger, (struct msg**)&avp_ptr);
	}
}
#endif
/* Adding event report ind params */
if( gx_ccr.presence.event_report_indication ){
CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_event_report_indication ,0, &avp_ptr), return -1);
CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
if( gx_ccr.event_report_indication.presence.an_trusted ){
val.i32 = gx_ccr.event_report_indication.an_trusted;
add_fd_msg(&val, gxDict.avp_an_trusted, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.presence.event_trigger ){
for(int j = 0; j < gx_ccr.event_report_indication.event_trigger.count ; j++){
val.u32 = gx_ccr.event_report_indication.event_trigger.list[j];
add_fd_msg(&val,gxDict.avp_event_trigger, (struct msg**)&avp_ptr);
}
}
if( gx_ccr.event_report_indication.presence.user_csg_information ){
if( gx_ccr.event_report_indication.user_csg_information.presence.csg_id ){
val.u32 = gx_ccr.event_report_indication.user_csg_information.csg_id;
add_fd_msg(&val, gxDict.avp_csg_id, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.user_csg_information.presence.csg_access_mode ){
val.i32 = gx_ccr.event_report_indication.user_csg_information.csg_access_mode;
add_fd_msg(&val, gxDict.avp_csg_access_mode, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.user_csg_information.presence.csg_membership_indication ){
val.i32 = gx_ccr.event_report_indication.user_csg_information.csg_membership_indication;
add_fd_msg(&val, gxDict.avp_csg_membership_indication, (struct msg**)&avp_ptr);
}
}
if( gx_ccr.event_report_indication.presence.ip_can_type ){
val.i32 = gx_ccr.event_report_indication.ip_can_type;
add_fd_msg(&val, gxDict.avp_ip_can_type, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.presence.an_gw_address ){
	/* Emit one AN-GW-Address AVP per list entry. */
	for(int r = 0; r < gx_ccr.event_report_indication.an_gw_address.count; r++){
		/* NOTE(review): os.len is sized for the WHOLE list
		 * (count * sizeof(FdAddress)) while os.data points at a single
		 * entry's address buffer -- for count > 1 this over-reads past
		 * the entry. Same issue as the top-level AN-GW-Address loop;
		 * confirm the FdAddress layout before changing. */
		val.os.len = (gx_ccr.event_report_indication.an_gw_address.count) * sizeof(FdAddress) ;
		val.os.data = gx_ccr.event_report_indication.an_gw_address.list[r].address;
		/*TODO : Need to fill an_gw_address on the basis of type and length */
		add_fd_msg(&val,gxDict.avp_an_gw_address,(struct msg**)&avp_ptr);
	}
}
if( gx_ccr.event_report_indication.presence.tgpp_sgsn_address){
val.os.len = (gx_ccr.event_report_indication.tgpp_sgsn_address.len);
val.os.data = gx_ccr.event_report_indication.tgpp_sgsn_address.val;
add_fd_msg(&val,gxDict.avp_3gpp_sgsn_address, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.presence.tgpp_sgsn_ipv6_address){
val.os.len = (gx_ccr.event_report_indication.tgpp_sgsn_ipv6_address.len);
val.os.data = gx_ccr.event_report_indication.tgpp_sgsn_ipv6_address.val;
add_fd_msg(&val,gxDict.avp_3gpp_sgsn_ipv6_address, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.presence.tgpp_sgsn_mcc_mnc){
val.os.len = (gx_ccr.event_report_indication.tgpp_sgsn_mcc_mnc.len);
val.os.data = gx_ccr.event_report_indication.tgpp_sgsn_mcc_mnc.val;
add_fd_msg(&val,gxDict.avp_3gpp_sgsn_mcc_mnc, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.presence.framed_ip_address){
val.os.len = (gx_ccr.event_report_indication.framed_ip_address.len);
val.os.data = gx_ccr.event_report_indication.framed_ip_address.val;
add_fd_msg(&val,gxDict.avp_framed_ip_address, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.presence.rat_type){
val.i32 = gx_ccr.event_report_indication.rat_type;
add_fd_msg(&val, gxDict.avp_rat_type, (struct msg**)&avp_ptr);
}
/* RAI followed by 3GPP-User-Location-Info. The second, identical RAI
 * emission that used to follow 3GPP-User-Location-Info has been removed:
 * it appended the same RAI AVP twice to Event-Report-Indication. */
if( gx_ccr.event_report_indication.presence.rai){
	val.os.len = (gx_ccr.event_report_indication.rai.len);
	val.os.data = gx_ccr.event_report_indication.rai.val;
	add_fd_msg(&val,gxDict.avp_rai, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.presence.tgpp_user_location_info){
	val.os.len = (gx_ccr.event_report_indication.tgpp_user_location_info.len);
	val.os.data = gx_ccr.event_report_indication.tgpp_user_location_info.val;
	add_fd_msg(&val,gxDict.avp_3gpp_user_location_info, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.presence.trace_data){
if( gx_ccr.event_report_indication.trace_data.presence.trace_reference){
val.os.len = (gx_ccr.event_report_indication.trace_data.trace_reference.len);
val.os.data = gx_ccr.event_report_indication.trace_data.trace_reference.val;
add_fd_msg(&val,gxDict.avp_trace_reference, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.presence.trace_depth){
val.i32 = gx_ccr.event_report_indication.trace_data.trace_depth;
add_fd_msg(&val, gxDict.avp_trace_depth, (struct msg**)&avp_ptr);
}
/* Trace-NE-Type-List: the identical duplicated emission that used to follow
 * this block has been removed -- it appended the same AVP twice. */
if( gx_ccr.event_report_indication.trace_data.presence.trace_ne_type_list){
	val.os.len = (gx_ccr.event_report_indication.trace_data.trace_ne_type_list.len);
	val.os.data = gx_ccr.event_report_indication.trace_data.trace_ne_type_list.val;
	add_fd_msg(&val,gxDict.avp_trace_ne_type_list, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.presence.trace_interface_list){
val.os.len = (gx_ccr.event_report_indication.trace_data.trace_interface_list.len);
val.os.data = gx_ccr.event_report_indication.trace_data.trace_interface_list.val;
add_fd_msg(&val,gxDict.avp_trace_interface_list, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.presence.trace_event_list){
val.os.len = (gx_ccr.event_report_indication.trace_data.trace_event_list.len);
val.os.data = gx_ccr.event_report_indication.trace_data.trace_event_list.val;
add_fd_msg(&val,gxDict.avp_trace_event_list, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.presence.omc_id){
val.os.len = (gx_ccr.event_report_indication.trace_data.omc_id.len);
val.os.data = gx_ccr.event_report_indication.trace_data.omc_id.val;
add_fd_msg(&val,gxDict.avp_omc_id, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.presence.trace_collection_entity){
/*TODO :need to addres on the basis of type in Fdaddress */
val.os.len = strnlen((char *)gx_ccr.event_report_indication.trace_data.trace_collection_entity.address,MAX_FD_ADDRESS_LEN);
val.os.data = gx_ccr.event_report_indication.trace_data.trace_collection_entity.address;
add_fd_msg(&val,gxDict.avp_trace_collection_entity, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.presence.mdt_configuration){
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.job_type){
val.i32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.job_type;
add_fd_msg(&val, gxDict.avp_job_type, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.area_scope){
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.
area_scope.presence.cell_global_identity ){
for(int k = 0; k < gx_ccr.event_report_indication.trace_data.
mdt_configuration.area_scope.cell_global_identity.count; k++ ){
val.os.len = gx_ccr.event_report_indication.trace_data.
mdt_configuration.area_scope.cell_global_identity.list[k].len;
val.os.data = gx_ccr.event_report_indication.trace_data.
mdt_configuration.area_scope.cell_global_identity.list[k].val;
add_fd_msg(&val,gxDict.avp_cell_global_identity, (struct msg**)&avp_ptr);
}
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.
area_scope.presence.e_utran_cell_global_identity){
for(int k = 0; k < gx_ccr.event_report_indication.trace_data.
mdt_configuration.area_scope.e_utran_cell_global_identity.count; k++ ){
val.os.len = gx_ccr.event_report_indication.trace_data.
mdt_configuration.area_scope.e_utran_cell_global_identity.list[k].len;
val.os.data = gx_ccr.event_report_indication.trace_data.
mdt_configuration.area_scope.e_utran_cell_global_identity.list[k].val;
add_fd_msg(&val,gxDict.avp_e_utran_cell_global_identity, (struct msg**)&avp_ptr);
}
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.
area_scope.presence.routing_area_identity){
for(int k = 0; k < gx_ccr.event_report_indication.trace_data.
mdt_configuration.area_scope.routing_area_identity.count; k++ ){
val.os.len = gx_ccr.event_report_indication.trace_data.
mdt_configuration.area_scope.routing_area_identity.list[k].len;
val.os.data = gx_ccr.event_report_indication.trace_data.
mdt_configuration.area_scope.routing_area_identity.list[k].val;
add_fd_msg(&val,gxDict.avp_routing_area_identity, (struct msg**)&avp_ptr);
}
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.
area_scope.presence.tracking_area_identity){
for(int k = 0; k < gx_ccr.event_report_indication.trace_data.
mdt_configuration.area_scope.tracking_area_identity.count; k++ ){
val.os.len = gx_ccr.event_report_indication.trace_data.
mdt_configuration.area_scope.tracking_area_identity.list[k].len;
val.os.data = gx_ccr.event_report_indication.trace_data.
mdt_configuration.area_scope.tracking_area_identity.list[k].val;
add_fd_msg(&val,gxDict.avp_tracking_area_identity, (struct msg**)&avp_ptr);
}
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.
area_scope.presence.location_area_identity){
for( int k = 0; k < gx_ccr.event_report_indication.trace_data.
mdt_configuration.area_scope.location_area_identity.count; k++ ){
val.os.len = gx_ccr.event_report_indication.trace_data.
mdt_configuration.area_scope.location_area_identity.list[k].len;
val.os.data = gx_ccr.event_report_indication.trace_data.
mdt_configuration.area_scope.location_area_identity.list[k].val;
add_fd_msg(&val,gxDict.avp_location_area_identity, (struct msg**)&avp_ptr);
}
}
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.list_of_measurements){
val.u32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.list_of_measurements;
add_fd_msg(&val, gxDict.avp_list_of_measurements, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.reporting_trigger){
val.u32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.reporting_trigger;
add_fd_msg(&val, gxDict.avp_reporting_trigger, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.report_interval){
val.i32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.report_interval;
add_fd_msg(&val, gxDict.avp_report_interval, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.report_amount){
val.i32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.report_amount;
add_fd_msg(&val, gxDict.avp_report_amount, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.event_threshold_rsrp){
val.u32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.event_threshold_rsrp;
add_fd_msg(&val, gxDict.avp_event_threshold_rsrp, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.event_threshold_rsrq){
val.u32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.event_threshold_rsrq;
add_fd_msg(&val, gxDict.avp_event_threshold_rsrq, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.logging_interval){
val.i32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.logging_interval;
add_fd_msg(&val, gxDict.avp_logging_interval, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.logging_duration){
val.i32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.logging_duration;
add_fd_msg(&val, gxDict.avp_logging_duration, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.measurement_period_lte){
val.i32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.measurement_period_lte;
add_fd_msg(&val, gxDict.avp_measurement_period_lte, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.measurement_period_umts){
val.i32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.measurement_period_umts;
add_fd_msg(&val, gxDict.avp_measurement_period_umts, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.collection_period_rrm_lte){
val.i32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.collection_period_rrm_lte;
add_fd_msg(&val, gxDict.avp_collection_period_rrm_lte, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.collection_period_rrm_umts){
val.i32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.collection_period_rrm_umts;
add_fd_msg(&val, gxDict.avp_collection_period_rrm_umts, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.positioning_method){
val.os.len = (gx_ccr.event_report_indication.trace_data.mdt_configuration.positioning_method.len);
val.os.data = gx_ccr.event_report_indication.trace_data.mdt_configuration.positioning_method.val;
add_fd_msg(&val,gxDict.avp_positioning_method, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.measurement_quantity){
val.os.len =gx_ccr.event_report_indication.trace_data.mdt_configuration.measurement_quantity.len;
val.os.data = gx_ccr.event_report_indication.trace_data.mdt_configuration.measurement_quantity.val;
add_fd_msg(&val,gxDict.avp_measurement_quantity, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.event_threshold_event_1f){
val.i32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.event_threshold_event_1f;
add_fd_msg(&val, gxDict.avp_event_threshold_event_1f, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.event_threshold_event_1i){
val.i32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.event_threshold_event_1i;
add_fd_msg(&val, gxDict.avp_event_threshold_event_1i, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.mdt_allowed_plmn_id){
for( int k = 0; k < gx_ccr.event_report_indication.trace_data.
mdt_configuration.mdt_allowed_plmn_id.count; k++){
val.os.len = gx_ccr.event_report_indication.trace_data.
mdt_configuration.mdt_allowed_plmn_id.list[k].len;
val.os.data = gx_ccr.event_report_indication.trace_data.
mdt_configuration.mdt_allowed_plmn_id.list[k].val;
add_fd_msg(&val,gxDict.avp_mdt_allowed_plmn_id, (struct msg**)&avp_ptr);
}
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.presence.mbsfn_area){
for(int k = 0; k < gx_ccr.event_report_indication.
trace_data.mdt_configuration.mbsfn_area.count; k++){
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.mbsfn_area.
list[k].presence.mbsfn_area_id){
val.u32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.mbsfn_area.
list[k].mbsfn_area_id;
add_fd_msg(&val,gxDict.avp_mbsfn_area_id, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.trace_data.mdt_configuration.mbsfn_area.
list[k].presence.carrier_frequency ){
val.u32 = gx_ccr.event_report_indication.trace_data.mdt_configuration.
mbsfn_area.list[k].carrier_frequency;
add_fd_msg(&val,gxDict.avp_carrier_frequency, (struct msg**)&avp_ptr);
}
}
}
}
}
if( gx_ccr.event_report_indication.presence.trace_reference ){
val.os.len = ( gx_ccr.event_report_indication.trace_reference.len );
val.os.data = gx_ccr.event_report_indication.trace_reference.val;
add_fd_msg(&val,gxDict.avp_trace_reference, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.presence.tgpp2_bsid ){
val.os.len = ( gx_ccr.event_report_indication.tgpp2_bsid.len );
val.os.data = gx_ccr.event_report_indication.tgpp2_bsid.val;
add_fd_msg( &val, gxDict.avp_3gpp2_bsid, (struct msg**)&avp_ptr );
}
if( gx_ccr.event_report_indication.presence.tgpp_ms_timezone){
val.os.len = (gx_ccr.event_report_indication.tgpp_ms_timezone.len);
val.os.data = gx_ccr.event_report_indication.tgpp_ms_timezone.val;
add_fd_msg(&val,gxDict.avp_3gpp_ms_timezone, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.presence.routing_ip_address){
/*TODO :Need to fill according to type*/
val.os.len = strnlen((char *) gx_ccr.event_report_indication.routing_ip_address.address,MAX_FD_ADDRESS_LEN);
val.os.data = gx_ccr.event_report_indication.routing_ip_address.address;
add_fd_msg(&val,gxDict.avp_routing_ip_address, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.presence.ue_local_ip_address){
/*TODO :Need to fill according to type*/
val.os.len = strnlen((char *) gx_ccr.event_report_indication.ue_local_ip_address.address,MAX_FD_ADDRESS_LEN);
val.os.data = gx_ccr.event_report_indication.ue_local_ip_address.address;
add_fd_msg(&val,gxDict.avp_ue_local_ip_address, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.presence.henb_local_ip_address){
/*TODO :Need to fill according to type*/
val.os.len = strnlen((char *) gx_ccr.event_report_indication.henb_local_ip_address.address,MAX_FD_ADDRESS_LEN);
val.os.data = gx_ccr.event_report_indication.henb_local_ip_address.address;
add_fd_msg(&val,gxDict.avp_henb_local_ip_address, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.presence.udp_source_port){
val.u32 = gx_ccr.event_report_indication.udp_source_port;
add_fd_msg(&val, gxDict.avp_udp_source_port, (struct msg**)&avp_ptr);
}
if( gx_ccr.event_report_indication.presence.presence_reporting_area_information){
if ( gx_ccr.event_report_indication.presence_reporting_area_information.
presence.presence_reporting_area_identifier){
val.os.len = (gx_ccr.event_report_indication.
presence_reporting_area_information.presence_reporting_area_identifier.len);
val.os.data = gx_ccr.event_report_indication.
presence_reporting_area_information.presence_reporting_area_identifier.val;
add_fd_msg(&val,gxDict.avp_presence_reporting_area_identifier, (struct msg**)&avp_ptr);
}
if ( gx_ccr.event_report_indication.presence_reporting_area_information.
presence.presence_reporting_area_status){
val.u32 = gx_ccr.event_report_indication.
presence_reporting_area_information.presence_reporting_area_status;
add_fd_msg(&val,gxDict.avp_presence_reporting_area_status, (struct msg**)&avp_ptr);
}
if ( gx_ccr.event_report_indication.presence_reporting_area_information.
presence.presence_reporting_area_elements_list){
val.os.len = (gx_ccr.event_report_indication.
presence_reporting_area_information.presence_reporting_area_elements_list.len);
val.os.data = gx_ccr.event_report_indication.
presence_reporting_area_information.presence_reporting_area_elements_list.val;
add_fd_msg(&val,gxDict.avp_presence_reporting_area_elements_list, (struct msg**)&avp_ptr);
}
if ( gx_ccr.event_report_indication.presence_reporting_area_information.
presence.presence_reporting_area_node){
val.u32 = gx_ccr.event_report_indication.
presence_reporting_area_information.presence_reporting_area_node;
add_fd_msg(&val,gxDict.avp_presence_reporting_area_node, (struct msg**)&avp_ptr);
}
}
}
/* Adding access network charging identifier params */
if( gx_ccr.presence.access_network_charging_identifier_gx ){
CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_access_network_charging_identifier_gx ,0, &avp_ptr), return -1);
CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
for( int i = 0; i < gx_ccr.access_network_charging_identifier_gx.count; i++){
if( gx_ccr.access_network_charging_identifier_gx.list[i].presence.
access_network_charging_identifier_value){
val.os.len = gx_ccr.access_network_charging_identifier_gx.list[i].
access_network_charging_identifier_value.len;
val.os.data = gx_ccr.access_network_charging_identifier_gx.list[i].
access_network_charging_identifier_value.val;
add_fd_msg(&val,gxDict.avp_access_network_charging_identifier_value, (struct msg**)&avp_ptr);
}
if( gx_ccr.access_network_charging_identifier_gx.list[i].presence.
charging_rule_base_name){
for(int k = 0; k < gx_ccr.access_network_charging_identifier_gx.
list[i].charging_rule_name.count; k++){
val.os.len = gx_ccr.access_network_charging_identifier_gx.list[i].
charging_rule_base_name.list[k].len ;
val.os.data = gx_ccr.access_network_charging_identifier_gx.list[i].
charging_rule_base_name.list[k].val;
add_fd_msg(&val,gxDict.avp_charging_rule_base_name,(struct msg**)&avp_ptr);
}
}
if( gx_ccr.access_network_charging_identifier_gx.list[i].presence.
charging_rule_name){
for( int k = 0; k < gx_ccr.access_network_charging_identifier_gx.
list[i].charging_rule_name.count; k++){
val.os.len = gx_ccr.access_network_charging_identifier_gx.list[i].
charging_rule_name.list[k].len ;
val.os.data = gx_ccr.access_network_charging_identifier_gx.list[i].
charging_rule_name.list[k].val;
add_fd_msg(&val,gxDict.avp_charging_rule_name,(struct msg**)&avp_ptr);
}
}
if( gx_ccr.access_network_charging_identifier_gx.list[i].
presence.ip_can_session_charging_scope ){
val.i32 = gx_ccr.access_network_charging_identifier_gx.
list[i].ip_can_session_charging_scope;
add_fd_msg(&val,gxDict.avp_ip_can_session_charging_scope, (struct msg**)&avp_ptr);
}
}
}
/* Adding CoA information params */
if( gx_ccr.presence.coa_information ){
CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_coa_information ,0, &avp_ptr), return -1);
CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
for( int i = 0; i < gx_ccr.coa_information.count; i++){
if( gx_ccr.coa_information.list[i].presence.tunnel_information){
if ( gx_ccr.coa_information.list[i].tunnel_information.presence.tunnel_header_length ){
val.i32 = gx_ccr.coa_information.list[i].tunnel_information.tunnel_header_length;
add_fd_msg(&val,gxDict.avp_tunnel_header_length, (struct msg**)&avp_ptr);
}
if ( gx_ccr.coa_information.list[i].tunnel_information.presence.
tunnel_header_filter){
for (int k = 0; k < gx_ccr.coa_information.list[i].
tunnel_information.tunnel_header_filter.count; k++){
val.os.len = gx_ccr.coa_information.list[i].
tunnel_information.tunnel_header_filter.list[k].len ;
val.os.data = gx_ccr.coa_information.list[i].
tunnel_information.tunnel_header_filter.list[k].val ;
add_fd_msg(&val,gxDict.avp_tunnel_header_filter,(struct msg**)&avp_ptr);
}
}
}
if( gx_ccr.coa_information.list[i].presence.coa_ip_address ){
/*TODO address need to fill on the basis of type */
val.os.len = strnlen((char *)gx_ccr.coa_information.list[i].coa_ip_address.address,MAX_FD_ADDRESS_LEN);
val.os.data = gx_ccr.coa_information.list[i].coa_ip_address.address;
add_fd_msg(&val,gxDict.avp_coa_ip_address, (struct msg**)&avp_ptr);
}
}
}
/* Adding usage monitoring information params */
if( gx_ccr.presence.usage_monitoring_information ){
CHECK_FCT_DO( fd_msg_avp_new( gxDict.avp_usage_monitoring_information, 0, &avp_ptr ), return -1 );
CHECK_FCT_DO( fd_msg_avp_add( msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1 );
for( int i = 0; i < gx_ccr.usage_monitoring_information.count; i++){
if( gx_ccr.usage_monitoring_information.list[i].presence.monitoring_key ){
val.os.len = gx_ccr.usage_monitoring_information.list[i].monitoring_key.len;
val.os.data = gx_ccr.usage_monitoring_information.list[i].monitoring_key.val;
add_fd_msg(&val,gxDict.avp_monitoring_key, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].presence.granted_service_unit ){
for( int j = 0; j < gx_ccr.usage_monitoring_information.
list[i].granted_service_unit.count; j++ ){
if( gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].presence.tariff_time_change ){
val.u32 = gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].tariff_time_change;
add_fd_msg(&val,gxDict.avp_tariff_time_change, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].presence.cc_time ){
val.u32 = gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].cc_time;
add_fd_msg(&val,gxDict.avp_cc_time, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].presence.cc_money ){
if( gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].cc_money.presence.unit_value){
if( gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].cc_money.
unit_value.presence.value_digits ){
val.u32 = gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].cc_money.unit_value.value_digits;
add_fd_msg(&val,gxDict.avp_value_digits, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].cc_money.
unit_value.presence.exponent ){
val.u32 = gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].cc_money.unit_value.exponent;
add_fd_msg(&val,gxDict.avp_exponent, (struct msg**)&avp_ptr);
}
}
if( gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].cc_money.
presence.currency_code ){
val.u32 = gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].cc_money.currency_code;
add_fd_msg(&val,gxDict.avp_currency_code, (struct msg**)&avp_ptr);
}
}
if( gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].presence.cc_total_octets ){
val.u64 = gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].cc_total_octets;
add_fd_msg(&val,gxDict.avp_cc_total_octets, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].presence.cc_input_octets){
val.u64 = gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].cc_input_octets;
add_fd_msg(&val,gxDict.avp_cc_input_octets, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].presence.cc_output_octets ){
val.u64 = gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].cc_output_octets;
add_fd_msg(&val,gxDict.avp_cc_output_octets, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].presence.cc_service_specific_units){
val.u64 = gx_ccr.usage_monitoring_information.list[i].
granted_service_unit.list[j].cc_service_specific_units;
add_fd_msg(&val,gxDict.avp_cc_service_specific_units, (struct msg**)&avp_ptr);
}
}
}
if( gx_ccr.usage_monitoring_information.list[i].presence.used_service_unit ){
for( int k = 0; k < gx_ccr.usage_monitoring_information.list[i].
used_service_unit.count; k++ ){
if( gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].presence.reporting_reason){
val.i32 = gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].reporting_reason;
add_fd_msg(&val,gxDict.avp_reporting_reason, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].presence.tariff_change_usage ){
val.i32 = gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].tariff_change_usage;
add_fd_msg(&val,gxDict.avp_tariff_change_usage, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].presence.cc_time ){
val.u32 = gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].cc_time;
add_fd_msg(&val,gxDict.avp_cc_time, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].presence.cc_money ){
if( gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].cc_money.presence.unit_value ){
if( gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].cc_money.unit_value.
presence.value_digits ){
val.u32 = gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].cc_money.unit_value.value_digits;
add_fd_msg(&val,gxDict.avp_value_digits, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].cc_money.unit_value.presence.exponent ){
val.u32 = gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].cc_money.unit_value.exponent;
add_fd_msg(&val,gxDict.avp_exponent, (struct msg**)&avp_ptr);
}
}
if( gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].cc_money.presence.currency_code ){
val.u32 = gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].cc_money.currency_code;
add_fd_msg(&val,gxDict.avp_currency_code, (struct msg**)&avp_ptr);
}
}
if( gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].presence.cc_total_octets ){
val.u64 = gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].cc_total_octets;
add_fd_msg(&val,gxDict.avp_cc_total_octets, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].presence.cc_input_octets ){
val.u64 = gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].cc_input_octets;
add_fd_msg(&val,gxDict.avp_cc_input_octets, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].presence.cc_output_octets ){
val.u64 = gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].cc_output_octets;
add_fd_msg(&val,gxDict.avp_cc_output_octets, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].presence.cc_service_specific_units ){
val.u64 = gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].cc_service_specific_units;
add_fd_msg(&val,gxDict.avp_cc_service_specific_units, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].presence.event_charging_timestamp ){
for( int itr = 0; itr < gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].event_charging_timestamp.count; itr++ ){
val.u64 = gx_ccr.usage_monitoring_information.list[i].
used_service_unit.list[k].event_charging_timestamp.list[itr];
add_fd_msg(&val,gxDict.avp_event_charging_timestamp, (struct msg**)&avp_ptr);
}
}
}
}
if( gx_ccr.usage_monitoring_information.list[i].presence.quota_consumption_time ){
val.u32 = gx_ccr.usage_monitoring_information.list[i].quota_consumption_time;
add_fd_msg(&val,gxDict.avp_quota_consumption_time, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].presence.usage_monitoring_level ){
val.i32 = gx_ccr.usage_monitoring_information.list[i].usage_monitoring_level;
add_fd_msg(&val,gxDict.avp_usage_monitoring_level, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].presence.usage_monitoring_report ){
val.i32 = gx_ccr.usage_monitoring_information.list[i].usage_monitoring_report;
add_fd_msg(&val,gxDict.avp_usage_monitoring_report, (struct msg**)&avp_ptr);
}
if( gx_ccr.usage_monitoring_information.list[i].presence.usage_monitoring_support ){
val.i32 = gx_ccr.usage_monitoring_information.list[i].usage_monitoring_support;
add_fd_msg(&val,gxDict.avp_usage_monitoring_support, (struct msg**)&avp_ptr);
}
}
}
/* Adding routing rule install params */
if( gx_ccr.presence.routing_rule_install){
CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_routing_rule_install ,0, &avp_ptr), return -1);
CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
if( gx_ccr.routing_rule_install.presence.routing_rule_definition ){
for( int i = 0; i < gx_ccr.routing_rule_install.routing_rule_definition.count; i++){
if( gx_ccr.routing_rule_install.routing_rule_definition.list[i].
presence.routing_rule_identifier){
val.os.len = gx_ccr.routing_rule_install.routing_rule_definition.list[i].
routing_rule_identifier.len;
val.os.data = gx_ccr.routing_rule_install.routing_rule_definition.list[i].
routing_rule_identifier.val;
add_fd_msg(&val,gxDict.avp_routing_rule_identifier ,(struct msg**)&avp_ptr);
}
if( gx_ccr.routing_rule_install.routing_rule_definition.list[i]
.presence.routing_filter){
for( int j = 0; j < gx_ccr.routing_rule_install.routing_rule_definition.
list[i].routing_filter.count; j++){
if( gx_ccr.routing_rule_install.routing_rule_definition.list[i].
routing_filter.list[j].presence.flow_description ){
val.os.len = gx_ccr.routing_rule_install.routing_rule_definition.list[i].
routing_filter.list[j].flow_description.len;
val.os.data = gx_ccr.routing_rule_install.routing_rule_definition.list[i].
routing_filter.list[j].flow_description.val;
add_fd_msg(&val,gxDict.avp_flow_description ,(struct msg**)&avp_ptr);
}
if( gx_ccr.routing_rule_install.routing_rule_definition.list[i].
routing_filter.list[j].presence.flow_direction ){
val.i32 = gx_ccr.routing_rule_install.routing_rule_definition.list[i].
routing_filter.list[j].flow_direction;
add_fd_msg(&val,gxDict.avp_flow_direction ,(struct msg**)&avp_ptr);
}
if( gx_ccr.routing_rule_install.routing_rule_definition.list[i].
routing_filter.list[j].presence.tos_traffic_class ){
val.os.len = gx_ccr.routing_rule_install.routing_rule_definition.list[i].
routing_filter.list[j].tos_traffic_class.len;
val.os.data = gx_ccr.routing_rule_install.routing_rule_definition.list[i].
routing_filter.list[j].tos_traffic_class.val;
add_fd_msg(&val,gxDict.avp_tos_traffic_class,(struct msg**)&avp_ptr);
}
if( gx_ccr.routing_rule_install.routing_rule_definition.list[i].
routing_filter.list[j].presence.security_parameter_index ){
val.os.len = gx_ccr.routing_rule_install.routing_rule_definition.list[i].
routing_filter.list[j].security_parameter_index.len;
val.os.data = gx_ccr.routing_rule_install.routing_rule_definition.list[i].
routing_filter.list[j].security_parameter_index.val;
add_fd_msg(&val,gxDict.avp_security_parameter_index,(struct msg**)&avp_ptr);
}
if( gx_ccr.routing_rule_install.routing_rule_definition.list[i].
routing_filter.list[j].presence.flow_label){
val.os.len = gx_ccr.routing_rule_install.routing_rule_definition.list[i].
routing_filter.list[j].flow_label.len;
val.os.data = gx_ccr.routing_rule_install.routing_rule_definition.list[i].
routing_filter.list[j].flow_label.val;
add_fd_msg(&val,gxDict.avp_flow_label,(struct msg**)&avp_ptr);
}
}
}
if( gx_ccr.routing_rule_install.routing_rule_definition.list[i].presence.precedence ){
val.u32 = gx_ccr.routing_rule_install.routing_rule_definition.list[i].precedence;
add_fd_msg(&val,gxDict.avp_precedence ,(struct msg**)&avp_ptr);
}
if( gx_ccr.routing_rule_install.routing_rule_definition.list[i].presence.routing_ip_address ){
/*TODO address need to fill on the basis of type */
val.os.len = strnlen((char *) gx_ccr.routing_rule_install.routing_rule_definition.
list[i].routing_ip_address.address,MAX_FD_ADDRESS_LEN);
val.os.data = gx_ccr.routing_rule_install.routing_rule_definition.
list[i].routing_ip_address.address;
add_fd_msg(&val,gxDict.avp_routing_ip_address ,(struct msg**)&avp_ptr);
}
if( gx_ccr.routing_rule_install.routing_rule_definition.list[i].presence.ip_can_type){
val.i32 = gx_ccr.routing_rule_install.routing_rule_definition.list[i].ip_can_type;
add_fd_msg(&val,gxDict.avp_ip_can_type ,(struct msg**)&avp_ptr);
}
}
}
}
/* Adding routing rule remove params */
if( gx_ccr.presence.routing_rule_remove ){
CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_routing_rule_remove ,0, &avp_ptr), return -1);
CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
for( int i = 0; i < gx_ccr.routing_rule_remove.routing_rule_identifier.count; i++){
val.os.len = gx_ccr.routing_rule_remove.routing_rule_identifier.list[i].len;
val.os.data = gx_ccr.routing_rule_remove.routing_rule_identifier.list[i].val;
add_fd_msg(&val,gxDict.avp_routing_rule_identifier,(struct msg**)&avp_ptr);
}
}
/* Adding presence_reporting_area_information params */
if( gx_ccr.presence.presence_reporting_area_information ){
CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_presence_reporting_area_information ,0, &avp_ptr), return -1);
CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
for( int i = 0; i < gx_ccr.presence_reporting_area_information.count; i++){
if( gx_ccr.presence_reporting_area_information.list[i].presence.
presence_reporting_area_identifier ){
val.os.len = gx_ccr.presence_reporting_area_information.
list[i].presence_reporting_area_identifier.len;
val.os.data = gx_ccr.presence_reporting_area_information.list[i].
presence_reporting_area_identifier.val;
add_fd_msg(&val,gxDict.avp_presence_reporting_area_identifier,(struct msg**)&avp_ptr);
}
if( gx_ccr.presence_reporting_area_information.list[i].presence.
presence_reporting_area_status ){
val.u32 = gx_ccr.presence_reporting_area_information.list[i].
presence_reporting_area_status;
add_fd_msg(&val,gxDict.avp_presence_reporting_area_status,(struct msg**)&avp_ptr);
}
if( gx_ccr.presence_reporting_area_information.list[i].presence.
presence_reporting_area_elements_list ){
val.os.len = gx_ccr.presence_reporting_area_information.list[i].
presence_reporting_area_elements_list.len;
val.os.data = gx_ccr.presence_reporting_area_information.list[i].
presence_reporting_area_elements_list.val;
add_fd_msg(&val,gxDict.avp_presence_reporting_area_elements_list,(struct msg**)&avp_ptr);
}
if( gx_ccr.presence_reporting_area_information.list[i].presence.
presence_reporting_area_node ){
val.u32 = gx_ccr.presence_reporting_area_information.list[i].
presence_reporting_area_node;
add_fd_msg(&val,gxDict.avp_presence_reporting_area_node,(struct msg**)&avp_ptr);
}
}
}
/* Adding proxy info params */
if( gx_ccr.presence.proxy_info ){
CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_proxy_info ,0, &avp_ptr), return -1);
CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
for( int i = 0; i < gx_ccr.proxy_info.count; i++){
if( gx_ccr.proxy_info.list[i].presence.proxy_host ){
val.os.len = gx_ccr.proxy_info.list[i].proxy_host.len;
val.os.data = gx_ccr.proxy_info.list[i].proxy_host.val;
add_fd_msg(&val,gxDict.avp_proxy_host,(struct msg**)&avp_ptr);
}
if( gx_ccr.proxy_info.list[i].presence.proxy_state ){
val.os.len = gx_ccr.proxy_info.list[i].proxy_state.len;
val.os.data = gx_ccr.proxy_info.list[i].proxy_state.val;
add_fd_msg(&val,gxDict.avp_proxy_state,(struct msg**)&avp_ptr);
}
}
}
/* Adding route record params */
if( gx_ccr.presence.route_record ){
CHECK_FCT_DO(fd_msg_avp_new(gxDict.avp_route_record ,0, &avp_ptr), return -1);
CHECK_FCT_DO(fd_msg_avp_add(msg, MSG_BRW_LAST_CHILD, avp_ptr), return -1);
for(int i = 0; i < gx_ccr.route_record.count; i++){
val.os.len = gx_ccr.route_record.list[i].len;
val.os.data = gx_ccr.route_record.list[i].val;
add_fd_msg(&val,gxDict.avp_route_record ,(struct msg**)&avp_ptr);
}
}
/* Adding Default EPS Bearer Qos params */
if( gx_ccr.presence.default_eps_bearer_qos ) {
struct avp *default_eps_bearer_qos = NULL;
FDCHECK_MSG_ADD_AVP_GROUPED_2( gxDict.avp_default_eps_bearer_qos, msg, MSG_BRW_LAST_CHILD,
default_eps_bearer_qos, rval, goto err );
if ( gx_ccr.default_eps_bearer_qos.presence.qos_class_identifier )
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_qos_class_identifier, default_eps_bearer_qos,
MSG_BRW_LAST_CHILD, gx_ccr.default_eps_bearer_qos.qos_class_identifier, rval, goto err );
if( gx_ccr.default_eps_bearer_qos.presence.allocation_retention_priority ) {
struct avp *allocation_retention_priority = NULL;
FDCHECK_MSG_ADD_AVP_GROUPED_2( gxDict.avp_allocation_retention_priority,
default_eps_bearer_qos, MSG_BRW_LAST_CHILD, allocation_retention_priority, rval, goto err );
if( gx_ccr.default_eps_bearer_qos.allocation_retention_priority.presence.pre_emption_capability ){
FDCHECK_MSG_ADD_AVP_U32( gxDict.avp_priority_level, allocation_retention_priority,
MSG_BRW_LAST_CHILD, gx_ccr.default_eps_bearer_qos.allocation_retention_priority.priority_level,
rval, goto err );
}
if( gx_ccr.default_eps_bearer_qos.allocation_retention_priority.presence.pre_emption_capability ){
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_pre_emption_capability, allocation_retention_priority,
MSG_BRW_LAST_CHILD, gx_ccr.default_eps_bearer_qos.allocation_retention_priority.pre_emption_capability,
rval, goto err );
}
if( gx_ccr.default_eps_bearer_qos.allocation_retention_priority.presence.pre_emption_vulnerability ){
FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_pre_emption_vulnerability, allocation_retention_priority,
MSG_BRW_LAST_CHILD, gx_ccr.default_eps_bearer_qos.allocation_retention_priority.pre_emption_vulnerability,
rval, goto err );
}
}
}
if(gx_ccr.presence.default_qos_information){
if(gx_ccr.default_qos_information.presence.qos_class_identifier){
val.i32 = gx_ccr.default_qos_information.qos_class_identifier;
add_fd_msg(&val,gxDict.avp_qos_class_identifier,(struct msg**)&avp_ptr);
}
if(gx_ccr.default_qos_information.presence.max_requested_bandwidth_ul){
val.u32 = gx_ccr.default_qos_information.max_requested_bandwidth_ul;
add_fd_msg(&val,gxDict.avp_max_requested_bandwidth_ul,(struct msg**)&avp_ptr);
}
if(gx_ccr.default_qos_information.presence.max_requested_bandwidth_dl){
val.u32 = gx_ccr.default_qos_information.max_requested_bandwidth_dl;
add_fd_msg(&val,gxDict.avp_max_requested_bandwidth_dl,(struct msg**)&avp_ptr);
}
if(gx_ccr.default_qos_information.presence.default_qos_name){
val.os.len = gx_ccr.default_qos_information.default_qos_name.len;
val.os.data = gx_ccr.default_qos_information.default_qos_name.val;
add_fd_msg(&val,gxDict.avp_default_qos_name,(struct msg**)&avp_ptr);
}
}
//TODO - FILL IN HERE
#ifdef GX_DEBUG
FD_DUMP_MESSAGE(msg);
#endif
/* send the message */
FDCHECK_MSG_SEND( msg, NULL, NULL, rval, goto err );
goto fini;
err:
/* free the message since an error occurred */
FDCHECK_MSG_FREE(msg);
fini:
return rval;
}
/*
*
* Fun: gx_ccr_cb
*
* Desc: Credit-Control-Request (CCR) command callback
*
* Ret: 0
*
* File: gx_ccr.c
*
The Credit-Control-Request (CCR) command, indicated by
the Command-Code field set to 272 and the 'R'
bit set in the Command Flags field, is sent by the PCEF to the
PCRF over the Gx interface (3GPP TS 29.212).
*
Credit-Control-Request ::= <Diameter Header: 272, REQ, PXY, 16777238>
< Session-Id >
[ DRMP ]
{ Auth-Application-Id }
{ Origin-Host }
{ Origin-Realm }
{ Destination-Realm }
{ CC-Request-Type }
{ CC-Request-Number }
[ Credit-Management-Status ]
[ Destination-Host ]
[ Origin-State-Id ]
* [ Subscription-Id ]
[ OC-Supported-Features ]
* [ Supported-Features ]
[ TDF-Information ]
[ Network-Request-Support ]
* [ Packet-Filter-Information ]
[ Packet-Filter-Operation ]
[ Bearer-Identifier ]
[ Bearer-Operation ]
[ Dynamic-Address-Flag ]
[ Dynamic-Address-Flag-Extension ]
[ PDN-Connection-Charging-ID ]
[ Framed-IP-Address ]
[ Framed-IPv6-Prefix ]
[ IP-CAN-Type ]
[ 3GPP-RAT-Type ]
[ AN-Trusted ]
[ RAT-Type ]
[ Termination-Cause ]
[ User-Equipment-Info ]
[ QoS-Information ]
[ QoS-Negotiation ]
[ QoS-Upgrade ]
[ Default-EPS-Bearer-QoS ]
[ Default-QoS-Information ]
* 2 [ AN-GW-Address ]
[ AN-GW-Status ]
[ 3GPP-SGSN-MCC-MNC ]
[ 3GPP-SGSN-Address ]
[ 3GPP-SGSN-Ipv6-Address ]
[ 3GPP-GGSN-Address ]
[ 3GPP-GGSN-Ipv6-Address ]
[ 3GPP-Selection-Mode ]
[ RAI ]
[ 3GPP-User-Location-Info ]
[ Fixed-User-Location-Info ]
[ User-Location-Info-Time ]
[ User-CSG-Information ]
[ TWAN-Identifier ]
[ 3GPP-MS-TimeZone ]
* [ RAN-NAS-Release-Cause ]
[ 3GPP-Charging-Characteristics ]
[ Called-Station-Id ]
[ PDN-Connection-ID ]
[ Bearer-Usage ]
[ Online ]
[ Offline ]
* [ TFT-Packet-Filter-Information ]
* [ Charging-Rule-Report ]
* [ Application-Detection-Information ]
* [ Event-Trigger ]
[ Event-Report-Indication ]
[ Access-Network-Charging-Address ]
* [ Access-Network-Charging-Identifier-Gx ]
* [ CoA-Information ]
* [ Usage-Monitoring-Information ]
[ NBIFOM-Support ]
[ NBIFOM-Mode ]
[ Default-Access ]
[ Origination-Time-Stamp ]
[ Maximum-Wait-Time ]
[ Access-Availability-Change-Reason ]
[ Routing-Rule-Install ]
[ Routing-Rule-Remove ]
[ HeNB-Local-IP-Address ]
[ UE-Local-IP-Address ]
[ UDP-Source-Port ]
[ TCP-Source-Port ]
* [ Presence-Reporting-Area-Information ]
[ Logical-Access-Id ]
[ Physical-Access-Id ]
* [ Proxy-Info ]
* [ Route-Record ]
[ 3GPP-PS-Data-Off-Status ]
* [ AVP ]
*/
/*
 * Dispatch callback invoked by freeDiameter when a Gx Credit-Control-Request
 * (CCR, command code 272) arrives.  Parses the request into a GxCCR structure
 * and answers with Result-Code 2001 (DIAMETER_SUCCESS).
 *
 * @param msg  address of the received request message; cleared here so the
 *             framework does not process/free the request again.
 * @param pavp AVP that triggered the dispatch (unused).
 * @param sess Diameter session associated with the request (unused).
 * @param data opaque data registered with the callback (unused).
 * @param act  dispatch action out-parameter (unused).
 * @return FD_REASON_OK on success, a non-zero reason code on failure.
 */
int gx_ccr_cb
(
   struct msg ** msg,
   struct avp * pavp,
   struct session * sess,
   void * data,
   enum disp_action * act
)
{
   int ret = FD_REASON_OK;
   struct msg *rqst = *msg;
   struct msg *ans = rqst;
   GxCCR *ccr = NULL;

   *msg = NULL;

#if 1
   FD_DUMP_MESSAGE(rqst);
#endif

   /* allocate the ccr message; the original code passed an unchecked
      malloc() result straight to memset(), crashing on allocation failure */
   ccr = (GxCCR*)malloc(sizeof(*ccr));
   if (ccr == NULL)
   {
      printf("Memory allocation failed while processing CCR\n");
      return -1;
   }
   memset((void*)ccr, 0, sizeof(*ccr));

   ret = gx_ccr_parse(rqst, ccr);
   if (ret != FD_REASON_OK)
      goto err;

   /*
    * TODO - Add request processing code
    */

   FDCHECK_MSG_NEW_ANSWER_FROM_REQ( fd_g_config->cnf_dict, ans, ret, goto err );
   FDCHECK_MSG_ADD_ORIGIN( ans, ret, goto err );
   FDCHECK_MSG_ADD_AVP_S32( gxDict.avp_result_code, ans, MSG_BRW_LAST_CHILD, 2001, ret, goto err );
   FDCHECK_MSG_SEND( ans, NULL, NULL, ret, goto err );

   goto fini;

err:
   printf("Error (%d) while processing CCR\n", ret);
   /* Fall through to the common cleanup.  The original error path called
      free(ccr) here and then still reached gx_ccr_free(ccr) below,
      releasing the same pointer twice (use-after-free / double free). */

fini:
   /* NOTE(review): assumes gx_ccr_free() releases the GxCCR contents (and
      the structure itself if it owns it) — confirm against its definition;
      the original success path relied on the same behavior. */
   gx_ccr_free(ccr);
   return ret;
}
|
nikhilc149/e-utran-features-bug-fixes | ulpc/d_df/include/TCPDataProcessor.h | <reponame>nikhilc149/e-utran-features-bug-fixes
/*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
#ifndef __PCAP_DATA_PROCESSOR_H_
#define __PCAP_DATA_PROCESSOR_H_
#include <pcap.h>
#include "Common.h"
#include "TCPListener.h"
#define ERROR -1
#define UDP_LEN 4096
class TCPListener;
class DdfListener : public ESocket::TCP::ListenerPrivate
{
public:
/*
 * @brief : Constructor of class DdfListener; binds the listener to the
 *          owning TCPListener socket thread.
 * @param : thread, TCPListener thread that services this listener
 */
DdfListener(TCPListener &thread);
/*
 * @brief : Destructor of class DdfListener
 */
~DdfListener();
/*
 * @brief : EPCTool library callback invoked when the listening socket
 *          is closed.
 */
Void onClose();
/*
 * @brief : EPCTool library callback invoked on a listening-socket error.
 */
Void onError();
/*
 * @brief : Factory callback invoked for each accepted connection; creates
 *          the talker socket that will service the new peer.
 * @param : thread, socket thread that will own the new talker
 * @return : Returns pointer to the newly created talker socket
 */
ESocket::TCP::TalkerPrivate *createSocket(ESocket::ThreadPrivate &thread);
private:
/*
 * @brief : Default constructor of class DdfListener; private so that a
 *          listener can only be built with its owning TCPListener thread.
 */
DdfListener();
};
class TCPDataProcessor : public ESocket::TCP::TalkerPrivate
{
public:
/*
 * @brief : Constructor of class TCPDataProcessor
 * @param : thread, TCPListener thread that owns this talker socket
 */
TCPDataProcessor(TCPListener &thread);
/*
 * @brief : Destructor of class TCPDataProcessor
 */
virtual ~TCPDataProcessor();
/*
 * @brief : Function to create a packet from a received DdfPacket
 * @param : ddfPacket, DdfPacket pointer to build the packet from
 * @param : packetLength, out parameter receiving the built packet's length
 * @return : Returns pointer to the created packet buffer
 *           (NOTE(review): caller presumably owns/frees it - confirm in the
 *           implementation)
 */
uint8_t * createPacket(DdfPacket *ddfPacket, uint32_t *packetLength);
/*
 * @brief : Function to dump a packet buffer into a pcap file
 * @param : pcap_dumper, pointer to the pcap_dumper_t to write through
 * @param : pktPtr, packet bytes to dump
 * @param : packetLength, length of the packet in bytes
 * @return : Returns nothing
 */
void dumpBufferInPcapFile(pcap_dumper_t *pcap_dumper, uint8_t *pktPtr,
uint32_t packetLength);
/*
 * @brief : Function to process data received from CP/DP
 * @param : buffer, packet/information to be processed
 * @return : Returns void
 */
void processPacket(uint8_t *buffer);
/*
 * @brief : EPCTool library callback invoked when the connection is established.
 */
Void onConnect();
/*
 * @brief : EPCTool library callback invoked when data arrives on the socket.
 */
Void onReceive();
/*
 * @brief : EPCTool library callback invoked when the connection is closed.
 */
Void onClose();
/*
 * @brief : EPCTool library callback invoked on a socket error.
 */
Void onError();
/*
 * @brief : Function to send a packet acknowledgement to CP/DP
 * @param : sequenceNumber, sequence number of the packet being acknowledged
 *          (original comment documented a non-existent "buffer" parameter)
 * @return : Returns void
 */
Void sendAck(const uint32_t &sequenceNumber);
private:
/*
 * @brief : Default constructor of class TCPDataProcessor; private so that a
 *          talker can only be built with its owning TCPListener thread.
 */
TCPDataProcessor();
/* IP address of the connected peer */
std::string remoteIpAddress;
/* shared across all talkers - sequence numbering for sent packets */
static uint32_t sequence_numb;
};
#endif /* __PCAP_DATA_PROCESSOR_H_ */
|
nikhilc149/e-utran-features-bug-fixes | cp/ue.h | /*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef UE_H
#define UE_H
/**
* @file
*
* Contains all data structures required by 3GPP TS 23.401 Tables 5.7.3-1 and
5.7.4-1 (that are necessary for the current implementation) to describe the
* Connections, state, bearers, etc as well as functions to manage and/or
* obtain value for their fields.
*
*/
#include <stdint.h>
#include <arpa/inet.h>
#include <rte_malloc.h>
#include <rte_jhash.h>
#include "main.h"
#include "gtpv2c_ie.h"
#include "interface.h"
#include "packet_filters.h"
#include "pfcp_struct.h"
#include "ngic_timer.h"
#ifdef USE_CSID
#include "csid_struct.h"
#endif /* USE_CSID */
/* li parameter */
#define LI_DF_CSV_IMSI_COLUMN 0
#define LI_DF_CSV_LI_DEBUG_COLUMN 1
#define LI_DF_CSV_EVENT_CC_COLUMN 2
#define LI_DF_CSV_DDF2_IP_COLUMN 3
#define LI_DF_CSV_DDF2_PORT_COLUMN 4
#define LI_DF_CSV_DDF3_IP_COLUMN 5
#define LI_DF_CSV_DDF3_PORT_COLUMN 6
#define LI_LDB_ENTRIES_DEFAULT 1024
#define SDF_FILTER_TABLE "sdf_filter_table"
#define ADC_TABLE "adc_rule_table"
#define PCC_TABLE "pcc_table"
#define SESSION_TABLE "session_table"
#define METER_PROFILE_SDF_TABLE "meter_profile_sdf_table"
#define METER_PROFILE_APN_TABLE "meter_profile_apn_table"
#define SDF_FILTER_TABLE_SIZE (1024)
#define ADC_TABLE_SIZE (1024)
#define PCC_TABLE_SIZE (1025)
#define METER_PROFILE_SDF_TABLE_SIZE (2048)
#define DPN_ID (12345)
#define MAX_BEARERS (14)
#define MAX_FILTERS_PER_UE (16)
#define MAX_RULES (32)
#define MAX_NETCAP_LEN (64)
#define MAX_APN_LEN (64)
#define MAX_SDF_DESC_LEN (512)
#define RULE_CNT (16)
#define PROC_LEN (64)
#define GET_UE_IP(ip_pool, ip_pool_mask, ue_index) \
(((ip_pool.s_addr | (~ip_pool_mask.s_addr)) \
- htonl(ue_index)) - 0x01000000)
#define INTERFACE \
( (SAEGWC == config.cp_type) ? Sxa_Sxb : ( (PGWC != config.cp_type) ? Sxa : Sxb ) )
#ifndef CP_BUILD
#define FQDN_LEN 256
#endif
#define NUMBER_OF_PDR_PER_RULE 2
#define NUMBER_OF_QER_PER_RULE 2
#define MAX_RULE_PER_BEARER 16
#define NUMBER_OF_PDR_PER_BEARER 32
#define NUMBER_OF_QER_PER_BEARER 32
#define QER_INDEX_FOR_ACCESS_INTERFACE 0
#define QER_INDEX_FOR_CORE_INTERFACE 1
#define CSR_SEQUENCE(x) (\
(x->header.gtpc.teid_flag == 1)? x->header.teid.has_teid.seq : x->header.teid.no_teid.seq \
)
#define LEN 12
#define DEFAULT_RULE_COUNT 1
#define QCI_VALUE 6
#define GX_PRIORITY_LEVEL 1
#define PREEMPTION_CAPABILITY_DISABLED 1
#define PREEMPTION_VALNERABILITY_ENABLED 0
#define GX_ENABLE 2
#define PRECEDENCE 2
#define SERVICE_INDENTIFIRE 11
#define RATING_GROUP 1
#define REQUESTED_BANDWIDTH_UL 16500
#define REQUESTED_BANDWIDTH_DL 16500
#define GURATEED_BITRATE_UL 0
#define GURATEED_BITRATE_DL 0
#define RULE_NAME "default rule"
#define RULE_LENGTH strnlen(RULE_NAME, LEN)
#define PROTO_ID 0
#define LOCAL_IP_MASK 0
#define LOCAL_IP_ADDR 0
#define PORT_LOW 0
#define PORT_HIGH 65535
#define REMOTE_IP_MASK 0
#define REMOTE_IP_ADDR 0
#define LOCAL_IPV6_MASK 4
#define REMOTE_IPV6_MASK 4
#define GX_FLOW_COUNT 1
#define MAX_UINT8_T_VAL 255
#pragma pack(1)
struct eps_bearer_t;
struct pdn_connection_t;
/**
* @brief : Maintains CGI (Cell Global Identifier) data from user location information
*/
typedef struct cgi_t {
uint8_t cgi_mcc_digit_2 :4;
uint8_t cgi_mcc_digit_1 :4;
uint8_t cgi_mnc_digit_3 :4;
uint8_t cgi_mcc_digit_3 :4;
uint8_t cgi_mnc_digit_2 :4;
uint8_t cgi_mnc_digit_1 :4;
uint16_t cgi_lac;
uint16_t cgi_ci;
} cgi_t;
/**
* @brief : Maintains SAI (Service Area Identifier) data from user location information
*/
typedef struct sai_t {
uint8_t sai_mcc_digit_2 :4;
uint8_t sai_mcc_digit_1 :4;
uint8_t sai_mnc_digit_3 :4;
uint8_t sai_mcc_digit_3 :4;
uint8_t sai_mnc_digit_2 :4;
uint8_t sai_mnc_digit_1 :4;
uint16_t sai_lac;
uint16_t sai_sac;
}sai_t;
/**
* @brief : Maintains RAI (Routing Area Identity) data from user location information
*/
typedef struct rai_t {
uint8_t ria_mcc_digit_2 :4;
uint8_t ria_mcc_digit_1 :4;
uint8_t ria_mnc_digit_3 :4;
uint8_t ria_mcc_digit_3 :4;
uint8_t ria_mnc_digit_2 :4;
uint8_t ria_mnc_digit_1 :4;
uint16_t ria_lac;
uint16_t ria_rac;
} rai_t;
/**
* @brief : Maintains TAI (Tracking Area Identity) data from user location information
*/
typedef struct tai_t {
uint8_t tai_mcc_digit_2 :4;
uint8_t tai_mcc_digit_1 :4;
uint8_t tai_mnc_digit_3 :4;
uint8_t tai_mcc_digit_3 :4;
uint8_t tai_mnc_digit_2 :4;
uint8_t tai_mnc_digit_1 :4;
uint16_t tai_tac;
} tai_t;
/**
* @brief : Maintains LAI (Location Area Identifier) data from user location information
*/
typedef struct lai_t {
uint8_t lai_mcc_digit_2 :4;
uint8_t lai_mcc_digit_1 :4;
uint8_t lai_mnc_digit_3 :4;
uint8_t lai_mcc_digit_3 :4;
uint8_t lai_mnc_digit_2 :4;
uint8_t lai_mnc_digit_1 :4;
uint16_t lai_lac;
} lai_t;
/**
* @brief : Maintains ECGI (E-UTRAN Cell Global Identifier) data from user location information
*/
typedef struct ecgi_t {
uint8_t ecgi_mcc_digit_2 :4;
uint8_t ecgi_mcc_digit_1 :4;
uint8_t ecgi_mnc_digit_3 :4;
uint8_t ecgi_mcc_digit_3 :4;
uint8_t ecgi_mnc_digit_2 :4;
uint8_t ecgi_mnc_digit_1 :4;
uint8_t ecgi_spare :4;
uint32_t eci :28;
} ecgi_t;
/**
* @brief : Maintains Macro eNodeB ID data from user location information
*/
typedef struct macro_enb_id_t {
uint8_t menbid_mcc_digit_2 :4;
uint8_t menbid_mcc_digit_1 :4;
uint8_t menbid_mnc_digit_3 :4;
uint8_t menbid_mcc_digit_3 :4;
uint8_t menbid_mnc_digit_2 :4;
uint8_t menbid_mnc_digit_1 :4;
uint8_t menbid_spare :4;
uint8_t menbid_macro_enodeb_id :4;
uint16_t menbid_macro_enb_id2;
} macro_enb_id_t;
/**
 * @brief : Maintains Home eNodeB ID data from user location information
 *          (MCC/MNC digits packed as 4-bit BCD nibbles, followed by the
 *          Home eNodeB identifier split across the bitfields)
 */
typedef struct home_enb_id_t {
uint8_t henbid_mcc_digit_2 :4;
uint8_t henbid_mcc_digit_1 :4;
uint8_t henbid_mnc_digit_3 :4;
uint8_t henbid_mcc_digit_3 :4;
uint8_t henbid_mnc_digit_2 :4;
uint8_t henbid_mnc_digit_1 :4;
uint8_t henbid_spare :4;
uint8_t henbid_home_enodeb_id :4;
uint32_t henbid_home_enb_id2 :24;
} home_enb_id_t;
/**
* @brief : Maintains Extended Macro eNodeB ID data from user location information
*/
typedef struct extnded_macro_enb_id_t {
uint8_t emenbid_mcc_digit_2 :4;
uint8_t emenbid_mcc_digit_1 :4;
uint8_t emenbid_mnc_digit_3 :4;
uint8_t emenbid_mcc_digit_3 :4;
uint8_t emenbid_mnc_digit_2 :4;
uint8_t emenbid_mnc_digit_1 :4;
uint8_t emenbid_smenb :1;
uint8_t emenbid_spare :2;
uint8_t emenbid_extnded_macro_enb_id :5;
uint16_t emenbid_extnded_macro_enb_id2;
} extnded_macro_enb_id_t;
/**
* @brief : Maintains user location information data
*/
typedef struct user_loc_info_t {
uint8_t lai;
uint8_t tai;
uint8_t rai;
uint8_t sai;
uint8_t cgi;
uint8_t ecgi;
uint8_t macro_enodeb_id;
uint8_t extnded_macro_enb_id;
cgi_t cgi2;
sai_t sai2;
rai_t rai2;
tai_t tai2;
lai_t lai2;
ecgi_t ecgi2;
macro_enb_id_t macro_enodeb_id2;
extnded_macro_enb_id_t extended_macro_enodeb_id2;
} user_loc_info_t;
/**
 * @brief : Maintains Presence Reporting Area Action data: the action to
 *          perform, the PRA identifier, and per-type element counts with the
 *          corresponding area-element lists.
 *          NOTE(review): presumably maps the GTPv2-C PRA Action IE
 *          (3GPP TS 29.274) - confirm against the encoder/decoder.
 *          ("reproting" in the type name is a pre-existing typo kept for
 *          source compatibility.)
 */
typedef struct presence_reproting_area_action_t {
uint8_t action;
uint32_t pres_rptng_area_idnt;
/* number of valid entries in each of the arrays below */
uint8_t number_of_tai;
uint8_t number_of_rai;
uint8_t nbr_of_macro_enb;
uint8_t nbr_of_home_enb;
uint8_t number_of_ecgi;
uint8_t number_of_sai;
uint8_t number_of_cgi;
uint8_t nbr_of_extnded_macro_enb;
cgi_t cgis[MAX_CGIS];
sai_t sais[MAX_SAIS];
rai_t rais[MAX_RAIS];
tai_t tais[MAX_TAIS];
ecgi_t ecgis[MAX_ECGIS];
macro_enb_id_t macro_enodeb_ids[MAX_MACRO_ENB_IDS];
home_enb_id_t home_enb_ids[MAX_HOME_ENB_IDS];
extnded_macro_enb_id_t extended_macro_enodeb_ids[MAX_EX_MACRO_ENB_IDS];
} presence_reproting_area_action_t;
/**
 * @brief : Maintains Presence Reporting Area Information: the PRA identifier
 *          plus status flags.
 *          NOTE(review): from the names, ipra/opra presumably flag the UE as
 *          inside/outside the PRA and inapra as PRA-inactive - confirm
 *          against the PRA Information IE handling code.
 */
typedef struct presence_reproting_area_info_t {
uint32_t pra_identifier;
uint8_t inapra;
uint8_t opra;
uint8_t ipra;
} presence_reproting_area_info_t;
/**
* @brief : Maintains serving network mcc and mnc information
*/
typedef struct serving_nwrk_t {
uint8_t mcc_digit_2;
uint8_t mcc_digit_1;
uint8_t mnc_digit_3;
uint8_t mcc_digit_3;
uint8_t mnc_digit_2;
uint8_t mnc_digit_1;
} serving_nwrk_t;
/**
* @brief : Maintains rat type information
*/
typedef struct rat_type_t {
uint8_t rat_type;
uint16_t len;
}rat_type_t;
/**
* @brief : Maintains apn related information
*/
typedef struct apn_t {
char *apn_name_label;
int apn_usage_type;
char apn_net_cap[MAX_NETCAP_LEN];
int trigger_type;
int uplink_volume_th;
int downlink_volume_th;
int time_th;
size_t apn_name_length;
int8_t apn_idx;
struct in_addr ip_pool_ip;
struct in_addr ip_pool_mask;
struct in6_addr ipv6_network_id;
uint8_t ipv6_prefix_len;
} apn;
/**
* @brief : Maintains secondary rat related information
*/
typedef struct secondary_rat_t {
uint8_t spare2:6;
uint8_t irsgw :1;
uint8_t irpgw :1;
uint8_t rat_type;
uint8_t eps_id:4;
uint8_t spare3:4;
uint32_t start_timestamp;
uint32_t end_timestamp;
uint64_t usage_data_dl;
uint64_t usage_data_ul;
} secondary_rat_t;
extern int total_apn_cnt;
/**
* @brief : Maintains eps bearer id
*/
typedef struct ebi_id_t {
uint64_t ebi_id;
}ebi_id;
/**
* @brief : Maintains sdf packet filter information
*/
typedef struct sdf_pkt_fltr_t {
uint8_t proto_id;
uint8_t v4;
uint8_t v6;
uint8_t proto_mask;
uint8_t direction;
uint8_t action;
uint8_t local_ip_mask;
uint8_t remote_ip_mask;
uint16_t local_port_low;
uint16_t local_port_high;
uint16_t remote_port_low;
uint16_t remote_port_high;
union {
struct in_addr local_ip_addr;
struct in6_addr local_ip6_addr;
}ulocalip;
union{
struct in_addr remote_ip_addr;
struct in6_addr remote_ip6_addr;
}uremoteip;
} sdf_pkt_fltr;
/**
* @brief : Maintains flow description data
*/
typedef struct flow_description {
char sdf_flow_description[MAX_SDF_DESC_LEN];
uint8_t pckt_fltr_identifier;
uint16_t flow_desc_len;
int32_t flow_direction;
sdf_pkt_fltr sdf_flw_desc;
}flow_desc_t;
/**
* @brief : Maintains information about dynamic rule
*/
typedef struct dynamic_rule{
uint8_t num_flw_desc;
bool predefined_rule;
int32_t online;
int32_t offline;
int32_t flow_status;
int32_t reporting_level;
uint32_t precedence;
uint32_t service_id;
uint32_t rating_group;
uint32_t def_bearer_indication;
char rule_name[RULE_NAME_LEN];
char af_charging_id_string[256];
bearer_qos_ie qos;
flow_desc_t flow_desc[32];
pdr_t *pdr[NUMBER_OF_PDR_PER_RULE];
}dynamic_rule_t;
enum rule_action_t {
RULE_ACTION_INVALID,
RULE_ACTION_ADD = 1,
RULE_ACTION_MODIFY = 2,
RULE_ACTION_MODIFY_ADD_RULE = 3,
RULE_ACTION_MODIFY_REMOVE_RULE = 4,
RULE_ACTION_DELETE = 5,
RULE_ACTION_MAX
};
/**
* @brief : Maintains information about pcc rule
*/
struct pcc_rule{
enum rule_action_t action;
bool predefined_rule;
union{
dynamic_rule_t dyn_rule;
/* maintain the predefined rule info */
dynamic_rule_t pdef_rule;
}urule;
}__attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
typedef struct pcc_rule pcc_rule_t;
/**
* @brief : Currently policy from PCRF can be two things
* 1. Default bearer QOS
* 2. PCC Rule
* Default bearer QOS can be modified
* PCC Rules can be Added, Modified or Deleted
* These policies should be applied to the PDN or eps_bearer
* data structures only after success from the access side
*/
struct policy{
bool default_bearer_qos_valid;
uint8_t count;
uint8_t num_charg_rule_install;
uint8_t num_charg_rule_modify;
uint8_t num_charg_rule_delete;
bearer_qos_ie default_bearer_qos;
pcc_rule_t *pcc_rule[MAX_RULES];
}__attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
typedef struct policy policy_t;
/**
* @brief : Maintains selection mode info
*/
typedef struct selection_mode{
uint8_t spare2:6;
uint8_t selec_mode:2;
}selection_mode;
/**
* @brief : Maintains indication flag oi value
*/
typedef struct indication_flag_t {
uint8_t oi:1; /* Operation Indication */
uint8_t ltempi:1; /* LTE-M RAT Type reporting to PGW Indication */
uint8_t crsi:1; /* Change Reporting support indication */
uint8_t sgwci:1; /* SGW Change Indication */
uint8_t hi:1; /* Handover Indication */
uint8_t ccrsi:1; /* CSG Change Reporting support indication */
uint8_t cprai:1; /* Change of Presence Reporting Area information Indication */
uint8_t clii:1; /* Change of Location Information Indication */
uint8_t dfi:1; /* Direct Forwarding Indication */
uint8_t arrl:1; /* Abnormal Release of Radio Link */
uint8_t daf:1; /*Dual Address Bearer Flag*/
uint8_t cfsi:1; /* Change F-TEID support indication */
uint8_t pt:1; /*(S5/S8 Protocol Type */
uint8_t s11tf:1; /* S11-u teid Indication*/
}indication_flag_t;
/**
* @brief : Maintains Time zone information
*/
typedef struct ue_tz_t{
uint8_t tz;
uint8_t dst;
}ue_tz;
/**
* @brief : Maintains user CSG information
*/
typedef struct user_csg_i_t {
uint8_t mcc_digit_2 :4;
uint8_t mcc_digit_1 :4;
uint8_t mnc_digit_3 :4;
uint8_t mcc_digit_3 :4;
uint8_t mnc_digit_2 :4;
uint8_t mnc_digit_1 :4;
uint8_t spare2 :5;
uint32_t csg_id :3;
uint32_t csg_id2 :24;
uint8_t access_mode :2;
uint8_t spare3 :4;
uint8_t lcsg :1;
uint8_t cmi :1;
} user_csg_i;
/**
* @brief : Maintains timestamp and counter information
*/
typedef struct counter_t{
uint32_t timestamp_value;
uint8_t counter_value;
}counter;
/**
* @brief : Maintains li configurations
*/
typedef struct li_data {
uint64_t id;
uint8_t s11;
uint8_t sgw_s5s8c;
uint8_t pgw_s5s8c;
uint8_t sxa;
uint8_t sxb;
uint8_t sxa_sxb;
uint8_t forward;
} li_data_t;
/**
* @brief : Maintains imsi to li mapping
*/
typedef struct imsi_id_hash {
uint8_t cntr;
uint64_t ids[MAX_LI_ENTRIES_PER_UE];
} imsi_id_hash_t;
/**
* @brief : Status of request processing
*/
enum request_status_t {
REQ_PROCESS_DONE = 0,
REQ_IN_PROGRESS = 1
};
/**
* @brief : Maintains Status of current req in progress
*/
typedef struct req_status_info_t {
uint32_t seq;
enum request_status_t status;
} req_status_info;
/*
* @brief : Used to store rule status received in CCA
* send provision ack message to PCRF*/
typedef struct pro_ack_rule_status {
char rule_name[RULE_NAME_LEN];
uint8_t rule_status;
}pro_ack_rule_status_t;
typedef struct pro_ack_rule_array {
uint8_t rule_cnt;
pro_ack_rule_status_t rule[MAX_RULE_PER_BEARER];
}pro_ack_rule_array_t;
/**
* @brief : Maintains ue related information
*/
struct ue_context_t {
bool cp_mode_flag;
bool sgwu_changed;
bool ltem_rat_type_flag;
bool serving_nw_flag;
bool rat_type_flag;
bool second_rat_flag;
bool ue_time_zone_flag;
bool uci_flag;
bool mo_exception_flag;
bool mme_changed_flag;
bool change_report;
bool piggyback;
uint8_t cp_mode;
uint8_t imsi_len;
uint8_t unathenticated_imsi;
uint8_t msisdn_len;
uint8_t proc_trans_id;
uint8_t mbc_cleanup_status;
uint8_t uli_flag;
uint8_t is_sent_bearer_rsc_failure_indc;
uint8_t second_rat_count;
uint8_t change_report_action;
uint8_t bearer_count;
uint8_t pfcp_sess_count;
uint8_t selection_flag;
uint8_t up_selection_flag;
uint8_t promotion_flag;
uint8_t dcnr_flag;
uint8_t procedure;
uint8_t upd_pdn_set_ebi_index;
uint8_t num_pdns;
uint8_t dupl;
uint8_t li_data_cntr;
uint8_t indirect_tunnel_flag; /* indication for presence indirect tunnel */
uint8_t update_sgw_fteid; /* S1 HO Flag to forward MBR Req to PGWC */
uint8_t pfcp_rept_resp_sent_flag; /* Flag to indicate report response already sent or not*/
uint8_t pra_flag;
uint16_t bearer_bitmap;
uint16_t teid_bitmap;
uint32_t ue_initiated_seq_no;
uint32_t sequence;
uint32_t s11_sgw_gtpc_teid;
uint32_t s11_mme_gtpc_teid;
uint64_t imsi;
uint64_t mei;
uint64_t msisdn;
uint64_t event_trigger;
/*PFCP paramteres Unique IDs Per UE */
uint8_t bar_rule_id_offset;
uint16_t pdr_rule_id_offset;
uint32_t far_rule_id_offset;
uint32_t urr_rule_id_offset;
uint32_t qer_rule_id_offset;
/* Req Status
* retransmitted request identifying
*/
req_status_info req_status;
ambr_ie mn_ambr;
user_loc_info_t uli;
user_loc_info_t old_uli;
serving_nwrk_t serving_nw;
rat_type_t rat_type;
secondary_rat_t second_rat[MAX_BEARERS];
indication_flag_t indication_flag;
ue_tz tz;
user_csg_i uci;
counter mo_exception_data_counter;
#ifdef USE_CSID
/* Temp cyclic linking of the MME and SGW FQ-CSID */
sess_fqcsid_t *mme_fqcsid;
sess_fqcsid_t *sgw_fqcsid;
sess_fqcsid_t *pgw_fqcsid;
sess_fqcsid_t *up_fqcsid;
#endif /* USE_CSID */
selection_mode select_mode;
node_address_t s11_sgw_gtpc_ip;
node_address_t s11_mme_gtpc_ip;
struct pdn_connection_t *pdns[MAX_BEARERS];
/*VS: TODO: Move bearer information in pdn structure and remove from UE context */
struct eps_bearer_t *eps_bearers[MAX_BEARERS*2]; /* index by ebi - 1 */
/* temporary bearer to be used during resource bearer cmd -
* create/deletee bearer req - rsp */
struct eps_bearer_t *ded_bearer;
/* User Level Packet Copying Configurations */
li_data_t li_data[MAX_LI_ENTRIES_PER_UE];
struct indirect_tunnel_t *indirect_tunnel; /* maintains bearers and sessions for indirect tunnel */
presence_reproting_area_action_t *pre_rptng_area_act;
presence_reproting_area_info_t pre_rptng_area_info;
} __attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
typedef struct ue_context_t ue_context;
/**
* @brief : Maintains pdn connection information
*/
struct pdn_connection_t {
uint8_t proc;
uint8_t state;
uint8_t bearer_control_mode;
uint8_t prefix_len;
uint8_t enb_query_flag;
uint8_t generate_cdr;
uint8_t dns_query_domain; /* need to maintain DNS query Domain type */
uint8_t default_bearer_id;
uint8_t num_bearer;
uint8_t requested_pdn_type;
uint8_t is_default_dl_sugg_pkt_cnt_sent:1; /* Need to send default DL Buffering Suggested
Packet Count in first Report Response */
uint8_t fqdn[FQDN_LEN];
char gx_sess_id[GX_SESS_ID_LEN];
bool flag_fqcsid_modified;
bool old_sgw_addr_valid;
int16_t mapped_ue_usage_type;
uint32_t call_id; /* Call ID ref. to session id of CCR */
uint32_t apn_restriction;
uint32_t csr_sequence; /* CSR sequence number for identify CSR retransmission req. */
uint32_t s5s8_sgw_gtpc_teid;
uint32_t s5s8_pgw_gtpc_teid;
uint64_t seid;
uint64_t dp_seid;
unsigned long rqst_ptr; /* need to maintain reqs ptr for RAA*/
apn *apn_in_use;
ambr_ie apn_ambr;
#ifdef USE_CSID
fqcsid_t mme_csid;
fqcsid_t sgw_csid;
fqcsid_t pgw_csid;
fqcsid_t up_csid;
#endif /* USE_CSID */
struct eps_bearer_t *eps_bearers[MAX_BEARERS*2]; /* index by ebi - 1 */
struct eps_bearer_t *packet_filter_map[MAX_FILTERS_PER_UE];
struct{
struct in_addr ipv4;
struct in6_addr ipv6;
}uipaddr;
node_address_t upf_ip;
node_address_t s5s8_sgw_gtpc_ip;
node_address_t s5s8_pgw_gtpc_ip;
node_address_t old_sgw_addr;
pdn_type_ie pdn_type;
/* See 3GPP TS 32.298 5.1.2.2.7 for Charging Characteristics fields*/
charging_characteristics_ie charging_characteristics;
pro_ack_rule_array_t pro_ack_rule_array;
void *node_sel;
policy_t policy;
bar_t bar; /* As per spec at most one bar per session */
peerData *timer_entry; /* timer entry data for stop timer session */
ue_context *context; /* Create a cyclic linking to access the
data structures of UE */
}__attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
typedef struct pdn_connection_t pdn_connection;
/**
* @brief : Maintains eps bearer related information
*/
struct eps_bearer_t {
uint8_t eps_bearer_id;
uint8_t pdr_count;
uint8_t qer_count;
uint8_t num_packet_filters;
uint8_t num_dynamic_filters;
uint8_t num_prdef_filters;
uint8_t flow_desc_check:1;
uint8_t qos_bearer_check:1;
uint8_t arp_bearer_check:1;
uint32_t sequence; /* To store seq number of incoming req for bearer*/
uint32_t charging_id; /* Generate ID while creating default bearer */
uint32_t cdr_seq_no; /* Seq no for each bearer used as CDR field*/
uint32_t s1u_sgw_gtpu_teid;
uint32_t s5s8_sgw_gtpu_teid;
uint32_t s5s8_pgw_gtpu_teid;
uint32_t s1u_enb_gtpu_teid;
uint32_t s11u_mme_gtpu_teid;
uint32_t s11u_sgw_gtpu_teid;
int packet_filter_map[MAX_FILTERS_PER_UE];
node_address_t s1u_sgw_gtpu_ip;
node_address_t s5s8_sgw_gtpu_ip;
node_address_t s5s8_pgw_gtpu_ip;
node_address_t s1u_enb_gtpu_ip;
node_address_t s11u_mme_gtpu_ip;
node_address_t s11u_sgw_gtpu_ip;
pdr_t *pdrs[NUMBER_OF_PDR_PER_BEARER]; /* Packet Detection identifier/Rule_ID */
qer qer_id[NUMBER_OF_QER_PER_BEARER];
bearer_qos_ie qos;
dynamic_rule_t *dynamic_rules[MAX_RULE_PER_BEARER];
dynamic_rule_t *prdef_rules[MAX_RULE_PER_BEARER]; /* Predefined rule support */
enum rule_action_t action;
struct pdn_connection_t *pdn;
}__attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
typedef struct eps_bearer_t eps_bearer;
/**
* @brief : Stores data TEID and Msg_type as data for the use of error handling.
*/
typedef struct teid_value_t
{
uint32_t teid;
uint8_t msg_type;
} teid_value_t;
/**
* @brief : Rule Name is key for Mapping of Rules and Bearer table.
*/
typedef struct teid_seq_map_key {
/** Rule Name */
char teid_key[RULE_NAME_LEN];
}teid_key_t;
/**
* @brief : Maintains sessions and bearers created
* for indirect tunnel data transmission.
* */
struct indirect_tunnel_t {
pdn_connection *pdn;
uint8_t anchor_gateway_flag;
/*This bearer is UE context default bearer id */
uint8_t eps_bearer_id;
};
/*@brief: maintains pdr array for ddn requests */
typedef struct pdr_ids_t{
uint8_t pdr_count; /* pdr id count*/
uint16_t pdr_id[MAX_LIST_SIZE]; /* rule ids array*/
uint8_t ddn_buffered_count; /* number ddn buffered*/
}pdr_ids;
/*@brief: maintains per-session pdr id list with the owning session id,
* chained as a singly linked list */
typedef struct sess_info_t{
uint8_t pdr_count; /* pdr id count */
uint16_t pdr_id[MAX_LIST_SIZE]; /* rule ids array*/
uint64_t sess_id; /*session id*/
struct sess_info_t *next;
}sess_info;
#pragma pack()
extern struct rte_hash *ue_context_by_imsi_hash;
extern struct rte_hash *ue_context_by_fteid_hash;
extern struct rte_hash *ue_context_by_sender_teid_hash;
extern apn apn_list[MAX_NB_DPN];
extern int apnidx;
/**
* @brief : Initializes UE hash table
* @param : No param
* @return : Returns nothing
*/
void
create_ue_hash(void);
/**
* @brief : creates an UE Context (if needed), and pdn connection with a default bearer
* given the UE IMSI, and EBI
* @param : imsi
* value of information element of the imsi
* @param : imsi_len
* length of information element of the imsi
* @param : ebi
* Eps Bearer Identifier of default bearer
* @param : context
* UE context to be created
* @param : cp_mode
* [SGWC/SAEGWC/PGWC]
* @return : - 0 if successful
* - > 0 if error occurs during packet filter parsing corresponds to
* 3gpp specified cause error value
* - < 0 for all other errors
*/
int
create_ue_context(uint64_t *imsi_val, uint16_t imsi_len,
uint8_t ebi, ue_context **context, apn *apn_requested,
uint32_t sequence, uint8_t *check_ue_hash,
uint8_t cp_mode);
/**
* Create the ue eps Bearer context by PDN (if needed), and key is sgwc s5s8 teid.
* @param fteid_key
* value of information element of the sgwc s5s8 teid
* @param bearer
* Eps Bearer context
* @return
* \- 0 if successful
* \- > if error occurs during packet filter parsing corresponds to
* 3gpp specified cause error value
* \- < 0 for all other errors
*/
int
add_bearer_entry_by_sgw_s5s8_tied(uint32_t fteid_key, struct eps_bearer_t **bearer);
/**
* @brief : This function takes the c-string argstr describing a apn by url, for example
* label1.label2.label3 and populates the apn structure according 3gpp 23.003
* clause 9.1
* @param : an_apn
* apn to be initialized
* @param : argstr
* c-string containing the apn label
* @return : Returns nothing
*/
void
set_apn_name(apn *an_apn, char *argstr);
/**
* @brief : returns the apn structure of the apn referenced by create session message
* @param : apn_label
* apn_label within a create session message
* @param : apn_length
* the length as recorded by the apn information element
* @return : the apn label configured for the CP
*/
apn *
get_apn(char *apn_label, uint16_t apn_length);
/**
* @brief : returns the apn structure for default apn(Forwarding Gateway-S1 HO)
* @param : void
* @return : the apn label configured for the CP
*/
apn *
set_default_apn(void);
/**
* @brief : Simple ip-pool
* @param : ip_pool, IP subnet ID
* @param : ip_pool_mask, Mask to be used
* @param : ipv4
* ip address to be used for a new UE connection
* @return : - 0 if successful
* - > 0 if error occurs during packet filter parsing corresponds to
* 3gpp specified cause error value
*/
uint32_t
acquire_ip(struct in_addr ip_pool, struct in_addr ip_pool_mask,
struct in_addr *ipv4);
/**
* @brief : Simple ip-pool for ipv6
* @param : ipv6_network_id, Prefix for IPv6 creation
* @param : prefix_len, bearer_id that need to be used for IPv6 allocation
* @param : ipv6
* ip address to be used for a new UE connection
* @return : - 0 if successful
* - > 0 if error occurs during packet filter parsing corresponds to
* 3gpp specified cause error value
*/
uint32_t
acquire_ipv6(struct in6_addr ipv6_network_id, uint8_t prefix_len,
struct in6_addr *ipv6);
/* debug */
/**
* @brief : print (with a column header) either context by the context and/or
* iterating over hash
* @param : h
* pointer to rte_hash containing ue hash table
* @param : context
* denotes if some context is to be indicated by '*' character
* @return : Returns nothing
*/
void
print_ue_context_by(struct rte_hash *h, ue_context *context);
/**
* @brief : Initializes LI-DF IMSI hash table
* @param : No param
* @return : Returns nothing
*/
void
create_li_info_hash(void);
/**
* @brief : fill and send pfcp session modification with drop flag set
* @param : context, ue context
* @return : Returns 0 on success and -1 on error
*/
int
send_pfcp_sess_mod_with_drop(ue_context *context);
#endif /* UE_H */
|
nikhilc149/e-utran-features-bug-fixes | ulpc/admf/include/AdmfController.h | /*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __ADMF_CONTROLLER_H_
#define __ADMF_CONTROLLER_H_
#include <iostream>
#include <memory>
#include <list>
#include "UeEntry.h"
#include "AdmfApp.h"
class UeEntry;
class DeleteEvent;
class AdmfApplication;
class AdmfController
{
private:
/* reference count for the singleton instance */
static int iRefCnt;
/* the singleton instance itself */
static AdmfController *mpInstance;
/* owning application, supplied at construction */
AdmfApplication &mApp;
AdmfController(AdmfApplication &app);
public:
/**
 * @brief : Creates (on first call) and returns the singleton AdmfController
 * @param : app, owning AdmfApplication
 * @return : Returns pointer to the singleton AdmfController object
 *           (original comment incorrectly said a DAdmf reference)
 */
static AdmfController* getInstance(AdmfApplication &app);
/**
 * @brief : Handles Add Ue Request. Forwards this request to D-ADMF
		if request has not come from D-ADMF. Forwards
		request to LegacyAdmfInterface if forward flag
		is set to LI or both
 * @param : ueEntries, list of Ue entries parsed from request body object
 * @param : requestBody, request body received in the request
 * @param : requestSource, identifies where the request originated
		(e.g. whether it came from D-ADMF)
 */
void addUeController(std::list <ue_data_t *> &ueEntries,
std::string &requestBody,
uint16_t requestSource);
/**
 * @brief : Handles Update Ue Request. Forwards this request to D-ADMF
		if request has not come from D-ADMF. Forwards
		request to LegacyAdmfInterface if forward flag
		is set to LI or both
 * @param : ueEntries, list of Ue entries parsed from request body object
 * @param : requestBody, request body received in the request
 * @param : requestSource, identifies where the request originated
		(e.g. whether it came from D-ADMF)
 */
void modifyUeController(std::list <ue_data_t *> &ueEntries,
std::string &requestBody,
uint16_t requestSource);
/**
 * @brief : Handles Delete Ue Request. Forwards this request to D-ADMF
		if request has not come from D-ADMF. Forwards
		request to LegacyAdmfInterface if forward flag
		is set to LI or both
 * @param : ueEntries, list of delete events parsed from request body object
 * @param : requestBody, request body received in the request
 * @param : requestSource, identifies where the request originated
		(e.g. whether it came from D-ADMF)
 */
void deleteUeController(std::list <delete_event_t *> &ueEntries,
std::string &requestBody,
uint16_t requestSource);
/**
 * @brief : Handles start/stop notifications for Ue entries whose starttime
		or stoptime has been elapsed and forward flag is set.
 * @param : ueEntries, list of Ue entries parsed from request body object
 * @param : requestBody, request body received in the request
 * @return : Returns nothing
 */
void notifyUeController(std::list<ue_notify_t *> &ueEntries,
std::string &requestBody);
/**
 * @brief : Decreases reference count. Deletes the object if reference
		count becomes zero.
 * @param : No param
 * @return : Returns nothing
 */
void ReleaseInstance();
};
#endif /* __ADMF_CONTROLLER_H_ */
|
nikhilc149/e-utran-features-bug-fixes | pfcp_messages/csid_cp_cleanup.c | <gh_stars>0
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cp.h"
#include "teid.h"
#include "pfcp.h"
#include "li_config.h"
#include "pfcp_util.h"
#include "pfcp_enum.h"
#include "debug_str.h"
#include "seid_llist.h"
#include "gw_adapter.h"
#include "csid_struct.h"
#include "pfcp_set_ie.h"
#include "cp/gtpc_session.h"
#include "pfcp_messages_encoder.h"
#include "cp_timer.h"
#include "cp_app.h"
#include "sm_enum.h"
#include "ipc_api.h"
#include "cp_timer.h"
#include "pfcp_session.h"
#include "csid_struct.h"
extern int gx_app_sock;
#define UPD_PARAM_HEADER_SIZE (4)
extern int pfcp_fd;
extern int pfcp_fd_v6;
extern peer_addr_t upf_pfcp_sockaddr;
extern socklen_t s11_mme_sockaddr_len;
extern int clSystemLog;
extern int s5s8_fd;
extern int s5s8_fd_v6;
peer_addr_t s5s8_recv_sockaddr;
extern socklen_t s5s8_sockaddr_len;
extern pfcp_config_t config;
extern int clSystemLog;
static uint16_t sequence = 0;
/**
 * @brief : Copies a node address (IPv4 or IPv6) into a GTP IP-Address IE
 *          and sets the IE header length to match the copied address.
 * @param : ip_addr, IE to be filled
 * @param : node_addr, source node address
 * @param : instance, IE instance number
 * @return : Returns nothing
 */
static void
fill_pgw_rstrt_notif_addr(gtp_ip_address_ie_t *ip_addr,
		node_address_t *node_addr, uint8_t instance) {

	uint8_t addr_len;
	int is_ipv4 = ((node_addr->ip_type == PDN_TYPE_IPV4)
			|| (node_addr->ip_type == IPV4_GLOBAL_UNICAST));

	if (is_ipv4) {
		addr_len = IPV4_SIZE;
		memcpy(ip_addr->ipv4_ipv6_addr,
				&node_addr->ipv4_addr, addr_len);
	} else {
		addr_len = IPV6_ADDRESS_LEN;
		memcpy(ip_addr->ipv4_ipv6_addr,
				&node_addr->ipv6_addr, addr_len);
	}

	/* IE header length reflects the size of the address just copied */
	set_ie_header(&ip_addr->header,
			GTP_IE_IP_ADDRESS, instance, addr_len);
}
int8_t
fill_pgw_restart_notification(gtpv2c_header_t *gtpv2c_tx,
		node_address_t *s11_sgw, node_address_t *s5s8_pgw)
{
	/* Build and encode a PGW Restart Notification message */
	pgw_rstrt_notif_t pgw_rstrt_notif = {0};

	set_gtpv2c_teid_header((gtpv2c_header_t *)&pgw_rstrt_notif.header,
			GTP_PGW_RESTART_NOTIFICATION, 0, ++sequence, 0);

	/* SGW S11/S4 control-plane address, IE instance 1 */
	fill_pgw_rstrt_notif_addr(&pgw_rstrt_notif.sgw_s11s4_ip_addr_ctl_plane,
			s11_sgw, IE_INSTANCE_ONE);

	/* PGW S5/S8 control-plane address, IE instance 0; fall back to the
	 * SGW address when no PGW address is known */
	node_address_t *pgw_addr = is_present(s5s8_pgw) ? s5s8_pgw : s11_sgw;
	fill_pgw_rstrt_notif_addr(
			&pgw_rstrt_notif.pgw_s5s8_ip_addr_ctl_plane_or_pmip,
			pgw_addr, IE_INSTANCE_ZERO);

	/* Cause: peer PGW is not responding */
	set_ie_header(&pgw_rstrt_notif.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
			sizeof(struct cause_ie_hdr_t));
	pgw_rstrt_notif.cause.cause_value = GTPV2C_CAUSE_PGW_NOT_RESPONDING;

	encode_pgw_rstrt_notif(&pgw_rstrt_notif, (uint8_t *)gtpv2c_tx);
	return 0;
}
/**
 * @brief : Fills delete set pdn connection request
 * @param : gtpv2c_tx, buffer to be filled
 * @param : local_csids, local csids list; 'instance' selects which FQ-CSID
 *          slot (0 = MME, 1 = SGW, 2 = PGW) the CSIDs are written into
 * @return : Returns 0 in case of success, -1 otherwise
 */
static int8_t
fill_gtpc_del_set_pdn_conn_req_t(gtpv2c_header_t *gtpv2c_tx, fqcsid_t *local_csids)
{
	del_pdn_conn_set_req_t del_pdn_conn_req = {0};

	set_gtpv2c_teid_header((gtpv2c_header_t *)&del_pdn_conn_req.header,
			GTP_DELETE_PDN_CONNECTION_SET_REQ, 0, ++sequence, 0);

	if (local_csids->num_csid) {
		switch (local_csids->instance) {
		case 0:
			set_gtpc_fqcsid_t(&del_pdn_conn_req.mme_fqcsid, IE_INSTANCE_ZERO,
					local_csids);
			break;
		case 1:
			set_gtpc_fqcsid_t(&del_pdn_conn_req.sgw_fqcsid, IE_INSTANCE_ONE,
					local_csids);
			break;
		case 2:
			set_gtpc_fqcsid_t(&del_pdn_conn_req.pgw_fqcsid, IE_INSTANCE_TWO,
					local_csids);
			break;
		default:
			/* Unknown instance: nothing to fill */
			break;
		}
	}

	/* Encode the del pdn conn set request */
	encode_del_pdn_conn_set_req(&del_pdn_conn_req, (uint8_t *)gtpv2c_tx);
	return 0;
}
/**
 * @brief : Match peer node address against a list of addresses
 * @param : num_node_addr, number of entries in peer_node_addrs
 * @param : peer_node_addr, address to look for
 * @param : peer_node_addrs, list to search
 * @return : Returns 0 when no match is found, 1 otherwise
 */
static int
match_node_addr(uint8_t num_node_addr, node_address_t *peer_node_addr,
		node_address_t *peer_node_addrs) {

	node_address_t lhs = {0};

	memcpy(&lhs, peer_node_addr, sizeof(node_address_t));

	for (uint8_t idx = 0; idx < num_node_addr; idx++) {
		node_address_t rhs = {0};
		memcpy(&rhs, &peer_node_addrs[idx], sizeof(node_address_t));
		if ((COMPARE_IP_ADDRESS(lhs, rhs)) == 0)
			return 1;
	}

	return 0;
}
/**
 * @brief : Fills delete set pdn connection request
 * @param : del_pdn_conn_req, buffer to be filled
 * @param : local_csids, local csids list
 * @param : cp_ip, peer CP address used to decide whether the local CSIDs
 *          go into the SGW or the PGW FQ-CSID IE
 * @return : Returns 0 in case of success, -1 otherwise
 */
static int8_t
fill_gtpc_del_set_pdn_conn_req(del_pdn_conn_set_req_t *del_pdn_conn_req, fqcsid_t *local_csids,
		node_address_t *cp_ip)
{
	int matches_self = 0;

	set_gtpv2c_teid_header((gtpv2c_header_t *)&del_pdn_conn_req->header,
			GTP_DELETE_PDN_CONNECTION_SET_REQ, 0, ++sequence, 0);

	/* Does cp_ip equal one of our own S5/S8 or S11 addresses? */
	if (cp_ip->ip_type == PDN_TYPE_IPV4) {
		matches_self =
			(memcmp(&cp_ip->ipv4_addr, &config.s5s8_ip.s_addr, IPV4_SIZE) == 0)
			|| (memcmp(&cp_ip->ipv4_addr, &config.s11_ip.s_addr, IPV4_SIZE) == 0);
	} else {
		matches_self =
			(memcmp(&cp_ip->ipv6_addr, &config.s5s8_ip_v6.s6_addr, IPV6_SIZE) == 0)
			|| (memcmp(&cp_ip->ipv6_addr, &config.s11_ip_v6.s6_addr, IPV6_SIZE) == 0);
	}

	if ((is_present(cp_ip) == 0) || (!matches_self)) {
		/* Set the SGW FQ-CSID */
		if (local_csids->num_csid) {
			set_gtpc_fqcsid_t(&del_pdn_conn_req->sgw_fqcsid, IE_INSTANCE_ONE,
					local_csids);
		}
	} else {
		/* Set the PGW FQ-CSID */
		if (local_csids->num_csid) {
			set_gtpc_fqcsid_t(&del_pdn_conn_req->pgw_fqcsid, IE_INSTANCE_TWO,
					local_csids);
		}
	}

	return 0;
}
uint32_t s5s8_node_addr = 0;
/**
 * @brief : Builds and sends a Gx CCR-T (Credit Control Request - Termination)
 *          for the given PDN connection to the Gx application over IPC.
 * @param : pdn, PDN connection whose Gx session is being terminated
 * @param : ebi_index, index of the bearer used to fill the CCR
 * @return : Returns 0 in case of success, -1 otherwise
 */
static int8_t
fill_ccr_t_request(pdn_connection *pdn, uint8_t ebi_index)
{
	int ret = 0;
	uint16_t msglen = 0;
	uint8_t *buffer = NULL;
	gx_msg ccr_request = {0};
	gx_context_t *gx_context = NULL;

	/* Retrieve the Gx context keyed by the PDN's Gx session ID */
	ret = rte_hash_lookup_data(gx_context_by_sess_id_hash,
			(const void*)(pdn->gx_sess_id),
			(void **)&gx_context);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"NO ENTRY FOUND IN Gx "
				"HASH [%s]\n", LOG_VALUE, pdn->gx_sess_id);
		return -1;
	}

	/* Set the Msg header type for CCR-T */
	ccr_request.msg_type = GX_CCR_MSG ;

	/* Set Credit Control Request type */
	ccr_request.data.ccr.presence.cc_request_type = PRESENT;
	ccr_request.data.ccr.cc_request_type = TERMINATION_REQUEST ;

	/* Set Credit Control Bearer operation type */
	ccr_request.data.ccr.presence.bearer_operation = PRESENT;
	ccr_request.data.ccr.bearer_operation = TERMINATION ;

	/* Fill the Credit Control Request to send to PCRF.
	 * NOTE(review): on this failure path (and the pack failure below)
	 * free_dynamically_alloc_memory() is not called on ccr_request —
	 * confirm fill_ccr_request() does not leak allocations on error. */
	if(fill_ccr_request(&ccr_request.data.ccr, pdn->context, ebi_index, pdn->gx_sess_id,0) != 0) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed CCR request filling process\n", LOG_VALUE);
		return -1;
	}

	/* Update UE State */
	pdn->state = CCR_SNT_STATE;

	/* Set the Gx State for events */
	gx_context->state = CCR_SNT_STATE;
	gx_context->proc = pdn->proc;

	/* Calculate the max size of CCR msg to allocate the buffer */
	msglen = gx_ccr_calc_length(&ccr_request.data.ccr);
	ccr_request.msg_len = msglen + GX_HEADER_LEN;

	buffer = rte_zmalloc_socket(NULL, msglen + GX_HEADER_LEN,
			RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (buffer == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Failure to allocate CCR-T Buffer memory"
				"structure, Error : %s\n", LOG_VALUE, rte_strerror(rte_errno));
		return -1;
	}

	/* Wire format: msg_type | msg_len | packed CCR payload */
	memcpy(buffer, &ccr_request.msg_type, sizeof(ccr_request.msg_type));
	memcpy(buffer + sizeof(ccr_request.msg_type),
			&ccr_request.msg_len,
			sizeof(ccr_request.msg_len));

	if (gx_ccr_pack(&(ccr_request.data.ccr),
				(unsigned char *)(buffer + GX_HEADER_LEN), msglen) == 0) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"ERROR in Packing CCR Buffer\n", LOG_VALUE);
		rte_free(buffer);
		return -1;
	}

	/* Write or Send CCR-T msg to Gx_App */
	send_to_ipc_channel(gx_app_sock, buffer, msglen + GX_HEADER_LEN);
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Sent CCR-T to PCRF \n", LOG_VALUE);

	free_dynamically_alloc_memory(&ccr_request);

	update_cli_stats((peer_address_t *) &config.gx_ip, OSS_CCR_TERMINATE, SENT, GX);

	rte_free(buffer);
	return 0;
}
/**
 * @brief : Removes every PDR and QER installed for a bearer
 * @param : bearer, bearer information
 * @return : Returns 0 in case of success, -1 otherwise
 */
static int8_t
flush_pdr_entries(eps_bearer *bearer)
{
	uint8_t idx;

	/* Drop all PDRs attached to this bearer */
	for (idx = 0; idx < bearer->pdr_count; idx++) {
		if (bearer->pdrs[idx] == NULL)
			continue;
		if (del_pdr_entry((bearer->pdrs[idx])->rule_id, bearer->pdn->seid))
			return -1;
	}

	/* Drop all QERs attached to this bearer */
	for (idx = 0; idx < bearer->qer_count; idx++) {
		if (del_qer_entry(bearer->qer_id[idx].qer_id, bearer->pdn->seid))
			return -1;
	}

	return 0;
}
/**
 * @brief : Delete session entry using csid: terminates the Gx session
 *          (when applicable), releases all bearers and frees the PDN.
 * @param : pdn, Structure to store PDN context
 * @return : Returns 0 in case of success, -1 otherwise
 */
static int8_t
del_sess_by_csid_entry(pdn_connection *pdn)
{
	int ret = 0;
	int8_t ebi_index = GET_EBI_INDEX(pdn->default_bearer_id);

	/* Guard against an invalid default-bearer EBI: GET_EBI_INDEX can
	 * return -1 (checked everywhere else in this file), and -1 would
	 * otherwise be passed as uint8_t (255) into fill_ccr_t_request. */
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
				"Invalid EBI ID\n", LOG_VALUE);
		return -1;
	}

	if ((config.use_gx) && ((pdn->context)->cp_mode != SGWC)) {
		/* Send CCR-T towards PCRF for this Gx session */
		fill_ccr_t_request(pdn, ebi_index);

		gx_context_t *gx_context = NULL;
		/* Retrieve Gx_context based on Sess ID. */
		ret = rte_hash_lookup_data(gx_context_by_sess_id_hash,
				(const void*)(pdn->gx_sess_id), (void **)&gx_context);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"NO ENTRY FOUND IN "
					"Gx HASH [%s]\n", LOG_VALUE, pdn->gx_sess_id);
		} else {
			/* Deleting PDN hash map with GX call id */
			rte_hash_del_key(pdn_conn_hash, (const void *) &pdn->call_id);

			/* Delete Gx context entry from session-id Hash */
			if (rte_hash_del_key(gx_context_by_sess_id_hash, &pdn->gx_sess_id) < 0) {
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Error on "
						"Deleting GX context Key entry from Hash\n",
						LOG_VALUE);
			}
			if (gx_context != NULL) {
				rte_free(gx_context);
				gx_context = NULL;
			}
		}
	}

	/* Release every bearer (PDRs/QERs) attached to this PDN.
	 * Loop variable renamed so it no longer shadows ebi_index above. */
	for (uint8_t itr = 0; itr < MAX_BEARERS; itr++) {
		eps_bearer *bearer = pdn->eps_bearers[itr];
		if (bearer == NULL)
			continue;

		if (flush_pdr_entries(bearer)) {
			/* TODO: Error Handling */
			return -1;
		}
		rte_free(pdn->eps_bearers[itr]);
		pdn->eps_bearers[itr] = NULL;
	}

	rte_free(pdn);
	pdn = NULL;
	return 0;
}
/**
 * @brief : Delete csid using csid entry: removes every local CSID present
 *          in local_fqcsid from the peer-CSID mapping, and drops the peer
 *          entry entirely once its CSID list becomes empty.
 * @param : peer_fqcsid
 * @param : local_fqcsid
 * @param : iface
 * @return : Returns 0 in case of success, -1 otherwise
 */
static int8_t
cleanup_csid_by_csid_entry(sess_fqcsid_t *peer_fqcsid, fqcsid_t *local_fqcsid, uint8_t iface)
{
	for (uint8_t itr = 0; itr < peer_fqcsid->num_csid; itr++) {
		csid_t *local_csids = NULL;
		csid_key_t key_t = {0};

		key_t.local_csid = peer_fqcsid->local_csid[itr];
		memcpy(&key_t.node_addr, &peer_fqcsid->node_addr[itr],
				sizeof(node_address_t));

		local_csids = get_peer_csid_entry(&key_t, iface, REMOVE_NODE);
		if (local_csids == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get CSID "
					"entry while cleanup CSID by CSID entry, Error : %s \n", LOG_VALUE,
					strerror(errno));
			return -1;
		}

		/* Remove every local CSID that appears in local_fqcsid.
		 * BUGFIX: only advance the index when no removal happened —
		 * after the left-shift the next element occupies the current
		 * slot, and incrementing would skip it (the original code
		 * advanced unconditionally and left stale CSIDs behind). */
		for (uint8_t itr1 = 0; itr1 < local_csids->num_csid; ) {
			uint8_t matched = 0;

			for (uint8_t itr2 = 0; itr2 < local_fqcsid->num_csid; itr2++) {
				if (local_fqcsid->local_csid[itr2] == local_csids->local_csid[itr1]) {
					matched = 1;
					break;
				}
			}

			if (matched) {
				for (uint8_t pos = itr1; pos < (local_csids->num_csid - 1); pos++) {
					local_csids->local_csid[pos] = local_csids->local_csid[pos + 1];
				}
				local_csids->num_csid--;
			} else {
				itr1++;
			}
		}

		if (!local_csids->num_csid) {
			if (del_peer_csid_entry(&key_t, iface)) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to delete CSID "
						"entry while cleanup CSID by CSID entry, Error : %s \n", LOG_VALUE,
						strerror(errno));
				return -1;
			}
		}
	}
	return 0;
}
/**
 * @brief : Send gtpc pgw restart notification: walks the sessions mapped to
 *          each local CSID, collects the distinct MME and PGW peer addresses,
 *          and sends a PGW Restart Notification to each collected MME.
 * @param : csids, list of local csids whose sessions are affected.
 * @param : iface, port/interface on which the failure was detected.
 * @return : void
 */
static void
send_gtpc_pgw_restart_notification(fqcsid_t *csids, uint8_t iface)
{
	uint8_t num_mme_node_addr = 0;
	uint8_t num_pgw_node_addr = 0;
	uint8_t tmp_cnt = 0;
	int8_t ebi = 0;
	int8_t ebi_index = 0;
	int ret = 0;
	uint16_t payload_length = 0;
	uint32_t teid_key = 0;
	node_address_t mme_node_addrs[MAX_CSID] = {0};
	node_address_t pgw_node_addrs[MAX_CSID] = {0};
	node_address_t local_node_addr = {0};
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	sess_csid *tmp = NULL;
	sess_csid *current = NULL;

	/* Reuse the shared transmit buffer for the outgoing GTPv2-C message */
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
	memset(gtpv2c_tx, 0, sizeof(gtpv2c_header_t));

	/* Collect peer MME / PGW addresses from sessions under each CSID */
	for (uint8_t itr = 0; itr < csids->num_csid; itr++) {
		tmp = get_sess_csid_entry(csids->local_csid[itr], REMOVE_NODE);
		if (tmp == NULL) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Failed to get CSID entry, CSID: %u\n", LOG_VALUE,
					csids->local_csid[itr]);
			continue;
		}
		/* Check SEID is not ZERO */
		if ((tmp->cp_seid == 0) && (tmp->next == 0)) {
			continue;
		}
		current = tmp;
		while (current != NULL) {
			teid_key = UE_SESS_ID(current->cp_seid);
			ebi = UE_BEAR_ID(current->cp_seid);
			ebi_index = GET_EBI_INDEX(ebi);
			if (ebi_index == -1) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"Invalid EBI ID\n", LOG_VALUE);
				/* Assign Next node address */
				tmp = current->next;
				current = tmp;
				continue;
			}

			ret = rte_hash_lookup_data(ue_context_by_fteid_hash,
					(const void *) &teid_key,
					(void **) &context);
			if (ret < 0 || context == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"ERROR : Failed to get UE context for teid : %u \n",
						LOG_VALUE, teid_key);
				/* Assign Next node address */
				tmp = current->next;
				current = tmp;
				continue;
			}

			pdn = context->pdns[ebi_index];
			if (pdn == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"ERROR : Failed to get PDN context for seid : %u \n",
						LOG_VALUE, current->cp_seid);
				/* Assign Next node address */
				tmp = current->next;
				current = tmp;
				continue;
			}

			/* Record the MME address once per distinct peer */
			if (is_present(&pdn->mme_csid.node_addr)) {
				if ((match_node_addr(num_mme_node_addr,
							&pdn->mme_csid.node_addr, mme_node_addrs)) == 0) {
					fill_peer_info(&mme_node_addrs[num_mme_node_addr++],
							&pdn->mme_csid.node_addr);
				}
			}

			/* Record the PGW S5/S8 address once per distinct peer
			 * (not applicable in SAEGWC mode) */
			if ((context->cp_mode != SAEGWC) && (is_present(&pdn->s5s8_pgw_gtpc_ip))) {
				if ((match_node_addr(num_pgw_node_addr,
							&pdn->s5s8_pgw_gtpc_ip, pgw_node_addrs)) == 0) {
					fill_peer_info(&pgw_node_addrs[num_pgw_node_addr++],
							&pdn->s5s8_pgw_gtpc_ip);
				}
			}
			/* Assign Next node address */
			tmp = current->next;
			current = tmp;
			/* NOTE(review): unconditional break — only the FIRST valid
			 * session per CSID contributes peer addresses; confirm this
			 * is intended (one CSID is assumed to map to one peer set). */
			break;
		}
	}

	/* If SGWU failure detect, SGWC should not send PGW restart notfication to MME
	 * If SX failure detect and PGW node addr found, i.e peer node is SGWU */
	if(iface == SX_PORT_ID) {
		if((num_pgw_node_addr != 0)
				&& (is_present(&pgw_node_addrs[num_pgw_node_addr - 1])))
			return;
	}

	/* One notification per collected MME peer */
	for (uint8_t itr1 = 0; itr1 < num_mme_node_addr; itr1++) {
		/* one pgw/sgw/saewc might be connected with more than one mme */
		/* Pick the local S11 address family matching the MME's */
		if (mme_node_addrs[itr1].ip_type == PDN_TYPE_IPV4) {
			local_node_addr.ip_type = PDN_TYPE_IPV4;
			local_node_addr.ipv4_addr = config.s11_ip.s_addr;
		} else {
			local_node_addr.ip_type = PDN_TYPE_IPV6;
			memcpy(local_node_addr.ipv6_addr,
					config.s11_ip_v6.s6_addr, IPV6_ADDRESS_LEN);
		}

		/* Fill the PGW restart notification request.
		 * NOTE(review): tmp_cnt is never advanced, so every notification
		 * carries pgw_node_addrs[0] — confirm this is intended when more
		 * than one PGW address was collected. */
		fill_pgw_restart_notification(gtpv2c_tx, &local_node_addr,
				&pgw_node_addrs[tmp_cnt]);

		ret = set_dest_address(mme_node_addrs[itr1], &s11_mme_sockaddr);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
		}
		/* Send the Delete PDN Request to peer node */
		payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
			+ sizeof(gtpv2c_tx->gtpc);

		gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
				s11_mme_sockaddr, SENT);

		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Send PGW Restart notification to MME \n",
				LOG_VALUE);
		memset(gtpv2c_tx, 0, sizeof(gtpv2c_header_t));
	}
}
/**
 * @brief : Cleanup fqcsid entry from UE context: detaches the PDN's CSIDs
 *          from each context-level FQ-CSID list and frees any list that
 *          becomes empty.
 * @param : pdn, Structure for store PDN Connection context
 * @return : Returns void
 */
static void
cleanup_sess_csid_entry(pdn_connection *pdn)
{
	ue_context *context = pdn->context;

	/* MME FQ-CSID */
	if ((context->mme_fqcsid != NULL) && (context->mme_fqcsid)->num_csid) {
		remove_csid_from_cntx(context->mme_fqcsid, &pdn->mme_csid);
		if ((context->mme_fqcsid)->num_csid == 0) {
			rte_free(context->mme_fqcsid);
			context->mme_fqcsid = NULL;
		}
	}

	/* SGW FQ-CSID */
	if ((context->sgw_fqcsid != NULL) && (context->sgw_fqcsid)->num_csid) {
		remove_csid_from_cntx(context->sgw_fqcsid, &pdn->sgw_csid);
		if ((context->sgw_fqcsid)->num_csid == 0) {
			rte_free(context->sgw_fqcsid);
			context->sgw_fqcsid = NULL;
		}
	}

	/* PGW FQ-CSID */
	if ((context->pgw_fqcsid != NULL) && (context->pgw_fqcsid)->num_csid) {
		remove_csid_from_cntx(context->pgw_fqcsid, &pdn->pgw_csid);
		if ((context->pgw_fqcsid)->num_csid == 0) {
			rte_free(context->pgw_fqcsid);
			context->pgw_fqcsid = NULL;
		}
	}

	/* UP FQ-CSID */
	if ((context->up_fqcsid != NULL) && (context->up_fqcsid)->num_csid) {
		remove_csid_from_cntx(context->up_fqcsid, &pdn->up_csid);
		if ((context->up_fqcsid)->num_csid == 0) {
			rte_free(context->up_fqcsid);
			context->up_fqcsid = NULL;
		}
	}

	return;
}
/**
 * @brief : Match sess. fqcsid and add: appends each CSID from fqcsid to
 *          fqcsids unless the same (CSID, node address) pair is already
 *          present.
 * @param : fqcsids, accumulated list of peer node csids and addresses.
 * @param : fqcsid, list of csids sharing one node address.
 * @return : Returns void.
 */
static void
match_and_add_sess_fqcsid_t(sess_fqcsid_t *fqcsids, fqcsid_t *fqcsid)
{
	for (uint8_t itr = 0; itr < fqcsid->num_csid; itr++) {
		/* BUGFIX: 'match' must be reset for every candidate CSID.
		 * It was previously declared once outside this loop, so after
		 * the first duplicate every remaining CSID was silently
		 * treated as already present and never added. */
		uint8_t match = 0;
		for (uint8_t itr1 = 0; itr1 < fqcsids->num_csid; itr1++) {
			if((fqcsids->local_csid[itr1] == fqcsid->local_csid[itr])
					&& (COMPARE_IP_ADDRESS(fqcsids->node_addr[itr1], fqcsid->node_addr)) == 0){
				match = 1;
				break;
			}
		}

		if (match == 0) {
			if ((fqcsid->node_addr.ip_type == IPV4_GLOBAL_UNICAST)
					|| (fqcsid->node_addr.ip_type == PDN_TYPE_IPV4)) {
				fqcsids->node_addr[(fqcsids->num_csid)].ip_type =
					PDN_TYPE_IPV4;
				fqcsids->node_addr[(fqcsids->num_csid)].ipv4_addr =
					fqcsid->node_addr.ipv4_addr;
			} else {
				fqcsids->node_addr[(fqcsids->num_csid)].ip_type =
					PDN_TYPE_IPV6;
				memcpy(fqcsids->node_addr[(fqcsids->num_csid)].ipv6_addr,
						fqcsid->node_addr.ipv6_addr, IPV6_ADDRESS_LEN);
			}
			fqcsids->local_csid[(fqcsids->num_csid++)] = fqcsid->local_csid[itr];
		}
	}
}
/**
 * @brief : Match peer node address's and add: appends peer_node_addr to
 *          the list unless it is already present.
 * @param : peer_node_addr, node address.
 * @param : peer_node_addrs, accumulated list of peer node addresses.
 * @return : Returns void.
 */
static void
match_and_add_peer_node_addr(node_address_t *peer_node_addr,
		node_addr_t *peer_node_addrs)
{
	node_address_t peer_ip = {0};

	memcpy(&peer_ip, peer_node_addr, sizeof(node_address_t));

	/* Already recorded? Nothing to do. */
	for (uint8_t idx = 0; idx < peer_node_addrs->num_addr; idx++) {
		if ((COMPARE_IP_ADDRESS(peer_node_addrs->node_addr[idx], peer_ip)) == 0)
			return;
	}

	/* Append into the next free slot, normalizing the ip_type */
	node_address_t *slot = &peer_node_addrs->node_addr[peer_node_addrs->num_addr];
	if ((peer_ip.ip_type == IPV4_GLOBAL_UNICAST)
			|| (peer_ip.ip_type == PDN_TYPE_IPV4)) {
		slot->ip_type = PDN_TYPE_IPV4;
		slot->ipv4_addr = peer_ip.ipv4_addr;
	} else {
		slot->ip_type = PDN_TYPE_IPV6;
		memcpy(&slot->ipv6_addr, &peer_ip.ipv6_addr, IPV6_ADDRESS_LEN);
	}
	peer_node_addrs->num_addr++;
}
/**
 * @brief : Cleanup peer node address entry for fqcsid IE address: removes
 *          the address mapping once no session references any of the
 *          peer's CSIDs.
 * @param : key, key of the hash.
 * @param : iface, interface the peer CSIDs belong to.
 * @return : In success return 0, otherwise -1.
 */
static int
cleanup_peer_node_addr_entry(peer_node_addr_key_t *key, uint8_t iface) {

	fqcsid_ie_node_addr_t *fqcsid_node = NULL;
	fqcsid_t *peer_csids = NULL;

	fqcsid_node = get_peer_node_addr_entry(key, REMOVE_NODE);
	if (fqcsid_node == NULL) {
		if (key->peer_node_addr.ip_type == IPV6_TYPE) {
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
					"Entry not found for Peer Node IPv6 Addr : "IPv6_FMT"\n",
					LOG_VALUE, IPv6_PRINT(IPv6_CAST(key->peer_node_addr.ipv6_addr)));
		} else {
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
					"Entry not found for Peer Node Addr : "IPV4_ADDR"\n",
					LOG_VALUE, IPV4_ADDR_HOST_FORMAT(key->peer_node_addr.ipv4_addr));
		}
		return -1;
	}

	/* Get peer CSID associated with node */
	peer_csids = get_peer_addr_csids_entry(&fqcsid_node->fqcsid_node_addr, UPDATE_NODE);
	if (peer_csids == NULL) {
		if (fqcsid_node->fqcsid_node_addr.ip_type == IPV6_TYPE) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Peer CSIDs are already cleanup, Node IPv6 Addr:"IPv6_FMT"\n",
					LOG_VALUE, IPv6_PRINT(IPv6_CAST(fqcsid_node->fqcsid_node_addr.ipv6_addr)));
		} else {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Peer CSIDs are already cleanup, Node_Addr:"IPV4_ADDR"\n",
					LOG_VALUE, IPV4_ADDR_HOST_FORMAT(fqcsid_node->fqcsid_node_addr.ipv4_addr));
		}
		del_peer_node_addr_entry(key);
		return 0;
	}

	/* If any peer CSID no longer maps to a session, the address entry
	 * can be dropped */
	for (int8_t itr = 0; itr < peer_csids->num_csid; itr++) {
		peer_csid_key_t sess_key = {0};
		sess_csid *sess_list = NULL;

		sess_key.iface = iface;
		sess_key.peer_local_csid = peer_csids->local_csid[itr];
		memcpy(&sess_key.peer_node_addr, &peer_csids->node_addr,
				sizeof(node_address_t));

		sess_list = get_sess_peer_csid_entry(&sess_key, REMOVE_NODE);
		if (sess_list == NULL) {
			del_peer_node_addr_entry(key);
			return 0;
		}
	}
	return 0;
}
/**
 * @brief : Cleanup session using csid entry: tears down every session
 *          linked to the given local CSIDs (timers, PDRs/QERs, Gx session,
 *          UE context), then cleans the related peer FQ-CSID mappings and
 *          peer node-address entries.
 * @param : csids, list of local CSIDs whose sessions must be removed.
 * @param : iface, port on which the failure was detected.
 * @return : Returns 0 in case of success, -1 otherwise
 */
static int8_t
cleanup_sess_by_csid_entry(fqcsid_t *csids, uint8_t iface)
{
	int ret = 0;
	uint8_t cp_mode = 0;
	int8_t ebi = 0;
	int8_t ebi_index = 0;
	uint32_t teid_key = 0;
	node_addr_t mme_node_addr = {0};
	node_addr_t sgw_node_addr = {0};
	node_addr_t pgw_node_addr = {0};
	node_addr_t up_node_addr = {0};
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	sess_fqcsid_t mme_csids = {0};
	sess_fqcsid_t up_csids = {0};
	sess_fqcsid_t tmp1 = {0};

	/* Get the session ID by csid */
	for (uint8_t itr1 = 0; itr1 < csids->num_csid; itr1++) {
		sess_csid *tmp_t = NULL;
		sess_csid *current = NULL;

		tmp_t = get_sess_csid_entry(csids->local_csid[itr1], REMOVE_NODE);
		if (tmp_t == NULL) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Failed to get CSID entry, CSID: %u\n", LOG_VALUE,
					csids->local_csid[itr1]);
			continue;
		}
		/* Check SEID is not ZERO */
		if ((tmp_t->cp_seid == 0) && (tmp_t->next == 0)) {
			continue;
		}

		/* Walk the session list for this CSID, freeing each list node */
		current = tmp_t;
		while (current != NULL ) {
			sess_csid *tmp = NULL;
			teid_key = UE_SESS_ID(current->cp_seid);
			ebi = UE_BEAR_ID(current->cp_seid);
			ebi_index = GET_EBI_INDEX(ebi);
			if (ebi_index == -1) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"Invalid EBI ID\n", LOG_VALUE);
				/* Assign Next node address */
				tmp = current->next;
				/* free csid linked list node */
				if(current != NULL) {
					rte_free(current);
					current = NULL;
				}
				current = tmp;
				continue;
			}

			ret = rte_hash_lookup_data(ue_context_by_fteid_hash,
					(const void *) &teid_key,
					(void **) &context);
			if ((ret < 0) || (context == NULL)) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Failed to get UE context for teid : %u, Error: %s \n", LOG_VALUE,
						teid_key, strerror(errno));
				/* Assign Next node address */
				tmp = current->next;
				/* free csid linked list node */
				if(current != NULL) {
					rte_free(current);
					current = NULL;
				}
				current = tmp;
				continue;
			}

			/* Stop any pending DDN / DL buffering timers for this session */
			delete_ddn_timer_entry(timer_by_teid_hash, teid_key, ddn_by_seid_hash);
			delete_ddn_timer_entry(dl_timer_by_teid_hash, teid_key, pfcp_rep_by_seid_hash);

			pdn = context->pdns[ebi_index];
			if (pdn == NULL) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Failed to get PDn Connection context for EBI index: %d, Error: %s \n",
						LOG_VALUE, ebi_index, strerror(errno));
				/* Assign Next node address */
				tmp = current->next;
				/* free csid linked list node */
				if(current != NULL) {
					rte_free(current);
					current = NULL;
				}
				current = tmp;
				continue;
			}

			/* Copying CP mode type.
			 * NOTE(review): cp_mode keeps the mode of the LAST session
			 * processed and is later used to pick ports for the FQ-CSID
			 * cleanup — assumes all sessions here share one CP mode;
			 * confirm. */
			cp_mode = context->cp_mode;

			/* Accumulate the peer FQ-CSIDs seen across all sessions */
			/* MME FQ-CSID */
			if(pdn->mme_csid.num_csid != 0) {
				match_and_add_sess_fqcsid_t(&mme_csids, &pdn->mme_csid);
			}

			/* SGWC FQ-CSID */
			if(PGWC == context->cp_mode && (pdn->sgw_csid.num_csid)) {
				match_and_add_sess_fqcsid_t(&tmp1, &pdn->sgw_csid);
			}

			/* PGWC FQ-CSID */
			if(SGWC == context->cp_mode && (pdn->pgw_csid.num_csid)) {
				match_and_add_sess_fqcsid_t(&tmp1, &pdn->pgw_csid);
			}

			if (SX_PORT_ID != iface) {
				/* UP FQ-CSID */
				if(pdn->up_csid.num_csid) {
					match_and_add_sess_fqcsid_t(&up_csids, &pdn->up_csid);
				}
			}

			/* Detach this PDN's CSIDs from the UE-context FQ-CSID lists */
			cleanup_sess_csid_entry(pdn);

			/* peer mode address */
			if (context->cp_mode != PGWC) {
				/* MME S11 IP */
				match_and_add_peer_node_addr(&context->s11_mme_gtpc_ip,
						&mme_node_addr);
				/* UP SX IP */
				match_and_add_peer_node_addr(&pdn->upf_ip,
						&up_node_addr);
				/* PGWC S5S8 IP */
				if (is_present(&pdn->s5s8_pgw_gtpc_ip)) {
					match_and_add_peer_node_addr(&pdn->s5s8_pgw_gtpc_ip,
							&pgw_node_addr);
				}
			} else {
				/* SGWC S5S8 IP */
				match_and_add_peer_node_addr(&pdn->s5s8_pgw_gtpc_ip,
						&sgw_node_addr);
				/* UP SX IP */
				match_and_add_peer_node_addr(&pdn->upf_ip,
						&up_node_addr);
			}

			/* Delete UE session entry from UE Hash */
			if (del_sess_by_csid_entry(pdn)) {
				/* TODO Handle Error */
			}
			context->num_pdns--;

			/* Once the UE has no remaining PDNs, drop its context */
			if (context->num_pdns == 0) {
				/* Delete UE context entry from IMSI Hash */
				if (rte_hash_del_key(ue_context_by_imsi_hash, &context->imsi) < 0) {
					clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Error on "
							"Deleting UE context entry from IMSI Hash\n",
							LOG_VALUE);
				}
				if (context != NULL) {
					rte_free(context);
					context = NULL;
				}
				/* Delete UE context entry from UE Hash */
				if (rte_hash_del_key(ue_context_by_fteid_hash, &teid_key) < 0) {
					clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Error on "
							"Deleting UE context entry from UE Hash\n", LOG_VALUE);
				}
			}

			update_sys_stat(number_of_users, DECREMENT);
			update_sys_stat(number_of_active_session, DECREMENT);

			/* Assign Next node address */
			tmp = current->next;
			/* free csid linked list node */
			if(current != NULL) {
				rte_free(current);
				current = NULL;
			}
			current = tmp;
		}

		/* Update CSID Entry in table.
		 * 'current' is NULL when the loop exits, so this resets the
		 * session list mapped to the CSID to empty. */
		ret = rte_hash_add_key_data(seids_by_csid_hash,
				&csids->local_csid[itr1], current);
		if (ret) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Failed to update Session IDs entry for CSID = %u"
					"\n\tError= %s\n",
					LOG_VALUE, csids->local_csid[itr1],
					rte_strerror(abs(ret)));
		}
	}

	/* Cleanup MME FQ-CSID */
	if(mme_csids.num_csid != 0) {
		if(cleanup_csid_by_csid_entry(&mme_csids, csids,
					((cp_mode == PGWC)? S5S8_PGWC_PORT_ID : S11_SGW_PORT_ID)) < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Error on "
					"Deleting MME FQ-CSID entry\n", LOG_VALUE);
			return -1;
		}
	}

	/* Cleanup SGWC or PGWC FQ-CSID */
	if(tmp1.num_csid != 0) {
		if(cleanup_csid_by_csid_entry(&tmp1, csids,
					((cp_mode == SGWC) ? S5S8_SGWC_PORT_ID : S5S8_PGWC_PORT_ID)) < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Error on "
					"Deleting FQ-CSID entry\n", LOG_VALUE);
			return -1;
		}
	}

	/* Cleanup UP FQ-CSID */
	if (SX_PORT_ID != iface) {
		if(up_csids.num_csid != 0) {
			if(cleanup_csid_by_csid_entry(&up_csids, csids, SX_PORT_ID) < 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Error on "
						"Deleting UP FQ-CSID entry\n", LOG_VALUE);
				return -1;
			}
		}
	}

	/* Drop peer node-address mappings for every peer seen above */
	peer_node_addr_key_t key = {0};
	/* MME */
	for (uint8_t itr = 0; itr < mme_node_addr.num_addr; itr++) {
		key.iface = S11_SGW_PORT_ID;
		memcpy(&key.peer_node_addr,
				&mme_node_addr.node_addr[itr], sizeof(node_address_t));

		/* cleanup Peer node address entry */
		cleanup_peer_node_addr_entry(&key, S11_SGW_PORT_ID);
	}

	/* UP */
	for (uint8_t itr = 0; itr < up_node_addr.num_addr; itr++) {
		key.iface = SX_PORT_ID;
		memcpy(&key.peer_node_addr, &up_node_addr.node_addr[itr],
				sizeof(node_address_t));

		/* cleanup Peer node address entry */
		cleanup_peer_node_addr_entry(&key, SX_PORT_ID);
	}

	/* PGWC */
	for (uint8_t itr = 0; itr < pgw_node_addr.num_addr; itr++) {
		key.iface = S5S8_SGWC_PORT_ID;
		memcpy(&key.peer_node_addr,
				&pgw_node_addr.node_addr[itr], sizeof(node_address_t));

		/* cleanup Peer node address entry */
		cleanup_peer_node_addr_entry(&key, S5S8_SGWC_PORT_ID);
	}

	/* SGWC */
	for (uint8_t itr = 0; itr < sgw_node_addr.num_addr; itr++) {
		key.iface = S5S8_SGWC_PORT_ID;
		memcpy(&key.peer_node_addr,
				&sgw_node_addr.node_addr[itr], sizeof(node_address_t));

		/* cleanup Peer node address entry */
		cleanup_peer_node_addr_entry(&key, S5S8_SGWC_PORT_ID);
	}
	return 0;
}
/**
* @brief : Cleanup session using Peer csid entry
* @param : Peer csids
* @param : iface
* @return : Returns 0 in case of success, -1 otherwise
*/
static int8_t
cleanup_sess_by_peer_csid_entry(fqcsid_t *peer_csids, uint8_t iface)
{
	int ret = 0;
	int8_t ebi = 0;
	/* NOTE(review): num_csid is never advanced, so only index 0 of each
	 * peer FQ-CSID array is consulted below. Multiple CSIDs per peer are
	 * not handled here yet — consistent with the "TODO: Handle Multiple
	 * CSID" notes elsewhere in this file; confirm before relying on it. */
	uint8_t num_csid = 0;
	int8_t ebi_index = 0;
	uint32_t teid_key = 0;
	fqcsid_t csid = {0};
	peer_csid_key_t key = {0};
	peer_node_addr_key_t key_t = {0};
	node_addr_t mme_node_addr = {0};
	node_addr_t sgw_node_addr = {0};
	node_addr_t pgw_node_addr = {0};
	node_addr_t up_node_addr = {0};
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;

	/* Walk each peer CSID, tearing down every session linked to it */
	for (uint8_t itr1 = 0; itr1 < peer_csids->num_csid; itr1++) {
		sess_csid *tmp = NULL;
		sess_csid *current = NULL;

		key.iface = iface;
		key.peer_local_csid = peer_csids->local_csid[itr1];
		memcpy(&key.peer_node_addr, &peer_csids->node_addr, sizeof(node_address_t));

		tmp = get_sess_peer_csid_entry(&key, REMOVE_NODE);
		if (tmp == NULL) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Failed to get CSID entry, CSID: %u\n", LOG_VALUE,
					peer_csids->local_csid[itr1]);
			continue;
		}
		/* Check SEID is not ZERO */
		if ((tmp->cp_seid == 0) && (tmp->next == 0)) {
			continue;
		}

		current = tmp;
		while (current != NULL) {
			teid_key = UE_SESS_ID(current->cp_seid);
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
					"Initiate Cleanup for teid : 0x%x \n",
					LOG_VALUE, teid_key);

			ebi = UE_BEAR_ID(current->cp_seid);
			ebi_index = GET_EBI_INDEX(ebi);
			if (ebi_index == -1) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"Invalid EBI ID\n", LOG_VALUE);
				/* Unlink and free this list node, then continue */
				tmp = current->next;
				rte_free(current);
				current = tmp;
				continue;
			}

			ret = rte_hash_lookup_data(ue_context_by_fteid_hash,
					(const void *) &teid_key,
					(void **) &context);
			if (ret < 0 || context == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"ERROR : Failed to get UE context for teid : %u \n",
						LOG_VALUE, teid_key);
				/* Unlink and free this list node, then continue */
				tmp = current->next;
				rte_free(current);
				current = tmp;
				continue;
			}

			/* Stop any pending DDN / DL-buffering timers for this session */
			delete_ddn_timer_entry(timer_by_teid_hash, teid_key, ddn_by_seid_hash);
			delete_ddn_timer_entry(dl_timer_by_teid_hash, teid_key, pfcp_rep_by_seid_hash);

			pdn = context->pdns[ebi_index];
			if (pdn == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"ERROR : Failed to get PDN context for seid : %u \n",
						LOG_VALUE, current->cp_seid);
				/* Unlink and free this list node, then continue */
				tmp = current->next;
				rte_free(current);
				current = tmp;
				continue;
			}

			sess_csid *head = NULL;
			/* Unlink this session from the OTHER peers' CSID session lists,
			 * skipping whichever peer triggered this cleanup (address match).
			 * NOTE: these branches repurpose `key`; it is rebuilt after the
			 * loop before being used again. */
			if ((pdn->mme_csid.num_csid) &&
					(COMPARE_IP_ADDRESS(peer_csids->node_addr, pdn->mme_csid.node_addr) != 0)) {
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
						"Removing Session from MME CSID Link List \n", LOG_VALUE);
				/* Remove the session link from MME CSID */
				key.iface = ((context->cp_mode != PGWC) ? S11_SGW_PORT_ID : S5S8_PGWC_PORT_ID);
				key.peer_local_csid = pdn->mme_csid.local_csid[num_csid];
				memcpy(&key.peer_node_addr, &pdn->mme_csid.node_addr, sizeof(node_address_t));
				head = get_sess_peer_csid_entry(&key, REMOVE_NODE);
				remove_sess_entry(head, pdn->seid, &key);
			}

			if ((pdn->up_csid.num_csid) &&
					(COMPARE_IP_ADDRESS(peer_csids->node_addr, pdn->up_csid.node_addr) != 0)) {
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
						"Removing Session from DP CSID Link List \n", LOG_VALUE);
				/* Remove the session link from UP CSID */
				key.iface = SX_PORT_ID;
				key.peer_local_csid = pdn->up_csid.local_csid[num_csid];
				memcpy(&key.peer_node_addr, &pdn->up_csid.node_addr, sizeof(node_address_t));
				head = get_sess_peer_csid_entry(&key, REMOVE_NODE);
				remove_sess_entry(head, pdn->seid, &key);
			}

			if ((pdn->pgw_csid.num_csid) && (context->cp_mode == SGWC)
					&& (COMPARE_IP_ADDRESS(peer_csids->node_addr, pdn->pgw_csid.node_addr) != 0)) {
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
						"Removing Session from PGWC CSID Link List \n", LOG_VALUE);
				/* Remove the session link from PGWC CSID */
				key.iface = S5S8_SGWC_PORT_ID;
				key.peer_local_csid = pdn->pgw_csid.local_csid[num_csid];
				memcpy(&key.peer_node_addr, &pdn->pgw_csid.node_addr, sizeof(node_address_t));
				head = get_sess_peer_csid_entry(&key, REMOVE_NODE);
				remove_sess_entry(head, pdn->seid, &key);
			}

			if ((pdn->sgw_csid.num_csid) && (context->cp_mode == PGWC)
					&& (COMPARE_IP_ADDRESS(peer_csids->node_addr, pdn->sgw_csid.node_addr) != 0)) {
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
						"Removing Session from SGWC CSID Link List \n", LOG_VALUE);
				/* Remove the session link from SGWC CSID */
				key.iface = S5S8_PGWC_PORT_ID;
				key.peer_local_csid = pdn->sgw_csid.local_csid[num_csid];
				memcpy(&key.peer_node_addr, &pdn->sgw_csid.node_addr, sizeof(node_address_t));
				head = get_sess_peer_csid_entry(&key, REMOVE_NODE);
				remove_sess_entry(head, pdn->seid, &key);
			}

			/* Retrieving this node's own (local) CSID from the pdn */
			if (context->cp_mode != PGWC) {
				csid.local_csid[num_csid] = pdn->sgw_csid.local_csid[num_csid];
				memcpy(&csid.node_addr, &pdn->sgw_csid.node_addr, sizeof(node_address_t));
				csid.num_csid = pdn->sgw_csid.num_csid;
			} else {
				csid.local_csid[num_csid] = pdn->pgw_csid.local_csid[num_csid];
				memcpy(&csid.node_addr, &pdn->pgw_csid.node_addr, sizeof(node_address_t));
				csid.num_csid = pdn->pgw_csid.num_csid;
			}

			/* Remove session link from local CSID */
			for (uint8_t itr = 0; itr < csid.num_csid; ++itr) {
				sess_csid *tmp1 = NULL;
				tmp1 = get_sess_csid_entry(csid.local_csid[itr], REMOVE_NODE);
				if (tmp1 != NULL) {
					/* Remove node from csid linked list */
					tmp1 = remove_sess_csid_data_node(tmp1, pdn->seid);
					int8_t ret = 0;
					/* Update CSID Entry in table */
					ret = rte_hash_add_key_data(seids_by_csid_hash,
							&csid.local_csid[itr], tmp1);
					if (ret) {
						/* Fixed: original format string carried a trailing
						 * "%s" with no matching argument (undefined behavior
						 * in the variadic call). */
						clLog(clSystemLog, eCLSeverityCritical,
								LOG_FORMAT"Failed to add Session IDs entry for CSID = %u\n",
								LOG_VALUE, csid.local_csid[itr]);
					}
					if (tmp1 == NULL) {
						/* Local CSID list is now empty: drop the CSID-to-CSID
						 * mappings and the local CSID entry itself. */
						/* MME FQ-CSID */
						if(pdn->mme_csid.num_csid != 0) {
							del_csid_entry_hash(&pdn->mme_csid, &csid,
									((context->cp_mode == PGWC)? S5S8_PGWC_PORT_ID : S11_SGW_PORT_ID));
						}
						/* SGWC FQ-CSID */
						if(PGWC == context->cp_mode && (pdn->sgw_csid.num_csid)) {
							del_csid_entry_hash(&pdn->sgw_csid, &csid, S5S8_PGWC_PORT_ID);
						}
						/* PGWC FQ-CSID */
						if(SGWC == context->cp_mode && (pdn->pgw_csid.num_csid)) {
							del_csid_entry_hash(&pdn->pgw_csid, &csid, S5S8_SGWC_PORT_ID);
						}
						if (SX_PORT_ID != iface) {
							/* UP FQ-CSID */
							if(pdn->up_csid.num_csid) {
								del_csid_entry_hash(&pdn->up_csid, &csid, SX_PORT_ID);
							}
						}
						del_sess_csid_entry(csid.local_csid[itr]);
					}
				}
			}
			cleanup_sess_csid_entry(pdn);

			/* Collect peer node addresses for the hash cleanup below */
			if (context->cp_mode != PGWC) {
				/* MME S11 IP */
				match_and_add_peer_node_addr(&context->s11_mme_gtpc_ip,
						&mme_node_addr);
				/* UP SX IP */
				match_and_add_peer_node_addr(&pdn->upf_ip,
						&up_node_addr);
				/* PGWC S5S8 IP */
				if (is_present(&pdn->s5s8_pgw_gtpc_ip)) {
					match_and_add_peer_node_addr(&pdn->s5s8_pgw_gtpc_ip,
							&pgw_node_addr);
				}
			} else {
				/* SGWC S5S8 IP */
				match_and_add_peer_node_addr(&pdn->s5s8_pgw_gtpc_ip,
						&sgw_node_addr);
				/* UP SX IP */
				match_and_add_peer_node_addr(&pdn->upf_ip,
						&up_node_addr);
			}

			/* Delete UE session entry from UE Hash */
			if (del_sess_by_csid_entry(pdn)) {
				/* TODO Handle Error */
			}

			context->num_pdns--;
			if (context->num_pdns == 0) {
				/* Delete UE context entry from IMSI Hash */
				if (rte_hash_del_key(ue_context_by_imsi_hash, &context->imsi) < 0) {
					clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Error on "
							"Deleting UE context entry from IMSI Hash\n",
							LOG_VALUE);
				}
				rte_free(context);
				context = NULL;
				/* Delete UE context entry from UE Hash (teid_key is a local
				 * copy, still valid after the context is freed) */
				if (rte_hash_del_key(ue_context_by_fteid_hash, &teid_key) < 0) {
					clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Error on "
							"Deleting UE context entry from UE Hash\n", LOG_VALUE);
				}
			}

			update_sys_stat(number_of_users, DECREMENT);
			update_sys_stat(number_of_active_session, DECREMENT);

			/* Advance to the next list node and free the current one */
			tmp = current->next;
			rte_free(current);
			current = tmp;
		}

		/* Fixed: rebuild the lookup key before storing the emptied list
		 * head. `key` was clobbered inside the loop above (re-used for the
		 * MME/UP/PGW/SGW unlink lookups), so the original code stored the
		 * NULL entry under whatever key the last branch happened to leave,
		 * corrupting seid_by_peer_csid_hash. */
		key.iface = iface;
		key.peer_local_csid = peer_csids->local_csid[itr1];
		memcpy(&key.peer_node_addr, &peer_csids->node_addr, sizeof(node_address_t));
		/* Adding the null entry in the hash */
		rte_hash_add_key_data(seid_by_peer_csid_hash, &key, tmp);
	}

	/* Delete Peer CSID entry */
	for (uint8_t itr = 0; itr < peer_csids->num_csid; ++itr) {
		key.iface = iface;
		key.peer_local_csid = peer_csids->local_csid[itr];
		memcpy(&key.peer_node_addr, &peer_csids->node_addr, sizeof(node_address_t));
		if (del_sess_peer_csid_entry(&key) < 0) {
			clLog(clSystemLog, eCLSeverityDebug,LOG_FORMAT"Error on "
					"Deleting peer CSID entry\n", LOG_VALUE);
		}
	}

	/* MME */
	for (uint8_t itr = 0; itr < mme_node_addr.num_addr; itr++) {
		key_t.iface = S11_SGW_PORT_ID;
		memcpy(&key_t.peer_node_addr, &mme_node_addr.node_addr[itr],
				sizeof(node_address_t));
		/* cleanup Peer node address entry */
		cleanup_peer_node_addr_entry(&key_t, S11_SGW_PORT_ID);
	}
	/* UP */
	for (uint8_t itr = 0; itr < up_node_addr.num_addr; itr++) {
		key_t.iface = SX_PORT_ID;
		memcpy(&key_t.peer_node_addr, &up_node_addr.node_addr[itr],
				sizeof(node_address_t));
		/* cleanup Peer node address entry */
		cleanup_peer_node_addr_entry(&key_t, SX_PORT_ID);
	}
	/* PGWC */
	for (uint8_t itr = 0; itr < pgw_node_addr.num_addr; itr++) {
		key_t.iface = S5S8_SGWC_PORT_ID;
		memcpy(&key_t.peer_node_addr, &pgw_node_addr.node_addr[itr],
				sizeof(node_address_t));
		/* cleanup Peer node address entry */
		cleanup_peer_node_addr_entry(&key_t, S5S8_SGWC_PORT_ID);
	}
	/* SGWC */
	for (uint8_t itr = 0; itr < sgw_node_addr.num_addr; itr++) {
		key_t.iface = S5S8_SGWC_PORT_ID;
		memcpy(&key_t.peer_node_addr, &sgw_node_addr.node_addr[itr],
				sizeof(node_address_t));
		/* cleanup Peer node address entry */
		cleanup_peer_node_addr_entry(&key_t, S5S8_SGWC_PORT_ID);
	}
	return 0;
}
/**
* @brief : Send del PDN con. set req. to peer node's
* @param : csids, Cotent local csid list.
* @param : ifacce,
* @return : Returns 0 in case of success,
*/
static int8_t
send_delete_pdn_con_set_req(fqcsid_t *csids, uint8_t iface)
{
/* Phase 1 collects the distinct MME/SGW/PGW peer node addresses of the
 * sessions linked to the given local CSIDs; phase 2 encodes one Delete PDN
 * Connection Set Request and sends it to the collected peers, choosing the
 * destination set (SGWs, MMEs, PGWs) from the deployment role inferred by
 * comparing the first PGW/SGW address against the configured S5S8/S11 IPs. */
uint8_t num_mme_node_addr = 0;
uint8_t num_sgw_node_addr = 0;
uint8_t num_pgw_node_addr = 0;
int8_t ebi = 0;
int8_t ebi_index = 0;
/* NOTE(review): tmp_cnt is never incremented, so the role checks below
 * always inspect element 0 of the address arrays — confirm intentional. */
uint8_t tmp_cnt = 0;
int ret = 0;
uint16_t payload_length = 0;
uint32_t teid_key = 0;
node_address_t mme_node_addrs[MAX_CSID] = {0};
node_address_t pgw_node_addrs[MAX_CSID] = {0};
node_address_t sgw_node_addrs[MAX_CSID] = {0};
ue_context *context = NULL;
pdn_connection *pdn = NULL;
sess_csid *tmp = NULL;
sess_csid *current = NULL;
del_pdn_conn_set_req_t del_pdn_conn_req = {0};
bzero(&tx_buf, sizeof(tx_buf));
gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
memset(gtpv2c_tx, 0, sizeof(gtpv2c_header_t));
/* Phase 1: gather unique peer addresses per CSID */
for (uint8_t itr = 0; itr < csids->num_csid; itr++) {
tmp = get_sess_csid_entry(csids->local_csid[itr], REMOVE_NODE);
if (tmp == NULL) {
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Failed to get CSID entry, CSID: %u\n", LOG_VALUE,
csids->local_csid[itr]);
continue;
}
/* Check SEID is not ZERO */
if ((tmp->cp_seid == 0) && (tmp->next == 0)) {
continue;
}
current = tmp;
while (current != NULL ) {
teid_key = UE_SESS_ID(current->cp_seid);
ebi = UE_BEAR_ID(current->cp_seid);
ebi_index = GET_EBI_INDEX(ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
"Invalid EBI ID\n", LOG_VALUE);
/* Assign Next node address */
tmp = current->next;
current = tmp;
continue;
}
ret = rte_hash_lookup_data(ue_context_by_fteid_hash,
(const void *) &teid_key,
(void **) &context);
if (ret < 0 || context == NULL) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
"ERROR : Failed to get UE context for teid : %u \n",
LOG_VALUE, teid_key);
/* Assign Next node address */
tmp = current->next;
current = tmp;
continue;
}
pdn = context->pdns[ebi_index];
if (pdn == NULL) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
"ERROR : Failed to get PDN context for seid : %u \n",
LOG_VALUE, current->cp_seid);
/* Assign Next node address */
tmp = current->next;
current = tmp;
continue;
}
/* Record the MME address if not already collected */
if (is_present(&pdn->mme_csid.node_addr)) {
if ((match_node_addr(num_mme_node_addr, &pdn->mme_csid.node_addr,
mme_node_addrs)) == 0)
{
fill_peer_info(&mme_node_addrs[num_mme_node_addr++],
&pdn->mme_csid.node_addr);
}
}
/* Record the SGW address: on SGW/SAEGWC take it from the SGW FQ-CSID,
 * on PGWC take the peer SGW's S5S8 GTP-C address instead */
if (context->cp_mode != PGWC) {
if (is_present(&pdn->sgw_csid.node_addr)) {
if ((match_node_addr(num_sgw_node_addr, &pdn->sgw_csid.node_addr,
sgw_node_addrs)) == 0)
{
fill_peer_info(&sgw_node_addrs[num_sgw_node_addr++],
&pdn->sgw_csid.node_addr);
}
}
} else {
if (is_present(&pdn->sgw_csid.node_addr)) {
if ((match_node_addr(num_sgw_node_addr, &pdn->s5s8_sgw_gtpc_ip,
sgw_node_addrs)) == 0)
{
fill_peer_info(&sgw_node_addrs[num_sgw_node_addr++],
&pdn->s5s8_sgw_gtpc_ip);
}
}
}
/* Record the PGW address if not already collected */
if (is_present(&pdn->pgw_csid.node_addr)) {
if ((match_node_addr(num_pgw_node_addr, &pdn->pgw_csid.node_addr,
pgw_node_addrs)) == 0) {
fill_peer_info(&pgw_node_addrs[num_pgw_node_addr++],
&pdn->pgw_csid.node_addr);
}
}
/* Assign Next node address */
tmp = current->next;
current = tmp;
/* NOTE(review): this break stops after the FIRST valid session of each
 * CSID, so peer addresses of later sessions on the same CSID are never
 * collected — verify this is the intended behavior. */
break;
}
}
/* if pgw_node_addr is not match with config s5s8 ip, i.e cp type is SGWC
 * if pgw_node_addr is set to zero , i.e cp type is SAEGWC */
/* Fill the Delete PDN Request */
fill_gtpc_del_set_pdn_conn_req(&del_pdn_conn_req, csids, &pgw_node_addrs[tmp_cnt]);
/* cnd == 0 => the first PGW address is this node's own configured S5S8
 * (or S11) address, i.e. this node is acting as the PGW side */
int cnd = (((pgw_node_addrs[tmp_cnt].ip_type == PDN_TYPE_IPV4)
|| (pgw_node_addrs[tmp_cnt].ip_type == IPV4_GLOBAL_UNICAST))?
memcmp(&pgw_node_addrs[tmp_cnt].ipv4_addr, &config.s5s8_ip.s_addr, IPV4_SIZE) :
memcmp(&pgw_node_addrs[tmp_cnt].ipv6_addr, &config.s5s8_ip_v6.s6_addr, IPV6_SIZE));
if (cnd != 0) {
cnd = (((pgw_node_addrs[tmp_cnt].ip_type == PDN_TYPE_IPV4)
|| (pgw_node_addrs[tmp_cnt].ip_type == IPV4_GLOBAL_UNICAST))?
memcmp(&pgw_node_addrs[tmp_cnt].ipv4_addr, &config.s11_ip.s_addr, IPV4_SIZE) :
memcmp(&pgw_node_addrs[tmp_cnt].ipv6_addr, &config.s11_ip_v6.s6_addr, IPV6_SIZE));
}
/* PGWC: notify the collected SGWs over S5S8 */
if ((num_pgw_node_addr != 0) && (cnd == 0)) {
if (iface != S5S8_PGWC_PORT_ID) {
/* Encode the del pdn conn set request*/
uint16_t msg_len = 0;
memset(gtpv2c_tx, 0, sizeof(gtpv2c_header_t));
msg_len = encode_del_pdn_conn_set_req(&del_pdn_conn_req, (uint8_t *)gtpv2c_tx);
gtpv2c_tx->gtpc.message_len = htons(msg_len - IE_HEADER_SIZE);
/* Send the Delete PDN Request to peer node */
payload_length = 0;
payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
+ sizeof(gtpv2c_tx->gtpc);
for (uint8_t itr2 = 0; itr2 < num_sgw_node_addr; itr2++) {
ret = set_dest_address(sgw_node_addrs[itr2], &s5s8_recv_sockaddr);
if (ret < 0) {
clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
"IP address", LOG_VALUE);
}
gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
s5s8_recv_sockaddr, SENT);
(s5s8_recv_sockaddr.type == IPV6_TYPE) ?
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Send Delete PDN Connection Set Request to SGW, Node IPv6 Addr:"IPv6_FMT"\n",
LOG_VALUE, IPv6_PRINT(IPv6_CAST(s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr))):
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Send Delete PDN Connection Set Request to SGW, Node IPv4 Addr:"IPV4_ADDR"\n",
LOG_VALUE, IPV4_ADDR_HOST_FORMAT(s5s8_recv_sockaddr.ipv4.sin_addr.s_addr));
}
num_sgw_node_addr = 0;
}
} else {
/* SGWC / SAEGWC: notify the collected MMEs over S11 */
if (iface != S11_SGW_PORT_ID) {
/* Encode the del pdn conn set request*/
uint16_t msg_len = 0;
memset(gtpv2c_tx, 0, sizeof(gtpv2c_header_t));
msg_len = encode_del_pdn_conn_set_req(&del_pdn_conn_req, (uint8_t *)gtpv2c_tx);
gtpv2c_tx->gtpc.message_len = htons(msg_len - IE_HEADER_SIZE);
/* Send the Delete PDN Request to peer node */
payload_length = 0;
payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
+ sizeof(gtpv2c_tx->gtpc);
for (uint8_t itr3 = 0; itr3 < num_mme_node_addr; itr3++) {
ret = set_dest_address(mme_node_addrs[itr3], &s11_mme_sockaddr);
if (ret < 0) {
clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
"IP address", LOG_VALUE);
}
gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
s11_mme_sockaddr, SENT);
(s11_mme_sockaddr.type == IPV6_TYPE) ?
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Send Delete PDN Connection Set Request to MME, Node IPv6 Addr:"IPv6_FMT"\n",
LOG_VALUE, IPv6_PRINT(IPv6_CAST(s11_mme_sockaddr.ipv6.sin6_addr.s6_addr))):
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Send Delete PDN Connection Set Request to MME, Node IPv4 Addr:"IPV4_ADDR"\n",
LOG_VALUE, IPV4_ADDR_HOST_FORMAT(s11_mme_sockaddr.ipv4.sin_addr.s_addr ));
}
num_mme_node_addr = 0;
}
/* SGWC: if the first SGW address is this node's own S11 address,
 * additionally notify the collected PGWs over S5S8 */
cnd = (((sgw_node_addrs[tmp_cnt].ip_type == PDN_TYPE_IPV4)
|| (sgw_node_addrs[tmp_cnt].ip_type == IPV4_GLOBAL_UNICAST)) ?
memcmp(&sgw_node_addrs[tmp_cnt].ipv4_addr, &config.s11_ip.s_addr, IPV4_SIZE) :
memcmp(&sgw_node_addrs[tmp_cnt].ipv6_addr, &config.s11_ip_v6.s6_addr, IPV6_SIZE));
if ((num_pgw_node_addr != 0) && (cnd == 0)) {
if (iface != S5S8_SGWC_PORT_ID) {
/* Encode the del pdn conn set request*/
uint16_t msg_len = 0;
memset(gtpv2c_tx, 0, sizeof(gtpv2c_header_t));
msg_len = encode_del_pdn_conn_set_req(&del_pdn_conn_req, (uint8_t *)gtpv2c_tx);
gtpv2c_tx->gtpc.message_len = htons(msg_len - IE_HEADER_SIZE);
/* Send the Delete PDN Request to peer node */
payload_length = 0;
payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
+ sizeof(gtpv2c_tx->gtpc);
for (uint8_t itr4 = 0; itr4 < num_pgw_node_addr; itr4++) {
ret = set_dest_address(pgw_node_addrs[itr4], &s5s8_recv_sockaddr);
if (ret < 0) {
clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
"IP address", LOG_VALUE);
}
gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
s5s8_recv_sockaddr, SENT);
(s5s8_recv_sockaddr.type == IPV6_TYPE) ?
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Send Delete PDN Connection Set Request to PGW, Node IPv6 Addr:"IPv6_FMT"\n",
LOG_VALUE, IPv6_PRINT(IPv6_CAST(s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr))):
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Send Delete PDN Connection Set Request to PGW, Node IPv4 Addr:"IPV4_ADDR"\n",
LOG_VALUE, IPV4_ADDR_HOST_FORMAT(s5s8_recv_sockaddr.ipv4.sin_addr.s_addr));
}
num_pgw_node_addr = 0;
}
}
}
return 0;
}
/* Cleanup Session information by local csid*/
/* Cleanup Session information by local csid.
 *
 * Resolves the peer's FQ-CSIDs from its node address (falling back to the
 * address-mapping hash), fans out Delete PDN Connection Set / PGW Restart
 * Notification messages, then tears down all local state (sessions, CSID
 * mappings, UPF hash entries) tied to those CSIDs.
 *
 * @param node_addr : peer node address whose sessions must be cleaned up
 * @param iface     : interface the peer was learnt on (S11/S5S8/SX port id)
 * @return 0 on success (including "nothing left to clean"), -1 on failure
 */
int8_t
del_peer_node_sess(node_address_t *node_addr, uint8_t iface)
{
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT":START\n", LOG_VALUE);
	int ret = 0;
	fqcsid_t csids = {0};
	peer_node_addr_key_t key_t = {0};
	fqcsid_ie_node_addr_t *tmp = NULL;
	fqcsid_t *peer_csids = NULL;

	/* Get peer CSID associated with node */
	peer_csids = get_peer_addr_csids_entry(node_addr,
			UPDATE_NODE);
	if (peer_csids == NULL) {
		/* Fall back: resolve the FQ-CSID node address via the
		 * peer-node-address mapping and retry */
		key_t.iface = iface;
		memcpy(&key_t.peer_node_addr, node_addr, sizeof(node_address_t));
		tmp = get_peer_node_addr_entry(&key_t, UPDATE_NODE);
		if (tmp != NULL)
			peer_csids = get_peer_addr_csids_entry(&tmp->fqcsid_node_addr, UPDATE_NODE);

		/* Delete UPF hash entry */
		if (peer_csids == NULL) {
			/* Fixed: pass the key itself (node_addr), not the address of
			 * the pointer (&node_addr) — the original hashed the stack
			 * slot holding the pointer, so the entry was never deleted. */
			if (rte_hash_del_key(upf_context_by_ip_hash, node_addr) < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT" Error in Deleting UPF hash entry\n", LOG_VALUE);
			}
			if (iface == SX_PORT_ID) {
				/* Delete entry from teid info list for given upf*/
				delete_entry_from_teid_list(*node_addr, &upf_teid_info_head);
				if (rte_hash_del_key(upf_context_by_ip_hash, node_addr) < 0) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT" Error in Deleting UPF hash entry\n", LOG_VALUE);
				}
			}
			(node_addr->ip_type == IPV6_TYPE) ?
				clLog(clSystemLog, eCLSeverityDebug,
						LOG_FORMAT"Peer CSIDs are already cleanup, Node IPv6 Addr:"IPv6_FMT"\n",
						LOG_VALUE, IPv6_PRINT(IPv6_CAST(node_addr->ipv6_addr))):
				clLog(clSystemLog, eCLSeverityDebug,
						LOG_FORMAT"Peer CSIDs are already cleanup, Node_Addr:"IPV4_ADDR"\n",
						LOG_VALUE, IPV4_ADDR_HOST_FORMAT(node_addr->ipv4_addr));
			return 0;
		}
	}

	/* Get the mapped local CSID */
	for (int8_t itr = 0; itr < peer_csids->num_csid; itr++) {
		csid_t *csid_entry = NULL;
		csid_key_t key = {0};
		key.local_csid = peer_csids->local_csid[itr];
		memcpy(&key.node_addr, &peer_csids->node_addr, sizeof(node_address_t));

		csid_entry = get_peer_csid_entry(&key, iface, REMOVE_NODE);
		if (csid_entry == NULL) {
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Failed to get CSID "
					"Entry while cleanup session \n", LOG_VALUE);
			return -1;
		}
		for (int8_t itr1 = 0; itr1 < csid_entry->num_csid; itr1++) {
			csids.local_csid[csids.num_csid++] = csid_entry->local_csid[itr1];
		}
		memcpy(&csids.node_addr, &csid_entry->node_addr, sizeof(node_address_t));
	}

	if (csids.num_csid == 0) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"CSIDs are already cleanup \n", LOG_VALUE);
		return 0;
	}

	/* Notify the affected peers before tearing down local state */
	send_delete_pdn_con_set_req(&csids, iface);

	if ((iface != S11_SGW_PORT_ID) && (iface != S5S8_PGWC_PORT_ID)) {
		send_gtpc_pgw_restart_notification(&csids, iface);
	}

	/* Cleanup Internal data structures */
	ret = cleanup_sess_by_csid_entry(&csids, iface);
	if (ret) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Failed to get CSID "
				"Entry while cleanup session \n", LOG_VALUE);
		return -1;
	}

	/* Drop the peer-node-address mapping if the fallback path filled it */
	if (is_present(&key_t.peer_node_addr)) {
		ret = del_peer_node_addr_entry(&key_t);
		if (ret) {
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Failed to delete CSID "
					"Entry from hash while cleanup session \n", LOG_VALUE);
			return -1;
		}
	}

	/* Cleanup Internal data structures */
	ret = del_csid_entry_hash(peer_csids, &csids, iface);
	if (ret) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Failed to delete CSID "
				"Entry from hash while cleanup session \n", LOG_VALUE);
		return -1;
	}

	/* Delete UPF hash entry */
	if (iface == SX_PORT_ID) {
		/* Delete entry from teid info list for given upf*/
		delete_entry_from_teid_list(*node_addr, &upf_teid_info_head);
		/* Fixed: pass node_addr, not &node_addr (see note above) */
		if (rte_hash_del_key(upf_context_by_ip_hash, node_addr) < 0) {
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Failed to delete UPF "
					"hash entry \n", LOG_VALUE);
		}
	}

	/* Delete local csid */
	for (uint8_t itr = 0; itr < csids.num_csid; itr++) {
		ret = del_sess_csid_entry(csids.local_csid[itr]);
		if (ret) {
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
					"Failed to delete CSID Entry from hash "
					"while cleanup session\n", LOG_VALUE);
		}
	}
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT":END\n", LOG_VALUE);
	return 0;
}
/**
* @brief : Send del PDN con. set req. to peer node's
* @param : peer_csids, Content peer csid list.
* @param : ifacce,
* @return : Returns 0 in case of success,
*/
static int8_t
send_del_pdn_con_set_req(fqcsid_t *peer_csids, uint8_t iface)
{
	/* Collect the distinct MME and PGW node addresses of every session
	 * linked to the given peer CSIDs, then forward a Delete PDN Connection
	 * Set Request: to the PGWs when the trigger came from S11 (MME side),
	 * or to the MMEs when it came from S5S8 (SGW side). */
	uint8_t num_mme_node_addr = 0;
	uint8_t num_pgw_node_addr = 0;
	int8_t ebi = 0;
	int8_t ebi_index = 0;
	int ret = 0;
	uint16_t payload_length = 0;
	uint32_t teid_key = 0;
	node_address_t mme_node_addrs[MAX_CSID] = {0};
	node_address_t pgw_node_addrs[MAX_CSID] = {0};
	peer_csid_key_t key = {0};
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	sess_csid *tmp = NULL;
	sess_csid *current = NULL;

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	/* Phase 1: walk the session list of each peer CSID and gather the
	 * unique MME/PGW peer addresses */
	for (uint8_t itr = 0; itr < peer_csids->num_csid; itr++) {
		key.iface = iface;
		key.peer_local_csid = peer_csids->local_csid[itr];
		memcpy(&key.peer_node_addr, &peer_csids->node_addr,
				sizeof(node_address_t));

		tmp = get_sess_peer_csid_entry(&key, REMOVE_NODE);
		if (tmp == NULL) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Failed to get CSID entry, CSID: %u\n", LOG_VALUE,
					peer_csids->local_csid[itr]);
			continue;
		}
		/* Check SEID is not ZERO */
		if ((tmp->cp_seid == 0) && (tmp->next == 0)) {
			continue;
		}

		current = tmp;
		while (current != NULL ) {
			teid_key = UE_SESS_ID(current->cp_seid);
			ebi = UE_BEAR_ID(current->cp_seid);
			ebi_index = GET_EBI_INDEX(ebi);
			if (ebi_index == -1) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"Invalid EBI ID\n", LOG_VALUE);
				/* Assign Next node address */
				tmp = current->next;
				current = tmp;
				continue;
			}

			ret = rte_hash_lookup_data(ue_context_by_fteid_hash,
					(const void *) &teid_key,
					(void **) &context);
			if (ret < 0 || context == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"ERROR : Failed to get UE context for teid : %u \n",
						LOG_VALUE, teid_key);
				/* Assign Next node address */
				tmp = current->next;
				current = tmp;
				continue;
			}

			pdn = context->pdns[ebi_index];
			if (pdn == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"ERROR : Failed to get PDN context for seid : %u \n",
						LOG_VALUE, current->cp_seid);
				/* Assign Next node address */
				tmp = current->next;
				current = tmp;
				continue;
			}

			/* Record the MME address if not already collected */
			if(is_present(&pdn->mme_csid.node_addr)) {
				if ((match_node_addr(num_mme_node_addr, &pdn->mme_csid.node_addr,
								mme_node_addrs)) == 0)
				{
					fill_peer_info(&mme_node_addrs[num_mme_node_addr++],
							&pdn->mme_csid.node_addr);
				}
			}

			/* Record the PGW address if not already collected */
			if(is_present(&pdn->pgw_csid.node_addr)) {
				if ((match_node_addr(num_pgw_node_addr, &pdn->pgw_csid.node_addr,
								pgw_node_addrs)) == 0)
				{
					fill_peer_info(&pgw_node_addrs[num_pgw_node_addr++],
							&pdn->pgw_csid.node_addr);
				}
			}

			/* Assign Next node address */
			tmp = current->next;
			current = tmp;
		}
	}

	/* Phase 2: forward the request to the relevant peers.
	 * Fixed: both loops below declared `uint8_t itr2;` WITHOUT an
	 * initializer, so the loop bound check read an indeterminate value
	 * (undefined behavior — the loops could run 0..255 bogus iterations). */
	if (iface == S11_SGW_PORT_ID) {
		for (uint8_t itr2 = 0; itr2 < num_pgw_node_addr; itr2++) {
			ret = set_dest_address(pgw_node_addrs[itr2], &s5s8_recv_sockaddr);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
						"IP address", LOG_VALUE);
			}
			fill_gtpc_del_set_pdn_conn_req_t(gtpv2c_tx, peer_csids);
			payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
				+ sizeof(gtpv2c_tx->gtpc);
			/* Send the delete PDN set request to PGW */
			gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
					s5s8_recv_sockaddr, SENT);
			memset(gtpv2c_tx, 0, sizeof(gtpv2c_header_t));
		}
	} else if (iface == S5S8_SGWC_PORT_ID) {
		for (uint8_t itr2 = 0; itr2 < num_mme_node_addr; itr2++) {
			ret = set_dest_address(mme_node_addrs[itr2], &s11_mme_sockaddr);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
						"IP address", LOG_VALUE);
			}
			fill_gtpc_del_set_pdn_conn_req_t(gtpv2c_tx, peer_csids);
			payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
				+ sizeof(gtpv2c_tx->gtpc);
			/* Send the delete PDN set request to MME */
			gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
					s11_mme_sockaddr, SENT);
			memset(gtpv2c_tx, 0, sizeof(gtpv2c_header_t));
		}
	}
	return 0;
}
/**
* @brief : Send pfcp sess. del. ser req. to peer node's
* @param : peer_csids, Conten list of peer csid.
* @return : Returns 0 in case of success,
*/
static int8_t
send_pfcp_sess_del_set_req(fqcsid_t *peer_csids, uint8_t iface)
{
/* Collects the distinct UP (user-plane) node addresses of every session
 * linked to the given peer CSIDs, then builds one PFCP Session Set
 * Deletion Request and sends it to each collected UP node.
 * The session list nodes are only read here, never freed. */
uint8_t num_node_addr = 0;
int8_t ebi = 0;
int8_t ebi_index = 0;
int ret = 0;
uint32_t teid_key = 0;
node_address_t node_addrs[MAX_CSID] = {0};
peer_csid_key_t key = {0};
ue_context *context = NULL;
pdn_connection *pdn = NULL;
sess_csid *tmp = NULL;
sess_csid *current = NULL;
/* Phase 1: gather unique UP node addresses across all peer CSIDs */
for (uint8_t itr = 0; itr < peer_csids->num_csid; itr++)
{
key.iface = iface;
key.peer_local_csid = peer_csids->local_csid[itr];
memcpy(&key.peer_node_addr, &peer_csids->node_addr, sizeof(node_address_t));
tmp = get_sess_peer_csid_entry(&key, REMOVE_NODE);
if (tmp == NULL) {
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Failed to get CSID entry, CSID: %u\n", LOG_VALUE,
peer_csids->local_csid[itr]);
continue;
}
/* Check SEID is not ZERO */
if ((tmp->cp_seid == 0) && (tmp->next == 0)) {
continue;
}
current = tmp;
while (current != NULL ) {
teid_key = UE_SESS_ID(current->cp_seid);
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
"Found TEID : 0x%x\n", LOG_VALUE, teid_key);
ebi = UE_BEAR_ID(current->cp_seid);
ebi_index = GET_EBI_INDEX(ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
"Invalid EBI ID\n", LOG_VALUE);
/* Assign Next node address */
tmp = current->next;
current = tmp;
continue;
}
ret = rte_hash_lookup_data(ue_context_by_fteid_hash,
(const void *) &teid_key,
(void **) &context);
if (ret < 0 || context == NULL) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
"ERROR : Failed to get UE context for teid : %u \n",
LOG_VALUE, teid_key);
/* Assign Next node address */
tmp = current->next;
current = tmp;
continue;
}
pdn = context->pdns[ebi_index];
if (pdn == NULL) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
"ERROR : Failed to get PDN context for seid : %u \n",
LOG_VALUE, current->cp_seid);
/* Assign Next node address */
tmp = current->next;
current = tmp;
continue;
}
/* Record this session's UP node address if not already collected */
if(is_present(&pdn->up_csid.node_addr)) {
if ((match_node_addr(num_node_addr, &pdn->up_csid.node_addr,
node_addrs)) == 0)
{
fill_peer_info(&node_addrs[num_node_addr++],
&pdn->up_csid.node_addr);
}
}
/* Assign Next node address */
tmp = current->next;
current = tmp;
}
}
/* Phase 2: build the message once, then send it to every UP node */
/* Send the PFCP deletion session set request to PGW */
pfcp_sess_set_del_req_t del_set_req_t = {0};
/* Fill the PFCP session set deletion request */
cp_fill_pfcp_sess_set_del_req_t(&del_set_req_t, peer_csids);
/* Send the Delete set Request to peer node */
uint8_t pfcp_msg[PFCP_MSG_LEN]={0};
int encoded = encode_pfcp_sess_set_del_req_t(&del_set_req_t, pfcp_msg);
pfcp_header_t *header = (pfcp_header_t *) pfcp_msg;
header->message_len = htons(encoded - PFCP_IE_HDR_SIZE);
for (uint8_t itr2 = 0; itr2 < num_node_addr; itr2++) {
ret = set_dest_address(node_addrs[itr2], &upf_pfcp_sockaddr);
if (ret < 0) {
clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
"IP address", LOG_VALUE);
}
if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded, upf_pfcp_sockaddr, SENT) < 0 ) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to send "
"Delete PDN set Connection Request, Error: %s \n", LOG_VALUE, strerror(errno));
continue;
}
}
num_node_addr = 0;
return 0;
}
/* Handle an incoming GTPv2-C Delete PDN Connection Set Request.
 *
 * For each FQ-CSID IE present (MME / SGW / PGW), maps the peer's CSIDs to
 * the locally assigned CSIDs, forwards the request onward where required,
 * instructs the UP to delete the affected sessions (PFCP Session Set
 * Deletion), and finally tears down the local session/CSID state.
 *
 * NOTE(review): `csids` and `peer_csids` accumulate across all three IE
 * branches, and `iface` keeps the value of the LAST branch taken — the
 * final PFCP send and cleanup therefore use that last iface. Also, the SGW
 * branch (unlike MME/PGW) never calls send_del_pdn_con_set_req — confirm
 * both behaviors are intentional.
 *
 * @param del_pdn_req : decoded Delete PDN Connection Set Request
 * @return 0 on success (including nothing-to-do), -1 on cleanup failure
 */
int8_t
process_del_pdn_conn_set_req_t(del_pdn_conn_set_req_t *del_pdn_req)
{
int ret = 0;
uint8_t iface = 0;
fqcsid_t csids = {0};
fqcsid_t peer_csids = {0};
/* MME FQ-CSID */
if (del_pdn_req->mme_fqcsid.header.len) {
for (uint8_t itr = 0; itr < del_pdn_req->mme_fqcsid.number_of_csids; itr++) {
/* Get linked local csid */
csid_t *tmp = NULL;
csid_key_t key = {0};
key.local_csid = del_pdn_req->mme_fqcsid.pdn_csid[itr];
/* Build the lookup key from the IE's node-ID (IPv4 or IPv6) */
if (del_pdn_req->mme_fqcsid.node_id_type == IPV4_GLOBAL_UNICAST) {
key.node_addr.ip_type = PDN_TYPE_IPV4;
memcpy(&key.node_addr.ipv4_addr,
del_pdn_req->mme_fqcsid.node_address, IPV4_SIZE);
} else {
key.node_addr.ip_type = PDN_TYPE_IPV6;
memcpy(&key.node_addr.ipv6_addr,
del_pdn_req->mme_fqcsid.node_address, IPV6_ADDRESS_LEN);
}
tmp = get_peer_csid_entry(&key, S11_SGW_PORT_ID, REMOVE_NODE);
if (tmp == NULL) {
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Failed to get "
"MME FQ-CSID entry while processing Delete PDN set Connection "
"Request, Error: %s \n", LOG_VALUE, strerror(errno));
continue;
}
/* TODO: Hanlde Multiple CSID with single MME CSID */
for (uint8_t itr1 = 0; itr1 < tmp->num_csid; itr1++) {
csids.local_csid[csids.num_csid++] = tmp->local_csid[itr1];
}
memcpy(&csids.node_addr, &tmp->node_addr, sizeof(node_address_t));
peer_csids.local_csid[peer_csids.num_csid++] =
del_pdn_req->mme_fqcsid.pdn_csid[itr];
memcpy(&peer_csids.node_addr, &key.node_addr, sizeof(node_address_t));
}
peer_csids.instance = del_pdn_req->mme_fqcsid.header.instance;
iface = S11_SGW_PORT_ID;
/* Forward the request toward the PGW side */
if (csids.num_csid) {
send_del_pdn_con_set_req(&peer_csids, iface);
}
}
/* SGW FQ-CSID */
if (del_pdn_req->sgw_fqcsid.header.len) {
for (uint8_t itr = 0; itr < del_pdn_req->sgw_fqcsid.number_of_csids; itr++) {
/* Get linked local csid */
csid_t *tmp = NULL;
csid_key_t key = {0};
key.local_csid = del_pdn_req->sgw_fqcsid.pdn_csid[itr];
/* Build the lookup key from the IE's node-ID (IPv4 or IPv6) */
if (del_pdn_req->sgw_fqcsid.node_id_type == IPV4_GLOBAL_UNICAST) {
key.node_addr.ip_type = PDN_TYPE_IPV4;
memcpy(&key.node_addr.ipv4_addr,
del_pdn_req->sgw_fqcsid.node_address, IPV4_SIZE);
} else {
key.node_addr.ip_type = PDN_TYPE_IPV6;
memcpy(&key.node_addr.ipv6_addr,
del_pdn_req->sgw_fqcsid.node_address, IPV6_ADDRESS_LEN);
}
tmp = get_peer_csid_entry(&key, S5S8_PGWC_PORT_ID, REMOVE_NODE);
if (tmp == NULL) {
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Failed to get "
"SGW FQ-CSID entry while processing Delete PDN set Connection "
"Request\n", LOG_VALUE);
continue;
}
/* TODO: Hanlde Multiple CSID with single MME CSID */
for (uint8_t itr1 = 0; itr1 < tmp->num_csid; itr1++) {
csids.local_csid[csids.num_csid++] = tmp->local_csid[itr1];
}
memcpy(&csids.node_addr, &tmp->node_addr, sizeof(node_address_t));
peer_csids.local_csid[peer_csids.num_csid++] =
del_pdn_req->sgw_fqcsid.pdn_csid[itr];
memcpy(&peer_csids.node_addr, &key.node_addr, sizeof(node_address_t));
}
peer_csids.instance = del_pdn_req->sgw_fqcsid.header.instance;
iface = S5S8_PGWC_PORT_ID;
}
/* PGW FQ-CSID */
if (del_pdn_req->pgw_fqcsid.header.len) {
for (uint8_t itr = 0; itr < del_pdn_req->pgw_fqcsid.number_of_csids; itr++) {
/* Get linked local csid */
csid_t *tmp = NULL;
csid_key_t key = {0};
key.local_csid = del_pdn_req->pgw_fqcsid.pdn_csid[itr];
/* Build the lookup key from the IE's node-ID (IPv4 or IPv6) */
if (del_pdn_req->pgw_fqcsid.node_id_type == IPV4_GLOBAL_UNICAST) {
key.node_addr.ip_type = PDN_TYPE_IPV4;
memcpy(&key.node_addr.ipv4_addr,
del_pdn_req->pgw_fqcsid.node_address, IPV4_SIZE);
} else {
key.node_addr.ip_type = PDN_TYPE_IPV6;
memcpy(&key.node_addr.ipv6_addr,
del_pdn_req->pgw_fqcsid.node_address, IPV6_ADDRESS_LEN);
}
tmp = get_peer_csid_entry(&key, S5S8_SGWC_PORT_ID, REMOVE_NODE);
if (tmp == NULL) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
"PGW FQ-CSID entry while processing Delete PDN set Connection "
"Request\n", LOG_VALUE);
continue;
}
/* TODO: Hanlde Multiple CSID with single MME CSID */
for (uint8_t itr1 = 0; itr1 < tmp->num_csid; itr1++) {
csids.local_csid[csids.num_csid++] = tmp->local_csid[itr1];
}
memcpy(&csids.node_addr, &tmp->node_addr, sizeof(node_address_t));
peer_csids.local_csid[peer_csids.num_csid++] =
del_pdn_req->pgw_fqcsid.pdn_csid[itr];
memcpy(&peer_csids.node_addr, &key.node_addr, sizeof(node_address_t));
}
peer_csids.instance = del_pdn_req->pgw_fqcsid.header.instance;
iface = S5S8_SGWC_PORT_ID;
/* Forward the request toward the MME side */
if (csids.num_csid) {
send_del_pdn_con_set_req(&peer_csids, iface);
}
}
if (csids.num_csid == 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
"Not found peer associated CSIDs, CSIDs already cleanup \n",
LOG_VALUE);
return 0;
}
/* send pfcp session del. set req. to DP */
send_pfcp_sess_del_set_req(&peer_csids, iface);
/* Cleanup Internal data structures */
ret = cleanup_sess_by_peer_csid_entry(&peer_csids, iface);
if (ret) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to cleanup "
"Session by CSID entry\n", LOG_VALUE);
return -1;
}
/* TODO: UPDATE THE NODE ADDRESS */
//csids.node_addr = config.pfcp_ip.s_addr;
/* Cleanup Internal data structures */
ret = del_csid_entry_hash(&peer_csids, &csids, iface);
if (ret) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to cleanup "
"Session by CSID entry\n", LOG_VALUE);
return -1;
}
return 0;
}
/**
 * @brief : Fill and encode the GTPv2C Delete PDN Connection Set Response.
 * @param : gtpv2c_tx, transmit buffer the encoded response is written into
 * @param : seq_t, sequence number echoed from the triggering request
 * @param : cause_value, GTPv2C cause value reported back to the peer
 *          (parameter renamed from the misspelled "casue_value")
 * @return : Returns 0 always
 */
int8_t
fill_gtpc_del_set_pdn_conn_rsp(gtpv2c_header_t *gtpv2c_tx, uint8_t seq_t,
	uint8_t cause_value)
{
	del_pdn_conn_set_rsp_t del_pdn_conn_rsp = {0};

	/* TEID is zero for this message type */
	set_gtpv2c_teid_header((gtpv2c_header_t *)&del_pdn_conn_rsp.header,
			GTP_DELETE_PDN_CONNECTION_SET_RSP, 0, seq_t, 0);

	/* Set Cause value */
	set_ie_header(&del_pdn_conn_rsp.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
			sizeof(struct cause_ie_hdr_t));
	del_pdn_conn_rsp.cause.cause_value = cause_value;

	encode_del_pdn_conn_set_rsp(&del_pdn_conn_rsp, (uint8_t *)gtpv2c_tx);
	return 0;
}
/**
 * @brief : Process a received Delete PDN Connection Set Response.
 * @param : del_pdn_rsp, decoded response message
 * @return : Returns 0 when the peer accepted the request, -1 on any
 *           failure cause
 */
int8_t
process_del_pdn_conn_set_rsp_t(del_pdn_conn_set_rsp_t *del_pdn_rsp)
{
	if (del_pdn_rsp->cause.cause_value != GTPV2C_CAUSE_REQUEST_ACCEPTED) {
		/* The two string literals previously concatenated to
		 * "...processDelete..."; a separating space is required */
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to process "
				"Delete PDN Connection Set Response with cause : %s \n",
				LOG_VALUE, cause_str(del_pdn_rsp->cause.cause_value));
		return -1;
	}
	return 0;
}
/**
 * @brief : Handle an Update PDN Connection Set Request.
 *          Handling is not implemented; the message is accepted and ignored.
 * @param : upd_pdn_req, decoded request message (unused)
 * @return : Returns 0 always
 */
int8_t
process_upd_pdn_conn_set_req_t(upd_pdn_conn_set_req_t *upd_pdn_req)
{
	/* Suppress the unused-parameter warning until handling exists */
	(void)(upd_pdn_req);
	return 0;
}
/**
 * @brief : Handle an Update PDN Connection Set Response.
 *          Handling is not implemented; the message is accepted and ignored.
 * @param : upd_pdn_rsp, decoded response message (unused)
 * @return : Returns 0 always
 */
int8_t
process_upd_pdn_conn_set_rsp_t(upd_pdn_conn_set_rsp_t *upd_pdn_rsp)
{
	/* Suppress the unused-parameter warning until handling exists */
	(void)(upd_pdn_rsp);
	return 0;
}
/* Function */
/**
 * @brief : Process a PFCP Session Set Deletion Request received from the
 *          peer user-plane node: collect every local CSID linked with the
 *          peer's UP FQ-CSIDs, tear down the affected sessions and internal
 *          CSID mappings, then answer with a PFCP Session Set Deletion
 *          Response on the Sx interface.
 * @param : del_set_req, decoded PFCP Session Set Deletion Request
 * @param : peer_addr, source address of the requesting peer node
 * @return : Returns 0 on success, -1 otherwise
 */
int
process_pfcp_sess_set_del_req_t(pfcp_sess_set_del_req_t *del_set_req, peer_addr_t *peer_addr)
{
	int ret = 0;
	int offend_id = 0;
	uint8_t cause_id = 0;
	peer_addr_t peer_ip = {0};
	node_address_t upf_ip = {0};
	fqcsid_t csids = {0};
	fqcsid_t peer_csids = {0};
	pfcp_sess_set_del_rsp_t pfcp_del_resp = {0};
	upf_context_t *upf_context = NULL;

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
			"PFCP Session Set Deletion Request :: START \n", LOG_VALUE);

	/* Build the UPF node address from the request's source address */
	if (peer_addr->type == PDN_TYPE_IPV4) {
		upf_ip.ip_type = PDN_TYPE_IPV4;
		upf_ip.ipv4_addr = peer_addr->ipv4.sin_addr.s_addr;
	} else {
		upf_ip.ip_type = peer_addr->type;
		memcpy(&upf_ip.ipv6_addr,
				&(peer_addr->ipv6.sin6_addr.s6_addr), IPV6_ADDRESS_LEN);
	}

	/* Lookup upf context of peer node */
	/* need to revisite here */
	ret = rte_hash_lookup_data(upf_context_by_ip_hash,
			(const void*) &(upf_ip), (void **) &(upf_context));
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"NO ENTRY FOUND IN "
				"UPF HASH [%u]\n", LOG_VALUE, del_set_req->node_id.node_id_value_ipv4_address);
		return -1;
	}

	/* UP FQ-CSID: gather the local CSIDs linked with each peer CSID */
	if (del_set_req->up_fqcsid.header.len) {
		if (del_set_req->up_fqcsid.number_of_csids) {
			for (uint8_t itr = 0; itr < del_set_req->up_fqcsid.number_of_csids; itr++) {
				/* Get linked local csid */
				csid_t *tmp = NULL;
				csid_key_t key = {0};
				key.local_csid = del_set_req->up_fqcsid.pdn_conn_set_ident[itr];

				if (del_set_req->up_fqcsid.fqcsid_node_id_type == IPV4_GLOBAL_UNICAST) {
					key.node_addr.ip_type = PDN_TYPE_IPV4;
					memcpy(&key.node_addr.ipv4_addr,
							&(del_set_req->up_fqcsid.node_address), IPV4_SIZE);
				} else {
					key.node_addr.ip_type = PDN_TYPE_IPV6;
					memcpy(&key.node_addr.ipv6_addr,
							del_set_req->up_fqcsid.node_address, IPV6_ADDRESS_LEN);
				}

				tmp = get_peer_csid_entry(&key, SX_PORT_ID, REMOVE_NODE);
				if (tmp == NULL) {
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Failed to get "
							"SGW/PGW/SAEGW-U FQ-CSID entry while processing PFCP Session Set Deletion "
							"Request, Error: %s \n", LOG_VALUE, strerror(errno));
					continue;
				}

				/* TODO: Handle Multiple CSID with single MME CSID */
				/* Loop counter must be unsigned (was int8_t): a signed
				 * 8-bit index wraps negative once num_csid exceeds 127 */
				for (uint8_t itr1 = 0; itr1 < tmp->num_csid; itr1++) {
					csids.local_csid[csids.num_csid++] = tmp->local_csid[itr1];
				}
				csids.node_addr = tmp->node_addr;

				peer_csids.local_csid[peer_csids.num_csid++] =
					del_set_req->up_fqcsid.pdn_conn_set_ident[itr];
				memcpy(&peer_csids.node_addr, &(key.node_addr), sizeof(node_address_t));
			}
		}
	}

	if (!csids.num_csid) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Local CSIDs Found \n", LOG_VALUE);
		return 0;
	}

	/* Propagate the deletion towards the access side peers */
	send_delete_pdn_con_set_req(&csids, SX_PORT_ID);

	/* Cleanup Internal data structures */
	ret = cleanup_sess_by_csid_entry(&csids, SX_PORT_ID);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to local cleanup "
				"Session by CSID entry, Error: %s \n", LOG_VALUE, strerror(errno));
		return -1;
	}

	ret = del_csid_entry_hash(&peer_csids, &csids, SX_PORT_ID);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to delete "
				"CSID entry from hash, Error: %s \n", LOG_VALUE, strerror(errno));
		return -1;
	}

	/* fill pfcp set del resp */
	cause_id = REQUESTACCEPTED;
	fill_pfcp_sess_set_del_resp(&pfcp_del_resp, cause_id, offend_id);

	/* Echo the sequence number from the triggering request header */
	if (del_set_req->header.s) {
		pfcp_del_resp.header.seid_seqno.no_seid.seq_no =
			del_set_req->header.seid_seqno.has_seid.seq_no;
	} else {
		pfcp_del_resp.header.seid_seqno.no_seid.seq_no =
			del_set_req->header.seid_seqno.no_seid.seq_no;
	}

	uint8_t pfcp_msg[PFCP_MSG_LEN] = {0};
	int encoded = encode_pfcp_sess_set_del_rsp_t(&pfcp_del_resp, pfcp_msg);

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Sending response for PFCP "
			"Session Set Deletion Response\n", LOG_VALUE);

	memcpy(&peer_ip, peer_addr, sizeof(peer_addr_t));

	/* send pfcp set del resp on sx interface */
	if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded, peer_ip, ACC) < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to send "
				"PFCP Session Set Deletion Request, Error: %s \n", LOG_VALUE, strerror(errno));
	}

	/* Delete local csid */
	for (uint8_t itr = 0; itr < csids.num_csid; itr++) {
		ret = del_sess_csid_entry(csids.local_csid[itr]);
		if (ret) {
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
					"Failed to delete CSID Entry from hash "
					"while cleanup session\n", LOG_VALUE);
		}
	}

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"PFCP Session Set Deletion Request :: END \n", LOG_VALUE);
	return 0;
}
/* Function */
/**
 * @brief : Process a PFCP Session Set Deletion Response from the peer.
 * @param : del_set_rsp, decoded response message
 * @return : Returns 0 when the request was accepted, -1 otherwise
 */
int
process_pfcp_sess_set_del_rsp_t(pfcp_sess_set_del_rsp_t *del_set_rsp)
{
	if (del_set_rsp->cause.cause_value == REQUESTACCEPTED)
		return 0;

	clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"ERROR:Cause received Session Set deletion response is %d\n",
			LOG_VALUE, del_set_rsp->cause.cause_value);
	/* TODO: Add handling to send association to next upf
	 * for each buffered CSR */
	return -1;
}
/**
 * @brief : Remove a temporary local CSID from the list of local CSIDs
 *          linked with each of the given peer node's CSIDs; drop the peer
 *          CSID entry entirely once its list becomes empty.
 * @param : peer_fqcsid, peer node FQ-CSIDs whose linked lists are scanned
 * @param : tmp_csid, temporary local CSID to remove
 * @param : iface, interface identifier the peer CSID entries are keyed on
 * @return : Returns 0 on success, -1 otherwise
 */
int
remove_peer_temp_csid(fqcsid_t *peer_fqcsid, uint16_t tmp_csid, uint8_t iface)
{
	csid_t *local_csids = NULL;
	/* Renamed from "key_t", which shadows the POSIX key_t typedef */
	csid_key_t key = {0};

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Removing Temporary local CSID "
			"linked with peer node CSID :: START %d\n", LOG_VALUE, tmp_csid);

	if (peer_fqcsid == NULL)
		return -1;

	for (uint8_t itr = 0; itr < (peer_fqcsid)->num_csid; itr++) {
		key.local_csid = (peer_fqcsid)->local_csid[itr];
		fill_node_addr_info(&key.node_addr, &(peer_fqcsid)->node_addr);

		local_csids = get_peer_csid_entry(&key, iface, REMOVE_NODE);
		if (local_csids == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
					"CSID entry while removing temp local CSID, Error : %s \n",
					LOG_VALUE, strerror(errno));
			return -1;
		}

		for (uint8_t itr1 = 0; itr1 < local_csids->num_csid; itr1++) {
			if (local_csids->local_csid[itr1] == tmp_csid) {
				/* Shift the tail left by one slot. Bound is
				 * (num_csid - 1): the previous bound of num_csid read
				 * one element past the last valid entry. */
				for (uint8_t pos = itr1; pos < (local_csids->num_csid - 1); pos++) {
					local_csids->local_csid[pos] = local_csids->local_csid[(pos + 1)];
				}
				local_csids->num_csid--;
			}
		}

		if (local_csids->num_csid == 0) {
			if (del_peer_csid_entry(&key, iface)) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"Error : Failed to delete CSID "
						"entry while cleanup CSID by CSID entry,\n", LOG_VALUE);
				return -1;
			}
		}

		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Remove Temp Local CSID : Number of CSID %d \n",
				LOG_VALUE, local_csids->num_csid);
	}
	return 0;
}
/**
* @brief : Delete peer address node entry
* @param : pdn,
* @return : Returns nothing
*/
static void
del_peer_addr_node_entry(pdn_connection *pdn) {
peer_node_addr_key_t key = {0};
fqcsid_ie_node_addr_t *node_addr = NULL;
fqcsid_t *tmp = NULL;
if ((is_present(&pdn->mme_csid.node_addr)) && ((pdn->context)->cp_mode != PGWC)
&& (COMPARE_IP_ADDRESS(pdn->mme_csid.node_addr,
(pdn->context)->s11_mme_gtpc_ip) != 0)) {
key.iface = S11_SGW_PORT_ID;
memcpy(&key.peer_node_addr,
&(pdn->context)->s11_mme_gtpc_ip, sizeof(node_address_t));
node_addr = get_peer_node_addr_entry(&key, UPDATE_NODE);
if (node_addr != NULL) {
tmp = get_peer_addr_csids_entry(&node_addr->fqcsid_node_addr, UPDATE_NODE);
if (tmp == NULL) {
del_peer_node_addr_entry(&key);
}
} else {
del_peer_node_addr_entry(&key);
}
}
if ((pdn->context)->cp_mode == PGWC
&& (COMPARE_IP_ADDRESS(pdn->sgw_csid.node_addr,
pdn->s5s8_sgw_gtpc_ip) != 0)) {
key.iface = S5S8_PGWC_PORT_ID;
memcpy(&key.peer_node_addr,
&pdn->s5s8_sgw_gtpc_ip, sizeof(node_address_t));
node_addr = get_peer_node_addr_entry(&key, UPDATE_NODE);
if (node_addr != NULL) {
tmp = get_peer_addr_csids_entry(&node_addr->fqcsid_node_addr, UPDATE_NODE);
if (tmp == NULL) {
del_peer_node_addr_entry(&key);
}
} else {
del_peer_node_addr_entry(&key);
}
}
if ((pdn->context)->cp_mode == SGWC
&& (COMPARE_IP_ADDRESS(pdn->pgw_csid.node_addr,
pdn->s5s8_pgw_gtpc_ip) != 0)) {
key.iface = S5S8_SGWC_PORT_ID;
memcpy(&key.peer_node_addr,
&pdn->s5s8_pgw_gtpc_ip, sizeof(node_address_t));
node_addr = get_peer_node_addr_entry(&key, UPDATE_NODE);
if (node_addr != NULL) {
tmp = get_peer_addr_csids_entry(&node_addr->fqcsid_node_addr, UPDATE_NODE);
if (tmp == NULL) {
del_peer_node_addr_entry(&key);
}
} else {
del_peer_node_addr_entry(&key);
}
}
if (COMPARE_IP_ADDRESS(pdn->up_csid.node_addr, pdn->upf_ip) != 0) {
key.iface = SX_PORT_ID;
memcpy(&key.peer_node_addr, &pdn->upf_ip, sizeof(node_address_t));
node_addr = get_peer_node_addr_entry(&key, UPDATE_NODE);
if (node_addr != NULL) {
tmp = get_peer_addr_csids_entry(&node_addr->fqcsid_node_addr, UPDATE_NODE);
if (tmp == NULL) {
del_peer_node_addr_entry(&key);
}
} else {
del_peer_node_addr_entry(&key);
}
}
}
/**
 * @brief : Remove the most recently assigned local CSID of the given
 *          FQ-CSID from the CSID list stored for a node address, and
 *          drop the whole entry once that list becomes empty.
 * @param : node_addr, node address owning the stored CSID list
 * @param : fqcsid, FQ-CSID whose last local CSID is removed
 * @return : Returns nothing
 */
void del_local_csid(node_address_t *node_addr, fqcsid_t *fqcsid) {
	fqcsid_t *entry = get_peer_addr_csids_entry(node_addr, REMOVE_NODE);

	if (entry == NULL)
		return;

	for (uint8_t idx = 0; idx < entry->num_csid; idx++) {
		if (entry->local_csid[idx] !=
				fqcsid->local_csid[(fqcsid->num_csid - 1)])
			continue;

		/* Close the gap by shifting the tail one slot left */
		for (uint8_t pos = idx; pos < (entry->num_csid - 1); pos++)
			entry->local_csid[pos] = entry->local_csid[pos + 1];

		if (entry->num_csid != 0)
			entry->num_csid--;
	}

	if (entry->num_csid == 0)
		del_peer_addr_csids_entry(node_addr);
}
/**
 * @brief : Cleanup the local CSID links of a session: unlink the session
 *          from each local CSID, delete emptied CSID entries together
 *          with their dependent session data, and release the per-context
 *          FQ-CSID allocations.
 * @param : seid, CP session id to unlink
 * @param : fqcsid, local FQ-CSID list associated with the session
 * @param : pdn, pdn connection of the session
 * @return : Returns 0 on success, -1 when fqcsid is NULL,
 *           GTPV2C_CAUSE_SYSTEM_FAILURE on internal errors
 */
int
cleanup_csid_entry(uint64_t seid,
		fqcsid_t *fqcsid, pdn_connection *pdn) {

	/* Must be a signed int: rte_hash_add_key_data() returns negative
	 * error codes, which the previous uint8_t type truncated so that
	 * rte_strerror(abs(ret)) reported the wrong error */
	int ret = 0;
	uint16_t csid = 0;
	sess_csid *sess_list = NULL;

	if (fqcsid != NULL) {
		for (uint8_t itr = 0; itr < fqcsid->num_csid; itr++) {
			csid = fqcsid->local_csid[itr];
			/* Remove the session link from CSID */
			sess_list = get_sess_csid_entry(csid, REMOVE_NODE);
			if (sess_list == NULL)
				continue;

			/* Remove node from csid linked list */
			sess_list = remove_sess_csid_data_node(sess_list, seid);

			/* Update CSID Entry in table */
			ret = rte_hash_add_key_data(seids_by_csid_hash,
					&csid, sess_list);
			if (ret) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Failed to remove Session IDs entry for CSID = %u"
						"\n\tError= %s\n",
						LOG_VALUE, csid,
						rte_strerror(abs(ret)));
				return GTPV2C_CAUSE_SYSTEM_FAILURE;
			}
			if (sess_list == NULL) {
				/* Last session on this CSID: remove the CSID itself
				 * and every structure that hangs off it */
				ret = del_sess_csid_entry(csid);
				if (ret) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
							"Error : While Delete Session CSID entry \n", LOG_VALUE);
					return GTPV2C_CAUSE_SYSTEM_FAILURE;
				}

				ret = cleanup_session_entries(csid, pdn);
				if (ret) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
							"Error : While Cleanup session entries \n", LOG_VALUE);
					return GTPV2C_CAUSE_SYSTEM_FAILURE;
				}

				del_peer_addr_node_entry(pdn);

				if (((pdn->context)->cp_mode == SGWC) || ((pdn->context)->cp_mode == SAEGWC)) {
					del_local_csid(&(pdn->context)->s11_sgw_gtpc_ip, fqcsid);
				} else if ((pdn->context)->cp_mode == PGWC) {
					del_local_csid(&pdn->s5s8_pgw_gtpc_ip, fqcsid);
				}
			} else {
				/* Remove session csid from UE context */
				cleanup_sess_csid_entry(pdn);
			}

			/*Delete Session Link with Peer CSID */
			del_session_csid_entry(pdn);

			/* Free pgw_fqcsid if Node is PGWC and SAEGWC */
			if (((pdn->context)->cp_mode == PGWC) || ((pdn->context)->cp_mode == SAEGWC)) {
				if ((pdn->context)->pgw_fqcsid != NULL) {
					rte_free((pdn->context)->pgw_fqcsid);
					(pdn->context)->pgw_fqcsid = NULL;
				}
			}

			/* Free sgw_fqcsid if Node is SGWC and SAEGWC */
			if (((pdn->context)->cp_mode == SGWC) || ((pdn->context)->cp_mode == SAEGWC)) {
				if ((pdn->context)->sgw_fqcsid != NULL) {
					rte_free((pdn->context)->sgw_fqcsid);
					(pdn->context)->sgw_fqcsid = NULL;
				}
			}
		}
		return 0;
	}

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
			"fqcsid not found while Cleanup CSID entry \n", LOG_VALUE);
	return -1;
}
/* Cleanup Service GW context after demotion happen from SAEGWC to PGWC */
/**
 * @brief : Answer a Delete Session Request on S11 and clear the Service-GW
 *          side of the UE context after the node was demoted from SAEGWC to
 *          PGWC. The commented-out sections (MME path-management timer
 *          removal, S11 TEID hash removal, CSID cleanup) are intentionally
 *          disabled pending the inline TODOs.
 * @param : ds_req, received Delete Session Request
 * @param : context, UE context being cleaned up
 * @return : Returns 0 on success, -1 on invalid EBI,
 *           GTPV2C_CAUSE_CONTEXT_NOT_FOUND when the PDN is missing
 */
int8_t
cleanup_sgw_context(del_sess_req_t *ds_req, ue_context *context)
{
	int ret = 0;
	uint16_t payload_length = 0;

	/* NOTE(review): only the IPv4 half of the MME S11 address is logged;
	 * assumes an IPv4 peer — confirm behaviour for IPv6 deployments */
	uint32_t dstIp = context->s11_mme_gtpc_ip.ipv4_addr;
	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Peer Node MME IP Addr:"IPV4_ADDR"\n",
			LOG_VALUE, IPV4_ADDR_HOST_FORMAT(dstIp));

	/*Get the ebi index*/
	int ebi_index = GET_EBI_INDEX(ds_req->lbi.ebi_ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		return -1;
	}

	/* Select the PDN based on the ebi_index*/
	pdn_connection *pdn = GET_PDN(context, ebi_index);
	if (!pdn) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	/* TODO: Need to think on Multiple Session Scenario */
	/* Delete the MME PATH managment timer Entry*/
	//peerData *conn_data = NULL;
	//ret = rte_hash_lookup_data(conn_hash_handle,
	//		&dstIp, (void **)&conn_data);
	//if ( ret < 0) {
	//	clLog(clSystemLog, eCLSeverityDebug,
	//			LOG_FORMAT" Entry not found for NODE: "IPV4_ADDR"\n",
	//			LOG_VALUE, IPV4_ADDR_HOST_FORMAT(dstIp));
	//} else {
	//	/* Stop transmit timer for specific peer node */
	//	stopTimer(&conn_data->tt);
	//	/* Stop periodic timer for specific peer node */
	//	stopTimer(&conn_data->pt);
	//	/* Deinit transmit timer for specific Peer Node */
	//	deinitTimer(&conn_data->tt);
	//	/* Deinit periodic timer for specific Peer Node */
	//	deinitTimer(&conn_data->pt);
	//	/* Update the CLI Peer Status */
	//	update_peer_status(dstIp, FALSE);
	//	/* Delete CLI Peer Entry */
	//	delete_cli_peer(dstIp);
	//	/* Delete entry from connection hash table */
	//	ret = rte_hash_del_key(conn_hash_handle,
	//			&dstIp);
	//	if (ret < 0) {
	//		clLog(clSystemLog, eCLSeverityDebug,
	//				LOG_FORMAT"Failed to del entry from hash table, Key IP:"IPV4_ADDR"\n",
	//				LOG_VALUE, IPV4_ADDR_HOST_FORMAT(dstIp));
	//	}
	//	conn_cnt--;
	//}

	/* TODO: */
	/* Cleanup Peer Node CSIDs Entry */
#ifdef USE_CSID
	/*
	 * De-link entry of the session from the CSID list
	 * for only default bearer id
	 * */
	/* Remove session entry from the SGWC or SAEGWC CSID */
	//cleanup_csid_entry(pdn->seid, (context)->sgw_fqcsid, context);
	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Cleanup CSID for SEID:%lu\n",
			LOG_VALUE, pdn->seid);
#endif /* USE_CSID */

	/* TODO: */
	//uint32_t teid = ds_req->header.teid.has_teid.teid;
	///* Delete S11 SGW GTPC TEID entry from the hash table */
	//ret = rte_hash_del_key(ue_context_by_fteid_hash,
	//		&teid);
	//if (ret < 0) {
	//	clLog(clSystemLog, eCLSeverityDebug,
	//			LOG_FORMAT"Failed to del entry from hash table, Key TEID:%u\n",
	//			LOG_VALUE, teid);
	//}
	//clLog(clSystemLog, eCLSeverityDebug,
	//		LOG_FORMAT"Entry deleted from ue_context_by_fteid_hash for TEID:%u\n",
	//		LOG_VALUE, teid);

	/* DSR Response */
	del_sess_rsp_t del_resp = {0};
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	/* Retrieve the Sequence number from Request header */
	uint8_t seq = ds_req->header.teid.has_teid.seq;

	/* Fill the Response and send to MME */
	fill_del_sess_rsp(&del_resp, seq, context->s11_mme_gtpc_teid);

	/*Encode the S11 delete session response message. */
	payload_length = encode_del_sess_rsp(&del_resp, (uint8_t *)gtpv2c_tx);

	ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
	}

	gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
			s11_mme_sockaddr,ACC);

	/* copy packet for user level packet copying or li */
	if (context->dupl) {
		process_pkt_for_li(
				context, S11_INTFC_OUT, tx_buf, payload_length,
				fill_ip_info(s11_mme_sockaddr.type,
						config.s11_ip.s_addr,
						config.s11_ip_v6.s6_addr),
				fill_ip_info(s11_mme_sockaddr.type,
						s11_mme_sockaddr.ipv4.sin_addr.s_addr,
						s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
				config.s11_port,
				((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s11_mme_sockaddr.ipv4.sin_port) :
					ntohs(s11_mme_sockaddr.ipv6.sin6_port)));
	}

	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"SGW/SAEGW Sent the DSRsp back to MME\n", LOG_VALUE);

	/* Cleanup SGW UE Context: drop the MME TEID so the S11 leg is gone */
	context->s11_mme_gtpc_teid = 0;
	//context->s11_sgw_gtpc_teid = 0;
	return 0;
}
/* TODO: Remove it, never expected behaviour */
/* Cleanup PDN GW context after demotion happen from SAEGWC to PGWC */
/**
 * @brief : Answer a Delete Session Request towards the old SGW on S5/S8
 *          and clean up the PGW side of the UE context after demotion
 *          from SAEGWC to PGWC.
 * @param : ds_req, received Delete Session Request
 * @param : context, UE context being cleaned up
 * @return : Returns 0 on success, -1 on invalid EBI,
 *           GTPV2C_CAUSE_CONTEXT_NOT_FOUND when the PDN is missing
 */
int8_t
cleanup_pgw_context(del_sess_req_t *ds_req, ue_context *context)
{
	int ret = 0;
	uint16_t payload_length = 0;

	/*Get the ebi index*/
	int ebi_index = GET_EBI_INDEX(ds_req->lbi.ebi_ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		return -1;
	}

	/* Select the PDN based on the ebi_index*/
	pdn_connection *pdn = GET_PDN(context, ebi_index);
	if (!pdn) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	/* DSR Response */
	del_sess_rsp_t del_resp = {0};
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	/* Retrieve the Sequence number from Request header */
	uint8_t seq = ds_req->header.teid.has_teid.seq;

	/* Fill the Response and send back to the old SGW */
	fill_del_sess_rsp(&del_resp, seq, context->s11_mme_gtpc_teid);

	/*Encode the delete session response message. */
	payload_length = encode_del_sess_rsp(&del_resp, (uint8_t *)gtpv2c_tx);

	ret = set_dest_address(pdn->old_sgw_addr, &s5s8_recv_sockaddr);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
	}

	gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
			s5s8_recv_sockaddr, ACC);

	/* copy packet for user level packet copying or li */
	if (context->dupl) {
		process_pkt_for_li(
				context, S5S8_C_INTFC_OUT, tx_buf, payload_length,
				fill_ip_info(s5s8_recv_sockaddr.type,
						config.s5s8_ip.s_addr,
						config.s5s8_ip_v6.s6_addr),
				fill_ip_info(s5s8_recv_sockaddr.type,
						s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
						s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
				config.s5s8_port,
				((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
					ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
	}

	/* Progress message: logged at Debug severity for consistency with
	 * cleanup_sgw_context(); it previously used Critical by mistake */
	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"PGW Sent the DSRsp back to SGW\n", LOG_VALUE);
	return 0;
}
/* Send the PFCP Session Modification Request after promotion */
int8_t
send_pfcp_modification_req(ue_context *context, pdn_connection *pdn,
eps_bearer *bearer, create_sess_req_t *csr, uint8_t ebi_index)
{
int ret = 0;
uint32_t seq = 0;
uint8_t num_csid = 0;
uint8_t far_count = 0;
uint8_t mme_csid_changed_flag = 0;
struct resp_info *resp = NULL;
pdr_t *pdr_ctxt = NULL;
pfcp_sess_mod_req_t pfcp_sess_mod_req = {0};
node_address_t node_value = {0};
#ifdef USE_CSID
/* Parse and stored MME and SGW FQ-CSID in the context */
fqcsid_t *tmp = NULL;
/* Allocate the memory for each session */
if (context != NULL) {
if (context->mme_fqcsid == NULL) {
context->mme_fqcsid = rte_zmalloc_socket(NULL, sizeof(sess_fqcsid_t),
RTE_CACHE_LINE_SIZE, rte_socket_id());
}
if (context->sgw_fqcsid == NULL) {
context->sgw_fqcsid = rte_zmalloc_socket(NULL, sizeof(sess_fqcsid_t),
RTE_CACHE_LINE_SIZE, rte_socket_id());
}
if ((context->mme_fqcsid == NULL) || (context->sgw_fqcsid == NULL)) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate the "
"memory for fqcsids entry\n", LOG_VALUE);
return GTPV2C_CAUSE_SYSTEM_FAILURE;
}
} else {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Context not found "
"while processing Create Session Request \n", LOG_VALUE);
return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
}
/* Cleanup PGW Associated CSIDs */
/* PGW FQ-CSID */
if (context->pgw_fqcsid) {
if ((context->pgw_fqcsid)->num_csid) {
for (uint8_t inx = 0; (context->pgw_fqcsid != NULL) && (inx < (context->pgw_fqcsid)->num_csid); inx++) {
/* Remove the session link from old CSID */
uint16_t tmp_csid = (context->pgw_fqcsid)->local_csid[inx];
sess_csid *tmp1 = NULL;
tmp1 = get_sess_csid_entry(tmp_csid, REMOVE_NODE);
if (tmp1 != NULL) {
/* Remove node from csid linked list */
tmp1 = remove_sess_csid_data_node(tmp1, pdn->seid);
int8_t ret = 0;
/* Update CSID Entry in table */
ret = rte_hash_add_key_data(seids_by_csid_hash,
&tmp_csid, tmp1);
if (ret) {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to add Session IDs entry for CSID = %u"
"\n\tError= %s\n",
LOG_VALUE, tmp_csid,
rte_strerror(abs(ret)));
return GTPV2C_CAUSE_SYSTEM_FAILURE;
}
if (tmp1 == NULL) {
/* Removing temporary local CSID associated with MME */
remove_peer_temp_csid(&pdn->mme_csid, tmp_csid,
S5S8_PGWC_PORT_ID);
/* Removing temporary local CSID assocoated with PGWC */
remove_peer_temp_csid(&pdn->sgw_csid, tmp_csid,
S5S8_PGWC_PORT_ID);
/* Remove Session link from peer csid */
sess_csid *tmp1 = NULL;
peer_csid_key_t key = {0};
key.iface = S5S8_PGWC_PORT_ID;
key.peer_local_csid = pdn->sgw_csid.local_csid[num_csid];
memcpy(&(key.peer_node_addr),
&(pdn->sgw_csid.node_addr), sizeof(node_address_t));
tmp1 = get_sess_peer_csid_entry(&key, REMOVE_NODE);
remove_sess_entry(tmp1, pdn->seid, &key);
/* Delete Local CSID entry */
del_sess_csid_entry(tmp_csid);
}
/* Delete CSID from the context */
for (uint8_t itr1 = 0; itr1 < (context->pgw_fqcsid)->num_csid; itr1++) {
if ((context->pgw_fqcsid)->local_csid[itr1] == tmp_csid) {
for(uint8_t pos = itr1; pos < ((context->pgw_fqcsid)->num_csid - 1); pos++ ) {
(context->pgw_fqcsid)->local_csid[pos] = (context->pgw_fqcsid)->local_csid[pos + 1];
}
(context->pgw_fqcsid)->num_csid--;
}
}
}
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Remove PGWC CSID: %u\n",
LOG_VALUE, tmp_csid);
}
if ((context->pgw_fqcsid)->num_csid == 0)
{
memset(&(pdn->pgw_csid), 0, sizeof(fqcsid_t));
if (context->pgw_fqcsid != NULL) {
rte_free(context->pgw_fqcsid);
context->pgw_fqcsid = NULL;
}
}
}
}
/* MME FQ-CSID */
if (csr->mme_fqcsid.header.len) {
if (csr->mme_fqcsid.number_of_csids) {
for (uint8_t inx = 0; inx < pdn->mme_csid.num_csid; inx++) {
if (csr->mme_fqcsid.pdn_csid[csr->mme_fqcsid.number_of_csids - 1] !=
pdn->mme_csid.local_csid[inx]) {
mme_csid_changed_flag = TRUE;
}
}
if (mme_csid_changed_flag) {
fqcsid_t *tmp = NULL;
tmp = get_peer_addr_csids_entry(&pdn->mme_csid.node_addr, UPDATE_NODE);
if (tmp != NULL) {
for (uint8_t itr3 = 0; itr3 < tmp->num_csid; itr3++) {
if (tmp->local_csid[itr3] ==
pdn->mme_csid.local_csid[(pdn->mme_csid.num_csid - 1)]) {
for(uint8_t pos = itr3; pos < (tmp->num_csid - 1); pos++ ) {
tmp->local_csid[pos] = tmp->local_csid[pos + 1];
}
tmp->num_csid--;
}
}
if (tmp->num_csid == 0) {
if (del_peer_addr_csids_entry(&pdn->mme_csid.node_addr)) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error in deleting "
"peer CSID entry, Error : %i\n", LOG_VALUE, errno);
/* TODO ERROR HANDLING */
return -1;
}
}
}
/* Remove Session link from peer csid */
sess_csid *tmp1 = NULL;
peer_csid_key_t key = {0};
key.iface = S5S8_PGWC_PORT_ID;
key.peer_local_csid = pdn->mme_csid.local_csid[num_csid];
memcpy(&(key.peer_node_addr),
&(pdn->mme_csid.node_addr), sizeof(node_address_t));
tmp1 = get_sess_peer_csid_entry(&key, REMOVE_NODE);
remove_sess_entry(tmp1, pdn->seid, &key);
/* Remove CSID from context */
remove_csid_from_cntx(context->mme_fqcsid, &pdn->mme_csid);
ret = add_peer_addr_entry_for_fqcsid_ie_node_addr(
&context->s11_mme_gtpc_ip, &csr->mme_fqcsid,
S11_SGW_PORT_ID);
if (ret)
return ret;
ret = add_fqcsid_entry(&csr->mme_fqcsid, context->mme_fqcsid);
if(ret)
return ret;
fill_pdn_fqcsid_info(&pdn->mme_csid, context->mme_fqcsid);
if (pdn->mme_csid.num_csid) {
link_sess_with_peer_csid(&pdn->mme_csid, pdn, S11_SGW_PORT_ID);
}
}
}
} else {
/* Stored the MME CSID by MME Node address */
tmp = get_peer_addr_csids_entry(&context->s11_mme_gtpc_ip,
ADD_NODE);
if (tmp == NULL) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to "
"Add the MME CSID by MME Node address, Error : %s \n", LOG_VALUE,
strerror(errno));
return GTPV2C_CAUSE_SYSTEM_FAILURE;
}
memcpy(&tmp->node_addr, &context->s11_mme_gtpc_ip, sizeof(node_address_t));
memcpy(&(context->mme_fqcsid)->node_addr[(context->mme_fqcsid)->num_csid],
&context->s11_mme_gtpc_ip, sizeof(node_address_t));
}
/* SGW FQ-CSID */
if (!(context->sgw_fqcsid)->num_csid) {
tmp = get_peer_addr_csids_entry(&context->s11_sgw_gtpc_ip,
ADD_NODE);
if (tmp == NULL) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed "
"to Add the SGW CSID by SGW Node address, Error : %s \n", LOG_VALUE,
strerror(errno));
return GTPV2C_CAUSE_SYSTEM_FAILURE;
}
memcpy(&tmp->node_addr, &context->s11_sgw_gtpc_ip, sizeof(node_address_t));
memcpy(&(context->sgw_fqcsid)->node_addr[(context->sgw_fqcsid)->num_csid],
&context->s11_sgw_gtpc_ip, sizeof(node_address_t));
}
/* Get the copy of existing SGW CSID */
fqcsid_t tmp_csid_t = {0};
if (pdn->sgw_csid.num_csid) {
if ((context->sgw_fqcsid)->num_csid) {
memcpy(&tmp_csid_t, &pdn->sgw_csid, sizeof(fqcsid_t));
}
}
/* Update the entry for peer nodes */
if (fill_peer_node_info(pdn, bearer)) {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to fill peer node info and assignment of the "
"CSID Error: %s\n", LOG_VALUE, strerror(errno));
return GTPV2C_CAUSE_SYSTEM_FAILURE;
}
if(pdn->flag_fqcsid_modified == TRUE) {
uint8_t tmp_csid = 0;
/* Validate the exsiting CSID or allocated new one */
for (uint8_t inx1 = 0; inx1 < tmp_csid_t.num_csid; inx1++) {
if ((context->sgw_fqcsid)->local_csid[(context->sgw_fqcsid)->num_csid - 1] ==
tmp_csid_t.local_csid[inx1]) {
tmp_csid = tmp_csid_t.local_csid[inx1];
break;
}
}
if (!tmp_csid) {
for (uint8_t inx = 0; inx < tmp_csid_t.num_csid; inx++) {
/* Remove the session link from old CSID */
sess_csid *tmp1 = NULL;
tmp1 = get_sess_csid_entry(tmp_csid_t.local_csid[inx], REMOVE_NODE);
if (tmp1 != NULL) {
/* Remove node from csid linked list */
tmp1 = remove_sess_csid_data_node(tmp1, pdn->seid);
int8_t ret = 0;
/* Update CSID Entry in table */
ret = rte_hash_add_key_data(seids_by_csid_hash,
&tmp_csid_t.local_csid[inx], tmp1);
if (ret) {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to add Session IDs entry for CSID = %u"
"\n\tError= %s\n",
LOG_VALUE, tmp_csid_t.local_csid[inx],
rte_strerror(abs(ret)));
return GTPV2C_CAUSE_SYSTEM_FAILURE;
}
if (tmp1 == NULL) {
/* Removing temporary local CSID associated with MME */
remove_peer_temp_csid(&pdn->mme_csid, tmp_csid_t.local_csid[inx],
S11_SGW_PORT_ID);
/* Removing temporary local CSID assocoated with PGWC */
remove_peer_temp_csid(&pdn->pgw_csid, tmp_csid_t.local_csid[inx],
S5S8_SGWC_PORT_ID);
del_sess_csid_entry(tmp_csid_t.local_csid[inx]);
}
/* Delete CSID from the context */
remove_csid_from_cntx(context->sgw_fqcsid, &tmp_csid_t);
}
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Remove session link from Old CSID:%u\n",
LOG_VALUE, tmp_csid_t.local_csid[inx]);
}
}
/* update entry for cp session id with link local csid */
sess_csid *tmp = NULL;
tmp = get_sess_csid_entry(
pdn->sgw_csid.local_csid[pdn->sgw_csid.num_csid - 1],
ADD_NODE);
if (tmp == NULL) {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to get session of CSID entry %s \n",
LOG_VALUE, strerror(errno));
return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
}
/* Link local csid with session id */
/* Check head node created ot not */
if(tmp->cp_seid != pdn->seid && tmp->cp_seid != 0) {
sess_csid *new_node = NULL;
/* Add new node into csid linked list */
new_node = add_sess_csid_data_node(tmp,
pdn->sgw_csid.local_csid[pdn->sgw_csid.num_csid - 1]);
if(new_node == NULL ) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to "
"ADD new node into CSID linked list : %s\n", LOG_VALUE);
return GTPV2C_CAUSE_SYSTEM_FAILURE;
} else {
new_node->cp_seid = pdn->seid;
new_node->up_seid = pdn->dp_seid;
}
} else {
tmp->cp_seid = pdn->seid;
tmp->up_seid = pdn->dp_seid;
tmp->next = NULL;
}
/* Fill the fqcsid into the session est request */
if (fill_fqcsid_sess_mod_req(&pfcp_sess_mod_req, pdn)) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to fill "
"FQ-CSID in Session Modification Request, "
"Error: %s\n", LOG_VALUE, strerror(errno));
return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
}
if (mme_csid_changed_flag) {
/* set MME FQ-CSID */
set_fq_csid_t(&pfcp_sess_mod_req.mme_fqcsid, &pdn->mme_csid);
}
}
#endif /* USE_CSID */
if(pdn) {
for(int bearer_inx = 0; bearer_inx < MAX_BEARERS; bearer_inx++) {
bearer = pdn->eps_bearers[bearer_inx];
if(bearer) {
/* Get the PDR Context */
for(uint8_t pdr = 0; pdr < bearer->pdr_count; pdr++) {
if (bearer->pdrs[pdr] == NULL) {
continue;
}
if(bearer->pdrs[pdr]->pdi.src_intfc.interface_value == SOURCE_INTERFACE_VALUE_CORE) {
pdr_ctxt = bearer->pdrs[pdr];
break;
} else if ((bearer->pdrs[pdr])->pdi.src_intfc.interface_value == SOURCE_INTERFACE_VALUE_ACCESS) {
/* Update the local source IP Address and teid */
ret = set_node_address(&(bearer->pdrs[pdr])->pdi.local_fteid.ipv4_address,
(bearer->pdrs[pdr])->pdi.local_fteid.ipv6_address,
bearer->s1u_sgw_gtpu_ip);
if (ret < 0) {
clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
"IP address", LOG_VALUE);
}
/* Update the PDR info */
set_update_pdr(&(pfcp_sess_mod_req.update_pdr[pfcp_sess_mod_req.update_pdr_count]),
bearer->pdrs[pdr], (pdn->context)->cp_mode);
/* Reset Precedance, No need to forward */
memset(&(pfcp_sess_mod_req.update_pdr[pfcp_sess_mod_req.update_pdr_count].precedence), 0, sizeof(pfcp_precedence_ie_t));
/* Reset FAR ID, No need to forward */
memset(&(pfcp_sess_mod_req.update_pdr[pfcp_sess_mod_req.update_pdr_count].far_id), 0,
sizeof(pfcp_far_id_ie_t));
/* Update the PDR header length*/
pfcp_sess_mod_req.update_pdr[pfcp_sess_mod_req.update_pdr_count].header.len -=
(sizeof(pfcp_far_id_ie_t) + sizeof(pfcp_precedence_ie_t));
pfcp_sess_mod_req.update_pdr_count++;
}
}
/* Set the Appropriate Actions */
if(pdr_ctxt) {
pdr_ctxt->far.actions.buff = FALSE;
pdr_ctxt->far.actions.nocp = FALSE;
pdr_ctxt->far.actions.forw = TRUE;
pdr_ctxt->far.actions.drop = FALSE;
pdr_ctxt->far.actions.dupl = GET_DUP_STATUS(pdn->context);
}
/* Add the Update FAR in the message */
if (pdr_ctxt->far.actions.forw) {
set_update_far(
&(pfcp_sess_mod_req.update_far[far_count]),
&pdr_ctxt->far);
/* Set the Update Forwarding Parameter IE*/
uint16_t len = 0;
len += set_upd_forwarding_param(&(pfcp_sess_mod_req.update_far[far_count].upd_frwdng_parms),
bearer->s1u_enb_gtpu_ip);
len += UPD_PARAM_HEADER_SIZE;
pfcp_sess_mod_req.update_far[far_count].header.len += len;
/* Fill the eNB F-TEID Information */
pfcp_sess_mod_req.update_far[far_count].upd_frwdng_parms.outer_hdr_creation.teid =
bearer->s1u_enb_gtpu_teid;
pfcp_sess_mod_req.update_far[far_count].upd_frwdng_parms.dst_intfc.interface_value =
GTPV2C_IFTYPE_S1U_ENODEB_GTPU;
/* Set the endmarker flag */
set_pfcpsmreqflags(&(pfcp_sess_mod_req.update_far[far_count].upd_frwdng_parms.pfcpsmreq_flags));
pfcp_sess_mod_req.update_far[far_count].upd_frwdng_parms.pfcpsmreq_flags.sndem = PRESENT;
pfcp_sess_mod_req.update_far[far_count].header.len += sizeof(struct pfcp_pfcpsmreq_flags_ie_t);
pfcp_sess_mod_req.update_far[far_count].upd_frwdng_parms.header.len += sizeof(struct pfcp_pfcpsmreq_flags_ie_t);
far_count++;
pfcp_sess_mod_req.update_far_count = far_count;
}
}
}
/* Set the procedure */
//pdn->proc = MODIFY_BEARER_PROCEDURE;
/* Set CP Seid and Node Address */
/*Filling Node ID for F-SEID*/
if (pdn->upf_ip.ip_type == PDN_IP_TYPE_IPV4) {
uint8_t temp[IPV6_ADDRESS_LEN] = {0};
ret = fill_ip_addr(config.pfcp_ip.s_addr, temp, &node_value);
if (ret < 0) {
clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
"IP address", LOG_VALUE);
}
} else if (pdn->upf_ip.ip_type == PDN_IP_TYPE_IPV6) {
ret = fill_ip_addr(0, config.pfcp_ip_v6.s6_addr, &node_value);
if (ret < 0) {
clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
"IP address", LOG_VALUE);
}
}
set_fseid(&(pfcp_sess_mod_req.cp_fseid), pdn->seid, node_value);
/* Get the Sequence Number for Request */
seq = get_pfcp_sequence_number(PFCP_SESSION_MODIFICATION_REQUEST, seq);
/* Fill the Sequence Number inside the header */
set_pfcp_seid_header((pfcp_header_t *) &(pfcp_sess_mod_req.header), PFCP_SESSION_MODIFICATION_REQUEST,
HAS_SEID, seq, context->cp_mode);
/* Set the UP Seid inside the header */
pfcp_sess_mod_req.header.seid_seqno.has_seid.seid = pdn->dp_seid;
/* Encode the PFCP Session Modification Request */
uint8_t pfcp_msg[PFCP_MSG_LEN]={0};
int encoded = encode_pfcp_sess_mod_req_t(&pfcp_sess_mod_req, pfcp_msg);
/* Set the UPF IP Address */
//upf_pfcp_sockaddr = pdn->upf_ip.ipv4_addr;
/* Sent the PFCP messages */
if(pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded, upf_pfcp_sockaddr, SENT) < 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to send"
"PFCP Session Modification Request %i\n", LOG_VALUE, errno);
} else {
/* Add the timer for PFCP Session Modification Request */
#ifdef CP_BUILD
add_pfcp_if_timer_entry(context->s11_sgw_gtpc_teid,
&upf_pfcp_sockaddr, pfcp_msg, encoded, ebi_index);
#endif /* CP_BUILD */
/* Stored the Resp Struct with CP Seid */
if (get_sess_entry(pdn->seid, &resp) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
"for seid: %lu", LOG_VALUE, pdn->seid);
return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
}
/* Reset the resp structure */
reset_resp_info_structure(resp);
/* Stored info to send resp back to s11 intfc */
resp->gtpc_msg.csr = *csr;
resp->msg_type = GTP_CREATE_SESSION_REQ;
resp->state = PFCP_SESS_MOD_REQ_SNT_STATE;
resp->proc = pdn->proc;
resp->cp_mode = context->cp_mode;
pdn->state = PFCP_SESS_MOD_REQ_SNT_STATE;
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Sent Request to UP, msg_type:%u, State:%s\n", LOG_VALUE, resp->msg_type,
"PFCP_SESS_MOD_REQ_SNT_STATE");
}
}
return 0;
}
/* Promotion: Parse the handover CSR and modify existing session info */
/**
 * @brief : Parse handover CSR request on a Combined GW and promote the
 *          existing session info (promotion PGWC --> SAEGWC)
 * @param : csr, decoded Create Session Request message
 * @param : context, pointer to UE context structure
 * @param : cp_type, changed gateway type selected for the promotion
 * @return : Returns 0 in case of success, GTPv2C cause value or -1 otherwise
 */
int8_t
promotion_parse_cs_req(create_sess_req_t *csr, ue_context *context,
		uint8_t cp_type)
{
	int ret = 0;
	int ebi_index = 0;
	eps_bearer *bearer = NULL;
	pdn_connection *pdn = NULL;

	for (uint8_t i = 0; i < csr->bearer_count; i++) {

		if (!csr->bearer_contexts_to_be_created[i].header.len) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Bearer Context IE Missing in the CSR\n", LOG_VALUE);
			return GTPV2C_CAUSE_MANDATORY_IE_MISSING;
		}

		ebi_index = GET_EBI_INDEX(csr->bearer_contexts_to_be_created[i].eps_bearer_id.ebi_ebi);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n",
					LOG_VALUE);
			return -1;
		}

		/* Promotion Scenario: S11 SGW and S5S8 PGW TEIDs are the same,
		 * so re-key the S11 TEID from the received PGW S5S8 TEID. */
		if (context->s11_sgw_gtpc_teid !=
				csr->pgw_s5s8_addr_ctl_plane_or_pmip.teid_gre_key) {
			context->s11_sgw_gtpc_teid = csr->pgw_s5s8_addr_ctl_plane_or_pmip.teid_gre_key;
		}

		if (cp_type != 0) {
			context->cp_mode = cp_type;
		} else {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to select appropriate cp type\n",
					LOG_VALUE);
			return -1;
		}

		/* Retrieve the PDN connection for this bearer */
		pdn = GET_PDN(context, ebi_index);
		if (pdn == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to "
					"get pdn for ebi_index %d \n", LOG_VALUE, ebi_index);
			return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
		}

		bearer = context->eps_bearers[ebi_index];
		/* Fix: guard against a missing bearer entry before it is
		 * dereferenced below while filling the bearer information. */
		if (bearer == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to "
					"get bearer for ebi_index %d \n", LOG_VALUE, ebi_index);
			return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
		}

		if (csr->linked_eps_bearer_id.ebi_ebi) {
			if (pdn->default_bearer_id != csr->linked_eps_bearer_id.ebi_ebi) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Existing default ebi:'%u' not match with received ebi: %u\n",
						LOG_VALUE, pdn->default_bearer_id, csr->linked_eps_bearer_id.ebi_ebi);
				/* TODO */
			}
		}

		/* Default bearer: refresh context and PDN info from the CSR */
		if (pdn->default_bearer_id == csr->bearer_contexts_to_be_created[i].eps_bearer_id.ebi_ebi) {
			if (fill_context_info(csr, context, pdn) != 0) {
				return -1;
			}

			pdn->proc = get_csr_proc(csr);

			if (fill_pdn_info(csr, pdn, context, bearer) != 0) {
				return -1;
			}
		}

		/* To minimize lookup of hash for LI */
		imsi_id_hash_t *imsi_id_config = NULL;
		/* Get User Level Packet Copying Token or Id Using Imsi */
		ret = get_id_using_imsi(context->imsi, &imsi_id_config);
		if (ret < 0) {
			/* Fix: use the standard __FILE__ macro; lowercase __file__
			 * is not a predefined C macro. */
			clLog(clSystemLog, eCLSeverityDebug, "[%s]:[%s]:[%d] Not applicable for li\n",
					__FILE__, __func__, __LINE__);
		}

		if (NULL != imsi_id_config) {
			/* Fillup context from li hash */
			fill_li_config_in_context(context, imsi_id_config);
		}

		/* Fill the Bearer Information */
		bearer->s1u_enb_gtpu_teid = csr->bearer_contexts_to_be_created[i].s1u_enb_fteid.teid_gre_key;
		ret = fill_ip_addr(csr->bearer_contexts_to_be_created[i].s1u_enb_fteid.ipv4_address,
				csr->bearer_contexts_to_be_created[i].s1u_enb_fteid.ipv6_address,
				&bearer->s1u_enb_gtpu_ip);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
		}

		/* Assign the S1U SAEGWU TEID */
		bearer->s1u_sgw_gtpu_teid = bearer->s5s8_pgw_gtpu_teid;

	} /*for loop*/

	/* Fix: if no bearer context was processed (bearer_count == 0),
	 * pdn/bearer are still NULL and must not be dereferenced below. */
	if (pdn == NULL || bearer == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Bearer Context IE Missing in the CSR\n", LOG_VALUE);
		return GTPV2C_CAUSE_MANDATORY_IE_MISSING;
	}

	/* Store the context of ue in pdn */
	context->bearer_count = csr->bearer_count;
	pdn->context = context;

	if (config.use_gx) {
		/* Any pending ULI / UE timezone / RAT type event trigger:
		 * report to the PCRF via CCR-U before touching the UP session. */
		if(((context->uli_flag != FALSE) && (((context->event_trigger & (1 << ULI_EVENT_TRIGGER))) != 0))
			|| ((context->ue_time_zone_flag != FALSE) && (((context->event_trigger) & (1 << UE_TIMEZONE_EVT_TRIGGER)) != 0))
			|| ((context->rat_type_flag != FALSE) && ((context->event_trigger & (1 << RAT_EVENT_TRIGGER))) != 0)) {

			ret = gen_ccru_request(context, bearer, NULL, NULL);

			struct resp_info *resp = NULL;
			/* Retrieve the session information based on session id. */
			if (get_sess_entry(pdn->seid, &resp) != 0){
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"NO Session Entry "
						"Found for session ID:%lu\n", LOG_VALUE, pdn->seid);
				return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
			}

			reset_resp_info_structure(resp);

			resp->gtpc_msg.csr = *csr;
			resp->cp_mode = context->cp_mode;

			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Sent the CCRU Request to PCRF\n", LOG_VALUE);
			return ret;
		}
	}

	if ((ret = send_pfcp_modification_req(context, pdn, bearer, csr, ebi_index)) < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to Sent PFCP MOD Req\n",
				LOG_VALUE);
		return ret;
	}

	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Sent the PFCP modification Request to UP\n", LOG_VALUE);

	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | pfcp_messages/seid_llist.c | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "seid_llist.h"
extern int clSystemLog;
/**
 * @brief : Allocate a new session-CSID node, link it into the (non-empty)
 *          list headed by 'head' and register it in the CSID hash table.
 * @param : head, existing linked list head; must not be NULL
 * @param : local_csid, hash key under which the node is stored
 * @return : pointer to the new node on success, NULL otherwise
 */
sess_csid *
add_sess_csid_data_node(sess_csid *head, uint16_t local_csid) {

	int ret = 0;
	sess_csid *new_node = NULL;

	/* Check linked list is empty or not */
	if(head == NULL )
		return NULL;

	/* Allocate memory for new node */
	new_node = rte_malloc_socket(NULL, sizeof(sess_csid),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
	if(new_node == NULL ) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to allocate memory for session data info\n", LOG_VALUE);
		return NULL;
	}

	/* Add new node into linked list */
	if(insert_sess_csid_data_node(head, new_node) < 0){
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to add node entry in LL\n",
				LOG_VALUE);
		/* Fix: release the freshly allocated node to avoid a memory leak */
		rte_free(new_node);
		return NULL;
	}

	/* Update CSID Entry in table */
	ret = rte_hash_add_key_data(seids_by_csid_hash,
			&local_csid, new_node);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to add Session IDs entry for CSID = %u"
				"\n\tError= %s\n",
				LOG_VALUE, local_csid,
				rte_strerror(abs(ret)));
		/* Fix: unlink and free the orphaned node to avoid a memory leak */
		new_node->next = NULL;
		rte_free(new_node);
		return NULL;
	}
	return new_node;
}
/**
 * @brief : Allocate a new session-CSID node, link it into the (non-empty)
 *          list headed by 'head' and register it in the hash table under
 *          the given peer-CSID key.
 * @param : head, existing linked list head; must not be NULL
 * @param : key, peer CSID hash key under which the node is stored
 * @return : pointer to the new node on success, NULL otherwise
 */
sess_csid *
add_peer_csid_sess_data_node(sess_csid *head, peer_csid_key_t *key) {

	int ret = 0;
	sess_csid *new_node = NULL;

	/* Check linked list is empty or not */
	if(head == NULL )
		return NULL;

	/* Allocate memory for new node */
	new_node = rte_malloc_socket(NULL, sizeof(sess_csid),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
	if(new_node == NULL ) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to allocate memory for session data info\n", LOG_VALUE);
		return NULL;
	}

	/* Add new node into linked list */
	if(insert_sess_csid_data_node(head, new_node) < 0){
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to add node entry in LL\n",
				LOG_VALUE);
		/* Fix: release the freshly allocated node to avoid a memory leak */
		rte_free(new_node);
		return NULL;
	}

	/* Update CSID Entry in table */
	ret = rte_hash_add_key_data(seids_by_csid_hash,
			key, new_node);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to add Session IDs entry for CSID"
				"\n\tError= %s\n",
				LOG_VALUE,
				rte_strerror(abs(ret)));
		/* Fix: unlink and free the orphaned node to avoid a memory leak */
		new_node->next = NULL;
		rte_free(new_node);
		return NULL;
	}
	return new_node;
}
/* Function to add sess_csid node */
/**
 * @brief : Link 'new_node' into the list headed by 'head'. For a non-empty
 *          list the node is PREPENDED: new_node->next points at the old
 *          head, so new_node becomes the logical head of the chain.
 *
 *          NOTE(review): 'head' is passed by value, so the assignment in
 *          the empty-list branch updates only the local copy and is never
 *          visible to the caller. Both callers in this file reject
 *          head == NULL before calling, so that branch is effectively
 *          dead; supporting empty lists would need a sess_csid **head API.
 * @param : head, current list head (see note about NULL)
 * @param : new_node, node to insert
 * @return : 0 on success, -1 when new_node is NULL
 */
int8_t
insert_sess_csid_data_node(sess_csid *head, sess_csid *new_node)
{
	if(new_node == NULL)
		return -1;

	new_node->next = NULL;
	/* Check linked list is empty or not */
	if (head == NULL) {
		/* Local-only assignment: not propagated to the caller */
		head = new_node;
		head->next = NULL;
	} else {
		/* Prepend: new_node now precedes the old head */
		new_node->next = head;
	}
	return 0;
}
/**
 * @brief : Look up the node whose CP or UP SEID matches 'seid'.
 * @param : head, linked list head (may be NULL)
 * @param : seid, CP or UP session id to search for
 * @return : matching node, or NULL when not found
 */
sess_csid *
get_sess_csid_data_node(sess_csid *head, uint64_t seid)
{
	/* Walk the chain until a node carries the requested SEID */
	for (sess_csid *node = head; node != NULL; node = node->next) {
		if (node->cp_seid == seid || node->up_seid == seid) {
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Match found for seid %"PRIu64" in LL\n",
					LOG_VALUE, seid);
			return node;
		}
	}

	/* Node is not present in linked list */
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Match not found for seid %"PRIu64" in LL\n",
			LOG_VALUE, seid);
	return NULL;
}
/**
 * @brief : Function to remove the first node from the linked list
 * @param : head, linked list head pointer
 * @return : Returns the new list head on success, NULL otherwise
 */
static sess_csid *
remove_sess_csid_first_node(sess_csid *head) {

	if (head == NULL)
		return NULL;

	/* Detach the head from the rest of the chain */
	sess_csid *new_head = head->next;
	head->next = NULL;

	/* Release the old head; the remainder becomes the new list */
	rte_free(head);
	return new_head;
}
/**
 * @brief : Remove and free the last node of the linked list.
 * @param : head, linked list head pointer
 * @return : New list head (NULL when the list becomes empty or head is NULL)
 */
static sess_csid *
remove_sess_csid_last_node(sess_csid *head) {

	if(head == NULL)
		return NULL;

	/* Fix: a single-node list must return NULL after freeing the node;
	 * the original freed head and still returned it (dangling pointer). */
	if (head->next == NULL) {
		rte_free(head);
		return NULL;
	}

	sess_csid *current = head;
	sess_csid *last = head;

	/* Find the last node in the linked list */
	while(current->next != NULL) {
		last = current;
		current = current->next;
	}

	/* Detach the last node from the chain */
	last->next = NULL;

	/* free the last node from linked list */
	rte_free(current);
	current = NULL;

	return head;
}
/**
 * @brief : Remove and free the node whose CP or UP SEID matches 'seid'.
 * @param : head, linked list head pointer
 * @param : seid, CP or UP session id identifying the node to remove
 * @return : New list head (unchanged when seid is not found), NULL on empty list
 */
sess_csid *
remove_sess_csid_data_node(sess_csid *head, uint64_t seid)
{
	if (head == NULL)
		return NULL;

	sess_csid *prev = NULL;
	for (sess_csid *node = head; node != NULL; prev = node, node = node->next) {

		/* Skip nodes that do not carry the requested SEID */
		if (node->cp_seid != seid && node->up_seid != seid)
			continue;

		/* Matching node is the head of the list */
		if (node == head)
			return remove_sess_csid_first_node(head);

		/* Matching node is the tail of the list */
		if (node->next == NULL)
			return remove_sess_csid_last_node(head);

		/* Matching node is in the middle: splice it out and free it */
		prev->next = node->next;
		node->next = NULL;
		rte_free(node);
		return head;
	}

	/* If no node present in link list for given seid then return NULL */
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Failed to remove node, not found for seid : %"PRIu64"\n",
			LOG_VALUE, seid);
	return head;
}
/**
 * @brief : Free every node AFTER the head of the session data linked list;
 *          the head node itself is retained with its next pointer cleared.
 * @param : head, linked list head pointer (may be NULL)
 * @return : Always 0
 */
int8_t
flush_sess_csid_data_list(sess_csid *head)
{
	if (head == NULL)
		return 0;

	/* Detach the chain following the head; head itself is kept */
	sess_csid *node = head->next;
	head->next = NULL;

	/* Release every detached node */
	while (node != NULL) {
		sess_csid *next = node->next;
		rte_free(node);
		node = next;
	}

	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | cp/state_machine/sm_gtpc_pcnd.c | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gtpv2c.h"
#include "sm_pcnd.h"
#include "cp_stats.h"
#include "debug_str.h"
#include "pfcp_util.h"
#include "gtp_messages_decoder.h"
#include "gtpv2c_error_rsp.h"
#include "cp_timer.h"
#include "pfcp.h"
#ifdef USE_REST
#include "main.h"
#endif
#include "cp_config.h"
#include "gw_adapter.h"
extern pfcp_config_t config;
extern struct cp_stats_t cp_stats;
extern int clSystemLog;
/**
 * @brief : Sanity-check a received GTPv2C message: payload length must
 *          match the header length field (unless piggybacked) and the
 *          GTP version must be v2.
 * @param : gtpv2c_rx, received GTPv2C header
 * @param : bytes_rx, number of bytes received on the socket
 * @param : peer_addr, sender address (may be NULL, used for logging only)
 * @param : iface, interface on which the message arrived
 * @return : 0 when the message is acceptable, otherwise a GTPv2C cause
 *           (GTPV2C_CAUSE_INVALID_LENGTH / GTPV2C_CAUSE_VERSION_NOT_SUPPORTED)
 */
uint8_t
gtpv2c_pcnd_check(gtpv2c_header_t *gtpv2c_rx, int bytes_rx,
		struct sockaddr_in *peer_addr, uint8_t iface)
{
	size_t expected_len = ntohs(gtpv2c_rx->gtpc.message_len)
			+ sizeof(gtpv2c_rx->gtpc);

	/* A non-piggybacked message must exactly fill the UDP payload */
	if ((unsigned)bytes_rx != expected_len
			&& gtpv2c_rx->gtpc.piggyback == 0) {
		/* According to 29.274 7.7.7, if message is request,
		 * reply with cause = GTPV2C_CAUSE_INVALID_LENGTH
		 * should be sent - ignoring packet for now
		 */
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"GTPv2C Received UDP Payload:"
				"\n\t(%d bytes) with gtpv2c + "
				"header (%u + %lu) = %lu bytes\n", LOG_VALUE,
				bytes_rx, ntohs(gtpv2c_rx->gtpc.message_len),
				sizeof(gtpv2c_rx->gtpc),
				ntohs(gtpv2c_rx->gtpc.message_len)
				+ sizeof(gtpv2c_rx->gtpc));
		return GTPV2C_CAUSE_INVALID_LENGTH;
	}

	if (bytes_rx <= 0)
		return 0;

	/* Only GTPv2C is handled here */
	if (gtpv2c_rx->gtpc.version == GTP_VERSION_GTPV2C)
		return 0;

	/* For a newer version, answer with a Version Not Supported Indication
	 * before discarding the packet */
	if (gtpv2c_rx->gtpc.version > GTP_VERSION_GTPV2C)
		send_version_not_supported(iface, gtpv2c_rx->teid.has_teid.seq);

	if (peer_addr != NULL) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"ERROR: Discarding packet from "IPV4_ADDR" due to gtp version %u not supported..\n",
				LOG_VALUE, IPV4_ADDR_HOST_FORMAT(ntohl(peer_addr->sin_addr.s_addr)), gtpv2c_rx->gtpc.version);
	} else {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"ERROR: Discarding packet due to gtp version %u not supported..\n",
				LOG_VALUE, gtpv2c_rx->gtpc.version);
	}
	return GTPV2C_CAUSE_VERSION_NOT_SUPPORTED;
}
uint8_t
gtpc_pcnd_check(gtpv2c_header_t *gtpv2c_rx, msg_info *msg, int bytes_rx,
peer_addr_t *peer_addr, uint8_t interface_type)
{
int ret = 0;
ue_context *context = NULL;
pdn_connection *pdn = NULL;
msg->msg_type = gtpv2c_rx->gtpc.message_type;
int ebi_index = 0;
int i = 0;
bool piggyback = FALSE;
uint8_t *li_piggyback_buf = NULL;
/*Below check is for GTPV2C version Check and GTPV2C MSG INVALID LENGTH CHECK */
if ((ret = gtpv2c_pcnd_check(gtpv2c_rx, bytes_rx, &peer_addr->ipv4, interface_type)) != 0 ){
if(ret == GTPV2C_CAUSE_VERSION_NOT_SUPPORTED) {
return ret;
}
switch(msg->msg_type) {
case GTP_CREATE_SESSION_REQ:
if(decode_create_sess_req((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.csr) != 0){
msg->cp_mode = 0;
cs_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0, interface_type);
process_error_occured_handler(&msg, NULL);
}
break;
case GTP_CREATE_SESSION_RSP:
if( decode_create_sess_rsp((uint8_t *)gtpv2c_rx,&msg->gtpc_msg.cs_rsp) != 0){
msg->cp_mode = 0;
cs_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0, interface_type);
process_error_occured_handler(&msg, NULL);
}
break;
case GTP_DELETE_SESSION_REQ:
if( decode_del_sess_req((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.dsr) != 0){
ds_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0, interface_type);
}
break;
case GTP_DELETE_SESSION_RSP:
if( decode_del_sess_rsp((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.ds_rsp) != 0){
ds_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0, interface_type);
}
break;
case GTP_MODIFY_BEARER_REQ:
if( decode_mod_bearer_req((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.mbr) != 0) {
mbr_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0, interface_type);
}
break;
case GTP_MODIFY_BEARER_RSP:
if( decode_mod_bearer_rsp((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.mb_rsp) != 0) {
mbr_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0, interface_type);
}
break;
case GTP_CREATE_BEARER_REQ:
if( decode_create_bearer_req((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.cb_req) != 0) {
cbr_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0, S5S8_IFACE);
}
break;
case GTP_CREATE_BEARER_RSP:
if( decode_create_bearer_rsp((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.cb_rsp) != 0) {
cbr_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
interface_type == S5S8_IFACE ? GX_IFACE : S5S8_IFACE);
}
break;
case GTP_DELETE_BEARER_REQ:
if( decode_del_bearer_req((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.db_req) != 0) {
delete_bearer_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0, S5S8_IFACE);
}
break;
case GTP_DELETE_BEARER_RSP:
if( decode_del_bearer_rsp((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.db_rsp) != 0) {
delete_bearer_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
interface_type == S5S8_IFACE ? GX_IFACE : S5S8_IFACE);
}
break;
case GTP_UPDATE_BEARER_REQ:
if((decode_upd_bearer_req((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.ub_req) != 0)){
ubr_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0, S5S8_IFACE);
}
break;
case GTP_UPDATE_BEARER_RSP:
if((decode_upd_bearer_rsp((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.ub_rsp) != 0)){
/*TODO : Need to change interface condition*/
ubr_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0, S5S8_IFACE);
}
break;
case GTP_DELETE_PDN_CONNECTION_SET_REQ:
if( decode_del_pdn_conn_set_req((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.del_pdn_req) != 0) {
/* TODO for delete pdn connection set request error response */
}
break;
case GTP_DELETE_PDN_CONNECTION_SET_RSP:
if( decode_del_pdn_conn_set_rsp((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.del_pdn_rsp) != 0) {
/* TODO for delete pdn connection set response error response */
}
break;
case GTP_UPDATE_PDN_CONNECTION_SET_REQ:
if( decode_upd_pdn_conn_set_req((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.upd_pdn_req) != 0) {
update_pdn_connection_set_error_response(msg, CAUSE_SOURCE_SET_TO_0, ret);
}
break;
case GTP_UPDATE_PDN_CONNECTION_SET_RSP:
if( decode_upd_pdn_conn_set_rsp((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.upd_pdn_rsp) != 0) {
/* TODO for update pdn connection set response error response */
mbr_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0, interface_type);
}
break;
case GTP_PGW_RESTART_NOTIFICATION_ACK:
if( decode_pgw_rstrt_notif_ack((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.pgw_rstrt_notif_ack) != 0) {
/* TODO for PGW restart notification response error response */
}
break;
case GTP_DELETE_BEARER_CMD:
if(decode_del_bearer_cmd((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.del_ber_cmd) != 0) {
delete_bearer_cmd_failure_indication(msg, ret, CAUSE_SOURCE_SET_TO_0,
interface_type);
}
break;
case GTP_DELETE_BEARER_FAILURE_IND:
if(decode_del_bearer_fail_indctn((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.del_fail_ind) != 0) {
delete_bearer_cmd_failure_indication(msg, ret, CAUSE_SOURCE_SET_TO_0,
interface_type);
}
break;
case GTP_CHANGE_NOTIFICATION_REQ:
if(decode_change_noti_req((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.change_not_req) != 0) {
change_notification_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
interface_type);
}
break;
case GTP_RELEASE_ACCESS_BEARERS_REQ:
if(decode_release_access_bearer_req((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.rel_acc_ber_req) != 0){
release_access_bearer_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
interface_type);
}
break;
case GTP_BEARER_RESOURCE_CMD :
if(decode_bearer_rsrc_cmd((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.bearer_rsrc_cmd) != 0) {
send_bearer_resource_failure_indication(msg, ret, CAUSE_SOURCE_SET_TO_0,
interface_type);
}
break;
case GTP_BEARER_RESOURCE_FAILURE_IND:
if(decode_bearer_rsrc_fail_indctn((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.ber_rsrc_fail_ind) != 0) {
send_bearer_resource_failure_indication(msg, ret, CAUSE_SOURCE_SET_TO_0,
interface_type);
}
break;
case GTP_MODIFY_BEARER_CMD:
if(decode_mod_bearer_cmd((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.mod_bearer_cmd) != 0) {
modify_bearer_failure_indication(msg, ret, CAUSE_SOURCE_SET_TO_0,
interface_type);
}
break;
case GTP_MODIFY_BEARER_FAILURE_IND:
if(decode_mod_bearer_fail_indctn((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.mod_fail_ind) != 0) {
modify_bearer_failure_indication(msg, ret, CAUSE_SOURCE_SET_TO_0,
interface_type);
}
break;
case GTP_IDENTIFICATION_RSP:
break;
case GTP_CREATE_INDIRECT_DATA_FORWARDING_TUNNEL_REQ:
if(decode_create_indir_data_fwdng_tunn_req((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.crt_indr_tun_req) != 0){
crt_indir_data_frwd_tun_error_response(msg, ret);
}
break;
case GTP_DELETE_INDIRECT_DATA_FORWARDING_TUNNEL_REQ:
if(decode_del_indir_data_fwdng_tunn_req((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.dlt_indr_tun_req) != 0) {
delete_indir_data_frwd_error_response(msg, ret);
}
break;
case GTP_MODIFY_ACCESS_BEARER_REQ:
if(decode_mod_acc_bearers_req((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.mod_acc_req) != 0) {
mod_access_bearer_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0);
}
break;
}
return -1;
}
switch(msg->msg_type) {
case GTP_CREATE_SESSION_REQ: {
uint8_t cp_type= 0;
if ((ret = decode_check_csr(gtpv2c_rx, &msg->gtpc_msg.csr, &cp_type)) != 0) {
if(ret != -1) {
if (cp_type != 0) {
msg->cp_mode = cp_type;
cs_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
cp_type != PGWC ? S11_IFACE : S5S8_IFACE);
} else {
/* Send CS error response if failed to select gateway type */
msg->cp_mode = 0;
cs_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
interface_type);
}
}
return -1;
}
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Selected mode for Gateway: %s\n",
LOG_VALUE, cp_type == SGWC ? "SGW-C" : cp_type == PGWC ? "PGW-C" :
cp_type == SAEGWC ? "SAEGW-C" : "UNKNOWN");
msg->cp_mode = cp_type;
msg->interface_type = interface_type;
/*CLI*/
/*add entry of MME(if cp is SGWC) and SGWC (if cp PGWC)*/
if ((peer_addr->ipv4.sin_addr.s_addr != 0)
|| (peer_addr->ipv6.sin6_addr.s6_addr)) {
node_address_t node_addr = {0};
get_peer_node_addr(peer_addr, &node_addr);
add_node_conn_entry(&node_addr,
msg->cp_mode != PGWC ? S11_SGW_PORT_ID : S5S8_PGWC_PORT_ID,
msg->cp_mode);
}
msg->proc = get_procedure(msg);
if (INITIAL_PDN_ATTACH_PROC == msg->proc) {
/* VS: Set the initial state for initial PDN connection */
/* VS: Make single state for all combination */
if (cp_type == SGWC) {
/*Set the appropriate state for the SGWC */
msg->state = SGWC_NONE_STATE;
} else {
/*Set the appropriate state for the SAEGWC and PGWC*/
if (config.use_gx) {
msg->state = PGWC_NONE_STATE;
} else {
msg->state = SGWC_NONE_STATE;
}
}
} else if (S1_HANDOVER_PROC == msg->proc) {
msg->state = SGWC_NONE_STATE;
}
/*Set the appropriate event type.*/
msg->event = CS_REQ_RCVD_EVNT;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
"Msg_Type:%s[%u], Teid:%u, "
"Procedure:%s, State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
gtpv2c_rx->teid.has_teid.teid,
get_proc_string(msg->proc),
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_CREATE_SESSION_RSP: {
struct resp_info *resp = NULL;
ret = decode_create_sess_rsp((uint8_t *)gtpv2c_rx, &msg->gtpc_msg.cs_rsp);
if(!ret)
return -1;
delete_timer_entry(msg->gtpc_msg.cs_rsp.header.teid.has_teid.teid);
msg->interface_type = interface_type;
if(msg->gtpc_msg.cs_rsp.cause.cause_value != GTPV2C_CAUSE_REQUEST_ACCEPTED
&& msg->gtpc_msg.cs_rsp.cause.cause_value != GTPV2C_CAUSE_NEW_PDN_TYPE_NETWORK_PREFERENCE
&& msg->gtpc_msg.cs_rsp.cause.cause_value != GTPV2C_CAUSE_NEW_PDN_TYPE_SINGLE_ADDR_BEARER) {
msg->cp_mode = 0;
cs_error_response(msg, msg->gtpc_msg.cs_rsp.cause.cause_value,
CAUSE_SOURCE_SET_TO_1, S11_IFACE);
return -1;
}
if(get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.cs_rsp.header.teid.has_teid.teid, &context) != 0)
{
if(msg->gtpc_msg.cs_rsp.sender_fteid_ctl_plane.interface_type == S5_S8_PGW_GTP_C) {
msg->cp_mode = 0;
cs_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
S11_IFACE);
}
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
"UE context for teid: %d\n \n", LOG_VALUE, msg->gtpc_msg.cs_rsp.header.teid.has_teid.teid);
return -1;
}
msg->cp_mode = context->cp_mode;
/*extract ebi_id from array as all the ebi's will be of same pdn.*/
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.cs_rsp.bearer_contexts_created[0].eps_bearer_id.ebi_ebi);
if (ebi_index == -1) {
cs_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0,
msg->cp_mode!= PGWC ? S11_IFACE : S5S8_IFACE);
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI "
"ID\n", LOG_VALUE);
return -1;
}
pdn = GET_PDN(context, ebi_index);
if (pdn == NULL) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
return -1;
}
msg->state = pdn->state;
update_sys_stat(number_of_users, INCREMENT);
update_sys_stat(number_of_active_session, INCREMENT);
if(msg->gtpc_msg.csr.header.gtpc.piggyback) {
msg->proc = ATTACH_DEDICATED_PROC;
pdn->proc = ATTACH_DEDICATED_PROC;
piggyback = TRUE;
if(get_sess_entry(pdn->seid, &resp) == 0)
memcpy(&resp->gtpc_msg.cs_rsp, &msg->gtpc_msg.cs_rsp, sizeof(create_sess_rsp_t));
if ((context != NULL) && (PRESENT == context->dupl)) {
li_piggyback_buf = (uint8_t *)gtpv2c_rx;
}
gtpv2c_rx = (gtpv2c_header_t *)((uint8_t *)gtpv2c_rx + ntohs(gtpv2c_rx->gtpc.message_len)
+ sizeof(gtpv2c_rx->gtpc));
msg->msg_type = gtpv2c_rx->gtpc.message_type;
} else {
msg->proc = pdn->proc;
/*Set the appropriate event type.*/
msg->event = CS_RESP_RCVD_EVNT;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
"Msg_Type:%s[%u], Teid:%u, "
"Procedure:%s, State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
gtpv2c_rx->teid.has_teid.teid,
get_proc_string(msg->proc),
get_state_string(msg->state), get_event_string(msg->event));
break;
}
}
case GTP_CREATE_BEARER_REQ:{
teid_key_t teid_key = {0};
if((ret = decode_create_bearer_req((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.cb_req) == 0))
return -1;
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.cb_req.lbi.ebi_ebi);
if(get_ue_context_by_sgw_s5s8_teid(gtpv2c_rx->teid.has_teid.teid, &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
"UE context for teid: %d\n", LOG_VALUE, gtpv2c_rx->teid.has_teid.teid);
cbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
S5S8_IFACE);
return -1;
}
/*Delete timer entry for bearer resource command*/
if (context->ue_initiated_seq_no == msg->gtpc_msg.cb_req.header.teid.has_teid.seq) {
delete_timer_entry(gtpv2c_rx->teid.has_teid.teid);
snprintf(teid_key.teid_key, PROC_LEN, "%s%d", "UE_REQ_BER_RSRC_MOD_PROC",
msg->gtpc_msg.cb_req.header.teid.has_teid.seq);
/* Delete the TEID entry for bearer resource cmd */
delete_teid_entry_for_seq(teid_key);
}
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
cbr_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0,
S5S8_IFACE);
return -1;
}
msg->cp_mode = context->cp_mode;
msg->interface_type = interface_type;
msg->state = context->eps_bearers[ebi_index]->pdn->state;
if((context->eps_bearers[ebi_index]->pdn->proc == ATTACH_DEDICATED_PROC &&
TRUE == piggyback) ||
context->eps_bearers[ebi_index]->pdn->proc == UE_REQ_BER_RSRC_MOD_PROC){
msg->proc = context->eps_bearers[ebi_index]->pdn->proc;
}else {
msg->proc = get_procedure(msg);
}
msg->event = CREATE_BER_REQ_RCVD_EVNT;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
"Msg_Type:%s[%u], Teid:%u, "
"State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
gtpv2c_rx->teid.has_teid.teid,
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_MODIFY_BEARER_RSP: {
teid_key_t teid_key = {0};
if((ret = decode_mod_bearer_rsp((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.mb_rsp) == 0)) {
return -1;
}
msg->proc = get_procedure(msg);
snprintf(teid_key.teid_key, PROC_LEN, "%s%d", get_proc_string(msg->proc),
msg->gtpc_msg.mb_rsp.header.teid.has_teid.seq);
/* If Received Error Modify Bearer Resp form peer node with 0 teid */
if ((!(msg->gtpc_msg.mb_rsp.header.teid.has_teid.teid)) &&
(msg->gtpc_msg.mb_rsp.cause.cause_value != GTPV2C_CAUSE_REQUEST_ACCEPTED)) {
struct teid_value_t *teid_value = NULL;
teid_value = get_teid_for_seq_number(teid_key);
if (teid_value == NULL) {
/* TODO: Add the appropriate handling */
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to get TEID value for Sequence "
"Number key : %s \n", LOG_VALUE,
teid_key.teid_key);
return -1;
}
/* Delete the timer entry for MBREQ */
delete_timer_entry(teid_value->teid);
/* Copy local stored TEID in the MBResp header */
msg->gtpc_msg.mb_rsp.header.teid.has_teid.teid = teid_value->teid;
/* Fill the response struct and sending peer node */
mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
S11_IFACE);
/* Delete the TEID entry for MB REQ */
delete_teid_entry_for_seq(teid_key);
process_error_occured_handler(&msg, NULL);
/* Set the return value to skip SM */
return GTPC_ZERO_TEID_FOUND;
}
delete_timer_entry(msg->gtpc_msg.mb_rsp.header.teid.has_teid.teid);
/* Delete the TEID entry for MB REQ */
delete_teid_entry_for_seq(teid_key);
if(get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.mb_rsp.header.teid.has_teid.teid, &context) != 0)
{
if(msg->gtpc_msg.mb_rsp.bearer_count != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
"UE context for teid: %d\n", LOG_VALUE,
msg->gtpc_msg.mb_rsp.header.teid.has_teid.teid);
msg->cp_mode = 0;
cs_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
S11_IFACE);
process_error_occured_handler(&msg, NULL);
} else {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
"UE context for teid: %d\n", LOG_VALUE,
msg->gtpc_msg.mb_rsp.header.teid.has_teid.teid);
mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
S11_IFACE);
process_error_occured_handler(&msg, NULL);
}
return -1;
}
msg->cp_mode = context->cp_mode;
msg->interface_type = interface_type;
if(msg->gtpc_msg.mb_rsp.cause.cause_value != GTPV2C_CAUSE_REQUEST_ACCEPTED) {
if(context->procedure == SGW_RELOCATION_PROC) {
cs_error_response(msg, msg->gtpc_msg.mb_rsp.cause.cause_value,
CAUSE_SOURCE_SET_TO_1,
context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
} else {
mbr_error_response(msg, msg->gtpc_msg.mb_rsp.cause.cause_value,
CAUSE_SOURCE_SET_TO_1, context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
}
return -1;
}
if (msg->gtpc_msg.mb_rsp.linked_eps_bearer_id.ebi_ebi == 0) {
if (msg->gtpc_msg.mb_rsp.bearer_contexts_modified[0].header.len != 0) {
/*extract ebi_id from array as all the ebi's will be of same pdn.*/
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.mb_rsp.bearer_contexts_modified[0].eps_bearer_id.ebi_ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
CAUSE_SOURCE_SET_TO_0,
msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
return -1;
}
}else{
struct eps_bearer_t *bearer_temp = NULL;
ret = get_bearer_by_teid(msg->gtpc_msg.mb_rsp.header.teid.has_teid.teid, &bearer_temp);
if(ret < 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Bearer found "
"for teid: %x\n", LOG_VALUE, msg->gtpc_msg.mb_rsp.header.teid.has_teid.teid);
mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
CAUSE_SOURCE_SET_TO_0,
msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
return -1;
}
int ebi = UE_BEAR_ID(bearer_temp->pdn->seid);
ebi_index = GET_EBI_INDEX(ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
return -1;
}
}
} else {
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.mb_rsp.linked_eps_bearer_id.ebi_ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
return -1;
}
}
pdn = GET_PDN(context, ebi_index);
if(pdn == NULL){
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
return -1;
}
msg->cp_mode = context->cp_mode;
msg->state = pdn->state;
msg->proc = pdn->proc;
msg->event = MB_RESP_RCVD_EVNT;
break;
}
case GTP_DELETE_SESSION_REQ: {
/* Delete Session Request handler: decode the message, resolve the UE
 * context from the header TEID, validate the linked bearer id (LBI)
 * and stage the DS_REQ event for the state machine. */
/* Decode delete session request */
ret = decode_del_sess_req((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.dsr);
if (ret == 0){
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to Decode GTP_DELETE_SESSION_REQ \n",LOG_VALUE);
return -1;
}
if(get_ue_context(msg->gtpc_msg.dsr.header.teid.has_teid.teid,
&context) != 0) {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to get UE context for teid: %d\n",LOG_VALUE,
msg->gtpc_msg.dsr.header.teid.has_teid.teid);
/* Pick the error-response interface from the indication flags:
 * NOTE(review): presumably flags are only set on MME-originated
 * (S11) requests — confirm against the senders of this message. */
if(msg->gtpc_msg.dsr.indctn_flgs.header.len != 0) {
ds_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
S11_IFACE);
} else {
ds_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
S5S8_IFACE);
}
return -1;
}
msg->cp_mode = context->cp_mode;
msg->interface_type = interface_type;
/* Validate the Linked EPS Bearer Id carried in the request. */
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.dsr.lbi.ebi_ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
ds_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0,
msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
return -1;
}
msg->proc = get_procedure(msg);
/* For a full detach, record the procedure on the UE context so later
 * messages of the same flow are associated with it. */
if (DETACH_PROC == msg->proc) {
if (update_ue_proc(context, msg->proc, ebi_index) != 0) {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to Update Procedure for"
" GTP_DELETE_SESSION_REQ \n",LOG_VALUE);
ds_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
return -1;
}
}
/*Set the appropriate event type and state.*/
msg->state = CONNECTED_STATE;
msg->event = DS_REQ_RCVD_EVNT;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
"Msg_Type:%s[%u], Teid:%u, "
"Procedure:%s, State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
gtpv2c_rx->teid.has_teid.teid, get_proc_string(msg->proc),
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_DELETE_SESSION_RSP: {
/* Delete Session Response handler: decode, recover the local TEID if
 * the peer answered with a zero-TEID error, clear the retransmission
 * timer / sequence-number bookkeeping, and stage DS_RESP_RCVD_EVNT. */
teid_key_t teid_key = {0};
ret = decode_del_sess_rsp((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.ds_rsp);
if (ret == 0){
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to Decode GTP_DELETE_SESSION_RSP \n",LOG_VALUE);
return -1;
}
msg->proc = get_procedure(msg);
/* Key = procedure name + sequence number; used to map the response
 * back to the locally stored TEID of the original request. */
snprintf(teid_key.teid_key, PROC_LEN, "%s%d", get_proc_string(msg->proc),
msg->gtpc_msg.ds_rsp.header.teid.has_teid.seq);
/* If Received Error DSResp form peer node with 0 teid */
if(get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.ds_rsp.header.teid.has_teid.teid, &context) != 0) {
struct teid_value_t *teid_value = NULL;
teid_value = get_teid_for_seq_number(teid_key);
if (teid_value == NULL) {
/* TODO: Add the appropriate handling */
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to get TEID value for Sequence "
"Number key: %s \n", LOG_VALUE,
teid_key.teid_key);
ds_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0, S11_IFACE);
return -1;
}
/* Delete the timer entry for DS REQ */
delete_timer_entry(teid_value->teid);
/* Copy local stored TEID in the DSResp header */
msg->gtpc_msg.ds_rsp.header.teid.has_teid.teid = teid_value->teid;
/* Delete the TEID entry for DS REQ */
delete_teid_entry_for_seq(teid_key);
/* Retry the context lookup with the recovered TEID. */
if(get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.ds_rsp.header.teid.has_teid.teid, &context) != 0) {
ds_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0, S11_IFACE);
return -1;
}
}
/* NOTE(review): on the recovered-TEID path the timer and sequence
 * entries were already deleted above; these calls then act on the
 * recovered TEID again — presumably harmless no-ops, confirm. */
delete_timer_entry(msg->gtpc_msg.ds_rsp.header.teid.has_teid.teid);
/* Delete the TEID entry for DS REQ */
delete_teid_entry_for_seq(teid_key);
if(context != NULL){
msg->cp_mode = context->cp_mode;
}
msg->interface_type = interface_type;
/* Here we are considering GTPV2C_CAUSE_CONTEXT_NOT_FOUND as success
 * Beacuse purpose of DSReq is to cleanup that session
 * and if the node don't have the session data
 * that means purpose of Request is achived so no need to send error and
 * Terminate process here
 */
/*Set the appropriate procedure, state and event type.*/
msg->state = DS_REQ_SNT_STATE;
/*Set the appropriate event type.*/
msg->event = DS_RESP_RCVD_EVNT;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
"Msg_Type:%s[%u], Teid:%u, "
"Procedure:%s, State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
msg->gtpc_msg.ds_rsp.header.teid.has_teid.teid,
get_proc_string(msg->proc),
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_RELEASE_ACCESS_BEARERS_REQ: {
/* Release Access Bearers Request handler: decode, resolve the UE
 * context, tag every active PDN with the new procedure and stage
 * REL_ACC_BER_REQ_RCVD_EVNT. */
if(decode_release_access_bearer_req((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.rel_acc_ber_req) == 0){
return -1;
}
/* Header TEID arrives in network byte order; convert for lookups. */
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
if(get_ue_context(msg->gtpc_msg.rel_acc_ber_req.header.teid.has_teid.teid, &context)) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE Context"
"for teid: %d\n",LOG_VALUE, gtpv2c_rx->teid.has_teid.teid);
release_access_bearer_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
CAUSE_SOURCE_SET_TO_0, S11_IFACE);
return -1;
}
/* Reset the per-UE PFCP session counter for the release flow. */
context->pfcp_sess_count = 0;
msg->cp_mode = context->cp_mode;
msg->interface_type = interface_type;
msg->proc = get_procedure(msg);
msg->event = REL_ACC_BER_REQ_RCVD_EVNT;
/* Apply the procedure to every active PDN; msg->state ends up as the
 * state of the LAST non-NULL PDN scanned. */
for(i=0; i < MAX_BEARERS; i++){
if(context->pdns[i] == NULL){
continue;
}
else {
context->pdns[i]->proc = msg->proc;
msg->state = context->pdns[i]->state;
}
}
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
"Msg_Type:%s[%u], Teid:%u, "
"State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
gtpv2c_rx->teid.has_teid.teid,
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_DOWNLINK_DATA_NOTIFICATION_ACK: {
/* DDN Acknowledge handler: decode, resolve the UE context, take
 * state/proc/ebi from the FIRST active PDN, clear the DDN timer and
 * stage DDN_ACK_RESP_RCVD_EVNT. */
ret = decode_dnlnk_data_notif_ack((uint8_t *)gtpv2c_rx, &msg->gtpc_msg.ddn_ack);
if (ret == 0)
return -1;
/* NOTE(review): this local shadows the function-scope ebi_index used
 * by the other cases — intentional here, but easy to misread. */
int ebi_index = 0;
/*Retrive UE state. */
if (get_ue_context(ntohl(gtpv2c_rx->teid.has_teid.teid), &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE Context"
"for teid: %d\n",LOG_VALUE, ntohl(gtpv2c_rx->teid.has_teid.teid));
return -1;
}
/* Scan stops at the first non-NULL PDN (note the break). */
for(i=0; i < MAX_BEARERS; i++){
if(context->pdns[i] == NULL){
continue;
}
else{
msg->state = context->pdns[i]->state;
msg->proc = context->pdns[i]->proc;
ebi_index = GET_EBI_INDEX(context->pdns[i]->default_bearer_id);
if(ebi_index == -1){
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI Index\n", LOG_VALUE);
return GTPV2C_CAUSE_SYSTEM_FAILURE;
}
break;
}
}
/* Stop the DDN retransmission timer for this session. */
delete_gtpv2c_if_timer_entry(msg->gtpc_msg.ddn_ack.header.teid.has_teid.teid, ebi_index);
msg->cp_mode = context->cp_mode;
msg->interface_type = interface_type;
/*Set the appropriate event type.*/
msg->event = DDN_ACK_RESP_RCVD_EVNT;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
"Msg_Type:%s[%u], Teid:%u, "
"Procedure:%s, State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
gtpv2c_rx->teid.has_teid.teid,
get_proc_string(msg->proc),
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_DOWNLINK_DATA_NOTIFICATION_FAILURE_IND:{
/* DDN Failure Indication handler: decode, resolve the UE context by
 * TEID or — for a zero TEID — by IMSI, pull state/proc/ebi from the
 * active PDNs, clear the DDN timer and stage DDN_FAILURE_INDIC_EVNT. */
ret = decode_dnlnk_data_notif_fail_indctn((uint8_t *)gtpv2c_rx, &msg->gtpc_msg.ddn_fail_ind);
if (ret == 0)
return -1;
/*Retrive UE */
if(ntohl(gtpv2c_rx->teid.has_teid.teid )!= 0){
if (get_ue_context(ntohl(gtpv2c_rx->teid.has_teid.teid), &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE Context"
"for teid: %d\n",LOG_VALUE, ntohl(gtpv2c_rx->teid.has_teid.teid));
return -1;
}
} else {
/* Zero TEID: fall back to the IMSI IE, if present, to find the UE. */
if(msg->gtpc_msg.ddn_fail_ind.imsi.header.len){
ret = rte_hash_lookup_data(ue_context_by_imsi_hash,
&msg->gtpc_msg.ddn_fail_ind.imsi.imsi_number_digits,
(void **) &context);
if(ret < 0){
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE Context"
"for imsi: %ld\n",LOG_VALUE, msg->gtpc_msg.ddn_fail_ind.imsi.imsi_number_digits);
return -1;
}
} else {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"There is no teid and no imsi present \n",
LOG_VALUE);
return -1;
}
}
/* NOTE(review): unlike the DDN-ACK handler this loop has no break,
 * so state/proc/ebi_index come from the LAST non-NULL PDN — confirm
 * whether that is intentional. */
for(i=0; i < MAX_BEARERS; i++){
if(context->pdns[i] == NULL){
continue;
}
else{
msg->state = context->pdns[i]->state;
msg->proc = context->pdns[i]->proc;
ebi_index = GET_EBI_INDEX(context->pdns[i]->default_bearer_id);
if(ebi_index == -1){
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI Index\n", LOG_VALUE);
return GTPV2C_CAUSE_SYSTEM_FAILURE;
}
}
}
delete_gtpv2c_if_timer_entry(msg->gtpc_msg.ddn_fail_ind.header.teid.has_teid.teid, ebi_index);
msg->cp_mode = context->cp_mode;
msg->interface_type = interface_type;
/* Set the appropriate event type. */
msg->event = DDN_FAILURE_INDIC_EVNT;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
"Msg_Type:%s[%u], Teid:%u, "
"Procedure:%s, State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
gtpv2c_rx->teid.has_teid.teid,
get_proc_string(msg->proc),
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_CREATE_BEARER_RSP:{
/* Create Bearer Response handler: decode, handle the zero-TEID error
 * path via the seq-number->TEID map, resolve the UE context/PDN, and
 * either stage CREATE_BER_RESP_RCVD_EVNT or — for a piggybacked
 * response — advance gtpv2c_rx to the appended message and fall
 * through (no break) to its case. */
struct resp_info *resp = NULL;
teid_key_t teid_key = {0};
/* Fix: parenthesize the assignment — previously '==' bound tighter
 * than '=', leaving 'ret' holding the boolean comparison result
 * instead of the decoded length. The branch test is unchanged. */
if((ret = decode_create_bearer_rsp((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.cb_rsp)) == 0)
return -1;
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
ebi_index = GET_EBI_INDEX((MAX_BEARERS + NUM_EBI_RESERVED));
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
/* Fix: 'context' has NOT been looked up yet at this point, so the
 * previous 'context->cp_mode' ternary dereferenced a NULL/stale
 * pointer. Use the receive interface, as the zero-TEID error path
 * below already does. */
cbr_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0,
interface_type);
return -1;
}
msg->proc = get_procedure(msg);
/* Key = procedure name + sequence number, mapping this response back
 * to the locally stored TEID of the original CB REQ. */
snprintf(teid_key.teid_key, PROC_LEN, "%s%d", get_proc_string(msg->proc),
msg->gtpc_msg.cb_rsp.header.teid.has_teid.seq);
/* If Received Error CBResp form peer node with 0 teid */
if ((!(msg->gtpc_msg.cb_rsp.header.teid.has_teid.teid)) &&
(msg->gtpc_msg.cb_rsp.cause.cause_value != GTPV2C_CAUSE_REQUEST_ACCEPTED)) {
struct teid_value_t *teid_value = NULL;
teid_value = get_teid_for_seq_number(teid_key);
if (teid_value == NULL) {
/* TODO: Add the appropriate handling */
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to get TEID value for Sequence "
"Number key: %s \n", LOG_VALUE,
teid_key.teid_key);
return -1;
}
/* Delete the timer entry for CB REQ */
delete_pfcp_if_timer_entry(teid_value->teid, ebi_index);
/* Copy local stored TEID in the CBResp header */
msg->gtpc_msg.cb_rsp.header.teid.has_teid.teid = teid_value->teid;
/* Fill the response struct and sending peer node */
cbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
interface_type);
/* Delete the TEID entry for CB REQ */
delete_teid_entry_for_seq(teid_key);
/* Set the return value to skip SM */
return GTPC_ZERO_TEID_FOUND;
}
if (get_ue_context(gtpv2c_rx->teid.has_teid.teid, &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE, gtpv2c_rx->teid.has_teid.teid);
return -1;
}
pdn = GET_PDN(context, ebi_index);
if(pdn == NULL){
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to get pdn for ebi_index : %d \n", LOG_VALUE, ebi_index);
cbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
interface_type);
return -1;
}
/* UE-requested bearer-resource-modification keeps its own procedure. */
if (pdn->proc != UE_REQ_BER_RSRC_MOD_PROC)
pdn->proc = msg->proc;
delete_pfcp_if_timer_entry(gtpv2c_rx->teid.has_teid.teid, ebi_index);
/* Non-piggybacked error responses terminate the flow here. */
if(!msg->gtpc_msg.cb_rsp.header.gtpc.piggyback ){
if(msg->gtpc_msg.cb_rsp.cause.cause_value != GTPV2C_CAUSE_REQUEST_ACCEPTED){
cbr_error_response(msg, msg->gtpc_msg.cb_rsp.cause.cause_value,
CAUSE_SOURCE_SET_TO_0, context->cp_mode == SGWC ? S5S8_IFACE : GX_IFACE);
return -1;
}
}
/* Delete the TEID entry for CB REQ */
delete_teid_entry_for_seq(teid_key);
if((ret = get_ue_state(gtpv2c_rx->teid.has_teid.teid ,ebi_index)) > 0){
msg->state = ret;
}else{
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" Ue state for tied: %d\n", LOG_VALUE, gtpv2c_rx->teid.has_teid.teid);
cbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
interface_type);
return -1;
}
msg->cp_mode = context->cp_mode;
msg->interface_type = interface_type;
if(msg->gtpc_msg.cb_rsp.header.gtpc.piggyback){
/* Piggybacked attach-with-dedicated-bearer: stash the CBResp for
 * error recovery, then advance to the appended message. */
msg->proc = ATTACH_DEDICATED_PROC;
pdn->proc = ATTACH_DEDICATED_PROC;
context->piggyback = TRUE;
if(get_sess_entry(pdn->seid, &resp) == 0) {
memcpy(&resp->gtpc_msg.cb_rsp, &msg->gtpc_msg.cb_rsp, sizeof(create_bearer_rsp_t));
/*storing for error scenarios*/
if (msg->gtpc_msg.cb_rsp.header.gtpc.teid_flag) {
resp->cb_rsp_attach.seq = msg->gtpc_msg.cb_rsp.header.teid.has_teid.seq;
} else {
resp->cb_rsp_attach.seq = msg->gtpc_msg.cb_rsp.header.teid.no_teid.seq;
}
resp->cb_rsp_attach.cause_value = msg->gtpc_msg.cb_rsp.cause.cause_value;
resp->cb_rsp_attach.bearer_cnt = msg->gtpc_msg.cb_rsp.bearer_cnt;
for (uint8_t itr = 0; itr < msg->gtpc_msg.cb_rsp.bearer_cnt; itr++) {
resp->cb_rsp_attach.bearer_cause_value[itr] =
msg->gtpc_msg.cb_rsp.bearer_contexts[itr].cause.cause_value;
resp->cb_rsp_attach.ebi_ebi[itr] =
msg->gtpc_msg.cb_rsp.bearer_contexts[itr].eps_bearer_id.ebi_ebi;
}
}
memcpy(&msg->cb_rsp, &msg->gtpc_msg.cb_rsp, sizeof(create_bearer_rsp_t));
if ((context != NULL) && (PRESENT == context->dupl)) {
li_piggyback_buf = (uint8_t *)gtpv2c_rx;
}
/* Advance gtpv2c_rx past this message to the piggybacked one and
 * retarget msg_type; control then FALLS THROUGH (no break) to the
 * next case, which processes the appended message. */
gtpv2c_rx = (gtpv2c_header_t *)((uint8_t *)gtpv2c_rx + ntohs(gtpv2c_rx->gtpc.message_len)
+ sizeof(gtpv2c_rx->gtpc));
msg->msg_type = gtpv2c_rx->gtpc.message_type;
} else {
if ( pdn->proc == UE_REQ_BER_RSRC_MOD_PROC) {
msg->proc = UE_REQ_BER_RSRC_MOD_PROC;
} else {
msg->proc = get_procedure(msg);
}
msg->event = CREATE_BER_RESP_RCVD_EVNT;
if(context->piggyback == TRUE){
msg->state = CREATE_BER_REQ_SNT_STATE;
}
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
"Msg_Type:%s[%u], Teid:%u, "
"State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
gtpv2c_rx->teid.has_teid.teid,
get_state_string(msg->state), get_event_string(msg->event));
break;
}
}
case GTP_MODIFY_BEARER_REQ: {
/* Modify Bearer Request handler: decode, possibly demote a combined
 * gateway to PGW-C mode based on the sender F-TEID, resolve the UE
 * context/PDN and stage MB_REQ_RCVD_EVNT (or the piggybacked
 * attach-dedicated flow). */
/*Decode the received msg and stored into the struct. */
/* NOTE(review): '==' binds tighter than '=' here, so ret receives the
 * comparison result, not the decoded length; the branch test itself
 * is still correct. */
if((ret = decode_mod_bearer_req((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.mbr) == 0)) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
"Erorr in decoding MBR Req\n", LOG_VALUE);
return -1;
}
uint8_t cp_mode = 0;
/* Dynamically Set the gateway modes */
if ((msg->gtpc_msg.mbr.sender_fteid_ctl_plane.header.len != 0) &&
(msg->gtpc_msg.mbr.sender_fteid_ctl_plane.interface_type == S5_S8_SGW_GTP_C)
&& (interface_type == S5S8_IFACE)) {
/* Selection/Demotion Criteria for Combined GW to PGWC */
if (config.cp_type == SAEGWC) {
cp_mode = PGWC;
} else if (config.cp_type == PGWC) {
cp_mode = PGWC;
} else {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Not Valid MBR Request for configured GW, Gateway Mode:%s\n",
LOG_VALUE, config.cp_type == SGWC ? "SGW-C" :
config.cp_type == PGWC ? "PGW-C" :
config.cp_type == SAEGWC ? "SAEGW-C" : "UNKNOWN");
return -1;
}
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Demoted Gateway Mode: %s\n",
LOG_VALUE, cp_mode == SGWC ? "SGW-C" : cp_mode == PGWC ? "PGW-C" :
cp_mode == SAEGWC ? "SAEGW-C" : "UNKNOWN");
}
msg->proc = get_procedure(msg);
msg->state = CONNECTED_STATE;
msg->event = MB_REQ_RCVD_EVNT;
/*Retrive UE state. */
if(get_ue_context(msg->gtpc_msg.mbr.header.teid.has_teid.teid, &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE, msg->gtpc_msg.mbr.header.teid.has_teid.teid);
mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
interface_type);
memset(&msg->gtpc_msg.mbr, 0, sizeof(mod_bearer_req_t));
/* Piggybacked MBR from the CBResp path: retry the lookup with the
 * TEID stashed in msg->cb_rsp before giving up. */
if (NOT_PRESENT != msg->cb_rsp.header.teid.has_teid.teid) {
if(get_ue_context(msg->cb_rsp.header.teid.has_teid.teid, &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE, msg->cb_rsp.header.teid.has_teid.teid);
return -1;
}
} else
return -1;
}
/* Reset the CP Mode Flag */
context->cp_mode_flag = FALSE;
if ((cp_mode != 0) && (cp_mode != context->cp_mode)) {
/* Replicat/Assign in the Context CP Mode */
context->cp_mode = cp_mode;
/* Set the CP Mode Flag */
context->cp_mode_flag = TRUE;
}
/*extract ebi_id from array as all the ebi's will be of same pdn.*/
if(msg->gtpc_msg.mbr.bearer_count != 0) {
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.mbr.bearer_contexts_to_be_modified[0].eps_bearer_id.ebi_ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
mbr_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0,
interface_type);
return -1;
}
pdn = GET_PDN(context, ebi_index);
if(pdn == NULL){
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to get pdn for ebi_index : %d \n", LOG_VALUE, ebi_index);
mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
interface_type);
return -1;
}
pdn->proc = msg->proc;
}
/* Set CP mode in Msg struct for STATE MACHINE */
msg->cp_mode = context->cp_mode;
msg->interface_type = interface_type;
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
/* On demotion to PGW-C, register the peer node for echo monitoring. */
if(cp_mode == PGWC) {
if ((peer_addr->ipv4.sin_addr.s_addr != 0)
|| (peer_addr->ipv6.sin6_addr.s6_addr)) {
node_address_t node_addr = {0};
get_peer_node_addr(peer_addr, &node_addr);
add_node_conn_entry(&node_addr,
S5S8_PGWC_PORT_ID, cp_mode);
}
msg->cp_mode = cp_mode;
}
/* NOTE(review): if bearer_count was 0, 'pdn' was never assigned in
 * this case — dereferencing it here relies on a value left over from
 * earlier processing; confirm this path cannot see a NULL pdn. */
if(context->piggyback == TRUE) {
pdn->proc = ATTACH_DEDICATED_PROC;
msg->proc = ATTACH_DEDICATED_PROC;
msg->state = CREATE_BER_REQ_SNT_STATE;
msg->event = MB_REQ_RCVD_EVNT;
}
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
"Msg_Type:%s[%u], Teid:%u, "
"Procedure:%s, State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
gtpv2c_rx->teid.has_teid.teid,
get_proc_string(msg->proc),
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_DELETE_BEARER_REQ:{
/* Delete Bearer Request handler: decode, resolve the UE context via
 * the SGW S5/S8 TEID, clear any pending bearer-resource-command
 * bookkeeping, pick the EBI (LBI if present, else first listed
 * bearer) and stage DELETE_BER_REQ_RCVD_EVNT. */
teid_key_t teid_key = {0};
/* NOTE(review): '==' binds tighter than '='; ret gets the comparison
 * result, not the decoded length. Branch test is still correct. */
if((ret = decode_del_bearer_req((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.db_req) == 0)) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure "
"while decoding Delete Bearer Request\n", LOG_VALUE);
return -1;
}
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
if(get_ue_context_by_sgw_s5s8_teid(gtpv2c_rx->teid.has_teid.teid, &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Failed to get "
"UE context for teid: %d\n", LOG_VALUE, gtpv2c_rx->teid.has_teid.teid);
delete_bearer_error_response(msg,GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
CAUSE_SOURCE_SET_TO_0, S5S8_IFACE);
return -1;
}
/*Delete timer entry for bearer resource command*/
/* Sequence match means this DBReq answers a UE-initiated bearer
 * resource command; drop its retransmission timer and seq entry. */
if (context->ue_initiated_seq_no == msg->gtpc_msg.db_req.header.teid.has_teid.seq) {
delete_timer_entry(gtpv2c_rx->teid.has_teid.teid);
snprintf(teid_key.teid_key, PROC_LEN, "%s%d", "UE_REQ_BER_RSRC_MOD_PROC",
msg->gtpc_msg.db_req.header.teid.has_teid.seq);
/* Delete the TEID entry for bearer resource cmd */
delete_teid_entry_for_seq(teid_key);
}
if (msg->gtpc_msg.db_req.lbi.header.len) {
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.db_req.lbi.ebi_ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
delete_bearer_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0,
S5S8_IFACE);
return -1;
}
} else {
/*extract ebi_id from array as all the ebi's will be of same pdn.*/
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.db_req.eps_bearer_ids[0].ebi_ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
delete_bearer_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0,
S5S8_IFACE);
return -1;
}
}
if(context->eps_bearers[ebi_index] == NULL ||
context->eps_bearers[ebi_index]->pdn == NULL){
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Context not found for UE\n",
LOG_VALUE);
delete_bearer_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
CAUSE_SOURCE_SET_TO_0, S5S8_IFACE);
return -1;
}
/* Keep an in-flight MME- or UE-initiated deactivation procedure;
 * otherwise derive a fresh procedure from the message. */
uint8_t proc = context->eps_bearers[ebi_index]->pdn->proc;
if( proc == MME_INI_DEDICATED_BEARER_DEACTIVATION_PROC ||
proc == UE_REQ_BER_RSRC_MOD_PROC){
msg->proc = context->eps_bearers[ebi_index]->pdn->proc;
}else{
msg->proc = get_procedure(msg);
context->eps_bearers[ebi_index]->pdn->proc = msg->proc;
}
context->eps_bearers[ebi_index]->pdn->state = CONNECTED_STATE;
msg->state = context->eps_bearers[ebi_index]->pdn->state;
msg->event = DELETE_BER_REQ_RCVD_EVNT;
msg->interface_type = interface_type;
msg->cp_mode = context->cp_mode;
context->eps_bearers[ebi_index]->pdn->proc = msg->proc;
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"gtpc_pcnd delete ber req proc : %s\n",
LOG_VALUE, get_proc_string(msg->proc));
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
"Msg_Type:%s[%u], Teid:%u, "
"State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
gtpv2c_rx->teid.has_teid.teid,
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_DELETE_BEARER_RSP:{
/* Delete Bearer Response handler: decode, validate the EBI (LBI if
 * present, else first bearer context), handle the zero-TEID error
 * path via the seq-number->TEID map, resolve the UE context, clear
 * timers/seq entries and stage DELETE_BER_RESP_RCVD_EVNT. */
teid_key_t teid_key = {0};
/* Fix: parenthesize the assignment — previously '==' bound tighter
 * than '=', leaving 'ret' holding the boolean comparison result
 * instead of the decoded length. The branch test is unchanged. */
if((ret = decode_del_bearer_rsp((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.db_rsp)) == 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure "
"while decoding Delete Bearer Response\n", LOG_VALUE);
return -1;
}
/* Here we are considering GTPV2C_CAUSE_CONTEXT_NOT_FOUND as success
 * Beacuse purpose of DBReq is to cleanup that bearer data
 * and if the node don't have the bearer data
 * that means purpose of Request is achived so no need to send error and
 * Terminate process here
 */
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
if (msg->gtpc_msg.db_rsp.lbi.header.len) {
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.db_rsp.lbi.ebi_ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
/* Fix: 'context' has NOT been looked up yet at this point (that
 * happens below), so the previous 'context->cp_mode' ternary
 * dereferenced a NULL/stale pointer. Use the receive interface,
 * matching the zero-TEID error path below. */
delete_bearer_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE,
CAUSE_SOURCE_SET_TO_0, interface_type);
return -1;
}
} else {
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.db_rsp.bearer_contexts[0].eps_bearer_id.ebi_ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
/* Fix: same pre-lookup NULL/stale 'context' dereference as above. */
delete_bearer_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE,
CAUSE_SOURCE_SET_TO_0, interface_type);
return -1;
}
}
msg->proc = get_procedure(msg);
/* Key = procedure name + sequence number, mapping this response back
 * to the locally stored TEID of the original DB REQ. */
snprintf(teid_key.teid_key, PROC_LEN, "%s%d", get_proc_string(msg->proc),
msg->gtpc_msg.db_rsp.header.teid.has_teid.seq);
/* If Received Error DBResp form peer node with 0 teid */
if ((!(msg->gtpc_msg.db_rsp.header.teid.has_teid.teid)) &&
(msg->gtpc_msg.db_rsp.cause.cause_value != GTPV2C_CAUSE_REQUEST_ACCEPTED)) {
struct teid_value_t *teid_value = NULL;
teid_value = get_teid_for_seq_number(teid_key);
if (teid_value == NULL) {
/* TODO: Add the appropriate handling */
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to get TEID value for Sequence "
"Number key : %s \n", LOG_VALUE,
teid_key.teid_key);
return -1;
}
/* Delete the timer entry for DB REQ */
delete_pfcp_if_timer_entry(teid_value->teid, ebi_index);
/* Copy local stored TEID in the DBResp header */
msg->gtpc_msg.db_rsp.header.teid.has_teid.teid = teid_value->teid;
/* Fill the response struct and sending peer node */
delete_bearer_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE,
CAUSE_SOURCE_SET_TO_0, interface_type);
/* Delete the TEID entry for DB REQ */
delete_teid_entry_for_seq(teid_key);
/* Set the return value to skip SM */
return GTPC_ZERO_TEID_FOUND;
}
if (get_ue_context(gtpv2c_rx->teid.has_teid.teid, &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE, gtpv2c_rx->teid.has_teid.teid);
return -1;
}
/* CONTEXT_NOT_FOUND is treated as success (see comment above); every
 * other non-accepted cause is propagated as an error response. */
if (msg->gtpc_msg.db_rsp.cause.cause_value != GTPV2C_CAUSE_REQUEST_ACCEPTED
&& msg->gtpc_msg.db_rsp.cause.cause_value != GTPV2C_CAUSE_CONTEXT_NOT_FOUND) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error cause "
"received Delete Bearer Response.Cause : %s\n", LOG_VALUE,
cause_str(msg->gtpc_msg.db_rsp.cause.cause_value));
delete_bearer_error_response(msg, msg->gtpc_msg.db_rsp.cause.cause_value,
CAUSE_SOURCE_SET_TO_0, context->cp_mode == SGWC ? S5S8_IFACE : GX_IFACE);
delete_pfcp_if_timer_entry(gtpv2c_rx->teid.has_teid.teid, ebi_index);
return -1;
}
delete_pfcp_if_timer_entry(gtpv2c_rx->teid.has_teid.teid, ebi_index);
/* Delete the TEID entry for DB REQ */
delete_teid_entry_for_seq(teid_key);
if ((ret = get_ue_state(gtpv2c_rx->teid.has_teid.teid, ebi_index)) > 0) {
msg->state = ret;
} else {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" Ue state for tied: %d\n", LOG_VALUE, gtpv2c_rx->teid.has_teid.teid);
return -1;
}
/* HSS-initiated QoS modification derives a fresh procedure; any other
 * in-flight procedure on the PDN is carried forward. */
if (context->eps_bearers[ebi_index]->pdn->proc == HSS_INITIATED_SUB_QOS_MOD ) {
msg->proc = get_procedure(msg);
} else {
msg->proc = context->eps_bearers[ebi_index]->pdn->proc;
}
msg->event = DELETE_BER_RESP_RCVD_EVNT;
msg->interface_type = interface_type;
msg->cp_mode = context->cp_mode;
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"for BRC: gtpc_pcnd proc set is %s\n: ",
LOG_VALUE, get_proc_string(msg->proc));
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
"Msg_Type:%s[%u], Teid:%u, "
"State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
gtpv2c_rx->teid.has_teid.teid,
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_DELETE_BEARER_FAILURE_IND:{
/* Delete Bearer Failure Indication handler: decode and, for any
 * non-accepted cause, forward the failure toward the MME on S11. */
/* NOTE(review): '==' binds tighter than '='; ret gets the comparison
 * result, not the decoded length. Branch test is still correct. */
if((ret = decode_del_bearer_fail_indctn((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.del_fail_ind) == 0)) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure "
"while decoding Delete Bearer Failure Indication\n", LOG_VALUE);
return -1;
}
msg->interface_type = interface_type;
if(msg->gtpc_msg.del_fail_ind.cause.cause_value != GTPV2C_CAUSE_REQUEST_ACCEPTED){
delete_bearer_cmd_failure_indication(msg, msg->gtpc_msg.del_fail_ind.cause.cause_value,
CAUSE_SOURCE_SET_TO_1, S11_IFACE);
return -1;
}
break;
}
case GTP_BEARER_RESOURCE_FAILURE_IND:{
/* Bearer Resource Failure Indication handler: decode, recover the
 * correct SGW S5/S8 TEID (handles zero/wrong TEID via the
 * seq-number->TEID map), clear the bearer-resource-command timer and
 * seq entry, then forward the failure toward the MME on S11. This
 * case always returns; the SM is never invoked for it. */
teid_key_t teid_key = {0};
uint32_t teid = 0;
/* NOTE(review): '==' binds tighter than '='; ret gets the comparison
 * result, not the decoded length. Branch test is still correct. */
if((ret = decode_bearer_rsrc_fail_indctn((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.ber_rsrc_fail_ind) == 0)) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure "
"while decoding Bearer Resource "
"Failure Indication msg\n", LOG_VALUE);
return -1;
}
msg->proc = get_procedure(msg);
snprintf(teid_key.teid_key, PROC_LEN, "%s%d", get_proc_string(msg->proc),
msg->gtpc_msg.ber_rsrc_fail_ind.header.teid.has_teid.seq);
teid = msg->gtpc_msg.ber_rsrc_fail_ind.header.teid.has_teid.teid;
/*Note : we have to delete timer entry based on sgw s5s8 teid
 * : for that we should have correct sgw s5s8 teid
 * : this code handle case of wrong teid or zero teid receive
 * : in msg*/
/*If we didn't get context based on sgw s5s8 teid, then get correct teid from hash table */
if(get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.ber_rsrc_fail_ind.header.teid.has_teid.teid,
&context) != 0) {
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE, gtpv2c_rx->teid.has_teid.teid);
struct teid_value_t *teid_value = NULL;
teid_value = get_teid_for_seq_number(teid_key);
if (teid_value == NULL) {
/* TODO: Add the appropriate handling */
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to get TEID value for Sequence "
"Number key: %s \n", LOG_VALUE,
teid_key.teid_key);
return -1;
}
teid = teid_value->teid;
/* Copy local stored TEID in the msg header */
msg->gtpc_msg.ber_rsrc_fail_ind.header.teid.has_teid.teid = teid_value->teid;
}
/* Delete the timer entry for Bearer rsrc cmd */
delete_timer_entry(teid);
/* Delete the TEID entry for Bearer rsrc cmd */
delete_teid_entry_for_seq(teid_key);
send_bearer_resource_failure_indication(msg,
msg->gtpc_msg.ber_rsrc_fail_ind.cause.cause_value,
CAUSE_SOURCE_SET_TO_0, S11_IFACE);
return -1;
/* Unreachable: kept after the unconditional return above. */
break;
}
case GTP_MODIFY_BEARER_FAILURE_IND: {
/* Modify Bearer Failure Indication handler: decode, drop the pending
 * command timer for this TEID and forward the failure on S11. This
 * case always returns; the SM is never invoked for it. */
/* NOTE(review): '==' binds tighter than '='; ret gets the comparison
 * result, not the decoded length. Branch test is still correct. */
if((ret = decode_mod_bearer_fail_indctn((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.mod_fail_ind) == 0)) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure "
"while decoding Modify Bearer "
"Failure Indication msg\n", LOG_VALUE);
return -1;
}
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
delete_timer_entry(gtpv2c_rx->teid.has_teid.teid);
modify_bearer_failure_indication(msg,
msg->gtpc_msg.mod_fail_ind.cause.cause_value,
CAUSE_SOURCE_SET_TO_0, S11_IFACE);
return -1;
/* Unreachable: kept after the unconditional return above. */
break;
}
case GTP_MODIFY_BEARER_CMD: {
/* Modify Bearer Command handler: decode, resolve the UE context,
 * validate the mandatory bearer-context IE and its EBI, record the
 * procedure on the UE and stage MODIFY_BEARER_CMD_RCVD_EVNT. */
/* NOTE(review): '==' binds tighter than '='; ret gets the comparison
 * result, not the decoded length. Branch test is still correct. */
if((ret = decode_mod_bearer_cmd((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.mod_bearer_cmd) == 0)) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure "
"while decoding Modify Bearer Command\n", LOG_VALUE);
return -1;
}
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
if(get_ue_context(gtpv2c_rx->teid.has_teid.teid, &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE, gtpv2c_rx->teid.has_teid.teid);
modify_bearer_failure_indication(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
CAUSE_SOURCE_SET_TO_0, interface_type);
return -1;
}
eps_bearer *bearer = NULL;
/* Bearer Context IE is mandatory; reject with the matching cause if
 * it is missing or carries an unknown/invalid EBI. */
if(msg->gtpc_msg.mod_bearer_cmd.bearer_context.header.len != 0) {
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.mod_bearer_cmd.bearer_context.eps_bearer_id.ebi_ebi);
if(ebi_index != -1) {
bearer = context->eps_bearers[ebi_index];
}
if (ebi_index == -1 || bearer == NULL) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
modify_bearer_failure_indication(msg, GTPV2C_CAUSE_MANDATORY_IE_INCORRECT,
CAUSE_SOURCE_SET_TO_0, interface_type);
return -1;
}
} else {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error: MANDATORY IE"
" MISSING in Modify Bearer Command\n", LOG_VALUE);
modify_bearer_failure_indication(msg, GTPV2C_CAUSE_MANDATORY_IE_MISSING,
CAUSE_SOURCE_SET_TO_0, interface_type);
return -1;
}
msg->cp_mode = context->cp_mode;
msg->interface_type = interface_type;
msg->proc = get_procedure(msg);
if (update_ue_proc(context, msg->proc, ebi_index) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to "
"update Procedure\n", LOG_VALUE);
modify_bearer_failure_indication(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
CAUSE_SOURCE_SET_TO_0, interface_type);
return -1;
}
msg->state = CONNECTED_STATE;
msg->event = MODIFY_BEARER_CMD_RCVD_EVNT;
break;
}
case GTP_UPDATE_BEARER_REQ:{
/* Update Bearer Request handler: decode, resolve the UE context via
 * the SGW S5/S8 TEID, clear pending bearer-resource / modify-bearer
 * command bookkeeping, pick the procedure and stage
 * UPDATE_BEARER_REQ_RCVD_EVNT. */
teid_key_t teid_key = {0};
/* NOTE(review): '==' binds tighter than '='; ret gets the comparison
 * result, not the decoded length. Branch test is still correct. */
if((ret = decode_upd_bearer_req((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.ub_req) == 0))
return -1;
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
if(get_ue_context_by_sgw_s5s8_teid(gtpv2c_rx->teid.has_teid.teid, &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE, gtpv2c_rx->teid.has_teid.teid);
ubr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
CAUSE_SOURCE_SET_TO_0, S5S8_IFACE);
return -1;
}
/*Delete timer entry for bearer resource command and Modify Bearer Command*/
/* Sequence match means this UBReq answers a UE-initiated command;
 * drop its retransmission timer and seq entry. */
if (context->ue_initiated_seq_no == msg->gtpc_msg.ub_req.header.teid.has_teid.seq) {
delete_timer_entry(gtpv2c_rx->teid.has_teid.teid);
snprintf(teid_key.teid_key, PROC_LEN, "%s%d", "UE_REQ_BER_RSRC_MOD_PROC",
msg->gtpc_msg.ub_req.header.teid.has_teid.seq);
/* Delete the TEID entry for bearer resource cmd */
delete_teid_entry_for_seq(teid_key);
}
/*Which ebi to be selected as multiple bearer in request*/
/*extract ebi_id from array as all the ebi's will be of same pdn.*/
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.ub_req.bearer_contexts[0].eps_bearer_id.ebi_ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
ubr_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE,
CAUSE_SOURCE_SET_TO_0, S5S8_IFACE);
return -1;
}
pdn = GET_PDN(context, ebi_index);
if ( (pdn != NULL) && (pdn->proc == UE_REQ_BER_RSRC_MOD_PROC) ) {
msg->proc = UE_REQ_BER_RSRC_MOD_PROC;
} else {
msg->proc = get_procedure(msg);
}
msg->interface_type = interface_type;
/* NOTE(review): a NULL pdn is tolerated above but the bearer/pdn is
 * dereferenced here unconditionally — confirm the bearer for this
 * ebi_index always exists when this message arrives. */
msg->state = context->eps_bearers[ebi_index]->pdn->state;
msg->event = UPDATE_BEARER_REQ_RCVD_EVNT;
msg->cp_mode = context->cp_mode;
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"In update bearer request\n"
"proc set is : %s\n",
LOG_VALUE, get_proc_string(msg->proc));
break;
}
case GTP_UPDATE_BEARER_RSP:{
teid_key_t teid_key = {0};
if((ret = decode_upd_bearer_rsp((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.ub_rsp) == 0))
return -1;
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.ub_rsp.bearer_contexts[0].eps_bearer_id.ebi_ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
ubr_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE,
CAUSE_SOURCE_SET_TO_0, context->cp_mode == SGWC ? S5S8_IFACE : GX_IFACE);
return -1;
}
msg->proc = get_procedure(msg);
snprintf(teid_key.teid_key, PROC_LEN, "%s%d", get_proc_string(msg->proc),
msg->gtpc_msg.ub_rsp.header.teid.has_teid.seq);
/* If Received Error UBResp form peer node with 0 teid */
if ((!(msg->gtpc_msg.ub_rsp.header.teid.has_teid.teid)) &&
(msg->gtpc_msg.ub_rsp.cause.cause_value != GTPV2C_CAUSE_REQUEST_ACCEPTED)) {
struct teid_value_t *teid_value = NULL;
teid_value = get_teid_for_seq_number(teid_key);
if (teid_value == NULL) {
/* TODO: Add the appropriate handling */
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to get TEID value for Sequence "
"Number key: %s \n", LOG_VALUE,
teid_key.teid_key);
return -1;
}
/* Delete the timer entry for UB REQ */
delete_pfcp_if_timer_entry(teid_value->teid, ebi_index);
/* Copy local stored TEID in the UBResp header */
msg->gtpc_msg.ub_rsp.header.teid.has_teid.teid = teid_value->teid;
/* Fill the response struct and sending peer node */
ubr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
interface_type);
/* Delete the TEID entry for UB REQ */
delete_teid_entry_for_seq(teid_key);
/* Set the return value to skip SM */
return GTPC_ZERO_TEID_FOUND;
}
if(get_ue_context(gtpv2c_rx->teid.has_teid.teid, &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE, gtpv2c_rx->teid.has_teid.teid);
return -1;
}
if(context->is_sent_bearer_rsc_failure_indc != PRESENT) {
if(msg->gtpc_msg.ub_rsp.cause.cause_value != GTPV2C_CAUSE_REQUEST_ACCEPTED){
ubr_error_response(msg, msg->gtpc_msg.ub_rsp.cause.cause_value,
CAUSE_SOURCE_SET_TO_1, context->cp_mode == SGWC ? S5S8_IFACE : GX_IFACE);
return -1;
}
}
delete_pfcp_if_timer_entry(gtpv2c_rx->teid.has_teid.teid, ebi_index);
/* Delete the TEID entry for UB REQ */
delete_teid_entry_for_seq(teid_key);
/*TODO: Which ebi to be selected as multiple bearer in request*/
if((ret = get_ue_state(gtpv2c_rx->teid.has_teid.teid ,ebi_index)) > 0){
msg->state = ret;
}else{
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" Ue state for tied: %d\n", LOG_VALUE, gtpv2c_rx->teid.has_teid.teid);
return -1;
}
pdn = GET_PDN(context, ebi_index);
if ( (pdn != NULL) && (pdn->proc == UE_REQ_BER_RSRC_MOD_PROC) ) {
msg->proc = UE_REQ_BER_RSRC_MOD_PROC;
} else {
msg->proc = get_procedure(msg);
}
msg->event = UPDATE_BEARER_RSP_RCVD_EVNT;
msg->interface_type = interface_type;
msg->cp_mode = context->cp_mode;
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"In update bearer response\n"
"msg->proc is : %s\n",
LOG_VALUE, get_proc_string(msg->proc));
break;
}
case GTP_DELETE_PDN_CONNECTION_SET_REQ: {
if ((ret = decode_del_pdn_conn_set_req((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.del_pdn_req) == 0))
return -1;
msg->state = DEL_PDN_CONN_SET_REQ_RCVD_STATE;
msg->proc = get_procedure(msg);
msg->event = DEL_PDN_CONN_SET_REQ_RCVD_EVNT;
msg->interface_type = interface_type;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
" Msg_Type:%s[%u],"
"State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_DELETE_PDN_CONNECTION_SET_RSP: {
if ((ret = decode_del_pdn_conn_set_rsp((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.del_pdn_rsp) == 0))
return -1;
msg->state = DEL_PDN_CONN_SET_REQ_SNT_STATE;
msg->proc = get_procedure(msg);
msg->event = DEL_PDN_CONN_SET_RESP_RCVD_EVNT;
msg->interface_type = interface_type;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
" Msg_Type:%s[%u],"
"State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_UPDATE_PDN_CONNECTION_SET_REQ: {
if ((ret = decode_upd_pdn_conn_set_req((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.upd_pdn_req) == 0))
return -1;
msg->state = CONNECTED_STATE;
msg->proc = get_procedure(msg);
msg->event = UPD_PDN_CONN_SET_REQ_RCVD_EVNT;
msg->interface_type = interface_type;
/*Retrive UE state. */
if(get_ue_context(msg->gtpc_msg.upd_pdn_req.header.teid.has_teid.teid, &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE,
msg->gtpc_msg.upd_pdn_req.header.teid.has_teid.teid);
update_pdn_connection_set_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
CAUSE_SOURCE_SET_TO_0);
return -1;
}
msg->cp_mode = context->cp_mode;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
" Msg_Type:%s[%u],"
"State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_DELETE_BEARER_CMD: {
if((ret = decode_del_bearer_cmd((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.del_ber_cmd) == 0)) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure "
"while decoding Delete Bearer Command\n", LOG_VALUE);
return -1;
}
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
/*extract ebi_id from array as all the ebi's will be of same pdn.*/
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.del_ber_cmd.bearer_contexts[0].eps_bearer_id.ebi_ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
delete_bearer_cmd_failure_indication(msg, GTPV2C_CAUSE_SYSTEM_FAILURE,
CAUSE_SOURCE_SET_TO_0, interface_type);
return -1;
}
if(get_ue_context(gtpv2c_rx->teid.has_teid.teid, &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE, gtpv2c_rx->teid.has_teid.teid);
delete_bearer_cmd_failure_indication(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
CAUSE_SOURCE_SET_TO_0, interface_type);
return -1;
}
msg->proc = get_procedure(msg);
if (update_ue_proc(context, msg->proc, ebi_index) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to "
"update Procedure\n", LOG_VALUE);
return -1;
}
msg->state = CONNECTED_STATE;
msg->event = DELETE_BER_CMD_RCVD_EVNT;
msg->interface_type = interface_type;
msg->cp_mode = context->cp_mode;
break;
}
case GTP_BEARER_RESOURCE_CMD : {
if((ret = decode_bearer_rsrc_cmd((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.bearer_rsrc_cmd) == 0)) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure "
"while decoding Bearer Resource Command\n", LOG_VALUE);
return -1;
}
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
if(get_ue_context(gtpv2c_rx->teid.has_teid.teid, &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n for Bearer"
"Resource Command", LOG_VALUE, gtpv2c_rx->teid.has_teid.teid);
send_bearer_resource_failure_indication(msg,
GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
interface_type);
return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
}
/*Get default bearer id i.e. lbi from BRC */
/*Check for mandatory IE LBI,PTI,TAD*/
if(msg->gtpc_msg.bearer_rsrc_cmd.lbi.header.len != 0 &&
msg->gtpc_msg.bearer_rsrc_cmd.pti.header.len != 0 &&
msg->gtpc_msg.bearer_rsrc_cmd.tad.header.len != 0) {
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.bearer_rsrc_cmd.lbi.ebi_ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID for"
"Bearer Resource Command\n ", LOG_VALUE);
send_bearer_resource_failure_indication(msg,
GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0,
interface_type);
return -1;
}
} else {
send_bearer_resource_failure_indication(msg,
GTPV2C_CAUSE_MANDATORY_IE_MISSING, CAUSE_SOURCE_SET_TO_0,
interface_type);
return GTPV2C_CAUSE_MANDATORY_IE_MISSING;
}
/*Check LBI (pdn connection) for UE is exist or not*/
ret = check_default_bearer_id_presence_in_ue(msg->gtpc_msg.bearer_rsrc_cmd.lbi.ebi_ebi,
context);
if(ret != 0) {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Invalid LBI,pdn connection not found"
"for ebi : %d\n,for Bearer Resource Command",
LOG_VALUE, msg->gtpc_msg.bearer_rsrc_cmd.lbi.ebi_ebi);
send_bearer_resource_failure_indication(msg,
GTPV2C_CAUSE_MANDATORY_IE_INCORRECT, CAUSE_SOURCE_SET_TO_0,
interface_type);
return GTPV2C_CAUSE_MANDATORY_IE_INCORRECT;
}
msg->proc = get_procedure(msg);
if (update_ue_proc(context,msg->proc ,ebi_index) != 0) {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"failed to update procedure\n",LOG_VALUE);
return -1;
}
msg->state = CONNECTED_STATE;
msg->cp_mode = context->cp_mode;
msg->event = BEARER_RSRC_CMD_RCVD_EVNT;
break;
}
case GTP_UPDATE_PDN_CONNECTION_SET_RSP: {
teid_key_t teid_key = {0};
if ((ret = decode_upd_pdn_conn_set_rsp((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.upd_pdn_rsp) == 0))
return -1;
msg->proc = get_procedure(msg);
snprintf(teid_key.teid_key, PROC_LEN, "%s%d", get_proc_string(msg->proc),
msg->gtpc_msg.mb_rsp.header.teid.has_teid.seq);
/* If Received Error UP PDN Resp form peer node with 0 teid */
if ((!(msg->gtpc_msg.upd_pdn_rsp.header.teid.has_teid.teid)) &&
(msg->gtpc_msg.upd_pdn_rsp.cause.cause_value != GTPV2C_CAUSE_REQUEST_ACCEPTED)) {
struct teid_value_t *teid_value = NULL;
teid_value = get_teid_for_seq_number(teid_key);
if (teid_value == NULL) {
/* TODO: Add the appropriate handling */
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to get TEID value for Sequence "
"Number key: %s \n", LOG_VALUE,
teid_key.teid_key);
return -1;
}
/* Delete the timer entry for UP PDN REQ */
delete_timer_entry(teid_value->teid);
/* Copy local stored TEID in the MBResp header */
msg->gtpc_msg.mb_rsp.header.teid.has_teid.teid = teid_value->teid;
/* Fill the response struct and sending peer node */
mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
S11_IFACE);
process_error_occured_handler(&msg, NULL);
/* Delete the TEID entry for UP PDN REQ */
delete_teid_entry_for_seq(teid_key);
/* Set the return value to skip SM */
return GTPC_ZERO_TEID_FOUND;
}
delete_timer_entry(msg->gtpc_msg.upd_pdn_rsp.header.teid.has_teid.teid);
/* Delete the TEID entry for UP PDN REQ */
delete_teid_entry_for_seq(teid_key);
if(msg->gtpc_msg.upd_pdn_rsp.cause.cause_value != GTPV2C_CAUSE_REQUEST_ACCEPTED){
mbr_error_response(msg, msg->gtpc_msg.upd_pdn_rsp.cause.cause_value,
CAUSE_SOURCE_SET_TO_1, interface_type);
return -1;
}
if(get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.upd_pdn_rsp.header.teid.has_teid.teid, &context) != 0)
{
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE,
msg->gtpc_msg.upd_pdn_rsp.header.teid.has_teid.teid);
mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
CAUSE_SOURCE_SET_TO_0, interface_type);
process_error_occured_handler(&msg, NULL);
return -1;
}
msg->state = UPD_PDN_CONN_SET_REQ_SNT_STATE;
msg->proc = get_procedure(msg);
msg->event = UPD_PDN_CONN_SET_RESP_RCVD_EVNT;
msg->interface_type = interface_type;
msg->cp_mode = context->cp_mode;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
" Msg_Type:%s[%u],"
"State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_PGW_RESTART_NOTIFICATION_ACK: {
if ((ret = decode_pgw_rstrt_notif_ack((uint8_t *) gtpv2c_rx, &msg->gtpc_msg.pgw_rstrt_notif_ack) == 0))
return -1;
msg->state = PGW_RSTRT_NOTIF_REQ_SNT_STATE;
msg->proc = get_procedure(msg);
msg->event = PGW_RSTRT_NOTIF_ACK_RCVD_EVNT;
msg->interface_type = interface_type;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
" Msg_Type:%s[%u],"
"State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_IDENTIFICATION_RSP:{
clLog(clSystemLog, eCLSeverityInfo, LOG_FORMAT"Warning: Received GTP IDENTIFICATION RSP Message, i.e. Simulator"
" Not support Delete PDN connection Set request feature.\n", LOG_VALUE);
/* TODO: Need to handle this message in state m/c*/
msg->state = END_STATE;
msg->proc = END_PROC;
msg->event = END_EVNT;
msg->interface_type = interface_type;
break;
}
case GTP_CHANGE_NOTIFICATION_REQ: {
if((ret = decode_change_noti_req((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.change_not_req) == 0))
return -1;
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.change_not_req.lbi.ebi_ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
change_notification_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE,
CAUSE_SOURCE_SET_TO_0, interface_type);
return -1;
}
if(get_ue_context(msg->gtpc_msg.change_not_req.header.teid.has_teid.teid, &context)) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE,
msg->gtpc_msg.change_not_req.header.teid.has_teid.teid);
change_notification_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
CAUSE_SOURCE_SET_TO_0, interface_type);
return -1;
}
msg->proc = get_procedure(msg);
msg->state = context->eps_bearers[ebi_index]->pdn->state;
msg->event = CHANGE_NOTIFICATION_REQ_RCVD_EVNT;
context->eps_bearers[ebi_index]->pdn->proc = msg->proc;
msg->interface_type = interface_type;
msg->cp_mode = context->cp_mode;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
"Msg_Type:%s[%u], Teid:%u, "
"State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
gtpv2c_rx->teid.has_teid.teid,
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_CHANGE_NOTIFICATION_RSP: {
teid_key_t teid_key = {0};
if((ret = decode_change_noti_rsp((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.change_not_rsp) == 0)) {
return -1;
}
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
msg->proc = get_procedure(msg);
snprintf(teid_key.teid_key, PROC_LEN, "%s%d", get_proc_string(msg->proc),
msg->gtpc_msg.change_not_rsp.header.teid.has_teid.seq);
/* If Received Error Change Notification Rsp from peer node with 0 teid */
if((!msg->gtpc_msg.change_not_rsp.header.teid.has_teid.teid) &&
(msg->gtpc_msg.change_not_rsp.cause.cause_value != GTPV2C_CAUSE_REQUEST_ACCEPTED)) {
struct teid_value_t *teid_value = NULL;
teid_value = get_teid_for_seq_number(teid_key);
if (teid_value == NULL) {
/* TODO: Add the appropriate handling */
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to get TEID value for Sequence "
"Number key : %s \n", LOG_VALUE,
msg->gtpc_msg.change_not_rsp.header.teid.has_teid.seq);
return -1;
}
/* Delete the timer entry for Change Notification REQ */
delete_timer_entry(teid_value->teid);
/* Copy local stored TEID in the Change Notification header */
msg->gtpc_msg.change_not_rsp.header.teid.has_teid.teid = teid_value->teid;
/* Fill the response struct and sending peer node */
change_notification_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
CAUSE_SOURCE_SET_TO_0, interface_type);
/* Delete the TEID entry for Change Notification Req */
delete_teid_entry_for_seq(teid_key);
/* Set the return value to skip SM */
return GTPC_ZERO_TEID_FOUND;
}
delete_timer_entry(msg->gtpc_msg.change_not_rsp.header.teid.has_teid.teid);
/* Delete the TEID entry for Change Notification Req */
delete_teid_entry_for_seq(teid_key);
ret = get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.change_not_rsp.header.teid.has_teid.teid, &context);
if(ret) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE,
msg->gtpc_msg.change_not_rsp.header.teid.has_teid.teid);
change_notification_error_response(msg, ret,
CAUSE_SOURCE_SET_TO_0, interface_type);
return -1;
}
msg->proc = get_procedure(msg);
msg->state = CONNECTED_STATE;
msg->event = CHANGE_NOTIFICATION_RSP_RCVD_EVNT;
msg->interface_type = interface_type;
msg->cp_mode = context->cp_mode;
break;
}
case GTP_CREATE_INDIRECT_DATA_FORWARDING_TUNNEL_REQ: {
if ((ret = decode_create_indir_data_fwdng_tunn_req((uint8_t *)gtpv2c_rx,
&msg->gtpc_msg.crt_indr_tun_req)) == 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure "
"while decoding Create Indirect Data Forwarding\n", LOG_VALUE);
if(ret != -1){
crt_indir_data_frwd_tun_error_response(msg, ret);
}
return -1;
}
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
msg->proc = get_procedure(msg);
if(config.cp_type == SGWC){
msg->cp_mode = SGWC;
} else {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
"Gateway Mode is not SGWC\n", LOG_VALUE);
return -1;
}
if(gtpv2c_rx->teid.has_teid.teid == 0) {
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
"Source Anchor and Forwarding GW are different\n",
LOG_VALUE);
msg->state = SGWC_NONE_STATE;
} else {
if(get_ue_context(gtpv2c_rx->teid.has_teid.teid, &context)) {
msg->state = SGWC_NONE_STATE;
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT "Failed to get"
" UE context for teid: %d\n", LOG_VALUE,
msg->gtpc_msg.crt_indr_tun_req.header.teid.has_teid.teid);
crt_indir_data_frwd_tun_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND);
return -1;
}
msg->state = CONNECTED_STATE;
}
msg->event = CREATE_INDIR_DATA_FRWRD_TUN_REQ_RCVD_EVNT;
msg->interface_type = interface_type;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
"Msg_Type:%s[%u], Teid:%u, "
"State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
gtpv2c_rx->teid.has_teid.teid,
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_DELETE_INDIRECT_DATA_FORWARDING_TUNNEL_REQ: {
if ((ret = decode_del_indir_data_fwdng_tunn_req((uint8_t *)gtpv2c_rx,
&msg->gtpc_msg.dlt_indr_tun_req)) == 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure "
"while Decoding Delete Indirect Data Forwarding\n", LOG_VALUE);
if(ret != -1){
delete_indir_data_frwd_error_response(msg, ret);
return -1;
}
}
msg->proc = get_procedure(msg);
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
if(get_sender_teid_context(gtpv2c_rx->teid.has_teid.teid, &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE,
msg->gtpc_msg.dlt_indr_tun_req.header.teid.has_teid.teid);
delete_indir_data_frwd_error_response(msg,GTPV2C_CAUSE_CONTEXT_NOT_FOUND);
return -1;
}
msg->interface_type = interface_type;
msg->cp_mode = context->cp_mode;
msg->state = context->indirect_tunnel->pdn->state;
msg->event = DELETE_INDIR_DATA_FRWD_TUN_REQ_RCVD_EVNT;
clLog(clSystemLog, eCLSeverityDebug, "%s: Callback called for"
"Msg_Type:%s[%u], Teid:%u, "
"Procedure:%s, State:%s, Event:%s\n",
__func__, gtp_type_str(msg->msg_type), msg->msg_type,
gtpv2c_rx->teid.has_teid.teid,
get_proc_string(msg->proc),
get_state_string(msg->state), get_event_string(msg->event));
break;
}
case GTP_MODIFY_ACCESS_BEARER_REQ: {
/*Decode the received msg and stored into the struct. */
if((ret = decode_mod_acc_bearers_req((uint8_t *) gtpv2c_rx,
&msg->gtpc_msg.mod_acc_req) == 0)) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
"Erorr in decoding MBR Req\n", LOG_VALUE);
return -1;
}
msg->proc = get_procedure(msg);
msg->state = CONNECTED_STATE;
msg->event = MAB_REQ_RCVD_EVNT;
if(get_ue_context(msg->gtpc_msg.mod_acc_req.header.teid.has_teid.teid, &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE, msg->gtpc_msg.mod_acc_req.header.teid.has_teid.teid);
mod_access_bearer_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0);
return -1;
}
if(msg->gtpc_msg.mod_acc_req.bearer_modify_count != 0) {
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.mod_acc_req.bearer_contexts_to_be_modified[0].eps_bearer_id.ebi_ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
mod_access_bearer_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0);
return -1;
}
pdn = GET_PDN(context, ebi_index);
if(pdn == NULL){
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Failed to get pdn for ebi_index : %d \n", LOG_VALUE, ebi_index);
mod_access_bearer_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0);
return -1;
}
pdn->proc = msg->proc;
}
msg->cp_mode = context->cp_mode;
msg->interface_type = interface_type;
gtpv2c_rx->teid.has_teid.teid = ntohl(gtpv2c_rx->teid.has_teid.teid);
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
"Msg_Type:%s[%u], Teid:%u, "
"Procedure:%s, State:%s, Event:%s\n",
LOG_VALUE, gtp_type_str(msg->msg_type), msg->msg_type,
gtpv2c_rx->teid.has_teid.teid,
get_proc_string(msg->proc),
get_state_string(msg->state), get_event_string(msg->event));
break;
}
default:
/*If Event is not supported then we will called default handler. */
/* Retrive UE state. */
if (get_ue_context(ntohl(gtpv2c_rx->teid.has_teid.teid), &context) != 0) {
msg->proc = NONE_PROC;
if (SGWC == context->cp_mode)
msg->state = SGWC_NONE_STATE;
else {
if (config.use_gx) {
msg->state = PGWC_NONE_STATE;
} else {
msg->state = SGWC_NONE_STATE;
}
}
} else {
pdn = GET_PDN(context, ebi_index);
if(pdn == NULL){
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
return -1;
}
msg->state = pdn->state;
msg->proc = pdn->proc;
}
msg->event = NONE_EVNT;
msg->interface_type = interface_type;
msg->cp_mode = context->cp_mode;
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"process_msgs-"
"\n\tcase: SAEGWC::gw_cfg= %d;"
"\n\tReceived GTPv2c Message Type: "
"%s (%u) not supported... Discarding\n", LOG_VALUE,
context->cp_mode, gtp_type_str(gtpv2c_rx->gtpc.message_type),
gtpv2c_rx->gtpc.message_type);
return -1;
}
/* copy packet for user level packet copying or li */
if ((NULL != context) && (S11_IFACE == interface_type) && (GTP_CREATE_SESSION_REQ != msg->msg_type)) {
int bytes_rx_li = bytes_rx;
uint8_t gtpv2c_rx_li[MAX_GTPV2C_UDP_LEN] = {0};
if ((piggyback == TRUE || context->piggyback == TRUE) && (li_piggyback_buf != NULL)) {
memcpy(gtpv2c_rx_li, li_piggyback_buf, bytes_rx);
} else {
memcpy(gtpv2c_rx_li, gtpv2c_rx, bytes_rx);
}
if (PRESENT == context->dupl) {
process_pkt_for_li(context, S11_INTFC_IN, (uint8_t *)gtpv2c_rx_li, bytes_rx_li,
fill_ip_info(peer_addr->type,
peer_addr->ipv4.sin_addr.s_addr,
peer_addr->ipv6.sin6_addr.s6_addr),
fill_ip_info(peer_addr->type,
config.s11_ip.s_addr,
config.s11_ip_v6.s6_addr),
((peer_addr->type == IPTYPE_IPV4_LI) ?
ntohs(peer_addr->ipv4.sin_port) : ntohs(peer_addr->ipv6.sin6_port)),
config.s11_port);
}
}
if ((NULL != context) && (S5S8_IFACE == interface_type)) {
int bytes_rx_li = bytes_rx;
uint8_t gtpv2c_rx_li[MAX_GTPV2C_UDP_LEN] = {0};
if ((piggyback == TRUE || context->piggyback == TRUE) && (li_piggyback_buf != NULL)) {
memcpy(gtpv2c_rx_li, li_piggyback_buf, bytes_rx);
} else {
memcpy(gtpv2c_rx_li, gtpv2c_rx, bytes_rx);
}
if (PRESENT == context->dupl) {
process_pkt_for_li(context, S5S8_C_INTFC_IN, (uint8_t *)gtpv2c_rx_li, bytes_rx_li,
fill_ip_info(peer_addr->type,
peer_addr->ipv4.sin_addr.s_addr,
peer_addr->ipv6.sin6_addr.s6_addr),
fill_ip_info(peer_addr->type,
config.s5s8_ip.s_addr,
config.s5s8_ip_v6.s6_addr),
((peer_addr->type == IPTYPE_IPV4_LI) ?
ntohs(peer_addr->ipv4.sin_port) : ntohs(peer_addr->ipv6.sin6_port)),
config.s5s8_port);
}
}
RTE_SET_USED(peer_addr);
RTE_SET_USED(interface_type);
return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | dp/up_config.c | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* NOTE: clLogger initalization happens after parsing of configuration file,
* thus clLog cannot be used here, instead printf is used.
*/
#include <stdio.h>
#include <stdlib.h>
#include <getopt.h>
#include <arpa/inet.h>
#include <errno.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <rte_ethdev.h>
#include <rte_kni.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eal.h>
#include <rte_cfgfile.h>
#include "gtpu.h"
#include "up_main.h"
#include "teid_upf.h"
#include "pfcp_util.h"
#include "pipeline/epc_packet_framework.h"
#include "pfcp_up_sess.h"
#include "gw_adapter.h"
#define DECIMAL_BASE 10
#define IPv4_ADDRESS_LEN 16
#define TEIDRI_TIMEOUT_DEFAULT 600000
#define TEIDRI_VALUE_DEFAULT 3
#define STATIC_DP_FILE "../config/dp.cfg"
#define ENTRY_NAME_SIZE 64
#define IPv6_ADDRESS_LEN 16
#define IPv6_PREFIX_LEN 1
extern uint16_t dp_comm_port;
extern struct in_addr dp_comm_ip;
extern struct in6_addr dp_comm_ipv6;
extern uint8_t dp_comm_ip_type;
extern struct in_addr cp_comm_ip;
extern struct in6_addr cp_comm_ip_v6;
extern uint8_t cp_comm_ip_type;
extern uint16_t cp_comm_port;
/**
 * @brief : Split an "ADDR/PREFIX" IPv6 configuration string in place.
 * @param : str, mutable input string of the form "2001:db8::1/64";
 *          modified by strtok (the '/' is overwritten with '\0').
 * @param : addr, out-pointer set to the address portion inside str.
 * @param : net, out-value set to the numeric prefix length (0..128).
 * @return : Returns 0 in case of success, -1 otherwise.
 */
static int
get_ipv6_address(char *str, char **addr, uint8_t *net)
{
	char *addr_t = NULL;
	char *net_t = NULL;
	char *endptr = NULL;
	long prefix = 0;

	/* Check the pointer to string is not NULL */
	if (str == NULL)
		return -1;

	/* Point to IPv6 Address.  Must be checked: on an empty input
	 * strtok() returns NULL, and the follow-up strtok(NULL, ...) would
	 * then read stale static tokenizer state. */
	addr_t = strtok(str, "/");
	if (addr_t == NULL) {
		fprintf(stderr, "ERROR: IPv6 Address is not Configured\n");
		return -1;
	}

	/* Point to Prefix Length */
	net_t = strtok(NULL, "/");
	if (net_t == NULL) {
		fprintf(stderr, "ERROR: IPv6 Prefix Length is not Configured\n");
		return -1;
	}

	/* Parse the prefix with strtol instead of atoi so that non-numeric
	 * or out-of-range values are rejected instead of silently becoming 0.
	 * A valid IPv6 prefix length is 0..128. */
	errno = 0;
	prefix = strtol(net_t, &endptr, 10);
	if (errno != 0 || endptr == net_t || *endptr != '\0'
			|| prefix < 0 || prefix > 128) {
		fprintf(stderr, "ERROR: Invalid IPv6 Prefix Length\n");
		return -1;
	}

	*addr = addr_t;
	*net = (uint8_t)prefix;
	return 0;
}
/**
 * @brief : Check whether an IPv6 address has been configured.
 * @param : ipv6_addr, address to inspect.
 * @return : 0 when the address is all-zero (unconfigured),
 *           non-zero when any byte is set.
 */
int
isIPv6Present(struct in6_addr *ipv6_addr)
{
	/* Compare against a zero-initialized address: any set byte makes
	 * the memcmp result non-zero. */
	static const struct in6_addr zero_addr;

	return memcmp(ipv6_addr, &zero_addr, sizeof(zero_addr));
}
/**
 * @brief : Check whether an ethernet (MAC) address has been configured.
 * @param : hwaddr, hardware address to inspect.
 * @return : 0 when the address is all-zero (unconfigured),
 *           non-zero when any byte is set.
 */
int isMacPresent(struct ether_addr *hwaddr)
{
	/* A zero-initialized MAC is the "not configured" sentinel. */
	static const struct ether_addr zero_mac;

	return memcmp(hwaddr, &zero_mac, sizeof(zero_mac));
}
/**
 * @brief : parse ethernet address
 * @param : hwaddr, structure to store the parsed ethernet address
 * @param : str, input string in "XX:XX:XX:XX:XX:XX" format
 * @param : intf_type, non-zero logs as East Bound, zero as West Bound
 * @return : Returns 1 in case of success, 0 otherwise
 */
static inline int
parse_ether_addr(struct ether_addr *hwaddr, const char *str, uint8_t intf_type)
{
	/* 01 34 67 90 23 56 */
	/* XX:XX:XX:XX:XX:XX */
	unsigned int bytes[6] = {0};
	int inx = 0;

	/* Validate the fixed 17-character layout: hex digit pairs separated
	 * by ':' (bounded scan, resolving the old strlen TODO). */
	if (strnlen(str, 18) != 17) {
		printf("invalid mac hardware address format->%s<-\n", str);
		return 0;
	}
	for (inx = 0; inx < 17; ++inx) {
		if ((inx % 3) == 2) {
			if (str[inx] != ':') {
				printf("invalid mac hardware address format->%s<-\n", str);
				return 0;
			}
		} else if (!isxdigit((unsigned char)str[inx])) {
			printf("invalid mac hardware address format->%s<-\n", str);
			return 0;
		}
	}

	/* Scan into int-sized temporaries and narrow afterwards.  The old
	 * code scanned "%02zx" through (size_t *) casts of the 1-byte
	 * addr_bytes elements, making each conversion write sizeof(size_t)
	 * bytes and run past the end of the 6-byte array (undefined
	 * behavior / out-of-bounds write). */
	if (sscanf(str, "%02x:%02x:%02x:%02x:%02x:%02x",
			&bytes[0], &bytes[1], &bytes[2],
			&bytes[3], &bytes[4], &bytes[5]) != 6) {
		printf("invalid mac hardware address format->%s<-\n", str);
		return 0;
	}
	for (inx = 0; inx < 6; ++inx)
		hwaddr->addr_bytes[inx] = (uint8_t)bytes[inx];

	if (intf_type)
		fprintf(stderr, "DP: EB_MAC_ADDR: %s\n", str);
	else
		fprintf(stderr, "DP: WB_MAC_ADDR: %s\n", str);
	return 1;
}
/**
 * @brief : Validate the Mandatory Parameters are Configured or Not
 * @param : app, structure that holds dp parameter configurations
 * @return : Returns 0 in case of success, -1 otherwise
 */
static int8_t
validate_mandatory_params(struct app_params *app)
{
	/* Each interface needs either a full IPv4 setup (address + mask)
	 * or an IPv6 address. */
	int wb_v4_cfg = (app->wb_ip && app->wb_mask);
	int eb_v4_cfg = (app->eb_ip && app->eb_mask);

	if (!wb_v4_cfg && !isIPv6Present(&app->wb_ipv6)) {
		fprintf(stderr, "ERROR: West Bound(WB_IP or WB_MASK) intf IPv4 and IPv6 Address"
				" or intf Mask not configured.\n");
		return -1;
	}

	/* A logical WB address additionally requires its mask and a
	 * non-empty interface name. */
	if (app->wb_li_ip
			&& (!app->wb_li_mask
				|| !strncmp("", (const char *)app->wb_li_iface_name, ENTRY_NAME_SIZE))) {
		fprintf(stderr, "ERROR: West Bound(WB_LI_MASK or WB_LI_IFACE)"
				" intf MASK or intf Name not configured.\n");
		return -1;
	}

	if (!eb_v4_cfg && !isIPv6Present(&app->eb_ipv6)) {
		fprintf(stderr, "ERROR: East Bound(EB_IP or EB_MASK) intf IPv4 and IPv6 Address"
				" or intf Mask not configured.\n");
		return -1;
	}

	/* Same rule for the logical EB address. */
	if (app->eb_li_ip
			&& (!app->eb_li_mask
				|| !strncmp("", (const char *)app->eb_li_iface_name, ENTRY_NAME_SIZE))) {
		fprintf(stderr, "ERROR: East Bound(EB_LI_IPv4_MASK or EB_LI_IFACE)"
				" intf MASK or intf Name not configured.\n");
		return -1;
	}

	/* MAC addresses and interface names are mandatory on both sides. */
	if (!isMacPresent(&app->wb_ether_addr)) {
		fprintf(stderr, "ERROR: West Bound(WB_MAC) intf MAC Address not configured.\n");
		return -1;
	}

	if (!strncmp("", (const char *)app->wb_iface_name, ENTRY_NAME_SIZE)) {
		fprintf(stderr, "ERROR: West Bound(WB_IFACE) intf name not configured.\n");
		return -1;
	}

	if (!isMacPresent(&app->eb_ether_addr)) {
		fprintf(stderr, "ERROR: East Bound(EB_MAC) intf MAC Address not configured.\n");
		return -1;
	}

	if (!strncmp("", (const char *)app->eb_iface_name, ENTRY_NAME_SIZE)) {
		fprintf(stderr, "ERROR: East Bound(EB_IFACE) intf name not configured.\n");
		return -1;
	}

	/* TEIDRI was pre-initialized to -1; fall back to the default when
	 * the config file did not set it. */
	if (app->teidri_val == -1) {
		app->teidri_val = TEIDRI_VALUE_DEFAULT;
		fprintf(stderr, "TEIDRI value not configured, assigning default value TEIDRI : %d\n",
				TEIDRI_VALUE_DEFAULT);
		/* TODO: VISHAL: Need to pull changes from r_1.8 */
	}

	return 0;
}
static int8_t
parse_up_config_param(struct app_params *app)
{
uint8_t inx = 0;
struct ether_addr mac_addr = {0};
int32_t num_global_entries = 0;
char *endptr = NULL;
long temp_val = 0;
struct rte_cfgfile_entry *global_entries = NULL;
struct rte_cfgfile *file = rte_cfgfile_load(STATIC_DP_FILE, 0);
if (file == NULL) {
rte_exit(EXIT_FAILURE, "Cannot load configuration file %s\n",
STATIC_DP_FILE);
}
fprintf(stderr,
"\n\n###############[Data-Plane Config Reading]################\n");
fprintf(stderr,
"DP: User-Plane Configuration Parsing from %s\n", STATIC_DP_FILE);
/* Read GLOBAL seaction values and configure respective params. */
num_global_entries = rte_cfgfile_section_num_entries(file, "GLOBAL");
if (num_global_entries > 0) {
global_entries = rte_malloc_socket(NULL,
sizeof(struct rte_cfgfile_entry) *
num_global_entries,
RTE_CACHE_LINE_SIZE, rte_socket_id());
}
if (global_entries == NULL) {
rte_panic("Error configuring global entry of %s\n",
STATIC_DP_FILE);
}
rte_cfgfile_section_entries(file, "GLOBAL", global_entries,
num_global_entries);
/* Initialize teidri value to -1, it will be used to verify if teidri
* is configured or not
*/
app->teidri_val = -1;
/* Validate the Mandatory Parameters are Configured or Not */
for (inx = 0; inx < num_global_entries; ++inx) {
/* Select static user-plane mode from config file */
if(strncmp("DP_CFG", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
} else if(strncmp("WB_IPv4", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
/* S1U/S5S8 IP Address */
struct in_addr tmp = {0};
if (!inet_aton(global_entries[inx].value, &(tmp))) {
fprintf(stderr, "Invalid West_Bound(S1U/S5S8) IPv4 Address\n");
app->wb_ip = 0;
return -1;
}
app->wb_ip = ntohl(tmp.s_addr);
app->wb_ip_type.ipv4 = PRESENT;
/* Set the IP Type to dual connectivity */
if (app->wb_ip_type.ipv4 && app->wb_ip_type.ipv6) {
app->wb_ip_type.ipv4_ipv6 = PRESENT;
}
fprintf(stderr, "DP: WB_IPv4(S1U/S5S8) Addr: "IPV4_ADDR"\n",
IPV4_ADDR_HOST_FORMAT(app->wb_ip));
} else if(strncmp("WB_IPv6", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
uint8_t net_t[IPv6_PREFIX_LEN] = {0};
char *addr_t[IPV6_STR_LEN] = {NULL};
/* Parse the IPv6 String and separate out address and prefix */
if (get_ipv6_address(global_entries[inx].value, addr_t, net_t) < 0) {
fprintf(stderr, "Invalid West_Bound(S1U/S5S8) IPv6 Address and Prefix Configuration.\n");
return -1;
}
/* S1U/S5S8 IPV6 Address */
if (!inet_pton(AF_INET6, *addr_t, &(app->wb_ipv6))) {
fprintf(stderr, "Invalid West_Bound(S1U/S5S8) IPv6 Address\n");
return -1;
}
/* Fill the prefix length */
memcpy(&app->wb_ipv6_prefix_len, net_t, IPv6_PREFIX_LEN);
app->wb_ip_type.ipv6 = PRESENT;
/* Set the IP Type to dual connectivity */
if (app->wb_ip_type.ipv4 && app->wb_ip_type.ipv6) {
app->wb_ip_type.ipv4_ipv6 = PRESENT;
}
fprintf(stderr, "DP: WB_IPv6(S1U/S5S8) Addr/Prefix: %s/%u\n",
global_entries[inx].value, app->wb_ipv6_prefix_len);
} else if(strncmp("WB_LI_IPv4", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
/* S5S8-P West_Bound Logical interface Address */
struct in_addr tmp = {0};
if (!inet_aton(global_entries[inx].value, &(tmp))) {
fprintf(stderr, "Invalid West_Bound(S5S8) Logical iface IPv4 Address\n");
app->wb_li_ip = 0;
return -1;
}
app->wb_li_ip = ntohl(tmp.s_addr);
app->wb_li_ip_type.ipv4 = PRESENT;
/* Set the IP Type to dual connectivity */
if (app->wb_li_ip_type.ipv4 && app->wb_li_ip_type.ipv6) {
app->wb_li_ip_type.ipv4_ipv6 = PRESENT;
}
fprintf(stderr, "DP: WB_LI_IPv4(West_Bound) Addr: "IPV4_ADDR"\n",
IPV4_ADDR_HOST_FORMAT(app->wb_li_ip));
} else if(strncmp("WB_LI_IPv6", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
uint8_t net_t[IPv6_PREFIX_LEN] = {0};
char *addr_t[IPV6_STR_LEN] = {NULL};
/* Parse the IPv6 String and separate out address and prefix */
if (get_ipv6_address(global_entries[inx].value, addr_t, net_t) < 0) {
fprintf(stderr, "Invalid West_Bound(S1U/S5S8) Logical intf IPv6 Address and Prefix Configuration.\n");
return -1;
}
/* S1U/S5S8 Logical IPV6 Address */
if (!inet_pton(AF_INET6, *addr_t, &(app->wb_li_ipv6))) {
fprintf(stderr, "Invalid West_LI_Bound(S1U/S5S8) Logical IPv6 Address\n");
return -1;
}
/* Fill the prefix length */
memcpy(&app->wb_li_ipv6_prefix_len, net_t, IPv6_PREFIX_LEN);
app->wb_li_ip_type.ipv6 = PRESENT;
/* Set the IP Type to dual connectivity */
if (app->wb_li_ip_type.ipv4 && app->wb_li_ip_type.ipv6) {
app->wb_li_ip_type.ipv4_ipv6 = PRESENT;
}
fprintf(stderr, "DP: WB_LI_IPv6(S1U/S5S8) Addr/Prefix: %s/%u\n",
global_entries[inx].value, app->wb_li_ipv6_prefix_len);
} else if(strncmp("EB_LI_IPv6", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
uint8_t net_t[IPv6_PREFIX_LEN] = {0};
char *addr_t[IPV6_STR_LEN] = {NULL};
/* Parse the IPv6 String and separate out address and prefix */
if (get_ipv6_address(global_entries[inx].value, addr_t, net_t) < 0) {
fprintf(stderr, "Invalid East_Bound(S5S8/SGI) Logical intf IPv6 Address and Prefix Configuration.\n");
return -1;
}
/* S1U/S5S8 Logical IPV6 Address */
if (!inet_pton(AF_INET6, *addr_t, &(app->eb_li_ipv6))) {
fprintf(stderr, "Invalid East_LI_Bound(S5S8/SGI) Logical IPv6 Address\n");
return -1;
}
/* Fill the prefix length */
memcpy(&app->eb_li_ipv6_prefix_len, net_t, IPv6_PREFIX_LEN);
app->eb_li_ip_type.ipv6 = PRESENT;
/* Set the IP Type to dual connectivity */
if (app->eb_li_ip_type.ipv4 && app->eb_li_ip_type.ipv6) {
app->eb_li_ip_type.ipv4_ipv6 = PRESENT;
}
fprintf(stderr, "DP: EB_LI_IPv6(S5S8/SGI) Addr/Prefix: %s/%u\n",
global_entries[inx].value, app->eb_li_ipv6_prefix_len);
} else if(strncmp("EB_IPv4", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
/* S5S8/SGI IP Address */
struct in_addr tmp = {0};
if (!inet_aton(global_entries[inx].value, &(tmp))) {
fprintf(stderr, "Invalid East_Bound(S5S8/SGI) IPv4 Address\n");
app->eb_ip = 0;
return -1;
}
app->eb_ip = ntohl(tmp.s_addr);
app->eb_ip_type.ipv4 = PRESENT;
/* Set the IP Type to dual connectivity */
if (app->eb_ip_type.ipv4 && app->eb_ip_type.ipv6) {
app->eb_ip_type.ipv4_ipv6 = PRESENT;
}
fprintf(stderr, "DP: EB_IPv4(S5S8/SGI) Addr: "IPV4_ADDR"\n",
IPV4_ADDR_HOST_FORMAT(app->eb_ip));
} else if(strncmp("EB_IPv6", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
uint8_t net_t[IPv6_PREFIX_LEN] = {0};
char *addr_t[IPV6_STR_LEN] = {NULL};
/* Parse the IPv6 String and separate out address and prefix */
if (get_ipv6_address(global_entries[inx].value, addr_t, net_t) < 0) {
fprintf(stderr, "Invalid East_Bound(S5S8/SGI) IPv6 Address and Prefix Configuration.\n");
return -1;
}
/* S5S8/SGI IPV6 Address */
if (!inet_pton(AF_INET6, *addr_t, &(app->eb_ipv6))) {
fprintf(stderr, "Invalid East_Bound(S5S8/SGI) IPv6 Address\n");
return -1;
}
/* Fill the prefix length */
memcpy(&app->eb_ipv6_prefix_len, net_t, IPv6_PREFIX_LEN);
app->eb_ip_type.ipv6 = PRESENT;
/* Set the IP Type to dual connectivity */
if (app->eb_ip_type.ipv4 && app->eb_ip_type.ipv6) {
app->eb_ip_type.ipv4_ipv6 = PRESENT;
}
fprintf(stderr, "DP: EB_IPv6(S5S8/SGI) Addr/Prefix: %s/%u\n",
global_entries[inx].value, app->eb_ipv6_prefix_len);
} else if(strncmp("EB_LI_IPv4", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
/* S5S8-S IP Address */
struct in_addr tmp = {0};
if (!inet_aton(global_entries[inx].value, &(tmp))) {
fprintf(stderr, "Invalid East_Bound(S5S8) Logical iface IP Address\n");
app->eb_li_ip = 0;
return -1;
}
app->eb_li_ip = ntohl(tmp.s_addr);
app->eb_li_ip_type.ipv4 = PRESENT;
/* Set the IP Type to dual connectivity */
if (app->eb_li_ip_type.ipv4 && app->eb_li_ip_type.ipv6) {
app->eb_li_ip_type.ipv4_ipv6 = PRESENT;
}
fprintf(stderr, "DP: EB_LI_IPv4(S5S8) Addr: "IPV4_ADDR"\n",
IPV4_ADDR_HOST_FORMAT(app->eb_li_ip));
} else if(strncmp("WB_GW_IP", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
/* Configured GW IP for Routing */
struct in_addr tmp = {0};
if (!inet_aton(global_entries[inx].value, &(tmp))) {
fprintf(stderr, "Invalid West_Bound(S1U/S5S8) Gateway IP Address\n");
app->wb_gw_ip = 0;
return -1;
}
app->wb_gw_ip = ntohl(tmp.s_addr);
fprintf(stderr, "DP: WB_GW_IP(S1U/S5S8) Addr: "IPV4_ADDR"\n",
IPV4_ADDR_HOST_FORMAT(app->wb_gw_ip));
} else if(strncmp("EB_GW_IP", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
/* Configured GW IP for Routing */
struct in_addr tmp = {0};
if (!inet_aton(global_entries[inx].value, &(tmp))) {
fprintf(stderr, "Invalid East_Bound(S5S8/SGI) Gateway IP Address\n");
app->eb_gw_ip = 0;
return -1;
}
app->eb_gw_ip = tmp.s_addr;
fprintf(stderr, "DP: EB_GW_IP(S5S8/SGI) Addr: "IPV4_ADDR"\n",
IPV4_ADDR_HOST_FORMAT(app->eb_gw_ip));
} else if(strncmp("PFCP_IPv4", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
/* Configured PFCP IP Address */
if (!inet_aton(global_entries[inx].value, &(dp_comm_ip))) {
fprintf(stderr, "Invalid DP PFCP IPv4 Address\n");
dp_comm_ip.s_addr = 0;
return -1;
}
strncpy(CDR_FILE_PATH, "logs/", CDR_BUFF_SIZE);
strncat(CDR_FILE_PATH, inet_ntoa(dp_comm_ip), CDR_BUFF_SIZE);
strncat(CDR_FILE_PATH, "_cdr.csv", strlen("_cdr.csv"));
fprintf(stderr, "DP: CDR_FILE_PATH: ngic_rtc/dp/%s\n", CDR_FILE_PATH);
dp_comm_ip.s_addr = dp_comm_ip.s_addr;
dp_comm_ip_type |= 1;
fprintf(stderr, "DP: PFCP_IPv4 Addr: "IPV4_ADDR"\n",
IPV4_ADDR_HOST_FORMAT(ntohl(dp_comm_ip.s_addr)));
} else if(strncmp("PFCP_IPv6", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
uint8_t net_t[IPv6_PREFIX_LEN] = {0};
char *addr_t[IPV6_STR_LEN] = {NULL};
char temp[CDR_BUFF_SIZE] = {0};
/* Parse the IPv6 String and separate out address and prefix */
if (get_ipv6_address(global_entries[inx].value, addr_t, net_t) < 0) {
fprintf(stderr, "Invalid DP PFCP_IPv6 Address and Prefix Configuration.\n");
return -1;
}
/* Configured PFCP IP Address */
if (!inet_pton(AF_INET6, *addr_t, &(dp_comm_ipv6))) {
fprintf(stderr, "Invalid DP PFCP IPv6 Address\n");
return -1;
}
inet_ntop(AF_INET6, dp_comm_ipv6.s6_addr, temp, CDR_BUFF_SIZE);
strncat(temp, "_cdr.csv", strlen("_cdr.csv"));
strncpy(CDR_FILE_PATH, "logs/", CDR_BUFF_SIZE);
strncat(CDR_FILE_PATH, temp, CDR_BUFF_SIZE);
fprintf(stderr, "DP: CDR_FILE_PATH: ngic_rtc/dp/%s\n", CDR_FILE_PATH);
memcpy(&app->pfcp_ipv6_prefix_len, net_t, IPv6_PREFIX_LEN);
dp_comm_ip_type |= 2;
fprintf(stderr, "DP: PFCP_IPv6 Addr: %s/%u\n",
global_entries[inx].value, *net_t);
} else if(strncmp("PFCP_PORT", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
dp_comm_port = (uint16_t)atoi(global_entries[inx].value);
fprintf(stderr, "DP: PFCP PORT: %u\n", dp_comm_port);
} else if(strncmp("WB_IPv4_MASK", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
/* S1U/S5S8 Subnet mask */
struct in_addr tmp = {0};
if(!inet_aton(global_entries[inx].value, &(tmp))) {
fprintf(stderr, "Invalid West_Bound(S1U/S5S8) IPv4 Subnet Masks\n");
app->wb_mask = 0;
return -1;
}
app->wb_mask = ntohl(tmp.s_addr);
fprintf(stderr, "DP: WB_IPv4_MASK(S1U/S5S8): "IPV4_ADDR"\n",
IPV4_ADDR_HOST_FORMAT(app->wb_mask));
} else if(strncmp("WB_LI_IPv4_MASK", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
/* S5S8-P Subnet mask */
struct in_addr tmp = {0};
if(!inet_aton(global_entries[inx].value, &(tmp))) {
fprintf(stderr, "Invalid West_Bound(S5S8) logical iface Masks\n");
app->wb_li_mask = 0;
return -1;
}
app->wb_li_mask = ntohl(tmp.s_addr);
fprintf(stderr, "DP: WB_LI_IPv4_MASK(S5S8): "IPV4_ADDR"\n",
IPV4_ADDR_HOST_FORMAT(app->wb_li_mask));
} else if(strncmp("EB_IPv4_MASK", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
/* S5S8/SGI Subnet mask */
struct in_addr tmp = {0};
if(!inet_aton(global_entries[inx].value, &(tmp))) {
fprintf(stderr, "Invalid East_Bound(S5S8/SGI) IPv4 Subnet Masks\n");
app->eb_mask = 0;
return -1;
}
app->eb_mask = ntohl(tmp.s_addr);
fprintf(stderr, "DP: EB_IPv4_MASK(S5S8/SGI): "IPV4_ADDR"\n",
IPV4_ADDR_HOST_FORMAT(app->eb_mask));
} else if(strncmp("EB_LI_IPv4_MASK", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
/* S5S8-S Subnet mask */
struct in_addr tmp = {0};
if(!inet_aton(global_entries[inx].value, &(tmp))) {
fprintf(stderr, "Invalid East_Bound(S5S8) logical iface Masks\n");
app->eb_li_mask = 0;
return -1;
}
app->eb_li_mask = ntohl(tmp.s_addr);
fprintf(stderr, "DP: EB_LI_IPv4_MASK(S5S8): "IPV4_ADDR"\n",
IPV4_ADDR_HOST_FORMAT(app->eb_li_mask));
} else if(strncmp("WB_MAC", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
if (!parse_ether_addr(&app->wb_ether_addr, global_entries[inx].value, 0)) {
return -1;
}
for (int i = 0; i < RTE_MAX_ETHPORTS; i++) {
rte_eth_macaddr_get(i, &mac_addr);
if (is_same_ether_addr
(&app->wb_ether_addr, &mac_addr)) {
fprintf(stderr, "DP: West_Bound(WB/S1U/S5S8) Port_ID: %d\n", i);
app->wb_port = i;
break;
}
}
if (app->wb_port != 0) {
fprintf(stderr, "ERROR: Iface assignment or West Bound(WB_MAC) intf"
" MAC Address is wrong configured.\n");
return -1;
}
} else if(strncmp("EB_MAC", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
if (!parse_ether_addr(&app->eb_ether_addr, global_entries[inx].value, 1)) {
return -1;
}
for (int i = 0; i < RTE_MAX_ETHPORTS; i++) {
rte_eth_macaddr_get(i, &mac_addr);
if (is_same_ether_addr
(&app->eb_ether_addr, &mac_addr)) {
fprintf(stderr, "DP: East_Bound(EB/S5S8/SGI) Port_ID: %d\n", i);
app->eb_port = i;
break;
}
}
if (app->eb_port != 1) {
fprintf(stderr, "ERROR: Iface assignment or East Bound(EB_MAC) intf"
" MAC Address is wrong configured.\n");
return -1;
}
} else if(strncmp("WB_IFACE", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
strncpy(app->wb_iface_name, global_entries[inx].value, MAX_LEN);
fprintf(stderr, "DP: KNI West_Bound iface: %s\n", app->wb_iface_name);
} else if(strncmp("WB_LI_IFACE", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
strncpy(app->wb_li_iface_name, global_entries[inx].value, MAX_LEN);
fprintf(stderr, "DP: KNI West_Bound(S5S8) Logical iface: %s\n", app->wb_li_iface_name);
} else if(strncmp("EB_IFACE", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
strncpy(app->eb_iface_name, global_entries[inx].value, MAX_LEN);
fprintf(stderr, "DP: KNI East_Bound iface: %s\n", app->eb_iface_name);
} else if(strncmp("EB_LI_IFACE", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
strncpy(app->eb_li_iface_name, global_entries[inx].value, MAX_LEN);
fprintf(stderr, "DP: KNI East_Bound(S5S8) Logical iface: %s\n", app->eb_li_iface_name);
} else if(strncmp("NUMA", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
app->numa_on = (uint8_t)atoi(global_entries[inx].value);
fprintf(stderr, "DP: NUMA Mode:%u\n", app->numa_on);
} else if(strncmp("GTPU_SEQNB_IN", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
app->gtpu_seqnb_in = (uint8_t)atoi(global_entries[inx].value);
fprintf(stderr, "DP: GTPU_SEQNB_IN: %u\n", app->gtpu_seqnb_in);
} else if(strncmp("GTPU_SEQNB_OUT", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
app->gtpu_seqnb_out = (uint8_t)atoi(global_entries[inx].value);
fprintf(stderr, "DP: GTPU_SEQNB_OUT: %u\n", app->gtpu_seqnb_out);
} else if(strncmp("TRANSMIT_TIMER", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
app->transmit_timer = (int)atoi(global_entries[inx].value);
fprintf(stderr, "DP: TRANSMIT_TIMER: %d\n", app->transmit_timer);
} else if(strncmp("PERIODIC_TIMER", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
app->periodic_timer = (int)atoi(global_entries[inx].value);
fprintf(stderr, "DP: PERIODIC_TIMER: %d\n", app->periodic_timer);
} else if(strncmp("TRANSMIT_COUNT", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
app->transmit_cnt = (uint8_t)atoi(global_entries[inx].value);
fprintf(stderr, "DP: TRANSMIT_COUNT: %u\n", app->transmit_cnt);
} else if(strncmp("TEIDRI", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
/* Configure TEIDRI val */
errno = 0;
endptr = NULL;
temp_val = 0;
int temp_val = strtol(global_entries[inx].value, &endptr, DECIMAL_BASE);
if ((errno == ERANGE && (temp_val == LONG_MAX || temp_val == LONG_MIN))
|| (errno != 0 && temp_val == 0)
|| (*endptr != '\0') /* Checks if input contains any non digit value*/
|| (temp_val < 0 || temp_val > 7)) { /* checks if input is positive and is within given range */
printf("Invalid TEIDRI value %s\n", global_entries[inx].value);
printf(" - Input should be valid positive integer value \n");
printf(" - Input should not contain any non digit character \n");
printf(" - Input should contain value between 0 to 7\n");
app->teidri_val = 0;
return -1;
}
app->teidri_val = temp_val;
fprintf(stderr, "DP: TEIDRI: %d\n", app->teidri_val);
} else if(strncmp("TEIDRI_TIMEOUT", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
/* Configure TEIDRI timeout val */
errno = 0;
endptr = NULL;
temp_val = 0;
temp_val = strtol(global_entries[inx].value, &endptr, DECIMAL_BASE);
if ((errno == ERANGE && (temp_val == LONG_MAX || temp_val == LONG_MIN))
|| (errno != 0 && temp_val == 0)
|| (*endptr != '\0') /* Checks if input contains any non digit value*/
|| (temp_val < 0 || temp_val > INT_MAX)) { /* checks if input is positive and is inside integer range */
printf("Invalid TEIDRI TIMEOUT value %s\n", global_entries[inx].value);
printf(" - Input should be valid positive integer value \n");
printf(" - Input should not contain any non digit character \n");
printf("Falling back to default value %d for TEIDRI TIMEOUT \n", TEIDRI_TIMEOUT_DEFAULT);
app->teidri_timeout = TEIDRI_TIMEOUT_DEFAULT;
}else{
app->teidri_timeout = temp_val;
fprintf(stderr, "DP: TEIDRI_TIMEOUT: %d\n", app->teidri_timeout);
}
} else if(strncmp("DDF2_IP", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
/* DDF2 IP Address */
strncpy(app->ddf2_ip, global_entries[inx].value, IPV6_STR_LEN);
fprintf(stderr, "DP: DDF2_IP: %s\n", app->ddf2_ip);
} else if(strncmp("DDF2_PORT", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
app->ddf2_port = (uint16_t)atoi(global_entries[inx].value);
fprintf(stderr, "DP: DDF2_PORT: %u\n", app->ddf2_port);
} else if(strncmp("DDF2_LOCAL_IP", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
strncpy(app->ddf2_local_ip, global_entries[inx].value, IPV6_STR_LEN);
fprintf(stderr, "DP: DDF2_LOCAL_IP: %s\n", app->ddf2_local_ip);
} else if(strncmp("DDF3_IP", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
/* DDF3 IP Address */
strncpy(app->ddf3_ip, global_entries[inx].value, IPV6_STR_LEN);
fprintf(stderr, "DP: DDF3_IP: %s\n", app->ddf3_ip);
} else if(strncmp("DDF3_PORT", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
app->ddf3_port = (uint16_t)atoi(global_entries[inx].value);
fprintf(stderr, "DP: DDF3_PORT: %u\n", app->ddf3_port);
} else if(strncmp("DDF3_LOCAL_IP", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
strncpy(app->ddf3_local_ip, global_entries[inx].value, IPV6_STR_LEN);
fprintf(stderr, "DP: DDF3_LOCAL_IP: %s\n", app->ddf3_local_ip);
} else if(strncmp("GENERATE_PCAP", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
app->generate_pcap = (uint8_t)atoi(global_entries[inx].value);
if (app->generate_pcap != PCAP_GEN_OFF && app->generate_pcap != PCAP_GEN_ON)
rte_panic("Use 0 or 1 for pcap generation STOP/START\n");
fprintf(stderr, "DP: GENERATE_PCAP: %u\n", app->generate_pcap);
} else if(strncmp("PERF_FLAG", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
app->perf_flag = (uint8_t)atoi(global_entries[inx].value);
if (app->perf_flag != PERF_ON && app->perf_flag != PERF_OFF)
rte_panic("Use 0 or 1 for perf flag DISABLE/ENABLE ");
fprintf(stderr, "DP: PERF_FLAG: %u\n", app->perf_flag);
} else if (strncmp("CLI_REST_IP" , global_entries[inx].name,
ENTRY_NAME_SIZE) == 0) {
/* Check for IP type (ipv4/ipv6) */
struct addrinfo *ip_type = NULL;
if (getaddrinfo(global_entries[inx].value, NULL, NULL, &ip_type)) {
fprintf(stderr, "CP: CP_REST_IP : %s is in incorrect format\n",
global_entries[inx].value);
rte_panic();
}
if(ip_type->ai_family == AF_INET6) {
strncpy(app->cli_rest_ip_buff,
global_entries[inx].value, IPV6_STR_LEN);
} else {
strncpy(app->cli_rest_ip_buff,
global_entries[inx].value, IPv4_ADDRESS_LEN);
}
fprintf(stdout, "CP: CP_REST_IP : %s\n",
app->cli_rest_ip_buff);
freeaddrinfo(ip_type);
} else if(strncmp("CLI_REST_PORT", global_entries[inx].name, ENTRY_NAME_SIZE) == 0) {
app->cli_rest_port = (uint16_t)atoi(global_entries[inx].value);
fprintf(stdout, "CP: CLI_REST_PORT : %d\n",
app->cli_rest_port);
}
}
rte_free(global_entries);
/* Validate the Mandatory Parameters are Configured or Not */
if (validate_mandatory_params(app)) {
return -1;
}
app->wb_net = app->wb_ip & app->wb_mask;
app->wb_bcast_addr = app->wb_ip | ~(app->wb_mask);
fprintf(stderr, "DP: Config:%s::"
"\n\tDP: West_Bound(S1U/S5S8) IP:\t\t"IPV4_ADDR";\n\t",
__func__, IPV4_ADDR_HOST_FORMAT(app->wb_ip));
fprintf(stderr, "West_Bound(S1U/S5S8) NET:\t\t"IPV4_ADDR";\n\t",
IPV4_ADDR_HOST_FORMAT(app->wb_net));
fprintf(stderr, "West_Bound(S1U/S5S8) MASK:\t\t"IPV4_ADDR";\n\t",
IPV4_ADDR_HOST_FORMAT(app->wb_mask));
fprintf(stderr, "West_Bound(S1U/S5S8) BCAST ADDR:\t"IPV4_ADDR";\n\t",
IPV4_ADDR_HOST_FORMAT(app->wb_bcast_addr));
fprintf(stderr, "West_Bound(S1U/S5S8) GW IP:\t\t"IPV4_ADDR"\n\n",
IPV4_ADDR_HOST_FORMAT(app->wb_gw_ip));
if(app->wb_li_ip) {
app->wb_li_net = app->wb_li_ip & app->wb_li_mask;
app->wb_li_bcast_addr = app->wb_li_ip | ~(app->wb_li_mask);
fprintf(stderr, "DP: Config:%s::"
"\n\tDP: West_Bound(S5S8) Logical Intf IP:\t\t"IPV4_ADDR";\n\t",
__func__, IPV4_ADDR_HOST_FORMAT(app->wb_li_ip));
fprintf(stderr, "West_Bound(S5S8) Logical Intf NET:\t\t"IPV4_ADDR";\n\t",
IPV4_ADDR_HOST_FORMAT(app->wb_li_net));
fprintf(stderr, "West_Bound(S5S8) Logical Intf MASK:\t\t"IPV4_ADDR";\n\t",
IPV4_ADDR_HOST_FORMAT(app->wb_li_mask));
fprintf(stderr, "West_Bound(S5S8) Logical Intf BCAST ADDR:\t"IPV4_ADDR";\n\n",
IPV4_ADDR_HOST_FORMAT(app->wb_li_bcast_addr));
//fprintf(stderr, "West_Bound(S5S8) Logical Intf GW IP:\t\t"IPV4_ADDR"\n",
// IPV4_ADDR_HOST_FORMAT(app->wb_li_gw_ip));
}
app->eb_net = app->eb_ip & app->eb_mask;
app->eb_bcast_addr = app->eb_ip | ~(app->eb_mask);
fprintf(stderr, "DP: Config:%s::"
"\n\tDP: East_Bound(S5S8/SGI) IP:\t\t"IPV4_ADDR";\n\t",
__func__, IPV4_ADDR_HOST_FORMAT(app->eb_ip));
fprintf(stderr, "East_Bound(S5S8/SGI) NET:\t\t"IPV4_ADDR";\n\t",
IPV4_ADDR_HOST_FORMAT(app->eb_net));
fprintf(stderr, "East_Bound(S5S8/SGI) MASK:\t\t"IPV4_ADDR";\n\t",
IPV4_ADDR_HOST_FORMAT(app->eb_mask));
fprintf(stderr, "East_Bound(S5S8/SGI) BCAST ADDR:\t"IPV4_ADDR";\n\t",
IPV4_ADDR_HOST_FORMAT(app->eb_bcast_addr));
fprintf(stderr, "East_Bound(S5S8/SGI) GW IP:\t\t"IPV4_ADDR"\n\n",
IPV4_ADDR_HOST_FORMAT(app->eb_gw_ip));
if(app->eb_li_ip) {
app->eb_li_net = app->eb_li_ip & app->eb_li_mask;
app->eb_li_bcast_addr = app->eb_li_ip | ~(app->eb_li_mask);
fprintf(stderr, "DP: Config:%s::"
"\n\tDP: East_Bound(S5S8) Logical Intf IP:\t\t"IPV4_ADDR";\n\t",
__func__, IPV4_ADDR_HOST_FORMAT(app->eb_li_ip));
fprintf(stderr, "East_Bound(S5S8) Logical Intf NET:\t\t"IPV4_ADDR";\n\t",
IPV4_ADDR_HOST_FORMAT(app->eb_li_net));
fprintf(stderr, "East_Bound(S5S8) Logical Intf MASK:\t\t"IPV4_ADDR";\n\t",
IPV4_ADDR_HOST_FORMAT(app->eb_li_mask));
fprintf(stderr, "East_Bound(S5S8) Logical Intf BCAST ADDR:\t"IPV4_ADDR";\n\n",
IPV4_ADDR_HOST_FORMAT(app->eb_li_bcast_addr));
//fprintf(stderr, "East_Bound(S5S8) Logical Intf GW IP:\t\t"IPV4_ADDR"\n",
// IPV4_ADDR_HOST_FORMAT(app->wb_li_gw_ip));
}
fprintf(stderr,
"###############[Completed Data-Plane Config Reading]################\n\n\n");
return 0;
}
/**
 * @brief : Assign an unused lcore, or validate an explicitly pinned one.
 * @param : core, in/out; -1 means "pick one for me", otherwise validated.
 * @param : used_coremask, bitmap of lcores already claimed; updated on assignment.
 * @return : Returns nothing
 */
static inline void set_unused_lcore(int *core, uint64_t *used_coremask)
{
	unsigned lcore;

	/* A core was pinned explicitly by the caller; just validate it. */
	if (*core != -1) {
		if (!rte_lcore_is_enabled(*core))
			rte_panic("Invalid Core Assignment - "
					"core %u not in coremask", *core);
		return;
	}

	/* Otherwise claim the first enabled lcore not yet taken. */
	RTE_LCORE_FOREACH(lcore) {
		uint64_t bit = 1ULL << lcore;
		if ((*used_coremask & bit) == 0) {
			*used_coremask |= bit;
			*core = lcore;
			return;
		}
	}
	rte_exit(EXIT_FAILURE, "No free core available - check coremask\n");
}
/**
 * @brief : Function to parse command line config.
 * @param : app, global app config structure.
 * @param : argc, number of arguments.
 * @param : argv, list of arguments.
 * @return : Returns 0 in case of success , -1 otherwise
 */
static inline int
parse_config_args(struct app_params *app, int argc, char **argv)
{
	static struct option spgw_opts[] = {
		{"LOG", required_argument, 0, 'l'},
		{"KNI_PORTMASK", required_argument, 0, 'p'},
		{NULL, 0, 0, 0}
	};
	uint64_t coremask_in_use = 0;
	int opt_idx = 0;
	int c;

	/* Read the Data-Plane configuration file first. */
	if (parse_up_config_param(app) < 0)
		return -1;

	optind = 0;	/* reset getopt lib */
	while ((c = getopt_long(argc, argv, "l:p:",
					spgw_opts, &opt_idx)) != -1) {
		switch (c) {
		case 'l':
			app->log_level = atoi(optarg);
			break;
		case 'p':
			app->ports_mask = atoi(optarg);
			break;
		default:
			fprintf(stderr, "Parsing Configuration Error \n");
			return -1;
		}
	}

	/* Reserve one dedicated lcore per pipeline stage. */
	set_unused_lcore(&epc_app.core_mct, &coremask_in_use);
	set_unused_lcore(&epc_app.core_iface, &coremask_in_use);
	set_unused_lcore(&epc_app.core_ul[S1U_PORT_ID], &coremask_in_use);
	set_unused_lcore(&epc_app.core_dl[SGI_PORT_ID], &coremask_in_use);

	return 0;
}
void dp_init(int argc, char **argv)
{
	/* Parse config file + command line; fatal on failure. */
	if (parse_config_args(&app, argc, argv) < 0)
		rte_exit(EXIT_FAILURE,
				LOG_FORMAT"Error: Failed parsing of the data-plane configuration\n", LOG_VALUE);

	if (read_teidri_data(TEIDRI_FILENAME,
				&upf_teidri_blocked_list, &upf_teidri_free_list, app.teidri_val) != 0) {
		/* Need to think about error handling */
	}

	/*
	 * Select the GTPU decap handlers according to the configured
	 * inbound sequence-number mode: 1 = sequence number present,
	 * 2 = sequence number absent, anything else = detect dynamically.
	 */
	if (app.gtpu_seqnb_in == 1) {
		fp_gtpu_get_inner_src_dst_ip = gtpu_get_inner_src_dst_ip_with_seqnb;
		fp_gtpu_inner_src_ip = gtpu_inner_src_ip_with_seqnb;
		fp_gtpu_inner_src_ipv6 = gtpu_inner_src_ipv6_with_seqnb;
		fp_decap_gtpu_hdr = decap_gtpu_hdr_with_seqnb;
	} else if (app.gtpu_seqnb_in == 2) {
		fp_gtpu_get_inner_src_dst_ip = gtpu_get_inner_src_dst_ip_without_seqnb;
		fp_gtpu_inner_src_ip = gtpu_inner_src_ip_without_seqnb;
		fp_gtpu_inner_src_ipv6 = gtpu_inner_src_ipv6_without_seqnb;
		fp_decap_gtpu_hdr = decap_gtpu_hdr_without_seqnb;
	} else {
		/* 0 or any unrecognized value: dynamic detection. */
		fp_gtpu_get_inner_src_dst_ip = gtpu_get_inner_src_dst_ip_dynamic_seqnb;
		fp_gtpu_inner_src_ip = gtpu_inner_src_ip_dynamic_seqnb;
		fp_gtpu_inner_src_ipv6 = gtpu_inner_src_ipv6_dynamic_seqnb;
		fp_decap_gtpu_hdr = decap_gtpu_hdr_dynamic_seqnb;
	}

	/*
	 * Select the GTPU encap handler: 1 = emit a sequence number,
	 * 0 (or anything else) = do not.
	 */
	if (app.gtpu_seqnb_out == 1)
		fp_encap_gtpu_hdr = encap_gtpu_hdr_with_seqnb;
	else
		fp_encap_gtpu_hdr = encap_gtpu_hdr_without_seqnb;
}
|
nikhilc149/e-utran-features-bug-fixes | pfcp_messages/pfcp_up_sess.h | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PFCP_UP_SESS_H
#define PFCP_UP_SESS_H
#include "pfcp_messages.h"
/* DP-CDR related definitions */
#define PATH_TEMP "./CDR_temp.csv"
#define VOLUME_LIMIT "volume_limit"
#define TIME_LIMIT "Time_limit"
#define CDR_TERMINATION "Termination"
#define CDR_BUFF_SIZE 256
#define CDR_TIME_BUFF 16
#define MAX_SEQ_NO_LEN 32
#define UP_SEID_LEN 16
#define CDR_HEADER "seq_no,up_seid,cp_seid,imsi,dp_ip_v4,dp_ip_v6,cp_ip_v4,cp_ip_v6,ue_ip_v4,ue_ip_v6,cause_for_record_closing,uplink_volume,downlink_volume,total_volume,duration_measurement,start_time,end_time,data_start_time,data_end_time\n"
extern char CDR_FILE_PATH[CDR_BUFF_SIZE];
/**
* @brief : Process pfcp session association req at dp side
* @param : ass_setup_req, hold pfcp session association req data
* @param : ass_setup_resp, response structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int8_t
process_up_assoc_req(pfcp_assn_setup_req_t *ass_setup_req,
pfcp_assn_setup_rsp_t *ass_setup_resp);
/**
* @brief : Process pfcp session establishment req at dp side
* @param : sess_req, hold pfcp session establishment req data
* @param : sess_resp, response structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int8_t
process_up_session_estab_req(pfcp_sess_estab_req_t *sess_req,
pfcp_sess_estab_rsp_t *sess_resp, peer_addr_t *peer_addr);
/**
* @brief : Process pfcp session modification req at dp side
* @param : sess_mod_req, hold pfcp session modification req data
* @param : sess_mod_rsp, response structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int8_t
process_up_session_modification_req(pfcp_sess_mod_req_t *sess_mod_req,
pfcp_sess_mod_rsp_t *sess_mod_rsp);
/**
* @brief : Process pfcp session report resp at dp side
* @param : sess_rep_resp, hold pfcp session report response
* @return : Returns 0 in case of success , -1 otherwise
*/
int8_t
process_up_session_report_resp(pfcp_sess_rpt_rsp_t *sess_rep_resp);
/**
* @brief : Deletes session entry at dp side
* @param : sess_del_req, hold pfcp session deletion req data
* @param : sess_del_rsp, response structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int8_t
up_delete_session_entry(pfcp_session_t *sess, pfcp_sess_del_rsp_t *sess_del_rsp);
/**
* @brief : Process pfcp session deletion req at dp side
* @param : sess_del_req, hold pfcp session deletion req data
* @param : sess_del_rsp, response structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int8_t
process_up_session_deletion_req(pfcp_sess_del_req_t *sess_del_req,
pfcp_sess_del_rsp_t *sess_del_rsp);
/**
* @brief : Fill Process pfcp session establishment response
* @param : pfcp_sess_est_resp, structure to be filled
* @param : cause , cause whether request is accepted or not
* @param : offend , offending ie type if any
* @param : dp_comm_ip, ip address
* @param : pfcp_session_request, hold data from establishment request
* @return : Returns nothing
*/
void
fill_pfcp_session_est_resp(pfcp_sess_estab_rsp_t
*pfcp_sess_est_resp, uint8_t cause, int offend,
node_address_t node_value,
struct pfcp_sess_estab_req_t *pfcp_session_request);
/**
* @brief : Fill Process pfcp session delete response
* @param : pfcp_sess_del_resp, structure to be filled
* @param : cause , cause whether request is accepted or not
* @param : offend , offending ie type if any
* @return : Returns nothing
*/
void
fill_pfcp_sess_del_resp(pfcp_sess_del_rsp_t
*pfcp_sess_del_resp, uint8_t cause, int offend);
/**
* @brief : Fill Process pfcp session modification response
* @param : pfcp_sess_modify_resp, structure to be filled
* @param : pfcp_session_mod_req, holds information from modification request
* @param : cause , cause whether request is accepted or not
* @param : offend , offending ie type if any
* @return : Returns nothing
*/
void
fill_pfcp_session_modify_resp(pfcp_sess_mod_rsp_t *pfcp_sess_modify_resp,
pfcp_sess_mod_req_t *pfcp_session_mod_req, uint8_t cause, int offend);
/**
* @brief : Fill usage report for pfcp session modification response
* @param : usage_report, usage report to be fill
 * @param : urr, urr structure for which we are generating usage report.
* @param : cp_seid , session id of cp
* @return : Returns 0 for Success and -1 for failure
*/
int8_t
fill_sess_mod_usage_report(pfcp_usage_rpt_sess_mod_rsp_ie_t *usage_report,
urr_info_t *urr, uint64_t cp_seid);
/**
* @brief : Fill usage report for pfcp session deletion response
* @param : usage_report, usage report to be fill
 * @param : urr, urr structure for which we are generating usage report.
* @param : cp_seid, cp session id
* @return : Returns 0 for Success and -1 for failure
*/
int8_t
fill_sess_del_usage_report(pfcp_usage_rpt_sess_del_rsp_ie_t *usage_report,
urr_info_t *urr, uint64_t cp_seid);
/**
* @brief : Fill usage report for pfcp session report request
* @param : usage_report, usage report to be fill
 * @param : urr, urr structure for which we are generating usage report.
* @return : Returns 0 for Success and -1 for failure
*/
int8_t
fill_sess_rep_req_usage_report(pfcp_usage_rpt_sess_rpt_req_ie_t *usage_report,
urr_info_t *urr, uint32_t trig);
/*
* @brief : add a timer entry for usage report
* @param : conn_data, Peer node connection information
* @param : urr, urr object
* @param : cb, timer callback
* @return : Returns true or false
*/
bool
add_timer_entry_usage_report( peerEntry *conn_data, uint32_t timeout_ms, gstimercallback cb);
/*
* @brief : fill a Peer node connection information
 * @param : peer_addr, dest socket addr
* @param : urr, urr object
* @param : cp_seid, seid of CP
* @param : up_seid, seid of UP
* @return : Returns pointer of peerEntry
*/
peerEntry *
fill_timer_entry_usage_report(struct sockaddr_in *peer_addr, urr_info_t *urr, uint64_t cp_seid, uint64_t up_seid);
/**
* @brief : timer callback
* @param : ti, timer information
* @param : data_t, Peer node connection information
* @return : Returns nothing
*/
void
timer_callback(gstimerinfo_t *ti, const void *data_t);
/**
* @brief : inittimer, initialize a timer
 * @param : md, Peer node connection information
* @param : ptms, timer in milisec
* @param : cb, callback function to call
* @return : Returns nothing
*/
bool inittimer(peerEntry *md, int ptms, gstimercallback cb);
/*
 * @brief : Send pfcp report request for periodic generation of CDR
* @param : urr, URR info for which we need to generte PFCP rep Req
* @param : cp_seid, seid of CP
* @param : up_seid, seid of UP
* @param : trig, Trig point of PFCP rep Req(VOL based or Time Based)
* @return : Returns 0 for succes and -1 failure
*/
int send_usage_report_req(urr_info_t *urr, uint64_t cp_seid, uint64_t up_seid, uint32_t trig);
/*
* @brief : fill duplicating parameter ie for user level packet copying or LI
* @param : far, pfcp create far
* @param : far_t, far data structure
* @return : Returns 0 on success -1 on failure
*/
int
fill_li_duplicating_params(pfcp_create_far_ie_t *far, far_info_t *far_t, pfcp_session_t *sess);
/*
* @brief : fill update duplicating parameter ie for user level packet copying or LI
* @param : far, pfcp create far
* @param : far_t, far data structure
* @return : Returns 0 on success -1 on failure
*/
int
fill_li_update_duplicating_param(pfcp_update_far_ie_t *far, far_info_t *far_t, pfcp_session_t *sess);
/*
* @brief : Get the PFCP recv msg and sent msg and Send it to required server
* @param : sess, The UE session for which we need to perform LI on EVENTS/IRI
* @param : buf_rx, PFCP msg recived in DP
* @param : buf_rx_size, Size of recived msgs
* @param : buf_tx, PFCP msg DP is sending
* @param : buf_tx_size, Size of PFCP msg DP is sending
* @param : peer_addr, peer address
* @return : Returns nothing
*/
int32_t
process_event_li(pfcp_session_t *sess, uint8_t *buf_rx, int buf_rx_size,
uint8_t *buf_tx, int buf_tx_size, peer_addr_t *peer_addr);
/*
* @brief : checks the cause id for pfd management
* @param : cause_id, will store cause value
* @param : offend_id, will store offend_id ,if offending id present
* @return : Returns nothing
*/
void
check_cause_id_pfd_mgmt(pfcp_pfd_contents_ie_t *pfd_content, uint8_t **cause_id, int **offend_id);
/**
* @brief :Extracts the pcc rules from rule table.
* @param :ip, cp_ip
 * @return : returns pointer to pcc rules on success, else returns null
*/
struct pcc_rules * get_pcc_rule(uint32_t ip);
/*
* @brief : process the rule msg for pfd management
* @param : pfd_context, contains info for rules.
* @param : msg_type, contains type of msg need to process
* @param : cp_ip, contains cp ip address
* @return : Returns nothing
*/
void
process_rule_msg(pfcp_pfd_contents_ie_t *pfd_content, uint64_t msg_type, uint32_t cp_ip, uint16_t idx);
/*
* @brief : process pfd management request
* @param : cause_id, will store cause value
* @param : offend_id, will store offen_id value
* @param : cp_ip, contains cp ip address
* @return : Returns nothing.
*/
void
process_up_pfd_mgmt_request(pfcp_pfd_mgmt_req_t *pfcp_pfd_mgmt_req, uint8_t *cause_id,
int *offend_id, uint32_t cp_ip);
/*
* @brief : remove cdr entry using seq no
* when receive response from CP
* @param : seq_no, seq_no in response as a key
* @param : up_seid, up seid as a key
* @return : 0 on success, else -1
*/
int
remove_cdr_entry(uint32_t seq_no, uint64_t up_seid);
/*
* @brief : dump CDR from usage report in
* pfcp-sess-rpt-req message
* @return : 0 on success,else -1
*/
int
store_cdr_into_file_pfcp_sess_rpt_req();
/*
* @brief : generate & store CDR from usage report
* when restoration begins
* @param : usage_report, to fill usage info from urr_t
* @param : up_seid, user plane session id
* @param : trig, cause for record close
* @param : seq_no, seq_no in msg used as a key to store CDR
* @param : ue_ip_addr, UE IPv4 address
* @param : ue_ipv6_addr, UE IPv6 address
* @return : 0 on success,else -1
*/
int
store_cdr_for_restoration(pfcp_usage_rpt_sess_del_rsp_ie_t *usage_report,
uint64_t up_seid, uint32_t trig,
uint32_t seq_no, uint32_t ue_ip_addr,
uint8_t ue_ipv6_addr[]);
/**
* @brief : Link session with CSID.
* @param : peer_csid,
* @param : sess, session info.
* @param : iface, interface info.
* @return : Returns 0 in case of success, cause value otherwise.
*/
int
link_dp_sess_with_peer_csid(fqcsid_t *peer_csid, pfcp_session_t *sess, uint8_t iface);
/**
 * @brief : Maintain CDR (Charging Data Record) related info.
 *          Volumes are traffic counters and times are URR timestamps —
 *          units presumably bytes / epoch seconds, TODO confirm against
 *          the URR accounting code.
 */
typedef struct dp_cdr {
	uint64_t uplink_volume;		/* uplink traffic volume */
	uint64_t downlink_volume;	/* downlink traffic volume */
	uint64_t total_volume;		/* uplink + downlink total */
	uint32_t duration_value;	/* duration covered by this record */
	uint32_t start_time;		/* record start timestamp */
	uint32_t end_time;		/* record end timestamp */
	uint32_t time_of_frst_pckt;	/* timestamp of first packet seen */
	uint32_t time_of_lst_pckt;	/* timestamp of last packet seen */
}cdr_t;
#endif /* PFCP_UP_SESS_H */
|
nikhilc149/e-utran-features-bug-fixes | cp_dp_api/config_validater.h | /*
* Copyright (c) 2020 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* This header previously had no multiple-inclusion guard; add one. */
#pragma once

/* Maximum lengths (bytes) for cfg keys, values and parse buffers. */
#define KEY_LEN 32
#define CFG_VALUE_LENGTH 64

/* Error codes returned by the is_valid_* helpers. */
#define KEY_NOT_FOUND 1
#define VALUE_FORMAT_NOT_CORRECT 2

#define BUFFER_SIZE 256
#define STARTING_INDEX 0

/* Lengths of textual address representations. */
#define IPV4_LEN 16
#define IPV6_LEN 24
#define MAC_ADDRESS_LEN 12
#define MAC_ADDRESS_SEPARTER 5	/* number of ':' separators in a MAC address (sic) */

/* Default config file paths. */
#define CP_CFG_PATH "../config/cp.cfg"
#define DP_CFG_PATH "../config/dp.cfg"

/* One cfg key/value pair plus the validator function applied to it. */
typedef struct {
	const char *key;
	const char *value;
	int (*fun_ptr)(char *, char *);
} cfg_data;

/* A named section of the cfg file. */
typedef struct {
	const char *section_name;
} section;
/**
 * @brief : validate that the value is an integer
 * @param : key, value
 * @return : Returns 0 on success, error code otherwise
 */
int is_valid_integer(char *key, char *value);

/**
 * @brief : validate the ipv4 pattern
 * @param : key, value
 * @return : Returns 0 on success, error code otherwise
 */
int is_valid_ipv4(char *key, char *value);

/**
 * @brief : validate the ipv4/ipv6 pattern
 *          (see also is_valid_ipv4_ipv6 below — TODO document how the
 *          two accepted formats differ)
 * @param : key, value
 * @return : Returns 0 on success, error code otherwise
 */
int is_valid_ipv4v6(char *key, char *value);

/**
 * @brief : validate the ipv6 pattern
 * @param : key, value
 * @return : Returns 0 on success, error code otherwise
 */
int is_valid_ipv6(char *key, char *value);

/**
 * @brief : validate the ipv4/ipv6 pattern
 * @param : key, value
 * @return : Returns 0 on success, error code otherwise
 */
int is_valid_ipv4_ipv6(char *key, char *value);

/**
 * @brief : validate the mac address
 * @param : key, value
 * @return : Returns 0 on success, error code otherwise
 */
int is_valid_mac(char *key, char *value);

/**
 * @brief : validate the string patterns
 * @param : key, value
 * @return : Returns 0 on success, error code otherwise
 */
int is_valid_string(char *key, char *value);

/**
 * @brief : validate the APN value (previous brief said "integer")
 * @param : value
 * @return : Returns 0 on success, error code otherwise
 */
int is_valid_apn(char *value);

/**
 * @brief : validate the alphanumeric value
 * @param : value
 * @return : Returns 0 on success, error code otherwise
 */
int is_valid_alphanumeric_value(char *value);

/**
 * @brief : validate the alpha value
 * @param : value
 * @return : Returns 0 on success, error code otherwise
 */
int is_valid_alpha_value(char *value);

/**
 * @brief : validate the interface value
 * @param : value
 * @return : Returns 0 on success, error code otherwise
 */
int is_valid_interface(char *value);

/**
 * @brief : read the cfg file and validate every key/value in it
 * @param : path, cfg file location
 * @return : Returns nothing
 */
void read_cfg_file(const char *path);
|
nikhilc149/e-utran-features-bug-fixes | interface/interface.h | <filename>interface/interface.h
/*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _INTERFACE_H_
#define _INTERFACE_H_
/**
* @file
* This file contains macros, data structure definitions and function
* prototypes of CP/DP module constructor and communication interface type.
*/
#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>
#include <rte_hash.h>
#include "vepc_cp_dp_api.h"
//#define RTE_LOGTYPE_CP RTE_LOGTYPE_USER4
extern int num_dp;	/* number of DP instances; defined in a .c elsewhere */

/**
 * @brief : CP DP communication channel type.
 */
enum cp_dp_comm {
	COMM_QUEUE,
	COMM_SOCKET,
	COMM_ZMQ,
	COMM_END,	/* sentinel: number of communication types */
};
/**
 * @brief : CP DP Communication message structure: one vtable entry per
 *          communication type (see enum cp_dp_comm).
 */
struct comm_node {
	int status;                                    /*set if initialized*/
	int (*init)(void);                             /*init function*/
	int (*send)(void *msg_payload, uint32_t size); /*send function*/
	int (*recv)(void *msg_payload, uint32_t size); /*receive function*/
	int (*destroy)(void);                          /*uninit and free function*/
};

/**
 * @brief : udp socket structure — local/remote addresses plus one fd per
 *          interface and address family.
 */
typedef struct udp_sock_t {
	struct sockaddr_in my_addr;	/* local address */
	struct sockaddr_in other_addr;	/* remote peer address */
	int sock_fd;			/* base IPv4 fd (presumably PFCP — TODO confirm) */
	int sock_fd_v6;			/* IPv6 counterpart of sock_fd */
	int sock_fd_s11;		/* S11 interface, IPv4 */
	int sock_fd_s11_v6;		/* S11 interface, IPv6 */
	int sock_fd_s5s8;		/* S5/S8 interface, IPv4 */
	int sock_fd_s5s8_v6;		/* S5/S8 interface, IPv6 */
} udp_sock_t;
/* Peer address holder; 'type' selects which of the two sockaddr fields is valid. */
typedef struct peer_addr_t {
	/* Setting IP Address type either IPv4:IPV4_TYPE:1 or IPv6:IPV6_TYPE:2 */
	uint8_t type;
	struct sockaddr_in ipv4;
	struct sockaddr_in6 ipv6;
} peer_addr_t;

/* NOTE(review): these two objects are *defined* (tentative definitions) in a
 * header, so every translation unit including it gets its own copy; this
 * breaks linking under -fno-common (GCC >= 10 default). They should be
 * declared 'extern' here and defined once in a .c file — confirm where the
 * single definition should live before changing. */
struct comm_node comm_node[COMM_END];
struct comm_node *active_comm_msg;
/**
 * @brief : Process PFCP message.
 * @param : buf_rx
 *          buf - message buffer.
 * @param : peer_addr
 *          client ip address structure
 * @param : is_ipv6, true when the message arrived over IPv6
 * @return : Returns 0 in case of success , -1 otherwise
 */
int process_pfcp_msg(uint8_t *buf_rx, peer_addr_t *peer_addr, bool is_ipv6);

/**
 * @brief : Initialize iface message passing
 *          This function is not thread safe and should only be called once by DP.
 * @param : No param
 * @return : Returns nothing
 */
void iface_module_constructor(void);

/**
 * @brief : Receive a message on the CP/DP UDP channel.
 *          (Previous comment said "Functino to handle signals", which does
 *          not match the name/signature — verify the intended semantics.)
 * @param : msg_payload, buffer to receive into
 * @param : size, buffer size
 * @param : peer_addr, out param, sender address
 * @param : is_ipv6, receive on the IPv6 socket when true
 * @return : int status/byte count — TODO confirm convention at the definition
 */
int
udp_recv(void *msg_payload, uint32_t size, peer_addr_t *peer_addr, bool is_ipv6);

/**
 * @brief : Function to create IPV6 UDP Socket.
 * @param : ipv6_addr, IPv6 IP address
 * @param : port, Port number to bind
 * @param : addr, structure to store IP address
 * @return : Returns fd if success, otherwise -1
 */
int create_udp_socket_v6(uint8_t ipv6_addr[], uint16_t port,
		peer_addr_t *addr);

/**
 * @brief : Function to create IPV4 UDP Socket.
 * @param : ipv4_addr, IPv4 IP address
 * @param : port, Port number to bind
 * @param : addr, structure to store IP address
 * @return : Returns fd if success, otherwise -1
 */
int create_udp_socket_v4(uint32_t ipv4_addr, uint16_t port,
		peer_addr_t *addr);
#ifdef CP_BUILD
/**
 * @brief Function to receive the IPC messages and process them.
 *
 * This function is not thread safe and should only be called once by CP.
 */
void process_cp_msgs(void);

#else /*END CP_BUILD*/

/**
 * @brief Function to receive the IPC messages and process them.
 *
 * This function is not thread safe and should only be called once by DP.
 */
void process_dp_msgs(void);
#endif /*DP_BUILD*/
#endif /* _INTERFACE_H_ */
|
nikhilc149/e-utran-features-bug-fixes | cp/cp_app.c | <reponame>nikhilc149/e-utran-features-bug-fixes
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <math.h>
#include "cp_config.h"
#include "cp_app.h"
#include "ipc_api.h"
#include "sm_arr.h"
#include "pfcp.h"
#include "pfcp_association.h"
#include "sm_pcnd.h"
#include "ue.h"
#include "gw_adapter.h"
#include "gtpv2c_error_rsp.h"
/* Monotonic CC-Request-Number shared by *all* Gx sessions in this process
 * (see the RFC 4006 numbering table in fill_ccr_request()). */
static uint32_t cc_request_number = 0;

extern pfcp_config_t config;
extern int clSystemLog;

/*Socket used by CP to listen for GxApp client connection */
int g_cp_sock_read = 0;
int g_cp_sock_read_v6 = 0;

/*Socket used by CP to write CCR and RAA*/
int gx_app_sock = 0;
int gx_app_sock_v6 = 0;

/*Socket used by CP to read CCR and RAA*/
int gx_app_sock_read = 0;
int gx_app_sock_read_v6 = 0;

/* NOTE(review): file-scope 'ret' is shadowed by locals in several functions
 * below and looks unused at file scope — candidate for removal. */
int ret ;
/**
 * @brief : Map a 3GPP RAT type (as carried in the CSR) to the Gx RAT-Type
 *          value used in the CCR. Unrecognised inputs leave the output
 *          untouched, exactly as the original if/else chain did.
 * @param : ccr_rat_type, out param, Gx RAT type
 * @param : csr_rat_type, RAT type from the create session request
 * @return : Returns nothing
 */
void
fill_rat_type_ie( int32_t *ccr_rat_type, uint8_t csr_rat_type )
{
	switch (csr_rat_type) {
	case EUTRAN_:
		*ccr_rat_type = GX_EUTRAN;
		break;
	case UTRAN:
		*ccr_rat_type = GX_UTRAN;
		break;
	case GERAN:
		*ccr_rat_type = GX_GERAN;
		break;
	case WLAN:
		*ccr_rat_type = GX_WLAN;
		break;
	case VIRTUAL:
		*ccr_rat_type = GX_VIRTUAL;
		break;
	case GAN:
		*ccr_rat_type = GX_GAN;
		break;
	case HSPA_EVOLUTION:
		*ccr_rat_type = GX_HSPA_EVOLUTION;
		break;
	default:
		/* unknown RAT type: leave *ccr_rat_type as-is */
		break;
	}
}
/**
 * @brief : Fill qos information AVP for the CCR: bearer identifier,
 *          APN AMBR and the bearer's MBR/GBR values.
 * @param : ccr_qos_info, structure to be filled
 * @param : bearer, bearer information
 * @param : apn_ambr, ambr details
 * @return : Returns nothing
 */
static void
fill_qos_info( GxQosInformation *ccr_qos_info,
		eps_bearer *bearer, ambr_ie *apn_ambr)
{

	/* VS: Fill the bearer identifier value */
	/* len = number of decimal digits of the EBI.
	 * NOTE(review): the copy below takes the *raw binary bytes* of
	 * eps_bearer_id, not its decimal string — looks like snprintf was
	 * intended; confirm against what the PCRF expects before changing. */
	ccr_qos_info->presence.bearer_identifier = PRESENT ;
	ccr_qos_info->bearer_identifier.len =
		(1 + (uint32_t)log10(bearer->eps_bearer_id));

	/* Defensive bound; with an 8-bit EBI the digit count never gets here. */
	if (ccr_qos_info->bearer_identifier.len >= 255) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Error: Insufficient memory to copy bearer identifier\n", LOG_VALUE);
		return;
	} else {
		strncpy((char *)ccr_qos_info->bearer_identifier.val,
				(char *)&bearer->eps_bearer_id,
				ccr_qos_info->bearer_identifier.len);
	}

	/* APN aggregate maximum bitrates, from the PDN's AMBR. */
	ccr_qos_info->presence.apn_aggregate_max_bitrate_ul = PRESENT;
	ccr_qos_info->presence.apn_aggregate_max_bitrate_dl = PRESENT;
	ccr_qos_info->apn_aggregate_max_bitrate_ul =
		apn_ambr->ambr_uplink;
	ccr_qos_info->apn_aggregate_max_bitrate_dl =
		apn_ambr->ambr_downlink;

	/* Per-bearer maximum bitrates. */
	ccr_qos_info->presence.max_requested_bandwidth_ul = PRESENT;
	ccr_qos_info->presence.max_requested_bandwidth_dl = PRESENT;
	ccr_qos_info->max_requested_bandwidth_ul =
		bearer->qos.ul_mbr;
	ccr_qos_info->max_requested_bandwidth_dl =
		bearer->qos.dl_mbr;

	/* Per-bearer guaranteed bitrates. */
	ccr_qos_info->presence.guaranteed_bitrate_ul = PRESENT;
	ccr_qos_info->presence.guaranteed_bitrate_dl = PRESENT;
	ccr_qos_info->guaranteed_bitrate_ul =
		bearer->qos.ul_gbr;
	ccr_qos_info->guaranteed_bitrate_dl =
		bearer->qos.dl_gbr;
}
/**
 * @brief : Populate the Default-EPS-Bearer-QoS AVP from the bearer's
 *          stored QoS: the QCI (only when it is one of the recognised
 *          values) and the full allocation/retention-priority triple.
 * @param : ccr_default_eps_bearer_qos, structure to be filled
 * @param : bearer, bearer data
 * @return : Returns Nothing
 */
static void
fill_default_eps_bearer_qos( GxDefaultEpsBearerQos *ccr_default_eps_bearer_qos,
		eps_bearer *bearer)
{
	const int qci = bearer->qos.qci;

	/* Recognised QCIs: 1..9 plus 65, 66, 69 and 70. Anything else
	 * (e.g. 0) leaves the QCI AVP absent, as before. */
	if ((QCI_1 <= qci && qci <= QCI_9) ||
			qci == QCI_65 || qci == QCI_66 ||
			qci == QCI_69 || qci == QCI_70) {
		ccr_default_eps_bearer_qos->presence.qos_class_identifier = PRESENT;
		ccr_default_eps_bearer_qos->qos_class_identifier = qci;
	}

	/* ARP: priority level, pre-emption capability and vulnerability. */
	ccr_default_eps_bearer_qos->presence.allocation_retention_priority = PRESENT;

	ccr_default_eps_bearer_qos->allocation_retention_priority.presence.priority_level = PRESENT;
	ccr_default_eps_bearer_qos->allocation_retention_priority.priority_level =
		bearer->qos.arp.priority_level;

	ccr_default_eps_bearer_qos->allocation_retention_priority.presence.pre_emption_capability = PRESENT;
	ccr_default_eps_bearer_qos->allocation_retention_priority.pre_emption_capability =
		bearer->qos.arp.preemption_capability;

	ccr_default_eps_bearer_qos->allocation_retention_priority.presence.pre_emption_vulnerability = PRESENT;
	ccr_default_eps_bearer_qos->allocation_retention_priority.pre_emption_vulnerability =
		bearer->qos.arp.preemption_vulnerability;
}
/**
 * @brief : Convert packed BCD bytes to an ASCII digit string.
 *          Each input byte yields two characters, low nibble first; the
 *          final high-nibble slot is overwritten by the NUL terminator
 *          (this deliberately drops the 0xF filler nibble of odd-length
 *          values, matching the original behaviour).
 * @param : [in] b_val : packed binary value
 * @param : [out] s_val : converted string value
 * @param : b_len : number of input bytes
 * @param : s_len : size of the output buffer
 * @return : void
 */
void
bin_to_str(unsigned char *b_val, char *s_val, int b_len, int s_len)
{
	if (NULL == b_val || NULL == s_val)
		return;

	/* Robustness fix: the original wrote s_val[-1] when b_len <= 0 and
	 * could overflow s_val when s_len < 2 * b_len. */
	if (b_len <= 0 || s_len < (b_len * 2))
		return;

	memset(s_val, 0, s_len);

	/* Byte 'AB' in b_val is converted to two bytes 'B', 'A' in s_val.
	 * (The original's separate handling of byte 0 was identical to the
	 * first loop iteration and has been folded in.) */
	for (int i = 0; i < b_len; ++i) {
		s_val[i * 2] = '0' + (b_val[i] & 0x0F);
		s_val[(i * 2) + 1] = '0' + ((b_val[i] >> 4) & 0x0F);
	}

	/* Terminate over the last high-nibble character. */
	s_val[(b_len * 2) - 1] = '\0';
}
/**
 * @brief : Pack a numeric IMSI into TBCD form: the decimal digits are
 *          paired two per output byte, first digit in the low nibble.
 *          When the digit count is odd, the final high nibble is set to
 *          the 0xF filler.
 * @param : imsi, IMSI as an integer
 * @param : imsi_len, number of output bytes to produce
 * @param : bin_imsi, out param, packed TBCD buffer
 * @return : Returns nothing
 */
void
encode_imsi_to_bin(uint64_t imsi, int imsi_len, uint8_t *bin_imsi)
{
	char digits[32] = {0};

	/* Render the IMSI as decimal text (max 20 digits for uint64_t,
	 * so the buffer is always NUL-terminated). */
	snprintf(digits, sizeof(digits), "%" PRIu64, imsi);
	size_t num_digits = strlen(digits);

	for (int idx = 0; idx < imsi_len; idx++) {
		uint8_t low = digits[idx * 2] & 0xF;
		uint8_t high = digits[idx * 2 + 1] & 0xF;
		bin_imsi[idx] = (uint8_t)((high << 4) | low);
	}

	/* Odd digit count: replace the garbage high nibble of the last
	 * byte with the 0xF filler. */
	if (num_digits % 2)
		bin_imsi[imsi_len - 1] =
			(uint8_t)(0xF0 | (digits[(imsi_len - 1) * 2] & 0xF));
}
/**
 * @brief : Fill the Subscription-Id AVP list with the UE identity.
 *          IMSI is preferred; MSISDN is used only when no IMSI is present.
 *          At most one entry is added (matching the original behaviour).
 * @param : subs_id, list to populate; its list buffer is allocated here
 * @param : imsi, UE IMSI (0 when unknown)
 * @param : msisdn, UE MSISDN (0 when unknown)
 * @return : Returns nothing; on allocation failure the list stays empty.
 */
void
fill_subscription_id( GxSubscriptionIdList *subs_id, uint64_t imsi, uint64_t msisdn )
{
	subs_id->count = 0;

	if( imsi != 0 ) {
		subs_id->list = rte_malloc_socket(NULL, sizeof( GxSubscriptionId),
				RTE_CACHE_LINE_SIZE, rte_socket_id());
		if(subs_id->list == NULL){
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Memory allocation fails\n",
					LOG_VALUE);
			/* Bug fix: bail out instead of dereferencing the NULL list below. */
			return;
		}

		subs_id->list[subs_id->count].presence.subscription_id_type = PRESENT;
		subs_id->list[subs_id->count].presence.subscription_id_data = PRESENT;
		subs_id->list[subs_id->count].subscription_id_type = END_USER_IMSI;
		subs_id->list[subs_id->count].subscription_id_data.len = STR_IMSI_LEN -1 ;

		/* IMSI is first packed to TBCD, then expanded to an ASCII digit
		 * string for the AVP payload. */
		uint8_t bin_imsi[32] = {0};
		encode_imsi_to_bin(imsi, BINARY_IMSI_LEN , bin_imsi);
		uint64_t temp_imsi = 0;
		memcpy(&temp_imsi, bin_imsi, BINARY_IMSI_LEN);

		bin_to_str((unsigned char*) (&temp_imsi),
				(char *)(subs_id->list[subs_id->count].subscription_id_data.val),
				BINARY_IMSI_LEN, STR_IMSI_LEN);

		subs_id->count++;

	} else if( msisdn != 0 ) {
		subs_id->list = rte_malloc_socket(NULL, sizeof( GxSubscriptionId),
				RTE_CACHE_LINE_SIZE, rte_socket_id());
		if(subs_id->list == NULL){
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Memory allocation fails\n",
					LOG_VALUE);
			/* Bug fix: bail out instead of dereferencing the NULL list below. */
			return;
		}

		subs_id->list[subs_id->count].presence.subscription_id_type = PRESENT;
		subs_id->list[subs_id->count].presence.subscription_id_data = PRESENT;
		subs_id->list[subs_id->count].subscription_id_type = END_USER_E164;
		subs_id->list[subs_id->count].subscription_id_data.len = STR_MSISDN_LEN;

		bin_to_str((unsigned char*) (&msisdn),
				(char *)(subs_id->list[subs_id->count].subscription_id_data.val),
				BINARY_MSISDN_LEN, STR_MSISDN_LEN);

		subs_id->count++;
	}
}
/**
 * @brief : Fill the User-Equipment-Info AVP with the UE identity,
 *          advertised as an IMEISV carried as 8 raw bytes.
 * @param : ccr_user_eq_info, AVP structure to fill
 * @param : csr_imei, IMEI(SV) value from the create session request
 * @return : Returns nothing
 */
void
fill_user_equipment_info( GxUserEquipmentInfo *ccr_user_eq_info, uint64_t csr_imei )
{
	ccr_user_eq_info->presence.user_equipment_info_type = PRESENT;
	ccr_user_eq_info->presence.user_equipment_info_value = PRESENT;

	ccr_user_eq_info->user_equipment_info_type = IMEISV;
	ccr_user_eq_info->user_equipment_info_value.len = sizeof(csr_imei);
	memcpy(ccr_user_eq_info->user_equipment_info_value.val, &csr_imei,
			sizeof(csr_imei));
}
/**
 * @brief : Copy the 3GPP MS time-zone octets from the CSR IE into the
 *          CCR AVP; the length is taken from the IE header.
 * @param : ccr_tgpp_ms_timezone, AVP octet string to fill
 * @param : csr_ue_timezone, UE time-zone IE from the create session request
 * @return : Returns nothing
 */
void
fill_3gpp_ue_timezone( Gx3gppMsTimezoneOctetString *ccr_tgpp_ms_timezone,
		gtp_ue_time_zone_ie_t csr_ue_timezone )
{
	ccr_tgpp_ms_timezone->len = csr_ue_timezone.header.len;
	memcpy(ccr_tgpp_ms_timezone->val, &(csr_ue_timezone.time_zone),
			ccr_tgpp_ms_timezone->len);
}
/**
 * @brief : Build a one-element Presence-Reporting-Area-Information list
 *          from the UE's PRA state: status (in/out/inactive) plus the
 *          3-byte PRA identifier.
 * @param : pra_info, list to populate; its list buffer is allocated here
 * @param : ue_pra_info, UE presence reporting area state
 * @return : Returns nothing; on allocation failure the list stays empty.
 */
void
fill_presence_rprtng_area_info(GxPresenceReportingAreaInformationList *pra_info,
		presence_reproting_area_info_t *ue_pra_info){

	GxPresenceReportingAreaInformation *entry = NULL;

	pra_info->count = 0;
	pra_info->list = rte_malloc_socket(NULL, sizeof( GxPresenceReportingAreaInformation),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
	if(pra_info->list == NULL){
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Memory allocation fails\n",
				LOG_VALUE);
		return;
	}
	memset(pra_info->list, 0, sizeof(GxPresenceReportingAreaInformation));

	entry = &pra_info->list[0];
	entry->presence.presence_reporting_area_identifier = 1;
	entry->presence.presence_reporting_area_status = 1;

	/* ipra: UE inside the area; opra: UE outside; otherwise inactive. */
	if (ue_pra_info->ipra)
		entry->presence_reporting_area_status = PRA_IN_AREA;
	else if (ue_pra_info->opra)
		entry->presence_reporting_area_status = PRA_OUT_AREA;
	else
		entry->presence_reporting_area_status = PRA_INACTIVE;

	entry->presence_reporting_area_identifier.len = 3*sizeof(uint8_t);
	memcpy(entry->presence_reporting_area_identifier.val,
			&ue_pra_info->pra_identifier,
			entry->presence_reporting_area_identifier.len);

	pra_info->count++;
	return;
}
/**
 * @brief : Fill the Credit Control Request (CCR) sent to the PCRF over Gx.
 * @param : ccr, message to populate; cc_request_type must already be set by
 *          the caller when its presence flag is PRESENT
 * @param : context, UE context
 * @param : ebi_index, index into context->eps_bearers (must be > 0 here)
 * @param : sess_id, Gx session id string (may be NULL)
 * @param : flow_flag, when 1 the QoS-Information AVP is skipped
 * @return : Returns 0 on success, -1 on failure
 */
int
fill_ccr_request(GxCCR *ccr, ue_context *context,
		int ebi_index, char *sess_id, uint8_t flow_flag)
{
	char apn[MAX_APN_LEN] = {0};

	eps_bearer *bearer = NULL;
	pdn_connection *pdn = NULL;

	/* NOTE(review): index 0 is rejected outright — confirm 0 is never a
	 * legal bearer slot before relying on this. */
	if (ebi_index > 0) {
		bearer = context->eps_bearers[ebi_index];
	} else {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID \n", LOG_VALUE);
		return -1;
	}

	pdn = GET_PDN(context, ebi_index);
	if ( pdn == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Failed to get pdn"
				" for ebi_index: %d\n",
				LOG_VALUE, ebi_index);
		return -1;
	}

	/* Assign the Session ID in the request */
	if (sess_id != NULL) {
		ccr->presence.session_id = PRESENT;
		ccr->session_id.len = strnlen(sess_id, MAX_LEN);
		memcpy(ccr->session_id.val, sess_id, ccr->session_id.len);
	}

	/* RFC 4006 section 8.2 */
	/* ============================================== */
	/* Cc Request Type | Cc Request number */
	/* ============================================== */
	/* Initial Request -- 0 */
	/* Event Request -- 0 */
	/* Update Request_1 -- 1 */
	/* Update Request_2 -- 2 */
	/* Update Request_n -- n */
	/* Termination Request -- n + 1 */

	/* VS: Handle the Multiple Msg type request.
	 * The request number comes from the file-scope cc_request_number
	 * counter, which is shared across all sessions in this process. */
	if (ccr->presence.cc_request_type == PRESENT) {
		switch(ccr->cc_request_type) {
			case INITIAL_REQUEST: {
				ccr->presence.cc_request_number = PRESENT;
				/* Make this number generic */
				ccr->cc_request_number = 0 ;

				/* TODO: Need to Check condition handling */
				ccr->presence.ip_can_type = PRESENT;
				ccr->ip_can_type = TGPP_GPRS;

				break;
			}
			case UPDATE_REQUEST:
				ccr->presence.cc_request_number = PRESENT;
				ccr->cc_request_number = ++cc_request_number ;
				break;

			case TERMINATION_REQUEST:
				ccr->presence.cc_request_number = PRESENT;
				/* Make this number generic */
				ccr->cc_request_number = ++cc_request_number ;

				/* TODO: Need to Check condition handling */
				ccr->presence.ip_can_type = PRESENT;
				ccr->ip_can_type = TGPP_GPRS;

				break;

			default:
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Unknown "
					"Message type request %s \n", LOG_VALUE, strerror(errno));
				return -1;
		}
	}

	/* TODO: Need to Discuss to make following AVP's generic or
	 * to be based on MSG TYPE OR condition basis */

	/* Fill the APN Vaule */
	if ((pdn->apn_in_use)->apn_name_length != 0) {
		ccr->presence.called_station_id = PRESENT;
		get_apn_name((pdn->apn_in_use)->apn_name_label, apn);
		ccr->called_station_id.len = strnlen(apn, MAX_APN_LEN);
		memcpy(ccr->called_station_id.val, apn, ccr->called_station_id.len);

	}

	/* Fill the RAT type in CCR */
	if( context->rat_type.len != 0 ){
		ccr->presence.rat_type = PRESENT;
		fill_rat_type_ie(&ccr->rat_type, context->rat_type.rat_type);
	}

	/* Set the bearer eps qos values received in CSR */
	ccr->presence.default_eps_bearer_qos = PRESENT;
	fill_default_eps_bearer_qos( &(ccr->default_eps_bearer_qos),
			bearer);

	/* Set the bearer apn ambr and Uplink/Downlink MBR/GBR values received in CSR */
	if (flow_flag != 1) {
		ccr->presence.qos_information = PRESENT;
		fill_qos_info(&(ccr->qos_information), bearer, &pdn->apn_ambr);
	}

	/* Need to Handle IMSI and MSISDN */
	if( context->imsi != 0 || context->msisdn != 0 )
	{
		ccr->presence.subscription_id = PRESENT;
		fill_subscription_id( &ccr->subscription_id, context->imsi, context->msisdn );
	}

	/* TODO Need to check later on */
	if(context->mei != 0)
	{
		ccr->presence.user_equipment_info = PRESENT;
		fill_user_equipment_info( &(ccr->user_equipment_info), context->mei );
	}

	/* PRA info is attached only when the corresponding event trigger is
	 * armed; the pra_flag is consumed (reset) here. */
	if(context->pra_flag &&
			(context->event_trigger &
			 1UL << CHANGE_OF_UE_PRESENCE_IN_PRESENCE_REPORTING_AREA_REPORT)){
		ccr->presence.presence_reporting_area_information = PRESENT;
		fill_presence_rprtng_area_info(&(ccr->presence_reporting_area_information),
				&context->pre_rptng_area_info);
		context->pra_flag = 0;
	}

	return 0;
}
void
process_create_bearer_resp_and_send_raa( int sock )
{
char *send_buf = NULL;
uint32_t buflen ;
gx_msg *resp = malloc(sizeof(gx_msg));
memset(resp, 0, sizeof(gx_msg));
/* Filling Header value of RAA */
resp->msg_type = GX_RAA_MSG ;
/* Cal the length of buffer needed */
buflen = gx_raa_calc_length (&resp->data.cp_raa);
resp->msg_len = buflen + GX_HEADER_LEN;
send_buf = malloc(resp->msg_len);
memset(send_buf, 0, resp->msg_len);
/* encoding the raa header value to buffer */
memcpy( send_buf, &resp->msg_type, sizeof(resp->msg_type));
memcpy( send_buf + sizeof(resp->msg_type),
&resp->msg_len,
sizeof(resp->msg_len));
if ( gx_raa_pack(&(resp->data.cp_raa),
(unsigned char *)(send_buf + GX_HEADER_LEN),
buflen ) == 0 ){
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"RAA Packing failure on sock [%d] \n", LOG_VALUE, sock);
}
if (resp != NULL) {
free(resp);
resp = NULL;
}
free(send_buf);
}
/**
 * @brief : Read one IPC buffer from GxApp and dispatch every stacked Gx
 *          message in it (CCA/RAR) through the per-gateway state machines.
 *          Exits the process when the IPC channel is closed.
 * @param : No param
 * @return : Returns 0 on success, -1 on the first failed message
 *
 * NOTE(review): the framing loop trusts gxmsg->msg_len completely — a
 * zero msg_len would spin forever and an oversized one would walk past
 * recv_buf. Assumes GxApp framing is well formed; confirm.
 */
int
msg_handler_gx( void )
{
	int bytes_rx = 0;
	int ret = 0;
	msg_info msg = {0};
	gx_msg *gxmsg = NULL;
	char recv_buf[BUFFSIZE] = {0};
	uint16_t msg_len = 0;

	bytes_rx = recv_from_ipc_channel(gx_app_sock_read, recv_buf);
	if(bytes_rx <= 0 ){
		close_ipc_channel(gx_app_sock_read);
		/* Greacefull Exit */
		exit(0);
	}

	/* One recv may carry several back-to-back gx messages; msg_len is the
	 * running offset of the current one inside recv_buf. */
	while(bytes_rx > 0) {
		gxmsg = (gx_msg *)(recv_buf + msg_len);

		if ((ret = gx_pcnd_check(gxmsg, &msg)) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Failure in gx "
				"precondion check\n",LOG_VALUE);
			if(msg.msg_type == GX_CCA_MSG)
				gx_cca_error_response(ret, &msg);
			if(msg.msg_type == GX_RAR_MSG)
				gen_reauth_error_resp_for_wrong_seid_rcvd(&msg, gxmsg, ret);
			/* Abort only when this was the last (or only) message. */
			if(bytes_rx == gxmsg->msg_len)
				return -1;
		}else{
			/* Dispatch via the state machine matching the gateway role. */
			if ((msg.proc < END_PROC) && (msg.state < END_STATE) && (msg.event < END_EVNT)) {
				if (SGWC == msg.cp_mode) {
					ret = (*state_machine_sgwc[msg.proc][msg.state][msg.event])(&msg, gxmsg);
				} else if (PGWC == msg.cp_mode) {
					ret = (*state_machine_pgwc[msg.proc][msg.state][msg.event])(&msg, gxmsg);
				} else if (SAEGWC == msg.cp_mode) {
					ret = (*state_machine_saegwc[msg.proc][msg.state][msg.event])(&msg, gxmsg);
				} else {
					if(bytes_rx == gxmsg->msg_len)
						return -1;
				}

				if (ret) {
					if(bytes_rx == gxmsg->msg_len)
						return -1;
				}
			} else {
				if(bytes_rx == gxmsg->msg_len)
					return -1;
			}
		}
		/* Advance to the next stacked message. */
		msg_len += gxmsg->msg_len;
		bytes_rx = bytes_rx - gxmsg->msg_len;
	}
	return 0;
}
/**
 * @brief : Establish the two unix-domain IPC channels between CP and GxApp:
 *          a listening socket GxApp connects to (read side) and a client
 *          socket towards GxApp (write side). Blocks, retrying once per
 *          second, until the write-side connect succeeds.
 * @param : No param
 * @return : Returns nothing; exits the process on unrecoverable socket errors.
 */
void
start_cp_app(void )
{
	struct sockaddr_un cp_app_sockaddr_read = {0};
	struct sockaddr_un gx_app_sockaddr_read = {0};

	/* Socket Creation */
	g_cp_sock_read = create_ipc_channel();
	if (g_cp_sock_read < 0) {
		/*Gracefully exit*/
		exit(0);
	}

	/* Bind the socket*/
	bind_ipc_channel(g_cp_sock_read, cp_app_sockaddr_read, SERVER_PATH);

	/* Mark the socket fd for listen */
	listen_ipc_channel(g_cp_sock_read);

	/* Accept incomming connection request receive on socket */
	gx_app_sock_read = accept_from_ipc_channel( g_cp_sock_read, gx_app_sockaddr_read);
	/* Bug fix: this previously re-tested g_cp_sock_read, so a failed
	 * accept() went undetected. */
	if (gx_app_sock_read < 0) {
		/*Gracefully exit*/
		exit(0);
	}

	/* Remove this sleep to resolved the delay issue in between CSR and CCR */
	/* sleep(5); */
	int ret = -1;
	while (ret) {
		struct sockaddr_un app_sockaddr = {0};
		gx_app_sock = create_ipc_channel();
		ret = connect_to_ipc_channel(gx_app_sock, app_sockaddr, CLIENT_PATH );
		if (ret < 0) {
			printf("Trying to connect to GxApp...\n");
			/* NOTE(review): each failed attempt creates a fresh socket
			 * without closing the previous one — consider closing
			 * gx_app_sock here to avoid an fd leak during long retries. */
		}
		sleep(1);
	}

	printf("Succesfully connected to GxApp...!!!\n");
}
/**
 * @brief : Free every list the Gx CCA decoder allocated inside the message:
 *          Charging-Rule-Install (with its nested rule definitions and flow
 *          information), Event-Trigger, QoS-Information and Route-Record.
 *          Frees run innermost-first and each pointer is NULLed after free.
 * @param : cca, decoded CCA message
 * @return : Returns nothing
 */
void
free_cca_msg_dynamically_alloc_memory(GxCCA *cca) {
	if (cca->presence.charging_rule_install) {
		for (uint8_t itr = 0; itr < cca->charging_rule_install.count; itr++) {
			if (cca->charging_rule_install.list[itr].presence.charging_rule_definition) {
				/* Flow-information lists hang off each rule definition. */
				for (uint8_t itr1 = 0; itr1 < cca->charging_rule_install.list[itr].charging_rule_definition.count; itr1++) {
					if (cca->charging_rule_install.list[itr].charging_rule_definition.list[itr1].presence.flow_information) {
						free(cca->charging_rule_install.list[itr].charging_rule_definition.list[itr1].flow_information.list);
						cca->charging_rule_install.list[itr].charging_rule_definition.list[itr1].flow_information.list = NULL;
					}
				}
				free(cca->charging_rule_install.list[itr].charging_rule_definition.list);
				cca->charging_rule_install.list[itr].charging_rule_definition.list = NULL;
			}
		}
		free(cca->charging_rule_install.list);
		cca->charging_rule_install.list = NULL;
	}

	if (cca->presence.event_trigger) {
		free(cca->event_trigger.list);
		cca->event_trigger.list = NULL;
	}

	if (cca->presence.qos_information) {
		free(cca->qos_information.list);
		cca->qos_information.list = NULL;
	}

	if (cca->presence.route_record) {
		free(cca->route_record.list);
		cca->route_record.list = NULL;
	}
}
|
nikhilc149/e-utran-features-bug-fixes | cp/ue.c | <reponame>nikhilc149/e-utran-features-bug-fixes
/*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ue.h"
#include "cp.h"
#include "interface.h"
#include "gw_adapter.h"
#include "sm_struct.h"
#include "teid.h"
extern pfcp_config_t config;
extern int clSystemLog;
/* Hash tables created in create_ue_hash()/create_li_info_hash() below;
 * keys are TEIDs (uint32_t) or IMSI/SEID/node values (uint64_t) — see the
 * corresponding rte_hash_parameters for each table. */
struct rte_hash *ue_context_by_imsi_hash;
struct rte_hash *ue_context_by_fteid_hash;
struct rte_hash *bearer_by_fteid_hash;
struct rte_hash *li_info_by_id_hash;
struct rte_hash *li_id_by_imsi_hash;
struct rte_hash *ue_context_by_sender_teid_hash;
struct rte_hash *timer_by_teid_hash;
struct rte_hash *ddn_by_seid_hash;
struct rte_hash *dl_timer_by_teid_hash;
struct rte_hash *pfcp_rep_by_seid_hash;
struct rte_hash *thrtl_timer_by_nodeip_hash;
struct rte_hash *thrtl_ddn_count_hash;
struct rte_hash *buffered_ddn_req_hash;

apn apn_list[MAX_NB_DPN];	/* configured APNs */
int total_apn_cnt;		/* number of valid entries in apn_list */
void
create_ue_hash(void)
{
struct rte_hash_parameters rte_hash_params = {
.name = "bearer_by_imsi_hash",
.entries = LDB_ENTRIES_DEFAULT,
.key_len = sizeof(uint64_t),
.hash_func = rte_jhash,
.hash_func_init_val = 0,
.socket_id = rte_socket_id(),
};
ue_context_by_imsi_hash = rte_hash_create(&rte_hash_params);
if (!ue_context_by_imsi_hash) {
rte_panic("%s hash create failed: %s (%u)\n.",
rte_hash_params.name,
rte_strerror(rte_errno), rte_errno);
}
rte_hash_params.name = "bearer_by_fteid_hash";
rte_hash_params.key_len = sizeof(uint32_t);
ue_context_by_fteid_hash = rte_hash_create(&rte_hash_params);
if (!ue_context_by_fteid_hash) {
rte_panic("%s hash create failed: %s (%u)\n.",
rte_hash_params.name,
rte_strerror(rte_errno), rte_errno);
}
rte_hash_params.name = "bearer_by_teid_hash";
rte_hash_params.key_len = sizeof(uint32_t);
bearer_by_fteid_hash = rte_hash_create(&rte_hash_params);
if (!bearer_by_fteid_hash) {
rte_panic("%s hash create failed: %s (%u)\n.",
rte_hash_params.name,
rte_strerror(rte_errno), rte_errno);
}
rte_hash_params.name = "ue_context_by_sender_teid_hash";
rte_hash_params.key_len = sizeof(uint32_t);
ue_context_by_sender_teid_hash = rte_hash_create(&rte_hash_params);
if (!ue_context_by_sender_teid_hash) {
rte_panic("%s hash create failed: %s (%u)\n.",
rte_hash_params.name,
rte_strerror(rte_errno), rte_errno);
}
rte_hash_params.name = "timer_by_teid_hash";
rte_hash_params.key_len = sizeof(uint32_t);
timer_by_teid_hash = rte_hash_create(&rte_hash_params);
if (!timer_by_teid_hash) {
rte_panic("%s hash create failed: %s (%u)\n.",
rte_hash_params.name,
rte_strerror(rte_errno), rte_errno);
}
rte_hash_params.name = "ddn_request_by_session_id_hash";
rte_hash_params.key_len = sizeof(uint64_t);
ddn_by_seid_hash = rte_hash_create(&rte_hash_params);
if (!ddn_by_seid_hash) {
rte_panic("%s hash create failed: %s (%u)\n.",
rte_hash_params.name,
rte_strerror(rte_errno), rte_errno);
}
rte_hash_params.name = "dl_timer_by_teid_hash";
rte_hash_params.key_len = sizeof(uint32_t);
dl_timer_by_teid_hash = rte_hash_create(&rte_hash_params);
if (!dl_timer_by_teid_hash) {
rte_panic("%s hash create failed: %s (%u)\n.",
rte_hash_params.name,
rte_strerror(rte_errno), rte_errno);
}
rte_hash_params.name = "pfcp_rep_by_session_id_hash";
rte_hash_params.key_len = sizeof(uint64_t);
pfcp_rep_by_seid_hash = rte_hash_create(&rte_hash_params);
if (!pfcp_rep_by_seid_hash) {
rte_panic("%s hash create failed: %s (%u)\n.",
rte_hash_params.name,
rte_strerror(rte_errno), rte_errno);
}
rte_hash_params.name = "thrtl_timer_by_nodeip_hash";
rte_hash_params.key_len = sizeof(uint64_t);
thrtl_timer_by_nodeip_hash = rte_hash_create(&rte_hash_params);
if (!thrtl_timer_by_nodeip_hash) {
rte_panic("%s hash create failed: %s (%u)\n.",
rte_hash_params.name,
rte_strerror(rte_errno), rte_errno);
}
rte_hash_params.name = "thrtl_ddn_count_hash";
rte_hash_params.key_len = sizeof(uint64_t);
thrtl_ddn_count_hash = rte_hash_create(&rte_hash_params);
if (!thrtl_ddn_count_hash) {
rte_panic("%s hash create failed: %s (%u)\n.",
rte_hash_params.name,
rte_strerror(rte_errno), rte_errno);
}
rte_hash_params.name = "buffered_ddn_req_hash";
rte_hash_params.key_len = sizeof(uint64_t);
buffered_ddn_req_hash = rte_hash_create(&rte_hash_params);
if (!buffered_ddn_req_hash) {
rte_panic("%s hash create failed: %s (%u)\n.",
rte_hash_params.name,
rte_strerror(rte_errno), rte_errno);
}
}
void
create_li_info_hash(void)
{
struct rte_hash_parameters rte_hash_params = {
.name = "li_info_by_id_hash",
.entries = LI_LDB_ENTRIES_DEFAULT,
.key_len = sizeof(uint64_t),
.hash_func = rte_jhash,
.hash_func_init_val = 0,
.socket_id = rte_socket_id(),
};
li_info_by_id_hash = rte_hash_create(&rte_hash_params);
if (!li_info_by_id_hash) {
rte_panic("%s hash create failed: %s (%u)\n.",
rte_hash_params.name,
rte_strerror(rte_errno), rte_errno);
}
rte_hash_params.name = "li_id_by_imsi_hash";
li_id_by_imsi_hash = rte_hash_create(&rte_hash_params);
if (!li_id_by_imsi_hash) {
rte_panic("%s hash create failed: %s (%u)\n.",
rte_hash_params.name,
rte_strerror(rte_errno), rte_errno);
}
}
/*
 * Encode a dotted APN string (e.g. "internet.mnc012.mcc345") into the
 * length-prefixed label format used on the wire: each '.' separator is
 * replaced by the length of the label that follows it, and the first
 * byte holds the length of the first label.
 *
 * NOTE(review): strnlen() is bounded by MAX_NB_DPN, which by its name is
 * the number of data planes, not a maximum APN-name length — confirm the
 * intended bound.
 */
void
set_apn_name(apn *an_apn, char *argstr)
{
	if (argstr == NULL)
		rte_panic("APN Name argument not set\n");

	/* +1 byte for the leading length prefix of the first label */
	an_apn->apn_name_length = strnlen(argstr,MAX_NB_DPN) + 1;
	an_apn->apn_name_label = rte_zmalloc_socket(NULL, an_apn->apn_name_length,
		RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (an_apn->apn_name_label == NULL)
		rte_panic("Failure to allocate apn_name_label buffer: "
				"%s (%s:%d)\n",
				rte_strerror(rte_errno),
				__FILE__,
				__LINE__);
	/* Don't copy NULL termination */
	strncpy(an_apn->apn_name_label + 1, argstr, strnlen(argstr,MAX_NB_DPN));

	/* Walk backwards from the last character, converting each '.' into
	 * the length of the label to its right.  'size' stays pinned at the
	 * first byte (the prefix slot) and accumulates the running label
	 * length; it starts at 1 because the final character at the end of
	 * the buffer is never visited by the loop below. */
	char *ptr, *size;
	size = an_apn->apn_name_label;
	*size = 1;
	ptr = an_apn->apn_name_label + strnlen(argstr,MAX_NB_DPN) - 1;
	do {
		if (ptr == size)
			break;
		if (*ptr == '.') {
			/* store the accumulated label length at the dot position
			 * and restart the count for the next label to the left */
			*ptr = *size;
			*size = 0;
		} else {
			(*size)++;
		}
		--ptr;
	} while (ptr != an_apn->apn_name_label);
}
/*
 * Debug helper: dump a single UE context (when 'context' is non-NULL)
 * followed by every context stored in hash table 'h' (when non-NULL).
 * Each entry logs IMSI, MEI, MSISDN, the S11 SGW GTP-C TEID/IP and the
 * per-bearer bitmap at debug severity.
 */
void
print_ue_context_by(struct rte_hash *h, ue_context *context)
{
	uint64_t *key;
	uint32_t iter_pos = 0;
	int32_t rc;
	int bit;

	clLog(clSystemLog, eCLSeverityDebug,LOG_FORMAT" %16s %1s %16s %16s %8s %8s %11s\n", LOG_VALUE, "imsi", "u", "mei",
			"msisdn", "s11-teid", "s11-ipv4", "56789012345");

	if (context != NULL) {
		clLog(clSystemLog, eCLSeverityDebug,LOG_FORMAT"*%16lx %1lx %16lx %16lx %8x %15s ", LOG_VALUE, context->imsi,
				(uint64_t) context->unathenticated_imsi, context->mei,
				context->msisdn, context->s11_sgw_gtpc_teid,
				inet_ntoa(*((struct in_addr *)&context->s11_sgw_gtpc_ip.ipv4_addr)));
		for (bit = 0; bit < MAX_BEARERS; ++bit) {
			clLog(clSystemLog, eCLSeverityDebug,LOG_FORMAT "Bearer bitmap %c",
					LOG_VALUE, (context->bearer_bitmap & (1 << bit))
					? '1' : '0');
		}
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"\t0x%04x\n", LOG_VALUE, context->bearer_bitmap);
	}

	if (h == NULL)
		return;

	/* Walk every (key, context) pair stored in the table. */
	for (;;) {
		rc = rte_hash_iterate(h, (const void **) &key,
				(void **) &context, &iter_pos);
		if (rc < 0)
			break;
		clLog(clSystemLog, eCLSeverityDebug,LOG_FORMAT" %16lx %1lx %16lx %16lx %8x %15s ", LOG_VALUE,
				context->imsi,
				(uint64_t) context->unathenticated_imsi,
				context->mei,
				context->msisdn, context->s11_sgw_gtpc_teid,
				inet_ntoa(*((struct in_addr *)&context->s11_sgw_gtpc_ip.ipv4_addr)));
		for (bit = 0; bit < MAX_BEARERS; ++bit) {
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT "Bearer bitmap %c",
					LOG_VALUE, (context->bearer_bitmap & (1 << bit))
					? '1' : '0');
		}
		clLog(clSystemLog, eCLSeverityDebug,LOG_FORMAT "\t0x%4x", LOG_VALUE, context->bearer_bitmap);
		puts("");
	}
}
int
add_bearer_entry_by_sgw_s5s8_tied(uint32_t fteid_key, struct eps_bearer_t **bearer)
{
int8_t ret = 0;
ret = rte_hash_add_key_data(bearer_by_fteid_hash,
(const void *) &fteid_key, (void *) (*bearer));
if (ret < 0) {
clLog(clSystemLog, eCLSeverityCritical,
"%s - Error on rte_hash_add_key_data add\n",
strerror(ret));
return GTPV2C_CAUSE_SYSTEM_FAILURE;
}
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Added bearer entry by sgw_s5s8_teid:%u\n", LOG_VALUE, fteid_key);
return 0;
}
/*
 * Look up or create the UE context for the given IMSI, index it in the
 * IMSI and S11-SGW-TEID hash tables, and ensure a default bearer and PDN
 * connection exist for the requested EBI.
 *
 * @param  imsi_val               : raw IMSI bytes (copied into a 64-bit key).
 * @param  imsi_len               : number of valid bytes in imsi_val.
 * @param  ebi                    : EPS bearer id for the default bearer.
 * @param  context                : out; set to the found/created UE context.
 * @param  apn_requested          : APN the session is being created for.
 * @param  sequence               : CSR sequence number, used to detect retransmission.
 * @param  check_if_ue_hash_exist : out; set to 1 when the UE already existed.
 * @param  cp_type                : CP mode (SGWC/PGWC/SAEGWC).
 * @return 0 on success, GTPC_RE_TRANSMITTED_REQ for a repeated CSR,
 *         GTPV2C_CAUSE_SYSTEM_FAILURE (or -1) on error.
 */
int
create_ue_context(uint64_t *imsi_val, uint16_t imsi_len,
	uint8_t ebi, ue_context **context, apn *apn_requested,
	uint32_t sequence, uint8_t *check_if_ue_hash_exist,
	uint8_t cp_type)
{
	int ret;
	int ebi_index;
	uint64_t imsi = UINT64_MAX;
	pdn_connection *pdn = NULL;
	eps_bearer *bearer = NULL;

	/* Pack the raw IMSI bytes into a fixed 64-bit key for hashing. */
	memcpy(&imsi, imsi_val, imsi_len);

	ebi_index = GET_EBI_INDEX(ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		return GTPV2C_CAUSE_SYSTEM_FAILURE;
	}

	ret = rte_hash_lookup_data(ue_context_by_imsi_hash, &imsi,
		(void **) &(*context));
	if (ret == -ENOENT) {
		/* First session for this IMSI: allocate a fresh context and
		 * index it by IMSI. */
		(*context) = rte_zmalloc_socket(NULL, sizeof(ue_context),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (*context == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate "
				"Memory for Context, Error: %s \n", LOG_VALUE,
				rte_strerror(rte_errno));
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}
		(*context)->imsi = imsi;
		(*context)->imsi_len = imsi_len;
		ret = rte_hash_add_key_data(ue_context_by_imsi_hash,
			(const void *) &(*context)->imsi, (void *) (*context));
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT
				"%s - Error on rte_hash_add_key_data add\n", LOG_VALUE,
				strerror(ret));
			rte_free((*context));
			*context = NULL;
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}
	} else {
		/* VS: TODO: Need to think on this, flush entry when received DSR*/
		RTE_SET_USED(apn_requested);
		*check_if_ue_hash_exist = 1;
		/* NOTE(review): ebi_index was already validated above, so this
		 * second check can never fire. */
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
			return -1;
		}
		/* Same UE, same CSR sequence number => retransmitted request. */
		if((*context)->eps_bearers[ebi_index] != NULL ) {
			pdn = (*context)->eps_bearers[ebi_index]->pdn;
			if(pdn != NULL ) {
				if(pdn->csr_sequence == sequence )
				{
					/* Discarding re-transmitted csr */
					return GTPC_RE_TRANSMITTED_REQ;
				}
			}
		}
	}

	/* Assign (or retain) the S11 SGW GTP-C TEID and index the context by it. */
	(*context)->s11_sgw_gtpc_teid =
		get_s11_sgw_gtpc_teid(check_if_ue_hash_exist, cp_type, (*context)->s11_sgw_gtpc_teid);

	ret = rte_hash_add_key_data(ue_context_by_fteid_hash,
		(const void *) &(*context)->s11_sgw_gtpc_teid,
		(void *) (*context));
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT
			"%s - Error on ue_context_by_fteid_hash add\n", LOG_VALUE,
			strerror(ret));
		/* Roll back the IMSI-hash insertion so the tables stay consistent. */
		ret = rte_hash_del_key(ue_context_by_imsi_hash,
			(const void *) &(*context)->imsi);
		if (ret < 0) {
			/* If we get here something bad happened. The
			 * context that was added to
			 * ue_context_by_imsi_hash above was not able
			 * to be removed.
			 */
			rte_panic("%s - Error on "
				"ue_context_by_imsi_hash del\n",
				strerror(ret));
		}
		if (*context != NULL) {
			rte_free((*context));
			*context = NULL;
		}
		return GTPV2C_CAUSE_SYSTEM_FAILURE;
	}

	bearer = (*context)->eps_bearers[ebi_index];
	if (bearer) {
		if (pdn) {
			/* created session is overwriting old session... */
			/* ...clean up old session's dedicated bearers */
			for (int i = 0; i < MAX_BEARERS; ++i) {
				if (!pdn->eps_bearers[i])
					continue;
				if (i == ebi_index) {
					/* default bearer slot is reused in place */
					bzero(bearer, sizeof(*bearer));
					continue;
				}
				rte_free(pdn->eps_bearers[i]);
				pdn->eps_bearers[i] = NULL;
				(*context)->eps_bearers[i] = NULL;
				/* NOTE(review): this clears the ebi_index bit on every
				 * iteration; clearing bit 'i' may have been intended. */
				(*context)->bearer_bitmap &= ~(1 << ebi_index);
			}
		} else {
			/* created session is creating a default bearer in place */
			/* of a different pdn connection's dedicated bearer */
			bearer->pdn->eps_bearers[ebi_index] = NULL;
			bzero(bearer, sizeof(*bearer));
			pdn = rte_zmalloc_socket(NULL,
				sizeof(struct pdn_connection_t),
				RTE_CACHE_LINE_SIZE, rte_socket_id());
			if (pdn == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate "
					"Memory for PDN, Error: %s \n", LOG_VALUE,
					rte_strerror(rte_errno));
				return GTPV2C_CAUSE_SYSTEM_FAILURE;
			}
			pdn->num_bearer++;
			(*context)->pdns[ebi_index] = pdn;
			(*context)->num_pdns++;
			pdn->eps_bearers[ebi_index] = bearer;
		}
	} else {
		/*
		 * Allocate default bearer
		 */
		bearer = rte_zmalloc_socket(NULL, sizeof(eps_bearer),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (bearer == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate "
				"Memory for Bearer, Error: %s \n", LOG_VALUE,
				rte_strerror(rte_errno));
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}
		bearer->eps_bearer_id = ebi;
		int ret = get_pdn(&(*context), apn_requested, &pdn);
		/* NOTE : APN comparison has been done to handle
		 * the case of multiple PDN;
		 * In multiple PDN, each PDN will have a unique apn*/
		if(ret < 0) {
			/* No PDN yet for this APN: create one and register the
			 * bearer as its default bearer. */
			pdn = rte_zmalloc_socket(NULL, sizeof(struct pdn_connection_t),
				RTE_CACHE_LINE_SIZE, rte_socket_id());
			if (pdn == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate "
					"Memory for PDN, Error: %s \n", LOG_VALUE,
					rte_strerror(rte_errno));
				return GTPV2C_CAUSE_SYSTEM_FAILURE;
			}
			pdn->num_bearer++;
			(*context)->eps_bearers[ebi_index] = bearer;
			(*context)->pdns[ebi_index] = pdn;
			(*context)->num_pdns++;
			(*context)->bearer_bitmap |= (1 << ebi_index);
			pdn->eps_bearers[ebi_index] = bearer;
			pdn->default_bearer_id = ebi;
		} else {
			/* Existing PDN for this APN: just attach the new bearer. */
			pdn->num_bearer++;
			(*context)->eps_bearers[ebi_index] = bearer;
			pdn->eps_bearers[ebi_index] = bearer;
		}
	}

	/* Reset the packet-filter map and link the bearer to its PDN. */
	for (int i = 0; i < MAX_FILTERS_PER_UE; ++i)
		bearer->packet_filter_map[i] = -ENOENT;

	bearer->pdn = pdn;
	bearer->eps_bearer_id = ebi;
	pdn->apn_in_use = apn_requested;
	return 0;
}
/*
 * Resolve an APN label to its entry in the configured apn_list.  If the
 * label is not configured, a new apn object is allocated on the fly and
 * populated with the global default thresholds and IP pools.
 *
 * @param  apn_label  : label-encoded APN name (not NUL-terminated).
 * @param  apn_length : length of apn_label in bytes.
 * @return pointer to the matching or newly allocated apn.
 *
 * NOTE(review): the dynamically allocated apn is never freed (see TODO
 * below), and the rte_panic() calls make the subsequent 'return NULL'
 * statements unreachable.
 */
apn *
get_apn(char *apn_label, uint16_t apn_length)
{
	int i;
	/* Linear scan of the configured APN table for an exact match. */
	for (i = 0; i < MAX_NB_DPN; i++) {
		if ((apn_length == apn_list[i].apn_name_length)
			&& !memcmp(apn_label, apn_list[i].apn_name_label,
			apn_length)) {
			break;
		}
	}

	if(i >= MAX_NB_DPN) {
		/* when apn name of csr are not found in cp.cfg file */
		/* TODO : free apn_requested and apn_name_label memory */
		apn *apn_requested = rte_zmalloc_socket(NULL, sizeof(apn),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (apn_requested == NULL) {
			rte_panic("Failure to allocate apn_requested buffer: "
				"%s (%s:%d)\n",
				rte_strerror(rte_errno),
				__FILE__,
				__LINE__);
			return NULL;
		}

		apn_requested->apn_name_label = rte_zmalloc_socket(NULL, apn_length,
			RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (apn_requested->apn_name_label == NULL) {
			rte_panic("Failure to allocate apn_name_label buffer: "
				"%s (%s:%d)\n",
				rte_strerror(rte_errno),
				__FILE__,
				__LINE__);
			return NULL;
		}

		/* Keep the raw (label-encoded) name; length is tracked separately. */
		strncpy(apn_requested->apn_name_label, apn_label, apn_length);
		apn_requested->apn_name_length = apn_length;
		/*TODO: need to discuss with himanshu */
		apn_requested->apn_idx = -1;
		/*Using default value*/
		apn_requested->trigger_type = config.trigger_type;
		apn_requested->uplink_volume_th = config.uplink_volume_th;
		apn_requested->downlink_volume_th = config.downlink_volume_th;
		apn_requested->time_th = config.time_th;
		/*Using default IP pool if no configured APN is used.*/
		apn_requested->ip_pool_ip = config.ip_pool_ip;
		apn_requested->ip_pool_mask = config.ip_pool_mask;
		memcpy(apn_requested->ipv6_network_id.s6_addr,
			config.ipv6_network_id.s6_addr,
			IPV6_ADDRESS_LEN);
		apn_requested->ipv6_prefix_len = config.ipv6_prefix_len;
		return apn_requested;
	}

	/* Found: remember the slot index and hand back the table entry. */
	apn_list[i].apn_idx = i;
	return apn_list+i;
}
/*
 * Allocate an apn object populated with the default trigger/threshold
 * values from the global config.  Used when no explicit APN applies.
 *
 * @return pointer to the new apn, or NULL if allocation fails.
 */
apn *set_default_apn(void)
{
	apn *apn_requested = rte_zmalloc_socket(NULL, sizeof(apn),
		RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (apn_requested == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
			"Failure to allocate apn_requested buffer", LOG_VALUE);
		/* Bail out instead of dereferencing the NULL pointer below. */
		return NULL;
	}

	/*Using default value*/
	apn_requested->apn_name_label = NULL;
	apn_requested->trigger_type = config.trigger_type;
	apn_requested->uplink_volume_th = config.uplink_volume_th;
	apn_requested->downlink_volume_th = config.downlink_volume_th;
	apn_requested->time_th = config.time_th;

	return apn_requested;
}
/*
 * Hand out the next IPv4 address from the statically indexed UE pool.
 * Stores the address in *ipv4 and returns 0 on success; returns
 * GTPV2C_CAUSE_ALL_DYNAMIC_ADDRESSES_OCCUPIED once the pool cursor
 * reaches LDB_ENTRIES_DEFAULT.
 */
uint32_t
acquire_ip(struct in_addr ip_pool,
		struct in_addr ip_pool_mask,
		struct in_addr *ipv4) {
	/* cursor into the pool; persists across calls */
	static uint32_t ip_cursor;

	if (unlikely(ip_cursor == LDB_ENTRIES_DEFAULT)) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT "IP Pool depleted\n", LOG_VALUE);
		return GTPV2C_CAUSE_ALL_DYNAMIC_ADDRESSES_OCCUPIED;
	}

	ipv4->s_addr = GET_UE_IP(ip_pool, ip_pool_mask, ip_cursor++);
	RTE_SET_USED(ip_pool);
	return 0;
}
/*
 * Fill the host part of an IPv6 UE address from a monotonically
 * increasing 64-bit counter.
 *
 * @param  ipv6       : points at the first byte after the network prefix.
 * @param  prefix_len : prefix length in bytes (as used by acquire_ipv6).
 * @return 0 on success.
 */
static int
fill_ipv6(uint8_t *ipv6, uint64_t prefix_len){
	static uint64_t next_ipv6_index;
	++next_ipv6_index;

	/* Never copy more than the 8 bytes the counter actually occupies:
	 * the old unclamped length (IPV6_ADDRESS_LEN - prefix_len) read past
	 * the end of next_ipv6_index whenever prefix_len < 8 bytes, and
	 * underflowed for prefix_len > IPV6_ADDRESS_LEN. */
	uint64_t copy_len = (prefix_len < IPV6_ADDRESS_LEN)
		? (IPV6_ADDRESS_LEN - prefix_len) : 0;
	if (copy_len > sizeof(next_ipv6_index))
		copy_len = sizeof(next_ipv6_index);

	memcpy(ipv6, &next_ipv6_index, copy_len);
	return 0;
}
/*
 * Allocate the next IPv6 address for a UE: copy the configured network
 * prefix into *ipv6 and fill the remaining bytes via fill_ipv6().
 * Returns 0 on success or GTPV2C_CAUSE_ALL_DYNAMIC_ADDRESSES_OCCUPIED
 * if fill_ipv6() reports exhaustion.
 * NOTE(review): 'prefix_len' is used as a byte offset here — confirm
 * callers do not pass a bit count.
 */
uint32_t
acquire_ipv6(struct in6_addr ipv6_network_id, uint8_t prefix_len,
		struct in6_addr *ipv6) {

	memcpy(ipv6->s6_addr, ipv6_network_id.s6_addr, IPV6_ADDRESS_LEN);

	int status = fill_ipv6(ipv6->s6_addr + prefix_len, prefix_len);
	if (status != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT "IPv6 Pool depleted\n", LOG_VALUE);
		return GTPV2C_CAUSE_ALL_DYNAMIC_ADDRESSES_OCCUPIED;
	}
	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | cp_dp_api/teid.h | <reponame>nikhilc149/e-utran-features-bug-fixes<gh_stars>0
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef TEID_H
#define TEID_H
/**
* @file
*
* Contains all data structures and functions to manage and/or
* obtain value for teid assignement.
*
*/
#include <stdint.h>
#include <pfcp_struct.h>
/* Per-DP (user plane) TEID allocation state; one node per registered DP,
 * kept in a singly linked list (see add_teid_info()/get_teid_info()). */
struct teid_info_t{
	/* DP ip address*/
	node_address_t dp_ip;
	/* Default teid range value */
	uint8_t teid_range;
	/* base value and offset for sgw teid generation */
	uint32_t up_gtpu_base_teid;
	uint32_t up_gtpu_teid_offset;
	/* working GTP-U TEID value for this DP (allocation cursor) */
	uint32_t up_gtpu_teid;
	/* max teid value in range, after which teid value should loopback */
	uint32_t up_gtpu_max_teid_offset;
	/* next DP node in the list */
	struct teid_info_t *next;
};
typedef struct teid_info_t teid_info;
/*
* Define type of Control Plane (CP)
* SGWC - Serving GW Control Plane
* PGWC - PDN GW Control Plane
* SAEGWC - Combined SAEGW Control Plane
*/
enum cp_config_type {
	CP_TYPE_SGWC = 1,	/* Serving GW Control Plane */
	CP_TYPE_PGWC = 2,	/* PDN GW Control Plane */
	CP_TYPE_SAEGWC = 3,	/* Combined SAEGW Control Plane */
};
/**
* @brief : sets base teid value given range by DP
* @param : ri_val
* teid range indicator assigned by DP
* @param : val
* teid range assigned by DP
* @param : upf_ip
* ip address of DP
* @param : upf_teid_info_head
* pointer to teid_info list
* @return : Returns 0 in case of success , -1 otherwise
*/
int8_t
set_base_teid(uint8_t ri_val, uint8_t val, node_address_t upf_ip,
teid_info **upf_teid_info_head);
/**
* @brief : sets the s1u_sgw gtpu teid
* @param : upf_ip
* ip address of DP
* @param : cp_type
* cp_type, SGWC, PGWC or SEAGWC
* @param : upf_teid_info_head
* pointer to teid_info list
* @return : Returns s1u_sgw_gtpu_teid
*/
uint32_t
get_s1u_sgw_gtpu_teid(node_address_t upf_ip, int cp_type, teid_info **upf_teid_info_head);
/**
* @brief : sets the s5s8_sgw gtpu teid
* @param : upf_ip
* ip address of DP
* @param : cp_type
* cp_type, SGWC, PGWC or SEAGWC
* @param : upf_teid_info_head
* pointer to teid_info list
* @return : Returns s5s8_sgw_gtpu_teid
*/
uint32_t
get_s5s8_sgw_gtpu_teid(node_address_t upf_ip, int cp_type, teid_info **upf_teid_info_head);
/**
* @brief : sets the s5s8_pgw gtpu teid
* @param : upf_ip
* ip address of DP
* @param : cp_type
* cp_type, SGWC, PGWC or SEAGWC
* @param : upf_teid_info_head
* pointer to teid_info list
* @return : Returns s5s8_pgw_gtpu_teid
*/
uint32_t
get_s5s8_pgw_gtpu_teid(node_address_t upf_ip, int cp_type, teid_info **upf_teid_info_head);
/**
* @brief : sets the s5s8_sgw gtpc teid
* @param : No param
* @return : Returns s5s8_sgw_gtpc_teid
*/
uint32_t
get_s5s8_sgw_gtpc_teid(void);
/**
* @brief : sets the s5s8_pgw gtpc teid
* @param : No param
* @return : Returns s5s8_pgw_gtpc_teid
*/
uint32_t
get_s5s8_pgw_gtpc_teid(void);
/**
* @brief : sets s11 sgw gtpc teid
* @param : check_if_ue_hash_exist,
* ue hash flag
* @param : cp_type
* cp_type, SGWC, PGWC or SEAGWC
* @param : old_s11_sgw_gtpc_teid, s11_sgw_gtpc_teid already in context
* @return : Returns s11_sgw_gtpc_teid
*/
uint32_t
get_s11_sgw_gtpc_teid(uint8_t *check_if_ue_hash_exist, int cp_type, uint32_t old_s11_sgw_gtpc_teid);
/**
* @brief : Retrives node from list for given ip
* @param : head
* teid_info linked list head
* @param : upf_ip
* ip address of DP
* @return : Returns pointer to node in case of success, NULL otherwise
*/
teid_info *
get_teid_info(teid_info **head, node_address_t upf_ip);
/**
* @brief : Adds new node to the list
* @param : head
* teid_info linked list head
* @param : newNode
* new node to be addded in list
* @return : Returns 0 in case of success, -1 otherwise
*/
int8_t
add_teid_info(teid_info **head, teid_info *newNode);
/**
* @brief : Deletes node from list for given ip
* @param : upf_ip
* ip address of DP
* @param : upf_teid_info_head
* pointer to teid_info list
* @return : Returns nothing
*/
void
delete_entry_from_teid_list(node_address_t upf_ip, teid_info **upf_teid_info_head);
#endif /* TEID_H */
|
nikhilc149/e-utran-features-bug-fixes | cp/gx_app/include/fd.h | <gh_stars>0
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __FD_H__
#define __FD_H__
#include <freeDiameter/freeDiameter-host.h>
#include <freeDiameter/libfdcore.h>
#include <freeDiameter/libfdproto.h>
/* TBD - create proper definition */
#define TRC2
#define FD_REASON_OK 0
#define FD_REASON_CORE_INIT_FAIL 1
#define FD_REASON_PARSECONF_FAIL 2
#define FD_REASON_CORESTART_FAIL 3
#define FD_REASON_NOT_APPL 4
#define FD_REASON_DICT_GETVAL 5
#define FD_REASON_DICT_SEARCH 6
#define FD_REASON_REGISTER_CALLBACK 7
#define FD_REASON_REGISTER_APPLICATION 8
#define FD_REASON_MSG_SEND_FAIL 9
#define FD_REASON_MSG_ADD_ORIGIN_FAIL 10
#define FD_REASON_MSG_NEW_FAIL 11
#define FD_REASON_MSG_NEW_FAIL_APPL 12
#define FD_REASON_AVP_NEW_FAIL 13
#define FD_REASON_AVP_ADD_FAIL 14
#define FD_REASON_AVP_SETVALUE_FAIL 15
#define FD_REASON_BROWSE_FIRST_FAIL 16
#define FD_REASON_BROWSE_NEXT_FAIL 17
#define MAX_FD_ADDRESS_LEN 16
#define MAX_PEER_NAME_LEN 256
/**
 * @brief : Maintains fd address details
 */
typedef struct fdAddress {
	/* address family/type taken from the first two octets of the AVP
	 * payload (see FD_PARSE_ADDRESS) */
	uint16_t type;
	/* raw address bytes in wire order; length depends on type */
	uint8_t address[MAX_FD_ADDRESS_LEN];
} FdAddress;

/* Seconds since the UNIX epoch (used by the time AVP helpers below). */
typedef time_t FdTime;
#define FDCHECK_FCT(a, b) \
CHECK_FCT_DO(a, return b)
#define FDCHECK_FCT_2(a) \
{ \
S16 __ret__ = a; \
if (__ret__ != FD_REASON_NOT_APPL) \
return __ret__; \
}
/*
* FDCHECK_FCT_DICT_GETVAL(a, b, c, d)
* a - the dictionary entry object pointer
* b - pointer to structure to hold dictionary object
* c - the return value variable
* d - error code if failure detected
*/
#define FDCHECK_FCT_DICT_GETVAL(a, b, c, d) \
CHECK_FCT_DO(fd_dict_getval(a,b), c = FD_REASON_DICT_GETVAL; d)
/*
* FDCHECK_FCT_DICT(a, b, c, d, e, f)
* a - type of object that is being searched
* b - how the object must be searched
* c - name of the dictionary item
* d - the dictionary entry object pointer
* e - the return value variable
* f - error code if failure detected
*/
#define FDCHECK_FCT_DICT(a, b, c, d, e, f) \
CHECK_FCT_DO(fd_dict_search(fd_g_config->cnf_dict, a, b, (void*)c, &d, ENOENT), e = FD_REASON_DICT_SEARCH; f)
/*
* FDCHECK_FCT_REGISTER_CALLBACK(a, b, c, d)
* a - the callback function pointer to register
* b - the dictionary entry pointer to the command
* c - the return value variable
* d - error code if failure detected
*/
#define FDCHECK_FCT_REGISTER_CALLBACK(a, b, c, d) \
data.command = b; \
CHECK_FCT_DO(fd_disp_register(a, DISP_HOW_CC, &data, NULL, NULL), c = FD_REASON_REGISTER_CALLBACK; d)
/*
* FDCHECK_FCT_REGISTER_APPLICATION(a, b, c, d)
* a - the dictionary entry pointer to the application
* b - the dictionary entry pointer to the vendor
* c - the return value variable
* d - error code if failure detected
*/
#define FDCHECK_FCT_REGISTER_APPLICATION(a, b, c, d) \
CHECK_FCT_DO(fd_disp_app_support(a, b, 1, 0), c = FD_REASON_REGISTER_APPLICATION; d)
/*
* FDCHECK_MSG_SEND(a, b, c, d, e)
* a - pointer to message to send
* b - answer callback function pointer
* c - answer callback data
* d - the return value variable
* e - error code if failure detected
*/
#define FDCHECK_MSG_SEND(a, b, c, d, e) \
CHECK_FCT_DO(fd_msg_send(&a,b,c), d = FD_REASON_MSG_SEND_FAIL; e)
/*
* FDCHECK_MSG_ADD_ORIGIN(a, b, c, d, e)
* a - message pointer
* b - the return value variable
* c - error code if failure detected
*/
#define FDCHECK_MSG_ADD_ORIGIN(a, b, c) \
CHECK_FCT_DO(fd_msg_add_origin(a, 0), b = FD_REASON_MSG_ADD_ORIGIN_FAIL; c)
/*
* FDCHECK_MSG_NEW(a, b, c, d)
* a - the dictionary entry pointer to the command to create
* b - message pointer variable
* c - the return value variable
* d - error code if failure detected
*/
#define FDCHECK_MSG_NEW(a, b, c, d) \
CHECK_FCT_DO(fd_msg_new(a, MSGFL_ALLOC_ETEID, &b), c = FD_REASON_MSG_NEW_FAIL; d)
/*
* FDCHECK_MSG_NEW_APPL(a, b, c, d, e)
* a - the dictionary entry pointer to the command to create
* b - the dictionary entry pointer to the application the command is associated with
* c - message pointer variable
* d - the return value variable
* e - error code if failure detected
*/
#define FDCHECK_MSG_NEW_APPL(a, b, c, d, e) \
CHECK_FCT_DO(fd_msg_new_appl(a, b, MSGFL_ALLOC_ETEID, &c), d = FD_REASON_MSG_NEW_FAIL_APPL; e)
/*
* FDCHECK_MSG_FREE(a)
* a - message pointer to free
*/
#define FDCHECK_MSG_FREE(a) \
{ \
int __rval__ = fd_msg_free(a); \
if (__rval__ != 0) \
LOG_E("fd_msg_free(): unable to free the msg pointer (%d)", __rval__); \
}
/*
* FDCHECK_MSG_NEW_ANSWER_FROM_REQ(a, b, c, d)
* a - dictionary pointer that contains the definition for the request
* b - request message pointer
* c - the return value variable
* d - error code if failure detected
*/
#define FDCHECK_MSG_NEW_ANSWER_FROM_REQ(a, b, c, d) \
CHECK_FCT_DO(fd_msg_new_answer_from_req(a, &b, 0), c = FD_REASON_MSG_NEW_FAIL; d)
/*
* FDCHECK_MSG_ADD_AVP_GROUPED_2(a, b, c, d, e, f)
* a - the avp dictionary entry pointer
* b - avp or msg pointer to add the avp to
* c - location where the avp should be inserted (usually MSG_BRW_LAST_CHILD)
* d - avp pointer to hold the created avp object
* e - the return value variable
* f - error code if failure detected
*/
#define FDCHECK_MSG_ADD_AVP_GROUPED_2(a, b, c, d, e, f) \
{ \
struct avp * ___avp___; \
CHECK_FCT_DO(fd_msg_avp_new(a, 0, &___avp___), e = FD_REASON_AVP_NEW_FAIL; f); \
CHECK_FCT_DO(fd_msg_avp_add(b, c, ___avp___), e = FD_REASON_AVP_ADD_FAIL; f); \
d = ___avp___; \
}
/*
* FDCHECK_MSG_ADD_AVP_GROUPED(a, b, c, d, e)
* a - the avp dictionary entry pointer
* b - avp or msg pointer to add the avp to
* c - location where the avp should be inserted (usually MSG_BRW_LAST_CHILD)
* d - the return value variable
* e - error code if failure detected
*/
#define FDCHECK_MSG_ADD_AVP_GROUPED(a, b, c, d, e) \
{ \
struct avp * __avp__; \
FDCHECK_MSG_ADD_AVP_GROUPED_2(a, b, c, __avp__, d, e); \
(void)__avp__; \
}
/*
* FDCHECK_MSG_ADD_AVP(a, b, c, d, e, f, g)
* a - the avp dictionary entry pointer
* b - avp or msg pointer to add the avp to
* c - location where the avp should be inserted (usually MSG_BRW_LAST_CHILD)
* d - the avp_value pointer
* e - avp pointer to hold the created avp object
* f - the return value variable
* g - error code if failure detected
*/
#define FDCHECK_MSG_ADD_AVP(a, b, c, d, e, f, g) \
{ \
struct avp * ___avp___; \
CHECK_FCT_DO(fd_msg_avp_new(a, 0, &___avp___), f = FD_REASON_AVP_NEW_FAIL; g); \
CHECK_FCT_DO(fd_msg_avp_setvalue(___avp___, d), f = FD_REASON_AVP_SETVALUE_FAIL; g); \
CHECK_FCT_DO(fd_msg_avp_add(b, c, ___avp___), f = FD_REASON_AVP_ADD_FAIL; g); \
e = ___avp___; \
}
/*
* FDCHECK_MSG_ADD_AVP_OSTR_2(a, b, c, d, e, f, g, h)
* a - the avp dictionary entry pointer
* b - avp or msg pointer to add the avp to
* c - location where the avp should be inserted (usually MSG_BRW_LAST_CHILD)
* d - pointer to octet string value
* e - length of the octet string value
* f - avp pointer to hold the created avp object
* g - the return value variable
* h - error code if failure detected
*/
#define FDCHECK_MSG_ADD_AVP_OSTR_2(a, b, c, d, e, f, g, h) \
{ \
union avp_value __val__; \
__val__.os.data = (unsigned char *)d; \
__val__.os.len = e; \
FDCHECK_MSG_ADD_AVP(a, b, c, &__val__, f, g, h); \
}
/*
* FDCHECK_MSG_ADD_AVP_OSTR(a, b, c, d, e, f, g)
* a - the avp dictionary entry pointer
* b - avp or msg pointer to add the avp to
* c - location where the avp should be inserted (usually MSG_BRW_LAST_CHILD)
* d - pointer to octet string value
* e - length of the octet string value
* f - the return value variable
* g - error code if failure detected
*/
#define FDCHECK_MSG_ADD_AVP_OSTR(a, b, c, d, e, f, g) \
{ \
struct avp * __avp__; \
FDCHECK_MSG_ADD_AVP_OSTR_2(a, b, c, d, e, __avp__, f, g); \
(void)__avp__;\
}
/*
* FDCHECK_MSG_ADD_AVP_STR_2(a, b, c, d, e, f, g)
* a - the avp dictionary entry pointer
* b - avp or msg pointer to add the avp to
* c - location where the avp should be inserted (usually MSG_BRW_LAST_CHILD)
* d - pointer to the null terminated string value
* e - avp pointer to hold the created avp object
* f - the return value variable
* g - error code if failure detected
*/
/*TODO : change strlen with strnlen with proper size (n)*/
#define FDCHECK_MSG_ADD_AVP_STR_2(a, b, c, d, e, f, g) \
{ \
union avp_value val; \
val.os.data = (unsigned char *)d; \
val.os.len = strlen((const S8 *)d); \
FDCHECK_MSG_ADD_AVP(a, b, c, &val, e, f, g); \
}
/*
* FDCHECK_MSG_ADD_AVP_STR(a, b, c, d, e, f)
* a - the avp dictionary entry pointer
* b - avp or msg pointer to add the avp to
* c - location where the avp should be inserted (usually MSG_BRW_LAST_CHILD)
* d - pointer to the null terminated string value
* e - the return value variable
* f - error code if failure detected
*/
#define FDCHECK_MSG_ADD_AVP_STR(a, b, c, d, e, f) \
{ \
struct avp * __avp__; \
FDCHECK_MSG_ADD_AVP_STR_2(a, b, c, d, __avp__, e, f); \
(void)__avp__;\
}
/*
* FDCHECK_MSG_ADD_AVP_S32_2(a, b, c, d, e, f, g)
* a - the avp dictionary entry pointer
* b - avp or msg pointer to add the avp to
* c - location where the avp should be inserted (usually MSG_BRW_LAST_CHILD)
* d - the int32_t value
* e - avp pointer to hold the created avp object
* f - the return value variable
* g - error code if failure detected
*/
#define FDCHECK_MSG_ADD_AVP_S32_2(a, b, c, d, e, f, g) \
{ \
union avp_value val; \
val.i32 = d; \
FDCHECK_MSG_ADD_AVP(a, b, c, &val, e, f, g); \
}
/*
* FDCHECK_MSG_ADD_AVP_S32(a, b, c, d, e, f)
* a - the avp dictionary entry pointer
* b - avp or msg pointer to add the avp to
* c - location where the avp should be inserted (usually MSG_BRW_LAST_CHILD)
* d - the int32_t value
* e - the return value variable
* f - error code if failure detected
*/
#define FDCHECK_MSG_ADD_AVP_S32(a, b, c, d, e, f) \
{ \
struct avp * __avp__; \
FDCHECK_MSG_ADD_AVP_S32_2(a, b, c, d, __avp__, e, f); \
(void)__avp__; \
}
/*
* FDCHECK_MSG_ADD_AVP_U32_2(a, b, c, d, e, f, g)
* a - the avp dictionary entry pointer
* b - avp or msg pointer to add the avp to
* c - location where the avp should be inserted (usually MSG_BRW_LAST_CHILD)
* d - the uint32_t value
* e - avp pointer to hold the created avp object
* f - the return value variable
* g - error code if failure detected
*/
#define FDCHECK_MSG_ADD_AVP_U32_2(a, b, c, d, e, f, g) \
{ \
union avp_value val; \
val.u32 = d; \
FDCHECK_MSG_ADD_AVP(a, b, c, &val, e, f, g); \
}
/*
* FDCHECK_MSG_ADD_AVP_U32(a, b, c, d, e, f)
* a - the avp dictionary entry pointer
* b - avp or msg pointer to add the avp to
* c - location where the avp should be inserted (usually MSG_BRW_LAST_CHILD)
* d - the uint32_t value
* e - the return value variable
* f - error code if failure detected
*/
#define FDCHECK_MSG_ADD_AVP_U32(a, b, c, d, e, f) \
{ \
struct avp * __avp__; \
FDCHECK_MSG_ADD_AVP_U32_2(a, b, c, d, __avp__, e, f); \
(void)__avp__; \
}
/*
* FDCHECK_MSG_ADD_AVP_TIME_2(a, b, c, d, e, f, g)
* a - the avp dictionary entry pointer
* b - avp or msg pointer to add the avp to
* c - location where the avp should be inserted (usually MSG_BRW_LAST_CHILD)
* d - the time_t value
* e - avp pointer to hold the created avp object
* f - the return value variable
* g - error code if failure detected
*/
/* Converts UNIX time to NTP time before encoding: the two epochs differ
 * by 2208988800 seconds (1900-01-01 vs 1970-01-01) — the same constant
 * the FD_PARSE_TIME decoder below subtracts.  The previous constant
 * (220898800) was missing a digit, producing timestamps ~63 years off. */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define FDCHECK_MSG_ADD_AVP_TIME_2(a, b, c, d, e, f, g) \
{ \
   union avp_value val; \
   union { \
      uint32_t u32; \
      uint8_t u8[sizeof(uint32_t)]; \
   } t; \
   t.u32 = (uint32_t)(d + 2208988800UL); \
   uint8_t u8; \
   u8 = t.u8[0]; t.u8[0] = t.u8[3]; t.u8[3] = u8; \
   u8 = t.u8[1]; t.u8[1] = t.u8[2]; t.u8[2] = u8; \
   val.os.data = t.u8; \
   val.os.len = sizeof(uint32_t); \
   FDCHECK_MSG_ADD_AVP(a, b, c, &val, e, f, g); \
}
#else
#define FDCHECK_MSG_ADD_AVP_TIME_2(a, b, c, d, e, f, g) \
{ \
   union avp_value val; \
   union { \
      uint32_t u32; \
      uint8_t u8[sizeof(uint32_t)]; \
   } t; \
   t.u32 = (uint32_t)(d + 2208988800UL); \
   val.os.data = t.u8; \
   val.os.len = sizeof(uint32_t); \
   FDCHECK_MSG_ADD_AVP(a, b, c, &val, e, f, g); \
}
#endif
/*
* FDCHECK_MSG_ADD_AVP_TIME(a, b, c, d, e, f)
* a - the avp dictionary entry pointer
* b - avp or msg pointer to add the avp to
* c - location where the avp should be inserted (usually MSG_BRW_LAST_CHILD)
* d - the time_t value
* e - the return value variable
* f - error code if failure detected
*/
#define FDCHECK_MSG_ADD_AVP_TIME(a, b, c, d, e, f) \
{ \
struct avp * __avp__; \
FDCHECK_MSG_ADD_AVP_TIME_2(a, b, c, d, __avp__, e, f); \
(void)__avp__; \
}
/*
* FDCHECK_MSG_FIND_AVP(a, b, c, d)
* a - the msg pointer to search
* b - the dictionary entry pointer for the avp to search
* c - location where the AVP reference will be stored
* d - fallback (what to do if there is an error)
*/
#define FDCHECK_MSG_FIND_AVP(a, b, c, d) \
{ \
if (fd_msg_search_avp(a, b, &c) != 0) \
{ \
d; \
} \
}
/*
* FDCHECK_AVP_GET_HDR(a, b, c, d)
* a - the dictionary entry pointer for the avp
* b - the avp pointer
* c - the avp header pointer to populate
* d - fallback (what to do if there is an error)
*/
#define FDCHECK_AVP_GET_HDR(a, b, c, d) \
{ \
if (fd_msg_avp_hdr(b,&c) != 0) { \
struct dict_avp_data * __deval__ = NULL; \
fd_dict_getval(a, &__deval__); \
LOG_E("fd_msg_avp_hdr(): unable to retrieve avp header for [%s]", __deval__ ? __deval__->avp_name : ""); \
d; \
} \
}
/*
* FDCHECK_AVP_GET_S32(a, b, c, d)
* a - the dictionary entry pointer for the avp
* b - the avp pointer
* c - the int32_t variable to populate
* d - fallback (what to do if there is an error)
*/
#define FDCHECK_AVP_GET_S32(a, b, c, d) \
{ \
struct avp_hdr *__hdr__= NULL; \
FDCHECK_AVP_GET_HDR(a, b, __hdr__, d); \
c = __hdr__->avp_value->i32; \
}
/*
* FDCHECK_AVP_GET_U32(a, b, c, d)
* a - the dictionary entry pointer for the avp
* b - the avp pointer
* c - the uint32_t variable to populate
* d - fallback (what to do if there is an error)
*/
#define FDCHECK_AVP_GET_U32(a, b, c, d) \
{ \
struct avp_hdr *__hdr__= NULL; \
FDCHECK_AVP_GET_HDR(a, b, __hdr__, d); \
c = __hdr__->avp_value->u32; \
}
/*
* FD_ALLOC_LIST(a, b)
* a - pointer to the list structure
* b - type of the list structure
*/
#define FD_ALLOC_LIST(a, b) \
{ \
if (a.count > 0) { \
a.list = (b*)malloc( sizeof(*a.list) * a.count ); \
memset( a.list, 0, sizeof(*a.list) * a.count ); \
a.count = 0; \
} \
}
/*
* FD_CALLFREE_STRUCT(a, b)
* a - pointer to the structure to free
* b - free structure function pointer
*/
#define FD_CALLFREE_STRUCT(a, b) \
b( &a );
/*
* FD_CALLFREE_LIST(a, b)
* a - pointer to the list structure that each list element will have the free structure function called
* b - free structure function pointer
*/
#define FD_CALLFREE_LIST(a, b) \
{ \
int __idx__; \
if (a.list) { \
for (__idx__ = 0; __idx__ < a.count; __idx__++) \
FD_CALLFREE_STRUCT( a.list[__idx__], b ); \
} \
}
/*
* FD_FREE_LIST(a)
* a - pointer to the list structure to free
*/
#define FD_FREE_LIST(a) \
{ \
if (a.list) { \
free(a.list); \
a.list = NULL; \
} \
a.count = 0; \
}
/*
* FD_DUMP_MESSAGE(a)
* a - msg or avp pointer that will be printed
*/
#define FD_DUMP_MESSAGE(a) \
{ \
char * buf = NULL; \
size_t len = 0; \
printf("%s\n", fd_msg_dump_treeview(&buf, &len, NULL, a, fd_g_config->cnf_dict, 0, 1)); \
free(buf); \
}
static inline int fdmin(int lval, int rval) { return lval < rval ? lval : rval; }
/*
* FD_PARSE_OCTETSTRING(a, b, c)
* a - pointer to the avp_value
 * b - destination buffer structure (with .val byte array and .len members)
 * c - max length of the destination buffer
*/
#define FD_PARSE_OCTETSTRING(a, b, c) \
{ \
b.len = fdmin(a->os.len, c); \
memcpy(b.val, a->os.data, b.len); \
}
/*
 * FD_PARSE_TIME(a, b)
 * a - pointer to the avp_value; os.data holds a 32-bit NTP timestamp in
 *     network byte order
 * b - variable where the resulting time value will be stored
 *
 * NTP time counts seconds from 1900-01-01, UNIX time from 1970-01-01; the
 * difference of 2208988800 seconds is subtracted after byte-order
 * conversion.  The word is read with memcpy instead of a uint32_t* cast,
 * which avoided-in-name-only the misaligned-access / strict-aliasing
 * undefined behavior of the previous version.
 * Fix: the big-endian branch previously defined the misspelled macro
 * FD_PARSE_TiME, so big-endian builds had no FD_PARSE_TIME at all.
 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define FD_PARSE_TIME(a, b) \
{ \
   union { \
      uint32_t u32; \
      uint8_t u8[sizeof(uint32_t)]; \
   } _val; \
   memcpy(_val.u8, a->os.data, sizeof(uint32_t)); \
   uint8_t _u8; \
   _u8 = _val.u8[0]; _val.u8[0] = _val.u8[3]; _val.u8[3] = _u8; \
   _u8 = _val.u8[1]; _val.u8[1] = _val.u8[2]; _val.u8[2] = _u8; \
   b = ((FdTime)_val.u32) - (FdTime)2208988800; \
}
#else
#define FD_PARSE_TIME(a, b) \
{ \
   union { \
      uint32_t u32; \
      uint8_t u8[sizeof(uint32_t)]; \
   } _val; \
   memcpy(_val.u8, a->os.data, sizeof(uint32_t)); \
   b = ((FdTime)_val.u32) - (FdTime)2208988800; \
}
#endif
/*
 * FD_NETWORK_TO16(a)
 * a - pointer to (at least) 2 bytes in network byte order (big endian)
 *
 * Assembles the host-order value by reading the bytes individually:
 * a[0] is always the most-significant byte on the wire, so the result is
 * correct on any host.  Fix: the previous little-endian variant computed
 * ((a[1] << 8) | a[0]), i.e. the byte-swapped value - byte-wise memory
 * access does not depend on host endianness, so no #if is needed.
 */
#define FD_NETWORK_TO16(a) ((uint16_t)(((a)[0] << 8) | (a)[1]))
/*
 * FD_PARSE_ADDRESS(a, b)
 * a - pointer to the avp_value; os.data holds a Diameter Address AVP:
 *     a 2-byte AddressType in network order followed by the address bytes
 * b - FdAddress variable where the resulting address value will be stored
 *
 * Fix: if the octet string is shorter than the 2-byte type prefix, b is
 * now left fully zeroed; previously (a->os.len - 2) underflowed to a huge
 * size_t and the memcpy read far out of bounds.
 * NOTE(review): the copy still trusts os.len - 2 not to exceed the
 * capacity of b.address; confirm against the FdAddress definition.
 */
#define FD_PARSE_ADDRESS(a, b) \
{ \
   memset(&b, 0, sizeof(b)); \
   if (a->os.len >= 2) { \
      b.type = FD_NETWORK_TO16( a->os.data ); \
      memcpy( b.address, &a->os.data[2], a->os.len - 2 ); \
   } \
}
/* FDCHECK_PARSE_DIRECT( function_name, struct avp *, pointer to structure to populate ) */
/*
 * FDCHECK_PARSE_DIRECT(a, b, c)
 * a - parsing function pointer that will be called
 * b - avp pointer to the grouped avp that will be parsed
 * c - pointer to the destination structure that will be populated
 *
 * NOTE: expands to a 'return' on failure, so this macro may only be used
 * inside a function returning int; any non-FD_REASON_OK code from 'a' is
 * propagated to the enclosing function's caller.
 */
#define FDCHECK_PARSE_DIRECT(a,b,c) \
{ \
int __ret__ = a(b,c); \
if (__ret__ != FD_REASON_OK) \
return __ret__; \
}
/* Extract the low / high 4-bit nibble of a byte.
 * Fix: FD_HIGH_NIBBLE previously expanded to the undefined identifier
 * AQFD_LOW_NIBBLE, which broke every use of the macro at compile time.
 * Arguments are parenthesized so expression arguments are safe. */
#define FD_LOW_NIBBLE(b)  ((b) & 0x0f)
#define FD_HIGH_NIBBLE(b) (FD_LOW_NIBBLE((b) >> 4))
/*
 * FD_CHAR2TBCD(c)
 * Map a dialing character to its TBCD (Telephony BCD) code:
 * '0'-'9' -> 0-9, '*' -> 10, '#' -> 11, 'a' -> 12, 'b' -> 13, 'c' -> 14,
 * anything else -> 15 (filler nibble).
 * Fix: the argument is now parenthesized at every use, so expression
 * arguments (e.g. FD_CHAR2TBCD(x | y)) evaluate with the intended
 * precedence.  Note the argument is still evaluated multiple times -
 * do not pass expressions with side effects.
 */
#define FD_CHAR2TBCD(c) \
( \
   (c) >= '0' && (c) <= '9' ? (c) - '0' : \
   (c) == '*' ? 10 : \
   (c) == '#' ? 11 : \
   (c) == 'a' ? 12 : \
   (c) == 'b' ? 13 : \
   (c) == 'c' ? 14 : 15 \
)
#endif /* __GX_DEF_H__ */
|
nikhilc149/e-utran-features-bug-fixes | cp/gx_app/include/gx.h | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __GX_H__
#define __GX_H__
#include "fd.h"
#include "gx_struct.h"
#include <stdio.h>
#include <string.h>
#define CONNECTPEER "ConnectPeer"
/* Event-Trigger AVP values set by the PCRF on the PGW-C. */
#define RAT_EVENT_TRIGGER 2
#define RAI_EVENT_TRIGGER 12
#define ULI_EVENT_TRIGGER 13
#define UE_TIMEZONE_EVT_TRIGGER 25
#define TAI_EVENT_TRIGGER 26
#define ECGI_EVENT_TRIGGER 27
#define SUCCESSFUL_RESOURCE_ALLOCATION 22
#define RESOURCE_MODIFICATION_REQUEST 23
#define CHANGE_OF_UE_PRESENCE_IN_PRESENCE_REPORTING_AREA_REPORT 48
/* Bit positions of the *_PRESENT flags below (bit-wise OR'ed together):
 *
 * | Bit 7 | Bit 6 | Bit 5 | Bit 4 | Bit 3 | Bit 2 | Bit 1 | Bit 0 |
 * |       | EVT   | RMR   | ECGI  | RAI   | SAI   | CGI   | TAI   |
 */
#define ECGI_AND_TAI_PRESENT 17 /* == (ECGI_PRESENT | TAI_PRESENT) */
#define TAI_PRESENT 1
#define CGI_PRESENT 2
#define SAI_PRESENT 4
#define RAI_PRESENT 8
#define ECGI_PRESENT 16
#define RESOURCE_MOD_REQ_PRESENT 32
/* Location / timezone geographic-type identifiers. */
#define GX_UE_TIMEZONE_TYPE 0x17
#define GX_ECGI_AND_TAI_TYPE 0x82
#define GX_TAI_TYPE 0x80
#define GX_ECGI_TYPE 0x81
#define GX_SAI_TYPE 0x01
#define GX_RAI_TYPE 0x02
#define GX_CGI_TYPE 0x00
#define EVENT_TRIGGER_LIST 64
/* Message header length: 1-byte type + 2-byte length.
 * Fix: the expansion is now parenthesized; previously an expression such
 * as "2 * GX_HEADER_LEN" expanded to 2*sizeof(uint8_t) + sizeof(uint16_t)
 * (= 4 instead of 6) because of operator precedence. */
#define GX_HEADER_LEN (sizeof(uint8_t) + sizeof(uint16_t))
/*
 * Diameter Result-Code / Experimental-Result-Code values used by the Gx
 * application (permanent-failure class, 5xxx), listed in numeric order.
 */
enum diameter_error {
	DIAMETER_UNKNOWN_SESSION_ID = 5002, /* session not found */
	DIAMETER_INVALID_AVP_VALUE  = 5004, /* avp content failed validation */
	DIAMETER_MISSING_AVP        = 5005, /* mandatory avp absent */
	DIAMETER_UNABLE_TO_COMPLY   = 5012, /* generic permanent failure */
	DIAMETER_ERROR_USER_UNKNOWN = 5030  /* subscriber not known */
};
/**
 * @brief : Maintains gx dictionary information
 *
 * Caches every freeDiameter dictionary handle (dict_object pointers) the
 * Gx application needs, together with the extracted dictionary data
 * (dict_*_data) for each, so lookups are performed once at initialization
 * instead of per message.  The davp_* members mirror the avp_* handles
 * one-to-one, in the same order.
 */
typedef struct gxDict {
/* Vendor and application dictionary entries. */
struct dict_object * vndETSI;
struct dict_object * vnd3GPP2;
struct dict_object * vnd3GPP;
struct dict_object * appGX;
struct dict_application_data davp_appGX;
struct dict_vendor_data davp_vndETSI;
struct dict_vendor_data davp_vnd3GPP2;
struct dict_vendor_data davp_vnd3GPP;
/* Gx command entries: Re-Auth and Credit-Control, request/answer. */
struct dict_object * cmdRAR;
struct dict_object * cmdRAA;
struct dict_object * cmdCCA;
struct dict_object * cmdCCR;
/* AVP dictionary handles. */
struct dict_object * avp_tdf_destination_host;
struct dict_object * avp_pre_emption_capability;
struct dict_object * avp_packet_filter_content;
struct dict_object * avp_feature_list_id;
struct dict_object * avp_resource_release_notification;
struct dict_object * avp_service_identifier;
struct dict_object * avp_physical_access_id;
struct dict_object * avp_csg_access_mode;
struct dict_object * avp_henb_local_ip_address;
struct dict_object * avp_dynamic_address_flag_extension;
struct dict_object * avp_apn_aggregate_max_bitrate_ul;
struct dict_object * avp_network_request_support;
struct dict_object * avp_termination_cause;
struct dict_object * avp_exponent;
struct dict_object * avp_3gpp_rat_type;
struct dict_object * avp_af_signalling_protocol;
struct dict_object * avp_packet_filter_usage;
struct dict_object * avp_usage_monitoring_support;
struct dict_object * avp_tracking_area_identity;
struct dict_object * avp_load_value;
struct dict_object * avp_feature_list;
struct dict_object * avp_omc_id;
struct dict_object * avp_rai;
struct dict_object * avp_oc_report_type;
struct dict_object * avp_experimental_result;
struct dict_object * avp_cc_request_type;
struct dict_object * avp_service_context_id;
struct dict_object * avp_secondary_event_charging_function_name;
struct dict_object * avp_pcscf_restoration_indication;
struct dict_object * avp_tdf_ip_address;
struct dict_object * avp_load_type;
struct dict_object * avp_extended_gbr_dl;
struct dict_object * avp_oc_feature_vector;
struct dict_object * avp_origin_host;
struct dict_object * avp_pra_remove;
struct dict_object * avp_maximum_wait_time;
struct dict_object * avp_list_of_measurements;
struct dict_object * avp_qos_information;
struct dict_object * avp_final_unit_action;
struct dict_object * avp_conditional_policy_information;
struct dict_object * avp_drmp;
struct dict_object * avp_pra_install;
struct dict_object * avp_logical_access_id;
struct dict_object * avp_resource_allocation_notification;
struct dict_object * avp_rule_deactivation_time;
struct dict_object * avp_flow_status;
struct dict_object * avp_priority_level;
struct dict_object * avp_pre_emption_vulnerability;
struct dict_object * avp_bearer_usage;
struct dict_object * avp_reporting_reason;
struct dict_object * avp_qos_class_identifier;
struct dict_object * avp_3gpp_sgsn_mcc_mnc;
struct dict_object * avp_area_scope;
struct dict_object * avp_re_auth_request_type;
struct dict_object * avp_precedence;
struct dict_object * avp_flow_number;
struct dict_object * avp_pdn_connection_charging_id;
struct dict_object * avp_3gpp_ps_data_off_status;
struct dict_object * avp_redirect_host_usage;
struct dict_object * avp_an_gw_address;
struct dict_object * avp_tunnel_header_filter;
struct dict_object * avp_access_network_charging_identifier_value;
struct dict_object * avp_secondary_charging_collection_function_name;
struct dict_object * avp_tcp_source_port;
struct dict_object * avp_destination_host;
struct dict_object * avp_3gpp_selection_mode;
struct dict_object * avp_location_area_identity;
struct dict_object * avp_logging_interval;
struct dict_object * avp_flow_information;
struct dict_object * avp_ue_local_ip_address;
struct dict_object * avp_extended_apn_ambr_dl;
struct dict_object * avp_tdf_application_identifier;
struct dict_object * avp_tunnel_information;
struct dict_object * avp_media_component_status;
struct dict_object * avp_tft_packet_filter_information;
struct dict_object * avp_guaranteed_bitrate_ul;
struct dict_object * avp_online;
struct dict_object * avp_mbsfn_area;
struct dict_object * avp_extended_apn_ambr_ul;
struct dict_object * avp_extended_gbr_ul;
struct dict_object * avp_content_version;
struct dict_object * avp_usage_monitoring_report;
struct dict_object * avp_event_report_indication;
struct dict_object * avp_job_type;
struct dict_object * avp_bearer_operation;
struct dict_object * avp_user_equipment_info_type;
struct dict_object * avp_tdf_information;
struct dict_object * avp_cc_request_number;
struct dict_object * avp_framed_ipv6_prefix;
struct dict_object * avp_packet_filter_operation;
struct dict_object * avp_coa_ip_address;
struct dict_object * avp_3gpp_charging_characteristics;
struct dict_object * avp_proxy_info;
struct dict_object * avp_used_service_unit;
struct dict_object * avp_charging_rule_install;
struct dict_object * avp_mdt_allowed_plmn_id;
struct dict_object * avp_origin_realm;
struct dict_object * avp_twan_identifier;
struct dict_object * avp_charging_rule_definition;
struct dict_object * avp_flow_label;
struct dict_object * avp_3gpp_ggsn_ipv6_address;
struct dict_object * avp_guaranteed_bitrate_dl;
struct dict_object * avp_restriction_filter_rule;
struct dict_object * avp_3gpp_sgsn_address;
struct dict_object * avp_redirect_address_type;
struct dict_object * avp_tdf_destination_realm;
struct dict_object * avp_user_location_info_time;
struct dict_object * avp_subscription_id_data;
struct dict_object * avp_redirect_server_address;
struct dict_object * avp_nbifom_mode;
struct dict_object * avp_final_unit_indication;
struct dict_object * avp_3gpp_sgsn_ipv6_address;
struct dict_object * avp_3gpp2_bsid;
struct dict_object * avp_trace_collection_entity;
struct dict_object * avp_session_release_cause;
struct dict_object * avp_ran_rule_support;
struct dict_object * avp_unit_value;
struct dict_object * avp_charging_rule_base_name;
struct dict_object * avp_report_interval;
struct dict_object * avp_presence_reporting_area_node;
struct dict_object * avp_user_equipment_info_value;
struct dict_object * avp_route_record;
struct dict_object * avp_presence_reporting_area_identifier;
struct dict_object * avp_csg_information_reporting;
struct dict_object * avp_filter_id;
struct dict_object * avp_presence_reporting_area_information;
struct dict_object * avp_an_gw_status;
struct dict_object * avp_ssid;
struct dict_object * avp_metering_method;
struct dict_object * avp_flow_description;
struct dict_object * avp_logging_duration;
struct dict_object * avp_apn_aggregate_max_bitrate_dl;
struct dict_object * avp_conditional_apn_aggregate_max_bitrate;
struct dict_object * avp_access_network_charging_identifier_gx;
struct dict_object * avp_positioning_method;
struct dict_object * avp_oc_olr;
struct dict_object * avp_routing_rule_install;
struct dict_object * avp_presence_reporting_area_status;
struct dict_object * avp_trace_data;
struct dict_object * avp_sourceid;
struct dict_object * avp_carrier_frequency;
struct dict_object * avp_mbsfn_area_id;
struct dict_object * avp_subscription_id_type;
struct dict_object * avp_usage_monitoring_level;
struct dict_object * avp_bearer_identifier;
struct dict_object * avp_sponsor_identity;
struct dict_object * avp_oc_reduction_percentage;
struct dict_object * avp_default_qos_name;
struct dict_object * avp_routing_rule_definition;
struct dict_object * avp_traffic_steering_policy_identifier_ul;
struct dict_object * avp_mdt_configuration;
struct dict_object * avp_error_reporting_host;
struct dict_object * avp_charging_rule_remove;
struct dict_object * avp_charging_correlation_indicator;
struct dict_object * avp_nbifom_support;
struct dict_object * avp_max_plr_dl;
struct dict_object * avp_event_threshold_event_1i;
struct dict_object * avp_rating_group;
struct dict_object * avp_rat_type;
struct dict_object * avp_event_charging_timestamp;
struct dict_object * avp_default_access;
struct dict_object * avp_event_threshold_event_1f;
struct dict_object * avp_reporting_level;
struct dict_object * avp_allocation_retention_priority;
struct dict_object * avp_bearer_control_mode;
struct dict_object * avp_cell_global_identity;
struct dict_object * avp_max_plr_ul;
struct dict_object * avp_oc_validity_duration;
struct dict_object * avp_application_service_provider_identity;
struct dict_object * avp_csg_membership_indication;
struct dict_object * avp_flow_direction;
struct dict_object * avp_sharing_key_dl;
struct dict_object * avp_default_eps_bearer_qos;
struct dict_object * avp_trace_ne_type_list;
struct dict_object * avp_extended_max_requested_bw_dl;
struct dict_object * avp_redirect_host;
struct dict_object * avp_measurement_period_lte;
struct dict_object * avp_routing_rule_report;
struct dict_object * avp_max_requested_bandwidth_dl;
struct dict_object * avp_user_equipment_info;
struct dict_object * avp_quota_consumption_time;
struct dict_object * avp_origin_state_id;
struct dict_object * avp_qos_negotiation;
struct dict_object * avp_cc_output_octets;
struct dict_object * avp_ran_nas_release_cause;
struct dict_object * avp_sharing_key_ul;
struct dict_object * avp_netloc_access_support;
struct dict_object * avp_trace_event_list;
struct dict_object * avp_supported_features;
struct dict_object * avp_3gpp_user_location_info;
struct dict_object * avp_value_digits;
struct dict_object * avp_security_parameter_index;
struct dict_object * avp_result_code;
struct dict_object * avp_trace_interface_list;
struct dict_object * avp_fixed_user_location_info;
struct dict_object * avp_default_qos_information;
struct dict_object * avp_traffic_steering_policy_identifier_dl;
struct dict_object * avp_redirect_max_cache_time;
struct dict_object * avp_rule_activation_time;
struct dict_object * avp_load;
struct dict_object * avp_3gpp_ggsn_address;
struct dict_object * avp_redirect_server;
struct dict_object * avp_an_trusted;
struct dict_object * avp_e_utran_cell_global_identity;
struct dict_object * avp_called_station_id;
struct dict_object * avp_csg_id;
struct dict_object * avp_framed_ip_address;
struct dict_object * avp_oc_supported_features;
struct dict_object * avp_packet_filter_identifier;
struct dict_object * avp_pcc_rule_status;
struct dict_object * avp_tdf_application_instance_identifier;
struct dict_object * avp_proxy_host;
struct dict_object * avp_event_threshold_rsrp;
struct dict_object * avp_event_threshold_rsrq;
struct dict_object * avp_packet_filter_information;
struct dict_object * avp_subscription_id;
struct dict_object * avp_experimental_result_code;
struct dict_object * avp_collection_period_rrm_lte;
struct dict_object * avp_pdn_connection_id;
struct dict_object * avp_access_network_charging_address;
struct dict_object * avp_auth_application_id;
struct dict_object * avp_revalidation_time;
struct dict_object * avp_execution_time;
struct dict_object * avp_event_trigger;
struct dict_object * avp_extended_max_requested_bw_ul;
struct dict_object * avp_presence_reporting_area_elements_list;
struct dict_object * avp_charging_information;
struct dict_object * avp_monitoring_key;
struct dict_object * avp_3gpp_ms_timezone;
struct dict_object * avp_charging_rule_name;
struct dict_object * avp_access_availability_change_reason;
struct dict_object * avp_dynamic_address_flag;
struct dict_object * avp_monitoring_flags;
struct dict_object * avp_collection_period_rrm_umts;
struct dict_object * avp_usage_monitoring_information;
struct dict_object * avp_charging_rule_report;
struct dict_object * avp_ip_can_type;
struct dict_object * avp_offline;
struct dict_object * avp_udp_source_port;
struct dict_object * avp_routing_ip_address;
struct dict_object * avp_redirect_information;
struct dict_object * avp_mute_notification;
struct dict_object * avp_media_component_number;
struct dict_object * avp_tariff_time_change;
struct dict_object * avp_error_message;
struct dict_object * avp_credit_management_status;
struct dict_object * avp_required_access_info;
struct dict_object * avp_ip_can_session_charging_scope;
struct dict_object * avp_reporting_trigger;
struct dict_object * avp_failed_avp;
struct dict_object * avp_routing_area_identity;
struct dict_object * avp_routing_rule_remove;
struct dict_object * avp_tft_filter;
struct dict_object * avp_trace_reference;
struct dict_object * avp_cc_service_specific_units;
struct dict_object * avp_cc_time;
struct dict_object * avp_currency_code;
struct dict_object * avp_cc_input_octets;
struct dict_object * avp_measurement_quantity;
struct dict_object * avp_removal_of_access;
struct dict_object * avp_routing_filter;
struct dict_object * avp_trace_depth;
struct dict_object * avp_proxy_state;
struct dict_object * avp_rule_failure_code;
struct dict_object * avp_af_charging_identifier;
struct dict_object * avp_tunnel_header_length;
struct dict_object * avp_routing_rule_failure_code;
struct dict_object * avp_coa_information;
struct dict_object * avp_default_bearer_indication;
struct dict_object * avp_vendor_id;
struct dict_object * avp_granted_service_unit;
struct dict_object * avp_max_requested_bandwidth_ul;
struct dict_object * avp_oc_sequence_number;
struct dict_object * avp_routing_rule_identifier;
struct dict_object * avp_redirect_support;
struct dict_object * avp_destination_realm;
struct dict_object * avp_session_id;
struct dict_object * avp_tos_traffic_class;
struct dict_object * avp_origination_time_stamp;
struct dict_object * avp_bssid;
struct dict_object * avp_cc_money;
struct dict_object * avp_application_detection_information;
struct dict_object * avp_qos_upgrade;
struct dict_object * avp_tariff_change_usage;
struct dict_object * avp_report_amount;
struct dict_object * avp_primary_event_charging_function_name;
struct dict_object * avp_cc_total_octets;
struct dict_object * avp_measurement_period_umts;
struct dict_object * avp_flows;
struct dict_object * avp_ps_to_cs_session_continuity;
struct dict_object * avp_primary_charging_collection_function_name;
struct dict_object * avp_user_csg_information;
/* Extracted AVP data, mirroring the avp_* handles above in the same order. */
struct dict_avp_data davp_tdf_destination_host;
struct dict_avp_data davp_pre_emption_capability;
struct dict_avp_data davp_packet_filter_content;
struct dict_avp_data davp_feature_list_id;
struct dict_avp_data davp_resource_release_notification;
struct dict_avp_data davp_service_identifier;
struct dict_avp_data davp_physical_access_id;
struct dict_avp_data davp_csg_access_mode;
struct dict_avp_data davp_henb_local_ip_address;
struct dict_avp_data davp_dynamic_address_flag_extension;
struct dict_avp_data davp_apn_aggregate_max_bitrate_ul;
struct dict_avp_data davp_network_request_support;
struct dict_avp_data davp_termination_cause;
struct dict_avp_data davp_exponent;
struct dict_avp_data davp_3gpp_rat_type;
struct dict_avp_data davp_af_signalling_protocol;
struct dict_avp_data davp_packet_filter_usage;
struct dict_avp_data davp_usage_monitoring_support;
struct dict_avp_data davp_tracking_area_identity;
struct dict_avp_data davp_load_value;
struct dict_avp_data davp_feature_list;
struct dict_avp_data davp_omc_id;
struct dict_avp_data davp_rai;
struct dict_avp_data davp_oc_report_type;
struct dict_avp_data davp_experimental_result;
struct dict_avp_data davp_cc_request_type;
struct dict_avp_data davp_service_context_id;
struct dict_avp_data davp_secondary_event_charging_function_name;
struct dict_avp_data davp_pcscf_restoration_indication;
struct dict_avp_data davp_tdf_ip_address;
struct dict_avp_data davp_load_type;
struct dict_avp_data davp_extended_gbr_dl;
struct dict_avp_data davp_oc_feature_vector;
struct dict_avp_data davp_origin_host;
struct dict_avp_data davp_pra_remove;
struct dict_avp_data davp_maximum_wait_time;
struct dict_avp_data davp_list_of_measurements;
struct dict_avp_data davp_qos_information;
struct dict_avp_data davp_final_unit_action;
struct dict_avp_data davp_conditional_policy_information;
struct dict_avp_data davp_drmp;
struct dict_avp_data davp_pra_install;
struct dict_avp_data davp_logical_access_id;
struct dict_avp_data davp_resource_allocation_notification;
struct dict_avp_data davp_rule_deactivation_time;
struct dict_avp_data davp_flow_status;
struct dict_avp_data davp_priority_level;
struct dict_avp_data davp_pre_emption_vulnerability;
struct dict_avp_data davp_bearer_usage;
struct dict_avp_data davp_reporting_reason;
struct dict_avp_data davp_qos_class_identifier;
struct dict_avp_data davp_3gpp_sgsn_mcc_mnc;
struct dict_avp_data davp_area_scope;
struct dict_avp_data davp_re_auth_request_type;
struct dict_avp_data davp_precedence;
struct dict_avp_data davp_flow_number;
struct dict_avp_data davp_pdn_connection_charging_id;
struct dict_avp_data davp_3gpp_ps_data_off_status;
struct dict_avp_data davp_redirect_host_usage;
struct dict_avp_data davp_an_gw_address;
struct dict_avp_data davp_tunnel_header_filter;
struct dict_avp_data davp_access_network_charging_identifier_value;
struct dict_avp_data davp_secondary_charging_collection_function_name;
struct dict_avp_data davp_tcp_source_port;
struct dict_avp_data davp_destination_host;
struct dict_avp_data davp_3gpp_selection_mode;
struct dict_avp_data davp_location_area_identity;
struct dict_avp_data davp_logging_interval;
struct dict_avp_data davp_flow_information;
struct dict_avp_data davp_ue_local_ip_address;
struct dict_avp_data davp_extended_apn_ambr_dl;
struct dict_avp_data davp_tdf_application_identifier;
struct dict_avp_data davp_tunnel_information;
struct dict_avp_data davp_media_component_status;
struct dict_avp_data davp_tft_packet_filter_information;
struct dict_avp_data davp_guaranteed_bitrate_ul;
struct dict_avp_data davp_online;
struct dict_avp_data davp_mbsfn_area;
struct dict_avp_data davp_extended_apn_ambr_ul;
struct dict_avp_data davp_extended_gbr_ul;
struct dict_avp_data davp_content_version;
struct dict_avp_data davp_usage_monitoring_report;
struct dict_avp_data davp_event_report_indication;
struct dict_avp_data davp_job_type;
struct dict_avp_data davp_bearer_operation;
struct dict_avp_data davp_user_equipment_info_type;
struct dict_avp_data davp_tdf_information;
struct dict_avp_data davp_cc_request_number;
struct dict_avp_data davp_framed_ipv6_prefix;
struct dict_avp_data davp_packet_filter_operation;
struct dict_avp_data davp_coa_ip_address;
struct dict_avp_data davp_3gpp_charging_characteristics;
struct dict_avp_data davp_proxy_info;
struct dict_avp_data davp_used_service_unit;
struct dict_avp_data davp_charging_rule_install;
struct dict_avp_data davp_mdt_allowed_plmn_id;
struct dict_avp_data davp_origin_realm;
struct dict_avp_data davp_twan_identifier;
struct dict_avp_data davp_charging_rule_definition;
struct dict_avp_data davp_flow_label;
struct dict_avp_data davp_3gpp_ggsn_ipv6_address;
struct dict_avp_data davp_guaranteed_bitrate_dl;
struct dict_avp_data davp_restriction_filter_rule;
struct dict_avp_data davp_3gpp_sgsn_address;
struct dict_avp_data davp_redirect_address_type;
struct dict_avp_data davp_tdf_destination_realm;
struct dict_avp_data davp_user_location_info_time;
struct dict_avp_data davp_subscription_id_data;
struct dict_avp_data davp_redirect_server_address;
struct dict_avp_data davp_nbifom_mode;
struct dict_avp_data davp_final_unit_indication;
struct dict_avp_data davp_3gpp_sgsn_ipv6_address;
struct dict_avp_data davp_3gpp2_bsid;
struct dict_avp_data davp_trace_collection_entity;
struct dict_avp_data davp_session_release_cause;
struct dict_avp_data davp_ran_rule_support;
struct dict_avp_data davp_unit_value;
struct dict_avp_data davp_charging_rule_base_name;
struct dict_avp_data davp_report_interval;
struct dict_avp_data davp_presence_reporting_area_node;
struct dict_avp_data davp_user_equipment_info_value;
struct dict_avp_data davp_route_record;
struct dict_avp_data davp_presence_reporting_area_identifier;
struct dict_avp_data davp_csg_information_reporting;
struct dict_avp_data davp_filter_id;
struct dict_avp_data davp_presence_reporting_area_information;
struct dict_avp_data davp_an_gw_status;
struct dict_avp_data davp_ssid;
struct dict_avp_data davp_metering_method;
struct dict_avp_data davp_flow_description;
struct dict_avp_data davp_logging_duration;
struct dict_avp_data davp_apn_aggregate_max_bitrate_dl;
struct dict_avp_data davp_conditional_apn_aggregate_max_bitrate;
struct dict_avp_data davp_access_network_charging_identifier_gx;
struct dict_avp_data davp_positioning_method;
struct dict_avp_data davp_oc_olr;
struct dict_avp_data davp_routing_rule_install;
struct dict_avp_data davp_presence_reporting_area_status;
struct dict_avp_data davp_trace_data;
struct dict_avp_data davp_sourceid;
struct dict_avp_data davp_carrier_frequency;
struct dict_avp_data davp_mbsfn_area_id;
struct dict_avp_data davp_subscription_id_type;
struct dict_avp_data davp_usage_monitoring_level;
struct dict_avp_data davp_bearer_identifier;
struct dict_avp_data davp_sponsor_identity;
struct dict_avp_data davp_oc_reduction_percentage;
struct dict_avp_data davp_default_qos_name;
struct dict_avp_data davp_routing_rule_definition;
struct dict_avp_data davp_traffic_steering_policy_identifier_ul;
struct dict_avp_data davp_mdt_configuration;
struct dict_avp_data davp_error_reporting_host;
struct dict_avp_data davp_charging_rule_remove;
struct dict_avp_data davp_charging_correlation_indicator;
struct dict_avp_data davp_nbifom_support;
struct dict_avp_data davp_max_plr_dl;
struct dict_avp_data davp_event_threshold_event_1i;
struct dict_avp_data davp_rating_group;
struct dict_avp_data davp_rat_type;
struct dict_avp_data davp_event_charging_timestamp;
struct dict_avp_data davp_default_access;
struct dict_avp_data davp_event_threshold_event_1f;
struct dict_avp_data davp_reporting_level;
struct dict_avp_data davp_allocation_retention_priority;
struct dict_avp_data davp_bearer_control_mode;
struct dict_avp_data davp_cell_global_identity;
struct dict_avp_data davp_max_plr_ul;
struct dict_avp_data davp_oc_validity_duration;
struct dict_avp_data davp_application_service_provider_identity;
struct dict_avp_data davp_csg_membership_indication;
struct dict_avp_data davp_flow_direction;
struct dict_avp_data davp_sharing_key_dl;
struct dict_avp_data davp_default_eps_bearer_qos;
struct dict_avp_data davp_trace_ne_type_list;
struct dict_avp_data davp_extended_max_requested_bw_dl;
struct dict_avp_data davp_redirect_host;
struct dict_avp_data davp_measurement_period_lte;
struct dict_avp_data davp_routing_rule_report;
struct dict_avp_data davp_max_requested_bandwidth_dl;
struct dict_avp_data davp_user_equipment_info;
struct dict_avp_data davp_quota_consumption_time;
struct dict_avp_data davp_origin_state_id;
struct dict_avp_data davp_qos_negotiation;
struct dict_avp_data davp_cc_output_octets;
struct dict_avp_data davp_ran_nas_release_cause;
struct dict_avp_data davp_sharing_key_ul;
struct dict_avp_data davp_netloc_access_support;
struct dict_avp_data davp_trace_event_list;
struct dict_avp_data davp_supported_features;
struct dict_avp_data davp_3gpp_user_location_info;
struct dict_avp_data davp_value_digits;
struct dict_avp_data davp_security_parameter_index;
struct dict_avp_data davp_result_code;
struct dict_avp_data davp_trace_interface_list;
struct dict_avp_data davp_fixed_user_location_info;
struct dict_avp_data davp_default_qos_information;
struct dict_avp_data davp_traffic_steering_policy_identifier_dl;
struct dict_avp_data davp_redirect_max_cache_time;
struct dict_avp_data davp_rule_activation_time;
struct dict_avp_data davp_load;
struct dict_avp_data davp_3gpp_ggsn_address;
struct dict_avp_data davp_redirect_server;
struct dict_avp_data davp_an_trusted;
struct dict_avp_data davp_e_utran_cell_global_identity;
struct dict_avp_data davp_called_station_id;
struct dict_avp_data davp_csg_id;
struct dict_avp_data davp_framed_ip_address;
struct dict_avp_data davp_oc_supported_features;
struct dict_avp_data davp_packet_filter_identifier;
struct dict_avp_data davp_pcc_rule_status;
struct dict_avp_data davp_tdf_application_instance_identifier;
struct dict_avp_data davp_proxy_host;
struct dict_avp_data davp_event_threshold_rsrp;
struct dict_avp_data davp_event_threshold_rsrq;
struct dict_avp_data davp_packet_filter_information;
struct dict_avp_data davp_subscription_id;
struct dict_avp_data davp_experimental_result_code;
struct dict_avp_data davp_collection_period_rrm_lte;
struct dict_avp_data davp_pdn_connection_id;
struct dict_avp_data davp_access_network_charging_address;
struct dict_avp_data davp_auth_application_id;
struct dict_avp_data davp_revalidation_time;
struct dict_avp_data davp_execution_time;
struct dict_avp_data davp_event_trigger;
struct dict_avp_data davp_extended_max_requested_bw_ul;
struct dict_avp_data davp_presence_reporting_area_elements_list;
struct dict_avp_data davp_charging_information;
struct dict_avp_data davp_monitoring_key;
struct dict_avp_data davp_3gpp_ms_timezone;
struct dict_avp_data davp_charging_rule_name;
struct dict_avp_data davp_access_availability_change_reason;
struct dict_avp_data davp_dynamic_address_flag;
struct dict_avp_data davp_monitoring_flags;
struct dict_avp_data davp_collection_period_rrm_umts;
struct dict_avp_data davp_usage_monitoring_information;
struct dict_avp_data davp_charging_rule_report;
struct dict_avp_data davp_ip_can_type;
struct dict_avp_data davp_offline;
struct dict_avp_data davp_udp_source_port;
struct dict_avp_data davp_routing_ip_address;
struct dict_avp_data davp_redirect_information;
struct dict_avp_data davp_mute_notification;
struct dict_avp_data davp_media_component_number;
struct dict_avp_data davp_tariff_time_change;
struct dict_avp_data davp_error_message;
struct dict_avp_data davp_credit_management_status;
struct dict_avp_data davp_required_access_info;
struct dict_avp_data davp_ip_can_session_charging_scope;
struct dict_avp_data davp_reporting_trigger;
struct dict_avp_data davp_failed_avp;
struct dict_avp_data davp_routing_area_identity;
struct dict_avp_data davp_routing_rule_remove;
struct dict_avp_data davp_tft_filter;
struct dict_avp_data davp_trace_reference;
struct dict_avp_data davp_cc_service_specific_units;
struct dict_avp_data davp_cc_time;
struct dict_avp_data davp_currency_code;
struct dict_avp_data davp_cc_input_octets;
struct dict_avp_data davp_measurement_quantity;
struct dict_avp_data davp_removal_of_access;
struct dict_avp_data davp_routing_filter;
struct dict_avp_data davp_trace_depth;
struct dict_avp_data davp_proxy_state;
struct dict_avp_data davp_rule_failure_code;
struct dict_avp_data davp_af_charging_identifier;
struct dict_avp_data davp_tunnel_header_length;
struct dict_avp_data davp_routing_rule_failure_code;
struct dict_avp_data davp_coa_information;
struct dict_avp_data davp_default_bearer_indication;
struct dict_avp_data davp_vendor_id;
struct dict_avp_data davp_granted_service_unit;
struct dict_avp_data davp_max_requested_bandwidth_ul;
struct dict_avp_data davp_oc_sequence_number;
struct dict_avp_data davp_routing_rule_identifier;
struct dict_avp_data davp_redirect_support;
struct dict_avp_data davp_destination_realm;
struct dict_avp_data davp_session_id;
struct dict_avp_data davp_tos_traffic_class;
struct dict_avp_data davp_origination_time_stamp;
struct dict_avp_data davp_bssid;
struct dict_avp_data davp_cc_money;
struct dict_avp_data davp_application_detection_information;
struct dict_avp_data davp_qos_upgrade;
struct dict_avp_data davp_tariff_change_usage;
struct dict_avp_data davp_report_amount;
struct dict_avp_data davp_primary_event_charging_function_name;
struct dict_avp_data davp_cc_total_octets;
struct dict_avp_data davp_measurement_period_umts;
struct dict_avp_data davp_flows;
struct dict_avp_data davp_ps_to_cs_session_continuity;
struct dict_avp_data davp_primary_charging_collection_function_name;
struct dict_avp_data davp_user_csg_information;
} GxDict;
extern GxDict gxDict;
extern int gxInit (void);
extern int gxRegister (void);
extern int gx_rar_cb (struct msg ** msg, struct avp * avp, struct session * sess, void * data, enum disp_action * act);
extern int gx_raa_cb (struct msg ** msg, struct avp * avp, struct session * sess, void * data, enum disp_action * act);
extern int gx_cca_cb (struct msg ** msg, struct avp * avp, struct session * sess, void * data, enum disp_action * act);
extern int gx_ccr_cb (struct msg ** msg, struct avp * avp, struct session * sess, void * data, enum disp_action * act);
extern int gx_send_rar (void *data);
extern int gx_send_ccr (void *data);
extern int gx_rar_parse(struct msg * msg, GxRAR *rar);
extern int gx_raa_parse(struct msg * msg, GxRAA *raa);
extern int gx_cca_parse(struct msg * msg, GxCCA *cca);
extern int gx_ccr_parse(struct msg * msg, GxCCR *ccr);
extern int gx_rar_free (GxRAR *rar);
extern int gx_raa_free (GxRAA *raa);
extern int gx_cca_free (GxCCA *cca);
extern int gx_ccr_free (GxCCR *ccr);
extern uint32_t gx_rar_calc_length (GxRAR *rar);
extern uint32_t gx_raa_calc_length (GxRAA *raa);
extern uint32_t gx_cca_calc_length (GxCCA *cca);
extern uint32_t gx_ccr_calc_length (GxCCR *ccr);
extern int gx_rar_pack (GxRAR *rar, unsigned char *buf, uint32_t buflen);
extern int gx_raa_pack (GxRAA *raa, unsigned char *buf, uint32_t buflen);
extern int gx_cca_pack (GxCCA *cca, unsigned char *buf, uint32_t buflen);
extern int gx_ccr_pack (GxCCR *ccr, unsigned char *buf, uint32_t buflen);
extern int gx_rar_unpack (unsigned char *buf, GxRAR *rar);
extern int gx_raa_unpack (unsigned char *buf, GxRAA *raa);
extern int gx_cca_unpack (unsigned char *buf, GxCCA *cca);
extern int gx_ccr_unpack (unsigned char *buf, GxCCR *ccr);
int gx_send_raa(void *data);
int recv_msg_handler( int sock );
int unixsock(void);
#endif /* __GX_H__ */
|
nikhilc149/e-utran-features-bug-fixes | ulpc/legacy_admf_interface/include/Common.h | /*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __COMMON_H_
#define __COMMON_H_
#include <iostream>
#include "rapidjson/filereadstream.h"
#include "rapidjson/document.h"
#include "rapidjson/prettywriter.h"
#include "rapidjson/pointer.h"
//#define RAPIDJSON_NAMESPACE epctoolsrapidjson
#define ADD 1
#define UPDATE 2
#define DELETE 3
#define ADD_URI "/addueentry"
#define UPDATE_URI "/updateueentry"
#define DELETE_URI "/deleteueentry"
#define ZERO 0
#define RETURN_SUCCESS 0
#define RETURN_FAILURE -1
#define HTTP "http://"
#define COLON ":"
#define ACK_POST "/ack"
#define ACK_KEY "ack"
#define SEQ_ID_KEY "sequenceId"
#define IMSI_KEY "imsi"
#define REQUEST_TYPE_KEY "requestType"
#define ADMF_PACKET 10
#define ADMF_INTFC_PACKET 20
#define LEGACY_ADMF_ACK 30
#define ADMF_ACK 40
#define LEGACY_ADMF_PACKET 50
#define ADMF_INTFC_ACK 60
#define LOG_INTFC 3
#define SAFE_DELETE_PTR(p) { if (p) { delete(p); (p) = NULL; }}
/* Runtime configuration for the legacy-ADMF interface process: peer
 * addresses/ports and the transport used towards the legacy ADMF.
 * Values are presumably loaded from a config file at startup — the
 * loader is not visible here. */
typedef struct config {
	std::string admfIp;            /* ADMF IP address (used with admfPort) */
	std::string legacyAdmfIp;      /* legacy ADMF peer IP address */
	uint16_t admfPort;             /* ADMF port */
	uint16_t legacyAdmfPort;       /* legacy ADMF peer port */
	uint16_t legacyAdmfIntfcPort;  /* local listen port of this interface component */
	enum protocol {tcp, udp, rest};  /* supported transports towards legacy ADMF */
	protocol interfaceProtocol;    /* transport actually selected */
} config_t;
#pragma pack(push, 1)  /* wire format: no padding between fields */
/* Packet exchanged between ADMF and this interface carrying one UE entry.
 * packetType/requestType take the ADMF_*_PACKET / ADD|UPDATE|DELETE
 * constants defined above. */
typedef struct admfIntfcPacket {
	uint32_t packetLength;     /* total length of this packet in bytes */
	struct ueEntry {
		uint64_t seqId;        /* sequence id correlating request and ack */
		uint64_t imsi;         /* subscriber IMSI */
		uint16_t packetType;   /* e.g. ADMF_PACKET, LEGACY_ADMF_ACK, ... */
		uint16_t requestType;  /* ADD / UPDATE / DELETE */
		/* 21-byte fixed buffers — presumably NUL-terminated timestamp
		 * strings ("YYYY-MM-DD HH:MM:SS"); format not visible here, verify
		 * against the sender. */
		UChar startTime[21];
		UChar stopTime[21];
	} ue_entry_t;
} admf_intfc_packet_t;
#pragma pack(pop)
#pragma pack(push, 1)  /* wire format: no padding between fields */
/* UE database record with a variable-length request body appended
 * immediately after the fixed header. */
struct UeDatabase {
	uint32_t packetLen;        /* total length of the record in bytes */
	struct ueEntry {
		uint16_t requestType;    /* ADD / UPDATE / DELETE */
		uint16_t packetType;     /* packet-type discriminator (ADMF_* constants) */
		uint16_t bodyLength;     /* number of valid bytes in requestBody */
		uint16_t requestStatus;  /* result/ack status of the request */
		/* Trailing variable-length payload. NOTE(review): `[0]` is a
		 * zero-length-array extension; the standard C99/C++ equivalent is a
		 * flexible array member (`UChar requestBody[];`) — left unchanged
		 * because the exact wire layout must not move. */
		UChar requestBody[0];
	} ue_entry_t;
};
#pragma pack(pop)
#endif /* endif __COMMON_H_ */
|
nikhilc149/e-utran-features-bug-fixes | cp/gtpv2c_messages/release_access_bearer.c | <gh_stars>0
/*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ue.h"
#include "pfcp.h"
#include "cp_stats.h"
#include "cp_config.h"
#include "sm_struct.h"
#include "pfcp_util.h"
#include "pfcp_session.h"
#include "pfcp_messages.h"
#include "gtpv2c_set_ie.h"
#include "pfcp_messages_encoder.h"
#include "../cp_dp_api/vepc_cp_dp_api.h"
#include "cdr.h"
#include "pfcp_enum.h"
#ifdef CP_BUILD
#include "cp_timer.h"
#endif /* CP_BUILD */
#define UPD_PARAM_HEADER_SIZE (4)
#define NR_RAT_TYPE (10)
#define RESET_TEID (0)
#define RAT_TYPE_VALUE (0)
extern int pfcp_fd;
extern int pfcp_fd_v6;
extern int clSystemLog;
/**
 * @brief : Processes a GTPv2-C Release Access Bearer Request from the MME.
 *          For every active PDN connection of the UE it:
 *            - resets the downlink F-TEID on each bearer (S11-U MME TEID when
 *              the s11tf indication flag is set, otherwise the S1-U eNB TEID),
 *            - rewrites the core-side FAR to buffer downlink packets and
 *              notify the CP (BUFF + NOCP set, FORW cleared),
 *            - sends a PFCP Session Modification Request to the UP and stores
 *              the transaction state in the session response context.
 *          After the last PDN has been handled, any Secondary RAT Usage Data
 *          Reports carried in the request are converted into CDRs.
 * @param : rel_acc_ber_req, decoded Release Access Bearer Request message
 * @param : proc, ongoing procedure identifier recorded for response handling
 * @return : 0 on success, a GTPV2C_CAUSE_* value on failure
 */
int
process_release_access_bearer_request(rel_acc_ber_req_t *rel_acc_ber_req, uint8_t proc)
{
	pdn_connection *pdn = NULL;
	ue_context *context = NULL;
	eps_bearer *bearer = NULL;
	pfcp_sess_mod_req_t pfcp_sess_mod_req = {0};
	struct resp_info *resp = NULL;
	uint32_t seq = 0;
	int ret = 0;
	uint8_t pdn_counter = 0;
	int ebi_index = 0;
	node_address_t node_value = {0};

	/* Locate the UE context by the S11 TEID carried in the GTPv2-C header. */
	ret = rte_hash_lookup_data(ue_context_by_fteid_hash,
			(const void *) &rel_acc_ber_req->header.teid.has_teid.teid,
			(void **) &context);
	if (ret < 0 || !context)
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;

	/* Remember the ARRL indication flag if the IE is present. */
	if(rel_acc_ber_req->indctn_flgs.header.len != 0) {
		context->indication_flag.arrl = rel_acc_ber_req->indctn_flgs.indication_arrl;
	}

	for(int itr_pdn = 0; itr_pdn < MAX_BEARERS; itr_pdn++) {
		pdn = context->pdns[itr_pdn];
		/* One PFCP Session Modification Request is built per PDN; restart
		 * the Update FAR count for each. */
		pfcp_sess_mod_req.update_far_count = 0;
		if(pdn) {
			for(int itr_bearer = 0 ; itr_bearer < MAX_BEARERS; itr_bearer++) {
				bearer = pdn->eps_bearers[itr_bearer];
				if(bearer) {
					/* UE is going idle: forget the downlink GTP-U TEID. */
					if(context->indication_flag.s11tf){
						bearer->s11u_mme_gtpu_teid = RESET_TEID;
					}else{
						bearer->s1u_enb_gtpu_teid = RESET_TEID;
					}
					for(uint8_t itr_pdr = 0; itr_pdr < bearer->pdr_count; itr_pdr++) {
						if(bearer->pdrs[itr_pdr] != NULL) {
							/* Only the core-facing (downlink) PDR's FAR is
							 * switched to buffer + notify. */
							if(bearer->pdrs[itr_pdr]->pdi.src_intfc.interface_value == SOURCE_INTERFACE_VALUE_CORE) {
								bearer->pdrs[itr_pdr]->far.actions.buff = TRUE;
								bearer->pdrs[itr_pdr]->far.actions.nocp = TRUE;
								bearer->pdrs[itr_pdr]->far.actions.forw = FALSE;

								set_update_far(&(pfcp_sess_mod_req.update_far[pfcp_sess_mod_req.update_far_count]),
										&bearer->pdrs[itr_pdr]->far);

								/* NOTE(review): the forwarding parameters are
								 * sized from the S1-U eNB IP even in the s11tf
								 * case; the outer-header fields are overwritten
								 * below — confirm the length stays valid for
								 * the S11-U address family. */
								uint16_t len = 0;
								len += set_upd_forwarding_param(&(pfcp_sess_mod_req.update_far
										[pfcp_sess_mod_req.update_far_count].upd_frwdng_parms),
										bearer->s1u_enb_gtpu_ip);
								len += UPD_PARAM_HEADER_SIZE;
								pfcp_sess_mod_req.update_far
									[pfcp_sess_mod_req.update_far_count].header.len += len;

								if(context->indication_flag.s11tf){
									/* S11-U tunnel towards the MME. */
									pfcp_sess_mod_req.update_far
										[pfcp_sess_mod_req.update_far_count].upd_frwdng_parms.outer_hdr_creation.teid
										= bearer->s11u_mme_gtpu_teid;
									/* (The original code tested the stale
									 * rte_hash_lookup_data result here; that
									 * dead check has been removed. The status
									 * of set_node_address() is not checked, as
									 * in the else branch below.) */
									set_node_address(&pfcp_sess_mod_req.update_far[pfcp_sess_mod_req.update_far_count].
											upd_frwdng_parms.outer_hdr_creation.ipv4_address,
											pfcp_sess_mod_req.update_far[pfcp_sess_mod_req.update_far_count].
											upd_frwdng_parms.outer_hdr_creation.ipv6_address,
											bearer->s11u_mme_gtpu_ip);
									pfcp_sess_mod_req.update_far
										[pfcp_sess_mod_req.update_far_count].upd_frwdng_parms.dst_intfc.interface_value =
										GTPV2C_IFTYPE_S1U_ENODEB_GTPU;
								}else {
									/* S1-U tunnel towards the eNodeB. */
									pfcp_sess_mod_req.update_far
										[pfcp_sess_mod_req.update_far_count].upd_frwdng_parms.outer_hdr_creation.teid =
										bearer->s1u_enb_gtpu_teid;
									set_node_address(&pfcp_sess_mod_req.update_far[pfcp_sess_mod_req.update_far_count].
											upd_frwdng_parms.outer_hdr_creation.ipv4_address,
											pfcp_sess_mod_req.update_far[pfcp_sess_mod_req.update_far_count].
											upd_frwdng_parms.outer_hdr_creation.ipv6_address,
											bearer->s1u_enb_gtpu_ip);
									pfcp_sess_mod_req.update_far
										[pfcp_sess_mod_req.update_far_count].upd_frwdng_parms.dst_intfc.interface_value =
										GTPV2C_IFTYPE_S1U_ENODEB_GTPU;
								}
								pfcp_sess_mod_req.update_far_count++;
								/* Only one core-side PDR per bearer is updated. */
								break;
							}
						}
					}
				}
			}
			context->sequence =
				rel_acc_ber_req->header.teid.has_teid.seq;

			/* Filling Node ID for F-SEID.
			 * NOTE(review): neither branch runs for a dual-stack
			 * (IPv4v6) upf_ip type, leaving node_value zeroed — confirm
			 * whether that case can occur here. */
			if (pdn->upf_ip.ip_type == PDN_IP_TYPE_IPV4) {
				uint8_t temp[IPV6_ADDRESS_LEN] = {0};
				ret = fill_ip_addr(config.pfcp_ip.s_addr, temp, &node_value);
				if (ret < 0) {
					clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
							"IP address", LOG_VALUE);
				}
			} else if (pdn->upf_ip.ip_type == PDN_IP_TYPE_IPV6) {
				ret = fill_ip_addr(0, config.pfcp_ip_v6.s6_addr, &node_value);
				if (ret < 0) {
					clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
							"IP address", LOG_VALUE);
				}
			}
			set_fseid(&(pfcp_sess_mod_req.cp_fseid), pdn->seid, node_value);

			/* Stamp the PFCP header and encode the request. */
			seq = get_pfcp_sequence_number(PFCP_SESSION_MODIFICATION_REQUEST, seq);
			set_pfcp_seid_header((pfcp_header_t *) &(pfcp_sess_mod_req.header), PFCP_SESSION_MODIFICATION_REQUEST,
					HAS_SEID, seq, context->cp_mode);
			pfcp_sess_mod_req.header.seid_seqno.has_seid.seid = pdn->dp_seid;

			uint8_t pfcp_msg[PFCP_MSG_LEN]={0};
			int encoded = encode_pfcp_sess_mod_req_t(&pfcp_sess_mod_req, pfcp_msg);

			if(pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded, upf_pfcp_sockaddr, SENT) < 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Failed to send"
						"PFCP Session Modification Request %i\n", LOG_VALUE, errno);
			} else {
				/* Record the pending transaction so the PFCP response can be
				 * matched back to this procedure. */
				if (get_sess_entry(pdn->seid, &resp) != 0) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
							"for seid: %lu", LOG_VALUE, pdn->seid);
					return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
				}
				reset_resp_info_structure(resp);

				ebi_index = GET_EBI_INDEX(pdn->default_bearer_id);
				if (ebi_index == -1) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
					return GTPV2C_CAUSE_SYSTEM_FAILURE;
				}

				/* Arm the PFCP retransmission timer for this request. */
				add_pfcp_if_timer_entry(rel_acc_ber_req->header.teid.has_teid.teid,
					&upf_pfcp_sockaddr, pfcp_msg, encoded, ebi_index);

				resp->msg_type = GTP_RELEASE_ACCESS_BEARERS_REQ;
				resp->state = PFCP_SESS_MOD_REQ_SNT_STATE;
				resp->proc = proc;
				memcpy(&resp->gtpc_msg.rel_acc_ber_req, rel_acc_ber_req, sizeof(rel_acc_ber_req_t));
				resp->cp_mode = context->cp_mode;
				pdn->state = PFCP_SESS_MOD_REQ_SNT_STATE;
				context->pfcp_sess_count++;
			}
			pdn_counter++;
			/* Once every PDN has been serviced, process the Secondary RAT
			 * Usage Data Reports (if any) and stop iterating. */
			if(pdn_counter == context->num_pdns) {
				for(uint8_t i =0; i< rel_acc_ber_req->second_rat_count; i++) {
					if(rel_acc_ber_req->secdry_rat_usage_data_rpt[i].irpgw == PRESENT) {
						/* IRPGW reports are a PGW concern; the MME should not
						 * send them here — log and ignore. */
						clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"IRPGW Flag is SET in the"
								" release access bearer request not expected from MME\n", LOG_VALUE);
					}
					if(rel_acc_ber_req->secdry_rat_usage_data_rpt[i].irsgw == PRESENT) {
						uint8_t trigg_buff[] = "secondary_rat_usage";
						cdr second_rat_data = {0};
						struct timeval unix_start_time;
						struct timeval unix_end_time;

						second_rat_data.cdr_type = CDR_BY_SEC_RAT;
						second_rat_data.change_rat_type_flag = PRESENT;
						/*rat type in sec_rat_usage_rpt is NR=0 i.e RAT is 10 as per spec 29.274*/
						second_rat_data.rat_type = (rel_acc_ber_req->secdry_rat_usage_data_rpt[i].secdry_rat_type ==
								RAT_TYPE_VALUE) ? NR_RAT_TYPE : RAT_TYPE_VALUE;
						second_rat_data.bearer_id = rel_acc_ber_req->secdry_rat_usage_data_rpt[i].ebi;
						second_rat_data.seid = pdn->seid;
						second_rat_data.imsi = pdn->context->imsi;
						second_rat_data.start_time = rel_acc_ber_req->secdry_rat_usage_data_rpt[i].start_timestamp;
						second_rat_data.end_time = rel_acc_ber_req->secdry_rat_usage_data_rpt[i].end_timestamp;
						second_rat_data.data_volume_uplink = rel_acc_ber_req->secdry_rat_usage_data_rpt[i].usage_data_ul;
						second_rat_data.data_volume_downlink = rel_acc_ber_req->secdry_rat_usage_data_rpt[i].usage_data_dl;

						/* Timestamps arrive in NTP format; duration is
						 * computed on the unix-time equivalents. */
						ntp_to_unix_time(&second_rat_data.start_time, &unix_start_time);
						ntp_to_unix_time(&second_rat_data.end_time, &unix_end_time);

						second_rat_data.sgw_addr.s_addr = config.pfcp_ip.s_addr;
						second_rat_data.duration_meas = unix_end_time.tv_sec - unix_start_time.tv_sec;
						second_rat_data.data_start_time = 0;
						second_rat_data.data_end_time = 0;
						second_rat_data.total_data_volume = second_rat_data.data_volume_uplink + second_rat_data.data_volume_downlink;

						memcpy(&second_rat_data.trigg_buff, &trigg_buff, sizeof(trigg_buff));
						if(generate_cdr_info(&second_rat_data) != 0) {
							clLog(clSystemLog, eCLSeverityCritical,
									LOG_FORMAT" failed to generate CDR\n",
									LOG_VALUE);
						}
					}
				}
				break;
			}
		}
	}
	return 0;
}
/**
 * @brief : Builds a Release Access Bearers Response carrying cause ACCEPTED
 *          and encodes it into the supplied transmit buffer.
 * @param : gtpv2c_tx, buffer into which the response message is encoded
 * @param : pdn, PDN connection providing the MME S11 TEID and the sequence
 *          number to echo back
 * @return : Returns nothing
 */
void
set_release_access_bearer_response(gtpv2c_header_t *gtpv2c_tx, pdn_connection *pdn) {

	release_access_bearer_resp_t rab_resp = {0};

	/* Header: address the response to the MME with the stored sequence. */
	set_gtpv2c_teid_header((gtpv2c_header_t *) &rab_resp,
			GTP_RELEASE_ACCESS_BEARERS_RSP,
			pdn->context->s11_mme_gtpc_teid,
			pdn->context->sequence, NOT_PIGGYBACKED);

	/* Mandatory Cause IE: request accepted. */
	set_cause_accepted(&rab_resp.cause, IE_INSTANCE_ZERO);

	encode_release_access_bearers_rsp(&rab_resp, (uint8_t *)gtpv2c_tx);
}
|
nikhilc149/e-utran-features-bug-fixes | pfcp_messages/pfcp_set_ie.h | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PFCP_SET_IE_H
#define PFCP_SET_IE_H
#include <stdbool.h>
#include <rte_hash_crc.h>
#include "pfcp_messages.h"
#include "interface.h"
#ifdef CP_BUILD
#include "ue.h"
#include "cp.h"
#include "gtp_ies.h"
#include "gtpv2c_set_ie.h"
#include "gtp_messages.h"
#include "../ipc/dp_ipc_api.h"
#include "ngic_timer.h"
#include "cp_app.h"
#else
#include "pfcp_struct.h"
#endif
/* TODO: Move following lines to another file */
#define HAS_SEID 1
#define NO_SEID 0
#define PRESENT 1
#define NO_FORW_ACTION 0
#define PFCP_VERSION (1)
#define OFFSET 2208988800ULL
/* PFCP Message Type Values */
/*NODE RELATED MESSAGED*/
#define PFCP_HEARTBEAT_REQUEST (1)
#define PFCP_HEARTBEAT_RESPONSE (2)
#define PFCP_PFD_MGMT_REQUEST (3)
#define PFCP_PFD_MANAGEMENT_RESPONSE (4)
#define PFCP_ASSOCIATION_SETUP_REQUEST (5)
#define PFCP_ASSOCIATION_SETUP_RESPONSE (6)
#define PFCP_ASSOCIATION_UPDATE_REQUEST (7)
#define PFCP_ASSOCIATION_UPDATE_RESPONSE (8)
#define PFCP_ASSOCIATION_RELEASE_REQUEST (9)
#define PFCP_ASSOCIATION_RELEASE_RESPONSE (10)
#define PFCP_NODE_REPORT_REQUEST (12)
#define PFCP_NODE_REPORT_RESPONSE (13)
#define PFCP_SESSION_SET_DELETION_REQUEST (14)
#define PFCP_SESSION_SET_DELETION_RESPONSE (15)
/*SESSION RELATED MESSAGES*/
#define PFCP_SESSION_ESTABLISHMENT_REQUEST (50)
#define PFCP_SESSION_ESTABLISHMENT_RESPONSE (51)
#define PFCP_SESSION_MODIFICATION_REQUEST (52)
#define PFCP_SESSION_MODIFICATION_RESPONSE (53)
#define PFCP_SESSION_DELETION_REQUEST (54)
#define PFCP_SESSION_DELETION_RESPONSE (55)
/* SESSION REPORT RELATED MESSAGES*/
#define PFCP_SESSION_REPORT_REQUEST (56)
#define PFCP_SESSION_REPORT_RESPONSE (57)
/* TODO: Move above lines to another file */
#define MAX_HOSTNAME_LENGTH (256)
#define MAX_GTPV2C_LENGTH (MAX_GTPV2C_UDP_LEN-sizeof(struct gtpc_t))
#define ALL_CPF_FEATURES_SUPPORTED (CP_LOAD | CP_OVRL)
/*UP FEATURES LIST*/
#define EMPU (1 << 0)
#define PDIU (1 << 1)
#define UDBC (1 << 2)
#define QUOAC (1 << 3)
#define TRACE (1 << 4)
#define FRRT (1 << 5)
#define BUCP (1 << 6)
#define DDND (1 << 9)
#define DLBD (1 << 10)
#define TRST (1 << 11)
#define FTUP (1 << 12)
#define PFDM (1 << 13)
#define HEEU (1 << 14)
#define TREU (1 << 15)
#define UINT8_SIZE sizeof(uint8_t)
#define UINT32_SIZE sizeof(uint32_t)
#define UINT16_SIZE sizeof(uint16_t)
#define IPV4_SIZE 4
#define IPV6_SIZE 16
#define BITRATE_SIZE 10
#define NUMBER_OF_HOSTS 16
#define UPF_ENTRIES_DEFAULT (1 << 16)
#define UPF_ENTRIES_BY_UE_DEFAULT (1 << 18)
#define BUFFERED_ENTRIES_DEFAULT (1024)
#define HEARTBEAT_ASSOCIATION_ENTRIES_DEFAULT (1 << 6)
#define SWGC_S5S8_HANDOVER_ENTRIES_DEFAULT (50)
#define USER_PLANE_IP_RESOURCE_INFO_COUNT_2 2
#define NO_CP_MODE_REQUIRED 0
#pragma pack(1)
/**
 * @brief : Maintains the context of pfcp interface
 */
typedef struct pfcp_context_t{
	uint16_t up_supported_features;  /* UP function features bitmap (EMPU/PDIU/... flags above) */
	uint8_t cp_supported_features;   /* CP function features bitmap (CP_LOAD | CP_OVRL) */
	uint32_t s1u_ip[20];             /* S1-U addresses; capacity 20 — indexing rules not visible here */
	uint32_t s5s8_sgwu_ip;           /* S5/S8 SGW-U address */
	uint32_t s5s8_pgwu_ip;           /* S5/S8 PGW-U address */
	struct in_addr ava_ip;           /* presumably an available/advertised IP — confirm at usage site */
	bool flag_ava_ip;                /* set when ava_ip holds a valid address */
} pfcp_context_t;
/* NOTE(review): variable *definition* in a header (tentative definition per
 * TU) — conventional practice is `extern` here plus one definition in a .c. */
pfcp_context_t pfcp_ctxt;
#ifdef CP_BUILD
/* State of the PFCP association with a UP node. */
typedef enum pfcp_assoc_status_en {
	ASSOC_IN_PROGRESS = 0,  /* association setup request sent, awaiting response */
	ASSOC_ESTABLISHED = 1,  /* association setup completed */
} pfcp_assoc_status_en;
/* Need to use this for upf_context */
extern uint32_t *g_gx_pending_csr[BUFFERED_ENTRIES_DEFAULT];
extern uint32_t g_gx_pending_csr_cnt;
/**
 * @brief : Maintains the Context for Gx interface
 */
typedef struct gx_context_t {
	/* CP Mode */
	uint8_t cp_mode;                  /* SGWC / PGWC / SAEGWC */
	uint8_t state;                    /* current state-machine state for this Gx session */
	uint8_t proc;                     /* ongoing procedure identifier */
	char gx_sess_id[GX_SESS_ID_LEN];  /* Gx session id string */
	unsigned long rqst_ptr; /*In Case of RAA, need to store RAR pointer*/
} gx_context_t;
/**
 * @brief : Maintains context of upf
 */
typedef struct upf_context_t {
	pfcp_assoc_status_en assoc_status;  /* PFCP association state with this UPF */
	uint8_t cp_mode;                    /* CP mode used for this association */
	uint32_t csr_cnt;                   /* number of buffered CSR TEIDs below */
	/* CSRs queued while the association is still in progress, replayed once
	 * it is established. */
	uint32_t *pending_csr_teid[BUFFERED_ENTRIES_DEFAULT];
	char fqdn[MAX_HOSTNAME_LENGTH];     /* UPF FQDN (e.g. from DNS selection) */
	uint16_t up_supp_features;          /* UP features advertised in assoc response */
	uint8_t cp_supp_features;           /* CP features advertised to the UPF */
	node_address_t s1u_ip;              /* UPF S1-U address */
	node_address_t s5s8_sgwu_ip;        /* UPF S5/S8 SGW-U address */
	/* Indirect Tunnel: Logical Intf */
	node_address_t s5s8_li_sgwu_ip;
	node_address_t s5s8_pgwu_ip;        /* UPF S5/S8 PGW-U address */
	uint8_t state;                      /* state-machine state for this UPF */
	uint8_t indir_tun_flag; /* flag for indirect tunnel */
	uint32_t sender_teid; /*sender teid for indirect tunnel */
	/* TEDIRI base value */
	uint8_t teidri;                     /* TEID range indication (number of bits) */
	uint8_t teid_range;                 /* TEID range value assigned to this UPF */
	/* Add timer_entry for pfcp assoc req */
	peerData *timer_entry;              /* retransmission timer for the assoc request */
} upf_context_t;
/**
 * @brief : Maintains of upf_ip
 */
/* Holds both address families for a UPF candidate; which one is valid is
 * indicated by upfs_dnsres_t.upf_ip_type. */
typedef struct upf_ip_t {
	struct in_addr ipv4;    /* IPv4 address of the UPF */
	struct in6_addr ipv6;   /* IPv6 address of the UPF */
} upf_ip_t;
/**
 * @brief : Maintains results returnd via dns for upf
 */
typedef struct upfs_dnsres_t {
	uint8_t upf_count;    /* number of valid entries in upf_ip/upf_fqdn */
	uint8_t current_upf;  /* index of the UPF currently being tried */
	uint8_t upf_ip_type;  /* address family discriminator for upf_ip entries */
	upf_ip_t upf_ip[NUMBER_OF_HOSTS];                       /* candidate UPF addresses */
	char upf_fqdn[NUMBER_OF_HOSTS][MAX_HOSTNAME_LENGTH];    /* corresponding FQDNs */
} upfs_dnsres_t;
#pragma pack()
/* upflist returned via DNS query */
/* NOTE(review): these are variable *definitions* in a header (tentative
 * definitions per translation unit); conventional practice is `extern`
 * declarations here with a single definition in one .c file. */
struct rte_hash *upflist_by_ue_hash;          /* UE (IMSI) -> upfs_dnsres_t */
struct rte_hash *upf_context_by_ip_hash;      /* UPF node address -> upf_context_t */
struct rte_hash *gx_context_by_sess_id_hash;  /* Gx session id -> gx_context_t */
#endif /* CP_BUILD */
/**
* @brief : Generates sequence number
* @param : No parameters
* @return : Returns generated sequence number
*/
uint32_t
generate_seq_no(void);
/**
* @brief : Generates sequence number for pfcp requests
* @param : type , pfcp request type
* @param : seq , default seq number
* @return : Returns generated sequence number
*/
uint32_t
get_pfcp_sequence_number(uint8_t type, uint32_t seq);
/**
* @brief : Set values in pfcp header
* @param : pfcp, pointer to pfcp header structure
* @param : type, pfcp message type
* @param : flag, pfcp flag
* @return : Returns nothing
*/
void
set_pfcp_header(pfcp_header_t *pfcp, uint8_t type, bool flag );
/**
* @brief : Set values in pfcp header and seid value
* @param : pfcp, pointer to pfcp header structure
* @param : type, pfcp message type
* @param : flag, pfcp flag
* @param : seq, pfcp message sequence number
* @param : cp_type, [SGWC/SAEGWC/PGWC]
* @return : Returns nothing
*/
void
set_pfcp_seid_header(pfcp_header_t *pfcp, uint8_t type, bool flag, uint32_t seq,
uint8_t cp_type);
/**
* @brief : Set values in ie header
* @param : header, pointer to pfcp ie header structure
* @param : type, pfcp message type
* @param : length, total length
* @return : Returns nothing
*/
void
pfcp_set_ie_header(pfcp_ie_header_t *header, uint8_t type, uint16_t length);
/**
* @brief : Process pfcp heartbeat request
* @param : peer_addr, peer node address
* @param : seq, sequence number
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_pfcp_heartbeat_req(peer_addr_t peer_addr, uint32_t seq);
#ifdef CP_BUILD
/**
* @brief : Process create session request, update ue context, bearer info
* @param : csr, holds information in csr
* @param : context, ue context structure pointer
* @param : upf_ipv4, upf ip
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_create_sess_req(create_sess_req_t *csr,
ue_context **context, node_address_t upf_ipv4, uint8_t cp_mode);
/**
* @brief : Process pfcp session association request
* @param : context, ue context structure pointer
* @param : ebi_index, index of ebi in array
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_pfcp_assoication_request(pdn_connection *pdn, int ebi_index);
/* TODO: Remove first param when autogenerated code for GTPV2-c
* is integrated.
*/
/**
* @brief : Process pfcp session establishment request
* @param : teid
* @param : ebi_index, index of ebi in array
* @param : upf_ctx, upf information
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_pfcp_sess_est_request(uint32_t teid, pdn_connection *pdn, upf_context_t *upf_ctx);
/**
* @brief : Retrives far id associated with pdr
* @param : bearer, bearer struture
* @param : interface_value, interface type access or core
* @return : Returns far id in case of success , 0 otherwise
*/
uint32_t get_far_id(eps_bearer *bearer, int interface_value);
/**
* @brief : Process pfcp session modification request
* @param : mbr, holds information in session modification request
* @param : context, ue_context
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_pfcp_sess_mod_request(mod_bearer_req_t *mbr, ue_context *context);
/**
* @brief : Process pfcp session modification request for SAEGWC and PGWC
* @param : mbr, holds information in session modification request
* @param : context, ue_context
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_pfcp_sess_mod_req_for_saegwc_pgwc(mod_bearer_req_t *mbr,
ue_context *context);
/**
* @brief : Process pfcp session modification request
* @param : mod_acc, modify_access_bearer req.
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_pfcp_mod_req_modify_access_req(mod_acc_bearers_req_t *mod_acc);
/**
* @brief : Process pfcp session modification request for handover scenario
* @param : pdn, pdn connection informatio
* @param : bearer, bearer information
* @param : mbr, holds information in session modification request
* @return : Returns 0 in case of success , -1 otherwise
*/
int
send_pfcp_sess_mod_req(pdn_connection *pdn, eps_bearer *bearer,
mod_bearer_req_t *mbr);
/**
* @brief : Process Delete session request and send PFCP session deletion on Sx
* @param : ds_req, holds information in session deletion request
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_delete_session_request(del_sess_req_t *ds_req, ue_context *context);
/**
* @brief : Process chnage notification request on sgwc
* @param : change_not_req , holds information of
* change notification request received from
* MME.
* @param : context, ue context
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_change_noti_request(change_noti_req_t *change_not_reqi, ue_context *context);
/**
* @brief : Process delete session request for delete bearer response case
* @param : db_rsp, holds information in deletion bearer response
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_pfcp_sess_del_request(del_sess_req_t *db_rsp, ue_context *context);
/**
* @brief : Process delete session request for delete bearer response case
* @param : db_rsp, holds information in deletion bearer response
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_pfcp_sess_del_request_delete_bearer_rsp(del_bearer_rsp_t *db_rsp);
/**
* @brief : Process delete session request on sgwc
* @param : ds_req, holds information in session deletion request
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_sgwc_delete_session_request(del_sess_req_t *ds_req, ue_context *context);
/**
* @brief : Set values in pdn type ie
* @param : pdn , pdn type ie structure pointer to be filled
* @param : pdn_mme, use values from this structure to fill
* @return : Returns nothing
*/
void
set_pdn_type(pfcp_pdn_type_ie_t *pdn, pdn_type_ie *pdn_mme);
/**
* @brief : Creates upf context hash
* @param : No param
* @return : Returns nothing
*/
void
create_upf_context_hash(void);
/**
* @brief : Creates gx conetxt hash
* @param : No param
* @return : Returns nothing
*/
void
create_gx_context_hash(void);
/**
* @brief : Creates upf hash using ue
* @param : No param
* @return : Returns nothing
*/
void
create_upf_by_ue_hash(void);
/**
* @brief : Processes pfcp session report request
* @param : pfcp_sess_rep_req, holds information in pfcp session report request
* @return : Returns 0 in case of success , -1 otherwise
*/
uint8_t
process_pfcp_report_req(pfcp_sess_rpt_req_t *pfcp_sess_rep_req);
#else
/**
* @brief : Set values in pdn type ie
* @param : pdn, pdn type ie structure to be filled
* @return : Returns nothing
*/
void
set_pdn_type(pfcp_pdn_type_ie_t *pdn);
/**
* @brief : Set values in user plane ip resource info ie
* @param : up_ip_resource_info, ie structure to be filled
* @param : i, interface type access or core
* @param : teidri_flag, 0 - Generate teidir.
* : 1 - No action for TEIDRI.
* @param : logical_iface: WB:1, EB:2
* @return : Returns nothing
*/
void
set_up_ip_resource_info(pfcp_user_plane_ip_rsrc_info_ie_t *up_ip_resource_info,
uint8_t i, int8_t teid_range, uint8_t logical_iface);
#endif /* CP_BUILD */
/**
* @brief : Set values in node id ie
* @param : node_id, ie structure to be filled
* @param : nodeid_value structure
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_node_id(pfcp_node_id_ie_t *node_id, node_address_t node_value);
/**
* @brief : Create and set values in create bar ie
* @param : create_bar, ie structure to be filled
* @return : Returns nothing
*/
void
creating_bar(pfcp_create_bar_ie_t *create_bar);
/**
* @brief : Set values in fq csid ie
* @param : fq_csid, ie structure to be filled
* @param : nodeid_value
* @return : Returns nothing
*/
void
set_fq_csid(pfcp_fqcsid_ie_t *fq_csid, uint32_t nodeid_value);
/**
* @brief : Set values in bar id ie
* @param : bar_id, ie structure to be filled
* @param : bar_id_value, bar id
* @return : Returns size of IE
*/
int
set_bar_id(pfcp_bar_id_ie_t *bar_id, uint8_t bar_id_value);
/**
* @brief : Set values in downlink data notification delay ie
* @param : dl_data_notification_delay, ie structure to be filled
* @return : Returns nothing
*/
void
set_dl_data_notification_delay(pfcp_dnlnk_data_notif_delay_ie_t
*dl_data_notification_delay);
/**
* @brief : Set values in buffer packets count ie
* @param : sgstd_buff_pkts_cnts, ie structure to be filled
* @param : pkt_cnt, suggested packet count
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_sgstd_buff_pkts_cnt(pfcp_suggstd_buf_pckts_cnt_ie_t *sgstd_buff_pkts_cnt,
uint8_t pkt_cnt);
/**
* @brief : Set values in buffer packets count ie
* @param : dl_buf_sgstd_pkts_cnts, ie structure to be filled
* @param : pkt_cnt, suggested packet count
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_dl_buf_sgstd_pkts_cnt(pfcp_dl_buf_suggstd_pckt_cnt_ie_t *dl_buf_sgstd_pkts_cnt,
uint8_t pkt_cnt);
#ifdef CP_BUILD
/**
* @brief : Set values in user id ie
* @param : user_id, ie structure to be filled
* @param : imsi, value to be set in user id structure to be filled
* @return : Returns nothing
*/
void
set_user_id(pfcp_user_id_ie_t *user_id, uint64_t imsi);
#endif /* CP_BUILD */
/**
* @brief : Set values in fseid ie
* @param : fseid, ie structure to be filled
* @param : seid, seid value
* @param : node_value structure
* @return : Returns nothing
*/
void
set_fseid(pfcp_fseid_ie_t *fseid, uint64_t seid, node_address_t node_value);
/**
* @brief : Set values in recovery time stamp ie
* @param : rec_time_stamp, ie structure to be filled
* @return : Returns nothing
*/
void
set_recovery_time_stamp(pfcp_rcvry_time_stmp_ie_t *rec_time_stamp);
/**
* @brief : Set values in upf features ie
* @param : upf_feat, ie structure to be filled
* @return : Returns nothing
*/
void
set_upf_features(pfcp_up_func_feat_ie_t *upf_feat);
/**
* @brief : Set values in control plane function feature ie
* @param : cpf_feat, ie structure to be filled
* @return : Returns nothing
*/
void
set_cpf_features(pfcp_cp_func_feat_ie_t *cpf_feat);
/**
* @brief : Set values session report type ie
* @param : rt, structure to be filled
* @return : Returns nothing
*/
void
set_sess_report_type(pfcp_report_type_ie_t *rt);
/**
* @brief : Set values in cause ie
* @param : cause, ie structure to be filled
* @param : cause_val, cause value to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_cause(pfcp_cause_ie_t *cause, uint8_t cause_val);
/**
* @brief : Set values in remove bar ie
* @param : remove_bar, ie structure to be filled
* @param : bar_id_value, value of bar identifier
* @return : Returns nothing
*/
void
set_remove_bar(pfcp_remove_bar_ie_t *remove_bar, uint8_t bar_id_value);
/**
* @brief : Set values in remove pdr ie
* @param : remove_pdr, ie structure to be filled
* @param : pdr_id_value, pdr_id for which we need to send remve pdr
* @return : Returns nothing
*/
void
set_remove_pdr( pfcp_remove_pdr_ie_t *remove_pdr, uint16_t pdr_id_value);
/**
* @brief : Set values in traffic endpoint ie
* @param : traffic_endpoint_id
* @return : Returns nothing
*/
void
set_traffic_endpoint(pfcp_traffic_endpt_id_ie_t *traffic_endpoint_id);
/**
* @brief : Set values in fteid ie
* @param : local_fteid, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_fteid(pfcp_fteid_ie_t *local_fteid, fteid_ie_t *local_fteid_value);
/**
* @brief : Set values in network instance ie
* @param : network_instance, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_network_instance(pfcp_ntwk_inst_ie_t *network_instance,
ntwk_inst_t *network_instance_value);
/**
* @brief : Set values in ue ip address ie
* @param : ue_ip, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_ue_ip(pfcp_ue_ip_address_ie_t *ue_ip, ue_ip_addr_t ue_addr);
/**
* @brief : Set values in qer id ie
* @param : qer_id, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_qer_id(pfcp_qer_id_ie_t *qer_id, uint32_t qer_id_value);
/**
* @brief : Set values in gate status ie
* @param : gate_status, ie structure to be filled
* @param : qer_gate_status, qer_gate_status to be fill
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_gate_status( pfcp_gate_status_ie_t *gate_status, gate_status_t *qer_gate_status);
/**
* @brief : Set values in mbr ie
* @param : mbr, ie structure to be filled
* @param : qer_mbr, information to be fill
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_mbr(pfcp_mbr_ie_t *mbr, mbr_t *qer_mbr);
/**
* @brief : Set values in gbr ie
* @param : gbr, ie structure to be filled
* @param : qer_gbr, information to be fill
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_gbr(pfcp_gbr_ie_t *gbr, gbr_t *qer_gbr);
/**
* @brief : Set values in update qer ie
* @param : up_qer, ie structure to be filled
* @param : bearer_qer, qer value to be fill in ie structure
* @return : Returns nothing
*/
void
set_update_qer(pfcp_update_qer_ie_t *up_qer, qer_t *bearer_qer);
/**
* @brief : Set values in create qer ie
* @param : qer, ie structure to be filled
* @param : bearer_qer, information to be filled in ie's
* @return : Returns nothing
*/
void
set_create_qer(pfcp_create_qer_ie_t *qer, qer_t *bearer_qer);
/**
* @brief : Set values in update bar ie
* @param : up_bar, ie structure to be filled
* @return : Returns nothing
*/
void
updating_bar( pfcp_upd_bar_sess_mod_req_ie_t *up_bar);
/**
* @brief : Set values in update bar pfcp session report response ie
* @param : up_bar, ie structure to be filled
* @param : bearer_bar, stucture bar_t
* @return : Returns nothing
*/
void
set_update_bar_sess_rpt_rsp(pfcp_upd_bar_sess_rpt_rsp_ie_t *up_bar, bar_t *bearer_bar);
/**
* @brief : Set values in pfcpsmreq flags ie
* @param : pfcp_sm_req_flags, ie structure to be filled
* @return : Returns nothing
*/
void
set_pfcpsmreqflags(pfcp_pfcpsmreq_flags_ie_t *pfcp_sm_req_flags);
/**
* @brief : Set values in query urr reference ie
* @param : query_urr_ref, ie structure to be filled
* @return : Returns nothing
*/
void
set_query_urr_refernce(pfcp_query_urr_ref_ie_t *query_urr_ref);
/**
* @brief : Set values in user plane association release request ie
* @param : ass_rel_req, ie structure to be filled
* @return : Returns nothing
*/
void
set_pfcp_ass_rel_req(pfcp_up_assn_rel_req_ie_t *ass_rel_req);
/**
* @brief : Set values in graceful release period ie
* @param : graceful_rel_period, ie structure to be filled
* @return : Returns nothing
*/
void
set_graceful_release_period(pfcp_graceful_rel_period_ie_t *graceful_rel_period);
/**
* @brief : Set values in sequence number ie
* @param : seq, ie structure to be filled
* @return : Returns nothing
*/
void
set_sequence_num(pfcp_sequence_number_ie_t *seq);
/**
* @brief : Set values in metric ie
* @param : metric, ie structure to be filled
* @return : Returns nothing
*/
void
set_metric(pfcp_metric_ie_t *metric);
/**
* @brief : Set values in timer ie
* @param : pov, ie structure to be filled
* @return : Returns nothing
*/
void
set_period_of_validity(pfcp_timer_ie_t *pov);
/**
* @brief : Set values in oci flags ie
* @param : oci, ie structure to be filled
* @return : Returns nothing
*/
void
set_oci_flag( pfcp_oci_flags_ie_t *oci);
/**
* @brief : Set values in offending ie
* @param : offending_ie, ie structure to be filled
* @param : offend, offending ie type
* @return : Returns nothing
*/
void
set_offending_ie( pfcp_offending_ie_ie_t *offending_ie, int offend);
/**
* @brief : Set values in load control info ie
* @param : lci, ie structure to be filled
* @return : Returns nothing
*/
void
set_lci(pfcp_load_ctl_info_ie_t *lci);
/**
* @brief : Set values in overload control info ie
* @param : olci, ie structure to be filled
* @return : Returns nothing
*/
void
set_olci(pfcp_ovrld_ctl_info_ie_t *olci);
/**
* @brief : Set values in failed rule id ie
* @param : rule, ie structure to be filled
* @return : Returns nothing
*/
void
set_failed_rule_id(pfcp_failed_rule_id_ie_t *rule);
/**
* @brief : Set values in traffic endpoint id ie
* @param : tnp, ie structure to be filled
* @return : Returns nothing
*/
void
set_traffic_endpoint_id(pfcp_traffic_endpt_id_ie_t *tnp);
/**
* @brief : Set values in pdr id ie
* @param : pdr, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_pdr_id_ie(pfcp_pdr_id_ie_t *pdr);
/**
* @brief : Set values in created pdr ie
* @param : pdr, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_created_pdr_ie(pfcp_created_pdr_ie_t *pdr);
/**
* @brief : Set values in created traffic endpoint ie
* @param : cte, ie structure to be filled
* @return : Returns nothing
*/
void
set_created_traffic_endpoint(pfcp_created_traffic_endpt_ie_t *cte);
/**
* @brief : Set values in node report type ie
* @param : nrt, ie structure to be filled
* @return : Returns nothing
*/
void
set_node_report_type(pfcp_node_rpt_type_ie_t *nrt);
/**
* @brief : Set values in user plane path failure report ie
* @param : uppfr, ie structure to be filled
* @return : Returns nothing
*/
void
set_user_plane_path_failure_report(pfcp_user_plane_path_fail_rpt_ie_t *uppfr);
/**
* @brief : Calculates system Seconds since boot
* @param : no param
* @return : Returns uptime in case of success
*/
long
uptime(void);
/**
* @brief : validates pfcp session association setup request and sets cause and offending ie accordingly
* @param : pfcp_ass_setup_req, hold information from pfcp session association setup request
* @param : cause_id , param to set cause id
* @param : offend_id, param to set offending ie id
* @return : Returns nothing
*/
void
cause_check_association(pfcp_assn_setup_req_t *pfcp_ass_setup_req,
uint8_t *cause_id, int *offend_id);
/**
* @brief : validates pfcp session establishment request and sets cause and offending ie accordingly
* @param : pfcp_session_request, hold information from pfcp session establishment request
* @param : cause_id , param to set cause id
* @param : offend_id, param to set offending ie id
* @return : Returns nothing
*/
void
cause_check_sess_estab(pfcp_sess_estab_req_t
*pfcp_session_request, uint8_t *cause_id, int *offend_id);
/**
* @brief : validates pfcp session modification request and sets cause and offending ie accordingly
* @param : pfcp_session_mod_req, hold information from pfcp session modification request
* @param : cause_id , param to set cause id
* @param : offend_id, param to set offending ie id
* @return : Returns nothing
*/
void
cause_check_sess_modification(pfcp_sess_mod_req_t
*pfcp_session_mod_req, uint8_t *cause_id, int *offend_id);
/**
* @brief : validates pfcp session deletion request and sets cause and offending ie accordingly
* @param : pfcp_session_delete_req, hold information from pfcp session request
* @param : cause_id , param to set cause id
* @param : offend_id, param to set offending ie id
* @return : Returns nothing
*/
void
cause_check_delete_session(pfcp_sess_del_req_t
*pfcp_session_delete_req, uint8_t *cause_id, int *offend_id);
/**
* @brief : Create recovery time hash table
* @param : No param
* @return : Returns nothing
*/
void
create_heartbeat_hash_table(void);
/**
* @brief : Add ip address to heartbeat hash
* @param : peer_addr, ip address to be added
* @param : recover_time, recovery time stamp
* @return : Returns nothing
*/
void
add_ip_to_heartbeat_hash(node_address_t *peer_addr, uint32_t recover_time);
/**
* @brief : Delete ip address from heartbeat hash
* @param : peer_addr, ip address to be removed
* @return : Returns nothing
*/
void
delete_entry_heartbeat_hash(node_address_t *peer_addr);
/**
* @brief : Add data to heartbeat hash table
* @param : ip, ip address to be added
* @param : recov_time, recovery timestamp
* @return : Returns nothing
*/
int
add_data_to_heartbeat_hash_table(node_address_t *ip, uint32_t *recov_time);
/**
* @brief : Delete hearbeat hash table
* @param : No param
* @return : Returns nothing
*/
void
clear_heartbeat_hash_table(void);
/**
* @brief : Set values in create pdr ie
* @param : create_pdr, ie structure to be filled
* @param : source_iface_value, interface type
* @param : cp_type,[SGWC/SAEGWC/PGWC]
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_create_pdr(pfcp_create_pdr_ie_t *create_pdr, pdr_t *bearer_pdr,
uint8_t cp_type);
/**
* @brief : Set values in create far ie
* @param : create_far, ie structure to be filled
* @return : Returns nothing
*/
void
set_create_far(pfcp_create_far_ie_t *create_far, far_t *bearer_pdr);
/**
* @brief : Set values in create urr ie
* @param : create_far, ie structure to be filled
* @return : Returns nothing
*/
void
set_create_urr(pfcp_create_urr_ie_t *create_urr, pdr_t *bearer_pdr);
/**
* @brief : Set values in create bar ie
* @param : create_bar, ie structure to be filled
* @return : Returns nothing
*/
void
set_create_bar(pfcp_create_bar_ie_t *create_bar, bar_t *bearer_bar);
/**
* @brief : Set values in update pdr ie
* @param : update_pdr, ie structure to be filled
* @param : source_iface_value
* @param : cp_type, [SGWC/SAEGWC/PGWC]
* @return : Returns nothing
*/
int
set_update_pdr(pfcp_update_pdr_ie_t *update_pdr, pdr_t *bearer_pdr,
uint8_t cp_type);
/**
* @brief : Set values in update far ie
* @param : update_far, ie structure to be filled
* @return : Returns nothing
*/
void
set_update_far(pfcp_update_far_ie_t *update_far, far_t *bearer_pdr);
/**
* @brief : Set values in forwarding params ie
* @param : frwdng_parms, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
uint16_t
set_forwarding_param(pfcp_frwdng_parms_ie_t *frwdng_parms,
node_address_t node_value, uint32_t teid, uint8_t interface_valuece);
/**
* @brief : Set values in duplicating params ie
* @param : dupng_parms, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
uint16_t
set_duplicating_param(pfcp_dupng_parms_ie_t *dupng_parms);
/**
* @brief : Set values in duplicating params ie in update far
* @param : upd_dupng_parms, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
uint16_t
set_upd_duplicating_param(pfcp_upd_dupng_parms_ie_t *dupng_parms);
/**
* @brief : Set values in upd forwarding params ie
* @param : upd_frwdng_parms, ie structure to be filled
* @param : node_value, node address structure to fill IP address
* @return : Returns 0 in case of success , -1 otherwise
*/
uint16_t
set_upd_forwarding_param(pfcp_upd_frwdng_parms_ie_t *upd_frwdng_parms,
node_address_t node_value);
/**
* @brief : Set values in apply action ie
* @param : apply_action, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
uint16_t
set_apply_action(pfcp_apply_action_ie_t *apply_action_t, apply_action *bearer_action);
/**
* @brief : Set values in measurement method ie
* @param : apply_action, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
uint16_t
set_measurement_method(pfcp_meas_mthd_ie_t *meas_mthd, urr_t *bearer_urr);
/**
* @brief : Set values in reporting triggers ie
* @param : apply_action, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
uint16_t
set_reporting_trigger(pfcp_rptng_triggers_ie_t *rptng_triggers, urr_t *bearer_urr);
/**
* @brief : Set values in volume threshold ie
* @param : apply_action, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_volume_threshold(pfcp_vol_thresh_ie_t *vol_thresh, urr_t *bearer_urr, uint8_t interface_value);
/**
* @brief : Set values in volume measurement ie
* @param : vol_meas, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_volume_measurment(pfcp_vol_meas_ie_t *vol_meas);
/**
* @brief : Set values in duration measurement ie
* @param : dur_meas, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_duration_measurment(pfcp_dur_meas_ie_t *dur_meas);
/**
* @brief : Set values in start time ie
* @param : start_time, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_start_time(pfcp_start_time_ie_t *start_time);
/**
* @brief : Set values in end time ie
* @param : end_time, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_end_time(pfcp_end_time_ie_t *end_time);
/**
* @brief : Set values in first pkt time ie
* @param : first_pkt_time, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_first_pkt_time(pfcp_time_of_frst_pckt_ie_t *first_pkt_time);
/**
* @brief : Set values in last pkt time ie
* @param : last_pkt_time, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_last_pkt_time(pfcp_time_of_lst_pckt_ie_t *last_pkt_time);
/**
* @brief : Set values in Time threshold ie
* @param : apply_action, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_time_threshold(pfcp_time_threshold_ie_t *time_thresh, urr_t *bearer_urr);
/**
* @brief : Set values in outer header creation ie
* @param : outer_hdr_creation, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
uint16_t
set_outer_header_creation(pfcp_outer_hdr_creation_ie_t *outer_hdr_creation,
node_address_t node_value, uint32_t teid);
/**
* @brief : Set values in forwarding policy ie
* @param : frwdng_plcy, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
uint16_t
set_frwding_policy(pfcp_frwdng_plcy_ie_t *frwdng_plcy);
/**
* @brief : Set values in destination interface ie
* @param : dst_intfc, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
uint16_t
set_destination_interface(pfcp_dst_intfc_ie_t *dst_intfc, uint8_t interface_value);
/**
* @brief : Set values in pdr id ie
* @param : pdr_id, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_pdr_id(pfcp_pdr_id_ie_t *pdr_id, uint16_t pdr_id_value);
/**
* @brief : Set values in far id ie
* @param : far_id, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_far_id(pfcp_far_id_ie_t *far_id, uint32_t far_id_value);
/**
* @brief : Set values in urr id ie
* @param : urr_id, ie structure to be filled
* @return : Returns nothing
*/
int
set_urr_id(pfcp_urr_id_ie_t *urr_id, uint32_t urr_id_value);
/**
* @brief : Set values in outer header removal ie
* @param : out_hdr_rem, ie structure to be filled
* @param : outer_header_desc, outer header desciption
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_outer_hdr_removal(pfcp_outer_hdr_removal_ie_t *out_hdr_rem,
uint8_t outer_header_desc);
/**
* @brief : Set values in precedence ie
* @param : prec, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_precedence(pfcp_precedence_ie_t *prec, uint32_t prec_value);
/**
* @brief : Set values in pdi ie
* @param : pdi, ie structure to be filled
* @param : cp_type, [SGWC/PGWC/SAEGWC]
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_pdi(pfcp_pdi_ie_t *pdi, pdi_t *bearer_pdi, uint8_t cp_type);
/**
* @brief : Set values in source interface ie
* @param : src_intf, ie structure to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
int
set_source_intf(pfcp_src_intfc_ie_t *src_intf, uint8_t src_intf_value);
/**
* @brief : Get peer node address
* @param : peer_addr, structure to contain address
* @param : node_addr, structure to fill node address.
* @return : Returns void.
*/
void get_peer_node_addr(peer_addr_t *peer_addr, node_address_t *node_addr);
#ifdef CP_BUILD
/**
* @brief : Set values in pfd contents ie
* @param : pfd_conts, ie structure to be filled
* @param : cstm_buf, data to be filled
* @return : Returns 0 in case of success , -1 otherwise
*/
uint16_t
set_pfd_contents(pfcp_pfd_contents_ie_t *pfd_conts, struct msgbuf *cstm_buf);
/**
* @brief : Fill pfcp pfd management request
* @param : pfcp_pfd_req, pointer to structure to be filled
* @param : len, Total len
* @return : Returns nothing
*/
void
fill_pfcp_pfd_mgmt_req(pfcp_pfd_mgmt_req_t *pfcp_pfd_req, uint16_t len);
/**
* @brief : Process pfcp pfd management request
* @param : No param
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_pfcp_pfd_mgmt_request(void);
/**
* @brief : Add entry to upflist hash
* @param : imsi_val, imsi value
* @param : imsi_len, imsi length
* @param : entry, entry to be added in hash
* @return : Returns 0 in case of success , -1 otherwise
*/
int
upflist_by_ue_hash_entry_add(uint64_t *imsi_val, uint16_t imsi_len,
upfs_dnsres_t *entry);
/**
* @brief : search entry in upflist hash
* @param : imsi_val, imsi value
* @param : imsi_len, imsi length
* @param : entry, entry to be filled with search result
* @return : Returns 0 in case of success , -1 otherwise
*/
int
upflist_by_ue_hash_entry_lookup(uint64_t *imsi_val, uint16_t imsi_len,
upfs_dnsres_t **entry);
/**
* @brief : delete entry in upflist hash
* @param : imsi_val, imsi value
* @param : imsi_len, imsi length
* @return : Returns 0 in case of success , -1 otherwise
*/
int
upflist_by_ue_hash_entry_delete(uint64_t *imsi_val, uint16_t imsi_len);
/**
* @brief : Add entry to upf conetxt hash
* @param : upf_ip, up ip address
* @param : entry ,entry to be added
* @return : Returns 0 in case of success , -1 otherwise
*/
uint8_t
upf_context_entry_add(node_address_t *upf_ip, upf_context_t *entry);
/**
* @brief : search entry in upf hash using ip
* @param : upf_ip, key to search entry
* @param : entry, variable to store search result
* @return : Returns 0 in case of success , -1 otherwise
*/
int
upf_context_entry_lookup(node_address_t upf_ip, upf_context_t **entry);
/**
* @brief : Add entry into gx context hash
* @param : sess_id , key to add entry
* @param : entry , entry to be added
* @return : Returns 0 in case of success , -1 otherwise
*/
int
gx_context_entry_add(char *sess_id, gx_context_t *entry);
/**
* @brief : search entry in gx context hash
* @param : sess_id , key to add entry
* @param : entry , entry to be added
* @return : Returns 0 in case of success , -1 otherwise
*/
int
gx_context_entry_lookup(char *sess_id, gx_context_t **entry);
/**
* @brief : Create s5s8 hash table in sgwc
* @param : No param
* @return : Returns nothing
*/
void
create_s5s8_sgwc_hash_table(void);
/**
* @brief : Remove s5s8 hash table in sgwc
* @param : No param
* @return : Returns nothing
*/
void
clear_s5s8_sgwc_hash_table(void);
/**
* @brief : Generate and Send CCRU message
* @param : Modify Bearer Request
* @return : Returns 0 in case of success , -1 otherwise
*/
int
send_ccr_u_msg(mod_bearer_req_t *mb_req);
#endif /* CP_BUILD */
/**
* @brief : get msg type from cstm ie string
* @param : pfd_conts, holds pfc contents data
* @param : idx, index in array
* @return : Returns 0 in case of success , -1 otherwise
*/
uint64_t
get_rule_type(pfcp_pfd_contents_ie_t *pfd_conts, uint16_t *idx);
/**
* @brief : Sets IP address for the node as per IP type
* @param : ipv4_addr, IPv4 address
* @param : ipv6_addr, IPv6 address
* @param : node_value, node value structure to store IP address
* @return : returns -1 if no ip is assigned, otherwise 0
*/
int
set_node_address(uint32_t *ipv4_addr, uint8_t ipv6_addr[],
node_address_t node_value);
/* @brief : stores IPv4/IPv6 address into node value
* @param : ipv4_addr, ipv4 address
* @param : ipv6_addr, ipv6 address
* @param : node_value, ipv4 and ipv6 structure
* @return : returns -1 if no ip is assigned, otherwise 0
* */
int
fill_ip_addr(uint32_t ipv4_addr, uint8_t ipv6_addr[],
node_address_t *node_value);
/* @brief : checks if IPV6 address is zero or not
* @param : addr, ipv6 address
* @param : len, ipv6 address len
* @return : returns -1 if IP is non-zero, otherwise 0
* */
int
check_ipv6_zero(uint8_t addr[], uint8_t len);
#endif /* PFCP_SET_IE_H */
|
nikhilc149/e-utran-features-bug-fixes | cp_dp_api/predef_rule_init.h | <reponame>nikhilc149/e-utran-features-bug-fixes<filename>cp_dp_api/predef_rule_init.h
/*
* Copyright (c) 2020 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _PREDEF_RULE_INIT_H
#define _PREDEF_RULE_INIT_H
#include "dp_ipc_api.h"
#ifdef CP_BUILD
#include "packet_filters.h"
#endif
#include "vepc_cp_dp_api.h"
#ifdef CP_BUILD
#include "main.h"
#else
#include "up_main.h"
#endif /* CP_BUILD */
/* Defined the hash table name */
#define PCC_HASH 1
#define SDF_HASH 2
#define MTR_HASH 3
#define ADC_HASH 4
#define RULE_HASH 5
/* Operation Modes of the rules */
#define ADD_RULE 1
#define GET_RULE 2
#define SND_RULE 3
#define DEL_RULE 4
/* PCC Rule name length */
#define MAX_RULE_LEN 256
/* Defines the tables to store/maintain predefined rules and their
* associated info.
*/
/*
 * NOTE(review): these hash-table pointers are tentative definitions placed in
 * a header, so every translation unit that includes this header emits the same
 * symbols and the linker merges them as commons. Declaring them `extern` here
 * and defining them once in a .c file would be safer -- TODO confirm no .c
 * file relies on the current behavior before changing.
 */
/**
* @brief : rte hash table to maintain the collection of PCC, SDF, MTR, and ADC rules.
* hash key: ip_addr, Data: struct rules_struct
*/
struct rte_hash *rules_by_ip_addr_hash;
/**
* @brief : rte hash table to maintain the pcc rules by rule name.
* hash key: rule_name, Data: struct pcc_rules
*/
struct rte_hash *pcc_by_rule_name_hash;
/**
* @brief : rte hash table to maintain the sdf rules by rule index.
* hash key: rule_indx, Data: struct sdf_pkt_filter
*/
struct rte_hash *sdf_by_inx_hash;
/**
* @brief : rte hash table to maintain the mtr rules by rule index.
* hash key: rule_indx, Data: struct mtr_entry
*/
struct rte_hash *mtr_by_inx_hash;
/**
* @brief : rte hash table to maintain the adc rules by rule index.
* hash key: rule_indx, Data: struct adc_rules
*/
struct rte_hash *adc_by_inx_hash;
/* Key type used to look up PCC rules in pcc_by_rule_name_hash. */
typedef struct pcc_rule_name_key {
/* pcc rule name (fixed-size buffer of MAX_RULE_LEN bytes) */
char rname[MAX_RULE_LEN];
}pcc_rule_name;
/* Singly linked list node holding one PCC rule name per node; the list is
 * grown via insert_rule_name_node() and keyed per peer in
 * rules_by_ip_addr_hash. */
typedef struct rules_struct_t {
/* number of rule-name nodes -- presumably the total list length, maintained
 * by insert_rule_name_node(); TODO confirm against the implementation */
uint16_t rule_cnt;
/* pcc rule name carried by this node */
pcc_rule_name rule_name;
/* LL to contain the list of pcc rules */
struct rules_struct_t *next;
}rules_struct;
/**
* @brief : Function to add rule entry in collection hash table.
* @param : head, new_node.
* @return : 0: Success, -1: Failure
*/
int8_t
insert_rule_name_node(rules_struct *head, rules_struct *new_node);
/**
* @brief : Function to add/get/update rule entry in collection hash table.
* @param : cp_pfcp_ip key.
* @param : is_mod, Operation modes.
* @return : Success: rules_struct, Failure: NULL
*/
rules_struct *
get_map_rule_entry(uint32_t cp_pfcp_ip, uint8_t is_mod);
/**
* @brief : Function to delete rule entry in collection hash table.
* @param : cp_pfcp_ip key.
* @return : 0: Success, -1: Failure
*/
int8_t
del_map_rule_entry(uint32_t cp_pfcp_ip);
/**
* @brief : Function to add/get/update rule entry of SDF,MTR,ADC in hash table.
* @param : rule_indx, SDF, MTR, and ADC rule index value.
* @param : hash_type, Selection of table, Ex. SDF_TABLE, ADC_TABLE etc.
* @param : is_mod, Operation modes, Ex. ADD_RULE, UPDATE_RULE etc
* @param : data, return SDF, MTR, and ADC rule.
* @return : 0: Success, -1: Failure
*/
int8_t
get_predef_rule_entry(uint16_t rule_indx, uint8_t hash_type,
uint8_t is_mod, void **data);
/**
* @brief : Function to delete rule entry of SDF,MTR,ADC in hash table.
* @param : rule_indx, SDF, MTR, and ADC rule index value.
* @param : hash_type, Selection of table, Ex. SDF_TABLE, ADC_TABLE etc.
* @return : 0: Success, -1: Failure
*/
int8_t
del_predef_rule_entry(uint16_t rule_indx, uint8_t hash_type);
/**
* @brief : Function to add/get/update rule entry of pcc in hash table.
* @param : rule_name, pcc rule name.
* @param : is_mod, Operation modes, Ex. ADD_RULE, UPDATE_RULE etc
* @return : Success: struct pcc_rules, Failure: NULL
*/
struct pcc_rules *
get_predef_pcc_rule_entry(const pcc_rule_name *rule_name, uint8_t is_mod);
/**
* @brief : Function to delete a pcc rule entry from the hash table.
* @param : rule_name, pcc rule name.
* @return : 0: Success, -1: Failure
*/
int8_t
del_predef_pcc_rule_entry(const pcc_rule_name *rule_name);
/* Create and initialize the tables to maintain the predefined rules info*/
void
init_predef_rule_hash_tables(void);
/**
* @brief : Pack the message which has to be sent to DataPlane.
* @param : mtype
* mtype - Message type.
* @param : param
* param - parameter to be parsed based on msg type.
* @param : msg_payload
* msg_payload - message payload to be sent.
* @return : Returns 0 in case of success , -1 otherwise
*/
int
build_rules_up_msg(enum dp_msg_type mtype, void *param, struct msgbuf *msg_payload);
#endif /* _PREDEF_RULE_INIT_H */
|
nikhilc149/e-utran-features-bug-fixes | ulpc/d_admf/include/Common.h | <gh_stars>0
/*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __COMMON_H_
#define __COMMON_H_
#include <cstdlib>
#include <iostream>
#include <sstream>
#include <map>
#include <list>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include "emgmt.h"
#include "esynch.h"
#include "epctools.h"
#include "rapidjson/filereadstream.h"
#include "rapidjson/document.h"
#include "rapidjson/prettywriter.h"
#include "rapidjson/pointer.h"
#include "UeEntry.h"
#define ADD_ACTION 1
#define ADD_ACK 2
#define UPDATE_ACTION 3
#define UPDATE_ACK 4
#define START_UE 5
#define START_UE_ACK 6
#define STOP_UE 7
#define STOP_UE_ACK 8
#define DELETE_ACTION 9
#define DELETE_ACK 10
#define ACK_CHECKER 100
#define SXA 1
#define SXB 2
#define SXA_SXB 3
#define CP_TYPE 1
#define DP_TYPE 2
#define CP_DP_TYPE 3
#define UPLINK 1
#define DOWNLINK 2
#define UPLINK_DOWNLINK 3
#define S1U 1
#define SGW_S5S8_U 2
#define PGW_S5S8_U 3
#define SGI 4
#define SXA_INTFC_NAME "sxa"
#define SXB_INTFC_NAME "sxb"
#define SXA_SXB_INTFC_NAME "sxasxb"
#define S1U_INTFC_NAME "s1u"
#define SGW_S5S8_U_INTFC_NAME "sgw-s5s8u"
#define PGW_S5S8_U_INTFC_NAME "pgw-s5s8u"
#define SGI_INTFC_NAME "sgi"
#define SEQ_ID_KEY "sequenceId"
#define IMSI_KEY "imsi"
#define SIGNALLING_CONFIG_KEY "signallingconfig"
#define S11_KEY "s11"
#define SGW_S5S8_C_KEY "sgw-s5s8c"
#define PGW_S5S8_C_KEY "<KEY>"
#define SX_KEY "sx"
#define SX_INTFC_KEY "sxintfc"
#define CP_DP_TYPE_KEY "type"
#define DATA_CONFIG_KEY "dataconfig"
#define S1U_CONTENT_KEY "s1u_content"
#define SGW_S5S8U_CONTENT_KEY "sgw_s5s8u_content"
#define PGW_S5S8U_CONTENT_KEY "pgw_s5s8u_content"
#define SGI_CONTENT_KEY "sgi_content"
#define DATA_INTFC_CONFIG_KEY "intfcconfig"
#define DATA_INTFC_NAME_KEY "intfc"
#define DATA_DIRECTION_KEY "direction"
#define FORWARD_KEY "forward"
#define TIMER_KEY "timer"
#define START_TIME_KEY "starttime"
#define STOP_TIME_KEY "stoptime"
#define REQUEST_SOURCE_KEY "requestSource"
#define RESPONSE_MSG_KEY "response_message"
#define RESPONSE_JSON_KEY "response_json"
#define NOTIFY_TYPE_KEY "notifyType"
#define D_ADMF_IP "DADMF_IP"
#define D_ADMF_PORT "DADMF_PORT"
#define ADMF_IP "ADMF_IP"
#define ADMF_PORT "ADMF_PORT"
#define UE_DB_KEY "uedatabase"
#define CP_IP_ADDR_KEY "cpipaddr"
#define ACK_KEY "ack"
#define REQUEST_TYPE_KEY "requestType"
#define CONTENT_TYPE_JSON "Content-Type: application/json"
#define X_USER_NAME "X-User-Name: YOUR_NAME"
#define USER_AGENT "curl/7.47.0"
#define HTTP "http://"
#define COLON ":"
#define SLASH "/"
#define ADD_UE_ENTRY_URI "addueentry"
#define UPDATE_UE_ENTRY_URI "updateueentry"
#define DELETE_UE_ENTRY_URI "deleteueentry"
#define REGISTER_CP_URI "registercp"
#define NOTIFY_URI "notify"
#define ACK_POST "ack"
#define CONFIG_FILE_PATH "./config/dadmf.conf"
#define EMPTY_STRING ""
#define TRUE true
#define FALSE false
#define SECONDS 60
#define MILLISECONDS 1000
#define MAX_VALUE_UINT16_T 65535
#define ZERO 0
#define ONE 1
#define OPERATION_DEBUG 1
#define OPERATION_LI 2
#define OPERATION_BOTH 3
#define D_ADMF_REQUEST 0
#define ADMF_REQUEST 1
#define LOG_SYSTEM 3
#define RET_SUCCESS 0
#define RET_FAILURE -1
#define DISABLE 0
#define OFF 1
#define ON 2
#define HEADER_ONLY 1
#define HEADER_AND_DATA 2
#define DATA_ONLY 3
#define SAFE_DELETE(p) { if (p) { delete(p); (p) = NULL; }}
/**
* @brief : Maintains D_ADMF configurations read from config file
*/
typedef struct configurations {
cpStr dadmfIp;
UShort dadmfPort;
std::string admfIp;
UShort admfPort;
uint16_t ackCheckTimeInMin;
} configurations_t;
/**
* @brief : Maintains default values for Ue attributes
*/
typedef struct UeDefaults {
uint16_t s11;
uint16_t sgw_s5s8c;
uint16_t pgw_s5s8c;
uint16_t sxa;
uint16_t sxb;
uint16_t sxasxb;
uint16_t s1u;
uint16_t sgw_s5s8u;
uint16_t pgw_s5s8u;
uint16_t sgi;
uint16_t s1u_content;
uint16_t sgw_s5s8u_content;
uint16_t pgw_s5s8u_content;
uint16_t sgi_content;
uint16_t forward;
} ue_defaults_t;
/**
* @brief : Converts ip-address from ascii format to integer using
in_addr structure
* @param : ipAddr, ip-address in ascii format
* @return : Returns ip-address in integer format
*/
int ConvertAsciiIpToNumeric(const char *ipAddr);
/**
* @brief : Calculates difference of time in request with current time
* @param : dateStr, startTime/stopTime in the IMSI object in
format "%Y-%m-%dT%H:%M:%SZ"
* @return : Returns difference of time in request with current time
*/
int64_t getTimeDiffInMilliSec(const std::string &dateStr);
/**
 * @brief : Creates JSON object from the given list of Ue data and appends
            a request-source flag before sending it to ADMF, so that ADMF
            can identify the request came from the D-ADMF
 * @param : ueDataList, list of Ue entries to serialize
 * @return : returns JSON in string format in case of success; on a json
             parsing error the result is empty (previous doc said NULL,
             but a std::string cannot be NULL -- TODO confirm)
 */
std::string prepareJsonFromUeData(std::list<ue_data_t> &ueDataList);
/**
* @brief : Creates sequence Identifier for every Ue entry request
to identify every request uniquely.
* @param : ueData, structure filled with Ue entry details.
* @return : sequenceId generated.
*/
uint64_t generateSequenceIdentifier(ue_data_t &ueData);
/**
* @brief : Creates JSON object for Imsi whose timer has been elapsed,
and needs to be sent to all registered CP's
* @param : ueData, structure filled with Ue details.
* @return : returns JSON in string format
*/
std::string prepareJsonForCP(std::list<ue_data_t> &ueData);
/**
* @brief : Creates JSON object for list of Imsi's whose start time has
been elapsed and needs to be notified to legacy admf if forward
flag was set for that Imsi.
* @param : ueData, list of Imsi
* @param : notifyType, notification type can be startUe or stopUe.
* @return : return JSON in string format
*/
std::string prepareJsonForStartUe(std::list<ue_data_t> &ueData,
uint8_t notifyType = 0);
/**
* @brief : Creates JSON object for Imsi whose stop timer has been elapsed,
and needs to be sent to all registered CP's (and ADMF if forward
flag was set for that Imsi
* @param : ueData structure filled with Ue details
* @param : notifyType, additional parameter sent while sending stop
notification to admf
* @return : returns JSON in string format
*/
std::string prepareJsonForStopUe(std::list<delete_event_t> &ueData,
uint8_t notifyType = 0);
/**
* @brief : Creates JSON object for response
* @param : ueDataList, list of Ue entries whose request was succeeded.
* @param : responseMsg, response message with additional information.
* @return : return response JSON in string format
*/
std::string prepareResponseJson(std::list<ue_data_t> &ueDataList,
std::string &responseMsg);
#endif /* __COMMON_H */
|
nikhilc149/e-utran-features-bug-fixes | ulpc/df/include/BaseLegacyInterface.h | /*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
#ifndef _BASE_LEGACY_INTERFACE_H_
#define _BASE_LEGACY_INTERFACE_H_
#include <stdint.h>
#include <iostream>
#include "elogger.h"
class BaseLegacyInterface
{
public:
/*
* @brief : Constructor of class BaseLegacyInterface
*/
BaseLegacyInterface();
/*
* @brief : Destructor of class BaseLegacyInterface
*/
virtual ~BaseLegacyInterface();
/*
* @brief : Function to assign EGetOpt object
* @param : opt, EGetOpt object
* @return : Returns void
*/
virtual void ConfigureLogger(ELogger &log) = 0;
/*
* @brief : Function to initialise legacy interface
* @param : strCommMode, mode of communication
* @return : Returns int8_t
*/
virtual int8_t InitializeLegacyInterface(const std::string& strCommMode) = 0;
/*
* @brief : Function to connect with legacy DF
* @param : strRemoteIp, legacy DF IP
* @param : uiRemotePort, legacy DF port
* @return : Returns int8_t
*/
virtual int8_t ConnectWithLegacyInterface(const std::string& strRemoteIp,
uint16_t uiRemotePort) = 0;
/*
* @brief : Function to send information/packet to legacy DF
* @param : pkt, packet to be sent
* @param : packetLen, size of packet
* @return : Returns int8_t
*/
virtual int8_t SendMessageToLegacyInterface(uint8_t *pkt, uint32_t packetLen) = 0;
/*
* @brief : Function to disconnect from legacy DF
* @param : No arguments
* @return : Returns int8_t
*/
virtual int8_t DisconnectWithLegacyInterface() = 0;
/*
* @brief : Function to de-initialise legacy DF
* @param : No arguments
* @return : Returns int8_t
*/
virtual int8_t DeinitalizeLegacyInterface() = 0;
};
// the types of the class factories
typedef BaseLegacyInterface* create_t();
typedef void destroy_t(BaseLegacyInterface*);
#endif /* _BASE_LEGACY_INTERFACE_H_ */
|
nikhilc149/e-utran-features-bug-fixes | oss_adapter/libepcadapter/include/gw_adapter.h | /*
* Copyright (c) 2019 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __NGIC_GW_ADAPTER_H__
#define __NGIC_GW_ADAPTER_H__
#ifdef __cplusplus
extern "C" {
#endif
#include <stdint.h>
#include <stdbool.h>
#include "gw_structs.h"
#define STANDARD_LOGID (1)
#define STATS_LOGID (2)
#define CLI_STATS_TIMER_INTERVAL (5000)
#define MAX_UINT16_T (65535)
/* Single curl command has maximum UE entry limit */
#define MAX_LI_ENTRIES (255)
#define FALSE (0)
#define TRUE (1)
#define PERF_ON (1)
#define PERF_OFF (0)
#define __file__ (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define LOG_FORMAT "%s:%s:%d:"
#define LOG_VALUE __file__, __func__, __LINE__
typedef long long int _timer_t;
#define TIMER_GET_CURRENT_TP(now) \
({ \
struct timespec ts; \
now = clock_gettime(CLOCK_REALTIME,&ts) ? \
-1 : (((_timer_t)ts.tv_sec) * 1000000000) + ((_timer_t)ts.tv_nsec); \
now; \
})
#define TIMER_GET_ELAPSED_NS(start) \
({ \
_timer_t ns; \
TIMER_GET_CURRENT_TP(ns); \
if (ns != -1){ \
ns -= start; \
} \
ns; \
})
extern _timer_t st_time;
extern cli_node_t cli_node;
extern cli_node_t *cli_node_ptr;
enum CLogType {
eCLTypeBasicFile,
eCLTypeSysLog,
eCLTypeRotatingFile,
eCLTypeStdOut,
eCLTypeStdErr
};
enum CLoggerSeverity {
eCLSeverityDebug,
eCLSeverityInfo,
eCLSeverityStartup,
eCLSeverityMinor,
eCLSeverityMajor,
eCLSeverityCritical
};
enum CLoggerLogLevel
{
eCLogLevelDebug = 0,
eCLogLevelInfo,
eCLogLevelStartup,
eCLogLevelMinor,
eCLogLevelMajor,
eCLogLevelCritical,
eCLogLevelOff,
};
/* Function */
/**
* @brief : init log module
* @param : name, name of file
* @return : Returns 0 on success else -1
*/
int8_t init_log_module(const char *name);
/* Function */
/**
* @brief : clLog for logging
* @param : logid, logid
* @param : sev, Severity of logging
* @param : fmt, logger string params for printing
* @return : Returns nothing
*/
void clLog(const int logid, enum CLoggerSeverity sev, const char *fmt, ...);
/* Function */
/**
* @brief : init rest framework
* @param : cli_rest_ip, ip for rest http request
* @param : cli_rest_port, port for rest http request
* @return : Returns 0 on success else -1
*/
int8_t init_rest_framework(char *cli_rest_ip, uint16_t cli_rest_port);
/* Function */
/**
* @brief : Updates the cli stats as per the interface and direction
* @param : ip_addr,
* @param : msg_type, Type of message
* @param : dir, Direction of message on interface
* @param : it, interface of the message
* @return : Returns 0 on success , otherwise -1
*/
int update_cli_stats(peer_address_t *cli_peer_addr, uint8_t mgs_type, int dir, CLIinterface it);
/* Function */
/**
* @brief : Adds information about peer gateway
* @param : ip_addr, ip address of peer gateway
* @param : it, interface of the message
* @return : Returns nothing
*/
void add_cli_peer(peer_address_t *cli_peer_addr, CLIinterface it);
/* Function */
/**
* @brief : gives index of the peer gateway ip
* @param : ip_addr, ip address of peer gateway
* @return : Returns index on success, otherwise -1
*/
int get_peer_index(peer_address_t *cli_peer_addr);
/* Function */
/**
* @brief : updates alive status of peer
* @param : ip_addr, ip address of peer gateway
* @param : val, boolean value of status
* @return : Returns 0 on success, otherwise -1
*/
int update_peer_status(peer_address_t *cli_peer_addr, bool val);
/* Function */
/**
* @brief : updates timeout counter
* @param : ip_addr, ip address of peer gateway
* @param : val, timeout counter
* @return : Returns 0 on success, otherwise -1
*/
int update_peer_timeouts(peer_address_t *cli_peer_addr, uint8_t val);
/* Function */
/**
* @brief : deletes peer gateway
* @param : ip_addr, ip address of peer gateway
* @return : Returns 0 on success, otherwise -1
*/
int delete_cli_peer(peer_address_t *cli_peer_addr);
/* Function */
/**
* @brief : finds first position of peer gateway
* @param : void
* @return : Returns index of peer in an array on success, otherwise 0
*/
int get_first_index(void);
/* Function */
/**
* @brief : updates timestamp of the peer gateway
* @param : ip_addr, ip address of peer gateway
* @param : timestamp, timestamp of the moment
* @return : Returns 0 on success, otherwise -1
*/
int update_last_activity(peer_address_t *cli_peer_addr, char *time_stamp);
/* Function */
/**
* @brief : updates count of system or users
* @param : index, type of system
* @param : operation, operation value
* @return : Returns 0
*/
int update_sys_stat(int index, int operation);
/* Function */
/**
* @brief : retrieves current time
* @param : last_time_stamp, last timestamp
* @return : Returns nothing
*/
void get_current_time_oss(char *last_time_stamp);
/* Function */
/**
* @brief : checks if activity has updated or not
* @param : msg_type, message type
* @param : it, interface type
* @return : Returns true on success otherwise false
*/
bool is_last_activity_update(uint8_t msg_type, CLIinterface it);
/* Function */
/**
* @brief : checks if command is suppported for respective gateway
* @param : cmd_number, command number
* @return : Returns true if supported, otherwise false
*/
bool is_cmd_supported(int cmd_number);
/* Function */
/**
* @brief : get type of gateway
* @param : void
* @return : Returns type of gateway
*/
uint8_t get_gw_type(void);
/* Function */
/**
* @brief : reset the dp system stats
* @param : void
* @return : Returns nothing
*/
void reset_sys_stat(void);
/* Function */
/**
* @brief : set mac value
* @param : mac char ptr
* @param : mac int ptr
* @return : Returns nothing
*/
void set_mac_value(char *mac_addr_char_ptr, uint8_t *mac_addr_int_ptr);
/* Function */
/**
* @brief : init stats timer
* @param : void
* @return : Returns nothing
*/
void init_stats_timer(void);
/* Function */
/**
* @brief : set gateway type
* @param : gateway_type, type of gateway
* @return : Returns nothing
*/
void set_gw_type(uint8_t gateway_type);
/* Function */
/**
* @brief : get stat live
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int get_stat_live(const char *request_body, char **response_body);
/* Function */
/**
* @brief : get periodic timer
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int get_pt(const char *request_body, char **response_body);
/* Function */
/**
* @brief : get transmit timer
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int get_tt(const char *request_body, char **response_body);
/* Function */
/**
* @brief : get transmit count
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int get_tc(const char *request_body, char **response_body);
/* Function */
/**
* @brief : get request tries
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int get_rt(const char *request_body, char **response_body);
/* Function */
/**
* @brief : get request timeout
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int get_rto(const char *request_body, char **response_body);
/* Function */
/**
* @brief : get perf flag
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int get_pf(const char *request_body, char **response_body);
/* Function */
/**
* @brief : get generate pcap status
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int get_generate_pcap_status(const char *request_body, char **response_body);
/* Function */
/**
* @brief : get stat logging
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int get_stat_logging(const char *request_body, char **response_body);
/* Function */
/**
* @brief : get configuration
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int get_configuration(const char *request_body, char **response_body);
/* Function */
/**
* @brief : get stat live all
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int get_stat_live_all(const char *request_body, char **response_body);
/* Function */
/**
* @brief : get stat frequency
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int get_stat_frequency(const char *request_body, char **response_body);
/* Function */
/**
* @brief : post periodic timer
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int post_pt(const char *request_body, char **response_body);
/* Function */
/**
* @brief : post transmit timer
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int post_tt(const char *request_body, char **response_body);
/* Function */
/**
* @brief : post transmit count
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int post_tc(const char *request_body, char **response_body);
/* Function */
/**
* @brief : post request tries
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int post_rt(const char *request_body, char **response_body);
/* Function */
/**
* @brief : post request timeout
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int post_rto(const char *request_body, char **response_body);
/* Function */
/**
* @brief : post generate pcap cmd
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int post_generate_pcap_cmd(const char *request_body, char **response_body);
/* Function */
/**
* @brief : post stat logging
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int post_stat_logging(const char *request_body, char **response_body);
/* Function */
/**
* @brief : post reset stats cmd
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int reset_cli_stats(const char *request_body, char **response_body);
/* Function */
/**
* @brief : post stat frequency
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int post_stat_frequency(const char *request_body, char **response_body);
/* Function */
/**
* @brief : post perf flag value
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int post_pf(const char *request_body, char **response_body);
/* Function */
/**
* @brief : add ue entry details
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int add_ue_entry_details(const char *request_body, char **response_body);
/* Function */
/**
* @brief : update ue entry details
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int update_ue_entry_details(const char *request_body, char **response_body);
/* Function */
/**
* @brief : delete ue entry details
* @param : request_body, http request body
* @param : response_body, http response body
* @return : Returns status code
*/
int delete_ue_entry_details(const char *request_body, char **response_body);
#ifdef __cplusplus
}
#endif
#endif /* __NGIC_GW_ADAPTER_H__ */
|
nikhilc149/e-utran-features-bug-fixes | test/sponsdn/main.c | <reponame>nikhilc149/e-utran-features-bug-fixes
/*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sponsdn.h>
#include <rte_eal.h>
#include <rte_config.h>
#include <rte_common.h>
#include <arpa/inet.h>
#include <sponsdn.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <string.h>
#include <pcap/pcap.h>
#include <rte_cfgfile.h>
#include <rte_udp.h>
#include <rte_byteorder.h>
#define MAX_DN 10
#define MAX_DNS_NAME_LEN 256
int cnt;
static unsigned char *handler(const unsigned char *bytes)
{
static unsigned char ether_frame[1500];
const struct udp_hdr *udp_hdr = (const struct udp_hdr *)(bytes + 34);
if (rte_be_to_cpu_16(udp_hdr->src_port) == 53) {
memcpy(ether_frame, bytes, 1500);
return ether_frame;
}
return NULL;
}
static unsigned char *map_resp(char *fname)
{
char error_buffer[PCAP_ERRBUF_SIZE];
pcap_t *handle;
unsigned char *p;
const unsigned char *packet;
struct pcap_pkthdr header;
handle = pcap_open_offline(fname, error_buffer);
if (!handle)
return NULL;
p = NULL;
packet = pcap_next(handle, &header);
while (!p && packet) {
p = handler(packet);
packet = pcap_next(handle, &header);
}
return p;
}
static int read_host_names(char *host_names[], const char *cfg_file)
{
struct rte_cfgfile *file;
const char *fname;
int fd;
ssize_t len;
char *buf;
unsigned i;
file = rte_cfgfile_load(cfg_file, 0);
if (!file) {
printf("[%s()] rte_cfgfile_load failed\n", __func__);
return -1;
}
fname = rte_cfgfile_get_entry(file, "0", "dns_file_name");
if (!fname) {
printf("[%s()] failed to get dns_file entry\n", __func__);
return -1;
}
fd = open(fname, O_RDONLY);
if (fd == -1) {
printf("[%s()] failed to open file %s\n", __func__, fname);
return -1;
}
len = lseek(fd, 0, SEEK_END);
if (len == -1) {
printf("[%s()] lseek failed\n", __func__);
return -1;
}
buf = mmap(0, len, PROT_READ, MAP_SHARED, fd, 0);
if (buf == MAP_FAILED) {
printf("[%s()] failed to mmap file %s\n", __func__, fname);
return -1;
}
for (i = 0; i < MAX_DN; i++) {
strncpy(host_names[i], buf, strlen(buf));
buf += strlen(host_names[i]);
if (!buf[0] || !buf[1])
break;
buf++;
}
return i + 1;
}
static void scan_and_print(unsigned char *pkt, char (*hname)[MAX_DNS_NAME_LEN])
{
int addr4_cnt, addr6_cnt;
struct in_addr addr4[100];
int i;
unsigned match_id;
addr4_cnt = 0;
epc_sponsdn_scan((const char *)pkt, 1500, NULL, &match_id, NULL,
&addr4_cnt, NULL, NULL, &addr6_cnt);
if (addr4_cnt) {
epc_sponsdn_scan((const char *)pkt, 1500, NULL, &match_id, addr4,
&addr4_cnt, NULL, NULL, &addr6_cnt);
printf("Host name %s\n", hname[match_id]);
for (i = 0; i < addr4_cnt; i++)
printf("IP address %s\n", inet_ntoa(addr4[i]));
} else {
printf("Domain name not found\n");
}
}
int main(int argc, char **argv)
{
int rc;
int ret;
char hname[MAX_DN][MAX_DNS_NAME_LEN];
char *hname_tbl[MAX_DN];
unsigned int id[MAX_DN];
int i, n;
unsigned char *pkt10;
ret = rte_eal_init(argc, argv);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
ret++;
for (i = 0; i < MAX_DN; i++)
hname_tbl[i] = (char *)&hname[i];
n = read_host_names(hname_tbl, argv[ret]);
ret++;
pkt10 = map_resp(argv[ret]);
rc = epc_sponsdn_create(n);
if (rc) {
printf("error allocating sponsored DN context %d\n", rc);
return EXIT_FAILURE;
}
for (i = 0; i < n; i++)
id[i] = i;
for (i = 0; i < n; i++)
printf("Hostname %s\n", hname_tbl[i]);
rc = epc_sponsdn_dn_add_multi(hname_tbl, id, n);
if (rc) {
printf("failed to add DN error code %d\n", rc);
return rc;
}
scan_and_print(pkt10 + 0x2a, hname);
printf("Deleting %s\n", hname_tbl[0]);
epc_sponsdn_dn_del(hname_tbl, 1);
scan_and_print(pkt10 + 0x2a, hname);
printf("Deleting %s\n", hname_tbl[1]);
epc_sponsdn_dn_del(&hname_tbl[1], 1);
scan_and_print(pkt10 + 0x2a, hname);
epc_sponsdn_free();
return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | cp/cp_stats.h | <filename>cp/cp_stats.h<gh_stars>0
/*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CP_STATS_H
#define CP_STATS_H
#include <stdint.h>
#include <time.h>
#include <rte_common.h>
#include "gw_adapter.h"
#define __file__ (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
/**
* @file
*
* Control Plane statistic declarations
*/
/**
* @brief : counters used to display statistics on the control plane
*/
struct cp_stats_t {
uint64_t time;
clock_t execution_time;
clock_t reset_time;
uint64_t create_session;
uint64_t delete_session;
uint64_t modify_bearer;
uint64_t rel_access_bearer;
uint64_t bearer_resource;
uint64_t create_bearer;
uint64_t delete_bearer;
uint64_t ddn;
uint64_t ddn_ack;
uint64_t echo;
uint64_t rx;
uint64_t tx;
uint64_t rx_last;
uint64_t tx_last;
char stat_timestamp[LAST_TIMER_SIZE];
};
extern struct cp_stats_t cp_stats;
/**
* @brief : Prints control plane signaling message statistics
* @param : Currently not being used
* @return : Never returns/value ignored
*/
int
do_stats(__rte_unused void *ptr);
/**
* @brief : clears the control plane statistic counters
* @param : No param
* @return : Returns nothing
*/
void
reset_cp_stats(void);
#endif
|
nikhilc149/e-utran-features-bug-fixes | cp_dp_api/teid_upf.c | <reponame>nikhilc149/e-utran-features-bug-fixes<filename>cp_dp_api/teid_upf.c<gh_stars>0
/*
* Copyright (c) 2017 Intel Corporation
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <rte_ethdev.h>
#include <rte_kni.h>
#include <arpa/inet.h>
#include "teid_upf.h"
#include "gw_adapter.h"
#include "pfcp_struct.h"
#include "pfcp_util.h"
#include "pfcp_set_ie.h"
#define RI_MAX 8
static int MAX_TEID[RI_MAX] = {0,2,4,8,16,32,64,128};
extern int clSystemLog;
#define BUF_READ_SIZE 256
#define FIRST_LINE 1
/* variable will be used to check if dp already have active session with any cp
* or not, if teidri is 0 and if any cp tried to setup association with dp
*/
bool assoc_available = true;
/**
* ipv4 address format.
*/
#define IPV4_ADDR "%u.%u.%u.%u"
#define IPV4_ADDR_HOST_FORMAT(a) (uint8_t)(((a) & 0xff000000) >> 24), \
(uint8_t)(((a) & 0x00ff0000) >> 16), \
(uint8_t)(((a) & 0x0000ff00) >> 8), \
(uint8_t)((a) & 0x000000ff)
/* TEIDRI data info file fd */
FILE *teidri_fd = NULL;
int8_t
assign_teid_range(uint8_t val, teidri_info **free_list_head)
{
uint8_t teid_range = 0;
teidri_info *temp = NULL;
if(val == 0){
return 0;
}else if (val > RI_MAX){
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Teid value is not between 0 to 7\n", LOG_VALUE);
return -1;
}
temp = *free_list_head;
/* Assigning first teid range from list */
if(temp != NULL){
teid_range = temp->teid_range;
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT" Assigned teid range: %d\n", LOG_VALUE, teid_range);
return teid_range;
}else{
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT" TEID range is not available. \n", LOG_VALUE);
return -1;
}
}
int
search_list_for_teid_range(teidri_info **head, uint8_t teid_range)
{
teidri_info *temp = NULL;
if(*head != NULL){
temp = *head;
while(temp != NULL){
if(temp->teid_range == teid_range){
return 0;
}
temp = temp->next;
}
}
return -1;
}
void
create_teid_range_free_list(teidri_info **blocked_list_head,
teidri_info **free_list_head, uint8_t teidri_val, uint8_t num_cp)
{
int ret = 0;
uint8_t blocked_list_len = num_cp;
uint8_t temp_teid_range;
for(temp_teid_range = 0; temp_teid_range < MAX_TEID[teidri_val] ; temp_teid_range++){
if((blocked_list_len != 0) && (*blocked_list_head != NULL)){
/*Search in teid range is in blocked list*/
ret = search_list_for_teid_range(blocked_list_head, temp_teid_range);
if(ret == 0){
/*If teid range is in blocked list dont add it to free list and continue*/
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Teid range %d found in blocked list\n",
LOG_VALUE, temp_teid_range);
blocked_list_len--;
continue;
}
}
/*teid range is not in blocked list, add it to free list*/
teidri_info *upf_info = malloc(sizeof(teidri_info));
if(upf_info == NULL){
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Failed to add node for teid range %d in free list\n",
LOG_VALUE, temp_teid_range);
return;
}
upf_info->node_addr.ipv4_addr = 0;
memset(upf_info->node_addr.ipv6_addr, 0, IPV6_ADDRESS_LEN);
upf_info->node_addr.ip_type = 0;
upf_info->teid_range = temp_teid_range;
upf_info->next = NULL;
ret = add_teidri_info(free_list_head, upf_info);
if(ret != 0){
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Failed to add for node teid range %d in free list\n",
LOG_VALUE, temp_teid_range);
return;
}
}
}
teidri_info *
get_teidri_info(teidri_info **head, node_address_t upf_ip)
{
teidri_info *temp = NULL;
if(*head != NULL){
temp = *head;
while(temp != NULL){
if(upf_ip.ip_type == PDN_TYPE_IPV4 && (temp->node_addr.ipv4_addr == upf_ip.ipv4_addr)) {
return temp;
} else if (upf_ip.ip_type == PDN_TYPE_IPV6
&& (memcmp(temp->node_addr.ipv6_addr, upf_ip.ipv6_addr, IPV6_ADDRESS_LEN) == 0)) {
return temp;
}
temp = temp->next;
}
}
return NULL;
}
int8_t
add_teidri_info(teidri_info **head, teidri_info *newNode)
{
if (*head == NULL) {
*head = newNode;
}else{
teidri_info *temp = *head;
while(temp->next != NULL){
temp = temp->next;
}
temp->next = newNode;
}
return 0;
}
void
delete_entry_from_list_for_teid_range(teidri_info **head, uint8_t teid_range)
{
teidri_info *temp = NULL;
teidri_info *prev = NULL;
if(*head == NULL){
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
"Failed to remove teidri information from free list for teid range : %d, List is empty\n",
LOG_VALUE, teid_range);
return;
}
temp = *head;
/* If node to be deleted is first node */
if(temp->teid_range == teid_range){
*head = temp->next;
free(temp);
return;
}
/* If node to be deleted is not first node */
prev = *head;
while(temp != NULL){
if(temp->teid_range == teid_range){
prev->next = temp->next;
free(temp);
return;
}
prev = temp;
temp = temp->next;
}
}
void
delete_entry_from_teidri_list_for_ip(node_address_t node_value, teidri_info **head)
{
teidri_info *temp = NULL;
teidri_info *prev = NULL;
if(head == NULL){
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Failed to remove teidri information for cp, List is empty\n",
LOG_VALUE);
return;
}
temp = *head;
/* If node to be deleted is first node */
if (compare_ip_address(temp->node_addr, node_value)) {
*head = temp->next;
free(temp);
return;
}
/* If node to be deleted is not first node */
prev = *head;
while(temp != NULL){
if(compare_ip_address(temp->node_addr, node_value)){
prev->next = temp->next;
free(temp);
return;
}
prev = temp;
temp = temp->next;
}
}
/* Read the assigned TEID range indication (TEIDRI) and CP node address
 * records from "filename".
 *
 * File layout (CSV, one record per line):
 *   line 1     : "TEIDRI , <value> ,"
 *   lines 2..n : "<teid_range> ,<ipv6 or 0>, <ipv4 decimal>, <ipv4 dotted>,"
 *
 * Behavior:
 *   - If the file does not exist, it is created with only the TEIDRI
 *     header line and all TEID ranges are put on the free list.
 *   - If the stored TEIDRI differs from teidri_val (or is absent), the
 *     file is truncated and rewritten with the new TEIDRI; old records
 *     are discarded.
 *   - Otherwise each record is parsed into a teidri_info node and added
 *     to the blocked list; the remaining ranges go to the free list.
 *
 * Returns 0 on success, -1 on file I/O or allocation failure.
 */
int
read_teidri_data (char *filename, teidri_info **blocked_list_head,
		teidri_info **free_list_head, uint8_t teidri_val)
{
	char str_buf[BUF_READ_SIZE] = {0};	/* holds one CSV record per fgets() */
	char *token = NULL;
	int ret = 0;
	uint8_t num_cp = 0;			/* count of CP records successfully read */

	/* Open file for read data if Created , if file not create then create file for store data */
	if ((teidri_fd = fopen(filename, "r")) == NULL) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Creating New file %s : "
				"ERROR : %s\n",LOG_VALUE, filename, strerror(errno));
		/* Assume file is not created */
		if ((teidri_fd = fopen(filename, "w")) == NULL) {
			RTE_LOG(NOTICE, DP, LOG_FORMAT"Error: %s \n", LOG_VALUE, strerror(errno));
			return -1;
		}
		/* Write into file TEIDRI value\n */
		if (fprintf(teidri_fd, "TEIDRI , %u ,\n", teidri_val) < 0) {
			/* NOTE(review): teidri_fd stays open on this early return — TODO confirm */
			RTE_LOG(NOTICE, DP, LOG_FORMAT"Failed to write into "
					"file, Error : %s \n", LOG_VALUE, strerror(errno));
			return -1;
		}
		fclose(teidri_fd);

		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Adding nodes in free list\n", LOG_VALUE);
		/* Fresh file: no CP records (num_cp == 0), so every range is free */
		create_teid_range_free_list(blocked_list_head, free_list_head,
				teidri_val, num_cp);
		return 0;
	}

	RTE_LOG(NOTICE, DP, LOG_FORMAT"File Open for Reading TEIDRI Data :: START : \n", LOG_VALUE);

	bool old_teidri_found = false;	/* true when stored TEIDRI == teidri_val */
	uint8_t old_teidri = 0;		/* TEIDRI value read from line 1 */
	uint8_t line_num = 0;

	while ((fgets(str_buf, BUF_READ_SIZE, teidri_fd)) != NULL ) {
		/* Read CP Node address */
		/* Format : node addr , TEIDRI ,\n*/
		token = strtok(str_buf, ",");
		if (token != NULL) {
			line_num++;
			if(line_num == FIRST_LINE){
				/* Line 1 must carry the TEIDRI header */
				if(strncmp(token,TEID_NAME,strnlen(TEID_NAME, TEID_LEN)) == 0){
					token = strtok(NULL, ",");
					if (token != NULL) {
						old_teidri = atoi(token);
						if(old_teidri != teidri_val){
							clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" New TEIDRI value (%u) dose not match with previous "
									"TEIDRI value (%u). Cleaning records for old TEIDRI data",
									LOG_VALUE, teidri_val, old_teidri);
						}else{
							clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" New TEIDRI value (%u) is same as previous TEIDRI value (%u)\n",
									LOG_VALUE, teidri_val, old_teidri);
							old_teidri_found = true;
						}
					}else{
						/*If TEIDRI value is not present in file*/
						clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Previous TEIDRI value not found in file "
								"Cleaning records from file", LOG_VALUE);
					}
				}
				if(old_teidri_found == false){
					/* TEIDRI changed or missing: truncate the file,
					 * rewrite only the new TEIDRI header, and rebuild
					 * the free list with zero allocated records. */
					/*Close file opened in read mode*/
					if (fclose(teidri_fd) != 0) {
						RTE_LOG(NOTICE, DP, LOG_FORMAT"ERROR : %s\n", LOG_VALUE, strerror(errno));
						return -1;
					}
					/*Open file to write*/
					if ((teidri_fd = fopen(filename, "w")) == NULL) {
						RTE_LOG(NOTICE, DP, LOG_FORMAT"Error: %s \n", LOG_VALUE, strerror(errno));
						return -1;
					}
					/* Write into file TEIDRI value\n */
					if (fprintf(teidri_fd, "TEIDRI , %u ,\n", teidri_val) < 0) {
						/* NOTE(review): teidri_fd stays open on this early return — TODO confirm */
						RTE_LOG(NOTICE, DP, LOG_FORMAT"Failed to write into "
								"file, Error : %s \n", LOG_VALUE, strerror(errno));
						return -1;
					}
					if (fclose(teidri_fd) != 0) {
						RTE_LOG(NOTICE, DP, LOG_FORMAT"ERROR : %s\n", LOG_VALUE, strerror(errno));
						return -1;
					}
					create_teid_range_free_list(blocked_list_head, free_list_head, teidri_val, num_cp);
					return 0;
				}
			}
			if(line_num > FIRST_LINE){
				/* Data record: "<teid_range> ,<ipv6|0>, <ipv4>, ..." */
				teidri_info *upf_info = NULL;
				upf_info = (teidri_info *)malloc(sizeof(teidri_info));
				if(upf_info == NULL) {
					/* NOTE(review): teidri_fd stays open on this early return — TODO confirm */
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Failed to add node \n",
							LOG_VALUE);
					return -1;
				}
				bzero(&upf_info->node_addr, sizeof(upf_info->node_addr));
				upf_info->teid_range = atoi(token);
				upf_info->next = NULL;
				token = strtok(NULL, ",");
				/* A literal '0' in the IPv6 column means "no IPv6 address" */
				if (token != NULL && *token != '0') {
					/*Extract IPv6 address, if present*/
					inet_pton(AF_INET6, token, &upf_info->node_addr.ipv6_addr);
					upf_info->node_addr.ip_type = PDN_TYPE_IPV6;
				} else {
					token = strtok(NULL, ",");
					if (token != NULL) {
						/*Extract IPv4 address, if present*/
						upf_info->node_addr.ipv4_addr = atoi(token);
						upf_info->node_addr.ip_type = PDN_TYPE_IPV4;
					} else {
						/* NOTE(review): the record is still appended to the
						 * blocked list below even though no IP was found —
						 * TODO confirm intended behavior */
						clLog(clSystemLog, eCLSeverityDebug,
								LOG_FORMAT"WARNING: IP address not found for record, skip this record\n",
								LOG_VALUE);
					}
				}
				/* NOTE(review): upf_info is always non-NULL here (malloc
				 * failure returned above), so the else branch is dead code. */
				if (upf_info != NULL) {
					ret = add_teidri_info(blocked_list_head, upf_info);
					if(ret != 0){
						clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Failed to add node for cp n",
								LOG_VALUE);
						return -1;
					}
				} else {
					clLog(clSystemLog, eCLSeverityDebug,
							LOG_FORMAT"No UPF info filled the for record\n",
							LOG_VALUE);
					free(upf_info);
					upf_info = NULL;
					continue;
				}
			}
		} else {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"WARNING: Data not found in record, check next record \n",
					LOG_VALUE);
			continue;
		}
		num_cp++;
	}
	RTE_LOG(NOTICE, DP, LOG_FORMAT"Number of CP : %d \n", LOG_VALUE, num_cp);
	if (fclose(teidri_fd) != 0) {
		RTE_LOG(NOTICE, DP, LOG_FORMAT"ERROR : %s\n", LOG_VALUE, strerror(errno));
		return -1;
	}
	/* Adding nodes in free list */
	create_teid_range_free_list(blocked_list_head, free_list_head, teidri_val, num_cp);
	RTE_LOG(NOTICE, DP, LOG_FORMAT "File Close :: END : \n", LOG_VALUE);
	return 0;
}
/**
 * @brief : Look up the TEID range assigned to a CP node address.
 * @param : teid_range, out parameter; set only when a record is found
 * @param : node_addr, CP node address to search for
 * @param : head, address of the teidri_info list head pointer
 * @return : 1 when the address is found (teid_range filled), 0 otherwise
 */
int
get_teidri_from_list(uint8_t *teid_range, node_address_t node_addr, teidri_info **head)
{
	teidri_info *entry = get_teidri_info(head, node_addr);

	if (entry == NULL) {
		/* Node address not found into stored data */
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"NODE Address not found \n", LOG_VALUE);
		return 0;
	}

	*teid_range = entry->teid_range;
	return 1;
}
/* Function to write teid range and node address into file in csv format */
/* TODO : add one more paramete for file name in both read and write teidri data function */
/**
 * @brief : Record the TEID range assigned to a CP node: insert it into
 *          the allocated list, drop the range from the free list, and
 *          append the record to "filename" (when non-NULL) in CSV form.
 * @param : teid_range, range value assigned to the node
 * @param : node_addr, CP node address (IPv4 or IPv6)
 * @param : filename, CSV file to append to; NULL skips the file write
 * @param : add_list_head, list the new record is inserted into
 * @param : remove_list_head, list the range is removed from
 * @return : 0 on success, -1 on invalid address/allocation/file failure
 */
int
add_teidri_node_entry(uint8_t teid_range, node_address_t node_addr, char *filename,
		teidri_info **add_list_head, teidri_info **remove_list_head)
{
	teidri_info *upf_info = NULL;
	int ret = 0;

	if (node_addr.ipv4_addr == 0 && !*node_addr.ipv6_addr) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"NODE Address is "
				"NULL\n", LOG_VALUE);
		return -1;
	}

	upf_info = get_teidri_info(add_list_head, node_addr);
	if (upf_info == NULL) {
		upf_info = malloc(sizeof(teidri_info));
		if (upf_info == NULL) {
			/* BUG FIX: pass the scalar, not the whole struct, to %u */
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Failed to add node for ip : %u\n",
					LOG_VALUE, node_addr.ipv4_addr);
			return -1;
		}
		/* BUG FIX: zero the whole record so the unused address family is
		 * not left as uninitialized malloc memory (it is printed below). */
		memset(upf_info, 0, sizeof(teidri_info));

		/* copy into data structure */
		if (node_addr.ip_type == PDN_TYPE_IPV4) {
			upf_info->node_addr.ipv4_addr = node_addr.ipv4_addr;
			upf_info->node_addr.ip_type = PDN_TYPE_IPV4;
		} else if (node_addr.ip_type == PDN_TYPE_IPV6) {
			memcpy(upf_info->node_addr.ipv6_addr, node_addr.ipv6_addr, IPV6_ADDRESS_LEN);
			upf_info->node_addr.ip_type = PDN_TYPE_IPV6;
		}

		upf_info->teid_range = teid_range;
		upf_info->next = NULL;

		ret = add_teidri_info(add_list_head, upf_info);
		if (ret != 0) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Failed to add node for cp ip : %u\n",
					LOG_VALUE, node_addr.ipv4_addr);
			/* BUG FIX: free the node on insert failure (was leaked) */
			free(upf_info);
			return -1;
		}

		/* Remove node from list*/
		delete_entry_from_list_for_teid_range(remove_list_head, teid_range);
	}

	if (filename != NULL) {
		/* Open file for write data in append mode */
		if ((teidri_fd = fopen(filename, "a")) == NULL) {
			RTE_LOG(NOTICE, DP, LOG_FORMAT"Failed to open file, "
					"Error : %s \n", LOG_VALUE, strerror(errno));
			return -1;
		}
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"File open : %s successfully \n", LOG_VALUE, filename);

		/* Set fd at end of the file */
		fseek(teidri_fd, 0L,SEEK_END);

		/* Write into file in cvs format FORMAT :
		 * teid_range , ipv6 addr , ipv4 addr (decimal) , ipv4 addr (dotted)\n
		 */
		if (fprintf(teidri_fd, "%u ,"IPv6_FMT", %u, "IPV4_ADDR", \n",
					teid_range, PRINT_IPV6_ADDR(node_addr.ipv6_addr),
					node_addr.ipv4_addr,
					IPV4_ADDR_HOST_FORMAT(ntohl(node_addr.ipv4_addr))) < 0) {
			RTE_LOG(NOTICE, DP, LOG_FORMAT"Failed to write into "
					"file, Error : %s \n", LOG_VALUE, strerror(errno));
			/* BUG FIX: close the stream on write failure (fd was leaked) */
			fclose(teidri_fd);
			return -1;
		}
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"IPV6_ADDR : "IPv6_FMT",IPV4_ADDR : %u : teid range : %d \n",
				LOG_VALUE,
				PRINT_IPV6_ADDR(upf_info->node_addr.ipv6_addr),
				upf_info->node_addr.ipv4_addr, upf_info->teid_range);

		if (fclose(teidri_fd) != 0) {
			RTE_LOG(NOTICE, DP, LOG_FORMAT"Failed to close file, "
					"Error: %s\n", LOG_VALUE , strerror(errno));
			return -1;
		}
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"File close : %s successfully \n", LOG_VALUE, filename);
	}
	return 0;
}
/**
 * @brief : Flush inactive TEIDRI records: move every node from the
 *          blocked list to the free list, then rewrite "filename" so it
 *          contains only the TEIDRI header and the active (allocated)
 *          records.
 * @param : filename, CSV file to rewrite
 * @param : blocked_list_head, list of inactive peers; emptied on return
 * @param : allocated_list_head, list of active peers written to file
 * @param : free_list_head, list receiving the released ranges
 * @param : teidri_val, TEIDRI value written on the file's first line
 * @return : 0 on success, -1 on file I/O failure
 */
int
flush_inactive_teidri_data(char *filename, teidri_info **blocked_list_head, teidri_info **allocated_list_head,
		teidri_info **free_list_head, uint8_t teidri_val){

	teidri_info *temp = NULL;

	if(*blocked_list_head == NULL){
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"No Inactive teidri records found \n",
				LOG_VALUE);
	}else{
		/* Remove data of inactive peers from blocked list,
		 * and add it to free list
		 */
		/* Traverse till end of free list*/
		temp = *free_list_head;
		if(temp != NULL){
			while(temp->next != NULL){
				temp = temp->next;
			}
			/* Append blocked list to free list*/
			temp->next = *blocked_list_head;
		}else{
			/* BUG FIX: when the free list is empty the blocked nodes must
			 * become the new free list; the original only copied the head
			 * into a local pointer, losing (leaking) every blocked node. */
			*free_list_head = *blocked_list_head;
		}
		/* Remove all contents from blocked list*/
		*blocked_list_head = NULL;
	}

	/* Add data for active peers in file */
	if ((teidri_fd = fopen(filename, "w")) == NULL ) {
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Failed to open file [%s], Error : %s \n",
				LOG_VALUE, filename, strerror(errno));
		return -1;
	}

	/* Write into file TEIDRI value\n */
	if (fprintf(teidri_fd, "TEIDRI , %u ,\n", teidri_val) < 0) {
		RTE_LOG(NOTICE, DP, LOG_FORMAT"Failed to write into "
				"file, Error : %s \n", LOG_VALUE, strerror(errno));
		/* BUG FIX: close the stream on write failure (fd was leaked) */
		fclose(teidri_fd);
		return -1;
	}

	temp = *allocated_list_head;
	while(temp != NULL){
		/* Write into file in cvs format FORMAT :
		 * teid_range , ipv6 addr , ipv4 addr (decimal) , ipv4 addr (dotted)\n
		 */
		if (fprintf(teidri_fd, "%u ,"IPv6_FMT", %u,"IPV4_ADDR", \n",
					temp->teid_range, PRINT_IPV6_ADDR(temp->node_addr.ipv6_addr),
					temp->node_addr.ipv4_addr,
					IPV4_ADDR_HOST_FORMAT(ntohl(temp->node_addr.ipv4_addr))) < 0) {
			/* BUG FIX: "IPV4_ADDR" is now literal text inside the format
			 * string (as in delete_teidri_node_entry); the original
			 * expanded the IPV4_ADDR format macro without supplying its
			 * arguments, mismatching the varargs. Best-effort: keep
			 * writing the remaining records. */
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Failed to write into file, IPV6_ADDR : "IPv6_FMT", "
					"IPV4_ADDR : %u : teid range : %d \n", LOG_VALUE,
					PRINT_IPV6_ADDR(temp->node_addr.ipv6_addr),
					temp->node_addr.ipv4_addr, temp->teid_range);
		}
		temp = temp->next;
	}

	if (fclose(teidri_fd) != 0) {
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Failed to close file, Error : %s\n", LOG_VALUE, strerror(errno));
		return -1;
	}
	return 0;
}
/* Function to delete teidri node addr entry from file */
/**
 * @brief : Delete the record for node_addr: return its TEID range to
 *          the free list, drop the node from the allocated list, and
 *          rewrite the file with the remaining records.
 * @param : filename, CSV file to rewrite
 * @param : node_addr, CP node address whose record is removed
 * @param : head, allocated list the node is removed from
 * @param : free_list_head, list receiving the released range
 * @param : teidri_val, TEIDRI value written on the file's first line
 * @return : 0 on success, -1 on lookup/allocation/file failure
 */
int
delete_teidri_node_entry(char *filename, node_address_t node_addr, teidri_info **head, teidri_info **free_list_head,
		uint8_t teidri_val){

	teidri_info *upf_info = NULL;
	teidri_info *temp = NULL;
	int ret = 0;

	if (node_addr.ipv4_addr == 0 && !*node_addr.ipv6_addr) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"NODE Address is "
				"NULL\n", LOG_VALUE);
		return -1;
	}

	temp = get_teidri_info(head, node_addr);
	if(temp == NULL){
		/* BUG FIX: pass the scalar, not the whole struct, to %u */
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Failed to find to be deleted for IP: %u\n", LOG_VALUE, node_addr.ipv4_addr);
		return -1;
	}

	upf_info = malloc(sizeof(teidri_info));
	if(upf_info == NULL){
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Failed to add node for IP: %u\n",
				LOG_VALUE, node_addr.ipv4_addr);
		return -1;
	}
	/* Zero the record so the unused address family stays predictable */
	memset(upf_info, 0, sizeof(teidri_info));

	if (node_addr.ip_type == PDN_TYPE_IPV4) {
		upf_info->node_addr.ipv4_addr = node_addr.ipv4_addr;
		upf_info->node_addr.ip_type = PDN_TYPE_IPV4;
	} else if (node_addr.ip_type == PDN_TYPE_IPV6) {
		memcpy(upf_info->node_addr.ipv6_addr, node_addr.ipv6_addr, IPV6_ADDRESS_LEN);
		upf_info->node_addr.ip_type = PDN_TYPE_IPV6;
	}
	upf_info->teid_range = temp->teid_range;
	upf_info->next = NULL;

	ret = add_teidri_info(free_list_head, upf_info);
	if(ret != 0){
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Failed to add node for CP IP: %u\n",
				LOG_VALUE, node_addr.ipv4_addr);
		/* BUG FIX: free the node on insert failure (was leaked) */
		free(upf_info);
		return -1;
	}

	/* Delete node entry from allocated list */
	delete_entry_from_teidri_list_for_ip(node_addr, head);

	if ((teidri_fd = fopen(filename, "w")) == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to open file, Error : %s \n", LOG_VALUE, strerror(errno));
		return -1;
	}

	/* Write into file TEIDRI value\n */
	if (fprintf(teidri_fd, "TEIDRI , %u ,\n", teidri_val) < 0) {
		RTE_LOG(NOTICE, DP, LOG_FORMAT"Failed to write into "
				"file, Error : %s \n", LOG_VALUE, strerror(errno));
		/* BUG FIX: close the stream on write failure (fd was leaked) */
		fclose(teidri_fd);
		return -1;
	}

	temp = *head;
	while (temp != NULL) {
		/* Write into file in cvs format FORMAT :
		 * teid_range , ipv6 addr , ipv4 addr (decimal) , ipv4 addr (dotted)\n
		 */
		if (fprintf(teidri_fd, "%u ,"IPv6_FMT", %u,"IPV4_ADDR", \n",
					temp->teid_range, PRINT_IPV6_ADDR(temp->node_addr.ipv6_addr),
					/* BUG FIX: write this record's own IPv4 address; the
					 * original wrote node_addr.ipv4_addr (the deleted
					 * node's address) for every remaining record. */
					temp->node_addr.ipv4_addr,
					IPV4_ADDR_HOST_FORMAT(ntohl(temp->node_addr.ipv4_addr))) < 0) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Failed to write into file, IPV6_ADDR : "IPv6_FMT", "
					"IPV4_ADDR : %u : teid range : %d \n", LOG_VALUE,
					PRINT_IPV6_ADDR(temp->node_addr.ipv6_addr),
					temp->node_addr.ipv4_addr, temp->teid_range);
		}
		temp = temp->next;
	}

	if (fclose(teidri_fd) != 0) {
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Failed to close file, Error : %s\n", LOG_VALUE, strerror(errno));
		return -1;
	}
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Node entry removed from "
			"list for node addr : %u \n", LOG_VALUE, node_addr.ipv4_addr);
	return 0;
}
/**
 * @brief : Compare two node addresses; they match only when both carry
 *          the same IP family and the addresses of that family are equal.
 * @param : node, first address
 * @param : addr, second address
 * @return : true when the addresses match, false otherwise
 */
uint8_t compare_ip_address(node_address_t node, node_address_t addr) {
	/* Mixed families (or two unknown families) never match */
	if (node.ip_type != addr.ip_type)
		return false;

	if (node.ip_type == PDN_TYPE_IPV4)
		return node.ipv4_addr == addr.ipv4_addr;

	if (node.ip_type == PDN_TYPE_IPV6)
		return memcmp(node.ipv6_addr, addr.ipv6_addr, IPV6_ADDRESS_LEN) == 0;

	return false;
}
|
nikhilc149/e-utran-features-bug-fixes | cp/gx_app/src/gx_raa.c | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include "gx.h"
extern void hexDump(char *desc, void *address, int len);
/*
 *
 * Fun: gx_send_raa
 *
 * Desc: Unpack the RAA encoded by the CP, build the Diameter
 * Re-Auth-Answer from the stored request pointer, and send it.
 *
 * Ret: FD_REASON_OK on success, -1 on allocation failure,
 * freeDiameter error reason otherwise
 *
 * Notes: "data" layout: [u32 total length][packed GxRAA]
 * [struct msg * of the original request, at offset total length]
 *
 * File: gx_raa.c
 *
 */
int gx_send_raa(void *data)
{
	int ret = FD_REASON_OK;
	struct msg *ans = NULL;
	GxRAA *gx_raa = NULL;

#ifdef GX_DEBUG
	printf("length is %d\n", *(uint32_t*)data );
	hexDump("gx_raa", data, *(uint32_t*)data);
#endif

	/* allocate the RAA structure */
	gx_raa = (GxRAA*)malloc(sizeof(*gx_raa));
	if (gx_raa == NULL) {
		/* BUG FIX: the original dereferenced a NULL pointer on OOM */
		return -1;
	}
	memset((void*)gx_raa, 0, sizeof(*gx_raa));

	gx_raa_unpack((unsigned char *)data, gx_raa);

	/* The original request pointer is stored right after the packed
	 * payload; recover it so the answer is tied to that request. */
	memcpy(&ans, ((unsigned char *)data + *(uint32_t*)data), sizeof(ans));

	/* construct the answer from the stored request */
	FDCHECK_MSG_NEW_ANSWER_FROM_REQ(fd_g_config->cnf_dict, ans, ret, goto err);
	FDCHECK_MSG_ADD_ORIGIN(ans, ret, goto err);
	FDCHECK_MSG_ADD_AVP_U32(gxDict.avp_result_code, ans, MSG_BRW_LAST_CHILD,
			gx_raa->result_code, ret, goto err );

#if GX_DEBUG
	FD_DUMP_MESSAGE(ans);
#endif

	/* send the message */
	FDCHECK_MSG_SEND( ans, NULL, NULL, ret, goto err );
	goto fini;

err:
	/* free the message since an error occurred */
	FDCHECK_MSG_FREE(ans);
fini:
	/* BUG FIX: gx_raa was leaked on every call.
	 * NOTE(review): buffers gx_raa_unpack may have attached to nested
	 * AVPs are not released here — confirm whether a matching
	 * gx_raa free routine exists. */
	free(gx_raa);
	return ret;
}
/*
*
* Fun: gx_raa_cb
*
* Desc: CMDNAME call back
*
* Ret: 0
*
* File: gx_raa.c
*
The Re-Auth-Answer (RAA) command, indicated by
the Command-Code field set to 258 and the 'R'
bit cleared in the Command Flags field, is sent to/from MME or SGSN.
*
Re-Auth-Answer ::= <Diameter Header: 258, PXY, 16777238>
< Session-Id >
[ DRMP ]
{ Origin-Host }
{ Origin-Realm }
[ Result-Code ]
[ Experimental-Result ]
[ Origin-State-Id ]
[ OC-Supported-Features ]
[ OC-OLR ]
[ IP-CAN-Type ]
[ RAT-Type ]
[ AN-Trusted ]
* 2 [ AN-GW-Address ]
[ 3GPP-SGSN-MCC-MNC ]
[ 3GPP-SGSN-Address ]
[ 3GPP-SGSN-Ipv6-Address ]
[ RAI ]
[ 3GPP-User-Location-Info ]
[ User-Location-Info-Time ]
[ NetLoc-Access-Support ]
[ User-CSG-Information ]
[ 3GPP-MS-TimeZone ]
[ Default-QoS-Information ]
* [ Charging-Rule-Report ]
[ Error-Message ]
[ Error-Reporting-Host ]
[ Failed-AVP ]
* [ Proxy-Info ]
* [ AVP ]
*/
int gx_raa_cb
(
	struct msg ** msg,
	struct avp * pavp,
	struct session * sess,
	void * data,
	enum disp_action * act
)
{
	/* Stub dispatch callback: only announces that an RAA arrived from
	 * the PCEF; the answer itself is not parsed or consumed here. */
	printf("===== RAA RECEIVED FOM PCEF======= \n");
	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | cp/gx_app/src/gx_rar.c | <reponame>nikhilc149/e-utran-features-bug-fixes
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include "gx.h"
#include "cp_app.h"
#include "ipc_api.h"
extern int g_gx_client_sock;
int unixsock();
/*TBD need to move this in freediameter generated code*/
/**
* @brief : Add element to freediameter message ready
* @param : [in] val - AVP value to be added
* @param : [in] obj - Disctionary object
* @param : [in/out] msg_buf
* @return : int Sucess or failure code
*/
int
add_fd_msg(union avp_value *val, struct dict_object * obj,
		struct msg **msg_buf)
{
	struct avp *new_avp = NULL;

	/* Create the AVP from its dictionary entry, attach the value, and
	 * hang it off the message as the last child; abort on any failure. */
	CHECK_FCT_DO(fd_msg_avp_new(obj, 0, &new_avp), return -1);
	CHECK_FCT_DO(fd_msg_avp_setvalue(new_avp, val), return -1);
	CHECK_FCT_DO(fd_msg_avp_add(*msg_buf, MSG_BRW_LAST_CHILD, new_avp),
			return -1);

	return 0;
}
/*
 *
 * Fun: gx_send_rar
 *
 * Desc: Build a Gx Re-Auth-Request carrying the origin and destination
 * host/realm AVPs and hand it to freeDiameter for transmission.
 *
 * Ret: FD_REASON_OK on success, freeDiameter error reason otherwise
 *
 * Notes: None
 *
 * File: gx_rar.c
 *
 */
int gx_send_rar(void *data)
{
	int rval = FD_REASON_OK;
	struct msg *msg = NULL;

	/* construct the request and populate its mandatory AVPs */
	FDCHECK_MSG_NEW( gxDict.cmdRAR, msg, rval, goto err );
	FDCHECK_MSG_ADD_ORIGIN( msg, rval, goto err );
	FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_destination_host, msg, MSG_BRW_LAST_CHILD, fd_g_config->cnf_diamid, fd_g_config->cnf_diamid_len, rval, goto err );
	FDCHECK_MSG_ADD_AVP_OSTR( gxDict.avp_destination_realm, msg, MSG_BRW_LAST_CHILD, fd_g_config->cnf_diamrlm, fd_g_config->cnf_diamrlm_len, rval, goto err );

	/* hand the request to freeDiameter */
	FDCHECK_MSG_SEND( msg, NULL, NULL, rval, goto err );
	goto fini;

err:
	/* free the partially built message after a failure */
	FDCHECK_MSG_FREE(msg);
fini:
	return rval;
}
/*
*
* Fun: gx_rar_cb
*
* Desc: CMDNAME call back
*
* Ret: 0
*
* File: gx_rar.c
*
The Re-Auth-Request (RAR) command, indicated by
the Command-Code field set to 258 and the 'R'
bit set in the Command Flags field, is sent to/from MME or SGSN.
*
Re-Auth-Request ::= <Diameter Header: 258, REQ, PXY, 16777238>
< Session-Id >
[ DRMP ]
{ Auth-Application-Id }
{ Origin-Host }
{ Origin-Realm }
{ Destination-Realm }
{ Destination-Host }
{ Re-Auth-Request-Type }
[ Session-Release-Cause ]
[ Origin-State-Id ]
[ OC-Supported-Features ]
* [ Event-Trigger ]
[ Event-Report-Indication ]
* [ Charging-Rule-Remove ]
* [ Charging-Rule-Install ]
[ Default-EPS-Bearer-QoS ]
* [ QoS-Information ]
[ Default-QoS-Information ]
[ Revalidation-Time ]
* [ Usage-Monitoring-Information ]
[ PCSCF-Restoration-Indication ]
* 4 [ Conditional-Policy-Information ]
[ Removal-Of-Access ]
[ IP-CAN-Type ]
[ PRA-Install ]
[ PRA-Remove ]
* [ CSG-Information-Reporting ]
* [ Proxy-Info ]
* [ Route-Record ]
* [ AVP ]
*/
int gx_rar_cb
(
	struct msg ** msg,
	struct avp * pavp,
	struct session * sess,
	void * data,
	enum disp_action * act
)
{
	int ret = FD_REASON_OK;
	struct msg *rqst = *msg;
	uint8_t *send_buf = NULL;
	gx_msg *gx_req = NULL;
	uint32_t buflen = 0;

	/* Take ownership of the request: freeDiameter must not dispose of
	 * it, the raw pointer is shipped to the CP over IPC and used later
	 * to answer the request. */
	*msg = NULL;

	gx_req = malloc(sizeof(gx_msg));
	if (gx_req == NULL) {
		/* BUG FIX: the original logged and then dereferenced NULL */
		printf("Memory Allocation fails for gx_req\n");
		return -1;
	}
	/* BUG FIX: the original used sizeof(gx_req) (pointer size), leaving
	 * most of the structure uninitialized. */
	memset(gx_req, 0, sizeof(gx_msg));

	gx_req->msg_type = GX_RAR_MSG;

	ret = gx_rar_parse(rqst, &(gx_req->data.cp_rar));
	if (ret != FD_REASON_OK) {
		goto err;
	}

	/* Cal the length of buffer needed */
	buflen = gx_rar_calc_length(&gx_req->data.cp_rar);
	gx_req->msg_len = buflen + GX_HEADER_LEN + sizeof(rqst);

	send_buf = malloc(GX_HEADER_LEN + buflen + sizeof(rqst));
	if (send_buf == NULL) {
		/* BUG FIX: the original logged and then dereferenced NULL */
		printf("Memory Allocation fails for send_buf\n");
		ret = -1;
		goto err;
	}
	memset(send_buf, 0, (GX_HEADER_LEN + buflen + sizeof(rqst)));

	/* encoding the rar header value to buffer: type, then total length */
	memcpy(send_buf, &gx_req->msg_type, sizeof(gx_req->msg_type));
	memcpy(send_buf + sizeof(gx_req->msg_type), &gx_req->msg_len,
			sizeof(gx_req->msg_len));

	if (gx_rar_pack(&(gx_req->data.cp_rar),
			(unsigned char *)(send_buf + GX_HEADER_LEN), buflen) == 0)
		printf("RAR Packing failure \n");

	/* Append the raw request pointer so the RAA path can answer it */
	memcpy((unsigned char *)(send_buf + GX_HEADER_LEN + buflen), &rqst, sizeof(rqst));

	send_to_ipc_channel(g_gx_client_sock, send_buf, buflen + GX_HEADER_LEN + sizeof(rqst));

	/* Free the memory sender buffer */
	free(send_buf);
	goto fini;

err:
	printf("Error (%d) while processing RAR\n", ret);
fini:
	/* BUG FIX: release the parsed payload BEFORE freeing its container;
	 * the original freed gx_req on the error path and then called
	 * gx_rar_free() on the freed memory (use-after-free), and leaked
	 * gx_req entirely on the success path. */
	gx_rar_free(&(gx_req->data.cp_rar));
	free(gx_req);
	return ret;
}
|
nikhilc149/e-utran-features-bug-fixes | cp/ipc_api.h | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef IPC_API_H
#define IPC_API_H
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
#include <signal.h>
#include <sys/wait.h>
#include <sys/stat.h>
/**
* @brief : Performs Gx Interface Unix socket creation
* @param : No param
* @return : Returns 0 in case of success , -1 otherwise
*/
int
create_ipc_channel(void );
/**
* @brief : Performs Gx Socket Bind
* @param : sock, GX socket id
* @param : sock_addr, socket address info
* @param : path, Filepath
* @return : Returns nothing
*/
void
bind_ipc_channel(int sock, struct sockaddr_un sock_addr,const char *path);
/**
* @brief : Performs Gx_app client connection
* @param : sock, GX socket id
* @param : sock_addr, socket address info
* @param : path, Filepath
* @return : Returns 0 in case of success, -1 otherwise
*/
int
connect_to_ipc_channel(int sock, struct sockaddr_un sock_addr, const char *path);
/**
* @brief : Performs Socket connection accept function
* @param : sock, socket id
* @param : sock_addr, socket address info
* @return : Returns 0 in case of success , -1 otherwise
*/
int
accept_from_ipc_channel(int sock, struct sockaddr_un sock_addr);
/**
 * @brief : Puts the Unix server socket into listening mode, waiting for
 *          Gx_app client connections
 * @param : sock, socket id
 * @return : Returns nothing
 */
void
listen_ipc_channel(int sock);
/**
 * @brief : Retrieve peer node name
* @param : sock, socket id
* @param : sock_addr, socket address info
* @return : Returns nothing
*/
void
get_peer_name(int sock, struct sockaddr_un sock_addr);
/**
* @brief : Accept data from created ipc channel
* @param : sock, socket id
* @param : buf, buffer to store incoming data
* @return : Returns number of bytes received in case of success , -1 otherwise
*/
int
recv_from_ipc_channel(int sock, char *buf);
/**
* @brief : Send data to created ipc channel
* @param : sock, socket id
* @param : buf, buffer to store data to be sent
* @param : len, total data length
* @return : number of bytes sent in case of success, -1 otherwise
*/
int
send_to_ipc_channel(int sock, uint8_t *buf, int len);
/**
* @brief : Close ipc channel
* @param : sock, socket id
* @return : Returns nothing
*/
void
close_ipc_channel(int sock);
#endif /* IPC_API_H*/
|
nikhilc149/e-utran-features-bug-fixes | cp/state_machine/sm_enum.h | <filename>cp/state_machine/sm_enum.h
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SM_ENUM_H
#define SM_ENUM_H
#include <stdio.h>
#include <stdint.h>
/* Identifiers for the control-plane procedures driven by the state machine. */
typedef enum {
	NONE_PROC,
	INITIAL_PDN_ATTACH_PROC,
	SERVICE_REQUEST_PROC,
	SGW_RELOCATION_PROC,
	CONN_SUSPEND_PROC,
	DETACH_PROC,
	DED_BER_ACTIVATION_PROC,
	PDN_GW_INIT_BEARER_DEACTIVATION,
	MME_INI_DEDICATED_BEARER_DEACTIVATION_PROC,
	UPDATE_BEARER_PROC,
	RESTORATION_RECOVERY_PROC,
	MODIFY_BEARER_PROCEDURE,
	ATTACH_DEDICATED_PROC,
	MODIFY_ACCESS_BEARER_PROC,
	CHANGE_NOTIFICATION_PROC,
	UPDATE_PDN_CONNECTION_PROC,
	UE_REQ_BER_RSRC_MOD_PROC,
	HSS_INITIATED_SUB_QOS_MOD,
	S1_HANDOVER_PROC,
	CREATE_INDIRECT_TUNNEL_PROC,
	DELETE_INDIRECT_TUNNEL_PROC,
	END_PROC	/* marker: keep as the last entry */
}sm_proc;
/* States of the control-plane state machine; names follow the
 * <message>_<SNT|RCVD>_STATE pattern (message sent / response received). */
typedef enum {
	SGWC_NONE_STATE,
	PFCP_ASSOC_REQ_SNT_STATE,
	PFCP_ASSOC_RESP_RCVD_STATE,
	PFCP_SESS_EST_REQ_SNT_STATE,
	PFCP_SESS_EST_RESP_RCVD_STATE,
	CONNECTED_STATE,
	IDEL_STATE,
	CS_REQ_SNT_STATE,
	CS_RESP_RCVD_STATE,
	PFCP_SESS_MOD_REQ_SNT_STATE,
	PFCP_SESS_MOD_RESP_RCVD_STATE,
	PFCP_SESS_DEL_REQ_SNT_STATE,
	PFCP_SESS_DEL_RESP_RCVD_STATE,
	DS_REQ_SNT_STATE,
	DS_RESP_RCVD_STATE,
	DDN_REQ_SNT_STATE,
	DDN_ACK_RCVD_STATE,
	MBR_REQ_SNT_STATE,
	MBR_RESP_RCVD_STATE,
	CREATE_BER_REQ_SNT_STATE,
	RE_AUTH_ANS_SNT_STATE,
	PGWC_NONE_STATE,
	CCR_SNT_STATE,
	CREATE_BER_RESP_SNT_STATE,
	PFCP_PFD_MGMT_RESP_RCVD_STATE,
	ERROR_OCCURED_STATE,
	UPDATE_BEARER_REQ_SNT_STATE,
	UPDATE_BEARER_RESP_SNT_STATE,
	DELETE_BER_REQ_SNT_STATE,
	CCRU_SNT_STATE,
	PGW_RSTRT_NOTIF_REQ_SNT_STATE,
	UPD_PDN_CONN_SET_REQ_SNT_STATE,
	DEL_PDN_CONN_SET_REQ_SNT_STATE,
	DEL_PDN_CONN_SET_REQ_RCVD_STATE,
	PFCP_SESS_SET_DEL_REQ_SNT_STATE,
	PFCP_SESS_SET_DEL_REQ_RCVD_STATE,
	DNS_SENT_STATE,
	PROVISION_ACK_SNT_STATE,
	END_STATE	/* marker: keep as the last entry */
}sm_state;
/* Events handled by the control-plane state machine; names follow the
 * <message>_RCVD_EVNT pattern for incoming messages. */
typedef enum {
	NONE_EVNT,
	CS_REQ_RCVD_EVNT,
	PFCP_ASSOC_SETUP_SNT_EVNT,
	PFCP_ASSOC_SETUP_RESP_RCVD_EVNT,
	PFCP_SESS_EST_REQ_RCVD_EVNT,
	PFCP_SESS_EST_RESP_RCVD_EVNT,
	CS_RESP_RCVD_EVNT,
	MB_REQ_RCVD_EVNT,
	PFCP_SESS_MOD_REQ_RCVD_EVNT,
	PFCP_SESS_MOD_RESP_RCVD_EVNT,
	MB_RESP_RCVD_EVNT,
	REL_ACC_BER_REQ_RCVD_EVNT,
	DS_REQ_RCVD_EVNT,
	PFCP_SESS_DEL_REQ_RCVD_EVNT,
	PFCP_SESS_DEL_RESP_RCVD_EVNT,
	DS_RESP_RCVD_EVNT,
	ECHO_REQ_RCVD_EVNT,
	ECHO_RESP_RCVD_EVNT,
	DDN_ACK_RESP_RCVD_EVNT,
	PFCP_SESS_RPT_REQ_RCVD_EVNT,
	RE_AUTH_REQ_RCVD_EVNT,
	CREATE_BER_RESP_RCVD_EVNT,
	CCA_RCVD_EVNT,
	CREATE_BER_REQ_RCVD_EVNT,
	PFCP_PFD_MGMT_RESP_RCVD_EVNT,
	ERROR_OCCURED_EVNT,
	UPDATE_BEARER_REQ_RCVD_EVNT,
	UPDATE_BEARER_RSP_RCVD_EVNT,
	DELETE_BER_REQ_RCVD_EVNT,
	DELETE_BER_RESP_RCVD_EVNT,
	DELETE_BER_CMD_RCVD_EVNT,
	CCAU_RCVD_EVNT,
	PGW_RSTRT_NOTIF_ACK_RCVD_EVNT,
	UPD_PDN_CONN_SET_REQ_RCVD_EVNT,
	UPD_PDN_CONN_SET_RESP_RCVD_EVNT,
	DEL_PDN_CONN_SET_REQ_RCVD_EVNT,
	DEL_PDN_CONN_SET_RESP_RCVD_EVNT,
	PFCP_SESS_SET_DEL_REQ_RCVD_EVNT,
	PFCP_SESS_SET_DEL_RESP_RCVD_EVNT,
	CHANGE_NOTIFICATION_REQ_RCVD_EVNT,
	CHANGE_NOTIFICATION_RSP_RCVD_EVNT,
	BEARER_RSRC_CMD_RCVD_EVNT,
	MODIFY_BEARER_CMD_RCVD_EVNT,
	CREATE_INDIR_DATA_FRWRD_TUN_REQ_RCVD_EVNT,
	DELETE_INDIR_DATA_FRWD_TUN_REQ_RCVD_EVNT,
	MAB_REQ_RCVD_EVNT,
	DDN_FAILURE_INDIC_EVNT,
	END_EVNT	/* marker: keep as the last entry */
}sm_event;
#endif
|
nikhilc149/e-utran-features-bug-fixes | cp/gtpv2c_messages/downlink_data_notification.c | /*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ue.h"
#include "pfcp.h"
#include "cp_stats.h"
#include "sm_struct.h"
#include "pfcp_util.h"
#include "debug_str.h"
#include "dp_ipc_api.h"
#include "gtpv2c_set_ie.h"
#include "pfcp_association.h"
#include "pfcp_messages_encoder.h"
#include "../cp_dp_api/vepc_cp_dp_api.h"
#include"cp_config.h"
#include "cp_timer.h"
extern int pfcp_fd;
extern int pfcp_fd_v6;
extern struct cp_stats_t cp_stats;
extern pfcp_config_t config;
extern int clSystemLog;
/**
* @brief : callback to handle downlink data notification messages from the
* data plane
* @param : msg_payload
* message payload received by control plane from the data plane
* @return : 0 inicates success, error otherwise
*/
int
cb_ddn(struct msgbuf *msg_payload)
{
int ret = ddn_by_session_id(msg_payload->msg_union.sess_entry.sess_id, NULL);
if (ret) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error on DDN Handling %s: (%d) %s\n", LOG_VALUE,
gtp_type_str(ret), ret,
(ret < 0 ? strerror(-ret) : cause_str(ret)));
}
return ret;
}
/**
 * @brief : Builds and sends a Downlink Data Notification towards the MME for
 *          the given session, unless a DDN is already outstanding for that
 *          session (in which case the request is only counted as buffered).
 * @param : session_id, CP session id (encodes S11 TEID and EBI)
 * @param : pfcp_pdr_id, PDR ids from the triggering session report, or NULL
 * @return : 0 on success, GTPv2-C cause / -1 on failure
 */
int
ddn_by_session_id(uint64_t session_id, pdr_ids *pfcp_pdr_id )
{
	uint8_t tx_buf[MAX_GTPV2C_UDP_LEN] = { 0 };
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *) tx_buf;
	uint32_t sgw_s11_gtpc_teid = UE_SESS_ID(session_id);
	ue_context *context = NULL;
	pdr_ids *pfcp_pdr = NULL;
	uint32_t sequence = 0;
	int ebi = 0;
	int ebi_index = 0;
	int ret = 0;

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"sgw_s11_gtpc_teid:%u\n",
			LOG_VALUE, sgw_s11_gtpc_teid);

	/* If an entry already exists, a DDN is outstanding: only bump the
	 * buffered-request counter (else-branch at the bottom). */
	ret = rte_hash_lookup_data(buffered_ddn_req_hash,
			(const void *) &session_id,
			(void **) &pfcp_pdr);
	if(ret < 0){
		ret = rte_hash_lookup_data(ue_context_by_fteid_hash,
				(const void *) &sgw_s11_gtpc_teid,
				(void **) &context);
		if (ret < 0 || !context)
			return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;

		sequence = generate_seq_number();
		ret = create_downlink_data_notification(context,
				UE_BEAR_ID(session_id),
				sequence,
				gtpv2c_tx, pfcp_pdr_id);
		if (ret)
			return ret;

		ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
		}

		uint16_t payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
			+ sizeof(gtpv2c_tx->gtpc);

		gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
				s11_mme_sockaddr, SENT);

		ebi = UE_BEAR_ID(session_id);
		ebi_index = GET_EBI_INDEX(ebi);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI Index\n", LOG_VALUE);
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}

		/* Arm the retransmission timer for the DDN we just sent. */
		add_gtpv2c_if_timer_entry(sgw_s11_gtpc_teid, &s11_mme_sockaddr, tx_buf,
				payload_length, ebi_index, S11_IFACE,
				context->cp_mode);

		++cp_stats.ddn;

		/* Allocate memory.
		 * BUGFIX: the allocation previously used sizeof(thrtle_count); the
		 * object is a pdr_ids and is filled below with a memcpy of
		 * sizeof(pdr_ids) bytes, so size it accordingly. */
		pfcp_pdr = rte_zmalloc_socket(NULL, sizeof(pdr_ids),
				RTE_CACHE_LINE_SIZE, rte_socket_id());
		if(pfcp_pdr == NULL ) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate "
					"Memory for pfcp_pdr_id structure, Error: %s \n", LOG_VALUE,
					rte_strerror(rte_errno));
			return -1;
		}
		if(pfcp_pdr_id != NULL) {
			memcpy(pfcp_pdr, pfcp_pdr_id, sizeof(pdr_ids));
			if(pfcp_pdr_id->ddn_buffered_count == 0)
				pfcp_pdr->ddn_buffered_count = 0;
		}

		/*Add session ids and pdr ids into buffered ddn request hash */
		ret = rte_hash_add_key_data(buffered_ddn_req_hash,
				(const void *)&session_id, pfcp_pdr);
		if(ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
					"Unable to add entry in buffered ddn request hash\n",
					LOG_VALUE);
			rte_free(pfcp_pdr);
			pfcp_pdr = NULL;
			return -1;
		}
	} else {
		/* DDN already in flight: remember one more buffered request. */
		pfcp_pdr->ddn_buffered_count += 1;
	}
	return 0;
}
/**
 * @brief : Encodes a Downlink Data Notification message into gtpv2c_tx for
 *          the given UE context and bearer.
 * @param : context, UE context (provides the MME S11 TEID for the header)
 * @param : eps_bearer_id, EBI of the bearer that triggered the DDN
 * @param : sequence, GTPv2-C sequence number for the header
 * @param : gtpv2c_tx, output buffer the encoded message is written into
 * @param : pdr, PDR ids from the session report, or NULL to use the default
 *          bearer identified by eps_bearer_id
 * @return : 0 on success, -1 / GTPv2-C cause on failure
 */
int
create_downlink_data_notification(ue_context *context, uint8_t eps_bearer_id,
		uint32_t sequence, gtpv2c_header_t *gtpv2c_tx, pdr_ids *pdr)
{
	/* NOTE(review): i stays fixed at 1 for the whole loop below, so only
	 * pdrs[1] of each bearer is ever compared — confirm this is intended
	 * (it looks like it should iterate, or index the DL PDR explicitly). */
	uint8_t i = 1;
	uint8_t j = 0;
	pdn_connection *pdn = NULL;
	dnlnk_data_notif_t dnl_data_notify = {0};
	set_gtpv2c_teid_header((gtpv2c_header_t *)&dnl_data_notify, GTP_DOWNLINK_DATA_NOTIFICATION,
			context->s11_mme_gtpc_teid, sequence, 0);
	int ebi_index = GET_EBI_INDEX(eps_bearer_id);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		return -1;
	}
	eps_bearer *bearer = context->eps_bearers[ebi_index];
	if (bearer == NULL)
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	pdn = bearer->pdn;
	if(pdr == NULL){
		/* No PDR list supplied: notify on the bearer given by the caller. */
		set_ebi(&dnl_data_notify.eps_bearer_id, IE_INSTANCE_ZERO, bearer->eps_bearer_id);
		set_ar_priority(&dnl_data_notify.alloc_reten_priority, IE_INSTANCE_ZERO, bearer);
	}else {
		/* Walk all bearers of the PDN and add EBI/ARP IEs for those whose
		 * PDR matches the reported PDR ids.
		 * NOTE(review): j advances past pdr->pdr_id[] without a bound check
		 * against pdr->pdr_count — verify the caller guarantees enough
		 * entries, and that pdrs[i] is always populated for every bearer. */
		for(uint8_t itr = 0; itr <MAX_BEARERS; itr++){
			if(pdn->eps_bearers[itr] != NULL){
				bearer = pdn->eps_bearers[itr];
				if(bearer->pdrs[i]->rule_id == pdr->pdr_id[j]){
					set_ebi(&dnl_data_notify.eps_bearer_id, IE_INSTANCE_ZERO, bearer->eps_bearer_id);
					set_ar_priority(&dnl_data_notify.alloc_reten_priority, IE_INSTANCE_ZERO, bearer);
					j++;
					pdr->pdr_count--;
				}
			}
		}
	}
	uint16_t msg_len = 0;
	msg_len = encode_dnlnk_data_notif(&dnl_data_notify, (uint8_t *)gtpv2c_tx);
	/* message_len excludes the fixed GTP header portion. */
	gtpv2c_tx->gtpc.message_len = htons(msg_len - IE_HEADER_SIZE);
	return 0;
}
/**
 * @brief : Fills and sends a PFCP Session Report Response towards the UPF,
 *          optionally carrying an Update BAR IE with the DL Buffering
 *          Suggested Packet Count.
 * @param : context, UE context (provides the CP mode for the header)
 * @param : sequence, PFCP sequence number to echo in the response
 * @param : pdn, PDN connection (provides BAR, DP SEID and UPF address)
 * @param : dl_buf_sugg_pkt_cnt, suggested packet count from the DDN Ack
 *          (0 means "not supplied")
 * @param : dldr_flag, TRUE when the report carried a Downlink Data Report
 * @return : nothing (errors are logged)
 */
void
fill_send_pfcp_sess_report_resp(ue_context *context, uint8_t sequence,
		pdn_connection *pdn, uint16_t dl_buf_sugg_pkt_cnt, bool dldr_flag)
{
	int encoded = 0, ret = 0;
	pfcp_sess_rpt_rsp_t pfcp_sess_rep_resp = {0};

	/* BUGFIX: pdn was previously NULL-checked only in the Update BAR
	 * condition, *after* it had already been dereferenced for
	 * is_default_dl_sugg_pkt_cnt_sent, and it is dereferenced again
	 * unconditionally for dp_seid/upf_ip below. Check it once, up front. */
	if (pdn == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"PDN is NULL, "
				"cannot send Session Report Response\n", LOG_VALUE);
		return;
	}

	/*Fill and send pfcp session report response. */
	fill_pfcp_sess_report_resp(&pfcp_sess_rep_resp,
			sequence, context->cp_mode);

	if (dldr_flag) {
		/* Send Default DL Buffering Suggested Packet Count */
		if (NOT_PRESENT == pdn->is_default_dl_sugg_pkt_cnt_sent) {
			pdn->is_default_dl_sugg_pkt_cnt_sent = PRESENT;
			dl_buf_sugg_pkt_cnt = config.dl_buf_suggested_pkt_cnt;
		}

		/* Send Update BAR IE */
		if (NOT_PRESENT != dl_buf_sugg_pkt_cnt) {
			pdn->bar.dl_buf_suggstd_pckts_cnt.pckt_cnt_val = dl_buf_sugg_pkt_cnt;
			set_update_bar_sess_rpt_rsp(&(pfcp_sess_rep_resp.update_bar), &pdn->bar);
		}
	}

	pfcp_sess_rep_resp.header.seid_seqno.has_seid.seid = pdn->dp_seid;

	uint8_t pfcp_msg[PFCP_MSG_LEN] = {0};
	encoded = encode_pfcp_sess_rpt_rsp_t(&pfcp_sess_rep_resp, pfcp_msg);
	pfcp_header_t *pfcp_hdr = (pfcp_header_t *) pfcp_msg;
	pfcp_hdr->message_len = htons(encoded - PFCP_IE_HDR_SIZE);

	/* UPF ip address */
	ret = set_dest_address(pdn->upf_ip, &upf_pfcp_sockaddr);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
	}

	if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded, upf_pfcp_sockaddr,ACC) < 0 ) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error in REPORT REPONSE "
				"message: %i\n", LOG_VALUE, errno);
		return;
	}
}
/* Removes the buffered-DDN entry for sess_id from buffered_ddn_req_hash and
 * hands the stored pdr_ids back to the caller (who owns freeing it).
 * Returns NULL when no entry was buffered for the session. */
pdr_ids *
delete_buff_ddn_req(uint64_t sess_id)
{
	pdr_ids *buffered_entry = NULL;

	if (rte_hash_lookup_data(buffered_ddn_req_hash,
				(const void *) &sess_id,
				(void **) &buffered_entry) < 0) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"No session entry buffered"
				"\n", LOG_VALUE);
		return NULL;
	}

	/* Entry found: drop the key; the data pointer is returned either way. */
	if (rte_hash_del_key(buffered_ddn_req_hash, (const void *)&sess_id) < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to delete Entry"
				"from buffered ddn request hash\n", LOG_VALUE);
	}
	return buffered_entry;
}
/**
 * @brief : Processes a Downlink Data Notification Acknowledgement: replays a
 *          buffered DDN if one is pending, (re)arms the UE-level delay timer,
 *          the DL buffering timer and the low-priority-traffic throttling
 *          timer carried in the ack, and finally sends the pending PFCP
 *          Session Report Response.
 * @param : ddn_ack, decoded DDN Ack message
 * @return : 0 on success, -1 otherwise
 */
int
process_ddn_ack(dnlnk_data_notif_ack_t *ddn_ack)
{
	int ebi_index = 0;
	int dl_delay_value = 0;
	uint16_t dl_buf_sugg_pkt_cnt = 0;
	int delay_value = 0;
	struct resp_info *resp = NULL;
	pdn_connection *pdn = NULL;
	ue_context *context = NULL;
	pdr_ids *pfcp_pdr_id = NULL;

	if (get_ue_context(ddn_ack->header.teid.has_teid.teid, &context) != 0){
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error:ue context not found\n", LOG_VALUE);
		return -1;
	}

	/* Pick the first active PDN; its default bearer supplies the EBI index. */
	for (uint32_t idx=0; idx <MAX_BEARERS; idx++) {
		pdn = context->pdns[idx];
		if(pdn != NULL) {
			ebi_index = GET_EBI_INDEX(pdn->default_bearer_id);
			break;
		}
	}

	/* BUGFIX: if the context has no PDN, the loop above leaves pdn NULL
	 * (and ebi_index at its initial 0, so the -1 check alone does not
	 * catch it) while pdn->seid is dereferenced below. */
	if (pdn == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No PDN found for "
				"teid: %u\n", LOG_VALUE, ddn_ack->header.teid.has_teid.teid);
		return -1;
	}

	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		return -1;
	}

	/* Remove session Entry from buffered ddn request hash */
	pfcp_pdr_id = delete_buff_ddn_req(pdn->seid);
	if(pfcp_pdr_id != NULL) {
		/* DDN requests arrived while the previous DDN was in flight:
		 * replay one now. */
		if(pfcp_pdr_id->ddn_buffered_count > 0) {
			pfcp_pdr_id->ddn_buffered_count -= 1;
			ddn_by_session_id(pdn->seid, pfcp_pdr_id);
		}
		rte_free(pfcp_pdr_id);
		pfcp_pdr_id = NULL;
	}

	if (get_sess_entry(pdn->seid, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
				"for seid: %lu", LOG_VALUE, pdn->seid);
		return -1;
	}

	/* Update the session state */
	resp->msg_type = GTP_DOWNLINK_DATA_NOTIFICATION_ACK;
	resp->state = IDEL_STATE;

	/* Delete the timer entry for UE Level timer if already present */
	ddn_ack->data_notif_delay.delay_value +=
		delete_ddn_timer_entry(timer_by_teid_hash, ddn_ack->header.teid.has_teid.teid, ddn_by_seid_hash);

	if(ddn_ack->data_notif_delay.header.len){
		if(ddn_ack->data_notif_delay.delay_value > 0){
			/* Start UE Level Timer with the assigned delay */
			start_ddn_timer_entry(timer_by_teid_hash, pdn->seid,
					(ddn_ack->data_notif_delay.delay_value * 50),
					ddn_timer_callback);
		}
	}

	/* Set dl buffering timer */
	if(ddn_ack->dl_buffering_dur.timer_value){
		/* REVIEW: Extend the timer if timer entry is present. */
		/* Delete the timer entry for UE level timer if already present */
		delete_ddn_timer_entry(dl_timer_by_teid_hash,
				ddn_ack->header.teid.has_teid.teid,
				pfcp_rep_by_seid_hash);

		dl_delay_value = ddn_ack->dl_buffering_dur.timer_value;

		/* Depending upon timer uint value DL Buffering Duration Timer value needs to be multiplied */
		if(ddn_ack->dl_buffering_dur.timer_unit == ZERO){
			dl_delay_value = dl_delay_value * TWOSEC;
		} else if(ddn_ack->dl_buffering_dur.timer_unit == ONE){
			dl_delay_value = dl_delay_value * ONEMINUTE;
		} else if(ddn_ack->dl_buffering_dur.timer_unit == TWO){
			dl_delay_value = dl_delay_value * TENMINUTE;
		} else if(ddn_ack->dl_buffering_dur.timer_unit == THREE){
			dl_delay_value = dl_delay_value * ONEHOUR;
		} else if(ddn_ack->dl_buffering_dur.timer_unit == FOUR){
			dl_delay_value = dl_delay_value * TENHOUR;
		} else if(ddn_ack->dl_buffering_dur.timer_unit == SEVEN){
			dl_delay_value = dl_delay_value * ONEMINUTE;
		} else {
			/* Here the value zero is for infinity*/
			dl_delay_value = ddn_ack->dl_buffering_dur.timer_value * 0;
		}

		/* Starts the timer to buffer the pfcp_session_report_request */
		start_ddn_timer_entry(dl_timer_by_teid_hash, pdn->seid, dl_delay_value, dl_buffer_timer_callback);

		/* Set suggested buffered packet count*/
		if(ddn_ack->dl_buffering_suggested_pckt_cnt.header.len){
			dl_buf_sugg_pkt_cnt = ddn_ack->dl_buffering_suggested_pckt_cnt.int_nbr_val;
		} else {
			dl_buf_sugg_pkt_cnt = config.dl_buf_suggested_pkt_cnt;
		}
	}

	/* Set throttling factor timer */
	if(ddn_ack->dl_low_priority_traffic_thrtlng.header.len){
		/* Delete the timer entry if already present */
		set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
		ddn_ack->dl_low_priority_traffic_thrtlng.thrtlng_delay_val +=
			delete_thrtle_timer(&context->s11_mme_gtpc_ip);

		delay_value = ddn_ack->dl_low_priority_traffic_thrtlng.thrtlng_delay_val;

		/* Depending upon timer uint value throttling timer value needs to be multiplied */
		if(ddn_ack->dl_low_priority_traffic_thrtlng.thrtlng_delay_unit == ZERO){
			delay_value = delay_value * TWOSEC;
		} else if(ddn_ack->dl_low_priority_traffic_thrtlng.thrtlng_delay_unit == ONE){
			delay_value = delay_value * ONEMINUTE;
		} else if(ddn_ack->dl_low_priority_traffic_thrtlng.thrtlng_delay_unit == TWO){
			delay_value = delay_value * TENMINUTE;
		} else if(ddn_ack->dl_low_priority_traffic_thrtlng.thrtlng_delay_unit == THREE){
			delay_value = delay_value * ONEHOUR;
		} else if(ddn_ack->dl_low_priority_traffic_thrtlng.thrtlng_delay_unit == FOUR){
			delay_value = delay_value * TENHOUR;
		} else if(ddn_ack->dl_low_priority_traffic_thrtlng.thrtlng_delay_unit == SEVEN){
			/* Here the value zero is used to indicated timer deactivation */
			delay_value = delay_value * 0;
		} else {
			delay_value = delay_value * ONEMINUTE;
		}

		/*spec 29.274, 8.85.1 Throttling information element */
		if((ddn_ack->dl_low_priority_traffic_thrtlng.thrtlng_factor) > 100){
			ddn_ack->dl_low_priority_traffic_thrtlng.thrtlng_factor = 0;
		}

		if(delay_value != 0){
			/* Start timer for throttling and also save throttling factor */
			start_throttle_timer(&context->s11_mme_gtpc_ip, delay_value,
					ddn_ack->dl_low_priority_traffic_thrtlng.thrtlng_factor);
		}
	}

	pdn->state = IDEL_STATE;
	if((context->pfcp_rept_resp_sent_flag == 0) || dl_buf_sugg_pkt_cnt ){
		fill_send_pfcp_sess_report_resp(context, resp->pfcp_seq, pdn, dl_buf_sugg_pkt_cnt, TRUE);
	}
	return 0;
}
/**
 * @brief : Builds and sends a PFCP Session Modification Request that switches
 *          every core-facing (downlink) FAR of the UE to DROP and sets the
 *          DROBU flag, discarding any buffered downlink packets. Used when a
 *          DDN Failure Indication is received.
 * @param : context, UE context whose PDNs/bearers are modified
 * @return : 0 on success, -1 otherwise
 */
int
send_pfcp_sess_mod_with_drop(ue_context *context)
{
	pdn_connection *pdn = NULL;
	eps_bearer *bearer = NULL;
	pdr_ids *pfcp_pdr_id = NULL;
	uint32_t seq = 0;
	int ret = 0;
	pfcp_sess_mod_req_t pfcp_sess_mod_req = {0};
	node_address_t node_value = {0};

	/* For every PDR facing the core, flip its FAR action bits to DROP and
	 * queue an Update FAR IE. */
	for(uint8_t itr_pdn = 0; itr_pdn < MAX_BEARERS; itr_pdn++){
		if(context->pdns[itr_pdn] != NULL) {
			pdn = context->pdns[itr_pdn];
			for(int itr_bearer = 0 ; itr_bearer < MAX_BEARERS; itr_bearer++) {
				bearer = pdn->eps_bearers[itr_bearer];
				if(bearer) {
					for(uint8_t itr_pdr = 0; itr_pdr < bearer->pdr_count; itr_pdr++) {
						if(bearer->pdrs[itr_pdr] != NULL) {
							if(bearer->pdrs[itr_pdr]->pdi.src_intfc.interface_value
									== SOURCE_INTERFACE_VALUE_CORE) {
								bearer->pdrs[itr_pdr]->far.actions.forw = FALSE;
								bearer->pdrs[itr_pdr]->far.actions.dupl = FALSE;
								bearer->pdrs[itr_pdr]->far.actions.nocp = FALSE;
								bearer->pdrs[itr_pdr]->far.actions.buff = FALSE;
								bearer->pdrs[itr_pdr]->far.actions.drop = TRUE;
								set_update_far(
										&(pfcp_sess_mod_req.update_far[pfcp_sess_mod_req.update_far_count]),
										&bearer->pdrs[itr_pdr]->far);
								uint16_t len = 0;
								len += set_upd_forwarding_param(&(pfcp_sess_mod_req.update_far
											[pfcp_sess_mod_req.update_far_count].upd_frwdng_parms),
										bearer->s1u_enb_gtpu_ip);
								len += UPD_PARAM_HEADER_SIZE;
								pfcp_sess_mod_req.update_far
									[pfcp_sess_mod_req.update_far_count].header.len += len;
								pfcp_sess_mod_req.update_far[pfcp_sess_mod_req.update_far_count].\
									upd_frwdng_parms.outer_hdr_creation.teid =
									bearer->s1u_enb_gtpu_teid;
								ret = set_node_address(&pfcp_sess_mod_req.update_far[pfcp_sess_mod_req.update_far_count].\
										upd_frwdng_parms.outer_hdr_creation.ipv4_address,
										pfcp_sess_mod_req.update_far[pfcp_sess_mod_req.\
										update_far_count].upd_frwdng_parms.outer_hdr_creation.ipv6_address,
										bearer->s1u_enb_gtpu_ip);
								if (ret < 0) {
									clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
											"IP address", LOG_VALUE);
								}
								pfcp_sess_mod_req.update_far[pfcp_sess_mod_req.update_far_count].\
									upd_frwdng_parms.dst_intfc.interface_value =
									GTPV2C_IFTYPE_S1U_ENODEB_GTPU;
								pfcp_sess_mod_req.update_far_count++;
							}
						}
					}
				}
			}
		}
	}

	/* BUGFIX: if the context carried no PDN at all, pdn is still NULL here
	 * and was previously dereferenced for upf_ip/seid/dp_seid below. */
	if (pdn == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No PDN found for "
				"UE context\n", LOG_VALUE);
		return -1;
	}

	/* DROBU: instruct the UP function to drop buffered downlink data. */
	set_pfcpsmreqflags(&(pfcp_sess_mod_req.pfcpsmreq_flags));
	pfcp_sess_mod_req.pfcpsmreq_flags.drobu = TRUE;

	/*Filling Node ID for F-SEID*/
	if (pdn->upf_ip.ip_type == PDN_IP_TYPE_IPV4) {
		uint8_t temp[IPV6_ADDRESS_LEN] = {0};
		ret = fill_ip_addr(config.pfcp_ip.s_addr, temp, &node_value);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
		}
	} else if (pdn->upf_ip.ip_type == PDN_IP_TYPE_IPV6) {
		ret = fill_ip_addr(0, config.pfcp_ip_v6.s6_addr, &node_value);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
		}
	}

	/* Remove session Entry from buffered ddn request hash */
	pfcp_pdr_id = delete_buff_ddn_req(pdn->seid);
	if(pfcp_pdr_id != NULL) {
		rte_free(pfcp_pdr_id);
		pfcp_pdr_id = NULL;
	}

	set_fseid(&(pfcp_sess_mod_req.cp_fseid), pdn->seid, node_value);

	seq = get_pfcp_sequence_number(PFCP_SESSION_MODIFICATION_REQUEST, seq);
	set_pfcp_seid_header((pfcp_header_t *) &(pfcp_sess_mod_req.header), PFCP_SESSION_MODIFICATION_REQUEST,
			HAS_SEID, seq, context->cp_mode);
	pfcp_sess_mod_req.header.seid_seqno.has_seid.seid = pdn->dp_seid;

	uint8_t pfcp_msg[PFCP_MSG_LEN]={0};
	int encoded = encode_pfcp_sess_mod_req_t(&pfcp_sess_mod_req, pfcp_msg);
	pfcp_header_t *header = (pfcp_header_t *) pfcp_msg;
	header->message_len = htons(encoded - PFCP_IE_HDR_SIZE);

	if(pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded, upf_pfcp_sockaddr, SENT) < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Failed to send"
				"PFCP Session Modification Request %i\n", LOG_VALUE, errno);
		return -1;
	}
	return 0;
}
/* Handles a Downlink Data Notification Failure Indication: resolves the UE
 * context (by TEID when present, otherwise by IMSI) and asks the UP function
 * to drop buffered downlink traffic for that UE. */
int
process_ddn_failure(dnlnk_data_notif_fail_indctn_t *ddn_fail_ind)
{
	ue_context *context = NULL;

	if (ddn_fail_ind->header.teid.has_teid.teid != 0) {
		/* TEID present: direct context lookup. */
		if (get_ue_context(ddn_fail_ind->header.teid.has_teid.teid, &context) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error:ue context not found\n", LOG_VALUE);
			return -1;
		}
	} else if (ddn_fail_ind->imsi.header.len) {
		/* Fall back to the IMSI hash. */
		if (rte_hash_lookup_data(ue_context_by_imsi_hash,
					&ddn_fail_ind->imsi.imsi_number_digits,
					(void **) &context) < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE Context"
					"for imsi: %ld\n",LOG_VALUE, ddn_fail_ind->imsi.imsi_number_digits);
			return -1;
		}
	} else {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"There is no teid and no imsi present \n",
				LOG_VALUE);
		return -1;
	}

	/* Switch the UE's core-facing FARs to DROP. */
	if (send_pfcp_sess_mod_with_drop(context) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error: while processing"
				" pfcp session modification request\n", LOG_VALUE);
	}
	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | cp/state_machine/sm_hand.c | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "pfcp.h"
#include "cp_app.h"
#include "sm_enum.h"
#include "sm_hand.h"
#include "cp_stats.h"
#include "pfcp_util.h"
#include "debug_str.h"
#include "sm_struct.h"
#include "ipc_api.h"
#include "pfcp_set_ie.h"
#include "pfcp_session.h"
#include "pfcp_association.h"
#include "gtpv2c_error_rsp.h"
#include "gtpc_session.h"
#include "cp_timer.h"
#include "cp_config.h"
#include "gw_adapter.h"
#include "cdr.h"
#include "teid.h"
#include "cp.h"
#ifdef USE_REST
#include "main.h"
#endif
int ret = 0;
extern pfcp_config_t config;
extern int clSystemLog;
extern int s5s8_fd;
extern int s5s8_fd_v6;
extern socklen_t s5s8_sockaddr_len;
extern socklen_t s5s8_sockaddr_ipv6_len;
extern socklen_t s11_mme_sockaddr_len;
extern socklen_t s11_mme_sockaddr_ipv6_len;
extern peer_addr_t s5s8_recv_sockaddr;
extern struct rte_hash *bearer_by_fteid_hash;
extern struct cp_stats_t cp_stats;
extern int gx_app_sock;
/**
 * @brief : State-machine handler for an initial Create Session Request on the
 *          Gx setup path: builds the UE context/PDN/bearer state and, when
 *          lawful interception applies, forwards a copy of the message on the
 *          ingress interface (S5/S8 for PGW-C, S11 otherwise).
 * @param : data, msg_info carrying the decoded CSR
 * @param : unused_param, unused
 * @return : 0 on success, GTPC_RE_TRANSMITTED_REQ for a retransmission,
 *           -1 on failure
 */
int
gx_setup_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;

	/* NOTE: 'ret' is the file-scope variable shared by these handlers. */
	ret = process_create_sess_req(&msg->gtpc_msg.csr,
			&context, msg->upf_ip, msg->cp_mode);
	if (ret != 0 && ret != GTPC_RE_TRANSMITTED_REQ) {
		if (ret == GTPC_CONTEXT_REPLACEMENT) {
			/* return success value for context replacement case */
			return 0;
		}
		/* ret == -1 means the error response was already handled. */
		if (ret != -1){
			cs_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
					msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
			process_error_occured_handler(data, unused_param);
		}
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing Create Session Request with cause: %s \n",
				LOG_VALUE, cause_str(ret));
		return -1;
	}

	if(ret == GTPC_RE_TRANSMITTED_REQ ) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Discarding Retransmitted "
				"CSR Request\n", LOG_VALUE);
		return ret;
	}

	if (PGWC == context->cp_mode) {
		/*extract ebi_id from array as all the ebi's will be of same pdn.*/
		int ebi_index = GET_EBI_INDEX(msg->gtpc_msg.csr.bearer_contexts_to_be_created[0].eps_bearer_id.ebi_ebi);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
			cs_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0,
					msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
			return -1;
		}

		pdn = GET_PDN(context, ebi_index);
		if (pdn == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
					"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
			return -1;
		}

		/* LI copy of the inbound message on the S5/S8 ingress. */
		process_msg_for_li(context, S5S8_C_INTFC_IN, msg,
				fill_ip_info(s5s8_recv_sockaddr.type,
						pdn->s5s8_sgw_gtpc_ip.ipv4_addr,
						pdn->s5s8_sgw_gtpc_ip.ipv6_addr),
				fill_ip_info(s5s8_recv_sockaddr.type,
						config.s5s8_ip.s_addr,
						config.s5s8_ip_v6.s6_addr),
				pdn->s5s8_sgw_gtpc_teid, config.s5s8_port);
	} else {
		/* LI copy of the inbound message on the S11 ingress. */
		process_msg_for_li(context, S11_INTFC_IN, msg,
				fill_ip_info(s11_mme_sockaddr.type,
						context->s11_mme_gtpc_ip.ipv4_addr,
						context->s11_mme_gtpc_ip.ipv6_addr),
				fill_ip_info(s11_mme_sockaddr.type,
						config.s11_ip.s_addr,
						config.s11_ip_v6.s6_addr),
				((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s11_mme_sockaddr.ipv4.sin_port) :
					ntohs(s11_mme_sockaddr.ipv6.sin6_port)),
				config.s11_port);
	}

	RTE_SET_USED(unused_param);
	return ret;
}
/**
 * @brief : State-machine handler for a Create Session Request that requires a
 *          PFCP association: builds the UE context, performs the LI copy when
 *          enabled, resolves the UPF address (DNS or static config) and
 *          initiates PFCP session setup.
 * @param : data, msg_info carrying the decoded CSR
 * @param : unused_param, unused
 * @return : 0 on success, GTPC_RE_TRANSMITTED_REQ for a retransmission,
 *           -1 on failure
 */
int
association_setup_handler(void *data, void *unused_param)
{
	int ebi_index = 0;
	msg_info *msg = (msg_info *)data;
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	uint8_t cp_mode = 0;

	/* Populate the UE context, PDN and Bearer information */
	ret = process_create_sess_req(&msg->gtpc_msg.csr,
			&context, msg->upf_ip, msg->cp_mode);
	if (ret) {
		/* ret == -1 means the error has already been answered upstream. */
		if(ret != -1) {
			if (ret == GTPC_CONTEXT_REPLACEMENT)
				return 0;
			if(ret == GTPC_RE_TRANSMITTED_REQ)
				return ret;

			/* context may be NULL when creation failed very early. */
			if(context == NULL)
				cp_mode = msg->cp_mode;
			else
				cp_mode = context->cp_mode;

			cs_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
					cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
			process_error_occured_handler(data, unused_param);
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
					" processing Create Session Request with cause: %s \n",
					LOG_VALUE, cause_str(ret));
		}
		return -1;
	}

	/*extract ebi_id from array as all the ebi's will be of same pdn.*/
	ebi_index = GET_EBI_INDEX(msg->gtpc_msg.csr.bearer_contexts_to_be_created[0].eps_bearer_id.ebi_ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		msg->cp_mode = context->cp_mode;
		cs_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0,
				msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		return -1;
	}

	pdn = GET_PDN(context, ebi_index);
	if (pdn == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
		return -1;
	}

	/* Lawful-interception copy of the inbound CSR when enabled for this UE. */
	if (PRESENT == context->dupl) {
		if (PGWC == context->cp_mode) {
			process_msg_for_li(context, S5S8_C_INTFC_IN, msg,
					fill_ip_info(s5s8_recv_sockaddr.type,
							pdn->s5s8_sgw_gtpc_ip.ipv4_addr,
							pdn->s5s8_sgw_gtpc_ip.ipv6_addr),
					fill_ip_info(s5s8_recv_sockaddr.type,
							config.s5s8_ip.s_addr,
							config.s5s8_ip_v6.s6_addr),
					pdn->s5s8_sgw_gtpc_teid, config.s5s8_port);
		} else {
			process_msg_for_li(context, S11_INTFC_IN, msg,
					fill_ip_info(s11_mme_sockaddr.type,
							context->s11_mme_gtpc_ip.ipv4_addr,
							context->s11_mme_gtpc_ip.ipv6_addr),
					fill_ip_info(s11_mme_sockaddr.type,
							config.s11_ip.s_addr,
							config.s11_ip_v6.s6_addr),
					((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
						ntohs(s11_mme_sockaddr.ipv4.sin_port) :
						ntohs(s11_mme_sockaddr.ipv6.sin6_port)),
					config.s11_port);
		}
	}

	/* UPF not yet resolved (ip_type 0): query DNS, or take the statically
	 * configured UPF address, preferring IPv6 when both ends support it. */
	if (pdn->upf_ip.ip_type == 0) {
		if (config.use_dns) {
			push_dns_query(pdn);
			return 0;
		} else {
			if ((config.pfcp_ip_type == PDN_TYPE_IPV6
						|| config.pfcp_ip_type == PDN_TYPE_IPV4_IPV6)
					&& (config.upf_pfcp_ip_type == PDN_TYPE_IPV6
						|| config.upf_pfcp_ip_type == PDN_TYPE_IPV4_IPV6)) {
				memcpy(pdn->upf_ip.ipv6_addr, config.upf_pfcp_ip_v6.s6_addr, IPV6_ADDRESS_LEN);
				pdn->upf_ip.ip_type = PDN_TYPE_IPV6;
			} else if ((config.pfcp_ip_type == PDN_TYPE_IPV4
						|| config.pfcp_ip_type == PDN_TYPE_IPV4_IPV6)
					&& (config.upf_pfcp_ip_type == PDN_TYPE_IPV4
						|| config.upf_pfcp_ip_type == PDN_TYPE_IPV4_IPV6)) {
				pdn->upf_ip.ipv4_addr = config.upf_pfcp_ip.s_addr;
				pdn->upf_ip.ip_type = PDN_TYPE_IPV4;
			}
		}
	}

	/* A promoted (combined-gateway) context already has a PFCP session. */
	if (!context->promotion_flag) {
		process_pfcp_sess_setup(pdn);
	}

	RTE_SET_USED(unused_param);
	return 0;
}
int
process_assoc_resp_handler(void *data, void *addr)
{
msg_info *msg = (msg_info *)data;
peer_addr_t *peer_addr = (peer_addr_t *)addr;
ret = process_pfcp_ass_resp(msg, peer_addr);
if(ret) {
cs_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
process_error_occured_handler(data, addr);
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
" processing PFCP Association Response with cause: %s \n",
LOG_VALUE, cause_str(ret));
return -1;
}
return 0;
}
int process_recov_asso_resp_handler(void *data, void *addr) {
int ret = 0;
peer_addr_t *peer_addr = (peer_addr_t *)addr;
msg_info *msg = (msg_info *)data;
ret = process_asso_resp(msg, peer_addr);
if(ret < 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
" processing PFCP Association Response with cause: %s \n",
LOG_VALUE, cause_str(ret));
return -1;
}
return 0;
}
/* Recovery-path handler for a PFCP Session Establishment Response. */
int process_recov_est_resp_handler(void *data, void *unused_param) {
	msg_info *msg = (msg_info *)data;
	int rc = process_sess_est_resp(&msg->pfcp_msg.pfcp_sess_est_resp);

	RTE_SET_USED(unused_param);
	if (rc < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing PFCP Session Establishment Response with cause: %s \n",
				LOG_VALUE, cause_str(rc));
		return -1;
	}
	return 0;
}
/* SGW-C handler for a Create Session Response received on S5/S8: forwards it
 * for processing and, on failure, replies with an error on S11. */
int
process_cs_resp_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;

	RTE_SET_USED(unused_param);
	ret = process_sgwc_s5s8_create_sess_rsp(&msg->gtpc_msg.cs_rsp);
	if (ret == 0)
		return 0;

	/* ret == -1 means the error was already answered upstream. */
	if (ret != -1) {
		msg->cp_mode = 0;
		cs_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0, S11_IFACE);
		process_error_occured_handler(data, unused_param);
	}
	clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing Create Session Response with cause: %s \n",
			LOG_VALUE, cause_str(ret));
	return -1;
}
/**
 * @brief : Handler for a PFCP Session Establishment Response: converts it
 *          into the corresponding GTPv2-C response and sends it on S5/S8
 *          (SGW-C/PGW-C) or S11 (SAEGW-C / handover paths), arming the
 *          retransmission timer and performing LI copies where enabled.
 * @param : data, msg_info carrying the decoded PFCP response
 * @param : unused_param, unused
 * @return : 0 on success, GTPv2-C cause / -1 on failure
 */
int
process_sess_est_resp_handler(void *data, void *unused_param)
{
	uint16_t payload_length = 0;
	uint8_t cp_mode = 0;
	int ret = 0;
	msg_info *msg = (msg_info *)data;

	/* SEID encodes both the S11 TEID and the bearer id. */
	int ebi = UE_BEAR_ID(msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid);
	int ebi_index = GET_EBI_INDEX(ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		cs_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0,
				msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		return -1;
	}

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	uint64_t sess_id = msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid;
	uint32_t teid = UE_SESS_ID(sess_id);

	ue_context *context = NULL;
	ret = get_ue_context(teid, &context);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE "
				"Context for teid: %u\n", LOG_VALUE, teid);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	/* Builds the GTPv2-C response into gtpv2c_tx. */
	ret = process_pfcp_sess_est_resp(
			&msg->pfcp_msg.pfcp_sess_est_resp, gtpv2c_tx, NOT_PIGGYBACKED);

	if (ret) {
		if(ret != -1){
			cs_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
					msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
			process_error_occured_handler(data, unused_param);
		}
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing PFCP Session Establishment Response with cause: %s \n",
				LOG_VALUE, cause_str(ret));
		return -1;
	}

	payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
		+ sizeof(gtpv2c_tx->gtpc);

	cp_mode = msg->cp_mode;
	if ((msg->cp_mode == SGWC) || (msg->cp_mode == PGWC)) {
		/* Indirect-tunnel and S1 handover cases answer the MME directly. */
		if(((context->indirect_tunnel_flag == 1) && context->cp_mode == SGWC) ||
				context->procedure == S1_HANDOVER_PROC) {
			ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
						"IP address", LOG_VALUE);
			}
			gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
					s11_mme_sockaddr, ACC);
			return 0;
		} else {
			gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
					s5s8_recv_sockaddr, SENT);
		}

		/* SGW-C forwarded a CSR on S5/S8: arm its retransmission timer. */
		if (SGWC == context->cp_mode) {
			add_gtpv2c_if_timer_entry(
					UE_SESS_ID(msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid),
					&s5s8_recv_sockaddr, tx_buf, payload_length,
					ebi_index, S5S8_IFACE, cp_mode);
		}

		/* LI copy of the outbound message on S5/S8. */
		if (PRESENT == context->dupl) {
			process_cp_li_msg(
					msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid,
					S5S8_C_INTFC_OUT, tx_buf, payload_length,
					fill_ip_info(s5s8_recv_sockaddr.type,
							config.s5s8_ip.s_addr,
							config.s5s8_ip_v6.s6_addr),
					fill_ip_info(s5s8_recv_sockaddr.type,
							s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
							s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
					config.s5s8_port,
					((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
						ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
						ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
		}
	} else {
		/* Send response on s11 interface */
		gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
				s11_mme_sockaddr, ACC);

		/* LI copy of the outbound message on S11. */
		if (PRESENT == context->dupl) {
			process_cp_li_msg(
					msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid,
					S11_INTFC_OUT, tx_buf, payload_length,
					fill_ip_info(s11_mme_sockaddr.type,
							config.s11_ip.s_addr,
							config.s11_ip_v6.s6_addr),
					fill_ip_info(s11_mme_sockaddr.type,
							s11_mme_sockaddr.ipv4.sin_addr.s_addr,
							s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
					config.s11_port,
					((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
						ntohs(s11_mme_sockaddr.ipv4.sin_port) :
						ntohs(s11_mme_sockaddr.ipv6.sin6_port)));
		}
	}

	RTE_SET_USED(unused_param);
	return 0;
}
int
process_mb_req_handler(void *data, void *unused_param)
{
ue_context *context = NULL;
msg_info *msg = (msg_info *)data;
eps_bearer *bearer = NULL;
pdn_connection *pdn = NULL;
int pre_check = 0;
int ebi_index = 0, ret = 0;
/*Retrive UE state. */
if(get_ue_context(msg->gtpc_msg.mbr.header.teid.has_teid.teid, &context) != 0) {
mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
msg->interface_type);
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d \n", LOG_VALUE,
msg->gtpc_msg.mbr.header.teid.has_teid.teid);
return -1;
}
if(context->cp_mode != PGWC) {
pre_check = mbr_req_pre_check(&msg->gtpc_msg.mbr);
if(pre_check != 0) {
mbr_error_response(msg, pre_check, CAUSE_SOURCE_SET_TO_0,
context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
"Conditional IE missing MBR Request",
LOG_VALUE);
return -1;
}
}
ret = update_ue_context(&msg->gtpc_msg.mbr, context, bearer, pdn);
if(ret != 0) {
if(ret == GTPC_RE_TRANSMITTED_REQ){
return ret;
}else{
mbr_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
" processing Modify Bearer Request with cause: %s \n",
LOG_VALUE, cause_str(ret));
return -1;
}
}
if(msg->gtpc_msg.mbr.pres_rptng_area_info.header.len){
store_presc_reporting_area_info_to_ue_context(&msg->gtpc_msg.mbr.pres_rptng_area_info,
context);
}
if(msg->gtpc_msg.mbr.bearer_count != 0) {
ebi_index =
GET_EBI_INDEX(msg->gtpc_msg.mbr.bearer_contexts_to_be_modified[0].eps_bearer_id.ebi_ebi);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
mbr_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0,
context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
return -1;
}
delete_gtpv2c_if_timer_entry(msg->gtpc_msg.mbr.header.teid.has_teid.teid,
ebi_index);
}
/*add entry for New MME if MME get change */
if (((context->mme_changed_flag == TRUE) && ((s11_mme_sockaddr.ipv4.sin_addr.s_addr != 0)
|| (s11_mme_sockaddr.ipv6.sin6_addr.s6_addr)))) {
node_address_t node_addr = {0};
get_peer_node_addr(&s11_mme_sockaddr, &node_addr);
add_node_conn_entry(&node_addr, S11_SGW_PORT_ID, msg->cp_mode);
}
if(context->cp_mode == SGWC) {
if(pre_check != FORWARD_MBR_REQUEST) {
ret = process_pfcp_sess_mod_request(&msg->gtpc_msg.mbr, context);
} else {
bzero(&tx_buf, sizeof(tx_buf));
gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
set_modify_bearer_request(gtpv2c_tx, pdn, bearer);
ret = set_dest_address(bearer->pdn->s5s8_pgw_gtpc_ip, &s5s8_recv_sockaddr);
if (ret < 0) {
clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
"IP address", LOG_VALUE);
}
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"s5s8_recv_sockaddr.sin_addr.s_addr :%s\n", LOG_VALUE,
inet_ntoa(*((struct in_addr *)&s5s8_recv_sockaddr.ipv4.sin_addr.s_addr)));
uint16_t payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
+ sizeof(gtpv2c_tx->gtpc);
gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
s5s8_recv_sockaddr, SENT);
uint8_t cp_mode = pdn->context->cp_mode;
add_gtpv2c_if_timer_entry(
pdn->context->s11_sgw_gtpc_teid,
&s5s8_recv_sockaddr, tx_buf, payload_length,
ebi_index,
S5S8_IFACE, cp_mode);
/* copy packet for user level packet copying or li */
if (context->dupl) {
process_pkt_for_li(
context, S5S8_C_INTFC_OUT, s5s8_tx_buf, payload_length,
fill_ip_info(s5s8_recv_sockaddr.type,
config.s5s8_ip.s_addr,
config.s5s8_ip_v6.s6_addr),
fill_ip_info(s5s8_recv_sockaddr.type,
s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
config.s5s8_port,
((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
}
//resp->state = MBR_REQ_SNT_STATE;
pdn->state = MBR_REQ_SNT_STATE;
return 0;
}
} else {
ret = process_pfcp_sess_mod_req_for_saegwc_pgwc(&msg->gtpc_msg.mbr, context);
}
if (ret != 0) {
if(ret != -1)
mbr_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
context->cp_mode = PGWC ? S11_IFACE : S5S8_IFACE);
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
" processing Modify Bearer Request with cause: %s \n",
LOG_VALUE, cause_str(ret));
return -1;
}
RTE_SET_USED(unused_param);
return 0;
}
/* No-op handler: a Modify Bearer Request arriving during the MOD procedure
 * requires no processing at this state; always reports success. */
int
process_mb_req_for_mod_proc_handler(void *data, void *unused_param)
{
	/* Both arguments are intentionally unused. */
	RTE_SET_USED(data);
	RTE_SET_USED(unused_param);
	return 0;
}
/**
 * @brief  : Handles a PFCP Session Modification Response from the UP.
 *           Looks up the session/UE state by SEID, runs the mode-specific
 *           processing (SGWC vs SAEGWC/PGWC, MBR vs other triggers) and,
 *           unless suppressed, forwards the resulting GTPv2-C message on
 *           S11 (toward MME) or S5/S8 (toward SGW), with optional LI copy.
 * @param  : data - msg_info carrying pfcp_sess_mod_resp
 * @param  : unused_param - unused
 * @return : 0 on success, -1 on failure (error response already sent)
 */
int
process_sess_mod_resp_handler(void *data, void *unused_param)
{
	struct resp_info *resp = NULL;
	msg_info *msg = (msg_info *)data;
	ue_context *context = NULL;
	int ebi_index = 0;
	eps_bearer *bearer = NULL;
	pdn_connection *pdn = NULL;

	/* Scratch buffer for the outgoing GTPv2-C message. */
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	/* SEID encodes both the UE session TEID and the bearer id. */
	uint64_t sess_id = msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid;
	uint32_t teid = UE_SESS_ID(sess_id);

	if (get_sess_entry(sess_id, &resp) != 0){
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
			"for seid: %lu", LOG_VALUE, sess_id);
		return -1;
	}

	/* Retrieve the UE context */
	ret = get_ue_context(teid, &context);
	if (ret) {
		pfcp_modification_error_response(resp, msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND);
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE "
			"context for teid: %u\n", LOG_VALUE, teid);
		return -1;
	}

	int ebi = UE_BEAR_ID(sess_id);
	ebi_index = GET_EBI_INDEX(ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		pfcp_modification_error_response(resp, msg, GTPV2C_CAUSE_SYSTEM_FAILURE);
		return -1;
	}

	bearer = context->eps_bearers[ebi_index];
	pdn = GET_PDN(context, ebi_index);
	if(pdn == NULL){
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
			"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
		pfcp_modification_error_response(resp, msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND);
		return -1;
	}

	if(resp->msg_type == GTP_MODIFY_BEARER_REQ) {
		/* Triggered by a Modify Bearer Request. */
		uint8_t mbr_procedure = check_mbr_procedure(pdn);
		if(context->cp_mode == SGWC){
			/* SGWC: MBR-specific path also takes care of any forwarding. */
			ret = process_pfcp_sess_mod_resp_mbr_req(&msg->pfcp_msg.pfcp_sess_mod_resp,
					gtpv2c_tx, pdn, resp, bearer, &mbr_procedure);
			if (ret != 0) {
				if(ret != -1)
					pfcp_modification_error_response(resp, msg, ret);
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
					" processing PFCP Session Modification Response with cause: %s \n",
					LOG_VALUE, cause_str(ret));
				return -1;
			}
			return 0;
		} else if((context->cp_mode == SAEGWC) || (context->cp_mode == PGWC)) {
#ifdef USE_CSID
			{
				update_peer_node_csid(&msg->pfcp_msg.pfcp_sess_mod_resp, pdn);
			}
#endif /* USE_CSID */
			/* Build MBR response locally; session transitions to CONNECTED. */
			set_modify_bearer_response(gtpv2c_tx,
				context->sequence, context, bearer, &resp->gtpc_msg.mbr);
			resp->state = CONNECTED_STATE;
			/* Update the UE state */
			pdn->state = CONNECTED_STATE;
		}
	} else {
		/* Generic (non-MBR) modification response processing. */
		ret = process_pfcp_sess_mod_resp(&msg->pfcp_msg.pfcp_sess_mod_resp,
				gtpv2c_tx, context, resp);
		if (ret != 0) {
			if(ret != -1)
				pfcp_modification_error_response(resp, msg, ret);
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing PFCP Session Modification Response with cause: %s \n",
				LOG_VALUE, cause_str(ret));
			return -1;
		}
	}

	/* Connection-suspend on SGW side needs no GTPv2-C reply here. */
	if(context->cp_mode != PGWC &&
		resp->proc == CONN_SUSPEND_PROC) {
		resp = NULL;
		RTE_SET_USED(unused_param);
		return 0;
	}

	/* Modify Access Bearer responses are handled elsewhere; nothing to send. */
	if(resp->msg_type == GTP_MODIFY_ACCESS_BEARER_REQ) {
		resp = NULL;
		RTE_SET_USED(unused_param);
		return 0;
	}

	uint16_t payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
		+ sizeof(gtpv2c_tx->gtpc);

	if(context->cp_mode != PGWC) {
		/* SGWC/SAEGWC: answer toward the MME on S11. */
		ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
		}
		gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
				s11_mme_sockaddr, ACC);
		/* Duplicate outgoing packet for lawful interception if enabled. */
		if (PRESENT == context->dupl) {
			process_cp_li_msg(
					msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
					S11_INTFC_OUT, tx_buf, payload_length,
					fill_ip_info(s11_mme_sockaddr.type,
							config.s11_ip.s_addr,
							config.s11_ip_v6.s6_addr),
					fill_ip_info(s11_mme_sockaddr.type,
							s11_mme_sockaddr.ipv4.sin_addr.s_addr,
							s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
					config.s11_port,
					((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
						ntohs(s11_mme_sockaddr.ipv4.sin_port) :
						ntohs(s11_mme_sockaddr.ipv6.sin6_port)));
		}
	} else {
		/* PGWC: answer toward the SGW on S5/S8. */
		ret = set_dest_address(bearer->pdn->s5s8_sgw_gtpc_ip, &s5s8_recv_sockaddr);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
		}
		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
				s5s8_recv_sockaddr, SENT);
		/* copy packet for user level packet copying or li */
		if (context->dupl) {
			process_pkt_for_li(
					context, S5S8_C_INTFC_OUT, s5s8_tx_buf, payload_length,
					fill_ip_info(s5s8_recv_sockaddr.type,
							config.s5s8_ip.s_addr,
							config.s5s8_ip_v6.s6_addr),
					fill_ip_info(s5s8_recv_sockaddr.type,
							s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
							s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
					config.s5s8_port,
					((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
						ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
						ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
		}
	}
	RTE_SET_USED(unused_param);
	return 0;
}
/**
 * @brief  : This handler is called after receiving a pfcp_sess_mod_resp in
 *           case of the mod_proc procedure.
 * @param  : data (message received on the Sx interface)
 * @param  : unused_param
 * @return : Returns 0 in case of success
 */
/* No-op handler: nothing to do for a session-modification response under
 * the MOD procedure; unconditionally succeeds. */
int
process_mod_resp_for_mod_proc_handler(void *data, void *unused_param)
{
	/* Parameters are intentionally ignored. */
	RTE_SET_USED(unused_param);
	RTE_SET_USED(data);
	return 0;
}
/* Handles a Release Access Bearer Request received on S11: delegates to the
 * procedure-specific processor and, on failure, sends an error response
 * toward the MME. Returns 0 on success, -1 on error. */
int
process_rel_access_ber_req_handler(void *data, void *unused_param)
{
	msg_info *rab_msg = (msg_info *)data;

	RTE_SET_USED(unused_param);

	/* TODO: Check return type and do further processing */
	ret = process_release_access_bearer_request(&rab_msg->gtpc_msg.rel_acc_ber_req,
			rab_msg->proc);
	if (ret == 0)
		return 0;

	clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing Release Access Bearer Request with cause: %s \n",
			LOG_VALUE, cause_str(ret));
	release_access_bearer_error_response(rab_msg, ret, CAUSE_SOURCE_SET_TO_0,
			S11_IFACE);
	return -1;
}
int
process_change_noti_req_handler(void *data, void *unused_param)
{
msg_info *msg = (msg_info *)data;
pdn_connection *pdn = NULL;
ue_context *context = NULL;
int ret = rte_hash_lookup_data(ue_context_by_fteid_hash,
(const void *) &msg->gtpc_msg.change_not_req.header.teid.has_teid.teid,
(void **) &context);
if (ret < 0 || !context) {
clLog(clSystemLog, eCLSeverityCritical,
"%s : Error: Failed to process Change Notification Request %d \n",
__func__, ret);
change_notification_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
CAUSE_SOURCE_SET_TO_0,
msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
return -1;
}
if(msg->gtpc_msg.change_not_req.pres_rptng_area_info.header.len){
store_presc_reporting_area_info_to_ue_context(&msg->gtpc_msg.change_not_req.pres_rptng_area_info,
context);
}
if(context->cp_mode == PGWC || context->cp_mode == SAEGWC) {
ret = process_change_noti_request(&msg->gtpc_msg.change_not_req, context);
if (ret) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
" processing Change Notification Request with cause: %s \n",
LOG_VALUE, cause_str(ret));
change_notification_error_response(msg, ret,
CAUSE_SOURCE_SET_TO_0,
context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
return -1;
}
} else if(context->cp_mode == SGWC) {
bzero(&tx_buf, sizeof(tx_buf));
gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
ret = set_change_notification_request(gtpv2c_tx, &msg->gtpc_msg.change_not_req, &pdn);
if (ret) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
" processing Change Notification Request with cause: %s \n",
LOG_VALUE, cause_str(ret));
change_notification_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
return -1;
}
}
RTE_SET_USED(data);
RTE_SET_USED(unused_param);
return 0;
}
int
process_change_noti_resp_handler(void *data, void *unused_param)
{
ue_context *context = NULL;
uint16_t payload_length = 0;
msg_info *msg = (msg_info *)data;
bzero(&tx_buf, sizeof(tx_buf));
gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
ret = process_change_noti_response(&msg->gtpc_msg.change_not_rsp, gtpv2c_tx);
if(ret) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
" processing Change Notification Response with cause: %s \n",
LOG_VALUE, cause_str(ret));
change_notification_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
msg->interface_type);
return -1;
}
payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
+ sizeof(gtpv2c_tx->gtpc);
gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
s11_mme_sockaddr, SENT);
ret = get_ue_context_by_sgw_s5s8_teid(
msg->gtpc_msg.change_not_rsp.header.teid.has_teid.teid,
&context);
if (ret < 0 || !context) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
"UE Context for teid: %d\n",
LOG_VALUE, msg->gtpc_msg.change_not_rsp.header.teid.has_teid.teid);
change_notification_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
msg->interface_type);
return -1;
}
/* copy packet for user level packet copying or li */
if (context->dupl) {
process_pkt_for_li(
context, S11_INTFC_OUT, tx_buf, payload_length,
fill_ip_info(s11_mme_sockaddr.type,
config.s11_ip.s_addr,
config.s11_ip_v6.s6_addr),
fill_ip_info(s11_mme_sockaddr.type,
s11_mme_sockaddr.ipv4.sin_addr.s_addr,
s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
config.s11_port,
((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
ntohs(s11_mme_sockaddr.ipv4.sin_port) :
ntohs(s11_mme_sockaddr.ipv6.sin6_port)));
}
RTE_SET_USED(data);
RTE_SET_USED(unused_param);
return 0;
}
/**
 * @brief  : Handles a Delete Session Request. Performs retransmission
 *           filtering based on the last-seen sequence number, then routes
 *           the request according to node role: SGW-context cleanup on
 *           demotion, forward to PGW when the OI indication flag is set
 *           (SGWC), or local PFCP session deletion otherwise.
 * @param  : data - msg_info carrying the DSR
 * @param  : unused_param - unused
 * @return : 0 on success, GTPC_RE_TRANSMITTED_REQ for a duplicate in
 *           progress, -1 on failure (error response already sent)
 */
int
process_ds_req_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;
	ue_context *context = NULL;
	int ebi_index = 0;

	int ret = rte_hash_lookup_data(ue_context_by_fteid_hash,
			(const void *) &msg->gtpc_msg.dsr.header.teid.has_teid.teid,
			(void **) &context);
	if (ret < 0 || !context){
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Context not found for "
				"given Dropping packet\n", LOG_VALUE);
		ds_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0,
				msg->interface_type);
		return -1;
	}

	/* Retransmission filter: a DSR with the same sequence that is still
	 * being processed is silently discarded; a repeat of a completed one
	 * is re-processed. */
	if(context != NULL ) {
		if(context->req_status.seq == msg->gtpc_msg.dsr.header.teid.has_teid.seq) {
			if(context->req_status.status == REQ_IN_PROGRESS) {
				/* Discarding re-transmitted dsr */
				return GTPC_RE_TRANSMITTED_REQ;
			}else{
				/* Restransmitted DSR but processing already done for previous req */
				context->req_status.status = REQ_IN_PROGRESS;
			}
		} else {
			context->req_status.seq = msg->gtpc_msg.dsr.header.teid.has_teid.seq;
			context->req_status.status = REQ_IN_PROGRESS;
		}
	}

	ebi_index = GET_EBI_INDEX(msg->gtpc_msg.dsr.lbi.ebi_ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		ds_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0,
				context->cp_mode != PGWC ? S11_IFACE :S5S8_IFACE);
		return -1;
	}

	/* Cancel any pending retransmission timer for this TEID/bearer. */
	delete_gtpv2c_if_timer_entry(msg->gtpc_msg.dsr.header.teid.has_teid.teid,
			ebi_index);

	/* Handling the case of Demotion: DSR arrived on S11 but this node now
	 * acts as PGWC — tear down the stale SGW-side context. */
	if((msg->interface_type == S11_IFACE) && (context->cp_mode == PGWC)) {
		ret = cleanup_sgw_context(&msg->gtpc_msg.dsr, context);
		if (ret) {
			return ret;
		}
		return 0;
	}

	if (context->cp_mode == SGWC && msg->gtpc_msg.dsr.indctn_flgs.indication_oi == 1) {
		/* Indication flag 1 mean dsr needs to be sent to PGW otherwise dont send it to PGW */
		ret = process_sgwc_delete_session_request(&msg->gtpc_msg.dsr, context);
	} else {
		ret = process_pfcp_sess_del_request(&msg->gtpc_msg.dsr, context);
	}

	if (ret){
		if(ret != -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
					" processing Delete Session Request with cause: %s \n",
					LOG_VALUE, cause_str(ret));
			ds_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
					context->cp_mode != PGWC ? S11_IFACE :S5S8_IFACE);
			return -1;
		}
	}

	RTE_SET_USED(unused_param);
	return 0;
}
/**
 * @brief  : Handles a PFCP Session Deletion Response: records CDR usage
 *           reports, builds the Delete Session Response (plus a Gx CCR-T
 *           when Gx is enabled and the node is not SGWC), sends it on
 *           S5/S8 (PGWC) or S11 (SGWC/SAEGWC), duplicates it for LI, and
 *           forwards the CCR-T to the Gx application.
 * @param  : data - msg_info carrying pfcp_sess_del_resp
 * @param  : unused_param - unused
 * @return : 0 on success, -1 on failure (error response already sent)
 */
int
process_sess_del_resp_handler(void *data, void *unused_param)
{
	uint8_t dupl = 0;
	uint64_t imsi = 0;
	uint8_t cp_mode = 0;
	uint8_t li_data_cntr = 0;
	uint8_t cleanup_status = 0;
	ue_context *context = NULL;
	uint16_t payload_length = 0;
	msg_info *msg = (msg_info *)data;
	li_data_t li_data[MAX_LI_ENTRIES_PER_UE] = {0};
	uint16_t msglen = 0;
	uint8_t *buffer = NULL;
	gx_msg ccr_request = {0};

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	uint64_t seid = msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid;

	/* Account every usage report into the CDR before tearing down. */
	if (msg->pfcp_msg.pfcp_sess_del_resp.usage_report_count != 0) {
		for(int i=0 ; i< msg->pfcp_msg.pfcp_sess_del_resp.usage_report_count; i++)
			fill_cdr_info_sess_del_resp(seid,
					&msg->pfcp_msg.pfcp_sess_del_resp.usage_report[i]);
	}

	ret = get_ue_context(UE_SESS_ID(seid), &context);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to retrieve UE context",
				LOG_VALUE);
		ds_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
				msg->interface_type != PGWC ? S11_IFACE :S5S8_IFACE);
		return -1;
	}

	/*cleanup activity for HSS initiated flow*/
	cleanup_status = context->mbc_cleanup_status;

	/* Copy LI-related data out of the context before it is destroyed. */
	imsi = context->imsi;
	dupl = context->dupl;
	li_data_cntr = context->li_data_cntr;
	memcpy(li_data, context->li_data, (sizeof(li_data_t) * context->li_data_cntr));
	cp_mode = context->cp_mode;

	if(context->cp_mode != SGWC ) {
		/* Lookup value in hash using session id and fill pfcp response and delete entry from hash*/
		if (config.use_gx) {
			ret = process_pfcp_sess_del_resp(seid, gtpv2c_tx, &ccr_request, &msglen,
					context);
			/* Serialize the CCR-T (type + length header, then payload). */
			buffer = rte_zmalloc_socket(NULL, msglen + GX_HEADER_LEN,
					RTE_CACHE_LINE_SIZE, rte_socket_id());
			if (buffer == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate "
						"Memory for Buffer, Error: %s \n", LOG_VALUE,
						rte_strerror(rte_errno));
				ds_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0,
						cp_mode != PGWC ? S11_IFACE :S5S8_IFACE);
				return -1;
			}
			memcpy(buffer, &ccr_request.msg_type, sizeof(ccr_request.msg_type));
			memcpy(buffer + sizeof(ccr_request.msg_type),
					&ccr_request.msg_len,
					sizeof(ccr_request.msg_len));
			if (gx_ccr_pack(&(ccr_request.data.ccr),
					(unsigned char *)(buffer + GX_HEADER_LEN), msglen) == 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to Packing "
						"CCR Buffer\n", LOG_VALUE);
				rte_free(buffer);
				return -1;
			}
		} else {
			ret = process_pfcp_sess_del_resp(
					msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid,
					gtpv2c_tx, NULL, NULL, context);
		}
	} else {
		ret = process_pfcp_sess_del_resp(
				msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid,
				gtpv2c_tx, NULL, NULL, context);
	}

	if (ret) {
		ds_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
				cp_mode != PGWC ? S11_IFACE :S5S8_IFACE);
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing PFCP Session Deletion Response with cause: %s \n",
				LOG_VALUE, cause_str(ret));
		/* Fix: release the CCR buffer on this error path too; previously it
		 * leaked here. rte_free(NULL) is a no-op, so this is safe in the
		 * non-Gx branches as well. */
		rte_free(buffer);
		buffer = NULL;
		return -1;
	}

	/* Send the Delete Session Response unless this is an internal cleanup. */
	if(cleanup_status != PRESENT) {
		payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
			+ sizeof(gtpv2c_tx->gtpc);

		if ((cp_mode == PGWC) ) {
			/*Send response on s5s8 interface */
			gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
					s5s8_recv_sockaddr, SENT);
			update_sys_stat(number_of_users, DECREMENT);
			update_sys_stat(number_of_active_session, DECREMENT);
			if (PRESENT == dupl) {
				process_cp_li_msg_for_cleanup(
						li_data, li_data_cntr, S5S8_C_INTFC_OUT, tx_buf, payload_length,
						fill_ip_info(s5s8_recv_sockaddr.type,
								config.s5s8_ip.s_addr,
								config.s5s8_ip_v6.s6_addr),
						fill_ip_info(s5s8_recv_sockaddr.type,
								s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
								s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
						config.s5s8_port,
						((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
							ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
							ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)),
						cp_mode, imsi);
			}
		} else {
			/* Send response on s11 interface */
			gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
					s11_mme_sockaddr, ACC);
			update_sys_stat(number_of_users, DECREMENT);
			update_sys_stat(number_of_active_session, DECREMENT);
			if (PRESENT == dupl) {
				process_cp_li_msg_for_cleanup(
						li_data, li_data_cntr, S11_INTFC_OUT, tx_buf, payload_length,
						fill_ip_info(s11_mme_sockaddr.type,
								config.s11_ip.s_addr,
								config.s11_ip_v6.s6_addr),
						fill_ip_info(s11_mme_sockaddr.type,
								s11_mme_sockaddr.ipv4.sin_addr.s_addr,
								s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
						config.s11_port,
						((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
							ntohs(s11_mme_sockaddr.ipv4.sin_port) :
							ntohs(s11_mme_sockaddr.ipv6.sin6_port)),
						cp_mode, imsi);
			}
		}
	}

	if (config.use_gx) {
		/* Write or Send CCR -T msg to Gx_App */
		if (cp_mode != SGWC) {
			send_to_ipc_channel(gx_app_sock, buffer,
					msglen + GX_HEADER_LEN);
		}
		if (buffer != NULL) {
			rte_free(buffer);
			buffer = NULL;
		}
		free_dynamically_alloc_memory(&ccr_request);
		update_cli_stats((peer_address_t *) &config.gx_ip, OSS_CCR_TERMINATE, SENT, GX);
		/* Fix: removed a redundant rte_free(buffer) that ran after buffer was
		 * already freed and set to NULL above. */
	}

	RTE_SET_USED(unused_param);
	return 0;
}
/* Handles a Delete Session Response: runs the response processor and, on a
 * reportable failure (ret != -1), sends a Delete Session error response on
 * the interface the message arrived on. Returns 0 on success, -1 on error. */
int
process_ds_resp_handler(void *data, void *unused_param)
{
	msg_info *dsr_msg = (msg_info *)data;

	RTE_SET_USED(unused_param);

	ret = process_delete_session_response(&dsr_msg->gtpc_msg.ds_rsp);
	if (ret == 0)
		return 0;

	if (ret != -1) {
		ds_error_response(dsr_msg, ret, CAUSE_SOURCE_SET_TO_0,
				dsr_msg->interface_type);
	}
	clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing Delete Session Response with cause: %s \n",
			LOG_VALUE, cause_str(ret));
	return -1;
}
/* Handles a PFCP Session Report Request by delegating to the report
 * processor; propagates its non-zero error code, otherwise returns 0. */
int
process_rpt_req_handler(void *data, void *unused_param)
{
	msg_info *rpt_msg = (msg_info *)data;

	RTE_SET_USED(unused_param);

	/* Propagating 'ret' directly is equivalent to the explicit
	 * "if (ret) return ret; return 0;" form. */
	ret = process_pfcp_report_req(&rpt_msg->pfcp_msg.pfcp_sess_rep_req);
	return ret;
}
/* Handles a Downlink Data Notification Acknowledge: delegates to
 * process_ddn_ack() and logs on failure. Returns 0 on success, else the
 * processor's error code. */
int
process_ddn_ack_resp_handler(void *data, void *unused_param)
{
	msg_info *ack_msg = (msg_info *)data;

	RTE_SET_USED(unused_param);

	int rc = process_ddn_ack(&ack_msg->gtpc_msg.ddn_ack);
	if (rc == 0)
		return 0;

	clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing Downlink Datat Notification Ack with cause: %s \n",
			LOG_VALUE, cause_str(rc));
	return rc;
}
/* Handles a Downlink Data Notification Failure Indication: delegates to
 * process_ddn_failure() and logs on error. Returns 0 on success, else the
 * processor's error code. */
int
process_ddn_failure_handler(void *data, void *unused_param)
{
	msg_info *fail_msg = (msg_info *)data;

	RTE_SET_USED(unused_param);

	int rc = process_ddn_failure(&fail_msg->gtpc_msg.ddn_fail_ind);
	if (rc == 0)
		return 0;

	clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing Downlink Datat Notification Failure with cause: %s \n",
			LOG_VALUE, cause_str(rc));
	return rc;
}
/* No-op handler: session-modification response during DL-buffering-duration
 * handling needs no action; always succeeds. */
int process_sess_mod_resp_dl_buf_dur_handler(void *data, void *unused_param)
{
	/* Parameters intentionally unused. */
	RTE_SET_USED(unused_param);
	RTE_SET_USED(data);
	return 0;
}
/* No-op handler: session-modification response for the DDN-failure flow
 * needs no action; always succeeds. */
int process_sess_mod_resp_ddn_fail_handler(void *data, void *unused_param)
{
	/* Parameters intentionally unused. */
	RTE_SET_USED(unused_param);
	RTE_SET_USED(data);
	return 0;
}
/**
 * @brief  : SGW-relocation path: handles the PFCP Session Establishment
 *           Response, builds and sends an MBR request toward the PGWC on
 *           S5/S8, arms a retransmission timer (SGWC only) and makes the
 *           LI copy of the outgoing packet.
 * @param  : data - msg_info carrying pfcp_sess_est_resp
 * @param  : unused_param - unused
 * @return : 0 on success, -1 on failure
 */
int
process_sess_est_resp_sgw_reloc_handler(void *data, void *unused_param)
{
	/* SGW Relocation
	 * Handle pfcp session establishment response
	 * and send mbr request to PGWC
	 * Update proper state in hash as MBR_REQ_SNT_STATE
	 */
	uint16_t payload_length = 0;
	struct resp_info *resp = NULL;
	uint8_t cp_mode = 0;
	msg_info *msg = (msg_info *)data;

	/* Scratch buffer for the outgoing GTPv2-C message. */
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	/* Bearer id is encoded in the upper bits of the SEID. */
	uint8_t ebi = UE_BEAR_ID(msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid);

	if (get_sess_entry(msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
			"for seid: %lu", LOG_VALUE,
			msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid);
		return -1;
	}

	int ebi_index = GET_EBI_INDEX(ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		pfcp_modification_error_response(resp, msg, GTPV2C_CAUSE_SYSTEM_FAILURE);
		return -1;
	}

	/* Builds the outgoing message into gtpv2c_tx (no piggybacked CBR). */
	ret = process_pfcp_sess_est_resp(
			&msg->pfcp_msg.pfcp_sess_est_resp, gtpv2c_tx, NOT_PIGGYBACKED);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing PFCP Session Establishment Response with cause: %s \n",
			LOG_VALUE, cause_str(ret));
		return -1;
	}

	payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
		+ sizeof(gtpv2c_tx->gtpc);

	gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
			s5s8_recv_sockaddr, SENT);

	cp_mode = msg->cp_mode;
	/* SGWC arms a retransmission timer for the S5/S8 request it just sent. */
	if (SGWC == msg->cp_mode) {
		add_gtpv2c_if_timer_entry(
			UE_SESS_ID(msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid),
			&s5s8_recv_sockaddr, tx_buf, payload_length,
			ebi_index, S5S8_IFACE, cp_mode);
	}

	/* NOTE(review): unlike sibling handlers, the LI copy below is not
	 * guarded by a context->dupl check — confirm this is intended. */
	process_cp_li_msg(
			msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid,
			S5S8_C_INTFC_OUT, tx_buf, payload_length,
			fill_ip_info(s5s8_recv_sockaddr.type,
					config.s5s8_ip.s_addr,
					config.s5s8_ip_v6.s6_addr),
			fill_ip_info(s5s8_recv_sockaddr.type,
					s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
					s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
			config.s5s8_port,
			((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
				ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
				ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));

	RTE_SET_USED(unused_param);
	return 0;
}
/*
 * This function handles the CCA-T received from the PCEF.
 */
/**
 * @brief  : Handles a CCA-T (Credit-Control Answer, Termination): tears down
 *           the Gx context keyed by session id and removes the PDN
 *           connection entry keyed by the embedded call id.
 * @param  : data - msg_info carrying the CCA
 * @param  : unused_param - unused
 * @return : 0 on success, -1 or GTPV2C_CAUSE_CONTEXT_NOT_FOUND on failure
 */
int
cca_t_msg_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;
	gx_context_t *gx_context = NULL;
	int ret = 0;
	uint32_t call_id = 0;

	RTE_SET_USED(unused_param);

	/* Retrive Gx_context based on Sess ID. */
	ret = rte_hash_lookup_data(gx_context_by_sess_id_hash,
			(const void*)(msg->gx_msg.cca.session_id.val),
			(void **)&gx_context);
	if (ret < 0) {
		/* Fix: session id is a string — log with %s, not %lu. */
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"hash table for session id: %s\n", LOG_VALUE, msg->gx_msg.cca.session_id.val);
		return -1;
	}

	if(rte_hash_del_key(gx_context_by_sess_id_hash, msg->gx_msg.cca.session_id.val) < 0){
		/* Fix: previously this logged the hash-table pointer as the
		 * "session id"; log the actual session id string instead. */
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Failed to delete "
				"hash key for session id: %s\n", LOG_VALUE, msg->gx_msg.cca.session_id.val);
	}

	if (gx_context != NULL) {
		rte_free(gx_context);
		gx_context = NULL;
	}

	/* Extract the call id from session id */
	ret = retrieve_call_id((char *)msg->gx_msg.cca.session_id.val, &call_id);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Call Id "
				"found for session id:%s\n", LOG_VALUE,
				msg->gx_msg.cca.session_id.val);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	/* Delete PDN Conn Entry */
	ret = rte_hash_del_key(pdn_conn_hash, &call_id);
	if ( ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Entry not found "
				"for CALL_ID :%u while deleting PDN connection entry\n", LOG_VALUE, call_id);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	free_cca_msg_dynamically_alloc_memory(&msg->gx_msg.cca);
	return 0;
}
/**
 * @brief  : Handles a CCA-U (Credit-Control Answer, Update). Resolves the
 *           PDN from the call id embedded in the Gx session id, then either
 *           answers the pending Modify Bearer Request directly (PGWC with
 *           unchanged SGW-U and no SGW FQ-CSID) or drives a PFCP session
 *           modification toward the UP.
 * @param  : data - msg_info carrying the CCA
 * @param  : unused - unused
 * @return : 0 on success, negative on failure
 */
int cca_u_msg_handler(void *data, void *unused)
{
	msg_info *msg = (msg_info *)data;
	int ret = 0;
	uint32_t call_id = 0;
	pdn_connection *pdn = NULL;
	struct resp_info *resp = NULL;
	int ebi_index = 0;
	ue_context *context = NULL;
	eps_bearer *bearer = NULL;
	mod_bearer_req_t *mb_req = NULL;

	/* Scratch buffer for the outgoing GTPv2-C message. */
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	/* Extract the call id from session id */
	ret = retrieve_call_id((char *)&msg->gx_msg.cca.session_id.val, &call_id);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Call Id found for "
				"session id: %s\n", LOG_VALUE,
				(char*) &msg->gx_msg.cca.session_id.val);
		return -1;
	}

	/* Retrieve PDN context based on call id */
	pdn = get_pdn_conn_entry(call_id);
	if (pdn == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"PDN for CALL_ID:%u\n", LOG_VALUE, call_id);
		return -1;
	}

	/* Persist presence reporting area info from the answer, if present. */
	if(msg->gx_msg.cca.presence.presence_reporting_area_information)
		store_presence_reporting_area_info(pdn, &msg->gx_msg.cca.presence_reporting_area_information);

	/*Retrive the session information based on session id. */
	if (get_sess_entry(pdn->seid, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
				"for seid: %lu", LOG_VALUE, pdn->seid);
		return -1;
	}

	int ebi = UE_BEAR_ID(pdn->seid);
	ebi_index = GET_EBI_INDEX(ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		return -1;
	}

	/* The bearer must exist in the context's bearer bitmap. */
	if (!(pdn->context->bearer_bitmap & (1 << ebi_index))) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT
				"Received modify bearer on non-existent EBI - "
				"Dropping packet\n", LOG_VALUE);
		return -EPERM;
	}

	bearer = pdn->eps_bearers[ebi_index];
	context = pdn->context;
	mb_req = &resp->gtpc_msg.mbr;

	if((context->cp_mode == PGWC) &&(context->sgwu_changed == FALSE) && (mb_req->sgw_fqcsid.header.len == 0 )) {
		/* PGWC, SGW-U unchanged and no SGW FQ-CSID: no PFCP modification is
		 * needed — answer the pending MBR directly on S5/S8. */
		set_modify_bearer_response_handover(gtpv2c_tx, mb_req->header.teid.has_teid.seq, context,
				bearer, mb_req);
		int payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
			+ sizeof(gtpv2c_tx->gtpc);

		ret = set_dest_address(pdn->s5s8_sgw_gtpc_ip, &s5s8_recv_sockaddr);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
		}
		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
				s5s8_recv_sockaddr, SENT);

		/* copy packet for user level packet copying or li */
		if (context->dupl) {
			process_pkt_for_li(
					context, S5S8_C_INTFC_OUT, tx_buf, payload_length,
					fill_ip_info(s5s8_recv_sockaddr.type,
							config.s5s8_ip.s_addr,
							config.s5s8_ip_v6.s6_addr),
					fill_ip_info(s5s8_recv_sockaddr.type,
							s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
							s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
					config.s5s8_port,
					((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
						ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
						ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
		}
		pdn->state = CONNECTED_STATE;
		return 0;
	} else {
		/* Drive a PFCP session modification; the request variant depends on
		 * which GTP message is pending in the response context. */
		if (resp->msg_type != GTP_CREATE_SESSION_REQ) {
			ret = send_pfcp_sess_mod_req(pdn, bearer, &resp->gtpc_msg.mbr);
			if (ret) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to send"
						" PFCP Session Modification Request%d \n", LOG_VALUE, ret);
				return ret;
			}
		} else {
			if ((ret = send_pfcp_modification_req(context, pdn, bearer, &resp->gtpc_msg.csr, ebi_index)) < 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to send"
						" PFCP Session Modification Request%d \n", LOG_VALUE, ret);
				return ret;
			}
		}
	}

	RTE_SET_USED(unused);
	return 0;
}
/*
 * This function handles the CCA messages received from the PCEF.
 */
/**
 * @brief  : Handles CCA messages. For CCR-T answers it is a no-op; for other
 *           answers it parses the CCA into the PDN, optionally answers a
 *           pending Change Notification, then resolves the UPF address
 *           (DNS query or static config) and kicks off PFCP session setup.
 * @param  : data - msg_info carrying the CCA
 * @param  : unused_param - unused
 * @return : 0 on success, -1 on failure
 */
int
cca_msg_handler(void *data, void *unused_param)
{
	pdn_connection *pdn = NULL;

	msg_info *msg = (msg_info *)data;
	RTE_SET_USED(msg);

	if (config.use_gx) {
		/* Handle the CCR-T Message */
		if (msg->gx_msg.cca.cc_request_type == TERMINATION_REQUEST) {
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Received GX CCR-T Response..!! \n",
					LOG_VALUE);
			return 0;
		}

		/* Retrive the ebi index */
		ret = parse_gx_cca_msg(&msg->gx_msg.cca, &pdn);
		if (ret) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
					" processing Credit Control Answer with cause: %s \n",
					LOG_VALUE, cause_str(ret));
			gx_cca_error_response(ret, msg);
			return -1;
		}

		if (pdn->proc == UE_REQ_BER_RSRC_MOD_PROC ||
				pdn->proc == HSS_INITIATED_SUB_QOS_MOD)
			return 0;

		/*update proc if there are two rules*/
		if(pdn->policy.num_charg_rule_install > 1)
			pdn->proc = ATTACH_DEDICATED_PROC;

		if (msg->gx_msg.cca.cc_request_type == UPDATE_REQUEST && pdn->proc == CHANGE_NOTIFICATION_PROC) {
			/* Answer the pending Change Notification toward SGW or MME. */
			bzero(&tx_buf, sizeof(tx_buf));
			gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
			set_change_notification_response(gtpv2c_tx, pdn);

			/* Fix: payload_length was uint8_t, silently truncating any
			 * message longer than 255 bytes; use uint16_t like the rest
			 * of the file. */
			uint16_t payload_length = 0;
			payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
				+ sizeof(gtpv2c_tx->gtpc);

			if(pdn->context->cp_mode == PGWC) {
				gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
						s5s8_recv_sockaddr, SENT);
				/* copy packet for user level packet copying or li */
				if (pdn->context->dupl) {
					process_pkt_for_li(
							pdn->context, S5S8_C_INTFC_OUT, tx_buf, payload_length,
							fill_ip_info(s5s8_recv_sockaddr.type,
									config.s5s8_ip.s_addr,
									config.s5s8_ip_v6.s6_addr),
							fill_ip_info(s5s8_recv_sockaddr.type,
									s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
									s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
							config.s5s8_port,
							((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
								ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
								ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
				}
			} else if (pdn->context->cp_mode == SAEGWC) {
				gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
						s11_mme_sockaddr, SENT);
				/* copy packet for user level packet copying or li */
				if (pdn->context->dupl) {
					process_pkt_for_li(
							pdn->context, S11_INTFC_OUT, tx_buf, payload_length,
							fill_ip_info(s11_mme_sockaddr.type,
									config.s11_ip.s_addr,
									config.s11_ip_v6.s6_addr),
							fill_ip_info(s11_mme_sockaddr.type,
									s11_mme_sockaddr.ipv4.sin_addr.s_addr,
									s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
							config.s11_port,
							((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
								ntohs(s11_mme_sockaddr.ipv4.sin_port) :
								ntohs(s11_mme_sockaddr.ipv6.sin6_port)));
				}
			}
			pdn->state = CONNECTED_STATE;
			return 0;
		}
		free_cca_msg_dynamically_alloc_memory(&msg->gx_msg.cca);
	}

	/* Fix: pdn is only populated inside the Gx block above; dereferencing
	 * it unchecked crashed when Gx is disabled (or parsing left it unset). */
	if (pdn == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No PDN connection "
				"available while processing CCA message\n", LOG_VALUE);
		return -1;
	}

	/* Resolve the UPF address if not already known: DNS when enabled,
	 * otherwise the statically configured UPF PFCP address. */
	if (pdn->upf_ip.ip_type == 0) {
		if(config.use_dns) {
			push_dns_query(pdn);
			return 0;
		} else {
			if ((config.pfcp_ip_type == PDN_TYPE_IPV6
						|| config.pfcp_ip_type == PDN_TYPE_IPV4_IPV6)
					&& (config.upf_pfcp_ip_type == PDN_TYPE_IPV6
						|| config.upf_pfcp_ip_type == PDN_TYPE_IPV4_IPV6)) {
				memcpy(pdn->upf_ip.ipv6_addr, config.upf_pfcp_ip_v6.s6_addr, IPV6_ADDRESS_LEN);
				pdn->upf_ip.ip_type = PDN_TYPE_IPV6;
			} else if ((config.pfcp_ip_type == PDN_TYPE_IPV4
						|| config.pfcp_ip_type == PDN_TYPE_IPV4_IPV6)
					&& (config.upf_pfcp_ip_type == PDN_TYPE_IPV4
						|| config.upf_pfcp_ip_type == PDN_TYPE_IPV4_IPV6)) {
				pdn->upf_ip.ipv4_addr = config.upf_pfcp_ip.s_addr;
				pdn->upf_ip.ip_type = PDN_TYPE_IPV4;
			}
		}
	}

	process_pfcp_sess_setup(pdn);

	RTE_SET_USED(unused_param);
	return 0;
}
/* No-op handler: a Modify Bearer Request in the SGW-relocation flow needs
 * no action here; always succeeds. */
int
process_mb_req_sgw_reloc_handler(void *data, void *unused_param)
{
	/* Parameters intentionally ignored. */
	RTE_SET_USED(data);
	RTE_SET_USED(unused_param);
	return 0;
}
/**
 * @brief  : SGW-relocation path: handles the PFCP Session Modification
 *           Response, builds the handover MBR message, sends it on S5/S8,
 *           arms a retransmission timer (SGWC only) and makes the LI copy.
 * @param  : data - msg_info carrying pfcp_sess_mod_resp
 * @param  : unused_param - unused
 * @return : 0 on success, -1 on failure (error response already sent)
 */
int
process_sess_mod_resp_sgw_reloc_handler(void *data, void *unused_param)
{
	uint8_t cp_mode = 0;
	uint16_t payload_length = 0;
	msg_info *msg = (msg_info *)data;
	ue_context *context = NULL;

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	int ebi = UE_BEAR_ID(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid);
	int ebi_index = GET_EBI_INDEX(ebi);
	if (ebi_index == -1) {
		/* NOTE(review): execution continues with ebi_index == -1 and it is
		 * later passed to add_gtpv2c_if_timer_entry — confirm intended. */
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
	}

	ret = get_ue_context(UE_SESS_ID(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid), &context);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to retrieve UE context",
				LOG_VALUE);
		/* Fix: 'context' is NULL here, so the original
		 * context->cp_mode dereference crashed; decide the interface
		 * from the message instead. */
		mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
				msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		return -1;
	}

	ret = process_pfcp_sess_mod_resp_handover(
			msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
			gtpv2c_tx, context);
	if (ret) {
		if(ret != -1)
			mbr_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
					context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing PFCP Session Modification Response with cause: %s \n",
				LOG_VALUE, cause_str(ret));
		return -1;
	}

	payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
		+ sizeof(gtpv2c_tx->gtpc);

	gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
			s5s8_recv_sockaddr, SENT);

	cp_mode = context->cp_mode;
	/* SGWC arms a retransmission timer for the S5/S8 request just sent. */
	if (SGWC == context->cp_mode) {
		add_gtpv2c_if_timer_entry(
			UE_SESS_ID(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid),
			&s5s8_recv_sockaddr, tx_buf, payload_length,
			ebi_index, S5S8_IFACE, cp_mode);
	}

	process_cp_li_msg(
			msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
			S5S8_C_INTFC_OUT, tx_buf, payload_length,
			fill_ip_info(s5s8_recv_sockaddr.type,
					config.s5s8_ip.s_addr,
					config.s5s8_ip_v6.s6_addr),
			fill_ip_info(s5s8_recv_sockaddr.type,
					s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
					s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
			config.s5s8_port,
			((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
				ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
				ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));

	RTE_SET_USED(unused_param);
	return 0;
}
/* Handle a PFCP Session Modification Response during the Create Bearer
 * (dedicated bearer activation) procedure.
 *
 * Builds the follow-up GTPv2-C message in the file-scope tx_buf and
 * forwards it on S5/S8 or S11, depending on the gateway role (cp_mode)
 * and on which message triggered the PFCP modification.
 *
 * @param data         msg_info carrying the decoded PFCP Session
 *                     Modification Response.
 * @param unused_param unused; kept for the state-machine handler signature.
 * @return 0 on success, -1 on failure (an error response has been sent).
 */
int
process_pfcp_sess_mod_resp_cbr_handler(void *data, void *unused_param)
{
	uint8_t cp_mode = 0;
	uint16_t payload_length = 0;
	struct resp_info *resp = NULL;
	ue_context *context = NULL;
	int ret = 0;
	msg_info *msg = (msg_info *)data;

	/* tx_buf is a file-scope scratch buffer for the outgoing GTPv2-C PDU. */
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	/* The CP SEID encodes both the session TEID and the bearer id. */
	uint64_t sess_id = msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid;
	uint32_t teid = UE_SESS_ID(sess_id);
	int ebi = UE_BEAR_ID(sess_id);
	int ebi_index = GET_EBI_INDEX(ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		cbr_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE,
				CAUSE_SOURCE_SET_TO_0,
				msg->cp_mode != SGWC ? GX_IFACE : S5S8_IFACE);
		return -1;
	}

	/* Retrieve the UE context */
	ret = get_ue_context(teid, &context);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				"UE Context for teid: %u\n", LOG_VALUE, teid);
		cbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0,
				msg->interface_type);
		return -1;
	}
	cp_mode = context->cp_mode;

	ret = get_sess_entry(sess_id, &resp);
	if (ret){
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
				"for seid: %lu", LOG_VALUE, sess_id);
		cbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0,
				context->cp_mode != SGWC ? GX_IFACE : S5S8_IFACE);
		return -1;
	}

	/* Builds the outgoing GTPv2-C message into gtpv2c_tx. */
	ret = process_pfcp_sess_mod_resp(&msg->pfcp_msg.pfcp_sess_mod_resp, gtpv2c_tx, context, resp);
	if (ret != 0) {
		/* ret == -1 means the callee already handled the error reply. */
		if(ret != -1)
			cbr_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
					context->cp_mode != SGWC ? GX_IFACE : S5S8_IFACE);
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing PFCP Session Modification Response with cause: %s \n",
				LOG_VALUE, cause_str(ret));
		return -1;
	}

	/* Dedicated Activation Procedure */
	if(context->piggyback == TRUE) {
		/* Piggybacked flow: nothing is sent separately here. */
		context->piggyback = FALSE;
		return 0;
	}

	payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
		+ sizeof(gtpv2c_tx->gtpc);

	/* Split-gateway role: forward on S5/S8 when the trigger came from the
	 * S5/S8 or Gx side; otherwise answer the MME on S11. */
	if ((SAEGWC != context->cp_mode) && ((resp->msg_type == GTP_CREATE_BEARER_RSP) ||
				(resp->msg_type == GX_RAR_MSG) || (resp->msg_type == GTP_BEARER_RESOURCE_CMD))){

		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
				s5s8_recv_sockaddr, SENT);

		/* Only non-response triggers arm a retransmission timer. */
		if(resp->msg_type != GTP_CREATE_BEARER_RSP){
			add_gtpv2c_if_timer_entry(
					UE_SESS_ID(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid),
					&s5s8_recv_sockaddr, tx_buf, payload_length,
					ebi_index, S5S8_IFACE, cp_mode);
		}

		/* Copy the outgoing message for lawful interception. */
		process_cp_li_msg(
				msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
				S5S8_C_INTFC_OUT, tx_buf, payload_length,
				fill_ip_info(s5s8_recv_sockaddr.type,
						config.s5s8_ip.s_addr,
						config.s5s8_ip_v6.s6_addr),
				fill_ip_info(s5s8_recv_sockaddr.type,
						s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
						s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
				config.s5s8_port,
				((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
					ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
	} else {
		/* Pure Gx triggers (RAA/CCR) have no GTP message to send on S11. */
		if(resp->msg_type != GX_RAA_MSG && resp->msg_type != GX_CCR_MSG) {
			gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
					s11_mme_sockaddr, SENT);

			add_gtpv2c_if_timer_entry(
					UE_SESS_ID(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid),
					&s11_mme_sockaddr, tx_buf, payload_length,
					ebi_index, S11_IFACE, cp_mode);

			/* Copy the outgoing message for lawful interception. */
			process_cp_li_msg(
					msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
					S11_INTFC_OUT, tx_buf, payload_length,
					fill_ip_info(s11_mme_sockaddr.type,
							config.s11_ip.s_addr,
							config.s11_ip_v6.s6_addr),
					fill_ip_info(s11_mme_sockaddr.type,
							s11_mme_sockaddr.ipv4.sin_addr.s_addr,
							s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
					config.s11_port,
					((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
						ntohs(s11_mme_sockaddr.ipv4.sin_port) :
						ntohs(s11_mme_sockaddr.ipv6.sin6_port)));
		}
	}
	RTE_SET_USED(unused_param);
	return 0;
}
/* Dispatch a PFCP Session Modification Response received during a
 * Bearer Resource Command procedure to the create-, update- or
 * delete-bearer sub-handler, selected from the pending PCRF policy
 * counters or the stored trigger message type.
 *
 * @param data         msg_info with the decoded PFCP response.
 * @param unused_param unused; handler-signature placeholder.
 * @return 0 on success, -1 when the session/context/PDN is missing or no
 *         bearer operation matches.
 */
int
process_pfcp_sess_mod_resp_brc_handler(void *data, void *unused_param)
{
	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"BRC HANDLER IS CALLED\n",
			LOG_VALUE);

	int ebi_index = 0;
	uint8_t ebi = 0;
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	struct resp_info *resp = NULL;
	int ret = 0;
	msg_info *msg = (msg_info *)data;
	bzero(&tx_buf, sizeof(tx_buf));

	/* The CP SEID encodes both the session TEID and the bearer id. */
	uint64_t sess_id = msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid;
	uint32_t teid = UE_SESS_ID(sess_id);

	/* Retrive the session information based on session id. */
	if (get_sess_entry(sess_id, &resp) != 0){
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
				"for seid: %lu", LOG_VALUE, sess_id);
		return -1;
	}

	/* Retrieve the UE context */
	ret = get_ue_context(teid, &context);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Failed to get"
				"UE context for teid: %d\n", LOG_VALUE, teid);
		send_bearer_resource_failure_indication(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0,
				msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		return -1;
	}

	ebi = UE_BEAR_ID(sess_id);
	ebi_index = GET_EBI_INDEX(ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		send_bearer_resource_failure_indication(msg, GTPV2C_CAUSE_SYSTEM_FAILURE,
				CAUSE_SOURCE_SET_TO_0, context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		return -1;
	}

	pdn = GET_PDN(context, ebi_index);
	if (pdn == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
		send_bearer_resource_failure_indication(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0, context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		return -1;
	}

	/* NOTE(review): the sub-handlers' return values are ignored here and
	 * this function returns 0 regardless — presumably each sub-handler
	 * sends its own error response; confirm against the state machine. */
	if(pdn->policy.num_charg_rule_install ||
			resp->msg_type == GTP_CREATE_BEARER_REQ ||
			resp->msg_type == GTP_CREATE_BEARER_RSP) {
		process_pfcp_sess_mod_resp_cbr_handler(data, unused_param);
	} else if (pdn->policy.num_charg_rule_modify) {
		process_pfcp_sess_mod_resp_ubr_handler(data, unused_param);
	} else if (pdn->policy.num_charg_rule_delete ||
			resp->msg_type == GTP_DELETE_BEARER_REQ ||
			resp->msg_type == GTP_DELETE_BEARER_RSP) {
		process_pfcp_sess_mod_resp_dbr_handler(data, unused_param);
	} else {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Invalid bearer operation \n", LOG_VALUE);
		return -1;
	}
	RTE_SET_USED(unused_param);
	return 0;
}
/* Acknowledge receipt of a CCA-U for a Provision Ack from the PCRF.
 * No further processing is required at this state; just log it.
 *
 * Fix: corrected the misspelled log message ("Provsion" -> "Provision").
 *
 * @param data         unused.
 * @param unused_param unused.
 * @return always 0.
 */
int
provision_ack_ccau_handler(void *data, void *unused_param) {
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"CCA-U for Provision Ack "
			"is received from PCRF successfully.\n", LOG_VALUE);

	RTE_SET_USED(data);
	RTE_SET_USED(unused_param);
	return 0;
}
/* Handle a Modify Bearer Response on S5/S8 in the handover flow:
 * finalize the session via process_sgwc_s5s8_modify_bearer_response(),
 * bump the user/active-session statistics and, when a GTPv2-C payload
 * was produced, relay it to the MME on S11.
 *
 * Note: `ret` is the file-scope return-code variable shared by these
 * handlers (no local declaration here).
 *
 * @param data   msg_info with the decoded Modify Bearer Response.
 * @param rx_buf unused; handler-signature placeholder.
 * @return 0 on success, -1 on processing failure (error response sent).
 */
int process_mbr_resp_handover_handler(void *data, void *rx_buf)
{
	ue_context *context = NULL;
	uint16_t payload_length = 0;

	msg_info *msg = (msg_info *)data;
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	/* Fills gtpv2c_tx and resolves the UE context for this response. */
	ret = process_sgwc_s5s8_modify_bearer_response(&(msg->gtpc_msg.mb_rsp),
			gtpv2c_tx, &context);
	if (ret) {
		cs_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
				msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing Modify Bearer Response with cause: %s \n",
				LOG_VALUE, cause_str(ret));
		return -1;
	}

	update_sys_stat(number_of_users, INCREMENT);
	update_sys_stat(number_of_active_session, INCREMENT);

	/* No GTPv2-C payload produced: nothing to forward on S11. */
	if (NOT_PRESENT == ntohs(gtpv2c_tx->gtpc.message_len)) {
		return 0;
	}

	payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
		+ sizeof(gtpv2c_tx->gtpc);

	gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
			s11_mme_sockaddr, ACC);

	/* copy packet for user level packet copying or li */
	if (context->dupl) {
		process_pkt_for_li(
				context, S11_INTFC_OUT, tx_buf, payload_length,
				fill_ip_info(s11_mme_sockaddr.type,
						config.s11_ip.s_addr,
						config.s11_ip_v6.s6_addr),
				fill_ip_info(s11_mme_sockaddr.type,
						s11_mme_sockaddr.ipv4.sin_addr.s_addr,
						s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
				config.s11_port,
				((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s11_mme_sockaddr.ipv4.sin_port) :
					ntohs(s11_mme_sockaddr.ipv6.sin6_port)));
	}
	RTE_SET_USED(rx_buf);
	return 0;
}
/* No-op handler: a Modify Bearer Response in the modification procedure
 * requires no further action at this state. */
int process_mbr_resp_for_mod_proc_handler(void *data, void *rx_buf)
{
	RTE_SET_USED(rx_buf);
	RTE_SET_USED(data);
	return 0;
}
/* Handle a GTPv2-C Create Bearer Response; on a reportable failure an
 * error response is emitted towards Gx or S5/S8 depending on the role.
 * Returns 0 on success, -1 on failure. Uses the file-scope `ret`. */
int
process_create_bearer_response_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;

	RTE_SET_USED(unused_param);

	ret = process_create_bearer_response(&msg->gtpc_msg.cb_rsp);
	if (!ret)
		return 0;

	/* ret == -1 means the callee already handled the error reply. */
	if (ret != -1) {
		cbr_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
				(msg->cp_mode != SGWC) ? GX_IFACE : S5S8_IFACE);
	}
	clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing Create Bearer Response with cause: %s \n",
			LOG_VALUE, cause_str(ret));
	return -1;
}
/* Handle a GTPv2-C Create Bearer Request; on a reportable failure an
 * error response is emitted towards Gx or S5/S8 depending on the role.
 * Returns 0 on success, -1 on failure. Uses the file-scope `ret`. */
int
process_create_bearer_request_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;

	RTE_SET_USED(unused_param);

	ret = process_create_bearer_request(&msg->gtpc_msg.cb_req);
	if (!ret)
		return 0;

	/* ret == -1 means the callee already handled the error reply. */
	if (ret != -1) {
		cbr_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
				(msg->cp_mode != SGWC) ? GX_IFACE : S5S8_IFACE);
	}
	clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing Create Bearer Request with cause: %s \n",
			LOG_VALUE, cause_str(ret));
	return -1;
}
/* Handle a Gx Re-Auth Request (RAR) from the PCRF: map the Gx session-id
 * to the internal call id, look up the PDN context and parse/apply the
 * RAR. Returns 0 on success, -1 on failure (a reauth error response is
 * generated for reportable parse failures). */
int
process_rar_request_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;
	pdn_connection *pdn_cntxt = NULL;
	uint32_t call_id = 0;
	int16_t rc = 0;

	RTE_SET_USED(unused_param);

	/* Map the Gx session-id back to the internal call id (file-scope ret). */
	ret = retrieve_call_id((char *)&msg->gx_msg.rar.session_id.val, &call_id);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Call Id found for "
				"session id: %s\n", LOG_VALUE, msg->gx_msg.rar.session_id.val);
		return -1;
	}

	/* Retrieve PDN context based on call id */
	pdn_cntxt = get_pdn_conn_entry(call_id);
	if (NULL == pdn_cntxt) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"PDN for CALL_ID:%u\n", LOG_VALUE, call_id);
		return -1;
	}

	rc = parse_gx_rar_msg(&msg->gx_msg.rar, pdn_cntxt);
	if (rc == 0)
		return 0;

	/* rc == -1 means the error was already handled by the callee. */
	if (rc != -1)
		gen_reauth_error_response(pdn_cntxt, rc);
	clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing Re-Auth Request with cause: %s \n",
			LOG_VALUE, cause_str(rc));
	return -1;
}
/* Acknowledge a PFCP PFD Management Response; it carries no state to act
 * on, so only log the receipt.
 *
 * Fix: corrected the misspelled log message ("Recived" -> "Received").
 *
 * @param data         unused.
 * @param unused_param unused.
 * @return always 0.
 */
int
pfd_management_handler(void *data, void *unused_param)
{
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
			"Pfcp Pfd Management Response Received Successfully \n", LOG_VALUE);

	RTE_SET_USED(data);
	RTE_SET_USED(unused_param);
	return 0;
}
/* Handle a PFCP Session Modification Response in the SGWC delete-session
 * flow: build the next Delete Session message and forward it to the peer
 * gateway on S5/S8.  This handler is SGWC-only (the else branch bails out).
 *
 * Note: `ret` is the file-scope return-code variable shared by these
 * handlers (no local declaration here).
 *
 * @param data         msg_info with the decoded PFCP response.
 * @param unused_param unused; handler-signature placeholder.
 * @return 0 on success, -1 on failure (error response sent) or when
 *         invoked in a non-SGWC role.
 */
int
process_mod_resp_delete_handler(void *data, void *unused_param)
{
	uint8_t cp_mode = 0;
	uint16_t payload_length = 0;
	struct resp_info *resp = NULL;
	ue_context *context = NULL;
	msg_info *msg = (msg_info *)data;

	/* The CP SEID encodes both the session TEID and the bearer id. */
	uint64_t sess_id = msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid;
	uint32_t teid = UE_SESS_ID(sess_id);
	int ebi = UE_BEAR_ID(sess_id);
	int ebi_index = GET_EBI_INDEX(ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		ds_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0,
				msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		return -1;
	}

	/* Retrieve the UE context */
	ret = get_ue_context(teid, &context);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				"UE context for teid: %u\n", LOG_VALUE, teid);
		ds_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0,
				msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		return -1;
	}

	/* Retrive the session information based on session id. */
	if (get_sess_entry(sess_id, &resp) != 0){
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
				"for seid: %lu", LOG_VALUE, sess_id);
		ds_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0,
				context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		return -1;
	}

	/* tx_buf is a file-scope scratch buffer for the outgoing GTPv2-C PDU. */
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	ret = process_pfcp_sess_mod_resp(&msg->pfcp_msg.pfcp_sess_mod_resp,
			gtpv2c_tx, context, resp);
	if (ret) {
		ds_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
				context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing PFCP Session Deletion Response with cause: %s \n",
				LOG_VALUE, cause_str(ret));
		return -1;
	}

	payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
		+ sizeof(gtpv2c_tx->gtpc);

	cp_mode = context->cp_mode;
	if (context->cp_mode== SGWC) {
		/* Forward s11 delete_session_request on s5s8 */
		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
				s5s8_recv_sockaddr, SENT);

		/* Arm a retransmission timer for the forwarded request. */
		add_gtpv2c_if_timer_entry(
				UE_SESS_ID(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid),
				&s5s8_recv_sockaddr, tx_buf, payload_length, ebi_index, S5S8_IFACE,
				cp_mode);

		/* Copy the outgoing message for lawful interception when enabled. */
		if (PRESENT == context->dupl) {
			process_cp_li_msg(
					msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
					S5S8_C_INTFC_OUT, tx_buf, payload_length,
					fill_ip_info(s5s8_recv_sockaddr.type,
							config.s5s8_ip.s_addr,
							config.s5s8_ip_v6.s6_addr),
					fill_ip_info(s5s8_recv_sockaddr.type,
							s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
							s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
					config.s5s8_port,
					((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
						ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
						ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
		}
	} else {
		/*Code should not reach here since this handler is only for SGWC*/
		return -1;
	}

	RTE_SET_USED(unused_param);
	return 0;
}
/* Handle a PFCP Session Modification Response in the Delete Bearer
 * (dedicated bearer deactivation) procedure: record usage reports for
 * CDRs, build the next GTPv2-C message and forward it on S5/S8 or S11
 * depending on the gateway role and the triggering message type.
 *
 * Note: `ret` is the file-scope return-code variable shared by these
 * handlers (no local declaration here).
 *
 * @param data         msg_info with the decoded PFCP response.
 * @param unused_param unused; handler-signature placeholder.
 * @return 0 on success, -1 on failure (error response already sent).
 */
int
process_pfcp_sess_mod_resp_dbr_handler(void *data, void *unused_param)
{
	uint8_t cp_mode = 0;
	uint16_t payload_length = 0;
	struct resp_info *resp = NULL;
	ue_context *context = NULL;
	msg_info *msg = (msg_info *)data;

	/* tx_buf is a file-scope scratch buffer for the outgoing GTPv2-C PDU. */
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	/* The CP SEID encodes both the session TEID and the bearer id. */
	uint64_t seid = msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid;
	uint32_t teid = UE_SESS_ID(seid);
	int ebi = UE_BEAR_ID(seid);
	int ebi_index = GET_EBI_INDEX(ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		delete_bearer_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE,
				CAUSE_SOURCE_SET_TO_0, msg->cp_mode != SGWC ? GX_IFACE : S5S8_IFACE);
		return -1;
	}

	ret = get_ue_context(teid, &context);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Failed to get"
				"UE context for teid: %d\n",LOG_VALUE, teid);
		delete_bearer_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0, msg->cp_mode != SGWC ? GX_IFACE : S5S8_IFACE);
		return -1;
	}

	if (get_sess_entry( msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
				"for seid: %lu", LOG_VALUE,
				msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid);
		delete_bearer_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0, context->cp_mode != SGWC ? GX_IFACE : S5S8_IFACE);
		return -1;
	}

	/* Fold any usage reports into CDR accounting before tearing down. */
	if (msg->pfcp_msg.pfcp_sess_mod_resp.usage_report_count != 0) {
		for(int iCnt=0 ; iCnt< msg->pfcp_msg.pfcp_sess_mod_resp.usage_report_count; iCnt++)
			fill_cdr_info_sess_mod_resp(seid,
					&msg->pfcp_msg.pfcp_sess_mod_resp.usage_report[iCnt]);
	}

	cp_mode = context->cp_mode;

	/* Builds the outgoing GTPv2-C message into gtpv2c_tx. */
	ret = process_delete_bearer_pfcp_sess_response(
			msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
			context, gtpv2c_tx, resp);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing PFCP Session Modification Response with cause: %s \n",
				LOG_VALUE, cause_str(ret));
		delete_bearer_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
				context->cp_mode != SGWC ? GX_IFACE : S5S8_IFACE);
		return -1;
	}

	payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
		+ sizeof(gtpv2c_tx->gtpc);

	/* Split-gateway role: forward on S5/S8 when the trigger came from the
	 * S5/S8 or Gx side; otherwise answer the MME on S11. */
	if ((SAEGWC != context->cp_mode) &&
			( resp->msg_type == GTP_DELETE_BEARER_RSP
			  || resp->msg_type == GX_RAR_MSG
			  || resp->msg_type == GTP_DELETE_BEARER_CMD
			  || resp->msg_type == GTP_BEARER_RESOURCE_CMD) ) {

		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
				s5s8_recv_sockaddr, SENT);

		/* Only non-response triggers arm a retransmission timer. */
		if (resp->msg_type != GTP_DELETE_BEARER_RSP) {
			add_gtpv2c_if_timer_entry(
					UE_SESS_ID(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid),
					&s5s8_recv_sockaddr, tx_buf, payload_length,
					ebi_index, S5S8_IFACE, cp_mode);
		}

		/* Copy the outgoing message for lawful interception. */
		process_cp_li_msg(
				msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
				S5S8_C_INTFC_OUT, tx_buf, payload_length,
				fill_ip_info(s5s8_recv_sockaddr.type,
						config.s5s8_ip.s_addr,
						config.s5s8_ip_v6.s6_addr),
				fill_ip_info(s5s8_recv_sockaddr.type,
						s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
						s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
				config.s5s8_port,
				((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
					ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
	} else if(resp->msg_type != GX_RAA_MSG && resp->msg_type != GX_CCR_MSG) {
		/* Pure Gx triggers (RAA/CCR) have no GTP message to send on S11. */
		gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
				s11_mme_sockaddr,SENT);

		add_gtpv2c_if_timer_entry(
				UE_SESS_ID(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid),
				&s11_mme_sockaddr, tx_buf, payload_length,
				ebi_index, S11_IFACE, cp_mode);

		/* Copy the outgoing message for lawful interception. */
		process_cp_li_msg(
				msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
				S11_INTFC_OUT, tx_buf, payload_length,
				fill_ip_info(s11_mme_sockaddr.type,
						config.s11_ip.s_addr,
						config.s11_ip_v6.s6_addr),
				fill_ip_info(s11_mme_sockaddr.type,
						s11_mme_sockaddr.ipv4.sin_addr.s_addr,
						s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
				config.s11_port,
				((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s11_mme_sockaddr.ipv4.sin_port) :
					ntohs(s11_mme_sockaddr.ipv6.sin6_port)));
	}
	RTE_SET_USED(unused_param);
	return 0;
}
/* Handle a GTPv2-C Delete Bearer Request arriving on S5/S8: resolve the
 * UE context via the SGW S5/S8 TEID and delegate to
 * process_delete_bearer_request(). Returns 0 on success (including the
 * callee's -1 "already handled" code), -1 on failure. Uses the
 * file-scope `ret`. */
int
process_delete_bearer_request_handler(void *data, void *unused_param)
{
	ue_context *context = NULL;
	msg_info *msg = (msg_info *)data;

	RTE_SET_USED(unused_param);

	ret = get_ue_context_by_sgw_s5s8_teid(
			msg->gtpc_msg.db_req.header.teid.has_teid.teid, &context);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				"UE context for teid: %d\n", LOG_VALUE,
				msg->gtpc_msg.db_req.header.teid.has_teid.teid);
		delete_bearer_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0, msg->interface_type);
		return -1;
	}

	ret = process_delete_bearer_request(&msg->gtpc_msg.db_req, context, msg->proc);
	if (ret != 0 && ret != -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing Delete Bearer Request with cause: %s \n",
				LOG_VALUE, cause_str(ret));
		delete_bearer_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
				(context->cp_mode != SGWC) ? GX_IFACE : S5S8_IFACE);
		return -1;
	}
	return 0;
}
/* Handle a GTPv2-C Delete Bearer Response.
 *
 * If the LBI IE is present the default bearer (whole PDN connection) is
 * being removed, so a PFCP Session Deletion Request is triggered;
 * otherwise only dedicated bearer(s) are removed via a PFCP Session
 * Modification Request.
 *
 * Fix: the "Failed to get UE Context" log passed the teid argument with
 * no conversion specifier in the format string (undefined printf-style
 * behavior); added ": %u\n".
 *
 * Note: `ret` is the file-scope return-code variable shared by these
 * handlers (no local declaration here).
 *
 * @param data         msg_info with the decoded Delete Bearer Response.
 * @param unused_param unused; handler-signature placeholder.
 * @return 0 on success, -1 on failure (error response already sent).
 */
int
process_delete_bearer_resp_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;
	ue_context *context = NULL;

	if (msg->gtpc_msg.db_rsp.lbi.header.len != 0) {
		/* Delete Default Bearer. Send PFCP Session Deletion Request */
		ret = process_pfcp_sess_del_request_delete_bearer_rsp(&msg->gtpc_msg.db_rsp);
		if (ret && (ret != -1)) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
					" processing Delete Bearer Response with cause: %s \n",
					LOG_VALUE, cause_str(ret));
			delete_bearer_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
					msg->interface_type);
			return -1;
		}
	} else {
		ret = get_ue_context(msg->gtpc_msg.db_rsp.header.teid.has_teid.teid, &context);
		if (ret) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
					"UE Context for teid: %u\n", LOG_VALUE,
					msg->gtpc_msg.db_rsp.header.teid.has_teid.teid);
			delete_bearer_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
					CAUSE_SOURCE_SET_TO_0, msg->interface_type);
			return -1;
		}

		/* Delete Dedicated Bearer. Send PFCP Session Modification Request */
		ret = process_delete_bearer_resp(&msg->gtpc_msg.db_rsp, context, msg->proc);
		if (ret && ret!=-1)
		{
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
					" processing Delete Bearer Response with cause: %s \n",
					LOG_VALUE, cause_str(ret));
			delete_bearer_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
					context->cp_mode != SGWC ? GX_IFACE : S5S8_IFACE);
			return -1;
		}
	}
	RTE_SET_USED(unused_param);
	return 0;
}
/* Handle a PFCP Session Deletion Response in the Delete Bearer flow:
 * build the resulting GTPv2-C message, forward a Delete Bearer Response
 * on S5/S8 when acting as split SGWC/PGWC, and tear down the local
 * session context.
 *
 * Fix: the "No Session entry Found" log read the seid through the
 * pfcp_sess_mod_resp union member even though this handler processes a
 * session *deletion* response; it now uses the already-computed sess_id
 * taken from pfcp_sess_del_resp.
 *
 * Note: `ret` is the file-scope return-code variable shared by these
 * handlers (no local declaration here).
 *
 * @param data         msg_info with the decoded PFCP Session Deletion
 *                     Response.
 * @param unused_param unused; handler-signature placeholder.
 * @return 0 on success, -1 or a GTPv2-C cause on failure.
 */
int
process_pfcp_sess_del_resp_dbr_handler(void *data, void *unused_param)
{
	uint16_t payload_length = 0;
	struct resp_info *resp = NULL;
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	msg_info *msg = (msg_info *)data;

	/* The CP SEID encodes both the session TEID and the bearer id. */
	uint64_t sess_id = msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid;
	uint32_t teid = UE_SESS_ID(sess_id);
	int ebi = UE_BEAR_ID(sess_id);
	int ebi_index = GET_EBI_INDEX(ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		delete_bearer_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE,
				CAUSE_SOURCE_SET_TO_0, msg->cp_mode != SGWC ? GX_IFACE : S5S8_IFACE);
		return -1;
	}

	/* tx_buf is a file-scope scratch buffer for the outgoing GTPv2-C PDU. */
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	ret = get_ue_context(teid, &context);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Failed to get"
				"UE context for teid: %d\n", LOG_VALUE, teid);
		delete_bearer_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0, msg->interface_type);
		return -1;
	}

	pdn = GET_PDN(context, ebi_index);
	if (pdn == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get PDN for "
				"ebi_index : %d\n", LOG_VALUE, ebi_index);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	if (get_sess_entry(sess_id, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"No Session entry"
				" Found for session id: %lu\n",LOG_VALUE, sess_id);
		delete_bearer_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0, context->cp_mode != SGWC ? GX_IFACE : S5S8_IFACE);
		return -1;
	}

	/* Builds the outgoing GTPv2-C message into gtpv2c_tx. */
	ret = process_delete_bearer_pfcp_sess_response(sess_id, context, gtpv2c_tx, resp);
	if (ret && ret!=-1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing PFCP Session Modification Response with cause: %s \n",
				LOG_VALUE, cause_str(ret));
		delete_bearer_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
				context->cp_mode != SGWC ? GX_IFACE : S5S8_IFACE);
		return -1;
	}

	payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
		+ sizeof(gtpv2c_tx->gtpc);

	/* Split SGWC: relay the Delete Bearer Response towards the PGWC. */
	if ((SAEGWC != context->cp_mode) &&
			((resp->msg_type == GTP_DELETE_BEARER_RSP))) {
		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
				s5s8_recv_sockaddr,SENT);

		/* Copy the outgoing message for lawful interception. */
		process_cp_li_msg(
				msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid,
				S5S8_C_INTFC_OUT, tx_buf, payload_length,
				fill_ip_info(s5s8_recv_sockaddr.type,
						config.s5s8_ip.s_addr,
						config.s5s8_ip_v6.s6_addr),
				fill_ip_info(s5s8_recv_sockaddr.type,
						s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
						s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
				config.s5s8_port,
				((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
					ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
	}

	/* Release the session/PDN state now that teardown has completed. */
	delete_sess_context(&context, pdn);

	RTE_SET_USED(unused_param);
	return 0;
}
/* Update Bearer procedure handlers */
/* Handle a GTPv2-C Update Bearer Response: cache any Presence Reporting
 * Area info, then continue the procedure towards S5/S8 (SGWC role) or
 * Gx (PGWC/SAEGWC role). Returns 0 on success, -1 on failure. */
int process_update_bearer_response_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;
	ue_context *context = NULL;
	int rc = 0;

	RTE_SET_USED(unused_param);

	if (get_ue_context(msg->gtpc_msg.ub_rsp.header.teid.has_teid.teid,
				&context) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				" UE context for teid: %d\n", LOG_VALUE,
				msg->gtpc_msg.ub_rsp.header.teid.has_teid.teid);
		ubr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0, msg->interface_type);
		return -1;
	}

	/* Store any Presence Reporting Area info carried in the response. */
	if (msg->gtpc_msg.ub_rsp.pres_rptng_area_info.header.len != 0)
		store_presc_reporting_area_info_to_ue_context(
				&msg->gtpc_msg.ub_rsp.pres_rptng_area_info, context);

	if (context->cp_mode == SGWC) {
		rc = process_s11_upd_bearer_response(&msg->gtpc_msg.ub_rsp, context);
		if (rc != 0 && rc != -1)
			ubr_error_response(msg, rc, CAUSE_SOURCE_SET_TO_0, S5S8_IFACE);
	} else {
		rc = process_s5s8_upd_bearer_response(&msg->gtpc_msg.ub_rsp, context);
		if (rc != 0 && rc != -1)
			ubr_error_response(msg, rc, CAUSE_SOURCE_SET_TO_0, GX_IFACE);
	}

	if (rc != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing Update Bearer Response with cause: %s \n",
				LOG_VALUE, cause_str(rc));
		return -1;
	}
	return 0;
}
/* Handle a GTPv2-C Update Bearer Request; on a reportable failure an
 * error response goes out on S5/S8. Returns 0 on success, -1 on failure.
 * Uses the file-scope `ret`. */
int process_update_bearer_request_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;

	RTE_SET_USED(unused_param);

	ret = process_update_bearer_request(&msg->gtpc_msg.ub_req);
	if (!ret)
		return 0;

	/* ret == -1 means the callee already handled the error reply. */
	if (ret != -1)
		ubr_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0, S5S8_IFACE);
	clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing Update Bearer Request with cause: %s \n",
			LOG_VALUE, cause_str(ret));
	return -1;
}
/* Bearer Resource Command handler */
/*
 * The function handles a Bearer Resource Command when sent from the MME to
 * the SGWC, and also when the SGWC forwards the same command to the PGWC.
 */
/* Handle a Bearer Resource Command: process it locally and, in SGWC
 * role, relay the rebuilt command to the PGWC over S5/S8 with a
 * retransmission timer and an optional lawful-interception copy.
 *
 * NOTE(review): `ret` is declared uint8_t, so a negative return from
 * process_bearer_rsrc_cmd() would be truncated (e.g. -1 -> 255) before
 * being passed on as a cause value — confirm callees only return
 * non-negative cause codes here.
 *
 * @param data         msg_info with the decoded Bearer Resource Command.
 * @param unused_param unused; handler-signature placeholder.
 * @return 0 on success, -1 on failure (failure indication already sent).
 */
int
process_bearer_resource_command_handler(void *data, void *unused_param)
{
	ue_context *context = NULL;
	uint16_t payload_length = 0;
	uint8_t ret = 0;
	msg_info *msg = (msg_info *)data;

	/* tx_buf is a file-scope scratch buffer for the outgoing GTPv2-C PDU. */
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	ret = get_ue_context(msg->gtpc_msg.bearer_rsrc_cmd.header.teid.has_teid.teid,
			&context);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Failed to get"
				" UE context for teid: %u\n",LOG_VALUE,
				msg->gtpc_msg.bearer_rsrc_cmd.header.teid.has_teid.teid);
		send_bearer_resource_failure_indication(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0, msg->interface_type);
		return -1;
	}

	/* Record that no failure indication has been sent yet for this command. */
	context->is_sent_bearer_rsc_failure_indc = NOT_PRESENT;

	ret = process_bearer_rsrc_cmd(&msg->gtpc_msg.bearer_rsrc_cmd,
			gtpv2c_tx, context);
	if(ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing Bearer Resource Command with cause: %s \n",
				LOG_VALUE, cause_str(ret));
		send_bearer_resource_failure_indication(msg, ret, CAUSE_SOURCE_SET_TO_0,
				context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		return -1;
	}

	/* SGWC relays the rebuilt command towards the PGWC on S5/S8. */
	if (SGWC == context->cp_mode ) {
		payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
			+ sizeof(gtpv2c_tx->gtpc);

		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
				s5s8_recv_sockaddr, SENT);

		/* Arm a retransmission timer for the forwarded command. */
		add_gtpv2c_if_timer_entry(
				msg->gtpc_msg.bearer_rsrc_cmd.header.teid.has_teid.teid,
				&s5s8_recv_sockaddr, tx_buf, payload_length,
				GET_EBI_INDEX(msg->gtpc_msg.bearer_rsrc_cmd.lbi.ebi_ebi), S5S8_IFACE, SGWC);

		/* copy packet for user level packet copying or li */
		if (context->dupl) {
			process_pkt_for_li(
					context, S5S8_C_INTFC_OUT, tx_buf, payload_length,
					fill_ip_info(s5s8_recv_sockaddr.type,
							config.s5s8_ip.s_addr,
							config.s5s8_ip_v6.s6_addr),
					fill_ip_info(s5s8_recv_sockaddr.type,
							s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
							s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
					config.s5s8_port,
					((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
						ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
						ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
		}
	}
	RTE_SET_USED(unused_param);
	return 0;
}
/* Modify Bearer Command handler */
/*
 * The function handles a Modify Bearer Command when sent from the MME to
 * the SGWC, and also when the SGWC forwards the same command to the PGWC.
 */
/* Handle a Modify Bearer Command: process it locally and, in SGWC role,
 * relay the rebuilt command to the PGWC over S5/S8 with a retransmission
 * timer and an optional lawful-interception copy.
 *
 * Fix: the lawful-interception copy passed the peer port still in
 * network byte order; it now converts with ntohs(), matching every other
 * handler in this file.
 *
 * NOTE(review): `ret` is uint8_t, so a negative return from
 * process_modify_bearer_cmd() would be truncated before being passed on
 * as a cause — confirm callees return non-negative causes here.
 *
 * @param data         msg_info with the decoded Modify Bearer Command.
 * @param unused_param unused; handler-signature placeholder.
 * @return 0 on success, -1 on failure (failure indication already sent).
 */
int
process_modify_bearer_command_handler(void *data, void *unused_param)
{
	ue_context *context = NULL;
	uint16_t payload_length = 0;
	uint8_t ret = 0;
	msg_info *msg = (msg_info *)data;

	/* tx_buf is a file-scope scratch buffer for the outgoing GTPv2-C PDU. */
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	ret = get_ue_context(msg->gtpc_msg.mod_bearer_cmd.header.teid.has_teid.teid,
			&context);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Failed to get"
				" UE context for teid: %u\n", LOG_VALUE,
				msg->gtpc_msg.mod_bearer_cmd.header.teid.has_teid.teid);
		modify_bearer_failure_indication(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0, msg->interface_type);
		return -1;
	}

	ret = process_modify_bearer_cmd(&msg->gtpc_msg.mod_bearer_cmd, gtpv2c_tx, context);
	if(ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing Modify Bearer Command with cause: %s \n",
				LOG_VALUE, cause_str(ret));
		modify_bearer_failure_indication(msg, ret, CAUSE_SOURCE_SET_TO_0,
				context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		return -1;
	}

	/* SGWC relays the rebuilt command towards the PGWC on S5/S8. */
	if (SGWC == context->cp_mode) {
		payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
			+ sizeof(gtpv2c_tx->gtpc);

		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
				s5s8_recv_sockaddr, SENT);

		/* Arm a retransmission timer for the forwarded command. */
		add_gtpv2c_if_timer_entry(
				msg->gtpc_msg.mod_bearer_cmd.header.teid.has_teid.teid,
				&s5s8_recv_sockaddr, tx_buf, payload_length,
				GET_EBI_INDEX(msg->gtpc_msg.mod_bearer_cmd.bearer_context.eps_bearer_id.ebi_ebi), S5S8_IFACE, SGWC);

		/* copy packet for user level packet copying or li */
		if (context != NULL && context->dupl) {
			process_pkt_for_li(
					context, S5S8_C_INTFC_OUT, tx_buf, payload_length,
					fill_ip_info(s5s8_recv_sockaddr.type,
							config.s5s8_ip.s_addr,
							config.s5s8_ip_v6.s6_addr),
					fill_ip_info(s5s8_recv_sockaddr.type,
							s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
							s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
					config.s5s8_port,
					((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
						ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
						ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
		}
	}
	RTE_SET_USED(unused_param);
	return 0;
}
/* Delete Bearer Command (bearer deactivation) */
/*
 * The function handles a Delete Bearer Command sent from the MME to the
 * SGWC, and also when the SGWC forwards the same command to the PGWC.
 */
int
process_delete_bearer_command_handler(void *data, void *unused_param)
{
ue_context *context = NULL;
uint16_t payload_length = 0;
uint8_t ret = 0;
msg_info *msg = (msg_info *)data;
bzero(&tx_buf, sizeof(tx_buf));
gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
ret = get_ue_context(msg->gtpc_msg.del_ber_cmd.header.teid.has_teid.teid,
&context);
if (ret) {
clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Failed to get"
" UE context for teid: %u\n", LOG_VALUE,
msg->gtpc_msg.del_ber_cmd.header.teid.has_teid.teid);
delete_bearer_cmd_failure_indication(msg, ret, CAUSE_SOURCE_SET_TO_0,
msg->interface_type);
return -1;
}
ret = process_delete_bearer_cmd_request(&msg->gtpc_msg.del_ber_cmd, gtpv2c_tx, context);
if(ret) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
" processing Delete Bearer Command with cause: %s \n",
LOG_VALUE, cause_str(ret));
delete_bearer_cmd_failure_indication(msg, ret, CAUSE_SOURCE_SET_TO_0,
msg->interface_type);
return -1;
}
if (SGWC == context->cp_mode ) {
payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
+ sizeof(gtpv2c_tx->gtpc);
gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
s5s8_recv_sockaddr, SENT);
/* copy packet for user level packet copying or li */
if (context->dupl) {
process_pkt_for_li(
context, S5S8_C_INTFC_OUT, tx_buf, payload_length,
fill_ip_info(s5s8_recv_sockaddr.type,
config.s5s8_ip.s_addr,
config.s5s8_ip_v6.s6_addr),
fill_ip_info(s5s8_recv_sockaddr.type,
s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
config.s5s8_port,
((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
}
}
RTE_SET_USED(unused_param);
return 0;
}
/* Handle the CCA-U received in the Delete Bearer Command flow: map the
 * Gx session-id to the call id, look up the PDN connection and trigger
 * the PFCP session modification for the delete command.
 * Returns 0 on success, -1 on failure. */
int del_bearer_cmd_ccau_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;
	pdn_connection *pdn = NULL;
	uint32_t call_id = 0;
	int rc = 0;

	RTE_SET_USED(unused_param);

	/* Extract the call id from session id */
	if (retrieve_call_id((char *)&msg->gx_msg.cca.session_id.val, &call_id) < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Call Id found "
				"for session id: %s\n", LOG_VALUE,
				(char*) &msg->gx_msg.cca.session_id.val);
		return -1;
	}

	/* Retrieve PDN context based on call id */
	pdn = get_pdn_conn_entry(call_id);
	if (NULL == pdn) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"PDN for CALL_ID : %u\n",LOG_VALUE, call_id);
		return -1;
	}

	rc = process_sess_mod_req_del_cmd(pdn);
	if (rc != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing PFCP Session Modification Response with cause: %s \n",
				LOG_VALUE, cause_str(rc));
		return -1;
	}
	return 0;
}
/*Attach with Dedicated bearer flow*/
/**
 * @brief Handles the PFCP Session Establishment Response in the attach
 * with dedicated bearer flow. Builds the piggybacked GTPv2-C payload
 * (Create Session Response immediately followed by a Create Bearer
 * Request in tx_buf) and forwards it on S5/S8 when acting as PGWC,
 * otherwise on S11 towards the MME. On failure a Create Session error
 * response is generated towards the peer.
 *
 * @param data          msg_info carrying the PFCP establishment response.
 * @param unused_param  unused handler argument.
 * @return 0 on success, -1 on failure.
 */
int process_sess_est_resp_dedicated_handler(void *data, void *unused_param)
{
	uint16_t payload_length = 0;
	msg_info *msg = (msg_info *)data;
	/* Fixed: stray empty statement ("NULL;;") removed */
	gtpv2c_header_t *gtpv2c_cbr_t = NULL;

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	/* NOTE: 'ret' is the file-scope status variable shared by these handlers */
	ret = process_pfcp_sess_est_resp(
			&msg->pfcp_msg.pfcp_sess_est_resp, gtpv2c_tx, true);
	if (ret) {
		if (ret != -1) {
			cs_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
					msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
			process_error_occured_handler(data, unused_param);
		}
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing PFCP Session Establishment Response with cause: %s \n",
				LOG_VALUE, cause_str(ret));
		return -1;
	}

	/* The CBR sits right after the CSR in tx_buf; total payload spans
	 * both encoded messages plus both GTP headers. */
	gtpv2c_cbr_t = (gtpv2c_header_t *)((uint8_t *)gtpv2c_tx +
			ntohs(gtpv2c_tx->gtpc.message_len) + sizeof(gtpv2c_tx->gtpc));
	payload_length = ntohs(gtpv2c_tx->gtpc.message_len) + ntohs(gtpv2c_cbr_t->gtpc.message_len)
		+ sizeof(gtpv2c_tx->gtpc) + sizeof(gtpv2c_cbr_t->gtpc);

	if (msg->cp_mode == PGWC) {
		/* PGWC: response goes to the SGW over S5/S8 */
		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
				s5s8_recv_sockaddr, SENT);
		/* Mirror the outgoing packet for lawful interception */
		process_cp_li_msg(
				msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid,
				S5S8_C_INTFC_OUT, tx_buf, payload_length,
				fill_ip_info(s5s8_recv_sockaddr.type,
						config.s5s8_ip.s_addr,
						config.s5s8_ip_v6.s6_addr),
				fill_ip_info(s5s8_recv_sockaddr.type,
						s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
						s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
				config.s5s8_port,
				((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
					ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
	} else {
		/* Send response on s11 interface */
		gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
				s11_mme_sockaddr, ACC);
		process_cp_li_msg(
				msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid,
				S11_INTFC_OUT, tx_buf, payload_length,
				fill_ip_info(s11_mme_sockaddr.type,
						config.s11_ip.s_addr,
						config.s11_ip_v6.s6_addr),
				fill_ip_info(s11_mme_sockaddr.type,
						s11_mme_sockaddr.ipv4.sin_addr.s_addr,
						s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
				config.s11_port,
				((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s11_mme_sockaddr.ipv4.sin_port) :
					ntohs(s11_mme_sockaddr.ipv6.sin6_port)));
	}
	RTE_SET_USED(unused_param);
	return 0;
}
/* Handles a Create Session Response that arrives together with a Create
 * Bearer Request on SGWC and kicks off the corresponding PFCP
 * modification towards the user plane. */
int
process_cs_resp_dedicated_handler(void *data, void *unused)
{
	msg_info *msg = (msg_info *)data;

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Handler"
		"process_cs_resp_dedicated_handler is called", LOG_VALUE);

	/* 'ret' is the file-scope status variable shared by these handlers */
	ret = process_cs_resp_cb_request(&msg->gtpc_msg.cb_req);
	if (ret != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing Create Session Response with cause: %s \n",
			LOG_VALUE, cause_str(ret));
		return -1;
	}

	RTE_SET_USED(data);
	RTE_SET_USED(unused);
	return 0;
}
/*handles modification response from up side for attach with dedicated flow*/
/**
 * @brief Handles the PFCP Session Modification Response in the attach with
 * dedicated bearer flow. Depending on the message that triggered the
 * modification, it either sends a Modify Bearer Response plus a separate
 * Create Bearer Request (SGWC split across S11/S5S8), a single response on
 * S11 (SAEGWC), or a piggybacked pair of GTP messages towards the MME.
 *
 * @param data    msg_info carrying the PFCP modification response.
 * @param unused  unused handler argument.
 * @return 0 on success, -1 or a GTPv2-C cause value on failure.
 */
int
process_pfcp_sess_mod_resp_cs_dedicated_handler(void *data, void *unused)
{
	uint16_t payload_length = 0;
	struct resp_info *resp = NULL;
	ue_context *context = NULL;
	uint32_t teid = 0;
	gtpv2c_header_t *gtpv2c_cbr_t = NULL;
	msg_info *msg = (msg_info *)data;
	uint64_t sess_id = msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid;
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
	/* Transaction state stored against the PFCP session id */
	if (get_sess_entry(sess_id , &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"NO Session Entry Found "
			"for session ID : %lu\n", LOG_VALUE, sess_id);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}
	/* The control-plane TEID is packed into the SEID (UE_SESS_ID macro) */
	teid = UE_SESS_ID(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid);
	ret = get_ue_context(teid, &context);
	if (ret) {
		if(ret != -1)
			pfcp_modification_error_response(resp, msg, ret);
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE "
			"context for teid: %u\n", LOG_VALUE, teid);
		return -1;
	}
	/* Mark that the CBR is to be encoded piggybacked in the same buffer */
	context->piggyback = TRUE;
	ret = process_pfcp_sess_mod_resp_cs_cbr_request(sess_id, gtpv2c_tx, resp);
	if (ret != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing PFCP Session Modification Response with cause: %s \n",
			LOG_VALUE, cause_str(ret));
		return ret;
	}
	if(resp->msg_type == GTP_MODIFY_BEARER_REQ) {
		if(context->cp_mode == SGWC) {
			uint8_t buf1[MAX_GTPV2C_UDP_LEN] = {0};
			gtpv2c_cbr_t = (gtpv2c_header_t *)buf1;
			/* A non-zero MBR TEID means an MBR response for the MME was
			 * encoded first in tx_buf: send it on S11. */
			if (resp->gtpc_msg.mbr.header.teid.has_teid.teid) {
				payload_length = ntohs(gtpv2c_tx->gtpc.message_len) + sizeof(gtpv2c_tx->gtpc);
				gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
					s11_mme_sockaddr, ACC);
			}
			/* Remember the S11 portion length for the LI copy below */
			uint16_t payload_length_s11 = payload_length;
			if (!resp->gtpc_msg.mbr.header.teid.has_teid.teid) {
				/* No MBR response: tx_buf holds only the CBR for S5/S8 */
				payload_length = 0;
				payload_length = ntohs(gtpv2c_tx->gtpc.message_len) + sizeof(gtpv2c_tx->gtpc);
				gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
					s5s8_recv_sockaddr, SENT);
			} else {
				/* CBR follows the MBR response inside tx_buf: send just the
				 * CBR part on S5/S8. */
				gtpv2c_cbr_t = (gtpv2c_header_t *)((uint8_t *)gtpv2c_tx +
					ntohs(gtpv2c_tx->gtpc.message_len) + sizeof(gtpv2c_tx->gtpc));
				payload_length = 0;
				payload_length = ntohs(gtpv2c_cbr_t->gtpc.message_len) + sizeof(gtpv2c_cbr_t->gtpc);
				gtpv2c_send(s5s8_fd, s5s8_fd_v6, (uint8_t *)gtpv2c_cbr_t, payload_length,
					s5s8_recv_sockaddr, SENT);
			}
			context->piggyback = FALSE;
			/* NOTE(review): the LI copy for S5/S8 duplicates tx_buf from its
			 * start with the S5/S8 payload length — presumably intended to
			 * capture the message sent on that interface; verify offsets. */
			uint8_t tx_buf_temp[MAX_GTPV2C_UDP_LEN] = {0};
			memcpy(tx_buf_temp, tx_buf, payload_length);
			process_cp_li_msg(
				msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
				S11_INTFC_OUT, tx_buf, payload_length_s11,
				fill_ip_info(s11_mme_sockaddr.type,
					config.s11_ip.s_addr,
					config.s11_ip_v6.s6_addr),
				fill_ip_info(s11_mme_sockaddr.type,
					s11_mme_sockaddr.ipv4.sin_addr.s_addr,
					s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
				config.s11_port,
				((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s11_mme_sockaddr.ipv4.sin_port) :
					ntohs(s11_mme_sockaddr.ipv6.sin6_port)));
			process_cp_li_msg(
				msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
				S5S8_C_INTFC_OUT, tx_buf_temp, payload_length,
				fill_ip_info(s5s8_recv_sockaddr.type,
					config.s5s8_ip.s_addr,
					config.s5s8_ip_v6.s6_addr),
				fill_ip_info(s5s8_recv_sockaddr.type,
					s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
					s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
				config.s5s8_port,
				((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
					ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
		} else if (context->cp_mode == SAEGWC){
			/* Combined node: single response towards the MME on S11 */
			context->piggyback = FALSE;
			payload_length = ntohs(gtpv2c_tx->gtpc.message_len) + sizeof(gtpv2c_tx->gtpc);
			gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
				s11_mme_sockaddr, ACC);
			process_cp_li_msg(
				msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
				S11_INTFC_OUT, tx_buf, payload_length,
				fill_ip_info(s11_mme_sockaddr.type,
					config.s11_ip.s_addr,
					config.s11_ip_v6.s6_addr),
				fill_ip_info(s11_mme_sockaddr.type,
					s11_mme_sockaddr.ipv4.sin_addr.s_addr,
					s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
				config.s11_port,
				((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s11_mme_sockaddr.ipv4.sin_port) :
					ntohs(s11_mme_sockaddr.ipv6.sin6_port)));
		}
	} else {
		/* Non-MBR trigger: both piggybacked GTP messages go to the MME */
		gtpv2c_cbr_t = (gtpv2c_header_t *)((uint8_t *)gtpv2c_tx + ntohs(gtpv2c_tx->gtpc.message_len) + sizeof(gtpv2c_tx->gtpc));
		payload_length = ntohs(gtpv2c_tx->gtpc.message_len) + ntohs(gtpv2c_cbr_t->gtpc.message_len)
			+ sizeof(gtpv2c_tx->gtpc) + sizeof(gtpv2c_cbr_t->gtpc);
		gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
			s11_mme_sockaddr, ACC);
		process_cp_li_msg(
			msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
			S11_INTFC_OUT, tx_buf, payload_length,
			fill_ip_info(s11_mme_sockaddr.type,
				config.s11_ip.s_addr,
				config.s11_ip_v6.s6_addr),
			fill_ip_info(s11_mme_sockaddr.type,
				s11_mme_sockaddr.ipv4.sin_addr.s_addr,
				s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
			config.s11_port,
			((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
				ntohs(s11_mme_sockaddr.ipv4.sin_port) :
				ntohs(s11_mme_sockaddr.ipv6.sin6_port)));
	}
	RTE_SET_USED(unused);
	return 0;
}
/* Handles a Modify Bearer Request paired with a Create Bearer Response
 * in the attach-with-dedicated-bearer flow. Re-transmitted requests are
 * reported back unchanged; any other failure produces an MBR error
 * response on S11. */
int
process_mb_request_cb_resp_handler(void *data, void *unused)
{
	msg_info *msg = (msg_info *)data;

	/* 'ret' is the file-scope status variable shared by these handlers */
	ret = process_mb_request_cb_response(&msg->gtpc_msg.mbr, &msg->cb_rsp);
	if (ret != 0) {
		if (ret == GTPC_RE_TRANSMITTED_REQ)
			return ret;

		mbr_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0, S11_IFACE);
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing Modify Bearer Request with cause: %s \n",
			LOG_VALUE, cause_str(ret));
		return -1;
	}

	RTE_SET_USED(unused);
	return 0;
}
/* Function */
/**
 * @brief Handles a Delete PDN Connection Set Request (partial failure
 * procedure, FQ-CSID based). Cleans up the affected PDN connections and
 * answers the originating peer with a Delete PDN Connection Set Response;
 * the response interface is chosen by which FQ-CSIDs were present in the
 * request (PGW/SGW CSIDs -> S5/S8, MME CSID -> S11).
 * Compiled out unless USE_CSID is defined.
 *
 * @param data       msg_info carrying the request.
 * @param peer_addr  peer_addr_t of the requesting node (response target).
 * @return 0 on success, -1 on failure.
 */
int
process_del_pdn_conn_set_req(void *data, void *peer_addr)
{
#ifdef USE_CSID
	uint16_t payload_length = 0;
	msg_info *msg = (msg_info *)data;
	peer_addr_t peer_ip = {0};
	/* Copy the peer address locally; the response goes back to the sender */
	memcpy(&peer_ip, peer_addr, sizeof(peer_addr_t));
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
	/* Tear down all sessions matching the received FQ-CSIDs */
	ret = process_del_pdn_conn_set_req_t(&msg->gtpc_msg.del_pdn_req);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing Delete PDN Connection Set Request with cause: %s \n",
			LOG_VALUE, cause_str(ret));
		return -1;
	}
	/* Send Response back to peer node */
	ret = fill_gtpc_del_set_pdn_conn_rsp(gtpv2c_tx,
		msg->gtpc_msg.del_pdn_req.header.teid.has_teid.seq,
		GTPV2C_CAUSE_REQUEST_ACCEPTED);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" Filling Delete PDN Connection Set Response with cause: %s \n",
			LOG_VALUE, cause_str(ret));
		return -1;
	}
	payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
		+ sizeof(gtpv2c_tx->gtpc);
	/* Request originated from a PGW/SGW: answer over the S5/S8 socket */
	if ((msg->gtpc_msg.del_pdn_req.pgw_fqcsid.number_of_csids) ||
			(msg->gtpc_msg.del_pdn_req.sgw_fqcsid.number_of_csids)) {
		/* Send response to PGW */
		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
			peer_ip, ACC);
	}
	/* Request originated from an MME: answer over the S11 socket */
	if (msg->gtpc_msg.del_pdn_req.mme_fqcsid.number_of_csids) {
		/* Send the delete PDN set request to MME */
		gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
			peer_ip, ACC);
	}
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
		"Send GTPv2C Delete PDN Connection Set Response..!!!\n",
		LOG_VALUE);
#else
	RTE_SET_USED(data);
	RTE_SET_USED(peer_addr);
#endif /* USE_CSID */
	return 0;
}
/* Handles the Delete PDN Connection Set Response received for a
 * previously sent request (FQ-CSID partial-failure procedure).
 * Compiled out unless USE_CSID is defined. */
int
process_del_pdn_conn_set_rsp(void *data, void *unused_param)
{
#ifdef USE_CSID
	msg_info *msg = (msg_info *)data;

	/* 'ret' is the file-scope status variable shared by these handlers */
	ret = process_del_pdn_conn_set_rsp_t(&msg->gtpc_msg.del_pdn_rsp);
	if (ret != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing Delete PDN Connection Set Response with cause: %s \n",
			LOG_VALUE, cause_str(ret));
		return -1;
	}
#else
	RTE_SET_USED(data);
#endif /* USE_CSID */
	RTE_SET_USED(unused_param);
	return 0;
}
/* Handles a PGW Restart Notification Acknowledge: only verifies that the
 * peer accepted the notification. Compiled out unless USE_CSID is set. */
int
process_pgw_rstrt_notif_ack(void *data, void *unused_param)
{
#ifdef USE_CSID
	msg_info *msg = (msg_info *)data;

	/* Anything but REQUEST_ACCEPTED is treated as a failure */
	if (GTPV2C_CAUSE_REQUEST_ACCEPTED !=
			msg->gtpc_msg.pgw_rstrt_notif_ack.cause.cause_value) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing PGW Restart Notification Ack with cause: %s \n",
			LOG_VALUE, cause_str(msg->gtpc_msg.pgw_rstrt_notif_ack.cause.cause_value));
		return -1;
	}
#else
	RTE_SET_USED(data);
#endif /* USE_CSID */
	RTE_SET_USED(unused_param);
	return 0;
}
/* Handles a PFCP Session Set Deletion Request from the user plane and
 * forwards it for CSID-based cleanup. Compiled out unless USE_CSID. */
int process_pfcp_sess_set_del_req(void *data, void *peer_addr)
{
#ifdef USE_CSID
	msg_info *msg = (msg_info *)data;
	peer_addr_t *node_addr = (peer_addr_t *)peer_addr;

	/* 'ret' is the file-scope status variable shared by these handlers */
	ret = process_pfcp_sess_set_del_req_t(&msg->pfcp_msg.pfcp_sess_set_del_req, node_addr);
	if (ret != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing PFCP Set Deletion Request with cause: %s \n",
			LOG_VALUE, cause_str(ret));
		return -1;
	}
#else
	RTE_SET_USED(data);
	RTE_SET_USED(peer_addr);
#endif /* USE_CSID */
	return 0;
}
/* Handles the PFCP Session Set Deletion Response from the user plane.
 * Compiled out unless USE_CSID is defined. */
int process_pfcp_sess_set_del_rsp(void *data, void *unused_param)
{
#ifdef USE_CSID
	msg_info *msg = (msg_info *)data;

	/* 'ret' is the file-scope status variable shared by these handlers */
	ret = process_pfcp_sess_set_del_rsp_t(&msg->pfcp_msg.pfcp_sess_set_del_rsp);
	if (ret != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing PFCP Set Deletion Response with cause: %s \n",
			LOG_VALUE, cause_str(ret));
		return -1;
	}
#else
	RTE_SET_USED(data);
#endif /* USE_CSID */
	RTE_SET_USED(unused_param);
	return 0;
}
/**
 * @brief Handles a Modify Bearer Response received on S5/S8. Unless an
 * SGW F-TEID update (S1 handover) is pending, it encodes a Modify Bearer
 * Response towards the MME on S11 and moves the session to CONNECTED;
 * otherwise it continues the S1-handover modification sequence.
 *
 * Fix: removed a duplicated RTE_SET_USED(unused_param) inside the first
 * branch (it is already issued once before returning).
 *
 * @param data          msg_info carrying the GTP Modify Bearer Response.
 * @param unused_param  unused handler argument.
 * @return 0 on success, -1 or a GTPv2-C cause value on failure.
 */
int
process_mb_resp_handler(void *data, void *unused_param)
{
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Modify Bearer Response RCVD \n",
			LOG_VALUE);
	msg_info *msg = (msg_info *)data;
	int ret = 0;
	ue_context *context = NULL;
	eps_bearer *bearer = NULL;
	pdn_connection *pdn = NULL;
	struct resp_info *resp = NULL;

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	/*Retrive UE state. */
	ret = get_ue_context_by_sgw_s5s8_teid(
			msg->gtpc_msg.mb_rsp.header.teid.has_teid.teid, &context);
	if (ret < 0 || !context) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				"UE Context for teid %d\n",
				LOG_VALUE, msg->gtpc_msg.mb_rsp.header.teid.has_teid.teid);
		return -1;
	}

	/* Cache any Presence Reporting Area action carried in the response */
	if (msg->gtpc_msg.mb_rsp.pres_rptng_area_act.header.len) {
		store_presc_reporting_area_act_to_ue_context(&msg->gtpc_msg.mb_rsp.pres_rptng_area_act,
				context);
	}

	ret = get_bearer_by_teid(msg->gtpc_msg.mb_rsp.header.teid.has_teid.teid,
			&bearer);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Bearer found for "
				"teid:%x \n", LOG_VALUE, msg->gtpc_msg.mb_rsp.header.teid.has_teid.teid);
		mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0, S11_IFACE);
		return -1;
	}
	pdn = bearer->pdn;

	/* Retrive the session information based on session id. */
	if (get_sess_entry(pdn->seid, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
				"for seid: %lu", LOG_VALUE, pdn->seid);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	if (context->update_sgw_fteid != TRUE) {
		/* Regular MBR: relay a Modify Bearer Response to the MME */
		set_modify_bearer_response(gtpv2c_tx,
				context->sequence, context, bearer, &resp->gtpc_msg.mbr);

		ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
		}

		int payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
				+ sizeof(gtpv2c_tx->gtpc);
		gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
				s11_mme_sockaddr, ACC);
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Modify Bearer Response SNT \n",
				LOG_VALUE);

		/* NOTE(review): the LI copy reads the SEID from the PFCP union
		 * member although this handler processes a GTP message — the value
		 * may be stale; verify against the LI requirements. */
		process_cp_li_msg(
				msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
				S11_INTFC_OUT, tx_buf, payload_length,
				fill_ip_info(s11_mme_sockaddr.type,
						config.s11_ip.s_addr,
						config.s11_ip_v6.s6_addr),
				fill_ip_info(s11_mme_sockaddr.type,
						s11_mme_sockaddr.ipv4.sin_addr.s_addr,
						s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
				config.s11_port,
				((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s11_mme_sockaddr.ipv4.sin_port) :
					ntohs(s11_mme_sockaddr.ipv6.sin6_port)));

		resp->state = CONNECTED_STATE;
		pdn->state = CONNECTED_STATE;
	} else {
		/* S1 handover: SGW F-TEID changed, continue the handover sequence */
		process_pfcp_sess_mod_resp_s1_handover(&msg->gtpc_msg.mb_rsp,
				context, pdn, bearer);
	}
	RTE_SET_USED(unused_param);
	return 0;
}
/**
 * @brief Generic error-path handler: derives error-response info for each
 * affected session (one per pending CSR when a PFCP Association Setup was
 * rejected) and tears down the UE context and bearers. For a Session
 * Deletion Response it also flushes any pending usage reports as CDRs.
 *
 * Fix: the inner usage-report loop used 'int i', shadowing the outer
 * 'uint8_t i'; renamed to 'iCnt' (matching the sibling handlers).
 *
 * @param data          msg_info describing the failed procedure.
 * @param unused_param  unused handler argument.
 * @return 0 on success, -1 on an invalid EBI.
 */
int
process_error_occured_handler(void *data, void *unused_param)
{
	int ret = 0;
	msg_info *msg = (msg_info *)data;
	err_rsp_info info_resp = {0};
	uint8_t count = 1;
	upf_context_t *upf_ctx = NULL;

	ret = rte_hash_lookup_data(upf_context_by_ip_hash,
			(const void*) &(msg->upf_ip), (void **) &(upf_ctx));
	/* A rejected PFCP association can fail several buffered CSRs at once */
	if (ret >= 0 && (msg->msg_type == PFCP_ASSOCIATION_SETUP_RESPONSE)
			&& (msg->pfcp_msg.pfcp_ass_resp.cause.cause_value != REQUESTACCEPTED)) {
		count = upf_ctx->csr_cnt;
	}

	for (uint8_t i = 0; i < count; i++) {
		get_error_rsp_info(msg, &info_resp, i);
		int ebi_index = GET_EBI_INDEX(info_resp.ebi);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
			return -1;
		}
		uint32_t teid = info_resp.teid;

		/* Flush pending usage reports before the session disappears */
		if (msg->msg_type == PFCP_SESSION_DELETION_RESPONSE) {
			uint64_t seid = msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid;
			if (msg->pfcp_msg.pfcp_sess_del_resp.usage_report_count != 0) {
				for (int iCnt = 0; iCnt < msg->pfcp_msg.pfcp_sess_del_resp.usage_report_count; iCnt++)
					fill_cdr_info_sess_del_resp(seid,
							&msg->pfcp_msg.pfcp_sess_del_resp.usage_report[iCnt]);
			}
		}

		cleanup_ue_and_bearer(teid, ebi_index);
	}

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT":SM_ERROR: Error handler UE_Proc: %u UE_State: %u "
			"%u and Message_Type:%s\n", LOG_VALUE,
			msg->proc, msg->state,msg->event,
			gtp_type_str(msg->msg_type));

	RTE_SET_USED(unused_param);
	return 0;
}
/* Fallback state-machine entry: logs the unexpected
 * procedure/state/event/message combination and does nothing else. */
int
process_default_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;

	clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT":SM_ERROR: No handler found for UE_Proc: %s UE_State: %s UE_event: "
		"%s and Message_Type: %s\n", LOG_VALUE,
		get_proc_string(msg->proc), get_state_string(msg->state),get_event_string(msg->event),
		gtp_type_str(msg->msg_type));

	RTE_SET_USED(unused_param);
	return 0;
}
/**
 * @brief Handles the PFCP Session Modification Response in the Update
 * Bearer flow. On the Gx path it flushes usage reports as CDRs, drops
 * PDRs/QERs for rules marked for removal, and either invokes the matching
 * RAR continuation or sends a provisioning ACK CCR.
 *
 * Fix: 'pdn' was dereferenced unconditionally at the end
 * (resp->state = pdn->state) although it is only assigned inside the
 * config.use_gx branch — a guaranteed NULL dereference when Gx is
 * disabled. Guarded the assignment.
 *
 * @param data          msg_info carrying the PFCP modification response.
 * @param unused_param  unused handler argument.
 * @return 0 on success, -1 or a GTPv2-C cause value on failure.
 */
int process_pfcp_sess_mod_resp_ubr_handler(void *data, void *unused_param)
{
	int ret = 0;
	struct resp_info *resp = NULL;
	struct pdn_connection_t *pdn = NULL;
	ue_context *context = NULL;
	int ebi_index = 0;
	msg_info *msg = (msg_info *)data;

	uint32_t teid = UE_SESS_ID(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid);

	if (get_sess_entry(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
			&resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"NO Session Entry Found "
				"for session id: %lu\n", LOG_VALUE, msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	if (resp->bearer_count) {
		/*extract ebi_id from array as all the ebi's will be of same pdn.*/
		ebi_index = GET_EBI_INDEX(resp->eps_bearer_ids[0]);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
			pfcp_modification_error_response(resp, msg, GTPV2C_CAUSE_SYSTEM_FAILURE);
			return -1;
		}
	}

	/* Retrieve the UE context */
	ret = get_ue_context(teid, &context);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"UE Context for teid: %d\n", LOG_VALUE, teid);
		pfcp_modification_error_response(resp, msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND);
		return -1;
	}

	if (config.use_gx) {
		eps_bearer *bearer = NULL;
		uint64_t seid = msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid;
		/* Flush any usage reports carried in the modification response */
		if (msg->pfcp_msg.pfcp_sess_mod_resp.usage_report_count != 0) {
			for (int iCnt = 0; iCnt < msg->pfcp_msg.pfcp_sess_mod_resp.usage_report_count; iCnt++)
				fill_cdr_info_sess_mod_resp(seid,
						&msg->pfcp_msg.pfcp_sess_mod_resp.usage_report[iCnt]);
		}

		pdn = GET_PDN(context, ebi_index);
		if (pdn == NULL) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Failed to get pdn"
					" for ebi_index: %d\n",
					LOG_VALUE, ebi_index);
			return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
		}

		/*delete rule name after receiving pfcp mod resp*/
		bearer = context->eps_bearers[ebi_index];
		if (bearer != NULL) {
			for (int idx = 0; idx < pdn->policy.count; idx++) {
				for (int idx2 = 0; idx2 < bearer->pdr_count; idx2++) {
					if ((pdn->policy.pcc_rule[idx]->action == bearer->action) &&
							(strncmp(pdn->policy.pcc_rule[idx]->urule.dyn_rule.rule_name,
								bearer->pdrs[idx2]->rule_name, RULE_NAME_LEN) == 0)) {
						if (pdn->policy.pcc_rule[idx]->action == RULE_ACTION_MODIFY_REMOVE_RULE) {
							int ret = delete_pdr_qer_for_rule(bearer, bearer->pdrs[idx2]->rule_id);
							/* PDR array is compacted on delete; revisit the slot */
							if (ret == 0) {
								idx2--;
							}
						}
					}
				}
			}
		}

		if (pdn->proc != UE_REQ_BER_RSRC_MOD_PROC &&
				pdn->proc != HSS_INITIATED_SUB_QOS_MOD) {
			/* Network-initiated procedure: continue via the RAR path */
			rar_funtions rar_function = NULL;
			rar_function = rar_process(pdn, pdn->proc);
			if (rar_function != NULL) {
				ret = rar_function(pdn);
				if (ret)
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
							" processing RAR function with cause: %s \n",
							LOG_VALUE, cause_str(ret));
			} else {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Non of the RAR function "
						"returned\n", LOG_VALUE);
			}
		} else {
			/* UE/HSS initiated QoS modification: acknowledge via CCR */
			provision_ack_ccr(pdn, context->eps_bearers[ebi_index],
					RULE_ACTION_MODIFY, NO_FAIL);
		}
	}

	/* 'pdn' is only resolved on the Gx path; without Gx there is nothing
	 * to propagate and dereferencing it would crash. */
	if (pdn != NULL)
		resp->state = pdn->state;

	RTE_SET_USED(unused_param);
	return 0;
}
/* Lawful-interception variant of the modification-response handler:
 * purely informational, logs the procedure/state/event and returns. */
int
process_sess_mod_resp_li_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT":Processes response for modification response for %s UE_State: %s UE_event: "
		"%s and Message_Type: %s\n", LOG_VALUE,
		get_proc_string(msg->proc), get_state_string(msg->state),get_event_string(msg->event),
		gtp_type_str(msg->msg_type));

	RTE_SET_USED(unused_param);
	return 0;
}
int
process_cbr_error_occured_handler(void *data, void *unused_param)
{
struct resp_info *resp = NULL;
msg_info *msg = (msg_info *)data;
if (get_sess_entry(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid, &resp) != 0){
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session Entry Found "
"for session id: %lu\n", LOG_VALUE, msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid);
return -1;
}
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Processes response for modification"
"response for %s UE_State: %s UE_event: "
"%s and Message_Type: %s\n", LOG_VALUE,
get_proc_string(msg->proc), get_state_string(msg->state),get_event_string(msg->event),
gtp_type_str(msg->msg_type));
reset_resp_info_structure(resp);
RTE_SET_USED(unused_param);
return 0;
}
int
process_dbr_error_occured_handler(void *data, void *unused_param)
{
struct resp_info *resp = NULL;
msg_info *msg = (msg_info *)data;
if (get_sess_entry(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid, &resp) != 0){
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session Entry Found "
"for session id: %lu\n", LOG_VALUE, msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid);
return -1;
}
if (msg->msg_type == MME_INI_DEDICATED_BEARER_DEACTIVATION_PROC) {
/*TODO : Add handling of Failure Provisional Ack*/
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Processes response for"
"modification response for %s UE_State: %s UE_event: "
"%s and Message_Type: %s\n", LOG_VALUE,
get_proc_string(msg->proc), get_state_string(msg->state), get_event_string(msg->event),
gtp_type_str(msg->msg_type));
}
reset_resp_info_structure(resp);
RTE_SET_USED(unused_param);
return 0;
}
int
process_bearer_resource_cmd_error_handler(void *data, void *unused_param)
{
struct resp_info *resp = NULL;
msg_info *msg = (msg_info *)data;
if (get_sess_entry(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid, &resp) != 0){
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session Entry Found "
"for sess ID: %lu\n", LOG_VALUE, msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid);
return -1;
}
reset_resp_info_structure(resp);
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Processes response for "
"modification response for %s UE_State: %s UE_event: "
"%s and Message_Type: %s\n", LOG_VALUE,
get_proc_string(msg->proc), get_state_string(msg->state),get_event_string(msg->event),
gtp_type_str(msg->msg_type));
RTE_SET_USED(unused_param);
return 0;
}
/* Handles an Update PDN Connection Set Request: delegates to the
 * CSID-aware PFCP session modification and answers with an error
 * response on failure. */
int
process_update_pdn_set_req_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;

	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT":Processes response for modification response for %s UE_State: %s UE_event: "
		"%s and Message_Type: %s\n", LOG_VALUE,
		get_proc_string(msg->proc), get_state_string(msg->state),get_event_string(msg->event),
		gtp_type_str(msg->msg_type));

	int rc = proc_pfcp_sess_mbr_udp_csid_req(&msg->gtpc_msg.upd_pdn_req);
	if (rc != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
			" processing Update PDN Set Request with cause: %s \n",
			LOG_VALUE, cause_str(rc));
		update_pdn_connection_set_error_response(msg, rc, CAUSE_SOURCE_SET_TO_0);
		return -1;
	}

	RTE_SET_USED(unused_param);
	return 0;
}
/**
 * @brief Handles the PFCP Session Modification Response triggered by an
 * Update PDN Connection Set procedure: encodes an Update PDN Connection
 * Set Response (cause accepted) and sends it to the SGW over S5/S8,
 * duplicating the packet for lawful interception when enabled.
 *
 * Fixes: payload_length was declared uint8_t (silently truncating any
 * encoded length > 255; every sibling handler uses uint16_t); 'bearer'
 * could remain NULL after the scan loop and was dereferenced; a redundant
 * second 'pdn = bearer->pdn;' assignment was removed.
 *
 * @param data          msg_info carrying the PFCP modification response.
 * @param unused_param  unused handler argument.
 * @return 0 on success, -1 on failure.
 */
int
process_pfcp_sess_mod_resp_upd_handler(void *data, void *unused_param)
{
	uint16_t payload_length = 0;
	struct resp_info *resp = NULL;
	msg_info *msg = (msg_info *)data;
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	eps_bearer *bearer = NULL;
	uint32_t teid = 0;
	int ret = 0;

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	teid = UE_SESS_ID(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid);

	if (get_sess_entry(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session Entry Found "
				"for session id: %lu\n", LOG_VALUE, msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid);
		return -1;
	}

	/* Retrieve the UE context */
	ret = get_ue_context(teid, &context);
	if (ret) {
		if (ret != -1)
			pfcp_modification_error_response(resp, msg, ret);
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE "
				"context for teid: %u\n", LOG_VALUE, teid);
		return -1;
	}

	/* Build the Update PDN Connection Set Response (always accepted here) */
	upd_pdn_conn_set_rsp_t upd_pdn_rsp = {0};
	set_gtpv2c_teid_header((gtpv2c_header_t *) &upd_pdn_rsp,
			GTP_UPDATE_PDN_CONNECTION_SET_RSP,
			context->s11_mme_gtpc_teid, context->sequence, 0);
	set_cause_accepted(&upd_pdn_rsp.cause, IE_INSTANCE_ZERO);

	/* Pick the first allocated bearer; all bearers share the PDN */
	for (uint8_t i = 0; i < MAX_BEARERS; i++) {
		bearer = context->eps_bearers[i];
		if (bearer != NULL)
			break;
	}
	if (bearer == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"bearer for teid: %u\n", LOG_VALUE, teid);
		return -1;
	}
	pdn = bearer->pdn;

	payload_length = encode_upd_pdn_conn_set_rsp(&upd_pdn_rsp, (uint8_t *)gtpv2c_tx);

	ret = set_dest_address(pdn->s5s8_sgw_gtpc_ip, &s5s8_recv_sockaddr);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
	}

	gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
			s5s8_recv_sockaddr, SENT);

	/* copy packet for user level packet copying or li */
	if (context->dupl) {
		process_pkt_for_li(
				context, S5S8_C_INTFC_OUT, tx_buf, payload_length,
				fill_ip_info(s5s8_recv_sockaddr.type,
						config.s5s8_ip.s_addr,
						config.s5s8_ip_v6.s6_addr),
				fill_ip_info(s5s8_recv_sockaddr.type,
						s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
						s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
				config.s5s8_port,
				((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
					ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
	}

	resp->state = CONNECTED_STATE;
	pdn->state = CONNECTED_STATE;

	RTE_SET_USED(unused_param);
	return 0;
}
/**
 * @brief Handles an Update PDN Connection Set Response received on S5/S8.
 * Completes the pending Modify Bearer or Modify Access Bearer procedure by
 * encoding the corresponding response towards the MME on S11 and moving
 * the session to CONNECTED. Error responses are routed according to which
 * procedure was in progress.
 *
 * @param data          msg_info carrying the Update PDN Conn Set Response.
 * @param unused_param  unused handler argument.
 * @return 0 on success, -1 on failure.
 */
int process_upd_pdn_set_response_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;
	eps_bearer *bearer = NULL;
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	struct resp_info *resp = NULL;
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
	int ret = get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.upd_pdn_rsp.header.teid.has_teid.teid,
		&context);
	if (ret < 0 || !context) {
		/*TODO:AAQUILALI: Handling for both message MABR and MBR*/
		mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
			CAUSE_SOURCE_SET_TO_0, msg->interface_type);
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Failed to get UE Context for teid:%d\n",
			LOG_VALUE, msg->gtpc_msg.upd_pdn_rsp.header.teid.has_teid.teid);
		return -1;
	}
	ret = get_bearer_by_teid(msg->gtpc_msg.upd_pdn_rsp.header.teid.has_teid.teid, &bearer);
	if(ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Bearer found "
			"for teid: %d\n", LOG_VALUE,
			msg->gtpc_msg.upd_pdn_rsp.header.teid.has_teid.teid);
		/* Error response type depends on the procedure in progress */
		if(context->procedure == MODIFY_BEARER_PROCEDURE) {
			mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0, context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		} else {
			mod_access_bearer_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0);
		}
		return -1;
	}
	pdn = bearer->pdn;
	/* Retrive the session information based on session id. */
	if (get_sess_entry(pdn->seid, &resp) != 0){
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"No Session Entry Found for session id: %lu\n",
			LOG_VALUE, pdn->seid);
		if(context->procedure == MODIFY_BEARER_PROCEDURE) {
			mbr_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND,
				CAUSE_SOURCE_SET_TO_0,
				context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		} else {
			mod_access_bearer_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, CAUSE_SOURCE_SET_TO_0);
		}
		return -1;
	}
	/* Encode the response matching the pending procedure */
	if(context->procedure == MODIFY_BEARER_PROCEDURE) {
		set_modify_bearer_response(gtpv2c_tx,
			context->sequence, context, bearer, &resp->gtpc_msg.mbr);
	} else {
		set_modify_access_bearer_response(gtpv2c_tx,
			context->sequence, context, bearer, &resp->gtpc_msg.mod_acc_req);
	}
	ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
			"IP address", LOG_VALUE);
	}
	int payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
		+ sizeof(gtpv2c_tx->gtpc);
	gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
		s11_mme_sockaddr, ACC);
	/* copy packet for user level packet copying or li */
	if (context->dupl) {
		process_pkt_for_li(
			context, S11_INTFC_OUT, tx_buf, payload_length,
			fill_ip_info(s11_mme_sockaddr.type,
				config.s11_ip.s_addr,
				config.s11_ip_v6.s6_addr),
			fill_ip_info(s11_mme_sockaddr.type,
				s11_mme_sockaddr.ipv4.sin_addr.s_addr,
				s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
			config.s11_port,
			((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
				ntohs(s11_mme_sockaddr.ipv4.sin_port) :
				ntohs(s11_mme_sockaddr.ipv6.sin6_port)));
	}
	RTE_SET_USED(unused_param);
	resp->state = CONNECTED_STATE;
	pdn->state = CONNECTED_STATE;
	return 0;
}
/* Placeholder handler for an Update PDN Connection Set Request:
 * intentionally a no-op. */
int process_upd_pdn_conn_set_req(void *data, void *unused_param)
{
	RTE_SET_USED(unused_param);
	RTE_SET_USED(data);
	return 0;
}
/* Placeholder handler for an Update PDN Connection Set Response:
 * intentionally a no-op. */
int process_upd_pdn_conn_set_rsp(void *data, void *unused_param)
{
	RTE_SET_USED(unused_param);
	RTE_SET_USED(data);
	return 0;
}
/**
 * @brief Handles the PFCP Session Deletion Response during context
 * replacement: flushes usage reports as CDRs, removes all rules/bearers
 * and the old UE context, then replays the buffered Create Session
 * Request to rebuild the session (including UPF selection on SGWC).
 *
 * Fixes: the GET_EBI_INDEX result was used unchecked (every sibling
 * handler validates it against -1 before GET_PDN); the cleanup loop's
 * inner 'uint8_t ebi' shadowed the outer 'int ebi' and was renamed.
 *
 * @param data          msg_info carrying the PFCP deletion response.
 * @param unused_param  unused handler argument.
 * @return 0 on success, -1 on failure.
 */
int
process_pfcp_sess_del_resp_context_replacement_handler(void *data, void *unused_param)
{
	int ebi = 0;
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	struct resp_info *resp = NULL;
	msg_info *msg = (msg_info *)data;

	uint64_t sess_id = msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid;
	uint32_t teid = UE_SESS_ID(sess_id);
	int eps_bearer_id = UE_BEAR_ID(msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid);

	ebi = GET_EBI_INDEX(eps_bearer_id);
	if (ebi == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		return -1;
	}

	if (get_sess_entry(sess_id, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
				"for seid: %lu", LOG_VALUE, sess_id);
		return -1;
	}

	/* Retrieve the UE context */
	ret = get_ue_context(teid, &context);
	if (ret) {
		pfcp_modification_error_response(resp, msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND);
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE "
				"context for teid: %u\n", LOG_VALUE, teid);
		return -1;
	}

	pdn = GET_PDN(context, ebi);
	if (pdn == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, ebi);
		return -1;
	}

	/* CDR handling in case of context replacement */
	if (msg->pfcp_msg.pfcp_sess_del_resp.usage_report_count != 0) {
		for (int i = 0; i < msg->pfcp_msg.pfcp_sess_del_resp.usage_report_count; i++) {
			fill_cdr_info_sess_del_resp(sess_id, &msg->pfcp_msg.pfcp_sess_del_resp.usage_report[i]);
		}
	}

	/* delete all rule entries and bearer context */
	for (int i = 0; i < MAX_BEARERS; i++) {
		if (pdn->eps_bearers[i] != NULL) {
			/* renamed from 'ebi' to avoid shadowing the outer index */
			uint8_t ebi_idx = pdn->eps_bearers[i]->eps_bearer_id;
			ebi_idx = GET_EBI_INDEX(ebi_idx);
			if (del_rule_entries(pdn, ebi_idx) != 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT
						"Failed to delete Rule for ebi_index: %d\n", LOG_VALUE, ebi_idx);
			}
			if (delete_bearer_context(pdn, ebi_idx) != 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"Error : While deleting Bearer Context for EBI %d \n", LOG_VALUE, ebi_idx);
			}
		}
	}

	/* Replay the buffered CSR against a fresh context */
	msg->gtpc_msg.csr = resp->gtpc_msg.csr;

	/* deleting UE context */
	delete_sess_context(&context, pdn);

	/* new csr handling */
	ret = process_create_sess_req(&msg->gtpc_msg.csr,
			&context, msg->upf_ip, msg->cp_mode);
	if (ret != 0 && ret != GTPC_RE_TRANSMITTED_REQ) {
		if (ret == GTPC_CONTEXT_REPLACEMENT) {
			/* return success value for context replacement case */
			return 0;
		}
		if (ret != -1) {
			cs_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0,
					msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
			process_error_occured_handler(data, unused_param);
		}
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error recieved while"
				" processing Create Session Request with cause: %s \n",
				LOG_VALUE, cause_str(ret));
		return -1;
	}

	if (SGWC == context->cp_mode && pdn->context != NULL) {
		if (pdn->upf_ip.ip_type == 0) {
			if (config.use_dns) {
				/* UPF resolved asynchronously via DNS */
				push_dns_query(pdn);
				return 0;
			} else {
				/*Filling Node ID*/
				if (config.upf_pfcp_ip_type == PDN_IP_TYPE_IPV4) {
					uint8_t temp[IPV6_ADDRESS_LEN] = {0};
					ret = fill_ip_addr(config.upf_pfcp_ip.s_addr, temp, &pdn->upf_ip);
					if (ret < 0) {
						clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
								"IP address", LOG_VALUE);
					}
				} else if (config.upf_pfcp_ip_type == PDN_IP_TYPE_IPV6) {
					ret = fill_ip_addr(0, config.upf_pfcp_ip_v6.s6_addr, &pdn->upf_ip);
					if (ret < 0) {
						clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
								"IP address", LOG_VALUE);
					}
				}
			}
		}
		if (!context->promotion_flag) {
			process_pfcp_sess_setup(pdn);
		}
	}

	RTE_SET_USED(unused_param);
	RTE_SET_USED(data);
	return ret;
}
/* Handles a Create Indirect Data Forwarding Tunnel Request: creates the
 * tunnel context and starts the PFCP association towards the selected
 * user plane, sending an error response on any failure. */
int
process_create_indir_data_frwd_req_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;
	ue_context *ue_ctxt = NULL;

	int rc = process_create_indir_data_frwd_tun_request(&msg->gtpc_msg.crt_indr_tun_req, &ue_ctxt);
	if (rc != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
			"Error in Creating Indirect Tunnel", LOG_VALUE);
		crt_indir_data_frwd_tun_error_response(msg, rc);
		return -1;
	}

	if (ue_ctxt == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
			"Context Not Found ", LOG_VALUE);
		crt_indir_data_frwd_tun_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND);
		return -1;
	}

	/* Trigger the PFCP association for the tunnel's default bearer */
	rc = process_pfcp_assoication_request(ue_ctxt->indirect_tunnel->pdn,
		(ue_ctxt->indirect_tunnel->pdn->default_bearer_id - NUM_EBI_RESERVED));
	if (rc != 0) {
		if (rc != -1) {
			crt_indir_data_frwd_tun_error_response(msg, rc);
		}
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
			"Error in Association Req Handling For Create Indirect Tunnel MSG",
			LOG_VALUE);
		return -1;
	}

	RTE_SET_USED(unused_param);
	return 0;
}
/**
 * @brief : Handler for a Delete Indirect Data Forwarding Tunnel Request.
 * @param : data, opaque pointer to the decoded msg_info
 * @param : unused_param, unused callback argument
 * @return : 0 on success, -1 on failure (error response is sent to the
 *           peer unless the failure code is -1)
 */
int
process_del_indirect_tunnel_req_handler(void *data, void *unused_param)
{
	msg_info *msg = (msg_info *)data;
	int rc;

	RTE_SET_USED(unused_param);

	rc = process_del_indirect_tunnel_request(&msg->gtpc_msg.dlt_indr_tun_req);
	if (rc == 0)
		return 0;

	/* -1 means no cause value is available to report back */
	if (rc != -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
				"Error in del indirect tunnel req", LOG_VALUE);
		delete_indir_data_frwd_error_response(msg, rc);
	}
	return -1;
}
/**
 * @brief : Handles the PFCP Session Deletion Response received while tearing
 *          down an indirect data forwarding tunnel: builds the GTPv2-C reply,
 *          sends it towards the MME over S11, and decrements the user/session
 *          statistics.
 * @param : data, opaque pointer to the decoded msg_info
 * @param : unused_param, unused callback argument
 * @return : 0 on success, -1 on failure (error response is sent to the
 *           peer unless the failure code is -1)
 */
int
process_pfcp_del_resp_del_indirect_handler(void *data, void *unused_param)
{
	/* Fix: declare the status variable locally. The original body assigned
	 * to an undeclared 'ret', silently relying on a file-scope variable
	 * shared with other handlers — unlike its sibling handlers, which all
	 * declare 'int ret' themselves. */
	int ret = 0;
	int li_sock_fd = -1;
	uint64_t uiImsi = 0;
	uint16_t payload_length = 0;
	msg_info *msg = (msg_info *)data;

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	ret = process_pfcp_sess_del_resp_indirect_tunnel(
			msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid,
			gtpv2c_tx, &uiImsi, &li_sock_fd);
	if (ret) {
		/* -1 means no cause value is available to report back */
		if (ret != -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
					"Error in PFCP DEL Rsp. For DEL Indirect Tunnel req",
					LOG_VALUE);
			delete_indir_data_frwd_error_response(msg, ret);
		}
		return -1;
	}

	/* Total wire length = encoded body length + GTPv2-C header */
	payload_length = ntohs(gtpv2c_tx->gtpc.message_len)
		+ sizeof(gtpv2c_tx->gtpc);

	gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
			s11_mme_sockaddr, ACC);

	update_sys_stat(number_of_users, DECREMENT);
	update_sys_stat(number_of_active_session, DECREMENT);

	/* Lawful-intercept reporting for this path is currently disabled;
	 * kept for reference. */
	/*
	process_cp_li_msg(
			msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
			S11_INTFC_OUT, tx_buf, payload_length,
			ntohl(config.s11_ip.s_addr), ntohl(s11_mme_sockaddr.ipv4.sin_addr.s_addr),
			config.s11_port, ntohs(s11_mme_sockaddr.ipv4.sin_port));

	process_cp_li_msg_for_cleanup(
			uiImsi, li_sock_fd, tx_buf, payload_length,
			config.s11_ip.s_addr, s11_mme_sockaddr.ipv4.sin_addr.s_addr,
			config.s11_port, s11_mme_sockaddr.ipv4.sin_port);
	*/

	RTE_SET_USED(unused_param);
	return 0;
}
/**
 * @brief : Handles a Modify Access Bearer Request: looks up the UE context,
 *          validates conditional IEs, filters retransmissions by sequence
 *          number, and forwards the request into PFCP session modification.
 * @param : data, opaque pointer to the decoded msg_info
 * @param : unused_param, unused callback argument
 * @return : 0 on success (also after a failed PFCP mod request — see note
 *           below), -1 on lookup/pre-check failure,
 *           GTPC_RE_TRANSMITTED_REQ for a duplicate in-progress request
 */
int process_modify_access_bearer_handler(void *data, void *unused_param)
{
	ue_context *context = NULL;
	msg_info *msg = (msg_info *)data;
	/*Retrive UE state. */
	if(get_ue_context(msg->gtpc_msg.mod_acc_req.header.teid.has_teid.teid, &context) != 0) {
		/* NOTE(review): third argument here is S11_IFACE, while the other
		 * two calls below pass CAUSE_SOURCE_SET_TO_0 — these look like
		 * different parameter meanings; confirm the function's signature. */
		mod_access_bearer_error_response(msg, GTPV2C_CAUSE_CONTEXT_NOT_FOUND, S11_IFACE);
		return -1;
	}
	/* NOTE(review): 'ret' is not declared in this function, so it must be a
	 * file-scope variable shared with other handlers — verify this is
	 * intentional (sibling handlers declare a local 'int ret'). */
	ret = modify_acc_bearer_req_pre_check(&msg->gtpc_msg.mod_acc_req);
	if(ret != 0) {
		mod_access_bearer_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0);
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
				"Conditional IE missing Modify Access Bearer Request",
				LOG_VALUE);
		return -1;
	}
	context->procedure = MODIFY_ACCESS_BEARER_PROC;
	/* CHECK FOR Retranmission of Message */
	if(context->req_status.seq == msg->gtpc_msg.mod_acc_req.header.teid.has_teid.seq) {
		if(context->req_status.status == REQ_IN_PROGRESS) {
			/* Discarding re-transmitted mbr */
			return GTPC_RE_TRANSMITTED_REQ;
		}else{
			/* Retransmitted MABR but processing already done for previous req;
			 * re-process it as a fresh request. */
			context->req_status.status = REQ_IN_PROGRESS;
		}
	}else{
		/* New sequence number: record it and mark processing in progress */
		context->req_status.seq = msg->gtpc_msg.mod_acc_req.header.teid.has_teid.seq;
		context->req_status.status = REQ_IN_PROGRESS;
	}
	ret = process_pfcp_mod_req_modify_access_req(&msg->gtpc_msg.mod_acc_req);
	if (ret != 0) {
		/* NOTE(review): an error response is sent but the handler still
		 * falls through to 'return 0' — confirm whether -1 was intended. */
		mod_access_bearer_error_response(msg, ret, CAUSE_SOURCE_SET_TO_0);
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
				"Error in Modify Access Bearer Request Handling", LOG_VALUE);
	}
	RTE_SET_USED(unused_param);
	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | cp/cdr.h | <gh_stars>0
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include "cp.h"
#include "pfcp_session.h"
#define SAEGW_CDR 86
#define SGW_CDR 84
#define PGW_CDR 85
#define SGW_RECORD_TYPE "84"
#define PGW_RECORD_TYPE "85"
#define FORWARD_GATEWAY_RECORD_TYPE "FORWARDING_GATEWAY"
#define CDR_BUFF_SIZE 512
#define MCC_BUFF_SIZE 5
#define MNC_BUFF_SIZE 5
#define CDR_TIME_BUFF 16
#define CDR_PDN_BUFF 16
#define CDR_TRIGG_BUFF 16
#define MAX_ULI_LENGTH 256
#define IP_TYPE_V4 1
#define IP_TYPE_V6 2
#define IP_TYPE_V4V6 3
#define VOLUME_LIMIT "Volume_Limit"
#define TIME_LIMIT "Time_Limit"
#define CDR_TERMINATION "TERMINATION"
#define IPV4 "ipv4"
#define IPV6 "ipv6"
#define IPV4V6 "ipv4v6"
/* Discriminates how a CDR was produced */
typedef enum cp_cdr_type {
	CDR_BY_URR,      /* generated from a PFCP usage report (URR) */
	CDR_BY_SEC_RAT   /* generated from a secondary RAT usage report */
}cdr_type_t;

/**
 * @brief : Maintains CDR related information
 */
typedef struct cdr_param_t {
	cdr_type_t cdr_type;            /* source of this record (URR / secondary RAT) */
	uint8_t bearer_id;              /* EPS bearer the usage belongs to */
	/*applicable in case of usage report*/
	uint32_t urr_id;
	uint8_t record_type;            /* SGW_CDR / PGW_CDR / SAEGW_CDR (see defines above) */
	int change_rat_type_flag;       /* non-zero when the RAT type changed — TODO confirm */
	uint8_t rat_type;
	uint8_t selec_mode;             /* PDN selection mode */
	uint64_t imsi;
	uint64_t seid;                  /* PFCP session ID this record belongs to */
	/* Bearer QoS: maximum and guaranteed bit rates, uplink/downlink */
	uint64_t ul_mbr;
	uint64_t dl_mbr;
	uint64_t ul_gbr;
	uint64_t dl_gbr;
	uint32_t urseqn;                /* usage report sequence number */
	/* Timestamps: presumably seconds since epoch — verify against producer */
	uint32_t data_start_time;
	uint32_t data_end_time;
	uint32_t start_time;
	uint32_t end_time;
	uint32_t duration_meas;         /* measured duration of the reporting interval */
	/* Serving PLMN identity digits (MCC/MNC) */
	uint8_t mcc_digit_2;
	uint8_t mcc_digit_1;
	uint8_t mnc_digit_3;
	uint8_t mnc_digit_2;
	uint8_t mnc_digit_1;
	struct in_addr ue_ip;           /* UE IPv4 address */
	struct in_addr sgw_addr;        /* serving SGW IPv4 address */
	/* Measured traffic volumes for the interval */
	uint64_t data_volume_uplink;
	uint64_t data_volume_downlink;
	uint64_t total_data_volume;
	char trigg_buff[CDR_TRIGG_BUFF]; /* textual trigger cause (e.g. Volume_Limit) */
	uint8_t pdn_type;               /* IP_TYPE_V4 / IP_TYPE_V6 / IP_TYPE_V4V6 */
	uint32_t timestamp_value;
	uint8_t counter_value;
}cdr;
/**
* @brief : Fill cdr info from pfcp-sess-rep-req msg
* @param : seid, session id
* @param : *usage_report,usage report in msg
* @return : Returns 0 on success.
*/
int
fill_cdr_info_sess_rpt_req(uint64_t seid, pfcp_usage_rpt_sess_rpt_req_ie_t *usage_report);
/**
* @brief : Fill cdr info from pfcp-sess-mod-resp msg
* @param : seid, session id
* @param : *usage_report,usage report in msg
* @return : Returns 0 on success.
*/
int
fill_cdr_info_sess_mod_resp(uint64_t seid, pfcp_usage_rpt_sess_mod_rsp_ie_t *usage_report);
/**
* @brief : Fill cdr info from pfcp-sess-del-resp msg
* @param : seid, session id
* @param : *usage_report,usage report in msg
* @return : Returns 0 on success.
*/
int
fill_cdr_info_sess_del_resp(uint64_t seid, pfcp_usage_rpt_sess_del_rsp_ie_t *usage_report);
/**
* @brief : Fill cause code to buffer in form of string
* @param : *usage_rpt_trig,flag containing cause code
* @param : (out param) buf,contain string of cause code
* @return : Returns nothing
*/
void
urr_cause_code_to_str(pfcp_usage_rpt_trig_ie_t *usage_rpt_trig, char *buf);
/**
* @brief : Check pdn type & convert to string
* @param : pdn_type,ipv4/ipv6
* @param : (out param) buf,contain pdn type string
* @return : Returns nothing
*/
void
check_pdn_type(pdn_type_ie *pdn_type, char *buf);
/**
* @brief : Genearet CDR into buffer & push to redis server
* @param : fill_cdr, structure containing cdr info
* @return : Returns 0 on success,else -1 on failure.
*/
int
generate_cdr_info(cdr *fill_cdr);
/**
* @brief : Generate CDR seq no
* @param : nothing
* @return : Returns unique cdr id
*/
uint32_t
generate_cdr_seq_no(void);
/**
* @brief : Get bearer no for pdn
* @param : urr_id
* @param : pdn
* @return : Returns bearer index on
* success,else -1
*/
int
get_bearer_index_by_urr_id(uint32_t urr_id, pdn_connection *pdn);
/**
* @brief : Get rule name on the basis of urr id
* @param : urr_id
* @param : bearer, pointer to bearer
* @param : rule_name, out parameter
* @return : Returns 0 on
* success,else -1
*/
int
get_rule_name_by_urr_id(uint32_t urr_id, eps_bearer *bearer,
char *rule_name);
/**
* @brief : Fill different ULI parameter in buffer
* @param : uli, pointer to User Location Info in context
* @param : uli_buff, buffer as a out parameter
* @return : Returns 0 on success
*
*/
int
fill_user_loc_info(user_loc_info_t *uli, char *uli_buff);
|
nikhilc149/e-utran-features-bug-fixes | cp/cp_stats.c | <reponame>nikhilc149/e-utran-features-bug-fixes
/*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <inttypes.h>
#include <unistd.h>
#include <string.h>
#include "cp_stats.h"
#include "cp.h"
#include <sys/stat.h>
#include <netinet/in.h>
#include <stdbool.h>
#include "gw_adapter.h"
struct cp_stats_t cp_stats;
/**
 * @brief : callback used to display rx packets per second; computes the
 *          delta against the value sampled on the previous call and
 *          updates the sample.
 * @param : void
 * @return : number of packets received by the control plane s11 interface
 *           since the previous invocation
 */
static uint64_t
rx_pkts_per_sec(void)
{
	uint64_t delta = cp_stats.rx - cp_stats.rx_last;

	cp_stats.rx_last = cp_stats.rx;
	return delta;
}
/**
 * @brief : callback used to display tx packets per second; computes the
 *          delta against the value sampled on the previous call and
 *          updates the sample.
 * @param : void
 * @return : number of packets transmitted by the control plane s11
 *           interface since the previous invocation
 */
static uint64_t
tx_pkts_per_sec(void)
{
	uint64_t delta = cp_stats.tx - cp_stats.tx_last;

	cp_stats.tx_last = cp_stats.tx;
	return delta;
}
/**
 * @brief : callback used to display control plane uptime; returns the
 *          current tick and advances both the local uptime counter and
 *          the OSS reset-time counter.
 * @param : void
 * @return : control plane uptime in seconds (value before increment)
 */
static uint64_t
stats_time(void)
{
	uint64_t now = cp_stats.time++;

	cli_node.cli_config.oss_reset_time++;
	return now;
}
/**
 * @brief : statistics entry used to simplify statistics by providing a common
 * interface for statistic values or calculations and their names
 */
struct stat_entry_t {
	/* discriminant: VALUE reads *value directly, LAMBDA calls lambda() */
	enum {VALUE, LAMBDA} type;
	uint8_t spacing; /** printf field width used when printing this entry */
	union {
		uint64_t *value; /** value used by stat (valid when type == VALUE) */
		uint64_t (*lambda)(void); /** stat callback function (valid when type == LAMBDA) */
	};
	const char *top; /** top column stat name string */
	const char *bottom; /** bottom column stat name string */
};
#define DEFINE_VALUE_STAT(spacing, function, top, bottom) \
{VALUE, spacing, {.value = function}, top, bottom}
#define DEFINE_LAMBDA_STAT(spacing, function, top, bottom) \
{LAMBDA, spacing, {.lambda = function}, top, bottom}
#define PRINT_STAT_ENTRY_HEADER(entry_index, header) \
printf("%*s ",\
stat_entries[entry_index].spacing, \
stat_entries[entry_index].header)
/**
 * @brief : statistic entry definitions; array order defines the printed
 * column order, and the first numeric argument is the column width.
 */
struct stat_entry_t stat_entries[] = {
	DEFINE_LAMBDA_STAT(5, stats_time, "", "time"),
	DEFINE_VALUE_STAT(8, &cp_stats.rx, "rx", "pkts"),
	DEFINE_VALUE_STAT(8, &cp_stats.tx, "tx", "pkts"),
	DEFINE_LAMBDA_STAT(8, rx_pkts_per_sec, "rx pkts", "/sec"),
	DEFINE_LAMBDA_STAT(8, tx_pkts_per_sec, "tx pkts", "/sec"),
	DEFINE_VALUE_STAT(8, &cp_stats.create_session, "create", "session"),
	DEFINE_VALUE_STAT(8, &cp_stats.modify_bearer, "modify", "bearer"),
	DEFINE_VALUE_STAT(8, &cp_stats.bearer_resource, "b resrc", "cmd"),
	DEFINE_VALUE_STAT(8, &cp_stats.create_bearer, "create", "bearer"),
	DEFINE_VALUE_STAT(8, &cp_stats.delete_bearer, "delete", "bearer"),
	DEFINE_VALUE_STAT(8, &cp_stats.delete_session, "delete", "session"),
	DEFINE_VALUE_STAT(8, &cp_stats.echo, "", "echo"),
	DEFINE_VALUE_STAT(8, &cp_stats.rel_access_bearer, "rel acc", "bearer"),
	DEFINE_VALUE_STAT(8, &cp_stats.ddn, "", "ddn"),
	DEFINE_VALUE_STAT(8, &cp_stats.ddn_ack, "ddn", "ack"),
};
/**
* @brief : prints out statistics entries
* @param : void
* @return : void
*/
static inline void
print_stat_entries(void) {
unsigned i;
if (!(cp_stats.time % 32)) {
puts("");
for (i = 0; i < RTE_DIM(stat_entries); ++i)
PRINT_STAT_ENTRY_HEADER(i, top);
puts("");
for (i = 0; i < RTE_DIM(stat_entries); ++i)
PRINT_STAT_ENTRY_HEADER(i, bottom);
puts("");
}
for (i = 0; i < RTE_DIM(stat_entries); ++i) {
printf("%*"PRIu64" ", stat_entries[i].spacing,
(stat_entries[i].type == VALUE) ?
*stat_entries[i].value :
(*stat_entries[i].lambda)());
}
puts("");
}
/**
 * @brief : statistics thread body; prints one stats row per second,
 *          forever.
 * @param : ptr, unused thread argument
 * @return : never returns under normal operation (0 is unreachable)
 */
int
do_stats(__rte_unused void *ptr)
{
	for (;;) {
		print_stat_entries();
		sleep(1);
	}

	return 0;
}
/* Zero every counter in the global cp_stats structure. */
void
reset_cp_stats(void) {
	memset(&cp_stats, 0, sizeof(cp_stats));
}
|
nikhilc149/e-utran-features-bug-fixes | cp/packet_filters.h | /*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PACKET_FILTERS_H
#define PACKET_FILTERS_H
/**
* @file
*
* Contains functions to initialize, manage, and install packet filters internal
* to the Control Plane as well as calls to forward the installed packet filters
* to the Data Plane.
*/
#include "ue.h"
#define FIRST_FILTER_ID 1
#define METER_PROFILE_FILE "../config/meter_profile.cfg"
#define PCC_RULE_FILE "../config/pcc_rules.cfg"
#define SDF_RULE_FILE "../config/sdf_rules.cfg"
#define ADC_RULE_FILE "../config/adc_rules.cfg"
extern uint16_t ulambr_idx;
extern uint16_t dlambr_idx;
enum IP_TYPE{
IPV4_ADDR_TYPE = 0,
IPV6_ADDR_TYPE = 1
};
/**
 * @brief : Maintains packet filter information
 */
typedef struct pkt_fltr_t {
	uint8_t direction;               /* TFT packet filter direction (3GPP 24.008) — see get_packet_filter_direction() */
	uint8_t proto;                   /* IP protocol number to match */
	uint8_t proto_mask;              /* mask applied to 'proto' when matching */
	uint8_t v4;                      /* non-zero: IPv4 addresses below are valid */
	uint8_t v6;                      /* non-zero: IPv6 addresses below are valid */
	uint8_t local_ip_mask;           /* local address mask — presumably a prefix length; verify */
	uint8_t remote_ip_mask;          /* remote address mask — presumably a prefix length; verify */
	/* Port ranges to match, inclusive low..high */
	uint16_t remote_port_low;
	uint16_t remote_port_high;
	uint16_t local_port_low;
	uint16_t local_port_high;
	struct in_addr local_ip_addr;    /* IPv4 local address (valid when v4 set) */
	struct in_addr remote_ip_addr;   /* IPv4 remote address (valid when v4 set) */
	struct in6_addr local_ip6_addr;  /* IPv6 local address (valid when v6 set) */
	struct in6_addr remote_ip6_addr; /* IPv6 remote address (valid when v6 set) */
} pkt_fltr;
/**
 * @brief : Maintains packet filter information along with uplink and
 * downlink info
 */
typedef struct packet_filter_t {
	pkt_fltr pkt_fltr;    /* match criteria */
	uint16_t ul_mtr_idx;  /* uplink meter profile index */
	uint16_t dl_mtr_idx;  /* downlink meter profile index */
} packet_filter;
extern const pkt_fltr catch_all;
/**
* @brief : Adds packet filter entry
* @param : index, index of array where packet filter needs to be added
* @return : Returns nothing
*/
void
push_packet_filter(uint16_t index);
/**
* @brief : Adds sdf rule entry
* @param : index, index of array where sdf rule needs to be added
* @return : Returns nothing
*/
void
push_sdf_rules(uint16_t index);
/**
* @brief : Installs a packet filter in the CP & DP.
* @param : new_packet_filter
* A packet filter yet to be installed
* @return : - >= 0 - on success - indicates index of packet filter
* - < 0 - on error
*/
int
install_packet_filter(const packet_filter *new_packet_filter);
/**
* @brief : Returns the packet filter index.
* @param : pf, Packet filter
* @return : Packet filter index matching packet filter 'pf'
*/
int
get_packet_filter_id(const pkt_fltr *pf);
/**
* @brief : Clears the packet filter at '*pf' to accept all packets.
* @param : pf, The packet filter to reset
* @return : Returns nothing
*/
void
reset_packet_filter(pkt_fltr *pf);
/**
* @brief : Returns direction of packet filter (uplink and/or downlink).
* @param : index
* Packet filter index
* @return : Direction as defined as tft packet filter direction in 3gpp 24.008
* table 10.5.162, one of:
* - TFT_DIRECTION_BIDIRECTIONAL
* - TFT_DIRECTION_UPLINK_ONLY
* - TFT_DIRECTION_DOWNLINK_ONLY
*/
uint8_t
get_packet_filter_direction(uint16_t index);
/**
* @brief : Returns the packet filter given it's index.
* @param : index, Index of packet filter
* @return : Packet filter at index
*/
packet_filter *
get_packet_filter(uint16_t index);
/**
* @brief : Packet filter initialization function. Reads static file and populates
* packet filters accordingly.
* @param : No Param
* @return : Returns nothing
*/
void
init_packet_filters(void);
/**
* @brief : parse adc rules
* @param : No Param
* @return : Returns nothing
*/
void parse_adc_rules(void);
/**
* @brief : Get meter profile index with matching bit rate cir
* @param : cir, bit rate
* @return : Returns meter profile index of matching meter profile
*/
int meter_profile_index_get(uint64_t cir);
#endif
|
nikhilc149/e-utran-features-bug-fixes | dp/up_main.h | <gh_stars>0
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _UP_MAIN_H_
#define _UP_MAIN_H_
/**
* @file
* This file contains macros, data structure definitions and function
* prototypes of dataplane initialization, user session
* and rating group processing functions.
*/
#include <pcap.h>
#include <rte_hash.h>
#include <rte_errno.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_meter.h>
#include <rte_jhash.h>
#include <rte_malloc.h>
#include <rte_ethdev.h>
#include <rte_version.h>
#include "../pfcp_messages/pfcp_up_struct.h"
#include "structs.h"
#include "interface.h"
#include "vepc_cp_dp_api.h"
#include "epc_packet_framework.h"
#include "teid_upf.h"
#include "gw_adapter.h"
#ifdef USE_REST
#include "ngic_timer.h"
#endif /* use_rest */
#ifdef USE_CSID
#include "../pfcp_messages/csid_struct.h"
#endif /* USE_CSID */
#define FILE_NAME "../config/dp_rstCnt.txt"
/**
* dataplane rte logs.
*/
#define RTE_LOGTYPE_DP RTE_LOGTYPE_USER1
/**
* CP DP communication API rte logs.
*/
#define RTE_LOGTYPE_API RTE_LOGTYPE_USER2
/**
* rte notification log level.
*/
#define NOTICE 0
/**
* rte information log level.
*/
#define NGIC_INFO 1
/**
* rte debug log level.
*/
#define NGIC_DEBUG 2
/**
* Session Creation.
*/
#define SESS_CREATE 0
/**
* Session Modification.
*/
#define SESS_MODIFY 1
/**
* Session Deletion.
*/
#define SESS_DEL 2
/**
* max prefetch.
*/
#define PREFETCH_OFFSET 8
/**
 * set nth bit.
 */
#define SET_BIT(mask, n) ((mask) |= (1LLU << (n)))
/**
 * reset nth bit.
 */
#define RESET_BIT(mask, n) ((mask) &= ~(1LLU << (n)))
/**
 * check if nth bit is set.
 */
#define ISSET_BIT(mask, n) (((mask) & (1LLU << (n))) ? 1 : 0)
/**
* default ring size
*/
#define EPC_DEFAULT_RING_SZ 4096
/**
* default burst size
*/
#define EPC_DEFAULT_BURST_SZ 32
/**
* burst size of 64 pkts
*/
#define EPC_BURST_SZ_64 64
/**
* max burst size
*/
#define MAX_BURST_SZ EPC_BURST_SZ_64
/**
* Reserved ADC ruleids installed by DP during init.
* example: DNS_RULE_ID to identify dns pkts. .
*/
#define RESVD_IDS 1
/**
* Pre-defined DNS sdf filter rule id.
*/
#define DNS_RULE_ID (MAX_ADC_RULES + 1)
/**
* uplink flow.
*/
#define UL_FLOW 1
/**
* downlink flow.
*/
#define DL_FLOW 2
/**
* offset of meta data in headroom.
*/
#define META_DATA_OFFSET 128
/**
* max records charging.
*/
#define MAX_SESSION_RECS 64
/**
* Set DPN ID
*/
#define DPN_ID (12345)
#define DEFAULT_HASH_FUNC rte_jhash
/*
* To replace all old structures with the new one in code
* TODO: Cleaner way.
*/
#define dp_pcc_rules pcc_rules
#ifdef HUGE_PAGE_16GB
#define HASH_SIZE_FACTOR 4
#else
#define HASH_SIZE_FACTOR 1
#endif
#define SDF_FILTER_TABLE_SIZE (1024)
#define ADC_TABLE_SIZE (1024)
#define PCC_TABLE_SIZE (1025)
#define METER_PROFILE_SDF_TABLE_SIZE (2048)
/**
* pcap filename length.
*/
#define PCAP_FILENAME_LEN 256
/**
* pcap filenames.
*/
#define UPLINK_PCAP_FILE "logs/estbnd"
#define DOWNLINK_PCAP_FILE "logs/wstbnd"
#define PCAP_EXTENTION ".pcap"
/* KNI releted parameters and struct define here */
/* Max size of a single packet */
#define MAX_PACKET_SZ 2048
/* Total octets in ethernet header */
#define KNI_ENET_HEADER_SIZE 14
/* Total octets in the FCS */
#define KNI_ENET_FCS_SIZE 4
#define KNI_SECOND_PER_DAY 86400
/* User Level Packet Copying */
#define UPLINK_DIRECTION 1
#define DOWNLINK_DIRECTION 2
#define COPY_UP_PKTS 1
#define COPY_DOWN_PKTS 2
#define COPY_UP_DOWN_PKTS 3
#define COPY_HEADER_ONLY 1
#define COPY_HEADER_DATA_ONLY 2
#define COPY_DATA_ONLY 3
/* How many packets to attempt to read from NIC in one go */
#define PKT_BURST_SZ 32
#define KNI_MAX_KTHREAD 32
/* UDP socket port configure */
#define SOCKET_PORT 5556
#ifdef USE_REST
/* VS: Number of connection can maitain in the hash */
#define NUM_CONN 500
/**
* no. of mbuf.
*/
#define NB_ECHO_MBUF 1024
#define ARP_SEND_BUFF 512
#define WEST_INTFC 0
#define EAST_INTFC 1
#define FWD_MASK 0
#define ENCAP_MASK 1
#define DECAP_MASK 2
struct rte_mempool *echo_mpool;
extern int32_t conn_cnt;
extern udp_sock_t my_sock;
uint8_t dp_restart_cntr;
uint32_t li_seq_no;
extern teidri_info *upf_teidri_allocated_list;
extern teidri_info *upf_teidri_free_list;
extern teidri_info *upf_teidri_blocked_list;
extern dp_configuration_t dp_configuration;
/**
* @brief : IP Type Info
*/
struct ip_type {
/* IPv4 Flag */
uint8_t ipv4;
/* IPv6 Flag */
uint8_t ipv6;
/* IPv4IPv6 Flag */
uint8_t ipv4_ipv6;
}__attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
typedef struct ip_type ip_type_t;
struct rte_ipv6_fragment_ext {
uint8_t next_header;
uint8_t reserved;
rte_be16_t frag_data;
rte_be32_t id;
} __rte_packed;
/* IPv6 fragment extension header size */
#define RTE_IPV6_FRAG_HDR_SIZE sizeof(struct rte_ipv6_fragment_ext)
/**
* @brief : Initialize restoration thread
* @param : No param
* @return : Returns nothing
*/
void rest_thread_init(void);
#ifdef CP_BUILD
/**
* @brief : Add node entry
* @param : dstIp, Ip address to be added
* @param : portId, port number
* @return : Returns 0 in case of success , -1 otherwise
*/
uint8_t
add_node_conn_entry(uint32_t dstIp, uint8_t portId);
/**
* @brief : Update rst count
* @param : No param
* @return : Returns Updated restart counter Value
*/
uint8_t
update_rstCnt(void);
#else
/**
* @brief : Add node entry
* @param : dstIp, Ip address to be added its either ipv4 or ipv6
* @param : sess_id, session id
* @param : portId, port number
* @return : Returns 0 in case of success , -1 otherwise
*/
uint8_t
add_node_conn_entry(node_address_t dstIp, uint64_t sess_id, uint8_t portId);
#endif /* CP_BUILD */
#endif /* USE_REST */
/**
* @brief :Structure of port parameters
*/
struct kni_port_params {
uint8_t port_id;/* Port ID */
unsigned lcore_rx; /* lcore ID for RX */
unsigned lcore_tx; /* lcore ID for TX */
uint32_t nb_lcore_k; /* Number of lcores for KNI multi kernel threads */
uint32_t nb_kni; /* Number of KNI devices to be created */
unsigned lcore_k[KNI_MAX_KTHREAD]; /* lcore ID list for kthreads */
struct rte_kni *kni[KNI_MAX_KTHREAD]; /* KNI context pointers */
} __rte_cache_aligned;
extern uint32_t nb_ports;
extern struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
/**
* @brief : Interface to burst rx and enqueue mbufs into rx_q
* @param : p, kni parameters
* @param : pkts_burst, mbufs packets
* @param : nb_rs, number of packets
* @return : Returns nothing
*/
void
kni_ingress(struct kni_port_params *p,
struct rte_mbuf *pkts_burst[PKT_BURST_SZ], unsigned nb_rx);
/**
* @brief : Interface to dequeue mbufs from tx_q and burst tx
* @param : p, kni parameters
* @return : Returns nothing
*/
void kni_egress(struct kni_port_params *p);
/**
* @brief : free mbufs after trasmited resp back on port.
* @param : pkts_burst, mbufs packets
* @param : num, number of packets
* @return : Returns nothing
*/
void
kni_burst_free_mbufs(struct rte_mbuf **pkts, unsigned num);
/**
* @brief : Initialize KNI subsystem
* @param : No param
* @return : Returns nothing
*/
void
init_kni(void);
/**
* @brief : KNI interface allocatation
* @param : port_id, port number
* @return : Returns 0 in case of success , -1 otherwise
*/
int
kni_alloc(uint16_t port_id);
/**
* @brief : Check the link status of all ports in up to 9s, and print them finally
* @param : port_id, port number
* @param : port_mask, mask value
* @return : Returns nothing
*/
void
check_all_ports_link_status(uint16_t port_num, uint32_t port_mask);
/**
* @brief : Validate dpdk interface are configure properly
* @param : port_mask, mask value
* @return : Returns 0 in case of success , -1 otherwise
*/
int
validate_parameters(uint32_t portmask);
/**
* @brief : Free KNI allocation interface on ports
* @param : No param
* @return : Returns nothing
*/
void free_kni_ports(void);
//VS: Routing Discovery
/**
* rte hash handler.
*/
extern struct rte_hash *gateway_arp_hash_handle;
/**
* @brief :rte hash handler.
*/
extern struct rte_hash *route_hash_handle;
#pragma pack(1)
/**
* @brief : Application configure structure .
*/
struct app_params {
/* Gateway Mode*/
//enum dp_config spgw_cfg;
/* Enable DP PCAPS Generation */
/* Start: 1, Restart: 2, Default: 0 Stop*/
uint8_t generate_pcap;
/* Off: 0, On: 1*/
uint8_t perf_flag;
/* Numa Socket
* Default: 0:disable, 1:enable */
uint8_t numa_on;
/* incoming GTP sequence number, 0 - dynamic (default), 1 - not included,
* 2 - included */
uint8_t gtpu_seqnb_in;
/* outgoing GTP sequence number, 0 - do not include (default), 1 - include*/
uint8_t gtpu_seqnb_out;
/* pfcp ipv6 prefix len */
uint8_t pfcp_ipv6_prefix_len;
/* Transmit Count */
uint8_t transmit_cnt;
/* D-DF2 PORT */
uint16_t ddf2_port;
/* D-DF3 PORT */
uint16_t ddf3_port;
/* Transmit Timer*/
int transmit_timer;
/* Peridoic Timer */
int periodic_timer;
/* TEIDRI value */
int teidri_val;
/* TEIDRI Timeout */
int teidri_timeout;
/* cli rest port */
uint16_t cli_rest_port;
/* cli rest ip */
char cli_rest_ip_buff[IPV6_STR_LEN];
/* RTE Log Level*/
uint32_t log_level;
/* West Bound S1U/S5S8 Port */
uint32_t wb_port;
/* East Bound S5S8/SGI Port */
uint32_t eb_port;
/* West Bound S1U/S5S8 IPv4 and IPv6 Type */
ip_type_t wb_ip_type;
/* West Bound S1U/S5S8 IPv4 Address */
uint32_t wb_ip;
/* West Bound S1U/S5S8 IPV6 Link Local Layer Address */
struct in6_addr wb_l3_ipv6;
/* West Bound S1U/S5S8 IPV6 Address */
struct in6_addr wb_ipv6;
/* West Bound S1U/S5S8 IPV6 prefix Len */
uint8_t wb_ipv6_prefix_len;
/* West Bound S1U/S5S8 Logical Interface IPv4 and IPv6 Type */
ip_type_t wb_li_ip_type;
/* West Bound S5S8 Logical Interface IPv4 Address */
uint32_t wb_li_ip;
/* West Bound S5S8 Logical Interface IPV6 Address */
struct in6_addr wb_li_ipv6;
/* West Bound S5S8 Logical Interface IPV6 prefix Len */
uint8_t wb_li_ipv6_prefix_len;
/* East Bound S5S8/SGI IPv4 and IPv6 Type */
ip_type_t eb_ip_type;
/* East Bound S5S8/SGI IPv4 Address */
uint32_t eb_ip;
/* East Bound S5S8/SGI IPV6 Address */
struct in6_addr eb_ipv6;
/* East Bound S5S8/SGI IPV6 Link Local Layer Address */
struct in6_addr eb_l3_ipv6;
/* Eest Bound S5S8/SGI IPV6 prefix Len */
uint8_t eb_ipv6_prefix_len;
/* West Bound S1U/S5S8 IPv4 and IPv6 Type */
ip_type_t eb_li_ip_type;
/* East Bound S5S8 Logical Interface IPv4 Address */
uint32_t eb_li_ip;
/* East Bound S5S8 Logical Interface IPv6 Address */
struct in6_addr eb_li_ipv6;
/* East Bound S5S8 Logical Interface IPv6 Address prefix len */
uint8_t eb_li_ipv6_prefix_len;
/* Ports Masks */
uint32_t ports_mask;
/* West Bound Gateway IP Address */
uint32_t wb_gw_ip;
/* East Bound Gateway IP Address */
uint32_t eb_gw_ip;
/* West Bound S1U/S5S8 Subnet Mask */
uint32_t wb_mask;
/* West Bound S5S8 Logical iface Subnet Mask*/
uint32_t wb_li_mask;
/* East Bound S5S8/SGI Subnet Mask */
uint32_t eb_mask;
/* East Bound S5S8 Logical iface Subnet Mask*/
uint32_t eb_li_mask;
/* West Bound S1U/S5S8 subnet */
uint32_t wb_net;
/* West Bound S5S8 Logical iface Subnet*/
uint32_t wb_li_net;
/* East Bound S5S8/SGI subnet */
uint32_t eb_net;
/* East Bound S5S8 Logical iface Subnet*/
uint32_t eb_li_net;
/* West Bound Gateway Broadcast Address*/
uint32_t wb_bcast_addr;
/* West Bound logical iface Gateway Broadcast Address*/
uint32_t wb_li_bcast_addr;
/* East Bound Gateway Broadcast Address*/
uint32_t eb_bcast_addr;
/* East Bound logical iface Gateway Broadcast Address*/
uint32_t eb_li_bcast_addr;
/* D-DF2 IP Address */
char ddf2_ip[IPV6_STR_LEN];
/* D-DF3 IP Address */
char ddf3_ip[IPV6_STR_LEN];
/* D-DF2 Local IP Address */
char ddf2_local_ip[IPV6_STR_LEN];
/* D-DF3 Local IP Address */
char ddf3_local_ip[IPV6_STR_LEN];
/* West Bound Interface Name */
char wb_iface_name[MAX_LEN];
/* West Bound Logical Interface Name */
char wb_li_iface_name[MAX_LEN];
/* East Bound Interface Name */
char eb_iface_name[MAX_LEN];
/* East Bound Logical Interface Name */
char eb_li_iface_name[MAX_LEN];
/* West Bound S1U/S5S8 physical address MAC */
struct ether_addr wb_ether_addr;
/* East Bound S5S8/SGI physical address MAC */
struct ether_addr eb_ether_addr;
};
#pragma pack()
/** extern the app config struct */
struct app_params app;
/* file descriptor of ddf2 */
void *ddf2_fd;
/* file descriptor of ddf3 */
void *ddf3_fd;
/** ethernet addresses of ports */
struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
/** ethernet addresses of ports */
extern struct ether_addr ports_eth_addr[];
/**
* @brief : ADC sponsored dns table msg payload
*/
struct msg_adc {
uint32_t ipv4;
uint32_t rule_id;
};
/**
* @brief : UL Bearer Map key for hash lookup.
*/
struct ul_bm_key {
/** s1u/s5s8u teid */
uint32_t teid;
/** rule id*/
uint32_t rid;
};
/**
* @brief : Maintains ddn information
*/
typedef struct ddn_info_t {
/* PDR ID */
uint8_t pdr_id;
/* CP Seid */
uint64_t cp_seid;
/* UP Seid */
uint64_t up_seid;
}ddn_t;
#pragma pack(push, 1)
/* Per-packet record queued for delivery — presumably to the lawful
 * intercept (LI) DF endpoints; confirm against the enqueue/dequeue sites. */
typedef struct li_data_ring
{
	uint64_t id;       /* record identifier — TODO confirm semantics */
	uint64_t imsi;     /* subscriber IMSI the packet belongs to */
	int size;          /* length in bytes of the buffer at 'pkts' */
	uint8_t forward;   /* forwarding directive flag — TODO confirm values */
	uint8_t *pkts;     /* pointer to the raw packet bytes (ownership: verify) */
} li_data_t;
#pragma pack(pop)

#pragma pack(push, 1)
/* Usage-report item carried from a PFCP Session Report Request for CDR
 * generation. */
typedef struct cdr_rpt_req {
	pfcp_usage_rpt_sess_rpt_req_ie_t *usage_report; /* decoded usage report IE */
	uint64_t up_seid;  /* user-plane SEID of the owning session */
	uint32_t seq_no;   /* sequence number of the report message */
} cdr_rpt_req_t;
#pragma pack(pop)
/** CDR actions, N_A should never be accounted for */
enum pkt_action_t {CHARGED, DROPPED, N_A};
#ifdef INSTMNT
extern uint32_t flag_wrkr_update_diff;
extern uint64_t total_wrkr_pkts_processed;
#endif /* INSTMNT */
extern struct rte_ring *shared_ring[NUM_SPGW_PORTS];
/** Holds a set of rings to be used for downlink data buffering */
extern struct rte_ring *dl_ring_container;
/** Number of DL rings currently created */
extern uint32_t num_dl_rings;
/** For notification of modify_session so that buffered packets
* can be dequeued
*/
extern struct rte_ring *notify_ring;
/** Pool for notification msg pkts */
extern struct rte_mempool *notify_msg_pool;
extern peer_addr_t dest_addr_t;
extern int arp_icmp_get_dest_mac_address(const uint32_t ipaddr,
const uint32_t phy_port,
struct ether_addr *hw_addr,
uint32_t *nhip);
/**
* @brief : Push DNS packets to DN queue from worker cores
* @param :pkt, DNS packet.
* @return : Returns 0 in case of success , -1 otherwise
*/
int
push_dns_ring(struct rte_mbuf *);
/**
* @brief : Pop DNS packets from ring and send to library for processing
* @param : No param
* @return : Returns nothing
*/
void
scan_dns_ring(void);
/**
* @brief : Function to Initialize the Environment Abstraction Layer (EAL).
* @param : No param
* @return : Returns nothing
*/
void
dp_port_init(void);
/**
* @brief : Function to initialize the dataplane application config.
* @param : argc, number of arguments.
* @param : argv, list of arguments.
* @return : Returns nothing
*/
void
dp_init(int argc, char **argv);
/**
* @brief : Decap gtpu header.
* @param : pkts, pointer to mbuf of incoming packets.
* @param : n, number of pkts.
* @param : pkts_mask, bit mask to process the pkts, reset bit to free the pkt.
* @param : fd_pkts_mask, bit mask to process the pkts, reset bit to free the pkt.
* @return : Returns nothing
*/
void
gtpu_decap(struct rte_mbuf **pkts, uint32_t n,
uint64_t *pkts_mask, uint64_t *fd_pkts_mask);
/**
* @brief : Encap gtpu header.
* @param : pdrs, pdr information
* @param : sess_info, pointer to session info.
* @param : pkts, pointer to mbuf of incoming packets.
* @param : n, number of pkts.
* @param : pkts_mask, bit mask to process the pkts, reset bit to free the pkt.
* @param : fd_pkts_mask, bit mask to process the pkts, reset bit to free the pkt.
* @param : pkts_queue_mask, packet queue mask
* @return : Returns nothing
*/
void
gtpu_encap(pdr_info_t **pdrs, pfcp_session_datat_t **sess_info, struct rte_mbuf **pkts,
uint32_t n, uint64_t *pkts_mask, uint64_t *fd_pkts_mask, uint64_t *pkts_queue_mask);
/*************************pkt_handler.c functions start*********************/
/**
* @brief : Function to handle incoming pkts on west bound interface.
* @param : p, pointer to pipeline.
* @param : pkts, pointer to pkts.
* @param : n, number of pkts.
* @param : pkts_mask, bit mask to process the pkts, reset bit to free the pkt.
* @param : wk_index,
* @return : Returns 0 in case of success , -1 otherwise
*/
int
wb_pkt_handler(struct rte_pipeline *p, struct rte_mbuf **pkts, uint32_t n,
uint64_t *pkts_mask, int wk_index);
/**
* @brief : Function to handle incoming pkts on east bound interface.
* @param : p, pointer to pipeline.
* @param : pkts, pointer to pkts.
* @param : n, number of pkts.
* @param : pkts_mask, bit mask to process the pkts, reset bit to free the pkt.
* @param : wk_index,
* @return : Returns 0 in case of success , -1 otherwise
*/
int
eb_pkt_handler(struct rte_pipeline *p, struct rte_mbuf **pkts, uint32_t n,
uint64_t *pkts_mask, int wk_index);
/**
* @brief : Function to handle notifications from CP which needs updates to
* an active session. So notification handler should process them.
* @param : pkts, pointer to icontrol pkts.
* @param : n, number of pkts.
* @return : Returns 0 in case of success , -1 otherwise
*/
int notification_handler(struct rte_mbuf **pkts,
uint32_t n);
/*************************pkt_handler.c functions end***********************/
/**
* @brief : Clone the DNS pkts and send to CP.
* @param : pkts, pointer to mbuf of incoming packets.
* @param : n, number of pkts.
* @return : Returns nothing
*/
void
clone_dns_pkts(struct rte_mbuf **pkts, uint32_t n, uint64_t pkts_mask);
/**
* @brief : If rule id is DNS, update the meta info.
* @param : pkts, pointer to mbuf of incoming packets.
* @param : n, number of pkts.
* @param : rid, sdf rule id to check the DNS pkts.
* @return : Returns nothing
*/
void
update_dns_meta(struct rte_mbuf **pkts, uint32_t n, uint32_t *rid);
/**
* @brief : Set checksum offload in meta, Fwd based on nexthop info.
* @param : pkts, pointer to mbuf of incoming packets.
* @param : n, number of pkts.
* @param : pkts_mask, bit mask to process the pkts, reset bit to free the pkt.
* @param : portid, port id to forward the pkt.
* @param : PDR, pointer to pdr session info
* @param : Loopback_flag, Indication flag for loopback
* @return : Returns nothing
*/
void
update_nexthop_info(struct rte_mbuf **pkts, uint32_t n,
uint64_t *pkts_mask, uint8_t portid,
pdr_info_t **pdr, uint8_t loopback_flag);
/************* Session information function prototype***********/
/**
* @brief : Get the UL session info from table lookup.
* @param : pkts, pointer to mbuf of incoming packets.
* @param : n, number of pkts.
* @param : pkts_mask, bit mask to process the pkts, reset bit to free the pkt.
* @param : snd_err_pkts_mask, bit mask to send the error indication.
* @param : fwd_pkts_mask, bit mask to forward that packet.
* @param : decap_pkts_mask, bit mask to decap that packet.
* @param : sess_info, session information returned after hash lookup.
* @return : Returns nothing
*/
void
ul_sess_info_get(struct rte_mbuf **pkts, uint32_t n,
uint64_t *pkts_mask, uint64_t *snd_err_pkts_mask,
uint64_t *fwd_pkts_mask, uint64_t *decap_pkts_mask,
pfcp_session_datat_t **sess_info);
/**
* @brief : Get the DL session info from table lookup.
* @param : pkts, pointer to mbuf of incoming packets.
* @param : n, number of pkts.
* @param : pkts_mask, bit mask to process the pkts, reset bit to free the pkt.
* @param : snd_err_pkts_mask, bit mask to send the error indication.
* @param : sess_info, session information returned after hash lookup.
* @param : pkts_queue_mask, packet queue mask
* @return : Returns nothing
*/
void
dl_sess_info_get(struct rte_mbuf **pkts, uint32_t n,
uint64_t *pkts_mask, pfcp_session_datat_t **si,
uint64_t *pkts_queue_mask, uint64_t *snd_err_pkts_mask,
uint64_t *fwd_pkts_mask, uint64_t *encap_pkts_mask);
/**
* @brief : Gate the incoming pkts based on PCC entry info.
 * @param : sdf_info, list of pcc id precedence struct pointers.
 * @param : adc_info, list of pcc id precedence struct pointers.
* @param : n, number of pkts.
* @param : pkts_mask, bit mask to process the pkts, reset bit to free the pkt.
* @param : pcc_id, array of pcc id.
* @return : Returns nothing
*/
void
pcc_gating(struct pcc_id_precedence *sdf_info, struct pcc_id_precedence *adc_info,
uint32_t n, uint64_t *pkts_mask, uint32_t *pcc_id);
/**
* @brief : Called by CP to remove from uplink look up table.
* Note-This function is thread safe due to message queue implementation.
* @param : key
* @return : Returns 0 in case of success , -1 otherwise
*/
int iface_del_uplink_data(struct ul_bm_key *key);
/**
* @brief : Called by CP to remove from downlink look up table.
* Note-This function is thread safe due to message queue implementation.
* @param : key
* @return : Returns 0 in case of success , -1 otherwise
*/
int iface_del_downlink_data(struct dl_bm_key *key);
/**
* @brief : Called by DP to lookup key-value pair in uplink look up table.
* Note-This function is thread safe (Read Only).
* @param : key
* @param : value, buffer to store to result
* @return : Returns 0 in case of success , -1 otherwise
*/
int
iface_lookup_uplink_data(struct ul_bm_key *key,
void **value);
/**
* @brief : Called by DP to do bulk lookup of key-value pair in uplink
* look up table.
* Note-This function is thread safe (Read Only).
* @param : key, keys
 * @param : n, number of keys
* @param : hit_mask
* @param : value, buffer to store to result
* @return : Returns 0 in case of success , -1 otherwise
*/
int
iface_lookup_uplink_bulk_data(const void **key, uint32_t n,
uint64_t *hit_mask, void **value);
/**
* @brief : Called by DP to lookup key-value pair in downlink look up table.
* Note-This function is thread safe (Read Only).
* @param : key
* @param : value, buffer to store to result
* @return : Returns 0 in case of success , -1 otherwise
*/
int
iface_lookup_downlink_data(struct dl_bm_key *key,
void **value);
/**
* @brief : Called by DP to do bulk lookup of key-value pair in downlink
* look up table.
* Note-This function is thread safe (Read Only).
* @param : key, keys
 * @param : n, number of keys
* @param : hit_mask
* @param : value, buffer to store to result
* @return : Returns 0 in case of success , -1 otherwise
*/
int
iface_lookup_downlink_bulk_data(const void **key, uint32_t n,
uint64_t *hit_mask, void **value);
/***********************ddn_utils.c functions start**********************/
#ifdef USE_REST
/**
* @brief : Function to initialize/create shared ring, ring_container and mem_pool to
* inter-communication between DL and iface core.
* @param : No param
* @return : Returns nothing
*/
void
echo_table_init(void);
#ifndef CP_BUILD
/**
* @brief : Function to build GTP-U echo request
* @param : echo_pkt, rte_mbuf pointer
* @param : gtppu_seqnb, sequence number
* @return : Returns nothing
*/
void
build_echo_request(struct rte_mbuf *echo_pkt, peerData *entry, uint16_t gtpu_seqnb);
#endif /* CP_BUILD*/
#endif /* USE_REST */
#ifdef DP_BUILD
/**
* @brief : Function to initialize/create shared ring, ring_container and mem_pool to
* inter-communication between DL and iface core.
* @param : No param
* @return : Returns nothing
*/
void
dp_ddn_init(void);
/**
* @brief : Downlink data notification ack information. The information
* regarding downlink should be updated bearer info.
* @param : dp_id, table identifier.
* @param : ddn_ack, Downlink data notification ack information
* @return : Returns 0 in case of success , -1 otherwise
*/
int
dp_ddn_ack(struct dp_id dp_id,
struct downlink_data_notification_ack_t *ddn_ack);
/**
* @brief : Enqueue the downlink packets based upon the mask.
* @param : pdrs, pdr information
* @param : sess_info, Session for which buffering needs to be performed
* @param : pkts, Set of incoming packets
* @param : pkts_queue_mask, Mask of packets which needs to be buffered
* @return : Returns nothing
*/
void
enqueue_dl_pkts(pdr_info_t **pdrs, pfcp_session_datat_t **sess_info,
struct rte_mbuf **pkts, uint64_t pkts_queue_mask );
/**
* @brief : Process pfcp session report request
* @param : peer_addr, peer node information
* @param : ddn, ddn information
* @return : Returns 0 in case of success , -1 otherwise
*/
uint8_t
process_pfcp_session_report_req(peer_addr_t peer_addr,
ddn_t *ddn);
#endif /* DP_BUILD */
/**
* @brief : update nexthop info.
* @param : pkts, pointer to mbuf of packets.
* @param : n, number of pkts.
* @param : pkts_mask, bit mask to process the pkts, reset bit to free the pkt.
* @param : fd_pkts_mask, bit mask to process the pkts, reset bit to free the pkt.
* @param : loopback_pkts_mask, bit mask to process the pkts, reset bit to free the pkt.
* @param : sess_data, pointer to session bear info
* @param : pdrs, pdr information
* @return : Returns nothing
*/
void
update_nexts5s8_info(struct rte_mbuf **pkts, uint32_t n, uint64_t *pkts_mask,
uint64_t *fd_pkts_mask, uint64_t *loopback_pkts_mask,
pfcp_session_datat_t **sess_data, pdr_info_t **pdrs);
/**
* @brief : update enb ip in ip header and s1u tied in gtp header.
* @param : pkts, pointer to mbuf of packets.
* @param : n, number of pkts.
* @param : pkts_mask, bit mask to process the pkts, reset bit to free the pkt.
* @param : fd_pkts_mask, bit mask to process the pkts, reset bit to free the pkt.
* @param : sess_data, pointer to session bear info
* @param : pdrs, pdr information
* @return : Returns nothing
*/
void
update_enb_info(struct rte_mbuf **pkts, uint32_t n,
uint64_t *pkts_mask, uint64_t *fd_pkts_mask, pfcp_session_datat_t **sess_info,
pdr_info_t **pdr);
/**
* @brief : Process endmarker data received in session modify request
* @param : far, far information
* @return : Returns 0 in case of success , -1 otherwise
*/
int sess_modify_with_endmarker(far_info_t *far);
/**
 * @brief : initializes user plane pcap feature
* @param : No param
* @return : Returns nothing
*/
void
up_pcap_init(void);
/**
 * @brief : dumps packets to the user plane pcap file
* @param : command, content pcap generation command.
* @param : pcap_dumper, pointer to pcap dumper.
* @param : pkts, pointer to mbuf of packets.
* @param : n,number of pkts.
* @return : Returns nothing
*/
void
up_pcap_dumper(pcap_dumper_t *pcap_dumper,
struct rte_mbuf **pkts, uint32_t n);
/**
 * @brief : dumps packets to the user plane pcap file, honoring the packet mask
* @param : command, content pcap generation command.
* @param : pcap_dumper, pointer to pcap dumper.
* @param : pkts, pointer to mbuf of packets.
* @param : n,number of pkts.
* @param : pkts_mask, set of the pkts collections.
* @return : Returns nothing
*/
void
up_core_pcap_dumper(pcap_dumper_t *pcap_dumper,
struct rte_mbuf **pkts, uint32_t n, uint64_t *pkts_mask);
/**
* @brief : initialize pcap dumper.
* @param : pcap_filename, pointer to pcap output filename.
* @return : Returns pointer to pcap dumper
*/
pcap_dumper_t *
init_pcap(char* pcap_filename);
/**
* @brief : write into pcap file.
* @param : pkts, pointer to mbuf of packets.
* @param : n,number of pkts.
* @param : pcap_dumper, pointer to pcap dumper.
* @return : Returns nothing
*/
void dump_pcap(struct rte_mbuf **pkts, uint32_t n,
pcap_dumper_t *pcap_dumper);
/**
* @brief : get dp restart counter value.
* @param : No Param.
* @return : Returns
* dp restart counter value.
*/
uint8_t
get_dp_restart_cntr(void);
/**
* @brief : update dp restart counter value.
* @param : No Param.
* @return : Nothing
*/
void
update_dp_restart_cntr(void);
#ifdef USE_CSID
/**
* @brief : Function to fill the peer node address and generate unique CSID.
* @param : pfcp_session_t session info.
* @param : Control-Plane node address.
 * @return : Returns 0 on success, -1 otherwise
*/
int
fill_peer_node_info_t(pfcp_session_t *sess, node_address_t *cp_node_addr);
/**
* @brief : Function to fill the FQ-CSID in session establishment response.
* @param : pfcp_sess_est_rsp, Session EST Resp Obj
* @param : pfcp_session_t session info.
 * @return : Returns 0 on success, -1 otherwise
*/
int8_t
fill_fqcsid_sess_est_rsp(pfcp_sess_estab_rsp_t *pfcp_sess_est_rsp, pfcp_session_t *sess);
/**
* @brief : Function to process received pfcp session set deletion request.
* @param : pfcp_sess_set_del_req, decoded request info
 * @return : Returns 0 on success, -1 otherwise
*/
int8_t
process_up_sess_set_del_req(pfcp_sess_set_del_req_t *pfcp_sess_set_del_req);
/**
* @brief : Function to Cleanup Session information by local csid.
* @param : node_addr, peer node address
* @param : iface, interface info.
 * @return : Returns 0 on success, -1 otherwise
*/
int8_t
up_del_pfcp_peer_node_sess(node_address_t *node_addr, uint8_t iface);
/**
* @brief : Function to cleanup session by linked CSID.
* @param : pfcp_session_t session info.
* @param : csids, local csids list
* @param : iface, interface info
 * @return : Returns 0 on success, -1 otherwise
*/
int8_t
del_sess_by_csid_entry(pfcp_session_t *sess, fqcsid_t *csids, uint8_t iface);
#endif /* USE_CSID */
#ifdef PRINT_NEW_RULE_ENTRY
/**
* @brief : Function to print received pcc rule information.
* @param : pcc, pcc rule info
* @return : Nothing
*/
void
print_pcc_val(struct pcc_rules *pcc);
/**
* @brief : Function to print adc received rule type.
* @param : adc, adc rule info
* @return : Nothing
*/
void
print_sel_type_val(struct adc_rules *adc);
/**
* @brief : Function to print the received adc rule info.
* @param : adc, adc info
* @return : Nothing
*/
void
print_adc_val(struct adc_rules *adc);
/**
* @brief : Function to print the meter rule info.
* @param : mtr, meter info
* @return : Nothing
*/
void
print_mtr_val(struct mtr_entry *mtr);
/**
* @brief : Function to print the sdf rule info.
* @param : sdf, sdf info
* @return : Nothing
*/
void
print_sdf_val(struct pkt_filter *sdf);
#endif /* PRINT_NEW_RULE_ENTRY */
/**
* @brief : Function to process received adc type info.
* @param : sel_type, adce rule info
* @param : arm, string pointer
* @param : adc, adc info
* @return : 0: Success, -1: otherwise
*/
void dp_sig_handler(int signo);
/**
* @brief : Function to process received adc type info.
* @param : sel_type, adce rule info
* @param : arm, string pointer
* @param : adc, adc info
* @return : 0: Success, -1: otherwise
*/
int
parse_adc_buf(int sel_type, char *arm, struct adc_rules *adc);
/**
* @Name : get_sdf_indices
* @argument :
 *             [IN] sdf_idx : String containing comma separated SDF index values
* [OUT] out_sdf_idx : Array of integers converted from sdf_idx
* @return : 0 - success, -1 fail
* @Description : Convert sdf_idx array in to array of integers for SDF index
* values.
* Sample input : "[0, 1, 2, 3]"
*/
uint32_t
get_sdf_indices(char *sdf_idx, uint32_t *out_sdf_idx);
/***********************ddn_utils.c functions end**********************/
/**
* @brief : Start Timer for flush inactive TEIDRI value and peer addr.
* @param : No Param.
* @return : Returns
* true - on success.
* false - on fail.
*/
bool
start_dp_teidri_timer(void);
/**
* @brief : TEIDRI Timer Callback.
* @param : ti, holds information about timer
* @param : data_t, Peer node related information
* @return : Returns nothing
*/
void
teidri_timer_cb(gstimerinfo_t *ti, const void *data_t );
/**
* @brief : fill dp configuration
* @param : dp configuration pointer
* @return : Return status code
*/
int8_t fill_dp_configuration(dp_configuration_t *dp_configuration);
/**
* @brief : post periodic timer
* @param : periodic_timer_value, Int
* @return : Returns status code
*/
int8_t post_periodic_timer(const int periodic_timer_value);
/**
* @brief : post transmit timer
* @param : transmit_timer_value, Int
* @return : Returns status code
*/
int8_t post_transmit_timer(const int transmit_timer_value);
/**
* @brief : post transmit count
* @param : transmit_count, Int
* @return : Returns status code
*/
int8_t post_transmit_count(const int transmit_count);
/**
* @brief : post pcap generation status
* @param : pcap_status, Int
* @return : Returns status code
*/
int8_t post_pcap_status(const int pcap_status);
/**
* @brief : get periodic timer value
* @param : void
* @return : Returns periodic timer value
*/
int get_periodic_timer(void);
/**
* @brief : update perf flag
* @param : perf_flag, Int
* @return : Returns status code
*/
int8_t update_perf_flag(const int perf_flag);
/**
* @brief : get transmit timer value
* @param : void
* @return : Returns transmit timer value
*/
int get_transmit_timer(void);
/**
* @brief : get transmit count value
* @param : void
* @return : Returns transmit count value
*/
int get_transmit_count(void);
/**
* @brief : get pcap status
* @param : void
* @return : Returns pcap status value
*/
int8_t get_pcap_status(void);
/**
* @brief : get perf flag value
* @param : void
* @return : Returns perf flag value
*/
uint8_t get_perf_flag(void);
/**
* @brief : check IPv6 address is NULL or not
* @param : IPv6 Address
* @return : Returns 0 or bytes
*/
int
isIPv6Present(struct in6_addr *ipv6_addr);
/**
* @brief : Update and send the error indicaion pkts to peer node
* @param : gtpu_pkt, data pkt
* @param : port id
* @return : NULL
*/
void send_error_indication_pkt(struct rte_mbuf *gtpu_pkt, uint8_t port_id);
#endif /* _MAIN_H_ */
|
nikhilc149/e-utran-features-bug-fixes | cp/state_machine/sm_gx_pcnd.c | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gtpv2c.h"
#include "sm_pcnd.h"
#include "cp_stats.h"
#include "debug_str.h"
#include "pfcp_util.h"
#include "pfcp.h"
#include "gtp_messages_decoder.h"
#include "cp_config.h"
pfcp_config_t config;
extern struct cp_stats_t cp_stats;
extern int clSystemLog;
/**
 * @brief : Pre-condition check for incoming Gx messages (CCA / RAR).
 *          Decodes the payload, resolves the Gx context and PDN connection
 *          for the Diameter session, and fills the state-machine fields
 *          (cp_mode/state/event/proc) in msg.
 * @param : gx_rx, received Gx message (Gx header followed by encoded payload).
 * @param : msg, state-machine message info to populate.
 * @return : 0 on success; -1 on decode failure or unsupported message type;
 *           otherwise a GTPv2C cause / DIAMETER error value on lookup failure.
 */
uint32_t
gx_pcnd_check(gx_msg *gx_rx, msg_info *msg)
{
	int ret = 0;
	uint32_t call_id = 0;
	gx_context_t *gx_context = NULL;
	pdn_connection *pdn_cntxt = NULL;

	msg->msg_type = gx_rx->msg_type;

	switch(msg->msg_type) {
	case GX_CCA_MSG: {
		/* Decode the CCA that follows the Gx header. */
		if (gx_cca_unpack((unsigned char *)gx_rx + GX_HEADER_LEN,
					&msg->gx_msg.cca) <= 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure in gx CCA "
					"unpacking \n", LOG_VALUE);
			return -1;
		}

		/* Account the CCA per CC-Request-Type in the CLI stats. */
		switch(msg->gx_msg.cca.cc_request_type) {
		case INITIAL_REQUEST:
			update_cli_stats((peer_address_t *) &config.gx_ip, OSS_CCA_INITIAL, RCVD, GX);
			break;
		case UPDATE_REQUEST:
			update_cli_stats((peer_address_t *) &config.gx_ip, OSS_CCA_UPDATE, RCVD, GX);
			break;
		case TERMINATION_REQUEST:
			update_cli_stats((peer_address_t *) &config.gx_ip, OSS_CCA_TERMINATE, RCVD, GX);
			break;
		}

		/* Retrive Gx_context based on Sess ID. */
		ret = rte_hash_lookup_data(gx_context_by_sess_id_hash,
				(const void*)(msg->gx_msg.cca.session_id.val),
				(void **)&gx_context);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"NO ENTRY FOUND "
					"IN Gx HASH [%s]\n", LOG_VALUE,
					msg->gx_msg.cca.session_id.val);
			return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
		}

		/* Any Result-Code other than DIAMETER_SUCCESS (2001) is a failure. */
		if(msg->gx_msg.cca.presence.result_code &&
				msg->gx_msg.cca.result_code != 2001){
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"received CCA with "
					"DIAMETER Failure [%d]\n", LOG_VALUE,
					msg->gx_msg.cca.result_code);
			return GTPV2C_CAUSE_INVALID_REPLY_FROM_REMOTE_PEER;
		}

		/* Extract the call id from session id */
		ret = retrieve_call_id((char *)msg->gx_msg.cca.session_id.val, &call_id);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Call Id "
					"found for session id:%s\n", LOG_VALUE,
					msg->gx_msg.cca.session_id.val);
			return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
		}

		/* Retrieve PDN context based on call id (validation only here). */
		pdn_cntxt = get_pdn_conn_entry(call_id);
		if (pdn_cntxt == NULL)
		{
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
					"PDN for CALL_ID:%u\n", LOG_VALUE, call_id);
			return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
		}

		/* Restore the session state and set the event for the state machine. */
		msg->cp_mode = gx_context->cp_mode;
		msg->state = gx_context->state;
		msg->event = CCA_RCVD_EVNT;
		msg->proc = gx_context->proc;

		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
				"Msg_Type:%s[%u], Session Id:%s, "
				"State:%s, Event:%s\n",
				LOG_VALUE, gx_type_str(msg->msg_type), msg->msg_type,
				msg->gx_msg.cca.session_id.val,
				get_state_string(msg->state), get_event_string(msg->event));
		break;
	}

	case GX_RAR_MSG: {
		/* (fix: the original shadowed the function-scope call_id and
		 * pdn_cntxt with fresh locals in this case; the outer ones are
		 * reused here instead.) */
		uint32_t buflen = 0;

		update_cli_stats((peer_address_t *) &config.gx_ip, OSS_RAR, RCVD, GX);

		if (gx_rar_unpack((unsigned char *)gx_rx + GX_HEADER_LEN,
					&msg->gx_msg.rar) <= 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure in gx "
					"rar unpacking\n", LOG_VALUE);
			return -1;
		}

		/* Extract the call id from session id (same form as the CCA case). */
		ret = retrieve_call_id((char *)msg->gx_msg.rar.session_id.val, &call_id);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Call Id found "
					"for session id:%s\n", LOG_VALUE,
					msg->gx_msg.rar.session_id.val);
			return DIAMETER_UNKNOWN_SESSION_ID;
		}

		/* Retrieve PDN context based on call id */
		pdn_cntxt = get_pdn_conn_entry(call_id);
		if (pdn_cntxt == NULL)
		{
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
					"PDN for CALL_ID:%u\n", LOG_VALUE, call_id);
			return DIAMETER_UNKNOWN_SESSION_ID;
		}

		/* Retrive Gx_context based on Sess ID. */
		ret = rte_hash_lookup_data(gx_context_by_sess_id_hash,
				(const void*)(msg->gx_msg.rar.session_id.val),
				(void **)&gx_context);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"NO ENTRY FOUND "
					"IN Gx HASH [%s]\n", LOG_VALUE,
					msg->gx_msg.rar.session_id.val);
			return DIAMETER_UNKNOWN_SESSION_ID;
		}

		/* The sender stashes the request pointer for the RAA right after the
		 * encoded RAR; copy it out so it can be echoed back later. */
		buflen = gx_rar_calc_length (&msg->gx_msg.rar);
		memcpy( &gx_context->rqst_ptr ,((unsigned char *)gx_rx + GX_HEADER_LEN + buflen),
				sizeof(unsigned long));
		pdn_cntxt->rqst_ptr = gx_context->rqst_ptr;

		/* Prefer the cp_mode stored on the UE context when available. */
		if (pdn_cntxt->context)
			msg->cp_mode = pdn_cntxt->context->cp_mode;
		else
			msg->cp_mode = gx_context->cp_mode;

		/* Retrive the Session state and set the event */
		msg->state = CONNECTED_STATE;
		msg->event = RE_AUTH_REQ_RCVD_EVNT;
		msg->proc = DED_BER_ACTIVATION_PROC;

		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Callback called for"
				"Msg_Type:%s[%u], Session Id:%s, "
				"State:%s, Event:%s\n",
				LOG_VALUE, gx_type_str(msg->msg_type), msg->msg_type,
				msg->gx_msg.cca.session_id.val,
				get_state_string(msg->state), get_event_string(msg->event));
		break;
	}

	default:
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"process_msgs-"
				"\n\tcase: GateWay"
				"\n\tReceived Gx Message : "
				"%d not supported... Discarding\n", LOG_VALUE, gx_rx->msg_type);
		return -1;
	}

	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | cp/gtpv2c_error_rsp.c | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "gtpv2c_error_rsp.h"
#ifdef CP_BUILD
#include "sm_arr.h"
#include "cp_config.h"
#include "cp_stats.h"
#include "ipc_api.h"
#include "cp_timer.h"
#include "teid.h"
#include "cp.h"
#include "ue.h"
#include "gtpc_session.h"
#include "debug_str.h"
#include "gtpv2c.h"
#include "pfcp.h"
#endif /* CP_BUILD */
peer_addr_t upf_pfcp_sockaddr;
extern socklen_t s11_mme_sockaddr_len;
extern socklen_t s5s8_sockaddr_len;
extern uint16_t payload_length;
extern int s5s8_fd;
extern int s5s8_fd_v6;
extern int pfcp_fd;
extern int pfcp_fd_v6;
extern pfcp_config_t config;
extern int gx_app_sock;
extern peer_addr_t s5s8_recv_sockaddr;
extern int clSystemLog;
/**
 * @brief : Clean up control-plane state after an error during session
 *          setup/modification. Depending on how far the procedure got, this
 *          asks the PGWC to delete its side (SGWC only), tears down the UP
 *          session via PFCP delete, and/or drops the local context hashes.
 * @param : ebi, EPS bearer id of the bearer being cleaned up.
 * @param : context, UE context (used when teid != 0).
 * @param : teid, S11 control TEID; 0 means no UE context was created yet.
 * @param : imsi_val, IMSI used for the UPF-list/context cleanup when teid == 0.
 * @param : seq, sequence number (unused; kept for interface compatibility).
 * @param : msg, the message that carried/triggered the error.
 * @return : Returns 0 in case of success, -1 otherwise.
 */
int8_t
clean_up_while_error(uint8_t ebi, ue_context *context, uint32_t teid, uint64_t *imsi_val, uint32_t seq, msg_info *msg)
{
	pdn_connection *pdn = NULL;
	struct resp_info *resp = NULL;
	bool Error_sent = False;

	/* seq is intentionally unused; suppress the warning here rather than in
	 * dead code after return (as the original did). */
	RTE_SET_USED(seq);

	int ebi_index = GET_EBI_INDEX(ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		return -1;
	}

	if (teid != 0) {
		/* NOTE(review): ebi_index == 0 (first bearer slot) skips this whole
		 * cleanup path — confirm that is intentional. */
		if (ebi_index > 0) {
			pdn = GET_PDN(context, ebi_index);
			if (pdn == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
					"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
			}
			if (pdn != NULL && context != NULL) {
				if (get_sess_entry(pdn->seid, &resp) == 0) {
					/* Remember whether an error/teardown was already in flight
					 * for this session. */
					if ((resp->state == PFCP_SESS_DEL_REQ_SNT_STATE) || (resp->state == ERROR_OCCURED_STATE)
							||(resp->state == PFCP_SESS_MOD_REQ_SNT_STATE)) {
						Error_sent = True;
					}
					/*NOTE: IF SGWC receives CSR RSP with some error from PGWC,then SGWC will do clean up
					 * on its side as well as will send DSR request to PGWC for clean up at PGWC*/
					if ((SGWC == context->cp_mode) && (!context->piggyback)) {
						if(msg->gtpc_msg.cs_rsp.cause.cause_value == GTPV2C_CAUSE_REQUEST_ACCEPTED ||
								msg->msg_type == PFCP_SESSION_MODIFICATION_RESPONSE ) {
							bzero(&tx_buf, sizeof(tx_buf));
							gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
							del_sess_req_t del_sess_req = {0};
							if(msg->msg_type == PFCP_SESSION_MODIFICATION_RESPONSE) {
								fill_ds_request(&del_sess_req, context, ebi_index, pdn->s5s8_pgw_gtpc_teid);
							}
							else {
								fill_ds_request(&del_sess_req, context, ebi_index,
									msg->gtpc_msg.cs_rsp.pgw_s5s8_s2as2b_fteid_pmip_based_intfc_or_gtp_based_ctl_plane_intfc.teid_gre_key);
							}
							payload_length = encode_del_sess_req(&del_sess_req, (uint8_t *)gtpv2c_tx);
							gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
									s5s8_recv_sockaddr, SENT);
						}
					}
					/* checking session is established or not on user plane */
					if (PFCP_SESS_EST_REQ_SNT_STATE != resp->state) {
						pfcp_sess_del_req_t pfcp_sess_del_req = {0};
						fill_pfcp_sess_del_req(&pfcp_sess_del_req, context->cp_mode);
						if(msg->msg_type == PFCP_SESSION_ESTABLISHMENT_RESPONSE) {
							pfcp_sess_del_req.header.seid_seqno.has_seid.seid =
								msg->pfcp_msg.pfcp_sess_est_resp.up_fseid.seid;
						}
						else {
							pfcp_sess_del_req.header.seid_seqno.has_seid.seid = pdn->dp_seid;
						}
						uint8_t pfcp_msg[PFCP_MSG_LEN]={0};
						int encoded = encode_pfcp_sess_del_req_t(&pfcp_sess_del_req, pfcp_msg);
						if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded,
									upf_pfcp_sockaddr, SENT) < 0) {
							clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error "
								"in Sending Session Modification Request. "
								"Error : %i\n", LOG_VALUE, errno);
						} else {
							/* Arm the PFCP retransmission timer for this request. */
							add_pfcp_if_timer_entry(context->s11_sgw_gtpc_teid,
								&upf_pfcp_sockaddr, pfcp_msg, encoded, ebi_index);
						}
					}
					resp->state = ERROR_OCCURED_STATE;
					resp->msg_type = GTP_CREATE_SESSION_RSP;
					resp->linked_eps_bearer_id = ebi;
					if (context->piggyback) {
						resp->state = PFCP_SESS_DEL_REQ_SNT_STATE;
					}
				} else {
					/* No session entry yet: tear down partial UPF state. */
					clean_up_upf_context(pdn, context);
					if(config.use_dns) {
						/* Delete UPFList entry from UPF Hash */
						if ((upflist_by_ue_hash_entry_delete(&context->imsi, sizeof(context->imsi)))
								< 0) {
							clLog(clSystemLog, eCLSeverityCritical,
								LOG_FORMAT"Error on upflist_by_ue_hash deletion of IMSI \n",
								LOG_VALUE);
						}
					}
					clean_context_hash(context, teid, &context->imsi, Error_sent);
				}
				pdn->state = ERROR_OCCURED_STATE;
			}
		}
	} else {
		/* No TEID: only IMSI-keyed entries can exist. */
		if(config.use_dns) {
			/* Delete UPFList entry from UPF Hash.
			 * (bug fix: the original passed sizeof(imsi_val) — the size of
			 * the POINTER — instead of the size of the IMSI value, matching
			 * sizeof(context->imsi) used on the insert/other-delete path) */
			if ((upflist_by_ue_hash_entry_delete(imsi_val, sizeof(*imsi_val)))
					< 0) {
				clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Error on upflist_by_ue_hash deletion of IMSI \n",
					LOG_VALUE);
			}
		}
		clean_context_hash(NULL, teid, imsi_val, Error_sent);
	}

	return 0;
}
/**
 * @brief : Roll back dedicated-bearer state after an error in the
 *          create-bearer procedure: sends a PFCP session modification
 *          removing the PDRs (when the trigger message requires it) and
 *          deletes the dedicated bearers from the PDN.
 * @param : teid, control-plane TEID identifying the UE context (must be != 0).
 * @param : msg_type, the message type that triggered the cleanup.
 * @param : pdn, PDN connection owning the bearers.
 * @return : Returns 0 in case of success, -1 otherwise.
 */
int8_t
clean_up_while_cbr_error(uint32_t teid, uint8_t msg_type, pdn_connection *pdn)
{
	int ebi_index = 0;
	ue_context *context = NULL;
	eps_bearer *bearers[MAX_BEARERS];
	pfcp_sess_mod_req_t pfcp_sess_mod_req = {0};
	struct resp_info *resp = NULL;

	if (teid == 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"TEID not found while "
			"cleaning up create bearer error response", LOG_VALUE);
		return -1;
	}
	if (get_ue_context(teid, &context) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"UE context not found "
			"for teid: %d\n", LOG_VALUE, teid);
		return -1;
	}
	if (get_sess_entry(pdn->seid, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No session entry "
			"found for session id: %lu\n", LOG_VALUE, pdn->seid);
		return -1;
	}

	/* Collect the bearers named in the response; after the loop ebi_index
	 * holds the index of the LAST collected bearer (0 if bearer_count == 0). */
	for (int idx = 0; idx < resp->bearer_count ; ++idx) {
		ebi_index = GET_EBI_INDEX(resp->eps_bearer_ids[idx]);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
			return -1;
		}
		bearers[idx] = context->eps_bearers[ebi_index];
	}

	/* UP rollback is skipped for the message types that already imply it. */
	if ((context->cp_mode == SGWC && msg_type != GTP_CREATE_BEARER_REQ) ||
		((context->cp_mode == PGWC || context->cp_mode == SAEGWC) && msg_type != GX_RAR_MSG) ) {

		fill_pfcp_sess_mod_req_pgw_init_remove_pdr(&pfcp_sess_mod_req, pdn,
			bearers, resp->bearer_count);

		uint8_t pfcp_msg[PFCP_MSG_LEN] = {0};
		int encoded = encode_pfcp_sess_mod_req_t(&pfcp_sess_mod_req, pfcp_msg);

		if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded,
					upf_pfcp_sockaddr, SENT) < 0)
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error "
				"in Sending Session Modification Request. "
				"Error : %i\n", LOG_VALUE, errno);
		else {
#ifdef CP_BUILD
			/* Arm the PFCP retransmission timer for this request. */
			add_pfcp_if_timer_entry( teid,
				&upf_pfcp_sockaddr, pfcp_msg, encoded, GET_EBI_INDEX(pdn->default_bearer_id));
#endif /* CP_BUILD */
		}
		resp->state = ERROR_OCCURED_STATE;
		resp->proc = pdn->proc;
	}

	/* Bug fix: guard the bearer slot itself before dereferencing ->pdn —
	 * the original dereferenced context->eps_bearers[ebi_index]->pdn, which
	 * crashes when the slot is NULL (e.g. bearer_count == 0 leaves
	 * ebi_index at 0). */
	if(context != NULL && resp != NULL &&
			context->eps_bearers[ebi_index] != NULL &&
			context->eps_bearers[ebi_index]->pdn != NULL) {
		delete_dedicated_bearers(context->eps_bearers[ebi_index]->pdn,
			resp->eps_bearer_ids, resp->bearer_count);
	}

	return 0;
}
/**
 * Populate an err_rsp_info structure from an incoming message so the caller
 * can build the appropriate GTPv2-C error/reject response.
 *
 * For each supported message type this extracts: the local TEID, the peer
 * (sender) TEID, the sequence number, the EBI(s)/bearer count, and — for
 * request messages with missing mandatory IEs — the offending IE type.
 *
 * @param msg       decoded incoming message (GTPv2-C / PFCP / Gx union).
 * @param rsp_info  out: error-response info, assumed zero-initialized by caller.
 * @param index     index into upf_context->pending_csr_teid[]; only used for
 *                  PFCP_ASSOCIATION_SETUP_RESPONSE (one pending CSR per call).
 *
 * No return value; on lookup failures the relevant rsp_info fields are simply
 * left at their zero values (callers must tolerate sender_teid/teid == 0).
 */
void get_error_rsp_info(msg_info *msg, err_rsp_info *rsp_info, uint8_t index)
{
	int ret = 0;
	int ebi_index = 0;
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	struct resp_info *resp = NULL;

	switch (msg->msg_type) {

	case GTP_CREATE_SESSION_REQ: {
		rsp_info->sender_teid = msg->gtpc_msg.csr.sender_fteid_ctl_plane.teid_gre_key;
		rsp_info->seq = msg->gtpc_msg.csr.header.teid.has_teid.seq;
		rsp_info->bearer_count = msg->gtpc_msg.csr.bearer_count;

		for (uint8_t i = 0; i < rsp_info->bearer_count; i++) {
			/* Fix: presence check previously used [index] instead of [i],
			 * so only one bearer context IE was ever validated. */
			if (msg->gtpc_msg.csr.bearer_contexts_to_be_created[i].header.len) {
				rsp_info->ebi = msg->gtpc_msg.csr.bearer_contexts_to_be_created[i].eps_bearer_id.ebi_ebi;
				rsp_info->bearer_id[i] = msg->gtpc_msg.csr.bearer_contexts_to_be_created[i].eps_bearer_id.ebi_ebi;
			} else
				rsp_info->offending = GTP_IE_CREATE_SESS_REQUEST_BEARER_CTXT_TO_BE_CREATED;
		}

		rsp_info->teid = msg->gtpc_msg.csr.header.teid.has_teid.teid;

		/* Mandatory-IE checks: the last missing IE wins as "offending". */
		if (!msg->gtpc_msg.csr.sender_fteid_ctl_plane.header.len)
			rsp_info->offending = GTP_IE_FULLY_QUAL_TUNN_ENDPT_IDNT;

		if (!msg->gtpc_msg.csr.imsi.header.len)
			rsp_info->offending = GTP_IE_IMSI;

		if (!msg->gtpc_msg.csr.apn_ambr.header.len)
			rsp_info->offending = GTP_IE_AGG_MAX_BIT_RATE;

		if (!msg->gtpc_msg.csr.pdn_type.header.len)
			rsp_info->offending = GTP_IE_PDN_TYPE;

		for (uint8_t uiCnt = 0; uiCnt < rsp_info->bearer_count; ++uiCnt) {
			if (!msg->gtpc_msg.csr.bearer_contexts_to_be_created[uiCnt].bearer_lvl_qos.header.len)
				rsp_info->offending = GTP_IE_BEARER_QLTY_OF_SVC;
		}

		if (!msg->gtpc_msg.csr.rat_type.header.len)
			rsp_info->offending = GTP_IE_RAT_TYPE;

		if (!msg->gtpc_msg.csr.apn.header.len)
			rsp_info->offending = GTP_IE_ACC_PT_NAME;

		break;
	}

	case PFCP_ASSOCIATION_SETUP_RESPONSE: {
		upf_context_t *upf_context = NULL;
		pdn_connection *pdn = NULL;

		/*Retrive association state based on UPF IP. */
		ret = rte_hash_lookup_data(upf_context_by_ip_hash,
				(const void*) & (msg->upf_ip), (void **) & (upf_context));
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"UPF context "
				"not found for Msg_Type:%u, UPF IP Type : %s, UPF IPv4 : "IPV4_ADDR"\t"
				"UPF IPv6 : "IPv6_FMT"", LOG_VALUE, msg->msg_type,
				ip_type_str(msg->upf_ip.ip_type),
				IPV4_ADDR_HOST_FORMAT(msg->upf_ip.ipv4_addr),
				PRINT_IPV6_ADDR(msg->upf_ip.ipv6_addr));
			return;
		}

		/* One pending CSR (teid + ebi_index) was parked per UE while the
		 * association was being set up; 'index' selects which one. */
		context_key *key = (context_key *)upf_context->pending_csr_teid[index];
		if (get_ue_context(key->teid, &context) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE context not found "
				"for teid: %d\n", LOG_VALUE, key->teid);
		}

		pdn = GET_PDN(context, key->ebi_index);
		if (pdn == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, key->ebi_index);
		} else {
			rsp_info->bearer_count = context->bearer_count;
			rsp_info->sender_teid = context->s11_mme_gtpc_teid;
			rsp_info->seq = context->sequence;
			rsp_info->ebi = key->ebi_index + NUM_EBI_RESERVED;
			rsp_info->teid = key->teid;

			/* Fix: 'itr' was declared inside the loop and reset to 0 on
			 * every iteration, so all bearer ids landed in bearer_id[0].
			 * Hoisted so each active bearer gets its own slot. */
			uint8_t itr = 0;
			for (int i = 0; i < MAX_BEARERS; i++) {
				if (pdn->eps_bearers[i] != NULL) {
					rsp_info->bearer_id[itr] =
						pdn->eps_bearers[i]->eps_bearer_id;
					itr++;
				}
			}
		}
		break;
	}

	case PFCP_SESSION_ESTABLISHMENT_RESPONSE: {
		if (get_sess_entry(msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid, &resp) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No session entry "
				"found for session id: %lu\n", LOG_VALUE,
				msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid);
		}

		if (get_ue_context(UE_SESS_ID(msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid), &context) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE context "
				"for teid: %d\n", LOG_VALUE,
				UE_SESS_ID(msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid));
		}

		rsp_info->teid = UE_SESS_ID(msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid);

		if (context != NULL) {
			rsp_info->sender_teid = context->s11_mme_gtpc_teid;
			rsp_info->seq = context->sequence;
			rsp_info->bearer_count = context->bearer_count;
			/* Fix: guard resp — get_sess_entry() failure above only logs,
			 * so resp could still be NULL here. */
			if (resp != NULL)
				rsp_info->ebi = resp->linked_eps_bearer_id;
			for (int i = 0; i < MAX_BEARERS; i++) {
				if (context->eps_bearers[i] != NULL) {
					rsp_info->bearer_id[i] = context->eps_bearers[i]->eps_bearer_id;
				}
			}
		}
		break;
	}

	case GTP_CREATE_SESSION_RSP: {
		if (get_ue_context_while_error(msg->gtpc_msg.cs_rsp.header.teid.has_teid.teid, &context) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE context not found "
				"for teid: %d\n", LOG_VALUE, msg->gtpc_msg.cs_rsp.header.teid.has_teid.teid);
		}

		if (context != NULL) {
			rsp_info->sender_teid = context->s11_mme_gtpc_teid;
			rsp_info->seq = context->sequence;
		}

		rsp_info->bearer_count = msg->gtpc_msg.cs_rsp.bearer_count;
		for (uint8_t i = 0; i < msg->gtpc_msg.cs_rsp.bearer_count; i++) {
			rsp_info->ebi = msg->gtpc_msg.cs_rsp.bearer_contexts_created[i].eps_bearer_id.ebi_ebi;
			rsp_info->bearer_id[i] = msg->gtpc_msg.cs_rsp.bearer_contexts_created[i].eps_bearer_id.ebi_ebi;
		}
		rsp_info->teid = msg->gtpc_msg.cs_rsp.header.teid.has_teid.teid;
		break;
	}

	case GTP_MODIFY_BEARER_REQ: {
		rsp_info->seq = msg->gtpc_msg.mbr.header.teid.has_teid.seq;
		rsp_info->teid = msg->gtpc_msg.mbr.header.teid.has_teid.teid;
		rsp_info->bearer_count = msg->gtpc_msg.mbr.bearer_count;
		for (uint8_t uiCnt = 0; uiCnt < msg->gtpc_msg.mbr.bearer_count; ++uiCnt) {
			rsp_info->ebi = msg->gtpc_msg.mbr.bearer_contexts_to_be_modified[uiCnt].eps_bearer_id.ebi_ebi;
			rsp_info->bearer_id[uiCnt] = msg->gtpc_msg.mbr.bearer_contexts_to_be_modified[uiCnt].eps_bearer_id.ebi_ebi;
		}

		/* Fill the GTPv2c header teid from the request */
		rsp_info->sender_teid = msg->gtpc_msg.mbr.sender_fteid_ctl_plane.teid_gre_key;
		if (!rsp_info->sender_teid) {
			/* Sender F-TEID IE absent: fall back to the stored MME TEID. */
			if (get_ue_context(msg->gtpc_msg.mbr.header.teid.has_teid.teid, &context) != 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
					"UE context for teid: %d\n", LOG_VALUE,
					msg->gtpc_msg.mbr.header.teid.has_teid.teid);
			}
			if (context != NULL) {
				rsp_info->sender_teid = context->s11_mme_gtpc_teid;
			}
		}
		break;
	}

	case GTP_MODIFY_BEARER_RSP: {
		rsp_info->seq = msg->gtpc_msg.mb_rsp.header.teid.has_teid.seq;
		rsp_info->teid = msg->gtpc_msg.mb_rsp.header.teid.has_teid.teid;
		rsp_info->bearer_count = msg->gtpc_msg.mb_rsp.bearer_count;

		/*extract ebi_id from array as all the ebi's will be of same pdn.*/
		rsp_info->ebi = msg->gtpc_msg.mb_rsp.bearer_contexts_modified[0].eps_bearer_id.ebi_ebi;
		for (uint8_t i = 0; i < msg->gtpc_msg.mb_rsp.bearer_count; i++) {
			rsp_info->bearer_id[i] = msg->gtpc_msg.mb_rsp.bearer_contexts_modified[i].eps_bearer_id.ebi_ebi;
		}

		if (get_ue_context_while_error(msg->gtpc_msg.mb_rsp.header.teid.has_teid.teid, &context) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE context not found "
				"for teid: %d\n", LOG_VALUE,
				msg->gtpc_msg.mb_rsp.header.teid.has_teid.teid);
		}
		if (context != NULL) {
			rsp_info->sender_teid = context->s11_mme_gtpc_teid;
		}
		break;
	}

	case PFCP_SESSION_MODIFICATION_RESPONSE: {
		if (get_sess_entry(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid, &resp) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No session entry "
				"found for session id: %lu\n", LOG_VALUE,
				msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid);
		}

		if (get_ue_context(UE_SESS_ID(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid), &context) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE"
				" context for teid: %d\n", LOG_VALUE,
				UE_SESS_ID(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid));
		}

		pdn_connection *pdn_cntxt = NULL;
		/* Fix: resp may be NULL when the session lookup above failed;
		 * previously this dereferenced it unconditionally. */
		ebi_index = (resp != NULL) ?
			GET_EBI_INDEX(resp->linked_eps_bearer_id) : -1;
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		}

		pdn_cntxt = GET_PDN(context, ebi_index);
		if (pdn_cntxt == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
		}

		if (pdn_cntxt != NULL && context != NULL && resp != NULL) {
			/* On SGWC, bearer-deactivation procedures answer toward the PGW;
			 * everything else answers toward the MME. */
			if (context->cp_mode == SGWC && (pdn_cntxt->proc == MME_INI_DEDICATED_BEARER_DEACTIVATION_PROC ||
					pdn_cntxt->proc == PDN_GW_INIT_BEARER_DEACTIVATION))
				rsp_info->sender_teid = pdn_cntxt->s5s8_pgw_gtpc_teid;
			else
				rsp_info->sender_teid = context->s11_mme_gtpc_teid;

			rsp_info->seq = context->sequence;
			rsp_info->teid = UE_SESS_ID(msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid);
			rsp_info->ebi = resp->linked_eps_bearer_id;
			rsp_info->bearer_count = context->bearer_count;

			int cnt = 0;
			for (int i = 0; i < MAX_BEARERS; i++) {
				if (pdn_cntxt->eps_bearers[i] != NULL) {
					rsp_info->bearer_id[cnt++] = pdn_cntxt->eps_bearers[i]->eps_bearer_id;
				}
			}
		}
		break;
	}

	case GTP_DELETE_SESSION_REQ: {
		rsp_info->seq = msg->gtpc_msg.dsr.header.teid.has_teid.seq;
		rsp_info->teid = msg->gtpc_msg.dsr.header.teid.has_teid.teid;
		rsp_info->ebi = msg->gtpc_msg.dsr.lbi.ebi_ebi;

		if (get_ue_context(msg->gtpc_msg.dsr.header.teid.has_teid.teid, &context) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE"
				" context not found for teid: %d\n", LOG_VALUE, msg->gtpc_msg.dsr.header.teid.has_teid.teid);
		}
		if (context != NULL) {
			rsp_info->sender_teid = context->s11_mme_gtpc_teid;
		}
		break;
	}

	case PFCP_SESSION_DELETION_RESPONSE: {
		if (get_ue_context(UE_SESS_ID(msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid), &context) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE"
				" context not found for teid: %d\n", LOG_VALUE,
				UE_SESS_ID(msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid));
			return;
		}

		/* TEID and EBI are both recoverable from the SEID encoding. */
		rsp_info->sender_teid = context->s11_mme_gtpc_teid;
		rsp_info->seq = context->sequence;
		rsp_info->teid = UE_SESS_ID(msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid);
		rsp_info->ebi = UE_BEAR_ID(msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid);
		break;
	}

	case GTP_DELETE_SESSION_RSP: {
		rsp_info->teid = msg->gtpc_msg.ds_rsp.header.teid.has_teid.teid;
		rsp_info->seq = msg->gtpc_msg.ds_rsp.header.teid.has_teid.seq;

		if (get_ue_context_while_error(msg->gtpc_msg.ds_rsp.header.teid.has_teid.teid, &context) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE"
				" context not found for teid: %d\n", LOG_VALUE, msg->gtpc_msg.ds_rsp.header.teid.has_teid.teid);
		}
		if (context != NULL) {
			rsp_info->sender_teid = context->s11_mme_gtpc_teid;
			rsp_info->seq = context->sequence;
		}
		break;
	}

	case GTP_MODIFY_ACCESS_BEARER_REQ: {
		rsp_info->seq = msg->gtpc_msg.mod_acc_req.header.teid.has_teid.seq;
		rsp_info->teid = msg->gtpc_msg.mod_acc_req.header.teid.has_teid.teid;
		rsp_info->bearer_count = msg->gtpc_msg.mod_acc_req.bearer_modify_count;
		for (uint8_t uiCnt = 0; uiCnt < msg->gtpc_msg.mod_acc_req.bearer_modify_count; ++uiCnt) {
			rsp_info->ebi = msg->gtpc_msg.mod_acc_req.bearer_contexts_to_be_modified[uiCnt].eps_bearer_id.ebi_ebi;
			rsp_info->bearer_id[uiCnt] = msg->gtpc_msg.mod_acc_req.bearer_contexts_to_be_modified[uiCnt].eps_bearer_id.ebi_ebi;
		}

		if (get_ue_context(msg->gtpc_msg.mod_acc_req.header.teid.has_teid.teid,
				&context) != 0) {
			/* Fix: log argument previously read the 'mbr' union member
			 * instead of 'mod_acc_req'. */
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				"UE context for teid: %d\n", LOG_VALUE,
				msg->gtpc_msg.mod_acc_req.header.teid.has_teid.teid);
		}
		if (context != NULL) {
			rsp_info->sender_teid = context->s11_mme_gtpc_teid;
		}
		break;
	}

	case GX_CCA_MSG: {
		uint32_t call_id = 0;

		/* Extract the call id from session id */
		ret = retrieve_call_id((char *)msg->gx_msg.cca.session_id.val, &call_id);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Call Id "
				"found for session id:%s\n", LOG_VALUE,
				msg->gx_msg.cca.session_id.val);
		}

		/* Retrieve PDN context based on call id */
		if (ret == 0) {
			pdn = get_pdn_conn_entry(call_id);
			if (pdn == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
					"PDN for CALL_ID:%u\n", LOG_VALUE, call_id);
			}
		}

		/* Only CCR-I/CCR-U answers carry bearer info back to the peer. */
		if (msg->gx_msg.cca.cc_request_type == INITIAL_REQUEST ||
				msg->gx_msg.cca.cc_request_type == UPDATE_REQUEST) {
			if (pdn != NULL && pdn->context != NULL) {
				context = pdn->context;
				rsp_info->ebi = pdn->default_bearer_id;
				rsp_info->sender_teid = context->s11_mme_gtpc_teid;
				rsp_info->seq = context->sequence;
				rsp_info->bearer_count = context->bearer_count;
				rsp_info->teid = context->s11_sgw_gtpc_teid;

				int j = 0;
				for (int i = 0; i < MAX_BEARERS; i++) {
					if (pdn->eps_bearers[i] != NULL) {
						rsp_info->bearer_id[j] = pdn->eps_bearers[i]->eps_bearer_id;
						j++;
					}
				}
			}
		}
		break;
	}

	case GX_RAR_MSG: {
		uint32_t call_id = 0;

		/* Extract the call id from session id */
		ret = retrieve_call_id((char *)msg->gx_msg.rar.session_id.val, &call_id);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Call Id "
				"found for session id:%s\n", LOG_VALUE,
				msg->gx_msg.rar.session_id.val);
		}

		/* Retrieve PDN context based on call id */
		if (ret == 0) {
			pdn = get_pdn_conn_entry(call_id);
			if (pdn == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
					"PDN for CALL_ID:%u\n", LOG_VALUE, call_id);
			}
		}

		if (pdn != NULL && pdn->context != NULL) {
			context = pdn->context;
			rsp_info->sender_teid = context->s11_mme_gtpc_teid;
			rsp_info->seq = context->sequence;
			rsp_info->bearer_count = context->bearer_count;
			rsp_info->teid = context->s11_sgw_gtpc_teid;

			int j = 0;
			for (int i = 0; i < MAX_BEARERS; i++) {
				if (pdn->eps_bearers[i] != NULL) {
					rsp_info->bearer_id[j] = pdn->eps_bearers[i]->eps_bearer_id;
					j++;
				}
			}
		}
		break;
	}

	case GTP_UPDATE_BEARER_REQ: {
		if (get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.ub_req.header.teid.has_teid.teid, &context) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				" UE context for teid: %d\n",LOG_VALUE, msg->gtpc_msg.ub_req.header.teid.has_teid.teid);
		}

		pdn_connection *pdn_cntxt = NULL;
		rsp_info->seq = msg->gtpc_msg.ub_req.header.teid.has_teid.seq;
		rsp_info->teid = msg->gtpc_msg.ub_req.header.teid.has_teid.teid;

		if (!msg->gtpc_msg.ub_req.apn_ambr.header.len)
			rsp_info->offending = GTP_IE_AGG_MAX_BIT_RATE;

		for (uint8_t i = 0; i < msg->gtpc_msg.ub_req.bearer_context_count; i++) {
			if (msg->gtpc_msg.ub_req.bearer_contexts[i].header.len) {
				rsp_info->bearer_id[rsp_info->bearer_count++] =
					msg->gtpc_msg.ub_req.bearer_contexts[i].eps_bearer_id.ebi_ebi;
			} else {
				rsp_info->offending = GTP_IE_UPD_BEARER_REQUEST__BEARER_CTXT;
			}
		}

		rsp_info->ebi = msg->gtpc_msg.ub_req.bearer_contexts[0].eps_bearer_id.ebi_ebi;
		int ebi_index = GET_EBI_INDEX(rsp_info->ebi);

		pdn_cntxt = GET_PDN(context, ebi_index);
		if (pdn_cntxt == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
		}

		if (context != NULL && pdn_cntxt != NULL) {
			if (rsp_info->teid == 0)
				rsp_info->teid = context->s11_sgw_gtpc_teid;
			rsp_info->sender_teid = pdn_cntxt->s5s8_pgw_gtpc_teid;
		}
		break;
	}

	case GTP_UPDATE_BEARER_RSP: {
		if (get_ue_context(msg->gtpc_msg.ub_rsp.header.teid.has_teid.teid, &context)) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT "Failed to get"
				" UE context for teid: %d\n", LOG_VALUE, msg->gtpc_msg.ub_rsp.header.teid.has_teid.teid);
		}

		pdn_connection *pdn_cntxt = NULL;
		rsp_info->seq = msg->gtpc_msg.ub_rsp.header.teid.has_teid.seq;

		if (!msg->gtpc_msg.ub_rsp.cause.header.len)
			rsp_info->offending = GTP_IE_CAUSE;

		for (uint8_t i = 0; i < msg->gtpc_msg.ub_rsp.bearer_context_count; i++) {
			if (msg->gtpc_msg.ub_rsp.bearer_contexts[i].header.len) {
				rsp_info->bearer_id[rsp_info->bearer_count++] =
					msg->gtpc_msg.ub_rsp.bearer_contexts[i].eps_bearer_id.ebi_ebi;
				if (!msg->gtpc_msg.ub_rsp.bearer_contexts[i].cause.header.len)
					rsp_info->offending = GTP_IE_CAUSE;
				if (!msg->gtpc_msg.ub_rsp.bearer_contexts[i].eps_bearer_id.header.len)
					rsp_info->offending = GTP_IE_EPS_BEARER_ID;
			} else {
				rsp_info->offending = GTP_IE_CREATE_BEARER_RESPONSE__BEARER_CTXT;
			}
		}

		ebi_index = GET_EBI_INDEX(rsp_info->bearer_id[0]);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		}

		pdn_cntxt = GET_PDN(context, ebi_index);
		if (pdn_cntxt == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
		}

		if (context != NULL && pdn_cntxt != NULL) {
			rsp_info->teid = context->s11_sgw_gtpc_teid;
			rsp_info->sender_teid = pdn_cntxt->s5s8_pgw_gtpc_teid;
		}
		break;
	}

	case GTP_DELETE_BEARER_REQ: {
		if (get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.db_req.header.teid.has_teid.teid,
				&context) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				" UE context for teid: %d\n", LOG_VALUE, msg->gtpc_msg.db_req.header.teid.has_teid.teid);
		}

		pdn_connection *pdn_cntxt = NULL;
		rsp_info->seq = msg->gtpc_msg.db_req.header.teid.has_teid.seq;

		for (uint8_t i = 0; i < msg->gtpc_msg.db_req.bearer_count; i++) {
			rsp_info->bearer_id[rsp_info->bearer_count++] =
				msg->gtpc_msg.db_req.eps_bearer_ids[i].ebi_ebi;
		}

		rsp_info->ebi = msg->gtpc_msg.db_req.eps_bearer_ids[0].ebi_ebi;
		ebi_index = GET_EBI_INDEX(rsp_info->ebi);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		}

		pdn_cntxt = GET_PDN(context, ebi_index);
		if (pdn_cntxt == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
		}

		if (context != NULL && pdn_cntxt != NULL) {
			rsp_info->teid = context->s11_sgw_gtpc_teid;
			rsp_info->sender_teid = pdn_cntxt->s5s8_pgw_gtpc_teid;
		}
		break;
	}

	case GTP_UPDATE_PDN_CONNECTION_SET_RSP: {
		rsp_info->seq = msg->gtpc_msg.upd_pdn_rsp.header.teid.has_teid.seq;
		/* NOTE(review): the first lookup reads the 'mbr' union member; since
		 * gtpc_msg is a union the header overlays upd_pdn_rsp's — presumably
		 * equivalent, but confirm before simplifying. */
		if (get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.mbr.header.teid.has_teid.teid, &context)) {
			if (get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.upd_pdn_rsp.header.teid.has_teid.teid,
					&context)) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
					" UE context for teid: %d \n",LOG_VALUE,
					msg->gtpc_msg.upd_pdn_rsp.header.teid.has_teid.teid);
			}
		}
		if (context != NULL) {
			rsp_info->sender_teid = context->s11_mme_gtpc_teid;
		}
		break;
	}

	case GTP_CREATE_BEARER_REQ: {
		if (get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.cb_req.header.teid.has_teid.teid,
				&context) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				" UE context for teid: %d\n", LOG_VALUE, msg->gtpc_msg.cb_req.header.teid.has_teid.teid);
		}

		pdn_connection *pdn_cntxt = NULL;
		rsp_info->seq = msg->gtpc_msg.cb_req.header.teid.has_teid.seq;

		if (!msg->gtpc_msg.cb_req.lbi.header.len)
			rsp_info->offending = GTP_IE_EPS_BEARER_ID;

		for (uint8_t i = 0; i < msg->gtpc_msg.cb_req.bearer_cnt; i++) {
			if (msg->gtpc_msg.cb_req.bearer_contexts[i].header.len) {
				rsp_info->bearer_id[rsp_info->bearer_count++] =
					msg->gtpc_msg.cb_req.bearer_contexts[i].eps_bearer_id.ebi_ebi;
				if (!msg->gtpc_msg.cb_req.bearer_contexts[i].eps_bearer_id.header.len)
					rsp_info->offending = GTP_IE_EPS_BEARER_ID;
				if (!msg->gtpc_msg.cb_req.bearer_contexts[i].bearer_lvl_qos.header.len)
					rsp_info->offending = GTP_IE_BEARER_QLTY_OF_SVC;
				if (!msg->gtpc_msg.cb_req.bearer_contexts[i].s58_u_pgw_fteid.header.len)
					rsp_info->offending = GTP_IE_FULLY_QUAL_TUNN_ENDPT_IDNT;
				if (!msg->gtpc_msg.cb_req.bearer_contexts[i].tft.header.len)
					rsp_info->offending = GTP_IE_EPS_BEARER_LVL_TRAFFIC_FLOW_TMPL;
			} else {
				rsp_info->offending = GTP_IE_CREATE_BEARER_REQUEST__BEARER_CTXT;
			}
		}

		/* Fix: this check previously read the 'cb_rsp' union member inside
		 * the Create Bearer REQUEST case (copy-paste from the RSP case). */
		if (msg->gtpc_msg.cb_req.bearer_contexts[0].eps_bearer_id.ebi_ebi != 0) {
			ebi_index = GET_EBI_INDEX(msg->gtpc_msg.cb_req.bearer_contexts[0].eps_bearer_id.ebi_ebi);
		} else {
			/* If the request carries a zero EBI, the ebi_index is taken
			 * from the temporary (reserved) slot. */
			ebi_index = GET_EBI_INDEX(MAX_BEARERS + NUM_EBI_RESERVED);
		}
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		}

		pdn_cntxt = GET_PDN(context, ebi_index);
		if (pdn_cntxt == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
		}

		if (context != NULL && pdn_cntxt != NULL) {
			rsp_info->teid = context->s11_sgw_gtpc_teid;
			rsp_info->sender_teid = pdn_cntxt->s5s8_pgw_gtpc_teid;
		}
		break;
	}

	case GTP_DELETE_BEARER_RSP: {
		pdn_connection *pdn_cntxt = NULL;
		if (get_ue_context(msg->gtpc_msg.db_rsp.header.teid.has_teid.teid,
				&context) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				" UE context for teid: %d\n", LOG_VALUE, msg->gtpc_msg.db_rsp.header.teid.has_teid.teid);
		}

		rsp_info->seq = msg->gtpc_msg.db_rsp.header.teid.has_teid.seq;

		for (uint8_t i = 0; i < msg->gtpc_msg.db_rsp.bearer_count; i++) {
			rsp_info->bearer_id[rsp_info->bearer_count++] =
				msg->gtpc_msg.db_rsp.bearer_contexts[i].eps_bearer_id.ebi_ebi;
		}

		rsp_info->ebi = msg->gtpc_msg.db_rsp.bearer_contexts[0].eps_bearer_id.ebi_ebi;
		ebi_index = GET_EBI_INDEX(rsp_info->ebi);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID %d\n", LOG_VALUE, ebi_index);
		}

		pdn_cntxt = GET_PDN(context, ebi_index);
		if (pdn_cntxt == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
		}

		if (context != NULL && pdn_cntxt != NULL) {
			rsp_info->teid = context->s11_sgw_gtpc_teid;
			/* SGWC forwards the failure toward the PGW; PGWC/SAEGWC answer
			 * the MME directly. */
			if (context->cp_mode == SGWC)
				rsp_info->sender_teid = pdn_cntxt->s5s8_pgw_gtpc_teid;
			else
				rsp_info->sender_teid = context->s11_mme_gtpc_teid;
		}
		break;
	}

	case GTP_MODIFY_BEARER_CMD: {
		if (get_ue_context(msg->gtpc_msg.mod_bearer_cmd.header.teid.has_teid.teid, &context)) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				" UE context for teid: %d\n", LOG_VALUE, msg->gtpc_msg.mod_bearer_cmd.header.teid.has_teid.teid);
		}

		rsp_info->seq = msg->gtpc_msg.mod_bearer_cmd.header.teid.has_teid.seq;
		rsp_info->teid = msg->gtpc_msg.mod_bearer_cmd.header.teid.has_teid.teid;

		if (!msg->gtpc_msg.mod_bearer_cmd.bearer_context.eps_bearer_id.header.len)
			rsp_info->offending = GTP_IE_BEARER_CONTEXT;
		if (!msg->gtpc_msg.mod_bearer_cmd.apn_ambr.header.len)
			rsp_info->offending = GTP_IE_AGG_MAX_BIT_RATE;

		if (context != NULL) {
			rsp_info->sender_teid = context->s11_mme_gtpc_teid;
		}
		break;
	}

	case GTP_DELETE_BEARER_CMD: {
		if (get_ue_context(msg->gtpc_msg.del_ber_cmd.header.teid.has_teid.teid, &context)) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				" UE context for teid: %d\n", LOG_VALUE, msg->gtpc_msg.del_ber_cmd.header.teid.has_teid.teid);
		}

		rsp_info->seq = msg->gtpc_msg.del_ber_cmd.header.teid.has_teid.seq;
		rsp_info->teid = msg->gtpc_msg.del_ber_cmd.header.teid.has_teid.teid;
		rsp_info->bearer_count = msg->gtpc_msg.del_ber_cmd.bearer_count;

		for (uint8_t i = 0; i < msg->gtpc_msg.del_ber_cmd.bearer_count; i++) {
			rsp_info->bearer_id[i] =
				msg->gtpc_msg.del_ber_cmd.bearer_contexts[i].eps_bearer_id.ebi_ebi;
			if (!msg->gtpc_msg.del_ber_cmd.bearer_contexts[i].eps_bearer_id.header.len)
				rsp_info->offending = GTP_IE_EPS_BEARER_ID;
		}

		if (context != NULL) {
			rsp_info->sender_teid = context->s11_mme_gtpc_teid;
		}
		break;
	}

	case GTP_BEARER_RESOURCE_CMD: {
		rsp_info->seq = msg->gtpc_msg.bearer_rsrc_cmd.header.teid.has_teid.seq;
		rsp_info->teid = msg->gtpc_msg.bearer_rsrc_cmd.header.teid.has_teid.teid;
		rsp_info->bearer_count = 1;

		if (msg->gtpc_msg.bearer_rsrc_cmd.lbi.header.len == 0) {
			rsp_info->offending = GTP_IE_EPS_BEARER_ID;
		} else {
			rsp_info->bearer_id[0] = msg->gtpc_msg.bearer_rsrc_cmd.lbi.ebi_ebi;
		}

		rsp_info->sender_teid = msg->gtpc_msg.bearer_rsrc_cmd.sender_fteid_ctl_plane.teid_gre_key;
		break;
	}

	case GTP_BEARER_RESOURCE_FAILURE_IND: {
		if (get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.ber_rsrc_fail_ind.header.teid.has_teid.teid, &context)) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				" UE context for teid: %d\n", LOG_VALUE, msg->gtpc_msg.ber_rsrc_fail_ind.header.teid.has_teid.teid);
			return;
		}

		rsp_info->seq = msg->gtpc_msg.ber_rsrc_fail_ind.header.teid.has_teid.seq;
		rsp_info->teid = msg->gtpc_msg.ber_rsrc_fail_ind.header.teid.has_teid.teid;
		rsp_info->bearer_count = 1;

		if (msg->gtpc_msg.ber_rsrc_fail_ind.linked_eps_bearer_id.header.len == 0) {
			rsp_info->offending = GTP_IE_EPS_BEARER_ID;
		} else {
			rsp_info->bearer_id[0] = msg->gtpc_msg.ber_rsrc_fail_ind.linked_eps_bearer_id.ebi_ebi;
		}

		if (context != NULL) {
			rsp_info->sender_teid = context->s11_mme_gtpc_teid;
		}
		break;
	}

	case GTP_DELETE_BEARER_FAILURE_IND: {
		/* TEID may be either the S11 or the SGW S5/S8 one; try both. */
		if (get_ue_context(msg->gtpc_msg.del_fail_ind.header.teid.has_teid.teid, &context)) {
			if (get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.del_fail_ind.header.teid.has_teid.teid, &context)) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
					" UE context for teid: %d\n", LOG_VALUE, msg->gtpc_msg.del_fail_ind.header.teid.has_teid.teid);
			}
		}

		rsp_info->seq = msg->gtpc_msg.del_fail_ind.header.teid.has_teid.seq;
		rsp_info->teid = msg->gtpc_msg.del_fail_ind.header.teid.has_teid.teid;
		rsp_info->bearer_count = msg->gtpc_msg.del_fail_ind.bearer_count;

		for (uint8_t i = 0; i < msg->gtpc_msg.del_fail_ind.bearer_count; i++) {
			rsp_info->bearer_id[i] =
				msg->gtpc_msg.del_fail_ind.bearer_context[i].eps_bearer_id.ebi_ebi;
			if (!msg->gtpc_msg.del_fail_ind.bearer_context[i].eps_bearer_id.header.len)
				rsp_info->offending = GTP_IE_EPS_BEARER_ID;
		}

		if (context != NULL) {
			rsp_info->sender_teid = context->s11_mme_gtpc_teid;
		}
		break;
	}

	case GTP_MODIFY_BEARER_FAILURE_IND: {
		if (get_ue_context(msg->gtpc_msg.mod_fail_ind.header.teid.has_teid.teid, &context)) {
			if (get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.mod_fail_ind.header.teid.has_teid.teid, &context)) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
					" UE context for teid: %d\n", LOG_VALUE, msg->gtpc_msg.mod_fail_ind.header.teid.has_teid.teid);
			}
		}

		rsp_info->seq = msg->gtpc_msg.mod_fail_ind.header.teid.has_teid.seq;
		rsp_info->teid = msg->gtpc_msg.mod_fail_ind.header.teid.has_teid.teid;

		if (context != NULL) {
			rsp_info->sender_teid = context->s11_mme_gtpc_teid;
		}
		break;
	}

	case GTP_CREATE_BEARER_RSP: {
		pdn_connection *pdn_cntxt = NULL;
		if (get_ue_context(msg->gtpc_msg.cb_rsp.header.teid.has_teid.teid, &context)) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				" UE context for teid: %d\n", LOG_VALUE, msg->gtpc_msg.cb_rsp.header.teid.has_teid.teid);
		}

		if (msg->gtpc_msg.cb_rsp.bearer_contexts[0].eps_bearer_id.ebi_ebi != 0) {
			ebi_index = GET_EBI_INDEX(msg->gtpc_msg.cb_rsp.bearer_contexts[0].eps_bearer_id.ebi_ebi);
		} else {
			/*If Create Bearer Response is received with Zero EBI, then
			ebi_index is extracted from temporary stored location*/
			ebi_index = GET_EBI_INDEX(MAX_BEARERS + NUM_EBI_RESERVED);
		}

		if (NULL != context) {
			rsp_info->cp_mode = context->cp_mode;
			pdn_cntxt = GET_PDN(context, ebi_index);
			if (pdn_cntxt == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
					"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
			} else {
				if (get_sess_entry(pdn_cntxt->seid, &resp) != 0) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session Entry "
						"Found for sess ID:%lu\n", LOG_VALUE, pdn_cntxt->seid);
				}
			}
		}

		rsp_info->seq = msg->gtpc_msg.cb_rsp.header.teid.has_teid.seq;

		if (!msg->gtpc_msg.cb_rsp.cause.header.len)
			rsp_info->offending = GTP_IE_CAUSE;

		if (resp != NULL) {
			/* Bearer ids come from the stored session entry (the response
			 * may legitimately carry zero EBIs). */
			for (uint8_t i = 0; i < resp->bearer_count; i++) {
				if (msg->gtpc_msg.cb_rsp.bearer_contexts[i].header.len) {
					rsp_info->bearer_id[rsp_info->bearer_count++] = resp->eps_bearer_ids[i];
				} else {
					rsp_info->offending = GTP_IE_CREATE_BEARER_RESPONSE__BEARER_CTXT;
				}
				if (!msg->gtpc_msg.cb_rsp.bearer_contexts[i].cause.header.len)
					rsp_info->offending = GTP_IE_CAUSE;
				if (!msg->gtpc_msg.cb_rsp.bearer_contexts[i].eps_bearer_id.header.len)
					rsp_info->offending = GTP_IE_EPS_BEARER_ID;
			}
		}

		if (context != NULL && pdn_cntxt != NULL) {
			rsp_info->teid = context->s11_sgw_gtpc_teid;
			if (context->cp_mode == SGWC)
				rsp_info->sender_teid = pdn_cntxt->s5s8_pgw_gtpc_teid;
			else
				rsp_info->sender_teid = context->s11_mme_gtpc_teid;
		}
		break;
	}

	case GTP_CREATE_INDIRECT_DATA_FORWARDING_TUNNEL_REQ: {
		rsp_info->sender_teid = msg->gtpc_msg.crt_indr_tun_req.sender_fteid_ctl_plane.teid_gre_key;
		rsp_info->seq = msg->gtpc_msg.crt_indr_tun_req.header.teid.has_teid.seq;
		rsp_info->bearer_count = msg->gtpc_msg.crt_indr_tun_req.bearer_count;

		for (uint8_t i = 0; i < rsp_info->bearer_count; i++) {
			rsp_info->ebi = msg->gtpc_msg.crt_indr_tun_req.bearer_contexts[i].eps_bearer_id.ebi_ebi;
			rsp_info->bearer_id[i] = msg->gtpc_msg.crt_indr_tun_req.bearer_contexts[i].eps_bearer_id.ebi_ebi;
		}

		rsp_info->teid = msg->gtpc_msg.crt_indr_tun_req.header.teid.has_teid.teid;
		break;
	}

	case GTP_DELETE_INDIRECT_DATA_FORWARDING_TUNNEL_REQ: {
		rsp_info->seq = msg->gtpc_msg.dlt_indr_tun_req.header.teid.has_teid.seq;
		rsp_info->teid = msg->gtpc_msg.dlt_indr_tun_req.header.teid.has_teid.teid;
		break;
	}
	}
}
/*
 * Build and send a Create Session Response carrying an error cause, then
 * clean up the partially-created session state.
 *
 * @param msg           incoming message that triggered the failure.
 * @param cause_value   GTPv2-C cause value to report.
 * @param cause_source  cause source bit (CS field) for the Cause IE.
 * @param iface         S11_IFACE to answer the MME, otherwise the S5/S8 peer.
 *
 * When a PFCP Association Setup Response is rejected, one error response is
 * sent per CSR that was parked while the association was pending; otherwise
 * exactly one response is sent. When Gx is enabled (and this node is not a
 * pure SGWC), a CCR-Terminate is also sent toward the PCRF.
 */
void cs_error_response(msg_info *msg, uint8_t cause_value, uint8_t cause_source,
		int iface)
{
	int ret = 0;
	uint8_t count = 1;	/* number of error responses to emit */
	ue_context *context = NULL;
	upf_context_t *upf_context = NULL;

	ret = rte_hash_lookup_data(upf_context_by_ip_hash,
			(const void*) & (msg->upf_ip), (void **) & (upf_context));

	/* Association setup rejected: answer every CSR queued on this UPF. */
	if (ret >= 0 && (msg->msg_type == PFCP_ASSOCIATION_SETUP_RESPONSE)
			&& (msg->pfcp_msg.pfcp_ass_resp.cause.cause_value != REQUESTACCEPTED)) {
		count = upf_context->csr_cnt;
	}

	for (uint8_t i = 0; i < count; i++) {

		err_rsp_info rsp_info = {0};
		get_error_rsp_info(msg, &rsp_info, i);

		/* NOTE(review): fallback reads the 'csr' union member regardless of
		 * msg_type — presumably only reached for CSR-derived flows; confirm. */
		if (rsp_info.ebi == 0)
			rsp_info.ebi = msg->gtpc_msg.csr.bearer_contexts_to_be_created[0].eps_bearer_id.ebi_ebi;

		/* Sending CCR-T in case of failure */
		/* TODO:CCR should be send in different function */
		/*Note when cp_mode is 0 it is not required to
		 * send ccrt as it will either fail while processing
		 * initial request or will fail only on serving gateway.
		 * */
		if ((config.use_gx) && msg->cp_mode != SGWC && msg->cp_mode != 0){
			/* Check the TEID Value */
			if (!rsp_info.teid) {
				rsp_info.teid = msg->teid;
			}
			send_ccr_t_req(msg, rsp_info.ebi, rsp_info.teid);
			update_cli_stats((peer_address_t *) &config.gx_ip, OSS_CCR_TERMINATE, SENT, GX);
		}

		/* Build the Create Session Response with the error cause. */
		bzero(&tx_buf, sizeof(tx_buf));
		gtpv2c_header *gtpv2c_tx = (gtpv2c_header *) tx_buf;

		create_sess_rsp_t cs_resp = {0};

		set_gtpv2c_teid_header(&cs_resp.header,
				GTP_CREATE_SESSION_RSP,
				rsp_info.sender_teid,
				rsp_info.seq, NOT_PIGGYBACKED);

		set_cause_error_value(&cs_resp.cause, IE_INSTANCE_ZERO, cause_value,
				cause_source);

		/* Mandatory-IE-missing causes carry the offending IE type, which
		 * needs the larger Cause IE layout. */
		if (cause_value == GTPV2C_CAUSE_MANDATORY_IE_MISSING ) {
			set_ie_header(&cs_resp.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
					sizeof(struct cause_ie));
			cs_resp.cause.offend_ie_type = rsp_info.offending;
			cs_resp.cause.offend_ie_len = 0;
		} else {
			set_ie_header(&cs_resp.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
					sizeof(struct cause_ie_hdr_t));
		}

		/* One Bearer Context IE (EBI + Cause) per requested bearer. */
		cs_resp.bearer_count = rsp_info.bearer_count;
		for(uint8_t i = 0; i < rsp_info.bearer_count; i++){
			set_ie_header(&cs_resp.bearer_contexts_created[i].header, GTP_IE_BEARER_CONTEXT,
					IE_INSTANCE_ZERO, 0);

			set_ebi(&cs_resp.bearer_contexts_created[i].eps_bearer_id, IE_INSTANCE_ZERO,
					rsp_info.bearer_id[i]);
			cs_resp.bearer_contexts_created[i].header.len += sizeof(uint8_t) + IE_HEADER_SIZE;

			set_cause_error_value(&cs_resp.bearer_contexts_created[i].cause,
					IE_INSTANCE_ZERO, cause_value, cause_source);

			cs_resp.bearer_contexts_created[i].header.len += sizeof(struct cause_ie_hdr_t) + IE_HEADER_SIZE;
		}

		payload_length = encode_create_sess_rsp(&cs_resp, (uint8_t *)gtpv2c_tx);

		if (rsp_info.teid != 0) {
			/* Retrieve the UE context */
			ret = get_ue_context_while_error(rsp_info.teid, &context);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Failed to get"
						" UE context for teid: %d\n", LOG_VALUE, rsp_info.teid);
			}
		}

		if (iface == S11_IFACE) {
			/* Suppress the send when the response would be piggybacked. */
			if(rsp_info.seq != 0){
				if(context != NULL) {
					if(context->piggyback != TRUE) {
						gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
								s11_mme_sockaddr, REJ);
					}
				} else {
					gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
							s11_mme_sockaddr, REJ);
				}

				/* copy packet for user level packet copying or li */
				if ((context != NULL) && (context->dupl)) {
					process_pkt_for_li(
							context, S11_INTFC_OUT, tx_buf, payload_length,
							fill_ip_info(s11_mme_sockaddr.type,
									config.s11_ip.s_addr,
									config.s11_ip_v6.s6_addr),
							fill_ip_info(s11_mme_sockaddr.type,
									s11_mme_sockaddr.ipv4.sin_addr.s_addr,
									s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
							config.s11_port,
							((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
								ntohs(s11_mme_sockaddr.ipv4.sin_port) :
								ntohs(s11_mme_sockaddr.ipv6.sin6_port)));
				}
			}
		} else {
			/* S5/S8 peer (PGW-facing) path. */
			if(context != NULL) {
				if(context->piggyback != TRUE) {
					gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
							s5s8_recv_sockaddr, REJ);
				}
			} else {
				gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
						s5s8_recv_sockaddr, REJ);
			}

			/* copy packet for user level packet copying or li */
			if ((context != NULL) && (context->dupl)) {
				process_pkt_for_li(
						context, S5S8_C_INTFC_OUT, tx_buf, payload_length,
						fill_ip_info(s5s8_recv_sockaddr.type,
								config.s5s8_ip.s_addr,
								config.s5s8_ip_v6.s6_addr),
						fill_ip_info(s5s8_recv_sockaddr.type,
								s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
								s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
						config.s5s8_port,
						((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
							ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
							ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
			}
		}

		/* Tear down whatever session state was created before the failure. */
		ret = clean_up_while_error(rsp_info.ebi, context,
				rsp_info.teid, &msg->gtpc_msg.csr.imsi.imsi_number_digits,
				rsp_info.seq, msg);
		if(ret) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"CleanUp failed while Error response is recived",
					LOG_VALUE);
			return;
		}
	}
}
void mbr_error_response(msg_info *msg, uint8_t cause_value, uint8_t cause_source,
int iface)
{
ue_context *context = NULL;
err_rsp_info rsp_info = {0};
pdn_connection *pdn_cntxt = NULL;
get_error_rsp_info(msg, &rsp_info, 0);
struct resp_info *resp = NULL;
bzero(&tx_buf, sizeof(tx_buf));
gtpv2c_header *gtpv2c_tx = (gtpv2c_header *) tx_buf;
mod_bearer_rsp_t mb_resp = {0};
set_gtpv2c_teid_header(&mb_resp.header,
GTP_MODIFY_BEARER_RSP,
rsp_info.sender_teid,
rsp_info.seq, 0);
set_cause_error_value(&mb_resp.cause, IE_INSTANCE_ZERO, cause_value, cause_source);
/* Fill the number of bearer context */
mb_resp.bearer_count = rsp_info.bearer_count;
for (uint8_t uiCnt = 0; uiCnt < rsp_info.bearer_count; ++uiCnt) {
set_ie_header(&mb_resp.bearer_contexts_modified[uiCnt].header,
GTP_IE_BEARER_CONTEXT, IE_INSTANCE_ZERO, 0);
set_cause_error_value(&mb_resp.bearer_contexts_modified[uiCnt].cause,
IE_INSTANCE_ZERO, cause_value, cause_source);
mb_resp.bearer_contexts_modified[uiCnt].header.len += sizeof(struct cause_ie_hdr_t) +
IE_HEADER_SIZE;
set_ebi(&mb_resp.bearer_contexts_modified[uiCnt].eps_bearer_id, IE_INSTANCE_ZERO,
rsp_info.bearer_id[uiCnt]);
mb_resp.bearer_contexts_modified[uiCnt].header.len += sizeof(uint8_t) + IE_HEADER_SIZE;
if (get_ue_context(rsp_info.teid, &context) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n",LOG_VALUE, rsp_info.teid);
}
if (context) {
int ebi_index = GET_EBI_INDEX(rsp_info.bearer_id[uiCnt]);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
}
if (ebi_index > 0 && context->eps_bearers[ebi_index] != NULL) {
if (context->indication_flag.s11tf) {
mb_resp.bearer_contexts_modified[uiCnt].header.len +=
set_gtpc_fteid(&mb_resp.bearer_contexts_modified[uiCnt].s1u_sgw_fteid,
GTPV2C_IFTYPE_S11U_SGW_GTPU, IE_INSTANCE_THREE,
context->eps_bearers[ebi_index]->s1u_sgw_gtpu_ip,
context->eps_bearers[ebi_index]->s1u_sgw_gtpu_teid);
} else {
mb_resp.bearer_contexts_modified[uiCnt].header.len +=
set_gtpc_fteid(&mb_resp.bearer_contexts_modified[uiCnt].s1u_sgw_fteid,
GTPV2C_IFTYPE_S1U_SGW_GTPU, IE_INSTANCE_ZERO,
context->eps_bearers[ebi_index]->s1u_sgw_gtpu_ip,
context->eps_bearers[ebi_index]->s1u_sgw_gtpu_teid);
}
}
}
}
pdn_cntxt = GET_PDN(context, GET_EBI_INDEX(rsp_info.bearer_id[0]));
if (pdn_cntxt != NULL) {
if (get_sess_entry(pdn_cntxt->seid, &resp) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session Entry "
"Found for sess ID: %lu\n", LOG_VALUE, pdn_cntxt->seid);
}
if (resp != NULL) {
reset_resp_info_structure(resp);
}
}
payload_length = encode_mod_bearer_rsp(&mb_resp, (uint8_t *)gtpv2c_tx);
if (iface == S11_IFACE) {
gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
s11_mme_sockaddr, REJ);
/* copy packet for user level packet copying or li */
if (context != NULL && context->dupl) {
process_pkt_for_li(
context, S11_INTFC_OUT, tx_buf, payload_length,
fill_ip_info(s11_mme_sockaddr.type,
config.s11_ip.s_addr,
config.s11_ip_v6.s6_addr),
fill_ip_info(s11_mme_sockaddr.type,
s11_mme_sockaddr.ipv4.sin_addr.s_addr,
s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
config.s11_port,
((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
ntohs(s11_mme_sockaddr.ipv4.sin_port) :
ntohs(s11_mme_sockaddr.ipv6.sin6_port)));
}
} else {
gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
s5s8_recv_sockaddr, REJ);
/* copy packet for user level packet copying or li */
if (context != NULL && context->dupl) {
process_pkt_for_li(
context, S5S8_C_INTFC_OUT, tx_buf, payload_length,
fill_ip_info(s5s8_recv_sockaddr.type,
config.s5s8_ip.s_addr,
config.s5s8_ip_v6.s6_addr),
fill_ip_info(s5s8_recv_sockaddr.type,
s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
config.s5s8_port,
((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
}
}
}
void ds_error_response(msg_info *msg, uint8_t cause_value, uint8_t cause_source,
int iface)
{
/* uint8_t forward = 0;
uint64_t uiImsi = 0; */
ue_context *context = NULL;
pdn_connection *pdn = NULL;
err_rsp_info rsp_info = {0};
struct resp_info *resp = NULL;
uint8_t eps_bearer_id = 0;
int8_t ebi_index = 0;
get_error_rsp_info(msg, &rsp_info, 0);
eps_bearer_id = rsp_info.ebi;
ebi_index = GET_EBI_INDEX(eps_bearer_id);
/* Check GTPv2c Messages */
if((get_ue_context_while_error(rsp_info.teid, &context) == 0) && (ebi_index >= 0)) {
if(context->eps_bearers[ebi_index]) {
pdn = context->eps_bearers[ebi_index]->pdn;
}
/* Check for PFCP Message */
if((msg->msg_type == PFCP_SESSION_DELETION_RESPONSE) &&
(get_sess_entry(msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid, &resp) != 0)){
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session Entry "
"Found for sess ID:%lu\n", LOG_VALUE,
msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid);
} else {
if(pdn != NULL)
{
if((get_sess_entry(pdn->seid, &resp) != 0)){
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session Entry "
"Found for sess ID:%lu\n", LOG_VALUE, pdn->seid);
}
}
}
}
if(context != NULL) {
if ((config.use_gx) && context->cp_mode != SGWC) {
send_ccr_t_req(msg, eps_bearer_id, rsp_info.teid);
update_cli_stats((peer_address_t *) &config.gx_ip, OSS_CCR_TERMINATE, SENT, GX);
}
}
/* Fill and set DSResp message */
bzero(&tx_buf, sizeof(tx_buf));
gtpv2c_header *gtpv2c_tx = (gtpv2c_header *) tx_buf;
del_sess_rsp_t ds_resp = {0};
set_gtpv2c_teid_header(&ds_resp.header,
GTP_DELETE_SESSION_RSP,
rsp_info.sender_teid,
rsp_info.seq, 0);
/* Set the Cause value */
set_cause_error_value(&ds_resp.cause, IE_INSTANCE_ZERO, cause_value,
cause_source);
/* Encode the DSResp Message */
payload_length = encode_del_sess_rsp(&ds_resp, (uint8_t *)gtpv2c_tx);
if(context != NULL) {
if(context->cp_mode != PGWC)
iface = S11_IFACE;
else
iface = S5S8_IFACE;
}
if (rsp_info.seq != 0) {
if (iface == S11_IFACE) {
gtpv2c_send(s11_fd, s11_fd_v6,tx_buf, payload_length,
s11_mme_sockaddr, REJ);
} else {
gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
s5s8_recv_sockaddr, REJ);
}
}
/* it will process only in case of SGWC timer callback.
* when PGWC not give response for timer retry
*
*/
if(context != NULL) {
if((cause_value == GTPV2C_CAUSE_REQUEST_ACCEPTED) && (context->cp_mode == SGWC))
{
if(resp != NULL) {
if (msg->msg_type == GTP_DELETE_SESSION_REQ && resp->state != PFCP_SESS_DEL_REQ_SNT_STATE) {
pfcp_sess_del_req_t pfcp_sess_del_req = {0};
fill_pfcp_sess_del_req(&pfcp_sess_del_req, context->cp_mode);
if(pdn != NULL) {
pfcp_sess_del_req.header.seid_seqno.has_seid.seid = pdn->dp_seid;
}
uint8_t pfcp_msg[PFCP_MSG_LEN]={0};
int encoded = encode_pfcp_sess_del_req_t(&pfcp_sess_del_req, pfcp_msg);
if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded,
upf_pfcp_sockaddr, SENT) < 0)
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error "
"in Sending Session Modification Request. "
"Error : %i\n", LOG_VALUE, errno);
}
}
}
}
/* Cleanup the session info from the resp struct */
if(resp != NULL)
reset_resp_info_structure(resp);
/* cleanup the ue info structures */
if(context && pdn) {
delete_sess_context(&context, pdn);
}
return;
}
/* Build and send a Gx Credit-Control-Request (Termination) for the PDN
 * session identified by (teid, ebi), then tear down the local Gx context.
 *
 * @param msg  : triggering message (currently unused; see RTE_SET_USED).
 * @param ebi  : EPS bearer id used to locate the PDN connection.
 * @param teid : control-plane TEID used to locate the UE context.
 * @return 0 on success or when no Gx session exists; -1 if packing the
 *         CCR buffer fails.
 */
int send_ccr_t_req(msg_info *msg, uint8_t ebi, uint32_t teid) {

	int ret = 0, ret_value = 0;
	pdn_connection *pdn = NULL;
	ue_context *context = NULL;
	gx_context_t *gx_context = NULL;
	uint16_t msglen = 0;
	uint8_t *buffer = NULL;

	/* Retrieve the UE context */
	ret = get_ue_context(teid, &context);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get UE context "
			"for teid: %d\n", LOG_VALUE, teid);
	}

	int ebi_index = GET_EBI_INDEX(ebi);
	if (ebi_index >= 0) {
		pdn = GET_PDN(context, ebi_index);
		if ( pdn == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
		}
	}

	/* Nothing to terminate unless both the context and its PDN were found */
	if (pdn != NULL && context != NULL) {
		/* Retrive Gx_context based on Sess ID. */
		ret = rte_hash_lookup_data(gx_context_by_sess_id_hash,
				(const void*)(pdn->gx_sess_id), (void **)&gx_context);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"NO ENTRY FOUND "
					"IN Gx HASH [%s]\n", LOG_VALUE, pdn->gx_sess_id);
		} else {
			gx_msg ccr_request = {0};

			/* Set the Msg header type for CCR-T */
			ccr_request.msg_type = GX_CCR_MSG ;

			/* Set Credit Control Request type */
			ccr_request.data.ccr.presence.cc_request_type = PRESENT;
			ccr_request.data.ccr.cc_request_type = TERMINATION_REQUEST ;

			/* Set Credit Control Bearer opertaion type */
			ccr_request.data.ccr.presence.bearer_operation = PRESENT;
			ccr_request.data.ccr.bearer_operation = TERMINATION ;

			/* ret_value acts as an error latch: once set, every later
			 * stage is skipped and control falls through to the else
			 * branch that frees the buffer and returns -1. */
			ret_value = fill_ccr_request(&ccr_request.data.ccr, context, ebi_index, pdn->gx_sess_id, 0);
			if (ret_value) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed CCR "
						"request filling process\n", LOG_VALUE);
				ret_value = 1;
			}

			if (ret_value == 0) {
				/* Wire format: [msg_type][msg_len][packed CCR body] */
				msglen = gx_ccr_calc_length(&ccr_request.data.ccr);
				ccr_request.msg_len = msglen + GX_HEADER_LEN;
				buffer = rte_zmalloc_socket(NULL, msglen + GX_HEADER_LEN, RTE_CACHE_LINE_SIZE,
						rte_socket_id());
				if (buffer == NULL) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate "
							"Memory for Buffer, Error: %s \n", LOG_VALUE,
							rte_strerror(rte_errno));
					ret_value = 1;
				}
			}

			if ( ret_value == 0 && buffer != NULL) {
				memcpy(buffer, &ccr_request.msg_type, sizeof(ccr_request.msg_type));
				memcpy((buffer + sizeof(ccr_request.msg_type)),
						&ccr_request.msg_len, sizeof(ccr_request.msg_len));
			}

			if (ret_value == 0
				&& buffer != NULL
				&& gx_ccr_pack(&(ccr_request.data.ccr),
					(unsigned char *)(buffer + GX_HEADER_LEN), msglen) != 0) {
				/* Success path: ship the CCR-T to the Gx application and
				 * drop every local reference to this Gx session. */
				send_to_ipc_channel(gx_app_sock, buffer, msglen + GX_HEADER_LEN);
				free_dynamically_alloc_memory(&ccr_request);

				/* NOTE(review): strerror(ret) is passed but the format
				 * string has no matching specifier — harmless, verify. */
				if (rte_hash_del_key(gx_context_by_sess_id_hash, pdn->gx_sess_id) < 0) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error on "
							"gx_context_by_sess_id_hash deletion\n",
							LOG_VALUE, strerror(ret));
				}

				RTE_SET_USED(msg);

				if (gx_context != NULL) {
					rte_free(gx_context);
					gx_context = NULL;
				}
				rte_free(buffer);
			} else {
				/* Failure path: reached on fill/alloc/pack failure.
				 * rte_free(NULL) is a safe no-op when alloc failed. */
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"ERROR in Packing "
						"CCR Buffer\n", LOG_VALUE);
				rte_free(buffer);
				return -1;
			}
		}
	}
	return 0;
}
void gen_reauth_error_response(pdn_connection *pdn, int16_t error) {
/* Initialize the Gx Parameters */
uint16_t msg_len = 0;
uint8_t *buffer = NULL;
gx_msg raa = {0};
gx_context_t *gx_context = NULL;
uint16_t msg_body_ofs = 0;
uint16_t rqst_ptr_ofs = 0;
uint16_t msg_len_total = 0;
/* Clear Policy in PDN */
pdn->policy.count = 0;
pdn->policy.num_charg_rule_install = 0;
pdn->policy.num_charg_rule_modify = 0;
pdn->policy.num_charg_rule_delete = 0;
/* Allocate the memory for Gx Context */
if ((gx_context_entry_lookup(pdn->gx_sess_id, &gx_context)) < 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
"gx context not found for sess id %s\n",
LOG_VALUE, pdn->gx_sess_id);
}
raa.data.cp_raa.session_id.len = strnlen(pdn->gx_sess_id, MAX_LEN);
memcpy(raa.data.cp_raa.session_id.val, pdn->gx_sess_id, raa.data.cp_raa.session_id.len);
raa.data.cp_raa.presence.session_id = PRESENT;
/* Set the Msg header type for CCR */
raa.msg_type = GX_RAA_MSG;
/* Result code */
raa.data.cp_raa.result_code = error;
raa.data.cp_raa.presence.result_code = PRESENT;
/* Update UE State */
pdn->state = RE_AUTH_ANS_SNT_STATE;
/* Set the Gx State for events */
gx_context->state = RE_AUTH_ANS_SNT_STATE;
/* Calculate the max size of CCR msg to allocate the buffer */
msg_len = gx_raa_calc_length(&raa.data.cp_raa);
msg_body_ofs = GX_HEADER_LEN;
rqst_ptr_ofs = msg_len + msg_body_ofs;
msg_len_total = rqst_ptr_ofs + sizeof(pdn->rqst_ptr);
raa.msg_len = msg_len_total;
buffer = rte_zmalloc_socket(NULL, msg_len_total,
RTE_CACHE_LINE_SIZE, rte_socket_id());
if (buffer == NULL) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate "
"Memory for Buffer, Error: %s \n", LOG_VALUE,
rte_strerror(rte_errno));
return;
}
memcpy(buffer, &raa.msg_type, sizeof(raa.msg_type));
memcpy(buffer + sizeof(raa.msg_type),
&raa.msg_len, sizeof(raa.msg_len));
if (gx_raa_pack(&(raa.data.cp_raa),
(unsigned char *)(buffer + msg_body_ofs),
msg_len) == 0 ) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error in Packing RAA "
"Buffer\n",LOG_VALUE);
rte_free(buffer);
return;
}
memcpy((unsigned char *)(buffer + rqst_ptr_ofs), &(pdn->rqst_ptr),
sizeof(pdn->rqst_ptr));
/* Write or Send CCR msg to Gx_App */
send_to_ipc_channel(gx_app_sock, buffer,msg_len_total);
rte_free(buffer);
buffer = NULL;
return;
}
void gen_reauth_error_resp_for_wrong_seid_rcvd(msg_info *msg,gx_msg *gxmsg, int16_t cause_value) {
/* Initialize the Gx Parameters */
uint16_t msg_len = 0;
unsigned long rqst_ptr = 0;
uint8_t *buffer = NULL;
uint32_t buflen = 0;
gx_msg raa = {0};
memcpy(raa.data.cp_raa.session_id.val, msg->gx_msg.rar.session_id.val, GX_SESSION_ID_LEN);
raa.data.cp_raa.presence.session_id = PRESENT;
buflen = gx_rar_calc_length (&msg->gx_msg.rar);
raa.msg_type = GX_RAA_MSG;
raa.data.cp_raa.result_code = cause_value;
raa.data.cp_raa.presence.result_code = PRESENT;
msg->state = RE_AUTH_ANS_SNT_STATE;
msg_len = gx_raa_calc_length(&raa.data.cp_raa);
raa.msg_len = msg_len + GX_HEADER_LEN + sizeof(unsigned long);
buffer = rte_zmalloc_socket(NULL, msg_len + GX_HEADER_LEN + sizeof(unsigned long),
RTE_CACHE_LINE_SIZE, rte_socket_id());
if (buffer == NULL) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate "
"Memory for Buffer, Error: %s \n", LOG_VALUE, rte_strerror(rte_errno));
return;
}
memcpy(&rqst_ptr, ((unsigned char *)gxmsg + GX_HEADER_LEN + buflen),
sizeof(unsigned long));
memcpy(buffer, &raa.msg_type, sizeof(raa.msg_type));
memcpy(buffer + sizeof(raa.msg_type),
&raa.msg_len, sizeof(raa.msg_len));
if (gx_raa_pack(&(raa.data.cp_raa),
(unsigned char *)(buffer + GX_HEADER_LEN),
msg_len) == 0 ) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error in Packing RAA "
"Buffer\n",LOG_VALUE);
rte_free(buffer);
return;
}
memcpy((unsigned char *)(buffer + msg_len + GX_HEADER_LEN), (&rqst_ptr),
sizeof(unsigned long));
/* Write or Send CCR msg to Gx_App */
send_to_ipc_channel(gx_app_sock, buffer, raa.msg_len);
rte_free(buffer);
buffer = NULL;
return;
}
void delete_bearer_error_response(msg_info *msg, uint8_t cause_value,
uint8_t cause_source, int iface)
{
int ebi_index = 0, ret = 0;
uint32_t seq = 0;
struct resp_info *resp = NULL;
err_rsp_info rsp_info = {0};
pdn_connection *pdn = NULL;
ue_context *context = NULL;
eps_bearer *bearer = NULL;
get_error_rsp_info(msg, &rsp_info, 0);
bzero(&tx_buf, sizeof(tx_buf));
gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
pfcp_sess_mod_req_t pfcp_sess_mod_req = {0};
pfcp_update_far_ie_t *far = NULL;
node_address_t node_value = {0};
if (get_ue_context_by_sgw_s5s8_teid(rsp_info.teid, &context)) {
if (get_ue_context(rsp_info.teid, &context)) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE, rsp_info.teid);
}
}
/*extract ebi_id from array as all the ebi's will be of same pdn.*/
ebi_index = GET_EBI_INDEX(rsp_info.bearer_id[0]);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID \n", LOG_VALUE);
}
if (ebi_index >= 0) {
pdn = GET_PDN(context, ebi_index);
if ( pdn == NULL) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
} else {
if (get_sess_entry(pdn->seid, &resp) != 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
"for seid: %d", LOG_VALUE, pdn->seid);
}
}
}
if (context != NULL) {
if (context->cp_mode != SGWC)
iface = GX_IFACE;
else
iface = S5S8_IFACE;
}
if (resp != NULL && resp->msg_type != GX_RAR_MSG
&& resp->msg_type != GTP_DELETE_BEARER_CMD
&& resp->msg_type != GTP_DELETE_BEARER_REQ) {
seq = get_pfcp_sequence_number(PFCP_SESSION_MODIFICATION_REQUEST, seq);
set_pfcp_seid_header((pfcp_header_t *) &(pfcp_sess_mod_req.header),
PFCP_SESSION_MODIFICATION_REQUEST, HAS_SEID, seq,
context->cp_mode);
pfcp_sess_mod_req.header.seid_seqno.has_seid.seid = pdn->dp_seid;
/*Filling Node ID for F-SEID*/
if (pdn->upf_ip.ip_type == PDN_IP_TYPE_IPV4) {
uint8_t temp[IPV6_ADDRESS_LEN] = {0};
ret = fill_ip_addr(config.pfcp_ip.s_addr, temp, &node_value);
if (ret < 0) {
clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
"IP address", LOG_VALUE);
}
} else if (pdn->upf_ip.ip_type == PDN_IP_TYPE_IPV6) {
ret = fill_ip_addr(0, config.pfcp_ip_v6.s6_addr, &node_value);
if (ret < 0) {
clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
"IP address", LOG_VALUE);
}
}
set_fseid(&(pfcp_sess_mod_req.cp_fseid), pdn->seid, node_value);
for (uint8_t idx = 0; idx < rsp_info.bearer_count; idx++) {
ebi_index = GET_EBI_INDEX(rsp_info.bearer_id[idx]);
if (ebi_index == -1) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
}
if (pdn != NULL && ebi_index >= 0) {
bearer = pdn->eps_bearers[ebi_index];
}
if (bearer != NULL) {
for(uint8_t itr = 0; itr < bearer->pdr_count ; itr++) {
far = &(pfcp_sess_mod_req.update_far[pfcp_sess_mod_req.update_far_count]);
bearer->pdrs[itr]->far.actions.forw = PRESENT;
bearer->pdrs[itr]->far.actions.dupl = 0;
bearer->pdrs[itr]->far.actions.drop = 0;
set_update_far(far, &bearer->pdrs[itr]->far);
pfcp_sess_mod_req.update_far_count++;
}
}
bearer = NULL;
}
uint8_t pfcp_msg[PFCP_MSG_LEN] = {0};
int encoded = encode_pfcp_sess_mod_req_t(&pfcp_sess_mod_req,
pfcp_msg);
if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded,
upf_pfcp_sockaddr, SENT) < 0)
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error "
"in Sending Session Modification Request. "
"Error : %i\n", LOG_VALUE, errno);
else {
#ifdef CP_BUILD
add_pfcp_if_timer_entry(rsp_info.teid,
&upf_pfcp_sockaddr, pfcp_msg, encoded,
ebi_index);
#endif /* CP_BUILD */
}
pdn->state = ERROR_OCCURED_STATE;
resp->state = ERROR_OCCURED_STATE;
resp->proc = pdn->proc;
}
/* send S5S8 interface delete bearer response.*/
if (iface == S5S8_IFACE) {
del_bearer_rsp_t del_rsp = {0};
set_gtpv2c_teid_header(&del_rsp.header,
GTP_DELETE_BEARER_RSP,
rsp_info.sender_teid,
rsp_info.seq, 0);
set_cause_error_value(&del_rsp.cause, IE_INSTANCE_ZERO,
cause_value, cause_source);
del_rsp.bearer_count = rsp_info.bearer_count;
for (int i = 0; i < rsp_info.bearer_count; i++) {
set_ie_header(&del_rsp.bearer_contexts[i].header, GTP_IE_BEARER_CONTEXT,
IE_INSTANCE_ZERO, 0);
set_ebi(&del_rsp.bearer_contexts[i].eps_bearer_id, IE_INSTANCE_ZERO,
rsp_info.bearer_id[i]);
del_rsp.bearer_contexts[i].header.len += sizeof(uint8_t) + IE_HEADER_SIZE;
set_cause_error_value(&del_rsp.bearer_contexts[i].cause, IE_INSTANCE_ZERO,
cause_value, cause_source );
del_rsp.bearer_contexts[i].header.len += sizeof(uint16_t) + IE_HEADER_SIZE;
}
payload_length = encode_del_bearer_rsp(&del_rsp, (uint8_t *)gtpv2c_tx);
reset_resp_info_structure(resp);
gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length, s5s8_recv_sockaddr,
REJ);
} else {
if (pdn != NULL) {
if (pdn->proc == MME_INI_DEDICATED_BEARER_DEACTIVATION_PROC) {
delete_bearer_cmd_failure_indication(msg, cause_value,
CAUSE_SOURCE_SET_TO_0,
context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
return;
}
else if (resp->msg_type == GTP_BEARER_RESOURCE_CMD) {
send_bearer_resource_failure_indication(msg,cause_value,
CAUSE_SOURCE_SET_TO_0,
context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
provision_ack_ccr(pdn, pdn->eps_bearers[ebi_index],
RULE_ACTION_DELETE, RESOURCE_ALLOCATION_FAILURE);
} else {
if (pdn->state != RE_AUTH_ANS_SNT_STATE
&& msg->gtpc_msg.ub_rsp.cause.cause_value != GTPV2C_CAUSE_REMOTE_PEER_NOT_RESPONDING ) {
gen_reauth_error_response(pdn, DIAMETER_UNABLE_TO_COMPLY);
}
}
}
}
return;
}
void cbr_error_response(msg_info *msg, uint8_t cause_value, uint8_t cause_source,
int iface)
{
int ret = 0, ebi_index = 0, err_ret = 0;
err_rsp_info rsp_info = {0};
ue_context *context = NULL;
pdn_connection *pdn_cntxt = NULL;
get_error_rsp_info(msg, &rsp_info, 0);
bzero(&tx_buf, sizeof(tx_buf));
gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
if (msg->msg_type == GTP_CREATE_BEARER_REQ) {
ret = get_ue_context_by_sgw_s5s8_teid(rsp_info.teid, &context);
} else {
ret = get_ue_context(rsp_info.teid, &context);
}
if (ret) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
" UE context for teid: %d\n", LOG_VALUE, rsp_info.teid);
}
if (msg->gtpc_msg.cb_rsp.bearer_contexts[0].eps_bearer_id.ebi_ebi != 0) {
ebi_index = GET_EBI_INDEX(msg->gtpc_msg.cb_rsp.bearer_contexts[0].eps_bearer_id.ebi_ebi);
} else {
/*If Create Bearer Response is received with Zero EBI, then
ebi_index is extracted from temporary stored location*/
ebi_index = GET_EBI_INDEX(MAX_BEARERS + NUM_EBI_RESERVED);
}
pdn_cntxt = GET_PDN(context, ebi_index);
if (pdn_cntxt == NULL) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
}
if (context != NULL) {
if (context->cp_mode != SGWC)
iface = GX_IFACE;
else
iface = S5S8_IFACE;
}
if (iface == S5S8_IFACE) {
create_bearer_rsp_t cbr_rsp = {0};
set_gtpv2c_teid_header(&cbr_rsp.header,
GTP_CREATE_BEARER_RSP,
rsp_info.sender_teid,
rsp_info.seq, 0);
set_cause_error_value(&cbr_rsp.cause, IE_INSTANCE_ZERO, cause_value,
cause_source);
if (cause_value == GTPV2C_CAUSE_MANDATORY_IE_MISSING ) {
set_ie_header(&cbr_rsp.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
sizeof(struct cause_ie));
cbr_rsp.cause.offend_ie_type = rsp_info.offending;
cbr_rsp.cause.offend_ie_len = 0;
} else {
set_ie_header(&cbr_rsp.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
sizeof(struct cause_ie_hdr_t));
}
cbr_rsp.bearer_cnt = rsp_info.bearer_count;
for (int i = 0; i < rsp_info.bearer_count; i++) {
set_ie_header(&cbr_rsp.bearer_contexts[i].header, GTP_IE_BEARER_CONTEXT,
IE_INSTANCE_ZERO, 0);
set_ebi(&cbr_rsp.bearer_contexts[i].eps_bearer_id, IE_INSTANCE_ZERO,
rsp_info.bearer_id[i]);
cbr_rsp.bearer_contexts[i].header.len += sizeof(uint8_t) + IE_HEADER_SIZE;
set_cause_error_value(&cbr_rsp.bearer_contexts[i].cause, IE_INSTANCE_ZERO,
cause_value, cause_source);
cbr_rsp.bearer_contexts[i].header.len += sizeof(uint16_t) + IE_HEADER_SIZE;
}
payload_length = encode_create_bearer_rsp(&cbr_rsp, (uint8_t *)gtpv2c_tx);
if(context != NULL && pdn_cntxt != NULL) {
if(context->piggyback == FALSE && cause_value !=
GTPV2C_CAUSE_REMOTE_PEER_NOT_RESPONDING &&
pdn_cntxt->state != PFCP_SESS_MOD_REQ_SNT_STATE ) {
err_ret = clean_up_while_cbr_error(rsp_info.teid, msg->msg_type, pdn_cntxt);
if (err_ret < 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Error while cleaning"
" create bearer error response.\n", LOG_VALUE);
}
}
}
gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
s5s8_recv_sockaddr, REJ);
} else {
struct resp_info *resp = NULL;
if (pdn_cntxt != NULL) {
if(get_sess_entry(pdn_cntxt->seid, &resp) != 0){
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
"for seid: %d", LOG_VALUE, pdn_cntxt->seid);
}
int ebi_id = 0;
for (int idx = 0; idx < resp->bearer_count ; ++idx) {
ebi_id = resp->eps_bearer_ids[idx];
}
ebi_index = GET_EBI_INDEX(ebi_id);
if (resp->msg_type == GTP_BEARER_RESOURCE_CMD) {
send_bearer_resource_failure_indication(msg,cause_value,
CAUSE_SOURCE_SET_TO_0,
context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
provision_ack_ccr(pdn_cntxt, pdn_cntxt->eps_bearers[ebi_index],
RULE_ACTION_ADD, RESOURCE_ALLOCATION_FAILURE);
}
err_ret = clean_up_while_cbr_error(rsp_info.teid, msg->msg_type, pdn_cntxt);
if (err_ret < 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Error while cleaning"
" create bearer error response.\n", LOG_VALUE);
return;
}
if (resp->msg_type != GTP_BEARER_RESOURCE_CMD)
gen_reauth_error_response(pdn_cntxt, DIAMETER_UNABLE_TO_COMPLY);
}
}
}
/* Handle an error during a bearer update: on S5/S8 answer with an Update
 * Bearer Response carrying the error cause; otherwise report the failure
 * towards the PCRF (bearer-resource / modify-bearer failure indication, or
 * a Gx RAA), with extra session teardown for the modify-bearer-command
 * context-not-found case.
 *
 * @param msg          : incoming message the error corresponds to.
 * @param cause_value  : GTPv2-C cause code to report.
 * @param cause_source : cause source bit (local or remote node).
 * @param iface        : suggested reply interface; overridden by cp_mode.
 */
void ubr_error_response(msg_info *msg, uint8_t cause_value,
		uint8_t cause_source, int iface)
{
	int ret = 0;
	int ebi_index = 0;
	ue_context *context = NULL;
	err_rsp_info rsp_info = {0};
	pdn_connection *pdn_cntxt = NULL;
	struct resp_info *resp = NULL;
	/*extract ebi_id from array as all the ebi's will be of same pdn.*/
	get_error_rsp_info(msg, &rsp_info, 0);

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	ebi_index = GET_EBI_INDEX(rsp_info.bearer_id[0]);
	if (ebi_index == -1) {
		/* NOTE(review): only logged; ebi_index stays -1 and is later used
		 * to index eps_bearers below — verify upstream guarantees. */
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
	}

	/* An Update Bearer Request carries the SGW S5/S8 TEID; anything else
	 * is looked up by the S11 TEID */
	if (msg->msg_type == GTP_UPDATE_BEARER_REQ) {
		ret = get_ue_context_by_sgw_s5s8_teid(rsp_info.teid, &context);
	} else {
		ret = get_ue_context(rsp_info.teid, &context);
	}
	if(ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				" UE context for teid: %d\n", LOG_VALUE, rsp_info.teid);
	}

	pdn_cntxt = GET_PDN(context, ebi_index);
	if (pdn_cntxt != NULL) {
		if (get_sess_entry(pdn_cntxt->seid, &resp) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
					"for seid: %lu", LOG_VALUE, pdn_cntxt->seid);
		}
	} else {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
	}

	/* The gateway role decides where the reply ultimately goes */
	if (context != NULL) {
		if (context->cp_mode != SGWC)
			iface = GX_IFACE;
		else
			iface = S5S8_IFACE;
	}

	if (iface == S5S8_IFACE) {
		/* SGWC: relay the failure as an Update Bearer Response on S5/S8 */
		upd_bearer_rsp_t ubr_rsp = {0};

		set_gtpv2c_teid_header(&ubr_rsp.header,
				GTP_UPDATE_BEARER_RSP,
				rsp_info.sender_teid,
				rsp_info.seq, 0);

		set_cause_error_value(&ubr_rsp.cause, IE_INSTANCE_ZERO, cause_value,
				cause_source);
		if (cause_value == GTPV2C_CAUSE_MANDATORY_IE_MISSING ) {
			/* Extended cause IE carries the offending IE type */
			set_ie_header(&ubr_rsp.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
					sizeof(struct cause_ie));
			ubr_rsp.cause.offend_ie_type = rsp_info.offending;
			ubr_rsp.cause.offend_ie_len = 0;
		} else {
			set_ie_header(&ubr_rsp.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
					sizeof(struct cause_ie_hdr_t));
		}

		ubr_rsp.bearer_context_count = rsp_info.bearer_count;
		for (int i = 0; i < rsp_info.bearer_count; i++) {

			set_ie_header(&ubr_rsp.bearer_contexts[i].header, GTP_IE_BEARER_CONTEXT,
					IE_INSTANCE_ZERO, 0);

			set_ebi(&ubr_rsp.bearer_contexts[i].eps_bearer_id, IE_INSTANCE_ZERO,
					rsp_info.bearer_id[i]);
			ubr_rsp.bearer_contexts[i].header.len += sizeof(uint8_t) + IE_HEADER_SIZE;

			set_cause_error_value(&ubr_rsp.bearer_contexts[i].cause, IE_INSTANCE_ZERO,
					cause_value, cause_source);
			ubr_rsp.bearer_contexts[i].header.len += sizeof(uint16_t) + IE_HEADER_SIZE;
		}

		payload_length = encode_upd_bearer_rsp(&ubr_rsp, (uint8_t *)gtpv2c_tx);

		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
				s5s8_recv_sockaddr, SENT);

		/* Re-fetch the context for the LI copy below */
		if (get_ue_context_while_error(rsp_info.teid, &context) != 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Failed to get"
					" UE context for teid: %d\n",LOG_VALUE, rsp_info.teid);
		}

		reset_resp_info_structure(resp);

		/* copy packet for user level packet copying or li */
		if (context) {
			if (context->dupl) {
				process_pkt_for_li(
						context, S5S8_C_INTFC_OUT, tx_buf, payload_length,
						fill_ip_info(s5s8_recv_sockaddr.type,
								config.s5s8_ip.s_addr,
								config.s5s8_ip_v6.s6_addr),
						fill_ip_info(s5s8_recv_sockaddr.type,
								s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
								s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
						config.s5s8_port,
						((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
							ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
							ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
			}
		}
	} else {
		/* PGWC/SAEGWC: report the failure towards the command originator
		 * and/or the PCRF, depending on what triggered the update */
		if (pdn_cntxt != NULL && resp != NULL) {
			if (resp->msg_type == GTP_BEARER_RESOURCE_CMD) {
				send_bearer_resource_failure_indication(msg,cause_value,
						CAUSE_SOURCE_SET_TO_0,
						context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
				provision_ack_ccr(pdn_cntxt, pdn_cntxt->eps_bearers[ebi_index],
						RULE_ACTION_MODIFY, RESOURCE_ALLOCATION_FAILURE);
				return;
			} else if(resp->msg_type == GTP_MODIFY_BEARER_CMD) {
				modify_bearer_failure_indication(msg, cause_value,
						CAUSE_SOURCE_SET_TO_0,
						context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
				provision_ack_ccr(pdn_cntxt, pdn_cntxt->eps_bearers[ebi_index],
						RULE_ACTION_MODIFY, RESOURCE_ALLOCATION_FAILURE);
				/* CleanUp for HSS INITIATED FLOW and CONTEXT NOT FOUND, don't need to cleanup */
				if(cause_value != GTPV2C_CAUSE_CONTEXT_NOT_FOUND) {
					delete_bearer_request_cleanup(pdn_cntxt, context, pdn_cntxt->default_bearer_id);
				} else {
					/*PGWC should send PGWU pfcp session deletion request*/
					pfcp_sess_del_req_t pfcp_sess_del_req = {0};
					fill_pfcp_sess_del_req(&pfcp_sess_del_req, context->cp_mode);
					pfcp_sess_del_req.header.seid_seqno.has_seid.seid = pdn_cntxt->dp_seid;

					uint8_t pfcp_msg[PFCP_MSG_LEN]={0};
					int encoded = encode_pfcp_sess_del_req_t(&pfcp_sess_del_req, pfcp_msg);
					pfcp_header_t *header = (pfcp_header_t *) pfcp_msg;
					header->message_len = htons(encoded - PFCP_IE_HDR_SIZE);

					if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded,
								upf_pfcp_sockaddr, SENT) < 0)
						clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error "
								"in Sending Session Modification Request. "
								"Error : %i\n", LOG_VALUE, errno);

					/* Tear the whole session down: flag cleanup and fire CCR-T */
					pdn_cntxt->state = PFCP_SESS_DEL_REQ_SNT_STATE;
					resp->state = pdn_cntxt->state;
					pdn_cntxt->proc = DETACH_PROC;
					resp->proc = DETACH_PROC;
					context->mbc_cleanup_status = PRESENT;
					resp->linked_eps_bearer_id = pdn_cntxt->default_bearer_id;
					send_ccr_t_req(msg, rsp_info.ebi, rsp_info.teid);
				}
				return;
			}
			else {
				gen_reauth_error_response(pdn_cntxt, DIAMETER_UNABLE_TO_COMPLY);
				return;
			}
		}
		reset_resp_info_structure(resp);
	}
	return;
}
/* Build a GTPv2-C Version Not Supported Indication (header-only message,
 * TEID 0) and transmit it on the requested interface.
 *
 * @param iface : interface to send on (S11_IFACE or S5S8_IFACE).
 * @param seq   : sequence number echoed back to the peer.
 */
void send_version_not_supported(int iface, uint32_t seq) {

	bzero(&tx_buf, sizeof(tx_buf));

	gtpv2c_header_t *vns_hdr = (gtpv2c_header_t *) tx_buf;

	/* Header-only indication: no TEID, no IEs */
	set_gtpv2c_header(vns_hdr, 0, GTP_VERSION_NOT_SUPPORTED_IND, 0, seq, 0);

	uint16_t encoded_len = encode_gtpv2c_header_t(vns_hdr, (uint8_t *) tx_buf);
	vns_hdr->gtpc.message_len = htons(encoded_len - IE_HEADER_SIZE);
	payload_length = encoded_len;

	if (iface == S11_IFACE)
		gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
				s11_mme_sockaddr, SENT);
	else
		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
				s5s8_recv_sockaddr, SENT);

	return;
}
/* Build and send a Bearer Resource Failure Indication for a failed
 * UE-requested bearer resource operation, echoing the PTI and linked EBI.
 *
 * @param msg          : incoming message the failure corresponds to.
 * @param cause_value  : GTPv2-C cause code to report.
 * @param cause_source : cause source bit (local or remote node).
 * @param iface        : interface to send on (S11_IFACE or S5S8_IFACE).
 */
void send_bearer_resource_failure_indication(msg_info *msg,
		uint8_t cause_value, uint8_t cause_source, int iface)
{
	int ret = 0;
	err_rsp_info rsp_info = {0};
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
	bearer_rsrc_fail_indctn_t ber_fail_ind = {0};
	get_error_rsp_info(msg, &rsp_info, 0);

	if (msg->msg_type == GTP_BEARER_RESOURCE_FAILURE_IND) {
		ret = get_ue_context_by_sgw_s5s8_teid(rsp_info.teid, &context);
	} else {
		ret = get_ue_context(rsp_info.teid, &context);
	}
	/* Fall back to the TEID carried in the message itself */
	if(ret == -1) {
		ret = get_ue_context(msg->teid, &context);
		rsp_info.teid = msg->teid;
	}
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				" UE context for teid: %d\n", LOG_VALUE, rsp_info.teid);
	}

	/* Default the sequence/PTI from the stored UE-initiated transaction */
	if(context != NULL) {
		rsp_info.seq = context->ue_initiated_seq_no;
		rsp_info.sender_teid = context->s11_mme_gtpc_teid;
		set_pti(&ber_fail_ind.pti, IE_INSTANCE_ZERO, context->proc_trans_id);
	}

	/* A live Bearer Resource Command overrides both PTI and sequence */
	if(msg->msg_type == GTP_BEARER_RESOURCE_CMD) {
		set_pti(&ber_fail_ind.pti, IE_INSTANCE_ZERO, msg->gtpc_msg.bearer_rsrc_cmd.pti.proc_trans_id);
		rsp_info.seq = msg->gtpc_msg.bearer_rsrc_cmd.header.teid.has_teid.seq;
	}

	set_gtpv2c_teid_header(&ber_fail_ind.header,
			GTP_BEARER_RESOURCE_FAILURE_IND,
			rsp_info.sender_teid,
			rsp_info.seq, 0);

	set_cause_error_value(&ber_fail_ind.cause, IE_INSTANCE_ZERO,
			cause_value, cause_source);

	if (cause_value == GTPV2C_CAUSE_MANDATORY_IE_MISSING ) {
		/* Extended cause IE carries the offending IE type */
		set_ie_header(&ber_fail_ind.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
				sizeof(struct cause_ie));
		ber_fail_ind.cause.offend_ie_type = rsp_info.offending;
		ber_fail_ind.cause.offend_ie_len = 0;
	}
	else {
		set_ie_header(&ber_fail_ind.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
				sizeof(struct cause_ie_hdr_t));
	}

	/* Linked EBI: prefer the PDN's default bearer when available.
	 * Guard the index: GET_EBI_INDEX may return -1 on a bad EBI. */
	int ebi_index = GET_EBI_INDEX(rsp_info.bearer_id[0]);
	if (ebi_index >= 0) {
		pdn = GET_PDN(context, ebi_index);
	}
	if(pdn!=NULL) {
		set_ebi(&ber_fail_ind.linked_eps_bearer_id, IE_INSTANCE_ZERO,
				pdn->default_bearer_id);
	} else {
		set_ebi(&ber_fail_ind.linked_eps_bearer_id, IE_INSTANCE_ZERO,
				rsp_info.bearer_id[0]);
	}

	ber_fail_ind.cause.cause_value = cause_value;

	/* Bug fix: the original condition used '||', which is always true, so
	 * cs was unconditionally 1. Intent: cs=1 only when the cause originated
	 * in the remote (PGW) node, i.e. when we are neither SGWC nor SAEGWC. */
	if(context != NULL) {
		if (context->cp_mode != SGWC && context->cp_mode != SAEGWC )
			ber_fail_ind.cause.cs = 1;
		else
			ber_fail_ind.cause.cs = 0;
	}

	payload_length = encode_bearer_rsrc_fail_indctn(&ber_fail_ind, (uint8_t *)gtpv2c_tx);

	if(context != NULL) {
		ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
		}
	}

	if(iface == S5S8_IFACE){
		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,s5s8_recv_sockaddr
				, REJ);
		if(context != NULL) {
			context->is_sent_bearer_rsc_failure_indc = PRESENT;
		}
	} else {
		gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
				s11_mme_sockaddr, REJ);
	}

	/* The UE-initiated transaction is answered; clear its bookkeeping */
	if(context != NULL) {
		context->ue_initiated_seq_no = 0;
		context->proc_trans_id = 0;
	}
}
/**
 * @brief : Builds and sends a GTPv2-C Delete Bearer Failure Indication that
 *          carries the given cause for every bearer reported in the derived
 *          error-response info, then resets the session response state.
 * @param : msg, incoming message meta info (source of teid/seq/bearer list)
 * @param : cause_value, GTPv2-C cause code to report
 * @param : cause_source, cause source (CS) bit value
 * @param : iface, egress interface (S5S8_IFACE or S11_IFACE)
 * @return : Returns nothing
 */
void delete_bearer_cmd_failure_indication(msg_info *msg, uint8_t cause_value,
	uint8_t cause_source, int iface)
{
	int ret = 0;
	err_rsp_info rsp_info = {0};
	ue_context *context = NULL;

	/* Derive teid, sequence number, sender teid and bearer ids from msg. */
	get_error_rsp_info(msg, &rsp_info, 0);

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
	del_bearer_fail_indctn_t del_fail_ind={0};
	struct resp_info *resp = NULL;
	pdn_connection *pdn_cntxt = NULL;

	set_gtpv2c_teid_header(&del_fail_ind.header,
			GTP_DELETE_BEARER_FAILURE_IND,
			rsp_info.sender_teid,
			rsp_info.seq, 0);

	set_cause_error_value(&del_fail_ind.cause, IE_INSTANCE_ZERO,
			cause_value, cause_source);

	/* A missing-mandatory-IE cause carries the offending IE type, which
	 * requires the longer cause IE layout; otherwise the short header-only
	 * cause IE is used. */
	if (cause_value == GTPV2C_CAUSE_MANDATORY_IE_MISSING ) {
		set_ie_header(&del_fail_ind.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
				sizeof(struct cause_ie));
		del_fail_ind.cause.offend_ie_type = rsp_info.offending;
		del_fail_ind.cause.offend_ie_len = 0;
	}
	else {
		set_ie_header(&del_fail_ind.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
				sizeof(struct cause_ie_hdr_t));
	}

	/* One bearer-context IE (EBI + per-bearer cause) per reported bearer. */
	for (int i = 0; i < rsp_info.bearer_count; i++) {

		set_ie_header(&del_fail_ind.bearer_context[i].header, GTP_IE_BEARER_CONTEXT,
				IE_INSTANCE_ZERO, 0);

		set_ebi(&del_fail_ind.bearer_context[i].eps_bearer_id, IE_INSTANCE_ZERO,
				rsp_info.bearer_id[i]);
		del_fail_ind.bearer_context[i].header.len += sizeof(uint8_t) + IE_HEADER_SIZE;

		set_cause_error_value(&del_fail_ind.bearer_context[i].cause, IE_INSTANCE_ZERO,
				cause_value, cause_source);
		del_fail_ind.bearer_context[i].header.len += sizeof(uint16_t) + IE_HEADER_SIZE;
	}

	/* For an inbound failure indication the teid is an SGW S5/S8 teid;
	 * otherwise it is an S11 teid. */
	if (msg->msg_type == GTP_DELETE_BEARER_FAILURE_IND) {
		ret = get_ue_context_by_sgw_s5s8_teid(rsp_info.teid, &context);
	} else {
		ret = get_ue_context(rsp_info.teid, &context);
	}
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
			" UE context for teid: %d\n", LOG_VALUE, rsp_info.teid);
	}

	/* Reset the stored response state so the failed procedure does not
	 * leave stale transaction data behind. */
	pdn_cntxt = GET_PDN(context, GET_EBI_INDEX(rsp_info.bearer_id[0]));
	if (pdn_cntxt != NULL) {
		if (get_sess_entry(pdn_cntxt->seid, &resp) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
				"for seid: %lu", LOG_VALUE, pdn_cntxt->seid);
		}
		if (resp != NULL) {
			reset_resp_info_structure(resp);
		}
	}

	payload_length = encode_del_bearer_fail_indctn(&del_fail_ind, (uint8_t *)gtpv2c_tx);

	if (context != NULL) {
		ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
		}
	}

	if(iface == S5S8_IFACE) {
		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length, s5s8_recv_sockaddr
				, REJ);
	} else if(iface == S11_IFACE) {
		gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
				s11_mme_sockaddr, REJ);
	}
}
/**
 * @brief : Dispatches a PFCP-modification failure to the error-response
 *          handler matching the GTPv2-C message the session was processing.
 * @param : resp, session response-state entry (supplies msg_type and cp_mode)
 * @param : msg, incoming message meta info forwarded to the handler
 * @param : cause_value, GTPv2-C cause to report back
 * @return : Returns nothing
 */
void
pfcp_modification_error_response(struct resp_info *resp, msg_info *msg, uint8_t cause_value)
{
	/* Session-level rejects go toward the MME (S11) unless this node is a
	 * pure PGWC; bearer-level rejects go toward Gx unless it is a pure
	 * SGWC. */
	switch (resp->msg_type) {
	case GTP_CREATE_SESSION_RSP:
		msg->cp_mode = resp->cp_mode;
		cs_error_response(msg, cause_value, CAUSE_SOURCE_SET_TO_0,
				resp->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		break;
	case GTP_MODIFY_BEARER_REQ:
		mbr_error_response(msg, cause_value, CAUSE_SOURCE_SET_TO_0,
				resp->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		break;
	case GTP_MODIFY_ACCESS_BEARER_REQ:
		mod_access_bearer_error_response(msg, cause_value,
				CAUSE_SOURCE_SET_TO_0);
		break;
	case GTP_CREATE_BEARER_REQ:
	case GTP_CREATE_BEARER_RSP:
		cbr_error_response(msg, cause_value, CAUSE_SOURCE_SET_TO_0,
				resp->cp_mode != SGWC ? GX_IFACE : S5S8_IFACE);
		break;
	case GTP_DELETE_BEARER_REQ:
	case GTP_DELETE_BEARER_RSP:
		delete_bearer_error_response(msg, cause_value, CAUSE_SOURCE_SET_TO_0,
				resp->cp_mode != SGWC ? GX_IFACE : S5S8_IFACE);
		break;
	case GTP_DELETE_SESSION_RSP:
		ds_error_response(msg, cause_value, CAUSE_SOURCE_SET_TO_0,
				resp->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		break;
	case GTP_RELEASE_ACCESS_BEARERS_REQ:
		release_access_bearer_error_response(msg, cause_value,
				CAUSE_SOURCE_SET_TO_0, S11_IFACE);
		break;
	case GTP_UPDATE_PDN_CONNECTION_SET_REQ:
		update_pdn_connection_set_error_response(msg, cause_value,
				CAUSE_SOURCE_SET_TO_0);
		break;
	default:
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"message type is not supported", LOG_VALUE);
		break;
	}
}
/**
 * @brief : Handles a Gx CCA carrying an error result: sends the matching
 *          GTPv2-C reject toward the access side based on which
 *          CC-Request-Type the CCA answers.
 * @param : cause, GTPv2-C cause value to report
 * @param : msg, incoming Gx message meta info
 * @return : Returns nothing
 */
void
gx_cca_error_response(uint8_t cause, msg_info *msg)
{
	int ret = 0;
	uint8_t ebi_index = 0;
	uint32_t call_id = 0;
	pdn_connection *pdn_cntxt = NULL;
	struct resp_info *resp = NULL;
	uint8_t cp_mode = 0;

	switch(msg->gx_msg.cca.cc_request_type){

	case INITIAL_REQUEST : {
		cs_error_response(msg, cause, CAUSE_SOURCE_SET_TO_0,
				msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		process_error_occured_handler(msg, NULL);
		break;
	}
	case UPDATE_REQUEST : {
		/* Extract the call id from session id */
		ret = retrieve_call_id((char *)msg->gx_msg.cca.session_id.val, &call_id);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Call Id "
				"found for session id: %s\n", LOG_VALUE,
				msg->gx_msg.cca.session_id.val);
			return;
		}

		/* Retrieve PDN context based on call id */
		pdn_cntxt = get_pdn_conn_entry(call_id);
		if (pdn_cntxt == NULL)
		{
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				"PDN found for CALL_ID : %u\n", LOG_VALUE, call_id);
			return;
		}

		/*Retrive the session information based on session id. */
		if (get_sess_entry(pdn_cntxt->seid, &resp) != 0){
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
				"for seid: %lu", LOG_VALUE, pdn_cntxt->seid);
			return;
		}

		/* React based on which UE-initiated procedure the CCA-U answers. */
		switch(resp->msg_type) {
		case GTP_DELETE_BEARER_CMD : {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error in "
				"CCA-U message for Delete Bearer Command with cause %s \n"
				, LOG_VALUE, cause_str(cause));
			break;
		}
		case GTP_BEARER_RESOURCE_CMD : {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error in "
				"CCA-U message for Bearer Resource Command with cause %s \n"
				, LOG_VALUE, cause_str(cause));
			send_bearer_resource_failure_indication(msg, cause, CAUSE_SOURCE_SET_TO_0,
					msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
			break;
		}
		case GTP_MODIFY_BEARER_CMD : {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error in "
				"CCA-U message for Modify Bearer Command with cause %s \n"
				, LOG_VALUE, cause_str(cause));
			modify_bearer_failure_indication(msg, cause, CAUSE_SOURCE_SET_TO_0,
					msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
			ebi_index = GET_EBI_INDEX(pdn_cntxt->default_bearer_id);
			provision_ack_ccr(pdn_cntxt, pdn_cntxt->eps_bearers[ebi_index],
					RULE_ACTION_MODIFY, RESOURCE_ALLOCATION_FAILURE);
			break;
		}
		default:
			break;
		}
		/* BUG FIX: this case previously lacked a break and fell through
		 * into TERMINATION_REQUEST, logging a misleading message. */
		break;
	}
	case TERMINATION_REQUEST : {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"CC-Request-Type:TERMINATION_REQUEST in "
			"Credit-Control message\n", LOG_VALUE);
		break;
	}
	case EVENT_REQUEST : {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"CC-Request-Type:EVENT_REQUEST in "
			"Credit-Control message\n", LOG_VALUE);
		break;
	}
	default : {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"AVP CC-Request-Type contains unexpected value in "
			"Credit-Control message\n", LOG_VALUE);
		cp_mode = msg->cp_mode;
		/* cp_mode not carried in the message: recover it from the UE
		 * context reachable through the Gx session id. */
		if(cp_mode == 0)
		{
			/* Extract the call id from session id */
			ret = retrieve_call_id((char *)msg->gx_msg.cca.session_id.val, &call_id);
			if (ret < 0)
			{
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Call Id "
					"found for session id:%s\n", LOG_VALUE,
					msg->gx_msg.cca.session_id.val);
				return;
			}

			/* Retrieve PDN context based on call id */
			if (ret == 0)
			{
				pdn_cntxt = get_pdn_conn_entry(call_id);
				if (pdn_cntxt == NULL)
				{
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
						"PDN for CALL_ID:%u\n", LOG_VALUE, call_id);
					return;
				}
			}

			if (pdn_cntxt->context != NULL) {
				cp_mode = (pdn_cntxt->context)->cp_mode;
			} else if ((msg->cp_mode) && (pdn_cntxt->context == NULL)) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get the context,"
					"Context is NULL\n", LOG_VALUE);
				return;
			}
		}
		cs_error_response(msg, cause, CAUSE_SOURCE_SET_TO_0,
				cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
		process_error_occured_handler(msg, NULL);
		break;
	}
	}
}
/**
 * @brief : Sends an Update PDN Connection Set Response carrying an error
 *          cause toward the SGW over S5/S8, and restores the PDN state.
 * @param : msg, incoming Update PDN Connection Set Request meta info
 * @param : cause_value, GTPv2-C cause code to report
 * @param : cause_source, cause source (CS) bit value
 * @return : Returns nothing
 */
void
update_pdn_connection_set_error_response(msg_info *msg, uint8_t cause_value,
		uint8_t cause_source)
{
	int ret = 0;
	ue_context *context = NULL;
	upd_pdn_conn_set_rsp_t upd_pdn_rsp = {0};
	pdn_connection *pdn = NULL;
	eps_bearer *bearer = NULL;

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	ret = get_ue_context(msg->gtpc_msg.upd_pdn_req.header.teid.has_teid.teid, &context);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
			" UE context for teid: %d\n", LOG_VALUE,
			msg->gtpc_msg.upd_pdn_req.header.teid.has_teid.teid);
		return;
	}

	/* Locate any bearer of the UE; its pdn pointer supplies the SGW
	 * S5/S8 teid and IP to reply to. */
	for(uint8_t i= 0; i< MAX_BEARERS; i++) {
		bearer = context->eps_bearers[i];
		if(bearer != NULL)
			break;
	}

	/* BUG FIX: the original dereferenced 'bearer' unconditionally; with
	 * no bearers present it stayed NULL and crashed on bearer->pdn. */
	if (bearer == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
			" bearer for teid: %d\n", LOG_VALUE,
			msg->gtpc_msg.upd_pdn_req.header.teid.has_teid.teid);
		return;
	}

	pdn = bearer->pdn;

	set_gtpv2c_teid_header((gtpv2c_header_t *) &upd_pdn_rsp, GTP_UPDATE_PDN_CONNECTION_SET_RSP,
			pdn->s5s8_sgw_gtpc_teid, msg->gtpc_msg.upd_pdn_req.header.teid.has_teid.seq, 0);

	set_cause_error_value(&upd_pdn_rsp.cause, IE_INSTANCE_ZERO,
			cause_value, cause_source);
	set_ie_header(&upd_pdn_rsp.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
			sizeof(struct cause_ie_hdr_t));

	payload_length = encode_upd_pdn_conn_set_rsp(&upd_pdn_rsp, (uint8_t *)gtpv2c_tx);

	ret = set_dest_address(pdn->s5s8_sgw_gtpc_ip, &s5s8_recv_sockaddr);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
			"IP address", LOG_VALUE);
	}

	gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
			s5s8_recv_sockaddr, SENT);

	/* copy packet for user level packet copying or li */
	if (context) {
		if (context->dupl) {
			process_pkt_for_li(
					context, S5S8_C_INTFC_OUT, tx_buf, payload_length,
					fill_ip_info(s5s8_recv_sockaddr.type,
							config.s5s8_ip.s_addr,
							config.s5s8_ip_v6.s6_addr),
					fill_ip_info(s5s8_recv_sockaddr.type,
							s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
							s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
					config.s5s8_port,
					((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
						ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
						ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
		}
	}

	pdn->state = CONNECTED_STATE;
}
/**
 * @brief : Sends a Change Notification Response with an error cause toward
 *          the peer (S5/S8 for PGWC, otherwise S11).
 * @param : msg, incoming Change Notification Request/Response meta info
 * @param : cause_value, GTPv2-C cause code to report
 * @param : cause_source, cause source (CS) bit value
 * @param : iface, fallback egress interface if no UE context is found
 * @return : Returns nothing
 */
void
change_notification_error_response(msg_info *msg, uint8_t cause_value,
		uint8_t cause_source, int iface)
{
	uint16_t teid = 0;
	int ebi_index = 0;
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
	change_noti_rsp_t change_notification_rsp = {0};

	ebi_index = GET_EBI_INDEX(msg->gtpc_msg.change_not_req.lbi.ebi_ebi);
	if (ebi_index == -1 && msg->msg_type != GTP_CHANGE_NOTIFICATION_RSP) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
	}

	/* Try S11 teid first, then the SGW S5/S8 teid. */
	if(get_ue_context(msg->gtpc_msg.change_not_req.header.teid.has_teid.teid, &context)) {
		if(get_ue_context_by_sgw_s5s8_teid(msg->gtpc_msg.change_not_rsp.header.teid.has_teid.teid,
					&context)) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
				" UE context for teid: %d \n",LOG_VALUE,
				msg->gtpc_msg.change_not_rsp.header.teid.has_teid.teid);
		}
	}

	if(context != NULL) {
		if(ebi_index != -1) {
			pdn = context->eps_bearers[ebi_index]->pdn;
			if(context->cp_mode == PGWC) {
				teid = pdn->s5s8_sgw_gtpc_teid;
			}
		}
		else {
			teid = context->s11_mme_gtpc_teid;
		}
	}

	/* BUG FIX: the original had an if(cp_mode == PGWC)/else here whose
	 * two branches were byte-identical; a single call is equivalent. */
	set_gtpv2c_teid_header((gtpv2c_header_t *) &change_notification_rsp, GTP_CHANGE_NOTIFICATION_RSP,
			teid, msg->gtpc_msg.change_not_req.header.teid.has_teid.seq, 0);

	change_notification_rsp.imsi.imsi_number_digits = msg->gtpc_msg.change_not_req.imsi.imsi_number_digits;
	set_ie_header(&change_notification_rsp.imsi.header, GTP_IE_IMSI, IE_INSTANCE_ZERO,
			msg->gtpc_msg.change_not_req.imsi.header.len);

	set_cause_error_value(&change_notification_rsp.cause, IE_INSTANCE_ZERO,
			cause_value, cause_source);

	/* A missing-mandatory-IE cause carries the offending IE type and needs
	 * the longer cause IE layout. */
	if (cause_value == GTPV2C_CAUSE_MANDATORY_IE_MISSING ) {
		set_ie_header(&change_notification_rsp.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
				sizeof(struct cause_ie));
		change_notification_rsp.cause.offend_ie_type = GTP_IE_RAT_TYPE;
		change_notification_rsp.cause.offend_ie_len = 0;
	} else {
		set_ie_header(&change_notification_rsp.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
				sizeof(struct cause_ie_hdr_t));
	}

	/*Encode Change Notification Rsp*/
	payload_length = encode_change_noti_rsp(&change_notification_rsp, (uint8_t *)gtpv2c_tx);

	/* When the UE context is known, its cp_mode overrides the caller's
	 * interface choice. */
	if(context != NULL) {
		if(context->cp_mode == PGWC)
			iface = S5S8_IFACE;
		else
			iface = S11_IFACE;
	}

	if(iface == S5S8_IFACE) {
		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length, s5s8_recv_sockaddr, REJ);
	}
	else if(iface == S11_IFACE) {
		gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
				s11_mme_sockaddr, REJ);
	}
}
/**
 * @brief : Fabricates a minimal Create Session Request context from a PDN
 *          connection and routes it into the CSR error-response path.
 * @param : pdn, PDN connection to reject (ignored when pdn/context is NULL)
 * @param : cause_value, GTPv2-C cause code to report
 * @return : Returns nothing
 */
void
send_error_resp(pdn_connection *pdn, uint8_t cause_value)
{
	msg_info msg = {0};

	if (pdn == NULL || pdn->context == NULL )
		return;

	ue_context *ue = pdn->context;

	/* Rebuild just enough of the original CSR for the error handler:
	 * bearer list, sender F-TEID, sequence number and local teid. */
	msg.msg_type = GTP_CREATE_SESSION_REQ;
	msg.gtpc_msg.csr.bearer_count = pdn->num_bearer;
	for (uint8_t i = 0; i < pdn->num_bearer; i++) {
		msg.gtpc_msg.csr.bearer_contexts_to_be_created[i].header.len = 1;
		msg.gtpc_msg.csr.bearer_contexts_to_be_created[i].eps_bearer_id.ebi_ebi =
			pdn->default_bearer_id;
	}

	msg.gtpc_msg.csr.sender_fteid_ctl_plane.teid_gre_key = ue->s11_mme_gtpc_teid;
	msg.gtpc_msg.csr.header.teid.has_teid.seq = pdn->csr_sequence;
	msg.gtpc_msg.csr.header.teid.has_teid.teid = ue->s11_sgw_gtpc_teid;
	msg.cp_mode = ue->cp_mode;

	int dst_iface;
	if (ue->cp_mode != PGWC)
		dst_iface = S11_IFACE;
	else
		dst_iface = S5S8_IFACE;

	cs_error_response(&msg, cause_value, CAUSE_SOURCE_SET_TO_0, dst_iface);
}
/**
 * @brief : Sends a Release Access Bearers Response with an error cause
 *          toward the MME over S11.
 * @param : msg, incoming Release Access Bearers Request meta info
 * @param : cause_value, GTPv2-C cause code to report
 * @param : cause_source, cause source (CS) bit value
 * @param : iface, egress interface; only S11_IFACE triggers a send
 * @return : Returns nothing
 */
void
release_access_bearer_error_response(msg_info *msg, uint8_t cause_value,
		uint8_t cause_source, int iface)
{
	uint32_t mme_teid = 0;
	ue_context *ue = NULL;
	release_access_bearer_resp_t rsp = {0};

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;

	int rc = get_ue_context(msg->gtpc_msg.rel_acc_ber_req.header.teid.has_teid.teid, &ue);
	if (rc >= 0) {
		/* Known UE: address the response at the MME's teid and IP. */
		mme_teid = ue->s11_mme_gtpc_teid;
		rc = set_dest_address(ue->s11_mme_gtpc_ip, &s11_mme_sockaddr);
		if (rc < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
		}
	} else {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
			"Ue context for teid: %d \n", LOG_VALUE, msg->gtpc_msg.rel_acc_ber_req.header.teid.has_teid.teid);
	}

	set_gtpv2c_teid_header((gtpv2c_header_t *) &rsp, GTP_RELEASE_ACCESS_BEARERS_RSP,
			mme_teid, msg->gtpc_msg.rel_acc_ber_req.header.teid.has_teid.seq, NOT_PIGGYBACKED);
	set_cause_error_value(&rsp.cause, IE_INSTANCE_ZERO,
			cause_value, cause_source);

	payload_length = encode_release_access_bearers_rsp(&rsp, (uint8_t *)gtpv2c_tx);

	if (iface == S11_IFACE) {
		gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
				s11_mme_sockaddr, REJ);
	}
}
/**
 * @brief : Removes this UE's pending-CSR entry from the UPF context and,
 *          when no pending CSRs remain on a not-yet-associated UPF, tears
 *          the whole UPF context down.
 * @param : pdn, PDN connection (supplies the UPF IP key)
 * @param : context, UE context (its S11 SGW teid identifies the pending CSR)
 * @return : Returns nothing
 */
void
clean_up_upf_context(pdn_connection *pdn, ue_context *context)
{
	upf_context_t *upf_context = NULL;
	context_key *key = NULL;

	if ((upf_context_entry_lookup(pdn->upf_ip, &upf_context)) == 0) {

		/* Only clean up while association with the UPF is still pending. */
		if (upf_context->state < PFCP_ASSOC_RESP_RCVD_STATE) {

			/* Drop this UE's entry from the pending CSR list. */
			for (uint8_t i = 0; i < upf_context->csr_cnt; i++) {
				key = (context_key *) upf_context->pending_csr_teid[i];
				if (key != NULL ) {
					if(key->teid == context->s11_sgw_gtpc_teid ) {
						rte_free(upf_context->pending_csr_teid[i]);
						upf_context->pending_csr_teid[i] = NULL;
						break;
					}
				}
			}

			/* BUG FIX: guard csr_cnt == 0 — the original indexed
			 * pending_csr_teid[csr_cnt - 1], an out-of-bounds read
			 * when the list was empty. */
			if (upf_context->csr_cnt == 0 ||
					upf_context->pending_csr_teid[upf_context->csr_cnt - 1] == NULL) {
				/* Delete entry from teid info list for given upf*/
				delete_entry_from_teid_list(pdn->upf_ip, &upf_teid_info_head);

				rte_hash_del_key(upf_context_by_ip_hash, (const void *) &pdn->upf_ip);

				if (upf_context != NULL) {
					rte_free(upf_context);
					upf_context = NULL;
				}
			}
		}
	}
	return;
}
/**
 * @brief : Removes a UE context from the imsi and fteid hashes and frees it.
 *          When teid is 0 the context is first located via the imsi hash.
 * @param : context, UE context to free (may be looked up when teid == 0)
 * @param : teid, SGW S11 teid key; 0 means "look up by imsi instead"
 * @param : imsi_val, pointer to the imsi key
 * @param : error_status, selects the error code returned on a failed lookup
 * @return : 0 on success, -1 (error_status == True) or -2 on lookup failure
 */
int
clean_context_hash(ue_context *context, uint32_t teid, uint64_t *imsi_val, bool error_status)
{
	int ret = 0;
	if (teid == 0) {
		/* BUG FIX: the original declared a shadowing local 'context' and
		 * passed (void **) &(*context) — i.e. the NULL pointer itself —
		 * to rte_hash_lookup_data, so the looked-up context was written
		 * through NULL / discarded. Reuse the parameter instead. */
		ret = rte_hash_lookup_data(ue_context_by_imsi_hash, imsi_val, (void **) &context);
		if (ret == -ENOENT) {
			clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"No data found for %x imsi\n", LOG_VALUE, *imsi_val);
			if ( error_status == True )
				return -1;
			else
				return -2;
		}
	}

	rte_hash_del_key(ue_context_by_imsi_hash, (const void *) imsi_val);
	rte_hash_del_key(ue_context_by_fteid_hash, (const void *) &teid);

	if (context != NULL) {
		rte_free(context);
		context = NULL;
	}
	return 0;
}
/**
 * @brief : Builds and sends a GTPv2-C Modify Bearer Failure Indication and
 *          resets the session response state for the failed procedure.
 * @param : msg, incoming message meta info (source of teid/seq/bearers)
 * @param : cause_value, GTPv2-C cause code to report
 * @param : cause_source, cause source (CS) bit value
 * @param : iface, fallback egress interface if no UE context is found
 * @return : Returns nothing
 */
void
modify_bearer_failure_indication(msg_info *msg, uint8_t cause_value,
		uint8_t cause_source, int iface) {

	int ret = 0;
	err_rsp_info rsp_info = {0};
	ue_context *context = NULL;

	get_error_rsp_info(msg, &rsp_info, 0);

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
	mod_bearer_fail_indctn_t mod_fail_ind = {0};
	struct resp_info *resp = NULL;
	pdn_connection *pdn_cntxt = NULL;

	/* BUG FIX: the original repeated this exact lookup a second time
	 * further down, producing duplicate error logs; one lookup suffices. */
	if (msg->msg_type == GTP_MODIFY_BEARER_FAILURE_IND) {
		ret = get_ue_context_by_sgw_s5s8_teid(rsp_info.teid, &context);
	} else {
		ret = get_ue_context(rsp_info.teid, &context);
	}
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
			" UE context for teid: %d\n", LOG_VALUE, rsp_info.teid);
	}

	if(context != NULL) {
		rsp_info.seq = context->ue_initiated_seq_no;
		rsp_info.sender_teid = context->s11_mme_gtpc_teid;
	}

	if(msg->msg_type == GTP_MODIFY_BEARER_CMD) {
		/* NOTE(review): this reads the bearer_rsrc_cmd union member for a
		 * Modify Bearer Command; the header lies at the same offset in
		 * the gtpc_msg union, presumably intentional — confirm. */
		rsp_info.seq = msg->gtpc_msg.bearer_rsrc_cmd.header.teid.has_teid.seq;
	}

	set_gtpv2c_teid_header(&mod_fail_ind.header,
			GTP_MODIFY_BEARER_FAILURE_IND,
			rsp_info.sender_teid,
			rsp_info.seq, 0);

	set_cause_error_value(&mod_fail_ind.cause, IE_INSTANCE_ZERO,
			cause_value, cause_source);

	/* A missing-mandatory-IE cause carries the offending IE type and needs
	 * the longer cause IE layout. */
	if (cause_value == GTPV2C_CAUSE_MANDATORY_IE_MISSING ) {
		set_ie_header(&mod_fail_ind.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
				sizeof(struct cause_ie));
		mod_fail_ind.cause.offend_ie_type = rsp_info.offending;
		mod_fail_ind.cause.offend_ie_len = 0;
	}
	else {
		set_ie_header(&mod_fail_ind.cause.header, GTP_IE_CAUSE, IE_INSTANCE_ZERO,
				sizeof(struct cause_ie_hdr_t));
	}

	/* Reset the stored response state for the failed procedure. */
	pdn_cntxt = GET_PDN(context, GET_EBI_INDEX(rsp_info.bearer_id[0]));
	if (pdn_cntxt != NULL) {
		if (get_sess_entry(pdn_cntxt->seid, &resp) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
				"for seid: %lu", LOG_VALUE, pdn_cntxt->seid);
		}
		if (resp != NULL) {
			reset_resp_info_structure(resp);
		}
	}

	uint16_t msg_len = 0;
	msg_len = encode_mod_bearer_fail_indctn(&mod_fail_ind, (uint8_t *)gtpv2c_tx);
	gtpv2c_tx->gtpc.message_len = htons(msg_len - IE_HEADER_SIZE);
	payload_length = ntohs(gtpv2c_tx->gtpc.message_len) + sizeof(gtpv2c_tx->gtpc);

	if (context != NULL) {
		ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
		}
	}

	/* When the UE context is known, its cp_mode overrides the caller's
	 * interface choice. */
	if(context != NULL) {
		if(context->cp_mode == PGWC)
			iface = S5S8_IFACE;
		else
			iface = S11_IFACE;
	}

	if(iface == S5S8_IFACE) {
		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length, s5s8_recv_sockaddr, REJ);
	} else if(iface == S11_IFACE) {
		gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
				s11_mme_sockaddr, REJ);
	}
}
/**
 * @brief : Tears down a UE's session state after a fatal error: pending
 *          UPF CSR entries, the session response entry, all bearers, the
 *          PDN connection and — when no PDNs remain — the UE context itself.
 * @param : teid, SGW S11 teid identifying the UE context
 * @param : ebi_index, index of the bearer whose PDN is being torn down
 * @return : 0 on success, -1 when the PDN or session entry is missing
 */
int cleanup_ue_and_bearer(uint32_t teid, int ebi_index)
{
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	struct resp_info *resp = NULL;
	upf_context_t *upf_ctx = NULL;

	if (get_ue_context_while_error(teid, &context) == 0){

		pdn = GET_PDN(context ,ebi_index);
		if(pdn == NULL){
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
			return -1;
		}

		if ((upf_context_entry_lookup(pdn->upf_ip, &upf_ctx)) == 0) {
			if(upf_ctx->state < PFCP_ASSOC_RESP_RCVD_STATE){
				/* Delete entry from teid info list for given upf*/
				delete_entry_from_teid_list(pdn->upf_ip, &upf_teid_info_head);

				rte_hash_del_key(upf_context_by_ip_hash, (const void *) &pdn->upf_ip);

				/* BUG FIX: the original decremented csr_cnt inside a loop
				 * bounded by csr_cnt, so only half the pending entries
				 * were freed. Snapshot the count, free them all, then
				 * zero the counter. */
				uint8_t pending_cnt = upf_ctx->csr_cnt;
				for (uint8_t i = 0; i < pending_cnt; i++) {
					if(upf_ctx->pending_csr_teid[i] != NULL){
						rte_free(upf_ctx->pending_csr_teid[i]);
						upf_ctx->pending_csr_teid[i] = NULL;
					}
				}
				upf_ctx->csr_cnt = 0;

				if (upf_ctx != NULL) {
					rte_free(upf_ctx);
					upf_ctx = NULL;
				}
			}
		}

		if (get_sess_entry(pdn->seid, &resp) == 0) {
			if(context->piggyback == TRUE) {
				delete_dedicated_bearers(pdn,
						resp->eps_bearer_ids, resp->bearer_count);
			}
			rte_hash_del_key(sm_hash, (const void *) &(pdn->seid));
			if (resp != NULL) {
				rte_free(resp);
				resp = NULL;
			}
		} else {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
				"for seid: %lu", LOG_VALUE, pdn->seid);
			return -1;
		}

		/* Release every bearer held by the context/PDN. */
		for(int8_t idx = 0; idx < MAX_BEARERS; idx++) {
			if(context->eps_bearers[idx] == NULL) {
				continue;
			}else {
				if(context->eps_bearers[idx] != NULL){
					rte_free(pdn->eps_bearers[idx]);
					pdn->eps_bearers[idx] = NULL;
					context->eps_bearers[idx] = NULL;
					if(pdn->num_bearer != 0) {
						pdn->num_bearer--;
					}
				}
			}
		}

		if(pdn->num_bearer == 0){
			if(pdn->s5s8_sgw_gtpc_teid != 0) {
				rte_hash_del_key(bearer_by_fteid_hash, (const void *)
						&(pdn->s5s8_sgw_gtpc_teid));
			}
			if(pdn != NULL) {
				rte_free(pdn);
				pdn = NULL;
				context->num_pdns --;
			}
		}

		if (context->num_pdns == 0){
			rte_hash_del_key(ue_context_by_imsi_hash,(const void *) &(*context).imsi);
			rte_hash_del_key(ue_context_by_fteid_hash,(const void *) &teid);
			if(context != NULL )
				rte_free(context);
			context = NULL;
		}
	}
	return 0;
}
/**
 * @brief : Initiates bearer cleanup by sending a Delete Bearer Request for
 *          the given linked EBI, then marks the PDN/context cleanup state.
 * @param : pdn, PDN connection the bearer belongs to
 * @param : context, UE context (cp_mode selects the S5/S8 or S11 peer)
 * @param : lbi, linked EPS bearer id to delete
 * @return : Returns nothing
 */
void delete_bearer_request_cleanup(pdn_connection *pdn, ue_context *context, uint8_t lbi) {

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
	uint32_t seq_no = generate_seq_number();
	int ret = 0;

	set_delete_bearer_request(gtpv2c_tx, seq_no,
			pdn, lbi, 0, 0, 1);

	uint16_t payload_len = 0;
	payload_len = ntohs(gtpv2c_tx->gtpc.message_len)
		+ sizeof(gtpv2c_tx->gtpc);

	if( PGWC == context->cp_mode ) {
		ret = set_dest_address(pdn->s5s8_sgw_gtpc_ip, &s5s8_recv_sockaddr);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
		}
		gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_len,
				s5s8_recv_sockaddr, REJ);
	} else {
		ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
		}
		/* BUG FIX: the S11 branch previously passed the stale global
		 * 'payload_length' instead of the length computed above. */
		gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_len,
				s11_mme_sockaddr, REJ);
	}

	pdn->state = DELETE_BER_REQ_SNT_STATE;
	context->mbc_cleanup_status = PRESENT;
}
/**
 * @brief : Re-sends the stored Delete Session Request toward the PGW over
 *          S5/S8 after a retry timer fires, and updates session state.
 * @param : context, UE context owning the session (no-op when NULL)
 * @param : ebi_index, index of the bearer whose PDN the DSR belongs to
 * @return : Returns nothing
 */
void send_delete_session_request_after_timer_retry(ue_context *context, int ebi_index)
{
	uint8_t encoded_msg[GTP_MSG_LEN] = {0};
	int ret = 0;
	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header_t *gtpv2c_tx = (gtpv2c_header_t *)tx_buf;
	pdn_connection *pdn = NULL;
	struct resp_info *resp = NULL;

	/* BUG FIX: the original only guarded the lookups with
	 * if (context != NULL) and then fell through to dereference the
	 * still-NULL 'resp'/'pdn' when context was NULL. Bail out early. */
	if (context == NULL)
		return;

	pdn = context->eps_bearers[ebi_index]->pdn;
	if (pdn != NULL)
	{
		ret = set_dest_address(pdn->s5s8_pgw_gtpc_ip, &s5s8_recv_sockaddr);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
		}
		if (get_sess_entry(pdn->seid, &resp) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session Entry Found "
				"while sending DSR, session ID:%lu\n", LOG_VALUE, pdn->seid);
		}
	}
	if(pdn == NULL || resp == NULL)
	{
		return;
	}

	/* Indication flags not required in DSR for PGWC */
	resp->gtpc_msg.dsr.indctn_flgs.header.len = 0;

	encode_del_sess_req((del_sess_req_t *)&(resp->gtpc_msg.dsr), encoded_msg);

	gtpv2c_header *header;
	header =(gtpv2c_header*) encoded_msg;

	gen_sgwc_s5s8_delete_session_request((gtpv2c_header_t *)encoded_msg,
			gtpv2c_tx, htonl(pdn->s5s8_pgw_gtpc_teid),
			header->teid_u.has_teid.seq,
			resp->linked_eps_bearer_id);

	/* Update the session state */
	resp->state = DS_REQ_SNT_STATE;
	update_ue_state(context, DS_REQ_SNT_STATE, ebi_index);

	uint16_t payload_length = ntohs(gtpv2c_tx->gtpc.message_len) + sizeof(gtpv2c_tx->gtpc);

	gtpv2c_send(s5s8_fd, s5s8_fd_v6, tx_buf, payload_length,
			s5s8_recv_sockaddr, SENT);

	/* copy packet for user level packet copying or li */
	if (context->dupl) {
		process_pkt_for_li(
				context, S5S8_C_INTFC_OUT, tx_buf, payload_length,
				fill_ip_info(s5s8_recv_sockaddr.type,
						config.s5s8_ip.s_addr,
						config.s5s8_ip_v6.s6_addr),
				fill_ip_info(s5s8_recv_sockaddr.type,
						s5s8_recv_sockaddr.ipv4.sin_addr.s_addr,
						s5s8_recv_sockaddr.ipv6.sin6_addr.s6_addr),
				config.s5s8_port,
				((s5s8_recv_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s5s8_recv_sockaddr.ipv4.sin_port) :
					ntohs(s5s8_recv_sockaddr.ipv6.sin6_port)));
	}
}
/**
 * @brief : Sends a Create Indirect Data Forwarding Tunnel Response with an
 *          error cause toward the MME over S11, after cleaning up the
 *          indirect-tunnel state.
 * @param : msg, incoming request meta info (source of teid/seq/bearers)
 * @param : cause_value, GTPv2-C cause code to report
 * @return : Returns nothing
 */
void
crt_indir_data_frwd_tun_error_response(msg_info *msg, uint8_t cause_value)
{
	/* NOTE(review): 'context' is declared here but never looked up or
	 * assigned anywhere in this function, so every `if (context)` block
	 * below is dead, node_value stays zeroed, and the final
	 * set_dest_address() is skipped — confirm whether a get_ue_context()
	 * style lookup was intended. */
	ue_context *context = NULL;
	node_address_t node_value = {0};
	int ret = 0;
	err_rsp_info rsp_info = {0};
	get_error_rsp_info(msg, &rsp_info, 0);

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header *gtpv2c_tx = (gtpv2c_header *) tx_buf;
	create_indir_data_fwdng_tunn_rsp_t crt_resp = {0};

	set_gtpv2c_teid_header(&crt_resp.header,
			GTP_CREATE_INDIRECT_DATA_FORWARDING_TUNNEL_RSP,
			rsp_info.sender_teid,
			rsp_info.seq, 0);

	set_cause_error_value(&crt_resp.cause, IE_INSTANCE_ZERO, cause_value,
			CAUSE_SOURCE_SET_TO_0);

	/* One bearer-context IE (EBI + cause + DL/UL forwarding F-TEIDs) per
	 * reported bearer. */
	for(uint8_t i = 0; i < rsp_info.bearer_count; i++){

		set_ie_header(&crt_resp.bearer_contexts[i].header, GTP_IE_BEARER_CONTEXT,
				IE_INSTANCE_ZERO, 0);

		set_ebi(&crt_resp.bearer_contexts[i].eps_bearer_id, IE_INSTANCE_ZERO,
				rsp_info.bearer_id[i]);
		crt_resp.bearer_contexts[i].header.len += sizeof(uint8_t) + IE_HEADER_SIZE;

		set_cause_error_value(&crt_resp.bearer_contexts[i].cause, IE_INSTANCE_ZERO, cause_value,
				CAUSE_SOURCE_SET_TO_0);
		crt_resp.bearer_contexts[i].header.len += sizeof(struct cause_ie_hdr_t) + IE_HEADER_SIZE;

		/* Dead while 'context' is never assigned — see note above. */
		if (context) {
			uint8_t ebi = rsp_info.bearer_id[i] - 5;
			ret = fill_ip_addr(context->eps_bearers[ebi]->s1u_sgw_gtpu_ip.ipv4_addr,
					context->eps_bearers[ebi]->s1u_sgw_gtpu_ip.ipv6_addr,
					&node_value);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
			}
		}

		/* NOTE(review): both F-TEIDs below use the DL interface type
		 * (GTPV2C_IFTYPE_SGW_GTPU_DL_DATA_FRWD); the IE_INSTANCE_FIVE
		 * one presumably should be the UL variant — verify. */
		crt_resp.bearer_contexts[i].header.len +=
			set_gtpc_fteid(&crt_resp.bearer_contexts[i].sgw_fteid_dl_data_fwdng,
					GTPV2C_IFTYPE_SGW_GTPU_DL_DATA_FRWD, IE_INSTANCE_THREE, node_value,
					rsp_info.teid);
		crt_resp.bearer_contexts[i].header.len +=
			set_gtpc_fteid(&crt_resp.bearer_contexts[i].sgw_fteid_ul_data_fwdng,
					GTPV2C_IFTYPE_SGW_GTPU_DL_DATA_FRWD, IE_INSTANCE_FIVE, node_value,
					rsp_info.teid);
	}

	cleanup_for_indirect_tunnel(&rsp_info);

	uint16_t msg_len = 0;
	msg_len = encode_create_indir_data_fwdng_tunn_rsp(&crt_resp,(uint8_t *)gtpv2c_tx);
	gtpv2c_tx->gtpc.length = htons(msg_len - 4);
	payload_length = ntohs(gtpv2c_tx->gtpc.length) + sizeof(gtpv2c_tx->gtpc);

	if(context){
		ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
		}
	}

	gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
			s11_mme_sockaddr, REJ);
}
/**
 * @brief : Sends a Delete Indirect Data Forwarding Tunnel Response carrying
 *          an error cause toward the MME over S11.
 * @param : msg, incoming request meta info (source of teid and sequence)
 * @param : cause_value, GTPv2-C cause code to report
 * @return : Returns nothing
 */
void
delete_indir_data_frwd_error_response(msg_info *msg, uint8_t cause_value)
{
	err_rsp_info rsp_info = {0};
	del_indir_data_fwdng_tunn_resp_t rsp = {0};

	get_error_rsp_info(msg, &rsp_info, 0);

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header *gtpv2c_tx = (gtpv2c_header *) tx_buf;

	set_gtpv2c_teid_header(&rsp.header,
			GTP_DELETE_INDIRECT_DATA_FORWARDING_TUNNEL_RSP,
			rsp_info.teid,
			rsp_info.seq, 0);
	set_cause_error_value(&rsp.cause, IE_INSTANCE_ZERO, cause_value, CAUSE_SOURCE_SET_TO_0);

	/* Encode, then patch the length field in the serialized header. */
	uint16_t encoded_len = encode_del_indir_data_fwdng_tunn_rsp(&rsp, (uint8_t *)gtpv2c_tx);
	gtpv2c_header_t *hdr = (gtpv2c_header_t *) gtpv2c_tx;
	hdr->gtpc.message_len = htons(encoded_len - 4);
	payload_length = ntohs(gtpv2c_tx->gtpc.length) + sizeof(gtpv2c_tx->gtpc);

	gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
			s11_mme_sockaddr,REJ);
}
/**
 * @brief : Tears down the indirect-forwarding PDN attached to a UE context:
 *          removes the not-yet-associated UPF context, frees the PDN's
 *          bearers and the PDN itself, and drops the UE's hash entries when
 *          no PDNs remain.
 * @param : resp, error-response info (sender_teid keys the UE context)
 * @return : Returns nothing
 */
void
cleanup_for_indirect_tunnel(err_rsp_info *resp)
{
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	upf_context_t *upf_context = NULL;
	int ret;

	ret = rte_hash_lookup_data(ue_context_by_sender_teid_hash,
			&resp->sender_teid, (void **) &context);
	/* Nothing to clean if the sender teid is unknown. */
	if( ret < 0){
		return;
	}
	pdn = context->indirect_tunnel->pdn;

	/* Remove the UPF context only while association is still pending. */
	ret = rte_hash_lookup_data(upf_context_by_ip_hash,
			(const void*) &(pdn->upf_ip), (void **) &(upf_context));
	if (ret >= 0) {
		if (upf_context->state < PFCP_ASSOC_RESP_RCVD_STATE) {
			rte_hash_del_key(upf_context_by_ip_hash,
					(const void *) &pdn->upf_ip);
			rte_free(upf_context);
			upf_context = NULL;
		}
	}

	/* NOTE(review): the loop below uses the response bearer count as a
	 * direct index into eps_bearers — assumes the indirect-tunnel bearers
	 * occupy slots 0..bearer_count-1. Freed slots are not NULLed, but the
	 * pdn itself is freed immediately afterwards. Confirm against the
	 * indirect-tunnel setup path. */
	for( uint8_t i = 0; i < resp->bearer_count; i++) {
		if(pdn->eps_bearers[i] != NULL){
			rte_free(pdn->eps_bearers[i]);
			pdn->num_bearer -- ;
		}
	}

	rte_free(pdn);
	pdn = NULL;
	context->num_pdns--;

	rte_hash_del_key(ue_context_by_sender_teid_hash, &context->s11_mme_gtpc_teid);

	/* Last PDN gone: drop the UE context entirely. */
	if(context->num_pdns == 0){
		rte_hash_del_key(ue_context_by_imsi_hash,(const void *) &context->imsi);
		rte_hash_del_key(ue_context_by_fteid_hash,(const void *) &resp->teid);
		rte_free(context);
		context = NULL;
	}
}
/**
 * @brief : Sends a Modify Access Bearers Response with an error cause toward
 *          the MME over S11, listing every affected bearer, and resets the
 *          stored session response state.
 * @param : msg, incoming request meta info (source of teid/seq/bearers)
 * @param : cause_value, GTPv2-C cause code to report
 * @param : cause_source, cause source (CS) bit value
 * @return : Returns nothing
 */
void mod_access_bearer_error_response(msg_info *msg, uint8_t cause_value,
		uint8_t cause_source)
{
	ue_context *context = NULL;
	err_rsp_info rsp_info = {0};
	int ret = 0;
	pdn_connection *pdn_cntxt = NULL;
	get_error_rsp_info(msg, &rsp_info, 0);
	struct resp_info *resp = NULL;

	bzero(&tx_buf, sizeof(tx_buf));
	gtpv2c_header *gtpv2c_tx = (gtpv2c_header *) tx_buf;
	mod_acc_bearers_rsp_t mod_acc_resp = {0};

	set_gtpv2c_teid_header(&mod_acc_resp.header,
			GTP_MODIFY_ACCESS_BEARER_RSP,
			rsp_info.sender_teid,
			rsp_info.seq, 0);

	set_cause_error_value(&mod_acc_resp.cause, IE_INSTANCE_ZERO, cause_value,
			cause_source);

	/* BUG FIX: the lookup is loop-invariant; the original repeated it for
	 * every bearer (duplicate error logs) and never populated 'context'
	 * at all when bearer_count was 0, crashing on the set_dest_address()
	 * call below. */
	if (get_ue_context(rsp_info.teid, &context) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
			" UE context for teid: %d\n",LOG_VALUE, rsp_info.teid);
	}

	for (uint8_t uiCnt = 0; uiCnt < rsp_info.bearer_count; ++ uiCnt) {
		set_ie_header(&mod_acc_resp.bearer_contexts_modified[uiCnt].header,
				GTP_IE_BEARER_CONTEXT, IE_INSTANCE_ZERO, 0);
		set_cause_error_value(&mod_acc_resp.bearer_contexts_modified[uiCnt].cause,
				IE_INSTANCE_ZERO, cause_value, cause_source);
		mod_acc_resp.bearer_contexts_modified[uiCnt].header.len += sizeof(struct cause_ie_hdr_t) +
			IE_HEADER_SIZE;

		set_ebi(&mod_acc_resp.bearer_contexts_modified[uiCnt].eps_bearer_id, IE_INSTANCE_ZERO,
				rsp_info.bearer_id[uiCnt]);
		mod_acc_resp.bearer_contexts_modified[uiCnt].header.len += sizeof(uint8_t) + IE_HEADER_SIZE;

		node_address_t node_value = {0};
		if (context) {
			int ebi_index = GET_EBI_INDEX(rsp_info.bearer_id[uiCnt]);
			if (ebi_index == -1) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
			}
			/* BUG FIX: the original tested 'ebi_index > 0', skipping the
			 * valid index 0 (default bearer, EBI 5); it also checked a
			 * stale 'ret' even when fill_ip_addr() was not called. */
			if (ebi_index >= 0 && context->eps_bearers[ebi_index] != NULL) {
				ret = fill_ip_addr(context->eps_bearers[ebi_index]->s1u_enb_gtpu_ip.ipv4_addr,
						context->eps_bearers[ebi_index]->s1u_enb_gtpu_ip.ipv6_addr,
						&node_value);
				if (ret < 0) {
					clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
						"IP address", LOG_VALUE);
				}
			}
		}

		set_gtpc_fteid(&mod_acc_resp.bearer_contexts_modified[uiCnt].s1u_sgw_fteid,
				GTPV2C_IFTYPE_S1U_SGW_GTPU, IE_INSTANCE_ZERO, node_value,
				rsp_info.teid);
		mod_acc_resp.bearer_contexts_modified[uiCnt].header.len += sizeof(struct fteid_ie_hdr_t) +
			sizeof(struct in_addr) + IE_HEADER_SIZE;
	}

	/* Reset the stored response state for the failed procedure. */
	pdn_cntxt = GET_PDN(context, GET_EBI_INDEX(rsp_info.bearer_id[0]));
	if (pdn_cntxt != NULL) {
		if (get_sess_entry(pdn_cntxt->seid, &resp) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session Entry "
				"Found for sess ID: %lu\n", LOG_VALUE, pdn_cntxt->seid);
		}
		if (resp != NULL) {
			reset_resp_info_structure(resp);
		}
	}

	uint16_t msg_len = 0;
	msg_len = encode_mod_acc_bearers_rsp(&mod_acc_resp, (uint8_t *)gtpv2c_tx);
	gtpv2c_tx->gtpc.length = htons(msg_len - 4);
	payload_length = ntohs(gtpv2c_tx->gtpc.length) + sizeof(gtpv2c_tx->gtpc);

	/* BUG FIX: guard the NULL-context case before dereferencing. */
	if (context != NULL) {
		ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
		}
	}

	gtpv2c_send(s11_fd, s11_fd_v6, tx_buf, payload_length,
			s11_mme_sockaddr, REJ);

	/* copy packet for user level packet copying or li */
	if (context != NULL && context->dupl) {
		process_pkt_for_li(
				context, S11_INTFC_OUT, tx_buf, payload_length,
				fill_ip_info(s11_mme_sockaddr.type,
						config.s11_ip.s_addr,
						config.s11_ip_v6.s6_addr),
				fill_ip_info(s11_mme_sockaddr.type,
						s11_mme_sockaddr.ipv4.sin_addr.s_addr,
						s11_mme_sockaddr.ipv6.sin6_addr.s6_addr),
				config.s11_port,
				((s11_mme_sockaddr.type == IPTYPE_IPV4_LI) ?
					ntohs(s11_mme_sockaddr.ipv4.sin_port) :
					ntohs(s11_mme_sockaddr.ipv6.sin6_port)));
	}
}
|
nikhilc149/e-utran-features-bug-fixes | dp/gtpu_echo_req.c | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rte_log.h>
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include "ipv4.h"
#include "ipv6.h"
#include "gtpu.h"
#include "util.h"
#include "pfcp_util.h"
#include "gw_adapter.h"
#define IP_PROTO_UDP 17
#define UDP_PORT_GTPU 2152
#define GTPU_OFFSET 50
#define GTPu_VERSION 0x20
#define GTPu_PT_FLAG 0x10
#define GTPu_E_FLAG 0x04
#define GTPu_S_FLAG 0x02
#define GTPu_PN_FLAG 0x01
#define PKT_SIZE 54
extern int clSystemLog;
/**
 * @brief : Compute the L3/L4 checksums of a finalized echo packet.
 * @param : echo_pkt, mbuf whose Ethernet/IP/UDP/GTP-U headers are filled in
 * @param : ip_type, IPV6_TYPE selects the IPv6 layout, anything else IPv4
 * @return : Returns nothing
 */
static void set_checksum(struct rte_mbuf *echo_pkt, uint8_t ip_type)
{
	if (ip_type != IPV6_TYPE) {
		/* IPv4: zero both fields, compute UDP checksum over the
		 * pseudo-header first, then the IPv4 header checksum. */
		struct ipv4_hdr *ip4 = get_mtoip(echo_pkt);
		struct udp_hdr *udp = get_mtoudp(echo_pkt);

		ip4->hdr_checksum = 0;
		udp->dgram_cksum = 0;
		udp->dgram_cksum = rte_ipv4_udptcp_cksum(ip4, udp);
		ip4->hdr_checksum = rte_ipv4_cksum(ip4);
	} else {
		/* IPv6 carries no header checksum; only UDP needs one. */
		struct ipv6_hdr *ip6 = get_mtoip_v6(echo_pkt);
		struct udp_hdr *udp = get_mtoudp_v6(echo_pkt);

		udp->dgram_cksum = 0;
		udp->dgram_cksum = rte_ipv6_udptcp_cksum(ip6, udp);
	}
}
/**
 * @brief : Fill the GTP-U header of an echo packet in place.
 * @param : m, mbuf already sized to hold Ethernet/IP/UDP/GTP-U headers
 * @param : gtpu_seqnb, sequence number to stamp into the header
 * @param : type, GTP-U message type
 * @param : ip_type, IPV6_TYPE or IPv4 — selects the header offset
 * @return : Returns nothing
 */
static __inline__ void encap_gtpu_hdr(struct rte_mbuf *m, uint16_t gtpu_seqnb,
		uint8_t type, uint8_t ip_type)
{
	/* Offset of the GTP-U header = Ethernet + IP + UDP. */
	uint16_t hdrs_len = (ip_type == IPV6_TYPE) ?
			(ETH_HDR_LEN + IPv6_HDR_SIZE + UDP_HDR_LEN) :
			(ETH_HDR_LEN + IPV4_HDR_LEN + UDP_HDR_LEN);
	gtpuHdr_t *hdr = (gtpuHdr_t *)
			(rte_pktmbuf_mtod(m, unsigned char *) + hdrs_len);
	/* tot_len counts the bytes that follow the mandatory GTP-U header. */
	uint16_t payload_len = rte_pktmbuf_data_len(m) - hdrs_len - GTPU_HDR_LEN;

	hdr->version_flags = (GTPU_VERSION << 5) |
			(GTP_PROTOCOL_TYPE_GTP << 4) | (GTP_FLAG_SEQNB);
	hdr->msg_type = type;
	hdr->teid = htonl(0);	/* echo messages always use TEID 0 */
	hdr->seq_no = htons(gtpu_seqnb);
	hdr->tot_len = htons(payload_len);
}
/**
 * @brief : Fill the UDP header of an echo packet in place.
 * @param : m, mbuf already sized to hold all headers
 * @param : entry, peer node information (unused here, kept for symmetry
 *          with the other header builders)
 * @param : ip_type, IPV6_TYPE or IPv4 — selects the header offset
 * @return : Returns nothing
 */
static __inline__ void create_udp_hdr(struct rte_mbuf *m, peerData *entry,
		uint8_t ip_type)
{
	uint16_t ip_hdr_len = (ip_type == IPV6_TYPE) ? IPv6_HDR_SIZE : IPV4_HDR_LEN;
	struct udp_hdr *udp = (struct udp_hdr *)
			(rte_pktmbuf_mtod(m, unsigned char *) + ETH_HDR_LEN + ip_hdr_len);
	/* Datagram length covers the UDP header plus everything after it. */
	uint16_t dgram_len = rte_pktmbuf_data_len(m) - ETH_HDR_LEN - ip_hdr_len;

	udp->src_port = htons(UDP_PORT_GTPU);
	udp->dst_port = htons(UDP_PORT_GTPU);
	udp->dgram_len = htons(dgram_len);
	udp->dgram_cksum = 0;	/* computed later by set_checksum() */
}
/**
 * @brief : Create and initialize the IP header (IPv4 or IPv6) of an
 *          echo/end-marker packet, directly after the Ethernet header.
 * @param : m, rte_mbuf pointer, already sized to the final packet length
 * @param : entry, peer node information (source/destination addresses)
 * @param : IP header type, IPv4 or IPv6
 * @return : Returns nothing
 */
static __inline__ void create_ip_hdr(struct rte_mbuf *m, peerData *entry,
		uint8_t ip_type)
{
	/* Total IP datagram length = frame length minus the Ethernet header. */
	uint16_t len = rte_pktmbuf_data_len(m)- ETH_HDR_LEN;
	if (ip_type == IPV6_TYPE) {
		struct ipv6_hdr *ipv6_hdr =
			(struct ipv6_hdr*)(rte_pktmbuf_mtod(m, unsigned char*) + ETH_HDR_LEN);
		/* construct IPv6 header with hardcode values */
		/* NOTE(review): vtc_flow is assigned IPv6_VERSION directly —
		 * confirm the macro already encodes the version in the top
		 * nibble in network byte order; otherwise the version field
		 * ends up wrong on the wire. */
		ipv6_hdr->vtc_flow = IPv6_VERSION;
		ipv6_hdr->payload_len = htons(len - IPv6_HDR_SIZE);
		ipv6_hdr->proto = IP_PROTO_UDP;
		/* NOTE(review): hop limit 0 makes the packet droppable at the
		 * first router — presumably peers are on-link; confirm, or use
		 * a conventional value such as 64. */
		ipv6_hdr->hop_limits = 0;
		memcpy(&ipv6_hdr->src_addr, &entry->srcIP.ipv6_addr,
			IPV6_ADDR_LEN);
		memcpy(&ipv6_hdr->dst_addr, &entry->dstIP.ipv6_addr,
			IPV6_ADDR_LEN);
	} else {
		struct ipv4_hdr *ipv4_hdr =
			(struct ipv4_hdr*)(rte_pktmbuf_mtod(m, unsigned char*) + ETH_HDR_LEN);
		/* 0x45 = IPv4, 5-word (20-byte) header, no options. */
		ipv4_hdr->version_ihl = 0x45;
		ipv4_hdr->type_of_service = 0;
		/* Fixed identification value; fine since DF-style short
		 * control packets are never fragmented here. */
		ipv4_hdr->packet_id = 0x1513;
		ipv4_hdr->fragment_offset = 0;
		ipv4_hdr->time_to_live = 64;
		ipv4_hdr->next_proto_id = IP_PROTO_UDP;
		ipv4_hdr->total_length = htons(len);
		/* Addresses are stored in network byte order in peerData. —
		 * TODO confirm against the callers that populate srcIP/dstIP. */
		ipv4_hdr->src_addr = entry->srcIP.ipv4_addr;
		ipv4_hdr->dst_addr = entry->dstIP.ipv4_addr;
		ipv4_hdr->hdr_checksum = 0;	/* computed later by set_checksum() */
	}
}
/**
 * @brief : Fill the Ethernet header of an echo packet in place.
 * @param : m, mbuf whose first bytes are the Ethernet header
 * @param : entry, peer node information (source/destination MACs)
 * @param : ip_type, IPV6_TYPE or IPv4 — selects the EtherType
 * @return : Returns nothing
 */
static __inline__ void create_ether_hdr(struct rte_mbuf *m, peerData *entry,
		uint8_t ip_type)
{
	struct ether_hdr *eth = (struct ether_hdr *)rte_pktmbuf_mtod(m, void *);

	ether_addr_copy(&entry->dst_eth_addr, &eth->d_addr);
	ether_addr_copy(&entry->src_eth_addr, &eth->s_addr);
	eth->ether_type = (ip_type == IPV6_TYPE) ?
			htons(ETHER_TYPE_IPv6) : htons(ETHER_TYPE_IPv4);
}
/**
 * @brief : Build a complete GTP-U Echo Request frame inside the given mbuf.
 * @param : echo_pkt, pre-allocated mbuf (no-op when NULL)
 * @param : entry, peer node information (addresses, MACs, IP family)
 * @param : gtpu_seqnb, GTP-U sequence number to stamp
 * @return : Returns nothing
 */
void build_echo_request(struct rte_mbuf *echo_pkt, peerData *entry, uint16_t gtpu_seqnb)
{
	uint8_t family;

	if (echo_pkt == NULL)
		return;

	family = (entry->dstIP.ip_type == IPV6_TYPE) ? IPV6_TYPE : IPV4_TYPE;

	/* PKT_SIZE assumes an IPv4 layout; grow for the larger IPv6 header. */
	echo_pkt->pkt_len = PKT_SIZE;
	if (family == IPV6_TYPE)
		echo_pkt->pkt_len += IPv6_HDR_SIZE - IPV4_HDR_LEN;
	echo_pkt->data_len = echo_pkt->pkt_len;

	/* Build inner-most header first, Ethernet last. */
	encap_gtpu_hdr(echo_pkt, gtpu_seqnb, GTPU_ECHO_REQUEST, family);
	create_udp_hdr(echo_pkt, entry, family);
	create_ip_hdr(echo_pkt, entry, family);
	create_ether_hdr(echo_pkt, entry, family);

	/* Set outer IP and UDP checksum, after inner IP and UDP checksum is set.
	 */
	set_checksum(echo_pkt, entry->dstIP.ip_type);
}
void build_endmarker_and_send(struct sess_info_endmark *edmk)
{
static uint16_t seq = 0;
uint16_t len = 0;
peerData entry = {0};
gtpuHdr_t *gtpu_hdr = NULL;
entry.dstIP = edmk->dst_ip;
entry.srcIP = edmk->src_ip;
memcpy(&(entry.src_eth_addr), &(edmk->source_MAC), sizeof(struct ether_addr));
memcpy(&(entry.dst_eth_addr), &(edmk->destination_MAC), sizeof(struct ether_addr));
struct rte_mbuf *endmk_pkt = rte_pktmbuf_alloc(echo_mpool);
endmk_pkt->pkt_len = PKT_SIZE;
endmk_pkt->data_len = PKT_SIZE;
if (entry.dstIP.ip_type == IPV6_TYPE) {
/* Update the packet length */
endmk_pkt->pkt_len -= IPV4_HDR_LEN;
endmk_pkt->pkt_len += IPv6_HDR_SIZE;
endmk_pkt->data_len = endmk_pkt->pkt_len;
len = rte_pktmbuf_data_len(endmk_pkt) - (ETH_HDR_LEN + IPv6_HDR_SIZE + UDP_HDR_LEN);
len -= GTPU_HDR_LEN;
gtpu_hdr = (gtpuHdr_t*)(rte_pktmbuf_mtod(endmk_pkt, unsigned char *) +
ETH_HDR_LEN + IPv6_HDR_SIZE + UDP_HDR_LEN);
} else if (entry.dstIP.ip_type == IPV4_TYPE) {
len = rte_pktmbuf_data_len(endmk_pkt) - (ETH_HDR_LEN + IPV4_HDR_LEN + UDP_HDR_LEN);
len -= GTPU_HDR_LEN;
gtpu_hdr = (gtpuHdr_t*)(rte_pktmbuf_mtod(endmk_pkt, unsigned char *) +
ETH_HDR_LEN + IPV4_HDR_LEN + UDP_HDR_LEN);
}
gtpu_hdr->version_flags = (GTPU_VERSION << 5) | (GTP_PROTOCOL_TYPE_GTP << 4) | (GTP_FLAG_SEQNB);
gtpu_hdr->msg_type = GTPU_END_MARKER_REQUEST;
gtpu_hdr->teid = htonl(edmk->teid);
gtpu_hdr->seq_no = htons(++seq);
gtpu_hdr->tot_len = htons(len);
if (entry.dstIP.ip_type == IPV6_TYPE) {
create_udp_hdr(endmk_pkt, &entry, IPV6_TYPE);
create_ip_hdr(endmk_pkt, &entry, IPV6_TYPE);
create_ether_hdr(endmk_pkt, &entry, IPV6_TYPE);
set_checksum(endmk_pkt, IPV6_TYPE);
} else if (entry.dstIP.ip_type == IPV4_TYPE) {
create_udp_hdr(endmk_pkt, &entry, IPV4_TYPE);
create_ip_hdr(endmk_pkt, &entry, IPV4_TYPE);
create_ether_hdr(endmk_pkt, &entry, IPV4_TYPE);
set_checksum(endmk_pkt, IPV4_TYPE);
}
if (rte_ring_enqueue(shared_ring[S1U_PORT_ID], endmk_pkt) == -ENOBUFS) {
rte_pktmbuf_free(endmk_pkt);
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Can't Queue Endmarker "
"PKT because shared ring full so Dropping PKT\n", LOG_VALUE);
return;
}
(edmk->dst_ip.ip_type == IPV6_TYPE) ?
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"END_MAKER: Send the End Marker pkts to ipv6_addr:"IPv6_FMT"\n",
LOG_VALUE, IPv6_PRINT(IPv6_CAST(edmk->dst_ip.ipv6_addr))):
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"END_MAKER: Send the End Marker pkts to ipv4_addr:"IPV4_ADDR"\n",
LOG_VALUE, IPV4_ADDR_HOST_FORMAT(edmk->dst_ip.ipv4_addr));
}
|
nikhilc149/e-utran-features-bug-fixes | cp/li_config.c | /*
* Copyright (c) 2017 Intel Corporation
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "li_config.h"
#include "gw_adapter.h"
#include "pfcp_session.h"
extern int clSystemLog;
/**
 * @brief : Delete one or more LI (lawful-intercept) entries from the
 *          id-keyed hash, compact/clean the per-IMSI id list, and notify
 *          the user plane of the change.
 * @param : uiId, array of LI entry identifiers to delete
 * @param : uiCntr, number of identifiers in uiId
 * @return : 0 on success, -1 on the first hash-delete or lookup failure
 *           (remaining identifiers are then left unprocessed)
 */
uint8_t
del_li_entry(uint64_t *uiId, uint16_t uiCntr)
{
	int ret = 0;
	uint64_t imsi = 0;
	imsi_id_hash_t *imsi_id_config = NULL;
	struct li_df_config_t *li_df_config = NULL;

	for (uint16_t uiCnt = 0; uiCnt < uiCntr; uiCnt++) {
		li_df_config = NULL;

		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Delete ue entry from hash"
				" :%lu\n", LOG_VALUE, uiId[uiCnt]);

		ret = rte_hash_lookup_data(li_info_by_id_hash, &uiId[uiCnt], (void **)&li_df_config);
		/* Unknown ids are silently skipped; only found entries are removed. */
		if ((ret >= 0) && (NULL != li_df_config)) {
			/* Remember the IMSI before freeing the entry — it is
			 * needed below for the reverse (imsi -> ids) hash and
			 * for the user-plane notification. */
			imsi = li_df_config->uiImsi;

			ret = rte_hash_del_key(li_info_by_id_hash, &uiId[uiCnt]);
			if ( ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Entry not found for LI Id %lu\n",
						LOG_VALUE, uiId[uiCnt]);
				return -1;
			}

			/* Free data from hash */
			rte_free(li_df_config);
			li_df_config = NULL;

			imsi_id_config = NULL;

			/* get user level packet copying token or id using imsi */
			ret = get_id_using_imsi(imsi, &imsi_id_config);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Not applicable for li\n",
						LOG_VALUE);
				return -1;
			}

			if ((NULL != imsi_id_config) && (imsi_id_config->cntr > 0)) {
				/* Compact the ids array in place, dropping the
				 * deleted id while preserving the order of the
				 * remaining entries. */
				int i = 0;
				for (int8_t cnt = 0; cnt < imsi_id_config->cntr; cnt++) {
					if (imsi_id_config->ids[cnt] == uiId[uiCnt]) {
						continue;
					}
					imsi_id_config->ids[i] = imsi_id_config->ids[cnt];
					i++;
				}
				imsi_id_config->cntr--;
			}

			/* Last id for this IMSI gone: drop the reverse mapping too. */
			if ((NULL != imsi_id_config) && (imsi_id_config->cntr == NOT_PRESENT)) {
				ret = rte_hash_del_key(li_id_by_imsi_hash, &imsi);
				if ( ret < 0) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Entry not found for LI Id %lu\n",
							LOG_VALUE, imsi);
					return -1;
				}

				/* Free data from hash */
				rte_free(imsi_id_config);
				imsi_id_config = NULL;
			}

			/* Send pfcp session modification request to user plane */
			send_pfcp_sess_mod_req_for_li(imsi);
		}
	}

	return 0;
}
int8_t
fillup_li_df_hash(struct li_df_config_t *li_df_config_data, uint16_t uiCntr) {
int ret = 0;
for (uint16_t uiCnt = 0; uiCnt < uiCntr; uiCnt++) {
struct li_df_config_t *li_df_config = NULL;
ret = rte_hash_lookup_data(li_info_by_id_hash, &li_df_config_data[uiCnt].uiId,
(void **)&li_df_config);
if (ret < 0) {
li_df_config = rte_zmalloc_socket(NULL, sizeof(struct li_df_config_t),
RTE_CACHE_LINE_SIZE, rte_socket_id());
if (NULL == li_df_config) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure to "
"allocate PDN structure: %s \n", LOG_VALUE,
rte_strerror(rte_errno));
return -1;
}
li_df_config->uiId = li_df_config_data[uiCnt].uiId;
li_df_config->uiImsi = li_df_config_data[uiCnt].uiImsi;
li_df_config->uiS11 = li_df_config_data[uiCnt].uiS11;
li_df_config->uiSgwS5s8C = li_df_config_data[uiCnt].uiSgwS5s8C;
li_df_config->uiPgwS5s8C = li_df_config_data[uiCnt].uiPgwS5s8C;
li_df_config->uiSxa = li_df_config_data[uiCnt].uiSxa;
li_df_config->uiSxb = li_df_config_data[uiCnt].uiSxb;
li_df_config->uiSxaSxb = li_df_config_data[uiCnt].uiSxaSxb;
li_df_config->uiS1uContent = li_df_config_data[uiCnt].uiS1uContent;
li_df_config->uiSgwS5s8UContent =
li_df_config_data[uiCnt].uiSgwS5s8UContent;
li_df_config->uiPgwS5s8UContent =
li_df_config_data[uiCnt].uiPgwS5s8UContent;
li_df_config->uiSgiContent = li_df_config_data[uiCnt].uiSgiContent;
li_df_config->uiS1u = li_df_config_data[uiCnt].uiS1u;
li_df_config->uiSgwS5s8U = li_df_config_data[uiCnt].uiSgwS5s8U;
li_df_config->uiPgwS5s8U = li_df_config_data[uiCnt].uiPgwS5s8U;
li_df_config->uiSgi = li_df_config_data[uiCnt].uiSgi;
li_df_config->uiForward = li_df_config_data[uiCnt].uiForward;
ret = rte_hash_add_key_data(li_info_by_id_hash, (const void *) &li_df_config->uiId,
(void *) li_df_config);
if (ret < 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"- Error on li_info_by_id_hash"
" add\n", LOG_VALUE, strerror(ret));
rte_hash_del_key(li_info_by_id_hash, (const void *) &li_df_config->uiId);
if (ret < 0) {
rte_panic("%s - Error on li_info_by_id_hash del\n", strerror(ret));
}
rte_free(li_df_config);
return -1;
}
ret = add_id_in_imsi_hash(li_df_config->uiId, li_df_config->uiImsi);
if (ret < 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"add id in imsi hash failed"
" with return value (%d).", LOG_VALUE, ret);
return -1;
}
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
"send pfcp modification request if ue is already attach."
" Action (%u)\n", LOG_VALUE, ADD_LI_ENTRY);
} else {
if (li_df_config_data[uiCnt].uiS11 != 0) {
li_df_config->uiS11 = li_df_config_data[uiCnt].uiS11;
}
if (li_df_config_data[uiCnt].uiSgwS5s8C != 0) {
li_df_config->uiSgwS5s8C = li_df_config_data[uiCnt].uiSgwS5s8C;
}
if (li_df_config_data[uiCnt].uiPgwS5s8C != 0) {
li_df_config->uiPgwS5s8C = li_df_config_data[uiCnt].uiPgwS5s8C;
}
if (li_df_config_data[uiCnt].uiSxa != 0) {
li_df_config->uiSxa = li_df_config_data[uiCnt].uiSxa;
} else {
li_df_config->uiSxa = NOT_PRESENT;
}
if (li_df_config_data[uiCnt].uiSxb != 0) {
li_df_config->uiSxb = li_df_config_data[uiCnt].uiSxb;
} else {
li_df_config->uiSxb = NOT_PRESENT;
}
if (li_df_config_data[uiCnt].uiSxaSxb != 0) {
li_df_config->uiSxaSxb = li_df_config_data[uiCnt].uiSxaSxb;
} else {
li_df_config->uiSxaSxb = NOT_PRESENT;
}
if (li_df_config_data[uiCnt].uiS1uContent != 0) {
li_df_config->uiS1uContent =
li_df_config_data[uiCnt].uiS1uContent;
}
if (li_df_config_data[uiCnt].uiSgwS5s8UContent != 0) {
li_df_config->uiSgwS5s8UContent =
li_df_config_data[uiCnt].uiSgwS5s8UContent;
}
if (li_df_config_data[uiCnt].uiPgwS5s8UContent != 0) {
li_df_config->uiPgwS5s8UContent =
li_df_config_data[uiCnt].uiPgwS5s8UContent;
}
if (li_df_config_data[uiCnt].uiSgiContent != 0) {
li_df_config->uiSgiContent =
li_df_config_data[uiCnt].uiSgiContent;
}
if (li_df_config_data[uiCnt].uiS1u != 0) {
li_df_config->uiS1u = li_df_config_data[uiCnt].uiS1u;
} else {
li_df_config->uiS1u = NOT_PRESENT;
}
if (li_df_config_data[uiCnt].uiSgwS5s8U != 0) {
li_df_config->uiSgwS5s8U = li_df_config_data[uiCnt].uiSgwS5s8U;
} else {
li_df_config->uiSgwS5s8U = NOT_PRESENT;
}
if (li_df_config_data[uiCnt].uiPgwS5s8U != 0) {
li_df_config->uiPgwS5s8U = li_df_config_data[uiCnt].uiPgwS5s8U;
} else {
li_df_config->uiPgwS5s8U = NOT_PRESENT;
}
if (li_df_config_data[uiCnt].uiSgi != 0) {
li_df_config->uiSgi = li_df_config_data[uiCnt].uiSgi;
} else {
li_df_config->uiSgi = NOT_PRESENT;
}
if (li_df_config_data[uiCnt].uiForward != 0) {
li_df_config->uiForward = li_df_config_data[uiCnt].uiForward;
}
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
"send pfcp modification request if ue is already attach."
" Action (%u)\n", LOG_VALUE, UPDATE_LI_ENTRY);
}
send_pfcp_sess_mod_req_for_li(li_df_config->uiImsi);
}
return 0;
}
/**
 * @brief : Look up the LI configuration entry for a given identifier.
 * @param : uiId, LI entry identifier
 * @param : li_config, out-pointer filled with the hash-owned entry on hit
 * @return : 0 when found, -1 otherwise
 */
int
get_li_config(uint64_t uiId, struct li_df_config_t **li_config)
{
	int lookup = rte_hash_lookup_data(li_info_by_id_hash, &uiId, (void **)li_config);

	if (lookup < 0) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT":Entry not found for "
				"Id : %lu.\n", LOG_VALUE, uiId);
		return -1;
	}

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT": LI Id : %lu\n", LOG_VALUE, uiId);
	return 0;
}
int8_t
add_id_in_imsi_hash(uint64_t uiId, uint64_t uiImsi) {
int ret = 0;
imsi_id_hash_t *imsi_id_hash = NULL;
ret = rte_hash_lookup_data(li_id_by_imsi_hash, &uiImsi, (void **)&imsi_id_hash);
if (ret < 0) {
imsi_id_hash = rte_zmalloc_socket(NULL, sizeof(imsi_id_hash_t),
RTE_CACHE_LINE_SIZE, rte_socket_id());
if (NULL == imsi_id_hash) {
clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT
"Failure to allocate id_by_imsi structure: %s \n",
LOG_VALUE, rte_strerror(rte_errno));
return -1;
}
/* initialize structure contents */
imsi_id_hash->cntr = 0;
for (uint8_t i = 0; i < MAX_LI_ENTRIES_PER_UE; i++) {
imsi_id_hash->ids[i] = 0;
}
imsi_id_hash->ids[imsi_id_hash->cntr++] = uiId;
ret = rte_hash_add_key_data(li_id_by_imsi_hash, (const void *) &uiImsi,
(void *) imsi_id_hash);
if (ret < 0) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" - Error on"
" li_id_by_imsi_hash add\n",LOG_VALUE, strerror(ret));
rte_hash_del_key(li_id_by_imsi_hash, (const void *) &uiImsi);
if (ret < 0) {
rte_panic("%s - Error on li_id_by_imsi_hash del\n", strerror(ret));
}
rte_free(imsi_id_hash);
return -1;
}
} else {
imsi_id_hash->ids[imsi_id_hash->cntr++] = uiId;
}
return 0;
}
/**
 * @brief : Look up the per-IMSI list of LI entry identifiers.
 * @param : uiImsi, subscriber IMSI
 * @param : imsi_id_hash, out-pointer filled with the hash-owned record
 * @return : 0 when found, -1 otherwise
 */
int
get_id_using_imsi(uint64_t uiImsi, imsi_id_hash_t **imsi_id_hash)
{
	int lookup = rte_hash_lookup_data(li_id_by_imsi_hash, &uiImsi, (void **)imsi_id_hash);

	if (lookup < 0) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT":Entry not found for imsi :"
				"%lu\n", LOG_VALUE, uiImsi);
		return -1;
	}

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT": imsi :%lu\n", LOG_VALUE, uiImsi);
	return 0;
}
/**
 * @brief : Populate a UE context's li_data[] array from every LI entry
 *          registered for the UE's IMSI, translating configuration values
 *          into per-interface PRESENT flags and enabling duplication.
 * @param : context, UE context to fill (li_data and li_data_cntr are reset)
 * @param : imsi_id_hash, per-IMSI list of LI entry identifiers
 * @return : Always 0; ids with no configuration entry are skipped
 */
int
fill_li_config_in_context(ue_context *context, imsi_id_hash_t *imsi_id_hash) {
	int ret = 0;

	/* Start from a clean slate; entries are re-derived from the hash. */
	context->li_data_cntr = 0;
	memset(context->li_data, 0, MAX_LI_ENTRIES_PER_UE * sizeof(li_data_t));

	for (uint8_t i = 0; i < imsi_id_hash->cntr; i++) {
		struct li_df_config_t *li_config = NULL;
		ret = get_li_config(imsi_id_hash->ids[i], &li_config);
		if (ret < 0) {
			/* Stale id (config deleted): skip, don't fail the UE. */
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Entry not found "
					"for li identifier %lu", LOG_VALUE, imsi_id_hash->ids[i]);
			continue;
		}

		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"fillup LI configurations in"
				" context. IMSI (%lu)", LOG_VALUE, context->imsi);

		context->li_data[context->li_data_cntr].id = imsi_id_hash->ids[i];

		/* Control-plane signalling interfaces: copy when configured. */
		if (COPY_SIG_MSG_ON == li_config->uiS11) {
			context->li_data[context->li_data_cntr].s11 = PRESENT;
		}

		if (COPY_SIG_MSG_ON == li_config->uiSgwS5s8C) {
			context->li_data[context->li_data_cntr].sgw_s5s8c = PRESENT;
		}

		if (COPY_SIG_MSG_ON == li_config->uiPgwS5s8C) {
			context->li_data[context->li_data_cntr].pgw_s5s8c = PRESENT;
		}

		/* Sx interfaces: enabled for CP-only or CP+DP copy modes. */
		if ((SX_COPY_CP_MSG == li_config->uiSxa) ||
				(SX_COPY_CP_DP_MSG == li_config->uiSxa)) {
			context->li_data[context->li_data_cntr].sxa = PRESENT;
		}

		if ((SX_COPY_CP_MSG == li_config->uiSxb) ||
				(SX_COPY_CP_DP_MSG == li_config->uiSxb)) {
			context->li_data[context->li_data_cntr].sxb = PRESENT;
		}

		if ((SX_COPY_CP_MSG == li_config->uiSxaSxb) ||
				(SX_COPY_CP_DP_MSG == li_config->uiSxaSxb)) {
			context->li_data[context->li_data_cntr].sxa_sxb = PRESENT;
		}

		context->li_data[context->li_data_cntr].forward = li_config->uiForward;
		/* Any LI entry switches packet duplication on for this UE. */
		context->dupl = PRESENT;

		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
				"LI configurations : IMSI (%lu) s11(%u) sgw-s5s8c(%u)"
				"pgw_s5s8c (%u) sxa(%u) sxb(%u) sxa_sxb(%u) forward(%u) dupl(%u)",
				LOG_VALUE, context->imsi, context->li_data[context->li_data_cntr].s11,
				context->li_data[context->li_data_cntr].sgw_s5s8c,
				context->li_data[context->li_data_cntr].pgw_s5s8c,
				context->li_data[context->li_data_cntr].sxa,
				context->li_data[context->li_data_cntr].sxb,
				context->li_data[context->li_data_cntr].sxa_sxb,
				context->li_data[context->li_data_cntr].forward, context->dupl);

		context->li_data_cntr++;
	}

	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | dp/pipeline/epc_packet_framework.c | /*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <string.h>
#include <sched.h>
#include <unistd.h>
#include <rte_ring.h>
#include <rte_pipeline.h>
#include <rte_lcore.h>
#include <rte_ethdev.h>
#include <rte_port_ring.h>
#include <rte_port_ethdev.h>
#include <rte_table_hash.h>
#include <rte_table_stub.h>
#include <rte_byteorder.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_jhash.h>
#include <rte_cycles.h>
#include <rte_port_ring.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_timer.h>
#include <rte_debug.h>
#include <cmdline_rdline.h>
#include <cmdline_parse.h>
#include <cmdline_socket.h>
#include <cmdline.h>
#include "stats.h"
#include "up_main.h"
#include "commands.h"
#include "interface.h"
#include "dp_ipc_api.h"
#include "epc_packet_framework.h"
#include "gw_adapter.h"
struct rte_ring *epc_mct_spns_dns_rx;
struct rte_ring *li_dl_ring;
struct rte_ring *li_ul_ring;
struct rte_ring *cdr_pfcp_rpt_req;
extern int clSystemLog;
/**
 * @brief : Maintains epc parameters
 *
 * Global application configuration for the packet framework. Core ids
 * default to -1 (unassigned) until filled in from the runtime
 * configuration before epc_init_packet_framework() runs.
 */
struct epc_app_params epc_app = {
	/* Ports */
	.n_ports = NUM_SPGW_PORTS,

	/* Rings */
	.ring_rx_size = EPC_DEFAULT_RING_SZ,
	.ring_tx_size = EPC_DEFAULT_RING_SZ,

	/* Burst sizes */
	.burst_size_rx_read = EPC_DEFAULT_BURST_SZ,
	.burst_size_rx_write = EPC_BURST_SZ_64,
	.burst_size_worker_read = EPC_DEFAULT_BURST_SZ,
	.burst_size_worker_write = EPC_BURST_SZ_64,
	.burst_size_tx_read = EPC_DEFAULT_BURST_SZ,
	.burst_size_tx_write = EPC_BURST_SZ_64,

	/* Core assignments: -1 = not yet configured */
	.core_mct = -1,
	.core_iface = -1,
	.core_stats = -1,
	.core_spns_dns = -1,
	.core_ul[S1U_PORT_ID] = -1,
	.core_dl[SGI_PORT_ID] = -1,

#ifdef STATS
	/* Per-direction packet counters (only compiled with STATS) */
	.ul_params[S1U_PORT_ID].pkts_in = 0,
	.ul_params[S1U_PORT_ID].pkts_out = 0,
	.dl_params[SGI_PORT_ID].pkts_in = 0,
	.dl_params[SGI_PORT_ID].pkts_out = 0,
	.dl_params[SGI_PORT_ID].ddn = 0,
	.dl_params[SGI_PORT_ID].ddn_buf_pkts = 0,
#endif
};
/**
 * @brief : Creats ZMQ read thread , Polls message queue
 * Populates hash table from que
 *
 * Entry point for the CP-DP interface core. In the SIMU_CP build it runs
 * the control-plane simulator exactly once; otherwise it loops forever
 * servicing CP messages (and the DNS ring in NGCORE_SHRINK builds).
 * This function never returns in the non-simulated build.
 * @param : arg, unused parameter
 * @return : Returns nothing
 */
static void epc_iface_core(__rte_unused void *args)
{
#ifdef SIMU_CP
	/* Guard so the simulator body runs only on the first invocation. */
	static int simu_call;

	if (simu_call == 0) {
		simu_cp();
		simu_call = 1;
	}
#else
	uint32_t lcore;

	lcore = rte_lcore_id();
	clLog(clSystemLog, eCLSeverityMajor,
		LOG_FORMAT"RTE NOTICE enabled on lcore %d\n", LOG_VALUE, lcore);
	clLog(clSystemLog, eCLSeverityInfo,
		LOG_FORMAT"RTE INFO enabled on lcore %d\n", LOG_VALUE, lcore);
	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"RTE DEBUG enabled on lcore %d\n", LOG_VALUE, lcore);

	/*
	 * Poll message que. Populate hash table from que.
	 */
	while (1) {
		process_dp_msgs();
#ifdef NGCORE_SHRINK
		/* DNS ring is serviced on this core in the shrunk layout. */
		scan_dns_ring();
#endif
	}
#endif
}
/**
 * @brief : Initialize epc core
 *
 * Binds each pipeline entry point to its configured lcore: ARP/ICMP
 * handling, the CP-DP interface loop, and the uplink/downlink pipelines.
 * Core ids must have been configured (they default to -1 in epc_app).
 * @param : No param
 * @return : Returns nothing
 */
static void epc_init_lcores(void)
{
	epc_alloc_lcore(epc_arp, NULL, epc_app.core_mct);

	epc_alloc_lcore(epc_iface_core, NULL, epc_app.core_iface);

	epc_alloc_lcore(epc_ul, &epc_app.ul_params[S1U_PORT_ID],
			epc_app.core_ul[S1U_PORT_ID]);

	epc_alloc_lcore(epc_dl, &epc_app.dl_params[SGI_PORT_ID],
			epc_app.core_dl[SGI_PORT_ID]);
}
#define for_each_port(port) for (port = 0; port < epc_app.n_ports; port++)
#define for_each_core(core) for (core = 0; core < DP_MAX_LCORE; core++)
/**
 * @brief : Initialize rings common to all pipelines: one RX->MCT ring per
 *          port for slow-path packets (ICMP, ARP, GTPU-ECHO), the LI
 *          uplink/downlink duplication rings, and the CDR report ring.
 *          Aborts the process if any ring cannot be created.
 * @param : No param
 * @return : Returns nothing
 */
static void epc_init_rings(void)
{
	uint32_t port;

	/* Ring for Process Slow Path packets like ICMP, ARP, GTPU-ECHO */
	/* create communication rings between RX-core and mct core */
	for_each_port(port) {
		char name[32];

		snprintf(name, sizeof(name), "rx_to_mct_%u", port);
		epc_app.epc_mct_rx[port] = rte_ring_create(name,
				epc_app.ring_rx_size,
				rte_socket_id(),
				RING_F_SP_ENQ |
				RING_F_SC_DEQ);
		if (epc_app.epc_mct_rx[port] == NULL)
			rte_exit(EXIT_FAILURE, LOG_FORMAT"Cannot create RX ring %u\n", LOG_VALUE, port);
		/* BUG FIX: removed a leftover snprintf("tx_from_mct_%u") —
		 * its result was never used (dead store from a removed
		 * tx-ring creation). */
	}

	/* Creating UL and DL rings for LI*/
	li_dl_ring = rte_ring_create("LI_DL_RING",
			DL_PKTS_RING_SIZE,
			rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (li_dl_ring == NULL)
		rte_panic("Cannot create LI DL ring \n");

	li_ul_ring = rte_ring_create("LI_UL_RING",
			UL_PKTS_RING_SIZE,
			rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (li_ul_ring == NULL)
		rte_panic("Cannot create LI UL ring \n");

	/* Creating rings for CDR Report Request*/
	cdr_pfcp_rpt_req = rte_ring_create("CDR_RPT_REQ_RING",
			DL_PKTS_RING_SIZE,
			rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (cdr_pfcp_rpt_req == NULL)
		rte_panic("Cannot create DR_RPT_REQ_RING \n");
}
/**
 * @brief : Launch epc pipeline
 *
 * Runs one iteration of every pipeline function allocated to the calling
 * lcore. In INSTMNT builds, the first worker core additionally samples
 * TSC deltas around each function when the instrumentation flag is set.
 * @param : No param
 * @return : Returns nothing
 */
static inline void epc_run_pipeline(void)
{
	struct epc_lcore_config *config;
	int i;
	unsigned lcore;

	lcore = rte_lcore_id();
	config = &epc_app.lcores[lcore];

#ifdef INSTMNT
	uint64_t start_tsc, end_tsc;

	if (lcore == epc_app.worker_cores[0]) {
		for (i = 0; i < config->allocated; i++) {
			start_tsc = rte_rdtsc();
			config->launch[i].func(config->launch[i].arg);
			/* Accumulate the elapsed cycles only when requested,
			 * then clear the flag until the next sample. */
			if (flag_wrkr_update_diff) {
				end_tsc = rte_rdtsc();
				diff_tsc_wrkr += end_tsc - start_tsc;
				flag_wrkr_update_diff = 0;
			}
		}
	} else
#endif
		/* Non-instrumented path: just invoke each allocated pipeline. */
		for (i = 0; i < config->allocated; i++) {
			config->launch[i].func(config->launch[i].arg);
		}
}
/**
 * @brief : Per-lcore entry point launched by rte_eal_mp_remote_launch();
 *          spins on the pipelines allocated to this lcore forever.
 * @param : arg, unused parameter
 * @return : 0 immediately when the lcore has no pipelines; otherwise
 *           never returns
 */
static int epc_lcore_main_loop(__attribute__ ((unused))
		void *arg)
{
	unsigned lcore_id = rte_lcore_id();
	struct epc_lcore_config *cfg = &epc_app.lcores[lcore_id];

	/* Lcores with nothing allocated exit right away. */
	if (cfg->allocated == 0)
		return 0;

	clLog(clSystemLog, eCLSeverityMajor,
		LOG_FORMAT"RTE NOTICE enabled on lcore %d\n", LOG_VALUE, lcore_id);
	clLog(clSystemLog, eCLSeverityInfo,
		LOG_FORMAT"RTE INFO enabled on lcore %d\n", LOG_VALUE, lcore_id);
	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"RTE DEBUG enabled on lcore %d\n", LOG_VALUE, lcore_id);

	for (;;)
		epc_run_pipeline();

	return 0;
}
/**
 * @brief : One-time initialization of the EPC packet framework: records
 *          the port mapping, creates the shared rings, initializes the
 *          ARP/DNS helpers and the UL/DL pipelines, assigns pipelines to
 *          lcores, and sets up CP-DP IPC. Exits the process if more ports
 *          are configured than supported.
 * @param : east_port_id, DPDK port id facing the core network (SGi/S5S8)
 * @param : west_port_id, DPDK port id facing the access network (S1U)
 * @return : Returns nothing
 */
void epc_init_packet_framework(uint8_t east_port_id, uint8_t west_port_id)
{
	if (epc_app.n_ports > NUM_SPGW_PORTS) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Number of ports exceeds a configured number %u\n",
			LOG_VALUE, epc_app.n_ports);
		exit(1);
	}

	epc_app.ports[WEST_PORT_ID] = west_port_id;
	epc_app.ports[EAST_PORT_ID] = east_port_id;

	printf("ARP-ICMP Core on:\t\t%d\n", epc_app.core_mct);
	printf("CP-DP IFACE Core on:\t\t%d\n", epc_app.core_iface);

#ifdef NGCORE_SHRINK
	/* Shrunk core layout: DNS shares the CP-DP interface core. */
	epc_app.core_spns_dns = epc_app.core_iface;
#endif
	printf("SPNS DNS Core on:\t\t%d\n", epc_app.core_spns_dns);

#ifdef STATS
#ifdef NGCORE_SHRINK
	/* Shrunk core layout: stats timer shares the ARP/ICMP core. */
	epc_app.core_stats = epc_app.core_mct;
#endif
	printf("STATS-Timer Core on:\t\t%d\n", epc_app.core_stats);
#endif
	/*
	 * Initialize rings
	 */
	epc_init_rings();

	/*
	 * Initialize arp & spns_dns cores
	 */
	epc_arp_init();
	epc_spns_dns_init();

	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"Uplink Core on:\t\t\t%d\n", LOG_VALUE, epc_app.core_ul[WEST_PORT_ID]);
	clLog(clSystemLog, eCLSeverityInfo,
		LOG_FORMAT"VS- ng-core_shrink:\n\t"
		"epc_ul_init::epc_app.core_ul[WEST_PORT_ID]= %d\n\t"
		"WEST_PORT_ID= %d; EAST_PORT_ID= %d\n",
		LOG_VALUE, epc_app.core_ul[WEST_PORT_ID],
		WEST_PORT_ID, EAST_PORT_ID);
	/* Uplink pipeline: reads on the west port, writes on the east port. */
	epc_ul_init(&epc_app.ul_params[WEST_PORT_ID],
			epc_app.core_ul[WEST_PORT_ID],
			WEST_PORT_ID, EAST_PORT_ID);

	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"Downlink Core on:\t\t%d\n", LOG_VALUE, epc_app.core_dl[EAST_PORT_ID]);
	clLog(clSystemLog, eCLSeverityInfo,
		LOG_FORMAT"VS- ng-core_shrink:\n\t"
		"epc_dl_init::epc_app.core_dl[EAST_PORT_ID]= %d\n\t"
		"EAST_PORT_ID= %d; WEST_PORT_ID= %d\n",
		LOG_VALUE, epc_app.core_dl[EAST_PORT_ID],
		EAST_PORT_ID, WEST_PORT_ID);
	/* Downlink pipeline: reads on the east port, writes on the west port. */
	epc_dl_init(&epc_app.dl_params[EAST_PORT_ID],
			epc_app.core_dl[EAST_PORT_ID],
			EAST_PORT_ID, WEST_PORT_ID);

	/*
	 * Assign pipelines to cores
	 */
	epc_init_lcores();

	/* Init IPC msgs */
	iface_init_ipc_node();
}
/**
 * @brief : Launch epc_lcore_main_loop() on every lcore (including the
 *          master); exits the process if the EAL launch fails.
 * @param : No param
 * @return : Returns nothing
 */
void packet_framework_launch(void)
{
	int rc = rte_eal_mp_remote_launch(epc_lcore_main_loop, NULL, CALL_MASTER);

	if (rc < 0)
		rte_exit(EXIT_FAILURE, LOG_FORMAT"MP remote lauch fail !!!\n", LOG_VALUE);
}
/**
 * @brief : Register a pipeline function (and its argument) to be run on
 *          the given lcore by epc_run_pipeline().
 * @param : func, pipeline entry point
 * @param : arg, opaque argument forwarded to func on every iteration
 * @param : core, target lcore id; must be in [0, DP_MAX_LCORE)
 * @return : Returns nothing; exits the process on an invalid core id
 */
void epc_alloc_lcore(pipeline_func_t func, void *arg, int core)
{
	struct epc_lcore_config *lcore;

	/* BUG FIX: core ids default to -1 in epc_app; only the upper bound
	 * was checked, so an unconfigured core would have indexed before
	 * the lcores array. */
	if (core < 0 || core >= DP_MAX_LCORE)
		rte_exit(EXIT_FAILURE, LOG_FORMAT" Core %d exceed Max core %d\n", LOG_VALUE, core,
			DP_MAX_LCORE);

	lcore = &epc_app.lcores[core];
	lcore->launch[lcore->allocated].func = func;
	lcore->launch[lcore->allocated].arg = arg;
	lcore->allocated++;
}
|
nikhilc149/e-utran-features-bug-fixes | cp/redis_client.h | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <limits.h>
#include "string.h"
#include "hiredis.h"
#include "hiredis_ssl.h"
#include "gw_adapter.h"
/* Connection timeout passed to hiredis — presumably seconds (used to fill
 * a struct timeval); TODO confirm at the redis_connect() call site. */
#define REDIS_CONN_TIMEOUT 3
/* Big enough for a dotted-quad IPv4 string plus NUL terminator. */
#define IP_STR_LEN 16

/* Transport used to reach the redis server: plain TCP or TLS. */
typedef enum redis_conn_type_t {
	REDIS_TCP,
	REDIS_TLS
} redis_conn_type_t;

/**
 * @brief : Maintains redis configuration data
 */
typedef struct redis_config_t {
	/* Selects which member of 'conf' below is valid. */
	redis_conn_type_t type;
	/* Local control-plane IP, used as the CDR key on the server. */
	char cp_ip[IP_STR_LEN];
	union conf {
		/* Plain-TCP connection parameters. */
		struct tcp {
			char host[IP_STR_LEN];
			int port;
			struct timeval timeout;
		} tcp;
		/* TLS connection parameters, including certificate paths. */
		struct tls {
			char host[IP_STR_LEN];
			int port;
			char ca_cert_path[PATH_MAX];
			char cert_path[PATH_MAX];
			char key_path[PATH_MAX];
			struct timeval timeout;
		} tls;
	} conf;
} redis_config_t;

/**
 * @brief : Api to connect to redis server
 * @param : cfg, configuration data
 * @return : Returns pointer to redis context in case of successs, NULL otherwise
 */
redisContext* redis_connect(redis_config_t *cfg);

/**
 * @brief : Function to store generated cdr to redis server
 * @param : ctx, redis context pointer
 * @param : cp_ip, control plane ip , used as a key
 * @param : cdr, generated cdr data
 * @return : Returns 0 in case of success, -1 otherwise
 */
int redis_save_cdr(redisContext* ctx, char *cp_ip, char* cdr);

/**
 * @brief : Api to disconnect from redis server
 * @param : ctx, redis context pointer
 * @return : Returns 0 in case of success, -1 otherwise
 */
int redis_disconnect(redisContext* ctx);

/* Shared connection handles defined in the implementation file. */
extern redisContext *ctx;
extern redisSSLContext *ssl;
|
nikhilc149/e-utran-features-bug-fixes | cp/gtpv2c_messages/create_s5s8_session.c | <filename>cp/gtpv2c_messages/create_s5s8_session.c
/*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <byteswap.h>
#include "packet_filters.h"
#include "gtpv2c_set_ie.h"
#include "../cp_dp_api/vepc_cp_dp_api.h"
#include "pfcp.h"
#include "gtpv2c.h"
#include "pfcp_enum.h"
#include "pfcp_util.h"
#include "sm_struct.h"
#include "../cp_stats.h"
#include "pfcp_set_ie.h"
#include "pfcp_session.h"
#include "pfcp_messages.h"
#include "pfcp_messages_encoder.h"
#include "cp_config.h"
#include "seid_llist.h"
#include "gtpc_session.h"
#ifdef CP_BUILD
#include "cp_timer.h"
#endif /* CP_BUILD */
#ifdef USE_REST
#include "main.h"
#endif /* USE_REST */
extern pfcp_config_t config;
extern int clSystemLog;
extern int pfcp_fd;
extern int pfcp_fd_v6;
extern peer_addr_t upf_pfcp_sockaddr;
extern peer_addr_t s5s8_recv_sockaddr;
extern struct cp_stats_t cp_stats;
/**
 * @brief : SGW-C handler for a Modify Bearer Response received on S5/S8.
 *          Looks up the UE context/PDN from the SGW S5/S8 TEID, updates the
 *          PGW FQ-CSID bookkeeping (when built with USE_CSID) and either
 *          kicks off a PFCP Session Modification towards the SGW-U (early
 *          return) or builds the pending S11 response (Modify Bearer or
 *          Create Session) in gtpv2c_s11_tx.
 * @param : mb_rsp, decoded Modify Bearer Response received from PGW-C
 * @param : gtpv2c_s11_tx, tx buffer in which the S11 response is encoded
 * @param : _context, out parameter; set to the resolved UE context
 * @return : 0 on success, GTPv2-C cause value or -1 on failure
 */
int
process_sgwc_s5s8_modify_bearer_response(mod_bearer_rsp_t *mb_rsp, gtpv2c_header_t *gtpv2c_s11_tx,
		ue_context **_context)
{
	int ret = 0;
	int ebi_index = 0;
	uint32_t seq = 0;
	struct resp_info *resp = NULL;
	pfcp_sess_mod_req_t pfcp_sess_mod_req = {0};
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	eps_bearer *bearer = NULL;

	/*extract ebi_id from array as all the ebi's will be of same pdn.*/
	ebi_index = GET_EBI_INDEX(mb_rsp->bearer_contexts_modified[0].eps_bearer_id.ebi_ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		return GTPV2C_CAUSE_SYSTEM_FAILURE;
	}

	/* s11_sgw_gtpc_teid= s5s8_sgw_gtpc_teid =
	 * key->ue_context_by_fteid_hash */
	ret = get_ue_context_by_sgw_s5s8_teid(mb_rsp->header.teid.has_teid.teid, &context);
	if (ret < 0 || !context) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
			"Ue context for teid: %d\n",
			LOG_VALUE, mb_rsp->header.teid.has_teid.teid);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	bearer = context->eps_bearers[ebi_index];
	pdn = bearer->pdn;
	/* Hand the resolved context back to the caller. */
	*_context = context;

	/* Retrive the session information based on session id. */
	if (get_sess_entry(pdn->seid, &resp) != 0){
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
			"for seid: %lu", LOG_VALUE, pdn->seid);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

#ifdef USE_CSID
	fqcsid_t *tmp = NULL;
	/* PGW FQ-CSID */
	if (mb_rsp->pgw_fqcsid.header.len) {
		/* Remove Exsiting PGW CSID linked with session */
		if (pdn->pgw_csid.num_csid) {
			memset(&pdn->pgw_csid, 0, sizeof(fqcsid_t));
		}
		ret = add_peer_addr_entry_for_fqcsid_ie_node_addr(
			&pdn->s5s8_pgw_gtpc_ip, &mb_rsp->pgw_fqcsid,
			S5S8_SGWC_PORT_ID);
		if (ret)
			return ret;
		/* Stored the PGW CSID by PGW Node address */
		ret = add_fqcsid_entry(&mb_rsp->pgw_fqcsid, context->pgw_fqcsid);
		if(ret)
			return ret;
		fill_pdn_fqcsid_info(&pdn->pgw_csid, context->pgw_fqcsid);
	} else {
		/* No FQ-CSID IE in the response: fall back to tracking the CSID
		 * purely by the PGW's GTP-C node address. */
		tmp = get_peer_addr_csids_entry(&(pdn->s5s8_pgw_gtpc_ip),
			ADD_NODE);
		if (tmp == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error: Failed to "
				"add PGW CSID by PGW Node addres %s \n", LOG_VALUE,
				strerror(errno));
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}
		memcpy(&(tmp->node_addr),
			&(pdn->s5s8_pgw_gtpc_ip), sizeof(node_address_t));
		memcpy(&((context->pgw_fqcsid)->node_addr[(context->pgw_fqcsid)->num_csid]),
			&(pdn->s5s8_pgw_gtpc_ip), sizeof(node_address_t));
	}

	/* Link local CSID with PGW CSID */
	if (pdn->pgw_csid.num_csid) {
		if (link_gtpc_peer_csids(&pdn->pgw_csid,
			&pdn->sgw_csid, S5S8_SGWC_PORT_ID)) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to Link "
				"Local CSID entry to link with PGW FQCSID, Error : %s \n", LOG_VALUE,
				strerror(errno));
			return -1;
		}
		if (link_sess_with_peer_csid(&pdn->pgw_csid, pdn, S5S8_SGWC_PORT_ID)) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error : Failed to Link "
				"Session with Peer CSID\n", LOG_VALUE);
			return -1;
		}

		/* Send pfcp mod req to SGWU for pgwc csid */
		seq = get_pfcp_sequence_number(PFCP_SESSION_MODIFICATION_REQUEST, seq);
		set_pfcp_seid_header((pfcp_header_t *) &(pfcp_sess_mod_req.header),
			PFCP_SESSION_MODIFICATION_REQUEST, HAS_SEID, seq, context->cp_mode);
		pfcp_sess_mod_req.header.seid_seqno.has_seid.seid = pdn->dp_seid;

		node_address_t node_value = {0};
		/*Filling Node ID for F-SEID*/
		if (pdn->upf_ip.ip_type == PDN_IP_TYPE_IPV4) {
			uint8_t temp[IPV6_ADDRESS_LEN] = {0};
			ret = fill_ip_addr(config.pfcp_ip.s_addr, temp, &node_value);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
			}
		} else if (pdn->upf_ip.ip_type == PDN_IP_TYPE_IPV6) {
			ret = fill_ip_addr(0, config.pfcp_ip_v6.s6_addr, &node_value);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
			}
		}

		set_fseid(&(pfcp_sess_mod_req.cp_fseid), pdn->seid, node_value);

		/* Set PGW FQ-CSID */
		set_fq_csid_t(&pfcp_sess_mod_req.pgw_c_fqcsid, &pdn->pgw_csid);

		uint8_t pfcp_msg[PFCP_MSG_LEN] = {0};
		int encoded = encode_pfcp_sess_mod_req_t(&pfcp_sess_mod_req, pfcp_msg);
		pfcp_header_t *header = (pfcp_header_t *)pfcp_msg;
		header->message_len = htons(encoded - PFCP_IE_HDR_SIZE);

		if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded, upf_pfcp_sockaddr,SENT) < 0)
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to Send "
				"PFCP Session Modification to SGW-U",LOG_VALUE);
		else
		{
#ifdef CP_BUILD
			add_pfcp_if_timer_entry(context->s11_sgw_gtpc_teid,
				&upf_pfcp_sockaddr, pfcp_msg, encoded, ebi_index);
#endif /* CP_BUILD */
		}

		/* Update UE State */
		pdn->state = PFCP_SESS_MOD_REQ_SNT_STATE;

		/* Set create session response */
		/*extract ebi_id from array as all the ebi's will be of same pdn.*/
		resp->linked_eps_bearer_id = mb_rsp->bearer_contexts_modified[0].eps_bearer_id.ebi_ebi;
		resp->msg_type = GTP_CREATE_SESSION_RSP;
		resp->state = PFCP_SESS_MOD_REQ_SNT_STATE;
		/* Need to think about proc in this perticuler scenario */
		/* Early return: the S11 response is deferred until the PFCP
		 * modification triggered above completes. */
		return 0;
	}
#endif /* USE_CSID */

	/* No CSID work pending (or USE_CSID disabled): answer the waiting
	 * S11 transaction directly. */
	if (resp->msg_type == GTP_MODIFY_BEARER_REQ) {
		/* Fill the modify bearer response */
		set_modify_bearer_response(
			gtpv2c_s11_tx, mb_rsp->header.teid.has_teid.seq,
			context, bearer, &resp->gtpc_msg.mbr);
		resp->state = CONNECTED_STATE;
		/* Update the UE state */
		pdn->state = CONNECTED_STATE;
	}else{
		/* Pending transaction is a Create Session: finish the attach. */
		set_create_session_response(
			gtpv2c_s11_tx, mb_rsp->header.teid.has_teid.seq,
			context, pdn, 0);
		pdn->state = CONNECTED_STATE;
		pdn->proc = INITIAL_PDN_ATTACH_PROC;
		pdn->csr_sequence =0;
	}
	return 0;
}
/**
 * @brief : SGW-C handler for a Modify Bearer Response received on S5/S8
 *          during a modification procedure. Resolves the UE context/bearer,
 *          builds the handover-variant Modify Bearer Response on S11 and
 *          moves the PDN to CONNECTED state.
 * @param : mb_rsp, decoded Modify Bearer Response from PGW-C
 * @param : gtpv2c_s11_tx, tx buffer in which the S11 response is encoded
 * @return : 0 on success, GTPv2-C cause value on failure
 */
int
process_sgwc_s5s8_mbr_for_mod_proc(mod_bearer_rsp_t *mb_rsp, gtpv2c_header_t *gtpv2c_s11_tx)
{
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	eps_bearer *bearer = NULL;
	int ret = 0;
	struct resp_info *resp = NULL;
	int ebi_index = 0;

	/*extract ebi_id from array as all the ebi's will be of same pdn.*/
	ebi_index = GET_EBI_INDEX(mb_rsp->bearer_contexts_modified[0].eps_bearer_id.ebi_ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		return GTPV2C_CAUSE_SYSTEM_FAILURE;
	}

	/* s11_sgw_gtpc_teid= s5s8_sgw_gtpc_teid =
	 * key->ue_context_by_fteid_hash */
	ret = get_ue_context_by_sgw_s5s8_teid(mb_rsp->header.teid.has_teid.teid, &context);
	/* BUG FIX: this branch previously had no braces, so the
	 * "return GTPV2C_CAUSE_CONTEXT_NOT_FOUND" executed unconditionally
	 * (only the clLog call was guarded) and the function could never
	 * proceed past the lookup. */
	if (ret < 0 || !context) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
			" Ue context for teid: %d\n",
			LOG_VALUE, mb_rsp->header.teid.has_teid.teid);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	bearer = context->eps_bearers[ebi_index];
	pdn = bearer->pdn;

	/* Retrive the session information based on session id. */
	if (get_sess_entry(pdn->seid, &resp) != 0){
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
			"for seid: %lu", LOG_VALUE, pdn->seid);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	reset_resp_info_structure(resp);

	/* Fill the modify bearer response */
	set_modify_bearer_response_handover(
		gtpv2c_s11_tx, mb_rsp->header.teid.has_teid.seq,
		context, bearer, &resp->gtpc_msg.mbr);

	resp->state = CONNECTED_STATE;
	/* Update the UE state */
	pdn->state = CONNECTED_STATE;

	/* Re-point the S11 destination at the MME for the outgoing response. */
	ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
			"IP address", LOG_VALUE);
	}
	return 0;
}
/**
 * @brief : SGW-C handler for a Create Bearer Request received from PGW-C on
 *          S5/S8. Performs retransmission filtering, validates the mandatory
 *          IEs (LBI, per-bearer EBI, bearer QoS, TFT), allocates one eps_bearer
 *          per requested dedicated bearer and sends a PFCP Session
 *          Modification Request to the user plane; the GTPv2-C answer is
 *          deferred until the PFCP response arrives.
 * @param : cbr, decoded Create Bearer Request
 * @return : 0 on success; GTPC_RE_TRANSMITTED_REQ for a duplicate in-flight
 *           request; otherwise a GTPv2-C cause value
 */
int
process_create_bearer_request(create_bearer_req_t *cbr)
{
	int ret = 0;
	int ebi_index = 0;
	uint8_t idx = 0;
	uint8_t new_ebi_index = 0;
	uint32_t seq_no = 0;
	eps_bearer *bearers[MAX_BEARERS] = {0},*bearer = NULL;
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	struct resp_info *resp = NULL;
	/* NOTE(review): update_far is passed to fill_pfcp_sess_mod_req() without
	 * zero-initialization — confirm the callee writes every field it later
	 * reads. */
	pfcp_update_far_ie_t update_far[MAX_LIST_SIZE];
	pfcp_sess_mod_req_t pfcp_sess_mod_req = {0};

	ret = get_ue_context_by_sgw_s5s8_teid(cbr->header.teid.has_teid.teid, &context);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
			"UE context for teid: %d\n", LOG_VALUE, cbr->header.teid.has_teid.teid);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	/* Retransmission filter: same sequence while a request is still being
	 * processed is silently discarded; otherwise record the new sequence. */
	if(context != NULL ) {
		if(context->req_status.seq == cbr->header.teid.has_teid.seq) {
			if(context->req_status.status == REQ_IN_PROGRESS) {
				/* Discarding re-transmitted cbr */
				return GTPC_RE_TRANSMITTED_REQ;
			}else{
				/* Restransmitted CBR but processing already done for previous req */
				context->req_status.status = REQ_IN_PROGRESS;
			}
		} else {
			context->req_status.seq = cbr->header.teid.has_teid.seq;
			context->req_status.status = REQ_IN_PROGRESS;
		}
	}

	if(cbr->pres_rptng_area_act.header.len){
		store_presc_reporting_area_act_to_ue_context(&cbr->pres_rptng_area_act,
			context);
	}

	/* In SGW-C/SAEGW-C role the eventual answer goes towards the MME. */
	if(context->cp_mode != PGWC) {
		ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
		}
	}

	seq_no = cbr->header.teid.has_teid.seq;
	if(!cbr->lbi.header.len){
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Mandatory IE"
			" (EPS bearer id) is missing in create bearer request \n",
			LOG_VALUE);
		return GTPV2C_CAUSE_MANDATORY_IE_MISSING;
	}

	ebi_index = GET_EBI_INDEX(cbr->lbi.ebi_ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		return GTPV2C_CAUSE_SYSTEM_FAILURE;
	}

	pdn = GET_PDN(context, ebi_index);
	if (pdn == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
			"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	/* For a UE-requested bearer resource modification the sequence is used
	 * as-is; otherwise extract the 24-bit GTP sequence (byte-swap, drop the
	 * spare byte). */
	if ( pdn->proc == UE_REQ_BER_RSRC_MOD_PROC ) {
		seq_no = cbr->header.teid.has_teid.seq;
	} else {
		seq_no = bswap_32(cbr->header.teid.has_teid.seq);
		seq_no = seq_no >> 8;
	}

	if (get_sess_entry(pdn->seid, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
			"for seid: %lu", LOG_VALUE, pdn->seid);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	reset_resp_info_structure(resp);

	/* One new dedicated bearer per bearer context in the request. The new
	 * bearers are parked at indices >= MAX_BEARERS until the final EBI is
	 * assigned. */
	for(idx = 0; idx < cbr->bearer_cnt; ++idx) {
		bearer = rte_zmalloc_socket(NULL, (sizeof(eps_bearer)),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (bearer == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate "
				"Memory for Bearer, Error: %s \n", LOG_VALUE,
				rte_strerror(rte_errno));
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}
		new_ebi_index = (idx + MAX_BEARERS);
		resp->eps_bearer_ids[idx] = new_ebi_index + NUM_EBI_RESERVED;
		bearer->pdn = pdn;
		context->eps_bearers[new_ebi_index] = bearer;
		resp->bearer_count++;
		pdn->num_bearer++;
		/* NOTE(review): from here on, error returns leave the freshly
		 * allocated bearer attached to context/pdn with the counters above
		 * already incremented — confirm cleanup is handled elsewhere. */
		if(!cbr->bearer_contexts[idx].eps_bearer_id.header.len){
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Error: Mandatory IE "
				"(EPS bearer id) is missing in bearer context in create bearer request \n",
				LOG_VALUE);
			return GTPV2C_CAUSE_MANDATORY_IE_MISSING;
		}
		pdn->eps_bearers[new_ebi_index] = bearer;
		if(cbr->bearer_contexts[idx].bearer_lvl_qos.header.len){
			bearer->qos.arp.preemption_vulnerability = cbr->bearer_contexts[idx].bearer_lvl_qos.pvi;
			bearer->qos.arp.priority_level = cbr->bearer_contexts[idx].bearer_lvl_qos.pl;
			bearer->qos.arp.preemption_capability = cbr->bearer_contexts[idx].bearer_lvl_qos.pci;
			bearer->qos.qci = cbr->bearer_contexts[idx].bearer_lvl_qos.qci;
			bearer->qos.ul_mbr = cbr->bearer_contexts[idx].bearer_lvl_qos.max_bit_rate_uplnk;
			bearer->qos.dl_mbr = cbr->bearer_contexts[idx].bearer_lvl_qos.max_bit_rate_dnlnk;
			bearer->qos.ul_gbr = cbr->bearer_contexts[idx].bearer_lvl_qos.guarntd_bit_rate_uplnk;
			bearer->qos.dl_gbr = cbr->bearer_contexts[idx].bearer_lvl_qos.guarntd_bit_rate_dnlnk;
		}
		else{
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Error: Mandatory IE"
				" (bearer level QoS) is missing in Create Bearer Request\n",
				LOG_VALUE);
			return GTPV2C_CAUSE_MANDATORY_IE_MISSING;
		}
		ret = fill_ip_addr(cbr->bearer_contexts[idx].s58_u_pgw_fteid.ipv4_address,
			cbr->bearer_contexts[idx].s58_u_pgw_fteid.ipv6_address,
			&bearer->s5s8_pgw_gtpu_ip);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
		}
		bearer->s5s8_pgw_gtpu_teid = cbr->bearer_contexts[idx].s58_u_pgw_fteid.teid_gre_key;
		/* Stash the TFT so it can be relayed when the bearer is answered. */
		if(cbr->bearer_contexts[idx].tft.header.len){
			resp->eps_bearer_lvl_tft[idx] =
				rte_zmalloc("Bearer_lvl_tft", MAX_TFT_LEN, RTE_CACHE_LINE_SIZE);
			memcpy(resp->eps_bearer_lvl_tft[idx],
				cbr->bearer_contexts[idx].tft.eps_bearer_lvl_tft, MAX_TFT_LEN);
			resp->tft_header_len[idx] = cbr->bearer_contexts[idx].tft.header.len;
		}
		else{
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Error: Mandatory IE"
				" (bearer level TFT) is missing in Create Bearer Request\n",
				LOG_VALUE);
			return GTPV2C_CAUSE_MANDATORY_IE_MISSING;
		}
		fill_dedicated_bearer_info(bearer, context, pdn, FALSE);
		pfcp_sess_mod_req.create_pdr_count += bearer->pdr_count;
		bearers[idx] = bearer;
		bearer->sequence = cbr->header.teid.has_teid.seq;
	}

	fill_pfcp_sess_mod_req(&pfcp_sess_mod_req, &cbr->header, bearers, pdn, update_far, 0, cbr->bearer_cnt, context);

	uint8_t pfcp_msg[PFCP_MSG_LEN]={0};
	int encoded = encode_pfcp_sess_mod_req_t(&pfcp_sess_mod_req, pfcp_msg);

	ret = set_dest_address(pdn->upf_ip, &upf_pfcp_sockaddr);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
			"IP address", LOG_VALUE);
	}

	if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded,
			upf_pfcp_sockaddr, SENT) < 0)
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error in sending "
			"modification request to SGW-U. err_no: %i\n", LOG_VALUE, errno);
	else {
#ifdef CP_BUILD
		/* Arm the PFCP retransmission timer for this request. */
		add_pfcp_if_timer_entry(context->s11_sgw_gtpc_teid,
			&upf_pfcp_sockaddr, pfcp_msg, encoded, ebi_index);
#endif /* CP_BUILD */
	}

	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"for BRC: proc set to : %s\n",
		LOG_VALUE, get_proc_string(pdn->proc));

	if ( pdn->proc == UE_REQ_BER_RSRC_MOD_PROC) {
		resp->proc = UE_REQ_BER_RSRC_MOD_PROC;
	} else {
		resp->proc = DED_BER_ACTIVATION_PROC;
		pdn->proc = DED_BER_ACTIVATION_PROC;
	}

	/* Park the transaction: the GTPv2-C reply is built once the PFCP
	 * Session Modification Response comes back. */
	context->sequence = seq_no;
	pdn->state = PFCP_SESS_MOD_REQ_SNT_STATE;
	resp->gtpc_msg.cb_req = *cbr;
	resp->bearer_count = cbr->bearer_cnt;
	resp->linked_eps_bearer_id = pdn->default_bearer_id;
	resp->msg_type = GTP_CREATE_BEARER_REQ;
	resp->state = PFCP_SESS_MOD_REQ_SNT_STATE;
	resp->cp_mode = context->cp_mode;
	return 0;
}
/**
 * @brief : Handler for a Delete Bearer Request. When the LBI IE is present
 *          the whole PDN (all its bearers) is targeted; otherwise only the
 *          individually listed EBIs are. Sends a PFCP Session Modification
 *          Request deleting the selected bearers and records the pending
 *          transaction in the resp structure.
 * @param : db_req, decoded Delete Bearer Request
 * @param : context, UE context the request belongs to
 * @param : proc_type, procedure identifier recorded on pdn/resp
 * @return : 0 on success, GTPv2-C cause value on failure
 */
int
process_delete_bearer_request(del_bearer_req_t *db_req, ue_context *context, uint8_t proc_type)
{
	int ebi_index = 0, ret = 0;
	uint8_t bearer_cntr = 0;
	pdn_connection *pdn = NULL;
	struct resp_info *resp = NULL;
	int default_bearer_index = 0;
	eps_bearer *bearers[MAX_BEARERS] = {0};
	pfcp_sess_mod_req_t pfcp_sess_mod_req = {0};
	uint8_t jCnt = 0;

	/* The eventual GTPv2-C answer goes towards the MME. */
	ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
			"IP address", LOG_VALUE);
	}

	if (db_req->lbi.header.len != 0) {
		/* LBI present: delete every bearer of that PDN. */
		default_bearer_index = GET_EBI_INDEX(db_req->lbi.ebi_ebi);
		if (default_bearer_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}
		pdn = GET_PDN(context, default_bearer_index);
		if (pdn == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, default_bearer_index);
			return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
		}
		for (uint8_t iCnt = 0; iCnt < MAX_BEARERS; ++iCnt) {
			if (NULL != pdn->eps_bearers[iCnt]) {
				bearers[jCnt] = pdn->eps_bearers[iCnt];
				bearers[jCnt]->sequence = db_req->header.teid.has_teid.seq;
				jCnt++;
			}
		}
		bearer_cntr = pdn->num_bearer;
	} else {
		/* No LBI: delete only the explicitly listed EBIs. */
		for (uint8_t iCnt = 0; iCnt < db_req->bearer_count; ++iCnt) {
			ebi_index = GET_EBI_INDEX(db_req->eps_bearer_ids[iCnt].ebi_ebi);
			if (ebi_index == -1) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
				return GTPV2C_CAUSE_SYSTEM_FAILURE;
			}
			bearers[iCnt] = context->eps_bearers[ebi_index];
			bearers[iCnt]->sequence = db_req->header.teid.has_teid.seq;
		}
		/* NOTE(review): pdn is resolved from the LAST ebi_index of the loop
		 * above (ebi_index stays 0 if bearer_count is 0) — assumes all
		 * listed EBIs belong to the same PDN; confirm. */
		pdn = GET_PDN(context, ebi_index);
		if (pdn == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
				"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
			return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
		}
		bearer_cntr = db_req->bearer_count;
	}

	fill_pfcp_sess_mod_req_delete(&pfcp_sess_mod_req, pdn, bearers, bearer_cntr);

	uint8_t pfcp_msg[PFCP_MSG_LEN] = {0};
	int encoded = encode_pfcp_sess_mod_req_t(&pfcp_sess_mod_req, pfcp_msg);

	if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded,
			upf_pfcp_sockaddr, SENT) < 0)
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error in sending Session "
			"Modification Request to SGW-U. Error: %i\n", LOG_VALUE, errno);
	else {
#ifdef CP_BUILD
		/* Timer entry is keyed on the default bearer of the PDN. */
		ebi_index = GET_EBI_INDEX(pdn->default_bearer_id);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}
		add_pfcp_if_timer_entry(context->s11_sgw_gtpc_teid,
			&upf_pfcp_sockaddr, pfcp_msg, encoded, ebi_index);
#endif /* CP_BUILD */
	}

	context->sequence = db_req->header.teid.has_teid.seq;
	pdn->state = PFCP_SESS_MOD_REQ_SNT_STATE;

	if (get_sess_entry(pdn->seid, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
			"for seid: %lu", LOG_VALUE, pdn->seid);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	reset_resp_info_structure(resp);

	/* Remember which bearers were targeted for when the PFCP answer lands. */
	if (db_req->lbi.header.len != 0) {
		resp->linked_eps_bearer_id = db_req->lbi.ebi_ebi;
		resp->bearer_count = 0;
	} else {
		resp->bearer_count = db_req->bearer_count;
		for (uint8_t iCnt = 0; iCnt < db_req->bearer_count; ++iCnt) {
			resp->eps_bearer_ids[iCnt] = db_req->eps_bearer_ids[iCnt].ebi_ebi;
		}
	}
	resp->msg_type = GTP_DELETE_BEARER_REQ;
	resp->state = PFCP_SESS_MOD_REQ_SNT_STATE;
	resp->proc = proc_type;
	resp->cp_mode = context->cp_mode;
	resp->gtpc_msg.db_req = *db_req;
	pdn->proc = proc_type;
	return 0;
}
/**
 * @brief : Handles a Delete Bearer Response: resolves the bearers named in
 *          the response, sends a PFCP Session Modification Request removing
 *          their PDRs to the user plane, and records the pending transaction
 *          so the procedure can complete when the PFCP answer arrives.
 * @param : db_rsp, decoded Delete Bearer Response
 * @param : context, UE context the response belongs to
 * @param : proc_type, procedure identifier recorded on pdn/resp
 * @return : 0 on success, GTPv2-C cause value on failure
 */
int
process_delete_bearer_resp(del_bearer_rsp_t *db_rsp, ue_context *context, uint8_t proc_type)
{
	int ebi_index = 0;
	pdn_connection *pdn = NULL;
	struct resp_info *resp = NULL;
	eps_bearer *bearers[MAX_BEARERS] = {0};
	pfcp_sess_mod_req_t pfcp_sess_mod_req = {0};
	uint8_t num_bearers = db_rsp->bearer_count;

	/* Resolve every bearer listed in the response. */
	for (uint8_t i = 0; i < num_bearers; ++i) {
		ebi_index = GET_EBI_INDEX(db_rsp->bearer_contexts[i].eps_bearer_id.ebi_ebi);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}
		bearers[i] = context->eps_bearers[ebi_index];
	}

	/* All listed EBIs share one PDN; the last resolved index selects it. */
	pdn = GET_PDN(context, ebi_index);
	if (pdn == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
			"pdn for ebi_index %d\n", LOG_VALUE, ebi_index);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	/* Build and encode the PFCP request that removes the bearer PDRs. */
	fill_pfcp_sess_mod_req_pgw_init_remove_pdr(&pfcp_sess_mod_req, pdn, bearers, num_bearers);

	uint8_t pfcp_msg[PFCP_MSG_LEN] = {0};
	int encoded = encode_pfcp_sess_mod_req_t(&pfcp_sess_mod_req, pfcp_msg);

	if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded,
			upf_pfcp_sockaddr, SENT) < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Error in Sending "
			"Modification Request to SGW-U. err_no: %i\n", LOG_VALUE, errno);
	} else {
#ifdef CP_BUILD
		/* The retransmission timer is keyed on the PDN's default bearer. */
		ebi_index = GET_EBI_INDEX(pdn->default_bearer_id);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}
		add_pfcp_if_timer_entry(db_rsp->header.teid.has_teid.teid,
			&upf_pfcp_sockaddr, pfcp_msg, encoded, ebi_index);
#endif /* CP_BUILD */
	}

	context->sequence = db_rsp->header.teid.has_teid.seq;
	pdn->state = PFCP_SESS_MOD_REQ_SNT_STATE;

	if (get_sess_entry(pdn->seid, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
			"for seid: %lu", LOG_VALUE, pdn->seid);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	/* Record the pending transaction for the PFCP answer. */
	resp->bearer_count = db_rsp->bearer_count;
	for (uint8_t i = 0; i < db_rsp->bearer_count; ++i)
		resp->eps_bearer_ids[i] = db_rsp->bearer_contexts[i].eps_bearer_id.ebi_ebi;

	pdn->proc = proc_type;
	resp->proc = proc_type;
	resp->state = PFCP_SESS_MOD_REQ_SNT_STATE;
	resp->msg_type = GTP_DELETE_BEARER_RSP;
	resp->cp_mode = context->cp_mode;
	return 0;
}
/**
 * @brief : SGW-C handler for a Create Bearer Request that arrives together
 *          with (piggybacked on) a Create Session Response. Applies the
 *          stored CSResp data (APN restriction, PAA, PGW F-TEIDs, bearer QoS)
 *          to the PDN and its existing bearers, creates the requested
 *          dedicated bearers, updates FQ-CSID bookkeeping (USE_CSID) and
 *          sends one PFCP Session Modification Request covering everything.
 * @param : cbr, decoded Create Bearer Request
 * @return : 0 on success, GTPv2-C cause value or -1 on failure
 */
int
process_cs_resp_cb_request(create_bearer_req_t *cbr)
{
	int ret = 0;
	int ebi_index = 0;
	uint8_t idx = 0;
	uint8_t new_ebi_index = 0;
	eps_bearer *bearers[MAX_BEARERS] = {0},*bearer = NULL,*dedicated_bearer = NULL;
	uint8_t index = 0;
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	struct resp_info *resp = NULL;
	/* Zero-initialized so partially-filled FAR entries carry no garbage. */
	pfcp_update_far_ie_t update_far[MAX_LIST_SIZE] = {0};
	pfcp_sess_mod_req_t pfcp_sess_mod_req = {0};

	ret = get_ue_context_by_sgw_s5s8_teid(cbr->header.teid.has_teid.teid, &context);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
			"UE context for teid: %d\n", LOG_VALUE, cbr->header.teid.has_teid.teid);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	/* The eventual GTPv2-C answer goes towards the MME. */
	ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
			"IP address", LOG_VALUE);
	}

	ebi_index = GET_EBI_INDEX(cbr->lbi.ebi_ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		return GTPV2C_CAUSE_SYSTEM_FAILURE;
	}

	/* BUG FIX: removed the preceding unchecked
	 * "pdn = context->eps_bearers[ebi_index]->pdn;" — it dereferenced a
	 * possibly-NULL bearer and its result was immediately overwritten by
	 * the checked GET_PDN() lookup below. */
	pdn = GET_PDN(context, ebi_index);
	if (pdn == NULL) {
		/* BUG FIX: format specifier was %lu for an int argument (UB). */
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No PDN found "
			"for ebi_index : %d", LOG_VALUE, ebi_index);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	if (get_sess_entry(pdn->seid, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
			"for seid: %lu", LOG_VALUE, pdn->seid);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	/* Apply the saved Create Session Response content to the PDN. */
	pdn->apn_restriction = resp->gtpc_msg.cs_rsp.apn_restriction.rstrct_type_val;

	/*Reseting PDN type to Update as per the type sent in CSResp from PGW-C*/
	pdn->pdn_type.ipv4 = 0;
	pdn->pdn_type.ipv6 = 0;

	if (resp->gtpc_msg.cs_rsp.paa.pdn_type == PDN_IP_TYPE_IPV6
			|| resp->gtpc_msg.cs_rsp.paa.pdn_type == PDN_IP_TYPE_IPV4V6) {
		pdn->pdn_type.ipv6 = PRESENT;
		memcpy(pdn->uipaddr.ipv6.s6_addr, resp->gtpc_msg.cs_rsp.paa.paa_ipv6, IPV6_ADDRESS_LEN);
		pdn->prefix_len = resp->gtpc_msg.cs_rsp.paa.ipv6_prefix_len;
	}

	if (resp->gtpc_msg.cs_rsp.paa.pdn_type == PDN_IP_TYPE_IPV4
			|| resp->gtpc_msg.cs_rsp.paa.pdn_type == PDN_IP_TYPE_IPV4V6) {
		pdn->pdn_type.ipv4 = PRESENT;
		pdn->uipaddr.ipv4.s_addr = resp->gtpc_msg.cs_rsp.paa.pdn_addr_and_pfx;
	}

	ret = fill_ip_addr(resp->gtpc_msg.cs_rsp.pgw_s5s8_s2as2b_fteid_pmip_based_intfc_or_gtp_based_ctl_plane_intfc.ipv4_address,
		resp->gtpc_msg.cs_rsp.pgw_s5s8_s2as2b_fteid_pmip_based_intfc_or_gtp_based_ctl_plane_intfc.ipv6_address,
		&pdn->s5s8_pgw_gtpc_ip);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
			"IP address", LOG_VALUE);
	}

	pdn->s5s8_pgw_gtpc_teid =
		resp->gtpc_msg.cs_rsp.pgw_s5s8_s2as2b_fteid_pmip_based_intfc_or_gtp_based_ctl_plane_intfc.teid_gre_key;

	/* Apply the CSResp bearer context to the existing (default) bearers.
	 * NOTE(review): 'index' is never advanced inside this loop, so every
	 * iteration reads bearer_contexts_created[0] and writes bearers[0] /
	 * update_far[0] while update_far_count still grows; confirm the
	 * intended behaviour when the PDN holds more than one bearer. */
	for (uint8_t i= 0; i< MAX_BEARERS; i++) {
		bearer = pdn->eps_bearers[i];
		if(bearer == NULL)
			continue;
		/* TODO: Implement TFTs on default bearers
		 * if (create_s5s8_session_response.bearer_tft_ie) {
		 * }
		 * */
		/* TODO: Implement PGWC S5S8 bearer QoS */
		if (resp->gtpc_msg.cs_rsp.bearer_contexts_created[index].bearer_lvl_qos.header.len) {
			bearer->qos.qci = resp->gtpc_msg.cs_rsp.bearer_contexts_created[index].bearer_lvl_qos.qci;
			bearer->qos.ul_mbr =
				resp->gtpc_msg.cs_rsp.bearer_contexts_created[index].bearer_lvl_qos.max_bit_rate_uplnk;
			bearer->qos.dl_mbr =
				resp->gtpc_msg.cs_rsp.bearer_contexts_created[index].bearer_lvl_qos.max_bit_rate_dnlnk;
			bearer->qos.ul_gbr =
				resp->gtpc_msg.cs_rsp.bearer_contexts_created[index].bearer_lvl_qos.guarntd_bit_rate_uplnk;
			bearer->qos.dl_gbr =
				resp->gtpc_msg.cs_rsp.bearer_contexts_created[index].bearer_lvl_qos.guarntd_bit_rate_dnlnk;
			bearer->qos.arp.preemption_vulnerability =
				resp->gtpc_msg.cs_rsp.bearer_contexts_created[index].bearer_lvl_qos.pvi;
			bearer->qos.arp.spare1 =
				resp->gtpc_msg.cs_rsp.bearer_contexts_created[index].bearer_lvl_qos.spare2;
			bearer->qos.arp.priority_level =
				resp->gtpc_msg.cs_rsp.bearer_contexts_created[index].bearer_lvl_qos.pl;
			bearer->qos.arp.preemption_capability =
				resp->gtpc_msg.cs_rsp.bearer_contexts_created[index].bearer_lvl_qos.pci;
			bearer->qos.arp.spare2 =
				resp->gtpc_msg.cs_rsp.bearer_contexts_created[index].bearer_lvl_qos.spare3;
		}

		ret = fill_ip_addr(resp->gtpc_msg.cs_rsp.bearer_contexts_created[index].s5s8_u_pgw_fteid.ipv4_address,
			resp->gtpc_msg.cs_rsp.bearer_contexts_created[index].s5s8_u_pgw_fteid.ipv6_address,
			&bearer->s5s8_pgw_gtpu_ip);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
		}

		bearer->s5s8_pgw_gtpu_teid =
			resp->gtpc_msg.cs_rsp.bearer_contexts_created[index].s5s8_u_pgw_fteid.teid_gre_key;
		bearer->pdn = pdn;

		/* Point the downlink FAR at the PGW-U F-TEID just learned. */
		update_far[index].upd_frwdng_parms.outer_hdr_creation.teid =
			bearer->s5s8_pgw_gtpu_teid;
		ret = set_node_address(&update_far[index].upd_frwdng_parms.outer_hdr_creation.ipv4_address,
			update_far[index].upd_frwdng_parms.outer_hdr_creation.ipv6_address,
			bearer->s5s8_pgw_gtpu_ip);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
		}
		update_far[index].upd_frwdng_parms.dst_intfc.interface_value =
			check_interface_type(resp->gtpc_msg.cs_rsp.bearer_contexts_created[index].s5s8_u_pgw_fteid.interface_type,
				context->cp_mode);
		update_far[index].far_id.far_id_value =
			get_far_id(bearer, update_far[index].upd_frwdng_parms.dst_intfc.interface_value);
		pfcp_sess_mod_req.update_far_count++;
		bearers[index] = bearer;
	}

#ifdef USE_CSID
	fqcsid_t *tmp = NULL;
	/* PGW FQ-CSID */
	if (resp->gtpc_msg.cs_rsp.pgw_fqcsid.header.len) {
		ret = add_peer_addr_entry_for_fqcsid_ie_node_addr(
			&pdn->s5s8_pgw_gtpc_ip, &resp->gtpc_msg.cs_rsp.pgw_fqcsid,
			S5S8_SGWC_PORT_ID);
		if (ret)
			return ret;
		/* Stored the PGW CSID by PGW Node address */
		ret = add_fqcsid_entry(&resp->gtpc_msg.cs_rsp.pgw_fqcsid, context->pgw_fqcsid);
		if(ret)
			return ret;
		fill_pdn_fqcsid_info(&pdn->pgw_csid, context->pgw_fqcsid);
	} else {
		/* No FQ-CSID IE: track the CSID by the PGW node address only. */
		tmp = get_peer_addr_csids_entry(&(pdn->s5s8_pgw_gtpc_ip), ADD_NODE);
		if (tmp == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error: Failed to "
				"add PGW CSID by PGW Node addres %s \n", LOG_VALUE,
				strerror(errno));
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}
		memcpy(&(tmp->node_addr), &(pdn->s5s8_pgw_gtpc_ip), sizeof(node_address_t));
		memcpy(&((context->pgw_fqcsid)->node_addr[(context->pgw_fqcsid)->num_csid]),
			&(pdn->s5s8_pgw_gtpc_ip), sizeof(node_address_t));
	}

	/* Link local CSID with PGW CSID */
	if (pdn->pgw_csid.num_csid) {
		if (link_gtpc_peer_csids(&pdn->pgw_csid,
			&pdn->sgw_csid, S5S8_SGWC_PORT_ID)) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to Link "
				"Local CSID entry to link with PGW FQCSID, Error : %s \n", LOG_VALUE,
				strerror(errno));
			return -1;
		}
		if (link_sess_with_peer_csid(&pdn->pgw_csid, pdn, S5S8_SGWC_PORT_ID)) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error : Failed to Link "
				"Session with Peer CSID\n", LOG_VALUE);
			return -1;
		}
		/* Set PGW FQ-CSID */
		set_fq_csid_t(&pfcp_sess_mod_req.pgw_c_fqcsid, &pdn->pgw_csid);
	}
#endif /* USE_CSID */

	/* Create the dedicated bearers requested by the piggybacked CBR; new
	 * bearers are parked at indices >= MAX_BEARERS until final EBIs exist. */
	for(idx = 0; idx < cbr->bearer_cnt; ++idx) {
		dedicated_bearer = rte_zmalloc_socket(NULL, (sizeof(eps_bearer)),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (dedicated_bearer == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate "
				"Memory for Bearer, Error: %s \n", LOG_VALUE,
				rte_strerror(rte_errno));
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}
		new_ebi_index = idx + MAX_BEARERS;
		resp->eps_bearer_ids[idx] = new_ebi_index + NUM_EBI_RESERVED;
		dedicated_bearer->pdn = pdn;
		context->eps_bearers[new_ebi_index] = dedicated_bearer;
		pdn->eps_bearers[new_ebi_index] = dedicated_bearer;
		dedicated_bearer->qos.arp.preemption_vulnerability = cbr->bearer_contexts[idx].bearer_lvl_qos.pvi;
		dedicated_bearer->qos.arp.priority_level = cbr->bearer_contexts[idx].bearer_lvl_qos.pl;
		dedicated_bearer->qos.arp.preemption_capability = cbr->bearer_contexts[idx].bearer_lvl_qos.pci;
		dedicated_bearer->qos.qci = cbr->bearer_contexts[idx].bearer_lvl_qos.qci;
		dedicated_bearer->qos.ul_mbr = cbr->bearer_contexts[idx].bearer_lvl_qos.max_bit_rate_uplnk;
		dedicated_bearer->qos.dl_mbr = cbr->bearer_contexts[idx].bearer_lvl_qos.max_bit_rate_dnlnk;
		dedicated_bearer->qos.ul_gbr = cbr->bearer_contexts[idx].bearer_lvl_qos.guarntd_bit_rate_uplnk;
		dedicated_bearer->qos.dl_gbr = cbr->bearer_contexts[idx].bearer_lvl_qos.guarntd_bit_rate_dnlnk;

		ret = fill_ip_addr(cbr->bearer_contexts[idx].s58_u_pgw_fteid.ipv4_address,
			cbr->bearer_contexts[idx].s58_u_pgw_fteid.ipv6_address,
			&dedicated_bearer->s5s8_pgw_gtpu_ip);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
		}

		dedicated_bearer->s5s8_pgw_gtpu_teid = cbr->bearer_contexts[idx].s58_u_pgw_fteid.teid_gre_key;

		/* Stash the TFT so it can be relayed in the S11 Create Bearer Req. */
		resp->eps_bearer_lvl_tft[idx] =
			rte_zmalloc("Bearer_lvl_tft", MAX_TFT_LEN, RTE_CACHE_LINE_SIZE);
		memcpy(resp->eps_bearer_lvl_tft[idx],
			cbr->bearer_contexts[idx].tft.eps_bearer_lvl_tft, MAX_TFT_LEN);
		resp->tft_header_len[idx] = cbr->bearer_contexts[idx].tft.header.len;

		fill_dedicated_bearer_info(dedicated_bearer, context, pdn, FALSE);
		pfcp_sess_mod_req.create_pdr_count += dedicated_bearer->pdr_count;
		bearers[index] = dedicated_bearer;
		index++;
	}

	fill_pfcp_sess_mod_req(&pfcp_sess_mod_req, &cbr->header, bearers, pdn,
			update_far, 0, cbr->bearer_cnt, context);

	uint8_t pfcp_msg[PFCP_MSG_LEN]={0};
	int encoded = encode_pfcp_sess_mod_req_t(&pfcp_sess_mod_req, pfcp_msg);

	if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded,
			upf_pfcp_sockaddr, SENT) < 0)
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Error sending while "
			"session delete request at sgwc %i\n", LOG_VALUE, errno);
	else {
#ifdef CP_BUILD
		add_pfcp_if_timer_entry(context->s11_sgw_gtpc_teid,
			&upf_pfcp_sockaddr, pfcp_msg, encoded, ebi_index);
#endif /* CP_BUILD */
	}

	/* Park the transaction until the PFCP Session Modification Response. */
	pdn->state = PFCP_SESS_MOD_REQ_SNT_STATE;
	resp->bearer_count = cbr->bearer_cnt;
	resp->msg_type = GTP_CREATE_BEARER_REQ;
	resp->state = PFCP_SESS_MOD_REQ_SNT_STATE;
	resp->proc = ATTACH_DEDICATED_PROC;
	pdn->proc = ATTACH_DEDICATED_PROC;
	resp->cp_mode = context->cp_mode;
	return 0;
}
/**
 * @brief : Handle a Modify Bearer Request received while the Create Bearer
 *          Response for a dedicated-bearer activation is still being
 *          completed (MBR/CBR collision case). Dedicated bearers accepted in
 *          the CBR are promoted from their transient slots (idx + MAX_BEARERS)
 *          into their final EBI slots, rejected bearers are queued for PDR
 *          removal, the MBR's S1-U eNB F-TEID updates are folded into the same
 *          PFCP Session Modification Request, and the request is sent to the
 *          user plane.
 * @param : mbr, decoded Modify Bearer Request (may carry zero bearer contexts)
 * @param : cb_rsp, decoded Create Bearer Response paired with this MBR
 * @return : 0 on success, a GTPv2-C cause value or -1 on failure,
 *           GTPC_RE_TRANSMITTED_REQ when the MBR is a retransmission.
 */
int
process_mb_request_cb_response(mod_bearer_req_t *mbr, create_bearer_rsp_t *cb_rsp)
{
	int ret = 0;
	int ebi_index = 0, index = 0, idx = 0;
	uint8_t seq_no;
	ue_context *context = NULL;
	eps_bearer *bearer = NULL, *bearers[MAX_BEARERS] = {0}, *dedicated_bearer = NULL;
	pdn_connection *pdn = NULL;
	struct resp_info *resp = NULL;
	pfcp_sess_mod_req_t pfcp_sess_mod_req = {0};
	pfcp_update_far_ie_t update_far[MAX_LIST_SIZE];
	eps_bearer *remove_bearers[MAX_BEARERS] = {0};

	/* Resolve the UE context: prefer the MBR header TEID, otherwise fall
	 * back to the CBR header TEID. */
	if (mbr->header.teid.has_teid.teid) {
		ret = rte_hash_lookup_data(ue_context_by_fteid_hash,
				(const void *) &mbr->header.teid.has_teid.teid,
				(void **) &context);
		if (ret < 0 || !context) {
			return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
		}
	} else {
		if (NOT_PRESENT != cb_rsp->header.teid.has_teid.teid) {
			if(get_ue_context(cb_rsp->header.teid.has_teid.teid, &context) != 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get"
						" UE context for teid: %d\n", LOG_VALUE, cb_rsp->header.teid.has_teid.teid);
				return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
			}
		} else {
			return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
		}
	}

	/* Pick a reference bearer to locate the session entry: the first bearer
	 * of the MBR when present, else the first dedicated bearer still parked
	 * in its transient slot (idx is 0 at this point). */
	if (mbr->bearer_contexts_to_be_modified[0].eps_bearer_id.ebi_ebi) {
		ebi_index = GET_EBI_INDEX(mbr->bearer_contexts_to_be_modified[0].eps_bearer_id.ebi_ebi);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}
		bearer = context->eps_bearers[ebi_index];
	} else {
		ebi_index = GET_EBI_INDEX(cb_rsp->bearer_contexts[idx].eps_bearer_id.ebi_ebi);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}
		bearer = context->eps_bearers[(idx + MAX_BEARERS)];
	}
	if (get_sess_entry(bearer->pdn->seid, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
				"for seid: %lu", LOG_VALUE, bearer->pdn->seid);
		return -1;
	}
	pdn = bearer->pdn;
	pfcp_sess_mod_req.update_far_count = 0;

	/* Updating the console count */
	cp_stats.modify_bearer++;

	/* Retransmission handling: drop an MBR whose sequence number is already
	 * being processed; otherwise record the new sequence as in progress. */
	if ((NULL != context) && (mbr->header.teid.has_teid.seq)) {
		if(context->req_status.seq == mbr->header.teid.has_teid.seq) {
			if(context->req_status.status == REQ_IN_PROGRESS) {
				/* Discarding re-transmitted mbr */
				return GTPC_RE_TRANSMITTED_REQ;
			}else{
				/* Retransmitted MBR but processing already done for previous req */
				context->req_status.status = REQ_IN_PROGRESS;
			}
		}else{
			context->req_status.seq = mbr->header.teid.has_teid.seq;
			context->req_status.status = REQ_IN_PROGRESS;
		}
	}
	uint8_t remove_cnt = 0;
	resp->cbr_seq = resp->gtpc_msg.cb_rsp.header.teid.has_teid.seq;

	/* The whole CBR was rejected: queue every transient dedicated bearer
	 * for removal.
	 * NOTE(review): the loop variable is 'i' but the resp arrays are indexed
	 * with 'idx' (still 0 here), and eps_bearer_ids[idx] is written twice in
	 * a row so the cause value overwrites the EBI - this looks like a bug;
	 * confirm the intended destination fields before changing. */
	if(resp->gtpc_msg.cb_rsp.cause.cause_value != GTPV2C_CAUSE_REQUEST_ACCEPTED) {
		for(uint8_t i = 0; i < resp->gtpc_msg.cb_rsp.bearer_cnt; i++) {
			remove_bearers[remove_cnt++] = context->eps_bearers[(i + MAX_BEARERS)];
			resp->eps_bearer_ids[idx] =
				resp->gtpc_msg.cb_rsp.bearer_contexts[idx].eps_bearer_id.ebi_ebi;
			resp->eps_bearer_ids[idx] =
				resp->gtpc_msg.cb_rsp.bearer_contexts[idx].cause.cause_value;
		}
	}
	if(resp->gtpc_msg.cb_rsp.cause.cause_value == GTPV2C_CAUSE_REQUEST_ACCEPTED) {
		for(idx = 0; idx < resp->gtpc_msg.cb_rsp.bearer_cnt; idx++) {
			ebi_index = GET_EBI_INDEX(resp->gtpc_msg.cb_rsp.bearer_contexts[idx].eps_bearer_id.ebi_ebi);
			if (ebi_index == -1) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
				return GTPV2C_CAUSE_SYSTEM_FAILURE;
			}
			/* Remove-and-skip path: the EBI clashes with the default bearer,
			 * the EBI slot is already occupied, or the MME rejected this
			 * individual bearer context. */
			if(((ebi_index + NUM_EBI_RESERVED) == pdn->default_bearer_id) ||
					(((*context).bearer_bitmap & (1 << ebi_index)) == 1) ||
					(resp->gtpc_msg.cb_rsp.bearer_contexts[idx].cause.cause_value
					 != GTPV2C_CAUSE_REQUEST_ACCEPTED)) {
				if((resp->gtpc_msg.cb_rsp.bearer_contexts[idx].cause.cause_value
							!= GTPV2C_CAUSE_REQUEST_ACCEPTED)) {
					dedicated_bearer = context->eps_bearers[(idx + MAX_BEARERS)];
					context->eps_bearers[ebi_index] = dedicated_bearer;
					pdn->eps_bearers[ebi_index] = dedicated_bearer;
				}
				remove_bearers[remove_cnt] = context->eps_bearers[(idx + MAX_BEARERS)];
				/* NOTE(review): same double write as above - the cause value
				 * immediately overwrites the EBI in eps_bearer_ids[idx]. */
				resp->eps_bearer_ids[idx] =
					resp->gtpc_msg.cb_rsp.bearer_contexts[idx].eps_bearer_id.ebi_ebi;
				resp->eps_bearer_ids[idx] =
					resp->gtpc_msg.cb_rsp.bearer_contexts[idx].cause.cause_value;
				remove_cnt++;
				continue;
			}
			/* Promote the dedicated bearer from its transient slot into the
			 * EBI slot assigned by the MME. */
			dedicated_bearer = context->eps_bearers[(idx + MAX_BEARERS)];
			resp->eps_bearer_ids[idx] = resp->gtpc_msg.cb_rsp.bearer_contexts[idx].eps_bearer_id.ebi_ebi;
			context->eps_bearers[ebi_index] = dedicated_bearer;
			/* NOTE(review): dedicated_bearer is dereferenced here while its
			 * NULL check only happens a few lines below - reorder if the
			 * transient slot can legitimately be empty. */
			dedicated_bearer->eps_bearer_id =
				resp->gtpc_msg.cb_rsp.bearer_contexts[idx].eps_bearer_id.ebi_ebi;
			(*context).bearer_bitmap |= (1 << ebi_index);
			context->eps_bearers[(idx + MAX_BEARERS )] = NULL;
			pdn->eps_bearers[ebi_index] = dedicated_bearer;
			pdn->eps_bearers[(idx + MAX_BEARERS )] = NULL;
			if (dedicated_bearer == NULL) {
				/* TODO:
				 * This means the EBI we allocated and received don't match.
				 * In a correct design, match the bearer in the transient
				 * struct from the SGW-U TEID.
				 * */
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Context not found "
						"Create Bearer Response with cause %d \n", LOG_VALUE, ret);
				return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
			}
			/* Store the S1-U eNB F-TEID negotiated for this bearer. */
			ret = fill_ip_addr(resp->gtpc_msg.cb_rsp.bearer_contexts[idx].s1u_enb_fteid.ipv4_address,
					resp->gtpc_msg.cb_rsp.bearer_contexts[idx].s1u_enb_fteid.ipv6_address,
					&dedicated_bearer->s1u_enb_gtpu_ip);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
						"IP address", LOG_VALUE);
			}
			dedicated_bearer->s1u_enb_gtpu_teid = resp->gtpc_msg.cb_rsp.bearer_contexts[idx].s1u_enb_fteid.teid_gre_key;
			/* When the eNB F-TEID IE is present, add an Update FAR so the
			 * user plane forwards downlink traffic to the new eNB endpoint. */
			if (resp->gtpc_msg.cb_rsp.bearer_contexts[idx].s1u_enb_fteid.header.len != 0) {
				update_far[pfcp_sess_mod_req.update_far_count].upd_frwdng_parms.outer_hdr_creation.teid =
					dedicated_bearer->s1u_enb_gtpu_teid;
				ret = set_node_address(&update_far[pfcp_sess_mod_req.update_far_count].upd_frwdng_parms.outer_hdr_creation.ipv4_address,
						update_far[pfcp_sess_mod_req.update_far_count].upd_frwdng_parms.outer_hdr_creation.ipv6_address,
						dedicated_bearer->s1u_enb_gtpu_ip);
				if (ret < 0) {
					clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
							"IP address", LOG_VALUE);
				}
				update_far[pfcp_sess_mod_req.update_far_count].upd_frwdng_parms.dst_intfc.interface_value =
					check_interface_type(resp->gtpc_msg.cb_rsp.bearer_contexts[idx].s1u_enb_fteid.interface_type,
							context->cp_mode);
				update_far[pfcp_sess_mod_req.update_far_count].far_id.far_id_value =
					get_far_id(dedicated_bearer, update_far[pfcp_sess_mod_req.update_far_count].upd_frwdng_parms.dst_intfc.interface_value);
				update_far[pfcp_sess_mod_req.update_far_count].apply_action.forw = PRESENT;
				update_far[pfcp_sess_mod_req.update_far_count].apply_action.dupl = GET_DUP_STATUS(context);
				pfcp_sess_mod_req.update_far_count++;
			}
			bearers[index] = dedicated_bearer;
			index++;
		}
	}
	if(remove_cnt != 0 ) {
		fill_pfcp_sess_mod_req_with_remove_pdr(&pfcp_sess_mod_req, pdn, remove_bearers, remove_cnt);
	}

	/* Fold the MBR's own bearer modifications into the same PFCP message. */
	if (mbr->bearer_count) {
		for(uint8_t i = 0; i < mbr->bearer_count; i++) {
			if (!mbr->bearer_contexts_to_be_modified[i].eps_bearer_id.header.len
					|| !mbr->bearer_contexts_to_be_modified[i].s1_enodeb_fteid.header.len) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Dropping packet\n",
						LOG_VALUE);
				return GTPV2C_CAUSE_INVALID_LENGTH;
			}
			ebi_index = GET_EBI_INDEX(mbr->bearer_contexts_to_be_modified[i].eps_bearer_id.ebi_ebi);
			if (ebi_index == -1) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
				return GTPV2C_CAUSE_SYSTEM_FAILURE;
			}
			/* Handling Multiple Bearer Contexts in MBR: when the CBR carried
			 * bearers, only the default bearer of the MBR is processed here. */
			if ((resp->gtpc_msg.cb_rsp.bearer_cnt != 0 ) && ((ebi_index + NUM_EBI_RESERVED) != pdn->default_bearer_id)) {
				continue;
			}
			if (!(context->bearer_bitmap & (1 << ebi_index))) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT" Received modify bearer on non-existent EBI - "
						"Dropping packet\n", LOG_VALUE);
				return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
			}
			bearer = context->eps_bearers[ebi_index];
			if (!bearer) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Received modify bearer on non-existent EBI - "
						"for while PFCP Session Modification Request Modify Bearer "
						"Request, Dropping packet\n", LOG_VALUE);
				return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
			}
			pdn = bearer->pdn;
			/* New S1-U eNB endpoint for this bearer: update the bearer and
			 * add the matching Update FAR. */
			if (mbr->bearer_contexts_to_be_modified[i].s1_enodeb_fteid.header.len != 0){
				ret = fill_ip_addr(mbr->bearer_contexts_to_be_modified[i].s1_enodeb_fteid.ipv4_address,
						mbr->bearer_contexts_to_be_modified[i].s1_enodeb_fteid.ipv6_address,
						&bearer->s1u_enb_gtpu_ip);
				if (ret < 0) {
					clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
							"IP address", LOG_VALUE);
				}
				bearer->s1u_enb_gtpu_teid =
					mbr->bearer_contexts_to_be_modified[i].s1_enodeb_fteid.teid_gre_key;
				update_far[pfcp_sess_mod_req.update_far_count].upd_frwdng_parms.outer_hdr_creation.teid =
					bearer->s1u_enb_gtpu_teid;
				ret = set_node_address(&update_far[pfcp_sess_mod_req.update_far_count].upd_frwdng_parms.outer_hdr_creation.ipv4_address,
						update_far[pfcp_sess_mod_req.update_far_count].upd_frwdng_parms.outer_hdr_creation.ipv6_address,
						bearer->s1u_enb_gtpu_ip);
				if (ret < 0) {
					clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
							"IP address", LOG_VALUE);
				}
				update_far[pfcp_sess_mod_req.update_far_count].upd_frwdng_parms.dst_intfc.interface_value =
					check_interface_type(mbr->bearer_contexts_to_be_modified[i].s1_enodeb_fteid.interface_type,
							context->cp_mode);
				update_far[pfcp_sess_mod_req.update_far_count].far_id.far_id_value =
					get_far_id(bearer, update_far[pfcp_sess_mod_req.update_far_count].upd_frwdng_parms.dst_intfc.interface_value);
				update_far[pfcp_sess_mod_req.update_far_count].apply_action.forw = PRESENT;
				update_far[pfcp_sess_mod_req.update_far_count].apply_action.dupl = GET_DUP_STATUS(pdn->context);
				pfcp_sess_mod_req.update_far_count++;
			}
			bearers[index] = bearer;
			index++;
		} /*forloop*/
	}
	fill_pfcp_sess_mod_req(&pfcp_sess_mod_req, &resp->gtpc_msg.cb_rsp.header, bearers, pdn,
			update_far, 0, index, context);
	ret = set_dest_address(context->s11_mme_gtpc_ip, &s11_mme_sockaddr);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
	}
	/* NOTE(review): seq_no is a uint8_t, so bswap_32() gets truncated to 8
	 * bits and the following >> 8 always yields 0; seq_no is never read
	 * afterwards - dead/broken computation kept for fidelity. */
	seq_no = bswap_32(resp->gtpc_msg.cb_rsp.header.teid.has_teid.seq);
	seq_no = seq_no >> 8;
#ifdef USE_CSID
	/* Generate the permanent CSID for SGW */
	if (context->cp_mode != PGWC) {
		/* Get the copy of existing SGW CSID */
		fqcsid_t tmp_csid_t = {0};
		if (pdn->sgw_csid.num_csid) {
			memcpy(&tmp_csid_t, &pdn->sgw_csid, sizeof(fqcsid_t));
		}
		/* Update the entry for peer nodes */
		if (fill_peer_node_info(pdn, bearer)) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to fill peer node info and assignment of the "
					"CSID Error: %s\n", LOG_VALUE, strerror(errno));
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}
		if (pdn->flag_fqcsid_modified == TRUE) {
			uint8_t tmp_csid = 0;
			/* Validate the existing CSID or allocate a new one */
			for (uint8_t inx1 = 0; inx1 < tmp_csid_t.num_csid; inx1++) {
				if (pdn->sgw_csid.local_csid[pdn->sgw_csid.num_csid - 1] ==
						tmp_csid_t.local_csid[inx1]) {
					tmp_csid = tmp_csid_t.local_csid[inx1];
					break;
				}
			}
			/* CSID changed: unlink this session from every old CSID. */
			if (!tmp_csid) {
				for (uint8_t inx = 0; inx < tmp_csid_t.num_csid; inx++) {
					/* Remove the session link from old CSID */
					sess_csid *tmp1 = NULL;
					tmp1 = get_sess_csid_entry(tmp_csid_t.local_csid[inx], REMOVE_NODE);
					if (tmp1 != NULL) {
						/* Remove node from csid linked list */
						tmp1 = remove_sess_csid_data_node(tmp1, pdn->seid);
						int8_t ret = 0;
						/* Update CSID Entry in table */
						ret = rte_hash_add_key_data(seids_by_csid_hash,
								&tmp_csid_t.local_csid[inx], tmp1);
						if (ret) {
							clLog(clSystemLog, eCLSeverityCritical,
									LOG_FORMAT"Failed to add Session IDs entry for CSID = %u"
									"\n\tError= %s\n",
									LOG_VALUE, tmp_csid_t.local_csid[inx],
									rte_strerror(abs(ret)));
							return GTPV2C_CAUSE_SYSTEM_FAILURE;
						}
						if (tmp1 == NULL) {
							/* Removing temporary local CSID associated with MME */
							remove_peer_temp_csid(&pdn->mme_csid, tmp_csid_t.local_csid[inx],
									S11_SGW_PORT_ID);
							/* Removing temporary local CSID associated with PGWC */
							remove_peer_temp_csid(&pdn->pgw_csid, tmp_csid_t.local_csid[inx],
									S5S8_SGWC_PORT_ID);
							/* Delete Local CSID entry */
							del_sess_csid_entry(tmp_csid_t.local_csid[inx]);
						}
						/* Delete CSID from the context */
						remove_csid_from_cntx(context->sgw_fqcsid, &tmp_csid_t);
					} else {
						clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Failed to "
								"get Session ID entry for CSID:%u\n", LOG_VALUE,
								tmp_csid_t.local_csid[inx]);
					}
					clLog(clSystemLog, eCLSeverityDebug,
							LOG_FORMAT"Remove session link from Old CSID:%u\n",
							LOG_VALUE, tmp_csid_t.local_csid[inx]);
				}
			}
			/* update entry for cp session id with link local csid */
			sess_csid *tmp = NULL;
			tmp = get_sess_csid_entry(
					pdn->sgw_csid.local_csid[pdn->sgw_csid.num_csid - 1],
					ADD_NODE);
			if (tmp == NULL) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Failed to get session of CSID entry %s \n",
						LOG_VALUE, strerror(errno));
				return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
			}
			/* Link local csid with session id */
			/* Check whether the head node is already in use */
			if(tmp->cp_seid != pdn->seid && tmp->cp_seid != 0) {
				sess_csid *new_node = NULL;
				/* Add new node into csid linked list */
				new_node = add_sess_csid_data_node(tmp,
						pdn->sgw_csid.local_csid[pdn->sgw_csid.num_csid - 1]);
				if(new_node == NULL ) {
					/* Fix: dropped the '%s' conversion that had no matching
					 * argument (undefined behavior in printf-style logging). */
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to "
							"ADD new node into CSID linked list\n", LOG_VALUE);
					return GTPV2C_CAUSE_SYSTEM_FAILURE;
				} else {
					new_node->cp_seid = pdn->seid;
					new_node->up_seid = pdn->dp_seid;
				}
			} else {
				tmp->cp_seid = pdn->seid;
				tmp->up_seid = pdn->dp_seid;
			}
			/* Fill the fqcsid into the session est request */
			if (fill_fqcsid_sess_mod_req(&pfcp_sess_mod_req, pdn)) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to fill "
						"FQ-CSID in Session Establishment Request, "
						"Error: %s\n", LOG_VALUE, strerror(errno));
				return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
			}
		}
	}
#endif /* USE_CSID */
	/*ULI CHECK*/
	context->uli_flag = FALSE;
	if(resp->gtpc_msg.cb_rsp.uli.header.len != 0) {
		check_for_uli_changes(&resp->gtpc_msg.cb_rsp.uli, context);
	}
	uint8_t pfcp_msg[PFCP_MSG_LEN]={0};
	int encoded = encode_pfcp_sess_mod_req_t(&pfcp_sess_mod_req, pfcp_msg);
	if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded, upf_pfcp_sockaddr, INTERFACE) < 0)
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error in sending MBR to SGW-U. err_no: %i\n", LOG_VALUE, errno);
	else
	{
#ifdef CP_BUILD
		/* Arm the PFCP retransmission timer for this request. */
		add_pfcp_if_timer_entry(resp->gtpc_msg.cb_rsp.header.teid.has_teid.teid,
				&upf_pfcp_sockaddr, pfcp_msg, encoded, ebi_index);
#endif /* CP_BUILD */
	}
	if (mbr->header.teid.has_teid.seq) {
		context->sequence = mbr->header.teid.has_teid.seq;
	}
	/* Record state so the PFCP Session Modification Response handler can
	 * finish both the pending CBR and MBR procedures. */
	resp->msg_type = GTP_MODIFY_BEARER_REQ;
	pdn->state = PFCP_SESS_MOD_REQ_SNT_STATE;
	resp->bearer_count = resp->gtpc_msg.cb_rsp.bearer_cnt;
	resp->state = PFCP_SESS_MOD_REQ_SNT_STATE;
	resp->cp_mode = context->cp_mode;
	memcpy(&resp->gtpc_msg.mbr, mbr, sizeof(struct mod_bearer_req_t));
	return 0;
}
/**
 * @brief : After an S1 handover, handle the Modify Bearer Response from the
 *          PGW on the SGWC side: refresh the PGW FQ-CSID bookkeeping and,
 *          when a PGW CSID is known, push it to the SGW-U via a PFCP Session
 *          Modification Request (USE_CSID builds only; otherwise this only
 *          validates the session entry).
 * @param : mb_rsp, decoded Modify Bearer Response from the PGW
 * @param : context, UE context of the session
 * @param : pdn, PDN connection being handed over
 * @param : bearer, a bearer of this PDN (used to derive the EBI index)
 * @return : 0 on success, a GTPv2-C cause value or -1 on failure.
 */
int
process_pfcp_sess_mod_resp_s1_handover(mod_bearer_rsp_t *mb_rsp, ue_context *context,
		pdn_connection *pdn, eps_bearer *bearer)
{
	struct resp_info *resp = NULL;
	uint32_t seq = 0;
	int ret = 0;
	if (get_sess_entry(pdn->seid, &resp) != 0){
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
				"for seid: %lu", LOG_VALUE, pdn->seid);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}
#ifdef USE_CSID
	fqcsid_t *tmp = NULL;
	pfcp_sess_mod_req_t pfcp_sess_mod_req = {0};
	/* PGW FQ-CSID IE present: replace any previously stored PGW CSID. */
	if ( mb_rsp->pgw_fqcsid.header.len) {
		/* Remove existing PGW CSID linked with the session */
		if (pdn->pgw_csid.num_csid) {
			memset(&pdn->pgw_csid, 0, sizeof(fqcsid_t));
		}
		ret = add_peer_addr_entry_for_fqcsid_ie_node_addr(
				&pdn->s5s8_pgw_gtpc_ip, &mb_rsp->pgw_fqcsid,
				S5S8_SGWC_PORT_ID);
		if (ret)
			return ret;
		/* Store the PGW CSID keyed by the PGW node address */
		ret = add_fqcsid_entry(&mb_rsp->pgw_fqcsid, context->pgw_fqcsid);
		if(ret)
			return ret;
		fill_pdn_fqcsid_info(&pdn->pgw_csid, context->pgw_fqcsid);
	} else {
		/* No FQ-CSID IE: fall back to tracking the peer purely by the PGW
		 * GTP-C node address. */
		tmp = get_peer_addr_csids_entry(&(pdn->s5s8_pgw_gtpc_ip),
				ADD_NODE);
		if (tmp == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error: Failed to "
					"add PGW CSID by PGW Node addres %s \n", LOG_VALUE,
					strerror(errno));
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}
		memcpy(&(tmp->node_addr),
				&(pdn->s5s8_pgw_gtpc_ip), sizeof(node_address_t));
		memcpy(&((context->pgw_fqcsid)->node_addr[(context->pgw_fqcsid)->num_csid]),
				&(pdn->s5s8_pgw_gtpc_ip), sizeof(node_address_t));
	}
	/* Link local CSID with PGW CSID */
	if (pdn->pgw_csid.num_csid) {
		if (link_gtpc_peer_csids(&pdn->pgw_csid,
					&pdn->sgw_csid, S5S8_SGWC_PORT_ID)) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to Link "
					"Local CSID entry to link with PGW FQCSID, Error : %s \n", LOG_VALUE,
					strerror(errno));
			return -1;
		}
		if (link_sess_with_peer_csid(&pdn->pgw_csid, pdn, S5S8_SGWC_PORT_ID)) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error : Failed to Link "
					"Session with Peer CSID\n", LOG_VALUE);
			return -1;
		}
		/* Send pfcp mod req to SGWU for pgwc csid */
		seq = get_pfcp_sequence_number(PFCP_SESSION_MODIFICATION_REQUEST, seq);
		set_pfcp_seid_header((pfcp_header_t *) &(pfcp_sess_mod_req.header),
				PFCP_SESSION_MODIFICATION_REQUEST, HAS_SEID, seq, context->cp_mode);
		pfcp_sess_mod_req.header.seid_seqno.has_seid.seid = pdn->dp_seid;
		node_address_t node_value = {0};
		/* Filling Node ID for F-SEID: pick the PFCP address family matching
		 * the UPF's IP type. */
		if (pdn->upf_ip.ip_type == PDN_IP_TYPE_IPV4) {
			uint8_t temp[IPV6_ADDRESS_LEN] = {0};
			ret = fill_ip_addr(config.pfcp_ip.s_addr, temp, &node_value);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
						"IP address", LOG_VALUE);
			}
		} else if (pdn->upf_ip.ip_type == PDN_IP_TYPE_IPV6) {
			ret = fill_ip_addr(0, config.pfcp_ip_v6.s6_addr, &node_value);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
						"IP address", LOG_VALUE);
			}
		}
		set_fseid(&(pfcp_sess_mod_req.cp_fseid), pdn->seid, node_value);
		/* Set PGW FQ-CSID */
		set_fq_csid_t(&pfcp_sess_mod_req.pgw_c_fqcsid, &pdn->pgw_csid);
		uint8_t pfcp_msg[PFCP_MSG_LEN] = {0};
		int encoded = encode_pfcp_sess_mod_req_t(&pfcp_sess_mod_req, pfcp_msg);
		/* Patch the PFCP message length after encoding. */
		pfcp_header_t *header = (pfcp_header_t *)pfcp_msg;
		header->message_len = htons(encoded - PFCP_IE_HDR_SIZE);
		if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded, upf_pfcp_sockaddr,SENT) < 0){
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to Send "
					"PFCP Session Modification to SGW-U",LOG_VALUE);
		}else{
#ifdef CP_BUILD
			/* extract ebi_id from array as all the ebi's will be of same pdn. */
			int ebi_index = GET_EBI_INDEX(bearer->eps_bearer_id);
			if (ebi_index == -1) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
				return GTPV2C_CAUSE_SYSTEM_FAILURE;
			}
			/* Arm the PFCP retransmission timer for this request. */
			add_pfcp_if_timer_entry(context->s11_sgw_gtpc_teid,
					&upf_pfcp_sockaddr, pfcp_msg, encoded, ebi_index);
#endif /* CP_BUILD */
		}
		/* Update UE State */
		pdn->state = PFCP_SESS_MOD_REQ_SNT_STATE;
		/* Set create session response */
		/* extract ebi_id from array as all the ebi's will be of same pdn. */
		resp->linked_eps_bearer_id = bearer->eps_bearer_id;
		resp->msg_type = GTP_MODIFY_BEARER_REQ;
		resp->state = PFCP_SESS_MOD_REQ_SNT_STATE;
		context->update_sgw_fteid = FALSE;
	}
#endif /* USE_CSID */
	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | cp/state_machine/sm_pcnd.h | <reponame>nikhilc149/e-utran-features-bug-fixes<gh_stars>0
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SM_PCND_H
#define SM_PCND_H
#include "sm_enum.h"
#include "sm_hand.h"
#include "sm_struct.h"
#include "pfcp_messages.h"
#include "gtp_messages.h"
#include "pfcp_set_ie.h"
/**
* @brief : Validate gtpv2c message
* @param : gtpv2c_rx, message data
* @param : bytes_rx, number of bytes in message
* @param : iface, interface type
* @return : Returns 0 in case of success , -1 otherwise
*/
uint8_t
gtpv2c_pcnd_check(gtpv2c_header_t *gtpv2c_rx, int bytes_rx,
struct sockaddr_in *peer_addr, uint8_t iface);
/**
* @brief : Decode and validate gtpv2c message
* @param : gtpv2c_rx, message data
* @param : msg, structure to store decoded message
* @param : bytes_rx, number of bytes in message
* @return : Returns 0 in case of success , -1 otherwise
*/
uint8_t
gtpc_pcnd_check(gtpv2c_header_t *gtpv2c_rx, msg_info *msg, int bytes_rx,
peer_addr_t *peer_addr, uint8_t uiIntFc);
/**
 * @brief : Decode and validate pfcp messages
 * @param : pfcp_rx, message data
 * @param : msg, structure to store decoded message
 * @param : bytes_rx, number of bytes in message
 * @param : peer_addr, address of the peer that sent the message
 * @return : Returns 0 in case of success, -1 otherwise
 */
uint8_t
pfcp_pcnd_check(uint8_t *pfcp_rx, msg_info *msg, int bytes_rx,
peer_addr_t *peer_addr);
/**
* @brief : Decode and validate gx messages
* @param : gx_rx, message data
* @param : msg, structure to store decoded message
* @return : Returns 0 in case of success , -1 otherwise
*/
uint32_t
gx_pcnd_check(gx_msg *gx_rx, msg_info *msg);
/**
 * @brief : Retrieve the UPF entry from the hash table
 * @param : ctxt, UE context
 * @param : pdn, PDN connection whose UPF IP is to be resolved
 * @return : Returns 0 in case of success, -1 otherwise
 */
int
get_upf_ip(ue_context *ctxt, pdn_connection *pdn);
#endif
|
nikhilc149/e-utran-features-bug-fixes | cp/cp_config.h | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cp.h"
#include "gw_adapter.h"
#include "cstats.h"
#include "cdadmfapi.h"
#define MAX_STRING_LEN 32
#define CFG_VALUE_LEN 256
#define GLOBAL_ENTRIES "GLOBAL"
#define APN_ENTRIES "APN"
#define NAMESERVER_ENTRIES "NAMESERVER_CONFIG"
#define IP_POOL_ENTRIES "IP_POOL_CONFIG"
#define CACHE_ENTRIES "CACHE"
#define APP_ENTRIES "APP"
#define OPS_ENTRIES "OPS"
#define CP_TYPE "CP_TYPE"
#define S11_IPS "S11_IP"
#define S11_IPS_V6 "S11_IP_V6"
#define S11_PORTS "S11_PORT"
#define S5S8_IPS "S5S8_IP"
#define S5S8_IPS_V6 "S5S8_IP_V6"
#define S5S8_PORTS "S5S8_PORT"
#define PFCP_IPS "PFCP_IP"
#define PFCP_IPS_V6 "PFCP_IP_V6"
#define PFCP_PORTS "PFCP_PORT"
#define DDF2_IP "DDF2_IP"
#define DDF2_PORT "DDF2_PORT"
#define DDF2_LOCAL_IPS "DDF2_LOCAL_IP"
#define DADMF_IPS "DADMF_IP"
#define DADMF_PORTS "DADMF_PORT"
#define DADMF_LOCAL_IPS "DADMF_LOCAL_IP"
#define UPF_PFCP_IPS "UPF_PFCP_IP"
#define UPF_PFCP_IPS_V6 "UPF_PFCP_IP_V6"
#define UPF_PFCP_PORTS "UPF_PFCP_PORT"
#define REDIS_IPS "REDIS_IP"
#define CP_REDIS_IP "CP_REDIS_IP"
#define REDIS_PORTS "REDIS_PORT"
#define REDIS_CERT_PATH "REDIS_CERT_PATH"
#define USE_DNS "USE_DNS"
#define CP_DNS_IP "CP_DNS_IP"
#define CLI_REST_IP "CLI_REST_IP"
#define CLI_REST_PORT "CLI_REST_PORT"
#define IP_ALLOCATION_MODE "IP_ALLOCATION_MODE"
#define IP_TYPE_SUPPORTED "IP_TYPE_SUPPORTED"
#define IP_TYPE_PRIORITY "IP_TYPE_PRIORITY"
#define USE_GX "USE_GX"
#define PERF_FLAG "PERF_FLAG"
#define SUGGESTED_PKT_COUNT "SUGGESTED_PKT_COUNT"
#define LOW_LVL_ARP_PRIORITY "LOW_LEVEL_ARP_PRIORITY"
#define APN_SEC_NAME_LEN 8
#define NAME "name"
#define USAGE_TYPE "usage_type"
#define NETWORK_CAPABILITY "network_capability"
#define TRIGGER_TYPE "trigger_type"
#define UPLINK_VOLTH "uplink_volume_th"
#define DOWNLINK_VOLTH "downlink_volume_th"
#define TIMETH "time_th"
#define URR_DEFAULT "URR_DEFAULT"
#define NAMESERVER "nameserver"
#define IP_POOL_IP "IP_POOL_IP"
#define IPV6_NETWORK_ID "IPV6_NETWORK_ID"
#define IPV6_PREFIX_LEN "IPV6_PREFIX_LEN"
#define IP_POOL_MASK "IP_POOL_MASK"
#define CONCURRENT "concurrent"
#define PERCENTAGE "percentage"
#define INT_SEC "interval_seconds"
#define FREQ_SEC "frequency_seconds"
#define FILENAME "filename"
#define QUERY_TIMEOUT "query_timeout_ms"
#define QUERY_TRIES "query_tries"
/* Restoration Parameters */
#define TRANSMIT_TIMER "TRANSMIT_TIMER"
#define PERIODIC_TIMER "PERIODIC_TIMER"
#define TRANSMIT_COUNT "TRANSMIT_COUNT"
/* CP Timer Parameter */
#define REQUEST_TIMEOUT "REQUEST_TIMEOUT"
#define REQUEST_TRIES "REQUEST_TRIES"
/* CP CDR Parameter */
#define GENERATE_CDR "GENERATE_CDR"
#define GENERATE_SGW_CDR "GENERATE_SGW_CDR"
#define SGW_CC "SGW_CC"
#define ADD_DEFAULT_RULE "ADD_DEFAULT_RULE"
/* LI-DF Parameter */
#define NUMBER_OF_LINES 100
#define MAX_LINE_LENGTH 1024
#define LI_DF_CONFIG_FILE_NAME "../config/LI_DF.csv"
#define READ_ONLY_MODE "r"
#define APPEND_MODE "a"
#define LI_DF_CSV_HEADER_INFO 1
#define SGW_CC_CHECK 2
#define PCAP_TTL (64)
#define PCAP_VIHL (0x0045)
#define IP_BUFF_SIZE 16
#define REQUEST_TIMEOUT_DEFAULT_VALUE 3000
#define REQUEST_TRIES_DEFAULT_VALUE 2
/*Default URR paramters*/
#define DEFAULT_VOL_THRESHOLD 1048576
#define DEFAULT_TIME_THRESHOLD 120
#define DEFAULT_TRIGGER_TYPE 2
#define GX_FILE_PATH "gx_app/gx.conf"
#define CONNECT_TO "ConnectTo"
uint8_t recovery_flag;
/**
* @brief : parse the SGWU/PGWU/SAEGWU IP from config file
* @param : config, config file path
* @return : Returns nothing
*/
void
config_cp_ip_port(pfcp_config_t *config);
/**
* @brief : Validate cp requst timeout configured value
* @param : value, configured value
* @return : Returns 0 in case of success, -1 otherwise
*/
int
check_cp_req_timeout_config(char *value);
/**
* @brief : Validate cp requst tries configured value
* @param : value, configured value
* @return : Returns 0 in case of success, -1 otherwise
*/
int
check_cp_req_tries_config(char *value);
/**
* @brief : Convert apn name to readable format
* @param : apn_name_label, apn name which is to be convert
* @param : apn_name, array to store apn name
* @return : Returns 0 in case of success, -1 otherwise
*/
int
get_apn_name(char *apn_name_label, char *apn_name);
/**
* @brief : Identify ip address family ipv4/ipv6
* @param : ip_addr, ip address
* @return : Returns ip address family type in
* case of success, -1 otherwise
*/
int
get_ip_address_type(const char *ip_addr);
/**
* @brief : extract pcrf ip from gx config file
* @param : filename, filename with path
* @param : peer_addr, pointer of peer_addr
* @return : Returns case of success, -1 otherwise
*/
int fill_pcrf_ip(const char *filename, char *peer_addr);
/**
* @brief : fill gx interface ip into config struture
* @param : void
* @return : Returns case of success, -1 otherwise
*/
int8_t fill_gx_iface_ip(void);
|
nikhilc149/e-utran-features-bug-fixes | dp/util.h | <gh_stars>0
/*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _UTIL_H_
#define _UTIL_H_
#include <search.h>
#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_udp.h>
#ifdef DP_BUILD
#include "up_main.h"
#else
#include "main.h"
#endif /* DP_BUILD */
/**
* @file
* This file contains macros, data structure definitions and function
* prototypes of dataplane utilities.
*/
/**
* ipv4 version flag.
*/
#define VERSION_FLAG_CHECK 0xf0
/**
* ipv4 version flag.
*/
#define IPv4_VERSION 0x40
/**
* ipv6 version flag.
*/
#define IPv6_VERSION 0x60
/**
* ipv4 header size.
*/
#define IPv4_HDR_SIZE 20
/**
* ipv6 header size.
*/
#define IPv6_HDR_SIZE 40
/**
* udp header size.
*/
#define UDP_HDR_SIZE 8
/**
* ethernet header size for untagged packet.
*/
#define ETH_HDR_SIZE 14
/**
* macro to define next protocol udp in
* ipv4 header.
*/
#define IP_PROTO_UDP 17
/**
* GTPU port
*/
#define UDP_PORT_GTPU 2152
/**
* GTPU port
*/
#define GTP_HDR_SIZE 8
/**
* network order DNS src port for udp
*/
#define N_DNS_RES_SRC_PORT 0x3500
/**
* ipv4 address format.
*/
#define IPV4_ADDR "%u.%u.%u.%u"
#define IPV4_ADDR_FORMAT(a) (uint8_t)((a) & 0x000000ff), \
(uint8_t)(((a) & 0x0000ff00) >> 8), \
(uint8_t)(((a) & 0x00ff0000) >> 16), \
(uint8_t)(((a) & 0xff000000) >> 24)
#define IPV4_ADDR_HOST_FORMAT(a) (uint8_t)(((a) & 0xff000000) >> 24), \
(uint8_t)(((a) & 0x00ff0000) >> 16), \
(uint8_t)(((a) & 0x0000ff00) >> 8), \
(uint8_t)((a) & 0x000000ff)
/**
* @brief : Maintains table information
*/
struct table {
char name[MAX_LEN];
void *root;
uint16_t num_entries;
uint16_t max_entries;
uint8_t active;
int (*compare)(const void *r1p, const void *r2p);
void (*print_entry)(const void *nodep, const VISIT which, const int depth);
};
/**
 * @brief : Locate the UDP header of an Ethernet/IPv4 packet held in an mbuf.
 * @param : m, mbuf pointer
 * @return : Pointer to the UDP header within the packet data
 */
static inline struct udp_hdr *get_mtoudp(struct rte_mbuf *m)
{
	/* The UDP header sits right after the Ethernet and IPv4 headers. */
	unsigned char *pkt = rte_pktmbuf_mtod(m, unsigned char *);
	pkt += ETH_HDR_SIZE + IPv4_HDR_SIZE;
	return (struct udp_hdr *)pkt;
}
/**
 * @brief : Locate the UDP header of an Ethernet/IPv6 packet held in an mbuf.
 * @param : m, mbuf pointer
 * @return : Pointer to the UDP header within the packet data
 */
static inline struct udp_hdr *get_mtoudp_v6(struct rte_mbuf *m)
{
	/* The UDP header sits right after the Ethernet and IPv6 headers. */
	unsigned char *pkt = rte_pktmbuf_mtod(m, unsigned char *);
	pkt += ETH_HDR_SIZE + IPv6_HDR_SIZE;
	return (struct udp_hdr *)pkt;
}
/**
* @brief : Function to construct udp header.
* @param : m, mbuf pointer
* @param : len, len of header
* @param : sport, src port
* @param : dport, dst port
* @return : Returns nothing
*/
void
construct_udp_hdr(struct rte_mbuf *m, uint16_t len,
uint16_t sport, uint16_t dport, uint8_t ip_type);
#endif /*_UTIL_H_ */
|
nikhilc149/e-utran-features-bug-fixes | dp/up_main.c | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <unistd.h>
#include <locale.h>
#include <signal.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_common.h>
#include <rte_ether.h>
#include <rte_mbuf.h>
#include <rte_branch_prediction.h>
#include "li_interface.h"
#include "gw_adapter.h"
#include "up_main.h"
#include "pfcp_util.h"
#include "pfcp_set_ie.h"
#include "cstats.h"
#include "predef_rule_init.h"
#include "config_validater.h"
#ifdef USE_CSID
#include "csid_struct.h"
#endif /* USE_CSID */
#define UP "USER PLANE"
#define DP_LOG_PATH "logs/dp.log"
#define LOGGER_JSON_PATH "../config/log.json"
#define DDF2 "DDF2"
#define DDF3 "DDF3"
#define EXIT 0
extern int fd_array_v4[2];
extern int fd_array_v6[2];
uint32_t start_time;
extern struct in_addr cp_comm_ip;
extern struct in_addr dp_comm_ip;
int clSystemLog = STANDARD_LOGID;
#ifdef USE_CSID
uint16_t local_csid = 0;
#endif /* USE_CSID */
/* List of allocated teid ranges*/
teidri_info *upf_teidri_allocated_list = NULL;
/* List of free teid ranges*/
teidri_info *upf_teidri_free_list = NULL;
/* List of blocked teid ranges, needs to release after timer expires*/
teidri_info *upf_teidri_blocked_list = NULL;
/**
 * @brief : Termination signal handler: tears down the LI (DDF) connections,
 *          closes any open KNI control sockets and exits the user-plane
 *          process via rte_exit().
 * @param : signo, received signal number (unused)
 * @return : never returns
 */
static void
sig_handler(int signo)
{
	deinit_ddf();
	RTE_SET_USED(signo);
	clLog(clSystemLog, eCLSeverityDebug, "UP: Called Signal_handler..\n");

	/* Close the KNI Sockets */
	uint8_t sock_idx = 0;
	while (sock_idx < 2) {
		int fd4 = fd_array_v4[sock_idx];
		int fd6 = fd_array_v6[sock_idx];
		if (fd4 > 0) {
			close(fd4);
		}
		if (fd6 > 0) {
			close(fd6);
		}
		sock_idx++;
	}

	/* TODO: Cleanup the stored data, and closed the sockets */
	rte_exit(EXIT, "\nExit Gracefully User-plane service...\n");
}
/**
* @brief : init signals
* @param : void
* @return : never returns
*/
static void
init_signal_handler(void)
{
{
sigset_t sigset;
/* mask SIGALRM in all threads by default */
sigemptyset(&sigset);
sigaddset(&sigset, SIGRTMIN);
sigaddset(&sigset, SIGRTMIN + 2);
sigaddset(&sigset, SIGRTMIN + 3);
sigaddset(&sigset, SIGUSR1);
sigprocmask(SIG_BLOCK, &sigset, NULL);
}
struct sigaction sa;
/* Setup the signal handler */
sa.sa_handler = sig_handler;
sa.sa_flags = SA_RESTART;
sigfillset(&sa.sa_mask);
if (sigaction(SIGINT, &sa, NULL) == -1) {}
if (sigaction(SIGTERM, &sa, NULL) == -1) {}
if (sigaction(SIGRTMIN+1, &sa, NULL) == -1) {}
}
/**
 * @brief : Wires the CLI/REST management framework for the user plane:
 *          registers the timer/pcap/config callbacks and starts the REST
 *          endpoint on app.cli_rest_ip_buff:app.cli_rest_port.
 * @param : void
 * @return : nothing
 */
static void
init_cli_framework(void) {
	set_gw_type(OSS_USER_PLANE);
	cli_node.upsecs = &cli_node.cli_config.oss_reset_time;
	cli_init(&cli_node, &cli_node.cli_config.cnt_peer);
	cli_node.cli_config.perf_flag = app.perf_flag;

	/* Callback table consumed by the REST/CLI layer. */
	cli_node.cli_config.gw_adapter_callback_list.update_periodic_timer = &post_periodic_timer;
	cli_node.cli_config.gw_adapter_callback_list.update_transmit_timer = &post_transmit_timer;
	cli_node.cli_config.gw_adapter_callback_list.update_transmit_count = &post_transmit_count;
	cli_node.cli_config.gw_adapter_callback_list.get_periodic_timer = &get_periodic_timer;
	cli_node.cli_config.gw_adapter_callback_list.get_transmit_timer = &get_transmit_timer;
	/* No '&' here: a function name decays to a pointer, so this is
	 * equivalent to the '&'-style used on the other assignments. */
	cli_node.cli_config.gw_adapter_callback_list.get_transmit_count = get_transmit_count;
	cli_node.cli_config.gw_adapter_callback_list.get_generate_pcap = &get_pcap_status;
	cli_node.cli_config.gw_adapter_callback_list.update_pcap_status = &post_pcap_status;
	cli_node.cli_config.gw_adapter_callback_list.get_dp_config = &fill_dp_configuration;
	cli_node.cli_config.gw_adapter_callback_list.get_perf_flag = &get_perf_flag;
	cli_node.cli_config.gw_adapter_callback_list.update_perf_flag = &update_perf_flag;

	/* Init rest framework */
	init_rest_framework(app.cli_rest_ip_buff, app.cli_rest_port);
}
/**
* Main function.
*/
int main(int argc, char **argv)
{
	int ret;
	bool ret_val;

	/* NTP timestamp of process start; persisted below for peer recovery. */
	start_time = current_ntp_timestamp();

	/* Precondition for configuration file */
	read_cfg_file(DP_CFG_PATH);

	init_log_module(LOGGER_JSON_PATH);
	init_signal_handler();

#ifdef USE_REST
	/* Write User-plane start time on Disk */
	recovery_time_into_file(start_time);
#endif

	/* Initialize the Environment Abstraction Layer */
	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
	/* Skip the EAL arguments; the remainder are DP-specific options. */
	argc -= ret;
	argv += ret;

	/* DP restart counter info */
	dp_restart_cntr = get_dp_restart_cntr();

	/* DP Init */
	dp_init(argc, argv);

	init_cli_framework();

	/* TODO: Need to validate LI*/
	/* Create TCP connection between data-plane and d-df2 */
	init_ddf();

	ddf2_fd = create_ddf_tunnel(app.ddf2_ip, app.ddf2_port, app.ddf2_local_ip,
			(const uint8_t *)DDF2);
	if (ddf2_fd == NULL) {
		/* Non-fatal: the DP keeps running without this delivery leg. */
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Unable to connect to DDF2\n", LOG_VALUE);
	}

	/* Create TCP connection between data-plane and d-df3 */
	ddf3_fd = create_ddf_tunnel(app.ddf3_ip, app.ddf3_port, app.ddf3_local_ip,
			(const uint8_t *)DDF3);
	if (ddf3_fd == NULL) {
		/* Non-fatal, same as DDF2 above. */
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Unable to connect to DDF3\n", LOG_VALUE);
	}

	create_heartbeat_hash_table();

	/* Initialize DP PORTS and membufs */
	dp_port_init();

	/* Initialize the predefined rules tables */
	init_predef_rule_hash_tables();

	/**
	 * SGWU: UE <--S1U/WB-->[SGW]<--EB/S5/S8/WB-->[PGW]<--SGi/EB-->
	 * PGWU: UE <--S1U/WB-->[SGW]<--EB/S5/S8/WB-->[PGW]<--SGi/EB-->
	 * SAEGWU: UE <--S1U/WB--> [SAEGW] <--SGi/EB-->
	 */
	init_stats_timer();

	/* Pipeline Init */
	epc_init_packet_framework(app.eb_port,
			app.wb_port);

	/* West Bound port handler*/
	register_ul_worker(wb_pkt_handler, app.wb_port);

	/* East Bound port handler*/
	register_dl_worker(eb_pkt_handler, app.eb_port);

	/* Initialization of the PFCP interface */
	iface_module_constructor();

	/* Create the session, pdr,far,qer and urr tables */
	init_up_hash_tables();

	/* Initialized/Start Pcaps on User-Plane */
	if (app.generate_pcap) {
		up_pcap_init();
	}

	/* Init Downlink data notification ring, container and mempool */
	dp_ddn_init();

#ifdef USE_REST
	/* Create thread for handling for sending echo req to its peer node */
	rest_thread_init();

	/* DP TEIDRI Timer */
	if(app.teidri_val != 0){
		ret_val = start_dp_teidri_timer();
		if(ret_val == false){
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Unable to start timer for TEIDRI\n", LOG_VALUE);
		}
	}
#endif /* USE_REST */
	/* NOTE(review): ret_val is only referenced under USE_REST; expect an
	 * unused-variable warning when USE_REST is not defined -- confirm. */

#ifdef USE_CSID
	init_fqcsid_hash_tables();
#endif /* USE_CSID */

	/* Launch the per-lcore packet pipeline and block until all lcores exit. */
	packet_framework_launch();

	rte_eal_mp_wait_lcore();

	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | interface/ipc/dp_ipc_api.h | /*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _DP_IPC_API_H_
#define _DP_IPC_API_H_
/**
* @file
* This file contains macros, data structure definitions and function
* prototypes of Interface message parsing.
*/
#include "interface.h"
#ifdef CP_BUILD
#include "cp.h"
#include "main.h"
#else
#include "up_main.h"
#endif /* CP_BUILD */
/* message types */
/* CP<->DP IPC message discriminators (carried in struct msgbuf.mtype).
 * Values are implicit and order-sensitive: both sides must be built from
 * the same copy of this header. */
enum dp_msg_type {
	/* Session Bearer Map Hash Table*/
	MSG_SESS_TBL_CRE,
	MSG_SESS_TBL_DES,
	MSG_SESS_CRE,
	MSG_SESS_MOD,
	MSG_SESS_DEL,
	/* ADC Rule Table*/
	MSG_ADC_TBL_CRE,
	MSG_ADC_TBL_DES,
	MSG_ADC_TBL_ADD,
	MSG_ADC_TBL_DEL,
	/* PCC Rule Table*/
	MSG_PCC_TBL_CRE,
	MSG_PCC_TBL_DES,
	MSG_PCC_TBL_ADD,
	MSG_PCC_TBL_DEL,
	/* Meter Tables*/
	MSG_MTR_CRE,
	MSG_MTR_DES,
	MSG_MTR_ADD,
	MSG_MTR_DEL,
	MSG_MTR_CFG,
	/* Filter Table for SDF & ADC*/
	MSG_SDF_CRE,
	MSG_SDF_DES,
	MSG_SDF_ADD,
	MSG_SDF_DEL,
	MSG_EXP_CDR,
	/* DDN from DP to CP*/
	MSG_DDN,
	MSG_DDN_ACK,
	/* Sentinel: number of message types; must stay last. */
	MSG_END,
};
/* Table Callback msg payload */
struct cb_args_table {
	char name[MAX_LEN]; /* table name */
	uint32_t max_elements; /* maximum number of entries in the table */
};

/*
 * Message Structure: one IPC envelope; mtype selects which union member
 * is valid (see enum dp_msg_type).
 */
struct msgbuf {
	long mtype;
	struct dp_id dp_id;
	union __attribute__ ((packed)) {
		struct pkt_filter pkt_filter_entry;
		struct adc_rules adc_filter_entry;
		struct pcc_rules pcc_entry;
		struct session_info sess_entry;
		struct mtr_entry mtr_entry;
		struct cb_args_table msg_table;
		struct msg_ue_cdr ue_cdr;
#ifdef CP_BUILD
		struct downlink_data_notification dl_ddn; /** Downlink data notification info */
#else
		struct downlink_data_notification_ack_t dl_ddn; /** Downlink data notification info */
#endif /* CP_BUILD */
	} msg_union;
};

/* NOTE(review): these objects are *defined* (not declared extern) in a
 * header, relying on C tentative-definition/common-symbol linking when the
 * header is included by multiple translation units -- verify the build
 * links with -fcommon or that only one TU includes this header. */
struct msgbuf sbuf;
struct msgbuf rbuf;
uint8_t pfcp_rx[4096]; /* TODO: Decide size */

/* IPC msg node: maps a message id to its handler. */
struct ipc_node {
	int msg_id; /*msg type*/
	int (*msg_cb)(struct msgbuf *msg_payload); /*callback function*/
};
struct ipc_node *basenode;
/**
* @brief Function to Inilialize memory for IPC msg.
*
* @param
* void
*/
void iface_init_ipc_node(void);
/**
* @brief Functino to register call back apis with msg id..
*
* @param msg_id
* msg_id - id number on which the call back function should
* invoked.
* @param msg_payload
* msg_payload - payload of the message
*
* This function is thread safe due to message queue implementation.
*/
void
iface_ipc_register_msg_cb(int msg_id,
int (*msg_cb)(struct msgbuf *msg_payload));
/**
* @brief : handles s11 interface messages
*/
void msg_handler_s11(bool is_ipv6);
/**
* @brief : handles s5s8 interface messages
*/
void msg_handler_s5s8(bool is_ipv6);
#ifdef CP_BUILD
/**
* @brief Functino to init rte hash tables.
*
* @param none
* Return
* None
*/
int
simu_cp(__rte_unused void *ptr);
/**
* @brief callback to handle downlink data notification messages from the
* data plane
* @param msg_payload
* message payload received by control plane from the data plane
* @return
* 0 inicates success, error otherwise
*/
int
cb_ddn(struct msgbuf *msg_payload);
#else
int simu_cp(void);
#endif /* CP_BUILD */
#endif /* _DP_IPC_API_H_ */
|
nikhilc149/e-utran-features-bug-fixes | ulpc/d_admf/include/UeConfigAsCSV.h | <gh_stars>0
/*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __UE_CONFIG_AS_CSV_H_
#define __UE_CONFIG_AS_CSV_H_
#include <iostream>
#include <stdint.h>
#include <fstream>
#include <sstream>
#include "UeConfig.h"
/**
 * @brief : UeConfig implementation backed by a CSV file: Ue entries are
 *          loaded from, and persisted to, the file at strCSVPath.
 */
class UeConfigAsCSV : public UeConfig
{
	private:
		/* Path of the CSV file holding the Ue entries */
		std::string strCSVPath;

	public:
		/* Default-constructs with the implementation's default CSV path */
		UeConfigAsCSV();

		/* Uses the CSV file at strPath as the backing store */
		UeConfigAsCSV(const std::string &strPath);

		/**
		 * @brief : Read csv file containing entries of all Ue's to process
		 and stores it in vector
		 * @param : No param
		 * @return : Returns 0 in case of Success, -1 otherwise
		 */
		int8_t ReadUeConfig(void);

		/**
		 * @brief : Adds/Updates/Deletes Ue entry from csv file
		 * @param : uiAction, action can be add(1)/update(2)/delete(3)
		 * @param : modUeData, structure representing the Ue entry
		 * @return : Returns 0 in case of Success, -1 otherwise
		 */
		int8_t UpdateUeConfig(uint8_t uiAction, ue_data_t &modUeData);

		~UeConfigAsCSV();
};
#endif /* __UE_CONFIG_AS_CSV_H_ */
|
nikhilc149/e-utran-features-bug-fixes | cp_dp_api/predef_rule_init.c | /*
* Copyright (c) 2020 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rte_hash_crc.h>
#include "gw_adapter.h"
#include "predef_rule_init.h"
/* Create the Tables to mantain sdf, mtr, adc and pcc rules */
#define PREDEF_NUM_OF_TABLES 5
/* Defined the table index */
#define ZERO 0
#define ONE 1
#define TWO 2
#define THREE 3
#define FOUR 4
/* Maximum collection of rules stored into the hash table */
#define MAX_RULES_ENTRIES_COLLECTION 10
/* Maximum predefined rules stored into the hash table */
#define MAX_RULES_HASH_SIZE 255
extern struct rte_hash *sdf_by_inx_hash;
extern struct rte_hash *mtr_by_inx_hash;
extern struct rte_hash *adc_by_inx_hash;
extern struct rte_hash *pcc_by_rule_name_hash;
extern struct rte_hash *rules_by_ip_addr_hash;
extern int clSystemLog;
/* Map a predefined-rule hash type to its global hash table.
 * Returns NULL (and logs a critical error) for any unrecognized type. */
static struct rte_hash *
select_predef_rule_hash_table(uint8_t hash_type)
{
	switch (hash_type) {
	case SDF_HASH:
		return sdf_by_inx_hash;
	case MTR_HASH:
		return mtr_by_inx_hash;
	case ADC_HASH:
		return adc_by_inx_hash;
	default:
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Error: Wrong/not defined hash type, hash_type:%u\n",
				LOG_VALUE, hash_type);
		return NULL;
	}
}
/**
 * @brief : Append new_node at the tail of the rule-name list rooted at head
 *          and bump the count kept in the head node.
 * @param : head, first node of the list (carries the authoritative count)
 * @param : new_node, node to append; its next pointer is reset here
 * @return : 0 on success, -1 when new_node is NULL
 *
 * NOTE(review): head is passed by value, so the "head == NULL" branch only
 * updates the local copy -- the caller never receives the new head and the
 * node is effectively lost. Callers appear to always pass a non-NULL head
 * obtained from get_map_rule_entry(); confirm before relying on the
 * empty-list path.
 */
int8_t
insert_rule_name_node(rules_struct *head, rules_struct *new_node)
{
	rules_struct *tmp = NULL;

	if(new_node == NULL)
		return -1;

	/* Detach the node from any previous list membership. */
	new_node->next = NULL;

	/* Check linked list is empty or not */
	if (head == NULL) {
		head = new_node;
		head->rule_cnt++;
	} else {
		tmp = head;
		/* Traverse the linked list until tmp is the last node */
		while(tmp->next != NULL) {
			tmp = tmp->next;
		}
		tmp->next = new_node;
		/* The old tail's count is refreshed from the head before the
		 * head's total is incremented -- presumably each node mirrors
		 * its position in the list; verify against the readers. */
		tmp->rule_cnt = head->rule_cnt;
		head->rule_cnt++;
	}
	return 0;
}
/**
 * @brief : Look up the rule collection keyed by the CP's PFCP IPv4 address;
 *          when no entry exists and is_mod == ADD_RULE, allocate a zeroed
 *          collection and insert it.
 * @param : cp_pfcp_ip, CP PFCP IPv4 address (hash key)
 * @param : is_mod, ADD_RULE to create a missing entry; any other value
 *          turns a miss into an error
 * @return : pointer to the (possibly freshly created) entry, NULL on
 *           miss/allocation/insert failure
 */
rules_struct *
get_map_rule_entry(uint32_t cp_pfcp_ip, uint8_t is_mod)
{
	int ret = 0;
	uint16_t size = 0;
	rules_struct *data = NULL;
	struct rte_hash *hash = rules_by_ip_addr_hash;

	/* Validate if hash is created or not */
	if (hash == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Error: Collection rules hash not found!\n",
				LOG_VALUE);
		return NULL;
	}

	ret = rte_hash_lookup_data(hash, &cp_pfcp_ip, (void **)&data);
	if ( ret < 0) {
		/* allocate memory only if request for add new rule entry */
		if (is_mod != ADD_RULE) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Entry not found for IP_Address: "IPV4_ADDR"\n",
					LOG_VALUE, IPV4_ADDR_HOST_FORMAT(cp_pfcp_ip));
			return NULL;
		}

		/* Calculate the memory size to allocate */
		size = sizeof(rules_struct);

		/* allocate memory for rule entry (zero-filled) */
		data = rte_zmalloc("Rules_Infos", size, RTE_CACHE_LINE_SIZE);
		if (data == NULL) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to allocate memory for rule entry.\n",
					LOG_VALUE);
			return NULL;
		}

		/* Rule Entry not present. Add new rule entry */
		ret = rte_hash_add_key_data(hash, &cp_pfcp_ip, data);
		if (ret) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to add entry for IP_Address: "IPV4_ADDR""
					"\n\tError= %s\n", LOG_VALUE, IPV4_ADDR_HOST_FORMAT(cp_pfcp_ip),
					rte_strerror(abs(ret)));
			/* free allocated memory */
			rte_free(data);
			data = NULL;
			return NULL;
		}

		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Successfully added rule entry for IP_Address: "IPV4_ADDR"\n",
				LOG_VALUE, IPV4_ADDR_HOST_FORMAT(cp_pfcp_ip));
		return data;
	}

	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Rule entry found for IP_Address: "IPV4_ADDR"\n",
			LOG_VALUE, IPV4_ADDR_HOST_FORMAT(cp_pfcp_ip));
	return data;
}
/* Remove the rule collection keyed by the CP's PFCP IPv4 address and free
 * its payload. Returns 0 on success, -1 when no entry exists or the hash
 * removal fails. */
int8_t
del_map_rule_entry(uint32_t cp_pfcp_ip)
{
	rules_struct *entry = NULL;
	struct rte_hash *hash = rules_by_ip_addr_hash;

	/* Nothing to delete when no collection exists for this address. */
	if (rte_hash_lookup_data(hash, &cp_pfcp_ip, (void **)&entry) < 0) {
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Rule entry not found for IP_Address: "IPV4_ADDR"\n", LOG_VALUE,
				IPV4_ADDR_HOST_FORMAT(cp_pfcp_ip));
		return -1;
	}

	/* Drop the key first, then release the stored payload. */
	if (rte_hash_del_key(hash, &cp_pfcp_ip) < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Entry not found"
				"for IP_Address: "IPV4_ADDR"\n", LOG_VALUE,
				IPV4_ADDR_HOST_FORMAT(cp_pfcp_ip));
		return -1;
	}

	if (entry != NULL) {
		rte_free(entry);
		entry = NULL;
	}

	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Rule entry deleted for IP_Address: "IPV4_ADDR"\n", LOG_VALUE,
			IPV4_ADDR_HOST_FORMAT(cp_pfcp_ip));
	return 0;
}
/**
 * @brief : Look up a predefined PCC rule by rule name; when no entry exists
 *          and is_mod == ADD_RULE, allocate a zeroed pcc_rules and insert it.
 * @param : rule_name, rule-name key (rname is the hash key)
 * @param : is_mod, ADD_RULE to create a missing entry; any other value
 *          turns a miss into an error
 * @return : pointer to the (possibly freshly created) entry, NULL on
 *           miss/allocation/insert failure
 */
struct pcc_rules *
get_predef_pcc_rule_entry(const pcc_rule_name *rule_name, uint8_t is_mod)
{
	int ret = 0;
	uint16_t size = 0;
	struct pcc_rules *data = NULL;
	struct rte_hash *hash = pcc_by_rule_name_hash;

	/* Validate if hash is created or not */
	if (hash == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Error: Predef pcc hash not found!\n",
				LOG_VALUE);
		return NULL;
	}

	ret = rte_hash_lookup_data(hash, &rule_name->rname, (void **)&data);
	if ( ret < 0) {
		/* allocate memory only if request for add new rule entry */
		if (is_mod != ADD_RULE) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Entry not found for Rule_Name: %s\n",
					LOG_VALUE, rule_name->rname);
			return NULL;
		}

		/* Calculate the memory size to allocate */
		size = sizeof(struct pcc_rules);

		/* allocate memory for rule entry (zero-filled) */
		data = rte_zmalloc("PCC_Rules_Info", size, RTE_CACHE_LINE_SIZE);
		if (data == NULL) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to allocate memory for rule entry.\n",
					LOG_VALUE);
			return NULL;
		}

		/* Rule Entry not present. Add new rule entry */
		ret = rte_hash_add_key_data(hash, &rule_name->rname, data);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to add entry for Rule_Name: %s"
					"\n\tError= %s\n", LOG_VALUE, rule_name->rname,
					rte_strerror(abs(ret)));
			/* free allocated memory */
			rte_free(data);
			data = NULL;
			return NULL;
		}

		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Successfully added pcc rule entry for Rule_Name: %s\n",
				LOG_VALUE, rule_name->rname);
		return data;
	}

	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"PCC Rule entry found for Rule_Name: %s\n",
			LOG_VALUE, rule_name->rname);
	return data;
}
/* Remove a predefined PCC rule entry by rule name and free its payload.
 * Returns 0 on success, -1 when no entry exists or removal fails. */
int8_t
del_predef_pcc_rule_entry(const pcc_rule_name *rule_name)
{
	struct pcc_rules *entry = NULL;
	struct rte_hash *hash = pcc_by_rule_name_hash;

	/* Nothing to delete when the rule name is not present. */
	if (rte_hash_lookup_data(hash, &rule_name->rname, (void **)&entry) < 0) {
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"PCC Rule entry not found for Rule_Name: %s\n", LOG_VALUE,
				rule_name->rname);
		return -1;
	}

	/* Drop the key first, then release the stored payload. */
	if (rte_hash_del_key(hash, &rule_name->rname) < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Entry not found "
				"for PCC_Rule_Name: %s\n", LOG_VALUE, rule_name->rname);
		return -1;
	}

	if (entry != NULL) {
		rte_free(entry);
		entry = NULL;
	}

	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"PCC Rule entry deleted for Rule_Name: %s\n", LOG_VALUE,
			rule_name->rname);
	return 0;
}
/**
 * @brief : Fetch (or, with is_mod == ADD_RULE, insert) a predefined
 *          SDF/MTR/ADC rule by index in the table selected by hash_type.
 * @param : rule_indx, rule index (hash key)
 * @param : hash_type, SDF_HASH/MTR_HASH/ADC_HASH table selector
 * @param : is_mod, ADD_RULE to insert *rule_info on a miss
 * @param : rule_info, in: rule data to copy on insert; out: on a hit,
 *          set to the stored entry
 * @return : 0 on success, -1 on failure
 */
int8_t
get_predef_rule_entry(uint16_t rule_indx, uint8_t hash_type,
		uint8_t is_mod, void **rule_info)
{
	int ret = 0;
	uint16_t size = 0;
	void *data = NULL;
	const char *hash_name = NULL;
	struct rte_hash *hash = NULL;

	hash = select_predef_rule_hash_table(hash_type);

	/* Calculate the size for memory allocation */
	if (hash_type == SDF_HASH) {
#ifdef CP_BUILD
		size = sizeof(pkt_fltr);
#else
		size = sizeof(struct pkt_filter);
#endif
		hash_name = "SDF";
	} else if (hash_type == MTR_HASH) {
		size = sizeof(struct mtr_entry);
		hash_name = "MTR";
	} else if (hash_type == ADC_HASH) {
		size = sizeof(struct adc_rules);
		hash_name = "ADC";
	}

	/* Validate if hash is created or not */
	if (hash == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Error: Predef %s hash not found!\n",
				LOG_VALUE, hash_name);
		return -1;
	}

	ret = rte_hash_lookup_data(hash, &rule_indx, (void **)&data);
	if ( ret < 0) {
		/* allocate memory only if request for add new rule entry */
		if (is_mod != ADD_RULE) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Entry not found for %s Rule_Index: %u...\n",
					LOG_VALUE, hash_name, rule_indx);
			return -1;
		}

		/* allocate memory for rule entry*/
		data = rte_zmalloc("Rules_Info", size, RTE_CACHE_LINE_SIZE);
		if (data == NULL){
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to allocate memory for %s rule entry.\n",
					LOG_VALUE, hash_name);
			return -1;
		}

		/* Copy rule info into allocated memory */
#ifdef CP_BUILD
		/* NOTE(review): &(*rule_info) is the address of the pointer itself,
		 * i.e. the CP build copies starting at rule_info rather than at
		 * *rule_info. This only works if CP callers pass the rule struct's
		 * address cast to (void **) -- verify against the CP call sites. */
		memcpy(data, &(*rule_info), size);
#else
		memcpy(data, *rule_info, size);
#endif /* CP_BUILD */

		/* Rule Entry not present. Add new rule entry */
		ret = rte_hash_add_key_data(hash, &rule_indx, data);
		if (ret) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to add entry for %s Rule_Index: %u"
					"\n\tError= %s\n", LOG_VALUE, hash_name, rule_indx,
					rte_strerror(abs(ret)));
			/* free allocated memory */
			rte_free(data);
			data = NULL;
			return -1;
		}

		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Successfully added rule entry for %s Rule_Index:%u\n",
				LOG_VALUE, hash_name, rule_indx);
		return 0;
	}

	/* Hit: hand the stored entry back to the caller. */
	*rule_info = data;

	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"%s Rule entry found for Rule_Index:%u\n",
			LOG_VALUE, hash_name, rule_indx);
	return 0;
}
/**
 * @brief : Delete a predefined rule entry (SDF/MTR/ADC) from the table
 *          selected by hash_type and free the stored rule payload.
 * @param : rule_indx, index key of the rule to remove
 * @param : hash_type, SDF_HASH/MTR_HASH/ADC_HASH table selector
 * @return : 0 on success, -1 when the table or the entry is not found
 */
int8_t
del_predef_rule_entry(uint16_t rule_indx, uint8_t hash_type)
{
	int ret = 0;
	void *data = NULL;
	/* Default the printable table name so the %s conversions below never
	 * receive a NULL pointer when hash_type is unrecognized. */
	const char *hash_name = "UNKNOWN";
	struct rte_hash *hash = NULL;

	/* Set the hash name */
	if (hash_type == SDF_HASH) {
		hash_name = "SDF";
	} else if (hash_type == MTR_HASH) {
		hash_name = "MTR";
	} else if (hash_type == ADC_HASH) {
		hash_name = "ADC";
	}

	hash = select_predef_rule_hash_table(hash_type);

	/* Validate if hash is created or not */
	if (hash == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Error: Predef %s hash not found!\n",
				LOG_VALUE, hash_name);
		return -1;
	}

	ret = rte_hash_lookup_data(hash, &rule_indx, (void **)&data);
	if (ret >= 0) {
		/* Rule Entry is present. Delete Rule Entry */
		ret = rte_hash_del_key(hash, &rule_indx);
		if ( ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Entry not found "
					"for %s Rule_Index: %u\n", LOG_VALUE, hash_name, rule_indx);
			return -1;
		}

		/* Release the rule payload that was stored with the key. */
		if (data != NULL) {
			rte_free(data);
			data = NULL;
		}

		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Rule entry deleted for %s Rule_Index: %u\n", LOG_VALUE,
				hash_name, rule_indx);
		return 0;
	}

	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Rule entry not found for %s Rule_Index: %u\n", LOG_VALUE,
			hash_name, rule_indx);
	return -1;
}
/* Create and initialize the tables to maintain the predefined rules info:
 * SDF/MTR/ADC (keyed by 16-bit rule index), PCC (keyed by rule name) and
 * the per-CP-IP rules collection. rte_panic()s on any creation failure,
 * so on return all five globals are valid. */
void
init_predef_rule_hash_tables(void)
{
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Table Creation Started\n", LOG_VALUE);

	/* One parameter set per table; indices ZERO..FOUR below. */
	struct rte_hash_parameters
		predef_hash_params[PREDEF_NUM_OF_TABLES] = {
		{	.name = "SDF_ENTRY_HASH",
			.entries = MAX_RULES_HASH_SIZE,
			.key_len = sizeof(uint16_t),
			.hash_func = rte_hash_crc,
			.hash_func_init_val = 0,
			.socket_id = rte_socket_id()
		},
		{	.name = "MTR_ENTRY_HASH",
			.entries = MAX_RULES_HASH_SIZE,
			.key_len = sizeof(uint16_t),
			.hash_func = rte_hash_crc,
			.hash_func_init_val = 0,
			.socket_id = rte_socket_id()
		},
		{	.name = "ADC_ENTRY_HASH",
			.entries = MAX_RULES_HASH_SIZE,
			.key_len = sizeof(uint16_t),
			.hash_func = rte_hash_crc,
			.hash_func_init_val = 0,
			.socket_id = rte_socket_id()
		},
		{	.name = "PCC_ENTRY_HASH",
			.entries = MAX_RULES_HASH_SIZE,
			.key_len = sizeof(pcc_rule_name),
			.hash_func = rte_hash_crc,
			.hash_func_init_val = 0,
			.socket_id = rte_socket_id()
		},
		{	.name = "RULES_ENTRY_HASH",
			.entries = MAX_RULES_ENTRIES_COLLECTION,
			.key_len = sizeof(uint32_t),
			.hash_func = rte_hash_crc,
			.hash_func_init_val = 0,
			.socket_id = rte_socket_id()
		}
	};

	sdf_by_inx_hash = rte_hash_create(&predef_hash_params[ZERO]);
	if (!sdf_by_inx_hash) {
		rte_panic("%s: hash create failed: %s (%u)\n",
				predef_hash_params[ZERO].name,
				rte_strerror(rte_errno), rte_errno);
	}

	mtr_by_inx_hash = rte_hash_create(&predef_hash_params[ONE]);
	if (!mtr_by_inx_hash) {
		rte_panic("%s: hash create failed: %s (%u)\n",
				predef_hash_params[ONE].name,
				rte_strerror(rte_errno), rte_errno);
	}

	adc_by_inx_hash = rte_hash_create(&predef_hash_params[TWO]);
	if (!adc_by_inx_hash) {
		rte_panic("%s: hash create failed: %s (%u)\n",
				predef_hash_params[TWO].name,
				rte_strerror(rte_errno), rte_errno);
	}

	pcc_by_rule_name_hash = rte_hash_create(&predef_hash_params[THREE]);
	if (!pcc_by_rule_name_hash) {
		rte_panic("%s: hash create failed: %s (%u)\n",
				predef_hash_params[THREE].name,
				rte_strerror(rte_errno), rte_errno);
	}

	rules_by_ip_addr_hash = rte_hash_create(&predef_hash_params[FOUR]);
	if (!rules_by_ip_addr_hash) {
		rte_panic("%s: hash create failed: %s (%u)\n",
				predef_hash_params[FOUR].name,
				rte_strerror(rte_errno), rte_errno);
	}

	clLog(clSystemLog, eCLSeverityInfo,
			LOG_FORMAT"SDF, PCC, MTR, and ADC hash tables successfully created.\n", LOG_VALUE);
}
#ifdef CP_BUILD
/**
* @brief : Pack the message which has to be sent to DataPlane.
* @param : mtype
* mtype - Message type.
* @param : param
* param - parameter to be parsed based on msg type.
* @param : msg_payload
* msg_payload - message payload to be sent.
* @return : Returns 0 in case of success , -1 otherwise
*/
int
build_rules_up_msg(enum dp_msg_type mtype,
		void *param, struct msgbuf *msg_payload)
{
	msg_payload->mtype = mtype;

	/* Not Supporting dp_id */
	struct dp_id dp_id = { .id = DPN_ID };
	msg_payload->dp_id = dp_id;

	/* Select which union member to fill from the message type; 'param'
	 * must point to the matching payload type for that message. */
	switch (mtype) {
	/* Table-create messages carry only the table capacity. */
	case MSG_SDF_CRE:
	case MSG_ADC_TBL_CRE:
	case MSG_PCC_TBL_CRE:
	case MSG_SESS_TBL_CRE:
	case MSG_MTR_CRE:
		msg_payload->msg_union.msg_table.max_elements =
			*(uint32_t *)param;
		break;
	case MSG_EXP_CDR:
		msg_payload->msg_union.ue_cdr =
			*(struct msg_ue_cdr *)param;
		break;
	/* Table-destroy messages carry no payload. */
	case MSG_SDF_DES:
	case MSG_ADC_TBL_DES:
	case MSG_PCC_TBL_DES:
	case MSG_SESS_TBL_DES:
	case MSG_MTR_DES:
		break;
	case MSG_SDF_ADD:
	case MSG_SDF_DEL:
		msg_payload->msg_union.pkt_filter_entry =
			*(struct pkt_filter *)param;
		break;
	case MSG_ADC_TBL_ADD:
	case MSG_ADC_TBL_DEL:
		msg_payload->msg_union.adc_filter_entry =
			*(struct adc_rules *)param;
		break;
	case MSG_PCC_TBL_ADD:
	case MSG_PCC_TBL_DEL:
		msg_payload->msg_union.pcc_entry =
			*(struct pcc_rules *)param;
		break;
	case MSG_SESS_CRE:
	case MSG_SESS_MOD:
	case MSG_SESS_DEL:
		msg_payload->msg_union.sess_entry =
			*(struct session_info *)param;
		break;
	case MSG_MTR_ADD:
	case MSG_MTR_DEL:
		msg_payload->msg_union.mtr_entry =
			*(struct mtr_entry *)param;
		break;
	case MSG_DDN_ACK:
		msg_payload->msg_union.dl_ddn =
			*(struct downlink_data_notification *)param;
		break;
	default:
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Invalid msg type\n", LOG_VALUE);
		return -1;
	}
	return 0;
}
#endif
|
nikhilc149/e-utran-features-bug-fixes | pfcp_messages/pfcp.h | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PFCP_H
#define PFCP_H
#ifdef CP_BUILD
#include "cp.h"
#include "gx_app/include/gx_struct.h"
#include "pfcp_struct.h"
#include "pfcp_session.h"
struct rte_hash *pdr_entry_hash;
struct rte_hash *qer_entry_hash;
struct rte_hash *pdn_conn_hash;
struct rte_hash *rule_name_bearer_id_map_hash;
struct rte_hash *ds_seq_key_with_teid;
/**
* @file
*
* PFCP definitions and helper macros.
*
* GTP Message type definition and GTP header definition according to 3GPP
* TS 29.274; as well as IE parsing helper functions/macros, and message
* processing function declarations.
*
*/
extern peer_addr_t s11_mme_sockaddr;
extern in_port_t s11_port;
extern peer_addr_t s11_sockaddr;
extern struct in_addr s5s8_ip;
extern in_port_t s5s8_port;
extern peer_addr_t s5s8_sockaddr;
extern peer_addr_t s5s8_recv_sockaddr;
extern in_port_t pfcp_port;
extern peer_addr_t pfcp_sockaddr;
extern in_port_t upf_pfcp_port;
extern peer_addr_t upf_pfcp_sockaddr;
#define PFCP_BUFF_SIZE 1024
#define PFCP_RX_BUFF_SIZE 2048
#define ADD_RULE_TO_ALLOW 1
#define ADD_RULE_TO_DENY 2
#define DEFAULT_SDF_RULE_IPV4 "permit out ip from 0.0.0.0/0 0-65535 to 0.0.0.0/0 0-65535"
#define DEFAULT_SDF_RULE_IPV6 "permit out ip from f000:0:0:0:0:0:0:0/4 0-65535 to fdf8:f53e:61e4::18/4 0-65535"
#define DEFAULT_FLOW_STATUS_FL_ENABLED 2
#define DEFAULT_FLOW_STATUS_FL_DISABLED 3
#define DEFAULT_PRECEDENCE 10
#define DEFAULT_NUM_SDF_RULE 2
#define DEFAULT_NUM_SDF_RULE_v4_v6 4
#define DEFAULT_RULE_NAME "default_rule_name"
/**
* @brief : Rule Name is key for Mapping of Rules and Bearer table.
*/
typedef struct rule_name_bearer_id_map_key {
	/** Rule Name (hash key for the rule-name -> bearer-id map) */
	char rule_name[RULE_NAME_LEN];
}rule_name_key_t;

/**
 * @brief : Maintains information for hash key for rule
 */
typedef struct rule_key_t {
	/* CP session id owning the rule */
	uint64_t cp_seid;
	/* Rule identifier within that session */
	uint32_t id;
}rule_key_t;

/**
 * @brief : Bearer identifier information
 */
typedef struct bearer_identifier_t {
	/* Bearer identifier */
	uint8_t bearer_id;
}bearer_id_t;
/**
* @brief : PFCP context information for PDR, QER, BAR and FAR.
*/
/* NOTE(review): this struct is currently an empty placeholder -- all of
 * its fields are commented out, which makes it a zero-member struct (a GNU
 * extension, not strictly conforming C); confirm this is intentional. */
struct pfcp_cntxt {
	/* TODO: THIS STRUCTURE STORED CSR INFORMATION UNTIL NOT GETTING CCA FROM GX*/
	/* Number of PDRs */
//	uint8_t create_pdr_count;
//	/* Number of FARs*/
//	uint8_t create_far_count;
//	/* Collection of PDRs */
//	pdr_t pdr[MAX_LIST_SIZE];
//	/* Collection of FARs */
//	far_t far[MAX_LIST_SIZE];
}__attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
/**
* @brief : Create a hash table to maintain the PDR, QER, FAR and BAR information.
* @param : void
* @return : Does not return anything
*/
void
init_pfcp_tables(void);
/**
* @brief : Initializes the pfcp context hash table used to account for PDR, QER, BAR and FAR rules information.
* @param : void
* @return : Does not return anything
*/
void
init_hash_tables(void);
/**
* @brief : Add PDN Connection information in the table.
* @param : call_id
* @param : pdn connection details
* @return : Returns 0 on success , -1 otherwise
*/
uint8_t
add_pdn_conn_entry(uint32_t call_id, pdn_connection *pdn);
/**
* @brief : Add Rule name and bearer information in the table.
* @param : rule_key
* @param : bearer
* @return : Returns 0 on success , -1 otherwise
*/
uint8_t
add_rule_name_entry(const rule_name_key_t rule_key, bearer_id_t *bearer);
/**
* @brief : Add pfcp context information in the table.
* @param : session_id
* @param : resp, pfcp context details
* @return : Returns 0 on success , -1 otherwise
*/
uint8_t
add_pfcp_cntxt_entry(uint64_t session_id, struct pfcp_cntxt *resp);
/**
* @brief : Add PDR information in the table.
* @param : rule id
* @param : pdr context
* @param : cp_seid, CP session ID for that UE
* @return : Returns 0 on success , -1 otherwise
*/
uint8_t
add_pdr_entry(uint16_t rule_id, pdr_t *cntxt, uint64_t cp_seid);
/**
* @brief : Add QER information in the table.
* @param : qer id
* @param : qer context
* @param : cp_seid, CP session ID for that UE
* @return : Returns 0 on success , -1 otherwise
*/
uint8_t
add_qer_entry(uint32_t qer_id, qer_t *cntxt, uint64_t cp_seid);
/**
* @brief : Retrive PDN connection entry.
* @param : call id
* @return : Returns pointer to pdn entry on success , NULL otherwise
*/
pdn_connection *get_pdn_conn_entry(uint32_t call_id);
/**
* @brief : Retrive Rule Name entry.
* @param : rule_key
* @return : Return bearer id on success , -1 otherwise
*/
int8_t
get_rule_name_entry(const rule_name_key_t rule_key);
/**
* @brief : Retrive pfcp context entry.
* @param : session id
* @return : Returns pointer to pfcp context, NULL otherwise
*/
struct pfcp_cntxt *
get_pfcp_cntxt_entry(uint64_t session_id);
/**
* @brief : Retrive PDR entry.
* @param : rule id
* @param : cp_seid, CP session ID for that UE
* @return : Returns pointer to pdr context, NULL otherwise
*/
pdr_t *get_pdr_entry(uint16_t rule_id, uint64_t cp_seid);
/**
* @brief : Update PDR entry.
* @param : bearer context to be updated
* @param : teid to be updated
* @param : node_address_t ; IP_address for updation
* @param : iface, interface type ACCESS or CORE
* @return : Returns 0 on success , -1 otherwise
*/
int
update_pdr_teid(eps_bearer *bearer, uint32_t teid, node_address_t addr, uint8_t iface);
/**
* @brief : Retrive QER entry.
* @param : qer_id
* @param : cp_seid, CP session ID for that UE
* @return : Returns pointer to qer context on success , NULL otherwise
*/
qer_t *get_qer_entry(uint32_t qer_id, uint64_t cp_seid);
/**
* @brief : Delete PDN connection entry from PDN conn table.
* @param : call_id
* @return : Returns 0 on success , -1 otherwise
*/
uint8_t
del_pdn_conn_entry(uint32_t call_id);
/**
* @brief : Delete Rule Name entry from Rule and Bearer Map table.
* @param : rule key
* @return : Returns 0 on success , -1 otherwise
*/
uint8_t
del_rule_name_entry(const rule_name_key_t rule_key);
/**
* @brief : Delete context entry from pfcp context table.
* @param : session id
* @return : Returns 0 on success , -1 otherwise
*/
uint8_t
del_pfcp_cntxt_entry(uint64_t session_id);
/**
* @brief : Delete PDR entry from QER table.
* @param : pdr id
* @param : cp_seid, CP session ID for that UE
* @return : Returns 0 on success , -1 otherwise
*/
uint8_t
del_pdr_entry(uint16_t pdr_id, uint64_t cp_seid);
/**
* @brief : Delete QER entry from QER table.
* @param : qer id
* @param : cp_seid, CP session ID for that UE
* @return : Returns 0 on success , -1 otherwise
*/
uint8_t
del_qer_entry(uint32_t qer_id, uint64_t cp_seid);
/**
* @brief : Generate the PDR ID [RULE ID]
* @param : pdr_rule_id_offset, PDR ID offset value
* @return : Returns pdr id on success , 0 otherwise
*/
uint16_t
generate_pdr_id(uint16_t *pdr_rule_id_offset);
/**
* @brief : Generate the BAR ID
* @param : bar_rule_id_offset, BAR ID offset value
* @return : Returns bar id on success , 0 otherwise
*/
uint8_t
generate_bar_id(uint8_t *bar_rule_id_offset);
/**
* @brief : Generate the FAR ID
* @param : far_rule_id_offset, FAR ID offset value
* @return : Returns far id on success , 0 otherwise
*/
uint32_t
generate_far_id(uint32_t *far_rule_id_offset);
/**
* @brief : Generate the URR ID
* @param : urr_rule_id_offset, URR ID offset value
* @return : Returns far id on success , 0 otherwise
*/
uint32_t
generate_urr_id(uint32_t *urr_rule_id_offset);
/*
* @brief : Generate the QER ID
* @param : qer_rule_id_offset, QER ID offset value
* @return : Returns qer id on success , 0 otherwise
*/
uint32_t
generate_qer_id(uint32_t *qer_rule_id_offset);
/**
* @brief : Generate the CALL ID
* @param : void
* @return : Returns call id on success , 0 otherwise
*/
uint32_t
generate_call_id(void);
/**
* @brief : Generates sequence numbers for sgwc generated
* gtpv2c messages for mme
* @param : void
* @return : Returns sequence number on success , 0 otherwise
*/
uint32_t
generate_seq_number(void);
/**
* @brief : Retrieve Call ID from CCR Session ID
* @param : str represents CCR session ID
* @param : call_id , variable to store retrived call id
* @return : Returns 0 on success , 0 otherwise
*/
int
retrieve_call_id(char *str, uint32_t *call_id);
/**
* @brief : Generate the SESSION ID
* @param : cp session id
* @return : Returns dp session id on success , 0 otherwise
*/
uint64_t
generate_dp_sess_id(uint64_t cp_sess_id);
/**
* @brief : Generate the CCR Session ID with combination of timestamp and call id.
* @param : sess id
* @param : call id
* @return : Returns 0 on success
*/
int8_t
gen_sess_id_for_ccr(char *sess_id, uint32_t call_id);
void store_presence_reporting_area_info(pdn_connection *pdn_cntxt,
GxPresenceReportingAreaInformation *pres_rprtng_area_info);
/**
* @brief : Parse GX CCA message and fill ue context
* @param : cca holds data from gx cca message
* @param : _context , ue context to be filled
* @return : Returns 0 on success, -1 otherwise
*/
int8_t
parse_gx_cca_msg(GxCCA *cca, pdn_connection **_pdn);
/**
* @brief : Create a new bearer
* @param : pdn, pdn connection details
* @return : Returns 0 on success, -1 otherwise
*/
int
gx_create_bearer_req(pdn_connection *pdn);
/**
* @brief : Delete already existing bearer
* @param : pdn, pdn connection details
* @return : Returns 0 on success, -1 otherwise
*/
int
gx_delete_bearer_req(pdn_connection *pdn);
/**
* @brief : Updates the already existing bearer
* @param : pdn, pdn connection details
* @return : Returns 0 on success, -1 otherwise
*/
int
gx_update_bearer_req(pdn_connection *pdn);
/**
* @brief : Parse GX RAR message.
* @param : rar, rar holds data from gx rar message
* @param : pdn_cntxt, pointer structure for pdn information
* @return : Returns 0 on success, -1 otherwise
*/
int16_t
parse_gx_rar_msg(GxRAR *rar, pdn_connection *pdn_cntxt);
/**
* @brief : Get details of charging rule
* @param : pdn, pdn connection details
* @param : lbi
* @param : ded_ebi
* @param : ber_cnt
* @return : Return nothing
*/
void
get_charging_rule_remove_bearer_info(pdn_connection *pdn,
uint8_t *lbi, uint8_t *ded_ebi, uint8_t *ber_cnt);
/**
* @brief : Convert the decimal value into the string
* @param : buf , string to store output value
* @param : val, value to be converted.
* @return : Returns length of new string
*/
int
int_to_str(char *buf , uint32_t val);
/**
* @brief : Compare default bearer qos
* @param : default_bearer_qos
* @param : rule_qos
* @return : Returns 0 on success, -1 otherwise
*/
int8_t
compare_default_bearer_qos(bearer_qos_ie *default_bearer_qos,
bearer_qos_ie *rule_qos);
/**
* @brief : to check whether flow description is changed or not
* @param : dyn_rule, old dynamic_rule
* @param : dyn_rule, new dynamic_rule
* @return : Returns 1 if found changed, 0 otherwise
*/
uint8_t
compare_flow_description(dynamic_rule_t *old_dyn_rule, dynamic_rule_t *new_dyn_rule);
/**
* @brief : to check whether bearer qos is changed or not
* @param : dyn_rule, old dynamic_rule
* @param : dyn_rule, new dynamic_rule
* @return : Returns 1 if found changed, 0 otherwise
*/
uint8_t
compare_bearer_qos(dynamic_rule_t *old_dyn_rule, dynamic_rule_t *new_dyn_rule);
/**
* @brief : to check whether bearer arp is changed or not
* @param : dyn_rule, old dynamic_rule
* @param : dyn_rule, new dynamic_rule
* @return : Returns 1 if found changed, 0 otherwise
*/
uint8_t
compare_bearer_arp(dynamic_rule_t *old_dyn_rule, dynamic_rule_t *new_dyn_rule);
/**
* @brief : to change arp values for all the bearers
* @param : pdn, pdn
* @param : qos, Bearer Qos structure
* @return : nothing
*/
void
change_arp_for_ded_bearer(pdn_connection *pdn, bearer_qos_ie *qos);
/**
 * Add sequence number entry for the given TEID.
 * @param teid_key : sequence number and proc as a key
 * @param teid_value : structure containing value of TEID and msg_type
 * return 0 or -1
 */
int8_t
add_seq_number_for_teid(const teid_key_t teid_key, struct teid_value_t *teid_value);
/**
 * Get TEID entry for the given sequence number key.
 *
 * @param teid_key : sequence number and proc as a key
 * return teid_value structure in case of success otherwise null
 */
teid_value_t *get_teid_for_seq_number(const teid_key_t teid_key);
/**
 * Delete TEID entry for the given sequence number key.
 * @param teid_key : sequence number and proc as a key
 * return 0 or -1
 */
int8_t
delete_teid_entry_for_seq(const teid_key_t teid_key);
/**
* @brief : Fill qos information for bearer form Dynamic rule
* @param : bearer , eps bearer to be modified
* @return : Returns nothing
*/
void
update_bearer_qos(eps_bearer *bearer);
/**
* @brief : Store rule name & status for pro ack msg
* @param : policy , contains rule & rule action received in CCA-U
* @param : pro_ack_rule_array,global var to store rule name & their status
* @return : Returns 0 on success, else -1
*/
int
store_rule_status_for_pro_ack(policy_t *policy,
pro_ack_rule_array_t *pro_ack_rule_array);
#endif /* CP_BUILD */
#endif /* PFCP_H */
|
nikhilc149/e-utran-features-bug-fixes | pfcp_messages/pfcp_print_rule.c | /*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <arpa/inet.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <stdlib.h>
#include "up_main.h"
#include "util.h"
#include "interface.h"
#include "dp_ipc_api.h"
#ifdef PRINT_NEW_RULE_ENTRY
/**
 * @Name : print_sel_type_val
 * @arguments : [In] pointer to adc rule structure element
 * @return : void
 * @Description : Prints the selector-type specific part of an ADC rule
 * (domain name, domain IP address, or domain IP address + prefix) to stdout.
 */
void
print_sel_type_val(struct adc_rules *adc)
{
	if (NULL != adc) {
		struct in_addr addr = {0};
		switch (adc->sel_type) {
		case DOMAIN_NAME:
			fprintf(stdout, " ---> Domain Name :%s\n",
					adc->u.domain_name);
			break;
		case DOMAIN_IP_ADDR:
			/* parse_adc_buf() stores the address in host order;
			 * convert back to network order for inet_ntoa(). */
			addr.s_addr = ntohl(adc->u.domain_ip.u.ipv4_addr);
			fprintf(stdout, " ---> Domain Ip :%s\n",
					inet_ntoa(addr));
			break;
		case DOMAIN_IP_ADDR_PREFIX:
			addr.s_addr = ntohl(adc->u.domain_ip.u.ipv4_addr);
			fprintf(stdout, " ---> Domain Ip :%s\n",
					inet_ntoa(addr));
			fprintf(stdout, " ---> Domain Prefix :%u\n",
					adc->u.domain_prefix.prefix);
			break;
		default:
			/* Unrecognised selector; nothing more to print. */
			fprintf(stdout, "UNKNOWN Selector Type: %d\n",
					adc->sel_type);
			break;
		}
	}
}
/**
 * @Name : print_adc_val
 * @arguments : [In] pointer to adc rule structure element
 * @return : void
 * @Description : Dumps one complete ADC rule (rule id followed by its
 * selector-specific fields) to stdout.
 */
void
print_adc_val(struct adc_rules *adc)
{
	if (NULL == adc)
		return;

	fprintf(stdout, "=========================================\n");
	fprintf(stdout, " ---> ADC Rule Method ::\n");
	fprintf(stdout, "=========================================\n");
	fprintf(stdout, " ---> Rule id : %u\n", adc->rule_id);

	/* Selector-specific fields (domain name / ip / prefix). */
	print_sel_type_val(adc);

	fprintf(stdout, "=========================================\n\n");
}
/**
 * @Name : print_pcc_val
 * @arguments : [In] pointer to pcc rule structure element
 * @return : void
 * @Description : Dumps every field of one PCC rule to stdout, including the
 * attached ADC and SDF index lists.
 */
void
print_pcc_val(struct pcc_rules *pcc)
{
	if (NULL != pcc) {
		fprintf(stdout, "=========================================\n");
		fprintf(stdout, " ---> PCC Rule Method ::\n");
		fprintf(stdout, "=========================================\n");
		fprintf(stdout, " ---> Rule id : %u\n", pcc->rule_id);
		fprintf(stdout, " ---> metering_method :%u\n",
				pcc->metering_method);
		fprintf(stdout, " ---> charging_mode :%u\n",
				pcc->charging_mode);
		fprintf(stdout, " ---> rating_group :%u\n",
				pcc->rating_group);
		fprintf(stdout, " ---> rule_status :%u\n",
				pcc->rule_status);
		fprintf(stdout, " ---> ul_gate_status :%u\n",
				pcc->ul_gate_status);
		fprintf(stdout, " ---> dl_gate_status :%u\n",
				pcc->dl_gate_status);
		fprintf(stdout, " ---> session_cont :%u\n",
				pcc->session_cont);
		fprintf(stdout, " ---> monitoring_key :%u\n",
				pcc->monitoring_key);
		fprintf(stdout, " ---> precedence :%u\n",
				pcc->precedence);
		fprintf(stdout, " ---> level_of_report :%u\n",
				pcc->report_level);
		fprintf(stdout, " ---> mute_status :%u\n",
				pcc->mute_notify);
		fprintf(stdout, " ---> drop_pkt_count :%lu\n",
				pcc->drop_pkt_count);
		fprintf(stdout, " ---> redirect_info :%u\n",
				pcc->redirect_info.info);
		fprintf(stdout, " ---> mtr_profile_idx :%u\n",
				pcc->qos.mtr_profile_index);
		/* Attached ADC rule indices. */
		fprintf(stdout, " ---> ADC Index count:%u\n",
				pcc->adc_idx_cnt);
		for(uint8_t inx =0; inx < pcc->adc_idx_cnt; ++inx)
			fprintf(stdout, " ---> ADC Index [%u]:%u\n",
					inx, pcc->adc_idx[inx]);
		/* Attached SDF rule indices. */
		fprintf(stdout, " ---> SDF Index count:%u\n",
				pcc->sdf_idx_cnt);
		for(uint8_t i =0; i< pcc->sdf_idx_cnt; ++i)
			fprintf(stdout, " ---> SDF IDx [%u]:%u\n",
					i, pcc->sdf_idx[i]);
		fprintf(stdout, " ---> rule_name:%s\n", pcc->rule_name);
		fprintf(stdout, " ---> sponsor_id:%s\n", pcc->sponsor_id);
		fprintf(stdout, "=========================================\n\n");
	}
}
/**
 * @Name : print_mtr_val
 * @arguments : [In] pointer to mtr entry structure element
 * @return : void
 * @Description : Dumps one meter rule (profile index, CIR/CBS/EBS
 * parameters and metering method) to stdout.
 */
void
print_mtr_val(struct mtr_entry *mtr)
{
	if (NULL == mtr)
		return;

	fprintf(stdout, "=========================================\n");
	fprintf(stdout, " ---> Meter Rule Method ::\n");
	fprintf(stdout, "=========================================\n");
	fprintf(stdout, " ---> Meter profile index :%u\n",
			mtr->mtr_profile_index);
	fprintf(stdout, " ---> Meter CIR :%lu\n",
			mtr->mtr_param.cir);
	fprintf(stdout, " ---> Meter CBS :%lu\n",
			mtr->mtr_param.cbs);
	fprintf(stdout, " ---> Meter EBS :%lu\n",
			mtr->mtr_param.ebs);
	fprintf(stdout, " ---> Metering Method :%u\n",
			mtr->metering_method);
	fprintf(stdout, "=========================================\n\n");
}
/**
 * @Name : print_sdf_val
 * @arguments : [In] pointer to pkt_filter structure element
 * @return : void
 * @Description : Dumps one SDF rule to stdout. Only string-form rules
 * (RULE_STRING) currently print their contents; FIVE_TUPLE is reserved.
 */
void
print_sdf_val(struct pkt_filter *sdf)
{
	if (NULL != sdf) {
		fprintf(stdout, "==========================================\n");
		fprintf(stdout, " ---> SDF Rule Method ::\n");
		fprintf(stdout, "==========================================\n");
		switch (sdf->sel_rule_type) {
		case RULE_STRING:
			fprintf(stdout, " ---> pcc_rule_id :%u\n",
					sdf->rule_id);
			fprintf(stdout, " ---> rule_type :%u\n",
					sdf->sel_rule_type);
			fprintf(stdout, " ---> rule_str : %s\n",
					sdf->u.rule_str);
			fprintf(stdout, "====================================\n\n");
			break;
		case FIVE_TUPLE:
			/*TODO: rule should be in struct
			 * five_tuple_rule
			 * This field is currently not used
			 */
			break;
		default:
			fprintf(stdout, "UNKNOWN Rule Type: %d\n",
					sdf->sel_rule_type);
			break;
		}
	}
}
#endif /*PRINT_NEW_RULE_ENTRY*/
/**
 * @Name : parse_adc_buf
 * @argument :
 *	sel_type : ADC rule selector type (DOMAIN_NAME, DOMAIN_IP_ADDR,
 *	or DOMAIN_IP_ADDR_PREFIX)
 *	[In] pointer (arm) to zmq rcv structure element
 *	[Out] pointer (adc) to adc rules structure element
 * @return
 *	0 - success
 *	-1 - fail
 * @Description : Parses the received buffer into the adc_rules struct,
 * interpreting the payload according to the selector type:
 *	DOMAIN_NAME           : one length byte followed by the name bytes
 *	DOMAIN_IP_ADDR        : 4-byte IPv4 address (network order)
 *	DOMAIN_IP_ADDR_PREFIX : 4-byte IPv4 address + 2-byte prefix
 * ref.doc: message_sdn.docx
 * section : Table No.11 ADC Rules
 */
int
parse_adc_buf(int sel_type, char *arm, struct adc_rules *adc)
{
	if (arm != NULL) {
		switch (sel_type) {
		case DOMAIN_NAME: {
			/* First byte carries the domain-name length; the name
			 * itself follows and is NOT NUL-terminated on the wire. */
			size_t name_len = *(uint8_t *)(arm);

			/* Clamp to the destination buffer, keeping room for '\0'. */
			if (name_len >= sizeof(adc->u.domain_name))
				name_len = sizeof(adc->u.domain_name) - 1;
			memcpy(adc->u.domain_name, (arm) + 1, name_len);
			/* BUG FIX: strncpy() left the name unterminated whenever
			 * the source carried no '\0'; terminate explicitly. */
			adc->u.domain_name[name_len] = '\0';
#ifdef PRINT_NEW_RULE_ENTRY
			print_adc_val(adc);
#endif
			return 0;
		}
		case DOMAIN_IP_ADDR_PREFIX:
			/* NOTE(review): assumes arm is suitably aligned for a
			 * 32-bit load -- confirm sender layout. */
			adc->u.domain_ip.u.ipv4_addr =
					ntohl(*(uint32_t *)(arm));
			adc->u.domain_prefix.prefix =
					rte_bswap16(*(uint16_t *)((arm) + 4));
#ifdef PRINT_NEW_RULE_ENTRY
			print_adc_val(adc);
#endif /* PRINT_NEW_RULE_ENTRY */
			return 0;
		case DOMAIN_IP_ADDR:
			adc->u.domain_ip.u.ipv4_addr =
					ntohl(*(uint32_t *)(arm));
#ifdef PRINT_NEW_RULE_ENTRY
			print_adc_val(adc);
#endif /* PRINT_NEW_RULE_ENTRY */
			return 0;
		default:
			fprintf(stdout, "UNKNOWN Selector Type: %d\n",
					sel_type);
			return -1;
		}
	}
	return -1;
}
/**
 * @Name : get_sdf_indices
 * @argument :
 *	[IN] sdf_idx : String containing comma separated SDF index values
 *	[OUT] out_sdf_idx : Array of integers converted from sdf_idx
 * @return : Number of indices stored in out_sdf_idx
 *	(at most MAX_SDF_IDX_COUNT)
 * @Description : Convert sdf_idx string into an array of integer SDF index
 * values. Non-digit characters (brackets, spaces, commas) are skipped.
 * Sample input : "[0, 1, 2, 3]"
 */
uint32_t
get_sdf_indices(char *sdf_idx, uint32_t *out_sdf_idx)
{
	uint32_t cnt = 0;
	char *pos = sdf_idx;

	/* Improvement: the previous strtok()/atoi() version was not reentrant
	 * (static state in strtok), mutated the input string, and atoi() gives
	 * no overflow detection. Parse digits in place with strtoul() instead. */
	while (pos != NULL && *pos != '\0' && cnt < MAX_SDF_IDX_COUNT) {
		/* Skip separators/brackets up to the next decimal digit. */
		while (*pos != '\0' && (*pos < '0' || *pos > '9'))
			pos++;
		if (*pos == '\0')
			break;
		out_sdf_idx[cnt++] = (uint32_t)strtoul(pos, &pos, 10);
	}
	return cnt;
}
|
nikhilc149/e-utran-features-bug-fixes | ulpc/d_admf/include/DAdmf.h | /*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __D_ADMF_H_
#define __D_ADMF_H_
#include <iostream>
#include <pthread.h>
#include "epc/etevent.h"
#include "epc/epctools.h"
#include "UeConfigAsCSV.h"
#include "CpConfigAsCSV.h"
#include "AddUeEntry.h"
#include "UpdateUeEntry.h"
#include "DeleteUeEntry.h"
#include "RegisterCp.h"
#include "AcknowledgementPost.h"
#include "UeTimerThrd.h"
#include "AckTimerThrd.h"
#define IPV6_MAX_LEN 16
#define UECONFIGFILEPATH "database/uedb.csv"
#define CPCONFIGFILEPATH "database/cpdb.csv"
class DAdmfApp
{
	// Number of outstanding references to the singleton instance.
	static int iRefCntr;
	// The single process-wide DAdmfApp instance.
	static DAdmfApp *ptrInstance;
	// Mutex serialising access via lock()/unlock().
	pthread_mutex_t mLock;

	public:
		/**
		 * @brief : Creates singleton object of DAdmfApp and increment
			the reference count
		 * @param : No param
		 * @return : Returns reference to singleton DAdmf object
		 */
		static DAdmfApp* GetInstance(void);

		/**
		 * @brief : Releases DAdmf singleton object if reference count is zero
		 * @param : No param
		 * @return : Returns nothing
		 */
		void ReleaseInstance(void);

		/**
		 * @brief : Sends Add, Update, Delete Ue Entry To All CP's
		 * @param : strURI, url suffix (addueentry, updateueentry, deleteueentry)
		 * @param : strPostData, request body to send
		 * @return : Returns 0 in case of Success, -1 otherwise
		 */
		int8_t SendRequestToAllCp(const std::string &strURI,
				const std::string &strPostData);

		/**
		 * @brief : Sends Add, Update, Delete Ue Entry to Admf if forward flag
			is set fo LI(1) or Both(2)
		 * @param : strURI, url suffix (addueentry, updateueentry, deleteueentry)
		 * @param : strPostData, request body to send
		 * @return : Returns 0 in case of Success, -1 otherwise
		 */
		int8_t SendRequestToAdmf(const std::string &strURI,
				const std::string &strPostData);

		/**
		 * @brief : Sends notification to admf when start time for Ue entry elapses.
		 * @param : strURI, url suffix (start, stop)
		 * @param : strPostData, request body to send
		 * @return : Returns 0 in case of Success, -1 otherwise
		 */
		int8_t SendNotificationToAdmf(const std::string &strURI,
				const std::string &strPostData);

		/**
		 * @brief : Initializes all required objects
		 * @param : opt, command-line parameter
		 * @return : Returns nothing
		 */
		void startup(EGetOpt &opt);

		/**
		 * @brief : Deletes all the initialized objects before exiting the process
		 * @param : No param
		 * @return : Returns nothing
		 */
		void shutdown();

		/**
		 * @brief : Sets shutdown event of EpcTools on handling the signal
		 * @param : No param
		 * @return : Returns nothing
		 */
		void setShutdownEvent(void)
		{
			mShutdown.set();
		}

		/**
		 * @brief : Waits until process is killed or shutdown event is set
		 * @param : No param
		 * @return : Returns nothing
		 */
		void waitForShutdown(void) { mShutdown.wait(); }

		// Accessor/mutator for the UE configuration store.
		// NOTE(review): ownership of the previous object on set is not
		// visible here -- confirm who frees it.
		UeConfig* getPtrUeConfig();
		void setPtrUeConfig(UeConfig* ueConfig);

		// Accessor/mutator for the CP configuration store.
		CpConfig* getPtrCpConfig();
		void setPtrCpConfig(CpConfig* cpConfig);

		// Accessor for the UE timer thread.
		EThreadUeTimer* getTimerThread();

		// Acquire/release the internal mLock mutex.
		void lock();
		void unlock();

		~DAdmfApp();

	private:
		// Event signalled by setShutdownEvent(), waited on by waitForShutdown().
		EEvent mShutdown;

		// Private constructor: instances are created only via GetInstance().
		DAdmfApp();

		// Configuration stores (CSV-backed; see UeConfigAsCSV/CpConfigAsCSV).
		UeConfig *ptrUeConfig;
		CpConfig *ptrCpConfig;

		// REST endpoint handlers for the admin interface.
		AddUeEntryPost *mpAddUeEntryPost;
		UpdateUeEntryPost *mpUpdtUeEntryPost;
		DeleteUeEntryPost *mpDelUeEntryPost;
		RegisterCpPost *mpRegisterCpPost;
		AcknowledgementPost *mpAckPost;
		EManagementEndpoint *mpCliPost;

		// Background timer threads (ack checking and UE start/stop timers).
		EThreadAckTimer *ackCheckTimer;
		EThreadUeTimer *timerThread;
};
|
nikhilc149/e-utran-features-bug-fixes | oss_adapter/libepcadapter/include/tcp_forwardinterface.h | <reponame>nikhilc149/e-utran-features-bug-fixes
/*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
#ifndef __TCP_FORWARD_INTERFACE_H_
#define __TCP_FORWARD_INTERFACE_H_
#include "tcp_listener.h"
class TCPListener;
class TCPForwardInterface : public ESocket::TCP::TalkerPrivate
{
	public:
		/*
		 * @brief : Constructor of class TCPForwardInterface
		 */
		TCPForwardInterface(TCPListener &thread);

		/*
		 * @brief : Destructor of class TCPForwardInterface
		 */
		virtual ~TCPForwardInterface();

		/*
		 * @brief : EpcTools socket callback -- connection established
		 */
		Void onConnect();

		/*
		 * @brief : EpcTools socket callback -- data available to read
		 */
		Void onReceive();

		/*
		 * @brief : EpcTools socket callback -- connection closed
		 */
		Void onClose();

		/*
		 * @brief : EpcTools socket callback -- socket error
		 */
		Void onError();

		/*
		 * @brief : Function to send data to DF
		 * @param : df, packet/information to be sent to DF
		 * @return : Returns void
		 */
		Void sendData(DdfPacket_t *df);

	private:
		/*
		 * @brief : Default constructor (private: not for external use)
		 */
		TCPForwardInterface();

		// Maximum number of queued messages.
		// NOTE(review): semantics inferred from the name -- confirm
		// against tcp_listener usage.
		Long m_maxMsgs;
		// Semaphore tracking the queued message count.
		ESemaphorePrivate msg_cnt;
};
|
nikhilc149/e-utran-features-bug-fixes | interface/interface.c | <gh_stars>0
/*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdint.h>
#include <arpa/inet.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <rte_common.h>
#include <rte_eal.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_jhash.h>
#include <rte_cfgfile.h>
#include "util.h"
#include "interface.h"
#include "dp_ipc_api.h"
#include "gw_adapter.h"
#ifndef CP_BUILD
#include "up_acl.h"
#else
#include "gtpv2c.h"
#include "ipc_api.h"
extern pfcp_config_t config;
extern uint8_t recovery_flag;
extern int gx_app_sock;
extern int gx_app_sock_v6;
extern int gx_app_sock_read;
extern int gx_app_sock_read_v6;
extern int msg_handler_gx( void );
#endif /* CP_BUILD */
/*
* UDP Setup
*/
udp_sock_t my_sock = {0};
/* ROUTE DISCOVERY */
extern int route_sock;
extern int clSystemLog;
struct in_addr dp_comm_ip;
struct in6_addr dp_comm_ipv6;
uint8_t dp_comm_ip_type;
peer_addr_t up_pfcp_sockaddr;
struct in_addr cp_comm_ip;
struct in6_addr cp_comm_ip_v6;
uint8_t cp_comm_ip_type;
peer_addr_t cp_pfcp_sockaddr;
uint16_t dp_comm_port;
uint16_t cp_comm_port;
#ifdef TIMER_STATS
#ifdef AUTO_ANALYSIS
extern void print_perf_statistics(void);
#endif /* AUTO_ANALYSIS */
#endif /* TIMER_STATS */
extern struct ipc_node *basenode;
extern struct rte_hash *heartbeat_recovery_hash;
#define htonll(x) ((1==htonl(1)) ? (x) : ((uint64_t)htonl((x) & 0xFFFFFFFF) << 32) | htonl((x) >> 32))
#define ntohll(x) ((1==ntohl(1)) ? (x) : ((uint64_t)ntohl((x) & 0xFFFFFFFF) << 32) | ntohl((x) >> 32))
#define IFACE_FILE "../config/interface.cfg"
#define SET_CONFIG_IP(ip, file, section, entry) \
do {\
entry = rte_cfgfile_get_entry(file, section, #ip);\
if (entry == NULL)\
rte_panic("%s not found in %s", #ip, IFACE_FILE);\
if (inet_aton(entry, &ip) == 0)\
rte_panic("Invalid %s in %s", #ip, IFACE_FILE);\
} while (0)
#define SET_CONFIG_PORT(port, file, section, entry) \
do {\
entry = rte_cfgfile_get_entry(file, section, #port);\
if (entry == NULL)\
rte_panic("%s not found in %s", #port, IFACE_FILE);\
if (sscanf(entry, "%"SCNu16, &port) != 1)\
rte_panic("Invalid %s in %s", #port, IFACE_FILE);\
} while (0)
/**
 * @brief : Creates and binds the PFCP UDP socket(s) for the data plane.
 *          Depending on dp_comm_ip_type an IPv4 socket, an IPv6 socket,
 *          or both are created and bound to recv_port.
 * @param : dp_comm_ipv4, IPv4 address to bind
 * @param : dp_comm_ipv6, IPv6 address to bind (binary struct in6_addr)
 * @param : dp_comm_ip_type, PDN_TYPE_IPV4 / PDN_TYPE_IPV6 / PDN_TYPE_IPV4_IPV6
 * @param : recv_port, UDP port to bind (host byte order)
 * @param : sock, out-param receiving the created socket fd(s)
 * @return : 0 on success; on socket/bind failure the process panics
 */
static int create_udp_socket(struct in_addr dp_comm_ipv4, struct in6_addr dp_comm_ipv6,
		uint8_t dp_comm_ip_type, uint16_t recv_port, udp_sock_t *sock)
{
	dp_comm_port = htons(recv_port);

	if (dp_comm_ip_type == PDN_TYPE_IPV6 || dp_comm_ip_type == PDN_TYPE_IPV4_IPV6) {
		int mode = 1, ret = 0;
		socklen_t v6_addr_len = sizeof(up_pfcp_sockaddr.ipv6);

		sock->sock_fd_v6 = socket(AF_INET6, SOCK_DGRAM, 0);
		if (sock->sock_fd_v6 < 0) {
			rte_panic("Socket call error : %s", strerror(errno));
			return -1;
		}

		/*Below Option allows to bind to same port for multiple IPv6 addresses*/
		setsockopt(sock->sock_fd_v6, SOL_SOCKET, SO_REUSEPORT, &mode, sizeof(mode));
		/*Below Option allows to bind to same port for IPv4 and IPv6 addresses*/
		setsockopt(sock->sock_fd_v6, IPPROTO_IPV6, IPV6_V6ONLY, (char*)&mode, sizeof(mode));

		up_pfcp_sockaddr.ipv6.sin6_family = AF_INET6;
		/* BUG FIX: dp_comm_ipv6 is a struct in6_addr and already holds the
		 * address in binary form; inet_pton() expects a NUL-terminated
		 * presentation string and misinterprets raw bytes. Copy the
		 * binary address directly instead. */
		memcpy(up_pfcp_sockaddr.ipv6.sin6_addr.s6_addr,
				dp_comm_ipv6.s6_addr, sizeof(dp_comm_ipv6.s6_addr));
		up_pfcp_sockaddr.ipv6.sin6_port = dp_comm_port;

		ret = bind(sock->sock_fd_v6, (struct sockaddr *) &up_pfcp_sockaddr.ipv6, v6_addr_len);
		if (ret < 0) {
			rte_panic("Bind error for V6 UDP socket : %s\n",
					strerror(errno));
			return -1;
		}
		up_pfcp_sockaddr.type = PDN_TYPE_IPV6;
	}

	if (dp_comm_ip_type == PDN_TYPE_IPV4 || dp_comm_ip_type == PDN_TYPE_IPV4_IPV6) {
		int mode = 1;
		socklen_t v4_addr_len = sizeof(up_pfcp_sockaddr.ipv4);

		sock->sock_fd = socket(AF_INET, SOCK_DGRAM, 0);
		if (sock->sock_fd < 0) {
			rte_panic("Socket call error : %s", strerror(errno));
			return -1;
		}

		/*Below Option allows to bind to same port for multiple IPv4 addresses*/
		setsockopt(sock->sock_fd, SOL_SOCKET, SO_REUSEPORT, &mode, sizeof(mode));

		bzero(up_pfcp_sockaddr.ipv4.sin_zero, sizeof(up_pfcp_sockaddr.ipv4.sin_zero));
		up_pfcp_sockaddr.ipv4.sin_family = AF_INET;
		up_pfcp_sockaddr.ipv4.sin_port = dp_comm_port;
		up_pfcp_sockaddr.ipv4.sin_addr.s_addr = dp_comm_ipv4.s_addr;

		int ret = bind(sock->sock_fd, (struct sockaddr *) &up_pfcp_sockaddr.ipv4, v4_addr_len);
		if (ret < 0) {
			rte_panic("Bind error for V4 UDP Socket %s:%u - %s\n",
					inet_ntoa(up_pfcp_sockaddr.ipv4.sin_addr),
					ntohs(up_pfcp_sockaddr.ipv4.sin_port),
					strerror(errno));
			return -1;
		}
		up_pfcp_sockaddr.type = PDN_TYPE_IPV4;
	}

	return 0;
}
/**
 * @brief : Receives one PFCP datagram (non-blocking) from the IPv4 or IPv6
 *          PFCP socket and records the sender address in peer_addr.
 * @param : msg_payload, buffer receiving the datagram
 * @param : size, capacity of msg_payload in bytes
 * @param : peer_addr, out-param filled with the peer address and type flag
 * @param : is_ipv6, selects the IPv6 socket when true, IPv4 otherwise
 * @return : number of bytes received (0 for an empty datagram),
 *           or -1 on receive failure
 */
int
udp_recv(void *msg_payload, uint32_t size, peer_addr_t *peer_addr, bool is_ipv6)
{
	socklen_t v4_addr_len = sizeof(peer_addr->ipv4);
	socklen_t v6_addr_len = sizeof(peer_addr->ipv6);
	int bytes = 0;

	if (!is_ipv6) {
		bytes = recvfrom(my_sock.sock_fd, msg_payload, size,
				MSG_DONTWAIT, (struct sockaddr *) &peer_addr->ipv4,
				&v4_addr_len);
		peer_addr->type |= PDN_TYPE_IPV4;
		clLog(clSystemLog, eCLSeverityDebug, "pfcp received bytes "
				"with IPv4 Address");
	} else {
		bytes = recvfrom(my_sock.sock_fd_v6, msg_payload, size,
				MSG_DONTWAIT, (struct sockaddr *) &peer_addr->ipv6,
				&v6_addr_len);
		peer_addr->type |= PDN_TYPE_IPV6;
		clLog(clSystemLog, eCLSeverityDebug, "pfcp received bytes "
				"with IPv6 Address");
	}

	/* BUG FIX: recvfrom() reports failure with -1, not 0. A return of 0 is
	 * a valid zero-length datagram; only log genuine receive errors. */
	if (bytes < 0) {
		clLog(clSystemLog, eCLSeverityCritical, "Error while recieving from "
				"PFCP socket");
	}

	return bytes;
}
/**
 * @brief Initialize iface message passing
 *
 * Creates and binds the PFCP UDP socket(s) -- IPv4 and/or IPv6 according to
 * the global dp_comm_ip_type -- into the global my_sock.
 *
 * This function is not thread safe and should only be called once by DP.
 */
void iface_module_constructor(void)
{
	clLog(clSystemLog, eCLSeverityMajor,LOG_FORMAT"IFACE: DP Initialization\n", LOG_VALUE);
	create_udp_socket(dp_comm_ip, dp_comm_ipv6, dp_comm_ip_type, dp_comm_port, &my_sock);
	clLog(clSystemLog, eCLSeverityMajor, "Data-Plane IFACE Initialization Complete\n");
}
#ifdef CP_BUILD
/*
 * CP message processing loop body: blocks in select() on all active control
 * plane sockets (PFCP v4/v6, S11 v4/v6, S5/S8 v4/v6 and, when enabled, the
 * Gx application socket) and dispatches whichever have data. While recovery
 * is in progress only PFCP messages are handled; everything else stays
 * queued in the kernel socket buffers.
 */
void process_cp_msgs(void)
{
	int max = 0;
	int n = 0, rv = 0;
	fd_set readfds = {0};
	peer_addr_t peer_addr = {0};

	/* Reset Collections */
	FD_ZERO(&readfds);

	/* Add PFCP_FD in the set */
	if(my_sock.sock_fd > 0)
		FD_SET(my_sock.sock_fd, &readfds);
	if(my_sock.sock_fd_v6 > 0)
		FD_SET(my_sock.sock_fd_v6, &readfds);

	/* Add S11_FD in the set (S11 is not used when running as PGW-C) */
	if (config.cp_type != PGWC) {
		if(my_sock.sock_fd_s11 > 0)
			FD_SET(my_sock.sock_fd_s11, &readfds);
		if(my_sock.sock_fd_s11_v6 > 0)
			FD_SET(my_sock.sock_fd_s11_v6, &readfds);
	}

	/* Add S5S8_FD in the set */
	if(my_sock.sock_fd_s5s8 > 0)
		FD_SET(my_sock.sock_fd_s5s8, &readfds);
	if(my_sock.sock_fd_s5s8_v6 > 0)
		FD_SET(my_sock.sock_fd_s5s8_v6, &readfds);

	/* Add GX_FD in the set (Gx is not used when running as SGW-C) */
	if ((config.use_gx) && config.cp_type != SGWC) {
		if(gx_app_sock_read > 0)
			FD_SET(gx_app_sock_read, &readfds);
	}

	/* Set the MAX FD's stored into the set */
	max = my_sock.sock_fd;
	max = (my_sock.sock_fd_v6 > max ? my_sock.sock_fd_v6: max);
	max = (my_sock.sock_fd_s11 > max ? my_sock.sock_fd_s11: max);
	max = (my_sock.sock_fd_s11_v6 > max ? my_sock.sock_fd_s11_v6: max);
	max = (my_sock.sock_fd_s5s8 > max ? my_sock.sock_fd_s5s8: max);
	max = (my_sock.sock_fd_s5s8_v6 > max ? my_sock.sock_fd_s5s8_v6: max);
	if ((config.use_gx) && config.cp_type != SGWC) {
		max = (gx_app_sock_read > max ? gx_app_sock_read : max);
		/* NOTE(review): gx_app_sock_read_v6 is counted towards max but is
		 * never added to readfds above nor dispatched below -- confirm
		 * whether the v6 Gx socket should also be FD_SET/handled. */
		max = (gx_app_sock_read_v6 > max ? gx_app_sock_read_v6 : max);
	}
	n = max + 1;

	/* No timeout: blocks until at least one registered fd is readable. */
	rv = select(n, &readfds, NULL, NULL, NULL);
	if (rv == -1) {
		/*TODO: Need to Fix*/
		//perror("select"); /* error occurred in select() */
	} else if (rv > 0) {
		/* when recovery mode is initiate, CP handle only pfcp message, and other msg is in socket queue */
		if (recovery_flag == 1) {
			if (FD_ISSET(my_sock.sock_fd, &readfds)) {
				process_pfcp_msg(pfcp_rx, &peer_addr, NOT_PRESENT);
			}
			if (FD_ISSET(my_sock.sock_fd_v6, &readfds)) {
				process_pfcp_msg(pfcp_rx, &peer_addr, PRESENT);
			}
			return;
		}

		/* Gx first, then PFCP, then the GTPv2-C interfaces. */
		if ((config.use_gx) && config.cp_type != SGWC &&
				(FD_ISSET(gx_app_sock_read, &readfds))) {
			msg_handler_gx();
		}
		if (FD_ISSET(my_sock.sock_fd, &readfds)) {
			process_pfcp_msg(pfcp_rx, &peer_addr, NOT_PRESENT);
		}
		if (FD_ISSET(my_sock.sock_fd_v6, &readfds)){
			process_pfcp_msg(pfcp_rx, &peer_addr, PRESENT);
		}
		if (config.cp_type != PGWC) {
			if (FD_ISSET(my_sock.sock_fd_s11, &readfds)) {
				msg_handler_s11(NOT_PRESENT);
			}
			if(FD_ISSET(my_sock.sock_fd_s11_v6, &readfds)){
				msg_handler_s11(PRESENT);
			}
		}
		if (FD_ISSET(my_sock.sock_fd_s5s8, &readfds)) {
			msg_handler_s5s8(NOT_PRESENT);
		}
		if(FD_ISSET(my_sock.sock_fd_s5s8_v6, &readfds)){
			msg_handler_s5s8(PRESENT);
		}
	}
}
#else /*End of CP_BUILD*/
/*
 * DP message processing loop body: blocks in select() on the PFCP IPv4 and
 * IPv6 sockets (whichever are open) and dispatches any pending PFCP message.
 */
void process_dp_msgs(void) {

	peer_addr_t peer_addr = {0};
	fd_set readfds = {0};
	int nfds = 0;

	FD_ZERO(&readfds);

	/* Register whichever PFCP sockets are open. */
	if (my_sock.sock_fd > 0)
		FD_SET(my_sock.sock_fd, &readfds);
	if (my_sock.sock_fd_v6 > 0)
		FD_SET(my_sock.sock_fd_v6, &readfds);

	/* select() needs highest-numbered fd + 1. */
	nfds = ((my_sock.sock_fd_v6 > my_sock.sock_fd) ?
			my_sock.sock_fd_v6 : my_sock.sock_fd) + 1;

	int ready = select(nfds, &readfds, NULL, NULL, NULL);
	if (ready == -1) {
		/*TODO: Need to Fix*/
		//perror("select"); /* error occurred in select() */
	} else if (ready > 0) {
		/* one or both of the descriptors have data */
		if (FD_ISSET(my_sock.sock_fd, &readfds))
			process_pfcp_msg(pfcp_rx, &peer_addr, NOT_PRESENT);
		if (FD_ISSET(my_sock.sock_fd_v6, &readfds))
			process_pfcp_msg(pfcp_rx, &peer_addr, PRESENT);
	}
}
|
nikhilc149/e-utran-features-bug-fixes | pfcp_messages/pfcp_struct.h | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef PFCP_STRUCT_H
#define PFCP_STRUCT_H
#define MAX_LIST_SIZE 16
#define MAX_FLOW_DESC_LEN 256
#define RULE_NAME_LEN (256)
#define NONE_PDN_TYPE 0
#define PDN_TYPE_IPV4 1
#define PDN_TYPE_IPV6 2
#define PDN_TYPE_IPV4_IPV6 3
#include "pfcp_ies.h"
#define MAX_FRWD_PLCY_IDENTIFIER_LEN 255
/**
* li max entries per imsi. id is different per imsi
*/
#define MAX_LI_ENTRIES_PER_UE 16
#define URR_PER_PDR 1
/* Measurement method applied by a Usage Reporting Rule (URR). */
enum urr_measur_method {
	VOL_BASED,      /* volume-based measurement */
	TIME_BASED,     /* time-based measurement */
	VOL_TIME_BASED  /* both volume- and time-based measurement */
};
/**
* @brief : Maintains Source Interface details
*/
typedef struct source_intfc_info_t {
uint8_t interface_value;
} src_intfc_t;
/**
* @brief : Maintains fteid details
*/
typedef struct fteid_info_t {
uint8_t chid;
uint8_t ch;
uint8_t v6;
uint8_t v4;
uint32_t teid;
uint32_t ipv4_address;
uint8_t ipv6_address[IPV6_ADDRESS_LEN];
uint8_t choose_id;
}fteid_ie_t;
/**
* @brief : Maintains node address type and address value
*/
struct node_address_t
{
/* Setting IP Address type either IPv4:1 or IPv6:2 */
uint8_t ip_type;
/*Node Address*/
uint32_t ipv4_addr;
uint8_t ipv6_addr[IPV6_ADDRESS_LEN];
}__attribute__((packed, aligned(RTE_CACHE_LINE_SIZE)));
typedef struct node_address_t node_address_t;
/**
* @brief : Maintains ue ip address details
*/
typedef struct ue_ip_address_info_t {
uint8_t ipv6d;
uint8_t sd;
uint8_t v4;
uint8_t v6;
uint32_t ipv4_address;
uint8_t ipv6_address[IPV6_ADDRESS_LEN];
uint8_t ipv6_pfx_dlgtn_bits;
} ue_ip_addr_t;
/**
* @brief : Maintains sdf filter details
*/
typedef struct sdf_filter_info_t {
uint8_t bid;
uint8_t fl;
uint8_t spi;
uint8_t ttc;
uint8_t fd;
/* TODO: Need to think on flow desc*/
uint8_t flow_desc[MAX_FLOW_DESC_LEN];
uint16_t len_of_flow_desc;
uint16_t tos_traffic_cls;
uint32_t secur_parm_idx;
uint32_t flow_label;
uint32_t sdf_filter_id;
} sdf_filter_t;
/**
* @brief : Maintains Network Instance value
*/
typedef struct network_inst_t {
/* TODO: Revisit this */
uint8_t ntwk_inst[PFCP_NTWK_INST_LEN];
} ntwk_inst_t;
/**
* @brief : Maintains Application ID value
*/
typedef struct application_id_info_t {
/* TODO: Revisit this for change */
uint8_t app_ident[PFCP_APPLICATION_ID_LEN];
} app_id_t;
/**
* @brief : Maintains pdi information
*/
typedef struct pdi_info_t {
uint8_t sdf_filter_cnt;
src_intfc_t src_intfc;
ue_ip_addr_t ue_addr;
ntwk_inst_t ntwk_inst;
fteid_ie_t local_fteid;
sdf_filter_t sdf_filter[MAX_LIST_SIZE];
app_id_t application_id;
}pdi_t;
/**
 * @brief : Maintains Outer Header Removal
 */
typedef struct outer_hdr_removal_info_t {
	uint8_t outer_hdr_removal_desc;	/* description code (e.g. GTP-U/UDP/IPv4) */
	/* TODO: Revisit this for change */
	// uint8_t gtpu_ext_hdr_del;
} outer_hdr_removal_t;
/**
 * @brief : Maintains urr id value
 */
typedef struct urr_id_t {
	uint32_t urr_id;                              /* URR ID */
}urr;
/**
 * @brief : Maintains qer id value
 */
typedef struct qer_id_t {
	uint32_t qer_id;                              /* QER ID */
}qer;
/**
 * @brief : Maintains activating predefined rule name
 */
typedef struct actvt_predef_rules_t {
	uint8_t predef_rules_nm[RULE_NAME_LEN];
}actvt_predef_rules;
/**
 * @brief : Maintains Destination Interface value
 */
typedef struct destination_intfc_t {
	uint8_t interface_value;	/* ACCESS/CORE/... interface code */
} dst_intfc_t;
/**
 * @brief : Maintains Outer Header Creation information
 */
typedef struct outer_hdr_creation_info_t{
	uint8_t ip_type;		/* which of the addresses below is valid */
	uint16_t outer_hdr_creation_desc;	/* encapsulation description bits */
	uint32_t teid;			/* TEID to put in the created GTP-U header */
	uint32_t ipv4_address;
	uint8_t ipv6_address[IPV6_ADDRESS_LEN];
	uint16_t port_number;
	uint32_t ctag;			/* customer VLAN tag */
	uint32_t stag;			/* service VLAN tag */
}outer_hdr_creation_t;
/**
 * @brief : Maintains Transport Level Marking
 */
typedef struct transport_lvl_marking_info_t {
	uint16_t tostraffic_cls;	/* ToS/Traffic Class to mark outgoing pkts */
} trnspt_lvl_marking_t;
/**
 * @brief : Maintains Header Enrichment info
 */
typedef struct hdr_enrchmt_info_t {
	uint8_t header_type;
	uint8_t len_of_hdr_fld_nm;	/* length of header field name */
	uint8_t hdr_fld_nm;
	uint8_t len_of_hdr_fld_val;	/* length of header field value */
	uint8_t hdr_fld_val;
} hdr_enrchmt_t;
/**
 * @brief : Maintains Redirect Information
 */
typedef struct redirect_info_t {
	uint8_t redir_addr_type;	/* address type of the redirect server */
	uint8_t redir_svr_addr_len;
	uint8_t redir_svr_addr;
} redir_info_t;
/**
 * @brief : Maintains Forwarding Policy info
 */
typedef struct forwardng_plcy_t {
	uint8_t frwdng_plcy_ident_len;	/* bytes used in frwdng_plcy_ident */
	uint8_t frwdng_plcy_ident[MAX_FRWD_PLCY_IDENTIFIER_LEN];
} frwdng_plcy_t;
/**
 * @brief : Maintains Traffic Endpoint ID
 */
typedef struct traffic_endpoint_id_t {
	uint8_t traffic_endpt_id_val;
} traffic_endpt_id_t;
/**
 * @brief : Maintains proxying info
 */
typedef struct proxying_inf_t {
	uint8_t ins;		/* respond to IPv6 Neighbour Solicitation */
	uint8_t arp;		/* respond to ARP requests */
} proxying_t;
/**
 * @brief : Maintains Apply Action details (one flag per PFCP action bit)
 */
typedef struct apply_action_t {
	uint8_t dupl;		/* duplicate */
	uint8_t nocp;		/* notify CP */
	uint8_t buff;		/* buffer */
	uint8_t forw;		/* forward */
	uint8_t drop;		/* drop */
} apply_action;
/**
 * @brief : Maintains gate status
 */
typedef struct gate_status_info_t {
	uint8_t ul_gate;	/* uplink gate open/closed */
	uint8_t dl_gate;	/* downlink gate open/closed */
} gate_status_t;
/**
 * @brief : Maintains mbr info
 * NOTE(review): struct tag is "req_status_t" but the typedef is mbr_t
 * (Maximum Bitrate) -- the tag name looks like a copy/paste leftover.
 */
typedef struct req_status_t {
	uint64_t ul_mbr;	/* uplink maximum bitrate */
	uint64_t dl_mbr;	/* downlink maximum bitrate */
} mbr_t;
/**
 * @brief : Maintains gbr info
 */
typedef struct gbr_info_t {
	uint64_t ul_gbr;	/* uplink guaranteed bitrate */
	uint64_t dl_gbr;	/* downlink guaranteed bitrate */
} gbr_t;
/**
 * @brief : Maintains Packet Rate info
 */
typedef struct packet_rate_info_t {
	uint8_t dlpr;		/* downlink packet rate fields present */
	uint8_t ulpr;		/* uplink packet rate fields present */
	uint8_t uplnk_time_unit;
	uint16_t max_uplnk_pckt_rate;
	uint8_t dnlnk_time_unit;
	uint16_t max_dnlnk_pckt_rate;
} packet_rate_t;
/**
 * @brief : Maintains DL Flow Level Marking
 */
typedef struct dl_flow_level_marking_t {
	uint8_t sci;		/* service class indicator present */
	uint8_t ttc;		/* ToS/traffic class present */
	uint16_t tostraffic_cls;
	uint16_t svc_cls_indctr;
} dl_flow_lvl_marking_t;
/**
 * @brief : Maintains qfi value
 */
typedef struct qfi_info_t {
	uint8_t qfi_value;	/* QoS Flow Identifier */
} qfi_t;
/**
 * @brief : Maintains rqi value
 */
typedef struct rqi_info_t {
	uint8_t rqi;		/* Reflective QoS Indicator */
} rqi_t;
/**
 * @brief : Maintains Paging Policy Indicator value
 */
typedef struct paging_policy_indctr_t {
	uint8_t ppi_value;
} paging_plcy_indctr_t;
/**
 * @brief : Maintains Averaging Window
 */
typedef struct avgng_window_t {
	uint32_t avgng_wnd;
} avgng_wnd_t;
/**
 * @brief : Maintains Downlink Data Notification Delay
 */
typedef struct downlink_data_notif_delay_t {
	/* Note: delay_val_in_integer_multiples_of_50_millisecs_or_zero */
	uint8_t delay;
} dnlnk_data_notif_delay_t;
/**
 * @brief : Maintains Suggested Buffering Packets Count
 */
typedef struct suggested_buf_packets_cnt_t {
	uint8_t pckt_cnt_val;
} suggstd_buf_pckts_cnt_t;
/**
 * @brief : Maintains DL Buffering Suggested Packets Count
 */
typedef struct dl_buffering_suggested_packets_cnt_t {
	uint16_t pckt_cnt_val;
} dl_buffering_suggested_packets_cnt_t;
/**
 * @brief : Maintains measurement methods in urr
 */
typedef struct measurement_method_t {
	uint8_t event;		/* event-based measurement */
	uint8_t volum;		/* volume-based measurement */
	uint8_t durat;		/* duration-based measurement */
}measurement_method;
/**
 * @brief : Maintains reporting trigger in urr
 */
typedef struct reporting_trigger_t {
	uint8_t timth;		/* report on time threshold */
	uint8_t volth;		/* report on volume threshold */
	uint8_t eveth;		/* report on event threshold */
}reporting_trigger;
/**
 * @brief : Maintains volume threshold in urr
 */
typedef struct volume_threshold_t {
	uint64_t total_volume;
	uint64_t uplink_volume;
	uint64_t downlink_volume;
}volume_threshold;
/**
 * @brief : Maintains time threshold in urr
 */
typedef struct time_threshold_t {
	uint32_t time_threshold;	/* seconds */
}time_threshold;
/**
 * @brief : Maintains far information
 */
typedef struct far_cp_info_t {
	uint8_t bar_id_value;                         /* BAR ID */
	uint32_t far_id_value;                        /* FAR ID */
	uint64_t session_id;                          /* Session ID */
	ntwk_inst_t ntwk_inst;                        /* Network Instance */
	dst_intfc_t dst_intfc;                        /* Destination Interface */
	outer_hdr_creation_t outer_hdr_creation;      /* Outer Header Creation */
	trnspt_lvl_marking_t trnspt_lvl_marking;      /* Transport Level Marking */
	frwdng_plcy_t frwdng_plcy;                    /* Forwarding policy */
	hdr_enrchmt_t hdr_enrchmt;                    /* Container for header enrichment */
	apply_action actions;                         /* Apply Action parameters*/
}far_t;
/**
 * @brief : Maintains qer information
 */
typedef struct qer_cp_info_t {
	uint32_t qer_id;                              /* QER ID */
	uint32_t qer_corr_id_val;                     /* QER Correlation ID */
	uint64_t session_id;                          /* Session ID */
	gate_status_t gate_status;                    /* Gate Status UL/DL */
	mbr_t max_bitrate;                            /* Maximum Bitrate */
	gbr_t guaranteed_bitrate;                     /* Guaranteed Bitrate */
	packet_rate_t packet_rate;                    /* Packet Rate */
	dl_flow_lvl_marking_t dl_flow_lvl_marking;    /* Downlink Flow Level Marking */
	qfi_t qos_flow_ident;                         /* QOS Flow Ident */
	rqi_t reflective_qos;                         /* RQI */
	paging_plcy_indctr_t paging_plcy_indctr;      /* Paging policy */
	avgng_wnd_t avgng_wnd;                        /* Averaging Window */
}qer_t;
/**
 * @brief : Maintains urr information
 */
typedef struct urr_cp_info_t {
	uint32_t urr_id_value;		/* URR ID */
	uint64_t session_id;		/* owning PFCP session */
	measurement_method mea_mt;	/* what to measure */
	reporting_trigger rept_trigg;	/* when to report */
	volume_threshold vol_th;	/* volume trigger thresholds */
	time_threshold time_th;		/* time trigger threshold */
}urr_t;
/**
 * @brief : Maintains pdr information for CP
 */
typedef struct pdr_cp_info_t {
	uint8_t urr_id_count;                         /* Number of URR */
	uint8_t qer_id_count;                         /* Number of QER */
	uint8_t actvt_predef_rules_count;             /* Number of predefine rules */
	uint8_t create_far;                           /* flag: a FAR must be created with this PDR */
	uint8_t create_urr;                           /* flag: a URR must be created with this PDR */
	uint16_t rule_id;                             /* PDR ID*/
	uint32_t prcdnc_val;                          /* Precedence Value*/
	uint64_t session_id;                          /* Session ID */
	pdi_t pdi;                                    /* Packet Detection Information */
	far_t far;                                    /* FAR structure info */
	qer_t qer;                                    /* primary QER info */
	urr_t urr;                                    /* primary URR info */
	outer_hdr_removal_t outer_hdr_removal;        /* Outer Header Removal */
	urr urr_id[MAX_LIST_SIZE];                    /* Collection of URR IDs */
	qer qer_id[MAX_LIST_SIZE];                    /* Collection of QER IDs */
	actvt_predef_rules rules[MAX_LIST_SIZE];      /* Collection of active predefined rules */
	char rule_name[RULE_NAME_LEN];                /* Rule name for which the PDR is created */
}pdr_t;
/**
 * @brief : Maintains bar information
 */
typedef struct bar_cp_info_t {
	uint8_t bar_id;                               /* BAR ID */
	dnlnk_data_notif_delay_t ddn_delay;           /* DDN delay */
	suggstd_buf_pckts_cnt_t suggstd_buf_pckts_cnt;		/* suggested buffering pkt count */
	dl_buffering_suggested_packets_cnt_t dl_buf_suggstd_pckts_cnt;	/* DL buffering pkt count */
}bar_t;
#endif /* PFCP_STRUCT_H */
|
nikhilc149/e-utran-features-bug-fixes | dp/up_kni_pkt_handler.c | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_arp.h>
/* KNI specific headers */
#include <rte_kni.h>
#include <rte_cycles.h>
#include <rte_atomic.h>
#include <rte_bus_pci.h>
#include "up_main.h"
#include "pipeline/epc_arp.h"
#include "gw_adapter.h"
#define TX_QUEUE 1

/* Scratch MAC address storage */
struct ether_addr mac = {0};

/* Macros for printing using RTE_LOG */
/* Per-port UDP sockets for the IPv4/IPv6 channels, indexed by port id.
 * NOTE(review): "= {-1}" initializes only element 0 (which wraps to
 * UINT_MAX since the arrays are unsigned); element 1 is zero. Code that
 * treats these values as "invalid fd" sentinels should verify both cases. */
unsigned int fd_array_v4[2] = {-1};
unsigned int fd_array_v6[2] = {-1};

extern struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
extern struct rte_mempool *kni_mpool;
extern int clSystemLog;

#define NB_RXD 1024	/* default number of RX descriptors per queue */
extern struct rte_eth_conf port_conf_default;

/* KNI kernel-request callbacks, registered with rte_kni_alloc() below */
static int kni_change_mtu(uint16_t port_id, unsigned int new_mtu);
static int kni_config_network_interface(uint16_t port_id, uint8_t if_up);
static int kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[]);
void
kni_burst_free_mbufs(struct rte_mbuf **pkts, unsigned num)
{
	/* Release every mbuf in the burst and clear the caller's slots so a
	 * stale pointer can never be transmitted or freed twice. */
	if (pkts == NULL)
		return;

	for (unsigned idx = 0; idx < num; idx++) {
		rte_pktmbuf_free(pkts[idx]);
		pkts[idx] = NULL;
	}
}
/*
 * Sanity-check the KNI configuration: the enabled-port mask must match the
 * per-port parameter table exactly, and the RX/TX lcores assigned to each
 * configured port must be enabled in EAL. Inconsistencies in lcore binding
 * are fatal (rte_exit); an empty mask returns -1.
 */
int
validate_parameters(uint32_t portmask)
{
	uint32_t i;

	/* At least one port must be enabled. */
	if (!portmask) {
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"No port configured in port mask\n", LOG_VALUE);
		return -1;
	}

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		/* A port must be present in both the mask and the parameter
		 * table, or in neither. */
		if (((portmask & (1 << i)) && !kni_port_params_array[i]) ||
			(!(portmask & (1 << i)) && kni_port_params_array[i]))
			rte_exit(EXIT_FAILURE, "portmask is not consistent "
				"to port ids specified %u\n", portmask);

		/* RX lcore of every configured port must be enabled in EAL. */
		if (kni_port_params_array[i] && !rte_lcore_is_enabled(\
			(unsigned)(kni_port_params_array[i]->lcore_rx)))
			rte_exit(EXIT_FAILURE, "lcore id %u for "
					"port %d receiving not enabled\n",
					kni_port_params_array[i]->lcore_rx,
					kni_port_params_array[i]->port_id);

		/* Same check for the TX lcore. */
		if (kni_port_params_array[i] && !rte_lcore_is_enabled(\
			(unsigned)(kni_port_params_array[i]->lcore_tx)))
			rte_exit(EXIT_FAILURE, "lcore id %u for "
					"port %d transmitting not enabled\n",
					kni_port_params_array[i]->lcore_tx,
					kni_port_params_array[i]->port_id);
	}

	return 0;
}
/**
 * Burst rx from dpdk interface and transmit burst to kni interface.
 * Pkts transmitted to KNI interface, onwards linux will handle whatever pkts rx
 * on kni interface
 */
void
kni_ingress(struct kni_port_params *p,
		struct rte_mbuf *pkts_burst[PKT_BURST_SZ],
		unsigned nb_rx) {

	if (p == NULL) {
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"KNI port params is NULL!!!\n", LOG_VALUE);
		return;
	}

	/* NOTE(review): the same burst is pushed to every KNI device when
	 * nb_kni > 1; mbufs consumed (or freed) by an earlier iteration would
	 * be reused by the next one. This is only safe while nb_kni == 1 --
	 * confirm the deployment never binds multiple kernel threads. */
	for (uint32_t i = 0; i < p->nb_kni; i++) {
		/* Burst rx from eth */
		if (unlikely(nb_rx > PKT_BURST_SZ)) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Error receiving from eth\n", LOG_VALUE);
			return;
		}

		if (nb_rx > 0) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"KNI probe number of bytes rx=%u\n",
					LOG_VALUE, nb_rx);
		}

		/* Burst tx to kni: hand the packets to the kernel interface. */
		unsigned int num = rte_kni_tx_burst(p->kni[i], pkts_burst, nb_rx);
		if (unlikely(num < nb_rx)) {
			/* Free mbufs not tx to kni interface */
			kni_burst_free_mbufs(&pkts_burst[num], nb_rx - num);
		}
	}
}
/**
 * Burst rx from kni interface and enqueue rx pkts in ring.
 * Packets that cannot be enqueued (ring full) are freed and dropped.
 */
void
kni_egress(struct kni_port_params *p)
{
	struct rte_mbuf *pkts_burst[PKT_BURST_SZ] = {NULL};

	if (p == NULL)
		return;

	for (uint32_t i = 0; i < p->nb_kni; i++) {
		/* Burst rx from kni */
		unsigned nb_rx = rte_kni_rx_burst(p->kni[i], pkts_burst, PKT_BURST_SZ);
		if (unlikely(nb_rx > PKT_BURST_SZ)) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Error receiving from KNI\n", LOG_VALUE);
			return;
		}

		if (nb_rx > 0) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"KNI probe number of bytes rx=%u\n", LOG_VALUE, nb_rx);
		}

		/* Push each received packet onto the shared ring for this
		 * port; on ring-full the packet is dropped, not retried. */
		for (uint32_t pkt_cnt = 0; pkt_cnt < nb_rx; ++pkt_cnt) {
			int ret = rte_ring_enqueue(shared_ring[p->port_id], pkts_burst[pkt_cnt]);
			if (ret == -ENOBUFS) {
				rte_pktmbuf_free(pkts_burst[pkt_cnt]);
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Can't queue pkt- ring full"
						" So Dropping pkt", LOG_VALUE);
				continue;
			}
		}
	}
}
/* Initialize KNI subsystem */
void init_kni(void)
{
unsigned int num_of_kni_ports = 0, i = 0;
struct kni_port_params **params = kni_port_params_array;
/* Calculate the maximum number of KNI interfaces that will be used */
for (i = 0; i < nb_ports; i++) {
if (kni_port_params_array[i]) {
num_of_kni_ports += (params[i]->nb_lcore_k ?
params[i]->nb_lcore_k : 1);
}
}
/* Invoke rte KNI init to preallocate the ports */
rte_kni_init(num_of_kni_ports);
}
/* Check the link status of all ports in up to 9s, and print them finally.
 * Polls every masked port until all links are up or the retry budget is
 * exhausted, then prints one final status line per port. */
void check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
/* NOTE(review): the original comment claimed 100ms, but this value is
 * passed directly to rte_delay_ms(), i.e. the poll interval is 10 ms and
 * the total wait is bounded by MAX_CHECK_TIME iterations. */
#define CHECK_INTERVAL 10
#define MAX_CHECK_TIME 9 /* number of poll iterations */
	uint16_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Checking link status", LOG_VALUE);
	fflush(stdout);

	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		for (portid = 0; portid < port_num; portid++) {
			/* Skip ports not enabled in the mask. */
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
							"Port%d Link Up - speed %uMbps - %s\n",
							portid, link.link_speed,
							(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
							("full-duplex") : ("half-duplex\n"));
				else
					clLog(clSystemLog, eCLSeverityDebug,
							LOG_FORMAT"Port %d Link Down\n", LOG_VALUE, portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"All port equal to 0", LOG_VALUE);
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
			printf("done\n");
		}
	}
}
/**
 * @brief : Callback for request of changing MTU.
 *          Stops the port, reconfigures it with the jumbo-frame offload and
 *          max RX packet length matching the new MTU, re-creates RX queue 0,
 *          then restarts the port. The sequence (stop -> configure ->
 *          queue setup -> start) is mandated by the DPDK ethdev API.
 * @param : port_id, port number
 * @param : new_mtu, new mtu value
 * @return : Returns 0 in case of success , negative errno otherwise
 */
static int
kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
{
	int ret;
	uint16_t nb_rxd = NB_RXD;
	struct rte_eth_conf conf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxq_conf;

	if (port_id >= rte_eth_dev_count()) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Invalid port id %d\n", LOG_VALUE, port_id);
		return -EINVAL;
	}

	clLog(clSystemLog, eCLSeverityInfo,
			LOG_FORMAT"Change MTU of port %d to %u\n", LOG_VALUE, port_id, new_mtu);

	/* Stop specific port */
	rte_eth_dev_stop(port_id);

	/* Start from the application's default config, then adjust. */
	memcpy(&conf, &port_conf_default, sizeof(conf));

	/* Set new MTU: enable jumbo frames only when the MTU requires it. */
	if (new_mtu > ETHER_MAX_LEN)
		conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;

	/* mtu + length of header + length of FCS = max pkt length */
	conf.rxmode.max_rx_pkt_len = new_mtu + KNI_ENET_HEADER_SIZE +
		KNI_ENET_FCS_SIZE;

	ret = rte_eth_dev_configure(port_id, 1, 1, &conf);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Fail to reconfigure port %d\n", LOG_VALUE, port_id);
		return ret;
	}

	ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd, NULL);
	if (ret < 0) {
		rte_exit(EXIT_FAILURE, LOG_FORMAT"Could not adjust number of descriptors "
				"for port%u (%d)\n", LOG_VALUE, (unsigned int)port_id,
				ret);
	}

	/* Re-create RX queue 0 with the offloads chosen above, using the
	 * KNI mbuf pool. */
	rte_eth_dev_info_get(port_id, &dev_info);
	rxq_conf = dev_info.default_rxconf;
	rxq_conf.offloads = conf.rxmode.offloads;

	struct rte_mempool *mbuf_pool = kni_mpool;

	ret = rte_eth_rx_queue_setup(port_id, 0, nb_rxd,
			rte_eth_dev_socket_id(port_id), &rxq_conf, mbuf_pool);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Fail to setup Rx queue of port %d\n",
				LOG_VALUE, port_id);
		return ret;
	}

	/* Restart specific port */
	ret = rte_eth_dev_start(port_id);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Fail to restart port %d\n", LOG_VALUE, port_id);
		return ret;
	}

	return 0;
}
/**
 * @brief : Callback for request of releasing kni.
 *          Releases every KNI device owned by the port, clears the stored
 *          handles, then stops the ethernet device.
 * @param : port_id, port number
 * @return : Returns 0 in case of success , -1 otherwise
 */
static int
kni_free_kni(uint16_t port_id)
{
	uint8_t i;
	struct kni_port_params **p = kni_port_params_array;

	if (port_id >= RTE_MAX_ETHPORTS || !p[port_id]) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Error, in Kni free\n", LOG_VALUE);
		return -1;
	}

	for (i = 0; i < p[port_id]->nb_kni; i++) {
		/* BUGFIX: release the i-th device; the original released
		 * kni[0] on every iteration, leaking the remaining devices
		 * whenever nb_kni > 1. */
		if (rte_kni_release(p[port_id]->kni[i]))
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Fail to release kni\n", LOG_VALUE);
		p[port_id]->kni[i] = NULL;
	}
	rte_eth_dev_stop(port_id);

	return 0;
}
/**
 * @brief : Callback for request of configuring network interface up/down.
 *          Starts/stops the ethdev, then (re)creates the per-port IPv4 and
 *          IPv6 UDP sockets used as side channels.
 * @param : port_id, port number
 * @param : if_up, flag to check if interface is up
 * @return : Returns 0 in case of success , negative errno otherwise
 */
static int
kni_config_network_interface(uint16_t port_id, uint8_t if_up)
{
	int ret = 0;
	if (port_id >= rte_eth_dev_count() || port_id >= RTE_MAX_ETHPORTS) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Invalid port id %d\n", LOG_VALUE, port_id);
		return -EINVAL;
	}

	clLog(clSystemLog, eCLSeverityInfo,
			LOG_FORMAT"Configure network interface of %d %s\n", LOG_VALUE,
			port_id, if_up ? "up" : "down");

	if (if_up != 0) { /* Configure network interface up */
		rte_eth_dev_stop(port_id);
		ret = rte_eth_dev_start(port_id);
	} else { /* Configure network interface down */
		rte_eth_dev_stop(port_id);
	}

	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to start port %d\n", LOG_VALUE, port_id);
	}

	/* BUGFIX: fd_array_v4/v6 have only 2 slots but port_id was checked
	 * solely against RTE_MAX_ETHPORTS, so a larger port id would write
	 * out of bounds. Also close any socket left from a previous up/down
	 * cycle before overwriting its slot, to avoid leaking descriptors
	 * (this callback runs on every interface state change). */
	if (port_id >= (sizeof(fd_array_v4) / sizeof(fd_array_v4[0]))) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"No UDP channel slot for port %d\n",
				LOG_VALUE, port_id);
		return ret;
	}

	/*Create udp socket for IPv4 Channel */
	if ((int)fd_array_v4[port_id] > 0)
		close(fd_array_v4[port_id]);
	int client_fd_v4 = socket(AF_INET, SOCK_DGRAM, 0);
	if(client_fd_v4 < 0) {
		rte_panic("KNI:IPv4:Socket call error : %s", strerror(errno));
	}
	fd_array_v4[port_id] = client_fd_v4;

	/*Create udp socket for IPv6 Channel */
	if ((int)fd_array_v6[port_id] > 0)
		close(fd_array_v6[port_id]);
	int client_fd_v6 = socket(AF_INET6, SOCK_DGRAM, 0);
	if (client_fd_v6 < 0) {
		rte_panic("KNI:IPv6:Socket call error : %s", strerror(errno));
	}
	fd_array_v6[port_id] = client_fd_v6;

	return ret;
}
/**
 * @brief : Callback for request to print ethernet address
 * @param : name, name
 * @param : mac_addr, ethernet address
 * @return : Returns nothing
 */
static void
print_ethaddr(const char *name, struct ether_addr *mac_addr)
{
	char addr_str[ETHER_ADDR_FMT_SIZE];

	/* Render the MAC into its canonical textual form, then log it. */
	ether_format_addr(addr_str, ETHER_ADDR_FMT_SIZE, mac_addr);

	clLog(clSystemLog, eCLSeverityInfo,
			LOG_FORMAT"\tMAC Address : %s , ETHER Address : %s\n", LOG_VALUE, name, addr_str);
}
/**
 * @brief : Callback for request configuring mac address.
 *          Logs the requested address and programs it as the port's default
 *          MAC via the ethdev API.
 * @param : port_id, port number
 * @param : mac_addr, ethernet address (6 raw bytes)
 * @return : Returns 0 in case of success , negative errno otherwise
 */
static int
kni_config_mac_address(uint16_t port_id, uint8_t mac_addr[])
{
	int ret = 0;

	if (port_id >= rte_eth_dev_count() || port_id >= RTE_MAX_ETHPORTS) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Invalid port id %d\n", LOG_VALUE, port_id);
		return -EINVAL;
	}

	clLog(clSystemLog, eCLSeverityInfo,
			LOG_FORMAT"Configure mac address of %d\n", LOG_VALUE, port_id);
	print_ethaddr("Address:", (struct ether_addr *)mac_addr);

	ret = rte_eth_dev_default_mac_addr_set(port_id,
			(struct ether_addr *)mac_addr);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to config mac_addr for port %d\n",
				LOG_VALUE, port_id);
	}

	return ret;
}
/*
 * Allocate the KNI device(s) for a port. One KNI per bound kernel lcore,
 * or a single KNI (named after the WB/EB interface) when none are bound.
 * The first KNI of a port is the master and carries the kernel-request
 * callbacks (MTU change, if up/down, MAC set).
 * Returns 0 on success, -1 on bad port, exits the process on alloc failure.
 */
int
kni_alloc(uint16_t port_id)
{
	uint8_t i;
	struct rte_kni *kni = NULL;
	struct rte_kni_conf conf;
	struct kni_port_params **params = kni_port_params_array;

	/* select the mempool to be used based on port_id */
	struct rte_mempool *mbuf_pool = kni_mpool;

	if (port_id >= RTE_MAX_ETHPORTS || !params[port_id]) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Error, in Kni allocation\n", LOG_VALUE);
		return -1;
	}

	params[port_id]->nb_kni = params[port_id]->nb_lcore_k ?
		params[port_id]->nb_lcore_k : 1;

	for (i = 0; i < params[port_id]->nb_kni; i++) {
		/* Clear conf at first */
		memset(&conf, 0, sizeof(conf));
		if (params[port_id]->nb_lcore_k) {
			/* One kernel thread per lcore, pinned to it. */
			snprintf(conf.name, RTE_KNI_NAMESIZE,
					"vEth%u_%u", port_id, i);
			conf.core_id = params[port_id]->lcore_k[i];
			conf.force_bind = 1;
		} else {
			/* Single kernel thread: name the netdev after the
			 * configured west/east-bound interface. */
			if (port_id == 0) {
				memcpy(conf.name, app.wb_iface_name,
						RTE_KNI_NAMESIZE);
			} else if (port_id == 1) {
				memcpy(conf.name, app.eb_iface_name,
						RTE_KNI_NAMESIZE);
			}
		}
		conf.group_id = port_id;
		conf.mbuf_size = MAX_PACKET_SZ;

		/* Get the interface default mac address */
		rte_eth_macaddr_get(port_id,
				(struct ether_addr*)&conf.mac_addr);

		/*
		 * The first KNI device associated to a port
		 * is the master, for multiple kernel thread
		 * environment.
		 */
		if (i == 0) {
			struct rte_kni_ops ops;
			struct rte_eth_dev_info dev_info;

			memset(&dev_info, 0, sizeof(dev_info));
			rte_eth_dev_info_get(port_id, &dev_info);

			/* Propagate PCI identity so the kernel side can
			 * associate the netdev with the physical device. */
			if (dev_info.pci_dev) {
				conf.addr = dev_info.pci_dev->addr;
				conf.id = dev_info.pci_dev->id;
			}

			rte_eth_dev_get_mtu(port_id, &conf.mtu);

			/* Only the master KNI registers the request ops. */
			memset(&ops, 0, sizeof(ops));
			ops.port_id = port_id;
			ops.change_mtu = kni_change_mtu;
			ops.config_network_if = kni_config_network_interface;
			ops.config_mac_address = kni_config_mac_address;

			kni = rte_kni_alloc(mbuf_pool, &conf, &ops);
		} else {
			kni = rte_kni_alloc(mbuf_pool, &conf, NULL);
		}

		if (!kni)
			rte_exit(EXIT_FAILURE, LOG_FORMAT"Fail to create kni for "
					"port: %d\n", LOG_VALUE, port_id);

		params[port_id]->kni[i] = kni;
	}
	return 0;
}
/* Tear down all KNI state: close the KNI subsystem, release the KNI
 * devices of every masked port, then free the per-port parameter blocks. */
void
free_kni_ports(void) {
	uint16_t total_ports = rte_eth_dev_count();

	rte_kni_close();

	for (uint8_t port = 0; port < total_ports; port++) {
		if (app.ports_mask & (1 << port))
			kni_free_kni(port);
	}

	for (uint16_t idx = 0; idx < RTE_MAX_ETHPORTS; idx++) {
		if (kni_port_params_array[idx] == NULL)
			continue;
		rte_free(kni_port_params_array[idx]);
		kni_port_params_array[idx] = NULL;
	}
}
|
nikhilc149/e-utran-features-bug-fixes | pfcp_messages/recovery_api.c | <reponame>nikhilc149/e-utran-features-bug-fixes
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pfcp_util.h"
#include "pfcp_session.h"
#include "pfcp_association.h"
#include "pfcp_enum.h"
#include "csid_struct.h"
#include "pfcp_set_ie.h"
#include "pfcp_messages_encoder.h"
#include "gw_adapter.h"
#include "pfcp.h"
#include "csid_struct.h"
#include "teid.h"
#ifdef CP_BUILD
#include "cp_config.h"
#include "cp.h"
/* Worker thread used while recovering affected sessions. */
static pthread_t recov_thread;
/* Address of the peer (UP) node whose sessions are being recovered. */
node_address_t recov_peer_addr;

extern uint8_t recovery_flag;
extern pfcp_config_t config;
extern int clSystemLog;
extern int pfcp_fd;
extern int pfcp_fd_v6;
extern peer_addr_t upf_pfcp_sockaddr;

/*
 * num_sess : Contain Number of session est.
 * req sent to the peer node while Recover affected session's
 */
uint64_t num_sess ;
#endif /* CP_BUILD */
/**
 * @brief : Fill create pdr ie. Encodes one PFCP Create PDR grouped IE
 *          (PDR ID, Precedence, PDI, optional Outer Header Removal, FAR ID,
 *          URR IDs, QER IDs) from the CP's pdr_t, keeping a running byte
 *          count so the grouped IE headers carry correct lengths.
 * @param : create_pdr, buffer to be filled
 * @param : pdr, pdr information
 * @param : bearer, bearer information (cp_mode and charging flags decide
 *          which optional IEs are included)
 * @return : Returns the total encoded length of the Create PDR IE
 *           (payload + IE header) in bytes
 */
static int
fill_create_pdr(pfcp_create_pdr_ie_t *create_pdr, pdr_t *pdr, eps_bearer *bearer)
{
	int pdr_header_len = 0;
	/* Filling pdr ID */
	pfcp_set_ie_header(&((create_pdr)->pdr_id.header), PFCP_IE_PDR_ID,
			(sizeof(pfcp_pdr_id_ie_t) - sizeof(pfcp_ie_header_t)));
	pdr_header_len += sizeof(pfcp_pdr_id_ie_t);
	create_pdr->pdr_id.rule_id = pdr->rule_id;

	/* Filling Precedance */
	pfcp_set_ie_header(&((create_pdr)->precedence.header), PFCP_IE_PRECEDENCE,
			(sizeof(pfcp_precedence_ie_t) - sizeof(pfcp_ie_header_t)));
	pdr_header_len += sizeof(pfcp_precedence_ie_t);
	create_pdr->precedence.prcdnc_val = pdr->prcdnc_val;

	/* Filling PDI: pdi_header_len tracks only the PDI payload bytes. */
	int pdi_header_len = 0;
	/* -> source Interface */
	pfcp_set_ie_header(&(create_pdr->pdi.src_intfc.header), PFCP_IE_SRC_INTFC,
			(sizeof(pfcp_src_intfc_ie_t) - sizeof(pfcp_ie_header_t)));
	pdi_header_len += sizeof(pfcp_src_intfc_ie_t);
	create_pdr->pdi.src_intfc.src_intfc_spare = 0;
	create_pdr->pdi.src_intfc.interface_value = pdr->pdi.src_intfc.interface_value;

	/* -> F-TEID: included for SGWC, or whenever the source is not CORE.
	 * The IE length excludes whichever address family is absent. */
	if ((bearer->pdn->context->cp_mode == SGWC) ||
		(pdr->pdi.src_intfc.interface_value != SOURCE_INTERFACE_VALUE_CORE)) {
		int len = 0;
		if (pdr->pdi.local_fteid.v4) {
			len = sizeof(pfcp_fteid_ie_t) -
				( sizeof(create_pdr->pdi.local_fteid.ipv6_address)
				  + sizeof(create_pdr->pdi.local_fteid.choose_id));
		} else if (pdr->pdi.local_fteid.v6){
			len = sizeof(pfcp_fteid_ie_t) -
				(sizeof(create_pdr->pdi.local_fteid.ipv4_address)
				 + sizeof(create_pdr->pdi.local_fteid.choose_id));
		}
		pfcp_set_ie_header(&(create_pdr->pdi.local_fteid.header), PFCP_IE_FTEID,
				(len - sizeof(pfcp_ie_header_t)));

		if (pdr->pdi.local_fteid.v4) {
			create_pdr->pdi.local_fteid.v4 = PRESENT;
			create_pdr->pdi.local_fteid.teid = pdr->pdi.local_fteid.teid;
			create_pdr->pdi.local_fteid.ipv4_address = pdr->pdi.local_fteid.ipv4_address;
		} else if (pdr->pdi.local_fteid.v6) {
			create_pdr->pdi.local_fteid.v6 = PRESENT;
			create_pdr->pdi.local_fteid.teid = pdr->pdi.local_fteid.teid;
			memcpy(&create_pdr->pdi.local_fteid.ipv6_address,
					&pdr->pdi.local_fteid.ipv6_address, IPV6_ADDR_LEN);
		}
		pdi_header_len += len;
	}

	/* PGWC/SAEGWC, CORE side: add Network Instance and UE IP address. */
	if ((bearer->pdn->context->cp_mode != SGWC) &&
		(pdr->pdi.src_intfc.interface_value == SOURCE_INTERFACE_VALUE_CORE)) {
		/* -> netework Instance */
		pfcp_set_ie_header(&(create_pdr->pdi.ntwk_inst.header), PFCP_IE_NTWK_INST,
				(sizeof(pfcp_ntwk_inst_ie_t) - sizeof(pfcp_ie_header_t)));
		pdi_header_len += sizeof(pfcp_ntwk_inst_ie_t);
		strncpy((char *)create_pdr->pdi.ntwk_inst.ntwk_inst,
				(char *)&pdr->pdi.ntwk_inst.ntwk_inst, PFCP_NTWK_INST_LEN);

		int len = 0;
		/* -> UE IP address (length excludes the absent family and the
		 * prefix-delegation byte) */
		if (pdr->pdi.ue_addr.v4) {
			len = sizeof(pfcp_ue_ip_address_ie_t)
				-(sizeof(create_pdr->pdi.ue_ip_address.ipv6_address)
				+ sizeof(create_pdr->pdi.ue_ip_address.ipv6_pfx_dlgtn_bits));
		} else if (pdr->pdi.ue_addr.v6) {
			len = sizeof(pfcp_ue_ip_address_ie_t)
				- (sizeof(create_pdr->pdi.ue_ip_address.ipv4_address)
				+ sizeof(create_pdr->pdi.ue_ip_address.ipv6_pfx_dlgtn_bits));
		}

		pfcp_set_ie_header(&((create_pdr)->pdi.ue_ip_address.header), PFCP_IE_UE_IP_ADDRESS,
				(len - sizeof(pfcp_ie_header_t)));
		if (pdr->pdi.ue_addr.v4) {
			create_pdr->pdi.ue_ip_address.v4 = PRESENT;
			create_pdr->pdi.ue_ip_address.ipv4_address = pdr->pdi.ue_addr.ipv4_address;
		} else if (pdr->pdi.ue_addr.v6) {
			create_pdr->pdi.ue_ip_address.v6 = PRESENT;
			memcpy(&create_pdr->pdi.ue_ip_address.ipv6_address,
				&pdr->pdi.ue_addr.ipv6_address, IPV6_ADDR_LEN);
		}
		pdi_header_len += len;
	}

	/* --> UE IPv6 address with prefix delegation (ACCESS side only;
	 * mutually exclusive with the CORE branch above) */
	if (((bearer->pdn->context->cp_mode != SGWC) &&
		(pdr->pdi.src_intfc.interface_value == SOURCE_INTERFACE_VALUE_ACCESS)
		&& (pdr->pdi.ue_addr.v6))) {

		uint8_t len = 0;
		create_pdr->pdi.ue_ip_address.v6 = PRESENT;
		create_pdr->pdi.ue_ip_address.ipv6d = PRESENT;
		create_pdr->pdi.ue_ip_address.ipv6_pfx_dlgtn_bits =
			pdr->pdi.ue_addr.ipv6_pfx_dlgtn_bits;
		memcpy(&create_pdr->pdi.ue_ip_address.ipv6_address,
			&pdr->pdi.ue_addr.ipv6_address, IPV6_ADDR_LEN);

		len = sizeof(pfcp_ue_ip_address_ie_t)
			- (sizeof(create_pdr->pdi.ue_ip_address.ipv4_address));

		pfcp_set_ie_header(&((create_pdr)->pdi.ue_ip_address.header), PFCP_IE_UE_IP_ADDRESS,
				(len - sizeof(pfcp_ie_header_t)));
		pdi_header_len += len;
	}

	/* Close the PDI grouped IE: payload length goes in the header, the
	 * header itself is added to the PDR total. */
	pdr_header_len += pdi_header_len + sizeof(pfcp_ie_header_t);
	pfcp_set_ie_header(&(create_pdr->pdi.header), IE_PDI, pdi_header_len);

	/* Outer header removal: needed on the ACCESS side of PGWC/SAEGWC. */
	if((bearer->pdn->context->cp_mode != SGWC) &&
			pdr->pdi.src_intfc.interface_value == SOURCE_INTERFACE_VALUE_ACCESS) {
		pfcp_set_ie_header(&(create_pdr->outer_hdr_removal.header),
				PFCP_IE_OUTER_HDR_REMOVAL, UINT8_SIZE);
		pdr_header_len += (sizeof(pfcp_outer_hdr_removal_ie_t)
				- sizeof(create_pdr->outer_hdr_removal.gtpu_ext_hdr_del));
		create_pdr->outer_hdr_removal.outer_hdr_removal_desc = 0;
	}

	/* FAR ID */
	pfcp_set_ie_header(&(create_pdr->far_id.header), PFCP_IE_FAR_ID,
			(sizeof(pfcp_far_id_ie_t) - sizeof(pfcp_ie_header_t)));
	pdr_header_len += sizeof(pfcp_far_id_ie_t);
	create_pdr->far_id.far_id_value = pdr->far.far_id_value;

	/* URR ID(s): only when CDR generation is enabled for the PDN. */
	if (bearer->pdn->generate_cdr) {
		create_pdr->urr_id_count = pdr->urr_id_count;
		for (uint8_t itr = 0; itr < pdr->urr_id_count; itr++) {
			pfcp_set_ie_header(&(create_pdr->urr_id[itr].header),
					PFCP_IE_URR_ID, UINT32_SIZE);
			/* Default to the primary URR id... */
			create_pdr->urr_id[itr].urr_id_value = pdr->urr.urr_id_value;
			/* If Multiple urr id in one pdr */
			if (pdr->urr_id_count > 1 ) {
				create_pdr->urr_id[itr].urr_id_value = pdr->urr_id[itr].urr_id;
			}
			pdr_header_len += sizeof(pfcp_urr_id_ie_t);
		}
	}

	/* QER ID(s): only with Gx enabled and not acting as SGWC. */
	if((config.use_gx) && bearer->pdn->context->cp_mode != SGWC) {
		create_pdr->qer_id_count = pdr->qer_id_count;
		for(int itr1 = 0; itr1 < pdr->qer_id_count; itr1++) {
			pfcp_set_ie_header(&(create_pdr->qer_id[itr1].header), PFCP_IE_QER_ID,
					(sizeof(pfcp_qer_id_ie_t) - sizeof(pfcp_ie_header_t)));
			pdr_header_len += sizeof(pfcp_qer_id_ie_t);
			create_pdr->qer_id[itr1].qer_id_value = pdr->qer_id[itr1].qer_id;
		}
	}

	/* Close the Create PDR grouped IE and return its full size. */
	pfcp_set_ie_header(&(create_pdr->header), IE_CREATE_PDR, pdr_header_len);

	return (pdr_header_len + sizeof(pfcp_ie_header_t));
}
/**
* @brief : Fill create far ie
* @param : create_far, buffer to be filled
* @param : pdr, pdr information
* @param : bearer, bearer information
* @return : Returns 0 on success, -1 otherwise
*/
static int
fill_create_far(pfcp_create_far_ie_t *create_far, pdr_t *pdr, eps_bearer *bearer)
{
	/* Encode one Create FAR grouped IE for the given PDR: FAR ID,
	 * Apply Action and Forwarding Parameters (destination interface and,
	 * where a GTP-U tunnel towards the peer is needed, the Outer Header
	 * Creation IE). Returns the full IE length including its header. */
	int far_hdr_len = 0;
	/* -> FAR ID */
	pfcp_set_ie_header(&(create_far->far_id.header), PFCP_IE_FAR_ID,
			(sizeof(pfcp_far_id_ie_t) - sizeof(pfcp_ie_header_t)));
	far_hdr_len += sizeof(pfcp_far_id_ie_t);
	create_far->far_id.far_id_value = pdr->far.far_id_value;
	/* -> Apply Action: action flags come from the PDR's FAR; dupl is
	 * taken from the UE context via GET_DUP_STATUS (presumably packet
	 * duplication state — confirm macro semantics). */
	pfcp_set_ie_header(&(create_far->apply_action.header), IE_APPLY_ACTION_ID, UINT8_SIZE);
	create_far->apply_action.forw = pdr->far.actions.forw;
	create_far->apply_action.dupl = GET_DUP_STATUS(bearer->pdn->context);
	create_far->apply_action.nocp = pdr->far.actions.nocp;
	create_far->apply_action.buff = pdr->far.actions.buff;
	create_far->apply_action.drop = pdr->far.actions.drop;
	far_hdr_len += UINT8_SIZE + sizeof(pfcp_ie_header_t);
	/* -> Forwarding Parameters */
	int frw_hdr_len = 0;
	/* --> Destination Interface */
	pfcp_set_ie_header(&(create_far->frwdng_parms.dst_intfc.header),
			IE_DEST_INTRFACE_ID, UINT8_SIZE);
	frw_hdr_len += sizeof(pfcp_dst_intfc_ie_t);
	create_far->frwdng_parms.dst_intfc.interface_value =
		pdr->far.dst_intfc.interface_value;
	/* Outer Header Creation is encoded for every SGWC FAR and, on other
	 * gateways, only for FARs pointing towards the access side. */
	if((bearer->pdn->context->cp_mode == SGWC) ||
			(pdr->far.dst_intfc.interface_value == DESTINATION_INTERFACE_VALUE_ACCESS)) {
		/* --> outer header creation: start with the fixed part (teid +
		 * description flags); the address length is added per IP family. */
		int len = sizeof(create_far->frwdng_parms.outer_hdr_creation.teid)
			+ sizeof(create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc);
		pfcp_set_ie_header(&(create_far->frwdng_parms.outer_hdr_creation.header),
				PFCP_IE_OUTER_HDR_CREATION, len);
		/* SGWC --> access --> ENB S1U */
		/* SAEGWC --> access --> ENB S1U */
		if (((bearer->pdn->context->cp_mode == SGWC) || (bearer->pdn->context->cp_mode == SAEGWC)) &&
				(pdr->far.dst_intfc.interface_value == DESTINATION_INTERFACE_VALUE_ACCESS)) {
			create_far->frwdng_parms.outer_hdr_creation.teid = bearer->s1u_enb_gtpu_teid;
			/* Pick the IP family: prefer the eNB endpoint's own type;
			 * for dual-stack prefer IPv6; otherwise fall back to the
			 * family of the local S1U SGW endpoint (destination address
			 * is always the eNB's). */
			if (bearer->s1u_enb_gtpu_ip.ip_type == IPV4_TYPE) {
				create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv4 = PRESENT;
				len += sizeof(create_far->frwdng_parms.outer_hdr_creation.ipv4_address);
				create_far->frwdng_parms.outer_hdr_creation.ipv4_address = bearer->s1u_enb_gtpu_ip.ipv4_addr;
			} else if (bearer->s1u_enb_gtpu_ip.ip_type == IPV6_TYPE) {
				create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv6 = PRESENT;
				len += sizeof(create_far->frwdng_parms.outer_hdr_creation.ipv6_address);
				memcpy(create_far->frwdng_parms.outer_hdr_creation.ipv6_address,
						bearer->s1u_enb_gtpu_ip.ipv6_addr, IPV6_ADDRESS_LEN);
			} else if ((bearer->s1u_enb_gtpu_ip.ip_type == PDN_TYPE_IPV4_IPV6)
					&& (bearer->s1u_sgw_gtpu_ip.ip_type == PDN_TYPE_IPV4_IPV6)) {
				/* Both endpoints dual-stack: use IPv6. */
				create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv6 = PRESENT;
				len += sizeof(create_far->frwdng_parms.outer_hdr_creation.ipv6_address);
				memcpy(create_far->frwdng_parms.outer_hdr_creation.ipv6_address,
						bearer->s1u_enb_gtpu_ip.ipv6_addr, IPV6_ADDRESS_LEN);
			} else if ((bearer->s1u_sgw_gtpu_ip.ip_type == IPV4_TYPE)
					&& (create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv4 == 0)) {
				create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv4 = PRESENT;
				len += sizeof(create_far->frwdng_parms.outer_hdr_creation.ipv4_address);
				create_far->frwdng_parms.outer_hdr_creation.ipv4_address = bearer->s1u_enb_gtpu_ip.ipv4_addr;
			} else if ((bearer->s1u_sgw_gtpu_ip.ip_type == IPV6_TYPE)
					&& (create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv6 == 0)) {
				create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv6 = PRESENT;
				len += sizeof(create_far->frwdng_parms.outer_hdr_creation.ipv6_address);
				memcpy(create_far->frwdng_parms.outer_hdr_creation.ipv6_address,
						bearer->s1u_enb_gtpu_ip.ipv6_addr, IPV6_ADDRESS_LEN);
			} else {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
						"IP address", LOG_VALUE);
			}
		}
		/* If the FAR carries no destination interface value it defaults to 0
		 * (pdr->far.dst_intfc.interface_value), so the PDI-derived value is
		 * effectively used instead. */
		/* SGWC --> core --> PGW S5S8 */
		if ((bearer->pdn->context->cp_mode == SGWC) &&
				(pdr->far.dst_intfc.interface_value == DESTINATION_INTERFACE_VALUE_CORE)) {
			/* Destination is the PGW S5S8 endpoint; family fallback uses
			 * the local SGW S5S8 endpoint's type, as above. */
			if (bearer->s5s8_pgw_gtpu_ip.ip_type == IPV4_TYPE) {
				create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv4 = PRESENT;
				len += sizeof(create_far->frwdng_parms.outer_hdr_creation.ipv4_address);
				create_far->frwdng_parms.outer_hdr_creation.ipv4_address = bearer->s5s8_pgw_gtpu_ip.ipv4_addr;
			} else if (bearer->s5s8_pgw_gtpu_ip.ip_type == IPV6_TYPE) {
				create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv6 = PRESENT;
				len += sizeof(create_far->frwdng_parms.outer_hdr_creation.ipv6_address);
				memcpy(create_far->frwdng_parms.outer_hdr_creation.ipv6_address,
						bearer->s5s8_pgw_gtpu_ip.ipv6_addr, IPV6_ADDRESS_LEN);
			} else if ((bearer->s5s8_pgw_gtpu_ip.ip_type == PDN_TYPE_IPV4_IPV6)
					&& (bearer->s5s8_sgw_gtpu_ip.ip_type == PDN_TYPE_IPV4_IPV6)) {
				create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv6 = PRESENT;
				len += sizeof(create_far->frwdng_parms.outer_hdr_creation.ipv6_address);
				memcpy(create_far->frwdng_parms.outer_hdr_creation.ipv6_address,
						bearer->s5s8_pgw_gtpu_ip.ipv6_addr, IPV6_ADDRESS_LEN);
			} else if ((bearer->s5s8_sgw_gtpu_ip.ip_type == IPV4_TYPE)
					&& (create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv4 == 0)) {
				create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv4 = PRESENT;
				len += sizeof(create_far->frwdng_parms.outer_hdr_creation.ipv4_address);
				create_far->frwdng_parms.outer_hdr_creation.ipv4_address = bearer->s5s8_pgw_gtpu_ip.ipv4_addr;
			} else if ((bearer->s5s8_sgw_gtpu_ip.ip_type == IPV6_TYPE)
					&& (create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv6 == 0)) {
				create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv6 = PRESENT;
				len += sizeof(create_far->frwdng_parms.outer_hdr_creation.ipv6_address);
				memcpy(create_far->frwdng_parms.outer_hdr_creation.ipv6_address,
						bearer->s5s8_pgw_gtpu_ip.ipv6_addr, IPV6_ADDRESS_LEN);
			} else {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
						"IP address", LOG_VALUE);
			}
			create_far->frwdng_parms.outer_hdr_creation.teid = bearer->s5s8_pgw_gtpu_teid;
		}
		/* PGWC --> access --> SGW S5S8 */
		/* NOTE(review): this compares dst_intfc against
		 * SOURCE_INTERFACE_VALUE_ACCESS while the sibling branches use
		 * DESTINATION_INTERFACE_VALUE_ACCESS — likely the same numeric
		 * value, but confirm the intended constant. */
		if ((bearer->pdn->context->cp_mode == PGWC) &&
				pdr->far.dst_intfc.interface_value == SOURCE_INTERFACE_VALUE_ACCESS) {
			/* Destination is the SGW S5S8 endpoint; family fallback uses
			 * the local PGW S5S8 endpoint's type. */
			if (bearer->s5s8_sgw_gtpu_ip.ip_type == IPV4_TYPE) {
				create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv4 = PRESENT;
				len += sizeof(create_far->frwdng_parms.outer_hdr_creation.ipv4_address);
				create_far->frwdng_parms.outer_hdr_creation.ipv4_address = bearer->s5s8_sgw_gtpu_ip.ipv4_addr;
			} else if (bearer->s5s8_sgw_gtpu_ip.ip_type == IPV6_TYPE) {
				create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv6 = PRESENT;
				len += sizeof(create_far->frwdng_parms.outer_hdr_creation.ipv6_address);
				memcpy(create_far->frwdng_parms.outer_hdr_creation.ipv6_address,
						bearer->s5s8_sgw_gtpu_ip.ipv6_addr, IPV6_ADDRESS_LEN);
			} else if ((bearer->s5s8_sgw_gtpu_ip.ip_type == PDN_TYPE_IPV4_IPV6)
					&& (bearer->s5s8_pgw_gtpu_ip.ip_type == PDN_TYPE_IPV4_IPV6)) {
				create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv6 = PRESENT;
				len += sizeof(create_far->frwdng_parms.outer_hdr_creation.ipv6_address);
				memcpy(create_far->frwdng_parms.outer_hdr_creation.ipv6_address,
						bearer->s5s8_sgw_gtpu_ip.ipv6_addr, IPV6_ADDRESS_LEN);
			} else if ((bearer->s5s8_pgw_gtpu_ip.ip_type == IPV4_TYPE)
					&& (create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv4 == 0)) {
				create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv4 = PRESENT;
				len += sizeof(create_far->frwdng_parms.outer_hdr_creation.ipv4_address);
				create_far->frwdng_parms.outer_hdr_creation.ipv4_address = bearer->s5s8_sgw_gtpu_ip.ipv4_addr;
			} else if ((bearer->s5s8_pgw_gtpu_ip.ip_type == IPV6_TYPE)
					&& (create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv6 == 0)) {
				create_far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv6 = PRESENT;
				len += sizeof(create_far->frwdng_parms.outer_hdr_creation.ipv6_address);
				memcpy(create_far->frwdng_parms.outer_hdr_creation.ipv6_address,
						bearer->s5s8_sgw_gtpu_ip.ipv6_addr, IPV6_ADDRESS_LEN);
			} else {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
						"IP address", LOG_VALUE);
			}
			create_far->frwdng_parms.outer_hdr_creation.teid = bearer->s5s8_sgw_gtpu_teid;
		}
		/* Re-write the Outer Header Creation length now that the address
		 * bytes have been accounted for. */
		create_far->frwdng_parms.outer_hdr_creation.header.len = len;
		frw_hdr_len += len + sizeof(pfcp_ie_header_t);
	}
	pfcp_set_ie_header(&(create_far->frwdng_parms.header), IE_FRWDNG_PARMS, frw_hdr_len);
	far_hdr_len += (frw_hdr_len + sizeof(pfcp_ie_header_t));
	/* Encode a BAR ID only when buffering is requested and this is not a
	 * PGWC, and neither the OI nor the DAF indication flag is set. */
	if (((create_far->apply_action.buff == PRESENT)
				&& ((bearer->pdn->context->cp_mode != PGWC)
					&& (bearer->pdn->context->indication_flag.oi == NOT_PRESENT)
					&& (bearer->pdn->context->indication_flag.daf == NOT_PRESENT)))) {
		/* BAR ID */
		uint16_t bar_hdr_len = 0;
		bar_hdr_len = set_bar_id(&(create_far->bar_id), pdr->far.bar_id_value);
		far_hdr_len += bar_hdr_len;
	}
	pfcp_set_ie_header(&(create_far->header), IE_CREATE_FAR, far_hdr_len);
	return (far_hdr_len + sizeof(pfcp_ie_header_t));
}
/**
 * @brief : Fill create qer ie
 * @param : pfcp_sess_est_req, buffer to be filled
 * @param : bearer, bearer information
 * @return : Returns the accumulated payload length of the filled
 *           Create QER IEs
 */
static int
fill_create_qer(pfcp_sess_estab_req_t *pfcp_sess_est_req, eps_bearer *bearer)
{
	/* Fill one Create QER grouped IE per QER attached to the bearer,
	 * appending after any QERs already present in the request
	 * (create_qer_count is advanced by the caller). */
	int ret = 0;
	int qer_hdr_len = 0;
	int qer_itr = pfcp_sess_est_req->create_qer_count;
	pfcp_create_qer_ie_t *create_qer;
	for (uint8_t itr = 0; itr < bearer->qer_count; itr++) {
		qer_hdr_len = 0;
		create_qer = &pfcp_sess_est_req->create_qer[qer_itr];
		/* QER ID */
		pfcp_set_ie_header(&(create_qer->qer_id.header), PFCP_IE_QER_ID,
				(sizeof(pfcp_qer_id_ie_t) - sizeof(pfcp_ie_header_t)));
		create_qer->qer_id.qer_id_value = bearer->qer_id[itr].qer_id;
		qer_hdr_len += sizeof(pfcp_qer_id_ie_t);
		/* Gate Status: open the gate in both directions.
		 * (Bug fix: the original assigned DL_GATE_OPEN to ul_gate a
		 * second time, leaving dl_gate unset.) */
		pfcp_set_ie_header(&(create_qer->gate_status.header), PFCP_IE_GATE_STATUS,
				(sizeof(pfcp_gate_status_ie_t) - sizeof(pfcp_ie_header_t)));
		create_qer->gate_status.gate_status_spare = 0;
		create_qer->gate_status.ul_gate = UL_GATE_OPEN;
		create_qer->gate_status.dl_gate = DL_GATE_OPEN;
		qer_hdr_len += sizeof(pfcp_gate_status_ie_t);
		/* Look up the stored QoS parameters for this QER; MBR/GBR are
		 * only encoded when an entry exists. */
		qer_t *qer_context = NULL;
		qer_context = get_qer_entry(create_qer->qer_id.qer_id_value, bearer->pdn->seid);
		if (qer_context != NULL) {
			/* Maximum Bit Rate */
			pfcp_set_ie_header(&(create_qer->maximum_bitrate.header), PFCP_IE_MBR,
					(sizeof(pfcp_mbr_ie_t) - sizeof(pfcp_ie_header_t)));
			create_qer->maximum_bitrate.ul_mbr =
				qer_context->max_bitrate.ul_mbr;
			create_qer->maximum_bitrate.dl_mbr =
				qer_context->max_bitrate.dl_mbr;
			qer_hdr_len += sizeof(pfcp_mbr_ie_t);
			/* Guaranteed Bit Rate */
			pfcp_set_ie_header(&(create_qer->guaranteed_bitrate.header), PFCP_IE_GBR,
					(sizeof(pfcp_gbr_ie_t) - sizeof(pfcp_ie_header_t)));
			create_qer->guaranteed_bitrate.ul_gbr =
				qer_context->guaranteed_bitrate.ul_gbr;
			create_qer->guaranteed_bitrate.dl_gbr =
				qer_context->guaranteed_bitrate.dl_gbr;
			qer_hdr_len += sizeof(pfcp_gbr_ie_t);
		}
		/* QER header */
		pfcp_set_ie_header(&(create_qer->header), IE_CREATE_QER, qer_hdr_len);
		ret += qer_hdr_len;
		qer_itr++;
	}
	/* Total payload length of all QERs filled here.
	 * NOTE(review): unlike fill_create_pdr/fill_create_far this total does
	 * not include the per-IE header size — confirm callers expect that. */
	return ret;
}
/**
 * @brief : Fill create urr ie
 * @param : create_urr, structure to be filled
 * @param : pdr, pdr information
 * @return : Returns header length of create urr.
 */
static int
fill_create_urr(pfcp_create_urr_ie_t *create_urr, pdr_t *pdr)
{
	/* Encode one Create URR grouped IE from the PDR's URR parameters:
	 * URR ID, measurement method, reporting triggers and the volume
	 * and/or time thresholds selected by the trigger flags. */
	int ret = 0;
	int urr_hdr_len = 0;
	int vol_th_hdr_len = 0;
	/* URR ID */
	pfcp_set_ie_header(&(create_urr->urr_id.header), PFCP_IE_URR_ID, UINT32_SIZE);
	create_urr->urr_id.urr_id_value = pdr->urr.urr_id_value;
	urr_hdr_len += sizeof(pfcp_urr_id_ie_t);
	/* Measurement Method: volume and/or duration based measurement. */
	pfcp_set_ie_header(&(create_urr->meas_mthd.header), PFCP_IE_MEAS_MTHD, UINT8_SIZE);
	create_urr->meas_mthd.volum = pdr->urr.mea_mt.volum;
	create_urr->meas_mthd.durat = pdr->urr.mea_mt.durat;
	urr_hdr_len += sizeof(pfcp_meas_mthd_ie_t);
	/* Reporting Triggers */
	pfcp_set_ie_header(&(create_urr->rptng_triggers.header), PFCP_IE_RPTNG_TRIGGERS, UINT16_SIZE);
	urr_hdr_len += sizeof(pfcp_rptng_triggers_ie_t);
	/* If Volume and Time threshold Both are Present */
	if ((pdr->urr.rept_trigg.volth == PRESENT) && (pdr->urr.rept_trigg.timth == PRESENT)) {
		create_urr->rptng_triggers.timth = pdr->urr.rept_trigg.timth;
		create_urr->rptng_triggers.volth = pdr->urr.rept_trigg.volth;
		/* Time Threshold */
		pfcp_set_ie_header(&(create_urr->time_threshold.header), PFCP_IE_TIME_THRESHOLD,
				sizeof(pfcp_time_threshold_ie_t) - sizeof(pfcp_ie_header_t));
		create_urr->time_threshold.time_threshold = pdr->urr.time_th.time_threshold;
		urr_hdr_len += sizeof(pfcp_time_threshold_ie_t);
		/* Volume Threshold: only one of the two direction fields is
		 * encoded, so subtract the header and the two unused uint64
		 * volume slots from the struct size. */
		vol_th_hdr_len = (sizeof(pfcp_vol_thresh_ie_t)
				- (sizeof(pfcp_ie_header_t) + (2 * sizeof(uint64_t))));
		pfcp_set_ie_header(&(create_urr->vol_thresh.header), PFCP_IE_VOL_THRESH, vol_th_hdr_len);
		urr_hdr_len += (vol_th_hdr_len + sizeof(pfcp_ie_header_t));
		/* Direction is chosen from the PDI source interface: uplink for
		 * access-side PDRs, downlink otherwise. */
		if (pdr->pdi.src_intfc.interface_value == SOURCE_INTERFACE_VALUE_ACCESS)
		{
			create_urr->vol_thresh.ulvol = PRESENT;
			create_urr->vol_thresh.uplink_volume = pdr->urr.vol_th.uplink_volume;
		} else
		{
			create_urr->vol_thresh.dlvol = PRESENT;
			create_urr->vol_thresh.downlink_volume = pdr->urr.vol_th.downlink_volume;
		}
	/* If only Volume Threshold are Present */
	} else if (pdr->urr.rept_trigg.volth == PRESENT) {
		/* Reporting Triggers */
		create_urr->rptng_triggers.volth = pdr->urr.rept_trigg.volth;
		/* Volume Threshold (same single-direction encoding as above) */
		vol_th_hdr_len = (sizeof(pfcp_vol_thresh_ie_t)
				- (sizeof(pfcp_ie_header_t) + (2 * sizeof(uint64_t))));
		pfcp_set_ie_header(&(create_urr->vol_thresh.header), PFCP_IE_VOL_THRESH, vol_th_hdr_len);
		urr_hdr_len += (vol_th_hdr_len + sizeof(pfcp_ie_header_t));
		if (pdr->pdi.src_intfc.interface_value == SOURCE_INTERFACE_VALUE_ACCESS)
		{
			create_urr->vol_thresh.ulvol = PRESENT;
			create_urr->vol_thresh.uplink_volume = pdr->urr.vol_th.uplink_volume;
		} else
		{
			create_urr->vol_thresh.dlvol = PRESENT;
			create_urr->vol_thresh.downlink_volume = pdr->urr.vol_th.downlink_volume;
		}
	} else {
		/* NOTE(review): this branch is also taken when NEITHER trigger
		 * flag is set, and still encodes a Time Threshold IE with the
		 * (possibly zero) configured value — confirm that is intended. */
		create_urr->rptng_triggers.timth = pdr->urr.rept_trigg.timth;
		/* Time Threshold */
		pfcp_set_ie_header(&(create_urr->time_threshold.header), PFCP_IE_TIME_THRESHOLD,
				sizeof(pfcp_time_threshold_ie_t) - sizeof(pfcp_ie_header_t));
		create_urr->time_threshold.time_threshold = pdr->urr.time_th.time_threshold;
		urr_hdr_len += sizeof(pfcp_time_threshold_ie_t);
	}
	pfcp_set_ie_header(&(create_urr->header), IE_CREATE_URR, urr_hdr_len);
	ret = (urr_hdr_len + sizeof(pfcp_ie_header_t));
	return ret;
}
/**
 * @brief : Function to fill and send a pfcp session establishment request
 * @param : pdn, pdn connection information
 * @param : node_addr, node address (currently unused)
 * @return : Returns 0 on success, -1 otherwise
 */
static int
process_pfcp_sess_est_req(pdn_connection *pdn, node_address_t *node_addr)
{
	/* Rebuild and send a PFCP Session Establishment Request for one PDN
	 * during restoration-recovery: encodes PDR/FAR/URR/QER IEs for every
	 * bearer of the PDN, the FQ-CSIDs, and updates the response/PDN state
	 * machines before sending the message to the UPF. */
	uint32_t seq = 0;
	int ret = 0;
	eps_bearer *bearer = NULL;
	struct resp_info *resp = NULL;
	pfcp_sess_estab_req_t pfcp_sess_est_req = {0};
	ue_context *context = NULL;
	node_address_t node_value = {0};
	context = pdn->context;
	/* TODO: revisit sequence number generation for recovery */
	seq = get_pfcp_sequence_number(PFCP_SESSION_ESTABLISHMENT_REQUEST, seq);
	/* Filling header */
	set_pfcp_seid_header((pfcp_header_t *) &(pfcp_sess_est_req.header),
			PFCP_SESSION_ESTABLISHMENT_REQUEST, HAS_SEID, seq, pdn->context->cp_mode);
	/* Assign DP SEID (fixed format: SEID is 64-bit, %u was undefined
	 * behavior for a uint64_t argument) */
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" : DP SEID %lu\n",
			LOG_VALUE, pdn->dp_seid);
	pfcp_sess_est_req.header.seid_seqno.has_seid.seid = pdn->dp_seid;
	/* Filling Node ID for F-SEID from the local PFCP address matching the
	 * UPF's IP family. */
	if (pdn->upf_ip.ip_type == PDN_IP_TYPE_IPV4) {
		uint8_t temp[IPV6_ADDRESS_LEN] = {0};
		ret = fill_ip_addr(config.pfcp_ip.s_addr, temp, &node_value);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
		}
	} else if (pdn->upf_ip.ip_type == PDN_IP_TYPE_IPV6) {
		ret = fill_ip_addr(0, config.pfcp_ip_v6.s6_addr, &node_value);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
		}
	}
	set_node_id(&(pfcp_sess_est_req.node_id), node_value);
	set_fseid(&(pfcp_sess_est_req.cp_fseid), pdn->seid, node_value);
	/* Filling PDN structure.
	 * NOTE(review): these fields are overwritten by set_pdn_type() below;
	 * kept for safety — confirm and drop the manual fill. */
	pfcp_sess_est_req.pdn_type.header.type = PFCP_IE_PDN_TYPE;
	pfcp_sess_est_req.pdn_type.header.len = UINT8_SIZE;
	pfcp_sess_est_req.pdn_type.pdn_type_spare = 0;
	pfcp_sess_est_req.pdn_type.pdn_type = 1;
	/* Filling Create BAR IE (not applicable on PGWC) */
	if ((pdn->context->cp_mode != PGWC) && (pdn->bar.bar_id)) {
		set_create_bar(&(pfcp_sess_est_req.create_bar), &pdn->bar);
	}
	set_pdn_type(&(pfcp_sess_est_req.pdn_type), &(pdn->pdn_type));
	/* Filling USER ID structure */
	set_user_id(&(pfcp_sess_est_req.user_id), context->imsi);
	/* One PDR/FAR/URR slot per PDR of every bearer of this PDN. */
	uint8_t pdr_idx =0;
	for(uint8_t itr1 = 0; itr1 < MAX_BEARERS; itr1++) {
		bearer = pdn->eps_bearers[itr1];
		if(bearer == NULL)
			continue;
		pfcp_sess_est_req.create_pdr_count += bearer->pdr_count;
		pfcp_sess_est_req.create_far_count += bearer->pdr_count;
		pfcp_sess_est_req.create_urr_count += bearer->pdr_count;
		for(uint8_t itr2 = 0; itr2 < bearer->pdr_count; itr2++) {
			fill_create_pdr(&(pfcp_sess_est_req.create_pdr[pdr_idx]),
					bearer->pdrs[itr2], bearer);
			fill_create_far(&(pfcp_sess_est_req.create_far[pdr_idx]),
					bearer->pdrs[itr2], bearer);
			/* URRs only when CDR generation is enabled for this PDN. */
			if (pdn->generate_cdr) {
				fill_create_urr(&(pfcp_sess_est_req.create_urr[pdr_idx]),
						bearer->pdrs[itr2]);
			}
			/* With Gx enabled (and not plain SGWC) attach the SDF filter
			 * rules and gate status to the PDR. */
			if ((config.use_gx) && pdn->context->cp_mode != SGWC) {
				for(int sdf_itr1 = 0;
						sdf_itr1 < bearer->pdrs[itr2]->pdi.sdf_filter_cnt; sdf_itr1++) {
					enum flow_status f_status =
						pdn->policy.pcc_rule[sdf_itr1]->urule.dyn_rule.flow_status;
					fill_create_pdr_sdf_rules(pfcp_sess_est_req.create_pdr,
							bearer->dynamic_rules[sdf_itr1], pdr_idx);
					fill_gate_status(&pfcp_sess_est_req, pdr_idx, f_status);
				}
				//pdr_idx++;
			}
			pdr_idx++;
		}
		if((config.use_gx) && pdn->context->cp_mode != SGWC) {
			fill_create_qer(&(pfcp_sess_est_req), bearer);
			pfcp_sess_est_req.create_qer_count += bearer->qer_count;
		}
	} /* for loop */
	/* Fill the fqcsid into the session est request */
	if (context->cp_mode != PGWC) {
		/* Set SGW FQ-CSID */
		if (pdn->sgw_csid.num_csid) {
			set_fq_csid_t(&pfcp_sess_est_req.sgw_c_fqcsid, &pdn->sgw_csid);
		}
		/* Set MME FQ-CSID */
		if(pdn->mme_csid.num_csid) {
			set_fq_csid_t(&pfcp_sess_est_req.mme_fqcsid, &pdn->mme_csid);
		}
		/* set PGWC FQ-CSID */
		if (context->cp_mode != SAEGWC) {
			set_fq_csid_t(&pfcp_sess_est_req.pgw_c_fqcsid, &pdn->pgw_csid);
		}
	} else {
		/* Set PGW FQ-CSID */
		if (pdn->pgw_csid.num_csid) {
			set_fq_csid_t(&pfcp_sess_est_req.pgw_c_fqcsid, &pdn->pgw_csid);
		}
		/* Set SGW C FQ_CSID */
		if (pdn->sgw_csid.num_csid) {
			set_fq_csid_t(&pfcp_sess_est_req.sgw_c_fqcsid, &pdn->sgw_csid);
		}
		/* Set MME FQ-CSID */
		if(pdn->mme_csid.num_csid) {
			set_fq_csid_t(&pfcp_sess_est_req.mme_fqcsid, &pdn->mme_csid);
		}
	}
	/* Fetch and update resp info: look up the stored session information. */
	if (get_sess_entry(pdn->seid, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to add response in entry in SM_HASH\n", LOG_VALUE);
		return -1;
	}
	reset_resp_info_structure(resp);
	resp->linked_eps_bearer_id = pdn->default_bearer_id;
	resp->state = PFCP_SESS_EST_REQ_SNT_STATE ;
	resp->proc = RESTORATION_RECOVERY_PROC;
	/* Update PDN procedure */
	pdn->proc = RESTORATION_RECOVERY_PROC;
	pdn->state = PFCP_SESS_EST_REQ_SNT_STATE;
	uint8_t pfcp_msg[PFCP_MSG_LEN]={0};
	int encoded = encode_pfcp_sess_estab_req_t(&pfcp_sess_est_req,
			pfcp_msg);
	/* Set the destination UPF address once (the original called
	 * set_dest_address twice with identical arguments).
	 * TODO: revisit if CP is connected to multiple DPs. */
	ret = set_dest_address(pdn->upf_ip, &upf_pfcp_sockaddr);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
	}
	if ( pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded, upf_pfcp_sockaddr, SENT) < 0 ){
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Error sending in "
				"PFCP Session Establishment Request: %i\n", LOG_VALUE, errno);
		return -1;
	}
	RTE_SET_USED(node_addr);
	return 0;
}
/**
 * @brief : Function to get session IDs by CSID and send session est. requests to peer node
 * @param : peer_csids, peer CSIDs associated with the node
 * @param : node_addr, node address
 * @return : Returns 0 on success, -1 otherwise
 */
static int
create_sess_by_csid_entry(fqcsid_t *peer_csids, node_address_t *node_addr)
{
	/* Walk every session linked to each peer CSID and re-send a PFCP
	 * Session Establishment Request for sessions whose PDN is anchored
	 * on the recovering UPF. Errors on individual sessions are logged
	 * and skipped; the walk always continues. */
	int ret = 0;
	int8_t ebi = 0;
	int8_t ebi_index = 0;
	node_address_t addr = {0};
	peer_csid_key_t key = {0};
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	memcpy(&addr, node_addr, sizeof(node_address_t));
	/* Get the session ID by csid */
	for (uint8_t itr1 = 0; itr1 < peer_csids->num_csid; itr1++) {
		sess_csid *tmp = NULL;
		sess_csid *current = NULL;
		key.iface = SX_PORT_ID;
		key.peer_local_csid = peer_csids->local_csid[itr1];
		memcpy(&key.peer_node_addr, &peer_csids->node_addr, sizeof(node_address_t));
		tmp = get_sess_peer_csid_entry(&key, REMOVE_NODE);
		if (tmp == NULL) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Entry not found, CSID: %u\n", LOG_VALUE,
					peer_csids->local_csid[itr1]);
			continue;
		}
		/* Skip an empty list head (no SEID and no successor). */
		if (tmp->cp_seid == 0 && tmp->next == 0 ) {
			continue;
		}
		current = tmp;
		while (current != NULL ) {
			/* The CP SEID encodes both the UE TEID and the bearer ID. */
			uint32_t teid_key = UE_SESS_ID(current->cp_seid);
			ebi = UE_BEAR_ID(current->cp_seid);
			ebi_index = GET_EBI_INDEX(ebi);
			if (ebi_index == -1) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"Invalid EBI ID\n", LOG_VALUE);
				current = current->next;
				continue;
			}
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" TEID : %u\n", LOG_VALUE, teid_key);
			ret = rte_hash_lookup_data(ue_context_by_fteid_hash,
					(const void *) &teid_key,
					(void **) &context);
			if (ret < 0 || !context) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: UE contetx not found fot TEID %u \n",
						LOG_VALUE, teid_key);
				/* Assign Next node address */
				current = current->next;
				continue;
			}
			pdn = context->pdns[ebi_index];
			if (pdn == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"ERROR : Failed to get PDN context for seid : %u \n",
						LOG_VALUE, current->cp_seid);
				/* Assign Next node address */
				current = current->next;
				continue;
			}
			/* PDN upf ip address and peer node address:
			 * if they do not match, the session belongs to a different DP
			 * and is skipped. */
			if(COMPARE_IP_ADDRESS(pdn->upf_ip, addr) != 0) {
				(pdn->upf_ip.ip_type == IPV6_TYPE) ?
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
							"Match Not Found : Peer Node IPv6 Address : "IPv6_FMT" ,"
							" PDN upf ipv6 addres : "IPv6_FMT"\n",
							LOG_VALUE, IPv6_PRINT(IPv6_CAST(node_addr->ipv6_addr)),
							IPv6_PRINT(IPv6_CAST(pdn->upf_ip.ipv6_addr))):
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
							"Match Not Found : Peer Node IPv4 Address : "IPV4_ADDR" ,"
							" PDN upf ipv4 addres : "IPV4_ADDR"\n",
							LOG_VALUE, IPV4_ADDR_HOST_FORMAT(node_addr->ipv4_addr),
							IPV4_ADDR_HOST_FORMAT(pdn->upf_ip.ipv4_addr));
				current = current->next;
				continue;
			}
			if (process_pfcp_sess_est_req(pdn, node_addr) < 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error while Processing "
						"PFCP Session Establishment Request TEID %u \n", LOG_VALUE, teid_key);
				/* Assign Next node address */
				current = current->next;
				continue;
			}
			/* Assign Next node address */
			current = current->next;
			/* Update session establishment request send counter for recovery;
			 * decremented elsewhere as responses arrive (see recovery thread). */
			num_sess++;
		} /* while loop */
	} /* for loop */
	return 0;
}
/* Function to re-create affected session with peer node */
int
create_peer_node_sess(node_address_t *node_addr, uint8_t iface) {
	/* Re-create sessions affected by a peer (UPF) restart: collect the
	 * peer's CSIDs, map them to local CSIDs, and replay session
	 * establishment for every session hanging off those CSIDs. */
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT":START\n", LOG_VALUE);
	fqcsid_t csids = {0};
	fqcsid_t *peer_csids = NULL;
	/* Get peer CSID associated with node */
	peer_csids = get_peer_addr_csids_entry(node_addr,
			UPDATE_NODE);
	if (peer_csids == NULL) {
		/* No CSIDs for this peer: clean up the stale UPF entry. */
		if (iface == SX_PORT_ID) {
			/* Delete entry from teid info list for given upf */
			delete_entry_from_teid_list(*node_addr, &upf_teid_info_head);
			/* NOTE(review): the delete key is only the ipv4_addr field,
			 * while lookups elsewhere key this hash by the whole
			 * node_address_t — confirm the hash key size, this may never
			 * match for IPv6 peers. */
			if (rte_hash_del_key(upf_context_by_ip_hash, &node_addr->ipv4_addr) < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT" Error on upf_context_by_ip_hash del\n", LOG_VALUE);
			}
		}
		(node_addr->ip_type == IPV6_TYPE) ?
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Peer CSIDs are not found, Node IPv6 Addr:"IPv6_FMT"\n",
					LOG_VALUE, IPv6_PRINT(IPv6_CAST(node_addr->ipv6_addr))):
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Peer CSIDs are not found, Node IPv4 Addr:"IPV4_ADDR"\n",
					LOG_VALUE, IPV4_ADDR_HOST_FORMAT(node_addr->ipv4_addr));
		return -1;
	}
	/* Get the mapped local CSID.
	 * NOTE(review): csids.num_csid grows unbounded across peers — assumes
	 * the total fits local_csid[]; confirm the array bound. The collected
	 * csids are only used for the emptiness check below. */
	for (int8_t itr = 0; itr < peer_csids->num_csid; itr++) {
		csid_t *tmp = NULL;
		csid_key_t key = {0};
		key.local_csid = peer_csids->local_csid[itr];
		memcpy(&key.node_addr, &peer_csids->node_addr, sizeof(node_address_t));
		tmp = get_peer_csid_entry(&key, iface, UPDATE_NODE);
		if (tmp == NULL) {
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Entry not found for "
					"peer CSIDs\n", LOG_VALUE);
			continue;
		}
		for (int8_t itr1 = 0; itr1 < tmp->num_csid; itr1++) {
			csids.local_csid[csids.num_csid++] = tmp->local_csid[itr1];
		}
		memcpy(&csids.node_addr, &tmp->node_addr, sizeof(node_address_t));
	}
	if (!csids.num_csid) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Local CSIDs not found\n", LOG_VALUE);
		return -1;
	}
	if (iface == SX_PORT_ID) {
		create_sess_by_csid_entry(peer_csids, node_addr);
	}
	return 0;
}
/* Function to send pfcp association setup request in recovery mode */
int
process_aasociation_setup_req(peer_addr_t *peer_addr)
{
	/* Send a PFCP Association Setup Request to the given peer while in
	 * recovery mode. Requires an existing upf_context for the peer; marks
	 * the association as in-progress before sending.
	 * (Function name typo "aasociation" is part of the public interface
	 * and is kept for callers.) */
	int ret = 0;
	upf_context_t *upf_context = NULL;
	pfcp_assn_setup_req_t pfcp_ass_setup_req = {0};
	node_address_t node_value = {0};
	node_address_t cp_node_value = {0};
	/* Build the local (CP) node address and the peer (UP) node address
	 * in matching IP families.
	 * NOTE(review): fill_ip_addr return values are ignored here, unlike
	 * the rest of this file — confirm that is acceptable. */
	if (peer_addr->type == PDN_IP_TYPE_IPV4) {
		uint8_t temp[IPV6_ADDRESS_LEN] = {0};
		fill_ip_addr(config.pfcp_ip.s_addr, temp, &cp_node_value);
		node_value.ipv4_addr = peer_addr->ipv4.sin_addr.s_addr;
		node_value.ip_type = PDN_IP_TYPE_IPV4;
	} else if ((peer_addr->type == PDN_IP_TYPE_IPV6)
			|| (peer_addr->type == PDN_IP_TYPE_IPV4V6)) {
		fill_ip_addr(0, config.pfcp_ip_v6.s6_addr, &cp_node_value);
		memcpy(node_value.ipv6_addr,
				peer_addr->ipv6.sin6_addr.s6_addr, IPV6_ADDRESS_LEN);
		node_value.ip_type = PDN_IP_TYPE_IPV6;
	}
	/* Lookup upf context of peer node */
	ret = rte_hash_lookup_data(upf_context_by_ip_hash,
			(const void*) &(node_value), (void **) &(upf_context));
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"NO ENTRY FOUND IN "
				"UPF HASH \n", LOG_VALUE);
		return -1;
	}
	/* Changing status and state */
	upf_context->assoc_status = ASSOC_IN_PROGRESS;
	upf_context->state = PFCP_ASSOC_REQ_SNT_STATE;
	/* Filling pfcp association setup request */
	fill_pfcp_association_setup_req(&pfcp_ass_setup_req);
	/* Filling Node ID */
	set_node_id(&pfcp_ass_setup_req.node_id, cp_node_value);
	uint8_t pfcp_msg[PFCP_MSG_LEN] = {0};
	int encoded = encode_pfcp_assn_setup_req_t(&pfcp_ass_setup_req, pfcp_msg);
	/* TODO: confirm whether a request timestamp / CLI stat update is
	 * needed here for recovery. */
	/* Peer node address */
	ret = set_dest_address(node_value, &upf_pfcp_sockaddr);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
				"IP address", LOG_VALUE);
	}
	if (pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded, upf_pfcp_sockaddr, SENT) < 0 ) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Error in sending Session "
				"Association Setup Request\n", LOG_VALUE);
		return -1;
	}
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Association request sent to "
			"peer node\n", LOG_VALUE);
	return 0;
}
/**
* @brief : Function to start thread in recovery mode
* @param : arg, arguments
* @return : Returns nothing
*/
static void*
recov_est_thread_func(void *arg) {
	/* Recovery-mode worker thread: replays all affected sessions towards
	 * the restarted peer (recov_peer_addr, set by process_asso_resp before
	 * this thread is spawned), then waits until every outstanding session
	 * establishment response has been counted down before leaving
	 * recovery mode. */
	RTE_SET_USED(arg);
	//uint32_t *peer_addr = (uint32_t *) arg;
	(recov_peer_addr.ip_type == IPV6_TYPE) ?
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT" RECOVERY MODE: Thread Start, peer ipv6 node : ["IPv6_FMT"]\n\n",
				LOG_VALUE, IPv6_PRINT(IPv6_CAST(recov_peer_addr.ipv6_addr))):
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT" RECOVERY MODE: Thread Start, peer ipv4 node : ["IPV4_ADDR"]\n\n",
				LOG_VALUE, IPV4_ADDR_HOST_FORMAT(recov_peer_addr.ipv4_addr));
	/* Dump Session information on UP */
	create_peer_node_sess(&recov_peer_addr, SX_PORT_ID);
	(recov_peer_addr.ip_type == IPV6_TYPE) ?
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT" RECOVERY MODE: Thread Stop, peer ipv6 node : ["IPv6_FMT"]\n\n",
				LOG_VALUE, IPv6_PRINT(IPv6_CAST(recov_peer_addr.ipv6_addr))):
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT" RECOVERY MODE: Thread Stop, peer ipv4 node : ["IPV4_ADDR"]\n\n",
				LOG_VALUE, IPV4_ADDR_HOST_FORMAT(recov_peer_addr.ipv4_addr));
	/* Busy-wait until all session est. responses are received (num_sess
	 * is decremented by the response path).
	 * NOTE(review): num_sess is shared across threads without atomics or
	 * a lock — per the original comment this is deliberate, but visibility
	 * is not guaranteed by the C memory model; confirm. */
	while (num_sess != 0) {
		usleep(SLEEP_TIME);
	}
	/* Deinit RECOVERY MODE */
	recovery_flag = 0;
	/* NOTE(review): pthread_kill(..., 0) sends no signal — it only checks
	 * the thread exists. If termination was intended, this is a no-op;
	 * the thread actually ends via the return below. Confirm intent. */
	pthread_kill(recov_thread, 0);
	return NULL;
}
/* Function to process recov association response */
int
process_asso_resp(void *_msg, peer_addr_t *peer_addr) {
	/* Handle a PFCP Association Setup Response while in recovery mode:
	 * verify the peer kept the previously assigned TEID range/TEIDRI
	 * (otherwise tear the peer's sessions down), record the peer address
	 * for the recovery worker, and spawn the recovery thread. */
	int ret = 0;
	upf_context_t *upf_context = NULL;
	msg_info *msg = (msg_info *)_msg;
	ret = rte_hash_lookup_data(upf_context_by_ip_hash,
			(const void*) &(msg->upf_ip), (void **) &(upf_context));
	if (ret < 0) {
		(msg->upf_ip.ip_type == IPV6_TYPE) ?
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
					"NO ENTRY FOUND IN UPF HASH IPv6 ["IPv6_FMT"]\n",
					LOG_VALUE, IPv6_PRINT(IPv6_CAST(msg->upf_ip.ipv6_addr))):
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
					"NO ENTRY FOUND IN UPF HASH IPv4 ["IPV4_ADDR"]\n",
					LOG_VALUE, IPV4_ADDR_HOST_FORMAT(msg->upf_ip.ipv4_addr));
		return -1;
	}
	/* Record the recovering peer's address for the recovery thread.
	 * NOTE(review): ip_type here uses PDN_TYPE_* constants while readers
	 * compare against IPV6_TYPE — presumably the values coincide; confirm. */
	if (msg->upf_ip.ip_type == PDN_TYPE_IPV4) {
		recov_peer_addr.ip_type = PDN_TYPE_IPV4;
		recov_peer_addr.ipv4_addr = msg->upf_ip.ipv4_addr;
	} else {
		recov_peer_addr.ip_type = PDN_TYPE_IPV6;
		/* Bug fix: copy only the 16-byte IPv6 address. The original copied
		 * sizeof(node_address_t) bytes, overflowing the ipv6_addr field
		 * (cf. the IPV6_ADDRESS_LEN copies elsewhere in this file). */
		memcpy(&recov_peer_addr.ipv6_addr,
				&msg->upf_ip.ipv6_addr, IPV6_ADDRESS_LEN);
	}
	upf_context->assoc_status = ASSOC_ESTABLISHED;
	upf_context->state = PFCP_ASSOC_RESP_RCVD_STATE;
	/* Checking Assign TEIDRI: the restarted UP must report the same TEID
	 * range and TEIDRI it had before, otherwise existing TEIDs are invalid
	 * and the peer's sessions must be deleted. */
	if((msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[0].teid_range != upf_context->teid_range) ||
			(msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[0].teidri != upf_context->teidri)){
		(msg->upf_ip.ip_type == IPV6_TYPE) ?
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"ERROR : TEID RANGE MATCH NOT FOUND ,NODE ADDR IPv6: ["IPv6_FMT"] ,"
					" PERVIOUS TEID RANGE : [%d] TEIDRI : [%d]"
					"CURRENT TEID RANGE : [%d] TEIDRI : [%d] \n", LOG_VALUE,
					IPv6_PRINT(IPv6_CAST(msg->upf_ip.ipv6_addr)), upf_context->teid_range,
					upf_context->teidri,
					msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[0].teid_range,
					msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[0].teidri):
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"ERROR : TEID RANGE MATCH NOT FOUND ,NODE ADDR IPv4: ["IPV4_ADDR"] ,"
					" PERVIOUS TEID RANGE : [%d] TEIDRI : [%d]"
					"CURRENT TEID RANGE : [%d] TEIDRI : [%d] \n", LOG_VALUE,
					IPV4_ADDR_HOST_FORMAT(msg->upf_ip.ipv4_addr), upf_context->teid_range,
					upf_context->teidri,
					msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[0].teid_range,
					msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[0].teidri);
		/* Cleanup is on low priority */
		/* Cleanup initiated for peer node */
		del_peer_node_sess(&recov_peer_addr, SX_PORT_ID);
		return -1;
	}
	(peer_addr->type == IPV6_TYPE) ?
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
				"TEID RANGE MATCT FOUND : NODE IPv6 ADDR : ["IPv6_FMT"] , PERVIOUS TEID RANGE : [%d] \n",
				LOG_VALUE, IPv6_PRINT(IPv6_CAST(peer_addr->ipv6.sin6_addr.s6_addr)),
				upf_context->teid_range):
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
				"TEID RANGE MATCT FOUND : NODE IPv4 ADDR : ["IPV4_ADDR"] , PERVIOUS TEID RANGE : [%d] \n",
				LOG_VALUE, IPV4_ADDR_HOST_FORMAT(peer_addr->ipv4.sin_addr.s_addr), upf_context->teid_range);
	/* Spawn the recovery worker that replays the affected sessions. */
	ret = pthread_create(&recov_thread, NULL, &recov_est_thread_func, NULL);
	if (ret != 0) {
		clLog(clSystemLog, eCLSeverityInfo, LOG_FORMAT
				"\nCan't create RECOVRY MODE thread :[%s]", LOG_VALUE, strerror(ret));
		return ret;
	}
	else {
		clLog(clSystemLog, eCLSeverityInfo, LOG_FORMAT
				"\n RECOVERY MODE thread created successfully\n", LOG_VALUE);
	}
	return 0;
}
/* Function to process PFCP session establishment response
 * in recovery mode while recovering affected sessions.
 * Returns 0 on success, -1 or a GTPv2C cause code on failure. */
int
process_sess_est_resp(pfcp_sess_estab_rsp_t *pfcp_sess_est_rsp)
{
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
			"RECOVERY MODE : session establishment response received\n", LOG_VALUE);

	if (pfcp_sess_est_rsp == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"ERROR:%s\n",
				LOG_VALUE, strerror(errno));
		return -1;
	}

	int ret = 0;
	uint8_t num_csid = 0;
	node_address_t node_addr = {0};
	struct resp_info *resp = NULL;
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	uint64_t sess_id = pfcp_sess_est_rsp->header.seid_seqno.has_seid.seid;
	uint32_t teid = UE_SESS_ID(sess_id);

	/* Retrieve the UE context */
	ret = get_ue_context(teid, &context);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Failed to update UE "
				"State for teid: %u\n", LOG_VALUE, teid);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	if (get_sess_entry(sess_id, &resp) != 0) {
		/* BUGFIX: sess_id is uint64_t; "%x" mismatched the promoted
		 * argument type — use "%lx" for the full 64-bit SEID. */
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Failed to get response "
				"entry in SM_HASH for SEID : 0x%lx \n", LOG_VALUE, sess_id);
		return -1;
	}

	/* Need to think on eps_bearer_id */
	int ebi_index = GET_EBI_INDEX(resp->linked_eps_bearer_id);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		return GTPV2C_CAUSE_SYSTEM_FAILURE;
	}

	/* Update the UE state */
	pdn = GET_PDN(context, ebi_index);
	if (pdn == NULL) {
		/* BUGFIX: the %d specifier had no matching argument; pass ebi_index */
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to get pdn for ebi_index : %d \n",
				LOG_VALUE, ebi_index);
		return -1;
	}
	pdn->state = CONNECTED_STATE;

	/* CSID assigned before recovery; superseded by the fresh one below */
	uint16_t old_csid = pdn->up_csid.local_csid[pdn->up_csid.num_csid - 1];
	sess_csid *tmp1 = NULL;
	peer_csid_key_t key = {0};
	fqcsid_t *tmp = NULL;
	sess_fqcsid_t *fqcsid = NULL;

	if (context->up_fqcsid == NULL) {
		fqcsid = rte_zmalloc_socket(NULL, sizeof(sess_fqcsid_t),
				RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (fqcsid == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate the "
					"memory for fqcsids entry\n", LOG_VALUE);
			return -1;
		}
	} else {
		fqcsid = context->up_fqcsid;
	}

	/* UP FQ-CSID */
	if (pfcp_sess_est_rsp->up_fqcsid.header.len) {
		if (pfcp_sess_est_rsp->up_fqcsid.number_of_csids) {
			/* Extract the UP node address (IPv4 or IPv6) from the FQ-CSID IE */
			if (pfcp_sess_est_rsp->up_fqcsid.fqcsid_node_id_type == IPV4_GLOBAL_UNICAST) {
				memcpy(&(node_addr.ipv4_addr),
						pfcp_sess_est_rsp->up_fqcsid.node_address, IPV4_SIZE);
				node_addr.ip_type = pfcp_sess_est_rsp->up_fqcsid.fqcsid_node_id_type;
			} else {
				memcpy(&(node_addr.ipv6_addr),
						pfcp_sess_est_rsp->up_fqcsid.node_address, IPV6_ADDRESS_LEN);
				node_addr.ip_type = pfcp_sess_est_rsp->up_fqcsid.fqcsid_node_id_type;
			}

			/* Stored the UP CSID by UP Node address */
			tmp = get_peer_addr_csids_entry(&node_addr, ADD_NODE);
			if (tmp == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to Add the "
						"SGW-U CSID by SGW Node address, Error : %s \n",
						LOG_VALUE, strerror(errno));
				return GTPV2C_CAUSE_SYSTEM_FAILURE;
			}
			memcpy(&tmp->node_addr, &node_addr, sizeof(node_address_t));

			/* Merge received CSIDs into the per-node list, skipping duplicates */
			for (uint8_t itr = 0; itr < pfcp_sess_est_rsp->up_fqcsid.number_of_csids; itr++) {
				uint8_t match = 0;
				for (uint8_t itr1 = 0; itr1 < tmp->num_csid; itr1++) {
					if (tmp->local_csid[itr1] ==
							pfcp_sess_est_rsp->up_fqcsid.pdn_conn_set_ident[itr]) {
						match = 1;
						break;
					}
				}

				if (!match) {
					tmp->local_csid[tmp->num_csid++] =
						pfcp_sess_est_rsp->up_fqcsid.pdn_conn_set_ident[itr];
				}
			}

			/* Merge into (or initialize) the session-level FQ-CSID list */
			if (fqcsid->num_csid) {
				match_and_add_pfcp_sess_fqcsid(&(pfcp_sess_est_rsp->up_fqcsid), fqcsid);
			} else {
				add_pfcp_sess_fqcsid(&(pfcp_sess_est_rsp->up_fqcsid), fqcsid);
			}

			/* Copying UP csid */
			fill_pdn_fqcsid_info(&pdn->up_csid, fqcsid);

			/* Drop the stale (pre-recovery) CSID from the per-node list */
			for (uint8_t itr2 = 0; itr2 < tmp->num_csid; itr2++) {
				if (tmp->local_csid[itr2] == old_csid) {
					for (uint8_t pos = itr2; pos < (tmp->num_csid - 1); pos++) {
						tmp->local_csid[pos] = tmp->local_csid[pos + 1];
					}
					tmp->num_csid--;
				}
			}
		}
	} else {
		/* TODO: Add the handling if SGW or PGW not support Partial failure */
		tmp = get_peer_addr_csids_entry(&pdn->upf_ip,
				ADD_NODE);
		if (tmp == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to add "
					"the SGW-U CSID by SGW Node address, Error : %s \n",
					LOG_VALUE, strerror(errno));
			return GTPV2C_CAUSE_SYSTEM_FAILURE;
		}
		memcpy(&tmp->node_addr, &pdn->upf_ip, sizeof(node_address_t));
		memcpy(&fqcsid->node_addr[fqcsid->num_csid],
				&tmp->node_addr, sizeof(node_address_t));
	}

	/* Link peer node SGW or PGW csid with local csid */
	if (pdn->up_csid.num_csid) {
		if (context->cp_mode != PGWC) {
			ret = update_peer_csid_link(&pdn->up_csid, &pdn->sgw_csid);
		} else {
			ret = update_peer_csid_link(&pdn->up_csid, &pdn->pgw_csid);
		}

		if (ret) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to Update "
					"FQ-CSID link while PFCP Session Establishment Response: %s \n",
					LOG_VALUE, strerror(errno));
			return -1;
		}

		/* num_csid is 0 here: compare the first (fresh) CSID entry */
		if (old_csid != pdn->up_csid.local_csid[num_csid]) {
			/* Link session with Peer CSID */
			link_sess_with_peer_csid(&pdn->up_csid, pdn, SX_PORT_ID);

			/* Remove old csid */
			key.iface = SX_PORT_ID;
			key.peer_local_csid = old_csid;
			memcpy(&key.peer_node_addr,
					&pdn->up_csid.node_addr, sizeof(node_address_t));

			/* NOTE(review): tmp1 may be NULL if no entry exists — assumes
			 * remove_sess_entry() tolerates a NULL head; confirm. */
			tmp1 = get_sess_peer_csid_entry(&key, REMOVE_NODE);
			remove_sess_entry(tmp1, pdn->seid, &key);
		}
	}

	/* Update the UP CSID in the context */
	if (context->up_fqcsid == NULL)
		context->up_fqcsid = fqcsid;

	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | pfcp_messages/pfcp_up_sess_rsp.c | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "up_main.h"
#include "epc_arp.h"
#include "pfcp_util.h"
#include "pfcp_enum.h"
#include "pfcp_set_ie.h"
#include "pfcp_session.h"
extern struct rte_hash *arp_hash_handle[NUM_SPGW_PORTS];
extern int clSystemLog;
/**
 * @brief : Build a PFCP Session Establishment Response message.
 * @param : pfcp_sess_est_resp, response structure to populate
 * @param : cause, PFCP cause value to report
 * @param : offend, offending IE type (used only for missing-IE causes)
 * @param : node_value, local node address used for Node ID and F-SEID
 * @param : pfcp_session_request, originating request (source of the SEID)
 * @return : Returns nothing
 */
void
fill_pfcp_session_est_resp(pfcp_sess_estab_rsp_t *pfcp_sess_est_resp,
		uint8_t cause, int offend, node_address_t node_value,
		pfcp_sess_estab_req_t *pfcp_session_request)
{
	/* Sequence number: to be taken from the establishment request once
	 * it is wired through to this function */
	uint32_t seq_no = 0;

	set_pfcp_seid_header((pfcp_header_t *) &(pfcp_sess_est_resp->header),
			PFCP_SESSION_ESTABLISHMENT_RESPONSE, HAS_SEID, seq_no,
			NO_CP_MODE_REQUIRED);

	set_node_id(&(pfcp_sess_est_resp->node_id), node_value);
	set_cause(&(pfcp_sess_est_resp->cause), cause);

	/* Missing-IE causes carry the offending IE type */
	if ((CONDITIONALIEMISSING == cause) || (MANDATORYIEMISSING == cause))
		set_offending_ie(&(pfcp_sess_est_resp->offending_ie), offend);

	/* On acceptance, echo the UP F-SEID taken from the request header */
	if (REQUESTACCEPTED == cause) {
		uint64_t up_seid =
			pfcp_session_request->header.seid_seqno.has_seid.seid;
		set_fseid(&(pfcp_sess_est_resp->up_fseid), up_seid, node_value);
	}

	/* Optional load / overload control IEs */
	if (pfcp_ctxt.cp_supported_features & CP_LOAD)
		set_lci(&(pfcp_sess_est_resp->load_ctl_info));

	if (pfcp_ctxt.cp_supported_features & CP_OVRL)
		set_olci(&(pfcp_sess_est_resp->ovrld_ctl_info));

	if (RULECREATION_MODIFICATIONFAILURE == cause)
		set_failed_rule_id(&(pfcp_sess_est_resp->failed_rule_id));
}
/**
 * @brief : Build a PFCP Session Modification Response message.
 * @param : pfcp_sess_modify_resp, response structure to populate
 * @param : pfcp_session_mod_req, originating modification request
 * @param : cause, PFCP cause value to report
 * @param : offend, offending IE type (used only for missing-IE causes)
 * @return : Returns nothing
 */
void
fill_pfcp_session_modify_resp(pfcp_sess_mod_rsp_t *pfcp_sess_modify_resp,
		pfcp_sess_mod_req_t *pfcp_session_mod_req, uint8_t cause, int offend)
{
	uint32_t seq_no = 1;

	set_pfcp_seid_header((pfcp_header_t *) &(pfcp_sess_modify_resp->header),
			PFCP_SESSION_MODIFICATION_RESPONSE, HAS_SEID, seq_no,
			NO_CP_MODE_REQUIRED);

	set_cause(&(pfcp_sess_modify_resp->cause), cause);

	/* Missing-IE causes carry the offending IE type */
	if ((CONDITIONALIEMISSING == cause) || (MANDATORYIEMISSING == cause))
		set_offending_ie(&(pfcp_sess_modify_resp->offending_ie), offend);

	/* created_bar: still to be filled in */
	/* On acceptance, report the created PDR when the request asked the UP
	 * to choose the local F-TEID (CHOOSE flag set on the first PDR) */
	if ((REQUESTACCEPTED == cause) &&
			(pfcp_session_mod_req->create_pdr_count > 0) &&
			(pfcp_session_mod_req->create_pdr[0].pdi.local_fteid.ch))
		set_created_pdr_ie(&(pfcp_sess_modify_resp->created_pdr));

	/* Optional load / overload control IEs */
	if (pfcp_ctxt.cp_supported_features & CP_LOAD)
		set_lci(&(pfcp_sess_modify_resp->load_ctl_info));

	if (pfcp_ctxt.cp_supported_features & CP_OVRL)
		set_olci(&(pfcp_sess_modify_resp->ovrld_ctl_info));

	if (RULECREATION_MODIFICATIONFAILURE == cause)
		set_failed_rule_id(&(pfcp_sess_modify_resp->failed_rule_id));

	if (pfcp_ctxt.up_supported_features & UP_PDIU)
		set_created_traffic_endpoint(
				&(pfcp_sess_modify_resp->createdupdated_traffic_endpt));
}
/**
 * @brief : Build a PFCP Session Deletion Response message.
 * @param : pfcp_sess_del_resp, response structure to populate
 * @param : cause, PFCP cause value to report
 * @param : offend, offending IE type (used only for missing-IE causes)
 * @return : Returns nothing
 */
void
fill_pfcp_sess_del_resp(pfcp_sess_del_rsp_t *
		pfcp_sess_del_resp, uint8_t cause, int offend)
{
	uint32_t seq_no = 1;

	set_pfcp_seid_header((pfcp_header_t *) &(pfcp_sess_del_resp->header),
			PFCP_SESSION_DELETION_RESPONSE, HAS_SEID, seq_no,
			NO_CP_MODE_REQUIRED);

	set_cause(&(pfcp_sess_del_resp->cause), cause);

	/* Missing-IE causes carry the offending IE type */
	if ((CONDITIONALIEMISSING == cause) || (MANDATORYIEMISSING == cause))
		set_offending_ie(&(pfcp_sess_del_resp->offending_ie), offend);

	/* Optional load / overload control IEs */
	if (pfcp_ctxt.cp_supported_features & CP_LOAD)
		set_lci(&(pfcp_sess_del_resp->load_ctl_info));

	if (pfcp_ctxt.cp_supported_features & CP_OVRL)
		set_olci(&(pfcp_sess_del_resp->ovrld_ctl_info));
}
/**
 * @brief : Build and send a GTP-U End Marker towards the peer described by
 *          the FAR's outer-header-creation parameters: resolve the peer MAC
 *          via the ARP table, pick the matching local interface address,
 *          then hand off to build_endmarker_and_send().
 * @param : far, FAR carrying peer IP/TEID and the destination interface
 * @return : 0 on success, -1 when the peer is unresolved or the destination
 *           address does not belong to any local interface subnet
 */
int sess_modify_with_endmarker(far_info_t *far)
{
	/* BUGFIX: edmk was uninitialized; if neither address branch below is
	 * taken, dst_ip.ip_type (read later) was indeterminate — UB. */
	struct sess_info_endmark edmk = {0};
	struct arp_ip_key arp_key = {0};
	struct arp_entry_data *ret_arp_data = NULL;
	int ret = 0;

	/*Retrieve the destination MAC*/
	if (far->frwdng_parms.outer_hdr_creation.ipv4_address != 0) {
		edmk.src_ip.ip_type = IPV4_TYPE;
		edmk.dst_ip.ip_type = IPV4_TYPE;
		edmk.dst_ip.ipv4_addr = far->frwdng_parms.outer_hdr_creation.ipv4_address;

		/* Set the ARP KEY */
		arp_key.ip_type.ipv4 = PRESENT;
		arp_key.ip_addr.ipv4 = edmk.dst_ip.ipv4_addr;
	} else if (far->frwdng_parms.outer_hdr_creation.ipv6_address != NULL) {
		/* NOTE(review): if ipv6_address is an array member this comparison
		 * is always true — confirm it is a pointer, or test the content. */
		edmk.src_ip.ip_type = IPV6_TYPE;
		edmk.dst_ip.ip_type = IPV6_TYPE;
		memcpy(edmk.dst_ip.ipv6_addr,
				far->frwdng_parms.outer_hdr_creation.ipv6_address,
				IPV6_ADDRESS_LEN);

		/* Set the ARP KEY */
		arp_key.ip_type.ipv6 = PRESENT;
		memcpy(arp_key.ip_addr.ipv6.s6_addr,
				edmk.dst_ip.ipv6_addr, IPV6_ADDRESS_LEN);
	}

	edmk.teid = far->frwdng_parms.outer_hdr_creation.teid;

	/* Resolve the next-hop MAC for the peer */
	ret = rte_hash_lookup_data(arp_hash_handle[S1U_PORT_ID],
			&arp_key, (void **)&ret_arp_data);
	if (ret < 0) {
		(arp_key.ip_type.ipv6 == PRESENT) ?
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"END_MARKER:IPv6 is not resolved for sending endmarker:"IPv6_FMT"\n",
					LOG_VALUE, IPv6_PRINT(IPv6_CAST(edmk.dst_ip.ipv6_addr))):
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"END_MARKER:IPv4 is not resolved for sending endmarker:"IPV4_ADDR"\n",
					LOG_VALUE, IPV4_ADDR_HOST_FORMAT(edmk.dst_ip.ipv4_addr));
		return -1;
	}

	memcpy(&(edmk.destination_MAC), &(ret_arp_data->eth_addr), sizeof(struct ether_addr));
	edmk.dst_port = ret_arp_data->port;

	/* Fill the Local SRC Address of the intf in the IPV4 header */
	if (edmk.dst_ip.ip_type == IPV4_TYPE) {
		/* Validate the Destination IP Address subnet */
		if (validate_Subnet(ntohl(edmk.dst_ip.ipv4_addr), app.wb_net, app.wb_bcast_addr)) {
			/* construct iphdr with local IP Address */
			edmk.src_ip.ipv4_addr = htonl(app.wb_ip);
		} else if (validate_Subnet(ntohl(edmk.dst_ip.ipv4_addr), app.wb_li_net, app.wb_li_bcast_addr)) {
			/* construct iphdr with local IP Address */
			/* NOTE(review): wb_li_ip/eb_ip are used without htonl() while
			 * wb_ip/eb_li_ip are converted — confirm stored byte order. */
			edmk.src_ip.ipv4_addr = app.wb_li_ip;
		} else if (validate_Subnet(ntohl(edmk.dst_ip.ipv4_addr), app.eb_net, app.eb_bcast_addr)) {
			/* construct iphdr with local IP Address */
			edmk.src_ip.ipv4_addr = app.eb_ip;
		} else if (validate_Subnet(ntohl(edmk.dst_ip.ipv4_addr), app.eb_li_net, app.eb_li_bcast_addr)) {
			/* construct iphdr with local IP Address */
			edmk.src_ip.ipv4_addr = htonl(app.eb_li_ip);
		} else {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"END_MAKER:Destination IPv4 Addr "IPV4_ADDR" is NOT in local intf subnet\n",
					LOG_VALUE, IPV4_ADDR_HOST_FORMAT(edmk.dst_ip.ipv4_addr));
			return -1;
		}
	} else if (edmk.dst_ip.ip_type == IPV6_TYPE) {
		/* Validate the Destination IPv6 Address Network */
		if (validate_ipv6_network(IPv6_CAST(edmk.dst_ip.ipv6_addr), app.wb_ipv6,
					app.wb_ipv6_prefix_len)) {
			/* Source interface IPv6 address */
			memcpy(&edmk.src_ip.ipv6_addr, &app.wb_ipv6, sizeof(struct in6_addr));
		} else if (validate_ipv6_network(IPv6_CAST(edmk.dst_ip.ipv6_addr), app.wb_li_ipv6,
					app.wb_li_ipv6_prefix_len)) {
			/* Source interface IPv6 address */
			memcpy(&edmk.src_ip.ipv6_addr, &app.wb_li_ipv6, sizeof(struct in6_addr));
		} else if (validate_ipv6_network(IPv6_CAST(edmk.dst_ip.ipv6_addr), app.eb_ipv6,
					app.eb_ipv6_prefix_len)) {
			/* Source interface IPv6 address */
			memcpy(&edmk.src_ip.ipv6_addr, &app.eb_ipv6, sizeof(struct in6_addr));
		} else if (validate_ipv6_network(IPv6_CAST(edmk.dst_ip.ipv6_addr), app.eb_li_ipv6,
					app.eb_li_ipv6_prefix_len)) {
			/* Source interface IPv6 address */
			memcpy(&edmk.src_ip.ipv6_addr, &app.eb_li_ipv6, sizeof(struct in6_addr));
		} else {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"END_MARKER:Destination S5S8 intf IPv6 addr "IPv6_FMT" "
					"is NOT in local intf Network\n",
					LOG_VALUE, IPv6_PRINT(IPv6_CAST(edmk.dst_ip.ipv6_addr)));
			return -1;
		}
	} else {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"END_MARKER: Not set appropriate IP TYpe in the destination address\n",
				LOG_VALUE);
		return -1;
	}

	/* VS: Fill the Source IP and Physical Address of the interface based on the interface value */
	if (far->frwdng_parms.dst_intfc.interface_value == ACCESS) {
		edmk.source_MAC = app.wb_ether_addr;
	} else if (far->frwdng_parms.dst_intfc.interface_value == CORE) {
		edmk.source_MAC = app.eb_ether_addr;
	}

	build_endmarker_and_send(&edmk);
	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | dp/up_acl.c | <gh_stars>0
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define _GNU_SOURCE /* Expose declaration of tdestroy() */
#include <search.h>
#include "up_acl.h"
#include "up_main.h"
#include "gw_adapter.h"
#define ACL_DENY_SIGNATURE 0x00000000
/* Currently restrict acl context to use single category*/
#define DEFAULT_MAX_CATEGORIES 1
#define uint32_t_to_char(ip, a, b, c, d) do {\
*a = (unsigned char)((ip) >> 24 & 0xff);\
*b = (unsigned char)((ip) >> 16 & 0xff);\
*c = (unsigned char)((ip) >> 8 & 0xff);\
*d = (unsigned char)((ip) & 0xff);\
} while (0)
#define OFF_ETHHEAD (sizeof(struct ether_hdr))
#define OFF_IPV42PROTO (offsetof(struct ipv4_hdr, next_proto_id))
#define OFF_IPV62PROTO (offsetof(struct ipv6_hdr, proto))
#define MBUF_IPV4_2PROTO(m) \
rte_pktmbuf_mtod_offset((m), uint8_t *, OFF_ETHHEAD + OFF_IPV42PROTO)
#define MBUF_IPV6_2PROTO(m) \
rte_pktmbuf_mtod_offset((m), uint8_t *, OFF_ETHHEAD + OFF_IPV62PROTO)
#define GET_CB_FIELD(in, fd, base, lim, dlm) do { \
unsigned long val; \
char *end; \
errno = 0; \
val = strtoul((in), &end, (base)); \
if (errno != 0 || end[0] != (dlm) || val > (lim)) \
return -EINVAL; \
(fd) = (typeof(fd))val; \
(in) = end + 1; \
} while (0)
/*
* ACL rules should have higher priorities than route ones to ensure ACL rule
* always be found when input packets have multi-matches in the database.
* An exception case is performance measure, which can define route rules with
* higher priority and route rules will always be returned in each lookup.
* Reserve range from ACL_RULE_PRIORITY_MAX + 1 to
* RTE_ACL_MAX_PRIORITY for route entries in performance measure
*/
#define ACL_RULE_PRIORITY_MAX 0x10000000
#define PREFETCH_OFFSET 8
/*
* Forward port info save in ACL lib starts from 1
* since ACL assume 0 is invalid.
* So, need add 1 when saving and minus 1 when forwarding packets.
*/
#define FWD_PORT_SHIFT 1
static uint32_t acl_table_indx_offset = 1;
static uint32_t acl_table_indx;
/* Max number of sdf rules */
static uint8_t sdf_rule_id;
extern int clSystemLog;
/**
* @brief : Maintains acl field type information
*/
struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
{
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint8_t),
.field_index = PROTO_FIELD_IPV4,
.input_index = RTE_ACL_IPV4VLAN_PROTO,
.offset = 0,
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = SRC_FIELD_IPV4,
.input_index = RTE_ACL_IPV4VLAN_SRC,
.offset = offsetof(struct ipv4_hdr, src_addr) -
offsetof(struct ipv4_hdr, next_proto_id),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = DST_FIELD_IPV4,
.input_index = RTE_ACL_IPV4VLAN_DST,
.offset = offsetof(struct ipv4_hdr, dst_addr) -
offsetof(struct ipv4_hdr, next_proto_id),
},
{
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = SRCP_FIELD_IPV4,
.input_index = RTE_ACL_IPV4VLAN_PORTS,
.offset = sizeof(struct ipv4_hdr) -
offsetof(struct ipv4_hdr, next_proto_id),
},
{
.type = RTE_ACL_FIELD_TYPE_RANGE,
.size = sizeof(uint16_t),
.field_index = DSTP_FIELD_IPV4,
.input_index = RTE_ACL_IPV4VLAN_PORTS,
.offset = sizeof(struct ipv4_hdr) -
offsetof(struct ipv4_hdr, next_proto_id) +
sizeof(uint16_t),
},
};
struct rte_acl_field_def ipv6_defs[NUM_FIELDS_IPV6] = {
{
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint8_t),
.field_index = PROTO_FIELD_IPV6,
.input_index = PROTO_FIELD_IPV6,
.offset = 0,
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = SRC1_FIELD_IPV6,
.input_index = SRC1_FIELD_IPV6,
.offset = offsetof(struct ipv6_hdr, src_addr) -
offsetof(struct ipv6_hdr, proto),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = SRC2_FIELD_IPV6,
.input_index = SRC2_FIELD_IPV6,
.offset = offsetof(struct ipv6_hdr, src_addr) -
offsetof(struct ipv6_hdr, proto) + sizeof(uint32_t),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = SRC3_FIELD_IPV6,
.input_index = SRC3_FIELD_IPV6,
.offset = offsetof(struct ipv6_hdr, src_addr) -
offsetof(struct ipv6_hdr, proto) +
2 * sizeof(uint32_t),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = SRC4_FIELD_IPV6,
.input_index = SRC4_FIELD_IPV6,
.offset = offsetof(struct ipv6_hdr, src_addr) -
offsetof(struct ipv6_hdr, proto) +
3 * sizeof(uint32_t),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = DST1_FIELD_IPV6,
.input_index = DST1_FIELD_IPV6,
.offset = offsetof(struct ipv6_hdr, dst_addr)
- offsetof(struct ipv6_hdr, proto),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = DST2_FIELD_IPV6,
.input_index = DST2_FIELD_IPV6,
.offset = offsetof(struct ipv6_hdr, dst_addr) -
offsetof(struct ipv6_hdr, proto) + sizeof(uint32_t),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = DST3_FIELD_IPV6,
.input_index = DST3_FIELD_IPV6,
.offset = offsetof(struct ipv6_hdr, dst_addr) -
offsetof(struct ipv6_hdr, proto) +
2 * sizeof(uint32_t),
},
{
.type = RTE_ACL_FIELD_TYPE_MASK,
.size = sizeof(uint32_t),
.field_index = DST4_FIELD_IPV6,
.input_index = DST4_FIELD_IPV6,
.offset = offsetof(struct ipv6_hdr, dst_addr) -
offsetof(struct ipv6_hdr, proto) +
3 * sizeof(uint32_t),
},
};
RTE_ACL_RULE_DEF(acl4_rule, RTE_DIM(ipv4_defs));
RTE_ACL_RULE_DEF(acl6_rule, RTE_DIM(ipv6_defs));
/**
 * @brief : Maintains acl parameters
 *          Per-burst scratch buffers that split an incoming packet burst
 *          into IPv4 and IPv6 lanes before rte_acl_classify().
 */
struct acl_search {
	const uint8_t *data_ipv4[MAX_BURST_SZ];  /* pointers to the IPv4 5-tuple region of each packet */
	struct rte_mbuf *m_ipv4[MAX_BURST_SZ];   /* the mbufs the data_ipv4 pointers belong to */
	uint32_t res_ipv4[MAX_BURST_SZ];         /* per-packet classify result (rule userdata) */
	int num_ipv4;                            /* number of valid entries in the IPv4 arrays */

	const uint8_t *data_ipv6[MAX_BURST_SZ];  /* pointers to the IPv6 5-tuple region of each packet */
	struct rte_mbuf *m_ipv6[MAX_BURST_SZ];   /* the mbufs the data_ipv6 pointers belong to */
	uint32_t res_ipv6[MAX_BURST_SZ];         /* per-packet classify result (rule userdata) */
	int num_ipv6;                            /* number of valid entries in the IPv6 arrays */
};
/**
 * @brief : Maintains acl configuration
 *          One ACL context pair (IPv4 + IPv6) plus its per-burst scratch.
 */
struct acl_config {
	struct rte_acl_ctx *acx_ipv4;  /* compiled IPv4 ACL context */
	struct rte_acl_ctx *acx_ipv6;  /* compiled IPv6 ACL context */
	uint8_t acx_ipv4_built;        /* non-zero once acx_ipv4 has been built */
	uint8_t acx_ipv6_built;        /* non-zero once acx_ipv6 has been built */
	struct acl_search acl_search;  /* per-burst classify scratch buffers */
};
/**
 * @brief : Maintains parm config
 *          File-scope runtime options for the ACL module.
 */
static struct{
	const char *rule_ipv4_name;  /* name of the IPv4 rule source */
	int scalar;                  /* non-zero: force the scalar classify algorithm */
} parm_config;
const char cb_port_delim[] = ":";
/**
 * @brief : Maintains acl rule table information
 *          Rules are kept in a binary search tree (tsearch/twalk, see
 *          'root') with pluggable comparison and traversal callbacks.
 */
struct acl_rules_table {
	char name[MAX_LEN];           /* table identifier */
	void *root;                   /* tsearch() tree root holding the rules */
	uint16_t num_entries;         /* rules currently stored */
	uint16_t max_entries;         /* capacity limit for this table */
	int (*compare)(const void *r1p, const void *r2p);        /* tree ordering predicate */
	int (*compare_rule)(const void *r1p, const void *r2p);   /* IPv4 rule equality/ordering */
	int (*compare_ipv6_rule)(const void *r1p, const void *r2p); /* IPv6 rule equality/ordering */
	void (*print_entry)(const void *nodep, const VISIT which, const int depth); /* twalk() dump callback */
	void (*add_entry)(const void *nodep, const VISIT which, const int depth);   /* twalk() IPv4 insert callback */
	void (*add_ipv6_entry)(const void *nodep, const VISIT which, const int depth); /* twalk() IPv6 insert callback */
	uint16_t num_of_ue;           /* number of UEs referencing this table */
};
struct acl_config acl_config[MAX_ACL_TABLES];
struct acl_rules_table acl_rules_table[MAX_ACL_TABLES];
/*******************************************************[START]**********************************************************/
/**
 * @brief : Print one acl rule information
 * @param : rule, acl rule
 * @param : extra, when non-zero also print category mask, priority and
 *          userdata (with the deny signature masked off)
 * @return : Returns nothing
 */
static inline void print_one_ipv4_rule(struct acl4_rule *rule, int extra)
{
	unsigned char a, b, c, d;

	/* Source address/mask */
	uint32_t_to_char(rule->field[SRC_FIELD_IPV4].value.u32, &a, &b, &c, &d);
	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"ACL Rule Info : %hhu.%hhu.%hhu.%hhu/%u\n", LOG_VALUE, a, b, c, d,
			rule->field[SRC_FIELD_IPV4].mask_range.u32);

	/* Destination address/mask */
	uint32_t_to_char(rule->field[DST_FIELD_IPV4].value.u32, &a, &b, &c, &d);
	/* BUGFIX: LOG_VALUE was passed without LOG_FORMAT in the format string,
	 * so the %hhu specifiers consumed the wrong variadic arguments. */
	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"ACL Rule Info : %hhu.%hhu.%hhu.%hhu/%u \n",
			LOG_VALUE, a, b, c, d, rule->field[DST_FIELD_IPV4].mask_range.u32);

	/* Port ranges and protocol value/mask */
	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"ACL Rule Info : %hu : %hu %hu : %hu 0x%hhx/0x%hhx \n",LOG_VALUE,
			rule->field[SRCP_FIELD_IPV4].value.u16,
			rule->field[SRCP_FIELD_IPV4].mask_range.u16,
			rule->field[DSTP_FIELD_IPV4].value.u16,
			rule->field[DSTP_FIELD_IPV4].mask_range.u16,
			rule->field[PROTO_FIELD_IPV4].value.u8,
			rule->field[PROTO_FIELD_IPV4].mask_range.u8);

	if (extra)
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"ACL Rule Info : 0x%x-0x%x-0x%x \n", LOG_VALUE,
				rule->data.category_mask,
				rule->data.priority,
				rule->data.userdata & ~ACL_DENY_SIGNATURE);
}
/**
 * @brief : Print one IPv6 acl rule: source address/mask, destination
 *          address/mask and protocol value/mask, each address printed as
 *          eight 16-bit hex groups built from the four 32-bit rule fields.
 * @param : rule, acl rule
 * @param : extra, when non-zero also print category mask, priority and userdata
 * @return : Returns nothing
 */
static inline void
print_one_ipv6_rule(struct acl6_rule *rule, int extra)
{
	unsigned char a, b, c, d;

	/* Source address: four 32-bit fields -> 8 hex groups; the prefix
	 * length is the sum of the four per-field mask widths */
	uint32_t_to_char(rule->field[SRC1_FIELD_IPV6].value.u32,
		&a, &b, &c, &d);
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"%.2x%.2x:%.2x%.2x", LOG_VALUE, a, b, c, d);
	uint32_t_to_char(rule->field[SRC2_FIELD_IPV6].value.u32,
		&a, &b, &c, &d);
	clLog(clSystemLog, eCLSeverityDebug,":%.2x%.2x:%.2x%.2x", a, b, c, d);
	uint32_t_to_char(rule->field[SRC3_FIELD_IPV6].value.u32,
		&a, &b, &c, &d);
	clLog(clSystemLog, eCLSeverityDebug,":%.2x%.2x:%.2x%.2x", a, b, c, d);
	uint32_t_to_char(rule->field[SRC4_FIELD_IPV6].value.u32,
		&a, &b, &c, &d);
	clLog(clSystemLog, eCLSeverityDebug,":%.2x%.2x:%.2x%.2x/%u ", a, b, c, d,
		rule->field[SRC1_FIELD_IPV6].mask_range.u32
		+ rule->field[SRC2_FIELD_IPV6].mask_range.u32
		+ rule->field[SRC3_FIELD_IPV6].mask_range.u32
		+ rule->field[SRC4_FIELD_IPV6].mask_range.u32);

	/* Destination address, same layout as the source */
	uint32_t_to_char(rule->field[DST1_FIELD_IPV6].value.u32,
		&a, &b, &c, &d);
	clLog(clSystemLog, eCLSeverityDebug,"%.2x%.2x:%.2x%.2x", a, b, c, d);
	uint32_t_to_char(rule->field[DST2_FIELD_IPV6].value.u32,
		&a, &b, &c, &d);
	clLog(clSystemLog, eCLSeverityDebug,":%.2x%.2x:%.2x%.2x", a, b, c, d);
	uint32_t_to_char(rule->field[DST3_FIELD_IPV6].value.u32,
		&a, &b, &c, &d);
	clLog(clSystemLog, eCLSeverityDebug,":%.2x%.2x:%.2x%.2x", a, b, c, d);
	uint32_t_to_char(rule->field[DST4_FIELD_IPV6].value.u32,
		&a, &b, &c, &d);
	clLog(clSystemLog, eCLSeverityDebug,":%.2x%.2x:%.2x%.2x/%u ", a, b, c, d,
		rule->field[DST1_FIELD_IPV6].mask_range.u32
		+ rule->field[DST2_FIELD_IPV6].mask_range.u32
		+ rule->field[DST3_FIELD_IPV6].mask_range.u32
		+ rule->field[DST4_FIELD_IPV6].mask_range.u32);

	/* Protocol value/mask */
	clLog(clSystemLog, eCLSeverityDebug,"0x%hhx/0x%hhx \n",
		rule->field[PROTO_FIELD_IPV6].value.u8,
		rule->field[PROTO_FIELD_IPV6].mask_range.u8);

	if (extra)
		clLog(clSystemLog, eCLSeverityDebug,"0x%x-0x%x-0x%x ",
			rule->data.category_mask,
			rule->data.priority,
			rule->data.userdata);
}
/**
 * @brief : Fill acl rule information
 *          Classify one packet of a burst by IP version and append a pointer
 *          to its protocol field (plus the mbuf itself) into the matching
 *          IPv4/IPv6 lane of the acl_search scratch; packets that are
 *          neither IPv4 nor IPv6, or with no readable header, are freed.
 * @param : pkts_in, input buffer
 * @param : acl, acl structure to fill
 * @param : index, index of packet in array
 * @return : Returns nothing
 */
static inline void
prepare_one_packet(struct rte_mbuf **pkts_in, struct acl_search *acl,
		int index)
{
	struct rte_mbuf *pkt = pkts_in[index];
	uint64_t len = 0;
	uint8_t *data = NULL;

	/* Skip over the Ethernet header to reach the IP header */
	len = ETH_HDR_SIZE;

	/*Get pointer to IP frame in packet*/
	data = rte_pktmbuf_mtod_offset(pkt, uint8_t *, len);

	/* Dispatch on the IP version nibble of the first header byte */
	/* Fill acl structure */
	if (( data[0] & VERSION_FLAG_CHECK ) == IPv4_VERSION) {
		/* Fill acl structure */
		acl->data_ipv4[acl->num_ipv4] = MBUF_IPV4_2PROTO(pkt);
		if (acl->data_ipv4[acl->num_ipv4] != NULL) {
			acl->m_ipv4[(acl->num_ipv4)++] = pkt;
		} else {
			/* Malformed packet, drop the packet */
			rte_pktmbuf_free(pkt);
		}
	} else if (( data[0] & VERSION_FLAG_CHECK ) == IPv6_VERSION) {
		/* Fill acl structure */
		acl->data_ipv6[acl->num_ipv6] = MBUF_IPV6_2PROTO(pkt);
		if (acl->data_ipv6[acl->num_ipv6] != NULL) {
			acl->m_ipv6[(acl->num_ipv6)++] = pkt;
		} else {
			/* Malformed packet, drop the packet */
			rte_pktmbuf_free(pkt);
		}
	} else {
		/* Unknown type, drop the packet */
		rte_pktmbuf_free(pkt);
	}
}
/**
 * @brief : Process packets and fill acl rule information
 *          Walk an RX burst with a software-prefetch window of
 *          PREFETCH_OFFSET packets and sort every packet into the IPv4 or
 *          IPv6 lane of the acl_search scratch via prepare_one_packet().
 * @param : pkts_in, input buffer
 * @param : acl, acl structure to fill
 * @param : nb_rx, number of packets
 * @return : Returns nothing
 */
static inline void
prepare_acl_parameter(struct rte_mbuf **pkts_in, struct acl_search *acl,
		int nb_rx)
{
	int pkt_idx;

	/* Reset both lanes for this burst */
	acl->num_ipv4 = 0;
	acl->num_ipv6 = 0;

	/* Warm the cache for the first window of packets */
	for (pkt_idx = 0; pkt_idx < PREFETCH_OFFSET && pkt_idx < nb_rx; pkt_idx++)
		rte_prefetch0(rte_pktmbuf_mtod(pkts_in[pkt_idx], void *));

	/* Main loop: prefetch packet i+PREFETCH_OFFSET while handling packet i */
	for (pkt_idx = 0; pkt_idx < (nb_rx - PREFETCH_OFFSET); pkt_idx++) {
		rte_prefetch0(rte_pktmbuf_mtod
				(pkts_in[pkt_idx + PREFETCH_OFFSET], void *));
		prepare_one_packet(pkts_in, acl, pkt_idx);
	}

	/* Tail: the already-prefetched remainder of the burst */
	for (; pkt_idx < nb_rx; pkt_idx++)
		prepare_one_packet(pkts_in, acl, pkt_idx);
}
/**
 * @brief : Forward packets
 *          Keep a packet whose ACL lookup returned a non-deny, non-zero
 *          result; free (drop) it otherwise.
 * @param : m, packet buffer
 * @param : res, ACL classify result (rule userdata; 0 = no match)
 * @return : Returns nothing
 */
static inline void send_one_packet(struct rte_mbuf *m, uint32_t res)
{
	/* NOTE(review): ACL_DENY_SIGNATURE is defined as 0x00000000, so
	 * (res & ACL_DENY_SIGNATURE) == 0 is always true and the whole test
	 * reduces to res != 0 — confirm whether a non-zero deny signature
	 * was intended here. */
	if (likely((res & ACL_DENY_SIGNATURE) == 0 && res != 0)) {
		/* forward packets */
		;
	} else {
		/* in the ACL list, drop it */
		rte_pktmbuf_free(m);
	}
}
/*
* Parse ClassBench rules file.
* Expected format:
* '@'<src_ipv4_addr>'/'<masklen> <space> \
* <dst_ipv4_addr>'/'<masklen> <space> \
* <src_port_low> <space> ":" <src_port_high> <space> \
* <dst_port_low> <space> ":" <dst_port_high> <space> \
* <proto>'/'<mask>
*/
/**
 * @brief : Parse ipv4 address
 *          Parse "a.b.c.d/m" into a host-order address and a prefix length
 *          using GET_CB_FIELD (which advances the cursor past each field).
 * @param : in, input ip string
 * @param : addr, output (host byte order, via the IPv4() macro)
 * @param : mask_len, mask length (0..32)
 * @return : Returns 0 on success; GET_CB_FIELD returns -EINVAL from within
 *           this function on malformed input
 */
static int parse_ipv4_net(const char *in, uint32_t *addr, uint32_t *mask_len)
{
	uint8_t a, b, c, d, m;

	/* Each GET_CB_FIELD consumes one number and its trailing delimiter */
	GET_CB_FIELD(in, a, 0, UINT8_MAX, '.');
	GET_CB_FIELD(in, b, 0, UINT8_MAX, '.');
	GET_CB_FIELD(in, c, 0, UINT8_MAX, '.');
	GET_CB_FIELD(in, d, 0, UINT8_MAX, '/');
	GET_CB_FIELD(in, m, 0, sizeof(uint32_t) * CHAR_BIT, 0);

	addr[0] = IPv4(a, b, c, d);
	mask_len[0] = m;

	return 0;
}
void
swap_src_dst_ip(char *str)
{
char *s, *sp, *in[CB_FLD_NUM], tmp[MAX_LEN] = {0};
static const char *dlm = " \t\n";
strncpy(tmp, str, MAX_LEN);
s = tmp;
in[0] = strtok_r(s, dlm, &sp);
in[1] = strtok_r(NULL, dlm, &sp);
snprintf(str, MAX_LEN,"%s %s %s\n", in[1], in[0], sp);
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"SDF UL LINK : %s\n", LOG_VALUE, str);
}
/**
 * @brief : Function to the parse the SDF filter rule
 *          Expected format:
 *          <src>/<mask> <dst>/<mask> <sp_lo> : <sp_hi> <dp_lo> : <dp_hi> <proto>/<mask>
 *          A 0:0 port range is treated as "any" and expanded to 0:65535.
 * @param : str, input string
 * @param : v, acl rule
 * @param : has_userdata, non-zero when a trailing userdata field is present
 * @return : Returns 0 in case of success , negative error values otherwise
 */
static int
parse_cb_ipv4vlan_rule(char *str, struct rte_acl_rule *v, int has_userdata)
{
	int i, rc;
	char *s = NULL, *sp = NULL, *in[CB_FLD_NUM]={0}, tmp[MAX_LEN] = {0};
	static const char *dlm = " \t\n";
	int dim = has_userdata ? CB_FLD_NUM : CB_FLD_USERDATA;
	char *src_low_port = "0", *src_high_port = "65535";
	char *dst_low_port = "0", *dst_high_port = "65535";

	/* BUGFIX: strncpy() leaves tmp unterminated when strlen(str) >= MAX_LEN;
	 * snprintf() always NUL-terminates. */
	snprintf(tmp, MAX_LEN, "%s", str);
	s = tmp;

	/* Tokenize the rule string into its expected fields */
	for (i = 0; i != dim; i++, s = NULL) {
		in[i] = strtok_r(s, dlm, &sp);
		if (in[i] == NULL) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"ERROR: String is NULL\n", LOG_VALUE);
			return -EINVAL;
		}
	}

	/* Source address/mask */
	rc = parse_ipv4_net(in[CB_FLD_SRC_ADDR],
			&v->field[SRC_FIELD_IPV4].value.u32,
			&v->field[SRC_FIELD_IPV4].mask_range.u32);
	if (rc != 0) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"INVALID Source Address/Mask: %s\n", LOG_VALUE,
				in[CB_FLD_SRC_ADDR]);
		return rc;
	}

	/* Destination address/mask */
	rc = parse_ipv4_net(in[CB_FLD_DST_ADDR],
			&v->field[DST_FIELD_IPV4].value.u32,
			&v->field[DST_FIELD_IPV4].mask_range.u32);
	if (rc != 0) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"INVALID Destination Address/Mask: %s\n", LOG_VALUE,
				in[CB_FLD_DST_ADDR]);
		return rc;
	}

	/* Source port range; 0:0 means "any" -> 0:65535 */
	if(atoi(in[CB_FLD_SRC_PORT_LOW]) == 0 && atoi(in[CB_FLD_SRC_PORT_HIGH]) == 0){
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"SRC Port LOW and High both 0\n", LOG_VALUE);
		GET_CB_FIELD(src_low_port,
				v->field[SRCP_FIELD_IPV4].value.u16, 0, UINT16_MAX, 0);
		GET_CB_FIELD(src_high_port,
				v->field[SRCP_FIELD_IPV4].mask_range.u16,
				0, UINT16_MAX, 0);
	} else{
		GET_CB_FIELD(in[CB_FLD_SRC_PORT_LOW],
				v->field[SRCP_FIELD_IPV4].value.u16, 0, UINT16_MAX, 0);
		GET_CB_FIELD(in[CB_FLD_SRC_PORT_HIGH],
				v->field[SRCP_FIELD_IPV4].mask_range.u16,
				0, UINT16_MAX, 0);
	}

	/* The ":" delimiter must sit between the port bounds */
	if (strncmp(in[CB_FLD_SRC_PORT_DLM], cb_port_delim,
				sizeof(cb_port_delim)) != 0) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"INVALID Source Port/Mask: %s\n", LOG_VALUE,
				in[CB_FLD_SRC_PORT_DLM]);
		return -EINVAL;
	}

	/* Destination port range; 0:0 means "any" -> 0:65535 */
	if(atoi(in[CB_FLD_DST_PORT_LOW]) == 0 && atoi(in[CB_FLD_DST_PORT_HIGH]) == 0){
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"DST Port LOW and High both 0\n", LOG_VALUE);
		GET_CB_FIELD(dst_low_port,
				v->field[DSTP_FIELD_IPV4].value.u16, 0, UINT16_MAX, 0);
		GET_CB_FIELD(dst_high_port,
				v->field[DSTP_FIELD_IPV4].mask_range.u16, 0, UINT16_MAX, 0);
	} else {
		GET_CB_FIELD(in[CB_FLD_DST_PORT_LOW],
				v->field[DSTP_FIELD_IPV4].value.u16, 0, UINT16_MAX, 0);
		GET_CB_FIELD(in[CB_FLD_DST_PORT_HIGH],
				v->field[DSTP_FIELD_IPV4].mask_range.u16, 0, UINT16_MAX, 0);
	}

	if (strncmp(in[CB_FLD_DST_PORT_DLM], cb_port_delim,
				sizeof(cb_port_delim)) != 0) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"INVALID Destination Port/Mask: %s\n", LOG_VALUE,
				in[CB_FLD_DST_PORT_DLM]);
		return -EINVAL;
	}

	/* Each port range must satisfy low <= high */
	if (v->field[SRCP_FIELD_IPV4].mask_range.u16
			< v->field[SRCP_FIELD_IPV4].value.u16
			|| v->field[DSTP_FIELD_IPV4].mask_range.u16
			< v->field[DSTP_FIELD_IPV4].value.u16) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"INVALID Src and Dst Mask Ranges\n", LOG_VALUE);
		return -EINVAL;
	}

	/* Protocol "value/mask" */
	GET_CB_FIELD(in[CB_FLD_PROTO], v->field[PROTO_FIELD_IPV4].value.u8,
			0, UINT8_MAX, '/');
	GET_CB_FIELD(in[CB_FLD_PROTO], v->field[PROTO_FIELD_IPV4].mask_range.u8,
			0, UINT8_MAX, 0);

	if (has_userdata)
		GET_CB_FIELD(in[CB_FLD_USERDATA], v->data.userdata, 0,
				UINT32_MAX, 0);

	return 0;
}
/**
 * @brief : Expand an IPv6 address into its fixed, fully-written form:
 *          eight zero-padded 16-bit hex groups separated by ':'.
 * @param : addr, binary IPv6 address
 * @param : str, output buffer of at least IPV6_STR_LEN bytes
 * @return : Returns nothing
 */
static
void ipv6_expander(const struct in6_addr *addr, char *str){
	int written = 0;
	int grp = 0;

	/* Emit one 4-hex-digit group per pair of bytes; ':' before every
	 * group except the first. */
	for (grp = 0; grp < 8; grp++) {
		written += snprintf(str + written, IPV6_STR_LEN - written,
				(grp == 0) ? "%02x%02x" : ":%02x%02x",
				(int)addr->s6_addr[2 * grp],
				(int)addr->s6_addr[2 * grp + 1]);
	}
	return;
}
/**
 * @brief : Parse the address part of "<ipv6>/<mask>" into four 32-bit words.
 *          The address is first normalized to the fully-expanded 8-group
 *          form so GET_CB_FIELD can consume fixed ':'-delimited fields.
 * @param : in, input string; modified in place (strtok_r splits at '/')
 * @param : end, out: points at the mask portion inside the caller's buffer
 * @param : v, out: address as four 32-bit words, most significant first
 * @param : dlm, delimiter expected after the last group (callers pass '/')
 * @return : 0 on success, -1 on inet_pton failure, -EINVAL on field errors
 */
static int
parse_ipv6_addr(char *in, const char **end, uint32_t v[IPV6_ADDR_U32],
		char dlm)
{
	struct in6_addr ipv6 = {0};
	char tmp[IPV6_STR_LEN];
	/* BUGFIX: this scratch buffer was heap-allocated, leaked on every
	 * error path (inet_pton failure, GET_CB_FIELD -EINVAL), and *end was
	 * returned pointing into it after free() — a use-after-free for the
	 * caller. A stack buffer plus returning the mask via the caller's own
	 * buffer (saveptr) fixes both the leak and the dangling pointer. */
	char expanded[IPV6_STR_LEN + 2] = {0};
	char *saveptr = NULL, *ipv6_str = NULL;
	const char *pos = NULL;
	uint32_t addr[IPV6_ADDR_U16] = {0};

	/* Split "<addr>/<mask>"; saveptr then points at the mask text, which
	 * lives inside the caller-owned buffer 'in' and outlives this call. */
	ipv6_str = strtok_r(in, "/", &saveptr);
	if(inet_pton(AF_INET6, ipv6_str, &ipv6)) {
		ipv6_expander(&ipv6, tmp);
	}else{
		clLog(clSystemLog, eCLSeverityCritical,"IP conversion failes");
		return -1;
	}

	/* Re-append the delimiter so GET_CB_FIELD can consume the last group */
	snprintf(expanded, sizeof(expanded), "%s%c", tmp, dlm);

	pos = expanded;
	GET_CB_FIELD(pos, addr[0], 16, UINT16_MAX, ':');
	GET_CB_FIELD(pos, addr[1], 16, UINT16_MAX, ':');
	GET_CB_FIELD(pos, addr[2], 16, UINT16_MAX, ':');
	GET_CB_FIELD(pos, addr[3], 16, UINT16_MAX, ':');
	GET_CB_FIELD(pos, addr[4], 16, UINT16_MAX, ':');
	GET_CB_FIELD(pos, addr[5], 16, UINT16_MAX, ':');
	GET_CB_FIELD(pos, addr[6], 16, UINT16_MAX, ':');
	GET_CB_FIELD(pos, addr[7], 16, UINT16_MAX, dlm);

	/* Hand the mask portion (still in the caller's buffer) back */
	*end = saveptr;

	/* Pack the eight 16-bit groups into four 32-bit words */
	v[0] = (addr[0] << 16) + addr[1];
	v[1] = (addr[2] << 16) + addr[3];
	v[2] = (addr[4] << 16) + addr[5];
	v[3] = (addr[6] << 16) + addr[7];

	return 0;
}
/**
 * @brief : Parse "<ipv6>/<prefix>" into four rte_acl_field entries, each
 *          carrying a 32-bit address word and its share of the prefix
 *          length (0..32 bits per word).
 * @param : in, input string (modified by parse_ipv6_addr)
 * @param : field, four-element output array (SRC1..SRC4 or DST1..DST4)
 * @return : 0 on success, negative error code on parse failure
 */
static int
parse_ipv6_net(char *in, struct rte_acl_field field[4])
{
	int32_t rc;
	const char *mp;
	uint32_t i, m, v[4];
	const uint32_t nbu32 = sizeof(uint32_t) * CHAR_BIT;

	/* get address. */
	rc = parse_ipv6_addr(in, &mp, v, '/');
	if (rc != 0)
		return rc;

	/* get mask. (upper bound CHAR_BIT * sizeof(v) == 128 bits) */
	GET_CB_FIELD(mp, m, 0, CHAR_BIT * sizeof(v), 0);

	/* put all together. Distribute the prefix length over the four
	 * 32-bit words: full words get nbu32 bits, the partial word gets the
	 * remainder, later words get 0. (32 here equals nbu32.) */
	for (i = 0; i != RTE_DIM(v); i++) {
		if (m >= (i + 1) * nbu32)
			field[i].mask_range.u32 = nbu32;
		else
			field[i].mask_range.u32 = m > (i * nbu32) ?
				m - (i * 32) : 0;

		field[i].value.u32 = v[i];
	}

	return 0;
}
/**
 * @brief : Parse an IPv6 SDF filter rule of the form:
 *          <src6>/<mask> <dst6>/<mask> <proto>/<mask> [userdata]
 * @param : str, input rule string
 * @param : v, acl rule to fill
 * @param : has_userdata, non-zero when a trailing userdata field is present
 * @return : 0 on success, negative error value otherwise
 */
static int
parse_cb_ipv6_rule(char *str, struct rte_acl_rule *v, int has_userdata)
{
	int i, rc;
	char *s, *sp = NULL, *in[CB_IPV6_FLD_NUM] = {0}, tmp[MAX_LEN] = {0};
	static const char *dlm = " \t\n";
	int dim = has_userdata ? CB_IPV6_FLD_NUM : CB_IPV6_FLD_USERDATA;

	/* BUGFIX: strncpy() leaves tmp unterminated when strlen(str) >= MAX_LEN;
	 * snprintf() always NUL-terminates. */
	snprintf(tmp, MAX_LEN, "%s", str);
	s = tmp;

	/* Tokenize the rule string into its expected fields */
	for (i = 0; i != dim; i++, s = NULL) {
		in[i] = strtok_r(s, dlm, &sp);
		if (in[i] == NULL)
			return -EINVAL;
	}

	rc = parse_ipv6_net(in[CB_IPV6_FLD_SRC_ADDR], v->field + SRC1_FIELD_IPV6);
	if (rc != 0) {
		/* BUGFIX: log the token that actually failed; CB_FLD_SRC_ADDR is
		 * an index into the IPv4 token layout, not this IPv6 one. */
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"failed to read source address/mask: %s\n",LOG_VALUE,
				in[CB_IPV6_FLD_SRC_ADDR]);
		return rc;
	}

	rc = parse_ipv6_net(in[CB_IPV6_FLD_DST_ADDR], v->field + DST1_FIELD_IPV6);
	if (rc != 0) {
		/* BUGFIX: was logging in[CB_FLD_SRC_ADDR] (wrong enum family and
		 * wrong field) for a destination-address failure. */
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"failed to read destination address/mask: %s\n",LOG_VALUE,
				in[CB_IPV6_FLD_DST_ADDR]);
		return rc;
	}

	/* Protocol "value/mask" */
	GET_CB_FIELD(in[CB_IPV6_FLD_PROTO], v->field[PROTO_FIELD_IPV6].value.u8,
			0, UINT8_MAX, '/');
	GET_CB_FIELD(in[CB_IPV6_FLD_PROTO], v->field[PROTO_FIELD_IPV6].mask_range.u8,
			0, UINT8_MAX, 0);

	if (has_userdata)
		GET_CB_FIELD(in[CB_IPV6_FLD_USERDATA], v->data.userdata,
				0, UINT32_MAX, 0);

	return 0;
}
/**
 * @brief : Print the Rule entry.
 *          twalk() callback over the local rules tree; a node is printed
 *          only on its final visit (leaf/postorder) so each rule is
 *          reported exactly once.
 * @param : nodep, tree node (points at a struct acl4_rule *)
 * @param : which, traversal order
 * @param : depth, node depth
 * @return : Returns nothing
 */
static void acl_rule_print(const void *nodep, const VISIT which, const int depth)
{
	struct acl4_rule *r = NULL;
	uint32_t precedence = 0;

	/* twalk hands out a const node pointer; silence -Wcast-qual while
	 * casting it away for read-only access. */
#pragma GCC diagnostic push /* require GCC 4.6 */
#pragma GCC diagnostic ignored "-Wcast-qual"
	r = *(struct acl4_rule **) nodep;
#pragma GCC diagnostic pop /* require GCC 4.6 */

	/* data.userdata stores precedence offset by ACL_DENY_SIGNATURE. */
	precedence = r->data.userdata - ACL_DENY_SIGNATURE;

	switch (which) {
	case leaf:
	case postorder:
		clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Depth : %d, Precedence : %u", LOG_VALUE,
			depth, precedence);
		clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Prio : %x, Category Mask : %x\n", LOG_VALUE,
			r->data.priority, r->data.category_mask);
		print_one_ipv4_rule(r, 1);
		break;
	default:
		break;
	}
}
/**
 * @brief : twalk() callback that pushes one IPv4 rule into the rte_acl
 *          context selected by the global acl_table_indx.
 * @param : nodep, tree node holding a struct acl4_rule *
 * @param : which, traversal order
 * @param : depth, node depth (unused)
 * @return : Returns nothing
 */
static void add_single_rule(const void *nodep, const VISIT which, const int depth)
{
	struct acl4_rule *rule = NULL;
	struct rte_acl_ctx *ctx = acl_config[acl_table_indx].acx_ipv4;

#pragma GCC diagnostic push /* require GCC 4.6 */
#pragma GCC diagnostic ignored "-Wcast-qual"
	rule = *(struct acl4_rule **) nodep;
#pragma GCC diagnostic pop /* require GCC 4.6 */

	/* Act only on a node's final visit so each rule is added once. */
	if (which != leaf && which != postorder)
		return;

	if (rte_acl_add_rules(ctx, (struct rte_acl_rule *)rule, 1)) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Failed to Add SDF rule\n", LOG_VALUE);
	}
	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"SDF Rule Added in ACL table\n", LOG_VALUE);
}
/**
 * @brief : twalk() callback that pushes one IPv6 rule into the rte_acl
 *          context selected by the global acl_table_indx.
 * @param : nodep, tree node holding a struct acl6_rule *
 * @param : which, traversal order
 * @param : depth, node depth (unused)
 * @return : Returns nothing
 */
static void add_single_ipv6_rule(const void *nodep, const VISIT which, const int depth)
{
	struct acl6_rule *rule = NULL;
	struct rte_acl_ctx *ctx = acl_config[acl_table_indx].acx_ipv6;

#pragma GCC diagnostic push /* require GCC 4.6 */
#pragma GCC diagnostic ignored "-Wcast-qual"
	rule = *(struct acl6_rule **) nodep;
#pragma GCC diagnostic pop /* require GCC 4.6 */

	/* Act only on a node's final visit so each rule is added once. */
	if (which != leaf && which != postorder)
		return;

	if (rte_acl_add_rules(ctx, (struct rte_acl_rule *)rule, 1)) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Failed to Add SDF rule\n", LOG_VALUE);
	}
	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"SDF Rule Added in ACL table\n", LOG_VALUE);
}
/**
* @brief : Compare acl rule precedence.
* @param : r1p, first acl rule
* @param : r2p, second acl rule
* @return : Returns 0 in case of success , -1 otherwise
*/
static int acl_rule_prcdnc_compare(const void *r1p, const void *r2p)
{
struct acl4_rule *r1, *r2;
r1 = (struct acl4_rule *) r1p;
r2 = (struct acl4_rule *) r2p;
/* compare precedence */
if (r1->data.userdata < r2->data.userdata) {
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Compare precendence failed\n", LOG_VALUE);
return -1;
}
else if (r1->data.userdata == r2->data.userdata)
return 0;
else
return 1;
}
/**
* @brief : Compare rule entry.
* @param : r1p, first acl rule
* @param : r2p, second acl rule
* @return : Returns 0 in case of success , -1 otherwise
*/
static int acl_rule_compare(const void *r1p, const void *r2p)
{
struct acl4_rule *rule1, *rule2;
rule1 = (struct acl4_rule *) r1p;
rule2 = (struct acl4_rule *) r2p;
/* compare rule */
if ((rule1->data.userdata == rule2->data.userdata) &&
(rule1->field[SRC_FIELD_IPV4].value.u32 ==
rule2->field[SRC_FIELD_IPV4].value.u32) &&
(rule1->field[SRC_FIELD_IPV4].mask_range.u32 ==
rule2->field[SRC_FIELD_IPV4].mask_range.u32) &&
(rule1->field[SRCP_FIELD_IPV4].value.u16 ==
rule2->field[SRCP_FIELD_IPV4].value.u16) &&
(rule1->field[SRCP_FIELD_IPV4].mask_range.u16 ==
rule2->field[SRCP_FIELD_IPV4].mask_range.u16) &&
(rule1->field[DST_FIELD_IPV4].value.u32 ==
rule2->field[DST_FIELD_IPV4].value.u32) &&
(rule1->field[DST_FIELD_IPV4].mask_range.u32 ==
rule2->field[DST_FIELD_IPV4].mask_range.u32) &&
(rule1->field[DSTP_FIELD_IPV4].value.u16 ==
rule2->field[DSTP_FIELD_IPV4].value.u16) &&
(rule1->field[DSTP_FIELD_IPV4].mask_range.u16 ==
rule2->field[DSTP_FIELD_IPV4].mask_range.u16) &&
(rule1->field[PROTO_FIELD_IPV4].value.u8 ==
rule2->field[PROTO_FIELD_IPV4].value.u8) &&
(rule1->field[PROTO_FIELD_IPV4].mask_range.u8 ==
rule2->field[PROTO_FIELD_IPV4].mask_range.u8)){
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"SDF Rule matched\n", LOG_VALUE);
return 0;
}
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"SDF Rule mismatched\n", LOG_VALUE);
return -1;
}
/**
* @brief : Compare rule entry.
* @param : r1p, first acl rule
* @param : r2p, second acl rule
* @return : Returns 0 in case of success , -1 otherwise
*/
static int acl_ipv6_rule_compare(const void *r1p, const void *r2p)
{
struct acl6_rule *rule1, *rule2;
rule1 = (struct acl6_rule *) r1p;
rule2 = (struct acl6_rule *) r2p;
/* compare rule */
if ((rule1->data.userdata == rule2->data.userdata) &&
(rule1->field[SRC1_FIELD_IPV6].value.u32 ==
rule2->field[SRC1_FIELD_IPV6].value.u32) &&
(rule1->field[SRC2_FIELD_IPV6].value.u32 ==
rule2->field[SRC2_FIELD_IPV6].value.u32) &&
(rule1->field[SRC3_FIELD_IPV6].value.u32 ==
rule2->field[SRC3_FIELD_IPV6].value.u32) &&
(rule1->field[SRC4_FIELD_IPV6].value.u32 ==
rule2->field[SRC4_FIELD_IPV6].value.u32) &&
(rule1->field[DST1_FIELD_IPV6].value.u32 ==
rule2->field[DST1_FIELD_IPV6].value.u32) &&
(rule1->field[DST2_FIELD_IPV6].value.u32 ==
rule2->field[DST2_FIELD_IPV6].value.u32) &&
(rule1->field[DST3_FIELD_IPV6].value.u32 ==
rule2->field[DST3_FIELD_IPV6].value.u32) &&
(rule1->field[DST4_FIELD_IPV6].value.u32 ==
rule2->field[DST4_FIELD_IPV6].value.u32) &&
(rule1->field[PROTO_FIELD_IPV6].value.u8 ==
rule2->field[PROTO_FIELD_IPV6].value.u8) &&
(rule1->field[PROTO_FIELD_IPV6].mask_range.u8 ==
rule2->field[PROTO_FIELD_IPV6].mask_range.u8) &&
((rule1->field[SRC1_FIELD_IPV6].mask_range.u32
+ rule1->field[SRC2_FIELD_IPV6].mask_range.u32
+ rule1->field[SRC3_FIELD_IPV6].mask_range.u32
+ rule1->field[SRC4_FIELD_IPV6].mask_range.u32) ==
(rule2->field[SRC1_FIELD_IPV6].mask_range.u32
+ rule2->field[SRC2_FIELD_IPV6].mask_range.u32
+ rule2->field[SRC3_FIELD_IPV6].mask_range.u32
+ rule2->field[SRC4_FIELD_IPV6].mask_range.u32)) &&
((rule1->field[DST1_FIELD_IPV6].mask_range.u32
+ rule1->field[DST2_FIELD_IPV6].mask_range.u32
+ rule1->field[DST3_FIELD_IPV6].mask_range.u32
+ rule1->field[DST4_FIELD_IPV6].mask_range.u32) ==
(rule2->field[DST1_FIELD_IPV6].mask_range.u32
+ rule2->field[DST2_FIELD_IPV6].mask_range.u32
+ rule2->field[DST3_FIELD_IPV6].mask_range.u32
+ rule2->field[DST4_FIELD_IPV6].mask_range.u32))) {
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"SDF Rule matched\n", LOG_VALUE);
return 0;
}
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"SDF Rule mismatched\n", LOG_VALUE);
return -1;
}
/**
 * @brief : Create the local (tsearch-based) ACL rules table for an index.
 * @param : indx, ACL table index.
 * @param : max_elements, max number of elements in this table.
 * @param : is_ipv6, unused here; kept for signature symmetry with callers.
 * @return : Returns 0 in case of success, -1 when the slot is in use
 */
static int
up_acl_rules_table_create(uint32_t indx, uint32_t max_elements, uint8_t is_ipv6)
{
	struct acl_rules_table *tbl = &acl_rules_table[indx];

	/* A non-NULL tree root means this slot is already occupied. */
	if (tbl->root != NULL) {
		clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"ACL table for index : \"%s\" Exist\n", LOG_VALUE, tbl->name);
		return -1;
	}

	tbl->num_entries = 0;
	tbl->max_entries = max_elements;
	snprintf(tbl->name, MAX_LEN, "ACL_RULES_TABLE-%u", indx);

	/* Wire up the table's comparison / printing / insertion callbacks. */
	tbl->compare = acl_rule_prcdnc_compare;
	tbl->compare_rule = acl_rule_compare;
	tbl->compare_ipv6_rule = acl_ipv6_rule_compare;
	tbl->print_entry = acl_rule_print;
	tbl->add_entry = add_single_rule;
	tbl->add_ipv6_entry = add_single_ipv6_rule;

	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"ACL Rules table for \"%s\" Created\n", LOG_VALUE, tbl->name);
	return 0;
}
/**
 * @brief : Create an rte_acl context (one ACL table).
 *          NOTE(review): the previous comment described ACL_READ_CFG rule
 *          loading and default-rule insertion; this function only creates
 *          the context — no rules are added here.
 * @param : name, name string for the context.
 * @param : max_elements, max rules that can be added to this context.
 * @param : socketid, NUMA socket id for the allocation.
 * @param : is_ipv6, selects the IPv6 field layout (rule size) when set.
 * @return : Returns rte_acl_ctx on Success; exits the process on failure
 */
static struct rte_acl_ctx *acl_context_init(char *name,
	unsigned int max_elements, int socketid, uint8_t is_ipv6)
{
	struct rte_acl_ctx *context = NULL;
	struct rte_acl_param acl_param = {0};
	/* Rule size depends on the number of ACL fields of the family. */
	int dim = (is_ipv6 ? RTE_DIM(ipv6_defs) : RTE_DIM(ipv4_defs));

	/* Create ACL contexts */
	acl_param.name = name;
	acl_param.socket_id = socketid;
	acl_param.rule_size = RTE_ACL_RULE_SZ(dim);
	acl_param.max_rule_num = max_elements;

	/*Create the ACL Table */
	context = rte_acl_create(&acl_param);
	if (context == NULL)
		rte_exit(EXIT_FAILURE, LOG_FORMAT"Failed to create ACL context\n", LOG_VALUE);

	/* Optionally force the scalar classify method (config-driven). */
	if (parm_config.scalar
		&& rte_acl_set_ctx_classify(context,
			RTE_ACL_CLASSIFY_SCALAR)
		!= 0)
		rte_exit(EXIT_FAILURE, LOG_FORMAT"Failed to setup classify method for ACL context\n",
			LOG_VALUE);
	return context;
}
/**
 * @brief : Initialize an acl_config slot — zero it and create one rte_acl
 *          context of the requested family.
 * @param : acl_config, config base address of this table.
 * @param : name, name string for table name.
 * @param : max_elements, max elements that can be added in this table.
 * @param : is_ipv6, create the IPv6 context instead of the IPv4 one.
 * @return : Returns 0 (context-creation failure exits the process)
 */
static int
acl_config_init(struct acl_config *acl_config,
	char *name, uint32_t max_elements, uint8_t is_ipv6)
{
	struct rte_acl_ctx *ctx = NULL;

	memset(acl_config, 0, sizeof(struct acl_config));

	/* Both families create on socket 0; only the target field differs. */
	ctx = acl_context_init(name, max_elements, 0, is_ipv6);
	if (is_ipv6)
		acl_config->acx_ipv6 = ctx;
	else
		acl_config->acx_ipv4 = ctx;

	return 0;
}
/**
 * @brief : Create SDF rules table.
 *          Claims the next global table index, creates the rte_acl context
 *          for it and the matching local rules-tree copy.
 * @param : max_elements, max number of elements in this table.
 * @param : is_ipv6, create an IPv6 table instead of an IPv4 one.
 * @return : Returns the new table index in case of success , -1 otherwise
 */
static int
up_sdf_filter_table_create(uint32_t max_elements, uint8_t is_ipv6)
{
	char name[NAME_LEN];
	char *buf = "ACLTable-";

	/* Claim the next free slot — NOTE(review): global bookkeeping,
	 * presumably single-threaded table management; confirm. */
	acl_table_indx = acl_table_indx_offset;

	/* Increment the New Created ACL tables */
	snprintf(name, NAME_LEN, "%s%u", buf, acl_table_indx);

	/* Configure the ACL table */
	if (acl_config_init(&acl_config[acl_table_indx], name,
			max_elements, is_ipv6) < 0) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Acl config init failed\n", LOG_VALUE);
		/* TODO: Error Handling */
		return -1;
	}

	/* Create the local acl rules table copy */
	if (up_acl_rules_table_create(acl_table_indx, max_elements, is_ipv6)) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Up acl rules table create failed\n", LOG_VALUE);
		/* TODO: Error Handling */
		return -1;
	}

	/* Increment the ACL Table index */
	acl_table_indx_offset++;

	/* Return New created ACL table index */
	return acl_table_indx;
}
/**
 * @brief : Walk the local rules tree and add every rule to the rte acl
 *          context (the twalk callbacks read the table index from the
 *          global acl_table_indx).
 * @param : indx, ACL Table Index
 * @param : is_ipv6, walk with the IPv6 add callback instead of IPv4
 * @return : Returns nothing
 */
static void
add_rules_to_rte_acl(uint8_t indx, uint8_t is_ipv6)
{
	struct acl_rules_table *tbl = &acl_rules_table[indx];

	acl_table_indx = indx;
	twalk(tbl->root, is_ipv6 ? tbl->add_ipv6_entry : tbl->add_entry);
}
/**
 * @brief : To reset and build ACL table.
 *          Resets the acl context rules, re-adds the rules from the local
 *          table and rebuilds the run-time trie.
 *          This should be called only for standby tables.
 * @param : indx, table index to reset and build.
 * @param : is_ipv6, operate on the IPv6 context of this index.
 * @return : Returns 0 in case of success (build failure exits the process)
 */
static int
reset_and_build_rules(uint32_t indx, uint8_t is_ipv6)
{
	int ret = 0;
	/* BUG FIX: the IPv6 branch previously used RTE_DIM(ipv4_defs),
	 * so IPv6 builds ran with the IPv4 field count. */
	int dim = is_ipv6 ? RTE_DIM(ipv6_defs) : RTE_DIM(ipv4_defs);
	struct rte_acl_config acl_build_param = {0};
	struct acl_config *pacl_config = &acl_config[indx];
	struct rte_acl_ctx *context =
		is_ipv6 ? pacl_config->acx_ipv6 : pacl_config->acx_ipv4;

	/* Delete all rules from the ACL context. */
	rte_acl_reset_rules(context);

	/* Add the rules from local table to ACL table */
	add_rules_to_rte_acl(indx, is_ipv6);

	/* Perform builds (acl_build_param is already zero-initialized). */
	acl_build_param.num_categories = DEFAULT_MAX_CATEGORIES;
	acl_build_param.num_fields = dim;
	if (!is_ipv6)
		memcpy(&acl_build_param.defs, ipv4_defs,
			sizeof(ipv4_defs));
	else
		memcpy(&acl_build_param.defs, ipv6_defs,
			sizeof(ipv6_defs));

	/* Build the ACL run time structure */
	if ((ret = rte_acl_build(context, &acl_build_param)) != 0) {
		rte_exit(EXIT_FAILURE, LOG_FORMAT"Failed to build ACL trie,"
			"ACL_RULES_TABLE-%u, ret:%d, error:%s\n",
			LOG_VALUE, indx, ret, rte_strerror(rte_errno));
	}

	if (!is_ipv6)
		pacl_config->acx_ipv4_built = 1;
	else
		pacl_config->acx_ipv6_built = 1;

#ifdef DEBUG_ACL
	rte_acl_dump(context);
#endif
	return 0;
}
/**
 * @brief : Add rules entry to the local (tsearch) rules table.
 * @param : t, rules table pointer
 * @param : rule, element to be added (treated as struct acl6_rule when
 *          is_ipv6 is set).
 * @param : is_ipv6, treat *rule as an IPv6 rule.
 * @return : Returns 0 in case of success , -1 otherwise
 */
static int
up_rules_entry_add(struct acl_rules_table *t,
	struct acl4_rule *rule, uint8_t is_ipv6)
{
	/* Capacity overflow is only logged (historic behavior preserved). */
	if (t->num_entries == t->max_entries)
		clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT":%s reached max rules entries\n", LOG_VALUE, t->name);

	if (!is_ipv6) {
		struct acl4_rule *new = rte_malloc("acl_rule", sizeof(struct acl4_rule),
			RTE_CACHE_LINE_SIZE);
		if (new == NULL) {
			clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"ADC: Failed to allocate memory\n", LOG_VALUE);
			return -1;
		}
		memcpy(new, rule, sizeof(struct acl4_rule));
		/* put node into the tree */
		if (tsearch(new, &t->root, t->compare_rule) == 0) {
			clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT":Fail to add acl precedance %d\n", LOG_VALUE,
				rule->data.userdata - ACL_DENY_SIGNATURE);
			/* BUG FIX: the copy leaked when tsearch failed. */
			rte_free(new);
			return -1;
		}
	} else {
		struct acl6_rule *new = rte_malloc("acl_rule", sizeof(struct acl6_rule),
			RTE_CACHE_LINE_SIZE);
		if (new == NULL) {
			clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"ADC: Failed to allocate memory\n", LOG_VALUE);
			return -1;
		}
		memcpy(new, rule, sizeof(struct acl6_rule));
		/* put node into the tree */
		if (tsearch(new, &t->root, t->compare_ipv6_rule) == 0) {
			clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT":Fail to add acl precedance %d\n", LOG_VALUE,
				rule->data.userdata - ACL_DENY_SIGNATURE);
			/* BUG FIX: the copy leaked when tsearch failed. */
			rte_free(new);
			return -1;
		}
	}
	t->num_entries++;
	return 0;
}
/* Currently NOT USING */
/**
 * @brief : To add sdf or adc filter in acl table.
 *          The entry is stored in the local table and the standby ACL
 *          context is rebuilt.
 * @param : indx, ACL table Index
 * @param : pkt_filter, packet filter which includes precedence and the
 *          acl rule string to be added.
 * @return : Returns 0 in case of success , -1 otherwise
 */
static int
up_filter_entry_add(uint32_t indx, struct sdf_pkt_filter *pkt_filter)
{
	struct acl4_rule r = {0};
	struct rte_acl_rule *next = NULL;
	char *buf = NULL;
	uint8_t prio = 0;

	/* check sdf filter exist or not */
	if (pkt_filter == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Read msg payload failed\n", LOG_VALUE);
		return -1;
	}

	/* Higher precedence value -> lower ACL priority.
	 * (Removed a dead "prio = 0" special case for
	 * SDF_DEFAULT_DROP_RULE_ID: it was unconditionally overwritten.) */
	prio = (255 - pkt_filter->precedence);
	buf = (char *)&pkt_filter->u.rule_str[0];
	next = (struct rte_acl_rule *)&r;

	/* Parse the sdf filter into acl ipv4 rule format */
	if (parse_cb_ipv4vlan_rule(buf, next, 0) != 0) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Parse rules error\n", LOG_VALUE);
		return -1;
	}

	next->data.userdata = pkt_filter->precedence + ACL_DENY_SIGNATURE;
	next->data.priority = prio;
	next->data.category_mask = -1;

	/* Find similar rule is present or not */
	struct acl_rules_table *ctx = &acl_rules_table[indx];
	if (ctx != NULL) {
		clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Search SDF Rule in ACL_Table_Index-%u\n", LOG_VALUE, indx);
		/* tfind returns a tree-node pointer; used only as an
		 * existence check here. */
		if (tfind(next, &ctx->root, ctx->compare_rule) != NULL) {
			clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"SDF Rule match in ACL_Table_Index-%u\nDP: SDF Rule:%s\n",
				LOG_VALUE, indx, pkt_filter->u.rule_str);
			return 0;
		}
	}

	if (up_rules_entry_add(&acl_rules_table[indx],
			(struct acl4_rule *)next, 0) < 0) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Up rules entry add failed\n", LOG_VALUE);
		return -1;
	}

	if (reset_and_build_rules(indx, 0) < 0) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Reset and build rules Failed\n", LOG_VALUE);
		return -1;
	}

	/* BUG FIX: LOG_VALUE must precede the message arguments so the
	 * format specifiers line up with their values. */
	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"ACL ADD: %s, ACL_Indx:%u, precedence:%u, rule:%s\n",
		LOG_VALUE, "SDF RULE", indx, pkt_filter->precedence, pkt_filter->u.rule_str);
	return 0;
}
/* Function to retrieve (or, on session create, build) the ACL table index
 * that holds the given SDF packet filter. Returns the index, or -1. */
int
get_acl_table_indx(struct sdf_pkt_filter *pkt_filter, uint8_t is_create)
{
	uint8_t prio = 0;
	/* BUG FIX: must be signed — with the old uint32_t the "< 0" check on
	 * up_sdf_filter_table_create() could never fire. */
	int it = 0;
	char *buf = NULL;
	struct acl4_rule r4 = {0};
	struct acl6_rule r6 = {0};
	struct rte_acl_rule *next = NULL;
	uint8_t is_ipv6 = 0;

	/* BUG FIX: NULL check moved before the first dereference
	 * (rule_ip_type was previously read above this check). */
	if (pkt_filter == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"read msg_payload failed\n", LOG_VALUE);
		return -1;
	}
	is_ipv6 = (pkt_filter->rule_ip_type == RULE_IPV6);

	prio = (255 - pkt_filter->precedence);
	buf = (char *)&pkt_filter->u.rule_str[0];

	if (!is_ipv6) {
		next = (struct rte_acl_rule *)&r4;
		/* Parse the sdf filter into acl ipv4 rule format */
		if (parse_cb_ipv4vlan_rule(buf, next, 0) != 0) {
			clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Parse IPv4 rules error\n", LOG_VALUE);
			return -1;
		}
	} else {
		next = (struct rte_acl_rule *)&r6;
		/* Parse the sdf filter into acl ipv6 rule format */
		if (parse_cb_ipv6_rule(buf, next, 0) != 0) {
			clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Parse IPv6 rules error\n", LOG_VALUE);
			print_one_ipv6_rule((struct acl6_rule *)next, 1);
			return -1;
		}
	}

	/* Fill the received rule information */
	next->data.userdata = pkt_filter->precedence + ACL_DENY_SIGNATURE;
	next->data.priority = prio;
	next->data.category_mask = -1;
	print_one_ipv6_rule((struct acl6_rule *)next, 1);

	/* Find similar rule is present or not */
	for (uint32_t itr = 1; itr < acl_table_indx_offset; itr++) {
		struct acl_rules_table *ctx = &acl_rules_table[itr];
		void *node = NULL;

		clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Search SDF Rule in ACL_Table_Index :%u\n",
			LOG_VALUE, itr);
		node = tfind(next, &ctx->root,
			is_ipv6 ? ctx->compare_ipv6_rule : ctx->compare_rule);
		if (node != NULL) {
			clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"SDF Rule match in ACL_Table_Index-%u\nDP: SDF Rule:%s\n",
				LOG_VALUE, itr, pkt_filter->u.rule_str);
			if (SESS_CREATE == is_create)
				acl_rules_table[itr].num_of_ue++;
			return itr;
		}
	}

	if (SESS_CREATE != is_create)
		return -1;

	/* If ACL table is not present than create the new ACL table */
	if ((it = up_sdf_filter_table_create(MAX_SDF_RULE_NUM, is_ipv6)) < 0) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Failed to create up sdf filter table\n", LOG_VALUE);
		return -1;
	}

	/* Add the sdf filter rule in ACL table */
	if (up_rules_entry_add(&acl_rules_table[it],
			(struct acl4_rule *)next, is_ipv6) < 0) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Up rules entry addtion failed\n", LOG_VALUE);
		return -1;
	}
	acl_rules_table[it].num_of_ue++;

	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"ACL ADD:%s, precedence:%u, rule:%s\n", LOG_VALUE,
		"SDF", pkt_filter->precedence, pkt_filter->u.rule_str);

	/* Rebuild the ACl table */
	if (reset_and_build_rules(it, is_ipv6) < 0) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Failed in reset and build rules\n", LOG_VALUE);
		return -1;
	}
	return it;
}
/**
 * @brief : Release one tree payload that was allocated with rte_malloc
 *          (tdestroy-style helper).
 * @param : node_payload, payload pointer to release.
 * @return : Returns nothing
 */
static void
free_node(void *node_payload)
{
	rte_free(node_payload);
}
/**
 * @brief : Delete an IPv4 rules entry from the local table and release its
 *          memory.
 * @param : t, rules table pointer
 * @param : pkt_filter_entry, filter describing the rule to delete.
 * @return : Returns 0 in case of success , -1 otherwise
 */
int
up_rules_entry_delete(struct acl_rules_table *t,
	struct sdf_pkt_filter *pkt_filter_entry)
{
	void **node = NULL;
	void *item = NULL;
	struct acl4_rule rule_v4 = {0};
	uint8_t prio = 0;
	char *buf = NULL;
	struct rte_acl_rule *next = NULL;

	prio = (255 - pkt_filter_entry->precedence);
	buf = (char *)&pkt_filter_entry->u.rule_str[0];
	next = (struct rte_acl_rule *)&rule_v4;

	if (parse_cb_ipv4vlan_rule(buf, next, 0) != 0) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Parse IPv4 rules error\n", LOG_VALUE);
		return -1;
	}

	next->data.userdata = pkt_filter_entry->precedence + ACL_DENY_SIGNATURE;
	next->data.priority = prio;
	next->data.category_mask = -1;

	/*
	 * BUG FIX: POSIX tdelete() returns a pointer to the PARENT of the
	 * deleted node, so the old "rte_free(*tdelete(...))" freed the wrong
	 * rule (corrupting the tree) and leaked the deleted one. Find the
	 * payload first, unlink it, then free the found payload.
	 */
	node = tfind(next, &t->root, t->compare_rule);
	if (node == NULL) {
		clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Fail to delete acl rule id %d\n", LOG_VALUE,
			rule_v4.data.userdata - ACL_DENY_SIGNATURE);
		return -1;
	}
	item = *node;
	tdelete(next, &t->root, t->compare_rule);
	t->num_entries--;
	rte_free(item);
	return 0;
}
/**
 * @brief : Delete an IPV6 rules entry from the local table and release its
 *          memory.
 * @param : t, rules table pointer
 * @param : pkt_filter_entry, filter describing the rule to delete.
 * @return : Returns 0 in case of success , -1 otherwise
 */
int
up_ipv6_rules_entry_delete(struct acl_rules_table *t,
	struct sdf_pkt_filter *pkt_filter_entry)
{
	void **node = NULL;
	void *item = NULL;
	struct acl6_rule rule_v6 = {0};
	uint8_t prio = 0;
	char *buf = NULL;
	struct rte_acl_rule *next = NULL;

	prio = (255 - pkt_filter_entry->precedence);
	buf = (char *)&pkt_filter_entry->u.rule_str[0];
	next = (struct rte_acl_rule *)&rule_v6;

	if (parse_cb_ipv6_rule(buf, next, 0) != 0) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Parse IPv6 rules error\n", LOG_VALUE);
		return -1;
	}

	next->data.userdata = pkt_filter_entry->precedence + ACL_DENY_SIGNATURE;
	next->data.priority = prio;
	next->data.category_mask = -1;

	/*
	 * BUG FIX: POSIX tdelete() returns the PARENT of the deleted node;
	 * freeing *tdelete(...) released the wrong payload and leaked the
	 * deleted one. Find first, unlink, then free the found payload.
	 */
	node = tfind(next, &t->root, t->compare_ipv6_rule);
	if (node == NULL) {
		clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Fail to delete acl rule id %d\n", LOG_VALUE,
			next->data.userdata - ACL_DENY_SIGNATURE);
		return -1;
	}
	item = *node;
	tdelete(next, &t->root, t->compare_ipv6_rule);
	t->num_entries--;
	rte_free(item);
	return 0;
}
/**
 * @brief : Delete an SDF filter entry (IPv4 or IPv6) from the local table
 *          at the given index.
 * @param : indx, ACL table index.
 * @param : pkt_filter_entry, filter describing the rule to delete.
 * @return : Returns 0 in case of success , -1 otherwise
 */
int
up_sdf_filter_entry_delete(uint32_t indx,
	struct sdf_pkt_filter *pkt_filter_entry)
{
	uint8_t is_ipv6 = 0;

	/* BUG FIX: the NULL check must run before the pointer is used
	 * (rule_ip_type was previously read first). */
	if (pkt_filter_entry == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Read msg Payload failed\n", LOG_VALUE);
		return -1;
	}
	is_ipv6 = (pkt_filter_entry->rule_ip_type == RULE_IPV6);

	/* Propagate the delete result; it was previously discarded even
	 * though the caller checks for a non-zero return. */
	if (!is_ipv6)
		return up_rules_entry_delete(&acl_rules_table[indx], pkt_filter_entry);
	return up_ipv6_rules_entry_delete(&acl_rules_table[indx], pkt_filter_entry);
}
/**
 * @brief : Classify a packet burst against this config's ACL context
 *          (the IPv4 context is preferred; IPv6 is used when IPv4 is
 *          absent).
 * @param : m, buffer (packet burst)
 * @param : indx, ACL Table Index (currently unused)
 * @param : acl_config, acl configuration holding the contexts
 * @param : acl_search, scratch area for classify input/results
 * @return : pointer to the result array, or 0 when no lookup was possible
 */
static uint32_t *acl_lookup(struct rte_mbuf **m, uint32_t indx,
		struct acl_config *acl_config,
		struct acl_search *acl_search)
{
	struct rte_acl_ctx *context = NULL;

	RTE_SET_USED(indx);

	/* BUG FIX: check the config before dereferencing it (the old code
	 * read acl_config->acx_ipv4 first and tested for NULL afterwards). */
	if (acl_config == NULL) {
		clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"ERROR: ACL Context is Not Found \n", LOG_VALUE);
		return 0;
	}

	context = acl_config->acx_ipv4;
	if (context == NULL) {
		context = acl_config->acx_ipv6;
		if (context == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No context available for Lookup",
				LOG_VALUE);
			return 0;
		}
	}

	/* Only classify when the trie has been built (trans_table set). */
	if (context->trans_table != NULL) {
		prepare_acl_parameter(m, acl_search, 1);
		if (acl_search->num_ipv4) {
			rte_acl_classify(context,
				acl_search->data_ipv4,
				acl_search->res_ipv4,
				acl_search->num_ipv4,
				DEFAULT_MAX_CATEGORIES);
			return (uint32_t *)&(acl_search->res_ipv4);
		} else if (acl_search->num_ipv6) {
			rte_acl_classify(context,
				acl_search->data_ipv6,
				acl_search->res_ipv6,
				acl_search->num_ipv6,
				DEFAULT_MAX_CATEGORIES);
			return (uint32_t *)&(acl_search->res_ipv6);
		}
	}

	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"ERROR: ACL Context is Not Found \n", LOG_VALUE);
	return 0;
}
uint32_t *sdf_lookup(struct rte_mbuf **m, int nb_rx, uint32_t indx)
{
RTE_SET_USED(nb_rx);
return acl_lookup(m, indx, &acl_config[indx], &acl_config[indx].acl_search);
}
/* Function to add the default (wildcard 5-tuple) entry into the acl table;
 * only the local IP in the rule string depends on the direction. */
int up_sdf_default_entry_add(uint32_t indx, uint32_t precedence, uint8_t direction)
{
	struct sdf_pkt_filter pktf = {
		.precedence = precedence,
	};

	if (direction == UPLINK) {
		snprintf(pktf.u.rule_str, MAX_LEN, "%s/%"PRIu8" %s/%"PRIu8" %"
			PRIu16" : %"PRIu16" %"PRIu16" : %"PRIu16" 0x%"
			PRIx8"/0x%"PRIx8"\n",
			"172.16.31.10", 0, /*local_ip & mask */
			"0.0.0.0", 0, /*remote_ip, mask,*/
			0, /*local_port_low),*/
			65535, /*local_port_high),*/
			0,/*remote_port_low),*/
			65535, /*remote_port_high),*/
			0, 0/*proto, proto_mask)*/
			);
	} else {
		snprintf(pktf.u.rule_str, MAX_LEN, "%s/%"PRIu8" %s/%"PRIu8" %"
			PRIu16" : %"PRIu16" %"PRIu16" : %"PRIu16" 0x%"
			PRIx8"/0x%"PRIx8"\n",
			"192.168.3.11", 0, /*local_ip & mask */
			"0.0.0.0", 0, /*remote_ip, mask,*/
			0, /*local_port_low),*/
			65535, /*local_port_high),*/
			0,/*remote_port_low),*/
			65535, /*remote_port_high),*/
			0, 0/*proto, proto_mask)*/
			);
	}

	if (up_filter_entry_add(indx, &pktf)) {
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Failed to add up filter entry\n", LOG_VALUE);
		return -1;
	}

	/* BUG FIX: LOG_VALUE must precede the message arguments so the
	 * format specifiers line up ("SDF" was passed first). */
	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"ACL ADD:%s, precedence:%u, rule:%s\n",
		LOG_VALUE, "SDF", pktf.precedence, pktf.u.rule_str);
	return 0;
}
/**
 * @brief : Tear down the ACL context rules for a table index and remove the
 *          filter entry from the local rules table.
 * @param : indx, ACL table index.
 * @param : pkt_filter_entry, filter describing the rule to delete.
 * @return : Returns 0
 */
int
sdf_table_delete(uint32_t indx,
	struct sdf_pkt_filter *pkt_filter_entry){
	struct rte_acl_ctx *ctx = NULL;
	struct acl_rules_table *t = &acl_rules_table[indx];
	uint8_t is_ipv6 = (pkt_filter_entry->rule_ip_type == RULE_IPV6);

	ctx = is_ipv6 ? acl_config[indx].acx_ipv6 : acl_config[indx].acx_ipv4;

	/* Delete all rules from the ACL context and destroy all internal run-time structures */
	rte_acl_reset(ctx);

	if (!is_ipv6) {
		up_rules_entry_delete(t, pkt_filter_entry);
	} else {
		up_ipv6_rules_entry_delete(t, pkt_filter_entry);
	}

	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"ACL DEL:%s \n", LOG_VALUE, ctx->name);

	/* BUG FIX: removed "t = NULL; free(t);" — t points into the static
	 * acl_rules_table[] array and must never be freed; the old sequence
	 * was an always-no-op free(NULL) that only obscured intent. */
	return 0;
}
/**
 * @brief : Remove one rule reference from the table at indx: decrement the
 *          UE refcount; only when the last referencing UE is gone is the
 *          rule actually deleted (and, for the last entry, the whole table
 *          reset) followed by an ACL rebuild.
 * @param : indx, ACL table index.
 * @param : pkt_filter_entry, filter describing the rule to remove.
 * @return : Returns 0 in case of success , -1 otherwise
 */
int
remove_rule_entry_acl(uint32_t indx,
	struct sdf_pkt_filter *pkt_filter_entry){
	struct acl_rules_table *t = &acl_rules_table[indx];
	uint8_t is_ipv6 = (pkt_filter_entry->rule_ip_type == RULE_IPV6);

	if (t->num_of_ue > 1) {
		t->num_of_ue--;
		clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Rule is used for more then one Bearer/UE"
			" So not removing that table rule left for %u UE\n",
			LOG_VALUE, t->num_of_ue);
		return 0;
	}

	if (t->num_entries == 0) {
		clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"No Acl Entery in Given Index table\n", LOG_VALUE);
		return 0;
	}

	if (t->num_entries == 1)
		return sdf_table_delete(indx, pkt_filter_entry);

	if (up_sdf_filter_entry_delete(indx, pkt_filter_entry)) {
		/* BUG FIX: the format string lacked LOG_FORMAT while LOG_VALUE
		 * arguments were passed — a printf argument mismatch. */
		clLog(clSystemLog, eCLSeverityCritical,
			LOG_FORMAT"Failed to delete up sdf filter entry\n", LOG_VALUE);
		return -1;
	}
	return reset_and_build_rules(indx, is_ipv6);
}
/****************************************[END]****************************************/
|
nikhilc149/e-utran-features-bug-fixes | cp/cp.h | /*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _CP_H_
#define _CP_H_
#include <pcap.h>
#include <byteswap.h>
#include <rte_version.h>
#include <stdbool.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include "main.h"
#include "ue.h"
#include "teid.h"
#include "gw_adapter.h"
#ifdef USE_REST
#include "ngic_timer.h"
#endif /* USE_REST */
#if defined(CP_BUILD)
#include "gtp_messages.h"
#endif
#define SLEEP_TIME (100)
#define UPD_PARAM_HEADER_SIZE (4)
/* Define the Micros for hash table operations */
#define ADD_ENTRY 0
#define UPDATE_ENTRY 1
#define DELETE_ENTRY 2
#ifndef PERF_TEST
/** Temp. work around for support debug log level into DP, DPDK version 16.11.4 */
#if (RTE_VER_YEAR >= 16) && (RTE_VER_MONTH >= 11)
#undef RTE_LOG_LEVEL
#define RTE_LOG_LEVEL RTE_LOG_DEBUG
#define RTE_LOG_DP RTE_LOG
#elif (RTE_VER_YEAR >= 18) && (RTE_VER_MONTH >= 02)
#undef RTE_LOG_DP_LEVEL
#define RTE_LOG_DP_LEVEL RTE_LOG_DEBUG
#endif
#else /* Work around for skip LOG statements at compile time in DP, DPDK 16.11.4 and 18.02 */
#if (RTE_VER_YEAR >= 16) && (RTE_VER_MONTH >= 11)
#undef RTE_LOG_LEVEL
#define RTE_LOG_LEVEL RTE_LOG_WARNING
#define RTE_LOG_DP_LEVEL RTE_LOG_LEVEL
#define RTE_LOG_DP RTE_LOG
#elif (RTE_VER_YEAR >= 18) && (RTE_VER_MONTH >= 02)
#undef RTE_LOG_DP_LEVEL
#define RTE_LOG_DP_LEVEL RTE_LOG_WARNING
#endif
#endif /* PERF_TEST */
#ifdef SYNC_STATS
#include <time.h>
#define DEFAULT_STATS_PATH "./logs/"
#define STATS_HASH_SIZE (1 << 21)
#define ACK 1
#define RESPONSE 2
typedef long long int _timer_t;
#define GET_CURRENT_TS(now) \
({ \
struct timespec ts; \
now = clock_gettime(CLOCK_REALTIME,&ts) ? \
-1 : (((_timer_t)ts.tv_sec) * 1000000000) + ((_timer_t)ts.tv_nsec); \
now; \
})
#endif /* SYNC_STATS */
#define MAX_UPF 10
#define S11_INTFC 0
#define S5S8_INTFC 1
#define DNSCACHE_CONCURRENT 2
#define DNSCACHE_PERCENTAGE 70
#define DNSCACHE_INTERVAL 4000
#define DNS_PORT 53
#define PIGGYBACKED (1)
#define NOT_PIGGYBACKED (0)
#define CAUSE_SOURCE_SET_TO_1 (1)
#define CAUSE_SOURCE_SET_TO_0 (0)
#define CANCEL_S1_HO_INDICATION 2
#define __file__ (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
/**
* ipv4 address format.
*/
#define IPV4_ADDR "%u.%u.%u.%u"
#define IPV4_ADDR_HOST_FORMAT(a) (uint8_t)(((a) & 0xff000000) >> 24), \
(uint8_t)(((a) & 0x00ff0000) >> 16), \
(uint8_t)(((a) & 0x0000ff00) >> 8), \
(uint8_t)((a) & 0x000000ff)
/**
* Control-Plane rte logs.
*/
#define RTE_LOGTYPE_CP RTE_LOGTYPE_USER1
/**
* @file
*
* Control Plane specific declarations
*/
/*
 * Define type of Control Plane (CP)
 * SGWC - Serving GW Control Plane
 * PGWC - PDN GW Control Plane
 * SAEGWC - Combined SAEGW Control Plane
 */
enum cp_config {
	SGWC = 01,
	PGWC = 02,
	SAEGWC = 03,
};

/*
 * UE IP addressing modes supported by the gateway:
 * IPv4 only
 * IPv6 only
 * IPv4v6 (priority based) - but GW will assign only one IP on basis of priority
 * IPv4v6 (Dual Mode) - GW will assign both IPs to UE
 * (NOTE(review): previous comment was a copy-paste of the cp_config text.)
 */
enum ip_type {
	IP_V4 = 00,
	IP_V6 = 01,
	IPV4V6_PRIORITY = 02,
	IPV4V6_DUAL = 03
};

/* Which address family wins in IPV4V6_PRIORITY mode. */
enum ip_priority {
	IP_V4_PRIORITY = 0,
	IP_V6_PRIORITY = 1
};

/* Subscriber charging-characteristics profile values. */
enum charging_characteristics {
	HOME = 03,
	VISITING = 04,
	ROAMING = 05,
};

/* CDR generation policy: off, on, or gated by the SGW
 * charging-characteristics check. */
enum cdr_config_values {
	CDR_OFF = 00,
	CDR_ON = 01,
	SGW_CC_CHECK = 02,
};

/* Limits used when validating IP-mode configuration values —
 * NOTE(review): meaning inferred from names; confirm against the parser. */
enum ip_config_values {
	IP_MODE = 01,
	IP_TYPE = 03,
	IP_PRIORITY = 01
};
#ifdef SYNC_STATS
/**
 * @brief : statstics struct of control plane.
 *          One record per request/response exchange, keyed by op_id
 *          (timestamps presumably from GET_CURRENT_TS — confirm units).
 */
struct sync_stats {
	uint64_t op_id;          /* operation identifier */
	uint64_t session_id;     /* session the operation belongs to */
	uint64_t req_init_time;  /* time the request was initiated */
	uint64_t ack_rcv_time;   /* time the ack was received */
	uint64_t resp_recv_time; /* time the response was received */
	uint64_t req_resp_diff;  /* request-to-response latency */
	uint8_t type;            /* ACK or RESPONSE */
};

extern struct sync_stats stats_info;
extern _timer_t _init_time;
struct rte_hash *stats_hash; /* hash of sync_stats records */
extern uint64_t entries;
#endif /* SYNC_STATS */
/**
 * @brief : core identifiers for control plane threads
 */
struct cp_params {
	unsigned stats_core_id; /* lcore that runs the stats thread */
#ifdef SIMU_CP
	unsigned simu_core_id;  /* lcore that runs the simulator (SIMU_CP builds) */
#endif
};
/**
 * @brief : Structure to downlink data notification ack information struct.
 */
typedef struct downlink_data_notification {
	ue_context *context;	/* UE context the notification refers to */
	gtpv2c_ie *cause_ie;	/* cause IE */
	uint8_t *delay;		/* delay value IE */
	/* todo! more to implement... see table 7.2.11.2-1
	 * 'recovery: this ie shall be included if contacting the peer
	 * for the first time'
	 */
	/* */
	uint16_t dl_buff_cnt;		/* DL buffering count */
	uint8_t dl_buff_duration;	/* DL buffering duration */
}downlink_data_notification_t;
extern pcap_dumper_t *pcap_dumper;
extern pcap_t *pcap_reader;
extern int s11_fd;
extern int s11_fd_v6;
extern int s11_pcap_fd;
extern int s5s8_sgwc_fd;
extern int s5s8_pgwc_fd;
extern int pfcp_sgwc_fd ;
extern struct cp_params cp_params;
extern teid_info *upf_teid_info_head;
extern cp_configuration_t cp_configuration;
#if defined (SYNC_STATS) || defined (SDN_ODL_BUILD)
extern uint64_t op_id;
#endif /* SDN_ODL_BUILD */
/**
* @brief : creates and sends downlink data notification according to session
* identifier
* @param : session_id - session identifier pertaining to downlink data packets
* arrived at data plane
* @return : 0 - indicates success, failure otherwise
*/
int
ddn_by_session_id(uint64_t session_id, pdr_ids *pfcp_pdr_id);
/**
* @brief : initializes data plane by creating and adding default entries to
* various tables including session, pcc, metering, etc
* @param : No param
* @return : Returns Nothing
*/
void
initialize_tables_on_dp(void);
#ifdef CP_BUILD
/**
* @brief : sets delete bearer request
* @param : gtpv2c_tx, transmission buffer
* @param : sequence, sequence number
* @param : pdn, pointer of pdn_connection structure
* @param : linked_eps_bearer_id, default bearer id
* @param : pti,Proc Trans Identifier
* @param : ded_eps_bearer_ids, array of dedicated bearers
* @param : ded_bearer_counter, count of dedicated bearers
* @return : nothing
*/
void
set_delete_bearer_request(gtpv2c_header_t *gtpv2c_tx, uint32_t sequence,
pdn_connection *pdn, uint8_t linked_eps_bearer_id, uint8_t pti,
uint8_t ded_eps_bearer_ids[], uint8_t ded_bearer_counter);
/**
* @brief : sets delete bearer response
* @param : gtpv2c_tx, transmission buffer
* @param : sequence, sequence number
* @param : linked_eps_bearer_id, default bearer id
* @param : ded_eps_bearer_ids, array of dedicated bearers
* @param : ded_bearer_counter, count of dedicated bearers
* @param : s5s8_pgw_gtpc_teid, teid value
* @return : nothing
*/
void
set_delete_bearer_response(gtpv2c_header_t *gtpv2c_tx, uint32_t sequence,
uint8_t linked_eps_bearer_id, uint8_t ded_eps_bearer_ids[],
uint8_t ded_bearer_counter, uint32_t s5s8_pgw_gtpc_teid);
/**
* @brief : sets delete bearer command
* @param : del_bearer_cmd, pointer of del_bearer_cmd_t structure
* @param : pdn, pointer of pdn_connection structure
* @param : gtpv2c_tx, transmission buffer
* @return : nothing
*/
void
set_delete_bearer_command(del_bearer_cmd_t *del_bearer_cmd, pdn_connection *pdn, gtpv2c_header_t *gtpv2c_tx);
/**
* @brief : Fill bearer resource command to forward to PGWC
* @param : bearer_rsrc_cmd, decoded message receive on s11
* @param : pdn, pdn connection of bearer
* @param : gtpv2c_tx,transmission buffer
* @return : nothing
*
*/
void
set_bearer_resource_command(bearer_rsrc_cmd_t *bearer_rsrc_cmd, pdn_connection *pdn,
gtpv2c_header_t *gtpv2c_tx);
/**
* @brief : Fill modify bearer command to forward to PGWC
* @param : mod_bearer_cmd_t, modify bearer cmd structure
* @param : pdn, pdn connection of bearer
* @param : gtpv2c_tx,transmission buffer
* @return : nothing
*
*/
void
set_modify_bearer_command(mod_bearer_cmd_t *mod_bearer_cmd, pdn_connection *pdn,
gtpv2c_header_t *gtpv2c_tx);
/**
* @brief : To Downlink data notification ack of user.
* @param : dp_id, table identifier.
* @param : ddn_ack, Downlink data notification ack information
* @return : - 0 on success
* -1 on failure
*/
int
send_ddn_ack(struct dp_id dp_id,
struct downlink_data_notification ddn_ack);
#endif /* CP_BUILD */
#ifdef SYNC_STATS
/**
* @file
* This file contains function prototypes of cp request and response
* statstics with sync way.
*/
/**
* @brief : Open Statstics record file.
* @param : No param
* @return : Returns nothing
*/
void
stats_init(void);
/**
* @brief : Maintain stats in hash table.
* @param : sync_stats, sync_stats information
* @return : Returns nothing
*/
void
add_stats_entry(struct sync_stats *stats);
/**
* @brief : Update the resp and ack time in hash table.
* @param : key, key for lookup entry in hash table
* @param : type, Update ack_recv_time/resp_recv_time
* @return : Returns nothing
*/
void
update_stats_entry(uint64_t key, uint8_t type);
/**
* @brief : Retrieve entries from stats hash table
* @param : void
* @return : Void
*/
void
retrive_stats_entry(void);
/**
* @brief : Export stats reports to file.
* @param : sync_stats, sync_stats information
* @return : Void
*/
void
export_stats_report(struct sync_stats stats_info);
/**
* @brief : Close current stats file and redirects any remaining output to stderr
* @param : void
* @return : Void
*/
void
close_stats(void);
#endif /* SYNC_STATS */
/*PFCP Config file*/
#define STATIC_CP_FILE "../config/cp.cfg"
#define MAX_DP_SIZE 5
#define MAX_CP_SIZE 1
#define MAX_NUM_MME 5
#define MAX_NUM_SGWC 5
#define MAX_NUM_PGWC 5
#define MAX_NUM_SGWU 5
#define MAX_NUM_PGWU 5
#define MAX_NUM_SAEGWU 5
#define MAX_NUM_APN 16
#define MAX_NUM_NAMESERVER 8
#define SGWU_PFCP_PORT 8805
#define PGWU_PFCP_PORT 8805
#define SAEGWU_PFCP_PORT 8805
#define REDIS_CERT_PATH_LEN 256
/**
 * @brief : Maintains dns cache information
 *          (defaults: DNSCACHE_CONCURRENT / DNSCACHE_PERCENTAGE /
 *          DNSCACHE_INTERVAL above)
 */
typedef struct dns_cache_params_t {
	uint32_t concurrent;	/* number of concurrent cache queries */
	uint32_t sec;		/* cache refresh interval, in seconds */
	uint8_t percent;	/* refresh threshold percentage — TODO confirm exact semantics */
	unsigned long timeoutms;	/* query timeout in milliseconds */
	uint32_t tries;		/* number of query retries */
} dns_cache_params_t;
/**
 * @brief : Maintains dns configuration for one resolver scope
 *          (ops vs. app, see pfcp_config_t)
 */
typedef struct dns_config_t {
	uint8_t freq_sec;	/* retry/refresh frequency in seconds — TODO confirm */
	char filename[PATH_MAX];	/* resolver/config file path */
	uint8_t nameserver_cnt;	/* number of valid entries in nameserver_ip */
	char nameserver_ip[MAX_NUM_NAMESERVER][IPV6_STR_LEN];	/* nameserver addresses as strings */
} dns_config_t;
/**
 * @brief : Maintains pfcp (control-plane) configuration, populated from the
 *          CP config file (see STATIC_CP_FILE).
 */
typedef struct pfcp_config_t {
	/* CP Configuration : SGWC=01; PGWC=02; SAEGWC=03 */
	uint8_t cp_type;
	char dadmf_local_addr[IPV6_STR_LEN];
	/* Control-Plane IPs and Ports Params. */
	uint16_t s11_port;
	uint16_t s5s8_port;
	uint16_t pfcp_port;
	uint16_t dadmf_port;
	uint16_t ddf2_port;
	struct in_addr s11_ip;
	struct in_addr s5s8_ip;
	struct in_addr pfcp_ip;
	char dadmf_ip[IPV6_STR_LEN];
	char ddf2_ip[IPV6_STR_LEN];
	char ddf2_local_ip[IPV6_STR_LEN];
	struct in6_addr s11_ip_v6;
	struct in6_addr s5s8_ip_v6;
	struct in6_addr pfcp_ip_v6;
	/*IP Type parameters for S11, S5S8,
	 * PFCP Interfaces and redis connection*/
	uint8_t s11_ip_type;
	uint8_t s5s8_ip_type;
	uint8_t pfcp_ip_type;
	uint8_t upf_pfcp_ip_type;
	uint8_t redis_server_ip_type;
	uint8_t cp_redis_ip_type;
	uint8_t cp_dns_ip_type;
	/* User-Plane IPs and Ports Params. */
	uint16_t upf_pfcp_port;
	struct in_addr upf_pfcp_ip;
	struct in6_addr upf_pfcp_ip_v6;
	/*Redis server config*/
	uint16_t redis_port;
	char redis_cert_path[REDIS_CERT_PATH_LEN];
	/*Store both ipv4 and ipv6 address*/
	char redis_ip_buff[IPV6_STR_LEN];
	char cp_redis_ip_buff[IPV6_STR_LEN];
	/* RESTORATION PARAMETERS */
	uint8_t transmit_cnt;
	int transmit_timer;
	int periodic_timer;
	/* CP Timer Parameters */
	uint8_t request_tries;
	int request_timeout;	/* Request time out in millisecond */
	uint8_t use_dns;	/*enable or disable dns query*/
	/* Store both ipv4 and ipv6 address*/
	char cp_dns_ip_buff[IPV6_STR_LEN];
	/* CLI REST endpoint */
	uint16_t cli_rest_port;
	char cli_rest_ip_buff[IPV6_STR_LEN];
	uint8_t use_gx;	/*enable or disable gx interface*/
	uint8_t perf_flag;	/*enable or disable perf flag*/
	uint8_t ip_allocation_mode;	/*static or dynamic mode for IP allocation*/
	uint8_t ip_type_supported;	/* supported UE IP type (see enum ip_type);
					 * NOTE(review): original comment was a
					 * copy-paste of ip_allocation_mode's */
	uint8_t ip_type_priority;	/*IPv6 or IPv4 priority type */
	/* APN */
	uint32_t num_apn;
	/* apn apn_list[MAX_NUM_APN]; */
	/*Default URR configuration*/
	int trigger_type;
	int uplink_volume_th;
	int downlink_volume_th;
	int time_th;
	dns_cache_params_t dns_cache;
	dns_config_t ops_dns;
	dns_config_t app_dns;
	/* IP_POOL_CONFIG Params */
	struct in_addr ip_pool_ip;
	struct in_addr ip_pool_mask;
	struct in6_addr ipv6_network_id;
	uint8_t ipv6_prefix_len;
	/* CP CDR generation Parameter (see enum cdr_config_values) */
	uint8_t generate_cdr;
	uint8_t generate_sgw_cdr;
	uint16_t sgw_cc;	/* SGW charging characteristic */
	/* ADD_DEFAULT_RULE */
	uint8_t add_default_rule;
	/* Network Trigger Service Request Parameters */
	uint16_t dl_buf_suggested_pkt_cnt;
	uint16_t low_lvl_arp_priority;
	/* gx pcrf server ip */
	peer_addr_t gx_ip;
} pfcp_config_t;
/**
* @brief : Initialize pfcp interface details
* @param : void
* @return : Void
*/
void
init_pfcp(void);
/**
* @brief : Initializes Control Plane data structures, packet filters, and calls for the
* Data Plane to create required tables
* @param : void
* @return : Void
*/
void
init_cp(void);
/**
* @brief : Initializes redis node to send generated CDR
* @param : void
* @return : 0 on success, -1 on failure
*/
int
init_redis(void);
/**
* @brief : Initialize dp rule table
* @param : void
* @return : Void
*/
void
init_dp_rule_tables(void);
#ifdef SYNC_STATS
/**
* @brief : Initialize statistics hash table
* @param : void
* @return : Void
*/
void
init_stats_hash(void);
#endif /* SYNC_STATS */
/**
* @brief : Function yet to be implemented
* @param : void
* @return : Void
*/
void received_create_session_request(void);
#ifdef USE_CSID
/**
* @brief : Function to peer node address and generate unique csid identifier
* @param : pdn_connection, pdn connection info
* @param : eps_bearer, bearer info
* @return : 0: Success, -1: otherwise
*/
int
fill_peer_node_info(pdn_connection *pdn, eps_bearer *bearer);
/**
* @brief : Function to Fill the FQ-CSID values in session est request
* @param : pfcp_sess_estab_req_t, Session Est Req obj
* @param : pdn_connection, pdn connection info
* @return : 0: Success, -1: otherwise
*/
int8_t
fill_fqcsid_sess_est_req(pfcp_sess_estab_req_t *pfcp_sess_est_req, pdn_connection *pdn);
/**
* @brief : Function to Fill the FQ-CSID values in session modification request
* @param : pfcp_sess_mod_req_t
* @param : pdn_connection, pdn connection info
* @return : 0: Success, -1: otherwise
*/
int8_t
fill_fqcsid_sess_mod_req(pfcp_sess_mod_req_t *pfcp_sess_mod_req, pdn_connection *pdn);
/**
* @brief : Function to Cleanup Session information by local csid
* @param : node_addr, peer node IP Address
* @param : iface, interface info
* @return : 0: Success, -1: otherwise
*/
int8_t
del_peer_node_sess(node_address_t *node_addr, uint8_t iface);
/**
* @brief : Function to Cleanup Session information by local csid
* @param : node_addr, peer node IP Address
* @param : iface, interface info
* @return : 0: Success, -1: otherwise
*/
int8_t
del_pfcp_peer_node_sess(node_address_t *node_addr, uint8_t iface);
/**
* @brief : Function to fill fqcsid into gtpv2c messages
* @param : fqcsid, gtpv2c fqcsid ie
* @param : ie_instance, info of instance
* @param : csids, csids info
* @return : Nothing
*/
void
set_gtpc_fqcsid_t(gtp_fqcsid_ie_t *fqcsid,
enum ie_instance instance, fqcsid_t *csids);
/**
* @brief : Function to fill PGW restart notification message
* @param : gtpv2c_tx, message
* @param : s11_sgw, SGW S11 interface IP Address
* @param : s5s8_pgw, PGW S5S8 interface IP Address
* @return : 0: Success, -1: otherwise
*/
int8_t
fill_pgw_restart_notification(gtpv2c_header_t *gtpv2c_tx,
node_address_t *s11_sgw, node_address_t *s5s8_pgw);
/**
* @brief : Function to link peer node csid with local csid
* @param : fqcsid, peer node csid
* @param : fqcsid_t, local csids
* @return : 0: Success, -1: otherwise
*/
int8_t
update_peer_csid_link(fqcsid_t *fqcsid, fqcsid_t *fqcsid_t);
/**
* @brief : Function to process delete pdn connection set request
* @param : del_pdn_conn_set_req_t, request info
* @return : 0: Success, -1: otherwise
*/
int8_t
process_del_pdn_conn_set_req_t(del_pdn_conn_set_req_t *del_pdn_req);
/**
* @brief : Function to process delete pdn connection set response
* @param : del_pdn_conn_set_rsp_t, response info
* @return : 0: Success, -1: otherwise
*/
int8_t
process_del_pdn_conn_set_rsp_t(del_pdn_conn_set_rsp_t *del_pdn_rsp);
/**
* @brief : Function to process update pdn connection set request
* @param : upd_pdn_conn_set_req_t, request info
* @return : 0: Success, -1: otherwise
*/
int8_t
process_upd_pdn_conn_set_req_t(upd_pdn_conn_set_req_t *upd_pdn_req);
/**
* @brief : Function to process update pdn connection set response
* @param : upd_pdn_conn_set_rsp_t, response info
* @return : 0: Success, -1: otherwise
*/
int8_t
process_upd_pdn_conn_set_rsp_t(upd_pdn_conn_set_rsp_t *upd_pdn_rsp);
/**
* @brief : Function to process pfcp session set deletion request
* @param : pfcp_sess_set_del_req_t, request info
* @param : peer_addr, upf node address
* @return : 0: Success, -1: otherwise
*/
int process_pfcp_sess_set_del_req_t(pfcp_sess_set_del_req_t *del_set_req, peer_addr_t *peer_addr);
/**
* @brief : Function to process pfcp session set deletion response
* @param : pfcp_sess_set_del_rsp_t, response info
* @return : 0: Success, -1: otherwise
*/
int process_pfcp_sess_set_del_rsp_t(pfcp_sess_set_del_rsp_t *del_set_rsp);
/**
* @brief : Function to fill the gtpc delete set pdn connection response
* @param : gtpv2c_header_t, response buffer
* @param : seq_t, sequence number
* @param : casue_value
* @return : 0: Success, -1: otherwise
*/
int8_t
fill_gtpc_del_set_pdn_conn_rsp(gtpv2c_header_t *gtpv2c_tx, uint8_t seq_t,
uint8_t casue_value);
/**
* @brief : Function to cleanup sessions based on the local csids
* @param : local_csid
* @param : pdn_connection, pdn connection info
* @return : 0: Success, -1: otherwise
*/
int8_t
cleanup_session_entries(uint16_t local_csid, pdn_connection *pdn);
/*
* @brief : Remove Temporary Local CSID linked with peer node CSID
* @param : peer_fqcsid, structure to store peer node fqcsid info.
* @param : tmp_csid, Temporary Local CSID.
* @param : iface, Interface .
* @return : Returns 0 in case of success , -1 otherwise
*/
int
remove_peer_temp_csid(fqcsid_t *peer_fqcsid, uint16_t tmp_csid, uint8_t iface);
/*
* @brief : Remove Session entry linked with Local CSID .
* @param : seid, session id .
* @param : peer_fqcsid, structure to store peer node fqcsid info.
* @param : pdn_connection, pdn connection info
* @return : Returns 0 in case of success ,-1 or cause value otherwise.
*/
int
cleanup_csid_entry(uint64_t seid, fqcsid_t *peer_fqcsid, pdn_connection *pdn);
/**
* @brief : Match session CSID with Peer node CSID, if CSID match not found then add.
* @param : fqcsid, structure to store received msg fqcsid.
* @param : context_fqcsid, Structure to store session fqcsid.
* @return : Returns 0 in case of success ,-1 or cause value otherwise.
*/
int
match_and_add_sess_fqcsid(gtp_fqcsid_ie_t *fqcsid, sess_fqcsid_t *context_fqcsid);
/**
* @brief : Add Peer node CSID.
* @param : fqcsid, structure to store received msg fqcsid.
* @param : context_fqcsid, Structure to store session fqcsid.
* @return : Returns 0 in case of success ,-1 or cause value otherwise.
*/
void
add_sess_fqcsid(gtp_fqcsid_ie_t *fqcsid, sess_fqcsid_t *context_fqcsid);
/**
* @brief : Update Peer node CSID.
* @param : pfcp_sess_mod_rsp_t, structure to store sess. mod. req.
* @param : pdn_connection, pdn connection info
* @return : Returns 0 in case of success ,-1 or cause value otherwise.
*/
int
update_peer_node_csid(pfcp_sess_mod_rsp_t *pfcp_sess_mod_rsp, pdn_connection *pdn);
/**
* @brief : Link session with CSID.
* @param : csid,
* @param : pdn_connection, pdn connection info
* @return : Returns 0 in case of success, cause value otherwise.
*/
int
link_sess_with_peer_csid(fqcsid_t *peer_csid, pdn_connection *pdn, uint8_t iface);
/**
* @brief : Delete Link session with CSID.
* @param : pdn_connection, pdn connection info
* @return : Returns 0 in case of success, cause value otherwise.
*/
int
del_session_csid_entry(pdn_connection *pdn);
/**
* @brief : Remove Link session from CSID hash.
* @param : head, link list node.
* @param : seid,
* @param : csid,
* @return : Returns 0 in case of success, cause value otherwise.
*/
int
remove_sess_entry(sess_csid *head, uint64_t seid, peer_csid_key_t *key);
/**
* @brief : Match session CSID with Peer node CSID, if CSID match not found then add.
* @param : fqcsid, structure to store received msg fqcsid.
* @param : context_fqcsid, Structure to store session fqcsid.
* @return : Returns 0 in case of success ,-1 or cause value otherwise.
*/
int
match_and_add_pfcp_sess_fqcsid(pfcp_fqcsid_ie_t *fqcsid, sess_fqcsid_t *context_fqcsid);
/**
* @brief : Add Peer node CSID.
* @param : fqcsid, structure to store received msg fqcsid.
* @param : context_fqcsid, Structure to store session fqcsid.
* @return : Returns 0 in case of success ,-1 or cause value otherwise.
*/
void
add_pfcp_sess_fqcsid(pfcp_fqcsid_ie_t *fqcsid, sess_fqcsid_t *context_fqcsid);
/**
* @brief : Remove CSID from UE context.
* @param : cntx_fqcsid, Structure to store fqcsid.
* @param : csid_t, Structure to store session fqcsid.
* @return : Returns void.
*/
void
remove_csid_from_cntx(sess_fqcsid_t *cntx_fqcsid, fqcsid_t *csid_t);
/**
* @brief : fill pdn fqcsid from UE context fqcsid collection.
* @param : pdn_fqcsid, Structure to store session fqcsid.
* @param : cntx_fqcsid, Structure to store fqcsid.
* @return : Returns void.
*/
void
fill_pdn_fqcsid_info(fqcsid_t *pdn_fqcsid, sess_fqcsid_t *cntx_fqcsid);
/**
* @brief : Function to Delete Temporery and Permanent CSID entry.
* @param : pdn_connection, pdn connection info
* @param : eps_bearer, bearer info
* @return : 0: Success, -1: otherwise
*/
int
delete_peer_node_info(pdn_connection *pdn, eps_bearer *bearer);
/**
* @brief : Remove the local CSID in hash table.
* @param : node_addr, node_addr for lookup entry in hash table
* @param : fqcsid, Remove local CSID.
* @return : Returns nothing
*/
void
del_local_csid(node_address_t *node_addr, fqcsid_t *fqcsid);
#endif /* USE_CSID */
/* SAEGWC --> PGWC demotion scenario, Cleanup the SGW related data structures */
/*
* @brief : Cleanup SGW Session Info
* @param : del_sess_req_t, TEID, Seq etc
* @param : context, Structure to store UE context,
* @return : Returns 0 in case of success ,-1 or cause value otherwise.
*/
int8_t
cleanup_sgw_context(del_sess_req_t *ds_req, ue_context *context);
/* SAEGWC --> SGWC Promotion scenario, Cleanup the PGWC related data structures */
/*
* @brief : Cleanup PGW Session Info
* @param : del_sess_req_t, TEID, Seq etc
* @param : context, Structure to store UE context,
* @return : Returns 0 in case of success ,-1 or cause value otherwise.
*/
int8_t
cleanup_pgw_context(del_sess_req_t *ds_req, ue_context *context);
/*
* @brief : Send the predefined rules SDF, MTR, ADC, and PCC on UP.
* @param : upf IP address.
* @return : Returns 0 in case of success ,-1 or cause value otherwise.
*/
int8_t
dump_predefined_rules_on_up(node_address_t upf_ip);
/*
* @brief : Convert Int value of charging characteristic to string
* @param : cc_value, Int value of charging characteristic
* @return : Returns string value of charging characteristic.
*/
const char *
get_cc_string(uint16_t cc_value);
/*
* @brief : fills and sends pfcp session report response
* @param : context, ue_context
* @param : sequence , sequence number
* @param : pdn, pdn_context
* @param : dl_buf_sugg_pkt_cnt, DL Buffering Suggested Packet Count
* @param : dldr_flag, represent whether ddn report response or cdr report response
* @return : Returns nothing.
*/
void
fill_send_pfcp_sess_report_resp(ue_context *context, uint8_t sequence, pdn_connection *pdn,
uint16_t dl_buf_sugg_pkt_cnt, bool dldr_flag);
/**
* @brief : fill cp configuration
* @param : cp configuration pointer
* @return : Returns status code
*/
int8_t fill_cp_configuration(cp_configuration_t *cp_configuration);
/**
* @brief : post request timeout
* @param : request_timeout_value, Int
* @return : Returns status code
*/
int8_t post_request_timeout(const int request_timeout_value);
/**
* @brief : post request timeout
* @param : request_tries_value, Int
* @return : Returns status code
*/
int8_t post_request_tries(const int request_tries_value);
/**
* @brief : post periodic timer
* @param : periodic_timer_value, Int
* @return : Returns status code
*/
int8_t post_periodic_timer(const int periodic_timer_value);
/**
* @brief : post transmit timer
* @param : transmit_timer_value, Int
* @return : Returns status code
*/
int8_t post_transmit_timer(const int transmit_timer_value);
/**
* @brief : post transmit count
* @param : transmit_count, Int
* @return : Returns status code
*/
int8_t post_transmit_count(const int transmit_count);
/**
* @brief : update perf flag
* @param : perf_flag, Int
* @return : Returns status code
*/
int8_t update_perf_flag(const int perf_flag);
/**
* @brief : get request timeout
* @param : void
* @return : Returns request timeout
*/
int get_request_timeout(void);
/**
* @brief : get request tries
* @param : void
* @return : Returns request tries value
*/
int get_request_tries(void);
/**
* @brief : get periodic timer value
* @param : void
* @return : Returns periodic timer value
*/
int get_periodic_timer(void);
/**
* @brief : get transmit timer value
* @param : void
* @return : Returns transmit timer value
*/
int get_transmit_timer(void);
/**
* @brief : get transmit count value
* @param : void
* @return : Returns transmit count value
*/
int get_transmit_count(void);
/**
* @brief : get perf flag value
* @param : void
* @return : Returns perf flag value
*/
uint8_t get_perf_flag(void);
#endif
|
nikhilc149/e-utran-features-bug-fixes | dp/pipeline/epc_ul.c | <filename>dp/pipeline/epc_ul.c
/*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <string.h>
#include <sched.h>
#include <sys/types.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <inttypes.h>
#include <rte_string_fns.h>
#include <rte_ring.h>
#include <rte_pipeline.h>
#include <rte_lcore.h>
#include <rte_ethdev.h>
#include <rte_port_ring.h>
#include <rte_port_ethdev.h>
#include <rte_table_hash.h>
#include <rte_table_stub.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_jhash.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_udp.h>
#include <rte_mbuf.h>
#include <rte_hash_crc.h>
#include <rte_port_ring.h>
#include <rte_icmp.h>
#include <rte_kni.h>
#include <rte_arp.h>
#include <unistd.h>
#include "ipv6.h"
#include "gtpu.h"
#include "up_main.h"
#include "pfcp_util.h"
#include "gw_adapter.h"
#include "epc_packet_framework.h"
#ifdef USE_REST
#include "ngic_timer.h"
#endif /* USE_REST */
/* Borrowed from dpdk ip_frag_internal.c */
#define PRIME_VALUE 0xeaad8405
/* Generate new pcap for s1u port */
extern pcap_dumper_t *pcap_dumper_west;
extern pcap_dumper_t *pcap_dumper_east;
extern struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
extern int clSystemLog;
extern struct rte_hash *conn_hash_handle;
#ifdef TIMER_STATS
#include "perf_timer.h"
extern _timer_t _init_time;
#endif /* TIMER_STATS */
uint32_t ul_nkni_pkts = 0;
#ifdef USE_REST
/**
 * @brief : Mark the peer connection as active when uplink traffic is
 *          received from that peer node address. A lookup miss only means
 *          no monitored connection exists for this source, so it is logged
 *          at debug level and ignored.
 * @param : srcIp, peer node address (IPv4 or IPv6)
 * @return : Returns nothing
 */
static inline void check_activity(node_address_t srcIp)
{
	peerData *peer = NULL;

	if (rte_hash_lookup_data(conn_hash_handle,
				&srcIp, (void **)&peer) < 0) {
		if (srcIp.ip_type == IPV6_TYPE) {
			clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Entry not found for NODE IPv6 Addr: "IPv6_FMT"\n",
				LOG_VALUE, IPv6_PRINT(IPv6_CAST(srcIp.ipv6_addr)));
		} else {
			clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Entry not found for NODE IPv4 Addr: %s\n",
				LOG_VALUE, inet_ntoa(*(struct in_addr *)&srcIp.ipv4_addr));
		}
		return;
	}

	if (srcIp.ip_type == IPV6_TYPE) {
		clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Recv pkts from NODE IPv6 Addr: "IPv6_FMT"\n",
			LOG_VALUE, IPv6_PRINT(IPv6_CAST(srcIp.ipv6_addr)));
	} else {
		clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Recv pkts from NODE IPv4 Addr:%s\n",
			LOG_VALUE, inet_ntoa(*(struct in_addr *)&srcIp.ipv4_addr));
	}
	/* Reset the in-activity flag to activity. */
	peer->activityFlag = 1;
}
#endif /* USE_REST */
/**
 * @brief : Classify an uplink (west-bound) pkt and set the destination
 *          port id in the mbuf meta data. GTP-U G-PDU/GEMR pkts are
 *          flagged for fast-path handling (ul_gtpu_pkt, port id 0);
 *          ARP, ICMP/ICMPv6, multicast/broadcast and pkts addressed to a
 *          local interface IP are flagged for KNI/Linux handling
 *          (ul_arp_pkt). Everything else keeps the default port id 1
 *          (master core).
 * @param : m, rte mbuf pointer
 * @return : Returns nothing
 */
static inline void epc_ul_set_port_id(struct rte_mbuf *m)
{
	/* Point to the start of the mbuf */
	uint8_t *m_data = rte_pktmbuf_mtod(m, uint8_t *);
	/* point to the meta data offset header room */
	struct epc_meta_data *meta_data =
		(struct epc_meta_data *)RTE_MBUF_METADATA_UINT8_PTR(m, META_DATA_OFFSET);
	/* point to the port id in the meta offset */
	uint32_t *port_id_offset = &meta_data->port_id;
	node_address_t peer_addr = {0};
	/* Get the ether header info */
	struct ether_hdr *eh = (struct ether_hdr *)&m_data[0];
	/* Default route pkts to master core */
	*port_id_offset = 1;
	/* Flag ARP pkt for linux handling */
	if (eh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP)) {
		clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"WB_IN:ARP:eh->ether_type==ETHER_TYPE_ARP= 0x%X\n",
			LOG_VALUE, eh->ether_type);
		ul_arp_pkt = 1;
	} else if (eh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		uint32_t *ue_ipv4_hash_offset = &meta_data->ue_ipv4_hash;
		struct ipv4_hdr *ipv4_hdr =
			(struct ipv4_hdr *)&m_data[sizeof(struct ether_hdr)];
		struct udp_hdr *udph;
		uint32_t ip_len;
		uint32_t ipv4_packet;
		/* Host Order ipv4_hdr->dst_addr */
		/* NOTE(review): despite the name, dst_addr is used as-is
		 * (network byte order); app.wb_ip etc. appear to be stored
		 * the same way — confirm before changing. */
		uint32_t ho_addr;
		ipv4_packet = (eh->ether_type == htons(ETHER_TYPE_IPv4));
		/* Check the header checksum */
		/*if (unlikely(
		(m->ol_flags & PKT_RX_IP_CKSUM_MASK) == PKT_RX_IP_CKSUM_BAD ||
		(m->ol_flags & PKT_RX_L4_CKSUM_MASK) == PKT_RX_L4_CKSUM_BAD)) {
		//clLog(clSystemLog, eCLSeverityCritical, "UL Bad checksum: %lu\n", m->ol_flags);
		//ipv4_packet = 0;
		}*/
		ho_addr = ipv4_hdr->dst_addr;
		/* If IPv4 ICMP pkt for linux handling
		 * Flag pkt destined to S1U_IP for linux handling
		 * Flag MCAST pkt for linux handling
		 * Flag BCAST pkt for linux handling
		 */
		if ((ipv4_hdr->next_proto_id == IPPROTO_ICMP) ||
			(app.wb_ip == ho_addr) || (app.wb_li_ip == ho_addr) ||
			(IS_IPV4_MCAST(ho_addr)) ||
			((app.wb_bcast_addr == ho_addr) || (app.wb_li_bcast_addr == ho_addr)))
		{
			clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"WB_IN:ipv4_hdr->ICMP:ipv4_hdr->next_proto_id= %u\n"
				"WB_IN:IPv4:Reject or MulticastIP or broadcast IP Pkt ipv4_hdr->dst_addr= %s\n",
				LOG_VALUE, ipv4_hdr->next_proto_id,
				inet_ntoa(*(struct in_addr *)&ho_addr));
			ul_arp_pkt = 1;
			return;
		}
		/* Flag all other pkts for epc_ul proc handling */
		if (likely(ipv4_packet && ipv4_hdr->next_proto_id == IPPROTO_UDP)) {
			/* IHL is in 32-bit words; convert to bytes */
			ip_len = (ipv4_hdr->version_ihl & 0xf) << 2;
			udph =
				(struct udp_hdr *)&m_data[sizeof(struct ether_hdr) +
				ip_len];
			if (likely(udph->dst_port == UDP_PORT_GTPU_NW_ORDER)) {
#ifdef USE_REST
				memset(&peer_addr, 0, sizeof(node_address_t));
				peer_addr.ip_type = IPV4_TYPE;
				peer_addr.ipv4_addr = ipv4_hdr->src_addr;
				/* VS: Set activity flag if data receive from peer node */
				check_activity(peer_addr);
#endif /* USE_REST */
				struct gtpu_hdr *gtpuhdr = get_mtogtpu(m);
				/* Echo req/resp and error indication keep the
				 * default port id (master core). */
				if ((gtpuhdr->msgtype == GTPU_ECHO_REQUEST && gtpuhdr->teid == 0) ||
					gtpuhdr->msgtype == GTPU_ECHO_RESPONSE ||
					gtpuhdr->msgtype == GTPU_ERROR_INDICATION) {
					return;
				} else if ((gtpuhdr->msgtype == GTP_GPDU) || (gtpuhdr->msgtype == GTP_GEMR)) {
					/* Inner could be ipv6 ? */
					struct ipv4_hdr *inner_ipv4_hdr =
						(struct ipv4_hdr *)RTE_PTR_ADD(udph,
								UDP_HDR_SIZE +
								sizeof(struct
									gtpu_hdr));
					const uint32_t *p =
						(const uint32_t *)&inner_ipv4_hdr->src_addr;
					clLog(clSystemLog, eCLSeverityDebug,
						LOG_FORMAT"WB: IPv4 GTPU packet\n", LOG_VALUE);
					*port_id_offset = 0;
					ul_gtpu_pkt = 1;
					ul_arp_pkt = 0;
					/* Load-balance hash over the inner (UE) src IP */
#ifdef SKIP_LB_HASH_CRC
					*ue_ipv4_hash_offset = p[0] >> 24;
#else
					*ue_ipv4_hash_offset =
						rte_hash_crc_4byte(p[0], PRIME_VALUE);
#endif
				}
			}
		}
	} else if (eh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
		/* Get the IPv6 Header from pkt */
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)&m_data[ETH_HDR_SIZE];
		/* Rcvd pkts is IPv6 pkt */
		uint32_t ipv6_packet = (eh->ether_type == htons(ETHER_TYPE_IPv6));
		/* L4: If next header is ICMPv6 and Neighbor Solicitation/Advertisement */
		if ((ipv6_hdr->proto == IPPROTO_ICMPV6) ||
			(ipv6_hdr->proto != IPPROTO_UDP)) {
			clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"WB_IN:ipv6->icmpv6:ipv6_hdr->proto= %u\n"
				"WB:ICMPv6: IPv6 Packets redirect to LINUX..\n",
				LOG_VALUE, ipv6_hdr->proto);
			/* Redirect packets to LINUX and Master Core to fill the arp entry */
			ul_arp_pkt = 1;
			return;
		}
		/* Local IPv6 Address */
		struct in6_addr ho_addr = {0};
		memcpy(&ho_addr.s6_addr, &ipv6_hdr->dst_addr, sizeof(ipv6_hdr->dst_addr));
		/* Validate the destination address is S1U/WB or not */
		if (memcmp(&(app.wb_ipv6), &ho_addr, sizeof(ho_addr)) &&
			memcmp(&(app.wb_li_ipv6), &ho_addr, sizeof(ho_addr))) {
			clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"WB_IN:IPv6:Reject app.wb_ipv6("IPv6_FMT") != ipv6_hdr->dst_addr("IPv6_FMT")\n",
				LOG_VALUE, IPv6_PRINT(app.wb_ipv6), IPv6_PRINT(ho_addr));
			clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"WB_IN:ipv6_hdr->proto= %u: Not for local intf IPv6 dst addr Packet,"
				"redirect to LINUX..\n", LOG_VALUE, ipv6_hdr->proto);
			ul_arp_pkt = 1;
			return;
		}
		/* L4: If next header for data packet */
		if (likely(ipv6_packet && ipv6_hdr->proto == IPPROTO_UDP)) {
			struct udp_hdr *udph;
			/* Point to the UDP payload */
			udph = (struct udp_hdr *)&m_data[ETH_HDR_SIZE + IPv6_HDR_SIZE];
			/* Validate the GTPU packet dst port */
			if (likely(udph->dst_port == UDP_PORT_GTPU_NW_ORDER)) {
#ifdef USE_REST
				memset(&peer_addr, 0, sizeof(node_address_t));
				peer_addr.ip_type = IPV6_TYPE;
				memcpy(peer_addr.ipv6_addr,
						ipv6_hdr->src_addr, IPV6_ADDR_LEN);
				/* TODO: Set activity flag if data receive from peer node */
				check_activity(peer_addr);
#endif /* USE_REST */
				struct gtpu_hdr *gtpuhdr = get_mtogtpu_v6(m);
				/* point to the payload of the IPv6 */
				if ((gtpuhdr->msgtype == GTPU_ECHO_REQUEST && gtpuhdr->teid == 0) ||
					gtpuhdr->msgtype == GTPU_ECHO_RESPONSE ||
					gtpuhdr->msgtype == GTPU_ERROR_INDICATION) {
					clLog(clSystemLog, eCLSeverityDebug,
						LOG_FORMAT"WB_IN:IPv6 GTPU Echo packet\n", LOG_VALUE);
					return;
				} else if ((gtpuhdr->msgtype == GTP_GPDU) || (gtpuhdr->msgtype == GTP_GEMR)) {
					clLog(clSystemLog, eCLSeverityDebug,
						LOG_FORMAT"WB_IN:IPv6 GTPU packet\n", LOG_VALUE);
					/* Default route pkts to ul core, i.e pipeline */
					*port_id_offset = 0;
					/* Route packets to fastpath */
					ul_gtpu_pkt = 1;
					/* Not Redirect packets to LINUX */
					ul_arp_pkt = 0;
					/* NOTE(review): unlike the IPv4 path, no
					 * ue_ipv4_hash is computed here — confirm
					 * this is intentional for IPv6 G-PDUs. */
				}
			}
		}
	}
}
/**
 * @brief : Input-port action handler for the uplink pipeline. Classifies
 *          every pkt of the burst via epc_ul_set_port_id(): GTP-U data
 *          pkts are counted for fast-path processing, ARP/control pkts
 *          are punted to the kernel through KNI, and the whole burst is
 *          captured to the west pcap dump.
 * @param : p, rte pipeline pointer (unused)
 * @param : pkts, burst of received mbufs
 * @param : n, number of packets in the burst
 * @param : arg, unused parameter
 * @return : Always 0
 */
static int epc_ul_port_in_ah(struct rte_pipeline *p,
		struct rte_mbuf **pkts, uint32_t n,
		void *arg)
{
#ifdef TIMER_STATS
	TIMER_GET_CURRENT_TP(_init_time);
#endif /* TIMER_STATS*/
	struct rte_mbuf *kni_pkts_burst[n];

	RTE_SET_USED(p);
	RTE_SET_USED(arg);

	/* Reset the per-burst classification counters/flags. */
	ul_ndata_pkts = 0;
	ul_nkni_pkts = 0;
	ul_arp_pkt = 0;
	ul_gtpu_pkt = 0;

	for (uint32_t idx = 0; idx < n; idx++) {
		/* epc_ul_set_port_id() reports the pkt type through the
		 * ul_gtpu_pkt / ul_arp_pkt globals. */
		epc_ul_set_port_id(pkts[idx]);
		if (ul_gtpu_pkt) {
			ul_gtpu_pkt = 0;
			ul_ndata_pkts++;
		} else if (ul_arp_pkt) {
			ul_arp_pkt = 0;
			kni_pkts_burst[ul_nkni_pkts++] = pkts[idx];
		}
	}

	/* Hand control pkts (ARP/ICMP/local-dst) to the kernel via KNI. */
	if (ul_nkni_pkts) {
		RTE_LOG(DEBUG, DP, "KNI: UL send pkts to kni\n");
		kni_ingress(kni_port_params_array[S1U_PORT_ID],
				kni_pkts_burst, ul_nkni_pkts);
	}
#ifdef STATS
	epc_app.ul_params[S1U_PORT_ID].pkts_in += ul_ndata_pkts;
#endif /* STATS */
	ul_pkts_nbrst++;
	/* Capture packets on s1u_port.*/
	up_pcap_dumper(pcap_dumper_west, pkts, n);
	return 0;
}
static epc_ul_handler epc_ul_worker_func[NUM_SPGW_PORTS];
/**
* @brief : Uplink packet handler
* @param : p, rte pipeline pointer
* @param : pkts, rte mbuf
* @param : pkts_mask, packet mask
* @param : arg, port number
* @return : Returns nothing
*/
static inline int epc_ul_port_out_ah(struct rte_pipeline *p, struct rte_mbuf **pkts,
		uint64_t pkts_mask, void *arg)
{
	/* Start with an empty mask; the registered worker fills in the bits of
	 * the pkts it wants forwarded (mask is passed by address below). */
	pkts_mask = 0;
	int worker_index = 0, ret = 0;
	/* Output port number was stashed in 'arg' at port-out creation time */
	int portno = (uintptr_t) arg;
	if (ul_pkts_nbrst == ul_pkts_nbrst_prv) {
		/* No new input burst since the last invocation: nothing to do */
		return 0;
	} else if (ul_ndata_pkts) {
		ul_pkts_nbrst_prv = ul_pkts_nbrst;
		/* Dispatch the burst to the worker registered for this port */
		epc_ul_handler f = epc_ul_worker_func[portno];
		/* VS- NGCORE_SHRINK: worker_index-TBC */
		if ( f != NULL) {
			ret = f(p, pkts, ul_ndata_pkts, &pkts_mask, worker_index);
		} else {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Not Register WB pkts handler, Configured WB MAC was wrong\n",
					LOG_VALUE);
		}
	}
#ifdef TIMER_STATS
#ifndef AUTO_ANALYSIS
	/* Record elapsed time from port-in (_init_time set in epc_ul_port_in_ah) */
	ul_stat_info.port_in_out_delta = TIMER_GET_ELAPSED_NS(_init_time);
	/* Export stats into file. */
	ul_timer_stats(ul_ndata_pkts, &ul_stat_info);
#else
	/* calculate min time, max time, min_burst_sz, max_burst_sz
	 * perf_stats.op_time[12] = port_in_out_time */
	SET_PERF_MAX_MIN_TIME(ul_perf_stats.op_time[12], _init_time, ul_ndata_pkts, 0);
#endif /* AUTO_ANALYSIS */
#endif /* TIMER_STATS*/
	return ret;
}
/**
 * @brief : Build the uplink (WB/S1U) rte_pipeline: one ethdev reader input
 *          port, one nodrop ethdev writer output port (index 0) plus ring
 *          writers towards the master-core rings, a stub table, and a
 *          default entry that routes by the per-pkt meta-data port id.
 * @param : param, pipeline parameter block filled in by this function
 * @param : core, lcore the RX path will run on (used for NUMA warning only)
 * @param : in_port_id, ethdev port the pipeline reads from
 * @param : out_port_id, ethdev port output port 0 writes to
 * @return : Returns nothing; rte_panic()/rte_exit() on any setup failure
 */
void epc_ul_init(struct epc_ul_params *param, int core, uint8_t in_port_id, uint8_t out_port_id)
{
	unsigned i;
	struct rte_pipeline *p;
	/* Reset burst bookkeeping shared with the port in/out action handlers */
	ul_pkts_nbrst = 0;
	ul_pkts_nbrst_prv = 0;
	if (in_port_id != app.eb_port && in_port_id != app.wb_port)
		rte_exit(EXIT_FAILURE, LOG_FORMAT"Wrong MAC configured for WB interface\n", LOG_VALUE);
	memset(param, 0, sizeof(*param));
	snprintf((char *)param->name, PIPE_NAME_SIZE, "epc_ul_%d", in_port_id);
	param->pipeline_params.socket_id = rte_socket_id();
	param->pipeline_params.name = param->name;
	/* Port id for routing is read from the mbuf meta-data area */
	param->pipeline_params.offset_port_id = META_DATA_OFFSET;
	p = rte_pipeline_create(&param->pipeline_params);
	if (p == NULL)
		rte_panic("%s: Unable to configure the pipeline\n", __func__);
	/* Input port configuration */
	/* Warn (do not fail) if the NIC and the RX lcore are on different sockets */
	if (rte_eth_dev_socket_id(in_port_id)
		!= (int)lcore_config[core].socket_id) {
		clLog(clSystemLog, eCLSeverityMinor,
			LOG_FORMAT"location of the RX core for port: %d is not optimal\n",
			LOG_VALUE, in_port_id);
		clLog(clSystemLog, eCLSeverityMinor,
			LOG_FORMAT"Performance may be degradated \n", LOG_VALUE);
	}
	struct rte_port_ethdev_reader_params port_ethdev_params = {
		.port_id = epc_app.ports[in_port_id],
		.queue_id = 0,
	};
	struct rte_pipeline_port_in_params in_port_params = {
		.ops = &rte_port_ethdev_reader_ops,
		.arg_create = (void *)&port_ethdev_params,
		.burst_size = epc_app.burst_size_rx_read,
	};
	/* Attach the classification action handler only on the S1U side */
	if (in_port_id == S1U_PORT_ID) {
		in_port_params.f_action = epc_ul_port_in_ah;
		in_port_params.arg_ah = NULL;
	}
	if (rte_pipeline_port_in_create
		(p, &in_port_params, &param->port_in_id))
	{
		rte_panic("%s: Unable to configure input port for port %d\n",
			__func__, in_port_id);
	}
	/* Output port configuration */
	for (i = 0; i < epc_app.n_ports; i++) {
		if (i == 0){
			/* Pipeline driving decapped fast path pkts out the epc_ul core */
			struct rte_port_ethdev_writer_nodrop_params port_ethdev_params =
			{
				.port_id = epc_app.ports[out_port_id],
				.queue_id = 0,
				.tx_burst_sz = epc_app.burst_size_tx_write,
				.n_retries = 0,
			};
			struct rte_pipeline_port_out_params out_port_params =
			{
				.ops = &rte_port_ethdev_writer_nodrop_ops,
				.arg_create = (void *)&port_ethdev_params,
				.f_action = epc_ul_port_out_ah,
				/* Output port index is recovered from arg_ah in the
				 * action handler to select the worker function */
				.arg_ah = (void *)(uintptr_t) i,
			};
			if (rte_pipeline_port_out_create
				(p, &out_port_params, &param->port_out_id[i])) {
				rte_panic
					("%s: Unable to configure output port\n"
					"for ring RX %i\n", __func__, i);
			}
		}
		else {
			/* Pipeline equeueing arp request pkts to epc_mct core ring */
			struct rte_port_ring_writer_params port_ring_params = {
				.tx_burst_sz = epc_app.burst_size_rx_write,
			};
			struct rte_pipeline_port_out_params out_port_params = {
				.ops = &rte_port_ring_writer_ops,
				.arg_create = (void *)&port_ring_params
			};
			port_ring_params.ring = epc_app.epc_mct_rx[in_port_id];
			if (rte_pipeline_port_out_create
				(p, &out_port_params, &param->port_out_id[i])) {
				rte_panic
					("%s: Unable to configure output port\n"
					"for ring RX %i\n", __func__, i);
			}
		}
	}
	/* table configuration */
	/* Tables */
	{
		/* Stub table: no lookup, every pkt hits the default entry */
		struct rte_pipeline_table_params table_params = {
			.ops = &rte_table_stub_ops
		};
		if (rte_pipeline_table_create
			(p, &table_params, &param->table_id)) {
			rte_panic("%s: Unable to configure table %u\n",
				__func__, param->table_id);
		}
	}
	if (rte_pipeline_port_in_connect_to_table
		(p, param->port_in_id, param->table_id)) {
		rte_panic("%s: Unable to connect input port %u to table %u\n",
			__func__, param->port_in_id, param->table_id);
	}
	/* Add entries to tables */
	{
		/* PORT_META: output port taken from the pkt's meta-data, i.e.
		 * the port id written by epc_ul_set_port_id() */
		struct rte_pipeline_table_entry default_entry = {
			.action = RTE_PIPELINE_ACTION_PORT_META,
		};
		struct rte_pipeline_table_entry *default_entry_ptr;
		int status = rte_pipeline_table_default_entry_add(p,
				param->table_id,
				&default_entry,
				&default_entry_ptr);
		if (status) {
			rte_panic(
				"%s: failed to add table default entry\n",
				__func__);
			rte_pipeline_free(p);
			return;
		}
	}
	/* Enable input ports */
	if (rte_pipeline_port_in_enable(p, param->port_in_id)) {
		rte_panic("%s: unable to enable input port %d\n", __func__,
			param->port_in_id);
	}
	/* set flush option */
	param->flush_max = EPC_PIPELINE_FLUSH_MAX;
	/* Check pipeline consistency */
	if (rte_pipeline_check(p) < 0)
		rte_panic("%s: Pipeline consistency check failed\n", __func__);
	param->pipeline = p;
}
/**
 * @brief : Uplink lcore loop body: runs the pipeline, periodically flushes
 *          it, services KNI control requests, and drains the SGI shared
 *          ring (ARP/echo pkts) directly out of the SGI ethdev port.
 * @param : args, struct epc_ul_params for this pipeline
 * @return : Returns nothing
 */
void epc_ul(void *args)
{
	struct epc_ul_params *param = (struct epc_ul_params *)args;
	rte_pipeline_run(param->pipeline);
	if (++param->flush_count >= param->flush_max) {
		rte_pipeline_flush(param->pipeline);
		param->flush_count = 0;
	}
	/** Handle the request mbufs sent from kernel space,
	 *  Then analyzes it and calls the specific actions for the specific requests.
	 *  Finally constructs the response mbuf and puts it back to the resp_q.
	 */
	rte_kni_handle_request(kni_port_params_array[S1U_PORT_ID]->kni[0]);
	uint32_t queued_cnt = rte_ring_count(shared_ring[SGI_PORT_ID]);
	if (queued_cnt) {
		uint32_t pkt_indx = 0;
		struct rte_mbuf *pkts[queued_cnt];
		uint32_t rx_cnt = rte_ring_dequeue_bulk(shared_ring[SGI_PORT_ID],
				(void**)pkts, queued_cnt, NULL);
		/* Capture the echo packets.*/
		up_pcap_dumper(pcap_dumper_west, pkts, rx_cnt);
		while (rx_cnt) {
			uint16_t pkt_cnt = PKT_BURST_SZ;
			if (rx_cnt < PKT_BURST_SZ)
				pkt_cnt = rx_cnt;
			/* ARP_REQ on SGI direct driven by epc_ul core */
			uint16_t tx_cnt = rte_eth_tx_burst(SGI_PORT_ID,
					0, &pkts[pkt_indx], pkt_cnt);
			/* rte_eth_tx_burst() takes ownership of the tx_cnt mbufs it
			 * accepted; free only the rejected tail of this burst.  The
			 * previous code freed all pkt_cnt mbufs (double-free of the
			 * transmitted ones) and indexed from 0 instead of pkt_indx. */
			for (uint16_t inx = tx_cnt; inx < pkt_cnt; inx++) {
				rte_pktmbuf_free(pkts[pkt_indx + inx]);
			}
			/* The whole burst is consumed (sent or dropped); advancing by
			 * tx_cnt only would retry mbufs that were just freed. */
			rx_cnt -= pkt_cnt;
			pkt_indx += pkt_cnt;
		}
	}
}
/**
 * @brief : Register the uplink burst handler for a port.
 * @param : f, handler invoked by epc_ul_port_out_ah for this port
 * @param : port, index into epc_ul_worker_func (must be < NUM_SPGW_PORTS)
 * @return : Returns nothing; out-of-range port indexes are ignored
 */
void register_ul_worker(epc_ul_handler f, int port)
{
	/* Bounds check: the table has NUM_SPGW_PORTS slots, and an invalid
	 * index would corrupt adjacent memory. */
	if (port < 0 || port >= NUM_SPGW_PORTS)
		return;
	epc_ul_worker_func[port] = f;
}
|
nikhilc149/e-utran-features-bug-fixes | dp/pipeline/epc_dl.c | /*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <string.h>
#include <sched.h>
#include <sys/types.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <rte_string_fns.h>
#include <rte_ring.h>
#include <rte_pipeline.h>
#include <rte_lcore.h>
#include <rte_ethdev.h>
#include <rte_port_ring.h>
#include <rte_port_ethdev.h>
#include <rte_table_hash.h>
#include <rte_table_stub.h>
#include <rte_byteorder.h>
#include <rte_ip.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_jhash.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
#include <rte_udp.h>
#include <rte_mbuf.h>
#include <rte_hash_crc.h>
#include <rte_port_ring.h>
#include <rte_kni.h>
#include <rte_arp.h>
#include "gtpu.h"
#include "up_main.h"
#include "pfcp_util.h"
#include "epc_packet_framework.h"
#include "gw_adapter.h"
#ifdef TIMER_STATS
#include "perf_timer.h"
_timer_t _init_time = 0;
#endif /* TIMER_STATS */
extern struct kni_port_params *kni_port_params_array[RTE_MAX_ETHPORTS];
/* Generate new pcap for sgi port. */
extern pcap_dumper_t *pcap_dumper_west;
extern pcap_dumper_t *pcap_dumper_east;
extern int clSystemLog;
extern struct rte_hash *conn_hash_handle;
extern uint16_t dp_comm_port;
uint32_t dl_nkni_pkts = 0;
#ifdef USE_REST
/**
* @brief : Perform lookup for src ip, and set activity flag if connection
* is active for downlink
* @param : node_address_t srcIp, Ip address
* @return : Returns nothing
*/
/**
 * @brief : Perform lookup for src ip, and set activity flag if connection
 *          is active for downlink
 * @param : srcIp, peer node address (IPv4 or IPv6)
 * @return : Returns nothing
 */
static inline void check_activity(node_address_t srcIp)
{
	peerData *conn_entry = NULL;

	/* VS: Check the in-activity on tunnel */
	int lookup_ret = rte_hash_lookup_data(conn_hash_handle,
			&srcIp, (void **)&conn_entry);

	if (lookup_ret < 0) {
		/* Peer not tracked: just log and bail out */
		if (srcIp.ip_type == IPV6_TYPE) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Entry not found for NODE IPv6 Addr: "IPv6_FMT"\n",
					LOG_VALUE, IPv6_PRINT(IPv6_CAST(srcIp.ipv6_addr)));
		} else {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Entry not found for NODE IPv4 Addr: %s\n",
					LOG_VALUE, inet_ntoa(*(struct in_addr *)&srcIp.ipv4_addr));
		}
		return;
	}

	if (srcIp.ip_type == IPV6_TYPE) {
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Recv pkts from NODE IPv6 Addr: "IPv6_FMT"\n",
				LOG_VALUE, IPv6_PRINT(IPv6_CAST(srcIp.ipv6_addr)));
	} else {
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Recv pkts from NODE IPv4 Addr: %s\n",
				LOG_VALUE, inet_ntoa(*(struct in_addr *)&srcIp.ipv4_addr));
	}
	/* Data seen from this peer: mark the connection as active */
	conn_entry->activityFlag = 1;
}
#endif /* USE_REST */
/**
* @brief : set port id value for downlink
* @param : m, rte mbuf pointer
* @return : Returns nothing
*/
/**
 * @brief : Classify one downlink (EB/SGI) mbuf and set its meta-data port
 *          id: GTPU data pkts go to the fastpath (port 0, dl_sgi_pkt),
 *          ARP/ICMP/local/broadcast pkts are flagged for KNI/Linux
 *          handling (dl_arp_pkt), everything else defaults to the master
 *          core (port 1).
 * @param : m, rte mbuf pointer
 * @return : Returns nothing
 */
static inline void epc_dl_set_port_id(struct rte_mbuf *m)
{
	/* point to the start of the mbuf */
	uint8_t *m_data = rte_pktmbuf_mtod(m, uint8_t *);
	/* point to the meta data offset header room */
	struct epc_meta_data *meta_data =
		(struct epc_meta_data *)RTE_MBUF_METADATA_UINT8_PTR(m, META_DATA_OFFSET);
	/* point to the port id in the meta offset */
	uint32_t *port_id_offset = &meta_data->port_id;
	node_address_t peer_addr = {0};
	/* Get the ether header info */
	struct ether_hdr *eh = (struct ether_hdr *)&m_data[0];
	/* Default route all packets to master core */
	*port_id_offset = 1;
	/* Flag ARP pkt for linux handling */
	if (eh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_ARP))
	{
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"EB_IN:eh->ether_type==ETHER_TYPE_ARP= 0x%X\n",
				LOG_VALUE, eh->ether_type);
		dl_arp_pkt = 1; return;
	} else if (eh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4)) {
		struct ipv4_hdr *ipv4_hdr =
			(struct ipv4_hdr *)&m_data[sizeof(struct ether_hdr)];
		uint32_t ipv4_packet;
		/* Host Order ipv4_hdr->dst_addr */
		uint32_t ho_addr;
		ipv4_packet = (eh->ether_type == htons(ETHER_TYPE_IPv4));
		/*if (unlikely(
				 (m->ol_flags & PKT_RX_IP_CKSUM_MASK)
					 == PKT_RX_IP_CKSUM_BAD ||
				 (m->ol_flags & PKT_RX_L4_CKSUM_MASK)
					 == PKT_RX_L4_CKSUM_BAD)) {
			//clLog(clSystemLog, eCLSeverityCritical, "DL Bad checksum: %lu\n", m->ol_flags);
			//ipv4_packet = 0;
		}*/
		ho_addr = (ipv4_hdr->dst_addr);
		if ((ipv4_hdr->next_proto_id == IPPROTO_ICMP) || /* Ipv4 ICMP pkt for linux handling */
			(app.eb_ip == ho_addr) || /* pkt destined to SGI_IP for linux handling */
			(app.eb_li_ip == ho_addr) || /* pkt destined to SGI_IP for linux handling */
			(IS_IPV4_MCAST(ntohl(ho_addr))) || /* MCAST pkt for linux handling */
			(app.eb_bcast_addr == ho_addr) || /* Flag BCAST pkt for linux handling */
			(app.eb_li_bcast_addr == ho_addr)) /* Flag BCAST pkt for linux handling */
		{
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"EB_IN:ipv4_hdr->next_proto_id= %u \n"
					"ipv4_hdr->dst_addr(app.eb_ip/IPV4_MCAST/app.eb_bcast_addr)= %s\n",
					LOG_VALUE, ipv4_hdr->next_proto_id, inet_ntoa(*(struct in_addr *)&ho_addr));
			dl_arp_pkt = 1;
			return;
		}
		/* Flag all other pkts for epc_dl proc handling */
		if (likely
			(ipv4_packet &&
			 ((ipv4_hdr->next_proto_id == IPPROTO_UDP) ||
			  (ipv4_hdr->next_proto_id == IPPROTO_TCP)))) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"EB_IN: IPv4 UDP packet\n", LOG_VALUE);
#ifdef USE_REST
			struct udp_hdr *udph =
				(struct udp_hdr *)&m_data[sizeof(struct ether_hdr) +
				((ipv4_hdr->version_ihl & 0xf) << 2)];
			if (likely(udph->dst_port == UDP_PORT_GTPU_NW_ORDER)) {
				/* VS: Set activity flag if data receive from peer node */
				/* Fix: memset sized by the actual object's type; the
				 * previous sizeof(peer_address_t) did not match the
				 * node_address_t declaration (IPv6 branch already uses
				 * sizeof(node_address_t)). */
				memset(&peer_addr, 0, sizeof(node_address_t));
				peer_addr.ip_type = IPV4_TYPE;
				peer_addr.ipv4_addr = ipv4_hdr->src_addr;
				check_activity(peer_addr);
				struct gtpu_hdr *gtpuhdr = get_mtogtpu(m);
				if (gtpuhdr->msgtype == GTPU_ECHO_REQUEST ||
						gtpuhdr->msgtype == GTPU_ECHO_RESPONSE ||
						gtpuhdr->msgtype == GTPU_ERROR_INDICATION) {
					return;
				} else if ((gtpuhdr->msgtype != GTP_GPDU) && (gtpuhdr->msgtype != GTP_GEMR)) {
					return;
				}
			}
#endif /* USE_REST */
			*port_id_offset = 0;
			dl_sgi_pkt = 1;
			dl_arp_pkt = 0;
		} //GCC_Security flag
	} else if (eh->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6)) {
		/* Get the IPv6 Header from pkt */
		struct ipv6_hdr *ipv6_hdr = (struct ipv6_hdr *)&m_data[ETH_HDR_SIZE];
		/* Rcvd pkts is IPv6 pkt */
		uint32_t ipv6_packet = (eh->ether_type == htons(ETHER_TYPE_IPv6));
		/* L4: If next header is ICMPv6 and Neighbor Solicitation/Advertisement */
		if ((ipv6_hdr->proto == IPPROTO_ICMPV6) ||
				(ipv6_hdr->proto != IPPROTO_UDP)) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"EB_IN:ipv6->icmpv6:ipv6_hdr->proto= %u\n"
					"EB:ICMPv6: IPv6 Packets redirect to LINUX..\n",
					LOG_VALUE, ipv6_hdr->proto);
			/* Redirect packets to LINUX and Master Core to fill the arp entry */
			dl_arp_pkt = 1;
			return;
		}
		/* Flag all other pkts for epc_dl proc handling */
		if (likely(ipv6_packet &&
					((ipv6_hdr->proto == IPPROTO_UDP) ||
					 (ipv6_hdr->proto == IPPROTO_TCP)))) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"EB_IN: IPv6 UDP Packet\n", LOG_VALUE);
			/* Point to the UDP payload */
			struct udp_hdr *udph =
				(struct udp_hdr *)&m_data[ETH_HDR_SIZE + IPv6_HDR_SIZE];
			if (likely(udph->dst_port == UDP_PORT_GTPU_NW_ORDER)) {
				/* Local IPv6 Address */
				struct in6_addr ho_addr = {0};
				memcpy(&ho_addr.s6_addr, &ipv6_hdr->dst_addr, sizeof(ipv6_hdr->dst_addr));
				/* Validate the destination address is SGI/EB or not */
				if (memcmp(&(app.eb_ipv6), &ho_addr, sizeof(ho_addr)) &&
						memcmp(&(app.eb_li_ipv6), &ho_addr, sizeof(ho_addr))) {
					clLog(clSystemLog, eCLSeverityDebug,
							LOG_FORMAT"EB_IN:ipv6_hdr->proto= %u: Not for local intf IPv6 dst addr Packet,"
							"redirect to LINUX..\n"
							"EB_IN: Expected IPv6 Addr:"IPv6_FMT" or "IPv6_FMT", Received IPv6 Addr:"IPv6_FMT"\n",
							LOG_VALUE, ipv6_hdr->proto,
							IPv6_PRINT(app.eb_ipv6), IPv6_PRINT(app.eb_li_ipv6), IPv6_PRINT(ho_addr));
					/* Redirect packets to LINUX and Master Core to fill the arp entry */
					dl_arp_pkt = 1;
					return;
				}
#ifdef USE_REST
				/* TODO: Set activity flag if data receive from peer node */
				memset(&peer_addr, 0, sizeof(node_address_t));
				peer_addr.ip_type = IPV6_TYPE;
				memcpy(peer_addr.ipv6_addr,
						ipv6_hdr->src_addr, IPV6_ADDR_LEN);
				check_activity(peer_addr);
				struct gtpu_hdr *gtpuhdr = get_mtogtpu_v6(m);
				if (gtpuhdr->msgtype == GTPU_ECHO_REQUEST ||
						gtpuhdr->msgtype == GTPU_ECHO_RESPONSE ||
						gtpuhdr->msgtype == GTPU_ERROR_INDICATION) {
					clLog(clSystemLog, eCLSeverityDebug,
							LOG_FORMAT"EB_IN: IPv6 GTPU Echo Packet\n", LOG_VALUE);
					return;
				} else if ((gtpuhdr->msgtype != GTP_GPDU) && (gtpuhdr->msgtype != GTP_GEMR)) {
					return;
				}
#endif /* USE_REST */
			}
			*port_id_offset = 0;
			dl_sgi_pkt = 1;
			dl_arp_pkt = 0;
		} //GCC_Security flag
	}
}
/**
* @brief : Capture downlink packets
* @param : p, rte pipeline pointer
* @param : pkts, rte mbuf
* @param : n, number of packets
* @param : arg, unused parameter
* @return : Returns nothing
*/
static int epc_dl_port_in_ah(struct rte_pipeline *p,
		struct rte_mbuf **pkts, uint32_t n,
		void *arg)
{
#ifdef TIMER_STATS
	TIMER_GET_CURRENT_TP(_init_time);
#endif /* TIMER_STATS */
	/* Loop counter made local: the previous 'static' storage class was
	 * unnecessary (value is re-initialized every call) and made the
	 * handler non-reentrant. */
	uint32_t i;
	RTE_SET_USED(arg);
	RTE_SET_USED(p);
	/* KNI: Initialize parameters */
	struct rte_mbuf *kni_pkts_burst[n];
	/* Reset per-burst classification counters/flags */
	dl_ndata_pkts = 0;
	dl_nkni_pkts = 0;
	dl_arp_pkt = 0;
	dl_sgi_pkt = 0;
	for (i = 0; i < n; i++) {
		struct rte_mbuf *m = pkts[i];
		/* Classify the pkt: sets dl_sgi_pkt / dl_arp_pkt and writes
		 * the destination port id into the mbuf meta-data area */
		epc_dl_set_port_id(m);
		if (dl_sgi_pkt) {
			/* Data pkt: counted for the fastpath worker */
			dl_sgi_pkt = 0;
			dl_ndata_pkts++;
		} else if (dl_arp_pkt) {
			/* ARP/ICMP/local pkt: collected for KNI ingress */
			dl_arp_pkt = 0;
			kni_pkts_burst[dl_nkni_pkts++] = pkts[i];
		}
	}
	if (dl_nkni_pkts) {
		RTE_LOG(DEBUG, DP, "KNI: DL send pkts to kni\n");
		kni_ingress(kni_port_params_array[SGI_PORT_ID],
				kni_pkts_burst, dl_nkni_pkts);
	}
#ifdef STATS
	epc_app.dl_params[SGI_PORT_ID].pkts_in += dl_ndata_pkts;
#endif /* STATS */
	dl_pkts_nbrst++;
	/* Capture packets on sgi port. */
	up_pcap_dumper(pcap_dumper_east, pkts, n);
	return 0;
}
static epc_dl_handler epc_dl_worker_func[NUM_SPGW_PORTS];
/**
* @brief : Downlink packet handler
* @param : p, rte pipeline pointer
* @param : pkts, rte mbuf
* @param : pkts_mask, packet mask
* @param : arg, port number
* @return : Returns nothing
*/
static inline int epc_dl_port_out_ah(struct rte_pipeline *p, struct rte_mbuf **pkts,
		uint64_t pkts_mask, void *arg)
{
	/* Start with an empty mask; the registered worker fills in the bits of
	 * the pkts it wants forwarded (mask is passed by address below). */
	pkts_mask = 0;
	int worker_index = 0, ret = 0;
	RTE_SET_USED(p);
	/* Output port number was stashed in 'arg' at port-out creation time */
	int portno = (uintptr_t) arg;
	if (dl_pkts_nbrst == dl_pkts_nbrst_prv) {
		/* No new input burst since the last invocation: nothing to do */
		return 0;
	} else if (dl_ndata_pkts) {
		dl_pkts_nbrst_prv = dl_pkts_nbrst;
		epc_dl_handler f = epc_dl_worker_func[portno];
		/* VS- NGCORE_SHRINK: worker_index:TBC */
		if(f != NULL){
			/* Propagate the handler's return value, consistent with
			 * the uplink counterpart (epc_ul_port_out_ah); it was
			 * previously discarded. */
			ret = f(p, pkts, dl_ndata_pkts, &pkts_mask, worker_index);
		} else {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Not Register EB pkts handler, Configured EB MAC was wrong\n",
					LOG_VALUE);
		}
	}
#ifdef TIMER_STATS
#ifndef AUTO_ANALYSIS
	dl_stat_info.port_in_out_delta = TIMER_GET_ELAPSED_NS(_init_time);
	/* Export stats into file. */
	dl_timer_stats(dl_ndata_pkts, &dl_stat_info);
#else
	/* calculate min time, max time, min_burst_sz, max_burst_sz
	 * perf_stats.op_time[13] = port_in_out_time */
	SET_PERF_MAX_MIN_TIME(dl_perf_stats.op_time[13], _init_time, dl_ndata_pkts, 1);
#endif /* AUTO_ANALYSIS */
#endif /* TIMER_STATS */
	return ret;
}
/**
 * @brief : Build the downlink (EB/SGI) rte_pipeline: one ethdev reader
 *          input port, one nodrop ethdev writer output port (index 0) plus
 *          ring writers towards the master-core rings, a stub table, and a
 *          default entry that routes by the per-pkt meta-data port id.
 * @param : param, pipeline parameter block filled in by this function
 * @param : core, lcore the RX path will run on (used for NUMA warning only)
 * @param : in_port_id, ethdev port the pipeline reads from
 * @param : out_port_id, ethdev port output port 0 writes to
 * @return : Returns nothing; rte_panic()/rte_exit() on any setup failure
 */
void epc_dl_init(struct epc_dl_params *param, int core, uint8_t in_port_id, uint8_t out_port_id)
{
	struct rte_pipeline *p;
	unsigned i;
	/* Reset burst bookkeeping shared with the port in/out action handlers */
	dl_pkts_nbrst = 0;
	dl_pkts_nbrst_prv = 0;
	if (in_port_id != app.eb_port && in_port_id != app.wb_port)
		rte_exit(EXIT_FAILURE, LOG_FORMAT"Wrong MAC configured for EB interface\n", LOG_VALUE);
	memset(param, 0, sizeof(*param));
	snprintf((char *)param->name, PIPE_NAME_SIZE, "epc_dl_%d", in_port_id);
	param->pipeline_params.socket_id = rte_socket_id();
	param->pipeline_params.name = param->name;
	/* Port id for routing is read from the mbuf meta-data area */
	param->pipeline_params.offset_port_id = META_DATA_OFFSET;
	p = rte_pipeline_create(&param->pipeline_params);
	if (p == NULL)
		rte_panic("%s: Unable to configure the pipeline\n", __func__);
	/* Input port configuration */
	/* Warn (do not fail) if the NIC and the RX lcore are on different sockets */
	if (rte_eth_dev_socket_id(in_port_id)
		!= (int)lcore_config[core].socket_id) {
		clLog(clSystemLog, eCLSeverityMinor,
			LOG_FORMAT"location of the RX core for port= %d is not optimal\n",
			LOG_VALUE, in_port_id);
		clLog(clSystemLog, eCLSeverityMinor,
			LOG_FORMAT"Performance may be Degradated\n", LOG_VALUE);
	}
	struct rte_port_ethdev_reader_params port_ethdev_params = {
		.port_id = epc_app.ports[in_port_id],
		.queue_id = 0,
	};
	struct rte_pipeline_port_in_params in_port_params = {
		.ops = &rte_port_ethdev_reader_ops,
		.arg_create = (void *)&port_ethdev_params,
		.burst_size = epc_app.burst_size_rx_read,
	};
	/* Attach the classification action handler only on the SGI side */
	if (in_port_id == SGI_PORT_ID) {
		in_port_params.f_action = epc_dl_port_in_ah;
		in_port_params.arg_ah = NULL;
	}
	if (rte_pipeline_port_in_create
		(p, &in_port_params, &param->port_in_id))
	{
		rte_panic("%s: Unable to configure input port for port %d\n",
			__func__, in_port_id);
	}
	/* Output port configuration */
	for (i = 0; i < epc_app.n_ports; i++) {
		if (i == 0) {
			/* Pipeline driving decapped fast path pkts out the epc_ul core */
			struct rte_port_ethdev_writer_nodrop_params port_ethdev_params =
			{
				.port_id = epc_app.ports[out_port_id],
				.queue_id = 0,
				.tx_burst_sz = epc_app.burst_size_tx_write,
				.n_retries = 0,
			};
			struct rte_pipeline_port_out_params out_port_params =
			{
				.ops = &rte_port_ethdev_writer_nodrop_ops,
				.arg_create = (void *)&port_ethdev_params,
				.f_action = epc_dl_port_out_ah,
				/* Port index is recovered from arg_ah in the action
				 * handler to select the worker function */
				.arg_ah = (void *)(uintptr_t) (i+1),
			};
			if (rte_pipeline_port_out_create
				(p, &out_port_params, &param->port_out_id[i])) {
				rte_panic
					("%s: Unable to configure output port\n"
					"for ring RX %i\n", __func__, i);
			}
		} else {
			/* Pipeline equeueing arp request pkts to epc_mct core ring */
			struct rte_port_ring_writer_params port_ring_params = {
				.tx_burst_sz = epc_app.burst_size_rx_write,
			};
			struct rte_pipeline_port_out_params out_port_params = {
				.ops = &rte_port_ring_writer_ops,
				.arg_create = (void *)&port_ring_params
			};
			port_ring_params.ring = epc_app.epc_mct_rx[in_port_id];
			if (rte_pipeline_port_out_create
				(p, &out_port_params, &param->port_out_id[i])) {
				rte_panic
					("%s: Unable to configure output port\n"
					"for ring RX %i\n", __func__, i);
			}
		}
	}
	/* table configuration */
	/* Tables */
	{
		/* Stub table: no lookup, every pkt hits the default entry */
		struct rte_pipeline_table_params table_params = {
			.ops = &rte_table_stub_ops
		};
		if (rte_pipeline_table_create
			(p, &table_params, &param->table_id)) {
			rte_panic("%s: Unable to configure table %u\n",
				__func__, param->table_id);
		}
	}
	if (rte_pipeline_port_in_connect_to_table
		(p, param->port_in_id, param->table_id)) {
		rte_panic("%s: Unable to connect input port %u to table %u\n",
			__func__, param->port_in_id, param->table_id);
	}
	/* Add entries to tables */
	{
		/* PORT_META: output port taken from the pkt's meta-data, i.e.
		 * the port id written by epc_dl_set_port_id() */
		struct rte_pipeline_table_entry default_entry = {
			.action = RTE_PIPELINE_ACTION_PORT_META,
		};
		struct rte_pipeline_table_entry *default_entry_ptr;
		int status = rte_pipeline_table_default_entry_add(p,
				param->table_id,
				&default_entry,
				&default_entry_ptr);
		if (status) {
			rte_panic(
				"%s: failed to add table default entry\n",
				__func__);
			rte_pipeline_free(p);
			return;
		}
	}
	/* Enable input ports */
	if (rte_pipeline_port_in_enable(p, param->port_in_id)) {
		rte_panic("%s: unable to enable input port %d\n", __func__,
			param->port_in_id);
	}
	/* set flush option */
	param->flush_max = EPC_PIPELINE_FLUSH_MAX;
	/* Check pipeline consistency */
	if (rte_pipeline_check(p) < 0)
		rte_panic("%s: Pipeline consistency check failed\n", __func__);
	param->pipeline = p;
}
/**
 * @brief : Downlink lcore loop body: runs the pipeline, periodically
 *          flushes it, services KNI control requests, drains the S1U
 *          shared ring (ARP/echo pkts) directly out of the S1U ethdev
 *          port, and processes the notification ring.
 * @param : args, struct epc_dl_params for this pipeline
 * @return : Returns nothing
 */
void epc_dl(void *args)
{
	struct epc_dl_params *param = (struct epc_dl_params *)args;
	rte_pipeline_run(param->pipeline);
	if (++param->flush_count >= param->flush_max) {
		rte_pipeline_flush(param->pipeline);
		param->flush_count = 0;
	}
	/** Handle the request mbufs sent from kernel space,
	 *  Then analyzes it and calls the specific actions for the specific requests.
	 *  Finally constructs the response mbuf and puts it back to the resp_q.
	 */
	rte_kni_handle_request(kni_port_params_array[SGI_PORT_ID]->kni[0]);
	uint32_t queued_cnt = rte_ring_count(shared_ring[S1U_PORT_ID]);
	if (queued_cnt) {
		struct rte_mbuf *pkts[queued_cnt];
		uint32_t rx_cnt = rte_ring_dequeue_bulk(shared_ring[S1U_PORT_ID],
				(void**)pkts, queued_cnt, NULL);
		uint32_t pkt_indx = 0;
		/* Capture the echo packets.*/
		up_pcap_dumper(pcap_dumper_east, pkts, rx_cnt);
		while (rx_cnt) {
			uint16_t pkt_cnt = PKT_BURST_SZ;
			if (rx_cnt < PKT_BURST_SZ)
				pkt_cnt = rx_cnt;
			/* ARP_REQ on S1U direct driven by epc_dl core */
			uint16_t tx_cnt = rte_eth_tx_burst(S1U_PORT_ID,
					0, &pkts[pkt_indx], pkt_cnt);
			/* rte_eth_tx_burst() takes ownership of the tx_cnt mbufs it
			 * accepted; free only the rejected tail of this burst.  The
			 * previous code freed all pkt_cnt mbufs (double-free of the
			 * transmitted ones) and indexed from 0 instead of pkt_indx. */
			for (uint16_t inx = tx_cnt; inx < pkt_cnt; inx++) {
				rte_pktmbuf_free(pkts[pkt_indx + inx]);
			}
			/* The whole burst is consumed (sent or dropped); advancing by
			 * tx_cnt only would retry mbufs that were just freed. */
			rx_cnt -= pkt_cnt;
			pkt_indx += pkt_cnt;
		}
	}
	/* Drain the notification ring towards the notification handler */
	uint32_t count = rte_ring_count(notify_ring);
	while (count) {
		struct rte_mbuf *pkts[count];
		uint32_t rx_cnt = rte_ring_dequeue_bulk(notify_ring,
				(void**)pkts, count, NULL);
		if (rx_cnt) {
			int ret = notification_handler(pkts, rx_cnt);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"ERROR: Notification handler failed\n", LOG_VALUE);
			}
		}
		count -= rx_cnt;
	}
}
/**
 * @brief : Register the downlink burst handler for a port.
 * @param : f, handler invoked by epc_dl_port_out_ah for this port
 * @param : port, index into epc_dl_worker_func (must be < NUM_SPGW_PORTS)
 * @return : Returns nothing; out-of-range port indexes are ignored
 */
void register_dl_worker(epc_dl_handler f, int port)
{
	/* Bounds check: the table has NUM_SPGW_PORTS slots, and an invalid
	 * index would corrupt adjacent memory. */
	if (port < 0 || port >= NUM_SPGW_PORTS)
		return;
	epc_dl_worker_func[port] = f;
}
|
nikhilc149/e-utran-features-bug-fixes | pfcp_messages/pfcp_association.c | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "pfcp_util.h"
#include "pfcp_enum.h"
#include "pfcp_set_ie.h"
#include "pfcp_session.h"
#include "pfcp_association.h"
#include "pfcp_messages_encoder.h"
#include "pfcp_messages_decoder.h"
#include "../cp_dp_api/vepc_cp_dp_api.h"
#include "teid_upf.h"
#include "gw_adapter.h"
#ifdef CP_BUILD
#include "teid.h"
#include "cp.h"
#include "main.h"
#include "pfcp.h"
#include "cp_stats.h"
#include "cp_config.h"
#include "gtpv2c_error_rsp.h"
#include "cp_timer.h"
#include "cdr.h"
#include "debug_str.h"
#else
#include "up_main.h"
#endif /* CP_BUILD */
#ifdef CP_BUILD
#include "sm_pcnd.h"
#include "cdnsutil.h"
#endif /* CP_BUILD*/
#define PFCP_APPLICATION_ID_VALUE "app_1"
#ifdef DP_BUILD
struct in_addr cp_comm_ip;
uint16_t cp_comm_port;
#endif /* DP_BUILD */
extern bool assoc_available;
extern int clSystemLog;
#ifdef CP_BUILD
extern int pfcp_fd;
extern int pfcp_fd_v6;
extern pfcp_config_t config;
uint32_t *g_gx_pending_csr[BUFFERED_ENTRIES_DEFAULT];
uint32_t g_gx_pending_csr_cnt = 0;
/**
 * @brief : Populate a PFCP Association Setup Request: zeroed body, node-level
 *          header (no SEID) with a fresh sequence number, and the recovery
 *          time stamp IE.
 * @param : pfcp_ass_setup_req, message structure to fill
 * @return : Returns nothing
 */
void
fill_pfcp_association_setup_req(pfcp_assn_setup_req_t *pfcp_ass_setup_req)
{
	memset(pfcp_ass_setup_req, 0, sizeof(*pfcp_ass_setup_req));

	/* Allocate a sequence number for this association setup transaction */
	uint32_t seq_num = get_pfcp_sequence_number(PFCP_ASSOCIATION_SETUP_REQUEST, 1);

	/* Node-level message: no SEID and no CP mode in the header */
	set_pfcp_seid_header((pfcp_header_t *)&(pfcp_ass_setup_req->header),
			PFCP_ASSOCIATION_SETUP_REQUEST, NO_SEID, seq_num, NO_CP_MODE_REQUIRED);

	set_recovery_time_stamp(&(pfcp_ass_setup_req->rcvry_time_stmp));
}
/* Fill pfd mgmt cstm ie */
/**
 * Fill the Custom-PFD-Contents part of a PFCP PFD Contents IE from a
 * CP->DP rule message: the payload is "<mtype> " as ASCII followed by the
 * raw bytes of the matching msg_union member.
 *
 * @param pfd_conts : PFD Contents IE to fill; 'cp' is forced to PRESENT
 * @param cstm_buf  : rule message whose mtype selects the union member
 * @return total encoded length (custom-content length + IE header size)
 */
uint16_t
set_pfd_contents(pfcp_pfd_contents_ie_t *pfd_conts, struct msgbuf *cstm_buf)
{
	/* Only the Custom PFD content flag is set by this function */
	pfd_conts->cp = PRESENT;
	/*pfd_conts->dn = 0;
	pfd_conts->url = 0;
	pfd_conts->fd = 0;
	pfd_conts->pfd_contents_spare2 = 0x00;*/
	/* NOTE(review): fd/url/dn are read but never assigned here (the
	 * zeroing above is commented out) — presumably the caller provides a
	 * zero-initialized IE so these branches are no-ops; confirm. */
	if(pfd_conts->fd != 0){
		pfd_conts->len_of_flow_desc = 0;
		pfd_conts->flow_desc = 0;
	}
	if(pfd_conts->url != 0){
		pfd_conts->length_of_url = 0;
		pfd_conts->url2 = 0;
	}
	if(pfd_conts->dn != 0){
		pfd_conts->len_of_domain_nm = 0;
		pfd_conts->domain_name = 0;
	}
	if(pfd_conts->cp != 0){
		/* Custom content points at the IE's own data buffer */
		pfd_conts->cstm_pfd_cntnt = pfd_conts->data;
		/* Length of the "<mtype> " ASCII prefix written before the struct */
		uint16_t struct_len = 0;
		switch (cstm_buf->mtype) {
		case MSG_SDF_CRE:
		case MSG_ADC_TBL_CRE:
		case MSG_PCC_TBL_CRE:
		case MSG_SESS_TBL_CRE:
		case MSG_MTR_CRE:
			/* Fill msg type */
			struct_len = snprintf((char *)pfd_conts->data, MAX_LEN, "%"PRId64" ",cstm_buf->mtype);
			/* Fill cstm ie contents frome rule structure as string */
			memcpy(pfd_conts->data+struct_len, (uint8_t *)&cstm_buf->msg_union.msg_table,
					sizeof(cstm_buf->msg_union.msg_table));
			pfd_conts->len_of_cstm_pfd_cntnt = sizeof(cstm_buf->msg_union.msg_table) + struct_len;
			break;
		case MSG_EXP_CDR:
			/* Fill msg type */
			struct_len = snprintf((char *)pfd_conts->data, MAX_LEN,
					"%"PRId64" ",cstm_buf->mtype);
			/* Fill cstm ie contents frome rule structure as string */
			memcpy(pfd_conts->data + struct_len,
					(uint8_t *)&cstm_buf->msg_union.ue_cdr, sizeof(struct msg_ue_cdr));
			pfd_conts->len_of_cstm_pfd_cntnt = sizeof(struct msg_ue_cdr) + struct_len;
			break;
		case MSG_SDF_DES:
		case MSG_ADC_TBL_DES:
		case MSG_PCC_TBL_DES:
		case MSG_SESS_TBL_DES:
		case MSG_MTR_DES:
			/* Destroy messages carry no payload beyond the header */
			break;
		case MSG_SDF_ADD:
		case MSG_SDF_DEL:
			/* Fill msg type */
			struct_len = snprintf((char *)pfd_conts->data, MAX_LEN,
					"%"PRId64" ",cstm_buf->mtype);
			/* Fill cstm ie contents frome rule structure as string */
			memcpy(pfd_conts->data + struct_len,
					(uint8_t *)&cstm_buf->msg_union.pkt_filter_entry, sizeof(struct pkt_filter));
			pfd_conts->len_of_cstm_pfd_cntnt = sizeof(struct pkt_filter)+struct_len;
			break;
		case MSG_ADC_TBL_ADD:
		case MSG_ADC_TBL_DEL:
			/* Fill msg type */
			struct_len = snprintf((char *)pfd_conts->data, MAX_LEN,
					"%"PRId64" ",cstm_buf->mtype);
			/* Fill cstm ie contents frome rule structure as string */
			memcpy(pfd_conts->data + struct_len,
					(uint8_t *)&cstm_buf->msg_union.adc_filter_entry, sizeof(struct adc_rules));
			pfd_conts->len_of_cstm_pfd_cntnt = sizeof(struct adc_rules)+ struct_len;
			break;
		case MSG_PCC_TBL_ADD:
		case MSG_PCC_TBL_DEL:
			/* Fill msg type */
			struct_len = snprintf((char *)pfd_conts->data, MAX_LEN,
					"%"PRId64" ",cstm_buf->mtype);
			/* Fill cstm ie contents frome rule structure as string */
			memcpy(pfd_conts->data + struct_len,
					(uint8_t *)&cstm_buf->msg_union.pcc_entry, sizeof(struct pcc_rules));
			pfd_conts->len_of_cstm_pfd_cntnt = sizeof(struct pcc_rules) + struct_len;
			break;
		case MSG_SESS_CRE:
		case MSG_SESS_MOD:
		case MSG_SESS_DEL:
			/* Fill msg type */
			struct_len = snprintf((char *)pfd_conts->data, MAX_LEN,
					"%"PRId64" ",cstm_buf->mtype);
			/* Fill cstm ie contents frome rule structure as string */
			memcpy(pfd_conts->data + struct_len,
					(uint8_t *)&cstm_buf->msg_union.sess_entry, sizeof(struct session_info));
			pfd_conts->len_of_cstm_pfd_cntnt = sizeof(struct session_info) + struct_len;
			break;
		case MSG_MTR_ADD:
		case MSG_MTR_DEL:
			/* Fill msg type */
			struct_len = snprintf((char *)pfd_conts->data, MAX_LEN,
					"%"PRId64" ",cstm_buf->mtype);
			/* Fill cstm ie contents frome rule structure as string */
			memcpy(pfd_conts->data + struct_len ,
					(uint8_t *)&cstm_buf->msg_union.mtr_entry, sizeof(struct mtr_entry));
			pfd_conts->len_of_cstm_pfd_cntnt = sizeof(struct mtr_entry) + struct_len;
			break;
		case MSG_DDN_ACK:
			/* Fill msg type */
			struct_len = snprintf((char *)pfd_conts->data, MAX_LEN,
					"%"PRId64" ",cstm_buf->mtype);
			/* Fill cstm ie contents frome rule structure as string */
			/* NOTE(review): copies from msg_union.mtr_entry but sizes by
			 * struct downlink_data_notification — union members share an
			 * address so the bytes are the same, but the member name looks
			 * like a copy/paste slip; confirm intended member. */
			memcpy(pfd_conts->data + struct_len ,
					(uint8_t *)&cstm_buf->msg_union.mtr_entry, sizeof(struct downlink_data_notification));
			pfd_conts->len_of_cstm_pfd_cntnt = sizeof(struct downlink_data_notification) + struct_len;
			break;
		default:
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid msg type "
					"while Set PFD MGMT contents\n", LOG_VALUE);
			break;
		}
	}
	/* set pfd contents header */
	pfcp_set_ie_header(&pfd_conts->header, PFCP_IE_PFD_CONTENTS,
			(pfd_conts->len_of_cstm_pfd_cntnt + sizeof(pfd_conts->header)));
	return (pfd_conts->len_of_cstm_pfd_cntnt + sizeof(pfd_conts->header));
}
/**
* @brief : This function fills in values to pfd context ie
* @param : pfd_contxt is pointer to structure of pfd context ie
* @return : This function dose not return anything
*/
/**
 * @brief : Set the PFD Context grouped-IE header: its length covers the
 *          single contained PFD Contents IE (value + IE header).
 * @param : pfd_conxt, PFD Context IE to finalize
 * @return : Returns nothing
 */
static void
set_pfd_context(pfcp_pfd_context_ie_t *pfd_conxt)
{
	/* Grouped-IE length = inner IE value length + inner IE header size */
	uint16_t grouped_len =
		pfd_conxt->pfd_contents[0].header.len + sizeof(pfcp_ie_header_t);

	pfcp_set_ie_header(&pfd_conxt->header, IE_PFD_CONTEXT, grouped_len);
	/* Exactly one PFD Contents IE is carried */
	pfd_conxt->pfd_contents_count = 1;
}
/**
* @brief : This function fills in values to pfd application id ie
* @param : app_id is pointer to structure of pfd application id ie
* @return : This function dose not return anything
*/
/**
 * @brief : Fill the PFCP Application ID IE with the fixed application
 *          identifier string (bounded by the IE's buffer size).
 * @param : app_id, Application ID IE to fill
 * @return : Returns nothing
 */
static void
set_pfd_application_id(pfcp_application_id_ie_t *app_id)
{
	/* TODO : remove the hardcoded value of APP ID */
	/* Length is computed once and bounded by the destination buffer */
	size_t id_len = strnlen(PFCP_APPLICATION_ID_VALUE, sizeof(app_id->app_ident));

	pfcp_set_ie_header(&app_id->header, PFCP_IE_APPLICATION_ID, id_len);
	memcpy(app_id->app_ident, PFCP_APPLICATION_ID_VALUE, id_len);
}
/**
 * @brief : This function fills pfd app id and pfd context
 * @param : app_ids_pfds_t is pointer to structure of ie
 * @param : len denotes total length of ie
 * @return : This function does not return anything
 */
static void
set_app_ids_pfds(pfcp_app_ids_pfds_ie_t *app_ids_pfds_t , uint16_t len)
{
	/* Populate the application id IE first */
	set_pfd_application_id(&app_ids_pfds_t->application_id);
	app_ids_pfds_t->pfd_context_count = 1;

	/* Populate each PFD context and recompute the total grouped length:
	 * app-id IE + pfd-context IE, each with its own IE header */
	for (int idx = 0; idx < app_ids_pfds_t->pfd_context_count; ++idx) {
		set_pfd_context(&app_ids_pfds_t->pfd_context[idx]);
		len = app_ids_pfds_t->application_id.header.len
				+ app_ids_pfds_t->pfd_context[idx].header.len
				+ (2 * sizeof(pfcp_ie_header_t));
	}

	/* Stamp the enclosing app-ids-pfds grouped IE header */
	pfcp_set_ie_header(&app_ids_pfds_t->header, IE_APP_IDS_PFDS, len);
}
/**
 * @brief : Build a PFCP PFD Management Request message
 * @param : pfcp_pfd_req, message structure to populate
 * @param : len, total length of the app-ids-pfds IE payload
 * @return : Returns nothing
 */
void
fill_pfcp_pfd_mgmt_req(pfcp_pfd_mgmt_req_t *pfcp_pfd_req, uint16_t len)
{
	/* Allocate a fresh sequence number for this request */
	uint32_t seq = get_pfcp_sequence_number(PFCP_PFD_MGMT_REQ, 0);

	set_pfcp_seid_header((pfcp_header_t *) &(pfcp_pfd_req->header),
			PFCP_PFD_MGMT_REQ, NO_SEID, seq, NO_CP_MODE_REQUIRED);

	/* A single app-ids-pfds grouped IE is sent */
	pfcp_pfd_req->app_ids_pfds_count = 1;

	for (int idx = 0; idx < pfcp_pfd_req->app_ids_pfds_count; ++idx)
		set_app_ids_pfds(&pfcp_pfd_req->app_ids_pfds[idx], len);
}
/**
 * @brief : Buffer a Create Session Request against a UPF whose PFCP
 *          association is not yet established; the stored key is replayed
 *          once the association response arrives.
 * @param : context, UE context whose CSR details are captured
 * @param : upf_context, UPF context the request is queued on
 * @param : ebi_index, index of the bearer stored in the bearer array
 * @return : 0 on success, -1 on allocation failure
 */
int
buffer_csr_request(ue_context *context,
		upf_context_t *upf_context, uint8_t ebi_index)
{
	context_key *key =
		rte_zmalloc_socket(NULL, sizeof(context_key),
				RTE_CACHE_LINE_SIZE, rte_socket_id());
	/* BUG FIX: rte_zmalloc_socket() returns NULL on failure; the key was
	 * previously dereferenced without a check */
	if (key == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate "
				"memory for context key: %s\n", LOG_VALUE,
				rte_strerror(rte_errno));
		return -1;
	}

	key->teid = context->s11_sgw_gtpc_teid;
	key->sender_teid = context->s11_mme_gtpc_teid;
	key->sequence = context->sequence;
	key->ebi_index = ebi_index;
	key->imsi = context->imsi;

	/* Capture every active bearer id so the CSR can be rebuilt later */
	for (uint8_t i = 0; i < MAX_BEARERS; i++) {
		if (context->eps_bearers[i] != NULL)
			key->bearer_ids[i] = context->eps_bearers[i]->eps_bearer_id;
	}

	upf_context->pending_csr_teid[upf_context->csr_cnt] = (uint32_t *)key;
	upf_context->csr_cnt++;
	upf_context->indir_tun_flag = 0;

	return 0;
}
/**
 * @brief : Resolve the UPF address for a UE from the DNS-resolved UPF list
 *          and record it (FQDN + IPv4/IPv6 address) in the PDN connection.
 * @param : ctxt, UE context whose IMSI keys the DNS result hash
 * @param : pdn, PDN connection to populate with the selected UPF address
 * @return : 0 on success, GTPV2C_CAUSE_REQUEST_REJECTED on failure
 */
int
get_upf_ip(ue_context *ctxt, pdn_connection *pdn)
{
	if(config.use_dns) {
		upfs_dnsres_t *entry = NULL;

		if (upflist_by_ue_hash_entry_lookup(&ctxt->imsi,
					sizeof(ctxt->imsi), &entry) != 0) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to extract UPF context by ue hash\n", LOG_VALUE);
			return GTPV2C_CAUSE_REQUEST_REJECTED;
		}

		/* BUG FIX: the NULL check used to come AFTER entry->current_upf
		 * was dereferenced; validate the pointer first */
		if (entry == NULL) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT "Received UPF list for DNS entry is NULL\n", LOG_VALUE);
			return GTPV2C_CAUSE_REQUEST_REJECTED;
		}

		if (entry->current_upf > entry->upf_count) {
			/* TODO: Add error log : Tried sending
			 * association request to all upf.*/
			/* Remove entry from hash ?? */
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT "Failure in sending association request to all upf\n", LOG_VALUE);
			return GTPV2C_CAUSE_REQUEST_REJECTED;
		}

		memcpy(pdn->fqdn, entry->upf_fqdn[entry->current_upf],
				sizeof(entry->upf_fqdn[entry->current_upf]));

		/* Prefer IPv6 when configured and advertised, else fall back
		 * to IPv4; reject when neither family matches the config */
		if ((config.pfcp_ip_type == PDN_TYPE_IPV4_IPV6
					|| config.pfcp_ip_type == PDN_TYPE_IPV6)
				&& (*entry->upf_ip[entry->current_upf].ipv6.s6_addr)) {

			memcpy(pdn->upf_ip.ipv6_addr,
					entry->upf_ip[entry->current_upf].ipv6.s6_addr, IPV6_ADDRESS_LEN);
			pdn->upf_ip.ip_type = PDN_TYPE_IPV6;
			entry->upf_ip_type = PDN_TYPE_IPV6;

		} else if ((config.pfcp_ip_type == PDN_TYPE_IPV4_IPV6
					|| config.pfcp_ip_type == PDN_TYPE_IPV4)
				&& (entry->upf_ip[entry->current_upf].ipv4.s_addr != 0)) {

			pdn->upf_ip.ipv4_addr = entry->upf_ip[entry->current_upf].ipv4.s_addr;
			pdn->upf_ip.ip_type = PDN_TYPE_IPV4;
			entry->upf_ip_type = PDN_TYPE_IPV4;

		} else {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT "Requested type and DNS supported type are not same\n", LOG_VALUE);
			return GTPV2C_CAUSE_REQUEST_REJECTED;
		}
	}

	return 0;
}
/**
 * @brief : This function creates association setup request and sends to peer
 * @param : pdn holds the PDN connection (the UPF address is read from it)
 * @param : context holds information of ue
 * @param : ebi_index denotes index of bearer stored in array
 * @return : 0 on success, GTPv2C cause / -1 on failure
 */
static int
assoication_setup_request(pdn_connection *pdn, ue_context *context, int ebi_index)
{
	int ret = 0;
	upf_context_t *upf_context = NULL;
	pfcp_assn_setup_req_t pfcp_ass_setup_req = {0};
	node_address_t node_value = {0};

	/* Create the UPF context that tracks this association */
	upf_context = rte_zmalloc_socket(NULL, sizeof(upf_context_t),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (upf_context == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure to allocate "
				"upf context: %s\n", LOG_VALUE, rte_strerror(rte_errno));
		return GTPV2C_CAUSE_NO_MEMORY_AVAILABLE;
	}

	ret = upf_context_entry_add(&pdn->upf_ip, upf_context);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure while adding "
				"upf context entry for IP Type : %s with IPv4 : "IPV4_ADDR""
				"\t IPv6 : "IPv6_FMT", Error: %d \n", LOG_VALUE,
				ip_type_str(pdn->upf_ip.ip_type),
				IPV4_ADDR_HOST_FORMAT(pdn->upf_ip.ipv4_addr),
				PRINT_IPV6_ADDR(pdn->upf_ip.ipv6_addr), ret);
		/* BUG FIX: the context was never inserted into the hash, so it
		 * must be released here to avoid a leak */
		rte_free(upf_context);
		return -1;
	}

	if (context->indirect_tunnel_flag == 0) {
		/* Regular attach: queue the CSR until the association completes.
		 * NOTE: upf_context is already owned by the hash here, so it is
		 * intentionally NOT freed on this error path. */
		ret = buffer_csr_request(context, upf_context, ebi_index);
		if (ret) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure while buffer "
					"Create Session Request, Error: %d \n",LOG_VALUE, ret);
			return -1;
		}
	} else {
		/* Indirect data forwarding tunnel: remember the sender TEID
		 * instead of buffering a CSR */
		upf_context->sender_teid = context->s11_sgw_gtpc_teid;
		upf_context->indir_tun_flag = 1;
	}

	upf_context->assoc_status = ASSOC_IN_PROGRESS;
	upf_context->state = PFCP_ASSOC_REQ_SNT_STATE;
	upf_context->cp_mode = context->cp_mode;

	fill_pfcp_association_setup_req(&pfcp_ass_setup_req);

	/* Fill the Node ID with the locally configured PFCP address,
	 * matching the IP family of the selected UPF */
	if (pdn->upf_ip.ip_type == PDN_IP_TYPE_IPV4) {
		uint8_t temp[IPV6_ADDRESS_LEN] = {0};
		ret = fill_ip_addr(config.pfcp_ip.s_addr, temp, &node_value);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
		}
	} else if (pdn->upf_ip.ip_type == PDN_IP_TYPE_IPV6) {
		ret = fill_ip_addr(0, config.pfcp_ip_v6.s6_addr, &node_value);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
		}
	}

	set_node_id(&pfcp_ass_setup_req.node_id, node_value);

	uint8_t pfcp_msg[PFCP_MSG_LEN] = {0};
	int encoded = encode_pfcp_assn_setup_req_t(&pfcp_ass_setup_req, pfcp_msg);

	if (config.use_dns) {
		ret = set_dest_address(pdn->upf_ip, &upf_pfcp_sockaddr);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
		}
	}

	/* fill and add retransmission timer entry */
	peerData *timer_entry = NULL;
	timer_entry = fill_timer_entry_data(PFCP_IFACE, &upf_pfcp_sockaddr,
			pfcp_msg, encoded, config.request_tries, context->s11_sgw_gtpc_teid, ebi_index);
	/* BUG FIX: guard against a NULL timer entry before dereferencing */
	if (timer_entry == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to fill timer "
				"entry\n", LOG_VALUE);
		return -1;
	}
	timer_entry->imsi = context->imsi;

	if (!(add_timer_entry(timer_entry, config.request_timeout, association_timer_callback))) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Faild to add timer "
				"entry\n",LOG_VALUE);
	}

	upf_context->timer_entry = timer_entry;

	if (starttimer(&timer_entry->pt) < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Periodic Timer "
				"failed to start\n",LOG_VALUE);
	}

	if ( pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded, upf_pfcp_sockaddr,SENT) < 0 ) {
		clLog(clSystemLog, eCLSeverityDebug,LOG_FORMAT"Error sending PFCP "
				"Association Request\n", LOG_VALUE);
	}

	return 0;
}
/**
 * @brief : Drive the PFCP association state for a PDN: if the target UPF is
 *          already associated, establish the session immediately; if the
 *          association is pending, buffer the CSR; otherwise start a new
 *          association setup.
 * @param : pdn, PDN connection holding the UPF address and UE context
 * @param : ebi_index, index of the bearer stored in the bearer array
 * @return : 0 on success, error/cause value on failure
 */
int
process_pfcp_assoication_request(pdn_connection *pdn, int ebi_index)
{
	int ret = 0;
	upf_context_t *upf_context = NULL;

	/* Retrive association state based on UPF IP. */
	ret = rte_hash_lookup_data(upf_context_by_ip_hash,
			(const void*) &(pdn->upf_ip), (void **) &(upf_context));

	if (ret >= 0) {
		if (upf_context->state == PFCP_ASSOC_RESP_RCVD_STATE) {
			/* Association already established: proceed directly */
			ret = process_pfcp_sess_est_request(pdn->context->s11_sgw_gtpc_teid,
					pdn, upf_context);
			if (ret) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"\n "
						"Failed to process Session Eshtablishment Request, Error:%d \n",
						LOG_VALUE, ret);
				return ret;
			}
		} else {
			/* Association still in progress: queue the CSR */
			upf_context->cp_mode = pdn->context->cp_mode;
			ret = buffer_csr_request(pdn->context, upf_context, ebi_index);
			if (ret) {
				/* BUG FIX: format string and arguments were mismatched
				 * (message text was passed as a stray argument) */
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"Failed to buffer Create Session Request, Error: %d \n",
						LOG_VALUE, ret);
				return GTPV2C_CAUSE_SYSTEM_FAILURE;
			}
		}
	} else {
		/* No context yet for this UPF: start a fresh association */
		ret = assoication_setup_request(pdn, pdn->context, ebi_index);
		if (ret) {
			/* BUG FIX: format string and arguments were mismatched */
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
					"Could not process association request, Error: %d \n",
					LOG_VALUE, ret);
			return ret;
		}
	}

	return 0;
}
/**
 * @brief : Build a PFCP Session Report Response with cause "request accepted"
 * @param : pfcp_sess_rep_resp, response structure to populate
 * @param : seq, sequence number echoed from the report request
 * @param : cp_mode, control-plane mode used for the SEID header
 * @return : Returns nothing
 */
void
fill_pfcp_sess_report_resp(pfcp_sess_rpt_rsp_t *pfcp_sess_rep_resp,
		uint32_t seq, uint8_t cp_mode)
{
	/* Start from a zeroed message before stamping the header */
	memset(pfcp_sess_rep_resp, 0, sizeof(*pfcp_sess_rep_resp));

	set_pfcp_seid_header((pfcp_header_t *) &(pfcp_sess_rep_resp->header),
			PFCP_SESSION_REPORT_RESPONSE, HAS_SEID, seq, cp_mode);

	set_cause(&(pfcp_sess_rep_resp->cause), REQUESTACCEPTED);
}
/**
 * @brief : This function fills the csr in resp structure
 * @param : sess_id , session id.
 * @param : key, pointer of context_key structure.
 * @return : returns 0 on success, GTPV2C_CAUSE_CONTEXT_NOT_FOUND otherwise.
 */
int
fill_response(uint64_t sess_id, context_key *key)
{
	struct resp_info *resp = NULL;
	uint8_t bearer_cnt = 0;

	if (get_sess_entry(sess_id, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session "
				"Entry Found for sess ID:%lu\n", LOG_VALUE, sess_id);
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	/* Preserve the CSR details so an error response can be rebuilt later */
	resp->gtpc_msg.csr.sender_fteid_ctl_plane.teid_gre_key = key->sender_teid;
	resp->gtpc_msg.csr.header.teid.has_teid.teid = key->teid;
	resp->gtpc_msg.csr.header.teid.has_teid.seq = key->sequence;
	resp->gtpc_msg.csr.imsi.imsi_number_digits = key->imsi;

	/* Copy every non-zero bearer id into the stored CSR, compacting
	 * them to the front of the bearer-context array */
	for (uint8_t itr = 0; itr < MAX_BEARERS; ++itr) {
		if (key->bearer_ids[itr] == 0)
			continue;

		resp->gtpc_msg.csr.bearer_contexts_to_be_created[bearer_cnt].header.len =
				sizeof(uint8_t) + IE_HEADER_SIZE;
		resp->gtpc_msg.csr.bearer_contexts_to_be_created[bearer_cnt].eps_bearer_id.ebi_ebi =
				key->bearer_ids[itr];
		bearer_cnt++;
	}

	/* Record how many bearers were stored */
	resp->gtpc_msg.csr.bearer_count = bearer_cnt;

	return 0;
}
/**
 * @brief : Handle a received PFCP Association Setup Response: store the
 *          user-plane interface addresses and TEID range in the UPF context,
 *          then replay the buffered Create Session Requests (or the pending
 *          indirect-tunnel session) towards the UPF, and finally register
 *          the peer for heartbeat monitoring.
 * @param : msg, decoded message plus the UPF IP it was received from
 * @param : peer_addr, transport-level address of the sending peer
 * @return : 0 on success, GTPv2C cause on failure
 *           NOTE(review): return type is uint8_t but -1 is returned on one
 *           path (callers would see 255) — confirm intended.
 */
uint8_t
process_pfcp_ass_resp(msg_info *msg, peer_addr_t *peer_addr)
{
	int ret = 0;
	pdn_connection *pdn = NULL;
	upf_context_t *upf_context = NULL;
	ue_context *context = NULL;
	struct resp_info *resp = NULL;
	node_address_t node_value = {0};

	/* Locate the UPF context created when the association was initiated */
	ret = rte_hash_lookup_data(upf_context_by_ip_hash,
			(const void*) &(msg->upf_ip), (void **) &(upf_context));
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"NO ENTRY FOUND IN UPF "
				"HASH, IP Type : %s with IPv4 : "IPV4_ADDR"\t and IPv6 : "IPv6_FMT"",
				LOG_VALUE, ip_type_str(msg->upf_ip.ip_type),
				IPV4_ADDR_HOST_FORMAT(msg->upf_ip.ipv4_addr),
				PRINT_IPV6_ADDR(msg->upf_ip.ipv6_addr));
		return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
	}

	/* Mark the association as established */
	msg->cp_mode = upf_context->cp_mode;
	upf_context->assoc_status = ASSOC_ESTABLISHED;
	upf_context->state = PFCP_ASSOC_RESP_RCVD_STATE;

	/* WB/S1U, WB/S5S8_Logical, EB/S5S8 Interface*/
	/* The meaning of each user-plane IP resource IE is keyed on its
	 * position (inx) plus its source-interface value:
	 *   0: access side (WB/S1U), 1: core side (EB/S5S8),
	 *   2/3: logical interfaces (order may vary, hence both checks). */
	for (uint8_t inx = 0; inx < msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info_count; inx++) {
		if (inx == 0) {
			/* WB/S1U Interface */
			if (msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].assosi == PRESENT &&
					msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].src_intfc ==
					SOURCE_INTERFACE_VALUE_ACCESS ) {
				ret = fill_ip_addr(msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].ipv4_address,
						msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].ipv6_address,
						&upf_context->s5s8_pgwu_ip);
				if (ret < 0) {
					clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
							"IP address", LOG_VALUE);
				}
				ret = fill_ip_addr(msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].ipv4_address,
						msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].ipv6_address,
						&upf_context->s1u_ip);
				if (ret < 0) {
					clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
							"IP address", LOG_VALUE);
				}
			}
		}
		if (inx == 1) {
			/* EB/S5S8 Interface */
			if(msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].assosi == PRESENT &&
					msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].src_intfc ==
					SOURCE_INTERFACE_VALUE_CORE ) {
				ret = fill_ip_addr(msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].ipv4_address,
						msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].ipv6_address,
						&upf_context->s5s8_sgwu_ip);
				if (ret < 0) {
					clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
							"IP address", LOG_VALUE);
				}
			}
		}
		if ((inx == 2) &&
				(msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].src_intfc == SOURCE_INTERFACE_VALUE_ACCESS)) {
			/* PGWU WB/S5S8 Logical Interface */
			/* Overrides the address stored from slot 0 */
			memset(&upf_context->s5s8_pgwu_ip, 0, sizeof(node_address_t));
			ret = fill_ip_addr(msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].ipv4_address,
					msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].ipv6_address,
					&upf_context->s5s8_pgwu_ip);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
						"IP address", LOG_VALUE);
			}
		} else if ((inx == 2) &&
				(msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].src_intfc == SOURCE_INTERFACE_VALUE_CORE)) {
			/* SGWU EB/S5S8 Logical Interface:Indirect Tunnel */
			ret = fill_ip_addr(msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].ipv4_address,
					msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].ipv6_address,
					&upf_context->s5s8_li_sgwu_ip);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
						"IP address", LOG_VALUE);
			}
		}
		if ((inx == 3) &&
				(msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].src_intfc == SOURCE_INTERFACE_VALUE_CORE)) {
			/* SGWU EB/S5S8 Logical Interface:Indirect Tunnel */
			ret = fill_ip_addr(msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].ipv4_address,
					msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].ipv6_address,
					&upf_context->s5s8_li_sgwu_ip);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
						"IP address", LOG_VALUE);
			}
		} else if ((inx == 3) &&
				(msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].src_intfc == SOURCE_INTERFACE_VALUE_ACCESS)) {
			/* PGWU WB/S5S8 Logical Interface */
			memset(&upf_context->s5s8_pgwu_ip, 0, sizeof(node_address_t));
			ret = fill_ip_addr(msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].ipv4_address,
					msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[inx].ipv6_address,
					&upf_context->s5s8_pgwu_ip);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
						"IP address", LOG_VALUE);
			}
		}
	}

	/* TODO: Make it generic this code */
	/* teid_range from first user plane ip IE is used since, for same CP ,
	 * DP will assigne single teid_range , So all IE's will have same value for teid_range*/
	/* Change teid base address here */
	if (msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[0].teidri != 0) {
		upf_context->teidri = msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[0].teidri;
		upf_context->teid_range = msg->pfcp_msg.pfcp_ass_resp.user_plane_ip_rsrc_info[0].teid_range;
	}else{
		upf_context->teidri = 0;
		upf_context->teid_range = 0;
	}

	/* Extract the peer Node ID (IPv4 or IPv6) for TEID-range bookkeeping */
	if (msg->pfcp_msg.pfcp_ass_resp.node_id.node_id_type == NODE_ID_TYPE_TYPE_IPV4ADDRESS) {
		node_value.ip_type = PDN_IP_TYPE_IPV4;
		node_value.ipv4_addr =
			msg->pfcp_msg.pfcp_ass_resp.node_id.node_id_value_ipv4_address;
	} else if (msg->pfcp_msg.pfcp_ass_resp.node_id.node_id_type == NODE_ID_TYPE_TYPE_IPV6ADDRESS) {
		node_value.ip_type = PDN_IP_TYPE_IPV6;
		memcpy(node_value.ipv6_addr,
				msg->pfcp_msg.pfcp_ass_resp.node_id.node_id_value_ipv6_address,
				IPV6_ADDRESS_LEN);
	}

	if (0 != set_base_teid(upf_context->teidri, upf_context->teid_range,
				node_value, &upf_teid_info_head)) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to set teid range for DP \n, "
				"IP Type : %s | IPV4_ADDR : "IPV4_ADDR" | IPV6_ADDR : "IPv6_FMT"",
				LOG_VALUE, ip_type_str(node_value.ip_type),
				IPV4_ADDR_HOST_FORMAT(node_value.ipv4_addr),
				PRINT_IPV6_ADDR(node_value.ipv6_addr));
		return GTPV2C_CAUSE_SYSTEM_FAILURE;
	}

	if(upf_context->indir_tun_flag == 0 ) {
		/* Replay every Create Session Request buffered while the
		 * association was pending */
		for (uint8_t i = 0; i < upf_context->csr_cnt; i++) {
			context_key *key = (context_key *)upf_context->pending_csr_teid[i];

			if (get_ue_context(key->teid, &context) != 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"UE context not found "
						"for teid: %d\n", LOG_VALUE, key->teid);
				return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
			}

			if(config.use_dns) {
				/* Delete UPFList entry from UPF Hash */
				if ((upflist_by_ue_hash_entry_delete(&context->imsi, sizeof(context->imsi)))
						< 0) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Error on upflist_by_ue_hash deletion of IMSI \n",
							LOG_VALUE);
				}
			}

			pdn = GET_PDN(context, key->ebi_index);
			if(pdn == NULL){
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get "
						"pdn for ebi_index %d\n", LOG_VALUE, key->ebi_index);
				return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
			}

			ret = process_pfcp_sess_est_request(key->teid, pdn, upf_context);
			if (ret) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to process PFCP "
						"session eshtablishment request %d \n", LOG_VALUE, ret);
				/* Store the CSR details so an error response can be sent */
				fill_response(pdn->seid, key);
				return ret;
			}

			fill_response(pdn->seid, key);
			rte_free(upf_context->pending_csr_teid[i]);
		}
		upf_context->csr_cnt = 0;
	} else {
		/* Indirect data-forwarding tunnel path: a single pending session
		 * keyed by the stored sender TEID */
		if(get_sender_teid_context(upf_context->sender_teid, &context) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No entry found for ue in hash\n",
					LOG_VALUE);
			return -1;
		}

		ret = process_pfcp_sess_est_request(context->s11_sgw_gtpc_teid,
				context->indirect_tunnel->pdn, upf_context);
		if (ret) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to process PFCP "
					"session eshtablishment request %d \n", LOG_VALUE, ret);
			if(ret != -1) {
				crt_indir_data_frwd_tun_error_response(msg, ret);
			}
		} else {
			if (get_sess_entry(context->indirect_tunnel->pdn->seid, &resp) != 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session "
						"Entry Found for sess ID : %lu\n", LOG_VALUE,
						context->indirect_tunnel->pdn->seid);
				return GTPV2C_CAUSE_CONTEXT_NOT_FOUND;
			}
		}
		/* NOTE(review): csr_cnt is decremented here even though the
		 * indirect-tunnel path does not buffer CSRs — confirm intended */
		upf_context->csr_cnt--;
	}

	/* Adding ip to cp heartbeat when dp returns the association response*/
	node_address_t ip_addr = {0};
	get_peer_node_addr(peer_addr, &ip_addr);
	add_ip_to_heartbeat_hash(&ip_addr,
			msg->pfcp_msg.pfcp_ass_resp.rcvry_time_stmp.rcvry_time_stmp_val);

#ifdef USE_REST
	/* Register the peer for echo/heartbeat monitoring */
	if (is_present(&ip_addr)) {
		if (add_node_conn_entry(&ip_addr, SX_PORT_ID, upf_context->cp_mode) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to add "
					"connection entry for SGWU/SAEGWU\n", LOG_VALUE);
		}
		(ip_addr.ip_type == IPV6_TYPE)?
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Added Connection entry "
					"for UPF IPv6:"IPv6_FMT"\n",LOG_VALUE, IPv6_PRINT(IPv6_CAST(ip_addr.ipv6_addr))):
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Added Connection entry "
					"for UPF IPv4:"IPV4_ADDR"\n",LOG_VALUE, IPV4_ADDR_HOST_FORMAT(ip_addr.ipv4_addr));
	}
#endif/* USE_REST */
	return 0;
}
/**
 * @brief : fill the pdr_ids structure: merge newly reported rule ids into
 *          the already-buffered list, skipping duplicates.
 * @param : pfcp_pdr_id, structure needs to filled
 * @param : num_pdr, count of number of pdrs
 * @param : pdr, array of pointers having rule id
 * @return : Returns 0 on success, -1 on a NULL destination.
 */
static int
fill_pdr_ids(pdr_ids *pfcp_pdr_id, uint8_t num_pdr, pfcp_pdr_id_ie_t *pdr)
{
	uint8_t i = 0;
	uint8_t count = 0;
	uint8_t pdr_itr = 0;
	uint8_t tmp_cnt = 0;
	uint8_t temp_arr[MAX_LIST_SIZE] = {0};

	/* BUG FIX: a NULL destination previously fell through to the else
	 * branch and was dereferenced */
	if (pfcp_pdr_id == NULL)
		return -1;

	/* BUG FIX: the merge branch was guarded by pdr_count == 0 (inverted),
	 * so an already-populated list was overwritten instead of merged;
	 * sibling fill_sess_info_pdr() uses the != 0 form used here */
	if (pfcp_pdr_id->pdr_count != 0) {
		/* Collect only the rule ids that are not stored yet */
		for (uint8_t itr2 = 0; itr2 < num_pdr; itr2++) {
			uint8_t Match_found = False;
			for (uint8_t itr1 = 0; itr1 < pfcp_pdr_id->pdr_count; itr1++) {
				if (pfcp_pdr_id->pdr_id[itr1] == pdr[itr2].rule_id) {
					Match_found = True;
					break;
				}
			}
			if (Match_found == False) {
				temp_arr[i] = pdr[itr2].rule_id;
				tmp_cnt++;
				i++;
			}
		}

		/* Find the first free (zero) slot in the stored list */
		for (i = 0; i < MAX_LIST_SIZE; i++) {
			if (pfcp_pdr_id->pdr_id[i] == 0)
				break;
		}

		/* Save the rule id recieved in pfcp report request*/
		while (count != tmp_cnt) {
			pfcp_pdr_id->pdr_id[i] = temp_arr[pdr_itr];
			i++;
			count++;
			pdr_itr++;
		}
		pfcp_pdr_id->pdr_count += tmp_cnt;
	} else {
		/* Empty list: take every incoming rule id as-is */
		pfcp_pdr_id->pdr_count = num_pdr;
		for (i = 0; i < num_pdr; i++) {
			pfcp_pdr_id->pdr_id[i] = pdr[i].rule_id;
		}
	}
	return 0;
}
/**
 * @brief : fill the session info structure: merge incoming pdr ids into the
 *          stored list, skipping ids that are already present.
 * @param : pfcp_pdr_id, needs to filled
 * @param : pdr_count , count of number of pdr_id in pfcp_pdr_id array
 * @param : num_pdr, count of number of pdrs_id needs to be filled
 * @param : pdr, array of pointers having rule id
 * @return : Returns nothing.
 */
static void
fill_sess_info_pdr(uint16_t *pfcp_pdr_id, uint8_t *pdr_count, uint8_t num_pdr, pfcp_pdr_id_ie_t *pdr)
{
	uint8_t new_ids[MAX_LIST_SIZE] = {0};
	uint8_t new_cnt = 0;

	if (*pdr_count == 0) {
		/* Empty list: copy every incoming rule id verbatim */
		*pdr_count = num_pdr;
		for (uint8_t idx = 0; idx < num_pdr; idx++)
			pfcp_pdr_id[idx] = pdr[idx].rule_id;
		return;
	}

	/* Collect only those incoming rule ids not already stored */
	for (uint8_t in = 0; in < num_pdr; in++) {
		uint8_t known = False;
		for (uint8_t old = 0; old < *pdr_count; old++) {
			if (pfcp_pdr_id[old] == pdr[in].rule_id) {
				known = True;
				break;
			}
		}
		if (known == False) {
			new_ids[new_cnt] = pdr[in].rule_id;
			new_cnt++;
		}
	}

	/* Locate the first free (zero) slot in the stored list */
	uint8_t slot = 0;
	while (slot < MAX_LIST_SIZE && pfcp_pdr_id[slot] != 0)
		slot++;

	/* Append the collected rule ids after the occupied slots */
	for (uint8_t itr = 0; itr < new_cnt; itr++) {
		pfcp_pdr_id[slot] = new_ids[itr];
		slot++;
	}

	*pdr_count += new_cnt;
}
/**
 * @brief : Record the pdr ids of a throttled session in the per-node
 *          throttle counter's session list, creating the session record
 *          when it does not exist yet.
 * @param : thrtl_cnt, per-node throttle counter holding the session list
 * @param : sess_id, PFCP session id the pdr ids belong to
 * @param : pdr_count, number of entries in pdr
 * @param : pdr, array carrying the rule ids to record
 * @return : Returns nothing
 */
void fill_sess_info_id(thrtle_count *thrtl_cnt, uint64_t sess_id, uint8_t pdr_count, pfcp_pdr_id_ie_t *pdr)
{
	/* Look up the per-session record; allocate and link one if missing */
	sess_info *entry = search_into_sess_info_list(thrtl_cnt->sess_ptr, sess_id);

	if (entry == NULL) {
		entry = rte_zmalloc_socket(NULL, sizeof(sess_info),
				RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (entry == NULL) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to allocate memory to session info"
					"\n\n", LOG_VALUE);
			return;
		}

		if (insert_into_sess_info_list(thrtl_cnt->sess_ptr, entry) == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to add node entry in LL\n",
					LOG_VALUE);
			rte_free(entry);
			entry = NULL;
			return;
		}
	}

	/* Merge the received pdr ids into the session record */
	fill_sess_info_pdr(entry->pdr_id, &entry->pdr_count,
			pdr_count, pdr);
}
/**
 * @brief : Fetch the per-node DDN throttle counter, optionally creating it.
 * @param : nodeip, peer node address keying the throttle hash
 * @param : is_mod, non-zero = lookup only (do not create a missing entry)
 * @return : Pointer to the counter, or NULL on lookup/allocation failure
 */
thrtle_count *
get_throtle_count(node_address_t *nodeip, uint8_t is_mod)
{
	thrtle_count *thrtl_cnt = NULL;
	int ret = 0;

	/* Fast path: counter already present for this node */
	if (rte_hash_lookup_data(thrtl_ddn_count_hash,
				(const void *)nodeip, (void **)&thrtl_cnt) >= 0)
		return thrtl_cnt;

	/* If operation is not set to the ADD_ENTRY */
	if (is_mod) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Entry not found for Node: "
				" of IP Type : %s\n with IP IPv4 : "IPV4_ADDR"\t and IPv6 : "IPv6_FMT""
				"\n", LOG_VALUE, ip_type_str(nodeip->ip_type),
				IPV4_ADDR_HOST_FORMAT(nodeip->ipv4_addr), PRINT_IPV6_ADDR(nodeip->ipv6_addr));
		return NULL;
	}

	/* Allocate memory and add a new thrtl_count Entry into hash */
	thrtl_cnt = rte_zmalloc_socket(NULL, sizeof(thrtle_count),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (thrtl_cnt == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate "
				"Memory for throttling count structure, Error: %s \n", LOG_VALUE,
				rte_strerror(rte_errno));
		return NULL;
	}

	/* A fresh counter starts with zeroed stats and an empty session list */
	thrtl_cnt->prev_ddn_discard = 0;
	thrtl_cnt->prev_ddn_eval = 0;
	thrtl_cnt->sess_ptr = NULL;

	ret = rte_hash_add_key_data(thrtl_ddn_count_hash,
			(const void *)nodeip, thrtl_cnt);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to add entry while throttling "
				"of IP Type : %s\n with IP IPv4 : "IPV4_ADDR"\t and IPv6 : "IPv6_FMT""
				"\n\tError= %d\n", LOG_VALUE, ip_type_str(nodeip->ip_type),
				IPV4_ADDR_HOST_FORMAT(nodeip->ipv4_addr), PRINT_IPV6_ADDR(nodeip->ipv6_addr),ret);
		rte_free(thrtl_cnt);
		thrtl_cnt = NULL;
		return NULL;
	}

	return thrtl_cnt;
}
/**
 * @brief : Handle a PFCP Session Report Request from the UP.
 *          For a downlink-data report (dldr) the DDN is either buffered
 *          (UE-level DL-buffering or delay timer running), throttled
 *          (node-level throttling timer running), or sent immediately.
 *          Usage reports (usar) are forwarded to CDR handling.
 * @param : pfcp_sess_rep_req, decoded session report request
 * @return : 0 on success, -1 on failure
 *           NOTE(review): return type is uint8_t but -1 is returned on
 *           failure paths (callers would see 255) — confirm intended.
 */
uint8_t
process_pfcp_report_req(pfcp_sess_rpt_req_t *pfcp_sess_rep_req)
{
	/*DDN Handling */
	int ret = 0;
	uint32_t sequence = 0;
	uint8_t cp_thrtl_fact = 0;
	ue_context *context = NULL;
	pdn_connection *pdn = NULL;
	struct resp_info *resp = NULL;
	pdr_ids *pfcp_pdr_id = NULL;
	ue_level_timer *timer_data = NULL;
	throttle_timer *thrtle_timer_data = NULL;
	thrtle_count *thrtl_cnt = NULL;

	/* The SEID encodes both the S11 TEID and the bearer id */
	uint64_t sess_id = pfcp_sess_rep_req->header.seid_seqno.has_seid.seid;
	uint32_t s11_sgw_gtpc_teid = UE_SESS_ID(sess_id);
	int ebi = UE_BEAR_ID(sess_id);

	int ebi_index = GET_EBI_INDEX(ebi);
	if (ebi_index == -1) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
		return -1;
	}

	/* Stored the session information*/
	if (get_sess_entry(sess_id, &resp) != 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to get response "
				"from session id\n", LOG_VALUE);
		return -1;
	}

	ret = get_ue_context(s11_sgw_gtpc_teid, &context);
	if (ret < 0) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Context not found for "
				"report request\n", LOG_VALUE);
		return -1;
	}

	pdn = GET_PDN(context, ebi_index);
	if(pdn == NULL){
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to get pdn for ebi_index : %d \n", LOG_VALUE, ebi_index);
		return -1;
	}

	/* Retrive the s11 sgwc gtpc teid based on session id.*/
	sequence = pfcp_sess_rep_req->header.seid_seqno.has_seid.seq_no;
	resp->cp_mode = context->cp_mode;
	resp->pfcp_seq = sequence;

	if (pfcp_sess_rep_req->report_type.dldr) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"DDN Request recv from DP for "
				"sess:%lu\n", LOG_VALUE, sess_id);

		/* UE LEVEL: PFCP: DL Buffering Duration Timer delay */
		if( (rte_hash_lookup_data(dl_timer_by_teid_hash, &s11_sgw_gtpc_teid, (void **)&timer_data)) >= 0 ){
			if((rte_hash_lookup_data(pfcp_rep_by_seid_hash, &sess_id, (void **)&pfcp_pdr_id)) < 0){
				/* If not present, allocate the memory and add the entry for sess id */
				pfcp_pdr_id = rte_zmalloc_socket(NULL, sizeof(pdr_ids),
						RTE_CACHE_LINE_SIZE, rte_socket_id());
				if(pfcp_pdr_id == NULL ){
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate "
							"Memory for pdr_ids structure, Error: %s \n", LOG_VALUE,
							rte_strerror(rte_errno));
					return -1;
				}
				/* NOTE(review): sess_id is uint64_t but logged below with
				 * %u — format-specifier mismatch, confirm and fix */
				ret = rte_hash_add_key_data(pfcp_rep_by_seid_hash,
						&sess_id, pfcp_pdr_id);
				if (ret < 0) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Failed to add entry while dl buffering with session id = %u"
							"\n\tError= %d\n", LOG_VALUE, sess_id, ret);
					rte_free(pfcp_pdr_id);
					pfcp_pdr_id = NULL;
					return -1;
				}
			}
			/* Buffer the reported pdr ids until the DL-buffering timer fires */
			ret = fill_pdr_ids(pfcp_pdr_id, pfcp_sess_rep_req->dnlnk_data_rpt.pdr_id_count,
					pfcp_sess_rep_req->dnlnk_data_rpt.pdr_id);
			if(ret != 0){
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure in dl buffer timer "
						"fill pdr ids:%lu\n", LOG_VALUE, sess_id);
				return -1;
			}
			context->pfcp_rept_resp_sent_flag = 1;
			fill_send_pfcp_sess_report_resp(context, sequence, pdn, NOT_PRESENT, TRUE);

		/*UE LEVEL: GTPv2c: Check for UE level timer */
		} else if(rte_hash_lookup_data(timer_by_teid_hash, &s11_sgw_gtpc_teid, (void **)&timer_data) >= 0){
			if((rte_hash_lookup_data(ddn_by_seid_hash, &sess_id, (void **)&pfcp_pdr_id)) < 0){
				/* If not present, allocate the memory and add the entry for sess id */
				pfcp_pdr_id = rte_zmalloc_socket(NULL, sizeof(pdr_ids),
						RTE_CACHE_LINE_SIZE, rte_socket_id());
				if(pfcp_pdr_id == NULL ){
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate "
							"Memory for pdr_ids structure, Error: %s \n", LOG_VALUE,
							rte_strerror(rte_errno));
					return -1;
				}
				ret = rte_hash_add_key_data(ddn_by_seid_hash,
						&sess_id, pfcp_pdr_id);
				if (ret < 0) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Failed to buffer entry for ddn with session id = %u"
							"\n\tError= %d\n", LOG_VALUE, sess_id, ret);
					rte_free(pfcp_pdr_id);
					pfcp_pdr_id = NULL;
					return -1;
				}
			}
			/* Buffer the ddn request */
			ret = fill_pdr_ids(pfcp_pdr_id, pfcp_sess_rep_req->dnlnk_data_rpt.pdr_id_count,
					pfcp_sess_rep_req->dnlnk_data_rpt.pdr_id);
			if(ret != 0){
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure in delay timer "
						"fill pdr ids:%lu\n", LOG_VALUE, sess_id);
				return -1;
			}
			context->pfcp_rept_resp_sent_flag = 1;
			fill_send_pfcp_sess_report_resp(context, sequence, pdn, NOT_PRESENT, TRUE);

		/*Node Level: GTPv2C: Check for throttling timer */
		}else if((rte_hash_lookup_data(thrtl_timer_by_nodeip_hash,
						(const void *)&context->s11_mme_gtpc_ip, (void **)&thrtle_timer_data)) >= 0 ){
			/* Retrive the counter to calculate throttling factor */
			thrtl_cnt = get_throtle_count(&context->s11_mme_gtpc_ip, ADD_ENTRY);
			if(thrtl_cnt == NULL){
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error to get throttling count \n",
						LOG_VALUE);
				return -1;
			}
			/* Send DDN Request if caluculated throttling factor is greater than received factor value */
			/* Match each reported rule id against the core-side PDRs of
			 * every bearer; low-priority bearers (ARP above the configured
			 * threshold) are subject to the throttling factor */
			for(uint8_t i = 0; i < pfcp_sess_rep_req->dnlnk_data_rpt.pdr_id_count; i++){
				for(uint8_t j = 0; j < MAX_BEARERS; j++){
					if(pdn->eps_bearers[j] != NULL){
						for(uint8_t itr_pdr = 0; itr_pdr < pdn->eps_bearers[j]->pdr_count; itr_pdr++){
							if ((pdn->eps_bearers[j]->pdrs[itr_pdr]->pdi.src_intfc.interface_value ==
										SOURCE_INTERFACE_VALUE_CORE) &&
									(pdn->eps_bearers[j]->pdrs[itr_pdr]->rule_id ==
									 pfcp_sess_rep_req->dnlnk_data_rpt.pdr_id[i].rule_id)){
								if(pdn->eps_bearers[j]->qos.arp.priority_level >
										config.low_lvl_arp_priority){
									/* Calculate the throttling factor*/
									if(thrtl_cnt->prev_ddn_eval != 0){
										cp_thrtl_fact = (thrtl_cnt->prev_ddn_discard/thrtl_cnt->prev_ddn_eval) * 100;
										if(cp_thrtl_fact > thrtle_timer_data->throttle_factor){
											pdr_ids ids;
											ids.pdr_count = ONE;
											ids.pdr_id[ZERO] = pfcp_sess_rep_req->dnlnk_data_rpt.pdr_id[i].rule_id;
											ids.ddn_buffered_count = 0;
											/*Send DDN request*/
											ret = ddn_by_session_id(sess_id, &ids);
											if (ret) {
												clLog(clSystemLog, eCLSeverityCritical,
														LOG_FORMAT "Failed to process DDN request \n",
														LOG_VALUE);
												return -1;
											}
											context->pfcp_rept_resp_sent_flag = 0;
											thrtl_cnt->prev_ddn_eval = thrtl_cnt->prev_ddn_eval + 1;
										}else{
											/* Discard: record the pdr id against the
											 * session and ack the report only */
											pfcp_pdr_id_ie_t pdr = {0};
											pdr.rule_id = pfcp_sess_rep_req->dnlnk_data_rpt.pdr_id[i].rule_id;
											thrtl_cnt->prev_ddn_eval = thrtl_cnt->prev_ddn_eval + 1;
											thrtl_cnt->prev_ddn_discard = thrtl_cnt->prev_ddn_discard + 1;
											fill_sess_info_id(thrtl_cnt, sess_id, ONE, &pdr);
											context->pfcp_rept_resp_sent_flag = 1;
											fill_send_pfcp_sess_report_resp(context, sequence, pdn, NOT_PRESENT, TRUE);
										}
									}else{
										pdr_ids ids;
										ids.pdr_count = ONE;
										ids.pdr_id[ZERO] = pfcp_sess_rep_req->dnlnk_data_rpt.pdr_id[i].rule_id;
										ids.ddn_buffered_count = 0;
										/*Send DDN request*/
										ret = ddn_by_session_id(sess_id, &ids);
										if (ret) {
											clLog(clSystemLog, eCLSeverityCritical,
													LOG_FORMAT "Failed to process DDN request \n", LOG_VALUE);
											return -1;
										}
										thrtl_cnt->prev_ddn_eval = thrtl_cnt->prev_ddn_eval + 1;
										context->pfcp_rept_resp_sent_flag = 0;
									}
								} else {
									/* High-priority bearer: never throttled */
									pdr_ids ids;
									ids.pdr_count = ONE;
									ids.pdr_id[ZERO] = pfcp_sess_rep_req->dnlnk_data_rpt.pdr_id[i].rule_id;
									ids.ddn_buffered_count = 0;
									ret = ddn_by_session_id(sess_id, &ids);
									if (ret) {
										clLog(clSystemLog, eCLSeverityCritical,
												LOG_FORMAT "Failed to process DDN request \n", LOG_VALUE);
										return -1;
									}
									context->pfcp_rept_resp_sent_flag = 0;
								}
							}
						}
					}
				}
			}
		}else{
			/* No timer active: forward the DDN immediately.
			 * NOTE(review): pfcp_pdr_id is still NULL on this path —
			 * confirm ddn_by_session_id() tolerates a NULL id list */
			ret = ddn_by_session_id(sess_id, pfcp_pdr_id);
			if (ret) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT "Failed to process DDN request \n", LOG_VALUE);
				return -1;
			}
			context->pfcp_rept_resp_sent_flag = 0;
		}
		resp->msg_type = PFCP_SESSION_REPORT_REQUEST;
		/* Update the Session state */
		resp->state = DDN_REQ_SNT_STATE;
		pdn->state = DDN_REQ_SNT_STATE;
	}

	if (pfcp_sess_rep_req->report_type.usar == PRESENT) {
		/* Forward each usage report to CDR handling, then ack */
		for( int cnt = 0; cnt < pfcp_sess_rep_req->usage_report_count; cnt++ )
			fill_cdr_info_sess_rpt_req(sess_id, &pfcp_sess_rep_req->usage_report[cnt]);
		fill_send_pfcp_sess_report_resp(context, sequence, pdn, NOT_PRESENT, FALSE);
	}
	return 0;
}
#endif /* CP_BUILD */
#ifdef DP_BUILD
/**
 * @brief : Build a PFCP Association Setup Response on the DP side.
 *          Fills header, node id, recovery timestamp, cause and (on success)
 *          the user-plane IP resource info IEs, and manages TEID-range
 *          allocation for the requesting CP node.
 * @param : pfcp_ass_setup_resp - response message to populate (zeroed here).
 * @param : cause - initial cause value; may be overridden to
 *          NORESOURCESAVAILABLE if no TEID range / association slot is free.
 * @param : dp_node_value - this DP's node address (used for Node ID IE).
 * @param : cp_node_value - peer CP's node address (key for TEIDRI lists).
 * @return: Nothing; result is written into pfcp_ass_setup_resp.
 */
void
fill_pfcp_association_setup_resp(pfcp_assn_setup_rsp_t *pfcp_ass_setup_resp,
		uint8_t cause, node_address_t dp_node_value,
		node_address_t cp_node_value)
{
	int8_t teid_range = 0;
	uint8_t teidri_gen_flag = 0;
	uint32_t seq = 1;
	memset(pfcp_ass_setup_resp, 0, sizeof(pfcp_assn_setup_rsp_t)) ;
	/* Node-level message: no SEID in the header. */
	set_pfcp_seid_header((pfcp_header_t *) &(pfcp_ass_setup_resp->header),
			PFCP_ASSOCIATION_SETUP_RESPONSE, NO_SEID, seq, NO_CP_MODE_REQUIRED);
	pfcp_ass_setup_resp->header.message_len += set_node_id(&(pfcp_ass_setup_resp->node_id), dp_node_value);
	set_recovery_time_stamp(&(pfcp_ass_setup_resp->rcvry_time_stmp));
	pfcp_ass_setup_resp->header.message_len += pfcp_ass_setup_resp->rcvry_time_stmp.header.len;
	/* As we are not supporting this feature
	set_upf_features(&(pfcp_ass_setup_resp->up_func_feat)); */
	if(app.teidri_val != 0){
		/* searching record for peer node into list of blocked teid_ranges */
		teidri_gen_flag =
			get_teidri_from_list((uint8_t *)&teid_range,
					cp_node_value, &upf_teidri_blocked_list);
		if (teidri_gen_flag == 0) {
			/* Record not found in list of blocked teid_ranges
			 * searching record for peer node into list of allocated teid_ranges */
			teidri_gen_flag =
				get_teidri_from_list((uint8_t *)&teid_range, cp_node_value,
						&upf_teidri_allocated_list);
			if (teidri_gen_flag == 0) {
				/* If node addr and teid range not found in allocated list, then
				 * - Assign teid range from free list
				 * - Remove record from free list
				 * - Add record to the allocated list
				 * - Add record in file
				 */
				teid_range = assign_teid_range(app.teidri_val, &upf_teidri_free_list);
				if(teid_range < 0){
					/* Failed to generate tied range, Reject association request */
					cause = NORESOURCESAVAILABLE;
				}else{
					if (add_teidri_node_entry(teid_range, cp_node_value,
								TEIDRI_FILENAME, &upf_teidri_allocated_list,
								&upf_teidri_free_list) < 0) {
						/* Persisting the allocation failed; association still
						 * proceeds with the in-memory assignment. */
						clLog(clSystemLog, eCLSeverityDebug,
								LOG_FORMAT"ERROR :Unable to write data into file"
								" for Node addr: %u : TEIDRI: %d \n", LOG_VALUE,
								cp_node_value.ipv4_addr, teid_range);
					}
				}
			}
		}else{
			/* TEIDRI value found into list of blocked records */
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
					"TEIDRI value found into data node"
					" addr: %u : TEIDRI: %d \n", LOG_VALUE,
					cp_node_value.ipv4_addr, teid_range);
			/* Assuming if peer node address and TEIDRI value find into file data, that's means DP Restarted */
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"PREVIOUS: DP Restart counter: %d\n", LOG_VALUE, dp_restart_cntr);
			update_dp_restart_cntr();
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"UPDATED: DP Restart counter: %d \n ", LOG_VALUE, dp_restart_cntr);
			/* If node addr and teid range found in blocked list, then
			 * - Assign teid range from data found in blocked list
			 * - Remove record from blocked list
			 * - Add record to the allocated list
			 * - No need to update file, as record will be already present in file
			 */
			if (add_teidri_node_entry(teid_range, cp_node_value, NULL, &upf_teidri_allocated_list,
						&upf_teidri_blocked_list) < 0) {
				clLog(clSystemLog, eCLSeverityDebug,LOG_FORMAT
						"ERROR :Unable to write data into file"
						" for Node addr : %u : TEIDRI : %d \n", LOG_VALUE,
						cp_node_value.ipv4_addr, teid_range);
			}
		}
	}else{
		/* TEIDRI == 0: TEID space is not partitioned, so at most one CP
		 * may associate with this DP at a time. */
		if(assoc_available == false){
			/* TEIDRI is 0. Only one CP can connect,
			 * Reject association request
			 */
			cause = NORESOURCESAVAILABLE;
		}else{
			assoc_available = false;
		}
	}
	pfcp_ass_setup_resp->header.message_len += set_cause(&(pfcp_ass_setup_resp->cause), cause);
	if (cause == REQUESTACCEPTED) {
		/* Association Response alway sends TWO TEID Pool, 1st: S1U/West_Bound Pool,
		 * 2nd: S5S8/East_Bound pool
		 * 3rd: S5S8/West_Bound Pool, if logical interface is present
		 * 3rd: S5S8/East_Bound Pool, if logical interface is present */
		/* WB/S1U/S5S8 and EB/S5S8 interfaces */
		pfcp_ass_setup_resp->user_plane_ip_rsrc_info_count = 2;
		/* UPF Features IE is added for the ENDMARKER feauture which is supported in SGWU only */
		set_upf_features(&(pfcp_ass_setup_resp->up_func_feat));
		pfcp_ass_setup_resp->up_func_feat.sup_feat.empu |= EMPU ;
		pfcp_ass_setup_resp->header.message_len += pfcp_ass_setup_resp->up_func_feat.header.len;
		/* Set UP IP resource info */
		for( int i = 0; i < pfcp_ass_setup_resp->user_plane_ip_rsrc_info_count; i++ ){
			set_up_ip_resource_info(&(pfcp_ass_setup_resp->user_plane_ip_rsrc_info[i]),
					i, teid_range, NOT_PRESENT);
			pfcp_ass_setup_resp->header.message_len +=
				pfcp_ass_setup_resp->user_plane_ip_rsrc_info[i].header.len;
		}
		/* WB/S5S8 logical interface pool, only when configured (IPv4 or IPv6). */
		if (app.wb_li_ip || isIPv6Present(&app.wb_li_ipv6)) {
			set_up_ip_resource_info(&(pfcp_ass_setup_resp->user_plane_ip_rsrc_info[pfcp_ass_setup_resp->user_plane_ip_rsrc_info_count]),
					NOT_PRESENT, teid_range, 1);
			pfcp_ass_setup_resp->header.message_len +=
				pfcp_ass_setup_resp->user_plane_ip_rsrc_info[pfcp_ass_setup_resp->user_plane_ip_rsrc_info_count].header.len;
			/* WB/S5S8 Logical interface teid pool */
			pfcp_ass_setup_resp->user_plane_ip_rsrc_info_count += 1;
		}
		/* EB/S5S8 Logical interfaces */
		if (app.eb_li_ip || isIPv6Present(&app.eb_li_ipv6)) {
			set_up_ip_resource_info(&(pfcp_ass_setup_resp->user_plane_ip_rsrc_info[pfcp_ass_setup_resp->user_plane_ip_rsrc_info_count]),
					NOT_PRESENT, teid_range, 2);
			pfcp_ass_setup_resp->header.message_len +=
				pfcp_ass_setup_resp->user_plane_ip_rsrc_info[pfcp_ass_setup_resp->user_plane_ip_rsrc_info_count].header.len;
			/* EB/S5S8 Logical interface teid pool */
			pfcp_ass_setup_resp->user_plane_ip_rsrc_info_count += 1;
		}
	}
	pfcp_ass_setup_resp->header.message_len += sizeof(pfcp_ass_setup_resp->header.seid_seqno.no_seid);
}
/**
 * @brief : Populate a PFCP PFD Management Response message.
 * @param : pfd_resp - response structure to fill.
 * @param : cause_val - cause value reporting the outcome of the request.
 * @param : offending_id - IE type that caused a failure (meaningful on error).
 * @return: Nothing.
 */
void
fill_pfcp_pfd_mgmt_resp(pfcp_pfd_mgmt_rsp_t *pfd_resp, uint8_t cause_val, int offending_id)
{
	/* Message header: PFD management response, no SEID. */
	set_pfcp_header(&pfd_resp->header, PFCP_PFD_MGMT_RSP, 0);

	/* Cause IE carrying the processing result. */
	pfd_resp->cause.cause_value = cause_val;
	pfcp_set_ie_header(&pfd_resp->cause.header, PFCP_IE_CAUSE,
			sizeof(pfd_resp->cause.cause_value));

	/* Offending IE identifying which IE triggered the failure, if any. */
	pfd_resp->offending_ie.type_of_the_offending_ie = (uint16_t)offending_id;
	pfcp_set_ie_header(&pfd_resp->offending_ie.header, PFCP_IE_OFFENDING_IE,
			sizeof(pfd_resp->offending_ie.type_of_the_offending_ie));
}
#endif /* DP_BUILD */
/**
 * @brief : Populate a PFCP Heartbeat Request message.
 * @param : pfcp_heartbeat_req - request structure to fill (zeroed here).
 * @param : seq - sequence number to place in the message header.
 * @return: Nothing.
 *
 * Fix: the original ended with `seq++`, which incremented a by-value
 * parameter — a dead store with no effect on the caller. Removed.
 */
void
fill_pfcp_heartbeat_req(pfcp_hrtbeat_req_t *pfcp_heartbeat_req, uint32_t seq)
{
	memset(pfcp_heartbeat_req, 0, sizeof(pfcp_hrtbeat_req_t));
	/* Heartbeat is a node-level message: no SEID in the header. */
	set_pfcp_seid_header((pfcp_header_t *) &(pfcp_heartbeat_req->header),
			PFCP_HEARTBEAT_REQUEST, NO_SEID, seq, NO_CP_MODE_REQUIRED);
	set_recovery_time_stamp(&(pfcp_heartbeat_req->rcvry_time_stmp));
}
/**
 * @brief : Populate a PFCP Heartbeat Response message.
 * @param : pfcp_heartbeat_resp - response structure to fill (zeroed here).
 * @return: Nothing. Caller overwrites the sequence number with the one
 *          from the corresponding request.
 */
void
fill_pfcp_heartbeat_resp(pfcp_hrtbeat_rsp_t *pfcp_heartbeat_resp)
{
	const uint32_t seq = 1;

	memset(pfcp_heartbeat_resp, 0, sizeof(*pfcp_heartbeat_resp));
	/* Node-level message: header carries a sequence number but no SEID. */
	set_pfcp_seid_header((pfcp_header_t *) &pfcp_heartbeat_resp->header,
			PFCP_HEARTBEAT_RESPONSE, NO_SEID, seq, NO_CP_MODE_REQUIRED);
	set_recovery_time_stamp(&pfcp_heartbeat_resp->rcvry_time_stmp);
}
/**
 * @brief : Build, encode and send a PFCP Heartbeat Request to a peer.
 * @param : peer_addr - destination peer address (IPv4 or IPv6).
 * @param : seq - sequence number for the request header.
 * @return: Always 0; a send failure is only logged at debug severity.
 */
int process_pfcp_heartbeat_req(peer_addr_t peer_addr, uint32_t seq)
{
	uint8_t pfcp_msg[PFCP_MSG_LEN]={0};
	int encoded = 0;

	pfcp_hrtbeat_req_t pfcp_heartbeat_req = {0};
	fill_pfcp_heartbeat_req(&pfcp_heartbeat_req, seq);
	encoded = encode_pfcp_hrtbeat_req_t(&pfcp_heartbeat_req, pfcp_msg);

	/* The CP and DP builds use different socket descriptors; the send
	 * itself is identical (pfcp_send handles v4/v6 selection). */
#ifdef CP_BUILD
	if ( pfcp_send(pfcp_fd, pfcp_fd_v6, pfcp_msg, encoded, peer_addr, SENT) < 0 ) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT "Error in sending PFCP "
				"Heartbeat Request : %i\n", LOG_VALUE, errno);
	}
#endif

#ifdef DP_BUILD
	if ( pfcp_send(my_sock.sock_fd, my_sock.sock_fd_v6, pfcp_msg, encoded,
				peer_addr, SENT) < 0 ) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT "Error in sending PFCP "
				"Heartbeat Request : %i\n", LOG_VALUE, errno);
	}
#endif

	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | pfcp_messages/pfcp_req_resp.c | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <byteswap.h>
#include "pfcp_util.h"
#include "dp_ipc_api.h"
#include "pfcp_set_ie.h"
#include "pfcp_association.h"
#include "pfcp_messages_encoder.h"
#include "pfcp_messages_decoder.h"
#include "gw_adapter.h"
#include "pfcp.h"
#ifdef CP_BUILD
#include "pfcp.h"
#include "sm_arr.h"
#include "sm_pcnd.h"
#include "cp_stats.h"
#include "sm_struct.h"
#include "cp_config.h"
#else
#include "up_main.h"
#include "pfcp_up_sess.h"
#include "pfcp_up_struct.h"
#endif /* CP_BUILD */
uint16_t dp_comm_port;
uint16_t cp_comm_port;
struct in_addr dp_comm_ip;
struct in6_addr dp_comm_ipv6;
uint8_t dp_comm_ip_type;
/*
* UDP Socket
*/
extern udp_sock_t my_sock;
extern struct rte_hash *heartbeat_recovery_hash;
extern peer_addr_t upf_pfcp_sockaddr;
extern int clSystemLog;
#ifdef CP_BUILD
extern pfcp_config_t config;
extern int clSystemLog;
extern int s5s8_fd;
extern int s5s8_fd_v6;
extern socklen_t s5s8_sockaddr_len;
extern socklen_t s5s8_sockaddr_ipv6_len;
extern socklen_t s11_mme_sockaddr_len;
extern socklen_t s11_mme_sockaddr_ipv6_len;
extern peer_addr_t s5s8_recv_sockaddr;
#else
#endif /* CP_BUILD */
#if defined(CP_BUILD) || defined(DP_BUILD)
/**
 * @brief : Process incoming heartbeat request and send response
 * @param : buf_rx holds data from incoming request
 * @param : peer_addr used to pass address of peer node
 * @return : Returns 0 in case of success , -1 otherwise
 *
 * Fix: the request struct was heap-allocated via an UNCHECKED malloc and
 * freed in the same scope (CERT MEM32-C). It is a small, fixed-size,
 * function-local object, so a stack variable removes both the unchecked
 * allocation and any leak risk on early exit.
 */
static int
process_heartbeat_request(uint8_t *buf_rx, peer_addr_t *peer_addr)
{
	int encoded = 0;
	int decoded = 0;
	uint8_t pfcp_msg[PFCP_MSG_LEN] = {0};
	pfcp_hrtbeat_req_t pfcp_heartbeat_req = {0};
	pfcp_hrtbeat_rsp_t pfcp_heartbeat_resp = {0};

	RTE_SET_USED(decoded);

	decoded = decode_pfcp_hrtbeat_req_t(buf_rx, &pfcp_heartbeat_req);
	fill_pfcp_heartbeat_resp(&pfcp_heartbeat_resp);
	/* Echo the request's sequence number back in the response. */
	pfcp_heartbeat_resp.header.seid_seqno.no_seid.seq_no =
		pfcp_heartbeat_req.header.seid_seqno.no_seid.seq_no;
	encoded = encode_pfcp_hrtbeat_rsp_t(&pfcp_heartbeat_resp, pfcp_msg);

#ifdef USE_REST
	/* Reset the periodic timers */
	node_address_t node_addr = {0};
	get_peer_node_addr(peer_addr, &node_addr);
	process_response(&node_addr);
#endif /* USE_REST */

#ifdef CP_BUILD
	if ( pfcp_send(my_sock.sock_fd, my_sock.sock_fd_v6, pfcp_msg, encoded, *peer_addr, SENT) < 0 ) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT":Error sending in "
				"heartbeat request: %i\n", LOG_VALUE, errno);
	}
#endif /* CP_BUILD */

#ifdef DP_BUILD
	if (encoded) {
		int bytes = 0;
		/* DP sends directly on the raw socket matching the peer's IP family. */
		if (peer_addr->type == PDN_TYPE_IPV4) {
			if(my_sock.sock_fd <= 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"IPv4:PFCP send is "
						"not possible due to incompatiable IP Type at "
						"Source and Destination\n", LOG_VALUE);
				return 0;
			}
			bytes = sendto(my_sock.sock_fd, (uint8_t *) pfcp_msg, encoded, MSG_DONTWAIT,
					(struct sockaddr *) &peer_addr->ipv4, sizeof(peer_addr->ipv4));
		} else if (peer_addr->type == PDN_TYPE_IPV6) {
			if(my_sock.sock_fd_v6 <= 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"IPv6:PFCP send is "
						"not possible due to incompatiable IP Type at "
						"Source and Destination\n", LOG_VALUE);
				return 0;
			}
			bytes = sendto(my_sock.sock_fd_v6, (uint8_t *) pfcp_msg, encoded, MSG_DONTWAIT,
					(struct sockaddr *) &peer_addr->ipv6, sizeof(peer_addr->ipv6));
		}
		if (bytes > 0) {
			update_cli_stats((peer_address_t *) peer_addr,
					PFCP_HEARTBEAT_RESPONSE, SENT, SX);
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"DP: Send PFCP Heartbeat response\n", LOG_VALUE);
		} else {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to send PFCP Heartbeat response\n", LOG_VALUE);
		}
	}
#endif /* DP_BUILD */

	return 0;
}
/**
 * @brief : Process hearbeat response message
 * @param : buf_rx holds data from incoming request
 * @param : peer_addr used to pass address of peer node
 * @return : Returns 0 in case of success , -1 otherwise
 *
 * Fix: a decode failure was only logged and execution continued with a
 * zero-initialized response struct; now it returns -1 immediately so a
 * garbage recovery timestamp is never compared against the stored one.
 */
static int
process_heartbeat_response(uint8_t *buf_rx, peer_addr_t *peer_addr)
{
	node_address_t node_addr = {0};
	get_peer_node_addr(peer_addr, &node_addr);

#ifdef USE_REST
	/* A heartbeat response proves liveness: reset the peer's timers. */
	process_response(&node_addr);
#endif /*USE_REST*/

	int ret = 0;
	uint32_t *recov_time;
	uint32_t update_recov_time = 0;
	pfcp_hrtbeat_rsp_t pfcp_hearbeat_resp = {0};

	ret = decode_pfcp_hrtbeat_rsp_t(buf_rx, &pfcp_hearbeat_resp);
	if (ret <= 0) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to decode PFCP Heartbeat Resp\n\n", LOG_VALUE);
		return -1;
	}

	ret = rte_hash_lookup_data(heartbeat_recovery_hash ,
			(const void *)&node_addr, (void **) &(recov_time));
	if (ret == -ENOENT) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"No entry found for the heartbeat!!\n", LOG_VALUE);
	} else {
		/* A newer recovery timestamp means the peer restarted since we
		 * last saw it. */
		update_recov_time = (pfcp_hearbeat_resp.rcvry_time_stmp.rcvry_time_stmp_val);
		if(update_recov_time > *recov_time) {
			/* Updated time stamp of user-plane */
			*recov_time = update_recov_time;
#ifdef CP_BUILD
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"WARNING : DP Restart Detected and INITIATED RECOVERY MODE\n",
					LOG_VALUE);
			/* SET recovery initiated flag */
			recovery_flag = 1;
			/* Send association request to peer node */
			if(process_aasociation_setup_req(peer_addr) < 0) {
				/* Severity level*/
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Error in sending "
						"PFCP Association Setup Request\n", LOG_VALUE);
				return -1;
			}
#endif /* CP_BUILD */
		}
	}
	return 0;
}
/**
 * @brief : Receive and demultiplex one PFCP message.
 *          CP build: dispatch via precondition check + per-mode state machine.
 *          DP build: handle each message type inline and send the encoded
 *          response (if any) on the matching IPv4/IPv6 socket.
 * @param : buf_rx - receive buffer containing the raw PFCP message.
 * @param : peer_addr - sender's address; also used for CLI stats keys.
 * @param : is_ipv6 - whether the message arrived on the IPv6 socket.
 * @return : 0 on success, -1 on failure.
 *
 * Fixes vs. original:
 *  - Session Report Response: `cause_value = REQUESTACCEPTED` (assignment)
 *    replaced with `==`; the assignment clobbered the decoded cause and made
 *    the subsequent error check unreachable.
 *  - PFD management: unchecked malloc now checked (CERT MEM32-C).
 *  - tmp_sess no longer leaks on the early -1 returns in the deletion and
 *    report-response cases.
 */
int
process_pfcp_msg(uint8_t *buf_rx, peer_addr_t *peer_addr, bool is_ipv6)
{
	int ret = 0, bytes_rx = 0;
	pfcp_header_t *pfcp_header = (pfcp_header_t *) buf_rx;

#ifdef CP_BUILD
	/* TODO: Move this rx */
	if ((bytes_rx = pfcp_recv(pfcp_rx, PFCP_RX_BUFF_SIZE,
					peer_addr, is_ipv6)) < 0) {
		perror("msgrecv");
		return -1;
	}

	msg_info msg = {0};
	if(pfcp_header->message_type == PFCP_HEARTBEAT_REQUEST){
		update_cli_stats((peer_address_t *) peer_addr,
				pfcp_header->message_type, RCVD, SX);

		ret = process_heartbeat_request(buf_rx, peer_addr);
		if(ret != 0){
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Failed to process "
					"pfcp heartbeat request\n", LOG_VALUE);
			return -1;
		}
		return 0;
	}else if(pfcp_header->message_type == PFCP_HEARTBEAT_RESPONSE){
		ret = process_heartbeat_response(buf_rx, peer_addr);
		if (ret != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT":Failed to process "
					"pfcp heartbeat response\n", LOG_VALUE);
			return -1;
		} else {
			update_cli_stats((peer_address_t *) peer_addr,
					PFCP_HEARTBEAT_RESPONSE, RCVD, SX);
		}
		return 0;
	}else {
		/*Reset periodic timers*/
		/* NOTE(review): msg is zero-initialized above and pfcp_pcnd_check()
		 * has not run yet, so msg.msg_type is always 0 here; this branch
		 * looks dead — confirm before relying on it. */
		if (msg.msg_type != 0) {
			node_address_t node_addr = {0};
			get_peer_node_addr(peer_addr, &node_addr);
			process_response(&node_addr);
		}

		if ((ret = pfcp_pcnd_check(buf_rx, &msg, bytes_rx, peer_addr)) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT":Failed to process "
					"pfcp precondition check\n", LOG_VALUE);

			if(msg.pfcp_msg.pfcp_sess_del_resp.cause.cause_value != REQUESTACCEPTED){
				update_cli_stats((peer_address_t *) peer_addr,
						pfcp_header->message_type, REJ, SX);
			}
			else {
				update_cli_stats((peer_address_t *) peer_addr,
						pfcp_header->message_type, ACC, SX);
			}
			return -1;
		}

		/* Node-level/report messages are counted as received; all other
		 * replies passed the precondition check, i.e. were accepted. */
		if(pfcp_header->message_type == PFCP_SESSION_REPORT_REQUEST ||
				pfcp_header->message_type == PFCP_SESSION_SET_DELETION_REQUEST ||
				pfcp_header->message_type == PFCP_SESSION_SET_DELETION_RESPONSE)
			update_cli_stats((peer_address_t *) peer_addr,
					pfcp_header->message_type, RCVD, SX);
		else
			update_cli_stats((peer_address_t *) peer_addr,
					pfcp_header->message_type, ACC, SX);

		/* State Machine execute on session level, but following messages are NODE level */
		if (msg.msg_type == PFCP_SESSION_SET_DELETION_REQUEST) {
			/* Process RCVD PFCP Session Set Deletion Request */
			ret = process_pfcp_sess_set_del_req(&msg, peer_addr);
			if (ret) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"process_pfcp_sess_set_del_req() failed with Error: %d \n",
						LOG_VALUE, ret);
			}
			return 0;
		} else if (msg.msg_type == PFCP_SESSION_SET_DELETION_RESPONSE) {
			/* Process RCVD PFCP Session Set Deletion Response */
			ret = process_pfcp_sess_set_del_rsp(&msg, peer_addr);
			if (ret) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
						"process_del_pdn_conn_set_rsp() failed with Error: %d \n",
						LOG_VALUE, ret);
			}
			return 0;
		} else {
			/* Session-level messages: dispatch on (procedure, state, event)
			 * for the gateway mode this message belongs to. */
			if ((msg.proc < END_PROC) && (msg.state < END_STATE) && (msg.event < END_EVNT)) {
				if (SGWC == msg.cp_mode) {
					ret = (*state_machine_sgwc[msg.proc][msg.state][msg.event])(&msg, peer_addr);
				} else if (PGWC == msg.cp_mode) {
					ret = (*state_machine_pgwc[msg.proc][msg.state][msg.event])(&msg, peer_addr);
				} else if (SAEGWC == msg.cp_mode) {
					ret = (*state_machine_saegwc[msg.proc][msg.state][msg.event])(&msg, peer_addr);
				} else {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Invalid "
							"Control Plane Type: %d \n", LOG_VALUE, msg.cp_mode);
					return -1;
				}

				if (ret) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"State"
							"Machine Callback failed with Error: %d \n", LOG_VALUE, ret);
					return -1;
				}
			} else {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Invalid Procedure "
						"or State or Event \n", LOG_VALUE);
				return -1;
			}
		}
	}
#else /* End CP_BUILD , Start DP_BUILD */
	pfcp_session_t *sess = NULL;
	/* tmp_sess holds a snapshot of the session for lawful-intercept (LI)
	 * post-processing after the response is sent. */
	pfcp_session_t *tmp_sess = NULL;

	/* TO maintain the peer node info and add entry in connection table */
	node_address_t peer_info = {0};

	/* TODO: Move this rx */
	if ((bytes_rx = udp_recv(pfcp_rx, 4096, peer_addr, is_ipv6)) < 0) {
		perror("msgrecv");
		return -1;
	}

	int encoded = 0;
	int decoded = 0;
	uint8_t pfcp_msg[4096] = {0};
	node_address_t node_value = {0};
	/* Cause of the response we build below; drives the ACC/REJ CLI stat. */
	uint8_t cli_cause = 0;

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Bytes received is %d\n", LOG_VALUE, bytes_rx);

	if (peer_addr->type == IPV6_TYPE) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"IPv6_ADDR ["IPv6_FMT"]\n",
				LOG_VALUE, IPv6_PRINT(IPv6_CAST(peer_addr->ipv6.sin6_addr.s6_addr)));
		peer_info.ip_type = IPV6_TYPE;
		memcpy(&peer_info.ipv6_addr, &peer_addr->ipv6.sin6_addr.s6_addr, IPV6_ADDR_LEN);
	} else {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"IPv4_ADDR ["IPV4_ADDR"]\n",
				LOG_VALUE, IPV4_ADDR_HOST_FORMAT(peer_addr->ipv4.sin_addr.s_addr));
		peer_info.ip_type = IPV4_TYPE;
		peer_info.ipv4_addr = peer_addr->ipv4.sin_addr.s_addr;
	}

	/* Report responses are counted later with their ACC/REJ outcome. */
	if( pfcp_header->message_type != PFCP_SESSION_REPORT_RESPONSE)
	{
		update_cli_stats((peer_address_t *) peer_addr,
				pfcp_header->message_type, RCVD, SX);
	}

#ifdef USE_REST
	node_address_t node_addr = {0};
	get_peer_node_addr(peer_addr, &node_addr);
	process_response(&node_addr);
#endif /* USE_REST */

	switch (pfcp_header->message_type)
	{
		case PFCP_HEARTBEAT_REQUEST:
			ret = process_heartbeat_request(buf_rx, peer_addr);
			if(ret != 0){
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to process "
						"pfcp heartbeat request\n", LOG_VALUE);
				return -1;
			}
			break;
		case PFCP_HEARTBEAT_RESPONSE:
			ret = process_heartbeat_response(buf_rx, peer_addr);
			if(ret != 0){
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Failed to process "
						"pfcp heartbeat response\n", LOG_VALUE);
				return -1;
			}
			break;
		case PFCP_ASSOCIATION_SETUP_REQUEST:
			{
				pfcp_assn_setup_req_t pfcp_ass_setup_req = {0};
				pfcp_assn_setup_rsp_t pfcp_ass_setup_resp = {0};

				/* TODO: Error Handling */
				decoded = decode_pfcp_assn_setup_req_t(buf_rx, &pfcp_ass_setup_req);
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"[DP] Decoded bytes [%d]\n", LOG_VALUE, decoded);
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"recover_time[%d],cpf[%d] from CP \n\n",
						LOG_VALUE, (pfcp_ass_setup_req.rcvry_time_stmp.rcvry_time_stmp_val),
						(pfcp_ass_setup_req.cp_func_feat.sup_feat));

				uint8_t cause_id = 0;
				node_address_t pfcp_ass_setup_req_node = {0};
				node_address_t pfcp_ass_setup_resp_node = {0};
				int offend_id = 0;
				cause_check_association(&pfcp_ass_setup_req, &cause_id, &offend_id);
				cli_cause = cause_id;

				if (cause_id == REQUESTACCEPTED)
				{
					ret = fill_ip_addr(pfcp_ass_setup_req.node_id.node_id_value_ipv4_address,
							pfcp_ass_setup_req.node_id.node_id_value_ipv6_address,
							&pfcp_ass_setup_req_node);
					if (ret < 0) {
						clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
								"IP address", LOG_VALUE);
					}
				}

				add_ip_to_heartbeat_hash(&peer_info,
						pfcp_ass_setup_req.rcvry_time_stmp.rcvry_time_stmp_val);

#ifdef USE_REST
				if ((peer_info.ip_type == IPV4_TYPE) || (peer_info.ip_type == IPV6_TYPE)) {
					if ((add_node_conn_entry(peer_info, 0, SX_PORT_ID)) != 0) {
						clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to "
								"add connection entry for SGWU/SAEGWU", LOG_VALUE);
					}
				}
#endif

				/* Respond with a node id of the same family the CP used. */
				if (pfcp_ass_setup_req.node_id.node_id_type == NODE_ID_TYPE_TYPE_IPV4ADDRESS) {
					uint8_t temp[IPV6_ADDRESS_LEN] = {0};
					ret = fill_ip_addr(dp_comm_ip.s_addr, temp, &pfcp_ass_setup_resp_node);
					if (ret < 0) {
						clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
								"IP address", LOG_VALUE);
					}
				} else if (pfcp_ass_setup_req.node_id.node_id_type == NODE_ID_TYPE_TYPE_IPV6ADDRESS) {
					ret = fill_ip_addr(0, dp_comm_ipv6.s6_addr, &pfcp_ass_setup_resp_node);
					if (ret < 0) {
						clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
								"IP address", LOG_VALUE);
					}
				}

				fill_pfcp_association_setup_resp(&pfcp_ass_setup_resp, cause_id,
						pfcp_ass_setup_resp_node, pfcp_ass_setup_req_node);

				pfcp_ass_setup_resp.header.seid_seqno.no_seid.seq_no =
					pfcp_ass_setup_req.header.seid_seqno.no_seid.seq_no;

				encoded = encode_pfcp_assn_setup_rsp_t(&pfcp_ass_setup_resp, pfcp_msg);

				pfcp_header_t *pfcp_hdr = (pfcp_header_t *) pfcp_msg;
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT "sending response "
						"of sess [%d] from dp\n",LOG_VALUE, pfcp_hdr->message_type);
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT "length[%d]\n",
						LOG_VALUE, htons(pfcp_hdr->message_len));
				break;
			}
		case PFCP_PFD_MGMT_REQUEST:
			{
				int offend_id = 0;
				uint8_t cause_id = 0;
				pfcp_pfd_mgmt_rsp_t pfcp_pfd_mgmt_resp = {0};
				pfcp_pfd_mgmt_req_t *pfcp_pfd_mgmt_req = malloc(sizeof(pfcp_pfd_mgmt_req_t));
				/* Fix: malloc result was previously used unchecked. */
				if (pfcp_pfd_mgmt_req == NULL) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate "
							"memory for PFD management request\n", LOG_VALUE);
					return -1;
				}
				memset(pfcp_pfd_mgmt_req, 0, sizeof(pfcp_pfd_mgmt_req_t));

				/* Decode pfcp pfd mgmt req */
				decoded = decode_pfcp_pfd_mgmt_req_t(buf_rx, pfcp_pfd_mgmt_req);
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"[DP] Decoded bytes [%d]\n",
						LOG_VALUE, decoded);
				process_up_pfd_mgmt_request(pfcp_pfd_mgmt_req, &cause_id,
						&offend_id, peer_addr->ipv4.sin_addr.s_addr);

				/* Fill pfcp pfd mgmt response */
				fill_pfcp_pfd_mgmt_resp(&pfcp_pfd_mgmt_resp, cause_id, offend_id);

				if(pfcp_pfd_mgmt_req->header.s) {
					pfcp_pfd_mgmt_resp.header.seid_seqno.no_seid.seq_no =
						pfcp_pfd_mgmt_req->header.seid_seqno.has_seid.seq_no;
				} else {
					pfcp_pfd_mgmt_resp.header.seid_seqno.no_seid.seq_no =
						pfcp_pfd_mgmt_req->header.seid_seqno.no_seid.seq_no;
				}
				cli_cause = cause_id;

				encoded = encode_pfcp_pfd_mgmt_rsp_t(&pfcp_pfd_mgmt_resp, pfcp_msg);
				pfcp_header_t *pfcp_hdr = (pfcp_header_t *) pfcp_msg;
				RTE_SET_USED(encoded);
				RTE_LOG_DP(DEBUG, DP, "sending response of sess [%d] from dp\n",pfcp_hdr->message_type);
				RTE_LOG_DP(DEBUG, DP, "length[%d]\n",htons(pfcp_hdr->message_len));
				free(pfcp_pfd_mgmt_req);
				break;
			}
		case PFCP_SESSION_ESTABLISHMENT_REQUEST:
			{
				pfcp_sess_estab_req_t pfcp_session_request = {0};
				pfcp_sess_estab_rsp_t pfcp_session_response = {0};

				decoded = decode_pfcp_sess_estab_req_t(buf_rx, &pfcp_session_request);
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"DECOED bytes in sesson "
						"is %d\n", LOG_VALUE, decoded);

				if (process_up_session_estab_req(&pfcp_session_request,
							&pfcp_session_response, peer_addr)) {
					return -1;
				}

				uint8_t cause_id = 0;
				int offend_id = 0 ;
				cause_check_sess_estab(&pfcp_session_request, &cause_id, &offend_id);
				cli_cause = cause_id;

				/*Filling Node ID for F-SEID*/
				if (pfcp_session_request.node_id.node_id_type == NODE_ID_TYPE_TYPE_IPV4ADDRESS) {
					uint8_t temp[IPV6_ADDRESS_LEN] = {0};
					ret = fill_ip_addr(dp_comm_ip.s_addr, temp, &node_value);
					if (ret < 0) {
						clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
								"IP address", LOG_VALUE);
					}
				} else if (pfcp_session_request.node_id.node_id_type == NODE_ID_TYPE_TYPE_IPV6ADDRESS) {
					ret = fill_ip_addr(0, dp_comm_ipv6.s6_addr, &node_value);
					if (ret < 0) {
						clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
								"IP address", LOG_VALUE);
					}
				}

				fill_pfcp_session_est_resp(&pfcp_session_response, cause_id,
						offend_id, node_value, &pfcp_session_request);

				pfcp_session_response.header.seid_seqno.has_seid.seq_no =
					pfcp_session_request.header.seid_seqno.has_seid.seq_no;

				memcpy(&(pfcp_session_response.up_fseid.ipv4_address),
						&(dp_comm_ip.s_addr), IPV4_SIZE);

				/*CLI:increment active-session count*/
				if (cause_id == REQUESTACCEPTED)
					update_sys_stat(number_of_active_session, INCREMENT);

				encoded = encode_pfcp_sess_estab_rsp_t(&pfcp_session_response, pfcp_msg);

				pfcp_header_t *pfcp_hdr = (pfcp_header_t *) pfcp_msg;
				/* Responses carry the CP's F-SEID in the header. */
				pfcp_hdr->seid_seqno.has_seid.seid =
					bswap_64(pfcp_session_request.cp_fseid.seid);

				/* Snapshot the session for LI processing after send. */
				sess = get_sess_info_entry(pfcp_session_response.up_fseid.seid, SESS_MODIFY);
				if ((sess != NULL) && (sess->li_sx_config_cnt > 0)) {
					tmp_sess = calloc(1, sizeof(pfcp_session_t));
					if (tmp_sess != NULL)
						memcpy(tmp_sess, sess, sizeof(pfcp_session_t));
				}
				break;
			}
		case PFCP_SESSION_MODIFICATION_REQUEST:
			{
				int offend_id = 0;
				uint8_t cause_id = REQUESTACCEPTED;
				pfcp_sess_mod_req_t pfcp_session_mod_req = {0};
				pfcp_sess_mod_rsp_t pfcp_sess_mod_res = {0};

				decoded = decode_pfcp_sess_mod_req_t(buf_rx, &pfcp_session_mod_req);
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT "DECODED bytes in "
						"sesson modification is %d\n",LOG_VALUE, decoded);

				sess = get_sess_info_entry(pfcp_session_mod_req.header.seid_seqno.has_seid.seid, SESS_MODIFY);
				if(sess == NULL) {
					cause_id = SESSIONCONTEXTNOTFOUND;
				}

				if ((sess != NULL) && (sess->li_sx_config_cnt > 0)) {
					tmp_sess = calloc(1, sizeof(pfcp_session_t));
					if (tmp_sess != NULL)
						memcpy(tmp_sess, sess, sizeof(pfcp_session_t));
				}

				cli_cause = cause_id;

				fill_pfcp_session_modify_resp(&pfcp_sess_mod_res,
						&pfcp_session_mod_req, cause_id, offend_id);

				pfcp_sess_mod_res.header.seid_seqno.has_seid.seid =
					pfcp_session_mod_req.cp_fseid.seid;
				pfcp_sess_mod_res.header.seid_seqno.has_seid.seq_no =
					pfcp_session_mod_req.header.seid_seqno.has_seid.seq_no;

				if (process_up_session_modification_req(&pfcp_session_mod_req,
							&pfcp_sess_mod_res)) {
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Failure in proces "
							"up session modification_req function\n", LOG_VALUE);
				}

				/*cause_check_sess_modification(&pfcp_session_mod_req, &cause_id, &offend_id);
				 * if (ret == SESSIONCONTEXTNOTFOUND ){
				 cause_id = SESSIONCONTEXTNOTFOUND;
				 } */

				encoded = encode_pfcp_sess_mod_rsp_t(&pfcp_sess_mod_res, pfcp_msg);
				pfcp_header_t *pfcp_hdr = (pfcp_header_t *) pfcp_msg;
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT "sending response of "
						"sess [%d] from dp\n", LOG_VALUE, pfcp_hdr->message_type);
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT "length[%d]\n",
						LOG_VALUE, htons(pfcp_hdr->message_len));
				break;
			}
		case PFCP_SESSION_DELETION_REQUEST:
			{
				int offend_id = 0;
				uint8_t cause_id = 0;
				uint64_t cp_seid = 0;
				pfcp_sess_del_rsp_t pfcp_sess_del_res = {0};
				pfcp_sess_del_req_t pfcp_session_del_req = {0};

				decoded = decode_pfcp_sess_del_req_t(buf_rx, &pfcp_session_del_req);
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"DECODE bytes in sesson deletion is %d\n\n",
						LOG_VALUE, decoded);

				sess = get_sess_info_entry(pfcp_session_del_req.header.seid_seqno.has_seid.seid, SESS_MODIFY);
				if (sess == NULL) {
					cause_id = SESSIONCONTEXTNOTFOUND;
				}

				if ((sess != NULL) && (sess->li_sx_config_cnt > 0)) {
					tmp_sess = calloc(1, sizeof(pfcp_session_t));
					if (tmp_sess != NULL)
						memcpy(tmp_sess, sess, sizeof(pfcp_session_t));
				}

				if (process_up_session_deletion_req(&pfcp_session_del_req,
							&pfcp_sess_del_res)) {
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Failure in "
							"process_up_session_deletion_req function\n",LOG_VALUE);
					/* Fix: release the LI snapshot on this early exit. */
					free(tmp_sess);
					return -1;
				}

				cause_check_delete_session(&pfcp_session_del_req, &cause_id, &offend_id);

				cp_seid = pfcp_sess_del_res.header.seid_seqno.has_seid.seid;
				cli_cause = cause_id;

				fill_pfcp_sess_del_resp(&pfcp_sess_del_res, cause_id, offend_id);

				pfcp_sess_del_res.header.seid_seqno.has_seid.seid = cp_seid;
				pfcp_sess_del_res.header.seid_seqno.has_seid.seq_no =
					pfcp_session_del_req.header.seid_seqno.has_seid.seq_no;

				encoded = encode_pfcp_sess_del_rsp_t(&pfcp_sess_del_res, pfcp_msg);

				pfcp_header_t *pfcp_hdr = (pfcp_header_t *) pfcp_msg;
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"sending response "
						"of sess [%d] from dp\n", LOG_VALUE, pfcp_hdr->message_type);
				break;
			}
		case PFCP_SESSION_REPORT_RESPONSE:
			{
				/*DDN Response Handle*/
				pfcp_sess_rpt_rsp_t pfcp_sess_rep_resp = {0};

				decoded = decode_pfcp_sess_rpt_rsp_t(buf_rx,
						&pfcp_sess_rep_resp);

				/* Fix: was `=` (assignment), which overwrote the decoded
				 * cause and always reported ACC. */
				update_cli_stats((peer_address_t *) peer_addr,
						pfcp_header->message_type,
						(pfcp_sess_rep_resp.cause.cause_value == REQUESTACCEPTED) ? ACC : REJ, SX);

				if (pfcp_sess_rep_resp.cause.cause_value != REQUESTACCEPTED) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Cause received "
							"Report response is %d\n", LOG_VALUE,
							pfcp_sess_rep_resp.cause.cause_value);

					/* Add handling to send association to next upf
					 * for each buffered CSR */
					return -1;
				}

				sess = get_sess_info_entry(pfcp_sess_rep_resp.header.seid_seqno.has_seid.seid, SESS_MODIFY);
				if ((sess != NULL) && (sess->li_sx_config_cnt > 0)) {
					tmp_sess = calloc(1, sizeof(pfcp_session_t));
					if (tmp_sess != NULL)
						memcpy(tmp_sess, sess, sizeof(pfcp_session_t));
				}

				remove_cdr_entry(pfcp_sess_rep_resp.header.seid_seqno.has_seid.seq_no,
						pfcp_sess_rep_resp.header.seid_seqno.has_seid.seid);

				if (process_up_session_report_resp(&pfcp_sess_rep_resp)) {
					/* Fix: release the LI snapshot on this early exit. */
					free(tmp_sess);
					return -1;
				}

				clLog(clSystemLog, eCLSeverityDebug,LOG_FORMAT "Received Report "
						"Response for sess_id:%lu\n\n", LOG_VALUE,
						pfcp_sess_rep_resp.header.seid_seqno.has_seid.seid);
				break;
			}
		case PFCP_SESSION_SET_DELETION_REQUEST:
			{
#ifdef USE_CSID
				int offend_id = 0;
				uint8_t cause_id = 0;
				/* Handle PFCP Session SET Deletion Response */
				pfcp_sess_set_del_req_t pfcp_sess_set_del_req = {0};
				pfcp_sess_set_del_rsp_t pfcp_sess_set_del_rsp = {0};

				/* Type : 0 --> DP */
				/*Decode the received msg and stored into the struct. */
				decoded = decode_pfcp_sess_set_del_req_t(buf_rx,
						&pfcp_sess_set_del_req);
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"DECODE bytes in "
						"session set deletion req is %d\n", LOG_VALUE, decoded);

				if (process_up_sess_set_del_req(&pfcp_sess_set_del_req)) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure in "
							"process up Session Set Deletion Request function\n",
							LOG_VALUE);
					return -1;
				}

				/* Fill PFCP SESS SET DEL RESP */
				cause_id = REQUESTACCEPTED;
				fill_pfcp_sess_set_del_resp(&pfcp_sess_set_del_rsp,
						cause_id, offend_id);

				if (pfcp_sess_set_del_req.header.s) {
					pfcp_sess_set_del_rsp.header.seid_seqno.no_seid.seq_no =
						pfcp_sess_set_del_req.header.seid_seqno.has_seid.seq_no;
				} else {
					pfcp_sess_set_del_rsp.header.seid_seqno.no_seid.seq_no =
						pfcp_sess_set_del_req.header.seid_seqno.no_seid.seq_no;
				}

				encoded = encode_pfcp_sess_set_del_rsp_t(&pfcp_sess_set_del_rsp, pfcp_msg);

				pfcp_header_t *pfcp_hdr = (pfcp_header_t *) pfcp_msg;
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Sending response "
						"for [%d] from dp\n", LOG_VALUE, pfcp_hdr->message_type);
#endif /* USE_CSID */
				break;
			}
		case PFCP_SESSION_SET_DELETION_RESPONSE:
			{
				/* Handle PFCP Session SET Deletion Response */
				pfcp_sess_set_del_rsp_t pfcp_sess_set_del_rsp = {0};

				/*Decode the received msg and stored into the struct. */
				decoded = decode_pfcp_sess_set_del_rsp_t(buf_rx,
						&pfcp_sess_set_del_rsp);

				if (pfcp_sess_set_del_rsp.cause.cause_value != REQUESTACCEPTED) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Cause received pfcp session set deletion "
							"response is %d\n", LOG_VALUE,
							pfcp_sess_set_del_rsp.cause.cause_value);
					/* Add handling to send association to next upf
					 * for each buffered CSR */
					return -1;
				}
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Received PFCP "
						"Session Set Deletion Response\n", LOG_VALUE);
				break;
			}
		default:
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"No Data received\n", LOG_VALUE);
			break;
	}

	/* Send the encoded response (if the case above produced one) on the
	 * socket matching the peer's IP family, then record the CLI stat. */
	if (encoded != 0) {
		int bytes = 0;
		if (peer_addr->type == PDN_TYPE_IPV4) {
			if(my_sock.sock_fd <= 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"PFCP send is "
						"not possible due to incompatiable IP Type at "
						"Source and Destination\n", LOG_VALUE);
				return 0;
			}
			bytes = sendto(my_sock.sock_fd, (uint8_t *) pfcp_msg, encoded, MSG_DONTWAIT,
					(struct sockaddr *) &peer_addr->ipv4, sizeof(peer_addr->ipv4));
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"NGIC- main.c::pfcp_send()"
					"\n\tpfcp_fd= %d, payload_length= %d ,Direction= %d, tx bytes= %d\n",
					LOG_VALUE, my_sock.sock_fd, encoded, SX, bytes);
		} else if (peer_addr->type == PDN_TYPE_IPV6) {
			if(my_sock.sock_fd_v6 <= 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"PFCP send is "
						"not possible due to incompatiable IP Type at "
						"Source and Destination\n", LOG_VALUE);
				return 0;
			}
			bytes = sendto(my_sock.sock_fd_v6, (uint8_t *) pfcp_msg, encoded, MSG_DONTWAIT,
					(struct sockaddr *) &peer_addr->ipv6, sizeof(peer_addr->ipv6));
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"NGIC- main.c::pfcp_send()"
					"\n\tpfcp_fd= %d, payload_length= %d ,Direction= %d, tx bytes= %d\n",
					LOG_VALUE, my_sock.sock_fd_v6, encoded, SX, bytes);
		}

		pfcp_header_t *pfcp_hdr = (pfcp_header_t *) pfcp_msg;
		if(pfcp_header->message_type != PFCP_SESSION_SET_DELETION_REQUEST &&
				pfcp_header->message_type != PFCP_SESSION_SET_DELETION_RESPONSE)
			update_cli_stats((peer_address_t *) peer_addr,
					pfcp_hdr->message_type,
					(cli_cause == REQUESTACCEPTED) ? ACC : REJ, SX);
		else
			update_cli_stats((peer_address_t *) peer_addr,
					pfcp_hdr->message_type, SENT, SX);
	}

	/* Lawful-intercept post-processing over the session snapshot. */
	if ((tmp_sess != NULL) && (tmp_sess->li_sx_config_cnt > 0)) {
		process_event_li(tmp_sess, buf_rx, bytes_rx, pfcp_msg, encoded, peer_addr);
		free(tmp_sess);
		tmp_sess = NULL;
	}
#endif /* DP_BUILD */
	return 0;
}
#endif
|
nikhilc149/e-utran-features-bug-fixes | ulpc/d_admf/include/CpConfig.h | /*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CP_CONFIG_H_
#define __CP_CONFIG_H_
#include <vector>
#include "Common.h"
class CpConfig
{
	protected:
		/* List of CP (Control Plane) IP addresses currently configured */
		std::vector<std::string> vecCpConfig;

	public:
		CpConfig() {}

		/**
		 * @brief : Virtual method. Extended class needs to implement this method
		 * @param : uiAction, action can be add(1)/update(2)/delete(3)
		 * @param : strIpAddr, Ip-address of Cp
		 * @return : Returns 0 in case of Success, -1 otherwise
		 */
		virtual int8_t UpdateCpConfig(uint8_t uiAction,
				const std::string &strIpAddr) = 0;

		/**
		 * @brief : Accessor for the configured CP IP-address list
		 * @return : Mutable reference to the internal vector
		 */
		std::vector<std::string> &getVecCpConfig()
		{
			return vecCpConfig;
		}

		/**
		 * @brief : Replaces the configured CP IP-address list
		 * @param : cpConfig, new list of CP IP addresses
		 * Note: taken by const reference (was by value) to avoid an
		 * unnecessary copy of the vector; call sites are unaffected.
		 */
		void setVecCpConfig(const std::vector<std::string> &cpConfig)
		{
			vecCpConfig = cpConfig;
		}

		virtual ~CpConfig() {}
};
#endif /* __CP_CONFIG_H_ */
|
nikhilc149/e-utran-features-bug-fixes | ulpc/d_admf/include/UeTimerThrd.h | <filename>ulpc/d_admf/include/UeTimerThrd.h<gh_stars>0
/*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __UETIMERTHRD_H_
#define __UETIMERTHRD_H_
#include "emgmt.h"
#include "etevent.h"
#include "elogger.h"
#include "Common.h"
/*
 * Worker thread (EpcTools EThreadPrivate) that owns and services
 * per-UE timers for the D-ADMF.
 */
class EThreadUeTimer : public EThreadPrivate
{
public:
	EThreadUeTimer();

	/**
	 * @brief : EpcTools callback function on timer elapsed
	 * @param : *pTimer, reference to timer object
	 * @return : Returns nothing
	 */
	Void onTimer(EThreadEventTimer *pTimer);

	/**
	 * @brief : Registers the supplied UE timer with this thread
	 * NOTE(review): exact start/arming semantics depend on EUeTimer —
	 * confirm in the implementation file.
	 * @param : timer, UE timer object to be managed by this thread
	 * @return : Returns nothing
	 */
	Void InitTimer(EUeTimer &timer);

private:
	/* Active UE timers; presumably keyed by a timer/sequence id —
	 * TODO(review): confirm key semantics in the .cpp. */
	std::map <uint32_t, EUeTimer *> mapUeTimers;
};
#endif /* __UETIMERTHRD_H_ */
|
nikhilc149/e-utran-features-bug-fixes | dp/pipeline/epc_packet_framework.h | <filename>dp/pipeline/epc_packet_framework.h
/*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __EPC_PACKET_FRAMEWORK_H__
#define __EPC_PACKET_FRAMEWORK_H__
/**
* @file
* This file contains data structure definitions to describe Data Plane
* pipeline and function prototypes used to initialize pipeline.
*/
#include <rte_pipeline.h>
#include <rte_hash_crc.h>
extern uint64_t num_dns_processed;
/**
* RTE Log type.
*/
#define RTE_LOGTYPE_EPC RTE_LOGTYPE_USER1
/**
* Number of ports.
*/
#define NUM_SPGW_PORTS 2
/**
* Pipeline name size.
*/
#define PIPE_NAME_SIZE 80
/**
* S1U port id.
*/
#define S1U_PORT_ID 0
#define WEST_PORT_ID 0
/**
* SGI port id.
*/
#define SGI_PORT_ID 1
#define EAST_PORT_ID 1
/* Per worker macros for DDN */
/* Macro to specify size of DDN notify_ring */
#define NOTIFY_RING_SIZE 2048
/* Macro to specify size of DDN notify_ring */
#define DL_RING_CONTAINER_SIZE (2048 * 2)
#define DL_PKT_POOL_SIZE (1024 * 32)
#define DL_PKT_POOL_CACHE_SIZE 32
#define DL_PKTS_BUF_RING_SIZE 1024
/* TODO: Define the appropriate ring size based on the PPS value, Temp Set to 65K approx */
#define DL_PKTS_RING_SIZE (1 << 16)
#define UL_PKTS_RING_SIZE (1 << 16)
/* Borrowed from dpdk ip_frag_internal.c */
#define PRIME_VALUE 0xeaad8405
/**
 * @brief : DL Bearer Map key for hash lookup
 * Composite hash key: the UE IP address together with the rule id.
 */
struct dl_bm_key {
	/** Ue ip */
	ue_ip_t ue_ip;
	/** Rule id */
	uint32_t rid;
};
/**
 * @brief : Meta data used for directing packets to cores
 * Per-packet metadata carried between pipeline stages.
 */
struct epc_meta_data {
	/** pipeline output port ID */
	uint32_t port_id;
	/** UE IPv4 hash for load balancing */
	uint32_t ue_ipv4_hash;
	/** flag for DNS pkt */
	uint32_t dns;
	/* NOTE(review): 'ip_type_t' is a member name (not a type) despite
	 * the _t suffix; renaming would touch all users, so it is kept. */
	union {
		/** eNB IPv4 from GTP-U */
		uint32_t enb_ipv4;
		/** eNB IPv6 from GTP-U */
		struct in6_addr enb_ipv6;
	} ip_type_t;
	/** Teid from GTP-U */
	uint32_t teid;
	/** DL Bearer Map key */
	struct dl_bm_key key;
};
/*
* Defines the frequency when each pipeline stage should be flushed.
* For example,
* 1 = flush the pipeline stage each time it is executed
* 4 = flush the pipeline stage every four times it is executed
* Generally "1" gives the best value for both performance
* and latency, but under
* certain circumstances (i.e. very small packets resulting in
* very high packet rate)
* a larger number may provide better overall CPU efficiency.
*/
#define EPC_PIPELINE_FLUSH_MAX 1
/*
* Can only support as many lcores as the number of ports allowed in
* a pipeline block
*/
#define DP_MAX_LCORE RTE_PIPELINE_PORT_OUT_MAX
/** UL pipeline parameters - Per input port */
/* NOTE(review): these are tentative definitions in a header — every
 * translation unit including this file emits a common symbol. This
 * links only under -fcommon (the pre-GCC-10 default). Consider making
 * these 'extern' here with a single definition in one .c file. */
uint32_t dl_ndata_pkts;
uint32_t ul_ndata_pkts;
uint32_t ul_arp_pkt;
uint32_t ul_gtpu_pkt;
uint32_t ul_pkts_nbrst;
uint32_t ul_pkts_nbrst_prv;
/**
 * @brief : Maintains epc uplink parameters
 * Per-input-port runtime state of the uplink pipeline: pipeline
 * handles, inter-stage rings/pools and packet counters.
 */
struct epc_ul_params {
	/** Count since last flush */
	int flush_count;
	/** Number of pipeline runs between flush */
	int flush_max;
	/** RTE pipeline params */
	struct rte_pipeline_params pipeline_params;
	/** Input port id */
	uint32_t port_in_id;
	/** Output port IDs [0]-> load balance, [1]-> master
	 * control thr
	 */
	uint32_t port_out_id[2];
	/** Table ID - ports connect to this table */
	uint32_t table_id;
	/** Notify port id */
	uint32_t notify_port;
	/** RTE pipeline */
	struct rte_pipeline *pipeline;
	/** pipeline name */
	char name[PIPE_NAME_SIZE];
	/** Number of dns packets cloned by this worker */
	uint64_t num_dns_packets;
	/** Holds a set of rings to be used for downlink data buffering */
	struct rte_ring *dl_ring_container;
	/** Number of DL rings currently created */
	uint32_t num_dl_rings;
	/** For notification of modify_session so that buffered packets
	 * can be dequeued*/
	struct rte_ring *notify_ring;
	/** Pool for notification msg pkts */
	struct rte_mempool *notify_msg_pool;
	/** Holds number of packets received by uplink */
	uint32_t pkts_in;
	/** Holds number of packets sent out after uplink processing */
	uint32_t pkts_out;
	/** Holds number of echo packets received by uplink */
	uint32_t pkts_echo;
	/** Holds number of router solicitation packets received by uplink */
	uint32_t pkts_rs_in;
	/** Holds number of router advertisement packets sent out after uplink processed */
	uint32_t pkts_rs_out;
	/** Holds number of error indication packets received */
	uint32_t pkts_err_in;
	/** Holds number of error indication packets sent out */
	uint32_t pkts_err_out;
} __rte_cache_aligned;
/**
 * @brief : Per-port uplink worker callback invoked from the pipeline on a
 * burst of packets.
 * NOTE(review): handlers appear to signal per-packet drops via *pkts_mask —
 * confirm against the registered worker implementations.
 */
typedef int (*epc_ul_handler) (struct rte_pipeline*, struct rte_mbuf **pkts,
	uint32_t n, uint64_t *pkts_mask, int wk_index);
/** DL pipeline parameters - Per input port */
/* NOTE(review): tentative definitions in a header; each including .c
 * emits a common symbol, which links only under -fcommon (pre-GCC-10
 * default). Consider 'extern' here plus one definition in a .c file. */
uint32_t dl_arp_pkt;
uint32_t dl_sgi_pkt;
uint32_t dl_pkts_nbrst;
uint32_t dl_pkts_nbrst_prv;
/**
 * @brief : Maintains epc downlink parameters
 * Per-input-port runtime state of the downlink pipeline: pipeline
 * handles, inter-stage rings/pools and packet/DDN counters.
 */
struct epc_dl_params {
	/** Count since last flush */
	int flush_count;
	/** Number of pipeline runs between flush */
	int flush_max;
	/** RTE pipeline params */
	struct rte_pipeline_params pipeline_params;
	/** Input port id */
	uint32_t port_in_id;
	/** Output port IDs [0]-> load balance, [1]-> master
	 * control thr
	 */
	uint32_t port_out_id[2];
	/** Table ID - ports connect to this table */
	uint32_t table_id;
	/** Notify port id */
	uint32_t notify_port;
	/** RTE pipeline */
	struct rte_pipeline *pipeline;
	/** pipeline name */
	char name[PIPE_NAME_SIZE];
	/** Number of dns packets cloned by this worker */
	uint64_t num_dns_packets;
	/** Holds a set of rings to be used for downlink data buffering */
	struct rte_ring *dl_ring_container;
	/** Number of DL rings currently created */
	uint32_t num_dl_rings;
	/** For notification of modify_session so that buffered packets
	 * can be dequeued*/
	struct rte_ring *notify_ring;
	/** Pool for notification msg pkts */
	struct rte_mempool *notify_msg_pool;
	/** Holds number of packets received by downlink */
	uint32_t pkts_in;
	/** Holds number of packets sent out after downlink processing */
	uint32_t pkts_out;
	/** Holds number of packets queued for until DDN ACK not received */
	uint32_t ddn_buf_pkts;
	/** Holds number of ddn request sends */
	uint32_t ddn;
	/** Holds number of error indication packets received */
	uint32_t pkts_err_in;
	/** Holds number of error indication packets sent out */
	uint32_t pkts_err_out;
} __rte_cache_aligned;
/**
 * @brief : Per-port downlink worker callback invoked from the pipeline on a
 * burst of packets.
 * NOTE(review): handlers appear to signal per-packet drops via *pkts_mask —
 * confirm against the registered worker implementations.
 */
typedef int (*epc_dl_handler) (struct rte_pipeline*, struct rte_mbuf **pkts,
	uint32_t n, uint64_t *pkts_mask, int wk_index);
/* defines max number of pipelines per core */
#define EPC_PIPELINE_MAX 4
/**
 * @brief : pipeline function
 * @param : param, opaque argument passed through to the pipeline function
 * @return : Returns nothing
 */
typedef void pipeline_func_t(void *param);
/**
 * @brief : Maintains pipeline function pointer and argument
 */
struct pipeline_launch {
	pipeline_func_t *func; /* pipeline function called */
	void *arg; /* pipeline function argument */
};
/**
 * @brief : Maintains epc lcore configuration parameter
 */
struct epc_lcore_config {
	int allocated; /* indicates the number of pipelines enabled */
	struct pipeline_launch launch[EPC_PIPELINE_MAX];
};
/**
 * @brief : Maintains epc parameter
 * Top-level application configuration: lcore-to-pipeline assignments,
 * port ids, inter-stage rings, burst sizes and per-port pipeline params.
 */
struct epc_app_params {
	/* CPU cores */
	struct epc_lcore_config lcores[DP_MAX_LCORE];
	int core_mct;
	int core_iface;
	int core_stats;
	int core_spns_dns;
	int core_ul[NUM_SPGW_PORTS];
	int core_dl[NUM_SPGW_PORTS];
	/* NGCORE_SHRINK::NUM_WORKER = 1 */
	unsigned num_workers;
	unsigned worker_cores[DP_MAX_LCORE];
	unsigned worker_core_mapping[DP_MAX_LCORE];
	/* Ports */
	uint32_t ports[NUM_SPGW_PORTS];
	uint32_t n_ports;
	uint32_t port_rx_ring_size;
	uint32_t port_tx_ring_size;
	/* Rx rings */
	struct rte_ring *epc_lb_rx[NUM_SPGW_PORTS];
	struct rte_ring *epc_mct_rx[NUM_SPGW_PORTS];
	struct rte_ring *epc_mct_spns_dns_rx;
	struct rte_ring *epc_work_rx[DP_MAX_LCORE][NUM_SPGW_PORTS];
	/* Tx rings */
	struct rte_ring *ring_tx[DP_MAX_LCORE][NUM_SPGW_PORTS];
	uint32_t ring_rx_size;
	uint32_t ring_tx_size;
	/* Burst sizes */
	uint32_t burst_size_rx_read;
	uint32_t burst_size_rx_write;
	uint32_t burst_size_worker_read;
	uint32_t burst_size_worker_write;
	uint32_t burst_size_tx_read;
	uint32_t burst_size_tx_write;
	/* Pipeline params */
	struct epc_ul_params ul_params[NUM_SPGW_PORTS];
	struct epc_dl_params dl_params[NUM_SPGW_PORTS];
} __rte_cache_aligned;
extern struct epc_app_params epc_app;
/**
* @brief : Adds pipeline function to core's list of pipelines to run
* @param : func, Function to run
* @param : arg, Argument to pipeline function
* @param : core, Core to run pipeline function on
* @return : Returns nothing
*/
void epc_alloc_lcore(pipeline_func_t func, void *arg, int core);
/**
* @brief : Initializes arp icmp pipeline
* @param : No param
* @return : Returns nothing
*/
void epc_arp_init(void);
/**
* @brief : Returns the mac address for an IP address, currently works only for directly
* connected neighbours
* @param : ipaddr, IP address to lookup
* @param : phy_port, Identifies the port to which the IP address is connected to
* @param : hw_addr, Ethernet address returned
* @param : nhip, next-hop IP address
* Note - Same as ip addr (for now)
* @return : Returns 0 in case of success , -1 otherwise
*/
int arp_icmp_get_dest_mac_address(const uint32_t ipaddr,
const uint32_t phy_port,
struct ether_addr *hw_addr, uint32_t *nhip);
/**
* @brief : ARP/ICMP pipeline function
* @param : arg, unused parameter
* @return : Returns nothing
*/
void epc_arp(__rte_unused void *arg);
void process_li_data();
/**
* @brief : Initializes DNS processing resources
* @param : No param
* @return : Returns nothing
*/
void epc_spns_dns_init(void);
/**
* @brief : Initialize EPC packet framework
* @param : s1u_port_id, Port id for s1u interface assigned by rte
* @param : sgi_port_id, Port id for sgi interface assigned by rte
* @return : Returns nothing
*/
void epc_init_packet_framework(uint8_t east_port_id, uint8_t west_port_id);
/**
* @brief : Launches data plane threads to execute pipeline funcs
* @param : No param
* @return : Returns nothing
*/
void packet_framework_launch(void);
/**
* @brief : Initializes UL pipeline
* @param : param, Pipeline parameters passed on to pipeline at runtime
* @param : core, Core to run Rx pipeline, used to warn if this core and the NIC port_id
* are in different NUMA domains
* @param : in_port_id, Input Port ID
* @param : out_port_id, Input Port ID & Output Port ID
* @return : Returns nothing
*/
void epc_ul_init(struct epc_ul_params *param, int core, uint8_t in_port_id, uint8_t out_port_id);
/**
* @brief : Initializes DL pipeline
* @param : param, Pipeline parameters passed on to pipeline at runtime
* @param : core, Core to run Rx pipeline, used to warn if this core and the NIC port_id
* are in different NUMA domains
* @param : in_port_id, Input Port ID
* @param : out_port_id, Input Port ID & Output Port ID
* @return : Returns nothing
*
*/
void epc_dl_init(struct epc_dl_params *param, int core, uint8_t in_port_id, uint8_t out_port_id);
/**
* @brief : UL pipeline function
* @param : args, Pipeline parameters
* @return : Returns nothing
*/
void epc_ul(void *args);
/**
* @brief : DL pipeline function
* @param : args, Pipeline parameters
* @return : Returns nothing
*/
void epc_dl(void *args);
/**
* @brief : Registers uplink worker function that is executed from the pipeline
* @param : f, Function handler for packet processing
* @param : port, Port to register the worker function for
* @return : Returns nothing
*/
void register_ul_worker(epc_ul_handler f, int port);
/**
* @brief : Registers downlink worker function that is executed from the pipeline
* @param : f, Function handler for packet processing
* @param : port, Port to register the worker function for
* @return : Returns nothing
*/
void register_dl_worker(epc_dl_handler f, int port);
#endif /* __EPC_PACKET_FRAMEWORK_H__ */
|
nikhilc149/e-utran-features-bug-fixes | cp/cp_timer.h | /*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __CP_TIMER_H
#define __CP_TIMER_H
#include "../cp_dp_api/vepc_cp_dp_api.h"
#include "pfcp_enum.h"
extern pfcp_config_t config;
extern struct rte_hash *timer_by_teid_hash;
extern struct rte_hash *ddn_by_seid_hash;
extern struct rte_hash *dl_timer_by_teid_hash;
extern struct rte_hash *pfcp_rep_by_seid_hash;
extern struct rte_hash *thrtl_timer_by_nodeip_hash;
extern struct rte_hash *thrtl_ddn_count_hash;
extern struct rte_hash *buffered_ddn_req_hash;
#define ZERO 0
#define ONE 1
#define TWO 2
#define THREE 3
#define FOUR 4
#define SEVEN 7
/* Values in miliseconds */
#define TWOSEC (2 * 1000)
#define ONEMINUTE (60 * 1000)
#define TENMINUTE (600 * 1000)
#define ONEHOUR (3600 * 1000)
#define TENHOUR (36000 * 1000)
/* @brief : ue level timer for ddn flow */
typedef struct ue_level_timer_t {
	gstimerinfo_t pt; /* transmit Timer */
	uint64_t sess_id; /* Session id */
	_timer_t start_time; /* start timestamp */
}ue_level_timer;
/* @brief : throttling timer, one per peer MME node address */
typedef struct throttling_timer_t {
	gstimerinfo_t pt; /* transmit Timer */
	uint8_t throttle_factor; /* Throttling factor */
	node_address_t *node_ip; /* mme ip address */
	_timer_t start_time; /* start timestamp */
}throttle_timer;
/* @brief : counters for throttling */
typedef struct throttling_count_t{
	float prev_ddn_eval; /* number of previously evaluated ddn */
	float prev_ddn_discard; /* number of previously discarded ddn */
	sess_info *sess_ptr; /* head pointer to sess_info list */
}thrtle_count;
/**
* @brief : sends pfcp session modification request with drop flag for ddn.
* @param : pdn, pointer of pdn
* @return : returns nothing
*/
void
send_pfcp_sess_mod_req_for_ddn(pdn_connection *pdn);
/**
* @brief : fill the session info structure.
* @param : thrtl_cnt, pointer of throttling count
* @param : sess_id, session id
* @param : pdr_count, number of pdrs
* @param : pdr, array of pdr ids
* @return : Returns nothing.
*/
void fill_sess_info_id(thrtle_count *thrtl_cnt, uint64_t sess_id, uint8_t pdr_count, pfcp_pdr_id_ie_t *pdr);
/**
* @brief : Get the throttle count
* @param : nodeip, ip address of mme
* @param : is_mod, operation to be performed
* @return : Returns throttle count pointer if success else null
*/
thrtle_count * get_throtle_count(node_address_t *nodeip, uint8_t is_mod);
/**
* @brief : insert into a new node in linked list.
* @param : head , head pointer in linked list
* @param : new_node, new node pointer to be inserted
* @return : Returns head struture pointer if success else null.
*/
sess_info * insert_into_sess_info_list(sess_info *head, sess_info *new_node);
/**
* @brief : delete all nodes from linked list.
* @param : head, head pointer in linked list
* @return : Returns nothing
*/
void delete_from_sess_info_list(sess_info *head);
/**
 * @brief : Search into linked list with sess_id.
* @param : head, head pointer of linked list
* @param : sess_id, session id need to be searched
* @return : Returns sess_info pointer on success, null on failure.
*/
sess_info * search_into_sess_info_list(sess_info * head, uint64_t sess_id);
/**
* @brief : Removes the throttle entry for particular session.
* @param : context, ue_context
* @param : sess_id, session id need to be searched
* @return : Returns nothing.
*/
void
delete_sess_in_thrtl_timer(ue_context *context, uint64_t sess_id);
/**
* @brief : fill the ue level timer structure.
* @param : seid, session id
* @return : Returns ue_level struture pointer if success else null.
*/
ue_level_timer *fill_timer_entry(uint64_t seid);
/**
* @brief : callback function for ue level timer.
* @param : ti, gstimerinfo_t
* @param : data, constant void pointer
* @return : Returns nothing.
*/
void ddn_timer_callback(gstimerinfo_t *ti, const void *data_t );
/**
* @brief : starts ddn timer entry .
* @param : hash, pointer of rte_hash to store timer_entry
* @param : seid, session id
* @param : delay_value, delay_value
* @param : cb, callback function to be called after timer expiry
* @return : Returns nothing.
*/
void start_ddn_timer_entry(struct rte_hash *hash, uint64_t seid, int delay_value, gstimercallback cb);
/**
* @brief : Removes session entry fron session hash.
* @param : seid, session id
* @param : sess_hash, pointer to rte_hash
* @return : Returns nothing.
*/
void delete_entry_from_sess_hash(uint64_t seid, struct rte_hash *sess_hash);
/**
* @brief : cleanups ddn timer entry
* @param : hash, pointer of rte_hash to store timer_entry
* @param : teid, teid value
* @param : sess_hash, pointer of rte_hash to store session
* @return : Returns extend timer value if exists otherwise returns 0.
*/
uint8_t delete_ddn_timer_entry(struct rte_hash *hash, uint32_t teid, struct rte_hash *sess_hash);
/**
* @brief : callback function for dl buffering timer.
* @param : ti, gstimerinfo_t
* @param : data, constant void pointer
* @return : Returns nothing.
*/
void dl_buffer_timer_callback(gstimerinfo_t *ti, const void *data_t );
/**
* @brief : callback function for throttling timer.
* @param : ti, gstimerinfo_t
* @param : data, constant void pointer
* @return : Returns nothing.
*/
void thrtle_timer_callback(gstimerinfo_t *ti, const void *data_t );
/**
* @brief : starts throttling timer entry
* @param : node_ip, node ip address
* @param : thrtlng_delay_val, delay value
* @param : thrtl_fact, throttling factor
* @return : Returns nothing.
*/
void start_throttle_timer(node_address_t *node_ip, int thrtlng_delay_val, uint8_t thrtl_fact);
/**
* @brief : cleanups throttling timer
* @param : node_ip, node ip address
* @return : Returns remaining timer value if exist otherwise return 0.
*/
uint8_t delete_thrtle_timer(node_address_t *node_ip);
/**
* @brief : Returns peer data struct address and fill data.
* @param : iface, source interface type
* @param : peer_addr, peer node address
* @param : buf, holds timer data
* @param : buf_len, total length of data
* @param : itr, request_tries value in pfcp config
* @param : teid, teid value
* @param : ebi_index
* @return : Returns pointer to filled timer entry structure
*/
peerData *
fill_timer_entry_data(enum source_interface iface, peer_addr_t *peer_addr,
uint8_t *buf, uint16_t buf_len, uint8_t itr, uint32_t teid, int ebi_index );
/**
* @brief : add timer entry
* @param : conn_data, peer node connection information
* @param : timeout_ms, timeout
* @param : cb, timer callback
* @return : Returns true or false
*/
bool
add_timer_entry(peerData *conn_data, uint32_t timeout_ms,
gstimercallback cb);
/**
* @brief : delete time entry
* @param : teid, teid value
* @return : Returns nothing
*/
void
delete_timer_entry(uint32_t teid);
/**
* @brief : timer callback
* @param : ti, timer information
* @param : data_t, Peer node connection information
* @return : Returns nothing
*/
void
timer_callback(gstimerinfo_t *ti, const void *data_t);
/**
* @brief : fills error response
* @param : data, Peer node connection information
* @return : Returns nothing
*/
void association_fill_error_response(peerData *data);
/**
* @brief : timer callback for association request
* @param : ti, timer information
* @param : data_t, Peer node connection information
* @return : Returns nothing
*/
void
association_timer_callback(gstimerinfo_t *ti, const void *data_t);
/**
* @brief : Fills and adds timer entry, and starts periodic timer for gtpv2c messages
* @param : teid, teid value
* @param : peer_addr, peer node address
* @param : buf, holds timer data
* @param : buf_len, total length of data
* @param : ebi_index
* @param : iface, source interface
* @param : cp_mode, cp mode type[SGWC/SAEGWC/PGWC]
* @return : Returns nothing
*/
void
add_gtpv2c_if_timer_entry(uint32_t teid, peer_addr_t *peer_addr,
uint8_t *buf, uint16_t buf_len, int ebi_index , enum source_interface iface,
uint8_t cp_mode);
/**
* @brief : Fills and adds timer entry, and starts periodic timer for pfcp message
* @param : teid, teid value
* @param : peer_addr, peer node address
* @param : buf, holds timer data
* @param : buf_len, total length of data
* @param : ebi_index
* @return : Returns nothing
*/
void
add_pfcp_if_timer_entry(uint32_t teid, peer_addr_t *peer_addr,
uint8_t *buf, uint16_t buf_len, int ebi_index );
/**
* @brief : Deletes pfcp timer entry
* @param : teid, teid value
* @param : ebi_index
* @return : Returns nothing
*/
void
delete_pfcp_if_timer_entry(uint32_t teid, int ebi_index );
/**
* @brief : Deletes gtp timer entry
* @param : teid, teid value
* @param : ebi_index
* @return : Returns nothing
*/
void
delete_gtpv2c_if_timer_entry(uint32_t teid, int ebi_index);
/**
* @brief : Deletes association timer entry
* @param : data, peerData pointer
* @return : Returns nothing
*/
void
delete_association_timer(peerData *data);
#endif
|
nikhilc149/e-utran-features-bug-fixes | dp/up_sess_table.c | <reponame>nikhilc149/e-utran-features-bug-fixes
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define _GNU_SOURCE /* Expose declaration of tdestroy() */
#include "util.h"
#include "up_acl.h"
extern struct rte_hash *sess_ctx_by_sessid_hash;
extern struct rte_hash *sess_by_teid_hash;
extern struct rte_hash *sess_by_ueip_hash;
extern struct rte_hash *pdr_by_id_hash;
extern struct rte_hash *far_by_id_hash;
extern struct rte_hash *qer_by_id_hash;
extern struct rte_hash *urr_by_id_hash;
extern struct rte_hash *timer_by_id_hash;
extern struct rte_hash *qer_rule_hash;
/* Retrieve the UL session context stored against the given TEID-based key. */
int
iface_lookup_uplink_data(struct ul_bm_key *key,
		void **value)
{
	int ret;

	ret = rte_hash_lookup_data(sess_by_teid_hash, key, value);
	return ret;
}
/* Retrieve UL session contexts for a burst of TEID-based keys;
 * hit_mask reports which lookups succeeded. */
int
iface_lookup_uplink_bulk_data(const void **key, uint32_t n,
		uint64_t *hit_mask, void **value)
{
	int ret;

	ret = rte_hash_lookup_bulk_data(sess_by_teid_hash, key, n,
			hit_mask, value);
	return ret;
}
/* Retrieve the DL session context stored against the given UE-IP-based key. */
int
iface_lookup_downlink_data(struct dl_bm_key *key,
		void **value)
{
	int ret;

	ret = rte_hash_lookup_data(sess_by_ueip_hash, key, value);
	return ret;
}
/* Retrieve DL session contexts for a burst of UE-IP-based keys;
 * hit_mask reports which lookups succeeded. */
int
iface_lookup_downlink_bulk_data(const void **key, uint32_t n,
		uint64_t *hit_mask, void **value)
{
	int ret;

	ret = rte_hash_lookup_bulk_data(sess_by_ueip_hash, key, n,
			hit_mask, value);
	return ret;
}
|
nikhilc149/e-utran-features-bug-fixes | pfcp_messages/pfcp_up_sess.c | <filename>pfcp_messages/pfcp_up_sess.c
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "up_acl.h"
#include "pfcp_enum.h"
#include "pfcp_set_ie.h"
#include "pfcp_up_llist.h"
#include "pfcp_util.h"
#include "pfcp_association.h"
#include "li_interface.h"
#include "gw_adapter.h"
#include "seid_llist.h"
#include "pfcp_up_sess.h"
#include "../cp_dp_api/predef_rule_init.h"
#include "csid_struct.h"
#define OUT_HDR_DESC_VAL 1
extern uint16_t dp_comm_port;
extern struct in_addr dp_comm_ip;
extern struct in6_addr dp_comm_ipv6;
extern struct in_addr cp_comm_ip;
extern int clSystemLog;
#ifdef USE_CSID
/* TEMP fill the FQ-CSID form here */
/**
 * @brief : Create and Fill the FQ-CSIDs
 * Populates a PFCP FQ-CSID IE from the locally stored CSID set:
 * node-id type + address, then the list of CSIDs, then the IE header
 * whose length is flags byte + address bytes + 2 bytes per CSID.
 * @param : fq_csid, IE to be filled
 * @param : csids, local CSID list with owning node address
 * @return : Returns nothing
 */
static void
est_set_fq_csid_t(pfcp_fqcsid_ie_t *fq_csid, fqcsid_t *csids)
{
	uint16_t payload_len = 0;

	fq_csid->number_of_csids = csids->num_csid;

	/* Node address: IPv4 (4 bytes) or IPv6 (16 bytes) */
	if (csids->node_addr.ip_type == IPV4_TYPE) {
		fq_csid->fqcsid_node_id_type = IPV4_GLOBAL_UNICAST;
		memcpy(&(fq_csid->node_address),
				&(csids->node_addr.ipv4_addr), IPV4_SIZE);
		payload_len += IPV4_SIZE;
	} else {
		fq_csid->fqcsid_node_id_type = IPV6_GLOBAL_UNICAST;
		memcpy(&(fq_csid->node_address),
				&(csids->node_addr.ipv6_addr), IPV6_ADDRESS_LEN);
		payload_len += IPV6_ADDRESS_LEN;
	}

	/* Copy each local CSID into the IE */
	uint8_t cnt = 0;
	while (cnt < csids->num_csid) {
		fq_csid->pdn_conn_set_ident[cnt] = csids->local_csid[cnt];
		cnt++;
	}

	/* One byte for the flags octet */
	payload_len += PRESENT;
	pfcp_set_ie_header(&(fq_csid->header), PFCP_IE_FQCSID,
			(2 * (fq_csid->number_of_csids)) + payload_len);
}
/**
 * @brief : Fills fqcsid in pfcp session modification response
 * Copies the session's UP FQ-CSID (when present) into the response IE
 * and logs each CSID with its node address.
 * @param : pfcp_sess_mod_rsp, response to be filled
 * @param : sess, sess information
 * @return : Return 0 on success, -1 otherwise
 */
static int8_t
fill_fqcsid_sess_mod_rsp(pfcp_sess_mod_rsp_t *pfcp_sess_mod_rsp, pfcp_session_t *sess)
{
	fqcsid_t *up_csids = sess->up_fqcsid;

	/* Nothing to fill when no UP FQ-CSID is stored for this session */
	if (up_csids == NULL || up_csids->num_csid == 0)
		return 0;

	est_set_fq_csid_t(&pfcp_sess_mod_rsp->up_fqcsid, up_csids);

	for (uint8_t i = 0; i < pfcp_sess_mod_rsp->up_fqcsid.number_of_csids; i++) {
		if (pfcp_sess_mod_rsp->up_fqcsid.fqcsid_node_id_type == IPV4_GLOBAL_UNICAST) {
			uint32_t node_addr = 0;
			memcpy(&node_addr, pfcp_sess_mod_rsp->up_fqcsid.node_address, IPV4_SIZE);
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Filled FQ-CSID in Sess MOD Resp, inx:%u,"
					"CSID:%u, Node IPv4 Addr:"IPV4_ADDR"\n",
					LOG_VALUE, i, pfcp_sess_mod_rsp->up_fqcsid.pdn_conn_set_ident[i],
					IPV4_ADDR_HOST_FORMAT(node_addr));
		} else {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Filled FQ-CSID in Sess MOD Resp, inx:%u,"
					"CSID:%u, Node IPv6 Addr:"IPv6_FMT"\n",
					LOG_VALUE, i, pfcp_sess_mod_rsp->up_fqcsid.pdn_conn_set_ident[i],
					IPv6_PRINT(IPv6_CAST(pfcp_sess_mod_rsp->up_fqcsid.node_address)));
		}
	}
	return 0;
}
/**
 * @brief : Fills fqcsid in pfcp session establishment response
 * @param : pfcp_sess_est_rsp, response message to be filled
 * @param : sess, sess information
 * @return : Return 0 on success, -1 otherwise
 */
int8_t
fill_fqcsid_sess_est_rsp(pfcp_sess_estab_rsp_t *pfcp_sess_est_rsp, pfcp_session_t *sess)
{
	fqcsid_t *up_csids = sess->up_fqcsid;

	/* Nothing to encode when the UP has no FQ-CSID allocated */
	if (up_csids == NULL || !up_csids->num_csid)
		return 0;

	/* Set SGW/PGW FQ-CSID */
	est_set_fq_csid_t(&pfcp_sess_est_rsp->up_fqcsid, up_csids);

	/* Trace every CSID that went into the IE */
	for (uint8_t inx = 0; inx < pfcp_sess_est_rsp->up_fqcsid.number_of_csids; inx++) {
		if (pfcp_sess_est_rsp->up_fqcsid.fqcsid_node_id_type == IPV6_GLOBAL_UNICAST) {
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Filled FQ-CSID in Sess EST Resp, inx:%u,"
					"CSID:%u, Node IPv6 Addr:"IPv6_FMT"\n",
					LOG_VALUE, inx, pfcp_sess_est_rsp->up_fqcsid.pdn_conn_set_ident[inx],
					IPv6_PRINT(IPv6_CAST((pfcp_sess_est_rsp->up_fqcsid.node_address))));
		} else {
			uint32_t node_addr = 0;
			memcpy(&node_addr, pfcp_sess_est_rsp->up_fqcsid.node_address, IPV4_SIZE);
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Filled FQ-CSID in Sess EST Resp, inx:%u,"
					"CSID:%u, Node IPv4 Addr:"IPV4_ADDR"\n",
					LOG_VALUE, inx, pfcp_sess_est_rsp->up_fqcsid.pdn_conn_set_ident[inx],
					IPV4_ADDR_HOST_FORMAT(node_addr));
		}
	}
	return 0;
}
/**
 * @brief : Derive a local CSID for the session's peer-node tuple
 *          (CP, UP, west-bound eNB/SGWU, east-bound PGWU) and link it
 *          with every peer FQ-CSID stored on the session.
 * @param : sess, UP session context
 * @param : cp_node_addr, CP node address received in the request
 * @return : Index of the already-linked CSID when the generated CSID
 *           matched an existing local entry, 0 on success, -1 otherwise.
 *
 * Fix: restores the corrupted "&current->" / "&current_t->" expressions
 * (they appeared as mojibake and would not compile).
 */
int
fill_peer_node_info_t(pfcp_session_t *sess, node_address_t *cp_node_addr)
{
	int16_t csid = 0;
	csid_key peer_info_t = {0};

	/* SGWC/PGWC/SAEGWC FQ-CSID */
	memcpy(&peer_info_t.cp_ip, cp_node_addr, sizeof(node_address_t));
	(peer_info_t.cp_ip.ip_type == IPV6_TYPE) ?
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Peer Node CP IPv6 Address: "IPv6_FMT"\n",
				LOG_VALUE, IPv6_PRINT(IPv6_CAST(peer_info_t.cp_ip.ipv6_addr))):
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Peer Node CP IPv4 Address: "IPV4_ADDR"\n",
				LOG_VALUE, IPV4_ADDR_HOST_FORMAT(peer_info_t.cp_ip.ipv4_addr));

	/* Fill the enodeb/SGWU IP */
	{
		pfcp_session_datat_t *current = NULL;
		current = sess->sessions;

		while (current != NULL) {
			if (current->pdrs != NULL) {
				/* A PDR whose source interface is CORE faces the
				 * west-bound (downlink) peer. */
				if ((current->pdrs)->pdi.src_intfc.interface_value == CORE) {
					memcpy(&peer_info_t.wb_peer_ip,
							&current->wb_peer_ip_addr, sizeof(node_address_t));
					(current->wb_peer_ip_addr.ip_type == IPV6_TYPE) ?
						clLog(clSystemLog, eCLSeverityDebug,
								LOG_FORMAT"West Bound Peer Node IPv6 Address: "IPv6_FMT"\n",
								LOG_VALUE, IPv6_PRINT(IPv6_CAST(peer_info_t.wb_peer_ip.ipv6_addr))):
						clLog(clSystemLog, eCLSeverityDebug,
								LOG_FORMAT"West Bound Peer Node IPv4 Address: "IPV4_ADDR"\n",
								LOG_VALUE, IPV4_ADDR_HOST_FORMAT(peer_info_t.wb_peer_ip.ipv4_addr));
					break;
				}
			}
			current = current->next;
		}
	}

	/* SGWU and PGWU peer node info */
	memcpy(&peer_info_t.up_ip,
			&(sess->up_fqcsid)->node_addr, sizeof(node_address_t));
	(peer_info_t.up_ip.ip_type == IPV6_TYPE) ?
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"User-Plane Node IPv6 Address: "IPv6_FMT"\n",
				LOG_VALUE, IPv6_PRINT(IPv6_CAST(peer_info_t.up_ip.ipv6_addr))):
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"User-Plane Node IPv4 Address: "IPV4_ADDR"\n",
				LOG_VALUE, IPV4_ADDR_HOST_FORMAT(peer_info_t.up_ip.ipv4_addr));

	/* PGWU peer node Address */
	{
		pfcp_session_datat_t *current_t = NULL;
		current_t = sess->sessions;

		while (current_t != NULL) {
			if (current_t->pdrs != NULL) {
				/* A PDR whose source interface is ACCESS faces the
				 * east-bound (uplink) peer. */
				if ((current_t->pdrs)->pdi.src_intfc.interface_value == ACCESS) {
					memcpy(&peer_info_t.eb_peer_ip,
							&current_t->eb_peer_ip_addr, sizeof(node_address_t));
					break;
				}
			}
			current_t = current_t->next;
		}
		(peer_info_t.eb_peer_ip.ip_type == IPV6_TYPE) ?
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"East Bound Peer Node IPv6 Address: "IPv6_FMT"\n",
					LOG_VALUE, IPv6_PRINT(IPv6_CAST(peer_info_t.eb_peer_ip.ipv6_addr))):
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"East Bound Peer Node IPv4 Address: "IPV4_ADDR"\n",
					LOG_VALUE, IPV4_ADDR_HOST_FORMAT(peer_info_t.eb_peer_ip.ipv4_addr));
	}

	/* Get local csid for set of peer node */
	csid = get_csid_entry(&peer_info_t);
	if (csid < 0) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to assinged CSID..\n", LOG_VALUE);
		return -1;
	}

	/* Update the local csid into the UE context */
	uint8_t match = 0;
	for (uint8_t itr = 0; itr < (sess->up_fqcsid)->num_csid; itr++) {
		if ((sess->up_fqcsid)->local_csid[itr] == csid){
			match = 1;
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"CSID not generated, matched with exsiting CSID:%u\n",
					LOG_VALUE, csid);
			/* TODO: Validate it */
			/* Aleready Linked CSID */
			return itr;
		}
	}

	if (!match) {
		(sess->up_fqcsid)->local_csid[(sess->up_fqcsid)->num_csid++] =
			csid;
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"CSID Generated, Added in UP_FQCSID with CSID value:%u\n",
				LOG_VALUE, csid);
	}

	/* Link with eNB/SGWU node addr and local csid */
	if (is_present(&peer_info_t.wb_peer_ip)) {
		fqcsid_t *tmp = NULL;
		(peer_info_t.wb_peer_ip.ip_type == IPV6_TYPE) ?
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"West bound eNB/SGWU/WestBound Node IPv6 Addr:"IPv6_FMT"\n",
					LOG_VALUE, IPv6_PRINT(IPv6_CAST(peer_info_t.wb_peer_ip.ipv6_addr))):
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"West bound eNB/SGWU/WestBound Node IPv4 Addr:"IPV4_ADDR"\n",
					LOG_VALUE, IPV4_ADDR_HOST_FORMAT(peer_info_t.wb_peer_ip.ipv4_addr));

		/* Stored the SGW CSID by eNB/SGWU/West Bound Node address */
		tmp = get_peer_addr_csids_entry(&peer_info_t.wb_peer_ip,
				ADD_NODE);
		if (tmp == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error: %s \n", LOG_VALUE,
					strerror(errno));
			return -1;
		}
		memcpy(&tmp->node_addr,
				&peer_info_t.wb_peer_ip, sizeof(node_address_t));

		/* Append the CSID only if it is not already recorded */
		if (!tmp->num_csid) {
			tmp->local_csid[tmp->num_csid++] = csid;
		} else {
			uint8_t found = 0;
			for (uint8_t itr = 0; itr < tmp->num_csid; itr++) {
				if (tmp->local_csid[itr] == csid){
					found = 1;
					break;
				}
			}
			if (!found) {
				tmp->local_csid[tmp->num_csid++] = csid;
			}
		}

		if (sess->wb_peer_fqcsid == NULL) {
			sess->wb_peer_fqcsid = rte_zmalloc_socket(NULL, sizeof(fqcsid_t),
					RTE_CACHE_LINE_SIZE, rte_socket_id());
			if (sess->wb_peer_fqcsid == NULL) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Failed to allocate the memory for fqcsids entry\n",
						LOG_VALUE);
				return -1;
			}
		}

		/* Add the CSID in the Session List */
		(sess->wb_peer_fqcsid)->local_csid[(sess->wb_peer_fqcsid)->num_csid++] = csid;
		memcpy(&(sess->wb_peer_fqcsid)->node_addr,
				&peer_info_t.wb_peer_ip, sizeof(node_address_t));

		/* LINK West bound CSID with local CSID */
		if (link_peer_csid_with_local_csid(sess->wb_peer_fqcsid,
					sess->up_fqcsid, S1U_PORT_ID) < 0) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed West Bound Peer CSID link with local CSID\n",
					LOG_VALUE);
			return -1;
		}

		((sess->wb_peer_fqcsid)->node_addr.ip_type == IPV6_TYPE) ?
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Fill Sess West Bound Peer Node IPv6 Addr:"IPv6_FMT"\n",
					LOG_VALUE, IPv6_PRINT(IPv6_CAST((sess->wb_peer_fqcsid)->node_addr.ipv6_addr))):
			clLog(clSystemLog, eCLSeverityDebug,
					LOG_FORMAT"Fill Sess West Bound Peer Node IPv4 Addr:"IPV4_ADDR"\n",
					LOG_VALUE, IPV4_ADDR_HOST_FORMAT((sess->wb_peer_fqcsid)->node_addr.ipv4_addr));
	}

	/* Link with PGWU/East Bound node addr and local csid */
	if (is_present(&peer_info_t.eb_peer_ip)) {
		fqcsid_t *tmp = NULL;
		/* Stored the SGW CSID by PGW/East Bound Node address */
		tmp = get_peer_addr_csids_entry(&peer_info_t.eb_peer_ip, ADD_NODE);
		if (tmp == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error: %s \n", LOG_VALUE,
					strerror(errno));
			return -1;
		}
		memcpy(&tmp->node_addr,
				&peer_info_t.eb_peer_ip, sizeof(node_address_t));

		/* Append the CSID only if it is not already recorded */
		if (!tmp->num_csid) {
			tmp->local_csid[tmp->num_csid++] = csid;
		} else {
			uint8_t found = 0;
			for (uint8_t itr = 0; itr < tmp->num_csid; itr++) {
				if (tmp->local_csid[itr] == csid){
					found = 1;
					break;
				}
			}
			if (!found) {
				tmp->local_csid[tmp->num_csid++] = csid;
			}
		}

		if (sess->eb_peer_fqcsid == NULL) {
			sess->eb_peer_fqcsid = rte_zmalloc_socket(NULL, sizeof(fqcsid_t),
					RTE_CACHE_LINE_SIZE, rte_socket_id());
			if (sess->eb_peer_fqcsid == NULL) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Failed to allocate the memory for fqcsids entry\n",
						LOG_VALUE);
				return -1;
			}
		}

		/* Add the CSID in the Session List */
		(sess->eb_peer_fqcsid)->local_csid[(sess->eb_peer_fqcsid)->num_csid++] = csid;
		memcpy(&(sess->eb_peer_fqcsid)->node_addr,
				&peer_info_t.eb_peer_ip, sizeof(node_address_t));

		/* LINK East bound CSID with local CSID */
		if (link_peer_csid_with_local_csid(sess->eb_peer_fqcsid,
					sess->up_fqcsid, SGI_PORT_ID) < 0) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed East Bound Peer CSID link with local CSID\n",
					LOG_VALUE);
			return -1;
		}
	}

	/* LINK MME CSID with local CSID */
	if (sess->mme_fqcsid) {
		/* LINK MME CSID with local CSID */
		if (link_peer_csid_with_local_csid(sess->mme_fqcsid,
					sess->up_fqcsid, SX_PORT_ID) < 0) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed MME Peer CSID link with local CSID\n",
					LOG_VALUE);
			return -1;
		}
	}

	/* LINK SGW CSID with local CSID */
	if (sess->sgw_fqcsid) {
		/* LINK SGWC CSID with local CSID */
		if (link_peer_csid_with_local_csid(sess->sgw_fqcsid,
					sess->up_fqcsid, SX_PORT_ID) < 0) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed SGWC Peer CSID link with local CSID\n",
					LOG_VALUE);
			return -1;
		}
	}

	/* LINK PGW CSID with local CSID */
	if (sess->pgw_fqcsid) {
		/* LINK PGWC CSID with local CSID */
		if (link_peer_csid_with_local_csid(sess->pgw_fqcsid,
					sess->up_fqcsid, SX_PORT_ID) < 0) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed PGWC Peer CSID link with local CSID\n",
					LOG_VALUE);
			return -1;
		}
	}
	return 0;
}
#endif /* USE_CSID */
/**
 * @brief : Extract the CP node address from the CP F-SEID IE.
 * @param : cp_node_addr, output; receives the address and its ip_type.
 * @param : cp_fseid, structure holding the CP F-SEID data.
 * @return : Returns void
 */
static void
get_cp_node_addr(node_address_t *cp_node_addr, pfcp_fseid_ie_t *cp_fseid) {
	/* IPv4 address present in the F-SEID */
	if (cp_fseid->v4) {
		cp_node_addr->ip_type = PDN_TYPE_IPV4;
		cp_node_addr->ipv4_addr = cp_fseid->ipv4_address;
	}
	/* IPv6 address present in the F-SEID.
	 * NOTE(review): when both v4 and v6 are set, ip_type ends up
	 * PDN_TYPE_IPV6 while the IPv4 address written above is kept —
	 * confirm this is the intended dual-stack behavior. */
	if (cp_fseid->v6) {
		cp_node_addr->ip_type = PDN_TYPE_IPV6;
		memcpy(&cp_node_addr->ipv6_addr,
			&cp_fseid->ipv6_address, IPV6_ADDRESS_LEN);
	}
}
/**
 * @brief : Process sdf filters
 * @param : sdf_fltr_t, holds the received SDF filter IE
 * @param : sdf_fltr, destination filter updated from the IE
 * @return : Returns 0 in case of success , -1 otherwise
 */
static int8_t
process_pdi_sdf_filters(pfcp_sdf_filter_ie_t *sdf_fltr_t, sdf_filter_t *sdf_fltr)
{
	/* Flow-description flag: copy the description and its length */
	if (sdf_fltr_t->fd) {
		sdf_fltr->fd = sdf_fltr_t->fd;
		sdf_fltr->len_of_flow_desc = sdf_fltr_t->len_of_flow_desc;
		memcpy(&sdf_fltr->flow_desc, sdf_fltr_t->flow_desc,
				sdf_fltr->len_of_flow_desc);
	}

	/* bid and fl flags are currently not consumed */
	if (sdf_fltr_t->bid) {
		/* TODO:*/
	}

	if (sdf_fltr_t->fl) {
		/* TODO:*/
	}

	return 0;
}
/**
 * @brief : Process ueip information
 * @param : ue_addr, received UE IP Address IE
 * @param : ue_addr_t, destination UE address structure
 * @param : session, session data updated with the UE address
 * @return : Returns 0 in case of success , -1 otherwise
 */
static int8_t
process_pdi_ueip_info(pfcp_ue_ip_address_ie_t *ue_addr, ue_ip_addr_t *ue_addr_t,
		pfcp_session_datat_t *session)
{
	/* IPv4 flag set: record the address on both structures */
	if (ue_addr->v4) {
		ue_addr_t->v4 = PRESENT;
		ue_addr_t->ipv4_address = ue_addr->ipv4_address;
		session->ipv4 = PRESENT;
		session->ue_ip_addr = ue_addr_t->ipv4_address;
	}

	/* IPv6 flag set: record the prefix-delegation bits and address */
	if (ue_addr->v6) {
		ue_addr_t->v6 = PRESENT;
		ue_addr_t->ipv6_pfx_dlgtn_bits = ue_addr->ipv6_pfx_dlgtn_bits;
		session->ipv6 = PRESENT;
		memcpy(ue_addr_t->ipv6_address, ue_addr->ipv6_address, IPV6_ADDRESS_LEN);
		memcpy(session->ue_ipv6_addr, ue_addr->ipv6_address, IPV6_ADDRESS_LEN);
	}

	return 0;
}
/**
 * @brief : Process pdi local teid info
 * @param : lo_teid, received local F-TEID IE
 * @param : f_teid, destination F-TEID structure
 * @return : Returns 0 in case of success , -1 otherwise
 */
static int8_t
process_pdi_local_teid_info(pfcp_fteid_ie_t *lo_teid, fteid_ie_t *f_teid)
{
	/* The TEID itself is copied for either address family */
	if (lo_teid->v4 || lo_teid->v6)
		f_teid->teid = lo_teid->teid;

	/* Local Interface IPv4 address */
	if (lo_teid->v4)
		f_teid->ipv4_address = lo_teid->ipv4_address;

	/* Local Interface IPv6 address */
	if (lo_teid->v6)
		memcpy(f_teid->ipv6_address, lo_teid->ipv6_address, IPV6_ADDRESS_LEN);

	/* CHOOSE-ID (chid) and CHOOSE (ch) allocation are not supported */
	if (lo_teid->chid || lo_teid->ch)
		return -1;

	return 0;
}
/**
 * @brief : Classify an SDF rule string as IPv4 or IPv6 by inspecting
 *          its first whitespace-delimited token for a ':'.
 * @param : rule, NUL-terminated flow-description string
 * @return : RULE_IPV6 when the first token contains ':', RULE_IPV4 otherwise
 *
 * Fixes: the previous strncpy(tmp, rule, MAX_LEN) could leave tmp
 * unterminated for rules of MAX_LEN bytes or more, and a rule made of
 * only delimiters made strtok_r() return NULL, which was then passed
 * to strstr() (crash).
 */
static uint8_t
get_rule_ip_type(char *rule){
	char tmp[MAX_LEN] = {0};
	char *sp = NULL;
	char *token = NULL;
	static const char *dlm = " \t\n";

	/* Bounded copy; the last byte stays zero so tmp is always
	 * NUL-terminated even when rule is MAX_LEN bytes or longer. */
	strncpy(tmp, rule, MAX_LEN - 1);

	token = strtok_r(tmp, dlm, &sp);
	if (token == NULL)
		return RULE_IPV4;

	/* IPv6 literals are the only place a ':' appears in the address token */
	if (strchr(token, ':') != NULL)
		return RULE_IPV6;

	return RULE_IPV4;
}
/**
 * @brief : Process pdi info: copies the received PDI IE fields into the
 *          PDR's PDI structure and installs any SDF filters into the
 *          ACL table of the session.
 * @param : pdi_ie_t, holds pdi information received from the CP
 * @param : pdi, structure to be updated
 * @param : session, session information (ACL table bookkeeping lives here)
 * @param : prcdnc_val, precedence value applied to each installed filter
 * @return : Returns 0 in case of success , -1 otherwise
 */
static int8_t
process_pdr_pdi_info(pfcp_pdi_ie_t *pdi_ie_t, pdi_t *pdi,
		pfcp_session_datat_t **session, uint32_t prcdnc_val)
{
	/* M: Source Interface */
	if (pdi_ie_t->src_intfc.header.len) {
		pdi->src_intfc.interface_value = pdi_ie_t->src_intfc.interface_value;
	}

	/* Local F-TEID */
	if (pdi_ie_t->local_fteid.header.len) {
		if (process_pdi_local_teid_info(&pdi_ie_t->local_fteid,
					&pdi->local_fteid)) {
			return -1;
		}
	}

	/* Network Instance */
	if (pdi_ie_t->ntwk_inst.header.len) {
		memcpy(pdi->ntwk_inst.ntwk_inst, pdi_ie_t->ntwk_inst.ntwk_inst,
				sizeof(ntwk_inst_t));
	}

	/* UE IP Address */
	if (pdi_ie_t->ue_ip_address.header.len) {
		if (process_pdi_ueip_info(&pdi_ie_t->ue_ip_address, &pdi->ue_addr,
					*session)) {
			return -1;
		}
	}

	/* SDF Filters */
	if (pdi_ie_t->sdf_filter_count > 0) {
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Number of SDF Rule Rcv:%u\n",
				LOG_VALUE, pdi_ie_t->sdf_filter_count);
		for (int itr = 0; itr < pdi_ie_t->sdf_filter_count; itr++) {
			if (pdi_ie_t->sdf_filter[itr].header.len) {
				/* Add SDF rule entry in the ACL TABLE */
				struct sdf_pkt_filter pkt_filter = {0};
				pkt_filter.precedence = prcdnc_val;

				if (process_pdi_sdf_filters(&pdi_ie_t->sdf_filter[itr],
							&pdi->sdf_filter[pdi->sdf_filter_cnt++])) {
					return -1;
				}

				/* Reset the rule string */
				memset(pkt_filter.u.rule_str, 0, MAX_LEN);

				/* flow description */
				if (pdi_ie_t->sdf_filter[itr].fd) {
					memcpy(&pkt_filter.u.rule_str, &pdi_ie_t->sdf_filter[itr].flow_desc,
							pdi_ie_t->sdf_filter[itr].len_of_flow_desc);
					pkt_filter.rule_ip_type = get_rule_ip_type(pkt_filter.u.rule_str);
					if (!pdi_ie_t->src_intfc.interface_value) {
						/* swap the src and dst address for UL traffic.*/
						swap_src_dst_ip(&pkt_filter.u.rule_str[0]);
					}
					(*session)->acl_table_indx[(*session)->acl_table_count] =
						get_acl_table_indx(&pkt_filter, SESS_CREATE);
					/* NOTE(review): if acl_table_indx holds an unsigned type,
					 * "<= 0" only catches index 0 — confirm the error
					 * convention of get_acl_table_indx(). */
					if ((*session)->acl_table_indx[(*session)->acl_table_count] <= 0) {
						/* TODO: ERROR Handling */
						clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"ACL table creation failed\n", LOG_VALUE);
					}else{
						/* Only advance the slot counter on success */
						(*session)->acl_table_count++;
					}
				}
			}
		}
#ifdef DEFAULT_ACL_RULE_ADD
		uint8_t dir = 0;
		if (pdi_ie_t->src_intfc.interface_value) {
			dir = DOWNLINK;
		} else {
			dir = UPLINK;
		}

		/* NOTE(review): acl_table_count was already advanced past the last
		 * successfully created table, so this indexes the next (unused)
		 * slot — confirm which table the default rule should target. */
		if (up_sdf_default_entry_add((*session)->acl_table_indx[(*session)->acl_table_count],
					prcdnc_val, dir)) {
			/* TODO: ERROR Handling */
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to add default rule \n", LOG_VALUE);
		}
		pdi->sdf_filter_cnt++;
#endif /* DEFAULT_ACL_RULE_ADD */
	}
	return 0;
}
/**
 * @brief : Process create urr info: configures thresholds, measurement
 *          method and reporting trigger on the session's URR entry and
 *          arms the periodic timer for time-based reporting.
 * @param : urr, received Create URR IE
 * @param : urr_t, URR entry (see NOTE below — the argument is replaced
 *          by a hash lookup)
 * @param : cp_seid, cp session id
 * @param : up_seid, up session id
 * @param : cp_ip, peer node address
 * @return : Returns 0 in case of success , -1 otherwise
 */
static int8_t
process_create_urr_info(pfcp_create_urr_ie_t *urr, urr_info_t *urr_t, uint64_t cp_seid,
		uint64_t up_seid, peer_addr_t cp_ip)
{
	peerEntry *timer_entry = NULL;

	/* NOTE(review): the caller-supplied urr_t is overwritten here, so
	 * the caller never sees the looked-up entry — confirm the parameter
	 * is intended only as scratch storage. */
	urr_t = get_urr_info_entry(urr->urr_id.urr_id_value, cp_ip, cp_seid);
	if(urr_t == NULL){
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" URR not found for "
				"URR_ID:%u while creating URR info\n",
				LOG_VALUE, urr->urr_id.urr_id_value);
		return -1;
	}

	/* Vol threshold for Usage report Gen */
	if(urr->vol_thresh.header.len){
		if(urr->vol_thresh.ulvol)
			urr_t->vol_thes_uplnk = urr->vol_thresh.uplink_volume;
		if(urr->vol_thresh.dlvol)
			urr_t->vol_thes_dwnlnk = urr->vol_thresh.downlink_volume;
	}

	/* Time threshold for Usage report Gen */
	if(urr->time_threshold.header.len){
		urr_t->time_thes = urr->time_threshold.time_threshold;
	}

	/* Measurement Method
	 * Now only Supporting
	 * 1) Time threshold base
	 * 2) Volume threshold base
	 * 3) Both */
	if(urr->meas_mthd.volum && urr->meas_mthd.durat)
		urr_t->meas_method = VOL_TIME_BASED;
	else if(urr->meas_mthd.durat)
		urr_t->meas_method = TIME_BASED;
	else if(urr->meas_mthd.volum)
		urr_t->meas_method = VOL_BASED;
	else {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT" Measurement Method Not "
				"supported for URR ID %u\n", LOG_VALUE, urr->urr_id.urr_id_value);
		return -1;
	}

	/* Reporting trigger: which condition actually fires the report */
	if(urr->rptng_triggers.volth && urr->rptng_triggers.timth)
		urr_t->rept_trigg = VOL_TIME_BASED;
	else if(urr->rptng_triggers.timth)
		urr_t->rept_trigg = TIME_BASED;
	else if(urr->rptng_triggers.volth)
		urr_t->rept_trigg = VOL_BASED;
	else {
		clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT" Trigger Type Not supported for URR ID %u\n",
				LOG_VALUE, urr->urr_id.urr_id_value);
		return -1;
	}

	/* Defaulte setting to 0 as it is start of Usage report Generation */
	urr_t->uplnk_data = 0;
	urr_t->dwnlnk_data = 0;
	urr_t->start_time = current_ntp_timestamp();
	urr_t->end_time = 0;
	urr_t->first_pkt_time = 0;
	urr_t->last_pkt_time = 0;

	clLog(clSystemLog, eCLSeverityDebug,LOG_FORMAT" URR created with urr id %u\n",
			LOG_VALUE, urr_t->urr_id);

	/* Arm the periodic timer when time-based reporting is requested */
	if((urr_t->rept_trigg == TIME_BASED) || (urr_t->rept_trigg == VOL_TIME_BASED)) {
		/* NOTE(review): dest_addr_t appears to be a file-scope global —
		 * confirm it is populated before this point. */
		timer_entry = fill_timer_entry_usage_report(&dest_addr_t.ipv4, urr_t, cp_seid, up_seid);
		if(!(add_timer_entry_usage_report(timer_entry, urr_t->time_thes, timer_callback))) {
			/* NOTE(review): timer_entry is not released on this failure
			 * path — confirm whether it leaks. */
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Faild to add timer "
					"entry while creating URR info\n", LOG_VALUE);
			return -1;
		}

		if (starttimer(&timer_entry->pt) < 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Periodic Timer "
					"failed to start while creating URR info\n", LOG_VALUE);
		}
	}
	return 0;
}
/**
 * @brief : Process create bar info
 * @param : bar, hold create bar info
 * @param : bar_t, structure to be updated
 * @return : Returns 0 in case of success , -1 otherwise
 */
static int8_t
process_create_bar_info(pfcp_create_bar_ie_t *bar, bar_info_t *bar_t)
{
	/* Placeholder: BAR ID, Downlink Data Notification Delay and
	 * Suggested Buffering Packet Count are not consumed yet.
	 * TODO: Implement Handling */
	(void)bar;
	(void)bar_t;
	return 0;
}
/**
 * @brief : Process create qer info: copies gate status, bitrates,
 *          packet rates and flow-level marking from the Create QER IE
 *          into the session's QER entry.
 * @param : qer, hold create qer info
 * @param : quer_t, head of the session's QER list
 * @param : session, session information
 * @param : cp_ip, peer node address
 * @param : cp_seid, CP session ID of UE
 * @return : Returns 0 in case of success , -1 otherwise
 *
 * Fix: previously, when the mandatory QER ID IE was absent
 * (qer_id.header.len == 0), qer_t stayed NULL and the first field copy
 * below dereferenced it; now the function fails fast instead.
 */
static int8_t
process_create_qer_info(pfcp_create_qer_ie_t *qer, qer_info_t **quer_t,
		pfcp_session_datat_t **session, peer_addr_t cp_ip, uint64_t cp_seid)
{
	qer_info_t *qer_t = NULL;

	/* M: QER ID */
	if (qer->qer_id.header.len) {
		/* Get allocated memory location */
		qer_t = get_qer_info_entry(qer->qer_id.qer_id_value, quer_t, cp_ip, cp_seid);
	}

	/* Every assignment below dereferences qer_t: bail out when the QER
	 * ID IE was missing or the lookup/allocation failed. */
	if (qer_t == NULL)
		return -1;

	/* M: Gate Status */
	if (qer->gate_status.header.len) {
		/* Uplink Action Allow/Drop */
		qer_t->gate_status.ul_gate = qer->gate_status.ul_gate;
		/* Downlink Action Allow/Drop */
		qer_t->gate_status.dl_gate = qer->gate_status.dl_gate;
	}

	/* QER Correlation ID */
	if (qer->qer_corr_id.header.len) {
		qer_t->qer_corr_id_val = qer->qer_corr_id.qer_corr_id_val;
	}

	/* MBR: Maximum Bitrate */
	if (qer->maximum_bitrate.header.len) {
		/* Maximum Bitrare allow on Uplink */
		qer_t->max_bitrate.ul_mbr = qer->maximum_bitrate.ul_mbr;
		/* Maximum Bitrare allow on Downlink */
		qer_t->max_bitrate.dl_mbr = qer->maximum_bitrate.dl_mbr;
	}

	/* GBR: Guaranteed Bitrate */
	if (qer->guaranteed_bitrate.header.len) {
		/* Guaranteed Bitrare allow on Uplink */
		qer_t->guaranteed_bitrate.ul_gbr = qer->guaranteed_bitrate.ul_gbr;
		/* Guaranteed Bitrare allow on Downlink */
		qer_t->guaranteed_bitrate.dl_gbr = qer->guaranteed_bitrate.dl_gbr;
	}

	/* Packet Rate */
	if (qer->packet_rate.header.len) {
		/* Check Uplink Packet Rate Flag */
		if (qer->packet_rate.ulpr) {
			/* Maximum Uplink Packet Rate */
			qer_t->packet_rate.max_uplnk_pckt_rate =
				qer->packet_rate.max_uplnk_pckt_rate;
			/* Uplink Time Unit */
			qer_t->packet_rate.uplnk_time_unit =
				qer->packet_rate.uplnk_time_unit;
		}
		/* Check Downlink Packet Rate Flag */
		if (qer->packet_rate.dlpr) {
			/* Maximum Downlink Packet Rate */
			qer_t->packet_rate.max_dnlnk_pckt_rate =
				qer->packet_rate.max_dnlnk_pckt_rate;
			/* Downlink Time Unit */
			qer_t->packet_rate.dnlnk_time_unit =
				qer->packet_rate.dnlnk_time_unit;
		}
	}

	/* Downlink Flow Level Marking */
	if (qer->dl_flow_lvl_marking.header.len) {
		/* Check ToS/Traffic Class Flag */
		if (qer->dl_flow_lvl_marking.ttc) {
			qer_t->dl_flow_lvl_marking.ttc =
				qer->dl_flow_lvl_marking.ttc;
			/* ToS/Traffic Class */
			memcpy(&(qer_t->dl_flow_lvl_marking.tostraffic_cls),
					&(qer->dl_flow_lvl_marking.tostraffic_cls), sizeof(qer_t->dl_flow_lvl_marking.tostraffic_cls));
		}

		/* Check Service Class Indicator Flag */
		if (qer->dl_flow_lvl_marking.sci) {
			qer_t->dl_flow_lvl_marking.sci =
				qer->dl_flow_lvl_marking.sci;
			/* Service Class Indicator */
			memcpy(&(qer_t->dl_flow_lvl_marking.svc_cls_indctr),
					&(qer->dl_flow_lvl_marking.svc_cls_indctr) ,sizeof(qer->dl_flow_lvl_marking.svc_cls_indctr));
		}
	}

	/* QOS Flow Ident */
	if (qer->qos_flow_ident.header.len) {
		qer_t->qos_flow_ident.qfi_value = qer->qos_flow_ident.qfi_value;
	}

	/* RQI: Reflective QoS */
	if (qer->reflective_qos.header.len) {
		qer_t->reflective_qos.rqi = qer->reflective_qos.rqi;
	}

	/* Paging policy */
	if (qer->paging_plcy_indctr.header.len) {
		qer_t->paging_plcy_indctr.ppi_value = qer->paging_plcy_indctr.ppi_value;
	}

	/* Averaging Window */
	if (qer->avgng_wnd.header.len) {
		qer_t->avgng_wnd.avgng_wnd = qer->avgng_wnd.avgng_wnd;
	}

	/* Pointer to Sessions */
	qer_t->session = *session;

	return 0;
}
/**
 * @brief : update far apply action
 * @param : far, received Apply Action IE
 * @param : far_t, destination apply-action flags
 * @return : Returns 0 in case of success , -1 otherwise
 */
static int8_t
far_apply_action(pfcp_apply_action_ie_t *far, apply_action *far_t)
{
	/* M: Apply Action — copy nothing when the IE is absent */
	if (!far->header.len)
		return 0;

	far_t->dupl = far->dupl;	/* Duplicate the packets */
	far_t->buff = far->buff;	/* Buffer the packets */
	far_t->forw = far->forw;	/* Forward the packets */
	far_t->drop = far->drop;	/* Drop the packets */
	/* Notify the CP function about arrival of a
	 * first downlink packet being buffered */
	far_t->nocp = far->nocp;

	return 0;
}
/**
 * @brief : Process create far info: copies apply-action and forwarding
 *          parameters into the session's FAR entry, records the
 *          west/east-bound peer address and arms the connection table.
 * @param : far, hold create far info
 * @param : session, session information
 * @param : up_seid, UP session id
 * @param : sess, pfcp_session_t information
 * @return : Returns 0 in case of success , -1 otherwise
 *
 * Fixes: (1) fail fast when the mandatory FAR ID IE is absent —
 * previously far_t stayed NULL and was dereferenced; (2) the IPv6
 * outer-header check compared an array against NULL (always true);
 * it now compares the address bytes against zero, matching the checks
 * used further below; (3) the east-bound (SGI) debug logs wrongly said
 * "West Bound".
 */
static int8_t
process_create_far_info(pfcp_create_far_ie_t *far,
		pfcp_session_datat_t **session, uint64_t up_seid,
		pfcp_session_t *sess)
{
	node_address_t peer_addr = {0};
	far_info_t *far_t = NULL;
	/* All-zero IPv6 address used to detect "no IPv6 address present" */
	uint8_t tmp_ipv6[IPV6_ADDR_LEN] = {0};

	/* M: FAR ID */
	if (far->far_id.header.len) {
		/* Get allocated memory location */
		far_t = get_far_info_entry(far->far_id.far_id_value, sess->cp_ip, sess->cp_seid);
	}

	/* Every branch below dereferences far_t: bail out when the FAR ID
	 * IE was missing or the lookup failed. */
	if (far_t == NULL)
		return -1;

	/* M: Apply Action */
	if (far->apply_action.header.len) {
		if (far_apply_action(&far->apply_action, &far_t->actions)) {
			/* TODO: Error Handling */
		}
	}

	/* Forwarding Parameters */
	if (far->frwdng_parms.header.len) {
		/* M: Destination Interface */
		if (far->frwdng_parms.dst_intfc.header.len) {
			/* Destination Interface */
			far_t->frwdng_parms.dst_intfc.interface_value =
				far->frwdng_parms.dst_intfc.interface_value;
		}

		/* Outer Header Creation */
		if (far->frwdng_parms.outer_hdr_creation.header.len) {
			/* TODO: Add the handling for dual stack connectivity scenario */
			if (far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv4) {
				far_t->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc = GTPU_UDP_IPv4;
				/* Linked Outer header Creation with Session */
				if (far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv4 ==
						OUT_HDR_DESC_VAL) {
					(*session)->hdr_crt = GTPU_UDP_IPv4;
					clLog(clSystemLog, eCLSeverityDebug,
							LOG_FORMAT"Outer Header Desciprition(GTPU_UDP_IPv4) : %u\n",
							LOG_VALUE,
							far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc);
				}
			} else if (far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv6) {
				far_t->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc = GTPU_UDP_IPv6;
				/* Linked Outer header Creation with Session */
				if (far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv6 ==
						OUT_HDR_DESC_VAL) {
					(*session)->hdr_crt = GTPU_UDP_IPv6;
					clLog(clSystemLog, eCLSeverityDebug,
							LOG_FORMAT"Outer Header Desciprition(GTPU_UDP_IPv6) : %u\n",
							LOG_VALUE,
							far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc);
				}
			} else {
				/* Linked Outer header Creation with Session */
				(*session)->hdr_crt = NOT_SET_OUT_HDR_RVL_CRT;
			}

			/* TEID */
			far_t->frwdng_parms.outer_hdr_creation.teid =
				far->frwdng_parms.outer_hdr_creation.teid;
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"FAR Teid : %u\n",
					LOG_VALUE, far_t->frwdng_parms.outer_hdr_creation.teid);

			/* Customer-VLAN Tag */
			far_t->frwdng_parms.outer_hdr_creation.ctag =
				far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.ctag;
			/* Service-VLAN Tag */
			far_t->frwdng_parms.outer_hdr_creation.stag =
				far->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.stag;
			/* Port Number */
			far_t->frwdng_parms.outer_hdr_creation.port_number =
				far->frwdng_parms.outer_hdr_creation.port_number;

			if(far->frwdng_parms.outer_hdr_creation.ipv4_address != 0){
				/* IPv4 Address */
				far_t->frwdng_parms.outer_hdr_creation.ip_type = IPV4_TYPE;
				far_t->frwdng_parms.outer_hdr_creation.ipv4_address =
					far->frwdng_parms.outer_hdr_creation.ipv4_address;
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"FAR dst Ipv4 Address :"
						IPV4_ADDR"\n", LOG_VALUE,
						IPV4_ADDR_HOST_FORMAT(far_t->frwdng_parms.outer_hdr_creation.ipv4_address));
			} else if (memcmp(far->frwdng_parms.outer_hdr_creation.ipv6_address,
						tmp_ipv6, IPV6_ADDR_LEN)) {
				/* IPv6 Address (byte compare: the previous "!= NULL" test
				 * on the array was always true) */
				far_t->frwdng_parms.outer_hdr_creation.ip_type = IPV6_TYPE;
				memcpy(far_t->frwdng_parms.outer_hdr_creation.ipv6_address,
						far->frwdng_parms.outer_hdr_creation.ipv6_address,
						IPV6_ADDRESS_LEN);
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"FAR dst Ipv6 Address :"
						IPv6_FMT"\n", LOG_VALUE,
						IPv6_PRINT(*(struct in6_addr *)far_t->frwdng_parms.outer_hdr_creation.ipv6_address));
			}
		} else {
			/* Linked Outer header Creation with Session */
			(*session)->hdr_crt = NOT_SET_OUT_HDR_RVL_CRT;
		}

		if (far->frwdng_parms.dst_intfc.interface_value == ACCESS ) {
			/* Add eNB peer node information in connection table */
			if ((far->frwdng_parms.outer_hdr_creation.ipv4_address != 0) ||
					(memcmp(&far->frwdng_parms.outer_hdr_creation.ipv6_address,
							&tmp_ipv6, IPV6_ADDR_LEN))) {
				if (far->frwdng_parms.outer_hdr_creation.ipv4_address != 0) {
#ifdef USE_REST
					/* Fill the peer node entry and add the entry into connection table */
					memset(&peer_addr, 0, sizeof(node_address_t));
					peer_addr.ip_type = IPV4_TYPE;
					peer_addr.ipv4_addr = far->frwdng_parms.outer_hdr_creation.ipv4_address;
					if ((add_node_conn_entry(peer_addr, up_seid, S1U_PORT_ID)) < 0) {
						clLog(clSystemLog, eCLSeverityCritical,
								LOG_FORMAT":Failed to add connection entry for eNB\n",
								LOG_VALUE);
					}
#endif /* USE_REST */
					(*session)->wb_peer_ip_addr.ip_type |= PDN_TYPE_IPV4;
					(*session)->wb_peer_ip_addr.ipv4_addr = far->frwdng_parms.outer_hdr_creation.ipv4_address;
					clLog(clSystemLog, eCLSeverityDebug,
							LOG_FORMAT"MBR: West Bound Peer IPv4 Node Addr:"IPV4_ADDR"\n",
							LOG_VALUE, IPV4_ADDR_HOST_FORMAT((*session)->wb_peer_ip_addr.ipv4_addr));
				} else {
					(*session)->wb_peer_ip_addr.ip_type |= PDN_TYPE_IPV6;
					memcpy((*session)->wb_peer_ip_addr.ipv6_addr,
							far->frwdng_parms.outer_hdr_creation.ipv6_address,
							IPV6_ADDRESS_LEN);
					clLog(clSystemLog, eCLSeverityDebug,
							LOG_FORMAT"MBR: West Bound Peer IPv6 Node Addr:"IPv6_FMT"\n",
							LOG_VALUE,
							IPv6_PRINT(*(struct in6_addr *)(*session)->wb_peer_ip_addr.ipv6_addr));
#ifdef USE_REST
					/* Fill the peer node entry and add the entry into connection table */
					memset(&peer_addr, 0, sizeof(node_address_t));
					peer_addr.ip_type = IPV6_TYPE;
					memcpy(peer_addr.ipv6_addr,
							far->frwdng_parms.outer_hdr_creation.ipv6_address, IPV6_ADDRESS_LEN);
					if ((add_node_conn_entry(peer_addr, up_seid, S1U_PORT_ID)) < 0) {
						clLog(clSystemLog, eCLSeverityCritical,
								LOG_FORMAT":Failed to add connection entry for eNB\n",
								LOG_VALUE);
					}
#endif /* USE_REST */
				}
				/* Update the Session state */
				if (far->frwdng_parms.outer_hdr_creation.teid != 0) {
					(*session)->sess_state = CONNECTED;
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Session State Change : "
							"IN_PROGRESS --> CONNECTED\n", LOG_VALUE);
				}
			}
		} else {
			/* Add S5S8 peer node information in connection table */
			if ((far->frwdng_parms.outer_hdr_creation.ipv4_address != 0) ||
					(memcmp(&far->frwdng_parms.outer_hdr_creation.ipv6_address,
							&tmp_ipv6, IPV6_ADDR_LEN))) {
				if (far->frwdng_parms.outer_hdr_creation.ipv4_address != 0) {
#ifdef USE_REST
					/* Fill the peer node entry and add the entry into connection table */
					memset(&peer_addr, 0, sizeof(node_address_t));
					peer_addr.ip_type = IPV4_TYPE;
					peer_addr.ipv4_addr = far->frwdng_parms.outer_hdr_creation.ipv4_address;
					if ((add_node_conn_entry(peer_addr, up_seid, SGI_PORT_ID)) < 0) {
						clLog(clSystemLog, eCLSeverityCritical,
								LOG_FORMAT":Failed to add connection entry for S5S8\n",
								LOG_VALUE);
					}
#endif /* USE_REST */
					(*session)->eb_peer_ip_addr.ip_type |= PDN_TYPE_IPV4;
					(*session)->eb_peer_ip_addr.ipv4_addr = far->frwdng_parms.outer_hdr_creation.ipv4_address;
					clLog(clSystemLog, eCLSeverityDebug,
							LOG_FORMAT"MBR: East Bound Peer IPv4 Node Addr:"IPV4_ADDR"\n",
							LOG_VALUE, IPV4_ADDR_HOST_FORMAT((*session)->eb_peer_ip_addr.ipv4_addr));
				} else {
					/* TODO:PATH MANG: Add the entry for IPv6 Address */
					(*session)->eb_peer_ip_addr.ip_type |= PDN_TYPE_IPV6;
					memcpy((*session)->eb_peer_ip_addr.ipv6_addr,
							far->frwdng_parms.outer_hdr_creation.ipv6_address,
							IPV6_ADDRESS_LEN);
					clLog(clSystemLog, eCLSeverityDebug,
							LOG_FORMAT"MBR: East Bound Peer IPv6 Node Addr:"IPv6_FMT"\n",
							LOG_VALUE, IPv6_PRINT(*(struct in6_addr *)(*session)->eb_peer_ip_addr.ipv6_addr));
#ifdef USE_REST
					/* Fill the peer node entry and add the entry into connection table */
					memset(&peer_addr, 0, sizeof(node_address_t));
					peer_addr.ip_type = IPV6_TYPE;
					memcpy(peer_addr.ipv6_addr,
							far->frwdng_parms.outer_hdr_creation.ipv6_address, IPV6_ADDRESS_LEN);
					if ((add_node_conn_entry(peer_addr, up_seid, SGI_PORT_ID)) < 0) {
						clLog(clSystemLog, eCLSeverityCritical,
								LOG_FORMAT":Failed to add connection entry for S5S8\n",
								LOG_VALUE);
					}
#endif /* USE_REST */
				}
				/* Update the Session state */
				if (far->frwdng_parms.outer_hdr_creation.teid != 0) {
					(*session)->sess_state = CONNECTED;
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Session State Change : "
							"IN_PROGRESS --> CONNECTED\n", LOG_VALUE);
				}
			}
		}
	} else {
		/* Linked Outer header Creation with Session */
		(*session)->hdr_crt = NOT_SET_OUT_HDR_RVL_CRT;
	}

	/* Buffering Action Rule Identifier */
	if (far->bar_id.header.len) {
		far_t->bar_id_value = far->bar_id.bar_id_value;
	}

	/* Duplicating Parameters */
	if (far->dupng_parms_count) {
		/* Fill Duplicating Parameters For User Level Packet Copying */
		fill_li_duplicating_params(far, far_t, sess);
	}

	/* Pointer to Session */
	far_t->session = *session;
	return 0;
}
/**
 * @brief : Process update pdr info received in a PFCP Session Modification
 *          Request: locate the session object (by local F-TEID or UE IP),
 *          look up the existing PDR, drop stale SDF/ACL context and apply
 *          the updated precedence/PDI/outer-header-removal values.
 * @param : pdr, hold update pdr info
 * @param : sess, pfcp session info
 * @return : Returns 0 in case of success , -1 otherwise
 */
static int8_t
process_update_pdr_info(pfcp_update_pdr_ie_t *pdr, pfcp_session_t *sess)
{
	int ret = 0;
	pfcp_session_datat_t *session = NULL;
	pdr_info_t *pdr_t = NULL;
	struct sdf_pkt_filter pkt_filter = {0};

	/* Locate the session object either by local F-TEID or by UE IP address */
	if (pdr->pdi.local_fteid.teid) {
		session = get_sess_by_teid_entry(pdr->pdi.local_fteid.teid,
				&sess->sessions, SESS_MODIFY);
		if (session == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Failed to create "
					"the session for TEID:%u", LOG_VALUE, pdr->pdi.local_fteid.teid);
			return -1;
		}
	} else if (pdr->pdi.ue_ip_address.header.len){
		ue_ip_t ue_ip = {0};

		if (pdr->pdi.ue_ip_address.v4) {
			ue_ip.ue_ipv4 = pdr->pdi.ue_ip_address.ipv4_address;
			session = get_sess_by_ueip_entry(ue_ip, &sess->sessions, SESS_MODIFY);
			if (session == NULL) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Failed to create the session for UE_IPv4:"IPV4_ADDR"\n",
						LOG_VALUE, IPV4_ADDR_HOST_FORMAT(pdr->pdi.ue_ip_address.ipv4_address));
				return -1;
			}
		}

		if (pdr->pdi.ue_ip_address.v6) {
			memcpy(ue_ip.ue_ipv6, pdr->pdi.ue_ip_address.ipv6_address, IPV6_ADDRESS_LEN);
			char ipv6[IPV6_STR_LEN];
			inet_ntop(AF_INET6, ue_ip.ue_ipv6, ipv6, IPV6_STR_LEN);

			if (pdr->pdi.ue_ip_address.v4) {
				int ret = 0;
				/* Dual-stack: v4 lookup above resolved the session; also key it
				 * by the IPv6 address. Session Entry not present, add new entry. */
				ret = rte_hash_add_key_data(sess_by_ueip_hash,
						&ue_ip, session);
				if (ret) {
					/* BUGFIX: log the textual address prepared via inet_ntop()
					 * instead of passing the raw 16-byte array to "%s". */
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Failed to add entry for UE IPv4: "IPV4_ADDR" or IPv6 Addr: %s"
							", Error: %s\n", LOG_VALUE, IPV4_ADDR_HOST_FORMAT(ue_ip.ue_ipv4), ipv6,
							rte_strerror(abs(ret)));
					return -1;
				}
			} else {
				session = get_sess_by_ueip_entry(ue_ip, &sess->sessions, SESS_MODIFY);
				if (session == NULL) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Failed to create the session for IPv6 Addr: %s\n",
							LOG_VALUE, ipv6);
					return -1;
				}
			}
		}
	} else {
		/* BUGFIX(typo): "TIED" -> "TEID" in the error message */
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" TEID and UE_IP_addr "
				"both are NULL \n", LOG_VALUE);
		return -1;
	}

	/* M: PDR ID */
	if (pdr->pdr_id.header.len) {
		rule_key hash_key = {0};

		hash_key.cp_ip_addr.type = sess->cp_ip.type;
		if (sess->cp_ip.type == PDN_TYPE_IPV4) {
			hash_key.cp_ip_addr.ip.ipv4_addr = sess->cp_ip.ipv4.sin_addr.s_addr;
		} else {
			memcpy(hash_key.cp_ip_addr.ip.ipv6_addr, sess->cp_ip.ipv6.sin6_addr.s6_addr, IPV6_ADDRESS_LEN);
		}

		hash_key.id = (uint32_t)pdr->pdr_id.rule_id;
		hash_key.cp_seid = sess->cp_seid;

		ret = rte_hash_lookup_data(pdr_by_id_hash,
				&hash_key, (void **)&pdr_t);
		if (ret < 0) {
			return -1;
		}

		if (pdr_t->rule_id != pdr->pdr_id.rule_id) {
			return -1;
		}
	}

	/* BUGFIX: when the mandatory PDR ID IE is absent, pdr_t stays NULL and
	 * every dereference below would crash. Bail out explicitly instead. */
	if (pdr_t == NULL) {
		return -1;
	}

	if ((pdr->pdi).sdf_filter_count) {
		/* First remove older sdf context from acl rules */
		for (int itr = 0; itr < pdr_t->pdi.sdf_filter_cnt; itr++) {
			pkt_filter.precedence = pdr_t->prcdnc_val;
			/* Reset the rule string */
			memset(pkt_filter.u.rule_str, 0, MAX_LEN);

			/* flow description */
			if (pdr_t->pdi.sdf_filter[itr].fd) {
				memcpy(&pkt_filter.u.rule_str, &pdr_t->pdi.sdf_filter[itr].flow_desc,
						pdr_t->pdi.sdf_filter[itr].len_of_flow_desc);

				pkt_filter.rule_ip_type = get_rule_ip_type(pkt_filter.u.rule_str);
				if (!pdr_t->pdi.src_intfc.interface_value) {
					/* swap the src and dst address for UL traffic.*/
					swap_src_dst_ip(&pkt_filter.u.rule_str[0]);
				}

				int flag = 0;
				int32_t indx = get_acl_table_indx(&pkt_filter, SESS_MODIFY);
				if (indx > 0) {
					/* Remove the matching index by shifting the remaining
					 * entries down. (Inner loop variable renamed so it no
					 * longer shadows the outer itr.) */
					for (uint16_t pos = 0; pos < session->acl_table_count; pos++) {
						if (session->acl_table_indx[pos] == indx) {
							flag = 1;
						}
						if (flag && pos != session->acl_table_count - 1)
							session->acl_table_indx[pos] = session->acl_table_indx[pos + 1];
					}
				}

				if (flag) {
					/* BUGFIX(off-by-one): clear the last occupied slot
					 * (count - 1), not the slot one past it, before
					 * shrinking the count. */
					session->acl_table_indx[session->acl_table_count - 1] = 0;
					session->acl_table_count--;
				}
			}
		}
	}

	if (pdr->precedence.header.len) {
		pdr_t->prcdnc_val = pdr->precedence.prcdnc_val;
	}

	/* M: Packet Detection Information */
	if (pdr->pdi.header.len) {
		if (process_pdr_pdi_info(&pdr->pdi, &pdr_t->pdi, &session,
					pdr_t->prcdnc_val)) {
			/* TODO:Error handling */
		}
	}

	/* C: Outer Header Removal */
	if (pdr->outer_hdr_removal.header.len) {
		/* Fill the outer header header description */
		pdr_t->outer_hdr_removal.outer_hdr_removal_desc =
				pdr->outer_hdr_removal.outer_hdr_removal_desc;

		/* Linked into Session Obj */
		session->hdr_rvl = pdr->outer_hdr_removal.outer_hdr_removal_desc;
	}

	return 0;
}
/**
 * @brief : Resolve a predefined PCC rule by name and install its SDF filters:
 *          copy each flow description into the PDI and create the matching
 *          ACL table entries on the session object.
 * @param : rule_name, NUL-terminated (up to MAX_RULE_LEN) predefined rule name
 * @param : pdi, packet detection info to receive the SDF filters
 * @param : session, session object whose ACL table index list is extended
 * @return : Returns 0 in case of success , -1 otherwise
 *           (individual SDF/ACL failures are logged and skipped, not fatal)
 */
static int
fill_sdf_rule_by_rule_name(uint8_t *rule_name, pdi_t *pdi,
		pfcp_session_datat_t **session)
{
	int ret = 0;
	pcc_rule_name rule = {0};
	struct pcc_rules *pcc = NULL;

	if (rule_name == NULL)
		return -1;

	/* Fill/Copy the Rule Name */
	memcpy(&rule.rname, (void *)rule_name, strnlen(((char *)rule_name),MAX_RULE_LEN));

	/* Look up the predefined PCC rule that carries the SDF index list */
	pcc = get_predef_pcc_rule_entry(&rule, GET_RULE);
	if (pcc == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Error: Failed to GET PCC Rule in the pcc table"
				" for Rule_Name: %s\n", LOG_VALUE, rule.rname);
		return -1;
	}else {
		/* NOTE(review): the count is set up-front, before the per-index
		 * lookups below; a failed lookup leaves a gap in sdf_filter[] while
		 * sdf_filter_cnt still counts it — confirm downstream tolerates this. */
		pdi->sdf_filter_cnt = pcc->sdf_idx_cnt;

		for (uint8_t idx = 0; idx < pcc->sdf_idx_cnt; idx++) {
			void *sdf_rule = NULL;
			struct pkt_filter *sdf = NULL;

			/* Fetch the SDF rule body for this index from the internal table */
			ret = get_predef_rule_entry(pcc->sdf_idx[idx],
					SDF_HASH, GET_RULE, &sdf_rule);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: Failed to GET SDF Rule from the internal table"
						"for SDF_Indx: %u\n", LOG_VALUE, pcc->sdf_idx[idx]);
				continue;
			} else {
				/* Fill the QER info */
				sdf = (struct pkt_filter *)sdf_rule;
				if (sdf != NULL) {
					/* Add SDF rule entry in the ACL TABLE */
					struct sdf_pkt_filter pkt_filter = {0};
					pkt_filter.precedence = pcc->precedence;

					/* len of flow description (full rule_str buffer size) */
					pdi->sdf_filter[idx].len_of_flow_desc = sizeof(sdf->u.rule_str);
					/* flow description */
					memcpy(&pdi->sdf_filter[idx].flow_desc, &(sdf->u).rule_str,
							pdi->sdf_filter[idx].len_of_flow_desc);

					/* Reset the rule string */
					memset(pkt_filter.u.rule_str, 0, MAX_LEN);
					/* Fill the flow description*/
					memcpy(&pkt_filter.u.rule_str, &pdi->sdf_filter[idx].flow_desc,
							pdi->sdf_filter[idx].len_of_flow_desc);

					pkt_filter.rule_ip_type = get_rule_ip_type(pkt_filter.u.rule_str);
					if (!pdi->src_intfc.interface_value) {
						/* swap the src and dst address for UL traffic.*/
						swap_src_dst_ip(&pkt_filter.u.rule_str[0]);
					}

					/* Create/look up the ACL table for this filter and record
					 * its index on the session; count advances only on success */
					(*session)->acl_table_indx[(*session)->acl_table_count] =
							get_acl_table_indx(&pkt_filter, SESS_CREATE);
					if ((*session)->acl_table_indx[(*session)->acl_table_count] <= 0) {
						/* TODO: ERROR Handling */
						clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"ACL table creation failed\n", LOG_VALUE);
						continue;
					}else{
						(*session)->acl_table_count++;
					}
					(*session)->predef_rule = TRUE;
				} /* TODO: ERROR Handling */
			}
		}
	}
	return 0;
}
/**
 * @brief : Process create pdr info: populate a new PDR on the session object
 *          and link its FAR/QER/URR identifiers and any activated
 *          predefined rules.
 * @param : pdr, hold create pdr info
 * @param : session, pfcp session data related info
 * @param : sess, pfcp_session_t
 * @return : Returns 0 in case of success , -1 otherwise
 */
static int8_t
process_create_pdr_info(pfcp_create_pdr_ie_t *pdr, pfcp_session_datat_t **session,
		pfcp_session_t *sess)
{
	pdr_info_t *pdr_t = NULL;

	/* M: PDR ID */
	if (pdr->pdr_id.header.len) {
		pdr_t = get_pdr_info_entry(pdr->pdr_id.rule_id,
				&(*session)->pdrs, SESS_CREATE, sess->cp_ip, sess->cp_seid);
		if (pdr_t == NULL)
			return -1;

		pdr_t->rule_id = pdr->pdr_id.rule_id;
	}

	/* BUGFIX: the mandatory PDR ID IE may be absent in a malformed request;
	 * without this guard every dereference below would crash on NULL pdr_t. */
	if (pdr_t == NULL) {
		return -1;
	}

	/* M: Precedance */
	if (pdr->precedence.header.len) {
		pdr_t->prcdnc_val = pdr->precedence.prcdnc_val;
	}

	/* M: Packet Detection Information */
	if (pdr->pdi.header.len) {
		if (process_pdr_pdi_info(&pdr->pdi, &pdr_t->pdi, session,
					pdr_t->prcdnc_val)) {
			return -1;
		}
	}

	/* C: Outer Header Removal */
	if (pdr->outer_hdr_removal.header.len) {
		/* Fill the outer header header description */
		pdr_t->outer_hdr_removal.outer_hdr_removal_desc =
				pdr->outer_hdr_removal.outer_hdr_removal_desc;

		/* Linked into Session Obj */
		(*session)->hdr_rvl = pdr->outer_hdr_removal.outer_hdr_removal_desc;
	} else {
		/* Linked into Session Obj */
		(*session)->hdr_rvl = NOT_SET_OUT_HDR_RVL_CRT;
	}

	/* Forwarding Action Rule (FAR ID) Identifer */
	if (pdr->far_id.header.len) {
		/* Add FAR ID entry in the hash table */
		if (add_far_info_entry(pdr->far_id.far_id_value, &pdr_t->far, sess->cp_ip, sess->cp_seid)) {
			return -1;
		}
		(pdr_t->far)->far_id_value = pdr->far_id.far_id_value;
		(pdr_t->far)->pdr_count++;
	}

	/* QoS Enforcement Rule (QER ID) Identifiers */
	if (pdr->qer_id_count > 0) {
		pdr_t->qer_count = pdr->qer_id_count;
		for (int itr = 0; itr < pdr_t->qer_count; itr++) {
			/* Add QER ID entry in the hash table */
			if (add_qer_info_entry(pdr->qer_id[itr].qer_id_value, &pdr_t->quer, sess->cp_ip, sess->cp_seid)) {
				return -1;
			}
			(pdr_t->quer[itr]).qer_id = pdr->qer_id[itr].qer_id_value;
		}
	}

	/* Usage Reporting Rule (URR ID) Identifiers */
	if (pdr->urr_id_count > 0) {
		pdr_t->urr_count = pdr->urr_id_count;
		for (int itr = 0; itr < pdr_t->urr_count; itr++) {
			/* Add URR ID entry in the hash table */
			if (add_urr_info_entry(pdr->urr_id[itr].urr_id_value, &pdr_t->urr, sess->cp_ip, sess->cp_seid)) {
				return -1;
			}
			(pdr_t->urr[itr]).urr_id = pdr->urr_id[itr].urr_id_value;
			(pdr_t->urr[itr]).pdr_count++;
		}
	}

	/* Predefine Rules */
	if (pdr->actvt_predef_rules_count) {
		pdr_t->predef_rules_count = pdr->actvt_predef_rules_count;
		clLog(clSystemLog, eCLSeverityDebug, "Number of Predef Rule Rcv:%u\n",
				pdr_t->predef_rules_count);
		for (int itr = 0; itr < pdr_t->predef_rules_count; itr++) {
			/* Add predefine rule entry in the table */
			memcpy(&pdr_t->predef_rules[itr], &pdr->actvt_predef_rules[itr],
					pdr->actvt_predef_rules[itr].header.len);

			/* Based on the rule name fill/generate the QER */
			qer_info_t *qer = NULL;
			qer = add_rule_info_qer_hash(pdr->actvt_predef_rules[itr].predef_rules_nm);
			if (qer != NULL) {
				/* Pointer to Sessions */
				qer->session = *session;
				qer->next = NULL;

				/* Linked the QER with PDR: append at the tail of the list */
				if (pdr_t->quer == NULL) {
					pdr_t->quer = qer;
				} else {
					qer_info_t *tmp = NULL;
					tmp = pdr_t->quer;
					while (tmp->next != NULL) {
						tmp = tmp->next;
					}
					tmp->next = qer;
				}
				pdr_t->qer_count++;
			}
			/* TODO: Add Error handling */

			/* Based on the rule name fill the SDF Information */
			fill_sdf_rule_by_rule_name(pdr->actvt_predef_rules[itr].predef_rules_nm,
					&pdr_t->pdi, session);
		}
	}

	/* BUGFIX: reading acl_table_indx[count - 1] with count == 0 indexed one
	 * element before the array; compute the logged value safely instead. */
	uint32_t last_acl_indx = ((*session)->acl_table_count > 0) ?
			(*session)->acl_table_indx[(*session)->acl_table_count - 1] : 0;
	clLog(clSystemLog, eCLSeverityInfo, LOG_FORMAT"Entry Add PDR_ID:%u, precedence:%u, ACL_TABLE_INDX:%u\n",
			LOG_VALUE, pdr->pdr_id.rule_id, pdr->precedence.prcdnc_val,
			last_acl_indx);

	/* pointer to the session */
	pdr_t->session = sess;
	return 0;
}
/**
 * @brief : Decode a nibble-swapped BCD (TBCD) encoded IMSI buffer into a
 *          64 bit unsigned integer. A high nibble of 0xF in the last byte is
 *          treated as the standard filler and dropped from the result.
 * @param : buf, encoded IMSI bytes (two decimal digits per byte, swapped)
 * @param : len, number of bytes in buf (clamped to 8, i.e. 15 digits + filler)
 * @param : imsi, out parameter receiving the decoded value
 * @return : Nothing (the original comment claimed 0/-1, but this is void)
 */
static void
decode_imsi_to_u64(uint8_t *buf, int len, uint64_t *imsi)
{
	/* Two decimal digits per byte plus a terminating NUL.
	 * BUGFIX: the previous hex[16] buffer overflowed by one byte (the NUL
	 * written by sprintf) whenever len == 8 (a full 15-digit IMSI). */
	char digits[(2 * 8) + 1] = {0};
	bool filler = false;

	/* Clamp to the maximum IMSI length; also guards the buffer above. */
	if (len > 8)
		len = 8;

	/* int loop index: the old uint32_t i vs int len comparison turned a
	 * negative len into a huge unsigned bound. */
	for (int i = 0; i < len; i++) {
		if (i == len - 1 && (((buf[i] & 0xF0) >> 4) == 0x0F)) {
			/* High nibble is the 0xF filler: emit only the low digit
			 * (shifted up; the trailing '0' is divided away below). */
			snprintf(digits + i * 2, 3, "%02x", (buf[i] & 0x0F) << 4);
			filler = true;
		} else {
			/* Swap nibbles so digits come out in transmission order;
			 * BCD digits 0-9 render identically in %02x and decimal. */
			snprintf(digits + i * 2, 3, "%02x",
					(((buf[i] & 0x0F) << 4) | ((buf[i] & 0xF0) >> 4)));
		}
	}

	sscanf(digits, "%lu", imsi);
	if (filler)
		*imsi /= 10;	/* drop the placeholder digit added for the filler */
	return;
}
/**
 * @brief : Resolve (or create) the per-bearer session data object referenced
 *          by a Create PDR IE, keyed by local F-TEID when present, otherwise
 *          by the UE IP address (v4 and/or v6).
 * @param : create_pdr, Create PDR IE carrying the F-TEID / UE IP keys
 * @param : sess, parent PFCP session whose session list is searched/extended
 * @return : Pointer to the session data object, or NULL on failure
 */
static pfcp_session_datat_t *
get_pfcp_session_data(pfcp_create_pdr_ie_t *create_pdr, pfcp_session_t *sess)
{
	pfcp_session_datat_t *session = NULL;

	/* Prefer the local F-TEID as the lookup key when the CP assigned one */
	if (create_pdr->pdi.local_fteid.teid) {
		session = get_sess_by_teid_entry(create_pdr->pdi.local_fteid.teid,
				&sess->sessions, SESS_CREATE);
		if (session == NULL) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to create the session for TEID:%u", LOG_VALUE,
					create_pdr->pdi.local_fteid.teid);
			return NULL;
		}
	} else if (create_pdr->pdi.ue_ip_address.header.len) {
		ue_ip_t ue_ip = {0};

		if (create_pdr->pdi.ue_ip_address.v4) {
			ue_ip.ue_ipv4 = create_pdr->pdi.ue_ip_address.ipv4_address;
			session = get_sess_by_ueip_entry(ue_ip, &sess->sessions, SESS_CREATE);
			if (session == NULL) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Failed to create the session for UE_IPv4:"IPV4_ADDR"", LOG_VALUE,
						IPV4_ADDR_HOST_FORMAT(create_pdr->pdi.ue_ip_address.ipv4_address));
				return NULL;
			}
		}

		if (create_pdr->pdi.ue_ip_address.v6) {
			/* Key holds only the IPv6 address (v4 field cleared first) */
			memset(&ue_ip, 0, sizeof(ue_ip_t));
			memcpy(ue_ip.ue_ipv6, create_pdr->pdi.ue_ip_address.ipv6_address, IPV6_ADDRESS_LEN);

			char ipv6[IPV6_STR_LEN];
			inet_ntop(AF_INET6, ue_ip.ue_ipv6, ipv6, IPV6_STR_LEN);

			if (create_pdr->pdi.ue_ip_address.v4) {
				int ret = 0;
				/* Dual-stack: the v4 branch above resolved the session; add a
				 * second hash entry so the same object is reachable by IPv6.
				 * Session Entry not present. Add new session entry */
				ret = rte_hash_add_key_data(sess_by_ueip_hash,
						&ue_ip, session);
				if (ret) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Failed to add entry for UE IPv4: "IPV4_ADDR" or IPv6 Addr: %s"
							", Error: %s\n", LOG_VALUE, IPV4_ADDR_HOST_FORMAT(ue_ip.ue_ipv4), ipv6,
							rte_strerror(abs(ret)));
					return NULL;
				}
			} else {
				/* IPv6-only: create/look up the session by the IPv6 key */
				session = get_sess_by_ueip_entry(ue_ip, &sess->sessions, SESS_CREATE);
				if (session == NULL) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Failed to create the session for IPv6 Addr: %s", LOG_VALUE,
							ipv6);
					return NULL;
				}
			}
		}
	} else {
		/* Neither key present: cannot identify a bearer ("TIED" = TEID typo) */
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"TIED and UE_IP_addr both are NULL \n", LOG_VALUE);
		return NULL;
	}

	return session;
}
/**
 * @brief : Handle a PFCP Session Establishment Request on the user plane:
 *          create/look up the UP session, decode the IMSI and CP addressing,
 *          instantiate PDR/FAR/QER/URR rules per Create PDR IE, and (when
 *          built with USE_CSID) store and link the received FQ-CSIDs before
 *          filling the response SEIDs.
 * @param : sess_req, decoded PFCP Session Establishment Request
 * @param : sess_rsp, response message being populated
 * @param : peer_addr, transport address the request arrived from
 * @return : Returns 0 in case of success , -1 otherwise
 */
int8_t
process_up_session_estab_req(pfcp_sess_estab_req_t *sess_req,
		pfcp_sess_estab_rsp_t *sess_rsp, peer_addr_t *peer_addr)
{
	node_address_t cp_node_addr = {0};
	pfcp_session_t *sess = NULL;

	if (sess_req == NULL)
		return -1;

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
			"PFCP Session Establishment Request :: START \n", LOG_VALUE);
	/* Check Session ID is present or not in header */
	if (sess_req->header.s) {
		/* Check SEID is not ZERO */
		if (sess_req->header.seid_seqno.has_seid.seid != 0) {
			sess = get_sess_info_entry(sess_req->header.seid_seqno.has_seid.seid,
					SESS_CREATE);
		} else {
			/* Generate the Session ID for UP */
			sess = get_sess_info_entry(
					gen_up_sess_id(sess_req->cp_fseid.seid),
					SESS_CREATE);
		}
	} else {
		/* Generate the Session ID for UP */
		sess = get_sess_info_entry(gen_up_sess_id(0),
				SESS_CREATE);
	}

	if (sess == NULL)
		return -1;

	/* Optional User ID IE: decode the BCD IMSI into the session record */
	if(sess_req->user_id.header.len){
		uint64_t imsi;
		decode_imsi_to_u64(sess_req->user_id.imsi, sess_req->user_id.length_of_imsi, &imsi);
		sess->imsi = imsi;
	}

	/* Get the CP Session Id */
	get_cp_node_addr(&cp_node_addr, &sess_req->cp_fseid);
	memcpy(&sess->cp_node_addr,
			&cp_node_addr, sizeof(node_address_t));
	sess->cp_seid = sess_req->cp_fseid.seid;

	/* Record the CP transport endpoint (address from the Node ID IE,
	 * port from the actual peer socket address) */
	if(sess_req->node_id.node_id_type == NODE_ID_TYPE_TYPE_IPV4ADDRESS) {
		sess->cp_ip.ipv4.sin_family = AF_INET;
		sess->cp_ip.ipv4.sin_port = peer_addr->ipv4.sin_port;
		sess->cp_ip.ipv4.sin_addr.s_addr = sess_req->node_id.node_id_value_ipv4_address;
		sess->cp_ip.type = PDN_TYPE_IPV4;
	} else if (sess_req->node_id.node_id_type == NODE_ID_TYPE_TYPE_IPV6ADDRESS) {
		sess->cp_ip.ipv6.sin6_family = AF_INET6;
		sess->cp_ip.ipv6.sin6_port = peer_addr->ipv6.sin6_port;
		memcpy(sess->cp_ip.ipv6.sin6_addr.s6_addr, sess_req->node_id.node_id_value_ipv6_address, IPV6_ADDRESS_LEN);
		sess->cp_ip.type = PDN_TYPE_IPV6;
	} else {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
				"Invalid Node ID interface type is received\n",
				LOG_VALUE);
		return -1;
	}

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT": CP_Sess_ID: %lu, UP_Sess_ID:%lu\n",
			LOG_VALUE, sess->cp_seid, sess->up_seid);

	/* TODO: Export this function to make it generic across the establishment and modify request */
	/* Fill the info from PDR */
	for (int itr = 0; itr < sess_req->create_pdr_count; itr++) {
		pfcp_session_datat_t *session = NULL;

		/* Get the Session Object per PDR */
		session = get_pfcp_session_data(&sess_req->create_pdr[itr], sess);
		if (session == NULL)
			continue;

		/* Update the Session state */
		session->sess_state = IN_PROGRESS;

		/* Process the Create PDR info */
		if (process_create_pdr_info(&sess_req->create_pdr[itr],
					&session, sess)) {
			return -1;
		}

		/* Attach the FAR whose ID matches this PDR's FAR ID */
		for (int itr1 = 0; itr1 < sess_req->create_far_count; itr1++) {
			if (sess_req->create_pdr[itr].far_id.far_id_value ==
					sess_req->create_far[itr1].far_id.far_id_value) {
				/* Process the Create FAR info */
				if (process_create_far_info(&sess_req->create_far[itr1],
						&session, sess->up_seid, sess)) {
					return -1;
				}
			}
		}

		/* TODO: Remove the loops */
		/* Match every QER ID referenced by this PDR against the Create QER IEs */
		for (int itr2 = 0; itr2 < sess_req->create_pdr[itr].qer_id_count; itr2++) {
			for (int itr3 = 0; itr3 < sess_req->create_qer_count; itr3++) {
				if (sess_req->create_pdr[itr].qer_id[itr2].qer_id_value ==
						sess_req->create_qer[itr3].qer_id.qer_id_value) {
					if (process_create_qer_info(&sess_req->create_qer[itr3],
								&(session->pdrs[itr]).quer, &session, sess->cp_ip, sess->cp_seid)) {
						return -1;
					}
				}
			}
		}

		/* TODO: Remove the loops */
		/* Match every URR ID referenced by this PDR against the Create URR IEs */
		for (int itr3 = 0; itr3 < sess_req->create_pdr[itr].urr_id_count; itr3++) {
			for (int itr4 = 0; itr4 < sess_req->create_urr_count; itr4++) {
				if (sess_req->create_pdr[itr].urr_id[itr3].urr_id_value ==
						sess_req->create_urr[itr4].urr_id.urr_id_value) {
					urr_info_t urr = {0};
					/* Process the Create URR info */
					if (process_create_urr_info(&sess_req->create_urr[itr4],
								&urr, sess->cp_seid, sess->up_seid, sess->cp_ip)) {
						return -1;
					}
				}
			}
		}

		/* Maintain the teids in session level */
		if (sess_req->create_pdr[itr].pdi.local_fteid.teid) {
			sess->teids[sess->ber_cnt] = sess_req->create_pdr[itr].pdi.local_fteid.teid;
			sess->ber_cnt++;
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Checking Teid value:0x%x, counter:%u\n",
					LOG_VALUE, sess->teids[(sess->ber_cnt - 1)], (sess->ber_cnt - 1));
		}
	}

	/* Buffering defaults for downlink data notification */
	sess->bar.bar_id = sess_req->create_bar.bar_id.bar_id_value;
	sess->bar.dl_buf_suggstd_pckts_cnt.pckt_cnt_val = DL_PKTS_BUF_RING_SIZE;

#ifdef USE_CSID
	/* SGWC/SAEGWC FQ-CSID */
	sess->sgw_fqcsid = rte_zmalloc_socket(NULL, sizeof(fqcsid_t),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (sess->sgw_fqcsid == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to allocate the "
				"memory for SGW FQ-CSID entry\n", LOG_VALUE);
		return -1;
	}

	/* MME FQ-CSID */
	if (sess_req->mme_fqcsid.header.len) {
		if (sess_req->mme_fqcsid.number_of_csids) {
			sess->mme_fqcsid = rte_zmalloc_socket(NULL, sizeof(fqcsid_t),
					RTE_CACHE_LINE_SIZE, rte_socket_id());
			if (sess->mme_fqcsid == NULL) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Failed to allocate the memory for MME FQ-CSID entry\n",
						LOG_VALUE);
				return -1;
			}

			/* Stored the MME CSID by MME Node address */
			if (stored_recvd_peer_fqcsid(&sess_req->mme_fqcsid, sess->mme_fqcsid) < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Failed to Store MME CSID \n", LOG_VALUE);
				return -1;
			}
			/* Link session with Peer CSID */
			link_dp_sess_with_peer_csid(sess->mme_fqcsid, sess, SX_PORT_ID);
		}
	}

	/* SGW FQ-CSID */
	if (sess_req->sgw_c_fqcsid.header.len) {
		/* Extract the SGW-C node address from the FQ-CSID IE */
		node_address_t sgw_node_addr = {0};
		if (sess_req->sgw_c_fqcsid.fqcsid_node_id_type == IPV4_GLOBAL_UNICAST) {
			sgw_node_addr.ip_type = IPV4_TYPE;
			memcpy(&sgw_node_addr.ipv4_addr,
					&sess_req->sgw_c_fqcsid.node_address, IPV4_SIZE);
		} else {
			sgw_node_addr.ip_type = IPV6_TYPE;
			memcpy(&sgw_node_addr.ipv6_addr,
					&sess_req->sgw_c_fqcsid.node_address, IPV6_SIZE);
		}
		if (sess_req->sgw_c_fqcsid.number_of_csids) {
			/* Only map the CP node address to this IE when no PGW-C FQ-CSID
			 * is present (otherwise the PGW-C IE owns that mapping below) */
			if (sess_req->pgw_c_fqcsid.header.len == 0) {
				if (add_peer_addr_entry_for_fqcsid_ie_node_addr(
						&cp_node_addr, &(sess_req->sgw_c_fqcsid),
						SX_PORT_ID) < 0) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Failed to Store SGWC Address\n", LOG_VALUE);
					return -1;
				}
			}

			/* Stored the SGW CSID by SGW Node address */
			if (stored_recvd_peer_fqcsid(&sess_req->sgw_c_fqcsid, sess->sgw_fqcsid) < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Failed to Store SGWC CSID \n", LOG_VALUE);
				return -1;
			}
			/* Link session with Peer CSID */
			link_dp_sess_with_peer_csid(sess->sgw_fqcsid, sess, SX_PORT_ID);
		} else if (sess_req->sgw_c_fqcsid.node_address) {
			/* IE carries a node address but zero CSIDs: just record the node */
			fqcsid_t *tmp = NULL;
			tmp = get_peer_addr_csids_entry(&sgw_node_addr, ADD_NODE);
			if (tmp == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Failed to get "
						"CSID entry by SGW-C FQ-CSID while Processing UP Session "
						"Establishment Request, Error : %s \n",
						LOG_VALUE, strerror(errno));
				return -1;
			}
			memcpy(&(tmp->node_addr),
					&(sgw_node_addr), sizeof(node_address_t));
			memcpy(&(sess->sgw_fqcsid)->node_addr,
					&tmp->node_addr, sizeof(node_address_t));
		}
	}

	/* PGW FQ-CSID */
	if (sess_req->pgw_c_fqcsid.header.len) {
		/* PGWC FQ-CSID */
		sess->pgw_fqcsid = rte_zmalloc_socket(NULL, sizeof(fqcsid_t),
				RTE_CACHE_LINE_SIZE, rte_socket_id());
		if (sess->pgw_fqcsid == NULL) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to allocate the memory for fqcsids entry\n",
					LOG_VALUE);
			return -1;
		}

		/* Extract the PGW-C node address from the FQ-CSID IE */
		node_address_t pgw_node_addr = {0};
		if (sess_req->pgw_c_fqcsid.fqcsid_node_id_type == IPV4_GLOBAL_UNICAST) {
			pgw_node_addr.ip_type = IPV4_TYPE;
			memcpy(&pgw_node_addr.ipv4_addr,
					&sess_req->pgw_c_fqcsid.node_address, IPV4_SIZE);
		} else {
			pgw_node_addr.ip_type = IPV6_TYPE;
			memcpy(&pgw_node_addr.ipv6_addr,
					&sess_req->pgw_c_fqcsid.node_address, IPV6_SIZE);
		}

		if (sess_req->pgw_c_fqcsid.number_of_csids) {
			int ret = add_peer_addr_entry_for_fqcsid_ie_node_addr(
					&cp_node_addr, &sess_req->pgw_c_fqcsid,
					SX_PORT_ID);
			if (ret < 0)
				return ret;

			/* Stored the PGWC CSID by PGW Node address */
			if (stored_recvd_peer_fqcsid(&sess_req->pgw_c_fqcsid, sess->pgw_fqcsid) < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Failed to Store PGWC CSID \n", LOG_VALUE);
				return -1;
			}
			/* Link session with Peer CSID */
			link_dp_sess_with_peer_csid(sess->pgw_fqcsid, sess, SX_PORT_ID);
		} else if (sess_req->pgw_c_fqcsid.node_address) {
			/* IE carries a node address but zero CSIDs: just record the node */
			fqcsid_t *tmp = NULL;
			tmp = get_peer_addr_csids_entry(&pgw_node_addr, ADD_NODE);
			if (tmp == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Failed to get "
						"CSID entry by PGW-C FQ-CSID while Processing UP Session "
						"Establishment Request, Error : %s \n",
						LOG_VALUE, strerror(errno));
				return -1;
			}
			memcpy(&tmp->node_addr, &pgw_node_addr, sizeof(node_address_t));
			memcpy(&(sess->pgw_fqcsid)->node_addr,
					&pgw_node_addr, sizeof(node_address_t));
		}
	}

	/* Allocate the memory User-Plane CSID */
	sess->up_fqcsid = rte_zmalloc_socket(NULL, sizeof(fqcsid_t),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (sess->up_fqcsid == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to allocate the memory for SGW-U FQ-CSID entry\n",
				LOG_VALUE);
		return -1;
	}

	/* Own node address follows the CP address family */
	if ((sess->cp_node_addr).ip_type == IPV4_TYPE) {
		/* Add the User-plane Node Address */
		(sess->up_fqcsid)->node_addr.ip_type = IPV4_TYPE;
		(sess->up_fqcsid)->node_addr.ipv4_addr = dp_comm_ip.s_addr;
	} else {
		/* Add the User-plane Node Address */
		(sess->up_fqcsid)->node_addr.ip_type = IPV6_TYPE;
		memcpy(&(sess->up_fqcsid)->node_addr.ipv6_addr,
				&dp_comm_ipv6.s6_addr, IPV6_ADDRESS_LEN);
	}

	int indx = 0;
	/* Add the entry for peer nodes */
	indx = fill_peer_node_info_t(sess, &cp_node_addr);
	if (indx < 0) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to fill peer node info and assignment of the CSID Error: %s\n",
				LOG_VALUE, strerror(errno));
		return -1;
	}

	/* Add entry for cp session id with link local csid */
	sess_csid *tmp1 = NULL;
	tmp1 = get_sess_csid_entry(
			(sess->up_fqcsid)->local_csid[(sess->up_fqcsid)->num_csid - 1], ADD_NODE);
	if (tmp1 == NULL) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Failed to get "
				"PGW-U CSID entry while Processing UP Session "
				"Establishment Request, CSID:%u, Error : %s \n",
				LOG_VALUE, (sess->up_fqcsid)->local_csid[(sess->up_fqcsid)->num_csid - 1],
				strerror(errno));
		return -1;
	}

	/* Link local csid with session id */
	/* Check head node created ot not */
	if(tmp1->up_seid != sess->up_seid && tmp1->up_seid != 0) {
		sess_csid *new_node = NULL;
		/* Add new node into csid linked list */
		new_node = add_sess_csid_data_node(tmp1,
				(sess->up_fqcsid)->local_csid[(sess->up_fqcsid)->num_csid - 1]);
		if(new_node == NULL ) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to ADD new node into CSID"
					"linked list : %s\n", LOG_VALUE);
			return -1;
		} else {
			new_node->cp_seid = sess->cp_seid;
			new_node->up_seid = sess->up_seid;
		}
	} else {
		/* First session on this CSID: reuse the head node */
		tmp1->cp_seid = sess->cp_seid;
		tmp1->up_seid = sess->up_seid;
		tmp1->next = NULL;
	}

	/* Fill the fqcsid into the session est response */
	if (fill_fqcsid_sess_est_rsp(sess_rsp, sess)) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to fill FQ-CSID in Sess EST Resp ERROR: %s\n",
				LOG_VALUE,
				strerror(errno));
		return -1;
	}
#endif /* USE_CSID */

	/* Update the UP session id in the response */
	sess_rsp->up_fseid.seid = sess->up_seid;
	sess_req->header.seid_seqno.has_seid.seid = sess->up_seid;

	/* Update the CP seid in the response packet */
	sess_rsp->header.seid_seqno.has_seid.seid = sess->cp_seid;

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"PFCP Session Establishment Request :: END \n", LOG_VALUE);
	return 0;
}
/**
* @brief : Process update far info
* @param : far, hold create far info
* @param : up_seid, session id
* @param : sess, pfcp_session
* @return : Returns 0 in case of success , -1 otherwise
*/
static int8_t
process_update_far_info(pfcp_update_far_ie_t *far, uint64_t up_seid,
pfcp_session_t *sess)
{
node_address_t peer_addr = {0};
far_info_t *far_t = NULL;
/* M: FAR ID */
if (far->far_id.header.len) {
/* Get allocated memory location */
far_t = get_far_info_entry(far->far_id.far_id_value, sess->cp_ip, sess->cp_seid);
}
/* Check far entry found or not */
if (far_t == NULL)
return -1;
/* M: Apply Action */
if (far->apply_action.header.len) {
/* Stop lawful interception request */
if ((NOT_PRESENT == far->apply_action.dupl) &&
(PRESENT == far_t->actions.dupl)) {
far_t->dup_parms_cnt = 0;
far_t->li_config_cnt = 0;
sess->li_sx_config_cnt = 0;
memset(far_t->li_config, 0, MAX_LI_ENTRIES_PER_UE * sizeof(li_config_t));
memset(sess->li_sx_config, 0, MAX_LI_ENTRIES_PER_UE * sizeof(li_sx_config_t));
}
if (far_apply_action(&far->apply_action, &far_t->actions)) {
/* TODO: Error Handling */
}
}
/* Update Forwarding Parameters */
if (far->upd_frwdng_parms.header.len) {
/* pfcpsmreq_flags: */
if (far->upd_frwdng_parms.pfcpsmreq_flags.header.len) {
/* TODO: Add support for IPv6 */
/* X2 Handover: Send the endmarker packet to old eNB*/
if (far->upd_frwdng_parms.pfcpsmreq_flags.sndem) {
if (sess_modify_with_endmarker(far_t)) {
/* TODO: ERROR Handling */
}
}
}
/* M: Destination Interface */
if (far->upd_frwdng_parms.dst_intfc.header.len) {
/* Destination Interface */
far_t->frwdng_parms.dst_intfc.interface_value =
far->upd_frwdng_parms.dst_intfc.interface_value;
}
/* Outer Header Creation */
if (far->upd_frwdng_parms.outer_hdr_creation.header.len) {
if (far->upd_frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv4) {
far_t->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc = GTPU_UDP_IPv4;
/* TODO: Need to validate this logic*/
/* Linked Outer header Creation with Session */
if (far->upd_frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv4 ==
OUT_HDR_DESC_VAL) {
(far_t->session)->hdr_crt = GTPU_UDP_IPv4;
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Outer Header Desciprition(GTPU_UDP_IPv4) : %u\n",
LOG_VALUE,
far->upd_frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc);
}
} else if (far->upd_frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv6) {
far_t->frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc = GTPU_UDP_IPv6;
/* Linked Outer header Creation with Session */
if (far->upd_frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.gtpu_udp_ipv6 ==
OUT_HDR_DESC_VAL) {
(far_t->session)->hdr_crt = GTPU_UDP_IPv6;
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"Outer Header Desciprition(GTPU_UDP_IPv6) : %u\n",
LOG_VALUE,
far->upd_frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc);
}
}
/* TEID */
far_t->frwdng_parms.outer_hdr_creation.teid =
far->upd_frwdng_parms.outer_hdr_creation.teid;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"FAR Teid : %u\n",
LOG_VALUE, far->upd_frwdng_parms.outer_hdr_creation.teid);
/* Customer-VLAN Tag */
far_t->frwdng_parms.outer_hdr_creation.ctag =
far->upd_frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.ctag;
/* Service-VLAN Tag */
far_t->frwdng_parms.outer_hdr_creation.stag =
far->upd_frwdng_parms.outer_hdr_creation.outer_hdr_creation_desc.stag;
/* Port Number */
far_t->frwdng_parms.outer_hdr_creation.port_number =
far->upd_frwdng_parms.outer_hdr_creation.port_number;
if(far->upd_frwdng_parms.outer_hdr_creation.ipv4_address != 0) {
/* IPv4 Address */
far_t->frwdng_parms.outer_hdr_creation.ip_type = IPV4_TYPE;
far_t->frwdng_parms.outer_hdr_creation.ipv4_address =
far->upd_frwdng_parms.outer_hdr_creation.ipv4_address;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"FAR Dst Ipv4 Address :"
IPV4_ADDR"\n", LOG_VALUE,
IPV4_ADDR_HOST_FORMAT(far_t->frwdng_parms.outer_hdr_creation.ipv4_address));
} else if(far->upd_frwdng_parms.outer_hdr_creation.ipv6_address != NULL) {
far_t->frwdng_parms.outer_hdr_creation.ip_type = IPV6_TYPE;
memcpy(far_t->frwdng_parms.outer_hdr_creation.ipv6_address,
far->upd_frwdng_parms.outer_hdr_creation.ipv6_address,
IPV6_ADDRESS_LEN);
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"FAR Dst Ipv6 Address :"
IPv6_FMT"\n", LOG_VALUE,
IPv6_PRINT(IPv6_CAST(far_t->frwdng_parms.outer_hdr_creation.ipv6_address)));
}
}
uint8_t tmp_ipv6[IPV6_ADDR_LEN] = {0};
if (far->upd_frwdng_parms.dst_intfc.interface_value == ACCESS ) {
/* Add eNB peer node information in connection table */
if ((far->upd_frwdng_parms.outer_hdr_creation.ipv4_address != 0) ||
((memcmp(&far->upd_frwdng_parms.outer_hdr_creation.ipv6_address,
&tmp_ipv6, IPV6_ADDR_LEN)))) {
if (far->upd_frwdng_parms.outer_hdr_creation.ipv4_address) {
#ifdef USE_REST
/* Add the peer node connection entry */
memset(&peer_addr, 0, sizeof(node_address_t));
peer_addr.ip_type = IPV4_TYPE;
peer_addr.ipv4_addr = far->upd_frwdng_parms.outer_hdr_creation.ipv4_address;
if ((add_node_conn_entry(peer_addr, up_seid, S1U_PORT_ID)) < 0) {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT":Failed to add connection entry for eNB\n",
LOG_VALUE);
}
#endif /* USE_REST */
far_t->session->wb_peer_ip_addr.ip_type |= PDN_TYPE_IPV4;
far_t->session->wb_peer_ip_addr.ipv4_addr = far->upd_frwdng_parms.outer_hdr_creation.ipv4_address;
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"MBR: West Bound Peer IPv4 Node Addr:"IPV4_ADDR"\n",
LOG_VALUE, IPV4_ADDR_HOST_FORMAT((far_t->session)->wb_peer_ip_addr.ipv4_addr));
} else {
/* TODO:PATH: Add the connection entry */
far_t->session->wb_peer_ip_addr.ip_type |= PDN_TYPE_IPV6;
memcpy(far_t->session->wb_peer_ip_addr.ipv6_addr,
far->upd_frwdng_parms.outer_hdr_creation.ipv6_address,
IPV6_ADDRESS_LEN);
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"MBR: West Bound Peer IPv6 Node Addr:"IPv6_FMT"\n",
LOG_VALUE,
IPv6_PRINT(IPv6_CAST(far_t->session)->wb_peer_ip_addr.ipv6_addr));
#ifdef USE_REST
/* Add the peer node connection entry */
memset(&peer_addr, 0, sizeof(node_address_t));
peer_addr.ip_type = IPV6_TYPE;
memcpy(peer_addr.ipv6_addr,
far->upd_frwdng_parms.outer_hdr_creation.ipv6_address, IPV6_ADDR_LEN);
if ((add_node_conn_entry(peer_addr, up_seid, S1U_PORT_ID)) < 0) {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT":Failed to add connection entry for eNB\n",
LOG_VALUE);
}
#endif /* USE_REST */
}
/* Update the Session state */
if (!far->upd_frwdng_parms.outer_hdr_creation.teid) {
if ((far_t->session)->sess_state == CONNECTED) {
(far_t->session)->sess_state = IDLE;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Session State Change : "
"CONNECTED --> IDLE\n", LOG_VALUE);
}
} else {
switch((far_t->session)->sess_state) {
case IDLE:
{
(far_t->session)->sess_state = CONNECTED;
clLog(clSystemLog, eCLSeverityDebug,LOG_FORMAT "Session State Change : "
"IDLE --> CONNECTED\n", LOG_VALUE);
}
break;
case IN_PROGRESS:
{
/* TODO: DDN Support for IPv6 */
/** Resolved queued pkts by dl core and enqueue pkts into notification ring */
struct rte_mbuf *buf_pkt =
rte_ctrlmbuf_alloc(notify_msg_pool);
if (buf_pkt == NULL) {
clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT
"Failed to Allocate a new mbuf from mempool \n", LOG_VALUE);
}
if (buf_pkt != NULL) {
uint32_t *key =
rte_pktmbuf_mtod(buf_pkt, uint32_t *);
if ((far_t->session)->pdrs) {
if ((far_t->session)->pdrs->pdi.local_fteid.teid) {
*key = (far_t->session)->pdrs->pdi.local_fteid.teid;
} else if ((far_t->session)->pdrs->pdi.ue_addr.ipv4_address) {
*key = (far_t->session)->pdrs->pdi.ue_addr.ipv4_address;
}
} else {
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"ERROR: PDRs value is NULL\n", LOG_VALUE);
break;
}
rte_ring_enqueue(notify_ring,
buf_pkt);
(far_t->session)->sess_state = CONNECTED;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Session State Change : "
"IN_PROGRESS --> CONNECTED\n", LOG_VALUE);
}
}
break;
default:
clLog(clSystemLog, eCLSeverityDebug,LOG_FORMAT "No state change\n", LOG_VALUE);
}
}
}
} else {
/* Add S5S8 peer node information in connection table */
if ((far->upd_frwdng_parms.outer_hdr_creation.ipv4_address != 0) ||
((memcmp(&far->upd_frwdng_parms.outer_hdr_creation.ipv6_address,
&tmp_ipv6, IPV6_ADDR_LEN)))) {
if (far->upd_frwdng_parms.outer_hdr_creation.ipv4_address) {
#ifdef USE_REST
/* Add the peer node connection entry */
memset(&peer_addr, 0, sizeof(node_address_t));
peer_addr.ip_type = IPV4_TYPE;
peer_addr.ipv4_addr = far->upd_frwdng_parms.outer_hdr_creation.ipv4_address;
if ((add_node_conn_entry(peer_addr, up_seid, SGI_PORT_ID)) < 0) {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT":Failed to add connection entry for S5S8\n",
LOG_VALUE);
}
#endif /* USE_REST */
far_t->session->eb_peer_ip_addr.ip_type |= PDN_TYPE_IPV4;
far_t->session->eb_peer_ip_addr.ipv4_addr =
far->upd_frwdng_parms.outer_hdr_creation.ipv4_address;
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"MBR: West Bound Peer IPv4 Node Addr:"IPV4_ADDR"\n",
LOG_VALUE, IPV4_ADDR_HOST_FORMAT(far_t->session->eb_peer_ip_addr.ipv4_addr));
} else {
/* TODO:PATH MANG: Add the entry for IPv6 Address */
far_t->session->eb_peer_ip_addr.ip_type |= PDN_TYPE_IPV6;
memcpy(far_t->session->eb_peer_ip_addr.ipv6_addr,
far->upd_frwdng_parms.outer_hdr_creation.ipv6_address,
IPV6_ADDRESS_LEN);
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT"MBR: West Bound Peer IPv6 Node Addr:"IPv6_FMT"\n",
LOG_VALUE,
IPv6_PRINT(IPv6_CAST(far_t->session->eb_peer_ip_addr.ipv6_addr)));
#ifdef USE_REST
/* Add the peer node connection entry */
memset(&peer_addr, 0, sizeof(node_address_t));
peer_addr.ip_type = IPV6_TYPE;
memcpy(peer_addr.ipv6_addr,
far->upd_frwdng_parms.outer_hdr_creation.ipv6_address, IPV6_ADDR_LEN);
if ((add_node_conn_entry(peer_addr, up_seid, SGI_PORT_ID)) < 0) {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT":Failed to add connection entry for S5S8\n",
LOG_VALUE);
}
#endif /* USE_REST */
}
/* Update the Session state */
if (far->upd_frwdng_parms.outer_hdr_creation.teid != 0) {
(far_t->session)->sess_state = CONNECTED;
clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Session State Change to : "
"CONNECTED\n", LOG_VALUE);
}
}
}
}
/* Buffering Action Rule Identifier */
if (far->bar_id.header.len) {
/* TODO: Implement Handling */
}
/* Duplicating Parameters */
if (far->upd_dupng_parms_count > 0) {
fill_li_update_duplicating_param(far, far_t, sess);
}
return 0;
}
/**
 * @brief Fill one Usage Report grouped IE (IE_USAGE_RPT_SESS_MOD_RSP) of a
 *        PFCP Session Modification Response from a URR's accumulated state.
 *
 * The report is always emitted with the TERMR (termination report) trigger
 * bit set.  Volume and/or duration measurements are included according to
 * the URR's measurement method.  After the report is built, the URR's
 * start/first-packet/last-packet timestamps are re-armed for the next
 * reporting window, and - for time-based measurement - the periodic timer
 * entry keyed by (urr_id, cp_seid) is stopped, de-initialised and removed
 * from timer_by_id_hash.
 *
 * @param usage_report  Usage Report IE to populate (output).
 * @param urr           URR whose counters/timestamps are reported; mutated
 *                      (sequence number post-incremented, timestamps reset).
 * @param cp_seid       CP session ID; half of the timer hash key.
 * @return accumulated IE payload length on success, -1 if the timer hash
 *         entry could not be deleted.
 *
 * NOTE(review): the accumulated length is held in an int8_t; confirm the
 * summed IE sizes can never exceed 127 bytes.
 */
int8_t
fill_sess_mod_usage_report(pfcp_usage_rpt_sess_mod_rsp_ie_t *usage_report,
		urr_info_t *urr, uint64_t cp_seid)
{
	int8_t size = 0;
	int ret = 0;
	struct timeval epoc_start_time;
	struct timeval epoc_end_time;
	peerEntry *data = NULL;
	uint32_t end_time = 0;
	/* URR Identifier */
	size += set_urr_id(&usage_report->urr_id, urr->urr_id);
	/* Usage report sequence number; post-incremented per emitted report */
	pfcp_set_ie_header(&(usage_report->urseqn.header), PFCP_IE_URSEQN,
			(sizeof(pfcp_urseqn_ie_t) - sizeof(pfcp_ie_header_t)));
	size += sizeof(pfcp_urseqn_ie_t);
	usage_report->urseqn.urseqn = urr->urr_seq_num++;
	/* Usage report trigger: TERMR = report due to rule/session termination */
	pfcp_set_ie_header(&(usage_report->usage_rpt_trig.header), PFCP_IE_USAGE_RPT_TRIG,
			(sizeof(pfcp_usage_rpt_trig_ie_t) - sizeof(pfcp_ie_header_t)));
	size += sizeof(pfcp_usage_rpt_trig_ie_t);
	usage_report->usage_rpt_trig.termr = 1;
	/* Volume measurement: uplink/downlink/total octet counters */
	if(urr->meas_method == VOL_TIME_BASED ||
			urr->meas_method == VOL_BASED){
		size += set_volume_measurment(&usage_report->vol_meas);
		usage_report->vol_meas.uplink_volume = urr->uplnk_data;
		usage_report->vol_meas.downlink_volume = urr->dwnlnk_data;
		usage_report->vol_meas.total_volume = urr->dwnlnk_data + urr->uplnk_data;
	}
	/* Duration measurement: seconds between URR start time and "now";
	 * both NTP timestamps are converted to UNIX time first */
	if(urr->meas_method == TIME_BASED || urr->meas_method == VOL_TIME_BASED) {
		end_time = current_ntp_timestamp();
		size += set_duration_measurment(&usage_report->dur_meas);
		ntp_to_unix_time(&urr->start_time, &epoc_start_time);
		ntp_to_unix_time(&end_time, &epoc_end_time);
		usage_report->dur_meas.duration_value = epoc_end_time.tv_sec - epoc_start_time.tv_sec;
	}
	/* Reporting-window timestamps - read from the URR BEFORE it is
	 * re-armed below */
	size += set_start_time(&usage_report->start_time);
	size += set_end_time(&usage_report->end_time);
	size += set_first_pkt_time(&usage_report->time_of_frst_pckt);
	size += set_last_pkt_time(&usage_report->time_of_lst_pckt);
	usage_report->start_time.start_time = urr->start_time;
	usage_report->end_time.end_time = current_ntp_timestamp();
	usage_report->time_of_frst_pckt.time_of_frst_pckt = urr->first_pkt_time;
	usage_report->time_of_lst_pckt.time_of_lst_pckt = urr->last_pkt_time;
	/* Re-arm the URR for the next reporting window */
	urr->start_time = current_ntp_timestamp();
	urr->first_pkt_time = 0;
	urr->last_pkt_time = 0;
	/* Timer entries are keyed by (URR id, CP SEID) */
	rule_key hash_key = {0};
	hash_key.id = urr->urr_id;
	hash_key.cp_seid = cp_seid;
	/* Grouped-IE header carries the total accumulated payload length */
	pfcp_set_ie_header(&usage_report->header, IE_USAGE_RPT_SESS_MOD_RSP, size);
	/* remove the periodic reporting timer from the hash, if any */
	if(urr->meas_method == TIME_BASED || urr->meas_method == VOL_TIME_BASED) {
		ret = rte_hash_lookup_data(timer_by_id_hash,
				&hash_key, (void **)&data);
		if (ret >= 0) {
			if(data->pt.ti_id != 0) {
				stoptimer(&data->pt.ti_id);
				deinittimer(&data->pt.ti_id);
				/* URR Entry is present. Delete Session Entry */
				ret = rte_hash_del_key(timer_by_id_hash, &hash_key);
				if ( ret < 0) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Timer Entry "
							"not found for URR_ID:%u\n", LOG_VALUE, urr->urr_id);
					/* NOTE(review): 'data' is not freed on this error path -
					 * confirm the leak is acceptable */
					return -1;
				}
				if (data != NULL) {
					rte_free(data);
					data = NULL;
				}
			}
		}
	}
	return size;
}
/**
 * @brief Remove one PDR (and its dependent FAR/QER/URR state) from a UP
 *        session while handling a Session Modification Request.
 *
 * Walks every session-data node of the session identified by up_seid and,
 * for the PDR id carried in the Remove PDR IE:
 *  - emits one usage report per attached URR into sess_mod_rsp,
 *  - removes the PDR's SDF filter rules from the ACL table and compacts
 *    the session's ACL index array,
 *  - deletes (or just de-references, when shared) the linked FAR and URRs,
 *  - deletes all linked QERs,
 *  - removes the teid-keyed session entry when this PDR owns a local
 *    F-TEID and is the last node,
 *  - unlinks and deletes the PDR entry itself,
 * and finally releases session-data nodes left without any PDRs.
 *
 * @param remove_pdr    Remove PDR IE naming the rule to delete.
 * @param up_seid       UP session ID used for the session lookup.
 * @param sess_mod_rsp  Response under construction; usage reports appended.
 * @param cp_ip         CP address, part of the per-CP hash keys.
 * @return 0 on success, -1 on any lookup/delete failure.
 */
int8_t
process_remove_pdr_sess(pfcp_remove_pdr_ie_t *remove_pdr, uint64_t up_seid,
		pfcp_sess_mod_rsp_t *sess_mod_rsp, peer_addr_t cp_ip)
{
	int ret = 0;
	uint8_t uiFlag = 0;
	pfcp_session_t *sess = NULL;
	struct sdf_pkt_filter pkt_filter = {0};
	/* Get the session information from session table based on UP_SESSION_ID*/
	sess = get_sess_info_entry(up_seid, SESS_MODIFY);
	if (sess == NULL)
		return -1;
	/* Flush the Session data info from the hash tables based on teid*/
	pfcp_session_datat_t *session = sess->sessions;
	/* Cleanup the session data form hash table and delete the node from linked list */
	while (NULL != session) {
		/* Cleanup PDRs info from the linked list */
		pdr_info_t *pdr = session->pdrs;
		while (NULL != pdr) {
			if (remove_pdr->pdr_id.rule_id == pdr->rule_id) {
				/* Re-fetch the PDR from the hash so the entry about to be
				 * deleted is the canonical one (replaces the walk pointer) */
				pdr = get_pdr_info_entry(
						remove_pdr->pdr_id.rule_id, NULL,SESS_MODIFY, cp_ip, sess->cp_seid);
				if (pdr == NULL)
					return -1;
				/* One usage report per URR attached to this PDR */
				for(int itr = 0; itr < pdr->urr_count; itr++){
					fill_sess_mod_usage_report(&sess_mod_rsp->usage_report[sess_mod_rsp->usage_report_count++],
							&pdr->urr[itr], sess->cp_seid);
				}
				//Remove Entry from ACL Table
				for(int itr = 0; itr < pdr->pdi.sdf_filter_cnt; itr++){
					pkt_filter.precedence = pdr->prcdnc_val;
					/* Reset the rule string */
					memset(pkt_filter.u.rule_str, 0, MAX_LEN);
					/* flow description */
					if (pdr->pdi.sdf_filter[itr].fd) {
						memcpy(&pkt_filter.u.rule_str, &pdr->pdi.sdf_filter[itr].flow_desc,
								pdr->pdi.sdf_filter[itr].len_of_flow_desc);
						pkt_filter.rule_ip_type = get_rule_ip_type(pkt_filter.u.rule_str);
						if (!pdr->pdi.src_intfc.interface_value) {
							/* swap the src and dst address for UL traffic.*/
							swap_src_dst_ip(&pkt_filter.u.rule_str[0]);
						}
						int flag = 0;
						int32_t indx = get_acl_table_indx(&pkt_filter, SESS_DEL);
						/* Locate the ACL table index and shift the remaining
						 * entries left to compact the array.
						 * NOTE(review): inner 'itr' shadows the sdf_filter
						 * loop variable - legal, but confirm intent. */
						for(uint16_t itr = 0; itr < session->acl_table_count; itr++){
							if(session->acl_table_indx[itr] == indx){
								flag = 1;
							}
							if(flag && itr != session->acl_table_count - 1)
								session->acl_table_indx[itr] = session->acl_table_indx[itr+1];
						}
						/* NOTE(review): index 0 is treated as invalid here
						 * ('indx > 0') - confirm 0 can never be a valid
						 * ACL table index. */
						if(flag == 1 && indx > 0){
							if (remove_rule_entry_acl(indx, &pkt_filter)) {
								/* TODO: ERROR handling */
							}else{
								session->acl_table_indx[session->acl_table_count] = 0;
								session->acl_table_count--;
							}
						}
					}
				}
				far_info_t *far = pdr->far;
				/* Cleanup the FAR information */
				if (far != NULL) {
					/* FAR may be shared by several PDRs; only delete the
					 * hash entry when this is the last reference */
					if(far->pdr_count > 1){
						far->pdr_count--;
					}else{
						/* Flush the far info from the hash table */
						ret = del_far_info_entry(far->far_id_value, cp_ip, sess->cp_seid);
						if (ret) {
							clLog(clSystemLog, eCLSeverityDebug,
									"DP:"LOG_FORMAT"Entry not found for FAR_ID:%u...\n",
									LOG_VALUE, far->far_id_value);
							return -1;
						}
					}
				}
				/* Cleanup QERs info from the linked list */
				qer_info_t *qer = pdr->quer;
				while (qer != NULL) {
					/* Get QER ID */
					uint32_t qer_id = qer->qer_id;
					/* Delete the QER info node from the linked list */
					pdr->quer = remove_qer_node(pdr->quer, qer);
					qer = pdr->quer;
					/* Flush the QER info from the hash table */
					ret = del_qer_info_entry(qer_id, cp_ip, sess->cp_seid);
					if ( ret < 0) {
						clLog(clSystemLog, eCLSeverityDebug,
								LOG_FORMAT"Entry not found for QER_ID:%u...\n",
								LOG_VALUE, qer_id);
						return -1;
					}
				}
				/* Cleanup URRs info from the linked list */
				urr_info_t *urr = pdr->urr;
				while (urr != NULL) {
					/* URR may be shared; only delete on the last reference */
					if(urr->pdr_count > 1){
						urr->pdr_count--;
						urr = urr->next;
					}else{
						/* Get URR ID */
						uint32_t urr_id = urr->urr_id;
						/* Delete the URR info node from the linked list */
						pdr->urr = remove_urr_node(pdr->urr, urr);
						urr = pdr->urr;
						/* Flush the URR info from the hash table */
						if (del_urr_info_entry(urr_id, cp_ip, sess->cp_seid)) {
							/* TODO : ERROR Handling */
						}
					}
				}
				/* Last PDR owning a local F-TEID: drop the teid-keyed
				 * session entry and clear the teid from the session list */
				if (pdr->pdi.local_fteid.teid && pdr->next == NULL) {
					if (del_sess_by_teid_entry(pdr->pdi.local_fteid.teid)) {
						/* TODO : ERROR Handling */
					}else{
						for (int itr1 = 0; itr1 < sess->ber_cnt; itr1++) {
							if (pdr->pdi.local_fteid.teid == sess->teids[itr1]) {
								sess->teids[itr1] = 0;
							}
						}
					}
				}
				/* Cleanup PDRs info from the linked list */
				/* Get PDR ID */
				uint32_t pdr_id = pdr->rule_id;
				/* Delete the PDR info node from the linked list */
				session->pdrs = remove_pdr_node(session->pdrs, pdr);
				/* Flush the PDR info from the hash table */
				ret = del_pdr_info_entry(pdr_id, cp_ip, sess->cp_seid);
				if (ret) {
					clLog(clSystemLog, eCLSeverityDebug,
							LOG_FORMAT"Entry not found for PDR_ID:%u...\n",
							LOG_VALUE, pdr_id);
					return -1;
				}
				/* Restart the PDR walk from the (modified) list head */
				uiFlag = 1;
				pdr = session->pdrs;
				continue;
			}
			pdr = pdr->next;
		}
		/* A PDR was removed and none remain: release this session-data node */
		if ((1 == uiFlag) && (NULL == session->pdrs)) {
			/* Delete the Session data info node from the linked list */
			sess->sessions = remove_sess_data_node(sess->sessions, session);
			if (sess->sessions != NULL) {
				session = sess->sessions;
			}
			uiFlag = 0;
		} else {
			session = session->next;
		}
	}
	return 0;
}
/**
 * @brief Handle a PFCP Session Modification Request on the user plane.
 *
 * Looks up the UP session by the SEID carried in the message header, then
 * applies, in order:
 *  - pfcpsmreq_flags.drobu: free any buffered downlink packet rings,
 *  - a CP F-SEID change (the CP may re-key the session),
 *  - Create PDR / FAR / QER / URR IEs (mirrors establishment handling),
 *  - Update FAR and Update PDR IEs,
 *  - Remove PDR IEs (usage reports are appended to sess_mod_rsp),
 *  - (USE_CSID) SGW-C / PGW-C FQ-CSID re-linking and local CSID upkeep.
 *
 * @param sess_mod_req  Parsed modification request (input).
 * @param sess_mod_rsp  Response under construction; FQ-CSID may be filled
 *                      in, and its header SEID is set to the CP SEID.
 * @return 0 on success, -1 on any lookup/allocation/processing failure.
 */
int8_t
process_up_session_modification_req(pfcp_sess_mod_req_t *sess_mod_req,
		pfcp_sess_mod_rsp_t *sess_mod_rsp)
{
	node_address_t cp_node_addr = {0};
	pfcp_session_t *sess = NULL;
	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"PFCP Session Modification Request :: START \n", LOG_VALUE);
	/* Get the session information from session table based on UP_SESSION_ID*/
	if (sess_mod_req->header.s) {
		/* Check SEID is not ZERO */
		sess = get_sess_info_entry(sess_mod_req->header.seid_seqno.has_seid.seid,
				SESS_MODIFY);
	}
	if (sess == NULL)
		return -1;
	/* pfcpsmreq_flags: Dropped the bufferd packets */
	if (sess_mod_req->pfcpsmreq_flags.drobu) {
		/* Free the downlink data rings */
		//rte_free();
		struct rte_ring *ring = NULL;
		struct pfcp_session_datat_t *si = NULL;
		si = sess->sessions;
		while (si != NULL) {
			/* Delete dl ring which created by default if present */
			ring = si->dl_ring;
			if (ring) {
				/* NOTE(review): rte_ring_dequeue() returns 0 or -ENOENT,
				 * so comparing against positive ENOENT means this branch
				 * never executes; if it did, 'ring' would be freed here
				 * AND again just below (si->dl_ring) - confirm intent. */
				if (rte_ring_dequeue(dl_ring_container, (void **)&ring) ==
						ENOENT) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Can't put ring back, so free it\n", LOG_VALUE);
					rte_ring_free(ring);
				}
				rte_ring_free(si->dl_ring);
				si->dl_ring = NULL;
			}
			si = si->next;
		}
	}
	/* Scenario CP Changes it's SEID */
	get_cp_node_addr(&cp_node_addr, &sess_mod_req->cp_fseid);
	if (sess->cp_seid != sess_mod_req->cp_fseid.seid) {
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"CP Session IP Changed CP_Old_Seid: %lu, CP_New_Seid:%lu\n",
				LOG_VALUE, sess->cp_seid, sess_mod_req->cp_fseid.seid);
		sess->cp_seid = sess_mod_req->cp_fseid.seid;
	}
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT": CP_Sess_ID: %lu, UP_Sess_ID:%lu\n",
			LOG_VALUE, sess->cp_seid, sess->up_seid);
	/* TODO: Export this function to make it generic across the establishment and modify request */
	/* Fill the info from PDR */
	for (int itr = 0; itr < sess_mod_req->create_pdr_count; itr++) {
		pfcp_session_datat_t *session = NULL;
		/* Get the Session Object per PDR */
		session = get_pfcp_session_data(&sess_mod_req->create_pdr[itr], sess);
		if (session == NULL)
			continue;
		/* Update the Session state */
		session->sess_state = IN_PROGRESS;
		/* Process the Create PDR info */
		if (process_create_pdr_info(&sess_mod_req->create_pdr[itr],
					&session, sess)) {
			return -1;
		}
		/* Match each Create FAR to this PDR by FAR id */
		/* TODO: Remove the loops */
		for (int itr1 = 0; itr1 < sess_mod_req->create_far_count; itr1++) {
			if (sess_mod_req->create_pdr[itr].far_id.far_id_value ==
					sess_mod_req->create_far[itr1].far_id.far_id_value) {
				/* Process the Create FAR info */
				if (process_create_far_info(&sess_mod_req->create_far[itr1],
						&session, sess->up_seid, sess)) {
					return -1;
				}
			}
		}
		/* Match each Create QER referenced by this PDR */
		/* TODO: Remove the loops */
		for (int itr2 = 0; itr2 < sess_mod_req->create_pdr[itr].qer_id_count; itr2++) {
			for (int itr3 = 0; itr3 < sess_mod_req->create_qer_count; itr3++) {
				if (sess_mod_req->create_pdr[itr].qer_id[itr2].qer_id_value ==
						sess_mod_req->create_qer[itr3].qer_id.qer_id_value) {
					/* NOTE(review): 'session->pdrs[itr]' indexes pdrs by the
					 * create_pdr loop counter, but pdrs is walked as a
					 * linked list elsewhere - confirm this indexing. */
					if (process_create_qer_info(&sess_mod_req->create_qer[itr3],
								&(session->pdrs[itr]).quer, &session, sess->cp_ip, sess->cp_seid)) {
						return -1;
					}
				}
			}
		}
		/* Match each Create URR referenced by this PDR */
		/* TODO: Remove the loops */
		for (int itr3 = 0; itr3 < sess_mod_req->create_pdr[itr].urr_id_count; itr3++) {
			for (int itr4 = 0; itr4 < sess_mod_req->create_urr_count; itr4++) {
				if (sess_mod_req->create_pdr[itr].urr_id[itr3].urr_id_value ==
						sess_mod_req->create_urr[itr4].urr_id.urr_id_value) {
					urr_info_t urr = {0};
					/* Process the Create URR info */
					if (process_create_urr_info(&sess_mod_req->create_urr[itr4],
								&urr, sess->cp_seid, sess->up_seid, sess->cp_ip)) {
						return -1;
					}
				}
			}
		}
		/* Maintain the teids in session level */
		if (sess_mod_req->create_pdr[itr].pdi.local_fteid.teid) {
			sess->teids[sess->ber_cnt] = sess_mod_req->create_pdr[itr].pdi.local_fteid.teid;
			sess->ber_cnt++;
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Checking Teid value:%u, counter:%u\n",
					LOG_VALUE, sess->teids[sess->ber_cnt - 1], sess->ber_cnt - 1);
		}
		/* Update the Session state */
		session->sess_state = CONNECTED;
	}
	/* Process the Update FAR information */
	for (int itr = 0; itr < sess_mod_req->update_far_count; itr++) {
		if (process_update_far_info(&sess_mod_req->update_far[itr],
					sess->up_seid, sess)) {
			/* TODO: Error Handling */
			return -1;
		}
	}
	for(int itr = 0; itr < sess_mod_req->update_pdr_count; itr++ ){
		/* Process the Update PDR info */
		if(process_update_pdr_info(&sess_mod_req->update_pdr[itr], sess)){
			/* TODO: Error Handling */
		}
	}
	/* Process the Remove PDR information */
	for (int itr = 0; itr < sess_mod_req->remove_pdr_count; itr++) {
		if (process_remove_pdr_sess(&sess_mod_req->remove_pdr[itr], sess->up_seid,
					sess_mod_rsp, sess->cp_ip)) {
			/* TODO: Error Handling */
			return -1;
		}
	}
#ifdef USE_CSID
	fqcsid_t *tmp = NULL;
	uint16_t tmp_csid = 0;
	uint16_t old_csid = 0;
	node_address_t old_node_addr = {0};
	node_address_t node_addr = {0};
	/* SGW FQ-CSID */
	if (sess_mod_req->sgw_c_fqcsid.header.len) {
		if (sess_mod_req->sgw_c_fqcsid.number_of_csids) {
			/* Get the List of the Old CSID */
			old_csid = (sess->sgw_fqcsid)->local_csid[(sess->sgw_fqcsid)->num_csid - 1];
			memcpy(&old_node_addr,
					&(sess->sgw_fqcsid)->node_addr, sizeof(node_address_t));
			/* Decode the peer node address from the FQ-CSID IE */
			if (sess_mod_req->sgw_c_fqcsid.fqcsid_node_id_type == IPV4_GLOBAL_UNICAST) {
				node_addr.ip_type = IPV4_TYPE;
				memcpy(&node_addr.ipv4_addr,
						sess_mod_req->sgw_c_fqcsid.node_address, IPV4_SIZE);
			} else {
				node_addr.ip_type = IPV6_TYPE;
				memcpy(&node_addr.ipv6_addr,
						&sess_mod_req->sgw_c_fqcsid.node_address, IPV6_SIZE);
			}
			/* Stored the SGW CSID by SGW Node address */
			tmp = get_peer_addr_csids_entry(&node_addr, ADD_NODE);
			if (tmp == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Failed to get "
						"CSID entry by SGW-C while Processing UP Session "
						"Modification Request, Error : %s \n",
						LOG_VALUE, strerror(errno));
				return -1;
			}
			if (!(is_present(&tmp->node_addr))) {
				memcpy(&tmp->node_addr, &node_addr, sizeof(node_address_t));
			}
			/* Merge the received CSIDs into the per-node list (no duplicates) */
			for(uint8_t itr = 0; itr < sess_mod_req->sgw_c_fqcsid.number_of_csids; itr++) {
				uint8_t match = 0;
				for (uint8_t itr1 = 0; itr1 < tmp->num_csid; itr1++) {
					if (tmp->local_csid[itr1] == sess_mod_req->sgw_c_fqcsid.pdn_conn_set_ident[itr]){
						match = 1;
						break;
					}
				}
				if (!match) {
					tmp->local_csid[tmp->num_csid++] =
						sess_mod_req->sgw_c_fqcsid.pdn_conn_set_ident[itr];
				}
			}
			/* Remove old CSID */
			/* NOTE(review): this compaction loop runs 'pos < num_csid' and
			 * reads local_csid[pos + 1] - one element past the last valid
			 * entry (the analogous loop below bounds at num_csid - 1);
			 * confirm the array is large enough that this is benign. */
			for (uint8_t itr = 0; itr < tmp->num_csid; itr++) {
				if (tmp->local_csid[itr] == old_csid) {
					for (uint8_t pos = itr; pos < tmp->num_csid; pos++) {
						tmp->local_csid[pos] = tmp->local_csid[pos + 1];
					}
					tmp->num_csid--;
				}
			}
			/* Remove the temp associated CSID Link with local CSID*/
			csid_t *sgw_csid = NULL;
			csid_key_t key_t = {0};
			key_t.local_csid = old_csid;
			memcpy(&key_t.node_addr,
					&old_node_addr, sizeof(node_address_t));
			sgw_csid = get_peer_csid_entry(&key_t, SX_PORT_ID, REMOVE_NODE);
			if (sgw_csid == NULL) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Failed to get "
						"CSID entry by SGW-C while Processing UP Session "
						"Modification Request, Error : %s \n",
						LOG_VALUE, strerror(errno));
			} else {
				/* Drop the old CSID from the session's SGW FQ-CSID list */
				for (uint8_t itr = 0; itr < (sess->sgw_fqcsid)->num_csid; itr++) {
					if ((sess->sgw_fqcsid)->local_csid[itr] == old_csid) {
						for (uint8_t pos = itr; pos < ((sess->sgw_fqcsid)->num_csid - 1); pos++) {
							(sess->sgw_fqcsid)->local_csid[pos] = (sess->sgw_fqcsid)->local_csid[pos + 1];
						}
						(sess->sgw_fqcsid)->num_csid--;
					}
				}
			}
			/* Append the newly received CSIDs and adopt the new node address */
			for(uint8_t itr1 = 0; itr1 < sess_mod_req->sgw_c_fqcsid.number_of_csids; itr1++) {
				(sess->sgw_fqcsid)->local_csid[(sess->sgw_fqcsid)->num_csid++] =
					sess_mod_req->sgw_c_fqcsid.pdn_conn_set_ident[itr1];
			}
			memcpy(&((sess->sgw_fqcsid)->node_addr),
					&(node_addr), sizeof(node_address_t));
			/* TODO: Need to think about this, this portion only has to hit in PGWU */
			/* LINK SGW CSID with local CSID */
			if (link_peer_csid_with_local_csid(sess->sgw_fqcsid,
						sess->up_fqcsid, SX_PORT_ID) < 0) {
				return -1;
			}
			link_dp_sess_with_peer_csid(sess->sgw_fqcsid, sess, SX_PORT_ID);
			/* Remove the session link from old CSID */
			sess_csid *tmp1 = NULL;
			peer_csid_key_t key = {0};
			key.iface = SX_PORT_ID;
			key.peer_local_csid = old_csid;
			memcpy(&key.peer_node_addr,
					&old_node_addr, sizeof(node_address_t));
			tmp1 = get_sess_peer_csid_entry(&key, REMOVE_NODE);
			if (tmp1 != NULL) {
				/* Remove node from csid linked list */
				tmp1 = remove_sess_csid_data_node(tmp1, sess->up_seid);
				int8_t ret = 0;
				/* Update CSID Entry in table */
				ret = rte_hash_add_key_data(seid_by_peer_csid_hash, &key, tmp1);
				if (ret) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Failed to add Session IDs entry"
							" for CSID = %u \n", LOG_VALUE, old_csid);
					return -1;
				}
				if (tmp1 == NULL) {
					/* Delete Local CSID entry */
					del_sess_peer_csid_entry(&key);
				}
			}
		}
	}
	/* PGW FQ-CSID */
	if (sess_mod_req->pgw_c_fqcsid.header.len) {
		if (sess_mod_req->pgw_c_fqcsid.number_of_csids) {
			if (sess->pgw_fqcsid == NULL) {
				/* PGWC FQ-CSID */
				sess->pgw_fqcsid = rte_zmalloc_socket(NULL, sizeof(fqcsid_t),
						RTE_CACHE_LINE_SIZE, rte_socket_id());
				if (sess->pgw_fqcsid == NULL) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Failed to allocate the memory for fqcsids entry\n",
							LOG_VALUE);
					return -1;
				}
			} else {
				/* Remember the previous CSID/node so the old link can be
				 * removed after the new one is stored */
				old_csid = (sess->pgw_fqcsid)->local_csid[(sess->pgw_fqcsid)->num_csid - 1];
				memcpy(&old_node_addr,
						&(sess->pgw_fqcsid)->node_addr, sizeof(node_address_t));
			}
			/* Stored the PGWC CSID by PGW Node address */
			if (stored_recvd_peer_fqcsid(&sess_mod_req->pgw_c_fqcsid, sess->pgw_fqcsid) < 0) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Failed to Store PGWC CSID \n", LOG_VALUE);
				return -1;
			}
			/* LINK SGW CSID with local CSID */
			if (link_peer_csid_with_local_csid(sess->pgw_fqcsid,
						sess->up_fqcsid, SX_PORT_ID) < 0) {
				return -1;
			}
			link_dp_sess_with_peer_csid(sess->pgw_fqcsid, sess, SX_PORT_ID);
			/* CSID actually changed: detach the session from the old one */
			if (old_csid != (sess->pgw_fqcsid)->local_csid[(sess->pgw_fqcsid)->num_csid -1]) {
				/* Remove the session link from old CSID */
				sess_csid *tmp1 = NULL;
				peer_csid_key_t key = {0};
				key.iface = SX_PORT_ID;
				key.peer_local_csid = old_csid;
				memcpy(&key.peer_node_addr,
						&old_node_addr, sizeof(node_address_t));
				tmp1 = get_sess_peer_csid_entry(&key, REMOVE_NODE);
				if (tmp1 != NULL) {
					/* Remove node from csid linked list */
					tmp1 = remove_sess_csid_data_node(tmp1, sess->up_seid);
					int8_t ret = 0;
					/* Update CSID Entry in table */
					ret = rte_hash_add_key_data(seid_by_peer_csid_hash, &key, tmp1);
					if (ret) {
						clLog(clSystemLog, eCLSeverityCritical,
								LOG_FORMAT"Failed to add Session IDs entry"
								" for CSID = %u \n", LOG_VALUE, old_csid);
						return -1;
					}
					if (tmp1 == NULL) {
						/* Delete Local CSID entry */
						del_sess_peer_csid_entry(&key);
					}
				}
			}
		}
	}
	/* TODO:VISHAL Need to think in PGWU case */
	if (sess_mod_req->sgw_c_fqcsid.number_of_csids) {
		/* Provisional local CSID assigned before peer info was known */
		tmp_csid = (sess->up_fqcsid)->local_csid[(sess->up_fqcsid)->num_csid - 1];
		int indx = 0;
		/* Add the entry for peer nodes */
		indx = fill_peer_node_info_t(sess, &cp_node_addr);
		if (indx < 0) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to fill peer node info and assignment of the CSID Error: %s\n",
					LOG_VALUE,
					strerror(errno));
			return -1;
		}
		/* TODO: Based on the index value, add the condition */
		/* Remove temp associated old CSIDs*/
		if (tmp_csid != (sess->up_fqcsid)->local_csid[(sess->up_fqcsid)->num_csid - 1]) {
			for (uint8_t itr = 0; itr < (sess->up_fqcsid)->num_csid; itr++) {
				if (tmp_csid == (sess->up_fqcsid)->local_csid[itr]) {
					for(uint32_t pos = itr; pos < ((sess->up_fqcsid)->num_csid - 1); pos++ ) {
						(sess->up_fqcsid)->local_csid[pos] = (sess->up_fqcsid)->local_csid[pos + 1];
					}
					(sess->up_fqcsid)->num_csid--;
					clLog(clSystemLog, eCLSeverityDebug,
							LOG_FORMAT"Remove temp allocated local CSID:%u, Num_Local_CSID:%u\n",
							LOG_VALUE, tmp_csid, (sess->up_fqcsid)->num_csid);
				}
			}
			/* Remove the current session link from tmp csid */
			sess_csid *tmp_t = NULL;
			tmp_t = get_sess_csid_entry(tmp_csid, REMOVE_NODE);
			if (tmp_t != NULL) {
				int ret = 0;
				sess_csid *seid_tmp = NULL;
				seid_tmp = remove_sess_csid_data_node(tmp_t, sess->up_seid);
				/* Update CSID Entry in table */
				ret = rte_hash_add_key_data(seids_by_csid_hash,
						&tmp_csid, seid_tmp);
				if (ret) {
					clLog(clSystemLog, eCLSeverityDebug,
							LOG_FORMAT"Failed to Update Session IDs entry for CSID = %u"
							"\n\tError= %s\n",
							LOG_VALUE, tmp_csid,
							rte_strerror(abs(ret)));
				}
				if (seid_tmp == NULL) {
					del_sess_csid_entry(tmp_csid);
				}
			}
		}
		/* Add entry for cp session id with link local csid */
		sess_csid *tmp1 = NULL;
		tmp1 = get_sess_csid_entry(
				(sess->up_fqcsid)->local_csid[(sess->up_fqcsid)->num_csid - 1], ADD_NODE);
		if (tmp1 == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Failed to get "
					"CSID entry by PGW-U while Processing UP Session "
					"Modification Request, Error : %s \n",
					LOG_VALUE, strerror(errno));
			return -1;
		}
		/* Link local csid with session id */
		/* Check whether the list head node is already in use */
		if(tmp1->up_seid != sess->up_seid && tmp1->up_seid != 0) {
			sess_csid *new_node = NULL;
			/* Add new node into csid linked list */
			new_node = add_sess_csid_data_node(tmp1,
				(sess->up_fqcsid)->local_csid[(sess->up_fqcsid)->num_csid - 1]);
			if(new_node == NULL ) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Failed to ADD new node into CSID"
						"linked list : %s\n",__func__);
				return -1;
			} else {
				new_node->cp_seid = sess->cp_seid;
				new_node->up_seid = sess->up_seid;
			}
		} else {
			tmp1->cp_seid = sess->cp_seid;
			tmp1->up_seid = sess->up_seid;
			tmp1->next = NULL;
		}
		/* Local CSID changed: advertise the new FQ-CSID in the response */
		if (tmp_csid != (sess->up_fqcsid)->local_csid[(sess->up_fqcsid)->num_csid - 1]) {
			/* Fill the fqcsid into the session est request */
			if (fill_fqcsid_sess_mod_rsp(sess_mod_rsp, sess)) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Failed to fill FQ-CSID in Sess EST Resp ERROR: %s\n",
						LOG_VALUE,
						strerror(errno));
				return -1;
			}
		}
	}
#endif /* USE_CSID */
	/* Update the CP seid in the response packet */
	sess_mod_rsp->header.seid_seqno.has_seid.seid = sess->cp_seid;
	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"PFCP Session Modification Request :: END \n", LOG_VALUE);
	return 0;
}
/**
 * @brief Build a Usage Report grouped IE (IE_USAGE_RPT_SESS_RPT_REQ) for a
 *        PFCP Session Report Request from a URR's accumulated state.
 *
 * Sets the VOLTH or TIMTH trigger bit according to @p trig, includes volume
 * and/or duration measurements per the URR's measurement method, then
 * re-arms the URR timestamps (and, for time-based measurement, its octet
 * counters) for the next reporting window.
 *
 * @param usage_report  Usage Report IE to populate (output).
 * @param urr           URR being reported; mutated (sequence number
 *                      post-incremented, timestamps/counters reset).
 * @param trig          Reporting trigger: VOL_BASED or TIME_BASED.
 * @return accumulated IE payload length.
 */
int8_t
fill_sess_rep_req_usage_report(pfcp_usage_rpt_sess_rpt_req_ie_t *usage_report,
		urr_info_t *urr, uint32_t trig)
{
	int8_t ie_len = 0;
	struct timeval unix_start = {0};
	struct timeval unix_end = {0};
	uint32_t now_ntp = 0;

	/* URR identifier */
	ie_len += set_urr_id(&usage_report->urr_id, urr->urr_id);

	/* Usage report sequence number; post-incremented per emitted report */
	pfcp_set_ie_header(&(usage_report->urseqn.header), PFCP_IE_URSEQN,
			(sizeof(pfcp_urseqn_ie_t) - sizeof(pfcp_ie_header_t)));
	ie_len += sizeof(pfcp_urseqn_ie_t);
	usage_report->urseqn.urseqn = urr->urr_seq_num++;

	/* Usage report trigger: volume threshold or time threshold */
	pfcp_set_ie_header(&(usage_report->usage_rpt_trig.header), PFCP_IE_USAGE_RPT_TRIG,
			(sizeof(pfcp_usage_rpt_trig_ie_t) - sizeof(pfcp_ie_header_t)));
	ie_len += sizeof(pfcp_usage_rpt_trig_ie_t);
	switch (trig) {
	case VOL_BASED:
		usage_report->usage_rpt_trig.volth = 1;
		break;
	case TIME_BASED:
		usage_report->usage_rpt_trig.timth = 1;
		break;
	default:
		/* no trigger bit for other values */
		break;
	}

	/* Which measurement IEs does this URR's method call for? */
	const int want_volume = (urr->meas_method == VOL_TIME_BASED) ||
			(urr->meas_method == VOL_BASED);
	const int want_duration = (urr->meas_method == TIME_BASED) ||
			(urr->meas_method == VOL_TIME_BASED);

	/* Volume measurement: uplink/downlink/total octet counters */
	if (want_volume) {
		ie_len += set_volume_measurment(&usage_report->vol_meas);
		usage_report->vol_meas.uplink_volume = urr->uplnk_data;
		usage_report->vol_meas.downlink_volume = urr->dwnlnk_data;
		usage_report->vol_meas.total_volume = urr->dwnlnk_data + urr->uplnk_data;
	}

	/* Duration measurement: seconds between URR start and "now",
	 * both NTP timestamps converted to UNIX time first */
	if (want_duration) {
		now_ntp = current_ntp_timestamp();
		ie_len += set_duration_measurment(&usage_report->dur_meas);
		ntp_to_unix_time(&urr->start_time, &unix_start);
		ntp_to_unix_time(&now_ntp, &unix_end);
		usage_report->dur_meas.duration_value = unix_end.tv_sec - unix_start.tv_sec;
	}

	/* Reporting-window timestamps - captured BEFORE the URR is re-armed */
	ie_len += set_start_time(&usage_report->start_time);
	ie_len += set_end_time(&usage_report->end_time);
	ie_len += set_first_pkt_time(&usage_report->time_of_frst_pckt);
	ie_len += set_last_pkt_time(&usage_report->time_of_lst_pckt);
	usage_report->start_time.start_time = urr->start_time;
	usage_report->end_time.end_time = current_ntp_timestamp();
	usage_report->time_of_frst_pckt.time_of_frst_pckt = urr->first_pkt_time;
	usage_report->time_of_lst_pckt.time_of_lst_pckt = urr->last_pkt_time;

	/* Re-arm the URR for the next reporting window */
	urr->start_time = current_ntp_timestamp();
	urr->first_pkt_time = 0;
	urr->last_pkt_time = 0;
	if (want_duration) {
		urr->uplnk_data = 0;
		urr->dwnlnk_data = 0;
	}

	/* Grouped-IE header carries the total accumulated payload length */
	pfcp_set_ie_header(&usage_report->header, IE_USAGE_RPT_SESS_RPT_REQ, ie_len);
	return ie_len;
}
/**
 * @brief Fill one Usage Report grouped IE (IE_USAGE_RPT_SESS_DEL_RSP) of a
 *        PFCP Session Deletion Response from a URR's accumulated state.
 *
 * Always emitted with the TERMR (termination report) trigger bit set.
 * Volume and/or duration measurements are included according to the URR's
 * measurement method.  After the report is built, the URR timestamps are
 * re-armed, and the periodic timer entry keyed by (urr_id, cp_seid) is
 * stopped, de-initialised and removed from timer_by_id_hash.
 *
 * NOTE(review): the timer-cleanup gate here tests urr->rept_trigg, while
 * the otherwise-identical fill_sess_mod_usage_report() tests
 * urr->meas_method - confirm which field the timers are keyed on.
 *
 * @param usage_report  Usage Report IE to populate (output).
 * @param urr           URR being reported; mutated (sequence number
 *                      post-incremented, timestamps reset).
 * @param cp_seid       CP session ID; half of the timer hash key.
 * @return accumulated IE payload length on success, -1 if the timer hash
 *         entry could not be deleted.
 */
int8_t
fill_sess_del_usage_report(pfcp_usage_rpt_sess_del_rsp_ie_t *usage_report,
		urr_info_t *urr, uint64_t cp_seid)
{
	int8_t size = 0;
	peerEntry *data = NULL;
	int ret = 0;
	struct timeval epoc_start_time;
	struct timeval epoc_end_time;
	uint32_t end_time = 0;
	/* URR Identifier */
	size += set_urr_id(&usage_report->urr_id, urr->urr_id);
	/* Usage report sequence number; post-incremented per emitted report */
	pfcp_set_ie_header(&(usage_report->urseqn.header), PFCP_IE_URSEQN,
			(sizeof(pfcp_urseqn_ie_t) - sizeof(pfcp_ie_header_t)));
	size += sizeof(pfcp_urseqn_ie_t);
	usage_report->urseqn.urseqn = urr->urr_seq_num++;
	/* Usage report trigger: TERMR = report due to session termination */
	pfcp_set_ie_header(&(usage_report->usage_rpt_trig.header), PFCP_IE_USAGE_RPT_TRIG,
			(sizeof(pfcp_usage_rpt_trig_ie_t) - sizeof(pfcp_ie_header_t)));
	size += sizeof(pfcp_usage_rpt_trig_ie_t);
	usage_report->usage_rpt_trig.termr = 1;
	/* Volume measurement: uplink/downlink/total octet counters */
	if(urr->meas_method == VOL_TIME_BASED ||
			urr->meas_method == VOL_BASED){
		size += set_volume_measurment(&usage_report->vol_meas);
		usage_report->vol_meas.uplink_volume = urr->uplnk_data;
		usage_report->vol_meas.downlink_volume = urr->dwnlnk_data;
		usage_report->vol_meas.total_volume = urr->dwnlnk_data + urr->uplnk_data;
	}
	/* Duration measurement: seconds between URR start time and "now";
	 * both NTP timestamps are converted to UNIX time first */
	if(urr->meas_method == TIME_BASED || urr->meas_method == VOL_TIME_BASED) {
		end_time = current_ntp_timestamp();
		size += set_duration_measurment(&usage_report->dur_meas);
		ntp_to_unix_time(&urr->start_time, &epoc_start_time);
		ntp_to_unix_time(&end_time, &epoc_end_time);
		usage_report->dur_meas.duration_value = epoc_end_time.tv_sec - epoc_start_time.tv_sec;
	}
	/* Reporting-window timestamps - read BEFORE the URR is re-armed below */
	size += set_start_time(&usage_report->start_time);
	size += set_end_time(&usage_report->end_time);
	size += set_first_pkt_time(&usage_report->time_of_frst_pckt);
	size += set_last_pkt_time(&usage_report->time_of_lst_pckt);
	usage_report->start_time.start_time = urr->start_time;
	usage_report->end_time.end_time = current_ntp_timestamp();
	usage_report->time_of_frst_pckt.time_of_frst_pckt = urr->first_pkt_time;
	usage_report->time_of_lst_pckt.time_of_lst_pckt = urr->last_pkt_time;
	/* Re-arm the URR for the next reporting window */
	urr->start_time = current_ntp_timestamp();
	urr->first_pkt_time = 0;
	urr->last_pkt_time = 0;
	/* Timer entries are keyed by (URR id, CP SEID) */
	rule_key hash_key = {0};
	hash_key.id = urr->urr_id;
	hash_key.cp_seid = cp_seid;
	/* Grouped-IE header carries the total accumulated payload length */
	pfcp_set_ie_header(&usage_report->header, IE_USAGE_RPT_SESS_DEL_RSP, size);
	if((urr->rept_trigg == TIME_BASED) || (urr->rept_trigg == VOL_TIME_BASED)) {
		ret = rte_hash_lookup_data(timer_by_id_hash,
				&hash_key, (void **)&data);
		if (ret >= 0) {
			if(data->pt.ti_id != 0) {
				stoptimer(&data->pt.ti_id);
				deinittimer(&data->pt.ti_id);
				/* URR Entry is present. Delete timer Entry */
				ret = rte_hash_del_key(timer_by_id_hash, &hash_key);
				if ( ret < 0) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Timer Entry "
							"not found for URR_ID:%u\n", LOG_VALUE, urr->urr_id);
					/* NOTE(review): 'data' is not freed on this error path -
					 * confirm the leak is acceptable */
					return -1;
				}
				if (data != NULL) {
					rte_free(data);
					data = NULL;
				}
			}
		} else {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"No timer entery found for URR %u\n", LOG_VALUE, urr->urr_id);
		}
	}
	return size;
}
/**
 * @brief Tear down a complete UP session: drain buffered downlink traffic,
 *        emit per-URR usage reports, and flush every derived hash entry.
 *
 * For each session-data node this drains and frees the downlink buffering
 * ring, generates termination usage reports (into sess_del_rsp when given,
 * otherwise stored as CDRs for restoration), deletes FAR/QER/URR/PDR hash
 * entries, and unlinks the node.  It then removes the ue_ip- and
 * teid-keyed session entries and finally the up_seid-keyed session entry,
 * decrementing the active-session counter.
 *
 * Fix vs. previous revision: the DPDK version guards around
 * rte_ring_sc_dequeue_burst() were wrong.  The old test
 * `(RTE_VER_YEAR >= 16) && (RTE_VER_MONTH >= 11)` selected the legacy
 * 3-argument API for ANY x.11 release (e.g. 18.11 - a compile error, since
 * DPDK 17.05 added a 4th "available" parameter), and releases such as
 * 17.05-17.08 matched NEITHER branch, leaving ret == 0 so the ring was
 * never drained (mbuf leak).  The guard now splits exactly at 17.05.
 *
 * @param sess          UP session to delete (its hash entries are removed;
 *                      the struct itself is NOT freed here).
 * @param sess_del_rsp  Deletion response to receive usage reports, or NULL
 *                      to store reports as restoration CDRs instead.
 * @return 0 on success, -1 on any lookup/delete failure.
 */
int8_t
up_delete_session_entry(pfcp_session_t *sess, pfcp_sess_del_rsp_t *sess_del_rsp)
{
	int ret = 0;
	int8_t inx = 0;
	ue_ip_t ue_ip[MAX_BEARERS] = {0};
	int cnt = 0;
	uint32_t ue_ip_addr = 0;
	uint8_t ue_ipv6_addr[IPV6_ADDRESS_LEN] = {0};
	pfcp_usage_rpt_sess_del_rsp_ie_t usage_report[MAX_LIST_SIZE]= {0};
	(sess->cp_ip.type == IPV6_TYPE)?
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" CP_Sess_ID: %lu, UP_Sess_ID:%lu, CP_IPv6:"IPv6_FMT"\n",
				LOG_VALUE, sess->cp_seid, sess->up_seid,
				IPv6_PRINT(IPv6_CAST(sess->cp_ip.ipv6.sin6_addr.s6_addr))):
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" CP_Sess_ID: %lu, UP_Sess_ID:%lu, CP_IPv4:"IPV4_ADDR"\n",
				LOG_VALUE, sess->cp_seid, sess->up_seid,
				IPV4_ADDR_HOST_FORMAT(sess->cp_ip.ipv4.sin_addr.s_addr));
	/* Flush the Session data info from the hash tables based on teid*/
	pfcp_session_datat_t *session = sess->sessions;
	/* Cleanup the session data form hash table and delete the node from linked list */
	while (session != NULL) {
		if (session->dl_ring != NULL) {
			struct rte_ring *ring = session->dl_ring;
			session->dl_ring = NULL;
			/* This is going to be nasty. We could potentially have a race
			 * condition if modify bearer occurs directly before a delete
			 * session, causing scan_notify_ring_func to work on the same
			 * ring as this function. For our current tests, we *should* be
			 * okay. For now.
			 */
			int i = 0;
			int ret = 0;
			struct rte_mbuf *m[MAX_BURST_SZ];
			do {
				/* Drain buffered mbufs in bursts.  DPDK 17.05 added a 4th
				 * "available" out-parameter to the dequeue-burst API, so
				 * split the call on the release number. */
#if (RTE_VER_YEAR < 17) || ((RTE_VER_YEAR == 17) && (RTE_VER_MONTH < 5))
				/* dpdk-16.11.x and older: 3-argument API */
				ret = rte_ring_sc_dequeue_burst(ring,
						(void **)m, MAX_BURST_SZ);
#else
				/* dpdk-17.05 and newer: NULL => remaining-entries count
				 * not needed */
				ret = rte_ring_sc_dequeue_burst(ring,
						(void **)m, MAX_BURST_SZ, NULL);
#endif
				for (i = 0; i < ret; ++i) {
					if (m[i] != NULL)
						rte_pktmbuf_free(m[i]);
				}
			} while (ret);
			if(ring) {
				rte_ring_free(ring);
			}
			/* Pull the ring's slot out of the ring container as well */
			if(rte_ring_sc_dequeue(dl_ring_container, (void *)&ring) == -ENOENT) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Ring not found\n",LOG_VALUE);
			} else{
				clLog(clSystemLog, eCLSeverityDebug,
						LOG_FORMAT"Dequeued Ring \n",LOG_VALUE);
			}
			ring = NULL;
		}
		/* Cleanup PDRs info from the linked list */
		pdr_info_t *pdr = session->pdrs;
		/* Capture a UE address (first one found) for CDR restoration */
		if (ue_ip_addr == 0 && *ue_ipv6_addr == 0) {
			if (session->ipv4) {
				ue_ip_addr = session->ue_ip_addr;
			}
			if (session->ipv6) {
				memcpy(ue_ipv6_addr, session->ue_ipv6_addr, IPV6_ADDRESS_LEN);
			}
		}
		/* Still nothing captured: try the next session-data node */
		if (ue_ip_addr == 0 && *ue_ipv6_addr == 0) {
			if(session->next != NULL) {
				if (session->next->ipv4) {
					ue_ip_addr = session->next->ue_ip_addr;
				}
				if (session->next->ipv6) {
					memcpy(ue_ipv6_addr, session->next->ue_ipv6_addr, IPV6_ADDRESS_LEN);
				}
			}
		}
		while (pdr != NULL) {
			/* Termination usage report per URR: into the deletion response
			 * when one was supplied, otherwise stored as restoration CDRs */
			for(int itr = 0; itr < pdr->urr_count; itr++){
				if(sess_del_rsp != NULL){
					fill_sess_del_usage_report(&sess_del_rsp->usage_report[sess_del_rsp->usage_report_count++],
							&pdr->urr[itr], sess->cp_seid);
				} else {
					fill_sess_del_usage_report(&usage_report[cnt], &pdr->urr[itr], sess->cp_seid);
					store_cdr_for_restoration(&usage_report[cnt], sess->up_seid, 0,
							0, ue_ip_addr, ue_ipv6_addr);
					cnt++;
				}
			}
			far_info_t *far = pdr->far;
			/* Cleanup the FAR information */
			if (far != NULL) {
				/* Flush the far info from the hash table */
				ret = del_far_info_entry(far->far_id_value, sess->cp_ip, sess->cp_seid);
				if (ret) {
					clLog(clSystemLog, eCLSeverityCritical, "DP:"LOG_FORMAT"Entry not found for FAR_ID:%u...\n",
							LOG_VALUE, far->far_id_value);
					return -1;
				}
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT":FAR_ID:%u\n",
						LOG_VALUE, far->far_id_value);
			}
			/* Cleanup QERs info from the linked list; predefined-rule QERs
			 * are owned elsewhere and skipped */
			if (!pdr->predef_rules_count) {
				qer_info_t *qer = pdr->quer;
				while (qer != NULL) {
					/* Get QER ID */
					uint32_t qer_id = qer->qer_id;
					/* Delete the QER info node from the linked list */
					pdr->quer = remove_qer_node(pdr->quer, qer);
					qer = pdr->quer;
					/* Flush the QER info from the hash table */
					ret = del_qer_info_entry(qer_id, sess->cp_ip, sess->cp_seid);
					if ( ret < 0) {
						clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Entry not found for QER_ID:%u...\n",
								LOG_VALUE, qer_id);
						return -1;
					}
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT": QER_ID:%u\n",
							LOG_VALUE, qer_id);
				}
			}
			/* Cleanup URRs info from the linked list */
			urr_info_t *urr = pdr->urr;
			while (urr != NULL) {
				uint32_t urr_id = urr->urr_id;
				/* Delete the URR info node from the linked list */
				pdr->urr = remove_urr_node(pdr->urr, urr);
				urr = pdr->urr;
				/* Flush the URR info from the hash table */
				if (del_urr_info_entry(urr_id, sess->cp_ip, sess->cp_seid)) {
					/* TODO : ERROR Handling */
				}
			}
			/* Cleanup PDRs info from the linked list */
			/* Get PDR ID */
			uint32_t pdr_id = pdr->rule_id;
			/* Delete the PDR info node from the linked list */
			session->pdrs = remove_pdr_node(session->pdrs, pdr);
			pdr = session->pdrs;
			/* Flush the PDR info from the hash table */
			ret = del_pdr_info_entry(pdr_id, sess->cp_ip, sess->cp_seid);
			if (ret) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Entry not found for PDR_ID:%u...\n",
						LOG_VALUE, pdr_id);
				return -1;
			}
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT":PDR_ID:%u\n", LOG_VALUE, pdr_id);
		}
		/* Remember the node's UE address(es) for ue_ip-hash cleanup below */
		if ((session->ipv4 != 0) || (session->ipv6 != 0)){
			ue_ip[inx].ue_ipv4 = session->ue_ip_addr;
			memcpy(ue_ip[inx].ue_ipv6, session->ue_ipv6_addr, IPV6_ADDRESS_LEN);
			inx++;
		}
		/* Delete the Session data info node from the linked list */
		sess->sessions = remove_sess_data_node(sess->sessions, session);
		if (sess->sessions == NULL)
			break;
		session = sess->sessions;
	}
	/* Flush the Session data info from the hash tables based on ue_ip */
	for (int itr = 0; itr < inx; itr++) {
		ue_ip_t ue_addr = {0};
		if (ue_ip[itr].ue_ipv4) {
			ue_addr.ue_ipv4 = ue_ip[itr].ue_ipv4;
			if (del_sess_by_ueip_entry(ue_addr) < 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Entry not found for UE_IPv4 :"
						""IPV4_ADDR"\n", LOG_VALUE, IPV4_ADDR_HOST_FORMAT(ue_ip[itr].ue_ipv4));
				return -1;
			}
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT": UE_IPv4 :"IPV4_ADDR"\n",
					LOG_VALUE, IPV4_ADDR_HOST_FORMAT(ue_ip[itr].ue_ipv4));
		}
		if (ue_ip[itr].ue_ipv6) {
			memset(&ue_addr, 0, sizeof(ue_ip_t));
			memcpy(ue_addr.ue_ipv6, ue_ip[itr].ue_ipv6, IPV6_ADDRESS_LEN);
			char ipv6[IPV6_STR_LEN];
			inet_ntop(AF_INET6, ue_ip[itr].ue_ipv6, ipv6, IPV6_STR_LEN);
			if (del_sess_by_ueip_entry(ue_addr) < 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Entry not found for"
						" IPv6 Addr: %s\n", LOG_VALUE, ipv6);
				return -1;
			}
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT": IPv6 Addr: %s\n",
					LOG_VALUE, ipv6);
		}
	}
	/* Flush the teid-keyed session entries */
	for (int itr1 = 0; itr1 < sess->ber_cnt; itr1++) {
		if(sess->teids[itr1] == 0)
			continue;
		else if (del_sess_by_teid_entry(sess->teids[itr1])) {
			/* TODO : ERROR Handling */
		}
		clLog(clSystemLog, eCLSeverityDebug,
				LOG_FORMAT"Checking Teid value: 0x%x, counter:%u, Max Counter:%u\n",
				LOG_VALUE, sess->teids[itr1], itr1, sess->ber_cnt);
		sess->teids[itr1] = 0;
	}
	/* Session Entry is present. Delete Session Entry */
	ret = del_sess_info_entry(sess->up_seid);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Entry not found for UP_SESS_ID:%lu...\n",
				LOG_VALUE, sess->up_seid);
		return -1;
	}
	/*CLI:decrement active session count*/
	update_sys_stat(number_of_active_session, DECREMENT);
	return 0;
}
/**
 * @brief : Handle a PFCP Session Deletion Request on the user plane.
 *          Looks up the session by the UP SEID carried in the header,
 *          tears down all of its state and fills the deletion response.
 * @param : sess_del_req, decoded deletion request from the CP
 * @param : sess_del_rsp, response structure to populate (zeroed here)
 * @return : 0 on success, -1 when the session is unknown or cleanup fails
 */
int8_t
process_up_session_deletion_req(pfcp_sess_del_req_t *sess_del_req,
	pfcp_sess_del_rsp_t *sess_del_rsp)
{
	pfcp_session_t *sess = NULL;
	memset(sess_del_rsp, 0, sizeof(pfcp_sess_del_rsp_t));
	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"PFCP Session Deletion Request :: START \n", LOG_VALUE);
	/* Get the session information from session table based on UP_SESSION_ID*/
	if (sess_del_req->header.s) {
		/* Check SEID is not ZERO */
		sess = get_sess_info_entry(sess_del_req->header.seid_seqno.has_seid.seid,
				SESS_DEL);
	}
	if (sess == NULL)
		return -1;

	/* Tears down PDRs/QERs/URRs, UE-IP and TEID hash entries */
	if (up_delete_session_entry(sess, sess_del_rsp))
		return -1;

	/* Update the CP seid in the response packet */
	sess_del_rsp->header.seid_seqno.has_seid.seid = sess->cp_seid;

#ifdef USE_CSID
	/* Remove the session from the CSID bookkeeping as well.
	 * NOTE(review): errno is not set by del_sess_by_csid_entry itself,
	 * so strerror(errno) may print a stale message — confirm. */
	if (del_sess_by_csid_entry(sess, sess->up_fqcsid, SX_PORT_ID)) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error: %s \n", LOG_VALUE,
				strerror(errno));
		return -1;
	}
#endif /* USE_CSID */

	/* Cleanup the session */
	rte_free(sess);
	sess = NULL;

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"PFCP Session Deletion Request :: END \n", LOG_VALUE);
	return 0;
}
/**
 * @brief : Arm the interval timer embedded in a peer entry.
 * @param : md, peer entry owning the timer (also passed back to the callback)
 * @param : ptms, timer period passed straight to gst_timer_init
 *          (callers pass timeout_ms * 1000 — presumably microseconds; confirm)
 * @param : cb, callback fired on each timer expiry
 * @return : true when the timer was initialized successfully
 */
bool inittimer(peerEntry *md, int ptms, gstimercallback cb)
{
	return gst_timer_init(&md->pt, ttInterval, cb, ptms, md);
}
/**
 * @brief : Allocate a timer context for periodic URR usage reporting and
 *          register it in the timer hash keyed by (URR id, CP SEID).
 * @param : peer_addr, peer address; its IPv4 address is cached in the entry
 * @param : urr, URR the timer will report on (pointer stored, not copied)
 * @param : cp_seid, control-plane session id
 * @param : up_seid, user-plane session id
 * @return : newly allocated entry on success, NULL on allocation or
 *           hash-insertion failure (nothing is leaked on failure)
 */
peerEntry *
fill_timer_entry_usage_report(struct sockaddr_in *peer_addr, urr_info_t *urr, uint64_t cp_seid, uint64_t up_seid)
{
	peerEntry *timer_entry = NULL;
	int ret = 0;

	timer_entry = rte_zmalloc_socket(NULL, sizeof(peerEntry),
			RTE_CACHE_LINE_SIZE, rte_socket_id());
	if (timer_entry == NULL)
	{
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failure to allocate timer entry :"
				"%s\n", LOG_VALUE, rte_strerror(rte_errno));
		return NULL;
	}

	timer_entry->dstIP = peer_addr->sin_addr.s_addr;
	timer_entry->cp_seid = cp_seid;
	timer_entry->up_seid = up_seid;
	timer_entry->urr = urr;

	rule_key hash_key = {0};
	hash_key.id = urr->urr_id;
	hash_key.cp_seid = cp_seid;

	ret = rte_hash_add_key_data(timer_by_id_hash,
			&hash_key, timer_entry);
	if (ret) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to add timer entry for URR_ID = %u"
				"\n\tError= %s\n", LOG_VALUE, urr->urr_id, rte_strerror(abs(ret)));
		/* BUG FIX: the freshly allocated entry was leaked on hash
		 * insertion failure; release it before bailing out. */
		rte_free(timer_entry);
		return NULL;
	}

	return(timer_entry);
}
/**
 * @brief : Start the usage-report timer for a peer entry.
 * @param : conn_data, peer entry whose embedded timer is armed
 * @param : timeout_ms, period in milliseconds (scaled by 1000 for inittimer)
 * @param : cb, callback invoked on each expiry
 * @return : true when the timer was armed, false on initialization failure
 */
bool
add_timer_entry_usage_report(peerEntry *conn_data, uint32_t timeout_ms,
	gstimercallback cb)
{
	/* inittimer() expects the scaled period; bail out loudly if it fails */
	if (inittimer(conn_data, timeout_ms*1000, cb))
		return true;

	clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT " =>%s - initialization of %s failed erro no %s\n",
			LOG_VALUE, getPrintableTime(), conn_data->name, strerror(errno));
	return false;
}
/**
 * @brief : Expiry handler for the per-URR usage-report timer.
 *          For time-based measurement methods it sends a usage report;
 *          otherwise it tears the timer down and removes its hash entry.
 * @param : ti, timer info (unused directly; context comes from data_t)
 * @param : data_t, peerEntry registered via fill_timer_entry_usage_report
 * @return : Returns nothing
 */
void
timer_callback(gstimerinfo_t *ti, const void *data_t )
{
	peerEntry *data = (peerEntry *) data_t;
	int ret = 0;

	/* Key used when the entry was registered: (URR id, CP SEID) */
	rule_key hash_key = {0};
	hash_key.id = data->urr->urr_id;
	hash_key.cp_seid = data->cp_seid;

	if(data->urr->meas_method == TIME_BASED ||
			data->urr->meas_method == VOL_TIME_BASED) {
		/* Timer keeps running (periodic); just emit the report */
		if(send_usage_report_req(data->urr, data->cp_seid, data->up_seid, TIME_BASED) != 0 ){
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT"Failed to Send Usage "
					"Report Request \n", LOG_VALUE);
		}
	} else {
		/* Measurement method no longer time-based: stop and dispose of
		 * the timer. Re-lookup guards against an entry already removed. */
		ret = rte_hash_lookup_data(timer_by_id_hash,
				&hash_key, (void **)&data);
		if (ret >=0 ) {
			if(data->pt.ti_id != 0) {
				stoptimer(&data->pt.ti_id);
				deinittimer(&data->pt.ti_id);
				ret = rte_hash_del_key(timer_by_id_hash, &hash_key);
				if ( ret < 0) {
					/* NOTE(review): returning here leaves 'data' allocated;
					 * presumably it is still owned by the hash — confirm. */
					clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Timer Entry not "
							"found for URR_ID:%u\n", LOG_VALUE, data->urr->urr_id);
					return;
				}
				if (data != NULL) {
					rte_free(data);
					data = NULL;
				}
			}
		}
	}
}
/**
 * @brief : Decode the LI (lawful intercept) duplicating parameters carried
 *          in a Create-FAR IE and store them per-FAR and per-session.
 *          Each forwarding-policy identifier is a byte string whose offsets
 *          (FRWDING_PLCY_*) select individual configuration octets.
 * @param : far, Create-FAR IE holding the duplicating parameters
 * @param : far_t, FAR context to fill
 * @param : sess, session context to fill
 * @return : 0 always
 */
int
fill_li_duplicating_params(pfcp_create_far_ie_t *far, far_info_t *far_t, pfcp_session_t *sess) {

	/* Drop any previously stored LI configuration before decoding */
	far_t->dup_parms_cnt = 0;
	far_t->li_config_cnt = 0;
	sess->li_sx_config_cnt = 0;
	memset(far_t->li_config, 0, MAX_LI_ENTRIES_PER_UE * sizeof(li_config_t));
	memset(sess->li_sx_config, 0, MAX_LI_ENTRIES_PER_UE * sizeof(li_sx_config_t));

	for (uint8_t idx = 0; idx < far->dupng_parms_count; ++idx) {

		for (uint8_t pos = 0;
				pos < far->dupng_parms[idx].frwdng_plcy.frwdng_plcy_ident_len;
				++pos) {

			uint8_t octet = far->dupng_parms[idx].frwdng_plcy.frwdng_plcy_ident[pos];

			switch (pos) {
			case FRWDING_PLCY_SX:
				sess->li_sx_config[idx].sx = octet;
				break;
			case FRWDING_PLCY_WEST_DIRECTION:
				far_t->li_config[idx].west_direction = octet;
				break;
			case FRWDING_PLCY_WEST_CONTENT:
				far_t->li_config[idx].west_content = octet;
				break;
			case FRWDING_PLCY_EAST_DIRECTION:
				far_t->li_config[idx].east_direction = octet;
				break;
			case FRWDING_PLCY_EAST_CONTENT:
				far_t->li_config[idx].east_content = octet;
				break;
			case FRWDING_PLCY_FORWARD:
				/* Forward flag is mirrored in both contexts */
				sess->li_sx_config[idx].forward = octet;
				far_t->li_config[idx].forward = octet;
				break;
			case FRWDING_PLCY_ID:
				/* 8-byte intercept id starts at this offset */
				memcpy(&sess->li_sx_config[idx].id,
						&far->dupng_parms[idx].frwdng_plcy.frwdng_plcy_ident[pos],
						sizeof(uint64_t));
				far_t->li_config[idx].id = sess->li_sx_config[idx].id;
				break;
			default:
				break;
			}
		}
	}

	far_t->dup_parms_cnt = far->dupng_parms_count;
	far_t->li_config_cnt = far->dupng_parms_count;
	sess->li_sx_config_cnt = far->dupng_parms_count;

	return 0;
}
/**
 * @brief : Decode the LI duplicating parameters carried in an Update-FAR IE
 *          and store them per-FAR and per-session. Mirrors
 *          fill_li_duplicating_params() but reads upd_dupng_parms.
 * @param : far, Update-FAR IE holding the duplicating parameters
 * @param : far_t, FAR context to fill
 * @param : sess, session context to fill
 * @return : 0 always
 */
int
fill_li_update_duplicating_param(pfcp_update_far_ie_t *far, far_info_t *far_t, pfcp_session_t *sess) {

	/* Drop any previously stored LI configuration before decoding */
	far_t->dup_parms_cnt = 0;
	far_t->li_config_cnt = 0;
	sess->li_sx_config_cnt = 0;
	memset(far_t->li_config, 0, MAX_LI_ENTRIES_PER_UE * sizeof(li_config_t));
	memset(sess->li_sx_config, 0, MAX_LI_ENTRIES_PER_UE * sizeof(li_sx_config_t));

	for (int idx = 0; idx < far->upd_dupng_parms_count; idx++) {

		for (uint8_t pos = 0;
				pos < far->upd_dupng_parms[idx].frwdng_plcy.frwdng_plcy_ident_len;
				++pos) {

			uint8_t octet = far->upd_dupng_parms[idx].frwdng_plcy.frwdng_plcy_ident[pos];

			switch (pos) {
			case FRWDING_PLCY_SX:
				sess->li_sx_config[idx].sx = octet;
				break;
			case FRWDING_PLCY_WEST_DIRECTION:
				far_t->li_config[idx].west_direction = octet;
				break;
			case FRWDING_PLCY_WEST_CONTENT:
				far_t->li_config[idx].west_content = octet;
				break;
			case FRWDING_PLCY_EAST_DIRECTION:
				far_t->li_config[idx].east_direction = octet;
				break;
			case FRWDING_PLCY_EAST_CONTENT:
				far_t->li_config[idx].east_content = octet;
				break;
			case FRWDING_PLCY_FORWARD:
				/* Forward flag is mirrored in both contexts */
				sess->li_sx_config[idx].forward = octet;
				far_t->li_config[idx].forward = octet;
				break;
			case FRWDING_PLCY_ID:
				/* 8-byte intercept id starts at this offset */
				memcpy(&sess->li_sx_config[idx].id,
						&far->upd_dupng_parms[idx].frwdng_plcy.frwdng_plcy_ident[pos],
						sizeof(uint64_t));
				far_t->li_config[idx].id = sess->li_sx_config[idx].id;
				break;
			default:
				break;
			}
		}
	}

	far_t->dup_parms_cnt = far->upd_dupng_parms_count;
	far_t->li_config_cnt = far->upd_dupng_parms_count;
	sess->li_sx_config_cnt = far->upd_dupng_parms_count;

	return 0;
}
/**
 * @brief : Duplicate a PFCP signalling event towards the LI (lawful
 *          intercept) collector for every Sx-enabled intercept configured
 *          on the session. Both the received (buf_rx) and transmitted
 *          (buf_tx) messages are wrapped in an LI header and sent on the
 *          DDF2 TCP connection.
 * @param : sess, session whose li_sx_config entries drive duplication
 * @param : buf_rx/buf_rx_size, incoming message (may be NULL/0)
 * @param : buf_tx/buf_tx_size, outgoing message (may be NULL/0)
 * @param : peer_addr, remote PFCP peer (source of rx, destination of tx)
 * @return : 0 on success, -1 on allocation or send failure
 */
int32_t
process_event_li(pfcp_session_t *sess, uint8_t *buf_rx, int buf_rx_size,
		uint8_t *buf_tx, int buf_tx_size, peer_addr_t *peer_addr) {

	int ret = 0;
	int pkt_length = 0;
	uint8_t *pkt = NULL;

	if (NULL == sess) {
		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Sess"
				" entry Not found ", LOG_VALUE);
		return -1;
	}

	for (uint8_t cnt = 0; cnt < sess->li_sx_config_cnt; cnt++) {

		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Li "
				"configuration sx(%u)", LOG_VALUE, sess->li_sx_config[cnt].sx);

		/* Skip intercepts that do not request Sx duplication */
		if (NOT_PRESENT == sess->li_sx_config[cnt].sx) {
			continue;
		}

		/* For incoming message */
		if ((NULL != buf_rx) && (buf_rx_size > 0)) {
			pkt_length = buf_rx_size;
			/* Extra room for the LI header prepended/appended below */
			pkt = rte_malloc(NULL, (pkt_length + sizeof(li_header_t)), 0);
			if (NULL == pkt) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Failed"
						" to allocate memory for li packet", LOG_VALUE);
				return -1;
			}

			memcpy(pkt, buf_rx, pkt_length);
			/* rx direction: source is the remote peer, destination is us */
			create_li_header(pkt, &pkt_length, EVENT_BASED,
					sess->li_sx_config[cnt].id, sess->imsi,
					fill_ip_info(peer_addr->type,
							peer_addr->ipv4.sin_addr.s_addr,
							peer_addr->ipv6.sin6_addr.s6_addr),
					fill_ip_info(peer_addr->type,
							dp_comm_ip.s_addr,
							dp_comm_ipv6.s6_addr),
					((peer_addr->type == IPTYPE_IPV4_LI) ?
						ntohs(peer_addr->ipv4.sin_port) :
						ntohs(peer_addr->ipv6.sin6_port)),
					dp_comm_port,
					sess->li_sx_config[cnt].forward);

			ret = send_li_data_pkt(ddf2_fd, pkt, pkt_length);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Failed"
						" to send PFCP event on TCP sock"
						" with error %d\n", LOG_VALUE, ret);
				/* BUG FIX: the packet buffer was leaked on send failure */
				rte_free(pkt);
				pkt = NULL;
				return -1;
			}

			rte_free(pkt);
			pkt = NULL;
		}

		/* For outgoing message */
		if ((NULL != buf_tx) && (buf_tx_size > 0)) {
			pkt_length = buf_tx_size;
			pkt = rte_malloc(NULL, (pkt_length + sizeof(li_header_t)), 0);
			if (NULL == pkt) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed"
						" to allocate memory for li packet", LOG_VALUE);
				return -1;
			}

			memcpy(pkt, buf_tx, pkt_length);
			/* tx direction: source is us, destination is the remote peer */
			create_li_header(pkt, &pkt_length, EVENT_BASED,
					sess->li_sx_config[cnt].id, sess->imsi,
					fill_ip_info(peer_addr->type,
							dp_comm_ip.s_addr,
							dp_comm_ipv6.s6_addr),
					fill_ip_info(peer_addr->type,
							peer_addr->ipv4.sin_addr.s_addr,
							peer_addr->ipv6.sin6_addr.s6_addr),
					dp_comm_port,
					((peer_addr->type == IPTYPE_IPV4_LI) ?
						ntohs(peer_addr->ipv4.sin_port) :
						ntohs(peer_addr->ipv6.sin6_port)),
					sess->li_sx_config[cnt].forward);

			ret = send_li_data_pkt(ddf2_fd, pkt, pkt_length);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Failed"
						" to send PFCP event on TCP sock"
						" with error %d\n", LOG_VALUE, ret);
				/* BUG FIX: the packet buffer was leaked on send failure */
				rte_free(pkt);
				pkt = NULL;
				return -1;
			}

			rte_free(pkt);
			pkt = NULL;
		}
	}

	return 0;
}
/**
 * @brief : Validate one PFD-contents IE from a PFD management request.
 *          The custom-PFD flag must be set and the contents non-empty.
 * @param : pfd_content, IE to validate
 * @param : cause_id, out: REQUESTACCEPTED or MANDATORYIEMISSING
 * @param : offend_id, out: offending IE type when the check fails
 * @return : Returns nothing
 */
void
check_cause_id_pfd_mgmt(pfcp_pfd_contents_ie_t *pfd_content, uint8_t **cause_id, int **offend_id)
{
	/* Reject when the custom-PFD flag is clear or the payload is empty */
	if (!(pfd_content->cp) || !(pfd_content->len_of_cstm_pfd_cntnt)) {
		**cause_id = MANDATORYIEMISSING;
		**offend_id = PFCP_IE_PFD_CONTENTS;
		return;
	}

	**cause_id = REQUESTACCEPTED;
}
struct pcc_rules* get_pcc_rule(uint32_t ip)
{
rules_struct *rule = NULL;
rule = get_map_rule_entry(ip, GET_RULE);
if (rule != NULL) {
rules_struct *current = NULL;
current = rule;
while (current != NULL) {
/* Retrive the PCC rule based on the rule name */
struct pcc_rules *pcc = NULL;
pcc = get_predef_pcc_rule_entry(¤t->rule_name, GET_RULE);
if (pcc == NULL) {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Error: Failed to get PCC Rule from the pcc table"
" for Rule_Name: %s\n", LOG_VALUE, current->rule_name.rname);
/* Assign Next node address */
rule = current->next;
/* Get the next node */
current = rule;
continue;
}else {
return pcc;
}
}
}
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"Error: Failed to Get PCC Rule from centralized map table\n",
LOG_VALUE);
return NULL;
}
/**
 * @brief : Install or remove one predefined rule carried in the custom
 *          PFD-contents payload of a PFD management request.
 *          The payload bytes at offset 'idx' are reinterpreted as the
 *          rule structure matching 'msg_type'.
 *          NOTE(review): this assumes sender and receiver share struct
 *          layout/endianness and that the buffer is suitably aligned for
 *          the cast — confirm against the CP-side encoder.
 * @param : pfd_content, IE whose cstm_pfd_cntnt holds the serialized rule
 * @param : msg_type, MSG_* discriminator selecting the rule kind
 * @param : cp_ip, CP node address keying the centralized rule map
 * @param : idx, byte offset of the rule structure inside the payload
 * @return : Returns nothing (errors are logged only)
 */
void
process_rule_msg(pfcp_pfd_contents_ie_t *pfd_content, uint64_t msg_type,
	uint32_t cp_ip, uint16_t idx)
{
	int ret = 0;
	switch(msg_type) {
	case MSG_PCC_TBL_ADD: {
		struct pcc_rules *pcc = NULL;
		pcc_rule_name key = {0};
		memset(key.rname, '\0', sizeof(key.rname));
		/* Payload holds a serialized struct pcc_rules at offset idx.
		 * NOTE(review): pointer-arithmetic result can never be NULL,
		 * so the check below is effectively dead. */
		pcc = (struct pcc_rules *)(pfd_content->cstm_pfd_cntnt + idx);
		if (pcc == NULL) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to extract PCC Rule\n", LOG_VALUE);
			return;
		}
		memcpy(key.rname, pcc->rule_name, sizeof(pcc->rule_name));
		/* ADD_RULE either returns an existing slot or creates one */
		struct pcc_rules *pcc_temp = get_predef_pcc_rule_entry(&key, ADD_RULE);
		if (pcc_temp != NULL) {
			memcpy(pcc_temp, pcc, sizeof(struct pcc_rules));
			/* Add the rule name in centralized map table */
			rules_struct *rules = NULL;
			rules = get_map_rule_entry(cp_ip, ADD_RULE);
			if (rules == NULL) {
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Error: Failed to ADD/GET PCC Rule from centralized map table\n",
						LOG_VALUE);
				return;
			} else {
				rules_struct *new_node = NULL;
				/* Calculate the memory size to allocate */
				uint16_t size = sizeof(rules_struct);
				/* allocate memory for rule entry*/
				new_node = rte_zmalloc("Rules_Infos", size, RTE_CACHE_LINE_SIZE);
				if (new_node == NULL) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Failed to allocate memory for rule entry.\n",
							LOG_VALUE);
					return;
				}
				/* Set/Stored the rule name in the centralized location */
				memcpy(new_node->rule_name.rname, &key.rname,
						sizeof(key.rname));
				/* Insert the node into the LL */
				if (insert_rule_name_node(rules, new_node) < 0) {
					clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Failed to add node entry in LL\n",
							LOG_VALUE);
					return;
				}
				clLog(clSystemLog, eCLSeverityDebug,
						LOG_FORMAT"PCC Rule add/inserted in the internal table and map,"
						"Rule_Name: %s, Node_Count:%u\n", LOG_VALUE, key.rname, rules->rule_cnt);
			}
		} else {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Error: Failed to add pcc rules for rule_name =%s\n",
					LOG_VALUE, key.rname);
		}
		print_pcc_val(pcc);
		break;
	}
	case MSG_SDF_ADD:{
		/* TypeCast the payload into SDF Rule */
		struct pkt_filter *sdf_filter = (struct pkt_filter *)(pfd_content->cstm_pfd_cntnt + idx);
		if (sdf_filter == NULL) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to extract SDF Rule\n", LOG_VALUE);
			return;
		}
		/* ADD_RULE stores the filter in the SDF hash keyed by rule_id */
		ret = get_predef_rule_entry(sdf_filter->rule_id,
				SDF_HASH, ADD_RULE, (void **)&sdf_filter);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Error: Failed to Add SDF Rule from the internal table"
					"for SDF_Indx: %u\n", LOG_VALUE, sdf_filter->rule_id);
		}
		print_sdf_val(sdf_filter);
		break;
	}
	case MSG_ADC_TBL_ADD:{
		/* Payload holds a serialized ADC rule */
		struct adc_rules *adc_rule_entry = (struct adc_rules *)(pfd_content->cstm_pfd_cntnt + idx);
		if (adc_rule_entry == NULL) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to extract ADC Rule\n", LOG_VALUE);
			return;
		}
		ret = get_predef_rule_entry(adc_rule_entry->rule_id,
				ADC_HASH, ADD_RULE, (void **)&adc_rule_entry);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Error: Failed to add ADC Rule from the internal table"
					"for ADC_Indx: %u\n", LOG_VALUE, adc_rule_entry->rule_id);
		}
		print_adc_val(adc_rule_entry);
		break;
	}
	case MSG_MTR_ADD: {
		/* Payload holds a serialized meter profile */
		struct mtr_entry *mtr_rule = (struct mtr_entry *)(pfd_content->cstm_pfd_cntnt + idx);
		if (mtr_rule == NULL) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Failed to extract Mtr Rule\n", LOG_VALUE);
			return;
		}
		ret = get_predef_rule_entry(mtr_rule->mtr_profile_index,
				MTR_HASH, ADD_RULE, (void **)&mtr_rule);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Error: Failed to Add MTR Rule from the internal table"
					"for Mtr_Indx: %u\n", LOG_VALUE, mtr_rule->mtr_profile_index);
		}
		print_mtr_val(mtr_rule);
		break;
	}
	/* All delete variants collapse to clearing the per-CP rule map */
	case MSG_PCC_TBL_DEL:
	case MSG_SDF_DEL:
	case MSG_ADC_TBL_DEL:
	case MSG_MTR_DEL:
	case MSG_SESS_DEL:{
		ret = del_map_rule_entry(cp_ip);
		if(ret < 0){
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Error: Failed to delete rules\n",
					LOG_VALUE);
		}
		break;
	}
	/* The following message types are intentionally no-ops here */
	case MSG_SESS_MOD:
		break;
	case MSG_DDN_ACK:
		break;
	case MSG_EXP_CDR:
		break;
	default:
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Error: no appropirate message passed\n",
				LOG_VALUE);
		break;
	}
}
/**
 * @brief : Process a PFD Management Request: walk every application id,
 *          PFD context and PFD contents IE, validate each one and hand
 *          accepted rule payloads to process_rule_msg().
 * @param : pfcp_pfd_mgmt_req, decoded PFD management request
 * @param : cause_id, out: cause reported back to the CP
 * @param : offend_id, out: offending IE type on rejection
 * @param : cp_ip, CP node address keying the centralized rule map
 * @return : Returns nothing (stops at the first rejected IE)
 */
void
process_up_pfd_mgmt_request(pfcp_pfd_mgmt_req_t *pfcp_pfd_mgmt_req,
	uint8_t *cause_id, int *offend_id, uint32_t cp_ip)
{
	uint16_t idx = 0;

	for (uint8_t app = 0; app < pfcp_pfd_mgmt_req->app_ids_pfds_count; ++app) {

		for (uint8_t ctx = 0;
				ctx < pfcp_pfd_mgmt_req->app_ids_pfds[app].pfd_context_count;
				++ctx) {

			for (uint8_t cont = 0;
					cont < pfcp_pfd_mgmt_req->app_ids_pfds[app].pfd_context[ctx].pfd_contents_count;
					++cont) {

				pfcp_pfd_contents_ie_t *pfd_content =
					&(pfcp_pfd_mgmt_req->app_ids_pfds[app].pfd_context[ctx].pfd_contents[cont]);

				/* Skip empty IEs */
				if (!pfd_content->header.len)
					continue;

				check_cause_id_pfd_mgmt(pfd_content, &cause_id, &offend_id);
				if (*cause_id != REQUESTACCEPTED) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Error: cause id is not accepted\n",
							LOG_VALUE);
					return;
				}

				/* Decode the rule type and install the carried rule */
				long mtype = get_rule_type(pfd_content, &idx);
				process_rule_msg(pfd_content, mtype, cp_ip, idx);
			}
		}
	}

	clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Successfully pfd management request processed\n",
			LOG_VALUE);
	return;
}
/**
 * @brief : Handle a PFCP Session Report Response from the CP. If the CP
 *          updated the BAR's suggested buffered-packet count, each bearer's
 *          downlink buffering ring is replaced by a new ring of the
 *          suggested size and the already-buffered packets are migrated.
 * @param : sess_rep_resp, decoded session report response
 * @return : 0 on success, -1 when the session is unknown
 */
int8_t
process_up_session_report_resp(pfcp_sess_rpt_rsp_t *sess_rep_resp)
{
	pfcp_session_t *sess = NULL;

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT
			"PFCP Session Report Response :: START \n", LOG_VALUE);

	/* Get the session information from session table based on UP_SESSION_ID */
	if (sess_rep_resp->header.s) {
		/* Check SEID is not ZERO */
		sess = get_sess_info_entry(sess_rep_resp->header.seid_seqno.has_seid.seid,
				SESS_MODIFY);
	}
	if (sess == NULL)
		return -1;

	/* pfcpsrrsp_flags: Dropped the bufferd packets */
	if (sess_rep_resp->sxsrrsp_flags.drobu) {
		/* Free the downlink data rings */
		//rte_free();
	}

	if (sess_rep_resp->update_bar.header.len) {
		if (sess->bar.dl_buf_suggstd_pckts_cnt.pckt_cnt_val !=
				sess_rep_resp->update_bar.dl_buf_suggstd_pckt_cnt.pckt_cnt_val) {
			struct pfcp_session_datat_t *si = sess->sessions;

			while (si != NULL) {
				/* Replace the default DL ring with one of the suggested size */
				struct rte_ring *ring = si->dl_ring;
				if (ring) {
					unsigned int count = rte_ring_count(ring);
					/* BUG FIX: the ring stores mbuf POINTERS, so the
					 * staging buffer must be an array of pointers, not
					 * of struct rte_mbuf objects. */
					struct rte_mbuf *pkts[count ? count : 1];

					struct rte_ring *new_ring = allocate_ring(
							sess_rep_resp->update_bar.dl_buf_suggstd_pckt_cnt.pckt_cnt_val);
					if (new_ring == NULL) {
						clLog(clSystemLog, eCLSeverityCritical,
								LOG_FORMAT"Not enough memory "
								"to allocate new ring\n", LOG_VALUE);
						return 0;
					}

					/* BUG FIX: the burst APIs return the number of objects
					 * moved (never -ENOENT/-ENOBUFS), and the old code
					 * passed an UNINITIALIZED pointer as the 'available'
					 * argument. Pass NULL and check the counts instead. */
					unsigned int nb_deq = rte_ring_sc_dequeue_burst(ring,
							(void **)pkts, count, NULL);
					unsigned int nb_enq = rte_ring_enqueue_burst(new_ring,
							(void **)pkts, nb_deq, NULL);
					if (nb_enq < nb_deq) {
						clLog(clSystemLog, eCLSeverityCritical,
								LOG_FORMAT"Dropped %u buffered DL packets while "
								"resizing ring\n", LOG_VALUE, nb_deq - nb_enq);
					}

					/* Old ring is fully drained; release it */
					rte_ring_free(ring);

					if (rte_ring_sc_dequeue(dl_ring_container, (void **)&ring) == -ENOENT) {
						clLog(clSystemLog, eCLSeverityCritical,
								LOG_FORMAT"Ring not found\n",LOG_VALUE);
					} else {
						clLog(clSystemLog, eCLSeverityDebug,
								LOG_FORMAT"Dequeued Ring \n",LOG_VALUE);
					}

					ring = NULL;
					si->dl_ring = new_ring;
				}
				si = si->next;
			}

			sess->bar.dl_buf_suggstd_pckts_cnt.pckt_cnt_val =
				sess_rep_resp->update_bar.dl_buf_suggstd_pckt_cnt.pckt_cnt_val;
		}
		sess->bar.bar_id = sess_rep_resp->update_bar.bar_id.bar_id_value;
	}

	clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT": CP_Sess_ID: %lu, UP_Sess_ID:%lu\n",
			LOG_VALUE, sess->cp_seid, sess->up_seid);
	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | ulpc/legacy_admf/include/LegacyAdmf.h | <filename>ulpc/legacy_admf/include/LegacyAdmf.h
/*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _SAMPLE_ADMF_H_
#define _SAMPLE_ADMF_H_
#include <dlfcn.h>
#include "epctools.h"
#include "etevent.h"
#include "esocket.h"
#include "emgmt.h"
#define LOG_SYSTEM 1
#define EMPTY_STRING ""
#define ZERO 0
#define RET_SUCCESS 0
#define RET_FAILURE 1
#define ADMF_PACKET 10
#define ADMF_INTFC_PACKET 20
#define LEGACY_ADMF_ACK 30
#define ADMF_ACK 40
#define LEGACY_ADMF_PACKET 50
#define ADMF_INTFC_ACK 60
#define IPV6_MAX_LEN 16
#define BACKLOG_CONNECTIION 100
#define ADD_UE_ENTRY_URI "/addueentry"
#define UPDATE_UE_ENTRY_URI "/updateueentry"
#define DELETE_UE_ENTRY_URI "/deleteueentry"
#define UE_DB_KEY "uedatabase"
#define ADD_REQUEST 1
#define UPDATE_REQUEST 2
#define DELETE_REQUEST 3
#define SAFE_DELETE(p) { if (NULL != p) { delete(p); (p) = NULL; }}
#pragma pack(push, 1)
/* Wire-format packet exchanged with the legacy ADMF: one UE intercept
 * entry. Packed to 1-byte alignment so it can be sent/parsed as raw
 * bytes over the TCP link. */
struct legacyAdmfPacket {
	uint32_t packetLength;   /* total length of this packet */
	struct ueEntry {
		uint64_t seqId;          /* request sequence identifier */
		uint64_t imsi;           /* target subscriber IMSI */
		uint16_t packetType;     /* one of the *_PACKET/*_ACK codes above */
		uint16_t requestType;    /* ADD_/UPDATE_/DELETE_REQUEST */
		UChar startTime[21];     /* intercept start time (fixed-size string; format TBD — confirm) */
		UChar stopTime[21];      /* intercept stop time (same format) */
	} ue_entry_t;
};
#pragma pack(pop)
#pragma pack(push, 1)
/* Minimal acknowledgement packet: echoes the IMSI of the request
 * being acknowledged. Packed for raw transmission. */
struct AckPacket {
	uint8_t packetLen;       /* length of this ack packet */
	struct AckPacketHdr {
		uint8_t packetType;  /* LEGACY_ADMF_ACK / ADMF_ACK / ... */
		uint64_t imsi;       /* IMSI the ack refers to */
	} header;
};
#pragma pack(pop)
#pragma pack(push, 1)
/* UE-database request forwarded to the ADMF interface: carries an
 * opaque request body (presumably JSON — confirm against the sender). */
struct UeDatabase {
	uint32_t packetLen;          /* total length of this packet */
	struct ueEntry {
		uint16_t requestType;    /* ADD_/UPDATE_/DELETE_REQUEST */
		uint16_t packetType;     /* packet discriminator (see codes above) */
		uint16_t bodyLength;     /* used bytes in requestBody */
		uint16_t requestStatus;  /* status echoed back in responses */
		UChar requestBody[2048]; /* serialized request payload */
	} ue_entry_t;
};
#pragma pack(pop)
/**
 * @brief : Maintains Sample LegacyADMF configurations read from config file
 */
typedef struct configurations {
	std::string nodeName;       /* logical name of this node */
	cpStr serverIp;             /* IP this legacy-ADMF server listens on */
	uint16_t serverPort;        /* TCP listen port for ADMF connections */
	cpStr legAdmfIntfcIp;       /* legacy-ADMF interface peer IP */
	uint16_t legAdmfIntfcPort;  /* legacy-ADMF interface peer port */
} configurations_t;
class WorkerThread;
class LegacyAdmfApp;
/* REST handler for the add-UE-entry POST endpoint (presumably bound to
 * ADD_UE_ENTRY_URI — confirm in the .cpp). Forwards the request body to
 * the worker thread. */
class AddUeEntryPost : public EManagementHandler
{
	public:
		AddUeEntryPost(ELogger &audit, WorkerThread *mApp);
		WorkerThread *app;   /* worker owning the TCP sockets; not owned here */

		virtual Void process(const Pistache::Http::Request& request,
				Pistache::Http::ResponseWriter &response);

		virtual ~AddUeEntryPost() {}

	private:
		AddUeEntryPost();    /* default construction disallowed */
};
/* REST handler for the update-UE-entry POST endpoint (presumably bound
 * to UPDATE_UE_ENTRY_URI — confirm in the .cpp). */
class UpdateUeEntryPost : public EManagementHandler
{
	public:
		UpdateUeEntryPost(ELogger &audit, WorkerThread *mApp);
		WorkerThread *app;   /* worker owning the TCP sockets; not owned here */

		virtual Void process(const Pistache::Http::Request& request,
				Pistache::Http::ResponseWriter &response);

		virtual ~UpdateUeEntryPost() {}

	private:
		UpdateUeEntryPost(); /* default construction disallowed */
};
/* REST handler for the delete-UE-entry POST endpoint (presumably bound
 * to DELETE_UE_ENTRY_URI — confirm in the .cpp). */
class DeleteUeEntryPost : public EManagementHandler
{
	public:
		DeleteUeEntryPost(ELogger &audit, WorkerThread *mApp);
		WorkerThread *app;   /* worker owning the TCP sockets; not owned here */

		virtual Void process(const Pistache::Http::Request& request,
				Pistache::Http::ResponseWriter &response);

		virtual ~DeleteUeEntryPost() {}

	private:
		DeleteUeEntryPost(); /* default construction disallowed */
};
/* Server-side TCP session: one Talker per accepted connection on the
 * listener. Socket-event callbacks are implemented in the .cpp. */
class Talker : public ESocket::TCP::TalkerPrivate
{
	public:
		Talker(WorkerThread &thread);
		virtual ~Talker();

		void onReceive();   /* data arrived from the peer */
		void onClose();
		void onError();
		void onConnect();

		void sendData();    /* push pending data to the peer */

	private:
		Talker();           /* must be created with an owning thread */
};
/* Client-side TCP session towards the legacy-ADMF interface; used to
 * forward UeDatabase requests. */
class IntfcClient : public ESocket::TCP::TalkerPrivate
{
	public:
		IntfcClient(WorkerThread &thread);
		virtual ~IntfcClient();

		Void onConnect();
		Void onReceive();
		Void onClose();
		Void onError();

		Void sendData(UeDatabase &ue);  /* transmit one UE-database packet */

	private:
		IntfcClient();      /* must be created with an owning thread */
};
/* TCP listener accepting ADMF connections; spawns a Talker per accept. */
class Listener : public ESocket::TCP::ListenerPrivate {
	public:
		Listener(WorkerThread &thread);
		~Listener();

		void onError();
		/* Factory invoked on accept; returns the new Talker */
		ESocket::TCP::TalkerPrivate *createSocket(ESocket::ThreadPrivate &thread);
};
class WorkerThread : public ESocket::ThreadPrivate {
public:
WorkerThread();
~WorkerThread();
void onInit();
void onQuit();
void onClose();
Void onError();
Void connect();
Void onSocketClosed(ESocket::BasePrivate *psocket);
Void errorHandler(EError &err, ESocket::BasePrivate *psocket);
Talker *createTalker();
UShort getLocalPort() const { return m_local_port; }
Void sendRequestToInterface(uint16_t requestType,
std::string &requestBody);
private:
UShort m_local_port;
Listener *m_listener;
IntfcClient *m_client;
std::list <Talker *> m_talker;
};
/* Application wrapper: owns the worker thread, the REST management
 * endpoint and its three UE-entry handlers, plus the shutdown event. */
class LegacyAdmfApp
{
	public:
		LegacyAdmfApp();
		~LegacyAdmfApp();

		Void startup(EGetOpt &opt);  /* start worker + REST endpoint */
		Void shutdown();             /* stop everything */

		Void setShutdownEvent() {
			m_shutdown.set();
		}

		Void waitForShutdown() {
			m_shutdown.wait();
		}

	private:
		WorkerThread *m_app;             /* socket worker thread */

		EEvent m_shutdown;               /* signalled to end waitForShutdown() */

		EManagementEndpoint *cCliPost;   /* REST management endpoint */
		AddUeEntryPost *mAddUe;          /* handler: add UE entry */
		UpdateUeEntryPost *mUpdateUe;    /* handler: update UE entry */
		DeleteUeEntryPost *mDeleteUe;    /* handler: delete UE entry */
};
#endif /* _SAMPLE_ADMF_H_ */
|
nikhilc149/e-utran-features-bug-fixes | dp/stats.c | /*
* Copyright (c) 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <string.h>
#include <sched.h>
#include <unistd.h>
#include <rte_ring.h>
#include <rte_pipeline.h>
#include <rte_lcore.h>
#include <rte_ethdev.h>
#include <rte_port_ring.h>
#include <rte_port_ethdev.h>
#include <rte_table_hash.h>
#include <rte_table_stub.h>
#include <rte_byteorder.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_jhash.h>
#include <rte_cycles.h>
#include <rte_port_ring.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_timer.h>
#include <rte_debug.h>
#include <cmdline_rdline.h>
#include <cmdline_parse.h>
#include <cmdline_socket.h>
#include <cmdline.h>
#include "stats.h"
#include "up_main.h"
#include "commands.h"
#include "interface.h"
#include "gw_adapter.h"
#include "epc_packet_framework.h"
/**
* @brief : Maintains uplink packet data
*/
struct ul_pkt_struct {
	uint64_t IfPKTS;    /* UL NIC received packets (rte_eth_stats.ipackets) */
	uint64_t IfMisPKTS; /* UL NIC missed packets (rte_eth_stats.imissed) */
	uint64_t ULRX;      /* packets into the UL pipeline (pkts_in) */
	uint64_t iLBRNG;    /* ring counters — not populated in this file */
	uint64_t oLBRNG;
	uint64_t iWKRNG;
	uint64_t iTXRNG;
	uint64_t ULTX;      /* packets out of the UL pipeline (pkts_out) */
	uint64_t GTP_ECHO;  /* GTP echo packets (EXSTATS builds only) */
	uint64_t RS_RX;     /* router-solicitation packets received */
	uint64_t RS_TX;     /* router-solicitation packets transmitted */
};
/**
 * @brief : Maintains downlink packet data
 */
struct dl_pkt_struct {
	uint64_t IfPKTS;    /* DL NIC received packets (rte_eth_stats.ipackets) */
	uint64_t IfMisPKTS; /* DL NIC missed packets (rte_eth_stats.imissed) */
	uint64_t DLRX;      /* packets into the DL pipeline (pkts_in) */
	uint64_t iLBRNG;    /* ring counters — not populated in this file */
	uint64_t oLBRNG;
	uint64_t iWKRNG;
	uint64_t iTXRNG;
	uint64_t DLTX;      /* packets out of the DL pipeline (pkts_out) */
	uint64_t ddn_req;   /* downlink-data-notification requests sent */
	uint64_t ddn_pkts;  /* packets buffered while awaiting DDN */
};
/* Aggregated UL/DL counters sampled once per display interval */
struct ul_pkt_struct ul_param = { 0 };
struct dl_pkt_struct dl_param = { 0 };
/* Rolling row counter used to re-print the header every 20 rows */
uint8_t cnt = 0;
extern int clSystemLog;
#ifdef STATS
/* Print the column headers for the periodic stats table. The column set
 * depends on the build flags (EXSTATS / DEBUG_DDN / DEBUG_RS_MSG) and
 * must stay in sync with display_stats() below. */
void
print_headers(void)
{
	printf("\n\n");
	printf("%s\n", "##NGCORE_SHRINK(RTC)");
#ifdef EXSTATS
	/* extended stats: adds the GTP-ECHO column */
	printf("%30s %32s %24s\n", "UPLINK", "||", "DOWNLINK");
	printf("%9s %9s %9s %9s %9s %9s %4s %9s %9s %9s %9s %9s \n",
			"IfMisPKTS", "IfPKTS", "UL-RX", "UL-TX", "UL-DFF", "GTP-ECHO", "||",
			"IfMisPKTS", "IfPKTS", "DL-RX", "DL-TX", "DL-DFF");
#else
#if DEBUG_DDN
	/* DDN debugging: adds DDN request / buffered-packet columns */
	printf("%24s %29s %24s\n", "UPLINK", "||", "DOWNLINK");
	printf("%9s %9s %9s %9s %9s %4s %9s %9s %9s %9s %9s %9s %9s\n",
			"IfMisPKTS", "IfPKTS", "UL-RX", "UL-TX", "UL-DFF", "||",
			"IfMisPKTS", "IfPKTS", "DL-RX", "DL-TX", "DL-DFF", "DDN", "DDN_BUF_PKTS");
#else
#ifdef DEBUG_RS_MSG
	/* router-solicitation debugging: adds RS-RX / RS-TX columns */
	printf("%30s %33s %30s\n", "UPLINK", "||", "DOWNLINK");
	printf("%9s %9s %9s %9s %9s %9s %4s %9s %9s %9s %9s %9s %9s \n",
			"IfMisPKTS", "IfPKTS", "UL-RX", "UL-TX", "UL-DFF", "RS-RX", "||",
			"IfMisPKTS", "IfPKTS", "DL-RX", "DL-TX", "DL-DFF", "RS-TX");
#else
	/* default column set */
	printf("%24s %29s %24s\n", "UPLINK", "||", "DOWNLINK");
	printf("%9s %9s %9s %9s %9s %4s %9s %9s %9s %9s %9s \n",
			"IfMisPKTS", "IfPKTS", "UL-RX", "UL-TX", "UL-DFF", "||",
			"IfMisPKTS", "IfPKTS", "DL-RX", "DL-TX", "DL-DFF");
#endif /* DEBUG_RS_MSG */
#endif /* DEBUG_DDN */
#endif /* EXSTATS */
}
/* Print one row of the stats table from the current ul_param/dl_param
 * snapshot. UL-DFF / DL-DFF are the RX-TX differences (packets currently
 * in flight or dropped inside the pipeline). Column layout must match
 * print_headers() for the same build flags. */
void
display_stats(void)
{
#ifdef EXSTATS
	printf("%9lu %9lu %9lu %9lu %9lu %9lu %4s %9lu %9lu %9lu %9lu %9lu \n",
			ul_param.IfMisPKTS, ul_param.IfPKTS, ul_param.ULRX, ul_param.ULTX,
			(ul_param.ULRX - ul_param.ULTX), ul_param.GTP_ECHO, "||",
			dl_param.IfMisPKTS, dl_param.IfPKTS, dl_param.DLRX, dl_param.DLTX,
			(dl_param.DLRX - dl_param.DLTX));
#else
#if DEBUG_DDN
	printf("%9lu %9lu %9lu %9lu %9lu %4s %9lu %9lu %9lu %9lu %9lu %9lu %9lu\n",
			ul_param.IfMisPKTS, ul_param.IfPKTS, ul_param.ULRX, ul_param.ULTX,
			(ul_param.ULRX - ul_param.ULTX), "||",
			dl_param.IfMisPKTS, dl_param.IfPKTS, dl_param.DLRX, dl_param.DLTX,
			(dl_param.DLRX - dl_param.DLTX), dl_param.ddn_req, dl_param.ddn_pkts);
#else
#ifdef DEBUG_RS_MSG
	printf("%9lu %9lu %9lu %9lu %9lu %9lu %4s %9lu %9lu %9lu %9lu %9lu %9lu\n",
			ul_param.IfMisPKTS, ul_param.IfPKTS, ul_param.ULRX, ul_param.ULTX,
			(ul_param.ULRX - ul_param.ULTX), ul_param.RS_RX, "||",
			dl_param.IfMisPKTS, dl_param.IfPKTS, dl_param.DLRX, dl_param.DLTX,
			(dl_param.DLRX - dl_param.DLTX), ul_param.RS_TX);
#else
	printf("%9lu %9lu %9lu %9lu %9lu %4s %9lu %9lu %9lu %9lu %9lu \n",
			ul_param.IfMisPKTS, ul_param.IfPKTS, ul_param.ULRX, ul_param.ULTX,
			(ul_param.ULRX - ul_param.ULTX), "||",
			dl_param.IfMisPKTS, dl_param.IfPKTS, dl_param.DLRX, dl_param.DLTX,
			(dl_param.DLRX - dl_param.DLTX));
#endif /* DEBUG_RS_MSG */
#endif /* DEBUG_DDN */
#endif /* EXSTATS */
}
/* Read the input-port statistics of one pipeline port into *istats.
 * With STATS_CLR the counters are cleared after reading.
 * 'name' is accepted for symmetry with pip_ostats but unused here. */
void
pip_istats(struct rte_pipeline *p, char *name, uint8_t port_id, struct rte_pipeline_port_in_stats *istats)
{
	int status;
#ifdef STATS_CLR
	/* set clear bit */
	status = rte_pipeline_port_in_stats_read(p, port_id, istats, 1);
#else
	status = rte_pipeline_port_in_stats_read(p, port_id, istats, 0);
#endif /* STATS_CLR*/

	if (status != 0)
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT" Stats Read Error\n", LOG_VALUE);
}
/* Snapshot the pipeline INPUT counters into the global ul_param/dl_param
 * used by display_stats(). Values come from the per-port epc_app state. */
void
pipeline_in_stats(void)
{
	ul_param.ULRX = epc_app.ul_params[S1U_PORT_ID].pkts_in;
	ul_param.RS_RX = epc_app.ul_params[S1U_PORT_ID].pkts_rs_in;
	dl_param.DLRX = epc_app.dl_params[SGI_PORT_ID].pkts_in;
	dl_param.ddn_req = epc_app.dl_params[SGI_PORT_ID].ddn;
	dl_param.ddn_pkts = epc_app.dl_params[SGI_PORT_ID].ddn_buf_pkts;

#ifdef EXSTATS
	ul_param.GTP_ECHO = epc_app.ul_params[S1U_PORT_ID].pkts_echo;
#endif /* EXSTATS */
}
void
pip_ostats(struct rte_pipeline *p, char *name, uint8_t port_id, struct rte_pipeline_port_out_stats *ostats)
{
int status;
status = rte_pipeline_port_out_stats_read(p, port_id, ostats, 0);
if (status != 0)
clLog(clSystemLog, eCLSeverityDebug,
LOG_FORMAT" Stats Read Error\n",LOG_VALUE);
}
/* Snapshot the pipeline OUTPUT counters into the global ul_param/dl_param
 * used by display_stats(). */
void
pipeline_out_stats(void)
{
	ul_param.ULTX = epc_app.ul_params[S1U_PORT_ID].pkts_out;
	ul_param.RS_TX = epc_app.ul_params[S1U_PORT_ID].pkts_rs_out;
	dl_param.DLTX = epc_app.dl_params[SGI_PORT_ID].pkts_out;
}
void
nic_in_stats(void)
{
int ret = 0;
struct rte_eth_stats stats0 = {0};
struct rte_eth_stats stats1 = {0};
/* UPLINK: Get/Read the NIC/Iface stats */
ret = rte_eth_stats_get(app.wb_port, &stats0);
if (ret != 0) {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"UPLINK: Packets are not read from UL PORT\n", LOG_VALUE);
}
/* DOWNLINK: Get/Read the NIC/Iface stats */
ret = rte_eth_stats_get(app.eb_port, &stats1);
if (ret != 0) {
clLog(clSystemLog, eCLSeverityCritical,
LOG_FORMAT"DOWNLINK: Packets are not read from DL PORT\n", LOG_VALUE);
}
{
ul_param.IfPKTS = stats0.ipackets;
ul_param.IfMisPKTS = stats0.imissed;
}
{
dl_param.IfPKTS = stats1.ipackets;
dl_param.IfMisPKTS = stats1.imissed;
}
#ifdef STATS_CLR
rte_eth_stats_reset(app.wb_port);
rte_eth_stats_reset(app.eb_port);
#endif /* STATS_CLR */
}
#endif /*STATS*/
#ifndef CMDLINE_STATS
/**
* @brief : Timer callback
* @param : time, timer value, unused param
* @param : arg, unused param
* @return : Returns nothing
*/
static void timer_cb(__attribute__ ((unused))
		struct rte_timer *tim, __attribute__ ((unused))void *arg)
{
	/* Counts invocations; see the (disabled) stop condition below */
	static unsigned counter;
#ifdef STATS
	/* Refresh all counters, then print a row (header every 20 rows) */
	nic_in_stats();
	pipeline_in_stats();
	pipeline_out_stats();

	if(cnt == 0 || cnt == 20) {
		print_headers();
		if(cnt == 20)
			cnt=1;
	}

	display_stats();
	cnt++;
#endif /* STATS */
	/* this timer is automatically reloaded until we decide to
	 * stop it, when counter reaches 500. */
	if ((counter++) == 500) {
		/* rte_timer_stop(tim); */
	}

	/* CLI counter */
	cli_node.cli_config.oss_reset_time++;
}
#endif
#define TIMER_RESOLUTION_CYCLES 20000000ULL /* around 10ms at 2 Ghz */
#define TIMER_INTERVAL 1 /* sec */
#ifndef CMDLINE_STATS
static struct rte_timer timer0;
uint64_t prev_tsc = 0, cur_tsc, diff_tsc;
#endif
/* Stats/CLI servicing routine, invoked repeatedly from the master core
 * loop. With CMDLINE_STATS it drives the interactive DPDK cmdline;
 * otherwise it arms (once) and services the periodic stats timer. */
void epc_stats_core(void)
{
#ifdef CMDLINE_STATS
	/* BUG FIX: 'cl' was a non-static local re-initialized to NULL on
	 * every call, while 'cmd_ready' IS static — so every call after the
	 * first skipped creation and ran cmdline_poll(NULL). Keep the
	 * handle across calls. */
	static struct cmdline *cl = NULL;
	int status;
	static int cmd_ready;

	if (cmd_ready == 0) {
		cl = cmdline_stdin_new(main_ctx, "vepc>");
		if (cl == NULL)
			rte_panic("Cannot create cmdline instance\n");
		cmdline_interact(cl);
		cmd_ready = 1;
	}

	status = cmdline_poll(cl);
	if (status < 0)
		rte_panic("CLI poll error (%" PRId32 ")\n", status);
	else if (status == RDLINE_EXITED) {
		cmdline_stdin_exit(cl);
		rte_exit(0, NULL);
	}
#else
	/* init timer structures */
	static uint8_t start_timer = 1;

	/* For NGCORE_SHRINK version, this function would be invoked in an
	 * infinite loop. Initialize timer parameters only once */
	if (start_timer == 1) {
		rte_timer_init(&timer0);

		/* load timer0, every second, on master lcore, reloaded automatically */
		uint64_t hz = rte_get_timer_hz();
		unsigned lcore_id = rte_lcore_id();
		rte_timer_reset(&timer0, hz * TIMER_INTERVAL, PERIODICAL, lcore_id,
				timer_cb, NULL);
		start_timer = 0;
	}

	/* Only pay the cost of rte_timer_manage() every ~10 ms */
	cur_tsc = rte_rdtsc();
	diff_tsc = cur_tsc - prev_tsc;
	if (diff_tsc > TIMER_RESOLUTION_CYCLES) {
		rte_timer_manage();
		prev_tsc = cur_tsc;
	}
#endif
}
|
nikhilc149/e-utran-features-bug-fixes | dp/up_ether.h | <filename>dp/up_ether.h<gh_stars>0
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _UP_ETHER_H_
#define _UP_ETHER_H_
/**
* @file
* This file contains macros, data structure definitions and function
* prototypes of dataplane ethernet constructor.
*/
#include <stdint.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include "up_main.h"
#include "pfcp_up_struct.h"
#define ETH_TYPE_IPv4 0x0800 /**< IPv4 Protocol. */
#define ETH_TYPE_IPv6 0x86DD /**< IPv6 Protocol. */
/**
* @brief : Function to return pointer to L2 headers.
* @param : m, mbuf pointer
* @return : Returns address to l2 hdr
*/
static inline struct ether_hdr *get_mtoeth(struct rte_mbuf *m)
{
return (struct ether_hdr *)rte_pktmbuf_mtod(m, unsigned char *);
}
/**
* @brief : Function to construct L2 headers.
* @param : m, mbuf pointer
* @param : portid, port id
* @param : pdr, pointer to pdr session info
* @param : Loopback_flag, indication flags
* @return : Returns 0 in case of success , -1(ARP lookup fail) otherwise
*/
int construct_ether_hdr(struct rte_mbuf *m, uint8_t portid,
pdr_info_t **pdr, uint8_t flag);
#endif /* _UP_ETHER_H_ */
|
nikhilc149/e-utran-features-bug-fixes | cp/cp_app.h | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef CP_APP_H_
#define CP_APP_H_
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
#include <signal.h>
#include <sys/wait.h>
#include <stdbool.h>
#include "gx_app/include/gx_struct.h"
#include "gx_app/include/gx.h"
#ifdef CP_BUILD
#include "ue.h"
#endif /* CP_BUILD */
#include "gtp_messages.h"
/* Unix-domain socket endpoints used between CP and the Gx application. */
#define SERVER_PATH "/usr/sock_server_cca_rar"
#define CLIENT_PATH "/usr/sock_client_ccr_raa"

#define MAX_PATH_LEN 32

#define MULTIPLIER 50
/* FIX: parenthesized so expressions like (x / BUFFSIZE) or
 * (BUFFSIZE + 1) * n expand with the intended precedence. */
#define BUFFSIZE (MULTIPLIER * 1024)
#define BACKLOG 100
#define LENGTH sizeof(struct sockaddr_un)

/* IMSI length on gtpv2c */
#define BINARY_IMSI_LEN 8
/* IMSI length on gx */
#define STR_IMSI_LEN 16

/* MSISDN length on gtpv2c */
#define BINARY_MSISDN_LEN 6
/* MSISDN length on gx */
#define STR_MSISDN_LEN 12
extern int g_cp_sock;
extern int g_app_sock;
#pragma pack(1)
/* Message-type tag carried in the first byte of the buffers exchanged
 * between CP and the Gx application over the unix-domain socket. */
enum e_BUF_HDR {
	GX_RAR_MSG,	/* Re-Auth-Request */
	GX_RAA_MSG,	/* Re-Auth-Answer */
	GX_CCR_MSG,	/* Credit-Control-Request */
	GX_CCA_MSG,	/* Credit-Control-Answer */
};
/* UE status with respect to a Presence Reporting Area. */
enum pra_status {
	PRA_IN_AREA,	/* UE is inside the reporting area */
	PRA_OUT_AREA,	/* UE is outside the reporting area */
	PRA_INACTIVE,	/* reporting for the area is inactive */
};
/**
 * @brief : Maintains data related to different Gx messages
 */
typedef struct Gx_msg {
	uint8_t msg_type;	/* message tag, presumably enum e_BUF_HDR — confirm */
	uint16_t msg_len;	/* length of the encoded payload that follows */
	/* Only the member matching msg_type is valid at any time. */
	union data_t {
		GxRAR cp_rar;
		GxRAA cp_raa;
		GxCCR cp_ccr;
		GxCCR ccr;
		GxCCA cp_cca;
	}data;
}gx_msg;
#pragma pack()
/**
* @brief : Handles processing of gx rar message
* @param : recv_buf, Received data from incoming message
* @return : Returns nothing
*/
void
handle_gx_rar( unsigned char *recv_buf);
/**
* @brief : Handles processing of gx cca message
* @param : recv_buf, Received data from incoming message
* @return : Returns nothing
*/
void
handle_gx_cca( unsigned char *recv_buf);
/**
* @brief : Handles incoming gx messages
* @param : No param
* @return : Returns 0 in case of success , -1 otherwise
*/
int
msg_handler_gx( void );
/**
* @brief : Activate interface for listening gx messages
* @param : No param
* @return : Returns nothing
*/
void
start_cp_app( void );
#ifdef CP_BUILD
/**
* @brief : Fill ccr request
* @param : ccr, structure to be filled
* @param : context, ue context data
* @param : ebi_index, array index of bearer
* @param : sess_id, session id
* @param : flow_flag, 1 for bearer resource mod flow,else 0
* @return : Returns 0 in case of success , -1 otherwise
*/
int
fill_ccr_request(GxCCR *ccr, ue_context *context,
int ebi_index, char *sess_id, uint8_t flow_flag);
/**
* @brief : Fill Presence Reporting Area Info in CCA
* @param : presence_rprtng_area_info, structure to be filled
* @param : ue_pra_info, Presence Reporting Area in UE context
* @return : Returns nothing
*/
void
fill_presence_rprtng_area_info(GxPresenceReportingAreaInformationList *presence_rprtng_area_info,
presence_reproting_area_info_t *ue_pra_info);
#endif /* CP_BUILD */
/**
* @brief : Fill rat type ie
* @param : ccr_rat_type, parameter to be filled
* @param : csr_rat_type, input rat type
* @return : Returns nothing
*/
void
fill_rat_type_ie( int32_t *ccr_rat_type, uint8_t csr_rat_type );
/**
* @brief : Fill user equipment information
* @param : ccr_user_eq_info, structure to be filled
* @param : csr_imei, imei value
* @return : Returns nothing
*/
void
fill_user_equipment_info( GxUserEquipmentInfo *ccr_user_eq_info, uint64_t csr_imei );
/**
* @brief : Fill timezone information
* @param : ccr_tgpp_ms_timezone, structure to be filled
* @param : csr_ue_timezone, input data
* @return : Returns nothing
*/
void
fill_3gpp_ue_timezone( Gx3gppMsTimezoneOctetString *ccr_tgpp_ms_timezone,
gtp_ue_time_zone_ie_t csr_ue_timezone );
/**
* @brief : Fill subscription id information
* @param : subs_id, structure to be filled
* @param : imsi, imsi value
* @param : msisdn, msisdn value
* @return : Returns nothing
*/
void
fill_subscription_id( GxSubscriptionIdList *subs_id, uint64_t imsi, uint64_t msisdn );
/**
* @brief : Process create bearer response and send raa message
* @param : sock, interface id to send raa
* @return : Returns nothing
*/
void
process_create_bearer_resp_and_send_raa( int sock );
/**
* @brief : Convert binary data to string value
* @param : b_val, input binary data
* @param : s_val, parameter to store converted string
* @param : b_len, length of binary data
* @param : s_len, length of string
* @return : Returns nothing
*/
void
bin_to_str(unsigned char *b_val, char *s_val, int b_len, int s_len);
/**
* @brief : Encode imsi to binary value
* @param : imsi, imput imsi value
* @param : imsi_len, length of imsi
* @param : bin_imsi, output value
* @return : Returns nothing
*/
void
encode_imsi_to_bin(uint64_t imsi, int imsi_len, uint8_t *bin_imsi);
/**
* @brief : free dynamically allocated memory of cca msg.
* @param : cca, Structure to store cca msg.
* @return : Returns nothing
*/
void
free_cca_msg_dynamically_alloc_memory(GxCCA *cca);
#endif /* CP_APP_H_ */
|
nikhilc149/e-utran-features-bug-fixes | cp/state_machine/sm_pfcp_pcnd.c | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cp.h"
#include "gtpv2c.h"
#include "sm_pcnd.h"
#include "cp_stats.h"
#include "debug_str.h"
#include "pfcp_util.h"
#include "pfcp_messages_decoder.h"
#include "gtpv2c_error_rsp.h"
#include "cp_timer.h"
#include "predef_rule_init.h"
#include "cp_config.h"
extern pfcp_config_t config;
extern struct cp_stats_t cp_stats;
extern peer_addr_t upf_pfcp_sockaddr;
extern uint64_t num_sess;
extern int clSystemLog;
/**
 * @brief : Validate pfcp messages.
 *          Currently a stub: no validation is performed yet, every
 *          message is accepted.
 * @param : pfcp_header, message data
 * @param : bytes_rx, number of bytes in message
 * @return : Returns 0 in case of success , -1 otherwise
 */
static uint8_t
pcnd_check(pfcp_header_t *pfcp_header, int bytes_rx)
{
	/* TODO: Precondition of PFCP message need to handle later on. */
	(void)pfcp_header;
	(void)bytes_rx;
	return 0;
}
/**
 * @brief : Precondition handler / demultiplexer for received PFCP messages.
 *          Decodes the message into msg->pfcp_msg, retrieves the owning
 *          UPF or session context, and fills msg->{msg_type, state, proc,
 *          event, cp_mode} so the state machine can dispatch the matching
 *          callback. Accepted session-scoped messages are also mirrored
 *          to process_cp_li_msg() at the end.
 * @param : pfcp_rx, raw PFCP message as received from the socket
 * @param : msg, meta-data structure filled in for the state machine
 * @param : bytes_rx, number of bytes received
 * @param : peer_addr, address of the peer that sent the message
 * @return : Returns 0 in case of success , -1 otherwise
 */
uint8_t
pfcp_pcnd_check(uint8_t *pfcp_rx, msg_info *msg, int bytes_rx, peer_addr_t *peer_addr)
{
	int ret = 0;
	int decoded = 0;
	struct resp_info *resp = NULL;
	uint64_t sess_id = 0;
	int ebi_index = 0;
	pfcp_header_t *pfcp_header = (pfcp_header_t *) pfcp_rx;

	/* Generic precondition check (currently a stub that accepts all). */
	if ((ret = pcnd_check(pfcp_header, bytes_rx)) != 0)
		return ret;

	msg->msg_type = pfcp_header->message_type;

	switch(msg->msg_type) {
	case PFCP_ASSOCIATION_SETUP_RESPONSE: {
		/*Decode the received msg and stored into the struct. */
		decoded = decode_pfcp_assn_setup_rsp_t(pfcp_rx,
				&msg->pfcp_msg.pfcp_ass_resp);
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Decoded PFCP"
				" Association Setup Request [%d]\n", LOG_VALUE, decoded);

		/* Key the UPF context lookup on the node id advertised by the
		 * responding UPF. */
		ret = fill_ip_addr(msg->pfcp_msg.pfcp_ass_resp.node_id.node_id_value_ipv4_address,
				msg->pfcp_msg.pfcp_ass_resp.node_id.node_id_value_ipv6_address,
				&msg->upf_ip);
		if (ret < 0) {
			clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
					"IP address", LOG_VALUE);
		}

		upf_context_t *upf_context = NULL;
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Lookup for UPF Entry of IP Type : %s "
				"with IPv4 : "IPV4_ADDR"\t and IPv6 : "IPv6_FMT"",
				LOG_VALUE, ip_type_str(msg->upf_ip.ip_type),
				IPV4_ADDR_HOST_FORMAT(msg->upf_ip.ipv4_addr),
				PRINT_IPV6_ADDR(msg->upf_ip.ipv6_addr));

		/*Retrive association state based on UPF IP. */
		ret = rte_hash_lookup_data(upf_context_by_ip_hash,
				(const void*) &(msg->upf_ip), (void **) &(upf_context));
		if (ret < 0 || upf_context == NULL) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Entry not "
					"Found for UPF Context for IP Type : %s "
					"with IPv4 : "IPV4_ADDR"\t and IPv6 : "IPv6_FMT"",
					LOG_VALUE, ip_type_str(msg->upf_ip.ip_type),
					IPV4_ADDR_HOST_FORMAT(msg->upf_ip.ipv4_addr),
					PRINT_IPV6_ADDR(msg->upf_ip.ipv6_addr));
			return -1;
		}

		if( upf_context->cp_mode != SGWC) {
			/* Init rule tables of user-plane */
			ret = set_dest_address(msg->upf_ip, &upf_pfcp_sockaddr);
			if (ret < 0) {
				clLog(clSystemLog, eCLSeverityCritical,LOG_FORMAT "Error while assigning "
						"IP address", LOG_VALUE);
			}
			dump_predefined_rules_on_up(msg->upf_ip);
		}
		msg->cp_mode = upf_context->cp_mode;

		/* Checking Recovery mode initaited or not */
		if (recovery_flag == PRESENT) {
			if (msg->pfcp_msg.pfcp_ass_resp.cause.cause_value != REQUESTACCEPTED) {
				clLog(clSystemLog, eCLSeverityDebug,LOG_FORMAT
						"Cause received Association response is %d\n", LOG_VALUE,
						msg->pfcp_msg.pfcp_ass_resp.cause.cause_value);
				return -1;
			}
			/* CODE_REVIEW: Remove the hard coded proc setting, need to set proc in the pdn while sending sess est req */
			/* Recovery mode */
			msg->state = upf_context->state;
			msg->proc = RESTORATION_RECOVERY_PROC;
			/*Set the appropriate event type.*/
			msg->event = PFCP_ASSOC_SETUP_RESP_RCVD_EVNT;

			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Callback called for"
					"Msg_Type:PFCP_ASSOCIATION_SETUP_RESPONSE[%u], UPF IP Type : %s , "
					"UPF_IPv4 : "IPV4_ADDR", UPF_IPv6 : "IPv6_FMT", "
					"Procedure:%s, State:%s, Event:%s\n",
					LOG_VALUE, msg->msg_type, ip_type_str(msg->upf_ip.ip_type),
					IPV4_ADDR_HOST_FORMAT(msg->upf_ip.ipv4_addr),
					PRINT_IPV6_ADDR(msg->upf_ip.ipv6_addr),
					get_proc_string(msg->proc),
					get_state_string(msg->state), get_event_string(msg->event));
			break;
		}

		/* Association answered: stop and free the retry timer that was
		 * armed when the setup request was sent. */
		if(upf_context->timer_entry->pt.ti_id != 0) {
			stoptimer(&upf_context->timer_entry->pt.ti_id);
			deinittimer(&upf_context->timer_entry->pt.ti_id);
			/* free peer data when timer is de int */
			if(upf_context->timer_entry){
				rte_free(upf_context->timer_entry);
				upf_context->timer_entry = NULL;
			}
		}

		if(msg->pfcp_msg.pfcp_ass_resp.cause.cause_value != REQUESTACCEPTED){
			msg->state = ERROR_OCCURED_STATE;
			msg->event = ERROR_OCCURED_EVNT;
			msg->proc = INITIAL_PDN_ATTACH_PROC;
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Cause received in PFCP Association response"
					" is %s\n", LOG_VALUE,
					cause_str_pfcp(msg->pfcp_msg.pfcp_ass_resp.cause.cause_value));

			/* TODO: Add handling to send association to next upf
			 * for each buffered CSR */
			msg->cp_mode = upf_context->cp_mode;
			cs_error_response(msg, GTPV2C_CAUSE_INVALID_REPLY_FROM_REMOTE_PEER,
					CAUSE_SOURCE_SET_TO_0,
					upf_context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
			process_error_occured_handler(&msg, NULL);
			return -1;
		}

		if (ret >= 0) {
			msg->state = upf_context->state;
			/* Set Hard code value for temporary purpose as assoc is only in initial pdn */
			if(upf_context->indir_tun_flag != 0) {
				msg->proc = CREATE_INDIRECT_TUNNEL_PROC;
			} else {
				msg->proc = INITIAL_PDN_ATTACH_PROC;
			}
		} else {
			msg->cp_mode = upf_context->cp_mode;
			cs_error_response(msg, GTPV2C_CAUSE_INVALID_PEER, CAUSE_SOURCE_SET_TO_0,
					upf_context->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
			process_error_occured_handler(&msg, NULL);
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Entry not Found Msg_Type:%u, UPF IP:%u, Error_no:%d\n",
					LOG_VALUE, msg->msg_type, msg->upf_ip.ipv4_addr, ret);
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Entry not "
					"Found for UPF Context for IP Type : %s "
					"with IPv4 : "IPV4_ADDR"\t and IPv6 : "IPv6_FMT"",
					LOG_VALUE, ip_type_str(msg->upf_ip.ip_type),
					IPV4_ADDR_HOST_FORMAT(msg->upf_ip.ipv4_addr),
					PRINT_IPV6_ADDR(msg->upf_ip.ipv6_addr));
			return -1;
		}

		/*Set the appropriate event type.*/
		msg->event = PFCP_ASSOC_SETUP_RESP_RCVD_EVNT;

		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Callback called for"
				"Msg_Type:PFCP_ASSOCIATION_SETUP_RESPONSE[%u], UPF IP Type : %s, "
				"UPF_IPv4 : "IPV4_ADDR", UPF_IPv6 : "IPv6_FMT", "
				"Procedure:%s, State:%s, Event:%s\n",
				LOG_VALUE, msg->msg_type, ip_type_str(msg->upf_ip.ip_type),
				IPV4_ADDR_HOST_FORMAT(msg->upf_ip.ipv4_addr),
				PRINT_IPV6_ADDR(msg->upf_ip.ipv6_addr),
				get_proc_string(msg->proc),
				get_state_string(msg->state), get_event_string(msg->event));
		break;
	}

	case PFCP_PFD_MANAGEMENT_RESPONSE: {
		/* Decode pfd mgmt response */
		upf_context_t *upf_context = NULL;
		node_address_t upf_ip = {0};
		decoded = decode_pfcp_pfd_mgmt_rsp_t(pfcp_rx, &msg->pfcp_msg.pfcp_pfd_resp);
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"DEOCED bytes in Pfd Mgmt Resp is %d\n", LOG_VALUE,
				decoded);

		/* check cause ie */
		if(msg->pfcp_msg.pfcp_pfd_resp.cause.cause_value != REQUESTACCEPTED){
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT" Msg_Type:%u, Cause value:%s, offending ie:%u\n",
					LOG_VALUE, msg->msg_type, cause_str_pfcp(msg->pfcp_msg.pfcp_pfd_resp.cause.cause_value),
					msg->pfcp_msg.pfcp_pfd_resp.offending_ie.type_of_the_offending_ie);
			return -1;
		}

		/* PFD mgmt responses carry no node id; key the lookup on the
		 * sender's transport address instead. */
		if (peer_addr->type == PDN_TYPE_IPV4) {
			upf_ip.ipv4_addr = peer_addr->ipv4.sin_addr.s_addr;
			upf_ip.ip_type = PDN_TYPE_IPV4;
		} else if (peer_addr->type == PDN_TYPE_IPV6) {
			memcpy(upf_ip.ipv6_addr, peer_addr->ipv6.sin6_addr.s6_addr, IPV6_ADDRESS_LEN);
			upf_ip.ip_type = PDN_TYPE_IPV6;
		}

		/*Retrive association state based on UPF IP. */
		ret = rte_hash_lookup_data(upf_context_by_ip_hash,
				(const void*) &(upf_ip), (void **) &(upf_context));
		if (upf_context == NULL && ret < 0) {
			/* NOTE(review): these logs print msg->upf_ip, which is not
			 * filled in this case; the local upf_ip was used for the
			 * lookup — confirm which one should be reported. */
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Entry not Found Msg_Type:%u, UPF IP:%u, Error_no:%d\n",
					LOG_VALUE, msg->msg_type, upf_ip.ipv4_addr, ret);
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Entry not "
					"Found for UPF Context with Msg type : %u IP Type : %s "
					"IPv4 : "IPV4_ADDR"\t and IPv6 : "IPv6_FMT"",
					LOG_VALUE, msg->msg_type, ip_type_str(msg->upf_ip.ip_type),
					IPV4_ADDR_HOST_FORMAT(msg->upf_ip.ipv4_addr),
					PRINT_IPV6_ADDR(msg->upf_ip.ipv6_addr));
			return -1;
		}

		msg->cp_mode = upf_context->cp_mode;
		msg->state = PFCP_PFD_MGMT_RESP_RCVD_STATE;
		msg->event = PFCP_PFD_MGMT_RESP_RCVD_EVNT;
		msg->proc = INITIAL_PDN_ATTACH_PROC;
		break;
	}

	case PFCP_SESSION_ESTABLISHMENT_RESPONSE: {
		/*Decode the received msg and stored into the struct. */
		decoded = decode_pfcp_sess_estab_rsp_t(pfcp_rx, &msg->pfcp_msg.pfcp_sess_est_resp);
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"DEOCED bytes in Sess Estab Resp is %d\n", LOG_VALUE,
				decoded);

		sess_id = msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid;

		if (recovery_flag == PRESENT) {
			if (msg->pfcp_msg.pfcp_sess_est_resp.cause.cause_value != REQUESTACCEPTED){
				/* NOTE(review): "%d" is handed the char * returned by
				 * cause_str_pfcp() — format specifier should be "%s";
				 * confirm and fix separately. */
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Cause received Est response is %d\n", LOG_VALUE,
						cause_str_pfcp(msg->pfcp_msg.pfcp_sess_est_resp.cause.cause_value));
				/* Update session conter */
				num_sess--;
				return -1;
			}

			/* Retrive the session information based on session id. */
			if (get_sess_entry(sess_id, &resp) != 0) {
				clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
						"for seid: %lu", LOG_VALUE, sess_id);
				/* Update session conter */
				num_sess--;
				return -1;
			}

			/* Set State and proc */
			msg->state = resp->state ;
			msg->proc = resp->proc;
			msg->cp_mode = resp->cp_mode;
			/*Set the appropriate event type.*/
			msg->event = PFCP_SESS_EST_RESP_RCVD_EVNT;

			/* Update session conter */
			num_sess--;

			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Callback called for"
					"Msg_Type:PFCP_SESSION_ESTABLISHMENT_RESPONSE[%u], Seid:%lu, "
					"Procedure:%s, State:%s, Event:%s\n",
					LOG_VALUE, msg->msg_type,
					msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid,
					get_proc_string(msg->proc),
					get_state_string(msg->state), get_event_string(msg->event));
			break;
		}

		/* Retrive teid from session id */
		/* stop and delete the timer session for pfcp est. req. */
		int ebi = UE_BEAR_ID(msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid);
		int ebi_index = GET_EBI_INDEX(ebi);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
			cs_error_response(msg, GTPV2C_CAUSE_SYSTEM_FAILURE, CAUSE_SOURCE_SET_TO_0,
					msg->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
			return -1;
		}

		delete_pfcp_if_timer_entry(UE_SESS_ID(msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid),
				ebi_index);

		sess_id = msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid;

		/* Retrive the session information based on session id. */
		if (get_sess_entry(sess_id, &resp) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
					"for seid: %lu", LOG_VALUE, sess_id);
			return -1;
		}

		if(msg->pfcp_msg.pfcp_sess_est_resp.cause.cause_value !=
				REQUESTACCEPTED){
			msg->state = ERROR_OCCURED_STATE;
			msg->event = ERROR_OCCURED_EVNT;
			msg->proc = INITIAL_PDN_ATTACH_PROC;
			msg->cp_mode = resp->cp_mode;
			cs_error_response(msg, GTPV2C_CAUSE_INVALID_REPLY_FROM_REMOTE_PEER,
					CAUSE_SOURCE_SET_TO_0,
					resp->cp_mode != PGWC ? S11_IFACE : S5S8_IFACE);
			process_error_occured_handler(&msg, NULL);
			clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"Cause received Est response is %d\n", LOG_VALUE,
					msg->pfcp_msg.pfcp_sess_est_resp.cause.cause_value);
			return -1;
		}

		msg->state = resp->state;
		msg->proc = resp->proc;
		msg->cp_mode = resp->cp_mode;
		/*Set the appropriate event type.*/
		msg->event = PFCP_SESS_EST_RESP_RCVD_EVNT;

		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Callback called for"
				"Msg_Type:PFCP_SESSION_ESTABLISHMENT_RESPONSE[%u], Seid:%lu, "
				"Procedure:%s, State:%s, Event:%s\n",
				LOG_VALUE, msg->msg_type,
				msg->pfcp_msg.pfcp_sess_est_resp.header.seid_seqno.has_seid.seid,
				get_proc_string(msg->proc),
				get_state_string(msg->state), get_event_string(msg->event));
		break;
	}

	case PFCP_SESSION_MODIFICATION_RESPONSE: {
		/*Decode the received msg and stored into the struct. */
		decoded = decode_pfcp_sess_mod_rsp_t(pfcp_rx,
				&msg->pfcp_msg.pfcp_sess_mod_resp);
		if(!decoded){
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Decode fails for PFCP_SESSION_MODIFICATION_RESPONSE", LOG_VALUE);
			return -1;
		}
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"DECODED bytes in Sess Modify Resp is %d\n", LOG_VALUE,
				decoded);

		sess_id = msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid;

		/* Retrive the session information based on session id. */
		if (get_sess_entry(sess_id, &resp) != 0) {
			/*TODO: add error response on basis of gtp message type*/
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
					"for seid: %lu", LOG_VALUE, sess_id);
			return -1;
		}

		int ebi = (UE_BEAR_ID(sess_id));
		ebi_index = GET_EBI_INDEX(ebi);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
			pfcp_modification_error_response(resp, msg, GTPV2C_CAUSE_SYSTEM_FAILURE);
			return -1;
		}

		/* stop and delete timer entry for pfcp mod req */
		delete_pfcp_if_timer_entry(UE_SESS_ID(sess_id),ebi_index);

		/*Validate the modification is accepted or not. */
		if (msg->pfcp_msg.pfcp_sess_mod_resp.cause.cause_value != REQUESTACCEPTED) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Error Cause "
					"received for Session Modification Request. Cause : %s\n",
					LOG_VALUE, cause_str_pfcp(msg->pfcp_msg.pfcp_sess_mod_resp.cause.cause_value));
			/* Detach / bearer-deactivation procedures continue despite a
			 * rejected modification; everything else is aborted here. */
			if(resp->proc != DETACH_PROC
					&& resp->proc != MME_INI_DEDICATED_BEARER_DEACTIVATION_PROC
					&& resp->proc != PDN_GW_INIT_BEARER_DEACTIVATION) {
				resp->linked_eps_bearer_id = ebi;
				pfcp_modification_error_response(resp, msg, GTPV2C_CAUSE_INVALID_REPLY_FROM_REMOTE_PEER);
				return -1;
			}
		}

		msg->state = resp->state;
		msg->proc = resp->proc;
		msg->cp_mode = resp->cp_mode;
		/*Set the appropriate event type.*/
		msg->event = PFCP_SESS_MOD_RESP_RCVD_EVNT;

		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Callback called for"
				"Msg_Type:PFCP_SESSION_MODIFICATION_RESPONSE[%u], Seid:%lu, "
				"Procedure:%s, State:%s, Event:%s\n",
				LOG_VALUE, msg->msg_type,
				msg->pfcp_msg.pfcp_sess_mod_resp.header.seid_seqno.has_seid.seid,
				get_proc_string(msg->proc),
				get_state_string(msg->state), get_event_string(msg->event));
		break;
	}

	case PFCP_SESSION_DELETION_RESPONSE: {
		/* Decode pfcp session delete response*/
		decoded = decode_pfcp_sess_del_rsp_t(pfcp_rx, &msg->pfcp_msg.pfcp_sess_del_resp);
		if(!decoded){
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Decode fails for "
					"PFCP_SESSION_DELETION_RESPONSE", LOG_VALUE);
			return -1;
		}
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"DECODED bytes in Sess "
				"Del Resp is %d\n", LOG_VALUE ,decoded);

		sess_id = msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid;

		/* Retrive the session information based on session id. */
		if (get_sess_entry(sess_id, &resp) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
					"for seid: %lu", LOG_VALUE, sess_id);
			return -1;
		}

		/* Retrive teid from session id */
		/* stop and delete timer entry for pfcp sess del req */
		int ebi = UE_BEAR_ID(sess_id);
		ebi_index = GET_EBI_INDEX(ebi);
		if (ebi_index == -1) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Invalid EBI ID\n", LOG_VALUE);
			pfcp_modification_error_response(resp, msg, GTPV2C_CAUSE_SYSTEM_FAILURE);
			return -1;
		}

		delete_pfcp_if_timer_entry(UE_SESS_ID(sess_id), ebi_index);

		if(msg->pfcp_msg.pfcp_sess_del_resp.cause.cause_value != REQUESTACCEPTED){
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"Cause received Del response is %s\n",
					LOG_VALUE, cause_str_pfcp(msg->pfcp_msg.pfcp_sess_del_resp.cause.cause_value));
			/* Detach / PGW-initiated deactivation proceed even when the
			 * deletion is rejected by the UP. */
			if(resp->proc != DETACH_PROC
					&& resp->proc != PDN_GW_INIT_BEARER_DEACTIVATION) {
				pfcp_modification_error_response(resp, msg, GTPV2C_CAUSE_INVALID_REPLY_FROM_REMOTE_PEER);
				return -1;
			}
		}

		msg->state = resp->state;
		msg->proc = resp->proc;
		msg->cp_mode = resp->cp_mode;
		/*Set the appropriate event type.*/
		msg->event = PFCP_SESS_DEL_RESP_RCVD_EVNT ;

		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Callback called for"
				"Msg_Type:PFCP_SESSION_DELETION_RESPONSE[%u], Seid:%lu, "
				"Procedure:%s, State:%s, Event:%s\n",
				LOG_VALUE, msg->msg_type,
				msg->pfcp_msg.pfcp_sess_del_resp.header.seid_seqno.has_seid.seid,
				get_proc_string(msg->proc),
				get_state_string(msg->state), get_event_string(msg->event));
		break;
	}

	case PFCP_SESSION_REPORT_REQUEST: {
		/*Decode the received msg and stored into the struct*/
		decoded = decode_pfcp_sess_rpt_req_t(pfcp_rx,
				&msg->pfcp_msg.pfcp_sess_rep_req);
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"DEOCED bytes in Sess Report Request is %d\n", LOG_VALUE,
				decoded);

		/* Retrive the session information based on session id. */
		if (get_sess_entry(msg->pfcp_msg.pfcp_sess_rep_req.header.seid_seqno.has_seid.seid,
				&resp) != 0) {
			clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT"No Session entry found "
					"for seid: %lu", LOG_VALUE,
					msg->pfcp_msg.pfcp_sess_rep_req.header.seid_seqno.has_seid.seid);
			/* Unknown session reported by the UP: ask it to tear the
			 * orphaned session down. */
			send_pfcp_del_sess_req(msg->pfcp_msg.pfcp_sess_rep_req.header.seid_seqno.has_seid.seid,
					peer_addr);
			return -1;
		}

		sess_id = msg->pfcp_msg.pfcp_sess_rep_req.header.seid_seqno.has_seid.seid;

		msg->state = resp->state;
		msg->proc = resp->proc;
		msg->cp_mode = resp->cp_mode;
		/*Set the appropriate event type.*/
		msg->event = PFCP_SESS_RPT_REQ_RCVD_EVNT;

		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Callback called for"
				"Msg_Type:PFCP_SESSION_REPORT_REQUEST[%u], Seid:%lu, "
				"Procedure:%s, State:%s, Event:%s\n",
				LOG_VALUE, msg->msg_type,
				msg->pfcp_msg.pfcp_sess_rep_req.header.seid_seqno.has_seid.seid,
				get_proc_string(msg->proc),
				get_state_string(msg->state), get_event_string(msg->event));
		break;
	}

	case PFCP_SESSION_SET_DELETION_REQUEST:
		/*Decode the received msg and stored into the struct. */
		decoded = decode_pfcp_sess_set_del_req_t(pfcp_rx,
				&msg->pfcp_msg.pfcp_sess_set_del_req);
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"DEOCED bytes in Sess Set Deletion Request is %d\n", LOG_VALUE,
				decoded);

		msg->state = PFCP_SESS_SET_DEL_REQ_RCVD_STATE;
		msg->proc = RESTORATION_RECOVERY_PROC;

		sess_id = msg->pfcp_msg.pfcp_sess_set_del_req.header.seid_seqno.has_seid.seid;

		/*Set the appropriate event type.*/
		msg->event = PFCP_SESS_SET_DEL_REQ_RCVD_EVNT;

		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Callback called for"
				" Msg_Type: PFCP_SESSION_SET_DELETION_RESPONSE[%u], "
				"Procedure:%s, State:%s, Event:%s\n",
				LOG_VALUE, msg->msg_type,
				get_proc_string(msg->proc),
				get_state_string(msg->state), get_event_string(msg->event));
		break;

	case PFCP_SESSION_SET_DELETION_RESPONSE:
		/*Decode the received msg and stored into the struct. */
		decoded = decode_pfcp_sess_set_del_rsp_t(pfcp_rx,
				&msg->pfcp_msg.pfcp_sess_set_del_rsp);
		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"DEOCED bytes in Sess Set Deletion Resp is %d\n", LOG_VALUE,
				decoded);

		sess_id = msg->pfcp_msg.pfcp_sess_set_del_rsp.header.seid_seqno.has_seid.seid;

		msg->state = PFCP_SESS_SET_DEL_REQ_SNT_STATE;
		msg->proc = RESTORATION_RECOVERY_PROC;

		/*Set the appropriate event type.*/
		msg->event = PFCP_SESS_SET_DEL_RESP_RCVD_EVNT;

		clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT" Callback called for"
				" Msg_Type: PFCP_SESSION_SET_DELETION_RESPONSE[%u], "
				"Procedure:%s, State:%s, Event:%s\n",
				LOG_VALUE, msg->msg_type,
				get_proc_string(msg->proc),
				get_state_string(msg->state), get_event_string(msg->event));
		break;

	default:
		/* Retrive the session information based on session id */
		if ((get_sess_entry(pfcp_header->seid_seqno.has_seid.seid, &resp)) == 0 ) {
			msg->proc = NONE_PROC;
			if( SGWC == resp->cp_mode ) {
				msg->state = SGWC_NONE_STATE;
				msg->cp_mode = resp->cp_mode;
			} else {
				msg->state = PGWC_NONE_STATE;
			}
			/* NOTE(review): the state assigned in the branch above is
			 * immediately overwritten here, making the SGWC/PGWC split
			 * effectively dead for msg->state — confirm intent. */
			msg->state = resp->state;
			msg->proc = resp->proc;
		} else {
			msg->event = NONE_EVNT;
			msg->proc = NONE_PROC;
		}

		clLog(clSystemLog, eCLSeverityCritical, LOG_FORMAT":process_msgs-"
				"\n\tcase: gw_cfg= %u;"
				"\n\tReceived unprocessed PFCP Message_Type:%u"
				"... Discarding\n", LOG_VALUE, msg->cp_mode, msg->msg_type);
		return -1;
	}

	/* Session-scoped message accepted: hand a copy to the
	 * process_cp_li_msg() path with peer and local endpoint info. */
	if (sess_id) {
		process_cp_li_msg(sess_id, SX_INTFC_IN, pfcp_rx, bytes_rx,
				fill_ip_info(peer_addr->type,
						peer_addr->ipv4.sin_addr.s_addr,
						peer_addr->ipv6.sin6_addr.s6_addr),
				fill_ip_info(peer_addr->type,
						config.pfcp_ip.s_addr,
						config.pfcp_ip_v6.s6_addr),
				((peer_addr->type == IPTYPE_IPV4_LI) ?
					ntohs(peer_addr->ipv4.sin_port) :
					ntohs(peer_addr->ipv6.sin6_port)),
				config.pfcp_port);
	}

	return 0;
}
|
nikhilc149/e-utran-features-bug-fixes | ulpc/legacy_df_interface/include/LegacyTCPClient.h | /*
* Copyright (c) 2020 Sprint
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
**/
#ifndef _LEGACY_TCP_CLIENT_H_
#define _LEGACY_TCP_CLIENT_H_
#include "epctools.h"
#include "esocket.h"
#include "emgmt.h"
#include "LegacyClient.h"
#define LOG_SYSTEM 1
#define EMPTY_STRING ""
#define ZERO 0
#define RET_SUCCESS 0
#define DF_PACKET 200
#define LEGACY_DF_ACK 201
#pragma pack(push, 1)
/*
 * @brief : Wire format of a data packet sent to the legacy DF.
 *          Byte-packed via the surrounding #pragma pack(push, 1).
 */
struct DfPacket {
	uint32_t packetLen;		/* presumably total on-wire length — confirm */
	struct DfPacketHdr {
		uint8_t packetType;	/* presumably DF_PACKET (200) — confirm */
		uint32_t seqNo;		/* sequence number echoed by the ACK */
		uint32_t dataLen;	/* number of payload bytes in data[] */
	} header;
	/* NOTE(review): zero-length arrays are a compiler extension;
	 * kept as-is since this defines the wire layout. */
	uint8_t data[0];
};
#pragma pack(pop)
#pragma pack(push, 1)
/*
 * @brief : Wire format of the acknowledgement returned by the legacy DF.
 *          Byte-packed via the surrounding #pragma pack(push, 1).
 */
struct AckPacket {
	/* NOTE(review): uint8_t here vs uint32_t packetLen in DfPacket —
	 * confirm the asymmetry is intended by the wire protocol. */
	uint8_t packetLen;
	struct AckPacketHdr {
		uint8_t packetType;	/* presumably LEGACY_DF_ACK (201) — confirm */
		uint32_t seqNo;		/* sequence number being acknowledged */
	} header;
};
#pragma pack(pop)
class TCPThread;
/*
 * @brief : Connected TCP endpoint towards the legacy DF; handles socket
 *          events on the established connection.
 */
class Talker : public ESocket::TCP::TalkerPrivate
{
	public:
		Talker(TCPThread &thread);
		virtual ~Talker();

		/*
		 * @brief : Library function of EPCTool; invoked when data is
		 *          available to read on the connection.
		 */
		void onReceive();

		/*
		 * @brief : Library function of EPCTool; invoked when the
		 *          connection is closed.
		 */
		void onClose();

		/*
		 * @brief : Library function of EPCTool; invoked on a socket error.
		 */
		void onError();

		/*
		 * @brief : Library function of EPCTool; invoked once the
		 *          connection is established.
		 */
		void onConnect();

	private:
		/* Default construction is disallowed; a Talker always belongs
		 * to a TCPThread. */
		Talker();

		uint32_t m_seq;		/* sequence counter for outgoing packets */
};
/*
 * @brief : Socket thread owning the Talker connection to the legacy DF,
 *          including the retry timer used to re-establish it.
 */
class TCPThread : public ESocket::ThreadPrivate {
	public:
		TCPThread();
		~TCPThread();

		/*
		 * @brief : Library function of EPCTool; thread initialization hook.
		 */
		void onInit();

		/*
		 * @brief : Library function of EPCTool; thread shutdown hook.
		 */
		void onQuit();

		/*
		 * @brief : Library function of EPCTool
		 */
		void onClose();

		/*
		 * @brief : Library function of EPCTool; timer expiry hook.
		 */
		Void onTimer(EThreadEventTimer *ptimer);

		/*
		 * @brief : Library function of EPCTool; socket lifecycle hooks.
		 */
		Void onSocketClosed(ESocket::BasePrivate *psocket);
		Void onSocketError(ESocket::BasePrivate *psocket);

		/*
		 * @brief : Library function of EPCTool
		 */
		Void errorHandler(EError &err, ESocket::BasePrivate *psocket);

		/* Start/stop the legacy-DF reconnect retry timer. */
		Void startlDfRetryTimer();
		Void stoplDfRetryTimer();

		/* Talker (connection) lifecycle management. */
		Talker *createTalker();
		Void deleteTalker();
		Talker *getTalker();

		/* Remote (legacy DF) endpoint accessors. */
		void setRemoteIp(const std::string& strRemoteIp);
		void setRemotePort(uint16_t uiRemotePort);
		cpStr getRemoteIp();
		UShort getRemotePort();

	private:
		Talker *m_talker;		/* active connection, NULL when down */
		cpStr m_remote_ip;		/* legacy DF address */
		UShort m_remote_port;		/* legacy DF port */
		EThreadEventTimer m_ldfRetryTimer;	/* reconnect retry timer */
		bool conn_flag = 0;		/* connection-established flag */
};
/*
 * @brief : TCP implementation of the LegacyClient interface; owns the
 *          TCPThread that maintains the connection to the legacy DF.
 */
class LegacyTCPClient : public LegacyClient
{
	public:

		/*
		 * @brief : Constructor of class BaseLegacyInterface
		 */
		LegacyTCPClient();

		/*
		 * @brief : Destructor of class BaseLegacyInterface
		 */
		~LegacyTCPClient();

		/*
		 * @brief : Function to initialise legacy interface
		 * @param : No arguments
		 * @return : Returns int8_t
		 */
		int8_t InitializeLegacyClient();

		/*
		 * @brief : Function to connect with legacy DF
		 * @param : strRemoteIp, legacy DF IP
		 * @param : uiRemotePort, legacy DF port
		 * @return : Returns int8_t
		 */
		/* NOTE(review): the second parameter is named uiRemoteIp but per
		 * the doc above it carries the port — rename to uiRemotePort. */
		int8_t ConnectToLegacy(const std::string& strRemoteIp,
				uint16_t uiRemoteIp);

		/*
		 * @brief : Function to send information/packet to legacy DF
		 * @param : pkt, packet to be sent
		 * @param : packetLen, size of packet
		 * @return : Returns int8_t
		 */
		int8_t SendMessageToLegacy(uint8_t *pkt, uint32_t packetLen);

		/*
		 * @brief : Function to disconnect from legacy DF
		 * @param : No arguments
		 * @return : Returns int8_t
		 */
		int8_t DisconnectToLegacy();

		/*
		 * @brief : Function to de-initialise legacy DF
		 * @param : No arguments
		 * @return : Returns int8_t
		 */
		int8_t DeinitializeLegacyClient();

		TCPThread * getThread();

	private:
		TCPThread *m_app;	/* socket thread owning the connection */
};
/* Function which calls the callback of DF to process an acknowledgement number */
void to_df_callback(uint32_t ackNumb);
/* Function which calls the callback of DF to notify that the legacy DF socket closed */
void to_socket_callback();
/* Function which calls the callback of DF to notify that the legacy DF is connected */
void to_conn_callback();
#endif /* _LEGACY_TCP_CLIENT_H_ */
|
nikhilc149/e-utran-features-bug-fixes | cp_dp_api/config_validater.c | /*
* Copyright (c) 2020 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include<stdio.h>
#include <string.h>
#include <stdbool.h>
#include <ctype.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <rte_common.h>
#include <rte_string_fns.h>
#include <netdb.h>
#include "config_validater.h"
/* Table of every recognised configuration key and the validator applied to
 * its value by read_cfg_file().  A key not present here is rejected as
 * "Invalid Key".  Validators: is_valid_ipv4/ipv6 check numeric literals,
 * is_valid_ipv4v6 accepts anything getaddrinfo() resolves, is_valid_ipv4_ipv6
 * accepts either address family, is_valid_string dispatches per-key rules. */
cfg_data cfg_parms_list[] = {
	/* CP (control-plane) interface endpoints */
	{ "CP_TYPE", NULL, &is_valid_integer },
	{ "S11_IP", NULL, &is_valid_ipv4 },
	{ "S11_IP_V6", NULL, &is_valid_ipv6 },
	{ "S11_PORT", NULL, &is_valid_integer },
	{ "S5S8_IP", NULL, &is_valid_ipv4 },
	{ "S5S8_IP_V6", NULL, &is_valid_ipv6 },
	{ "S5S8_PORT", NULL, &is_valid_integer },
	{ "PFCP_IP", NULL, &is_valid_ipv4 },
	{ "PFCP_IP_V6", NULL, &is_valid_ipv6 },
	{ "PFCP_PORT", NULL, &is_valid_integer },
	{ "UPF_PFCP_IP", NULL, &is_valid_ipv4 },
	{ "UPF_PFCP_IP_V6", NULL, &is_valid_ipv6 },
	{ "UPF_PFCP_PORT", NULL, &is_valid_integer },
	/* Redis / DDF / DADMF peers */
	{ "REDIS_IP", NULL, &is_valid_ipv4v6 },
	{ "REDIS_PORT", NULL, &is_valid_integer },
	{ "CP_REDIS_IP", NULL, &is_valid_ipv4v6 },
	{ "REDIS_CERT_PATH", NULL, &is_valid_string },
	{ "DDF2_IP", NULL, &is_valid_ipv4_ipv6 },
	{ "DDF2_PORT", NULL, &is_valid_integer },
	{ "DDF2_LOCAL_IP", NULL, &is_valid_ipv4_ipv6 },
	{ "DADMF_IP", NULL, &is_valid_ipv4_ipv6 },
	{ "DADMF_PORT", NULL, &is_valid_integer },
	{ "DADMF_LOCAL_IP", NULL, &is_valid_ipv4_ipv6 },
	/* Timers / retry behaviour */
	{ "SUGGESTED_PKT_COUNT", NULL, &is_valid_integer },
	{ "LOW_LEVEL_ARP_PRIORITY", NULL, &is_valid_integer },
	{ "TRANSMIT_TIMER", NULL, &is_valid_integer },
	{ "PERIODIC_TIMER", NULL, &is_valid_integer },
	{ "TRANSMIT_COUNT", NULL, &is_valid_integer },
	{ "REQUEST_TIMEOUT", NULL, &is_valid_integer },
	{ "REQUEST_TRIES", NULL, &is_valid_integer },
	{ "USE_DNS", NULL, &is_valid_integer },
	{ "CP_DNS_IP", NULL, &is_valid_ipv4v6 },
	{ "CLI_REST_PORT", NULL, &is_valid_integer },
	{ "CLI_REST_IP", NULL, &is_valid_ipv4v6 },
	/* Feature flags */
	{ "GENERATE_CDR", NULL, &is_valid_integer },
	{ "GENERATE_SGW_CDR", NULL, &is_valid_integer },
	{ "SGW_CC", NULL, &is_valid_integer },
	{ "ADD_DEFAULT_RULE", NULL, &is_valid_integer },
	{ "IP_ALLOCATION_MODE", NULL, &is_valid_integer },
	{ "IP_TYPE_SUPPORTED", NULL, &is_valid_integer },
	{ "IP_TYPE_PRIORITY", NULL, &is_valid_integer },
	{ "USE_GX", NULL, &is_valid_integer },
	{ "PERF_FLAG", NULL, &is_valid_integer },
	/* [APN] / [URR_DEFAULT] / DNS cache sections */
	{ "name", NULL, &is_valid_string },
	{ "usage_type", NULL, &is_valid_integer },
	{ "network_capability", NULL, &is_valid_string },
	{ "trigger_type", NULL, &is_valid_integer },
	{ "uplink_volume_th", NULL, &is_valid_integer },
	{ "downlink_volume_th", NULL, &is_valid_integer },
	{ "time_th", NULL, &is_valid_integer },
	{ "concurrent", NULL, &is_valid_integer },
	{ "percentage", NULL, &is_valid_integer },
	{ "interval_seconds", NULL, &is_valid_integer },
	{ "query_timeout_ms", NULL, &is_valid_integer },
	{ "query_tries", NULL, &is_valid_integer },
	{ "frequency_seconds", NULL, &is_valid_integer },
	{ "filename", NULL, &is_valid_string },
	{ "nameserver", NULL, &is_valid_ipv4v6 },
	/* UE IP pool */
	{ "IP_POOL_IP", NULL, &is_valid_ipv4 },
	{ "IP_POOL_MASK", NULL, &is_valid_ipv4 },
	{ "IPV6_NETWORK_ID", NULL, &is_valid_ipv6 },
	{ "IPV6_PREFIX_LEN", NULL, &is_valid_string },
	/* DP (user-plane) interfaces */
	{ "PFCP_IPv4", NULL, &is_valid_ipv4 },
	{ "PFCP_IPv6", NULL, &is_valid_ipv6 },
	{ "WB_IFACE", NULL, &is_valid_string },
	{ "EB_IFACE", NULL, &is_valid_string },
	{ "WB_IPv4", NULL, &is_valid_ipv4 },
	{ "WB_IPv6", NULL, &is_valid_ipv6 },
	{ "WB_IPv4_MASK", NULL, &is_valid_ipv4 },
	{ "WB_MAC", NULL, &is_valid_mac },
	{ "EB_IPv4", NULL, &is_valid_ipv4 },
	{ "EB_IPv6", NULL, &is_valid_ipv6 },
	{ "EB_IPv4_MASK", NULL, &is_valid_ipv4 },
	{ "EB_MAC", NULL, &is_valid_mac },
	{ "WB_LI_IPv4", NULL, &is_valid_ipv4 },
	{ "WB_LI_IPv6", NULL, &is_valid_ipv6 },
	{ "WB_LI_IPv4_MASK", NULL, &is_valid_ipv4 },
	{ "WB_LI_IFACE", NULL, &is_valid_string },
	{ "EB_LI_IPv4", NULL, &is_valid_ipv4 },
	{ "EB_LI_IPv6", NULL, &is_valid_ipv6 },
	{ "EB_LI_IPv4_MASK", NULL, &is_valid_ipv4 },
	{ "EB_LI_IFACE", NULL, &is_valid_string },
	{ "NUMA", NULL, &is_valid_integer },
	{ "TEIDRI", NULL, &is_valid_integer },
	{ "TEIDRI_TIMEOUT", NULL, &is_valid_integer },
	{ "GENERATE_PCAP", NULL, &is_valid_integer },
	{ "DDF3_IP", NULL, &is_valid_ipv4_ipv6 },
	{ "DDF3_PORT", NULL, &is_valid_integer },
	{ "DDF3_LOCAL_IP", NULL, &is_valid_ipv4_ipv6 },
	{ "WB_GW_IP", NULL, &is_valid_ipv4 },
	{ "EB_GW_IP", NULL, &is_valid_ipv4 },
	{ "GTPU_SEQNB_IN", NULL, &is_valid_integer },
	{ "GTPU_SEQNB_OUT", NULL, &is_valid_integer }
};
/* Recognised INI section headers; a section line not present here makes
 * read_cfg_file() abort with "Invalid Section". */
section section_list[] = {
	{ "[GLOBAL]" },
	{ "[APN]" },
	{ "[URR_DEFAULT]" },
	{ "[NAMESERVER_CONFIG]" },
	{ "[CACHE]" },
	{ "[APP]" },
	{ "[OPS]" },
	{ "[IP_POOL_CONFIG]" }
};
/**
 * @brief : Validate that a config value is a non-empty string of decimal digits.
 * @param : key, config key name (unused; kept for the common validator signature)
 * @param : value, value string to validate
 * @return : Returns 0 on success, VALUE_FORMAT_NOT_CORRECT otherwise
 */
int is_valid_integer(char *key, char *value) {
	unsigned int idx = 0;
	size_t len = 0;
	RTE_SET_USED(key);
	if(value == NULL)
		return VALUE_FORMAT_NOT_CORRECT;
	len = strnlen(value, CFG_VALUE_LENGTH);
	/* Reject an empty value (e.g. "KEY="): the original loop never ran
	 * for it and reported an empty string as a valid integer. */
	if(len == 0)
		return VALUE_FORMAT_NOT_CORRECT;
	for(idx = 0; idx < len; idx++) {
		/* Cast to unsigned char: passing a negative char to isdigit() is UB */
		if(isdigit((unsigned char)value[idx]) == 0) {
			return VALUE_FORMAT_NOT_CORRECT;
		}
	}
	return 0;
}
/**
 * @brief : Validate that a config value is a well-formed dotted-quad IPv4 address.
 * @param : key, config key name (unused; kept for the common validator signature)
 * @param : value, value string to validate
 * @return : Returns 0 on success, VALUE_FORMAT_NOT_CORRECT otherwise
 */
int is_valid_ipv4(char *key, char *value) {
	char parsed_addr[IPV4_LEN];
	RTE_SET_USED(key);
	if(value == NULL)
		return VALUE_FORMAT_NOT_CORRECT;
	/* inet_pton() returns non-zero only for a syntactically valid address */
	if(!inet_pton(AF_INET, value, parsed_addr))
		return VALUE_FORMAT_NOT_CORRECT;
	return 0;
}
/* Validates that a config value is resolvable as an IPv4 or IPv6 endpoint.
 * NOTE(review): hints are NULL, so getaddrinfo() also resolves host NAMES via
 * DNS -- any resolvable hostname passes, and validation may block on a DNS
 * lookup.  If only numeric literals are intended, AI_NUMERICHOST should be
 * set in the hints; confirm whether hostname configs are relied upon. */
int is_valid_ipv4v6(char *key, char *value) {
	struct addrinfo *ip_type = NULL;
	if(value == NULL)
		return VALUE_FORMAT_NOT_CORRECT;
	if(getaddrinfo(value, NULL, NULL, &ip_type)) {
		return VALUE_FORMAT_NOT_CORRECT;
	}
	/* Only resolvability matters; release the result list */
	freeaddrinfo(ip_type);
	RTE_SET_USED(key);
	return 0;
}
/**
 * @brief : Validate that a config value is an IPv6 address, optionally with a
 *          "/prefix" suffix (e.g. "2001:db8::/64").
 * @param : key, config key name (unused; kept for the common validator signature)
 * @param : value, value string to validate.  NOTE: when a '/' is present,
 *          rte_strsplit() modifies the string in place (the '/' becomes '\0').
 * @return : Returns 0 on success, VALUE_FORMAT_NOT_CORRECT otherwise
 */
int is_valid_ipv6(char *key, char *value) {
	char buf[IPV6_LEN];
	RTE_SET_USED(key);
	if(value == NULL)
		return VALUE_FORMAT_NOT_CORRECT;
	if( strstr(value, "/") != NULL){
		int ip_token = 0;
		char *ip_fld[2] = {NULL, NULL};
		unsigned int idx = 0;
		long prefix_len = 0;
		ip_token = rte_strsplit(value, strnlen(value, CFG_VALUE_LENGTH), ip_fld, 2, '/');
		if(ip_token > 2)
			return VALUE_FORMAT_NOT_CORRECT;
		if (!(inet_pton(AF_INET6, ip_fld[0], buf)))
			return VALUE_FORMAT_NOT_CORRECT;
		/* Validate the prefix length too: the original accepted anything
		 * after the '/', e.g. "2001:db8::/abc" or "2001:db8::/999". */
		if (ip_fld[1] == NULL || ip_fld[1][0] == '\0')
			return VALUE_FORMAT_NOT_CORRECT;
		for (idx = 0; idx < strnlen(ip_fld[1], CFG_VALUE_LENGTH); idx++) {
			if (isdigit((unsigned char)ip_fld[1][idx]) == 0)
				return VALUE_FORMAT_NOT_CORRECT;
		}
		prefix_len = strtol(ip_fld[1], NULL, 10);
		if (prefix_len < 1 || prefix_len > 128)
			return VALUE_FORMAT_NOT_CORRECT;
	}else {
		if (!(inet_pton(AF_INET6, value, buf)))
			return VALUE_FORMAT_NOT_CORRECT;
	}
	return 0;
}
int is_valid_ipv4_ipv6(char *key, char *value) {
char buf[IPV4_LEN];
if(value == NULL)
return VALUE_FORMAT_NOT_CORRECT;
if (!(inet_pton(AF_INET, value, buf))) {
if (!(inet_pton(AF_INET6, value, buf))) {
return VALUE_FORMAT_NOT_CORRECT;
}
}
RTE_SET_USED(key);
return 0;
}
/**
 * @brief : Validate that a config value is a colon-separated MAC address
 *          (MAC_ADDRESS_LEN hex digits with MAC_ADDRESS_SEPARTER colons,
 *          i.e. the "aa:bb:cc:dd:ee:ff" form).
 * @param : key, config key name (unused; kept for the common validator signature)
 * @param : value, value string to validate
 * @return : Returns 0 on success, VALUE_FORMAT_NOT_CORRECT otherwise
 */
int is_valid_mac(char *key, char *value) {
	int hex_itr = 0;
	int separater = 0;
	/* Moved before the returns: in the original this call sat after both
	 * return statements and was unreachable dead code. */
	RTE_SET_USED(key);
	if(value == NULL)
		return VALUE_FORMAT_NOT_CORRECT;
	while(*value) {
		if (isxdigit((unsigned char)*value)) {	/* cast: negative char is UB */
			hex_itr++;
		} else if (*value == ':') {
			/* A colon is only valid after a complete byte (2 hex digits) */
			if (hex_itr == 0 || hex_itr / 2 - 1 != separater)
				break;
			++separater;
		} else {
			/* Any other character invalidates the address */
			separater = -1;
		}
		++value;
	}
	if ((hex_itr == MAC_ADDRESS_LEN) && (separater == MAC_ADDRESS_SEPARTER)) {
		return 0;
	} else {
		return VALUE_FORMAT_NOT_CORRECT;
	}
}
/**
 * @brief : Validate an APN name: alphanumeric labels separated by '.', where
 *          the first and last characters are alphanumeric and no '.' is
 *          followed by another '.' or ends the string.
 * @param : value, APN string to validate
 * @return : Returns 0 on success, VALUE_FORMAT_NOT_CORRECT otherwise
 */
int is_valid_apn(char *value) {
	unsigned int idx = 0;
	size_t len = 0;
	if(value == NULL) {
		return VALUE_FORMAT_NOT_CORRECT;
	}
	len = strnlen(value, CFG_VALUE_LENGTH);
	/* Guard: for an empty value the original read value[-1] (out of bounds) */
	if(len == 0)
		return VALUE_FORMAT_NOT_CORRECT;
	/* Use logical && on the isalnum() results: the original used bitwise
	 * '&', which can evaluate to 0 even when both results are non-zero
	 * (isalnum() only promises a non-zero value, not 1). */
	if(!(isalnum((unsigned char)value[STARTING_INDEX]) &&
			isalnum((unsigned char)value[len - 1]))) {
		return VALUE_FORMAT_NOT_CORRECT;
	}
	for(idx = 1; idx < len - 1; idx++) {
		if(value[idx] == '.' && isalnum((unsigned char)value[idx + 1]) == 0) {
			return VALUE_FORMAT_NOT_CORRECT;
		} else if(isalnum((unsigned char)value[idx]) == 0 && value[idx] != '.') {
			return VALUE_FORMAT_NOT_CORRECT;
		}
	}
	return 0;
}
/**
 * @brief : Check that every character of a config value is alphanumeric.
 * @param : value, value string to validate
 * @return : Returns 0 on success, VALUE_FORMAT_NOT_CORRECT otherwise
 */
int is_valid_alphanumeric_value(char *value) {
	size_t pos;
	size_t len;
	if(value == NULL)
		return VALUE_FORMAT_NOT_CORRECT;
	len = strnlen(value, CFG_VALUE_LENGTH);
	for(pos = 0; pos < len; pos++) {
		if(!isalnum(value[pos]))
			return VALUE_FORMAT_NOT_CORRECT;
	}
	return 0;
}
/**
 * @brief : Check that every character of a config value is alphabetic.
 * @param : value, value string to validate
 * @return : Returns 0 on success, VALUE_FORMAT_NOT_CORRECT otherwise
 */
int is_valid_alpha_value(char *value) {
	size_t pos;
	size_t len;
	if(value == NULL)
		return VALUE_FORMAT_NOT_CORRECT;
	len = strnlen(value, CFG_VALUE_LENGTH);
	for(pos = 0; pos < len; pos++) {
		if(!isalpha(value[pos]))
			return VALUE_FORMAT_NOT_CORRECT;
	}
	return 0;
}
/**
 * @brief : Validate a network interface name: alphanumeric characters with
 *          optional ':' separators (e.g. VLAN/alias notation), where the
 *          first and last characters are alphanumeric.
 * @param : value, interface name to validate
 * @return : Returns 0 on success, VALUE_FORMAT_NOT_CORRECT otherwise
 */
int is_valid_interface(char *value) {
	unsigned int idx = 0;
	size_t len = 0;
	if(value == NULL) {
		return VALUE_FORMAT_NOT_CORRECT;
	}
	len = strnlen(value, CFG_VALUE_LENGTH);
	/* Guard: for an empty value the original read value[-1] (out of bounds) */
	if(len == 0)
		return VALUE_FORMAT_NOT_CORRECT;
	/* Use logical && on the isalnum() results: the original used bitwise
	 * '&', which can evaluate to 0 even when both results are non-zero
	 * (isalnum() only promises a non-zero value, not 1). */
	if(!(isalnum((unsigned char)value[STARTING_INDEX]) &&
			isalnum((unsigned char)value[len - 1]))) {
		return VALUE_FORMAT_NOT_CORRECT;
	}
	for(idx = 1; idx < len - 1; idx++) {
		if(value[idx] == ':' && isalnum((unsigned char)value[idx + 1]) == 0) {
			return VALUE_FORMAT_NOT_CORRECT;
		} else if(isalnum((unsigned char)value[idx]) == 0 && value[idx] != ':') {
			return VALUE_FORMAT_NOT_CORRECT;
		}
	}
	return 0;
}
/**
 * @brief : Dispatching validator for string-typed keys: interface names are
 *          syntax-checked, REDIS_CERT_PATH must be the fixed expected path,
 *          network_capability must be alphabetic, filename must be non-empty,
 *          and "name" (APN) must be a valid APN.  Unknown keys pass.
 * @param : key, config key name (selects the rule applied)
 * @param : value, value string to validate
 * @return : Returns 0 on success, VALUE_FORMAT_NOT_CORRECT otherwise
 */
int is_valid_string(char *key, char *value) {
	if(value == NULL)
		return VALUE_FORMAT_NOT_CORRECT;
	/* All four interface keys share the same rule: collapse the four
	 * copy-pasted branches of the original into one. */
	if(!strncmp(key, "WB_IFACE", KEY_LEN) ||
			!strncmp(key, "WB_LI_IFACE", KEY_LEN) ||
			!strncmp(key, "EB_IFACE", KEY_LEN) ||
			!strncmp(key, "EB_LI_IFACE", KEY_LEN)) {
		if(is_valid_interface(value) != 0)
			return VALUE_FORMAT_NOT_CORRECT;
	} else if(!strncmp(key, "REDIS_CERT_PATH", KEY_LEN)) {
		if(strncmp(value, "../config/redis_cert", CFG_VALUE_LENGTH) != 0)
			return VALUE_FORMAT_NOT_CORRECT;
	} else if(!strncmp(key, "network_capability", KEY_LEN)) {
		if(is_valid_alpha_value(value) != 0)
			return VALUE_FORMAT_NOT_CORRECT;
	} else if(!strncmp(key, "filename", KEY_LEN)) {
		/* The original tested "strnlen(...) == 0" twice (copy-paste bug);
		 * a single non-empty check is what was intended. */
		if(strnlen(value, CFG_VALUE_LENGTH) == 0)
			return VALUE_FORMAT_NOT_CORRECT;
	} else if(!strncmp(key, "name", KEY_LEN)) {
		if(is_valid_apn(value) != 0)
			return VALUE_FORMAT_NOT_CORRECT;
	}
	return 0;
}
/* Parse and validate an INI-style configuration file.
 * Each non-comment line is either a "[SECTION]" header (must appear in
 * section_list) or a "key = value" pair (key must appear in cfg_parms_list
 * and its value must satisfy the key's validator).  On any failure a
 * diagnostic is printed and the process exits.
 * NOTE(review): the key/value copy loops do not bound 'index' against
 * KEY_LEN / CFG_VALUE_LENGTH, so an overlong line can overflow the local
 * buffers -- confirm BUFFER_SIZE vs. KEY_LEN guarantees.
 * NOTE(review): errors call exit(0); exit(EXIT_FAILURE) looks intended. */
void read_cfg_file(const char *path) {
	char buffer[BUFFER_SIZE] = {0};
	int itr_buff = 0;
	int index = 0;
	bool key_value_filled = false;
	/* Table sizes, used both as loop bounds and as "not found" sentinels */
	int no_of_cfg_params = sizeof(cfg_parms_list) / sizeof(cfg_parms_list[STARTING_INDEX]);
	int no_of_sections = sizeof(section_list) / sizeof(section_list[STARTING_INDEX]);
	FILE *fp = fopen(path, "r");
	if(fp == NULL) {
		fprintf(stderr, "\nFailed to open %s file\n", path);
		exit(0);
	}
	while(fgets(buffer, sizeof(buffer), fp) != NULL) {
		char cfg_key[KEY_LEN] = {0};
		char cfg_parameter_value[CFG_VALUE_LENGTH] = {0};
		char cfg_section_value[KEY_LEN] = {0};
		/* 0 for comment/blank lines, so the "invalid" checks below pass */
		int itr_cfg_params = 0;
		int itr_cfg_section = 0;
		index = 0;
		key_value_filled = false;
		/* Skip ';' / '#' comments and blank lines */
		if(buffer[STARTING_INDEX] != ';'
			&& buffer[STARTING_INDEX] != '#' && buffer[STARTING_INDEX] != '\n') {
			/* Split the line: section name, or key before '=' and value after */
			for (itr_buff = 0;
				buffer[itr_buff] != '\n' && buffer[itr_buff] != '\0'; itr_buff++) {
				if(buffer[STARTING_INDEX] == '[') {
					cfg_section_value [index++] = buffer[itr_buff];
				} else if(buffer[itr_buff] == '=') {
					cfg_key[index] = '\0';
					index = 0;
					key_value_filled = true;
				} else if(!key_value_filled && buffer[itr_buff] != ' ') {
					cfg_key[index++] = buffer[itr_buff];
				} else {
					/* Value characters (spaces stripped) */
					if(buffer[itr_buff] != ' ')
						cfg_parameter_value[index++] = buffer[itr_buff];
				}
			}
			if(buffer[STARTING_INDEX] != '[') {
				cfg_parameter_value[index] = '\0';
				/* Look the key up and run its validator */
				for(itr_cfg_params = 0; itr_cfg_params < no_of_cfg_params; itr_cfg_params++) {
					if(!strncmp(cfg_parms_list[itr_cfg_params].key, cfg_key, KEY_LEN)) {
						if((*cfg_parms_list[itr_cfg_params].fun_ptr)(cfg_key, cfg_parameter_value) != 0) {
							fprintf(stderr, "\nNeed to enter the valid value for %s key\n", cfg_key);
							exit(0);
						}
						break;
					}
				}
			} else {
				cfg_section_value [index++] = '\0';
				/* Verify the section header is a known one */
				for(itr_cfg_section = 0; itr_cfg_section < no_of_sections; itr_cfg_section++) {
					if(!strncmp(section_list[itr_cfg_section].section_name,
								cfg_section_value, KEY_LEN))
						break;
				}
			}
		}
		/* Loop counters equal the table size only when lookup failed */
		if(itr_cfg_params == no_of_cfg_params && buffer[STARTING_INDEX] != '[') {
			fprintf(stderr, "\nInvalid Key : %s\n", cfg_key);
			exit(0);
		} else if(itr_cfg_section == no_of_sections) {
			fprintf(stderr, "\nInvalid Section : %s\n", cfg_section_value);
			exit(0);
		}
	}
	fclose(fp);
}
|
nikhilc149/e-utran-features-bug-fixes | cp/gtpc_session.h | /*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "cp.h"
#include "main.h"
#include "pfcp.h"
#include "cp_stats.h"
#include "pfcp_util.h"
#include "pfcp_set_ie.h"
#include "pfcp_session.h"
#include "pfcp_messages.h"
#include "pfcp_association.h"
#include "pfcp_messages_encoder.h"
#include "pfcp_messages_decoder.h"
#include "pfcp_enum.h"
#ifdef CP_BUILD
#include "ue.h"
#include "gtp_messages.h"
#include "gtpv2c_set_ie.h"
#include "cp_config.h"
#include "ipc_api.h"
#endif /* CP_BUILD */
#ifndef GTPC_SESSION_H
#define GTPC_SESSION_H
#define GTP_MSG_LEN 2048
/* Outcome of check_mbr_procedure(): which procedure applies to an incoming
 * Modify Bearer Request (0 is reserved as the failure return). */
enum modify_bearer_procedure {
	INITIAL_PDN_ATTACH = 01,	/* note: 01 is an octal literal, value 1 */
	UPDATE_PDN_CONNECTION,
	FORWARD_MBR_REQUEST,
	NO_UPDATE_MBR,
};
#ifdef CP_BUILD
/**
* @brief : deletes ue context information
* @param : ds_req, holds info from delete sess request
* @param : context, context to be deleted
* @param : s5s8_pgw_gtpc_teid, pgwc teid
* @param : s5s8_pgw_gtpc_ip, pgwc ip
* @return : Returns 0 in case of success , -1 otherwise
*/
int
delete_context(gtp_eps_bearer_id_ie_t lbi, uint32_t teid,
ue_context **_context, pdn_connection **pdn);
/**
* @brief : Fill Create Sess Request
* @param : cs_req, request structure to be filled
* @param : context, ue context info
* @param : ebi_index, index of bearer in bearer array
* @param : requested_pdn_type, pdn type received from MME
* @return : Returns 0 in case of success , -1 otherwise
*/
int
fill_cs_request(create_sess_req_t *cs_req, struct ue_context_t *context,
int ebi_index, uint8_t requested_pdn_type);
/**
* @brief : Process create session response received on s5s8 interface in sgwc
* @param : cs_rsp, holds info received in response
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_sgwc_s5s8_create_sess_rsp(create_sess_rsp_t *cs_rsp);
/**
* @brief : Fill delete session request
* @param : ds_req, request structure to be filled
* @param : context, ue context info
* @param : ebi_index, index of bearer in bearer array
* @return : Returns nothing
*/
void
fill_ds_request(del_sess_req_t *ds_req, struct ue_context_t *context,
int ebi_index , uint32_t teid);
/**
* @brief : Fill delete session response on pgwc
* @param : ds_resp, response structure to be filled
* @param : sequence, sequence number
* @param : has_teid, teid info
* @return : Returns nothing
*/
void
fill_del_sess_rsp(del_sess_rsp_t *ds_resp, uint32_t sequence, uint32_t has_teid);
/**
* @brief : Set values in create bearer request
* @param : gtpv2c_tx, transmission buffer to contain 'create bearer request' message
* @param : sequence, sequence number as described by clause 7.6 3gpp 29.274
* @param : pdn, pdn data structure pertaining to the bearer to be created
* @param : bearer, EPS Bearer data structure to be created
* @param : lbi, 'Linked Bearer Identifier': indicates the default bearer identifier
* associated to the PDN connection to which the dedicated bearer is to be
* created
* @param : pti, 'Procedure Transaction Identifier' according to clause 8.35 3gpp 29.274,
* as specified by table 7.2.3-1 3gpp 29.274, 'shall be the same as the one
* used in the corresponding bearer resource command'
* @param : resp
* @param : piggybacked flag
* @param : req_for_mme, flag to identify if req is being created for mme or not
* @return : Returns 0 on sucess
*/
int
set_create_bearer_request(gtpv2c_header_t *gtpv2c_tx, uint32_t sequence,
pdn_connection *pdn, uint8_t lbi, uint8_t pti, struct resp_info *resp, uint8_t piggybacked, bool req_for_mme);
/**
* @brief : Set values in create bearer response
* @param : gtpv2c_tx, transmission buffer to contain 'create bearer response' message
* @param : sequence, sequence number as described by clause 7.6 3gpp 29.274
* @param : pdn, pdn data structure pertaining to the bearer to be created
* @param : bearer, EPS Bearer data structure to be created
* @param : lbi, 'Linked Bearer Identifier': indicates the default bearer identifier
* associated to the PDN connection to which the dedicated bearer is to be
* created
* @param : pti, 'Procedure Transaction Identifier' according to clause 8.35 3gpp 29.274,
* as specified by table 7.2.3-1 3gpp 29.274, 'shall be the same as the one
* used in the corresponding bearer resource command'
* @param : resp
* @return : Returns nothing
*/
int
set_create_bearer_response(gtpv2c_header_t *gtpv2c_tx, uint32_t sequence,
pdn_connection *pdn, uint8_t lbi, uint8_t pti, struct resp_info *resp);
/**
* @brief : Handles the processing at sgwc after receiving delete
* session request messages
* @param : ds_resp, holds info from response
* @return : - 0 if successful
* - > 0 if error occurs during packet filter parsing corresponds to 3gpp
* specified cause error value
* - < 0 for all other errors
*/
int
process_delete_session_response(del_sess_rsp_t *ds_resp);
/**
 * @brief : Processes create bearer response on sgwc
* @param : cb_rsp, holds data from response
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_create_bearer_response(create_bearer_rsp_t *cb_rsp);
/**
 * @brief : Processes update bearer request
* @param : ubr, holds data from request
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_update_bearer_request(upd_bearer_req_t *ubr);
/**
 * @brief : Processes update bearer response received on s11 interface
* @param : ub_rsp, holds data from response
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_s11_upd_bearer_response(upd_bearer_rsp_t *ub_rsp, ue_context *context);
/**
 * @brief : Processes update bearer response received on s5s8 interface
* @param : ub_rsp, holds data from response
* @return : Returns 0 in case of success , -1 otherwise
*/
int
process_s5s8_upd_bearer_response(upd_bearer_rsp_t *ub_rsp, ue_context *context);
/**
* @brief : Process CSR request for Context Replacement.
* @param : csr, Received CSR request.
* @param : cp_mode
* @param : apn_requested : Requested APN in CSR
* @return : Returns 0 on success, -1 otherwise
*/
int
gtpc_context_replace_check(create_sess_req_t *csr, uint8_t cp_mode, apn *apn_requested);
/**
 * @brief : Check the MB Request and decide the procedure for that MBR.
 * @param : pdn, pdn context
 * @return : Returns 0 on failure, and an integer corresponding to a procedure.
*/
uint8_t
check_mbr_procedure(pdn_connection *pdn);
/**
 * @brief : This Handler is used when SGWC receives the MBR request
 * @param : pfcp_sess_mod_response, gtpv2c header, pdn, resp struct,
 * eps bearer, mbr procedure flag
 * @return : Returns 0 on failure, and an integer corresponding to a procedure.
*/
int
process_pfcp_sess_mod_resp_mbr_req(pfcp_sess_mod_rsp_t *pfcp_sess_mod_rsp,
gtpv2c_header_t *gtpv2c_tx, pdn_connection *pdn,
struct resp_info *resp, eps_bearer *bearer, uint8_t *mbr_procedure);
/**
 * @brief : This Handler is used after receiving a Session Modification Response
 * when PGWC will receive an Update PDN Connection Set Request.
 * @param : UPDATE PDN CONNECTION SET REQ
 * @return : Returns 0 on failure, and an integer corresponding to a procedure.
*/
int
proc_pfcp_sess_mbr_udp_csid_req(upd_pdn_conn_set_req_t *upd_req);
/**
 * @brief : Check for differences between the received ULI IE and the stored context
 * @param : ULI IE, ue context
 * @return : Returns nothing (the function has a void return type).
*/
void
check_for_uli_changes(gtp_user_loc_info_ie_t *uli, ue_context *context);
/**
* @brief : Generate CCR-U request and send to PCRF.
* @param : ue context, eps_bearer
* @param : Bearer Resouce Command, bearer_rsrc_cmd
* @param : Modify Bearer Command, mod_bearer_cmd_t
 * @return : Returns 0 on failure, and an integer corresponding to a procedure.
*/
int
gen_ccru_request(ue_context *context, eps_bearer *bearer, bearer_rsrc_cmd_t *bearer_rsrc_cmd,
mod_bearer_cmd_t *mod_bearer_cmd);
/**
* @brief : Delete session context in case of context replacement.
* @param : _context, UE context information.
* @param : pdn, pdn information
* @return : Returns nothing.
*/
void
delete_sess_context(ue_context **_context, pdn_connection *pdn);
/**
* @brief : Delete rules in bearer context.
* @param : bearer, Bearer context.
* @return : Returns 0 on success, -1 otherwise
*/
int delete_rule_in_bearer(eps_bearer *bearer);
/**
* @brief : Delete Bearer Context associate with EBI.
* @param : pdn, pdn information.
* @param : ebi_index, Bearer index.
* @return : Returns 0 on success, -1 otherwise
*/
int
delete_bearer_context(pdn_connection *pdn, int ebi_index );
/**
* @brief : Store Presence reporting area action in UE Context.
* @param : ie, Presence reporting area action IE recived.
* @param : context, UE context for storing Presence reporting area action.
* @return : Returns nothing.
*/
void
store_presc_reporting_area_act_to_ue_context(gtp_pres_rptng_area_act_ie_t *ie,
ue_context *context);
/**
* @brief : Store Presence reporting area Info in UE Context.
* @param : ie, Presence reporting area Info IE recived.
* @param : context, UE context for storing Presence reporting area Info.
* @return : Returns nothing.
*/
void
store_presc_reporting_area_info_to_ue_context(gtp_pres_rptng_area_info_ie_t *ie,
ue_context *context);
#endif /*CP_BUILD*/
#endif
|
nikhilc149/e-utran-features-bug-fixes | dp/up_ddn.c | <reponame>nikhilc149/e-utran-features-bug-fixes
/*
* Copyright (c) 2019 Sprint
* Copyright (c) 2020 T-Mobile
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <rte_errno.h>
#include "gtpu.h"
#include "up_main.h"
#include "pfcp_util.h"
#include "pfcp_set_ie.h"
#include "gw_adapter.h"
#include "interface.h"
#include "pfcp_messages_encoder.h"
extern int clSystemLog;
/**
 * @brief : Allocate a DL packet-buffering ring (capacity rounded up to a
 *          power of 2) and register it in the global ring container.
 * @param : dl_ring_size, requested ring capacity
 * @return : Pointer to the new ring, or NULL if none could be created
 *           (container full, creation failure, or name already exists)
 */
struct
rte_ring *allocate_ring(unsigned int dl_ring_size)
{
	char name[32];
	struct rte_ring *dl_ring = NULL;
	unsigned dl_core = rte_lcore_id();
	if ((DL_RING_CONTAINER_SIZE > num_dl_rings) &&
		(rte_ring_count(dl_ring_container) < DL_RING_CONTAINER_SIZE)) {
		snprintf(name, sizeof(name), "dl_pkt_ring_%"PRIu32"_%u",
				num_dl_rings, dl_core);
		struct rte_ring *tmp =
			rte_ring_create(name, rte_align32pow2(dl_ring_size),
					rte_socket_id(), RING_F_SP_ENQ | RING_F_SC_DEQ);
		if (tmp) {
			int ret = rte_ring_enqueue(dl_ring_container, tmp);
			/* BUG FIX: rte_ring_enqueue() returns 0 on success or a
			 * NEGATIVE errno (-ENOBUFS); the original compared against the
			 * positive ENOBUFS, so a full container was never detected and
			 * an untracked ring was handed out. */
			if (ret != 0) {
				clLog(clSystemLog, eCLSeverityDebug,
						LOG_FORMAT"Cannot hold more dl rings\n", LOG_VALUE);
				rte_ring_free(tmp);
				return NULL;
			}
			dl_ring = tmp;
			num_dl_rings++;
		} else {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"Couldnt create %s for DL PKTS %s\n",
					LOG_VALUE, name, rte_strerror(rte_errno));
			/* EEXIST: a ring with this name already exists; bump the
			 * counter so the next attempt uses a fresh name. */
			if (rte_errno == EEXIST)
				num_dl_rings++;
		}
	}
	return dl_ring;
}
/*
 * @brief : Process a DDN ACK received by the data-plane from the control-plane.
 *          Currently a stub: it only logs.  The ACK attributes dl_buff_cnt and
 *          dl_buff_duration are ignored; the default behaviour is that a DDN is
 *          issued for the first packet of an IDLE session and again once the
 *          buffering ring is full.
 * @param : dp_id, data-plane identifier (unused)
 * @param : dl_ddn, decoded DDN ACK message (unused)
 * @return : Always returns 0
 */
int
dp_ddn_ack(struct dp_id dp_id,
		struct downlink_data_notification_ack_t *dl_ddn)
{
	/* TBD: Downlink data notification ACK handling needs to be implemented. */
	clLog(clSystemLog, eCLSeverityInfo,
			LOG_FORMAT"DDN ACK processed\n", LOG_VALUE);
	return 0;
}
/**
 * @brief : Populate a Downlink Data Service Information IE with its header
 *          and all flags/values cleared to zero.
 * @param : dl, IE structure to fill
 * @return : Returns nothing
 */
static void
set_dndl_data_srv_if_ie(pfcp_dnlnk_data_svc_info_ie_t *dl)
{
	/* Clear every flag/value field of the IE */
	dl->ppi = 0;
	dl->qfi = 0;
	dl->qfii = 0;
	dl->paging_plcy_indctn_val = 0;
	dl->dnlnk_data_svc_info_spare = 0;
	dl->dnlnk_data_svc_info_spare2 = 0;
	dl->dnlnk_data_svc_info_spare3 = 0;
	/* IE header: type + 3-byte payload */
	pfcp_set_ie_header(&(dl->header), PFCP_IE_DNLNK_DATA_SVC_INFO, 3);
}
/**
 * @brief : Populate a Downlink Data Report IE carrying exactly one PDR id.
 *          The actual rule id is filled in afterwards by the caller.
 * @param : dl, IE structure to fill
 * @return : Returns nothing
 */
static void
set_dldr_ie(pfcp_dnlnk_data_rpt_ie_t *dl)
{
	/* IE header: type + 6-byte payload (a single PDR-ID IE) */
	pfcp_set_ie_header(&(dl->header), IE_DNLNK_DATA_RPT, 6);
	/* One PDR is reported; initialise its id IE to 0 for now */
	set_pdr_id(dl->pdr_id, 0);
	dl->pdr_id_count = 1;
}
/**
 * @brief : Fill a PFCP Session Report Request announcing buffered DL data
 *          (the DDN trigger) for one session/PDR.
 * @param : pfcp_sess_rep_req, structure to be filled
 * @param : ddn, ddn information (supplies the CP SEID and the PDR id)
 * @return : Returns nothing
 */
static void
fill_pfcp_sess_rep_req(pfcp_sess_rpt_req_t *pfcp_sess_rep_req,
		ddn_t **ddn)
{
	/* NOTE(review): function-local static sequence counter -- not safe if
	 * this is ever called from multiple lcores concurrently; confirm the
	 * single-threaded assumption. */
	static uint32_t seq = 1;
	memset(pfcp_sess_rep_req, 0, sizeof(pfcp_sess_rpt_req_t));
	seq = get_pfcp_sequence_number(PFCP_SESSION_REPORT_REQUEST, seq);
	set_pfcp_seid_header((pfcp_header_t *) &(pfcp_sess_rep_req->header),
		PFCP_SESSION_REPORT_REQUEST, HAS_SEID, seq, NO_CP_MODE_REQUIRED);
	/* Address the report to the control-plane session */
	pfcp_sess_rep_req->header.seid_seqno.has_seid.seid = (*ddn)->cp_seid;
	set_sess_report_type(&pfcp_sess_rep_req->report_type);
	/* Need to Implement handling of other IE's when Rules implementation is done */
	if (pfcp_sess_rep_req->report_type.dldr == 1) {
		set_dldr_ie(&pfcp_sess_rep_req->dnlnk_data_rpt);
		pfcp_sess_rep_req->dnlnk_data_rpt.pdr_id[0].rule_id = (*ddn)->pdr_id;
	}
	/* Message length = IE lengths + 8; the 8 presumably accounts for the
	 * SEID field of the PFCP header -- TODO confirm against the encoder. */
	pfcp_sess_rep_req->header.message_len = pfcp_sess_rep_req->report_type.header.len +
		pfcp_sess_rep_req->dnlnk_data_rpt.header.len + 8;
	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"VS:- Sending DDN Request to Control Plane for CP Seid: %lu, PDR_ID:%u\n",
		LOG_VALUE, (*ddn)->cp_seid, (*ddn)->pdr_id);
}
/* Encode a PFCP Session Report Request (DDN trigger) and send it to the CP.
 * NOTE(review): the return type is uint8_t, so the "return -1" error value
 * reaches callers as 255 and a "< 0" check on it can never fire -- confirm
 * callers test "!= 0".
 * NOTE(review): the success log always formats the IPv4 field of peer_addr,
 * so for an IPv6 peer the printed address is meaningless (the send itself
 * handles both families). */
uint8_t
process_pfcp_session_report_req(peer_addr_t peer_addr, ddn_t *ddn)
{
	int encoded = 0;
	uint8_t pfcp_msg[250] = {0};
	pfcp_sess_rpt_req_t pfcp_sess_rep_req = {0};
	fill_pfcp_sess_rep_req(&pfcp_sess_rep_req, &ddn);
	encoded = encode_pfcp_sess_rpt_req_t(&pfcp_sess_rep_req, pfcp_msg);
	if ( pfcp_send(my_sock.sock_fd, my_sock.sock_fd_v6, pfcp_msg, encoded, peer_addr, SENT) < 0 ) {
		clLog(clSystemLog, eCLSeverityDebug,
			LOG_FORMAT"Error in Sending PFCP SESSION REPORT REQ %i\n",
			LOG_VALUE, errno);
		return -1;
	}
	clLog(clSystemLog, eCLSeverityDebug,
		LOG_FORMAT"Sends Report Request message to CP:"IPV4_ADDR" for trigger DDN\n",
		LOG_VALUE, IPV4_ADDR_HOST_FORMAT(peer_addr.ipv4.sin_addr.s_addr));
	return 0;
}
/**
 * @brief : Data-plane sends a DDN (PFCP Session Report Request) to the
 *          control-plane to trigger paging / bearer activation for a session
 *          with buffered DL data.
 * @param : pdr, PDR whose session has buffered DL data
 * @return : Returns 0 in case of success , -1 otherwise
 */
static int
send_ddn_request(pdr_info_t *pdr)
{
	ddn_t *ddn = malloc(sizeof(ddn_t));
	/* BUG FIX: the original dereferenced the malloc() result unchecked */
	if (ddn == NULL) {
		clLog(clSystemLog, eCLSeverityCritical,
				LOG_FORMAT"Failed to allocate memory for DDN request\n",
				LOG_VALUE);
		return -1;
	}
	ddn->pdr_id = pdr->rule_id;
	memcpy(&ddn->cp_seid, &(pdr->session)->cp_seid, sizeof(uint64_t));
	memcpy(&ddn->up_seid, &(pdr->session)->up_seid, sizeof(uint64_t));
	/* VS: Process and initiate the DDN Request.
	 * BUG FIX: the callee returns uint8_t, so its -1 error arrives as 255
	 * and the original "< 0" test could never fire; test "!= 0" instead. */
	if (process_pfcp_session_report_req((pdr->session)->cp_ip, ddn) != 0) {
		perror("msgsnd");
		/* BUG FIX: the original leaked ddn on this error path */
		free(ddn);
		return -1;
	}
	/* Free allocated memory */
	free(ddn);
	++epc_app.dl_params[SGI_PORT_ID].ddn;
	return 0;
}
/**
 * @brief : Enqueue DL packets on their session's buffering ring according to
 *          the FAR actions of each packet's PDR.  For an IDLE session with the
 *          NOCP action, a DDN (PFCP Session Report Request) is triggered; END
 *          MARKER GTP-U packets for idle sessions are discarded.
 * @param : pdrs, per-packet PDR lookup results (a NULL entry drops the packet)
 * @param : sess_info, per-packet session data
 * @param : pkts, packet burst
 * @param : pkts_queue_mask, bitmask of packet indices to process
 * @return : Returns nothing
 */
void
enqueue_dl_pkts(pdr_info_t **pdrs, pfcp_session_datat_t **sess_info,
		struct rte_mbuf **pkts, uint64_t pkts_queue_mask)
{
	int i = 0, rc = 0;
	pdr_info_t *pdr = NULL;
	struct rte_ring *ring = NULL;
	struct pfcp_session_datat_t *si = NULL;
	/* Iterate over the set bits (packet indices) of the queue mask */
	while (pkts_queue_mask) {
		i = __builtin_ffsll(pkts_queue_mask) - 1;
		RESET_BIT(pkts_queue_mask, i);
		si = sess_info[i];
		pdr = pdrs[i];
		if (pdr == NULL) {
			clLog(clSystemLog, eCLSeverityCritical,
					LOG_FORMAT"DROPPED Packet, PDR is NULL\n", LOG_VALUE);
			rte_pktmbuf_free(pkts[i]);
			continue;
		}
		/* Check the action */
		if ((pdr->far)->actions.drop) {
			clLog(clSystemLog, eCLSeverityInfo, LOG_FORMAT"Action : DROP :"
					"Dropping pkts for this Session:%lu\n", LOG_VALUE,
					(pdr->session)->up_seid);
			rte_pktmbuf_free(pkts[i]);
			continue;
		}
		/* Discarding the END MARKER for the IDLE Session */
		struct ether_hdr *ether = NULL;
		struct gtpu_hdr *gtpu_hdr = NULL;
		/* Get the ether header info */
		ether = (struct ether_hdr *)rte_pktmbuf_mtod(pkts[i], uint8_t *);
		/* Handle the IPv4 packets */
		if (ether && (ether->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv4))) {
			gtpu_hdr = get_mtogtpu(pkts[i]);
			if (gtpu_hdr && (gtpu_hdr->msgtype == GTP_GEMR)) {
				clLog(clSystemLog, eCLSeverityInfo, LOG_FORMAT"IPv4 Session State : IDLE"
						"Dropping pkts the endmarker pkts for this Session:%lu\n", LOG_VALUE,
						(pdr->session)->up_seid);
				rte_pktmbuf_free(pkts[i]);
				continue;
			}
		} else if (ether && (ether->ether_type == rte_cpu_to_be_16(ETHER_TYPE_IPv6))) {
			gtpu_hdr = get_mtogtpu_v6(pkts[i]);
			if (gtpu_hdr && (gtpu_hdr->msgtype == GTP_GEMR)) {
				clLog(clSystemLog, eCLSeverityInfo, LOG_FORMAT"IPv6 Session State : IDLE"
						"Dropping pkts the endmarker pkts for this Session:%lu\n", LOG_VALUE,
						(pdr->session)->up_seid);
				rte_pktmbuf_free(pkts[i]);
				continue;
			}
		}
		ring = si->dl_ring;
		if ((!ring) /* && ((pdr->far)->actions.buff) */) {
			/* Lazily allocate the per-session DL buffering ring */
			ring = allocate_ring((pdr->session)->bar.dl_buf_suggstd_pckts_cnt.pckt_cnt_val);
			if (ring == NULL) {
				clLog(clSystemLog, eCLSeverityInfo, LOG_FORMAT"Not enough memory, can't "
						"buffer this Session: %lu\n", LOG_VALUE,
						(pdr->session)->up_seid);
				rte_pktmbuf_free(pkts[i]);
				continue;
			}
			si->dl_ring = ring;
			if (si->sess_state == IDLE) {
				/* First buffered packet of an idle session: notify the CP */
				if ((pdr->far)->actions.nocp) {
					rc = send_ddn_request(pdr);
					if(rc < 0) {
						clLog(clSystemLog, eCLSeverityCritical,
								LOG_FORMAT"Failed to send ddn req for session: %lu\n",
								LOG_VALUE, (pdr->session)->up_seid);
					}
					si->sess_state = IN_PROGRESS;
				}
			}
		}
		if (((pdr->far)->actions.nocp) || ((pdr->far)->actions.buff) || ((pdr->far)->actions.forw)) {
			if (rte_ring_enqueue(si->dl_ring, (void *)pkts[i]) == -ENOBUFS) {
				/* Ring full: drop the packet, release the ring and fall back
				 * to IDLE so a fresh DDN is raised for the next packet */
				rte_pktmbuf_free(pkts[i]);
				rte_ring_free(si->dl_ring);
				if(rte_ring_sc_dequeue(dl_ring_container, (void *)&si->dl_ring) == -ENOENT) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Ring not found\n",LOG_VALUE);
				} else{
					clLog(clSystemLog, eCLSeverityDebug,
							LOG_FORMAT"Dequeued Ring \n",LOG_VALUE);
				}
				si->dl_ring = NULL;
				pkts[i] = NULL;
				si->sess_state = IDLE;
				clLog(clSystemLog, eCLSeverityCritical,
						LOG_FORMAT"Can't queue pkt ring full"
						" So Dropping pkt\n", LOG_VALUE);
				/* Send PFCP Session Report Request */
				rc = send_ddn_request(pdr);
				if(rc < 0) {
					clLog(clSystemLog, eCLSeverityCritical,
							LOG_FORMAT"Failed to send ddn req for session: %lu\n",
							LOG_VALUE, (pdr->session)->up_seid);
				}
			} else {
				/* BUG FIX: the action description previously tested
				 * actions.nocp in every arm of the conditional chain
				 * (copy-paste), so "Buffer" was unreachable and the combined
				 * case compared nocp with itself; test nocp AND buff. */
				clLog(clSystemLog, eCLSeverityDebug, LOG_FORMAT"ACTIONS : %s :"
						"Buffering the PKTS\n", LOG_VALUE,
						(((pdr->far)->actions.nocp != 0) &&
						((pdr->far)->actions.buff != 0)) ? "Notify to CP, Buffer," :
						(pdr->far)->actions.nocp != 0 ? "Notify to CP" :
						(pdr->far)->actions.buff != 0 ? "Buffer" :"UNKNOWN");
			}
		}
	}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.