/*
The MIT License
Copyright (c) 2019 Lehrstuhl Informatik 11 - RWTH Aachen University
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE

This file is part of embeddedRTPS.

Author: i11 - Embedded Software, RWTH Aachen University
*/

#include "rtps/entities/Domain.h"
#include "rtps/utils/Log.h"
#include "rtps/utils/udpUtils.h"

#if DOMAIN_VERBOSE && RTPS_GLOBAL_VERBOSE
/* Logging helper for this translation unit. Wrapped in do { ... } while (0)
 * so DOMAIN_LOG(...); behaves like a single statement: the previous
 * `if (true) { ... }` form silently captured a caller's `else` branch
 * (dangling-else) when used as the body of an unbraced if. */
#define DOMAIN_LOG(...)                                                        \
  do {                                                                         \
    printf("[Domain] ");                                                       \
    printf(__VA_ARGS__);                                                       \
    printf("\n");                                                              \
  } while (0)
#else
/* Disabled build: expand to a no-op statement. The previous `//` expansion
 * relied on comment stripping and left callers with a bare `;`. */
#define DOMAIN_LOG(...)                                                        \
  do {                                                                         \
  } while (0)
#endif

static void receiveCallback(rtps_Domain *pDomain, const rtps_PacketInfo *packet);
static GuidPrefix_t generateGuidPrefix(ParticipantId_t id);
static void createBuiltinWritersAndReaders(rtps_Domain *pDomain, rtps_Participant *pPart);
static void registerPort(rtps_Domain *pDomain, const rtps_Participant *part);
static void registerMulticastPort(rtps_Domain *pDomain, Locator mcastLocator);
static void receiveJumppad(void *callee, const rtps_PacketInfo *packet);

void rtps_Domain_Construct(rtps_Domain *pDomain) {
  // Initializes domain bookkeeping, creates the thread pool and the UDP
  // transport, opens the user and built-in multicast ports and joins the
  // default RTPS discovery multicast group (239.255.0.1).
  // Threads are NOT started here; call rtps_Domain_completeInit afterwards.
  pDomain->PARTICIPANT_START_ID = 0;
  pDomain->m_nextParticipantId = pDomain->PARTICIPANT_START_ID;
  pDomain->m_numStatelessWriters = 0;
  pDomain->m_numStatelessReaders = 0;
  pDomain->m_numStatefulReaders = 0;
  pDomain->m_numStatefulWriters = 0;
  pDomain->m_initComplete = false;
  // The thread pool forwards received packets to this domain via
  // receiveJumppad; the transport hands raw packets to the pool.
  pDomain->m_threadPool = rtps_ThreadPool_Construct(NULL, receiveJumppad, (void *)pDomain);
  pDomain->m_transport = rtps_UdpDriverCtor(rtps_ThreadPool_readCallback, pDomain->m_threadPool);
  // Connection-creation results are intentionally discarded here.
  (void)rtps_UdpDriver_createUdpConnection(pDomain->m_transport, getUserMulticastPort());
  (void)rtps_UdpDriver_createUdpConnection(pDomain->m_transport, getBuiltInMulticastPort());
  rtps_UdpDriver_joinMultiCastGroup(transformIP4ToU32(239, 255, 0, 1));
}

void rtps_Domain_Destruct(rtps_Domain *pDomain) {
  // Stops the worker threads, then tears down the transport and thread pool.
  rtps_Domain_stop(pDomain);
  rtps_UdpDriverDtor(pDomain->m_transport);
  rtps_ThreadPool_Destruct(pDomain->m_threadPool);
  #if 0//need app to destruct
  // NOTE(review): dead code kept for reference — the application currently
  // owns destruction of participants/readers/writers. If this block is ever
  // re-enabled, beware: the two reader loops below have commented-out
  // bodies, so each would take the FOLLOWING for-statement as its loop body.
  // Fix the bodies (and the writer destructor used on m_statelessWriters)
  // before enabling.
  //participat
  for (uint8_t i = 0; i < MAX_NUM_PARTICIPANTS; i++)
    rtps_Participant_Destruct(&pDomain->m_participants[i]);
  //READER
  for (uint8_t i = 0; i < NUM_STATEFUL_READERS; i++)
    //rtps_StatefulReader_Destruct(&pDomain->m_statefulReaders[i]);
  for (uint8_t i = 0; i < NUM_STATELESS_READERS; i++)
    //rtps_StatefulReader_Destruct(&pDomain->m_statelessReaders[i]);
  //WRITER
  for (uint8_t i = 0; i < NUM_STATEFUL_WRITERS; i++)
    rtps_StatefulWriter_destruct(&pDomain->m_statefulWriters[i]);
  for (uint8_t i = 0; i < NUM_STATELESS_WRITERS; i++)
    rtps_StatefulWriter_destruct(&pDomain->m_statelessWriters[i]);
  #endif
}

bool rtps_Domain_completeInit(rtps_Domain *pDomain) {
  // Starts the thread pool's worker threads and, on success, kicks off SPDP
  // discovery for every participant created so far.
  // Returns true when the threads started successfully.
  pDomain->m_initComplete = startThreads(pDomain->m_threadPool);

  if (!pDomain->m_initComplete) {
    DOMAIN_LOG("Failed starting threads\n");
    // Bug fix: previously the SPDP agents were started even though the
    // transport threads failed to come up; bail out instead.
    return false;
  }

  // Number of participants handed out so far (slots are offset by
  // PARTICIPANT_START_ID, consistent with the loops in receiveCallback).
  uint8_t numParticipants =
      (uint8_t)(pDomain->m_nextParticipantId - pDomain->PARTICIPANT_START_ID);
  for (uint8_t i = 0; i < numParticipants; i++) {
    rtps_SPDPAgent_start(rtps_Participant_getSPDPAgent(&pDomain->m_participants[i]));
  }
  return true;
}

void rtps_Domain_stop(rtps_Domain *pDomain) {
  // Stops the thread pool's worker threads; counterpart to completeInit.
  stopThreads(pDomain->m_threadPool);
}

/* Trampoline registered with the thread pool: recovers the rtps_Domain from
 * the opaque callee pointer and forwards the packet to receiveCallback. */
static void receiveJumppad(void *callee, const rtps_PacketInfo *packet) {
  receiveCallback((rtps_Domain *)callee, packet);
}

// Dispatches an incoming UDP packet to the participant(s) it addresses,
// decided by the packet's destination port (meta multicast, user multicast,
// or a participant-specific unicast port).
static void receiveCallback(rtps_Domain *pDomain, const rtps_PacketInfo *packet) {
  if (packet->buffer->firstElement->next != NULL) {
    // Only the first pbuf element is handed to the participants below;
    // a chained buffer's tail would be dropped silently.
    DOMAIN_LOG("Cannot handle multiple elements chained. You might "
               "want to increase PBUF_POOL_BUFSIZE\n");
  }

  if (isMetaMultiCastPort(packet->destPort)) {
    // Built-in (meta/discovery) multicast: pass to all participants.
    DOMAIN_LOG("Domain: Multicast to port %u\n", packet->destPort);
    for (uint8_t i = 0; i < pDomain->m_nextParticipantId - pDomain->PARTICIPANT_START_ID; ++i) {
      rtps_Participant_newMessage(&pDomain->m_participants[i],
          (uint8_t *)(packet->buffer->firstElement->payload),
          packet->buffer->firstElement->len);
    }
  } else if (isUserMultiCastPort(packet->destPort)) {
    // User multicast: the port is the same for everyone, so forward only
    // to participants that have a reader bound to this multicast address.
    DOMAIN_LOG("Domain: Got user multicast message on port %u\n",
               packet->destPort);
    for (uint8_t i = 0; i < pDomain->m_nextParticipantId - pDomain->PARTICIPANT_START_ID; ++i) {
      if (rtps_Participant_hasReaderWithMulticastLocator(&pDomain->m_participants[i], packet->destAddr)) {
        DOMAIN_LOG("Domain: Forward Multicast only to Participant: %u\n", i);
        rtps_Participant_newMessage(&pDomain->m_participants[i],
            (uint8_t *)(packet->buffer->firstElement->payload),
            packet->buffer->firstElement->len);
      }
    }
  } else {
    // Unicast: the destination port encodes exactly one participant id.
    ParticipantId_t id = getParticipantIdFromUnicastPort(
        packet->destPort, isUserPort(packet->destPort));
    if (id != PARTICIPANT_ID_INVALID) {
      DOMAIN_LOG("Domain: Got unicast message on port %u\n", packet->destPort);
      // Range check guards the array index: ids below PARTICIPANT_START_ID
      // or at/above m_nextParticipantId would index out of bounds.
      if (id < pDomain->m_nextParticipantId &&
          id >= pDomain->PARTICIPANT_START_ID) {
        rtps_Participant_newMessage(&pDomain->m_participants[id - pDomain->PARTICIPANT_START_ID],
            (uint8_t *)(packet->buffer->firstElement->payload),
            packet->buffer->firstElement->len);
      } else {
        DOMAIN_LOG("Domain: Participant id too high or unplausible.\n");
      }
    } else {
      DOMAIN_LOG("Domain: Got message to port %u: no matching participant\n",
                 packet->destPort);
    }
  }
}

/* Allocates the next free participant slot, gives it a fresh GUID prefix,
 * opens its unicast ports and wires up the built-in discovery endpoints.
 * Returns NULL once init has completed or when all slots are in use. */
rtps_Participant *rtps_Domain_createParticipant(rtps_Domain *pDomain) {

  DOMAIN_LOG("Domain: Creating new participant.\n");

  const size_t capacity =
      sizeof(pDomain->m_participants) / sizeof(rtps_Participant);
  const uint8_t slot =
      (uint8_t)(pDomain->m_nextParticipantId - pDomain->PARTICIPANT_START_ID);
  if (pDomain->m_initComplete || slot >= capacity) {
    return NULL;
  }

  rtps_Participant *participant = &pDomain->m_participants[slot];
  rtps_Participant_Construct(participant, NULL, NULL);

  GuidPrefix_t prefix = generateGuidPrefix(pDomain->m_nextParticipantId);
  rtps_Participant_reuse(participant, &prefix, pDomain->m_nextParticipantId);

  registerPort(pDomain, participant);
  createBuiltinWritersAndReaders(pDomain, participant);

  pDomain->m_nextParticipantId++;
  return participant;
}

// Carves the built-in SPDP/SEDP endpoints for one participant out of the
// domain's endpoint pools, initializes them, and registers them with the
// participant. Pool counters are bumped without capacity checks here; the
// caller (createParticipant) bounds the number of participants.
static void createBuiltinWritersAndReaders(rtps_Domain *pDomain, rtps_Participant *pPart) {
  struct rtps_StatelessWriter *pSpdpWriter = &pDomain->m_statelessWriters[pDomain->m_numStatelessWriters++];
  // SPDP
  StatelessReader *pSpdpReader = &pDomain->m_statelessReaders[pDomain->m_numStatelessReaders++];

  // SPDP announcements are best-effort, sent to the built-in multicast
  // locator with a well-known entity id.
  TopicData spdpWriterAttributes;
  spdpWriterAttributes.topicName[0] = '\0';
  spdpWriterAttributes.typeName[0] = '\0';
  spdpWriterAttributes.reliabilityKind = BEST_EFFORT;
  spdpWriterAttributes.durabilityKind = TRANSIENT_LOCAL;
  spdpWriterAttributes.endpointGuid.prefix = pPart->m_guidPrefix;
  spdpWriterAttributes.endpointGuid.entityId = ENTITYID_SPDP_BUILTIN_PARTICIPANT_WRITER;
  rtps_Locator_getBuiltInMulticastLocator(&spdpWriterAttributes.unicastLocator);
  rtps_StatelessWriterT_init(pSpdpWriter, spdpWriterAttributes, WITH_KEY, pDomain->m_threadPool,
                  pDomain->m_transport, false);
  // NOTE(review): stack-local proxy — rtps_StatelessWriterT_addNewMatchedReader
  // presumably copies it; confirm (original note: "fix me by liudi, need to free").
  struct rtps_ReaderProxy readerP;
  Locator loc;
  Guid_t guid;
  rtps_Locator_getBuiltInMulticastLocator(&loc);
  
  guid.prefix = pPart->m_guidPrefix;
  guid.entityId = ENTITYID_SPDP_BUILTIN_PARTICIPANT_READER;
  rtps_ReaderProxy_Construct(&readerP, &guid, &loc, NULL);
  rtps_StatelessWriterT_addNewMatchedReader(pSpdpWriter, &readerP);

  // NOTE(review): only the endpoint GUID is set here; the remaining
  // TopicData fields stay uninitialized — presumably
  // rtps_StatelessReader_init reads just the GUID. TODO confirm.
  TopicData spdpReaderAttributes;

  spdpReaderAttributes.endpointGuid.prefix = pPart->m_guidPrefix;
  spdpReaderAttributes.endpointGuid.entityId = ENTITYID_SPDP_BUILTIN_PARTICIPANT_READER;
  rtps_StatelessReader_init(pSpdpReader, &spdpReaderAttributes);
  // SEDP
  StatefulReader *pSedpPubReader = &pDomain->m_statefulReaders[pDomain->m_numStatefulReaders++];
  StatefulReader *pSedpSubReader = &pDomain->m_statefulReaders[pDomain->m_numStatefulReaders++];
  struct rtps_StatefulWriter *pSedpPubWriter = &pDomain->m_statefulWriters[pDomain->m_numStatefulWriters++];
  struct rtps_StatefulWriter *pSedpSubWriter = &pDomain->m_statefulWriters[pDomain->m_numStatefulWriters++];

  // Prepare attributes shared by all four SEDP endpoints: reliable traffic
  // over this participant's built-in unicast locator. Only the entity id
  // differs per endpoint and is overwritten before each init below.
  // NOTE(review): multicastLocator is never set — TODO confirm SEDP
  // endpoints ignore it.
  TopicData sedpAttributes;
  sedpAttributes.topicName[0] = '\0';
  sedpAttributes.typeName[0] = '\0';
  sedpAttributes.reliabilityKind = RELIABLE;
  sedpAttributes.durabilityKind = TRANSIENT_LOCAL;
  sedpAttributes.endpointGuid.prefix = pPart->m_guidPrefix;
  rtps_Locator_getBuiltInUnicastLocator(&sedpAttributes.unicastLocator, pPart->m_participantId);

  // READER
  sedpAttributes.endpointGuid.entityId =
      ENTITYID_SEDP_BUILTIN_PUBLICATIONS_READER;

  rtps_StatefulReader_init(pSedpPubReader, &sedpAttributes, pDomain->m_transport);
  sedpAttributes.endpointGuid.entityId =
      ENTITYID_SEDP_BUILTIN_SUBSCRIPTIONS_READER;
  rtps_StatefulReader_init(pSedpSubReader, &sedpAttributes, pDomain->m_transport);

  // WRITER
  sedpAttributes.endpointGuid.entityId =
      ENTITYID_SEDP_BUILTIN_PUBLICATIONS_WRITER;
  rtps_StatefulWriter_init(pSedpPubWriter, sedpAttributes, NO_KEY, pDomain->m_threadPool,
                     pDomain->m_transport, false);

  sedpAttributes.endpointGuid.entityId =
      ENTITYID_SEDP_BUILTIN_SUBSCRIPTIONS_WRITER;
  rtps_StatefulWriter_init(pSedpSubWriter, sedpAttributes, NO_KEY, pDomain->m_threadPool,
                     pDomain->m_transport, false);
  // COLLECT — hand all six endpoints to the participant.
  BuiltInEndpoints endpoints;
  endpoints.spdpWriter = pSpdpWriter;
  endpoints.spdpReader = pSpdpReader;
  endpoints.sedpPubReader = pSedpPubReader;
  endpoints.sedpSubReader = pSedpSubReader;
  endpoints.sedpPubWriter = pSedpPubWriter;
  endpoints.sedpSubWriter = pSedpSubWriter;
  rtps_Participant_addBuiltInEndpoints(pPart, &endpoints);
}

/* Opens the two unicast UDP ports (user and built-in traffic) assigned to
 * this participant's id. Creation results are intentionally discarded. */
static void registerPort(rtps_Domain *pDomain, const rtps_Participant *pPart) {
  const ParticipantId_t pid = pPart->m_participantId;
  (void)rtps_UdpDriver_createUdpConnection(pDomain->m_transport,
                                           getUserUnicastPort(pid));
  (void)rtps_UdpDriver_createUdpConnection(pDomain->m_transport,
                                           getBuiltInUnicastPort(pid));
}

/* Opens a UDP connection on the given multicast locator's port.
 * Non-UDPv4 locators are ignored. */
static void registerMulticastPort(rtps_Domain *pDomain, Locator mcastLocator) {
  if (mcastLocator.kind != LOCATOR_KIND_UDPv4) {
    return;
  }
  rtps_UdpDriver_createUdpConnection(pDomain->m_transport,
                                     rtps_Locator_getLocatorPort(&mcastLocator));
}

// Searches the domain's reader pools for an initialized reader whose topic
// and type name both match. Returns it, or NULL when none matches.
// The participant parameter is currently unused (readers are pooled
// domain-wide); kept for interface stability.
struct rtps_Reader *rtps_Domain_readerExists(rtps_Domain *pDomain, rtps_Participant *pPart, const char *topicName,
                                   const char *typeName, bool reliable) {
  if (reliable) {
    for (unsigned int i = 0; i < pDomain->m_numStatefulReaders; i++) {
      if (rtps_Reader_isInitialized((struct rtps_Reader *)&pDomain->m_statefulReaders[i])) {
        // Bug fix: the topic name was previously bounded by
        // MAX_TYPENAME_LENGTH instead of MAX_TOPICNAME_LENGTH.
        if (strncmp(pDomain->m_statefulReaders[i].m_reader.m_attributes.topicName, topicName,
                    MAX_TOPICNAME_LENGTH) != 0) {
          continue;
        }

        if (strncmp(pDomain->m_statefulReaders[i].m_reader.m_attributes.typeName, typeName,
                    MAX_TYPENAME_LENGTH) != 0) {
          continue;
        }

        DOMAIN_LOG("StatefulReader exists already [%s, %s]\n", topicName,
                   typeName);

        return (struct rtps_Reader *)&pDomain->m_statefulReaders[i];
      }
    }
  } else {
    for (unsigned int i = 0; i < pDomain->m_numStatelessReaders; i++) {
      if (rtps_Reader_isInitialized((struct rtps_Reader *)&pDomain->m_statelessReaders[i])) {
        // Bug fix: same wrong bound as above (MAX_TYPENAME_LENGTH).
        if (strncmp(pDomain->m_statelessReaders[i].m_reader.m_attributes.topicName, topicName,
                    MAX_TOPICNAME_LENGTH) != 0) {
          continue;
        }

        if (strncmp(pDomain->m_statelessReaders[i].m_reader.m_attributes.typeName, typeName,
                    MAX_TYPENAME_LENGTH) != 0) {
          continue;
        }

        DOMAIN_LOG("StatelessReader exists [%s, %s]\n", topicName, typeName);

        return (struct rtps_Reader *)&pDomain->m_statelessReaders[i];
      }
    }
  }

  (void)pPart;
  return NULL;
}

// Searches the domain's writer pools for an initialized writer whose topic
// and type name both match. Returns it, or NULL when none matches.
// The participant parameter is currently unused (writers are pooled
// domain-wide); kept for interface stability.
struct rtps_Writer *rtps_Domain_writerExists(rtps_Domain *pDomain, rtps_Participant *part, const char *topicName,
                                   const char *typeName, bool reliable) {
  if (reliable) {
    for (unsigned int i = 0; i < pDomain->m_numStatefulWriters; i++) {
      if (rtps_Writer_isInitialized((struct rtps_Writer *)&pDomain->m_statefulWriters[i])) {
        // Bug fix: the topic name was previously bounded by
        // MAX_TYPENAME_LENGTH instead of MAX_TOPICNAME_LENGTH.
        if (strncmp(pDomain->m_statefulWriters[i].writer.m_attributes.topicName, topicName,
                    MAX_TOPICNAME_LENGTH) != 0) {
          continue;
        }

        if (strncmp(pDomain->m_statefulWriters[i].writer.m_attributes.typeName, typeName,
                    MAX_TYPENAME_LENGTH) != 0) {
          continue;
        }

        DOMAIN_LOG("StatefulWriter exists [%s, %s]\n", topicName, typeName);

        return (struct rtps_Writer *)&pDomain->m_statefulWriters[i];
      }
    }
  } else {
    for (unsigned int i = 0; i < pDomain->m_numStatelessWriters; i++) {
      if (rtps_Writer_isInitialized((struct rtps_Writer *)&pDomain->m_statelessWriters[i])) {
        // Bug fix: same wrong bound as above (MAX_TYPENAME_LENGTH).
        if (strncmp(pDomain->m_statelessWriters[i].writer.m_attributes.topicName, topicName,
                    MAX_TOPICNAME_LENGTH) != 0) {
          continue;
        }

        if (strncmp(pDomain->m_statelessWriters[i].writer.m_attributes.typeName, typeName,
                    MAX_TYPENAME_LENGTH) != 0) {
          continue;
        }

        DOMAIN_LOG("StatelessWriter exists [%s, %s]\n", topicName, typeName);

        return (struct rtps_Writer *)&pDomain->m_statelessWriters[i];
      }
    }
  }

  (void)part;
  return NULL;
}

// Creates a user-defined writer for the given topic/type on behalf of the
// participant and registers it with the participant.
// Returns NULL when the writer pools or the participant are full, or when
// the names do not fit the TopicData buffers.
struct rtps_Writer *rtps_Domain_createWriter(rtps_Domain *pDomain, rtps_Participant *part, const char *topicName,
                                   const char *typeName, bool reliable,
                                   bool enforceUnicast) {

  // Check if there is enough capacity for more writers
  if ((reliable && (sizeof(pDomain->m_statefulWriters)/sizeof(struct rtps_StatefulWriter)) <= pDomain->m_numStatefulWriters) ||
      (!reliable && (sizeof(pDomain->m_statelessWriters)/sizeof(struct rtps_StatelessWriter)) <= pDomain->m_numStatelessWriters) ||
      rtps_Participant_isWritersFull(part)) {

    DOMAIN_LOG("No Writer created. Max Number of Writers reached.\n");
    return NULL;
  }

  // TODO Distinguish WithKey and NoKey (Also changes EntityKind)
  TopicData attributes;

  // Bug fix: bound the names by the actual destination buffers so strcpy
  // (including its terminating NUL) can never overflow. The old check used
  // "> MAX_*_LENGTH", which let names of exactly the maximum length through
  // and overflowed by one byte when the buffers are MAX_*_LENGTH bytes.
  if (strlen(topicName) >= sizeof(attributes.topicName) ||
      strlen(typeName) >= sizeof(attributes.typeName)) {
    return NULL;
  }
  strcpy(attributes.topicName, topicName);
  strcpy(attributes.typeName, typeName);
  attributes.endpointGuid.prefix = part->m_guidPrefix;
  attributes.endpointGuid.entityId.entityKind = USER_DEFINED_WRITER_WITHOUT_KEY;
  memcpy(&attributes.endpointGuid.entityId.entityKey, rtps_Participant_getNextUserEntityKey(part),
      sizeof(attributes.endpointGuid.entityId.entityKey));
  rtps_Locator_getUserUnicastLocator(&attributes.unicastLocator, part->m_participantId);
  // NOTE(review): attributes.multicastLocator is never set here — presumably
  // writers ignore it; confirm against the writer init implementations.
  attributes.durabilityKind = TRANSIENT_LOCAL;

  DOMAIN_LOG("Creating writer[%s, %s]\n", topicName, typeName);

  if (reliable) {
    attributes.reliabilityKind = RELIABLE;

    StatefulWriter *writer = &pDomain->m_statefulWriters[pDomain->m_numStatefulWriters++];

    rtps_StatefulWriter_init(writer, attributes, NO_KEY, pDomain->m_threadPool, pDomain->m_transport,
                enforceUnicast);

    // NOTE(review): the cast assumes rtps_Writer is the leading member of
    // StatefulWriter — confirm (original note: "need to fix by liudi").
    rtps_Participant_addWriter(part, (struct rtps_Writer *)writer);
    return (struct rtps_Writer *)writer;
  } else {
    attributes.reliabilityKind = BEST_EFFORT;

    StatelessWriter *writer = &pDomain->m_statelessWriters[pDomain->m_numStatelessWriters++];

    rtps_StatelessWriter_init(writer, attributes, NO_KEY, pDomain->m_threadPool, pDomain->m_transport,
                enforceUnicast);

    rtps_Participant_addWriter(part, (struct rtps_Writer *)writer);
    return (struct rtps_Writer *)writer;
  }
}

// Creates a user-defined reader for the given topic/type on behalf of the
// participant, optionally joining a multicast group, and registers the
// reader with the participant.
// Returns NULL when the reader pools or the participant are full, or when
// the names do not fit the TopicData buffers.
struct rtps_Reader *rtps_Domain_createReader(rtps_Domain *pDomain, rtps_Participant *part, const char *topicName,
                                   const char *typeName, bool reliable,
                                   ip4_addr_t mcastaddress) {
  if ((reliable && (sizeof(pDomain->m_statefulReaders)/sizeof(StatefulReader)) <= pDomain->m_numStatefulReaders) ||
      (!reliable && (sizeof(pDomain->m_statelessReaders)/sizeof(StatelessReader)) <= pDomain->m_numStatelessReaders) ||
      rtps_Participant_isReadersFull(part)) {

    DOMAIN_LOG("No Reader created. Max Number of Readers reached.\n");

    return NULL;
  }

  // TODO Distinguish WithKey and NoKey (Also changes EntityKind)
  TopicData attributes;

  // Bug fix: bound the names by the actual destination buffers so strcpy
  // (including its terminating NUL) can never overflow. The old check used
  // "> MAX_*_LENGTH", which let names of exactly the maximum length through
  // and overflowed by one byte when the buffers are MAX_*_LENGTH bytes.
  if (strlen(topicName) >= sizeof(attributes.topicName) ||
      strlen(typeName) >= sizeof(attributes.typeName)) {
    return NULL;
  }
  strcpy(attributes.topicName, topicName);
  strcpy(attributes.typeName, typeName);
  attributes.endpointGuid.prefix = part->m_guidPrefix;
  attributes.endpointGuid.entityId.entityKind = USER_DEFINED_READER_WITHOUT_KEY;
  memcpy(&attributes.endpointGuid.entityId.entityKey, rtps_Participant_getNextUserEntityKey(part),
      sizeof(attributes.endpointGuid.entityId.entityKey));
  rtps_Locator_getUserUnicastLocator(&attributes.unicastLocator, part->m_participantId);
  // NOTE(review): when no multicast address is given, attributes.multicastLocator
  // stays uninitialized — presumably the reader init ignores it then; confirm.
  if (!isZeroAddress(mcastaddress)) {
    if (ip4_addr_ismulticast(&mcastaddress)) {
      rtps_Locator_createUDPv4Locator(&attributes.multicastLocator,
          ip4_addr1(&mcastaddress), ip4_addr2(&mcastaddress),
          ip4_addr3(&mcastaddress), ip4_addr4(&mcastaddress),
          getUserMulticastPort());
      rtps_UdpDriver_joinMultiCastGroup(rtps_Locator_getIp4Address(&attributes.multicastLocator));
      registerMulticastPort(pDomain, attributes.multicastLocator);

      DOMAIN_LOG("Multicast enabled!\n");

    } else {

      DOMAIN_LOG("This is not a Multicastaddress!\n");
    }
  }
  attributes.durabilityKind = VOLATILE;

  DOMAIN_LOG("Creating reader[%s, %s]\n", topicName, typeName);

  if (reliable) {
    // Redundant with the capacity check at the top; kept as a cheap guard.
    if (pDomain->m_numStatefulReaders == (sizeof(pDomain->m_statefulReaders)/sizeof(StatefulReader))) {
      return NULL;
    }

    attributes.reliabilityKind = RELIABLE;

    StatefulReader *reader = &pDomain->m_statefulReaders[pDomain->m_numStatefulReaders++];
    rtps_StatefulReader_init(reader, &attributes, pDomain->m_transport);

    if (!rtps_Participant_addReader(part, (struct rtps_Reader *)reader)) {
      return NULL;
    }
    return (struct rtps_Reader *)reader;
  } else {
    // Redundant with the capacity check at the top; kept as a cheap guard.
    if (pDomain->m_numStatelessReaders == (sizeof(pDomain->m_statelessReaders)/sizeof(StatelessReader))) {
      return NULL;
    }

    attributes.reliabilityKind = BEST_EFFORT;

    StatelessReader *reader = &pDomain->m_statelessReaders[pDomain->m_numStatelessReaders++];
    rtps_StatelessReader_init(reader, &attributes);

    if (!rtps_Participant_addReader(part, (struct rtps_Reader *)reader)) {
      return NULL;
    }
    return (struct rtps_Reader *)reader;
  }
}

// Builds a GUID prefix for a new participant: random bytes, with the last
// byte carrying the participant id so unicast traffic can be attributed.
static GuidPrefix_t generateGuidPrefix(ParticipantId_t id) {
  GuidPrefix_t prefix = BASE_GUID_PREFIX;
  // Bug fix: mix the participant id into the seed. Seeding with the bare
  // time meant participants created within the same second (or tick) got
  // identical "random" prefixes.
#if defined(unix) || defined(__unix__)
  srand((unsigned int)time(NULL) ^ (unsigned int)id);
#else
  srand((unsigned int)xTaskGetTickCount() ^ (unsigned int)id);
#endif
  const size_t lastIdx = sizeof(prefix.id) / sizeof(prefix.id[0]) - 1;
  for (size_t i = 0; i < lastIdx; i++) {
    prefix.id[i] = (uint8_t)(rand() % 256);
  }
  // Bug fix: take the low byte of the id explicitly. The old
  // *(uint8_t *)(&id) read whichever byte sits first in memory, which
  // differs between little- and big-endian targets.
  prefix.id[lastIdx] = (uint8_t)(id & 0xFFu);
  return prefix;
}
