["#!/usr/bin/env python3\n# still in development\n#\n\nimport asyncio\nimport websockets\nimport json\nimport requests\n\n\neventsAPIPath = '/api/v1/events'\n\nlocalServerIP = 'PI:FN:0.0.0.0END_PI'\nlocalServerAPIPort = '8000'\nlocalServerWSPort = '8000'\nlocalServerPath = '/sealog-server'\nlocalToken = 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjU5ODFmMTY3MjEyYjM0OGFlZDdmYTlmNSIsInNjb3BlIjpbImFkbWluIiwiZXZlbnRfbWFuYWdlciIsImV2ZW50X2xvZ2dlciIsImV2ZW50X3dhdGNoZXIiXSwiaWF0IjoxNTI1MDE0NDE3fQ.D8ja66bnLxJ3bsJlaKRtOquu8XbibjNCyFxJpI7vafc'\nlocalClientWSID = 'localSealogReceive'\n\nremoteServerIP = '162.243.201.175'\nremoteServerAPIPort = '80'\nremoteServerWSPort = '8000'\nremoteServerPath = '/sealog-server'\nremoteToken = \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpZCI6IjU5ODFmMTY3MjEyYjM0OGFlZDdmYTlmNSIsInNjb3BlIjpbImFkbWluIiwiZXZlbnRfbWFuYWdlciIsImV2ZW50X2xvZ2dlciIsImV2ZW50X3dhdGNoZXIiXSwiaWF0IjoxNTI1MDEzNTUxfQ.8X-fBRUHdrwtkTLcOFAsW-vvvqCzmkZKM2gQgHNkBKk\"\nremoteClientWSID = 'remoteSealogReceive'\n\nhello = {\n 'type': 'hello',\n 'id': remoteClientWSID,\n 'auth': {\n 'headers': {\n 'authorization': remoteToken\n }\n },\n 'version': '2',\n 'subs': ['/ws/status/newEvents']\n}\n\nping = {\n 'type':'ping',\n 'id':remoteClientWSID\n}\n\nlocalHeaders = {'authorization': localToken}\nremoteHeaders = {'authorization': remoteToken}\n\nasync def eventlog():\n try:\n async with websockets.connect('ws://' + remoteServerIP + ':' + remoteServerWSPort) as websocket:\n\n await websocket.send(json.dumps(hello))\n\n while(True):\n\n event = await websocket.recv()\n eventObj = json.loads(event)\n print(\"eventObj:\", eventObj)\n\n if eventObj['type'] and eventObj['type'] == 'ping':\n await websocket.send(json.dumps(ping))\n elif eventObj['type'] and eventObj['type'] == 'pub':\n\n r = requests.post('http://' + localServerIP + ':' + localServerAPIPort + localServerPath + eventsAPIPath, headers=localHeaders, data = json.dumps(eventObj['message']))\n print(r.text)\n\n ### end of repeat\n\n except Exception as error:\n print(error)\n\nasyncio.get_event_loop().run_until_complete(eventlog())\n", "/*\n Copyright (c) 2007-2013 Contributors as noted in the AUTHORS file\n\n This file is part of 0MQ.\n\n 0MQ is free software; you can redistribute it and/or modify it under\n the terms of the GNU Lesser General Public License as published by\n the Free Software Foundation; either version 3 of the License, or\n (at your option) any later version.\n\n 0MQ is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Lesser General Public License for more details.\n\n You should have received a copy of the GNU Lesser General Public License\n along with this program. If not, see .\n*/\n\n#include \"platform.hpp\"\n\n#ifdef ZMQ_HAVE_OPENPGM\n\n#ifdef ZMQ_HAVE_WINDOWS\n#include \"windows.hpp\"\n#endif\n\n#ifdef ZMQ_HAVE_LINUX\n#include \n#endif\n\n#include \n#include \n#include \n\n#include \"options.hpp\"\n#include \"pgm_socket.hpp\"\n#include \"config.hpp\"\n#include \"err.hpp\"\n#include \"random.hpp\"\n#include \"stdint.hpp\"\n\n#ifndef MSG_ERRQUEUE\n#define MSG_ERRQUEUE 0x2000\n#endif\n\nzmq::pgm_socket_t::pgm_socket_t (bool receiver_, const options_t &options_) :\n sock (NULL),\n options (options_),\n receiver (receiver_),\n pgm_msgv (NULL),\n pgm_msgv_len (0),\n nbytes_rec (0),\n nbytes_processed (0),\n pgm_msgv_processed (0)\n{\n}\n\n// Resolve PGM socket address.\n// network_ of the form :\n// e.g. 
eth0;239.192.0.1:7500\n// link-local;224.250.0.1,224.250.0.2;224.250.0.3:8000\n// ;[fe80::1%en0]:7500\nint zmq::pgm_socket_t::init_address (const char *network_,\n struct pgm_addrinfo_t **res, uint16_t *port_number)\n{\n // Parse port number, start from end for IPv6\n const char *port_delim = strrchr (network_, ':');\n if (!port_delim) {\n errno = EINVAL;\n return -1;\n }\n\n *port_number = atoi (port_delim + 1);\n \n char network [256];\n if (port_delim - network_ >= (int) sizeof (network) - 1) {\n errno = EINVAL;\n return -1;\n }\n memset (network, '\\0', sizeof (network));\n memcpy (network, network_, port_delim - network_);\n\n pgm_error_t *pgm_error = NULL;\n struct pgm_addrinfo_t hints;\n\n memset (&hints, 0, sizeof (hints));\n hints.ai_family = AF_UNSPEC;\n if (!pgm_getaddrinfo (network, NULL, res, &pgm_error)) {\n\n // Invalid parameters don't set pgm_error_t.\n zmq_assert (pgm_error != NULL);\n if (pgm_error->domain == PGM_ERROR_DOMAIN_IF &&\n\n // NB: cannot catch EAI_BADFLAGS.\n ( pgm_error->code != PGM_ERROR_SERVICE &&\n pgm_error->code != PGM_ERROR_SOCKTNOSUPPORT)) {\n\n // User, host, or network configuration or transient error.\n pgm_error_free (pgm_error);\n errno = EINVAL;\n return -1;\n }\n\n // Fatal OpenPGM internal error.\n zmq_assert (false);\n }\n return 0;\n}\n\n// Create, bind and connect PGM socket.\nint zmq::pgm_socket_t::init (bool udp_encapsulation_, const char *network_)\n{\n // Can not open transport before destroying old one.\n zmq_assert (sock == NULL);\n zmq_assert (options.rate > 0);\n\n // Zero counter used in msgrecv.\n nbytes_rec = 0;\n nbytes_processed = 0;\n pgm_msgv_processed = 0;\n\n uint16_t port_number;\n struct pgm_addrinfo_t *res = NULL;\n sa_family_t sa_family;\n\n pgm_error_t *pgm_error = NULL;\n\n if (init_address(network_, &res, &port_number) < 0) {\n goto err_abort;\n }\n\n zmq_assert (res != NULL);\n\n // Pick up detected IP family.\n sa_family = res->ai_send_addrs[0].gsr_group.ss_family;\n\n // Create IP/PGM or UDP/PGM socket.\n if (udp_encapsulation_) {\n if (!pgm_socket (&sock, sa_family, SOCK_SEQPACKET, IPPROTO_UDP,\n &pgm_error)) {\n\n // Invalid parameters don't set pgm_error_t.\n zmq_assert (pgm_error != NULL);\n if (pgm_error->domain == PGM_ERROR_DOMAIN_SOCKET && (\n pgm_error->code != PGM_ERROR_BADF &&\n pgm_error->code != PGM_ERROR_FAULT &&\n pgm_error->code != PGM_ERROR_NOPROTOOPT &&\n pgm_error->code != PGM_ERROR_FAILED))\n\n // User, host, or network configuration or transient error.\n goto err_abort;\n\n // Fatal OpenPGM internal error.\n zmq_assert (false);\n }\n\n // All options are of data type int\n const int encapsulation_port = port_number;\n if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_UDP_ENCAP_UCAST_PORT,\n &encapsulation_port, sizeof (encapsulation_port)))\n goto err_abort;\n if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_UDP_ENCAP_MCAST_PORT,\n &encapsulation_port, sizeof (encapsulation_port)))\n goto err_abort;\n }\n else {\n if (!pgm_socket (&sock, sa_family, SOCK_SEQPACKET, IPPROTO_PGM,\n &pgm_error)) {\n\n // Invalid parameters don't set pgm_error_t.\n zmq_assert (pgm_error != NULL);\n if (pgm_error->domain == PGM_ERROR_DOMAIN_SOCKET && (\n pgm_error->code != PGM_ERROR_BADF &&\n pgm_error->code != PGM_ERROR_FAULT &&\n pgm_error->code != PGM_ERROR_NOPROTOOPT &&\n pgm_error->code != PGM_ERROR_FAILED))\n\n // User, host, or network configuration or transient error.\n goto err_abort;\n\n // Fatal OpenPGM internal error.\n zmq_assert (false);\n }\n }\n\n {\n\t\tconst int rcvbuf = 
(int) options.rcvbuf;\n\t\tif (rcvbuf) {\n\t\t if (!pgm_setsockopt (sock, SOL_SOCKET, SO_RCVBUF, &rcvbuf,\n\t\t sizeof (rcvbuf)))\n\t\t goto err_abort;\n\t\t}\n\n\t\tconst int sndbuf = (int) options.sndbuf;\n\t\tif (sndbuf) {\n\t\t if (!pgm_setsockopt (sock, SOL_SOCKET, SO_SNDBUF, &sndbuf,\n\t\t sizeof (sndbuf)))\n\t\t goto err_abort;\n\t\t}\n\n\t\tconst int max_tpdu = (int) pgm_max_tpdu;\n\t\tif (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_MTU, &max_tpdu,\n\t\t sizeof (max_tpdu)))\n\t\t goto err_abort;\n }\n\n if (receiver) {\n const int recv_only = 1,\n rxw_max_tpdu = (int) pgm_max_tpdu,\n rxw_sqns = compute_sqns (rxw_max_tpdu),\n peer_expiry = pgm_secs (300),\n spmr_expiry = pgm_msecs (25),\n nak_bo_ivl = pgm_msecs (50),\n nak_rpt_ivl = pgm_msecs (200),\n nak_rdata_ivl = pgm_msecs (200),\n nak_data_retries = 50,\n nak_ncf_retries = 50;\n\n if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_RECV_ONLY, &recv_only,\n sizeof (recv_only)) ||\n !pgm_setsockopt (sock, IPPROTO_PGM, PGM_RXW_SQNS, &rxw_sqns,\n sizeof (rxw_sqns)) ||\n !pgm_setsockopt (sock, IPPROTO_PGM, PGM_PEER_EXPIRY, &peer_expiry,\n sizeof (peer_expiry)) ||\n !pgm_setsockopt (sock, IPPROTO_PGM, PGM_SPMR_EXPIRY, &spmr_expiry,\n sizeof (spmr_expiry)) ||\n !pgm_setsockopt (sock, IPPROTO_PGM, PGM_NAK_BO_IVL, &nak_bo_ivl,\n sizeof (nak_bo_ivl)) ||\n !pgm_setsockopt (sock, IPPROTO_PGM, PGM_NAK_RPT_IVL, &nak_rpt_ivl,\n sizeof (nak_rpt_ivl)) ||\n !pgm_setsockopt (sock, IPPROTO_PGM, PGM_NAK_RDATA_IVL,\n &nak_rdata_ivl, sizeof (nak_rdata_ivl)) ||\n !pgm_setsockopt (sock, IPPROTO_PGM, PGM_NAK_DATA_RETRIES,\n &nak_data_retries, sizeof (nak_data_retries)) ||\n !pgm_setsockopt (sock, IPPROTO_PGM, PGM_NAK_NCF_RETRIES,\n &nak_ncf_retries, sizeof (nak_ncf_retries)))\n goto err_abort;\n }\n else {\n const int send_only = 1,\n max_rte = (int) ((options.rate * 1000) / 8),\n txw_max_tpdu = (int) pgm_max_tpdu,\n txw_sqns = compute_sqns (txw_max_tpdu),\n ambient_spm = pgm_secs (30),\n heartbeat_spm[] = { pgm_msecs (100),\n pgm_msecs (100),\n pgm_msecs (100),\n pgm_msecs (100),\n pgm_msecs (1300),\n pgm_secs (7),\n pgm_secs (16),\n pgm_secs (25),\n pgm_secs (30) };\n\n if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_SEND_ONLY,\n &send_only, sizeof (send_only)) ||\n !pgm_setsockopt (sock, IPPROTO_PGM, PGM_ODATA_MAX_RTE,\n &max_rte, sizeof (max_rte)) ||\n !pgm_setsockopt (sock, IPPROTO_PGM, PGM_TXW_SQNS,\n &txw_sqns, sizeof (txw_sqns)) ||\n !pgm_setsockopt (sock, IPPROTO_PGM, PGM_AMBIENT_SPM,\n &ambient_spm, sizeof (ambient_spm)) ||\n !pgm_setsockopt (sock, IPPROTO_PGM, PGM_HEARTBEAT_SPM,\n &heartbeat_spm, sizeof (heartbeat_spm)))\n goto err_abort;\n }\n\n // PGM transport GSI.\n struct pgm_sockaddr_t addr;\n\n memset (&addr, 0, sizeof(addr));\n addr.sa_port = port_number;\n addr.sa_addr.sport = DEFAULT_DATA_SOURCE_PORT;\n\n // Create random GSI.\n uint32_t buf [2];\n buf [0] = generate_random ();\n buf [1] = generate_random ();\n if (!pgm_gsi_create_from_data (&addr.sa_addr.gsi, (uint8_t*) buf, 8))\n goto err_abort;\n\n\n // Bind a transport to the specified network devices.\n struct pgm_interface_req_t if_req;\n memset (&if_req, 0, sizeof(if_req));\n if_req.ir_interface = res->ai_recv_addrs[0].gsr_interface;\n if_req.ir_scope_id = 0;\n if (AF_INET6 == sa_family) {\n struct sockaddr_in6 sa6;\n memcpy (&sa6, &res->ai_recv_addrs[0].gsr_group, sizeof (sa6));\n if_req.ir_scope_id = sa6.sin6_scope_id;\n }\n if (!pgm_bind3 (sock, &addr, sizeof (addr), &if_req, sizeof (if_req),\n &if_req, sizeof (if_req), &pgm_error)) {\n\n // Invalid parameters don't set pgm_error_t.\n 
zmq_assert (pgm_error != NULL);\n if ((pgm_error->domain == PGM_ERROR_DOMAIN_SOCKET ||\n pgm_error->domain == PGM_ERROR_DOMAIN_IF) && (\n pgm_error->code != PGM_ERROR_INVAL &&\n pgm_error->code != PGM_ERROR_BADF &&\n pgm_error->code != PGM_ERROR_FAULT))\n\n // User, host, or network configuration or transient error.\n goto err_abort;\n\n // Fatal OpenPGM internal error.\n zmq_assert (false);\n }\n\n // Join IP multicast groups.\n for (unsigned i = 0; i < res->ai_recv_addrs_len; i++) {\n if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_JOIN_GROUP,\n &res->ai_recv_addrs [i], sizeof (struct group_req)))\n goto err_abort;\n }\n if (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_SEND_GROUP,\n &res->ai_send_addrs [0], sizeof (struct group_req)))\n goto err_abort;\n\n pgm_freeaddrinfo (res);\n res = NULL;\n\n // Set IP level parameters.\n {\n\t\t// Multicast loopback disabled by default\n\t\tconst int multicast_loop = 0;\n\t\tif (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_MULTICAST_LOOP,\n\t\t &multicast_loop, sizeof (multicast_loop)))\n\t\t goto err_abort;\n\n\t\tconst int multicast_hops = options.multicast_hops;\n\t\tif (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_MULTICAST_HOPS,\n\t\t &multicast_hops, sizeof (multicast_hops)))\n\t\t goto err_abort;\n\n\t\t// Expedited Forwarding PHB for network elements, no ECN.\n\t\t// Ignore return value due to varied runtime support.\n\t\tconst int dscp = 0x2e << 2;\n\t\tif (AF_INET6 != sa_family)\n\t\t pgm_setsockopt (sock, IPPROTO_PGM, PGM_TOS,\n\t\t &dscp, sizeof (dscp));\n\n\t\tconst int nonblocking = 1;\n\t\tif (!pgm_setsockopt (sock, IPPROTO_PGM, PGM_NOBLOCK,\n\t\t &nonblocking, sizeof (nonblocking)))\n\t\t goto err_abort;\n }\n\n // Connect PGM transport to start state machine.\n if (!pgm_connect (sock, &pgm_error)) {\n\n // Invalid parameters don't set pgm_error_t.\n zmq_assert (pgm_error != NULL);\n goto err_abort;\n }\n\n // For receiver transport preallocate pgm_msgv array.\n if (receiver) {\n zmq_assert (in_batch_size > 0);\n size_t max_tsdu_size = get_max_tsdu_size ();\n pgm_msgv_len = (int) in_batch_size / max_tsdu_size;\n if ((int) in_batch_size % max_tsdu_size)\n pgm_msgv_len++;\n zmq_assert (pgm_msgv_len);\n\n pgm_msgv = (pgm_msgv_t*) malloc (sizeof (pgm_msgv_t) * pgm_msgv_len);\n alloc_assert (pgm_msgv);\n }\n\n return 0;\n\nerr_abort:\n if (sock != NULL) {\n pgm_close (sock, FALSE);\n sock = NULL;\n }\n if (res != NULL) {\n pgm_freeaddrinfo (res);\n res = NULL;\n }\n if (pgm_error != NULL) {\n pgm_error_free (pgm_error);\n pgm_error = NULL;\n }\n errno = EINVAL;\n return -1;\n}\n\nzmq::pgm_socket_t::~pgm_socket_t ()\n{\n if (pgm_msgv)\n free (pgm_msgv);\n if (sock) \n pgm_close (sock, TRUE);\n}\n\n// Get receiver fds. receive_fd_ is signaled for incoming packets,\n// waiting_pipe_fd_ is signaled for state driven events and data.\nvoid zmq::pgm_socket_t::get_receiver_fds (fd_t *receive_fd_, \n fd_t *waiting_pipe_fd_)\n{\n socklen_t socklen;\n bool rc;\n\n zmq_assert (receive_fd_);\n zmq_assert (waiting_pipe_fd_);\n\n socklen = sizeof (*receive_fd_);\n rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_RECV_SOCK, receive_fd_,\n &socklen);\n zmq_assert (rc);\n zmq_assert (socklen == sizeof (*receive_fd_));\n\n socklen = sizeof (*waiting_pipe_fd_);\n rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_PENDING_SOCK, waiting_pipe_fd_,\n &socklen);\n zmq_assert (rc);\n zmq_assert (socklen == sizeof (*waiting_pipe_fd_));\n}\n\n// Get fds and store them into user allocated memory. 
\n// send_fd is for non-blocking send wire notifications.\n// receive_fd_ is for incoming back-channel protocol packets.\n// rdata_notify_fd_ is raised for waiting repair transmissions.\n// pending_notify_fd_ is for state driven events.\nvoid zmq::pgm_socket_t::get_sender_fds (fd_t *send_fd_, fd_t *receive_fd_, \n fd_t *rdata_notify_fd_, fd_t *pending_notify_fd_)\n{\n socklen_t socklen;\n bool rc;\n\n zmq_assert (send_fd_);\n zmq_assert (receive_fd_);\n zmq_assert (rdata_notify_fd_);\n zmq_assert (pending_notify_fd_);\n\n socklen = sizeof (*send_fd_);\n rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_SEND_SOCK, send_fd_, &socklen);\n zmq_assert (rc);\n zmq_assert (socklen == sizeof (*send_fd_));\n\n socklen = sizeof (*receive_fd_);\n rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_RECV_SOCK, receive_fd_,\n &socklen);\n zmq_assert (rc);\n zmq_assert (socklen == sizeof (*receive_fd_));\n\n socklen = sizeof (*rdata_notify_fd_);\n rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_REPAIR_SOCK, rdata_notify_fd_,\n &socklen);\n zmq_assert (rc);\n zmq_assert (socklen == sizeof (*rdata_notify_fd_));\n\n socklen = sizeof (*pending_notify_fd_);\n rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_PENDING_SOCK,\n pending_notify_fd_, &socklen);\n zmq_assert (rc);\n zmq_assert (socklen == sizeof (*pending_notify_fd_));\n}\n\n// Send one APDU, transmit window owned memory.\n// data_len_ must be less than one TPDU.\nsize_t zmq::pgm_socket_t::send (unsigned char *data_, size_t data_len_)\n{\n size_t nbytes = 0;\n \n const int status = pgm_send (sock, data_, data_len_, &nbytes);\n\n // We have to write all data as one packet.\n if (nbytes > 0) {\n zmq_assert (status == PGM_IO_STATUS_NORMAL);\n zmq_assert (nbytes == data_len_);\n }\n else {\n zmq_assert (status == PGM_IO_STATUS_RATE_LIMITED ||\n status == PGM_IO_STATUS_WOULD_BLOCK);\n\n if (status == PGM_IO_STATUS_RATE_LIMITED)\n errno = ENOMEM;\n else\n errno = EBUSY;\n }\n\n // Save return value.\n last_tx_status = status;\n\n return nbytes;\n}\n\nlong zmq::pgm_socket_t::get_rx_timeout ()\n{\n if (last_rx_status != PGM_IO_STATUS_RATE_LIMITED &&\n last_rx_status != PGM_IO_STATUS_TIMER_PENDING)\n return -1;\n\n struct timeval tv;\n socklen_t optlen = sizeof (tv);\n const bool rc = pgm_getsockopt (sock, IPPROTO_PGM,\n last_rx_status == PGM_IO_STATUS_RATE_LIMITED ? 
PGM_RATE_REMAIN :\n PGM_TIME_REMAIN, &tv, &optlen);\n zmq_assert (rc);\n\n const long timeout = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);\n\n return timeout;\n}\n\nlong zmq::pgm_socket_t::get_tx_timeout ()\n{\n if (last_tx_status != PGM_IO_STATUS_RATE_LIMITED)\n return -1;\n\n struct timeval tv;\n socklen_t optlen = sizeof (tv);\n const bool rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_RATE_REMAIN, &tv,\n &optlen);\n zmq_assert (rc);\n\n const long timeout = (tv.tv_sec * 1000) + (tv.tv_usec / 1000);\n\n return timeout;\n}\n\n// Return max TSDU size without fragmentation from current PGM transport.\nsize_t zmq::pgm_socket_t::get_max_tsdu_size ()\n{\n int max_tsdu = 0;\n socklen_t optlen = sizeof (max_tsdu);\n\n bool rc = pgm_getsockopt (sock, IPPROTO_PGM, PGM_MSS, &max_tsdu, &optlen);\n zmq_assert (rc);\n zmq_assert (optlen == sizeof (max_tsdu));\n return (size_t) max_tsdu;\n}\n\n// pgm_recvmsgv is called to fill the pgm_msgv array up to pgm_msgv_len.\n// In subsequent calls data from pgm_msgv structure are returned.\nssize_t zmq::pgm_socket_t::receive (void **raw_data_, const pgm_tsi_t **tsi_)\n{\n size_t raw_data_len = 0;\n\n // We have already passed up all the data from the last pgm_recvmsgv call\n // and have to return 0 so that another engine in this thread gets scheduled.\n if (nbytes_rec == nbytes_processed && nbytes_rec > 0) {\n\n // Reset all the counters.\n nbytes_rec = 0;\n nbytes_processed = 0;\n pgm_msgv_processed = 0;\n errno = EAGAIN;\n return 0;\n }\n\n // If this is the first call, or if we have processed all the pgm_msgv_t\n // structures previously read from the PGM socket.\n if (nbytes_rec == nbytes_processed) {\n\n // Check program flow.\n zmq_assert (pgm_msgv_processed == 0);\n zmq_assert (nbytes_processed == 0);\n zmq_assert (nbytes_rec == 0);\n\n // Receive a vector of Application Protocol Domain Units (APDUs) \n // from the transport.\n pgm_error_t *pgm_error = NULL;\n\n const int status = pgm_recvmsgv (sock, pgm_msgv,\n pgm_msgv_len, MSG_ERRQUEUE, &nbytes_rec, &pgm_error);\n\n // Invalid parameters.\n zmq_assert (status != PGM_IO_STATUS_ERROR);\n\n last_rx_status = status;\n\n // In the case when no ODATA/RDATA fired the POLLIN event (SPM...)\n // pgm_recvmsgv returns PGM_IO_STATUS_TIMER_PENDING.\n if (status == PGM_IO_STATUS_TIMER_PENDING) {\n\n zmq_assert (nbytes_rec == 0);\n\n // If no RDATA/ODATA caused the POLLIN event, 0 is \n // returned.\n nbytes_rec = 0;\n errno = EBUSY;\n return 0;\n }\n\n // Sending SPMR, NAK, or ACK is rate limited.\n if (status == PGM_IO_STATUS_RATE_LIMITED) {\n\n zmq_assert (nbytes_rec == 0);\n\n // If no RDATA/ODATA caused the POLLIN event, 0 is returned.\n nbytes_rec = 0;\n errno = ENOMEM;\n return 0;\n }\n\n // No peers and hence no incoming packets.\n if (status == PGM_IO_STATUS_WOULD_BLOCK) {\n\n zmq_assert (nbytes_rec == 0);\n\n // If no RDATA/ODATA caused the POLLIN event, 0 is returned.\n nbytes_rec = 0;\n errno = EAGAIN;\n return 0;\n }\n\n // Data loss.\n if (status == PGM_IO_STATUS_RESET) {\n\n struct pgm_sk_buff_t* skb = pgm_msgv [0].msgv_skb [0];\n\n // Save lost data TSI.\n *tsi_ = &skb->tsi;\n nbytes_rec = 0;\n\n // In case of data loss, -1 is returned.\n errno = EINVAL;\n pgm_free_skb (skb);\n return -1;\n }\n\n zmq_assert (status == PGM_IO_STATUS_NORMAL);\n }\n else\n {\n zmq_assert (pgm_msgv_processed <= pgm_msgv_len);\n }\n\n // Zero byte payloads are valid in PGM, but not in the 0MQ protocol.\n zmq_assert (nbytes_rec > 0);\n\n // Only one APDU per pgm_msgv_t structure is allowed.\n zmq_assert (pgm_msgv [pgm_msgv_processed].msgv_len == 1);\n \n struct pgm_sk_buff_t* 
skb = \n pgm_msgv [pgm_msgv_processed].msgv_skb [0];\n\n // Take pointers from pgm_msgv_t structure.\n *raw_data_ = skb->data;\n raw_data_len = skb->len;\n\n // Save current TSI.\n *tsi_ = &skb->tsi;\n\n // Move to the next pgm_msgv_t structure.\n pgm_msgv_processed++;\n zmq_assert (pgm_msgv_processed <= pgm_msgv_len);\n nbytes_processed += raw_data_len;\n\n return raw_data_len;\n}\n\nvoid zmq::pgm_socket_t::process_upstream ()\n{\n pgm_msgv_t dummy_msg;\n\n size_t dummy_bytes = 0;\n pgm_error_t *pgm_error = NULL;\n\n const int status = pgm_recvmsgv (sock, &dummy_msg,\n 1, MSG_ERRQUEUE, &dummy_bytes, &pgm_error);\n\n // Invalid parameters.\n zmq_assert (status != PGM_IO_STATUS_ERROR);\n\n // No data should be returned.\n zmq_assert (dummy_bytes == 0 && (status == PGM_IO_STATUS_TIMER_PENDING || \n status == PGM_IO_STATUS_RATE_LIMITED ||\n status == PGM_IO_STATUS_WOULD_BLOCK));\n\n last_rx_status = status;\n\n if (status == PGM_IO_STATUS_TIMER_PENDING)\n errno = EBUSY;\n else\n if (status == PGM_IO_STATUS_RATE_LIMITED)\n errno = ENOMEM;\n else\n errno = EAGAIN;\n}\n\nint zmq::pgm_socket_t::compute_sqns (int tpdu_)\n{\n // Convert rate into B/ms.\n uint64_t rate = uint64_t (options.rate) / 8;\n \n // Compute the size of the buffer in bytes.\n uint64_t size = uint64_t (options.recovery_ivl) * rate;\n\n // Translate the size into number of packets.\n uint64_t sqns = size / tpdu_;\n\n // Buffer should be able to hold at least one packet.\n if (sqns == 0)\n sqns = 1;\n\n return (int) sqns;\n}\n\n#endif\n\n", "from jsonrpc import ServiceProxy\nimport sys\nimport string\n\n# ===== BEGIN USER SETTINGS =====\n# if you do not set these you will be prompted for a password for every command\nrpcuser = \"\"\nrpcpass = \"\"\n# ====== END USER SETTINGS ======\n\n\nif rpcpass == \"\":\n\taccess = ServiceProxy(\"http://127.0.0.1:9332\")\nelse:\n\taccess = ServiceProxy(\"http://\"+rpcuser+\":\"+rpcpass+\"@127.0.0.1:9332\")\ncmd = sys.argv[1].lower()\n\nif cmd == \"backupwallet\":\n\ttry:\n\t\tpath = raw_input(\"Enter destination path/filename: \")\n\t\tprint access.backupwallet(path)\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"getaccount\":\n\ttry:\n\t\taddr = raw_input(\"Enter a Sarnath address: \")\n\t\tprint access.getaccount(addr)\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"getaccountaddress\":\n\ttry:\n\t\tacct = raw_input(\"Enter an account name: \")\n\t\tprint access.getaccountaddress(acct)\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"getaddressesbyaccount\":\n\ttry:\n\t\tacct = raw_input(\"Enter an account name: \")\n\t\tprint access.getaddressesbyaccount(acct)\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"getbalance\":\n\ttry:\n\t\tacct = raw_input(\"Enter an account (optional): \")\n\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n\t\ttry:\n\t\t\tprint access.getbalance(acct, mc)\n\t\texcept:\n\t\t\tprint access.getbalance()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"getblockbycount\":\n\ttry:\n\t\theight = raw_input(\"Height: \")\n\t\tprint access.getblockbycount(height)\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"getblockcount\":\n\ttry:\n\t\tprint access.getblockcount()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"getblocknumber\":\n\ttry:\n\t\tprint access.getblocknumber()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == 
\"getconnectioncount\":\n\ttry:\n\t\tprint access.getconnectioncount()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"getdifficulty\":\n\ttry:\n\t\tprint access.getdifficulty()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"getgenerate\":\n\ttry:\n\t\tprint access.getgenerate()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"gethashespersec\":\n\ttry:\n\t\tprint access.gethashespersec()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"getinfo\":\n\ttry:\n\t\tprint access.getinfo()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"getnewaddress\":\n\ttry:\n\t\tacct = raw_input(\"Enter an account name: \")\n\t\ttry:\n\t\t\tprint access.getnewaddress(acct)\n\t\texcept:\n\t\t\tprint access.getnewaddress()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"getreceivedbyaccount\":\n\ttry:\n\t\tacct = raw_input(\"Enter an account (optional): \")\n\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n\t\ttry:\n\t\t\tprint access.getreceivedbyaccount(acct, mc)\n\t\texcept:\n\t\t\tprint access.getreceivedbyaccount()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"getreceivedbyaddress\":\n\ttry:\n\t\taddr = raw_input(\"Enter a Sarnath address (optional): \")\n\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n\t\ttry:\n\t\t\tprint access.getreceivedbyaddress(addr, mc)\n\t\texcept:\n\t\t\tprint access.getreceivedbyaddress()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"gettransaction\":\n\ttry:\n\t\ttxid = raw_input(\"Enter a transaction ID: \")\n\t\tprint access.gettransaction(txid)\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"getwork\":\n\ttry:\n\t\tdata = raw_input(\"Data (optional): \")\n\t\ttry:\n\t\t\tprint access.gettransaction(data)\n\t\texcept:\n\t\t\tprint access.gettransaction()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"help\":\n\ttry:\n\t\tcmd = raw_input(\"Command (optional): \")\n\t\ttry:\n\t\t\tprint access.help(cmd)\n\t\texcept:\n\t\t\tprint access.help()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"listaccounts\":\n\ttry:\n\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n\t\ttry:\n\t\t\tprint access.listaccounts(mc)\n\t\texcept:\n\t\t\tprint access.listaccounts()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"listreceivedbyaccount\":\n\ttry:\n\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n\t\tincemp = raw_input(\"Include empty? (true/false, optional): \")\n\t\ttry:\n\t\t\tprint access.listreceivedbyaccount(mc, incemp)\n\t\texcept:\n\t\t\tprint access.listreceivedbyaccount()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"listreceivedbyaddress\":\n\ttry:\n\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n\t\tincemp = raw_input(\"Include empty? 
(true/false, optional): \")\n\t\ttry:\n\t\t\tprint access.listreceivedbyaddress(mc, incemp)\n\t\texcept:\n\t\t\tprint access.listreceivedbyaddress()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"listtransactions\":\n\ttry:\n\t\tacct = raw_input(\"Account (optional): \")\n\t\tcount = raw_input(\"Number of transactions (optional): \")\n\t\tfrm = raw_input(\"Skip (optional):\")\n\t\ttry:\n\t\t\tprint access.listtransactions(acct, count, frm)\n\t\texcept:\n\t\t\tprint access.listtransactions()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"move\":\n\ttry:\n\t\tfrm = raw_input(\"From: \")\n\t\tto = raw_input(\"To: \")\n\t\tamt = raw_input(\"Amount:\")\n\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n\t\tcomment = raw_input(\"Comment (optional): \")\n\t\ttry:\n\t\t\tprint access.move(frm, to, amt, mc, comment)\n\t\texcept:\n\t\t\tprint access.move(frm, to, amt)\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"sendfrom\":\n\ttry:\n\t\tfrm = raw_input(\"From: \")\n\t\tto = raw_input(\"To: \")\n\t\tamt = raw_input(\"Amount:\")\n\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n\t\tcomment = raw_input(\"Comment (optional): \")\n\t\tcommentto = raw_input(\"Comment-to (optional): \")\n\t\ttry:\n\t\t\tprint access.sendfrom(frm, to, amt, mc, comment, commentto)\n\t\texcept:\n\t\t\tprint access.sendfrom(frm, to, amt)\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"sendmany\":\n\ttry:\n\t\tfrm = raw_input(\"From: \")\n\t\tto = raw_input(\"To (in format address1:amount1,address2:amount2,...): \")\n\t\tmc = raw_input(\"Minimum confirmations (optional): \")\n\t\tcomment = raw_input(\"Comment (optional): \")\n\t\ttry:\n\t\t\tprint access.sendmany(frm,to,mc,comment)\n\t\texcept:\n\t\t\tprint access.sendmany(frm,to)\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"sendtoaddress\":\n\ttry:\n\t\tto = raw_input(\"To (in format address1:amount1,address2:amount2,...): \")\n\t\tamt = raw_input(\"Amount:\")\n\t\tcomment = raw_input(\"Comment (optional): \")\n\t\tcommentto = raw_input(\"Comment-to (optional): \")\n\t\ttry:\n\t\t\tprint access.sendtoaddress(to,amt,comment,commentto)\n\t\texcept:\n\t\t\tprint access.sendtoaddress(to,amt)\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"setaccount\":\n\ttry:\n\t\taddr = raw_input(\"Address: \")\n\t\tacct = raw_input(\"Account:\")\n\t\tprint access.setaccount(addr,acct)\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"setgenerate\":\n\ttry:\n\t\tgen= raw_input(\"Generate? 
(true/false): \")\n\t\tcpus = raw_input(\"Max processors/cores (-1 for unlimited, optional):\")\n\t\ttry:\n\t\t\tprint access.setgenerate(gen, cpus)\n\t\texcept:\n\t\t\tprint access.setgenerate(gen)\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"settxfee\":\n\ttry:\n\t\tamt = raw_input(\"Amount:\")\n\t\tprint access.settxfee(amt)\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"stop\":\n\ttry:\n\t\tprint access.stop()\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"validateaddress\":\n\ttry:\n\t\taddr = raw_input(\"Address: \")\n\t\tprint access.validateaddress(addr)\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"walletpassphrase\":\n\ttry:\n\t\tpwd = raw_input(\"Enter wallet passphrase: \")\n\t\taccess.walletpassphrase(pwd, 60)\n\t\tprint \"\\n---Wallet unlocked---\\n\"\n\texcept:\n\t\tprint \"\\n---An error occurred---\\n\"\n\nelif cmd == \"walletpassphrasechange\":\n\ttry:\n\t\tpwd = raw_input(\"Enter old wallet passphrase: \")\n\t\tpwd2 = raw_input(\"Enter new wallet passphrase: \")\n\t\taccess.walletpassphrasechange(pwd, pwd2)\n\t\tprint\n\t\tprint \"\\n---Passphrase changed---\\n\"\n\texcept:\n\t\tprint\n\t\tprint \"\\n---An error occurred---\\n\"\n\t\tprint\n\nelse:\n\tprint \"Command not found or not supported\"\n", "const path = require(\"path\");\nconst HtmlWebpackPlugin = require(\"html-webpack-plugin\");\nconst MiniCssExtractPlugin = require(\"mini-css-extract-plugin\");\n\nmodule.exports = {\n entry: {\n index: path.resolve(__dirname, \"./src/index.tsx\")\n },\n output: {\n path: path.resolve(__dirname, \"./dist\"),\n filename: \"./js/[name].js\"\n },\n plugins: [\n new HtmlWebpackPlugin({\n filename: \"index.html\",\n template: \"src/index.html\"\n }),\n new MiniCssExtractPlugin({\n filename: \"./css/style.css\"\n })\n ],\n resolve: {\n extensions: [\n \".ts\", // for ts-loader\n \".tsx\", // for ts-loader\n \".js\",\n \".jsx\"\n ]\n },\n module: {\n rules: [\n {\n test: /\\.tsx?$/,\n use: \"ts-loader\"\n },\n {\n test: /\\.scss$/,\n use: [MiniCssExtractPlugin.loader, \"css-loader\", \"sass-loader\"]\n },\n {\n test: /\\.(jpg|png|gif)$/,\n use: {\n loader: \"file-loader\",\n options: {\n name: \"./images/[name].[ext]\",\n outputPath: \"./\",\n publicPath: path => \".\" + path\n }\n }\n },\n {\n test: /\\.html$/,\n use: [\n {\n loader: \"html-loader\",\n options: { minimize: true }\n }\n ]\n }\n ]\n },\n devServer: {\n contentBase: \"./dist\",\n port: 8081,\n inline: true,\n host: \"PI:FN:0.0.0.0END_PI\"\n }\n};\n", "package compute_test\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"testing\"\n\n\t\"github.com/hashicorp/terraform-plugin-sdk/helper/resource\"\n\t\"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance\"\n\t\"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/acceptance/check\"\n)\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkAcceleratedNetworking(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkAcceleratedNetworking(data, true),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkAcceleratedNetworkingUpdated(t *testing.T) {\n\tdata := 
acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkAcceleratedNetworking(data, false),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t\t{\n\t\t\tConfig: r.networkAcceleratedNetworking(data, true),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t\t{\n\t\t\tConfig: r.networkAcceleratedNetworking(data, false),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkApplicationGateway(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkApplicationGateway(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkApplicationSecurityGroup(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkApplicationSecurityGroup(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkApplicationSecurityGroupUpdate(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\t// none\n\t\t\tConfig: r.networkPrivate(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t\t{\n\t\t\t// one\n\t\t\tConfig: r.networkApplicationSecurityGroup(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t\t{\n\t\t\t// another\n\t\t\tConfig: r.networkApplicationSecurityGroupUpdated(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t\t{\n\t\t\t// none\n\t\t\tConfig: r.networkPrivate(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkDNSServers(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: 
r.networkDNSServers(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t\t{\n\t\t\tConfig: r.networkDNSServersUpdated(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkIPForwarding(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\t// enabled\n\t\t\tConfig: r.networkIPForwarding(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t\t{\n\t\t\t// disabled\n\t\t\tConfig: r.networkPrivate(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t\t{\n\t\t\t// enabled\n\t\t\tConfig: r.networkIPForwarding(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkIPv6(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkIPv6(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t\tExpectError: regexp.MustCompile(\"Error expanding `network_interface`: An IPv6 Primary IP Configuration is unsupported - instead add a IPv4 IP Configuration as the Primary and make the IPv6 IP Configuration the secondary\"),\n\t\t},\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkLoadBalancer(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkLoadBalancer(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkMultipleIPConfigurations(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkMultipleIPConfigurations(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkMultipleIPConfigurationsIPv6(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkMultipleIPConfigurationsIPv6(data),\n\t\t\tCheck: 
resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkMultipleNICs(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkMultipleNICs(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkMultipleNICsMultipleIPConfigurations(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkMultipleNICsMultipleIPConfigurations(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkMultipleNICsMultiplePublicIPs(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkMultipleNICsMultiplePublicIPs(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkMultipleNICsWithDifferentDNSServers(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkMultipleNICsWithDifferentDNSServers(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkNetworkSecurityGroup(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkNetworkSecurityGroup(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkNetworkSecurityGroupUpdate(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\t// without\n\t\t\tConfig: r.networkPrivate(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t\t{\n\t\t\t// add one\n\t\t\tConfig: r.networkNetworkSecurityGroup(data),\n\t\t\tCheck: 
resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t\t{\n\t\t\t// change it\n\t\t\tConfig: r.networkNetworkSecurityGroupUpdated(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t\t{\n\t\t\t// remove it\n\t\t\tConfig: r.networkPrivate(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkPrivate(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkPrivate(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkPublicIP(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkPublicIP(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkPublicIPDomainNameLabel(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkPublicIPDomainNameLabel(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkPublicIPFromPrefix(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkPublicIPFromPrefix(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc TestAccLinuxVirtualMachineScaleSet_networkPublicIPTags(t *testing.T) {\n\tdata := acceptance.BuildTestData(t, \"azurerm_linux_virtual_machine_scale_set\", \"test\")\n\tr := LinuxVirtualMachineScaleSetResource{}\n\n\tdata.ResourceTest(t, r, []resource.TestStep{\n\t\t{\n\t\t\tConfig: r.networkPublicIPTags(data),\n\t\t\tCheck: resource.ComposeTestCheckFunc(\n\t\t\t\tcheck.That(data.ResourceName).ExistsInAzure(r),\n\t\t\t),\n\t\t},\n\t\tdata.ImportStep(\n\t\t\t\"admin_password\",\n\t\t),\n\t})\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkAcceleratedNetworking(data acceptance.TestData, enabled bool) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = 
azurerm_resource_group.test.location\n sku = \"Standard_F4\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"example\"\n primary = true\n enable_accelerated_networking = %t\n\n ip_configuration {\n name = \"internal\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n }\n }\n}\n`, r.template(data), data.RandomInteger, enabled)\n}\n\nfunc (LinuxVirtualMachineScaleSetResource) networkApplicationGateway(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\nprovider \"azurerm\" {\n features {}\n}\n\nresource \"azurerm_resource_group\" \"test\" {\n name = \"acctestRG-%d\"\n location = \"%s\"\n}\n\nresource \"azurerm_virtual_network\" \"test\" {\n name = \"acctest-vnet-%d\"\n resource_group_name = \"${azurerm_resource_group.test.name}\"\n address_space = [\"10.0.0.0/16\"]\n location = \"${azurerm_resource_group.test.location}\"\n}\n\nresource \"azurerm_subnet\" \"test\" {\n name = \"subnet-%d\"\n resource_group_name = \"${azurerm_resource_group.test.name}\"\n virtual_network_name = \"${azurerm_virtual_network.test.name}\"\n address_prefix = \"10.0.0.0/24\"\n}\n\nresource \"azurerm_public_ip\" \"test\" {\n name = \"acctest-pubip-%d\"\n location = \"${azurerm_resource_group.test.location}\"\n resource_group_name = \"${azurerm_resource_group.test.name}\"\n allocation_method = \"Dynamic\"\n}\n\n# since these variables are re-used - a locals block makes this more maintainable\nlocals {\n backend_address_pool_name = \"${azurerm_virtual_network.test.name}-beap\"\n frontend_port_name = \"${azurerm_virtual_network.test.name}-feport\"\n frontend_ip_configuration_name = \"${azurerm_virtual_network.test.name}-feip\"\n http_setting_name = \"${azurerm_virtual_network.test.name}-be-htst\"\n listener_name = \"${azurerm_virtual_network.test.name}-httplstn\"\n request_routing_rule_name = \"${azurerm_virtual_network.test.name}-rqrt\"\n}\n\nresource \"azurerm_application_gateway\" \"test\" {\n name = \"acctestag-%d\"\n resource_group_name = \"${azurerm_resource_group.test.name}\"\n location = \"${azurerm_resource_group.test.location}\"\n\n sku {\n name = \"Standard_Small\"\n tier = \"Standard\"\n capacity = 2\n }\n\n gateway_ip_configuration {\n name = \"my-gateway-ip-configuration\"\n subnet_id = \"${azurerm_subnet.test.id}\"\n }\n\n frontend_port {\n name = \"${local.frontend_port_name}\"\n port = 80\n }\n\n frontend_ip_configuration {\n name = \"${local.frontend_ip_configuration_name}\"\n public_ip_address_id = \"${azurerm_public_ip.test.id}\"\n }\n\n backend_address_pool {\n name = \"${local.backend_address_pool_name}\"\n }\n\n backend_http_settings {\n name = \"${local.http_setting_name}\"\n cookie_based_affinity = \"Disabled\"\n port = 80\n protocol = \"Http\"\n request_timeout = 1\n }\n\n http_listener {\n name = \"${local.listener_name}\"\n frontend_ip_configuration_name = \"${local.frontend_ip_configuration_name}\"\n frontend_port_name = \"${local.frontend_port_name}\"\n protocol = \"Http\"\n }\n\n request_routing_rule {\n name = \"${local.request_routing_rule_name}\"\n rule_type = \"Basic\"\n http_listener_name = \"${local.listener_name}\"\n backend_address_pool_name = \"${local.backend_address_pool_name}\"\n backend_http_settings_name 
= \"${local.http_setting_name}\"\n }\n}\n\nresource \"azurerm_subnet\" \"other\" {\n name = \"other\"\n resource_group_name = \"${azurerm_resource_group.test.name}\"\n virtual_network_name = \"${azurerm_virtual_network.test.name}\"\n address_prefix = \"PI:FN:10.0.1.0/24END_PI\"\n}\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"example\"\n primary = true\n\n ip_configuration {\n name = \"internal\"\n primary = true\n subnet_id = azurerm_subnet.other.id\n application_gateway_backend_address_pool_ids = [azurerm_application_gateway.test.backend_address_pool.0.id]\n }\n }\n}\n`, data.RandomInteger, data.Locations.Primary, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger, data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkApplicationSecurityGroup(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_application_security_group\" \"test\" {\n name = \"acctestasg-%d\"\n location = azurerm_resource_group.test.location\n resource_group_name = azurerm_resource_group.test.name\n}\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"example\"\n primary = true\n\n ip_configuration {\n name = \"internal\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n application_security_group_ids = [azurerm_application_security_group.test.id]\n }\n }\n}\n`, r.template(data), data.RandomInteger, data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkApplicationSecurityGroupUpdated(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_application_security_group\" \"test\" {\n name = \"acctestasg-%d\"\n location = azurerm_resource_group.test.location\n resource_group_name = azurerm_resource_group.test.name\n}\n\nresource \"azurerm_application_security_group\" \"other\" {\n name = \"acctestasg2-%d\"\n location = azurerm_resource_group.test.location\n resource_group_name = azurerm_resource_group.test.name\n}\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = 
\"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"example\"\n primary = true\n\n ip_configuration {\n name = \"internal\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n application_security_group_ids = [\n azurerm_application_security_group.test.id,\n azurerm_application_security_group.other.id,\n ]\n }\n }\n}\n`, r.template(data), data.RandomInteger, data.RandomInteger, data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkDNSServers(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"example\"\n primary = true\n dns_servers = [\"8.8.8.8\"]\n\n ip_configuration {\n name = \"internal\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n }\n }\n}\n`, r.template(data), data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkDNSServersUpdated(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"example\"\n primary = true\n dns_servers = [\"1.1.1.1\", \"8.8.8.8\"]\n\n ip_configuration {\n name = \"internal\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n }\n }\n}\n`, r.template(data), data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkIPForwarding(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"example\"\n primary = true\n enable_ip_forwarding = true\n\n ip_configuration {\n name = \"internal\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n }\n }\n}\n`, r.template(data), data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkIPv6(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" 
{\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"example\"\n primary = true\n\n ip_configuration {\n name = \"internal\"\n primary = true\n version = \"IPv6\"\n }\n }\n}\n`, r.template(data), data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkLoadBalancer(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_public_ip\" \"test\" {\n name = \"test-ip-%d\"\n location = azurerm_resource_group.test.location\n resource_group_name = azurerm_resource_group.test.name\n allocation_method = \"Static\"\n}\n\nresource \"azurerm_lb\" \"test\" {\n name = \"acctestlb-%d\"\n location = azurerm_resource_group.test.location\n resource_group_name = azurerm_resource_group.test.name\n\n frontend_ip_configuration {\n name = \"internal\"\n public_ip_address_id = azurerm_public_ip.test.id\n }\n}\n\nresource \"azurerm_lb_backend_address_pool\" \"test\" {\n name = \"test\"\n resource_group_name = azurerm_resource_group.test.name\n loadbalancer_id = azurerm_lb.test.id\n}\n\nresource \"azurerm_lb_nat_pool\" \"test\" {\n name = \"test\"\n resource_group_name = azurerm_resource_group.test.name\n loadbalancer_id = azurerm_lb.test.id\n frontend_ip_configuration_name = \"internal\"\n protocol = \"Tcp\"\n frontend_port_start = 80\n frontend_port_end = 81\n backend_port = 8080\n}\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"example\"\n primary = true\n\n ip_configuration {\n name = \"internal\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n load_balancer_backend_address_pool_ids = [azurerm_lb_backend_address_pool.test.id]\n load_balancer_inbound_nat_rules_ids = [azurerm_lb_nat_pool.test.id]\n }\n }\n}\n`, r.template(data), data.RandomInteger, data.RandomInteger, data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkMultipleIPConfigurations(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n 
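# NOTE: when a single NIC carries more than one ip_configuration block, the\n # provider expects exactly one of them to set primary = true; the others\n # behave as secondary private IPs on the same subnet.\n 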
name = \"internal\"\n primary = true\n\n ip_configuration {\n name = \"primary\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n }\n\n ip_configuration {\n name = \"secondary\"\n subnet_id = azurerm_subnet.test.id\n }\n }\n}\n`, r.template(data), data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkMultipleIPConfigurationsIPv6(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_D2s_v3\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"primary\"\n primary = true\n\n ip_configuration {\n name = \"first\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n version = \"IPv4\"\n }\n\n ip_configuration {\n name = \"second\"\n version = \"IPv6\"\n }\n }\n}\n`, r.template(data), data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkMultipleNICs(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"primary\"\n primary = true\n\n ip_configuration {\n name = \"internal\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n }\n }\n\n network_interface {\n name = \"secondary\"\n\n ip_configuration {\n name = \"internal\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n }\n }\n}\n`, r.template(data), data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkMultipleNICsMultipleIPConfigurations(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"primary\"\n primary = true\n\n ip_configuration {\n name = \"first\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n }\n\n ip_configuration {\n name = \"second\"\n subnet_id = azurerm_subnet.test.id\n }\n }\n\n network_interface {\n name = \"secondary\"\n\n ip_configuration {\n name = \"third\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n }\n\n ip_configuration {\n name = 
\"fourth\"\n subnet_id = azurerm_subnet.test.id\n }\n }\n}\n`, r.template(data), data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkMultipleNICsWithDifferentDNSServers(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"primary\"\n primary = true\n dns_servers = [\"8.8.8.8\"]\n\n ip_configuration {\n name = \"internal\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n }\n }\n\n network_interface {\n name = \"secondary\"\n dns_servers = [\"1.1.1.1\"]\n\n ip_configuration {\n name = \"internal\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n }\n }\n}\n`, r.template(data), data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkMultipleNICsMultiplePublicIPs(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"primary\"\n primary = true\n\n ip_configuration {\n name = \"first\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n\n public_ip_address {\n name = \"first\"\n domain_name_label = \"acctest1-%d\"\n idle_timeout_in_minutes = 4\n }\n }\n }\n\n network_interface {\n name = \"secondary\"\n\n ip_configuration {\n name = \"second\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n\n public_ip_address {\n name = \"second\"\n domain_name_label = \"acctest2-%d\"\n idle_timeout_in_minutes = 4\n }\n }\n }\n}\n`, r.template(data), data.RandomInteger, data.RandomInteger, data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkNetworkSecurityGroup(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_network_security_group\" \"test\" {\n name = \"acctestnsg-%d\"\n location = \"${azurerm_resource_group.test.location}\"\n resource_group_name = \"${azurerm_resource_group.test.name}\"\n}\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n 
network_interface {\n name = \"example\"\n primary = true\n network_security_group_id = azurerm_network_security_group.test.id\n\n ip_configuration {\n name = \"internal\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n }\n }\n}\n`, r.template(data), data.RandomInteger, data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkNetworkSecurityGroupUpdated(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_network_security_group\" \"test\" {\n name = \"acctestnsg-%d\"\n location = \"${azurerm_resource_group.test.location}\"\n resource_group_name = \"${azurerm_resource_group.test.name}\"\n}\n\nresource \"azurerm_network_security_group\" \"other\" {\n name = \"acctestnsg2-%d\"\n location = \"${azurerm_resource_group.test.location}\"\n resource_group_name = \"${azurerm_resource_group.test.name}\"\n}\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"example\"\n primary = true\n network_security_group_id = azurerm_network_security_group.other.id\n\n ip_configuration {\n name = \"internal\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n }\n }\n}\n`, r.template(data), data.RandomInteger, data.RandomInteger, data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkPrivate(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"example\"\n primary = true\n\n ip_configuration {\n name = \"internal\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n }\n }\n}\n`, r.template(data), data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkPublicIP(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"primary\"\n primary = true\n\n ip_configuration {\n name = \"first\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n\n 
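# NOTE: an inline public_ip_address block gives every scale set instance its\n # own instance-level public IP; Azure accepts idle_timeout_in_minutes\n # values between 4 and 30 minutes (4, used here, is the minimum).\n 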
public_ip_address {\n name = \"first\"\n idle_timeout_in_minutes = 4\n }\n }\n }\n}\n`, r.template(data), data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkPublicIPDomainNameLabel(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"primary\"\n primary = true\n\n ip_configuration {\n name = \"first\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n\n public_ip_address {\n name = \"first\"\n domain_name_label = \"acctestdnl-%d\"\n idle_timeout_in_minutes = 4\n }\n }\n }\n}\n`, r.template(data), data.RandomInteger, data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkPublicIPFromPrefix(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_public_ip_prefix\" \"test\" {\n name = \"acctestpublicipprefix-%d\"\n location = azurerm_resource_group.test.location\n resource_group_name = azurerm_resource_group.test.name\n}\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"primary\"\n primary = true\n\n ip_configuration {\n name = \"first\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n\n public_ip_address {\n name = \"first\"\n public_ip_prefix_id = azurerm_public_ip_prefix.test.id\n }\n }\n }\n}\n`, r.template(data), data.RandomInteger, data.RandomInteger)\n}\n\nfunc (r LinuxVirtualMachineScaleSetResource) networkPublicIPTags(data acceptance.TestData) string {\n\treturn fmt.Sprintf(`\n%s\n\nresource \"azurerm_linux_virtual_machine_scale_set\" \"test\" {\n name = \"acctestvmss-%d\"\n resource_group_name = azurerm_resource_group.test.name\n location = azurerm_resource_group.test.location\n sku = \"Standard_F2\"\n instances = 1\n admin_username = \"adminuser\"\n admin_password = \"P@ssword1234!\"\n\n disable_password_authentication = false\n\n source_image_reference {\n publisher = \"Canonical\"\n offer = \"UbuntuServer\"\n sku = \"16.04-LTS\"\n version = \"latest\"\n }\n\n os_disk {\n storage_account_type = \"Standard_LRS\"\n caching = \"ReadWrite\"\n }\n\n network_interface {\n name = \"primary\"\n primary = true\n\n ip_configuration {\n name = \"first\"\n primary = true\n subnet_id = azurerm_subnet.test.id\n\n public_ip_address {\n name = \"first\"\n\n ip_tag {\n tag = \"/Sql\"\n type = \"FirstPartyUsage\"\n }\n }\n }\n }\n}\n`, r.template(data), data.RandomInteger)\n}\n", "import sys\nimport logging\nimport urlparse\nimport urllib\n\nimport 
redis\nfrom flask import Flask, current_app\nfrom flask_sslify import SSLify\nfrom werkzeug.contrib.fixers import ProxyFix\nfrom werkzeug.routing import BaseConverter\nfrom statsd import StatsClient\nfrom flask_mail import Mail\nfrom flask_limiter import Limiter\nfrom flask_limiter.util import get_ipaddr\nfrom flask_migrate import Migrate\n\nfrom redash import settings\nfrom redash.query_runner import import_query_runners\nfrom redash.destinations import import_destinations\n\n\n__version__ = '7.0.0-beta'\n\n\nimport os\nif os.environ.get(\"REMOTE_DEBUG\"):\n import ptvsd\n ptvsd.enable_attach(address=('0.0.0.0', 5678))\n\n\ndef setup_logging():\n handler = logging.StreamHandler(sys.stdout if settings.LOG_STDOUT else sys.stderr)\n formatter = logging.Formatter(settings.LOG_FORMAT)\n handler.setFormatter(formatter)\n logging.getLogger().addHandler(handler)\n logging.getLogger().setLevel(settings.LOG_LEVEL)\n\n # Make noisy libraries less noisy\n if settings.LOG_LEVEL != \"DEBUG\":\n logging.getLogger(\"passlib\").setLevel(\"ERROR\")\n logging.getLogger(\"requests.packages.urllib3\").setLevel(\"ERROR\")\n logging.getLogger(\"snowflake.connector\").setLevel(\"ERROR\")\n logging.getLogger('apiclient').setLevel(\"ERROR\")\n\n\ndef create_redis_connection():\n logging.debug(\"Creating Redis connection (%s)\", settings.REDIS_URL)\n redis_url = urlparse.urlparse(settings.REDIS_URL)\n\n if redis_url.scheme == 'redis+socket':\n qs = urlparse.parse_qs(redis_url.query)\n if 'virtual_host' in qs:\n db = qs['virtual_host'][0]\n else:\n db = 0\n\n client = redis.StrictRedis(unix_socket_path=redis_url.path, db=db)\n else:\n if redis_url.path:\n # Strip the leading '/' but keep the rest of the path, so database\n # indexes above 9 are parsed correctly.\n redis_db = redis_url.path[1:]\n else:\n redis_db = 0\n # Redis passwords might be quoted with special characters\n redis_password = redis_url.password and urllib.unquote(redis_url.password)\n client = redis.StrictRedis(host=redis_url.hostname, port=redis_url.port, db=redis_db, password=redis_password)\n\n return client\n\n\nsetup_logging()\nredis_connection = create_redis_connection()\n\nmail = Mail()\nmigrate = Migrate()\nmail.init_mail(settings.all_settings())\nstatsd_client = StatsClient(host=settings.STATSD_HOST, port=settings.STATSD_PORT, prefix=settings.STATSD_PREFIX)\nlimiter = Limiter(key_func=get_ipaddr, storage_uri=settings.LIMITER_STORAGE)\n\nimport_query_runners(settings.QUERY_RUNNERS)\nimport_destinations(settings.DESTINATIONS)\n\nfrom redash.version_check import reset_new_version_status\nreset_new_version_status()\n\n\nclass SlugConverter(BaseConverter):\n def to_python(self, value):\n # This is a workaround for when we enable multi-org and some files are being called by the index rule:\n # for path in settings.STATIC_ASSETS_PATHS:\n # full_path = safe_join(path, value)\n # if os.path.isfile(full_path):\n # raise ValidationError()\n\n return value\n\n def to_url(self, value):\n return value\n\n\ndef create_app():\n from redash import authentication, extensions, handlers\n from redash.handlers.webpack import configure_webpack\n from redash.handlers import chrome_logger\n from redash.models import db, users\n from redash.metrics.request import provision_app\n from redash.utils import sentry\n\n sentry.init()\n\n app = Flask(__name__,\n template_folder=settings.STATIC_ASSETS_PATH,\n static_folder=settings.STATIC_ASSETS_PATH,\n static_path='/static')\n\n # Make sure we get the right referral address even behind proxies like nginx.\n app.wsgi_app = ProxyFix(app.wsgi_app, settings.PROXIES_COUNT)\n app.url_map.converters['org_slug'] = 
SlugConverter\n\n if settings.ENFORCE_HTTPS:\n SSLify(app, skips=['ping'])\n\n # configure our database\n app.config['SQLALCHEMY_DATABASE_URI'] = settings.SQLALCHEMY_DATABASE_URI\n app.config.update(settings.all_settings())\n\n provision_app(app)\n db.init_app(app)\n migrate.init_app(app, db)\n mail.init_app(app)\n authentication.init_app(app)\n limiter.init_app(app)\n handlers.init_app(app)\n configure_webpack(app)\n extensions.init_extensions(app)\n chrome_logger.init_app(app)\n users.init_app(app)\n\n return app\n", "]+$/i\n|\n| DO NOT CHANGE THIS UNLESS YOU FULLY UNDERSTAND THE REPERCUSSIONS!!\n|\n*/\n$config['permitted_uri_chars'] = 'a-z 0-9~%.:_\\-';\n\n\n/*\n|--------------------------------------------------------------------------\n| Enable Query Strings\n|--------------------------------------------------------------------------\n|\n| By default CodeIgniter uses search-engine friendly segment based URLs:\n| example.com/who/what/where/\n|\n| By default CodeIgniter enables access to the $_GET array. If for some\n| reason you would like to disable it, set 'allow_get_array' to FALSE.\n|\n| You can optionally enable standard query string based URLs:\n| example.com?who=me&what=something&where=here\n|\n| Options are: TRUE or FALSE (boolean)\n|\n| The other items let you set the query string 'words' that will\n| invoke your controllers and its functions:\n| example.com/index.php?c=controller&m=function\n|\n| Please note that some of the helpers won't work as expected when\n| this feature is enabled, since CodeIgniter is designed primarily to\n| use segment based URLs.\n|\n*/\n$config['allow_get_array'] = TRUE;\n$config['enable_query_strings'] = FALSE;\n$config['controller_trigger'] = 'c';\n$config['function_trigger'] = 'm';\n$config['directory_trigger'] = 'd';\n\n/*\n|--------------------------------------------------------------------------\n| Error Logging Threshold\n|--------------------------------------------------------------------------\n|\n| You can enable error logging by setting a threshold over zero. The\n| threshold determines what gets logged. Threshold options are:\n|\n|\t0 = Disables logging, Error logging TURNED OFF\n|\t1 = Error Messages (including PHP errors)\n|\t2 = Debug Messages\n|\t3 = Informational Messages\n|\t4 = All Messages\n|\n| You can also pass an array with threshold levels to show individual error types\n|\n| \tarray(2) = Debug Messages, without Error Messages\n|\n| For a live site you'll usually only enable Errors (1) to be logged otherwise\n| your log files will fill up very fast.\n|\n*/\n$config['log_threshold'] = 0;\n\n/*\n|--------------------------------------------------------------------------\n| Error Logging Directory Path\n|--------------------------------------------------------------------------\n|\n| Leave this BLANK unless you would like to set something other than the default\n| application/logs/ directory. Use a full server path with trailing slash.\n|\n*/\n$config['log_path'] = '';\n\n/*\n|--------------------------------------------------------------------------\n| Log File Extension\n|--------------------------------------------------------------------------\n|\n| The default filename extension for log files. 
The default 'php' allows for\n| protecting the log files via basic scripting, when they are to be stored\n| under a publicly accessible directory.\n|\n| Note: Leaving it blank will default to 'php'.\n|\n*/\n$config['log_file_extension'] = '';\n\n/*\n|--------------------------------------------------------------------------\n| Log File Permissions\n|--------------------------------------------------------------------------\n|\n| The file system permissions to be applied on newly created log files.\n|\n| IMPORTANT: This MUST be an integer (no quotes) and you MUST use octal\n| integer notation (i.e. 0700, 0644, etc.)\n*/\n$config['log_file_permissions'] = 0644;\n\n/*\n|--------------------------------------------------------------------------\n| Date Format for Logs\n|--------------------------------------------------------------------------\n|\n| Each item that is logged has an associated date. You can use PHP date\n| codes to set your own date formatting\n|\n*/\n$config['log_date_format'] = 'Y-m-d H:i:s';\n\n/*\n|--------------------------------------------------------------------------\n| Error Views Directory Path\n|--------------------------------------------------------------------------\n|\n| Leave this BLANK unless you would like to set something other than the default\n| application/views/errors/ directory. Use a full server path with trailing slash.\n|\n*/\n$config['error_views_path'] = '';\n\n/*\n|--------------------------------------------------------------------------\n| Cache Directory Path\n|--------------------------------------------------------------------------\n|\n| Leave this BLANK unless you would like to set something other than the default\n| application/cache/ directory. Use a full server path with trailing slash.\n|\n*/\n$config['cache_path'] = '';\n\n/*\n|--------------------------------------------------------------------------\n| Cache Include Query String\n|--------------------------------------------------------------------------\n|\n| Set this to TRUE if you want to use different cache files depending on the\n| URL query string. 
Please be aware this might result in numerous cache files.\n|\n*/\n$config['cache_query_string'] = FALSE;\n\n/*\n|--------------------------------------------------------------------------\n| Encryption Key\n|--------------------------------------------------------------------------\n|\n| If you use the Encryption class, you must set an encryption key.\n| See the user guide for more info.\n|\n| http://codeigniter.com/user_guide/libraries/encryption.html\n|\n*/\n$config['encryption_key'] = '';\n\n/*\n|--------------------------------------------------------------------------\n| Session Variables\n|--------------------------------------------------------------------------\n|\n| 'sess_driver'\n|\n|\tThe storage driver to use: files, database, redis, memcached\n|\n| 'sess_cookie_name'\n|\n|\tThe session cookie name, must contain only [0-9a-z_-] characters\n|\n| 'sess_expiration'\n|\n|\tThe number of SECONDS you want the session to last.\n|\tSetting to 0 (zero) means expire when the browser is closed.\n|\n| 'sess_save_path'\n|\n|\tThe location to save sessions to, driver dependent.\n|\n|\tFor the 'files' driver, it's a path to a writable directory.\n|\tWARNING: Only absolute paths are supported!\n|\n|\tFor the 'database' driver, it's a table name.\n|\tPlease consult the manual for the format used by other session drivers.\n|\n|\tIMPORTANT: You are REQUIRED to set a valid save path!\n|\n| 'sess_match_ip'\n|\n|\tWhether to match the user's IP address when reading the session data.\n|\n| 'sess_time_to_update'\n|\n|\tHow many seconds between CI regenerating the session ID.\n|\n| 'sess_regenerate_destroy'\n|\n|\tWhether to destroy session data associated with the old session ID\n|\twhen auto-regenerating the session ID. When set to FALSE, the data\n|\twill be later deleted by the garbage collector.\n|\n| Other session cookie settings are shared with the rest of the application,\n| except for 'cookie_prefix' and 'cookie_httponly', which are ignored here.\n|\n*/\n$config['sess_driver'] = 'files';\n$config['sess_cookie_name'] = 'ci_session';\n$config['sess_expiration'] = 7200;\n$config['sess_save_path'] = NULL;\n$config['sess_match_ip'] = FALSE;\n$config['sess_time_to_update'] = 300;\n$config['sess_regenerate_destroy'] = FALSE;\n\n/*\n|--------------------------------------------------------------------------\n| Cookie Related Variables\n|--------------------------------------------------------------------------\n|\n| 'cookie_prefix' = Set a cookie name prefix if you need to avoid collisions\n| 'cookie_domain' = Set to .your-domain.com for site-wide cookies\n| 'cookie_path' = Typically will be a forward slash\n| 'cookie_secure' = Cookie will only be set if a secure HTTPS connection exists.\n| 'cookie_httponly' = Cookie will only be accessible via HTTP(S) (no javascript)\n|\n| Note: These settings (with the exception of 'cookie_prefix' and\n| 'cookie_httponly') will also affect sessions.\n|\n*/\n$config['cookie_prefix']\t= '';\n$config['cookie_domain']\t= '';\n$config['cookie_path']\t\t= '/';\n$config['cookie_secure']\t= FALSE;\n$config['cookie_httponly'] \t= FALSE;\n\n/*\n|--------------------------------------------------------------------------\n| Standardize newlines\n|--------------------------------------------------------------------------\n|\n| Determines whether to standardize newline characters in input data,\n| meaning to replace \\r\\n, \\r, \\n occurrences with the PHP_EOL value.\n|\n| This is particularly useful for portability between UNIX-based OSes\n| (usually \\n) and Windows 
(\\r\\n).\n|\n*/\n$config['standardize_newlines'] = FALSE;\n\n/*\n|--------------------------------------------------------------------------\n| Global XSS Filtering\n|--------------------------------------------------------------------------\n|\n| Determines whether the XSS filter is always active when GET, POST or\n| COOKIE data is encountered\n|\n| WARNING: This feature is DEPRECATED and currently available only\n| for backwards compatibility purposes!\n|\n*/\n$config['global_xss_filtering'] = FALSE;\n\n/*\n|--------------------------------------------------------------------------\n| Cross Site Request Forgery\n|--------------------------------------------------------------------------\n| Enables a CSRF cookie token to be set. When set to TRUE, token will be\n| checked on a submitted form. If you are accepting user data, it is strongly\n| recommended CSRF protection be enabled.\n|\n| 'csrf_token_name' = The token name\n| 'csrf_cookie_name' = The cookie name\n| 'csrf_expire' = The number in seconds the token should expire.\n| 'csrf_regenerate' = Regenerate token on every submission\n| 'csrf_exclude_uris' = Array of URIs which ignore CSRF checks\n*/\n$config['csrf_protection'] = FALSE;\n$config['csrf_token_name'] = 'csrf_test_name';\n$config['csrf_cookie_name'] = 'csrf_cookie_name';\n$config['csrf_expire'] = 7200;\n$config['csrf_regenerate'] = TRUE;\n$config['csrf_exclude_uris'] = array();\n\n/*\n|--------------------------------------------------------------------------\n| Output Compression\n|--------------------------------------------------------------------------\n|\n| Enables Gzip output compression for faster page loads. When enabled,\n| the output class will test whether your server supports Gzip.\n| Even if it does, however, not all browsers support compression\n| so enable only if you are reasonably sure your visitors can handle it.\n|\n| Only used if zlib.output_compression is turned off in your php.ini.\n| Please do not use it together with httpd-level output compression.\n|\n| VERY IMPORTANT: If you are getting a blank page when compression is enabled it\n| means you are prematurely outputting something to your browser. It could\n| even be a line of whitespace at the end of one of your scripts. For\n| compression to work, nothing can be sent before the output buffer is called\n| by the output class. Do not 'echo' any values with compression enabled.\n|\n*/\n$config['compress_output'] = FALSE;\n\n/*\n|--------------------------------------------------------------------------\n| Master Time Reference\n|--------------------------------------------------------------------------\n|\n| Options are 'local' or any PHP supported timezone. This preference tells\n| the system whether to use your server's local time as the master 'now'\n| reference, or convert it to the configured one timezone. See the 'date\n| helper' page of the user guide for information regarding date handling.\n|\n*/\n$config['time_reference'] = 'local';\n\n/*\n|--------------------------------------------------------------------------\n| Rewrite PHP Short Tags\n|--------------------------------------------------------------------------\n|\n| If your PHP installation does not have short tag support enabled CI\n| can rewrite the tags on-the-fly, enabling you to utilize that syntax\n| in your view files. 
Options are TRUE or FALSE (boolean)\n|\n*/\n$config['rewrite_short_tags'] = FALSE;\n\n\n/*\n|--------------------------------------------------------------------------\n| Reverse Proxy IPs\n|--------------------------------------------------------------------------\n|\n| If your server is behind a reverse proxy, you must whitelist the proxy\n| IP addresses from which CodeIgniter should trust headers such as\n| HTTP_X_FORWARDED_FOR and HTTP_CLIENT_IP in order to properly identify\n| the visitor's IP address.\n|\n| You can use either an array or a comma-separated list of proxy addresses,\n| as well as specifying whole subnets. Here are a few examples:\n|\n| Comma-separated:\t'10.0.1.200,192.168.5.0/24'\n| Array:\t\tarray('10.0.1.200', '192.168.5.0/24')\n*/\n$config['proxy_ips'] = '';\n", "#!/usr/bin/env python\n\n# Copyright 2015 The Kubernetes Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport os\nimport re\nimport random\nimport shutil\nimport socket\nimport string\nimport json\nimport ipaddress\n\nimport charms.leadership\n\nfrom shlex import split\nfrom subprocess import check_call\nfrom subprocess import check_output\nfrom subprocess import CalledProcessError\n\nfrom charms import layer\nfrom charms.layer import snap\nfrom charms.reactive import hook\nfrom charms.reactive import remove_state\nfrom charms.reactive import set_state\nfrom charms.reactive import is_state\nfrom charms.reactive import when, when_any, when_not\nfrom charms.reactive.helpers import data_changed, any_file_changed\nfrom charms.kubernetes.common import get_version\nfrom charms.kubernetes.common import retry\nfrom charms.kubernetes.flagmanager import FlagManager\n\nfrom charmhelpers.core import hookenv\nfrom charmhelpers.core import host\nfrom charmhelpers.core import unitdata\nfrom charmhelpers.core.host import service_stop\nfrom charmhelpers.core.templating import render\nfrom charmhelpers.fetch import apt_install\nfrom charmhelpers.contrib.charmsupport import nrpe\n\n\n# Override the default nagios shortname regex to allow periods, which we\n# need because our bin names contain them (e.g. 'snap.foo.daemon'). The\n# default regex in charmhelpers doesn't allow periods, but nagios itself does.\nnrpe.Check.shortname_re = '[\\.A-Za-z0-9-_]+$'\n\nos.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')\n\n\ndef service_cidr():\n ''' Return the charm's service-cidr config '''\n db = unitdata.kv()\n frozen_cidr = db.get('kubernetes-master.service-cidr')\n return frozen_cidr or hookenv.config('service-cidr')\n\n\ndef freeze_service_cidr():\n ''' Freeze the service CIDR. Once the apiserver has started, we can no\n longer safely change this value. 
'''\n db = unitdata.kv()\n db.set('kubernetes-master.service-cidr', service_cidr())\n\n\n@hook('upgrade-charm')\ndef reset_states_for_delivery():\n '''An upgrade charm event was triggered by Juju, react to that here.'''\n migrate_from_pre_snaps()\n install_snaps()\n set_state('reconfigure.authentication.setup')\n remove_state('authentication.setup')\n\n\ndef rename_file_idempotent(source, destination):\n if os.path.isfile(source):\n os.rename(source, destination)\n\n\ndef migrate_from_pre_snaps():\n # remove old states\n remove_state('kubernetes.components.installed')\n remove_state('kubernetes.dashboard.available')\n remove_state('kube-dns.available')\n remove_state('kubernetes-master.app_version.set')\n\n # disable old services\n services = ['kube-apiserver',\n 'kube-controller-manager',\n 'kube-scheduler']\n for service in services:\n hookenv.log('Stopping {0} service.'.format(service))\n host.service_stop(service)\n\n # rename auth files\n os.makedirs('/root/cdk', exist_ok=True)\n rename_file_idempotent('/etc/kubernetes/serviceaccount.key',\n '/root/cdk/serviceaccount.key')\n rename_file_idempotent('/srv/kubernetes/basic_auth.csv',\n '/root/cdk/basic_auth.csv')\n rename_file_idempotent('/srv/kubernetes/known_tokens.csv',\n '/root/cdk/known_tokens.csv')\n\n # cleanup old files\n files = [\n \"/lib/systemd/system/kube-apiserver.service\",\n \"/lib/systemd/system/kube-controller-manager.service\",\n \"/lib/systemd/system/kube-scheduler.service\",\n \"/etc/default/kube-defaults\",\n \"/etc/default/kube-apiserver.defaults\",\n \"/etc/default/kube-controller-manager.defaults\",\n \"/etc/default/kube-scheduler.defaults\",\n \"/srv/kubernetes\",\n \"/home/ubuntu/kubectl\",\n \"/usr/local/bin/kubectl\",\n \"/usr/local/bin/kube-apiserver\",\n \"/usr/local/bin/kube-controller-manager\",\n \"/usr/local/bin/kube-scheduler\",\n \"/etc/kubernetes\"\n ]\n for file in files:\n if os.path.isdir(file):\n hookenv.log(\"Removing directory: \" + file)\n shutil.rmtree(file)\n elif os.path.isfile(file):\n hookenv.log(\"Removing file: \" + file)\n os.remove(file)\n\n # clear the flag managers\n FlagManager('kube-apiserver').destroy_all()\n FlagManager('kube-controller-manager').destroy_all()\n FlagManager('kube-scheduler').destroy_all()\n\n\ndef install_snaps():\n channel = hookenv.config('channel')\n hookenv.status_set('maintenance', 'Installing kubectl snap')\n snap.install('kubectl', channel=channel, classic=True)\n hookenv.status_set('maintenance', 'Installing kube-apiserver snap')\n snap.install('kube-apiserver', channel=channel)\n hookenv.status_set('maintenance',\n 'Installing kube-controller-manager snap')\n snap.install('kube-controller-manager', channel=channel)\n hookenv.status_set('maintenance', 'Installing kube-scheduler snap')\n snap.install('kube-scheduler', channel=channel)\n hookenv.status_set('maintenance', 'Installing cdk-addons snap')\n snap.install('cdk-addons', channel=channel)\n set_state('kubernetes-master.snaps.installed')\n remove_state('kubernetes-master.components.started')\n\n\n@when('config.changed.channel')\ndef channel_changed():\n install_snaps()\n\n\n@when('config.changed.client_password', 'leadership.is_leader')\ndef password_changed():\n \"\"\"Handle password change via the charms config.\"\"\"\n password = hookenv.config('client_password')\n if password == \"\" and is_state('client.password.initialised'):\n # password_changed is called during an upgrade. 
Nothing to do.\n return\n elif password == \"\":\n # Password not initialised\n password = token_generator()\n setup_basic_auth(password, \"admin\", \"admin\")\n set_state('reconfigure.authentication.setup')\n remove_state('authentication.setup')\n set_state('client.password.initialised')\n\n\n@when('cni.connected')\n@when_not('cni.configured')\ndef configure_cni(cni):\n ''' Set master configuration on the CNI relation. This lets the CNI\n subordinate know that we're the master so it can respond accordingly. '''\n cni.set_config(is_master=True, kubeconfig_path='')\n\n\n@when('leadership.is_leader')\n@when_not('authentication.setup')\ndef setup_leader_authentication():\n '''Setup basic authentication and token access for the cluster.'''\n api_opts = FlagManager('kube-apiserver')\n controller_opts = FlagManager('kube-controller-manager')\n\n service_key = '/root/cdk/serviceaccount.key'\n basic_auth = '/root/cdk/basic_auth.csv'\n known_tokens = '/root/cdk/known_tokens.csv'\n\n api_opts.add('basic-auth-file', basic_auth)\n api_opts.add('token-auth-file', known_tokens)\n hookenv.status_set('maintenance', 'Rendering authentication templates.')\n\n keys = [service_key, basic_auth, known_tokens]\n # Try first to fetch data from an old leadership broadcast.\n if not get_keys_from_leader(keys) \\\n or is_state('reconfigure.authentication.setup'):\n last_pass = get_password('basic_auth.csv', 'admin')\n setup_basic_auth(last_pass, 'admin', 'admin')\n\n if not os.path.isfile(known_tokens):\n setup_tokens(None, 'admin', 'admin')\n setup_tokens(None, 'kubelet', 'kubelet')\n setup_tokens(None, 'kube_proxy', 'kube_proxy')\n\n # Generate the default service account token key\n os.makedirs('/root/cdk', exist_ok=True)\n if not os.path.isfile(service_key):\n cmd = ['openssl', 'genrsa', '-out', service_key,\n '2048']\n check_call(cmd)\n remove_state('reconfigure.authentication.setup')\n\n api_opts.add('service-account-key-file', service_key)\n controller_opts.add('service-account-private-key-file', service_key)\n\n # read service account key for syndication\n leader_data = {}\n for f in [known_tokens, basic_auth, service_key]:\n with open(f, 'r') as fp:\n leader_data[f] = fp.read()\n\n # this is slightly opaque, but we are sending file contents under its file\n # path as a key.\n # eg:\n # {'/root/cdk/serviceaccount.key': 'RSA:2471731...'}\n charms.leadership.leader_set(leader_data)\n remove_state('kubernetes-master.components.started')\n set_state('authentication.setup')\n\n\n@when_not('leadership.is_leader')\ndef setup_non_leader_authentication():\n\n service_key = '/root/cdk/serviceaccount.key'\n basic_auth = '/root/cdk/basic_auth.csv'\n known_tokens = '/root/cdk/known_tokens.csv'\n\n keys = [service_key, basic_auth, known_tokens]\n # The source of truth for non-leaders is the leader.\n # Therefore we overwrite_local with whatever the leader has.\n if not get_keys_from_leader(keys, overwrite_local=True):\n # the keys were not retrieved. 
Non-leaders have to retry.\n return\n\n if not any_file_changed(keys) and is_state('authentication.setup'):\n # No change detected and we have already set up authentication\n return\n\n hookenv.status_set('maintenance', 'Rendering authentication templates.')\n api_opts = FlagManager('kube-apiserver')\n api_opts.add('basic-auth-file', basic_auth)\n api_opts.add('token-auth-file', known_tokens)\n api_opts.add('service-account-key-file', service_key)\n\n controller_opts = FlagManager('kube-controller-manager')\n controller_opts.add('service-account-private-key-file', service_key)\n\n remove_state('kubernetes-master.components.started')\n set_state('authentication.setup')\n\n\ndef get_keys_from_leader(keys, overwrite_local=False):\n \"\"\"\n Gets the broadcasted keys from the leader and stores them in\n the corresponding files.\n\n Args:\n keys: list of keys. Keys are actually files on the FS.\n\n Returns: True if all keys were fetched, False if not.\n\n \"\"\"\n # This races with other codepaths, and seems to require being created first\n # This block may be extracted later, but for now seems to work as intended\n os.makedirs('/root/cdk', exist_ok=True)\n\n for k in keys:\n # If the path does not exist, assume we need it\n if not os.path.exists(k) or overwrite_local:\n # Fetch data from leadership broadcast\n contents = charms.leadership.leader_get(k)\n # Default to logging the warning and wait for leader data to be set\n if contents is None:\n msg = \"Waiting on leader's crypto keys.\"\n hookenv.status_set('waiting', msg)\n hookenv.log('Missing content for file {}'.format(k))\n return False\n # Write out the file and move on to the next item\n with open(k, 'w+') as fp:\n fp.write(contents)\n\n return True\n\n\n@when('kubernetes-master.snaps.installed')\ndef set_app_version():\n ''' Declare the application version to juju '''\n version = check_output(['kube-apiserver', '--version'])\n hookenv.application_version_set(version.split(b' v')[-1].rstrip())\n\n\n@when('cdk-addons.configured', 'kube-api-endpoint.available',\n 'kube-control.connected')\ndef idle_status(kube_api, kube_control):\n ''' Signal at the end of the run that we are running. '''\n if not all_kube_system_pods_running():\n hookenv.status_set('waiting', 'Waiting for kube-system pods to start')\n elif hookenv.config('service-cidr') != service_cidr():\n msg = 'WARN: cannot change service-cidr, still using ' + service_cidr()\n hookenv.status_set('active', msg)\n else:\n # All services should be up and running at this point. 
Double-check...\n failing_services = master_services_down()\n if len(failing_services) == 0:\n hookenv.status_set('active', 'Kubernetes master running.')\n else:\n msg = 'Stopped services: {}'.format(','.join(failing_services))\n hookenv.status_set('blocked', msg)\n\n\ndef master_services_down():\n \"\"\"Ensure master services are up and running.\n\n Return: list of failing services\"\"\"\n services = ['kube-apiserver',\n 'kube-controller-manager',\n 'kube-scheduler']\n failing_services = []\n for service in services:\n daemon = 'snap.{}.daemon'.format(service)\n if not host.service_running(daemon):\n failing_services.append(service)\n return failing_services\n\n\n@when('etcd.available', 'tls_client.server.certificate.saved',\n 'authentication.setup')\n@when_not('kubernetes-master.components.started')\ndef start_master(etcd):\n '''Run the Kubernetes master components.'''\n hookenv.status_set('maintenance',\n 'Configuring the Kubernetes master services.')\n freeze_service_cidr()\n if not etcd.get_connection_string():\n # etcd is not returning a connection string. This happens when\n # the master unit disconnects from etcd and is ready to terminate.\n # No point in trying to start master services and fail. Just return.\n return\n handle_etcd_relation(etcd)\n configure_master_services()\n hookenv.status_set('maintenance',\n 'Starting the Kubernetes master services.')\n\n services = ['kube-apiserver',\n 'kube-controller-manager',\n 'kube-scheduler']\n for service in services:\n host.service_restart('snap.%s.daemon' % service)\n\n hookenv.open_port(6443)\n set_state('kubernetes-master.components.started')\n\n\n@when('etcd.available')\ndef etcd_data_change(etcd):\n ''' Etcd scale events block master reconfiguration due to the\n kubernetes-master.components.started state. We need a way to\n handle these events consistently only when the number of etcd\n units has actually changed '''\n\n # key off of the connection string\n connection_string = etcd.get_connection_string()\n\n # If the connection string changes, remove the started state to trigger\n # handling of the master components\n if data_changed('etcd-connect', connection_string):\n remove_state('kubernetes-master.components.started')\n\n\n@when('kube-control.connected')\n@when('cdk-addons.configured')\ndef send_cluster_dns_detail(kube_control):\n ''' Send cluster DNS info '''\n # Note that the DNS server doesn't necessarily exist at this point. 
We know\n # where we're going to put it, though, so let's send the info anyway.\n dns_ip = get_dns_ip()\n kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip)\n\n\n@when('kube-control.auth.requested')\n@when('authentication.setup')\n@when('leadership.is_leader')\ndef send_tokens(kube_control):\n \"\"\"Send the tokens to the workers.\"\"\"\n kubelet_token = get_token('kubelet')\n proxy_token = get_token('kube_proxy')\n admin_token = get_token('admin')\n\n # Send the data\n requests = kube_control.auth_user()\n for request in requests:\n kube_control.sign_auth_request(request[0], kubelet_token,\n proxy_token, admin_token)\n\n\n@when_not('kube-control.connected')\ndef missing_kube_control():\n \"\"\"Inform the operator that the master is waiting for a relation to workers.\n\n If deploying via bundle this won't happen, but if the operator is upgrading\n a charm in a deployment that pre-dates the kube-control relation, it'll be\n missing.\n\n \"\"\"\n hookenv.status_set('blocked', 'Waiting for workers.')\n\n\n@when('kube-api-endpoint.available')\ndef push_service_data(kube_api):\n ''' Send configuration to the load balancer, and close access to the\n public interface '''\n kube_api.configure(port=6443)\n\n\n@when('certificates.available')\ndef send_data(tls):\n '''Send the data that is required to create a server certificate for\n this server.'''\n # Use the public ip of this unit as the Common Name for the certificate.\n common_name = hookenv.unit_public_ip()\n\n # Get the SDN gateway based on the cidr address.\n kubernetes_service_ip = get_kubernetes_service_ip()\n\n domain = hookenv.config('dns_domain')\n # Create SANs that the tls layer will add to the server cert.\n sans = [\n hookenv.unit_public_ip(),\n hookenv.unit_private_ip(),\n socket.gethostname(),\n kubernetes_service_ip,\n 'kubernetes',\n 'kubernetes.{0}'.format(domain),\n 'kubernetes.default',\n 'kubernetes.default.svc',\n 'kubernetes.default.svc.{0}'.format(domain)\n ]\n # Create a path safe name by removing path characters from the unit name.\n certificate_name = hookenv.local_unit().replace('/', '_')\n # Request a server cert with this information.\n tls.request_server_cert(common_name, sans, certificate_name)\n\n\n@when('kubernetes-master.components.started')\ndef configure_cdk_addons():\n ''' Configure CDK addons '''\n remove_state('cdk-addons.configured')\n dbEnabled = str(hookenv.config('enable-dashboard-addons')).lower()\n args = [\n 'arch=' + arch(),\n 'dns-ip=' + get_dns_ip(),\n 'dns-domain=' + hookenv.config('dns_domain'),\n 'enable-dashboard=' + dbEnabled\n ]\n check_call(['snap', 'set', 'cdk-addons'] + args)\n if not addons_ready():\n hookenv.status_set('waiting', 'Waiting to retry addon deployment')\n remove_state('cdk-addons.configured')\n return\n\n set_state('cdk-addons.configured')\n\n\n@retry(times=3, delay_secs=20)\ndef addons_ready():\n \"\"\"\n Test if the add-ons got installed\n\n Returns: True if the addons got applied\n\n \"\"\"\n try:\n check_call(['cdk-addons.apply'])\n return True\n except CalledProcessError:\n hookenv.log(\"Addons are not ready yet.\")\n return False\n\n\n@when('loadbalancer.available', 'certificates.ca.available',\n 'certificates.client.cert.available', 'authentication.setup')\ndef loadbalancer_kubeconfig(loadbalancer, ca, client):\n # Get the potential list of loadbalancers from the relation object.\n hosts = loadbalancer.get_addresses_ports()\n # Get the public address of loadbalancers so users can access the cluster.\n address = hosts[0].get('public-address')\n # Get the port of the 
loadbalancer so users can access the cluster.\n port = hosts[0].get('port')\n server = 'https://{0}:{1}'.format(address, port)\n build_kubeconfig(server)\n\n\n@when('certificates.ca.available', 'certificates.client.cert.available',\n 'authentication.setup')\n@when_not('loadbalancer.available')\ndef create_self_config(ca, client):\n '''Create a kubernetes configuration for the master unit.'''\n server = 'https://{0}:{1}'.format(hookenv.unit_get('public-address'), 6443)\n build_kubeconfig(server)\n\n\n@when('ceph-storage.available')\ndef ceph_state_control(ceph_admin):\n ''' Determine if we should remove the state that controls the re-render\n and execution of the ceph-relation-changed event because there\n are changes in the relationship data, and we should re-render any\n configs, keys, and/or service pre-reqs '''\n\n ceph_relation_data = {\n 'mon_hosts': ceph_admin.mon_hosts(),\n 'fsid': ceph_admin.fsid(),\n 'auth_supported': ceph_admin.auth(),\n 'hostname': socket.gethostname(),\n 'key': ceph_admin.key()\n }\n\n # Re-execute the rendering if the data has changed.\n if data_changed('ceph-config', ceph_relation_data):\n remove_state('ceph-storage.configured')\n\n\n@when('ceph-storage.available')\n@when_not('ceph-storage.configured')\ndef ceph_storage(ceph_admin):\n '''Ceph on kubernetes will require a few things - namely a ceph\n configuration, and the ceph secret key file used for authentication.\n This method will install the client package, and render the requisite files\n in order to consume the ceph-storage relation.'''\n ceph_context = {\n 'mon_hosts': ceph_admin.mon_hosts(),\n 'fsid': ceph_admin.fsid(),\n 'auth_supported': ceph_admin.auth(),\n 'use_syslog': \"true\",\n 'ceph_public_network': '',\n 'ceph_cluster_network': '',\n 'loglevel': 1,\n 'hostname': socket.gethostname(),\n }\n # Install the ceph common utilities.\n apt_install(['ceph-common'], fatal=True)\n\n etc_ceph_directory = '/etc/ceph'\n if not os.path.isdir(etc_ceph_directory):\n os.makedirs(etc_ceph_directory)\n charm_ceph_conf = os.path.join(etc_ceph_directory, 'ceph.conf')\n # Render the ceph configuration from the ceph conf template\n render('ceph.conf', charm_ceph_conf, ceph_context)\n\n # The key can rotate independently of other ceph config, so validate it\n admin_key = os.path.join(etc_ceph_directory,\n 'ceph.client.admin.keyring')\n try:\n with open(admin_key, 'w') as key_file:\n key_file.write(\"[client.admin]\\n\\tkey = {}\\n\".format(\n ceph_admin.key()))\n except IOError as err:\n hookenv.log(\"IOError writing admin.keyring: {}\".format(err))\n\n # Enlist the ceph-admin key as a kubernetes secret\n if ceph_admin.key():\n encoded_key = base64.b64encode(ceph_admin.key().encode('utf-8'))\n else:\n # We didn't have a key, and cannot proceed. Do not set state and\n # allow this method to re-execute\n return\n\n context = {'secret': encoded_key.decode('ascii')}\n render('ceph-secret.yaml', '/tmp/ceph-secret.yaml', context)\n try:\n # At first glance this is deceptive. 
The apply stanza will create if\n # it doesn't exist, otherwise it will update the entry, ensuring our\n # ceph-secret is always reflective of what we have in /etc/ceph\n # assuming we have invoked this anytime that file would change.\n cmd = ['kubectl', 'apply', '-f', '/tmp/ceph-secret.yaml']\n check_call(cmd)\n os.remove('/tmp/ceph-secret.yaml')\n except CalledProcessError:\n # the enlistment in kubernetes failed, return and prepare for re-exec\n return\n\n # when complete, set a state relating to configuration of the storage\n # backend that will allow other modules to hook into this and verify we\n # have performed the necessary pre-req steps to interface with a ceph\n # deployment.\n set_state('ceph-storage.configured')\n\n\n@when('nrpe-external-master.available')\n@when_not('nrpe-external-master.initial-config')\ndef initial_nrpe_config(nagios=None):\n set_state('nrpe-external-master.initial-config')\n update_nrpe_config(nagios)\n\n\n@when('kubernetes-master.components.started')\n@when('nrpe-external-master.available')\n@when_any('config.changed.nagios_context',\n 'config.changed.nagios_servicegroups')\ndef update_nrpe_config(unused=None):\n services = (\n 'snap.kube-apiserver.daemon',\n 'snap.kube-controller-manager.daemon',\n 'snap.kube-scheduler.daemon'\n )\n hostname = nrpe.get_nagios_hostname()\n current_unit = nrpe.get_nagios_unit_name()\n nrpe_setup = nrpe.NRPE(hostname=hostname)\n nrpe.add_init_service_checks(nrpe_setup, services, current_unit)\n nrpe_setup.write()\n\n\n@when_not('nrpe-external-master.available')\n@when('nrpe-external-master.initial-config')\ndef remove_nrpe_config(nagios=None):\n remove_state('nrpe-external-master.initial-config')\n\n # List of systemd services for which the checks will be removed\n services = (\n 'snap.kube-apiserver.daemon',\n 'snap.kube-controller-manager.daemon',\n 'snap.kube-scheduler.daemon'\n )\n\n # The current nrpe-external-master interface doesn't handle a lot of logic,\n # use the charm-helpers code for now.\n hostname = nrpe.get_nagios_hostname()\n nrpe_setup = nrpe.NRPE(hostname=hostname)\n\n for service in services:\n nrpe_setup.remove_check(shortname=service)\n\n\ndef is_privileged():\n \"\"\"Return boolean indicating whether or not to set allow-privileged=true.\n\n \"\"\"\n privileged = hookenv.config('allow-privileged')\n if privileged == 'auto':\n return is_state('kubernetes-master.gpu.enabled')\n else:\n return privileged == 'true'\n\n\n@when('config.changed.allow-privileged')\n@when('kubernetes-master.components.started')\ndef on_config_allow_privileged_change():\n \"\"\"React to changed 'allow-privileged' config value.\n\n \"\"\"\n remove_state('kubernetes-master.components.started')\n remove_state('config.changed.allow-privileged')\n\n\n@when('kube-control.gpu.available')\n@when('kubernetes-master.components.started')\n@when_not('kubernetes-master.gpu.enabled')\ndef on_gpu_available(kube_control):\n \"\"\"The remote side (kubernetes-worker) is gpu-enabled.\n\n We need to run in privileged mode.\n\n \"\"\"\n config = hookenv.config()\n if config['allow-privileged'] == \"false\":\n hookenv.status_set(\n 'active',\n 'GPUs available. 
Set allow-privileged=\"auto\" to enable.'\n )\n return\n\n remove_state('kubernetes-master.components.started')\n set_state('kubernetes-master.gpu.enabled')\n\n\n@when('kubernetes-master.gpu.enabled')\n@when_not('kubernetes-master.privileged')\ndef disable_gpu_mode():\n \"\"\"We were in gpu mode, but the operator has set allow-privileged=\"false\",\n so we can't run in gpu mode anymore.\n\n \"\"\"\n remove_state('kubernetes-master.gpu.enabled')\n\n\n@hook('stop')\ndef shutdown():\n \"\"\" Stop the kubernetes master services.\n\n \"\"\"\n service_stop('snap.kube-apiserver.daemon')\n service_stop('snap.kube-controller-manager.daemon')\n service_stop('snap.kube-scheduler.daemon')\n\n\ndef arch():\n '''Return the package architecture as a string. Raise an exception if the\n architecture is not supported by kubernetes.'''\n # Get the package architecture for this system.\n architecture = check_output(['dpkg', '--print-architecture']).rstrip()\n # Convert the binary result into a string.\n architecture = architecture.decode('utf-8')\n return architecture\n\n\ndef build_kubeconfig(server):\n '''Gather the relevant data for Kubernetes configuration objects and create\n a config object with that information.'''\n # Get the options from the tls-client layer.\n layer_options = layer.options('tls-client')\n # Get all the paths to the tls information required for kubeconfig.\n ca = layer_options.get('ca_certificate_path')\n ca_exists = ca and os.path.isfile(ca)\n client_pass = get_password('basic_auth.csv', 'admin')\n # Do we have everything we need?\n if ca_exists and client_pass:\n # Create an absolute path for the kubeconfig file.\n kubeconfig_path = os.path.join(os.sep, 'home', 'ubuntu', 'config')\n # Create the kubeconfig on this system so users can access the cluster.\n create_kubeconfig(kubeconfig_path, server, ca,\n user='admin', password=client_pass)\n # Make the config file readable by the ubuntu user so juju scp works.\n cmd = ['chown', 'ubuntu:ubuntu', kubeconfig_path]\n check_call(cmd)\n\n\ndef create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,\n user='ubuntu', context='juju-context',\n cluster='juju-cluster', password=None, token=None):\n '''Create a configuration for Kubernetes at the given path using the\n supplied arguments for the values of the Kubernetes server, CA, key,\n certificate, user, context and cluster.'''\n if not key and not certificate and not password and not token:\n raise ValueError('Missing authentication mechanism.')\n\n # token and password are mutually exclusive. Error early if both are\n # present. 
The developer has requested an impossible situation.\n # see: kubectl config set-credentials --help\n if token and password:\n raise ValueError('Token and Password are mutually exclusive.')\n # Create the config file with the address of the master server.\n cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \\\n '--server={2} --certificate-authority={3} --embed-certs=true'\n check_call(split(cmd.format(kubeconfig, cluster, server, ca)))\n # Delete old users.\n cmd = 'kubectl config --kubeconfig={0} unset users'\n check_call(split(cmd.format(kubeconfig)))\n # Create the credentials using the client flags.\n cmd = 'kubectl config --kubeconfig={0} ' \\\n 'set-credentials {1} '.format(kubeconfig, user)\n\n if key and certificate:\n cmd = '{0} --client-key={1} --client-certificate={2} '\\\n '--embed-certs=true'.format(cmd, key, certificate)\n if password:\n cmd = \"{0} --username={1} --password={2}\".format(cmd, user, password)\n # This is mutually exclusive with password. They will not work together.\n if token:\n cmd = \"{0} --token={1}\".format(cmd, token)\n check_call(split(cmd))\n # Create a default context with the cluster.\n cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \\\n '--cluster={2} --user={3}'\n check_call(split(cmd.format(kubeconfig, context, cluster, user)))\n # Make the config use this new context.\n cmd = 'kubectl config --kubeconfig={0} use-context {1}'\n check_call(split(cmd.format(kubeconfig, context)))\n\n\ndef get_dns_ip():\n '''Get an IP address for the DNS server on the provided cidr.'''\n interface = ipaddress.IPv4Interface(service_cidr())\n # The DNS server lives at the .10 address of the service network.\n ip = interface.network.network_address + 10\n return ip.exploded\n\n\ndef get_kubernetes_service_ip():\n '''Get the IP address for the kubernetes service based on the cidr.'''\n interface = ipaddress.IPv4Interface(service_cidr())\n # The kubernetes service lives at the .1 address of the service network.\n ip = interface.network.network_address + 1\n return ip.exploded\n\n\ndef handle_etcd_relation(reldata):\n ''' Save the client credentials and set appropriate daemon flags when\n etcd declares itself as available.'''\n connection_string = reldata.get_connection_string()\n # Define where the etcd tls files will be kept.\n etcd_dir = '/root/cdk/etcd'\n # Create paths to the etcd client ca, key, and cert file locations.\n ca = os.path.join(etcd_dir, 'client-ca.pem')\n key = os.path.join(etcd_dir, 'client-key.pem')\n cert = os.path.join(etcd_dir, 'client-cert.pem')\n\n # Save the client credentials (in relation data) to the paths provided.\n reldata.save_client_credentials(key, cert, ca)\n\n api_opts = FlagManager('kube-apiserver')\n\n # Never use stale data; always prefer what's coming in during context\n # building. 
If it's stale, it's because what's in unitdata is stale.\n data = api_opts.data\n if data.get('etcd-servers-strict') or data.get('etcd-servers'):\n api_opts.destroy('etcd-cafile')\n api_opts.destroy('etcd-keyfile')\n api_opts.destroy('etcd-certfile')\n api_opts.destroy('etcd-servers', strict=True)\n api_opts.destroy('etcd-servers')\n\n # Set the apiserver flags in the options manager.\n api_opts.add('etcd-cafile', ca)\n api_opts.add('etcd-keyfile', key)\n api_opts.add('etcd-certfile', cert)\n api_opts.add('etcd-servers', connection_string, strict=True)\n\n\ndef configure_master_services():\n ''' Add remaining flags for the master services and configure snaps to use\n them. '''\n\n api_opts = FlagManager('kube-apiserver')\n controller_opts = FlagManager('kube-controller-manager')\n scheduler_opts = FlagManager('kube-scheduler')\n\n # Get the tls paths from the layer data.\n layer_options = layer.options('tls-client')\n ca_cert_path = layer_options.get('ca_certificate_path')\n client_cert_path = layer_options.get('client_certificate_path')\n client_key_path = layer_options.get('client_key_path')\n server_cert_path = layer_options.get('server_certificate_path')\n server_key_path = layer_options.get('server_key_path')\n\n if is_privileged():\n api_opts.add('allow-privileged', 'true', strict=True)\n set_state('kubernetes-master.privileged')\n else:\n api_opts.add('allow-privileged', 'false', strict=True)\n remove_state('kubernetes-master.privileged')\n\n # Handle static options for now.\n api_opts.add('service-cluster-ip-range', service_cidr())\n api_opts.add('min-request-timeout', '300')\n api_opts.add('v', '4')\n api_opts.add('tls-cert-file', server_cert_path)\n api_opts.add('tls-private-key-file', server_key_path)\n api_opts.add('kubelet-certificate-authority', ca_cert_path)\n api_opts.add('kubelet-client-certificate', client_cert_path)\n api_opts.add('kubelet-client-key', client_key_path)\n api_opts.add('logtostderr', 'true')\n api_opts.add('insecure-bind-address', '127.0.0.1')\n api_opts.add('insecure-port', '8080')\n api_opts.add('storage-backend', 'etcd2') # FIXME: add etcd3 support\n admission_control = [\n 'Initializers',\n 'NamespaceLifecycle',\n 'LimitRanger',\n 'ServiceAccount',\n 'ResourceQuota',\n 'DefaultTolerationSeconds'\n ]\n\n if get_version('kube-apiserver') < (1, 6):\n hookenv.log('Removing DefaultTolerationSeconds from admission-control')\n admission_control.remove('DefaultTolerationSeconds')\n if get_version('kube-apiserver') < (1, 7):\n hookenv.log('Removing Initializers from admission-control')\n admission_control.remove('Initializers')\n api_opts.add('admission-control', ','.join(admission_control), strict=True)\n\n # Default to 3 minute resync. 
TODO: Make this configurable?\n controller_opts.add('min-resync-period', '3m')\n controller_opts.add('v', '2')\n controller_opts.add('root-ca-file', ca_cert_path)\n controller_opts.add('logtostderr', 'true')\n controller_opts.add('master', 'http://127.0.0.1:8080')\n\n scheduler_opts.add('v', '2')\n scheduler_opts.add('logtostderr', 'true')\n scheduler_opts.add('master', 'http://127.0.0.1:8080')\n\n cmd = ['snap', 'set', 'kube-apiserver'] + api_opts.to_s().split(' ')\n check_call(cmd)\n\n cmd = (\n ['snap', 'set', 'kube-controller-manager'] +\n controller_opts.to_s().split(' ')\n )\n check_call(cmd)\n cmd = ['snap', 'set', 'kube-scheduler'] + scheduler_opts.to_s().split(' ')\n check_call(cmd)\n\n\ndef setup_basic_auth(password=None, username='admin', uid='admin'):\n '''Create the htaccess file and the tokens.'''\n root_cdk = '/root/cdk'\n if not os.path.isdir(root_cdk):\n os.makedirs(root_cdk)\n htaccess = os.path.join(root_cdk, 'basic_auth.csv')\n if not password:\n password = token_generator()\n with open(htaccess, 'w') as stream:\n stream.write('{0},{1},{2}'.format(password, username, uid))\n\n\ndef setup_tokens(token, username, user):\n '''Create a token file for kubernetes authentication.'''\n root_cdk = '/root/cdk'\n if not os.path.isdir(root_cdk):\n os.makedirs(root_cdk)\n known_tokens = os.path.join(root_cdk, 'known_tokens.csv')\n if not token:\n token = token_generator()\n with open(known_tokens, 'a') as stream:\n stream.write('{0},{1},{2}\\n'.format(token, username, user))\n\n\ndef get_password(csv_fname, user):\n '''Get the password of the user within the csv file provided.'''\n root_cdk = '/root/cdk'\n tokens_fname = os.path.join(root_cdk, csv_fname)\n if not os.path.isfile(tokens_fname):\n return None\n with open(tokens_fname, 'r') as stream:\n for line in stream:\n record = line.split(',')\n if record[1] == user:\n return record[0]\n return None\n\n\ndef get_token(username):\n \"\"\"Grab a token from the static file if present. \"\"\"\n return get_password('known_tokens.csv', username)\n\n\ndef set_token(password, save_salt):\n ''' Store a token so it can be recalled later by token_generator.\n\n param: password - the password to be stored\n param: save_salt - the key under which to store the token value.'''\n db = unitdata.kv()\n db.set(save_salt, password)\n return db.get(save_salt)\n\n\ndef token_generator(length=32):\n ''' Generate a random token for use in passwords and account tokens.\n\n param: length - the length of the token to generate'''\n alpha = string.ascii_letters + string.digits\n token = ''.join(random.SystemRandom().choice(alpha) for _ in range(length))\n return token\n\n\n@retry(times=3, delay_secs=10)\ndef all_kube_system_pods_running():\n ''' Check pod status in the kube-system namespace. Returns True if all\n pods are running, False otherwise. 
'''\n cmd = ['kubectl', 'get', 'po', '-n', 'kube-system', '-o', 'json']\n\n try:\n output = check_output(cmd).decode('utf-8')\n except CalledProcessError:\n hookenv.log('failed to get kube-system pod status')\n return False\n\n result = json.loads(output)\n for pod in result['items']:\n status = pod['status']['phase']\n if status != 'Running':\n return False\n\n return True\n\n\ndef apiserverVersion():\n cmd = 'kube-apiserver --version'.split()\n version_string = check_output(cmd).decode('utf-8')\n return tuple(int(q) for q in re.findall(\"[0-9]+\", version_string)[:3])\n", "// Copyright 2013 Martini Authors\n// Copyright 2014 The Web Authors\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\"): you may\n// not use this file except in compliance with the License. You may obtain\n// a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n// License for the specific language governing permissions and limitations\n// under the License.\n\npackage web\n\nimport (\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t. \"github.com/smartystreets/goconvey/convey\"\n)\n\nfunc Test_New(t *testing.T) {\n\tConvey(\"Initialize a new instance\", t, func() {\n\t\tSo(New(), ShouldNotBeNil)\n\t})\n\n\tConvey(\"Just test that Run doesn't bomb\", t, func() {\n\t\tgo New().Run()\n\t\ttime.Sleep(1 * time.Second)\n\t\tos.Setenv(\"PORT\", \"4001\")\n\t\tgo New().Run(\"0.0.0.0\")\n\t\tgo New().Run(4002)\n\t\tgo New().Run(\"0.0.0.0\", 4003)\n\t})\n}\n\nfunc Test_Web_Before(t *testing.T) {\n\tConvey(\"Register before handlers\", t, func() {\n\t\tm := New()\n\t\tm.Before(func(rw http.ResponseWriter, req *http.Request) bool {\n\t\t\treturn false\n\t\t})\n\t\tm.Before(func(rw http.ResponseWriter, req *http.Request) bool {\n\t\t\treturn true\n\t\t})\n\t\tresp := httptest.NewRecorder()\n\t\treq, err := http.NewRequest(\"GET\", \"/\", nil)\n\t\tSo(err, ShouldBeNil)\n\t\tm.ServeHTTP(resp, req)\n\t})\n}\n\nfunc Test_Web_ServeHTTP(t *testing.T) {\n\tConvey(\"Serve HTTP requests\", t, func() {\n\t\tresult := \"\"\n\t\tm := New()\n\t\tm.Use(func(c *Context) {\n\t\t\tresult += \"foo\"\n\t\t\tc.Next()\n\t\t\tresult += \"ban\"\n\t\t})\n\t\tm.Use(func(c *Context) {\n\t\t\tresult += \"bar\"\n\t\t\tc.Next()\n\t\t\tresult += \"baz\"\n\t\t})\n\t\tm.Get(\"/\", func() {})\n\t\tm.Action(func(res http.ResponseWriter, req *http.Request) {\n\t\t\tresult += \"bat\"\n\t\t\tres.WriteHeader(http.StatusBadRequest)\n\t\t})\n\n\t\tresp := httptest.NewRecorder()\n\t\treq, err := http.NewRequest(\"GET\", \"/\", nil)\n\t\tSo(err, ShouldBeNil)\n\t\tm.ServeHTTP(resp, req)\n\t\tSo(result, ShouldEqual, \"foobarbatbazban\")\n\t\tSo(resp.Code, ShouldEqual, http.StatusBadRequest)\n\t})\n}\n\nfunc Test_Web_Handlers(t *testing.T) {\n\tConvey(\"Add custom handlers\", t, func() {\n\t\tresult := \"\"\n\t\tbatman := func(c *Context) {\n\t\t\tresult += \"batman!\"\n\t\t}\n\n\t\tm := New()\n\t\tm.Use(func(c *Context) {\n\t\t\tresult += \"foo\"\n\t\t\tc.Next()\n\t\t\tresult += \"ban\"\n\t\t})\n\t\tm.Handlers(\n\t\t\tbatman,\n\t\t\tbatman,\n\t\t\tbatman,\n\t\t)\n\n\t\tConvey(\"Add a non-callable handler\", func() {\n\t\t\tdefer func() {\n\t\t\t\tSo(recover(), ShouldNotBeNil)\n\t\t\t}()\n\t\t\tm.Use(\"not a handler\")\n\t\t})\n\n\t\tm.Get(\"/\", func() 
{})\n\t\tm.Action(func(res http.ResponseWriter, req *http.Request) {\n\t\t\tresult += \"bat\"\n\t\t\tres.WriteHeader(http.StatusBadRequest)\n\t\t})\n\n\t\tresp := httptest.NewRecorder()\n\t\treq, err := http.NewRequest(\"GET\", \"/\", nil)\n\t\tSo(err, ShouldBeNil)\n\t\tm.ServeHTTP(resp, req)\n\t\tSo(result, ShouldEqual, \"batman!batman!batman!bat\")\n\t\tSo(resp.Code, ShouldEqual, http.StatusBadRequest)\n\t})\n}\n\nfunc Test_Web_EarlyWrite(t *testing.T) {\n\tConvey(\"Write early content to response\", t, func() {\n\t\tresult := \"\"\n\t\tm := New()\n\t\tm.Use(func(res http.ResponseWriter) {\n\t\t\tresult += \"foobar\"\n\t\t\tres.Write([]byte(\"Hello world\"))\n\t\t})\n\t\tm.Use(func() {\n\t\t\tresult += \"bat\"\n\t\t})\n\t\tm.Get(\"/\", func() {})\n\t\tm.Action(func(res http.ResponseWriter) {\n\t\t\tresult += \"baz\"\n\t\t\tres.WriteHeader(http.StatusBadRequest)\n\t\t})\n\n\t\tresp := httptest.NewRecorder()\n\t\treq, err := http.NewRequest(\"GET\", \"/\", nil)\n\t\tSo(err, ShouldBeNil)\n\t\tm.ServeHTTP(resp, req)\n\t\tSo(result, ShouldEqual, \"foobar\")\n\t\tSo(resp.Code, ShouldEqual, http.StatusOK)\n\t})\n}\n\nfunc Test_Web_Written(t *testing.T) {\n\tConvey(\"Written flag\", t, func() {\n\t\tresp := httptest.NewRecorder()\n\t\tm := New()\n\t\tm.Handlers(func(res http.ResponseWriter) {\n\t\t\tres.WriteHeader(http.StatusOK)\n\t\t})\n\n\t\tctx := m.createContext(resp, &http.Request{Method: \"GET\"})\n\t\tSo(ctx.Written(), ShouldBeFalse)\n\n\t\tctx.run()\n\t\tSo(ctx.Written(), ShouldBeTrue)\n\t})\n}\n\nfunc Test_Web_Basic_NoRace(t *testing.T) {\n\tConvey(\"Make sure no race between requests\", t, func() {\n\t\tm := New()\n\t\thandlers := []Handler{func() {}, func() {}}\n\t\t// Ensure append will not realloc to trigger the race condition\n\t\tm.handlers = handlers[:1]\n\t\tm.Get(\"/\", func() {})\n\t\tfor i := 0; i < 2; i++ {\n\t\t\tgo func() {\n\t\t\t\treq, _ := http.NewRequest(\"GET\", \"/\", nil)\n\t\t\t\tresp := httptest.NewRecorder()\n\t\t\t\tm.ServeHTTP(resp, req)\n\t\t\t}()\n\t\t}\n\t})\n}\n\nfunc Test_SetENV(t *testing.T) {\n\tConvey(\"Get and save environment variable\", t, func() {\n\t\ttests := []struct {\n\t\t\tin string\n\t\t\tout string\n\t\t}{\n\t\t\t{\"\", \"development\"},\n\t\t\t{\"not_development\", \"not_development\"},\n\t\t}\n\n\t\tm := New()\n\n\t\tfor _, test := range tests {\n\t\t\tm.SetEnv(test.in)\n\t\t\t// The env auto-corrects to 'development', 'production', or 'test'.\n\t\t\tSo(m.Env(), ShouldEqual, DEV)\n\t\t}\n\t})\n}\n", "# Copyright 2015 gRPC authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"The Python implementation of the GRPC helloworld.Greeter client.\"\"\"\n\nfrom __future__ import print_function\n\nimport logging\nfrom vinte_um import Jogador, VinteUm\nimport grpc\nimport helloworld_pb2\nimport helloworld_pb2_grpc\nimport time\nimport redis\n\ndef createLoginForm(stub):\n username = input(\"Enter your login: \")\n password = input(\"Enter your password: \")\n\n _redis = redis.Redis(\n host='localhost',\n port=6379,\n 
password='davi')\n\n _redis.set('username', username)\n value = _redis.get('username')\n print(\"value from redis:\", value)\n\n return stub.Login(helloworld_pb2.LoginRequest(username=username, password=password))\n\ndef runTurn(stub, auth_token):\n # The answer is sent to the server as-is, so keep the original\n # 'S'/'N' (sim/nao) values.\n extraCard = input(\"Do you want to draw another card? S/N: \")\n return stub.TurnAction(helloworld_pb2.TurnRequest(auth_token=auth_token, dig=extraCard))\n\ndef run():\n # NOTE(gRPC Python Team): .close() is possible on a channel and should be\n # used in circumstances in which the with statement does not fit the needs\n # of the code.\n with grpc.insecure_channel('0.0.0.0:50051') as channel:\n stub = helloworld_pb2_grpc.GreeterStub(channel)\n login = createLoginForm(stub)\n print(\"Your cards are: \", login.message)\n\n while True:\n turnResponse = runTurn(stub, login.auth_token)\n print(\"Your cards are: \", turnResponse.cards)\n if turnResponse.message:\n print(turnResponse.message)\n if turnResponse.playing == \"False\":\n break\n winner = stub.VerifyTurn(helloworld_pb2.VerifyTurnRequest(auth_token=login.auth_token))\n print(winner.message)\n\n\nif __name__ == '__main__':\n logging.basicConfig()\n run()\n"]