| hexsha (stringlengths 40-40) | size (int64 5-1.05M) | ext (stringclasses, 588 values) | lang (stringclasses, 305 values) | max_stars_repo_path (stringlengths 3-363) | max_stars_repo_name (stringlengths 5-118) | max_stars_repo_head_hexsha (stringlengths 40-40) | max_stars_repo_licenses (listlengths 1-10) | max_stars_count (float64 1-191k ⌀) | max_stars_repo_stars_event_min_datetime (stringdate 2015-01-01 00:00:35 to 2022-03-31 23:43:49 ⌀) | max_stars_repo_stars_event_max_datetime (stringdate 2015-01-01 12:37:38 to 2022-03-31 23:59:52 ⌀) | max_issues_repo_path (stringlengths 3-363) | max_issues_repo_name (stringlengths 5-118) | max_issues_repo_head_hexsha (stringlengths 40-40) | max_issues_repo_licenses (listlengths 1-10) | max_issues_count (float64 1-134k ⌀) | max_issues_repo_issues_event_min_datetime (stringlengths 24-24 ⌀) | max_issues_repo_issues_event_max_datetime (stringlengths 24-24 ⌀) | max_forks_repo_path (stringlengths 3-363) | max_forks_repo_name (stringlengths 5-135) | max_forks_repo_head_hexsha (stringlengths 40-40) | max_forks_repo_licenses (listlengths 1-10) | max_forks_count (float64 1-105k ⌀) | max_forks_repo_forks_event_min_datetime (stringdate 2015-01-01 00:01:02 to 2022-03-31 23:27:27 ⌀) | max_forks_repo_forks_event_max_datetime (stringdate 2015-01-03 08:55:07 to 2022-03-31 23:59:24 ⌀) | content (stringlengths 5-1.05M) | avg_line_length (float64 1.13-1.04M) | max_line_length (int64 1-1.05M) | alphanum_fraction (float64 0-1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 7c8eb7cd92e457074c323681485b214253d9d004 | 11,115 | ino | Arduino | arduino/libraries/GFX_Library_for_Arduino/examples/WiFiAnalyzer/RTLWiFiAnalyzer/RTLWiFiAnalyzer.ino | Mchaney3/AXSResearch | 6843b833a95010014bb3113ca59dda3b5e1c3663 | ["Unlicense"] | null | null | null | arduino/libraries/GFX_Library_for_Arduino/examples/WiFiAnalyzer/RTLWiFiAnalyzer/RTLWiFiAnalyzer.ino | Mchaney3/AXSResearch | 6843b833a95010014bb3113ca59dda3b5e1c3663 | ["Unlicense"] | null | null | null | arduino/libraries/GFX_Library_for_Arduino/examples/WiFiAnalyzer/RTLWiFiAnalyzer/RTLWiFiAnalyzer.ino | Mchaney3/AXSResearch | 6843b833a95010014bb3113ca59dda3b5e1c3663 | ["Unlicense"] | null | null | null |
/*******************************************************************************
 * RTL WiFi Analyzer
 * For RTL872x only.
 *
 * Add Realtek Ameba core support to the Arduino IDE:
 * https://github.com/ambiot/ambd_arduino
 *
 * Old patch of the Realtek Ameba core variant.cpp for the RTL8720DN pinout:
 * https://github.com/mikey60/BW16-RTL8720DN-Module-Arduino
 *
 * Default pin list for non-display dev kits:
 * RTL8720 BW16 old patch core : CS: 18, DC: 17, RST: 2, BL: 23
 * RTL8720_BW16 Official core  : CS: 9, DC: 8, RST: 6, BL: 3
 * RTL8722 dev board           : CS: 18, DC: 17, RST: 22, BL: 23
 * RTL8722_mini dev board      : CS: 12, DC: 14, RST: 15, BL: 13
 ******************************************************************************/
#define SCAN_INTERVAL 3000
#include <lwip_netconf.h>
#include <wifi_conf.h>
#include <wifi_constants.h>
#include <wifi_structures.h>
#include <wl_definitions.h>
#include <wl_types.h>
#include <Arduino_GFX_Library.h>
#define GFX_BL DF_GFX_BL // default backlight pin; replace DF_GFX_BL with the actual backlight pin if needed
Arduino_DataBus *bus = create_default_Arduino_DataBus();
/* More display class: https://github.com/moononournation/Arduino_GFX/wiki/Display-Class */
Arduino_GFX *gfx = new Arduino_ILI9341(bus, DF_GFX_RST, 3 /* rotation */, false /* IPS */);
static int16_t w, h, text_size, banner_height, graph24_baseline, graph50_baseline, graph_baseline, graph_height, channel24_width, channel50_width, channel_width, signal_width;
// RSSI RANGE
#define RSSI_CEILING -40
#define RSSI_FLOOR -100
// Channel legend mapping
static uint16_t channel_legend[] = {
1, 2, 3, 4, 5, 6, 7, // 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, // 8, 9, 10, 11, 12, 13, 14,
32, 0, 0, 0, 40, 0, 0, // 32, 34, 36, 38, 40, 42, 44,
0, 48, 0, 0, 0, 56, 0, // 46, 48, 50, 52, 54, 56, 58,
0, 0, 64, 0, 0, 0, // 60, 62, 64, 68,N/A, 96,
100, 0, 0, 0, 108, 0, 0, //100,102,104,106,108,110,112,
0, 116, 0, 0, 0, 124, 0, //114,116,118,120,122,124,126,
0, 0, 132, 0, 0, 0, 140, //128,N/A,132,134,136,138,140,
0, 0, 0, 149, 0, 0, 0, //142,144,N/A,149,151,153,155,
157, 0, 0, 0, 165, 0, 0, //157,159,161,163,165,167,169,
0, 173}; //171,173
// Channel color mapping
static uint16_t channel_color[] = {
RED, ORANGE, YELLOW, GREEN, CYAN, BLUE, MAGENTA,
RED, ORANGE, YELLOW, GREEN, CYAN, BLUE, MAGENTA,
RED, ORANGE, YELLOW, GREEN, CYAN, BLUE, MAGENTA,
RED, ORANGE, YELLOW, GREEN, CYAN, BLUE, MAGENTA,
RED, ORANGE, YELLOW, GREEN, WHITE, MAGENTA,
RED, ORANGE, YELLOW, GREEN, CYAN, BLUE, MAGENTA,
RED, ORANGE, YELLOW, GREEN, CYAN, BLUE, MAGENTA,
RED, ORANGE, YELLOW, GREEN, CYAN, BLUE, MAGENTA,
RED, ORANGE, YELLOW, GREEN, CYAN, BLUE, MAGENTA,
RED, ORANGE, YELLOW, GREEN, CYAN, BLUE, MAGENTA,
RED, ORANGE};
static uint16_t channelIdx(int channel)
{
if (channel <= 14) // 2.4 GHz, channel 1-14
{
return channel - 1;
}
if (channel <= 64) // 5 GHz, channel 32 - 64
{
return 14 + ((channel - 32) / 2);
}
if (channel == 68)
{
return 31;
}
if (channel == 96)
{
return 33;
}
if (channel <= 144) // channel 100 - 144
{
return 34 + ((channel - 100) / 2);
}
// channel 149 - 177
return 58 + ((channel - 149) / 2);
}
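// Worked examples (editor's note, derived from channelIdx() above): channelIdx(6) == 5,
// channelIdx(36) == 16 and channelIdx(149) == 58. 2.4 GHz channels map one-to-one (minus one),
// while 5 GHz channels land on every-other-channel legend slots starting at index 14.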
static uint8_t _networkCount;
static char _networkSsid[WL_NETWORKS_LIST_MAXNUM][WL_SSID_MAX_LENGTH];
static int32_t _networkRssi[WL_NETWORKS_LIST_MAXNUM];
static uint32_t _networkEncr[WL_NETWORKS_LIST_MAXNUM];
static uint8_t _networkChannel[WL_NETWORKS_LIST_MAXNUM];
static char _networkMac[WL_NETWORKS_LIST_MAXNUM][18];
static rtw_result_t wifidrv_scan_result_handler(rtw_scan_handler_result_t *malloced_scan_result)
{
rtw_scan_result_t *record;
if (malloced_scan_result->scan_complete != RTW_TRUE)
{
record = &malloced_scan_result->ap_details;
record->SSID.val[record->SSID.len] = 0; /* Ensure the SSID is null terminated */
if (_networkCount < WL_NETWORKS_LIST_MAXNUM)
{
strcpy(_networkSsid[_networkCount], (char *)record->SSID.val);
_networkRssi[_networkCount] = record->signal_strength;
_networkEncr[_networkCount] = record->security;
_networkChannel[_networkCount] = record->channel;
sprintf(_networkMac[_networkCount], "%02X:%02X:%02X:%02X:%02X:%02X",
record->BSSID.octet[0], record->BSSID.octet[1], record->BSSID.octet[2],
record->BSSID.octet[3], record->BSSID.octet[4], record->BSSID.octet[5]);
_networkCount++;
}
}
return RTW_SUCCESS;
}
static int8_t scanNetworks()
{
uint8_t attempts = 10;
_networkCount = 0;
if (wifi_scan_networks(wifidrv_scan_result_handler, NULL) != RTW_SUCCESS)
{
return WL_FAILURE;
}
do
{
delay(SCAN_INTERVAL);
} while ((_networkCount == 0) && (--attempts > 0));
return _networkCount;
}
void setup()
{
LwIP_Init();
wifi_on(RTW_MODE_STA);
#if defined(LCD_PWR_PIN)
pinMode(LCD_PWR_PIN, OUTPUT); // sets the pin as output
digitalWrite(LCD_PWR_PIN, HIGH); // power on
#endif
#ifdef GFX_BL
pinMode(GFX_BL, OUTPUT);
digitalWrite(GFX_BL, HIGH);
#endif
// init LCD
gfx->begin();
w = gfx->width();
h = gfx->height();
text_size = (h < 200) ? 1 : 2;
banner_height = (text_size * 8) + 4;
graph_height = ((gfx->height() - banner_height) / 2) - 30;
graph24_baseline = banner_height + graph_height + 10;
graph50_baseline = graph24_baseline + graph_height + 30;
channel24_width = w / 17;
channel50_width = w / 62;
// direct draw banner to output display
gfx->setTextSize(text_size);
gfx->fillScreen(BLACK);
gfx->setTextColor(GREEN);
gfx->setCursor(2, 2);
gfx->print("RTL");
gfx->setTextColor(WHITE);
gfx->print(" WiFi Analyzer");
}
void loop()
{
uint8_t ap_count_list[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
int32_t peak_list[] = {RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR, RSSI_FLOOR};
int16_t peak_id_list[] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1};
int32_t channel;
uint16_t idx;
int32_t rssi;
String ssid;
uint16_t color;
int16_t height, offset, text_width;
int n = scanNetworks();
// clear old graph
gfx->fillRect(0, banner_height, w, h - banner_height, BLACK);
gfx->setTextSize(1);
if (n == 0)
{
gfx->setTextColor(WHITE);
gfx->setCursor(0, banner_height);
gfx->println("No networks found");
}
else
{
for (int i = 0; i < n; i++)
{
channel = _networkChannel[i];
idx = channelIdx(channel);
rssi = _networkRssi[i];
// channel peak stat
if (peak_list[idx] < rssi)
{
peak_list[idx] = rssi;
peak_id_list[idx] = i;
}
ap_count_list[idx]++;
}
// plot found WiFi info
for (int i = 0; i < n; i++)
{
channel = _networkChannel[i];
idx = channelIdx(channel);
rssi = _networkRssi[i];
color = channel_color[idx];
height = constrain(map(rssi, RSSI_FLOOR, RSSI_CEILING, 1, graph_height), 1, graph_height);
if (idx < 14)
{
graph_baseline = graph24_baseline;
channel_width = channel24_width;
signal_width = channel24_width * 2;
offset = (idx + 2) * channel24_width;
}
else
{
graph_baseline = graph50_baseline;
channel_width = channel50_width;
signal_width = channel50_width * 2;
offset = (idx - 14 + 2) * channel50_width;
}
// trim rssi with RSSI_FLOOR
if (rssi < RSSI_FLOOR)
{
rssi = RSSI_FLOOR;
}
// plot chart
gfx->startWrite();
gfx->drawEllipseHelper(offset, graph_baseline + 1, signal_width, height, 0b0011, color);
gfx->endWrite();
if (i == peak_id_list[idx])
{
// Print SSID, signal strength, and a marker if the network is not encrypted
String ssid = _networkSsid[i];
if (ssid.length() == 0)
{
ssid = _networkMac[i];
}
text_width = (ssid.length() + 6) * 6;
if (text_width > w)
{
offset = 0;
}
else
{
offset -= signal_width;
if ((offset + text_width) > w)
{
offset = w - text_width;
}
}
gfx->setTextColor(color);
gfx->setCursor(offset, graph_baseline - 10 - height);
gfx->print(ssid);
gfx->print('(');
gfx->print(rssi);
gfx->print(')');
if (_networkEncr[i] == RTW_SECURITY_OPEN)
{
gfx->print('*');
}
}
}
}
// print WiFi found
gfx->setTextColor(WHITE);
gfx->setCursor(2, banner_height);
gfx->print(n);
gfx->print(" networks");
// draw 2.4 GHz graph base axle
gfx->drawFastHLine(0, graph24_baseline, 320, WHITE);
for (idx = 0; idx < 14; idx++)
{
channel = channel_legend[idx];
offset = (idx + 2) * channel24_width;
if (channel > 0)
{
gfx->setTextColor(channel_color[idx]);
gfx->setCursor(offset - ((channel < 10) ? 3 : 6), graph24_baseline + 2);
gfx->print(channel);
}
if (ap_count_list[idx] > 0)
{
gfx->setTextColor(LIGHTGREY);
gfx->setCursor(offset - ((ap_count_list[idx] < 10) ? 3 : 6), graph24_baseline + 8 + 2);
gfx->print(ap_count_list[idx]);
}
}
// draw 5 GHz graph base axle
gfx->drawFastHLine(0, graph50_baseline, 320, WHITE);
for (idx = 14; idx < 71; idx++)
{
channel = channel_legend[idx];
offset = (idx - 14 + 2) * channel50_width;
if (channel > 0)
{
gfx->setTextColor(channel_color[idx]);
gfx->setCursor(offset - ((channel < 100) ? 6 : 9), graph50_baseline + 2);
gfx->print(channel);
}
if (ap_count_list[idx] > 0)
{
gfx->setTextColor(DARKGREY);
gfx->setCursor(offset - ((ap_count_list[idx] < 10) ? 3 : 6), graph50_baseline + 8 + 2);
gfx->print(ap_count_list[idx]);
}
}
// Wait a bit before scanning again
delay(SCAN_INTERVAL);
}
| 32.5 | 877 | 0.606928 |
| 31adb36ea862cb89a50571fd51fca8c4641a491a | 927 | ino | Arduino | Physical_Part/gy61.ino | cviaai/RL-TREMOR-SUPPRESSION | 2743eb70bd902e42a7c713c3c91da08695195a03 | ["MIT"] | 3 | 2020-07-22T12:40:50.000Z | 2022-03-07T03:28:19.000Z | Physical_Part/gy61.ino | cviaai/RL-TREMOR-SUPPRESSION | 2743eb70bd902e42a7c713c3c91da08695195a03 | ["MIT"] | 1 | 2020-07-22T10:39:29.000Z | 2020-07-22T12:42:16.000Z | Physical_Part/gy61.ino | cviaai/RL-TREMOR-SUPPRESSION | 2743eb70bd902e42a7c713c3c91da08695195a03 | ["MIT"] | null | null | null |
// pins the X, Y, Z axes are connected to
const int pinX=A0;
const int pinY=A1;
const int pinZ=A2;
// variables to store the readings
unsigned int x, y, z;
bool allow = false;
void setup() {
// start the serial port
Serial.begin(9600);
}
void loop() {
if (allow){
for (int i = 0; i < 1000; i++){
// read the data
x = analogRead(pinX);
y = analogRead(pinY);
z = analogRead(pinZ);
// print to the serial port
Serial.print(x);Serial.print(",");Serial.print(y);Serial.print(",");Serial.println(z);
// pause
delay(0);
}
Serial.println(0);
allow = false;
}
}
void serialEvent() {
while (Serial.available()){
int incom = Serial.read() - '0';
//delay(10);
Serial.println(incom);
if (incom == 0){
allow = false;
delay(100);
}
if (incom == 1){
allow = true;
delay(100);
}
}
}
| 15.196721 | 93 | 0.549083 |
| 358368a48472066e2de4f95e627237e834aed20c | 151 | ino | Arduino | 02_05_temp/02_05_temp.ino | simonmonk/prog_arduino_3 | 6bef0169e22e9e67ab4ae4605f679b8a12d65fd5 | ["MIT"] | 2 | 2022-02-10T20:54:31.000Z | 2022-03-20T08:29:38.000Z | 02_05_temp/02_05_temp.ino | simonmonk/prog_arduino_3 | 6bef0169e22e9e67ab4ae4605f679b8a12d65fd5 | ["MIT"] | null | null | null | 02_05_temp/02_05_temp.ino | simonmonk/prog_arduino_3 | 6bef0169e22e9e67ab4ae4605f679b8a12d65fd5 | ["MIT"] | null | null | null |
// 02_05_temp
void setup() {
Serial.begin(9600);
int degC = 20;
int degF;
degF = degC * 9 / 5 + 32;
Serial.println(degF);
}
void loop(){}
| 12.583333 | 27 | 0.576159 |
| 7b766ff21b944b9bcfdf0ddaf1f81689746cf096 | 1,288 | ino | Arduino | NodeMCU-wifi-scan/NodeMCU-wifi-scan.ino | paschualetto/nodemcu | c59b6a12e161ad869bb5c1fa26e57bc89d118f38 | ["Apache-2.0"] | null | null | null | NodeMCU-wifi-scan/NodeMCU-wifi-scan.ino | paschualetto/nodemcu | c59b6a12e161ad869bb5c1fa26e57bc89d118f38 | ["Apache-2.0"] | null | null | null | NodeMCU-wifi-scan/NodeMCU-wifi-scan.ino | paschualetto/nodemcu | c59b6a12e161ad869bb5c1fa26e57bc89d118f38 | ["Apache-2.0"] | 1 | 2018-08-06T23:19:02.000Z | 2018-08-06T23:19:02.000Z |
/* WiFi network scan using the NodeMCU-ESP12
Arduino IDE 1.8.5 - ESP8266
Gustavo Murta 07/Mar/2018
based on https://arduino-esp8266.readthedocs.io/en/2.4.0/esp8266wifi/scan-class.html
Eletrogate blog http://blog.eletrogate.com/nodemcu-esp12-usando-arduino-ide-2/
*/
#include "ESP8266WiFi.h"
void setup()
{
Serial.begin(115200); // set up the serial monitor at 115200 bps
Serial.println(); // print a blank line
WiFi.mode(WIFI_STA); // configure WiFi in station mode
WiFi.disconnect(); // disconnect from any WiFi network
delay(100); // 100 millisecond delay
}
void prinScanResult(int networksFound)
{
Serial.printf("\n"); // print a blank line
Serial.printf("%d redes encontradas\n", networksFound); // print the number of networks found
for (int i = 0; i < networksFound; i++) // loop over the networks found
{
Serial.printf("%d: %s, Ch:%d (%ddBm) %s\n", i + 1, WiFi.SSID(i).c_str(), WiFi.channel(i), WiFi.RSSI(i), WiFi.encryptionType(i) == ENC_TYPE_NONE ? "aberta" : "");
}
}
void loop()
{
WiFi.scanNetworksAsync(prinScanResult); // start an async scan and print the results via the callback
delay(500); // 0.5 second delay
}
| 37.882353 | 165 | 0.614907 |
| 9d6cd966b8cf525ad48ba604553497ad18af10b2 | 6,948 | ino | Arduino | SHIELDS/Sh04_PAINEL_string/Sh04_PAINEL_string.ino | renatomportugal/arduino | 1a2efac72ab863d8a1d9fa1d1f17170bbad4c0b9 | ["MIT"] | 1 | 2021-03-20T13:21:21.000Z | 2021-03-20T13:21:21.000Z | SHIELDS/Sh04_PAINEL_string/Sh04_PAINEL_string.ino | renatomportugal/arduino | 1a2efac72ab863d8a1d9fa1d1f17170bbad4c0b9 | ["MIT"] | null | null | null | SHIELDS/Sh04_PAINEL_string/Sh04_PAINEL_string.ino | renatomportugal/arduino | 1a2efac72ab863d8a1d9fa1d1f17170bbad4c0b9 | ["MIT"] | null | null | null |
// 1. INCLUDES
#include "stdio.h"
// 2. PINS
// 2.1. SHIELD 04
// 2.1.1. DISPLAY 7 SEG
int pino_SRCLK = 13;
int pino_SER = 12;
int pino_RCLK = 11;
int pino_SRCLR_inv = 10;
int pino_OE_inv = 9;
String caractere;
String Frase;
int qtdCaractere;
int Codigo;
int pos;
void setup(){
// 1. DISPLAY 7 SEG
pinMode(pino_SRCLK, OUTPUT);
pinMode(pino_SER, OUTPUT);
pinMode(pino_RCLK, OUTPUT);
pinMode(pino_SRCLR_inv, OUTPUT);
pinMode(pino_OE_inv, OUTPUT);
// 3. PREPARE THE SHIFT REGISTER
digitalWrite(pino_SRCLK, LOW);
digitalWrite(pino_SER, LOW);
digitalWrite(pino_RCLK, HIGH);
digitalWrite(pino_SRCLR_inv, LOW);
digitalWrite(pino_SRCLR_inv, HIGH);
digitalWrite(pino_OE_inv, HIGH);
// Receive the phrase
//Frase = "0123456789ACdEFHJLPUocuin .th";
/* WORDS FOR THE PANEL _ _ _ _ _ _ _
Cod
Join
HOJE
FACA
CAdA
notA
hoJE
teLA
PULO
PELA
PELO
PELE
Foi
CASA
CAdA
*/
Frase = "Cod 007";
// Check the number of characters
//qtdCaractere = ContaCaracteres(Frase);
qtdCaractere = Frase.length();
//int Codigo = MostraQtdCaractere(qtdCaractere);
//caractere = "1";
//int pos = 23;
//caractere = Frase.substring(pos,pos + 1);
//int Codigo = CodCaractere(caractere);
//ShiftRegEnviaInt(pino_SER,Codigo);
digitalWrite(pino_OE_inv, LOW);
// SEND CHARACTERS from last to first
for (int i = qtdCaractere - 1; i > -1; i--){
pos = i;
caractere = Frase.substring(pos,pos + 1);
Codigo = CodCaractere(caractere);
//ShiftRegEnviaInt(pino_SER,Codigo);
EnviaCaractere(Codigo);
}
/*
// SEND CHARACTERS
for (int i = 0; i < qtdCaractere; i++){
pos = i;
caractere = Frase.substring(pos,pos + 1);
Codigo = CodCaractereInverso(caractere);
//ShiftRegEnviaInt(pino_SER,Codigo);
EnviaCaractere(Codigo);
delay(400);
}
*/
//digitalWrite(pino_OE_inv, LOW);
/*
Frase = "0123456789ACdEFHJLPUocuin .th";
Frase.substring(0,1) shows 0
Frase.substring(1,2) shows 1
Frase.substring(2,3) shows 2
Frase.substring(3,4) shows 3
Frase.substring(4,5) shows 4
Frase.substring(5,6) shows 5
Frase.substring(6,7) shows 6
Frase.substring(7,8) shows 7
Frase.substring(8,9) shows 8
Frase.substring(9,10) shows 9
Frase.substring(10,11) shows A
Frase.substring(11,12) shows C
Frase.substring(12,13) shows d
Frase.substring(13,14) shows E
Frase.substring(14,15) shows F
Frase.substring(15,16) shows H
Frase.substring(16,17) shows J
Frase.substring(17,18) shows L
Frase.substring(18,19) shows P
Frase.substring(19,20) shows U
Frase.substring(20,21) shows o
Frase.substring(21,22) shows c
Frase.substring(22,23) shows u
Frase.substring(23,24) shows i
Frase.substring(24,25) shows n
Frase.substring(25,26) shows "nothing"
Frase.substring(26,27) shows .
Frase.substring(27,28) shows t
Frase.substring(28,29) shows h
*/
}
void loop(){
//delay(1000);
//Pisca(500);
}
// Look up the code for a given character
int CodCaractere (String caractere){
if (caractere == "0"){return 63;}
if (caractere == "1"){return 6;}
if (caractere == "2"){return 91;}
if (caractere == "3"){return 79;}
if (caractere == "4"){return 102;}
if (caractere == "5"){return 109;}
if (caractere == "6"){return 124;}
if (caractere == "7"){return 7;}
if (caractere == "8"){return 127;}
if (caractere == "9"){return 103;}
if (caractere == "O"){return 63;}
if (caractere == "A"){return 119;}
if (caractere == "C"){return 57;}
if (caractere == "d"){return 94;}
if (caractere == "E"){return 121;}
if (caractere == "F"){return 113;}
if (caractere == "H"){return 118;}
if (caractere == "J"){return 30;}
if (caractere == "L"){return 56;}
if (caractere == "P"){return 115;}
if (caractere == "U"){return 62;}
if (caractere == "o"){return 92;}
if (caractere == "c"){return 88;}
if (caractere == "u"){return 28;}
if (caractere == "i"){return 4;}
if (caractere == "t"){return 120;}
if (caractere == "n"){return 84;}
if (caractere == " "){return 0;}
if (caractere == "."){return 128;}
if (caractere == "h"){return 116;}
if (caractere == "S"){return 109;}
return 0; // default: blank the display if the character is not in the table
}
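// Editor's note (not in the original sketch): the return values above are 7-segment bitmasks,
// assuming the usual wiring where bits 0..6 drive segments a..g and bit 7 drives the decimal
// point; e.g. "0" = 63 = 0b00111111 lights segments a-f, and "." = 128 lights only the point.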
int CodCaractereInverso (String caractere){
if (caractere == "0"){return 63;}
if (caractere == "1"){return 48;}
if (caractere == "2"){return 91;}
if (caractere == "3"){return 120;}
if (caractere == "4"){return 116;}
if (caractere == "5"){return 109;}
if (caractere == "6"){return 103;}
if (caractere == "7"){return 56;}
if (caractere == "8"){return 127;}
if (caractere == "9"){return 124;}
if (caractere == "O"){return 63;}
if (caractere == "A"){return 126;}
if (caractere == "C"){return 15;}
if (caractere == "d"){return 115;}
if (caractere == "E"){return 79;}
if (caractere == "F"){return 78;}
if (caractere == "H"){return 118;}
if (caractere == "J"){return 51;}
if (caractere == "L"){return 7;}
if (caractere == "P"){return 94;}
if (caractere == "U"){return 55;}
if (caractere == "o"){return 99;}
if (caractere == "c"){return 67;}
if (caractere == "u"){return 35;}
if (caractere == "i"){return 32;}
if (caractere == "t"){return 71;}
if (caractere == "n"){return 98;}
if (caractere == " "){return 0;}
if (caractere == "."){return 128;}
if (caractere == "h"){return 100;}
if (caractere == "S"){return 109;}
return 0; // default: blank the display if the character is not in the table
}
int MostraQtdCaractere (int qtdade){
if (qtdade == 0){return 63;}
if (qtdade == 1){return 6;}
if (qtdade == 2){return 91;}
if (qtdade == 3){return 79;}
if (qtdade == 4){return 102;}
if (qtdade == 5){return 109;}
if (qtdade == 6){return 124;}
if (qtdade == 7){return 7;}
if (qtdade == 8){return 127;}
if (qtdade == 9){return 103;}
return 0; // default: blank the display for counts above 9
}
int ContaCaracteres (String Frase){
return Frase.length();
}
int ShiftRegEnviaInt(int PortaDados, int dadoInt){
// CONVERT TO a bit array
char Dados[8];
// Fill the char array Dados[8]
for(int i=8;i>0;--i){
int j = i - 1;
Dados[j] = dadoInt%2;
dadoInt = dadoInt / 2;
}
for(int i=0;i<8;i++){
if (Dados[i] == 1){digitalWrite(PortaDados, HIGH);}else{digitalWrite(PortaDados, LOW);}
digitalWrite(pino_SRCLK, HIGH);
digitalWrite(pino_RCLK, LOW);
digitalWrite(pino_SRCLK, LOW);
digitalWrite(pino_RCLK, HIGH);
//delay(2);
}
}
int Pisca(int tempo){
digitalWrite(pino_OE_inv, HIGH);
delay(tempo);
digitalWrite(pino_OE_inv, LOW);
}
int DDD(){
int casas = 7;
caractere = "A";
Codigo = CodCaractere(caractere);
EnviaCaractere(Codigo);
}
int EnviaCaractere(int dadoInt){
// CONVERT TO a bit array
char Dados[8];
// Fill the char array Dados[8]
for(int i=8;i>0;--i){
int j = i - 1;
Dados[j] = dadoInt%2;
dadoInt = dadoInt / 2;
}
for(int i=0;i<8;i++){
if (Dados[i] == 1){digitalWrite(pino_SER, HIGH);}else{digitalWrite(pino_SER, LOW);}
digitalWrite(pino_SRCLK, HIGH);
digitalWrite(pino_RCLK, LOW);
digitalWrite(pino_SRCLK, LOW);
digitalWrite(pino_RCLK, HIGH);
//delay(2);
}
}
| 24.725979 | 94 | 0.629822 |
| 00a406fc64bca2895c23970d861530634b33d8e4 | 3,118 | ino | Arduino | tempLogger/tempLogger.ino | walle86/Arduino | f97cb0aee163de2a85bb86f638448ba7d8193fbb | ["MIT"] | 1 | 2019-03-09T17:09:25.000Z | 2019-03-09T17:09:25.000Z | tempLogger/tempLogger.ino | walle86/Arduino | f97cb0aee163de2a85bb86f638448ba7d8193fbb | ["MIT"] | null | null | null | tempLogger/tempLogger.ino | walle86/Arduino | f97cb0aee163de2a85bb86f638448ba7d8193fbb | ["MIT"] | null | null | null |
/*
Temperature logger using an MCP3304 as the ADC, saving data to an SD card.
I only have the standard TMP36 which comes with the Arduino Starter Kit.
The time is measured since startup of the Arduino.
For the wiring, look at the included SVG and JPG.
Programmed by Stefan Wallnoefer for josephchrzempiec
More libraries and sketches at https://github.com/walle86/
*/
#include <SD.h>
#include <MCP3304.h>
#include <SPI.h>
//int analogPin = 0; // not needed, because the MCP3304 is used as the input
int numReadings = 10; // take 10 readings to get a stable value
int readValue = 0;
float average = 0;
float voltage = 0;
float temperature = 0;
float temperatureF = 0;
float resolution = 10; // in mV/°C
float offsetVoltage = 500; // output voltage of the sensor in mV at 0°C -> 500 mV for the TMP36
const int chipSelectSD = 10;
const int chipSelectMCP = 9;
const int errorPin = 8;
unsigned long time; // Variable to store the time (only since startup)
unsigned int wait = 1000; // How long to delay between sensor readings in milliseconds
MCP3304 adc(chipSelectMCP); //creating the ADC
void setup() {
Serial.begin(9600);
Serial.print("Initializing SD card...");
pinMode(chipSelectSD, OUTPUT);
pinMode(errorPin, OUTPUT); // connect an LED which will light up if an error occurred
digitalWrite(errorPin, LOW);
if (!SD.begin(chipSelectSD)) {
Serial.println("Card failed, or not present");
digitalWrite(errorPin, HIGH);
// don't do anything more:
return;
}
Serial.println("card initialized.");
//Write a Headerline in datalog.txt
File dataFile = SD.open("datalog.txt", FILE_WRITE);
if (dataFile) {
dataFile.println("Programmed by Stefan Wallnoefer for josephchrzempiec\nmore librarys and sketches under https://github.com/walle86/\n");
dataFile.println("The time is since Startup of the Arduino\n");
dataFile.println("Time [s]\tTemperature [F]");
dataFile.close();
}
else {
Serial.println("error opening datalog.txt");
digitalWrite(errorPin, HIGH);
}
}
void loop() {
if(millis() - time >= wait){
time = millis();
// read 10 values in about 100 ms to get a more stable result
for (int i = 0; i < numReadings; i++){
readValue += adc.readSgl(0);
delay(10);
}
voltage = (float)readValue / numReadings * 5.0 / 4095.0; // average of the readings converted into a voltage
temperature = (voltage - offsetVoltage/1000.0) * 1000.0 / resolution;
temperatureF = (temperature * 1.8) + 32;
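// Worked example (editor's note): at 25°C a TMP36 outputs about 750 mV, which reads as roughly
// 614 counts with a 5 V reference (750 / 5000 * 4095); the formulas above then give
// (0.75 V - 0.5 V) * 1000 / 10 = 25°C, i.e. 77°F.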
Serial.print("Temperature: ");
Serial.print(temperature);
Serial.println("C");
// open the file. note that only one file can be open at a time,
// so you have to close this one before opening another.
File dataFile = SD.open("datalog.txt", FILE_WRITE);
if (dataFile) {
dataFile.print(round(time/1000.0));
dataFile.print("\t");
dataFile.println(temperatureF,1);
dataFile.close();
}
// if the file isn't open, pop up an error:
else {
Serial.println("error opening datalog.txt");
digitalWrite(errorPin, HIGH);
}
readValue = 0; //set value back
}
}
| 29.415094 | 141 | 0.675433 |
| b60151d138b4f32cddf65adeafb5f241e3da08fe | 504 | ino | Arduino | src/tilt_switch_LED/tilt_switch_LED.ino | zcking/ArduinoStash | c88bf6533557969914a6700109abbdf4bf087a9b | ["MIT"] | 3 | 2018-07-24T16:52:49.000Z | 2020-04-13T22:39:57.000Z | src/tilt_switch_LED/tilt_switch_LED.ino | zach-king/ArduinoStash | c88bf6533557969914a6700109abbdf4bf087a9b | ["MIT"] | null | null | null | src/tilt_switch_LED/tilt_switch_LED.ino | zach-king/ArduinoStash | c88bf6533557969914a6700109abbdf4bf087a9b | ["MIT"] | 4 | 2018-06-28T13:38:05.000Z | 2020-05-04T19:29:50.000Z |
const int ledPin = 8; // the pin the LED is attached to
const int tiltPin = 3;
void setup()
{
pinMode(ledPin,OUTPUT);//initialize the ledPin as an output
pinMode(tiltPin,INPUT);
digitalWrite(tiltPin, HIGH); // enable the internal pull-up on the tilt switch input
}
/******************************************/
void loop()
{
int digitalVal = digitalRead(tiltPin);
if(HIGH == digitalVal)
{
digitalWrite(ledPin,LOW);//turn the led off
}
else
{
digitalWrite(ledPin,HIGH);//turn the led on
}
}
/**********************************************/
| 19.384615 | 61 | 0.537698 |
| 8a18be0b19fdd74ce6e3ab2ada7f6ba51f457d85 | 1,243 | ino | Arduino | examples/mDNSResolver/mDNSResolver.ino | khoih-prog/mDNSResolver | 2b600dc6dc549beab5835e55339deb2e19a44a10 | ["MIT"] | null | null | null | examples/mDNSResolver/mDNSResolver.ino | khoih-prog/mDNSResolver | 2b600dc6dc549beab5835e55339deb2e19a44a10 | ["MIT"] | null | null | null | examples/mDNSResolver/mDNSResolver.ino | khoih-prog/mDNSResolver | 2b600dc6dc549beab5835e55339deb2e19a44a10 | ["MIT"] | null | null | null |
#if defined(ESP32)
#include <WiFi.h>
#else
#include <ESP8266WiFi.h>
#endif
#include <WiFiUdp.h>
#include <mDNSResolver.h>
#define WIFI_AP "my-wifi-ap"
#define WIFI_PASS "my-wifi-pass"
#define NAME_TO_RESOLVE "test.local"
using namespace mDNSResolver;
WiFiClient wifiClient;
WiFiUDP udp;
Resolver resolver(udp);
void setup() {
Serial.begin(115200);
int WiFiCounter = 0;
// We start by connecting to a WiFi network
Serial.println("Connecting to ");
Serial.println(WIFI_AP);
WiFi.disconnect();
WiFi.mode(WIFI_STA);
WiFi.begin(WIFI_AP, WIFI_PASS);
while (WiFi.status() != WL_CONNECTED && WiFiCounter < 30) {
delay(1000);
WiFiCounter++;
Serial.print(".");
}
Serial.println("");
Serial.println("WiFi connected");
Serial.println("IP address: ");
Serial.println(WiFi.localIP());
Serial.print("Resolving ");
Serial.println(NAME_TO_RESOLVE);
resolver.setLocalIP(WiFi.localIP());
IPAddress ip = resolver.search(NAME_TO_RESOLVE);
if(ip != INADDR_NONE) {
Serial.print("Resolved: ");
Serial.println(ip);
} else {
Serial.println("Not resolved");
}
}
void loop() {
// Required to clear the UDP buffer, as we can't unjoin a multicast group yet
resolver.loop();
}
| 21.067797 | 79 | 0.678198 |
| 363c590aff9ba632ae82c2bd09b3b9454ac3616c | 109,807 | ino | Arduino | FlyByWire/FlyByWire.ino | voodoodemon/TheFlyingMagicCarpet | cd18b09c74c212d6add8e9ac9984fa6081036518 | ["MIT"] | null | null | null | FlyByWire/FlyByWire.ino | voodoodemon/TheFlyingMagicCarpet | cd18b09c74c212d6add8e9ac9984fa6081036518 | ["MIT"] | null | null | null | FlyByWire/FlyByWire.ino | voodoodemon/TheFlyingMagicCarpet | cd18b09c74c212d6add8e9ac9984fa6081036518 | ["MIT"] | null | null | null |
// Carpet CANTroller II Source Code - For Arduino Due with Adafruit 2.8inch Captouch TFT shield.
// Libraries to include. Note all these have example code when installed into arduino ide
//
#include <SPI.h> // SPI serial bus needed to talk to the LCD and the SD card
#include <Wire.h> // Contains I2C serial bus, needed to talk to touchscreen chip
#include <SdFat.h> // SD card & FAT filesystem library
#include <Servo.h> // Makes PWM output to control motors (for rudimentary control of our gas and steering)
#include <Adafruit_GFX.h> // For drawing pictures & text on the screen
#include <Adafruit_FT6206.h> // For interfacing with the cap touchscreen controller chip
#include <Adafruit_ILI9341.h> // For interfacing with the TFT LCD controller chip
// #include <Fonts/FreeSans12pt7b.h> // Variable width font (optional) Note: fonts hog a ton of code memory
// #include <Adafruit_ImageReader.h> // Lets you display image files from the sd card so keeps memory free
// #include <FastPID.h> // Fixed-point math based PID loop (for our brakes and maybe cruise control, maybe also steering and gas)
// #include <PID_v1.h> // Arduino PID loop library
/*
# Here are the different runmodes documented
#
# ** Shutdown Mode **
# - Required: Ignition Off
# - Priority: 1 (Highest)
# This mode is active whenever the ignition is off. In other words, whenever the
# little red pushbutton switch by the joystick is unclicked. This happens before the
# ignition is pressed before driving, but it also may happen if the driver needs to
# panic and E-stop due to loss of control or any other reason. The ignition will get cut
# independent of the controller, but we can help stop the car faster by applying the
# brakes. Once car is stopped, we release all actuators and then go idle.
# - Actions: 1. Release throttle. If car is moving AND BasicMode Off, apply brakes to stop car
# - Actions: 2: Release brakes and deactivate all actuators including steering
#
# ** Basic Mode **
# - Required: BasicMode switch On & Ignition On
# - Priority: 2
# The gas and brake don't do anything in Basic Mode. Just the steering works, so use the pedals.
# This mode is enabled with a toggle switch in the controller box. When in Basic Mode, the only
# other valid mode is Shutdown Mode. Shutdown Mode may override Basic Mode.
# - Actions: Release and deactivate brake and gas actuators. Steering PID keep active
#
# ** Stall Mode **
# - Required: Engine stopped & BasicMode switch Off & Ignition On
# - Priority: 3
# This mode is active when the engine is not running. If car is moving, then it presumably may
# coast to a stop. The actuators are all enabled and work normally. Starting the engine will
# bring you into Hold Mode. Shutdown Mode and Basic Mode both override Stall Mode. Note: This
# mode allows for driver to steer while being towed or pushed, or working on the car.
# - Actions: Enable all actuators
#
# ** Hold Mode **
# - Required: Engine running & JoyVert<=Center & BasicMode switch Off & Ignition On
# - Priority: 4
# This mode is entered from Stall Mode once engine is started, and also, whenever the car comes
# to a stop while driving around in Fly Mode. This mode releases the throttle and will
# continuously increase the brakes until the car is stopped, if it finds the car is moving.
# Pushing up on the joystick from Hold mode releases the brakes & begins Fly Mode.
# Shutdown, Basic & Stall Modes override Hold Mode.
# # Actions: Close throttle, and Apply brake to stop car, continue to ensure it stays stopped.
#
# ** Fly Mode **
# - Required: (Car Moving OR JoyVert>Center) & In gear & Engine running & BasicMode Off & Ign On
# - Priority: 5
# This mode is for driving under manual control. In Fly Mode, vertical joystick positions
# result in a proportional level of gas or brake (AKA "Manual" control). Fly Mode is
# only active when the car is moving - Once stopped or taken out of gear, we go back to Hold Mode.
# If the driver performs a special secret "cruise gesture" on the joystick, then go to Cruise Mode.
# Special cruise gesture might be: Pair of sudden full-throttle motions in rapid succession
# - Actions: Enable all actuators, Watch for gesture
#
# ** Cruise Mode **
# - Required: Car Moving & In gear & Engine running & BasicMode switch Off & Ignition On
# - Priority: 6 (Lowest)
# This mode is entered from Fly Mode by doing a special joystick gesture. In Cruise Mode,
# the brake is disabled, and the joystick vertical is different: If joyv at center, the
# throttle will actively maintain current car speed. Up or down momentary joystick presses
# serve to adjust that target speed. A sharp, full-downward gesture will drop us back to
# Fly Mode, promptly resulting in braking (if kept held down).
# - Actions: Release brake, Maintain car speed, Handle joyvert differently, Watch for gesture
*/
// Some human readable integers
//
#define SHUTDOWN 0
#define BASIC 1
#define STALL 2
#define HOLD 3
#define FLY 4
#define CRUISE 5
#define LOCK 0
#define JOY 1
#define CAR 2
#define PWM 3
#define BPID 4
#define GPID 5
#define CPID 6
#define CW 1
#define CCW -1
#define arraysize(x) (sizeof(x) / sizeof((x)[0])) // To determine the length of string arrays
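// Editor's illustrative sketch (not part of the original source): a minimal, priority-ordered
// mode arbiter matching the runmode documentation above. The boolean arguments are hypothetical
// stand-ins for the ignition input, basic-mode switch, tach/speedo timeouts and joystick checks
// handled elsewhere in this file; the real mode logic also handles gestures and Cruise entry.
static uint8_t pick_runmode_example(bool ignition_on, bool basicmode_on, bool engine_stopped, bool car_stopped, bool joy_below_center) {
    if (!ignition_on) return SHUTDOWN;                 // Priority 1: ignition off always wins
    if (basicmode_on) return BASIC;                    // Priority 2: pedals only, steering stays active
    if (engine_stopped) return STALL;                  // Priority 3: engine not running
    if (car_stopped && joy_below_center) return HOLD;  // Priority 4: keep the car stopped
    return FLY;                                        // Priority 5 (Cruise is entered from Fly by gesture)
}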
// #define rangebox(amt,low,high) ((amt)<(low)?(low):((amt)>(high)?(high):(amt))) // Copied from arduino constrain, just so I can easily refer to the the source code (made a function instead)
// LCD is 2.8in diagonal, 240x320 pixels
// LCD supports 18-bit color, but GFX library uses 16-bit color, organized (MSB) 5b-red, 6b-green, 5b-blue (LSB)
// Since the RGB don't line up with the nibble boundaries, it's tricky to quantify a color, here are some colors:
// Color picker website: https://chrishewett.com/blog/true-rgb565-colour-picker/
#define BLK 0x0000
#define BLU 0x001F
#define RED 0xF800
#define GRN 0x07E0
#define CYN 0x07FF // 00000 111111 11111
#define DCYN 0x0575 //
#define MGT 0xF81F
#define YEL 0xFFE0
#define WHT 0xFFFF
#define GRY1 0x8410 // 10000 100000 10000 = 84 10
#define GRY2 0xC618 // 11000 110000 11000 = C6 18
#define PNK 0xFC1F // Pink is the best color
#define DPNK 0xBAD7 // We need all shades of pink
#define LPNK 0xFE1F // Especially light pink, the champagne of pinks
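// Editor's illustrative addition (not in the original sketch): how an 8-bit R/G/B triple packs
// into the 16-bit 5-6-5 format described above, (MSB) 5b-red, 6b-green, 5b-blue (LSB).
#define RGB565_EXAMPLE(r, g, b) ((uint16_t)((((r) & 0xF8) << 8) | (((g) & 0xFC) << 3) | ((b) >> 3)))
// e.g. RGB565_EXAMPLE(255, 0, 255) evaluates to 0xF81F, matching the MGT (magenta) value above.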
// Defines for all the GPIO pins we're using
#define usd_cs_pin 4 // Output, active low, Chip select allows SD card controller chip use of the SPI bus
#define tft_ledk_pin 5 // Output, Optional PWM signal to control brightness of LCD backlight (needs modification to shield board to work)
#define tp_irq_pin 7 // Optional int input so touchpanel can interrupt us (need to modify shield board for this to work)
#define tft_dc_pin 9 // Output, Assert when sending data to display chip to indicate commands vs. screen data
#define tft_cs_pin 10 // Output, active low, Chip select allows ILI9341 display chip use of the SPI bus
#define led_pin 13 // Output, This is the LED onboard the arduino. Active high.
#define encoder_sw_pin 18 // Int input, Encoder above, for the UI. This is its pushbutton output, active low (needs pullup)
#define encoder_b_pin 19 // Int input, The B pin of the encoder. Both A and B complete a negative pulse in between detents. If B pulse goes low first, turn is CW. (needs pullup)
#define encoder_a_pin 21 // Int input, The A pin of the encoder. Both A and B complete a negative pulse in between detents. If A pulse goes low first, turn is CCW. (needs pullup)
// The other kind of encoder: When A toggles, if B is equal to A, then turn is CCW, else CW. (needs pullup)
#define pot_pwr_pin 24 // Output, Lets us supply the optional external potentiometer with 3.3V power
#define sim_pulse_pin 26 // Output, For testing interrupts and stuff
#define steer_pwm_pin 29 // Output, PWM signal duty cycle sets speed of steering motor from full speed left, to full speed right, (50% is stopped)
#define speedo_pulse_pin 30 // Int Input, active high, asserted when magnet is in range of sensor. 1 pulse per driven pulley rotation. (no pullup)
#define tach_pulse_pin 32 // Int Input, active high, asserted when magnet is in range of sensor. 1 pulse per engine rotation. (no pullup)
// #define tach_pulse_pin 67 // This pin might have caused problems
#define brake_pwm_pin 35 // Output, PWM signal duty cycle sets speed of brake actuator from full speed extend to full speed retract, (50% is stopped)
#define ignition_pin 43 // Input tells us if ignition signal is on or off, active high (no pullup)
#define cruise_sw_pin 41 // Input, momentary button low pulse >500ms in fly mode means start cruise mode. Any pulse in cruise mode goes to fly mode. Active low. (needs pullup)
#define basicmodesw_pin 47 // Input, asserted to tell us to run in basic mode. (needs pullup)
#define neutral_pin 49 // Input, asserted when car is in neutral, i.e. out of gear. Active low. (needs pullup)
#define gas_pwm_pin 67 // Output, PWM signal duty cycle controls throttle target
// #define gas_pwm_pin 34 // Output, PWM signal duty cycle controls throttle target
#define pot_wipe_pin A6 // Analog input, tells us position of attached potentiometer (useful for debug, etc.)
#define battery_pin A7 // Analog input, mule battery voltage level, full scale is 15.638V
#define joy_horz_pin A8 // Analog input, tells us left-right position of joystick. Take complement of ADC value gives: Low values for left, High values for right.
#define joy_vert_pin A9 // Analog input, tells us up-down position of joystick. Take complement of ADC value gives: Low values for down, High values for up.
#define pressure_pin A10 // Analog input, tells us brake fluid pressure (full count = 1000psi)
#define brake_pos_pin A11 // Analog input, tells us linear position of brake actuator
#define sim_halfass false // Don't sim the joystick or encoder or tach
// Readily available possibilities we could wire up if we want
//
// * Status LEDs (digital out)
// * Control of steering or brake motor coast vs. brake
// * CAN bus as a superior interface to brake and steering Jaguars (only on Due I think?)
// * Steering limit switches left and right, handle here instead of in Jaguar (digital in)
// * Engine temperature module overheat panic input (digital in)
// * Remote E-Stop panic inputs (digital in)
// * Serial interface to the lighting controller (if we can think of a reason)
// * Mule starter (digital out)
// * E-brake handle position (digital in)
// Fixed parameters
//
#define disp_width_pix 320
#define disp_height_pix 240
#define disp_lines 20
#define disp_fixed_lines 12
#define disp_tuning_lines 8
#define disp_line_height_pix 12 // fits 16x 15-pixel or 20x 12-pixel rows
#define disp_vshift_pix 2
#define touch_rows 4
#define touch_cols 5
#define touch_cell_width_pix 64
#define touch_cell_height_pix 60
#define sim_tuning_modes 7
#define adc_bits 12
#define adc_range_adc 4096 // = 2^12
#define adc_midscale_adc 2048
#define pid_period_us 20000 // time period between output updates. Reciprocal of pid frequency (in us)
#define looptimer false // Makes code write out timestamps throughout loop to serial port
char telemetry[disp_fixed_lines][12] = {
"Flightmode:",
" Air Speed:",
" Engine #1:",
"Hydraulics:",
"Stick Horz:",
"Stick Vert:",
" Steer PWM:",
"Cruise Tgt:",
"Gas Target:",
" Gas PWM:",
"PresTarget:",
" Brake PWM:"
};
char tunings[sim_tuning_modes][disp_tuning_lines][12] = {
{ " Battery:", // LOCK
" Brake Pos:",
" Pot:",
" Enc Delta:",
" Enc A:",
" Enc B:",
" Enc Sw:",
" - :" },
{ " Horz Raw:", // JOY
" Vert Raw:",
" Horz Min:",
" Horz Max:",
" Horz Dead:",
" Vert Min:",
" Vert Max:",
" Vert Dead:" },
{ " Governor:", // CAR
" Eng Idle:",
"Eng Redlin:",
"Speed Idle:",
"Spd Redlin:",
" Gas PID:",
" Gesturing:",
"BrakePosZP:" },
{ " Steer Lt:", // PWM
"Steer Stop:",
" Steer Rt:",
" Brake Ext:",
"Brake Stop:",
"Brake Retr:",
" Gas Idle:",
"Gas Redlin:" },
{ "Pres Error:", // BPID
" P Term:",
" I Term:",
" D Term:",
"Pres Delta:",
" Kc (P):",
" Fi (I):",
" Td (D):" },
{ " Eng Error:", // GPID
" P Term:",
" I Term:",
" D Term:",
" Eng Delta:",
" Kc (P):",
" Fi (I):",
" Td (D):" },
{ " Spd Error:", // CPID
" P Term:",
" I Term:",
" D Term:",
" Spd Delta:",
" Kc (P):",
" Fi (I):",
" Td (D):" },
};
char units[disp_fixed_lines][5] = {" ", "mmph", "rpm ", "adc ", "adc ", "adc ", "us ", "mmph", "rpm ", "us ", "adc ", "us " };
char tuneunits[sim_tuning_modes][disp_tuning_lines][5] = {
{ "mV ", "adc ", "adc ", "det ", " ", " ", " ", " " }, // LOCK
{ "adc ", "adc ", "adc ", "adc ", "adc ", "adc ", "adc ", "adc " }, // JOY
{ "% ", "rpm ", "rpm ", "mmph", "mmph", " ", " ", "adc " }, // CAR
{ "us ", "us ", "us ", "us ", "us ", "us ", "us ", "us " }, // PWM
{ "adc ", "adc ", "adc ", "adc ", "adc ", "*1k ", "Hz ", "ns " }, // BPID
{ "mmph", "mmph", "mmph", "mmph", "mmph", "*1k ", "Hz ", "ns " }, // GPID
{ "rpm ", "rpm ", "rpm ", "rpm ", "rpm ", "*1k ", "Hz ", "ns " } // CPID
};
char simgrid[touch_rows][touch_cols][6] = {
{ "sim ", " I ", "pres+", "rpm+ ", "car+ " },
{ " ", " B ", "pres-", "rpm- ", "car- " },
{ "pid ", " N ", " (-) ", "joy ^", " (+) " },
{ "val ", " C ", "< joy", "joy v", "joy >" }
};
char modecard[6][7] = { "Shutdn", "Basic", "Stall", "Hold", "Fly", "Cruise" };
uint16_t colorcard[6] = { RED, MGT, YEL, YEL, GRN, CYN };
char tunecard[7][5] = { "Run ", "Joy ", "Car ", "PWM ", "Bpid", "Gpid", "Cpid" };
// Settable calibration values and control parameters
//
// When setting time values in us, consider each loop completes in around 65000us (or 200us without screen writes)
bool laboratory = true; // Indicates we're not live on a real car. Allows launch of simulation interface by touching upper left corner
bool gas_pid = true; // Are we using pid to get gas pulse output from desired engine rpm in fly mode, or just setting proportional
bool display_enabled = true; // Should we run 325x slower in order to get bombarded with tiny numbers? Probably.
bool cruise_gesturing = false; // Is cruise mode enabled by gesturing? Otherwise by press of cruise button
float brake_pid_kc = 0.8; // PID proportional coefficient (brake). How hard to push for each unit of difference between measured and desired pressure (unitless range 0-1)
float brake_pid_fi_mhz = 0.02; // PID integral frequency factor (brake). How much harder to push for each unit time trying to reach desired pressure (in 1/us (mhz), range 0-1)
float brake_pid_td_us = 0.4; // PID derivative time factor (brake). How much to dampen sudden braking changes due to P and I influences (in us, range 0-1)
float brake_pid_pos_kx = 0.6; // Extra brake actuator position influence. This kicks in when the actuator is below the pressure zeropoint, to bring it up (unitless range 0-1)
float cruise_pid_kc = 0.9; // PID proportional coefficient (cruise) How many RPM for each unit of difference between measured and desired car speed (unitless range 0-1)
float cruise_pid_fi_mhz = 0.005; // PID integral frequency factor (cruise). How many more RPM for each unit time trying to reach desired car speed (in 1/us (mhz), range 0-1)
float cruise_pid_td_us = 0.0; // PID derivative time factor (cruise). How much to dampen sudden RPM changes due to P and I influences (in us, range 0-1)
float gas_pid_kc = 0.85; // PID proportional coefficient (gas) How much to open throttle for each unit of difference between measured and desired RPM (unitless range 0-1)
float gas_pid_fi_mhz = 0.001; // PID integral frequency factor (gas). How much more to open throttle for each unit time trying to reach desired RPM (in 1/us (mhz), range 0-1)
float gas_pid_td_us = 0.3; // PID derivative time factor (gas). How much to dampen sudden throttle changes due to P and I influences (in us, range 0-1)
float joy_ema_alpha = 0.05; // alpha value for ema filtering, lower is more continuous, higher is more responsive (0-1).
float pot_ema_alpha = 0.2; // alpha value for ema filtering, lower is more continuous, higher is more responsive (0-1).
float battery_ema_alpha = 0.01; // alpha value for ema filtering, lower is more continuous, higher is more responsive (0-1).
float pressure_ema_alpha = 0.1; // alpha value for ema filtering, lower is more continuous, higher is more responsive (0-1).
float carspeed_ema_alpha = 0.05; // alpha value for ema filtering, lower is more continuous, higher is more responsive (0-1).
float engine_rpm_ema_alpha = 0.2; // alpha value for ema filtering, lower is more continuous, higher is more responsive (0-1).
uint16_t joy_vert_min_adc = 9; // ADC count of furthest joy position in down direction (ADC count 0-4095)
uint16_t joy_vert_max_adc = 4095; // (3728 at 3.3V VDD) ADC count of furthest joy position in up direction (ADC count 0-4095)
uint16_t joy_horz_min_adc = 9; // ADC count of furthest joy position in left direction (ADC count 0-4095)
uint16_t joy_horz_max_adc = 4095; // (3728 at 3.3V VDD) ADC count of furthest joy position in right direction (ADC count 0-4095)
uint16_t joy_vert_deadband_adc = 220; // Width of inert readings around center which we should treat as center (vert) (ADC count 0-4095)
uint16_t joy_horz_deadband_adc = 220; // Width of inert readings around center which we should treat as center (horz) (ADC count 0-4095)
int16_t pressure_min_adc = 0; // Brake pressure when brakes are effectively off (ADC count 0-4095)
int16_t pressure_max_adc = 2048; // Highest possible pressure achievable by the actuator (ADC count 0-4095)
int16_t pressure_margin_adc = 12; // Margin of error when comparing brake pressure adc values (ADC count 0-4095)
int16_t pressure_spike_thresh_adc = 60; // min pressure delta between two readings considered a spike to ignore (ADC count 0-4095)
int16_t pressure_lp_thresh_adc = 1200; // max delta acceptable over three consecutive readings (ADC count 0-4095)
uint16_t brake_hold_initial_adc = 1200; // Pressure initially applied when brakes are hit to auto-stop the car (ADC count 0-4095)
uint16_t brake_hold_increment_adc = 10; // Incremental pressure added periodically when auto stopping (ADC count 0-4095)
uint32_t brake_increment_interval_us = 500000; // How often to apply increment during auto-stopping (in us)
uint16_t brake_pos_retracted_adc = 0; // Brake position value corresponding to retract limit of actuator (ADC count 0-4095)
uint16_t brake_pos_zeropoint_adc = 1000; // Brake position value corresponding to the point where fluid PSI hits zero (ADC count 0-4095)
uint16_t brake_pos_extended_adc = 4095; // Brake position value corresponding to extend limit of actuator (ADC count 0-4095)
uint32_t gesture_flytimeout_us = 250000; // Time allowed for joy mode-change gesture motions (Fly mode <==> Cruise mode) (in us)
uint32_t sanity_timeout_us = 7000000; // Gives certain loops an eventual way out if failures prevent normal completion (in us)
uint32_t car_stop_timeout_us = 400000; // Time after last magnet pulse when we can assume the car is stopped (in us)
uint32_t engine_stop_timeout_us = 400000; // Time after last magnet pulse when we can assume the engine is stopped (in us)
uint16_t engine_idle_rpm = 700; // Min value for engine hz, corresponding to low idle (in rpm)
uint16_t engine_redline_rpm = 4000; // Max value for engine_rpm, pedal to the metal (in rpm)
uint16_t engine_margin_rpm = 15; // Margin of error for checking engine rpm (in rpm)
uint16_t engine_spike_thresh_rpm = 500; // min rpm delta between two readings considered a spike to ignore (in rpm)
uint16_t engine_lp_thresh_rpm = 1000; // max delta acceptable over three consecutive readings (in rpm)
uint16_t carspeed_spike_thresh_mmph = 1500; // min speed delta between two readings considered a spike to ignore (in milli-mph)
uint16_t carspeed_lp_thresh_mmph = 3000; // max delta acceptable over three consecutive readings (in milli-mph)
uint16_t carspeed_idle_mmph = 3000; // What is our steady state speed at engine idle? Pulley rotation frequency (in milli-mph)
uint16_t carspeed_redline_mmph = 20000; // What is our steady state speed at redline? Pulley rotation frequency (in milli-mph)
uint16_t cruise_max_change_mmph = 500; // What's the max car cruise speed change from a single speed adjustment? (in milli-mph)
uint32_t cruise_adj_period_us = 250000; // how often cruise mode applies speed adjustments based on joystick vert position (in us)
uint32_t cruise_sw_timeout_us = 500000; // how long do you have to hold down the cruise button to start cruise mode (in us)
uint8_t gas_governor_percent = 85; // Software governor will only allow this percent of full-open throttle (percent 0-100)
uint16_t gas_pulse_idle_us = 1761; // Gas duty cycle on-time corresponding to fully closed throttle (in us)
uint16_t gas_pulse_redline_us = 1544; // Gas duty cycle on-time corresponding to full open throttle (in us)
uint16_t steer_pulse_right_us = 840; // Duty cycle on-time corresponding to full-speed right steering (in us)
uint16_t steer_pulse_stop_us = 1500; // Center point on-time corresponding to zero steering motor movement (in us)
uint16_t steer_pulse_left_us = 2200; // Duty cycle on-time corresponding to full-speed left steering (in us)
uint16_t default_pulse_margin_us = 30; // Default margin of error for comparisons of pulse on-times (in us)
uint16_t brake_pulse_retract_us = 1000; // Duty cycle on-time corresponding to full-speed retraction of brake actuator (in us)
uint16_t brake_pulse_stop_us = 1425; // Center point on-time for zero motor movement (in us)
uint16_t brake_pulse_extend_us = 1900; // Duty cycle on-time corresponding to full-speed extension of brake actuator (in us)
uint16_t encoder_invalid_timeout_us = 1000; // Used to prevent bouncing and noise generated interrupts (in us)
uint16_t default_margin_adc = 20; // Default margin of error for comparisons of adc values (ADC count 0-4095)
uint32_t touch_timeout_us = 300000; // Minimum acceptable time between two valid touches (in us)
uint32_t sim_modify_period_us = 150000; // How fast to change variable values when holding modify button down in simulator (in us)
// Non-settable variables
//
uint8_t sim_tuning_mode = LOCK;
uint8_t sim_tuning_mode_old = sim_tuning_mode;
uint8_t sim_selected_value = 0;
uint8_t sim_selected_value_old = 0;
float gas_pid_i_term_rpm = 0.0;
float gas_pid_d_term_rpm = 0.0;
float gas_pid_derivative_rpmperus = 0.0;
float cruise_pid_i_term_mmph = 0.0;
float cruise_pid_d_term_mmph = 0.0;
float cruise_pid_derivative_mmphperus = 0.0;
float brake_pid_i_term_adc = 0.0;
float brake_pid_d_term_adc = 0.0;
float brake_pid_derivative_adcperus = 0.0;
uint16_t engine_rpm = 0; // Current engine speed in rpm
uint16_t engine_filt_rpm = 0; // Current engine speed in rpm
uint16_t engine_last_rpm = 0;
uint16_t engine_old_rpm = 0;
uint16_t battery_mv = 10000;
uint16_t battery_filt_mv = 10000;
uint16_t pot_filt_adc = adc_midscale_adc;
uint16_t joy_vert_adc = adc_midscale_adc;
uint16_t joy_horz_adc = adc_midscale_adc;
uint16_t joy_vert_filt_adc = adc_midscale_adc;
uint16_t joy_horz_filt_adc = adc_midscale_adc;
uint16_t steer_pulse_out_us = steer_pulse_stop_us; // pid loop output to send to the actuator (steering)
uint16_t brake_pulse_out_us = brake_pulse_stop_us; // pid loop output to send to the actuator (brake)
uint32_t brake_timer_us = 0; // Timer used to control braking increments
int16_t brake_pid_error_adc = 0;
int16_t brake_pid_error_last_adc = 0;
int16_t brake_pid_integral_adcus = 0;
int16_t brake_pid_pos_error_adc = 0;
int16_t pressure_adc = 0;
int16_t pressure_delta_adc = 0;
int32_t pressure_target_adc = 0; // Stores new setpoint to give to the pid loop (brake)
int16_t pressure_filt_adc = 0; // Stores new setpoint to give to the pid loop (brake)
uint16_t pressure_last_adc = adc_midscale_adc; // Some pressure reading history for noise handling (-1)
uint16_t pressure_old_adc = adc_midscale_adc; // Some pressure reading history for noise handling (-2)
uint16_t gas_target_rpm = 0; // Stores new setpoint to give to the pid loop (gas)
int16_t gas_pid_error_rpm = 0;
int16_t gas_delta_rpm = 0;
int16_t gas_pid_error_last_rpm = 0;
int16_t gas_pid_integral_rpmus = 0;
int16_t gas_pulse_delta_us;
uint16_t gas_pulse_out_us = gas_pulse_idle_us; // pid loop output to send to the actuator (gas)
uint32_t sanity_timer_us; // Allows code to fail in a sensible way in certain circumstances
uint32_t gesture_timer_us = 0; // Used to keep track of time for gesturing
uint32_t cruise_timer_adj_us = 0;
int16_t cruise_engine_delta_rpm = 0; //
int16_t cruise_pid_error_mmph = 0;
int16_t cruise_pid_error_last_mmph = 0;
int16_t cruise_pid_integral_mmphus = 0;
int16_t carspeed_delta_mmph = 0; //
uint16_t carspeed_target_mmph = 0.0; // Stores new setpoint to give to the pid loop (cruise) in milli-mph
uint32_t carspeed_mmph = 0; // Current car speed in mph
uint32_t carspeed_filt_mmph = 0; //
uint32_t carspeed_last_mmph = 0; //
uint32_t carspeed_old_mmph = 0; //
uint8_t gesture_progress = 0; // How many steps of the Cruise Mode gesture have you completed successfully (from Fly Mode)
uint8_t runmode = SHUTDOWN; // Variable to store what mode we're in
uint8_t oldmode = SHUTDOWN; // So we can tell when the mode has just changed
bool neutral = true;
bool ignition = false;
bool disp_redraw_all = true;
bool basicmodesw = false;
bool cruise_sw = false;
bool cruise_sw_held = false;
bool shutdown_complete = true; // Shutdown mode has completed its work and can stop activity
bool we_just_switched_modes = true; // For mode logic to set things up upon first entry into mode
bool sim_out = LOW;
bool simulate = false;
char disp_draw_buffer[8]; // Used to convert integers to ascii for purposes of displaying on screen
char disp_values[disp_lines][8];
int16_t disp_age_quanta[disp_lines];
uint32_t disp_ages_us[disp_lines];
#define disp_nobools 4
bool disp_bool_values[disp_nobools];
char disp_bool_buffer;
// int16_t disp_bool_age_quanta[disp_nobools];
// uint32_t disp_bool_ages_us[disp_nobools];
uint32_t old_tach_time_us;
uint32_t old_speedo_time_us;
uint32_t cruise_sw_timer_us = 0;
uint32_t now_us = micros();
uint32_t pid_timer_us = micros();
uint32_t sim_timer_us = micros();
int8_t sim_modify_polarity = 0;
uint32_t sim_modify_timer_us = micros();
bool sim_edit_mode = false;
uint32_t touch_timer_us = micros();
uint32_t loopno = 1;
uint32_t loopzero = 0;
// int16_t pressure_min_psi = 0; // Brake pressure when brakes are effectively off (psi 0-1000)
// int16_t pressure_max_psi = 500; // Highest possible pressure achievable by the actuator (psi 0-1000)
// float pid_freq_hz = 1000000/pid_period_us; // PID looping frequency for all pid loops. (in Hz)
// Volatile variables - for variables set inside ISRs
//
volatile uint32_t tach_timer_us = micros();
volatile uint32_t tach_last_us = tach_timer_us;
volatile uint32_t speedo_timer_us = micros();
volatile uint32_t tach_delta_us = 0;
volatile uint32_t speedo_last_us = speedo_timer_us;
volatile uint32_t speedo_delta_us = 0;
volatile uint32_t encoder_timer_us = 0; // Used to prevent bouncing and noise generated interrupts
volatile int16_t encoder_delta = 0; // Keeps track of un-handled rotary clicks of the encoder. Positive for CW clicks, Negative for CCW.
// volatile bool encoder_turn_in_progress = false;
volatile bool encoder_sw_isr_flag = false; // flag for fresh interrupt (push down or up)
volatile bool led_state = LOW;
// volatile bool enc_turn_isr_flag = false;
// volatile bool speedo_isr_flag = false;
// volatile uint16_t int_count = 0;
// volatile uint16_t* pwm[] = { &OCR5A, &OCR5B, &OCR5C }; // &OCR1A, &OCR1B, &OCR1C, &OCR3A, &OCR3B, &OCR3C, &OCR4A, &OCR4B, &OCR4C, // Store the addresses of the PWM timer compare (duty-cycle) registers:
// bool encoder_sw_event = false; // flag for button event ready for service
bool encoder_a_raw, encoder_b_raw;
bool encoder_sw = false;
bool encoder_sw_last = encoder_sw;
int32_t encoder_delta_last = encoder_delta;
// bool encoder_button = LOW; // Is the encoder button being pushed? If so this would be high. Changes of this value will interrupt
// Instantiate objects
Adafruit_FT6206 touchpanel = Adafruit_FT6206(); // Touch panel
Adafruit_ILI9341 tft = Adafruit_ILI9341(tft_cs_pin, tft_dc_pin); // LCD screen
SdFat sd; // SD card filesystem
#define approot "cantroller2020"
#define logfile "log.txt"
#define error(msg) sd.errorHalt(F(msg)) // Error messages stored in flash.
SdFile root; // Directory file.
SdFile file; // Use for file creation in folders.
// char cinBuf[40]; // Buffer for Serial input.
// ArduinoOutStream cout(Serial); // Create a Serial output stream.
// ArduinoInStream cin(Serial, cinBuf, sizeof(cinBuf)); // Create a serial input stream.
// Instantiate PID loops
//
// Steering: Motor direction and velocity are controlled with PWM, proportional to joystick horizontal direction and magnitude
// Setpoint Value: Proportional to Joystick Horz ADC value. 0V = Full Left, 2.5V = Stop, 5V = Full Right
// Measured Value: We have no feedback, other than the joystick current horizontal position
// Actuator Output Value: PWM signal to Steering Jaguar unit. 0% duty = Full Left, 50% = Stop, 100% = Full Right
// Limits: Reed switch limit signals for left and right may be handled by us, or by the jaguar controller
// Setpoint scaling: Kp/Ki/Kd values should decrease appropriately as a function of vehicle speed (safety)
//
// Notes: The steering has no feedback sensing, other than two digital limit switches at the ends of travel.
// So just consider the error to be the difference between the joystick position and the last output value.
//
// Brakes: Motor direction & velocity are controlled with PWM until brake pressure matches pressure setpoint
// Setpoint Value: * Default: Pressure setpoint proportional to Joystick Vert distance from center when below center.
// * In Hold Mode: Brake adjusts automatically to keep car stopped, as long as joystick below center
// * In Cruise Mode: Brake is kept released
// Measured Value: Analog voltage from brake fluid pressure sensor. 0-3.3V proportional to 0-1000psi
// Actuator Output Value: PWM signal to Brake Jaguar unit.
// 0% duty = Full speed extend (less brake), 50% = Stop, 100% = Full speed Retract (more brake)
// Position: Analog 0-3.3V proportional to the travel length of the actuator (not used as feedback)
//
// Gas: Servo angle is adjusted with PWM until engine rpm matches rpm target setpoint
// Setpoint Value: * Default: RPM Setpoint proportional to Joystick Vert distance from center when above center.
// * In Cruise Mode: Upward or downward joy vert motions modify vehicle speed setpoint
// Gas pid setppoints are output from cruise pid
// Measured Value: * Default: Engine speed determined from tach pulses
// Actuator Output Value: PWM signal to throttle servo
// 0% duty = Fully close throttle. This will idle. 100% duty = Fully open throttle.
//
// Cruise:
// Setpoint Value: * Default: Set to the current vehicle speed when mode is entered.
// * In Cruise Mode: Upward or downward joy vert motions modify vehicle speed setpoint
// Measured Value: * Vehicle speed determined from pulley sensor pulses
// Actuator Output Value: Cruise PID output values become setpoint values for the Gas PID above
// 0% duty = Car stopped. 100% duty = Car max speed.
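// Summary sketch (illustrative only, condensed from the descriptions above) of how the loops cascade.
// The names are the ones used in this file; arrows show data flow, not code:
//   joystick vert (or cruise-mode adjustments) --> carspeed_target_mmph
//   [Cruise PID]  carspeed_target_mmph vs carspeed_filt_mmph --> gas_target_rpm
//   [Gas PID]     gas_target_rpm       vs engine_filt_rpm    --> gas_pulse_out_us   --> throttle servo PWM
//   [Brake PID]   pressure_target_adc  vs pressure_filt_adc  --> brake_pulse_out_us --> brake motor PWM
//   Steering (no feedback): joy_horz_filt_adc is mapped directly to steer_pulse_out_us --> steering motor PWM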
//
// One way to tune a PID loop:
// 1) Set Kp = 0, Ki = 0, and Kd = 0
// 2) Increase Kp until output starts to oscillate
// 3) Increase Ki until oscillation stops
// 4) If improved response time is needed, increase Kd slightly and go back to step 2
//
// Ziegler-Nichols method:
// 1) Set Kp = 0, Ki = 0, and Kd = 0
// 2) Increase Kp until output starts to oscillate.
// 3) Record Kc = critical value of Kp, and Pc = period of oscillations
// 4) Set Kp=0.6*Kc and Ki=1.2*Kc/Pc and Kd=Kc*Pc/13.33 (Or for P only: Kp=Kc/2) (Or for PI: Kp=0.45*Kc and Ki=0.54*Kc/Pc)
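// Worked example of the Ziegler-Nichols rules above, using hypothetical numbers purely for illustration:
// suppose sustained oscillation begins at Kc = 2.0 with an oscillation period Pc = 0.5 seconds. Then:
//   Kp = 0.6*Kc = 1.2,  Ki = 1.2*Kc/Pc = 4.8 (per second),  Kd = Kc*Pc/13.33 = ~0.075 (seconds)
// Note this file stores its integral and derivative gains in per-microsecond (_mhz) and microsecond (_us)
// terms, so values worked out in seconds would need converting before being plugged in.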
// PID brake_pid(&pressure, &brake_pwm, &brake_target, brake_pid_kp, brake_pid_ki_mhz, brake_pid_kd_us, DIRECT);
// FastPID brake_pid(brake_pid_kp, brake_pid_ki_mhz, brake_pid_kd_us, pid_freq_hz, 8, false);
// FastPID cruise_pid(cruise_pid_kp, cruise_pid_ki_mhz, cruise_pid_kd_us, pid_freq_hz, 8, false);
// Servo library lets us set pwm outputs given an on-time pulse width in us
static Servo steer_servo;
static Servo brake_servo;
// static Servo gas_servo;
// Interrupt service routines
//
void encoder_a_isr(void) { // When A goes high if B is low, we are CW, otherwise we are CCW -- This ISR intended for encoders like the one on the tan proto board
// if (micros()-encoder_timer_us > encoder_invalid_timeout_us) { // If transition is valid and not already triggered by other pin
// encoder_timer_us = micros();
if (encoder_b_raw) encoder_delta++;
else encoder_delta--;
led_state = !led_state;
digitalWrite(led_pin, led_state);
}
// void encoder_sw_isr(void) {
// encoder_button = 1-digitalRead(encoder_sw_pin);
// encoder_sw_isr_flag = true;
// }
void tach_isr(void) { // The tach and speedo isrs compare value returned from the micros() function with the value from the last interrupt to determine period, to get frequency of the vehicle pulley rotations.
tach_timer_us = micros(); // This might screw up things. Anders would remember
tach_delta_us = tach_timer_us-tach_last_us;
tach_last_us = tach_timer_us;
}
void speedo_isr(void) { // A better approach would be to read, reset, and restart a hardware interval timer in the isr. Better still would be to regularly read a hardware binary counter clocked by the pin - no isr.
speedo_timer_us = micros(); // This might screw up things. Anders would remember
speedo_delta_us = speedo_timer_us-speedo_last_us;
speedo_last_us = speedo_timer_us;
}
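// Quick sanity check of the period-to-frequency conversions applied to these ISR timestamps later in loop()
// (numbers are illustrative only):
//   Tach:   engine_rpm = 60000000/tach_delta_us, so a 60000 us gap between magnet pulses reads as 1000 rpm
//   Speedo: carspeed_mmph = 179757270/speedo_delta_us, so a ~179757 us gap between pulley pulses reads as ~1000 milli-mph (1 mph)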
void setup() {
pinMode(led_pin, OUTPUT);
pinMode(encoder_a_pin, INPUT_PULLUP);
pinMode(encoder_b_pin, INPUT_PULLUP);
pinMode(sim_pulse_pin, OUTPUT);
pinMode(brake_pwm_pin, OUTPUT);
pinMode(steer_pwm_pin, OUTPUT);
pinMode(tft_dc_pin, OUTPUT);
pinMode(encoder_sw_pin, INPUT_PULLUP);
pinMode(gas_pwm_pin, OUTPUT);
pinMode(ignition_pin, INPUT);
pinMode(neutral_pin, INPUT_PULLUP);
pinMode(basicmodesw_pin, INPUT_PULLUP);
pinMode(cruise_sw_pin, INPUT_PULLUP);
pinMode(tach_pulse_pin, INPUT);
pinMode(speedo_pulse_pin, INPUT);
pinMode(joy_horz_pin, INPUT);
pinMode(joy_vert_pin, INPUT);
pinMode(pressure_pin, INPUT);
pinMode(brake_pos_pin, INPUT);
pinMode(battery_pin, INPUT);
pinMode(usd_cs_pin, OUTPUT);
pinMode(tft_cs_pin, OUTPUT);
pinMode(pot_wipe_pin, INPUT);
pinMode(tp_irq_pin, INPUT);
// pinMode(tft_ledk_pin, OUTPUT);
// Set all outputs to known sensible values
digitalWrite(tft_cs_pin, HIGH); // Prevent bus contention
digitalWrite(usd_cs_pin, HIGH); // Prevent bus contention
digitalWrite(tft_dc_pin, LOW);
digitalWrite(sim_pulse_pin, LOW);
digitalWrite(pot_pwr_pin, HIGH); // Power up the potentiometer
digitalWrite(led_pin, HIGH); // Light on
analogReadResolution(adc_bits); // Set Arduino Due to 12-bit resolution (default is same as Mega=10bit)
// Arduino Due timer research:
//
// TCLK0 (PB26, Peripheral B) is external clock for Timer/Counter0, Ch0, Enabled by register TC0XC0S
// TCLK6 (PC27, Peripheral B) is external clock for Timer/Counter2, Ch0, Enabled by register TC2XC0S (?)
// Peripheral ID for programming interrupt for TC0: 27. For TC2: 29
// Enabling TC requires configuration of PMC (Power Mgmt Controller) to enable TC clock
// QDEC must be disabled (default) for TC0 1 and 2 to be independent
// Each 32bit TC channel has a 32bit counter, increments on rising edges of clock. Counter Value in TC_CV reg, Overflow sets COVFS bit in TC_SR
// Configure clock source for each channel with TCCLKS bit of TC_BMR. External clk signals XC0, XC2. Can invert with CLKI bit in TC_CMR, or configure burst (BURST in TC_CMR)
// External clock freq must be 2.5x less than peripheral clock. 100 Hz x 2.5 = 250 Hz. No problem, yeah?
// Enable/disable clock with CLKEN and CLKDIS in TC_CCR. See pg 862-863 for config of stop, disable, trigger events
// Set mode to capture or waveform (PWM) using WAVE bit of TC_CMR
// See waveform mode setup to configure PWM. See Quadrature encoder setup on pg 875 to decode rotary encoders.
// TC0XC0S = 0 B.26 D22
// Can sync up the three PWM outputs by setting SYNCx bits of PWM_SCM. Synced channels get CPREx, CPRDx, and CALG0 fields from those of ch 0 instead of their own channel. Pg 985
// PWM setup (for gas): (found here: https://forum.arduino.cc/index.php?topic=386981.0)
// Output 50Hz PWM at a resolution of 14 bits on pin DAC1 (D67) (pin: PB16, peripheral: PWML0)
// The PWM channels can be multiplexed onto a number of different pins on the Due. Which PWM pins are available is specified by the multiplexing tables in the SAM3X8E's datasheet.
// The SAM3X8E has four tables for each of its 32-bit ports A, B, C and D. Most pins having two peripheral functions: "Peripheral A" and "Peripheral B".
// It's possible for each PWM channel to control two complementary (opposite of each other) square wave outputs, labelled in the table as PWMLx (low) and PWMHx (high).
// It's first necessary to enable the PWM controller and multiplex the PWM controller's output to DAC1, which is pin PB16, using the REG_PMC_PCER1, REG_PIOB_ABSR and REG_PIOB_PDR reg.
// The PWM controller's clock register (REG_PWM_CLK) allows you to either divide down the master clock (84MHz), either by using a prescaler, or clock divisor, or both.
// There are two clock divisor and prescaler registers: A and B, so you can generate up to two base frequencies for your 8 channels.
// The channel mode register (REG_PWM_CMR0) connects the divided clock (2MHz) to channel 0 and selects the PWM mode of operation, in this case dual slope PWM, (center aligned).
// REG_PWM_CPRD0 and REG_PWM_CDTY0 determine the period (frequency) and duty cycle (phase) respectively for a given channel. It's also necessary to enable ch 0 with the REG_PWM_ENA reg.
// By the way, if you plan on changing the period or duty cycle during PWM operation, you'll need to use the REG_PWM_CPRDUPDx and REG_PWM_CDTYUPDx update registers for the given channel.
// The equation for calculating the PWM frequency is also contained in the SAM3X8E's datasheet. For dual slope PWM: PWMFrequency = MCLK/(2*CPRD*DIVA) = 84MHz/(2*20000*42) = 50Hz
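// Worked example of that frequency equation, matching the register writes just below (which assume
// pid_period_us = 20000): with DIVA = 42 the PWM clock is 84MHz/42 = 2MHz, and in dual slope mode
// PWMFrequency = MCLK/(2*CPRD*DIVA) = 84000000/(2*20000*42) = 50Hz. To target some other frequency f
// while keeping DIVA = 42, pick CPRD = 84000000/(2*42*f), e.g. f = 100Hz gives CPRD = 10000.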
REG_PMC_PCER1 |= PMC_PCER1_PID36; // Enable PWM
REG_PIOB_ABSR |= PIO_ABSR_P16; // Set PWM pin perhipheral type A or B, in this case B
REG_PIOB_PDR |= PIO_PDR_P16; // Set PWM pin to an output
REG_PWM_CLK = PWM_CLK_PREA(0) | PWM_CLK_DIVA(42); // Set the PWM clock rate to 2MHz (84MHz/42)
REG_PWM_CMR0 = PWM_CMR_CALG | PWM_CMR_CPRE_CLKA; // Enable dual slope PWM and set the clock source as CLKA
REG_PWM_CPRD0 = pid_period_us; // Set the PWM frequency 2MHz/(2 * 20000) = 50Hz
REG_PWM_CDTY0 = gas_pulse_idle_us; // Set the PWM duty cycle to nominal angle
REG_PWM_ENA = PWM_ENA_CHID0; // Enable the PWM channel
// To change duty cycle: CW limit: REG_PWM_CDTYUPD0 = 1000; Center: REG_PWM_CDTYUPD0 = 1500; CCW limit: REG_PWM_CDTYUPD0 = 2000;
// analogWrite(steer_pwm_pin, steer_stop_pwm); // Write values range from 0 to 255
// analogWrite(gas_pwm_pin, gas_min_pwm);
// analogWrite(brake_pwm_pin, brake_stop_pwm);
// while (!Serial); // needed for debugging?!
Serial.begin(115200);
if (display_enabled) {
Serial.print(F("Init LCD... "));
tft.begin();
tft.setRotation(1); // 0: Portrait, USB Top-Rt, 1: Landscape, usb=Bot-Rt, 2: Portrait, USB=Bot-Rt, 3: Landscape, USB=Top-Lt
// tft.setFont(&FreeSans12pt7b); // Use tft.setFont() to return to fixed font
for (uint8_t lineno=0; lineno<=arraysize(telemetry); lineno++) {
disp_age_quanta[lineno] = -1;
disp_ages_us[lineno] = 0;
memset(disp_values[lineno],0,strlen(disp_values[lineno]));
}
for (uint8_t row=0; row<disp_nobools; row++) {
disp_bool_values[row] = 0;
}
draw_text(false);
Serial.println(F("Success"));
}
Serial.print(F("Captouch initialization... "));
if (! touchpanel.begin(40)) { // pass in 'sensitivity' coefficient
Serial.println(F("Couldn't start FT6206 touchscreen controller"));
// while (1);
}
else {
Serial.println(F("Capacitive touchscreen started"));
}
/*
while (1) { // Useful to uncomment and move around the code to find points of crashing
Serial.print(F("Alive, "));
Serial.println(micros());
delay(250);
}
*/
Serial.print(F("Initializing filesystem... ")); // SD card is pretty straightforward, a single call.
if (! sd.begin(usd_cs_pin, SD_SCK_MHZ(25))) { // ESP32 requires 25 mhz limit
Serial.println(F("SD begin() failed"));
for(;;); // Fatal error, do not continue
}
sd_init();
Serial.println(F("Filesystem started"));
// Set up our interrupts
Serial.print(F("Interrupts... "));
attachInterrupt(digitalPinToInterrupt(encoder_a_pin), encoder_a_isr, RISING); // One type of encoder (e.g. Panasonic EVE-YBCAJ016B) needs Rising int on pin A only
// attachInterrupt(digitalPinToInterrupt(encoder_b_pin), encoder_b_isr, FALLING); // Other type (e.g. on the tan proto board) needs both pins. Interrupts needed on both pins
// attachInterrupt(digitalPinToInterrupt(encoder_sw_pin), encoder_sw_isr, CHANGE);
attachInterrupt(digitalPinToInterrupt(tach_pulse_pin), tach_isr, RISING);
attachInterrupt(digitalPinToInterrupt(speedo_pulse_pin), speedo_isr, RISING);
Serial.println(F("Set up and enabled"));
steer_servo.attach(steer_pwm_pin);
brake_servo.attach(brake_pwm_pin);
// gas_servo.attach(gas_pwm_pin);
// steer_pid.setOutputRange(steer_min_pwm, steer_max_pwm);
// gas_pid.setOutputRange(gas_min_pwm, gas_max_pwm);
// brake_pid.setOutputRange(brake_pwm_retract_pwm, brake_pwm_extend_pwm); // Lines only useful with FastPID library
// cruise_pid.setOutputRange(engine_idle_rpm, engine_redline_rpm); // Lines only useful with FastPID library
Serial.println(F("Setup finished"));
}
/* Here are the older ISRs for use with the encoder on the tan protoboard. These need CHANGE interrupts
void encoder_a_isr(void) { // If A goes high before B, we are turning CCW -- This ISR intended for encoders like the one on the tan proto board
if (micros()-encoder_timer_us > encoder_invalid_timeout_us && digitalRead(encoder_b_pin)) { // If transition is valid and not already triggered by other pin
encoder_timer_us = micros();
// encoder_turn_in_progress = CCW; // Flag to the other pin's interrupt which is about to happen
encoder_delta--; // Increment the turns accumulated since last handling by the code. Negative delta means CW direction
led_state = !led_state;
digitalWrite(led_pin, led_state);
}
}
void encoder_b_isr(void) { // If B goes high before A, we are turning CW -- This ISR intended for encoders like the one on the tan proto board
if (micros()-encoder_timer_us > encoder_invalid_timeout_us && digitalRead(encoder_a_pin)) { // If transition is valid and not already triggered by other pin
encoder_timer_us = micros();
// encoder_turn_in_progress = CW; // Flag to the other pin's interrupt which is about to happen
encoder_delta++; // Increment the turns accumulated since last handling by the code. Positive delta means CCW direction
led_state = !led_state;
digitalWrite(led_pin, led_state);
}
}
void encoder_a_isr(void) { // If A goes high before B, we are turning CCW -- This ISR intended for encoders like the one on the tan proto board
if (digitalRead(encoder_a_pin)) { // If we just went high,
if (!encoder_turn_in_progress) { // only act if we were the first to go high
encoder_turn_in_progress = true; // Prevent the other pin's ISR from taking action when it goes high in a moment
encoder_delta--; // Increment the turns accumulated since last handling by the code. Negative delta means CCW direction
digitalWrite(led_pin, HIGH);
}
}
else { // If we just went low
encoder_turn_in_progress = false; // Reset to correctly handle next turn. Technically only one of the ISRs needs this clause.
digitalWrite(led_pin, LOW);
}
}
void encoder_b_isr(void) { // If B goes high before A, we are turning CW -- This ISR intended for encoders like the one on the tan proto board
if (digitalRead(encoder_b_pin)) { // If we just went high,
if (!encoder_turn_in_progress) { // only act if we were the first to go high
encoder_turn_in_progress = true; // Prevent the other pin's ISR from taking action when it goes high in a moment
encoder_delta++; // Increment the turns accumulated since last handling by the code. Positive delta means CW direction
digitalWrite(led_pin, HIGH);
}
}
else { // If we just went low
encoder_turn_in_progress = false; // Reset to correctly handle next turn. Technically only one of the ISRs needs this clause.
digitalWrite(led_pin, LOW);
}
}
*/
// Functions to write to the screen efficiently
//
void draw_text(bool tuning) { // set tuning to true in order to just erase the tuning section and redraw
tft.setTextColor(GRY2);
tft.setTextSize(1);
if (tuning) tft.fillRect(0,146,138,94,BLK);
else {
tft.fillScreen(BLK);
for (uint8_t lineno=0; lineno<arraysize(telemetry); lineno++) {
tft.setCursor(2, lineno*disp_line_height_pix+disp_vshift_pix); // +disp_line_height_pix/2
tft.println(telemetry[lineno]);
tft.setCursor(114, lineno*disp_line_height_pix+disp_vshift_pix); // +disp_line_height_pix/2
tft.println(units[lineno]);
}
}
for (uint8_t lineno=0; lineno<arraysize(tunings[sim_tuning_mode]); lineno++) {
tft.setCursor(2, (lineno+arraysize(telemetry))*disp_line_height_pix+disp_vshift_pix); // +disp_line_height_pix/2
tft.println(tunings[sim_tuning_mode][lineno]);
tft.setCursor(114, (lineno+arraysize(telemetry))*disp_line_height_pix+disp_vshift_pix); // +disp_line_height_pix/2
tft.println(tuneunits[sim_tuning_mode][lineno]);
}
}
void draw_value(uint8_t lineno, int32_t value, uint8_t modeflag) {
uint16_t age_us = (now_us-disp_ages_us[lineno])/2500000; // Divide by us per color gradient quantum
memset(disp_draw_buffer,0,strlen(disp_draw_buffer));
if (modeflag == 0) itoa(value, disp_draw_buffer, 10); // Modeflag 0 is for writing numeric values for variables in the active data column at a given line
else if (modeflag == 1) strcpy(disp_draw_buffer, modecard[runmode]); // Modeflag 1 is used for writing the runmode. Each mode has a custom color which doesn't get stale
if (modeflag == 3 && sim_selected_value != sim_selected_value_old) { // Modeflag 3 is for highlighting a variable name when its value may be changed
tft.setCursor(2, (sim_selected_value_old+arraysize(telemetry))*disp_line_height_pix+disp_vshift_pix); // +disp_line_height_pix/2
tft.setTextColor(GRY2);
if (sim_tuning_mode != sim_tuning_mode_old) tft.print(tunings[sim_tuning_mode_old][sim_selected_value_old]);
else tft.print(tunings[sim_tuning_mode][sim_selected_value_old]);
if (sim_tuning_mode != LOCK && sim_selected_value >= 0) {
tft.setCursor(2, (sim_selected_value+arraysize(telemetry))*disp_line_height_pix+disp_vshift_pix); // +disp_line_height_pix/2
if (sim_edit_mode) tft.setTextColor(BLU);
else tft.setTextColor(WHT);
tft.print(tunings[sim_tuning_mode][sim_selected_value]);
}
sim_selected_value_old = sim_selected_value;
}
else if (modeflag == 2 && sim_tuning_mode != sim_tuning_mode_old) { // Modeflag 2 is used for displaying which set of tuning variables is being displayed. Text next to the runmode
tft.setCursor(112, disp_vshift_pix); // +disp_line_height_pix/2
tft.setTextColor(BLK); // Rewriting old value in black over visible value is efficient way to erase
tft.print(tunecard[sim_tuning_mode_old]);
tft.setCursor(112, disp_vshift_pix); // +disp_line_height_pix/2
tft.setTextColor(CYN);
tft.print(tunecard[sim_tuning_mode]);
sim_tuning_mode_old = sim_tuning_mode;
}
else if (strcmp(disp_values[lineno], disp_draw_buffer) || disp_redraw_all) { // If value differs, Erase old value and write new
tft.setCursor(70, (lineno-1)*disp_line_height_pix+disp_vshift_pix); // +disp_line_height_pix/2
tft.setTextColor(BLK); // Rewriting old value in black over visible value is efficient way to erase
tft.print(disp_values[lineno]);
tft.setCursor(70, (lineno-1)*disp_line_height_pix+disp_vshift_pix); // +disp_line_height_pix/2
if (modeflag == 1) tft.setTextColor(colorcard[runmode]);
else tft.setTextColor(GRN);
tft.print(disp_draw_buffer);
strcpy(disp_values[lineno], disp_draw_buffer);
disp_ages_us[lineno] = now_us;
disp_age_quanta[lineno] = 0;
}
else if (modeflag != 1 && age_us > disp_age_quanta[lineno] && age_us < 11) { // As readings age, redraw in new color
uint16_t color;
if (age_us < 8) color = 0x1FE0+age_us*0x2000; // Base of green with red added as you age
else color = 0xFFE0; // Yellow is achieved
if (age_us > 8) color -= (age_us-8)*0x100; // Then lose green as you age further
tft.setTextColor(color);
tft.setCursor(70, (lineno-1)*disp_line_height_pix+disp_vshift_pix); // +disp_line_height_pix/2
tft.print(disp_values[lineno]);
disp_age_quanta[lineno] = age_us;
} // Else don't draw anything, because we already did. Logic is 100s of times cheaper than screen drawing.
}
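// For reference on the aging color gradient used in draw_value() above (RGB565): text starts at 0x1FE0
// (full green plus a little red); each age quantum adds 0x2000, i.e. 4 counts in the 5-bit red field, so
// after 7 quanta it reaches 0xFFE0 (yellow); past age 8 it subtracts 0x100 (8 counts of the 6-bit green
// field) per quantum, fading the yellow toward red as the value goes stale.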
void draw_bool(bool value, uint8_t row) {
if ((disp_bool_values[row] != value) || disp_redraw_all) { // If value differs, Erase old value and write new
if (value) {
tft.setCursor(disp_width_pix/touch_cols+touch_cell_width_pix/2+11, row*disp_height_pix/touch_rows+touch_cell_height_pix/2-disp_line_height_pix);
tft.setTextColor(BLK);
tft.println("0");
tft.setCursor(disp_width_pix/touch_cols+touch_cell_width_pix/2+11, row*disp_height_pix/touch_rows+touch_cell_height_pix/2-disp_line_height_pix);
tft.setTextColor(CYN);
tft.println("1");
disp_bool_values[row] = 1;
}
else {
tft.setCursor(disp_width_pix/touch_cols+touch_cell_width_pix/2+11, row*disp_height_pix/touch_rows+touch_cell_height_pix/2-disp_line_height_pix);
tft.setTextColor(BLK);
tft.println("1");
tft.setCursor(disp_width_pix/touch_cols+touch_cell_width_pix/2+11, row*disp_height_pix/touch_rows+touch_cell_height_pix/2-disp_line_height_pix);
tft.setTextColor(DCYN);
tft.println("0");
disp_bool_values[row] = 0;
}
}
}
void draw_touchgrid(bool update) { // if update is true, will only redraw the at-risk elements of the grid
if (!update) {
for (uint8_t row = 0; row < touch_rows; row++) {
tft.drawFastHLine(0, row*disp_height_pix/touch_rows, disp_width_pix, DPNK); // x, y, length, color
}
for (uint8_t col = 0; col < touch_cols; col++) {
tft.drawFastVLine(col*disp_width_pix/touch_cols, 0, disp_height_pix, DPNK); // faster than tft.drawLine(0, 80, 320, 80, GRY1);
}
tft.drawFastHLine(0, disp_height_pix-1, disp_width_pix, DPNK); // x, y, length, color
tft.drawFastVLine(disp_width_pix-1, 0, disp_height_pix, DPNK); // faster than tft.drawLine(0, 80, 320, 80, GRY1);
}
tft.setTextColor(LPNK);
tft.setTextSize(1);
if (update) {
for (uint8_t row = 0; row < touch_rows; row++) {
tft.setCursor(disp_width_pix/touch_cols+touch_cell_width_pix/2-7, row*disp_height_pix/touch_rows+touch_cell_height_pix/2-disp_line_height_pix);
tft.println(simgrid[row][1]);
}
}
else {
for (uint8_t row = 0; row < touch_rows; row++) {
for (uint8_t col = 0; col < touch_cols; col++) {
tft.setCursor(col*disp_width_pix/touch_cols+touch_cell_width_pix/2-7, row*disp_height_pix/touch_rows+touch_cell_height_pix/2-disp_line_height_pix);
tft.println(simgrid[row][col]);
}
}
}
}
// Other functions
//
void sd_init() {
if (!sd.begin(usd_cs_pin, SD_SCK_MHZ(50))) { // Initialize at highest supported speed that is not over 50 mhz. Go lower if errors.
sd.initErrorHalt();
}
if (!root.open("/")) {
error("open root failed");
}
if (!sd.exists(approot)) {
if (sd.mkdir(approot)) {
Serial.println(F("Created approot directory\n")); // cout << F("Created approot directory\n");
}
else {
error("Create approot failed");
}
}
// Change volume working directory to Folder1.
// if (sd.chdir(approot)) {
// cout << F("\nList of files in appdir:\n");
// char *apppath = (char*)malloc((arraysize(appdir)+2)*sizeof(char));
// sd.ls(strcat("/",approot, LS_R);
// }
// else {
// error("Chdir approot failed\n");
// }
// if (!file.open(logfile, O_WRONLY | O_CREAT)) {
// error("Open logfile failed\n");
// }
// file.close();
Serial.println(F("Filesystem init finished\n")); // cout << F("Filesystem init finished\n");
// for (byte a = 10; a >= 1; a--)
// {
// char fileName[12];
// sprintf(fileName, "%d.txt", a);
// file = sd.open(fileName, FILE_WRITE); //create file
// }
}
// int16_t scale16(int16_t x, int16_t in_min, int16_t in_max, int16_t out_min, int16_t out_max) { // This is arduino map() in 32bit, just so I can easily refer to the source code (and portability I guess)
// return (x - in_min) * (out_max - out_min) / (in_max - in_min) + out_min;
// }
// int16_t rangebox16(int16_t amt, int16_t low, int16_t high) { // Limits a value to within a range
// if (amt < low) return low;
// else if (amt > high) return high;
// else return amt;
// }
// Main loop. Each time through we do these eight steps:
//
// 0) Beginning-of-the-loop nonsense
// 1) Gather new telemetry and filter the signals
// 2) Check if our current runmode has been overridden by certain specific conditions
// 3) Read joystick horizontal and determine new steering setpoint
// 4) Do actions based on which runmode we are in (including gas & brake setpoint), and possibly change runmode
// 5) Step the PID loops and update the actuation outputs
// 6) Service the user interface
// 7) Log to SD card
// 8) Do the control loop bookkeeping at the end of each loop
//
void loop() {
// 0) Beginning-of-the-loop nonsense
//
if (looptimer) {
Serial.print("Loop# ");
Serial.print(loopno); Serial.print(": ");
loopzero = micros(); // Start time for loop
}
// Update derived variable values in case they have changed
float gas_pid_ki_mhz = gas_pid_kc*gas_pid_fi_mhz; // Convert dependent-form PID coefficients to independent term for each of the influences
float gas_pid_kd_us = gas_pid_kc*gas_pid_td_us; // Convert dependent-form PID coefficients to independent term for each of the influences
float brake_pid_ki_mhz = brake_pid_kc*brake_pid_fi_mhz; // Convert dependent-form PID coefficients to independent term for each of the influences
float brake_pid_kd_us = brake_pid_kc*brake_pid_td_us; // Convert dependent-form PID coefficients to independent term for each of the influences
float brake_pid_pos_kp = brake_pid_kc*brake_pid_pos_kx; // Convert dependent-form PID coefficients to independent term for each of the influences
float cruise_pid_ki_mhz = cruise_pid_kc*cruise_pid_fi_mhz; // Convert dependent-form PID coefficients to independent term for each of the influences
float cruise_pid_kd_us = cruise_pid_kc*cruise_pid_td_us; // Convert dependent-form PID coefficients to independent term for each of the influences
uint16_t joy_vert_deadband_bot_adc = (adc_range_adc-joy_vert_deadband_adc)/2; // Lower threshold of vert joy deadband (ADC count 0-4095)
uint16_t joy_vert_deadband_top_adc = (adc_range_adc+joy_vert_deadband_adc)/2; // Upper threshold of vert joy deadband (ADC count 0-4095)
uint16_t joy_horz_deadband_bot_adc = (adc_range_adc-joy_horz_deadband_adc)/2; // Lower threshold of horz joy deadband (ADC count 0-4095)
uint16_t joy_horz_deadband_top_adc = (adc_range_adc+joy_horz_deadband_adc)/2; // Upper threshold of horz joy deadband (ADC count 0-4095)
uint16_t engine_govern_rpm = map(gas_governor_percent, 0, 100, 0, engine_redline_rpm); // Create an artificially reduced maximum for the engine speed
uint16_t gas_pulse_govern_us = map(gas_governor_percent*(engine_redline_rpm-engine_idle_rpm)/engine_redline_rpm, 0, 100, gas_pulse_idle_us, gas_pulse_redline_us); // Governor must scale the pulse range proportionally
uint16_t carspeed_govern_mmph = map(gas_governor_percent, 0, 100, 0, carspeed_redline_mmph); // Governor must scale the top vehicle speed proportionally
// uint16_t engine_center_rpm = (engine_idle_rpm+engine_govern_rpm)/2; // RPM value center between idle and govern, needed in pid.
// uint16_t carspeed_center_mmph = (carspeed_idle_mmph+carspeed_govern_mmph)/2; // Car speed value center between idle and govern, needed in pid.
// 1) Gather new telemetry and filter the signals
//
// Serial.print("Point0: "); Serial.println(carspeed_target_mmph);
// Read misc input signals
uint16_t brake_pos_adc = analogRead(brake_pos_pin);
// uint8_t tach_pulse_raw = digitalRead(tach_pulse_pin);
// uint8_t speedo_pulse_raw = digitalRead(speedo_pulse_pin);
// Potentiometer
uint16_t pot_adc = analogRead(pot_wipe_pin);
pot_filt_adc = (uint16_t)((pot_ema_alpha*(float)pot_adc) + ((1-pot_ema_alpha)*(float)pot_filt_adc)); // Apply EMA filter
// Encoder
encoder_a_raw = digitalRead(encoder_a_pin);
encoder_b_raw = digitalRead(encoder_b_pin);
encoder_sw = 1-digitalRead(encoder_sw_pin); // 1-value because electrical signal is active low
// Voltage of vehicle battery
uint16_t battery_adc = analogRead(battery_pin);
battery_mv = (uint16_t)(15638*((float)battery_adc)/adc_range_adc);
battery_filt_mv = (uint16_t)((battery_ema_alpha*(float)battery_mv) + ((1-battery_ema_alpha)*(float)battery_filt_mv)); // Apply EMA filter
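// The *_filt_* updates above and below all use the same exponential moving average form, shown here for
// reference (not new code): filt = alpha*raw + (1-alpha)*filt. With alpha = 0.1, for example, the filtered
// value reaches about 65% of a step change in the raw reading after 10 samples; the actual alphas used
// here are the *_ema_alpha constants defined elsewhere in this file.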
if (simulate) { // Any sensor data which should be mucked with or overridden automatically and continuously, during simulation should get set in here
if (brake_pos_adc < brake_pos_zeropoint_adc) brake_pos_adc = brake_pos_zeropoint_adc;
}
else { // When not simulating, read real sensors and filter them. Just those that would get taken over by the simulator go in here.
ignition = digitalRead(ignition_pin);
basicmodesw = 1-digitalRead(basicmodesw_pin); // 1-value because electrical signal is active low
neutral = 1-digitalRead(neutral_pin); // 1-value because electrical signal is active low
cruise_sw = 1-digitalRead(cruise_sw_pin); // 1-value because electrical signal is active low
// Tach
if (now_us-tach_timer_us < engine_stop_timeout_us) engine_rpm = (uint32_t)(60000000/(float)tach_delta_us); // Tachometer magnets/us * 60000000 (1 rot/magnet * 1000000 us/sec * 60 sec/min) gives rpm
else engine_rpm = 0; // If timeout since last magnet is exceeded
if (abs(engine_rpm-engine_old_rpm) > engine_lp_thresh_rpm || engine_rpm-engine_last_rpm < engine_spike_thresh_rpm) { // Remove noise spikes from tach values, if otherwise in range
engine_old_rpm = engine_last_rpm;
engine_last_rpm = engine_rpm;
}
else engine_rpm = engine_last_rpm; // Spike detected - ignore that sample
if (engine_rpm) engine_filt_rpm = (uint32_t)((engine_rpm_ema_alpha*(float)engine_rpm) + ((1-engine_rpm_ema_alpha)*(float)engine_filt_rpm)); // Sensor EMA filter
else engine_filt_rpm = 0;
// Speedo
if (now_us-speedo_timer_us < car_stop_timeout_us) carspeed_mmph = (uint32_t)(179757270/(float)speedo_delta_us); // Update car speed value
// magnets/us * 179757270 (1 rot/magnet * 1000000 us/sec * 3600 sec/hr * 1/19.85 gearing * 20*pi in/rot * 1/12 ft/in * 1000/5280 milli-mi/ft gives milli-mph // * 1/1.15 knots/mph gives milliknots
// Mule gearing: Total -19.845x (lo) ( Converter: -3.5x to -0.96x Tranny -3.75x (lo), -1.821x (hi), Final drive -5.4x )
else carspeed_mmph = 0;
if (abs(carspeed_mmph-carspeed_old_mmph) > carspeed_lp_thresh_mmph || carspeed_mmph-carspeed_last_mmph < carspeed_spike_thresh_mmph) { // Remove noise spikes from speedo values, if otherwise in range
carspeed_old_mmph = carspeed_last_mmph;
carspeed_last_mmph = carspeed_mmph;
}
else carspeed_mmph = carspeed_last_mmph; // Spike detected - ignore that sample
if (carspeed_mmph) carspeed_filt_mmph = (uint32_t)((carspeed_ema_alpha*(float)carspeed_mmph) + ((1-carspeed_ema_alpha)*(float)carspeed_filt_mmph)); // Sensor EMA filter
else carspeed_filt_mmph = 0;
// Brake pressure. Read sensor, then Remove noise spikes from brake feedback, if reading is otherwise in range
uint16_t pressure_adc = analogRead(pressure_pin);
if (abs(pressure_adc-pressure_old_adc) > pressure_lp_thresh_adc || pressure_adc-pressure_last_adc < pressure_spike_thresh_adc) {
pressure_old_adc = pressure_last_adc;
pressure_last_adc = pressure_adc;
}
else pressure_adc = pressure_last_adc; // Spike detected - ignore that sample
// pressure_psi = (uint16_t)(1000*(float)(pressure_adc)/adc_range_adc); // Convert pressure to units of psi
pressure_filt_adc = (uint16_t)((pressure_ema_alpha*(float)pressure_adc) + ((1-pressure_ema_alpha)*(float)pressure_filt_adc)); // Sensor EMA filter
}
now_us = micros();
if (looptimer) {
// Serial.print(now_us-loopzero); Serial.print(" ");
}
// 2) Check if our current runmode has been overridden by certain specific conditions
//
if (!ignition) runmode = SHUTDOWN; //} && laboratory != true) { // if ignition off --> Shutdown Mode
else if (basicmodesw) runmode = BASIC; // elif basicmode switch on --> Basic Mode
else if (!engine_filt_rpm) runmode = STALL; // elif engine not running --> Stall Mode
// 3) Read joystick then determine new steering setpoint
//
joy_vert_adc = analogRead(joy_vert_pin); // Read joy vertical
joy_horz_adc = analogRead(joy_horz_pin); // Read joy horizontal
if (!simulate || sim_halfass) { // Unless joystick input is fully simulated: snap the filt value to center when the joystick is inside its vert or horz deadband, otherwise EMA filter the raw adc value
if (joy_vert_adc > joy_vert_deadband_bot_adc && joy_vert_adc < joy_vert_deadband_top_adc) joy_vert_filt_adc = adc_midscale_adc;
else joy_vert_filt_adc = (uint16_t)(joy_ema_alpha*joy_vert_adc + (1-joy_ema_alpha)*joy_vert_filt_adc);
// Serial.print(joy_vert_deadband_top_adc); // joy_horz_deadband_top_adc, joy_horz_max_adc, steer_pulse_stop_us, steer_pulse_right_us ");
if (joy_horz_adc > joy_horz_deadband_bot_adc && joy_horz_adc < joy_horz_deadband_top_adc) joy_horz_filt_adc = adc_midscale_adc;
else joy_horz_filt_adc = (uint16_t)(joy_ema_alpha*joy_horz_adc + (1-joy_ema_alpha)*joy_horz_filt_adc);
}
if (!(runmode == SHUTDOWN && (!carspeed_filt_mmph || shutdown_complete))) { // Figure out the steering setpoint, if we want steering
if (joy_horz_filt_adc >= joy_horz_deadband_top_adc) steer_pulse_out_us = map(joy_horz_filt_adc, joy_horz_deadband_top_adc, joy_horz_max_adc, steer_pulse_stop_us, steer_pulse_right_us);
else if (joy_horz_filt_adc <= joy_horz_deadband_bot_adc) steer_pulse_out_us = map(joy_horz_filt_adc, joy_horz_deadband_bot_adc, joy_horz_min_adc, steer_pulse_stop_us, steer_pulse_left_us);
else steer_pulse_out_us = steer_pulse_stop_us; // Stop the steering motor if inside the deadband, otherwise scale and set output in the right direction
// Serial.print("joy_horz_filt_adc, joy_horz_deadband_top_adc, joy_horz_max_adc, steer_pulse_stop_us, steer_pulse_right_us ");
// Serial.print(joy_horz_filt_adc); Serial.print(" "); Serial.print(joy_horz_deadband_top_adc); Serial.print(" "); Serial.print(joy_horz_max_adc); Serial.print(" "); Serial.print(steer_pulse_stop_us); Serial.print(" "); Serial.println(steer_pulse_right_us);
// Serial.print("joy V/H: "); Serial.print(joy_vert_adc); Serial.print(" "); Serial.print(joy_vert_filt_adc); Serial.print(" "); Serial.print(joy_horz_adc); Serial.print(" "); Serial.println(joy_horz_filt_adc);
}
// Serial.print("Point1: "); Serial.println(carspeed_target_mmph);
now_us = micros();
if (looptimer) {
// Serial.print(now_us-loopzero); Serial.print(" ");
}
// Serial.print("Point2: "); Serial.println(carspeed_target_mmph);
// 4) Do actions based on which runmode we are in (and set gas/brake setpoints), and possibly change runmode
//
if (runmode == SHUTDOWN) {
if (basicmodesw) shutdown_complete = true; // If basic mode switch is enabled
else if (ignition && engine_filt_rpm > 0) { // This should not happen, but we should catch any possibility
Serial.println(F("Error: Engine RPM without ignition signal")); // , engine_filt_rpm, ignition
runmode = HOLD; // Might be better to go to an error mode or failure mode in cases like this
}
else if (we_just_switched_modes) { // If basic switch is off, we need to stop the car and release brakes and gas before shutting down
gas_target_rpm = engine_idle_rpm; // Begin Letting off the gas all the way
shutdown_complete = false;
if (carspeed_filt_mmph) {
pressure_target_adc = brake_hold_initial_adc; // More brakes, etc. to stop the car
brake_timer_us = now_us;
sanity_timer_us = now_us;
}
}
if (!shutdown_complete) { // If we haven't yet stopped the car and released the brakes and gas all the way
if (!carspeed_filt_mmph || now_us-sanity_timer_us > sanity_timeout_us) { // Car is stopped, but maybe we still need to release the brakes
pressure_target_adc = pressure_min_adc; // Start to Fully release brakes
if (pressure_filt_adc <= pressure_min_adc + pressure_margin_adc) shutdown_complete = true; // With this set, we will do nothing from here on out (until mode changes, i.e. ignition)
}
else if (now_us-brake_timer_us > brake_increment_interval_us) {
pressure_target_adc += brake_hold_increment_adc; // Slowly add more brakes until car stops
if (pressure_target_adc > pressure_max_adc) pressure_target_adc = pressure_max_adc;
brake_timer_us = now_us;
}
}
}
else if (runmode == BASIC) { // Basic mode is so basic, it's only 1 line long
if ((!basicmodesw) && engine_filt_rpm) runmode = HOLD; // If we turned off the basic mode switch with engine running, go to Hold mode. If engine is not running, we'll end up in Stall Mode automatically
}
else if (runmode == STALL) { // In stall mode, the gas doesn't have feedback
if (engine_filt_rpm) runmode = HOLD; // Enter Hold Mode if we started the car
else { // Actuators still respond and everything, even tho engine is turned off
pressure_target_adc = pressure_min_adc; // Default when joystick not pressed
gas_pulse_out_us = gas_pulse_idle_us; // Default when joystick not pressed
if (joy_vert_filt_adc >= joy_vert_deadband_top_adc) { // If we are pushing up
// In stall mode there is no engine rpm for the PID to use as feedback, so we bypass the PID and just set gas_pulse_out_us proportional to
// the joystick position. This works whether there is normally a gas PID or not.
gas_pulse_out_us = map(joy_vert_filt_adc, joy_vert_deadband_top_adc, joy_vert_max_adc, gas_pulse_idle_us, gas_pulse_govern_us);
// gas_target_rpm = engine_idle_rpm+engine_range_rpm*((float)joy_vert_filt_adc-(float)joy_vert_deadband_top_adc)/(float)joy_vert_deadband_bot_adc;
}
else if (joy_vert_filt_adc <= joy_vert_deadband_bot_adc) { // If we are pushing down
// Scale joystick value to pressure adc setpoint
// pressure_target_adc = scalefloat((float)joy_vert_filt_adcpot_filtered_adc, 0.0, 4096.0, (float)pressure_min_adc, (float)pressure_max_adc);
pressure_target_adc = map(joy_vert_filt_adc, joy_vert_deadband_bot_adc, joy_vert_min_adc, pressure_min_adc, pressure_max_adc);
// Serial.print(joy_vert_filt_adc); Serial.print(" ");
// Serial.print(joy_vert_deadband_bot_adc); Serial.print(" ");
// Serial.print(joy_vert_min_adc); Serial.print(" ");
// Serial.print(pressure_min_adc); Serial.print(" ");
// Serial.print(pressure_min_adc); Serial.print(" ");
// Serial.println(pressure_target_adc); //Serial.print(" ");
// pressure_min_adc+(pressure_max_adc-pressure_min_adc)*((float)joy_vert_deadband_bot_adc-(float)joy_vert_filt_adc)/(float)joy_vert_deadband_bot_adc;
}
}
}
else if (runmode == HOLD) {
// Serial.println("Welcome to Hold mode"); Serial.print(" ");
// Serial.print(joy_vert_filt_adc); Serial.print(" ");
// Serial.print(joy_vert_deadband_top_adc); Serial.print(" ");
// Serial.print(neutral); Serial.print(" ");
if (joy_vert_filt_adc >= joy_vert_deadband_top_adc && !neutral) runmode = FLY; // Enter Fly Mode if joystick is pushed up, as long as car is in gear
else if (we_just_switched_modes) { // Release throttle and push brake upon entering hold mode
gas_target_rpm = engine_idle_rpm; // Let off gas (if gas using PID mode)
if (!carspeed_filt_mmph) pressure_target_adc += brake_hold_increment_adc; // If the car is already stopped then just add a touch more pressure and then hold it.
else pressure_target_adc = brake_hold_initial_adc; // Otherwise, these hippies need us to stop the car for them
brake_timer_us = now_us;
}
else if (carspeed_filt_mmph && now_us-brake_timer_us > brake_increment_interval_us) { // Each interval the car is still moving, push harder
pressure_target_adc += brake_hold_increment_adc; // Slowly add more brakes until car stops
brake_timer_us = now_us;
}
pressure_target_adc = constrain(pressure_target_adc, pressure_min_adc, pressure_max_adc); // Just make sure we don't try to push harder than we can (constrain() returns the clamped value rather than modifying in place)
//Serial.print("runmode: "); Serial.println(runmode);
}
else if (runmode == FLY) {
// Serial.println("Welcome to Fly mode"); Serial.print(" ");
if (we_just_switched_modes) {
gesture_progress = 0;
gesture_timer_us = now_us-(gesture_flytimeout_us+1); // Initialize gesture timer to already-expired value
cruise_sw_held = false;
cruise_sw_timer_us = now_us;
}
if (cruise_gesturing) { // If gestures are used to go to cruise mode
if (!gesture_progress && joy_vert_filt_adc >= joy_vert_deadband_bot_adc && joy_vert_filt_adc <= joy_vert_deadband_top_adc) { // Re-zero gesture timer for potential new gesture whenever joystick at center
gesture_timer_us = now_us;
}
if (now_us-gesture_timer_us >= gesture_flytimeout_us) gesture_progress = 0; // If gesture timeout has expired, cancel any in-progress gesture
else { // Otherwise check for successful gesture motions
if (!gesture_progress && joy_vert_filt_adc >= joy_vert_max_adc-default_margin_adc) { // If joystick quickly pushed to top, step 1 of gesture is successful
gesture_progress++;
gesture_timer_us = now_us;
}
else if (gesture_progress == 1 && joy_vert_filt_adc <= joy_vert_min_adc+default_margin_adc) { // If joystick then quickly pushed to bottom, step 2 succeeds
gesture_progress++;
gesture_timer_us = now_us;
}
else if (gesture_progress == 2 && joy_vert_filt_adc >= joy_vert_deadband_bot_adc && joy_vert_filt_adc <= joy_vert_deadband_top_adc) { // If joystick then quickly returned to center, go to Cruise mode
runmode = CRUISE;
}
}
}
else { // If cruise mode is entered by long press of a cruise button
if (!cruise_sw) { // If button not currently pressed
if (cruise_sw_held && now_us-cruise_sw_timer_us > cruise_sw_timeout_us) runmode = CRUISE; // If button was just held long enough, upon release enter Cruise mode
cruise_sw_held = false; // Cancel button held state
}
else if (!cruise_sw_held) { // If button is being pressed, but we aren't in button held state
cruise_sw_timer_us = now_us; // Start hold time timer
cruise_sw_held = true; // Get into that state
}
}
if ((!carspeed_filt_mmph && joy_vert_filt_adc <= joy_vert_deadband_bot_adc) || neutral) runmode = HOLD; // Go to Hold Mode if we have braked to a stop or fell out of gear
else { // Use PID to drive
gas_target_rpm = engine_idle_rpm; // Default when joystick not pressed
pressure_target_adc = pressure_min_adc; // Default when joystick not pressed
if (joy_vert_filt_adc > joy_vert_deadband_top_adc) { // If we are trying to accelerate
gas_target_rpm = map(joy_vert_filt_adc, joy_vert_deadband_top_adc, joy_vert_max_adc, engine_idle_rpm, engine_govern_rpm);
}
else if (joy_vert_filt_adc < joy_vert_deadband_bot_adc) { // If we are trying to brake, scale joystick value to determine pressure adc setpoint
pressure_target_adc = map(joy_vert_filt_adc, joy_vert_deadband_bot_adc, joy_vert_min_adc, pressure_min_adc, pressure_max_adc);
}
}
}
else if (runmode == CRUISE) {
if (!carspeed_filt_mmph || neutral) { // In case we slam into a brick wall or some fool takes the car out of gear, get out of cruise mode
Serial.println(F("Error: Car stopped or taken out of gear in cruise mode")); // , carspeed_filt_mmph, neutral
runmode = HOLD; // Back to Hold Mode
}
else if (we_just_switched_modes) {
carspeed_target_mmph = carspeed_filt_mmph; // Begin cruising with cruise set to current speed
pressure_target_adc = pressure_min_adc; // Let off the brake and keep it there till out of Cruise mode
cruise_timer_adj_us = now_us;
gesture_timer_us = now_us;
cruise_sw_held = false;
}
if (joy_vert_filt_adc <= joy_vert_min_adc+default_margin_adc && now_us-gesture_timer_us < gesture_flytimeout_us) runmode = FLY; // If joystick quickly pushed to bottom
if (cruise_sw) cruise_sw_held = true; // Pushing cruise button sets up return to fly mode
else if (cruise_sw_held) { // Release of button drops us back to fly mode
cruise_sw_held = false;
runmode = FLY;
}
else if (joy_vert_deadband_bot_adc < joy_vert_filt_adc && joy_vert_deadband_top_adc > joy_vert_filt_adc) { // joystick at center: reset gesture timer
gesture_timer_us = now_us;
}
else if (now_us-cruise_timer_adj_us > cruise_adj_period_us) {
if (joy_vert_filt_adc > joy_vert_deadband_top_adc) {
// carspeed_target_mmph += cruise_max_change_mmph*(float)(joy_vert_filt_adc-joy_vert_deadband_top_adc)/(joy_vert_max_adc-joy_vert_deadband_top_adc);
uint32_t temp = map(joy_vert_filt_adc, joy_vert_deadband_top_adc, joy_vert_max_adc, 0, cruise_max_change_mmph); // Speed increase proportional to how far up the joystick is pushed
carspeed_target_mmph += temp;
Serial.print("Temp1: "); Serial.println(temp);
Serial.print("Carspeed_target1: "); Serial.println(carspeed_target_mmph);
// carspeed_target_mmph += map(joy_vert_filt_adc, joy_vert_deadband_top_adc, joy_vert_max_adc, 0, cruise_max_change_mmph);
}
else if (joy_vert_filt_adc < joy_vert_deadband_bot_adc) {
// carspeed_target_mmph -= cruise_max_change_mmph*(float)(joy_vert_deadband_bot_adc-joy_vert_filt_adc)/(joy_vert_deadband_bot_adc-joy_vert_min_adc);
uint32_t temp = map(joy_vert_filt_adc, joy_vert_deadband_bot_adc, joy_vert_min_adc, 0, cruise_max_change_mmph); // Speed decrease proportional to how far down the joystick is pushed
carspeed_target_mmph -= temp;
Serial.print("Temp2: "); Serial.println(temp);
Serial.print("Carspeed_target2: "); Serial.println(carspeed_target_mmph);
// carspeed_target_mmph -= map(joy_vert_filt_adc, joy_vert_deadband_bot_adc, joy_vert_min_adc, 0, cruise_max_change_mmph);
}
cruise_timer_adj_us = now_us;
}
}
else { // Obviously this should never happen
Serial.println(F("Error: Invalid runmode entered")); // , runmode
runmode = HOLD;
}
now_us = micros();
if (looptimer) {
// Serial.print(now_us-loopzero); Serial.print(" ");
}
// Serial.print("Point3: "); Serial.println(carspeed_target_mmph);
// 5) Step the pids, update the actuator outputs (at regular intervals)
//
if (now_us-pid_timer_us > pid_period_us && !(runmode == SHUTDOWN && shutdown_complete)) { // If control system is supposed to be in control, recalculate pid and update outputs (at regular intervals)
steer_pulse_out_us = constrain(steer_pulse_out_us, steer_pulse_right_us, steer_pulse_left_us); // Don't be out of range
steer_servo.writeMicroseconds(steer_pulse_out_us); // Write steering value to jaguar servo interface
if (!basicmodesw) { // Unless basicmode switch is turned on, we want brake and gas
// Here is the brake PID math
pressure_target_adc = constrain(pressure_target_adc, pressure_min_adc, pressure_max_adc); // Make sure pressure target is in range
brake_pid_error_adc = pressure_target_adc - pressure_filt_adc; // Determine the error in pressure
brake_pid_integral_adcus += brake_pid_error_adc*pid_period_us; // Calculate pressure integral
brake_pid_i_term_adc = constrain((int16_t)(brake_pid_ki_mhz*(float)brake_pid_integral_adcus), pressure_min_adc-pressure_max_adc, pressure_max_adc-pressure_min_adc); // limit integral to 2x the full range of the input
brake_pid_derivative_adcperus = (float)((brake_pid_error_adc - brake_pid_error_last_adc))/(float)pid_period_us; // Calculate pressure derivative
brake_pid_error_last_adc = brake_pid_error_adc; // For use next time in pressure derivative calculation
brake_pid_d_term_adc = brake_pid_kd_us*(float)brake_pid_derivative_adcperus;
if (brake_pos_adc < brake_pos_zeropoint_adc) brake_pid_pos_error_adc = brake_pos_zeropoint_adc-brake_pos_adc; // Additional position influence to ensure actuator position doesn't go below the zero pressure point
else brake_pid_pos_error_adc = 0;
pressure_delta_adc = (int16_t)(brake_pid_kc*(float)brake_pid_error_adc + brake_pid_i_term_adc + brake_pid_d_term_adc + brake_pid_pos_kp*(float)brake_pid_pos_error_adc); // Add all the terms and scale to get delta in adc counts
brake_pulse_out_us = map(pressure_delta_adc+pressure_min_adc, pressure_min_adc, pressure_max_adc, brake_pulse_extend_us, brake_pulse_retract_us); // Scale pressure adc value to range of PWM pulse on-time
// Serial.print("Brake: "); Serial.print(" "); //
// Serial.print(brake_pos_adc); Serial.print(" "); // Brake position current adc value
// Serial.print(brake_pos_zeropoint_adc); Serial.print(" "); // Brake position zero point
// Serial.print(brake_pid_pos_error_adc); Serial.print(" "); // Brake position error
// Serial.print(pressure_filt_adc); Serial.print(" "); // Pressure sensor feedback value (adc)
// Serial.print(pressure_target_adc); Serial.print(" "); // Pressure target setpoint (adc count)
// Serial.print(brake_pid_kc*brake_pid_error_adc); Serial.print(" "); // Proportional component
// Serial.print(brake_pid_i_term_adc); Serial.print(" "); // Integral component
// Serial.print((int16_t)(brake_pid_ki_mhz*(float)brake_pid_integral_adcus)); Serial.print(" "); //
// Serial.print(brake_pid_d_term_adc); Serial.print(" "); // Derivative component
// Serial.print(brake_pid_pos_kp*brake_pid_pos_error_adc); Serial.print(" "); // Derivative component
// Serial.print(pressure_filt_adc); Serial.print(" "); // Filtered sensor feedback value
// Serial.print(pressure_adc); Serial.print(" "); // Raw sensor feedback value
// Serial.print(pressure_delta_adc); Serial.print(" "); // Raw sensor feedback value
// Serial.println(brake_pulse_out_us); // Serial.print(" "); // Raw sensor feedback value
// brake_pwm_out_pwm = brake_pid.step(pressure_target_adc, pressure_adc); // Old attempt to use library PID loop
brake_pulse_out_us = constrain(brake_pulse_out_us, brake_pulse_retract_us, brake_pulse_extend_us); // Refuse to exceed range
brake_servo.writeMicroseconds(brake_pulse_out_us); // Write result to jaguar servo interface
if (runmode == CRUISE) { // Update gas rpm target from cruise pid output
// gas_target_rpm = cruise_pid.step(carspeed_target_mmph, carspeed_filt_mmph);
cruise_pid_error_mmph = carspeed_target_mmph - carspeed_filt_mmph; // Determine the mmph error
cruise_pid_integral_mmphus += cruise_pid_error_mmph*pid_period_us; // Calculate mmph integral
cruise_pid_i_term_mmph = constrain((int16_t)(cruise_pid_ki_mhz*(float)cruise_pid_integral_mmphus), carspeed_idle_mmph-carspeed_redline_mmph, carspeed_redline_mmph-carspeed_idle_mmph); // limit integral to 2x the full range of the input
cruise_pid_derivative_mmphperus = (float)((cruise_pid_error_mmph - cruise_pid_error_last_mmph))/(float)pid_period_us; // Calculate mmph derivative
cruise_pid_error_last_mmph = cruise_pid_error_mmph; // For use next time in mmph derivative calculation
cruise_pid_d_term_mmph = cruise_pid_kd_us*(float)cruise_pid_derivative_mmphperus;
carspeed_delta_mmph = (int16_t)(cruise_pid_kc*(float)cruise_pid_error_mmph + cruise_pid_i_term_mmph + cruise_pid_d_term_mmph); // Add all the terms and scale to get delta from center in mmph
gas_target_rpm = map(carspeed_delta_mmph+carspeed_idle_mmph, carspeed_idle_mmph, carspeed_govern_mmph, engine_idle_rpm, engine_govern_rpm); // Scale mmph value to range of rpm
Serial.print("Cruise: "); Serial.print(" "); //
Serial.print(carspeed_target_mmph); Serial.print(" "); // Raw sensor feedback value
Serial.print(cruise_pid_kc*cruise_pid_error_mmph); Serial.print(" "); // Proportional component
Serial.print(cruise_pid_kc*cruise_pid_fi_mhz*cruise_pid_integral_mmphus); Serial.print(" "); // Integral component
Serial.print(cruise_pid_kc*cruise_pid_td_us*cruise_pid_derivative_mmphperus); Serial.print(" "); // Derivative component
Serial.print(carspeed_filt_mmph); Serial.print(" "); // Filtered sensor feedback value
Serial.print(carspeed_delta_mmph); Serial.print(" "); // Raw sensor feedback value
Serial.print(carspeed_delta_mmph+carspeed_idle_mmph); Serial.print(" "); // Raw sensor feedback value
Serial.println(gas_target_rpm); // Serial.print(" "); // Raw sensor feedback value
}
if (runmode != STALL) { // If Hold, Fly or Cruise mode, then we need to determine gas actuator output from rpm target
gas_target_rpm = constrain(gas_target_rpm, engine_idle_rpm, engine_govern_rpm); // Make sure desired rpm isn't out of range (due to crazy pid math, for example)
if (gas_pid) { // If use of gas pid is enabled, calculate pid to get pulse output from rpm target
// Here is the gas PID math
gas_pid_error_rpm = gas_target_rpm - engine_filt_rpm; // Determine the rpm error
gas_pid_integral_rpmus += gas_pid_error_rpm*pid_period_us; // Calculate rpm integral
gas_pid_i_term_rpm = constrain((int16_t)(gas_pid_ki_mhz*(float)gas_pid_integral_rpmus), engine_idle_rpm-engine_govern_rpm, engine_govern_rpm-engine_idle_rpm); // Prevent integral runaway by limiting it to 2x the full range of the input
gas_pid_derivative_rpmperus = (float)((gas_pid_error_rpm - gas_pid_error_last_rpm))/(float)pid_period_us; // Calculate rpm derivative
gas_pid_error_last_rpm = gas_pid_error_rpm; // For use next time in rpm derivative calculation
gas_pid_d_term_rpm = gas_pid_kd_us*(float)gas_pid_derivative_rpmperus;
gas_delta_rpm = (int16_t)(gas_pid_kc*(float)gas_pid_error_rpm + gas_pid_i_term_rpm + gas_pid_d_term_rpm); // Add all the terms and scale to get delta from center in rpm
gas_pulse_out_us = map(gas_delta_rpm+engine_idle_rpm, engine_idle_rpm, engine_govern_rpm, gas_pulse_idle_us, gas_pulse_govern_us); // Scale rpm value to range of PWM pulse on-time
// Serial.print("Gas: "); Serial.print(" "); //
// Serial.print(gas_target_rpm); Serial.print(" "); // Raw sensor feedback value
// Serial.print(gas_pid_kc*gas_pid_error_rpm); Serial.print(" "); // Proportional component
// Serial.print(gas_pid_kc*gas_pid_fi_mhz*gas_pid_integral_rpmus); Serial.print(" "); // Integral component
// Serial.print(gas_pid_kc*gas_pid_td_us*gas_pid_derivative_rpmperus); Serial.print(" "); // Derivative component
// Serial.print(engine_filt_rpm); Serial.print(" "); // Filtered sensor feedback value
// Serial.print(gas_delta_rpm); Serial.print(" "); // Raw sensor feedback value
// Serial.print(gas_delta_rpm+engine_idle_rpm); Serial.print(" "); // Raw sensor feedback value
// Serial.println(gas_pulse_out_us); // Serial.print(" "); // Raw sensor feedback value
}
else { // Otherwise, use proportional gas instead of PID
gas_pulse_out_us = map(gas_target_rpm, engine_idle_rpm, engine_govern_rpm, gas_pulse_idle_us, gas_pulse_govern_us); // scale gas rpm target onto gas pulsewidth target (unless already set in stall mode logic)
}
}
gas_pulse_out_us = constrain(gas_pulse_out_us, gas_pulse_govern_us, gas_pulse_idle_us); // Make sure pulse time is in range
// Serial.println(gas_pulse_out_us);
// gas_servo.writeMicroseconds(gas_pulse_out_us); // Update gas pwm output (with gas pid or servo mode)
REG_PWM_CDTYUPD0 = gas_pulse_out_us; // Update the pin duty cycle
}
pid_timer_us = now_us;
}
// Serial.print("Point4: "); Serial.println(carspeed_target_mmph);
now_us = micros();
if (looptimer) {
// Serial.print(now_us-loopzero); Serial.print(" ");
}
// 6) Service the user interface
//
if (touchpanel.touched()) { // If someone is groping our touch screen, we should address that
TS_Point touchpoint = touchpanel.getPoint(); // Retrieve a point
touchpoint.x = map(touchpoint.x, 0, disp_height_pix, disp_height_pix, 0); // Rotate touch coordinates to match tft
touchpoint.y = map(touchpoint.y, 0, disp_width_pix, disp_width_pix, 0);
uint16_t touch_y = tft.height()-touchpoint.x;
uint16_t touch_x = touchpoint.y;
uint8_t touch_row = (uint8_t)((float)touch_y/touch_cell_height_pix);
uint8_t touch_col = (uint8_t)((float)touch_x/touch_cell_width_pix);
Serial.print("Got touched: \n");
Serial.print(touch_x); Serial.print(" ");
Serial.print(touch_y); Serial.print(" ");
Serial.print(touch_row); Serial.print(" ");
Serial.println(touch_col); // Serial.print(" ");
// Simulation inputs
//
// On the bench, system conditions can be simulated by touching cells of the on-screen touch grid
if (touch_row == 0 && touch_col == 0 && laboratory) {
simulate = 1-simulate;
if (simulate) {
draw_touchgrid(false); // Draw the touch grid over the display, in its entirety
neutral = false; // Set any initial conditions for simulation
}
else {
draw_text(false); // Reset the screen, completely, to get rid of button grid
disp_redraw_all = true; // Signal drawing functions to redraw everything
}
}
else if (simulate) {
if (touch_row == 0) {
if (touch_col == 1) {
if (now_us-touch_timer_us > touch_timeout_us) {
ignition = 1-ignition;
touch_timer_us = now_us;
}
}
else if (touch_col == 2) {
if (pressure_max_adc-pressure_filt_adc < 25) pressure_filt_adc = pressure_max_adc;
else pressure_filt_adc += 25;
}
else if (touch_col == 3) {
if (engine_govern_rpm-engine_filt_rpm < 25) engine_filt_rpm = engine_govern_rpm;
else engine_filt_rpm += 25;
}
else if (touch_col == 4) {
if (carspeed_govern_mmph-carspeed_filt_mmph < 250) carspeed_filt_mmph = carspeed_govern_mmph;
else carspeed_filt_mmph += 250;
}
}
else if (touch_row == 1) {
if (touch_col == 0) {
}
else if (touch_col == 1) {
if (now_us-touch_timer_us > touch_timeout_us) {
basicmodesw = 1-basicmodesw;
touch_timer_us = now_us;
}
}
else if (touch_col == 2) {
if (pressure_filt_adc-pressure_min_adc < 25) pressure_filt_adc = pressure_min_adc;
else pressure_filt_adc -= 25;
}
else if (touch_col == 3) {
if (engine_filt_rpm < 25) engine_filt_rpm = 0;
else engine_filt_rpm -= 25;
}
else if (touch_col == 4) {
if (carspeed_filt_mmph < 250) carspeed_filt_mmph = 0;
else carspeed_filt_mmph -= 250;
}
}
else if (touch_row == 2) {
if (touch_col == 0) {
          if (now_us-touch_timer_us > touch_timeout_us) {
sim_tuning_mode += 1;
if (sim_tuning_mode >= sim_tuning_modes) sim_tuning_mode -= sim_tuning_modes;
sim_selected_value = -1;
draw_text(true); // Redraw the tuning corner of the screen
draw_touchgrid(false); // Redraw entire touch grid
touch_timer_us = now_us;
}
}
else if (touch_col == 1) {
          if (now_us-touch_timer_us > touch_timeout_us) {
neutral = !neutral;
touch_timer_us = now_us;
}
}
else if (touch_col == 2) { // Subtract from the selected tunable value
if (sim_tuning_mode != LOCK && sim_selected_value >= 0) sim_modify_polarity = -1;
}
else if (touch_col == 3) {
if (joy_vert_max_adc-joy_vert_filt_adc < 25) joy_vert_filt_adc = joy_vert_max_adc;
else joy_vert_filt_adc += 25;
}
else if (touch_col == 4) { // Add to the selected tunable value
if (sim_tuning_mode != LOCK && sim_selected_value >= 0) sim_modify_polarity = 1;
}
}
else if (touch_row == 3) {
if (touch_col == 0) {
          if (now_us-touch_timer_us > touch_timeout_us) {
sim_selected_value += 1;
if (sim_selected_value > arraysize(tunings[sim_tuning_mode])) sim_selected_value -= arraysize(tunings[sim_tuning_mode]);
if (sim_tuning_mode >= 4 && sim_selected_value == 0) sim_selected_value = 5; // Skip unchangeable values for all PID modes
if (sim_tuning_mode == JOY && sim_selected_value == 0) sim_selected_value = 2; // Skip unchangeable values for joy mode
touch_timer_us = now_us;
}
}
else if (touch_col == 1) {
cruise_sw = true;
}
else if (touch_col == 2) {
if (joy_horz_filt_adc-joy_horz_min_adc < 25) joy_horz_filt_adc = joy_horz_min_adc;
else joy_horz_filt_adc -= 25;
}
else if (touch_col == 3) {
if (joy_vert_filt_adc-joy_vert_min_adc < 25) joy_vert_filt_adc = joy_vert_min_adc;
else joy_vert_filt_adc -= 25;
}
else if (touch_col == 4) {
if (joy_horz_max_adc-joy_horz_filt_adc < 25) joy_horz_filt_adc = joy_horz_max_adc;
else joy_horz_filt_adc += 25;
}
}
}
}
else if (simulate) { // Put momentarily-set simulated button values back to default values
cruise_sw = false; // Makes this button effectively momentary
sim_modify_polarity = 0; // Stop changing value
}
// Act on any encoder action
// if (encoder_sw_event) {
// Serial.print(F("encoderoder button = "));
// Serial.println(encoder_button);
// }
// if (encoder_turns != 0) {
// Serial.print(F("encoder turned = "));
// Serial.println(encoder_turns);
// }
// End-of-loop handling of new encoder actions. Events may be serviced in next loop, then they will be forgotten.
// if (encoder_sw_isr_flag) { // If we got a new encoder switch intterupt
// encoder_sw_event = true; // Allow one more loop to handle the button change event
// encoder_sw_isr_flag = false; // Reset the ISR flag
// }
// else {
// encoder_sw_event = false; // Just let it go
// }
if (encoder_sw && !encoder_sw_last) { // if you push the encoder
if (sim_edit_mode) {
sim_edit_mode = false;
}
else if (sim_selected_value != -1) {
sim_edit_mode = true;
}
}
if (encoder_delta != 0) {
if (sim_edit_mode) {
sim_modify_polarity = 10*encoder_delta;
}
else {
if (sim_selected_value == -1)
sim_selected_value += 1;
if (sim_selected_value > arraysize(tunings[sim_tuning_mode])) sim_selected_value -= (arraysize(tunings[sim_tuning_mode]) + 1);
if (sim_tuning_mode >= 4 && sim_selected_value == 0) sim_selected_value = 5; // Skip unchangeable values for all PID modes
if (sim_tuning_mode == JOY && sim_selected_value == 0) sim_selected_value = 2; // Skip unchangeable values for joy mode
}
encoder_delta = 0;
}
// Just testing timings
// if (now_us-sim_timer_us > 000) { // Max is 300 rpm . 100000 us gives 155 rpm, 240000 us gives 100 rpm, 480000 gives 56 rpm
// // sim_out = !sim_out;
// digitalWrite(sim_pulse_pin, sim_out);
// sim_timer_us = now_us;
// }
// Change variable values (when simulating)
//
  if (sim_modify_polarity != 0 && now_us-sim_modify_timer_us > sim_modify_period_us) {
if (sim_tuning_mode == JOY) switch (sim_selected_value) {
case 2: joy_horz_min_adc += sim_modify_polarity; break;
case 3: joy_horz_max_adc += sim_modify_polarity; break;
case 4: joy_horz_deadband_adc += sim_modify_polarity; break;
case 5: joy_vert_min_adc += sim_modify_polarity; break;
case 6: joy_vert_max_adc += sim_modify_polarity; break;
case 7: joy_vert_deadband_adc += sim_modify_polarity; break;
}
else if (sim_tuning_mode == CAR) switch (sim_selected_value) {
case 0: gas_governor_percent += sim_modify_polarity; break;
case 1: engine_idle_rpm += sim_modify_polarity; break;
case 2: engine_redline_rpm += sim_modify_polarity; break;
case 3: carspeed_idle_mmph += sim_modify_polarity; break;
case 4: carspeed_redline_mmph += sim_modify_polarity; break;
case 5: gas_pid = (sim_modify_polarity+1)/2; break;
case 6: cruise_gesturing = (sim_modify_polarity+1)/2; break;
case 7: brake_pos_zeropoint_adc += sim_modify_polarity; break;
}
else if (sim_tuning_mode == PWM) switch (sim_selected_value) {
case 0: steer_pulse_left_us += sim_modify_polarity; break;
case 1: steer_pulse_stop_us += sim_modify_polarity; break;
case 2: steer_pulse_right_us += sim_modify_polarity; break;
case 3: brake_pulse_extend_us += sim_modify_polarity; break;
case 4: brake_pulse_stop_us += sim_modify_polarity; break;
case 5: brake_pulse_retract_us += sim_modify_polarity; break;
case 6: gas_pulse_idle_us += sim_modify_polarity; break;
case 7: gas_pulse_redline_us += sim_modify_polarity; break;
}
else if (sim_tuning_mode == BPID) switch (sim_selected_value) {
case 5: brake_pid_kc += 0.001*(float)sim_modify_polarity; break;
case 6: brake_pid_fi_mhz += 0.001*(float)sim_modify_polarity; break;
case 7: brake_pid_td_us += 0.001*(float)sim_modify_polarity; break;
}
else if (sim_tuning_mode == GPID) switch (sim_selected_value) {
case 5: gas_pid_kc += 0.001*(float)sim_modify_polarity; break;
case 6: gas_pid_fi_mhz += 0.001*(float)sim_modify_polarity; break;
case 7: gas_pid_td_us += 0.001*(float)sim_modify_polarity; break;
}
else if (sim_tuning_mode == CPID) switch (sim_selected_value) {
case 5: cruise_pid_kc += 0.001*(float)sim_modify_polarity; break;
case 6: cruise_pid_fi_mhz += 0.001*(float)sim_modify_polarity; break;
case 7: cruise_pid_td_us += 0.001*(float)sim_modify_polarity; break;
}
sim_modify_timer_us=now_us;
}
if (sim_edit_mode) {
sim_modify_polarity = 0;
}
// Write telemetry values to the screen
//
// Note, these screen writes take 62 ms, causing loop to run at only 16 Hz.
// Without them, each loop would only take 1.7 ms, almost 600 Hz.
if (display_enabled) {
draw_value(0, 0, 2);
draw_value(0, 0, 3);
draw_value(1, runmode, 1);
draw_value(2, carspeed_filt_mmph, 0);
draw_value(3, engine_filt_rpm, 0);
draw_value(4, pressure_filt_adc, 0);
draw_value(5, joy_horz_filt_adc, 0);
draw_value(6, joy_vert_filt_adc, 0);
draw_value(7, steer_pulse_out_us, 0);
draw_value(8, carspeed_target_mmph, 0);
draw_value(9, gas_target_rpm, 0);
draw_value(10, gas_pulse_out_us, 0);
draw_value(11, pressure_target_adc, 0);
draw_value(12, brake_pulse_out_us, 0);
if (sim_tuning_mode == LOCK) {
draw_value(13, battery_filt_mv, 0);
draw_value(14, brake_pos_adc, 0);
draw_value(15, pot_filt_adc, 0);
draw_value(16, encoder_delta, 0);
draw_value(17, encoder_a_raw, 0);
draw_value(18, encoder_b_raw, 0);
draw_value(19, encoder_sw, 0);
}
else if (sim_tuning_mode == JOY) {
draw_value(13, joy_horz_adc, 0);
draw_value(14, joy_vert_adc, 0);
draw_value(15, joy_horz_min_adc, 0);
draw_value(16, joy_horz_max_adc, 0);
draw_value(17, joy_horz_deadband_adc, 0);
draw_value(18, joy_vert_min_adc, 0);
draw_value(19, joy_vert_max_adc, 0);
draw_value(20, joy_vert_deadband_adc, 0);
}
else if (sim_tuning_mode == CAR) {
draw_value(13, gas_governor_percent, 0);
draw_value(14, engine_idle_rpm, 0);
draw_value(15, engine_redline_rpm, 0);
draw_value(16, carspeed_idle_mmph, 0);
draw_value(17, carspeed_redline_mmph, 0);
draw_value(18, gas_pid, 0);
draw_value(19, cruise_gesturing, 0);
draw_value(20, brake_pos_zeropoint_adc, 0);
}
else if (sim_tuning_mode == PWM) {
draw_value(13, steer_pulse_left_us, 0);
draw_value(14, steer_pulse_stop_us, 0);
draw_value(15, steer_pulse_right_us, 0);
draw_value(16, brake_pulse_extend_us, 0);
draw_value(17, brake_pulse_stop_us, 0);
draw_value(18, brake_pulse_retract_us, 0);
draw_value(19, gas_pulse_idle_us, 0);
draw_value(20, gas_pulse_redline_us, 0);
}
else if (sim_tuning_mode == BPID) {
draw_value(13, brake_pid_error_adc, 0);
draw_value(14, (int32_t)(brake_pid_kc*(float)brake_pid_error_adc), 0);
draw_value(15, brake_pid_i_term_adc, 0);
draw_value(16, brake_pid_d_term_adc, 0);
draw_value(17, pressure_delta_adc, 0);
draw_value(18, (int32_t)(1000*brake_pid_kc), 0);
draw_value(19, (int32_t)(1000000*brake_pid_fi_mhz), 0);
draw_value(20, (int32_t)(1000*brake_pid_td_us), 0);
}
else if (sim_tuning_mode == GPID) {
draw_value(13, gas_pid_error_rpm, 0);
draw_value(14, (int32_t)(gas_pid_kc*(float)gas_pid_error_rpm), 0);
draw_value(15, gas_pid_i_term_rpm, 0);
draw_value(16, gas_pid_d_term_rpm, 0);
draw_value(17, gas_delta_rpm, 0);
draw_value(18, (int32_t)(1000*gas_pid_kc), 0);
draw_value(19, (int32_t)(1000000*gas_pid_fi_mhz), 0);
draw_value(20, (int32_t)(1000*gas_pid_td_us), 0);
}
else if (sim_tuning_mode == CPID) {
draw_value(13, cruise_pid_error_mmph, 0);
draw_value(14, (int32_t)(cruise_pid_kc*(float)cruise_pid_error_mmph), 0);
draw_value(15, cruise_pid_i_term_mmph, 0);
draw_value(16, cruise_pid_d_term_mmph, 0);
draw_value(17, carspeed_delta_mmph, 0);
draw_value(18, (int32_t)(1000*cruise_pid_kc), 0);
draw_value(19, (int32_t)(1000000*cruise_pid_fi_mhz), 0);
draw_value(20, (int32_t)(1000*cruise_pid_td_us), 0);
}
if (simulate) draw_touchgrid(true); // Redraw only the at-risk content of the touch grid
draw_bool(ignition, 0);
draw_bool(basicmodesw, 1);
draw_bool(neutral, 2);
draw_bool(cruise_sw, 3);
}
// 7) SD card
//
// [Do card storage operations]
// 8) Do the control loop bookkeeping at the end of each loop
//
now_us = micros();
if (looptimer) {
// Serial.print(now_us-loopzero);
// Serial.print(". Loop in ");
Serial.print(((float)(now_us-loopzero)/1000));
Serial.print(" ms, ");
Serial.print(1000000/((float)(now_us-loopzero)));
Serial.println(" Hz");
}
// Serial.print("runmode: "); Serial.println(runmode);
loopno++; // I like to count how many loops
encoder_sw_last = encoder_sw;
encoder_delta_last = encoder_delta;
disp_redraw_all = false;
if (runmode != SHUTDOWN) shutdown_complete = false;
if (runmode != oldmode) we_just_switched_modes = true; // If changing runmode, set this so new mode logic can perform initial actions
else we_just_switched_modes = false; // Reset this variable
oldmode = runmode; // remember what mode we're in for next time
}
| 63.730122 | 269 | 0.66096 |
b79742e6123e5b70a07c7afaf6b434a5020ef968 | 8,171 | ino | Arduino |
xosera_tester/xosera_tester.ino | roscopeco/Xosera | 80aa5205f44804ddc59e8e0a42fefe6861efde6c | ["MIT"] | null | null | null |
xosera_tester/xosera_tester.ino | roscopeco/Xosera | 80aa5205f44804ddc59e8e0a42fefe6861efde6c | ["MIT"] | null | null | null |
xosera_tester/xosera_tester.ino | roscopeco/Xosera | 80aa5205f44804ddc59e8e0a42fefe6861efde6c | ["MIT"] | null | null | null |
#pragma GCC optimize("O3")
// Xosera Test Jig
enum
{
BUS_SEL_N = 10,
BUS_RD_RNW = 11,
BUS_BYTESEL = 12,
BUS_REG_NUM0 = A0,
BUS_REG_NUM1 = A1,
BUS_REG_NUM2 = A2,
BUS_REG_NUM3 = A3,
BUS_DATA7 = 7,
BUS_DATA6 = 6,
BUS_DATA5 = 5,
BUS_DATA4 = 4,
BUS_DATA3 = 3,
BUS_DATA2 = 2,
BUS_DATA1 = 9,
BUS_DATA0 = 8,
CS_SELECTED = LOW,
CS_DESELECTED = HIGH,
RNW_WRITE = LOW, // CPU WRITE, Xosera READ
RNW_READ = HIGH // CPU READ, Xosera WRITE
};
enum
{
XVID_RD_ADDR, // reg 0 0000: address to read from VRAM (write-only)
XVID_WR_ADDR, // reg 1 0001: address to write from VRAM (write-only)
XVID_DATA, // reg 2 0010: read/write word from/to VRAM RD/WR
XVID_DATA_2, // reg 3 0011: read/write word from/to VRAM RD/WR (for 32-bit)
XVID_VID_CTRL, // reg 4 0100: TODO video display mode (write-only)
    XVID_BLIT_CTRL,   // reg 5 0101: TODO blitter mode/control/status (read/write)
                      // (renamed here: the original listing reused the name XVID_AUX_DATA for both
                      //  reg 5 and reg E, which would not compile; this placeholder name is an
                      //  assumption, not taken from the Xosera sources)
XVID_RD_INC, // reg 6 0110: TODO read addr increment value (write-only)
XVID_WR_INC, // reg 7 0111: TODO write addr increment value (write-only)
XVID_RD_MOD, // reg 8 1000: TODO read modulo width (write-only)
    XVID_WR_MOD,      // reg 9 1001: TODO write modulo width (write-only)
    XVID_WIDTH,       // reg A 1010: TODO width for 2D blit (write-only)
XVID_COUNT, // reg B 1011: TODO blitter "repeat" count (write-only)
XVID_AUX_RD_ADDR, // reg C 1100: TODO aux read address (font audio etc.?) (write-only)
XVID_AUX_WR_ADDR, // reg D 1101: TODO aux write address (font audio etc.?) (write-only)
XVID_AUX_DATA, // reg E 1110: TODO aux memory/register data read/write value
XVID_AUX_CTRL // reg F 1111: TODO audio and other control? (read/write)
};
inline void xvid_set_bus_read()
{
pinMode(BUS_DATA0, INPUT);
pinMode(BUS_DATA1, INPUT);
pinMode(BUS_DATA2, INPUT);
pinMode(BUS_DATA3, INPUT);
pinMode(BUS_DATA4, INPUT);
pinMode(BUS_DATA5, INPUT);
pinMode(BUS_DATA6, INPUT);
pinMode(BUS_DATA7, INPUT);
digitalWrite(BUS_RD_RNW, RNW_READ);
}
inline void xvid_set_bus_write()
{
digitalWrite(BUS_RD_RNW, RNW_WRITE);
pinMode(BUS_DATA0, OUTPUT);
pinMode(BUS_DATA1, OUTPUT);
pinMode(BUS_DATA2, OUTPUT);
pinMode(BUS_DATA3, OUTPUT);
pinMode(BUS_DATA4, OUTPUT);
pinMode(BUS_DATA5, OUTPUT);
pinMode(BUS_DATA6, OUTPUT);
pinMode(BUS_DATA7, OUTPUT);
}
inline void xvid_reg_num(uint8_t r)
{
digitalWrite(BUS_REG_NUM0, r & 0x1);
digitalWrite(BUS_REG_NUM1, r & 0x2);
digitalWrite(BUS_REG_NUM2, r & 0x4);
digitalWrite(BUS_REG_NUM3, r & 0x8);
}
inline void xvid_bytesel(uint8_t b)
{
digitalWrite(BUS_BYTESEL, b);
}
inline void xvid_data_write(uint8_t data)
{
digitalWrite(BUS_DATA0, data & 0x01);
digitalWrite(BUS_DATA1, data & 0x02);
digitalWrite(BUS_DATA2, data & 0x04);
digitalWrite(BUS_DATA3, data & 0x08);
digitalWrite(BUS_DATA4, data & 0x10);
digitalWrite(BUS_DATA5, data & 0x20);
digitalWrite(BUS_DATA6, data & 0x40);
digitalWrite(BUS_DATA7, data & 0x80);
}
inline uint8_t xvid_data_read()
{
uint8_t data = 0;
if (digitalRead(BUS_DATA0))
data |= 0x01;
if (digitalRead(BUS_DATA1))
data |= 0x02;
if (digitalRead(BUS_DATA2))
data |= 0x04;
if (digitalRead(BUS_DATA3))
data |= 0x08;
if (digitalRead(BUS_DATA4))
data |= 0x10;
if (digitalRead(BUS_DATA5))
data |= 0x20;
if (digitalRead(BUS_DATA6))
data |= 0x40;
if (digitalRead(BUS_DATA7))
data |= 0x80;
return data;
}
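// The register helpers below implement the 16-bit access protocol over the 8-bit bus defined
// above: each 16-bit transfer is two strobes of BUS_SEL_N, first with BUS_BYTESEL low for the
// most significant byte, then with BUS_BYTESEL high for the least significant byte, while
// BUS_REG_NUM0-3 select the target register. A typical VRAM write therefore looks like
//   xvid_set_reg(XVID_WR_ADDR, addr);  // set the VRAM write address
//   xvid_set_reg(XVID_DATA, word);     // write one 16-bit word; the address then advances by XVID_WR_INC
// which is the same sequence used in setup() and loop() further down.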
inline void xvid_set_reg(uint8_t r, uint16_t value)
{
xvid_reg_num(r);
xvid_bytesel(0);
xvid_data_write(value >> 8);
digitalWrite(BUS_SEL_N, CS_SELECTED);
digitalWrite(BUS_SEL_N, CS_DESELECTED);
xvid_bytesel(1);
xvid_data_write(value & 0xff);
digitalWrite(BUS_SEL_N, CS_SELECTED);
digitalWrite(BUS_SEL_N, CS_DESELECTED);
}
inline void xvid_set_regb(uint8_t r, uint8_t value)
{
xvid_reg_num(r);
xvid_bytesel(1);
xvid_data_write(value & 0xff);
digitalWrite(BUS_SEL_N, CS_SELECTED);
digitalWrite(BUS_SEL_N, CS_DESELECTED);
}
inline uint16_t xvid_get_reg(uint8_t r)
{
xvid_reg_num(r);
xvid_bytesel(0);
xvid_set_bus_read();
digitalWrite(BUS_SEL_N, CS_SELECTED);
uint8_t msb = xvid_data_read();
digitalWrite(BUS_SEL_N, CS_DESELECTED);
xvid_bytesel(1);
digitalWrite(BUS_SEL_N, CS_SELECTED);
uint8_t lsb = xvid_data_read();
digitalWrite(BUS_SEL_N, CS_DESELECTED);
xvid_set_bus_write();
return (msb << 8) | lsb;
}
inline uint8_t xvid_get_regb(uint8_t r, uint16_t value)
{
xvid_reg_num(r);
xvid_bytesel(1);
xvid_set_bus_read();
digitalWrite(BUS_SEL_N, CS_SELECTED);
value = xvid_data_read();
digitalWrite(BUS_SEL_N, CS_DESELECTED);
xvid_set_bus_write();
return value;
}
void setup()
{
Serial.begin(115200);
Serial.println("Xosera Test Jig");
digitalWrite(LED_BUILTIN, LOW);
pinMode(BUS_SEL_N, OUTPUT);
digitalWrite(BUS_SEL_N, CS_DESELECTED);
pinMode(BUS_RD_RNW, OUTPUT);
digitalWrite(BUS_RD_RNW, RNW_WRITE);
pinMode(BUS_BYTESEL, OUTPUT);
digitalWrite(BUS_BYTESEL, LOW);
pinMode(BUS_REG_NUM0, OUTPUT);
digitalWrite(BUS_REG_NUM0, LOW);
pinMode(BUS_REG_NUM1, OUTPUT);
digitalWrite(BUS_REG_NUM1, LOW);
pinMode(BUS_REG_NUM2, OUTPUT);
digitalWrite(BUS_REG_NUM2, LOW);
pinMode(BUS_REG_NUM3, OUTPUT);
digitalWrite(BUS_REG_NUM3, LOW);
pinMode(A4, OUTPUT);
digitalWrite(A4, LOW); // yellow = problem
pinMode(A5, OUTPUT);
digitalWrite(A5, HIGH); // green = good
pinMode(BUS_DATA0, OUTPUT);
pinMode(BUS_DATA1, OUTPUT);
pinMode(BUS_DATA2, OUTPUT);
pinMode(BUS_DATA3, OUTPUT);
pinMode(BUS_DATA4, OUTPUT);
pinMode(BUS_DATA5, OUTPUT);
pinMode(BUS_DATA6, OUTPUT);
pinMode(BUS_DATA7, OUTPUT);
delay(100);
digitalWrite(LED_BUILTIN, HIGH);
Serial.println("Starting...");
delay(4000);
randomSeed(0x1234);
static const char msg[] = "Begin!";
xvid_set_reg(XVID_WR_INC, 1);
xvid_set_reg(XVID_DATA, 0x1f00 | msg[0]);
for (uint8_t i = 1; i < sizeof(msg); i++)
{
xvid_set_regb(XVID_DATA, msg[i]);
}
delay(1000);
}
uint16_t count = 0;
uint16_t laddr = 0x0000;
uint16_t addr = 0x0000;
uint16_t data = 0x0100;
void loop()
{
xvid_set_reg(XVID_WR_ADDR, addr);
xvid_set_reg(XVID_WR_INC, 1);
xvid_set_reg(XVID_DATA, data);
digitalWrite(A5, LOW); // green = good (blink)
#if 0 // timer
uint16_t test_time = millis();
uint16_t start_time = millis();
while (start_time == test_time)
{
start_time = millis();
}
{
uint8_t i = 0;
uint8_t j = 0;
do
{
do
{
xvid_set_reg(XVID_DATA, data);
} while (++i);
} while (++j);
}
uint16_t end_time = millis();
Serial.print("T=");
Serial.println(end_time-start_time);
#endif
for (uint16_t c = 1; c < 106 * 15; c++)
{
xvid_set_regb(XVID_DATA, data);
}
xvid_set_reg(XVID_RD_ADDR, addr);
xvid_set_reg(XVID_RD_INC, 1);
digitalWrite(A5, HIGH); // green = good (blink)
for (uint16_t c = 1; c < 106 * 15; c++)
{
uint16_t rdata = xvid_get_reg(XVID_DATA);
if (rdata != data)
{
digitalWrite(A4, HIGH); // yellow = problem
Serial.print(addr + c, HEX);
Serial.print(": WR=");
Serial.print(data, HEX);
Serial.print(" vs RD=");
Serial.print(rdata, HEX);
Serial.print(" \n");
break;
}
}
data++;
addr += 106 * 50;
if (addr < laddr)
{
Serial.print(".");
if ((++count & 0x3f) == 0)
{
Serial.println("");
Serial.print(count);
}
}
laddr = addr;
}
| 27.60473 | 98 | 0.615836 |
25def68055ff0918bec537ffdded44bf18598db9 | 3,078 | ino | Arduino |
VOR-013Code.ino | volab/VOR-013_code | 0a420da36a43a43c9fafb829a15d4b4019fcbe0e | ["MIT"] | null | null | null |
VOR-013Code.ino | volab/VOR-013_code | 0a420da36a43a43c9fafb829a15d4b4019fcbe0e | ["MIT"] | null | null | null |
VOR-013Code.ino | volab/VOR-013_code | 0a420da36a43a43c9fafb829a15d4b4019fcbe0e | ["MIT"] | null | null | null |
//----------------------------------------------------------------------------------------------------------------------
// Project VOR-013 Vor Marley the drawing Robots
// Major Lee
// CC-0
// July 2016
//----------------------------------------------------------------------------------------------------------------------
//
// add bp and led to start
// majorLee 5/3
// Added a table of letters
// Changed the L_stepper pin, validated 8/7/16
// resumed the SD card integration
// Object-oriented programming
// bluetooth integration overflowed the stack !!!!
// which required a change of approach.
#include <Servo.h>
#include <SD.h>  // SD.begin() is called in setup(); included here in case the project headers do not already pull it in
#define TABLETTERMAXLIGN 25
#include "Lettres.h"
#include "Flasher.h"
#include "VOR13.h"
#include "bluetooth.h"
#define LED 3
#define SWITCH 2
VOR13 robot; //to keep the robot state (well yes, a bit of English!)
Lettres lettreur; //i.e. the letter tracer
Flasher led;
V13BT bluetoothChanel ;
//----------------------------------------------------------------------------------------------------------------------
void setup() {
Serial.begin(9600);
randomSeed(analogRead(1));
robot.begin();
dspl("setup : " __DATE__ " @ " __TIME__);
pinMode( SWITCH, INPUT_PULLUP);
bluetoothChanel.begin(9600);
  /* sample code kept for reference
led1.begin( 13, 10, 500 );
for ( ; led.getChangeStateCpt() < 100; ){
led.update();
}
*/
//------------------------------------------------------------------------------------------------------------------
  // give up on failure! with a fast LED flash
if (!SD.begin(10)) {
Serial.println(F("initialization failed!"));
led.begin( LED, 20, 100 );
while (1){
led.update();
}
}
lettreur.begin();
}
//----------------------------------------------------------------------------------------------------------------------
void loop(){
String recTrame ="";
  // wait for the push button to be pressed
led.begin( LED, 300, 300);
boolean go = false;
while(!go ){
led.update();
bluetoothChanel.update( robot.buildStateTrame() );
if ( bluetoothChanel.getRec( recTrame )){
//bluetoothChanel.echoTrame( recTrame );
robot.interpreteTrame( recTrame );
}
go = !digitalRead( SWITCH ) | robot.go();
}
led.stop();
delay(500);
robot.setState( ETAT_WORK );
  //Write out the text
if ( robot.getMode() == MODE_ECRIT ){
for (int i = 0; i< robot.get_aEcrire().length(); i++){
lettreur.traceLettre( robot.get_aEcrire().charAt(i) );
bluetoothChanel.update( robot.buildStateTrame() );
}
    //move clear of the drawing
    lettreur.degage(90, 110);
  } else { //drawing mode
    lettreur.dessine( robot.get_aDessiner() );
    //move clear of the drawing
lettreur.degage(90, 160);
}
robot.setState( ETAT_FINI );
//done(); // releases stepper motor
while(1) bluetoothChanel.update( robot.buildStateTrame() ); // wait for reset
}
| 28.5 | 120 | 0.48603 |
abda8215ff171ea31ff5fc47f10858ce98d02744 | 7,256 | ino | Arduino |
OLD_DEPRECIATED/HX711_Tariq/HX711_Tariq.ino | david-andrew/PRS_robot | 9a3834c6bb217d8e06fb2a470bc3b1b5b55c19aa | ["MIT"] | null | null | null |
OLD_DEPRECIATED/HX711_Tariq/HX711_Tariq.ino | david-andrew/PRS_robot | 9a3834c6bb217d8e06fb2a470bc3b1b5b55c19aa | ["MIT"] | null | null | null |
OLD_DEPRECIATED/HX711_Tariq/HX711_Tariq.ino | david-andrew/PRS_robot | 9a3834c6bb217d8e06fb2a470bc3b1b5b55c19aa | ["MIT"] | null | null | null |
// #include "HX711.h"
// HX711.DOUT - pin #A1
// HX711.PD_SCK - pin #A0
class HX711
{
private:
byte PD_SCK; // Power Down and Serial Clock Input Pin
byte DOUT; // Serial Data Output Pin
byte GAIN; // amplification factor
long OFFSET = 0; // used for tare weight
float SCALE = 1; // used to return weight in grams, kg, ounces, whatever
public:
// define clock and data pin, channel, and gain factor
// channel selection is made by passing the appropriate gain: 128 or 64 for channel A, 32 for channel B
// gain: 128 or 64 for channel A; channel B works with 32 gain factor only
HX711(byte dout, byte pd_sck, byte gain = 128);
HX711();
virtual ~HX711();
// Allows to set the pins and gain later than in the constructor
void begin(byte dout, byte pd_sck, byte gain = 128);
// check if HX711 is ready
// from the datasheet: When output data is not ready for retrieval, digital output pin DOUT is high. Serial clock
// input PD_SCK should be low. When DOUT goes to low, it indicates data is ready for retrieval.
bool is_ready();
// set the gain factor; takes effect only after a call to read()
// channel A can be set for a 128 or 64 gain; channel B has a fixed 32 gain
// depending on the parameter, the channel is also set to either A or B
void set_gain(byte gain = 128);
// waits for the chip to be ready and returns a reading
long read();
// returns an average reading; times = how many times to read
long read_average(byte times = 10);
// returns (read_average() - OFFSET), that is the current value without the tare weight; times = how many readings to do
double get_value(byte times = 1);
// returns get_value() divided by SCALE, that is the raw value divided by a value obtained via calibration
// times = how many readings to do
float get_units(byte times = 1);
// set the OFFSET value for tare weight; times = how many times to read the tare value
void tare(byte times = 10);
// set the SCALE value; this value is used to convert the raw data to "human readable" data (measure units)
void set_scale(float scale = 1.f);
// get the current SCALE
float get_scale();
// set OFFSET, the value that's subtracted from the actual reading (tare weight)
void set_offset(long offset = 0);
// get the current OFFSET
long get_offset();
// puts the chip into power down mode
void power_down();
// wakes up the chip after power down mode
void power_up();
};
HX711::HX711(byte dout, byte pd_sck, byte gain) {
begin(dout, pd_sck, gain);
}
HX711::HX711() {
}
HX711::~HX711() {
}
void HX711::begin(byte dout, byte pd_sck, byte gain) {
PD_SCK = pd_sck;
DOUT = dout;
pinMode(PD_SCK, OUTPUT);
pinMode(DOUT, INPUT);
set_gain(gain);
}
bool HX711::is_ready() {
return digitalRead(DOUT) == LOW;
}
void HX711::set_gain(byte gain) {
switch (gain) {
case 128: // channel A, gain factor 128
GAIN = 1;
break;
case 64: // channel A, gain factor 64
GAIN = 3;
break;
case 32: // channel B, gain factor 32
GAIN = 2;
break;
}
digitalWrite(PD_SCK, LOW);
read();
}
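// Combined with the extra GAIN pulses issued at the end of read(), the total number of clock
// pulses per conversion selects the channel and gain as described in the HX711 datasheet:
// 25 pulses (GAIN = 1) -> channel A, gain 128; 26 pulses (GAIN = 2) -> channel B, gain 32;
// 27 pulses (GAIN = 3) -> channel A, gain 64.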
long HX711::read() {
// wait for the chip to become ready
while (!is_ready()) {
// Will do nothing on Arduino but prevent resets of ESP8266 (Watchdog Issue)
yield();
}
unsigned long value = 0;
uint8_t data[3] = { 0 };
uint8_t filler = 0x00;
// pulse the clock pin 24 times to read the data
data[2] = shiftIn(DOUT, PD_SCK, MSBFIRST);
data[1] = shiftIn(DOUT, PD_SCK, MSBFIRST);
data[0] = shiftIn(DOUT, PD_SCK, MSBFIRST);
// set the channel and the gain factor for the next reading using the clock pin
for (unsigned int i = 0; i < GAIN; i++) {
digitalWrite(PD_SCK, HIGH);
digitalWrite(PD_SCK, LOW);
}
// Replicate the most significant bit to pad out a 32-bit signed integer
if (data[2] & 0x80) {
filler = 0xFF;
} else {
filler = 0x00;
}
// Construct a 32-bit signed integer
value = ( static_cast<unsigned long>(filler) << 24
| static_cast<unsigned long>(data[2]) << 16
| static_cast<unsigned long>(data[1]) << 8
| static_cast<unsigned long>(data[0]) );
return static_cast<long>(value);
}
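// Worked example of the sign extension above: the HX711 delivers a 24-bit two's-complement
// value. A raw reading of 0x800000 has bit 23 set, so data[2] & 0x80 is non-zero, filler
// becomes 0xFF and the assembled 32-bit value is 0xFF800000, i.e. -8388608 as a signed long.
// A raw reading of 0x7FFFFF keeps filler at 0x00 and comes back as +8388607.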
long HX711::read_average(byte times) {
long sum = 0;
for (byte i = 0; i < times; i++) {
sum += read();
yield();
}
return sum / times;
}
double HX711::get_value(byte times) {
return read_average(times) - OFFSET;
}
float HX711::get_units(byte times) {
return get_value(times) / SCALE;
}
void HX711::tare(byte times) {
double sum = read_average(times);
set_offset(sum);
}
void HX711::set_scale(float scale) {
SCALE = scale;
}
float HX711::get_scale() {
return SCALE;
}
void HX711::set_offset(long offset) {
OFFSET = offset;
}
long HX711::get_offset() {
return OFFSET;
}
void HX711::power_down() {
digitalWrite(PD_SCK, LOW);
digitalWrite(PD_SCK, HIGH);
}
void HX711::power_up() {
digitalWrite(PD_SCK, LOW);
}
HX711 scale(A1, A0); // parameter "gain" is omitted; the default value 128 is used by the library
void setup() {
Serial.begin(115200);
Serial.println("HX711 Demo");
Serial.println("Before setting up the scale:");
Serial.print("read: \t\t");
Serial.println(scale.read()); // print a raw reading from the ADC
Serial.print("read average: \t\t");
Serial.println(scale.read_average(20)); // print the average of 20 readings from the ADC
Serial.print("get value: \t\t");
Serial.println(scale.get_value(5)); // print the average of 5 readings from the ADC minus the tare weight (not set yet)
Serial.print("get units: \t\t");
Serial.println(scale.get_units(5), 1); // print the average of 5 readings from the ADC minus tare weight (not set) divided
// by the SCALE parameter (not set yet)
scale.power_up(); // possibly only turns on the excitation & therefore probably only needs to be done before reading
scale.set_gain(32); // this also selects B channel inputs
scale.set_scale(2280.f); // this value is obtained by calibrating the scale with known weights; see the README for details
scale.tare(); // reset the scale to 0
Serial.println("After setting up the scale:");
Serial.print("read: \t\t");
Serial.println(scale.read()); // print a raw reading from the ADC
Serial.print("read average: \t\t");
Serial.println(scale.read_average(20)); // print the average of 20 readings from the ADC
Serial.print("get value: \t\t");
Serial.println(scale.get_value(5)); // print the average of 5 readings from the ADC minus the tare weight, set with tare()
Serial.print("get units: \t\t");
Serial.println(scale.get_units(5), 1); // print the average of 5 readings from the ADC minus tare weight, divided
// by the SCALE parameter set with set_scale
Serial.println("Readings:");
}
void loop() {
Serial.print("one reading:\t");
Serial.print(scale.get_units(), 1);
Serial.print("\t| average:\t");
Serial.println(scale.get_units(10), 1);
//scale.power_down(); // put the ADC in sleep mode
//delay(100);
//scale.power_up();
}
| 28.679842 | 145 | 0.655595 |
4815cf7ffed5125d357990e8bccdbd08beb9645f | 929 | ino | Arduino |
AMBServer/RRServerTest/RRServerTest.ino | egk696/DPSS | f50124eb011599a36bfbfa038c75730d989b058d | ["MIT"] | null | null | null |
AMBServer/RRServerTest/RRServerTest.ino | egk696/DPSS | f50124eb011599a36bfbfa038c75730d989b058d | ["MIT"] | null | null | null |
AMBServer/RRServerTest/RRServerTest.ino | egk696/DPSS | f50124eb011599a36bfbfa038c75730d989b058d | ["MIT"] | null | null | null |
//RR
#include <SPI.h>
#include <Mirf.h>
#include <nRF24L01.h>
#include <MirfHardwareSpiDriver.h>
#define StatusLed 9
//#define DEBUG
String data, part1, part2, DataWord;
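// Note: initMirf(), configMirf() and receiveDataWord() are called below but are not defined in
// this sketch, so it does not compile as-is. The following is a minimal sketch of what they
// might look like, built on the standard Mirf API (Mirf.cePin, Mirf.csnPin, Mirf.setRADDR,
// Mirf.payload, Mirf.channel, Mirf.config, Mirf.dataReady, Mirf.getData). The pin-argument
// order, the blocking receive and the use of the global `data` String are assumptions, not the
// original implementation.
void initMirf(uint8_t cePin, uint8_t csnPin){
  Mirf.cePin = cePin;              // chip-enable pin of the nRF24L01 module
  Mirf.csnPin = csnPin;            // SPI chip-select pin
  Mirf.spi = &MirfHardwareSpi;     // use the hardware SPI driver
  Mirf.init();
}
void configMirf(const char *addr, uint8_t payloadSize, uint8_t channel){
  Mirf.setRADDR((byte *)addr);     // 5-byte receive address, e.g. "serv1"
  Mirf.payload = payloadSize;      // fixed payload size in bytes
  Mirf.channel = channel;          // RF channel
  Mirf.config();
}
void receiveDataWord(uint8_t /*serverId*/){
  byte buffer[33] = {0};           // one spare byte so the String below is null-terminated
  while(!Mirf.dataReady()){
    // block until a packet arrives
  }
  Mirf.getData(buffer);
  data = String((char *)buffer);   // hand the received text to loop() via the global
}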
void setup(){
Serial.begin(57600);
  pinMode(StatusLed, OUTPUT);  // StatusLed is driven in loop() but was never configured as an output
  initMirf(8, 10);
  configMirf("serv1", 32, 90);
#ifdef DEBUG
Serial.println("Server opened");
Serial.println("--------------");
#endif
}
void loop(){
#ifdef DEBUG
Serial.println("--------------");
Serial.println("--------------");
#endif
digitalWrite(StatusLed, HIGH);
configMirf("serv1", 32, 90);
receiveDataWord(1);
DataWord=data;
digitalWrite(StatusLed, LOW);
#ifdef DEBUG
Serial.println("--------------");
Serial.println("--------------");
#endif
digitalWrite(StatusLed, HIGH);
configMirf("serv2", 32, 95);
receiveDataWord(2);
DataWord=DataWord+"+"+data+"$";
digitalWrite(StatusLed, LOW);
#ifdef DEBUG
Serial.println("--------------");
#endif
Serial.println(DataWord);
}
| 18.58 | 36 | 0.615716 |
cb60fd3f8d71a7d222376a99771ee53c60ce264e | 247 | ino | Arduino |
blinky.ino | sitharaj88/arduino-samples | bf6221edfe4d780c4c08ddca6d409e8e170d7532 | ["Apache-2.0"] | null | null | null |
blinky.ino | sitharaj88/arduino-samples | bf6221edfe4d780c4c08ddca6d409e8e170d7532 | ["Apache-2.0"] | null | null | null |
blinky.ino | sitharaj88/arduino-samples | bf6221edfe4d780c4c08ddca6d409e8e170d7532 | ["Apache-2.0"] | null | null | null |
const char LED = 13;
void setup() {
//make pin 13 as output
pinMode(LED, OUTPUT);
}
void loop() {
//LED "ON"
digitalWrite(LED, HIGH);
//wait 1 sec
delay(1000);
//LED "OFF"
digitalWrite(LED, LOW);
//wait 1 sec
delay(1000);
}
| 13.722222 | 26 | 0.591093 |
3f2f38049021ae1bccf0cb42442285937d228090 | 1,015 | ino | Arduino |
Pasta do Projeto/Lendo_Botao/Lendo_Botao.ino | edvaldoide/Arduino--lendo_botao | d9b40b5b5aec10fd47bfd47c3befa9e08dd95f72 | ["MIT"] | null | null | null |
Pasta do Projeto/Lendo_Botao/Lendo_Botao.ino | edvaldoide/Arduino--lendo_botao | d9b40b5b5aec10fd47bfd47c3befa9e08dd95f72 | ["MIT"] | null | null | null |
Pasta do Projeto/Lendo_Botao/Lendo_Botao.ino | edvaldoide/Arduino--lendo_botao | d9b40b5b5aec10fd47bfd47c3befa9e08dd95f72 | ["MIT"] | null | null | null |
/*******************************************************************************
* RoboCore Beginner Kit V8 for Arduino - Reading a Button
* Turns the LED on while the button is pressed and off when it is released.
*******************************************************************************/
void setup(){
  pinMode(8, INPUT); // configure the button pin as an input
  pinMode(12, INPUT); // configure the button pin as an input
  pinMode(9, OUTPUT); // configure the LED pin as an output
  pinMode(13, OUTPUT); // configure the LED pin as an output
}
void loop(){
  if (digitalRead(8) == HIGH){ // if the button is pressed (HIGH)
    digitalWrite(9, HIGH); // turn the LED on
  }
  else{ // if it is not pressed (LOW)
    digitalWrite(9, LOW); // turn the LED off
  }
  if (digitalRead(12) == HIGH){ // if the button is pressed (HIGH)
    digitalWrite(13, LOW); // drive this LED pin low (the original comments were inverted for this output)
  }
  else{ // if it is not pressed (LOW)
    digitalWrite(13, HIGH); // drive this LED pin high
}
}
| 33.833333 | 80 | 0.557635 |
a02958300b5a6a1f2534693eeced3041dc12c1f2 | 4,237 | ino | Arduino |
irremote7segmentdisplay/irremote7segmentdisplay.ino | bydzen/aio-arduino | cfdf6f9bbeb29866211244646dacd1f67e5ad5e6 | ["MIT"] | 1 | 2021-12-11T01:03:56.000Z | 2021-12-11T01:03:56.000Z |
irremote7segmentdisplay/irremote7segmentdisplay.ino | bydzen/aio-arduino | cfdf6f9bbeb29866211244646dacd1f67e5ad5e6 | ["MIT"] | null | null | null |
irremote7segmentdisplay/irremote7segmentdisplay.ino | bydzen/aio-arduino | cfdf6f9bbeb29866211244646dacd1f67e5ad5e6 | ["MIT"] | null | null | null |
#include <IRremote.h>
int irPIN = 10;
IRrecv recv(irPIN);
decode_results clickButton;
int G = 2;
int F = 3;
int E = 4;
int D = 5;
int C = 6;
int B = 7;
int A = 8;
int DP = 9;
void setup()
{
pinMode(LED_BUILTIN, OUTPUT);
recv.enableIRIn();
pinMode(G, OUTPUT);
pinMode(F, OUTPUT);
pinMode(E, OUTPUT);
pinMode(D, OUTPUT);
pinMode(C, OUTPUT);
pinMode(B, OUTPUT);
pinMode(A, OUTPUT);
pinMode(DP, OUTPUT);
Serial.begin(9600);
}
void loop()
{
// List of value button pressed on variabel numPressed
// Remote type: Remote Control Audio Mp3 Player RD-002 by RAYDEN
// 12495 = 1
// 6375 = 2
// 31365 = 3
// 4335 = 4
// 14535 = 5
// 23205 = 6
// 17085 = 7
// 19125 = 8
// 21165 = 9
// -20401 = 0
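  // Note on the codes above: clickButton.value is a 32-bit result from IRremote, but it is
  // stored in a 16-bit int on AVR boards, so each constant here is really the low 16 bits of
  // the raw code interpreted as a signed number -- which is why the "0" button shows up as
  // -20401. The truncated values still work for matching as long as they remain unique.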
if(recv.decode(&clickButton)) {
int numPressed = clickButton.value;
// Serial.println(recv.decode(&clickButton));
// Serial.println(clickButton.value);
Serial.println(numPressed);
if(numPressed == 12495) {
numOne();
} else if(numPressed == 6375) {
numTwo();
} else if (numPressed == 31365) {
numThree();
} else if (numPressed == 4335) {
numFour();
} else if (numPressed == 14535) {
numFive();
} else if (numPressed == 23205) {
numSix();
} else if (numPressed == 17085) {
numSeven();
} else if (numPressed == 19125) {
numEight();
} else if (numPressed == 21165) {
numNine();
} else if (numPressed == -20401) {
numZero();
}
digitalWrite(LED_BUILTIN, HIGH);
turnOffAll();
recv.resume();
}
}
void numOne() {
digitalWrite(G, LOW);
digitalWrite(F, LOW);
digitalWrite(E, LOW);
digitalWrite(D, LOW);
digitalWrite(C, HIGH);
digitalWrite(B, HIGH);
digitalWrite(A, LOW);
digitalWrite(DP, HIGH);
}
void numTwo() {
digitalWrite(G, HIGH);
digitalWrite(F, LOW);
digitalWrite(E, HIGH);
digitalWrite(D, HIGH);
digitalWrite(C, LOW);
digitalWrite(B, HIGH);
digitalWrite(A, HIGH);
digitalWrite(DP, HIGH);
}
void numThree() {
digitalWrite(G, HIGH);
digitalWrite(F, LOW);
digitalWrite(E, LOW);
digitalWrite(D, HIGH);
digitalWrite(C, HIGH);
digitalWrite(B, HIGH);
digitalWrite(A, HIGH);
digitalWrite(DP, HIGH);
}
void numFour() {
digitalWrite(G, HIGH);
digitalWrite(F, HIGH);
digitalWrite(E, LOW);
digitalWrite(D, LOW);
digitalWrite(C, HIGH);
digitalWrite(B, HIGH);
digitalWrite(A, LOW);
digitalWrite(DP, HIGH);
}
void numFive()
{
digitalWrite(G, HIGH);
digitalWrite(F, HIGH);
digitalWrite(E, LOW);
digitalWrite(D, HIGH);
digitalWrite(C, HIGH);
digitalWrite(B, LOW);
digitalWrite(A, HIGH);
digitalWrite(DP, HIGH);
}
void numSix() {
digitalWrite(G, HIGH);
digitalWrite(F, HIGH);
digitalWrite(E, HIGH);
digitalWrite(D, HIGH);
digitalWrite(C, HIGH);
digitalWrite(B, LOW);
digitalWrite(A, HIGH);
digitalWrite(DP, HIGH);
}
void numSeven() {
digitalWrite(G, LOW);
digitalWrite(F, LOW);
digitalWrite(E, LOW);
digitalWrite(D, LOW);
digitalWrite(C, HIGH);
digitalWrite(B, HIGH);
digitalWrite(A, HIGH);
digitalWrite(DP, HIGH);
}
void numEight() {
digitalWrite(G, HIGH);
digitalWrite(F, HIGH);
digitalWrite(E, HIGH);
digitalWrite(D, HIGH);
digitalWrite(C, HIGH);
digitalWrite(B, HIGH);
digitalWrite(A, HIGH);
digitalWrite(DP, HIGH);
}
void numNine() {
digitalWrite(G, HIGH);
digitalWrite(F, HIGH);
digitalWrite(E, LOW);
digitalWrite(D, HIGH);
digitalWrite(C, HIGH);
digitalWrite(B, HIGH);
digitalWrite(A, HIGH);
digitalWrite(DP, HIGH);
}
void numZero() {
digitalWrite(G, LOW);
digitalWrite(F, HIGH);
digitalWrite(E, HIGH);
digitalWrite(D, HIGH);
digitalWrite(C, HIGH);
digitalWrite(B, HIGH);
digitalWrite(A, HIGH);
digitalWrite(DP, HIGH);
}
void turnOffAll() {
delay(1000);
digitalWrite(LED_BUILTIN, LOW);
digitalWrite(G, LOW);
digitalWrite(F, LOW);
digitalWrite(E, LOW);
digitalWrite(D, LOW);
digitalWrite(C, LOW);
digitalWrite(B, LOW);
digitalWrite(A, LOW);
digitalWrite(DP, LOW);
}
| 20.668293 | 66 | 0.599009 |
b1b149fc8676736ba6850646d26e7c576e29202b | 343 | ino | Arduino |
Arduino (C y C++)/LCD/LCD.ino | rafatyn/Proyectos | d080e040a2b1205b7b15f5ada82fb759e3cd2869 | ["MIT"] | null | null | null |
Arduino (C y C++)/LCD/LCD.ino | rafatyn/Proyectos | d080e040a2b1205b7b15f5ada82fb759e3cd2869 | ["MIT"] | null | null | null |
Arduino (C y C++)/LCD/LCD.ino | rafatyn/Proyectos | d080e040a2b1205b7b15f5ada82fb759e3cd2869 | ["MIT"] | null | null | null |
#include <LiquidCrystal.h>
LiquidCrystal lcd(22, 23, 24, 25, 26, 27, 28);
void setup (void) {
lcd.begin(16, 2);
}
void loop (void) {
lcd.clear();
lcd.setCursor(3, 0);
lcd.print("Welcome to");
lcd.setCursor(1, 1);
lcd.print("geek-workshop");
delay(5000);
lcd.clear();
lcd.setCursor(0, 0);
lcd.print("I am hongyi_");
delay(5000);
}
| 17.15 | 46 | 0.635569 |
35d4546291c66a34943b2589f974a1e5cbd77f62 | 328 | ino | Arduino |
TwoLEDBlink/TwoLEDBlink.ino | Chronicbeard/ArduinoProjects | 915dbd0e083a4231333b3e8790d286aff3d0838e | ["CC0-1.0"] | null | null | null |
TwoLEDBlink/TwoLEDBlink.ino | Chronicbeard/ArduinoProjects | 915dbd0e083a4231333b3e8790d286aff3d0838e | ["CC0-1.0"] | null | null | null |
TwoLEDBlink/TwoLEDBlink.ino | Chronicbeard/ArduinoProjects | 915dbd0e083a4231333b3e8790d286aff3d0838e | ["CC0-1.0"] | null | null | null |
/*Adjustable Twin LED Flasher (as written, this sketch drives a single LED on pin 9)
  Flashes the LED at a rate set by the potentiometer.
*/
int led = 9;
int PotIn = A0;
int Flash;
void setup() {
pinMode (led, OUTPUT);
pinMode(PotIn, INPUT);
}
void loop() {
Flash = analogRead(PotIn);
digitalWrite(led, HIGH);
delay(Flash);
digitalWrite(led, LOW);
delay(Flash);
}
| 14.26087 | 52 | 0.667683 |
c2a86f47c18f51bf7f20f51df4b4890b5ff194af | 1,014 | ino | Arduino |
examples/ethernet/ethernet.ino | gaetancollaud/arduino-terminal | 35704593b4892eebf1a72daf9ac7d38051a8cc91 | ["MIT"] | null | null | null |
examples/ethernet/ethernet.ino | gaetancollaud/arduino-terminal | 35704593b4892eebf1a72daf9ac7d38051a8cc91 | ["MIT"] | null | null | null |
examples/ethernet/ethernet.ino | gaetancollaud/arduino-terminal | 35704593b4892eebf1a72daf9ac7d38051a8cc91 | ["MIT"] | null | null | null |
#include <SimpleTerminal.h>
#include <SPI.h>
#include <Ethernet.h>
SimpleTerminal terminal(NULL);
byte mac[] = {0xDE, 0xAD, 0xBE, 0xEF, 0xFE, 0xED};
IPAddress ip(172, 17, 10, 12);
EthernetServer server(80);
int var1;
void setup() {
Serial.begin(115200);
terminal.addCommand("test", (void*) test, "ma super description");
terminal.addVar("var1", INT, &var1);
Ethernet.begin(mac, ip);
server.begin();
Serial.print("server is at ");
Serial.println(Ethernet.localIP());
}
void test(String &name, String &line) {
Serial.print(name);
Serial.print(" received, line => ");
Serial.println(line);
}
void loop() {
terminal.run();
// listen for incoming clients
EthernetClient client = server.available();
if (client) {
Serial.println("new client");
//set stream to ethernet client
terminal.setStream(&client);
while (client.connected()) {
terminal.run();
}
delay(1);
client.stop();
Serial.println("client disconnected");
//set no stream
terminal.setStream(NULL);
}
}
| 18.777778 | 67 | 0.671598 |
d3b323fa9ea0f548aecc7401f8fbafe7996b1c6a | 249 | ino | Arduino |
examples/Blink/Blink.ino | tfeldmann/Arduino-Blinkenlight | ade60f70dede3f3df693dc8ce17b9a41e0fcc677 | ["MIT"] | 14 | 2021-12-16T17:59:17.000Z | 2022-03-06T12:47:21.000Z |
examples/Blink/Blink.ino | tfeldmann/Arduino-Indicator | ade60f70dede3f3df693dc8ce17b9a41e0fcc677 | ["MIT"] | null | null | null |
examples/Blink/Blink.ino | tfeldmann/Arduino-Indicator | ade60f70dede3f3df693dc8ce17b9a41e0fcc677 | ["MIT"] | null | null | null |
/*
Start a blinking pattern on the built-in LED.
*/
#include <Blinkenlight.h>
Blinkenlight led(13);
void setup()
{
// blink two times, pause, blink three times, longer pause, repeat
led.pattern(2, 3);
}
void loop()
{
led.update();
}
| 13.105263 | 70 | 0.646586 |
db6a3fc38730797ffbd053956ab78eda29dc42eb | 348 | ino | Arduino |
software/_proto_USBtoFTDI_test/nano_usbTest/nano_usbTest.ino | Nrpickle/BIS | 762f2cb5cb519baaedddd77523a2185ffc4338ec | ["MIT"] | 1 | 2015-01-26T22:10:10.000Z | 2015-01-26T22:10:10.000Z |
software/_proto_USBtoFTDI_test/nano_usbTest/nano_usbTest.ino | Nrpickle/BIS | 762f2cb5cb519baaedddd77523a2185ffc4338ec | ["MIT"] | null | null | null |
software/_proto_USBtoFTDI_test/nano_usbTest/nano_usbTest.ino | Nrpickle/BIS | 762f2cb5cb519baaedddd77523a2185ffc4338ec | ["MIT"] | null | null | null |
#include <SoftwareSerial.h>
SoftwareSerial PCComm(11,10); //Rx, Tx
void setup(){
Serial.begin(9600);
PCComm.begin(115200);
pinMode(13, OUTPUT);
}
void loop(){
if(PCComm.available()){
if(PCComm.read() == 'r'){
digitalWrite(13, HIGH);
PCComm.println("Fiiiiiiiiiiiiiiiiine.");
}
else {
digitalWrite(13, LOW);
}
}
}
| 15.130435 | 43 | 0.62069 |
d1f6be40772555352d2c46818d319b4b195b7de5 | 362 | ino | Arduino |
firmware/RC_Input_to_Serial.ino | gt-marine-robotics-group/orcs-comms | 91950713b96ad94229afa08928f90a98c43e8823 | ["MIT"] | null | null | null |
firmware/RC_Input_to_Serial.ino | gt-marine-robotics-group/orcs-comms | 91950713b96ad94229afa08928f90a98c43e8823 | ["MIT"] | null | null | null |
firmware/RC_Input_to_Serial.ino | gt-marine-robotics-group/orcs-comms | 91950713b96ad94229afa08928f90a98c43e8823 | ["MIT"] | null | null | null |
#include <ServoInput.h>
ServoInputPin<2> servo;
ServoInputPin<3> servo3;
void setup() {
// put your setup code here, to run once:
Serial.begin(9600);
}
void loop() {
// put your main code here, to run repeatedly:
float angle = servo.getAngle();
Serial.println(angle);
// angle = servo3.getAngle();
// Serial.println(angle);
delay(100);
}
| 17.238095 | 48 | 0.660221 |
9c899c6aeed134b6a2e3d9808b23b13e88976640 | 2,304 | ino | Arduino |
examples/CommandTree_Check/CommandTree_Check.ino | Vrekrer/Vrekrer_scpi_parser | 58e03b52fad58eb4a853e94805bc9ecac4982e55 | ["MIT"] | 43 | 2019-03-12T07:03:32.000Z | 2022-02-27T21:35:01.000Z |
examples/CommandTree_Check/CommandTree_Check.ino | Vrekrer/Vrekrer_scpi_parser | 58e03b52fad58eb4a853e94805bc9ecac4982e55 | ["MIT"] | 17 | 2020-05-10T19:27:37.000Z | 2022-02-07T10:03:27.000Z |
examples/CommandTree_Check/CommandTree_Check.ino | Vrekrer/Vrekrer_scpi_parser | 58e03b52fad58eb4a853e94805bc9ecac4982e55 | ["MIT"] | 15 | 2018-04-15T04:29:49.000Z | 2021-11-17T16:07:19.000Z |
/*
Vrekrer_scpi_parser library.
CommandTree_Check example.
Demonstrates how to verify the validity of the registered command tree.
In order to reduce RAM usage, Vrekrer_scpi_parser library (ver. 0.5 and later)
uses a hash algorithm to store and compare registered commands. In very rare
situations this might end in hash crashes (two commands have the same hash).
To check the uniqueness of the registered commands' hashes the `PrintDebugInfo`
function must be used.
If a hash crash exists, this can be solved changing the `hash_magic_number`
variable before registering the commands
*/
#include "Arduino.h"
#include "Vrekrer_scpi_parser.h"
SCPI_Parser my_instrument;
void setup()
{
//We change the `hash_magic_number` variable before registering the commands
my_instrument.hash_magic_number = 16; //16 will generate hash crashes
//The default value is 37 and good values are prime numbers (up to 113)
//This is a simple command tree with 8 registered commands:
my_instrument.RegisterCommand(F("*IDN?"), &Identify); //*IDN?
my_instrument.SetCommandTreeBase(F("TEST:"));
my_instrument.RegisterCommand(F(":A"), &DoNothing); //TEST:A
my_instrument.RegisterCommand(F(":A?"), &DoNothing); //TEST:A?
my_instrument.RegisterCommand(F(":B"), &DoNothing); //TEST:B
my_instrument.RegisterCommand(F(":C"), &DoNothing); //TEST:C
my_instrument.SetCommandTreeBase(F("NEW:TEST"));
my_instrument.RegisterCommand(F(":A"), &DoNothing); //NEW:TEST:A
my_instrument.SetCommandTreeBase(F("OLD:TEST"));
my_instrument.RegisterCommand(F(":B"), &DoNothing); //OLD:TEST:B
my_instrument.RegisterCommand(F(":C"), &DoNothing); //OLD:TEST:C
Serial.begin(9600);
while (!Serial) {;}
//`PrintDebugInfo` will print the registered tokens and
//command hashes to the serial interface.
my_instrument.PrintDebugInfo();
//See the result in the serial monitor and verify that
//there are no duplicated hashes or hashes equal to zero.
//Change the hash_magic_number to solve any problem.
}
void loop()
{
my_instrument.ProcessInput(Serial, "\n");
}
void Identify(SCPI_C commands, SCPI_P parameters, Stream& interface) {
interface.println(F("Vrekrer,Hash check example,#00,v0.5"));
}
void DoNothing(SCPI_C commands, SCPI_P parameters, Stream& interface) {
}
| 35.446154 | 79 | 0.742188 |
73105ded7088fb93adf032c4aaa359aaeb06947d | 3,733 | ino | Arduino |
aquaponia_main/aquaponia_main.ino | Tassany/projeto_aquaponia_fablab | 1973b9a74fd929ac445f95d1dda4cf0381c0c1cf | ["MIT"] | null | null | null |
aquaponia_main/aquaponia_main.ino | Tassany/projeto_aquaponia_fablab | 1973b9a74fd929ac445f95d1dda4cf0381c0c1cf | ["MIT"] | null | null | null |
aquaponia_main/aquaponia_main.ino | Tassany/projeto_aquaponia_fablab | 1973b9a74fd929ac445f95d1dda4cf0381c0c1cf | ["MIT"] | null | null | null |
/*
 Test version (flow sensor, level sensor, relay switching, motor switching, NTPtimeESP library)
*/
//NTPtimeESP library
#include <NTPtimeESP.h>
NTPtime NTPch("br.pool.ntp.org"); // Choose server pool as required
char *ssid = "VIVOFIBRA-8F30"; // Set your WiFi SSID
char *password = "C79A4B8B90"; // Set your WiFi password
/*
struct strDateTime
{
byte hour;
byte minute;
byte second;
int year;
byte month;
byte day;
byte dayofWeek;
boolean valid;
};
*/
strDateTime dateTime;
//Flow
float vazao; //Variable to hold the flow value in L/min
volatile int contaPulso; //Pulse count from the flow sensor (volatile because it is updated inside the ISR)
int pinoVazao = 13; //D7
//Level
bool sensorNivel;
int pinoNivel = 15; //D8
//Relay
int pinoRele = 12; //D6
//Motor
int pinoMotor = 14; //D5
bool statusMotor;
bool flag_acionamento = 0;
int tempo_acionamento = 3450;
//Relay timing
unsigned long verif_anterior_rele;
unsigned long verif_seguinte_rele;
unsigned long delay_rele = 10000;
void IRAM_ATTR incpulso()
{
  contaPulso++; //Increment the pulse counter
}
void setup()
{
  Serial.begin(9600); //Start the serial port at 9600 baud
  //NTPtimeESP library setup
Serial.println();
Serial.println("Booted");
Serial.println("Connecting to Wi-Fi");
WiFi.mode(WIFI_STA);
WiFi.begin (ssid, password);
while (WiFi.status() != WL_CONNECTED)
{
Serial.print(".");
delay(500);
}
Serial.println("WiFi connected");
  //Flow sensor setup on pin D7 (GPIO 13)
  pinMode(pinoVazao, INPUT);
  attachInterrupt(digitalPinToInterrupt(pinoVazao), incpulso, RISING); //Configure the pin to trigger an interrupt
  //Level sensor setup
  pinMode(pinoNivel,INPUT); //the original configured pinoRele here, which left the level sensor pin unconfigured
  //Relay setup
  pinMode(pinoRele,OUTPUT);
  //Motor setup
  pinMode(pinoMotor,OUTPUT);
  //Relay timing setup
verif_anterior_rele = millis();
verif_seguinte_rele = verif_anterior_rele + delay_rele;
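  // Note: comparing absolute millis() values like verif_seguinte_rele < verif_anterior_rele
  // works until millis() wraps around (after roughly 49 days). A rollover-safe sketch of the
  // same check, shown here only as an illustration and not wired into the logic below, is:
  //   if (millis() - verif_anterior_rele >= delay_rele) { /* act */ verif_anterior_rele = millis(); }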
  Serial.begin(9600); //Start the serial port at 9600 baud (duplicate of the call at the top of setup())
}
void loop ()
{
  //Monitor the time in order to decide when to switch the motor
  dateTime = NTPch.getNTPtime(-3, 0);
  //In case you want to see the current time
/*
if(dateTime.valid)
{
Serial.print(dateTime.hour);
Serial.print(" - ");
Serial.print(dateTime.minute);
Serial.print(" - ");
Serial.println(dateTime.second);
}
*/
  //Switch the motor when needed
if((dateTime.second > 30) && (dateTime.second < 33) )
{
flag_acionamento = 1;
}
else
{
flag_acionamento = 0;
}
if((flag_acionamento == 1) && (statusMotor == 0))
{
digitalWrite(pinoMotor,HIGH);
statusMotor = 1;
delay(tempo_acionamento);
}
else
{
digitalWrite(pinoMotor,LOW);
statusMotor = 0;
}
Serial.print("StatusMotor:");
Serial.print(statusMotor);
Serial.print(" ");
  //Flow sensor
  contaPulso = 0; //Reset the variable that counts pulses per second
  sei(); //Enable interrupts
  delay (1000); //Wait 1 second
  cli(); //Disable interrupts
  vazao = contaPulso / 7.5; //Convert to L/min
Serial.print("Vazao:");
Serial.print(vazao);
Serial.print(" ");
  //Read the level sensor state on pin D8 (GPIO 15)
sensorNivel = digitalRead(pinoNivel);
Serial.print("Sensor nivel:");
Serial.println(sensorNivel);
  //Check whether the relay needs to be switched on or off
verif_anterior_rele = millis();
if(verif_seguinte_rele < verif_anterior_rele)
{
if(sensorNivel == 1)
{
digitalWrite(pinoRele,LOW);
verif_seguinte_rele = verif_anterior_rele + delay_rele;
}
if(sensorNivel == 0)
{
digitalWrite(pinoRele,HIGH);
}
}
}
| 21.578035 | 121 | 0.674792 |
67425c6add3dbe84a2a7ec3854bb0205399eee09 | 7,684 | adoc | AsciiDoc |
docs/en-gb/modules/data/pages/practical-example-elasticsync-categories.adoc | plentymarkets/plenty-manual-docs | 65d179a8feb8fcf1b594ef45883e3437287d8e09 | ["MIT"] | null | null | null |
docs/en-gb/modules/data/pages/practical-example-elasticsync-categories.adoc | plentymarkets/plenty-manual-docs | 65d179a8feb8fcf1b594ef45883e3437287d8e09 | ["MIT"] | 2 | 2022-01-05T10:31:24.000Z | 2022-03-11T11:56:07.000Z |
docs/en-gb/modules/data/pages/practical-example-elasticsync-categories.adoc | plentymarkets/plenty-manual-docs | 65d179a8feb8fcf1b594ef45883e3437287d8e09 | ["MIT"] | 1 | 2021-03-01T09:12:18.000Z | 2021-03-01T09:12:18.000Z |
= Importing new categories
:lang: en
include::{includedir}/_header.adoc[]
:keywords: Importing category, Importing categories, Category importing, Categories importing, Category import, Categories import, Import category, Import categories, Category-Import, Categories-Import, Import-Category, Import-Categories
:position: 55
:url: data/importing-data/elasticsync-best-practices/best-practices-elasticsync-categories
:id: 26Y9QBJ
:author: team-item
////
zuletzt bearbeitet 14.12.2021
////
Categories help you group your products. They determine how items are structured in your online store.
Generally speaking, there are two different ways to create or update categories. If you only want to create or modify _a few categories_, then we would recommend <<item/settings/categories#300, doing this manually>>. However, if you have _a lot of categories_, then it will probably be quicker to import your category data.
That’s what this page is all about. Here you’ll learn how to import category information into plentymarkets. This is useful if you want to create a lot of new categories at once or if you want to update a lot of existing categories at once.
[TIP]
.Requirements
====
This page assumes that you’ve already thought up a <<item/settings/categories#100, meaningful category structure>> and that you have a basic understanding of the <<data/importing-data/ElasticSync#, import tool>>.
====
== Practical example: Initial situation
Imagine you sell clothing in your online store. You’re new to plentymarkets and you want to import all of your category data at once. You want to structure your categories like so:
[cols="3*^", grid=cols, frame=none, stripes=none]
|====
a| * Women
** Accessoires
** Shoes
** Clothing
*** Jeans
*** Tops
a| * Men
** Accessoires
** Shoes
** Clothing
*** Jeans
*** Tops
a| * Kids
** Accessoires
** Shoes
** Clothing
*** Jeans
*** Tops
|====
== Setting up the CSV file
First, transfer this category structure into a CSV file. This is the file that you’ll import into plentymarkets later. In our example, the categories would look like this in the CSV file:
image::data/importing-data/assets/best-practice-category-category-structure-csv.png[]
You are free to choose any names for the column headers. In this example, I’ve put the following category information in the columns:
* *Name* = The name of the category.
* *Parentname* = If you want this to be a main category, i.e. a top-level category, then leave this field blank. If you want this to be a sub-category, then enter the path of whichever category is directly above it. _Use semicolons_ to separate the individual category levels.
* *Multiname* = The complete category path, i.e. the Parentname and the Name. Decide which separator you want to use to separate the individual category levels. A sample file is sketched below.
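Put together, a bare-bones version of such a file could look like the sketch below. This is only an illustration: the comma is used here as the column delimiter and the semicolon as the category-level separator, matching the choices described above, so adjust both to whatever your file actually uses.
----
Name,Parentname,Multiname
Women,,Women
Accessoires,Women,Women;Accessoires
Shoes,Women,Women;Shoes
Clothing,Women,Women;Clothing
Jeans,Women;Clothing,Women;Clothing;Jeans
Tops,Women;Clothing,Women;Clothing;Tops
Men,,Men
Accessoires,Men,Men;Accessoires
----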
[discrete]
==== Adding further category info
You probably want to create much more than just a bare-bones category structure. Perhaps you want to add descriptive texts or SEO-relevant meta data.
Add a few more columns to your CSV file and use them to specify the additional category information. For example, like this:
image::data/importing-data/assets/best-practice-category-category-info-csv.png[]
You are free to choose any names for the column headers. You can add any or all of the <<data/importing-data/sync-types/elasticSync-categories#20, category information listed here>>. In this example, I’ve decided to add the following information:
* *Description* = A description of each category.
* *Meta title* = A text that should be displayed as the tab title in the web browser and in the search engine results.
* *Visible* = Whether I want the individual categories to be included in the sitemap.
+
1 = Yes, the category should be included in the sitemap. +
0 = No, the category should _not_ be included in the sitemap.
+
In this example, I only want the first two category levels to be included in the sitemap. I’ve inserted the numbers 1 and 0 accordingly. An extended sample with these columns follows below.
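As a rough illustration of how these extra columns fit into the same file (the description and meta title texts below are placeholders, not suggested wording):
----
Name,Parentname,Multiname,Description,Meta title,Visible
Women,,Women,Everything for women,Women's fashion at Example Store,1
Clothing,Women,Women;Clothing,Clothing for women,Women's clothing at Example Store,1
Jeans,Women;Clothing,Women;Clothing;Jeans,Jeans for women,Women's jeans at Example Store,0
----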
== Choosing the import settings
<<data/importing-data/ElasticSync#1210, Create a new import>>. Most of the settings can be chosen freely. However, since you want to import _category data_, make sure you’ve set the *Type* to *Categories*.
== Choosing the matching settings
During the import, plentymarkets will check whether the category already exists. This is done with the help of a so-called matching field.
Since we want to import _new categories_ in this example, configure the settings as follows.
[cols="1,3"]
|====
|Setting |Explanation
| *Combined category name*
|Which column of your CSV file contains the complete category path? Choose this column header from the first drop-down list. In our example, I’ll choose the column *Multiname*. +
Select the separator from the second drop-down list. In our example, I’ll choose the _semicolon_.
| *Import options*
|Since you’ll be importing new categories, choose the option *Only import new data* or *Import new, update existing data*.
|====
== Choosing the mapping settings
Your CSV file is already filled with a bunch of category information. Now you’ll decide _where in plentymarkets_ each piece of information should appear when you import the file. <<data/importing-data/sync-types/elasticSync-categories#20, Take a look at this page>> while you map the columns of your CSV file to the fields in plentymarkets. In this example, I’ve configured the settings as follows.
image::data/importing-data/assets/best-practice-category-mapping-settings.png[]
I created the bare-bones category structure by:
* mapping the *Name* column of my CSV file with the *Category settings / Name* data field in plentymarkets.
* mapping the *Parentname* column of my CSV file with the *Category / Parent category name* data field in plentymarkets.
* using the *Own value* option and entering *1* for the data field *Category / Create parent category if not found*. In other words, by deciding to create the superior category if it doesn’t already exist.
I added extra category information by:
* mapping the *Description* column of my CSV file with the *Category settings / Description 1* data field in plentymarkets.
* mapping the *Meta title* column of my CSV file with the *Category settings / Meta title* data field in plentymarkets.
* mapping the *Visible* column of my CSV file with the *Category / Visible* data field in plentymarkets.
== Did it work?
Ready to import your categories? Start the import and check whether the data was correctly imported into plentymarkets.
[.instruction]
Starting the import and checking the result:
. Activate the lines that should be imported (icon:toggle-on[role="green"]).
. Test the import (icon:plugin_stage_deploy[set=plenty]) or start the import (icon:play-circle-o[role="darkGrey"]). +
*_Note:_* This can take a few minutes.
. Go to *Item » Category*.
. Check whether the categories were correctly structured. +
*_Note:_* Click on a category’s folder icon (icon:folder[role="darkGrey"]) on the far left. This displays the subcategories within.
. Open a few categories and check their settings.
[TIP]
.Do a trial run
====
We recommend testing the import (icon:plugin_stage_deploy[set=plenty]) before you start it for the first time.
This imports the first 10 rows of the file and bypasses the cache.
It gives you time to check whether the import works correctly. If the import does not perform as expected, you can correct it before importing the entire file.
====
[TIP]
.Resetting the cache
====
Directly within the import, you’ll find the button *Reset cache* (icon:reload[set=plenty]).
This button allows you to reset the import cache in order to reimport a file that does not contain any changes.
====
| 49.896104 | 398 | 0.769261 |
e891f05d3724e92f312dbdd8dabf117e024baaa3 | 66 | asciidoc | AsciiDoc |
docs/index.asciidoc | blachniet/lsbucketbeat | 809324fb210c40101b01ae3d05bc948e4b0efead | ["Apache-2.0"] | null | null | null |
docs/index.asciidoc | blachniet/lsbucketbeat | 809324fb210c40101b01ae3d05bc948e4b0efead | ["Apache-2.0"] | null | null | null |
docs/index.asciidoc | blachniet/lsbucketbeat | 809324fb210c40101b01ae3d05bc948e4b0efead | ["Apache-2.0"] | null | null | null |
= Lsbucketbeat Docs
Welcome to the Lsbucketbeat documentation.
| 11
| 42
| 0.80303
|
69c95dce7e965da50475ddd69de9b8e0541c1873
| 143
|
adoc
|
AsciiDoc
|
day02/python/fallshare/README.adoc
|
AlexisTM/aoc-2021
|
91a801b3c812cc3d37d6088a2544227cf158d114
|
[
"MIT"
] | 11
|
2021-11-28T10:36:54.000Z
|
2021-12-21T10:38:34.000Z
|
day02/python/fallshare/README.adoc
|
AlexisTM/aoc-2021
|
91a801b3c812cc3d37d6088a2544227cf158d114
|
[
"MIT"
] | 83
|
2021-11-22T17:02:05.000Z
|
2022-01-29T10:27:31.000Z
|
day02/python/fallshare/README.adoc
|
AlexisTM/aoc-2021
|
91a801b3c812cc3d37d6088a2544227cf158d114
|
[
"MIT"
] | 19
|
2021-11-22T20:47:57.000Z
|
2022-02-01T08:51:19.000Z
|
= Advent of Code 2021
== Python
Not much to say. Finding proper names for variables is hard.
[source, python]
....
include::solution.py[]
....
| 14.3
| 60
| 0.692308
|
00a573510cb784d21fffb858f6d0c7a47e286d56
| 56
|
adoc
|
AsciiDoc
|
author/ietf/topics.adoc
|
webdev778/metanorma.org
|
eaff0f303f43123658e3107fadb709429a1d1cc0
|
[
"MIT"
] | 3
|
2021-05-18T07:58:37.000Z
|
2022-03-23T07:43:59.000Z
|
author/ietf/topics.adoc
|
techtrailhead/metanorma.org
|
742afb49871702e2354c9588575957a4f16c7e09
|
[
"MIT"
] | 159
|
2019-03-10T02:25:08.000Z
|
2021-03-11T10:09:50.000Z
|
author/ietf/topics.adoc
|
riboseinc/metanorma.com
|
da73dc49a3fcd463f44c72e5d9bd2068e5a55d12
|
[
"MIT"
] | 5
|
2021-05-08T06:25:01.000Z
|
2021-12-13T08:31:45.000Z
|
---
layout: ietf-flavor
title: Using Metanorma-IETF
---
| 11.2
| 27
| 0.696429
|
b62f3d60d8cd2954419545a899db663dbad7609d
| 5,871
|
adoc
|
AsciiDoc
|
antora/components/userguide/modules/fun/pages/core-concepts/apache-isis-vs/cqrs.adoc
|
ahus1/isis
|
e75fa2d61b78757a7e4e0dd4772412f1d72ea3a6
|
[
"Apache-2.0"
] | null | null | null |
antora/components/userguide/modules/fun/pages/core-concepts/apache-isis-vs/cqrs.adoc
|
ahus1/isis
|
e75fa2d61b78757a7e4e0dd4772412f1d72ea3a6
|
[
"Apache-2.0"
] | null | null | null |
antora/components/userguide/modules/fun/pages/core-concepts/apache-isis-vs/cqrs.adoc
|
ahus1/isis
|
e75fa2d61b78757a7e4e0dd4772412f1d72ea3a6
|
[
"Apache-2.0"
] | null | null | null |
[[cqrs]]
= Apache Isis vs CQRS
:Notice: Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at. http://www.apache.org/licenses/LICENSE-2.0 . Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
include::_attributes.adoc[]
:page-partial:
The link:https://martinfowler.com/bliki/CQRS.html[CQRS architectural pattern] (it stands for "Command Query Responsibility Separation") is the idea that the domain objects that mutate the state of the system - to which commands are sent and which then execute - should be separated from the mechanism by which the state of the system is queried (rendered).
The former are sometimes called the "write (domain) model", the latter the "read model".
In the canonical version of this pattern there are separate datastores.
The commands act upon a command/write datastore.
The data in this datastore is then replicated in some way to the query/read datastore, usually denormalized or otherwise such that it is easy to query.
CQRS advocates recommend using very simple (almost naive) technology for the query/read model; it should be a simple projection of the query datastore.
Complexity instead lives elsewhere: business logic in the command/write model, and in the transformation logic between the command/write and read/query datastores.
In particular, there is no requirement for the two datastores to use the same technology: one might be an RDBMS while the other a NoSQL datastore or even datawarehouse.
In most implementations the command and query datastores are _not_ updated in the same transaction; instead there is some sort of replication mechanism.
This also means that the query datastore is eventually consistent rather than always consistent; there could be a lag of a few seconds before it is updated.
This means in turn that CQRS implementations require mechanisms to cater for offline query datastores; usually some sort of event bus.
The CQRS architecture's extreme separation of responsibilities can result in a lot of boilerplate.
Any given domain concept, eg `Customer`, must be represented both in the command/write model and also in the query/read model.
Each business operation upon the command model is reified as a command object, for example `PlaceOrderCommand`.
Comparing CQRS to Apache Isis, the most obvious difference is that Apache Isis does not separate out a command/write model from a query/read model, and there is usually just a single datastore.
But then again, having a separate read model just so that the querying is very straightforward is pointless with Apache Isis because, of course, Isis provides the UI "for free".
There are other reasons though why a separate read model might make sense, such as to precompute particular queries, or against denormalized data.
In these cases Apache Isis can often provide a reasonable alternative, namely to map domain entities against RDBMS views, either materialized views or dynamic.
In such cases there is still only a single physical datastore, and so transactional integrity is retained.
Or, the CQRS architecture can be more fully implemented with Apache Isis by introducing a separate read model, synchronized using the xref:refguide:applib-svc:persistence-layer-spi/PublisherService.adoc[`PublisherService`], or using xref:refguide:applib-cm:classes/super.adoc#AbstractSubscriber[subscribers] on the xref:refguide:applib-svc:core-domain-api/EventBusService.adoc[`EventBusService`].
One can then use xref:userguide:fun:building-blocks.adoc#view-models[view models] to surface the data in the external read datastore.
With respect to commands, Apache Isis does of course support the xref:refguide:applib-svc:application-layer-spi/CommandService.adoc[`CommandService`] which allows each business action to be reified into a `Command`.
However, names are misleading here: Apache Isis' commands are relatively passive, merely recording the intent of the user to invoke some operation.
In a CQRS architecture, though, commands take a more active role, locating and acting upon the domain objects.
More significantly, in CQRS each command has its own class, such as `PlaceOrderCommand`, instantiated by the client and then executed.
With Apache Isis, though, the end-user merely invokes the `placeOrder(...)` action upon the domain object; the framework itself creates the `Command` as a side-effect of this.
In CQRS the commands correspond to the business logic that mutates the system.
Whether this logic is part of the command class (`PlaceOrderCommand`) or whether that command delegates to methods on the domain object is an implementation detail; but it certainly is common for the business logic to be wholly within the command object and for the domain object to be merely a data holder of the data within the command/write datastore.
In Apache Isis this same separation of business logic from the underlying data can be accomplished most straightforwardly using xref:userguide:fun:building-blocks.adoc#mixins[mixins].
In the UI (surfaced by the xref:vw:ROOT:about.adoc[Wicket viewer]) or in the REST API (surfaced by the xref:vro:ROOT:about.adoc[RestfulObjects viewer]) the behaviour appears to reside on the domain object; however the behaviour actually resides on separate classes and is mixed in (like a trait) only at runtime.
| 117.42
| 759
| 0.809743
|
d67a6afb6d62e7dae52e880456e163135e3ebfe9
| 2,633
|
adoc
|
AsciiDoc
|
antora/components/refguide/modules/applib-classes/pages/events/uievent.adoc
|
dsp-testing/isis
|
6272860133001030c6a7442a394b84dcd782d816
|
[
"Apache-2.0"
] | 665
|
2015-01-01T06:06:28.000Z
|
2022-03-27T01:11:56.000Z
|
antora/components/refguide/modules/applib-classes/pages/events/uievent.adoc
|
dsp-testing/isis
|
6272860133001030c6a7442a394b84dcd782d816
|
[
"Apache-2.0"
] | 176
|
2015-02-07T11:29:36.000Z
|
2022-03-25T04:43:12.000Z
|
antora/components/refguide/modules/applib-classes/pages/events/uievent.adoc
|
pjfanning/isis
|
c02d8a04ebdeedd85163aebf8f944835dc97a2e2
|
[
"Apache-2.0"
] | 337
|
2015-01-02T03:01:34.000Z
|
2022-03-21T15:56:28.000Z
|
= UI Event Classes
:Notice: Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at. http://www.apache.org/licenses/LICENSE-2.0 . Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
:page-partial:
UI events are broadcast on the xref:refguide:applib:index/services/eventbus/EventBusService.adoc[EventBusService] as the result of rendering a domain object.
They allow subscribers to change various presentation aspects of the rendered object.
Note that if the domain object defines its own layout preferences (for example, a xref:refguide:applib-methods:ui-hints.adoc#title[title()] supporting method) then these will take precedence.
.UI Event Classes
[cols="2m,2a,2a,2m", options="header"]
|===
|API
|Event class defined by xref:refguide:applib:index/annotation/DomainObjectLayout.adoc[@DomainObjectLayout]
|Published when
|Overridden by
|xref:refguide:applib:index/events/ui/TitleUiEvent.adoc[TitleUiEvent]
|xref:refguide:applib:index/annotation/DomainObjectLayout.adoc#titleUiEvent[titleUiEvent()]
|\... rendering the title for a domain object.
|xref:refguide:applib-methods:ui-hints.adoc#title[title()] +
xref:refguide:applib:index/annotation/Title.adoc[@Title]
|xref:refguide:applib:index/events/ui/IconUiEvent.adoc[IconUiEvent]
|xref:refguide:applib:index/annotation/DomainObjectLayout.adoc#iconUiEvent[iconUiEvent()]
|\... rendering an icon for a domain object.
|xref:refguide:applib-methods:ui-hints.adoc#iconName[iconName()]
|xref:refguide:applib:index/events/ui/CssClassUiEvent.adoc[CssClassUiEvent]
|xref:refguide:applib:index/annotation/DomainObjectLayout.adoc#cssClassUiEvent[cssClassUiEvent()]
|\... obtaining a CSS class hint for a domain object.
|xref:refguide:applib-methods:ui-hints.adoc#cssClass[cssClass()]
|xref:refguide:applib:index/events/ui/LayoutUiEvent.adoc[LayoutUiEvent]
|xref:refguide:applib:index/annotation/DomainObjectLayout.adoc#layoutUiEvent[layoutUiEvent()]
|\... obtain an alternative layout for a domain object.
|xref:refguide:applib-methods:ui-hints.adoc#layout[layout()]
|===
| 48.759259
| 759
| 0.801367
|
5a17e6cdba6fef4d937f24d1c3331e2c2425a84f
| 215
|
adoc
|
AsciiDoc
|
src/docs/asciidoc/manuals/security/security-manual.adoc
|
Woody64k/my-consulting-handbook
|
44b0d07539c6e228aae6bfeb375e4da32e70d48b
|
[
"MIT"
] | 1
|
2020-07-12T08:20:53.000Z
|
2020-07-12T08:20:53.000Z
|
src/docs/asciidoc/manuals/security/security-manual.adoc
|
Woody64k/my-consulting-handbook
|
44b0d07539c6e228aae6bfeb375e4da32e70d48b
|
[
"MIT"
] | null | null | null |
src/docs/asciidoc/manuals/security/security-manual.adoc
|
Woody64k/my-consulting-handbook
|
44b0d07539c6e228aae6bfeb375e4da32e70d48b
|
[
"MIT"
] | null | null | null |
= Security Manual
== Introduction (Security)
== Creating Certificates (Security)
Creating a certificate as it is used, for example, for SSL.
[source,shell]
----
include::createCertificate.sh[]
----
| 19.545455
| 65
| 0.739535
|
ed5fe4bbd1fe48d935c1b55eda4d1cf626ab1a49
| 7,039
|
adoc
|
AsciiDoc
|
ubuntu-specific/gcc.adoc
|
tomaszgigiel/notebook
|
19c7400b35e2cc2316ff3e2d50b08978fd23a6e4
|
[
"Apache-2.0"
] | null | null | null |
ubuntu-specific/gcc.adoc
|
tomaszgigiel/notebook
|
19c7400b35e2cc2316ff3e2d50b08978fd23a6e4
|
[
"Apache-2.0"
] | null | null | null |
ubuntu-specific/gcc.adoc
|
tomaszgigiel/notebook
|
19c7400b35e2cc2316ff3e2d50b08978fd23a6e4
|
[
"Apache-2.0"
] | null | null | null |
= bash
[source,bash]
----
tomasz@tom:~$ sudo apt update
[sudo] password for tomasz:
Hit:1 http://pl.archive.ubuntu.com/ubuntu bionic InRelease
Hit:2 http://pl.archive.ubuntu.com/ubuntu bionic-updates InRelease
Hit:3 http://pl.archive.ubuntu.com/ubuntu bionic-backports InRelease
Get:4 http://security.ubuntu.com/ubuntu bionic-security InRelease [88,7 kB]
Fetched 88,7 kB in 1s (96,8 kB/s)
Reading package lists... Done
Building dependency tree
Reading state information... Done
2 packages can be upgraded. Run 'apt list --upgradable' to see them.
tomasz@tom:~$ sudo apt install build-essential
Reading package lists... Done
Building dependency tree
Reading state information... Done
The following package was automatically installed and is no longer required:
libllvm7
Use 'sudo apt autoremove' to remove it.
The following additional packages will be installed:
dpkg-dev fakeroot libalgorithm-diff-perl libalgorithm-diff-xs-perl
libalgorithm-merge-perl libdpkg-perl libfakeroot make
Suggested packages:
debian-keyring bzr make-doc
The following NEW packages will be installed:
build-essential dpkg-dev fakeroot libalgorithm-diff-perl
libalgorithm-diff-xs-perl libalgorithm-merge-perl libfakeroot make
The following packages will be upgraded:
libdpkg-perl
1 upgraded, 8 newly installed, 0 to remove and 1 not upgraded.
Need to get 1 136 kB of archives.
After this operation, 2 974 kB of additional disk space will be used.
Do you want to continue? [Y/n] Y
Get:1 http://pl.archive.ubuntu.com/ubuntu bionic/main amd64 make amd64 4.1-9.1ubuntu1 [154 kB]
Get:2 http://pl.archive.ubuntu.com/ubuntu bionic-updates/main amd64 libdpkg-perl all 1.19.0.5ubuntu2.2 [211 kB]
Get:3 http://pl.archive.ubuntu.com/ubuntu bionic-updates/main amd64 dpkg-dev all 1.19.0.5ubuntu2.2 [607 kB]
Get:4 http://pl.archive.ubuntu.com/ubuntu bionic/main amd64 build-essential amd64 12.4ubuntu1 [4 758 B]
Get:5 http://pl.archive.ubuntu.com/ubuntu bionic/main amd64 libfakeroot amd64 1.22-2ubuntu1 [25,9 kB]
Get:6 http://pl.archive.ubuntu.com/ubuntu bionic/main amd64 fakeroot amd64 1.22-2ubuntu1 [62,3 kB]
Get:7 http://pl.archive.ubuntu.com/ubuntu bionic/main amd64 libalgorithm-diff-perl all 1.19.03-1 [47,6 kB]
Get:8 http://pl.archive.ubuntu.com/ubuntu bionic/main amd64 libalgorithm-diff-xs-perl amd64 0.04-5 [11,1 kB]
Get:9 http://pl.archive.ubuntu.com/ubuntu bionic/main amd64 libalgorithm-merge-perl all 0.08-3 [12,0 kB]
Fetched 1 136 kB in 0s (7 109 kB/s)
Selecting previously unselected package make.
(Reading database ... 186959 files and directories currently installed.)
Preparing to unpack .../0-make_4.1-9.1ubuntu1_amd64.deb ...
Unpacking make (4.1-9.1ubuntu1) ...
Preparing to unpack .../1-libdpkg-perl_1.19.0.5ubuntu2.2_all.deb ...
Unpacking libdpkg-perl (1.19.0.5ubuntu2.2) over (1.19.0.5ubuntu2.1) ...
Selecting previously unselected package dpkg-dev.
Preparing to unpack .../2-dpkg-dev_1.19.0.5ubuntu2.2_all.deb ...
Unpacking dpkg-dev (1.19.0.5ubuntu2.2) ...
Selecting previously unselected package build-essential.
Preparing to unpack .../3-build-essential_12.4ubuntu1_amd64.deb ...
Unpacking build-essential (12.4ubuntu1) ...
Selecting previously unselected package libfakeroot:amd64.
Preparing to unpack .../4-libfakeroot_1.22-2ubuntu1_amd64.deb ...
Unpacking libfakeroot:amd64 (1.22-2ubuntu1) ...
Selecting previously unselected package fakeroot.
Preparing to unpack .../5-fakeroot_1.22-2ubuntu1_amd64.deb ...
Unpacking fakeroot (1.22-2ubuntu1) ...
Selecting previously unselected package libalgorithm-diff-perl.
Preparing to unpack .../6-libalgorithm-diff-perl_1.19.03-1_all.deb ...
Unpacking libalgorithm-diff-perl (1.19.03-1) ...
Selecting previously unselected package libalgorithm-diff-xs-perl.
Preparing to unpack .../7-libalgorithm-diff-xs-perl_0.04-5_amd64.deb ...
Unpacking libalgorithm-diff-xs-perl (0.04-5) ...
Selecting previously unselected package libalgorithm-merge-perl.
Preparing to unpack .../8-libalgorithm-merge-perl_0.08-3_all.deb ...
Unpacking libalgorithm-merge-perl (0.08-3) ...
Setting up make (4.1-9.1ubuntu1) ...
Setting up libdpkg-perl (1.19.0.5ubuntu2.2) ...
Setting up dpkg-dev (1.19.0.5ubuntu2.2) ...
Processing triggers for libc-bin (2.27-3ubuntu1) ...
Setting up libfakeroot:amd64 (1.22-2ubuntu1) ...
Setting up libalgorithm-diff-perl (1.19.03-1) ...
Processing triggers for man-db (2.8.3-2ubuntu0.1) ...
Setting up build-essential (12.4ubuntu1) ...
Setting up fakeroot (1.22-2ubuntu1) ...
update-alternatives: using /usr/bin/fakeroot-sysv to provide /usr/bin/fakeroot (fakeroot) in auto mode
Setting up libalgorithm-merge-perl (0.08-3) ...
Setting up libalgorithm-diff-xs-perl (0.04-5) ...
Processing triggers for libc-bin (2.27-3ubuntu1) ...
tomasz@tom:~$ sudo apt-get install manpages-dev
Reading package lists... Done
Building dependency tree
Reading state information... Done
manpages-dev is already the newest version (4.15-1).
manpages-dev set to manually installed.
The following package was automatically installed and is no longer required:
libllvm7
Use 'sudo apt autoremove' to remove it.
0 upgraded, 0 newly installed, 0 to remove and 1 not upgraded.
tomasz@tom:~$ gcc --version
gcc (Ubuntu 7.4.0-1ubuntu1~18.04.1) 7.4.0
Copyright (C) 2017 Free Software Foundation, Inc.
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
tomasz@tom:~$ cd /home/tomasz/Documents/workspace-cpp/hello/
tomasz@tom:~/Documents/workspace-cpp/hello$ gcc hello.cpp -o hello
/tmp/cch5cNmY.o: In function `main':
hello.cpp:(.text+0x19): undefined reference to `std::cout'
hello.cpp:(.text+0x1e): undefined reference to `std::basic_ostream<char, std::char_traits<char> >& std::operator<< <std::char_traits<char> >(std::basic_ostream<char, std::char_traits<char> >&, char const*)'
/tmp/cch5cNmY.o: In function `__static_initialization_and_destruction_0(int, int)':
hello.cpp:(.text+0x4e): undefined reference to `std::ios_base::Init::Init()'
hello.cpp:(.text+0x63): undefined reference to `std::ios_base::Init::~Init()'
collect2: error: ld returned 1 exit status
tomasz@tom:~/Documents/workspace-cpp/hello$ gcc hello.cpp -lstdc++ -o hello
tomasz@tom:~/Documents/workspace-cpp/hello$ ./hello
Hello, World!
tomasz@tom:~/Documents/workspace-cpp/hello$ g++ hello.cpp -o hello
tomasz@tom:~/Documents/workspace-cpp/hello$ ./hello
Hello, World!
tomasz@tom:~/Documents/workspace-cpp/hello$ gcc hello.c -o hello
tomasz@tom:~/Documents/workspace-cpp/hello$ ./hello
Hello World!
----
= hello.cpp
[source,cpp]
----
#include <iostream>
using namespace std;
int main(int argc, char **argv) {
cout << "Hello, World!" << endl;
return 0;
}
----
= hello.c
[source,c]
----
#include <stdio.h>
int main(int argc, char **argv) {
printf ("Hello, World!\n");
return 0;
}
----
= www
. https://linuxize.com/post/how-to-install-gcc-compiler-on-ubuntu-18-04/
. https://stackoverflow.com/questions/28236870/undefined-reference-to-stdcout
| 45.121795
| 206
| 0.747976
|
eb554da0e2da6c6c4ad933243c884a707dcfd52a
| 2,120
|
adoc
|
AsciiDoc
|
docs/userguide/src/en/cmmn/ch01-Introduction.adoc
|
gro-mar/flowable-engine
|
90c3b1ae6e909a1791103b5e4c339918e471da84
|
[
"Apache-2.0"
] | null | null | null |
docs/userguide/src/en/cmmn/ch01-Introduction.adoc
|
gro-mar/flowable-engine
|
90c3b1ae6e909a1791103b5e4c339918e471da84
|
[
"Apache-2.0"
] | null | null | null |
docs/userguide/src/en/cmmn/ch01-Introduction.adoc
|
gro-mar/flowable-engine
|
90c3b1ae6e909a1791103b5e4c339918e471da84
|
[
"Apache-2.0"
] | 2
|
2018-10-04T08:33:41.000Z
|
2018-10-04T08:36:03.000Z
|
== Introduction
[[license]]
=== License
Flowable is distributed under link:$$http://www.apache.org/licenses/LICENSE-2.0.html$$[the Apache V2 license].
[[download]]
=== Download
link:$$http://flowable.org/downloads.html$$[http://flowable.org/downloads.html]
[[sources]]
=== Sources
The distribution contains most of the sources as JAR files. The source code of Flowable can be found on link:$$https://github.com/flowable/flowable-engine$$[https://github.com/flowable/flowable-engine]
[[experimental]]
=== Experimental Release
This is the first release of Flowable containing the CMMN engine, which is currently marked as experimental.
Similar to experimental features in the other engines, this means that no stability guarantees are given with regard to backwards compatibility (packages, classes, data),
as we want to use this release to gather feedback from the community before making the APIs fully final. However, future release notes will describe any such changes if they are made.
In the next releases, we will enhance and expand on the CMMN 1.1 support and continue to add more features.
[[required.software]]
=== Required software
Flowable runs on a JDK higher than or equal to version 7. Go to link:$$http://www.oracle.com/technetwork/java/javase/downloads/index.html$$[Oracle Java SE downloads] and click on button "Download JDK". There are installation instructions on that page as well. To verify that your installation was successful, run +java -version+ on the command line. That should print the installed version of your JDK.
[[reporting.problems]]
=== Reporting problems
Questions and comments can be discussed on link:$$https://forum.flowable.org$$[the Flowable forum]. Issues can be created in link:$$https://github.com/flowable/flowable-engine/issues$$[our Github issue tracker].
[[internal]]
=== Internal implementation classes
In the JAR file, all classes in packages that have +.impl.+ in them are implementation classes and should be considered internal. No stability guarantees are given on classes or interfaces that are in implementation classes.
| 41.568627
| 405
| 0.77217
|
6626f50f0c7f25a892e34bdebeb3929e5cfd2d2f
| 4,074
|
adoc
|
AsciiDoc
|
rest_api/storage_apis/storage-apis-index.adoc
|
alebedev87/openshift-docs
|
b7ed96ce84670e2b286f51b4303c144a01764e2b
|
[
"Apache-2.0"
] | 625
|
2015-01-07T02:53:02.000Z
|
2022-03-29T06:07:57.000Z
|
rest_api/storage_apis/storage-apis-index.adoc
|
alebedev87/openshift-docs
|
b7ed96ce84670e2b286f51b4303c144a01764e2b
|
[
"Apache-2.0"
] | 21,851
|
2015-01-05T15:17:19.000Z
|
2022-03-31T22:14:25.000Z
|
rest_api/storage_apis/storage-apis-index.adoc
|
alebedev87/openshift-docs
|
b7ed96ce84670e2b286f51b4303c144a01764e2b
|
[
"Apache-2.0"
] | 1,681
|
2015-01-06T21:10:24.000Z
|
2022-03-28T06:44:50.000Z
|
[id="storage-apis"]
= Storage APIs
ifdef::product-title[]
include::modules/common-attributes.adoc[]
endif::[]
toc::[]
== CSIDriver [storage.k8s.io/v1]
Description::
+
--
CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.
--
Type::
`object`
== CSINode [storage.k8s.io/v1]
Description::
+
--
CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.
--
Type::
`object`
== CSIStorageCapacity [storage.k8s.io/v1beta1]
Description::
+
--
CSIStorageCapacity stores the result of one CSI GetCapacity call. For a given StorageClass, this describes the available capacity in a particular topology segment. This can be used when considering where to instantiate new PersistentVolumes.
For example this can express things like:

* StorageClass "standard" has "1234 GiB" available in "topology.kubernetes.io/zone=us-east1"
* StorageClass "localssd" has "10 GiB" available in "kubernetes.io/hostname=knode-abc123"

The following three cases all imply that no capacity is available for a certain combination:

* no object exists with suitable topology and storage class name
* such an object exists, but the capacity is unset
* such an object exists, but the capacity is zero
The producer of these objects can decide which approach is more suitable.
They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate is enabled there and a CSI driver opts into capacity-aware scheduling with CSIDriver.StorageCapacity.
--
Type::
`object`
== PersistentVolumeClaim [core/v1]
Description::
+
--
PersistentVolumeClaim is a user's request for and claim to a persistent volume
--
Type::
`object`
== StorageClass [storage.k8s.io/v1]
Description::
+
--
StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.
StorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.
--
Type::
`object`
== StorageState [migration.k8s.io/v1alpha1]
Description::
+
--
The state of the storage of a specific resource.
--
Type::
`object`
== StorageVersionMigration [migration.k8s.io/v1alpha1]
Description::
+
--
StorageVersionMigration represents a migration of stored data to the latest storage version.
--
Type::
`object`
== VolumeAttachment [storage.k8s.io/v1]
Description::
+
--
VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.
VolumeAttachment objects are non-namespaced.
--
Type::
`object`
== VolumeSnapshot [snapshot.storage.k8s.io/v1]
Description::
+
--
VolumeSnapshot is a user's request for either creating a point-in-time snapshot of a persistent volume, or binding to a pre-existing snapshot.
--
Type::
`object`
== VolumeSnapshotClass [snapshot.storage.k8s.io/v1]
Description::
+
--
VolumeSnapshotClass specifies parameters that an underlying storage system uses when creating a volume snapshot. A specific VolumeSnapshotClass is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses are non-namespaced.
--
Type::
`object`
== VolumeSnapshotContent [snapshot.storage.k8s.io/v1]
Description::
+
--
VolumeSnapshotContent represents the actual "on-disk" snapshot object in the underlying storage system
--
Type::
`object`
| 28.690141
| 590
| 0.776141
|
94167853e130b426f57bd791a03a5fb804ee6374
| 1,460
|
adoc
|
AsciiDoc
|
docs/modules/sqldb/partials/navlist/navlist-schema-type-repo-p.adoc
|
DataHandwerk/sqldb-docs
|
c81fb4fd021799a30c022548c5cd8a325d0e1b9c
|
[
"MIT"
] | null | null | null |
docs/modules/sqldb/partials/navlist/navlist-schema-type-repo-p.adoc
|
DataHandwerk/sqldb-docs
|
c81fb4fd021799a30c022548c5cd8a325d0e1b9c
|
[
"MIT"
] | null | null | null |
docs/modules/sqldb/partials/navlist/navlist-schema-type-repo-p.adoc
|
DataHandwerk/sqldb-docs
|
c81fb4fd021799a30c022548c5cd8a325d0e1b9c
|
[
"MIT"
] | null | null | null |
* xref:repo.usp_index_finish.adoc[]
* xref:repo.usp_index_foreignkey.adoc[]
* xref:repo.usp_index_inheritance.adoc[]
* xref:repo.usp_index_settings.adoc[]
* xref:repo.usp_index_virtual_set.adoc[]
* xref:repo.usp_main.adoc[]
* xref:repo.usp_persist_foreignkey_indexes_union_t.adoc[]
* xref:repo.usp_persist_index_columlist_t.adoc[]
* xref:repo.usp_persist_index_ssas_t.adoc[]
* xref:repo.usp_persist_indexcolumn_referencedreferencing_hasfullcolumnsinreferencing_t.adoc[]
* xref:repo.usp_persist_indexcolumn_ssas_t.adoc[]
* xref:repo.usp_persist_indexcolumn_union_t.adoc[]
* xref:repo.usp_persist_repoobject_external_tgt.adoc[]
* xref:repo.usp_persist_repoobject_sat2_t.adoc[]
* xref:repo.usp_persist_repoobject_ssas_tgt.adoc[]
* xref:repo.usp_persist_repoobjectcolumn_external_tgt.adoc[]
* xref:repo.usp_persist_repoobjectcolumn_ssas_tgt.adoc[]
* xref:repo.usp_persist_reposchema_ssas_tgt.adoc[]
* xref:repo.usp_persistence_delete.adoc[]
* xref:repo.usp_persistence_set.adoc[]
* xref:repo.usp_repoobjectcolumn_update_repoobjectcolumn_column_id.adoc[]
* xref:repo.usp_sync_guid.adoc[]
* xref:repo.usp_sync_guid_repoobject.adoc[]
* xref:repo.usp_sync_guid_repoobject_ssas.adoc[]
* xref:repo.usp_sync_guid_repoobjectcolumn.adoc[]
* xref:repo.usp_sync_guid_repoobjectcolumn_ssas.adoc[]
* xref:repo.usp_sync_guid_reposchema.adoc[]
* xref:repo.usp_sync_guid_reposchema_ssas.adoc[]
* xref:repo.usp_sync_guid_ssas.adoc[]
* xref:repo.usp_update_referencing_count.adoc[]
| 47.096774
| 94
| 0.835616
|
a71a7026bb420ab107fd304e64242432aaa06a26
| 378
|
adoc
|
AsciiDoc
|
resources/yapf_nbformat/README-yapf_nbformat.adoc
|
jendelel/codenames
|
ccd0bd7578b3deedeec60d0849ec4ebca48b6426
|
[
"MIT"
] | 13
|
2019-03-03T23:51:56.000Z
|
2022-02-26T10:14:15.000Z
|
resources/yapf_nbformat/README-yapf_nbformat.adoc
|
jendelel/codenames
|
ccd0bd7578b3deedeec60d0849ec4ebca48b6426
|
[
"MIT"
] | 2
|
2021-01-31T12:14:14.000Z
|
2022-02-09T23:29:31.000Z
|
resources/yapf_nbformat/README-yapf_nbformat.adoc
|
BreastGAN/experiment2
|
2a1d15c1f479bbd6ca58af4e3b1379bf34b89f51
|
[
"Apache-2.0"
] | 5
|
2019-04-11T11:12:02.000Z
|
2021-12-22T13:29:37.000Z
|
= yapf_nbformat
Ondrej Skopek <oskopek@oskopek.com>
Runs YAPF on individual notebook cells using yapf_api.
* To skip formatting for a cell (for example, one with Jupyter-specific custom syntax), add `# noqa` to the first line of the cell.
* Removes newlines at the end of cells.
* Uses the style configuration file `.style.yapf`.
* Has a dry-run option for checking proper formatting (e.g. in a Git hook).
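A minimal sketch of the behaviour described above is shown below. This is an illustration rather than the shipped script, and it assumes the `nbformat` and `yapf` packages are installed.

[source,python]
----
# Illustrative sketch: format each code cell with yapf_api, honour `# noqa`
# on the first line, strip trailing newlines, and use the `.style.yapf` file.
import sys

import nbformat
from yapf.yapflib.yapf_api import FormatCode


def format_notebook(path: str) -> None:
    nb = nbformat.read(path, as_version=4)
    for cell in nb.cells:
        first_line = cell.source.splitlines()[0] if cell.source else ""
        if cell.cell_type != "code" or "# noqa" in first_line:
            continue  # only format code cells that do not opt out
        formatted, _changed = FormatCode(cell.source, style_config=".style.yapf")
        cell.source = formatted.rstrip("\n")  # remove newlines at the end of the cell
    nbformat.write(nb, path)


if __name__ == "__main__":
    format_notebook(sys.argv[1])
----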
| 34.363636
| 112
| 0.753968
|
3aa9f00aefaf829673ab0140e9c88e5fb5007d10
| 1,460
|
adoc
|
AsciiDoc
|
MEMBERS.adoc
|
NicoWde/vanillastack
|
f8adce70bda4b9e83144dde1f5cf03e57858cc8a
|
[
"Apache-2.0"
] | 39
|
2020-09-16T01:24:08.000Z
|
2022-02-26T05:19:09.000Z
|
MEMBERS.adoc
|
NicoWde/vanillastack
|
f8adce70bda4b9e83144dde1f5cf03e57858cc8a
|
[
"Apache-2.0"
] | 21
|
2020-09-15T19:27:08.000Z
|
2022-03-22T13:46:53.000Z
|
MEMBERS.adoc
|
NicoWde/vanillastack
|
f8adce70bda4b9e83144dde1f5cf03e57858cc8a
|
[
"Apache-2.0"
] | 9
|
2020-09-21T17:17:51.000Z
|
2022-01-29T10:20:31.000Z
|
= Members
This page lists all members assigned to a role, so that PRs, questions, etc. can be addressed to the right place.
- See link:GOVERNANCE.adoc[GOVERNANCE.adoc] for governance guidelines and steering committee and maintainer responsibilities.
== Owners
.Owners
|===
| Name
| Kim-Norman Sahm <kim-norman.sahm@cloudical.io>
| Karsten Samaschke <karsten.samaschke@cloudical.io>
|===
== Steering Committee
Steering committee members are added according to the link:GOVERNANCE.adoc[GOVERNANCE.adoc].
.Steering Committee
|===
| Name | Represents | Github
| Kim-Norman Sahm <kim-norman.sahm@cloudical.io> | Cloudical | https://github.com/kisahm[kisahm]
| Karsten Samaschke <karsten.samaschke@cloudical.io> | Cloudical | https://github.com/ksamaschke[ksamaschke]
| Christian Schilling <christian.schilling@cloudical.io> | Cloudical | https://github.com/christiancloudical[christiancloudical]
| Jeff Chousterman <jeff.choustermann@cloudical.io> | Cloudical | https://github.com/j0x00[j0x00]
|===
== Maintainers
Maintainers will be added according to the process defined in link:GOVERNANCE.adoc[GOVERNANCE.adoc].
.Maintainers
|===
| Name
| Kim-Norman Sahm <kim-norman.sahm@cloudical.io>
| Karsten Samaschke <karsten.samaschke@cloudical.io>
| Christian Schilling <christian.schilling@cloudical.io>
| Jeff Chousterman <jeff.choustermann@cloudical.io>
|===
| 33.953488
| 133
| 0.719863
|
8b971260a9eceb6c96398c358119f7be9f31ba12
| 159
|
adoc
|
AsciiDoc
|
service/src/doc/generated-snippets/test-employee/should-delete-employee/httpie-request.adoc
|
Rolence/fineract-cn-office
|
67053a960de778af7da8ad3b98629600571a7451
|
[
"Apache-2.0"
] | null | null | null |
service/src/doc/generated-snippets/test-employee/should-delete-employee/httpie-request.adoc
|
Rolence/fineract-cn-office
|
67053a960de778af7da8ad3b98629600571a7451
|
[
"Apache-2.0"
] | null | null | null |
service/src/doc/generated-snippets/test-employee/should-delete-employee/httpie-request.adoc
|
Rolence/fineract-cn-office
|
67053a960de778af7da8ad3b98629600571a7451
|
[
"Apache-2.0"
] | null | null | null |
[source,bash]
----
$ http DELETE 'http://localhost:8080/office/v1/employees/FBea9IQEOII3aaR7xv7jBQCN6dEFrqqY' 'Accept:*/*' 'Content-Type:application/json'
----
| 39.75
| 135
| 0.742138
|
56c5d7306d0a7b674f2433f3ac09035506a7a749
| 1,322
|
adoc
|
AsciiDoc
|
source/documentation/administration_guide/topics/Creating_a_Storage_Quality_of_Service_Entry.adoc
|
lessfoobar/ovirt-site
|
97cb0cdc5c9f5a7c9108e70f2d29baac8dcc2a40
|
[
"MIT"
] | 80
|
2015-12-07T16:39:12.000Z
|
2022-01-13T13:29:16.000Z
|
source/documentation/administration_guide/topics/Creating_a_Storage_Quality_of_Service_Entry.adoc
|
lessfoobar/ovirt-site
|
97cb0cdc5c9f5a7c9108e70f2d29baac8dcc2a40
|
[
"MIT"
] | 1,277
|
2015-12-04T15:56:51.000Z
|
2022-03-31T17:00:21.000Z
|
source/documentation/administration_guide/topics/Creating_a_Storage_Quality_of_Service_Entry.adoc
|
lessfoobar/ovirt-site
|
97cb0cdc5c9f5a7c9108e70f2d29baac8dcc2a40
|
[
"MIT"
] | 404
|
2015-12-04T22:16:05.000Z
|
2022-02-01T18:45:22.000Z
|
[[Creating_a_Storage_Quality_of_Service_Entry]]
==== Creating a Storage Quality of Service Entry
*Creating a Storage Quality of Service Entry*
. Click menu:Compute[Data Centers].
. Click a data center's name. This opens the details view.
. Click the *QoS* tab.
. Under *Storage*, click *New*.
. Enter a *QoS Name* and a *Description* for the quality of service entry.
. Specify the *Throughput* quality of service by clicking one of the radio buttons:
* *None*
* *Total* - Enter the maximum permitted total throughput in the *MB/s* field.
* *Read/Write* - Enter the maximum permitted throughput for read operations in the left *MB/s* field, and the maximum permitted throughput for write operations in the right *MB/s* field.
. Specify the input and output (*IOps*) quality of service by clicking one of the radio buttons:
* *None*
* *Total* - Enter the maximum permitted number of input and output operations per second in the *IOps* field.
* *Read/Write* - Enter the maximum permitted number of input operations per second in the left *IOps* field, and the maximum permitted number of output operations per second in the right *IOps* field.
. Click btn:[OK].
You have created a storage quality of service entry, and can create disk profiles based on that entry in data storage domains that belong to the data center.
| 60.090909
| 200
| 0.760968
|
5c217b40c61ad82348c57decae56b9033d4aa5e9
| 12,503
|
adoc
|
AsciiDoc
|
src/docs/API/OnPremise API/deepasr_v6.3_api_v1.adoc
|
KonstantinosKontogiannis/omilia-docs-ui
|
22509471d9e6433dd368f6b0800344464daec20b
|
[
"MIT"
] | null | null | null |
src/docs/API/OnPremise API/deepasr_v6.3_api_v1.adoc
|
KonstantinosKontogiannis/omilia-docs-ui
|
22509471d9e6433dd368f6b0800344464daec20b
|
[
"MIT"
] | null | null | null |
src/docs/API/OnPremise API/deepasr_v6.3_api_v1.adoc
|
KonstantinosKontogiannis/omilia-docs-ui
|
22509471d9e6433dd368f6b0800344464daec20b
|
[
"MIT"
] | null | null | null |
= deepASR API
:revnumber: 1.0
:revdate: June 5, 2020
:source-highlighter: highlightjs
:toc: macro
:toclevels: 2
:sectanchors: true
:sectnumlevels: 5
:includedir: ../../styles
:title-page:
:productname: deepASR API
:productversion: 1
:classification: CONFIDENTIAL
:data-uri:
:imagesdir:
toc::[]
<<<
ifdef::backend-pdf[]
include::{includedir}/preface.adoc[]
endif::[]
<<<
== Overview
You can use the REST interface to run recognitions on short files, or you can develop a custom client that uses the WebSocket endpoint to run recognitions on long audio files.
The deepASR® API provides an example implementation of how to use the WebSocket endpoint to perform recognitions.
== Version compatibility matrix
The deepASR API is synchronized with the latest deepASR software version:
[cols="^1,^6", options="header"]
|===
| deepASR API | deepASR server
| v1 | v6.3.0
|===
<<<
include::{includedir}/http_authentication.adoc[]
<<<
include::{includedir}/http_errors.adoc[]
<<<
== API Reference
deepASR API exposes an HTTP endpoint for offline speech recognition of wave files (_.wav_).
=== Recognize
Provides recognition services and is mostly targeted at short duration recordings.
==== Request
[source,subs="verbatim,quotes"]
----
*[teal]#POST#* /api/recognize
----
===== Headers
[source,http]
----
Content-Type: multipart/form-data
----
===== Form Data
[cols="1,1,6", options="header",cols="25%,10%,65%"]
|===
| Property | Type | Description
| `file` | file | `[red]#Required#`. The file to recognize
| `grammar` | string | `[red]#Required#`. The recognition model to use
| `bio_user_id` | string | `[red]#Optional#`. The user identifier to use for biometrics verification
| `dialog_id` | string | `[red]#Optional#`. An identifier for the recognition session
| `enable_logs` | boolean a| `[red]#Optional#`. Option for logging recognition results on server {empty} +
Default value: `[red]#true#`
| `enable_recordings` | boolean a| `[red]#Optional#`. Option for recording the recognized audio {empty} +
Default value: `[red]#true#`
|===
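For reference, the request can be issued from a short Python script. This is a minimal sketch using the third-party `requests` library; the base URL is a placeholder and authentication is omitted (see the authentication section for the scheme used by your installation).

[source,python]
----
# Minimal sketch of a recognize call (placeholder host, no authentication shown).
import requests

with open("utterance.wav", "rb") as audio:
    response = requests.post(
        "https://deepasr.example.com/api/recognize",  # placeholder base URL
        files={"file": audio},
        data={"grammar": "demo", "dialog_id": "my_session_identifier"},
    )

response.raise_for_status()
result = response.json()
print(result["success"])
----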
<<<
==== Response
[source,json]
----
{
"success": true,
"steps": [
{
"step": 1,
"parameters": {
"speech_grammar": "demo",
"dtmf_grammar": "",
"noInput_timeout": 4000,
"lowConfThreshold": 0.0,
"log_result": true
},
"timings": {
"stream_idle_time": 0,
"stream_start_time": "2020-04-01 11:44:03.625",
"rec_start_time": "2020-04-01 11:44:03.621",
"rec_finish_time": "2020-04-01 11:44:05.065",
"system_prompt_duration": 227,
"user_barged_in": "yes",
"user_noInput_time": 0,
"user_speech_duration": 7465,
"speech_start_trigger": 15780,
"speech_start_on_rec": 15260,
"speech_end_trigger": 21725,
"speech_end_on_rec": 22725,
"clean_speech_duration": 3780
},
"recognition_info": {
"completion_type": "SR",
"completion_description": "Speech result (normal)",
"input_mode": "speech",
"result_grammar": "_DEFAULT_",
"term_char_pressed": "none",
"rec_process_time": 923,
"rec_latency": 9,
"rec_post_delay": 5,
"is_prunned": false,
"beam_changed_to": -1.0,
"max_speech_reached": false,
"step_recording_temp_url": "https://biometrics-dev1.omilia.com:20000/work/v1/my_session_identifier.1.wav",
"step_recording_name": "my_session_identifier.1.wav",
"step_recording_fetched": null,
"noise_recording": null,
"is_teardown": false,
"low_volume": true,
"clipping": true,
"rtf": 0.12
},
"results": [
{
"confidence": 0.86,
"words": [
{
"word": "okay",
"conf": 0.49,
"wordBegin": 480,
"wordEnd": 2460
},
{
"word": "hello",
"conf": 1.0,
"wordBegin": 2910,
"wordEnd": 3630
},
{
"word": "i",
"conf": 0.7,
"wordBegin": 4110,
"wordEnd": 4230
},
{
"word": "would",
"conf": 0.68,
"wordBegin": 4230,
"wordEnd": 4320
},
{
"word": "like",
"conf": 0.98,
"wordBegin": 4320,
"wordEnd": 4620
},
{
"word": "to",
"conf": 1.0,
"wordBegin": 4620,
"wordEnd": 4710
},
{
"word": "know",
"conf": 1.0,
"wordBegin": 4710,
"wordEnd": 4860
},
{
"word": "my",
"conf": 1.0,
"wordBegin": 4860,
"wordEnd": 5070
},
{
"word": "balance",
"conf": 1.0,
"wordBegin": 5070,
"wordEnd": 5610
},
{
"word": "please",
"conf": 1.0,
"wordBegin": 5610,
"wordEnd": 5970
}
],
"utterance": "okay hello i would like to know my balance please"
}
],
"utteranceForWer": "okay hello i would like to know my balance please",
"utterance": "okay hello i would like to know my balance please"
},
{
"step": 2,
"parameters": {
"speech_grammar": "demo",
"dtmf_grammar": "",
"noInput_timeout": 4000,
"lowConfThreshold": 0.0,
"log_result": true
},
"timings": {
"stream_idle_time": 0,
"stream_start_time": "2020-04-01 11:44:05.121",
"rec_start_time": "2020-04-01 11:44:05.120",
"rec_finish_time": "2020-04-01 11:44:06.348",
"system_prompt_duration": 119,
"user_barged_in": "yes",
"user_noInput_time": 0,
"user_speech_duration": 7925,
"speech_start_trigger": 32210,
"speech_start_on_rec": 31690,
"speech_end_trigger": 38615,
"speech_end_on_rec": 39615,
"clean_speech_duration": 7340
},
"recognition_info": {
"completion_type": "SR",
"completion_description": "Speech result (normal)",
"input_mode": "speech",
"result_grammar": "_DEFAULT_",
"term_char_pressed": "none",
"rec_process_time": 869,
"rec_latency": 5,
"rec_post_delay": 10,
"is_prunned": false,
"beam_changed_to": -1.0,
"max_speech_reached": false,
"step_recording_temp_url": "https://biometrics-dev1.omilia.com:20000/work/v1/bd6cd5371a9d45cc849ea9bed41be8d6.2.wav",
"step_recording_name": "bd6cd5371a9d45cc849ea9bed41be8d6.2.wav",
"step_recording_fetched": null,
"noise_recording": null,
"is_teardown": false,
"low_volume": true,
"clipping": true,
"rtf": 0.11
},
"results": [
{
"confidence": 0.98,
"words": [
{
"word": "my",
"conf": 0.92,
"wordBegin": 480,
"wordEnd": 2130
},
{
"word": "five",
"conf": 1.0,
"wordBegin": 2760,
"wordEnd": 3510
},
{
"word": "seven",
"conf": 1.0,
"wordBegin": 3750,
"wordEnd": 4530
},
{
"word": "six",
"conf": 1.0,
"wordBegin": 4740,
"wordEnd": 5490
},
{
"word": "eight",
"conf": 1.0,
"wordBegin": 6030,
"wordEnd": 6540
}
],
"utterance": "my five seven six eight"
},
{
"confidence": 0.42,
"words": [
{
"word": "add",
"conf": 0.08
},
{
"word": "my",
"conf": 0.08
},
{
"word": "five",
"conf": 1.0
},
{
"word": "seven",
"conf": 1.0
},
{
"word": "six",
"conf": 1.0
},
{
"word": "eight",
"conf": 1.0
}
],
"utterance": "add my five seven six eight"
}
],
"utteranceForWer": "my five seven six eight",
"utterance": "my five seven six eight"
}
],
"biometrics": {
"llr": -12.1899,
"nllr": 0.0,
"frames": 1816,
"bio_result": "FALSE_HIGH",
"bio_score": 0.0772943,
"bio_user_id": "user33",
"error_type": null
}
}
----
=== WebSocket API
deepASR API provides streaming recognition capabilities by exposing a WebSocket endpoint.
A simple protocol is used for streaming recognition.
.Sequence Diagram
image::images/sequence_diagram.png[500,500]
The protocol flow is as follows:
. Client initiates a WebSocket connection to the WebSocket server, defining a message handler for the incoming recognition results.
. Client sends a RecognitionConfig JSON message.
. Server receives the RecognitionConfig message and starts a recognition session. No more RecognitionConfig messages are expected and sending one will result in an error.
. Client starts sending messages containing the binary audio data as *Base64*-encoded byte arrays.
. Server forwards results to the Client asynchronously.
. Client sends an empty message to the server to terminate a recognition session.
. Client waits for the server to send any remaining results. The client may close the connection at any time, at the risk of missing a recognition result.
. Server terminates the connection.
==== Request
[source,subs="verbatim,quotes"]
----
wss://{base_url}/stream?token={access_token}
----
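The following is a minimal Python sketch of the protocol flow described above, using the third-party `websockets` library. The host, token, chunk size and the RecognitionConfig field names are placeholders for illustration; consult your deepASR installation for the actual configuration message.

[source,python]
----
# Minimal streaming-recognition client sketch (placeholder values throughout).
import asyncio
import base64
import json

import websockets

BASE_URL = "deepasr.example.com"   # placeholder
ACCESS_TOKEN = "my-access-token"   # placeholder
CHUNK_SIZE = 3200                  # placeholder: ~100 ms of 16 kHz, 16-bit audio


async def recognize(wav_path: str) -> None:
    uri = f"wss://{BASE_URL}/stream?token={ACCESS_TOKEN}"
    async with websockets.connect(uri) as ws:
        # 1. Send the RecognitionConfig JSON message (field names are assumed).
        await ws.send(json.dumps({"grammar": "demo"}))

        # 2. Stream the audio as Base64-encoded binary chunks.
        with open(wav_path, "rb") as audio:
            while chunk := audio.read(CHUNK_SIZE):
                await ws.send(base64.b64encode(chunk).decode("ascii"))

        # 3. An empty message tells the server to finish the recognition session.
        await ws.send("")

        # 4. Collect results until the server closes the connection.
        async for message in ws:
            print(json.loads(message))


asyncio.run(recognize("utterance.wav"))
----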
=== Abort recognition
Stops an active recognition for a specific dialog id.
==== Request
[source,subs="verbatim,quotes"]
----
*[teal]#POST#* /api/abortrec
----
===== Headers
[source,http]
----
Content-Type: application/json
----
===== Json Body
[cols="1,1,6", options="header",cols="25%,10%,65%"]
|===
| Property | Type | Description
| `dialog_id` | String | `[red]#Required#`. Dialog unique id
|===
<<<
==== Response
[source,json]
----
{
"status": "OK"
}
----
[cols="1,1,6", options="header",cols="25%,10%,65%"]
|===
| Property | Type | Possible values
| `status` | String | DIALOG_NOT_FOUND / INACTIVE_RECOGNITION / OK
|===
=== Status
Reports the status of the deepASR API service, as well as information about the deepASR server that the API service is connected to.
==== Request
[source,subs="verbatim,quotes"]
----
*[blue]#GET#* /api/status
----
===== Headers
[source,http]
----
Content-Type: application/json
----
==== Response
[source,json]
----
{
"success": true,
"message": "Service is operating normally",
"version": "6.3.0 build A21",
"status": {
"status": "OK",
"utilization": {
"status": "IDLE",
"active_sessions": 0
},
"license": {
"status": "OK",
"license-info": {
"customer": "deepASR",
"lines": 20,
"expiry-date": "permanent"
},
"license-details": []
},
"models": {
"status": "OK",
"status-descr": [
"All checked servers have the same models!",
"Checked servers: [v1, r1, b1]"
],
"common-models": {
"languages": [
"en-US_3.4.xx"
],
"grammars": [
{
"name": "grammar1",
"language": "en-US_3.4.xx",
"is-default": false
},
{
"name": "grammar2",
"language": "en-US_3.4.xx",
"based-on": "grammar1",
"is-default": true
}
],
"bio-models": [
"model_v1.0.bio"
]
}
}
}
}
----
| 25.620902
| 170
| 0.503079
|
93598f346f7966ac80e9d7ff9a55857b82d3abbd
| 210
|
adoc
|
AsciiDoc
|
README.adoc
|
samdelnaki/objectbox
|
67e33fa6ae940fa1be2035d0903daf44e17fd1e5
|
[
"Apache-2.0"
] | null | null | null |
README.adoc
|
samdelnaki/objectbox
|
67e33fa6ae940fa1be2035d0903daf44e17fd1e5
|
[
"Apache-2.0"
] | null | null | null |
README.adoc
|
samdelnaki/objectbox
|
67e33fa6ae940fa1be2035d0903daf44e17fd1e5
|
[
"Apache-2.0"
] | null | null | null |
= ObjectBox
WARNING: Pre-alpha stage. This project is not fit for production use. First release anticipated in January 2020.
A utility which handles object history, to simplify undo and redo features, etc.
| 26.25
| 112
| 0.790476
|
d5ceeb69b43950e2ad8ac35ce27c3025a58a9103
| 3,168
|
adoc
|
AsciiDoc
|
pages/apim/3.x/installation-guide/amazon-linux/installation-guide-amazon-portal.adoc
|
tomgeudens/gravitee-docs
|
a03ac23b275f7632d25798d9d3c142893d1036e4
|
[
"Apache-2.0"
] | null | null | null |
pages/apim/3.x/installation-guide/amazon-linux/installation-guide-amazon-portal.adoc
|
tomgeudens/gravitee-docs
|
a03ac23b275f7632d25798d9d3c142893d1036e4
|
[
"Apache-2.0"
] | null | null | null |
pages/apim/3.x/installation-guide/amazon-linux/installation-guide-amazon-portal.adoc
|
tomgeudens/gravitee-docs
|
a03ac23b275f7632d25798d9d3c142893d1036e4
|
[
"Apache-2.0"
] | null | null | null |
= Install APIM Portal
:page-sidebar: apim_3_x_sidebar
:page-permalink: apim/3.x/apim_installguide_amazon_portal.html
:page-folder: apim/installation-guide/amazon
:page-liquid:
:page-layout: apim3x
:page-description: Gravitee.io API Management - Installation Guide - Amazon - Portal
:page-keywords: Gravitee.io, API Platform, API Management, API Gateway, oauth2, openid, documentation, manual, guide, reference, api
:page-toc: true
:gravitee-component-name: APIM Portal
:gravitee-package-name: graviteeio-apim-portal-ui-3x
:gravitee-service-name: graviteeio-apim-portal-ui
To install Gravitee.io API Management Portal, be sure to check the list of prerequisites and complete the following configuration (both listed below).
== Prerequisites
. [underline]#*APIM API*# must be installed and running (link:/apim/3.x/apim_installguide_amazon_management_api.html[click here] for instructions on how to install APIM API).
. [underline]#*Nginx*# must be installed.
. [underline]#*YUM package manager*# must be configured (link:/apim/3.x/apim_installguide_amazon_configure_yum.html[click here] for instructions on how to configure `yum`).
=== Install APIM API
link:/apim/3.x/apim_installguide_amazon_management_api.html[Click here] for instructions on how to install and test that APIM API is running.
=== Install Nginx
To install Nginx, run the following commands:
[source,bash,subs="attributes"]
----
sudo amazon-linux-extras install nginx1.12
sudo systemctl start nginx
----
=== Configure YUM Package Manager
link:/apim/3.x/apim_installguide_amazon_configure_yum.html[Click here] for instructions on how to configure `yum`.
=== 1. Install the {gravitee-component-name} Package
To install the latest stable version of {gravitee-component-name}, run the following command:
[source,bash,subs="attributes"]
----
sudo yum install -y {gravitee-service-name}
----
=== 2. Run {gravitee-component-name}
NOTE: {gravitee-component-name} is based on Nginx.
To configure {gravitee-component-name} to start automatically when the system boots up, run the following commands:
[source,bash,subs="attributes"]
----
sudo systemctl daemon-reload
sudo systemctl enable nginx
----
To start and stop Nginx, run the following commands:
[source,bash,subs="attributes"]
----
sudo systemctl start nginx
sudo systemctl stop nginx
----
=== 3. View The Logs
*You must check the logs to confirm that APIM Portal has been installed correctly.* When `systemd` logging is enabled, logging information is available using the `journalctl` commands.
To tail the journal, run the following command:
[source,shell]
----
sudo journalctl -f
----
To list journal entries for the Nginx service, run the following command:
[source,shell]
----
sudo journalctl --unit nginx
----
To list journal entries for the Nginx service starting from a given time, run the following command:
[source,shell]
----
sudo journalctl --unit nginx --since "2020-01-30 12:13:14"
----
=== 4. Confirm {gravitee-component-name} Was Installed
To test that {gravitee-component-name} has been installed and has started up properly, check this URL:
|===
|Component |URL
|APIM Portal
|http://localhost:8085/
|===
| 29.333333
| 182
| 0.762311
|
55e31c29f2f3414ccf25904fbec40b5da95b7098
| 6,808
|
adoc
|
AsciiDoc
|
implementations/README.adoc
|
richiecarmichael/ogcapi-features
|
3623485b918727dbe174af6a2aa446b8dd1f620d
|
[
"OML"
] | null | null | null |
implementations/README.adoc
|
richiecarmichael/ogcapi-features
|
3623485b918727dbe174af6a2aa446b8dd1f620d
|
[
"OML"
] | null | null | null |
implementations/README.adoc
|
richiecarmichael/ogcapi-features
|
3623485b918727dbe174af6a2aa446b8dd1f620d
|
[
"OML"
] | null | null | null |
# Implementations
#work-in-progress#
This page lists software packages that implement approved or draft OGC API Features standards. Draft standards are only added once they have a tagged version. Currently the following parts are covered:
* https://docs.ogc.org/DRAFTS/17-069r4.html[OGC API - Features - Part 1: Core 1.0]
* https://docs.ogc.org/DRAFTS/18-058r1.html[OGC API - Features - Part 2: Coordinate Reference Systems by Reference 1.0]
* https://docs.ogc.org/DRAFTS/19-079r1.html[OGC API - Features - Part 3: Filtering and Common Query Language (CQL) 1.0] **(DRAFT)**
Two types of software packages are distinguished:
* Server implementations are software products that can be used to set up APIs that implement OGC API Features conformance classes.
* Client implementations are software products that connect to APIs implementing OGC API Features conformance classes to use the feature data.
## Contribute
If you have a server or client implementation of OGC API Features, we welcome a pull request to update this page to add or update an entry for the product. You may add a link to a sub-page with more details in the link:servers[servers] and link:clients[clients] folders of this repository and you may add a link to the associated entry in the OGC Product Database. Please include a contact email address so that we may later contact you in case of page updates or questions.
If you have a server product, please also consider https://www.ogc.org/resource/products/registration[registering it in the OGC Product Database] and https://cite.opengeospatial.org/teamengine/[testing it for compliance].
## Servers
The columns for each part list the conformance classes of the standard that are implemented by the server implementation. The conformance classes available in a specific API that is provided using the implementation will be listed in the http://www.opengis.net/doc/IS/ogcapi-features-1/1.0#_declaration_of_conformance_classes[Conformance Declaration resource] of the API.
.Server implementations
[cols="5h,^3,^3,^3,^1a,2",options="header",grid="rows",stripes="hover"]
|===
| Product | Part 1 | Part 2 | Part 3 | OGC Product Database | Contact email
| link:servers/ldproxy.md[ldproxy]
| `core`, `oas30`, `geojson`, `html`, `gmlsf2`
| `crs`
| `filter`, `features-filter`, `simple-cql`, `cql-json`, `cql-text`
| https://www.ogc.org/resource/products/details/?pid=1598[Link]
| portele [at] interactive-instruments.de
| link:servers/cubewerx.md[CubeWerx Suite]
| `core`, `oas30`, `geojson`, `html`, #to be updated#
| `crs`
| `filter`, `features-filter`, `simple-cql`, `cql-json`, `cql-text`, #to be updated#
| https://www.ogc.org/resource/products/details/?pid=1676[Link]
| pvretano [at] cubewerx.com
| link:servers/geoserver.md[GeoServer]
3+| please consult the product documentation for details
| https://www.ogc.org/resource/products/details/?pid=1668[Link]
|
| link:servers/pygeoapi.md[pygeoapi]
3+| please consult the product documentation for details
| https://www.ogc.org/resource/products/details/?pid=1663[Link]
|
| link:servers/sofp.md[sofp Server]
3+| please consult the product documentation for details
| https://www.ogc.org/resource/products/details/?pid=1669[Link]
|
| link:servers/nlsfi.md[nls-fi]
| `core`, `oas30`, `geojson`
| `crs`
| -
| -
|
| link:servers/qgis.md[QGIS Server]
3+| please consult the product documentation for details
| https://www.ogc.org/resource/products/details/?pid=1611[Link]
|
| link:servers/sdirp.md[SDI Rhineland-Palatinate]
3+| please consult the product documentation for details
| https://www.ogc.org/resource/products/details/?pid=1667[Link]
|
| GNOSIS Map Server
3+| please consult the product documentation for details
| https://www.ogc.org/resource/products/details/?pid=1670[Link]
|
| TerraNexus OGC API Server
3+| please consult the product documentation for details
| https://www.ogc.org/resource/products/details/?pid=1675[Link]
|
|===
## Clients
The columns for each part list the conformance classes of the standard that the client uses when connecting to an API and the API supports that conformance class.
### Desktop
.Desktop clients
[cols="5h,^3,^3,^3,^1a,2",options="header",grid="rows",stripes="hover"]
|===
| Product | Part 1 | Part 2 | Part 3 | OGC Product Database | Contact email
| link:clients/qgis.md[QGIS]
| `core`, `oas30`, `geojson`
| -
| -
| -
|
| link:clients/fme.md[FME]
3+| please consult the product documentation for details
| -
|
|===
### Native APIs / Libraries
.Native APIs / Libraries
[cols="5h,^3,^3,^3,^1a,2",options="header",grid="rows",stripes="hover"]
|===
| Product | Part 1 | Part 2 | Part 3 | OGC Product Database | Contact email
| link:clients/gdal.md[GDAL/OGR - OGC API Features driver]
| `core`, `oas30`, `geojson`
| -
| `filter`, `features-filter`, `simple-cql`, `cql-text`
| -
|
| link:clients/owslib.md[OWSLib]
| `core`, `oas30`, `geojson`
| -
| -
| -
|
| link:clients/arcgis-runtime.md[ArcGIS Runtime API for .NET/Android/iOS/Java/Qt (OGCFeatureCollectionTable)]
| `core`, `oas30`, `geojson`
| `crs`
| `filter`, `features-filter`, `cql-text`, `cql-json`
| -
|
|===
### JavaScript APIs
.JavaScript APIs
[cols="5h,^3,^3,^3,^1a,2",options="header",grid="rows",stripes="hover"]
|===
| Product | Part 1 | Part 2 | Part 3 | OGC Product Database | Contact email
| link:clients/arcgis-js.md[ArcGIS API for JavaScript - OGCFeatureLayer]
| `core`, `oas30`, `geojson`
| -
| -
| -
|
| link:clients/ogcapi-js.md[ogcapi-js]
| `core`, `geojson`
| `crs`
| -
| https://www.ogc.org/resource/products/details/?pid=1673[Link]
|
|===
### Clients supporting GeoJSON
Since most APIs implementing OGC API Features support the GeoJSON conformance class, any client that is able to process GeoJSON feature collections and features can also use feature data provided by such APIs by directly accessing the Features resources (at the relative path `collections/{collectionId}/items`), as long as it can fetch all features in a single request without paging.
See the link:clients/README.md[examples and descriptions for clients] for more information, for example, for link:clients/leaflet.md[Leaflet], link:clients/openlayers.md[OpenLayers] and link:clients/mapbox-gl-js.md[Mapbox GL JS].
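As a small illustration of this direct access, the sketch below fetches a feature collection as GeoJSON with Python's `requests` library. The base URL and collection id are hypothetical.

[source,python]
----
# Minimal sketch: fetch features directly as GeoJSON from a Features resource.
import requests

response = requests.get(
    "https://example.org/ogcapi/collections/buildings/items",  # hypothetical API
    params={"limit": 100},
    headers={"Accept": "application/geo+json"},
)
response.raise_for_status()
collection = response.json()
print(f"fetched {len(collection['features'])} features")
----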
## STAC
The https://github.com/radiantearth/stac-spec[SpatioTemporal Asset Catalog (STAC) specification], more precisely the https://github.com/radiantearth/stac-api-spec[STAC API specification], is based on OGC API Features. Thus STAC API is a superset of OGC API - Features - Part 1: Core, in that OGC API Features defines many of the resources that STAC uses. A STAC API should be compatible and usable with OGC API Features clients and a STAC server should also be a valid OGC API Features server.
See the https://stacindex.org/ecosystem[STAC implementations page] for implementations.
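As a rough, hedged illustration (not taken from the text above): a STAC API additionally exposes an item-search endpoint at `/search`, which accepts the usual spatial and temporal filters. The root URL and collection id below are placeholders.

[source,python]
----
# Hedged sketch: query the item-search endpoint of a STAC API.
# The root URL and collection id are placeholders; check the STAC API spec for details.
import requests

stac_root = "https://example.org/stac"    # hypothetical STAC API root

response = requests.get(
    f"{stac_root}/search",
    params={
        "collections": "sentinel-2-l2a",                          # hypothetical collection id
        "bbox": "5.9,50.7,6.2,50.9",                               # lon/lat bounding box
        "datetime": "2021-06-01T00:00:00Z/2021-06-30T23:59:59Z",  # closed time interval
        "limit": 10,
    },
)
response.raise_for_status()
for item in response.json()["features"]:   # STAC items are GeoJSON features
    print(item["id"])
----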
| 38.902857
| 493
| 0.740012
|
0f78cf75ec9547c4e43969b1a6cb301b574ebdd3
| 282
|
adoc
|
AsciiDoc
|
src/docs/asciidoc/overview.adoc
|
pmackowski/rsocket-playground
|
aef307a15e06fd6c0453abc056040306693a4e08
|
[
"Apache-2.0"
] | 2
|
2019-09-30T13:37:57.000Z
|
2019-10-24T11:12:13.000Z
|
src/docs/asciidoc/overview.adoc
|
pmackowski/rsocket-playground
|
aef307a15e06fd6c0453abc056040306693a4e08
|
[
"Apache-2.0"
] | 9
|
2019-08-26T08:18:54.000Z
|
2019-09-09T09:18:16.000Z
|
src/docs/asciidoc/overview.adoc
|
pmackowski/rsocket-playground
|
aef307a15e06fd6c0453abc056040306693a4e08
|
[
"Apache-2.0"
] | 1
|
2021-11-09T10:38:52.000Z
|
2021-11-09T10:38:52.000Z
|
== Overview
=== Zomky
Zomky is a Java library built on top of RSocket. It provides implementations of the Gossip and Raft distributed-systems protocols.
=== Dependencies
[source,java,indent=0]
----
include::{test-examples}/ZomkyDocumentation.java[tag=overview]
----
<1> Bob
<2> Alice
| 20.142857
| 125
| 0.737589
|
0cea6395b0d7e7104aafaedb5e66c68d58101c0a
| 88
|
adoc
|
AsciiDoc
|
src/asciidoc/appendix.adoc
|
hawaiifw/hawaiiframework
|
506eb6be5a76c6203d3e2b48b82f2c25782a824d
|
[
"X11",
"Apache-2.0",
"OLDAP-2.2.1"
] | 6
|
2016-11-15T23:00:47.000Z
|
2020-11-30T01:57:19.000Z
|
src/asciidoc/appendix.adoc
|
hawaiifw/hawaiiframework
|
506eb6be5a76c6203d3e2b48b82f2c25782a824d
|
[
"X11",
"Apache-2.0",
"OLDAP-2.2.1"
] | 11
|
2016-04-16T07:45:05.000Z
|
2020-09-22T07:08:15.000Z
|
src/asciidoc/appendix.adoc
|
hawaiifw/hawaii-framework
|
506eb6be5a76c6203d3e2b48b82f2c25782a824d
|
[
"X11",
"Apache-2.0",
"OLDAP-2.2.1"
] | 19
|
2016-04-13T13:17:09.000Z
|
2020-09-21T13:41:43.000Z
|
[[appendix]]
:sectnums!:
== Appendices
include::appendix-application-properties.adoc[]
| 14.666667
| 47
| 0.75
|
c96a96d8e8766aae32b8ba06e17cc25f9172fed4
| 1,058
|
adoc
|
AsciiDoc
|
modules/ROOT/pages/dataweave-formats-fixedwidth.adoc
|
Amelsfort/docs-mule-runtime
|
8e506055f38561c09ff8fdfdfe6d6653a807585f
|
[
"BSD-3-Clause"
] | null | null | null |
modules/ROOT/pages/dataweave-formats-fixedwidth.adoc
|
Amelsfort/docs-mule-runtime
|
8e506055f38561c09ff8fdfdfe6d6653a807585f
|
[
"BSD-3-Clause"
] | null | null | null |
modules/ROOT/pages/dataweave-formats-fixedwidth.adoc
|
Amelsfort/docs-mule-runtime
|
8e506055f38561c09ff8fdfdfe6d6653a807585f
|
[
"BSD-3-Clause"
] | null | null | null |
= Fixed Width Format
MIME Type: `application/flatfile`
ID: `flatfile`
Fixed width types are technically considered a type of Flat File format, but
when selecting this option, the Transform component offers you settings that are
better tailored to the needs of this format.
NOTE: Fixed width in DataWeave supports files of up to 15 MB, and the memory requirement is roughly 40 to 1. For example, a 1-MB file requires up to 40 MB of memory to process, so it's important to consider this memory requirement in conjunction with your TPS needs for large fixed width files. This is not an exact figure; the value might vary according to the complexity of the mapping instructions.
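As a back-of-the-envelope illustration of that guideline (the 40:1 ratio is only the rough figure quoted above, and this helper is not part of DataWeave):

[source,python]
----
# Rough estimate of the memory needed to process a fixed width payload,
# based on the ~40:1 guideline quoted above (not an exact figure).
def estimated_memory_mb(file_size_mb: float, ratio: float = 40.0) -> float:
    return file_size_mb * ratio

print(estimated_memory_mb(1))    # 1 MB file  -> roughly 40 MB of memory
print(estimated_memory_mb(15))   # 15 MB file -> roughly 600 MB (the documented size limit)
----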
// CONFIG PROPS ///////////////////////////////////////////////////////
// Fixed Width accepts Flat File config properties.
include::partial$dataweave-formats-flatfile-config.adoc[]
[[mime_type_fixed_width]]
== Supported MIME Types (for Fixed Width)
The Fixed Width format supports the following MIME types.
[cols="1", options="header"]
|===
| MIME Type
|`*/flatfile`
|===
| 36.482759
| 401
| 0.731569
|
b0aee2707f683793987997ec281b38e107a1cc30
| 74
|
adoc
|
AsciiDoc
|
README.adoc
|
kdubois/rhods-od-workshop
|
6efbf1716fa5a398e8b70f32dc3f8c4ab15c9c4e
|
[
"Apache-2.0"
] | 1
|
2021-11-18T18:52:56.000Z
|
2021-11-18T18:52:56.000Z
|
README.adoc
|
kdubois/rhods-od-workshop
|
6efbf1716fa5a398e8b70f32dc3f8c4ab15c9c4e
|
[
"Apache-2.0"
] | null | null | null |
README.adoc
|
kdubois/rhods-od-workshop
|
6efbf1716fa5a398e8b70f32dc3f8c4ab15c9c4e
|
[
"Apache-2.0"
] | 5
|
2021-11-09T15:29:32.000Z
|
2021-11-29T10:41:59.000Z
|
https://redhat-scholars.github.io/rhods-od-workshop/od-workshop/index.html
| 74
| 74
| 0.824324
|
c58c5bebf610777e283fd4e7a012890e97da9fa2
| 23,194
|
adoc
|
AsciiDoc
|
OU's en groepsbeleid/README.adoc
|
ucll-operating-systems/operating-systems
|
9577d6c442e5903f8c74bf5aa64f30c88e12b804
|
[
"BSD-2-Clause"
] | 3
|
2021-03-13T13:38:47.000Z
|
2022-03-16T16:22:26.000Z
|
OU's en groepsbeleid/README.adoc
|
GekkieFoxxie/operating-systems
|
60c227d2c08965248b6007b4f20918f789f8ab03
|
[
"BSD-2-Clause"
] | null | null | null |
OU's en groepsbeleid/README.adoc
|
GekkieFoxxie/operating-systems
|
60c227d2c08965248b6007b4f20918f789f8ab03
|
[
"BSD-2-Clause"
] | 17
|
2021-04-12T17:00:41.000Z
|
2022-03-28T08:49:15.000Z
|
= Group Policy Objects
UCLL
:doctype: article
:encoding: utf-8
:lang: nl
:toc: left
We hebben nu een computer met Active Directory gekoppeld en een gecentraliseerde gebruiker aangemaakt om aan te melden. Active Directory biedt echter nog een heleboel meer mogelijkheden en één heel belangrijke mogelijkheid is het definiëren van configuraties of afspraken voor PC's/gebruiker(s) binnen het domein.
Zo'n configuraties noemen we **Group Policy Settings**. Een Group Policy setting maakt het mogelijk om een heleboel configuraties door te voeren, bijvoorbeeld bepaalde software installeren, beveiligingsopties aanpassen, folder redirection, en het aanpassen van de Windows Registry, meer hierover wordt wel duidelijk tijdens het labo. Een aantal Group Policy Settings (regels) kunnen samen een Group Policy Object vormen, een set van regels die jij als beheerder van het domein logischerwijs vindt samenhoren. Hieronder staat de basis over GPO's kort samengevat, maar natuurlijk heeft Microsoft hier zijn eigen documentatie over. https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-7/hh147307(v=ws.10)?redirectedfrom=MSDN[Dit artikel] omschrijft de essentie van GPO's, en is zeker een interessant als je meer details wilt. Bekijk daarnaast ook gerust dit https://www.youtube.com/watch?v=azup50LaIN0[filmpje]. Een GPO omvat beleidsregels, nl. **policies** of voorkeuren nl. **preferences** die ingesteld of toegepast moeten worden voor bepaalde gebruiker(s) en/of computer(s).
IMPORTANT: de beveiliging van "bronnen" (printers, netwerkschijven,...) worden niet afgedwongen d.m.v. group policies maar groepen (global, domein lokaal, ...).
== Soorten GPO's
We onderscheiden 3 soorten Group Policy Objects (GPO's):
1. **Local Group Policy Objects**: Dit zijn Group Policy Objects die van toepassing zijn op 1 enkele lokale PC en de gebruikers die hierop aanmelden. Deze bestaan standaard op iedere PC, al dan niet opgenomen in een domein.
2. **Non-local Group Policy Objects**: Group Policy Objects die van toepassing zijn op meerdere PC's. Een GPO is van het type Non-Local, zodra deze op een Active Directory Server geïnstalleerd worden. Non-local Group Policy Objects overschrijven altijd Local Group Policy Objects.
3. **Starter Group Policy Objects**: Dit zijn templates, waarvan je kan starten bij het aanmaken van GPO's.
== Voordelen van GPO's
* Efficiënter beheer van IT-omgevingen
* Password policy enforcement (verder in het labo zal je zien dat dit een slecht voorbeeld is en NIET zal werken)
* Folder redirection
== Nadelen van GPO's
Natuurlijk is het niet allemaal rozengeur en maneschijn. Er zijn een paar valkuilen als het aankomt op GPO's.
Eerst en vooral worden GPO's standaard iedere 90-120 minuten vernieuwd. Dit betekent concreet dat je iedere keer dat je een aanpassing aan een GPO doet ook zolang moet wachten totdat de betrokken PC de aanpassing "oppikt". Je kan de updatefrequentie wel manueel instellen. Daarnaast is het ook belangrijk om te weten dat GPO's sequentieel worden uitgevoerd bij de opstart van de PC. Dit wil zeggen dat als je veel GPO's hebt, dat je ook heel lang zal moeten wachten totdat de PC opgestart is.
== Verwerking van GPO's
GPO's worden in een bepaalde volgorde verwerkt.
1. Local
2. Site
3. Domain
4. Organizational Unit
Dit wilt concreet zeggen dat een setting in een GPO die local geconfigureerd is overschreven wordt als diezelfde setting opgenomen in een GPO gekoppeld aan het domein, anders geconfigureerd is.
Het tijdstip waarop GPO-instellingen effectief worden, is niet altijd hetzelfde. Bijvoorbeeld:
* Instellingen van computer configuration worden toegepast als de computer (her)start
* Instellingen van user configuration worden toegepast als de gebruiker (her)inlogt
== Aan de slag
=== Group Policy Management
Bij de installatie van Active Directory wordt de tool "Group Policy Management" mee geïnstalleerd. Deze is te vinden in Server Manager>Tools.
Het merendeel van dit labo zal zich hier afspelen. Als je het vorige labo goed hebt afgerond, zie je in de balk aan de linkerzijkant iets zoals hieronder:
image::images/image1.png[]
Alvorens nieuwe GPO's aan te maken, is het belangrijk om te beseffen dat er reeds twee GPO's aangemaakt zijn, nl. Default Domain Controllers Policy en Default Domain Policy. De eerste GPO is bedoeld om de domein controllers te beveiligen en de tweede GPO stelt standaard beleidsregels in voor het domein. Enkele instellen zijn hieronder te zien:
image::images/image5.png[]
In OU=Sales gaan we onze eerste GPO aanmaken. Met de rechtermuisknop klik je op de OU=Sales en maak je een nieuwe GPO aan. Je geeft de GPO de naam "labo-GPO-nr1". De GPO zal automatisch verschijnen onder de container OU=Sales. Deze is nu nog leeg, dus we willen hem aanpassen, rechtermuisknop>Edit.
Hier krijgen we twee opties: Computer Configuration & User Configuration. Navigeer in beide categorieën naar Policies, Administrative Templates, en kijk welke regels je zoal kan instellen. Je merkt dat het aantal beleidsregels enorm is. Elke regel kan **ingeschakeld**, **uitgeschakeld** of **niet geconfigureerd** zijn. Voor het afdwingen van de Password Policy navigeren we naar Computer Configuration>Policies>Windows Settings>Security Settings>Account Policies>Password Policy. Wanneer we dit venster open hebben zien we een aantal opties. Open 'Password must meet complexity requirements' en selecteer *Disabled*. Als je dit gedaan hebt zou je het volgende moeten zien.
image::images/image2.png[]
Zorg nu dat de laatste 5 wachtwoorden worden onthouden, een wachtwoord minstens 1 dag oud moet zijn voor het veranderd kan worden, na een jaar veranderd moet worden, het minstens 3 tekens heeft, dat het niet opgeslagen wordt met een terugkeerbare encryptie en behoud de complexity requirements zoals hierboven. Kijk ook zeker naar de standaardwaarden van de configuratie, die je vindt onder "Explain" als je een configuratie opent.
Wanneer je klaar bent, sluit je het venster gewoon af. Je hebt nu een Group Policy Object geconfigureerd op het domein-niveau.
Test of de GPO werkt:
* Maak een user1 aan met paswoord p@ssw0rd in OU=Sales
* Log in met user1 op de Windows 10 machine en probeer het paswoord aan te passen naar ttt ... => lukt niet!
OK, waarom lukt dit niet ... wat zegt de error boodschap van Windows:
image::images/image3.png[]
Het nieuwe paswoord kan "verkeerd" zijn volgens een van de drie redenen:
* does not meet the length => kan niet is 3 karakters lang
* does not meet the complexity => is afgezet
* does not meet the history requirements => verder onderzoeken ...
Het paswoord moet één dag oud zijn alvorens het aangepast zou kunnen worden, dan moet er onderzocht worden wanneer het paswoord "aangemaakt" werd. Gebruik PowerShell met het commando `Get-ADUser -identity user1 -properties *` op de server en controleer of het paswoord "ouder" is dan een dag. Als dat niet zo is ... (en kies optie 2):
* wacht tot morgen ;-)
* hef die beleidsregel op => zet de minimum password age op 0
IMPORTANT: Werkt de paswoordaanpassing na de beleidsaanpassing van de "minimale password age" op 0 te zetten? Waarschijnlijk nog niet, je moet de PC of computer van de user in de juiste OU zetten, omdat het een computer policy is die je aangepast hebt. *Er is dus een duidelijk verschil tussen USER en COMPUTER policies!* Afhankelijk van de policy die je aanmaakt, moet de user en/of de PC in de juiste OU geplaatst worden.
Plaats de PC in de juiste OU=Sales en probeer het paswoord nu aan te passen? Lukt het? Eindelijk! ... of toch https://community.spiceworks.com/topic/2160696-password-policy-not-working-but-is-applied[niet].
IMPORTANT: Volgens bovenstaande link is het **voorbeeld van password-policy een slecht voorbeeld**. Om toch te controleren of de computerpolicy wel degelijk werkt, kan je deze testen met het "Start Menu" via Computer policy => Admin. Templ. => Start Menu => full screen.
Als je wil zien wat er precies is geconfigureerd in een bepaalde GPO, klik je op de GPO en selecteer je de tab 'Settings'. Hier zie je alle configuraties. Onder Scope zie je ook aan wie een GPO gekoppeld is. Je kan ook bestaande GPO's koppelen aan meerdere containers. Dit doe je door naar een OU/Domain te gaan en te klikken op 'Link existing GPO'.
Note: Als meerdere GPO's na mekaar toegepast worden, dan betekent **niet geconfigureerd** dat de vorig toegepaste instelling in de GPO-hiërarchie blijft en **uitgeschakeld**, wat de vorige instelling ook was, ze wordt nu uitgeschakeld.
=== Policies debuggen
Wanneer er iets misloopt met het toepassen van een policy, zijn er tools om te kijken waar het probleem precies zit. Een eerste stap is kijken welke regels er nu toegepast werden. Dit kan je doen door in een console-venster het commando `gpresult /v` in te typen. Dit geeft een lijst van alle policies die toegepast worden op de huidige **gebruiker**, alsook wat extra informatie. Je kan deze informatie ook wegschrijven als een (overzichtelijker) HTML-bestand door het commando `gpresult /H bestand.html`.
Wanneer een policy niet toegepast kan worden, gaat de Group Policy Client een foutmelding wegschrijven in het Windows event log. Je kan deze log bekijken door het programma Event Viewer op te starten en te navigeren naar Windows logs.
Zoals reeds kort aangehaald werd, komen hiërarchieën van OU’s vaak voor (dat is in feite de bedoeling). Aangezien je op elke OU bepaalde regels kan instellen, kunnen conflicten voorkomen.
Neem bijvoorbeeld het beleidsaspect “Hide my network places on desktop”. GPO's kunnen hiervoor één van de volgende waardes specificeren:
* Enabled
* Disabled
* Not configured
Door de opeenvolgende GPO’s achter elkaar te zetten/uitvoeren volgens de hiërarchieën van OU’s, krijgen we bijvoorbeeld: disabled, disabled, not configured, ..., enabled, not configured (Not configured betekent: er wordt niets veranderd aan de vorige instelling). In dit geval is enabled de definitieve instelling.
=== Meer dan een GPO per OU
Ga naar Group Policy Management en maak onder de OU Marketing twee nieuwe GPO’s aan (via “Create a GPO in this domain, and Link it here…”).
* Noem de eerste **Disable Command Prompt** en zorg dat de instelling “Toegang tot de opdrachtprompt voorkomen” (E. Prevent access to the command prompt) aan staat.
* Noem de tweede **Enable Command Prompt ** en zorg dat daarin dezelfde instelling uitgeschakeld staat.
We hebben nu twee GPO’s gedefinieerd in de OU van Marketing die in conflict liggen met elkaar.
Selecteer in Group Policy Management de OU en navigeer naar het “Linked Group Policy Objects”-tabblad. Hier zie je de volgorde staan van waarin de GPO’s voorrang krijgen (hoger krijgt voorrang). Zorg dat de GPO Disable Command Prompt bovenaan staat. Log op de Windows 10-machine in als iemand van de OU Marketing en probeer een opdrachtprompt te openen. Dat zou niet mogen werken.
Zet nu de GPO Enable Command Prompt bovenaan, en meld je opnieuw aan op de Windows 10-machine (wijzigingen in de group policy worden pas doorgegeven wanneer men opnieuw inlogt). Probeer opnieuw om een opdrachtprompt te openen. Nu zou het wel moeten gaan.
== De registry: een kennismaking
TIP: Waarom bespreken we hier kort de registry? Met groepsbeleid kan je onder andere het register aanpassen, maar ook scripts toepassen, mappen omleiden, applicaties beheren, ... .
De registry is een database waarin Windows de instellingen i.v.m. de software en de hardware bijhoudt. Via het commando `regedit` hebben we toegang tot de registry. Er zijn 5 *afdelingen* (E. hives, wat letterlijk vertaald bijenkorf betekent) die elk een categorie van instellingen bijhouden.
Via `regedit` kan men wijzigingen aanbrengen aan de registry. Dit moet evenwel uiterst omzichtig gebeuren. Wie nog in een leerfase zit, kan best alleen aan de registry prutsen op een machine waarbij het geen kwaad kan (een virtuele machine, een machine in een pc-lab, ...). Zorg alleszins dat je een backup hebt, want een foutje in de registry kan ervoor zorgen dat het Windows OS niet meer start/werkt.
Een afdeling is gestructureerd als een folder met subfolders. Een (sub)folder wordt key genoemd. Zoals een folder kan een key bestaan uit subkeys. Ook kan een key een of meerdere waardes hebben (afhankelijk van de key kan het type van de waarde zijn : unicode string, dword, bytes, ...). Een registry-afdeling wordt ook rootkey genoemd. Van de 5 afdelingen zijn er 3 echte, waar dus daadwerkelijk data in opgeslagen wordt:
* **HKEY_LOCAL_MACHINE**: bevat informatie over Windows en de geïnstalleerde applicaties die algemeen van toepassing is (d.i. voor alle gebruikers).
* **HKEY_USERS**: bevat informatie voor alle gebruikers die een profiel hebben. De gegevens staan gegroepeerd per gebruiker.
* **HKEY_CLASSES_ROOT**: bevat allerlei informatie over bestandsextensies, e.d.
De andere twee afdelingen zijn shortcuts naar bepaalde delen van één van de bovenvernoemde afdelingen:
* **HKEY_CURRENT_USER**: bevat informatie voor Windows en de applicaties die enkel van toepassing is op de huidige gebruiker. Het verwijst naar een bepaalde subkey in *HKEY_USERS*.
* **HKEY_CURRENT_CONFIG**: bevat informatie over de configuratie van de hardware. Het verwijst door naar een bepaalde subkey in HKEY_LOCAL_MACHINE.
Instellingen worden bij voorkeur gewijzigd via het configuratiescherm of via de programma’s die ze in de registry gestoken hebben. Uitzonderlijk kan het nodig zijn de registry te editeren via `regedit`.
=== Voorbeeld: de standaard toetsenbordlayout
Wanneer er een nieuwe gebruiker aangemaakt wordt, worden een aantal standaardinstellingen gekopieerd naar zijn profiel. Zo wordt er onder andere de default layout van het toetsenbord (AZERTY, QWERTY, DVORAK, …) opgehaald uit de registry (HKEY_USERS\.DEFAULT\Keyboard Layout\Preload\1). Open de registry en zoek op welke waarde er standaard gebruikt wordt. Je kan deze waarde als volgt interpreteren:
* 00000413 Dutch (Standard) – QWERTY
* 00000813 Dutch (Belgian) – AZERTY
* 0000040c French (Standard) – AZERTY
* ...
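Ter illustratie (geen onderdeel van het labo): een kleine Python-schets die deze registrywaarde uitleest met de standaardmodule `winreg`. Je kan dit natuurlijk ook gewoon via `regedit` bekijken.

[source,python]
----
# Schets: lees de standaard toetsenbordlayout uit de registry (enkel op Windows).
import winreg

key = winreg.OpenKey(winreg.HKEY_USERS, r".DEFAULT\Keyboard Layout\Preload")
value, value_type = winreg.QueryValueEx(key, "1")   # waarde "1" bevat de layout-code
winreg.CloseKey(key)

print(value)   # bv. 00000813 -> Dutch (Belgian) - AZERTY
----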
=== Exporteren en importeren
Via `regedit` kan je ook (delen van) de registry *exporteren* naar een .reg-bestand. Dat is een bestand in tekstformaat dat de verschillende keys met bijbehorende waarden bevat. Zo kan een .reg-bestand bijvoorbeeld de volgende data bevatten:
------------------------------------------------
Windows Registry Editor Version 5.00
[HKEY_USERS\.DEFAULT\Keyboard Layout\Preload]
"1"="00000409"
------------------------------------------------
Dit bestand terug importeren in de registry kan door er eenvoudigweg op te dubbelklikken. Als de gebruiker bovenstaand bestand importeert, dan zal de standaard toetsenbordlayout dus op English (United States) gezet worden.
== Remote Access
Domeinbeheerders hoeven niet altijd rechtstreeks in te loggen op de domein controller om aanpassingen te maken aan de Active Directory instellingen. Men kan vanop eender welke pc het beheer van het domein doen, als men de https://www.microsoft.com/en-us/download/details.aspx?id=45520[Remote Server Administration Tools (RSAT)] downloadt.
Log in op de Windows 10-machine als de domeinbeheerder. Let op, standaard word je op de Windows 10-machine ingelogd als een gebruiker van het domein, tenzij er een lokale account bestaat met dezelfde naam. Als je nu inlogt als ‘Administrator’ zal je dus ingelogd worden als de lokale beheerder (d.i. de account die we gebruikten voordat de Windows 10 client toegevoegd was aan het domein) in plaats van de domeinbeheerder. Om te forceren dat we aanmelden met de domeinbeheerder, moeten we de naam van het domein dus toevoegen aan de loginnaam. Dit kan op volgende manier: Administrator@cosci.be
Installeer de RSA-tools; dit duurt even. Na de installatie kan je in het start menu gaan naar Windows Administrative Tools, waar je nu de vertrouwde Active Directory instellingen kan vinden. Open bijvoorbeeld Active Directory Users and Computers en verifieer dat alles werkt.
NOTE: Als de installatie van RSAT niet wil starten, controleer dan of het bestand niet geblokkeerd is. Wanneer je bepaalde bestanden over het netwerk kopieert, kan Windows uit veiligheidsoverwegingen de toegang tot het bestand blokkeren totdat je expliciet het bestand deblokkeert. Rechterklik op het bestand en controleer of er onderaan de General-tab staat dat het bestand geblokkeerd is. Indien dat het geval is, deblokkeer het bestand en dan zal de installatie succesvol opstarten.
== Oefeningen
=== Beperk toegang tot het configuratiescherm & Command Line
Gewone gebruikers mogen geen toegang hebben tot het configuratiepaneel en command line. Dit is enkel toegelaten voor gebruikers in de OU=IT.
=== Verbied het gebruik van USB-sticks, CDs, DVDs en andere verwijderbare media
Besmette verwijderbare media is een van de populaire manieren voor hackers om een organisatie binnen te dringen/aan te vallen. Daarom willen we dit voor iedereen afsluiten.
=== Sluit het gastaccount af
Door het gastaccount kunnen gebruikers toegang krijgen tot gevoelige data. Zo'n accounts geven toegang tot een Windows-computer en vereisen geen wachtwoord. Standaard staan deze gelukkig uit, maar voor de zekerheid willen we dit toch afdwingen vanuit het domein.
=== Verhinder automatische driver-updates.
Windows voert automatisch een heleboel updates uit, ook device drivers updates. In de OU=IT gebruikt men echter custom drivers die niet geüpdatet mogen worden.
=== Snelkoppeling cosci.be
Plaats bij alle gebruikers op het bureaublad een snelkoppeling naar Cosci.be
=== Script Logon name
Zorg dat iedere keer dat er iemand aanmeldt op een PC in het domein, de gebruikersnaam en aanlogtijd naar een tekstbestand op de PC worden weggeschreven.
=== Installeer van programma's (op alle pc's)
Standaard hebben domeingebruikers geen rechten om programma’s te installeren op een pc. Vaak wil men echter toch kunnen toelaten dat de gebruiker bepaalde software kan installeren, zonder hem toe te laten om eender welke software te installeren. Ook hier kunnen GPO's gebruikt worden om in te stellen welke programma’s de gebruikers mag installeren op een cliënt-pc.
Als voorbeeld nemen we de installatie van 7-zip. Downloadt dit *msi-bestand* op de cliënt-PC en start het setup-bestand. Volg de setup wizard (laat de standaard waardes telkens ongemoeid) en blijf doorklikken tot de installatie daadwerkelijk begint. Op dat moment zal je een loginvenster krijgen waarin je je moet aanmelden als een administrator. Dit komt omdat het setup-programma aanpassingen moet maken die voor normale gebruikers niet toegelaten zijn (bijv. het kopiëren van bestanden naar C:\Program Files).
Open nu het groepsbeleidsbeheer op de domain controller, en maak een nieuwe GPO onder de OU=IT met de naam “Software installeren”. Bewerk de GPO en ga naar User Configuration, Policies, Software Settings, Software installation. Rechterklik, en kies New, Package. Als bestandsnaam vul je het pad naar het gedeelde bestand in (bijv. \\VMware-host\7z1801-x64.msi). Klik op Open.
In het volgende scherm krijg je de keuze hoe je de software wil distribueren. Standaard staat **Published** geselecteerd, wat wil zeggen dat de gebruiker kan kiezen of hij de software wil installeren of niet. De optie **Assigned** betekent dat de gebruiker niet kan kiezen, en dat de software automatisch geïnstalleerd wordt. Kies Published en klik OK. Sluit de groepsbeleidsbeheer editor af.
Log in op de Windows 10-machine als een gebruiker in de OU=IT. Ga via het Control Panel naar Programs, Programs and Features, Install a program from the network. In deze lijst zie je nu het programma 7-zip staan en kan je het zonder probleem installeren (ook zonder beheerdersrechten).
Installeer het msi-bestand dat je voor dit labo op Toledo terug kan vinden. Hiervoor zal je een netwerk-share nodig hebben. Het gemakkelijkste hiervoor is om een shared folder in VMWare aan te maken en deze aan beide VM's te koppelen eventueel door *map network drive* in `This PC`.
=== Delegatie
Voor grote domeinen kan er veel werk zijn om alle gebruikers en instellingen te beheren. Normaal is dit de taak van de systeembeheerders, maar soms kan het zijn dat de beheerders een aantal taken willen doorgeven aan anderen (zonder die andere gebruikers daarvoor de volledige beheerdersrechten te geven). Active Directory ondersteunt dit scenario door middel van *delegatie*.
Ga naar Active Directory Users and Computers, rechtsklik de gepaste OU=HR en kies *Delegate Control…* . Voeg via de wizard de juiste groep (IT-admins) toe en laat toe dat deze groep nieuwe gebruikersaccounts kan maken, verwijderen en beheren.
Log op de Windows 10-machine in als een gebruiker in de groep IT-admins en ga via het start menu naar *Windows Administrative Tools*, Active Directory Users and Computers. Probeer een nieuwe gebruiker aan te maken onder de OU=HR; lukt dit? Waarom niet? Wat moet je doen om het wel te laten lukken? Doe dit nu en test opnieuw. Wanneer je hetzelfde probeert onder de OU=Sales dan zal je zien dat dit niet lukt (aangezien de gebruiker hier geen rechten voor heeft).
=== Overname blokkeren of niet
In een GPO kan "Block inheritance" ingesteld worden. Hierdoor worden de instellingen van een hoger niveau NIET toegepast, we beginnen weer met een “schone lei”. Hierop is evenwel één uitzondering: op een (ouder-)GPO kan ook “Enforced” gespecifieerd worden. Het gevolg is dan dat de instellingen lager in de hiërarchie (ook indien er een "Block inheritance" tussen staat) niet meer van toepassing zijn.
Maak zelf een demo-oefening om dit te testen/demonstreren. Tip: Remove Recycle Bin icon from Desktop.
== Wat moet je na dit labo kennen/kunnen
* Je weet en kan uitleggen/toepassen dat GPO op verschillende niveaus toegepast kunnen worden, nl. domein, site, ou (begrijpen, toepassen)
* Je weet en kan uitleggen/toepassen dat GPO op verschillende niveaus in een andere volgorde worden toegepast, nl. domein, site, ou (begrijpen, toepassen)
* Binnen een OU kan je GPO's in de juiste volgorde zetten (toepassen)
* Binnen een domein kan je GPO(s) in de juiste volgorde zetten (toepassen)
* Je kan met behulp van het Internet policy- en preference-regels toepassen (toepassen)
* Je kan met behulp van het Internet uitleggen wat het verschil is tussen een policy en een preference (begrijpen)
* Je kan uitzoeken waarom een beleidsregel (policy/preference) niet toegepast is voor een gebruiker/computer (toepassen, analyseren)
* Je weet en kan het verschil uitleggen tussen de twee delen waaruit een GPO bestaat *Computer Configuration* en *User Configuration* (onthouden)
* Je kan de tools gpresult en gpupdate gebruiken (toepassen)
* Je kan de RSAT-tools installeren op een Windows 10 machine en deze tools ook correct gebruiken (als administrator of via delegatie) (begrijpen, toepassen)
* Je kan eenvoudige taken binnen "AD Users en Computers" delegeren naar een AD-gebruiker of een AD-groep (begrijpen, toepassen)
* Je kent het concept "Block inheritance" bij een GPO, kan dat uitleggen en toepassen aan de hand van een concreet voorbeeld (toepassen, analyseren)
| 98.279661
| 1,097
| 0.789859
|
25b44b1106c0c0d90b3afc4cdf30cd4fa123204e
| 320
|
asciidoc
|
AsciiDoc
|
doc/configuration/example.asciidoc
|
mwocka/devon-production-line-shared-lib
|
35d392525adae45f5f724f0fd3cdac688b06ab5a
|
[
"Apache-2.0"
] | 4
|
2018-12-19T08:08:49.000Z
|
2019-06-27T07:49:36.000Z
|
doc/configuration/example.asciidoc
|
mwocka/devon-production-line-shared-lib
|
35d392525adae45f5f724f0fd3cdac688b06ab5a
|
[
"Apache-2.0"
] | 22
|
2018-11-05T07:52:00.000Z
|
2019-08-19T14:27:30.000Z
|
doc/configuration/example.asciidoc
|
mwocka/devon-production-line-shared-lib
|
35d392525adae45f5f724f0fd3cdac688b06ab5a
|
[
"Apache-2.0"
] | 15
|
2018-10-18T10:30:21.000Z
|
2019-11-15T06:05:59.000Z
|
= Usage Examples
:toc:
== Installing a custom tool in Jenkins
```Groovy
@Library('ProductionLineTemplateLib')
import com.capgemini.productionline.configuration.*
Jenkins jenkinsConfiguration = new Jenkins();
println jenkinsConfiguration.addCustomTool('gcc', 'cmd1', '/usr/bin/', '/hom/', 'bin', '/pwd/', null);
```
| 21.333333
| 102
| 0.725
|
a71b2f1243722489c08cfd7b676ec7106796355c
| 2,962
|
adoc
|
AsciiDoc
|
docs/partner_editable/faq_troubleshooting.adoc
|
holt-calder/quickstart-tableau-sagemaker
|
592a1cc7e35dfb66dcd42c35cd0ba5ae94966827
|
[
"Apache-2.0"
] | 3
|
2021-02-26T13:12:18.000Z
|
2021-10-30T16:32:52.000Z
|
docs/partner_editable/faq_troubleshooting.adoc
|
holt-calder/quickstart-tableau-sagemaker
|
592a1cc7e35dfb66dcd42c35cd0ba5ae94966827
|
[
"Apache-2.0"
] | 3
|
2021-02-25T12:44:08.000Z
|
2021-06-28T20:35:48.000Z
|
docs/partner_editable/faq_troubleshooting.adoc
|
holt-calder/quickstart-tableau-sagemaker
|
592a1cc7e35dfb66dcd42c35cd0ba5ae94966827
|
[
"Apache-2.0"
] | 4
|
2021-02-25T19:11:03.000Z
|
2021-10-30T16:32:44.000Z
|
// Add any tips or answers to anticipated questions.
== FAQ
*Q.* I encountered a *CREATE_FAILED* error when I launched the Quick Start.
*A.* If AWS CloudFormation fails to create the stack, relaunch the template with *Rollback on failure* set to *Disabled*. This setting is under *Advanced* in the AWS CloudFormation console on the *Configure stack options* page. With this setting, the stack’s state is retained, and you can troubleshoot the issue.
WARNING: When you set *Rollback on failure* to *Disabled*, you continue to incur AWS charges for this stack. Ensure that you delete the stack after troubleshooting.
For more information, see https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/troubleshooting.html[Troubleshooting AWS CloudFormation^].
*Q.* I encountered a size-limitation error when I deployed the AWS CloudFormation templates.
*A.* Launch the Quick Start templates from the links in this guide or from another S3 bucket. If you deploy the templates from a local copy on your computer or from a location other than an S3 bucket, you might encounter template-size limitations. For more information, see http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cloudformation-limits.html[AWS CloudFormation quotas^].
*Q.* How do I launch the Quick Start into AWS GovCloud (US)?
*A.* While AWS GovCloud (US) isn't listed as a link:#_supported_regions[supported Region], this Quick Start can be deployed into AWS GovCloud (US) with a few modifications to the workload template. To do this, make the following changes to the `SageMakerAPI` section of the workload template:
----
Domain:
CertificateArn: !Ref CertificateARN
DomainName: !Ref DomainName
EndpointConfiguration: EDGE
Route53:
HostedZoneId: !Ref HostedZoneId
----
* Change the `EndpointConfiguration` parameter from `EDGE` to `REGIONAL`.
* Delete `Route53:` and `HostedZoneId: !Ref HostedZoneId`, the two lines immediately following the `EndpointConfiguration` parameter.
----
Domain:
CertificateArn: !Ref CertificateARN
DomainName: !Ref DomainName
EndpointConfiguration: REGIONAL
----
These changes require a modification to the deployment steps, as your certificate is deployed in the same Region as your AWS deployment to support the Regional domain configuration.
After modifying the workload template, you can launch the Quick Start into your AWS account. After the stack is deployed, navigate to Route 53 and identify the alias record created for the custom domain (matching the output in your CloudFormation console). Manually modify this record to change it from an alias record to a CNAME record. For more information about AWS GovCloud (US) and Route 53, see https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/govcloud-r53.html[Amazon Route 53^].
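If you prefer to script that manual record change, the following is a hedged sketch using boto3; the hosted zone ID, record name, and target value are placeholders, so verify the actual values in your Route 53 console before running anything like this.

[source,python]
----
# Hedged sketch: replace the alias record with a CNAME record via boto3.
# HostedZoneId, record name, and target value below are placeholders.
import boto3

route53 = boto3.client("route53")
route53.change_resource_record_sets(
    HostedZoneId="Z0000000000000EXAMPLE",   # placeholder hosted zone ID
    ChangeBatch={
        "Comment": "Switch custom domain from alias to CNAME for AWS GovCloud (US)",
        "Changes": [
            {
                "Action": "UPSERT",
                "ResourceRecordSet": {
                    "Name": "api.example.com.",   # placeholder custom domain name
                    "Type": "CNAME",
                    "TTL": 300,
                    "ResourceRecords": [
                        # placeholder target; use the value shown for the API in your outputs
                        {"Value": "d-abc123.execute-api.us-gov-west-1.amazonaws.com"}
                    ],
                },
            }
        ],
    },
)
----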
With these modifications, the deployment should be accessible from AWS GovCloud (US). You can test this by calling the info endpoint of your API using the custom domain name.
| 65.822222
| 495
| 0.787306
|
29c8d5eb5891c652bcb3dd99738658bca3f63340
| 9,026
|
adoc
|
AsciiDoc
|
docs/asciidoc/modules/ROOT/partials/usage/apoc.path.expand.adoc
|
alexwoolford/neo4j-apoc-procedures
|
ad48af6a7dde19825d7fcc100fd2c0612536289d
|
[
"Apache-2.0"
] | 1,481
|
2016-04-16T00:24:31.000Z
|
2022-03-29T08:15:38.000Z
|
docs/asciidoc/modules/ROOT/partials/usage/apoc.path.expand.adoc
|
alexwoolford/neo4j-apoc-procedures
|
ad48af6a7dde19825d7fcc100fd2c0612536289d
|
[
"Apache-2.0"
] | 1,747
|
2016-04-23T07:53:53.000Z
|
2022-03-31T14:35:58.000Z
|
docs/asciidoc/modules/ROOT/partials/usage/apoc.path.expand.adoc
|
alexwoolford/neo4j-apoc-procedures
|
ad48af6a7dde19825d7fcc100fd2c0612536289d
|
[
"Apache-2.0"
] | 528
|
2016-04-16T23:11:11.000Z
|
2022-03-23T02:12:43.000Z
|
The examples in this section are based on the following sample graph:
include::partial$expand-sample-graph.adoc[]
Let's start by expanding paths from the Praveena node.
We only want to consider the `KNOWS` relationship type, so we'll specify that as the relationship filter.
.The following returns the paths to people that Praveena `KNOWS` from 1 to 2 hops
[source,cypher]
----
MATCH (p:Person {name: "Praveena"})
CALL apoc.path.expand(p, "KNOWS", null, 1, 2)
YIELD path
RETURN path, length(path) AS hops
ORDER BY hops;
----
.Results
[opts="header", cols="4,1"]
|===
| path | hops
| (:Person:Engineering {name: "Praveena"})<-[:KNOWS]-(:Person:Engineering {name: "Zhen"}) | 1
| (:Person:Engineering {name: "Praveena"})<-[:KNOWS]-(:Person:Engineering {name: "Zhen"})-[:KNOWS]->(:Person:Engineering {name: "Martin"}) | 2
| (:Person:Engineering {name: "Praveena"})<-[:KNOWS]-(:Person:Engineering {name: "Zhen"})-[:KNOWS]->(:Person:DevRel {name: "Lju"}) | 2
| (:Person:Engineering {name: "Praveena"})<-[:KNOWS]-(:Person:Engineering {name: "Zhen"})-[:KNOWS]->(:Person:Field {name: "Stefan"}) | 2
|===
Praveena only has a direct `KNOWS` relationship to Zhen, but Zhen has `KNOWS` relationships to 3 other people, which means they're 2 hops away from Praveena.
We can also provide a node label filter to restrict the nodes that are returned.
The following query only returns paths where every node has the `Engineering` label.
.The following returns paths containing only `Engineering` people that Praveena `KNOWS` from 1 to 2 hops
[source,cypher]
----
MATCH (p:Person {name: "Praveena"})
CALL apoc.path.expand(p, "KNOWS", "+Engineering", 1, 2)
YIELD path
RETURN path, length(path) AS hops
ORDER BY hops;
----
.Results
[opts="header", cols="4,1"]
|===
| path | hops
| (:Person:Engineering {name: "Praveena"})<-[:KNOWS]-(:Person:Engineering {name: "Zhen"}) | 1
| (:Person:Engineering {name: "Praveena"})<-[:KNOWS]-(:Person:Engineering {name: "Zhen"})-[:KNOWS]->(:Person:Engineering {name: "Martin"}) | 2
|===
We lose the paths that ended with Lju and Stefan because neither of those nodes had the `Engineering` label.
We can specify multiple relationship types.
The following query starts from the Alicia node, and then expands the `FOLLOWS` and `KNOWS` relationships:
.The following returns paths containing people that Alicia `FOLLOWS` or `KNOWS` from 1 to 3 hops
[source,cypher]
----
MATCH (p:Person {name: "Alicia"})
CALL apoc.path.expand(p, "FOLLOWS>|KNOWS", "", 1, 3)
YIELD path
RETURN path, length(path) AS hops
ORDER BY hops;
----
.Results
[opts="header", cols="4,1"]
|===
| path | hops
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"}) | 1
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Sales {name: "Jonny"}) | 1
| (:Person:Product {name: "Alicia"})-[:KNOWS]->(:Person:Product {name: "Jake"}) | 1
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:Engineering {name: "Zhen"}) | 2
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:Engineering {name: "Praveena"}) | 2
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:DevRel {name: "Mark"}) | 2
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Sales {name: "Jonny"})-[:KNOWS]->(:Person:Sales {name: "Anthony"}) | 2
| (:Person:Product {name: "Alicia"})-[:KNOWS]->(:Person:Product {name: "Jake"})<-[:KNOWS]-(:Person:DevRel {name: "Mark"}) | 2
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:Engineering {name: "Zhen"})-[:FOLLOWS]->(:Person:Product {name: "John"}) | 3
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:Engineering {name: "Zhen"})-[:KNOWS]->(:Person:Engineering {name: "Martin"}) | 3
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:Engineering {name: "Zhen"})-[:KNOWS]->(:Person:Engineering {name: "Praveena"}) | 3
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:Engineering {name: "Zhen"})-[:KNOWS]->(:Person:DevRel {name: "Lju"}) | 3
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:Engineering {name: "Zhen"})-[:KNOWS]->(:Person:Field {name: "Stefan"}) | 3
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:Engineering {name: "Praveena"})-[:FOLLOWS]->(:Person:Field {name: "Joe"}) | 3
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:Engineering {name: "Praveena"})<-[:KNOWS]-(:Person:Engineering {name: "Zhen"}) | 3
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:DevRel {name: "Mark"})-[:FOLLOWS]->(:Person:Field {name: "Stefan"}) | 3
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:DevRel {name: "Mark"})-[:KNOWS]->(:Person:Product {name: "Jake"}) | 3
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Sales {name: "Jonny"})-[:KNOWS]->(:Person:Sales {name: "Anthony"})-[:FOLLOWS]->(:Person:Field {name: "Joe"}) | 3
| (:Person:Product {name: "Alicia"})-[:KNOWS]->(:Person:Product {name: "Jake"})<-[:KNOWS]-(:Person:DevRel {name: "Mark"})-[:FOLLOWS]->(:Person:Field {name: "Stefan"}) | 3
|===
This query returns 19 paths, Alicia is very well connected!
We can also specify traversal termination criteria using label filters.
If we wanted to terminate a traversal as soon as the traversal encounters a node containing the `Engineering` label, we can use the `/Engineering` node filter.
.The following returns paths containing people that Alicia `FOLLOWS` or `KNOWS` from 1 to 3 hops, terminating as soon as a node with the `Engineering` label is reached
[source,cypher]
----
MATCH (p:Person {name: "Alicia"})
CALL apoc.path.expand(p, "FOLLOWS>|KNOWS", "/Engineering", 1, 3)
YIELD path
RETURN path, length(path) AS hops
ORDER BY hops;
----
.Results
[opts="header", cols="4,1"]
|===
| path | hops
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:Engineering {name: "Zhen"}) | 2
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:Engineering {name: "Praveena"}) | 2
|===
We're now down to only two paths.
But this query doesn't capture all of the paths from Alicia that end in a node with the `Engineering` label.
We can use the `>Engineering` node filter to define a traversal that:
* only returns paths that terminate at nodes with the `Engineering` label
* continues expansion to end nodes after that, looking for more paths that end with the `Engineering` label
.The following returns paths containing people that Alicia `FOLLOWS` or `KNOWS` from 1 to 3 hops, where paths end with a node with the `Engineering` label
[source,cypher]
----
MATCH (p:Person {name: "Alicia"})
CALL apoc.path.expand(p, "FOLLOWS>|KNOWS", ">Engineering", 1, 3)
YIELD path
RETURN path, length(path) AS hops
ORDER BY hops;
----
.Results
[opts="header", cols="4,1"]
|===
| path | hops
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:Engineering {name: "Zhen"}) | 2
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:Engineering {name: "Praveena"}) | 2
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:Engineering {name: "Zhen"})-[:KNOWS]->(:Person:Engineering {name: "Martin"}) | 3
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:Engineering {name: "Zhen"})-[:KNOWS]->(:Person:Engineering {name: "Praveena"}) | 3
| (:Person:Product {name: "Alicia"})-[:FOLLOWS]->(:Person:Field {name: "Joe"})-[:FOLLOWS]->(:Person:Engineering {name: "Praveena"})<-[:KNOWS]-(:Person:Engineering {name: "Zhen"}) | 3
|===
Our query now also returns paths going through Praveena and Zhen, one going to Martin and the others going back to Zhen and Praveena.
| 63.56338
| 182
| 0.603368
|
a93aa22550e47b81287ff504023f379f7476e6bf
| 2,416
|
adoc
|
AsciiDoc
|
documentation/api/io.strimzi.api.kafka.model.EntityTopicOperatorSpec.adoc
|
codemagicprotege/strimzi-kafka-operator
|
23548c30aeb00b4d2a2df07592661b9fa9f399f1
|
[
"Apache-2.0"
] | 2,978
|
2018-06-09T18:20:00.000Z
|
2022-03-31T03:33:27.000Z
|
documentation/api/io.strimzi.api.kafka.model.EntityTopicOperatorSpec.adoc
|
codemagicprotege/strimzi-kafka-operator
|
23548c30aeb00b4d2a2df07592661b9fa9f399f1
|
[
"Apache-2.0"
] | 4,066
|
2018-06-09T23:08:28.000Z
|
2022-03-31T22:40:29.000Z
|
documentation/api/io.strimzi.api.kafka.model.EntityTopicOperatorSpec.adoc
|
codemagicprotege/strimzi-kafka-operator
|
23548c30aeb00b4d2a2df07592661b9fa9f399f1
|
[
"Apache-2.0"
] | 895
|
2018-06-13T18:03:22.000Z
|
2022-03-31T11:22:11.000Z
|
Configures the Topic Operator.
[id='property-topic-operator-logging-{context}']
=== `logging`
The Topic Operator has a configurable logger:
* `rootLogger.level`
The Topic Operator uses the Apache `log4j2` logger implementation.
Use the `logging` property in the `entityOperator.topicOperator` field of the `Kafka` resource to configure loggers and logger levels.
You can set the log levels by specifying the logger and level directly (inline) or use a custom (external) ConfigMap.
If a ConfigMap is used, you set `logging.valueFrom.configMapKeyRef.name` property to the name of the ConfigMap containing the external logging configuration. Inside the ConfigMap, the logging configuration is described using `log4j2.properties`. Both `logging.valueFrom.configMapKeyRef.name` and `logging.valueFrom.configMapKeyRef.key` properties are mandatory. A ConfigMap using the exact logging configuration specified is created with the custom resource when the Cluster Operator is running, then recreated after each reconciliation. If you do not specify a custom ConfigMap, default logging settings are used. If a specific logger value is not set, upper-level logger settings are inherited for that logger.
For more information about log levels, see {ApacheLoggers}.
Here we see examples of `inline` and `external` logging.
.Inline logging
[source,yaml,subs="+quotes,attributes"]
----
apiVersion: {KafkaApiVersion}
kind: Kafka
metadata:
name: my-cluster
spec:
kafka:
# ...
zookeeper:
# ...
entityOperator:
# ...
topicOperator:
watchedNamespace: my-topic-namespace
reconciliationIntervalSeconds: 60
logging:
type: inline
loggers:
rootLogger.level: INFO
# ...
----
.External logging
[source,yaml,subs="+quotes,attributes"]
----
apiVersion: {KafkaApiVersion}
kind: Kafka
metadata:
name: my-cluster
spec:
kafka:
# ...
zookeeper:
# ...
entityOperator:
# ...
topicOperator:
watchedNamespace: my-topic-namespace
reconciliationIntervalSeconds: 60
logging:
type: external
valueFrom:
configMapKeyRef:
name: customConfigMap
key: topic-operator-log4j2.properties
# ...
----
.Garbage collector (GC)
Garbage collector logging can also be enabled (or disabled) using the xref:con-common-configuration-garbage-collection-reference[`jvmOptions` property].
| 33.09589
| 712
| 0.733444
|
0d66bb4987d89f1cc4def636b78a8a9efdbc3859
| 174
|
adoc
|
AsciiDoc
|
src/main/asciidoc/index008.adoc
|
qwfys/asciidoctor-sample
|
3286ad201c23036d3b65eca750ed8d01bde8ee50
|
[
"Apache-2.0"
] | null | null | null |
src/main/asciidoc/index008.adoc
|
qwfys/asciidoctor-sample
|
3286ad201c23036d3b65eca750ed8d01bde8ee50
|
[
"Apache-2.0"
] | null | null | null |
src/main/asciidoc/index008.adoc
|
qwfys/asciidoctor-sample
|
3286ad201c23036d3b65eca750ed8d01bde8ee50
|
[
"Apache-2.0"
] | null | null | null |
= Warehouse Data
== Warehouse Operations
[plantuml,./image/008/usecase,png]
....
@startuml
(First usecase)
(Another usecase) as (UC2)
usecase UC3
usecase (Last\nusecase) as UC4
@enduml
....
== Table Design
| 10.875
| 34
| 0.666667
|
8741df05658eb724d97a56e22e0bd2c3e697426d
| 5,048
|
adoc
|
AsciiDoc
|
docs/asciidoc/getting-started-rop/src/docs/asciidoc/_getting-started-rop/part2/starting.adoc
|
rohankumardubey/cayenne
|
d6beb87eacd9ef126c2ec6a1a89fb3b23dfb5912
|
[
"Apache-2.0"
] | 289
|
2015-01-28T13:35:27.000Z
|
2022-03-27T07:51:03.000Z
|
docs/asciidoc/getting-started-rop/src/docs/asciidoc/_getting-started-rop/part2/starting.adoc
|
rohankumardubey/cayenne
|
d6beb87eacd9ef126c2ec6a1a89fb3b23dfb5912
|
[
"Apache-2.0"
] | 78
|
2015-01-05T19:28:33.000Z
|
2022-02-01T13:53:11.000Z
|
docs/asciidoc/getting-started-rop/src/docs/asciidoc/_getting-started-rop/part2/starting.adoc
|
acidburn0zzz/cayenne
|
e4809a008e3d084ad26c62e419eff35c8009b65c
|
[
"Apache-2.0"
] | 150
|
2015-01-15T08:13:55.000Z
|
2022-03-19T19:42:16.000Z
|
// Licensed to the Apache Software Foundation (ASF) under one or more
// contributor license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright ownership.
// The ASF licenses this file to you under the Apache License, Version
// 2.0 (the "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0 Unless required by
// applicable law or agreed to in writing, software distributed under the
// License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for
// the specific language governing permissions and limitations under the
// License.
include::../var.adoc[]
=== Starting Client Project
==== Create an ROP Client Project in Eclipse
Creation of a new Eclipse project has been discussed in some detail in the "Getting Started with Cayenne" guide,
so we will omit the screenshots for the common parts.
In Eclipse select "File > New > Other..." and then "Maven > Maven Project". Click "Next".
On the following screen check "Create a simple project" checkbox and click "Next" again.
In the dialog shown on the screenshot below, enter "org.example.cayenne" for the "Group Id" and "tutorial-rop-client"
for the "Artifact Id" (both without the quotes) and click "Finish".
Now you should have a new empty project in the Eclipse workspace. Check that the project Java compiler settings are correct.
Rightclick on the "tutorial-rop-client" project, select "Properties > Java Compiler"
and ensure that "Compiler compliance level" is at least 1.5 (some versions of Maven plugin
seem to be setting it to 1.4 by default).
==== Create Client Java Classes
The client doesn't need the XML ORM mapping, as it is loaded from the server. However it needs the client-side Java classes.
Let's generate them from the existing mapping:
* Start CayenneModeler and open `cayenne.xml` from the "tutorial" project (located under `tutorial/src/main/resources`),
unless it is already open.
* Select the "datamap" DataMap and check "Allow Client Entities" checkbox.
* Enter `org.example.cayenne.persistent.client` for the "Client Java Package" and click "Update.." button
next to the field to refresh the client package of all entities.
image::../images/datamap-enableclient.png[align="center"]
* Select "Tools > Generate Classes" menu.
* For "Type" select "Client Persistent Objects".
* For the "Output Directory" select `tutorial-rop-client/src/main/java` folder (as client classes should go in the client project).
* Click on "Classes" tab and check the "Check All Classes" checkbox (unless it is already checked and reads "Uncheck all Classes").
* Click "Generate".
Now go back to Eclipse, right click on "tutorial-rop-client" project and select "Refresh" - you should see pairs
of classes generated for each mapped entity, same as on the server. And again, we see a bunch of errors in those classes.
Let's fix it now by adding two dependencies, "cayenne-client" and "hessian", in the bottom of the pom.xml file.
We also need to add Caucho M2 repository to pull Hessian jar files. The resulting POM should look like this:
[source, XML,subs="verbatim,attributes"]
----
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/maven-v4_0_0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>org.example.cayenne</groupId>
<artifactId>tutorial-rop-client</artifactId>
<version>0.0.1-SNAPSHOT</version>
<dependencies>
<dependency>
<groupId>org.apache.cayenne</groupId>
<artifactId>cayenne-client-jetty</artifactId>
<!-- Here specify the version of Cayenne you are actually using -->
<version>{version}</version>
</dependency>
<dependency>
<groupId>com.caucho</groupId>
<artifactId>hessian</artifactId>
<version>4.0.38</version>
</dependency>
</dependencies>
<repositories>
<repository>
<id>caucho</id>
<name>Caucho Repository</name>
<url>http://caucho.com/m2</url>
<layout>default</layout>
<snapshots>
<enabled>false</enabled>
</snapshots>
<releases>
<enabled>true</enabled>
</releases>
</repository>
</repositories>
</project>
----
Your computer must be connected to the internet. Once you save the pom.xml, Eclipse will download the needed jar files
and add them to the project build path. After that all the errors should disappear.
Now let's check the entity class pairs. They look almost identical to their server counterparts,
although the superclass and the property access code are different. At this point these differences are somewhat academic,
so let's go on with the tutorial.
| 46.311927
| 131
| 0.719097
|
008724829c5f78457f48224f8d19ac233a4014f5
| 1,690
|
adoc
|
AsciiDoc
|
content/contribute/docs-team.adoc
|
matthijskooijman/kicad-website
|
4374ec48184c906040b70eec15984d02da6a20f8
|
[
"CC-BY-3.0"
] | 1
|
2022-02-11T05:40:32.000Z
|
2022-02-11T05:40:32.000Z
|
content/contribute/docs-team.adoc
|
matthijskooijman/kicad-website
|
4374ec48184c906040b70eec15984d02da6a20f8
|
[
"CC-BY-3.0"
] | null | null | null |
content/contribute/docs-team.adoc
|
matthijskooijman/kicad-website
|
4374ec48184c906040b70eec15984d02da6a20f8
|
[
"CC-BY-3.0"
] | 2
|
2020-10-27T18:51:45.000Z
|
2021-04-06T02:51:35.000Z
|
+++
title = "Docs Team"
categories = [ "Contribute" ]
[menu.main]
parent = "Contribute"
name = "Docs Team"
weight = 10
+++
The Docs Team works on writing and maintaining documentation.
Documentation includes both the KiCad application and the website.
We are always looking for volunteers who are interested in
contributing to the KiCad documentation project.
The first thing to do before anything is to join the
link:https://launchpad.net/~kicad-doc-devs[KiCad Documentation Mailing
List]. You can also join the link:/community/irc[IRC channel] if you
have questions. Usually, there are multiple KiCad developers,
documenters and users in the channel.
Most of the action takes place on two repositories on GitHub.
- link:https://github.com/KiCad/kicad-doc[Official Documentation]
- link:https://github.com/KiCad/kicad-website[Website]
What are the tasks we are working on?
- Authoring
- Screenshot production
- Proofreading
- Translating
- Documenting new features
We welcome everyone who can contribute in a positive way to the
official documentation. This could be any of the above mentioned
points.
Simply fork the repositories linked above on GitHub and submit a pull
request with changes for review.
The reference language of the documentation and the official language of
the program is English.
If your mother tongue is different from English you can still contribute
to the documentation effort translating the program and/or the
documentation.
We suggest starting by translating the program. There is also a very nice
link:https://github.com/KiCad/kicad-doc/blob/master/src/gui_translation_howto/gui_translation_howto.adoc[KiCad GUI Translation HOWTO].
| 33.137255
| 134
| 0.789349
|
fd0ae75951132411a6e507b5ed113ad5876d1ef7
| 2,633
|
adoc
|
AsciiDoc
|
_posts/2017-08-02-Network-Information-A-P-I.adoc
|
prateekjadhwani/prateekjadhwani.github.io
|
3c7a72fcbd5a0f449549f1b10dcd8a8d19e35f10
|
[
"MIT"
] | null | null | null |
_posts/2017-08-02-Network-Information-A-P-I.adoc
|
prateekjadhwani/prateekjadhwani.github.io
|
3c7a72fcbd5a0f449549f1b10dcd8a8d19e35f10
|
[
"MIT"
] | null | null | null |
_posts/2017-08-02-Network-Information-A-P-I.adoc
|
prateekjadhwani/prateekjadhwani.github.io
|
3c7a72fcbd5a0f449549f1b10dcd8a8d19e35f10
|
[
"MIT"
] | null | null | null |
= Network Information API
:hp-tags: network, network information api, API, specification, spec, demo
As technology has developed over the past few decades, web apps have gone from running only on desktop computers to running on mobile phones and IoT devices. With so many different clients using so many different types of network, there will always be times when a web app needs information such as the type of network being used or the downlink speed provided by the network provider. With this in mind, web browsers are now shipping with a new specification called the link:http://wicg.github.io/netinfo/[Network Information API].
At the time of writing, only Chrome for Android supported this spec. So, if you are on Chrome for Android, you should be able to see the demo working properly. In case you are unable to see the proper data due to an incomplete spec implementation, here is what you should be able to see.
Querying for `navigator.connection.type` would return one of these values
[width="35%"]
|========
| bluetooth
| cellular
| ethernet
| mixed
| none
| other
| unknown
| wifi
| wimax
|========
These are basically the types of network that a user can be on.
Querying for `navigator.connection.effectiveType` would return one of the following values, listed here with their meanings.
[width="100%",options="header"]
|========
| Effective Type | Speed | Type of data downloadable
| 2g | Very Slow | Only Text and Small Images
| 3g | Good Speed | Can download high quality images and SD Videos
| 4g | High Speed | Can download HD Video and large files
| slow-2g | Very Very Slow | Only Text
|========
`effectiveType` can be used to know the user network speed.
All these properties of the `navigator.connection` API can help web apps decide which type of resources to download in order to provide the user with a better experience. For example, a web app might be able to decide whether to serve the user a low-quality or a high-quality video, or whether a news article should be displayed as text only or with images.
So, without any further delay, here is the demo.
++++
<p data-height="300" data-theme-id="3991" data-slug-hash="gxrwNb" data-default-tab="result" data-user="prateekjadhwani" data-embed-version="2" data-pen-title="Network Information API" class="codepen">See the Pen <a href="https://codepen.io/prateekjadhwani/pen/gxrwNb/">Network Information API</a> by Prateek Jadhwani (<a href="https://codepen.io/prateekjadhwani">@prateekjadhwani</a>) on <a href="https://codepen.io">CodePen</a>.</p>
<script async src="https://production-assets.codepen.io/assets/embed/ei.js"></script>
++++
| 58.511111
| 540
| 0.753513
|
df41ba64e27faaec4f9e4b2d8b3a9ade72cd1d5b
| 2,436
|
adoc
|
AsciiDoc
|
topics/snippets/istio_installation_auth.adoc
|
jomeier/maistra.github.io
|
ade0992fde1346ceee685068001a957840b57725
|
[
"Apache-2.0"
] | 1
|
2021-05-10T02:11:43.000Z
|
2021-05-10T02:11:43.000Z
|
topics/snippets/istio_installation_auth.adoc
|
jomeier/maistra.github.io
|
ade0992fde1346ceee685068001a957840b57725
|
[
"Apache-2.0"
] | null | null | null |
topics/snippets/istio_installation_auth.adoc
|
jomeier/maistra.github.io
|
ade0992fde1346ceee685068001a957840b57725
|
[
"Apache-2.0"
] | null | null | null |
[source,yaml]
----
apiVersion: maistra.io/v1
kind: ServiceMeshControlPlane
metadata:
name: auth-install
spec:
template: default
# NOTE, if you remove all children from an element, you should remove the
# element too. An empty element is interpreted as null and will override all
# default values (i.e. no values will be specified for that element, not even
# the defaults baked into the chart values.yaml).
istio:
global:
# the following lines enable tls across the control and data planes
controlPlaneSecurityEnabled: true
mtls:
enabled: true
proxy:
# constrain resources for use in smaller environments
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 500m
memory: 128Mi
gateways:
istio-egressgateway:
# disable autoscaling for use in smaller environments
autoscaleEnabled: false
istio-ingressgateway:
# disable autoscaling for use in smaller environments
autoscaleEnabled: false
# set to true to enable IOR
ior_enabled: false
mixer:
policy:
# disable autoscaling for use in smaller environments
autoscaleEnabled: false
telemetry:
# disable autoscaling for use in smaller environments
autoscaleEnabled: false
# constrain resources for use in smaller environments
resources:
requests:
cpu: 100m
memory: 1G
limits:
cpu: 500m
memory: 4G
pilot:
# disable autoscaling for use in smaller environments
autoscaleEnabled: false
# increase random sampling rate for development/testing
traceSampling: 100.0
kiali:
# change to false to disable kiali
enabled: true
# create a secret for accessing kiali dashboard with the following credentials
# dashboard:
# user: admin
# passphrase: admin
tracing:
# change to false to disable tracing (i.e. jaeger)
enabled: true
jaeger:
tag: 1.13.1
# simple, all-in-one strategy
template: all-in-one
# production strategy, utilizing elasticsearch
#template: production-elasticsearch
# if required. only one instance may use agentStrategy=DaemonSet
#agentStrategy: DaemonSet
threeScale:
enabled: false
----
| 28 | 84 | 0.636289 |
7752cd7f1d92ddc7d64f8806c7e9b076c07871a5 | 1,857 | adoc | AsciiDoc | subprojects/docs/src/samples/java/modules-with-transform/README.adoc | magneticflux-/gradle | 5c2bafc148279ffd2d0a0d31c9daec91351aea87 | ["Apache-2.0"] | null | null | null | subprojects/docs/src/samples/java/modules-with-transform/README.adoc | magneticflux-/gradle | 5c2bafc148279ffd2d0a0d31c9daec91351aea87 | ["Apache-2.0"] | null | null | null | subprojects/docs/src/samples/java/modules-with-transform/README.adoc | magneticflux-/gradle | 5c2bafc148279ffd2d0a0d31c9daec91351aea87 | ["Apache-2.0"] | null | null | null |
NOTE: You can open this sample inside an IDE using the https://www.jetbrains.com/help/idea/gradle.html#gradle_import_project_start[IntelliJ's Gradle import] or https://projects.eclipse.org/projects/tools.buildship[Eclipse Buildship].
This sample shows how link:{userManualPath}/artifact_transforms.html[artifact transforms] can be utilised to turn traditional Java libraries into Java Modules by adding additional information to the corresponding Jars.
For that, a plugin called `extra-java-module-info` is defined in the `buildSrc` folder.
This plugin can be copied into another project and adjusted as needed to solve use cases where it is desired to treat every dependency as a Java Module.
The example defines an application that relies on libraries from Maven central where some of them are not available as modules.
It uses `commons-cli` (not a module) to parse the command line arguments, which can contain a JSON String, and `gson` (a proper module) to parse the JSON string.
It also utilises `commons-lang3` (an automatic module) and `commons-beanutils` (not a module) which brings in some additional dependencies that are also not modules.
By configuring our own `extra-java-module-info` plugin in the root build script, we add information to turn the legacy libraries into modules.
====
include::sample[dir="groovy",files="build.gradle[tags=extraModuleInfo]"]
include::sample[dir="kotlin",files="build.gradle.kts[tags=extraModuleInfo]"]
====
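Once the legacy libraries carry module information, the application's own module descriptor can require them like any other module. Below is a minimal, illustrative sketch of what such a `module-info.java` could look like; the module and application names shown here are assumptions and depend on what the `extra-java-module-info` configuration assigns to each legacy Jar.

[source,java]
----
// Illustrative only: module names depend on the extra-java-module-info configuration.
module org.gradle.sample.app {
    requires com.google.gson;           // a proper module, usable as-is
    requires org.apache.commons.lang3;  // automatic module
    requires commons.cli;               // hypothetical name given to the non-modular Jar
    requires commons.beanutils;         // hypothetical name given to the non-modular Jar
}
----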
You can run the example application like this:
```
run --args='-json {"message":"Hello","receivers":["Lisa","John"]} -debug'
```
For more information, see link:{userManualPath}/java_library_plugin.html#sec:java_library_modular[Java Module support in the Java Library Plugin] and link:{userManualPath}/application_plugin.html#sec:application_modular[Java Module support in the Application Plugin].
| 74.28 | 267 | 0.792676 |
fac0cbe5d978d8c7c19dbac26b0e79ad949ee80a | 100,682 | adoc | AsciiDoc | docs/DeveloperGuide.adoc | lekoook/main | 632411670b8b9a7d67b300c3c344966c71cac579 | ["MIT"] | 1 | 2018-10-04T16:56:49.000Z | 2018-10-04T16:56:49.000Z | docs/DeveloperGuide.adoc | lekoook/main | 632411670b8b9a7d67b300c3c344966c71cac579 | ["MIT"] | 110 | 2018-09-19T04:00:52.000Z | 2018-11-12T14:07:52.000Z | docs/DeveloperGuide.adoc | lekoook/main | 632411670b8b9a7d67b300c3c344966c71cac579 | ["MIT"] | 6 | 2018-08-31T04:14:02.000Z | 2018-11-07T07:29:08.000Z |
= CorpPro - Developer Guide
:site-section: DeveloperGuide
:toc:
:toc-title:
:toc-placement: preamble
:sectnums:
:imagesDir: images
:stylesDir: stylesheets
:xrefstyle: full
:experimental:
:linkattrs:
ifdef::env-github[]
:tip-caption: :bulb:
:note-caption: :information_source:
:warning-caption: :warning:
endif::[]
:repoURL: https://github.com/se-edu/addressbook-level4/tree/master
By: `W12-3` Since: `AUG 2018` Licence: `MIT`
== Setting up
=== Prerequisites
. *JDK `9`* or later
+
[WARNING]
JDK `10` on Windows will fail to run tests in <<UsingGradle#Running-Tests, headless mode>> due to a https://github.com/javafxports/openjdk-jfx/issues/66[JavaFX bug].
Windows developers are highly recommended to use JDK `9`.
. *IntelliJ* IDE
+
[NOTE]
IntelliJ by default has Gradle and JavaFx plugins installed. +
Do not disable them. If you have disabled them, go to `File` > `Settings` > `Plugins` to re-enable them.
=== Setting up the project in your computer
. Fork this repo, and clone the fork to your computer
. Open IntelliJ (if you are not in the welcome screen, click `File` > `Close Project` to close the existing project dialog first)
. Set up the correct JDK version for Gradle
.. Click `Configure` > `Project Defaults` > `Project Structure`
.. Click `New...` and find the directory of the JDK
. Click `Import Project`
. Locate the `build.gradle` file and select it. Click `OK`
. Click `Open as Project`
. Click `OK` to accept the default settings
. Open a console and run the command `gradlew processResources` (Mac/Linux: `./gradlew processResources`). It should finish with the `BUILD SUCCESSFUL` message. +
This will generate all resources required by the application and tests.
=== Verifying the setup
. Run the `seedu.address.MainApp` and try a few commands
. <<Testing,Run the tests>> to ensure they all pass.
=== Configurations to do before writing code
==== Configuring the coding style
This project follows https://github.com/oss-generic/process/blob/master/docs/CodingStandards.adoc[oss-generic coding standards]. IntelliJ's default style is mostly compliant with ours but it uses a different import order from ours. To rectify,
. Go to `File` > `Settings...` (Windows/Linux), or `IntelliJ IDEA` > `Preferences...` (macOS)
. Select `Editor` > `Code Style` > `Java`
. Click on the `Imports` tab to set the order
* For `Class count to use import with '\*'` and `Names count to use static import with '*'`: Set to `999` to prevent IntelliJ from contracting the import statements
* For `Import Layout`: The order is `import static all other imports`, `import java.\*`, `import javax.*`, `import org.\*`, `import com.*`, `import all other imports`. Add a `<blank line>` between each `import`
Optionally, you can follow the <<UsingCheckstyle#, UsingCheckstyle.adoc>> document to configure Intellij to check style-compliance as you write code.
==== Updating documentation to match your fork
After forking the repo, the documentation will still have the SE-EDU branding and refer to the `se-edu/addressbook-level4` repo.
If you plan to develop this fork as a separate product (i.e. instead of contributing to `se-edu/addressbook-level4`), you should do the following:
. Configure the <<Docs-SiteWideDocSettings, site-wide documentation settings>> in link:{repoURL}/build.gradle[`build.gradle`], such as the `site-name`, to suit your own project.
. Replace the URL in the attribute `repoURL` in link:{repoURL}/docs/DeveloperGuide.adoc[`DeveloperGuide.adoc`] and link:{repoURL}/docs/UserGuide.adoc[`UserGuide.adoc`] with the URL of your fork.
==== Setting up CI
Set up Travis to perform Continuous Integration (CI) for your fork. See <<UsingTravis#, UsingTravis.adoc>> to learn how to set it up.
After setting up Travis, you can optionally set up coverage reporting for your team fork (see <<UsingCoveralls#, UsingCoveralls.adoc>>).
[NOTE]
Coverage reporting could be useful for a team repository that hosts the final version but it is not that useful for your personal fork.
Optionally, you can set up AppVeyor as a second CI (see <<UsingAppVeyor#, UsingAppVeyor.adoc>>).
[NOTE]
Having both Travis and AppVeyor ensures your App works on both Unix-based platforms and Windows-based platforms (Travis is Unix-based and AppVeyor is Windows-based)
==== Getting started with coding
When you are ready to start coding,
1. Get some sense of the overall design by reading <<Design-Architecture>>.
2. Take a look at <<GetStartedProgramming>>.
== Design
[[Design-Architecture]]
=== Architecture
.Architecture Diagram
image::Architecture.png[width="600"]
The *_Architecture Diagram_* given above explains the high-level design of the App. Given below is a quick overview of each component.
[TIP]
The `.pptx` files used to create diagrams in this document can be found in the link:{repoURL}/docs/diagrams/[diagrams] folder. To update a diagram, modify the diagram in the pptx file, select the objects of the diagram, and choose `Save as picture`.
`Main` has only one class called link:{repoURL}/src/main/java/seedu/address/MainApp.java[`MainApp`]. It is responsible for,
* At app launch: Initializes the components in the correct sequence, and connects them up with each other.
* At shut down: Shuts down the components and invokes cleanup method where necessary.
<<Design-Commons,*`Commons`*>> represents a collection of classes used by multiple other components. Two of those classes play important roles at the architecture level.
* `EventsCenter` : This class (written using https://github.com/google/guava/wiki/EventBusExplained[Google's Event Bus library]) is used by components to communicate with other components using events (i.e. a form of _Event Driven_ design)
* `LogsCenter` : Used by many classes to write log messages to the App's log file.
The rest of the App consists of four components.
* <<Design-Ui,*`UI`*>>: The UI of the App.
* <<Design-Logic,*`Logic`*>>: The command executor.
* <<Design-Model,*`Model`*>>: Holds the data of the App in-memory.
* <<Design-Storage,*`Storage`*>>: Reads data from, and writes data to, the hard disk.
Each of the four components
* Defines its _API_ in an `interface` with the same name as the Component.
* Exposes its functionality using a `{Component Name}Manager` class.
For example, the `Logic` component (see the class diagram given below) defines its API in the `Logic.java` interface and exposes its functionality using the `LogicManager.java` class.
.Class Diagram of the Logic Component
image::LogicClassDiagram.png[width="800"]
[discrete]
==== Events-Driven nature of the design
The _Sequence Diagram_ below shows how the components interact for the scenario where the user issues the command `delete 1`.
.Component interactions for `delete 1` command (part 1)
image::SDforDeletePerson.png[width="800"]
[NOTE]
Note how the `Model` simply raises an `AddressBookChangedEvent` when the Address Book data are changed, instead of asking the `Storage` to save the updates to the hard disk.
The diagram below shows how the `EventsCenter` reacts to that event, which eventually results in the updates being saved to the hard disk and the status bar of the UI being updated to reflect the 'Last Updated' time.
.Component interactions for `delete 1` command (part 2)
image::SDforDeletePersonEventHandling.png[width="800"]
[NOTE]
Note how the event is propagated through the `EventsCenter` to the `Storage` and `UI` without `Model` having to be coupled to either of them. This is an example of how this Event Driven approach helps us reduce direct coupling between components.
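To illustrate the idea, given below is a small, self-contained sketch of the publish/subscribe pattern using Google's Event Bus library. The class names are simplified stand-ins for illustration, not the actual `EventsCenter` implementation.

[source,java]
----
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;

class AddressBookChangedEvent { }

class StorageStub {
    @Subscribe
    public void handleAddressBookChangedEvent(AddressBookChangedEvent event) {
        System.out.println("Saving updated data to the hard disk...");
    }
}

public class EventBusSketch {
    public static void main(String[] args) {
        EventBus eventBus = new EventBus();
        eventBus.register(new StorageStub());         // subscriber registers its handlers
        eventBus.post(new AddressBookChangedEvent()); // publisher raises the event
    }
}
----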
The sections below give more details of each component.
[[Design-Ui]]
=== UI component
.Structure of the UI Component
image::UiClassDiagram.png[width="800"]
*API* : link:{repoURL}/src/main/java/seedu/address/ui/Ui.java[`Ui.java`]
The UI consists of a `MainWindow` that is made up of parts e.g. `CommandBox`, `ResultDisplay`, `PersonListPanel`, `StatusBarFooter`, `BrowserPanel` etc. All these, including the `MainWindow`, inherit from the abstract `UiPart` class.
The `UI` component uses the JavaFX UI framework. The layout of these UI parts is defined in matching `.fxml` files that are in the `src/main/resources/view` folder. For example, the layout of the link:{repoURL}/src/main/java/seedu/address/ui/MainWindow.java[`MainWindow`] is specified in link:{repoURL}/src/main/resources/view/MainWindow.fxml[`MainWindow.fxml`].
The `UI` component,
* Executes user commands using the `Logic` component.
* Binds itself to some data in the `Model` so that the UI can auto-update when data in the `Model` change.
* Responds to events raised from various parts of the App and updates the UI accordingly.
[[Design-Logic]]
=== Logic component
[[fig-LogicClassDiagram]]
.Structure of the Logic Component
image::LogicClassDiagram.png[width="800"]
*API* :
link:{repoURL}/src/main/java/seedu/address/logic/Logic.java[`Logic.java`]
. `Logic` uses the `AddressBookParser` class to parse the user command.
. This results in a `Command` object which is executed by the `LogicManager`.
. The command execution can affect the `Model` (e.g. adding a person) and/or raise events.
. The result of the command execution is encapsulated as a `CommandResult` object which is passed back to the `Ui`.
Given below is the Sequence Diagram for interactions within the `Logic` component for the `execute("delete 1")` API call.
.Interactions Inside the Logic Component for the `delete 1` Command
image::DeletePersonSdForLogic.png[width="800"]
[[Design-Model]]
=== Model component
.Structure of the Model Component
image::ModelClassDiagram.png[width="800"]
*API* : link:{repoURL}/src/main/java/seedu/address/model/Model.java[`Model.java`]
The `Model`,
* stores a `UserPref` object that represents the user's preferences.
* stores the Address Book data.
* exposes an unmodifiable `ObservableList<Person>` that can be 'observed' e.g. the UI can be bound to this list so that the UI automatically updates when the data in the list change.
* stores a `UniqueTagList` hashmap in the Address Book, where each unique `Tag` key maps to the list of `Person` objects assigned that tag.
* does not depend on any of the other three components.
[[Design-Storage]]
=== Storage component
.Structure of the Storage Component
image::StorageClassDiagram.png[width="800"]
*API* : link:{repoURL}/src/main/java/seedu/address/storage/Storage.java[`Storage.java`]
The `Storage` component,
* can save `UserPref` objects in json format and read it back.
* can save the Address Book data in xml format and read it back.
[[Design-Commons]]
=== Common classes
Classes used by multiple components are in the `seedu.addressbook.commons` package.
== Implementation
This section describes some noteworthy details on how certain features are implemented.
// tag::undoredo[]
=== Undo/Redo feature
==== Current Implementation
The undo/redo mechanism is facilitated by `VersionedAddressBook`.
It extends `AddressBook` with an undo/redo history, stored internally as an `addressBookStateList` and `currentStatePointer`.
Additionally, it implements the following operations:
* `VersionedAddressBook#commit()` -- Saves the current address book state in its history.
* `VersionedAddressBook#undo()` -- Restores the previous address book state from its history.
* `VersionedAddressBook#redo()` -- Restores a previously undone address book state from its history.
These operations are exposed in the `Model` interface as `Model#commitAddressBook()`, `Model#undoAddressBook()` and `Model#redoAddressBook()` respectively.
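Given below is a simplified sketch (not the actual `VersionedAddressBook` code) of the commit/undo/redo bookkeeping described above, with the address book state reduced to a `String` for illustration.

[source,java]
----
import java.util.ArrayList;
import java.util.List;

class VersionedAddressBookSketch {
    private final List<String> addressBookStateList = new ArrayList<>();
    private int currentStatePointer;

    VersionedAddressBookSketch(String initialState) {
        addressBookStateList.add(initialState);
        currentStatePointer = 0;
    }

    /** Saves the current state and purges any states that were previously undone. */
    void commit(String newState) {
        addressBookStateList.subList(currentStatePointer + 1, addressBookStateList.size()).clear();
        addressBookStateList.add(newState);
        currentStatePointer++;
    }

    boolean canUndo() {
        return currentStatePointer > 0;
    }

    boolean canRedo() {
        return currentStatePointer < addressBookStateList.size() - 1;
    }

    String undo() {
        return addressBookStateList.get(--currentStatePointer); // shift pointer left
    }

    String redo() {
        return addressBookStateList.get(++currentStatePointer); // shift pointer right
    }
}
----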
Given below is an example usage scenario and how the undo/redo mechanism behaves at each step.
Step 1. The user launches the application for the first time. The `VersionedAddressBook` will be initialized with the initial address book state, and the `currentStatePointer` pointing to that single address book state.
image::UndoRedoStartingStateListDiagram.png[width="800"]
Step 2. The user executes `delete 5` command to delete the 5th person in the address book. The `delete` command calls `Model#commitAddressBook()`, causing the modified state of the address book after the `delete 5` command executes to be saved in the `addressBookStateList`, and the `currentStatePointer` is shifted to the newly inserted address book state.
image::UndoRedoNewCommand1StateListDiagram.png[width="800"]
Step 3. The user executes `add n/David ...` to add a new person. The `add` command also calls `Model#commitAddressBook()`, causing another modified address book state to be saved into the `addressBookStateList`.
image::UndoRedoNewCommand2StateListDiagram.png[width="800"]
[NOTE]
If a command fails its execution, it will not call `Model#commitAddressBook()`, so the address book state will not be saved into the `addressBookStateList`.
Step 4. The user now decides that adding the person was a mistake, and decides to undo that action by executing the `undo` command. The `undo` command will call `Model#undoAddressBook()`, which will shift the `currentStatePointer` once to the left, pointing it to the previous address book state, and restores the address book to that state.
image::UndoRedoExecuteUndoStateListDiagram.png[width="800"]
[NOTE]
If the `currentStatePointer` is at index 0, pointing to the initial address book state, then there are no previous address book states to restore. The `undo` command uses `Model#canUndoAddressBook()` to check if this is the case. If so, it will return an error to the user rather than attempting to perform the undo.
The following sequence diagram shows how the undo operation works:
image::UndoRedoSequenceDiagram.png[width="800"]
The `redo` command does the opposite -- it calls `Model#redoAddressBook()`, which shifts the `currentStatePointer` once to the right, pointing to the previously undone state, and restores the address book to that state.
[NOTE]
If the `currentStatePointer` is at index `addressBookStateList.size() - 1`, pointing to the latest address book state, then there are no undone address book states to restore. The `redo` command uses `Model#canRedoAddressBook()` to check if this is the case. If so, it will return an error to the user rather than attempting to perform the redo.
Step 5. The user then decides to execute the command `list`. Commands that do not modify the address book, such as `list`, will usually not call `Model#commitAddressBook()`, `Model#undoAddressBook()` or `Model#redoAddressBook()`. Thus, the `addressBookStateList` remains unchanged.
image::UndoRedoNewCommand3StateListDiagram.png[width="800"]
Step 6. The user executes `clear`, which calls `Model#commitAddressBook()`. Since the `currentStatePointer` is not pointing at the end of the `addressBookStateList`, all address book states after the `currentStatePointer` will be purged. We designed it this way because it no longer makes sense to redo the `add n/David ...` command. This is the behavior that most modern desktop applications follow.
image::UndoRedoNewCommand4StateListDiagram.png[width="800"]
The following activity diagram summarizes what happens when a user executes a new command:
image::UndoRedoActivityDiagram.png[width="650"]
==== Design Considerations
===== Aspect: How undo & redo executes
* **Alternative 1 (current choice):** Saves the entire address book.
** Pros: Easy to implement.
** Cons: May have performance issues in terms of memory usage.
* **Alternative 2:** Individual command knows how to undo/redo by itself.
** Pros: Will use less memory (e.g. for `delete`, just save the person being deleted).
** Cons: We must ensure that the implementation of each individual command are correct.
===== Aspect: Data structure to support the undo/redo commands
* **Alternative 1 (current choice):** Use a list to store the history of address book states.
** Pros: Easy for new Computer Science undergraduates, who are likely to be the new incoming developers of our project, to understand.
** Cons: Logic is duplicated. For example, when a new command is executed, we must remember to update both `HistoryManager` and `VersionedAddressBook`.
* **Alternative 2:** Use `HistoryManager` for undo/redo
** Pros: We do not need to maintain a separate list, and just reuse what is already in the codebase.
** Cons: Requires dealing with commands that have already been undone: We must remember to skip these commands. Violates Single Responsibility Principle and Separation of Concerns as `HistoryManager` now needs to do two different things.
// end::undoredo[]
// tag::dataencryption[]
=== Data Encryption
==== Current Implementation
The encrypt/decrypt mechanism is facilitated by `FileEncryptor`.
It extends `AddressBook` with an encrypt/decrypt feature, maintained by `PasswordCommand`.
Additionally, it implements the following operations:
* `FileEncryptor#process()` -- Decrypts or encrypts the data file depending on its current state (encrypted or decrypted).
* `FileEncryptor#decryptFile()` -- decrypts a file given the path and password.
* `FileEncryptor#encryptFile()` -- encrypts a file given the path and password.
Given below is an example usage scenario and how the password mechanism behaves at each step.
*Step 1.* The user enters the password command with a password.
[NOTE]
If the user enters a password which is not alphanumeric, an error will be thrown at the CommandResult box. Only alphanumeric passwords are supported by `FileEncryptor`.
*Step 2.* The user closes the address book.
*Step 3.* The user re-opens the address book. No data will be shown as the `XML data file` is technically not present in the data folder.
*Step 4.* The user enters the password command with the right password. Address book will be refreshed and restored back to its former state (before encryption).
[NOTE]
If the user enters the wrong password , an error will be thrown at the CommandResult box.
==== Design Considerations
===== Aspect: How encryption and decryption is done
1. The `PBEKeySpec` is first specified using the "PBEWithMD5AndDES" specification.
2. A secret key is generated from `SecretKeyFactory` using "PBEWithMD5AndDES" cipher.
3. A `Cipher` is then used to encrypt or decrypt the file with a given password and key specifications.
4. Additional salt is used in the password to ensure that the password cannot be easily broken down by dictionary attacks.
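Given below is a simplified sketch of this encryption/decryption flow; it is illustrative only and not CorpPro's actual `FileEncryptor`.

[source,java]
----
import javax.crypto.Cipher;
import javax.crypto.SecretKey;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.PBEParameterSpec;

public class PbeSketch {
    // Illustrative salt; a real implementation would manage its salt more carefully.
    private static final byte[] SALT = {
        (byte) 0xc7, (byte) 0x73, (byte) 0x21, (byte) 0x8c,
        (byte) 0x7e, (byte) 0xc8, (byte) 0xee, (byte) 0x99
    };

    /** Encrypts or decrypts data, depending on whether cipherMode is ENCRYPT_MODE or DECRYPT_MODE. */
    public static byte[] process(byte[] data, String password, int cipherMode) throws Exception {
        PBEKeySpec keySpec = new PBEKeySpec(password.toCharArray());            // 1. key specification
        SecretKey key = SecretKeyFactory.getInstance("PBEWithMD5AndDES")
                .generateSecret(keySpec);                                       // 2. secret key
        Cipher cipher = Cipher.getInstance("PBEWithMD5AndDES");                 // 3. cipher
        cipher.init(cipherMode, key, new PBEParameterSpec(SALT, 20));           // 4. salted parameters
        return cipher.doFinal(data);
    }
}
----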
===== Aspect: Pros and cons of tight security
** Pros: Your data is protected and it will be near impossible to use any third-party tool to crack the data file.
** Cons: Data will be *permanently* lost if you forget the password.
====== Encrypting the address book:
*Step 1.* The user executes `password test` to encrypt the address book with `test` as the password. +
*Step 2.* `PasswordCommandParser` checks the validity of the input password (i.e. whether it is alphanumeric) +
*Step 3.* If the password is acceptable, it is passed to the `PasswordCommand` object +
*Step 4.* Within the `PasswordCommand` object, a new `FileEncryptor` object is created and it will check *if* the address book is currently in a locked state +
*Step 5.* If it is not currently locked, it will create a cipher and begin encrypting the address book with the input password. +
*Step 6.* Previous `addressbook.xml` will be deleted whereas a new `addressbook.xml.encrypted` file will be created. +
*Step 7.* A new `emptyPredicate` object will be instantiated and `model.updateFilteredPersonList(emptyPredicate)` will be called to clear the address book list. +
*Step 8.* A `CommandResult` object will be created to notify the user that the encryption was successful +
====== Accessing commands post encryption:
*Step 1.* The user executes `list` to list out all the contacts in the address book. +
*Step 2.* The user input is parsed by `AddressBookParser` which creates a new `ListCommandParser` object. +
*Step 3.* The arguments are then parsed by the `ListCommandParser`. +
*Step 4.* `ListCommandParser` then checks the validity of the arguments before it creates the `ListCommand` object. +
*Step 5.* `ListCommand` creates a `FileEncryptor` object to check if the address book is in a locked state by calling the `isLocked()` method. +
*Step 6.* `isLocked()` will return true. +
*Step 7.* A `CommandException` will be thrown to warn the user that the address book is in a locked state.
image::passwordCommand_seq.png[width="500"]
Figure 1. Interactions inside the logic component for the `password` command.
// end::dataencryption[]
=== Logging
We are using `java.util.logging` package for logging. The `LogsCenter` class is used to manage the logging levels and logging destinations.
* The logging level can be controlled using the `logLevel` setting in the configuration file (See <<Implementation-Configuration>>)
* The `Logger` for a class can be obtained using `LogsCenter.getLogger(Class)` which will log messages according to the specified logging level
* Currently log messages are output through: `Console` and to a `.log` file.
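For example, a class would typically obtain and use its logger as in the simplified sketch below (assuming the `LogsCenter` API described above).

[source,java]
----
import java.util.logging.Logger;
import seedu.address.commons.core.LogsCenter;

public class SampleComponent {
    private static final Logger logger = LogsCenter.getLogger(SampleComponent.class);

    public void doWork() {
        logger.info("Starting a noteworthy action");        // shown at INFO level and above
        logger.fine("Detailed state useful for debugging");  // shown only when FINE is enabled
    }
}
----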
*Logging Levels*
* `SEVERE` : Critical problem detected which may possibly cause the termination of the application
* `WARNING` : Can continue, but with caution
* `INFO` : Information showing the noteworthy actions by the App
* `FINE` : Details that are not usually noteworthy but may be useful in debugging e.g. print the actual list instead of just its size
[[Implementation-Configuration]]
=== Configuration
Certain properties of the application can be controlled (e.g App name, logging level) through the configuration file (default: `config.json`).
// tag::textprediction[]
=== Text Prediction
==== Current Implementation
The text prediction feature is facilitated mainly by 2 classes, `CommandCompleter` and `Trie`. `Trie` contains the underlying data structure with a set of operations that allows predictions to be retrieved. `CommandCompleter` manages several instances of that data structure such that predictions of different attributes (name, email, address, etc.) and command keywords (add, edit, etc.) can be made. +
For `Trie`, the following operations are implemented:
* `Trie#insert(String value)` -- inserts a string into the data structure.
* `Trie#remove(String value)` -- removes a string from the data structure.
* `Trie#getPredictList(String prefix)` -- retrieves a list of predicted string values that completes the prefix.
For `CommandCompleter`, the following operations are implemented:
* `CommandCompleter#insertPerson()` -- inserts a Person’s attributes into respective data structure instances.
* `CommandCompleter#removePerson()` -- removes a Person’s attributes from respective data structure instances.
* `CommandCompleter#editPerson()` -- edits a Person’s attributes in each data structure instances.
* `CommandCompleter#predictText()` -- predicts a list of possible text outputs that will complete and append to the given text input.
* `CommandCompleter#clearData()` -- clears all data structure instances of their data.
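To make the `Trie` operations above more concrete, given below is a simplified prefix-tree sketch that supports insertion and prefix-based prediction; it is illustrative only and not the actual CorpPro `Trie`.

[source,java]
----
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class TrieSketch {
    private final Map<Character, TrieSketch> children = new HashMap<>();
    private boolean isEndOfWord;

    /** Inserts a string into the trie. */
    void insert(String value) {
        TrieSketch node = this;
        for (char c : value.toCharArray()) {
            node = node.children.computeIfAbsent(c, k -> new TrieSketch());
        }
        node.isEndOfWord = true;
    }

    /** Returns the strings that complete the given prefix (the remainders only). */
    List<String> getPredictList(String prefix) {
        TrieSketch node = this;
        for (char c : prefix.toCharArray()) {
            node = node.children.get(c);
            if (node == null) {
                return new ArrayList<>(); // nothing matches this prefix
            }
        }
        List<String> predictions = new ArrayList<>();
        collect(node, new StringBuilder(), predictions);
        return predictions;
    }

    private void collect(TrieSketch node, StringBuilder path, List<String> out) {
        if (node.isEndOfWord) {
            out.add(path.toString());
        }
        for (Map.Entry<Character, TrieSketch> entry : node.children.entrySet()) {
            path.append(entry.getKey());
            collect(entry.getValue(), path, out);
            path.deleteCharAt(path.length() - 1);
        }
    }
}
----
For example, after inserting `Alex Yeoh` and `Alex Tan`, calling `getPredictList("Alex ")` would return the remainders `Yeoh` and `Tan`.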
Given below is an example usage scenario and how the text prediction mechanism behaves on a high level. For low level implementation details, refer to <<TextPredictionDetails>>. +
**Step 1.** User launches the application. `CommandCompleter` is instantiated and will initialise all `Trie` instances with all command keywords and every existing contact’s (aka Person) attributes (name, phone, address, etc.).
**Step 2.** The user keys in `find n/Al` into the command box and presses kbd:[Tab]. This will invoke `predictText()`. The method will subsequently determine (assisted by several helper classes) which `Trie` instance to retrieve the predictions from. That `Trie` instance then returns a list of possible predictions, in this case, a list containing `ex Yeoh`.
**Step 3.** The user decides to add a new contact with the name _Alex Tan_ using the `add` command. In the `add` logic, the method `Model#insertPersonIntoPrediction(Person person)` will be called which will call the `insertPerson()` method. This method inserts the contact’s (Alex Tan) attributes into the respective `Trie` instances.
**Step 4.** The user decides to predict `find n/Al` again. The logic sequence is similar to Step 2. However, since a new contact was added in Step 3, instead of returning `ex Yeoh` only, the returning list will contain both `ex Yeoh` and `ex Tan`.
**Step 5.** The user now decides to remove the contact with the name _Alex Yeoh_ from the address book using the `delete 1` command. The `removePerson()` method will be called and removes that contact’s (Alex Yeoh) attributes from the respective `Trie` instances.
**Step 6.** The user decides to predict `find n/Al` again. Similar to Step 4 except that Alex Yeoh has been removed, the list returned will contain only `ex Tan`.
**Step 7.** The user decides to clear the address book with `clear` command which invokes `clearData()` method. This clears all attributes data in all `Trie` instances.
**Step 8.** The user wants to predict `find n/Al` again. However since all data were cleared, the returned predictions list contains no entries. No predictions are displayed in feedback panel.
The sequence diagram below demonstrates how the `predictText()` method will work:
image::predictText_sequence.png[]
==== Design Considerations
===== Aspect: Data structure used to support text prediction
* **Alternative 1 (current choice):** Use a Directed Acyclic Graph to store strings.
** Pros: Results in greater computational efficiency than the naive approach (Alternative 2).
** Cons: Much more difficult to implement and prone to bugs.
* **Alternative 2:** Use a simple list to store strings.
** Pros: Much easier and simpler to implement.
** Cons: Inefficient and takes a longer time to retrieve the strings. Since the address book can potentially contain a large number of contacts, retrieval of strings may take too long, which results in a slow application.
// end::textprediction[]
// tag::mail[]
=== Email Contacts
==== Current Implementation
The email feature is facilitated by the `MailCommand` class. It is assisted by the `MailCommandParser` class to determine the `mailType` (who to email to) based on the user input. It is also supported by the Java built-in `Desktop` class to open the user system’s email application using the mail type as the recipients. It implements the following operation:
* `MailCommand#execute()` - opens user system’s email application based on the `mailType`.
Given below is an example usage scenario and how the email mechanism behaves at each step: +
Step 1. The user selects contact with the index 1 using the `select` command.
Step 2. The user inputs `mail` into the command box and executes the command. `execute()` checks if the user system is supported by the Java _Desktop_ library and throws an exception if unsupported. In this case, the `mailType` is contacts selection type. Hence, `MailCommand#mailToSelection()` is invoked. The Uniform Resource Identifier (URI) is built and `Desktop#mail(URI uri)` is called, which opens up the system’s email application.
NOTE: If the user decides to email all contacts with the tag `coworker` instead, the user will execute `mail t/coworker` in the command box. Similar to Step 2, except that the logic calls `MailCommand#mailToGroups(Model model, Tag tag)` with the input tag as parameter. URI is built with contacts containing the specified tag and the email application opens up.
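Given below is a simplified sketch of how the system's email application can be opened through the Java `Desktop` class; the recipient addresses are placeholders.

[source,java]
----
import java.awt.Desktop;
import java.net.URI;

public class MailSketch {
    public static void main(String[] args) throws Exception {
        if (!Desktop.isDesktopSupported()
                || !Desktop.getDesktop().isSupported(Desktop.Action.MAIL)) {
            throw new IllegalStateException("Mailing is not supported on this system");
        }
        // Build a mailto URI from the (placeholder) recipient addresses
        URI mailTo = new URI("mailto:alex@example.com,bernice@example.com");
        Desktop.getDesktop().mail(mailTo); // opens the default email application
    }
}
----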
// end::mail[]
// tag::backuprestore[]
=== Back Up and Restore
==== Current implementation of backup
Creation of backups is done in `BackUpCommand` class. It extends the `Command` class with an overriding execute function. +
===== Model
This feature implements a new method in `Model` +
* `Model#backUpAddressbook(Path)` -- Saves a copy of the address book into the path in a `XML` format. +
==== Current implementation of Listing of backup snapshots
Listing of the snapshots of backups is done with the `RestoreSnapshotsCommand` class. It also extends the `Command` class. +
===== BackupList
The `BackupList` object holds a map of files with indexes as its keys. It also has an array of `String`. The position of the strings corresponds to the index of the file it represents in the map. +
* `BackupList#getFileName()` -- returns a list of the names of the snapshots. +
* `BackupList#getFileMap()` -- returns a map of the snapshots, each denoted with an `INDEX` +
* `BackupList#millisToDateAndTime(String fileName)` -- converts a timestamp in milliseconds to a formatted date and time format. +
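For illustration, converting a millisecond timestamp embedded in a snapshot file name into a readable date and time could look like the sketch below; the file-name and date formats shown are assumptions, not CorpPro's actual ones.

[source,java]
----
import java.text.SimpleDateFormat;
import java.util.Date;

public class TimestampSketch {
    /** Converts a timestamp in milliseconds into a formatted date and time string. */
    static String millisToDateAndTime(String fileName) {
        long millis = Long.parseLong(fileName.replace(".xml", "")); // e.g. "1541512800000.xml"
        return new SimpleDateFormat("dd/MM/yyyy HH:mm:ss").format(new Date(millis));
    }

    public static void main(String[] args) {
        System.out.println(millisToDateAndTime("1541512800000.xml"));
    }
}
----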
==== Current implementation of Restoring from the list
The restoration of backup snapshots is done in the `RestoreCommand` class. It extends the `Command` class with an overriding execute function. +
===== Model
This feature implements a new method in `Model` +
* `Model#replaceData(Path path)` -- Overrides the current address book information with another `XML` file. +
==== Usage Scenarios
Given below is the sequence of actions done by the address book when you back up and restore your data. +
===== Saving a Backup
*Step 1.* The user executes `backup` to create a backup snapshot +
*Step 2.* The user input is first parsed by `AddressBookParser` which creates a new `BackupCommand` object. +
*Step 3.* `BackupCommand` is executed and calls `Model#backUpAddressbook(Path)` with the path of the `.backup` folder as the argument. This creates a copy of the address book data in the folder as an `XML` file. +
*Step 4.* An `AddressBookStorage` will be created in the `Model#backUpAddressbook(Path)` method which facilitates the saving of the data of address book. +
*Step 5.* `saveAddressBook()` will be called to save the data of the address book. +
[NOTE]
`AddressBookStorage` will check if the `.backup` folder exists. If not, it will create the folder. +
The following sequence diagrams show how the backup operation works: +
image::backupSequenceDiagramLogic.png[width="900"]
Figure 1. Interactions inside the Logic component for the `backup` command.
image::backupSequenceDiagramStorage.png[Height="200"]
Figure 2. Interactions inside the Storage component for the `backup` command.
===== Listing all the snapshots
*Step 1.* The user executes `restore-snapshots` to get a list of all backup snapshots +
*Step 2.* The user input is first parsed by `AddressBookParser` which creates a new `RestoreSnapshotsCommand` object. +
*Step 3.* `RestoreSnapshotsCommand` is executed and calls `readBackupList(String)` with the directory of the `.backup` folder as the argument. This reads a `BackupList` from the given destination path. A list of formatted file names will be created which will be shown to the user. +
The following sequence diagram shows how the restore-snapshots operation works: +
image::restoreSnapshotsSequenceDiagramLogic.png[width="900"]
Figure 3. Interactions inside the Logic component for the `restore-snapshots` command.
===== Restoring from a backup
*Step 1.* The user executes `restore 1` to restore their data with a backup snapshot denoted by an `index of 1` +
*Step 2.* The user input is first parsed by `AddressBookParser` which creates a new `RestoreCommandParser` object. +
*Step 3.* The argument, `1`, is then parsed by `RestoreCommandParser`. +
*Step 4.* `RestoreCommandParser` checks the validity of the index as input by the user. It then creates a
`BackupList` object. +
[NOTE]
If the index is not valid, an error would be returned to the user instead of creating a `RestoreCommand` object.
*Step 5.* `RestoreCommandParser` then creates a new `RestoreCommand` with the `BackupList` and index as its arguments. +
*Step 6.* `RestoreCommand` is executed and calls `Model#replaceData(Path)` which replaces the data of the address book with the `XML` file denoted by the `Path`.
[NOTE]
Before calling `Model#replaceData(Path path)`, the address book is checked if it is encrypted with a password, via `FileEncryptor`.
If the address book is locked, an error would be displayed to the user instead of carrying on with the command.
*Step 7.* A `XmlAddressBookStorage` will be initialised which helps to read the data of the `XML` file. +
*Step 8.* The address book will be overwritten with the new data by calling `resetData` with the new data as its argument. +
image::restoreSequenceDiagramLogic.png[width="900"]
Figure 4. Interactions inside the Logic component for the `restore 1` command.
image::restoreSequenceDiagramStorage.png[Height="200"]
Figure 5. Interactions inside the Storage component for the `restore 1` command.
// end::backuprestore[]
// tag::exportimport[]
=== Export and Import
==== Current implementation of Export
The export function is facilitated by the `ExportCommand` class. It extends the `Command` class with an overriding execute function. +
===== CsvWriter
`CsvWriter` is an object that takes in the path where the export is to be saved and writes a CSV file into that path. +
The conversion is as follows: +
*Step 1.* The constructor converts an `ObservableList<Person>` into a `List<Person>`. +
*Step 2.* The `convertToCsv()` method will be called in `ExportCommand` and a new CSV file will be written with the content of the created `List<Person>`. +
*Step 3.* This file will be created in the path. +
==== Current implementation of Import
The import function is implemented with the `ImportCommand` class. It extends the `Command` class with an overriding execute method. +
===== CsvReader
`CsvReader` is an object that takes in a CSV file and converts it into a list of persons. +
The conversion is as follows: +
*Step 1.* The constructor reads a CSV file line by line. +
*Step 2.* The `convertToList` method formats the file into a `Model` friendly format. +
*Step 3.* It then converts the strings into persons. +
*Step 4.* These persons are then stored in a `List<Person>` and returned. +
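Given below is a minimal sketch of such a CSV round trip; the column layout is an assumption for illustration and not CorpPro's actual format.

[source,java]
----
import java.util.ArrayList;
import java.util.List;

public class CsvSketch {
    /** Writes one CSV line per person (assumed columns: name, phone, email). */
    static List<String> convertToCsv(List<String[]> persons) {
        List<String> lines = new ArrayList<>();
        lines.add("Name,Phone,Email"); // header row
        for (String[] person : persons) {
            lines.add(String.join(",", person));
        }
        return lines;
    }

    /** Reads CSV lines (skipping the header) back into person fields. */
    static List<String[]> convertToList(List<String> csvLines) {
        List<String[]> persons = new ArrayList<>();
        for (String line : csvLines.subList(1, csvLines.size())) {
            persons.add(line.split(","));
        }
        return persons;
    }
}
----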
==== Usage Scenarios
Given below is the sequence of actions done by the address book when you export and import an address book. +
===== Exporting your address book
*Step 1.* The user executes `export d/C:\Users\USER\Desktop\DemoExport f/test` to export their data to the directory with the file name the user wants. +
*Step 2.* The user input is first parsed by `AddressBookParser` which creates a new `ExportCommandParser` object. +
*Step 3.* The argument, `d/C:\Users\USER\Desktop\DemoExport f/test`, is then parsed by `ExportCommandParser`. +
*Step 4.* `ExportCommandParser` checks the validity of the directory as input by the user. It then creates a
`ExportCommand` object with the directory and file name as its arguments. +
[NOTE]
If the directory is not valid or a file with the same file name exists in the directory, an error would be returned to the user instead of creating an `ExportCommand` object. +
*Step 5.* `ExportCommand` is executed and creates a `CsvWriter` object. The `CsvWriter` object is created with a copy of the address book taken from `Model` +
*Step 6.* `convertToCsv(String directory)` is called with the full directory as the argument which writes the data of the address book to a file of `CSV` format. +
[NOTE]
Before calling `convertToCsv(String directory)`, the address book is checked if it is encrypted with a password, via `FileEncryptor`.
If the address book is locked, an error would be displayed to the user instead of carrying on with the command.
The following sequence diagrams show how the export operation works: +
image::exportSequenceDiagramLogic.png[width="900"]
Figure 1. Interactions inside the Logic component for the `export d/C:\Users\USER\Desktop\DemoExport f/test` command.
image::exportSequenceDiagramStorage.png[Height="200"]
Figure 2. Interactions inside the Storage component for the `export d/C:\Users\USER\Desktop\DemoExport f/test` command.
===== Importing to your address book
*Step 1.* The user executes `import d/C:\Users\USER\Desktop\DemoExport f/test` to import their data from the directory with the name of the file. +
*Step 2.* The user input is first parsed by `AddressBookParser` which creates a new `ImportCommandParser` object. +
*Step 3.* The argument, `d/C:\Users\USER\Desktop\DemoExport f/test`, is then parsed by `ImportCommandParser`. +
*Step 4.* `ImportCommandParser` checks the validity of the directory as input by the user. It then creates a
`ImportCommand` object with the directory and file. +
[NOTE]
If the directory is not valid or a file with the given name does not exist in the directory, an error would be returned to the user instead of creating an `ImportCommand` object.
[NOTE]
If the file is of the wrong format, an error would also be returned to the user instead of creating an `ImportCommand` object.
*Step 5.* `ImportCommand` is executed and creates a `CsvReader` object. The `CsvReader` object is created with the identified file. +
*Step 6.* `convertToList()` is called which creates a list of persons that can be added. +
[NOTE]
Before calling `convertToList()`, the address book is checked if it is encrypted with a password, via `FileEncryptor`.
If the address book is locked, an error would be displayed to the user instead of carrying on with the command.
*Step 7.* There is a loop that iterates through the list and adds the persons into the address book. All duplicated persons will be skipped. +
The following sequence diagram shows how the import operation works: +
image::importSequenceDiagramLogic.png[width="900"]
Figure 3. Interactions inside the Logic component for the `import d/C:\Users\USER\Desktop\DemoExport f/test` command.
// end::exportimport[]
// tag::search[]
=== Find and Search
==== Current Implementation
The find function has been revamped to support search guessing and search by attributes. +
`FindCommand` is now backed by the `ClosestMatchList` class, which uses `LevenshteinDistanceUtil` and `HammingDistanceUtil` to generate a set of `Person` attributes ordered by similarity.
==== Design Considerations
===== Aspect: How find command executes
* **Alternative 1:** Find using only predicates
** Pros: Easy to implement.
** Cons: Search must be exact, cannot have typos or incomplete keywords
* **Alternative 2:** Store the search results in a `TreeMap` ordered by their Levenshtein or Hamming distances from the search keyword
** Pros: Will also consider searches that are similar to what we want and will account for typos or incomplete keywords
** Cons: Added complexities in finding and searching, can be vague when searching for number attributes
* **Alternative 3 (current choice):** Same as alternative 2 but we use *Hamming distance* for phone numbers and KPI attributes instead.
** Pros: Phone number and KPI searches are now more precise
** Cons: Added complexities in finding and searching
===== Aspect: Expanded features of find command
* **Alternative 1:** Find only by name
** Pros: Easy to implement.
** Cons: Can only search by name of addressees
* **Alternative 2:** Find by attributes
** Pros: Can search by email, phone, address, etc instead of just the name of addressees
** Cons: Can only search for one attribute at a time (i.e find by name or find by email)
* **Alternative 3 (current choice):** Chain-able find attributes
** Pros: Can search by email and phone and address, etc instead of just one at a time
** Cons: Added complexities in find command
===== Aspect: Data structure to support the revamped Find command
A `TreeMap` was used to store the search results ordered by their Levenshtein or Hamming distances. +
The results are then filtered, and those furthest away from the top few are ignored. The searches will then be passed through their respective predicates
(`NameContainsKeywordsPredicate`, `AddressContainsKeywordsPredicate`, `EmailContainsKeywordsPredicate`, `KpiContainsKeywordPredicate`, `NoteContainsKeywordsPredicate`, `PhoneContainsKeywordPredicate`, `PositionContainsKeywordsPredicate`, `TagContainsKeywordsPredicate`)
before filtering the list.
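To illustrate how such distance-based ranking could work, given below is a simplified sketch using Levenshtein distance and a `TreeMap`; it is not the actual `ClosestMatchList` implementation.

[source,java]
----
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class ClosestMatchSketch {
    /** Classic dynamic-programming Levenshtein distance. */
    static int levenshtein(String a, String b) {
        int[][] dp = new int[a.length() + 1][b.length() + 1];
        for (int i = 0; i <= a.length(); i++) {
            for (int j = 0; j <= b.length(); j++) {
                if (i == 0) {
                    dp[i][j] = j;
                } else if (j == 0) {
                    dp[i][j] = i;
                } else {
                    int cost = a.charAt(i - 1) == b.charAt(j - 1) ? 0 : 1;
                    dp[i][j] = Math.min(Math.min(dp[i - 1][j] + 1, dp[i][j - 1] + 1),
                            dp[i - 1][j - 1] + cost);
                }
            }
        }
        return dp[a.length()][b.length()];
    }

    public static void main(String[] args) {
        String keyword = "Alex";
        List<String> names = List.of("Alex Yeoh", "Bernice Yu", "Aleck Tan");
        // TreeMap keeps candidates ordered by distance from the keyword.
        // (Equal distances overwrite each other here; a real implementation keeps all of them.)
        Map<Integer, String> ranked = new TreeMap<>();
        for (String name : names) {
            ranked.put(levenshtein(keyword.toLowerCase(), name.toLowerCase()), name);
        }
        System.out.println(ranked); // closest matches appear first
    }
}
----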
====== Searching for a contact:
*Step 1.* The user executes `find a/Clementi t/owesMoney` to find all contacts staying in Clementi and bearing the `owesMoney` tag. +
*Step 2.* The user input is parsed by `AddressBookParser` which creates a new `FindCommandParser` object. +
*Step 3.* The arguments `a/Clementi t/owesMoney` are then parsed by the `FindCommandParser`. +
*Step 4.* `FindCommandParser` then checks the validity of the arguments before it creates the `FindCommand` object. +
*Step 5.* `FindCommand` then proceeds to create `ClosestMatchList` objects. +
*Step 6.* It uses the list of keywords obtained from `ClosestMatchList` to create `AddressContainsKeywordsPredicate` and `TagContainsKeywordsPredicate`. +
*Step 7.* These predicates are combined into a `combinedPredicate` object using the "AND" operation. +
*Step 8.* The model is then updated by calling `model.updateFilteredPersonList(combinedPredicate)` with the combined predicate obtained in *Step 7.* +
*Step 9.* A `CommandResult` object will be created and an internal method `findActualMatches()` will be called to generate a string of keywords that are exact matches and keywords that are guessed. +
image::findFeature_seq.png[width="900"]
Figure 1. Interactions inside the logic component for the `find a/Clementi t/owesMoney` command.
image::closestMatchList_seq.png[width="500"]
Figure 2. Interactions inside the `ClosestMatchList` class
// end::search[]
// tag::schedule[]
=== Schedule
==== Current implementation
Updating the *Schedule* is facilitated by the `ScheduleCommand` class which extends the `Command` class.
`ScheduleAddCommand`, `ScheduleEditCommand` and `ScheduleDeleteCommand` further extends `ScheduleCommand` to add, edit and
delete entries in the *Schedule* respectively.
===== Activity
Each entry in the *Schedule* is an `Activity`. It consists of a `String`, which holds the name of the activity,
and a `Date`, on which the activity is due.
===== Schedule
`Schedule` is implemented with a `TreeMap`. It has the `Date` of activities as its `key` and a `list` of activities, which is due on the same
date, as its `value`.
Additionally, it implements the following main operations:
* `Schedule#add(Activity activity)` -- Add an `Activity` to `schedule`. +
* `Schedule#delete(Activity activity)` -- Deletes an `Activity` from `schedule`. +
* `Schedule#update(Activity target, Activity editedActivity)` -- Updates/edits an `Activity` in the `schedule`. `target` is the activity to be edited. `editedActivity` is the new, changed, activity. +
* `Schedule#setSchedule(List<Activity> activities)` -- Sets `schedule` from a `list` of activities. This operation is executed when importing data from an `XML` file.
This happens when you first start *CorpPro*.
* `Schedule#getSchedule()` -- Returns the schedule with activities sorted by `Date` to be displayed in the GUI by `SchedulePanel` in the `UI` component.
The schedule is instantiated in the `AddressBook` and has the main operations exposed in the `Model` interface as follows:
* `Model#addActivity(Activity activity)` -- Exposes `Schedule#add(Activity activity)` .
* `Model#deleteActivity(Activity activity)` -- Exposes `Schedule#delete(Activity activity)`.
* `Model#updateActivity(Activity target, Activity editedActivity)` -- Exposes `Schedule#update(Activity target, Activity editedActivity)`.
* `Model#getSchedule()` -- Exposes `Schedule#getSchedule()`.
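Given below is a simplified sketch of such a `TreeMap`-backed schedule; `Activity` is reduced to a date and a name for illustration and is not CorpPro's actual class.

[source,java]
----
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.TreeMap;

class ScheduleSketch {
    // Keys (dates) are kept sorted automatically by the TreeMap.
    private final TreeMap<Date, List<Activity>> schedule = new TreeMap<>();

    void add(Activity activity) {
        schedule.computeIfAbsent(activity.getDate(), d -> new ArrayList<>()).add(activity);
    }

    void delete(Activity activity) {
        List<Activity> sameDay = schedule.get(activity.getDate());
        if (sameDay != null && sameDay.remove(activity) && sameDay.isEmpty()) {
            schedule.remove(activity.getDate()); // drop empty dates
        }
    }

    void update(Activity target, Activity editedActivity) {
        delete(target);
        add(editedActivity);
    }

    /** Returns all activities, already ordered by date. */
    List<Activity> getSchedule() {
        List<Activity> all = new ArrayList<>();
        schedule.values().forEach(all::addAll);
        return all;
    }
}

class Activity {
    private final Date date;
    private final String name;

    Activity(Date date, String name) {
        this.date = date;
        this.name = name;
    }

    Date getDate() {
        return date;
    }
}
----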
===== Storage
In addition to the CRUD (create, read, update and delete) functions, the schedule is also saved to an `XML` file
whenever you update it. This is facilitated by `XmlAdaptedActivity` which stores an `Activity` in an `XML` format.
`XmlSerializableAddressBook` then appends each `XmlAdaptedActivity` into a list and is saved in `addressbook.xml`.
===== Usage scenarios
Given below are examples of usage scenarios of how the schedule behaves when you carry out `schedule` commands. +
====== Adding an activity:
*Step 1.* The user executes `schedule-add d/01/01/2018 a/Complete report.` to add an activity to their schedule. +
*Step 2.* The user input is first parsed by `AddressBookParser` which creates a new `ScheduleAddCommandParser` object. +
*Step 3.* The arguments, `d/01/01/2018 a/Complete report.`, are then parsed by `ScheduleAddCommandParser`. +
*Step 4.* `ScheduleAddCommandParser` checks the validity of the date and activity as input by the user. It then creates an
`Activity` object. +
[NOTE]
If the date or the activity name is not valid, an error would be returned to the user instead of creating an `Activity` object.
*Step 5.* `ScheduleAddCommandParser` then creates a new `ScheduleAddCommand` with the `activity` as its argument. +
*Step 6.* `ScheduleAddCommand` is executed and calls `Model#addActivity(activity)` which creates the `activity` in the `schedule`.
[NOTE]
Before calling `Model#addActivity(activity)`, the address book is checked if it is encrypted with a password, via `FileEncryptor`.
If the address book is locked, an error would be displayed to the user instead of carrying on with the command.
*Step 7.* `indicateAddressBookChanged()` is called within `Model#addActivity(activity)` to raise an `AddressBookChangedEvent`, that the information within the
address book is changed. +
*Step 8.* The `UI` object, `schedulePanel` which is subscribed to the event, receives this updated information and updates
the display to show the correct information to the user. +
The following sequence diagrams show how the add operation works: +
image::scheduleAddSequenceDiagramLogic.png[width="900"]
Figure 1. Interactions inside the Logic component for the `schedule-add d/01/01/2018 a/Complete report.` command.
[NOTE]
The figure above illustrates the sequence from *Step 1.* to *Step 5.* +
image::scheduleAddSequenceDiagramStorage.png[width="650"]
Figure 2. Interactions inside the Storage component for the `schedule-add d/01/01/2018 a/Complete report.` command.
[NOTE]
The figure above illustrates the sequence of *Step 6.* and *Step 7.* +
image::scheduleAddSequenceDiagramEvent.png[width="650"]
Figure 3. Interactions between the `EventCenter`, `UI` and `Storage` components for the `schedule-add d/01/01/2018 a/Complete report.` command.
[NOTE]
The figure above illustrates the sequence of *Step 8.* +
[NOTE]
Storage of each activity is facilitated by `XmlAdaptedActivity` which stores an `Activity` in an `XML` format. `XmlSerializableAddressBook` then appends each `XmlAdaptedActivity` into a list and is saved in `addressbook.xml`.
====== Editing an activity:
*Step 1.* The user executes `schedule-edit 2 a/Interview intern.` to edit an activity at `INDEX 2` in their schedule. +
*Step 2.* The user input is first parsed by `AddressBookParser` which creates a new `ScheduleEditCommandParser` object. +
*Step 3.* The arguments, `2 a/Interview intern.`, are then parsed by `ScheduleEditCommandParser`. +
*Step 4.* `ScheduleEditCommandParser` checks the validity of the index and activity as input by the user. +
[NOTE]
If the activity name or index is not valid, an error would be returned to the user instead of editing
an activity.
*Step 5.* `ScheduleEditCommandParser` then creates a new `ScheduleEditCommand` with the `INDEX` and the new activity
`String` as its argument. +
*Step 6.* `ScheduleEditCommand` gets `target`, the activity to be edited, via `ScheduleCommand#getActivityFromIndex(model, index)` and creates `editedActivity`, the new activity.
It then calls `Model#updateActivity(target, editedActivity)`.
[NOTE]
If the index is not valid, i.e. out of range of the displayed schedule, an error would be returned to the user instead of editing
an activity.
[NOTE]
Before calling `Model#updateActivity(target, editedActivity)`, the address book is checked if it is encrypted with a password, via `FileEncryptor`.
If the address book is locked, an error would be displayed to the user instead of carrying on with the command.
*Step 7.* `Model#updateActivity(target, editedActivity)` updates the corresponding `activity` in the `schedule`.
`indicateAddressBookChanged()` is called within `Model#updateActivity(target, editedActivity)` to raise an `AddressBookChangedEvent` that the information within the
address book is changed. +
*Step 8.* The `UI` object, `schedulePanel` which is subscribed to the event, receives this updated information and
updates the display to show the correct information to the user. +
The following sequence diagrams show how the edit operation works:
image::scheduleEditSequenceDiagramLogic.png[width="900"]
Figure 4. Interactions inside the Logic component for the `schedule-edit 2 a/Interview intern.` command.
[NOTE]
The figure above illustrates the sequence from *Step 1.* to *Step 6.* +
image::scheduleEditSequenceDiagramStorage.png[width="650"]
Figure 5. Interactions inside the Model component for the `schedule-edit 2 a/Interview intern.` command.
[NOTE]
The figure above illustrates the sequence of *Step 7.* +
The interactions between the `EventCenter`, `UI` and `Storage` components for `editing an activity` (*Step 8.*) are similar to *adding an activity* (see *Figure 3.*).
====== Deleting an activity:
*Step 1.* The user executes `schedule-delete 2` to delete the activity at `INDEX 2` in their schedule. +
*Step 2.* The user input is first parsed by `AddressBookParser` which creates a new `ScheduleDeleteCommandParser` object. +
*Step 3.* `ScheduleDeleteCommandParser` checks the validity of the index as input by the user.
*Step 4.* `ScheduleDeleteCommandParser` then creates a new `ScheduleDeleteCommand` with the `INDEX` as its argument. +
*Step 5.* `ScheduleDeleteCommand` gets the `activity`, to be deleted, via `ScheduleCommand#getActivityFromIndex(model, index)` and calls
`Model#deleteActivity(activity)`.
[NOTE]
If the index is not valid, i.e. out of range of the displayed schedule, an error would be returned to the user instead of deleting
an activity.
[NOTE]
Before calling `Model#deleteActivity(activity)`, the address book is checked if it is encrypted with a password, via `FileEncryptor`.
If the address book is locked, an error would be displayed to the user instead of carrying on with the command.
*Step 6.* `Model#deleteActivity(activity)` deletes the corresponding `activity` from the `schedule`. `indicateAddressBookChanged()` is called
within `Model#deleteActivity(activity)` to raise an `AddressBookChangedEvent`, that the information within the
address book is changed. +
*Step 7.* The `UI` object, `schedulePanel` which is subscribed to the event, receives this updated information and updates
the display to show the correct information to the user. +
The sequence of deleting an activity is similar to editing an activity. Instead of updating the activity, it is deleted (see *Figure 4.* and *Figure 5.*).
The interactions between the `EventCenter`, `UI` and `Storage` components for *deleting an activity* (*Step 7.*) are similar to *adding an activity* (see *Figure 3.*).
==== Design Considerations
===== Aspect: Data structure of Schedule
* **Alternative 1:** `List` of Activities
** Pros: Easy to implement.
** Cons: Need to sort each activity by its date whenever the schedule is updated.
** Cons: Larger time complexity.
* **Alternative 2 (Current choice):** `TreeMap` of Activities
** Pros: Activities are automatically sorted by their dates whenever the schedule is updated.
** Pros: Faster time complexity.
** Cons: Harder to implement.
** Cons: Larger space complexity.
===== Aspect: Date of Activity
* **Alternative 1:** `String` of date in DD/MM/YYYY format
** Pros: Easy to implement and do not need to parse user input.
** Cons: Need to implement comparators to sort the dates of activity.
** Cons: Not flexible. Unable to include and sort by time in future implementations.
* **Alternative 2 (Current choice):** Usage of `java.util.Date`
** Pros: Easy to implement.
** Pros: Able to sort by time in future implementations.
** Cons: Need to parse `Date` when converting it to `String`.
** Cons: Need to parse user inputs to convert `String` to `Date`.
// end::schedule[]
== Documentation
We use asciidoc for writing documentation.
[NOTE]
We chose asciidoc over Markdown because asciidoc, although a bit more complex than Markdown, provides more flexibility in formatting.
=== Editing Documentation
See <<UsingGradle#rendering-asciidoc-files, UsingGradle.adoc>> to learn how to render `.adoc` files locally to preview the end result of your edits.
Alternatively, you can download the AsciiDoc plugin for IntelliJ, which allows you to preview the changes you have made to your `.adoc` files in real-time.
=== Publishing Documentation
See <<UsingTravis#deploying-github-pages, UsingTravis.adoc>> to learn how to deploy GitHub Pages using Travis.
=== Converting Documentation to PDF format
We use https://www.google.com/chrome/browser/desktop/[Google Chrome] for converting documentation to PDF format, as Chrome's PDF engine preserves hyperlinks used in webpages.
Here are the steps to convert the project documentation files to PDF format.
. Follow the instructions in <<UsingGradle#rendering-asciidoc-files, UsingGradle.adoc>> to convert the AsciiDoc files in the `docs/` directory to HTML format.
. Go to your generated HTML files in the `build/docs` folder, right click on them and select `Open with` -> `Google Chrome`.
. Within Chrome, click on the `Print` option in Chrome's menu.
. Set the destination to `Save as PDF`, then click `Save` to save a copy of the file in PDF format. For best results, use the settings indicated in the screenshot below.
.Saving documentation as PDF files in Chrome
image::chrome_save_as_pdf.png[width="300"]
[[Docs-SiteWideDocSettings]]
=== Site-wide Documentation Settings
The link:{repoURL}/build.gradle[`build.gradle`] file specifies some project-specific https://asciidoctor.org/docs/user-manual/#attributes[asciidoc attributes] which affect how all documentation files within this project are rendered.
[TIP]
Attributes left unset in the `build.gradle` file will use their *default value*, if any.
[cols="1,2a,1", options="header"]
.List of site-wide attributes
|===
|Attribute name |Description |Default value
|`site-name`
|The name of the website.
If set, the name will be displayed near the top of the page.
|_not set_
|`site-githuburl`
|URL to the site's repository on https://github.com[GitHub].
Setting this will add a "View on GitHub" link in the navigation bar.
|_not set_
|`site-seedu`
|Define this attribute if the project is an official SE-EDU project.
This will render the SE-EDU navigation bar at the top of the page, and add some SE-EDU-specific navigation items.
|_not set_
|===
[[Docs-PerFileDocSettings]]
=== Per-file Documentation Settings
Each `.adoc` file may also specify some file-specific https://asciidoctor.org/docs/user-manual/#attributes[asciidoc attributes] which affect how the file is rendered.
Asciidoctor's https://asciidoctor.org/docs/user-manual/#builtin-attributes[built-in attributes] may be specified and used as well.
[TIP]
Attributes left unset in `.adoc` files will use their *default value*, if any.
[cols="1,2a,1", options="header"]
.List of per-file attributes, excluding Asciidoctor's built-in attributes
|===
|Attribute name |Description |Default value
|`site-section`
|Site section that the document belongs to.
This will cause the associated item in the navigation bar to be highlighted.
One of: `UserGuide`, `DeveloperGuide`, ``LearningOutcomes``{asterisk}, `AboutUs`, `ContactUs`
_{asterisk} Official SE-EDU projects only_
|_not set_
|`no-site-header`
|Set this attribute to remove the site navigation bar.
|_not set_
|===
=== Site Template
The files in link:{repoURL}/docs/stylesheets[`docs/stylesheets`] are the https://developer.mozilla.org/en-US/docs/Web/CSS[CSS stylesheets] of the site.
You can modify them to change some properties of the site's design.
The files in link:{repoURL}/docs/templates[`docs/templates`] control the rendering of `.adoc` files into HTML5.
These template files are written in a mixture of https://www.ruby-lang.org[Ruby] and http://slim-lang.com[Slim].
[WARNING]
====
Modifying the template files in link:{repoURL}/docs/templates[`docs/templates`] requires some knowledge and experience with Ruby and Asciidoctor's API.
You should only modify them if you need greater control over the site's layout than what stylesheets can provide.
The SE-EDU team does not provide support for modified template files.
====
[[Testing]]
== Testing
=== Running Tests
There are three ways to run tests.
[TIP]
The most reliable way to run tests is the 3rd one. The first two methods might fail some GUI tests due to platform/resolution-specific idiosyncrasies.
*Method 1: Using IntelliJ JUnit test runner*
* To run all tests, right-click on the `src/test/java` folder and choose `Run 'All Tests'`
* To run a subset of tests, you can right-click on a test package, test class, or a test and choose `Run 'ABC'`
*Method 2: Using Gradle*
* Open a console and run the command `gradlew clean allTests` (Mac/Linux: `./gradlew clean allTests`)
[NOTE]
See <<UsingGradle#, UsingGradle.adoc>> for more info on how to run tests using Gradle.
*Method 3: Using Gradle (headless)*
Thanks to the https://github.com/TestFX/TestFX[TestFX] library we use, our GUI tests can be run in the _headless_ mode. In the headless mode, GUI tests do not show up on the screen. That means the developer can do other things on the computer while the tests are running.
To run tests in headless mode, open a console and run the command `gradlew clean headless allTests` (Mac/Linux: `./gradlew clean headless allTests`)
=== Types of tests
We have two types of tests:
. *GUI Tests* - These are tests involving the GUI. They include,
.. _System Tests_ that test the entire App by simulating user actions on the GUI. These are in the `systemtests` package.
.. _Unit tests_ that test the individual components. These are in `seedu.address.ui` package.
. *Non-GUI Tests* - These are tests not involving the GUI. They include,
.. _Unit tests_ targeting the lowest level methods/classes. +
e.g. `seedu.address.commons.StringUtilTest`
.. _Integration tests_ that are checking the integration of multiple code units (those code units are assumed to be working). +
e.g. `seedu.address.storage.StorageManagerTest`
.. Hybrids of unit and integration tests. These tests check multiple code units as well as how they are connected together. +
e.g. `seedu.address.logic.LogicManagerTest`
=== Troubleshooting Testing
**Problem: `HelpWindowTest` fails with a `NullPointerException`.**
* Reason: One of its dependencies, `HelpWindow.html` in `src/main/resources/docs` is missing.
* Solution: Execute Gradle task `processResources`.
== Dev Ops
=== Build Automation
See <<UsingGradle#, UsingGradle.adoc>> to learn how to use Gradle for build automation.
=== Continuous Integration
We use https://travis-ci.org/[Travis CI] and https://www.appveyor.com/[AppVeyor] to perform _Continuous Integration_ on our projects. See <<UsingTravis#, UsingTravis.adoc>> and <<UsingAppVeyor#, UsingAppVeyor.adoc>> for more details.
=== Coverage Reporting
We use https://coveralls.io/[Coveralls] to track the code coverage of our projects. See <<UsingCoveralls#, UsingCoveralls.adoc>> for more details.
=== Documentation Previews
When a pull request has changes to asciidoc files, you can use https://www.netlify.com/[Netlify] to see a preview of how the HTML version of those asciidoc files will look like when the pull request is merged. See <<UsingNetlify#, UsingNetlify.adoc>> for more details.
=== Making a Release
Here are the steps to create a new release.
. Update the version number in link:{repoURL}/src/main/java/seedu/address/MainApp.java[`MainApp.java`].
. Generate a JAR file <<UsingGradle#creating-the-jar-file, using Gradle>>.
. Tag the repo with the version number. e.g. `v0.1`
. https://help.github.com/articles/creating-releases/[Create a new release using GitHub] and upload the JAR file you created.
=== Managing Dependencies
A project often depends on third-party libraries. For example, Address Book depends on the http://wiki.fasterxml.com/JacksonHome[Jackson library] for XML parsing. Managing these _dependencies_ can be automated using Gradle. For example, Gradle can download the dependencies automatically, which is better than these alternatives: +
a. Include those libraries in the repo (this bloats the repo size) +
b. Require developers to download those libraries manually (this creates extra work for developers)
[[TextPredictionDetails]]
[appendix]
== Text Prediction low level details
The low level implementation of text prediction is done in `Trie`. The internal data structure is a _Tree_ structure where each character is stored as a node and strings built into a single tree.
A node has a `endNode` flag that determines if that node represents the last character of the predicted string value. If such a node is reached, the entire string value is appended to the prediction list.
The data structure can be visualised in the diagram below:
image::text_prediction_general.png[width="800"]
In addition, `Trie` implements the following main operations:
* `Trie#insert(String value)` -- inserts a string into the data structure.
* `Trie#remove(String value)` -- removes a string from the data structure.
* `Trie#getPredictList(String prefix)` -- retrieve a list of predicted string values that completes the prefix.
The prediction implementation is achieved by utilising depth first search (DFS) algorithm in the data structure, starting from the node representing the last character of the prefix.
The prediction algorithm is illustrated with the help of the following diagram:
image::text_prediction_predict.png[width="800"]
For example, we call the method `getPredictList("wom")`.
The algorithm works by first traversing the route (green circles) that represents `"wom"` and stopping at the `m` node (red circle). The algorithm then proceeds with a DFS from that node and explores all of its child nodes (blue circles).
Hence, calling `getPredictList("wom")` will return a list containing the elements:
* `"en"`
* `"ble"`
[[GetStartedProgramming]]
[appendix]
== Suggested Programming Tasks to Get Started
Suggested path for new programmers:
1. First, add small local-impact (i.e. the impact of the change does not go beyond the component) enhancements to one component at a time. Some suggestions are given in <<GetStartedProgramming-EachComponent>>.
2. Next, add a feature that touches multiple components to learn how to implement an end-to-end feature across all components. <<GetStartedProgramming-RemarkCommand>> explains how to go about adding such a feature.
[[GetStartedProgramming-EachComponent]]
=== Improving each component
Each individual exercise in this section is component-based (i.e. you would not need to modify the other components to get it to work).
[discrete]
==== `Logic` component
*Scenario:* You are in charge of `logic`. During dog-fooding, your team realize that it is troublesome for the user to type the whole command in order to execute a command. Your team devise some strategies to help cut down the amount of typing necessary, and one of the suggestions was to implement aliases for the command words. Your job is to implement such aliases.
[TIP]
Do take a look at <<Design-Logic>> before attempting to modify the `Logic` component.
. Add a shorthand equivalent alias for each of the individual commands. For example, besides typing `clear`, the user can also type `c` to remove all persons in the list.
+
****
* Hints
** Just like we store each individual command word constant `COMMAND_WORD` inside `*Command.java` (e.g. link:{repoURL}/src/main/java/seedu/address/logic/commands/FindCommand.java[`FindCommand#COMMAND_WORD`], link:{repoURL}/src/main/java/seedu/address/logic/commands/DeleteCommand.java[`DeleteCommand#COMMAND_WORD`]), you need a new constant for aliases as well (e.g. `FindCommand#COMMAND_ALIAS`).
** link:{repoURL}/src/main/java/seedu/address/logic/parser/AddressBookParser.java[`AddressBookParser`] is responsible for analyzing command words.
* Solution
** Modify the switch statement in link:{repoURL}/src/main/java/seedu/address/logic/parser/AddressBookParser.java[`AddressBookParser#parseCommand(String)`] such that both the proper command word and alias can be used to execute the same intended command.
** Add new tests for each of the aliases that you have added.
** Update the user guide to document the new aliases.
** See this https://github.com/se-edu/addressbook-level4/pull/785[PR] for the full solution.
****
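As a rough illustration of the solution described above, the relevant part of the `switch` statement in `AddressBookParser#parseCommand(String)` might end up looking like the following sketch; the `COMMAND_ALIAS` values shown are assumptions.

[source,java]
----
// Sketch of the relevant switch cases only: both the full command word and its
// alias fall through to the same parser or command.
switch (commandWord) {

case FindCommand.COMMAND_WORD:
case FindCommand.COMMAND_ALIAS: // e.g. "f", the new alias constant
    return new FindCommandParser().parse(arguments);

case ClearCommand.COMMAND_WORD:
case ClearCommand.COMMAND_ALIAS: // e.g. "c"
    return new ClearCommand();

default:
    throw new ParseException(MESSAGE_UNKNOWN_COMMAND);
}
----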
[discrete]
==== `Model` component
*Scenario:* You are in charge of `model`. One day, the `logic`-in-charge approaches you for help. He wants to implement a command such that the user is able to remove a particular tag from everyone in the address book, but the model API does not support such a functionality at the moment. Your job is to implement an API method, so that your teammate can use your API to implement his command.
[TIP]
Do take a look at <<Design-Model>> before attempting to modify the `Model` component.
. Add a `removeTag(Tag)` method. The specified tag will be removed from everyone in the address book.
+
****
* Hints
** The link:{repoURL}/src/main/java/seedu/address/model/Model.java[`Model`] and the link:{repoURL}/src/main/java/seedu/address/model/AddressBook.java[`AddressBook`] API need to be updated.
** Think about how you can use SLAP to design the method. Where should we place the main logic of deleting tags?
** Find out which of the existing API methods in link:{repoURL}/src/main/java/seedu/address/model/AddressBook.java[`AddressBook`] and link:{repoURL}/src/main/java/seedu/address/model/person/Person.java[`Person`] classes can be used to implement the tag removal logic. link:{repoURL}/src/main/java/seedu/address/model/AddressBook.java[`AddressBook`] allows you to update a person, and link:{repoURL}/src/main/java/seedu/address/model/person/Person.java[`Person`] allows you to update the tags.
* Solution
** Implement a `removeTag(Tag)` method in link:{repoURL}/src/main/java/seedu/address/model/AddressBook.java[`AddressBook`]. Loop through each person, and remove the `tag` from each person.
** Add a new API method `deleteTag(Tag)` in link:{repoURL}/src/main/java/seedu/address/model/ModelManager.java[`ModelManager`]. Your link:{repoURL}/src/main/java/seedu/address/model/ModelManager.java[`ModelManager`] should call `AddressBook#removeTag(Tag)`.
** Add new tests for each of the new public methods that you have added.
** See this https://github.com/se-edu/addressbook-level4/pull/790[PR] for the full solution.
****
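A hedged sketch of the solution is shown below. How an edited copy of a `Person` is created, and the exact update method on `AddressBook`, depend on the existing API, so those details (including the hypothetical `createPersonWithoutTag` helper) are assumptions; exception handling is omitted.

[source,java]
----
// Sketch of AddressBook#removeTag(Tag); exception handling omitted for brevity.
public void removeTag(Tag tag) {
    // Iterate over a copy so that replacing persons does not disturb the iteration.
    for (Person person : new ArrayList<>(persons.asObservableList())) {
        if (person.getTags().contains(tag)) {
            // createPersonWithoutTag is a hypothetical helper that copies the person
            // with the given tag removed from its tag set.
            Person editedPerson = createPersonWithoutTag(person, tag);
            updatePerson(person, editedPerson);
        }
    }
}
----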
[discrete]
==== `Ui` component
*Scenario:* You are in charge of `ui`. During a beta testing session, your team is observing how the users use your address book application. You realize that one of the users occasionally tries to delete non-existent tags from a contact, because the tags all look the same visually, and the user got confused. Another user made a typing mistake in his command, but did not realize he had done so because the error message wasn't prominent enough. A third user keeps scrolling down the list, because he keeps forgetting the index of the last person in the list. Your job is to implement improvements to the UI to solve all these problems.
[TIP]
Do take a look at <<Design-Ui>> before attempting to modify the `UI` component.
. Use different colors for different tags inside person cards. For example, `friends` tags can be all in brown, and `colleagues` tags can be all in yellow.
+
**Before**
+
image::getting-started-ui-tag-before.png[width="300"]
+
**After**
+
image::getting-started-ui-tag-after.png[width="300"]
+
****
* Hints
** The tag labels are created inside link:{repoURL}/src/main/java/seedu/address/ui/PersonCard.java[the `PersonCard` constructor] (`new Label(tag.tagName)`). https://docs.oracle.com/javase/8/javafx/api/javafx/scene/control/Label.html[JavaFX's `Label` class] allows you to modify the style of each Label, such as changing its color.
** Use the .css attribute `-fx-background-color` to add a color.
** You may wish to modify link:{repoURL}/src/main/resources/view/DarkTheme.css[`DarkTheme.css`] to include some pre-defined colors using css, especially if you have experience with web-based css.
* Solution
** You can modify the existing test methods for `PersonCard` 's to include testing the tag's color as well.
** See this https://github.com/se-edu/addressbook-level4/pull/798[PR] for the full solution.
*** The PR uses the hash code of the tag names to generate a color. This is deliberately designed to ensure consistent colors each time the application runs. You may wish to expand on this design to include additional features, such as allowing users to set their own tag colors, and directly saving the colors to storage, so that tags retain their colors even if the hash code algorithm changes.
****
. Modify link:{repoURL}/src/main/java/seedu/address/commons/events/ui/NewResultAvailableEvent.java[`NewResultAvailableEvent`] such that link:{repoURL}/src/main/java/seedu/address/ui/ResultDisplay.java[`ResultDisplay`] can show a different style on error (currently it shows the same regardless of errors).
+
**Before**
+
image::getting-started-ui-result-before.png[width="200"]
+
**After**
+
image::getting-started-ui-result-after.png[width="200"]
+
****
* Hints
** link:{repoURL}/src/main/java/seedu/address/commons/events/ui/NewResultAvailableEvent.java[`NewResultAvailableEvent`] is raised by link:{repoURL}/src/main/java/seedu/address/ui/CommandBox.java[`CommandBox`] which also knows whether the result is a success or failure, and is caught by link:{repoURL}/src/main/java/seedu/address/ui/ResultDisplay.java[`ResultDisplay`] which is where we want to change the style to.
** Refer to link:{repoURL}/src/main/java/seedu/address/ui/CommandBox.java[`CommandBox`] for an example on how to display an error.
* Solution
** Modify link:{repoURL}/src/main/java/seedu/address/commons/events/ui/NewResultAvailableEvent.java[`NewResultAvailableEvent`] 's constructor so that users of the event can indicate whether an error has occurred.
** Modify link:{repoURL}/src/main/java/seedu/address/ui/ResultDisplay.java[`ResultDisplay#handleNewResultAvailableEvent(NewResultAvailableEvent)`] to react to this event appropriately.
** You can write two different kinds of tests to ensure that the functionality works:
*** The unit tests for `ResultDisplay` can be modified to include verification of the color.
*** The system tests link:{repoURL}/src/test/java/systemtests/AddressBookSystemTest.java[`AddressBookSystemTest#assertCommandBoxShowsDefaultStyle() and AddressBookSystemTest#assertCommandBoxShowsErrorStyle()`] to include verification for `ResultDisplay` as well.
** See this https://github.com/se-edu/addressbook-level4/pull/799[PR] for the full solution.
*** Do read the commits one at a time if you feel overwhelmed.
****
. Modify the link:{repoURL}/src/main/java/seedu/address/ui/StatusBarFooter.java[`StatusBarFooter`] to show the total number of people in the address book.
+
**Before**
+
image::getting-started-ui-status-before.png[width="500"]
+
**After**
+
image::getting-started-ui-status-after.png[width="500"]
+
****
* Hints
** link:{repoURL}/src/main/resources/view/StatusBarFooter.fxml[`StatusBarFooter.fxml`] will need a new `StatusBar`. Be sure to set the `GridPane.columnIndex` properly for each `StatusBar` to avoid misalignment!
** link:{repoURL}/src/main/java/seedu/address/ui/StatusBarFooter.java[`StatusBarFooter`] needs to initialize the status bar on application start, and to update it accordingly whenever the address book is updated.
* Solution
** Modify the constructor of link:{repoURL}/src/main/java/seedu/address/ui/StatusBarFooter.java[`StatusBarFooter`] to take in the number of persons when the application just started.
** Use link:{repoURL}/src/main/java/seedu/address/ui/StatusBarFooter.java[`StatusBarFooter#handleAddressBookChangedEvent(AddressBookChangedEvent)`] to update the number of persons whenever there are new changes to the addressbook.
** For tests, modify link:{repoURL}/src/test/java/guitests/guihandles/StatusBarFooterHandle.java[`StatusBarFooterHandle`] by adding a state-saving functionality for the total number of people status, just like what we did for save location and sync status.
** For system tests, modify link:{repoURL}/src/test/java/systemtests/AddressBookSystemTest.java[`AddressBookSystemTest`] to also verify the new total number of persons status bar.
** See this https://github.com/se-edu/addressbook-level4/pull/803[PR] for the full solution.
****
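For the last exercise above, the update logic could look roughly like the sketch below; the `totalPersonsStatus` field and the message format are assumptions.

[source,java]
----
// Sketch only: keep a "total persons" status bar in sync with the address book.
private void setTotalPersons(int totalPersons) {
    Platform.runLater(() -> totalPersonsStatus.setText(totalPersons + " person(s) total"));
}

@Subscribe
public void handleAddressBookChangedEvent(AddressBookChangedEvent abce) {
    // Recompute the count whenever the address book changes.
    setTotalPersons(abce.data.getPersonList().size());
}
----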
[discrete]
==== `Storage` component
*Scenario:* You are in charge of `storage`. For your next project milestone, your team plans to implement a new feature of saving the address book to the cloud. However, the current implementation of the application constantly saves the address book after the execution of each command, which is not ideal if the user is working on limited internet connection. Your team decided that the application should instead save the changes to a temporary local backup file first, and only upload to the cloud after the user closes the application. Your job is to implement a backup API for the address book storage.
[TIP]
Do take a look at <<Design-Storage>> before attempting to modify the `Storage` component.
. Add a new method `backupAddressBook(ReadOnlyAddressBook)`, so that the address book can be saved in a fixed temporary location.
+
****
* Hint
** Add the API method in link:{repoURL}/src/main/java/seedu/address/storage/AddressBookStorage.java[`AddressBookStorage`] interface.
** Implement the logic in link:{repoURL}/src/main/java/seedu/address/storage/StorageManager.java[`StorageManager`] and link:{repoURL}/src/main/java/seedu/address/storage/XmlAddressBookStorage.java[`XmlAddressBookStorage`] class.
* Solution
** See this https://github.com/se-edu/addressbook-level4/pull/594[PR] for the full solution.
****
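A minimal sketch of the implementation side, assuming the interface method has already been added; `backupFilePath` is a hypothetical fixed temporary location, and reusing `saveAddressBook` mirrors the existing save logic.

[source,java]
----
// Sketch only: XmlAddressBookStorage reusing its existing save logic to write a backup
// to a fixed temporary location (backupFilePath is a hypothetical field).
@Override
public void backupAddressBook(ReadOnlyAddressBook addressBook) throws IOException {
    saveAddressBook(addressBook, backupFilePath);
}
----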
[[GetStartedProgramming-RemarkCommand]]
=== Creating a new command: `remark`
By creating this command, you will get a chance to learn how to implement a feature end-to-end, touching all major components of the app.
*Scenario:* You are a software maintainer for `addressbook`, as the former developer team has moved on to new projects. The current users of your application have a list of new feature requests that they hope the software will eventually have. The most popular request is to allow adding additional comments/notes about a particular contact, by providing a flexible `remark` field for each contact, rather than relying on tags alone. After designing the specification for the `remark` command, you are convinced that this feature is worth implementing. Your job is to implement the `remark` command.
==== Description
Edits the remark for the person specified by `INDEX`. +
Format: `remark INDEX r/[REMARK]`
Examples:
* `remark 1 r/Likes to drink coffee.` +
Edits the remark for the first person to `Likes to drink coffee.`
* `remark 1 r/` +
Removes the remark for the first person.
==== Step-by-step Instructions
===== [Step 1] Logic: Teach the app to accept 'remark' which does nothing
Let's start by teaching the application how to parse a `remark` command. We will add the logic of `remark` later.
**Main:**
. Add a `RemarkCommand` that extends link:{repoURL}/src/main/java/seedu/address/logic/commands/Command.java[`Command`]. Upon execution, it should just throw an `Exception`.
. Modify link:{repoURL}/src/main/java/seedu/address/logic/parser/AddressBookParser.java[`AddressBookParser`] to accept a `RemarkCommand`.
**Tests:**
. Add `RemarkCommandTest` that tests that `execute()` throws an Exception.
. Add new test method to link:{repoURL}/src/test/java/seedu/address/logic/parser/AddressBookParserTest.java[`AddressBookParserTest`], which tests that typing "remark" returns an instance of `RemarkCommand`.
===== [Step 2] Logic: Teach the app to accept 'remark' arguments
Let's teach the application to parse arguments that our `remark` command will accept. E.g. `1 r/Likes to drink coffee.`
**Main:**
. Modify `RemarkCommand` to take in an `Index` and `String` and print those two parameters as the error message.
. Add `RemarkCommandParser` that knows how to parse two arguments, one index and one with prefix 'r/'.
. Modify link:{repoURL}/src/main/java/seedu/address/logic/parser/AddressBookParser.java[`AddressBookParser`] to use the newly implemented `RemarkCommandParser`.
**Tests:**
. Modify `RemarkCommandTest` to test the `RemarkCommand#equals()` method.
. Add `RemarkCommandParserTest` that tests different boundary values
for `RemarkCommandParser`.
. Modify link:{repoURL}/src/test/java/seedu/address/logic/parser/AddressBookParserTest.java[`AddressBookParserTest`] to test that the correct command is generated according to the user input.
===== [Step 3] Ui: Add a placeholder for remark in `PersonCard`
Let's add a placeholder on all our link:{repoURL}/src/main/java/seedu/address/ui/PersonCard.java[`PersonCard`] s to display a remark for each person later.
**Main:**
. Add a `Label` with any random text inside link:{repoURL}/src/main/resources/view/PersonListCard.fxml[`PersonListCard.fxml`].
. Add FXML annotation in link:{repoURL}/src/main/java/seedu/address/ui/PersonCard.java[`PersonCard`] to tie the variable to the actual label.
**Tests:**
. Modify link:{repoURL}/src/test/java/guitests/guihandles/PersonCardHandle.java[`PersonCardHandle`] so that future tests can read the contents of the remark label.
===== [Step 4] Model: Add `Remark` class
We have to properly encapsulate the remark in our link:{repoURL}/src/main/java/seedu/address/model/person/Person.java[`Person`] class. Instead of just using a `String`, let's follow the conventional class structure that the codebase already uses by adding a `Remark` class.
**Main:**
. Add `Remark` to model component (you can copy from link:{repoURL}/src/main/java/seedu/address/model/person/Address.java[`Address`], remove the regex and change the names accordingly).
. Modify `RemarkCommand` to now take in a `Remark` instead of a `String`.
**Tests:**
. Add test for `Remark`, to test the `Remark#equals()` method.
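For reference, a minimal `Remark` class along the lines described in this step could look like the sketch below (modelled on `Address` with the validation regex removed; details may differ from the reference solution).

[source,java]
----
import static java.util.Objects.requireNonNull;

// Sketch of a Remark value class; a plain wrapper around a String with no validation.
public class Remark {

    public final String value;

    public Remark(String remark) {
        requireNonNull(remark);
        this.value = remark;
    }

    @Override
    public String toString() {
        return value;
    }

    @Override
    public boolean equals(Object other) {
        return other == this // short circuit if same object
                || (other instanceof Remark // instanceof handles nulls
                && value.equals(((Remark) other).value)); // state check
    }

    @Override
    public int hashCode() {
        return value.hashCode();
    }
}
----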
===== [Step 5] Model: Modify `Person` to support a `Remark` field
Now we have the `Remark` class, we need to actually use it inside link:{repoURL}/src/main/java/seedu/address/model/person/Person.java[`Person`].
**Main:**
. Add `getRemark()` in link:{repoURL}/src/main/java/seedu/address/model/person/Person.java[`Person`].
. You may assume that the user will not be able to use the `add` and `edit` commands to modify the remarks field (i.e. the person will be created without a remark).
. Modify link:{repoURL}/src/main/java/seedu/address/model/util/SampleDataUtil.java/[`SampleDataUtil`] to add remarks for the sample data (delete your `addressBook.xml` so that the application will load the sample data when you launch it.)
===== [Step 6] Storage: Add `Remark` field to `XmlAdaptedPerson` class
We now have `Remark` s for `Person` s, but they will be gone when we exit the application. Let's modify link:{repoURL}/src/main/java/seedu/address/storage/XmlAdaptedPerson.java[`XmlAdaptedPerson`] to include a `Remark` field so that it will be saved.
**Main:**
. Add a new Xml field for `Remark`.
**Tests:**
. Fix `invalidAndValidPersonAddressBook.xml`, `typicalPersonsAddressBook.xml`, `validAddressBook.xml` etc., such that the XML tests will not fail due to a missing `<remark>` element.
===== [Step 6b] Test: Add withRemark() for `PersonBuilder`
Since `Person` can now have a `Remark`, we should add a helper method to link:{repoURL}/src/test/java/seedu/address/testutil/PersonBuilder.java[`PersonBuilder`], so that users are able to create remarks when building a link:{repoURL}/src/main/java/seedu/address/model/person/Person.java[`Person`].
**Tests:**
. Add a new method `withRemark()` for link:{repoURL}/src/test/java/seedu/address/testutil/PersonBuilder.java[`PersonBuilder`]. This method will create a new `Remark` for the person that it is currently building.
. Try and use the method on any sample `Person` in link:{repoURL}/src/test/java/seedu/address/testutil/TypicalPersons.java[`TypicalPersons`].
===== [Step 7] Ui: Connect `Remark` field to `PersonCard`
Our remark label in link:{repoURL}/src/main/java/seedu/address/ui/PersonCard.java[`PersonCard`] is still a placeholder. Let's bring it to life by binding it with the actual `remark` field.
**Main:**
. Modify link:{repoURL}/src/main/java/seedu/address/ui/PersonCard.java[`PersonCard`]'s constructor to bind the `Remark` field to the `Person` 's remark.
**Tests:**
. Modify link:{repoURL}/src/test/java/seedu/address/ui/testutil/GuiTestAssert.java[`GuiTestAssert#assertCardDisplaysPerson(...)`] so that it will compare the now-functioning remark label.
===== [Step 8] Logic: Implement `RemarkCommand#execute()` logic
We now have everything set up... but we still can't modify the remarks. Let's finish it up by adding in actual logic for our `remark` command.
**Main:**
. Replace the logic in `RemarkCommand#execute()` (that currently just throws an `Exception`), with the actual logic to modify the remarks of a person.
**Tests:**
. Update `RemarkCommandTest` to test that the `execute()` logic works.
==== Full Solution
See this https://github.com/se-edu/addressbook-level4/pull/599[PR] for the step-by-step solution.
[appendix]
== Product Scope
*Target user profile*:
* has a need to manage a significant number of contacts
* prefer desktop apps over other types
* can type fast
* prefers typing over mouse input
* is reasonably comfortable using CLI apps
*Value proposition*: manage contacts faster than a typical mouse/GUI driven app
[appendix]
== User Stories
Priorities: High (must have) - `* * \*`, Medium (nice to have) - `* \*`, Low (unlikely to have) - `*` +
Functional Requirements:
[width="59%",cols="22%,<23%,<25%,<30%",options="header",]
|=======================================================================
|Priority |As a ... |I want to ... |So that I can...
|`* * *` |new user |see usage instructions |refer to instructions when I forget how to use the App
|`* * *` |user |add a new person |
|`* * *` |user |delete a person |remove entries that I no longer need
|`* * *` |user |find a person by name |locate details of persons without having to go through the entire list
|`* * *` |user who values time |autocomplete my commands |execute commands faster.
|`* * *` |forgetful user |approximate my search input |get the closest output suggestions I need.
|`* * *` |employee |access the full profile of my clients |retrieve their information when required.
|`* * *` |organized user |create groups |contact many people at once easily.
|`* * *` |meticulous supervisor |access my team’s profiles easily |keep track of each member conveniently.
|`* * *` |overworked employee |have a backup file |avoid losing information immediately when I accidentally delete contacts.
|`* *` |user |hide <<private-contact-detail,private contact details>> by default |minimize chance of someone else seeing them by accident
|`* *` |organised user |sort my contacts based on rank/position |respond to them appropriately.
|`* *` |efficient user |send mass emails with a single command |email large groups quickly.
|`* *` |meticulous employee |add notes along with my contacts |keep track of my working relations
|`* *` |supervisor |access my subordinate’s Key-Performance-Index |keep track of their work quality.
|`*` |user with many persons in the address book |sort persons by name |locate a person easily
|`*` |user |schedule tasks in a calendar |be notified of my appointments.
|`*` |user |schedule tasks in a calendar |be notified of when tasks are due.
|`*` |forgetful user |look at the photos of my contacts |recognise the person.
|=======================================================================
[appendix]
== Use Cases
(For all use cases below, the *System* is the `AddressBook` and the *Actor* is the `user`, unless specified otherwise)
[discrete]
=== Use case: Viewing help
*MSS*
1. User requests a list of commands
2. AddressBook shows a list of commands
+
Use case ends.
[discrete]
=== Use case: Add person
*MSS*
1. User requests to Add a person with the relevant details.
2. AddressBook adds the person into the list and shows a success message.
+
Use case ends.
*Extensions*
[none]
* 1a. Given details are invalid.
[none]
** 1a1. AddressBook shows an error message.
+
Use case resumes at step 1.
[discrete]
=== Use case: List persons
*MSS*
1. User requests to list persons
2. AddressBook shows a list of persons
+
Use case ends.
*Extensions*
[none]
* 2a. The list is empty.
+
Use case ends.
[discrete]
=== Use case: Find persons
*MSS*
1. User requests to search by address
2. Address book shows a list of persons living at that address
3. User requests to search for a specific tag in addition to the previous address
4. Address book refines the search results by displaying people who live at the specified address and have the specified tag
+
Use case ends.
*Extensions*
[none]
* 2a. No similar keywords within the address book.
+
[none]
** 2a1. Address book shows list of persons with similar keywords to the one originally searched.
+
Use case resumes at step 3.
[discrete]
=== Use case: Delete person
*MSS*
1. User requests to list persons
2. AddressBook shows a list of persons
3. User requests to delete a specific person in the list
4. AddressBook deletes the person
+
Use case ends.
*Extensions*
[none]
* 2a. The list is empty.
+
Use case ends.
* 3a. The given index is invalid.
+
[none]
** 3a1. AddressBook shows an error message.
+
Use case resumes at step 2.
[discrete]
=== Use case: Back Up data
*MSS*
1. User requests to back up data
2. AddressBook shows that data has been backed up
+
Use case ends.
[discrete]
=== Use case: Restore data
*MSS*
1. User requests to list snapshots of all backups
2. AddressBook shows a list of snapshots
3. User requests to restore a specific snapshot in the list
4. AddressBook is restored to the time and date of the restored backup.
Use case ends.
*Extensions*
[none]
* 2a. The list is empty.
+
Use case ends.
* 3a. The given index is invalid.
+
[none]
** 3a1. AddressBook shows an error message.
+
Use case resumes at step 2.
[discrete]
=== Use case: Export data to a directory
*MSS*
1. User requests to export the data to the directory
2. AddressBook shows that the file has been exported
Use case ends.
*Extensions*
[none]
* 2a. Directory does not exist
[none]
** 2a1. AddressBook shows an error message.
+
Use case ends.
[discrete]
=== Use case: Import data from a directory
*MSS*
1. User requests to import the data from a directory
2. AddressBook shows that the file has been imported
3. AddressBook will include the imported data.
Use case ends.
*Extensions*
[none]
* 2a. Directory does not exist
[none]
** 2a1. AddressBook shows an error message.
+
Use case ends.
[discrete]
=== Use case: Lock address book
*MSS*
1. User requests to encrypt the address book with a password
2. Address book shows a message that the data has been encrypted
+
Use case ends.
*Extensions*
[none]
* 1a. Address book is already encrypted.
+
[none]
** 1a1. Address book will decrypt the data instead with the given password.
+
Use case ends.
[discrete]
=== Use case: Unlock address book
*MSS*
1. User requests to decrypt the address book with a password
2. Address book shows a message that the data has been decrypted
+
Use case ends.
*Extensions*
[none]
* 1a. Address book is already decrypted.
+
[none]
** 1a1. Address book will encrypt the data instead with the given password.
+
[none]
* 1b. Incorrect password entered.
+
[none]
** 1b1. Address book will display incorrect password message.
+
Use case ends.
[discrete]
=== Use case: Text prediction
*MSS*
1. User inputs some characters. +
2. User requests text prediction with `Tab`. +
3. Address book auto completes user input with closest prediction. +
Use case ends.
*Extensions*
[none]
* 2a. There is more than one prediction.
[none]
** 2a1. Address book lists multiple predictions. +
** 2a2. Use case resumes from step 1. +
[discrete]
=== Use case: Send email
*MSS*
1. User requests to send email with mail command. +
2. Address book opens email application with appropriate recipients. +
Use case ends.
*Extensions*
[none]
* 1a. Input command has invalid format.
[none]
** 1a1. Address book displays format error. +
Use case ends.
[discrete]
=== Use case: Add an activity to the schedule
*MSS*
1. User requests to add an activity to the schedule. +
2. Address book adds the activity to the schedule +
Use case ends.
*Extensions*
[none]
* 1a. Input command has invalid format.
[none]
** 1a1. Address book displays format error. +
Use case ends.
[discrete]
=== Use case: Edit an activity in the schedule
*MSS*
1. User requests to edit an activity in the schedule. +
2. Address book edits the activity in the schedule +
Use case ends.
*Extensions*
[none]
* 1a. Input command has invalid format.
[none]
** 1a1. Address book displays format error. +
Use case ends.
[discrete]
=== Use case: Delete an activity from the schedule
*MSS*
1. User requests to delete an activity from the schedule. +
2. Address book deletes the activity from the schedule +
Use case ends.
*Extensions*
[none]
* 1a. Input command has invalid format.
[none]
** 1a1. Address book displays format error. +
Use case ends.
[appendix]
== Non Functional Requirements
. Should work on any <<mainstream-os,mainstream OS>> as long as it has Java `9` or higher installed.
. Should be able to hold up to 1000 persons without a noticeable sluggishness in performance for typical usage.
. A user with above average typing speed for regular English text (i.e. not code, not system admin commands) should be able to accomplish most of the tasks faster using commands than using the mouse.
. A busy employee should be able to get back to work quickly using an intuitive interface to get information from the address book fast.
. A secretary should be able to refer and contact different people with accurate information.
. A busy employee should be able to get back to work fast by having quick access to information required.
. Should have a way to keep information confidential and safe from any unauthenticated personnel.
. Should be able to transfer information between machines easily by keeping the address book data small and compact.
[appendix]
== Glossary
[[mainstream-os]] Mainstream OS::
Windows, Linux, Unix, OS-X
[[private-contact-detail]] Private contact detail::
A contact detail that is not meant to be shared with others
[appendix]
== Instructions for Manual Testing
Given below are instructions to test the app manually.
[NOTE]
These instructions only provide a starting point for testers to work on; testers are expected to do more _exploratory_ testing.
=== Adding a person
Adding a person to the address book. +
Prerequisites: Carry out the following commands sequentially:
. Test case: `add n/Alexia Tok p/22224444 e/AT@example.com a/123, Potong Pasir, 1234665 r/Boss k/4.5 d/AT is a good friend t/friend t/colleague 2` +
Expected: The person is added to the contact list.
. Test case: `add n/Alexia Tok p/22224444 e/AT@example.com a/123, Potong Pasir, 1234665 r/Boss k/4.5 d/AT is a good friend t/friend t/colleague 2` +
Expected: No person is added as the person "Alexia Tok" is already in the contact list. Error details shown in the status message.
. Test case: `add n/Bob Teo p/23q e/AT@example.com a/123, Potong Pasir, 1234665 r/Boss k/4.5 d/AT is a good friend t/friend t/colleague 2` +
Expected: No person is added as the phone number is invalid. Error details shown in the status message.
. Test case: `add n/Bob Teo p/99008800 e/ATexamplecom a/123, Potong Pasir, 1234665 r/Boss k/4.5 d/AT is a good friend t/friend t/colleague 2` +
Expected: No person is added as the email is invalid. Error details shown in the status message.
. Test case: `add n/Bob Teo p/99008800 e/AT@example.com a/123, Potong Pasir, 1234665 r/... k/4.5 d/AT is a good friend t/friend t/colleague 2` +
Expected: No person is added as the position is invalid. Error details shown in the status message.
. Test case: `add n/Bob Teo p/99008800 e/AT@example.com a/123, Potong Pasir, 1234665 r/Boss k/6 d/AT is a good friend t/friend t/colleague 2` +
Expected: No person is added as the kpi is invalid. Error details shown in the status message.
. Test case: `add n/Bob Teo p/99008800 e/AT@example.com a/123, Potong Pasir, 1234665 r/Boss k/4.5 d/?#$% t/friend t/colleague 2` +
Expected: No person is added as the note is invalid. Error details shown in the status message.
. Test case: `add n/Bob Teo p/99008800 e/AT@example.com a/123, Potong Pasir, 1234665 r/Boss k/4.5 d/AT is a good friend t/friend t/colleague 3` +
Expected: No person is added as the tag is invalid. Error details shown in the status message.
=== Editing a person
Editing a person in the address book. +
Prerequisites: There are multiple persons in the contact list.
. Test case: `edit 1 p/22224444 e/AT@example.com a/123, Potong Pasir, 1234665 r/Boss k/4.5 d/AT is a good friend t/friend t/colleague 2` +
Expected: The first person in the contact list is edited.
. Test case: `edit 0 p/12345678` +
Expected: No person is edited. Error details shown in status message.
. Test case: `edit all/ n/Hercules fatty` +
Expected: No person is edited. Error details shown in status message.
=== Listing persons
Listing persons in the address book. +
Prerequisites: There are multiple persons in the contact list.
. Test case: `list` +
Expected: All contacts in the address book will be listed and shown in the contacts panel.
. Test case: `list t/colleagues` +
Expected: All contacts with the tag `colleagues` will be listed and shown.
. Test case: `list k/5.0` +
Expected: All contacts with KPI value of `5.0` will be listed and shown.
=== Selecting persons
Selecting persons in the address book. +
Prerequisites: There are at least 7 persons in the contacts list. +
. Test case: `select 1` +
Expected: First contact will be highlighted in the contacts panel and information shown in information panel.
. Test case: `select 1 2 3` +
Expected: First three contacts will be highlighted.
. Test case: `select 1-3, 5-7` +
Expected: First three and 5th to 7th contacts will be highlighted.
=== Predicting text
Invoking text prediction in the address book. +
Prerequisites: There are the sample contacts in the address book.
. Test case: `f` and press kbd:[Tab]. +
Expected: The command box should automatically be filled to the command `find `.
. Test case: Continuing from previous test case, append with `n/Al` and press kbd:[Tab]. +
Expected: The command box should be automatically filled to `find n/Alex Yeoh`.
. Other commands to try: `add`, `delete`, `password`. +
NOTE: Some arguments do not work since they are unsupported. Refer to <<UserGuide.adoc#text-prediction,Text Prediction>> in the User Guide for more details.
=== Searching for a person
Finding a person in the address book. +
Prerequisites: The following commands must be entered so that "Alexia Tok" and "Marcus Tok" exist within the contact list: +
* `add n/Alexia Tok p/22224444 e/AT@example.com a/123, Potong Pasir, 1234665 r/Boss k/4.5 d/AT is a good friend t/friend t/colleague 2` +
* `add n/Marcus Tok p/12341234 e/MT@example.com a/123, Admiraly, 1234665 r/Cleaner k/4.0 d/MT is a bad cleaner t/notFriend t/colleague 2` +
. Test case: `find n/Tok` +
Expected: "Marcus Tok" and "Alexia Tok" will show up in the contacts panel.
. Test case: `find k/4.0` +
Expected: "Marcus Tok" will show up in the contacts panel.
. Test case: `find k/4` +
Expected: "Marcus Tok" and "Alexia Tok" will show up in the contacts panel.
. Test case: `find r/bass` +
Expected: "Alexia Tok" with position "Boss" will show up in the contacts panel. `Keywords guessed: {Boss}` will show at the status message as "bass" is a closer match to "Boss"
=== Locking and unlocking
Locking and unlocking of the address book. +
Prerequisites: Address book should not be locked. Check path at `./data/addressbook.xml` to ensure that it is not encrypted: `addressbook.xml.encrypted` and that `addressbook.xml` exists.
. Test case: `password hello` +
Expected: A status message "File encrypted!" will be displayed to the user and the contacts panel will be cleared. All further commands at this point will show a message "Address book is locked, please key in password". +
+
NOTE: Schedule panel will still be visible and not cleared. But the user will not be able to update it. Closing and re-opening the address book will clear it.
. Test case: `password test` +
Prerequisites: This test case should be carried out after the previous test case. +
Expected: A status message "Password mismatch!" will be displayed to the user and the address book will remain locked.
. Test case: `password hello` +
Prerequisites: This test case should be carried out after test case *1.* above. +
Expected: A status message "File decrypted!" will be displayed to the user, the contacts panel and schedule will be refreshed to show the data that was encrypted. +
=== Deleting a person
Deleting a person while all persons are listed. +
Prerequisites: List all persons using the `list` command. Multiple persons in the list.
. Test case: `delete 1` +
Expected: First contact is deleted from the list. Details of the deleted contact shown in the status message. Timestamp in the status bar is updated.
. Test case: `delete 1 2 3` +
Expected: First three contacts will be deleted.
. Test case: `delete 1-3, 5-7` +
Expected: First three and 5th to 7th contacts will be deleted.
. Test case: `delete 0` +
Expected: No person is deleted. Error details shown in the status message. Status bar remains the same.
. Other incorrect delete commands to try: `delete`, `delete x` (where x is larger than the list size) +
Expected: Similar to previous.
=== Updating the Schedule
Updating activities in the schedule. +
Prerequisites: Carry out the following commands sequentially:
. Test case: `schedule-add d/30/10/2018 a/Attend meeting with Alex.` +
Expected: activity "Attend meeting with Alex." is added to the schedule.
. Test case: `schedule-add d/30/10/2018 a/Attend meeting with Alex.` +
Expected: another activity, "Attend meeting with Alex.", is added to the schedule.
. Test case: `schedule-edit 2 a/Attend meeting with Alexia.` +
Expected: The second activity is edited to "Attend meeting with Alexia.".
. Test case: `schedule-edit 0 a/Test bugs` +
Expected: No activity is edited. Error details shown in the status message. Schedule panel remains the same.
. Test case: `schedule-delete 1` +
Expected: First activity is deleted from the schedule.
. Test case: `schedule-delete 0` +
Expected: No activity is deleted. Error details shown in the status message.
=== Backing up and restoring data in the address book
Backing up and restoring data in the address book. +
Prerequisites: Make a backup with the `backup` command. +
. Test case: `restore-snapshot` +
Expected: One snapshot will be shown on the screen if this is your first backup. If this is not your first backup, the list will be populated with older snapshots. +
. Test case: `restore 0` +
Expected: No data restored. Error details shown in the status message. Status bar remains the same.
. Other incorrect restore commands to try: `restore`, `restore x` (where x is larger than the list size) +
Expected: Similar to previous.
=== Exporting data from the address book
Exporting data from the address book. +
Prerequisites: Make sure the address book is populated with people. +
. Test case: `export` +
Expected: A copy of the contacts in `CSV` format will be created in an `exports` folder in the root directory. +
. Test case: `export DIRECTORY FILENAME` +
Expected: A copy of the contacts in `CSV` format will be created in the designated folder with the designated name.
. Any directory or file name that does not exist: `export FALSE_DIRECTORY FILENAME`, `export DIRECTORY FALSE_FILENAME`. +
Expected: Nothing will be created. An error will be thrown to the user. Status bar will remain the same.
=== Importing data to the address book
Importing data from the address book.
. Test case: `import DIRECTORY FILENAME` +
Expected: The contacts from the `CSV` file in the designated folder with the designated name will be imported into the address book.
. Test case: `import` +
Expected: Nothing will be imported. An error will be shown to the user. Status bar will remain the same.
. Any directory or file name that does not exist: `import FALSE_DIRECTORY FILENAME`, `import DIRECTORY FALSE_FILENAME`. +
Expected: Similar to previous.
[id="upgrading-virt"]
= Upgrading {VirtProductName}
include::modules/virt-document-attributes.adoc[]
:context: upgrading-virt
toc::[]
You can manually upgrade to the next minor version of {VirtProductName} and
monitor the status of an update by using the web console.
:FeatureName: {VirtProductName}
include::modules/virt-about-upgrading-virt.adoc[leveloffset=+1]
include::modules/virt-upgrading-virt.adoc[leveloffset=+1]
include::modules/virt-monitoring-upgrade-status.adoc[leveloffset=+1]
.Additional information
* xref:../operators/understanding_olm/olm-understanding-olm.adoc#olm-csv_olm-understanding-olm[ClusterServiceVersions (CSVs)]
[source,http,options="nowrap"]
----
DELETE /repo/fieldTypes/1 HTTP/1.1
Authorization: Bearer eyJraWQiOiIxMjMiLCJhbGciOiJSUzI1NiJ9.eyJzdWIiOiI1ZWUzNzhhZDQ3NDg5MTI5Y2M0OWIzYjAiLCJyb2xlcyI6W10sImlzcyI6Im1tYWR1LmNvbSIsImdyb3VwcyI6W10sImF1dGhvcml0aWVzIjpbXSwiY2xpZW50X2lkIjoiMjJlNjViNzItOTIzNC00MjgxLTlkNzMtMzIzMDA4OWQ0OWE3IiwiZG9tYWluX2lkIjoiMCIsImF1ZCI6InRlc3QiLCJuYmYiOjE1OTI1NTI4MjYsInVzZXJfaWQiOiIxMTExMTExMTEiLCJzY29wZSI6ImEuZ2xvYmFsLmZpZWxkX3R5cGUuZGVsZXRlIiwiZXhwIjoxNTkyNTUyODMxLCJpYXQiOjE1OTI1NTI4MjYsImp0aSI6ImY1YmY3NWE2LTA0YTAtNDJmNy1hMWUwLTU4M2UyOWNkZTg2YyJ9.X_z8-R_uunmmYMWsIAQNsj6W-o_N8tlkP7E1yOhPbHQNRZ0YULnILuHrQR0hPwSFmrx8p10cbOcgT4pQiCyfXRdQPaCaS1Wb-QeJqa67OS_txP0lAPPteWa70549WJC_bfLoQiVZMzW338GyRcs6PrRYrh8yZS-vHHpL8cW6cQ-VwUyFbduCn2hw51vnlurwjVXvwTqn0ckCKPXb8VfpqnYhIz2fUD89xBbjJLojgRbHCAo7rA3e2I6jEXRv9KvUETCXt2hxwKrLDiLG1xhzgQmaKAAWmj5N2AaSsQxzYSyOu9mVlDNxntoM5anFfTfB9-gXAFwI_CaNvquAld1-yw
Host: localhost:8080
----
:icons: font
:docinfodir: /workspace/manual-adoc
:docinfo1:
[.instruction]
Changing the name of plugin sets:
. Go to *Plugins » Plugin overview*.
. Click on the plugin set you want to edit. +
→ The area for editing the selected plugin set opens.
. Click on the *Plugin set settings* button (icon:cog[]). +
→ The *Settings* window opens.
. Under *Name*, enter the new name for your plugin set.
. *Save* (icon:save[role=green]) the settings. +
→ The name of the plugin set is updated.
Complete the procedure above before proceeding.
== Building Block 3 - Install CFME on Openshift
This *Building Block* is targeted at installing Cloudforms Management Engine (CFME) on Openshift.
WIP
[[circuit-breakers]]
= Circuit Breakers
Adding circuit breakers via the https://github.com/Netflix/Hystrix[Hystrix] library helps you fail fast or provide a fallback if any dependent service either goes down or goes too slow.
Hystrix is a plain library, which means that it can easily be added to *any* Java program. Additionally, there are frameworks that provide integration with Hystrix:
* link:#kubeflix[Kubeflix]
* link:#wildfly-swarm-netflix[Wildfly Swarm - Netflix]
* link:#spring-cloud-netflix[Spring Cloud - Netflix]
== Standalone applications
Using Hystrix is as simple as extending the https://github.com/Netflix/Hystrix/blob/master/hystrix-core/src/main/java/com/netflix/hystrix/HystrixCommand.java[HystrixCommand] class.
Borrowed from https://github.com/Netflix/Hystrix/tree/master/hystrix-examples[hystrix examples] here is a simple _Hello World_ implementation:
[source]
----
public class CommandHelloWorld extends HystrixCommand<String> {

    private final String name;

    public CommandHelloWorld(String name) {
        super(HystrixCommandGroupKey.Factory.asKey("ExampleGroup"));
        this.name = name;
    }

    @Override
    protected String run() {
        return "Hello " + name + "!";
    }
}
----
The command can now be executed:
[source]
----
new CommandHelloWorld("World").execute();
----
This is enough to implement a circuit breaker.
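Hystrix commands can also supply a fallback that is returned when `run()` fails or the circuit is open. The sketch below is our own variation of the hello-world example (not taken verbatim from the Hystrix documentation) and illustrates the idea:

[source,java]
----
// Sketch only: the same hello-world command with a fallback that is returned
// when run() throws or the circuit is open.
public class CommandHelloWorldWithFallback extends HystrixCommand<String> {

    private final String name;

    public CommandHelloWorldWithFallback(String name) {
        super(HystrixCommandGroupKey.Factory.asKey("ExampleGroup"));
        this.name = name;
    }

    @Override
    protected String run() {
        throw new RuntimeException("backend is down"); // simulate a failure
    }

    @Override
    protected String getFallback() {
        return "Hello Failure " + name + "!";
    }
}
----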
=== Exposing Hystrix metrics
To expose metrics from the circuit breaker one needs to expose the https://github.com/Netflix/Hystrix/blob/master/hystrix-contrib/hystrix-metrics-event-stream/src/main/java/com/netflix/hystrix/contrib/metrics/eventstream/HystrixMetricsStreamServlet.java[HystrixMetricsStreamServlet], which can be found inside:
[source]
----
<dependency>
<groupId>com.netflix.hystrix</groupId>
<artifactId>hystrix-metrics-event-stream</artifactId>
<version>1.4.9</version>
</dependency>
----
To register the servlet one needs to simply add the following inside the web.xml:
[source]
----
<servlet>
    <display-name>metrics</display-name>
    <servlet-name>metrics</servlet-name>
    <servlet-class>com.netflix.hystrix.contrib.metrics.eventstream.HystrixMetricsStreamServlet</servlet-class>
</servlet>
<servlet-mapping>
    <servlet-name>metrics</servlet-name>
    <url-pattern>/hystrix.stream</url-pattern>
</servlet-mapping>
----
=== Using the Hystrix dashboard
An application that is link:#exposing-hystrix-metrics[exposing its hystrix metrics stream] can take advantage of the visualization capabilities of the https://github.com/Netflix/Hystrix/tree/master/hystrix-dashboard[Hystrix Dashboard].
This is as simple as pointing the dashboard to the URL of the hystrix metrics stream.
=== Using Turbine to aggregate multiple hystrix stream
To make the most out of the Hystrix dashboard you can aggregate multiple streams and have the dashboard visualize multiple circuit breakers at once.
The aggregation is performed by https://github.com/Netflix/Turbine/wiki[Turbine].
== Kubeflix
Everything mentioned so far can easily be used inside a simple Java application. But if those applications are to be run inside Kubernetes, there are some additional requirements:
* link:#circuit-breaker-discovery[Circuit Breaker discovery]
* link:#turbine-server-docker-image[Turbine Server Docker image]
* link:#hystrix-dashboard-docker-image[Hystrix Dashboard Docker image]
=== Circuit Breaker Discovery
In most cloud environments ip addresses are not known in advanced and kubernetes is no exception. This means that we can't have Turbine pre-configured with a fixed set of urls but instead we need a discovery mechanism.
This mechanism is provided by Kubeflix and it pretty much allows to:
* Discovery all endpoints in the current that have been labeled as `hystrix.enabled`.
* Define multiple clusters that are composed by multiple endpoints accross multiple namespaces.
=== Turbine Server Docker image
Having a discovery implementation for turbine is not enough. We also need a turbine server app packaged as a docker container and of course the required Kubernetes configuration.
Kubeflix provides both (image and configuration).
The image is a simple webapp, pre-configured with the link:#circuit-breaker-discovery[Circuit Breaker discovery] as described above. The great thing about this app is that the default configuration can be modified by:
* Environment Variables
* ConfigMap
and that makes it easier for the user to define his own clusters or tune turbine to his own needs.
=== Hystrix Dashboard Docker image
For the Hystrix dashboard we also need to package it as a Docker container and create the required Kubernetes configuration.
Again Kubeflix provides both. On top of that web console is configured to reference Turbine Servers DNS name *out of the box*.
For more details, please visit the https://github.com/fabric8io/kubeflix[kubeflix project].
—
== Wildfly Swarm Netflix
Taken directly from the http://wildfly-swarm.io[Wildfly Swarm Website]:
`Swarm offers an innovative approach to packaging and running JavaEE applications by packaging them with just enough of the server runtime to "java -jar" your application.`
One of the available modules is https://github.com/wildfly-swarm/wildfly-swarm-netflix[Wildfly Swarm Netflix] which provides integration with the Netflix components.
A *Hello World* example of https://github.com/redhat-helloworld-msa/hola[Hystrix with Wildfly Swarm].
It's important to note that this example is `Kubeflix-ready` which means that regardless of how it has been implemented it will be able to integrate with the rest fo the Kubeflix bits. This is also the the case for the next framework in line….
—
=== Spring Cloud Netflix
This project provides integration between Spring Cloud and the Netflix components, including Hystrix as a *Circuit breaker* implementation.
On top of that it provides integration with https://github.com/Netflix/ribbon[Ribbon] and makes it easy to compose rest application that communicate with each other.
For Spring Cloud users it worths mentioning https://github.com/fabric8io/spring-cloud-kubernetes[Spring Cloud Kubernetes] that provides Kubernetes integration with Spring cloud and it allows you to use everything together (Spring Cloud, Kubernetes, Netflix components).
| 44.951049
| 310
| 0.776758
|
c7cb1a41fcd3b35bd494d265e11f3f1c62202d46
| 4,773
|
adoc
|
AsciiDoc
|
doc-content/enterprise-only/openshift/apb-deploy-proc.adoc
|
xieshenzh/kie-docs
|
61243b00e13d7ef45e7c860a330e2f65b7462ed5
|
[
"Apache-2.0"
] | null | null | null |
doc-content/enterprise-only/openshift/apb-deploy-proc.adoc
|
xieshenzh/kie-docs
|
61243b00e13d7ef45e7c860a330e2f65b7462ed5
|
[
"Apache-2.0"
] | 37
|
2017-10-09T12:38:44.000Z
|
2022-03-24T14:32:41.000Z
|
doc-content/enterprise-only/openshift/apb-deploy-proc.adoc
|
xieshenzh/kie-docs
|
61243b00e13d7ef45e7c860a330e2f65b7462ed5
|
[
"Apache-2.0"
] | 6
|
2017-12-06T18:18:14.000Z
|
2020-07-23T15:47:37.000Z
|
[id='apb-deploy-proc_{context}']
= Deploying a {PRODUCT} environment using the Automation Broker
To deploy a {PRODUCT} environment using the Automation Broker, you must find the Ansible Playbook in the OpenShift catalog, run it, and set the parameters as required.
.Procedure
. In the OpenShift Web UI, select *Add to Project -> Browse Catalog*.
. In the search field, type *{PRODUCT}*.
. Select the *{PRODUCT} {PRODUCT_VERSION} APB* catalog entry.
. Click *Next*.
. Select the required architecture elements, as described on the screen, and click the *Next* button.
+
IMPORTANT: If you want to deploy an environment with immutable servers and a monitoring infrastructure, you must first install the *Immutable Server - Monitor* option and then the *Immutable Server - KIE Process Server* option.
. Enter the parameters as described on the screen. In most cases, the default values lead to a working deployment; you can modify them as necessary. You must enter the following settings:
+
** For the *Immutable Server - Monitor* option:
+
*** The *Maven repository URL* field. You must provide a Maven repository with the same versions of all the artifacts that are deployed on any monitored immutable servers.
*** The *Admin password* field. You must record the administrative user name and password to configure monitored servers to connect to {CENTRAL} Monitoring.
+
** For the *Immutable Server - KIE Process Server* option:
*** The *KIE Server Container Deployment*, *Git Repository URL*, and *Git Repository Reference* fields. These settings determine the source code that the deployment process builds and deploys on the {KIE_SERVER}.
*** If you deployed the *Immutable Server - Monitor* option and want to connect the server to the monitoring infrastructure:
**** Under *Router integration*, the service name of the `rhpam-immutable-mon-smartrouter` service.
**** Under *Controller integration*, the service name of the `rhpam-immutable-mon-rhpamcentrmon` service and the admin user name and password that you set in the *Immutable Server - KIE Process Server* option.
+
[IMPORTANT]
====
Avoid each of the following combinations of settings. These combinations produce an invalid environment.
* *Process server*>**Database type** `H2` and *Process server*>**Number of replicas** exceeding `1`.
* *Process server*>**Database type** `External` and *Process server*>**Sets of Process Servers** exceeding `1`.
* *Red Hat - Single Sign-On* configured and *Process server*>**Sets of Process Servers** exceeding `1`.
====
+
. If you are using the *External* database type for the {KIE_SERVER} in the *Authoring*, *Immutable Server - Process Server*, or *Managed Environment* option, set the parameters under the *External Database* heading. Set the host, port, database name, and database JDBC URL to the correct values for your database server. Use the following values for the other fields:
+
** *Driver name*: The driver for the server, depending on the server type:
+
*** mysql
*** postgresql
//*** mariadb
//*** mssql
//*** db2
//*** oracle
//*** sybase
+
** *Dialect class* (`KIE_SERVER_EXTERNALDB_DIALECT`): The Hibernate dialect for the server, depending on the server type:
+
*** `org.hibernate.dialect.MySQL5Dialect` (used for MySQL and MariaDB)
*** `org.hibernate.dialect.PostgreSQLDialect`
//*** `org.hibernate.dialect.SQLServer2012Dialect` (used for MS SQL)
//*** `org.hibernate.dialect.DB2Dialect`
//*** `org.hibernate.dialect.Oracle12cDialect`
//*** `org.hibernate.dialect.SybaseASE15Dialect`
+
[NOTE]
====
In {PRODUCT} {PRODUCT_VERSION}, when you deploy an environment using the Ansible Broker, only MySQL and PostgreSQL external database servers are supported.
====
+
//. If you created a custom image for using an external database server other than MySQL or PostgreSQL, as described in <<externaldb-build-proc_{context}>>, you must also set the *Process Server Image Stream Name* parameter (under the *Process Server* heading) to the following value:
//+
//** For Microsoft SQL Server, `{PRODUCT_INIT}{ENTERPRISE_VERSION_SHORT}-kieserver-mssql-openshift`
//** For MariaDB, `{PRODUCT_INIT}{ENTERPRISE_VERSION_SHORT}-kieserver-mariadb-openshift`
//** For IBM DB2, `{PRODUCT_INIT}{ENTERPRISE_VERSION_SHORT}-kieserver-db2-openshift`
//** For Oracle Database, `{PRODUCT_INIT}{ENTERPRISE_VERSION_SHORT}-kieserver-oracle-openshift`
//** For Sybase, `{PRODUCT_INIT}{ENTERPRISE_VERSION_SHORT}-kieserver-sybase-openshift`
//+
. Click *Next* to commence deployment.
+
After deploying the environment, you can access it using the HTTPS routes displayed in the OpenShift Web console. HTTP requests are redirected to HTTPS.
IMPORTANT: After deploying the service, you can scale the {KIE_SERVER} pods up and down as necessary. Do not scale the database pods.
| 58.925926
| 368
| 0.760947
|
92cb5b07f3e7f5b2e900ac878a3bd3a32959901b
| 5,378
|
adoc
|
AsciiDoc
|
README.adoc
|
6paklata/emerald-wallet
|
a89780895e7035d5e269e29243598f87c27d2bae
|
[
"Apache-2.0"
] | null | null | null |
README.adoc
|
6paklata/emerald-wallet
|
a89780895e7035d5e269e29243598f87c27d2bae
|
[
"Apache-2.0"
] | null | null | null |
README.adoc
|
6paklata/emerald-wallet
|
a89780895e7035d5e269e29243598f87c27d2bae
|
[
"Apache-2.0"
] | null | null | null |
= Emerald Wallet for Desktop
image:https://img.shields.io/circleci/project/github/ETCDEVTeam/emerald-wallet/master.svg?label=CircleCI[CircleCI branch]
image:https://img.shields.io/travis/ETCDEVTeam/emerald-wallet.svg?label=Travis[Travis (.org)]
image:https://img.shields.io/appveyor/ci/ETCDEVTeam/emerald-wallet.svg?label=AppVeyor[AppVeyor]
image:https://codecov.io/gh/ETCDEVTeam/emerald-wallet/branch/master/graph/badge.svg[Codecov, link=https://codecov.io/gh/ETCDEVTeam/emerald-wallet]
image:https://img.shields.io/github/license/ETCDEVTeam/emerald-wallet.svg?maxAge=2592000["License", link="https://github.com/ETCDEVTeam/emerald-wallet/blob/master/LICENSE"]
Emerald is a free open source multiplatform desktop cryptocurrency wallet, supports Ethereum, Ethereum Classic and ERC-20 tokens.
Features:
- *Full Node* - automatically connects to a full node running on a local machine
- *Light Node* - when a full node is not feasible, it uses a secure endpoint provided by our servers. All private keys are stored on local machine.
- *Hardware Wallets* - supports hardware wallets such as Ledger Nano to keep private keys in a most safe way. It's recommended to use Ledger Nano S
- *ERC-20 Tokens* - supports major ERC-20 tokens out of box, and a user can enable support for any ERC-20 compatible token in the Wallet by providing an address
- *Secure Key Storage* = stores all private keys either in a local secure vault. Private keys are encrypted and are not transferred to another node even when Light mode is used
- *Multiplatform* - compatible with major OSes, and builds for Windows, Macos and Linux are provided to download
Official website: https://emeraldwallet.io
== Development instructions
The recommended way to assert that you are using the correct version of node is to use https://github.com/nodenv/nodenv[nodenv],
which will shim the `node` and `npm` commands and assert that the `local` version is set to what is
specified in https://github.com/ETCDEVTeam/emerald-wallet/blob/master/.node-version[.node-version].
Once nodenv is installed:
```shell
$ nodenv install
```
The required version of `node` >= `v8`.
=== Dependencies
==== Libs
You need `libudev`, `libusb-1.0-0`, `libusb-1.0-0-dev` installed on Linux
Ubuntu:
```
sudo apt-get install libudev-dev libusb-1.0-0 libusb-1.0-0-dev
```
==== NPM packages
This will install dependencies.
```shell
$ lerna bootstrap
```
==== Emerald Console
If you haven't got `emerald` already installed on your system, you can execute `./dependencies.sh`
to automatically `rustup` and use `cargo` to install `emerald-cli` and move it to the
project's base dir. Note: this command is idempotent __for rust and cargo__ (it won't
try to install cargo if you've already got it), but it will
use cargo's `-f` flag to force install `emerald-cli` and copy that version to the
project directory.
=== Run for development
_Terminal 1_
```shell
= This will begin a live-watching compiler for ./src/ and ./electron/ folders
$ yarn build:all:dev
```
_Terminal 2_
```shell
= This will begin the simulated electron app pulling from those compiled sources.
$ yarn start:electron
```
=== Logs
Electron and Emerald logs persisted in:
* OSX: `~/Library/Logs/EmeraldWallet/log.log`
* Linux: `~/.config/EmeraldWallet/log.log`
=== Building alternatively
You can also use a variety of alternate build options, eg.
```
$ npm run build:all:nowatch
$ npm run build:web
```
=== Building distributions in development
You can run a distribution build in your development environment. Assuming
you've already compiled `./src/` and `./electron/`, you'll be able to run:
```
$ npm run dist
```
This command will build for _your current system_. Note that there are configurations for
several systems (OSX, Linux, Windows) specified in the `"build"` field of `package.json`, but the `dist` command will by default only build for the system it's on.
Note: If you're developing on OSX and have a developer signing identity on your machine, you can
optionally disable that with `CSC_IDENTITY_AUTO_DISCOVERY=false`.
OSX is also able to build for Linux. Add `-ml` to that raw command to build for
both OSX and Linux at the same time.
=== Troubleshooting
Some preliminary things to try in case you run into issues:
Clear out any persisted settings or userdata from previous trials
* OSX: `~/Library/Application Support/EmeraldWallet`
* Linux: `~/.config/EmeraldWallet`
* Windows: `%APPDATA%\EmeraldWallet`
== Run tests
```
npm run test:watch
```
or for single run:
```
npm test
```
== Contact
=== Submit Bug
https://github.com/ETCDEVTeam/emerald-wallet/issues/new
=== Contact Support
https://emeraldwallet.io/support
=== Chat
Chat with us via Gitter: https://gitter.im/etcdev-public/Lobby
=== Submit Security Issue
Email to security@etcdevteam.com
== License
Copyright 2019 ETCDEV GmbH
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 32.203593
| 176
| 0.759576
|
2ad64aa04226a3dbab49e037b760bf58fd405d95
| 1,990
|
adoc
|
AsciiDoc
|
documentation/book/ref-zookeeper-node-configuration.adoc
|
serrss/strimzi-kafka-operator
|
c04b1d57bc4f0cd23153227bbc542adb72338a60
|
[
"Apache-2.0"
] | null | null | null |
documentation/book/ref-zookeeper-node-configuration.adoc
|
serrss/strimzi-kafka-operator
|
c04b1d57bc4f0cd23153227bbc542adb72338a60
|
[
"Apache-2.0"
] | 1
|
2020-09-23T19:24:49.000Z
|
2020-09-23T19:24:49.000Z
|
documentation/book/ref-zookeeper-node-configuration.adoc
|
serrss/strimzi-kafka-operator
|
c04b1d57bc4f0cd23153227bbc542adb72338a60
|
[
"Apache-2.0"
] | 1
|
2020-10-15T13:56:40.000Z
|
2020-10-15T13:56:40.000Z
|
// Module included in the following assemblies:
//
// assembly-zookeeper-node-configuration.adoc
[id='ref-zookeeper-node-configuration-{context}']
= Zookeeper configuration
Zookeeper nodes are configured using the `config` property in `Kafka.spec.zookeeper`.
This property contains the Zookeeper configuration options as keys.
The values can be described using one of the following JSON types:
* String
* Number
* Boolean
Users can specify and configure the options listed in {ApacheZookeeperConfig} with the exception of those options which are managed directly by {ProductName}.
Specifically, all configuration options with keys equal to or starting with one of the following strings are forbidden:
* `server.`
* `dataDir`
* `dataLogDir`
* `clientPort`
* `authProvider`
* `quorum.auth`
* `requireClientAuthScheme`
When one of the forbidden options is present in the `config` property, it is ignored and a warning message is printed to the Custer Operator log file.
All other options are passed to Zookeeper.
IMPORTANT: The Cluster Operator does not validate keys or values in the provided `config` object.
When invalid configuration is provided, the Zookeeper cluster might not start or might become unstable.
In such cases, the configuration in the `Kafka.spec.zookeeper.config` object should be fixed and the Cluster Operator will roll out the new configuration to all Zookeeper nodes.
Selected options have default values:
* `timeTick` with default value `2000`
* `initLimit` with default value `5`
* `syncLimit` with default value `2`
* `autopurge.purgeInterval` with default value `1`
These options will be automatically configured when they are not present in the `Kafka.spec.zookeeper.config` property.
.An example showing Zookeeper configuration
[source,yaml,subs="attributes+"]
----
apiVersion: {KafkaApiVersion}
kind: Kafka
spec:
kafka:
# ...
zookeeper:
# ...
config:
autopurge.snapRetainCount: 3
autopurge.purgeInterval: 1
# ...
----
| 34.310345
| 177
| 0.768844
|
84e2a6684aefe708c5704da74e8ee8c6e1dd821a
| 2,308
|
adoc
|
AsciiDoc
|
modules/virt-creating-vddk-image.adoc
|
tradej/openshift-docs
|
584ce30d22ccc79d822da825559fb7752b41b1f8
|
[
"Apache-2.0"
] | 1
|
2021-05-07T00:22:43.000Z
|
2021-05-07T00:22:43.000Z
|
modules/virt-creating-vddk-image.adoc
|
tradej/openshift-docs
|
584ce30d22ccc79d822da825559fb7752b41b1f8
|
[
"Apache-2.0"
] | 1
|
2018-03-02T01:17:52.000Z
|
2018-03-02T01:17:52.000Z
|
modules/virt-creating-vddk-image.adoc
|
tradej/openshift-docs
|
584ce30d22ccc79d822da825559fb7752b41b1f8
|
[
"Apache-2.0"
] | 1
|
2020-08-04T01:36:59.000Z
|
2020-08-04T01:36:59.000Z
|
// Module included in the following assemblies:
//
// * virt/virtual_machines/importing_vms/virt-importing-vmware-vm.adoc
[id="virt-creating-vddk-image_{context}"]
= Creating and using a VDDK image
You can download the VMware Virtual Disk Development Kit (VDDK), build a VDDK image, and push the VDDK image to your image registry. You then add the VDDK image to the `v2v-vmware` config map.
.Prerequisites
* You must have access to an {product-title} internal image registry or a secure external registry.
.Procedure
. Create and navigate to a temporary directory:
+
[source,terminal]
----
$ mkdir /tmp/<dir_name> && cd /tmp/<dir_name>
----
. In a browser, navigate to link:https://code.vmware.com/home[VMware code] and click *SDKs*.
. Under *Compute Virtualization*, click *Virtual Disk Development Kit (VDDK)*.
. Select the VDDK version that corresponds to your VMware vSphere version, for example, VDDK 7.0 for vSphere 7.0, click *Download*, and then save the VDDK archive in the temporary directory.
. Extract the VDDK archive:
+
[source,terminal]
----
$ tar -xzf VMware-vix-disklib-<version>.x86_64.tar.gz
----
. Create a `Dockerfile`:
+
[source,terminal]
----
$ cat > Dockerfile <<EOF
FROM busybox:latest
COPY vmware-vix-disklib-distrib /vmware-vix-disklib-distrib
RUN mkdir -p /opt
ENTRYPOINT ["cp", "-r", "/vmware-vix-disklib-distrib", "/opt"]
EOF
----
. Build the image:
+
[source,terminal]
----
$ podman build . -t <registry_route_or_server_path>/vddk:<tag> <1>
----
<1> Specify your image registry:
+
* For an internal {product-title} registry, use the internal registry route, for example, `image-registry.openshift-image-registry.svc:5000/openshift/vddk:<tag>`.
* For an external registry, specify the server name, path, and tag, for example, `server.example.com:5000/vddk:<tag>`.
. Push the image to the registry:
+
[source,terminal]
----
$ podman push <registry_route_or_server_path>/vddk:<tag>
----
. Ensure that the image is accessible to your {VirtProductName} environment.
. Edit the `v2v-vmware` config map in the *openshift-cnv* project:
+
[source,terminal]
----
$ oc edit configmap v2v-vmware -n openshift-cnv
----
. Add the `vddk-init-image` parameter to the `data` stanza:
+
[source,yaml]
----
...
data:
vddk-init-image: <registry_route_or_server_path>/vddk:<tag>
----
| 28.85
| 192
| 0.72617
|
6bf3b49b50c3b6b1fcf310b46dcbc77e28048d0f
| 336
|
adoc
|
AsciiDoc
|
pages/am/3.x/installation-guide/configuration/installation-guide-management-api-technical-api.adoc
|
peschmae/gravitee-docs
|
a0150d342dee47bad263a21c46d76ceac70f101b
|
[
"Apache-2.0"
] | 19
|
2016-04-06T16:08:06.000Z
|
2022-02-23T13:47:54.000Z
|
pages/am/3.x/installation-guide/configuration/installation-guide-management-api-technical-api.adoc
|
peschmae/gravitee-docs
|
a0150d342dee47bad263a21c46d76ceac70f101b
|
[
"Apache-2.0"
] | 444
|
2015-12-15T10:13:01.000Z
|
2022-03-31T15:46:06.000Z
|
pages/am/3.x/installation-guide/configuration/installation-guide-management-api-technical-api.adoc
|
peschmae/gravitee-docs
|
a0150d342dee47bad263a21c46d76ceac70f101b
|
[
"Apache-2.0"
] | 83
|
2015-12-15T12:20:19.000Z
|
2022-03-28T01:56:35.000Z
|
= Configure the AM API internal API
:page-sidebar: am_3_x_sidebar
:page-permalink: am/current/am_installguide_management_api_technical_api.html
:page-folder: am/installation-guide
:page-toc: false
:page-layout: am
:gravitee-component-name: AM API
:node_port: 18093
include::../../../../node/partial/technical-api.adoc[leveloffset=+1]
| 28
| 77
| 0.779762
|
3510a0cbab09c1ad78ec9d62246e4939fa95a4e8
| 4,338
|
adoc
|
AsciiDoc
|
docs/project_management/meetings/meeting_3+4.adoc
|
RaphaelNeubert/I5_GPS_Tracks
|
a65dc6ea496d34c65820ce1e1893846d9f4ed970
|
[
"CC-BY-4.0"
] | 7
|
2021-11-26T06:57:49.000Z
|
2022-01-31T09:59:17.000Z
|
docs/project_management/meetings/meeting_3+4.adoc
|
RaphaelNeubert/I5_GPS_Tracks
|
a65dc6ea496d34c65820ce1e1893846d9f4ed970
|
[
"CC-BY-4.0"
] | 75
|
2021-12-19T16:07:00.000Z
|
2022-03-31T10:21:31.000Z
|
docs/project_management/meetings/meeting_3+4.adoc
|
RaphaelNeubert/I5_GPS_Tracks
|
a65dc6ea496d34c65820ce1e1893846d9f4ed970
|
[
"CC-BY-4.0"
] | 1
|
2021-12-13T17:29:58.000Z
|
2021-12-13T17:29:58.000Z
|
== Protokoll Meeting 3 und 4
|===
2+| *Thema* +
Fragen zum Belegprojekt mit dem Themensteller Mario Neugebauer.
| *Datum* +
28.11. 2021 18:00 Uhr +
30.11. 2021 15:00 Uhr
| *Ort* +
Meeting 3: Element (online) +
Meeting 4: BigBlueButton (online)
| *Anwesend (Meeting 3)* +
Anton Peschel +
Alex Schechtel +
Raphael Neubert +
Richard Michel +
Quang Duy Pham +
Ludwig Schönthier +
Tom Nicolai
| *Abwesend (Meeting 3)* +
Felix Reuß
| *Anwesend (Meeting 4)* +
Mario Neugebauer +
Anton Peschel +
Alex Schechtel +
Raphael Neubert +
Richard Michel +
Quang Duy Pham +
Felix Reuß +
Ludwig Schönthier +
Tom Nicolai
| *Abwesend (Meeting 4)* +
|*Leitung der Besprechung* +
Anton Peschel
|*Protokollführung* +
Felix Reuß
|===
=== Fragen zum Belegprojekt
==== 1. Fragen zum Roboter
** 1.1 Welche Aufgaben des Roboters haben Prorität? +
** 1.2 Wie werden die Tests ablaufen?
** 1.3 Soll der Roboter während der Fahrt mit dem Server kommunizieren?
** 1.4 Wie kommunizieren wir mit dem Roboter, bzw. was bietet der Roboter zur Kommunikation an? +
===== Antworten zu 1.: +
* -> Die höchste Priorität des Projektes liegt beim korrekten Aufnehmen der GPS-Tracks und dem Vornehmen von Korrekturen auf der Karte über die App +
-> Eine untergeordnete Priorität hat die Live-Verfolgung
* -> Der Roboter an sich wird eventuell für Transport-Aufgaben (Transportieren von Messgeräten) genutzt werden +
-> Er könnte benutzt werden um Bilder oder Wärmebilder zu machen +
-> Eine vorstellbare Aufgabe wäre das systematische Abfahren eines bestimmten Bereiches, wobei Müll erkannt und fotografiert werden könnte und anschließend aufgenommen würde
* -> Der Server könnte direkt am Roboter über einen Rechner angebracht sein +
-> Optimal wäre ein zentraler Server/ Speicherung in der Cloud +
-> Damit wird die App verbunden und die GPS-Tracks werden auf dem Server dann gespeichert +
-> Die GPS-Tracks werden dann vorgegeben und Abgefahren und dabei aufgezeichnet +
==== 2. Fragen zum GPS
** 2.1 Wie sieht es mit Datenschutz aus (GPS-Daten)? +
→ Abfrage zur Einverständniserklärung zum Datenschutz?
** 2.2 Einfügen von besonderen Punkten innerhalb der GPS-Tracks, an welchen Aufgaben bearbeitet werden sollen. +
→ welche besonderen Punkte sind gemeint?
** 2.3 Soll der GPS-Track überall anwendbar sein können? (Weltkarte)
** 2.4 Wie soll der GPS-Track damit umgehen, falls die App auf dem Gerät kein Empfang bekommt/ Wie kann das GPS korrigiert werden?
** 2.5 In welchem Format sollen die GPS-Tracks gespeichert werden? (csv, sqlite) +
===== Antworten zu 2.: +
* -> Es sollte nur eine öffentliche IP-Adresse verwendet werden, wenn wir die Sicherheit der Daten gewährleisten können +
-> Am besten werden keine personenbezogenen Daten abgefragt/genutzt +
-> Es werden keine genauen Zeiten/ Geschwindigkeiten aufgenommen +
-> eventuell werden die GPS-Tracks unabhängig vom Nutzer gespeichert +
* -> Beispiel: Roboter soll an bestimmtem geografischen Punkt Foto machen +
* -> Die Strecke wird zum korrigieren der GPS-Daten mehrfach abgelaufen +
-> Punkte werden auf der Karte eingeblendet und können manuell verschoben werden +
* -> Als Format bietet sich GPX an, andere Formate sind mit Absprache möglich +
==== 3. Fragen zum Server
* 3.1 Was soll der Server alles können?
* 3.2 Soll der Server eine öffentliche IP haben?
===== Antworten zu 3.: +
* siehe <<Antworten zu 1.:>>
-> Überschneidungen mit Datenschutz/ Kommunikation mit Server +
==== 4. Fragen zur App
* 4.1 Soll die Rest API nur mit einem Login möglich sein?
* 4.2 Welche Technologie ist beim Programmieren gewünscht?
* 4.3 Wer wird später mit den GPS-Tracks bzw. mit der App arbeiten? +
===== Antworten zu 4.: +
* -> Wenn die IP des Servers öffentlich ist, ist ein login sinvoll/notwendig, falls nicht dann ist es nicht notwendig.
* -> Als Programmiersprache für den Server sollte Java oder Python verwendet werden, für die App steht uns freie Wahl +
* -> Nutzer der App könnten Studenten innerhalb eines studentischen Forschungsprojektes werden +
=== Nächste Meetings +
Die Entwickler und Tester treffen sich am Freitag dem 03.12. 2021 um 11:00 Uhr um erste technische Details zu klären. +
Die Analysten werden sich mit dem Projektleiter ebenfalls am Freitag treffen, die Uhrzeit ist noch offen. +
Dort werden Risikopriorisierung un erste Use-Cases im Vordergrund stehen.
| 35.557377
| 173
| 0.752651
|
e2507c1cdfcc96a7d9a171f0720c95be6fddc8c7
| 5,063
|
adoc
|
AsciiDoc
|
documentation/modules/ref-standard-infra-config-example.adoc
|
anukritijha/enmasse
|
1ee76768d58ba71612a0fbafb215c4b4f41609ea
|
[
"Apache-2.0"
] | 1
|
2020-07-09T12:07:51.000Z
|
2020-07-09T12:07:51.000Z
|
documentation/modules/ref-standard-infra-config-example.adoc
|
anukritijha/enmasse
|
1ee76768d58ba71612a0fbafb215c4b4f41609ea
|
[
"Apache-2.0"
] | 1
|
2019-11-27T14:38:41.000Z
|
2019-11-27T14:45:31.000Z
|
documentation/modules/ref-standard-infra-config-example.adoc
|
anukritijha/enmasse
|
1ee76768d58ba71612a0fbafb215c4b4f41609ea
|
[
"Apache-2.0"
] | null | null | null |
// Module included in the following assemblies:
//
// assembly-infrastructure-configuration.adoc
[id='ref-standard-infra-config-example-{context}']
= Standard infrastructure configuration example
The following example of a standard infrastructure configuration file shows the various settings that can be specified.
[source,yaml,options="nowrap",subs="attributes+"]
----
apiVersion: admin.enmasse.io/v1beta1
kind: StandardInfraConfig
metadata:
name: myconfig
spec:
version: "{EnMasseVersion}" <1>
admin: <2>
resources:
memory: 256Mi
broker: <3>
resources:
cpu: 0.5
memory: 2Gi
storage: 100Gi
addressFullPolicy: PAGE
router: <4>
resources:
cpu: 1
memory: 256Mi
linkCapacity: 1000
minReplicas: 1
policy:
maxConnections: 1000
maxConnectionsPerHost: 1
maxConnectionsPerUser: 10
maxSessionsPerConnection: 10
maxSendersPerConnection: 5
maxReceiversPerConnection: 5
podTemplate: <5>
spec:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 1
preference:
matchExpressions:
- key: e2e-az-EastWest
operator: In
values:
- e2e-az-East
- e2e-az-West
----
<1> Specifies the {ProductName} version used. When upgrading, {ProductName} uses
this field to determine whether to upgrade the infrastructure to the requested version. If omitted,
the version is assumed to be the same version as the controllers reading the configuration.
<2> Specifies the settings you can configure for the `admin` components.
<3> Specifies the settings you can configure for the `broker` components. Changing
the `.broker.resources.storage` setting does not configure the existing broker storage size.
<4> Specifies the settings you can configure for the `router` components.
<5> For `admin`, `broker`, and `router` components you can configure the following `podTemplate` elements:
* `metadata.labels`
* `spec.priorityClassName`
* `spec.tolerations`
* `spec.affinity`
* `spec.containers.resources`
* `spec.containers.readinessProbe`
* `spec.containers.livenessProbe`
* `spec.containers.env`
+
All other `podTemplate` elements are ignored. For more information about these elements, see the {KubePlatform} documentation in the following _Related links_ section.
+
For more information about how to set a readiness probe timeout, see link:{BookUrlBase}{BaseProductVersion}{BookNameUrl}#ref-standard-infra-config-override-probe-timeout-messaging[Overriding the readiness probe timing for standard infrastructure configuration].
For detailed information about all of the available standard infrastructure configuration fields, see the link:{BookUrlBase}{BaseProductVersion}{BookNameUrl}#ref-standard-infra-config-fields-messaging[Standard infrastructure configuration fields table].
.Related links
ifeval::["{cmdcli}" == "oc"]
* For more information about the `podTemplate` settings, see the following {KubePlatform} documentation:
** link:https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html-single/cluster_administration/index#admin-guide-priority-preemption[Pod priority]
** link:https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html-single/cluster_administration/index#taints-and-tolerations[Taints and tolerations]
** link:https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html-single/cluster_administration/index#admin-guide-sched-pod-affinity[Affinity and anti-affinity]
** link:https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html-single/developer_guide/dev-guide-application-health[Application health]
** link:https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html-single/developer_guide/dev-guide-compute-resources#dev-compute-resources[Compute resources]
** link:https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html-single/developer_guide/index#list-environment-variables[Environment variables]
endif::[]
ifeval::["{cmdcli}" == "kubectl"]
* For more information about the `podTemplate` settings, see the following {KubePlatform} documentation:
** link:https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/[Pod priority]
** link:https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/[Taints and tolerations]
** link:https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity[Affinity and anti-affinity]
** link:https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes[Liveness and readiness probes (application health)]
** link:https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/[Compute resources]
** link:https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/[Environment variables]
endif::[]
| 50.128713
| 261
| 0.765752
|
3fb2c6d4df68307c6e9a189b150190155e461968
| 1,573
|
adoc
|
AsciiDoc
|
core/src/test/resources/issues/gh-190-cypher-directive-with-passThrough.adoc
|
magaton/neo4j-graphql-java
|
9330ab4a8bf15135499282e99be4349480a01177
|
[
"Apache-2.0"
] | 90
|
2018-09-02T23:00:21.000Z
|
2022-02-28T02:06:17.000Z
|
core/src/test/resources/issues/gh-190-cypher-directive-with-passThrough.adoc
|
magaton/neo4j-graphql-java
|
9330ab4a8bf15135499282e99be4349480a01177
|
[
"Apache-2.0"
] | 217
|
2018-09-16T08:13:03.000Z
|
2022-03-29T11:38:04.000Z
|
core/src/test/resources/issues/gh-190-cypher-directive-with-passThrough.adoc
|
magaton/neo4j-graphql-java
|
9330ab4a8bf15135499282e99be4349480a01177
|
[
"Apache-2.0"
] | 40
|
2018-11-14T10:49:41.000Z
|
2022-02-14T03:11:30.000Z
|
:toc:
= Github Issue #190: cypher directive with passThrough
== Schema
[source,graphql,schema=true]
----
type Query {
## queriesRootQuery
getUser(userId: ID): UserData
@cypher(statement: "MATCH (u:User{id: $userId})-[:CREATED_MAP]->(m:Map) WITH collect({id: m.id, name: m.name}) AS mapsCreated, u RETURN {name: u.name, mapsCreated: mapsCreated}", passThrough:true)
}
type UserData {
name: String
mapsCreated: [MapsCreated]
}
type MapsCreated {
id: String
name: String
}
----
[source,cypher,test-data=true]
----
CREATE
(u1:User{ id: 'u1', name: 'user 1' }),
(u2:User{ id: 'u2', name: 'user 2' }),
(m1:Map{ id: 'm1', name: 'v1' }),
(m2:Map{ id: 'm2', name: 'v2' }),
(m3:Map{ id: 'm3', name: 'v3' }),
(u1)-[:CREATED_MAP]->(m1),
(u1)-[:CREATED_MAP]->(m2),
(u2)-[:CREATED_MAP]->(m3);
----
== Tests
=== Query projected data
.GraphQL-Query
[source,graphql]
----
query getUser {
user: getUser(userId: "u1") {
name
mapsCreated { id }
}
}
----
.Cypher Params
[source,json]
----
{
"userUserId" : "u1"
}
----
.GraphQL-Response
[source,json,response=true,ignore-order]
----
{
"user" : {
"name" : "user 1",
"mapsCreated" : [ {
"id" : "m1"
}, {
"id" : "m2"
} ]
}
}
----
.Cypher
[source,cypher]
----
UNWIND apoc.cypher.runFirstColumnSingle('WITH $userId AS userId MATCH (u:User{id: $userId})-[:CREATED_MAP]->(m:Map) WITH collect({id: m.id, name: m.name}) AS mapsCreated, u RETURN {name: u.name, mapsCreated: mapsCreated}', {
userId: $userUserId
}) AS user
RETURN user AS user
----
| 18.505882
| 225
| 0.58932
|
931c83a3dda5cf74985c351296fde749ed52d442
| 14,243
|
adoc
|
AsciiDoc
|
spring-security/src/docs/asciidoc/zh-cn/_includes/about/authentication/password-storage.adoc
|
jcohy/jcohy-docs
|
3b890e2aa898c78d40182f3757e3e840cf63d38b
|
[
"Apache-2.0"
] | 19
|
2020-06-04T07:46:20.000Z
|
2022-03-23T01:46:40.000Z
|
spring-security/src/docs/asciidoc/zh-cn/_includes/about/authentication/password-storage.adoc
|
jcohy/jcohy-docs
|
3b890e2aa898c78d40182f3757e3e840cf63d38b
|
[
"Apache-2.0"
] | 15
|
2020-06-11T09:38:15.000Z
|
2022-01-04T16:04:53.000Z
|
spring-security/src/docs/asciidoc/zh-cn/_includes/about/authentication/password-storage.adoc
|
jcohy/jcohy-docs
|
3b890e2aa898c78d40182f3757e3e840cf63d38b
|
[
"Apache-2.0"
] | 4
|
2020-11-24T11:03:19.000Z
|
2022-02-28T07:21:23.000Z
|
[[authentication-password-storage]]
= 密码存储
Spring Security 的 `PasswordEncoder` 接口用于对密码执行单向转换,以允许安全地存储密码. 鉴于 `PasswordEncoder` 是一种单向转换,
因此当密码转换需要采用两种方式 (即向数据库存储进行身份验证的凭据) 时,则不打算使用它. 通常,`PasswordEncoder` 是在身份验证时与用户提供的密码和存储的密码进行比较.
[[authentication-password-storage-history]]
== 密码存储历史
多年来,存储密码的标准机制在不断的发展. 最初,密码以纯文本格式存储. 假设密码是安全的,因为密码已保存在访问它所需的凭据中.
但是,恶意用户能够使用 SQL 注入 这样的攻击方式来获取用户名和密码的大量的 "data dumps". 随着越来越多的用户凭证成为公共安全,保护用户密码至关重要.
接下来,开发人员在通过诸如 SHA-256 之类的单向哈希算法对密码进行加密后存储. 当用户尝试进行身份验证时,会将加密后的密码与他们输入的密码的散列值进行比较. 这意味着系统仅需要存储加密后的密码. 如果发生了密码泄漏,则仅暴露密码的一种哈希方式.
由于哈希算法是单向的,所以给定哈希值,反向计算密码很难. 因此想找出系统中的每个密码都很困难而且不值得. 为了破解密码,恶意用户决定创建称为 https://en.wikipedia.org/wiki/Rainbow_table[彩虹表] 的查找表. 他们不必每次都猜测每个密码,而是计算一次密码并将其存储在查找表中.
为了预防 彩虹表 的破解攻击,鼓励开发人员使用加盐的密码. 不仅将密码用作哈希函数的输入,还将为每个用户的密码生成随机字节 (称为 `salt` ) . 盐和用户密码将通过散列函数运行,从而产生唯一的散列. 盐将以明文形式与用户密码一起存储.
然后,当用户尝试进行身份验证时,会将哈希密码与存储的盐的哈希值和他们输入的密码进行比较. 唯一的盐意味着 `彩虹表` 不再有效,因为每种盐和密码组合的哈希值都不同.
在现代,我们意识到加密哈希 (例如 SHA-256) 不再安全. 原因是使用现代硬件,我们可以每秒执行数十亿次哈希计算. 这意味着我们可以轻松地分别破解每个密码.
现在鼓励开发人员利用自适应单向函数来存储密码. 具有自适应单向函数的密码验证意味着会消耗大量资源 (即 CPU,内存等) .
自适应单向函数允许配置 "work factor" (工作因子) ,该因数会随着硬件的改进而增加. 建议将 "work factor" 调整为大约1秒钟,以验证系统上的密码. 这种权衡使攻击者难以破解密码,但代价却不高,这给您自己的系统带来了沉重负担.
Spring Security 试图为 "work factor" 提供一个良好的起点,但是鼓励用户为自己的系统自定义 "work factor" ,因为不同系统之间的性能会有很大差异. 具有自适应单向函数的有 https://en.wikipedia.org/wiki/Bcrypt[bcrypt],
https://en.wikipedia.org/wiki/PBKDF2[PBKDF2],
https://en.wikipedia.org/wiki/Scrypt[scrypt],
和 https://en.wikipedia.org/wiki/Argon2[Argon2]
由于自适应单向函数会占用大量资源,因此为每个请求验证用户名和密码都会大大降低应用程序的性能. Spring Security (或任何其他库) 无法采取任何措施来加快密码的验证速度,因为通过增加验证资源的强度来获得安全性.
鼓励用户将长期凭证 (即用户名和密码) 替换为短期凭证 (即会话,OAuth令牌等) . 可以快速验证短期凭证,而不会损失任何安全性.
[[authentication-password-storage-dpe]]
== DelegatingPasswordEncoder
在 Spring Security 5.0 之前,默认的 `PasswordEncoder` 是 `NoOpPasswordEncoder`,它需要纯文本密码. 根据 <<authentication-password-storage-history,"密码历史记录">> 所述,您可能希望默认的 `PasswordEncoder` 是 `BCryptPasswordEncoder`. 但是,这忽略了三个现实问题:
- 有许多使用旧密码编码的应用程序无法轻松迁移
- 密码存储的最佳做法将再次更改.
- 作为一个框架,Spring Security 不能经常进行重大更改
相反,Spring Security 引入了 `DelegatingPasswordEncoder`,它通过以下方式解决了所有问题:
- 确保使用建议的对当前密码密码进行编码
- 允许以新的和旧的格式验证密码
- 允许将来升级编码
您可以使用 `PasswordEncoderFactories` 轻松构造 `DelegatingPasswordEncoder` 的实例.
.Create Default DelegatingPasswordEncoder
====
.Java
[source,java,role="primary"]
----
PasswordEncoder passwordEncoder =
PasswordEncoderFactories.createDelegatingPasswordEncoder();
----
.Kotlin
[source,kotlin,role="secondary"]
----
val passwordEncoder: PasswordEncoder = PasswordEncoderFactories.createDelegatingPasswordEncoder()
----
====
或者,您可以创建自己的自定义实例.
.Create Custom DelegatingPasswordEncoder
====
.Java
[source,java,role="primary"]
----
String idForEncode = "bcrypt";
Map encoders = new HashMap<>();
encoders.put(idForEncode, new BCryptPasswordEncoder());
encoders.put("noop", NoOpPasswordEncoder.getInstance());
encoders.put("pbkdf2", new Pbkdf2PasswordEncoder());
encoders.put("scrypt", new SCryptPasswordEncoder());
encoders.put("sha256", new StandardPasswordEncoder());
PasswordEncoder passwordEncoder =
new DelegatingPasswordEncoder(idForEncode, encoders);
----
.Kotlin
[source,kotlin,role="secondary"]
----
val idForEncode = "bcrypt"
val encoders: MutableMap<String, PasswordEncoder> = mutableMapOf()
encoders[idForEncode] = BCryptPasswordEncoder()
encoders["noop"] = NoOpPasswordEncoder.getInstance()
encoders["pbkdf2"] = Pbkdf2PasswordEncoder()
encoders["scrypt"] = SCryptPasswordEncoder()
encoders["sha256"] = StandardPasswordEncoder()
val passwordEncoder: PasswordEncoder = DelegatingPasswordEncoder(idForEncode, encoders)
----
====
[[authentication-password-storage-dpe-format]]
=== 密码存储格式
密码的一般格式是:
.DelegatingPasswordEncoder 存储格式
====
[source,text,attrs="-attributes"]
----
{id}encodedPassword
----
====
`id` 是用于查找应该使用那个 `PasswordEncoder` 的标识符,`encodedPassword` 是所选 `PasswordEncoder` 的加密原始密码后得到的密码. `id` 必须位于密码的开头,以 `{` 开头,以 `}` 结束. 如果找不到 `id`,则 `id` 将为 `null`. 例如,以下可能是使用不同 `id` 编码的密码列表. 所有原始密码都是 "password"..
[source,text]
----
{bcrypt}$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM/BG // <1>
{noop}password // <2>
{pbkdf2}5d923b44a6d129f3ddf3e3c8d29412723dcbde72445e8ef6bf3b508fbf17fa4ed4d6b99ca763d8dc // <3>
{scrypt}$e0801$8bWJaSu2IKSn9Z9kM+TPXfOc/9bdYSrN1oD9qfVThWEwdRTnO7re7Ei+fUZRJ68k9lTyuTeUp4of4g24hHnazw==$OAOec05+bXxvuu/1qZ6NUR+xQYvYv7BeL1QxwRpY5Pc= // <4>
{sha256}97cde38028ad898ebc02e690819fa220e88c62e0699403e94fff291cfffaf8410849f27605abcbc0 // <5>
----
<1> 第一个密码是 id 为 `bcrypt` 的 `PasswordEncoder`,encodedPassword 为 `$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM/BG`. 匹配时会委托给 `BCryptPasswordEncoder`
<2> 第二个密码是 id 为 `noop` 的 `PasswordEncoder`,encodedPassword 为 `password`. 匹配时会委托给 `NoOpPasswordEncoder`
<3> 第三个密码是 id 为 `pbkdf2` 的 `PasswordEncoder`,encodedPassword 为 `5d923b44a6d129f3ddf3e3c8d29412723dcbde72445e8ef6bf3b508fbf17fa4ed4d6b99ca763d8dc`. 匹配时会委托给 `Pbkdf2PasswordEncoder`
<4> 第四个密码是 id 为 `scrypt` 的 `PasswordEncoder`,encodedPassword 为 `$e0801$8bWJaSu2IKSn9Z9kM+TPXfOc/9bdYSrN1oD9qfVThWEwdRTnO7re7Ei+fUZRJ68k9lTyuTeUp4of4g24hHnazw==$OAOec05+bXxvuu/1qZ6NUR+xQYvYv7BeL1QxwRpY5Pc=` ,匹配时它将委托给 `SCryptPasswordEncoder`
<5> 最后一个密码是 id 为 `sha256` 的 `PasswordEncoder` ,encodedPassword 为 `97cde38028ad898ebc02e690819fa220e88c62e0699403e94fff291cfffaf8410849f27605abcbc0`. 匹配时会委托给 `StandardPasswordEncoder`
[NOTE]
====
一些用户可能担心这会为黑客提供存储格式. 其实不必担心,因为密码的存储不依赖于算法是秘密. 此外,大多数格式很容易让攻击者在没有前缀的情况下弄清楚. 例如,BCrypt 密码通常以 $2a$ 开头.
====
[[authentication-password-storage-dpe-encoding]]
=== 密码编码
传入构造函数的 `idForEncode` 确定将使用哪个 `PasswordEncoder` 来编码密码. 在上面构造的 `DelegatingPasswordEncoder` 中,这意味着编码 password 的结果将被委托给 `BCryptPasswordEncoder` 并以 `{bcrypt}` 为前缀. 最终结果如下:
.DelegatingPasswordEncoder Encode Example
====
[source,text,attrs="-attributes"]
----
{bcrypt}$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM/BG
----
====
[[authentication-password-storage-dpe-matching]]
=== 密码匹配
匹配是基于 `{id}` 以及构造函数中提供的 `id` 到 `PasswordEncoder` 的映射完成的. 我们在<<authentication-password-storage-dpe-format,密码存储格式>> 一节中的 example 提供了一个如何完成的工作示例.
默认情况下,使用密码调用 `matches(CharSequence, String)` 和未映射的id(包括空 `id`)的结果将导致 `IllegalArgumentException`.
可以使用 `DelegatingPasswordEncoder.setDefaultPasswordEncoderForMatches(PasswordEncoder)` 自定义此行为.
通过使用 `id`,我们可以匹配任何密码编码,但使用最现代的密码编码. 这很重要,
因为与加密不同,密码哈希的设计使得没有简单的方法来恢复明文. 由于无法恢复明文,因此难以迁移密码. 虽然用户很容易迁移 `NoOpPasswordEncoder`,但我们默认选择将其包含在内,但不是默认的 `PasswordEncoder`. 以便简化入门体验.
[[authentication-password-storage-dep-getting-started]]
=== 入门体验
如果您要编写演示或示例,则花一些时间来对用户密码进行哈希运算会很麻烦. 有一些便利机制可以简化此过程,但这仍然不适合生产.
.withDefaultPasswordEncoder Example
====
.Java
[source,java,role="primary",attrs="-attributes"]
----
User user = User.withDefaultPasswordEncoder()
.username("user")
.password("password")
.roles("user")
.build();
System.out.println(user.getPassword());
// {bcrypt}$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM/BG
----
.Kotlin
[source,kotlin,role="secondary",attrs="-attributes"]
----
val user = User.withDefaultPasswordEncoder()
.username("user")
.password("password")
.roles("user")
.build()
println(user.password)
// {bcrypt}$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM/BG
----
====
如果要创建多个用户,则还可以重复使用该构建器.
.withDefaultPasswordEncoder Reusing the Builder
====
.Java
[source,java,role="primary"]
----
UserBuilder users = User.withDefaultPasswordEncoder();
User user = users
.username("user")
.password("password")
.roles("USER")
.build();
User admin = users
.username("admin")
.password("password")
.roles("USER","ADMIN")
.build();
----
.Kotlin
[source,kotlin,role="secondary"]
----
val users = User.withDefaultPasswordEncoder()
val user = users
.username("user")
.password("password")
.roles("USER")
.build()
val admin = users
.username("admin")
.password("password")
.roles("USER", "ADMIN")
.build()
----
====
这会散列存储的密码,但是密码仍在内存和已编译的源代码中暴露. 因此,对于生产环境它仍然不被认为是安全的. 对于生产,您应该<<authentication-password-storage-boot-cli,在外部对密码进行哈希处理>>.
[[authentication-password-storage-boot-cli]]
=== 使用 Spring Boot CLI 编码
正确编码密码的最简单方法是使用 https://docs.spring.io/spring-boot/docs/current/reference/html/spring-boot-cli.html[Spring Boot CLI].
例如,以下将对与 <<authentication-password-storage-dpe,DelegatingPasswordEncoder>> 一起使用的 `password` 密码进行编码:
.Spring Boot CLI encodepassword Example
====
[source,attrs="-attributes"]
----
spring encodepassword password
{bcrypt}$2a$10$X5wFBtLrL/kHcmrOGGTrGufsBX8CJ0WpQpF3pgeuxBB/H73BK1DW6
----
====
[[authentication-password-storage-dpe-troubleshoot]]
=== 故障排除
如果 <<authentication-password-storage-dpe-format,密码存储格式>> 一节中描述的其中一个密码没有 id,则会发生以下错误. .
----
java.lang.IllegalArgumentException: There is no PasswordEncoder mapped for the id "null"
at org.springframework.security.crypto.password.DelegatingPasswordEncoder$UnmappedIdPasswordEncoder.matches(DelegatingPasswordEncoder.java:233)
at org.springframework.security.crypto.password.DelegatingPasswordEncoder.matches(DelegatingPasswordEncoder.java:196)
----
解决错误的最简单方法是切换到显式提供密码编码的 `PasswordEncoder`. 解决问题的最简单方法是弄清楚当前如何存储密码并明确提供正确的 `PasswordEncoder`.
如果要从 Spring Security 4.2.x 进行迁移,则可以通过 <<authentication-password-storage-configuration,暴露 `NoOpPasswordEncoder` bean>> 恢复到先前的行为.
或者,您可以为所有密码加上正确的ID前缀,然后继续使用 `DelegatingPasswordEncoder`. 例如,如果您使用的是 `BCrypt`,则可以从以下方式迁移密码:
----
$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM/BG
----
to
[source,attrs="-attributes"]
----
{bcrypt}$2a$10$dXJ3SW6G7P50lGmMkkmwe.20cQQubK3.HZWzG3YB1tlRy.fqvM/BG
----
有关映射的完整列表,请参阅 https://docs.spring.io/spring-security/site/docs/5.0.x/api/org/springframework/security/crypto/factory/PasswordEncoderFactories.html[PasswordEncoderFactories] 上的 Javadoc.
[[authentication-password-storage-bcrypt]]
== BCryptPasswordEncoder
`BCryptPasswordEncoder` 实现使用广泛支持的 https://en.wikipedia.org/wiki/Bcrypt[bcrypt] 算法对密码进行哈希处理. 为了使其更能抵抗密码破解,`bcrypt` 故意降低了速度. 与其他自适应单向函数一样,应将其调整为大约1秒钟,以验证系统上的密码.
`BCryptPasswordEncoder` 的默认实现使用强度 10, 如 https://docs.spring.io/spring-security/site/docs/current/api/org/springframework/security/crypto/bcrypt/BCryptPasswordEncoder.html[BCryptPasswordEncoder] 的 Javadoc 中所述. 鼓励您
在您自己的系统上调整和测试强度参数, 以使验证密码大约需要 `1` 秒钟.
.BCryptPasswordEncoder
====
.Java
[source,java,role="primary"]
----
// Create an encoder with strength 16
BCryptPasswordEncoder encoder = new BCryptPasswordEncoder(16);
String result = encoder.encode("myPassword");
assertTrue(encoder.matches("myPassword", result));
----
.Kotlin
[source,kotlin,role="secondary"]
----
// Create an encoder with strength 16
val encoder = BCryptPasswordEncoder(16)
val result: String = encoder.encode("myPassword")
assertTrue(encoder.matches("myPassword", result))
----
====
[[authentication-password-storage-argon2]]
== Argon2PasswordEncoder
`Argon2PasswordEncoder` 实现使用 Argon2 算法对密码进行哈希处理. https://en.wikipedia.org/wiki/Argon2[Argon2] 是 https://en.wikipedia.org/wiki/Password_Hashing_Competition[Password Hashing Competition] 的获胜者.
为了克服自定义硬件上的密码破解问题,`Argon2` 是一种故意慢速的算法,需要大量内存.
与其他自适应单向函数一样,应将其调整为大约1秒钟,以验证系统上的密码. 如果 `Argon2PasswordEncoder` 需要 BouncyCastle,则为当前实现.
.Argon2PasswordEncoder
====
.Java
[source,java,role="primary"]
----
// Create an encoder with all the defaults
Argon2PasswordEncoder encoder = new Argon2PasswordEncoder();
String result = encoder.encode("myPassword");
assertTrue(encoder.matches("myPassword", result));
----
.Kotlin
[source,kotlin,role="secondary"]
----
// Create an encoder with all the defaults
val encoder = Argon2PasswordEncoder()
val result: String = encoder.encode("myPassword")
assertTrue(encoder.matches("myPassword", result))
----
====
[[authentication-password-storage-pbkdf2]]
== Pbkdf2PasswordEncoder
`Pbkdf2PasswordEncoder` 实现使用 https://en.wikipedia.org/wiki/PBKDF2[PBKDF2] 算法对密码进行哈希处理. 为了消除密码破解,PBKDF2 是一种故意缓慢的算法. 与其他自适应单向函数一样,应将其调整为大约1秒钟,以验证系统上的密码. 当需要 FIPS 认证时,此算法是不错的选择.
.Pbkdf2PasswordEncoder
====
.Java
[source,java,role="primary"]
----
// Create an encoder with all the defaults
Pbkdf2PasswordEncoder encoder = new Pbkdf2PasswordEncoder();
String result = encoder.encode("myPassword");
assertTrue(encoder.matches("myPassword", result));
----
.Kotlin
[source,kotlin,role="secondary"]
----
// Create an encoder with all the defaults
val encoder = Pbkdf2PasswordEncoder()
val result: String = encoder.encode("myPassword")
assertTrue(encoder.matches("myPassword", result))
----
====
[[authentication-password-storage-scrypt]]
== SCryptPasswordEncoder
`SCryptPasswordEncoder` 实现使用 https://en.wikipedia.org/wiki/Scrypt[scrypt] 算法对密码进行哈希处理. 为了克服自定义硬件 scrypt 上的密码破解问题,它是一种故意缓慢的算法,需要大量内存.
与其他自适应单向函数一样,应将其调整为大约 1 秒钟,以验证系统上的密码.
.SCryptPasswordEncoder
====
.Java
[source,java,role="primary"]
----
// Create an encoder with all the defaults
SCryptPasswordEncoder encoder = new SCryptPasswordEncoder();
String result = encoder.encode("myPassword");
assertTrue(encoder.matches("myPassword", result));
----
.Kotlin
[source,kotlin,role="secondary"]
----
// Create an encoder with all the defaults
val encoder = SCryptPasswordEncoder()
val result: String = encoder.encode("myPassword")
assertTrue(encoder.matches("myPassword", result))
----
====
[[authentication-password-storage-other]]
== 其他的 PasswordEncoders
还有许多其他的 `PasswordEncoder` 的实现,他们完全是为了保持向后兼容而存在的. 目前均已弃用,以表明它们不再被视为安全. 但是,由于很难迁移现有的旧系统,因此没有删除它们的计划.
[[authentication-password-storage-configuration]]
== 密码存储配置
Spring Security 默认使用 <<authentication-password-storage-dpe,DelegatingPasswordEncoder>>. 但是,可以通过将 `PasswordEncoder` 暴露为 Spring Bean 来对其进行自定义.
如果您是从 Spring Security 4.2.x 迁移的,则可以通过暴露 `NoOpPasswordEncoder` bean恢复到以前的行为.
[WARNING]
====
恢复为 `NoOpPasswordEncoder` 不被认为是安全的. 相反,您应该迁移到使用 `DelegatingPasswordEncoder` 来支持安全密码编码.
====
.NoOpPasswordEncoder
====
.Java
[source,java,role="primary"]
----
@Bean
public static NoOpPasswordEncoder passwordEncoder() {
return NoOpPasswordEncoder.getInstance();
}
----
.XML
[source,xml,role="secondary"]
----
<b:bean id="passwordEncoder"
class="org.springframework.security.crypto.password.NoOpPasswordEncoder" factory-method="getInstance"/>
----
.Kotlin
[source,kotlin,role="secondary"]
----
@Bean
fun passwordEncoder(): PasswordEncoder {
return NoOpPasswordEncoder.getInstance();
}
----
====
[NOTE]
====
XML 配置要求 `NoOpPasswordEncoder` Bean名称为 `passwordEncoder`.
====
| 32.667431
| 239
| 0.779541
|
13dff3d247388a28932bbde2d1421cf320a4512d
| 1,810
|
adoc
|
AsciiDoc
|
_posts/2017-04-05-Pagine-nuove.adoc
|
justafool5/justafool5.github.io
|
5e41655a837bb7b04fcc43e2352a888577db58a7
|
[
"MIT"
] | null | null | null |
_posts/2017-04-05-Pagine-nuove.adoc
|
justafool5/justafool5.github.io
|
5e41655a837bb7b04fcc43e2352a888577db58a7
|
[
"MIT"
] | null | null | null |
_posts/2017-04-05-Pagine-nuove.adoc
|
justafool5/justafool5.github.io
|
5e41655a837bb7b04fcc43e2352a888577db58a7
|
[
"MIT"
] | null | null | null |
// = Your Blog title
// See https://hubpress.gitbooks.io/hubpress-knowledgebase/content/ for information about the parameters.
// :hp-image: /covers/cover.png
// :published_at: 2017-04-04
// :hp-tags: Riflessioni personali, BG
// :hp-alt-title: Nuovamente il diario
= Pagine nuove
Mi trovo a riaprire queste pagine polverose dopo alcuni anni senza che vi abbia scritto nemmeno una parola.
È come ritrovare un vecchio amico, e rileggere le vecchie pagine oramai stinte dal tempo e dall'umidità è come rivivere gli anni passati.
Gli anni della giovinezza.
Anni che sono passati e non torneranno. È forse questo il merito di questo vecchio diario. Mi ha fatto prender coscienza che non sono più la Ygwild di un tempo. Mi fan male le ginocchia quando le piego, mi alzo almeno una volta ogni notte per andare a pisciare e nei miei capelli c'è oramai quasi più grigio che altro.
Ah, mi guardo allo specchio e cosa vedo? Una donna oramai in là con gli anni. Non voglio dire _vecchia_, perchè non è così che mi sento. La mia mente è limpida ed il corpo, malgrado gli acciacchi e l'età, mi risponde ancora bene.
Ma non mi faccio illusioni. I riflessi non sono quelli di un tempo. Così come non è quella di un tempo la forza, e nemmeno la resistenza.
Ygwild, ragazza mia. È ora di smettere di vagare per il mondo e cercare un posto dove posare le tue vecchie ossa. È ora di trovare un nuovo ruolo su questa terra.
E forse questi luoghi sono quelli giusti per ricominciare. Qui è un guazzabuglio incredibile di persone, razze, fedi, costumi dei più disparati. In un territorio grande come un fazzoletto pare concentrata l'intera diversità del creato.
Se poi mi risultasse che mi inganno e dovessi ben presto ripartire non fa nulla. Credo che tener nota di quanto mi accadrà qui si rivelerà in ogni caso interessante.
| 86.190476
| 318
| 0.785635
|
6624858ff4419b800903b072458ad577a4e7a70a
| 598
|
adoc
|
AsciiDoc
|
spec/ainfo_types.adoc
|
Wren6991/TwoWireDebug
|
fc5aaaa842542a9278bdc2eeffe815cda781f173
|
[
"CC0-1.0"
] | 19
|
2022-03-13T16:02:01.000Z
|
2022-03-27T14:46:43.000Z
|
spec/ainfo_types.adoc
|
Wren6991/TwoWireDebug
|
fc5aaaa842542a9278bdc2eeffe815cda781f173
|
[
"CC0-1.0"
] | null | null | null |
spec/ainfo_types.adoc
|
Wren6991/TwoWireDebug
|
fc5aaaa842542a9278bdc2eeffe815cda781f173
|
[
"CC0-1.0"
] | null | null | null |
[cols="10h,30h,~", options="header"]
|===
|Type | Brief | Description
| `0x000` | System bus | Raw access to some system-bus-like address space, potentially including multiple RAM, ROM or IO regions which aren't described by this table. `EXTRA` encodes the size of the segment, as ceil(log2(size in bytes)).
| `0x001` | RISC-V Debug Module | The start of a chain of RISC-V Debug Modules, linked by their `nextdm` registers.
| `0x002` | CoreSight Access Port | The least-addressed in an array of CoreSight APs. Other APs, if any, are spaced at 4 kiB intervals.
|===
| 74.75
| 248
| 0.683946
|
4abfb63bc6854c3e71cdcd6b2dec6d82f9b53100
| 78
|
adoc
|
AsciiDoc
|
util/qav-doc-generator/src/test/resources/TestPlugin_expected.adoc
|
qavalidator/qav
|
5bb872828ea25ab6ae1688762a2c5cd6cc3193ef
|
[
"Apache-2.0"
] | 7
|
2017-12-18T18:57:24.000Z
|
2021-01-09T20:47:15.000Z
|
util/qav-doc-generator/src/test/resources/TestPlugin_expected.adoc
|
qavalidator/qav
|
5bb872828ea25ab6ae1688762a2c5cd6cc3193ef
|
[
"Apache-2.0"
] | 19
|
2017-12-19T06:57:53.000Z
|
2019-07-03T08:00:20.000Z
|
util/qav-doc-generator/src/test/resources/TestPlugin_expected.adoc
|
qavalidator/qav
|
5bb872828ea25ab6ae1688762a2c5cd6cc3193ef
|
[
"Apache-2.0"
] | 2
|
2018-02-13T21:58:51.000Z
|
2020-06-19T15:23:59.000Z
|
=== TestPlugin
This is a test plugin
==== testCmd1
Desc1
No parameters.
| 6.5
| 21
| 0.666667
|
b6b97c0352e0cd000c0f516918f2d51eccabef58
| 3,249
|
adoc
|
AsciiDoc
|
spark-webui-StagesTab.adoc
|
viswanatha2020/spark_submit-options
|
fe484095a2cc7f7fc0a02cb944b612115678e42c
|
[
"Apache-2.0"
] | 2
|
2019-01-02T14:32:15.000Z
|
2019-04-01T16:21:55.000Z
|
spark-webui-StagesTab.adoc
|
Jayvardhan-Reddy/mastering-apache-spark-book
|
e751193fb55b475a11815026d172f46597015d55
|
[
"Apache-2.0"
] | null | null | null |
spark-webui-StagesTab.adoc
|
Jayvardhan-Reddy/mastering-apache-spark-book
|
e751193fb55b475a11815026d172f46597015d55
|
[
"Apache-2.0"
] | 1
|
2020-11-17T08:18:57.000Z
|
2020-11-17T08:18:57.000Z
|
== [[StagesTab]] StagesTab -- Stages for All Jobs
[[prefix]]
`StagesTab` is a link:spark-webui-SparkUITab.adoc[SparkUITab] with *stages* link:spark-webui-SparkUITab.adoc#prefix[prefix].
`StagesTab` is <<creating-instance, created>> exclusively when `SparkUI` is link:spark-webui-SparkUI.adoc#initialize[initialized].
When <<creating-instance, created>>, `StagesTab` creates the following pages and link:spark-webui-WebUITab.adoc#attachPage[attaches] them immediately:
* link:spark-webui-AllStagesPage.adoc[AllStagesPage]
* link:spark-webui-StagePage.adoc[StagePage]
* link:spark-webui-PoolPage.adoc[PoolPage]
*Stages* tab in link:spark-webui.adoc[web UI] shows link:spark-webui-AllStagesPage.adoc[the current state of all stages of all jobs in a Spark application] (i.e. a link:spark-SparkContext.adoc[SparkContext]) with two optional pages for link:spark-webui-StagePage.adoc[the tasks and statistics for a stage] (when a stage is selected) and link:spark-webui-PoolPage.adoc[pool details] (when the application works in link:spark-taskscheduler-schedulingmode.adoc#FAIR[FAIR scheduling mode]).
The title of the tab is *Stages for All Jobs*.
You can access the Stages tab under `/stages` URL, i.e. http://localhost:4040/stages.
With no jobs submitted yet (and hence no stages to display), the page shows nothing but the title.
.Stages Page Empty
image::images/spark-webui-stages-empty.png[align="center"]
The Stages page shows the stages in a Spark application per state in their respective sections -- *Active Stages*, *Pending Stages*, *Completed Stages*, and *Failed Stages*.
.Stages Page With One Stage Completed
image::images/spark-webui-stages-completed.png[align="center"]
NOTE: The state sections are only displayed when there are stages in a given state. Refer to link:spark-webui-AllStagesPage.adoc[Stages for All Jobs].
In link:spark-taskscheduler-schedulingmode.adoc#FAIR[FAIR scheduling mode] you have access to the table showing the scheduler pools.
.Fair Scheduler Pools Table
image::images/spark-webui-stages-fairschedulerpools.png[align="center"]
Internally, the page is represented by https://github.com/apache/spark/blob/master/core/src/main/scala/org/apache/spark/ui/jobs/StagesTab.scala[org.apache.spark.ui.jobs.StagesTab] class.
The page uses the parent's link:spark-webui-SparkUI.adoc[SparkUI] to access required services, i.e. link:spark-SparkContext.adoc[SparkContext], link:spark-sql-SQLConf.adoc[SparkConf], link:spark-webui-JobProgressListener.adoc[JobProgressListener], link:spark-webui-RDDOperationGraphListener.adoc[RDDOperationGraphListener], and to know whether <<killEnabled, kill is enabled or not>>.
`StagesTab` is <<creating-instance, created>> when...FIXME
=== [[killEnabled]] `killEnabled` flag
CAUTION: FIXME
=== [[creating-instance]] Creating StagesTab Instance
`StagesTab` takes the following when created:
* [[parent]] link:spark-webui-SparkUI.adoc[SparkUI]
* [[store]] link:spark-core-AppStatusStore.adoc[AppStatusStore]
=== [[handleKillRequest]] Handling Request to Kill Stage (from web UI) -- `handleKillRequest` Method
[source, scala]
----
handleKillRequest(request: HttpServletRequest): Unit
----
`handleKillRequest`...FIXME
NOTE: `handleKillRequest` is used when...FIXME
| 49.227273
| 486
| 0.781779
|
22cae9e480620a0b7a4e5c6b4ddf8701e84c9108
| 1,846
|
adoc
|
AsciiDoc
|
source/documentation/virtual_machine_management_guide/topics/Updating_the_Guest_Agents_and_Drivers_on_Windows.adoc
|
anhhai986/ovirt-site
|
fcdd23b676cb105c0da7a55ce3c7eae3c94ce03a
|
[
"MIT"
] | null | null | null |
source/documentation/virtual_machine_management_guide/topics/Updating_the_Guest_Agents_and_Drivers_on_Windows.adoc
|
anhhai986/ovirt-site
|
fcdd23b676cb105c0da7a55ce3c7eae3c94ce03a
|
[
"MIT"
] | null | null | null |
source/documentation/virtual_machine_management_guide/topics/Updating_the_Guest_Agents_and_Drivers_on_Windows.adoc
|
anhhai986/ovirt-site
|
fcdd23b676cb105c0da7a55ce3c7eae3c94ce03a
|
[
"MIT"
] | null | null | null |
[[Updating_the_Guest_Agents_and_Drivers_on_Windows]]
==== Updating the Guest Agents and Drivers on Windows
*Updating the guest agents, tools, and drivers on Windows*
. On the {virt-product-fullname} {engine-name} machine, update the {virt-product-fullname} Guest Tools package to the latest version:
+
[options="nowrap" subs="normal"]
----
# dnf upgrade -y virtio-win
----
+
The ISO file is located in [filename]`/usr/share/virtio-win/virtio-win___version__.iso` on the {engine-name} machine.
. Upload [filename]`virtio-win___version__.iso` to a data domain. See link:{URL_virt_product_docs}{URL_format}administration_guide/index#Uploading_Images_to_a_Data_Storage_Domain_storage_tasks[Uploading Images to a Data Storage Domain] in the __Administration Guide__ for details.
. In the Administration or VM Portal, if the virtual machine is running, use the *Change CD* drop-down list to attach the [filename]`virtio-win___version__.iso` file to each of your virtual machines. If the virtual machine is powered off, click the *Run Once* button and attach the ISO as a CD.
. Log in to the virtual machine.
. Select the CD Drive containing the [filename]`virtio-win___version__.iso` file.
. Double-click `virtio-win-gt-x64.msi` for a 64-bit guest operating system or `virtio-win-gt-x86.msi` for a 32-bit guest operating system.
. Click btn:[Next] at the welcome screen.
. Follow the prompts in the installation wizard. Ensure all check boxes in the list of components are selected, including the *RHEV Agent* which is disabled by default.
. Once installation is complete, select *Yes, I want to restart my computer now* and click *Finish* to apply the changes.
. After the reboot completes, open the virtual CD drive and change to the [filename]`guest-agent` directory.
. Double-click [filename]`qemu-ga-x86_64.msi` or [filename]`qemu-ga-i386.msi`.
| 76.916667
| 294
| 0.779523
|
7095129d78ab74bc7bb97addb35f816ce81a46b8
| 3,101
|
adoc
|
AsciiDoc
|
docs/modules/ROOT/pages/contributor-guide/ci.adoc
|
javaduke/camel-quarkus
|
e82c314ff0578c60b9f72f11b11d4547ea0eec6f
|
[
"Apache-2.0"
] | 179
|
2019-06-21T09:00:21.000Z
|
2022-03-30T06:00:47.000Z
|
docs/modules/ROOT/pages/contributor-guide/ci.adoc
|
javaduke/camel-quarkus
|
e82c314ff0578c60b9f72f11b11d4547ea0eec6f
|
[
"Apache-2.0"
] | 2,658
|
2019-06-24T10:21:30.000Z
|
2022-03-31T16:51:12.000Z
|
docs/modules/ROOT/pages/contributor-guide/ci.adoc
|
javaduke/camel-quarkus
|
e82c314ff0578c60b9f72f11b11d4547ea0eec6f
|
[
"Apache-2.0"
] | 143
|
2019-06-21T09:41:07.000Z
|
2022-03-29T16:27:24.000Z
|
[[ci]]
= Camel Quarkus CI Builds
:page-aliases: ci.adoc
== Branches
[width="100%",cols="1,3"]
|===
|main | Latest releasable work
|camel-main | To verify camel-quarkus against the latest Camel SNAPSHOT release
|quarkus-main| To verify camel-quarkus against the latest Quarkus SNAPSHOT release
|===
=== Branch maintenance
Some scheduled build jobs (mentioned below) run each day to synchronize branches `camel-main` & `quarkus-main` with the latest work from the `main` branch.
Sometimes these jobs fail and it is necessary to fix any issues and manually rebase the branches. The process for doing this is as follows (a consolidated command sketch is shown after the list).
1. Depending on which branch you're rebasing, you may want to begin by building the latest Camel `main` or Quarkus `main` branch to avoid pulling in non-deterministic SNAPSHOT artifacts. Otherwise you can build the branches and activate the relevant SNAPSHOT repositories via Maven profiles `-Papache-snapshots` or `-Poss-snapshots`.
2. Check out the branch you want to work on. E.g. `git checkout camel-main`. Make sure the core component version properties in the root `pom.xml` are set correctly, e.g. `camel.version`, the parent version for `camel-dependencies`, and `quarkus.version`.
3. Ensure your local repository is up-to-date with the remote. `git pull -r`.
4. Synchronize the branch with the latest work from the main branch. The 'origin' remote is assumed here but you can substitute this for whatever reference you want to work with. `git fetch origin main && git rebase origin/main`. You may need to fix up merge conflicts.
5. Build the project quickly: `mvn clean install -Dquickly -T1C`. Remember to activate any SNAPSHOT profiles if required.
6. Now you can proceed to work on fixing issues and committing the code. Any problems that are found to originate in Camel or Quarkus should be tracked by issues that you create in those projects.
7. When pushing changes, it's likely that you'll need to 'force push', i.e. with `--force`, or with `--force-with-lease` in case others are working simultaneously on the same branch.
8. Follow the GitHub CI build to verify that the project builds successfully.
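The following is a condensed, illustrative version of steps 2 to 5 and 7 for the `camel-main` branch; substitute `quarkus-main` and the `-Poss-snapshots` profile as appropriate:

[source,bash]
----
git checkout camel-main                              # step 2
git pull -r                                          # step 3: sync with the remote
git fetch origin main && git rebase origin/main      # step 4: rebase onto main, fix conflicts if any
mvn clean install -Dquickly -T1C -Papache-snapshots  # step 5: quick build with Camel SNAPSHOTs enabled
git push --force-with-lease origin camel-main        # step 7: push the rebased branch
----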
== Builds
https://github.com/apache/camel-quarkus/actions?query=workflow%3A%22Camel+Quarkus+CI%22
== Daily main -> camel-main synchronization
https://github.com/apache/camel-quarkus/actions?query=workflow%3A%22Camel+Quarkus+CI%22+branch%3Acamel-main
This build creates an issue on failure, which can be tracked here:
https://github.com/apache/camel-quarkus/issues?q=is%3Aopen+is%3Aissue+label%3Abuild%2Fcamel-main
== Daily main -> quarkus-main synchronization
https://github.com/apache/camel-quarkus/actions?query=workflow%3A%22Camel+Quarkus+CI%22+branch%3Aquarkus-main
This build creates an issue on failure, which can be tracked here:
https://github.com/apache/camel-quarkus/issues?q=is%3Aopen+is%3Aissue+label%3Abuild%2Fquarkus-main
== Snapshot Deploy Build
https://ci-builds.apache.org/job/Camel/job/Camel%20Quarkus%20SNAPSHOT%20Deploy/
SNAPSHOTs are built and published on push for each branch to:
https://repository.apache.org/snapshots/
| 49.222222
| 333
| 0.777491
|
7aac803048c28d822623484d48b0eabeaa4855f4
| 669
|
asciidoc
|
AsciiDoc
|
docs/infrastructure/index.asciidoc
|
LAFINAL/kibana
|
5251267bb952f890edba8260f79771a8694d802d
|
[
"Apache-2.0"
] | null | null | null |
docs/infrastructure/index.asciidoc
|
LAFINAL/kibana
|
5251267bb952f890edba8260f79771a8694d802d
|
[
"Apache-2.0"
] | null | null | null |
docs/infrastructure/index.asciidoc
|
LAFINAL/kibana
|
5251267bb952f890edba8260f79771a8694d802d
|
[
"Apache-2.0"
] | null | null | null |
[role="xpack"]
[[xpack-infra]]
= Infrastructure
[partintro]
--
beta[]
Use the interactive Infrastructure UI to monitor your infrastructure and
identify problems in real time. You can explore metrics and logs for common
servers, containers, and services.
[role="screenshot"]
image::infrastructure/images/infra-sysmon.jpg[Infrastructure Overview in Kibana]
[float]
== Add data sources
Kibana provides step-by-step instructions to help you add your data sources.
The {infra-guide}[Infrastructure Monitoring Guide] is a good source for more detailed
instructions and information.
--
include::monitor.asciidoc[]
include::infra-ui.asciidoc[]
| 24.777778
| 84
| 0.751868
|
4fada215143e9713b9c002821f14e0dda4005c3d
| 618
|
adoc
|
AsciiDoc
|
docs/src/main/docs/guide/gettingStarted/create.adoc
|
ianibo/gorm-graphql
|
ad4f51e22bbba853a51ab55ba387c3c6b38aadaf
|
[
"Apache-2.0"
] | 83
|
2017-06-19T14:05:38.000Z
|
2021-08-03T07:14:18.000Z
|
docs/src/main/docs/guide/gettingStarted/create.adoc
|
ianibo/gorm-graphql
|
ad4f51e22bbba853a51ab55ba387c3c6b38aadaf
|
[
"Apache-2.0"
] | 49
|
2017-07-21T15:52:32.000Z
|
2021-07-19T13:39:56.000Z
|
docs/src/main/docs/guide/gettingStarted/create.adoc
|
ianibo/gorm-graphql
|
ad4f51e22bbba853a51ab55ba387c3c6b38aadaf
|
[
"Apache-2.0"
] | 25
|
2017-08-10T22:49:11.000Z
|
2022-03-19T19:42:26.000Z
|
=== Create
In this example the request is a mutation to create a speaker.
:url: http://localhost:8080/graphql
[source, bash, subs="attributes"]
----
include::{sourcedir}/examples/grails-docs-app/src/integration-test/groovy/demo/SpeakerIntegrationSpec.groovy[tags=curlCommand]
----
The API answers with the properties we requested.
[source, json]
----
include::{sourcedir}/examples/grails-docs-app/src/integration-test/groovy/demo/SpeakerIntegrationSpec.groovy[tags=response]
----
NOTE: If there was a validation error during the create process, the `errors` property would be populated with the validation errors.
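For illustration only (the real request lives in the included `SpeakerIntegrationSpec.groovy`, and the mutation and field names below are assumptions), a create mutation sent with `curl` could look roughly like this:

[source, bash]
----
# Hypothetical speaker creation; speakerCreate, firstName and lastName are assumed names.
curl -X POST http://localhost:8080/graphql \
  -H "Content-Type: application/json" \
  -d '{"query":"mutation { speakerCreate(speaker: { firstName: \"Jane\", lastName: \"Doe\" }) { id firstName lastName errors { field message } } }"}'
----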
| 34.333333
| 133
| 0.775081
|
044a1bc06ec87943dde80ccd36c4edb30ba408b0
| 762
|
adoc
|
AsciiDoc
|
documentation/assemblies/assembly-kafka-broker-external-listeners.adoc
|
vdinovi/strimzi-kafka-operator
|
cb80784f6274b7dc316491a5bac6bdb95d44ee4c
|
[
"Apache-2.0"
] | 1
|
2020-04-07T12:44:31.000Z
|
2020-04-07T12:44:31.000Z
|
documentation/assemblies/assembly-kafka-broker-external-listeners.adoc
|
vdinovi/strimzi-kafka-operator
|
cb80784f6274b7dc316491a5bac6bdb95d44ee4c
|
[
"Apache-2.0"
] | 5
|
2020-04-23T20:30:41.000Z
|
2021-12-14T21:39:00.000Z
|
documentation/assemblies/assembly-kafka-broker-external-listeners.adoc
|
vdinovi/strimzi-kafka-operator
|
cb80784f6274b7dc316491a5bac6bdb95d44ee4c
|
[
"Apache-2.0"
] | 1
|
2020-01-02T09:39:33.000Z
|
2020-01-02T09:39:33.000Z
|
// This assembly is included in the following assemblies:
//
// assembly-configuring-kafka-listeners.adoc
[id='assembly-kafka-broker-external-listeners-{context}']
= External listeners
Use an external listener to expose your {ProductName} Kafka cluster to a client outside a Kubernetes environment.
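As a quick check once an external listener is configured (illustrative only; the cluster name `my-cluster` is a placeholder and the status layout can vary between {ProductName} versions), the advertised listener addresses can be read from the `Kafka` resource status:

[source,shell]
----
# Prints the listener addresses advertised in the Kafka resource status, including external ones.
kubectl get kafka my-cluster -o jsonpath='{.status.listeners}'
----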
.Additional resources
* {Externallisteners}
include::../modules/con-kafka-broker-external-listeners-addresses.adoc[leveloffset=+1]
include::assembly-kafka-broker-external-listeners-routes.adoc[leveloffset=+1]
include::assembly-kafka-broker-external-listeners-loadbalancers.adoc[leveloffset=+1]
include::assembly-kafka-broker-external-listeners-nodeports.adoc[leveloffset=+1]
include::assembly-kafka-broker-external-listeners-ingress.adoc[leveloffset=+1]
| 33.130435
| 113
| 0.808399
|
63c25a4eb01db679d13ee429c20493425e4c49e4
| 4,477
|
adoc
|
AsciiDoc
|
docs/KlassenKategorisammenligning.adoc
|
Informasjonsforvaltning/xkos-ap-no
|
9fe18babe8bd0695c3821a784b0855efd4e657ff
|
[
"Apache-2.0"
] | 1
|
2021-09-10T02:39:57.000Z
|
2021-09-10T02:39:57.000Z
|
docs/KlassenKategorisammenligning.adoc
|
Informasjonsforvaltning/xkos-ap-no
|
9fe18babe8bd0695c3821a784b0855efd4e657ff
|
[
"Apache-2.0"
] | 12
|
2021-09-03T13:20:25.000Z
|
2022-03-17T14:57:00.000Z
|
docs/KlassenKategorisammenligning.adoc
|
Informasjonsforvaltning/xkos-ap-no
|
9fe18babe8bd0695c3821a784b0855efd4e657ff
|
[
"Apache-2.0"
] | 1
|
2021-09-23T07:08:04.000Z
|
2021-09-23T07:08:04.000Z
|
== Klassen Kategorisammenligning (xkos:ConceptAssociation) [[Kategorisammenligning]]
[[img-KlassenKategorisammenligning]]
.Klassen Kategorisammenligning (xkos:ConceptAssociation) og klassen den refererer til.
[link=images/KlassenKategorisammenligning.png]
image::images/KlassenKategorisammenligning.png[]
[cols="30s,70d"]
|===
|English name |Category correspondence
|Beskrivelse |Brukes til å representere sammenligning mellom kategorier.
|Usage note |To represent correspondences between classification items.
|URI|xkos:ConceptAssociation
|Kravsnivå |Valgfri/Optional
|Merknad |
|Eksempel |Sammenligning mellom kategorien «1.11 – Produksjon av elektroniske komponenter og kretskort» i «Klassifisering av informasjonssektoren 2007» (målkategori), og kategorien «26.1 – Produksjon av elektroniske komponenter og kretskort» i «Næringsgruppering 2007 (SN 2007)» (kildekategori).
|===
Eksempel i RDF Turtle:
----
<261vs111> a xkos:ConceptAssociation ;
xkos:sourceConcept <sn2007-261> ;
xkos:targetConcept <snInfo2007-111> ; .
<snInfo2007-111> a skos:Concept ;
skos:prefLabel "Produksjon av elektroniske komponenter og kretskort"@nb ;
skos:inScheme <snInfo2007> ; .
<sn2007-261> a skos:Concept ;
skos:prefLabel "Produksjon av elektroniske komponenter og kretskort"@nb ;
skos:inScheme <sn2007> ; .
<1102n1129vs1108> a xkos:ConceptAssociation ;
xkos:targetConcept <1108> ;
xkos:sourceConcept <1102> , <1129> ; .
<1108> a skos:Concept ;
skos:prefLabel "Sandnes"@nb ;
skos:notation "1108" ;
skos:inScheme <kommuneinndeling2020> ; .
<1102> a skos:Concept ;
skos:prefLabel "Sandnes"@nb ;
skos:notation "1102" ;
skos:inScheme <kommuneinndeling2019> ; .
<1129> a skos:Concept ;
skos:prefLabel "Forsand"@nb ;
skos:notation "1129" ;
skos:inScheme <kommuneinndeling2019> ; .
----
=== Anbefalte egenskaper for klassen _Kategorisammenligning_ [[Kategorisammenligning-anbefalte-egenskaper]]
==== Kategorisammenligning – har kildekategori (xkos:sourceConcept) [[Kategorisammenligning-harKildekategori]]
[cols="30s,70d"]
|===
|English name |source category
|URI |xkos:sourceConcept
|Range |skos:Concept
|Beskrivelse |Brukes til å referere til en kildekategori i en kategorisammenligning.
|Usage note |To link to a source concept in the correspondence.
|Multiplisitet |0..n
|Kravsnivå |Anbefalt/Recommended
|Merknad |
|Eksempel |Se eksemplet under beskrivelsen av denne klassen i begynnelsen av dette kapittelet.
|===
Eksempel i RDF Turtle: Se eksemplet under beskrivelsen av denne klassen i begynnelsen av dette kapittelet.
==== Kategorisammenligning – har målkategori (xkos:targetConcept) [[Kategorisammenligning-harMålkategori]]
[cols="30s,70d"]
|===
|English name |target category
|URI |xkos:targetConcept
|Range |skos:Concept
|Beskrivelse |Brukes til å referere til en målkategori i en kategorisammenligning.
|Usage note |To link to a target concept in the correspondence.
|Multiplisitet |0..n
|Kravsnivå |Anbefalt/Recommended
|Merknad |
|Eksempel | Se eksemplet under beskrivelsen av denne klassen i begynnelsen av dette kapittelet.
|===
Eksempel i RDF Turtle: Se eksemplet under beskrivelsen av denne klassen i begynnelsen av dette kapittelet.
=== Valgfrie egenskaper for klassen _Kategorisammenligning_ [[Kategorisammenligning-valgfrie-egenskaper]]
==== Kategorisammenligning – type endring (dct:type) [[Kategorisammenligning-typeEndring]]
[cols="30s,70d"]
|===
|English name |change type
|URI |dct:type
|Range |skos:Concept
|Beskrivelse |Brukes til å oppgi type endring mellom to klassifikasjoner i samme klassifikasjonsfamilie/-serie.
|Usage note |To specify the type of the change between two classifications in the same classification family/series.
|Multiplisitet |0..1
|Kravsnivå |Valgfri/Optional
|Merknad 1 |Verdien skal velges fra listen over lovlige endringstyper. Se <<ForslagTilEndringstyper>>.
|Merknad 2 |Norsk utvidelse: ikke eksplisitt tatt med i XKOS.
|Eksempel |Se https://www.ssb.no/klass/klassifikasjoner/6/endringer[https://www.ssb.no/klass/klassifikasjoner/6/endringer] fra SN2002 til SN2007
|===
Eksempel i RDF Turtle:
----
<nil90035> a xkos:ConceptAssociation ;
xkos:targetConcept <sn2007-90035> ; # en målkategori og ingen kildekategori
dct:type xkosno:itemCreation ; .
<01210vs01410n01420> a xkos:ConceptAssociation ;
xkos:sourceConcept <sn2002-01210> ; # en kildekategori
xkos:targetConcept <sn2007-01410> , <sn2007-01420> ; # to målkategorier
dct:type xkosno:ItemBreakdown ; .
----
| 37.940678
| 295
| 0.771945
|
b10fdaecc7a2ec7366f39635f9e421b7b4016fed
| 5,542
|
adoc
|
AsciiDoc
|
components/camel-docker/src/main/docs/docker-component.adoc
|
lhein/camel
|
85177223fa2143199cda5d3630b4eb893409f449
|
[
"Apache-2.0"
] | 1
|
2016-06-29T09:44:38.000Z
|
2016-06-29T09:44:38.000Z
|
components/camel-docker/src/main/docs/docker-component.adoc
|
lhein/camel
|
85177223fa2143199cda5d3630b4eb893409f449
|
[
"Apache-2.0"
] | null | null | null |
components/camel-docker/src/main/docs/docker-component.adoc
|
lhein/camel
|
85177223fa2143199cda5d3630b4eb893409f449
|
[
"Apache-2.0"
] | null | null | null |
[[docker-component]]
== Docker Component
*Available as of Camel version 2.15*
Camel component for communicating with Docker.
The Docker Camel component leverages the
https://github.com/docker-java/docker-java[docker-java] via the
https://docs.docker.com/reference/api/docker_remote_api[Docker Remote
API].
=== URI format
[source,java]
------------------------------
docker://[operation]?[options]
------------------------------
Where *operation* is the specific action to perform on Docker.
=== General Options
// component options: START
The Docker component supports 2 options which are listed below.
[width="100%",cols="2,5,^1,2",options="header"]
|===
| Name | Description | Default | Type
| *configuration* (advanced) | To use the shared docker configuration | | DockerConfiguration
| *resolveProperty Placeholders* (advanced) | Whether the component should resolve property placeholders on itself when starting. Only properties which are of String type can use property placeholders. | true | boolean
|===
// component options: END
// endpoint options: START
The Docker endpoint is configured using URI syntax:
----
docker:operation
----
with the following path and query parameters:
==== Path Parameters (1 parameters):
[width="100%",cols="2,5,^1,2",options="header"]
|===
| Name | Description | Default | Type
| *operation* | *Required* Which operation to use | | DockerOperation
|===
==== Query Parameters (20 parameters):
[width="100%",cols="2,5,^1,2",options="header"]
|===
| Name | Description | Default | Type
| *email* (common) | Email address associated with the user | | String
| *host* (common) | *Required* Docker host | localhost | String
| *port* (common) | *Required* Docker port | 2375 | Integer
| *requestTimeout* (common) | Request timeout for response (in seconds) | | Integer
| *bridgeErrorHandler* (consumer) | Allows for bridging the consumer to the Camel routing Error Handler, which means any exceptions that occur while the consumer is trying to pick up incoming messages, or the like, will now be processed as a message and handled by the routing Error Handler. By default the consumer will use the org.apache.camel.spi.ExceptionHandler to deal with exceptions, which will be logged at WARN or ERROR level and ignored. | false | boolean
| *exceptionHandler* (consumer) | To let the consumer use a custom ExceptionHandler. Notice if the option bridgeErrorHandler is enabled then this option is not in use. By default the consumer will deal with exceptions that will be logged at WARN or ERROR level and ignored. | | ExceptionHandler
| *exchangePattern* (consumer) | Sets the exchange pattern when the consumer creates an exchange. | | ExchangePattern
| *cmdExecFactory* (advanced) | The fully qualified class name of the DockerCmdExecFactory implementation to use | com.github.dockerjava.netty.NettyDockerCmdExecFactory | String
| *followRedirectFilter* (advanced) | Whether to follow redirect filter | false | boolean
| *loggingFilter* (advanced) | Whether to use logging filter | false | boolean
| *maxPerRouteConnections* (advanced) | Maximum route connections | 100 | Integer
| *maxTotalConnections* (advanced) | Maximum total connections | 100 | Integer
| *serverAddress* (advanced) | Server address for docker registry. | https://index.docker.io/v1/ | String
| *socket* (advanced) | Socket connection mode | true | boolean
| *synchronous* (advanced) | Sets whether synchronous processing should be strictly used or Camel is allowed to use asynchronous processing (if supported). | false | boolean
| *certPath* (security) | Location containing the SSL certificate chain | | String
| *password* (security) | Password to authenticate with | | String
| *secure* (security) | Use HTTPS communication | false | boolean
| *tlsVerify* (security) | Check TLS | false | boolean
| *username* (security) | User name to authenticate with | | String
|===
// endpoint options: END
=== Header Strategy
All URI options can be passed as header properties. Values found in a
message header take precedence over URI parameters. A header property
takes the form of a URI option prefixed with *CamelDocker*, as shown
below.
[width="100%",cols="50%,50%",options="header",]
|=======================================================================
|URI Option |Header Property
|containerId |CamelDockerContainerId
|=======================================================================
=== Examples
The following example consumes events from Docker:
[source,java]
----------------------------------------------------------------------
from("docker://events?host=192.168.59.103&port=2375").to("log:event");
----------------------------------------------------------------------
The following example queries Docker for system wide information
[source,java]
-------------------------------------------------------------------
from("docker://info?host=192.168.59.103&port=2375").to("log:info");
-------------------------------------------------------------------
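Both examples assume a Docker daemon reachable at the host and port shown. As a quick sanity check outside of Camel (illustrative only, not part of the component), the same Docker Remote API endpoint can be queried directly:

[source,bash]
----
# Queries the Docker Remote API /info endpoint that the second route reads from.
curl http://192.168.59.103:2375/info
----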
=== Dependencies
To use Docker in your Camel routes you need to add a dependency on
*camel-docker*, which implements the component.
If you use Maven you can just add the following to your pom.xml,
substituting the version number for the latest and greatest release (see
the download page for the latest versions).
[source,xml]
-------------------------------------
<dependency>
<groupId>org.apache.camel</groupId>
<artifactId>camel-docker</artifactId>
<version>x.x.x</version>
</dependency>
-------------------------------------
| 41.984848
| 458
| 0.674305
|
26c3d5908f6f9e7c051cd586f7df2e06da64ac04
| 659
|
adoc
|
AsciiDoc
|
lib/src/main/asciidoc/setup.adoc
|
sadam21/spring-hateoas-jsonapi
|
ac65cb28e7dfd1a1a7b84fb928b3d3767609115a
|
[
"Apache-2.0"
] | null | null | null |
lib/src/main/asciidoc/setup.adoc
|
sadam21/spring-hateoas-jsonapi
|
ac65cb28e7dfd1a1a7b84fb928b3d3767609115a
|
[
"Apache-2.0"
] | null | null | null |
lib/src/main/asciidoc/setup.adoc
|
sadam21/spring-hateoas-jsonapi
|
ac65cb28e7dfd1a1a7b84fb928b3d3767609115a
|
[
"Apache-2.0"
] | null | null | null |
[[setup]]
= Setup
To enable the JSON:API media type you just need to add this module as a dependency to your project.
[source,xml,indent=0,role="primary"]
.Maven
----
<dependency>
<groupId>com.toedter</groupId>
<artifactId>spring-hateoas-jsonapi</artifactId>
<version>1.1.1</version>
</dependency>
----
[source,groovy,indent=0,role="secondary"]
.Gradle
----
implementation 'com.toedter:spring-hateoas-jsonapi:1.1.1'
----
The latest published snapshot version is `1.1.2-SNAPSHOT`.
If you want to try it out, please make sure to add https://oss.sonatype.org/content/repositories/snapshots/
as a repository to your Maven or Gradle configuration.
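Once the module is on the classpath, clients can request representations in the JSON:API media type. A minimal smoke test could look like this (the `/movies` endpoint is only a placeholder for one of your own resources):

[source,bash]
----
# application/vnd.api+json is the JSON:API media type; /movies is a placeholder endpoint.
curl -H "Accept: application/vnd.api+json" http://localhost:8080/movies
----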
| 26.36
| 107
| 0.732929
|
444d3ac4ab862d7e9a461ab3f329eaf00c7fe737
| 1,442
|
adoc
|
AsciiDoc
|
documentation/assemblies/upgrading/assembly-upgrade-kafka-versions.adoc
|
dthadi3/strimzi-kafka-operator
|
d62e603a63c80fca58b23e850f12d74ed2d06ea7
|
[
"Apache-2.0"
] | 3
|
2020-10-20T22:25:56.000Z
|
2021-09-21T05:37:01.000Z
|
documentation/assemblies/upgrading/assembly-upgrade-kafka-versions.adoc
|
dthadi3/strimzi-kafka-operator
|
d62e603a63c80fca58b23e850f12d74ed2d06ea7
|
[
"Apache-2.0"
] | 9
|
2021-03-10T03:59:44.000Z
|
2022-01-04T16:44:44.000Z
|
documentation/assemblies/upgrading/assembly-upgrade-kafka-versions.adoc
|
dthadi3/strimzi-kafka-operator
|
d62e603a63c80fca58b23e850f12d74ed2d06ea7
|
[
"Apache-2.0"
] | 1
|
2020-10-20T22:23:27.000Z
|
2020-10-20T22:23:27.000Z
|
// This assembly is included in the following assemblies:
//
// assembly-upgrade.adoc
[id='assembly-upgrading-kafka-versions-{context}']
= Upgrading Kafka
After you have upgraded your Cluster Operator, you can upgrade your brokers to a higher supported version of Kafka.
Kafka upgrades are performed using the Cluster Operator. How the Cluster Operator performs an upgrade depends on the differences between versions of:
* Interbroker protocol
* Log message format
* ZooKeeper
When the versions are the same for the current and target Kafka version, as is typically the case for a patch level upgrade, the Cluster Operator can upgrade through a single rolling update of the Kafka brokers.
When one or more of these versions differ, the Cluster Operator requires two or three rolling updates of the Kafka brokers to perform the upgrade.
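Before starting an upgrade it can help to confirm which Kafka version the cluster currently declares (illustrative only; `my-cluster` is a placeholder name):

[source,shell]
----
# Prints the Kafka version currently set in the Kafka custom resource.
kubectl get kafka my-cluster -o jsonpath='{.spec.kafka.version}'
----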
.Additional resources
* xref:assembly-upgrade-cluster-operator-str[]
include::modules/con-upgrade-versions-and-images.adoc[leveloffset=+1]
//how to switch to generic listener config
include::modules/con-upgrade-listeners.adoc[leveloffset=+1]
include::modules/con-upgrade-strategies-for-upgrading-clients.adoc[leveloffset=+1]
include::modules/proc-upgrade-brokers-newer-kafka.adoc[leveloffset=+1]
//upgrade clients and kafka streams to incremental cooperative rebalance protocol added in kafka 2.4
include::modules/proc-upgrading-consumers-streams-cooperative-rebalancing.adoc[leveloffset=+1]
| 40.055556
| 211
| 0.806519
|
dbfbc15f6dc7d163b08e16812fa7e5b29b36ede2
| 86,544
|
adoc
|
AsciiDoc
|
documentation/content/es/books/handbook/ppp-and-slip/_index.adoc
|
EngrRezCab/freebsd-doc
|
b2364e0d8f5cc3d57c8be7eed928aba42c72009f
|
[
"BSD-2-Clause"
] | 1
|
2022-01-30T03:27:41.000Z
|
2022-01-30T03:27:41.000Z
|
documentation/content/es/books/handbook/ppp-and-slip/_index.adoc
|
EngrRezCab/freebsd-doc
|
b2364e0d8f5cc3d57c8be7eed928aba42c72009f
|
[
"BSD-2-Clause"
] | null | null | null |
documentation/content/es/books/handbook/ppp-and-slip/_index.adoc
|
EngrRezCab/freebsd-doc
|
b2364e0d8f5cc3d57c8be7eed928aba42c72009f
|
[
"BSD-2-Clause"
] | null | null | null |
---
title: Capítulo 25. PPP y SLIP
part: Parte IV. Comunicaciones en red
prev: books/handbook/serialcomms
next: books/handbook/firewalls
showBookMenu: true
weight: 30
path: "/books/handbook/"
---
[[ppp-and-slip]]
= PPP y SLIP
:doctype: book
:toc: macro
:toclevels: 1
:icons: font
:sectnums:
:sectnumlevels: 6
:sectnumoffset: 25
:partnums:
:source-highlighter: rouge
:experimental:
:images-path: books/handbook/ppp-and-slip/
ifdef::env-beastie[]
ifdef::backend-html5[]
:imagesdir: ../../../../images/{images-path}
endif::[]
ifndef::book[]
include::shared/authors.adoc[]
include::shared/mirrors.adoc[]
include::shared/releases.adoc[]
include::shared/attributes/attributes-{{% lang %}}.adoc[]
include::shared/{{% lang %}}/teams.adoc[]
include::shared/{{% lang %}}/mailing-lists.adoc[]
include::shared/{{% lang %}}/urls.adoc[]
toc::[]
endif::[]
ifdef::backend-pdf,backend-epub3[]
include::../../../../../shared/asciidoctor.adoc[]
endif::[]
endif::[]
ifndef::env-beastie[]
toc::[]
include::../../../../../shared/asciidoctor.adoc[]
endif::[]
== Sinopsis
FreeBSD cuenta con un gran numero de formas para conectar una computadora a otra. Para establecer una red o una conección a Internet por medio de un módem, o bien, permitir a otras computadoras conectarse por medio de este, se requiere del uso de PPP o SLIP. Este capítulo describe en detalle como configurar los servicios de comunicación para llevar esto a cabo.
Una vez que haya leido este capítulo, usted sabrá:
* Como configurar User PPP.
* Como configurar Kernel PPP.
* Como configurar PPPoE (PPP over Ethernet*).
* Como configurar PPPoA (PPP over ATM*).
* Como instalar y configurar un cliente y servidor SLIP.
Nota del Traductor.: En estricto sentido esto se refiere a contar con la conexión por medio de un dispositivo Ethernet, o bien ATM, pero debido a que usted encontrará estos metodos en su sistema, como PPPoE o bien PPPoA, se han dejado los conceptos "literales" del documento original. Espero que no sea un problema.
Antes de leer este capítulo, usted debiese:
* Estar familiarizado con la terminología básica de redes.
* Comprender lo básico y el propósito de una conección por módem SLIP y/o PPP.
Puede ser que usted se pregunte cual es la principal diferencia entre User PPP y kernel PPP. La respuesta es sencilla; el método User PPP procesa la entrada y salida de datos en userland (ver nota siguiente) en lugar de hacerlo en el kernel. Esto es algo desgastante, en términos del manejo de datos entre userland y el kernel, pero permite, por mucho, un mejor desempeño e implementación de PPP. User PPP utiliza el dispositivo [.filename]#tun# para comunicarse con el mundo exterior, mientras que kernel-ppp, utiliza el dispositivo [.filename]#ppp#.
En el desarrollo de este capítulo, se hará referencia a User PPP, simplemente como _ppp_, a menos de que sea necesaria hacer una distinción entre este y otro software de PPP, como es el caso de `pppd`. Así mismo, si en el desarrollo del capítulo no se señala lo contrario, todos los comandos explicados, deberán ser ejecutados como `root`.
Nota del Traductor : Cuando se habla de "userland" se hace referencia a todo aquello que *no* forma parte del kernel y que en el caso de código de programa, se ejecuta en modo usuario, ya que el código del kernel se ejecuta en modo kernel, supervisor, o bien en modo privilegiado de ejecución. En lo sucesivo este término será utilizado tal cual.
[[userppp]]
== Uso de User PPP
=== User PPP
==== Aclaraciones
Este documento asume que usted cuenta con lo siguiente:
* Una cuenta activa con un Proveedor del Servicio de Internet (ISP-por sus siglas en inglées), que usted utliza para conectarse.
* Adicionalmente, un módem o algún otro dispositivo, conectado a su sistema, y configurado correctamente, que le permite realizar la conexión con su ISP.
* El número telefónico de su proveedor.
* Su nombre de usuario y contraseña. (Ya sea un nombre de usuario y/o contraseña estilo UNIX, o bien para uso por medio de PAP o CHAP)
* La dirección IP de uno o más servidores de nombres (DNS). Normalmente, estos serán provistos por su proveedor de Internet. Si su proveedor no le ha dado esta información, puede utilizar la opción `enable dns` en su fichero [.filename]#ppp.conf#, para indicarle a ppp que configure el DNS por usted. Esta característica depende del sistema de negociación de DNS que mantenga su proveedor de Internet.
La siguiente información puede ser que haya sido provista por su proveedor de servicios de internet, pero no es completamente necesaria:
* La dirección IP del gateway (pasarela de salida) de su PSI. El gateway es la máquina a la cual usted se conectará y será la _ruta por default_. Si usted no cuenta con esta información, puede inventar uno y al intentar conectarse, el servidor de su PSI, este nos indicará cual es el valor correcto.
+
Esta dirección IP, es referida por ppp como `HISADDR`.
* La mascara de red (netmask) que debe utilizar. Si su PSI no le ha provisto de una, puede utilizar sin problema `255.255.255.255`.
* Si su PSI, le ha provisto de una dirección de IP estática y un nombre de host, puede capturarla. De otra forma podemos dejar que el servidor asigne cualquier IP que corresponda.
Si usted no cuenta con alguna de la información que hemos comentado, le recomendamos contactar con su PSI para requerirla.
[NOTE]
====
En el transcurso de la presente sección, algunos ejemplos muestran el contenido de archivos de configuración los cuales presentan una numeración. Estos números sirven como ayuda y referencia a cada línea, pero estos no deben de estar presentes en el archivo original. Una sangría adecuada, así como espacios adecuados, también son de suma importancia.
====
==== Preparando el Kernel
Como se comento anteriormente, la aplicación ppp utiliza el dispositivo [.filename]#tun#. Si este dispositivo no ha sido compilado dentro del kernel, ppp lo cargará como módulo cuando sea requerido. El dispositivo tun es dinámico, de tal forma que se generara de acuerdo a la demanda que tenga (usted no esta limitado por el kernel).
[NOTE]
====
Vale la pena hacer notar que el controlador tun, crea los dispositivos de acuerdo a sus necesidades, por lo que el comando `ifconfig -a`, no necesariamente mostrará los dispositivos [.filename]#tun#.
====
==== Verificando el dispositivo [.filename]#tun#
Bajo circunstancias normales, la mayoría de los usuarios sólo utilizaran un dispositivo [.filename]#tun# ([.filename]#/dev/tun0#). En lo sucesivo podemos hacer referencia a [.filename]#tun0# con la expresión [.filename]#tunN# donde `N` es el número que corresponde en su sistema.
Para instalaciones de FreeBSD que no tienen el habilitado el DEVFS la existencia de [.filename]#tun0# debe ser verificada (esto no es necesario si se cuenta habilitada la opción DEVFS ya que los nodos de dispositivos seán creados en función a las necesidades).
La forma más sencilla de verificar si el dispositivo [.filename]#tun0# se encuentra configurado correctamente, es la de rehacer el dispositivo. Para hacer esto simplemente siga los siguientes pasos:
[source,shell]
....
# cd /dev
# sh MAKEDEV tun0
....
Si usted necesita 16 dispositivos tun en su kernel, deberá crearlos. Esto puede hacerse de la siguiente manera:
[source,shell]
....
# cd /dev
# sh MAKEDEV tun15
....
==== Configuración de la Resolución de Nombres
La resolución es la parte del sistema que busca una dirección IP en los nombres de servidores (host) y viceversa. Puede ser configurado para que busque en "mapas" que describen la IP del servidor en uno de dos lugares, el primero es un archivo llamado [.filename]#/etc/hosts#. Lea man:hosts[5] para más información al respecto. El segundo es el Servicio de Nombres de Dominio de Internet (DNS-Internet Domain Name Service), el cual es una base de datos de distribución. Para mayor información con respecto a los DNS, referirse a dns.
La resolución de nombres es un sistema que por medio de llamadas, realiza el mapeo de nombres, pero es necesario inidicarle donde debe buscar la información. Para versiones de FreeBSD anteriores a la 5.0, esto es hecho al editar el archivo [.filename]#/etc/host.conf#. La versión 5.0 de FreeBSD utiliza el archivo [.filename]#/etc/nsswitch.conf#.
===== Edición del archivo [.filename]#/etc/host.conf#
Para versiones de FreeBSD anteriores a la 5.0, este archivo debe contener las siguientes dos líneas (en este orden):
[.programlisting]
....
hosts
bind
....
Esto le indica a la resolución que busque en primer término en el archivo [.filename]#/etc/hosts#, y posteriormente en el DNS, si el nombre no fué localizado
===== Editando el archivo [.filename]#/etc/nsswitch.conf#
Para versiones de FreeBSD 5.0 y posteriores, este archivo debe contener, al menos, la siguiente línea:
[.programlisting]
....
hosts: files, dns
....
Esto le indica a la resolución de nombres, que busque en primer lugar en el archivo [.filename]#/etc/hosts#, y en caso de que el nombre no haya sido localizado, busque en el DNS.
===== Editando el archivo [.filename]#/etc/hosts#
Este archivo puede contener direcciones IP, así como el nombre de las máquinas de su red local. Como mínimo debe contar con la información de la máquina que correrá ppp. Asumiendo que su ordenador se llama `foo.bar.com` con la dirección IP `10.0.0.1`, el archivo [.filename]#/etc/hosts# debiese contener:
[.programlisting]
....
127.0.0.1 localhost.bar.com localhost
::1 localhost.bar.com localhost
10.0.0.1 foo.bar.com foo
....
Las primeras dos líneas definen el alias del `localhost`, como sinónimo de la maquina actual. Independientemente de su propia dirección IP, la dirección IP en estas líneas siempre debe ser `127.0.0.1` y `::1`. La última línea especifica el nombre `foo.bar.com` (asi como `foo` para acortarlo), para la dirección `10.0.0.1`.
[NOTE]
====
La dirección `127.0.0.1` y el nombre `localhost` son conocidos como direcciones "loopback" las cuales hacen un "loopback" (salto de regreso) a la maquina local.
====
Si su proveedor de Internet, le asigna una dirección IP fija, asín como un nombre, y usted no lo utiliza como nombre del host, añada esto también al archivo [.filename]#/etc/hosts#.
===== Editando el archivo [.filename]#/etc/resolv.conf#
El archivo [.filename]#/etc/resolv.conf#, le indica a la resolución de nombres, como comportarse. Normalmente deberá de incluir la(s) siguiente(s) línea(s):
[.programlisting]
....
domain ejemplo.com
nameserver x.x.x.x
nameserver y.y.y.y
....
Donde _x.x.x.x_ y _y.y.y.y_ deben reemplazarse con las direcciones IP de los servidores DNS, de su ISP. Puede ser que esta información se la hayan entregado al suscribirse o no, pero una rápida llamada a su ISP debe resolver esto.
Tambié puede configurar su sistema, de tal forma que man:syslog[3] provee de un login para su conección por PPP. Sólo añada:
[.programlisting]
....
!ppp
*.* /var/log/ppp.log
....
al fichero [.filename]#/etc/syslog.conf#. En la mayoría de los casos esto funciona bien.
==== Configuración Automática de PPP
Ambos, `ppp` así como `pppd` (la implementación del kernel para PPP), utilizan la configuración de los archivos localizados en el directorio [.filename]#/etc/ppp#. Ejemplos para ppp, pueden encontrarse en: [.filename]#/usr/shared/examples/ppp/#.
Para efecto de configurar correctamente `ppp`, es necesario editar varios ficheros, dependiendo de sus necesidades. La manera en que edite dichos archivos, depende en la forma que utilice su PSI (Proveedor de Servicios de Internet) para brindarle conexión, ya sea por medio de una dirección IP estática o bien una IP dinámica (ya sea que cada vez que se conecta obtiene una nueva dirección).
[[userppp-staticIP]]
===== PPP y direcciones de IP estáticas (fijas)
Será necesario editar el archivo de configuración; [.filename]#/etc/ppp/ppp.conf#. Y deberá quedar de una manera similar al ejemplo que se describe a continuación.
[NOTE]
====
Las líneas que terminan con `:`, deben comenzar en la primer columna del archivo - el resto de las líneas deberán utilizar sangría como se muestra, utilizando espacios o bien el tabulador. La mayor parte de la información que requiere ingresar aqui, se mostro en el marcado manual anterior.
====
[.programlisting]
....
1 default:
2 set log Phase Chat LCP IPCP CCP tun command
3 ident user-ppp VERSION (built COMPILATIONDATE)
4 set device /dev/cuaa0
5 set speed 115200
6 set dial "ABORT BUSY ABORT NO\\sCARRIER TIMEOUT 5 \
7 \"\" AT OK-AT-OK ATE1Q0 OK \\dATDT\\T TIMEOUT 40 CONNECT"
8 set timeout 180
9 enable dns
10
11 provider:
12 set phone "(123) 456 7890"
13 set authname foo
14 set authkey bar
15 set login "TIMEOUT 10 \"\" \"\" gin:--gin: \\U word: \\P col: ppp"
16 set timeout 300
17 set ifaddr x.x.x.x y.y.y.y 255.255.255.255 0.0.0.0
18 add default HISADDR
....
Línea 1:::
Identifica la entrada por omisión a utilizar. Los comandos descritos en esta parte, serán ejecutados de manera automática cuando se ejecute ppp.
Línea 2:::
Habilita los parámetros de acceso. Cuando la configuración trabaja sin problemas, esta línea deberá quedar de la siguiente forma:
+
[.programlisting]
....
set log phase tun
....
para efecto de evitar avisos masivos del sistema (logs).
Línea 3:::
Esta línea le indica a PPP como identificarse ante el puerto. PPP se identifica, si tiene algun problema para efecto de establecer la conexión, en esta identificación, PPP provee de cierta información que puede resultar util para detectar el probelma.
Línea 4:::
Le indica a PPP cual es el dispositivo a utilizar para realizar la conexión, o bien al que esta conectado el módem. El dispositivo [.filename]#COM1# es [.filename]#/dev/cuaa0# y [.filename]#COM2# es [.filename]#/dev/cuaa1#.
Línea 5:::
Establece la velocidad a utilizar en la conexión. Si la velocidad de 115200 no trabaja correctamente (la cual deberia con cualquier módem normal), intente con una velocidad inferior, como puede ser 38400.
Líneas 6 y 7:::
La cadena de inicialización. El modo User PPP, utiliza y espera enviar-recibir, la información utilizando una sintaxis similar a la descrita en el programa man:chat[8]. Favor de consultar la página de ayuda para conocer las opciones de este lenguaje.
+
Nota: Este comando continua en la siguiente línea, para facilitar su lectura. Cualquier comando en el archivo [.filename]#ppp.conf# puede utilizar este formato, siempre y cuando el último caracter de la línea sea una diagonal invertida "\".
Línea 8:::
Establece el tiempo de espera que debe tratar de realizar la conexión. Por omisión se establecen 180 segundos, por lo que esta línea se deja por pura estética.
Línea 9:::
Esta línea le indica a PPP, que solicite confirmación al puerto, sobre la configuración de la resolución local. Si usted esta corriendo un servidor local de nombres, deberá comentar o eliminar esta línea.
Línea 10:::
Una línea en blanco, para facilitar la lectura. Las líneas en blanco son ignoradas por PPP.
Línea 11:::
Identifica el incio de datos para un "proveedor" determinado, de servicios de internet. Este podrá ser cambiado por el nombre de su ISP, de tal forma que en lo sucesivo utilice la opción `load ISP`, para iniciar una sesión.
Línea 12:::
Indica el numero telefónico del proveedor. Pueden indicarse varios numeros a utilizar, utilizando el signo de dos puntos (`:`) o bien la barra (`|`) como separador. La diferencia entre estos dos separadores, es detallada en el man:ppp[8]. Pero en resumen, se puede decir que si se desean utilizar varios numeros de manera aleatoria se debe utilizar los dos puntos, pero si se desea siempre utilizar el primer numero y en caso de falla el siguiente y así sucesivamente, se debe utilizar la barra. Es importante que todo lo que se refiere a numeros telefonicos, este entre comillas como se muestra. Es importante que si piensa usar espacios en los numeros, haga uso de estas comillas (`"`). La falta de estas pueden ocasionar un simple error.
Líneas 13 y 14:::
Identifica el nombre de usuario y su contraseña. Cuando uno se conecta utilizando un login de tipo Unix, estos valores hacen referencia al comando `set login`, utilizando las variables \U y \P. Cuando la conexión es utilizando algún metodo como PAP o CHAP, estos valores, son utilizados al momento de la autentificación.
Línea 15:::
Si usted esta utilizando el metodo PAP o CHAP, no habrá un login en este punto, y esta línea deberá ser comentada (utilizando el símbolo `#` al principio de la línea) o bien eliminada por completo. Vea la parte <<userppp-PAPnCHAP,Autentificación con PAP y CHAP >> para más detalles.
+
La cadena de acceso (login), utiliza la misma sintáxis que se utiliza en la cadena de marcado. En este ejemplo, la cadena sirve para un servicio, en el cual el inicio de sesión se ve algo así como lo siguiente:
+
[source,shell]
....
Proveedor de servicios X
login: foo
password: bar
protocol: ppp
....
+
Es recomendable editar el script, para que se ajuste a sus propias necesidades. Cuando cree este script por primera vez, asegurese de haber habilitado la parte que se refiere a al acceso por medio de "chat", para efecto de poder dar seguimiento al curso de la conexión y la resolución de la misma.
Línea 16:::
Establece el tiempo por defecto en el que se perderá la conexión (en segundos). En este caso la conexión será cortada de forma automática, después de 300 segundos de inactividad. Si no desea habilitar esta función establezca este valor en cero o bien utilice el comando en línea `-ddial`.
Línea 17:::
Indica la dirección de la interfaz. La cadena que aparece como _x.x.x.x._, debe se cambiada por la dirección asignada por su PSI. La línea que aparece como _y.y.y.y._, debe se substituida por la direcció IP especificada por su PSI, como servidor de salida o pasarela (gateway)(la maquina a la cual se va a conectar). Si su PSI no le ha indicado una dirección de este tipo, puede utilizar `10.0.0.2/0`. Si usted necesita utilizar una dirección "aleatoria", asegurese de crear el fichero [.filename]#/etc/ppp/ppp.linkup#, siguiendo las instrucciones de <<userppp-dynamicIP,PPP y las direcciones de IP Dinámicas>>, para su llenado. Si esta línea es omitida, `ppp`, no podrá ejecutarse en el modo `-auto`.
Línea 18:::
Añade una ruta por omisión al servidor de salida de su PSI. La palabra especial `HISADDR` se reemplaza con la dirección del gateway indicado por su PSI, que esta en la línea 9, de otra forma `HISADDR` no será inicializado.
+
Si no desea ejecutar `ppp` en modo `-auto`, esta línea deberá pasar al archivo [.filename]#ppp.linkup#.
No hay necesidad de editar el archivo [.filename]#ppp.linkup# si usted cuenta con una dirección IP estática y se esta ejecutando ppp en modo `-auto`, en virtud de que para efecto de realizar la conexión sus mapas de ruteo debe estar correctas. De cualquier forma puede ser que usted desee ejecutar algun programa/comando, posterior a la conexión. Este es explicado con más detalle posteriormente, cuando se vea el ejemplo de sendmail.
Ejemplo de los archivos de configuración, se pueden encontrar en el directorio; [.filename]#/usr/shared/examples/ppp#.
[[userppp-dynamicIP]]
===== PPP y direcciones de IP Dinámicas (Variables)
Si su proveedor de servicios, no le asigna una dirección de IP fija, será necesario configurar a `ppp`, de tal forma que al momento de realizar la conexión, negocie tanto la dirección local, como la remota. Esto se lleva a cabo al "adivinar" una dirección IP y permitiendo a `ppp` que la establezca correctamente, usando el Protocolo de Configuración de IP (IPCP), una vez que se ha conectado. La configuración que debe tener el archivo [.filename]#ppp.conf#, es la misma que la utilizada en <<userppp-staticIP,PPP y direcciones de IP fijas>>, salvo el siguiente cambio:
[.programlisting]
....
17 set ifaddr 10.0.0.1/0 10.0.0.2/0 255.255.255.255
....
Una vez más, no debe incluir el numero de línea, este sólo es una referencia. Así mismo deberá existir sangrado, de cuando menos 1 espacio.
Línea 17:::
El numero siguiente a la diagonal (`/`), es el numero de bits de la dirección en la cual ppp insistirá en conectarse. Puede ser que usted desee utilizar numeros de IP que sean más apropiados, para ajustar a sus necesiadades, pero el ejemplo descrito anteriormente siempre podrá utilizarse.
+
El último argumento (`0.0.0.0`), le indica a PPP, que inicie las negociaciones, utilizando como dirección `0.0.0.0`, en lugar de que utilice `10.0.0.1`, lo cual es necesario con algunos proveedores. No utilice la dirección `0.0.0.0` como el primer argumento, para el comando `set ifaddr`, ya que impide que PPP configure de forma correcta el sistema, cuando se utiliza en modo `-auto`.
Si usted no esta ejecutando PPP en modo `-auto`, deberá editar su archivo [.filename]#/etc/ppp/ppp.linkup#. El archivo [.filename]#ppp.linkup#, es utilizado una vez que se ha realizado la conexión. En este punto, `ppp` habrá negociado una dirección de interfaz, y será posible ahora, añadir las entradas para la las tablas de ruteo:
[.programlisting]
....
1 provider:
2 add default HISADDR
....
Línea 1:::
Al establecer (`ppp`) una conexión, buscará en [.filename]#ppp.linkup# una entrada, de acuerdo a las siguientes reglas. Primero, tratar de encontrar una entrada que sea igual a la utilizada en el archivo [.filename]#ppp.conf#. Si esto falla, buscar una IP con la dirección de nuestro gateway. Esta entrada es una etiqueta de tipo IP, de cuatro-octetos. Si aun después de esto no se ha detectado la entrada correcta, buscar la entrada `MYADDR`.
Línea 2:::
Esta línea le indica a `ppp` que añada una ruta por omisión, que este dirigida hacia `HISADDR`. `HISADDR` será reemplazada, con la IP del gateway, como se negocio por IPCP.
Para ver un detalle más preciso de esto, puede consultar la entrada de `pmdemand` en los archivos de ejemplo [.filename]#/usr/shared/examples/ppp/ppp.conf.sample# así como [.filename]#/usr/shared/examples/ppp/ppp.linkup.sample#.
===== Recibiendo Llamadas Externas
Cuando se configure ppp, para recibir llamadas externas, en una maquina conectada a un LAN (Red de Area Local), debe decidir si se va a permitir el envío de paquetes a la LAN. Si es así, debe asignar un numero de IP de su red local y utilizar el comando `enable proxy` en el archivo de configuracion [.filename]#/etc/ppp/ppp.conf#. También deberá asegurarse que en su archivo [.filename]#/etc/rc.conf# cuente con la línea:
[.programlisting]
....
gateway_enable="YES"
....
====== ?Qué getty utilizar?
El enlace Configurando FreeBSD para Servicios de Marcado provee de una buena descripció, sobre la configuración de estos servicios, basado en man:getty[8].
Una alternativa para el comando `getty` es http://www.leo.org/~doering/mgetty/index.html[mgetty ], el cual es una versión más inteligente de getty diseñada para servicios de marcado telefonico.
Una de las principales ventajas de `mgetty` es que, de hecho _platica_ con los modems, esto es, significativo, ya que si el puerto esta desactivado en su [.filename]#/etc/ttys# el modem no responderá el llamado.
Las últimas versiones de `mgetty` (de la 0.99beta y sucesivas), también cuentan con soporte para la detección automática de llamados de PPP, permitiendo el acceso a servidores de una manera más sencilla (sin uso de tanto scripts).
Puede referirse a <<userppp-mgetty,Mgetty y AutoPPP>> para más información con respecto al comando `mgetty`.
====== Permisos de PPP
El comando `ppp` normalmente debe ser ejecutado por root (superusuario). Si de cualquier forma, usted desea permitir que `ppp` pueda ser ejecutado en modo servidor, por un usuario regular, como se describe a continuación, deberá otorgar los permisos necesarios a ese usuario al añadirlo al grupo `network`, en el fichero [.filename]#/etc/groups#.
También será necesario darle acceso a una o más partes del archivo de configuración, haciendo uso del comando `allow`, como se ve a continuación:
[.programlisting]
....
allow users fred mary
....
Si el comando es utilizado en la sección `default`, esto le dará a el(los) usuario(s) especificado(s), acceso a todo.
====== Shells de PPP para Usuarios de IP Dinámica
Cree un fichero llamado: [.filename]#/etc/ppp/ppp-shell# y que contenga lo siguiente:
[.programlisting]
....
#!/bin/sh
IDENT=`echo $0 | sed -e 's/^.*-\(.*\)$/\1/'`
CALLEDAS="$IDENT"
TTY=`tty`
if [ x$IDENT = xdialup ]; then
IDENT=`basename $TTY`
fi
echo "PPP for $CALLEDAS on $TTY"
echo "Starting PPP for $IDENT"
exec /usr/sbin/ppp -direct $IDENT
....
Este script deberá ser ejecutable. Ahora cree un enláce simbólico llamado [.filename]#ppp-dialup# a este script, utilizando los siguientes comandos:
[source,shell]
....
# ln -s ppp-shell /etc/ppp/ppp-dialup
....
Deberá utilizar este script como _shell_ para todos los usuarios que realicen conexión. Este es un ejemplo del fichero [.filename]#/etc/password# para un usuario con acceso a PPP, con nombre de usuario `pchilds` (recuerde no editar directamente el fichero password, utilice `vipw`).
[.programlisting]
....
pchilds:*:1011:300:Peter Childs PPP:/home/ppp:/etc/ppp/ppp-dialup
....
Cree un directorio llamado [.filename]#/home/ppp# que contenga los siguentes archivos de 0 bytes:
[source,shell]
....
-r--r--r-- 1 root wheel 0 May 27 02:23 .hushlogin
-r--r--r-- 1 root wheel 0 May 27 02:22 .rhosts
....
los cuales impiden que [.filename]#/etc/motd# sea desplegado.
====== Shells de PPP para Usuarios de IP Estática
Cree el fichero [.filename]#ppp-shell# al igual que el mencionado con anterioridad, y por cada cuenta donde se tenga asignada una IP estática, cree un enlace simbólico al fichero [.filename]#ppp-shell#.
Por ejemplo, si usted cuenta con tres usuarios que utilicen este servicio; `fred`, `sam` y `mary`, los cuales redirecciona a una red de clase C, habria que hacer lo siguiente:
[source,shell]
....
# ln -s /etc/ppp/ppp-shell /etc/ppp/ppp-fred
# ln -s /etc/ppp/ppp-shell /etc/ppp/ppp-sam
# ln -s /etc/ppp/ppp-shell /etc/ppp/ppp-mary
....
Cada uno de los usuarios señalados, deberán de contar con el enlace a su shell-script como se indicó (por ejemplo, el usuario `mary`, debe contar con su enlace al fichero [.filename]#/etc/ppp/ppp-mary#).
====== Configurando [.filename]#ppp.conf# para Usuarios de IP-Dinámica
El archivo [.filename]#/etc/ppp/ppp.conf# deberá contener algo similar a lo siguiente:
[.programlisting]
....
default:
set debug phase lcp chat
set timeout 0
ttyd0:
set ifaddr 203.14.100.1 203.14.100.20 255.255.255.255
enable proxy
ttyd1:
set ifaddr 203.14.100.1 203.14.100.21 255.255.255.255
enable proxy
....
[NOTE]
====
Tomar en cuenta el sangrado, ya que es importante.
====
La sección `default:` es cargada para cada sesión. Para cada línea que exista y habilite el marcado, en el fichero [.filename]#/etc/ttys#, se deberá crear una entrada similar a la línea `ttyd0:` mencionada arriba. Cada línea debera contar con su propia dirección IP, de sus direcciones IP disponibles para asignar dinámicamente.
====== Configurando [.filename]#ppp.conf# para Usuarios de IP Estática
Junto con el contenido del fichero de ejemplo [.filename]#/usr/shared/examples/ppp/ppp.conf# mencionado anteriormente, deberá agregar una sección para cada usuario asignado estáticamente. Continuaremos con nuestro ejemplo con los usuarios `fred`, `sam` y `mary`.
[.programlisting]
....
fred:
set ifaddr 203.14.100.1 203.14.101.1 255.255.255.255
sam:
set ifaddr 203.14.100.1 203.14.102.1 255.255.255.255
mary:
set ifaddr 203.14.100.1 203.14.103.1 255.255.255.255
....
El archivo [.filename]#/etc/ppp/ppp.linkup# deberá de contener también información del ruteo, para cada IP estática, si es necesario. Las líneas a continuación añadirán una ruta a la dirección `203.14.101.0` de clase C, por medio del ppp link del cliente.
[.programlisting]
....
fred:
add 203.14.101.0 netmask 255.255.255.0 HISADDR
sam:
add 203.14.102.0 netmask 255.255.255.0 HISADDR
mary:
add 203.14.103.0 netmask 255.255.255.0 HISADDR
....
===== Algo más de `mgetty`, AutoPPP, y Extensiones MS
[[userppp-mgetty]]
====== `mgetty` y AutoPPP
Configurando y compilando `mgetty` con la opción `AUTO_PPP` habilitada, permite a `mgetty` detectar la fase LCP de conexiones PPP y automáticamente enviarlo a un shel de ppp. Aun con esto, y debido a que no se ingresa el nombre de usuario y contraseña, es necesario autntificarse por medio de PAP o CHAP.
Esta sección asume que el usuaio ha configurado, compilado e instalado correctamente una versión de `mgetty`, con la opción `Auto_PPP` (v0.99beta o posterior).
Asegurese de que su fichero [.filename]#/usr/local/etc/mgetty+sendfax/login.conf# contiene la siguiente línea en él:
[.programlisting]
....
/AutoPPP/ - - /etc/ppp/ppp-pap-dialup
....
Esto le indicará a `mgetty` que ejecute el script [.filename]#ppp-pap-dialup#, para efecto de detectar conexiones de tipo PPP.
Cree un fichero llamado [.filename]#/etc/ppp/ppp-pap-dialup# que contenga las siguientes líneas (el fichero deberá ser ejecutable):
[.programlisting]
....
#!/bin/sh
exec /usr/sbin/ppp -direct pap$IDENT
....
Para cada línea de marcado habilitada en [.filename]#/etc/ttys#, cree la entrada correspondiente en [.filename]#/etc/ppp/ppp.conf#. Esto co-existirá pacíficamente con las definiciaones que se hayan hecho, de acuerdo a lo mostrado en la parte de arriba.
[.programlisting]
....
pap:
enable pap
set ifaddr 203.14.100.1 203.14.100.20-203.14.100.40
enable proxy
....
Cada usuario que ingrese al sistema utilizando este metodo, deberá de contar con su clave de usuario, así como su contraseña, en el archivo [.filename]#/etc/ppp/ppp.secret#, o bien agrege la siguiente opción, para efecto de que se pueda realizar la autentificación por medio de PAP, directamente del fichero [.filename]#/etc/password#.
[.programlisting]
....
enable passwdauth
....
Si desea asignar una dirección IP fija a algunos usuarios, puede especificar el número como un tercer argumento en el fichero [.filename]#/etc/ppp/ppp.secrets#. Vea el archivo [.filename]#/usr/shared/examples/ppp/ppp.secret.sample# para obtener ejemplos más detallados de esto.
====== Extensiones de MS
Es posible configurar PPP, para efecto de que brinde a DNS y a NetBIOS, direcciones de servidores de nombres de forma automática.
Para efecto de habilitar estas extensiones con PPP versión 1.x, las siguientes líneas deberán añadirse a la sección relevante de [.filename]#/etc/ppp/ppp.conf#.
[.programlisting]
....
enable msext
set ns 203.14.100.1 203.14.100.2
set nbns 203.14.100.5
....
Y para versiones de PPP 2 y posteriores:
[.programlisting]
....
accept dns
set dns 203.14.100.1 203.14.100.2
set nbns 203.14.100.5
....
Esto le indicará a los clientes, las direcciones del servidor primario y secundario y el servidor-host para NetBIOS.
Si la línea `set dns`, es omitida en versiones 2 y posteriores, PPP utilizará los valores que encuentre en [.filename]#/etc/resolv.conf#.
[[userppp-PAPnCHAP]]
===== Autentificación por medio de PAP y CHAP
Algunos proveedores de internet tienen su sistema configurado para que cada usuario al conecatrse sean autentificados por medio de PAP o CHAP. Si este es el caso, al momento de realizar la conexion, no aparecerá un `login:`, sino que comenzará a comunicarse PPP inmediatamente.
El metodo PAP es menos seguro que CHAP, pero la seguridad normalmente no se toma mucho en cuenta en este tipo de conexiones, en funcion de que al enviarse la información de contraseña en texto plano, por medio de una línea serial, no deja mucho espacio para que los crackers "husmeen".
Haciendo referencia a lo que vimos de <<userppp-staticIP,PPP y Direcciones de IP Fijas>> o bien <<userppp-dynamicIP,PPP y Direcciones de IP Dinámicas>>, habría que aplicar los siguientes cambios:
[.programlisting]
....
7 set login
...
12 set authname MiNombreDeUsuario
13 set authkey MiContraseña
....
Line 7:::
Your provider will not normally require a scripted login to the server when you are using PAP or CHAP, so the `set login` line must be disabled.
Line 12:::
This line specifies your PAP/CHAP user name. Replace _MiNombreDeUsuario_ with the correct value.
Line 13:::
This line specifies your PAP/CHAP password. Replace _MiContraseña_ with the correct value. You may also want to add an extra line, such as:
+
[.programlisting]
....
15 accept PAP
....
+
o
+
[.programlisting]
....
15 accept CHAP
....
+
to make it explicit, although both PAP and CHAP are accepted by default.
===== Cambiando la configuración de `ppp` sobre la marcha (al vuelo)
Es posible hablar con el programa `ppp` mientras se esta ejecutando en segundo plano, pero sólo si se ha habilitado un puerto de diagnóstico. Para hacer esto, añada lo siguiente a su configuración:
[.programlisting]
....
set server /var/run/ppp-tun%d DiagnosticPassword 0177
....
This tells PPP to listen on the specified Unix-domain socket, asking clients for the given password before allowing access. The `%d` in the name is replaced with the [.filename]#tun# device number being used (e.g. [.filename]#tun0#).
Once the socket has been set up, man:pppctl[8] can be used in scripts that need to manipulate the running program.
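For example, a quick interactive check of the negotiated IPCP state might look like the following (a sketch only; the socket path and password must match your `set server` line, and `show ipcp` is just one of the commands ppp accepts):

[source,shell]
....
# pppctl -p DiagnosticPassword /var/run/ppp-tun0 show ipcp
....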
[[userppp-final]]
==== Configuración Final del Sistema
Ahora usted cuenta con un `ppp` configurado, pero es necesario hacer algunas cosas, antes de que este disponible para trabajar. Todas ellas giran entorno a la edición del fichero [.filename]#/etc/rc.conf#.
En primer lugar es importante que se asegure que ha asignado un nombre a su maquina. Esto se hace asignandolo en la línea de `hostname=`, por ejemplo:
[.programlisting]
....
hostname="foo.ejemplo.com"
....
Si su Proveedor de Servicios de Internet (PSI), le ha provisto de una dirección fija y un nombre de host, es recomendable que utilice este como su `hostname`.
Localice la línea que se refiera a sus dispositivos de red, la cual es `network_interfaces`. Si desea configurar su sistema para marcar a su PSI a petición, asegurese de que el dispositivo [.filename]#tun0# este en la lista, de otra forma eliminelo.
[.programlisting]
....
network_interfaces="lo0 tun0"
ifconfig_tun0=""
....
[NOTE]
====
La variable `ifconfig_tun0` debe permanecer en blanco (vacia), y deberá crearse un fichero llamado [.filename]#/etc/start_if.tun0# que contenga la siguiente línea:
[.programlisting]
....
ppp -auto MiSistema
....
Este script se ejecuta cuando se esta configurando la red, inicializando el demonio de ppp de modo automático. Si usted cuenta con una LAN (red de área local), de la cual esta maquina sea la pasarela (gateway), es tambien recomendable que utilice la opción `-alias`. Referirse a la página de ayuda (man) para mayores detalles.
====
Especifique el programa router a `NO`, con la siguiente línea en su fichero [.filename]#/etc/rc.conf#:
[.programlisting]
....
router_enable="NO"
....
It is important that the `routed` daemon is not started, as `routed` tends to delete the routing table entries created by `ppp`.
It is probably also worth making sure that the `sendmail_flags` line does not include the `-q` option, otherwise `sendmail` will attempt to probe the network from time to time, possibly causing your machine to dial out. You may try:
[.programlisting]
....
sendmail_flags="-bd"
....
The downside of this configuration is that you must force `sendmail` to re-examine the mail queue whenever `ppp` brings the link up, with the following command:
[source,shell]
....
# /usr/sbin/sendmail -q
....
Puede utilizar el comando `!bg` en el fichero [.filename]#ppp.linkup# para hacer esto de manera automática:
[.programlisting]
....
1 provider:
2 delete ALL
3 add 0 0 HISADDR
4 !bg sendmail -bd -q30m
....
If you do not want to do this, it is possible to set up a "dfilter" (dial filter) to block outgoing SMTP traffic. Refer to the sample files for further details.
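As a very rough sketch only (the exact packet-filter grammar and rule numbering should be checked against man:ppp[8] and the sample configuration files), such a dial filter might look something like this:

[.programlisting]
....
set filter dial 0 deny tcp dst eq 25
set filter dial 1 permit 0 0
....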
Ahora lo único que queda pendiente de hacerse es reiniciar el equipo. Una vez reiniciado el equipo, puede teclear:
[source,shell]
....
# ppp
....
y posteriormente `dial proveedor` para iniciar la sesión, o bien si desea que `ppp` inicie la sesión automáticamente, cuando haya una petición de salida (y no haya creado el fichero [.filename]#start_if.tun0#), puede teclear:
[source,shell]
....
# ppp -auto proveedor
....
==== Summary
A manera de recapitulación, podemos decir que para configurar ppp por primera ocasión, debemos:
Por parte del Cliente:
[.procedure]
====
. Make sure the [.filename]#tun# device is in your kernel.
. Make sure the [.filename]#tunX# device file is available in [.filename]#/dev#.
. Create an entry in [.filename]#/etc/ppp/ppp.conf#. The [.filename]#pmdemand# example should suffice for most providers.
. If you have a dynamic IP address, create an entry in [.filename]#/etc/ppp/ppp.linkup#.
. Update your [.filename]#/etc/rc.conf# file.
. Create a [.filename]#start_if.tun0# script if you require demand dialing.
====
Por parte del Servidor:
[.procedure]
====
. Make sure the [.filename]#tun# device is in your kernel.
. Make sure the [.filename]#tunX# device file is available in [.filename]#/dev#.
. Create an entry in [.filename]#/etc/passwd# (using the man:vipw[8] program).
. Create a profile in this user's home directory that runs `ppp -direct direct-server` or similar.
. Create an entry in [.filename]#/etc/ppp/ppp.conf#. The [.filename]#direct-server# example should give you an idea.
. Create an entry in [.filename]#/etc/ppp/ppp.linkup#.
. Update your [.filename]#/etc/rc.conf# file.
====
[[ppp]]
== Uso de Kernel PPP
=== Configurando Kernel PPP
Antes de comenzar a configurar PPP en su maquina, asegurese de `pppd` se localiza en [.filename]#/usr/sbin# y de que existe el directorio [.filename]#/etc/ppp#.
`pppd` can operate in two modes:
. As a "client" - when you want to connect your machine to the outside world over PPP via a serial connection or modem line.
. As a "server" - when your machine is located on a network and is used to connect other machines using PPP.
In both cases you will need to set up an options file ([.filename]#/etc/ppp/options#, or [.filename]#~/.ppprc# if more than one user on the machine uses PPP).
También deberá de contar con un software para hacer la conexión por medio de módem (de preferencia kermit), de manera que pueda hacer la conexión con un host remoto.
=== Uso de `pppd` como Cliente
El siguiente archivo de configuración [.filename]#/etc/ppp/options# puede utilizarse para realizar la conexión a una terminal CISCO, por medio de PPP.
[.programlisting]
....
crtscts # habilita el flujo de controls de hardware
modem # línea de control del modem
noipdefault # el servidor PPP remoto asignará la dirección IP
# si el servidor no envia una dirección IP durante IPCP
# remueva esta opción.
passive # espere por los paquetes LCP
domain ppp.foo.com # escriba su nombre de dominio aqui
:<remote_ip> # escriba la IP del host remoto aqui
# este será utilizado para el ruteo de paquetes por medio
# de PPP, si no especifica esta opción, cambie la
# línea a quedar <local_ip>:<remote_ip>
defaultroute # establezca esta opción si el servidor su ruteador
# por default
....
Para conectarse:
[.procedure]
====
. Realice el llamado al host remoto, utilizando kermit (o cualquier otra aplicación de este tipo), ingrese su nombre de usuario y contraseña (o cualquier info que sea necesaria para habilitar PPP en el host remoto).
. Salga de kermit (sin colgar la línea).
. Ingrese lo siguiente:
+
[source,shell]
....
# /usr/src/usr.sbin/pppd.new/pppd /dev/tty01 19200
....
+
Asegurese de utilizar el dispositivo y la velocidad adecuados.
====
Now your computer is connected with PPP. If the connection fails, you can add the `debug` option to [.filename]#/etc/ppp/options# so you can see what is happening and track down the problem.
El siguiente script; [.filename]#/etc/ppp/pppup# realizará los 3 pasos de forma automática:
[.programlisting]
....
#!/bin/sh
ps ax |grep pppd |grep -v grep
pid=`ps ax |grep pppd |grep -v grep|awk '{print $1;}'`
if [ "X${pid}" != "X" ] ; then
echo 'killing pppd, PID=' ${pid}
kill ${pid}
fi
ps ax |grep kermit |grep -v grep
pid=`ps ax |grep kermit |grep -v grep|awk '{print $1;}'`
if [ "X${pid}" != "X" ] ; then
echo 'killing kermit, PID=' ${pid}
kill -9 ${pid}
fi
ifconfig ppp0 down
ifconfig ppp0 delete
kermit -y /etc/ppp/kermit.dial
pppd /dev/tty01 19200
....
The file [.filename]#/etc/ppp/kermit.dial# is a kermit script that dials and performs whatever authentication the remote host requires (an example of such a script is included at the end of this document).
Utilice el siguiente script, llamado [.filename]#/etc/ppp/pppdown# para desconectar la línea PPP:
[.programlisting]
....
#!/bin/sh
pid=`ps ax |grep pppd |grep -v grep|awk '{print $1;}'`
if [ X${pid} != "X" ] ; then
echo 'killing pppd, PID=' ${pid}
kill -TERM ${pid}
fi
ps ax |grep kermit |grep -v grep
pid=`ps ax |grep kermit |grep -v grep|awk '{print $1;}'`
if [ "X${pid}" != "X" ] ; then
echo 'killing kermit, PID=' ${pid}
kill -9 ${pid}
fi
/sbin/ifconfig ppp0 down
/sbin/ifconfig ppp0 delete
kermit -y /etc/ppp/kermit.hup
/etc/ppp/ppptest
....
Check that PPP is still running with [.filename]#/etc/ppp/ppptest#, which should look like this:
[.programlisting]
....
#!/bin/sh
pid=`ps ax| grep pppd |grep -v grep|awk '{print $1;}'`
if [ X${pid} != "X" ] ; then
echo 'pppd running: PID=' ${pid-NONE}
else
echo 'No pppd running.'
fi
set -x
netstat -n -I ppp0
ifconfig ppp0
....
Para colgar el módem, ejecute [.filename]#/etc/ppp/kermit.hup#, que deberá contener:
[.programlisting]
....
set line /dev/tty01 ; aqui va el dispositivo del modem
set speed 19200
set file type binary
set file names literal
set win 8
set rec pack 1024
set send pack 1024
set block 3
set term bytesize 8
set command bytesize 8
set flow none
pau 1
out +++
inp 5 OK
out ATH0\13
echo \13
exit
....
Here is an alternative method that uses `chat` instead of `kermit`.
The following two files should be sufficient to establish a connection with `pppd`.
[.programlisting]
....
/dev/cuaa1 115200
crtscts # habilita el control de flujo por medio de hardware
modem # línea de control del módem
connect "/usr/bin/chat -f /etc/ppp/login.chat.script"
noipdefault # el servidor remoto debe asignar la dirección IP.
# si el servidor no asigna una IP durante la negociación
# IPCP , remueva esta línea y espere por los
passive # paquetes LCP
domain <your.domain> # aqui va su dominio
: # escriba la IP del host remoto aqui
# si no ha especificado la opción noipdefault
# cambie esta línea a quedar <local_ip>:<remote_ip>
defaultroute # escriba esto, si desea que el servidor PPP sea su
# router por default
....
[.filename]#/etc/ppp/login.chat.script#:
[NOTE]
====
Lo siguiente debe ir en una sola línea.
====
[.programlisting]
....
ABORT BUSY ABORT 'NO CARRIER' "" AT OK ATDT<numero.de.telefono>
CONNECT "" TIMEOUT 10 ogin:-\\r-ogin: <nombre.usuario>
TIMEOUT 5 sword: <contraseña>
....
Una vez que estos ficheros han sido modificados correctamente e instalados, todo lo que necesita es ejecutar el comando `pppd`, algo como:
[source,shell]
....
# pppd
....
=== Uso de `pppd` como Servidor
El fichero [.filename]#/etc/ppp/options# debe contener algo similar a lo siguiente:
[.programlisting]
....
crtscts # control de flujo por Hardware
netmask 255.255.255.0 # mascara de red (no es requisito)
192.114.208.20:192.114.208.165 # direcciones ip del host local y remoto
# la dirección ip local debe ser
# diferente a la que le haya asignado a su
# dispositivo de red ethernet (u otro)
# la dirección ip remota que será
# asignada a la maquina remota
domain ppp.foo.com # su dominio
passive # espera por LCP
modem # línea de modem
....
The following script, [.filename]#/etc/ppp/pppserv#, will start `pppd` up as a server:
[.programlisting]
....
#!/bin/sh
ps ax |grep pppd |grep -v grep
pid=`ps ax |grep pppd |grep -v grep|awk '{print $1;}'`
if [ "X${pid}" != "X" ] ; then
echo 'killing pppd, PID=' ${pid}
kill ${pid}
fi
ps ax |grep kermit |grep -v grep
pid=`ps ax |grep kermit |grep -v grep|awk '{print $1;}'`
if [ "X${pid}" != "X" ] ; then
echo 'killing kermit, PID=' ${pid}
kill -9 ${pid}
fi
# reset ppp interface
ifconfig ppp0 down
ifconfig ppp0 delete
# enable autoanswer mode
kermit -y /etc/ppp/kermit.ans
# run ppp
pppd /dev/tty01 19200
....
Utilice el script [.filename]#/etc/ppp/pppservdown# para detener el servidor:
[.programlisting]
....
#!/bin/sh
ps ax |grep pppd |grep -v grep
pid=`ps ax |grep pppd |grep -v grep|awk '{print $1;}'`
if [ "X${pid}" != "X" ] ; then
echo 'killing pppd, PID=' ${pid}
kill ${pid}
fi
ps ax |grep kermit |grep -v grep
pid=`ps ax |grep kermit |grep -v grep|awk '{print $1;}'`
if [ "X${pid}" != "X" ] ; then
echo 'killing kermit, PID=' ${pid}
kill -9 ${pid}
fi
ifconfig ppp0 down
ifconfig ppp0 delete
kermit -y /etc/ppp/kermit.noans
....
El siguiente script de kermit ([.filename]#/etc/ppp/kermit.ans#) habilita/deshabilita el modo de autorespuesta en su módem. Y debe verse algo similar a lo siguiente:
[.programlisting]
....
set line /dev/tty01
set speed 19200
set file type binary
set file names literal
set win 8
set rec pack 1024
set send pack 1024
set block 3
set term bytesize 8
set command bytesize 8
set flow none
pau 1
out +++
inp 5 OK
out ATH0\13
inp 5 OK
echo \13
out ATS0=1\13 ; cambiar esto a quedar out ATS0=0\13 si desea deshabilitar el modo
; de autorespuesta
inp 5 OK
echo \13
exit
....
A kermit script named [.filename]#/etc/ppp/kermit.dial# is used to dial and authenticate on the remote host. You will need to customize it to suit your needs: put your login name and password in this script, and you may also need to change the input statements depending on how your connection proceeds.
[.programlisting]
....
;
; ingrese el dispositivo que esta apuntando a su módem:
;
set line /dev/tty01
;
; escriba la velocidad del módem:
;
set speed 19200
set file type binary ; full 8 bit file xfer
set file names literal
set win 8
set rec pack 1024
set send pack 1024
set block 3
set term bytesize 8
set command bytesize 8
set flow none
set modem hayes
set dial hangup off
set carrier auto ; Posteriormente SET CARRIER si es necesario
set dial display on ; despues SET DIAL si es necesario
set input echo on
set input timeout proceed
set input case ignore
def \%x 0
goto slhup
:slcmd ; cambio a modo de comando
echo Put the modem in command mode.
clear ; Limpieza del buffer de entrada
pause 1
output +++
input 1 OK\13\10 ; esperar para OK
if success goto slhup
output \13
pause 1
output at\13
input 1 OK\13\10
if fail goto slcmd ; si el modem no responde Ok, intentar de nuevo
:slhup ; colgar el teléfono
clear ; Limpieza del buffer de entrada
pause 1
echo Hanging up the phone.
output ath0\13
input 2 OK\13\10
if fail goto slcmd ; si no hay un OK como respuesta, poner el modem en modo de comando
:sldial ; marcar el numero telefonico
pause 1
echo Dialing.
output atdt9,550311\13\10 ; escriba el numero de telefono
assign \%x 0 ; asignar cero al contador
:look
clear ; Limpieza del buffer de entrada
increment \%x ; Conteo de segundos
input 1 {CONNECT }
if success goto sllogin
reinput 1 {NO CARRIER\13\10}
if success goto sldial
reinput 1 {NO DIALTONE\13\10}
if success goto slnodial
reinput 1 {\255}
if success goto slhup
reinput 1 {\127}
if success goto slhup
if < \%x 60 goto look
else goto slhup
:sllogin ; login
assign \%x 0 ; asignar cero al contador
pause 1
echo Looking for login prompt.
:slloop
increment \%x ; Conteo de segundos
clear ; Limpieza del buffer de entrada
output \13
;
; escriba su login prompt aqui:
;
input 1 {Username: }
if success goto sluid
reinput 1 {\255}
if success goto slhup
reinput 1 {\127}
if success goto slhup
if < \%x 10 goto slloop ; intentar 10 veces para obtener un login
else goto slhup ; colgar y empezar de nuevo si a la decima falla
:sluid
;
; escriba su nombre de usuario:
;
output ppp-login\13
input 1 {Password: }
;
; escriba su contraseña:
;
output ppp-password\13
input 1 {Entering SLIP mode.}
echo
quit
:slnodial
echo \7No dialtone. Check the telephone line!\7
exit 1
; local variables:
; mode: csh
; comment-start: "; "
; comment-start-skip: "; "
; end:
....
[[pppoe]]
== Uso de PPP sobre Ethernet (PPPoE)
En esta sección veremos como configurar PPP sobre una red Ethernet (PPPoE).
=== Configurando el kernel
Ya no es necesario realizar una configuración especial para que nuestro kernel cuente con soporte para PPPoE. Siempre y cuando el soporte de redes necesario se encuentre en él, ppp se encargará de cargarlo de una manera dinámica.
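If you want to double-check that the PPPoE netgraph support was indeed pulled in after starting ppp, one way (a sketch, assuming kernel modules are being used) is to look for the module:

[source,shell]
....
# kldstat | grep ng_pppoe
....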
=== Editando el fichero [.filename]#ppp.conf#
He aqui un ejemplo de un fichero de configuración [.filename]#ppp.conf# completamente funcional:
[.programlisting]
....
default:
set log Phase tun command # puede añadir más dispositivos si lo desea
set ifaddr 10.0.0.1/0 10.0.0.2/0
nombre_del_proveedor_del_servicio_de_internet:
set device PPPoE:xl1 # sustituya xl1 con su dispositivo ethernet
set authname SuNombreDeUsuario
set authkey SuContraseña
set dial
set login
add default HISADDR
....
=== Ejecutando PPP
Estando en modo `superusuario` (root) puede ejecutar:
[source,shell]
....
# ppp -ddial nombre_del_proveedor_de_inet
....
=== Ejecutando PPP al inicio de sesión
Añada las siguientes líneas a su archivo [.filename]#/etc/rc.conf#:
[.programlisting]
....
ppp_enable="YES"
ppp_mode="ddial"
ppp_nat="YES" # siempre y cuando desee habilitar nat para su red local
ppp_profile="nombre_del_proveedor_de_inet"
....
=== Diferenciando el uso del Servicio de PPPoE
En ocasiones es necesario utilizar una pequeña marca para diferenciar el servicio que vamos a utilizar para establecer la conexión. Las marcas ("tags")de servicio son utilizadas para distinguir entre diferentes servidores de una red, a los que nos podemos conectar utilizando PPPoE.
Su proveedor de internet debe haberle provisto de la información necesaria para crear esta marca. Si esto no fué así, puede solicitar a su proveedor que le brinde esta información.
As a last resort, you can try the method suggested by the http://www.roaringpenguin.com/pppoe/[Roaring Penguin PPPoE] program, which can be found in the crossref:ports[ports, Ports Collection]. Bear in mind, however, that this program may de-program your modem entirely, so think carefully before using it. Simply install the modem driver program provided by your ISP and then access the `System` menu of that program. The name of your profile should be listed there; it is usually _ISP_.
El nombre del perfil (marca del servicio) será utilizada por la configuración de PPPoE en el fichero de configuración [.filename]#ppp.conf# como el proveedor para la opción del comando `set device` (puede ver la página de ayuda man:ppp[8] para más detalles). Esto debe verse algo similar a lo siguiente:
[.programlisting]
....
set device PPPoE:xl1:ISP
....
Do not forget to change _xl1_ to the Ethernet device you are actually using.
Do not forget to change _ISP_ to the profile name found above (usually the name of your Internet Service Provider).
Para información adicional consulte:
* http://renaud.waldura.com/doc/freebsd/pppoe/[Cheaper Broadband with FreeBSD on DSL] por Renauld Waldura.
[[ppp-3com]]
=== Uso de PPPoE en Casa con un Modem Dual ADSL 3Com
Este módem no sigue el estandar establecido en el http://www.faqs.org/rfcs/rfc2516.html[RFC 2516] (_Un metodo que describe el uso de PPP por medio de un dispositivo Ethernet (PPoE)_, escrito por L. Mamakos, K. Lidl, J. Evarts, D. Carrel, D. Simone y R. Wheeler). En su lugar, el código de diferentes tipos de paquetes ha sido utilizado para el manejo del entorno Ethernet. Si cree que esto es incorrecto y que se debiera ajustar a las especificaciones de PPPoE, por favor comentelo en http://www.3com.com/[3Com].
Para poder hacer que FreeBSD sea capaz de comunicarse con este dispositivo, se debe establecer un control de sistema (sysctl). Esto puede hacerse de forma automática al momento del arranque, editando el fichero [.filename]#/etc/sysctl.conf#:
[.programlisting]
....
net.graph.nonstandard_pppoe=1
....
or it can be done from the command line, for immediate effect, with `sysctl -w net.graph.nonstandard_pppoe=1`.
Unfortunately, because this is a system-wide setting, it is not possible to talk to a normal PPPoE client or server and to a 3Com home ADSL modem at the same time.
[[pppoa]]
== Uso de PPP sobre ATM (PPPoA)
Lo siguiente describe como configurar PPP utilizando ATM, alias PPPoA. PPPoA es una alternativa muy común entre proveedores de DSL en Europa.
=== Uso de PPPoA con un Alcatel Speedtouch USB
FreeBSD support for this device is provided as a port, because the firmware is distributed under http://www.alcatel.com/consumer/dsl/disclaimer_lx.htm[Alcatel's license agreement].
Para instalar este software, simplemente utilice la crossref:ports[ports,colección de ports]. Instale el port package:net/pppoa[] y siga las instrucciones provistas por el port.
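If you prefer to build it from the Ports Collection yourself, the usual sequence applies:

[source,shell]
....
# cd /usr/ports/net/pppoa
# make install clean
....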
=== Uso de mpd
Puede usar mpd para conectarse a una gran variedad de servicios, en particular servicios pptp. Puede encontrar mpd en la colección de ports, bajo package:net/mpd[].
Primero debe instalar el port, y posteriormente configurar mpd para que se ajuste a sus necesidades y a la configuración del proveedor. El port instala un conjunto de ficheros de configuración de ejemplo, que estan bien documentados en [.filename]#PREFIX/etc/mpd/#. Note que _PREFIX_ se refiere al directorio donde sus ports son instalados, que normalmente es en [.filename]#/usr/local#. Una guía completa en formato HTML, esta disponible una vez que se ha instalado el port. Esta se localiza en [.filename]#PREFIX/shared/mpd/#. Aqui tenemos un ejemplo simple de configuración para conectarse a un servicio ADSL con mpd. La configuración se divide en dos ficheros, primero tenemos el fichero [.filename]#mpd.conf#.
[.programlisting]
....
default:
load adsl
adsl:
new -i ng0 adsl adsl
set bundle authname usuario <.>
set bundle password contraseña <.>
set bundle disable multilink
set link no pap actcomp protocomp
set link disable chap
set link accept chap
set link keep-alive 30 10
set ipcp no vjcomp
set ipcp ranges 0.0.0.0/0 0.0.0.0/0
set iface route default
set iface disable on-demand
set iface enable proxy-arp
set iface idle 0
open
....
<.> El nombre de usuario para autentificar con su proveedor.
<.> La contraseña para autentificar con su proveedor.
The [.filename]#mpd.links# file contains information about the link, or links, you wish to establish. An example [.filename]#mpd.links# to accompany the above example is shown below.
[.programlisting]
....
adsl:
set link type pptp
set pptp mode active
set pptp enable originate incoming outcall
set pptp self 10.0.0.140
set pptp peer 10.0.0.138
....
The connection can easily be brought up by issuing the following command as `root`:
[source,shell]
....
# mpd -b adsl
....
The status of the connection can be viewed with this command:
[source,shell]
....
% ifconfig ng0
ng0: flags=88d1<UP,POINTOPOINT,RUNNING,NOARP,SIMPLEX,MULTICAST> mtu 1500
inet 216.136.204.117 --> 204.152.186.171 netmask 0xffffffff
....
Usar mpd es la forma recomendada para conectarse con servicios ADSL con FreeBSD.
=== Uso de pptpclient
También es posible usar FreeBSD para conectarse a otros servicios PPPoA por medio de package:net/pptpclient[].
Para conectarse por medio de package:net/pptpclient[] a un servicio DSL, instale el port o paquete y edite el fichero [.filename]#/etc/ppp/ppp.conf#. Debe ser `root` para hacer estas operaciones. Un ejemplo de la sección de [.filename]#ppp.conf#, se muestra a continuación. Para mayor información sobre las opciones de [.filename]#ppp.conf#, consulte la página de ayuda de ppp; man:ppp[8].
[.programlisting]
....
adsl:
set log phase chat lcp ipcp ccp tun command
set timeout 0
enable dns
set authname usuario <.>
set authkey contraseña <.>
set ifaddr 0 0
add default HISADDR
....
<.> Nombre de usuario de la cuenta DSL.
<.> La contraseña de su cuenta.
[WARNING]
====
Because you must put your password in [.filename]#ppp.conf# in plain-text form, you should make sure that nobody else can read the contents of this file. The following commands ensure that the file can only be read by `root`. Refer to the man:chmod[1] and man:chown[8] manual pages for further information.
[source,shell]
....
# chown root:wheel /etc/ppp/ppp.conf
# chmod 600 /etc/ppp/ppp.conf
....
====
Esto abrirá una sesion por medio de PPP con su ruteador DSL. Los módems Ethernet DSL cuentan con una dirección IP de LAN preconfigurada a la cual se puede conectar. En el caso del Alcatel Speedtouch, esta dirección es `10.0.0.138`. La documentación de su equipo debe indicarle que dirección utiliza. Para abrir el "tunel" e iniciar la sesión ppp, ejecute el siguiente comando.
[source,shell]
....
# pptp dirección proveedor
....
[TIP]
====
Puede añadir un símbolo de ampersand ("&") al final de este comando, ya que pptp no retorna al shell por default.
====
Un dispositivo virtual [.filename]#tun# será creado, para interactuar con los procesos de pptp y ppp. Una vez que regrese al shell puede examinar la conexión por medio del siguiente comando.
[source,shell]
....
% ifconfig tun0
tun0: flags=8051<UP,POINTOPOINT,RUNNING,MULTICAST> mtu 1500
inet 216.136.204.21 --> 204.152.186.171 netmask 0xffffff00
Opened by PID 918
....
If you cannot connect, check the configuration of your router, which is usually accessible via telnet or with a web browser. If you still cannot connect, examine the output of the pptp command and the contents of the ppp log file, [.filename]#/var/log/ppp.log#, for clues.
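Watching the log while you retry the connection often makes the failure obvious; for example:

[source,shell]
....
# tail -f /var/log/ppp.log
....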
[[slip]]
== Uso de SLIP
[[slipc]]
=== Configurando un cliente SLIP
Lo siguiente es una forma de configurar FreeBSD para que utilice SLIP, en un red con dirección estática. Para direcciones dinámicas (esto es, donde su dirección cambia cada vez que se conecta), probablemente sea necesario realizar algunos ajustes que complican la configuración.
The first thing to determine is which serial port your modem is connected to. Many people set up a symbolic link, such as [.filename]#/dev/modem#, to point to the real device name, [.filename]#/dev/cuaaN#. This lets you abstract away the actual device name in case the modem ever needs to be moved to a different port, and saves the hassle of having to edit a number of files in [.filename]#/etc# and [.filename]#.kermrc# files all over the system!
[NOTE]
====
[.filename]#/dev/cuaa0# es [.filename]#COM1#, [.filename]#cuaa1# es [.filename]#COM2#, etc.
====
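If you want to use such a symlink, it can be created like this (a sketch only; substitute the port your modem is really attached to):

[source,shell]
....
# ln -s /dev/cuaa1 /dev/modem
....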
Asegurese de contar con la siguiente opción en la configuración de su kernel:
[.programlisting]
....
pseudo-device sl 1
....
Esta opción esta incluida en el archivo del kernel [.filename]#GENERIC#, así que no debe haber problema, claro esta, a menos que lo haya borrado intencionalmente.
==== Cosas Que Tiene Que Hacer Solo Una Vez
[.procedure]
====
. Añada el nombre de su maquina, gateway, servidores de nombre a su fichero [.filename]#/etc/hosts#. Este es un ejemplo de mi fichero:
+
[.programlisting]
....
127.0.0.1 localhost loghost
136.152.64.181 water.CS.Example.EDU water.CS water
136.152.64.1 inr-3.CS.Example.EDU inr-3 slip-gateway
128.32.136.9 ns1.Example.EDU ns1
128.32.136.12 ns2.Example.EDU ns2
....
+
. Asegurese de que cuenta con la opción `hosts` antes de la opción `bind`, en su fichero [.filename]#/etc/host.conf#. De lo contrario pueden ocurrir cosas graciosas en su sistema.
. Edite el fichero [.filename]#/etc/rc.conf#.
.. Especifique su nombre host al editar la línea que dice:
+
[.programlisting]
....
hostname=minombre.mi.dominio
....
+
El nombre completo de su sistema para internet, debe ser escrito en este punto.
.. Añada el dispositivo [.filename]#sl0# a la lista de dispositivos de red, al cambiar la línea que dice:
+
[.programlisting]
....
network_interfaces="lo0"
....
+
a quedar:
+
[.programlisting]
....
network_interfaces="lo0 sl0"
....
+
.. Añada los parámetros de inicialización del dispositivo sl0, al añadir la línea:
+
[.programlisting]
....
ifconfig_sl0="inet ${hostname} slip-gateway netmask 0xffffff00 up"
....
+
.. Specify the default router by changing the line:
+
[.programlisting]
....
defaultrouter=NO
....
+
a quedar:
+
[.programlisting]
....
defaultrouter=slip-gateway
....
+
. Edite su fichero [.filename]#/etc/resolv.conf# (si no existe debe crearlo), a que contenga lo siguiente:
+
[.programlisting]
....
domain CS.Ejemplo.EDU
nameserver 128.32.136.9
nameserver 128.32.136.12
....
+
Como puede ver, lo anterior define el nombre de host, de su servidor de nombres. Claro esta, el nombre de dominio y las direcciones IP, dependen de su sistema específico.
. Establezca la contraseña del superusuario `root` y de su símil `toor` (y de cualquier otro usuario que aun no cuente con la misma).
. Reinicie su sistema y asegurese que cuenta con el nombre de host (hostname) correcto.
====
==== Haciendo una Conexión con SLIP
[.procedure]
====
. Marque el número, teclee en el signo de comando `slip`, ingrese el nombre y la contraseña. Lo que se requiere ingresar, depende de su sistema. Si utiliza kermit, puede utilizar un script similar al siguiente:
+
[.programlisting]
....
# kermit setup
set modem hayes
set line /dev/modem
set speed 115200
set parity none
set flow rts/cts
set terminal bytesize 8
set file type binary
# El siguiente macro se encarga de llamar e ingresar al sistema
define slip dial 643-9600, input 10 =>, if failure stop, -
output slip\x0d, input 10 Username:, if failure stop, -
output silvia\x0d, input 10 Password:, if failure stop, -
output ***\x0d, echo \x0aCONNECTED\x0a
....
+
Of course, you will need to change the user name and password to fit your own. After doing so, you can then simply type `slip` at the kermit prompt to connect.
+
[NOTE]
======
El dejar su contraseña en texto plano, en cualquier parte del sistema, generalmente es una _mala_ idea. Hágalo bajo su propio riesgo.
======
. Dejé a kermit en ese punto trabajando (puede suspenderlo tecleando kbd:[Ctrl+z]) y como `root`, teclee:
+
[source,shell]
....
# slattach -h -c -s 115200 /dev/modem
....
+
If you can `ping` hosts on the other side of the router, you are connected! If it does not work, you might want to try `-a` instead of `-c` as an argument to `slattach`.
====
==== Como Terminar la Conexión
Para terminar la conexión haga lo siguiente:
[source,shell]
....
# kill -INT `cat /var/run/slattach.modem.pid`
....
This will terminate `slattach`. Remember that you must be root (superuser) to do this. Then go back to kermit (with `fg` if you sent it to the background) and exit from it (by typing `q`).
The `slattach` manual page says you must use `ifconfig sl0 down` to mark the interface as down, but this does not seem to make any real difference. (`ifconfig sl0` reports the same thing.)
Some times, your modem might refuse to drop the carrier (mine does at times). In that case, simply start kermit again and quit it again. It usually goes down on the second attempt.
==== Problemas Comunes
If it does not work, feel free to ask. The things that people have most commonly stumbled over are:
* Not using `-c` or `-a` with `slattach` (this should not be fatal, but some users have reported that adding it solved their problems).
* Usar la opción `s10` en vez de usar la opción `sl0` (puede ser difícil ver la diferencia con algunos tipos de letras).
* Intente `ifconfig sl0` para visualizar el estatus de sus dispositivos de red. Por ejemplo, puede ser que obtenga algo similiar a lo siguiente:
+
[source,shell]
....
# ifconfig sl0
sl0: flags=10<POINTOPOINT>
inet 136.152.64.181 --> 136.152.64.1 netmask ffffff00
....
* Also, `netstat -r` will show you the routing table, in case you get the "no route to host" message when you try to `ping`. An example of this is shown below:
+
[source,shell]
....
# netstat -r
Routing tables
Destination Gateway Flags Refs Use IfaceMTU Rtt Netmasks:
(root node)
(root node)
Route Tree for Protocol Family inet:
(root node) =>
default inr-3.Example.EDU UG 8 224515 sl0 - -
localhost.Exampl localhost.Example. UH 5 42127 lo0 - 0.438
inr-3.Example.ED water.CS.Example.E UH 1 0 sl0 - -
water.CS.Example localhost.Example. UGH 34 47641234 lo0 - 0.438
(root node)
....
+
Esto es después de que el sistema ha estado conectado por un tiempo. Los numeros pueden variar en su sistema.
[[slips]]
=== Estableciendo un Servidor SLIP
Este documento provee sugerencias, para establecer un servidor de SLIP, bajo FreeBSD, lo que generalmente significa configurar su sistema, para que de manera automática inicie los servicios, al firmarse usuarios-clientes remotos en su sistema.
[[slips-prereqs]]
==== Prerequisitos
Esta sección es de naturaleza muy técnica, así que contar con antecedentes sobre esto es requerido. Este documento supone que usted cuenta con conocimientos sobre el protocolo de redes TCP/IP, y particularmente, redes y direcciones de nodos, mascaras de red, subneteo, ruteo y protocolos de ruteo, tal como RIP. El configurar servicios SLIP, en un servidor, requiere un conocimiento de estos conceptos, y si usted no esta familiarizado con estos, puede leer una copia, ya sea del libro de Craig Hunt; _TCP/IP Networking Administration_, publicado por O'Reilly & Associates, Inc. (Numero ISBN 0-937175-82-X), o alguno de los libros de Douglas Comer, sobre protocolos TCP/IP.
Se da por un hecho, que usted ha instalado y configurado correctamente su(s) módem(s), así como la configuración de su sistema, para efecto de utilizar el mismo para realizar la conexión. Si usted no lo ha hecho, por favor lea el tutorial sobre configuración de estos servicios; si cuenta con un navegador para la World-Wide Web, puede ver los tutoriales disponibles en link:../../../../index.html[http://www.FreeBSD.org/].
Puede ser que también desee revisar las páginas de ayuda (_man_), man:sio[4] para información referente a los controladores de dispositivos de puertos en serie, y man:ttys[5], man:gettytab[5], man:getty[8], & man:init[8], para ver información relevante, sobre la configuración de su sistema, para efecto de que acepte accesos (logins) por medio de un módem, y quizás man:stty[1] para información sobre los parámetros de configuración de puertos en serie (tal como `clocal`, que se utiliza para la conexión directa por medio de puertos seriales).
==== Echemos un Vistazo
En su configuración típica, el desarrollo de FreeBSD como un servidor SLIP, funciona de la siguiente manera: un Usuario SLIP, se conecta del Servidor SLIP FreeBSD e ingresa al sistema con una identificación especial, que utiliza `/usr/sbin/sliplogin` como shell del usuario. El programa `sliplogin` busca en el fichero [.filename]#/etc/sliphome/slip.hosts# la línea que haya sido creada especialmente para el usuario, conecta la línea serial a una interfaz SLIP disponible y posteriormente ejecuta el script [.filename]#/etc/sliphome/slip.login#, para configurar la interfaz SLIP.
===== Un Ejemplo de Acceso al Servidor SLIP
Por ejemplo si la clave de acceso de un usuario SLIP fuese `Shelmerg`, la entrada del usuario `Shelmerg`, en el fichero [.filename]#/etc/master.passwd# se vera algo similar a lo siguiente:
[.programlisting]
....
Shelmerg:password:1964:89::0:0:Guy Helmer - SLIP:/usr/users/Shelmerg:/usr/sbin/sliplogin
....
When `Shelmerg` logs in, `sliplogin` will search [.filename]#/etc/sliphome/slip.hosts# for a line that has a matching user ID; for example, there may be a line in [.filename]#/etc/sliphome/slip.hosts# similar to the following:
[.programlisting]
....
Shelmerg dc-slip sl-helmer 0xfffffc00 autocomp
....
El comando `sliplogin` encontrará la línea que coincida, enganchará la línea serial a cualquier interfaz SLIP disponible y posteriormente ejecutará [.filename]#/etc/sliphome/slip.login# de manera similar a:
[.programlisting]
....
/etc/sliphome/slip.login 0 19200 Shelmerg dc-slip sl-helmer 0xfffffc00 autocomp
....
Si todo marcha bien, [.filename]#/etc/sliphome/slip.login# creará una configuración, por medio de `ifconfig`, para la interfaz SLIP, a la cual `sliplogin` se ha adjuntado (la interfaz slip 0, que era el primer parámetro dado en la lista de [.filename]#slip.login#), para establecer la dirección local IP (`dc-slip`), la interfaz de la direción IP Remota (`sl-helmer`), la submascara de red para la interfaz SLIP (`0xfffffc00`) y cualquier otra opción adicional (`autocomp`). Si algo no va del todo bien, normalmente `sliplogin` guarda bastante información para depurar, por medio del `demonio` (`daemon`) syslog, que usualmente guarda dicha infomración en [.filename]#/var/log/messages# (vea la página de ayuda man:syslogd[8] así como man:syslog.conf[5] y quizas el fichero [.filename]#/etc/syslog.conf#, para ver que es lo que `syslogd` esta almacenando y donde es que lo almacena.
OK, basta de ejemplos - entremos de lleno en la configuración del sistema.
==== Configuración del Kernel
El kernel de FreeBSD, por omisión, cuenta con 2 dispositivos SLIP definidos ([.filename]#sl0# y [.filename]#sl1#); usted puede utilizar `netstat -i`, para verificar si estos dispositivos se encuentran en el kernel de su sistema.
Un ejemplo del resultado de `netstat -i`:
[source,shell]
....
Name Mtu Network Address Ipkts Ierrs Opkts Oerrs Coll
ed0 1500 <Link>0.0.c0.2c.5f.4a 291311 0 174209 0 133
ed0 1500 138.247.224 ivory 291311 0 174209 0 133
lo0 65535 <Link> 79 0 79 0 0
lo0 65535 loop localhost 79 0 79 0 0
sl0* 296 <Link> 0 0 0 0 0
sl1* 296 <Link> 0 0 0 0 0
....
En este ejemplo vemos que existen dos dispositivos SLIP en el kernel, que son; [.filename]#sl0# y [.filename]#sl1# (el asterisco que aparece después de `sl0` y `sl1` indica que los dispositivos no estan "trabajando".)
Even when the SLIP devices are present in the kernel, the FreeBSD kernel does not, by default, forward packets (in fact, your FreeBSD system will not act as a router), in accordance with the Internet requirements RFCs (see RFC 1009 [Requirements for Internet Gateways], RFC 1122 [Requirements for Internet Hosts - Communication Layers], and perhaps RFC 1127 [A Perspective on the Host Requirements RFCs]). If you want your FreeBSD SLIP server to act as a router, you will need to edit [.filename]#/etc/rc.conf# and change the `gateway_enable` setting to `YES` to enable this function.
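That is, [.filename]#/etc/rc.conf# should end up containing the line:

[.programlisting]
....
gateway_enable="YES"
....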
Será necesario que reinicie su sistema, para efecto de que estos cambios surtan efecto.
Al verificar su fichero de configuración del kernel ([.filename]#/sys/i386/conf/GENERIC#), podrá notar que cerca del final, hay una línea como la siguiente:
[.programlisting]
....
pseudo-device sl 2
....
Esta línea es la que define el numero de dispositivos SLIP disponibles en el kernel; el numero al final de la línea es el numero máximo de conecciones SLIP que puede manejar el servidor simultaneamente.
Para ayuda con relación a la configuración y compilación del kernel en su sistema FreeBSD, por favor refierase crossref:kernelconfig[kernelconfig,Configuración del kernel de FreeBSD] al apartado correspondiente.
==== Configuración de Sliplogin
Como se menciono anteriormente, existen tres ficheros en el directorio [.filename]#/etc/sliphome#, que son parte de la configuración de [.filename]#/usr/sbin/sliplogin# (vea la pagina de ayuda man:sliplogin[8] de para ver la ayuda del comando `sliplogin`): [.filename]#slip.hosts#, que es el fichero que define a los usuarios SLIP, así como sus direcciones IP correspondientes; [.filename]#slip.login#, que normalmente es utilizado para configurar la interfaz de SLIP; y (opcionalmente) [.filename]#slip.logout#, que hace lo opuesto a [.filename]#slip.login#, cuando la conexión serial ha terminado.
===== Configuración de [.filename]#slip.hosts#
El fichero [.filename]#/etc/sliphome/slip.hosts# contiene líneas, que al menos cuentan con cuatro partes, separadas por espacios en blanco:
* Identificador (nombre) del usuario SLIP
* Dirección Local (local para el servidor SLIP) de la liga a SLIP
* Dirección Remota de la liga a SLIP
* Mascara de red
Las direcciones local y remota, pueden ser nombres del host (la resolución de los mismos, es llevada a cabo, por medio de [.filename]#/etc/hosts# o por el servidor de nombres de dominio (DNS), dependiendo de lo que haya especificado en el fichero [.filename]#/etc/host.conf#), y la mascara de red puede ser un nombre, que puede ser resuelto revisando [.filename]#/etc/networks#. En un sistema de ejemplo, el fichero [.filename]#/etc/sliphome/slip.hosts#, puede verse así:
[.programlisting]
....
#
# login local-addr remote-addr mask opt1 opt2
# (normal,compress,noicmp)
#
Shelmerg dc-slip sl-helmerg 0xfffffc00 autocomp
....
Al final de la línea puede ver que existen una o más opciones.
* `normal` - sin compresión de los encabezados.
* `compress` - compresión de los encabezados.
* `autocomp` - compresión automática, si el host remoto lo permite.
* `noicmp` - deshabilitar los paquetes ICMP (de tal forma que cualquier paquete enviado por "ping" seráa rechazado, en lugar de ocupar de su ancho de banda).
La elección sobre la dirección local y remota depende si usted va a utilizar una conexión TCP/IP dedicada o bien si va a utilizar una conexión por medio de "proxy ARP" en su servidor SLIP (no es correcto "proxy ARP", pero es la terminología utilizada en esta sección para describirlo). Si usted no esta seguro que metodo manejar o como asignar la dirección IP, por favor refierase a alguno de los libros sobre TCP/IP, que se mencionan en los Prerequisitos de SLIP (<<slips-prereqs>>) y/o consulte al administrador de IP de su red.
If you are going to use a separate subnet for your SLIP clients, you will need to allocate the subnet number out of your assigned IP network number and assign each of your SLIP clients' IP addresses out of that subnet. Then, you will probably need to either configure a static route to the SLIP subnet via your SLIP server on your nearest IP router, or run a routing daemon on the SLIP server so that the route is advertised (both options are discussed under the Routing Considerations section below).
De otra forma, si usted piensa utilizar un metodo "proxy ARP", será necesario que a sus clientes SLIP, se les asigne una dirección IP, que se encuentre dentro del rango que este utilizando para su subred Ethernet, y tambié será necesario que haga algunos ajustes en los ficheros script [.filename]#/etc/sliphome/slip.login# y en [.filename]#/etc/sliphome/slip.logout#, para que usen man:arp[8], para que maneje la tabla ARP del servidor SLIP y llamados del proxy-ARP.
===== [.filename]#slip.login# Configuración
El típico fichero [.filename]#/etc/sliphome/slip.login# se ve de la siguiente manera:
[.programlisting]
....
#!/bin/sh -
#
# @(#)slip.login 5.1 (Berkeley) 7/1/90
#
# generic login file for a slip line. sliplogin invokes this with
# the parameters:
# 1 2 3 4 5 6 7-n
# slipunit ttyspeed loginname local-addr remote-addr mask opt-args
#
/sbin/ifconfig sl$1 inet $4 $5 netmask $6
....
This [.filename]#slip.login# file merely runs `ifconfig` for the appropriate SLIP interface with the local and remote addresses and network mask of the SLIP interface.
If you have decided to use the "proxy ARP" method (instead of using a separate subnet for your SLIP clients), your [.filename]#/etc/sliphome/slip.login# file will need to look something like this:
[.programlisting]
....
#!/bin/sh -
#
# @(#)slip.login 5.1 (Berkeley) 7/1/90
#
# generic login file for a slip line. sliplogin invokes this with
# the parameters:
# 1 2 3 4 5 6 7-n
# slipunit ttyspeed loginname local-addr remote-addr mask opt-args
#
/sbin/ifconfig sl$1 inet $4 $5 netmask $6
# Answer ARP requests for the SLIP client with our Ethernet addr
/usr/sbin/arp -s $5 00:11:22:33:44:55 pub
....
La línea adicional, `arp -s $5 00:11:22:33:44:55 pub` del script [.filename]#slip.login#, crea una entrada ARP en la tabla del servidor SLIP. Esta entrada le indica al servidor SLIP que debe responder con la dirección MAC de su dispositivo Ethernet, cuando cualquier otro nodo IP en la red, solicite información a la IP del cliente SLIP.
Al tomar en cuenta el ejemplo anterior, es importante que sustituya la dirección Ethernet MAC (`00:11:22:33:44:55`), con la dirección que corresponde a su tarjeta de red, o definitivamente su "proxy ARP" no va a funcionar!. Para efecto de conocer cual es la dirección MAC del dispositivo Ethernet (tarjeta de red), de su servidor SLIP, puede ejecutar el comando `netstat -i`, el cual tendrá como resultado algo similar a lo siguiente:
[source,shell]
....
ed0 1500 <Link>0.2.c1.28.5f.4a 191923 0 129457 0 116
....
This shows that the Ethernet MAC address of this system's Ethernet device is `00:02:c1:28:5f:4a` - the periods in the address shown by `netstat -i` must be changed to colons, and a leading zero must be added to every single-digit hexadecimal group, to convert the address into the form that man:arp[8] expects; see the man:arp[8] manual page for complete information on usage.
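Putting that together, a manual test of the proxy-ARP entry for a hypothetical SLIP client address (the IP address below is a placeholder, not taken from this document) would look like:

[source,shell]
....
# arp -s 192.0.2.35 00:02:c1:28:5f:4a pub
....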
[NOTE]
====
Recuerde que cuando cree los ficheros [.filename]#/etc/sliphome/slip.login# y [.filename]#/etc/sliphome/slip.logout#, deben contar con permisos de ejecución (`chmod 755 /etc/sliphome/slip.login /etc/sliphome/slip.logout`), de otra forma estos scripts no podrán llevar a cabo su función.
====
===== Configuración de [.filename]#slip.logout#
El fichero [.filename]#/etc/sliphome/slip.logout# no es indispensable (a menos que vaya a utilizar "proxy ARP"), pero si aun así decide crearlo, el siguiente es un ejemplo básico del script [.filename]#slip.logout# :
[.programlisting]
....
#!/bin/sh -
#
# slip.logout
#
# logout file for a slip line. sliplogin invokes this with
# the parameters:
# 1 2 3 4 5 6 7-n
# slipunit ttyspeed loginname local-addr remote-addr mask opt-args
#
/sbin/ifconfig sl$1 down
....
Si usted esta utilizando "proxy ARP", es recomendable que le indique a [.filename]#/etc/sliphome/slip.logout#, que desea eliminar la entrada ARP, para el cliente SLIP:
[.programlisting]
....
#!/bin/sh -
#
# @(#)slip.logout
#
# logout file for a slip line. sliplogin invokes this with
# the parameters:
# 1 2 3 4 5 6 7-n
# slipunit ttyspeed loginname local-addr remote-addr mask opt-args
#
/sbin/ifconfig sl$1 down
# Dejar de solicitar respuesta ARP al cliente SLIP
/usr/sbin/arp -d $5
....
El comando `arp -d $5`, elimina la entrada ARP, que [.filename]#slip.login# de "proxy ARP" añadió al cliente SLIP al ingresar al sistema.
To be safe, make sure [.filename]#/etc/sliphome/slip.logout# also has the execute bit set after you create it (i.e. `chmod 755 /etc/sliphome/slip.logout`).
==== Consideraciones sobre el Enrutamiento
Si usted no esta utilizando el metodo "proxy ARP", para efecto de rutear los paquetes entre sus clientes SLIP y el resto de la red (y quizás Internet), deberá de hacer una de las siguientes dos acciones, o bien añadir direcciones estáticas, a su(s) ruteador(es) más cercanos, para que se reenvien los paquetes de la subred de sus clientes SLIP, por medio de su servidor SLIP, o bien tendrá que instalar y configurar `gated` en su servidor SLIP (que corre FreeBSD!), de tal forma que le indique a su(s) ruteador(es), por medio del protocolo correcto, a cerca de su subred SLIP.
===== Direcciones de Enrutamiento Estáticas
Adding static routes to your nearest default routers can be problematic (or impossible if you do not have the authority to do so...). If you have a multiple-router network in your organization, some routers, such as those made by Cisco and Proteon, may not only need to be configured with the static route to the SLIP subnet, but also need to be told which static routes to tell other routers about, so some expertise and troubleshooting/tweaking may be necessary to get static-route-based routing to work.
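As an illustration only (the addresses are placeholders and the exact syntax may differ on non-FreeBSD routers), on another FreeBSD machine acting as a router, the static route towards the SLIP subnet via the SLIP server could be added like this:

[source,shell]
....
# route add -net 192.0.2.32/27 198.51.100.1
....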
===== Ejecutando `gated`
An alternative to the headaches of static-route networking is to install `gated` on your FreeBSD SLIP server and configure it to use the appropriate routing protocols (RIP/OSPF/BGP/EGP) to tell other routers about your SLIP subnet. Once it is built and installed, you will need to create an [.filename]#/etc/gated.conf# file to configure `gated`; here is a sample, similar to what the author used on a FreeBSD SLIP server:
[NOTE]
====
`gated` is proprietary software and its source code is no longer available to the public (more information is available on the http://www.gated.org/[gated] website). This section only exists to ensure backwards compatibility for those still using an older version.
====
[.programlisting]
....
#
# gated configuration file for dc.dsu.edu; for gated version 3.5alpha5
# Only broadcast RIP information for xxx.xxx.yy out the ed Ethernet interface
#
#
# tracing options
#
traceoptions "/var/tmp/gated.output" replace size 100k files 2 general ;
rip yes {
interface sl noripout noripin ;
interface ed ripin ripout version 1 ;
traceoptions route ;
} ;
#
# Turn on a bunch of tracing info for the interface to the kernel:
kernel {
traceoptions remnants request routes info interface ;
} ;
#
# Propagate the route to xxx.xxx.yy out the Ethernet interface via RIP
#
export proto rip interface ed {
proto direct {
xxx.xxx.yy mask 255.255.252.0 metric 1; # SLIP connections
} ;
} ;
#
# Accept routes from RIP via ed Ethernet interfaces
import proto rip interface ed {
all ;
} ;
....
En el ejemplo anterior, el fichero de configuración [.filename]#gated.conf# transmite información sobre la subred SLIP _xxx.xxx.yy_, por medio de RIP al dispositivo Ethernet; si usted esta utilizando un dispositivo de red, diferente de [.filename]#ed#, será necesario que modifique el parámetro [.filename]#ed# por el correspondiente. En este ejemplo, el fichero también realiza una busqueda por el fichero [.filename]#/var/tmp/gated.output#, que es un fichero que nos sirve para depurar cualquier error que se presente en la actividad de `gated`; usted puede desactivar la opción de depuración (debug), si es que `gated` esta funcionando correctamente. Será necesario que modifique _xxx.xxx.yy._, a quedar con la dirección correcta de su subred SLIP (asegurese de modificar también la máscara de red, en la cláusula `proto direct` también).
Once you have installed and configured `gated` on your system, you will need to tell FreeBSD to start `gated` at boot time in place of `routed`. The easiest way to accomplish this is to set the `router` and `router_flags` variables in [.filename]#/etc/rc.conf#. Please see the `gated` manual page for information on its command-line parameters.
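As a sketch (the variable names and the installed path of `gated` may differ depending on the FreeBSD version and on how the port installs it), the relevant [.filename]#/etc/rc.conf# lines might look like:

[.programlisting]
....
router_enable="YES"
router="/usr/local/sbin/gated"
router_flags=""
....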
| 46.881907
| 882
| 0.736042
|
4fac69c5ad89cf53bfbd68900abe61984e0a4234
| 1,965
|
adoc
|
AsciiDoc
|
chapters/2-chapter.adoc
|
dorta/asciidoc-resume
|
f2010d94816d20661669b59392ba0b06fee2938e
|
[
"BSD-3-Clause"
] | null | null | null |
chapters/2-chapter.adoc
|
dorta/asciidoc-resume
|
f2010d94816d20661669b59392ba0b06fee2938e
|
[
"BSD-3-Clause"
] | null | null | null |
chapters/2-chapter.adoc
|
dorta/asciidoc-resume
|
f2010d94816d20661669b59392ba0b06fee2938e
|
[
"BSD-3-Clause"
] | null | null | null |
// Copyright 2020 Diego Dorta
== icon:suitcase[] Professional Experience
=== Variscite LTD
* icon:group[] **Senior Software Engineer at R&D Team** | icon:calendar[] **July, 2021 to Present** +
** _Artificial Intelligence and Machine Learning Applications Developer_;
** _Linux BSP Developer for NXP platforms_;
=== Robert Bosch | Bosch Integrated Solutions Brazil
* icon:group[] **Innovation and Technology Engineer at BISB Team** | icon:calendar[] **April, 2021 to June, 2021** +
** FreeRTOS developer for IoT Devices.
=== NXP Semiconductors | Automotive, Security, IoT
* icon:group[] **System and Application Engineer at System Engineering Team** | icon:calendar[] **August, 2018 to February, 2021** +
** _Artificial Intelligence and Machine Learning Applications Developer (TensorFlow, Scikit-Learn, PyTorch)_;
*** https://github.com/diegohdorta/pyeiq[icon:file[]] Most relevant work: _A Python Demo Framework for eIQ on i.MX Processors_.
** _User Interface and Multimedia Applications Developer (Qt, GTK, GStreamer)_;
** _Bootloader (U-Boot) and Kernel Driver Contributor (Ethernet, USB), Root File System (Buildroot, Yocto)_.
* icon:group[] **Research and Development Intern at MPU Team** | icon:calendar[] **June, 2016 to July, 2018** +
** _Development of Machine Learning Applications, Open Source Contributions and Documentation Review_.
*** https://www.youtube.com/watch?v=O5F1N312Bhg[icon:youtube[]] https://imxdev.gitlab.io/video/tutorial/Creating_Qt_Application_for_iMX/[icon:file[]] Most relevant work: _Creating Qt Application for i.MX Embedded Systems_.
=== Brazilian Synchrotron Light Source | CNPEM
* icon:group[] **Computer Engineering Intern at Beamline Software Group** | icon:calendar[] **January, 2014 to December, 2015** +
** _Scientific Instruments and Driver Applications Developer_.
*** https://epics.anl.gov/modules/contact.php#Diego%20Dorta[icon:file[]] Most relevant work: _Published EPICS Drivers for Scientific Instruments_.
| 61.40625
| 222
| 0.759288
|
6d301a7efc7bee32e7684af475be0620b5bf784c
| 1,214
|
adoc
|
AsciiDoc
|
modules/nodes-pods-autoscaling-requests-and-limits-hpa.adoc
|
georgettica/openshift-docs
|
728a069f9c8ecd73701ac84175374e7e596b0ee4
|
[
"Apache-2.0"
] | null | null | null |
modules/nodes-pods-autoscaling-requests-and-limits-hpa.adoc
|
georgettica/openshift-docs
|
728a069f9c8ecd73701ac84175374e7e596b0ee4
|
[
"Apache-2.0"
] | 1
|
2022-01-12T21:27:35.000Z
|
2022-01-12T21:27:35.000Z
|
modules/nodes-pods-autoscaling-requests-and-limits-hpa.adoc
|
georgettica/openshift-docs
|
728a069f9c8ecd73701ac84175374e7e596b0ee4
|
[
"Apache-2.0"
] | null | null | null |
// Module included in the following assemblies:
//
// * nodes/nodes-pods-autoscaling-about.adoc
:_content-type: CONCEPT
[id="nodes-pods-autoscaling-requests-and-limits-hpa_{context}"]
= About requests and limits
The scheduler uses the resource request that you specify for containers in a pod, to decide which node to place the pod on. The kubelet enforces the resource limit that you specify for a container to ensure that the container is not allowed to use more than the specified limit.
The kubelet also reserves the request amount of that system resource specifically for that container to use.
.How to use resource metrics?
In the pod specifications, you must specify the resource requests, such as CPU and memory. The HPA uses this specification to determine the resource utilization and then scales the target up or down.
For example, the HPA object uses the following metric source:
[source,yaml]
----
type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 60
----
In this example, the HPA keeps the average utilization of the pods in the scaling target at 60%. Utilization is the ratio between the current resource usage to the requested resource of the pod.
| 41.862069
| 278
| 0.783361
|
bda2c3c5acb8878c6098a2d618da6d8a4aef3d79
| 63
|
adoc
|
AsciiDoc
|
kfk.confluent/04.confluent.admin/02.kafka.admin-config-perf-tuning/misc/kfk.config.docs/kafka-consumer-RangeAssignor.adoc
|
cnb0/learning-kafka
|
c43feaeac59e157f56e319a688f1b801855811c2
|
[
"MIT"
] | 138
|
2016-09-14T21:21:59.000Z
|
2022-03-25T06:51:29.000Z
|
kafka-consumer-RangeAssignor.adoc
|
jaceklaskowski/kafka-notebook
|
fe87d5b28eb00c189ec29c8fd0f4014f7c55142a
|
[
"Apache-2.0"
] | 2
|
2020-11-14T14:27:43.000Z
|
2020-11-16T10:13:03.000Z
|
kafka-consumer-RangeAssignor.adoc
|
jaceklaskowski/kafka-notebook
|
fe87d5b28eb00c189ec29c8fd0f4014f7c55142a
|
[
"Apache-2.0"
] | 51
|
2016-11-14T02:54:27.000Z
|
2022-01-11T16:28:04.000Z
|
== [[RangeAssignor]] RangeAssignor
`RangeAssignor` is...FIXME
| 15.75
| 34
| 0.730159
|
2ab8062f6d51771b8d29dba2a85a073c71c18059
| 1,508
|
adoc
|
AsciiDoc
|
doc/man/jose-jwk-pub.1.adoc
|
imirkin/jose
|
c96f74661f8b7f1dc5af75796b695252eff1e596
|
[
"Apache-2.0"
] | 123
|
2016-12-08T17:22:57.000Z
|
2022-03-31T21:18:58.000Z
|
doc/man/jose-jwk-pub.1.adoc
|
imirkin/jose
|
c96f74661f8b7f1dc5af75796b695252eff1e596
|
[
"Apache-2.0"
] | 90
|
2016-07-14T19:59:54.000Z
|
2022-02-21T16:31:16.000Z
|
doc/man/jose-jwk-pub.1.adoc
|
imirkin/jose
|
c96f74661f8b7f1dc5af75796b695252eff1e596
|
[
"Apache-2.0"
] | 57
|
2016-07-15T13:16:44.000Z
|
2022-03-07T15:52:21.000Z
|
jose-jwk-pub(1)
===============
:doctype: manpage
== NAME
jose-jwk-pub - Cleans private keys from a JWK
== SYNOPSIS
*jose jwk pub* -i JWK [-o JWK]
== OVERVIEW
The *jose jwk pub* command removes all private key material from one or more
JWK(Set) inputs. The output will contain only public key material.
If the JWK contains the "key_ops" property, it will be automatically adjusted
to include only operations relevant to public keys.
== OPTIONS
* *-i* _JSON_, *--input*=_JSON_ :
Parse JWK(Set) from JSON
* *-i* _FILE_, *--input*=_FILE_ :
Read JWK(Set) from FILE
* *-i* -, *--input*=- :
Read JWK(Set) from standard input
* *-o* _FILE_, *--output*=_FILE_ :
Write JWK(Set) to FILE
* *-o* -, *--output*=- :
Write JWK(Set) to standard output
* *-s*, *--set* :
Always output a JWKSet
== EXAMPLES
Clean private key material from a JWK:
    $ jose jwk gen -i '{"alg":"ES256"}' -o prv.jwk
    $ cat prv.jwk
    {"alg":"ES256","crv":"P-256","key_ops":["sign","verify"],"kty":"EC", ...}
    $ jose jwk pub -i prv.jwk -o pub.jwk
    $ cat pub.jwk
    {"alg":"ES256","crv":"P-256","key_ops":["verify"],"kty":"EC", ...}
== AUTHOR
Nathaniel McCallum <npmccallum@redhat.com>
== SEE ALSO
link:jose-alg.1.adoc[*jose-alg*(1)],
link:jose-jwe-enc.1.adoc[*jose-jwe-enc*(1)],
link:jose-jwk-exc.1.adoc[*jose-jwk-exc*(1)],
link:jose-jwk-gen.1.adoc[*jose-jwk-gen*(1)],
link:jose-jwk-thp.1.adoc[*jose-jwk-thp*(1)],
link:jose-jwk-use.1.adoc[*jose-jwk-use*(1)],
link:jose-jws-ver.1.adoc[*jose-jws-ver*(1)]
| 23.2
| 77
| 0.631963
|
08037040019dfa843ee9f4364f87800c2c5179d8
| 400
|
adoc
|
AsciiDoc
|
authentication/impersonating-system-admin.adoc
|
makentenza/openshift-docs
|
e6fc2f6eba18780c10ab226137ad3730613f84c6
|
[
"Apache-2.0"
] | null | null | null |
authentication/impersonating-system-admin.adoc
|
makentenza/openshift-docs
|
e6fc2f6eba18780c10ab226137ad3730613f84c6
|
[
"Apache-2.0"
] | null | null | null |
authentication/impersonating-system-admin.adoc
|
makentenza/openshift-docs
|
e6fc2f6eba18780c10ab226137ad3730613f84c6
|
[
"Apache-2.0"
] | null | null | null |
:_content-type: ASSEMBLY
[id="impersonating-system-admin"]
= Impersonating the system:admin user
include::modules/common-attributes.adoc[]
:context: impersonating-system-admin
toc::[]
include::modules/authentication-api-impersonation.adoc[leveloffset=+1]
include::modules/impersonation-system-admin-user.adoc[leveloffset=+1]
include::modules/impersonation-system-admin-group.adoc[leveloffset=+1]
| 28.571429
| 70
| 0.805
|
6175500e91d59f6fe1f9b43fde2089119ba7299f
| 531
|
adoc
|
AsciiDoc
|
src/main/asciidoc/introduction.adoc
|
Bedework/bw-caldav-tester
|
aa183e83b90ea78365b54ab2a334cde9f7975476
|
[
"Apache-2.0"
] | null | null | null |
src/main/asciidoc/introduction.adoc
|
Bedework/bw-caldav-tester
|
aa183e83b90ea78365b54ab2a334cde9f7975476
|
[
"Apache-2.0"
] | 6
|
2021-12-14T21:00:33.000Z
|
2021-12-19T19:26:56.000Z
|
src/main/asciidoc/introduction.adoc
|
Bedework/bw-dav-tester
|
9fcff7fb32946b880160787bf064baeb063c9b00
|
[
"Apache-2.0"
] | null | null | null |
== Introduction
The tester is a Java app that will run a series of scripted tests
against a CalDAV or CardDAV server and verify the output, and optionally measure
the time taken to complete one or more repeated requests. The tests are
defined by XML files and ancillary HTTP request body files. A number of
different verification options are provided.
Many tests are included in this package.
The DAV tester can be extended to run tests against almost any type of HTTP server protocol by simply defining a new set of XML files.
| 44.25
| 134
| 0.804143
|
39c7a6f78fbb293be722f25fcb9a9f5559c21221
| 162
|
adoc
|
AsciiDoc
|
infinispan/infinispan-remote/readme.adoc
|
torstenwerner/mastertheboss
|
76e37a50c0b25990fe417ff6aa02736410bf3eae
|
[
"MIT"
] | 146
|
2015-05-08T12:55:21.000Z
|
2022-03-24T10:15:25.000Z
|
infinispan/infinispan-remote/readme.adoc
|
torstenwerner/mastertheboss
|
76e37a50c0b25990fe417ff6aa02736410bf3eae
|
[
"MIT"
] | 12
|
2016-01-11T14:40:27.000Z
|
2022-02-25T09:29:51.000Z
|
infinispan/infinispan-remote/readme.adoc
|
torstenwerner/mastertheboss
|
76e37a50c0b25990fe417ff6aa02736410bf3eae
|
[
"MIT"
] | 588
|
2015-07-06T21:53:45.000Z
|
2022-03-30T07:54:59.000Z
|
== Infinispan remote cache demo
Source code for this tutorial: http://www.mastertheboss.com/jboss-frameworks/infinispan/clustering-infinispan-a-complete-example/
| 54
| 129
| 0.82716
|
3e2156955d7848dc32917144bc70067b2395a197
| 1,358
|
adoc
|
AsciiDoc
|
docs/man/nng_tcp_dialer_alloc.3tcp.adoc
|
vikramkarandikar/nng
|
1e7b955c9b1caa0262ea3d87d07eabf403927869
|
[
"MIT"
] | null | null | null |
docs/man/nng_tcp_dialer_alloc.3tcp.adoc
|
vikramkarandikar/nng
|
1e7b955c9b1caa0262ea3d87d07eabf403927869
|
[
"MIT"
] | null | null | null |
docs/man/nng_tcp_dialer_alloc.3tcp.adoc
|
vikramkarandikar/nng
|
1e7b955c9b1caa0262ea3d87d07eabf403927869
|
[
"MIT"
] | null | null | null |
= nng_tcp_dialer_alloc(3tcp)
//
// Copyright 2018 Staysail Systems, Inc. <info@staysail.tech>
// Copyright 2018 Capitar IT Group BV <info@capitar.com>
//
// This document is supplied under the terms of the MIT License, a
// copy of which should be located in the distribution where this
// file was obtained (LICENSE.txt). A copy of the license may also be
// found online at https://opensource.org/licenses/MIT.
//
== NAME
nng_tcp_dialer_alloc - allocate TCP dialer
== SYNOPSIS
[source, c]
----
#include <nng/nng.h>
#include <nng/supplemental/tcp/tcp.h>
int nng_tcp_dialer_alloc(nng_tcp_dialer *dp);
----
== DESCRIPTION
The `nng_tcp_dialer_alloc()` function allocates a TCP dialer, which can be used
to create outgoing connections over TCP, and stores a pointer to it
in the location referenced by _dp_.
== RETURN VALUES
This function returns 0 on success, and non-zero otherwise.
== ERRORS
[horizontal]
`NNG_ENOMEM`:: Insufficient free memory exists.
== SEE ALSO
[.text-left]
<<nng_strerror.3#,nng_strerror(3)>>,
<<nng_tcp_dialer_close.3tcp#,nng_tcp_dialer_close(3tcp)>>,
<<nng_tcp_dialer_dial.3tcp#,nng_tcp_dialer_dial(3tcp)>>,
<<nng_tcp_dialer_free.3tcp#,nng_tcp_dialer_free(3tcp)>>,
<<nng_tcp_dialer_getopt.3tcp#,nng_tcp_dialer_getopt(3tcp)>>,
<<nng_tcp_dialer_setopt.3tcp#,nng_tcp_dialer_setopt(3tcp)>>,
<<nng_tcp_dialer.5#,nng_tcp_dialer(5)>>
| 26.627451
| 70
| 0.755523
|
ba18ab3612df1b7b659a3fa1c437e2ffb13aa189
| 14,860
|
adoc
|
AsciiDoc
|
http/0.3.7/modules/ROOT/pages/http-connector-reference.adoc
|
mtcarvalho/docs-connectors
|
3a9b37a8a245e502850e21bece1f496b4077d5c6
|
[
"BSD-3-Clause"
] | 12
|
2019-05-01T21:29:44.000Z
|
2022-03-14T13:18:39.000Z
|
http/0.3.7/modules/ROOT/pages/http-connector-reference.adoc
|
mtcarvalho/docs-connectors
|
3a9b37a8a245e502850e21bece1f496b4077d5c6
|
[
"BSD-3-Clause"
] | 288
|
2018-09-28T02:47:25.000Z
|
2022-03-29T22:02:17.000Z
|
http/0.3.7/modules/ROOT/pages/http-connector-reference.adoc
|
mtcarvalho/docs-connectors
|
3a9b37a8a245e502850e21bece1f496b4077d5c6
|
[
"BSD-3-Clause"
] | 130
|
2018-09-25T22:19:47.000Z
|
2022-03-24T01:50:55.000Z
|
= HTTP Connector Reference
:keywords: anypoint studio, esb, connectors, http, https, http headers, query parameters, rest, raml
:page-aliases: 3.7@mule-runtime::http-connector-reference.adoc
Mule Utilities for HTTP Services.
This page serves as a reference for all of the supported attributes and child elements of the xref:index.adoc[HTTP Connector].
== Listener
Listener for incoming HTTP requests.
=== Attributes of listener
[%header,cols="30a,70a"]
|===
|Name |Description
|path |Path to listen for incoming requests. +
Type: string +
Required: yes +
Default: none
|allowedMethods |Comma-separated list of HTTP methods allowed by this listener. To allow all methods, do not define the attribute. +
Type: string +
Required: no +
Default: none
|config-ref |A reference to the configuration element for this listener. If no reference is provided, a default configuration is created. +
Type: string +
Required: yes +
Default: none
|responseStreamingMode |Defines if the response should be sent using streaming or not. If this attribute is not present, the behavior depends on the type of the payload (it streams only for InputStream). If set to true, it always streams. If set to false, it never streams. When streaming is used, the response is sent with `Transfer-Encoding: chunked`. +
Type: enumeration +
Required: no +
Default: AUTO
|parseRequest |By default, the request is parsed (for example, a multi-part request is mapped as a Mule message with null payload and inbound attachments with each part). If this property is set to false, no parsing is done, and the payload always contains the raw contents of the HTTP request. +
Type: string +
Required: no +
Default: none
|===
=== Child Elements of listener
[%header,cols="30a,10a,60a"]
|===
|Name |Cardinality |Description
|response-builder |0..1 |Contains the definition of all the parameters that should be sent in the response (headers, status code, response phrase).
|error-response-builder |0..1 |Contains the definition of all the parameters that should be sent in the response (headers, status code, response phrase).
|===
== Listener config
Grouping configuration for a set of listener elements.
=== Attributes of listener-config
[%header,cols="30a,70a"]
|===
|Name |Description
|name |Identifies the configuration in the registry, so that it can be referenced by the request message processor. +
Type: name (no spaces) +
Required: yes +
|protocol |Protocol to use for communication. Valid values are HTTP and HTTPS. Default value is HTTP. When using HTTPS the HTTP communication is going to be secured using TLS or SSL. If HTTPS was configured as protocol, then the user needs to configure at least the keystore in the tls:context child element of this listener-config. +
Type: httpProtocol +
Required: no +
Default: HTTP
|host |Host where requests are sent. +
Type: string +
Required: yes +
Default: none
|port |Port where the requests are received. If the protocol attribute is HTTP (default), the default value is 80; if the protocol attribute is HTTPS, the default value is 443. +
Type: integer +
Required: no +
Default: none
|basePath |Base path to use for all requests that reference this config. +
Type: string +
Required: no +
Default: none
|tlsContext-ref |Reference to a TLS config element. This enables HTTPS for this config. +
Type: string +
Required: no +
Default: none
|parseRequest |By default, the request is parsed (for example, a multi part request is mapped as a Mule message with null payload and inbound attachments with each part). If this property is set to false, no parsing is done, and the payload always contains the raw contents of the HTTP request. +
Type: string +
Required: no +
Default: none
|connectionIdleTimeout |The number of milliseconds that a connection can remain idle before it is closed. The value of this attribute is only used when persistent connections are enabled. +
Type: integer +
Required: no +
Default: 30000
|usePersistentConnections |If false, each connection is closed after the first request is completed. +
Type: boolean +
Required: no +
Default: true
|===
=== Child Elements of listener-config
[%header%autowidth.spread]
|===
|Name |Cardinality
|tls:context |0..1
|worker-threading-profile |0..1
|===
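As a minimal sketch of how the listener and listener-config elements described above fit together: the attributes shown are taken from the tables in this reference, but the host, port, paths, and flow names are hypothetical, the surrounding Mule application XML is omitted, and core elements such as `set-payload` appear only for illustration.

[source,xml]
----
<http:listener-config name="listenerConfig" host="0.0.0.0" port="8081" basePath="/api"/>

<flow name="helloFlow">
    <!-- Accepts GET requests on /api/hello using the shared listener configuration -->
    <http:listener config-ref="listenerConfig" path="/hello" allowedMethods="GET"/>
    <set-payload value="Hello, world"/>
</flow>
----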
== HTTP Response Builder
=== Attributes of response-builder
No attributes of response-builder.
No child elements of response-builder.
== Request
=== Attributes of request
[%header,cols="30a,70a"]
|===
|Name |Description
|path |Path where the request is sent. +
Type: string +
Required: yes +
Default: none
|method |The HTTP method for the request. +
Type: string +
Required: no +
Default: none
|config-ref |A reference to the configuration element for this requester. If no reference is provided, a default configuration is created. +
Type: string +
Required: yes +
Default: none
|source |The expression used to obtain the body that is sent in the request. Default is empty, so the payload is used as the body. +
Type: string +
Required: no +
Default: none
|target |The enricher expression used to enrich the current message with the body of the response. Default is "#payload", so after processing the response, the contents of its body are set as the payload of the message. +
Type: string +
Required: no +
Default: #payload
|followRedirects |Specifies whether to follow redirects or not. +
Type: boolean +
Required: no +
Default: true
|host |Host where requests are sent. +
Type: string +
Required: no +
Default: none
|port |Port where the request is sent. If the protocol attribute is HTTP (default), the default value is 80; if the protocol attribute is HTTPS, the default value is 443. +
Type: integer +
Required: no +
Default: none
|parseResponse |By default, the response is parsed (for example, a multi part response is mapped as a Mule message with null payload and inbound attachments with each part). If this property is set to false, no parsing is done, and the payload always contains the raw contents of the HTTP response. +
Type: boolean +
Required: no +
Default: true
|requestStreamingMode |Defines if the request should be sent using streaming or not. If this attribute is not present, the behavior depends on the type of the payload (it streams only for InputStream). If set to true, it always streams. If set to false, it never streams. When streaming is used, the request is sent with `Transfer-Encoding: chunked`. +
Type: enumeration +
Required: no +
Default: AUTO
|sendBodyMode |Defines if the request should contain a body or not. If AUTO, it depends on the method (GET, HEAD, and OPTIONS do not send a body). +
Type: enumeration +
Required: no +
Default: AUTO
|responseTimeout |Maximum time that the request element blocks the execution of the flow waiting for the HTTP response. If this value is not present, the default response timeout from the Mule configuration is used. +
Type: integer +
Required: no +
Default: 10 seconds
|===
=== Child Elements of request
[%header,cols="30a,10a,60a"]
|===
|Name |Cardinality |Description
|request-builder |0..1 |Contains the definition of all the parameters that should be sent in the request (uri params, query params and headers).
|success-status-code-validator |0..1 |Configures error handling of the response based on the status code.
|failure-status-code-validator |0..1 |Configures error handling of the response based on the status code.
|===
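A corresponding outbound sketch, again using only attributes documented in the tables above, with hypothetical names and hosts and the surrounding application XML omitted, might look like:

[source,xml]
----
<http:request-config name="requestConfig" protocol="HTTPS" host="api.example.com" port="443"/>

<flow name="invokeApiFlow">
    <!-- Sends a GET request to https://api.example.com:443/v1/items -->
    <http:request config-ref="requestConfig" path="/v1/items" method="GET"/>
</flow>
----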
== Request Builder
=== Attributes of request-builder
`name`: Identifies the builder so that other elements can reference it.
* Type: name (no spaces)
* Required: no
No child elements of request-builder.
== Request Config
=== Attributes of request-config
[%header,cols="30a,70a"]
|===
|Name |Description
|protocol |Protocol to use for communication. Valid values are HTTP and HTTPS. Default value is HTTP. When using HTTPS, the HTTP communication is secured using TLS or SSL. If HTTPS is configured as the protocol, the user can customize the TLS/SSL configuration by defining the tls:context child element of this request-config. If no tls:context is defined, the default JVM certificates are used to establish communication. +
Type: httpProtocol +
Required: no +
Default: HTTP
|name |Identifies the configuration in the registry, so that it can be referenced by the request message processor. +
Type: name (no spaces) +
Required: yes +
Default: none
|basePath |Base path to use for all requests that reference this config. +
Type: string +
Required: no +
Default: none
|tlsContext-ref |Reference to a TLS context element. This enables HTTPS for this config. +
Type: string +
Required: no +
Default: none
|clientSocketProperties-ref |Reference to a TCP Client Socket properties element. +
Type: string +
Required: no +
Default: none
|proxy-ref |Reference to a proxy context element. +
Type: string +
Required: no +
Default: none
|maxConnections |The maximum number of outbound connections that are kept open at the same time. By default the number of connections is unlimited. +
Type: integer +
Required: no +
Default: -1
|connectionIdleTimeout |The number of milliseconds that a connection can remain idle before it is closed. The value of this attribute is only used when persistent connections are enabled. +
Type: integer +
Required: no +
Default: 30000
|usePersistentConnections |If false, each connection is closed after the first request is completed. +
Type: boolean +
Required: no +
Default: true
|followRedirects |Specifies whether to follow redirects or not. +
Type: boolean +
Required: no +
Default: true
|host |Host where the requests are sent. +
Type: string +
Required: no +
Default: none
|port |Port where the requests are sent. If the protocol attribute is HTTP (default), the default value is 80; if the protocol attribute is HTTPS, the default value is 443. +
Type: integer +
Required: no +
Default: none
|parseResponse |By default, the response is parsed (for example, a multi-part response is mapped as a Mule message with null payload and inbound attachments with each part). If this property is set to false, no parsing is done, and the payload always contains the raw contents of the HTTP response. +
Type: boolean +
Required: no +
Default: true
|requestStreamingMode |Defines if the request should be sent using streaming or not. If this attribute is not present, the behavior depends on the type of the payload (it streams only for InputStream). If set to true, it always streams. If set to false, it never streams. When streaming is used, the request is sent with `Transfer-Encoding: chunked`. +
Type: enumeration +
Required: no +
Default: AUTO
|sendBodyMode |Defines if the request should contain a body or not. If AUTO, it depends on the method (GET, HEAD, and OPTIONS do not send a body). +
Type: enumeration +
Required: no +
Default: AUTO
|responseTimeout |Maximum time that the request element blocks the execution of the flow waiting for the HTTP response. If this value is not present, the default response timeout from the Mule configuration is used. +
Type: integer +
Required: no +
Default: 10 seconds
|===
=== Child Elements of request-config
[%header,cols="30a,10a,60a"]
|===
|Name |Cardinality |Description
|abstract-http-request-authentication-provider
|0..1
|A security manager is a container for security providers. More than one security manager may be configured; each contains providers from a particular module and has that module type. This element is abstract - a security-related module or transport provides a suitable implementation.
|tcp:client-socket-properties
|0..1
|
|tls:context
|0..1
|
|raml-api-configuration
|0..1
|Specifies a RAML configuration file for the API that is being consumed.
|proxy
|0..1
|Reusable configuration element for outbound connections through a proxy. A proxy element must define host name and port attributes, and can optionally define a username and a password.
|ntlm-proxy
|0..1
|Reusable configuration element for outbound connections through a proxy. A proxy element must define host name and port attributes, and can optionally define a username and a password.
|===
== Basic Authentication
Configures basic authentication for the requests.
=== Attributes of basic-authentication
[%header,cols="30a,70a"]
|===
|Name |Description
|username |The username to authenticate. +
Type: string +
Required: yes +
Default: none
|password |The password to authenticate. +
Type: string +
Required: yes +
Default: none
|preemptive |Configures if authentication should be preemptive or not. Preemptive authentication sends the authentication header in the first request, instead of waiting for a 401 response code to send it. +
Type: boolean +
Required: no +
Default: false
|===
No child elements of basic-authentication.
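For example, a request configuration using preemptive basic authentication could be sketched as follows; the host, port, and credential placeholders are hypothetical, and only attributes and child elements documented in this reference are used.

[source,xml]
----
<http:request-config name="securedRequestConfig" protocol="HTTPS" host="api.example.com" port="443">
    <!-- Sends the Authorization header on the first request instead of waiting for a 401 -->
    <http:basic-authentication username="${api.user}" password="${api.password}" preemptive="true"/>
</http:request-config>
----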
== Digest Authentication
Configures digest authentication for the requests.
=== Attributes of digest-authentication
[%header,cols="30a,70a"]
|===
|Name |Description
|username |The username to authenticate. +
Type: string +
Required: yes +
Default: none
|password |The password to authenticate. +
Type: string +
Required: yes +
Default: none
|===
No child elements of digest-authentication.
== NTLM Authentication
Configures NTLM authentication for the requests.
=== Attributes of ntlm-authentication
[%header,cols="30a,70a"]
|===
|Name |Description
|username |The username to authenticate. +
Type: string +
Required: yes +
Default: none
|password |The password to authenticate. +
Type: string +
Required: yes +
Default: none
|domain |The domain to authenticate. +
Type: string +
Required: no +
Default: none
|workstation |The workstation to authenticate. +
Type: string +
Required: no +
Default: none
|===
No child elements of ntlm-authentication.
== Proxy
Reusable configuration element for outbound connections through a proxy.
A proxy element must define host name and port attributes, and can optionally define a username and a password.
=== Attributes of Proxy
`name`: Identifies the proxy configuration in the registry, so that it can be referenced by the request config.
Type: name (no spaces) +
Required: yes +
Default: none
No child elements of proxy.
== NTLM Proxy
Reusable configuration element for outbound connections through a proxy.
A proxy element must define host name and port attributes, and can optionally
define a username and a password.
=== Attributes of ntlm-proxy
`name`: Identifies the proxy configuration in the registry, so that it can be referenced by the request config.
Type: name (no spaces) +
Required: yes +
Default: none
No child elements of ntlm-proxy.
== Config
HTTP global configuration.
=== Attributes of config
`useTransportForUris`: Backwards compatibility flag: Since Mule 3.6, default HTTP URIs are resolved with the new HTTP connector (for example, when using MuleClient). If set to true, this behavior is changed so that the HTTP transport is used.
Type: boolean +
Required: no +
Default: false
| 37.811705
| 455
| 0.766824
|
f9ce2d3a907967488fd74ada5ffcd743106fb751
| 3,228
|
adoc
|
AsciiDoc
|
_posts/2016-10-18-Book-Review-Progress-City-Primer.adoc
|
mouseguests/mouseguests.github.io
|
dfba9251efcb2c404c12c24d6d469a8f23c97832
|
[
"MIT"
] | null | null | null |
_posts/2016-10-18-Book-Review-Progress-City-Primer.adoc
|
mouseguests/mouseguests.github.io
|
dfba9251efcb2c404c12c24d6d469a8f23c97832
|
[
"MIT"
] | null | null | null |
_posts/2016-10-18-Book-Review-Progress-City-Primer.adoc
|
mouseguests/mouseguests.github.io
|
dfba9251efcb2c404c12c24d6d469a8f23c97832
|
[
"MIT"
] | null | null | null |
= Book Review: Progress City Primer
:hp-tags: Reviews, Disney World, Disneyland
:hp-image: covers/ProgressCityPrimer.png
image::covers/ProgressCityPrimer.png[caption="Progress City Primer by Michael Crawford", link="https://www.amazon.com/gp/product/0986205060/ref=as_li_tl?ie=UTF8&camp=1789&creative=9325&creativeASIN=0986205060&linkCode=as2&tag=habumacom-20&linkId=bb1ea4f5992ed4690bbad1c892ec99a3"]
If you're anything like me, you enjoy reading, but struggle to find books that both capture and hold your attention. As a Disney fanatic, however, I've discovered that there are countless books available about Walt Disney, the Walt Disney Company, and the Disney Parks. On the off-chance that some of our _MouseGuests_ readers are looking for a good book to read, I thought I'd write some book reviews on this blog, sharing my thoughts on some of the best Disney books I find.
Among the many books on Disney are several books that delve into Disney history, including one that I finished recently, https://www.amazon.com/gp/product/0986205060/ref=as_li_tl?ie=UTF8&camp=1789&creative=9325&creativeASIN=0986205060&linkCode=as2&tag=habumacom-20&linkId=bb1ea4f5992ed4690bbad1c892ec99a3[_Progress City Primer: Stories, Secrets, and Silliness from the Many Worlds of Walt Disney_ by Michael Crawford].
Walt Disney was a storyteller. And virtually everything that has the Disney name on it, from the movies to the theme parks, is wrapped in a story. So it seems fitting that there be a book that tells stories about Disney. That's exactly what _Progress City Primer_ is: a collection of stories. Stories about Walt, stories about the imagineers, and stories about Disney's Parks.
The book doesn't follow a serial flow. Instead, it is structured such that you can pick it up and start reading at any chapter. (I read it front to back, but that was merely by choice.) Although all of the stories are interesting and provide insight into Walt and the history of the Disney company, I most enjoyed the stories about Walt himself near the beginning of the book. Some of the best stories are "The Lake Buena Vista STOLport", "Walt Disney vs. the Air Pirates", and "Pooh for President", all collected and retold brilliantly by the author. My favorite story in _Progress City Primer_ is a humorous one about Walt Disney, Herb Ryman, and 49 tiny elephants in "Walt's Elephants". Although it's one of the shortest (if not _the_ shortest) stories in the book, it alone is worth the price of the book.
I found _Progress City Primer_ difficult to put down and found myself in multi-chapter marathon reading sessions on airplanes as I traveled. My only complaint is that after 33 fascinating stories, the book ended and left me wanting more Disney stories. As a parting gift, there is an appendix that collects several Disney-related recipes, including a recipe for the iconic strawberry shortcake from the _Hoop-Dee-Doo Musical Revue_ at Walt Disney World's Fort Wilderness.
I highly recommend _Progress City Primer_ to any reader who is even a marginal Disney fan. You won't be disappointed.
Have you read _Progress City Primer_? What did you think? What Disney books can you recommend? Leave a comment and let us know!
| 169.894737
| 832
| 0.803284
|
910b73d825b648a6173435c45118bc55262384f3
| 1,055
|
adoc
|
AsciiDoc
|
documentation/src/docs/asciidoc/reloadly-summary-and-highlights.adoc
|
fossabot/reloadly-services
|
f8b64782274f06d84221d54ac6fab2049fad66df
|
[
"MIT"
] | null | null | null |
documentation/src/docs/asciidoc/reloadly-summary-and-highlights.adoc
|
fossabot/reloadly-services
|
f8b64782274f06d84221d54ac6fab2049fad66df
|
[
"MIT"
] | null | null | null |
documentation/src/docs/asciidoc/reloadly-summary-and-highlights.adoc
|
fossabot/reloadly-services
|
f8b64782274f06d84221d54ac6fab2049fad66df
|
[
"MIT"
] | null | null | null |
ifndef::imagesdir[:imagesdir: images]
[[summary-highlights]]
== Code Challenge Summary
The solution developed for the code challenge embraces modern architectural styles, battle-tested architectural principles, and robust engineering practices.
The following features have been implemented.
//[.thumb]
image::reloadly-solution-highlights-and-summary.png[scaledwidth=100%]
[NOTE]
====
Items grayed out are proposed.
Please see open issues in GitHub https://github.com/arunkpatra/reloadly-services/issues[here].
====
== Code Challenge Highlights
- *~ 1500 lines of code* covered by JUnit tests.
- Modern tech stack, event-driven architecture, and solid design principles.
- Fully leverages Spring Boot Technology.
- Advanced Spring Boot features including auto-configuration.
- Ultra-scalable and resilient transaction processor built on Kafka technology.
- End-to-end enterprise security using a dedicated Authentication Service designed to be used across the enterprise.
- Production ready features, e.g. Spring Boot Admin.
- Extensive documentation.
| 37.678571
| 157
| 0.797156
|
13318ba62e1a332e2580fff67c436b63000ff264
| 2,345
|
adoc
|
AsciiDoc
|
jdbc-examples/README.adoc
|
sahilnarwal/vertx-examples
|
01ec463acea622d335e3fdde81166bfc40c200f3
|
[
"Apache-2.0"
] | null | null | null |
jdbc-examples/README.adoc
|
sahilnarwal/vertx-examples
|
01ec463acea622d335e3fdde81166bfc40c200f3
|
[
"Apache-2.0"
] | 1
|
2022-02-11T00:29:54.000Z
|
2022-02-11T00:29:54.000Z
|
jdbc-examples/README.adoc
|
sahilnarwal/vertx-examples
|
01ec463acea622d335e3fdde81166bfc40c200f3
|
[
"Apache-2.0"
] | null | null | null |
= Vert.x JDBC Client examples
Here you will find examples demonstrating the usage of the Vert.x JDBC Client.
== Simple
This example shows the basic functionality of the JDBC client: it demonstrates how to connect to a database, perform
basic data definition queries by creating a test table plus test data, run simple queries without parameters,
and finally close the connection.
The example is available in several languages:
* Java: link:src/main/java/io/vertx/example/jdbc/simple/JDBCExample.java[JDBCExample.java]
* JavaScript: link:src/main/js/io/vertx/example/jdbc/simple/jdbc_example.js[jdbc_example.js]
* Groovy: link:src/main/groovy/io/vertx/example/jdbc/simple/jdbc_example.groovy[jdbc_example.groovy]
* Ruby: link:src/main/rb/io/vertx/example/jdbc/simple/jdbc_example.rb[jdbc_example.rb]
== Query Params
This is a follow-up to the `Simple` example. In this example you will see how to write queries with parameters; the
advantage of doing so is avoiding common SQL injection security issues, since all parameters are bound into a
`PreparedStatement` before being sent to the database engine. A short Java sketch follows the list of examples below.
The example is available in several languages:
* Java: link:src/main/java/io/vertx/example/jdbc/query_params/JDBCExample.java[JDBCExample.java]
* JavaScript: link:src/main/js/io/vertx/example/jdbc/query_params/jdbc_example.js[jdbc_example.js]
* Groovy: link:src/main/groovy/io/vertx/example/jdbc/query_params/jdbc_example.groovy[jdbc_example.groovy]
* Ruby: link:src/main/rb/io/vertx/example/jdbc/query_params/jdbc_example.rb[jdbc_example.rb]
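For reference, here is a rough sketch of the Java variant. It assumes it runs inside a verticle where `vertx` is available, uses an in-memory HSQLDB purely as a placeholder configuration, and is not a substitute for the linked example.

[source,java]
----
// Requires io.vertx.core.json.JsonObject / JsonArray, io.vertx.ext.jdbc.JDBCClient,
// and io.vertx.ext.sql.SQLConnection on the classpath (vertx-jdbc-client).
JsonObject config = new JsonObject()
  .put("url", "jdbc:hsqldb:mem:test?shutdown=true")   // placeholder in-memory database
  .put("driver_class", "org.hsqldb.jdbcDriver");

JDBCClient client = JDBCClient.createShared(vertx, config);

client.getConnection(res -> {
  if (res.failed()) {
    res.cause().printStackTrace();
    return;
  }
  SQLConnection connection = res.result();
  // The parameter is bound into a PreparedStatement, which avoids SQL injection.
  connection.queryWithParams(
    "SELECT name FROM test WHERE id = ?",
    new JsonArray().add(1),
    query -> {
      if (query.succeeded()) {
        query.result().getRows().forEach(System.out::println);
      }
      connection.close();
    });
});
----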
== Transaction
The Transaction example shows how to start and finish a transaction using the asynchronous JDBC client. Note that the
expected result is `1`, since the example counts the number of inserted rows after the end of the transaction.
The example is only available in Java:
* link:src/main/java/io/vertx/example/jdbc/transaction/JDBCExample.java[JDBCExample.java]
== Transaction Rollback
The Transaction rollback example is a modified version of the simple Transaction example. In this example, after
inserting data we roll back the transaction, and the final count shows that we have `0` rows in our table.
The example is only available in Java:
* link:src/main/java/io/vertx/example/jdbc/transaction_rollback/JDBCExample.java[JDBCExample.java]
| 48.854167
| 116
| 0.805117
|