blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|
d68fe5b13a1ea73f3427dc3490512a7af4b629e3
|
Shell
|
spreadshirt/continuation-token
|
/prepare-bintray-deployment.sh
|
UTF-8
| 752 | 3.171875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Prepare a Bintray deployment directory: stage the pom and jars under
# ./deploy, generate md5/sha1 checksums for each artifact, and render
# bintray.json from its template via envsubst using the exported variables.
#
# Usage: prepare-bintray-deployment.sh <project-root>
set -euo pipefail
PROJECT_ROOT="${1}"
# First <version> tag in pom.xml is the project version (whitespace stripped).
export POM_VERSION=$(grep --max-count=1 '<version>' "${PROJECT_ROOT}/pom.xml" | sed -E 's/<version>(.*)<\/version>/\1/' | tr -d '[:space:]')
export GIT_VERSION=$(git describe --always --tags)
export RELEASE_DATE=$(date --utc --iso-8601)
COMMIT_MESSAGE=${TRAVIS_COMMIT_MESSAGE:-"Automated CI release"}
# Collapse all whitespace (incl. newlines) to single spaces. The tr arguments
# must be quoted: unquoted [:space:] and '\ ' were subject to glob expansion
# and word splitting in the original.
export COMMIT_MESSAGE=$(printf '%s' "${COMMIT_MESSAGE}" | tr '[:space:]' ' ')
rm -rf deploy && mkdir deploy
cp "${PROJECT_ROOT}/pom.xml" "deploy/continuation-token-${POM_VERSION}.pom"
cp "${PROJECT_ROOT}"/target/*.jar deploy
# Checksum every staged artifact; glob instead of parsing ls output.
for f in deploy/*; do
  md5sum < "$f" > "${f}.md5"
  sha1sum < "$f" > "${f}.sha1"
done
envsubst < bintray.json.tmpl > bintray.json
cat bintray.json
| true |
a9e96f8385f1143aa6082d2f9c786cca7e7a90ea
|
Shell
|
Francesco-Giocondo/controllofornitori
|
/raccoltaidee2
|
UTF-8
| 905 | 3.28125 | 3 |
[] |
no_license
|
#!/bin/bash
# Availability watcher: for each product URL listed in file1, fetch the page
# <title> and the "Availability" line into file3, compare the availability
# against the value recorded on the previous run (file2), then promote file3
# to be the new baseline (file2).
#
# Bug fixed: the shebang was "#!/bin/bash mah" — the stray argument made the
# kernel invoke "bash mah <script>", so bash tried to run a file named "mah"
# and the script itself never executed.
row="$(wc --lines < file1 | egrep -o '[0-9]*')"
echo "Ecco le righe del file1: $row"
URL="http://www.svapoweb-pro.com/big-batteries-ecigarette/big-battery-kanger/kit-kanger-subox-nano-black"
# -f: do not error on the first run when file3 does not exist yet.
rm -f file3
COUNTER=1
RCOUNTER=2
while [ "$COUNTER" -le "$row" ]; do
  echo "numero ciclo: $COUNTER"
  # file2 stores two lines per URL (title + availability), so the
  # availability line for URL number N is line 2*N.
  if [ "$COUNTER" != "1" ]; then
    RCOUNTER=$((COUNTER * 2))
  else
    echo
  fi
  URL=$(sed -n "${COUNTER}p" file1)
  # Extract the <title> text (case-insensitive, whole-document slurp).
  wget -qO- "$URL" | perl -l -0777 -ne 'print $1 if /<title.*?>\s*(.*?)\s*<\/title/si' >> file3
  AVA=$(lynx -dump "$URL" | grep -i Availability)
  CHECK=$(sed -n "${RCOUNTER}p" file2)
  if [ "$CHECK" = "$AVA" ]; then
    echo quantità invariata
  else
    echo ATTENZIONE! quantità diversa
  fi
  # Both branches appended the same line; record it once.
  lynx -dump "$URL" | grep -i Availability >> file3
  COUNTER=$((COUNTER + 1))
done
cp file3 file2
| true |
a771161d3e2fb4acb98920dc3afa52632ad1c7cd
|
Shell
|
rubixlinux/rubixlinux
|
/kde/qt/PKGBUILD
|
UTF-8
| 5,282 | 2.65625 | 3 |
[] |
no_license
|
# Maintainer: Joshua Rubin <joshua@rubixlinux.org>
# PKGBUILD metadata for the Qt 3 toolkit package (Rubix Linux recipe).
pkgname=qt
pkgver=3.3.6
pkgrel=2
pkgdesc="Qt (a multi-platform C++ graphical user interface toolkit) Qt is a complete and well-developed object-oriented framework for developing graphical user interface (GUI) applications using C++. This release is free only for development of free software for the X Window System. If you use Qt for developing commercial or other non-free software, you must have a professional license. Please see http://www.trolltech.com/purchase.html for information on how to obtain a professional license."
url="http://www.trolltech.com/products/qt/index.html"
depends=('libSM' 'libXext' 'glibc' 'mesa3d' 'fontconfig' 'openssl' 'expat' 'libICE' 'libXrender' 'zlib' 'libXau' 'freetype2' 'libXfixes' 'libjpeg' 'libXcursor' 'libX11' 'libXdmcp' 'libXmu' 'mysql' 'libmng' 'libXt' 'gcc' 'libpng' 'libXrandr' 'libXinerama' 'libXft')
install=$pkgname.install
# Upstream source tarball plus local patches and shell profile scripts.
source=(ftp://ftp.trolltech.com/qt/source/$pkgname-x11-free-$pkgver.tar.bz2 \
qt-x11.diff \
qt.csh \
qt.mysql.h.diff \
qt.sh \
qt.x86.cflags.diff \
qt-uic-fix.diff )
# One md5 checksum per source entry, in the same order as source=() above.
md5sums=('dc1384c03ac08af21f6fefab32d982cf' \
'45c9e1b5e141b998e8240990696129ce' \
'1e06d17cea6a138d67a5e22f6c3761e1' \
'8ab765eef22a871177a4cfeba2be8249' \
'391cfbf36341bab9150b7a3784fd3e08' \
'ee83e613e11e7acc10ea8f2294ccf0e2' \
'08ed7e5a6f33e7f88c7e86f13898fca2' )
## Todo:
## None
## Notes:
## The x11 patch enables the builtin gif reader
## The mysql patch changes the mysql include directory
## The x86.cflags sets the proper Rubix CFLAGS, qt is built with O2 an no PIE or SSP
## Changelog:
## rel2: rebuilt without dependance on nvidia 2006.04.23
## rel1: upgraded to 3.3.6 2006.04.22
## rel4: moved pkgconfig file to proper location 2006.03.21
## rel3: rebuilt against mysql 5.0, removed sql dependencies 2006.03.05
## rel2: fixed namespace hinting 2006.01.21
## rel1: upgraded to 3.3.5 2005.10.13
## rel6: added stl 2005.03.27
## rel5: rebuild with new cflags 2005.03.17
## rel4: Fixed build flags
## rel1: Initial Rubix release
# Build, configure and stage Qt 3 into $startdir/pkg. The sequence is
# strictly order-dependent: patch the source tree, configure/build in place,
# then copy the built tree into the package root and prune it.
build() {
# Point the build environment at the unpacked source tree.
export QTDIR=$startdir/src/$pkgname-x11-free-$pkgver
export PATH=$QTDIR/bin:$PATH
export MANPATH=$QTDIR/doc/man:$MANPATH
export LD_LIBRARY_PATH=$QTDIR/lib:$LD_LIBRARY_PATH
cd $QTDIR
# Apply local patches (see Notes above); any failure aborts the build.
patch -p1 < $startdir/src/qt.x86.cflags.diff || return 1
patch -p1 < $startdir/src/qt-x11.diff || return 1
patch -p1 < $startdir/src/qt.mysql.h.diff || return 1
patch -p0 < $startdir/src/qt-uic-fix.diff || return 1
# Normalize odd permission bits shipped in the tarball.
find . -perm 2775 -exec chmod 755 {} \;
find . -perm 2755 -exec chmod 755 {} \;
find . -perm 775 -exec chmod 755 {} \;
find . -perm 555 -exec chmod 755 {} \;
find . -perm 664 -exec chmod 644 {} \;
find . -perm 444 -exec chmod 644 {} \;
# Documentation directory with symlinks into the installed tree.
mkdir -p $startdir/pkg/usr/doc/$pkgname-$pkgver
cd $startdir/pkg/usr/doc/$pkgname-$pkgver
ln -sf /usr/lib/$pkgname-$pkgver/examples .
ln -sf /usr/lib/$pkgname-$pkgver/tutorial .
ln -sf /usr/lib/$pkgname-$pkgver/doc/html .
ln -sf /usr/lib/$pkgname-$pkgver/doc/man .
cd $QTDIR
# "yes" accepts the license prompt non-interactively.
echo "yes" | ./configure \
-prefix /usr/lib/qt \
-release \
-system-zlib \
-system-libpng \
-qt-imgfmt-png \
-system-libmng \
-qt-imgfmt-mng \
-system-libjpeg \
-qt-imgfmt-jpeg \
-qt-gif \
-thread \
-stl \
-no-g++-exceptions \
-xft \
-plugin-sql-mysql \
-plugin-style-cde \
-plugin-style-compact \
-qt-style-motif \
-plugin-style-motifplus \
-plugin-style-platinum \
-plugin-style-sgi \
-plugin-style-windows \
-enable-opengl
make symlinks sub-src sub-tools || return 1
# Copy the whole built tree into the package and give it its final name.
mkdir -p $startdir/pkg/usr/lib
cp -a $QTDIR $startdir/pkg/usr/lib
cd $startdir/pkg/usr/lib
mv $pkgname-x11-free-$pkgver $pkgname-$pkgver
ln -sf $pkgname-$pkgver $pkgname
cd $startdir/pkg/usr/lib/$pkgname-$pkgver
# Move top-level docs out of the library tree into /usr/doc.
mv \
FAQ \
INSTALL \
LICENSE.GPL \
LICENSE.QPL \
MANIFEST \
PLATFORMS \
README \
changes* \
$startdir/pkg/usr/doc/$pkgname-$pkgver
# Headers in the source tree are symlinks; the copy-to-tmp dance replaces
# them with regular files so the package is self-contained.
cd $startdir/pkg/usr/lib/qt/include/private
mkdir tmp
cp *.h tmp
rm *.h
mv tmp/* .
rmdir tmp
cd ..
mkdir tmp
cp *.h tmp
rm *.h
mv tmp/* .
rmdir tmp
# Same trick for the qmake binary (dereference the symlink).
cd $startdir/pkg/usr/lib/qt/bin
cp qmake qmake.bin
rm qmake
mv qmake.bin qmake
# Expose the Qt tools on the default PATH via /usr/bin symlinks.
mkdir -p $startdir/pkg/usr/bin
cd $startdir/pkg/usr/bin
for file in \
assistant \
designer \
linguist \
lrelease \
lupdate \
moc \
qm2ts \
qmake \
qtconfig \
uic
do
ln -sf /usr/lib/qt/bin/$file .
done
# Prune build leftovers that must not ship in the package.
rm $startdir/pkg/usr/lib/qt/lib/README
cd $startdir/pkg/usr/lib/qt
find . -type d -name .obj -exec rm -r {} \;
find . -type d -name .moc -exec rm -r {} \;
rm -r \
config.tests \
qmake \
src \
tools \
translations \
config.status \
configure \
Makefile
# Shell profile snippets that set QTDIR etc. for login shells.
mkdir -p $startdir/pkg/etc/profile.d
cp $startdir/src/qt.sh $startdir/pkg/etc/profile.d/qt.sh
cp $startdir/src/qt.csh $startdir/pkg/etc/profile.d/qt.csh
chmod 755 $startdir/pkg/etc/profile.d/*
cd $startdir/pkg/usr/lib/qt
# Revert the Rubix-specific CFLAGS patch in the shipped tree.
patch -p1 -R < $startdir/src/qt.x86.cflags.diff || return 1
mkdir -p $startdir/pkg/usr/lib/pkgconfig
mv $startdir/pkg/usr/lib/qt-$pkgver/lib/*.pc $startdir/pkg/usr/lib/pkgconfig
chown -R root.bin $startdir/pkg/usr/bin
find $startdir/pkg -perm 444 -exec chmod 644 {} \;
find $startdir/pkg -name *.la -exec rm {} \;
}
| true |
3331fd0461ebee65823756d8aee3a787b4346a61
|
Shell
|
UlasSAYGINIM/jailize
|
/jailize
|
UTF-8
| 22,011 | 2.8125 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
#
# Script to manage jails for daily tasks based on dhcpd, netgraph and vnet
#
# Written by M.Zhilin aka mizhka (FreeBSD/NetCracker/PostgresPro)
#
# v0.1 - time to publish stable version (2016-2021)
#
### Some random notes below
#### /boot/loader.conf
# ipfw_nat_load="YES"
# libalias_load="YES"
# net.inet.ip.fw.default_to_accept="1"
###############
# pkg install dhcpd
#####
# unbound local
#
# module-config: "iterator"
# interface: 0.0.0.0
# access-control: 0.0.0.0/0 allow
#############
#### Poudriere ##### poudriere + nginx
#zfs set mountpoint=/poudriere buildpool/jails/poudriere/data
#
#/usr/local/etc/poudriere.conf
#ZPOOL=zroot
#ZROOTFS=/jails/poudriere/data
#BASEFS=/poudriere
#BUILD_AS_NON_ROOT=yes
#DISTFILES_CACHE=/poudriere/distfiles
#FREEBSD_HOST=https://download.FreeBSD.org
#KEEP_OLD_PACKAGES_COUNT=10
#KEEP_OLD_PACKAGES=yes
#MAX_EXECUTION_TIME=86400
#NOLINUX=yes
#PARALLEL_JOBS=3
#PRESERVE_TIMESTAMP=yes
#RESOLV_CONF=/etc/resolv.conf
#USE_PORTLINT=no
#ALLOW_MAKE_JOBS_PACKAGES="cmake pkg llvm* gcc* node* *webengine rust* firefox* mame mess"
#
#poudriere ports -c
#poudriere jail -c -v 12.2-RELEASE -j amd64-12
#poudriere jail -c -v 12.2-RELEASE -j amd64-12-debug
#zfs create buildpool/jails/poudriere/data/distfiles
#poudriere bulk -J 4 -j amd64-12 -p default databases/postgresql12-server
# pkg install nginx
# cp /usr/local/share/examples/poudriere/nginx.conf.sample /usr/local/etc/nginx/nginx.conf
# Let's root it
# Re-exec the whole script through sudo so everything below runs as root.
[ "root" != "$USER" ] && exec sudo $0 "$@"
CONFFILE="/usr/local/etc/jailize.conf"
# Optional site overrides (RELEASE, DOMAIN, ZFSJAIL, DIRJAIL, ...).
if [ -f $CONFFILE ]; then
. $CONFFILE
fi
# command parameters
# Defaults; most can be overridden by getopts flags below.
_action="create"
_basejail=base-default
_clean=0
_interactive=0
# Default jail name is date-stamped, e.g. tmp20210101.
_jailname="tmp`date +%Y%m%d`"
_release=${RELEASE:-"12.2-RELEASE"}
_verbose=0
_quantity=1
####### Extra settings
_domain=${DOMAIN:-"unclebear.ru"}
_internal_nw=${INTERNAL_NW:-"192.168.20.0"}
_internal_ip=${INTERNAL_IP:-"192.168.20.1"}
# MAC address prefix: 6a:61:69:6c spells "jail" in ASCII.
_etherprefix=${ETHERPREFIX:-"6a:61:69:6c:"}
_zfsjail=${ZFSJAIL:-"tank/jails"}
_dirjail=${DIRJAIL:-"/usr/jails"}
_dhcpconf="/usr/local/etc/jailize.dhcpd.conf"
_jailconf="/usr/local/etc/jailize.jail.conf"
_unboundconf="/var/unbound/conf.d/jailize.conf"
# vnet
KLDMODS="ng_ipfw ng_ether ng_nat ng_eiface"
NG_IFPREFIX="ng-"
INTERNAL_IF="${NG_IFPREFIX}jailgw"
NG_NATNAME="jail_nat0"
NG_BRGNAME="jail_bridge0"
# base
DEFAULT_PKGS="pkg tmux zsh neovim pfetch python3 sudo"
DISTR_HOST=https://download.freebsd.org/ftp/releases/amd64
DISTCACHE=${HOME}/.jailize/
# Environment discovery
# Default-route interface and its primary IPv4 address.
EXTERNAL_IF=`netstat -r4n | grep defa | cut -f4 -w`
EXTERNAL_IP=`ifconfig $EXTERNAL_IF | grep inet | cut -f3 -w`
# Coloring
RED='\033[0;31m'
GREEN='\033[1;32m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color
######## Common functions
# Debug logging: printed only when -v was given (_verbose != 0).
log ( )
{
if [ $_verbose -ne 0 ] ; then
echo -e "["$(date "+%v %T")" DEBUG]" $*
fi
}
# Informational message with a timestamp prefix.
print ( )
{
echo -e "["$(date "+%v %T")" INFO ]" $*
}
# Error message with a red ERROR tag.
print_error ( )
{
echo -e "["$(date "+%v %T") "${RED}ERROR${NC}]" $*
}
# Abort the whole script; arguments identify the failing function+line.
fatal ( )
{
echo "Stopped at function" $*
exit 1
}
# CLI usage summary for all subcommands.
helpme ( )
{
echo "usage: jailize base [-c] [-b basejail] [-r release] [pkgs]"
echo " jailize delta [-j jailname]"
echo " jailize delete [-j jailname] "
echo " jailize init"
echo " jailize forward -j jailname port"
echo " jailize list"
echo " jailize rename [-j jailname] newname"
echo " jailize start [-i] [-b basejail] [-j jailname]"
echo " jailize shell [jailname]"
echo " jailize vnet -a create"
echo " jailize vnet -a delete"
echo " jailize vnet -a fini"
echo " jailize vnet -a init"
}
# Validate a jail name's length (max 12 chars — the name is embedded in
# netgraph interface names). Skipped for "delete" so that over-long stale
# jails can still be removed. Exits 1 on violation.
# Arguments: $1 - jail name to validate.
check_len ()
{
local _val
_val=$1
# POSIX test uses '=' for string comparison; '==' is a bashism and this
# script runs under /bin/sh.
if [ "${_cmd}" = "delete" ]; then
return
fi
if [ ${#_val} -gt 12 ]; then
print_error "Jail name ($_val) is too long. Maximum length is 12 chars"
exit 1
fi
}
############# BASE ###############
# Create (or refresh with -c) the base jail template: fetch and unpack the
# FreeBSD base+lib32 sets, boot the jail once to install DEFAULT_PKGS plus
# any extra packages given as arguments, then snapshot it as @base for
# cloning by jail_singlestart.
base_main ( )
{
local _jaildir _jailzfs _url _pkgs _distcache
_jailname=${_basejail}
_jaildir=${_dirjail}/${_jailname}
_jailzfs=${_zfsjail}/${_jailname}
_url=${DISTR_HOST}/${_release}
_pkgs="${DEFAULT_PKGS} "$@
_distcache=${DISTCACHE}/${_release}/
print "Fill basejail \"${_jailname}\" by base and packages:"
print "*${YELLOW} ${_release} (base lib32)${NC}"
print "*${YELLOW} ${_pkgs}${NC}"
# -c: destroy any existing basejail dataset (stopping the jail and
# unmounting anything below its directory first).
if [ ${_clean} -ne 0 ]
then
zfs get -H name ${_jailzfs} > /dev/null 2>&1
if [ $? -eq 0 ]; then
log "Clean old basejail ${_jailname}"
jls -j ${_jailname} > /dev/null 2>&1
if [ $? -eq 0 ]
then
${_jailcmd} -r ${_jailname} || fatal base_main+${LINENO}
fi
mount | grep "${_jaildir}/" | cut -f3 -w | xargs -I$ umount $
# FIXME: cannot open 'tank/guests/vm/jails/base1202': dataset does not exist
zfs destroy -r ${_jailzfs} || fatal base_main+${LINENO}
fi
fi
# Download distribution sets once and cache them under ~/.jailize.
mkdir -p ${_distcache}
if [ ! -e ${_distcache}/base.txz ]
then
log "Fetch base.txz of ${_release} "
fetch -o ${_distcache} ${_url}/base.txz || fatal base_main+${LINENO}
fi
if [ ! -e ${_distcache}/lib32.txz ]
then
log "Fetch lib32.txz of ${_release} "
fetch -o ${_distcache} ${_url}/lib32.txz || fatal base_main+${LINENO}
fi
zfs create -p -o mountpoint=${_jaildir} ${_jailzfs} || fatal base_main+${LINENO}
# .tardone marks that the sets were already extracted (idempotent re-run).
if [ ! -e ${_jaildir}/.tardone ]
then
log "Unpack base.txz and lib32.txz into new basejail ${_jailname}"
tar -xf ${_distcache}/base.txz -C ${_jaildir} || fatal base_main+${LINENO}
tar -xf ${_distcache}/lib32.txz -C ${_jaildir} || fatal base_main+${LINENO}
touch ${_jaildir}/.tardone
fi
mkdir -p ${_jaildir}/usr/ports || fatal base_main+${LINENO}
# freebsd-update -b ${BASEJAILDIR} fetch install || fatal base_main+${LINENO}
print "Configure basejail ${_jailname}"
jail_singlestart ${_jailname} || fatal base_main+${LINENO}
# rc.conf's ifconfig_X doesn't work in jail, so crontab it
(echo '@reboot (/sbin/dhclient $(/sbin/ifconfig -l ether))';\
echo '@reboot (/sbin/ipfw add 5000 allow all from any to any)';) \
| jexec ${_jailname} crontab -
# Restart so the @reboot crontab entries take effect, then install packages.
${_jailcmd} -rc ${_jailname} || fatal base_main+${LINENO}
jexec ${_jailname} pkg install -y ${_pkgs} || fatal base_main+${LINENO}
jexec ${_jailname} tee /usr/local/etc/zshrc > /dev/null <<EOF
autoload promptinit;
promptinit;
prompt adam1 blue;
export CLICOLOR=1
EOF
# Stop the jail and (re)create the @base snapshot used for cloning.
${_jailcmd} -r ${_jailname} || fatal base_main+${LINENO}
zfs get -H name ${_jailzfs}@base > /dev/null 2>&1
if [ $? -eq 0 ]
then
zfs destroy ${_jailzfs}@base
fi
zfs snapshot ${_jailzfs}@base || fatal base_main+${LINENO}
print "\"${_jailname}\" ready"
# Cleanup
zfs list -r -t snapshot ${_jailzfs}
}
############# VNET ###############
# Load kernel modules
# Ensure every netgraph kernel module listed in KLDMODS is loaded.
# kldload -n is a no-op for modules that are already present.
load_kld ( )
{
  local _mod
  print "Loading kernel modules"
  for _mod in ${KLDMODS}
  do
    log "... Loading" ${_mod}
    kldload -n ${_mod}
  done
  print "All kernel module loaded"
}
# Build the ipfw + ng_nat plumbing: one NAT netgraph node per external
# interface, with tee nodes for inspection, and ipfw "netgraph" rules that
# divert jail traffic into them.
# Arguments: $1 - NAT node name prefix, $2 - internal IP, $3 - internal if.
vnet_init_ipfw_nat ( )
{
local _name _in _out _ext_ip _ext_if _int_ip _int_if
_int_ip=$2
_int_if=$3
# NOTE(review): $_name is printed here before it is assigned (it is set
# inside the loop below as $1<index>) — the first message shows it empty.
print "[NAT] start configuration of $_name from $_int_ip/$_int_if"
# remove all ipfw rules between 100 and 999
vnet_destroy_ipfw_rules
ipfw -q add 100 allow ip from any to $_int_ip in via $_int_if || fatal vnet_init_ipfw_nat+${LINENO}
# iterate over interfaces and construct firewall rules based on routing tables
_i=0
for _ext_if in $(netstat -rn4 --libxo json | \
jq '.statistics."route-information"."route-table"."rt-family"[] | select(."address-family"=="Internet") | ."rt-entry"[] | select(."interface-name"!="lo0") | select(."destination"!="'${_internal_nw}'/24")."interface-name"' | \
sort | uniq | tr -d \")
do
_i=$((_i + 1))
# Rule numbering scheme: interface i uses 100*i (inbound hook) and
# 100*i+10 (outbound hook).
_in=$((_i * 100))
_out=$((_in + 10))
_name=$1${_i}
if [ ${_i} -ge 10 ]; then
print "TOO MANY INTERFACES"
fatal vnet_init_ipfw_nat+${LINENO}
fi
_ext_ip=$(ifconfig ${_ext_if} inet | grep inet | cut -f3 -w)
print \\t ${_i}: ${_name}\\t${_ext_if}\\t${_ext_ip}\\t${_in}\\t${_out}
_j=0
# loop over network masks
for _ext_nw in $(netstat -rn4 | grep ${_ext_if} | cut -f1 -w | sort | uniq)
do
_j=$((_j + 1))
if [ "${_ext_nw}" = "default" ]; then
ipfw -q add 998 netgraph ${_out} all from ${_internal_nw}/24 to any in via ${_int_if} || fatal vnet_init_ipfw_nat+${LINENO}
else
ipfw -q add $((_in + _j)) netgraph ${_out} all from any to ${_ext_nw} in via ${_int_if} || fatal vnet_init_ipfw_nat+${LINENO}
fi
done
ipfw -q add $((_in + 99)) netgraph ${_in} all from any to any in via ${_ext_if} || fatal vnet_init_ipfw_nat+${LINENO}
# exit if ipfw/nat is configured
ngctl info ${_name}: > /dev/null 2>&1 && continue
#ngctl mkpeer ipfw: nat ${_in} in || fatal vnet_init_ipfw_nat+${LINENO}
#ngctl name ipfw:${_in} ${_name} || fatal vnet_init_ipfw_nat+${LINENO}
#ngctl connect ipfw: ${_name}: ${_out} out || fatal vnet_init_ipfw_nat+${LINENO}
# Graph: ipfw(in-hook) -> tee di_<name> -> nat <name> <- tee do_<name> <- ipfw(out-hook)
ngctl mkpeer ipfw: tee ${_in} left || fatal vnet_init_ipfw_nat+${LINENO}
ngctl name ipfw:${_in} di_${_name} || fatal vnet_init_ipfw_nat+${LINENO}
ngctl mkpeer di_${_name}: nat right in || fatal vnet_init_ipfw_nat+${LINENO}
ngctl name di_${_name}:right ${_name} || fatal vnet_init_ipfw_nat+${LINENO}
ngctl mkpeer ipfw: tee ${_out} left || fatal vnet_init_ipfw_nat+${LINENO}
ngctl name ipfw:${_out} do_${_name} || fatal vnet_init_ipfw_nat+${LINENO}
ngctl connect do_${_name}: ${_name}: right out || fatal vnet_init_ipfw_nat+${LINENO}
ngctl msg ${_name}: setaliasaddr ${_ext_ip} || fatal vnet_init_ipfw_nat+${LINENO}
done
# golden rule
ipfw -q add 65534 allow ip from any to any || fatal vnet_init_ipfw_nat+${LINENO}
print "[NAT] done"
}
# Re-apply setaliasaddr on every NAT node — needed after an external
# interface changes its IP (e.g. DHCP renew).
# Arguments: $1 - NAT node name prefix.
# NOTE(review): _i is never initialized here; it relies on being unset (or 0)
# globally so that $((_i + 1)) starts at 1 — verify callers don't leave a
# stale value behind.
vnet_refresh_nat ( )
{
for _ext_if in $(netstat -rn4 --libxo json | \
jq '.statistics."route-information"."route-table"."rt-family"[] | select(."address-family"=="Internet") | ."rt-entry"[] | select(."interface-name"!="lo0") | select(."destination"!="'${_internal_nw}'/24")."interface-name"' | \
sort | uniq | tr -d \")
do
_i=$((_i + 1))
_name=$1${_i}
if [ ${_i} -ge 10 ]; then
print "TOO MANY INTERFACES"
fatal vnet_refresh_nat+${LINENO}
fi
_ext_ip=$(ifconfig ${_ext_if} inet | grep inet | cut -f3 -w)
print \\t ${_i}: ${_name}\\t${_ext_if}\\t${_ext_ip}
ngctl msg ${_name}: setaliasaddr ${_ext_ip} || fatal vnet_refresh_nat+${LINENO}
done
}
# Remove every ipfw rule in the range [100, 999] that jailize reserves for
# its NAT plumbing; deletion errors for absent rules are silenced.
vnet_destroy_ipfw_rules ( )
{
  _i=100
  until [ ${_i} -ge 1000 ]; do
    ipfw -q delete ${_i} > /dev/null 2>&1
    _i=$((_i + 1))
  done
}
# Best-effort teardown of a single netgraph node by name; errors (such as
# the node not existing) are deliberately silenced.
vnet_destroy_ng_node ( )
{
  ngctl shutdown "$1": > /dev/null 2>&1
}
# Create the jail bridge: an ng_bridge hanging off the external interface's
# lower hook, plus an internal eiface that becomes the host-side gateway.
# Arguments: $1 - bridge node name, $2 - external if, $3 - internal IP,
# $4 - internal if name. Idempotent: returns early if the bridge exists.
vnet_init_jail_bridge ( )
{
local _bridge _int_if _int_ip _ext_if _oldname _ether
_bridge=$1
# netgraph node names cannot contain '.'; translate to '_'.
_ext_if=$(echo $2 | tr . _)
_int_ip=$3
_int_if=$4
# Fixed MAC for the gateway interface ("jail" in ASCII + 00:aa).
_ether="6a:61:69:6c:00:aa"
print "Initializing bridge $_bridge"
print "Interfaces: $_ext_if <> $_int_if/$_int_ip"
# Make sure the interface has been bridged
ngctl info ${_bridge}: > /dev/null 2>&1 && return
# Create bridge
ngctl mkpeer $_ext_if: bridge lower link0 || fatal init_jail_bridge+${LINENO}
ngctl name $_ext_if:lower ${_bridge} || fatal init_jail_bridge+${LINENO}
ngctl mkpeer ${_bridge}: eiface link1 ether || fatal init_jail_bridge+${LINENO}
# Disconnect from external interface
ngctl rmhook ${_bridge}: link0 || fatal init_jail_bridge+${LINENO}
# Rename the auto-generated ngeth* interface to the internal if name.
_oldname=`ngctl show -n ${_bridge}:link1 | cut -f3 -w`
ngctl name ${_bridge}:link1 ${_int_if} || fatal init_jail_bridge+${LINENO}
ifconfig ${_oldname} name ${_int_if} > /dev/null || fatal init_jail_bridge+${LINENO}
ifconfig ${_int_if} inet alias ${_int_ip} > /dev/null || fatal init_jail_bridge+${LINENO}
ifconfig ${_int_if} ether ${_ether} || fatal create_interface+${LINENO}
print "Let packets continue with after being (de)aliased"
sysctl net.inet.ip.fw.one_pass=0
sysctl net.inet.ip.forwarding=1
}
# Undo everything vnet_init set up: stop the jail DHCP server, shut down
# the NAT-related netgraph nodes (up to 9 uplinks, incl. their tee nodes),
# the bridge and the internal gateway interface, then flush ipfw rules.
vnet_fini ( )
{
  pkill -f "dhcpd ${INTERNAL_IF}"
  _i=1
  while [ ${_i} -le 9 ]; do
    vnet_destroy_ng_node "${NG_NATNAME}${_i}"
    vnet_destroy_ng_node "di_${NG_NATNAME}${_i}"
    vnet_destroy_ng_node "do_${NG_NATNAME}${_i}"
    _i=$((_i + 1))
  done
  vnet_destroy_ng_node "${NG_BRGNAME}"
  vnet_destroy_ng_node "${INTERNAL_IF}"
  vnet_destroy_ipfw_rules
}
# Bring up the whole vnet infrastructure: kernel modules, bridge, NAT and a
# dhcpd instance on the internal interface. Safe to call repeatedly.
vnet_init ( )
{
local _out _i
load_kld
vnet_init_jail_bridge ${NG_BRGNAME} ${EXTERNAL_IF} ${_internal_ip} ${INTERNAL_IF}
vnet_init_ipfw_nat ${NG_NATNAME} ${_internal_ip} ${INTERNAL_IF}
print "Start DHCPD on ${INTERNAL_IF}"
# Only start dhcpd if one isn't already serving the internal interface.
pgrep -f "dhcpd -c ${_dhcpconf} ${INTERNAL_IF}" > /dev/null 2>&1
if [ $? -ne 0 ]; then
_out=$(dhcpd -c ${_dhcpconf} ${INTERNAL_IF} 2>&1)
# On dhcpd failure, roll back the vnet setup before aborting.
if [ $? -ne 0 ]; then
print ${_out}
vnet_fini
fatal vnet_init+${LINENO}
fi
fi
}
# Create the per-jail eiface attached to the bridge, named ng-<jailname>,
# with a deterministic MAC derived from the jail name. Idempotent.
vnet_create ( ) {
local _num _link _ifname _oldname _ether _bridge
# Lazily bring up the bridge/NAT infrastructure if it's missing.
ngctl info ${NG_BRGNAME}: > /dev/null 2>&1 || vnet_init
_num=2
_ifname="${NG_IFPREFIX}${_jailname}"
_bridge=${NG_BRGNAME}
# Silently exit if interface exists (job is done before)
ngctl msg "$_ifname:" getifname > /dev/null 2>&1 && \
print "Interface $_ifname already exists" && return
# Find the first free bridge link number (getstats fails for unused links).
while ngctl msg ${_bridge}: getstats $_num > /dev/null 2>&1
do
_num=$(( $_num + 1 ))
done
_link="link${_num}"
# 6a:61:69:6c means "jail"
# mac is linked to jailname which is immutable
# TODO how to avoid collision? base64?
# Last two MAC octets come from the md5 of the jail name.
_ether=$(md5 -qs ${_jailname} | sed -e "s#^\(..\)\(..\).*#\1:\2#g")
print "Create interface ${_ifname} with MAC ${_etherprefix}${_ether}"
ngctl mkpeer ${_bridge}: eiface ${_link} ether || fatal create_interface+${LINENO}
# Rename both the netgraph node and the system interface to ng-<jailname>.
_oldname=`ngctl show -n ${_bridge}:${_link} | cut -f3 -w`
log "Adjust names (netgraph) ${_oldname} -> ${_ifname}"
ngctl name ${_bridge}:${_link} ${_ifname} || fatal create_interface+${LINENO}
log "Adjust names (if) ${_oldname} -> ${_ifname}"
ifconfig ${_oldname} name ${_ifname} > /dev/null || fatal create_interface+${LINENO}
print "Set MAC address: ${_etherprefix}${_ether}"
ifconfig ${_ifname} ether ${_etherprefix}${_ether} || fatal create_interface+${LINENO}
print "Done"
}
# Dispatch for the "vnet" subcommand, keyed on the -a <action> flag.
# Unknown actions print usage. Always exits 0.
vnet_main ( )
{
  case "${_action}" in
  create)
    vnet_create ;;
  delete)
    vnet_destroy_ng_node "${NG_IFPREFIX}${_jailname}" ;;
  refresh)
    vnet_refresh_nat ${NG_NATNAME} ;;
  init)
    vnet_init ;;
  fini)
    vnet_fini ;;
  *)
    helpme ;;
  esac
  exit 0
}
###################### Jail ################################
# One-time host setup: write default dhcpd / unbound / jail.conf templates
# (only if absent), create the jail ZFS root and bring up the vnet stack.
jail_init ()
{
# DHCP pool for jails on the internal 192.168.20.0/24 network.
if [ ! -e ${_dhcpconf} ]; then
cat > ${_dhcpconf} << EOF
option subnet-mask 255.255.255.0;
default-lease-time 600;
max-lease-time 7200;
subnet 192.168.20.0 netmask 255.255.255.0 {
range 192.168.20.2 192.168.20.40;
option domain-name-servers ${_internal_ip};
option routers ${_internal_ip};
}
EOF
fi
# Local unbound acting as an open resolver for the jails.
if [ ! -e ${_unboundconf} ]; then
cat > ${_unboundconf} << EOF
server:
module-config: "iterator"
interface: 0.0.0.0
access-control: 0.0.0.0/0 allow
EOF
service local_unbound restart
fi
# Shared jail.conf template; \$name and \${path} are left escaped so jail(8)
# expands them per-jail at runtime.
if [ ! -e ${_jailconf} ]; then
cat > ${_jailconf} << EOF
host.hostname="\$name.${_domain}";
path = "${_dirjail}/\$name";
vnet;
vnet.interface = "${NG_IFPREFIX}\$name";
exec.clean;
# Start
exec.prestart = "/sbin/mount -t unionfs -o below /usr/ports/ \${path}/usr/ports";
exec.prestart += "sh /usr/local/bin/jailize vnet -a create -j \$name";
exec.start = "/bin/sh /etc/rc";
exec.start += "env netwait_ip=${_internal_ip} netwait_timeout=5 /etc/rc.d/netwait onestart";
exec.poststart = "/sbin/zfs jail \$name ${_zfsjail}/\$name/data";
## zfs mount -a inside jail
## ----- exec.poststart += "sh /usr/local/bin/jailize forward -j \$name 80";
## ----- osrelease="12.2-RELEASE";
# Stop
exec.stop = "/bin/sh /etc/rc.shutdown";
exec.poststop = "/sbin/umount -f \${path}/usr/ports";
exec.poststop += "sh /usr/local/bin/jailize vnet -a delete -j \$name";
# Allows the jail to mount file systems: devfs, nullfs, tmpfs, procfs, zfs
allow.mount;
allow.mount.devfs;
allow.mount.nullfs;
allow.mount.tmpfs;
allow.mount.procfs;
allow.mount.zfs;
allow.chflags;
allow.raw_sockets;
allow.set_hostname = 1;
allow.socket_af;
allow.sysvipc = 1;
children.max = 20;
devfs_ruleset = "6";
enforce_statfs = 1;
mount.devfs;
EOF
fi
zfs create -p ${_zfsjail}
vnet_init
}
# Start one jail, or a numbered batch when -n <quantity> was given
# (names become <jailname>01 .. <jailname>NN), then refresh NAT aliases.
jail_bulkstart ( )
{
  if [ ${_quantity} -ne 1 ]; then
    i=0
    while [ "$i" -lt "$_quantity" ]; do
      i=$((i + 1))
      jail_singlestart "${_jailname}$(printf %02d $i)"
    done
  else
    jail_singlestart ${_jailname}
  fi
  vnet_refresh_nat ${NG_NATNAME}
}
# Stop one jail, or the whole numbered batch when -n <quantity> was given.
jail_bulkstop ( )
{
  if [ ${_quantity} -ne 1 ]; then
    i=0
    while [ "$i" -lt "$_quantity" ]; do
      i=$((i + 1))
      jail_singlestop "${_jailname}$(printf %02d $i)"
    done
  else
    jail_singlestop ${_jailname}
  fi
}
# Start one jail: clone its dataset from the basejail @base snapshot if it
# doesn't exist yet, ensure a jailed data sub-dataset, register the jail in
# jailize.jail.conf and start it. With -i, drop into its shell afterwards.
# Arguments: $1 - jail name.
jail_singlestart ( )
{
local _origin _targetname _jailzfs
_targetname=$1
_origin="${_zfsjail}/${_basejail}@base"
_jailzfs=${_zfsjail}/${_targetname}
# Check if dataset exists
zfs get name ${_zfsjail}/${_targetname} > /dev/null 2>&1
if [ $? -ne 0 ]
then
log "Create ZFS dataset ${_jailzfs}"
zfs clone ${_origin} ${_jailzfs} || \
fatal jail_singlestart+${LINENO}: zfs clone ${_origin} ${_jailzfs}
fi
# jailed=on: the data dataset is managed from inside the jail (zfs jail).
zfs create -p -o mountpoint=none -o jailed=on ${_jailzfs}/data || \
fatal jail_singlestart+${LINENO}: zfs create -p -o mountpoint=none ${_jailzfs}/data
log "Register new temp jail in jail.conf"
# "<name> {}" entries inherit everything from the global template.
grep -qxF "${_targetname} {}" ${_jailconf} || \
printf "${_targetname} {}\n" >> ${_jailconf}
jls -j ${_targetname} > /dev/null 2>&1
if [ $? -ne 0 ]
then
log "Start jail"
${_jailcmd} -c ${_targetname} || \
fatal jail_singlestart+${LINENO}: ${_jailcmd} -c ${_targetname}
fi
if [ ${_interactive} -eq 1 ]
then
jail_shell ${_targetname}
fi
}
# Open an interactive zsh inside the jail (after a cosmetic pfetch banner;
# its errors are silenced). Exits 4 if the shell cannot be started.
# Arguments: $1 - jail name.
jail_shell ( )
{
local _targetname
_targetname=$1
log "Go to shell"
jexec -l ${_targetname} /bin/sh -c "pfetch" 2>/dev/null
jexec -l ${_targetname} /bin/sh -c "zsh" || exit 4
}
# Stop a single running jail via jail -r; abort the script on failure.
# Arguments: $1 - jail name.
# (Removed the unused local _origin left over from copy/paste.)
jail_singlestop ( )
{
local _targetname
_targetname=$1
${_jailcmd} -r ${_targetname} || \
fatal jail_singlestop+${LINENO}: ${_jailcmd} -r ${_targetname}
}
# Rename a jail: stop it, kill any processes still holding files under its
# directory, rename the ZFS dataset, patch the jail config, and restart it
# under the new name.
# Arguments: $1 - new jail name (old name comes from -j / _jailname).
jail_rename ( )
{
local _newname
_newname=$1
jls -j ${_jailname} > /dev/null 2>&1
if [ $? -eq 0 ]
then
${_jailcmd} -r ${_jailname} || \
fatal jail_rename+${LINENO}: ${_jailcmd} -r ${_jailname}
fi
# Kill leftover processes whose open files pin the jail directory.
for pid in $(procstat -fa | grep -F "${_dirjail}/${_jailname}" | cut -w -f1 | uniq)
do
print "Killing "$pid
procstat -c $pid
kill $pid
done
# dataset
zfs rename ${_zfsjail}/${_jailname} ${_zfsjail}/${_newname} || \
fatal jail_rename+${LINENO}: zfs rename ${_zfsjail}/${_jailname} ${_zfsjail}/${_newname}
# jail.conf
# NOTE(review): this edits /etc/jail.conf, but jails are registered in
# ${_jailconf} (jailize.jail.conf) by jail_singlestart and started with
# "jail -f ${_jailconf}" — confirm which file is intended here.
sed -i '' -e "s#^${_jailname} {#${_newname} {#g" /etc/jail.conf || \
fatal jail_rename+${LINENO}: sed -i '' -e "s#^${_jailname} {#${_newname} {#g" /etc/jail.conf
# restart jail
${_jailcmd} -c ${_newname} || \
fatal jail_rename+${LINENO}: ${_jailcmd} -c ${_newname}
}
# List every jail dataset directly under ${_zfsjail} with a colored status:
# running, stopped, or "basejail" for templates whose name starts with
# "base" (those are never expected to run).
jail_list ( )
{
local _jailstatus
for jailname in $(zfs list -H -d 1 -o name ${_zfsjail} | tail +2 | sed -e "s#${_zfsjail}/##"); do
if jls -n -j $jailname > /dev/null 2>&1;
then
_jailstatus="${GREEN} running ${NC}"
else
# POSIX test uses '=' for string comparison; '==' is a bashism and this
# script runs under /bin/sh. Prefix-strip test: true iff the name does
# NOT start with "base".
if [ "${jailname#base}" = "${jailname}" ];
then
_jailstatus="\`${RED}stopped ${NC}"
else
_jailstatus="*${YELLOW}basejail${NC}"
fi
fi
echo -e $_jailstatus $jailname
done
}
# Delete one jail, or the whole numbered batch when -n <quantity> was given.
jail_deletebulk ( )
{
  if [ ${_quantity} -ne 1 ]; then
    i=0
    while [ "$i" -lt "$_quantity" ]; do
      i=$((i + 1))
      jail_delete "${_jailname}$(printf %02d $i)"
    done
  else
    jail_delete ${_jailname}
  fi
}
# Delete a jail completely: stop it if running, kill processes holding files
# under its directory, then destroy its ZFS dataset recursively.
# Arguments: $1 - jail name.
jail_delete ( )
{
local _jailzfs _targetname
_targetname=$1
_jailzfs=${_zfsjail}/${_targetname}
# FIXME: cannot open 'tank/guests/vm/jails/base1202': dataset does not exist
# Check if dataset exists
zfs get name ${_jailzfs} > /dev/null 2>&1
if [ $? -eq 0 ]
then
print "Removing jail ${_targetname}"
jls -j ${_targetname} > /dev/null 2>&1
if [ $? -eq 0 ]
then
${_jailcmd} -r ${_targetname} || fatal delete_single+${LINENO}: ${_jailcmd} -r ${_targetname}
fi
# Kill leftover processes that would keep the dataset busy.
for pid in $(procstat -fa | grep -F "${_dirjail}/${_targetname} " | cut -w -f2 | uniq)
do
print "Killing "$pid $(procstat -c $pid)
kill $pid
done
zfs destroy -r ${_jailzfs} || fatal delete_single+${LINENO}: zfs destroy -r ${_jailzfs}
print "Jail ${_targetname} removed"
else
print_error "${_targetname} doesn't exist"
jail_list
fi
}
# Show files modified in the jail relative to its clone origin (the basejail
# snapshot): zfs diff filtered to regular-file changes, with the jail path
# prefix stripped.
jail_diff ( )
{
local _basedataset _jaildataset
_jaildataset=${_zfsjail}/${_jailname}
_basedataset=$(zfs get -H -o value origin ${_jaildataset})
zfs diff -F ${_basedataset} ${_jaildataset} | \
grep "[[:space:]]F[[:space:]]" | \
cut -w -f3 | \
sed -e "s#${_dirjail}/${_jailname}##g" | \
sort | uniq
}
# Forward a TCP port (proto=6) from the host to the jail by adding a
# redirectport entry on the first NAT node, using the jail's current
# DHCP-assigned address.
# Arguments: $1 - port number (jail name comes from -j / _jailname).
jail_forward ( )
{
local _jailaddress _ifname _port
_ifname="${NG_IFPREFIX}${_jailname}"
_jailaddress=$(jexec ${_jailname} ifconfig ${_ifname} inet | grep inet | cut -f3 -w)
_port=$1
ngctl msg jail_nat0: redirectport { local_addr=${_jailaddress} local_port=${_port} alias_port=${_port} proto=6 }
}
# --- Entry point: first word is the subcommand, then getopts flags. ---
_cmd=$1
shift
while getopts "h?a:b:cij:n:r:v" opt; do
case "$opt" in
h|\?)
helpme
exit 0
;;
a) _action=$OPTARG
;;
b) _basejail=$OPTARG
check_len $OPTARG
;;
c) _clean=1
;;
i) _interactive=1
;;
j) _jailname=$OPTARG
check_len $OPTARG
;;
n) _quantity=$OPTARG
;;
r) _release=$OPTARG
;;
v) _verbose=1
;;
esac
done
shift $((OPTIND-1))
# All jail(8) invocations go through _jailcmd so -v adds verbosity globally.
if [ $_verbose -ne 0 ] ; then
_jailcmd="/usr/sbin/jail -v -f ${_jailconf}"
else
_jailcmd="/usr/sbin/jail -f ${_jailconf}"
fi
# Subcommands that consume remaining args directly.
case "$_cmd" in
vnet)
vnet_main
exit 0
;;
base)
base_main $@
exit 0
;;
esac
### Jail simplified cases
# Remaining subcommands accept at most one positional argument, which (except
# for "forward", whose argument is a port) is treated as the jail name.
if [ $# -gt 1 ];
then
helpme
exit 1
fi
if [ $# -eq 1 -a "$_cmd" != "forward" ];
then
_jailname=$1
fi
case "$_cmd" in
init)
jail_init
exit 0
;;
start)
jail_bulkstart
exit 0
;;
stop)
jail_bulkstop
exit 0
;;
shell)
jail_shell ${_jailname}
exit 0
;;
list)
jail_list | sort
exit 0
;;
delete)
jail_deletebulk
exit 0
;;
delta)
jail_diff
exit 0
;;
rename)
jail_rename $@
exit 0
;;
forward)
jail_forward $@
exit 0
;;
*)
helpme
esac
| true |
797f03f024ae162d4ef668d75a44a7a6b2894ced
|
Shell
|
ceylanbaysalaydin/HYF-Module-HTMLCSSGIT
|
/week1/commands.txt
|
UTF-8
| 496 | 3.421875 | 3 |
[] |
no_license
|
#!/bin/bash
#1-How do I create a hidden file or folder? How do I display it in the CLI?
#Create hidden folder
mkdir .hidden-folder
#Create hidden file
touch .hidden-file
#Display hidden file and folders
ls -a
#2-How do I create multiple nested directories, like /c/Users/myusername/these/folders/are/just/for/fun?
mkdir -p nested/folder/just/for/fun
#3-How do I append a message to a file, without a newline character
# Fixed: plain echo appends a trailing newline, which contradicts the
# question; printf '%s' (or echo -n) appends the message with no newline.
printf '%s' "first message" >> file.txt && printf '%s' "second message" >> file.txt
| true |
ef177009072a139b85ec03d61938382d4bfb3d1d
|
Shell
|
hce-project/hce-bundle
|
/src/usr/bin/deploy/dts-prereq-test.sh
|
UTF-8
| 970 | 3.59375 | 4 |
[] |
no_license
|
#! /bin/bash
#
# DTS prerequisite components
# availability test (check for PHP
# and Python modules/includes)
#
# Helper definitions: chk_php_* and chk_py_* functions plus the
# ${api_groups}, ${pypkgs} and ${pymodules} lists come from these files.
. ./cfg/screen_cfg.sh
. ./cfg/php.sh
. ./cfg/py.sh
# PHP includes list
# zmsg.php plus one <group>.inc.php per configured API group.
inclist="zmsg.php"
for i in ${api_groups}; do
inclist="${inclist} ${i}.inc.php"
done
# test include file presence on disk
echo "Checking for PHP includes... "
chk_php_includes ${inclist}
echo "Checking if PHP modules are available... "
chk_php_module "zmq"
# check for 'zmsg' class existence
echo "Checking if classes are available... "
chk_php_class_exist zmsg.php zmsg
# check for API groups (listed in ${api_groups})
echo "Checking if API functions are available... "
chk_php_api_groups ${api_groups}
echo "Checking Python modules that are installed (are in pip base)... "
for p in ${pypkgs}; do
chk_py_module_installed "${p}"
done
echo "Checking Python modules that could be included... "
for m in ${pymodules}; do
chk_py_module_inc "${m}"
done
echo -ne "All OK\n"
| true |
f6218402dcf07e4ca822922879d5c98706fd0378
|
Shell
|
ghdl/docker
|
/.todo/gh-pages.sh
|
UTF-8
| 2,464 | 3.921875 | 4 |
[] |
no_license
|
#!/bin/sh
# CI script for building and publishing the GHDL website to the gh-pages
# branch. Two-phase: without DEPLOY set it builds docs + hugo site into
# ../out; with DEPLOY set it commits ../out and pushes via a deploy key.
set -e # Exit with nonzero exit code if anything fails
TARGET_BRANCH="gh-pages"
REPO=`git config remote.origin.url`
# Clone the GitHub wiki and rewrite [[page|target]] wiki links into Hugo
# relref shortcodes so the pages can be rendered as site content.
getWiki() {
printf "\n[GH-PAGES] Clone wiki\n"
git clone "${REPO%.*}.wiki.git" content/wiki
cd content/wiki
rm -rf wip .git
printf "\n[GH-PAGES] Adapt wiki pages\n"
for f in *.md; do
sed -i -r 's/\[\[(.*)\|(.*)\]\]/[\1]({{< relref "wiki\/\2.md" >}})/g' ./*.md
#name="$`sed -e 's/-/ /g' <<< $f`"
#printf -- "---\ntitle: \"%s\"\ndescription: \"%s\"\ndate: \"%s\"\nlastmod: \"%s\"\n---\n$(cat $f)" "${name%.*}" "${f%.*}" $(git log -1 --format="%ai" -- "$f" | cut -c1-10) $(date +%Y-%m-%d -r "$f") > $f
done;
}
if [ "$DEPLOY" = "" ]; then
# Build phase: render versioned docs via buildthedocs, then the hugo site.
curl -L https://raw.githubusercontent.com/buildthedocs/btd/master/btd.sh | sh -s build -d -n "GHDL" -v "builders,rtd2travis,ghdl-0.35"
#"builders,v0.35,v0.34"
mv ../btd_builds/html ghdl-io/static/doc/
# getWiki
printf "\n[GH-PAGES] Clone the '$TARGET_BRANCH' to 'out' and clean existing contents\n"
git clone -b "$TARGET_BRANCH" "$REPO" ../out
# NOTE(review): '**' globbing is a bashism; under plain /bin/sh this may
# not clear nested content as intended — confirm the CI shell.
rm -rf ../out/**/* || exit 0
# hugo may warn non-fatally; tolerate its exit status.
set +e
docker run --rm -t \
-v /$(pwd):/src \
-w //src/ghdl-io \
btdi/hugo -DEF -d hugo_out
set -e
cp -r ghdl-io/hugo_out/. ../out
rm -rf ghdl-io/static/doc
else
# Pull requests and commits to other branches shouldn't try to deploy, just build to verify
if [ "$TRAVIS_PULL_REQUEST" != "false" ]; then
printf "\nSkipping pages deploy\n"
exit 0
fi
cd ../out
git config user.name "Travis CI"
git config user.email "travis@gh-pages"
printf "\n[GH-PAGES] Add changes\n"
git add .
# If there are no changes to the compiled out (e.g. this is a README update) then just bail.
if [ $(git status --porcelain | wc -l) -lt 1 ]; then
echo "No changes to the output on this push; exiting."
exit 0
fi
git commit -am "deploy to github pages: `git rev-parse --verify HEAD`"
printf "\n[GH-PAGES] Get the deploy key \n"
# by using Travis's stored variables to decrypt deploy_key.enc
eval `ssh-agent -s`
openssl aes-256-cbc -K $encrypted_0198ee37cbd2_key -iv $encrypted_0198ee37cbd2_iv -in ../ghdl/ghdl-io/deploy_key.enc -d | ssh-add -
printf "\n[GH-PAGES] Push to $TARGET_BRANCH \n"
# Now that we're all set up, we can push.
# Rewrite the https remote URL to the ssh form so the deploy key is used.
git push `echo $REPO | sed -e 's/https:\/\/github.com\//git@github.com:/g'` $TARGET_BRANCH
fi
| true |
1cb37b4a2a20290e89bccef7e7de485390d02af8
|
Shell
|
phoenix-rtos/phoenix-rtos-build
|
/build.sh
|
UTF-8
| 4,647 | 3.734375 | 4 |
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bash
#
# Shell script for building Phoenix-RTOS based firmware
#
# Main builder
#
# Copyright 2018, 2019 Phoenix Systems
# Author: Kaja Swat, Aleksander Kaminski, Pawel Pisarczyk
#
set -e
# Colon-separated list of dirs to overlay the default rootFS.
# It can be overwritten by build.project scripts.
ROOTFS_OVERLAYS=""
. ./phoenix-rtos-build/build.subr
. ./build.project
PREFIX_PROJECT="$(pwd)"
# TODO: Remove
# To preserve compabilitiy with project scripts, TOPDIR is set
TOPDIR="$PREFIX_PROJECT"
# Some makefiles add "$PROJECT_PATH/" to their include path so it has to be set
if [ -z "$PROJECT_PATH" ]; then
echo "PROJECT_PATH is not set (or is empty)"
exit 1;
fi
PREFIX_BUILD="$PREFIX_PROJECT/_build/$TARGET"
PREFIX_BUILD_HOST="$PREFIX_PROJECT/_build/host-generic-pc"
PREFIX_FS="$PREFIX_PROJECT/_fs/$TARGET"
PREFIX_BOOT="$PREFIX_PROJECT/_boot/$TARGET"
PREFIX_PROG="$PREFIX_BUILD/prog/"
PREFIX_PROG_STRIPPED="$PREFIX_BUILD/prog.stripped/"
PREFIX_A="$PREFIX_BUILD/lib/"
PREFIX_H="$PREFIX_BUILD/include/"
PLO_SCRIPT_DIR="$PREFIX_BUILD/plo-scripts"
PREFIX_ROOTFS="$PREFIX_FS/root/"
: "${PREFIX_ROOTSKEL:="$PREFIX_PROJECT/_fs/root-skel/"}"
# Default project's overlay directory, it does not have to exist.
ROOTFS_OVERLAYS="$PROJECT_PATH/rootfs-overlay:${ROOTFS_OVERLAYS}"
CC=${CROSS}gcc
AS=${CROSS}as
LD=${CROSS}ld
AR=${CROSS}ar
OBJCPY=${CROSS}objcopy
MAKEFLAGS="--no-print-directory -j 9"
export TARGET TARGET_FAMILY TARGET_SUBFAMILY TARGET_PROJECT PROJECT_PATH TOPDIR PREFIX_PROJECT PREFIX_BUILD\
PREFIX_BUILD_HOST PREFIX_FS PREFIX_BOOT PREFIX_PROG PREFIX_PROG_STRIPPED PREFIX_A\
PREFIX_H PREFIX_ROOTFS CROSS CFLAGS CXXFLAGS LDFLAGS CC LD AR AS CLEAN MAKEFLAGS DEVICE_FLAGS PLO_SCRIPT_DIR
# export flags for ports - call make only after all necessary env variables are already set
EXPORT_CFLAGS="$(make -f phoenix-rtos-build/Makefile.common export-cflags)"
# export only generic flags: "-z xxx", "-Lxxx", "-q"
EXPORT_LDFLAGS="$(make -f phoenix-rtos-build/Makefile.common export-ldflags | grep -E -o "(-z [^ ]+)|(-L[^ ]+)|(-q)" | xargs)"
export EXPORT_CFLAGS EXPORT_LDFLAGS
#
# Parse command line
#
if [ $# -lt 1 ]; then
echo "Build options should be specified!"
echo "Usage: build.sh [clean] [all] [fs] [core] [test] [ports] [project] [image]";
exit 1;
fi
B_FS="n"
B_CORE="n"
B_PORTS="n"
B_PROJECT="n"
B_IMAGE="n"
B_TEST="n"
# GA CI passes all params as quoted first param - split on ' ' if necessary
# e.g. `build.sh "clean all"`: copy "$@" into an array, then when exactly one
# argument was supplied, re-split that single word on whitespace with
# `read -ra` so each token becomes its own ARGS element.
ARGS=("$@")
[ "$#" -eq 1 ] && read -ra ARGS <<< "$1"
for arg in "${ARGS[@]}"; do
case "$arg"
in
clean)
CLEAN="clean";;
fs)
B_FS="y";;
core)
B_CORE="y";;
test|tests)
B_TEST="y";;
ports)
B_PORTS="y";;
project)
B_PROJECT="y";;
image)
B_IMAGE="y";;
all)
B_FS="y"; B_CORE="y"; B_PORTS="y"; B_PROJECT="y"; B_IMAGE="y";;
*)
echo "Unknown build option: \"$arg\"."
exit 1;;
esac;
done
#
# Clean if requested
#
if [ -n "$CLEAN" ]; then
b_log "Cleaning build dirs"
rm -rf "$PREFIX_BUILD" "$PREFIX_BUILD_HOST"
rm -rf "$PREFIX_FS"
rm -rf "$PREFIX_BOOT"
fi
#
# Prepare
#
mkdir -p "$PREFIX_BUILD"
mkdir -p "$PREFIX_BUILD_HOST"
mkdir -p "$PREFIX_BOOT"
mkdir -p "$PREFIX_PROG" "$PREFIX_PROG_STRIPPED"
if declare -f "b_prepare" > /dev/null; then
b_prepare
fi
if command -v git > /dev/null && [ -a ".git" ]; then
echo " $(git rev-parse HEAD) $(basename "$(git rev-parse --show-toplevel)") ($(git describe --always --dirty))" > "${PREFIX_BUILD}/git-version"
git submodule status --recursive >> "${PREFIX_BUILD}/git-version"
else
echo "not available" > "${PREFIX_BUILD}/git-version"
fi
#
# Preparing filesystem
#
if [ "${B_FS}" = "y" ] && [ -d "${PREFIX_ROOTSKEL}" ]; then
b_log "Preparing filesystem"
mkdir -p "${PREFIX_ROOTFS}"
cp -a "${PREFIX_ROOTSKEL}/." "${PREFIX_ROOTFS}"
mkdir -p "$PREFIX_ROOTFS/"{dev,etc,local,data,mnt,tmp,var,usr}
# ROOTFS_OVERLAYS contains colon-separated path
(
IFS=:
for path in $ROOTFS_OVERLAYS; do
if [ -d "$path" ]; then
echo "Applying overlay: $path"
cp -a "${path}/." "${PREFIX_ROOTFS}"
else
echo "Not existing rootfs overlay: $path"
fi
done
)
b_log "Saving git-version"
install -m 664 "${PREFIX_BUILD}/git-version" "$PREFIX_FS/root/etc"
fi
#
# Build core part
#
if [ "${B_CORE}" = "y" ]; then
"./phoenix-rtos-build/build-core-${TARGET_FAMILY}-${TARGET_SUBFAMILY}.sh"
fi
#
# Build test part
#
if [ "${B_TEST}" = "y" ]; then
b_build_test
fi
#
# Build ports
#
if [ "${B_PORTS}" = "y" ] && [ -d phoenix-rtos-ports ]; then
./phoenix-rtos-ports/build.sh
fi
#
# Build project part
#
if [ "${B_PROJECT}" = "y" ]; then
b_build
fi
#
# Build final filesystems
#
if [ "${B_IMAGE}" = "y" ]; then
b_image
fi
| true |
5ba94249379bfc9ae74e080912cdfaead4de52f1
|
Shell
|
lmangani/homer-docker
|
/mysql/run.sh
|
UTF-8
| 525 | 3.359375 | 3 |
[] |
no_license
|
#!/bin/bash
# ----------------------------------------------------
# HOMER 5 Docker (http://sipcapture.org)
# ----------------------------------------------------
# -- Launches the bundled MySQL server unless the
# -- container is configured to use a remote database.
# ----------------------------------------------------
# Reads from environment variables to set:
#   USE_REMOTE_MYSQL  If "true", do not start mysqld

case "$USE_REMOTE_MYSQL" in
  true)
    echo "MySQL existing, not necessary."
    ;;
  *)
    ./entrypoint.sh mysqld
    ;;
esac
| true |
5fbe8c35ea7aac406e6b21b2143747627befa518
|
Shell
|
soartec-lab/terraform_stationery
|
/bin/terraform.exec.sh
|
UTF-8
| 1,293 | 3.53125 | 4 |
[] |
no_license
|
#!/bin/bash
# Thin wrapper around `terraform init` + `terraform <action>` for one
# environment.
#
# Usage: terraform.exec.sh <action> <env>
#   action : plan | apply
#   env    : ploduction | staging

ACTION=$1
ENV=$2

# --- logging helpers -------------------------------------------------------

log() {
  echo "$(date '+%Y-%m-%d %H:%M:%S')" $*
}

debug() {
  # Emitted only when DEBUG=1 is set in the environment.
  # (label typo "DEUBG" fixed)
  if [ "${DEBUG}" = 1 ]; then
    log DEBUG $*
  fi
}

error() {
  log ERROR $*
}

info() {
  log INFO $*
}

fail_exit() {
  # Log an error and abort the whole script.
  log ERROR $*
  exit 1
}

arguments () {
  # Print usage; $1 names the missing argument.
  # (order fixed: the script takes Action first, then Env)
  echo "ArgumentsError: Require '$1'"
  echo \
"
USAGE
 $(basename "$0") Action Env

Arguments
 Action : plan | apply
 Env : ploduction | staging
"
}

# Abort with a message on any unexpected command failure.
# Single-quoted so the trap body is passed as one intact argument.
trap 'fail_exit "予期せぬエラーが発生しました"' ERR

# --- argument validation ---------------------------------------------------

if [ "$ACTION" = '' ]
then
  arguments Action
  exit 1
fi

if [ "$ENV" = '' ]
then
  arguments Env
  exit 1
fi

# State file name. (Stray "}}" removed: it used to corrupt the value into
# "<env>.tfstate}}".)
TFSTATE_FILE="${ENV}.tfstate"
# Region from the EC2 metadata availability zone, with the trailing AZ
# letter stripped. (Stray "]" removed: it used to be appended to the value.)
AWS_REGION=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone | sed -e s/[^0-9]$//g)

# Pulls S3_BUCKET_NAME (and other settings) into the environment.
source ../environments/${ENV}/config/${ENV}.conf

# (Stray "}}}" removed from the region backend-config argument.)
terraform init \
 -backend=true \
 -backend-config="bucket=${S3_BUCKET_NAME}" \
 -backend-config="key=${TFSTATE_FILE}" \
 -backend-config="region=${AWS_REGION}"

terraform ${ACTION} \
 -state=${TFSTATE_FILE} \
 -refresh=true \
 -var-file=../tfvars/common.tfvars \
 -var-file=../environments/${ENV}/tfvars/${ENV}.tfvars \
 -var-file=../environments/${ENV}/tfvars/credential.tfvars

# plan makes no state changes, so only clean up the backup afterwards
if [ "$ACTION" != "plan" ]
then
  rm -f ${TFSTATE_FILE}.backup
fi

echo "DONE!"
| true |
c9b597d310d6ba960486ffbc7cf5c4d1547e2836
|
Shell
|
Gustavhol/dotfiles
|
/.config/zsh/path.zsh
|
UTF-8
| 870 | 2.921875 | 3 |
[] |
no_license
|
# ____ ___ ________ __
# / __ \/ |/_ __/ / / /
# / /_/ / /| | / / / /_/ /
# / ____/ ___ |/ / / __ /
# /_/ /_/ |_/_/ /_/ /_/
#
# Prepend to path if entry does not exist.
# This version from: https://superuser.com/questions/39751/add-directory-to-path-if-its-not-already-there
# Prepend directory $1 to PATH if it exists and is not already on PATH.
# Fix: "$1" is now quoted in the -d test so directories whose names contain
# spaces (or glob characters) are handled correctly (SC2086).
path_prepend() {
    if [ -d "$1" ] && [[ ":$PATH:" != *":$1:"* ]]; then
        # ${PATH:+...} avoids a trailing ":" when PATH was empty
        PATH="$1${PATH:+":$PATH"}"
    fi
}
# Doom emacs
DOOM_PATH=$HOME/.emacs.d/bin
path_prepend $DOOM_PATH
#python3
PYTHON3_PATH=/usr/local/opt/python/libexec/bin
path_prepend $PYTHON3_PATH
# Go
export GOPATH=$HOME/.go
export GOBIN=$GOPATH/bin
path_prepend $GOBIN
# User path
path_prepend $HOME/.local/bin
# System dirs; later calls prepend in front of earlier ones, so
# /usr/local/sbin ends up first on PATH.
path_prepend /usr/bin
path_prepend /usr/sbin
path_prepend /usr/local/bin
path_prepend /usr/local/sbin
export PATH
| true |
156cd1326caa7d259aff08c18efd5a169e953a29
|
Shell
|
kickops/teleport
|
/files/teleport-pam-exec.sh
|
UTF-8
| 241 | 2.578125 | 3 |
[] |
no_license
|
#!/bin/sh
# Teleport PAM hook: ensure a local account exists for the incoming login.
# TELEPORT_USERNAME / TELEPORT_ROLES / TELEPORT_LOGIN are expected in the
# environment (set by Teleport's PAM integration -- TODO confirm).
COMMENT="User ${TELEPORT_USERNAME} with roles ${TELEPORT_ROLES} created by Teleport."
# Create the user (home dir, group teleport-admin) only when `id -u` reports
# it does not exist yet; all command output is discarded.
id -u "${TELEPORT_LOGIN}" > /dev/null 2>&1 || /usr/sbin/useradd -m -g teleport-admin -c "${COMMENT}" "${TELEPORT_LOGIN}" > /dev/null 2>&1
# Always exit 0, regardless of the useradd outcome.
exit 0
| true |
cea1db78015097715503f84ac48196d0c1799c5c
|
Shell
|
yugr/ShlibVisibilityChecker
|
/tests/basic/run.sh
|
UTF-8
| 941 | 3.65625 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# Copyright 2021-2022 Yury Gribov
#
# The MIT License (MIT)
#
# Use of this source code is governed by MIT license that can be
# found in the LICENSE.txt file.
# This is a simple test for ShlibVisibilityChecker functionality.
# It builds libxyz.so under three macro combinations, diffs the API declared
# in the header against the ABI exported by the binary, and compares the
# result with per-test reference files (<name>.ref).
set -eu
cd $(dirname $0)
if test -n "${GITHUB_ACTIONS:-}"; then
set -x
fi
CFLAGS='-g -O2 -Wall -Wextra -Werror -shared -fPIC'
ROOT=$PWD/../..
# API as declared in the header
$ROOT/bin/read_header_api -r. xyz.h > api.txt
errors=0
# Each entry is "<test name>;<compiler flags>", split on the ';'
for name_flags in 'a;-DA' 'b;-DB' 'ab;-DA -DB'; do
name=${name_flags%;*}
flags=${name_flags#*;}
${CC:-gcc} $CFLAGS $flags -shared -fPIC xyz.c -o libxyz.so
# ABI actually exported by the freshly built library
${PYTHON:-python3} $ROOT/read_binary_api --permissive libxyz.so > abi.txt
# Lines unique to either file = API/ABI mismatches (comm may return non-zero)
(comm -3 api.txt abi.txt || true) > api_abi.diff
if ! diff -q $name.ref api_abi.diff; then
echo >&2 "Invalid results for test $name"
diff $name.ref api_abi.diff >&2
errors=1
fi
done
if test $errors = 0; then
echo SUCCESS
else
echo FAIL
fi
| true |
df150bcd3cb0cf39c2de0e297e39e10783a8b0f4
|
Shell
|
LeChinok/outset-scripts
|
/usr/local/outset/login-once/DesktopPicture.sh
|
UTF-8
| 389 | 2.578125 | 3 |
[] |
no_license
|
#!/bin/bash
## Version 1.0
## Compiled by Kevin M. Cox
## Sets the initial desktop picture for all users at first login
## Should be set in Munki to REQUIRE the custom DesktopPictures.pkg
# Set the user's desktop picture as The Daily News 175th Anniversary logo
# (drives Finder via AppleScript; the PNG is installed by DesktopPictures.pkg)
osascript -e 'tell application "Finder" to set desktop picture to POSIX file "/Library/Desktop Pictures/GalvNews 175 Years.png"'
| true |
5de21c1514f138f54edfc940090225c9dd272a96
|
Shell
|
sunxiaolin2016/bsp_tools
|
/release/tools/config_ds9478.sh
|
UTF-8
| 10,026 | 3.234375 | 3 |
[] |
no_license
|
#!/bin/sh
#addr_bus=1
#addr_bus=3
addr_ds947=0x0c
addr_ds948=0x2c
read_num=0
function usage()
{
echo "ds9478 [mode]"
printf "mode:\n 1: read all register ds947/948\n 2: Reset FPD PLL\n 3: Reset OLDI PLL\n 4: Reset OLDI block\n 5: Rest ds947\n "
printf "6: Disable color bars test on ds947\n 7: Set color bars with external timing clock on 947\n 8: Set color bars with internal clock timing.(1920*720)\n "
printf "9: Set color bars with internal clock timing.(1920*1080)\n 10: Set color bars with internal clock timing.(960*1280)\n"
}
function read_all_register()
{
# Dump every register (0x00-0xff) of the DS947 and the DS948 via i2cget.
# Fix: `seq 0x00 0xff` fails because seq(1) rejects hexadecimal arguments,
# so the original loops never ran; iterate decimal 0..255 instead
# (printf still renders the address in hex, and i2cget accepts decimal).
echo "========================================================"
echo "Start read ds947 register"
for i in $(seq 0 255)
do
printf "add:0x%x\n" $i
i2cget -f -y $addr_bus $addr_ds947 $i
done
echo "start read ds948 register"
for i in $(seq 0 255)
do
printf "add:0x%x\n" $i
i2cget -f -y $addr_bus $addr_ds948 $i
done
echo "========================================================"
}
function set_external_color_timing_color()
{
echo "========================================================"
echo "Enable color bars with external timing clock on 947"
i2cset -f -y $addr_bus $addr_ds947 0x64 0x05
echo "========================================================"
}
function Disable_color_bars_test()
{
echo "========================================================"
echo "Disable color bars test on 947"
i2cset -f -y $addr_bus $addr_ds947 0x65 0x00
i2cset -f -y $addr_bus $addr_ds947 0x64 0x00
echo "========================================================"
}
function Reset_FPD_pll()
{
echo "========================================================"
echo "Reset FPD PLL"
i2cset -f -y $addr_bus $addr_ds947 0x40 0x14
i2cset -f -y $addr_bus $addr_ds947 0x41 0x49
i2cset -f -y $addr_bus $addr_ds947 0x42 0x10
i2cset -f -y $addr_bus $addr_ds947 0x42 0x00
echo "========================================================"
}
function Reset_OLDI_pll()
{
echo "========================================================"
echo "Reset OLDI PLL"
i2cset -f -y $addr_bus $addr_ds947 0x40 0x10
i2cset -f -y $addr_bus $addr_ds947 0x41 0x49
i2cset -f -y $addr_bus $addr_ds947 0x42 0x10
i2cset -f -y $addr_bus $addr_ds947 0x42 0x00
echo "========================================================"
}
function Reset_OLDI_block()
{
echo "========================================================"
echo "Reset OLDI block"
i2cset -f -y $addr_bus $addr_ds947 0x40 0x10
i2cset -f -y $addr_bus $addr_ds947 0x41 0x49
i2cset -f -y $addr_bus $addr_ds947 0x42 0x16
i2cset -f -y $addr_bus $addr_ds947 0x41 0x47
i2cset -f -y $addr_bus $addr_ds947 0x42 0x20
i2cset -f -y $addr_bus $addr_ds947 0x42 0xa0
i2cset -f -y $addr_bus $addr_ds947 0x42 0x20
i2cset -f -y $addr_bus $addr_ds947 0x42 0x00
i2cset -f -y $addr_bus $addr_ds947 0x41 0x49
i2cset -f -y $addr_bus $addr_ds947 0x42 0x00
echo "========================================================"
}
function Reset_Ds947()
{
echo "========================================================"
#patch=`find /sys/ -name *pdb* `
#echo "$patch"
echo "Reset Ds947 pdb"
echo 1 > "$patch/value"
sleep 1
echo 0 > "$patch/value"
echo "========================================================"
}
function set_internal_clock_timing_1920_720()
{
echo "========================================================"
echo "Internal clock timing Pattern(1920 * 720)"
# 1920*720 test pattern
#200M / div = pixclk
i2cset -f -y $addr_bus $addr_ds947 0x66 0x03
i2cset -f -y $addr_bus $addr_ds947 0x67 0x02
#h total lowest 8bit 2000= 0x7D0
i2cset -f -y $addr_bus $addr_ds947 0x66 0x04
i2cset -f -y $addr_bus $addr_ds947 0x67 0xd0
# ht most 4bit is 0x7 vt lowest 4bit is 4 vt is 756=0x2f4
i2cset -f -y $addr_bus $addr_ds947 0x66 0x05
i2cset -f -y $addr_bus $addr_ds947 0x67 0x47
#vt most 8bit 756=0x2f4
i2cset -f -y $addr_bus $addr_ds947 0x66 0x06
i2cset -f -y $addr_bus $addr_ds947 0x67 0x2f
#h a lowest 8bit 1920= 0x780
i2cset -f -y $addr_bus $addr_ds947 0x66 0x07
i2cset -f -y $addr_bus $addr_ds947 0x67 0x80
# ha most 4bit is 0x7 vt lowest 4bit is 4 vt is 720=0x2d0
i2cset -f -y $addr_bus $addr_ds947 0x66 0x08
i2cset -f -y $addr_bus $addr_ds947 0x67 0x07
#vt most 8bit 756=0x2d0
i2cset -f -y $addr_bus $addr_ds947 0x66 0x09
i2cset -f -y $addr_bus $addr_ds947 0x67 0x2d
#hsw
i2cset -f -y $addr_bus $addr_ds947 0x66 0x0a
i2cset -f -y $addr_bus $addr_ds947 0x67 0x48
#vsw
i2cset -f -y $addr_bus $addr_ds947 0x66 0x0b
i2cset -f -y $addr_bus $addr_ds947 0x67 0x07
#hbp
i2cset -f -y $addr_bus $addr_ds947 0x66 0x0c
i2cset -f -y $addr_bus $addr_ds947 0x67 0x60
#vbp
i2cset -f -y $addr_bus $addr_ds947 0x66 0x0d
i2cset -f -y $addr_bus $addr_ds947 0x67 0x0a
#external clock:0x0c internal clock:0x04
i2cset -f -y $addr_bus $addr_ds947 0x65 0x04
i2cset -f -y $addr_bus $addr_ds947 0x64 0x05
echo "========================================================"
}
function set_internal_clock_timing_1920_1080()
{
echo "========================================================"
echo "Internal clock timing Pattern(1920 * 1080)"
# 1920*1080 test pattern
#200M / div = pixclk
i2cset -f -y $addr_bus $addr_ds947 0x66 0x03
i2cset -f -y $addr_bus $addr_ds947 0x67 0x02
#h total lowest 8bit 2000= 0x7D0
i2cset -f -y $addr_bus $addr_ds947 0x66 0x04
i2cset -f -y $addr_bus $addr_ds947 0x67 0xd0
# ht most 4bit is 0x7 vt lowest 4bit is 4 vt is 1188=0x4a4
i2cset -f -y $addr_bus $addr_ds947 0x66 0x05
i2cset -f -y $addr_bus $addr_ds947 0x67 0x47
#vt most 8bit 1188=0x4a4 //756=0x2f4
i2cset -f -y $addr_bus $addr_ds947 0x66 0x06
i2cset -f -y $addr_bus $addr_ds947 0x67 0x4a
#h a lowest 8bit 1920= 0x780
i2cset -f -y $addr_bus $addr_ds947 0x66 0x07
i2cset -f -y $addr_bus $addr_ds947 0x67 0x80
# ha most 4bit is 0x7 vt lowest 4bit is 0 vt is 1080=0x438
i2cset -f -y $addr_bus $addr_ds947 0x66 0x08
i2cset -f -y $addr_bus $addr_ds947 0x67 0x87
#vt most 8bit 1188=0x4a4
i2cset -f -y $addr_bus $addr_ds947 0x66 0x09
i2cset -f -y $addr_bus $addr_ds947 0x67 0x4a
#hsw
i2cset -f -y $addr_bus $addr_ds947 0x66 0x0a
i2cset -f -y $addr_bus $addr_ds947 0x67 0x0a
#vsw
i2cset -f -y $addr_bus $addr_ds947 0x66 0x0b
i2cset -f -y $addr_bus $addr_ds947 0x67 0x08
#hbp
i2cset -f -y $addr_bus $addr_ds947 0x66 0x0c
i2cset -f -y $addr_bus $addr_ds947 0x67 0x1e
#vbp
i2cset -f -y $addr_bus $addr_ds947 0x66 0x0d
i2cset -f -y $addr_bus $addr_ds947 0x67 0x4e
#external clock:0x0c internal clock:0x04
i2cset -f -y $addr_bus $addr_ds947 0x65 0x04
i2cset -f -y $addr_bus $addr_ds947 0x64 0x05
echo "========================================================"
}
function set_internal_clock_timing_960_1280()
{
echo "========================================================"
echo "Internal clock timing Pattern(960 * 1280)"
# 960*1280 test pattern
#200M / div = pixclk
i2cset -f -y $addr_bus $addr_ds947 0x66 0x03
i2cset -f -y $addr_bus $addr_ds947 0x67 0x02
#h total lowest 8bit 1040= 0x410 //0x7D0
i2cset -f -y $addr_bus $addr_ds947 0x66 0x04
i2cset -f -y $addr_bus $addr_ds947 0x67 0x10
# ht most 4bit is 0x4 vt lowest 4bit is c vt is 1388=0x56c
i2cset -f -y $addr_bus $addr_ds947 0x66 0x05
i2cset -f -y $addr_bus $addr_ds947 0x67 0xc4
#vt most 8bit 1388=0x56c //1188=0x4a4
i2cset -f -y $addr_bus $addr_ds947 0x66 0x06
i2cset -f -y $addr_bus $addr_ds947 0x67 0x56
#h a lowest 8bit 960=0x3c0
i2cset -f -y $addr_bus $addr_ds947 0x66 0x07
i2cset -f -y $addr_bus $addr_ds947 0x67 0xc0
# ha most 4bit is 0x3 vt lowest 4bit is 0 vt is 1280=0x500
i2cset -f -y $addr_bus $addr_ds947 0x66 0x08
i2cset -f -y $addr_bus $addr_ds947 0x67 0x03
#vt most 8bit 1388=0x56c
i2cset -f -y $addr_bus $addr_ds947 0x66 0x09
i2cset -f -y $addr_bus $addr_ds947 0x67 0x56
#hsw
i2cset -f -y $addr_bus $addr_ds947 0x66 0x0a
i2cset -f -y $addr_bus $addr_ds947 0x67 0x0a
#vsw
i2cset -f -y $addr_bus $addr_ds947 0x66 0x0b
i2cset -f -y $addr_bus $addr_ds947 0x67 0x08
#hbp
i2cset -f -y $addr_bus $addr_ds947 0x66 0x0c
i2cset -f -y $addr_bus $addr_ds947 0x67 0x1e
#vbp
i2cset -f -y $addr_bus $addr_ds947 0x66 0x0d
i2cset -f -y $addr_bus $addr_ds947 0x67 0x4e
#external clock:0x0c internal clock:0x04
i2cset -f -y $addr_bus $addr_ds947 0x65 0x04
i2cset -f -y $addr_bus $addr_ds947 0x64 0x05
echo "========================================================"
}
usage
patch=`find /sys/ -name *pdb* `
#echo "$patch"
addr_bus=`echo "${patch:39:1}" `
#echo "$addr_bus"
#read -p "enter you choice [1-5]:" choice
case $1 in
1)
read_all_register;;
# read_register > ds947-948.txt
# read -p "press [enter] key to continue..." Key
# read -n1 "press [enter] key to continue..." ;;
2)
Reset_FPD_pll;;
# read -p "press [enter] key to continue..." Key
# read "press [enter] key to continue..." ;;
3)
Reset_OLDI_pll;;
# read -p "press [enter] key to continue..." Key
# read "press [enter] key to continue..." ;;
4)
Reset_OLDI_block;;
# read -p "press [enter] key to continue..." Key
# read "press [enter] key to continue..." ;;
5)
Reset_Ds947;;
# read -p "press [enter] key to continue..." Key
# read "press [enter] key to continue..." ;;
6)
Disable_color_bars_test;;
# read -p "press [enter] key to continue..." Key
# read "press [enter] key to continue..." ;;
7)
set_external_color_timing_color;;
# read -p "press [enter] key to continue..." Key
# read "press [enter] key to continue..." ;;
8)
set_internal_clock_timing_1920_720;;
# read -p "press [enter] key to continue..." Key
# read "press [enter] key to continue..." ;;
9)
set_internal_clock_timing_1920_1080;;
# read -p "press [enter] key to continue..." Key
# read "press [enter] key to continue..." ;;
10)
set_internal_clock_timing_960_1280;;
# read -p "press [enter] key to continue..." Key
# read "press [enter] key to continue..." ;;
*)
echo "please enter num[1-10]";;
esac
| true |
2384d9de3e819bdbb164372de54bc440fdbb230a
|
Shell
|
sipian/autoscaling-sfc
|
/openstack installation guide/setup-openstack.sh
|
UTF-8
| 2,684 | 3.25 | 3 |
[] |
no_license
|
#!/bin/bash
# Interactive devstack (stable/pike) installer: prompts for the host's
# public IP, prepares the "stack" user, writes devstack's local.conf and
# runs stack.sh.

read -p "Enter Your public IP address: " IP
# Fix: was `$password="root"`, which expands to the bogus command `="root"`.
password="root"

function install_openstack {
    sudo apt-get upgrade
    sudo apt-get update
    sudo apt-get install -y python-systemd
    sudo useradd -s /bin/bash -d /opt/stack -m stack
    echo "stack ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/stack
    # NOTE(review): `sudo su -l stack` opens an interactive shell; the commands
    # below run as the original user once that shell exits -- TODO confirm intent.
    sudo su -l stack
    git clone --branch stable/pike https://git.openstack.org/openstack-dev/devstack
    cd /opt/stack/devstack

    # Write local.conf. The $ADMIN_PASSWORD references must reach the file
    # *literally* (devstack expands them itself); the old unquoted echo
    # expanded them in this shell, where ADMIN_PASSWORD is unset, leaving
    # the passwords empty. Escaping with \$ keeps them literal while $IP
    # is still substituted.
    cat > local.conf <<EOF
[[local|localrc]]
ADMIN_PASSWORD=secret
DATABASE_PASSWORD=\$ADMIN_PASSWORD
RABBIT_PASSWORD=\$ADMIN_PASSWORD
SERVICE_PASSWORD=\$ADMIN_PASSWORD
HOST_IP=$IP
EOF

    # Fix: `sudo echo ... >> /etc/hosts` performed the redirection as the
    # unprivileged user; `sudo tee -a` does the append with root rights.
    echo -e "\n\n$IP devstack" | sudo tee -a /etc/hosts
    ./stack.sh
}
function setup-openstack {
echo "modifying defualt security group"
sudo sysctl -w net.ipv4.ip_forward=1
echo 1 > /proc/sys/net/ipv4/ip_forward
/sbin/iptables -t nat -A POSTROUTING -o eno1 -j MASQUERADE
/sbin/iptables -A FORWARD -i eno1 -o br-ex -m state --state RELATED,ESTABLISHED -j ACCEPT
/sbin/iptables -A FORWARD -i br-ex -o eno1 -j ACCEPT
/sbin/iptables -t nat -A PREROUTING -s 192.168.136.0/24 -i eno1 -j REDIRECT
openstack image create --public --disk-format qcow2 --container-format bare --file xenial-server-cloudimg-amd64-disk1.img ubuntu
default_security_group=$(openstack security group list --project admin -c ID -f value)
openstack security group rule create \
--remote-ip 0.0.0.0/0 \
--protocol icmp \
--ingress \
--project admin \
--description "allow all ingress ICMP" \
$default_security_group
openstack security group rule create \
--remote-ip 0.0.0.0/0 \
--protocol icmp \
--egress \
--project admin \
--description "allow all egress ICMP" \
$default_security_group
#TCP
openstack security group rule create \
--remote-ip 0.0.0.0/0 \
--dst-port 1:65535 \
--protocol tcp \
--ingress \
--project admin \
--description "allow all ingress TCP" \
$default_security_group
openstack security group rule create \
--remote-ip 0.0.0.0/0 \
--dst-port 1:65535 \
--protocol tcp \
--egress \
--project admin \
--description "allow all egress TCP" \
$default_security_group
#UDP
openstack security group rule create \
--remote-ip 0.0.0.0/0 \
--dst-port 1:65535 \
--protocol udp \
--ingress \
--project admin \
--description "allow all ingress TCP" \
$default_security_group
openstack security group rule create \
--remote-ip 0.0.0.0/0 \
--dst-port 1:65535 \
--protocol udp \
--egress \
--project admin \
--description "allow all egress TCP" \
$default_security_group
echo "add rule for ssh"
}
install_openstack
echo "openstack-password -> secret"
| true |
5fa3c5c76ead5d6a7398a8d4b4debc8846165f82
|
Shell
|
sajan19/Practice_Coding
|
/Day 7/ques1RandomNumbers.sh
|
UTF-8
| 453 | 2.953125 | 3 |
[] |
no_license
|
#!/bin/bash -x
for((i=0; i<10; i++))
do
a[i]=$((RANDOM%900 + 99))
for((j=0;j<10;j++))
do
#echo ${a[@]};
#secondLargest=$(printf '%s\n' "${a[@]}" | sort -n | tail -2 | head -1)
#secondSmallest=$(printf '%s\n' "${a[@]}" | sort -n | tail -2 | head -1)
#secondLargest=$(printf '%s\n' "${a[@]}" | sort -n )
# | tail -2 | head -1)
#array=${a[@]};
#echo ${a[0]}
#echo ${a[5]}
#for((j=0; j<10; j++))
#do
echo b[j]=${a[i]}
done
done
echo ${a[@]}
echo ${b[@]}
| true |
ff8910aff38fe2b5f7add8ac945614d99ae5fffe
|
Shell
|
haochunqiu/lnmp-lite
|
/tools/ddos_deflate_install.sh
|
UTF-8
| 1,144 | 3.46875 | 3 |
[] |
no_license
|
#!/bin/sh
if [ -d '/usr/local/ddos' ]; then
echo; echo; echo "Please un-install the previous version first"
exit 0
else
mkdir /usr/local/ddos
fi
clear
echo; echo 'Installing DOS-Deflate 0.6'; echo
echo; echo -n 'Downloading source files...'
cp ../conf/ddos.conf /usr/local/ddos/ddos.conf
cp ../conf/ignore.ip.list /usr/local/ddos/ignore.ip.list
/sbin/ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:" >> /usr/local/ddos/ignore.ip.list;
chattr +i /usr/local/ddos/ignore.ip.list;
cp ../conf/ddos.sh /usr/local/ddos/ddos.sh
chmod 0755 /usr/local/ddos/ddos.sh
cp -s /usr/local/ddos/ddos.sh /usr/local/sbin/ddos
echo '...done'
echo; echo -n 'Creating cron to run script every minute.....(Default setting)'
/usr/local/ddos/ddos.sh --cron > /dev/null 2>&1
echo; echo 'DOS-Deflate Installation has completed.'
echo 'Config file is at /usr/local/ddos/ddos.conf'
echo 'ignore ip is at /usr/local/ddos/ignore.ip.list '
echo 'if you want edit it please use chattr -i /usr/local/ddos/ignore.ip.list first'
echo 'Please send in your comments and/or suggestions to zaf@vsnl.com'
echo
cat /usr/local/ddos/LICENSE | less
| true |
c5b64eba2ee4fe1f50e5f9ff94c5a1593386b5d8
|
Shell
|
Highstaker/RSS-Atom-telegram-bot
|
/run_RSS_bot.sh
|
UTF-8
| 243 | 2.734375 | 3 |
[] |
no_license
|
#!/bin/bash
# Wrapper that runs the RSS/Atom Telegram bot from its virtualenv and
# tears the background process down when this script exits.
env/bin/python3 rss_atom_bot.py &
PROC_PID=$!
echo "Bot process:$PROC_PID"
# kill background processes on exit
trap 'kill $(jobs -p)' EXIT
# wait for process to finish. Or simply until this script exits
wait $PROC_PID
exit 0;
| true |
7bd5abea134afca61e6a4713b4ecfe4769fbfa1d
|
Shell
|
sgtpepperpt/apdcDB
|
/proxy/cryptdb-proxy.sh
|
UTF-8
| 571 | 2.75 | 3 |
[] |
no_license
|
#!/bin/bash
#This script runs the CryptDB proxy
#usage: ./proxy.sh <proxy-address> <proxy-backend-to-mysql> <cryptdb-path> <cryptdb-pass>
PROXY_IP=$1
MYSQL_IP=$2
# prepare proxy launch
export EDBDIR=$3 #/home/pepper/apdc/proxy/cryptdb
export CRYPTDB_PASS=$4
#Y1VzkAmF
# kill whatever still holds the proxy port before rebinding it
fuser -k 3307/tcp
# now launch: listen on $PROXY_IP:3307, forward to MySQL at $MYSQL_IP:3306,
# with CryptDB's lua wrapper doing the query rewriting
$EDBDIR/bins/proxy-bin/bin/mysql-proxy \
--plugins=proxy \
--event-threads=4 \
--max-open-files=1024 \
--proxy-lua-script=$EDBDIR/mysqlproxy/wrapper.lua \
--proxy-address=$PROXY_IP:3307 \
--proxy-backend-addresses=$MYSQL_IP:3306
# start a shell when the proxy exits (keeps the terminal session open)
$SHELL
| true |
f46440985b0dee0813f1044f96ae5e67e8323d48
|
Shell
|
translate/mozilla-l10n
|
/ttk-put
|
UTF-8
| 372 | 2.875 | 3 |
[] |
no_license
|
#!/bin/bash
# Push translation files for the selected languages via rsync.
# which_langs and rsync_files_put come from the sourced ttk.inc.sh.
source $(dirname $0)/ttk.inc.sh
# Consume a --yes flag: clear opt_yes and shift it off the positional
# parameters so which_langs only sees language arguments.
# NOTE(review): `shift 1` always drops $1, so this only strips --yes
# correctly when it is the first argument -- TODO confirm.
for opt in $*
do
case "$opt" in
--yes)
opt_yes=""
shift 1
;;
esac
done
langs=$(which_langs $*)
# We don't want to sync_store anymore as Pootle now has last_sync_revision and
# this would mess with that. Keep it here while we validate that we mean what
# we say here.
#sync_stores $langs
rsync_files_put $langs
| true |
89904a32aceabf2f539db593b3191a5d7e934d5f
|
Shell
|
delaaxe/.dotfiles
|
/setup.sh
|
UTF-8
| 488 | 2.90625 | 3 |
[] |
no_license
|
# Symlink the listed dotfiles from ~/.dotfiles into $HOME, skipping any
# target that already exists (file or link).
for file in .{vim,vimrc,ideavimrc,inputrc}; do
if [ -e ~/$file ]; then
echo "~/$file: not linked, already exists."
else
ln -s ~/.dotfiles/$file ~/$file
echo "~/$file: linked."
fi
done
unset file
# VS Code settings/keybindings (macOS user-settings paths; no existence check)
ln -s ~/.dotfiles/vscode/settings.json ~/Library/Application\ Support/Code/User/settings.json
ln -s ~/.dotfiles/vscode/keybindings.json ~/Library/Application\ Support/Code/User/keybindings.json
# Hook the local shell customisations into zsh (appends on every run)
echo "source ~/.dotfiles/.local.bash" >> ~/.zshrc
echo ".local.bash: linked."
| true |
63ac2b0867c034eb6e4767318b3fe35a6529c044
|
Shell
|
endavid/CheatSheets
|
/bashScript.sh
|
UTF-8
| 2,293 | 4.03125 | 4 |
[] |
no_license
|
#!/bin/bash
# The first line is to tell bash what to use to exe this script,
# if the file is executable (chmod 755)
# run handle_error function when errors occur.
# https://stackoverflow.com/a/19622569
trap 'handle_error' ERR
# Exit immediately if a command exits with a non-zero status.
set -e
# Demo function: prints "<second arg>:  <first arg>".
# (echo joins its two arguments with one space; the first argument
# already ends in ": ", hence the double space in the output.)
my_function() {
  local message="$1"
  local label="$2"
  echo "${label}: " "${message}"
}
echo "$0 is this script file"
echo "$1 is the first argument"
echo "$2 is the second argument"
echo "$@ are all the arguments"
echo "$# is the total number of arguments"
my_function "invokes your function" "🎉🎉"
my_function "you can split lines with back slashes"\
"🤓🤓"
# Some interesting functions
function post_message_to_slack() {
# Parameters:
# $1 is the text message
# $2 is the icon
local slack_token='xoxb-***'
local slack_user_name='My Pipeline Bot'
local slack_channel="#my-pipeline-channel"
local text="$1"
local icon_emoji="$2"
curl --request POST \
--data-urlencode "token=$slack_token" \
--data-urlencode "channel=$slack_channel" \
--data-urlencode "icon_emoji=$icon_emoji" \
--data-urlencode "username=$slack_user_name" \
--data-urlencode "text=$text" \
--data-urlencode "mrkdwn=true" \
'https://slack.com/api/chat.postMessage' \
-w "\n"
}
function post_info_to_slack() {
# Parameters:
# $1 is the text message
post_message_to_slack "$1" ":man-getting-massage:"
}
function post_error_to_slack() {
# Parameters:
# $1 is the error message
post_message_to_slack "$1" ":skull:"
}
function die() {
echo "$1" >&2
#post_error_to_slack "$1"
exit 2
}
function handle_error() {
# the last command: https://stackoverflow.com/a/15748957/1765629
local lc="$BASH_COMMAND" rc=$?
# stack trace: https://stackoverflow.com/a/34381499
while caller $((n++)); do :; done;
echo "😱😱😱😱😱 Command [$lc] exited with code [$rc]"
}
# Main code should come after the functions are defined
# ------------------------------------------------------
# check files exist
for F in "$1" "$2"; do
test -r $F || die "File $F not found"
done
test -r "Some-inexistent-file-to-trigger-error-trap"
| true |
6b47be45f8fc1a2568056f0847e136c3e19eb4b2
|
Shell
|
linuxsun/tools
|
/shell/centos7_sys_init.sh
|
UTF-8
| 12,374 | 3.453125 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
if [[ "$(whoami)" != "root" ]]; then
echo "please run this script as root ." >&2
exit 1
fi
LOCK_FILE="/var/run/sys_init.lock"
SSH_PORT=22
yum_update(){
#yum -y install wget
#cd /etc/yum.repos.d/ && mkdir bak && mv ./*.repo bak
#wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.aliyun.com/repo/Centos-7.repo
#wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
yum clean all && yum makecache 2>&1 >/dev/null
yum -y install net-tools lrzsz telnet gcc gcc-c++ make cmake libxml2-devel \
openssl-devel curl curl-devel unzip sudo ntp libaio-devel wget vim ncurses-devel \
autoconf automake zlib-devel python-devel 2>&1 >/dev/null
}
zone_time(){
TIME_SERVER="asia.pool.ntp.org"
CRON_ROOT="/var/spool/cron/root"
ECODE_I18N="/etc/sysconfig/i18n"
/bin/ln -sf /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
#printf 'ZONE="Asia/Shanghai"\nUTC=false\nARC=false' > /etc/sysconfig/clock
/usr/sbin/ntpdate $TIME_SERVER
/bin/grep "$TIME_SERVER" $CRON_ROOT || /bin/echo "* */5 * * * /usr/sbin/ntpdate $TIME_SERVER > /dev/null 2>&1" >> $CRON_ROOT
/bin/chmod 600 $CRON_ROOT
/bin/echo 'LANG="en_US.UTF-8"' > $ECODE_I18N
source $ECODE_I18N
}
ulimit_config(){
    # Raise the open-files / process limits according to installed RAM and
    # persist them in /etc/security/limits.conf (append is skipped when the
    # chosen value already appears in the file).
    unset ret
    LIMITS="/etc/security/limits.conf"
    # MemTotal is reported in kB
    MEMTOTAL=`/bin/grep MemTotal /proc/meminfo | awk '{print $2}'`
    NOFILE=0
    if [ -z "$MEMTOTAL" ]; then
        exit 1
    fi
    # Tiers: >=16GB -> 65536, >=8GB -> 51200, >=4GB -> 25600, else 2048.
    # Fixes: quoted "$MEMTOTAL" in numeric tests; removed the final
    # `[ -z $MEMTOTAL -o ... ]` branch, which was dead code (emptiness is
    # handled above) and used the deprecated -o operator.
    if [ "$MEMTOTAL" -ge 16770000 ]
    then
        NOFILE=65536
    elif [ "$MEMTOTAL" -ge 8380000 ]
    then
        NOFILE=51200
    elif [ "$MEMTOTAL" -ge 4190000 ]
    then
        NOFILE=25600
    else
        NOFILE=2048
    fi
    ulimit -SHn $NOFILE
    # NOTE(review): this grep matches the number anywhere in the file, so an
    # unrelated occurrence of e.g. "2048" also suppresses the append.
    /bin/grep "$NOFILE" "$LIMITS" > /dev/null ;ret=$?
    if [ $ret -eq 1 ];then
        cat >> $LIMITS << EOF
* soft nofile $NOFILE
* hard nofile $NOFILE
* soft nproc $NOFILE
* hard nproc $NOFILE
EOF
    fi
}
# Harden the OpenSSH server and client configuration.
# Backs up both files, disables GSSAPI auth and DNS lookups, enables
# key-based auth, moves the listening port to $SSH_PORT, and disables
# host-key prompting on the client side.
# NOTE(review): $SSH_PORT is assumed to be set earlier in this script
# (not visible in this chunk) -- confirm before reuse.
sshd_config(){
SSHD_CONFIG="/etc/ssh/sshd_config"
SSH_CONFIG="/etc/ssh/ssh_config"
# Keep randomized backups of the originals.
cp $SSHD_CONFIG ${SSHD_CONFIG}.$RANDOM
cp $SSH_CONFIG ${SSH_CONFIG}.$RANDOM
# In-place edits; all expressions belong to the same sed invocation.
sed -i -e 's/^GSSAPIAuthentication yes$/GSSAPIAuthentication no/' \
-e 's/#UseDNS yes/UseDNS no/'\
-e 's|#PermitEmptyPasswords\ no|PermitEmptyPasswords\ no|g'\
-e "s|#Port\ 22|Port\ $SSH_PORT|g"\
-e 's|ChallengeResponseAuthentication\ yes|ChallengeResponseAuthentication\ no|g'\
-e 's|\#RSAAuthentication\ yes|RSAAuthentication\ yes|g'\
-e 's|\#PubkeyAuthentication\ yes|PubkeyAuthentication\ yes|g' $SSHD_CONFIG
#-e 's|#PermitRootLogin\ yes|PermitRootLogin\ no|g'\
#sed -i 's|PasswordAuthentication\ yes|PasswordAuthentication\ no|g' $SSHD_CONFIG
systemctl restart crond
unset ret
# Client-side: silence host-key prompts (append only once).
grep 'StrictHostKeyChecking no' $SSH_CONFIG >/dev/null 2>&1 ; ret=$?
if [ $ret -ne 0 ]; then
echo 'StrictHostKeyChecking no' >> $SSH_CONFIG
echo 'UserKnownHostsFile /dev/null' >> $SSH_CONFIG
fi
# Print follow-up hardening steps for the operator (not applied here).
echo -e """
If only the key login is allowed, please:
\033[33msed -i 's|PasswordAuthentication\ yes|PasswordAuthentication\ no|g' $SSHD_CONFIG \033[0m
If the root user is not allowed to log in, please:
\033[33msed -i 's|#PermitRootLogin\ yes|PermitRootLogin\ no|g' $SSHD_CONFIG \033[0m
systemctl restart sshd.service
"""
}
# Replace /etc/sysctl.conf with an opinionated, heavily-commented set of
# kernel/network tunables (backing up the old file first) and load it.
# The heredoc delimiter is quoted ('EOF'), so the content is written
# verbatim -- nothing inside it is expanded by the shell.
sysctl_config(){
SYSCTL="/etc/sysctl.conf"
# Randomized backup of the previous configuration.
/bin/cp "$SYSCTL" "$SYSCTL".bak.$RANDOM
cat > $SYSCTL << 'EOF'
# Controls the System Request debugging functionality of the kernel
kernel.sysrq = 0
# Controls whether core dumps will append the PID to the core filename.
# Useful for debugging multi-threaded applications.
kernel.core_uses_pid = 1
#Allow for more PIDs
kernel.pid_max = 65535
# The contents of /proc/<pid>/maps and smaps files are only visible to
# readers that are allowed to ptrace() the process
#kernel.maps_protect = 1
#Enable ExecShield protection
#kernel.exec-shield = 1
kernel.randomize_va_space = 2
# Controls the maximum size of a message, in bytes
kernel.msgmnb = 65535
# Controls the default maxmimum size of a mesage queue
kernel.msgmax = 65535
# Restrict core dumps
fs.suid_dumpable = 0
# Hide exposed kernel pointers
kernel.kptr_restrict = 1
###
### IMPROVE SYSTEM MEMORY MANAGEMENT ###
###
# Increase size of file handles and inode cache
fs.file-max = 209708
# Do less swapping
vm.swappiness = 10
vm.dirty_ratio = 30
vm.dirty_background_ratio = 5
# specifies the minimum virtual address that a process is allowed to mmap
vm.mmap_min_addr = 4096
# 50% overcommitment of available memory
vm.overcommit_ratio = 50
vm.overcommit_memory = 0
# Set maximum amount of memory allocated to shm to 256MB
kernel.shmmax = 268435456
kernel.shmall = 268435456
# Keep at least 256MB of free RAM space available
vm.min_free_kbytes = 262144
###
### GENERAL NETWORK SECURITY OPTIONS ###
###
#Prevent SYN attack, enable SYNcookies (they will kick-in when the max_syn_backlog reached)
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_syn_retries = 2
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_max_syn_backlog = 4096
# Disables packet forwarding
#net.ipv4.ip_forward = 0
net.ipv4.ip_forward = 1
#net.ipv4.conf.all.forwarding = 0
#net.ipv4.conf.default.forwarding = 0
#net.ipv6.conf.all.forwarding = 0
net.ipv6.conf.all.forwarding = 1
#net.ipv6.conf.default.forwarding = 0
# Disables IP source routing
#net.ipv4.conf.all.send_redirects = 0
#net.ipv4.conf.default.send_redirects = 0
#net.ipv4.conf.all.accept_source_route = 0
#net.ipv4.conf.default.accept_source_route = 0
#net.ipv6.conf.all.accept_source_route = 0
#net.ipv6.conf.default.accept_source_route = 0
# Enable IP spoofing protection, turn on source route verification
net.ipv4.conf.all.rp_filter = 1
net.ipv4.conf.default.rp_filter = 1
# Disable ICMP Redirect Acceptance
net.ipv4.conf.all.accept_redirects = 0
net.ipv4.conf.default.accept_redirects = 0
net.ipv4.conf.all.secure_redirects = 0
net.ipv4.conf.default.secure_redirects = 0
net.ipv6.conf.all.accept_redirects = 0
net.ipv6.conf.default.accept_redirects = 0
# Enable Log Spoofed Packets, Source Routed Packets, Redirect Packets
net.ipv4.conf.all.log_martians = 1
net.ipv4.conf.default.log_martians = 1
# Decrease the time default value for tcp_fin_timeout connection
net.ipv4.tcp_fin_timeout = 7
# Decrease the time default value for connections to keep alive
net.ipv4.tcp_keepalive_time = 300
net.ipv4.tcp_keepalive_probes = 5
net.ipv4.tcp_keepalive_intvl = 15
# Don't relay bootp
net.ipv4.conf.all.bootp_relay = 0
# Don't proxy arp for anyone
net.ipv4.conf.all.proxy_arp = 0
# Turn on the tcp_timestamps, accurate timestamp make TCP congestion control algorithms work better
net.ipv4.tcp_timestamps = 1
# Don't ignore directed pings
net.ipv4.icmp_echo_ignore_all = 0
# Enable ignoring broadcasts request
net.ipv4.icmp_echo_ignore_broadcasts = 1
# Enable bad error message Protection
net.ipv4.icmp_ignore_bogus_error_responses = 1
# Allowed local port range
net.ipv4.ip_local_port_range = 16384 65535
# Enable a fix for RFC1337 - time-wait assassination hazards in TCP
net.ipv4.tcp_rfc1337 = 1
# Do not auto-configure IPv6
#net.ipv6.conf.all.autoconf=0
#net.ipv6.conf.all.accept_ra=0
#net.ipv6.conf.default.autoconf=0
#net.ipv6.conf.default.accept_ra=0
#net.ipv6.conf.eth0.autoconf=0
#net.ipv6.conf.eth0.accept_ra=0
###
### TUNING NETWORK PERFORMANCE ###
###
# For high-bandwidth low-latency networks, use 'htcp' congestion control
# Do a 'modprobe tcp_htcp' first
net.ipv4.tcp_congestion_control = htcp
# For servers with tcp-heavy workloads, enable 'fq' queue management scheduler (kernel > 3.12)
net.core.default_qdisc = fq
# Turn on the tcp_window_scaling
net.ipv4.tcp_window_scaling = 1
# Increase the read-buffer space allocatable
net.ipv4.tcp_rmem = 8192 87380 16777216
net.ipv4.udp_rmem_min = 16384
net.core.rmem_default = 262144
net.core.rmem_max = 16777216
# Increase the write-buffer-space allocatable
net.ipv4.tcp_wmem = 8192 65536 16777216
net.ipv4.udp_wmem_min = 16384
net.core.wmem_default = 262144
net.core.wmem_max = 16777216
# Increase number of incoming connections
net.core.somaxconn = 32768
# Increase number of incoming connections backlog
net.core.netdev_max_backlog = 16384
net.core.dev_weight = 64
# Increase the maximum amount of option memory buffers
net.core.optmem_max = 65535
# Increase the tcp-time-wait buckets pool size to prevent simple DOS attacks
net.ipv4.tcp_max_tw_buckets = 1440000
# try to reuse time-wait connections, but don't recycle them (recycle can break clients behind NAT)
net.ipv4.tcp_tw_recycle = 0
net.ipv4.tcp_tw_reuse = 1
# Limit number of orphans, each orphan can eat up to 16M (max wmem) of unswappable memory
net.ipv4.tcp_max_orphans = 16384
net.ipv4.tcp_orphan_retries = 0
# Increase the maximum memory used to reassemble IP fragments
net.ipv4.ipfrag_high_thresh = 512000
net.ipv4.ipfrag_low_thresh = 446464
# don't cache ssthresh from previous connection
net.ipv4.tcp_no_metrics_save = 1
net.ipv4.tcp_moderate_rcvbuf = 1
# Increase size of RPC datagram queue length
net.unix.max_dgram_qlen = 50
# Don't allow the arp table to become bigger than this
net.ipv4.neigh.default.gc_thresh3 = 2048
# Tell the gc when to become aggressive with arp table cleaning.
# Adjust this based on size of the LAN. 1024 is suitable for most /24 networks
net.ipv4.neigh.default.gc_thresh2 = 1024
# Adjust where the gc will leave arp table alone - set to 32.
net.ipv4.neigh.default.gc_thresh1 = 32
# Adjust to arp table gc to clean-up more often
net.ipv4.neigh.default.gc_interval = 30
# Increase TCP queue length
net.ipv4.neigh.default.proxy_qlen = 96
net.ipv4.neigh.default.unres_qlen = 6
# Enable Explicit Congestion Notification (RFC 3168), disable it if it doesn't work for you
net.ipv4.tcp_ecn = 1
net.ipv4.tcp_reordering = 3
# How many times to retry killing an alive TCP connection
net.ipv4.tcp_retries2 = 15
net.ipv4.tcp_retries1 = 3
# Avoid falling back to slow start after a connection goes idle
# keeps our cwnd large with the keep alive connections (kernel > 3.6)
net.ipv4.tcp_slow_start_after_idle = 0
# Allow the TCP fastopen flag to be used, beware some firewalls do not like TFO! (kernel > 3.7)
net.ipv4.tcp_fastopen = 3
# This will enusre that immediatly subsequent connections use the new values
net.ipv4.route.flush = 1
net.ipv6.route.flush = 1
###
### other ###
###
vm.max_map_count=262144
#vm.vfs_cache_pressure=200
EOF
# Load the new settings.
# NOTE(review): "2>&1 >/dev/null" discards stdout but sends stderr to the
# terminal; if the intent was to silence everything, ">/dev/null 2>&1"
# would be needed -- confirm.
/sbin/sysctl -p 2>&1 >/dev/null
echo "sysctl set OK!!"
}
# Disable SELinux: switch the running kernel to permissive mode now and
# persist "disabled" in /etc/selinux/config for subsequent boots.
selinux_config(){
# Immediate effect for the current boot (permissive mode).
setenforce 0
# Persistent effect: rewrite enforcing -> disabled on the SELINUX line.
sed -i -e '/SELINUX/ s/enforcing/disabled/' /etc/selinux/config
}
# Switch the firewall from firewalld to classic iptables and seed a
# minimal rule set (SSH on 22 and $SSH_PORT, HTTP, rate-limited ICMP,
# reject everything else). Each rule is added only if not present:
# "-A" is rewritten to "-C" (check) first, and the rule is appended
# only when the check fails.
iptables_config(){
# firewalld must be fully out of the way before iptables-services takes over.
systemctl mask firewalld
systemctl stop firewalld.service
systemctl disable firewalld.service
yum -y install iptables-services >/dev/null 2>&1
IPTABLES="/etc/sysconfig/iptables"
# Temp file holding the desired rules, one per line.
IPFWS=`mktemp`
tee $IPFWS <<- EOF >/dev/null 2>&1
-A INPUT -p tcp -m state --state NEW -m tcp --dport 22 -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport $SSH_PORT -j ACCEPT
-A INPUT -p tcp -m state --state NEW -m tcp --dport 80 -j ACCEPT
-A INPUT -p icmp -m limit --limit 10/sec --limit-burst 100 -j ACCEPT
-A INPUT -j REJECT --reject-with icmp-host-prohibited
EOF
unset ret
while read LINE
do
# Probe with -C (check); only append the rule when it does not exist yet.
check=$(echo $LINE | sed -e 's/-A/-C/g')
/usr/sbin/xtables-multi iptables $check ;ret=$?
if [ "$ret" -eq 0 ]; then
continue
#exit 0;
else
xtables-multi iptables $LINE; >/dev/null 2>&1
fi &>/dev/null
done < $IPFWS
# Randomized backup of any existing saved rule set.
test -f $IPTABLES && cp $IPTABLES ${IPTABLES}.$RANDOM
# Docker adds its own iptables rules at startup; do not let iptables load
# the default saved rules at boot, because those defaults do not contain
# the docker-related chains.
#iptables-save > $IPTABLES
\rm "$IPFWS"
/usr/bin/systemctl enable iptables
/usr/bin/systemctl enable ip6tables
/usr/bin/systemctl restart iptables.service
/usr/bin/systemctl restart ip6tables
}
# Run the full provisioning sequence in dependency order.
# NOTE(review): yum_update is assumed to be defined earlier in this
# script (not visible in this chunk) -- confirm.
main(){
yum_update
zone_time
ulimit_config
sysctl_config
sshd_config
#selinux_config
#iptables_config # docker and firewalld do not work well together; it is recommended to disable firewalld and use iptables instead.
}
# Run-once guard: if the lock file exists, refuse to provision again and
# tell the operator to delete it; otherwise give a 6-second window to
# abort, run main, and create the lock on success.
# NOTE(review): $LOCK_FILE is assumed to be set earlier in this script
# (not visible in this chunk) -- confirm.
if [ -f $LOCK_FILE ]; then
echo "locking..."
echo -e "\033[31m>>> delete $LOCK_FILE \033[0m"
exit 1
else
echo -e "\033[31mplease ctrl+C to cancel \033[0m"
sleep 6
main
/bin/touch "$LOCK_FILE"
fi
# https://github.com/linuxsun
#
# https://github.com/linuxsun/tools.git
# https://klaver.it/linux/sysctl.conf
# https://wiki.archlinux.org/index.php/sysctl
# https://github.com/linuxsun
| true |
036ff752a87bee01f0995a32e293c33c00c91c00
|
Shell
|
mprocha/scripts
|
/git.sh
|
UTF-8
| 188 | 2.859375 | 3 |
[] |
no_license
|
#!/bin/bash
# Stage all changes, commit them with a timestamped message supplied by
# the user, and push to origin/master.
date=$(date)
# -r keeps backslashes in the user's comment literal.
read -r -p "Digite o comentario sobre as modificações feitas: " comment
#git init
git add .
git commit -m "Atualização $date: $comment"
git push origin master
| true |
fd5295d4ed5107751cd80bef87f1897409533435
|
Shell
|
ksoldau/dotfiles
|
/sublime-text-3/install.sh
|
UTF-8
| 909 | 3.640625 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Sublime Text 3 install with Package Control (macOS only).
# Run this script with:
#   curl -L git.io/sublimetext | sh
# NOTE: the usage line above used to be an *uncommented* "$ curl ..."
# line, which made every run try to execute a literal "$" command.
# The shebang is bash because the script uses [[ ]] and process
# substitution.
# Detect the architecture
if [ "$(uname -s)" = "Darwin" ]; then
URL="http://c758482.r82.cf2.rackcdn.com/Sublime%20Text%20Build%203065.dmg"
else
exit;
fi
# # Download the dmg, mount and install
curl -o ~/st3.dmg "$URL"
mountpoint="/Volumes/Sublime Text"
hdiutil attach -mountpoint "$mountpoint" ~/st3.dmg
# Quoted expansions replace the old IFS=newline hack; the .app paths
# contain spaces, so read them line by line.
while IFS= read -r app; do
cp -a "$app" /Applications/
done < <(find "$mountpoint" -type d -maxdepth 2 -name '*.app')
hdiutil detach "$mountpoint"
rm ~/st3.dmg
# # Symlink config
st3=~/Library/'Application Support'/'Sublime Text 3'
if [[ ! -d $st3 ]]; then
mkdir "$st3"
fi
ln -s "${ZSH}/sublime-text-3/Installed Packages" "${st3}/Installed Packages"
ln -s "${ZSH}/sublime-text-3/Packages" "${st3}/Packages"
echo ""
echo "Sublime Text 3 installed successfully!"
echo "Run with: subl"
| true |
18af49f21d4bd90fe4845a8b853fcb5ac3a476f0
|
Shell
|
TJNII/DockerBases
|
/build.sh
|
UTF-8
| 270 | 3.40625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -euo pipefail
# Image tag prefix shared by every base image built here.
PREFIX=tjnii_base
# Each directory (up to 2 levels deep) containing a Dockerfile is one
# image: pre-pull its FROM base, then build it with a cold cache.
find ./ -maxdepth 2 -type f -name Dockerfile | cut -f 2 -d / | sort |
while read -r base; do
  awk '/FROM/ {print $2}' "${base}/Dockerfile" | xargs docker pull
  docker build --no-cache -t "${PREFIX}/${base}" "${base}"
done
| true |
f14308978b4063b4c83091bdd84cd5ed323c2aed
|
Shell
|
sboehringer/ImputationPipeline
|
/src/maintenance/pullin-R-sb.sh
|
UTF-8
| 751 | 3.0625 | 3 |
[] |
no_license
|
#!/bin/bash
# Copy the private R library files and the gwas R scripts from the
# developer's home directory into this pipeline's src/R tree.
# Shebang is bash: the script relies on BASH_SOURCE and pushd/popd.
PIPELINEBASE=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/../../
pushd "$PIPELINEBASE/src/maintenance"
#
# <p> Rprivate
#
R=~/src/Rprivate
. "$R/exportR.sh"
# Deliberately unquoted below so the list word-splits into file names.
FILES="Rlibraries.R Rdata.R Rsystem.R Rmeta.R Rgraphics.R Rreporting.R Rfunctions.R Rstatistic.R Rpatches.R Rdataset.R Rsimulation.R RpropertyList.R Rlinux.R RsowReap.R RgenericAll.R RgenericAllRaw.R"
echo "copying from $R."
for i in $FILES; do
echo "copying $i ->" "$PIPELINEBASE/src/R/Rprivate"
cp "$R/$i" "$PIPELINEBASE/src/R/Rprivate"
done
#
# <p> Rscripts
#
echo copying ~/src/Rscripts/gwas '->' "$PIPELINEBASE/src/R"
# -p: succeed even when the target already exists (script re-runs).
mkdir -p "$PIPELINEBASE/src/R/gwas"
cp -r ~/src/Rscripts/gwas/gwas* ~/src/Rscripts/gwas/report* $(realpath ~/src/Rscripts/gwas/setup*) "$PIPELINEBASE/src/R/gwas"
popd
| true |
0e35cd3e69c178f3d284534677059e9c197dd2bd
|
Shell
|
wearemovement/devops-realworld-example-backend
|
/scripts/run-fast-test.sh
|
UTF-8
| 892 | 3.40625 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Pull a previously built image from the Google registry and run its
# test suite inside Docker. Designed to run on a GoCD agent that itself
# runs inside a Docker container (hence the docker-inspect plumbing to
# locate the credentials volume on the host).
set -e
set -x
# Image coordinates come from the CI environment, with dev defaults.
imagename=${IMAGENAME:-todo}
version=${GO_PIPELINE_LABEL:-dev}
repository_prefix=${REPOSITORY_PREFIX}
full_imagename=${repository_prefix}/${imagename}:${version}
# Optional single argument overrides the test mode (default --fast).
mode="--fast"
if [ $# -eq 1 ]; then
mode=$1
fi
# Fetch the image
gcloud docker -- pull ${full_imagename}
# Instant
if [ "${mode}" = "--fast" ]; then
# Hardcoded parts to work on a Go agent running in docker cloud
# Find this agent's own container, then resolve the host path of the
# volume that holds the Google credentials so it can be re-mounted
# into the test container.
# NOTE(review): relies on $HOSTNAME matching the container name and on
# the /var/lib/kub volume layout -- environment-specific, confirm.
docker_id=`docker ps | grep $HOSTNAME | grep -v POD | awk '{print $1}'`
cred_dir=`dirname ${GOOGLE_APPLICATION_CREDENTIALS}`
host_vol_dir=`docker inspect $docker_id | grep $cred_dir | grep /var/lib/kub | sed s/\"//g | awk -F: '{print $1}'`
docker run --rm -v ${host_vol_dir}:${cred_dir} -e GOOGLE_DATASTORE_NAMESPACE="${imagename}${mode}${version}" -e GOOGLE_APPLICATION_CREDENTIALS=${GOOGLE_APPLICATION_CREDENTIALS} ${full_imagename} ./runtest --fast
fi
| true |
09f3169f0d86066139f63705ba3a1f5350b9996c
|
Shell
|
bgxavier/devstack_utils
|
/scripts/setup_docker.sh
|
UTF-8
| 579 | 2.59375 | 3 |
[] |
no_license
|
# Install Docker (lxc-docker) on Ubuntu from the official get.docker.com
# apt repository, enable bash completion, and allow the "stack" group to
# talk to the daemon.
# Ensure apt can fetch over https before adding the repository.
[ -e /usr/lib/apt/methods/https ] || {
sudo apt-get update
sudo apt-get install apt-transport-https
}
# Trust the Docker repository signing key.
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
sudo sh -c "echo deb https://get.docker.com/ubuntu docker main \
> /etc/apt/sources.list.d/docker.list"
sudo apt-get update
sudo apt-get install -y lxc-docker
source /etc/bash_completion.d/docker # Docker file completion for bash
# Let members of the "stack" group use the Docker socket, then restart.
sudo bash -c "echo DOCKER_OPTS=\'-G stack\' >> /etc/default/docker"
sudo restart docker
docker version # Should work!
07b1f092ca2620790c6db0ea522e0193be6646ee
|
Shell
|
gturi/bash-utils
|
/remove-empty-directories.sh
|
UTF-8
| 762 | 4.40625 | 4 |
[] |
no_license
|
#!/bin/bash
# usage: ./remove_empty_directories "/absolute/path/to/target_folder"
# description: recursively removes empty directories inside the specified path
# removes all the empty directories in the specified one (included)
# Recursively remove every empty directory inside "$1", including "$1"
# itself when it ends up empty. Uses a quoted glob instead of parsing
# find output, so names containing spaces or newlines are handled
# correctly (the old $(find ...) loop word-split on whitespace).
function find_and_remove {
    local dir
    # Recurse into each immediate subdirectory (skip symlinks to avoid loops).
    for dir in "$1"/*/; do
        [[ -d "$dir" && ! -L "${dir%/}" ]] || continue
        find_and_remove "${dir%/}"
    done
    # checks if current directory is empty
    if [[ ! $(ls -A "$1") ]]
    then
        rmdir "$1"
    fi
}
# Exactly one argument (the target directory) is required.
# Fixes: error message goes to stderr; "exit -1" (reported as 255)
# replaced by the conventional exit 1.
if [ $# -ne 1 ]
then
    echo "Wrong arguments number, expected 1 got $#" >&2
    exit 1
fi
# prevents errors with files with spaces in their name
IFS=$'\n'
find_and_remove "$1"
| true |
9899c37f4826cdd0befd316603cacab149431326
|
Shell
|
Vikash84/MegaPath-Nano
|
/db_preparation/updateDB.sh
|
UTF-8
| 3,599 | 3.125 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Build the MegaPath-Nano NCBI taxonomy SQLite database from a local
# RefSeq mirror: download the NCBI taxdump, derive CSV tables
# (sequence_name, assembly_summary, ranks, names, nodes, source) with
# the helper scripts next to this one, then import everything into
# ncbi_taxonomy.db via the sqlite3 heredoc at the bottom.
SCRIPT=$(readlink -f $0)
SCRIPT_PATH=$(dirname ${SCRIPT})
ROOT_PATH=$(dirname $(dirname ${SCRIPT}))
# Defaults; can be overridden by the two positional arguments below.
DB_DIR=${ROOT_PATH}/genomes/refseq/
SQL_DB_DIR=${ROOT_PATH}/db/
if [ $# -eq 2 ]
then
DB_DIR=$1
SQL_DB_DIR=$2
fi
if [ ! -d ${DB_DIR} ]; then
echo "DB not exist. Please use refseq_download.sh to download RefSeq DB first."
exit
fi
if [ ! -d ${SQL_DB_DIR} ]; then
mkdir -p ${SQL_DB_DIR}
fi
cd ${SQL_DB_DIR}
# Fetch the NCBI taxonomy dump and keep only names.dmp / nodes.dmp.
wget ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/taxdump.tar.gz && tar xzf taxdump.tar.gz && rm citations.dmp delnodes.dmp division.dmp gencode.dmp merged.dmp readme.txt
#sequence_name table
python ${SCRIPT_PATH}/genSequenceName.py --function 1 --sequenceName abhvfp.sequence_name --db_dir ${DB_DIR}
python ${SCRIPT_PATH}/genSequenceName.py --function 2 --sequenceName plasmid.sequence_name --db_dir ${DB_DIR} --num 8
cat abhvfp.sequence_name plasmid.sequence_name > sequence_name.csv && rm abhvfp.sequence_name plasmid.sequence_name
#assembly_summary table
python ${SCRIPT_PATH}/genAssemblySummary.py --db_dir ${DB_DIR} --assemblySummary assembly_summary.csv
#ranks
python ${SCRIPT_PATH}/genRank.py --ranks ranks.csv
#names and nodes
python ${SCRIPT_PATH}/parseDml.py --dmp names.dmp --outputFile abhvfp.names --function 1
python ${SCRIPT_PATH}/parseDml.py --outputFile plasmid.names --function 3 --num 8
cat abhvfp.names plasmid.names > names.csv && rm abhvfp.names plasmid.names
python ${SCRIPT_PATH}/parseDml.py --dmp nodes.dmp --outputFile abhvfp.nodes --function 2
python ${SCRIPT_PATH}/parseDml.py --outputFile plasmid.nodes --function 4 --num 8
cat abhvfp.nodes plasmid.nodes > nodes.csv && rm abhvfp.nodes plasmid.nodes
cp ${SCRIPT_PATH}/source.csv source.csv
#source.csv table is provided
#sqlite3 script for importing to db
#DELETE FROM table #remove all records from current table
sqlite3 ncbi_taxonomy.db << 'END_SQL'
.mode csv
.separator "\t"
CREATE TABLE assembly_summary(
assembly_id char(20) not null,
bioproject char(20),
biosample char(20),
wgs_master char(20),
refseq_category char(30),
taxid int not null,
species_taxid int not null,
organism_name char(150),
infraspecific_name char(150),
isolate char(150),
version_status char(15),
assembly_level char(20),
release_type char(15),
genome_rep char(15),
seq_rel_date char(10),
asm_name char(150),
submitter char(255),
gbrs_paired_asm char(20),
paired_asm_comp char(20),
ftp_path char(250),
excluded_from_refseq char(100),
relation_to_type_material char(100));
.import assembly_summary.csv assembly_summary
CREATE TABLE sequence_name (sequence_id char(20), sequence_name char(100));
CREATE UNIQUE INDEX idx_sequence_name_sequence_id on sequence_name (sequence_id);
.import sequence_name.csv sequence_name
CREATE TABLE ranks (
	rank VARCHAR NOT NULL,
	height INTEGER NOT NULL,
	PRIMARY KEY (rank),
	UNIQUE (height)
);
.import ranks.csv ranks
create table names(tax_id,tax_name,unique_name,name_class,source_id,is_primary,is_classified);
.import names.csv names
CREATE TABLE nodes (
	tax_id VARCHAR NOT NULL,
	parent_id VARCHAR,
	rank VARCHAR,
	embl_code VARCHAR,
	division_id VARCHAR,
	source_id INTEGER,
	is_valid BOOLEAN,
	PRIMARY KEY (tax_id),
	FOREIGN KEY(rank) REFERENCES ranks (rank),
	FOREIGN KEY(source_id) REFERENCES source (id),
	CHECK (is_valid IN (0, 1))
);
.import nodes.csv nodes
CREATE TABLE source (
	id INTEGER NOT NULL,
	name VARCHAR,
	description VARCHAR,
	PRIMARY KEY (id),
	UNIQUE (name)
);
.import source.csv source
END_SQL
| true |
ddae43a8c3a56a617fd598ad37fcc8abd2f49fdf
|
Shell
|
slachiewicz/docker-zabbix-server
|
/container-files/config/init/bootstrap.sh
|
UTF-8
| 12,128 | 3.640625 | 4 |
[] |
no_license
|
#!/bin/sh
set -eu
export TERM=xterm
# Bash Colors
red=`tput setaf 1`
green=`tput setaf 2`
yellow=`tput setaf 3`
white=`tput setaf 7`
bold=`tput bold`
reset=`tput sgr0`
separator=$(echo && printf '=%.0s' {1..100} && echo)
# Logging Functions
# Print a green, timestamped "[LOG HH:MM:SS] message" line; with no
# arguments print a blank line (used as a visual separator).
# Fix: the old test was `[[ "$@" ]]`, which is a bash syntax error for
# zero or for two-or-more arguments; `$#` handles every arity.
log() {
if [[ $# -gt 0 ]]; then echo "${bold}${green}[LOG $(date +'%T')]${reset} $*";
else echo; fi
}
# Print a yellow, timestamped "[WARNING HH:MM:SS] message" line.
warning() {
local stamp
stamp=$(date +'%T')
echo "${bold}${yellow}[WARNING ${stamp}]${reset} ${yellow}$*${reset}"
}
# Print a red, timestamped "[ERROR HH:MM:SS] message" line.
error() {
local stamp
stamp=$(date +'%T')
echo "${bold}${red}[ERROR ${stamp}]${reset} ${red}$*${reset}"
}
# Create the Zabbix database (utf8) and grant the configured user full
# access to it from any host, using the ZS_DB* environment variables.
# NOTE(review): the password is passed on the mysql command line, which
# is visible in `ps` output -- acceptable inside a container, but worth
# confirming.
create_db() {
mysql -u ${ZS_DBUser} -p${ZS_DBPassword} -h ${ZS_DBHost} -P ${ZS_DBPort} -e "CREATE DATABASE IF NOT EXISTS ${ZS_DBName} CHARACTER SET utf8;"
mysql -u ${ZS_DBUser} -p${ZS_DBPassword} -h ${ZS_DBHost} -P ${ZS_DBPort} -e "GRANT ALL ON ${ZS_DBName}.* TO '${ZS_DBUser}'@'%' identified by '${ZS_DBPassword}';"
mysql -u ${ZS_DBUser} -p${ZS_DBPassword} -h ${ZS_DBHost} -P ${ZS_DBPort} -e "flush privileges;"
}
# Load the stock Zabbix schema, images, and seed data into the freshly
# created database. Order matters: schema first, then images and data.
import_zabbix_db() {
mysql -u ${ZS_DBUser} -p${ZS_DBPassword} -h ${ZS_DBHost} -P ${ZS_DBPort} -D ${ZS_DBName} < ${ZABBIX_SQL_DIR}/schema.sql
mysql -u ${ZS_DBUser} -p${ZS_DBPassword} -h ${ZS_DBHost} -P ${ZS_DBPort} -D ${ZS_DBName} < ${ZABBIX_SQL_DIR}/images.sql
mysql -u ${ZS_DBUser} -p${ZS_DBPassword} -h ${ZS_DBHost} -P ${ZS_DBPort} -D ${ZS_DBName} < ${ZABBIX_SQL_DIR}/data.sql
}
# Create the Zabbix log directory and pre-create the server/agent log
# files. NOTE(review): mode 777 is world-writable -- presumably to let
# the unprivileged zabbix processes write; confirm if tighter modes work.
logging() {
mkdir -p /var/log/zabbix
chmod 777 /var/log/zabbix
touch /var/log/zabbix/zabbix_server.log /var/log/zabbix/zabbix_agentd.log
chmod 777 /var/log/zabbix/zabbix_server.log /var/log/zabbix/zabbix_agentd.log
}
# Pre-create the pid files for the server, agent, and Java gateway so the
# (non-root) zabbix processes can write them. Same 777 caveat as above.
system_pids() {
touch /var/run/zabbix_server.pid /var/run/zabbix_agentd.pid /var/run/zabbix_java.pid
chmod 777 /var/run/zabbix_server.pid /var/run/zabbix_agentd.pid /var/run/zabbix_java.pid
}
# Ensure the zabbix user/group exist and own the config and source
# trees; give ping/fping the setuid bits Zabbix needs for ICMP checks.
fix_permissions() {
# Idempotent: only create the group/user when missing.
getent group zabbix || groupadd zabbix
getent passwd zabbix || useradd -g zabbix -M zabbix
chown -R zabbix:zabbix /usr/local/etc/
chown -R zabbix:zabbix /usr/local/src/zabbix/
# Frontend config dir must be writable by the web setup wizard.
mkdir -p /usr/local/src/zabbix/frontends/php/conf/
chmod 777 /usr/local/src/zabbix/frontends/php/conf/
# setuid so unprivileged checks can send ICMP packets.
chmod u+s /usr/bin/ping
chown root:zabbix /usr/sbin/fping
chown root:zabbix /usr/sbin/fping6
chmod 4710 /usr/sbin/fping
chmod 4710 /usr/sbin/fping6
}
# Render the Zabbix server config, frontend config, and PHP ini by
# substituting every "ZS_*" / "PHP_*" placeholder with the value of the
# environment variable of the same name. Optional settings (SourceIP,
# Include, LoadModule) are appended only when set. Files dropped in
# /etc/custom-config/ override the generated ones entirely.
update_config() {
sed -i 's#=ZS_ListenPort#='${ZS_ListenPort}'#g' /usr/local/etc/zabbix_server.conf
if [ "$ZS_SourceIP" != "" ]; then
echo SourceIP=${ZS_SourceIP} >> /usr/local/etc/zabbix_server.conf
fi
sed -i 's#=ZS_LogFileSize#='${ZS_LogFileSize}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_LogFile#='${ZS_LogFile}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_DebugLevel#='${ZS_DebugLevel}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_PidFile#='${ZS_PidFile}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_DBHost#='${ZS_DBHost}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_DBName#='${ZS_DBName}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_DBSchema#='${ZS_DBSchema}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_DBUser#='${ZS_DBUser}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_DBPassword#='${ZS_DBPassword}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_DBSocket#='${ZS_DBSocket}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_DBPort#='${ZS_DBPort}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_StartPollersUnreachable#='${ZS_StartPollersUnreachable}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_StartPollers#='${ZS_StartPollers}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_StartTrappers#='${ZS_StartTrappers}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_StartPingers#='${ZS_StartPingers}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_StartDiscoverers#='${ZS_StartDiscoverers}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_StartHTTPPollers#='${ZS_StartHTTPPollers}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_StartTimers#='${ZS_StartTimers}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_JavaGatewayPort#='${ZS_JavaGatewayPort}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_JavaGateway#='${ZS_JavaGateway}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_StartJavaPollers#='${ZS_StartJavaPollers}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_StartVMwareCollectors#='${ZS_StartVMwareCollectors}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_VMwareFrequency#='${ZS_VMwareFrequency}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_VMwarePerfFrequency#='${ZS_VMwarePerfFrequency}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_VMwareCacheSize#='${ZS_VMwareCacheSize}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_VMwareTimeout#='${ZS_VMwareTimeout}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_SNMPTrapperFile#='${ZS_SNMPTrapperFile}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_StartSNMPTrapper#='${ZS_StartSNMPTrapper}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_ListenIP#='${ZS_ListenIP}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_HousekeepingFrequency#='${ZS_HousekeepingFrequency}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_MaxHousekeeperDelete#='${ZS_MaxHousekeeperDelete}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_SenderFrequency#='${ZS_SenderFrequency}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_CacheSize#='${ZS_CacheSize}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_CacheUpdateFrequency#='${ZS_CacheUpdateFrequency}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_StartDBSyncers#='${ZS_StartDBSyncers}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_HistoryCacheSize#='${ZS_HistoryCacheSize}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_TrendCacheSize#='${ZS_TrendCacheSize}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_HistoryTextCacheSize#='${ZS_HistoryTextCacheSize}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_ValueCacheSize#='${ZS_ValueCacheSize}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_Timeout#='${ZS_Timeout}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_TrapperTimeout#='${ZS_TrapperTimeout}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_UnreachablePeriod#='${ZS_UnreachablePeriod}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_UnavailableDelay#='${ZS_UnavailableDelay}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_UnreachableDelay#='${ZS_UnreachableDelay}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_AlertScriptsPath#='${ZS_AlertScriptsPath}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_ExternalScripts#='${ZS_ExternalScripts}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_FpingLocation#='${ZS_FpingLocation}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_Fping6Location#='${ZS_Fping6Location}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_SSHKeyLocation#='${ZS_SSHKeyLocation}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_LogSlowQueries#='${ZS_LogSlowQueries}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_TmpDir#='${ZS_TmpDir}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_StartProxyPollers#='${ZS_StartProxyPollers}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_ProxyConfigFrequency#='${ZS_ProxyConfigFrequency}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_ProxyDataFrequency#='${ZS_ProxyDataFrequency}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_AllowRoot#='${ZS_AllowRoot}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_User#='${ZS_User}'#g' /usr/local/etc/zabbix_server.conf
if [ "$ZS_Include" != "" ]; then
echo Include=${ZS_Include} >> /usr/local/etc/zabbix_server.conf
fi
sed -i 's#=ZS_SSLCertLocation#='${ZS_SSLCertLocation}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_SSLKeyLocation#='${ZS_SSLKeyLocation}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_SSLCALocation#='${ZS_SSLCALocation}'#g' /usr/local/etc/zabbix_server.conf
sed -i 's#=ZS_LoadModulePath#='${ZS_LoadModulePath}'#g' /usr/local/etc/zabbix_server.conf
if [ "$ZS_LoadModule" != "" ]; then
echo LoadModule=${ZS_LoadModule} >> /usr/local/etc/zabbix_server.conf
fi
# Frontend (PHP) database settings.
sed -i 's/ZS_DBHost/'${ZS_DBHost}'/g' /usr/local/src/zabbix/frontends/php/conf/zabbix.conf.php
sed -i 's/ZS_DBUser/'${ZS_DBUser}'/g' /usr/local/src/zabbix/frontends/php/conf/zabbix.conf.php
sed -i 's/ZS_DBPassword/'${ZS_DBPassword}'/g' /usr/local/src/zabbix/frontends/php/conf/zabbix.conf.php
sed -i 's/ZS_DBPort/'${ZS_DBPort}'/g' /usr/local/src/zabbix/frontends/php/conf/zabbix.conf.php
sed -i 's/ZS_DBName/'${ZS_DBName}'/g' /usr/local/src/zabbix/frontends/php/conf/zabbix.conf.php
# PHP runtime limits for the frontend.
sed -i 's/PHP_date_timezone/'${PHP_date_timezone}'/g' /etc/php.d/zz-zabbix.ini
sed -i 's/PHP_max_execution_time/'${PHP_max_execution_time}'/g' /etc/php.d/zz-zabbix.ini
sed -i 's/PHP_max_input_time/'${PHP_max_input_time}'/g' /etc/php.d/zz-zabbix.ini
sed -i 's/PHP_memory_limit/'${PHP_memory_limit}'/g' /etc/php.d/zz-zabbix.ini
sed -i 's/PHP_error_reporting/'${PHP_error_reporting}'/g' /etc/php.d/zz-zabbix.ini
# User-supplied config files win over everything generated above.
if [ -f /etc/custom-config/php-zabbix.ini ]; then
cp -f /etc/custom-config/php-zabbix.ini /etc/php.d/zz-zabbix.ini
fi
if [ -f /etc/custom-config/zabbix_server.conf ]; then
cp -f /etc/custom-config/zabbix_server.conf /usr/local/etc/zabbix_server.conf
fi
}
# Patch the sendmail alert script, replacing its placeholder recipient
# and SMTP credentials with the values from the environment.
email_setup() {
sed -i 's/default@domain.com/'${ZABBIX_ADMIN_EMAIL}'/g' /usr/local/share/zabbix/alertscripts/zabbix_sendmail.sh
sed -i 's/default.smtp.server.com/'${ZABBIX_SMTP_SERVER}'/g' /usr/local/share/zabbix/alertscripts/zabbix_sendmail.sh
sed -i 's/default.smtp.username/'${ZABBIX_SMTP_USER}'/g' /usr/local/share/zabbix/alertscripts/zabbix_sendmail.sh
sed -i 's/default.smtp.password/'${ZABBIX_SMTP_PASS}'/g' /usr/local/share/zabbix/alertscripts/zabbix_sendmail.sh
}
# Inject the user-provided Slack webhook URL into the Slack alert script.
# Uses '#' as the sed delimiter because the URL contains slashes.
slack_webhook() {
sed -i 's#SLACK_WEBHOOK#'$SLACK_WEBHOOK'#g' /usr/local/share/zabbix/alertscripts/zabbix_slack.sh
}
####################### End of default settings #######################
# Zabbix default sql files
ZABBIX_SQL_DIR="/usr/local/src/zabbix/database/mysql"
# load DB config from custom config file if exist
if [ -f /etc/custom-config/zabbix_server.conf ]; then
FZS_DBPassword=$(grep ^DBPassword= /etc/custom-config/zabbix_server.conf | awk -F= '{print $2}')
# BUG FIX: this test used to read "$VAR" (always empty), so a custom
# DBPassword was silently ignored.
if [ ! -z "$FZS_DBPassword" ]; then
export ZS_DBPassword=$FZS_DBPassword
fi
FZS_DBUser=$(grep ^DBUser= /etc/custom-config/zabbix_server.conf | awk -F= '{print $2}')
if [ ! -z "$FZS_DBUser" ]; then
export ZS_DBUser=$FZS_DBUser
fi
FZS_DBHost=$(grep ^DBHost= /etc/custom-config/zabbix_server.conf | awk -F= '{print $2}')
if [ ! -z "$FZS_DBHost" ]; then
export ZS_DBHost=$FZS_DBHost
fi
FZS_DBPort=$(grep ^DBPort= /etc/custom-config/zabbix_server.conf | awk -F= '{print $2}')
if [ ! -z "$FZS_DBPort" ]; then
export ZS_DBPort=$FZS_DBPort
fi
# BUG FIX: the config file key is "DBName=", not "ZS_DBName=" (matches
# the other keys grepped above).
FZS_DBName=$(grep ^DBName= /etc/custom-config/zabbix_server.conf | awk -F= '{print $2}')
if [ ! -z "$FZS_DBName" ]; then
export ZS_DBName=$FZS_DBName
fi
fi
log "Preparing server configuration"
update_config
log "Config updated."
log "Enabling Logging and pid management."
logging
system_pids
fix_permissions
log "Done"
# wait 120sec for DB
retry=24
until mysql -u ${ZS_DBUser} -p${ZS_DBPassword} -h ${ZS_DBHost} -P ${ZS_DBPort} -e "exit" &>/dev/null
do
log "Waiting for database, it's still not available"
retry=$((retry - 1))
if [ $retry -eq 0 ]; then
error "Database is not available!"
exit 1
fi
sleep 5
done
log "Checking if Database exists or fresh install"
if ! mysql -u ${ZS_DBUser} -p${ZS_DBPassword} -h ${ZS_DBHost} -P ${ZS_DBPort} -e "use ${ZS_DBName};" &>/dev/null; then
warning "Zabbix database doesn't exists. Installing and importing default settings"
log "$(create_db)"
log "Database and user created, importing default SQL"
log "$(import_zabbix_db)"
log "Import finished, starting"
else
log "Zabbix database exists, starting server"
fi
log "Editing Admin Email Server Settings"
email_setup
log "Email server settings updated."
log "Adding Slack integration and webhook provided by the user"
slack_webhook
log "Slack configuration updated"
zabbix_agentd -c /usr/local/etc/zabbix_agentd.conf
| true |
675c61f295d9dfb0b109ee823bbec139f0efde1a
|
Shell
|
DavidUnzue/html-diff-tool
|
/src/markdown-format-wdiff
|
UTF-8
| 687 | 3.140625 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Markdown-wdiff -- format diff of Markdown files with decoration
# Usage:
# wdiff old.md new.md | markdown-format-wdiff
# git diff --word-diff origin/master -- README.md docs/tutorial/README.md | markdown-format-wdiff
#
# Author: Jaeho Shin <netj@cs.stanford.edu>
# Created: 2013-11-18
set -eu
# word diff the given unified diff as input, and format it by hunks
sed '
# format ins/del of words
s|\[-|<del class="del">|g; s|-]|</del>|g
s|{+|<ins class="ins">|g; s|+}|</ins>|g
'
# attach a small stylesheet
echo '
<style>
.del,.ins{ display: inline-block; margin-left: 0.5ex; }
.del { background-color: #fcc; }
.ins{ background-color: #cfc; }
</style>
'
| true |
bb87196cfb9d1ee83451de147b3f3bfaa33364d1
|
Shell
|
opme976/bash-opgaver
|
/12.5.sh
|
UTF-8
| 351 | 3.015625 | 3 |
[] |
no_license
|
#!/bin/bash
#**************
# Script author: Tobias Nielsen
# Exercise: 12.5
# Description: divide two numbers, checking that the larger one is the
# dividend (per the original Danish description: "division af to tal med
# tjek at første er større end").
# BUG FIX: the original used `$a=$((...))` (not an assignment -- it tried
# to execute the result as a command) and compared the unset variable
# `$a` as a string, so nothing was ever computed.
echo -n "skriv det tal som der skal divideres med "
read -r TAL1
echo -n "skriv det andet tal "
read -r TAL2
# Always divide the larger number by the smaller one (integer division).
# NOTE(review): input of 0 still causes a division-by-zero error, as in
# the original design -- confirm whether a guard is wanted.
if (( TAL1 >= TAL2 )); then
a=$((TAL1 / TAL2))
else
a=$((TAL2 / TAL1))
fi
#echo "$((TAL1/TAL2))"
echo "$a"
| true |
73e4fc8ed345a7aed0b90d016b69c143de639da5
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/armake-git/PKGBUILD
|
UTF-8
| 689 | 2.78125 | 3 |
[] |
no_license
|
# Maintainer: Felix Wiegand <koffeinflummi@gmail.com>
# PKGBUILD for the VCS (git) package of armake.  Executed by makepkg,
# which supplies $srcdir/$pkgdir and calls pkgver()/build()/package().
_pkgname="armake"
pkgname="${_pkgname}-git"
pkgver=0.5.r0.g68dbdc7
pkgrel=1
pkgdesc="An open-source implementation of the Arma modding tools."
arch=('i686' 'x86_64')
url="https://github.com/KoffeinFlummi/armake"
license=('GPL2')
depends=('openssl')
optdepends=()
conflicts=('armake')
source=("git+https://github.com/KoffeinFlummi/armake.git")
sha256sums=('SKIP')
# Derive a version like 0.5.r0.g68dbdc7 from `git describe`:
# drop the leading "v", prefix the commit count with "r", dashes -> dots.
pkgver() {
cd "${srcdir}/${_pkgname}"
git describe --long --tags | tail -c +2 | sed 's/\([^-]*-g\)/r\1/;s/-/./g'
}
build() {
cd "${srcdir}/${_pkgname}"
make
}
# Stage the install into $pkgdir via DESTDIR (never the live system).
package() {
cd "${srcdir}/${_pkgname}"
mkdir -p "${pkgdir}/usr/bin"
make DESTDIR="${pkgdir}" install
}
| true |
c92d5feb50d1b45f24d643b14eb127e7be979767
|
Shell
|
C2SM-RCM/buildenv
|
/package_builder/build_libgrib_api.sh
|
UTF-8
| 7,381 | 3.9375 | 4 |
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env bash
# exitError <code> <line> <message>
# Emit a three-line diagnostic on stderr and terminate the whole
# script with the given numeric exit code.
exitError()
{
    {
        echo "ERROR $1: $3"
        echo "ERROR LOCATION=$0"
        echo "ERROR LINE=$2"
    } 1>&2
    exit $1
}
# ---- Command-line handling and build setup ----
# NOTE(review): TEMP=$@ is unquoted, so arguments containing spaces would be
# re-split by the eval below — confirm inputs never contain whitespace.
TEMP=$@
eval set -- "$TEMP --"
fwd_args=""
# COSMO Resources repository default
resources_repo="git@github.com:C2SM-RCM/libgrib-api-cosmo-resources.git"
# COSMO Resources version default
resources_version="master"
# Compiler target default
compiler_target="all"
# Consume the options we know; everything else is collected in fwd_args.
# NOTE(review): --thread_safe takes no value yet does `shift 2`, which eats
# the following argument — confirm this is intended.
while true; do
case "$1" in
--dir|-d) package_basedir=$2; shift 2;;
--idir|-i) install_dir=$2; shift 2;;
--local|-l) install_local="yes"; shift;;
--compiler|-c) compiler_target=$2; shift 2;;
--jasper_dir|-j) jasper_dir=$2; shift 2;;
--resources_version|-r) resources_version=$2; shift 2;;
--resources_repo) resources_repo=$2; shift 2;;
--thread_safe|-n) thread_safe=yes; shift 2;;
-- ) shift; break ;;
* ) fwd_args="$fwd_args $1"; shift ;;
esac
done
# help_enabled is expected to be set by the surrounding build framework.
if [[ "${help_enabled}" == "yes" ]]; then
echo "Available Options for libgrib:"
echo "* --local |-l {install locally} Default=No"
echo "* --compiler |-c {compiler} Default=all"
echo "* --jasper_dir |-j {jasper installation dir} Default=install_path/libjasper"
echo "* --resources_version |-r {resources version} Default=master (git object: branch, tag, etc..)"
echo "* --resources_repo |-r {resources repository} COSMO Definitions Git Repository"
echo " Default=git@github.com:MeteoSwiss-APN/libgrib-api-cosmo-resources.git"
echo "* --thread_safe |-n {thread_safe mode} Default=False"
exit 0
fi
# Mandatory-argument validation.
if [[ -z ${package_basedir} ]]; then
exitError 4201 ${LINENO} "package basedir has to be specified"
fi
if [[ -z ${install_dir} ]]; then
exitError 4202 ${LINENO} "package install dir has to be specified"
fi
if [[ -z ${resources_version} ]]; then
exitError 4203 ${LINENO} "resources_version has to be specified (coupling to libgrib)"
fi
# Setup
echo $@
# The current directory
base_path=$PWD
# setupDefaults comes from the sourced build environment (not in this file).
setupDefaults
# Obtain the GRIB API version components from the package itself.
source ${package_basedir}/version.sh
grib_api_version="${GRIB_API_MAJOR_VERSION}.${GRIB_API_MINOR_VERSION}.${GRIB_API_REVISION_VERSION}${GRIB_API_MCH_PATCH}"
if [[ -z "${jasper_dir}" ]]; then
jasper_dir="${install_dir}/libjasper"
fi
# Name of the COSMO Definitions Dir
cosmo_definitions_dir="cosmo_definitions"
# Temporary COSMO Definitions download location
cosmo_definitions_path=${base_path}/${cosmo_definitions_dir}
# Download the cosmo_definitions to the current base_path
# Clones (once) and checks out $resources_version, then verifies that the
# definitions RELEASE tag matches the GRIB API version being built.
get_cosmo_definitions()
{
echo ">>> Downloading the COSMO definitions"
if [ ! -d "${cosmo_definitions_path}" ]; then
git clone $resources_repo "${cosmo_definitions_path}"
if [ $? -ne 0 ]; then
exitError 4211 ${LINENO} "unable to obtain ${resources_repo}"
fi
fi
echo ">>> Checking out ${resources_version}"
pushd "${cosmo_definitions_path}" &>/dev/null
git fetch
git checkout "${resources_version}"
if [ $? -ne 0 ]; then
exitError 4212 ${LINENO} "unable to checkout ${resources_version}"
fi
popd
local cosmo_definitions_version_=$(cat $cosmo_definitions_path/RELEASE)
local grib_api_version_short=${GRIB_API_MAJOR_VERSION}.${GRIB_API_MINOR_VERSION}.${GRIB_API_REVISION_VERSION}
# The RELEASE file must start with "v<major.minor.revision>".
# NOTE(review): the error message references ${grib_api_version_} which is
# never set (likely meant ${grib_api_version}) — confirm.
if [[ "${cosmo_definitions_version_}" != "v${grib_api_version_short}"* ]]; then
exitError 4213 ${LINENO} "grib api ${grib_api_version_} and cosmo definitions version ${cosmo_definitions_version_} mismatch. "
fi
}
# Build grib for compiler and install_path
# $1 = compiler name, $2 = install prefix.  Uses setFortranEnvironment /
# writeModuleList / unsetFortranEnvironment from the sourced build env.
build_compiler_target()
{
local install_path=$2
# Set fortran environment
export compiler=$1
setFortranEnvironment
if [ $? -ne 0 ]; then
exitError 4331 ${LINENO} "Invalid fortran environment"
fi
# Set F77 compiler to F90
export F77=$FC
echo "Compiling and installing for $compiler (install path: $install_path)"
# Build config command
config_command="./configure --build=x86_64 --host=x86_64 --prefix=${install_path} --with-jasper=${jasper_dir} --enable-static enable_shared=no --disable-jpeg"
if [[ "${thread_safe}" == "yes" ]]; then
config_command="${config_command} --enable-pthread --enable-omp-packing"
fi
writeModuleList ${base_path}/modules.log loaded "FORTRAN MODULES" ${base_path}/modules_fortran.env
echo "Building for ${compiler} compiler"
# Go to the grib api dir to call make
pushd "${package_basedir}" &> /dev/null
echo ">>>Running distclean"
make distclean 2>/dev/null 1>/dev/null
echo ">>>Running autoreconf"
autoreconf &> build.log
echo ">>>Running configure ($config_command)"
$config_command &>> build.log
# NOTE(review): exitError is called with only 2 args here (no line number),
# so "$3" in its message will be empty — confirm intended.
if [ $? -ne 0 ]; then
cat build.log
exitError 4333 "Unable to configure libgrib_api with ${config_command}. See config.log for details."
fi
# need this for daint from Nov. 2019 onwards
echo ">>> Running automake --add-missing"
automake --add-missing
echo ">>>Compiling $packageName (make)"
make &>> build.log
if [ $? -ne 0 ]; then
cat build.log
exitError 4334 "Unable to compile libgrib_api."
fi
# "make check" is known to fail for Cray on the kesch host — skip it there.
if [[ "${compiler}" != "cray" && "$(hostname)" != kesch* ]] ; then
echo ">>>Checking (make check)"
make check &>> build.log
if [ $? -ne 0 ]; then
cat build.log
exitError 4335 "Check failed."
fi
else
echo ">>> Check ignored for CRAY on Kesch"
fi
unsetFortranEnvironment
popd &> /dev/null
}
# Install the package
# $1 = install prefix.  Wipes the prefix, runs `make install`, copies the
# COSMO definitions and module env, and writes a configuration.sh with the
# GRIB environment variables pointing into the new prefix.
install_to_target()
{
local install_path=$1
pushd "${package_basedir}" &> /dev/null
echo ">>>Purging ${install_path}"
rm -rf ${install_path}
echo ">>>Installing to ${install_path}"
make install &> install.log
if [ $? -ne 0 ]; then
cat build.log
cat install.log
exitError 4341 "Installation failed."
fi
popd
cp -a ${cosmo_definitions_path} ${install_path}
# Copy module files
cp ${base_path}/modules_fortran.env ${install_path}/modules.env
cat > ${install_path}/configuration.sh <<-EOF
# Generated by the package script
export GRIB_DEFINITION_PATH=${install_path}/cosmo_definitions/definitions/:${install_path}/share/grib_api/definitions/
export GRIB_SAMPLES_PATH=${install_path}/cosmo_definitions/samples/
EOF
}
# Build
# Driver: fetch definitions, compute the install prefix, then build and
# install either a single compiler target or all of them.
get_cosmo_definitions
resource_version=$(cat $cosmo_definitions_path/RELEASE)
if [[ ${install_local} == "yes" ]]; then
install_path_prefix_="${base_path}/install"
else
install_path_prefix_="${install_dir}/libgrib_api/${resource_version}"
fi
if [ "${compiler_target}" != "all" ]; then
if [ "${install_local}" != "yes" ] ; then
install_path_prefix_="${install_path_prefix_}/${compiler_target}"
fi
build_compiler_target "${compiler_target}" "${install_path_prefix_}"
install_to_target "${install_path_prefix_}"
else
# $compilers is expected from the sourced build environment.
# NOTE(review): the build uses a per-compiler subdir but install_to_target
# receives the bare prefix — confirm this asymmetry is intended.
for c_ in ${compilers[@]}; do
build_compiler_target "${c_}" "${install_path_prefix_}/$c_/"
install_to_target "${install_path_prefix_}"
done
fi
echo ">>> Finished"
| true |
8ae105fea240a438b0fe939f3d6d87e519564e8e
|
Shell
|
benaux/bkb-env
|
/kickstart.sh
|
UTF-8
| 930 | 3.515625 | 4 |
[] |
no_license
|
# Snapshot the current working directory into a dated "kickstart" store
# located under the per-user/per-host box tree
# ($HOME/homedir_<user>_<host>/userboxes_<user>_<host>/kickstart-store_<host>.sec).
cwd=$(pwd)
here=$(basename $cwd)
today=$(date "+%F")
me=$(whoami)
host=$(hostname)
homedir=$HOME/homedir_${me}_${host}
[ -d "$homedir" ] || { echo "Err: no homedir under $homedir" ; exit 1; }
userboxes=$homedir/userboxes_${me}_${host}
[ -d "$userboxes" ] || { echo "Err: no userboxes dir under $userboxes" ; exit 1; }
kickstart_store=$userboxes/kickstart-store_${host}.sec
[ -d "$kickstart_store" ] || { echo "Err: no kickstart-store under $kickstart_store" ; exit 1; }
kickdir=$kickstart_store/kickstart_${today}
mkdir -p $kickdir
# Directory names containing dots are reduced to a short name:
# strip the last dot-suffix, keep the final dot-component, strip a dash-suffix.
case "$here" in
*.*)
heredom=${here%.*}
heredom=${heredom##*.}
herename=${heredom%-*}
rm -rf $kickdir/$herename
echo "Success:Copy $here to $kickdir/$herename"
cp -r $cwd $kickdir/$herename
;;
*)
rm -rf $kickdir/$here
echo "Success:Copy $here to $kickdir/$here"
echo ""
cp -r $cwd $kickdir/$here
;;
esac
| true |
48b129d860dafe55abd08d379a29365472cf1840
|
Shell
|
ANTsX/ANTs
|
/Scripts/asymmetry.sh
|
UTF-8
| 3,138 | 3.0625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Registration-based asymmetry mapping using the ANTs tools: register a
# moving image (-m) to a symmetric template (-f) directly ("L") and via a
# reflection ("R"), then compare the log-Jacobians of the two warps.
usage=" $0 -d 3 -f symmetric_template.nii.gz -m moving.nii.gz -o output_prefix "
:<<supercalifragilisticexpialidocious
here is a first pass at registration-based asymmetry based on mapping an image A to a symmetric-template.
note - care must be taken to look at prefix_L_aff.nii.gz to make sure that it's correct - we dont yet have an automated way to verify this - though we know the theory. if prefix_L_aff.nii.gz is not well aligned, results will be invalid.
supercalifragilisticexpialidocious
A=A ; B=B ; prefix=J ; dim=3 ; a=1
if [[ $# -eq 0 ]] ; then echo $usage ; exit 0 ; fi
# NOTE(review): "h" is declared with a trailing ":" in the optstring, so -h
# requires an argument — confirm that is intended.
while getopts ":d:f:m:o:h:a:" opt; do
case $opt in
d)
echo "-d $OPTARG" >&2
dim=$OPTARG
;;
f)
echo "-f $OPTARG" >&2
A=$OPTARG
;;
h)
echo $usage
exit 0;
;;
m)
echo "-m $OPTARG" >&2
B=$OPTARG
;;
o)
echo "-o $OPTARG " >&2
prefix=$OPTARG
;;
a)
echo "-a $OPTARG " >&2
a=$OPTARG
;;
\?)
echo "Usage: $usage " >&2
exit 0
;;
esac
done
echo inputs: $A $B $prefix $dim
if [[ ${#dim} -lt 1 ]] ; then echo must provide input dimension $dim ; echo $usage ; exit 0 ; fi
if [[ ${#prefix} -lt 3 ]] ; then echo must provide output prefix $prefix ; echo $usage ; exit 0 ; fi
if [[ ! -s $A ]] || [[ ! -s $B ]] ; then echo inputs: $A $B $prefix ; echo $usage ; exit 1 ; fi
#####################################################
# Registration parameter presets (iterations, metric, transform model).
reg=antsRegistration
uval=0
affits=999x999x1550x200
rig=" -t rigid[ 0.2 ] -c [ $affits ,1.e-7,20 ] -s 3x2x1x0 -f 8x4x2x1 -u $uval -l 0 "
aff=" -t affine[ 0.2 ] -c [ $affits ,1.e-7,20 ] -s 3x2x1x0 -f 8x4x2x1 -u $uval -l 0 "
metparams=" 1 , 32, regular , 0.5 "
synits=220x220x100x50 #BA
# synits=0x0x0x0 #BA
dtx="syn[ 0.25, 3, 0. ]"
ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS=2
export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS
#####################################################
# Build the reflection matrix along axis $a, then reflect B onto itself.
ImageMath $dim ${prefix}_reflection.mat ReflectionMatrix $A $a
antsApplyTransforms -d $dim -t ${prefix}_reflection.mat -i $B -o ${prefix}_reflect.nii.gz -r $B
imgs=" $A, $B "
antsAffineInitializer ${dim} $A $B ${prefix}_init.mat 5 0.25 0 3
# Rigid+affine registration of B to template A ("L" side).
$reg -d $dim -r ${prefix}_init.mat\
-m mattes[ $imgs, $metparams ] $rig \
-m mattes[ $imgs, $metparams ] $aff \
-u $uval -b 0 -z 1 \
-o [ ${prefix}_L,${prefix}_L_aff.nii.gz]
# Deformable (SyN) refinement on the "L" side.
$reg -d $dim -r ${prefix}_L0GenericAffine.mat \
-m mattes[ $imgs , 1 , 32 ] \
-t $dtx \
-c [ ${synits},1.e-8,10 ] \
-s 3x2x1x0 \
-f 8x4x2x1 \
-u $uval -b 0 -z 1 \
-o [ ${prefix}_L,${prefix}_L.nii.gz]
# Same deformable registration but composed with the reflection ("R" side).
$reg -d $dim -r ${prefix}_reflection.mat -r ${prefix}_L0GenericAffine.mat \
-m mattes[ $imgs , 1 , 32 ] \
-t $dtx \
-c [ ${synits},1.e-8,10 ] \
-s 3x2x1x0 \
-f 8x4x2x1 \
-u $uval -b 0 -z 1 \
-o [ ${prefix}_R,${prefix}_R.nii.gz]
##########################################################
# Asymmetry map = difference of the L and R log-Jacobians.
ANTSJacobian $dim ${prefix}_R1Warp.nii.gz ${prefix}_R 1
ANTSJacobian $dim ${prefix}_L1Warp.nii.gz ${prefix}_L 1
ImageMath $dim ${prefix}_asym.nii.gz - ${prefix}_Llogjacobian.nii.gz ${prefix}_Rlogjacobian.nii.gz
| true |
b2c5b8812f1c076436c0d26cc8505bf1149257b9
|
Shell
|
XayOn/biibot
|
/biibot.bash
|
UTF-8
| 2,246 | 3.96875 | 4 |
[] |
no_license
|
#!/bin/bash -
# This somehow demonstrates "Quote! Unless you don't want to...
# Irc servers and channels can't have spaces, so we're storing them in an associative array and using parameter expansion to make it a multilevel associative array, even three levels once we tr ":" " ".
# IRC bot built on top of ii(1): per server/channel, it tails ii's "out"
# FIFO file, dispatches whitelisted commands addressed to the bot's nick,
# and writes responses back into the "in" file.
declare -x -A options
source iibot_external.sh
# Config search order: user config, system config, local config.
{ source ~/.iibot.cfg || source /etc/iibot.cfg || source iibot.cfg || { echo "Could not load config, exitting"; exit 1; }; } 2>/dev/null
# in_array {needle} {haystack...} — succeeds when needle equals an element.
in_array(){ first=$1; shift; for a in $@; do [[ $first == $a ]] && return ; done; return 1; }
log(){ echo $@; } # for now...
# wait_for_commands {server} {channel} — busy-wait for ii's channel files,
# then follow the channel output and execute commands directed at the bot.
wait_for_commands(){
server=$1; channel=$2;
while [ '1' ]; do [[ -e "${options[dir]}/$server/#$channel/in" ]] && break; done
tail -n0 -f "${options[dir]}/$server/#$channel/out" | while read date time owner isme command arguments; do
owner="$(printf -- $owner|tr -d '<>')"; nickd="${options[nick]}"; isme=$(echo $isme|tr -d ":");
echo "$isme"|grep "$nickd" &>/dev/null && {
in_array $command ${allowed_commands[@]} && {
log Executing $command $server $channel $isme $owner $arguments >> "${options[dir]}/$server/#$channel/log";
$command "$server" "$channel" "$owner" "$arguments" >> "${options[dir]}/$server/#$channel/in";
} || log "$command not allowed" >> "${options[dir]}/$server/#$channel/log";
} || log "$command not directed to me, directed to -$isme- instead of -$nickd- " >> "${options[dir]}/$server/#$channel/log";
done
}
mkdir ${options[dir]} &>/dev/null;
# options[servers] entries look like "<name>:<host>" (split via tr below).
for server in ${options[servers]}; do
current_server=( $(echo $server|tr ":" " ") )
echo "Connecting to ${current_server[@]}";
echo "Starting allowed commands are ${allowed_commands[@]}"
ii -i ${options[dir]} -s ${current_server[1]} -n ${options[nick]} &
while [ '1' ]; do [[ -e "${options[dir]}/${current_server[1]}/out" ]] && break; done
echo $! > "${options[dir]}/${current_server[1]}/run"
for channel in ${options[channels_${current_server[0]}]}; do
echo "/join #$channel" >> "${options[dir]}/${current_server[1]}/in"
{ wait_for_commands ${current_server[1]} $channel; } &
done
echo "$$" > "${options[dir]}/current_pid"
done
| true |
27a494516db7991dfa531f938815b1b6097503df
|
Shell
|
levithomason/grok
|
/topics/nvm/install.sh
|
UTF-8
| 186 | 2.765625 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# (Re)install nvm: wipe any existing ~/.nvm, clone the upstream repo,
# and pin it to the v0.39.1 tag, restoring the caller's CWD afterwards.
set -e
original_dir=$(pwd)
rm -rf ~/.nvm
# $_ holds the last argument of the previous command, i.e. ~/.nvm here.
git clone https://github.com/creationix/nvm.git ~/.nvm && cd $_
git checkout v0.39.1
cd ${original_dir}
unset original_dir
| true |
93a999a95374208408b42a463fb1e4b591d008fe
|
Shell
|
philips-labs/vault-ca
|
/issue-cert.sh
|
UTF-8
| 793 | 3.21875 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Issue a TLS certificate for common name $2 from the local Vault PKI,
# using keybase-stored unseal material selected via $1.
# get_keybase_path/get_root_token come from the sourced functions.sh.
set -e
source ${BASH_SOURCE%/*}/functions.sh
keybase_path=$(get_keybase_path $1)
if [ -z "$2" ] ; then
>&2 echo please provide the certificate you would like to request
>&2 echo e.g. marco.philips.dev
exit 1
fi
export VAULT_ADDR=http://127.0.0.1:8200
echo
${BASH_SOURCE%/*}/unseal.sh $1
export VAULT_TOKEN=$(get_root_token $keybase_path)
# Swap the root token for a short-lived, narrowly-scoped issuing token.
export VAULT_TOKEN=$(vault token create -policy=issue-cert-philips-dot-dev -format=json -ttl=5m | jq -r .auth.client_token)
cert_data=$(vault write pki_int/issue/philips-dot-dev common_name="$2" ttl="24h" format="pem" -format=json | jq .data)
mkdir -p certs
# Write cert + issuing CA chain, and the private key separately.
# NOTE(review): the key file is created with default umask permissions —
# consider restricting to 0600.
echo $cert_data | jq -r '.certificate' > certs/$2.crt
echo $cert_data | jq -r '.issuing_ca' >> certs/$2.crt
echo $cert_data | jq -r '.private_key' > certs/$2.key
| true |
36e76d1127277ebf238728952e47c79572e051fa
|
Shell
|
gene1wood/genes-laptop
|
/cookbooks/wendy/files/default/homedir/bin/show
|
UTF-8
| 743 | 3.71875 | 4 |
[] |
no_license
|
#!/bin/bash
# Pretty-print crypto material.  $1 may be:
#  * a local PEM file (key / CSR / certificate) — decoded with openssl;
#  * an https URL — the TLS handshake details are extracted via curl;
#  * anything else — treated as a hostname and its cert fetched on :443.
if [ -e "$1" ]; then
# Sniff the PEM header to pick the matching openssl subcommand.
if grep -- ' PRIVATE KEY-' "$1" >/dev/null; then
action=rsa
elif grep -- '-BEGIN CERTIFICATE REQUEST-' "$1" >/dev/null; then
action=req
elif grep -- '-BEGIN CERTIFICATE-' "$1" >/dev/null; then
action=x509
elif grep -- '-BEGIN TRUSTED CERTIFICATE-' "$1" >/dev/null; then
action=x509
else
action=unknown
fi
if [ "$action" = "unknown" ]; then
echo "Couldn't detect the file type of $1"
exit 1
fi
echo -n | openssl $action -in "$1" -noout -text
elif [ "${1:0:5}" = "https" ]; then
# Print only the "* ..." lines from the SSL connection section onward.
curl --insecure -v $1 2>&1 | awk 'BEGIN { cert=0 } /^\* SSL connection/ { cert=1 } /^\*/ { if (cert) print }'
else
# echo -n on stdin makes s_client disconnect immediately after handshake.
echo -n | openssl s_client -connect $1:443 | openssl x509 -text
fi
| true |
16cb2a66e55377945d3371d6e76117d2592d55c2
|
Shell
|
lybird300/CPTAC3-RNA-related-pipeline
|
/rna_pipeline.sh
|
UTF-8
| 3,281 | 3.1875 | 3 |
[] |
no_license
|
#!/bin/bash
# RNA-seq pipeline for one sample: QC, alignment (MapSplice/TopHat),
# transcript assembly (Cufflinks), and fusion calling with three tools
# (STAR-Fusion, EricScript, Integrate) followed by merging and filtering.
# Usage: rna_pipeline.sh <sample> <fastq_R1.gz> <fastq_R2.gz>
sample=$1
fq_1=$2
fq_2=$3
cpu=8
cd ${sample}
##Examine quality metrics for sequencing reads using fastqc
mkdir -p FASTQC
fastqc -o FASTQC -f fastq -t 1 $fq_1 $fq_2
##Mapping
#MapSplice
mkdir -p MAPSPLICE
bowtie_ref=/gscmnt/gc2521/dinglab/qgao/Reference/GRCh37.75/Bowtie/hg19
chr_dir=/gscmnt/gc2521/dinglab/qgao/Reference/GRCh37.75/Chromosome
gtf=/gscmnt/gc2521/dinglab/qgao/Reference/GRCh37.75/GTF/Homo_sapiens.GRCh37.75.gtf
# MapSplice needs uncompressed FASTQ; temporary copies are removed below.
zcat $fq_1 > MAPSPLICE/left.fastq
zcat $fq_2 > MAPSPLICE/right.fastq
python /gscmnt/gc2521/dinglab/qgao/Tools/MapSplice-v2.2.1/mapsplice.py -p $cpu -o MAPSPLICE --bam --gene-gtf $gtf -c $chr_dir -x $bowtie_ref -1 MAPSPLICE/left.fastq -2 MAPSPLICE/right.fastq
rm -f MAPSPLICE/left.fastq MAPSPLICE/right.fastq
samtools sort -m 20G MAPSPLICE/alignments.bam MAPSPLICE/sorted.alignments
##Cufflinks
#MapSplice bam — de-novo assembly guided by the reference GTF (-g)
gtf=/gscmnt/gc2521/dinglab/qgao/Reference/GRCh37.75/GTF/Homo_sapiens.GRCh37.75.gtf
cufflinks -o CUFFLINKS_wMAPSPLICE -p $cpu --library-type fr-firststrand -g $gtf MAPSPLICE/sorted.alignments.bam
#MapSplice bam — quantification restricted to reference transcripts (-G)
gtf=/gscmnt/gc2521/dinglab/qgao/Reference/GRCh37.75/GTF/Homo_sapiens.GRCh37.75.gtf
cufflinks -o CUFFLINKS_wMAPSPLICE_GTF -p $cpu --library-type fr-firststrand -G $gtf MAPSPLICE/sorted.alignments.bam
##Generate BED for expressed transcripts
binDir=/gscmnt/gc2521/dinglab/qgao/Scripts/RNA
mkdir -p TRANSCRIPT_BED
perl $binDir/Convert_GTF_To_Bed12.pl CUFFLINKS_wMAPSPLICE/transcripts.gtf exon TRANSCRIPT_BED/${sample}.bed
##Fusion calling
#STAR-Fusion
genome_lib_dir=/gscmnt/gc2521/dinglab/qgao/Reference/GRCh37.75/FusionDatabase/GRCh37_gencode_v19_CTAT_lib_July192017/ctat_genome_lib_build_dir
mkdir -p STAR_FUSION
STAR-Fusion --left_fq $fq_1 --right_fq $fq_2 --CPU $cpu --annotate --examine_coding_effect --genome_lib_dir $genome_lib_dir --output_dir STAR_FUSION
#EricScript
genome_db=/gscmnt/gc2521/dinglab/qgao/Reference/GRCh37.75/FusionDatabase/ericscript_db_homosapiens_ensembl73
ericscript.pl -o ERICSCRIPT --remove -ntrim 0 --refid homo_sapiens -db $genome_db -p $cpu -name $sample $fq_1 $fq_2
#Integrate (needs a TopHat alignment first)
bowtie2_ref=/gscmnt/gc2521/dinglab/qgao/Reference/GRCh37.75/Bowtie2/hg19
gtf=/gscmnt/gc2521/dinglab/qgao/Reference/GRCh37.75/GTF/Homo_sapiens.GRCh37.75.gtf
mkdir -p TOPHAT
tophat -o TOPHAT -p $cpu -G $gtf $bowtie2_ref $fq_1 $fq_2
bwts=/gscmnt/gc2521/dinglab/qgao/Reference/GRCh37.75/FusionDatabase/Integrate/bwts
bam_dir=/gscmnt/gc2521/dinglab/qgao/RNA/Batch_20171110/$sample/TOPHAT/
fasta=/gscmnt/gc2521/dinglab/qgao/Reference/GRCh37.75/FusionDatabase/Integrate/hg19.fa
annot=/gscmnt/gc2521/dinglab/qgao/Reference/GRCh37.75/FusionDatabase/Integrate/annot.ensembl.GRCh37.txt
mkdir -p Integrate
samtools index $bam_dir/accepted_hits.bam
samtools index $bam_dir/unmapped.bam
Integrate fusion -reads Integrate/reads.txt -sum Integrate/summary.tsv -ex Integrate/exons.tsv -bk Integrate/breakpoints.tsv -vcf Integrate/bk_sv.vcf -bedpe Integrate/fusions.bedpe $fasta $annot $bwts $bam_dir/accepted_hits.bam $bam_dir/unmapped.bam
#Merge three tools
mkdir -p Fusion
perl combine_call.pl $sample STAR_FUSION/star-fusion.fusion_predictions.abridged.annotated.coding_effect.tsv ERICSCRIPT/${sample}.results.total.tsv Integrate/summary.tsv Integrate/breakpoints.tsv Fusion
#Filtering
perl filter.pl Fusion $sample
| true |
d5c7fcd84879f531e2f8c9075a58d053fe44293e
|
Shell
|
Vlacross/bash-tut
|
/floor/8_char.sh
|
UTF-8
| 418 | 3.59375 | 4 |
[] |
no_license
|
#!/bin/bash
# Random-string generator (TLDP code-along), restyled.
# Seed: first command-line argument when non-empty, else this script's PID.
str0=${1:-$$}

POS=2   # offset into the digest where extraction starts
LEN=8   # number of characters to extract
# Scramble the seed by piping it through md5sum twice, then slice
# LEN characters starting at POS out of the resulting digest line.
digest=$( echo "$str0" | md5sum | md5sum )
randstring=${digest:POS:LEN}
echo "$randstring"
exit $?
| true |
9e70f929503adbfe338c8aab38c3b81fa631458a
|
Shell
|
IMIO/imio.devpi
|
/run.sh
|
UTF-8
| 661 | 2.96875 | 3 |
[] |
no_license
|
#!/bin/bash
# Container entrypoint for devpi-server: initialize the state directory on
# first run, then start the server and stream its log to stdout.
set -e
set -x

# The .serverversion marker is written by devpi on initialization, so its
# absence means this is a fresh install.  (Fix: the original flag was named
# "initialize" with inverted meaning — "no" triggered initialization —
# which made the logic read backwards; renamed for clarity.  The path test
# is now quoted so a DEVPISERVER_SERVERDIR with spaces cannot mis-split.)
fresh_install=no
if [ ! -f "$DEVPISERVER_SERVERDIR/.serverversion" ]; then
    fresh_install=yes
fi

if [[ "$fresh_install" = "yes" ]]; then
    if [ -f "$DEVPISERVER_SERVERDIR/export/dataindex.json" ]; then
        # A previous export exists: restore it instead of starting empty.
        /srv/devpi/import.sh
    else
        /srv/devpi/bin/devpi-init
        /srv/devpi/bin/devpi index root/pypi mirror_use_external_urls=true
    fi
fi

# Serve plain HTTP only for the local development hostname.
outsideurl="https://${HOSTNAME}"
if [[ "$HOSTNAME" = "localhost:3141" ]]; then
    outsideurl="http://${HOSTNAME}"
fi

/srv/devpi/bin/devpi-server --start --host 0.0.0.0 --port 3141 --outside-url "${outsideurl}"
# Keep the container in the foreground by tailing the server log.
tail -f "$DEVPISERVER_SERVERDIR/.xproc/devpi-server/xprocess.log"
| true |
5d9771c610e173c74a907eade457371ad92244c7
|
Shell
|
FroggDev/BASH_froggLinuxWikiFamilyManager
|
/include/function/com.sh
|
UTF-8
| 1,860 | 3.953125 | 4 |
[] |
no_license
|
#func used to ask user to answer yes or no, return 1 or 0
# makeachoice {forThisQuestion}
# Interactive yes/no prompt (accepts English and French spellings).
# Returns 1 for yes, 0 for no; always returns 1 when $doAUTO=1.
# `warn` is defined elsewhere in this library.
makeachoice()
{
if [ $doAUTO = 1 ];then
return 1
fi
userChoice=0
echo " " #add a pre space to have a better display
while true; do
read -p " [ Q ] Do you wish to $1 ?" yn
case $yn in
y|Y|yes|YES|Yes|O|o|oui|OUI|Oui)userChoice=1;break;;
n|N|no|NO|No|non|NON|Non)userChoice=0;break;;
* )warn "'$yn' isn't a correct value, Please choose yes or no";;
esac
done
return $userChoice
}
#newaction {question} {title}
# Confirm a step with the user (via makeachoice on $2); abort the whole
# script when declined, otherwise display the section title $1.
# `warn` and `title` are defined elsewhere in this library.
newaction()
{
#ask user to continue
makeachoice "$2"
if [ $? = 0 ];then
warn "Script aborted by user"
exit 1
else
title "$1" "1"
fi
}
# trim {string}
# Strip leading and trailing spaces from the given string (to stdout).
# Fix: "$1" is now quoted — unquoted it was word-split, which collapsed
# interior whitespace runs and glob-expanded any wildcard characters.
trim()
{
echo "$1" | sed -e 's/^ *//' -e 's/ *$//'
}
# canExec {command}
# Succeeds (exit 0) when {command} is runnable in the current shell
# (binary, builtin, function or alias); fails otherwise.  All of
# `type`'s diagnostic output is discarded.
canExec()
{
type "$1" > /dev/null 2>&1
}
# exist {file(f)orFolder(d)} {inThePath}
# Probe with test's -f/-d operator built from $1.
# NB: return convention is inverted w.r.t. the usual shell one:
# returns 1 when the path exists (matching the probe), 0 when it doesn't.
exist()
{
if [ -$1 $2 ]; then
return 1
else
return 0
fi
}
#check if is a file or folder, return f or d
# isFile {FileOrFolder}
# Prints "f" for a regular file, "d" for a directory (on stdout).
# NB: the function's exit status is that of the *last* test, i.e. it is
# non-zero for regular files — callers must use the output, not $?.
isFile()
{
[ -f $1 ] && echo "f"
[ -d $1 ] && echo "d"
}
#string $1 end with string $2, return 1 if endwith $2
# endWith {string} {endString}
# NB: inverted return convention — 1 means "ends with", 0 means it doesn't.
# $2 is left unquoted in the pattern, so glob characters in it stay active
# (same behaviour as the original [[ ... == *$2 ]] form).
endWith()
{
case "$1" in
*$2) return 1 ;;
*) return 0 ;;
esac
}
#add / to folder string if is not existing at the end, return good format fold
# addSlashToFold {folder}
# Echoes the path unchanged when it already ends in "/", otherwise
# echoes it with a trailing "/" appended.
addSlashToFold()
{
case "$1" in
*/) echo "$1" ;;
*) echo "${1}/" ;;
esac
}
#test if an URL exist, return 1 if exist 0 if not
# urlExist {url}
# HEAD-requests the URL (curl --fail makes HTTP >= 400 a failure).
# NB: inverted return convention — 1 means reachable, 0 means not.
urlExist()
{
curl --output /dev/null --silent --head --fail "$1" && return 1 || return 0
}
#Test if str is in array return 1 if is in array
# testIsInArray {what} {inArray}
# Returns 1 when $1 occurs in $2, 0 otherwise (inverted convention).
# NOTE(review): $2 is treated as a plain string, so this is a *substring*
# test, not an element test — "ar" matches inside "bar"; confirm callers
# only pass whole-word values.
testIsInArray()
{
#replace $1 param by FOUND if has been found in $2 array
arrTmp=${2/${1}/FOUND}
#if both arrays are equals then return 0 else return 1
[ "${arrTmp[*]}" == "${2}" ] && return 0 || return 1
}
| true |
aafb7f41dac12f136fec1a3401a92ed1eff58903
|
Shell
|
chrisgzf/manjaro-linux
|
/01-update-mirrors
|
UTF-8
| 573 | 2.53125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Post-install setup for Manjaro: pick US mirrors, full system upgrade,
# install AUR helper + common tools, prepare /media/hdd, then print
# manual follow-up steps (NVIDIA driver, backup, mount).
sudo pacman-mirrors --country United_States
sudo pacman-mirrors -f 0
sudo pacman -Syyu --noconfirm
sudo pacman -S yaourt --noconfirm
yaourt -Syyua --noconfirm
sudo pacman -Sy base-devel --noconfirm
sudo pacman -S linux419-headers --noconfirm
sudo pacman -S timeshift --noconfirm
sudo pacman -S gnome-disk-utility --noconfirm
sudo mkdir -p /media/hdd
# NOTE(review): 777 makes the mount point world-writable — confirm intended.
sudo chmod -R 777 /media/hdd
echo "System Settings - Hardware Configuration - Install video-nvidia"
nvidia-smi
echo "Backup system using timeshift"
echo "mount hard drive to /media/hdd using gnome-disk-utility"
| true |
be5b8f831a308f4f271c27995d82e86086261046
|
Shell
|
DrakeM0/lab_3
|
/lab3_script.sh
|
UTF-8
| 335 | 2.703125 | 3 |
[] |
no_license
|
#!/bin/bash
# Authors : Drake Morley
# Date: 2/7/2020
# Prompt for a file and a search pattern, print the matching lines plus
# two canned statistics (phone-number count, '@' count), and append any
# geocities.com lines to email_results.txt.
echo "a filename: "
read numOne
echo "what you would like to search for "
read numTwo
# Bug fix: the pattern was single-quoted ('$numTwo'), so grep searched for
# the literal four characters "$numTwo" instead of the user's input.
# Double quotes expand the variable; `--` protects patterns starting with
# "-", and quoting the filename handles names containing spaces.
grep -P -- "$numTwo" "$numOne"
grep -c -P '303-\d{3}-\d{4}' "$numOne"
grep -c -P '@' "$numOne"
grep -P 'geocities.com' "$numOne" >> email_results.txt
| true |
da6b9d2d9cc27311ffb4cc595301bac117a11e00
|
Shell
|
saantiaguilera/fiuba-sisop-budget-scheduler
|
/src/Initep.sh
|
UTF-8
| 7,922 | 3.609375 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Initep.sh — environment initializer.  Configuration source and all
# user-facing (Spanish) message constants; %SCRIPT%/%PID%/%FILE% are
# placeholders substituted with sed at the point of use.
GRUPO="Grupo6"
#CONF_FILE="$GRUPO/dirconf/EPLAM.conf"
CONF_FILE="$GRUPO/dirconf/instalep.conf"
#### Messages ####
# Log-severity tags used as the -t argument of Logep.sh.
TYPE_INF="INF"
TYPE_ERR="ERR"
TYPE_WAR="WAR"
MSG_ENV_INITIALIZED="Ambiente ya inicializado, para reiniciar termine la sesión e ingrese nuevamente."
MSG_UNKNOWN_ENV_VAR="Se encontró una variable de entorno desconocida en \"EPLAM.config\". Vuelva a ejecutar Installep.sh e intente nuevamente."
MSG_MISSING_ENV_VAR="No se encontraron todas las variables requeridas en \"EPLAM.config\". Vuelva a ejecutar Installep.sh e intente nuevamente."
MSG_SCRIPT_WITHOUT_PERMISSIONS_WAR="El script %SCRIPT% no tiene permisos para ser ejecutado. Se intenta configurarlos."
MSG_SCRIPT_WITHOUT_PERMISSIONS_ERR="El script %SCRIPT% no tiene permisos para ser ejecutado. No se pudo efectuar la corrección."
MSG_FILE_WITHOUT_PERMISSIONS_WAR="El archivo %FILE% no tiene permisos de lectura. Se intenta configurarlos."
MSG_FILE_WITHOUT_PERMISSIONS_ERR="El archivo %FILE% no tiene permisos de lectura. No se pudo efectuar la corrección."
MSG_SYSTEM_INITIALIZED="Estado del Sistema: INICIALIZADO"
MSG_ASK_DEMONEP_ACTIVATION="¿Desea efectuar la activación de Demonep? (S/n)"
MSG_DEMONEP_ACTIVATED="El proceso Demonep ha sido activado."
MSG_DEMONEP_PID="Demonep corriendo bajo el no.: %PID%."
MSG_DEMONEP_MANUAL_STOP="Para detener manualmente al proceso Demonep utilice el comando \"kill %PID%\"."
MSG_DEMONEP_MANUAL_ACTIVATION="Para activar al demonio manualmente puede ingresar \"bash %SCRIPT% &\"."
MSG_ANSWER_FAILURE="Responda por Sí (S) o por No (N)"
MSG_INITEP_FINISHED="Proceso Initep finalizado exitosamente."
#######################################
# Write log message
# Globals:
# None
# Arguments:
# message type_of_message
# Returns:
# None
#######################################
# Delegates to Logep.sh (in $DIRBIN) tagging the component as "Initep";
# $1 = message text, $2 = severity tag ($TYPE_INF/$TYPE_ERR/$TYPE_WAR).
function log_message() {
bash "$DIRBIN/Logep.sh" -c "Initep" -m "$1" -t "$2"
}
#######################################
# Check previous environment initialization
# Globals:
# None
# Arguments:
# None
# Returns:
# 1 if initialized, 0 if not.
#######################################
# Guard against double initialization: ENV=1 is exported by a previous
# successful run (see init_environment), ${ENV-0} defaults to 0 when unset.
function check_previous_init() {
EXIT_CODE=0
if [ ${ENV-0} -eq 1 ]
then
log_message "$MSG_ENV_INITIALIZED" "$TYPE_ERR"
echo "$MSG_ENV_INITIALIZED"
EXIT_CODE=1
fi
return $EXIT_CODE
}
#######################################
# Extract directory out of configuration file line
# Globals:
# None
# Arguments:
# dir_variable line
# Returns:
# None
#######################################
# Assign to the variable named $1 the value after "=" in the config line $2.
# Bug fix: '$2' was single-quoted, so the literal two characters "$2"
# reached cut (which contains no "=", so cut echoed them back) and the
# directory value was never extracted.  printf -v also avoids the
# double-expansion hazard of the original eval.
function extract_dir() {
printf -v "$1" '%s' "$(echo "$2" | cut -d '=' -f 2)"
}
#######################################
# Initialize environment variables
# Globals:
# GRUPO, BIN_DIR, MAE_DIR, REC_DIR, OK_DIR, PROC_DIR,
# INFO_DIR, LOG_DIR, NOK_DIR, ENVIRONMENT
# Arguments:
# None
# Returns:
# 1 if successful, 0 if not.
#######################################
# Parse $CONF_FILE line by line (DIRXXX=value entries), reject unknown
# variables, require all of them to be non-empty, then export the lot and
# mark the session initialized via ENV=1.
function init_environment() {
EXIT_CODE=0
DIRBIN=""
DIRMAE=""
DIRREC=""
DIROK=""
DIRPROC=""
DIRINFO=""
DIRLOG=""
DIRNOK=""
while read -r LINE; do
case $LINE in
DIRBIN*) DIRBIN=$(echo "$LINE" | cut -d "=" -f 2);;
DIRMAE*) DIRMAE=$(echo "$LINE" | cut -d "=" -f 2);;
DIRREC*) DIRREC=$(echo "$LINE" | cut -d "=" -f 2);;
DIROK*) DIROK=$(echo "$LINE" | cut -d "=" -f 2);;
DIRPROC*) DIRPROC=$(echo "$LINE" | cut -d "=" -f 2);;
DIRINFO*) DIRINFO=$(echo "$LINE" | cut -d "=" -f 2);;
DIRLOG*) DIRLOG=$(echo "$LINE" | cut -d "=" -f 2);;
DIRNOK*) DIRNOK=$(echo "$LINE" | cut -d "=" -f 2);;
dirconf*) ;;
*)
# Unknown key: report and bail out early.
log_message "$MSG_UNKNOWN_ENV_VAR" "$TYPE_ERR"
echo "$MSG_UNKNOWN_ENV_VAR"
EXIT_CODE=1
return $EXIT_CODE
;;
esac
done < $CONF_FILE
if [[ -z $GRUPO || -z $DIRBIN || -z $DIRMAE || -z $DIRREC || -z $DIROK || \
-z $DIRPROC || -z $DIRINFO || -z $DIRLOG || -z $DIRNOK ]]; then
log_message "$MSG_MISSING_ENV_VAR" "$TYPE_ERR"
echo "$MSG_MISSING_ENV_VAR"
EXIT_CODE=1
fi
export GRUPO
export DIRBIN
export DIRMAE
export DIRREC
export DIROK
export DIRPROC
export DIRINFO
export DIRLOG
export DIRNOK
# Flag consumed by check_previous_init on subsequent runs.
ENV=1
export ENV
return $EXIT_CODE
}
#######################################
# Check scripts execute permissions
# Globals:
# None
# Arguments:
# None
# Returns:
# 1 if denied, 0 if not.
#######################################
# For every entry in $DIRBIN: if it is not executable, warn, try to
# chmod +x it, and flag an error if the fix did not take effect.
# nullglob makes an empty $DIRBIN iterate zero times instead of once.
function check_script_permissions() {
EXIT_CODE=0
shopt -s nullglob
for SCRIPT in "$DIRBIN"/*
do
if [ ! -x "$SCRIPT" ]; then
# Substitute the %SCRIPT% placeholder of the message template.
log_message "`echo $MSG_SCRIPT_WITHOUT_PERMISSIONS_WAR | sed "s@%SCRIPT%@$SCRIPT@"`" "$TYPE_WAR"
echo `echo $MSG_SCRIPT_WITHOUT_PERMISSIONS_WAR | sed "s@%SCRIPT%@$SCRIPT@"`
chmod +x "$SCRIPT"
fi
# Re-test: still not executable means the chmod failed.
if [ ! -x "$SCRIPT" ]; then
log_message "`echo $MSG_SCRIPT_WITHOUT_PERMISSIONS_ERR | sed "s@%SCRIPT%@$SCRIPT@"`" "$TYPE_ERR"
echo `echo $MSG_SCRIPT_WITHOUT_PERMISSIONS_ERR | sed "s@%SCRIPT%@$SCRIPT@"`
EXIT_CODE=1
fi
done
return $EXIT_CODE
}
#######################################
# Check files read permissions
# Globals:
# None
# Arguments:
# None
# Returns:
# 1 if denied, 0 if not.
#######################################
# For every entry in $DIRMAE: if it is not readable, warn, try to
# chmod +r it, and flag an error if the fix did not take effect.
# Mirrors check_script_permissions but for read access on master files.
function check_file_permissions() {
EXIT_CODE=0
shopt -s nullglob
for FILE in "$DIRMAE"/*
do
if [ ! -r "$FILE" ]; then
# Substitute the %FILE% placeholder of the message template.
log_message "`echo $MSG_FILE_WITHOUT_PERMISSIONS_WAR | sed "s@%FILE%@$FILE@"`" "$TYPE_WAR"
echo `echo $MSG_FILE_WITHOUT_PERMISSIONS_WAR | sed "s@%FILE%@$FILE@"`
chmod +r "$FILE"
fi
# Re-test: still unreadable means the chmod failed.
if [ ! -r "$FILE" ]; then
log_message "`echo $MSG_FILE_WITHOUT_PERMISSIONS_ERR | sed "s@%FILE%@$FILE@"`" "$TYPE_ERR"
echo `echo $MSG_FILE_WITHOUT_PERMISSIONS_ERR | sed "s@%FILE%@$FILE@"`
EXIT_CODE=1
fi
done
return $EXIT_CODE
}
#######################################
# Ask user to start Demonep.sh
# Globals:
# None
# Arguments:
# None
# Returns:
# None
#######################################
# Interactive prompt to launch the Demonep daemon.  Loops until the user
# answers "s" (yes: start Demonep.sh in the background and report its PID
# plus how to stop it) or "n" (no: print the manual activation hint).
function start_demonep() {
ANSWER=""
while [ "$ANSWER" != "s" -a "$ANSWER" != "n" ]; do
echo "$MSG_ASK_DEMONEP_ACTIVATION"
log_message "$MSG_ASK_DEMONEP_ACTIVATION" "$TYPE_INF"
read ANSWER
log_message "$ANSWER" "$TYPE_INF"
# Normalize to lowercase so S/N are accepted too.
ANSWER="$(echo $ANSWER | tr '[:upper:]' '[:lower:]')"
case $ANSWER in
"s")
log_message "$MSG_DEMONEP_ACTIVATED" "$TYPE_INF"
echo "$MSG_DEMONEP_ACTIVATED"
bash "$DIRBIN/Demonep.sh" &
PROCESS_ID=$(pgrep -f "$DIRBIN/Demonep.sh")
log_message "`echo $MSG_DEMONEP_PID | sed "s@%PID%@$PROCESS_ID@"`" "$TYPE_INF"
echo `echo $MSG_DEMONEP_PID | sed "s@%PID%@$PROCESS_ID@"`
log_message "`echo $MSG_DEMONEP_MANUAL_STOP | sed "s@%PID%@$PROCESS_ID@"`" "$TYPE_INF"
echo `echo $MSG_DEMONEP_MANUAL_STOP | sed "s@%PID%@$PROCESS_ID@"`
;;
"n")
log_message "`echo $MSG_DEMONEP_MANUAL_ACTIVATION | sed "s@%SCRIPT%@$DIRBIN/Demonep.sh@"`" "$TYPE_INF"
echo `echo $MSG_DEMONEP_MANUAL_ACTIVATION | sed "s@%SCRIPT%@$DIRBIN/Demonep.sh@"`
;;
*) echo "$MSG_ANSWER_FAILURE";;
esac
done
}
#######################################
# Unset environment variables
# Globals:
# None
# Arguments:
# None
# Returns:
# None
#######################################
# Roll back init_environment: unset every exported environment variable
# (including the ENV "initialized" flag) so the session is clean again.
function destroy_environment() {
local var
for var in GRUPO DIRBIN DIRMAE DIRREC DIROK DIRPROC DIRINFO DIRLOG DIRNOK ENV; do
unset "$var"
done
}
# Entry point: verify prior initialisation, set up the environment and
# check permissions, then offer to start the daemon. Distinct return
# codes identify the failing step (1=previous-init check, 2=env init,
# 3=script permissions, 4=file permissions).
function main() {
# 1. Verify if environment has been initialized
check_previous_init
if [ $? -eq 1 ]; then
return 1
fi
# 2. Initialize environment variables
init_environment
if [ $? -eq 1 ]; then
destroy_environment
return 2
fi
# 3. Check permissions
check_script_permissions
if [ $? -eq 1 ]; then
destroy_environment
return 3
fi
check_file_permissions
if [ $? -eq 1 ]; then
destroy_environment
return 4
fi
log_message "$MSG_SYSTEM_INITIALIZED" "$TYPE_INF"
echo "$MSG_SYSTEM_INITIALIZED"
# 4-6. Ask to release the DEMONIO
start_demonep
# 7. Close Log
log_message "$MSG_INITEP_FINISHED" "$TYPE_INF"
echo "$MSG_INITEP_FINISHED"
}
main
| true |
e0564dc8988ca9ee57ac9c5cc09e74e9b7e6d953
|
Shell
|
unstoppa61e/hello_node
|
/ex01/tester.sh
|
UTF-8
| 313 | 2.90625 | 3 |
[] |
no_license
|
# Test runner for ex01: run vars.js and diff its output against `expected`.
subject="ex01"
esc=$(printf '\033')
# BUG FIX: SGR escape sequences separate parameters with ';' (ESC[33;1m =
# bold yellow). The original used ':' ("[33:1m"), which most terminals do
# not recognise, so the colours rendered as garbage.
yellow=$esc"[33;1m"
green=$esc"[32;1m"
red=$esc"[31;1m"
reset=$esc"[m"
# put_color TEXT COLOR — print TEXT wrapped in COLOR ... reset.
put_color() {
echo "$2$1$reset"
}
put_color "test $subject" "$yellow"
node vars.js > output
# Test the diff directly instead of inspecting $? afterwards.
if diff expected output; then
put_color "OK" "$green"
else
put_color "KO" "$red"
fi
rm -f ./output
| true |
79931639308dff314e018021096c444d218a2e15
|
Shell
|
UKHomeOffice/messaging
|
/push
|
UTF-8
| 1,417 | 4.3125 | 4 |
[] |
no_license
|
#!/bin/bash
# Push message files (DJ-*_<lang>.* by default) from $messages_dir to the
# messages service — one POST per file — then delete the local copy.
url=example
messages_dir=example
set -e
# BUG FIX: with `set -e` a failing `git | grep` pipeline aborted the script
# before the `$?` check below could print the error message. Testing the
# pipeline directly inside `if` keeps set -e out of the way.
if ! git symbolic-ref --short -q HEAD | grep -q 'master'; then
echo "Error: Messages can only be pushed from master git branch. Once you are ready to push to master then run this from master branch"
exit 1
fi
# Allow the user to run the script from the directory above messages_dir.
# (The original had two contradictory copies of this check, the first of
# which forgot to exit; collapsed into one check that does exit.)
if ! [ -d "$messages_dir" ]; then
echo "Error: CWD must be the directory above ${messages_dir}"
exit 1
fi
mkdir -p "$messages_dir"
# Usage help message if -h or --help.
if [ "$1" == "-h" -o "$1" == "--help" ]; then
echo -e "Usage: ${0} [-s story]\n"
exit 2
fi
# By default, we will push all files starting DJ-.
# But the user can specify a story to restrict this to one (or more) matching files only.
file_prefix=DJ-
# OPTIONS:
while getopts s: opt; do
case $opt in
s)
file_prefix=$OPTARG
;;
esac
done
# Iterate with a glob instead of parsing `ls` output (robust for odd names);
# nullglob makes the loop a no-op when nothing matches.
shopt -s nullglob
for file in "$messages_dir/${file_prefix}"*; do
# Language code is the "_xx-YY" chunk of the name with the '_' stripped,
# e.g. DJ-123_en-GB.properties -> en-GB.
lang_code=$(echo "$file" | sed -n 's/.*\(_[a-zA-Z-]*\).*/\1/p' | sed -e '1s/^.//')
printf "\nAdding messages in file: %s for language: %s\n" "$file" "$lang_code"
curl --fail -X POST -F messagesFile=@"$file" "${url}/$lang_code"
printf "\n !!!Deleting %s" "$file"
rm "$file"
printf "\n"
done
echo -e ""
| true |
a13b6503ea15c96cc257c04b59ffba6976676421
|
Shell
|
sthordall/dotfiles
|
/scripts/setup-work-proxy
|
UTF-8
| 2,270 | 3.71875 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
# Configure the work (NTLM) proxy on a NixOS machine:
#   1. prompt for domain / username / password,
#   2. bring up a temporary local cntlm instance with those credentials,
#   3. write the credentials into /etc/nixos/networking.nix and rebuild,
#   4. tear the auxiliary proxy down and clean up.
if [ "$EUID" -ne 0 ]; then
echo "Please run as root"
exit
fi
ROOT=/etc/nixos
RED='\033[1;31m'
GREEN='\033[1;32m'
NC='\033[0m'
echo ""
echo -e "${RED}## ${GREEN}Configuring proxy${NC}"
echo ""
echo -n -e "${GREEN}Domain:${NC} "
read domain
echo -n -e "${GREEN}Username:${NC} "
read user
echo -n -e "${GREEN}Password:${NC} "
read -s password
echo ""
echo ""
echo -e "${RED}## ${GREEN}Applying auxiliary proxy${NC}"
echo ""
spid=$(ps aux | grep "^cntlm" | gawk '{print $2}')
# BUG FIX: the assignment above always sets spid (possibly to ""), so the
# original `[ -z ${spid+x} ]` was never true and cntlm was never started.
# Test the value itself instead.
if [ -z "$spid" ]; then
systemctl start cntlm
fi
sleep 5
cntlm=$(ps aux | grep "^cntlm.*" | gawk '{print $11}')
config=$(ps aux | grep "^cntlm.*" | gawk '{print $15}')
# Same fix as above: check for empty values, not for "variable unset".
if [ -z "$cntlm" ]; then
echo "ERROR: cannot detect cntlm"
exit 1
fi
if [ -z "$config" ]; then
echo "ERROR: cannot detect cntlm configuration"
exit 1
fi
systemctl stop cntlm
cp -vf "$config" "$HOME/cntlm.config"
exit_code=$?
if [ "$exit_code" -ne 0 ]; then
echo "ERROR: cannot copy aux proxy configuration"
exit $exit_code
fi
# NOTE(review): the password appears on sed's command line and is briefly
# visible in `ps` output — consider feeding sed a script file if that matters.
sed -i "s/Username.*/Username $user/" "$HOME/cntlm.config"
sed -i "s/Password.*/Password $password/" "$HOME/cntlm.config"
sed -i "s/Domain .*/Domain $domain/" "$HOME/cntlm.config"
$cntlm -c "$HOME/cntlm.config"
exit_code=$?
if [ "$exit_code" -ne 0 ]; then
echo "ERROR: cannot start aux proxy"
exit $exit_code
fi
# "[c]ntlm" keeps the grep process itself out of the match.
pid=$(ps aux | grep "[c]ntlm" | gawk '{print $2}')
echo "Auxiliary CNTLM pid: $pid"
echo ""
echo -e "${RED}## ${GREEN}Applying configuration${NC}"
echo ""
sed -i "s/username = \".*\";/username = \"$user\";/" "$ROOT/networking.nix"
sed -i "s/password = \".*\";/password = \"$password\";/" "$ROOT/networking.nix"
sed -i "s/domain = \".*\";/domain = \"$domain\";/" "$ROOT/networking.nix"
nixos-rebuild boot
exit_code=$?
if [ "$exit_code" -ne 0 ]; then
echo "ERROR: cannot apply configuration"
exit $exit_code
fi
echo ""
echo -e "${RED}## ${GREEN}Killing auxiliary proxy${NC}"
echo ""
if [ -n "$pid" ]; then
kill -9 $pid
fi
rm -f "$HOME/cntlm.config"
echo ""
echo -e "${RED}## ${GREEN}Starting proxy${NC}"
echo ""
nixos-rebuild switch
exit_code=$?
if [ "$exit_code" -ne 0 ]; then
echo "ERROR: cannot apply configuration"
exit $exit_code
fi
echo ""
echo -e "${RED}## ${GREEN}Cleanup${NC}"
echo ""
nix-collect-garbage -d
| true |
afd00e96fb2a6298f9df9ec7ad5f9c1521320cbc
|
Shell
|
lino-network/linod-scripts
|
/fullnode/codes/watch_dog.sh
|
UTF-8
| 1,127 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/bash
# Watchdog wrapper around the lino fullnode: start `./lino`, poll the local
# RPC every 30s, and restart the node whenever the RPC is unreachable or
# the block height has stopped advancing.
# Randomise the moniker (line 11 of the node config) before starting.
sed -i "11s/.*/moniker=\"$(openssl rand -base64 6)\"/" ~/.lino/config/config.toml
./lino start --log_level=info &
pid=$!
last_height=0
while true
do
sleep 30s
# While the node is still syncing, height comparisons are meaningless.
catching_up=$(curl --max-time 10 http://localhost:26657/status | jq '. | .result.sync_info.catching_up')
if [ "$catching_up" = true ] ; then
echo 'still catching up!'
continue
fi
status=$(curl --max-time 10 -s -o /dev/null -w "%{http_code}" http://localhost:26657)
height=$(curl --max-time 10 http://localhost:26657/status | jq '. | .result.sync_info.latest_block_height')
echo "running at height $height"
if [ $status -eq 200 ]
then
echo node is running
# RPC answers but the height is frozen since the last poll: assume the
# node is wedged, kill it and start a quieter replacement.
if [ "$height" = "$last_height" ]
then
echo node is at the same height about 30s
kill -9 $pid
sleep 10s
./lino start --log_level=error &
pid=$!
# NOTE(review): `healthy` is written but never read — candidate for removal.
healthy=false
else
echo node is healthy
last_height=$height
fi
else
# RPC not reachable at all: restart the node.
echo node is down
kill -9 $pid
sleep 10s
./lino start --log_level=error &
pid=$!
healthy=false
fi
done
| true |
485b45c960ad3734dad9f481420e1f2e3150107a
|
Shell
|
Kehrlann/spring-security-and-session
|
/scripts/login.sh
|
UTF-8
| 481 | 3.5625 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Authenticate against the local app and cache the X-Auth-Token response
# header in a file next to this script.
set -e
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
token_file="$script_dir"/token-header.txt
# Drop any token left over from a previous run.
if [ -f "$token_file" ]; then
echo 'Removing previous token...'
rm "$token_file"
fi
echo 'Authenticating ...'
response=$(curl http://localhost:8080/login -d username=user -d password=password -i -v 2> /dev/null)
echo 'Success !'
# Keep only the X-Auth-Token header line out of the raw HTTP response.
token=$(grep "X-Auth-Token" <<<"$response")
echo "$token" > "$token_file"
echo "Wrote token in $token_file"
| true |
f347cd359981869607064ea03b63b2f134e100a7
|
Shell
|
shanahanjrs/arch-maintenance
|
/archtools.sh
|
UTF-8
| 1,484 | 3.625 | 4 |
[] |
no_license
|
#!/bin/bash
# Interactive Arch Linux maintenance menu: each entry echoes the command it
# is about to run, then runs it. The select loop re-prompts after each action.
# TODO
# implement:
# - failed systemd services
# systemctl --failed
#
# - errors in /var/log
# sudo journalctl -p 3 -xb
#
# - restore from timeshift backup
# sudo timeshift --restore
#
# - journalctl logs from boot
# sudo journalctl -b
#
# - clean up orphaned packages
# sudo pacman -Rsn $(pacman -Qdtq)
#
# sudo journalctl --vacuum-time=2months
#
# - dump pacman installed packages
# pacman -Qet | awk '{print $1}' > $HOME/PACMAN-PACKAGES.txt
#
# - install from a list of packages ($HOME/PACMAN-PACKAGES.txt)
# pacman -S - < $HOME/PACMAN-PACKAGES.txt
PS3="> "
options=(
"exit"
"disk usage"
"find broken symlinks"
"pacman update"
"pacman clean cache"
"timeshift quick create"
)
select userselection in "${options[@]}"; do
case $userselection in
"exit")
exit;;
"disk usage")
echo "df -ih"
df -ih
;;
"find broken symlinks")
echo "sudo find / -xtype l -print"
sudo find / -xtype l -print
;;
"pacman update")
echo "sudo pacman -Syu"
sudo pacman -Syu
;;
"pacman clean cache")
echo "sudo paccache -r"
sudo paccache -r
;;
"timeshift quick create")
echo "sudo timeshift --create"
sudo timeshift --create
;;
*)
# Any input that does not match a menu number.
echo "Unknown option." && exit;;
esac
done
| true |
4c863af493d41b53a8add588f62801581e99fcad
|
Shell
|
SBPL-Cruz/perception
|
/object_recognition_node/site/include/roslibjs/test/examples/setup_examples.sh
|
UTF-8
| 947 | 2.9375 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
# Launch roscore plus the demo nodes used by the roslibjs examples, then
# bring up rosbridge. Assumes a ROS environment has been sourced.
# BUG FIX: `command -v` prints the resolved path on *stdout*; the original
# only redirected stderr, so the probe leaked the path into the output.
if command -v rosrun > /dev/null 2>&1; then
echo "Shutting everything down"
# Broad on purpose: kill anything whose ps line mentions "ros". This also
# matches the grep itself, which is harmless — it has normally exited by
# the time kill runs.
ps aux | grep ros | awk '{print $2}' | xargs kill -9
sleep 1
echo "Starting roscore and various examples in background processes"
nohup roscore > /dev/null 2>&1&
sleep 2
nohup rosrun tf static_transform_publisher 0 0 0 0 0 0 world turtle1 100 > /dev/null 2>&1&
nohup rosrun tf2_web_republisher tf2_web_republisher > /dev/null 2>&1&
nohup rosrun actionlib_tutorials fibonacci_server > /dev/null 2>&1&
nohup rosrun rospy_tutorials add_two_ints_server > /dev/null 2>&1&
nohup rostopic pub /listener std_msgs/String "Hello, World" > /dev/null 2>&1&
# wait a moment then start up bridge
sleep 3
nohup roslaunch rosbridge_server rosbridge_websocket.launch > /dev/null 2>&1&
sleep 3
echo "Ready for lift off"
else
echo "Couldn't find ROS on path (try to source it)"
echo "source /opt/ros/indigo/setup.bash"
fi
| true |
457d095c4053c854c61ba1c50c955c32b837a975
|
Shell
|
nasyaoris/pdb-voltdb
|
/volt-config/start_cluster.sh
|
UTF-8
| 1,192 | 3.421875 | 3 |
[] |
no_license
|
# Restart an existing VoltDB docker cluster previously created by
# init_cluster.sh and print the host ports mapped to each node.
# The number of nodes can be overridden via NODE_COUNT (default: 6,
# matching the containers created by init_cluster.sh).
dir=$(pwd)
activeCtnPrefixFile=$dir/activeCtnPrefix
NODE_COUNT=${NODE_COUNT:-6}
if [ -f "$activeCtnPrefixFile" ]; then
read -r activeCtnPrefix < "$activeCtnPrefixFile"
echo 'Starting Docker Container ...'
for ((i = 1; i <= NODE_COUNT; i++)); do
docker start "${activeCtnPrefix}${i}" > /dev/null 2>&1
done
echo
echo 'Container started, you can access VoltDB nodes via this port:'
for ((i = 1; i <= NODE_COUNT; i++)); do
docker port "${activeCtnPrefix}${i}" 21212
done
echo
echo 'You can also access VoltDB web admin via this port:'
for ((i = 1; i <= NODE_COUNT; i++)); do
docker port "${activeCtnPrefix}${i}" 8080
done
else
echo 'No Active Cluster Record Found, please run init_cluster.sh first'
fi
| true |
befad6f043c039f7ca2cbffa924cd20125fdb65e
|
Shell
|
mlnlbrt/dotfiles
|
/.bin/launcher.sh
|
UTF-8
| 298 | 2.953125 | 3 |
[] |
no_license
|
#!/bin/bash
# Helper launcher: the action is selected by the name this script is
# invoked under (typically via symlinks pointing at it).
TERM=i3-sensible-terminal
invoked_as=$(basename "$0")
case "$invoked_as" in
surf-priv)
# Private surf session (disable disk cache, user agent "a").
surf -d -a "a"
;;
_hints)
$TERM -e "hints.sh $@ && read"
;;
_mc)
$TERM -e mc
;;
*)
# Unknown alias — nothing to launch.
exit 1
esac
| true |
4ec1c761cc0ecbd28ed495aae31b03bc51e9336a
|
Shell
|
jnaka66/ECE3524p3
|
/proc.sh
|
UTF-8
| 767 | 3.5 | 4 |
[] |
no_license
|
#!/bin/bash
# Simple process-management menu: list processes, kill a process by PID, or
# run top. After each action the script re-invokes itself so the menu text
# is printed again (NOTE(review): this nests shells instead of looping).
echo "(please enter the number of your selection below)"
PS3="Choice: "
select opt in 'Show all processes' 'Kill a process' 'Bring up top' 'Return to Main Menu'; do
case $opt in
'Show all processes')
ps -ef
# Pause until the user types ":q" (vi-style) to dismiss the listing.
ans=""
while [ "$ans" != ":q" ]
do
read -r ans
done
sh ./proc.sh
;;
'Kill a process')
# Prompt for PIDs repeatedly; ":q" returns to the menu.
# BUG FIX: the original looped on `[ "$n1" != "" ]` but read into an
# unrelated variable `num`, so the condition was dead — `while true`
# states the actual intent (loop until the explicit break).
while true
do
echo 'Please enter the PID of the process you would like to kill:'
read -r num
if [ "$num" == ":q" ]
then
break
fi
kill $num
done
sh ./proc.sh
;;
'Bring up top')
top
sh ./proc.sh
;;
'Return to Main Menu')
sh ./proj3.sh
break
;;
*)
echo "Invalid option $REPLY"
;;
esac
done
| true |
120ae58e444a6f037fa51c77ba92154260c95a10
|
Shell
|
m0oml/dotfiles
|
/.config/i3/lemonbar/i3_lemonbar_config
|
UTF-8
| 3,002 | 2.59375 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/local/bin/bash
# i3 panel config. Powerline style.
# Shared settings sourced by the other i3_lemonbar_* scripts: fonts,
# alarm thresholds, update intervals and the panel colour palette.
panel_fifo="/tmp/i3_lemonbar_${USER}"
geometry="2560x24+2560x"
font="-xos4-terminesspowerline-medium-r-normal--20-200-72-72-c-100-iso10646-1"
#iconfont="-xos4-terminusicons2mono-medium-r-normal--12-120-72-72-m-60-iso8859-1"
#font="-xos4-terminesspowerline-medium-r-normal--17-120-100-100-c-0-iso10646-1"
iconfont="-xos4-terminusicons2mono-medium-r-normal--17-120-100-100-m-0-iso8859-1"
# Current screen width from xrandr.
# NOTE(review): awk's '$8a' is $8 concatenated with the (empty) variable a —
# it works, but looks like a typo for plain '$8'; confirm before "fixing".
res_w=$(xrandr | grep "current" | awk '{print $8a}')
#snd_cha=$(amixer get Master | grep "Playback channels:" | awk '{if ($4 == "") {printf "%s: Playback", $3} else {printf "%s %s: Playback", $3, $4}}')
# Alarm settings
cpu_alert=75 # % cpu use
net_alert=5 # K net use
# update setting, in seconds (conky update in i3_lemonbar_conky
upd_vol=3 # Volume update
upd_mail=300 # Mail check update
upd_mpd=5 # MPD song update
# color definitions (format is AARRGGBB as used by lemonbar's %{B}/%{F})
color_back="#FF1D1F21" # Default background
color_fore="#FFC5C8C6" # Default foreground
color_head="#FFB5BD68" # Background for first element
color_sec_b1="#FF282A2E" # Background for section 1
color_sec_b2="#FF454A4F" # Background for section 2
color_sec_b3="#FF60676E" # Background for section 3
color_icon="#FF979997" # For icons
color_mail="#FFCE935F" # Background color for mail alert
color_chat="#FFCC6666" # Background color for chat alert
color_cpu="#FF5F819D" # Background color for cpu alert
color_net="#FF5E8D87" # Background color for net alert
color_disable="#FF1D1F21" # Foreground for disable elements
color_wsp="#FF8C9440" # Background for selected workspace
if [ ${res_w} -gt 1024 ]; then
stab=' '
else
stab=' '
fi
# Char glyps for powerline fonts
sep_left="" # Powerline separator left
sep_right="" # Powerline separator right
sep_l_left="" # Powerline light separator left
sep_l_right="" # Powerline light sepatator right
# Icon glyphs from Terminusicons2
# These Latin-1 codepoints map to pictographic glyphs only when rendered
# with the Terminusicons2 font configured in $iconfont above.
icon_clock="Õ" # Clock icon
icon_cpu="Ï" # CPU icon
icon_mem="Þ" # MEM icon
icon_dl="Ð" # Download icon
icon_ul="Ñ" # Upload icon
icon_vol="Ô" # Volume icon
icon_hd="À" # HD / icon
icon_home="Æ" # HD /home icon
icon_mail="Ó" # Mail icon
icon_chat="Ò" # IRC/Chat icon
icon_music="Î" # Music icon
icon_prog="Â" # Window icon
icon_contact="Á" # Contact icon
icon_wsp="É" # Workspace icon
| true |
dd8f71e7a09edd5e3fffed3e923edd0cdff472e8
|
Shell
|
rafalogan/athonApi
|
/bin/create.module.sh
|
UTF-8
| 849 | 3.296875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/zsh
# Scaffold an API module: ensure the api/controllers/routes directory tree
# and module files exist, then create <name>.controller.ts and
# <name>.routes.ts and register them in the barrel (index.ts) files.
# Usage: create.module.sh <module-name>
DIR_MODULES="./src/api"
DIR_CONTROLLERS="./src/api/controllers"
DIR_ROUTES="./src/api/routes"
if [ ! -d "$DIR_MODULES" ]; then
# -p also creates ./src when running in a fresh checkout (plain mkdir
# failed when the parent directory was missing).
mkdir -p "$DIR_CONTROLLERS" "$DIR_ROUTES"
fi
# BUG FIX: was `if [! -f ...]` — without the space "[!" is not a command,
# so the test always failed and api.module.ts was never created.
if [ ! -f "$DIR_MODULES/api.module.ts" ]; then
touch "$DIR_MODULES/api.module.ts"
fi
if [ ! -f "$DIR_CONTROLLERS/controllers.module.ts" ]; then
touch "$DIR_CONTROLLERS/controllers.module.ts" &&
touch "$DIR_CONTROLLERS/index.ts"
fi
if [ ! -f "$DIR_ROUTES/routes.module.ts" ]; then
touch "$DIR_ROUTES/routes.module.ts" &&
touch "$DIR_ROUTES/index.ts"
fi
touch "$DIR_CONTROLLERS/$1.controller.ts" &&
touch "$DIR_ROUTES/$1.routes.ts" &&
echo "export * from './$1.controller';" >> "$DIR_CONTROLLERS/index.ts"
# BUG FIX: was $DIR_DIR_ROUTES (undefined), so the routes export was
# appended to "/index.ts" instead of the routes barrel file.
echo "export * from './$1.routes';" >> "$DIR_ROUTES/index.ts"
echo "Module Files $1 Created with Success!"
| true |
a3cb10f088b0390a8e6859f6ff9fe09135f434f6
|
Shell
|
dimitern/go-tools
|
/scripts/test-lxc.sh
|
UTF-8
| 10,519 | 3.71875 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Bootstraps a juju environment (from source) and deploys a few
# services in LXC containers. Uses 2 machines with different series.
# Waits for the deployment then collects all logs and relevant info
# from the remote machines and destroys the environment.
# Call with an environment name, its default series and another
# series as arguments. e.g. ./test-lxc.sh maas-hw trusty precise
# NOTE: All remote logs collected will be placed in the current
# directory.
JUJU_ENV="$1"
SERIES1="$2"
SERIES2="$3"
# fetch MACHINE REMOTE_CMD LOCAL_FILE — run REMOTE_CMD on MACHINE over
# juju ssh, echo the output locally and append it to LOCAL_FILE.
fetch() {
juju ssh -e "$JUJU_ENV" "$1" -- "$2" | tee -a "$3"
}
# machine_info MACHINE — collect the machine agent log, cloud-init output
# and network state (iptables, routes, addresses, links).
machine_info() {
local m="$1"
fetch "$m" "sudo cat /var/log/juju/machine-$m.log" "machine-$m.log"
fetch "$m" "sudo cat /var/log/cloud-init-output.log" "machine-$m.cloud-init-output.log"
fetch "$m" "sudo iptables-save" "machine-$m.iptables-save"
fetch "$m" "sudo ip route list" "machine-$m.ip-route-list"
fetch "$m" "sudo ip addr list" "machine-$m.ip-addr-list"
fetch "$m" "sudo ip link show" "machine-$m.ip-link-show"
}
# container_logs MACHINE CONTAINER PREFIX — collect the juju metadata and
# cloud-init output of one LXC (template or numbered) container.
container_logs() {
local m="$1" ctn="$2" prefix="$3"
fetch "$m" "sudo cat /var/lib/juju/containers/$ctn/console.log" "$prefix.console.log"
fetch "$m" "sudo cat /var/lib/juju/containers/$ctn/container.log" "$prefix.container.log"
fetch "$m" "sudo cat /var/lib/juju/containers/$ctn/cloud-init" "$prefix.cloud-init"
fetch "$m" "sudo cat /var/lib/juju/containers/$ctn/lxc.conf" "$prefix.lxc.conf"
fetch "$m" "sudo cat /var/lib/lxc/$ctn/rootfs/var/log/cloud-init-output.log" "$prefix.cloud-init-output.log"
}
echo "Bootstrapping $JUJU_ENV with default-series: $SERIES1 using LXC containers"
# instance-type=m3.medium added to overcome the limitation of 4 private IPs per ENI on the default m1.small instance type.
# Once the template container is not getting a static IP (and thus wasting an IP effectively) remove this.
juju bootstrap -e $JUJU_ENV --upload-tools --debug --constraints "root-disk=20G instance-type=m3.medium"
echo "Deploying services"
juju deploy -e $JUJU_ENV cs:${SERIES1}/wordpress --to lxc:0
juju deploy -e $JUJU_ENV cs:${SERIES1}/mysql --to lxc:0
juju deploy -e $JUJU_ENV cs:${SERIES1}/ubuntu ubuntu1 --to lxc:0
juju deploy -e $JUJU_ENV cs:${SERIES2}/ubuntu ubuntu2 --to lxc:0
echo "Adding a second machine --series $SERIES2"
juju add-machine -e $JUJU_ENV --series $SERIES2
echo "Adding more units to the second machine"
juju add-unit -e $JUJU_ENV ubuntu1 --to lxc:1
juju add-unit -e $JUJU_ENV ubuntu2 --to lxc:1
echo "Adding relations"
juju add-relation -e $JUJU_ENV wordpress mysql
echo "Waiting for all machines to start..."
watch -n 5 juju status -e $JUJU_ENV --format tabular
echo "Getting logs from machine 0"
# all-machines.log only exists on the bootstrap node (machine 0).
fetch 0 "sudo cat /var/log/juju/all-machines.log" all-machines.log
machine_info 0
echo "Getting LXC template containers logs from machine 0"
for series in "$SERIES1" "$SERIES2"; do
container_logs 0 "juju-$series-lxc-template" "machine-0-$series-lxc-template"
done
echo "Getting LXC remaining containers logs from machine 0"
for n in 0 1 2 3; do
container_logs 0 "juju-machine-0-lxc-$n" "machine-0-lxc-$n"
done
echo "Getting logs from machine 1"
machine_info 1
echo "Getting LXC template containers logs from machine 1"
for series in "$SERIES1" "$SERIES2"; do
container_logs 1 "juju-$series-lxc-template" "machine-1-$series-lxc-template"
done
echo "Getting LXC remaining containers logs from machine 1"
for n in 0 1; do
container_logs 1 "juju-machine-1-lxc-$n" "machine-1-lxc-$n"
done
echo "Destroying environment $JUJU_ENV"
juju destroy-environment $JUJU_ENV -y --force
| true |
875e9a5643456a1eef2b3d28ea873d0249cc4e07
|
Shell
|
stffrdhrn/or1k-utils
|
/glibc/glibc.build
|
UTF-8
| 1,136 | 3.96875 | 4 |
[] |
no_license
|
#!/bin/bash
# Wrapper to run all builds in the right order
DIR=$(dirname "$0")
source "$DIR/glibc.config"
start_time=$(date -u +%s)
build_log=$BUILDDIR/log/build.log
mkdir -p "$BUILDDIR/log"
# Report SUCCESS when the sentinel step (glibc.build.done) made it into the
# build log, FAILURE otherwise.
success_or_failure() {
if grep libc.build.done "${build_log}" > /dev/null ; then
echo -n SUCCESS
else
echo -n FAILURE
fi
}
# Short report: finish time, per-step log lines and total wall time (minutes).
summary() {
echo
echo -n "# build finish: "; date -Is
grep glibc.build "${build_log}"
echo -n "# build time(m): "; expr `date -u +%s` / 60 - $start_time / 60
echo
}
set -x
# Clean any old artifacts that may impact our build.
# ${INSTALLDIR:?} aborts if the variable is unset/empty — the original
# unquoted `rm -rf $INSTALLDIR/...` would have deleted /or1k-smh-linux-gnu.
rm -rf "${INSTALLDIR:?}/or1k-smh-linux-gnu"
time (
"$DIR/glibc.build.binutils" &&
"$DIR/glibc.build.linux-headers" &&
"$DIR/glibc.build.gcc1" &&
"$DIR/glibc.build.libc-headers" &&
"$DIR/glibc.build.gcc1.5" &&
"$DIR/glibc.build.libc-final" &&
"$DIR/glibc.build.gcc2" &&
"$DIR/glibc.build.libc-final" &&
"$DIR/glibc.build.done"
) > "${build_log}" 2>&1
set +x
# Sync gcc runtime libs into sysroot
rsync -a "$INSTALLDIR/or1k-smh-linux-gnu/lib/" "$SYSROOTDIR/lib"
if [ "$MAILTO" ] ; then
summary | mail -s "Glibc build report $(date +"%Y-%m-%d-%H:%M") - $(success_or_failure)" "$MAILTO"
fi
| true |
6b7433ba73873a66f76c664f9af12f290e55f5e7
|
Shell
|
ChrisMLawson/Rick-Richardson-geocaching-tools
|
/geo-logic-box
|
UTF-8
| 3,240 | 3.734375 | 4 |
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/sh
PROGNAME="$0"
# Print the manual page (everything inside the heredoc is emitted verbatim)
# and exit with status 1.
usage() {
	cat <<EOF
NAME
	`basename $PROGNAME` - Logic box/grid/grille solver
SYNOPSIS
	`basename $PROGNAME` [options] initial-value count
DESCRIPTION
	Logic box/grid/grille solver. Take an initial value which is nine or
	ten digits and produce "count" new values.
OPTIONS
	-D lvl	Debug level
EXAMPLES
	Try 9 digits:
	    https://www.geocaching.com/geocache/GC5CWMH_the-logic-box
	The columns are numbered 1, 2, ... 9. The "1" goes in column 9,
	the "9" goes in column 7, the "7" goes in column 3, and so on.
	    $ geo-logic-box 197346258 12
	    197346258
	    867324951
	    537298614
	    675148392
	    598172643
	    874632195
	    236794851
	    512983647
	    518673492
	    597328614
	    637148925
	    796125348
	    615329847
	Try 10 digits:
	    https://www.geocaching.com/geocache/GC8GZ4B_a-logical-progression
	The columns are numbered 0, 1, ... 9. The "5" goes in column 7,
	the "7" goes in column 1, the "1" goes in column 4, and so on.
	    $ geo-logic-box 5714893620 10
	    5714893620
	    2769103548
	    1980537246
	    8675204391
	    2954078613
	    4631598072
	    8376214095
	    4268197350
	    5847032961
	    7630819452
	    3856947201
SEE ALSO
	https://www.geocaching.com/geocache/GC2MXG3_logique
	https://www.geocaching.com/geocache/GC3KX7F_logical-grid
	https://www.geocaching.com/geocache/GC8GZ4B_a-logical-progression
	https://www.geocaching.com/geocache/GC69775_voilier-8-grille-logique
EOF
	exit 1
}
#
# Report an error and exit
#
error() {
# Print "<script>: <message>" on stderr, then abort with status 1.
printf '%s: %s\n' "$(basename "$PROGNAME")" "$1" >&2
exit 1
}
debug() {
# Emit "<script>: <message>" on stderr when the global debug level
# ($DEBUG) is at least the level requested in $1.
[ "$DEBUG" -ge "$1" ] && printf '%s: %s\n' "$(basename "$PROGNAME")" "$2" >&2
return 0
}
#
# Process the options
#
DEBUG=0
#unset OPTIND
while getopts "D:h?" opt
do
	case $opt in
	D)	DEBUG="$OPTARG";;
	h|\?)	usage;;
	esac
done
shift `expr $OPTIND - 1`
#
# Main Program
#
if [ $# != 2 ]; then
    usage
fi
# https://coord.info/GC5CWMH
# https://coord.info/GC3KX7F
# https://coord.info/GC2MXG3
# https://coord.info/GC8GZ4B
# https://coord.info/GC8MZ6F
#
# "the 1 goes in column 9", "the 9 goes in column 7",
# "the 7 goes in column 3", and so on
#
# Use grille-logique.sh for http://coord.info/GC69775
#
# Algorithm: for each new row, the digit d is placed in the column named by
# the digit that FOLLOWS d in the previous row (x2pos/x2pos10 return that
# successor). a[] holds the previous row, b[] accumulates the new row.
# The 9-digit variant uses columns 1..9, the 10-digit variant 0..9.
awk -v logic=$1 -v count=$2 '
function x2pos(x, i) {
	for (i = 1; i <= 9; ++i)
	    if (x == a[i])
		return a[i%9+1]
	print "error"
	exit
}
function doit() {
	for (i = 1; i <= 9; ++i) { pos = x2pos(i); b[pos] = i }
	for (i = 1; i <= 9; ++i) printf "%d", a[i]=b[i]; printf "\n"
}
function x2pos10(x, i) {
	for (i = 0; i < 10; ++i)
	{
	    # print x, a[i]
	    if (x == a[i])
	    {
		return a[(i+1)%10]
	    }
	}
	print "error", x
	exit
}
function doit10() {
	for (i = 0; i < 10; ++i) { pos = x2pos10(i); b[pos] = i }
	for (i = 0; i < 10; ++i) printf "%d", a[i%10]=b[i%10]; printf "\n"
}
BEGIN {
	len = length(logic)
	if (len == 9)
	{
	    for (i = 1; i <= len; ++i) a[i] = substr(logic, i, 1)
	    for (i = 1; i <= 9; ++i) printf "%d", a[i]; printf "\n"
	    for (j = 1; j <= count; ++j)
		doit()
	}
	else if (len == 10)
	{
	    for (i = 1; i <= len; ++i) a[i-1] = substr(logic, i, 1)
	    for (i = 0; i <= 9; ++i) printf "%d", a[i]; printf "\n"
	    for (j = 1; j <= count; ++j)
		doit10()
	}
	else
	{
	    print "Error: only 9 or 10 characters for the initial-value"
	    exit
	}
}'
| true |
1a77cc1fefe1ee53c71d0015e089be96c23d0561
|
Shell
|
lojaintegrada/django-mptt
|
/tests/runtests.sh
|
UTF-8
| 453 | 2.96875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# Run the django-mptt test suite for the bundled test project ("myapp" by
# default, to avoid running tests for django.contrib.* in INSTALLED_APPS).
export PYTHONPATH="./"
export DJANGO_SETTINGS_MODULE='settings'
# Prefer the legacy django-admin.py entry point when present, otherwise fall
# back to the modern django-admin. `command -v` is the portable replacement
# for `which`; its stdout is silenced so the resolved path is not printed.
if command -v django-admin.py > /dev/null 2>&1 ; then
export DJANGO_ADMIN=django-admin.py
else
export DJANGO_ADMIN=django-admin
fi
export args="$@"
if [ -z "$args" ] ; then
# avoid running the tests for django.contrib.* (they're in INSTALLED_APPS)
export args=myapp
fi
$DJANGO_ADMIN test --traceback --settings=$DJANGO_SETTINGS_MODULE --verbosity 2 --pythonpath="../" "$args"
| true |
bc3935e326ac34702898281c598b5143cac079ab
|
Shell
|
cloudcafetech/AKS-setup
|
/winexporter-setup.sh
|
UTF-8
| 1,360 | 3.75 | 4 |
[] |
no_license
|
#!/bin/bash
# Windows Nodes Exporter setup script on AKS Windows Host
# Deploys a helper "aks-ssh" pod, copies the cluster SSH key into it, then
# SSHes from the pod to every Windows node to install the host exporter.
CLUSTER=prod-aks-win
WINUSER=adminprod
# Private key file expected in the current directory (ssh-key-<cluster>).
SSHKEY=ssh-key-$CLUSTER
echo "Deploying AKS SSH POD on Cluster"
kubectl create -f https://raw.githubusercontent.com/cloudcafetech/AKS-setup/master/aks-ssh.yaml
echo "Waiting for SSH POD ready .."
# Poll the pod's Ready condition every 2s until it reports "True".
while [[ $(kubectl get pods aks-ssh -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do printf '.'; sleep 2; done
echo "Copy ssh key to SSH POD"
kubectl cp $SSHKEY $(kubectl get pod aks-ssh | awk '{print $1}' | grep -v NAME):/$SSHKEY
# Internal IPs of all Windows nodes (column 6 of `kubectl get nodes -o wide`).
for winhost in $(kubectl get nodes -o wide | grep Windows | awk '{print $6}'); do
echo "Deploying Host Exporter on Windows Nodes"
kubectl exec -it aks-ssh -- ssh -o 'StrictHostKeyChecking no' -i $SSHKEY $WINUSER@$winhost "curl -LO https://raw.githubusercontent.com/cloudcafetech/AKS-setup/master/windows-exporter-setup.bat && windows-exporter-setup.bat"
done
echo "Deleting SSH POD"
kubectl delete pod aks-ssh
# Cluster linux host login
#kubectl exec -it aks-ssh -- ssh -o 'StrictHostKeyChecking no' -i $SSHKEY azureuser@<LINUX HOST IP>
# Cluster Windows host login
#kubectl exec -it aks-ssh -- ssh -o 'StrictHostKeyChecking no' -i $SSHKEY $WINUSER@<Windows HOST IP>
# Exit container cleanup command
#docker rm `docker ps -a | grep -v CONTAINER | grep Exited | awk '{print $1}'`
| true |
31794e612221f9e34ba9cb91da5eacac18dda989
|
Shell
|
otboss/Mobile-Application-Tech-Year-3-Semester-2
|
/SWEN3004/SERVER/start.sh
|
UTF-8
| 252 | 3.125 | 3 |
[] |
no_license
|
#!/bin/bash
# Launch the server, optionally in debugging mode.
#
# Prompts the user: "y"/"Y" runs bin/server.js with DEBUGGING=true,
# anything else runs bin/index.js with DEBUGGING=false.
export DEBUGGING=false;
read -p 'Launch in debugging mode? [y/N] ' choice;
# case replaces `[ "$choice" == y -o ... ]`: the -a/-o test operators
# are deprecated and ambiguous per POSIX.
case "$choice" in
    [yY])
        cd "./bin";
        export DEBUGGING=true;
        ./node server.js;
        ;;
    *)
        cd "./bin";
        ./node index.js;
        ;;
esac
| true |
693745a83e132360b5cf87cb931797a67a7756e3
|
Shell
|
ShalokShalom/apps
|
/pidgin/PKGBUILD
|
UTF-8
| 1,602 | 2.71875 | 3 |
[] |
no_license
|
# perl issue on server, build locally only
# Arch-style PKGBUILD for the Pidgin IM client, built against GTK2 with
# GnuTLS/NetworkManager/gtkspell/evolution integration disabled.
pkgname=pidgin
pkgver=2.14.6
pkgrel=1
pkgdesc="Multi-protocol instant messaging client"
arch=('x86_64')
url="https://pidgin.im/"
license=('GPL')
depends=("libpurple=${pkgver}" 'startup-notification' 'gtk2' 'libxss' 'hicolor-icon-theme'
         'libsm' 'libidn' 'avahi' 'gst-plugins-good' 'farstream')
makedepends=('nss' 'libsasl' 'python3' 'tk' 'ca-certificates' 'intltool' )
optdepends=('kde-gtk-config: gtk integration settings for Plasma')
options=('!libtool')
source=("https://downloads.sourceforge.net/pidgin/${pkgname}-${pkgver}.tar.bz2")
sha256sums=('bb45f7c032f9efd6922a5dbf2840995775e5584771b23992d04f6eff7dff5336')

# Configure and compile in the unpacked source tree.
build() {
  cd ${pkgname}-${pkgver}
  ./configure --prefix=/usr \
      --sysconfdir=/etc \
      --disable-schemas-install \
      --disable-meanwhile \
      --disable-gnutls \
      --enable-cyrus-sasl \
      --disable-nm \
      --with-python=/usr/bin/python3 \
      --with-system-ssl-certs=/etc/ssl/certs \
      --disable-gtkspell \
      --disable-gevolution
  make
}

# Install pidgin and docs into $pkgdir; libpurple is installed only
# transiently for linking and then removed (it ships in its own package).
package(){
  # NOTE(review): ${pkgbase} is not set in this file; makepkg defaults it
  # to ${pkgname}, so this resolves to the same directory as in build().
  cd ${pkgbase}-${pkgver}
  #for linking
  make -C libpurple DESTDIR=${pkgdir} install-libLTLIBRARIES
  make -C pidgin DESTDIR=${pkgdir} install
  make -C doc DESTDIR=${pkgdir} install
  #clean up libpurple
  make -C libpurple DESTDIR=${pkgdir} uninstall-libLTLIBRARIES
  install -D -m 0644 pidgin/data/pidgin.desktop ${pkgdir}/usr/share/applications/pidgin.desktop
  rm -f ${pkgdir}/usr/share/man/man1/finch.1
}
| true |
3d43ea19d7471863789ddeab67d2617abe3240ee
|
Shell
|
hawkw/mycelium
|
/bin/_util.sh
|
UTF-8
| 1,689 | 4.5 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# utility functions used in other shell scripts.
#
# currently, this includes:
# - cargo-style stderr logging (`err`, `note`, and `status` functions)
# - confirmation prompts (`confirm` function)
set -euo pipefail
# Print an error message on stderr with a bold red, cargo-style
# "error:" prefix.
#
# Args:
#   $@: message words to log
err() {
    local tag='\e[31m\e[1merror:\e[0m'
    echo -e "$tag" "$@" >&2
}
# Print a note on stderr with a bold "note:" prefix (same red styling
# as err()).
#
# Args:
#   $@: message words to log
note() {
    local tag='\e[31m\e[1mnote:\e[0m'
    echo -e "$tag" "$@" >&2
}
# Log a cargo-style status message: a right-aligned bold green tag
# followed by the message.
#
# NOTE(review): the file header says "cargo-style stderr logging", but
# unlike err() and note() this printf writes to *stdout* — confirm
# whether a `>&2` is missing here before relying on the stream.
#
# Args:
#   $1: a "tag" for the log message (should be 12 characters or less in
#       length)
#   $2: message to log
status() {
    local width=12
    local tag="$1"
    local msg="$2"
    printf "\e[32m\e[1m%${width}s\e[0m %s\n" "$tag" "$msg"
}
# Prompt the user to confirm an action.
#
# Repeats the prompt until a recognizable answer is read; unrecognized
# input logs an error (via err) and asks again.
#
# Args:
#   $1: message to display to the user along with the `[Y/n]` prompt
#
# Returns:
#   0 if the user confirmed; 1 if they declined, or if stdin was closed
#   before an answer was read.  (Previously EOF fell out of the while
#   loop with status 0, silently counting as a confirmation.)
confirm() {
    while read -r -p "$1 [Y/n] " input
    do
        case "$input" in
            [yY][eE][sS]|[yY])
                return 0
                ;;
            [nN][oO]|[nN])
                return 1
                ;;
            *)
                err "invalid input $input"
                ;;
        esac
    done
    # read failed (EOF / closed stdin): treat as "no".
    return 1
}
# Returns the path to a Mycelium crate.
#
# Args:
#   $1: crate name, with or without the "mycelium-" prefix
#
# Outputs:
#   the crate's directory on stdout when it exists
#
# Returns:
#   0 when the crate directory exists, 1 (after logging via err)
#   when it does not.
crate_path() {
    local crate="$1"
    local mycoprefix='mycelium-'
    local stripped="${crate#"$mycoprefix"}"
    if [[ -d "$crate" ]]; then
        echo "$crate"
    elif [[ -d "$stripped" ]]; then
        echo "$stripped"
    else
        err "unknown crate $crate"
        return 1
    fi
}
| true |
bfe3fd8ad13d8ef7e643bffb6a1bb894a25dbd04
|
Shell
|
BatNiy/NewPlatform.Flexberry.ServiceBus
|
/NewPlatform.Flexberry.ServiceBus/Flexberry Service Bus/Linux/flexberryservicebus
|
UTF-8
| 1,438 | 3.75 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# template Flexberry Service Bus - esb from NewPlatform.
#
# SysV init script wrapping mono-service2.  The mono lock file doubles
# as the "is it running" marker and stores the service PID.
#
# chkconfig: - 90 10
# description:
# processname: flexberryservicebus
# config: NewPlatform.Flexberry.ServiceBus.WinServiceHost.exe.config
# pidfile: /var/run/monosrv/flexberryservicebus.pid

# Do not load RH compatibility interface.
WITHOUT_RC_COMPAT=1

# Source function library.
. /etc/init.d/functions

LOCKFILE=/tmp/flexberryservicebus.lock
MONOSERVER=/usr/local/mono/mono-4.6.1/bin/mono-service2
SERVICEPATH=/opt/flexberryservicebus
SERVICEEXE=NewPlatform.Flexberry.ServiceBus.WinServiceHost.exe
USER=flexberryservicebus

export MONO_IOMAP=all

# Start the service (as $USER) unless the lock file says it is running.
start()
{
    if [ ! -f ${LOCKFILE} ]; then
        su -c "${MONOSERVER} -l:${LOCKFILE} -d:${SERVICEPATH} -m:flexberryservicebus ${SERVICEEXE}" ${USER}
    else
        echo "Flexberry Service Bus is already running!"
    fi
}

# Stop the service by killing the PID recorded in the lock file.
stop()
{
    if [ -f ${LOCKFILE} ]; then
        PID=$(su -c "cat ${LOCKFILE}")
        kill ${PID}
    else
        echo "Flexberry Service Bus is not running"
    fi
}

restart()
{
    stop
    sleep 1
    start
}

# Like restart, but also removes a (possibly stale) lock file so a
# crashed instance can be started again.
reset()
{
    stop
    sleep 1
    rm ${LOCKFILE}
    sleep 1
    start
}

# Report running state based solely on the lock file's existence.
status()
{
    if [ -f ${LOCKFILE} ]; then
        echo "Flexberry Service Bus is running"
    else
        echo "Flexberry Service Bus is not running"
    fi
}

# See how we were called.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart)
        restart
        ;;
    reset)
        reset
        ;;
    status)
        status
        ;;
    *)
        msg_usage "${0##*/} {start|stop|restart|reset|status}"
        RETVAL=1
esac
| true |
3fd7516555b3bfbf9b32acf0f0897993f513a02a
|
Shell
|
e-ark/RPi-Web-L-chika
|
/api/api-button.cgi
|
UTF-8
| 318 | 3.046875 | 3 |
[] |
no_license
|
#!/bin/sh -vx
#
# CGI Button API(JSON)
#
# Reads a GPIO pin with wiringPi's `gpio` tool and answers with a small
# JSON document: [ {"data": "<pin value>", "time": "<unix seconds>"} ].
#
# Wed May 15 23:58:09 JST 2013
#
# Shell trace output (-vx from the shebang) goes to a per-script log.
exec 2> /tmp/log.$(basename $0)
# Settings
PIN=4
# NOTE(review): the pin is configured as *out* although the script reads
# a button — confirm whether this should be `gpio -g mode $PIN in`.
gpio -g mode $PIN out
# Exec
value=$(gpio -g read $PIN)
date=$(date +"%s")
# Output: CGI header, blank line, then the JSON body.
echo "Content-type:application/json"
echo ""
echo '[ {"data": "'$value'", "time": "'$date'"} ]'
exit 0
| true |
a6eead60eba503b1929b2d6d91095448112de145
|
Shell
|
z-george-ma/docker-dev-tools
|
/docker-init.sh
|
UTF-8
| 772 | 3.328125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# Share the current directory into a boot2docker / docker-machine VM.
#
# Creates a uniquely named (timestamped) VirtualBox shared folder for
# `pwd`, restarts the VM so the folder attaches, and mounts it inside
# the VM at /<tag>.  The tag is written to .boot2docker-path for later
# use by other tooling.
#
# Usage: docker-init.sh [machine-name]
#   with an argument  -> docker-machine VM of that name
#   without           -> classic boot2docker-vm
MOUNT_TAG="DOCKER_MOUNT_"$(date +"%s")
VBOXMANAGE=$(which VBoxManage)
BOOT2DOCKER=$(which boot2docker)
# Windows fallback: VBoxManage may not be on PATH, but the installer
# exports VBOX_MSI_INSTALL_PATH.
if [ x$VBOXMANAGE = x ]; then
    VBOXMANAGE="$VBOX_MSI_INSTALL_PATH/VBoxManage"
fi
if [ $# -ge 1 ]; then
    # docker-machine flavor: stop, attach shared folder, start, mount
    docker-machine stop $1
    "$VBOXMANAGE" sharedfolder add $1 -name $MOUNT_TAG -hostpath `pwd`
    docker-machine start $1
    docker-machine ssh $1 "sudo sh -c \"if [ ! -d /$MOUNT_TAG ]; then mkdir /$MOUNT_TAG; fi\" && sudo mount -t vboxsf $MOUNT_TAG /$MOUNT_TAG"
else
    # legacy boot2docker flavor
    boot2docker down
    "$VBOXMANAGE" sharedfolder add boot2docker-vm -name $MOUNT_TAG -hostpath `pwd`
    boot2docker up
    boot2docker ssh "sudo sh -c \"if [ ! -d /$MOUNT_TAG ]; then mkdir /$MOUNT_TAG; fi\" && sudo mount -t vboxsf $MOUNT_TAG /$MOUNT_TAG"
fi
echo $MOUNT_TAG > .boot2docker-path
| true |
16fd263a02745631649973813d931c6602b3afb1
|
Shell
|
UW-GAC/aws_ami
|
/upgrade_ubuntu_to_topmed.bash
|
UTF-8
| 7,075 | 3.71875 | 4 |
[] |
no_license
|
#!/bin/bash
# upgrade a base ubuntu to have the following:
#1. Ubuntu server with appropriate security and ssh access
#2. Ubuntu packages supporting:
#   a. Developers (e.g., gcc)
#   b. R and various R packages
#   c. HPC (e.g., hdf5, openmpi)
#3. Intel's MKL
#4. Base R
#5. TOPMed Specific Support
#   a. NFS volume for projects, home base directory for users (not for ubuntu user)
#   b. TOPMed analysis packages (via bioconductor)
#   c. Miscellaneous R packages (e.g., rmarkdown)
#   d. UW analysis_pipeline
#6. User accounts in topmed group and having home directories on NFS
#7. Optional RStudio server
#8. Optional Shiny server
#
# Arguments (renumbered to match ${1}..${5} below; the original comment
# listed "arg2" twice):
# arg1: R version
# arg2: analysis pipeline branch
# arg3: NFS address /projects
# arg4: NFS address /topmed_home
# arg5: NFS address /admin
# ERR-trap handler: report the failing command, its line, and exit with
# the command's status.
f () {
    errcode=$? # save the exit code as the first thing done in the trap function
    # was `$errorcode` (undefined) — the message always printed an empty code
    echo "error $errcode"
    echo "the command executing at the time of the error was"
    echo "$BASH_COMMAND"
    echo "on line ${BASH_LINENO[0]}"
    # do some error handling, cleanup, logging, notification
    # $BASH_COMMAND contains the command that was being executed at the time of the trap
    # ${BASH_LINENO[0]} contains the line number in the script of that command
    # exit the script or return to try again, etc.
    exit $errcode # or use some other value or do return instead
}
trap f ERR
# Positional parameters with defaults (see header): R version, pipeline
# branch, then the three NFS exports.
R_VERSION=${1:-3.5.1}
AP_BRANCH=${2:-master}
PROJ_IP=${3:-172.255.33.0:/export_ebs/projects}
HOME_IP=${4:-172.255.33.0:/export_ebs/topmed_home}
ADMIN_IP=${5:-172.255.33.0:/export_ebs/topmed_admin}
RS_VERSION=1.1.447
echo ">>> Upgrading Ubuntu to R $R_VERSION"
echo ">>> Analysis Pipeline: $AP_BRANCH"
echo ">>> Project NFS address: $PROJ_IP"
echo ">>> Home NFS address: $HOME_IP"
echo ">>> Admin NFS address: $ADMIN_IP"
# update basic ubuntu (each step logs to its own file in the cwd)
echo ">>> Update Ubuntu packages ..."
./update_ubuntu.bash > update_ubuntu.log 2>&1
echo ">>> Update Ubuntu with hpc packages ..."
./install_ubuntu_hpc.bash > update_ubuntu_hpc.log 2>&1
echo ">>> Installing R $R_VERSION ..."
./install_R.bash $R_VERSION > install_r.log 2>&1
# install TOPMed R packages
echo ">>> Installing TOPMed R packages ..."
./install_topmed_ubuntu.bash $R_VERSION $AP_BRANCH > install_topmed.log 2>&1
# manually, mount the NFS volumes (mount points created on demand; each
# export mounted only when not already mounted, so re-runs are idempotent)
echo ">>> Mounting NFS volumes ..."
#if [ ! -d /projects ]; then
#    sudo mkdir /projects
#fi
if [ ! -d /topmed_home ]; then
    sudo mkdir /topmed_home
fi
if [ ! -d /admin ]; then
    sudo mkdir /admin
fi
if ! sudo mount | grep $PROJ_IP > /dev/null; then
    sudo mount -t nfs4 -o vers=4.1 $PROJ_IP /projects
else
    echo "$PROJ_IP already mounted"
fi
if ! sudo mount | grep $HOME_IP > /dev/null; then
    sudo mount -t nfs4 -o vers=4.1 $HOME_IP /topmed_home
else
    echo "$HOME_IP already mounted"
fi
if ! sudo mount | grep $ADMIN_IP > /dev/null; then
    sudo mount -t nfs4 -o vers=4.1 $ADMIN_IP /admin
else
    echo "$ADMIN_IP already mounted"
fi
echo ">>> Adding topmed group ..."
# create the topmed group (gid 1002) once, and make it the primary group
# of the current "ubuntu" login
mgroup=topmed
if ! compgen -g | grep $mgroup > /dev/null; then
    sudo addgroup -gid 1002 $mgroup
    # update ubuntu account (current login)
    sudo usermod -a -G $mgroup ubuntu
    sudo usermod -g $mgroup ubuntu
fi

# create_topmed_user NAME UID
# Idempotently create NAME with its home on the NFS share, primary group
# $mgroup and sudo rights; skipped when a matching account already
# exists.  (Replaces nine copy-pasted adduser stanzas.)
create_topmed_user() {
    local uaccnt="$1" uid="$2"
    if ! compgen -u | grep $uaccnt > /dev/null; then
        sudo adduser --home /topmed_home/$uaccnt --uid $uid --ingroup $mgroup --disabled-password --gecos GECOS $uaccnt
        sudo adduser $uaccnt sudo
    fi
}

# create user account
echo ">>> Creating UW user accounts ..."
create_topmed_user levined 1002
create_topmed_user kuraisa 1001
create_topmed_user sdmorris 1003
create_topmed_user mchughc 1010
create_topmed_user mconomos 1011
create_topmed_user amarise 1012
create_topmed_user avmikh 1013
create_topmed_user calaurie 1014
create_topmed_user analyst 1099
# install RStudio server
echo ">>> Install RStudio server ..."
# install RStudio — downloaded and installed once; the source directory
# under /usr/local/src doubles as the "already done" marker.
if [ ! -d /usr/local/src/rstudio ]; then
    sudo apt-get update
    sudo apt-get install -y gdebi-core net-tools
    # NOTE(review): mkdir/cd under /usr/local/src run without sudo here —
    # confirm the invoking user can write there.
    mkdir /usr/local/src/rstudio
    cd /usr/local/src/rstudio
    wget https://download2.rstudio.org/rstudio-server-$RS_VERSION-amd64.deb
    sudo gdebi -n rstudio-server-$RS_VERSION-amd64.deb
else
    echo "RStudio Server already built"
fi
# add uw users to rstudio group
echo ">>> Adding rstudio-server group to UW accounts ..."
sudo usermod -a -G rstudio-server levined
sudo usermod -a -G rstudio-server kuraisa
sudo usermod -a -G rstudio-server sdmorris
# rstudio users
echo ">>> Creating RStudio accounts"

# create_rstudio_user NAME
# Idempotently create NAME in the rstudio-server group with password
# "<NAME>server".  Uses a private variable for the password — the
# original assigned to PWD, clobbering the shell's working-directory
# variable.
create_rstudio_user() {
    local uaccnt="$1" pw
    if ! compgen -u | grep $uaccnt > /dev/null; then
        sudo adduser --ingroup rstudio-server --disabled-password --gecos GECOS $uaccnt
        pw=$uaccnt"server"
        echo "$uaccnt:$pw" | sudo chpasswd
    fi
}

create_rstudio_user rstudio1
create_rstudio_user rstudio2
create_rstudio_user rstudio3
echo "Modifying /etc/rc.local to mount nfs volumes ..."
# update /etc/rc.local to mount NFS volumes: the file is assembled
# locally, then copied into place with sudo so the home/admin mounts
# are re-established on every boot.
echo '#!/bin/sh -e' > rc.local
echo '#' >> rc.local
echo '# rc.local' >> rc.local
echo '# ' >> rc.local
echo '# mount nfs topmed volumes' >> rc.local
#echo mount -t nfs4 -o vers=4.1 $PROJ_IP /projects >> rc.local
echo mount -t nfs4 -o vers=4.1 $HOME_IP /topmed_home >> rc.local
echo mount -t nfs4 -o vers=4.1 $ADMIN_IP /admin >> rc.local
echo exit 0 >> rc.local
sudo cp rc.local /etc/rc.local
| true |
fc6d3a70607db36ccffe4c0dbd3fb5d8f4155269
|
Shell
|
Twinklebear/libbat-benchmark-scripts
|
/ior/submit_summit_ior_scaling.sh
|
UTF-8
| 1,289 | 3.65625 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Submit a series of IOR scaling runs on Summit via bsub, doubling the
# node count from <min-nodes> to <max-nodes>.
#
# Usage: submit_summit_ior_scaling.sh <fpp|shared|hdf5> <min-nodes> <max-nodes>
# Requires WORK_DIR and PARTITION in the environment, and must be run
# from inside the git checkout (paths are resolved via the repo root).
if [ -z "$WORK_DIR" ]; then
    echo "WORK_DIR must be set"
    exit 1
fi
REPO_ROOT=`git rev-parse --show-toplevel`
SCRIPTS_DIR=$REPO_ROOT/scripts/ior
# Pick the IOR input deck for the requested benchmark flavor.
if [ "$1" == "fpp" ]; then
    export IOR_SCRIPT=$SCRIPTS_DIR/ior-fpp-no-fsync.txt
elif [ "$1" == "shared" ]; then
    export IOR_SCRIPT=$SCRIPTS_DIR/ior-shared-file-no-fsync.txt
elif [ "$1" == "hdf5" ]; then
    export IOR_SCRIPT=$SCRIPTS_DIR/ior-hdf5-no-fsync.txt
else
    echo "Set the IOR benchmark type to run (fpp/shared/hdf5) as arg 1"
    exit 1
fi
if [ -z "$2" ]; then
    echo "Set min number of nodes to run to as second arg (should be power of 2)"
    exit 1
fi
if [ -z "$3" ]; then
    echo "Set max number of nodes to run to as second arg (should be power of 2)"
    exit 1
fi
min_nodes=$2
max_nodes=$3
# log2 of the node bounds via bc (truncated to an integer), so the loop
# index enumerates powers of two between min and max.
start_run=`echo "x = (l($min_nodes)/l(2)); scale = 0; x / 1" | bc -l`
end_run=`echo "x = (l($max_nodes)/l(2)); scale = 0; x / 1" | bc -l`
export IOR_EXE=~/repos/ior/install/bin/ior
for i in `seq $start_run $end_run`; do
    num_nodes=$((2**i))
    echo "Test $i on $num_nodes nodes"
    export OUTPUT=ior-$1-${num_nodes}summit
    # 20-minute wall time per job; stdout/stderr files named per run/job id
    bsub -nnodes $num_nodes -W 00:20 -P $PARTITION \
        -o $OUTPUT-%J.out \
        -e $OUTPUT-%J.err \
        -J $OUTPUT \
        $SCRIPTS_DIR/summit_ior.sh
done
| true |
48647abe1eaf709060910c756cd90707a7cad0e4
|
Shell
|
lkiesow/docker-scp
|
/run.sh
|
UTF-8
| 583 | 3.390625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# Entry point for the SCP-only SSH container.
#
# /conf holds one file per user, named after the account and containing
# that user's authorized_keys content.  For each file an account is
# created (shell restricted to rssh, home under /data) and the key is
# installed; finally sshd runs in the foreground.
set -ue
# Optionally seed a single user from the environment: DEPLOY_KEY is the
# public key, DEPLOY_USER the account name (default "deploy").
if [ -n "${DEPLOY_KEY+x}" ]; then
    echo "${DEPLOY_KEY}" > "/conf/${DEPLOY_USER:-deploy}"
fi
# remove old keys
rm -f /etc/ssh/authorized_keys/* || :
# add new users and keys
cd /conf
for user in *; do
    # With no files in /conf the glob stays a literal "*"; skip it
    # instead of failing (and aborting, under `set -e`) while trying to
    # create a user named "*".
    [ -e "${user}" ] || continue
    if ! id -u "${user}"; then
        adduser -D -s /usr/bin/rssh -h "/data/${user}" "${user}"
        passwd -u "${user}"
    fi
    install -m 400 -o "${user}" "${user}" /etc/ssh/authorized_keys/
done
# generate a host key on first start only
if [ ! -f /etc/ssh/ssh_host_ed25519_key ]; then
    ssh-keygen -f /etc/ssh/ssh_host_ed25519_key -N '' -t ed25519
fi
# run sshd in the foreground (-D), logging to stderr (-e)
/usr/sbin/sshd -D -e -h /etc/ssh/ssh_host_ed25519_key
| true |
cba6fd4522fadc8b365c0cb251fe09aea5be9b7d
|
Shell
|
divramod/darbs
|
/scripts/common/wik_mux_pro_cra
|
UTF-8
| 436 | 2.953125 | 3 |
[] |
no_license
|
#!/bin/bash
# Open a fixed set of wiki project pages (plus this script itself) as
# vim tabs, with the admin page first.
# helper
wik_path="${HOME}/art/wik"
cat_path="${wik_path}/ref"
# absolute path of this script, so it can be edited alongside the pages
src_path="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/$(basename "$0")"
# files
f1="${cat_path}/senior_developer/project.md"
f2="${cat_path}/vim/project.md"
f3="${cat_path}/wiki/project.md"
f4="${cat_path}/manjaro/project.md"
f5="${cat_path}/admin/project.md"
# run (bail out if the wiki root does not exist)
cd "${wik_path}" || exit
vim -p "${f5}" "${f1}" "${f2}" "${f3}" "${f4}" "${src_path}"
| true |
d281e36a60ade24b3bc174f8f4805df2e32e07e5
|
Shell
|
djchopper/bosp
|
/scripts/bbqProfiler.sh
|
UTF-8
| 5,687 | 3.359375 | 3 |
[] |
no_license
|
#!/bin/bash
#
# @file profiling.sh
# @brief Barbeque RTRM Profiling
#
# This is a script based Profiling routine which runs the Barbeque RTRM on
# different platform configurations in order to collect performance metrics and
# graph them.
#
# @author Patrick Bellasi (derkling), derkling@gmail.com
#
# @internal
# Created 09/12/2011
# Revision $Id: doxygen.templates,v 1.3 2010/07/06 09:20:12 mehner Exp $
# Company Politecnico di Milano
# Copyright Copyright (c) 2011, Patrick Bellasi
#
# This source code is released for free distribution under the terms of the
# GNU General Public License as published by the Free Software Foundation.
# ============================================================================

# Run identification and tunables — every knob below can be overridden
# from the environment (e.g. COUNT=10 ./bbqProfiler.sh).
DATETIME=`date +%Y%m%d_%H%M%S`
DATETIME_STR=`date`
RESULTS=${RESULTS:-"`pwd`/bbqprof_$DATETIME"}
BOSP_BASE=${BOSP_BASE:-"/opt/MyBOSP"}
EXC_RECIPE=${EXC_RECIPE:-"1Awm1PEPrio1"}
EXC_WORKLOAD=${EXC_WORKLOAD:-2}
EXC_CYCLES=${EXC_CYCLES:-20}
COUNT=${COUNT:-30}
BG=${BG:-"true"}
CORES=${CORES:-"1"}
CLUSTERS=${CLUSTERS:-"1 4"}
PES=${PES:-"4 16"}
EXCS=${EXCS:-"4 8 16 32 64"}

# The set of metrics to be collected for graph generation
METRICS="
bq.sp.syncp.avg.time \
bq.sp.syncp.avg.pre \
bq.sp.syncp.avg.lat \
bq.sp.syncp.avg.sync \
bq.sp.syncp.avg.do \
bq.sp.syncp.avg.post \
bq.sm.time \
bq.sm.yamca.map \
bq.sm.yamca.entities \
bq.sm.yamca.ord \
bq.sm.yamca.mcomp \
bq.sm.yamca.sel \
"

# Setup other configuration vars
BBQUE_SCRIPT="$RESULTS/bbque_profiling_$USER.sh"
PROGRESS="$RESULTS/bbque_profiling.log"
# DRY_RUN=1 only prints the test matrix without running anything
DRY_RUN=${DRY_RUN:-0}
# Append a timestamped line to the $PROGRESS log while echoing it to
# stdout.  $1: message (escape sequences are honored, as with echo -e).
function bbq_log {
    echo -e "$(date): $1" | tee -a $PROGRESS
}
# Clear the screen and print (and append to $PROGRESS) the banner
# describing this profiling session's configuration.
function dumpConf {
    clear
    echo -e "\t\t=====[ BBQ Profiling Configuration ]=====\n"\
    "\n"\
    "BOSP Base: $BOSP_BASE\n"\
    "Recipe: $EXC_RECIPE\n"\
    "Host Cores: $CORES\n"\
    "Loops per test: $COUNT\n"\
    " Clusters: $CLUSTERS\n"\
    " PEs: $PES\n"\
    " EXCs: $EXCS\n"\
    "Results dir: $RESULTS\n"\
    "\n" | tee -a $PROGRESS
}
# Send SIGUSR2 to the user's running barbeque daemon — presumably this
# makes it dump its statistics (TODO confirm against the daemon's
# signal handling).
function bbq_stats {
    kill -USR2 `ps aux | grep "sbin/barbeque " | grep $USER | grep -v grep \
        | head -n1 | awk '{print \$2}'`
}
# Check for a BarbequeRTRM instance owned by $USER.
# Note the inverted convention: returns 0 when NO instance is running,
# returns 1 (and sets BBQ_PID) when one is found.
function bbq_running {
    # Look for a BarbequeRTRM running instance
    BBQ_GREP=`ps aux | grep "sbin/barbeque " | grep $USER | grep -v grep \
        | wc -l`
    #bbq_log "Grep: [$BBQ_GREP]"
    [ $BBQ_GREP -eq 0 ] && return 0
    # Report the PID of the BarbequeRTRM instance
    BBQ_PID=`ps aux | grep "sbin/barbeque " | grep $USER | grep -v grep \
        | head -n1 | awk '{print \$2}'`
    bbq_log "  Found BarbequeRTRM instance, PID: $BBQ_PID"
    return 1
}
# Ask the user's barbeque daemon to terminate by sending SIGINT to the
# first matching process.
function bbq_stop {
    sleep 1
    BBQ_PID=`ps aux | grep "sbin/barbeque " | grep $USER | grep -v grep \
        | head -n1 | awk '{print \$2}'`
    bbq_log "  Terminating BBQ (PID: $BBQ_PID)"
    kill -INT $BBQ_PID
}
# Setup the BarbequeRTRM sartup script
# Function parameter:
# CLUSTER; PEs; EXCs; TIME; CYCLES; RECIPE; LOGFILE
# Generates $BBQUE_SCRIPT (run detached or inside an xterm depending on
# $BG), drives $COUNT runs of the RTLib test app against the daemon,
# then stops the daemon and compresses its log.
function runTest {
    cat > $BBQUE_SCRIPT <<EOF
#!/bin/sh
if [ "x$BG" == "xtrue" ]; then
stdbuf -oL $BOSP_BASE/out/sbin/barbeque \
--tpd.clusters $1 --tpd.pes $2 \
> $RESULTS/$7.log
else
stdbuf -oL $BOSP_BASE/out/sbin/barbeque \
--tpd.clusters $1 --tpd.pes $2 \
| tee $RESULTS/$7.log
fi
EOF
    chmod a+x $BBQUE_SCRIPT
    # launch the daemon in the background or in a visible terminal
    if [ "x$BG" == "xtrue" ]; then
        $BBQUE_SCRIPT &
    else
        xterm -bg black -fg gray -geometry 143x73+7-34 \
            -title "Barbeque RTMR" $BBQUE_SCRIPT &
    fi
    sleep 1
    for i in `seq 1 $COUNT`; do
        $BOSP_BASE/out/usr/bin/BbqRTLibTestApp -e $3 -w $4 -c $5 -r $6
    done
    # keep sending SIGINT until the daemon is actually gone
    while [ 1 ]; do
        bbq_running && break
        bbq_stop
    done
    bzip2 $RESULTS/$7.log
}
# Extract the metrics of interest from a compressed BarbequeRTRM log.
# $1: log basename under $RESULTS (reads $1.log.bz2, writes $1.stats
# through the $DISTILLER grep pipeline).
function getStats {
    bbq_log "    Distilling stats for $1..."
    local src=$RESULTS/$1.log.bz2
    local dst=$RESULTS/$1.stats
    bzcat $src | $DISTILLER > $dst
}
# Function parameter:
# Input: CLUSTERS; PEs; EXCs
# Compose the log-file basename encoding the whole configuration, then
# run the test and distill its stats.  DRY_RUN=1 only logs the name.
function testCase {
    LOGFILE=`printf "bbqprof-%s-HC%02d-e%03dw%03dc%03d-C%03dPE%03d" \
        $EXC_RECIPE $CORES $3 $EXC_WORKLOAD $EXC_CYCLES $1 $2`
    bbq_log "Running test $LOGFILE..."
    [ $DRY_RUN -eq 1 ] && return
    runTest $1 $2 $3 $EXC_WORKLOAD $EXC_CYCLES $EXC_RECIPE $LOGFILE
    getStats $LOGFILE
}
# Graph extraction: Metric vs EXCs
# Function parameter: METRIC
# For every (cluster, PEs) platform, collect the metric's per-EXC
# min/max/avg/stddev from the .stats files into a plotting-friendly
# .dat table under results/.
function extXexcYmet {
    M=${1:-"bq.sp.syncp.avg.time"}
    cd results
    echo "Extracing data for metric $M..."
    for C in $CLUSTERS; do
        for P in $PES; do
            PLAT=`printf "w%03dc%03d-C%03dPE%03d" \
                $EXC_WORKLOAD $EXC_CYCLES $C $P`
            DATFILE=`printf "graph-%s-%s-HC%02d-%s.dat" \
                ${M//./_} $EXC_RECIPE $CORES $PLAT`
            DATHEADER=""
            DATHEADER="$DATHEADER# BarbequeRTRM (Host Cores: $CORES)\n"
            DATHEADER="$DATHEADER# Test started at: $DATETIME_STR\n"
            DATHEADER="$DATHEADER# Metrics: $M\n# Platform: $PLAT\n"
            DATHEADER="$DATHEADER#EXCs\tMin\tMax\tAvg\tStdDev"
            echo -e $DATHEADER > $DATFILE
            # split the EXC count out of the file name, squeeze spaces,
            # then pick the stat columns from the distilled metric line
            grep $M *$PLAT.stats \
                | sed -r 's/-e([0-9]{3}+)w/-e \1 w/' \
                | tr -s " " \
                | awk '{printf " %3d %13.3f %13.3f %13.3f %13.3f\n", \
                    $2, $12, $14, $16, $18}' \
                >> $DATFILE
        done
    done
    cd - >/dev/null
}
###############################################################################
# MAIN
###############################################################################
# Setup results dir (the "results" symlink always points at the latest run)
mkdir -p $RESULTS
[ -e results ] && rm -f results
ln -s $RESULTS results
# Dump configuration used
dumpConf
# Setup metrics distiller: a grep keeping only the metric lines of interest
DISTILLER="grep -e Metrics -e Description "
for M in $METRICS; do
    DISTILLER="$DISTILLER -e $M"
done
# Run tests over the full (clusters x PEs x EXCs) matrix
for C in $CLUSTERS; do
    for P in $PES; do
        for E in $EXCS; do
            testCase $C $P $E
        done
    done
done
# Extracing results dataset
for M in $METRICS; do
    extXexcYmet $M
done
rm -f $BBQUE_SCRIPT
bbq_log "\n\nTesting completed\n\n"
| true |
6410cf304072445a8c0e6e5b260541bcc4a4b096
|
Shell
|
liskin/dotfiles
|
/bin/needsterminal
|
UTF-8
| 306 | 2.921875 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Re-exec the given command inside a terminal emulator when not already
# attached to one (usable e.g. as a mailcap "needsterminal" shim).
set -eu
# Not on a tty or no X display: spawn x-terminal-emulator re-running this
# script; _WAIT_FOR_WM makes the child wait for the window manager.
# Optional knobs: _XTERM_HOLD keeps the window open; _XTERM_FLOAT passes
# an -xrm resource.  NOTE(review): the ')' inside the -xrm value looks
# like a stray typo — confirm the intended X resource string.
[[ -t 0 && $DISPLAY ]] || _WAIT_FOR_WM=: exec x-terminal-emulator ${_XTERM_HOLD:+-hold} ${_XTERM_FLOAT:+-xrm 'URxvt.transient-for: 0x11111111')} -e "$0" "$@"
[[ ${_WAIT_FOR_WM-} ]] && { xwaitforwindow || :; }
unset _WAIT_FOR_WM _XTERM_HOLD
# set the terminal title to the command name, then replace the shell
echo -ne "\033]0;$1\007"
exec "$@"
| true |
47d1dc1caf2e971f8b2f6d44adcd2fa0b5c88b0e
|
Shell
|
exoscale/puppetdb
|
/docker/puppetdb-base/docker-entrypoint.d/40-consul.sh
|
UTF-8
| 624 | 3.015625 | 3 |
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Register this PuppetDB instance with a Consul agent.
# No-op unless CONSUL_ENABLED=true; requires NETWORK_INTERFACE,
# CONSUL_HOSTNAME and CONSUL_PORT in the environment.
if [ "$CONSUL_ENABLED" = "true" ]; then
    # first IPv4 address of the configured interface
    ipaddress="$(ifconfig $NETWORK_INTERFACE | grep -v 'inet6' | grep 'inet' | head -1 | tr -s ' ' | cut -d ' ' -f 3)"
    cat <<SERVICEDEF > /puppet-service.json
{
"name": "puppetdb",
"id": "$HOSTNAME",
"port": 8080,
"address": "$ipaddress",
"checks": [
{
"http": "http://$HOSTNAME:8080/status/v1/services/puppetdb-status",
"interval": "10s",
"deregister_critical_service_after": "10m"
}
]
}
SERVICEDEF
    # Read the definition back from the absolute path it was written to.
    # The original used the relative path @puppet-service.json, which
    # only matched when the working directory happened to be /.
    curl \
        --request PUT \
        --data @/puppet-service.json \
        http://$CONSUL_HOSTNAME:$CONSUL_PORT/v1/agent/service/register
fi
| true |
da90b96c71938b79e4b32c2faa2719557146f21d
|
Shell
|
onap/archive-vfc-nfvo-driver-sfc
|
/zte/sfc-driver/plugin-standalone/src/main/assembly/docker/docker-env-config.sh
|
UTF-8
| 926 | 2.828125 | 3 |
[
"CC-BY-4.0",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
# Build-time provisioning for the VF-C ZTE SFC driver container:
# installs a JRE plus tools, fetches the latest driver binary from ONAP
# Nexus, creates the service user, and trims the yum cache.

install_sf(){
    # use a fixed mirror instead of the fastestmirror plugin
    sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf
    sed -i 's|#baseurl=http://mirror.centos.org/centos|baseurl=http://mirrors.ocf.berkeley.edu/centos|' /etc/yum.repos.d/*.repo
    yum update -y
    yum install -y wget unzip socat java-1.8.0-openjdk-headless
    # shorten the JVM DNS cache so container DNS changes get picked up
    sed -i 's|#networkaddress.cache.ttl=-1|networkaddress.cache.ttl=10|' /usr/lib/jvm/jre/lib/security/java.security
    # get binary zip from nexus
    wget -q -O vfc-sfcdriver-zte.zip 'https://nexus.onap.org/service/local/artifact/maven/redirect?r=snapshots&g=org.onap.vfc.nfvo.driver.sfc.zte.sfc-driver-standalone&a=vfc-nfvo-driver-sfc-zte-sfc-driver&v=LATEST&e=zip' && \
    unzip vfc-sfcdriver-zte.zip && \
    rm -rf vfc-sfcdriver-zte.zip
    chmod +x *.sh
    chmod +x docker/*.sh
}

add_user(){
    # unprivileged account owning the service tree
    useradd onap
    chown onap:onap -R /service
}

clean_sf_cache(){
    yum clean all
}

install_sf
# `wait` reaps background jobs; none are started above, so it is a no-op
wait
add_user
clean_sf_cache
| true |
68204f8fef66a511aa078582abdf5b9dfa946c0d
|
Shell
|
pezholio/dotfiles
|
/aliases.sh
|
UTF-8
| 584 | 3.171875 | 3 |
[] |
no_license
|
#!/bin/bash
# Shell aliases and Homebrew/Brewfile helpers.
alias a="atom ."
alias t="gittower ."
alias c="code ."

# bi: brew-install a formula, record it in the Brewfile, commit and push.
function bi() {
  echo "==> Installing $1"
  brew install "$@"
  echo "==> Adding $1 to the Brewfile"
  brew bundle dump --force
  echo "==> Committing and pushing changes"
  git add Brewfile
  git commit -a -m "Adding $1 to the Brewfile"
  git push
}

# bu: brew-uninstall a formula, record the removal, commit and push.
function bu() {
  echo "==> Uninstalling $1"
  # was `brew install` — it reinstalled the formula it claimed to remove
  brew uninstall "$@"
  echo "==> Removing $1 from the Brewfile"
  brew bundle dump --force
  echo "==> Committing and pushing changes"
  git add Brewfile
  git commit -a -m "Removing $1 from the Brewfile"
  git push
}
| true |
91cb4d720d5397abfb231cb5a8c2aa67ecab0bc4
|
Shell
|
mwaghadhare/terraform-infra-bootmst
|
/modules/networking/bastion/scripts/userdata.sh
|
UTF-8
| 234 | 2.515625 | 3 |
[] |
no_license
|
#!/bin/bash
# Format the secondary EBS block device, mount it at /data and persist
# the mount across reboots via /etc/fstab.
sudo mkfs -t ext4 /dev/xvdb
sudo mkdir /data
sudo mount /dev/xvdb /data
# `sudo echo ... >> /etc/fstab` does not elevate the redirection: the
# append is performed by the (possibly unprivileged) calling shell.
# Route the write through `sudo tee -a` instead.
echo "/dev/xvdb /data ext4 defaults,nofail 0 2" | sudo tee -a /etc/fstab
| true |
746c6d6e8fcf174bb406495eedbceeda561bd6cb
|
Shell
|
giordanorn/dotfiles
|
/.config/i3blocks/scripts/cpu-frequency
|
UTF-8
| 273 | 2.78125 | 3 |
[] |
no_license
|
#!/bin/sh
# i3blocks widget: print the current CPU frequency in GHz, truncated
# (not rounded) to one decimal, taken from lscpu's "CPU MHz" line.
# TODO it runs fine when in terminal, but in i3block it shows wrong results
#OUTPUT=$(cat /proc/cpuinfo | awk '/^cpu MHz/ { print (int($4 / 100) / 10) " GHz" }' | head -n1)
OUTPUT=$(lscpu | awk '/^CPU MHz/ { print (int($3 / 100) / 10) " GHz" }')
echo "$OUTPUT"
| true |
dfdc523973159b2043611b6a5ecab8bc073c05e2
|
Shell
|
mkenney/k8s-proxy
|
/test/start-dev.sh
|
UTF-8
| 3,834 | 3.953125 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# Dev helper: (re)deploy the k8s-proxy service into the current kubectl
# context, building the Go binary locally so it can be mounted into the
# container, alongside a set of nginx test services.
# NOTE(review): the shebang is /bin/sh but the script uses bashisms
# (`&>`, `(( ))`) — confirm it is always run under bash.
IMAGE=mkenney/k8s-proxy:latest
DEPLOYMENT=k8s-proxy
k8s_context=$(kubectl config view -o=jsonpath='{.current-context}')
# terminal colors for the confirmation banner
WARN=$'\033[38;5;1m'
EMPH=$'\033[38;5;172m'
NORM=$'\033[0m'
printf "
This script will start the kubernetes proxy service using the \`kubectl apply\`
command.
${WARN}Please make sure you are configured for the intended environment${NORM}
Current context: ${EMPH}${k8s_context}${NORM}
"
read -p "Do you want to continue? [y/N]: " EXECUTE
if [ "y" != "$EXECUTE" ] && [ "Y" != "$EXECUTE" ]; then
    exit 0
fi
printf "
Starting the k8s-proxy and test services.
This script will build the binary from the current source and start (or
restart) the proxy service, mounting the binary into the container.
It will also start several simple nginx services to use for testing.
Each one hosts a page that reports the name of the service being
accessed. Each service represents a different test case:
* k8s-proxy
This service should serve all traffic on port 80 (working on 443...).
It will route based on the domain being requested. For example,
http://service1.somehost should route the deployment managed by
\`service1\`, and http://service2.somehost should route to the
deployment managed by \`service2\`.
* k8s-proxy-test-1
No labels defined, traffic routed to the service name. Service
should be available at http://k8s-proxy-test-1... and should
result in a page that displays 'k8s-proxy-test-1'.
* k8s-proxy-test-2
Labels defined, traffic routed to the specified subdomain:
k8s-proxy-domain: k8s-proxy-test-2-label
k8s-proxy-protocol: HTTP
Service should be available at http://k8s-proxy-test-2-label... and
should result in a page that displays 'k8s-proxy-test-2'.
* k8s-proxy-test-3
Valid service deployed but no deployment to route traffic to.
Service is expected to be available at http://k8s-proxy-test-3...
and should instead result in a 503 error after a 30 second timeout
period.
* k8s-proxy-test-4
No service deployed and navigating to http://k8s-proxy-test-4... (or
any other non-existant service) should immediately result in a 502
error.
"
workdir=$(pwd)
cd $workdir/..
# optional image rebuild when invoked with "build" / "--build"
if [ "build" = "$1" ] || [ "--build" = "$1" ]; then
    echo "building image..."
    docker build -t $IMAGE . &> /dev/null
    exit_code=$?
    if [ "0" != "$exit_code" ]; then
        echo "    building image '$IMAGE' failed"
        exit $exit_code
    fi
fi
# cross-compile the proxy binary for the linux container
cd $workdir/../pkg
echo "building k8s-proxy binary"
GOOS=linux go build -o $workdir/bin/k8s-proxy
if [ "0" != "$?" ]; then
    echo "    building binary failed"
    exit 1
fi
echo
echo "removing k8s-proxy deployment and service..."
kubectl delete deploy k8s-proxy &> /dev/null
kubectl delete service k8s-proxy &> /dev/null
kubectl delete ingress k8s-proxy &> /dev/null
cd $workdir
echo
echo "applying k8s-proxy deployment and service..."
# substitute the current directory into the manifest's $PWD placeholder
cat k8s-proxy-dev.yml | sed s,\$PWD,$(pwd), | kubectl create -f - > /dev/null
pod=
printf "\n"
trycount=0
# wait up to ~60s for a Running 1/1 k8s-proxy pod (test pods excluded)
while [ ! -n "$pod" ] && [ "60" -gt "$trycount" ]; do
    sleep 1
    pod=$(kubectl get po | grep 'k8s-proxy' | grep -i running | grep '1/1' | grep -v 'k8s-proxy-test' | awk '{print $1}')
    printf "."
    ((trycount+=1))
done
printf "\n"
echo
echo "Service:"
echo "$(kubectl get service | egrep '(k8s-proxy)|(NAME)' | grep -v 'k8s-proxy-test')"
echo
echo "Deployment:"
echo "$(kubectl get deploy | egrep '(k8s-proxy)|(NAME)' | grep -v 'k8s-proxy-test')"
echo
echo "Pods:"
echo "$(kubectl get po | egrep '(k8s-proxy)|(NAME)' | grep -v Terminating | grep -v 'k8s-proxy-test')"
echo
if [ "" = "$pod" ]; then
    echo "Timed out waiting for pod to be ready"
    # NOTE(review): exits 0 on timeout — confirm whether callers expect
    # a non-zero status here.
    exit 0
fi
# hide the readiness/liveness probe noise...
echo "kubectl logs -f $pod | grep -v 'probe OK'"
echo
kubectl logs -f $pod | grep -v 'probe OK'
| true |
db620f804df0e977dfbff356e67e959708496eb1
|
Shell
|
Brainfood-com/localdev
|
/images/control/configure_docker_daemons
|
UTF-8
| 1,589 | 3.765625 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Assemble a Docker Swarm from hosts resolved via the docker-master and
# docker-slave DNS names: the first master initializes the swarm, the
# remaining masters join as managers, and the slaves join as workers.
set -e

# configure_docker_node TYPE ADDRESS [MASTER]
# Inspect the daemon at ADDRESS and bring it into the desired swarm
# role.  swarm_status is one of "needs-swarm", "control=true", or
# "control=false <manager-addr>..." depending on the node's state.
configure_docker_node() {
    declare type="$1" address="$2" master=
    shift 2
    if [[ $# -gt 0 ]]; then
        master="$1"
        shift
    fi
    declare swarm_status="$(docker -H "$address" info -f '{{if eq .Swarm.NodeID ""}}needs-swarm{{else if eq .Swarm.ControlAvailable true}}control=true{{else}}control=false{{range .Swarm.RemoteManagers}} {{.Addr}}{{end}}{{end}}' 2>/dev/null)"
    case "$type:$swarm_status" in
        (master:needs-swarm)
            docker -H "$address" swarm init
            ;;
        (master:control=true)
            # already a swarm manager — nothing to do
            ;;
        (manager:needs-swarm)
            declare JOIN_TOKEN=$(docker -H "$master" swarm join-token manager -q)
            docker -H "$address" swarm join --token "$JOIN_TOKEN" "$master"
            ;;
        (manager:control=true)
            ;;
        (slave:needs-swarm)
            declare JOIN_TOKEN=$(docker -H "$master" swarm join-token worker -q)
            docker -H "$address" swarm join --token "$JOIN_TOKEN" "$master"
            ;;
        (slave:control=false\ *)
            set -- $swarm_status
            shift
            # TODO: validate remote managers match
            ;;
        (*)
            # unexpected type/state combination — abort the whole setup
            exit 1
            ;;
    esac
}

# Resolve the node address lists from DNS and drive
# configure_docker_node for each host.
configure_docker_daemons() {
    declare master_addresses=($(getent hosts docker-master | cut -f 1 -d ' '))
    declare slave_addresses=($(getent hosts docker-slave | cut -f 1 -d ' '))
    # no masters resolved: nothing to configure
    [[ ${#master_addresses[*]} -gt 0 ]] || return 0
    configure_docker_node master "${master_addresses[0]}"
    declare address
    for address in "${master_addresses[@]:1}"; do
        configure_docker_node manager "$address" "${master_addresses[0]}"
    done
    for address in "${slave_addresses[@]}"; do
        configure_docker_node slave "$address" "${master_addresses[0]}"
    done
}

configure_docker_daemons
| true |
2bae57fdd4f8b861b8449a14c589da39a96e93d0
|
Shell
|
ez3r0sec/massmgmt
|
/munki
|
UTF-8
| 968 | 3.3125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# munki (UNIX exec.)
# configure munki (if desired)
# Last Edited: 6/16/18
### VARIABLES
source /usr/local/massmgmt/lib/massmgmtlib.sh
# Anchor each key to the first whitespace-separated field (the original
# `print $2` implies "Key Value" lines): a bare /Munki/ regex also
# matched the MunkiURL and MunkiClient lines, so scriptEnabled picked up
# several values and the Enable/Disable comparison below always fell
# through to the "invalid" branch.
scriptEnabled="$(awk '$1 == "Munki" {print $2}' "$configFile")"
munkiURL="$(awk '$1 == "MunkiURL" {print $2}' "$configFile")"
munkiClient="$(awk '$1 == "MunkiClient" {print $2}' "$configFile")"
### SCRIPT
if [ "$scriptEnabled" == "Disable" ] ; then
    echo "$logStart Munki configuration is disabled" >> "$logFile"
    exit
elif [ "$scriptEnabled" == "Enable" ] ; then
    echo "$logStart Configuring Munki URL: $munkiURL" >> "$logFile"
    defaults write /Library/Preferences/ManagedInstalls SoftwareRepoURL "$munkiURL"
    echo "$logStart Configuring Munki Client: $munkiClient" >> "$logFile"
    defaults write /Library/Preferences/ManagedInstalls ClientIdentifier "$munkiClient"
    echo "$logStart Munki configuration complete" >> "$logFile"
else
    echo "$logStart Munki configuration invalid" >> "$logFile"
fi
exit
| true |
2f1b4655d5e33baed8cc259f7362c16854ab2b2d
|
Shell
|
gigadeleo/keyboard_walk
|
/keyboard_walk.sh
|
UTF-8
| 5,113 | 3.890625 | 4 |
[] |
no_license
|
#!/bin/bash
#
# +-----------------------------------------------+
# |                Keyboard_Walk.sh               |
# +-----------------------------------------------+
# |      A simple SSH host discovery and a        |
# |      pre-set password bruteforce tool.        |
# +-----------------------------------------------+
#
# NOTE: the original first line was "# !/bin/bash" -- the space made it a
# plain comment rather than a shebang, so the script ran under the caller's
# shell. Fixed to a real bash shebang (the script uses [[ ]]).
#
# -------------------------------------------------
# COLOUR PRESETS
# -------------------------------------------------
GRN='\033[1;32m'
LRED='\033[1;31m'
YLW='\033[1;33m'
NC='\033[0m' #nocolor
#
# -------------------------------------------------
# HELP FILE / ARGUMENTS NOT FOUND: N00b ALERT
# -------------------------------------------------
# (The original help claimed 3 arguments and numbered the flag "3" as well;
# the script actually takes 4 -- fixed below.)
#
if [[ $# -eq 0 ]] ; then
	echo -e "${YLW}WARNING: The script expects 4 arguments:${NC}"
	echo -e "----------------------------------------------"
	echo -e " 1: [File] Name of the subnet file in the same directory;"
	echo -e " 2: [File] Name of the username file in the same directory;"
	echo -e " 3: [File] Name of the password file in the same directory;"
	echo -e " 4: [Flag] 0 - to Force Re-scan of hosts; 1 - to use previously identified scan results."
	echo -e ""
	echo -e "Required Tools:"
	echo -e "----------------------------------------------"
	echo -e " nmap"
	echo -e " medusa"
	echo -e ""
	echo -e "Usage Example:"
	echo -e "----------------------------------------------"
	echo -e " ./keyboard_walk.sh subnet_file uname_file pass_file 0${NC}"
	exit 0
fi
#
# -------------------------------------------------
# START PROCEDURE
# -------------------------------------------------
#
echo -e "${GRN}--[ Starting Keyboard Walk Script ]--${NC}"
#
# -------------------------------------------------
# SETTING ARGUMENTS
# -------------------------------------------------
#
subnet_file=$1
uname_file=$2
pword_file=$3
force_rescan=$4
DATE_FORM=$(date +"%Y-%m-%d") #date in the format we want YYYY-mm-dd
#
# -------------------------------------------------
# FIND PREREQUISITES
# -------------------------------------------------
#
# Check Subnet File
echo -e "${GRN}--[ Checking Prerequisites ]--${NC}"
if [ -f "$subnet_file" ]
then
	echo "Required 'subnet' File: Found."
else
	echo "Required 'subnet' File: Not Found. Aborting."
	exit 1
fi
#
# Check Uname File
if [ -f "$uname_file" ]
then
	echo "Required 'uname' File: Found."
else
	echo "Required 'uname' File: Not Found. Aborting."
	exit 1
fi
#
# Check Password File
if [ -f "$pword_file" ]
then
	echo "Required 'password' File: Found."
else
	echo "Required 'password' File: Not Found. Aborting."
	exit 1
fi
#
# Check Re-Scan Flag (0 = force a fresh scan, 1 = reuse livehost_list)
if [ -f livehost_list ] && [ "$force_rescan" -eq 0 ]
then
	echo "Previous scan File: livehost_list Found & Force Rescan = ON. Deleting livehosts list."
	rm livehost_list
elif [ -f livehost_list ] && [ "$force_rescan" -eq 1 ]
then
	echo "Previous scan File: livehost_list Found & Force Rescan = OFF. Keeping livehosts list for next scan."
else
	echo "No files from previous scan found."
fi
#
# -------------------------------------------------
# COUNT ALL
# -------------------------------------------------
#
# Count Subnets
echo -e "${LRED}Total Subnets Identified${NC}"
wc -l < "$subnet_file"
#
# Count Uname
echo -e "${LRED}Usernames to Test per host${NC}"
wc -l < "$uname_file"
#
# Count Passwords
echo -e "${LRED}Passwords to Test per host${NC}"
wc -l < "$pword_file"
#
# -------------------------------------------------
# START KEYBOARDWALK AUDIT
# -------------------------------------------------
#
# Discover hosts with TCP/22 open; keep the IP of every host reported "Up".
echo -e "${GRN}--[ Discovering Live Hosts (nmap) ]--${NC}"
#
# Check force_rescan flag
if [ "$force_rescan" -eq 0 ]
then
	nmap -n -p 22 --max-retries 2 --open -iL "$subnet_file" -oG - | awk '/Up$/{print $2}' >> livehost_list
else
	echo -e "Using old scan results"
fi
#
# Count IPs
echo -e "${LRED}Live IPs Found${NC}"
wc -l < livehost_list
#
# Start Crackin'
echo -e "${GRN}--[ Running Keyboard Walk Exploit Tool (medusa) ]--${NC}"
medusa -U "$uname_file" -P "$pword_file" -H livehost_list -M ssh -T10 -e ns -O log/scan.log
#
# End Procedure
echo -e "${GRN}--[ Ending Keyboard Walk Assessment Script ]--${NC}"
#
# -------------------------------------------------
# DISPLAY RESULTS
# -------------------------------------------------
#
echo -e ""
echo -e "----------------------------------------------"
echo -e "${YLW}LAST SCAN RESULTS:${NC}"
echo -e "----------------------------------------------"
more "+/$DATE_FORM" log/scan.log
#
# -------------------------------------------------
# ALTERNATIVES / TESTING
# -------------------------------------------------
#
# MassScan to replace NMAP
# #masscan --rate 100 -c msconf -oG - | awk '/^Host:/ { print $2 }' >> livehost_list
# #nmap -n -p 22 --open -iL $1 -oG - | awk '/Up$/{print $2}' >> livehost_list
#
# Hydra / NCRACK to replace MEDUSA
# #hydra -V -l root -P $2 -e ns -t 5 -w 10 -f -M livehost_list ssh
# #hydra -t5 -l root -P $2 -M livehost_list ssh
# #ncrack -u root -P $2 -p 22 -iL livehost_list
#
# -------------------------------------------------
# [-END SCRIPT-]
# -------------------------------------------------
| true |
d7d8e0da2aaf4614a89db48ebe63efd380ddd917
|
Shell
|
sgwilbur/jbe-helpers
|
/engine_template.sh
|
UTF-8
| 515 | 2.59375 | 3 |
[] |
no_license
|
#!/bin/sh
# Template launcher for a Rational Team Concert Jazz Build Engine (JBE).
# Fill in the per-engine settings below before use.
export ENGINE_ID=
export REPOSITORY_URL=
export JBE_USER=
export JBE_PASSFILE=
# Working directory for the engine; intentionally not exported.
JBE_WORKDIR=
# Fixed install layout of the Team Concert build system.
export JBE_INSTALL=/opt/IBM/jazz/v3/TeamConcertBuild_3000
export JBE_HOME=${JBE_INSTALL}/buildsystem/buildengine/eclipse
export JBE_BUILDTOOLKIT=${JBE_INSTALL}/buildsystem/buildtoolkit
# Run the build engine from its working directory.
cd ${JBE_WORKDIR}
${JBE_HOME}/jbe -repository ${REPOSITORY_URL} -userId ${JBE_USER} -passwordFile ${JBE_PASSFILE} -engineId ${ENGINE_ID}
| true |
29627bf568e5685685c32666c433b2fbccc9987c
|
Shell
|
automation-workshop/shell-kibana
|
/scripts/elasticsearch.sh
|
UTF-8
| 480 | 3.125 | 3 |
[] |
no_license
|
#!/bin/bash
# Install Elasticsearch 1.4.3 from the official .deb (plus OpenJDK 7 if no
# JVM is present), write a minimal config pointing data at /esdata, and
# (re)start the service.
DEB_URL="https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.4.3.deb"
# Elasticsearch needs a JVM; install one only when /usr/bin/java is missing.
if [ ! -x /usr/bin/java ]; then
	apt-get install -qq -y openjdk-7-jre
fi
# Install the package only once; the .deb ships the init script checked here.
if [ ! -f /etc/init.d/elasticsearch ]; then
	cd /tmp
	curl -Os "$DEB_URL"
	dpkg -i "${DEB_URL##*/}"
	sudo update-rc.d elasticsearch defaults 95 10
fi
# Minimal config: node name and data path. Space indentation is required
# (YAML); <<- only strips leading tabs, so it is unaffected.
cat > /etc/elasticsearch/elasticsearch.yml <<-EOF
node:
  name: shell-kibana
path:
  data: /esdata
EOF
sudo /etc/init.d/elasticsearch restart
| true |
8041155931e22ece197da2fa13b5cbf042cc8a8c
|
Shell
|
odeter/emacs
|
/.emacs.d/var/backup/!home!christopher!Documents!Work!Derant!derant_angle_app!dockers!fix_partition!fix_partition.sh~
|
UTF-8
| 817 | 2.875 | 3 |
[] |
no_license
|
#!/bin/bash
# Run fix_partition.py either inside the local virtualenv (--local) or
# against the dokku-hosted angle_db Postgres instance.
# source venv/bin/activate
## get port and login from dokku
#export EXPORT=`dokku postgres:info angle_db --exposed-ports | sed 's/[0-9]*->//g' | sed 's/[[:space:]]//g'`
#export DATABASE_URL=`dokku postgres:info angle_db --dsn | sed "s/@[a-z -]*:['0-9']*/@localhost:$EXPORT/g"`
#var1=$(tac dash.log | grep -m1 INFO | sed -e 's/\(^.*|\ \)\(.*\)\(\ |.*$\)/\2/')
#python3 dash_check.py "$var1"
case "${1-}" in
--local)
	# Local run: activate the project virtualenv first.
	source venv/bin/activate
	python3 fix_partition.py
	;;
*)
	# Server run: resolve the exposed Postgres port via dokku and rewrite
	# the DSN so DATABASE_URL points at localhost on that port.
	export BACKEND_PROC_LOG_DIR='logs/'
	export EXPORT=$(dokku postgres:info angle_db --exposed-ports | sed 's/[0-9]*->//g' | sed 's/[[:space:]]//g')
	export DATABASE_URL=$(dokku postgres:info angle_db --dsn | sed "s/@[a-z -]*:['0-9']*/@localhost:$EXPORT/g")
	python3 fix_partition.py
	;;
esac
| true |
8f072e40dbcd18c0c18304ddd7380f991caafe90
|
Shell
|
Prakash878/Shell-Scripting
|
/shell-new
|
UTF-8
| 2,436 | 3.640625 | 4 |
[] |
no_license
|
#!/bin/bash
# Scratch file collecting several user-provisioning script variants,
# separated by commented dividers. It is not meant to run top to bottom;
# pick one variant. The original file contained a pasted "root@...:~#"
# shell prompt and an uncommented "====" divider, which made it
# unparseable; both are fixed below.

# --- Variant 1: create each user named on the command line --------------
USERS=$@
if [ $# -gt 0 ]
then
	for USERNAME in $USERS
	do
		EXUSER=$(cat /etc/passwd | grep -w $USERNAME | cut -d ":" -f1)
		if [ "$USERNAME" = "$EXUSER" ]
		then
			echo "User Exists, please use a different username."
		else
			# Random password: India@<random number><one special char>.
			SPEC=$(echo '!@#$%^&*()_' | fold -w1 | shuf | head -1)
			PASSWORD=India@${RANDOM}${SPEC}
			echo "Lets Create User $USERNAME"
			useradd -m $USERNAME
			echo "${USERNAME}:${PASSWORD}" | sudo chpasswd
			# Expire the password so the user must change it at first login.
			passwd -e ${USERNAME}
			echo "Username is ${USERNAME} Password is ${PASSWORD}"
		fi
	done
else
	echo "Provide Some Input as you have given $# params"
fi
#=============================================================================
# --- Variant 2: interactive loop prompting for usernames ----------------
# (loops forever; stop with Ctrl-C)
#!/bin/bash
while true
do
	read -p "Please Enter The User Name:" USERNAME
	if [ -z $USERNAME ]; then
		echo "Please Enter A Valid Input."
	else
		EXUSER=$(cat /etc/passwd | grep -w $USERNAME | cut -d ":" -f 1)
		SPEC=$(echo '!@#$%^&*()_' | fold -w1 | shuf | head -1)
		PASSWORD=India@${RANDOM}${SPEC}
		if [ "$USERNAME" = "$EXUSER" ]; then
			echo "User $USERNAME exists. Try different name."
		else
			echo "Lets Create User $USERNAME"
			useradd -m ${USERNAME}
			echo "${USERNAME}:${PASSWORD}" | sudo chpasswd
			passwd -e ${USERNAME}
			# Print the Username and Password.
			echo "Username is ${USERNAME} Password is ${PASSWORD} "
		fi
	fi
done
#=========================================================================
# --- Cleanup snippet: delete every devopsb14 user -----------------------
# (originally pasted with its root@ip-10-1-1-227 prompt, which broke
# parsing; the prompt has been stripped)
for x in $(cat /etc/passwd | grep -i devopsb14 | cut -d ":" -f 1)
do
	userdel -r $x
	sleep 1
done
#===========================================================================
# --- Variant 3: like variant 1, but validates each argument -------------
#!/bin/bash
USERS=$@
for USERNAME in $USERS
do
	if [ -z $USERNAME ]; then
		echo "Please Enter A Valid Input."
	else
		EXUSER=$(cat /etc/passwd | grep -w $USERNAME | cut -d ":" -f 1)
		SPEC=$(echo '!@#$%^&*()_' | fold -w1 | shuf | head -1)
		PASSWORD=India@${RANDOM}${SPEC}
		if [ "$USERNAME" = "$EXUSER" ]; then
			echo "User $USERNAME exists. Try different name."
		else
			echo "Lets Create User $USERNAME"
			useradd -m ${USERNAME}
			echo "${USERNAME}:${PASSWORD}" | sudo chpasswd
			passwd -e ${USERNAME}
			# Print the Username and Password.
			echo "Username is ${USERNAME} Password is ${PASSWORD} "
		fi
	fi
done
| true |
f71237bdc7296be7feb463aa710c60d638abea78
|
Shell
|
buildscript/i9300
|
/i9300_test.sh
|
UTF-8
| 1,377 | 3.25 | 3 |
[] |
no_license
|
#!/bin/bash
# Build the ROM for the i9300, optionally upload the result, and
# optionally clobber the build output directory.
cd ~/du
# Turn off break on error to allow build process to run
set +e
############################################################
#                      BUILD ROM                           #
############################################################
# Turn on compiler caching
export USE_CCACHE=1
# Set the TESTBUILD build flag
export DU_BUILD_TYPE=TESTBUILD
# Set the prebuilt chromium flag
export USE_PREBUILT_CHROMIUM=1
# Start build process (envsetup.sh also defines $OUT used below)
cd ~/du && . build/envsetup.sh && time brunch i9300
############################################################
#                COPY ROM TO FTP SERVER                    #
############################################################
set -e
echo "Do you want to upload it to i9300/Testing?"
# -r keeps backslashes in the reply literal; -n 1 reads one keypress.
read -rp "(y/n)" -n 1
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
	. ~/du/i9300_up_test.sh
fi
# Notify user of success (short jingle on the PC speaker)
beep -f 784 -r 3 -l 100
sleep .1
beep -f 784 -l 600
beep -f 622 -l 600
beep -f 698 -l 600
beep -f 784 -l 200
sleep .2
beep -f 698 -l 200
beep -f 784 -l 800
echo 'Build completed successfully!'
############################################################
#                       CLOBBER                            #
############################################################
# NOTE(review): $OUT is presumably exported by build/envsetup.sh -- confirm.
echo "Do you want to clobber $OUT?"
read -rp "(y/n)" -n 1
echo
if [[ $REPLY =~ ^[Yy]$ ]]; then
	. ~/du/clobber.sh
fi
| true |
4852c738ed3e809e22004c4f1ad9360b2c995a17
|
Shell
|
Kdecherf/user-config
|
/git/.local/bin/git-prco
|
UTF-8
| 292 | 3.28125 | 3 |
[] |
no_license
|
#!/bin/bash
# git-prco -- fetch a GitHub-style pull request by number into a local ref
# (remotes/pr/<N>) and check it out detached.
#
# Usage: git prco <pr-number>
if [ -z "${1-}" ]; then
	echo "You must provide a PR number" >&2
	exit 1
fi
# Bail out when not inside a git repository. The original used
# `2>&1 >/dev/null`, which silenced stdout only and let stderr through;
# redirect stdout first so stderr follows it to /dev/null.
if ! git rev-parse --show-toplevel >/dev/null 2>&1; then
	echo "not a git repository" >&2
	exit 2
fi
git fetch --force origin "pull/${1}/head:remotes/pr/${1}" &&
git switch -d "remotes/pr/${1}"
| true |
705fb4d21f4425c9e5a275ef84d1cf5a26c392bc
|
Shell
|
koraynilay/linux-custom-scripts
|
/rofmount
|
UTF-8
| 621 | 3.703125 | 4 |
[] |
no_license
|
#!/bin/bash
# rofmount -- pick a block device via rofi and (un)mount it with gksudo,
# reporting the result through a dunst notification.
# (Shebang changed from /bin/sh: the script uses the bash-only [[ ]].)
#
# Usage: rofmount [m|mount|u|unmount|umount]

# Prompt for a device with rofi over lsblk output; sets $device to the
# selected PATH column (empty when the prompt is cancelled).
com() {
	device=$(lsblk -o PATH,SIZE,FSTYPE,LABEL,MOUNTPOINT | rofi -dmenu | awk '{print $1}')
}
device=""
rc=0
if [[ "$1" = "mount" || "$1" = "m" ]];then
	com
	echo -n "$device"
	if [[ -n "$device" ]];then
		# 2>&1 captures error text too (the original `>&1` was a no-op);
		# capture the exit code immediately so the notification reports
		# the mount result, not the status of a later test.
		out=$(gksudo mount "$device" 2>&1)
		rc=$?
	fi
elif [[ "$1" = "unmount" || "$1" = "umount" || "$1" = "u" ]];then
	com
	if [[ -n "$device" ]];then
		out=$(gksudo umount "$device" 2>&1)
		rc=$?
	fi
else
	dunstify -a rofmount "Usage: $0 [m|mount|u|unmount|umount]"
	echo "Usage: $0 [m|mount|u|unmount|umount]"
	exit 1
fi
if [[ -n "$device" ]];then
	[[ -z "$out" ]] && output="" || output="Output: $out"
	dunstify "$1 command exited with code $rc.$output"
fi
| true |
eba9bba451bdf27db3b38a50d4bfb65086a4270e
|
Shell
|
MYOMAO/EICEMCAL
|
/EtaScanClus.sh
|
UTF-8
| 375 | 2.71875 | 3 |
[] |
no_license
|
#!/bin/sh
# Scan pseudorapidity (eta) in fixed steps and run the EIC EMCal simulation
# at each point, archiving the per-step evaluator output files.
first=11
last=40
eta_start=1.25
eta_step=0.25
idx=$first
while [ "$idx" -lt "$last" ]
do
	# eta for this step; bc handles the fixed-point arithmetic
	eta=$(echo "${eta_start} + ${eta_step} * $idx" | bc)
	printf 'eta now  %s\n' "$eta"
	root -b -l -q "Fun4All_G4_EICDetector.C(5000,1,${eta})"
	mv G4EICDetector.root_g4femc_eval.root "GammaPosAna/G4EICDetector.root_g4femc_eval_${idx}.root"
	mv ShowerInfo.root "GammaPosAna/ShowerInfo_${idx}.root"
	idx=$((idx + 1))
done
| true |
4ec6f2f7e4a7353968797d8f4d0de09fc5bf289b
|
Shell
|
ojarsbuddy/TinyBlogTutorial
|
/TinyBlogImages6/loadAllSolutions.sh
|
UTF-8
| 121 | 2.53125 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Load the tutorial solution for chapters 2 through 9 by delegating to
# loadSolutionForChapter.sh once per chapter.
# (Brace range replaces the external `seq` call; shebang is already bash.)
for chapter in {2..9}; do
	echo "./loadSolutionForChapter.sh $chapter"
	./loadSolutionForChapter.sh "$chapter"
done
| true |
14ade511eb59973442e7046e2681c69ecc6184ad
|
Shell
|
spoorthyb93/lambda_test
|
/test/simulations-ex3.sh
|
UTF-8
| 347 | 2.90625 | 3 |
[] |
no_license
|
# Drive repeated runs of ex3.sh against the deployed API Gateway.
#rm -f .myruns

# number of simulation runs to perform
totalEx3Runs=5

# API Gateway endpoint (first CLI argument)
apigateway=$1
echo "[simulations-ex3.sh] Running ex3.sh using API Gateway: " $apigateway

run=1
while [ "$run" -le "$totalEx3Runs" ]; do
	echo "[simulations-ex3.sh] Run #$run for ex3.sh -----------------------------------------------------------------------"
	./ex3.sh
	run=$((run + 1))
done
| true |
a3b74cd06d81840bcebb86f885c913c25e1b686b
|
Shell
|
martin-juul/bootstrap-macos
|
/home/.zshrc
|
UTF-8
| 3,129 | 2.890625 | 3 |
[] |
no_license
|
# =============================================================================
# Environment
# =============================================================================
# NOTE: this is a *zsh* rc file (zgen/setopt/p10k), not POSIX sh.
# .localrc holds machine-local / private settings kept out of version control.
source "${HOME}/.localrc"
export CLICOLOR=1
export BOOTSTRAP_DIR=/Volumes/Dev/personal/bootstrap-macos
# =============================================================================
# functions
# =============================================================================
# Sources file on first invocation
# Defines a stub named $1 that, when first called, sources file $2 and then
# re-invokes the (now real) $1 with the original arguments.
lazy_source () {
eval "$1 () { [ -f $2 ] && source $2 && $1 \$@ }"
}
# Colorful messages
e_header() { echo -e "\n\033[1m$@\033[0m"; }
e_success() { echo -e "  \033[1;32m✔\033[0m $@"; }
e_error() { echo -e "  \033[1;31m✖\033[0m $@"; }
# Append a proxy entry for $1 to the .ergo config in the current directory.
ergo_add_proxy() {
local host="${1}"
e_header "Adding host ${host} to .ergo"
echo "ergoproxy ${host}" >> .ergo
}
# =============================================================================
# zgen
# =============================================================================
# load zgen
source "${HOME}/.zgen/zgen.zsh"
# if the init script doesn't exist
# (zgen caches the generated init script; this block only runs on a cold
# cache, e.g. first start or after `zgen reset`)
if ! zgen saved; then
e_header "Creating a zgen save"
zgen oh-my-zsh
# plugins
zgen oh-my-zsh plugins/git
zgen oh-my-zsh plugins/sudo
zgen oh-my-zsh plugins/rsync
zgen oh-my-zsh plugins/osx
zgen oh-my-zsh plugins/command-not-found
zgen oh-my-zsh plugins/colored-man-pages
zgen oh-my-zsh plugins/shrink-path
zgen oh-my-zsh plugins/ssh-agent
zgen load zsh-users/zsh-autosuggestions
zgen load zdharma/fast-syntax-highlighting
zgen load zsh-users/zsh-history-substring-search
zgen load zsh-users/zsh-completions src
zgen load supercrabtree/k
zgen load junegunn/fzf
zgen load StackExchange/blackbox
# theme
zgen load romkatv/powerlevel10k powerlevel10k
# generate the init script from plugins above
zgen save
fi
# To customize prompt, run `p10k configure` or edit ~/.p10k.zsh.
[[ -f ~/.p10k.zsh ]] && source ~/.p10k.zsh
# =============================================================================
# zsh
# =============================================================================
# History
setopt append_history
setopt extended_history
setopt hist_expire_dups_first
setopt hist_ignore_all_dups
setopt hist_ignore_dups
setopt hist_ignore_space
setopt hist_reduce_blanks
setopt hist_save_no_dups
setopt hist_verify
setopt share_history
setopt auto_cd
HISTSIZE=100000
SAVEHIST=100000
HISTFILE=~/.zsh_history
export HISTIGNORE="ls:cd:cd -:pwd:exit:date:* --help"
source "${HOME}/.aliases"
# =============================================================================
# tools
# =============================================================================
# Each eval runs the named tool at shell startup and sources its output;
# startup fails loudly if any of these tools is missing from PATH.
eval "$(pyenv init -)"
eval "$(pipenv --completion)"
eval "$(rbenv init -)"
. /usr/local/opt/asdf/asdf.sh
. /usr/local/opt/asdf/etc/bash_completion.d/asdf.bash
test -e "${HOME}/.iterm2_shell_integration.zsh" && source "${HOME}/.iterm2_shell_integration.zsh"
| true |
2b1c42ba3735c3815158a22380df1cb5c3701c51
|
Shell
|
mlafeldt/gbp-cookbook
|
/templates/default/posttag-hook.erb
|
UTF-8
| 199 | 3.140625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# git-buildpackage post-tag hook: log the tag, branch and commit that gbp
# just created. gbp invokes it with GBP_TAG/GBP_BRANCH/GBP_SHA1 in the
# environment.
set -e
# Print one log line, prefixed with the hook name. "$*" joins all arguments
# into a single word; the original unquoted $@ was subject to word
# splitting and glob expansion (a tag containing "*" would expand).
say() { printf '[posttag] %s\n' "$*"; }
tag=$GBP_TAG
branch=$GBP_BRANCH
sha1=$GBP_SHA1
say "Tag name: $tag"
say "Branch..: $branch"
say "SHA1....: $sha1"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.