blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|
90b0549c46814f89a1609bcf441a4abbd92334fc
|
Shell
|
henesy/Plan9
|
/porting/nogodsnoheaders
|
UTF-8
| 274 | 3.328125 | 3 |
[
"Unlicense"
] |
permissive
|
#!/bin/rc
# Strip all `#include <*.h>` lines
# usage: nogodsnoheaders file …
# Plan 9 rc script; rfork n puts it in a fresh namespace.
rfork n
argv0 = $0
files = ()
fn usage {
echo >[1=2] 'usage:' $argv0 'file …'
exit 'usage'
}
# No arguments → usage on stderr (>[1=2] is rc for 1>&2) and non-zero exit.
if(~ $#* 0)
usage
files = $*
# Pattern for '#include <' with exactly one space; ^ concatenates strings in rc.
incregex = '#include[ ]<'
# NOTE(review): 'sedi' is presumably an in-place sed wrapper available on this
# system — confirm; it is not a standard Plan 9 command.
sedi '/'^$incregex^'/d' -- $files
| true |
bc526f64617defa856ed780fec64875a7b2407ec
|
Shell
|
yinhaoyun/scripts
|
/remove_dir.sh
|
UTF-8
| 253 | 3.421875 | 3 |
[] |
no_license
|
#!/bin/sh
# Interactively delete a directory tree. Uses the rsync-against-empty-dir
# trick, which is much faster than rm -rf on huge trees, then removes the
# (now empty) directory itself.
# usage: remove_dir.sh <folder>
if [ $# -lt 1 ] || [ ! -d "$1" ]; then
echo "usage: $0 <folder>" >&2
exit 1
fi
while true; do
read -p "Do you really want to delete the folder $1?" yn
case $yn in
[Yy]* ) break;;
[Nn]* ) exit;;
* ) echo "Please answer yes or no.";;
esac
done
empty=$(mktemp -d) || exit 1
# Syncing an empty source with --delete removes every entry under "$1".
rsync -a --delete "$empty"/ "$1"
# Clean up both the temp dir (the original leaked it) and the target.
rmdir "$empty" "$1"
| true |
708cd5c8543a8a4df4fe74a681d3e73f5923b0d0
|
Shell
|
mateusrissi/shell-course
|
/mod01/variables.sh
|
UTF-8
| 378 | 3.03125 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Demo script (shell course, module 1): variables, arithmetic expansion,
# command substitution and the special positional parameters.
echo 'test'
# A quoted assignment may span lines; the newline becomes part of the value.
NAME="Mateus
Rissi"
echo "${NAME}"
N1=45
N2=24
# $(( )) performs integer arithmetic.
TOTAL=$((N1+N2))
echo "${TOTAL}"
# Command substitution; grep reads the file directly (no useless cat).
CAT_OUTPUT=$(grep mtr /etc/passwd)
echo "${CAT_OUTPUT}"
echo "--------------------------"
echo "Parameter 1: ${1}"
echo "Parameter 2: ${2}"
echo "All parameters: ${*}"
echo "Parameter 0: ${0}"
echo "Parameters quantity: ${#}"
echo "PID: ${$}"
| true |
189710063507e03e589374aab56c6cb6a41f8c7c
|
Shell
|
swapnanildutta/Bashing-Life-PCC-CS592
|
/Day 4/case.sh
|
UTF-8
| 332 | 3.828125 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Menu-driven demo of the case statement.
# The original used plain echo with \n escapes, which bash prints literally;
# printf renders the intended one-option-per-line menu.
printf '%s\n' "1.Contents of the file" \
"2.Present working directory and files under current directory" \
"3. Current Date" \
"4. Calender of a particular Year" \
"5.Exit"
read x
case $x in
1)read -p "Enter filename: " f1
cat "$f1"
;;
2) pwd && ls;;
3) date +%m.%d.%Y;;
4) read -p "Enter year: " year
cal "$year" ;;
5) exit;;
*) echo "Wrong input"
esac
| true |
22c82158ab91e9e7be19e9bdc5577a8edef08889
|
Shell
|
klpanagi/Thesis
|
/jetson-tk1/theano/build_scipy.sh
|
UTF-8
| 310 | 2.65625 | 3 |
[] |
no_license
|
#!/bin/bash -ie
# Build and install SciPy from source against OpenBLAS
# (Jetson TK1 / Theano setup).
function _build_scipy_with_openblas() {
# Build-time dependencies
pip install cython tempita
cd ${HOME}
# GitHub disabled the unauthenticated git:// protocol; clone over https.
git clone https://github.com/scipy/scipy.git scipy
cd scipy
# Point the build at the OpenBLAS shared library for both BLAS and LAPACK.
BLAS=/opt/OpenBLAS/lib/libopenblas.so.0 LAPACK=/opt/OpenBLAS/lib/libopenblas.so.0 python setup.py install
}
_build_scipy_with_openblas
| true |
442497f9ed8707d1ced2d840d1466b84e80e303e
|
Shell
|
projetotestbed/Testbed
|
/TBControl/files/tbcontrol
|
UTF-8
| 1,236 | 3.765625 | 4 |
[] |
no_license
|
#!/bin/bash
### BEGIN INIT INFO
# Provides: tbcontrol
# Required-Start: postgres
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Stop/Start/Restart TBControl on System shutdown
### END INIT INFO
# Init script for TBControl: launches one java process per network id
# found in the portal database, logging each to its own file.
TBCUSER=projetotestbed
TBCPATH=/home/projetotestbed/Testbed/TBControl
TBCPROG=TBControl.jar
SU="sudo -H -u $TBCUSER"
DATABASE=portal2
DBUSER=postgres
PGPWD=postgres
SQL_COMMAND="select distinct(netid) from networks;"
# One netid per line (-t suppresses headers); each becomes a TBControl arg.
TARGETS=$($SU PGPASSWORD=$PGPWD psql -d $DATABASE -h localhost -U $DBUSER -t -c "$SQL_COMMAND")
START_COMMAND="java -jar $TBCPROG"
START_TIME=$(date '+%Y%d%m-%H%M%S')
REMOVE_DAYS=30
do_stop() {
pkill -f "java -jar TBControl.jar"
}
do_start(){
# Drop rotated logs older than REMOVE_DAYS, rotate the current logs,
# then launch one background TBControl per netid.
find "$TBCPATH"/logs/tbc_*-*.log -mtime +$REMOVE_DAYS -exec rm {} \;
cd "$TBCPATH" || return 1
for netId in $TARGETS; do
mv "$TBCPATH/logs/tbc_$netId.log" "$TBCPATH/logs/tbc_$netId-$START_TIME.log" 2>/dev/null
# The original had '& > /dev/null', which backgrounded the java process
# and then redirected an *empty* command; the stray redirect is removed.
echo "Starting TBControl netId=$netId" && $SU $START_COMMAND "$netId" >> "$TBCPATH/logs/tbc_$netId.log" &
done
}
case $1 in
stop)
do_stop
;;
start)
do_start
;;
restart)
echo "* Restarting TBControl"
do_stop
sleep 5
do_start
;;
*)
echo "Usage: /etc/init.d/tbcontrol start | stop | restart"; exit 1
;;
esac
exit 0
| true |
49cece8100a5f0ea8641eaf22d63f6ef52d76bea
|
Shell
|
12yanogden/pepr
|
/modules/output/div.sh
|
UTF-8
| 7,529 | 4.21875 | 4 |
[] |
no_license
|
#!/bin/bash
# Builds a customized divider
#
# +--------------------- Example Divider ---------------------+
include "commands"
include "print"
# Build and print one customizable divider line; every aspect (arms, edges,
# padding, justification, width/height) is controlled by option flags.
div() {
# Exits if printPermission is set to quiet
if [ $printPermission -eq 0 ]; then
return 0
fi
# Default values of arguments
local height=1
local width=89
local upperPadding=0
local outerPaddingLeft=0
local outerEdgeLeft=''
local leftArm='-'
local leftArmLength=0
local innerEdgeLeft=''
local innerPaddingLeft=0
local text=''
local justify="center"
local innerPaddingRight=0
local innerEdgeRight=''
local rightArmLength=0
local rightArm='-'
local outerEdgeRight=''
local outerPaddingRight=0
local lowerPadding=0
local newLine=1
local contentWidth=0
# Loops through arguments and processes them
# NOTE(review): this iterates the snapshot "$@" while reading values from $2
# and consuming with shift — the snapshot does not change as arguments are
# shifted, so flag values stay correct only while the two stay aligned.
# Confirm before relying on interleaved flags and free text.
for arg in "$@"
do
case $arg in
-op|--outerPadding)
outerPaddingLeft=$2
outerPaddingRight=$2
shift # Removes flag from processing
shift # Removes value from processing
;;
-opl|--outerPaddingLeft)
outerPaddingLeft=$2
shift
shift
;;
-opr|--outerPaddingRight)
outerPaddingRight=$2
shift
shift
;;
-oe|--outerEdges)
outerEdgeLeft="$2"
outerEdgeRight="$2"
shift
shift
;;
-oel|--outerEdgeLeft)
outerEdgeLeft="$2"
shift
shift
;;
-oer|--outerEdgeRight)
outerEdgeRight="$2"
shift
shift
;;
-a|--arms)
leftArm="$2"
rightArm="$2"
shift
shift
;;
-la|--leftArm)
leftArm="$2"
shift
shift
;;
-ra|--rightArm)
rightArm="$2"
shift
shift
;;
-ie|--innerEdge)
innerEdgeLeft="$2"
innerEdgeRight="$2"
shift
shift
;;
-iel|--innerEdgeLeft)
innerEdgeLeft="$2"
shift
shift
;;
-ier|--innerEdgeRight)
innerEdgeRight="$2"
shift
shift
;;
-ip|--innerPadding)
innerPaddingLeft=$2
innerPaddingRight=$2
shift
shift
;;
-ipl|--innerPaddingLeft)
innerPaddingLeft=$2
shift
shift
;;
-ipr|--innerPaddingRight)
innerPaddingRight=$2
shift
shift
;;
-j|--justify)
justify=$2
shift
shift
;;
-vp|--verticalPadding)
upperPadding=$2
lowerPadding=$2
shift
shift
;;
-up|--upperPadding)
upperPadding=$2
shift
shift
;;
-lp|--lowerPadding)
lowerPadding=$2
shift
shift
;;
-w|--width)
width=$2
shift
shift
;;
-h|--height)
height=$2
shift
shift
;;
-n|--noNewLine)
newLine=0
shift
;;
-d|--debug)
printPermission=4
shift
;;
*)
# Anything that is not a flag is collected (space-joined) as the text.
while [[ $1 != -* && ! -z "$1" ]]
do
text+="$1 "
shift
done
text=$(echo $text | sed 's/ *$//g')
;;
esac
done
# If justify is not center, eliminates corresponding outerEdge
case $justify in
l|left)
outerEdgeLeft=''
;;
r|right)
outerEdgeRight=''
;;
esac
# Calculates inner padding length by text length and justification
innerPaddingLeft=$(div_calcInnerPadding -l)
innerPaddingRight=$(div_calcInnerPadding -r)
# Reconciles width with width of contents
# NOTE(review): ${#innerEdgeLeft} appears twice in this sum; the second term
# presumably should be ${#innerEdgeRight} — confirm before fixing.
contentWidth=$(($outerPaddingLeft + ${#outerEdgeLeft} + ${#innerEdgeLeft} + $innerPaddingLeft + ${#text} + $innerPaddingRight + ${#innerEdgeLeft} + ${#outerEdgeRight} + $outerPaddingRight))
if [ "$contentWidth" -gt "$width" ]; then
width=$contentWidth
fi
# Calculates final arm length
if [ "$contentWidth" -lt "$width" ]; then
local armLengths=($(calcArmLengths))
leftArmLength=${armLengths[0]}
rightArmLength=${armLengths[1]}
fi
# Calculates vertical padding
verticalPaddings=($(div_calcVerticalPadding))
upperPadding=$(($upperPadding + ${verticalPaddings[0]}))
lowerPadding=$(($lowerPadding + ${verticalPaddings[1]}))
div_debug "Pre-print"
# Prints the divider
div_print
}
# Decide whether one space of inner padding is needed on the given side.
# $1 is -l (left) or -r (right). Reads globals: text, justify.
# Echoes 1 when there is text and the text is pushed away from that side
# by the justification; otherwise echoes 0.
div_calcInnerPadding() {
local side=$1
local pad=0
if [ -n "$text" ]; then
case "$side $justify" in
"-l c"|"-l m"|"-l center"|"-l middle"|"-l r"|"-l right") pad=1 ;;
"-r c"|"-r m"|"-r center"|"-r middle"|"-r l"|"-r left") pad=1 ;;
esac
fi
echo $pad
}
# Distribute the free horizontal space (width - contentWidth) between the
# two arms according to the justification; echoes "leftLength rightLength".
calcArmLengths() {
local lengthOfArms=$(($width - $contentWidth))
case $justify in
l|left)
rightArmLength=$lengthOfArms
;;
c|m|center|middle)
# NOTE(review): 'divide' is a project helper (see include "commands") and
# is passed the variable *name*, not its value — presumably it dereferences
# it, with -r meaning round up. Confirm its contract before changing.
local lesserHalf=$(divide lengthOfArms)
local greaterHalf=$(divide -r lengthOfArms)
leftArmLength=$(($leftArmLength + $lesserHalf))
rightArmLength=$(($rightArmLength + $greaterHalf))
;;
r|right)
leftArmLength=$lengthOfArms
;;
esac
echo $leftArmLength $rightArmLength
}
# Split any leftover vertical space (height minus current content height)
# into extra upper/lower padding. Reads globals: upperPadding, lowerPadding,
# height. Echoes "extraUpper extraLower" (both 0 when nothing is left over).
div_calcVerticalPadding() {
local used=$((upperPadding + lowerPadding + 1))
# Shadow the globals: from here on these hold only the *extra* padding.
local upperPadding=0
local lowerPadding=0
local difference
if [ "$used" -lt "$height" ]; then
difference=$((height - used))
# 'divide' (project helper) halves by name; -r rounds up.
upperPadding=$(divide difference)
lowerPadding=$(divide -r difference)
fi
echo $upperPadding $lowerPadding
}
# How many times the arm string must be repeated to fill the arm length:
# integer-divide the length by the arm's character count when it is more
# than one character; a single-character arm repeats once per column.
div_calcArmRepCount() {
local length=$1 chars=$2
if [ "$chars" -gt 1 ]; then
length=$((length / chars))
fi
echo $length
}
# Prints the divider
# Assembles the line left-to-right into $out, then emits it via print3.
# Relies on helpers defined elsewhere: skip, space, repeat, print3.
div_print() {
local out=''
out+=$(skip $upperPadding)
out+=$(space $outerPaddingLeft)
out+=$(echo -n "${outerEdgeLeft}")
# A blank arm is rendered with 'space' instead of 'repeat'.
if [ "$leftArm" = " " ]; then
out+=$(space $leftArmLength)
else
out+=$(repeat "$leftArm" $(div_calcArmRepCount $leftArmLength ${#leftArm}))
fi
out+=$(echo -n "$innerEdgeLeft")
out+=$(space $innerPaddingLeft)
out+=$(echo -n "$text")
out+=$(space $innerPaddingRight)
out+=$(echo -n "$innerEdgeRight")
if [ "$rightArm" = " " ]; then
out+=$(space $rightArmLength)
else
out+=$(repeat "$rightArm" $(div_calcArmRepCount $rightArmLength ${#rightArm}))
fi
out+=$(echo -n "$outerEdgeRight")
out+=$(space $outerPaddingRight)
out+=$(skip $lowerPadding)
# newLine=1 → trailing newline; otherwise print3 -n suppresses it.
if [ "$newLine" = "1" ]; then
print3 "$out"
else
print3 -n "$out"
fi
}
# Dump the full divider state through print4 (the debug output channel);
# an optional $1 is printed first as a heading.
div_debug() {
local msg="$1"
if [ ! -z "$msg" ]; then
print4 "$msg"
fi
print4 "height: $height"
print4 "width: $width"
print4 "upperPadding: $upperPadding"
print4 "outerPaddingLeft: $outerPaddingLeft"
print4 "outerEdgeLeft: ${outerEdgeLeft}"
print4 "leftArm: $leftArm"
print4 "leftArmLength: $leftArmLength"
print4 "innerEdgeLeft: $innerEdgeLeft"
print4 "innerPaddingLeft: $innerPaddingLeft"
print4 "text: $text"
print4 "justify: $justify"
print4 "innerPaddingRight: $innerPaddingRight"
print4 "innerEdgeRight: $innerEdgeRight"
print4 "rightArmLength: $rightArmLength"
print4 "rightArm: $rightArm"
print4 "outerEdgeRight: $outerEdgeRight"
print4 "outerPaddingRight: $outerPaddingRight"
print4 "lowerPadding: $lowerPadding"
print4 "newLine: $newLine"
print4 ""
}
| true |
dce1dbb0966da9a7f696ef6f4f72970d99fabd70
|
Shell
|
yuyang158/Unity-Extend
|
/moon_build/luac/build_unix/build_lua.sh
|
UTF-8
| 820 | 3.5 | 4 |
[
"MIT"
] |
permissive
|
# Compile every .lua file under the given directory into a sibling
# .lua.bytes file, working on a local copy of the tree.
# usage: build_lua.sh <lua-source-dir>
cd "$(dirname "$0")" || exit 1
SHELL_FOLDER=$(pwd)
ROOT_PATH=$1
ROOT_NAME=Lua
if [ ! -d "$ROOT_PATH" ]
then
echo "build_lua.sh: Input should be a directory @$ROOT_PATH"
exit 1
fi
ROOT_NAME=$(basename "$ROOT_PATH")
if [ -d "$ROOT_NAME" ]
then
rm -rf "./$ROOT_NAME"
fi
cp -rf "$ROOT_PATH" .
echo "start copy lua"
# Strip svn metadata from the copy before compiling (-exec handles
# any path safely, unlike piping names into xargs).
find "$ROOT_NAME" -name ".svn" -exec rm -rf {} +
function travers_files()
{
# $1: directory to walk; recurse into subdirs, compile each *.lua file.
local entry name
for entry in "$1"/*;
do
name=${entry##*/}
if [ -d "$entry" ]; then
travers_files "$entry"
else
# POSIX '=' comparison, quoted so names with spaces/globs work.
if [ "${name##*.}" = lua ]
then
#echo "$entry.bytes"
./luac.a -o "$entry.bytes" "$entry"
fi
fi
done
}
echo "start build lua"
travers_files "$ROOT_NAME"
echo "start delete origin lua"
find "$ROOT_NAME" -name "*.lua" -exec rm -f {} +
echo "lua build all done"
exit 0
| true |
1eff6530b6d9d67bf720a6ee532a2a81f6384777
|
Shell
|
neingeist/dirty-helpers
|
/crontab-suggest
|
UTF-8
| 278 | 3.515625 | 4 |
[] |
no_license
|
#!/bin/bash
# Suggest a random time for a cronjob; with -w, also pick a random weekday.
set -u
set -e

# Echo a uniform random integer in [0, $1).
rand_below() {
echo $(( (RANDOM * $1) / 32768 ))
}

main() {
local hour minute dow
hour=$(rand_below 24)
minute=$(rand_below 60)
dow="*"
# ${1-} is safe under set -u when no argument was given.
if [ "${1-}" = "-w" ]; then
dow=$(rand_below 7)
fi
printf "%02i %02i * * %s\n" "$minute" "$hour" "$dow"
}

main "$@"
| true |
812652ed709187c4d7f37377559fa1235d6495eb
|
Shell
|
kganser/simpl.js
|
/docker/launch.sh
|
UTF-8
| 334 | 2.6875 | 3 |
[] |
no_license
|
#!/bin/sh
# Launch Simpl.js (nw.js) under a virtual X server. TERM/INT are forwarded
# so both processes shut down cleanly and the profile lock is removed.
cleanup() {
kill -TERM $nwjs
wait $nwjs
kill -TERM $xvfb
rm -f /usr/var/simpljs/SingletonLock
}
trap cleanup TERM INT
Xvfb :99 -ac -screen 0 1280x720x16 -nolisten tcp &
xvfb=$!
export DISPLAY=:99
# "$@" keeps each caller-supplied argument intact (unquoted $@ re-splits
# arguments that contain whitespace).
/usr/lib/simpljs/nw "$@" --disable-gpu --user-data-dir=/usr/var/simpljs --port=8000 &
nwjs=$!
wait $nwjs
wait $xvfb
| true |
ab4d14afabd444995f121a6aa211d2be3d62991a
|
Shell
|
sunh20/ECoG_data_curation
|
/ecog_processing_pipeline/runPreprocMain_1.sh
|
UTF-8
| 742 | 2.53125 | 3 |
[] |
no_license
|
#!/bin/sh
# Run the ECoG preprocessing pipeline over every EDF file of each subject,
# writing results and plots under /data1/ecog_project/derived.
subjects='q873748d abdb496b' #a0f66459 ab2431d9 b4ac1726 #c95c1e82 d6532718 ec374ad0 ffb52f92 aa97abcd c7980193 cb46fd46 e5bad52f fcb01f7a
for subject in $subjects
do
# -p creates missing parents and is quiet when the directory already exists.
mkdir -p /data1/ecog_project/derived/processed_ecog/$subject/preproc_hospital_ecog/
mkdir -p /data1/ecog_project/derived/processed_ecog/$subject/chan_preproc_plots/
#cd /data1/ecog_project/edf/$subject/
# Skip the subject instead of running find in the wrong directory.
cd /nas/ecog_project/edf/$subject/ || continue
find . -name '*.edf' -exec python /home/stepeter/Documents/ECoG_Preprocessing/Steve_preprocPipeline.py -s $subject -d '{}' -lp /nas/ecog_project/edf/$subject/ -sp /data1/ecog_project/derived/processed_ecog/$subject/preproc_hospital_ecog/ -af False \;
done
| true |
d68296e1ef20ad6babf5d0eb2cf6e98733ce9091
|
Shell
|
hi-cli/hi-cli
|
/bin/usage
|
UTF-8
| 1,211 | 3.109375 | 3 |
[] |
no_license
|
#!/bin/bash
###############################################################################
# Project: hi-cli
# Description: The cross platform development toolkit - hi-cli
# Author: John Deng (john.deng@outlook.com)
#
# Copyright (c) 2014-2017 John Deng (john.deng@outlook.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: John Deng (john.deng@outlook.com)
# Updated: 2017-02-11
###############################################################################
source "${HI_CLI_HOME}/bin/colors"
# Print the top-level hi-cli usage text in gray.
# Globals (from the sourced colors file / caller): ECHO_E, GRAY, ENDCOLOR, hi_app.
function show_hi_usage() {
${ECHO_E} "${GRAY}Usages:
${hi_app} [man] [do] [something] with [arguments]
Examples:
${hi_app} install cicd
${hi_app} update cicd
${hi_app} cicd deploy new build
For more information, run:
hi help${ENDCOLOR}"
}
| true |
4c6049dff4b74912fac12f7c624461e3c028fd68
|
Shell
|
wittawatj/configs
|
/script/start_multicore_slaves.sh
|
UTF-8
| 596 | 3 | 3 |
[] |
no_license
|
#!/bin/bash
# Launch multiple screen sessions, each running one Matlab instance that
# executes multicoreSlaveLoop(), i.e. a slave for the multicore package.
# usage: start_multicore_slaves.sh <num-slaves>
num=$1
# Reject a missing or non-numeric count up front.
if [ -z "$num" ] || ! [ "$num" -ge 1 ] 2>/dev/null; then
echo "usage: $0 <num-slaves>" >&2
exit 1
fi
cdpath="/nfs/nhome/live/wittawat/SHARE/gatsby/research/code"
#matlab_path="/opt/matlab-R2012b/bin/matlab"
matlab_path="/opt/matlab-R2014a/bin/matlab"
for ((i = 1; i <= num; i++))
do
echo "starting a multicore Matlab slave on screen #${i}"
screen -dmS "matlab_slave${i}" ${matlab_path} -nodesktop -nosplash -r "cd ${cdpath}; startup; multicoreSlaveLoop(); "
done
screen -list
echo "Use kill_multicore_slaves.sh to kill all multicore slaves."
| true |
a6061c93b79b32bdc78e00241e6e69b46807bb0d
|
Shell
|
csmunuku/myscripts
|
/bash/commandOnRemBoxes.sh
|
UTF-8
| 1,485 | 4 | 4 |
[] |
no_license
|
#!/bin/bash
###################################################################################################
# SCRIPT NAME: commandOnRemBoxes.sh
# DESCRIPTION: Used to execute command(s) on remote boxes
# AUTHOR: csmunuku@gmail.com
###################################################################################################
USER_NAME="myusername"
START_TIME="$(date)"
if [ $# -eq 0 ]; then
echo "Please provide FQDN of boxes as arguments to this script "
exit 1
fi
echo "Please specify the Command you would like to run:"
read -r commandToRun
if [ -z "$commandToRun" ]; then
echo "Command String that you entered currently is \"$commandToRun\" - Empty.."
echo "Exiting now.."
exit 1
fi
# Run the command on one host ($1), sourcing the user's profile when present.
run_on_host() {
ssh -xtq "$1" "sHost=\`hostname -s\`; if [ -f ~${USER_NAME}/.bash_profile ]; then . ~${USER_NAME}/.bash_profile; ${commandToRun}; else ${commandToRun}; fi"
}
if [ $# -eq 1 ] && [ -f "${1}" ]; then
# Single argument naming a file: one host per whitespace-separated token,
# '#'-prefixed lines ignored.
for i in $(grep -v "^#" "${1}")
do
echo -n "$i - "
run_on_host "$i"
# echo "#######################################################################"
done
else
for i in "$@"
do
echo "On $i ..."
run_on_host "$i"
echo "#######################################################################"
done
fi
END_TIME="$(date)"
echo "#################"
echo "#################"
echo "Start Time is - $START_TIME"
echo "End Time is - $END_TIME"
| true |
569f94ae8e10219717841d026d441cf88ac790c1
|
Shell
|
surskitt/PKGBUILD
|
/yadm/PKGBUILD
|
UTF-8
| 1,139 | 2.625 | 3 |
[] |
no_license
|
# Maintainer: Stefan Tatschner <stefan@rumpelsepp.org>
# Contributor: Franek Madej <franek.madej@gmail.com>
# Completion changes and optdepends by Artemis
pkgname=yadm
pkgver=2.5.0
pkgrel=1
pkgdesc="Yet Another Dotfiles Manager"
arch=('any')
url="https://github.com/TheLocehiliosan/yadm"
license=('GPL3')
depends=('git' 'awk')
optdepends=('gnupg: encrypt/decrypt sensitive files'
'python-j2cli: use jinja2 templates'
'python-envtpl: use templates with variables')
provides=('yadm')
conflicts=('yadm-git')
source=("$pkgname-$pkgver.tar.gz::https://github.com/TheLocehiliosan/${pkgname}/archive/${pkgver}.tar.gz")
sha256sums=('d022d118a1a477e09afa00c80e10bd94b150d99709e57f01ba48ca2eaaeeb2de')
package() {
# Quote $srcdir/$pkgdir: makepkg build roots may contain spaces.
cd "$srcdir/$pkgname-$pkgver"
install -D -m 755 yadm "$pkgdir/usr/bin/yadm"
install -D -m 644 yadm.1 "$pkgdir/usr/share/man/man1/yadm.1"
install -D -m 644 completion/yadm.bash_completion "${pkgdir}/usr/share/bash-completion/completions/yadm"
install -D -m 644 completion/yadm.zsh_completion "${pkgdir}/usr/share/zsh/site-functions/_yadm"
install -D -m 644 completion/yadm.fish_completion "${pkgdir}/usr/share/fish/vendor_completions.d/yadm.fish"
}
| true |
0122771710c1bb26cb64bbab3bd7987d017222eb
|
Shell
|
spdk/spdk
|
/test/vhost/fiotest/fio.sh
|
UTF-8
| 8,761 | 3.484375 | 3 |
[
"Intel",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (C) 2017 Intel Corporation
# All rights reserved.
#
testdir=$(readlink -f $(dirname $0))
rootdir=$(readlink -f $testdir/../../..)
source $rootdir/test/common/autotest_common.sh
source $rootdir/test/vhost/common.sh
dry_run=false
no_shutdown=false
fio_bin=""
remote_fio_bin=""
fio_jobs=""
test_type=spdk_vhost_scsi
reuse_vms=false
vms=()
used_vms=""
x=""
readonly=""
packed=false
# Print CLI help for this vhost fio test wrapper and exit 0.
# $1 - invoked script path (used for the usage line),
# $2 - optional error message printed before the help text.
function usage() {
[[ -n $2 ]] && (
echo "$2"
echo ""
)
echo "Shortcut script for doing automated test"
echo "Usage: $(basename $1) [OPTIONS]"
echo
echo "-h, --help print help and exit"
echo " --test-type=TYPE Perform specified test:"
echo " virtio - test host virtio-scsi-pci using file as disk image"
echo " kernel_vhost - use kernel driver vhost-scsi"
echo " spdk_vhost_scsi - use spdk vhost scsi"
echo " spdk_vhost_blk - use spdk vhost block"
echo "-x set -x for script debug"
echo " --fio-bin=FIO Use specific fio binary (will be uploaded to VM)"
echo " --fio-job= Fio config to use for test."
echo " All VMs will run the same fio job when FIO executes."
echo " (no unique jobs for specific VMs)"
echo " --dry-run Don't perform any tests, run only and wait for enter to terminate"
echo " --no-shutdown Don't shutdown at the end but leave environment working"
echo " --vm=NUM[,OS][,DISKS] VM configuration. This parameter might be used more than once:"
echo " NUM - VM number (mandatory)"
echo " OS - VM os disk path (optional)"
echo " DISKS - VM os test disks/devices path (virtio - optional, kernel_vhost - mandatory)"
echo " --readonly Use readonly for fio"
echo " --packed Virtqueue format is packed"
exit 0
}
#default raw file is NVMe drive
while getopts 'xh-:' optchar; do
case "$optchar" in
-)
case "$OPTARG" in
help) usage $0 ;;
fio-bin=*) fio_bin="--fio-bin=${OPTARG#*=}" ;;
fio-job=*) fio_job="${OPTARG#*=}" ;;
dry-run) dry_run=true ;;
no-shutdown) no_shutdown=true ;;
test-type=*) test_type="${OPTARG#*=}" ;;
vm=*) vms+=("${OPTARG#*=}") ;;
readonly) readonly="--readonly" ;;
packed) packed=true ;;
*) usage $0 "Invalid argument '$OPTARG'" ;;
esac
;;
h) usage $0 ;;
x)
set -x
x="-x"
;;
*) usage $0 "Invalid argument '$OPTARG'" ;;
esac
done
shift $((OPTIND - 1))
if [[ ! -r "$fio_job" ]]; then
fail "no fio job file specified"
fi
vhosttestinit
trap 'error_exit "${FUNCNAME}" "${LINENO}"' ERR
vm_kill_all
if [[ $test_type =~ "spdk_vhost" ]]; then
notice "==============="
notice ""
notice "running SPDK"
notice ""
vhost_run -n 0
rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
$rpc_py bdev_split_create Nvme0n1 4
$rpc_py bdev_malloc_create -b Malloc0 128 4096
$rpc_py bdev_malloc_create -b Malloc1 128 4096
$rpc_py bdev_malloc_create -b Malloc2 64 512
$rpc_py bdev_malloc_create -b Malloc3 64 512
$rpc_py bdev_malloc_create -b Malloc4 64 512
$rpc_py bdev_malloc_create -b Malloc5 64 512
$rpc_py bdev_malloc_create -b Malloc6 64 512
$rpc_py bdev_raid_create -n RaidBdev0 -z 128 -r 0 -b "Malloc2 Malloc3"
$rpc_py bdev_raid_create -n RaidBdev1 -z 128 -r 0 -b "Nvme0n1p2 Malloc4"
$rpc_py bdev_raid_create -n RaidBdev2 -z 128 -r 0 -b "Malloc5 Malloc6"
$rpc_py vhost_create_scsi_controller --cpumask 0x1 vhost.0
$rpc_py vhost_scsi_controller_add_target vhost.0 0 Malloc0
$rpc_py vhost_create_blk_controller --cpumask 0x1 -r vhost.1 Malloc1
notice ""
fi
notice "==============="
notice ""
notice "Setting up VM"
notice ""
rpc_py="$rootdir/scripts/rpc.py -s $(get_vhost_dir 0)/rpc.sock"
for vm_conf in "${vms[@]}"; do
IFS=',' read -ra conf <<< "$vm_conf"
if [[ -z ${conf[0]} ]] || ! assert_number ${conf[0]}; then
fail "invalid VM configuration syntax $vm_conf"
fi
# Sanity check if VM is not defined twice
for vm_num in $used_vms; do
if [[ $vm_num -eq ${conf[0]} ]]; then
fail "VM$vm_num defined more than twice ( $(printf "'%s' " "${vms[@]}"))!"
fi
done
used_vms+=" ${conf[0]}"
if [[ $test_type =~ "spdk_vhost" ]]; then
notice "Adding device via RPC ..."
while IFS=':' read -ra disks; do
for disk in "${disks[@]}"; do
notice "Create a lvol store on RaidBdev2 and then a lvol bdev on the lvol store"
if [[ $disk == "RaidBdev2" ]]; then
ls_guid=$($rpc_py bdev_lvol_create_lvstore RaidBdev2 lvs_0 -c 4194304)
free_mb=$(get_lvs_free_mb "$ls_guid")
based_disk=$($rpc_py bdev_lvol_create -u $ls_guid lbd_0 $free_mb)
else
based_disk="$disk"
fi
if [[ "$test_type" == "spdk_vhost_blk" ]]; then
disk=${disk%%_*}
notice "Creating vhost block controller naa.$disk.${conf[0]} with device $disk"
$rpc_py vhost_create_blk_controller naa.$disk.${conf[0]} $based_disk
else
notice "Creating controller naa.$disk.${conf[0]}"
$rpc_py vhost_create_scsi_controller naa.$disk.${conf[0]}
notice "Adding device (0) to naa.$disk.${conf[0]}"
$rpc_py vhost_scsi_controller_add_target naa.$disk.${conf[0]} 0 $based_disk
fi
done
done <<< "${conf[2]}"
unset IFS
$rpc_py vhost_get_controllers
fi
setup_cmd="vm_setup --force=${conf[0]} --disk-type=$test_type"
[[ x"${conf[1]}" != x"" ]] && setup_cmd+=" --os=${conf[1]}"
[[ x"${conf[2]}" != x"" ]] && setup_cmd+=" --disks=${conf[2]}"
if [[ "$test_type" == "spdk_vhost_blk" ]] && $packed; then
setup_cmd+=" --packed"
fi
$setup_cmd
done
# Run everything
vm_run $used_vms
vm_wait_for_boot 300 $used_vms
if [[ $test_type == "spdk_vhost_scsi" ]]; then
for vm_conf in "${vms[@]}"; do
IFS=',' read -ra conf <<< "$vm_conf"
while IFS=':' read -ra disks; do
for disk in "${disks[@]}"; do
# For RaidBdev2, the lvol bdev on RaidBdev2 is being used.
if [[ $disk == "RaidBdev2" ]]; then
based_disk="lvs_0/lbd_0"
else
based_disk="$disk"
fi
notice "Hotdetach test. Trying to remove existing device from a controller naa.$disk.${conf[0]}"
$rpc_py vhost_scsi_controller_remove_target naa.$disk.${conf[0]} 0
sleep 0.1
notice "Hotattach test. Re-adding device 0 to naa.$disk.${conf[0]}"
$rpc_py vhost_scsi_controller_add_target naa.$disk.${conf[0]} 0 $based_disk
done
done <<< "${conf[2]}"
unset IFS
done
fi
sleep 0.1
notice "==============="
notice ""
notice "Testing..."
notice "Running fio jobs ..."
# Check if all VM have disk in tha same location
DISK=""
fio_disks=""
for vm_num in $used_vms; do
qemu_mask_param="VM_${vm_num}_qemu_mask"
host_name="VM-$vm_num"
notice "Setting up hostname: $host_name"
vm_exec $vm_num "hostname $host_name"
vm_start_fio_server $fio_bin $readonly $vm_num
if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
vm_check_scsi_location $vm_num
#vm_reset_scsi_devices $vm_num $SCSI_DISK
elif [[ "$test_type" == "spdk_vhost_blk" ]]; then
vm_check_blk_location $vm_num
fi
fio_disks+=" --vm=${vm_num}$(printf ':/dev/%s' $SCSI_DISK)"
done
if $dry_run; then
read -r -p "Enter to kill everything" xx
sleep 3
at_app_exit
exit 0
fi
run_fio $fio_bin --job-file="$fio_job" --out="$VHOST_DIR/fio_results" $fio_disks
if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
for vm_num in $used_vms; do
vm_reset_scsi_devices $vm_num $SCSI_DISK
done
fi
if ! $no_shutdown; then
notice "==============="
notice "APP EXITING"
notice "killing all VMs"
vm_shutdown_all
notice "waiting 2 seconds to let all VMs die"
sleep 2
if [[ $test_type =~ "spdk_vhost" ]]; then
notice "Removing vhost devices & controllers via RPC ..."
for vm_conf in "${vms[@]}"; do
IFS=',' read -ra conf <<< "$vm_conf"
while IFS=':' read -ra disks; do
for disk in "${disks[@]}"; do
disk=${disk%%_*}
notice "Removing all vhost devices from controller naa.$disk.${conf[0]}"
if [[ "$test_type" == "spdk_vhost_scsi" ]]; then
$rpc_py vhost_scsi_controller_remove_target naa.$disk.${conf[0]} 0
fi
$rpc_py vhost_delete_controller naa.$disk.${conf[0]}
if [[ $disk == "RaidBdev2" ]]; then
notice "Removing lvol bdev and lvol store"
$rpc_py bdev_lvol_delete lvs_0/lbd_0
$rpc_py bdev_lvol_delete_lvstore -l lvs_0
fi
done
done <<< "${conf[2]}"
done
fi
notice "Testing done -> shutting down"
notice "killing vhost app"
vhost_kill 0
notice "EXIT DONE"
notice "==============="
else
notice "==============="
notice ""
notice "Leaving environment working!"
notice ""
notice "==============="
fi
vhosttestfini
| true |
73072f8cb8af328e9fe556ce61e3440542c5135f
|
Shell
|
magma/magma
|
/experimental/cloudstrapper/playbooks/roles/build-platform/files/helm-publish.bash
|
UTF-8
| 494 | 2.765625 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Package the orc8r helm chart and publish it to the user's chart repo.
# arg1: gitusername arg2: gitpassword arg3: git publish branch
# arg4: magma root directory
# Fail fast: every step below depends on the previous one succeeding.
set -euo pipefail
gitBranchCheckout=a7580153
gitPublishBranch=$3
gitUserName=$1
gitPat=$2
dirMagmaRoot=$4
dirHelmChart=~/magma-charts
cd "$dirMagmaRoot"
git checkout "$gitBranchCheckout"
cd "$dirHelmChart"
git init
helm package "$dirMagmaRoot"/orc8r/cloud/helm/orc8r/ && helm repo index .
git add . && git commit -m "Initial Commit"
# NOTE(review): embedding the PAT in the remote URL leaks it into
# .git/config and process listings; prefer a git credential helper.
git remote add origin https://$gitUserName:$gitPat@github.com/$gitUserName/$gitPublishBranch && git push -u origin master
| true |
8700e47b7ebd37999de37ad117b775f752273f15
|
Shell
|
marshall-lee/dotfiles
|
/zsh/init/golang.zsh
|
UTF-8
| 168 | 2.78125 | 3 |
[
"MIT"
] |
permissive
|
# Go tooling setup for zsh: export a default GOBIN and link the golang
# completion file, but only when the go binary is installed.
function my_init_golang() {
# ${+commands[go]} is 1 iff 'go' is in PATH (zsh commands table).
(( ! ${+commands[go]} )) && return
[[ -z $GOBIN ]] && export GOBIN=$HOME/.local/bin
# my_link_zsh_completion is defined elsewhere in these dotfiles.
my_link_zsh_completion golang
}
my_init_golang
| true |
5f0d1482049119e94297e5ef9f3c1e22ac2b4f70
|
Shell
|
ChunchunIsMe/learningShell
|
/printf.sh
|
UTF-8
| 401 | 2.9375 | 3 |
[] |
no_license
|
#!/bin/bash
# printf is another output command
# Syntax:
# printf format-string [arguments...]
# format-string: the format control string
# arguments: the argument list
echo "hello shell";
printf "hello shell\n";
# %-10s / %-8s: left-justified, padded strings; %4.2f: float, 2 decimals.
# (The arguments are sample names/gender/weight rows in Chinese.)
printf "%-10s %-8s %4s\n" 姓名 性别 体重KG
printf "%-10s %-8s %4.2f\n" 郭靖 男 66.1234
printf "%-10s %-8s %4.2f\n" 杨过 男 66.1234
printf "%-10s %-8s %4.2f\n" 黄蓉 女 47.1234
| true |
3071f00472834dc9e90dc09c1d2a8073e41f2fb3
|
Shell
|
monotonemonk/arch_svntogit_community-
|
/idesk/trunk/PKGBUILD
|
UTF-8
| 1,137 | 2.609375 | 3 |
[] |
no_license
|
# $Id$
# Maintainer: Sergej Pupykin <pupykin.s+arch@gmail.com>
# Contributor: Claudio Sabattoli <gasherbrum3@alice.it>
pkgname=idesk
pkgver=0.7.5
pkgrel=7
pkgdesc="gives users of minimal wm's (fluxbox, blackbox, openbox, windowsmaker...) icons on their desktop"
arch=("i686" "x86_64")
url="http://idesk.sourceforge.net/wiki/index.php"
license=('GPL')
depends=('pkgconfig' 'imlib2' 'libpng' 'libxpm' 'libxft' 'gcc-libs')
source=("http://downloads.sourceforge.net/sourceforge/idesk/$pkgname-$pkgver.tar.bz2")
md5sums=('beb48c97815c7b085e3b3d601297fbb8')
prepare() {
cd "$srcdir"/$pkgname-$pkgver
# Prepend POSIX headers missing from the old source so it builds with
# modern toolchains ('1,1i' inserts before the first line).
sed -i \
-e '1,1i#include <unistd.h>' \
-e '1,1i#include <sys/stat.h>' \
-e '1,1i#include <sys/types.h>' \
src/DesktopConfig.cpp
# Example config assumes /usr/local; Arch installs under /usr.
sed -i 's#usr/local#usr#' examples/default.lnk
# Override the link flags hard-coded in the bundled configure script.
sed -i 's#IMLIB2_LIBS=.*#IMLIB2_LIBS=-lImlib2#g' configure
sed -i 's#IMLIB_LIBS=.*#IMLIB_LIBS="-L/usr/lib -Wl,-O1,--sort-common,--as-needed,-z,relro -ljpeg -ltiff -lgif -lpng -lz -lm -lXext -lXext -lX11 -lImlib2"#g' configure
}
build() {
cd "$srcdir"/$pkgname-$pkgver
./configure --prefix=/usr
make
}
package() {
cd "$srcdir"/$pkgname-$pkgver
make DESTDIR="$pkgdir" install
}
| true |
56f92a84305307d9f57a6992042d87c25130e03a
|
Shell
|
git4example/sqs-testing
|
/sendmessage.sh
|
UTF-8
| 559 | 3.484375 | 3 |
[] |
no_license
|
#!/bin/bash
# Send N random 10-character messages to the SQS4ECS queue.
# NO_MESSAGES_TO_SEND controls N (defaults to 1 when unset/empty).
export AWS_DEFAULT_REGION=ap-southeast-2
echo $1
# '-z' is the direct test for empty; the original '! -n' double negative
# meant the same thing but obscured it.
if [ -z "$NO_MESSAGES_TO_SEND" ]
then
echo "No of messages to send set to default 1"
export NO_MESSAGES_TO_SEND=1
else
echo "No of messages to send set to " $NO_MESSAGES_TO_SEND
fi
for ((i=1;i<=$NO_MESSAGES_TO_SEND;i++));
do
# 10 random alphanumeric characters for the message body.
message=$(LC_CTYPE=C tr -dc A-Za-z0-9 < /dev/urandom | head -c 10 | xargs)
echo "Sending message number " $i " to the queue :" $message
# Quote the body so the CLI always receives exactly one argument.
aws sqs send-message --queue-url https://sqs.ap-southeast-2.amazonaws.com/064250592128/SQS4ECS --message-body "$message"
done
| true |
22d50cd610888fa38d1e1ab66e9dc0c4d2de498e
|
Shell
|
marccremer/AP1_3
|
/src/compile.sh
|
UTF-8
| 164 | 3.046875 | 3 |
[] |
no_license
|
#!/bin/sh
# Compile each C file from ../src into a same-named binary in ../bin.
for src in ../src/*.c
do
name=$(basename "$src" .c)
gcc -std=c99 -o "../bin/$name" "$src"
done
| true |
d51afefe83a0d927eab14068b920b865a995265a
|
Shell
|
stratixx/SOI
|
/Project_4/test/bigTest.sh
|
UTF-8
| 599 | 2.640625 | 3 |
[] |
no_license
|
# Stress test for the MFS project file system: create a disc image, copy
# fifty 100 MB files into it, and print the FS state before and after.
# NOTE(review): mkmfs/lsmfs/infomfs/cpmfs are project binaries from ../bin;
# the echoed Polish banners ("STAN POCZĄTKOWY" = initial state, etc.) are
# runtime output and are left as-is.
export PATH=$PATH":./../bin"
# Create the disc image; 4000000 is the size argument (units per mkmfs).
mkmfs disc 4000000
echo "----- STAN POCZĄTKOWY ------------------------------------"
lsmfs disc
echo "----------------------------------------------------------"
infomfs disc
echo "----- KOPIOWANIE WIELU PLIKÓW 100MB -----------------------"
export linuxFiles
export MFSfiles
# Build parallel space-separated source/destination name lists for cpmfs:
# the same host file is copied 50 times under numbered MFS names.
for n in `seq 1 50`;
do
linuxFiles=$linuxFiles"lorem_100M.txt "
MFSfiles=$MFSfiles"lorem_100M_$n.txt "
done
cpmfs disc -TO $linuxFiles -AS $MFSfiles
echo "----- STAN PO KOPIOWANIU ------------------------"
infomfs disc
echo "----------------------"
lsmfs disc
rm disc
| true |
d1f7071ed4e9f7e669705000c398ceae7d8b4dc5
|
Shell
|
ravivamsi/shellscripts
|
/3_callwebservicecurl.sh
|
UTF-8
| 260 | 2.578125 | 3 |
[] |
no_license
|
#! /bin/bash
<<CallWebservice
You can call the webservice using the curl library in shell
$ sh 3_callwebservice.sh
or
$ ./3_callwebservice.sh
CallWebservice
# -s silent, -k tolerate TLS verification failures; GET a sample JSON post.
output=$(curl -s -k -X GET "https://jsonplaceholder.typicode.com/posts/1")
# Quoting preserves the response's original whitespace; the unquoted form
# collapsed the JSON onto one whitespace-normalized line.
echo "$output"
| true |
8b276aff6a1ee7507bdb8639398759f85c1d66a9
|
Shell
|
Ahaad2021/adbanking3.24.0
|
/utilities/devel/crypte.sh
|
UTF-8
| 472 | 2.765625 | 3 |
[] |
no_license
|
#!/bin/bash
# Encrypt a licence file by running the crypte.php helper.
############################################################
# Useful variables
############################################################
source ${ADB_INSTALL_DIR:="/usr/share/adbanking"}/web/lib/bash/misc.sh
# ':' is the correct no-op carrier for default-assignment expansion.
# The original used 'let', which evaluates the assigned value as an
# arithmetic expression and returns a failure status for a zero result.
: "${DBUSER:=adbanking}"
: "${DBNAME:=$DBUSER}"
php -d include_path=".:/usr/share/pear:${ADB_INSTALL_DIR}/web" ${ADB_INSTALL_DIR}/bin/crypte.php /tmp/licence_decrypte.bin /tmp/licence.bin public
| true |
41a323fa1b49144fe8edea2166727af1b7e5ca9e
|
Shell
|
jdbeutel/grails.sh
|
/grails
|
UTF-8
| 2,464 | 4 | 4 |
[] |
no_license
|
#!/bin/bash
# Original Version: http://github.com/deluan
# Customized for my conventions: http://github.com/jdbeutel/grails.sh
# Check if GRAILS_HOME is set
if [ -z "$GRAILS_HOME" -o ! -d "$GRAILS_HOME" ]; then
echo "Error: GRAILS_HOME not set"
exit 2
fi
# From: https://github.com/alxndrsn/grails.sh/blob/master/dirname
dirname()
{
echo $1 | sed -e "s/[^\/\\]*$//" -e "s/[\/\\]$//" -e "s/^$/./"
}
# Extract the default version and base path from GRAILS_HOME
DEFAULT_VERSION=`basename $GRAILS_HOME`
BASE_GRAILS_PATH=`dirname $GRAILS_HOME`
APP_PROP="application.properties"
GRADLE_PROP="gradle.properties" # since Grails 3
DOWNLOAD_BASE_URL="https://github.com/grails/grails-core/releases/download"
# Try to get the version from the command line
TRY_VERSION=$1
if [[ $TRY_VERSION =~ [0-9]\.[0-9]\.[0-9]+$ ]]; then
VERSION=$TRY_VERSION
shift
fi
# Try to get release candidate version from the command line
if [[ $TRY_VERSION =~ [0-9]\.[0-9]\.[0-9]+\.RC[0-9] ]]; then
VERSION=$TRY_VERSION
shift
fi
# Or else get the version from the application.properties in the current directory
[ -z "$VERSION" -a -f "$APP_PROP" ] &&
VERSION=`awk -F'=' '/app.grails.version/ { print $2 }' $APP_PROP | tr -d '\r\n'`
# Or else get the version from the gradle.properties in the current directory (for Grails 3)
[ -z "$VERSION" -a -f "$GRADLE_PROP" ] &&
VERSION=`awk -F'=' '/grailsVersion/ { print $2 }' $GRADLE_PROP | tr -d '\r\n'`
# Or else use the default version
[ -z "$VERSION" ] &&
VERSION=$DEFAULT_VERSION
export GRAILS_HOME=${BASE_GRAILS_PATH}/${VERSION}
# Attempt to download and unzip the specified version if it does not exist
if [ ! -d ${GRAILS_HOME} ]; then
NEW_GRAILS_FILE=grails-${VERSION}
URL="${DOWNLOAD_BASE_URL}/v${VERSION}/${NEW_GRAILS_FILE}.zip"
echo "grails ${VERSION} does not exist, attempting to download..."
echo "$URL"
curl -L "$URL" -o ${NEW_GRAILS_FILE}.zip
unzip ./${NEW_GRAILS_FILE}.zip -d ${BASE_GRAILS_PATH}/
rm ${NEW_GRAILS_FILE}.zip
mv ${BASE_GRAILS_PATH}/${NEW_GRAILS_FILE} ${BASE_GRAILS_PATH}/${VERSION}
if [ -d ${GRAILS_HOME} ]; then
echo "Got grails version $VERSION successfully"
else
echo "Failed to get grails version $VERSION"
fi
fi
GRAILS_CMD=${GRAILS_HOME}/bin/grails
if [ ! -x "$GRAILS_CMD" ]; then
echo "Error: grails command not found at '$GRAILS_CMD'!"
exit 3
fi
exec $GRAILS_CMD "$@"
| true |
b43cbd7eaad91818076b3b57b0339b6a8c91f9b7
|
Shell
|
schuricov/test
|
/.timeno
|
UTF-8
| 1,045 | 3.515625 | 4 |
[] |
no_license
|
#!/bin/bash
#conditional for entering argument
# checking came parametrs
if [[ -n $1 ]]
then
case $1 in
get)
for a in 0 1 2 3 4 5 6
do
#echo $(i2cget -y 0 0x68 0x0$a)":0x0"$a >> b
echo $(i2cget -y 0 0x68 0x0$a) >> b
done
#cat b
# s - заменяем 0x в строках которые начинаются с 0x на пустоту (/заменяемое/) g - для всех строк >> перезаписываем b
sed '/^0x/s/0x//g' b >> c
#sed -r 's0x/' b
cat c
rm b
rm c
;;
set)
# set hours
if [[ $4 != "" ]]
then
$(i2cset -y 0 0x68 0x02 0x$2)
fi
# set min
if [[ $3 != "" ]]
then
$(i2cset -y 0 0x68 0x01 0x$3)
fi
# set sec
if [[ $2 != "" ]]
then
$(i2cset -y 0 0x68 0x00 0x$4)
fi
echo "time:" $2":"$3":"$4
;;
esac
fi
| true |
cb293d38c6f1b2fca8ef1cb9f925a5be5c22e4b8
|
Shell
|
dsmic/oakfoam
|
/scripts/decisiontrees/dt-compare.sh
|
UTF-8
| 929 | 3.625 | 4 |
[
"FSFAP",
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
set -eu
TEMPGTP="cmp_`date +%F_%T`.tmp"
TEMPLOG="cmplog_`date +%F_%T`.tmp"
OAKFOAM="../../oakfoam --nobook --log $TEMPLOG"
if (( $# < 1 )); then
echo "Exactly one DT required" >&2
exit 1
fi
DTFILE=$1
DTSOLOLEAF=1
if (( $# > 1 )); then
DTSOLOLEAF=$2
fi
if ! test -x ../../oakfoam; then
echo "File ../../oakfoam not found" >&2
exit 1
fi
echo "dtload \"$DTFILE\"" >> $TEMPGTP
echo "param dt_solo_leaf $DTSOLOLEAF" >> $TEMPGTP
echo 'param dt_ordered_comparison 1' >> $TEMPGTP
echo 'param undo_enable 0' >> $TEMPGTP # so gogui-adapter doesn't send undo commands
i=0
cat | while read GAME
do
let "i=$i+1"
echo "echo @@ GAME: \"$i '$GAME'\"" >> $TEMPGTP
echo "loadsgf \"$GAME\"" >> $TEMPGTP
done
# Use gogui-adapter to emulate loadsgf
cat "$TEMPGTP" | gogui-adapter "$OAKFOAM" 2>&1 | sed -n 's/^= @@ //p' >&2
cat "$TEMPLOG" | grep 'matched at' | sed 's/.*matched at: //'
rm -f $TEMPGTP $TEMPLOG
| true |
c15ef7f75c9804788027477fbb42bdb710180160
|
Shell
|
UFSC/moodle-provas-livecd-provas
|
/packages/src/moodle-provas/opt/provas/start_X.sh
|
UTF-8
| 543 | 3.109375 | 3 |
[] |
no_license
|
#!/bin/bash
#set -x
provas_config_file='/opt/provas/moodle_provas.conf'
[ -r "$provas_config_file" ] && source "$provas_config_file" || exit 1
functions_file="$provas_dir/includes/functions.sh"
[ -r "$functions_file" ] && source "$functions_file" || exit 1
log 'Verificando se o script foi executado com poder de root'
is_root
log 'Verificando se o computador é um multiterminal'
if is_multiseat; then
log 'Iniciando em modo multiterminal.'
start_multiseat_mode
else
log 'Iniciando em modo normal.'
start_normal_mode
fi
| true |
212ccaed3c4531602aafea14eb72b8cd0c069fe9
|
Shell
|
mattrix27/moos-ivp-oyster
|
/.svn/pristine/21/212ccaed3c4531602aafea14eb72b8cd0c069fe9.svn-base
|
UTF-8
| 5,771 | 3.109375 | 3 |
[] |
no_license
|
#!/bin/bash
SHORE_IP=192.168.1.155
SHORE_LISTEN="9300"
TIME_WARP=1
HELP="no"
JUST_BUILD="no"
START_POS="0,0,0"
VNAME="mokai"
VTEAM=""
VPORT="9013"
SHARE_LISTEN="9313"
BUTTON="5"
JOY_ID="0"
TEAMMATE1=""
TEAMMATE2=""
VOICE="ON"
for ARGI; do
if [ "${ARGI}" = "--help" -o "${ARGI}" = "-h" ] ; then
HELP="yes"
elif [ "${ARGI//[^0-9]/}" = "$ARGI" -a "$TIME_WARP" = 1 ]; then
TIME_WARP=$ARGI
elif [ "${ARGI}" = "--red" -o "${ARGI}" = "-r" ] ; then
VTEAM="red"
START_POS="50,-24,240"
GRAB_POS="-58,-71"
UNTAG_POS="50,-24"
RETURN_POS="50,-24"
VPORT="9013"
SHARE_LISTEN="9313"
BUTTON="5"
echo "Red team selected."
elif [ "${ARGI}" = "--blue" -o "${ARGI}" = "-b" ] ; then
VTEAM="blue"
START_POS="-58,-71,60"
GRAB_POS="50,-24"
UNTAG_POS="-58,-71"
RETURN_POS="-58,-71"
VPORT="9014"
SHARE_LISTEN="9314"
BUTTON="4"
echo "Blue team selected."
elif [ "${ARGI}" = "--w-evan" -o "${ARGI}" = "-e" ] ; then
if [[ -z $TEAMMATE1 ]]; then
TEAMMATE1="evan"
else TEAMMATE2="evan";
fi
elif [ "${ARGI}" = "--w-felix" -o "${ARGI}" = "-f" ] ; then
if [[ -z $TEAMMATE1 ]] ; then
TEAMMATE1="felix"
else TEAMMATE2="felix";
fi
elif [ "${ARGI}" = "--w-gus" -o "${ARGI}" = "-g" ] ; then
if [[ -z $TEAMMATE1 ]] ; then
TEAMMATE1="gus"
else TEAMMATE2="gus";
fi
elif [ "${ARGI}" = "--w-hal" -o "${ARGI}" = "-H" ] ; then
if [[ -z $TEAMMATE1 ]] ; then
TEAMMATE1="hal"
else TEAMMATE2="hal";
fi
elif [ "${ARGI}" = "--w-ida" -o "${ARGI}" = "-i" ] ; then
if [[ -z $TEAMMATE1 ]] ; then
TEAMMATE1="ida"
else TEAMMATE2="ida";
fi
elif [ "${ARGI}" = "--w-jing" -o "${ARGI}" = "-J" ] ; then
if [[ -z $TEAMMATE1 ]] ; then
TEAMMATE1="jing"
else TEAMMATE2="jing";
fi
elif [ "${ARGI}" = "--just_build" -o "${ARGI}" = "-j" ] ; then
JUST_BUILD="yes"
echo "Just building files; no vehicle launch."
elif [ "${ARGI}" = "--sim" -o "${ARGI}" = "-s" ] ; then
SIM="SIM=FULL"
echo "Full simulation mode ON."
elif [ "${ARGI}" = "--semi-sim" -o "${ARGI}" = "-ss" ] ; then
SIM="SIM=SEMI"
echo "Semi simulation mode ON."
elif [ "${ARGI}" = "--voice-on" -o "${ARGI}" = "-von" ] ; then
VOICE="ON"
echo "Voice recognition ON."
elif [ "${ARGI}" = "--voice-off" -o "${ARGI}" = "-voff" ] ; then
VOICE="OFF"
echo "Voice recognition OFF."
else
echo "Undefined argument:" $ARGI
echo "Please use -h for help."
exit 1
fi
done
if [ "${HELP}" = "yes" ]; then
echo "$0 [SWITCHES]"
echo " --blue, -b : Blue team"
echo " --red, -r : Red team"
echo " --w-evan, -e : Evan as a teammate."
echo " --w-felix, -f : Felix as a teammate."
echo " --w-gus, -g : Gus as a teammate."
echo " --w-hal, -H : Hal as a teammate."
echo " --w-ida, -i : Ida as a teammate."
echo " --w-jing, -J : Jing as a teammate."
echo " --semi-sim, -ss : Semi-autonomous simulation (w/ joysticks)"
echo " --sim, -s : Full simulation"
echo " --voice-on, -von : Voice recognition on"
echo " --voice-off, -voff : Voice recognition off"
echo " --just_build, -j"
echo " --help, -h"
exit 0;
fi
if [ -z $VTEAM ]; then
echo "No team has been selected..."
echo "Exiting."
exit 3
fi
if [ -z $TEAMMATE1 ]; then
echo "Teammate 1 is missing..."
echo "Exiting."
exit 2
fi
if [ -z $TEAMMATE2 ]; then
echo "Teammate 2 is missing..."
echo "Exiting."
exit 2
fi
echo "Assembling MOOS file targ_${VNAME}_${VTEAM}.moos ."
nsplug meta_mokai.moos targ_${VNAME}_${VTEAM}.moos -f \
VNAME="${VNAME}_${VTEAM}" \
VPORT=$VPORT \
SHARE_LISTEN=$SHARE_LISTEN \
WARP=$TIME_WARP \
SHORE_LISTEN=$SHORE_LISTEN \
SHORE_IP=$SHORE_IP \
VTYPE="mokai" \
VTEAM=$VTEAM \
BUTTON=$BUTTON \
JOY_ID=$JOY_ID \
TEAMMATE1=$TEAMMATE1 \
TEAMMATE2=$TEAMMATE2 \
VOICE=$VOICE \
START_POS=$START_POS \
$SIM
echo "Assembling BHV file targ_${VNAME}_${VTEAM}.bhv ."
nsplug meta_mokai.bhv targ_${VNAME}_${VTEAM}.bhv -f \
VNAME="${VNAME}_${VTEAM}" \
VPORT=$VPORT \
SHARE_LISTEN=$SHARE_LISTEN \
WARP=$WARP \
SHORE_LISTEN=$SHORE_LISTEN \
SHORE_IP=$SHORE_IP \
VTYPE="mokai" \
VTEAM=$VTEAM \
BUTTON=$BUTTON \
JOY_ID=$JOY_ID \
TEAMMATE1=$TEAMMATE1 \
TEAMMATE2=$TEAMMATE2 \
START_POS=$START_POS \
RETURN_POS=$RETURN_POS \
GRAB_POS=$GRAB_POS \
UNTAG_POS=$UNTAG_POS
if [ ${JUST_BUILD} = "yes" ] ; then
echo "Files assembled; vehicle not launched; exiting per request."
exit 0
fi
if [ ! -e targ_${VNAME}_${VTEAM}.moos ]; then echo "no targ_${VNAME}_${VTEAM}.moos!"; exit 1; fi
if [ ! -e targ_${VNAME}_${VTEAM}.bhv ]; then echo "no targ_${VNAME}_${VTEAM}.bhv!"; exit 1; fi
echo "Launching $VNAME MOOS Community."
pAntler targ_${VNAME}_${VTEAM}.moos >& /dev/null &
uMAC targ_${VNAME}_${VTEAM}.moos
echo "Killing all processes ..."
kill -- -$$
echo "Done killing processes."
| true |
c82ec53f44677c8d5b405e279d7ea06f65d7beaf
|
Shell
|
agalitsyna/sc_dros
|
/scripts/01_data_mapping/012_run_pairsam2cooler.sh
|
UTF-8
| 783 | 3.28125 | 3 |
[
"MIT"
] |
permissive
|
for data_type in data data_subsampled data_shuffled
do
mkdir -p ../../${data_type}/PAIRIX
mkdir -p ../../${data_type}/COOL
done
exps="Cell1 Cell2 Cell3 Cell4 Cell5 Cell6 Cell7 Cell8 Cell9 Cell10 Cell11 Cell12 Cell13 Cell14 Cell15 Cell16 Cell17 Cell18 Cell19 Cell20"
for pref in $exps
do
echo "$pref"
# Extracting JJ and PP pairs for QC
for file in ../../data/PAIR/${pref}_*.pairsam
do
grep -E 'JJ|#|PP' $file > ${file}.JJPP
grep -E 'JJ|#' $file > ${file}.JJ
grep -E 'PP|#' $file > ${file}.PP
done
# Running pairsam to cooler conversion
python 012_pairsam2cooler.py ../../data/PAIR/${pref}_\*.pairsam $pref ../../data/COOL/
python 012_pairsam2cooler_savemarg.py ../../data/PAIR/${pref}_\*.pairsam $pref ../../data/COOL
done
| true |
a9dbc256a3b9eab672de335402c5d883d0bdb8ab
|
Shell
|
MPZinke/SmartCurtain
|
/MergeProduction.sh
|
UTF-8
| 1,131 | 4.0625 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# FROM: https://gist.github.com/tmiller/5222478
# Merges the master branch into all other branches
#
# Process:
#
# - Save the name of the current branch
# - If the current branch is not master then checkout master.
# - Pull the latest changes for master from its upstream branch.
# - Loop over each local branch.
# - If the branch is not master.
# - Checkout the branch.
# - Merge master into the branch.
# - If the current branch is not the saved branch name checkout the saved
# branch name
MAIN_BRANCH="Production"
# Returns the names of all the local branches
local_branches()
{
git for-each-ref --format="%(refname:short)" refs/heads
}
# Returns the name of the current branch
current_branch()
{
git symbolic-ref --short HEAD
}
saved_branch=$(current_branch)
[[ "${saved_branch}" != "${MAIN_BRANCH}" ]] && git checkout "${MAIN_BRANCH}"
git pull
for branch in $(local_branches); do
if [[ "${branch}" != "${MAIN_BRANCH}" ]]; then
echo
git checkout "${branch}"
git merge "${MAIN_BRANCH}"
git push
fi
done
echo
[[ "${saved_branch}" != "$(current_branch)" ]] && git checkout "${saved_branch}"
| true |
41e7df687a03c5088c9576bcca898bff90039d5f
|
Shell
|
Jaynesh1609/infrastructure
|
/tasks/scripts/compile.sh
|
UTF-8
| 142 | 2.75 | 3 |
[] |
no_license
|
#!/bin/bash
set -euo pipefail
output="$(pwd)/compiled/"
cd repo
[ ! -z "$CONTEXT" ] && cd "$CONTEXT"
go build -o "$output" "$PACKAGE_PATH"
| true |
6fa22e684d72d71a710139677b5bb88703246cfe
|
Shell
|
pluser/packers
|
/mattermost/install-mattermost.sh
|
UTF-8
| 857 | 2.75 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
apt update
apt install --assume-yes curl
apt install --assume-yes postgresql postgresql-contrib
psql -U postgres -c "\n
CREATE DATABASE mattermost;\n
CREATE USER mattermost;\n
ALTER DATABASE mattermost OWNER BY mattermost;\n
GRANT ALL PRIVILEGES ON DATABASE mattermost to mattermost;"
systemctl start postgresql.service
wget https://releases.mattermost.com/4.5.0/mattermost-4.5.0-linux-amd64.tar.gz
tar -xf mattermost*.tar.gz -C /opt
mkdir /opt/mattermost/data
useradd --system --user-group mattermost
chown -R mattermost:mattermost /opt/mattermost
chmod -R g+w /opt/mattermost
apt install --assume-yes patch
patch /opt/mattermost/config/config.json mattermost-config.json.patch
cp mattermost.service /etc/systemd/system/mattermost.service
systemctl daemon-reload
systemctl start mattermost.service
systemctl enable mattermost.service
| true |
408b23849f2e67e4d1d5f6fad783eda8f0b13245
|
Shell
|
ccin2p3/jsaga
|
/jsaga-engine/config/examples/template.sh
|
UTF-8
| 952 | 3.65625 | 4 |
[] |
no_license
|
#!/bin/sh
# set arguments
ARGS=$*
#
if test "$OS" = "Windows_NT"
then
SEP=";"
else
SEP=":"
fi
# set JSAGA_HOME
if test -z "$JSAGA_HOME" ; then
if test -d "%INSTALL_PATH" ; then
JSAGA_HOME="%INSTALL_PATH"
else
JSAGA_HOME=.
fi
fi
# set system properties
#PROPERTIES=
PROPERTIES="${PROPERTIES} -DJSAGA_HOME=$JSAGA_HOME"
#PROPERTIES="${PROPERTIES} -Ddebug"
# set classpath
CLASSPATH=.
for i in $JSAGA_HOME/lib/*.jar ; do
CLASSPATH="${CLASSPATH}${SEP}${i}"
done
for i in $JSAGA_HOME/lib-adaptors/*.jar ; do
CLASSPATH="${CLASSPATH}${SEP}${i}"
done
if test "${class.name}" = "junit.textui.TestRunner" ; then
for i in $JSAGA_HOME/lib-test/*.jar ; do
CLASSPATH="${CLASSPATH}${SEP}${i}"
done
fi
# set java
if test -z "$JAVA_HOME" ; then
JAVA=java
else
JAVA="$JAVA_HOME/bin/java"
fi
# run command
CMD="\"$JAVA\" $PROPERTIES -cp \"$CLASSPATH\" ${class.name} $*"
if test -n "$DEBUG" ; then
echo $CMD
fi
eval $CMD
| true |
a677e647592b237eb6a556368f6c201cd0224a70
|
Shell
|
nhlshstr/holberton-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/6-superstitious_numbers
|
UTF-8
| 268 | 3.34375 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Superstitious Numbers
x=1
while [ $x -lt 21 ]
do
echo $x
if [ $x -eq 3 ]
then
echo "bad luck from China"
elif [ $x -eq 8 ]
then
echo "bad luck from Japan"
elif [ $x -eq 16 ]
then
echo "bad luck from Italy"
fi
x=$((x + 1))
done
| true |
22f919ee0c1666902396c5b5b89b17a5a1d6b2ea
|
Shell
|
MooglyOogly/dotfiles
|
/polybar/launch.sh
|
UTF-8
| 302 | 2.6875 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Terminate already running bar instances
killall -q polybar
# Wait until the processes have been shut down
# while pgrep -u $UID -x polybar >/dev/null; do sleep 1; done
# Launch Polybar
polybar top -c ~/.config/polybar/config.ini &
polybar top2 -c ~/.config/polybar/config.ini &
| true |
16b3d1618e26004be93b8f989392665050654ea3
|
Shell
|
shashank195/USP-UG-SEM5
|
/WLCcount.sh
|
UTF-8
| 176 | 3.234375 | 3 |
[] |
no_license
|
#! /bin/sh
echo "Enter the file path:"
read path
w=`cat $path | wc -w`
l=`cat $path | wc -l`
c=`cat $path | wc -c`
output="Words: $w | Lines: $l | Characters: $c"
echo $output
| true |
42517d4748af2ff44a0e07bbac20972a87f3e99f
|
Shell
|
zitounh/wilAime
|
/mainBruteForce.sh
|
UTF-8
| 157 | 2.765625 | 3 |
[] |
no_license
|
#!/bin/bash
input="dict.txt"
proc=0
N=6
COUNTER=0
while IFS= read -r line
do
((proc=proc%N)); ((proc++==0)) && wait
./oneWord.sh $line &
done < "$input"
| true |
90fd57a0ea7167dd4b3e07fb4db37e037513cf9a
|
Shell
|
sadmankiba/FAST-EF
|
/scripts/create_netns_foreign.sh
|
UTF-8
| 562 | 2.734375 | 3 |
[] |
no_license
|
# default values
NAME="ns1"
HOST_VETH_IP="10.20.40.2"
NETNS_VETH_IP="10.20.40.3"
VETH_NETWORK="10.20.40.0"
ip netns add $NAME
ip link add veth0 type veth peer name veth1
ip link set veth1 netns $NAME
ne1="ip netns exec $NAME"
# set address
ip addr add $HOST_VETH_IP/24 dev veth0
$ne1 ip addr add $NETNS_VETH_IP/24 dev veth1
ip link set veth0 up
$ne1 ip link set veth1 up
$ne1 ip link set lo up
# set route
$ne1 ip route add default via $HOST_VETH_IP
sysctl net.ipv4.ip_forward=1
iptables -t nat -A POSTROUTING -s $VETH_NETWORK/24 -d 0.0.0.0/0 -j MASQUERADE
| true |
20279d93d03ff13fef9b927c46116f8a8f23836b
|
Shell
|
jakubguzek/arch-dotfiles-macbookair
|
/.local/bin/nmenu
|
UTF-8
| 1,218 | 3.375 | 3 |
[] |
no_license
|
#!/usr/bin/env sh
if [ -e $HOME/.local/bin/color_cache ]; then
. "${HOME}/.local/bin/color_cache"
network_name=$(nmcli device wifi list | sed -n '1!p' | dmenu -i -l 15 -fn Monospace-14 -nb "$color0" -nf "$color7" -sb "$color1" -sf "$color7" | awk -F ' {2,}' '{print $2}')
[ "$network_name" != "" ] || exit
ask_pass=$(printf Yes\\nNo | dmenu -p "password?" -i -l 2 -fn Monospace-14 -nb "$color0" -nf "$color7" -sb "$color1" -sf "$color7")
[ "$ask_pass" != "" ] || exit
if [ $ask_pass = Yes ]; then
read -p "password for $network_name: " password
nmcli device wifi connect "$network_name" password "$password"
elif [ $ask_pass = No ]; then
nmcli device wifi connect "$network_name"
else
exit
fi
else
network_name=$(nmcli device wifi list | sed -n '1!p' | dmenu -i -l 15 -fn Monospace-14 | awk -F ' {2,}' '{print $2}')
[ "$network_name" != "" ] || exit
ask_pass=$(printf Yes\\nNo | dmenu -p "password?" -i -l 2 -fn Monospace-14)
[ "$ask_pass" != "" ] || exit
if [ $ask_pass = Yes ]; then
read -p "password for $network_name: " password
nmcli device wifi connect "$network_name" password "$password"
elif [ $ask_pass = No ]; then
nmcli device wifi connect "$network_name"
else
exit
fi
fi
| true |
74277416112a4faab429776055c01ee7246fcc07
|
Shell
|
ZhangXinNan/LearnPractice
|
/linux/shell/shell_for.sh
|
UTF-8
| 102 | 2.71875 | 3 |
[] |
no_license
|
for port in {9980..9989}
do
echo $port
done
for port in `seq 9980 9989`
do
echo $port
done
| true |
f62b2753893eede745c2ae8e1e036037ef5cc489
|
Shell
|
christopher-barry/lishp
|
/lishp.sh
|
UTF-8
| 2,005 | 3.5 | 4 |
[] |
no_license
|
#!/bin/bash
# If this file has already been sourced, just return
[ ${LISHP_SH+true} ] && return
declare -g LISHP_SH=true
declare -g VERBOSE
if [ ${1+isset} ] && [ "$1" == "-verbose" ]; then
VERBOSE=true
shift
else
VERBOSE=false
fi
. ${BASH_SOURCE%/*}/common.sh
. ${BASH_SOURCE%/*}/variables.sh
. ${BASH_SOURCE%/*}/variables.arraylist.sh
. ${BASH_SOURCE%/*}/variables.atom.sh
. ${BASH_SOURCE%/*}/variables.linkedlist.sh
. ${BASH_SOURCE%/*}/variables.map.sh
. ${BASH_SOURCE%/*}/variables.queue.sh
. ${BASH_SOURCE%/*}/variables.stack.sh
. ${BASH_SOURCE%/*}/callable.sh
. ${BASH_SOURCE%/*}/callable.lambda.sh
. ${BASH_SOURCE%/*}/environment.sh
. ${BASH_SOURCE%/*}/evaluator.sh
. ${BASH_SOURCE%/*}/evaluator.functions.builtin.sh
. ${BASH_SOURCE%/*}/parser.sh
. ${BASH_SOURCE%/*}/logger.sh
. ${BASH_SOURCE%/*}/specialforms.sh
. ${BASH_SOURCE%/*}/specialforms.if.sh
. ${BASH_SOURCE%/*}/specialforms.lambda.sh
. ${BASH_SOURCE%/*}/specialforms.let.sh
. ${BASH_SOURCE%/*}/specialforms.letstar.sh
$VERBOSE && echo "Sourced libraries!"
# (lambda (x y)
# (+ x y)
# per: http://stackoverflow.com/questions/6980090/bash-read-from-file-or-stdin
# This will read from the filename specified as a parameter...
# or from stdin if none specified
read -r -d '' code < "${1:-/proc/${$}/fd/0}"
$VERBOSE && echo "Code read!"
$VERBOSE && echo =================
$VERBOSE && echo "$code"
$VERBOSE && echo =================
if ! parser::parse::multiExpression "${code}"; then
echo "Could not parse input
====
${code}
===="
exit 1
fi
$VERBOSE && echo "Parsed!"
#variable::printMetadata
#variable::toSexp "${PARSER_PARSED}" ; echo ${RESULT}
#variable::debug "${PARSER_PARSED}" ; echo ${RESULT}
environment::new ; declare env=${RESULT}
evaluator::setup_builtins "${env}"
$VERBOSE && echo "Environment setup!"
evaluator::evalFromLinkedList ${env} ${PARSER_PARSED}
variable::debug ${RESULT}
$VERBOSE && echo "Done!"
$VERBOSE && echo =================
echo "$RESULT"
$VERBOSE && echo =================
| true |
0641cfe20e5b0d4a3137aa349ce1e39330f2c406
|
Shell
|
daveharris/personal
|
/bash/podcast.sh
|
UTF-8
| 3,615 | 3.140625 | 3 |
[] |
no_license
|
#Written by David Harris (david.harris <at> orcon.net.nz)
#Takes in the fullpath of the podcastz and tags/renames file
#Pops up when finished
#!/bin/bash
filename="$(echo $1 | cut -d '/' -f 6)"
#echo "Filename: $filename"
dir="$(echo "$1" | sed 's/\(.*\)\/.*/\1/')"
#echo "Dir: $dir"
notify () {
zenity --info --text "$1 was downloaded and tagged successfully" &
}
testTitle () {
if [ -z "$title" ]; then
echo "The Title is empty ... quitting"
exit
fi
}
diggnation () {
title="Diggnation $(echo "$filename" | cut -d '-' -f 3 | sed 's/00\(..\)/\1/')"
testTitle
id3v2 -D "$filename" &>/dev/null
id3v2 --artist 'Diggnation' --album 'Diggnation' --song "$title" --TCON 'Podcast' "$filename"
mv "$filename" "$title.mp3"
notify "$title"
}
floss () {
title=$(id3v2 -l "$filename" | grep TIT2 | cut -d ' ' -f 4- | sed 's/:/ -/')
testTitle
id3v2 -D "$filename" &>/dev/null
id3v2 --artist 'FLOSS' --album 'FLOSS' --song "$title" --TCON 'Podcast' "$filename"
mv "$filename" "$title.mp3"
notify "$title"
}
jaynjack () {
#title=$(id3v2 -l "$filename" | grep Title | cut -d ' ' -f 8)
#if [ "$title" = "1-X" ]; then
#title=$(id3v2 -l "$filename" | grep TIT2 | cut -d ' ' -f 7- | sed 's/\"//g' | sed 's/\(Ep\. .\...\) \(.*\)/Jay \& Jack \1 - \2/')
title=$(id3v2 -l "$filename" | grep 'Title' | cut -d ' ' -f 7- | sed 's/ Artist.*//')
testTitle
id3v2 -D "$filename" &>/dev/null
id3v2 --artist 'Jay & Jack' --album 'Jay & Jack' --song "$title" --TCON 'Podcast' "$filename"
mv "$filename" "$title.mp3"
notify_title=$(echo $title | sed 's/&/and/')
notify "$notify_title"
}
lost () {
title="Lost $(echo "$filename" | cut -d '_' -f 2)"
testTitle
id3v2 -D "$filename" &>/dev/null
id3v2 --artist 'Lost' --album 'Lost' --song "$title" --TCON 'Podcast' "$filename"
mv "$filename" "$title.mp3"
notify "$title"
}
lugradio () {
title="LugRadio $(echo $filename | cut -d '-' -f 2 | sed 's/s0\(.\)e\(..\)/[\1x\2]/')"
testTitle
id3v2 -D "$filename" &>/dev/null
id3v2 --artist 'LugRadio' --album 'LugRadio' --song "$title" --TCON 'Podcast' "$filename"
mv "$filename" "$title.mp3"
notify "$title"
}
novell () {
title="Novell Open Audio $(echo $filename | cut -d '/' -f 6 | sed 's/.*_.\(..\).*/\1/')"
testTitle
description=$(id3v2 -l "$filename" | grep TIT2 | cut -d ' ' -f 4-)
id3v2 -D "$filename" &>/dev/null
id3v2 --artist 'Novell Open Audio' --album 'Novell Open Audio' --song "$description" --TCON 'Podcast' "$filename"
mv "$filename" "$title.mp3"
notify "$title"
}
twit () {
title=$(id3v2 -l "$filename" | grep 'TIT2' | cut -d ' ' -f 4-)
testTitle
id3v2 -D "$filename" &>/dev/null
id3v2 --artist 'TWiT' --album 'TWiT' --song "$title" --TCON 'Podcast' "$filename"
title=$(echo $title | sed 's/\:/\ -/')
mv "$filename" "$title.mp3"
notify "$title"
}
security_now () {
title=$(id3v2 -l "$filename" | grep 'TIT2' | cut -d ' ' -f 4- | sed 's/Security Now/SN/'| sed 's/: / - /' | cut -d '-' -f 1-2 | sed 's/ $//' )
testTitle
id3v2 -D "$filename" &>/dev/null
id3v2 --artist 'Security Now!' --album 'Security Now!' --song "$title" --TCON 'Podcast' "$filename"
mv "$filename" "$title.mp3"
notify "$title"
}
cd "$dir"
#artist=$(id3v2 -l $filename | grep Artist | cut -d ':' -f 4 | cut -d ' ' -f 2,3)
#artist=$(id3v2 -l "$filename" | grep "Jeremiah")
#if [ -n "$artist" ]; then #= "Jeremiah Glatfelter" ]; then
# jaynjack
#fi
case "$filename" in
diggnation* ) diggnation ;;
FLOSS* ) floss ;;
Lostpodcast* ) lost ;;
lugradio* ) lugradio ;;
TWiT* ) twit ;;
SN* ) security_now ;;
noa* ) novell ;;
* ) jaynjack;;
esac
| true |
77ee49ece9c1ad00dc040fd05416cb03271e736e
|
Shell
|
lukaszachy/tmt
|
/tests/steps/invalid/test.sh
|
UTF-8
| 758 | 2.71875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
. /usr/share/beakerlib/beakerlib.sh || exit 1
rlJournalStart
rlPhaseStartSetup
rlRun "tmp=\$(mktemp -d)" 0 "Create tmp directory"
rlRun "pushd data"
rlRun "set -o pipefail"
rlPhaseEnd
rlPhaseStartTest
rlRun "tmt run --scratch -i $tmp 2>&1 | tee output" 2 "Expect the run to fail"
rlAssertGrep "Unsupported provision method" "output"
rlRun "tmt run --scratch -i $tmp discover 2>&1 | tee output" 0 \
"Invalid step not enabled, do not fail"
rlAssertGrep "warn: Unsupported provision method" "output"
rlPhaseEnd
rlPhaseStartCleanup
rlRun "rm output"
rlRun "popd"
rlRun "rm -r $tmp" 0 "Remove tmp directory"
rlPhaseEnd
rlJournalEnd
| true |
d5b089b3709b2f018465b4ecffc0eb8d879418c2
|
Shell
|
greenpeace/planet4-helper-scripts
|
/k8s/drain_node.sh
|
UTF-8
| 413 | 3.5 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -euo pipefail
node=$1
grace=${2:-30}
kubectl get nodes
echo
[[ -n ${FORCE_DRAIN:-} ]] || {
read -rp "Drain node '$node'? [y/N] " yn
case "$yn" in
[Yy]* ) : ;;
* ) exit 1;;
esac
}
echo
echo " >> Draining ${node#node\/} ..."
set -x
time kubectl drain --force --ignore-daemonsets --delete-local-data --grace-period="${grace}" "${node}"
{ set +x; } 2>/dev/null
echo
date
| true |
0ecf1b2577fc2a22b8838632842c2ac047754b00
|
Shell
|
9001/usr-local-bin
|
/ffchk
|
UTF-8
| 2,564 | 3.84375 | 4 |
[] |
no_license
|
#!/bin/bash
set -e
# ffchk - look for corruption in multimedia files
# ed <irc.rizon.net>, MIT-licensed, https://github.com/9001/usr-local-bin
# -- reads through the provided multimedia files
# -- shows list of healthy/corrupted files and errors detected
# -- first arg switches verification mode:
# -- parse container packets (fast)
# -- full stream-decode (slow)
mode="$1"
shift
[ -z "$1" ] && {
echo "need arg 1: mode, [f]ast or [s]low"
echo "need arg n: filename(s)"
exit 1
}
args=()
[ "$mode" = f ] &&
args=(-codec copy)
[ "$mode" = s ] &&
args=(-vcodec rawvideo -acodec pcm_s16le)
[ -z "${args[*]}" ] && {
echo "bad arg 1: $mode"
exit 1
}
tmp=$(mktemp)
cln() {
rm -f $tmp
#echo $tmp
}
trap cln EXIT
trap cln ERR
get_warnings() {
grep -vE '^Command line:|^ffmpeg -y' $tmp |
grep -vE 'Could not find codec parameters for stream|Consider increasing the value for the .?analyzeduration|Estimating duration from bitrate,'
}
maxrv=0
report=()
for fn in "$@"
do
printf '\n\033[1;37;44m%s\033[0m\n' "$fn"
# trace debug verbose info warning error fatal panic quiet
# 56 48 40 32 24 16 8 0 -8
FFREPORT="level=24:file=$tmp" \
nice ffmpeg -y -hide_banner -nostdin -v info \
-err_detect +crccheck+bitstream+buffer+careful+compliant+aggressive+explode \
-xerror -i "$fn" "${args[@]}" -f null - &&
rv=0 || rv=$?
[ $rv -eq 0 ] ||
printf '\033[1;37;41m error \033[0m\n'
# ffmpeg doesn't always return nonzero depending on error,
# so we also check for warnings in stdout
get_warnings >/dev/null &&
printf '\033[1;30;43m/ warning \\\033[0m\n' &&
get_warnings &&
printf '\033[1;30;43m\\ warning /\033[0m\n'
[ $rv -eq 0 ] && get_warnings >/dev/null &&
rv=1
[ $rv -eq 0 ] &&
printf '\033[32mok\033[0m\n'
[ $maxrv -lt $rv ] &&
maxrv=$rv
[ $rv -eq 0 ] &&
c=32 ||
c=35
report+=("$(printf '\033[0;%sm%d %s\033[0m' $c "$rv" "$fn")")
while IFS= read -r x
do
report+=("$(printf ' \033[33m└─ %s\033[0m' "$x")")
done < <(get_warnings | head -n 5)
done
printf '\n\n'
for ln in "${report[@]}"
do
printf '%s\n' "$ln"
done
exit $maxrv
stdbuf=()
which stdbuf >/dev/null 2>/dev/null &&
stdbuf=(stdbuf -oL)
# subprocess to capture ffmpeg error-code into logfile
(
AV_LOG_FORCE_COLOR=1 \
nice "${stdbuf[@]}" ffmpeg -y -hide_banner -nostdin -v info \
-err_detect +crccheck+bitstream+buffer+careful+compliant+aggressive+explode \
-xerror -i "$fn" "${args[@]}" -f null - 2>&1
echo $? >> $tmp
) | "${stdbuf[@]}" tee $tmp
rv=$(tail -n 1 $tmp)
| true |
d540cc51826389ba886977a605a326008b7949e9
|
Shell
|
redpeacock78/dotfiles
|
/bin/youmus
|
UTF-8
| 9,466 | 3.453125 | 3 |
[] |
no_license
|
#! /bin/bash
#Author:[redpeacock78]
#email:[scarletpeacock78@gmail.com]
#git:[git@github.com:redpeacock78]
# _ _ ___ _ _ _ __ ___ _ _ ___ #
# | | | |/ _ \| | | | '_ ` _ \| | | / __| #
# | |_| | (_) | |_| | | | | | | |_| \__ \ #
# \__, |\___/ \__,_|_| |_| |_|\__,_|___/ #
# |___/ #
##########BEGIN##########
#区切り文字を改行に限定
IFS='
'
###Variable specification###
PLAYBACK_TARGET=`echo $@ | sed -e "s/^-*[a-z0-9]*[ ]//" -e "s/watch?v=[0-9a-zA-Z]*&/playlist?/g" -e "s/[ ]-*[a-z0-9]*$//" -e "s/ \//\n\//g" -e "s/ http/\nhttp/g"`
###Play it shuffle.###
if [[ $1 == '-s' ]] \
|| [[ ${@:2} == '-s' ]] \
|| [[ $1 == '--shuffle' ]] \
|| [[ ${@:2} == '--shuffle' ]]; then
if [[ "$PLAYBACK_TARGET" =~ playlist ]] && [[ "$PLAYBACK_TARGET" =~ http ]]; then
mpv --ytdl-format="bestaudio" --shuffle $PLAYBACK_TARGET
elif [[ "$PLAYBACK_TARGET" =~ .m3u ]] || [[ "$PLAYBACK_TARGET" =~ .m3u8 ]]; then
if [[ `grep -s '' $PLAYBACK_TARGET | grep -v -s '#' | wc -l` -gt 1 ]]; then
mpv --ytdl-format="bestaudio" --shuffle --playlist=$PLAYBACK_TARGET
elif [[ `grep -s '' $PLAYBACK_TARGET | grep -v -s '#' | wc -l` -le 1 ]]; then
#OS判定
#Mackintosh
if [ "$(uname)" == 'Darwin' ]; then
printf "\e[31mThis can not be shuffled.\nThere is only one item you want to play back, or nothing is written.\e[m\n"
while true; do
read -p "Do you play without shuffling? [Y/n/q]: " ANSWER
case $ANSWER in
[Yy] | [Yy][Ee][Ss] | "" ) printf "\e[33mShuffle Playback will be canceled and played on a playlist basis.\e[m\n"
sleep 1.7s
mpv --ytdl-format="bestaudio" --playlist=$PLAYBACK_TARGET
break ;;
[Nn] | [Nn][Oo] ) printf "\e[34mShuffle play is executed...\e[m\n"
sleep 1.7s
mpv --shuffle --ytdl-format="bestaudio" --playlist=$PLAYBACK_TARGET
break ;;
[Qq] | [Uu][Ii][Tt] ) printf "\nExiting... (Quit)\n" && sleep 1.7s && break ;;
* ) printf "\nPlease enter in [Y/n/q].";;
esac
done
#Linux system
elif [ "$(expr substr $(uname -s) 1 5)" == 'Linux' ]; then
echo -e "\e[31mThis can not be shuffled.\nThere is only one item you want to play back, or nothing is written.\e[m\n"
while true; do
read -p "Do you play without shuffling? [Y/n/q]: " ANSWER
case $ANSWER in
[Yy] | [Yy][Ee][Ss] | "" ) echo -e "\e[33mShuffle Playback will be canceled and played on a playlist basis.\e[m\n"
sleep 1.7s
mpv --ytdl-format="bestaudio" --playlist=$PLAYBACK_TARGET
break ;;
[Nn] | [Nn][Oo] ) echo -e "\e[34mShuffle play is executed...\e[m\n"
sleep 1.7s
mpv --shuffle --ytdl-format="bestaudio" --playlist=$PLAYBACK_TARGET
break ;;
[Qq] | [Uu][Ii][Tt] ) echo -e "\nExiting... (Quit)\n" && sleep 1.7s && break ;;
* ) echo -e "\nPlease enter in [Y/n/q].";;
esac
done
fi
fi
elif [[ "$PLAYBACK_TARGET" =~ watch ]] && [[ "$PLAYBACK_TARGET" =~ http ]]; then
if [[ `echo "$PLAYBACK_TARGET" | sed "s/watch/watch\n/gi" | grep -c "watch" ` != 1 ]]; then
mpv --ytdl-format="bestaudio" --shuffle $PLAYBACK_TARGET
elif [[ `echo "$PLAYBACK_TARGET" | sed "s/watch/watch\n/gi" | grep -c "watch"` = 1 ]]; then
echo "This can not be shuffled." && exit 2
fi
elif [[ -d "$PLAYBACK_TARGET" ]]; then
if [[ `file "$PLAYBACK_TARGET"* | grep M3U | wc -l` -gt 1 ]]; then
if [[ `grep -s '' "$PLAYBACK_TARGET"* | grep -v -s '#' | wc -l` -gt 1 ]]; then
mpv --ytdl-format="bestaudio" --shuffle --playlist=$PLAYBACK_TARGET
elif [[ `grep -s '' "$PLAYBACK_TARGET"* | grep -v -s '#' | wc -l` -le 1 ]]; then
echo -e "This can not be shuffled.\nThere is only one item you want to play back, or nothing is written."
while true; do
read -p 'Do you play without shuffling? [Y/n]: ' ANSWER
case $ANSWER in
[Yy] | [Yy][Ee][Ss] | "" ) break;;
[Nn] | [Nn][Oo] ) echo "Canceled." && exit 2;;
* ) echo "Please enter in [Y/n].";;
esac
done
mpv --ytdl-format="bestaudio" --playlist=$PLAYBACK_TARGET
fi
elif [[ `file "$PLAYBACK_TARGET"* | grep -i audio | wc -l` -gt 1 ]]; then
mpv --shuffle $PLAYBACK_TARGET
elif [[ `file "$PLAYBACK_TARGET"* | grep -i audio | wc -l` -le 1 ]]; then
echo "This can not be shuffled."
while true; do
read -p 'Do you play without shuffling? [Y/n]: ' ANSWER
case $ANSWER in
[Yy] | [Yy][Ee][Ss] | "" ) break;;
[Nn] | [Nn][Oo] ) echo "Canceled." && exit 2;;
* ) echo "Please enter in [Y/n].";;
esac
done
mpv $PLAYBACK_TARGET
elif [[ `file "$PLAYBACK_TARGET"* | grep -i "ISO Media" | wc -l` -gt 1 ]]; then
mpv --shuffle --no-video-stereo-mode $PLAYBACK_TARGET
elif [[ `file "$PLAYBACK_TARGET"* | grep -i "ISO Media" | wc -l` -le 1 ]]; then
echo "This can not be shuffled."
while true; do
read -p 'Do you play without shuffling? [Y/n]: ' ANSWER
case $ANSWER in
[Yy] | [Yy][Ee][Ss] | "" ) break;;
[Nn] | [Nn][Oo] ) echo "Canceled." && exit 2;;
* ) echo "Please enter in [Y/n].";;
esac
done
mpv --no-video-stereo-mode $PLAYBACK_TARGET
fi
#アドレスのglob展開に対応(audio)
elif [[ `file $PLAYBACK_TARGET | grep -i audio | wc -l` -gt 1 ]]; then
if [[ `file $PLAYBACK_TARGET | grep -i audio | wc -l` -gt 1 ]]; then
mpv --shuffle $PLAYBACK_TARGET
elif [[ `file $PLAYBACK_TARGET | grep -i audio | wc -l` -lt 1 ]]; then
echo "This can not be shuffled." && exit 2
fi
else
echo "This can not be shuffled."
while true; do
read -p 'Do you play without shuffling? [Y/n]: ' ANSWER
case $ANSWER in
[Yy] | [Yy][Ee][Ss] | "" ) break;;
[Nn] | [Nn][Oo] ) echo -e "\nCanceled." && exit 2;;
* ) echo "Please enter in [Y/n].";;
esac
done
mpv $PLAYBACK_TARGET
fi
###Play it loop.###
elif [[ $1 == '-l' ]] \
|| [[ ${@:2} == '-l' ]] \
|| [[ $1 == '--loop' ]] \
|| [[ ${@:2} == '--loop' ]]; then
if [[ "$PLAYBACK_TARGET" =~ watch ]] && [[ "$PLAYBACK_TARGET" =~ http ]]; then
mpv --ytdl-format="bestaudio" --loop=inf $PLAYBACK_TARGET
elif [[ "$PLAYBACK_TARGET" =~ playlist ]] && [[ "$PLAYBACK_TARGET" =~ http ]];then
mpv --ytdl-format="bestaudio" --loop-playlist $PLAYBACK_TARGET
elif [[ `file $PLAYBACK_TARGET | grep -i audio` =~ audio ]] \
|| [[ `file $PLAYBACK_TARGET | grep -i "ISO Media"` =~ "ISO Media" ]] \
|| [[ "$PLAYBACK_TARGET" =~ .m3u ]]; then
mpv --no-video-stereo-mode --loop=inf $PLAYBACK_TARGET
else
echo -e "This cannot be playing filetype." && exit 2
fi
###Play it shuffle&loop.###
elif [[ $1 == '-sl' ]] \
|| [[ $1 == '-ls' ]] \
|| [[ ${@:2} == '-sl' ]] \
|| [[ ${@:2} == '-ls' ]] \
|| [[ $1 == '--shuffle --loop' ]] \
|| [[ $1 == '--loop --shuffle' ]] \
|| [[ ${@:2} == '--shuffle --loop' ]] \
|| [[ ${@:2} == '--shuffle --loop' ]]; then
if [[ $PLAYBACK_TARGET =~ watch ]] && [[ $PLAYBACK_TARGET =~ http ]]; then
sleep 1.7s
echo -e "\nThis URL cannot be shuffled.\nOnly loop playback is executed.\n"
sleep 1.7s
mpv --ytdl-format="bestaudio" --shuffle --loop=inf $PLAYBACK_TARGET
elif [[ $PLAYBACK_TARGET =~ playlist ]] && [[ $PLAYBACK_TARGET =~ http ]]; then
sleep 1.7s
echo -e "\nShuffle & loop play Youtube playlist.\n"
sleep 1.7s
mpv --ytdl-format="bestaudio" --shuffle --loop-playlist --load-unsafe-playlists $PLAYBACK_TARGET
elif [[ "$PLAYBACK_TARGET" =~ .m3u ]]; then
if [[ `grep -s '' $PLAYBACK_TARGET | grep -v -s '#' | wc -l` -gt 1 ]]; then
mpv --ytdl-format="bestaudio" --shuffle --loop-playlist --playlist=$PLAYBACK_TARGET
elif [[ `grep -s '' $PLAYBACK_TARGET | grep -v -s '#' | wc -l` -le 1 ]]; then
sleep 1.7s
echo -e "This can not be shuffled.\nOnly loop playback is executed.\n"
sleep 1.7s
mpv --ytdl-format="bestaudio" --loop-playlist --playlist=$PLAYBACK_TARGET
fi
elif [[ -d "$PLAYBACK_TARGET" ]]; then
if [[ `file "$PLAYBACK_TARGET"* | grep M3U | wc -l` -gt 1 ]]; then
if [[ `grep -s '' "$PLAYBACK_TARGET"* | grep -v -s '#' | wc -l` -gt 1 ]]; then
mpv --ytdl-format="bestaudio" --shuffle --loop-playlist --playlist=$PLAYBACK_TARGET
elif [[ `grep -s '' "$PLAYBACK_TARGET"* | grep -v -s '#' | wc -l` -le 1 ]]; then
sleep 1.7s
echo -e "This can not be shuffled.\nOnly loop playback is executed.\n"
sleep 1.7s
mpv --ytdl-format="bestaudio" --loop-playlist --playlist=$PLAYBACK_TARGET
fi
elif [[ `file "$PLAYBACK_TARGET"* | grep -i audio | wc -l` -gt 1 ]]; then
mpv --shuffle --loop $PLAYBACK_TARGET
elif [[ `file "$PLAYBACK_TARGET"* | grep -i audio | wc -l` -le 1 ]]; then
sleep 1.7s
echo -e "This can not be shuffled.\nOnly loop playback is executed.\n"
sleep 1.7s
mpv --loop $PLAYBACK_TARGET
elif [[ `file "$PLAYBACK_TARGET"* | grep -i "ISO Media" | wc -l` -gt 1 ]]; then
mpv --shuffle --loop --no-video-stereo-mode $PLAYBACK_TARGET
elif [[ `file "$PLAYBACK_TARGET"* | grep -i "ISO Media" | wc -l` -le 1 ]]; then
sleep 1.7s
echo -e "This can not be shuffled\nOnly loop playback is executed.\n"
sleep 1.7s
mpv --loop --no-video-stereo-mode $PLAYBACK_TARGET
fi
fi
###Play it normal.###
else
if [[ ! $PLAYBACK_TARGET =~ watch ]]; then
mpv --ytdl-format="bestaudio" $PLAYBACK_TARGET
elif [[ $PLAYBACK_TARGET =~ playlist ]]; then
mpv --ytdl-format="bestaudio" --playlist=$PLAYBACK_TARGET
else
mpv --ytdl-format="bestaudio" $PLAYBACK_TARGET
fi
fi
| true |
13f2e59a5fb3fab7822c57f1e1405bce7231d077
|
Shell
|
xbrineh4ck/Tools
|
/FP-tools.sh
|
UTF-8
| 1,634 | 2.921875 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# FP-tools.sh - interactive Termux helper menu.
# Offers four actions: keep the connection alive with a large-payload ping,
# install the Brutal-Sms tool, install the "perusak" tool, or quit.
clear

# ANSI colour codes. $'...' (ANSI-C quoting) stores a real ESC byte, so the
# colours render with plain echo. The original stored the literal text
# "\033[...m", which bash's builtin echo prints verbatim without -e.
blue=$'\033[34;1m'
green=$'\033[32;1m'
purple=$'\033[35;1m'
cyan=$'\033[36;1m'
red=$'\033[31;1m'
white=$'\033[37;1m'
yellow=$'\033[33;1m'

sleep 1
echo
toilet -f small -F gay Tools
sleep 2
echo
toilet -f mini -F gay Selamat Datang
echo "${yellow}Author:Fajar"
echo "${red}Youtube:Fajar X-Brine"
echo
sleep 2
echo "${white}Silahkan pilih Tools nya :)"
echo
echo "${cyan}1.Stabilkan Jaringan"
echo "${purple}_________________________"
echo "${green}2.Install Spam-sms"
echo "${purple}_________________________"
echo "${blue}3.Install Perusak"
echo "${purple}_________________________"
echo "${red}4.Exit/Keluar"
echo "${purple}_________________________"
echo
echo "${white}"
read -r -p "PilihMana Anyng!: " bro

# A case statement replaces the original chain of redundant
# "[ $bro = N ] || [ $bro = N ]" tests: it is safe for empty or multi-word
# input, and the original file ended with a stray extra "fi" that made the
# whole script a syntax error.
case "$bro" in
	1)
		# Keep-alive: ping with a 1000-byte payload until interrupted.
		clear
		toilet -f mini -F gay Tools
		echo
		echo "${white}Stabilkan jaringan"
		echo
		sleep 1
		echo "${yellow}Loading..."
		ping -s1000 1.1.1.1
		;;
	2)
		# Fetch and set up the Brutal-Sms tool from GitHub.
		clear
		toilet -f mini -F gay Tools
		echo
		echo "${white}Install Spam-sms"
		echo
		sleep 1
		echo "${yellow}Loading..."
		apt update && apt upgrade
		pkg install python git bash
		git clone https://github.com/Sxp-ID/Brutal-Sms
		cd Brutal-Sms
		ls
		pip install -r requirements.txt
		bash install.sh
		;;
	3)
		# Fetch and run the "perusak" tool (python2-based).
		clear
		toilet -f mini -F gay Tools
		echo
		echo "${white}Install Perusak"
		echo
		sleep 1
		echo "${yellow}Loading..."
		apt update
		apt upgrade
		pkg install python2
		pkg install git
		git clone https://github.com/justahackers/perusak
		cd perusak
		python2 perusak.py
		;;
	4)
		clear
		toilet -f mini -F gay Tools
		echo
		echo "${red}Keluar dari SCRIPT!!!"
		sleep 2
		echo
		echo "${white}Thanks telah menggunakan Script ini:)"
		exit
		;;
esac
| true |
c1a1eddfc0736d2e877bbe2d892ebbe93f2ec1ce
|
Shell
|
ekaestner/MMIL
|
/EXTERNAL/MMPS_245/MMPS_245/ksh/lsTouchFiles.ksh
|
UTF-8
| 1,016 | 3.25 | 3 |
[] |
no_license
|
#!/bin/ksh
# Give as an argument a file containing a space-separated list of subject IDs.
# For each subject, walk all of its FREESURFERRECON_* container directories
# (skipping the first, which is the baseline), and report every directory
# whose expected processing-stage "touch" marker file is missing, together
# with the last line of that recon's log as a hint at what went wrong.
#touchFilename=conform.touch # Oddly enough, this one is for affine registration to baseline
#touchFilename=longitudinal_nonlinreg.touch
# Marker file that signals the stage being audited has completed.
touchFilename=longitudinal_nonlinregROI_left_ROIs.touch
#touchFilename=longitudinal_nonlinregROI_right_ROIs.touch
# Root directory holding the per-subject FREESURFERRECON_* containers.
parentPath=/space/md2/2/data/MMILDB/ADNI/Containers
echo Using $1
subjList=`cat $1`
#subjList=`cat ~adniproj/MetaData/ADNI/subjList.txt`
for subj in $subjList; do
# skip=1 makes the first (baseline) directory of each subject be skipped;
# the marker is only expected on longitudinal (follow-up) recons.
skip=1
dirList=`ls -d $parentPath/FREESURFERRECON_${subj}*`
for dir in $dirList; do
if [ $skip = 1 ]; then
skip=0
#echo skipping baseline
continue
fi
touchFile=$dir/touch/$touchFilename
if [ ! -f $touchFile ]; then
echo Could NOT find ${touchFile}
# Show the tail of the recon log to hint at why the stage did not finish.
tail -1 $dir/scripts/fs_recon.log
# if [ -f $touchFile ]; then
# echo Found ${touchFile}
# ls -l $touchFile
# else
# echo Could NOT find ${touchFile}
fi
done
done
| true |
b5e1ffe03dee575991ffbbed40c70a0b8f8fd6cb
|
Shell
|
iridium-browser/iridium-browser
|
/third_party/ruy/src/cmake/run_android_test.sh
|
UTF-8
| 645 | 3.40625 | 3 |
[
"Apache-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"MIT",
"GPL-1.0-or-later",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Push a single test binary to an attached Android device and run it there.
# Contemporary versions of ADB propagate the remote process's exit code,
# so CTest can consume this script's status directly to report pass/fail.
# TODO: consider clearing temporary files after testing, although that will
# get in the way of debugging and will make code more complex... also,
# Ruy's test files aren't huge and people running these probably have
# bigger clutter issues in their /data/local/tmp anyway. Anyway, if we want
# to do this, we could copy IREE's code.
remote_dir=/data/local/tmp
test_binary="$1"
adb push "$test_binary" "${remote_dir}"
adb shell "${remote_dir}/$(basename "$test_binary")"
| true |
3f9b3c52e038713a2e0319f3de8d6d485cc80bd1
|
Shell
|
yyqng/note
|
/shell/exec_upg.sh
|
UTF-8
| 462 | 3.21875 | 3 |
[] |
no_license
|
#!/bin/bash
# Install a pktspy upgrade package: stop the daemon, then unpack the
# xz-compressed tarball over the install prefix.
# Usage: exec_upg.sh <pktspy upg path>
PKTSPY_PATH=/usr/pktspy
if [ -n "$1" ]; then
	UPG=$1
else
	echo "usage: $0 <pktspy upg path>"
	exit 1
fi
# Stop the daemon before overwriting its files.
"$PKTSPY_PATH/script/pktspyd" stop
echo ================ $UPG install upgrade package begin ==================
echo install path : $PKTSPY_PATH
# Use $UPG consistently (the original mixed "$1" and "$UPG") and quote it
# so paths containing spaces work.
tar xJvf "$UPG" -C "$PKTSPY_PATH"
echo ================ $UPG install upgrade package end ====================
#$PKTSPY_PATH/script/pktspyd start
#$PKTSPY_PATH/script/plist_pktspy.sh
#$PKTSPY_PATH/script/plist_pktspy.sh
| true |
d34dbc2b2aa7a23302aa3513d715f98f18b8285f
|
Shell
|
shigemk2/DB_TEST_MK2
|
/insert.sh
|
UTF-8
| 187 | 3.296875 | 3 |
[] |
no_license
|
#!/bin/sh
# Run tweet.rb the number of times given as the first argument.
# $1 must be a non-negative integer.
a=0
while [ "$a" -ne "$1" ]
do
	ruby tweet.rb
	# POSIX arithmetic expansion instead of spawning expr(1) per iteration.
	a=$((a + 1))
	echo "${a} 回目の処理"
done
| true |
584cb98299580cc92b4addd341f6ea34167eb79b
|
Shell
|
walacereis-a11/serenity
|
/Ports/SDL2_ttf/package.sh
|
UTF-8
| 525 | 2.765625 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env -S bash ../.port_include.sh
# SerenityOS port script for SDL2_ttf (TrueType font rendering for SDL2).
# The sourced ../.port_include.sh framework drives download/build/install;
# this file only declares port metadata and overrides the configure step.
port=SDL2_ttf
version=2.0.15
useconfigure=true
# "<download URL> <local filename>" pair consumed by the ports framework.
files="https://www.libsdl.org/projects/SDL_ttf/release/SDL2_ttf-${version}.tar.gz SDL2_ttf-${version}.tar.gz"
depends="SDL2 freetype"
# Cross-configure against the Serenity sysroot; X11 is disabled and the
# freetype headers/libraries are pointed at the build root explicitly.
configure() {
run ./configure \
--host="${SERENITY_ARCH}-pc-serenity" \
--with-sdl-prefix="${SERENITY_BUILD_DIR}/Root/usr" \
--with-x=no \
FT2_CFLAGS="-I${SERENITY_BUILD_DIR}/Root/usr/local/include/freetype2" \
LIBS="-lgui -lgfx -lipc -lcore -lcompress"
}
| true |
e49ec9b541de5fddc7c4e34ae52547fe85fe02e9
|
Shell
|
Kv-045DevOps/ingress-part
|
/create-cluster.sh
|
UTF-8
| 1,749 | 2.859375 | 3 |
[] |
no_license
|
#!/bin/bash
# Provision a kops-managed Kubernetes cluster on AWS, install helm and an
# nginx ingress controller, then wire DNS: create a Route53 hosted zone,
# point a CNAME at the ingress load balancer, and delegate the domain's
# nameservers at GoDaddy.
# aws s3api create-bucket --bucket k8s-storage-2 --region eu-central-1a --create-bucket-configuration LocationConstraint=eu-west-1
export NAME=cluster.k8s.local
# kops stores cluster state in this S3 bucket (must exist beforehand).
export KOPS_STATE_STORE=s3://k8s-storage-2
kops create cluster --zones eu-central-1a --master-size=t2.micro --node-size=t2.micro ${NAME}
kops create secret --name cluster.k8s.local sshpublickey admin -i ./aws_k8s_key.pub
kops update cluster ${NAME} --yes
# Poll until the cluster validates; validation fails while nodes come up.
while [ 1 ]; do
kops validate cluster && break || sleep 30
done;
./get_helm.sh
helm init --wait
kubectl apply -f kubectl/acc-helm.yml
helm install stable/cert-manager --wait
helm install --namespace kube-system --name nginx-ingress stable/nginx-ingress --set rbac.create=true --wait
kubectl apply -f kubectl/services.yml
kubectl apply -f kubectl/issuer.yml
export DOMAIN=cooki3.com
# NOTE(review): GoDaddy API credentials are intentionally blank here and
# presumably filled in before running — confirm.
export GODADDY_KEY=
export GODADDY_SECRET=
aws route53 create-hosted-zone --name ${DOMAIN}. --caller-reference `date +%Y-%m-%d-%H:%M`
sleep 10
# External hostname of the ingress controller's load balancer service.
export LB_URL=`kubectl get svc -n kube-system | grep nginx-ingress-controller | tr -s ' ' | cut -d ' ' -f4`
envsubst < cname_template.json > cname.json
export ZONE_ID=`aws route53 list-hosted-zones --output text | grep -w ${DOMAIN}.|awk '{print $3}' | cut -d'/' -f3`
sleep 10
aws route53 change-resource-record-sets --hosted-zone-id "$ZONE_ID" --change-batch file://cname.json
# Extract the zone's delegated nameservers for the GoDaddy update below.
aws route53 get-hosted-zone --id "$ZONE_ID" | jq '.DelegationSet.NameServers' > zone-ns.json
python3 set-ns.py
curl -X PUT -H "Authorization: sso-key $GODADDY_KEY:$GODADDY_SECRET" -H "Content-Type: application/json" -T domain-update.json "https://api.godaddy.com/v1/domains/$DOMAIN/records"
rm -f zone-ns.json domain-update.json
envsubst < kubectl/ingress2.yml > tmp.yml
kubectl apply -f tmp.yml
rm -f tmp.yml
| true |
dadb5cb8376a646a7ce3bede5c40cdc4acdc072e
|
Shell
|
joaquincasarino85/vagrant_configuration
|
/Vagrant.bootstrap.sh
|
UTF-8
| 1,042 | 3.1875 | 3 |
[] |
no_license
|
#!/bin/bash
### Software provisioning ###
# Refresh the VM's package index.
sudo apt-get update
# Install a web server.
sudo apt-get install -y apache2

### Environment configuration ###
## Create a swap file. Prevents out-of-memory errors on small VMs.
if [ ! -f "/swapdir/swapfile" ]; then
	sudo mkdir /swapdir
	cd /swapdir
	# ~2 GB swap file (2,000,000 blocks of 1 KiB).
	sudo dd if=/dev/zero of=/swapdir/swapfile bs=1024 count=2000000
	sudo mkswap -f /swapdir/swapfile
	sudo chmod 600 /swapdir/swapfile
	sudo swapon swapfile
	# Bug fix: the original passed /etc/fstab twice to tee, which appended
	# the swap entry twice. Append it once.
	echo "/swapdir/swapfile none swap sw 0 0" | sudo tee -a /etc/fstab
	sudo sysctl vm.swappiness=10
	echo vm.swappiness = 10 | sudo tee -a /etc/sysctl.conf
fi

# Web server document root.
APACHE_ROOT="/var/www"
# Application path.
APP_PATH="$APACHE_ROOT/utn-devops-app"

# PHP 7.2 and the extensions the app needs (sudo added for consistency
# with the apt-get calls above).
sudo apt-get install libapache2-mod-php7.2 php7.2 php7.2-mysql php7.2-sqlite -y
sudo apt-get install php7.2-mbstring php7.2-curl php7.2-intl php7.2-gd php7.2-zip php7.2-bz2 -y
sudo apt-get install php7.2-dom php7.2-xml php7.2-soap -y
| true |
fb927a8c54f55325aaed853a571dc80f9e7bb64d
|
Shell
|
mcaravario/rn-tps
|
/gen_makefile.sh
|
UTF-8
| 1,489 | 3.609375 | 4 |
[] |
no_license
|
#!/usr/bin/bash
# Generate a Makefile on stdout for the neural-network experiments.
# Reads red_ej1.txt / red_ej2.txt, where each non-comment line has the form
# "<output file>:<runner arguments>", and emits:
#   - DATA_EJ1 / DATA_EJ2 variables listing all target .dat files,
#   - a pattern rule turning each .dat into an _errors.png plot,
#   - one explicit rule per .dat invoking ej1-runner.py / ej2-runner.py,
#   - the accuracy-table rule and a clean target.
# Map: makefile target path -> runner arguments (exercise 1).
declare -A deps_ej1
# Same mapping for exercise 2.
declare -A deps_ej2
# Parse red_ej1.txt, skipping blank lines and '#' comments.
while read line; do
if [[ ! -z "$line" ]] && [[ ! $line =~ ^#+ ]]; then
output="$(echo "${line}" | cut -f 1 -d ':')"
cmd="$(echo "${line}" | cut -f 2 -d ':')"
deps_ej1["tp1/ej1/pruebas/${output}"]="$cmd"
fi
done < red_ej1.txt
# Parse red_ej2.txt the same way.
while read line; do
if [[ ! -z "$line" ]] && [[ ! $line =~ ^#+ ]]; then
output="$(echo "${line}" | cut -f 1 -d ':')"
cmd="$(echo "${line}" | cut -f 2 -d ':')"
deps_ej2["tp1/ej2/pruebas/${output}"]="$cmd"
fi
done < red_ej2.txt
# Makefile header. In the unquoted heredocs below, "\$" and "\\" escape $
# and backslash so they reach the Makefile literally.
cat <<EOT
PYTHON=python3
SCRIPT_TABLA_EJ1=tp1/ej1/build_table.sh
TABLA_EJ1=tp1/ej1/pruebas/aciertos.txt
DATA_EJ1=\\
EOT
# Emit each exercise-1 target as a backslash-continued list entry.
for key in ${!deps_ej1[@]}; do
echo "${key} \\"
done
cat <<EOT
DATA_EJ2=\\
EOT
for key in ${!deps_ej2[@]}; do
echo "${key} \\"
done
# Derived plot lists, the default target and the plotting pattern rule.
cat <<EOT
ERRORES_EJ1=\$(subst .dat,_errors.png,\${DATA_EJ1})
ERRORES_EJ2=\$(subst .dat,_errors.png,\${DATA_EJ2})
all: \${ERRORES_EJ1} \${ERRORES_EJ2} \${DATA_EJ1} \${DATA_EJ2} \${TABLA_EJ1}
%_errors.png: %.dat
gnuplot -e "datafile='$<'" tp1/plot/errores.gpi > \$@
EOT
# One explicit rule per data file: run the exercise runner with the
# arguments recorded from the red_ej*.txt config.
for key in ${!deps_ej1[@]}; do
echo "${key}:"
echo -e "\t\${PYTHON} ./ej1-runner.py ${deps_ej1[$key]} > \$@"
done
for key in ${!deps_ej2[@]}; do
echo "${key}:"
echo -e "\t\${PYTHON} ./ej2-runner.py ${deps_ej2[$key]} > \$@"
done
# Accuracy table and clean target.
cat <<EOT
\${TABLA_EJ1}: \${DATA_EJ1}
\${SCRIPT_TABLA_EJ1} \$^ > \$@
clean:
rm -rf \${ERRORES_EJ1} \${ERRORES_EJ2} \${DATA_EJ1} \${DATA_EJ2} \${TABLA_EJ1}
.PHONY: clean
EOT
| true |
c1b4760cc74b654f2ffadbb0336a53e5441c576d
|
Shell
|
jfqd/mi-qutic-varnish
|
/customize
|
UTF-8
| 1,443 | 2.859375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/bash
# SmartOS image customization script: build and install Varnish from
# source, enable its munin plugins, create the varnish user/group, import
# the SMF service manifests, then clean up and prepare the image.
PATH=/opt/local/gnu/bin:/opt/local/bin:/opt/local/sbin:/usr/bin:/usr/sbin
# Exit if any commands fail
set -o errexit
VARNISH_VERSION="5.2.1"
# Munin plugins to activate (one per line; word-split on purpose below).
MUNIN_PLUGINS="
varnish_main_uptime
varnish_hit_rate
varnish_expunge
varnish_threads
varnish_mgt_uptime
varnish_backend_traffic
varnish_bad
varnish_objects
varnish_memory_usage
varnish_request_rate
varnish_transfer_rates
"
echo "* Activate munin plugins"
/opt/qutic/bin/munin-node-plugins ${MUNIN_PLUGINS}
echo "* Build varnish"
mkdir /opt/qutic/src
cd /opt/qutic/src
# get it from: https://github.com/varnishcache/varnish-cache/releases
curl -s -L -O https://download.qutic.com/src/varnish/varnish-${VARNISH_VERSION}.tgz
tar xf varnish-${VARNISH_VERSION}.tgz
cd varnish-${VARNISH_VERSION}
./configure --prefix=/opt/local
gmake
gmake install
echo "* Create varnish guid and uid"
# Fixed gid/uid 201 so ownership is stable across image rebuilds.
groupadd -g 201 varnish
useradd -m -s /usr/bin/false -u 201 -g varnish varnish
# passwd -N varnish
# passwd -d varnish
# Register the SMF (service management facility) manifests.
svccfg import /opt/local/lib/svc/manifest/varnish.xml
svccfg import /opt/local/lib/svc/manifest/varnish-log.xml
mkdir -p /var/run/varnishd
mkdir -p /opt/local/var/varnish
chown -R varnish:varnish /var/run/varnishd
chown -R varnish:varnish /opt/local/var/varnish
touch /var/log/varnish.log
chown varnish:varnish /var/run/varnishd
# Clean up
echo "* Cleaning up."
cd /root
rm -rf /opt/qutic/src
# Remove this script itself so it is not baked into the image.
rm /root/customize
# Prepare image for provisioning
sm-prepare-image -y
| true |
bf5b501bbe1a63ac380c6ba8ef7056709021e384
|
Shell
|
ThesisDistributedProduction/HPPP_Controller
|
/ScriptsHelper/runXDecentralized.sh
|
UTF-8
| 731 | 3.25 | 3 |
[] |
no_license
|
#!/bin/bash
# Launch N DecentralicedClient instances with consecutive ids.
# Usage: runXDecentralized.sh [count] [start-id] [msleep]
# All instances except the first run in the background; the instance with
# id START runs in the foreground so the script blocks until it exits,
# after which all remaining instances are killed.
N=1
START=1
# Default sleep interval (ms) passed to each client.
mSleep=20
if [ -n "$3" ]; then
mSleep=$3
fi
if [ -n "$2" ]; then
START=$2
fi
if [ -n "$1" ]; then
# Last id to launch: START + count - 1.
N=$(($1+START-1))
fi
cd ../DecentralizedParkPilot
echo "Starting nodes form $START to start $N"
# Background all ids after START; START itself is launched last, in the
# foreground, so its lifetime controls the script's.
for (( i=START+1; i<=N; i++ )); do
echo "staring with id: " $i "and mSleep: " $mSleep
build/DecentralicedClient -id $i -msleep $mSleep -s > /dev/null&
done
echo "staring with id: " $START "and mSleep: " $mSleep
build/DecentralicedClient -id $START -msleep $mSleep -s
cd ..
#echo
#echo "staring instances please wait..."
#echo
#sleep 5s
#echo $(ps -a | grep DecentralizedPa | wc -l) instances are running
#echo
#echo "Press any key to kill all instances"
#read
# Tear down every remaining client once the foreground instance exits.
killall DecentralizedParkPilot
| true |
458237c8004274ac3e5927fd8d74411e46585be2
|
Shell
|
FauxFaux/debian-control
|
/f/fonts-ipaexfont/fonts-ipaexfont-gothic_00301-5_all/preinst
|
UTF-8
| 956 | 3.390625 | 3 |
[] |
no_license
|
#!/bin/sh
# Debian maintainer script (preinst) for fonts-ipaexfont-gothic.
# On install/upgrade from a version older than each threshold below,
# remove the obsolete update-alternatives entries that earlier package
# versions registered under different names/paths.
set -e
# Version thresholds at which the alternatives layout changed.
VERSION=00103-1
VERSION_2=00103-3
VERSION_3=00103-6
OLD_ALT_NAME=ttf-japanese-gothic
FONT_ENTRY=/usr/share/fonts/opentype/ipaexfont/ipaexg.ttf
FONT_ENTRY_2=/usr/share/fonts/opentype/ipaexfont/ipaexg.ttf
FONT_ENTRY_3=/usr/share/fonts/opentype/fonts-ipaexfont-gothic/ipaexg.ttf
# $1 = dpkg action, $2 = previously installed version (empty on first
# install; lt-nl is false when $2 is empty).
case "$1" in
install|upgrade)
if dpkg --compare-versions "$2" lt-nl "$VERSION"; then
# remove old alternative otf symlinks.
update-alternatives --remove $OLD_ALT_NAME.otf $FONT_ENTRY
fi
if dpkg --compare-versions "$2" lt-nl "$VERSION_2"; then
if [ -d /usr/share/fonts/opentype/ipaexfont ]; then
update-alternatives --remove $OLD_ALT_NAME.ttf $FONT_ENTRY_2
fi
fi
if dpkg --compare-versions "$2" lt-nl "$VERSION_3"; then
if [ -d /usr/share/fonts/opentype/fonts-ipaexfont-gothic ]; then
update-alternatives --remove $OLD_ALT_NAME.ttf $FONT_ENTRY_3
fi
fi
esac
exit 0
| true |
337c5a5c74fb64e454ddfa30dabbdb79dd471d0f
|
Shell
|
oyvindronningstad/trusted-firmware-m-zephyr-module
|
/init-git.sh
|
UTF-8
| 1,051 | 2.78125 | 3 |
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Fetch the pinned dependency trees for the TF-M Zephyr module.
# Each repository is cloned --bare into <dir>/.git at a pinned tag, then a
# work tree is materialized with "git init" + "git checkout -f". Each clone
# is skipped if <dir>/.git already exists, so the script is re-runnable.
TFM_GIT_SHA=TF-Mv1.0
CMSIS_5_GIT_SHA=5.5.0
MBED_CRYPTO_GIT_SHA=mbedcrypto-3.0.1
PSA_ARCH_TESTS_GIT_SHA=v20.03_API1.0
if [ ! -d trusted-firmware-m/.git ]; then
git clone https://git.trustedfirmware.org/trusted-firmware-m.git -b ${TFM_GIT_SHA} --bare trusted-firmware-m/.git
cd trusted-firmware-m
git init
git checkout -f
cd ..
fi
if [ ! -d CMSIS_5/.git ]; then
git clone https://github.com/ARM-software/CMSIS_5.git -b ${CMSIS_5_GIT_SHA} --bare CMSIS_5/.git
cd CMSIS_5
git init
# Only the CMSIS/RTOS2 subtree is needed; its large files live in
# git-lfs, so fetch and check those out explicitly.
git checkout -f ${CMSIS_5_GIT_SHA} CMSIS/RTOS2
git lfs fetch
git lfs checkout CMSIS/RTOS2
cd ..
fi
if [ ! -d mbed-crypto/.git ]; then
git clone https://github.com/ARMmbed/mbed-crypto.git -b ${MBED_CRYPTO_GIT_SHA} --bare mbed-crypto/.git
cd mbed-crypto
git init
git checkout -f
cd ..
fi
if [ ! -d psa-arch-tests/.git ]; then
git clone https://github.com/ARM-software/psa-arch-tests.git -b ${PSA_ARCH_TESTS_GIT_SHA} --bare psa-arch-tests/.git
cd psa-arch-tests
git init
git checkout -f
cd ..
fi
| true |
539e376cf859dbf79180a5059eb303af4706bce3
|
Shell
|
ramayanaocr/ocr-comparison
|
/Tesseract/projects/tessdata_sanskrit/ram.sh
|
UTF-8
| 3,632 | 3.125 | 3 |
[] |
no_license
|
#!/bin/bash
# Fine-tune the Tesseract Devanagari LSTM model on the Ramayana ground
# truth: build WordStr box files and .lstmf training data from the PNGs,
# train for increasing iteration counts while evaluating each checkpoint,
# export float and integer models, and compare against the stock
# Devanagari and Sanskrit models.
rm -rf build
mkdir build
my_files=$(ls tessdata_ramayana/*.png )
# Stage 1: for each page image, generate a WordStr box file paired with
# its ground-truth text, then produce the .lstmf training file.
for my_file in ${my_files}; do
f=${my_file%.*}
echo $f
#time tesseract $f.png $f -l script/Devanagari --psm 6 --dpi 300 wordstrbox
time tesseract $f.png $f -l script/Devanagari --dpi 300 wordstrbox
mv "$f.box" "$f.wordstrbox"
# Strip recognized text after "#" so only box geometry remains.
sed -i -e "s/ \#.*/ \#/g" "$f.wordstrbox"
sed -e '/^$/d' "$f.gt.txt" > build/tmp.txt
sed -e 's/$/\n/g' build/tmp.txt > "$f.gt.txt"
# Join geometry with ground-truth lines into the final box file.
paste --delimiters="\0" "$f.wordstrbox" "$f.gt.txt" > "$f.box"
rm "$f.wordstrbox" build/tmp.txt
#time tesseract "$f.png" "$f" -l script/Devanagari --psm 6 --dpi 300 lstm.train
time tesseract "$f.png" "$f" -l script/Devanagari --dpi 300 lstm.train
done
# Unpack the base model so its .lstm component can be fine-tuned.
combine_tessdata -u ~/tessdata_best/script/Devanagari.traineddata ~/tessdata_best/script/Devanagari.
# Training list excludes eval pages; eval list contains only them.
ls -1 tessdata_ramayana/*.lstmf > build/san.ram.training_files.txt
sed -i -e '/eval/d' build/san.ram.training_files.txt
ls -1 tessdata_ramayana/*eval*.lstmf > build/san.ram_eval.training_files.txt
rm build/*checkpoint ram*.traineddata
# review the console output with debug_level -1 for 100 iterations
# if there are glaring differences in OCR output and groundtruth, review the Wordstr box files
lstmtraining \
--model_output build/ram \
--continue_from ~/tessdata_best/script/Devanagari.lstm \
--traineddata ~/tessdata_best/script/Devanagari.traineddata \
--train_listfile build/san.ram.training_files.txt \
--debug_interval -1 \
--max_iterations 100
# eval error is minimum at 700 iterations
for num_iterations in {300..700..100}; do
lstmtraining \
--model_output build/ram \
--continue_from ~/tessdata_best/script/Devanagari.lstm \
--traineddata ~/tessdata_best/script/Devanagari.traineddata \
--train_listfile build/san.ram.training_files.txt \
--debug_interval 0 \
--max_iterations $num_iterations
# Freeze the current checkpoint into a usable traineddata and score it.
lstmtraining \
--stop_training \
--continue_from build/ram_checkpoint \
--traineddata ~/tessdata_best/script/Devanagari.traineddata \
--model_output san_ram.traineddata
lstmeval \
--verbosity -1 \
--model san_ram.traineddata \
--eval_listfile build/san.ram_eval.training_files.txt
done
time tesseract tessdata_ramayana/ram-eval.png build/ram-eval -l san_ram --tessdata-dir ./ --psm 6 --dpi 300
wdiff -3 -s tessdata_ramayana/ram-eval.gt.txt build/ram-eval.txt
echo "Convert to Integer Model"
# Quantized (integer) export of the final checkpoint: smaller and faster.
lstmtraining \
--stop_training \
--convert_to_int \
--continue_from build/ram_checkpoint \
--traineddata ~/tessdata_best/script/Devanagari.traineddata \
--model_output san_ram_int.traineddata
lstmeval \
--verbosity -1 \
--model san_ram_int.traineddata \
--eval_listfile build/san.ram_eval.training_files.txt
time tesseract tessdata_ramayana/ram-eval.png build/ram_int-eval -l san_ram_int --tessdata-dir ./ --psm 6 --dpi 300
wdiff -3 -s tessdata_ramayana/ram-eval.gt.txt build/ram_int-eval.txt
echo "Compare with Devanagari"
# Baseline: the unmodified Devanagari script model.
lstmeval \
--verbosity -1 \
--model ~/tessdata_best/script/Devanagari.traineddata \
--eval_listfile build/san.ram_eval.training_files.txt
time tesseract tessdata_ramayana/ram-eval.png build/ram-eval-deva -l script/Devanagari --psm 6 --dpi 300
wdiff -3 -s tessdata_ramayana/ram-eval.gt.txt build/ram-eval-deva.txt
echo "Compare with Sanskrit"
# Baseline: the stock Sanskrit language model.
lstmeval \
--verbosity -1 \
--model ~/tessdata_best/san.traineddata \
--eval_listfile build/san.ram_eval.training_files.txt
time tesseract tessdata_ramayana/ram-eval.png build/ram-eval-san -l san --psm 6 --dpi 300
wdiff -3 -s tessdata_ramayana/ram-eval.gt.txt build/ram-eval-san.txt
| true |
c61b83d91d811f699a4828194d3cd9ef5940ae29
|
Shell
|
matthew-r-richards/vagrant
|
/orientdb/vagrant.sh
|
UTF-8
| 3,159 | 2.734375 | 3 |
[] |
no_license
|
#!/bin/bash
# Vagrant provisioner: install JDK 8 and tooling, then (first run only)
# download and install OrientDB 2.0-M3 plus Gremlin server/console under
# /opt, register OrientDB as an init.d service, and start everything.
echo install JDK 8, git ant wget curl vim unzip
sudo apt-get install software-properties-common python-software-properties -y
sudo add-apt-repository ppa:openjdk-r/ppa -y
sudo apt-get update -y
sudo apt-get upgrade -y
sudo apt-get install openjdk-8-jdk git ant wget curl vim zip unzip -y

echo Move to JDK 1.8
sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java

# One-time installation guarded by the existence of /opt/orientdb.
if [[ ! -e /opt/orientdb ]] ; then
echo Download packages
# to use last stable version 1.7.8
#wget http://www.orientdb.org/portal/function/portal/download/unknown@unknown.com/-/-/-/-/-/orientdb-community-1.7.8.tar.gz/false/false/linux -O /home/vagrant/orientdb-community.tar.gz -c
#For version 2.0 milestone 3
# Bug fix: the URL contains "&", so unquoted it backgrounded the wget and
# ran "os=linux -c" as a separate command. Quote the whole URL.
wget -O /home/vagrant/orientdb-community.tar.gz "http://www.orientechnologies.com/download.php?file=orientdb-community-2.0-M3.tar.gz&os=linux" -c
wget -O /home/vagrant/gremlin-server.zip "http://tinkerpop.com/downloads/3.0.0.M4/gremlin-server-3.0.0.M4.zip" -c
wget -O /home/vagrant/gremlin-console.zip "http://tinkerpop.com/downloads/3.0.0.M4/gremlin-console-3.0.0.M4.zip" -c

echo Unpacking
cd /home/vagrant/
sudo tar -zxvf orientdb-community.tar.gz
sudo unzip gremlin-server.zip
sudo unzip gremlin-console.zip

echo Move directories to /opt/
# to use last stable version 1.7.8
# sudo mv orientdb-community-1.7.8 /opt/orientdb
#For version 2.0 mileston 1
sudo mv orientdb-community-2.0-M3/ /opt/orientdb
sudo mv gremlin-server-3.0.0.M4 /opt/gremlin-server
sudo mv gremlin-console-3.0.0.M4 /opt/gremlin-console

echo Give rights
sudo chmod -R 777 /opt/orientdb/log
sudo chmod -R 777 /opt/orientdb/bin
sudo chmod -R 777 /opt/orientdb/config/
sudo chmod -R 777 /opt/orientdb/databases/

echo Add Environment Variables
echo "export ORIENTDB_ROOT_PASSWORD=\"RootPaSSword\"" >> /home/vagrant/.bashrc
echo "export ORIENTDB_NODE_NAME=\"ORIENTDB_FIRSTNODE\"" >> /home/vagrant/.bashrc

echo Add System variables and path
echo PATH $PATH
echo "export ORIENTDB_HOME=\"/opt/orientdb\"" >> /home/vagrant/.bashrc
echo "export PATH=\$PATH:\$ORIENTDB_HOME/bin" >> /home/vagrant/.bashrc
echo "export GREMLIN_HOME=\"/opt/gremlin-console\"" >> /home/vagrant/.bashrc
echo "export PATH=\$PATH:\$GREMLIN_HOME/bin" >> /home/vagrant/.bashrc

echo Move OrientDB conf file and Remove read permission permissions
sudo cp /home/vagrant/orientdb-server-config.xml /opt/orientdb/config/

# Create user orientdb and assign ownership
#sudo useradd -d /opt/orientdb -M -r -s /bin/false -U orientdb
#sudo chown -R orientdb.orientdb orientdb*
#sudo usermod -a -G orientdb orientdb

echo Copy the init.d script:
sudo cp /home/vagrant/orientdb.sh /etc/init.d/

echo Update the rc.d dirs
cd /etc/init.d
sudo update-rc.d orientdb.sh defaults
sudo chmod 777 /etc/init.d/orientdb.sh
fi

#/opt/orientdb/bin/server.sh
sudo /etc/init.d/orientdb.sh start

# Prevent Gremlin Console in Orient_db (2.6) to be called by default. 3.0 is called.
mv /opt/orientdb/bin/gremlin.sh /opt/orientdb/bin/gremlin_26.sh

cd /opt/gremlin-server
sudo bin/gremlin-server.sh config/gremlin-server-classic.yaml &
| true |
c3b76330c2685190d9688441371ffdf68e4dc7a0
|
Shell
|
nishanthprakash/GestureRecognition_Kinect-ROS
|
/gesture/src/start.sh
|
UTF-8
| 786 | 3.34375 | 3 |
[] |
no_license
|
#!/bin/sh
# Interactive menu for the gesture ROS package: train new gestures,
# run live recognition, or exit. Loops until option 3 is chosen.
export ROS_PACKAGE_PATH=~/GR:$ROS_PACKAGE_PATH
while :
do
echo
echo "Choose : 1.Gesture Training 2.Gesture Recognition 3.Exit"
read response
case "$response" in
1) roslaunch gesture handposition.launch > /dev/null &
# Number the new training session after the existing data files.
count=`ls ../data | wc -l`
echo "Starting Training No. : `expr $count + 1`"
# Give the launch file time to bring the Kinect pipeline up.
sleep 20
rosrun gesture trainsub
echo "Do you want the previous training data to be used for training the ANN ? (y/n)"
read response
case "$response" in
y|Y) rosrun gesture trainannv
# NOTE(review): "~" inside quotes is not expanded by the shell, so this
# literally targets a file named "~/training.data" — confirm intent.
mv "~/training.data" "../data/tr$count"
;;
n|N) echo "Gesture training data discarded !!!"
rm "~/training.data"
;;
esac
;;
2) roslaunch gesture handposition.launch
# NOTE(review): this bare "sh" starts an interactive shell before the
# recognizer runs; it may be a leftover/typo — confirm.
sh
rosrun gesture recsub
;;
3) break
;;
esac
done
| true |
de1111aeaf5e77f0682478139c966a5a43dc33ea
|
Shell
|
ytyaru/Shell.RasPiOs.20200820.20201002102428
|
/src/5_install_apps/src/font/install_font_idkana014.sh
|
UTF-8
| 763 | 3 | 3 |
[
"CC0-1.0"
] |
permissive
|
#!/bin/bash
set -Ceu
#-----------------------------------------------------------------------------
# install_font_idkana014.sh
# Download the free ID-Kana fonts (kana014/kana026), install them into the
# user's ~/.fonts directory, and preview them.
# http://idfont.jp/free-kanji/free-kana.html
# Created: 2019-03-21 18:30:12
#-----------------------------------------------------------------------------
Run() {
	# Work in a scratch directory so downloads don't litter the cwd.
	mkdir -p /tmp/work
	cd /tmp/work
	wget http://idfont.jp/free-kanji/kana_pac2.zip
	wget http://idfont.jp/free-kanji/kana_pac3.zip
	unzip kana_pac2.zip
	unzip kana_pac3.zip
	# Per-user font directory; fontconfig picks fonts up from here.
	mkdir -p ~/.fonts
	cp -a /tmp/work/kana014.ttc ~/.fonts/kana014.ttc
	cp -a /tmp/work/kana026.ttc ~/.fonts/kana026.ttc
	# Verify registration and open a preview of each face.
	fc-list | grep 'kana014'
	display /home/pi/.fonts/kana014.ttc
	fc-list | grep 'kana026'
	display /home/pi/.fonts/kana026.ttc
	rm -Rf kana_pac2.zip
	rm -Rf kana_pac3.zip
}
Run
| true |
9f81f706299898515989d0f67218def0d9535d45
|
Shell
|
axitkhurana/dotfiles
|
/bashrc
|
UTF-8
| 2,610 | 3.453125 | 3 |
[] |
no_license
|
# Sample .bashrc for SuSE Linux
# Copyright (c) SuSE GmbH Nuernberg

# There are 3 different types of shells in bash: the login shell, normal shell
# and interactive shell. Login shells read ~/.profile and interactive shells
# read ~/.bashrc; in our setup, /etc/profile sources ~/.bashrc - thus all
# settings made here will also take effect in a login shell.
#
# NOTE: It is recommended to make language settings in ~/.profile rather than
# here, since multilingual X sessions would not work properly if LANG is over-
# ridden in every subshell.

# Some applications read the EDITOR variable to determine your favourite text
# editor. So uncomment the line below and enter the editor of your choice :-)
export EDITOR=/usr/bin/vim
#export EDITOR=/usr/bin/mcedit

# For some news readers it makes sense to specify the NEWSSERVER variable here
#export NEWSSERVER=your.news.server

# If you want to use a Palm device with Linux, uncomment the two lines below.
# For some (older) Palm Pilots, you might need to set a lower baud rate
# e.g. 57600 or 38400; lowest is 9600 (very slow!)
#
#export PILOTPORT=/dev/pilot
#export PILOTRATE=115200

# Source user aliases if present; "|| true" keeps the sourcing shell's
# exit status clean when the file is missing.
test -s ~/.alias && . ~/.alias || true

# Colors
use_color=false

# Set colorful PS1 only on colorful terminals.
# dircolors --print-database uses its own built-in database
# instead of using /etc/DIR_COLORS. Try to use the external file
# first to take advantage of user additions. Use internal bash
# globbing instead of external grep binary.
safe_term=${TERM//[^[:alnum:]]/?} # sanitize TERM
match_lhs=""
[[ -f ~/.dir_colors ]] &&
match_lhs="${match_lhs}$(<~/.dir_colors)"
[[ -f /etc/DIR_COLORS ]] &&
match_lhs="${match_lhs}$(</etc/DIR_COLORS)"
[[ -z ${match_lhs} ]] \
&& type -P dircolors >/dev/null \
&& match_lhs=$(dircolors --print-database)
# Terminal supports color iff a "TERM <name>" entry exists in the database.
[[ $'\n'${match_lhs} == *$'\n'"TERM "${safe_term}* ]] &&
use_color=true

if ${use_color} ; then
	# Enable colors for ls, etc. Prefer ~/.dir_colors
	#64489
	if type -P dircolors >/dev/null ; then
		if [[ -f ~/.dir_colors ]] ; then
			eval $(dircolors -b ~/.dir_colors)
		elif [[ -f /etc/DIR_COLORS ]] ; then
			eval $(dircolors -b /etc/DIR_COLORS)
		fi
	fi

	# Red hostname prompt for root, green user@host for everyone else.
	if [[ ${EUID} == 0 ]] ; then
		PS1='\[\033[01;31m\]\h\[\033[01;34m\] \W
\$\[\033[00m\] '
	else
		PS1='\[\033[01;32m\]\u@\h\[\033[01;34m\] \w
\$\[\033[00m\] '
	fi

	alias ls='ls --color=auto'
	alias grep='grep --colour=auto'
else
	if [[ ${EUID} == 0 ]] ; then
		# show root@ when we do not have colors
		PS1='\u@\h \W \$ '
	else
		PS1='\u@\h \w \$ '
	fi
fi

# Try to keep environment pollution down, EPA loves us.
unset use_color safe_term match_lhs
| true |
da6d24bf54d44261fe96bdc2bf4dac3d6f874a1a
|
Shell
|
HenriqueMCastro/kafka-hello-world
|
/startCluster.sh
|
UTF-8
| 2,375 | 2.609375 | 3 |
[] |
no_license
|
#!/bin/bash
# Bring up the demo Kafka stack on the docker network "mynetwork":
# a 3-node zookeeper ensemble, 5 kafka brokers, kafka-manager, a client,
# logkafka, logstash, elasticsearch and a kafka-connect-elasticsearch sink.
# Images for locally-built services are built from same-named directories.
# zookeeper nodes
zookeeperServers="zookeeper-node-1,zookeeper-node-2,zookeeper-node-3"
for id in `seq 1 3`;
do
docker run -d --net=mynetwork -h zookeeper-node-${id} -e ZOO_LOG_DIR=/var/log/zookeeper -e MYID=${id} -e SERVERS=${zookeeperServers} --name=zookeeper-node-${id} --net=mynetwork mesoscloud/zookeeper:3.4.6-ubuntu-14.04
done
# Zookeeper connect string handed to every kafka-dependent container.
ZK_HOSTS="zookeeper-node-1:2181,zookeeper-node-2:2181,zookeeper-node-3:2181"
docker build -t kafka-node kafka-node
# Five brokers, each with a unique BROKER_ID and hostname.
for id in `seq 1 5`;
do
docker run --privileged -d -t --net=mynetwork -h kafka-node-${id} --name kafka-node-${id} -e ZK_HOSTS=${ZK_HOSTS} -e BROKER_ID=${id} -e HOST_NAME=kafka-node-${id} kafka-node
done
# Web UI for cluster administration on host port 9000.
docker build -t kafka-manager kafka-manager
docker run --privileged -d -t --net=mynetwork --name kafka-manager -p 9000:9000 -e ZK_HOSTS=${ZK_HOSTS} kafka-manager
BOOTSTRAP_SERVERS="kafka-node-1:9092,kafka-node-2:9092,kafka-node-3:9092,kafka-node-4:9092,kafka-node-5:9092"
docker build -t kafka-client kafka-client
docker run -d -t --net=mynetwork --name kafka-client -v /logs:/logs -e BOOTSTRAP_SERVERS=${BOOTSTRAP_SERVERS} kafka-client
# logkafka ships files from the shared /logs volume into kafka.
docker build -t logkafka logkafka
docker run --privileged -d -t --net=mynetwork -h logkafka --name logkafka -v /logs:/logs -e ZK_HOSTS=${ZK_HOSTS} logkafka
#docker run --privileged -d -t --net=mynetwork -h graylog --name graylog -p 8080:9000 -p 12201:12201 graylog2/allinone
LOGSTASH="logstash-kafka"
docker build -t ${LOGSTASH} ${LOGSTASH}
docker run --privileged -d -t --net=mynetwork -h ${LOGSTASH} --name ${LOGSTASH} -e LOGSTASH_CONF=/opt/logstash/config/logstash.conf -v /logs:/logs ${LOGSTASH}
#SPARK="kafka-spark-streaming"
#docker build -t ${SPARK} ${SPARK}
#docker run --privileged -d -t --net=mynetwork -h ${SPARK} --name ${SPARK} ${SPARK}
docker run -d -t --net=mynetwork -p 9200:9200 -p 9300:9300 -h elasticsearch --name elasticsearch elasticsearch
docker build -t henriquemcastro/kafka-connect-elasticsearch kafka-connect-elasticsearch
docker run -d -t --net=mynetwork -h kafka-connect-elasticsearch --add-host=database.dev.adhslx.int:10.1.230.1 -e ES_CLUSTER_NAME=dev -e BOOTSTRAP_SERVERS=kafka-node-1:9092,kafka-node-2:9092,kafka-node-3:9092,kafka-node-4:9092,kafka-node-5:9092 -e ES_HOSTNAME=database.dev.adhslx.int -e KAFKA_TOPICS=connect-click --name kafka-connect-elasticsearch henriquemcastro/kafka-connect-elasticsearch
# Seed the shared volume with any logs checked into the repo.
mv logs/* /logs
| true |
c0b0e6d2254117f76cffae185ed2379ab994b1fa
|
Shell
|
HaoxueChang/what-links-to-what
|
/CodeLinkR/data/Turtle/zipTheTurtles.sh
|
UTF-8
| 166 | 3.4375 | 3 |
[] |
no_license
|
#!/bin/bash
# Archive every *.turtle file in the current directory as <name>.tar.gz,
# skipping files that already have an archive.
zip_turtles()
{
	local file
	# Iterate the glob directly instead of parsing `ls` output, which
	# broke on filenames containing whitespace.
	for file in *.turtle
	do
		# If the glob matched nothing, the literal pattern remains — skip it.
		[ -e "$file" ] || continue
		# only create zip if it doesn't exist
		if [ ! -f "$file.tar.gz" ]; then
			tar -czvf "$file.tar.gz" "$file"
		fi
	done
}
zip_turtles
| true |
80c95aefd52721b9cddb72bb8a0f41ecce0b0f76
|
Shell
|
fincharry27/AC21009-bash-Scripting
|
/menu
|
UTF-8
| 4,328 | 4.28125 | 4 |
[] |
no_license
|
#!/bin/bash
#Function to display the main menu.
#Presents a numbered select-loop; each handler re-invokes displayMenu when it
#finishes, so menus nest recursively rather than returning here.
#NOTE(review): the menu label "Access_repoistory" is a typo but is the literal
#string users must see/select, so it is left untouched.
displayMenu ()
{
	echo "Please select an option"
	select action in Create_repository Access_repoistory Access_log quit
	do
		case $action in
		#Once selected calls the createRepository function
		Create_repository)
			createRepository
			;;
		#Once selected calls the accessRepository function
		Access_repoistory)
			accessRepository
			;;
		#Once selected calls the accessLog function
		Access_log)
			accessLog
			;;
		#Terminates the script
		quit)
			exit
			;;
		esac
	done
}
#Function to create repository.
#Asks for a name, confirms it, and creates a world-writable directory.
#The confirmation now loops: answering 'n' lets the user re-enter a name and
#be asked again (the original read a new name but never created anything),
#and invalid input re-prompts instead of being silently dropped.
createRepository ()
{
	echo "Please enter a name for the repository"
	#Gets the input from the user
	read -p 'repository name: ' name
	while true; do
		#Ensure the repository name is correct
		echo "$name, is this correct?"
		read -p 'y/n: ' uinput
		if [ "$uinput" = y ];
		then
			#Create the repository with access to all
			mkdir "$name"
			chmod 777 "$name"
			break
		elif [ "$uinput" = n ];
		then
			#Let the user change the name, then re-confirm
			read -p 'repository name: ' name
		else
			echo "Invalid input, please type in y or n"
		fi
	done
	#After repository is created take the user back to the menu
	displayMenu
}
#Function to access repository.
#Enters a repository, then lets the user create or edit files. Edits are
#performed in a scratch directory, backed up, and recorded as context diffs
#under log/.
#Fixes: the repository prompt now retries until an existing directory is
#named (the original read a new name once and then never cd'd), and the log
#directory is created BEFORE appending to it (the original appended first on
#the no-log-dir path, which failed on the very first edit).
accessRepository ()
{
	#Get the repository name from the user; retry until it exists
	read -p 'Enter the repository name: ' rname
	until [ -d "$rname" ]; do
		echo "$rname does not exist, please try again "
		read rname
	done
	cd "$rname"
	ls -l
	#Tells the user what features they can perform
	echo "Pick an option: c: create, e: edit, d: delete "
	read uinput
	if [ "$uinput" = c ];
	then
		read -p 'Please enter a name for the file: ' filename
		#creates a new file with the name requested by the user
		touch "$filename"
	fi
	if [ "$uinput" = e ]; then
		username=$(whoami)
		echo "---Avaliable Files---"
		ls
		echo "Enter the file name of the file you wish to edit: "
		read fileName
		if [ -f "$fileName" ]; then
			#Keep a pre-edit copy so the diff below has a baseline
			if [ ! -d olderVersion ]; then
				mkdir olderVersion
			fi
			cp "$fileName" olderVersion
			#Edit inside a scratch dir so the file is out of the way while open
			mkdir filesInUse && mv "$fileName" filesInUse && cd filesInUse && nano "$fileName"
			#Busy-wait until every nano instance has exited
			while [ -n "$(pgrep nano)" ]; do :; done
			mv "$fileName" .. && cd .. && rmdir filesInUse
			#Snapshot the post-edit state
			if [ ! -d backups ]; then
				mkdir backups
			fi
			cp "$fileName" backups/"${fileName}-backup"
			echo "Complete"
			#Record the change as a context diff; create log/ first
			if [ ! -d log ]; then
				mkdir log
			fi
			echo "---EDIT---" >> log/"${fileName}-${username}-edit-log"
			diff -c "$fileName" olderVersion/"$fileName" >> log/"${fileName}-${username}-edit-log"
			echo "Please add a comment for your entry (leave blank if not): "
			read comment
			echo "---${username}'s edit comment---">>log/"${fileName}-${username}-edit-log" && echo "$comment" >> log/"${fileName}-${username}-edit-log"
		else
			echo "File does not exist, would you like to create it? (y/n): "
			#Implement the creation of new files inside the edit function?
		fi
	fi
	displayMenu
}
#Function to browse and display edit logs of a repository.
#Fix: the original read a replacement name when the repository did not exist
#but never re-checked it or cd'd, so the later `cat` ran in the wrong
#directory. The prompt now loops until an existing directory is named.
accessLog ()
{
	#Get the repository name from the user; retry until it exists
	read -p 'Enter the repository name: ' rname
	until [ -d "$rname" ]; do
		echo "$rname does not exist, please try again "
		read rname
	done
	#Enter the repository's log directory and list the available logs
	cd "$rname"/log
	ls -l
	#Get filename from user
	echo "Please enter in the log file you wish to read "
	read filename
	#Display file contents
	cat "$filename"
	displayMenu
}
displayMenu
| true |
d623ca01e6deec322adebc81f0041bd2654f2c85
|
Shell
|
ubikusss/argodemo
|
/scripts/rke2_centos8_dependencies.sh
|
UTF-8
| 4,489 | 3.28125 | 3 |
[] |
no_license
|
#!/bin/bash
# Build an offline/air-gapped bundle for installing RKE2 on CentOS 8:
# downloads the RKE2 image tarball (sha256-verified), collects all required
# RPMs plus their dependencies into a local yum repo, and packs everything
# into a single timestamped tar.gz whose name is printed on stdout.
# Must run as root on CentOS 8 with network access.
set -e
# Packages (and their transitive deps) to mirror into the local repo.
YUM_PACKAGES="zip unzip container-selinux rke2-server rke2-agent git"
#working
#RKE_IMAGES_DL_URL="https://github.com/rancher/rke2/releases/download/v1.18.12%2Brke2r1/rke2-images.linux-amd64.tar.gz"
#RKE_IMAGES_DL_SHASUM="https://github.com/rancher/rke2/releases/download/v1.18.12%2Brke2r1/sha256sum-amd64.txt"
RKE_IMAGES_DL_URL="https://github.com/rancher/rke2/releases/download/v1.18.13%2Brke2r1/rke2-images.linux-amd64.tar.gz"
RKE_IMAGES_DL_SHASUM="https://github.com/rancher/rke2/releases/download/v1.18.13%2Brke2r1/sha256sum-amd64.txt"
#Not Working, Going to docker for fetching base images Get https://index.docker.io/v2/: dial tcp 52.6.170.51:443: i/o timeout"
#RKE_IMAGES_DL_URL="https://github.com/rancher/rke2/releases/download/v1.18.12%2Brke2r2/rke2-images.linux-amd64.tar.gz"
#RKE_IMAGES_DL_SHASUM="https://github.com/rancher/rke2/releases/download/v1.18.12%2Brke2r2/sha256sum-amd64.txt"
RKE2_VERSION="1.18"
# preflight - check for centos-8 and root user
if ! ( [[ $(awk -F= '/^ID=/{print $2}' /etc/os-release) = "\"centos\"" ]] && [[ $(awk -F= '/^VERSION_ID=/{print $2}' /etc/os-release) = "\"8\"" ]] ) ; then
 echo "needs to be run on centos 8";
 exit 1;
fi
if [ "$EUID" -ne 0 ] ; then
 echo "needs to be run as root";
 exit 1;
fi
# create a working directory, install dependency collection dependencies
export workdir=rke-government-deps-$(date +"%y-%m-%d-%H-%M-%S");
mkdir $workdir;
cd $workdir;
rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY*
yum install -y yum-utils createrepo unzip wget;
#Install moduletools https://unix.stackexchange.com/questions/567057/download-rpm-and-all-dependencies-on-rhel-centos-8
wget -O modulemd-tools.rpm http://ftp.riken.jp/Linux/fedora/epel/8/Everything/x86_64/Packages/m/modulemd-tools-0.6-1.el8.noarch.rpm
dnf -y install modulemd-tools.rpm
rm modulemd-tools.rpm
#Install python3 for module-tools
dnf -y install python3
dnf -y install pkg-config
#for moduletools gi library
wget https://download-ib01.fedoraproject.org/pub/epel/7/x86_64/Packages/p/python36-gobject-base-3.22.0-6.el7.x86_64.rpm
dnf -y install python36-gobject-base-3.22.0-6.el7.x86_64.rpm
rm python36-gobject-base-3.22.0-6.el7.x86_64.rpm
# grab the rke image tarball and verify it against the published sha256 list
curl -LO ${RKE_IMAGES_DL_URL};
curl -LO ${RKE_IMAGES_DL_SHASUM};
CHECKSUM_EXPECTED=$(grep "rke2-images.linux-amd64.tar.gz" "sha256sum-amd64.txt" | awk '{print $1}');
CHECKSUM_ACTUAL=$(sha256sum "rke2-images.linux-amd64.tar.gz" | awk '{print $1}');
if [ "${CHECKSUM_EXPECTED}" != "${CHECKSUM_ACTUAL}" ]; then echo "FATAL: download sha256 does not match"; exit 1; fi
rm -f sha256sum-amd64.txt
echo "Downloaded from ${RKE_IMAGES_DL_URL}"
# install rke rpm repo (disabled by default; enabled per-command below)
cat <<-EOF >"/etc/yum.repos.d/rancher-rke2-latest.repo"
[rancher-rke2-common-latest]
name=Rancher RKE2 Common (latest)
baseurl=https://rpm.rancher.io/rke2/latest/common/centos/8/noarch
enabled=0
gpgcheck=1
gpgkey=https://rpm.rancher.io/public.key
[rancher-rke2-latest]
name=Rancher RKE2 ${RKE2_VERSION} (latest)
baseurl=https://rpm.rancher.io/rke2/latest/${RKE2_VERSION}/centos/8/x86_64
enabled=0
gpgcheck=1
gpgkey=https://rpm.rancher.io/public.key
EOF
# install hashicorp repo
cat <<-EOF >"/etc/yum.repos.d/hashicorp.repo"
[hashicorp]
name=Hashicorp Stable
baseurl=https://rpm.releases.hashicorp.com/RHEL/8/\$basearch/stable
enabled=0
gpgcheck=1
gpgkey=https://rpm.releases.hashicorp.com/gpg
EOF
#Install kubectl repo
cat <<-EOF >"/etc/yum.repos.d/kubernetes.repo"
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=0
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
EOF
# download all rpms and their dependencies into an isolated installroot so
# the full dependency closure is fetched, then index them as a yum repo
mkdir rke_rpm_deps;
cd rke_rpm_deps;
echo "y" | yum -y install --enablerepo="rancher-rke2-common-latest" --enablerepo="hashicorp" --enablerepo="rancher-rke2-latest" --enablerepo="kubernetes" --releasever=/ --installroot=$(pwd) --downloadonly --downloaddir $(pwd) ${YUM_PACKAGES};
createrepo -v .;
# create traditional rpm repo
createrepo_c .
# generate modules meta info
repo2module -s stable -d . modules.yaml
# adjust modules meta info to traditional rpm repo
modifyrepo_c --mdtype=modules modules.yaml repodata/
cd ..;
tar -zcvf rke_rpm_deps.tar.gz rke_rpm_deps;
rm -rf rke_rpm_deps;
# create tar with everything, delete working directory
tar -zcvf ../$workdir.tar.gz .;
cd ..;
rm -rf $workdir;
# print the resulting bundle name for the caller
echo $workdir.tar.gz;
| true |
d7a68bf2eac20bebbc6151db5fb9c4c473e09ba2
|
Shell
|
stuhlmueller/python-tools
|
/tests/async/async_infinite.sh
|
UTF-8
| 97 | 2.796875 | 3 |
[] |
no_license
|
#!/bin/bash
# Emit a numbered heartbeat line every second, forever (used as a
# long-running subprocess for async tests).
i=0
while :; do
  printf 'This is number %d\n' "$i"
  i=$((i + 1))
  sleep 1
done
| true |
df77a5a0c0557be6d6255268fec0bf54293ccda4
|
Shell
|
GenericMappingTools/sandbox
|
/registration/GMT_usage_map.sh
|
UTF-8
| 6,430 | 3.890625 | 4 |
[] |
no_license
|
#!/bin/sh
#
# $Id$
#
# This script creates a fresh gmt_usage.jpg plot for the web page
# The coordinates passed have been checked for range etc
# It is run from inside the registration directory and will
# collect new lon/lat locations from /tmp/gmtregistration.
# This script performs any of three operations; by default they
# are all done unless you specify one of them:
#
# get Get fresh registrations and compile locations
# update Update the CVS version of the complete list
# map Generate a new usage map with the latest data
# all Do all of the above [Default]
# help Give a brief help message
#
# Paul Wessel
# 30-SEPT-2008
#
# Typicall this script is run by cron on Paul's computer since
# SOEST does not want jobs to run on the web server. These are
# the crontab entries on macnut right now:
#
# Run 1 min past midnight, every day [Creates updated hit map for GMT main page]
# 1 0 * * * /Users/pwessel/UH/RESEARCH/CVSPROJECTS/GMTdev/GMT/registration/GMT_usage_map.sh > $HOME/macnut_cron1.log 2>&1
# Run 1 am, every night [Makes sure my local GMT tree is up-to-date with the latest changes]
# 0 1 * * * /Users/pwessel/UH/RESEARCH/CVSPROJECTS/GMTdev/GMT/guru/nightly_gmt_cvsupdate.sh > $HOME/cron.log 2>&1
# Run 2 am, every day [Place the latest ChangeLog file on the SOEST web server]
# 0 2 * * * scp /Users/pwessel/UH/RESEARCH/CVSPROJECTS/GMTdev/GMT/ChangeLog imina:/export/imina2/httpd/htdocs/gmt/gmt >> $HOME/cron.log 2>&1
#
# The first cmd will scp the file /tmp/gmtregistrations from the SOEST web server and
# process the data, produce an updated JPG image, and scp the file to the
# proper GMT directory on the web server
#
# 5 0 * * * rm -f /tmp/gmtregistrations
#
# Related info: The registration form on gmt.soest.hawaii.edu collects
# a lon,lat location of the users site. The submitted data form
# is processed by gmt_form.pl in the cgi-bin directory on the gmt
# server (currently /var/www/cgi-bin on gmt) which will write the lon/lat
# to /tmp/gmtregistration on gmt. This script
# then acts on these records as described above.
# Note: This script uses the GMT4 registration dir since that part is in GMT 4 subversion
# Suppress HDF5 library/runtime version-mismatch aborts.
HDF5_DISABLE_VERSION_CHECK=2
if [ "X$GMTHOME" = "X" ]; then	# Running crontab and environment is not set
	GS_LIB=/opt/local/share/ghostscript/9.10/lib
	GMTHOME4=/Volumes/MacNutRAID/UH/RESEARCH/CVSPROJECTS/GMTdev/gmt4-dev
	GMTHOME5=/Volumes/MacNutRAID/UH/RESEARCH/CVSPROJECTS/GMTdev/gmt5-dev
	PATH=$GMTHOME5/trunk/build/gmt5/bin:/usr/local/bin:/opt/local/bin:$PATH
	export PATH
	export GS_LIB
fi
# Print usage to stderr when the single argument is "help".
if [ $# = 1 ] && [ $1 = "help" ]; then
	cat << EOF >&2
usage: GMT_usage_map.sh [-v] [all | get | update | map | help]
	get	Get fresh registrations and compile locations
	update	Update the Subversion version of the complete list
	map	Generate a new usage map with the latest data
	all	Do all of the above [Default]
	help	Give a brief help message
EOF
	exit
fi
REGHOME4=/Volumes/MacNutRAID/UH/RESEARCH/CVSPROJECTS/GMTdev/gmtadmin/registration	# Where to do the work
REGHOME5=$GMTHOME5/sandbox/registration	# Where to do the work
cd $REGHOME5
verbose=0
if [ $# -ge 1 ]; then	# Check for verbose first
	if [ "X$1" = "X-v" ]; then
		verbose=1
		shift
	fi
fi
# Remaining argument selects the task; default runs everything.
if [ $# = 1 ]; then	# Only wanted some tasks done
	key=$1
else	# Default is all tasks
	key="all"
fi
#if [ $key = "all" ] || [ $key = "get" ]; then
# Extracts new sites from teh web server's tmp dir and only returns
# those over land. To be run from the GMT/registration directory.
# Check if there is new data there
#
# scp imina.soest.hawaii.edu:/tmp/gmtregistration /tmp
# FILE=/tmp/gmtregistration
# if [ ! -e $FILE ] & [ $verbose -eq 1 ]; then
# echo "GMT_usage_map.sh: No new registrations to process" >&2
# exit
# fi
#
## OK, go ahead and process the new data
#
# Only keep ones over land
#
# gmtselect -R0/360/-60/72 -Jm1 -Ns/k -Dl $FILE > new_sites_land.d
# n=`cat new_sites_land.d | wc -l`
# if [ $n -gt 0 ] & [ $verbose -eq 1 ]; then
# echo "GMT_usage_map.sh: Found $n new sites" >&2
# fi
# rm -f $FILE
#fi
#if [ $key = "all" ] || [ $key = "update" ]; then
#
# Gets the previous GMT_old_unique_sites.d file,
# add in the new_sites_land.d data, and runs blockmean
# on it again to remove duplicates
#
# svn -q update
# egrep '^#' GMT_old_unique_sites.d > $$.d
# n_old=`grep -v '^#' GMT_old_unique_sites.d | wc -l`
# egrep -v '^#' GMT_old_unique_sites.d > $$.add
# awk '{print $1, $2, 1}' new_sites_land.d >> $$.add
# blockmean -R0/360/-72/72 -I15m $$.add -S >> $$.d
# mv -f $$.d GMT_old_unique_sites.d
# svn -q commit -m "Automatic update" GMT_old_unique_sites.d
# rm -f $$.add new_sites_land.d
# n_new=`grep -v '^#' GMT_old_unique_sites.d | wc -l`
# delta=`expr $n_new - $n_old`
# if [ $delta -gt 0 ] & [ $verbose -eq 1 ]; then
# echo "GMT_usage_map.sh: Added $delta new sites" >&2
# fi
#fi
# "map" task: render the three-panel world map of registration sites
# (Pacific / Atlantic / Indian longitude bands), stamp it with today's
# date, rasterize to PNG and push it to the web server.
if [ $key = "all" ] || [ $key = "map" ]; then
#	svn -q update
#	cd $REGHOME5
	cp $REGHOME4/GMT_old_unique_sites.d .
#	pscoast -R-175/185/-60/72 -JKf0/5.0i -Gburlywood -Sazure -Dc -A2000 -Ba60f30/30WSne -K -P -X0.6i -Y0.35i --MAP_FRAME_WIDTH=0.04i --FONT_ANNOT_PRIMARY=12p > gmt_usage.ps
#	pscoast -Rg -JKf10/5.0i -Gspringgreen -Sazure -Dc -Wfaint -A2000 -B0 -K -P -X0.6i -Y0.35i --MAP_FRAME_PEN=1p --FONT_ANNOT_PRIMARY=12p > gmt_usage.ps
#	grep -v '^#' GMT_old_unique_sites.d | psxy -R -J -O -K -Sc0.02i -Gred >> gmt_usage.ps
#	date +%x | awk '{print 0.1, 0.1, $1}' | pstext -R0/5/0/5 -Jx1i -F+f10p,Helvetica+jLB -O -Gcornsilk -To -W0.25p >> gmt_usage.ps
#	ps2raster -E150 -A -TG gmt_usage.ps
#	rm -f gmt_usage.ps
	#scp gmt_usage.png imina.soest.hawaii.edu:/export/imina2/httpd/htdocs/gmt5/gmt
	# Panel 1 (dark red coastlines) plus site dots, then two more panels
	# shifted right with -X; -K/-O keep the PostScript stream open.
	pscoast -R200/340/-90/90 -Ji0.014i -Bg -A10000 -Dc -Gdarkred -Sazure -K -P > gmt_usage.ps
	grep -v '^#' GMT_old_unique_sites.d | psxy -R -J -O -K -Sc0.02i -Gyellow >> gmt_usage.ps
	pscoast -R-20/60/-90/90 -J -B -Dc -A10000 -Gdarkgreen -Sazure -X1.96i -O -K >> gmt_usage.ps
	grep -v '^#' GMT_old_unique_sites.d | psxy -R -J -O -K -Sc0.02i -Gyellow >> gmt_usage.ps
	pscoast -R60/200/-90/90 -J -B -Dc -A10000 -Gdarkblue -Sazure -X1.12i -O -K >> gmt_usage.ps
	grep -v '^#' GMT_old_unique_sites.d | psxy -R -J -O -K -Sc0.02i -Gyellow >> gmt_usage.ps
	# Date stamp in the lower-left corner, then rasterize and publish.
	date +%x | awk '{print 0.05, 0.05, $1}' | pstext -R0/5/0/5 -Jx1i -F+f10p,Helvetica+jLB -O -Gcornsilk -TO -W0.25p -X-3.08i >> gmt_usage.ps
	ps2raster -E150 -A -TG gmt_usage.ps
	rm -f gmt_usage.ps
	scp gmt_usage.png imina.soest.hawaii.edu:/export/imina2/httpd/htdocs/gmt5/gmt
fi
| true |
da3fa30cae6d330c7cbae54e5b9f52f95a25d940
|
Shell
|
bitPogo/mediawiki-docker-dev-playbook
|
/roles/install/files/debianDockerCompose.sh
|
UTF-8
| 327 | 3 | 3 |
[] |
no_license
|
#!/bin/bash
# Install docker-compose 1.23.2 system-wide if it is not already present.
set -e
if [ -e /usr/local/bin/docker-compose ]
then
	echo "ok"
else
	# -f makes curl exit non-zero on HTTP errors (e.g. a 404 release URL)
	# so 'set -e' aborts instead of installing an HTML error page as the
	# docker-compose binary.
	curl -fL https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
	chmod +x /usr/local/bin/docker-compose
	ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
fi
| true |
c351a9c7c1f7995484920aa059d3e73b53074454
|
Shell
|
ZNEW/vimConfigurator
|
/configureVim.sh
|
UTF-8
| 1,422 | 2.875 | 3 |
[] |
no_license
|
#!/bin/bash
# Top-level driver for the Vim configurator: sources the helper libraries
# below and then runs the configuration steps in order. All step functions
# (checkOS, prepareConfiguration, ...) are defined in the sourced
# *.sourceMe.sh files, not here.
#----------------------------------------------------------------------------------------------------------------------
# Any subsequent commands which fail will cause the shell script to exit immediately
set -e
#----------------------------------------------------------------------------------------------------------------------
# Load parameters, error handling, logging and the step implementations.
source ./parameters.sourceMe.sh
source ./errorHandling.sourceMe.sh
source ./logging.sourceMe.sh
source ./functions.sourceMe.sh
#----------------------------------------------------------------------------------------------------------------------
logWithSourceInfo "$(tput setaf 2)HERE WE GO:$(tput sgr 0)" $LINENO
#----------------------------------------------------------------------------------------------------------------------
# Run the configuration pipeline; order matters (backups before writes,
# plugin list before install, etc. — see functions.sourceMe.sh).
checkOS
prepareConfiguration
backupExistingFolder
backupExistingConfigFile
createVimFolders
addVimBinFolderToPath
prepareConfigFiles
manageMinGW64
prepareGitRepository
addPluginsUsingVimPlugSystemAndListConfiguration
installPlugins
finalizeConfiguration
installSourcedVimAndPluginsConfigurationFiles
addSourcedTestScript
commitConfiguration
checkPluginsDependencies
proposeFontInstallation
installColors
#----------------------------------------------------------------------------------------------------------------------
logWithSourceInfo "$(tput setaf 2)DONE !$(tput sgr 0)" $LINENO
| true |
02de332c2908fb7de33c800f1f18c967464ab9cb
|
Shell
|
silverd/silverd.github.io
|
/res/attach/install_mac_dev_env.sh
|
UTF-8
| 3,428 | 3.265625 | 3 |
[] |
no_license
|
#!/bin/sh
#author: yuu
# Ask for confirmation before running the (lengthy, system-modifying)
# installation below.
echo "Are you sure to start the installation?y/n"
# -r keeps backslashes literal; quoting the variable keeps the test
# well-formed when the user just presses Enter — the unquoted original
# degenerated to '[ != "y" ]', errored, and fell through into the install.
read -r sure
if [ "$sure" != "y" ]; then
	exit 1
fi
echo "Installing Homebrew"
echo "==========================================="
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
echo "==========================================="
echo "Installing OhMyZsh"
echo "==========================================="
brew install zsh
wget https://github.com/robbyrussell/oh-my-zsh/raw/master/tools/install.sh -O - | sh
echo 'export PATH="/usr/local/sbin:$PATH"' >> ~/.zshrc
source ~/.zshrc
echo "==========================================="
echo "Installing Basic Library"
echo "==========================================="
brew install wget
brew install libevent
brew link libevent
brew install autoconf
brew install pkg-config
brew install libmemcached
echo "==========================================="
echo "Installing Apache+PHP+Redis+Memcached"
echo "==========================================="
brew install homebrew/apache/httpd24
brew install homebrew/php/php70 --with-apache
brew install memcached
brew install redis
brew install mongodb
echo "==========================================="
echo "Installing PHP Extensions"
echo "==========================================="
brew install --HEAD homebrew/php/php70-memcached
brew install --HEAD homebrew/php/php70-redis
brew install homebrew/php/php70-mongodb
brew install homebrew/php/php70-mcrypt
brew install homebrew/php/php70-xxtea
brew install homebrew/php/php70-yaf
brew install homebrew/php/php70-swoole
echo "==========================================="
echo "Installing PHP Composer"
echo "==========================================="
brew install homebrew/php/composer
composer config -g repo.packagist composer https://packagist.phpcomposer.com
echo "==========================================="
echo "Modifing Config Files"
echo "==========================================="
#php.ini
sed -i '' 's/short_open_tag = Off/short_open_tag = On/g' /usr/local/etc/php/7.0/php.ini
#ext-memcached.ini
sed -i '' -e '/memcached.sess_lock_wait = 150000/d' \
-e '/memcached.sess_lock_max_wait = 0/d' \
/usr/local/etc/php/7.0/conf.d/ext-memcached.ini
echo 'memcached.sess_lock_wait_min = 0;
memcached.sess_lock_wait_max = 0;
memcached.sess_lock_retries = 0;' >> /usr/local/etc/php/7.0/conf.d/ext-memcached.ini
#httpd.conf
sed -i '' \
-e 's/Require all denied/Require all granted/g' \
-e 's/#LoadModule\(.*\)mod_rewrite.so/LoadModule\1mod_rewrite.so/g' \
-e 's/#Include\(.*\)httpd-vhosts.conf/Include\1httpd-vhosts.conf/g' \
-e 's/Listen 8080/Listen 80/g' \
-e 's/#ServerName www.example.com:8080/ServerName localhost/g' \
/usr/local/etc/apache2/2.4/httpd.conf
echo '<IfModule php7_module>
AddType application/x-httpd-php .php
AddType application/x-httpd-php-source .phps
<IfModule dir_module>
DirectoryIndex index.html index.php
</IfModule>
</IfModule>' >> /usr/local/etc/apache2/2.4/httpd.conf
mkdir -p ~/home/wwwroot/
cd ~/home/wwwroot/
wwwpath=$(pwd)
echo '<VirtualHost *:80>
ServerName localhost
DocumentRoot "'$wwwpath'"
<Directory "'$wwwpath'">
Options Indexes FollowSymLinks
Require all granted
AllowOverride All
</Directory>
</VirtualHost>' > /usr/local/etc/apache2/2.4/extra/httpd-vhosts.conf
sudo apachectl restart
| true |
5591155348b4916cd0d275c62d0d38db8e58ed4e
|
Shell
|
kzapfe/ExamenOPI
|
/Pregunta1/limpiadatos.sh
|
UTF-8
| 380 | 2.71875 | 3 |
[] |
no_license
|
#!/bin/bash
## Quick-and-dirty script to trim the CVP2010 census data down to the rows
## of interest: the header plus the per-AGEB totals for Obregón.
## The input file may be overridden via $1 (defaults to the 2010 dataset).
main()
{
	local input=${1:-resultados_ageb_urbana_09_cpv2010.csv}
	# Grab the header row.
	head -1 "$input" > encabezados.csv
	# Totals per AGEB for Obregón (no UUOC: grep reads the file directly).
	grep "Obreg" "$input" | grep "Total AGEB" > tabla_AGEBAObre.csv
	# Done: header FIRST, then the data rows (the original appended the
	# header after the data, producing an invalid CSV).
	cat encabezados.csv tabla_AGEBAObre.csv > TablaObregonAGEB.csv
}
main "$@"
| true |
8ae523d1f4c2698c908eb70521617abbfc2605cf
|
Shell
|
Elexi-dev/paper-auto-1.14.4-egg
|
/install-file.sh
|
UTF-8
| 4,529 | 3.625 | 4 |
[] |
no_license
|
#!/bin/ash
# Paper Installation Script
# Server Files: /mnt/server
# Env vars expected from the panel: DL_PATH (optional direct URL template),
# MINECRAFT_VERSION, BUILD_NUMBER, SERVER_JARFILE — assumed injected by the
# Pterodactyl egg; TODO confirm.
apk add --no-cache --update curl jq git
# Either use a caller-supplied URL template ({{VAR}} placeholders rewritten
# to ${VAR} and eval-expanded) or resolve version/build via the Paper API.
if [ -n "${DL_PATH}" ]; then
    echo -e "using supplied download url"
    DOWNLOAD_URL=`eval echo $(echo ${DL_PATH} | sed -e 's/{{/${/g' -e 's/}}/}/g')`
else
    # Validate the requested MC version against the API; fall back to latest.
    VER_EXISTS=`curl -s https://papermc.io/api/v1/paper | jq -r --arg VERSION $MINECRAFT_VERSION '.versions[] | IN($VERSION)' | grep true`
    LATEST_PAPER_VERSION=`curl -s https://papermc.io/api/v1/paper | jq -r '.versions' | jq -r '.[0]'`
    if [ "${VER_EXISTS}" == "true" ]; then
        echo -e "Version is valid. Using version ${MINECRAFT_VERSION}"
    else
        echo -e "Using the latest paper version"
        MINECRAFT_VERSION=${LATEST_PAPER_VERSION}
    fi
    # Same dance for the build number ("latest" is accepted verbatim).
    BUILD_EXISTS=`curl -s https://papermc.io/api/v1/paper/${MINECRAFT_VERSION} | jq -r --arg BUILD ${BUILD_NUMBER} '.builds.all[] | IN($BUILD)' | grep true`
    LATEST_PAPER_BUILD=`curl -s https://papermc.io/api/v1/paper/${MINECRAFT_VERSION} | jq -r '.builds.latest'`
    if [ "${BUILD_EXISTS}" == "true" ] || [ ${BUILD_NUMBER} == "latest" ]; then
        echo -e "Build is valid. Using version ${BUILD_NUMBER}"
    else
        echo -e "Using the latest paper build"
        BUILD_NUMBER=${LATEST_PAPER_BUILD}
    fi
    echo "Version being downloaded"
    echo -e "MC Version: ${MINECRAFT_VERSION}"
    echo -e "Build: ${BUILD_NUMBER}"
    DOWNLOAD_URL=https://papermc.io/api/v1/paper/${MINECRAFT_VERSION}/${BUILD_NUMBER}/download
fi
cd /mnt/server
echo -e "running curl -o ${SERVER_JARFILE} ${DOWNLOAD_URL}"
# Keep the previous jar as .old before overwriting.
if [ -f ${SERVER_JARFILE} ]; then
    mv ${SERVER_JARFILE} ${SERVER_JARFILE}.old
fi
curl -o ${SERVER_JARFILE} ${DOWNLOAD_URL}
echo -e "Downloading Server files/plugins from Elexi.dev"
git clone https://github.com/Elexi-dev/paper-auto-1.14.4.git
mv paper-auto-1.14.4/ ../
echo -e "Changing configs based off of user variable input"
cd /mnt/server/plugins/DiscordSRV
# Double quotes so BOT_TOKEN / CHAT_ID / CONSOLE_ID actually expand — the
# original single-quoted programs wrote the literal text "$BOT_TOKEN" into
# config.yml. '|' as the sed delimiter avoids clashes with characters that
# may appear in the values.
sed -i "s|!BOTTOKEN!|\"${BOT_TOKEN}\"|g" config.yml
sed -i "s|!CHANNELS!|{\"global\": \"${CHAT_ID}\"}|g" config.yml
sed -i "s|!CONSOLE!|\"${CONSOLE_ID}\"|g" config.yml
echo -e "Installed Server Config Stuff & DiscordSRV Shit"
# Put anything else above this line!
# ----------------------------------
# Install the auto-updater scaffolding under /mnt/server/updatescript/:
# a currentversion.txt (last installed Paper build), currentmc.txt (MC
# version) and updatething.sh (the updater run before each server start).
# Each nested check function creates its file if missing and then recurses
# / chains into the next check.
start(){
 cd /mnt/server
 echo "Starting Auto Updater Installation..."
 # Seed currentversion.txt with build 0 so the first update always fires.
 currentvercheck(){
  if [ -f currentversion.txt ]; then
   echo "currentversion exists..."
   cd ../
   echo "Auto Updater Installation Finished"
   # NOTE(review): bare 'cd' goes to $HOME — presumably intentional; confirm.
   cd
  else
   echo "creating currentversion..."
   cat << 'EOF' > currentversion.txt
0
EOF
   currentvercheck
  fi
 }
 # Seed currentmc.txt with the MC version this egg targets.
 currentmc(){
  if [ -f currentmc.txt ]; then
   echo "currentmc exists..."
   currentvercheck
  else
   echo "creating currentversion..."
   cat << 'EOF' > currentmc.txt
1.14.4
EOF
   currentmc
  fi
 }
 # Write the updater script verbatim (quoted EOF: no expansion here; the
 # ${...} references below are resolved when updatething.sh itself runs).
 uthingcheck(){
  if [ -f updatething.sh ]; then
   echo "updatething exists..."
   currentmc
  else
   echo "creating updatething..."
   cat << 'EOF' > updatething.sh
#!/bin/bash
cd updatescript/
currentmc=`cat currentmc.txt`
current=`cat currentversion.txt`
echo -e "Checking for Server Update..."
newthing=`curl -s "https://papermc.io/api/v1/paper/${currentmc}" | jq -r '.builds | .latest' 2>&1 | tee latestversion.txt`
echo -e "Latest Paper is on version ${newthing}"
startserver(){
 echo -e "You good on your server/plugin updates my dude."
 echo -e "Starting server."
}
updatecoreplugins(){
 echo -e "Checking/Updating Core Plugins from Elexi.dev"
 git checkout master
 git stash
 git pull
 git stash pop --quiet
 echo -e "Core Plugin Check/Update done."
 startserver
}
comparedemapples(){
 if [ "${newthing}" -gt "${current}" ]; then
  echo -e "paper-${newthing}.jar is a new update."
  echo -e "Updating to paper-${newthing}.jar"
  wget -nv -nc --content-disposition https://papermc.io/api/v1/paper/${currentmc}/${newthing}/download
  file="paper-${newthing}.jar"
  if [ -f "${file}" ]; then
   echo -e "paper-${newthing}.jar has been downloaded. Renaming some shit..."
   rm -R ../paper-${current}.jar
   mv paper-${newthing}.jar ../paper-${newthing}.jar
   echo -e "${newthing}" > currentversion.txt
   startserver
  else
   echo -e "Error 404: paper-${newthing}.jar could not be found."
   comparedemapples
  fi
 else
  echo -e "paper-${newthing}.jar is already installed and running."
  updatecoreplugins
 fi
}
comparedemapples
EOF
   chmod +x updatething.sh
   uthingcheck
  fi
 }
 # Create updatescript/ and enter it, then run the file checks above.
 dircheck(){
  if [ -d updatescript ]; then
   echo "updatescript dir exists..."
   cd updatescript
   uthingcheck
  else
   echo "creating dir updatescript..."
   mkdir updatescript
   dircheck
  fi
 }
 dircheck
}
start
| true |
206b3b70a70a63b79868dd4836756b5d29d835e4
|
Shell
|
KonstantinDjairo/mouse-brain-templates_generator
|
/make_mesh.sh
|
UTF-8
| 3,622 | 4.03125 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#add parent directory to PYTHONPATH so make_mesh.py can be imported
D=$(cd ../ && pwd)
export PYTHONPATH="${PYTHONPATH}:$D"
#defaults for the option flags parsed below.
#DECIMATE now defaults to false: it previously had no default, so without
#-x the later `if $DECIMATE` expanded to an empty command (exit status 0)
#and the Blender branch ran unconditionally.
CUT=false
MASK=false
BOUNDARY=false
DECIMATE=false
USAGE="usage:\n\
'basename $0' -i <image file> -t <treshhold> [-m <mask-file>] [-c] [-s <size> -a <axis> -d <direction>] [-b] [-x]\n\
-i: Image file name to create mesh from. Nifti format required.
-t: Treshhold for marching cube algorithm
-m: Optional mask file to be provided. Will be resampled to resolution of image file. If not specified, mask is created out of Image.
-c: invokes additional function that trims brain image prior to mesh creation. Identifies image boundaries (non-zero entries) and Need to specify
-s: size of cut in voxel
-a: axis along which to cut (0,1,2)
-d: {^beginning','end'}.Direction of cut. Trim either from start of axis inwards or from end of axis inwards.
-b: use the given mask as boundary for cut
-x: use Blenders mesh triangulation algorithm to decimate resulting mesh and smooth mesh. Requires working installation of Blender.
-h: displays help message."
#read options (see $USAGE above for flag meanings)
while getopts ':i:t:bcs:a:d:m:hx' flag; do
  case "${flag}" in
    i)
		IMAGE_NAME="$OPTARG"
		;;
    t)
		TRESHHOLD="$OPTARG"
		;;
    b)
		BOUNDARY=true
		;;
    c)
		CUT=true
		;;
    s)
		SIZE="$OPTARG"
		;;
    a)
		AXIS="$OPTARG"
		;;
    d)
		TRIM_STARTING_FROM="$OPTARG"
		;;
    m)
		MASK=true
		MASK_FILE="$OPTARG"
		;;
    x)
		DECIMATE=true
		;;
    h)
		echo -e "$USAGE"
		exit 0
		;;
  esac
done
#If a mask was supplied (-m), resample it to the image's voxel resolution.
#pixdim1 is read from fslinfo and turned into "RxRxR" for ResampleImage.
if $MASK; then
	RESOLUTION=$(fslinfo $IMAGE_NAME | grep pixdim1)
	RESOLUTION=($(echo $RESOLUTION | tr " " "\n"))
	RESOLUTION=${RESOLUTION[1]}
	CM=x
	RESOLUTION=$RESOLUTION$CM$RESOLUTION$CM$RESOLUTION
	#Derive the output name from the mask name: <prefix>_resampled.nii
	NAME=($(echo $MASK_FILE | tr "." "\n"))
	PREFIX=${NAME[0]}
	SUFFIX=_resampled.nii
	MASK_NAME=$PREFIX$SUFFIX
	echo ResampleImage 3 $MASK_FILE $MASK_NAME $RESOLUTION size=1 spacing=0 1
	ResampleImage 3 $MASK_FILE $MASK_NAME $RESOLUTION size=1 spacing=0 1
fi
#No mask supplied: build one by thresholding the image at 10 and binarizing.
if [ "$MASK" == "false" ]; then
	NAME=($(echo $IMAGE_NAME | tr "." "\n"))
	PREFIX=${NAME[0]}
	SUFFIX=_mask.nii
	MASK_NAME=$PREFIX$SUFFIX
	fslmaths $IMAGE_NAME -thr 10 -bin $MASK_NAME
fi
echo mask created
#Smooth the mask (sigma 6) into <prefix>_smoothed.nii for mesh generation.
NAME_M=($(echo $MASK_NAME | tr "." "\n"))
PREFIX_M=${NAME_M[0]}
SUFFIX_M=_smoothed.nii
SMOOTHED_MASK=$PREFIX_M$SUFFIX_M
#smooth one mask
SmoothImage 3 $MASK_NAME 6 $SMOOTHED_MASK
#make mesh using marching cube.
echo mask smoothed
#Optional -c: trim the image before meshing via make_mesh.cut_img_mas;
#with -b the (resampled) mask is passed as an extra boundary argument.
if $CUT; then
	NAME_C=($(echo $IMAGE_NAME | tr "." "\n"))
	PREFIX_C=${NAME_C[0]}
	SUFFIX_C="_cut.nii"
	OUTPUTFILE=$PREFIX_C$SUFFIX_C
	if $BOUNDARY; then
		python -c "import make_mesh; make_mesh.cut_img_mas(\"$IMAGE_NAME\",\"$OUTPUTFILE\",$SIZE,$AXIS,\"$TRIM_STARTING_FROM\",\"$MASK_NAME\")"
		IMAGE_NAME=$OUTPUTFILE
	else
		python -c "import make_mesh; make_mesh.cut_img_mas(\"$IMAGE_NAME\",\"$OUTPUTFILE\",$SIZE,$AXIS,\"$TRIM_STARTING_FROM\")"
		IMAGE_NAME=$OUTPUTFILE
	fi
	echo Image cut
fi
#Run the marching-cubes mesh generator; it lives either here or one dir up.
if [ -f make_mesh.py ]; then
	python make_mesh.py -i $IMAGE_NAME -m $SMOOTHED_MASK -t $TRESHHOLD
else
	python ../make_mesh.py -i $IMAGE_NAME -m $SMOOTHED_MASK -t $TRESHHOLD
fi
echo mesh created
#Decimate and smooth mesh using Blender (only when -x was given).
#Guarded with an explicit comparison: expanding an unset DECIMATE as a
#command would make this branch run even without -x.
if [ "${DECIMATE:-false}" = true ]; then
	#Pick the Blender helper script from here or one directory up.
	if [ -f decimate_mesh_blender.py ]; then
		DECIMATE_SCRIPT=decimate_mesh_blender.py
	else
		DECIMATE_SCRIPT=../decimate_mesh_blender.py
	fi
	#Process every .obj produced by the marching-cubes step, then remove it.
	find . -name '*.obj' | while IFS= read -r NAME; do
		blender -b -P "$DECIMATE_SCRIPT" -- -f "$NAME" -r 0.4 -i 2 -n 4 -l 0.5
		rm -- "$NAME"
	done
	echo mesh processed
fi
#Clean UP intermediates. rm -f (and the OUTPUTFILE guard) keep rm from
#being invoked on missing operands — OUTPUTFILE is only set when -c was
#given, and a bare `rm` would abort the script under `set -o errexit`.
rm -f -- "$SMOOTHED_MASK"
if [ -n "${OUTPUTFILE:-}" ]; then
	rm -f -- "$OUTPUTFILE"
fi
rm -f -- "$MASK_NAME"
| true |
fc95c6860e3331e51e6a2dc754574eba91c5d3ac
|
Shell
|
hyperia-sk/gitlab-checker
|
/gitlab-check.sh
|
UTF-8
| 7,065 | 3.9375 | 4 |
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Abort on the first failing command.
set -o errexit
SUBJECT="GitLab Checker"
# Elapsed-seconds baseline for the whole run ($SECONDS is bash's timer).
START_TIME=$SECONDS
# Terminal color/attribute escape sequences via tput.
white=$(tput bold)
black=$(tput setaf 0)
red=$(tput setaf 1)
green=$(tput setaf 2)
yellow=$(tput setaf 3)
blue=$(tput setaf 6)
bg_red=$(tput setab 1)
bg_green=$(tput setab 2)
bg_yellow=$(tput setab 3)
normal=$(tput sgr0)
# Pre-colored status markers used by _ok/_fail.
OK="${green}OK${normal}"
FAIL="${red}FAIL${normal}"
# Print $1 without a trailing newline; any non-empty second argument acts
# as a flag requesting a newline-terminated line instead.
_echo()
{
    local fmt='%s'
    # A non-empty $2 requests the newline variant.
    if [[ -n "${2:-}" ]]; then
        fmt='%s\n'
    fi
    printf "$fmt" "$1"
}
# Announce the start of a named check, e.g. "API test ... " (no newline,
# so the OK/FAIL marker lands on the same line).
_echoTest()
{
    local label=$1
    _echo "${yellow}${label} test${normal} ... "
}
# Print the pre-colored "OK" marker (no trailing newline).
_ok()
{
    printf '%s' "${OK}"
}
# Print the pre-colored "FAIL" marker and abort the whole script.
_fail()
{
    printf '%s' "${FAIL}"
    exit 1
}
# Record the current epoch time in the global 'start'; _endTime later
# prints the delta.
_startTime()
{
    start=$(date '+%s')
}
# Print " (Ns)" — seconds elapsed since _startTime — followed by a newline.
_endTime()
{
    local now elapsed
    now=$(date '+%s')
    elapsed=$(( now - start ))
    _echo " (${elapsed}s)" 1
}
function _logo
{
echo "${blue} _ _ ${yellow}_${normal}"
echo "${blue} | | | |_ _ _ __ ___ _ _${yellow}(_)${normal}${blue} __ _ ${normal}"
echo "${blue} | |_| | | | | '_ \ / _ \ '_${yellow}| |${normal}${blue}/ _' |${normal}"
echo "${blue} | _ | |_| | |_) | __/ | ${yellow}| |${normal}${blue} (_| |${normal}"
echo "${blue} |_| |_|\__, | .__/ \___|_| ${yellow}|_|${normal}${blue}\__,_|${normal}"
echo "${blue} |___/|_| ${normal}${white}$SUBJECT${normal}"
echo ""
}
function usage
{
cat <<EOF
usage:
$(basename $0) [ -n | -i | -t | -h ]
-n <ip|hostname>
Name of the host or IP address.
-i <number>
Project ID. Gitlab path:
"Project" -> "General Settings" -> "Expand: General project settings"
-t <string>
Personal access tokens. Gitlab path:
"User Settings" -> "Access Tokens"
-h
Prints this help.
EOF
exit $1
}
# Run _curl_with_error_code and strip the trailing HTTP-status line that
# it appends, leaving only the response body on stdout.
function _curl
{
_curl_with_error_code "$@" | sed '$d'
}
# Wrapper around curl that streams the body to stdout (via fd 17) while
# also capturing the HTTP status code printed by --write-out.
# Returns 127 when the server answered with a 4xx/5xx status.
# NOTE(review): curl_error_code captures the status of the command
# substitution, i.e. the last pipeline stage (tail), not curl itself --
# without pipefail a curl failure can go undetected here.
function _curl_with_error_code
{
local curl_error_code http_code
# duplicate stdout to fd 17 so tee can stream the body while tail grabs the code
exec 17>&1
http_code=$(curl --write-out '\n%{http_code}\n' "$@" | tee /dev/fd/17 | tail -n 1)
curl_error_code=$?
exec 17>&-
if [ $curl_error_code -ne 0 ]; then
return $curl_error_code
fi
# treat any client/server error status as a failure
if [ $http_code -ge 400 ] && [ $http_code -lt 600 ]; then
echo "$FAIL (HTTP $http_code)" # >&2
return 127
fi
}
# Query version.gitlab.com with the installed GitLab version (base64-encoded
# JSON) and print a colored badge: red "update asap", yellow
# "update available", or green for anything else. Argument "null"
# (version unknown) prints an empty line instead.
function _checkUpdate
{
if [[ $1 != "null" ]]; then
VERSION=$1
# payload expected by the version check endpoint: {"version":"X.Y.Z"}
VERSIONB64=`echo "{\"version\":\"$VERSION\"}" | base64`
URL="https://version.gitlab.com/check.svg?gitlab_info=${VERSIONB64}"
# extract the status text from the returned SVG badge
RESULT=`wget -q -O- --header="Referer: https://google.com" ${URL} | grep -oPm1 "(?<=\">)(.*)<\/text>" | grep -oP ".+?(?=<\/text>)"`
if [[ $RESULT == "update asap" ]]; then
echo "${bg_red}${white} ${RESULT} ${normal}"
elif [[ $RESULT == "update available" ]]; then
echo "${bg_yellow}${white} ${RESULT} ${normal}"
else
echo "${bg_green}${white} ${RESULT} ${normal}"
fi
else
echo ""
fi
}
OPTERR=0
GITLAB_API=""
TOKEN=""
PROJECT_ID=""
while getopts ":n:i:t:h" options
do
case $options in
n)
GITLAB_API=$OPTARG"/api/v4"
;;
i)
PROJECT_ID=$OPTARG
;;
t)
TOKEN=$OPTARG
;;
h|*)
_logo
usage 0
;;
esac
done
shift $(($OPTIND - 1))
function _errorMessage
{
_echo "${red}Error${normal}" 1
_echo "${red}$1${normal}" 1
_echo "" 1
_echo "usage:" 1
_echo "$(basename $0) -h" 1
exit 1
}
if [[ -z $GITLAB_API ]]; then
_errorMessage "Hostname not set."
fi
if [[ -z $PROJECT_ID ]]; then
_errorMessage "Project ID not set."
fi
if [[ -z $TOKEN ]]; then
_errorMessage "Token not set."
fi
function main
{
_logo
# init / clean
tmp_dir="tmp_dir/"
rm -rf $tmp_dir
_echoTest "API"
_startTime
gitlab_version=$(mktemp)
_curl --silent "$GITLAB_API/version/?private_token=$TOKEN" > $gitlab_version; td=`cat $gitlab_version`; if [[ -z $td ]]; then _fail; else _ok; fi;
_endTime
version=`cat $gitlab_version | jq '.version' | tr -d '"'`
_echo " ├ version: $version "
_checkUpdate $version
_echo " └ revision: "`cat $gitlab_version | jq '.revision' | tr -d '"'` 1
_echoTest "Project detail"
_startTime
project_data=$(mktemp)
_curl --silent "$GITLAB_API/projects/$PROJECT_ID/?private_token=$TOKEN" > $project_data; td=`cat $project_data`; if [[ -z $td ]]; then _fail; else _ok; fi;
_endTime
_echo " ├ id: $PROJECT_ID" 1
_echo " ├ name: "`cat $project_data | jq '.name' | tr -d '"'` 1
_echo " └ url: "`cat $project_data | jq '.web_url' | tr -d '"'` 1
_echoTest "Star project"
_startTime
_curl -XPOST --silent "$GITLAB_API/projects/$PROJECT_ID/star/" -d "private_token=$TOKEN" >&/dev/null && _ok || _fail
_endTime
_echoTest "Unstar project"
_startTime
_curl -XPOST --silent "$GITLAB_API/projects/$PROJECT_ID/unstar/" -d "private_token=$TOKEN" >&/dev/null && _ok || _fail
_endTime
_echoTest "Clone"
_startTime
repo_url=$(echo $(cat $project_data | jq '.ssh_url_to_repo') | tr -d '"' | sed 's/\.vpn//')
git clone --quiet $repo_url $tmp_dir && _ok || _fail
_endTime
_echo " └ git version: $(git --version | awk '{ print $3 }')" 1
_echoTest "Change directory"
_startTime
cd $tmp_dir && _ok || _fail
_endTime
_echoTest "Commit"
_startTime
echo "Test `date`
" >> test.md || _echo $FAIL
git add . && git commit --quiet -am "test `date`" && _ok || _fail
_endTime
_echoTest "Push to master"
_startTime
git push --quiet origin master && _ok || _fail
_endTime
_echoTest "Checkout to develop branch"
_startTime
git checkout --quiet -b develop && _ok || _fail
_endTime
_echoTest "Push to develop branch"
_startTime
git push --quiet origin develop >&/dev/null && git checkout --quiet master && _ok || _fail
_endTime
_echoTest "Create merge request"
_startTime
mr_data=$(mktemp)
title="test `date`"
_curl -XPOST --silent "$GITLAB_API/projects/$PROJECT_ID/merge_requests/" -d "private_token=$TOKEN&source_branch=master&target_branch=develop&title=$title" > $mr_data && _ok || _fail
mr_id=$(cat $mr_data | jq '.iid')
_endTime
_echo " ├ id: $mr_id" 1
_echo " ├ name: "`cat $mr_data | jq '.title' | tr -d '"'` 1
_echo " └ url: "`cat $mr_data | jq '.web_url' | tr -d '"'` 1
_echoTest "Add comment to merge request"
_startTime
mr_note=$(mktemp)
_curl -XPOST --silent "$GITLAB_API/projects/$PROJECT_ID/merge_requests/$mr_id/notes" -d "private_token=$TOKEN&body=message%20test%20hi" > $mr_note && _ok || _fail
_endTime
_echoTest "Close merge request"
_startTime
mr_close=$(mktemp)
_curl -XPUT --silent "$GITLAB_API/projects/$PROJECT_ID/merge_requests/$mr_id" -d "private_token=$TOKEN&state_event=close" > $mr_close && _ok || _fail
_endTime
cd ../
rm -rf $tmp_dir
_echo "" 1
}
main
ELAPSED_TIME=$(($SECONDS - $START_TIME))
_echo "Time: $(($ELAPSED_TIME/60)) minutes $(($ELAPSED_TIME%60)) seconds" 1
_echo "${green}done.${normal}" 1
exit 0
| true |
bde56ff374b173e6bb3d9a66800b7bee8b33dcdf
|
Shell
|
dimagi/jsonobject
|
/scripts/test_cython_files.sh
|
UTF-8
| 845 | 3.015625 | 3 |
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
#! /bin/bash
# Rebuild the Cython extension modules from scratch and verify that the
# regenerated C sources are identical to what is committed in git.
# Exits 0 on a match, 1 (with a diff excerpt and instructions) otherwise.
find jsonobject -iname '*.c' -delete
find jsonobject -iname '*.so' -delete
python setup.py build_ext --inplace
git update-index -q --refresh
if ! git diff --quiet HEAD --; then
    echo "====================================="
    echo "ERROR: ./scripts/test_cython_files.sh"
    echo "-------------------------------------"
    git diff HEAD -- | head -n 20
    echo "-------------------------------------"
    echo "Compiling the C files from scratch shows a difference"
    echo "The first 20 lines of the diff is shown above"
    echo "Did you rebuild and commit the changes? Try running:"
    echo "    find jsonobject -iname '*.c' -delete"
    echo "    find jsonobject -iname '*.so' -delete"
    echo "    python setup.py build_ext --inplace"
    exit 1
fi
echo "The recompiled cython files are a match"
exit 0
| true |
d62d828b6d45e286ed2b8a0586bc687c291ff0d4
|
Shell
|
appleily/test2
|
/ifuser.sh
|
UTF-8
| 159 | 3.15625 | 3 |
[] |
no_license
|
#!/bin/bash
who | grep $1 > /dev/null
if [ $? == 0 ]
then
echo "$1 alread login"
info=`cat /etc/passwd | grep $1`
echo "$1的用户信息如下:"
echo $info
| true |
5d8efb0f1d67cf5969aeab34356e302cbdd7a895
|
Shell
|
RDLLab/SKD_DEVEL
|
/skd_full_install.sh
|
UTF-8
| 1,706 | 4.09375 | 4 |
[] |
no_license
|
#!/bin/bash
# Get the name of the directory where the repository is located
SKD_ROOT_DIR=$(cd `dirname $0` && pwd)
# Ensure that the commandline arguments are used
if [ $# -lt 6 ]
then
echo "Usage : $0 --install_oppt install_dir_path [true|false] --oppt_install_dir [oppt_install_path] --install_conda [true|false]"
exit
fi
# Assign the options
OPPT_OPTION=$1
OPPT_INSTALL_OPTION=$3
CONDA_OPTION=$5
# Get the whole install procedure
case "${OPPT_OPTION}" in
-i|--install_oppt)
INSTALL_OPPT=$2
echo "OPPT INSTALL OPTION WAS SET TO ${INSTALL_OPPT}"
;;
esac
case "${OPPT_INSTALL_OPTION}" in
-d|--oppt_install_dir)
OPPT_INSTALL_DIR=$4
echo "INSTALL_DIR is $OPPT_INSTALL_DIR"
echo "OPPT INSTALL LOCATION WAS SET TO ${OPPT_INSTALL_DIR}"
;;
esac
case "${CONDA_OPTION}" in
-c|--install_conda)
INSTALL_CONDA_FLAG=$6
echo "CONDA INSTALL OPTION WAS SET TO ${INSTALL_CONDA_FLAG}"
;;
esac
# Check if oppt is to be installed
echo ${INSTALL_OPPT}
if (${INSTALL_OPPT});
then
echo "FLAG SET TO ${INSTALL_OPPT_FLAG}: Installing OPPT into ${OPPT_INSTALL_DIR}"
cd ${SKD_ROOT_DIR}
chmod +x oppt_install.sh && ./oppt_install.sh --install_dir ${OPPT_INSTALL_DIR}
fi
# # Export oppt libraries for installed oppt dir
# set oppt_DIR for CMake to find oppt
export oppt_DIR=${OPPT_INSTALL_DIR}/lib/cmake/oppt
echo "INSTALLING SKD_OPPT PLUGINS AND ENVIRONMENT MODELS"
cd ${SKD_ROOT_DIR}
chmod +x skd_oppt_install.sh && ./skd_oppt_install.sh
# # # Install conda dependencies
echo "SETTING UP ANACONDA DEPENDENCIES"
cd ${SKD_ROOT_DIR}
chmod +x install_conda_env.sh && ./install_conda_env.sh -c ${INSTALL_CONDA_FLAG}
echo "DONE INSTALLING ALL MODULES. READY TO RUN ASSESSMENT FOR TESTING"
| true |
40e5c511617a6d14d2e97d3a51923c95067b1b6c
|
Shell
|
webmalc/mbs
|
/scripts/permissions.sh
|
UTF-8
| 623 | 3.171875 | 3 |
[] |
no_license
|
#!/bin/bash
# Recreate the Symfony-style var/cache and var/logs directories and grant
# read/write ACLs to both the web-server user and the current user.
GREEN='\e[0;32m'
NC='\e[0m'
# absolute directory of this script
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
FOLDER=${DIR}'/../var/'
# detect the web-server user from the process list (apache/httpd/_www/www-data/nginx);
# the [a]pache bracket trick keeps grep from matching itself
HTTPDUSER=`ps aux | grep -E '[a]pache|[h]ttpd|[_]www|[w]ww-data|[n]ginx' | grep -v root | head -1 | cut -d\ -f1`
# wipe and recreate the writable directories
sudo rm -rf ${FOLDER}'cache'
sudo rm -rf ${FOLDER}'logs'
mkdir ${FOLDER}'cache'
mkdir ${FOLDER}'logs'
# current ACLs (-R) and default ACLs for new files (-dR)
sudo setfacl -R -m u:"$HTTPDUSER":rwX -m u:`whoami`:rwX ${DIR}'/../var/cache' ${DIR}'/../var/logs' ${DIR}'/../protectedUpload'
sudo setfacl -dR -m u:"$HTTPDUSER":rwX -m u:`whoami`:rwX ${DIR}'/../var/cache' ${DIR}'/../var/logs' ${DIR}'/../protectedUpload'
echo -e "${GREEN}Complete!${NC}"
| true |
95acaf31c8eebe27c867c9c7a3ecf57ae876b91e
|
Shell
|
ptptptptptpt/install
|
/remove.sh
|
UTF-8
| 2,476 | 3.734375 | 4 |
[] |
no_license
|
#!/bin/bash
programDir=`dirname $0`
programDir=$(readlink -f $programDir)
parentDir="$(dirname $programDir)"
programDirBaseName=$(basename $programDir)
# Print a short usage synopsis (surrounded by blank lines, matching the
# original echo-with-embedded-newlines output exactly).
usage() {
cat <<EOF

Usage:
bash $(basename $0) CONFIG_FILE

EOF
}
[ "$1" ] || { usage; exit 1; }
[ -f "$1" ] || { echo "Error: $1 not exists or not a file!"; exit 1; }
source $(readlink -f $1) || { echo "'source $(readlink -f $1)' failed!"; exit 1; }
[ "${CONTROL_NODE_PRIVATE_IP}" ] || { echo "Error: CONTROL_NODE_PRIVATE_IP not defined!"; exit 1; }
[ "${NETWORK_NODES_PRIVATE_IP}" ] || { echo "Error: NETWORK_NODES_PRIVATE_IP not defined!"; exit 1; }
[ "${COMPUTE_NODES_PRIVATE_IP}" ] || { echo "Error: COMPUTE_NODES_PRIVATE_IP not defined!"; exit 1; }
[ "${STORAGE_NODES_PRIVATE_IP}" ] || { echo "Error: STORAGE_NODES_PRIVATE_IP not defined!"; exit 1; }
[ "${STORAGE_NODES_CEPH_OSD_DATA_DIR}" ] || { echo "Error: STORAGE_NODES_CEPH_OSD_DATA_DIR not defined!"; exit 1; }
#####################
set -x
## log
logDir='/var/log/stackube'
logFile="${logDir}/remove.log-$(date '+%Y-%m-%d_%H-%M-%S')"
mkdir -p ${logDir}
allIpList=`echo "
${CONTROL_NODE_PRIVATE_IP}
${NETWORK_NODES_PRIVATE_IP}
${COMPUTE_NODES_PRIVATE_IP}
${STORAGE_NODES_PRIVATE_IP}" | sed -e 's/,/\n/g' | sort | uniq `
# Run the three teardown stages with retries, mirroring all output into
# ${logFile}. The original file repeated the same retry loop three times;
# it is factored into a single helper here.
#
# _retry_stage STAGE CONFIG:
#   runs "bash ${programDir}/STAGE.sh CONFIG", retrying up to 10 times.
#   On persistent failure prints "Error: STAGE failed !" and exits 1,
#   which terminates the piped group below (same as the original loops).
_retry_stage() {
    local stage="$1" config="$2" attempt
    echo -e "\n$(date '+%Y-%m-%d %H:%M:%S') ${stage}"
    for attempt in $(seq 1 10); do
        if bash "${programDir}/${stage}.sh" "${config}"; then
            return 0
        fi
    done
    echo "Error: ${stage} failed !"
    exit 1
}
{
    # $1 is the config file passed to this script, resolved to an absolute path
    _retry_stage remove_kubernetes "$(readlink -f "$1")"
    _retry_stage remove_openstack "$(readlink -f "$1")"
    _retry_stage remove_ceph "$(readlink -f "$1")"
    echo -e "\n$(date '+%Y-%m-%d %H:%M:%S') All done!"
} 2>&1 | tee -a "${logFile}"
| true |
cf50ec1a4f51ed6d9064e80c913c719f897641c9
|
Shell
|
ariutta/jupyterWith-demo
|
/jupyterlab-launch/jupyterlab-launch
|
UTF-8
| 4,928 | 4.25 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
OUTPUT_FILE=$(mktemp) || exit 1
SERVER_START_CMD="jupyter lab --no-browser --port=8889"
HELP_MSG="jupyterlab-launch <target>
When server is local, target defaults to pwd.
When server is remote, an ssh-style url is required, e.g.:
jupyterlab-launch nixos.gladstone.internal:code/jupyterlab-demo"
server_pid=""
tunnel_pid=""
# Based on http://linuxcommand.org/lc3_wss0140.php
# and https://codeinthehole.com/tips/bash-error-reporting/
PROGNAME=$(basename "$0")
cleanup_complete=0
# Exit-time cleanup: remove the captured server output file and kill the
# server/tunnel background processes if they are still alive.
# Sets the global cleanup_complete flag so error_exit can avoid double cleanup.
# NOTE(review): kill -9 as the first resort gives the processes no chance
# to shut down gracefully -- consider TERM first.
cleanup() {
if [[ -f "$OUTPUT_FILE" ]]; then
rm "$OUTPUT_FILE"
fi
# TODO: should we delete the nohup.out file created when server is remote?
# (it's a different file from $OUTPUT_FILE)
# stopping the server and tunnel processes, if running
# each guard checks the pid variable is set AND the process still exists
if [[ -n "$server_pid" ]] && [[ -n "$(ps -p $server_pid | grep $server_pid)" ]]; then
kill -9 "$server_pid"
fi
if [[ -n "$tunnel_pid" ]] && [[ -n "$(ps -p $tunnel_pid | grep $tunnel_pid)" ]]; then
kill -9 "$tunnel_pid"
fi
if [[ -n "$db_tunnel_pid" ]] && [[ -n "$(ps -p $db_tunnel_pid | grep $db_tunnel_pid)" ]]; then
kill -9 "$db_tunnel_pid"
fi
cleanup_complete=1
echo "$PROGNAME: goodbye"
}
error_exit() {
# ----------------------------------------------------------------
# Function for exit due to fatal program error
# Accepts 1 argument:
# string containing descriptive error message
# ----------------------------------------------------------------
read -r line file <<<"$(caller)"
echo "" 1>&2
echo "ERROR: file $file, line $line" 1>&2
if [ ! "$1" ]; then
sed "${line}q;d" "$file" 1>&2
else
echo "${1:-"Unknown Error"}" 1>&2
fi
echo "" 1>&2
# TODO: should error_exit call cleanup?
# The EXIT trap already calls cleanup, so
# calling it here means calling it twice.
if [ ! $cleanup_complete ]; then
cleanup
fi
exit 1
}
# Poll $OUTPUT_FILE for the first http(s) URL printed by the Jupyter server.
# Retries (recursively) up to ATTEMPT_LIMIT times, sleeping WAIT_SEC seconds
# between attempts; calls error_exit if no URL ever appears.
# Arguments: $1 - current attempt number (defaults to 1)
# Outputs:   the URL on stdout; progress chatter on stderr
get_server_url() {
local attempt_iteration=${1:-1}
local ATTEMPT_LIMIT=5
local WAIT_SEC=3
echo "Waiting for server to start (Attempt $attempt_iteration/$ATTEMPT_LIMIT)..." 1>&2
sleep $WAIT_SEC
# echo the captured server log so the user can watch progress
cat "$OUTPUT_FILE" 1>&2
# first token in the log that looks like an http URL
local url
url="$(perl -ne "print if s/(^|.*?[ \"])(http.*?)([\" >].*|$)/\$2/" "$OUTPUT_FILE" | head -n 1)"
if [[ -z "$url" ]]; then
if [[ $attempt_iteration -eq $ATTEMPT_LIMIT ]]; then
error_exit "No server URL found"
else
# fix: use builtin arithmetic instead of piping through bc (bc may be absent)
get_server_url $((attempt_iteration + 1))
fi
else
echo "$url"
fi
}
trap error_exit ERR
trap cleanup EXIT INT QUIT TERM
if [[ "$1" == '-h' ]] || [[ "$1" == '--help' ]]; then
echo "$HELP_MSG"
exit 0
fi
# if input has a colon, assume it's a remote target
if [[ "$1" == *":"* ]]; then
JUPYTER_SERVER_ADDRESS="${1%:*}"
TARGET_DIR="${1##*:}"
else
TARGET_DIR="$1"
fi
DB_SERVER_ADDRESS="wikipathways-workspace.gladstone.internal"
TARGET_DIR=${TARGET_DIR:-'./'}
if [[ -z "$JUPYTER_SERVER_ADDRESS" ]]; then
if jupyter-lab --version > /dev/null 2>&1; then
echo "Launching locally..."
INITIAL_DIR="$(pwd)"
cd "$TARGET_DIR" || error_exit "Could not change to $TARGET_DIR"
nohup $SERVER_START_CMD > "$OUTPUT_FILE" &
server_pid=$!
cd "$INITIAL_DIR" || error_exit "Could not return to $INITIAL_DIR"
else
error_exit "$HELP_MSG"
fi
else
SERVER_IS_REMOTE=1
nohup ssh "$JUPYTER_SERVER_ADDRESS" -tt "cd $TARGET_DIR && direnv exec ./ $SERVER_START_CMD || exit 1" > "$OUTPUT_FILE" &
server_pid=$!
sleep 3
fi
url=$(get_server_url)
port="$(echo "$url" | sed -E 's/.*:([0-9]{4}).*/\1/')"
if [[ -z "$port" ]]; then
error_exit "No port found"
fi
if [ $SERVER_IS_REMOTE ]; then
echo "Starting tunnel..."
nohup ssh -N -L $port:localhost:$port "$JUPYTER_SERVER_ADDRESS" > /dev/null &
tunnel_pid=$!
sleep 3
fi
if xdg-open --version > /dev/null 2>&1; then
xdg-open "$url"
elif which open > /dev/null 2>&1; then
open "$url"
else
echo "Warning: Not sure how to open browser."
echo "Here's the server output:"
cat "$OUTPUT_FILE"
fi
## TODO: if there's a password prompt, the following code will miss it.
## TODO: get the db tunnel working. The following code doesn't work (at least for a local jupyter server):
#echo "Starting db tunnel..."
## access via URI like postgres://localhost:3333/pfocr_plus
#if [ $SERVER_IS_REMOTE ]; then
# echo "TODO: look into how to create db tunnel"
# # maybe it's something like this?
# nohup ssh -N -L 3333:"$JUPYTER_SERVER_ADDRESS":5432 "$DB_SERVER_ADDRESS" > /dev/null &
#else
# nohup ssh -N -L 3333:localhost:5432 "$DB_SERVER_ADDRESS" > /dev/null &
#fi
#
# I'd like to do this the same way as I connect to a remote jupyter server,
# but that isn't working.
echo "Ctrl-d to close db tunnel"
if [ $SERVER_IS_REMOTE ]; then
ssh "$JUPYTER_SERVER_ADDRESS" -t "ssh -N -L 3333:localhost:5432 \"$DB_SERVER_ADDRESS\""
else
ssh -N -L 3333:localhost:5432 "$DB_SERVER_ADDRESS"
fi
#db_tunnel_pid=$!
echo ""
read -rp "To quit, hit Enter"
| true |
a837ef4307029b5bd8c6a5c2dbeb2c3e1ec2854f
|
Shell
|
cracker0dks/ezLicode
|
/initDockerLicode.sh
|
UTF-8
| 6,139 | 3.65625 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
SCRIPT=`pwd`/$0
ROOT=/opt/licode
SCRIPTS="$ROOT"/scripts
BUILD_DIR="$ROOT"/build
DB_DIR="$BUILD_DIR"/db
EXTRAS="$ROOT"/extras
NVM_CHECK="$ROOT"/scripts/checkNvm.sh
# Parse command-line flags into global component-enable booleans.
# With no arguments every component is started (ERIZODEBUG stays false).
# With arguments, only the named components are set to true.
# NOTE(review): when flags are given, unmentioned flags remain unset
# (empty), so later '== "true"' comparisons treat them as disabled.
parse_arguments(){
if [ -z "$1" ]; then
echo "No parameters -- starting everything"
MONGODB=true
RABBITMQ=true
NUVE=true
ERIZOCONTROLLER=true
ERIZOAGENT=true
BASICEXAMPLE=true
ERIZODEBUG=false
else
# consume flags one at a time until the argument list is exhausted
while [ "$1" != "" ]; do
case $1 in
"--mongodb")
MONGODB=true
;;
"--rabbitmq")
RABBITMQ=true
;;
"--nuve")
NUVE=true
;;
"--erizoController")
ERIZOCONTROLLER=true
;;
"--erizoAgent")
ERIZOAGENT=true
;;
"--erizoDebug")
ERIZODEBUG=true
;;
"--basicExample")
BASICEXAMPLE=true
;;
esac
shift
done
fi
}
run_nvm() {
echo "Running NVM"
. $ROOT/build/libdeps/nvm/nvm.sh
}
# Abort the whole script with status 1 when the supplied code is exactly 1.
# Any other value -- including other non-zero codes -- is a no-op, matching
# the original behavior.
check_result() {
  case "$1" in
    1) exit 1 ;;
  esac
}
run_rabbitmq() {
echo "Starting Rabbitmq"
rabbitmq-server -detached
sleep 3
}
# Start mongod (if not already running), create the Nuve "superService"
# document with a random key, then template licode_config.js with the
# generated service id/key.
run_mongo() {
if ! pgrep mongod; then
echo [licode] Starting mongodb
if [ ! -d "$DB_DIR" ]; then
mkdir -p "$DB_DIR"/db
fi
# repair first, then start detached with its log under $BUILD_DIR
mongod --repair --dbpath $DB_DIR
mongod --nojournal --dbpath $DB_DIR --logpath $BUILD_DIR/mongo.log --fork
sleep 10
else
echo [licode] mongodb already running
fi
# pull the database URL out of the default config: the value is the
# second double-quoted field on the config.nuve.dataBaseURL line
dbURL=`grep "config.nuve.dataBaseURL" $SCRIPTS/licode_default.js`
dbURL=`echo $dbURL| cut -d'"' -f 2`
dbURL=`echo $dbURL| cut -d'"' -f 1`
echo [licode] Creating superservice in $dbURL
# $RANDOM becomes the superservice key
mongo $dbURL --eval "db.services.insert({name: 'superService', key: '$RANDOM', rooms: []})"
SERVID=`mongo $dbURL --quiet --eval "db.services.findOne()._id"`
SERVKEY=`mongo $dbURL --quiet --eval "db.services.findOne().key"`
# strip the ObjectId("...") wrapper around the id
SERVID=`echo $SERVID| cut -d'"' -f 2`
SERVID=`echo $SERVID| cut -d'"' -f 1`
if [ -f "$BUILD_DIR/mongo.log" ]; then
echo "Mongo Logs: "
cat $BUILD_DIR/mongo.log
fi
echo [licode] SuperService ID $SERVID
echo [licode] SuperService KEY $SERVKEY
cd $BUILD_DIR
# substitute the generated id/key into the config template
replacement=s/_auto_generated_ID_/${SERVID}/
sed $replacement $SCRIPTS/licode_default.js > $BUILD_DIR/licode_1.js
replacement=s/_auto_generated_KEY_/${SERVKEY}/
sed $replacement $BUILD_DIR/licode_1.js > $ROOT/licode_config.js
rm $BUILD_DIR/licode_1.js
}
run_nuve() {
echo "Starting Nuve"
cd $ROOT/nuve/nuveAPI
node nuve.js &
sleep 5
}
run_erizoController() {
echo "Starting erizoController"
cd $ROOT/erizo_controller/erizoController
node erizoController.js &
}
run_erizoAgent() {
echo "Starting erizoAgent"
cd $ROOT/erizo_controller/erizoAgent
if [ "$ERIZODEBUG" == "true" ]; then
node erizoAgent.js -d &
else
node erizoAgent.js &
fi
}
run_basicExample() {
echo "Starting basicExample"
sleep 5
cp $ROOT/nuve/nuveClient/dist/nuve.js $EXTRAS/basic_example/
cd $EXTRAS/basic_example
node basicServer.js &
}
parse_arguments $*
cd $ROOT/scripts
run_nvm
nvm use
if [ "$MONGODB" == "true" ]; then
run_mongo
fi
if [ "$RABBITMQ" == "true" ]; then
run_rabbitmq
fi
if [ ! -f "$ROOT"/licode_config.js ]; then
cp "$SCRIPTS"/licode_default.js "$ROOT"/licode_config.js
fi
if [ -n "$ERIZOCLIENTPORT" ]; then
echo "config.erizoController.port = '$ERIZOCLIENTPORT';" >> /opt/licode/licode_config.js
fi
if [ -n "$MAXPROCESSES" ]; then
echo "config.erizoAgent.maxProcesses = $MAXPROCESSES;" >> /opt/licode/licode_config.js
fi
if [ -n "$PRERUNPROCESSES" ]; then
echo "config.erizoAgent.prerunProcesses = $PRERUNPROCESSES;" >> /opt/licode/licode_config.js
fi
if [ ! -f "$ROOT"/rtp_media_config.js ]; then
cp "$SCRIPTS"/rtp_media_config_default.js "$ROOT"/rtp_media_config.js
fi
if [ -n "$RECORDINGPATH" ]; then
echo "config.erizoController.recording_path = '$RECORDINGPATH';" >> /opt/licode/licode_config.js
fi
if [ "$NUVE" == "true" ]; then
run_nuve
fi
if [ "$ERIZOCONTROLLER" == "true" ]; then
echo "config.erizoController.publicIP = '$PUBLIC_IP';" >> /opt/licode/licode_config.js
run_erizoController
fi
if [ "$ERIZOAGENT" == "true" ]; then
echo "config.erizoAgent.publicIP = '$PUBLIC_IP';" >> /opt/licode/licode_config.js
echo "config.erizo.minport = '$MIN_PORT';" >> /opt/licode/licode_config.js
echo "config.erizo.maxport = '$MAX_PORT';" >> /opt/licode/licode_config.js
if [ -n "$LICODEHOSTNAME" ]; then
echo "config.erizoController.hostname = '$LICODEHOSTNAME';" >> /opt/licode/licode_config.js
fi
if [ -n "$MAXVIDEOBW" ]; then
echo "SET MAX VIDEO BW"
echo "config.erizoController.defaultVideoBW = $MAXVIDEOBW;" >> /opt/licode/licode_config.js
echo "config.erizoController.maxVideoBW = $MAXVIDEOBW;" >> /opt/licode/licode_config.js
fi
if [ -n "$SSL" ]; then
echo "SET SSL"
echo "config.erizoController.ssl = true;" >> /opt/licode/licode_config.js
echo "config.erizoController.listen_ssl = true;" >> /opt/licode/licode_config.js
fi
if [ -n "$STUNTURN" ]; then
echo "SET MAX VIDEO BW"
echo "config.erizoController.iceServers = $STUNTURN;" >> /opt/licode/licode_config.js
fi
#GUID: http://lynckia.com/licode/nginx-dep.html
if [ -n "$REVERSEPROXY" ]; then
echo "SET REVERSEPROXY"
echo "config.erizoController.ssl = true;" >> /opt/licode/licode_config.js
echo "config.erizoController.listen_ssl = false;" >> /opt/licode/licode_config.js
echo "config.erizoController.listen_port = 8080;" >> /opt/licode/licode_config.js
echo "config.erizoController.port = 443;" >> /opt/licode/licode_config.js
echo "config.erizoController.ssl = true;" >> /opt/licode/licode_config.js
fi
run_erizoAgent
fi
if [ "$BASICEXAMPLE" = "true" ]; then
if [ -n "$DEV" ]; then
echo "\n\n>>>THIS IS THE DEV START<<<!\n\n"
if [ -f temp2.txt ]; then
echo "ex" > temp2.txt
sleep 2
fi
echo "\n\n-------------------------\n>>>>RDY FOR DEVELOPMENT!<<<<\n-------------------------\n\n"
else
run_basicExample
fi
fi
wait
| true |
6960dc7438b6a465c642650967dca3f4aa8557e8
|
Shell
|
automotiveMastermind/condo
|
/scripts/install.sh
|
UTF-8
| 8,600 | 3.890625 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
CONDO_ROOT="$HOME/.am/condo"
CONDO_LOG="$CONDO_ROOT/condo.log"
CONDO_VERBOSITY='normal'
CONDO_CLR_INFO='\033[1;33m' # BRIGHT YELLOW
CONDO_CLR_FAILURE='\033[1;31m' # BRIGHT RED
CONDO_CLR_SUCCESS="\033[1;32m" # BRIGHT GREEN
CONDO_CLR_CLEAR="\033[0m" # DEFAULT COLOR
# Print the message in the success color to stdout and append a
# "log : <message>" line to $CONDO_LOG.
__condo-success() {
local msg="$*"
echo -e "${CONDO_CLR_SUCCESS}${msg}${CONDO_CLR_CLEAR}"
echo "log : ${msg}" >> $CONDO_LOG
}
# Print the message in the failure color to stdout and append an
# "err : <message>" line to $CONDO_LOG.
__condo-failure() {
echo -e "${CONDO_CLR_FAILURE}$@${CONDO_CLR_CLEAR}"
echo "err : $@" >> $CONDO_LOG
}
# Print an informational message (and log it) unless CONDO_VERBOSITY is
# 'quiet' or 'minimal'.
__condo-info() {
if [[ "$CONDO_VERBOSITY" != 'quiet' && "$CONDO_VERBOSITY" != 'minimal' ]]; then
echo -e "${CONDO_CLR_INFO}$@${CONDO_CLR_CLEAR}"
echo "log : $@" >> $CONDO_LOG
fi
}
__condo-install-help() {
echo 'Installation for Condo Build System'
echo
echo 'Usage:'
echo ' ./condo.sh install [arguments] [common-options]'
echo
echo 'Common options:'
echo ' -h|-?|--help print this help information'
echo ' -l|--log location of the installation log'
echo ' DEFAULT: $CONDO_INSTALL_DIR/condo.log'
echo
echo 'Arguments:'
echo ' -nc|--no-color do not emit color to output, which is useful for capture'
echo ' -id|--install-dir location in which to install condo'
echo ' DEFAULT: $HOME/.am/condo'
echo ' -r|--reset reinstall condo'
echo ' -u|--update update to the latest version of condo'
echo ' NOTE: this argument is effected by the branch argument; the version will be determined by the latest commit available on the specified branch.'
echo ' -b|--branch install condo from the specified branch'
echo ' DEFAULT: master'
echo ' -s|--source install condo from the specified source path (local)'
echo ' --uri install condo from the specified URI'
echo
echo 'EXAMPLE:'
echo ' ./condo.sh install --branch feature/cli --no-color --install-dir $HOME/.condo --log $HOME/condo.log'
echo ' - installs condo from the `feature/cli` branch'
echo ' - no color will be emitted to the console (either STDOUT or STDERR)'
echo ' - condo will be installed to `$HOME/.condo`'
echo ' - the installation log will be saved to $HOME/condo-install.log'
}
__condo-help() {
echo 'Condo Build System'
echo
echo 'Usage:'
echo ' ./condo.sh [host-options] [command] [arguments] [common-options]'
echo
echo 'Host options:'
echo ' --version display version number'
echo ' --info display info about the host and condo build system'
echo
echo 'Arguments:'
echo ' [host-options] options passed to the host (dotnet)'
echo ' [command] the command to execute'
echo ' [arguments] options passed to the `install` command'
echo ' [common-options] options common to all commands'
echo
echo 'Common options:'
echo ' -h|-?|--help print this help information'
echo ' -l|--log location of the installation log'
echo ' DEFAULT: $CONDO_INSTALL_DIR/condo.log'
echo ' -nc|--no-color do not emit color to output, which is useful for capture'
echo
echo 'Commands:'
echo ' install installs condo on the local system'
echo ' update updates condo to the latest version'
echo ' init initializes condo in the current directory'
echo ' build uses condo to execute the build target (Build)'
echo ' test uses condo to execute the test target (Test)'
echo ' publish uses condo to execute the publish target (Publish)'
echo ' ci uses condo to execute the continuous integration target (CI)'
echo ' clean uses condo to execute the clean target'
echo
echo 'Advanced commands:'
echo ' nuget uses condo to manipulate nuget feeds and credentials'
echo ' conventions uses condo to create new conventions'
echo ' config edit the condo configuration'
}
# Install the condo build system into $CONDO_ROOT.
#
# Parses installer arguments, optionally resets an existing install, then
# copies condo either from a local source tree or from a GitHub tarball
# (with up to 5 download retries).
# Globals read:    CONDO_ROOT, CONDO_LOG, GH_TOKEN (optional)
# Globals written: CONDO_ROOT, CONDO_LOG, DOTNET_INSTALL_DIR, CONDO_TEMP,
#                  CONDO_TAR, CONDO_EXTRACT, CONDO_SOURCE, retries
__condo-install() {
  # get the current path (fix: was "$pwd", an unset variable -> empty)
  local CURRENT_PATH="$PWD"
  # find the script path
  local ROOT_PATH=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
  # setup well-known condo paths
  local CLI_ROOT="$CONDO_ROOT/cli"
  local SRC_ROOT="$CONDO_ROOT/src"
  local CONDO_SHELL="$SRC_ROOT/src/AM.Condo/Scripts/condo.sh"
  # change to the root path (restored by popd at the end)
  pushd $ROOT_PATH 1>/dev/null
  # write a newline for separation
  echo
  # help short-circuits before any other processing
  case $1 in
    -h|-\?|--help)
      __condo-help
      exit 0
      ;;
  esac
  # continue testing for arguments
  # NOTE(review): '-l' is claimed by both --local and --log; the first
  # matching arm (--local) wins, so the log path is only settable via --log.
  while (( $# > 0 )); do
    case $1 in
      -h|-\?|--help)
        __condo-help
        exit 0
        ;;
      -r|--reset)
        local CONDO_RESET=1
        ;;
      -l|--local)
        local CONDO_LOCAL=1
        ;;
      -u|--update)
        local CONDO_UPDATE=1
        ;;
      --uri)
        local CONDO_URI=$2
        shift
        ;;
      -b|--branch)
        local CONDO_BRANCH=$2
        shift
        ;;
      -s|--source)
        local CONDO_SOURCE=$2
        shift
        ;;
      -id|--install-dir)
        # fix: the option's value is $2 ($1 is the flag itself)
        CONDO_ROOT="$2"
        if [ -z "${CONDO_LOG_SET:-}" ]; then
          CONDO_LOG="$CONDO_ROOT/condo-install.log"
        fi
        shift
        ;;
      -l|--log)
        # fix: the option's value is $2 ($1 is the flag itself)
        # (the '-l' alternative here is unreachable -- see NOTE above)
        CONDO_LOG="$2"
        local CONDO_LOG_SET=1
        shift
        ;;
      -nc|--no-color)
        local CONDO_CLR_INFO=
        local CONDO_CLR_FAILURE=
        local CONDO_CLR_CLEAR=
        # NOTE(review): break stops parsing; anything after -nc is ignored
        break
        ;;
    esac
    shift
  done
  # wipe an existing install when --reset was given
  if [[ -d "$CONDO_ROOT" && "$CONDO_RESET" = "1" ]]; then
    __condo-info 'Resetting condo build system...'
    rm -rf "$CONDO_ROOT"
  fi
  # bail out early when condo is already present
  if [ -d "$CLI_ROOT" ]; then
    echo 'Condo was already installed. Use `--reset` to force remove.'
    return 0
  fi
  # defaults for anything not supplied on the command line
  if [ -z "${DOTNET_INSTALL_DIR:-}" ]; then
    export DOTNET_INSTALL_DIR=~/.dotnet
  fi
  if [ -z "${CONDO_BRANCH:-}" ]; then
    CONDO_BRANCH='master'
  fi
  if [ -z "${CONDO_URI:-}" ]; then
    CONDO_URI="https://github.com/automotivemastermind/condo/tarball/$CONDO_BRANCH"
  fi
  if [ "$CONDO_LOCAL" = "1" ]; then
    CONDO_SOURCE="$ROOT_PATH"
  fi
  if [ ! -d "$SRC_ROOT" ]; then
    __condo-info "Creating path for condo at $CONDO_ROOT..."
    mkdir -p $SRC_ROOT
    if [ ! -z $CONDO_SOURCE ]; then
      # install from a local source tree
      __condo-info "Using condo build system from $CONDO_SOURCE..."
      cp -r $CONDO_SOURCE/* $SRC_ROOT/
      cp -r $CONDO_SOURCE/template $CONDO_ROOT
    else
      # download a tarball of the requested branch
      local CURL_OPT='-s'
      if [ ! -z "${GH_TOKEN:-}" ]; then
        # fix: was single-quoted, so $CURL_OPT/$GH_TOKEN were never expanded
        # NOTE(review): CURL_OPT is not actually passed to wget/curl below
        CURL_OPT="$CURL_OPT -H \"Authorization: token $GH_TOKEN\""
      fi
      __condo-info "Using condo build system from $CONDO_URI..."
      CONDO_TEMP=$(mktemp -d)
      CONDO_TAR="$CONDO_TEMP/condo.tar.gz"
      retries=5
      # try wget first, then curl; retry up to 5 times with a 10s pause
      until (wget -O $CONDO_TAR $CONDO_URI 2>/dev/null || curl -o $CONDO_TAR --location $CONDO_URI 2>/dev/null); do
        __condo-failure "Unable to retrieve condo: '$CONDO_URI'"
        if [ "$retries" -le 0 ]; then
          exit 1
        fi
        retries=$((retries - 1))
        __condo-failure "Retrying in 10 seconds... attempts left: $retries"
        sleep 10s
      done
      CONDO_EXTRACT="$CONDO_TEMP/extract"
      CONDO_SOURCE="$CONDO_EXTRACT"
      mkdir -p $CONDO_EXTRACT
      # the tarball has a single top-level directory; strip it on extract
      tar xf $CONDO_TAR --strip-components 1 --directory $CONDO_EXTRACT
      cp -r $CONDO_SOURCE/* $SRC_ROOT/
      cp -r $CONDO_SOURCE/template $CONDO_ROOT
      rm -Rf $CONDO_TEMP
    fi
  fi
  # ensure that the condo shell is executable
  chmod a+x $CONDO_SHELL
  # write a newline for separation
  echo
  # change to the original path
  popd
}
__condo-install "$@"
# capture the current exit code
EXIT_CODE=$?
# exit
exit $EXIT_CODE
| true |
e80cd69f6e2388cba11c8e23de9580cfb5e484a4
|
Shell
|
vonKloppen/vmBuilder-bash
|
/buildVM.sh
|
UTF-8
| 7,769 | 3.765625 | 4 |
[] |
no_license
|
#!/bin/env bash
##### FOLDER CONFIGURATION #####
confFOLDER="./conf"
binFOLDER="./bin"
vmFOLDER="./vm"
ksFOLDER="$confFOLDER/kickstarts"
profFOLDER="$confFOLDER/profiles"
scrFOLDER="$confFOLDER/scripts"
keyFOLDER="$confFOLDER/sshkeys"
tmplFOLDER="$confFOLDER/templates"
##### FILE CONFIGURATION #####
defFILE="defaults.conf"
vcFILE="vcenter.conf"
vmTMPLFILE="vm.json"
##### ENVIRONMENT CONFIGURATION #####
user=`echo $USER`
date=`date "+%F %R"`
##### PURPOSE SPECIFIC CONFIGURATION #####
purDISKmongo=2
purDISKmysql=2
purDISKpsql=2
purDISKk8s=2
purDISKkafka=4
purDISKweb=2
##### OTHER #####
vmPURPOSELIST="generic,mongo,mysql,psql,k8s,kafka,web"
vmOSTYPELIST="centos7,centos8"
##### COLORS #####
cINPUT="\e[32m"
cOUTPUT="\e[34m"
cOTHER="\e[33m"
cWARN="\e[31m"
source "$confFOLDER"/"$defFILE"
clear
echo -e "$cOTHER""VM Builder script"
echo -e "CopyLeft Marian\n"
echo -e "$cOUTPUT""Please enter machine specific parameters."
read -p "$(echo -e $cOUTPUT"Hostname: "$cINPUT)" -i "$vmHOSTNAME" -e userInput
vmHOSTNAME="$userInput"
vmcFOLDER=""$vmFOLDER"/"$vmHOSTNAME""
if [[ -f "$vmcFOLDER"/"$vmHOSTNAME" ]]; then
echo -e "\n$cWARN""Configuration for "$vmHOSTNAME" already exists."
read -p "$(echo -e "Do you want to reuse that configuration {""$cINPUT"y/n"$cWARN""}? : "$cINPUT)" -i "n" -e userInput
if [[ "$userInput" == "y" ]]; then
screen -S "$vmHOSTNAME" -d -m "$binFOLDER"/packer build -var-file="$vmcFOLDER"/"$vmHOSTNAME" -var-file="$confFOLDER"/"$vcFILE" "$vmcFOLDER"/"$vmHOSTNAME.json"
#"$binFOLDER"/packer build -var-file="$vmcFOLDER"/"$vmHOSTNAME" -var-file="$confFOLDER"/"$vcFILE" "$vmcFOLDER"/"$vmHOSTNAME.json"
echo -e "\n"$cOUTPUT"Packer started in screen."
echo -e "Enter"$cINPUT" \"screen -r "$vmHOSTNAME"\" "$cOUTPUT"to attach to it."
exit 0
fi
fi
echo -e "\n"
read -p "$(echo -e $cOUTPUT"OS type {""$cINPUT""$vmOSTYPELIST""$cOUTPUT""}: "$cINPUT)" -i "$vmOSTYPE" -e userInput
vmOSTYPE="$userInput"
read -p "$(echo -e $cOUTPUT"Select machine purpose {""$cINPUT""$vmPURPOSELIST""$cOUTPUT""}: "$cINPUT)" -i "generic" -e userInput
vmPurpose="$userInput"
if [[ ! -f "$ksFOLDER"/"$vmOSTYPE"-"$vmPurpose".cfg ]]; then
echo -e "$cWARN Kickstart for $cINPUT "$vmPurpose" $cWARN doesn't exists"
exit 1
fi
if [[ ! -f "$profFOLDER"/"$vmOSTYPE"-"$vmPurpose".json ]]; then
echo -e "$cWARN Profile for $cINPUT "$vmPurpose" $cWARN doesn't exists"
exit 1
fi
case "$vmPurpose" in
"mongo") vmDISK=$(( $vmDISK + $purDISKmongo )) ;;
"mysql") vmDISK=$(( $vmDISK + $purDISKmysql )) ;;
"psql") vmDISK=$(( $vmDISK + $purDISKpsql )) ;;
"k8s") vmDISK=$(( $vmDISK + $purDISKk8s )) ;;
"kafka") vmDISK=$(( $vmDISK + $purDISKkafka )) ;;
"web") vmDISK=$(( $vmDISK + $purDISKweb )) ;;
esac
echo -e "\n"
read -p "$(echo -e $cOUTPUT"CPU (Cores): "$cINPUT)" -i "$vmCPU" -e userInput
vmCPU="$userInput"
read -p "$(echo -e $cOUTPUT"RAM (GB): "$cINPUT)" -i "$vmRAM" -e userInput
vmRAM=$(($userInput*1024))
read -p "$(echo -e $cOUTPUT"Disk (GB): "$cINPUT)" -i "$vmDISK" -e userInput
vmDISK=$(($userInput*1024))
read -p "$(echo -e $cOUTPUT"VLAN: "$cINPUT)" -i "$vmVLAN" -e userInput
vmVLAN="$userInput"
read -p "$(echo -e $cOUTPUT"Notes: "$cINPUT)" -i "$vmNOTES" -e userInput
vmNOTES="$userInput"
read -p "$(echo -e $cOUTPUT"Network configuration (""$cINPUT"d"$cOUTPUT"")hcp,(""$cINPUT"s"$cOUTPUT"")tatic: "$cINPUT)" -i "d" -e userInput
if [[ "$userInput" = "s" ]]; then
vmNETWORK="static"
read -p "$(echo -e $cOUTPUT"IP: "$cINPUT)" -i "$vmIP" -e userInput
vmIP="$userInput"
read -p "$(echo -e $cOUTPUT"Netmask: "$cINPUT)" -i "$vmNETMASK" -e userInput
vmNETMASK="$userInput"
read -p "$(echo -e $cOUTPUT"Gateway: "$cINPUT)" -i "$vmGATEWAY" -e userInput
vmGATEWAY="$userInput"
read -p "$(echo -e $cOUTPUT"Nameservers: "$cINPUT)" -i "$vmNAMESERVERS" -e userInput
vmNAMESERVERS="$userInput"
fi
echo -e "\n$cOUTPUT""Please enter VCenter specific parameters\n"
read -p "$(echo -e $cOUTPUT"Datacenter: "$cINPUT)" -i "$vcDATACENTER" -e userInput
vcDATACENTER="$userInput"
read -p "$(echo -e $cOUTPUT"Cluster: "$cINPUT)" -i "$vcCLUSTER" -e userInput
vcCLUSTER="$userInput"
read -p "$(echo -e $cOUTPUT"Host: "$cINPUT)" -i "$vcHOST" -e userInput
vcHOST="$userInput"
read -p "$(echo -e $cOUTPUT"Datastore: "$cINPUT)" -i "$vcDATASTORE" -e userInput
vcDATASTORE="$userInput"
read -p "$(echo -e $cOUTPUT"Folder: "$cINPUT)" -i "$vcFOLDER" -e userInput
vcFOLDER="$userInput"
echo -e "\n"
read -p "$(echo -e $cOUTPUT"Use (""$cINPUT"l"$cOUTPUT"")ocal or (""$cINPUT"r"$cOUTPUT"")emote kickstart file?: "$cINPUT)" -i "r" -e userInput
# Optionally collect remote-copy details for the generated kickstart file
# (triggered when the user answered "r" to a prompt earlier in the script).
if [[ "$userInput" = "r" ]]; then
	ksREMOTE=1
	read -p "$(echo -e $cOUTPUT"Remote user: "$cINPUT)" -i "$ksREMOTEUSER" -e userInput
	ksREMOTEUSER="$userInput"
	read -p "$(echo -e $cOUTPUT"Remote server: "$cINPUT)" -i "$ksREMOTESERVER" -e userInput
	ksREMOTESERVER="$userInput"
	read -p "$(echo -e $cOUTPUT"Remote path: "$cINPUT)" -i "$ksREMOTEPATH" -e userInput
	ksREMOTEPATH="$userInput"
fi
# Kickstart file is named after the VM host name.
ksNAME="$vmHOSTNAME.cfg"
# NOTE(review): no-op self-assignment — presumably leftover; confirm before removing.
vmHOSTNAME="$vmHOSTNAME"
# Escape '/' so the values can be embedded safely in the sed replacements below.
vcFOLDER=`echo "$vcFOLDER" | sed 's:\/:\\\/:g'`
vmNOTES=`echo "$vmNOTES" |sed 's:\/:\\\/:g'`
mkdir -p "$vmcFOLDER"
# Copy the kickstart template and the packer var-file template into the per-VM folder.
cp "$ksFOLDER"/"$vmOSTYPE"-"$vmPurpose".cfg "$vmcFOLDER"/"$ksNAME"
cp "$tmplFOLDER"/"$vmTMPLFILE" "$vmcFOLDER"/"$vmHOSTNAME"
# Fill in the __PLACEHOLDER tokens of the packer var-file.
sed -i "s/__vmHOSTNAME/$vmHOSTNAME/g" "$vmcFOLDER"/"$vmHOSTNAME"
sed -i "s/__vmCPU/$vmCPU/g" "$vmcFOLDER"/"$vmHOSTNAME"
sed -i "s/__vmRAM/$vmRAM/g" "$vmcFOLDER"/"$vmHOSTNAME"
sed -i "s/__vmDISK/$vmDISK/g" "$vmcFOLDER"/"$vmHOSTNAME"
sed -i "s/__vmVLAN/$vmVLAN/g" "$vmcFOLDER"/"$vmHOSTNAME"
sed -i "s/__vmNOTES/$vmNOTES/g" "$vmcFOLDER"/"$vmHOSTNAME"
sed -i "s/__vcDATACENTER/$vcDATACENTER/g" "$vmcFOLDER"/"$vmHOSTNAME"
sed -i "s/__vcCLUSTER/$vcCLUSTER/g" "$vmcFOLDER"/"$vmHOSTNAME"
sed -i "s/__vcHOST/$vcHOST/g" "$vmcFOLDER"/"$vmHOSTNAME"
sed -i "s/__vcDATASTORE/$vcDATASTORE/g" "$vmcFOLDER"/"$vmHOSTNAME"
sed -i "s/__vcFOLDER/$vcFOLDER/g" "$vmcFOLDER"/"$vmHOSTNAME"
# Map the short OS name to the vSphere guest-OS identifier.
case "$vmOSTYPE" in
	"centos7") sed -i "s/__vmOSTYPE/centos7_64Guest/g" "$vmcFOLDER"/"$vmHOSTNAME";;
	"centos8") sed -i "s/__vmOSTYPE/centos8_64Guest/g" "$vmcFOLDER"/"$vmHOSTNAME";;
esac
# Static networking: substitute address details and uncomment the "#static "
# lines of the kickstart; otherwise uncomment the "#dhcp " lines.
if [[ "$vmNETWORK" = "static" ]]; then
	sed -i "s/__IPADDR/$vmIP/g" "$vmcFOLDER"/"$ksNAME"
	sed -i "s/__NETMASK/$vmNETMASK/g" "$vmcFOLDER"/"$ksNAME"
	sed -i "s/__GATEWAY/$vmGATEWAY/g" "$vmcFOLDER"/"$ksNAME"
	sed -i "s/__NAMESERVERS/$vmNAMESERVERS/g" "$vmcFOLDER"/"$ksNAME"
	sed -i "s/__HOSTNAME/$vmHOSTNAME/g" "$vmcFOLDER"/"$ksNAME"
	sed -i "s/#static\ //g" "$vmcFOLDER"/"$ksNAME"
else
	sed -i "s/#dhcp\ //g" "$vmcFOLDER"/"$ksNAME"
fi
# Root SSH public key for packer, plus a freshly generated crypted root password.
packerKey=`cat "$keyFOLDER"/"$sshKeyPub" | sed 's:\/:\\\/:g'`
export rootPass=`pwgen -s -1 16`
rootPassEnc=`python -c 'import crypt,os; print(crypt.crypt(os.environ["rootPass"]))' | sed 's:\/:\\\/:g'`
sed -i "s/__HOSTNAME/$vmHOSTNAME/g" "$vmcFOLDER"/"$ksNAME"
sed -i "s/__packerKey/$packerKey/g" "$vmcFOLDER"/"$ksNAME"
sed -i "s/__rootPass/$rootPassEnc/g" "$vmcFOLDER"/"$ksNAME"
# Optionally push the kickstart to the remote web server collected above.
if [[ "$ksREMOTE" = 1 ]]; then
	echo -e "$cWARN"
	scp "$vmcFOLDER"/"$ksNAME" "$ksREMOTEUSER"@"$ksREMOTESERVER":"$ksREMOTEPATH"
fi
# Build the per-VM packer profile and launch the build detached in screen.
cp "$profFOLDER"/"$vmOSTYPE"-"$vmPurpose".json "$vmcFOLDER"/"$vmHOSTNAME".json
sed -i "s/__KICKSTART/$ksNAME/g" "$vmcFOLDER"/"$vmHOSTNAME".json
screen -dmS "$vmHOSTNAME" "$binFOLDER"/packer build -var-file="$confFOLDER"/vcenter.conf -var-file="$vmcFOLDER"/"$vmHOSTNAME" "$vmcFOLDER"/"$vmHOSTNAME".json
#"$binFOLDER"/packer build -var-file="$confFOLDER"/vcenter.conf -var-file="$vmcFOLDER"/"$vmHOSTNAME" "$vmcFOLDER"/"$vmHOSTNAME".json
echo -e "\n"$cOTHER"Packer started"
echo -e "\n"$cOUTPUT"Enter"$cINPUT" \"screen -r "$vmHOSTNAME"\" "$cOUTPUT"to attach to it."
echo -e "\nroot password: ""$cWARN""$rootPass""\n"
| true |
e615d5d511783296b440c5e6b265b8b26b7c71b9
|
Shell
|
alucas/clojure-env
|
/.zshrc
|
UTF-8
| 3,521 | 2.765625 | 3 |
[
"MIT"
] |
permissive
|
# Set up the prompt
#autoload -Uz promptinit
#promptinit
#prompt bart
# Hook arrays used by zsh to run callbacks before commands / after cd.
typeset -ga preexec_functions
typeset -ga precmd_functions
typeset -ga chpwd_functions
setopt prompt_subst
# Cached git branch shown in the prompt; refreshed by the hooks below.
export __CURRENT_GIT_BRANCH=
# Get git infos
# Prints "[<branch>]" (with zsh color escapes) for the current repo, or
# nothing when the cwd is not inside a git work tree.
parse_git_branch() {
    git branch --no-color 2> /dev/null \
    | sed -e '/^[^*]/d' -e 's/* \(.*\)/[%F{green}\1%F{white}]/'
}
# When we execute some git command
preexec_functions+='zsh_preexec_update_git_vars'
# Refresh the cached branch only when the command being run mentions git.
zsh_preexec_update_git_vars() {
    case "$(history $HISTCMD)" in
        *git*)
        export __CURRENT_GIT_BRANCH="$(parse_git_branch)"
        ;;
    esac
}
# When we change directory
chpwd_functions+='zsh_chpwd_update_git_vars'
zsh_chpwd_update_git_vars() {
    export __CURRENT_GIT_BRANCH="$(parse_git_branch)"
}
# Used inside PS1 (prompt_subst) to emit the cached branch string.
get_git_prompt_info() {
    echo $__CURRENT_GIT_BRANCH
}
#TEST module vcs_info
#autoload -Uz vcs_info
#zstyle ':vcs_info:*' enable git cvs svn
#precmd(){
#	vcs_info 'prompt'
#}
#export PS1='[%F{blue}%n%F{white}@%F{red}%M%F{white}:%F{cyan}%~%F{white}]$(get_git_prompt_info)${vcs_info_msg_0_}$ '
# Prompt: [user@host:cwd][git-branch]$
export PS1='%F{white}[%F{blue}%n%F{white}@%F{red}%M%F{white}:%F{cyan}%~%F{white}]$(get_git_prompt_info)$ '
setopt histignorealldups sharehistory
# Use emacs keybindings even if our EDITOR is set to vi
bindkey -e
# Keep 1000 lines of history within the shell and save it to ~/.zsh_history:
HISTSIZE=1000
SAVEHIST=1000
HISTFILE=~/.zsh_history
# Use modern completion system
autoload -Uz compinit
compinit
zstyle ':completion:*' auto-description 'specify: %d'
zstyle ':completion:*' completer _expand _complete _correct _approximate
zstyle ':completion:*' format 'Completing %d'
zstyle ':completion:*' group-name ''
zstyle ':completion:*' menu select=2
eval "$(dircolors -b)"
zstyle ':completion:*:default' list-colors ${(s.:.)LS_COLORS}
zstyle ':completion:*' list-colors ''
zstyle ':completion:*' list-prompt %SAt %p: Hit TAB for more, or the character to insert%s
zstyle ':completion:*' matcher-list '' 'm:{a-z}={A-Z}' 'm:{a-zA-Z}={A-Za-z}' 'r:|[._-]=* r:|=* l:|=*'
zstyle ':completion:*' menu select=long
zstyle ':completion:*' select-prompt %SScrolling active: current selection at %p%s
zstyle ':completion:*' use-compctl false
zstyle ':completion:*' verbose true
zstyle ':completion:*:*:kill:*:processes' list-colors '=(#b) #([0-9]#)*=0=01;31'
zstyle ':completion:*:kill:*' command 'ps -u $USER -o pid,%cpu,tty,cputime,cmd'
# Alias
alias rm='rm -i'
alias lc='ls --color=auto'
alias ll='lc -lrt'
alias lla='lc -la'
alias gre='grep -rn --color'
alias grepsvn='gre --exclude-dir=.svn'
alias grepwaf='gre --exclude-dir=.waf*'
alias dofus='~/src/jeux/dofus/Dofus/launch-dofus.sh'
alias dofusbeta='~/liens/DofusBM/bin/Dofus'
alias dofusbot='LD_PRELOAD=~/src/jeux/dofus/bot4/libdofus.so dofus'
alias dofussniff='LD_PRELOAD=~/.usr/lib/libdofussniff.so dofus'
alias flashdecompiler='wine ~/.wine/drive_c/Program\ Files/Eltima\ Software/Flash\ Decompiler\ Trillix/FlashDecompiler.exe'
# SSH shortcuts for remote accounts.
export CREMI=alucas@jaguar.emi.u-bordeaux1.fr
export TUTO=tutorat@ssh.alwaysdata.com
export KANJI=kanji@ssh.alwaysdata.com
# Paths
export JAVA_HOME=$(readlink -f /usr/bin/java | sed "s:jre/bin/java::")
export CLASS_PATH=$CLASS_PATH:$(readlink -f /usr/bin/java | sed "s:jre/bin/java:lib:"):~/prog/idea-IC-139.1117.1/lib
export EDITOR=vim
export PYTHONPATH=~/.usr/lib/python
export PATH=$PATH:~:~/.usr/bin
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:~/.usr/lib
export CPATH=$CPATH:~/.usr/include
#print "Execution du script d'initialisation de zsh: OK"
export MANPAGER="/usr/bin/most -s"
[[ -s "$HOME/.rvm/scripts/rvm" ]] && . "$HOME/.rvm/scripts/rvm" # Load RVM function
| true |
6bb9eff1c0525ae692f3fa28d93b276c4d542a85
|
Shell
|
wangzan/script
|
/add_patch_from_list.sh
|
UTF-8
| 2,144 | 3.796875 | 4 |
[] |
no_license
|
#!/bin/bash
# Cherry-pick patches listed under $LOGS/patch into the current git repo,
# recording applied SHAs so repeated runs are idempotent.

# Base directory for patch lists and the run log; set via -l <dir>.
LOGS=

# Parse command-line options.
#   -h        print help
#   -l <dir>  patch data lives under <dir>/logs
# BUG FIX: the original spec "h:l:" declared -h as *requiring* an argument,
# so a bare "-h" was reported as an unknown option instead of printing help.
function getopts_args()
{
    while getopts hl: ARGS
    do
        case $ARGS in
            h)
                echo "help !!!!!!!!!!!!!!!!"
                ;;
            l)
                LOGS=$OPTARG/logs
                #echo "out path is $LOGS"
                ;;
            *)
                echo "Unknow option: $ARGS"
                ;;
        esac
    done
}
# Quote "$@" so arguments containing spaces survive word-splitting.
getopts_args "$@"
# Apply every commit listed in $LOGS/patch/<repo>.patch.list to the current
# repo with "git cherry-pick", skipping SHAs already recorded in the matching
# .patched.list file.  List lines are pipe-delimited with the SHA in field 2
# (e.g. "subject|<sha>|author").  On a failed pick the cherry-pick is aborted
# and the SHA is reported in $LOGS/repo_patch.log for manual handling.
function add_patch_from_list()
{
    # REPO_PATH with '/' flattened to '_' keys the per-repo list files.
    local REPO_TAG=`echo "$REPO_PATH" | awk '{gsub("\/","_");print $0;}'`
    local PATCH_LIST_FILE="$LOGS/patch/$REPO_TAG.patch.list"
    local PATCHED_LIST_FILE="$LOGS/patch/$REPO_TAG.patched.list"
    local PATCH_LOG="$LOGS/repo_patch.log"
    if [ -e "$PATCH_LIST_FILE" ] ; then
        echo -e "\n============ $(date '+%Y-%m-%d %T') ============" >> "$PATCH_LOG"
        echo -e "\npatch project name is $REPO_PATH \n" >> "$PATCH_LOG"
        while read LINE ; do
            SHA=`echo "$LINE" | awk -F "|" '{print $2;}'`
            # Skip malformed/blank list lines (no SHA in field 2).
            [ -n "$SHA" ] || continue
            if [ ! -e "$PATCHED_LIST_FILE" ] ; then
                echo -e "\n[patch backup files]\n" >> "$PATCHED_LIST_FILE"
            fi
            # BUG FIX: the original inner read-loop overwrote its flag on
            # every line, so only the *last* line of the patched list was
            # compared and earlier-applied SHAs were cherry-picked again.
            # grep the whole file instead.
            if ! grep -qF -- "$SHA" "$PATCHED_LIST_FILE" ; then
                echo -e "patching $SHA ... " >> "$PATCH_LOG"
                echo -e "$LINE" >> "$PATCHED_LIST_FILE"
                # git cherry-pick one by one so a failure can be rolled
                # back and reported precisely.
                git cherry-pick "$SHA"
                if [ $? != 0 ] ; then
                    echo -e "\n[error] happend in project $REPO_PATH \n" >> "$PATCH_LOG"
                    git cherry-pick --abort >> "$PATCH_LOG" 2>&1
                    echo -e "\nplease patch [ $SHA ] manuelly!!!" >> "$PATCH_LOG"
                    echo -e "Using: git cherry-pick $SHA in $REPO_PATH \n" >> "$PATCH_LOG"
                    return
                fi
            fi
        done < "$PATCH_LIST_FILE"
    fi
}
add_patch_from_list
| true |
4d17cae39a51917560916b7c0c731c71f10627a6
|
Shell
|
tourfl/Sogitest
|
/test/tests.bats
|
UTF-8
| 773 | 2.96875 | 3 |
[] |
no_license
|
# Bats tests for ./bin/encoder — a bit-stuffing encoder that appends the
# opposite bit after every run of five identical bits, ignoring any input
# character that is not '0' or '1'.
@test "ajout d'un 0 pour 5 bits valant 1" {
  run bash -c "echo 11111 | ./bin/encoder"
  echo $output
  [ "$status" -eq 0 ]
  [ "$output" = "111110" ]
}
@test "ajout d'un 1 pour 5 bits valant 0" {
  run bash -c "echo 00000 | ./bin/encoder"
  [ "$status" -eq 0 ]
  [ "$output" = "000001" ]
}
# Non-bit characters must be dropped without breaking the run count.
@test "caracteres non 0 ou 1 ignores" {
  run bash -c "echo 11a110 | ./bin/encoder"
  [ "$status" -eq 0 ]
  [ "$output" = "11110" ]
}
@test "traitement d'une chaine complexe" {
  run bash -c "echo 11111000011111000 | ./bin/encoder"
  [ "$status" -eq 0 ]
  [ "$output" = "1111100000111110000" ]
}
# End-to-end: encode a fixture file and compare with the expected output.
@test "traitement d'un fichier" {
  run bash -c "cat test/input.txt | ./bin/encoder"
  [ "$status" -eq 0 ]
  [ "$output" = `cat test/output.txt` ]
}
| true |
fb06ca540265102a1451c205512316aec31ae5f9
|
Shell
|
caiob-wmc/DSMTester
|
/canonical.sh
|
UTF-8
| 419 | 3.5 | 4 |
[] |
no_license
|
#!/bin/bash
# Report the canonical-URL declaration of the page at $1: first look for an
# HTML <link rel="canonical"> tag in the body, then for a Link header in the
# HTTP response headers.

# -s silences curl's progress meter, which 2>&1 would otherwise feed into
# grep; "$1" is quoted so URLs with special characters survive.
htmlCanonical=$(curl -s "$1" 2>&1 | grep -in "<link.*rel=.canonical.")
# BUG FIX: the original pattern was "\link..." — the stray backslash made
# grep look for a literal "\l" sequence on some implementations.
httpCanonical=$(curl -s -I "$1" 2>&1 | grep -in "link.*rel=.canonical.")

if [ -z "$htmlCanonical" ]
then
    if [ -z "$httpCanonical" ]
    then
        echo "No canonical declarations found"
    else
        echo "Canonical Link in HTTP header:"
        echo "$httpCanonical"
    fi
else
    echo "Canonical in <link> tag:"
    echo "$htmlCanonical"
fi
| true |
1c36560d8d8b66171a977a727141056902901fe6
|
Shell
|
eugene-tarassov/vivado-risc-v
|
/mk-sd-image
|
UTF-8
| 3,302 | 3.796875 | 4 |
[] |
no_license
|
#!/bin/bash
# Build a bootable RISC-V Debian SD-card image: fetch the Debian rootfs,
# build the bootloader and kernel if missing, then partition, format and
# populate a loopback-mounted disk image.  Requires sudo for loop/mount ops.
set -e
cd -P `dirname $0`
# Defaults, overridable via the flags parsed below (sizes are in MiB).
SD_SIZE=1500
SD_BOOT_SIZE=64
SD_SWAP_SIZE=
SD_IMG=debian-riscv64/debian-riscv64.sd.img
RSYNC_BOOT=
# -r <dest> rsync the boot partition after the build; -s/-b/-p sizes; -i image path.
while getopts r:s:b:p:i: name ; do
    case $name in
    r)
        RSYNC_BOOT=$OPTARG
        ;;
    s)
        SD_SIZE=$OPTARG
        ;;
    b)
        SD_BOOT_SIZE=$OPTARG
        ;;
    p)
        SD_SWAP_SIZE=$OPTARG
        ;;
    i)
        SD_IMG=$OPTARG
        ;;
    esac
done
if [ ! -d linux-stable/kernel ] ; then
    make update-submodules
fi
# Kernel version tag names the extlinux image/initrd files below.
KERNEL_VER=$(cd linux-stable && git describe --exact-match --abbrev=0)
# --- Retrive Debian disk image ---
make debian-riscv64/initrd debian-riscv64/rootfs.tar.gz
# --- Build BBL and Linux ---
if [ ! -f workspace/boot.elf ] ; then
    make bootloader
fi
if [ ! -f linux-stable/arch/riscv/boot/Image ] ; then
    make linux
fi
# --- build SD card image ---
# Detach any mounts/loop devices left over from a previous run under this dir.
mount -l | grep `pwd`/ | while IFS=' ' read -ra LINE ; do
    sudo umount ${LINE[0]}
done
losetup -a | grep `pwd`/ | while IFS=':' read -ra LINE ; do
    sudo losetup -d ${LINE[0]}
done
# If anything is still attached at this point we cannot proceed safely.
losetup -a | grep `pwd`/ | while IFS=':' read -ra LINE ; do
    echo "Cannot detach ${LINE[*]}"
    exit 1
done
# Allocate the raw image and attach it to a free loop device.
rm -f $SD_IMG
dd if=/dev/zero of=$SD_IMG bs=1M count=$SD_SIZE
sudo losetup -f $SD_IMG
SD_LOOP=$(
losetup -a | grep `pwd`/ | while IFS=':' read -ra LINE ; do
    echo ${LINE[0]}
done
)
echo "SD image device: ${SD_LOOP}"
# Two partitions: a bootable FAT (type 0xE) boot partition and the rootfs.
sudo sfdisk --no-tell-kernel ${SD_LOOP} <<-__EOF__
1M,${SD_BOOT_SIZE}M,0xE,*
,,,-
__EOF__
sudo partprobe ${SD_LOOP}
# Fixed UUID so the kernel cmdline in extlinux.conf can reference root by UUID.
UUID=68d82fa1-1bb5-435f-a5e3-862176586eec
sudo mkfs.vfat -F 16 -n BOOT ${SD_LOOP}p1
sudo mkfs.ext4 -E nodiscard -L rootfs -U $UUID ${SD_LOOP}p2
# Boot menu consumed by the extlinux/syslinux-style loader.
cat >debian-riscv64/extlinux.conf <<EOF
menu title RISC-V Boot Options.
timeout 50
default Debain $KERNEL_VER
label Debain $KERNEL_VER
kernel /extlinux/image-$KERNEL_VER
initrd /extlinux/initrd-$KERNEL_VER.img
append ro root=UUID=$UUID earlycon initramfs.runsize=24M locale.LANG=en_US.UTF-8
EOF
mkdir -p debian-riscv64/boot
mkdir -p debian-riscv64/rootfs
sudo mount ${SD_LOOP}p1 debian-riscv64/boot
sudo mount ${SD_LOOP}p2 debian-riscv64/rootfs
# Unpack the rootfs; when -p was given, replace the tarball's swapfile with
# a freshly allocated one of the requested size.
pushd debian-riscv64/rootfs
if [ -z "$SD_SWAP_SIZE" ] ; then
    sudo tar xzf ../rootfs.tar.gz
else
    sudo tar --exclude=swapfile -xzf ../rootfs.tar.gz
    sudo fallocate -l ${SD_SWAP_SIZE}M swapfile
    sudo chmod 600 swapfile
    sudo mkswap swapfile
fi
popd
# Populate the boot partition: extlinux config, initrd, kernel and bootloader.
pushd debian-riscv64/boot
sudo mkdir extlinux
sudo cp ../extlinux.conf extlinux
sudo cp ../initrd extlinux/initrd-$KERNEL_VER.img
sudo cp ../../linux-stable/arch/riscv/boot/Image extlinux/image-$KERNEL_VER
sudo cp ../../workspace/boot.elf boot.elf
popd
sudo chown root:root debian-riscv64/rootfs
sudo chmod 755 debian-riscv64/rootfs
echo
echo "Boot partition:"
df debian-riscv64/boot
ls -l debian-riscv64/boot
echo
echo "Root partition:"
df debian-riscv64/rootfs
ls -l debian-riscv64/rootfs
echo
if [ ! -z "$RSYNC_BOOT" ] ; then
    rsync -r --delete debian-riscv64/boot/ $RSYNC_BOOT
fi
# According to docs, don't need to run sync before umount.
# umount will complete all pending writes before it actually unmounts the filesystem.
# In reality, without sync, VFAT filesystem sometimes gets corrupted after umount.
# Must be a Linux bug.
sync
sudo umount ${SD_LOOP}p1
sudo umount ${SD_LOOP}p2
sudo fsck -f -p -T ${SD_LOOP}p1 || true
sudo fsck -f -p -T ${SD_LOOP}p2
sudo losetup -d ${SD_LOOP}
| true |
7463d125977acbb110f60ee908125a5c54dc6fe3
|
Shell
|
eleanoryc/shell_scripts
|
/s_random.sh
|
UTF-8
| 459 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/bash
# Demo of bash pseudo-random number generation.

# (historical scratch code: shuffle the digits of a string)
#x=1234567890
#for((i=10;i>0;i--));do
#	((r=RANDOM%i+1))
#	echo ${x:r-1:1}
#	x=${x:0:r-1}${x:r}
#done

# Generate random number between 6 and 30 (inclusive).
rnumber=$((RANDOM%25+6))
echo "$rnumber"

# Generate five random numbers below 65535.
# BUG FIX: the original concatenated three $RANDOM expansions as a *string*;
# when the first expansion was 0 the result (e.g. "0891...") was parsed as an
# invalid octal literal and aborted the script.  Combine them arithmetically
# instead (RANDOM is 0..32767, so this covers the full 30-bit range).
HBASE_PORT=()
for (( i=1;i<=5;i++ )); do
    HBASE_PORT+=( $(( (RANDOM * 32768 + RANDOM) % 65535 )) )
done
for port in "${HBASE_PORT[@]}"; do
    echo "$port"
done
| true |
5bde5e739b09dedfcfbdc272cfd41eabf74333ab
|
Shell
|
Surfict/osparc-ops
|
/scripts/local-deploy.sh
|
UTF-8
| 9,263 | 3.953125 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Deploys in local host
#
#
# Deploys all osparc-ops stacks (portainer, traefik, minio, portus, monitoring,
# jaeger, graylog, adminer and optionally the deployment agent) on this machine.
# Using osx support functions
# shellcheck source=/dev/null
source "$( dirname "${BASH_SOURCE[0]}" )/portable.sh"
# ${psed:?}
set -euo pipefail
IFS=$'\n\t'
# Paths
this_script_dir=$(dirname "$0")
repo_basedir=$(realpath "${this_script_dir}"/../)
# VCS info on current repo
current_git_url=$(git config --get remote.origin.url)
current_git_branch=$(git rev-parse --abbrev-ref HEAD)
# get_this_ip comes from portable.sh sourced above.
machine_ip=$(get_this_ip)
devel_mode=0
usage="$(basename "$0") [-h] [--key=value]
Deploys all the osparc-ops stacks and the SIM-core stack on osparc.local.
where keys are:
    -h, --help  show this help text
    --devel_mode             (default: ${devel_mode})"
# Minimal long-option parser: only --devel_mode=<v> is accepted.
for i in "$@"
do
    case $i in
        --devel_mode=*)
        devel_mode="${i#*=}"
        shift # past argument=value
        ;;
        ##
        :|--help|-h|*)
        echo "$usage" >&2
        exit 1
        ;;
    esac
done
# Loads configurations variables
# See https://askubuntu.com/questions/743493/best-way-to-read-a-config-file-in-bash
source "${repo_basedir}"/repo.config
# NOTE(review): this only warns — it does not abort on a short password.
min_pw_length=8
if [ ${#SERVICES_PASSWORD} -lt $min_pw_length ]; then
    echo "Password length should be at least $min_pw_length characters"
fi
cd "$repo_basedir";
echo
echo -e "\e[1;33mDeploying osparc on ${MACHINE_FQDN}, using credentials $SERVICES_USER:$SERVICES_PASSWORD...\e[0m"
# -------------------------------- PORTAINER ------------------------------
echo
echo -e "\e[1;33mstarting portainer...\e[0m"
pushd "${repo_basedir}"/services/portainer
sed -i "s/PORTAINER_ADMIN_PWD=.*/PORTAINER_ADMIN_PWD=$SERVICES_PASSWORD/" .env
make up
popd
# -------------------------------- TRAEFIK -------------------------------
echo
echo -e "\e[1;33mstarting traefik...\e[0m"
pushd "${repo_basedir}"/services/traefik
# copy certificates to traefik
cp "${repo_basedir}"/certificates/*.crt secrets/
cp "${repo_basedir}"/certificates/*.key secrets/
# setup configuration
$psed -i -e "s/MACHINE_FQDN=.*/MACHINE_FQDN=$MACHINE_FQDN/" .env
$psed -i -e "s/TRAEFIK_USER=.*/TRAEFIK_USER=$SERVICES_USER/" .env
# htpasswd (from the registry image) produces the bcrypt hash for basic auth.
traefik_password=$(docker run --rm --entrypoint htpasswd registry:2 -nb "$SERVICES_USER" "$SERVICES_PASSWORD" | cut -d ':' -f2)
$psed -i -e "s|TRAEFIK_PASSWORD=.*|TRAEFIK_PASSWORD=${traefik_password}|" .env
make up-local
popd
# -------------------------------- MINIO -------------------------------
echo
echo -e "\e[1;33mstarting minio...\e[0m"
pushd "${repo_basedir}"/services/minio;
$psed -i -e "s/MINIO_ACCESS_KEY=.*/MINIO_ACCESS_KEY=$SERVICES_PASSWORD/" .env
$psed -i -e "s/MINIO_SECRET_KEY=.*/MINIO_SECRET_KEY=$SERVICES_PASSWORD/" .env
make up; popd
# Poll the health endpoint until minio answers 200 (later stacks need S3 up).
echo "waiting for minio to run...don't worry..."
while [ ! "$(curl -s -o /dev/null -I -w "%{http_code}" --max-time 10 https://"${MACHINE_FQDN}":10000/minio/health/ready)" = 200 ]; do
    echo "waiting for minio to run..."
    sleep 5s
done
# -------------------------------- PORTUS/REGISTRY -------------------------------
echo
echo -e "\e[1;33mstarting portus/registry...\e[0m"
pushd "${repo_basedir}"/services/portus
# copy certificates to portus
cp -u "${repo_basedir}"/certificates/domain.crt secrets/portus.crt
cp -u "${repo_basedir}"/certificates/domain.key secrets/portus.key
cp -u "${repo_basedir}"/certificates/rootca.crt secrets/rootca.crt
# set configuration
$psed -i -e "s/MACHINE_FQDN=.*/MACHINE_FQDN=$MACHINE_FQDN/" .env
$psed -i -e "s/S3_ACCESSKEY=.*/S3_ACCESSKEY=$SERVICES_PASSWORD/" .env
$psed -i -e "s/S3_SECRETKEY=.*/S3_SECRETKEY=$SERVICES_PASSWORD/" .env
make up-self-signed
# auto configure portus
echo
# 401 from the users endpoint means the API is up (just unauthenticated).
echo "waiting for portus to run...don't worry..."
while [ ! "$(curl -s -o /dev/null -I -w "%{http_code}" --max-time 10 -H "Accept: application/json" -H "Content-Type: application/json" -X GET https://"${MACHINE_FQDN}":5000/api/v1/users)" = 401 ]; do
    echo "waiting for portus to run..."
    sleep 5s
done
# First run only: bootstrap the admin user and register the registry.
# The token file marks that bootstrapping already happened.
if [ ! -f .portus_token ]; then
    echo
    echo "configuring portus via its API ..."
    json_data=$(cat <<EOF
{
"user": {
"username": "$SERVICES_USER",
"email": "admin@swiss",
"password": "$SERVICES_PASSWORD"
}
}
EOF
)
    portus_token=$(curl -H "Accept: application/json" -H "Content-Type: application/json" -X POST \
    -d "${json_data}" https://"$MACHINE_FQDN":5000/api/v1/users/bootstrap | jq -r .plain_token)
    echo "${portus_token}" >> .portus_token
    json_data=$(cat <<EOF
{
"registry": {
"name": "$MACHINE_FQDN",
"hostname": "$MACHINE_FQDN:5000",
"use_ssl": true
}
}
EOF
)
    curl -H "Accept: application/json" -H "Content-Type: application/json" -H "Portus-Auth: $SERVICES_USER:${portus_token}" -X POST \
    -d "${json_data}" https://"$MACHINE_FQDN":5000/api/v1/registries
fi
popd
# -------------------------------- MONITORING -------------------------------
echo
echo -e "\e[1;33mstarting monitoring...\e[0m"
# set MACHINE_FQDN
pushd "${repo_basedir}"/services/monitoring
# Grafana behind traefik under /grafana; prometheus under /prometheus.
$psed -i -e "s|GF_SERVER_ROOT_URL=.*|GF_SERVER_ROOT_URL=https://$MACHINE_FQDN/grafana|" grafana/config.monitoring
$psed -i -e "s|GF_SECURITY_ADMIN_PASSWORD=.*|GF_SECURITY_ADMIN_PASSWORD=$SERVICES_PASSWORD|" grafana/config.monitoring
$psed -i -e "s|basicAuthPassword:.*|basicAuthPassword: $SERVICES_PASSWORD|" grafana/provisioning/datasources/datasource.yml
$psed -i -e "s|--web.external-url=.*|--web.external-url=https://$MACHINE_FQDN/prometheus/'|" docker-compose.yml
make up
popd
# -------------------------------- JAEGER -------------------------------
echo
echo -e "\e[1;33mstarting jaeger...\e[0m"
# set MACHINE_FQDN
pushd "${repo_basedir}"/services/jaeger
make up
popd
# -------------------------------- GRAYLOG -------------------------------
echo
echo -e "\e[1;33mstarting graylog...\e[0m"
# set MACHINE_FQDN
pushd "${repo_basedir}"/services/graylog;
# Graylog requires the root password as a SHA-256 hex digest.
graylog_password=$(echo -n "$SERVICES_PASSWORD" | sha256sum | cut -d ' ' -f1)
$psed -i -e "s|GRAYLOG_HTTP_EXTERNAL_URI=.*|GRAYLOG_HTTP_EXTERNAL_URI=https://$MACHINE_FQDN/graylog/|" .env
$psed -i -e "s|GRAYLOG_ROOT_PASSWORD_SHA2=.*|GRAYLOG_ROOT_PASSWORD_SHA2=$graylog_password|" .env
make up
echo
# 401 from the users endpoint means the API is reachable.
echo "waiting for graylog to run..."
while [ ! "$(curl -s -o /dev/null -I -w "%{http_code}" --max-time 10 -H "Accept: application/json" -H "Content-Type: application/json" -X GET https://"$MACHINE_FQDN"/graylog/api/users)" = 401 ]; do
    echo "waiting for graylog to run..."
    sleep 5s
done
# Create the GELF UDP input so services can ship logs to port 12201.
json_data=$(cat <<EOF
{
"title": "standard GELF UDP input",
"type": "org.graylog2.inputs.gelf.udp.GELFUDPInput",
"global": "true",
"configuration": {
    "bind_address": "0.0.0.0",
    "port":12201
  }
}
EOF
)
curl -u "$SERVICES_USER":"$SERVICES_PASSWORD" --header "Content-Type: application/json" \
    --header "X-Requested-By: cli" -X POST \
    --data "$json_data" https://"$MACHINE_FQDN"/graylog/api/system/inputs
popd
# -------------------------------- ADMINER -------------------------------
echo
echo -e "\e[1;33mstarting adminer...\e[0m"
pushd "${repo_basedir}"/services/adminer;
make up
popd
# Production only: the deployment agent keeps simcore in sync with this repo.
if [ $devel_mode -eq 0 ]; then
    # -------------------------------- DEPlOYMENT-AGENT -------------------------------
    echo
    echo -e "\e[1;33mstarting deployment-agent for simcore...\e[0m"
    pushd "${repo_basedir}"/services/deployment-agent;
    # The agent config only accepts https git URLs, so translate ssh remotes.
    if [[ $current_git_url == git* ]]; then
        # it is a ssh style link let's get the organisation name and just replace this cause that conf only accepts https git repos
        current_organisation=$(echo "$current_git_url" | cut -d":" -f2 | cut -d"/" -f1)
        sed -i "s|https://github.com/ITISFoundation/osparc-ops.git|https://github.com/$current_organisation/osparc-ops.git|" deployment_config.default.yaml
    else
        sed -i "/- id: simcore-ops-repo/{n;s|url:.*|url: $current_git_url|}" deployment_config.default.yaml
    fi
    # Track the branch currently checked out.
    sed -i "/- id: simcore-ops-repo/{n;n;s|branch:.*|branch: $current_git_branch|}" deployment_config.default.yaml
    secret_id=$(docker secret inspect --format="{{ .ID }}" rootca.crt)
    # full original -> replacement
    YAML_STRING="environment:\n        S3_ENDPOINT: ${MACHINE_FQDN}:10000\n        S3_ACCESS_KEY: ${SERVICES_PASSWORD}\n        S3_SECRET_KEY: ${SERVICES_PASSWORD}\n        DIRECTOR_SELF_SIGNED_SSL_SECRET_ID: ${secret_id}"
    sed -i "s/environment: {}/$YAML_STRING/" deployment_config.default.yaml
    # update
    sed -i "s/S3_ENDPOINT:.*/S3_ENDPOINT: ${MACHINE_FQDN}:10000/" deployment_config.default.yaml
    sed -i "s/S3_ACCESS_KEY:.*/S3_ACCESS_KEY: ${SERVICES_PASSWORD}/" deployment_config.default.yaml
    sed -i "s/S3_SECRET_KEY:.*/S3_SECRET_KEY: ${SERVICES_PASSWORD}/" deployment_config.default.yaml
    sed -i "s/DIRECTOR_SELF_SIGNED_SSL_SECRET_ID:.*/DIRECTOR_SELF_SIGNED_SSL_SECRET_ID: ${secret_id}/" deployment_config.default.yaml
    # portainer
    sed -i "/- url: .*portainer:9000/{n;s/username:.*/username: ${SERVICES_USER}/}" deployment_config.default.yaml
    sed -i "/- url: .*portainer:9000/{n;n;s/password:.*/password: ${SERVICES_PASSWORD}/}" deployment_config.default.yaml
    # extra_hosts
    sed -i "s|extra_hosts: \[\]|extra_hosts:\n            - \"${MACHINE_FQDN}:${machine_ip}\"|" deployment_config.default.yaml
    # update
    sed -i "/extra_hosts:/{n;s/- .*/- \"${MACHINE_FQDN}:${machine_ip}\"/}" deployment_config.default.yaml
    make down up;
    popd
fi
| true |
131cf4fd884ce24d41d1a3cd1090d5bc5e029b80
|
Shell
|
mikemeding/3DPrinter
|
/octoprint_relay/toggle.sh
|
UTF-8
| 88 | 2.796875 | 3 |
[] |
no_license
|
#!/bin/bash
# Toggle the state of the GPIO pin given as $1 (wiringPi pin numbering):
# read the current level and write the opposite value back.
# "$1" is quoted so an empty/odd argument fails cleanly instead of
# silently dropping the pin argument.
if [ "$(gpio read "$1")" -eq 1 ]
then
    gpio write "$1" 0
else
    gpio write "$1" 1
fi
| true |
e83ede20db8465ee2ca512dcfd949d808c57f6c6
|
Shell
|
woramet1/script
|
/install.sh
|
UTF-8
| 2,471 | 3.1875 | 3 |
[] |
no_license
|
#!/bin/bash
#script by jiraphat yuenying for ubuntu 16
# Installs and configures OpenVPN + squid, opens the needed ufw ports and
# drops helper scripts into /usr/bin.  Must run as root on Ubuntu 16.
# NOTE(review): downloads config tarballs and executables over plain HTTP
# from hard-coded hosts and schedules a nightly reboot — review before use.
if [[ $EUID -ne 0 ]]; then
   echo "This script must be run as root"
   exit 1
fi
#install openvpn
apt-get purge openvpn easy-rsa -y;
apt-get purge squid -y;
apt-get update
# Public IPv4 of this host; MYIP2 is a sed program replacing the template
# placeholder "xxxxxxxxx" with it in the downloaded config files.
MYIP=$(wget -qO- ipv4.icanhazip.com);
MYIP2="s/xxxxxxxxx/$MYIP/g";
apt-get update
apt-get install bc -y
apt-get -y install openvpn easy-rsa;
apt-get -y install python;
wget -O /etc/openvpn/openvpn.tar "https://raw.githubusercontent.com/jiraphaty/auto-script-vpn/master/openvpn.tar"
wget -O /etc/openvpn/default.tar "https://raw.githubusercontent.com/jiraphaty/auto-script-vpn/master/default.tar"
cd /etc/openvpn/
tar xf openvpn.tar
tar xf default.tar
# Install the bundled sysctl/ufw configuration (enables forwarding/NAT).
cp sysctl.conf /etc/
cp before.rules /etc/ufw/
cp ufw /etc/default/
rm sysctl.conf
rm before.rules
rm ufw
systemctl restart openvpn
#install squid3
apt-get -y install squid;
cp /etc/squid/squid.conf /etc/squid/squid.conf.bak
wget -O /etc/squid/squid.conf "https://raw.githubusercontent.com/jiraphaty/auto-script-vpn/master/squid.conf"
sed -i $MYIP2 /etc/squid/squid.conf;
systemctl restart squid
#config client
cd /etc/openvpn/
wget -O /etc/openvpn/client.ovpn "https://raw.githubusercontent.com/jiraphaty/auto-script-vpn/master/client.ovpn"
sed -i $MYIP2 /etc/openvpn/client.ovpn;
cp client.ovpn /root/
# Firewall: ssh, OpenVPN (1194), proxy ports and HTTP.
ufw allow ssh
ufw allow 1194/tcp
ufw allow 8080/tcp
ufw allow 3128/tcp
ufw allow 80/tcp
yes | sudo ufw enable
# download script
cd /usr/bin
wget -q -O m "http://43.229.149.140/scriptvpn/menu.sh"
wget -q -O speedtest "http://43.229.149.140/scriptvpn/Speedtest.sh"
wget -q -O b-user "http://43.229.149.140/scriptvpn/b-user.sh"
# Nightly reboot at 03:30 via cron.
echo "30 3 * * * root /sbin/reboot" > /etc/cron.d/reboot
chmod +x speedtest
chmod +x m
chmod +x b-user
service cron restart -q > /dev/null 2>&1
clear
printf '###############################\n'
printf '# Script by WORAMET KOSANAN #\n'
printf '# #\n'
printf '# #\n'
printf '# พิมพ์ menu เพื่อใช้คำสั่งต่างๆ #\n'
printf '###############################\n\n'
echo -e "ดาวน์โหลดไฟล์ : /root/client.ovpn\n\n"
printf '\n\nเพิ่ม user โดยใช้คำสั่ง useradd'
printf '\n\nตั้งรหัสโดย ใช้คำสั่ง passwd'
printf '\n\nคุณจำเป็นต้องรีสตาร์ทระบบหนึ่งรอบ (y/n):'
# Ask whether to reboot now.
read a
if [ $a == 'y' ]
then
reboot
else
exit
fi
| true |
75e93bc1c727c494aa9a2e8c68aaed6166a39c93
|
Shell
|
iwonbigbro/bash-cat
|
/lib/bashcat/datafile.py
|
UTF-8
| 9,651 | 2.625 | 3 |
[] |
no_license
|
#!/usr/bin/env python
# Copyright (C) 2015 Craig Phillips. All rights reserved.
import fcntl, copy, os, sys, hashlib, re, string
import cached, bashcat.output
try: import cPickle as pickle
except ImportError: import pickle
class NotFoundError(Exception):
    """Raised when no sibling line satisfies a preceding()/following() lookup."""
    pass
class DataLine(object):
    """Coverage record for a single line of bash source.

    Keeps the raw text, a per-process hit count and a per-character
    execution mask, and links into a doubly linked list of lines so a
    line can inspect its siblings (needed for here-document and
    case-branch detection).
    """

    def __init__(self, source, prev=None):
        self._source = source.rstrip('\n')
        self._data = {}             # pid -> {'count': int, 'mask': [0|1, ...]}
        self._modified = False
        self._maskinit = [ 0 ] * len(source)
        self._masklen = len(self._maskinit)
        self._mask = self._maskinit[:]
        self._count = 0
        self._multiline = False
        self._heredoc = None

        # Create a linked list so that lines can reference their
        # siblings, ancestors and descendants.
        self._prev = prev
        self._next = None

        if prev is not None:
            prev._next = self
            self._heredoc = prev.heredoc

        if self._source.endswith('\\'):
            self._multiline = True

    def preceding(self, filter=None):
        """Return the nearest earlier line accepted by *filter* (the
        immediate predecessor when *filter* is not callable).  Raises
        NotFoundError when nothing matches."""
        prev = self._prev
        while prev is not None:
            if not callable(filter) or filter(prev):
                return prev
            prev = prev._prev
        raise NotFoundError("No preceding sibling")

    def following(self, filter=None):
        """Return the nearest later line accepted by *filter*.

        BUG FIX: the original referenced the undefined name ``prev``
        inside the loop, so every call raised NameError instead of
        walking the ``_next`` chain.
        """
        next = self._next
        while next is not None:
            if not callable(filter) or filter(next):
                return next
            next = next._next
        raise NotFoundError("No following sibling")

    @cached.property
    def heredoc(self):
        """Delimiter word of the here document this line opens or sits
        inside, or None outside one (and on the terminating line)."""
        if self._heredoc is None:
            m = re.search(r'[^<]<<-?(["\'])?(\w+)\1?', self.stripped_source)
            if m is not None:
                self._heredoc = m.group(2)
            return self._heredoc

        # Does the here document terminate here?
        m = re.search(r'^' + self._heredoc + '$', self.stripped_source)
        if m is not None:
            return None
        return self._heredoc

    @property
    def multiline(self):
        # True when the line ends with a backslash continuation.
        return self._multiline

    @property
    def source(self):
        # Raw line text without the trailing newline.
        return self._source

    @cached.property
    def stripped_source(self):
        """Line text with surrounding whitespace and any trailing
        '#'-comment removed."""
        src = self._source.strip()
        m = re.search(r'^([^#]*)#.*$', src)
        if m is not None:
            src = m.group(1).strip()
        return src

    @cached.property
    def is_heredoc(self):
        # True while this line is associated with an open here document.
        return (self.heredoc is not None)

    @cached.property
    def is_branch(self):
        # True for syntax-only branch openers (then/else/do/in/;;).
        src = self.stripped_source
        if not src:
            return False
        return (re.search(r'\s*(then|else|;;|in|do)\s*$', src) is not None)

    @cached.property
    def is_executable(self):
        """True when the line contains code bash actually executes, as
        opposed to blanks, comments, pure syntax and here-doc text."""
        src = self.stripped_source
        if not src:
            return False

        # If we are a closing statement within a here document.
        if self._heredoc and not self.is_heredoc:
            return False

        # Or we are within a here block.
        if self._prev and self._prev.is_heredoc and self.is_heredoc:
            # If there is a count on execution, there must have been
            # some embedded code that was executed.
            if self._count > 0:
                return True
            return False

        # Bash case statements are syntactically heavy, with little in
        # the way of runtime interpretation. We need to specialise these
        # blocks and filter out the pattern logic and decoration.
        try:
            preceding_keyword = \
                self.preceding(lambda x: x.is_branch or x.is_executable) \
                    .command_keyword()

            if preceding_keyword in ('in', ';;'):
                keyword = self.command_keyword(is_case=True, before=';;')
                if keyword is None:
                    return False

                # Denotes a case statement branch.
                return True
        except NotFoundError:
            pass

        # Some statements are closing statements and are just syntax
        # directives to bash. Therefore, they do not get executed at
        # runtime and should be excluded.
        src = re.sub(r'\s*(then|else|esac|done|in|fi|do|;;|}|\)|\(|{)\s*;?', r'', src).strip()
        if not src:
            return False
        return True

    def command_keyword(self, **kwargs):
        """Return the trailing word of the command on this line, or None.

        With is_case=True the text before the first unescaped ')' is
        skipped (case-branch patterns); *before* truncates at a marker
        such as ';;'.
        """
        src = self._source

        if kwargs.get('is_case'):
            # Match everything after the first unescaped closing bracket.
            m = re.search(r'^(?:\\\)|[^\)])+\)(\s+\S+.*)?$', self._source)
            if m is not None:
                src = m.group(1)
            if src is None:
                return None
            if src == 'esac':
                return None

        before = kwargs.get('before')
        if before is not None:
            m = re.search(r'^(.*)' + before, src)
            if m is not None:
                src = m.group(1).strip()

        # Default: Obtain the command keyword from the current line.
        m = re.search(r'(\S+)\s*$', src)
        if m is not None:
            return m.group(1)
        return None

    def sync(self):
        """Fold the per-PID records into the aggregate count and mask."""
        if not self._modified:
            return

        mask = self._maskinit[:]
        count = 0

        for v in self._data.itervalues():
            count += v['count']
            vmask = v['mask']
            for i in xrange(self._masklen):
                mask[i] |= vmask[i]

        self._mask = mask
        self._count = count
        self._modified = False

    def update(self, statement):
        """Record one execution of *statement* by the current process,
        marking the matching character span of this line's mask."""
        pid = os.getpid()
        try:
            val = self._data[pid]
        except KeyError:
            val = self._data.setdefault(pid, {
                'count': 0,
                'mask': self._maskinit[:]
            })

        self._modified = True
        val['count'] += 1
        valmask = val['mask']

        offset = string.find(self.source, statement)
        if -1 != offset:
            for i in xrange(offset, offset + len(statement)):
                valmask[i] = 1

    @property
    def count(self):
        # Aggregate hit count across all recorded PIDs.
        self.sync()
        return self._count

    @property
    def mask(self):
        # Aggregate per-character execution mask across all recorded PIDs.
        self.sync()
        return self._mask

    def merge(self, line):
        """Merge another DataLine's per-PID data into this one; returns
        True when this line now carries unsynced changes."""
        if self is line:
            return self._modified

        self._modified = True

        for pid, v in line._data.iteritems():
            sv = self._data.get(pid)
            if sv is None:
                self._data[pid] = copy.deepcopy(v)
                continue

            if v['count'] > sv['count']:
                sv['count'] = v['count']

            vmask = v['mask']
            svmask = sv['mask']

            if vmask != svmask:
                for i in xrange(self._masklen):
                    svmask[i] |= vmask[i]

        return self._modified

    def value(self):
        # Plain-dict snapshot used by DataFile.value() for reporting.
        return { 'source':self.source, 'count':self.count, 'mask':self.mask }
class DataFile(object):
    """Coverage data for one bash source file.

    The data is pickled under a content-addressed name (sha1 of the
    source path) in the data directory; concurrent writers serialise
    through an exclusive fcntl lock and merge their line data.
    """
    def __init__(self, srcfile, lineno, branch, line, *args, **kwargs):
        # Parse srcfile into linked DataLine records, merge previously
        # saved data from disk, then record the (lineno, branch) hit.
        self._srcfile = srcfile
        self._datadir = kwargs['datadir']
        self._datafile = os.path.join(self._datadir, hashlib.sha1(srcfile).hexdigest())
        self._modified = False
        self._lines = {}
        with open(srcfile, "r") as f:
            # Digest of the source content guards merges against a
            # changed file (see merge()).
            sha = hashlib.sha1()
            srclineno = 0
            dl_prev = None
            for srcline in f:
                sha.update(srcline)
                dl_curr = DataLine(srcline, dl_prev)
                srclineno += 1
                self._lines[srclineno] = dl_prev = dl_curr
            self._digest = sha.hexdigest()
        if not os.path.exists(self._datadir):
            os.makedirs(self._datadir, 0700)
        self.sync()
        self.update(srcfile, lineno, branch, line)
    def __len__(self):
        # Number of source lines tracked.
        return len(self._lines)
    @property
    def digest(self):
        # sha1 hex digest of the source file content at parse time.
        return self._digest
    @property
    def path(self):
        # Path of the bash source file this data describes.
        return self._srcfile
    def iteritems(self):
        # Yields (lineno, DataLine) pairs.
        for k, v in self._lines.iteritems():
            yield k, v
    def itervalues(self):
        # Yields DataLine objects.
        for v in self._lines.itervalues():
            yield v
    def merge(self, datafile):
        """Merge another DataFile's line data into this one, but only
        when it was built from identical source content; returns True
        when anything changed."""
        if self._digest != datafile.digest:
            return self._modified
        for k, v in datafile.iteritems():
            if self._lines.setdefault(k, v).merge(v):
                self._modified = True
        return self._modified
    def update(self, srcfile, lineno, branch, line, *args, **kwargs):
        """Record one execution of *branch* on line *lineno*; invalid
        line numbers are reported and ignored."""
        try:
            dataline = self._lines[int(lineno)]
        except:
            bashcat.output.err(
                "{0}: invalid line number '{1}'".format(srcfile, lineno)
            )
            return
        dataline.update(branch)
        self._modified = True
    def sync(self):
        """Merge on-disk data into memory and write the result back,
        holding an exclusive lock for the whole read-merge-write cycle."""
        fd = os.open(self._datafile, os.O_CREAT | os.O_RDWR)
        f = os.fdopen(fd, "r+")
        fcntl.lockf(fd, fcntl.LOCK_EX)
        try:
            try:
                datafile = pickle.load(f)
                if datafile.digest == self.digest:
                    self.merge(datafile)
            except EOFError:
                # Empty (freshly created) data file — nothing to merge.
                pass
            except Exception as e:
                raise
            if self._modified:
                for dl in self._lines.itervalues():
                    dl.sync()
                self._modified = False
            f.seek(0)
            pickle.dump(self, f)
        finally:
            # Unlock and close the file.
            f.close()
    def value(self):
        # List of per-line report dicts (see DataLine.value()).
        return [ dl.value() for dl in self._lines.values() ]
def load(datafile):
   """Unpickle and return the DataFile stored at ``datafile``.

   datafile -- path to a file previously written by DataFile.sync().
   """
   # Open in binary mode: pickle streams are byte streams, and text mode
   # corrupts them on platforms with newline translation (and is rejected
   # outright by Python 3's pickle.load).
   with open(datafile, "rb") as fd:
      return pickle.load(fd)
| true |
0870f2eedd2c777ef57439506c48e6fd6159369c
|
Shell
|
PandikKumar/dell-devops
|
/ci-scripts/test/unit/ci-script-tests/pingaccess-was/common-api/get-entity-operations/13-get-token-provider-tests.sh
|
UTF-8
| 966 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/bash
# Unit test for get_token_provider (pingaccess-was common API).
# Requires PROJECT_DIR and SHUNIT_PATH to be exported by the CI harness.
script_to_test="${PROJECT_DIR}"/ci-scripts/test/integration/pingaccess-was/common-api/get-entity-operations.sh
# Source the script under test so its functions are defined in this shell.
. "${script_to_test}"
# Directory holding canned HTTP response fixtures for the mocked curl below.
readonly resources_dir="${PROJECT_DIR}"/ci-scripts/test/unit/ci-script-tests/pingaccess-was/common-api/get-entity-operations/resources
# Mock this function call
# Stub out curl: always emit the canned 200 response fixture and succeed,
# so get_token_provider sees a successful HTTP exchange.
curl() {
  local canned_response
  canned_response=$(cat "${resources_dir}"/get-token-provider-200-response.txt)
  # Write the fixture to stdout so the caller reads it as curl output.
  printf '%s\n' "${canned_response}"
  return 0
}
# Happy path: with curl mocked, get_token_provider must exit 0 and
# return a response whose status line reports HTTP 200.
testGetTokenProviderHappyPath() {
  local expected_status='HTTP/1.1 200 OK'
  local response
  response=$(get_token_provider "" "" "")
  assertEquals "The mocked curl function should force get_token_provider to return 0." 0 $?
  assertContains "The get_token_provider response \"${response}\" does not contain \"${expected_status}\"" "${response}" "${expected_status}"
}
# load shunit
# Quote the expansion: an unquoted ${SHUNIT_PATH} is word-split and
# glob-expanded, so a path containing spaces or globs would fail to source.
. "${SHUNIT_PATH}"
| true |
ed3b78ffd3024e2b9198fee752103eeb2ae8001c
|
Shell
|
loveshort/DevelopBaseNote
|
/shell/devEnvInstallTool.sh
|
UTF-8
| 1,784 | 2.796875 | 3 |
[] |
no_license
|
#!/bin/bash
# Dev-environment bootstrap: checks connectivity, then installs Homebrew,
# RVM + Ruby 2.6.3, refreshes gem sources, oh-my-zsh and CocoaPods.
# (Original shebang was '# !/bin/bash' -- the space made it a plain
# comment, so the script ran under whatever shell invoked it.)
echo -e " \033[32m Dev Env install Tool By DragonLi (Version 1.0.0) \033[0m \n"
# Cheap connectivity probe: HEAD request only, print just the numeric status.
HTTP_CODE=$(curl --connect-timeout 5 -o /dev/null 2>&1 -s --head -w "%{http_code}" "https://Twitter.com")
if [ "${HTTP_CODE}" -ne 200 ]
then
    echo -e "\033[31m当前网络环境无法满足安装条件,请切换网络环境后再重试!!!\033[0m"
    # Was 'exit -1024': exit statuses are taken modulo 256, so -1024 was
    # reported as 0 (success) to callers.  Use a plain non-zero code.
    exit 1
else
    echo -e "\033[32m 恭喜,当前网络环境检测通过。😁 \033[0m"
fi
echo -e "\033[35m 🚀:begain installing Homebrew,may take some times! Please Wating ☕️☕️☕️ \033[0m"
# Run the installer in a subshell so its environment changes stay contained.
(/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)")
if [[ $? -ne 0 ]]
then
    echo -e "\033[31m install Homebrew error !!!\n\n\n \033[0m"
fi
echo -e "\033[35m 🚀:begain rvm update 😁 \033[0m"
# NOTE(review): this pipes a remote script straight into bash via a bare
# 'get.rvm.io' redirect; consider pinning https://get.rvm.io and verifying
# the RVM signing keys if this ever runs unattended.
ruby -v && curl -L get.rvm.io | bash -s stable
source ~/.bashrc && source ~/.bash_profile
rvm -v && rvm install "ruby-2.6.3" && rvm use 2.6.3 --default
if [[ $? -ne 0 ]]
then
    echo -e "\033[31m update rvm error !!!\n\n\n \033[0m"
fi
echo -e "\033[35m gem update \033[0m"
sudo gem update --system
gem list --local
# Swap the default gem sources for the ruby-china mirror.
gem sources --remove https://cdn.cocoapods.org/ && gem sources --remove https://rubygems.org/ && gem sources --add https://gems.ruby-china.com/ && gem sources -l
if [[ $? -ne 0 ]]
then
    echo -e "\033[31m update gem error !!!\n\n\n \033[0m"
else
    echo -e "\033[32m update gem done \033[0m \n"
fi
# oh-my-zsh
sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
echo -e "\033[32m cocoapods , you can cancel it\033[0m"
# option cocoapods
sudo gem install -n /usr/local/bin cocoapods
echo -e "\033[32m all operation done 🍺 🍺 🍺 \033[0m \n"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.