blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|
7502dee5692b043744036a5ff0097e01e2327ecc
|
Shell
|
brncsk/dotfiles
|
/tmux/.tmux/segments/acpitemp/acpitemp.segment.zsh
|
UTF-8
| 777 | 2.84375 | 3 |
[] |
no_license
|
# tmux status-line segment (zsh): report the ACPI temperature with a
# severity class.  Thresholds come from the theme's THEME_TEMP associative
# array, falling back to the defaults below.  Depends on external commands:
# acpi(1), i8kfan (Dell fan control) and the theme's render_status_segment
# helpers.
function status_segment_acpitemp {
local DEFAULT_LOW_MAX=70
local DEFAULT_MID_MAX=85
# Theme overrides for the low/mid thresholds, if configured.
local low_max=${THEME_TEMP[low_max]:-$DEFAULT_LOW_MAX}
local mid_max=${THEME_TEMP[mid_max]:-$DEFAULT_MID_MAX}
# Strip the label, unit and comma from `acpi -t`, leaving "<status> <temp>".
local tempdata="$(acpi -t | sed -e 's/\(Thermal 0: \|degrees C\|,\)//g')"
local tempstat=`echo "${tempdata}" | cut -d' ' -f1`
# printf '%d' truncates the fractional part of the reported temperature.
local tempval=$(printf '%d' $(echo "${tempdata}" | cut -d' ' -f2))
# Show a fan glyph when the Dell fan is spinning ("0 0" = both fans off).
# NOTE(review): fan_status is not declared local, so it leaks globally.
[[ $(i8kfan) == '0 0' ]] || fan_status="$CH[f] "
if [ ${tempval} -lt ${low_max} ]; then
severity='default';
elif [ ${tempval} -lt ${mid_max} ]; then
severity='warning';
else
severity='critical'
fi
tempval="${tempval} °C"
# Render a split segment (fan glyph + temperature) when the fan is on,
# otherwise a plain temperature segment.
[[ $fan_status ]] &&
render_status_segment_split "$severity" "$fan_status" "$tempval" ||
render_status_segment "$severity" "$tempval"
}
| true |
7d6b648cc413c96ba2655a25a892eff444d9c3c7
|
Shell
|
GurjotSinghAulakh/operating-systems
|
/Bash/funksjoner.sh
|
UTF-8
| 583 | 3.703125 | 4 |
[] |
no_license
|
#! /bin/bash
# Demonstrates shell functions: declaration order, local variables,
# arguments, return values, and the global scope of non-local variables.

# Print the current date and the logged-in users.
users(){
date
who
}
users # Calling a function works only AFTER its declaration

# findUser <name>: look <name> up in /etc/passwd.
# Returns 0 when found (leaving the matching line in the GLOBAL $funn),
# 1 otherwise.
findUser(){
echo "funk-args: $*"
local bruker # Local variable
bruker=$1
# BUG FIX: the original grepped for the literal string "^bruker" instead of
# the user name; anchor on the name followed by the passwd field separator.
funn=$(grep "^${bruker}:" /etc/passwd) # $funn stays global
if [ "$funn" ]; then
return 0 # Found
fi
return 1
}

# Main program: look up every name given on the command line.
echo "Script args: $*"
for user in "$@"
do
echo -e "\nLeter etter $user" # -e enables \n
findUser "$user"
if [ $? = 0 ] # Return value from findUser is in $?
then
echo "$user finnes"
echo "$funn" # $funn is global
else
echo "$user finnes ikke"
fi
done
echo -e "\nScript-arg: $*"
| true |
2ce8f7379686a696a3a619d9e8f60a1fecd2c071
|
Shell
|
killangell/linux-auto-installation
|
/unit_test/tools/core/kickstart/test_ks_template_handler.sh
|
UTF-8
| 6,980 | 2.96875 | 3 |
[] |
no_license
|
# Unit-test fixtures for the kickstart template handler.  Relies on helpers
# from debug.sh (print_head/print_body) and ks_template_handler.sh
# (replace_key_by_file / insert_file_after_key).
source debug.sh
source ks_template_handler.sh
#Global define, should be unique in system
test_ks_template_handler_func_index="null"
test_ks_template_handler_func_arr="null"
test_ks_template_handler_func_iterator="null"
# NOTE(review): assumes RUNNING_DIR is set by the caller — confirm.
UNIT_TEST_TMP_DIR=$RUNNING_DIR/unit_test/tools/core/kickstart
mkdir -p $UNIT_TEST_TMP_DIR
# Scratch files, recreated by test_ks_template_handler_init for every test.
test_ks_file=$UNIT_TEST_TMP_DIR/ks.cfg
test_source_bootloader=$UNIT_TEST_TMP_DIR/source_bootloader
test_source_harddrive=$UNIT_TEST_TMP_DIR/source_harddrive
test_source_partition=$UNIT_TEST_TMP_DIR/source_partition
# (Re)create the kickstart template (one "#<key>" placeholder per line) and
# the three source snippets that the tests substitute into it.
function test_ks_template_handler_init()
{
rm -rf $test_ks_file
rm -rf $test_source_bootloader
rm -rf $test_source_harddrive
rm -rf $test_source_partition
echo "#harddrive
#bootloader
#partition" >> $test_ks_file
echo "bootloader --location=partition --driveorder=sda --append=\"crashkernel=auto rhgb quiet\"" >> $test_source_bootloader
echo "harddrive --partition=sda3 --dir=" >> $test_source_harddrive
echo "clearpart --all --drives=sda
ignoredisk --only-use=sda
part /boot --fstype ext4 --asprimary --size=200M
part pv.008019 --grow --size=1
volgroup vg0 --pesize=4096 pv.008019
logvol swap --vgname=vg0 --size=2048M --name=lv_swap
logvol / --vgname=vg0 --size=1G --name=lv_root
logvol /var --vgname=vg0 --size=1G --name=lv_var
logvol /home --vgname=vg0 --size=1G --name=lv_home
logvol /opt --vgname=vg0 --size=1G --name=lv_opt
logvol /usr --vgname=vg0 --size=1G --name=lv_usr
logvol /data--vgname=vg0 --size=1 --grow --name=lv_data" >> $test_source_partition
}
# Replace the "#bootloader" placeholder in the template and compare the
# result with the expected file.  Return convention shared by all tests in
# this file (see the runner): 0 = FAILURE, 1 = success.
function test_replace_bootloader()
{
rm -rf $test_source_bootloader.expect
test_ks_template_handler_init
# Expected: the placeholder line is replaced by the bootloader entry.
echo "#harddrive
bootloader --location=partition --driveorder=sda --append=\"crashkernel=auto rhgb quiet\"
#partition" >> $test_source_bootloader.expect
key="#bootloader"
source_file=$test_source_bootloader
ks_file=$test_ks_file
replace_key_by_file $key $source_file $ks_file
if diff $ks_file $test_source_bootloader.expect > /dev/null; then
return 1
fi
return 0
}
function test_set_bootloader_script()
{
rm -rf $test_source_bootloader.expect
test_ks_template_handler_init
echo "#harddrive
bootloader --location=partition --driveorder=sda --append=\"crashkernel=auto rhgb quiet\"
#partition" >> $test_source_bootloader.expect
source_file=$test_source_bootloader
ks_file=$test_ks_file
sh set_bootloader.sh $source_file $ks_file
diff $ks_file $test_source_bootloader.expect > /dev/null
if [ $? -ne 0 ];then
return 0
fi
return 1
}
function test_replace_harddrive()
{
rm -rf $test_source_harddrive.expect
test_ks_template_handler_init
echo "harddrive --partition=sda3 --dir=
#bootloader
#partition">> $test_source_harddrive.expect
key="#harddrive"
source_file=$test_source_harddrive
ks_file=$test_ks_file
replace_key_by_file $key $source_file $ks_file
diff $ks_file $test_source_harddrive.expect > /dev/null
if [ $? -ne 0 ];then
return 0
fi
return 1
}
function test_set_harddrive_script()
{
rm -rf $test_source_harddrive.expect
test_ks_template_handler_init
echo "harddrive --partition=sda3 --dir=
#bootloader
#partition">> $test_source_harddrive.expect
source_file=$test_source_harddrive
ks_file=$test_ks_file
sh set_harddrive.sh $source_file $ks_file
diff $ks_file $test_source_harddrive.expect > /dev/null
if [ $? -ne 0 ];then
return 0
fi
return 1
}
function test_replace_partition()
{
rm -rf $test_source_partition.expect
test_ks_template_handler_init
echo "#harddrive
#bootloader
clearpart --all --drives=sda
ignoredisk --only-use=sda
part /boot --fstype ext4 --asprimary --size=200M
part pv.008019 --grow --size=1
volgroup vg0 --pesize=4096 pv.008019
logvol swap --vgname=vg0 --size=2048M --name=lv_swap
logvol / --vgname=vg0 --size=1G --name=lv_root
logvol /var --vgname=vg0 --size=1G --name=lv_var
logvol /home --vgname=vg0 --size=1G --name=lv_home
logvol /opt --vgname=vg0 --size=1G --name=lv_opt
logvol /usr --vgname=vg0 --size=1G --name=lv_usr
logvol /data--vgname=vg0 --size=1 --grow --name=lv_data">> $test_source_partition.expect
key="#partition"
source_file=$test_source_partition
ks_file=$test_ks_file
replace_key_by_file $key $source_file $ks_file
diff $ks_file $test_source_partition.expect > /dev/null
if [ $? -ne 0 ];then
return 0
fi
return 1
}
function test_set_partition_script()
{
rm -rf $test_source_partition.expect
test_ks_template_handler_init
echo "#harddrive
#bootloader
clearpart --all --drives=sda
ignoredisk --only-use=sda
part /boot --fstype ext4 --asprimary --size=200M
part pv.008019 --grow --size=1
volgroup vg0 --pesize=4096 pv.008019
logvol swap --vgname=vg0 --size=2048M --name=lv_swap
logvol / --vgname=vg0 --size=1G --name=lv_root
logvol /var --vgname=vg0 --size=1G --name=lv_var
logvol /home --vgname=vg0 --size=1G --name=lv_home
logvol /opt --vgname=vg0 --size=1G --name=lv_opt
logvol /usr --vgname=vg0 --size=1G --name=lv_usr
logvol /data--vgname=vg0 --size=1 --grow --name=lv_data">> $test_source_partition.expect
source_file=$test_source_partition
ks_file=$test_ks_file
sh set_partition.sh $source_file $ks_file
diff $ks_file $test_source_partition.expect > /dev/null
if [ $? -ne 0 ];then
return 0
fi
return 1
}
function test_insert_after_harddrive()
{
rm -rf $test_source_harddrive.expect
test_ks_template_handler_init
echo "#harddrive
harddrive --partition=sda3 --dir=
#bootloader
#partition">> $test_source_harddrive.expect
key="#harddrive"
source_file=$test_source_harddrive
ks_file=$test_ks_file
insert_file_after_key $key $source_file $ks_file
diff $ks_file $test_source_harddrive.expect > /dev/null
if [ $? -ne 0 ];then
return 0
fi
return 1
}
function test_insert_after_bootloader()
{
rm -rf $test_source_bootloader.expect
test_ks_template_handler_init
echo "#harddrive
#bootloader
bootloader --location=partition --driveorder=sda --append=\"crashkernel=auto rhgb quiet\"
#partition">> $test_source_bootloader.expect
key="#bootloader"
source_file=$test_source_bootloader
ks_file=$test_ks_file
insert_file_after_key $key $source_file $ks_file
diff $ks_file $test_source_bootloader.expect > /dev/null
if [ $? -ne 0 ];then
return 0
fi
return 1
}
#Test list
# Registry of test functions, executed in order by the runner below.
test_ks_template_handler_func_arr=(
test_replace_bootloader
test_set_bootloader_script
test_replace_harddrive
test_set_harddrive_script
test_replace_partition
test_set_partition_script
test_insert_after_harddrive
test_insert_after_bootloader
)
# Run every registered test.  NOTE the inverted convention used throughout
# this file: a test function returns 0 on FAILURE and 1 on success.  This
# runner mirrors it: returns 0 if any test failed, 1 if all passed.
function test_ks_template_handler_all_funcs()
{
test_ks_template_handler_func_index=1
for test_ks_template_handler_func_iterator in ${test_ks_template_handler_func_arr[*]}
do
print_head LEVEL_INFO "func $test_ks_template_handler_func_index: ${test_ks_template_handler_func_iterator}"
${test_ks_template_handler_func_iterator}
if [ $? -eq 0 ];then
print_body LEVEL_INFO " ... failed\n"
return 0
else
print_body LEVEL_INFO " ... passed\n"
fi
let test_ks_template_handler_func_index=$test_ks_template_handler_func_index+1
done
return 1
}
| true |
5b1e7be56fb234d8e9111000afc0b9d3ff8ead90
|
Shell
|
XORkey/SLWP
|
/com/xorkey/login/login_server/HSM.sh
|
UTF-8
| 1,481 | 3.625 | 4 |
[] |
no_license
|
#!/bin/sh
#
# DESCRIPTION: Calculate login values using the array with secret keys.
# This is a simulation of the computations of the HSM.
# SYNTAX:
# HSM.sh <Au> <Ks> <n> <v> <i>
# ARGUMENTS:
# <Au>: [hex string]
# Au.
# <Ks>: [hex string]
# Ks.
# <n>: [int]
# Number of bits. There is a maximum imposed of 64 bits, due to the size
# of an integer in bash(1). Furthermore, only multiples of 4 should work
# as expected.
# <v>: [bool]
# Can user verify? Ignored in this simulation and fixed to 'false'.
# <i>: [int]
# Iteration count.
# AUTHOR: T.M.C. Ruiter B ICT
# COPYRIGHT(c): 2014 by XORkey B.V., Winterkoning 5, 1722 CA, Zuid-Scharwoude, The Netherlands.
#
# NOTE(review): uses typeset and [[ ]] under "#!/bin/sh" — relies on sh
# being ksh/bash on the target system; confirm.
# Helper functions (Min, AES256modN, SHA256modN, XOR, Random) and the
# secret-key array S come from the two sourced files below.
. _HSM-functions.sh
. _SECRET_KEYS.sh
typeset logfile=/tmp/slwp.log
# m = number of secret keys available; at most MAX_ACTIVE_KEYS are used.
typeset -i m=${#S[*]}
typeset -i MAX_ACTIVE_KEYS=6
typeset -i MAX_KEYS=$(Min $m $MAX_ACTIVE_KEYS)
# "0x${1#0x}" normalises hex input given with or without a 0x prefix.
typeset -i Au=0x${1#0x} Ks=0x${2#0x} n=$3 v=$4 i=$5
# Outputs default to 0 when the iteration count exceeds the key supply.
typeset -i Bs=0 Ps=0 Qs=0
if [[ $i -lt $MAX_KEYS ]]
then
# User key for iteration i, and the user's response to the challenge.
typeset -i Ku=0x$(AES256modN $Ks ${S[$i]} $n)
typeset -i Ru=$(XOR $Au $Ku $n)
# First iteration uses a fresh random Rs; later ones derive it from S[0].
if [[ $i -eq 0 ]]
then
typeset -i Rs=$(Random $n)
else
typeset -i Rs=0x$(AES256modN $Ks ${S[0]} $n)
fi
typeset -i Bs=0x$(SHA256modN $Ru $n) Ps=$(XOR $Rs $Ku $n) Qs=$(XOR $Rs $Ru $n)
fi
# Log the inputs, then emit "Bs,Ps,Qs" (hex, n/4 digits each) on stdout.
printf "Au: 0x%0.$((n/4))x; Ks: 0x%0.$((n/4))x; Ku: 0x%0.$((n/4))x; Rs: 0x%0.$((n/4))x; n: %d; v: %d; i: %d -> " $Au $Ks $Ku $Rs $n $v $i >> $logfile
printf "0x%0.$((n/4))x,0x%0.$((n/4))x,0x%0.$((n/4))x\n" $Bs $Ps $Qs | tee -a $logfile
| true |
22cefe47169a594d0fc571d8c57e37f8084466c1
|
Shell
|
Flavio-Varejao/Exercicios
|
/Shell Script/Q17.sh
|
UTF-8
| 303 | 3.734375 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Compute the factorial of a user-supplied integer, e.g. 5! = 5.4.3.2.1
read -rp "Digite um número inteiro: " num
fatorial=1
# Start at 2: multiplying by 1 is a no-op, so results are unchanged.
numero=2
while (( numero <= num )); do
  fatorial=$(( fatorial * numero ))
  numero=$(( numero + 1 ))
done
echo "Fatorial de $num é $fatorial"
| true |
37bb8564adef93e6cb452bed87b3bfba2a6aa710
|
Shell
|
vovanshu/linxbackup2
|
/steps/440_usr_lib_libamdocl64.sh
|
UTF-8
| 188 | 2.578125 | 3 |
[] |
no_license
|
# if [[ $(ls ./usr/lib/libamdocl64*) ]]; then
# Back up /usr/lib/libamdocl64* when present, otherwise record a skip.
# BUG FIX: the original tested [ -f "./usr/lib/libamdocl64*" ] with the glob
# inside quotes, which checks for a file literally named "libamdocl64*" and
# never matches; let the glob expand and test whether anything is there.
if ls ./usr/lib/libamdocl64* > /dev/null 2>&1 ; then
	defbk $n 'usr/lib/libamdocl64*' 'usr.lib.libamdocl64'
else
	skipmsg $n 'usr.lib.libamdocl64'
fi
| true |
ba916fd268c77a785b9792043b07996363fa5cad
|
Shell
|
markfili/android-shell-commands
|
/install_apk_ssh.sh
|
UTF-8
| 974 | 3.671875 | 4 |
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
# a script to install APK on multiple devices over SSH
# IP and port settings are read from a file
# file line format example:
# 192.168.1.12:2223
APK_FOLDER=home/user/Development/android/projects/test/app/build/outputs/apk
APK_FLAVOR=free
APK_BUILD_TYPE=debug
APK_FILE=app_debug.apk
APP_PACKAGE_NAME=com.test.app
APP_LAUNCHER_ACTIVITY="${APP_PACKAGE_NAME}.screens.SplashActivity"
DEVICES_IP_FILE=lots_of_ips.txt
SOURCE="/${APK_FOLDER}/${APK_FLAVOR}/${APK_BUILD_TYPE}/${APK_FILE}"
DEST=/storage/emulated/0/${APK_FILE}
# Read "<ip>:<port>" lines.  BUG FIX: use IFS= and -r so whitespace and
# backslashes in the file are preserved, and quote every expansion.
while IFS= read -r line
do
    echo "$line"
    IP="$(echo "$line" | cut -d':' -f1)"
    PORT="$(echo "$line" | cut -d':' -f2)"
    # Cap scp at 20 s so one unreachable device does not stall the run.
    timeout 20 scp -P "$PORT" "$SOURCE" "root@${IP}:$DEST"
    # -n keeps ssh from consuming the rest of DEVICES_IP_FILE on stdin.
    ssh -n -oStrictHostKeyChecking=no "root@${IP}" -p "$PORT" "am force-stop ${APP_PACKAGE_NAME}; mv ${DEST} /data/local/tmp/; pm install -r -t -d /data/local/tmp/${APK_FILE}; am start -n${APP_PACKAGE_NAME}/${APP_LAUNCHER_ACTIVITY};"
    sleep 1
done < "$DEVICES_IP_FILE"
| true |
602feac54413c0db64839d87494651db781d85da
|
Shell
|
chennachaos/gen3dpreprocessor
|
/bin/fortranjob.job
|
UTF-8
| 1,226 | 2.65625 | 3 |
[] |
no_license
|
#!/bin/bash
# SLURM batch job: run the parallel Fortran partitioner (partfort).
# Set the name of the job
# (this gets displayed when you get a list of jobs on the cluster)
#SBATCH --job-name="parallelfort"
#SBATCH --output=fortran-partition.out
# Specify the maximum wall clock time your job can use
# (Your job will be killed if it exceeds this)
#SBATCH --time=5:00:00
# Specify the number of cpu cores your job requires
#SBATCH --ntasks=4
# Specify the amount of memory your job needs per cpu-core (in Mb)
# (Your job will be killed if it exceeds this for a significant length of time)
#SBATCH --mem-per-cpu=5000
# Set up the environment
module purge
module load hpcw
module load petsc/3.7.5
#module load compiler/intel/2018/3
#module load mpi/intel/2018/3
#export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib64
#export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/app/libraries/impi/5.0.1.035/lib64
#export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/home/s.engkadac/mylibs/parmetis-4.0.3-install/lib
# Make the PETSc runtime libraries visible to the executable.
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/app/libraries/petsc/3.7.5/el6/AVX/intel-16.0/intel-5.1/lib
#export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/app/libraries/petsc/3.7.5/el6/SSE4.2/intel-16.0/intel-5.1/lib
# Run the application
# NOTE(review): partfort arguments ("m6", 10) — presumably the mesh name
# and the number of partitions; confirm against the partfort sources.
echo My job is started
mpirun ./partfort m6 10
echo My job has finished
| true |
a9985a36f09e1bd6e101d551de4c1cb8b480fd57
|
Shell
|
widnyana/dotfiles
|
/bin/swagger-editor
|
UTF-8
| 380 | 2.84375 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Run swagger-editor in Docker, pulling the image first when it is missing.
# BUG FIX: the original tested [[ "$(docker inspect ... >> /dev/null 2>&1)" ]];
# the command substitution is always empty (all output is redirected), so the
# condition was always false and the pull branch never ran.  Test docker's
# exit status directly instead.
if ! docker inspect "swaggerapi/swagger-editor" > /dev/null 2>&1; then
    echo -e "Pulling swaggerapi/swagger-editor from docker hub"
    docker pull "swaggerapi/swagger-editor:latest"
fi
echo -e "Running swagger-editor..."
# Editor becomes reachable on host port 80 (container port 8080).
docker run -p 80:8080 swaggerapi/swagger-editor
#echo -e "Opening new tab on your browser"
#xdg-open "http://localhost:8080"
| true |
62c0517b31ea7e5bc1a36d9fffa2a3913fc3e212
|
Shell
|
m2f0u4d/var
|
/remove-i2p.sh
|
UTF-8
| 2,926 | 2.890625 | 3 |
[] |
no_license
|
#!/bin/sh
# Revert all I2P-related changes on a Qubes/Whonix VM: package, repository,
# key, helper scripts, Tor config, persistent bind dirs and firewall rules.
# BUG FIX: the shebang was "#/bin/sh" (missing '!'), so the kernel could not
# identify the interpreter when the script was executed directly.
#
# Tor configuration lines that were added for I2P.
TORCONF="
'TransPort 127.0.0.1:9041'
'DnsPort 127.0.0.1:54'"
# bind-dirs entries used by newer Qubes layouts (bind-dirs.sh present).
NEWBINDS="
binds+=( '/etc/i2p' )
binds+=( '/var/lib/i2p/i2p-config/' )"
# Bind entries used by older qubes-whonix layouts.
OLDBINDS="
'/rw/srv/whonix/etc/i2p:/etc/i2p'
'/rw/srv/whonix/var/lib/i2p/i2p-config:/var/lib/i2p/i2p-config'
'/rw/srv/whonix/usr/share/i2p:/usr/share/i2p'"
# NOTE(review): this assigns the literal text "NO_NAT_USERS+=<newline>..."
# to FWCONFIG — the NO_NAT_USERS fragment looks like it was meant to be part
# of the quoted list; confirm before changing.
FWCONFIG=NO_NAT_USERS+="
GATEWAY_TRANSPARENT_DNS=1
GATEWAY_TRANSPARENT_TCP=1
SOCKS_PORT_I2P_BOB=2827
SOCKS_PORT_I2P_TAHOE=3456
SOCKS_PORT_I2P_WWW=4444
SOCKS_PORT_I2P_WWW2=4445
SOCKS_PORT_I2P_IRC=6668
SOCKS_PORT_I2P_XMPP=7622
SOCKS_PORT_I2P_CONTROL=7650
SOCKS_PORT_I2P_SOCKSIRC=7651
SOCKS_PORT_I2P_SOCKS=7652
SOCKS_PORT_I2P_I2CP=7654
SOCKS_PORT_I2P_SAM=7656
SOCKS_PORT_I2P_EEP=7658
SOCKS_PORT_I2P_SMTP=7659
SOCKS_PORT_I2P_POP=7660
SOCKS_PORT_I2P_BOTESMTP=7661
SOCKS_PORT_I2P_BOTEIMAP=7662
SOCKS_PORT_I2P_MTN=8998"
# I2P helper files to delete.
FILES="
/usr/bin/i2p-launcher
/usr/share/icons/anon-icon-pack/i2p*
/etc/qubes/suspend-post.d/30_i2p_start.sh
/etc/qubes/suspend-pre.d/30_i2p_restart.sh
/usr/lib/i2p/i2p.sh"
# Warn the operator and require confirmation; any answer not starting with
# y/Y/j/J ("ja") exits the script.
disclaimer(){
# NOTE(review): under bash, echo without -e prints a literal "\n" here;
# under dash it expands — depends on which sh runs the script.
echo "Disclaimer \n"
echo "This script will revert all changes regarding I2P"
echo -n "Are you sure you wish to continue? (y/n) "
read ans
case $ans in
y*|Y*|j*|J*)
;;
*)
exit 0
;;
esac
}
# Workstation-side cleanup: remove the FoxyProxy extension used for I2P.
remove_i2p_ws(){
apt-get remove xul-ext-foxyproxy-standard
rm /home/user/.tb/tor-browser/Browser/TorBrowser/Data/Browser/profile.default/extensions/foxyproxy@eric.h.jung
#remove socat forwarding
}
# Gateway-side cleanup: remove the i2p package, repository, key, helper
# scripts, Tor config lines, persistent bind dirs and firewall settings.
remove_i2p_gw(){
echo "Removing I2P"
apt-get remove i2p
clear
echo "OK"
echo "Removing the I2P Repository from the Apt list"
rm /etc/apt/sources.list.d/i2p.list
echo "OK"
echo "Removing I2P Key"
apt-key del 0x67ECE5605BCF1346
echo "OK"
echo "Removing all I2P scripts"
# BUG FIX: the original read "[ -e $file]" (no space before ']') and had an
# empty "else" branch ("else" directly followed by "fi") — a shell syntax
# error that prevented the entire script from parsing.
for file in $FILES; do
if [ -e "$file" ]; then
rm "$file"
fi
done
echo "OK"
echo "Removing Tor Config changes"
# NOTE(review): TORCONF entries contain spaces and quotes, so the unquoted
# expansion word-splits them and these sed patterns are unlikely to match
# whole torrc lines — confirm the intended format before relying on this.
for config in $TORCONF; do
sed -i /$config/d /etc/tor/torrc
done
echo "OK"
echo "Removing I2P Path from Persistent dirs"
if [ -e /usr/lib/qubes/bind-dirs.sh ] || [ -e /usr/lib/qubes/init/bind-dirs.sh ] ; then
for binds in $NEWBINDS; do
sed -i /$binds/d /usr/lib/qubes-bind-dirs.d/50_qubes-whonix.conf
done
echo "OK"
else
for binds in $OLDBINDS; do
sed -i /$binds/d /usr/lib/qubes-whonix/bind-directories
done
echo "OK"
fi
echo "Removing I2P Firewall Rules"
# NOTE(review): "test" is a literal file name here — looks like a leftover
# debugging target; confirm the real firewall config path.
for conf in $FWCONFIG; do
sed -i /$conf/d test
done
}
echo "OK"
# Dispatch: only TemplateVMs are processed; gateway vs workstation cleanup
# is chosen by which anon-*-base-files marker exists.
qubes_vm_type="$(qubesdb-read /qubes-vm-type)"
if [ "$qubes_vm_type" = "TemplateVM" ]; then
# Display warning that TemplateVM is not connected to a Tor update proxy.
if [ ! -e '/var/run/qubes-service/whonix-secure-proxy' ]; then
/usr/lib/qubes-whonix/alert update /usr/lib/qubes-whonix/messages.yaml
fi
if [ -e /usr/share/anon-gw-base-files/gateway ]; then
disclaimer
remove_i2p_gw
# BUG FIX: the original read "elif [-e ..." (missing space after '['),
# which is a syntax error.
# NOTE(review): "worksation" looks like a typo for "workstation", but the
# path is kept as-is in case it matches the actual marker file on disk.
elif [ -e /usr/share/anon-ws-base-files/worksation ]; then
remove_i2p_ws
fi
fi
| true |
14e28020c60f276091cba3cbc8952703a053401c
|
Shell
|
Ohyoukillkenny/parallelTest
|
/scripts/uniform_threadN.sh
|
UTF-8
| 1,376 | 2.609375 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Benchmark groupbySum with a growing number of pool threads: a baseline
# run without child threads, then 1..32 threads, ROUND repetitions each.
ROOT=".."
ENV="java -classpath $ROOT/target/classes/ "
ROUND="3"                       # repetitions per configuration
STREAM_LENGTH="100000000"
NUM_OF_KEYS="10000"
RANGE_OF_VALS="1000"

echo ">>>>>> baseline (no child thread) <<<<<<<"
for ((i = 0 ; i < $ROUND ; i++)); do
    $ENV groupbySum.Thread_Single_Uniform $STREAM_LENGTH $NUM_OF_KEYS $RANGE_OF_VALS
done

# The original repeated this stanza verbatim once per thread count; a loop
# produces the identical output with no duplication.
for threads in 1 2 4 8 16 32; do
    echo ">>>>>> $threads threads in the thread pool <<<<<<<"
    for ((i = 0 ; i < $ROUND ; i++)); do
        $ENV groupbySum.Thread_N_Uniform $threads $STREAM_LENGTH $NUM_OF_KEYS $RANGE_OF_VALS
    done
done
| true |
f9189ac832a31b13ec19e01edf9caea8803f2ca1
|
Shell
|
rsling/linkingelements
|
/Data/Database/compounds/extract_compounds.sh
|
UTF-8
| 127 | 2.5625 | 3 |
[] |
no_license
|
#!/bin/bash
set -e
set -u
gunzip -c ${1} | cut -s -f9 | grep -v '^_$' | sort -T . -S 5G > $(basename ${1} .xml.gz)_comps.txt
| true |
cacbff9afe98e36899f2695adac9227530c261cd
|
Shell
|
PhyloStar/NorskASK
|
/rest-udpipe
|
UTF-8
| 319 | 2.828125 | 3 |
[] |
no_license
|
#!/bin/bash
# Send INFILE to the LINDAT UDPipe REST API (tagger + parser, horizontal
# input) and write the CoNLL-U result to OUTFILE.
INFILE=$1
OUTFILE=$2
BASE_URL="https://lindat.mff.cuni.cz/services/udpipe/api/process"
MODEL="norwegian-bokmaal-ud-2.3-181115" # Norwegian (bokmål)
# BUG FIX: the options were kept in a string and passed as ONE quoted
# argument (curl "$PARAMS"), which curl cannot parse; keep them in an array
# so each option remains a separate word.
PARAMS=(-F "data=@$INFILE" -F input=horizontal -F tagger= -F parser= -F model="$MODEL")
curl "${PARAMS[@]}" "$BASE_URL" | jq --raw-output ".result" > "$OUTFILE"
| true |
d9bbf453186fe430fc0a8b9bc7ee932e03998d4a
|
Shell
|
linuxmint/ubiquity
|
/tests/build
|
UTF-8
| 1,342 | 3.15625 | 3 |
[] |
no_license
|
#!/bin/sh
# I'm sure replicating much of debian/rules is entirely wrong, but I'm going to
# do it anyway...
if ! dpkg-checkbuilddeps; then
sudo apt-get -y build-dep ubiquity
fi
if [ ! -e d-i/source ]; then
make -C d-i update
make -C d-i build
fi
[ -e src/Makefile ] || ./configure
make -C src
rebuild=false
if [ ! -e d-i/templates ]; then
# For debian-installer-utils
DEB_HOST_ARCH_OS=$(dpkg-architecture -qDEB_HOST_ARCH_OS) \
fakeroot make -C d-i install
rebuild=:
fi
# For loading the console-setup plugin. This is needed for at least testing
# loading of plugins.
[ -e ubiquity/keyboard_names.py ] || ./debian/rules ubiquity/keyboard_names.py
[ -e tests/debconf-stamp ] || rebuild=:
if ! $rebuild && [ -e tests/debconfdb ]; then
for x in d-i/templates debian/ubiquity.templates \
debian/ubiquity.templates-imported; do
if [ $x -nt tests/debconf-stamp ]; then
rebuild=:
break
fi
done
else
rebuild=:
fi
$rebuild && (cat d-i/templates; echo;
po2debconf debian/ubiquity.templates; echo;
po2debconf debian/ubiquity.templates-imported \
--podir=debian/imported-po) > tests/debconfdb
touch tests/debconf-stamp
export DEBCONF_SYSTEMRC=tests/debconf.conf
$rebuild && debconf-loadtemplate ubiquity tests/debconfdb
rm -rf tests/partman-tree
for x in d-i/source/part*.udeb; do dpkg -x $x tests/partman-tree; done
| true |
f54f3afc236efcb8675990770b8405d2548a09ef
|
Shell
|
zchee/zsh-default-completions
|
/src/Unix/Type/_diff_options
|
UTF-8
| 9,267 | 3.71875 | 4 |
[] |
no_license
|
#autoload
local of ofwuc ouc oss ofwy ofwg ofwl cmd variant
local -a args
cmd="$1"
shift
_diff_palette() {
local context state line ret=1
local -a suf
_values -s : attribute \
"ad[added text]:attribute [32]:->attrs" \
"de[deleted text]:attribute [31]:->attrs" \
"hd[header]:attribute [1]:->attrs" \
"ln[line numbers]:attribute [36]:->attrs" \
"rs[rest - other text]:attribute [0]:->attrs" && ret=0
if [[ -n $state ]]; then
compset -P '*;'
compset -S '[;=]*' || suf=( -S: -r ": ;\\\t\n\=" )
_alternative -C context -O suf \
'attributes:attributes:((0:reset 1:bold 3:italics 4:underline 5:blink))' \
'colors:color:((30:default 31:red 32:green 33:yellow 34:blue 35:magenta 36:cyan 37:white))' && ret=0
fi
return ret
}
if _pick_variant -r variant -c $cmd gnu=GNU unix -v || [[ $OSTYPE = freebsd<12->.* ]]; then
# output formats
of="-y --side-by-side -n --rcs -e -f --ed -q --brief -c -C --context -u -U \
--unified --old-group-format --new-group-format --changed-group-format \
--unchanged-group-format --line-format --old-line-format --new-line-format \
--unchanged-line-format -D --ifdef"
# output formats w/o unified and context
ofwuc="-y --side-by-side -n --rcs -e -f --ed -q --brief --old-group-format \
--new-group-format --changed-group-format --unchanged-group-format \
--line-format --old-line-format --new-line-format --unchanged-line-format \
-D --ifdef"
# option specific to unified or context diff
ouc='-L --label -p --show-c-function -F --show-function-line'
# option specific to side by side
oss='-W --width --left-column --suppress-common-lines'
# output formats w/o side by side
ofwy="-n --rcs -e -f --ed -q --brief -c -C --context -u -U --unified \
--old-group-format --new-group-format --changed-group-format \
--unchanged-group-format --line-format --old-line-format \
--new-line-format --unchanged-line-format -D --ifdef"
# output formats w/o group format
ofwg="-n --rcs -e -f --ed -q --brief -c -C --context -u -U --unified \
--line-format --old-line-format --new-line-format --unchanged-line-format
-D --ifdef"
# output formats w/o line format
ofwl="-n --rcs -e -f --ed -q --brief -c -C --context -u -U --unified \
--old-group-format --new-group-format --changed-group-format \
--unchanged-group-format"
if [[ $variant = gnu ]]; then
(( $#words > 2 )) || args+=(
'(-v --version)'{-v,--version}'[display version information]'
'--help[display usage information]'
)
args+=(
'(-H --speed-large-files)'{-H,--speed-large-files}'[assume large files and many small changes]'
'(-E --ignore-tab-expansion)'{-E,--ignore-tab-expansion}'[ignore changes due to tab expansion]'
'(-Z --ignore-trailing-space)'{-Z,--ignore-trailing-space}'[ignore white space at line end]'
"($ofwuc $oss -F --show-function-line)"{-F+,--show-function-line=}'[show the most recent line matching regex]:regex'
"($ofwy $ouc --width -W)"{--width=,-W+}'[set size of line]:number of characters per line'
"($ofwy $ouc)--left-column[output only left column of common lines]"
"($ofwy $ouc)--suppress-common-lines[do not output common lines]"
"($ofwg $ouc $oss)--old-group-format=[set old group format]:old group format"
"($ofwg $ouc $oss)--new-group-format=[set new group format]:new group format"
"($ofwl $ouc $oss)--unchanged-line-format=[set unchanged line format]:unchanged line format"
'(--to-file)--from-file=[compare specified file to all operands]:from file:_files' \
'(--from-file)--to-file=[compare all operands to specified file]:to file:_files' \
'--color=-[use colors in output]::when [auto]:(never always auto)'
'--palette=[specify colors to use]:color:_diff_palette'
"($of $ouc)--side-by-side[output in two columns]"
"($of $ouc)-y[output in two columns]"
)
else
args+=( '!--speed-large-files' )
fi
_arguments -s $args \
'(-i --ignore-case)'{-i,--ignore-case}'[case insensitive]' \
'--ignore-file-name-case[ignore case when comparing file names]' \
'!(--ignore-file-name-case)--no-ignore-file-name-case' \
'(-b --ignore-space-change)'{-b,--ignore-space-change}'[ignore changes in the amount of white space]' \
'(-w --ignore-all-space)'{-w,--ignore-all-space}'[ignore all white space]' \
'(-B --ignore-blank-lines)'{-B,--ignore-blank-lines}'[ignore lines that are all blank]' \
'(-I --ignore-matching-lines)'{-I+,--ignore-matching-lines=}'[ignore lines that match regex]:line exclusion regex:' \
'--strip-trailing-cr[strip trailing carriage return on input]' \
'(-a --text)'{-a,--text}'[treat all files as text]' \
"($of $oss)"{-C+,--context=-}'[output a context diff]:number of lines of copied context' \
"($of $oss)-c[output a context diff]" \
"($of $oss)"{-U+,--unified=-}'[output a unified diff]:number of lines of unified context' \
"($of $oss)-u[output a unified diff]" \
"($ofwuc $oss)*"{-L+,--label=}'[set label to use instead of file name and timestamp]:label' \
"($ofwuc $oss -p --show-c-function)"{-p,--show-c-function}'[show C function of each change]' \
"($of $ouc $oss)"{-q,--brief}'[output only whether files differ]' \
"($of $ouc $oss -e --ed)"{--ed,-e}'[output an ed script]' \
"!($of $ouc $oss)--normal" \
"($of $ouc $oss)"{-f,--forward-ed}'[output a reversed ed script]' \
"($of $ouc $oss)"{-n,--rcs}'[output an RCS format diff]' \
"($of $oss)"{-D+,--ifdef=}'[output merged file with preprocessor directives]:preprocessor symbol' \
"($ofwg $ouc $oss)--changed-group-format=[set changed group format]:changed group format" \
"($ofwg $ouc $oss)--unchanged-group-format=[set unchanged group format]:unchanged group format" \
"($ofwl $ouc $oss)--line-format=[set line format]:line format" \
"($ofwl $ouc $oss)--old-line-format=[set old line format]:old line format" \
"($ofwl $ouc $oss)--new-line-format=[set new line format]:new line format" \
'(-l --paginate)'{-l,--paginate}'[long output format (paginate with pr(1))]' \
'(-t --expand-tabs)'{-t,--expand-tabs}'[expand tabs to spaces]' \
'(-T --initial-tab)'{-T,--initial-tab}'[prepend a tab]' \
'--tabsize=[specify width of tab]:width [8]' \
'(-r --recursive)'{-r,--recursive}'[recursively compare subdirectories]' \
"--no-dereference[don't follow symbolic links]" \
'(-N --new-file)'{-N,--new-file}'[treat absent files as empty]' \
'(-P --unidirectional-new-file)'{-P,--unidirectional-new-file}'[treat absent first files as empty]' \
'(-s --report-identical-files)'{-s,--report-identical-files}'[report when two files are the same]' \
\*{-x+,--exclude=}'[exclude files matching pattern]:exclusion pattern' \
\*{-X+,--exclude-from=}'[exclude files matching pattern in file]:exclude file:_files' \
'(-S --starting-file)'{-S+,--starting-file=}'[set first file in comparison]:start with file:_files' \
'--horizon-lines=[set number of lines to keep in prefix and suffix]:number of horizon lines' \
'(-d --minimal)'{-d,--minimal}'[try to find a smaller set of changes]' \
"$@"
else
of='-c -e -f'
case $OSTYPE in
openbsd*|solaris2.<9->)
of+=' -u -U'
args+=(
"($of)-u[output a unified diff]"
"($of)-U+[output a unified diff]:lines of context"
)
;|
openbsd*|solaris*)
args+=(
"($of)-C+[output a context diff]:lines of context"
"($of)-D+[output merged file with preprocessor directives]:preprocessor symbol"
'-i[case insensitive]'
'-l[long output format (paginate with pr(1))]'
'-s[report on identical files]'
'-t[expand tabs in output lines]'
)
;|
solaris*)
of+=' -h -n'
args+=(
'-w[ignore all white space]'
"($of)-h[do a fast, half-hearted job]"
"($of)-n[output a reversed ed script]"
'-S+[set first file in comparison]:start with file:_files'
)
;;
openbsd*)
of+=' -n -q -u -C -D -U'
args=(
"($of)-n[output an rcsdiff(1)-compatible diff]"
"($of)-q[only print a line when the files differ; does not produce a list of changes]"
'-a[treat all files as ASCII text]'
'-d[try to produce the smallest diff possible]'
'-I[ignore changes whose lines match the extended regular expression]:extended regular expression pattern'
'*-L[print a label instead of the file name and time]:label'
'-p[show characters from the last line before the context]'
'-T[consistently align tabs]'
'-w[like -b, but totally ignore whitespace]'
'-N[treat absent files in either directory as if they were empty]'
'-P[treat absent files in the second directory as if they were empty]'
'-S[start a directory diff from a file name]:file name:_files'
'*-X[exclude files and subdirectories whose basenames match lines in a file]:file name:_files'
'-x[exclude files and subdirectories whose basenames match a pattern]:pattern'
)
;;
esac
_arguments -s "$args[@]" \
"($of)-c[output a context diff]" \
"($of)-e[output an ed script]" \
"($of)-f[output a reversed ed script]" \
'-b[skip trailing white spaces]' \
'-r[recursively compare subdirectories]' \
"$@"
fi
# vim:ft=zsh
| true |
9c1ac9ac8a94703634cf13fc5006f2a61b70fb51
|
Shell
|
GligorGrigorov/os-course-2020
|
/shell/05/05-b-9200.sh
|
UTF-8
| 437 | 3.359375 | 3 |
[] |
no_license
|
#!/bin/bash
# Log-and-remove helper: deletes the given files (recursively when -r is the
# first argument) and appends a timestamped record of every removal to
# $logFile.
#
# BUG FIX: $logFile was used throughout but never defined anywhere, so
# "touch" and every ">>" redirection operated on an empty name — the failing
# redirection then prevented rm from running at all.  Fail fast with a clear
# message; callers must provide logFile in the environment.
: "${logFile:?logFile must be set in the environment}"

if [ $# -lt 1 ]; then
	echo "Wrong arg. num."
	exit 1
fi
if [ "${1}" = "-r" ]; then
	# Recursive mode needs at least one path after the flag.
	if [ $# -lt 2 ]; then
		echo "Wrong Arg. num."
		exit 2
	fi
	touch "${logFile}"
	shift 1
	while [ $# -ne 0 ]; do
		echo -n $(date) >> "${logFile}"
		# NOTE: ${1} left unquoted so callers may pass globs — confirm.
		rm -rdv ${1} &>> "${logFile}"
		shift 1
	done
	exit 0
fi
touch "${logFile}"
while [ $# -ne 0 ]; do
	echo -n $(date) >> "${logFile}"
	rm -dv ${1} &>> "${logFile}"
	shift 1;
done
| true |
21524a404177b37ea9eb6141ac66940b05e9c998
|
Shell
|
DeepBehavier/STR-PIP
|
/scripts_dir/test_loc_concat.sh
|
UTF-8
| 1,830 | 2.625 | 3 |
[] |
no_license
|
#!/bin/bash
# mode='evaluate'
mode='extract'
gpu_id=0
n_workers=0
n_acts=1
seq_len=1
predict=0
pred_seq_len=30
# test only
slide=0
rand_test=1
log_every=10
split='train'
# split='test'
use_gru=1
use_trn=0
pos_mode='none'
use_act=0
use_gt_act=0
use_pose=0
# branch='ped'
branch='both'
collapse_cls=0
combine_method='pair'
annot_loc_format='/sailhome/bingbin/STR-PIP/datasets/annot_{}_loc.pkl'
load_cache='masks'
# load_cache='none'
cache_format='/sailhome/bingbin/STR-PIP/datasets/cache/jaad_loc/{}/ped{}_fid{}.pkl'
save_cache_format=$cache_format
ckpt_name='loc_concat_gru_seq30_pred30_lr1.0e-04_wd1.0e-05_bt1_posNone_branchboth_collapse0_combinepair_tmp'
# -1 for the best epoch
which_epoch=-1
# this is to set a non-existent epoch s.t. the features are extracted from ImageNet backbone
# which_epoch=100
if [ "$mode" = "extract" ]
then
extract_feats_dir='/sailhome/bingbin/STR-PIP/datasets/cache/jaad_loc/JAAD_conv_feats/'$ckpt_name'/'$split'/'
else
extract_feats_dir='none_existent'
fi
CUDA_VISIBLE_DEVICES=$gpu_id python3 test.py \
--model='loc_concat' \
--split=$split \
--n-acts=$n_acts \
--mode=$mode \
--device=$gpu_id \
--log-every=$log_every \
--dset-name='JAAD_loc' \
--ckpt-name=$ckpt_name \
--batch-size=1 \
--n-workers=$n_workers \
--annot-loc-format=$annot_loc_format \
--load-cache=$load_cache \
--save-cache-format=$save_cache_format \
--cache-format=$cache_format \
--seq-len=$seq_len \
--predict=$predict \
--pred-seq-len=$pred_seq_len \
--use-gru=$use_gru \
--use-trn=$use_trn \
--use-act=$use_act \
--use-gt-act=$use_gt_act \
--use-pose=$use_pose \
--pos-mode=$pos_mode \
--collapse-cls=$collapse_cls \
--slide=$slide \
--rand-test=$rand_test \
--branch=$branch \
--which-epoch=$which_epoch \
--extract-feats-dir=$extract_feats_dir
| true |
11614dc5e55bb1f0023c0ac72f39db4ba00a5038
|
Shell
|
specialworld83/condres-packages
|
/nagios/check_md_raid/PKGBUILD
|
UTF-8
| 1,702 | 2.59375 | 3 |
[] |
no_license
|
# Maintainer: Condres OS Dev Team (x86_64) <info@codelinsoft.it>
# PKGBUILD for the check_md_raid Nagios plugin (patched for python2, sudo
# support and clean-state checking).
pkgname=check_md_raid
pkgver=0.7.2
pkgrel=9
pkgdesc="Linux Software RAID Monitoring Plugin for Nagios"
arch=('any')
url="http://exchange.nagios.org/directory/Plugins/Operating-Systems/Linux/Linux-Software-Raid-Plugin-for-32-2Dbit-and-64-2Dbit-systems/details"
license=('GPL')
depends=('nagios' 'python2' 'mdadm')
optdepends=('sudo: check with non-root user')
# Upstream script comes from a Nagios Exchange attachment URL, renamed to
# <pkgname>-<pkgver>.py; local patches and a sudoers snippet follow.
source=("${pkgname}-${pkgver}.py::http://exchange.nagios.org/components/com_mtree/attachment.php?link_id=782&cf_id=24"
'0001-use-python2.patch'
'0002-strip-whitespace-from-state.patch'
'0003-use-sudo-when-running-as-non-root-user.patch'
'0004-make-checking-a-clean-state.patch'
'sudo')
sha256sums=('b85f0a77215870839cb18a74d9e38b594eaeda5c44ddc88aff8d2c2246f506f7'
'9721c32d4b8124f2db54f41574008018a605567e50a59eb9ffcd98e99ba8db98'
'527d98535f4cda98a468ea69f6a3cfb3a53e27ecbd2affc553985be70e29125a'
'6267ecf8aff1da308216675f94e9e91f91349ed3b1054b8c8c1dc4d08ef632ad'
'542d756357f4e8afd16962015428efccabb9f4410c873556a2b8d6e0cc37ca69'
'4882cf971684deb2afd6e1deb3883272dd41ed544576619f816498c6111c16cc')
# Copy the upstream plugin to its final name and apply the patches in order.
build() {
cp ${pkgname}-${pkgver}.py ${pkgname}
patch -Np1 < ${srcdir}/0001-use-python2.patch
patch -Np1 < ${srcdir}/0002-strip-whitespace-from-state.patch
patch -Np1 < ${srcdir}/0003-use-sudo-when-running-as-non-root-user.patch
patch -Np1 < ${srcdir}/0004-make-checking-a-clean-state.patch
}
# Install the plugin plus the sudoers snippet enabling non-root checks.
package() {
install -D -m 0755 ${srcdir}/${pkgname} ${pkgdir}/usr/lib/monitoring-plugins/${pkgname}
install -d -m 0750 ${pkgdir}/etc/sudoers.d
install -D -m 0440 ${srcdir}/sudo ${pkgdir}/etc/sudoers.d/check_md_raid
}
| true |
6052b3d63c40e0e44fbc453b37b9f2173471b0e4
|
Shell
|
AlexHarn/modded-PPC
|
/real/fit/compress.sh
|
UTF-8
| 106 | 2.609375 | 3 |
[] |
no_license
|
#!/bin/sh
# Summarise and compact a fit output directory: print warnings/errors from
# the run logs, delete the per-run directories, then gzip the remaining
# out/log files.  $1 selects the out/tmp.<tag> directory.
dir=out/tmp.$1
# BUG FIX (idiom): egrep is deprecated; grep -E is the supported spelling.
grep -E '(War|Err)' $dir/run-*/fit.*/log*
rm -r $dir/run-*
# NOTE(review): brace expansion is not POSIX sh — this works only where
# /bin/sh is bash/ksh; confirm the target system before relying on it.
gzip $dir/{out,log}*
| true |
1ab5d81ba699b0df498683ec2f212235c0dd9bce
|
Shell
|
burmist-git/corto_win2
|
/mountBackup.bash
|
UTF-8
| 1,551 | 3.25 | 3 |
[] |
no_license
|
#!/bin/bash
########################################################################
# #
# Copyright(C) 2017 - CORTO Collaboration #
# Mon Sep 11 10:34:14 CEST 2017 #
# Autor: Leonid Burmistrov #
# #
# Script description: #
# This script makes symbolic link to the folder #
# with backup information from Corto Crates. #
# #
# Input paramete: NON #
# #
# #
# This software is provided "as is" without any warranty. #
# #
########################################################################
# Create (or refresh) the symlink $CORTOWINHOME/$CORTOWINDATAPCBACKUP so it
# always points at $CORTOWINARCHIVEFULLPATH.
# BUG FIX (robustness): quote all path expansions so values containing
# spaces — or left empty — do not word-split in the tests and ln calls.
mountBackup () {
	symbl="$CORTOWINHOME/$CORTOWINDATAPCBACKUP"
	if [ ! -L "$symbl" ];
	then
	#echo "ln -s $CORTOWINPCSERDI6MPC/Program\ Files\ \(x86\)/WaveCatcher_64ch/${CORTOWINUSBWCCARTEFOLDERarr[$i]}/ $symbl"
	ln -s "$CORTOWINARCHIVEFULLPATH" "$symbl"
	echo " ---> $symbl"
	else
	# Replace a stale link with a fresh one.
	unlink "$symbl"
	ln -s "$CORTOWINARCHIVEFULLPATH" "$symbl"
	fi
	# NOTE(review): returns 1 even on success — callers appear to ignore
	# the status; confirm before changing.
	return 1
}
mountBackup
| true |
1873f2e0b16c41c6cfae39f01962f34e61ea37f9
|
Shell
|
qrush/go
|
/assignment2/test/run
|
UTF-8
| 2,254 | 3.59375 | 4 |
[] |
no_license
|
#!/bin/bash
PASSED=0
FAILED=0

# Assert that running bin/ls with the arguments in $1 prints exactly $2.
t() {
  e "$1"
  if [ "$EVAL" = "$2" ]; then
    echo "  PASS: '$1' = '$2'"
    pass
  else
    echo "  FAIL: '$1' = '$2' WAS '$EVAL'"
    fail
  fi
}

# Assert that the last captured output matches the extended regex in $1.
m() {
  if [[ $EVAL =~ $1 ]]; then
    echo "  PASS: =~ '$1'"
    pass
  else
    echo "  FAIL: '$EVAL' !~ '$1' WAS '$EVAL'"
    fail
  fi
}

# Run bin/ls with the given (word-split) arguments, capturing stdout
# into the global EVAL for later assertions.
e() {
  EVAL=$(eval bin/ls $1)
  echo -e ">> WITH args '$1'"
}

pass() {
  PASSED=$((PASSED + 1))
}

fail() {
  FAILED=$((FAILED + 1))
}
# Test plan: usage errors, syntax/stat/read failures, directory listings
# for the fixture trees, permission handling, and the size/uid/gid
# scripts.  Relies on the t/m/e helpers defined above and the fixtures
# under test/.
# NOTE(review): the commas on the next two lines are literal shell words,
# so the first argument passed to t is "," rather than "" / "." — looks
# like they were meant as `t "" "Usage..."`; confirm against bin/ls.
t "", "Usage: ls [directory] [script.ls]"
t ".", "Usage: ls [directory] [script.ls]"
t ". test/syntax-fail1.ls" "syntax error"
t ". test/syntax-fail2.ls" "syntax error"
t "nonexistentdir test/example.ls" "Couldn't stat nonexistentdir"
t ". test/nonexistent.ls" "Couldn't find test/nonexistent.ls"
t ". test/empty.ls" "Script was empty: test/empty.ls"
# temporarily make the script unreadable to exercise the read-error path
chmod 100 test/example.ls
t "test/simpletree test/example.ls" "There was an error reading: test/example.ls"
chmod 644 test/example.ls
e "test/simpletree test/example.ls"
m "test/simpletree:"
m "test/simpletree/one"
m "test/simpletree/two"
m "test/simpletree/three"
e "test/complextree test/example.ls"
m "test/complextree:"
m "test/complextree:"
m "test/complextree/gamma"
m "test/complextree/beta"
m "test/complextree/alpha"
m "test/complextree/eins"
m "test/complextree/eins:"
m "test/complextree/eins/rawr"
m "test/complextree/eins/foobar"
m "test/complextree/eins/one"
m "test/complextree/eins/one:"
m "test/complextree/eins/one/foodir"
m "test/complextree/eins/one/foodir:"
m "test/complextree/eins/one/foodir/blah"
# unreadable directory trees must be reported, not listed
chmod 100 test/chmod100tree
e "test/chmod100tree test/example.ls"
m "Cannot get contents of test/chmod100tree"
chmod 755 test/chmod100tree
chmod 000 test/chmod600tree
chmod o+r test/chmod600tree
chmod o+w test/chmod600tree
e "test/chmod600tree test/example.ls"
m "Cannot get contents of test/chmod600tree"
chmod 755 test/chmod600tree
e "test/simpletree test/size.ls"
m "one.+2"
m "rit.jpg.+59525"
m "dorms.jpg.+1181694"
e "test/simpletree test/human_size.ls"
m "one.+2 B"
m "rit\.jpg.+58 KB"
m "dorms\.jpg.+1.1 MB"
e "test/simpletree test/uid.ls"
m "one.+`id -u`"
e "test/simpletree test/gid.ls"
m "one.+`id -g`"
echo -e "\n$PASSED passed, $FAILED failed"
| true |
fc1d03c562703a8d273bec709f5fb024b245814d
|
Shell
|
4charles2/livre
|
/Scripts-shell-Linux-et-Unix/exemples/ch05-Commandes_Variables_Systeme/exemple_getopts_1.sh
|
ISO-8859-2
| 371 | 3.859375 | 4 |
[] |
no_license
|
#! /bin/sh
# Demonstration of getopts: parse the flags -a/-b and the options -c/-d
# (which take an argument), then list the remaining positional operands.
while getopts "abc:d:" option ; do
# fixed mojibake: "numro" was "numéro" before the accented character was
# lost in re-encoding
echo -n "Analyse argument numéro $OPTIND : "
case $option in
a ) echo "Option A" ;;
b ) echo "Option B" ;;
c ) echo "Option C, argument $OPTARG" ;;
d ) echo "Option D, argument $OPTARG" ;;
? ) echo "Inconnu" ;;
esac
done
# drop the options getopts consumed; only operands remain
shift $((OPTIND - 1))
while [ $# -ne 0 ] ; do
# "$1" quoted so an operand containing whitespace is echoed verbatim
echo "Argument suivant : " "$1"
shift
done
| true |
8dacb7f627fae3e51f3cd670c372f1a505cb231a
|
Shell
|
brainlife/app-dipy-csamodel
|
/submit.pbs
|
UTF-8
| 1,558 | 3.015625 | 3 |
[] |
no_license
|
#!/bin/bash
#PBS -l nodes=4:ppn=16:dc2,walltime=0:45:00
#PBS -N app-dipy-csamodel
#PBS -V

# Convert a FreeSurfer aparc+aseg segmentation into a volume matched to
# the input NIfTI, then run the CSA model (main.py).  Writes an exit
# code to ./finished so the wrapper can pick up the result.

#for local testing
if [ -z "$SERVICE_DIR" ]; then export SERVICE_DIR=$(pwd); fi

#ENV="IUHPC"

# PBS starts jobs in $HOME; move back to the submission directory.
[ "$PBS_O_WORKDIR" ] && cd "$PBS_O_WORKDIR"

# "$ENV"/"$HPC" are quoted so these tests no longer error out with
# "unary operator expected" when the variables are unset.
if [ "$ENV" == "IUHPC" ]; then
    if [ "$HPC" == "KARST" ]; then
        module unload python
        module load anaconda2
        module load freesurfer/6.0.0
    fi
    if [ "$HPC" == "CARBONATE" ]; then
        module load freesurfer/6.0.0
    fi
    export PYTHONPATH=/N/u/brlife/Karst/git/dipy:$PYTHONPATH
    export PYTHONPATH=/N/u/aryaam/Karst/github_repos/nibabel:$PYTHONPATH
fi

if [ "$ENV" == "VM" ]; then
    export PYTHONPATH=$PYTHONPATH:/usr/local/dipy
    export PYTHONPATH=$PYTHONPATH:/usr/local/nibabel
fi

input_nii_gz=$("$SERVICE_DIR"/jq -r .data_file config.json)
freesurfer=$("$SERVICE_DIR"/jq -r .freesurfer config.json)

if [ ! -f volume.nii.gz ]; then
    # fixed typo: the original interpolated the undefined $freesufer here
    echo "converting freesurfer segments $freesurfer to volume using $input_nii_gz"
    # echo the full command into the job log, then run it
    echo mri_label2vol \
        --seg $freesurfer/mri/aparc+aseg.mgz \
        --regheader $freesurfer/mri/aparc+aseg.mgz \
        --temp $input_nii_gz \
        --o volume.nii.gz
    mri_label2vol \
        --seg $freesurfer/mri/aparc+aseg.mgz \
        --regheader $freesurfer/mri/aparc+aseg.mgz \
        --temp $input_nii_gz \
        --o volume.nii.gz
    if [ ! $? -eq 0 ]; then
        echo "failed to mri_label2vol"
        exit 1
    fi
fi

echo "running main"
time python "$SERVICE_DIR"/main.py
ret=$?
if [ $ret -ne 0 ]; then
    echo "main.py failed"
    echo $ret > finished
    exit $ret
fi

# peaks.pam5 is the expected model output; non-empty means success
if [ -s peaks.pam5 ];
then
    echo 0 > finished
else
    echo "files missing"
    echo 1 > finished
    exit 1
fi
| true |
a022452f50aaee80180551a8d1f8f30c42bddfc4
|
Shell
|
peterroelants/dotfiles
|
/setup.sh
|
UTF-8
| 3,011 | 3.265625 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
#######################################
# Create symlinks from files in this directory to the corresponding
# dot files in the home directory, install editor/shell plugins, and
# apply Gnome settings.
#######################################

# Variables
#######################################
# directory with dotfiles (quoted so paths with spaces work)
setup_script_path=$(realpath "$0")
dotdir=$(dirname "$setup_script_path")
echo "Dotfiles directory = $dotdir"

#######################################
# Replace a target path with a symlink into $dotdir.
# Arguments:
#   $1 - source file name inside $dotdir
#   $2 - path of the symlink to (re)create
#   $3 - human-readable name for the progress message
#######################################
link_dotfile() {
  echo "Make symlink to $3"
  rm -f "$2"
  ln -s "$dotdir/$1" "$2"
}

# Make symlink
#######################################
# Set the directory to the dotfiles directory (dir of this script)
cd "$dotdir"
echo "cd $dotdir"

link_dotfile aliases ~/.aliases aliases
link_dotfile bashrc ~/.bashrc bashrc
link_dotfile zshrc ~/.zshrc zshrc

# Install zsh plugins
git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions
git clone https://github.com/zsh-users/zsh-syntax-highlighting.git ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-syntax-highlighting
git clone --depth=1 https://github.com/romkatv/powerlevel10k.git ${ZSH_CUSTOM:-$HOME/.oh-my-zsh/custom}/themes/powerlevel10k

link_dotfile gitignore_global ~/.gitignore_global gitignore_global
link_dotfile gitconfig ~/.gitconfig gitconfig
link_dotfile condarc ~/.condarc condarc

# Install VSCode extensions
echo "Install VSCode extensions"
# Create list with `code --list-extensions > extensions.txt`
xargs -L 1 code --force --install-extension < "$dotdir/vscode_extensions.txt"

link_dotfile vscode_settings.json ~/.config/Code/User/settings.json "vscode settings"

# Symlink init.vim (create the nvim config dir first)
mkdir -p ~/.config/nvim/
link_dotfile init.vim ~/.config/nvim/init.vim "init.vim for nvim"

# Install vim plugins
echo "Install vim-plug"
# https://github.com/junegunn/vim-plug
sh -c 'curl -fLo "${XDG_DATA_HOME:-$HOME/.local/share}"/nvim/site/autoload/plug.vim --create-dirs \
       https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim'

echo "Install vim plugins"
nvim --headless +PlugInstall +qall

# Setup Gnome Terminal Theme
"$dotdir"/config_gnome_terminal.sh

# # Symlink tmux.conf
# echo "Make symlink to tmux.conf"
# rm -f ~/.tmux.conf
# ln -s $dotdir/tmux.conf ~/.tmux.conf

# # Symlink xsessionrc
# echo "Make symlink to xsessionrc"
# rm -f ~/.xsessionrc
# ln -s $dotdir/xsessionrc ~/.xsessionrc

# Set theme
#######################################
gsettings set org.gnome.desktop.interface gtk-theme 'Yaru-dark'

# Remap Keyboard
#######################################
gsettings set org.gnome.desktop.input-sources xkb-options "['caps:escape']"
| true |
649a1a20d2719e67dad76b4422762e8491b6adf1
|
Shell
|
mt5555/remap-ncl
|
/tests/toporemap-test.sh
|
UTF-8
| 1,770 | 2.875 | 3 |
[] |
no_license
|
#!/bin/bash
#
# map ne120 highres topo to low res RLL gridtest ne20np4 and ne30pg2 maps to lat/lon grids
#
# used to look at downsampling errors
#
# bilin vs mono/intbilin
# not a clear advantage between bilin and mono
#
# what about Y16_32?
#
# Workflow: pick a source grid (pg2 or np4) and a mapping algorithm,
# generate the SE and RLL grids, build the corresponding map file, remap
# the high-res PHIS test data down to 64x128, and plot the result.
#
# NOTE(review): exepath is assigned but never used below — confirm it
# can be dropped.
exepath=~/codes/tempestremap/
wdir=~/scratch1/mapping
mapalg=bilin_esmf
#mapalg=intbilin
#mapalg=bilin
#mapalg=mono
name1=ne120pg2
grid1=TEMPEST_ne120pg2.g
grid1s=TEMPEST_ne120pg2.scrip.nc
#name1=ne120np4
#grid1=TEMPEST_ne120.g
#grid1s=ne120np4_pentagons.100310.nc
./makeSE.sh 120
./makeRLL.sh 128 uni
rll1=64x128
# np4 grids carry the dual-grid PHIS_d field and need the SE->FV mapper;
# FV (pg2) grids use PHIS and the FV->FV mapper.  bilin_esmf always goes
# through the ESMF FV->FV path with the SCRIP grid description.
if [[ $name1 == *"np4"* ]]; then
var=PHIS_d
if [ "$mapalg" == "bilin_esmf" ] ; then
./makeFVtoFV_esmf.sh bilin $name1 $grid1s $rll1 ${rll1}_SCRIP.nc || exit 1
else
./makeSEtoFV.sh $mapalg $name1 $grid1 $rll1 ${rll1}_SCRIP.nc || exit 1
fi
else
if [ "$mapalg" == "bilin_esmf" ] ; then
./makeFVtoFV_esmf.sh bilin $name1 $grid1s $rll1 ${rll1}_SCRIP.nc || exit 1
else
./makeFVtoFV.sh $mapalg $name1 $grid1 $rll1 ${rll1}_SCRIP.nc || exit 1
fi
var=PHIS
fi
# the make* helpers are expected to have produced this map file
map=$wdir/maps/map_${name1}_to_${rll1}_${mapalg}.nc
if [ ! -f $map ]; then
echo missing map: $map
exit 1
fi
#./make_testdata.sh $name1 $grid1
#./make_testdata.sh $rll1 ${rll1}_SCRIP.nc
# apply the map to the test topography and contour-plot the Andes region
ncremap -5 -m $map \
$wdir/testdata/ne120np4pg2_fx1t.nc \
$wdir/testdata/${rll1}_${mapalg}_mapped.nc
~/codes/nclscript/contour/contour.py \
-i $wdir/testdata/${rll1}_${mapalg}_mapped.nc \
-c -40000,40000,1000 -m andes $var
# -m europe -c 5 Y16_32
# -s $wdir/grids/TEMPEST_${rll1}.scrip.nc \
#~/codes/nclscript/contour/contour.py \
# -i $wdir/testdata/ne120np4_x0topo.nc \
# -r 1024x2048 \
# -c -40000,40000,1000 -m andes PHIS
| true |
ffc3f53abe72f7d2591cdfbb9e213af7953110f7
|
Shell
|
xingmegshuo/pro
|
/python/docker/docker_com/start.sh
|
UTF-8
| 404 | 2.578125 | 3 |
[] |
no_license
|
#!/bin/bash
# Build the "com" image once (if absent) and (re)start the application
# container linked to the "mysql" container on port 80.
if ! sudo docker images |grep com;then
sudo docker build -t com django-uwsgi-nginx
fi
# NOTE(review): the restart branch keeps the --name and skips migrations,
# while the first-run branch migrates but starts an *unnamed* container
# (so the next run's `ps -a | grep comapp` won't match it) — presumably
# migrate-on-first-deploy was intended; confirm the missing --name.
if sudo docker ps -a |grep -i comapp; then
sudo docker rm -f comapp
sudo docker run -itd --link mysql:mysql --name comapp -p 80:80 \
com
else
sudo docker run -itd --link mysql:mysql -p 80:80 \
com \
sh -c 'python3 code/com/manage.py migrate && supervisord -n'
fi
| true |
6ccf5642108b8a4609b86ecc78923dfad1130bd9
|
Shell
|
simon04/pkgbuilds
|
/osmtogeojson/PKGBUILD
|
UTF-8
| 726 | 2.53125 | 3 |
[
"Unlicense"
] |
permissive
|
# Maintainer: Simon Legner <Simon.Legner@gmail.com>
pkgname=osmtogeojson
# makepkg forbids hyphens in pkgver, so the npm version "3.0.0-beta.4"
# is flattened to 3.0.0b4; _npmver keeps the real npm version string
# used for the download URL and the npm install below.
pkgver=3.0.0b4
_npmver=3.0.0-beta.4
pkgrel=1
pkgdesc="Convert OSM data to GeoJSON"
arch=(any)
url="https://github.com/tyrasd/osmtogeojson#readme"
license=('MIT')
depends=('nodejs')
makedepends=('npm')
source=(http://registry.npmjs.org/$pkgname/-/$pkgname-$_npmver.tgz)
noextract=($pkgname-$_npmver.tgz)
replaces=('nodejs-osmtogeojson')
# Install the package under the packaging root via "npm install -g",
# then copy its MIT license to the standard licenses location.
package() {
cd $srcdir
local _npmdir="$pkgdir/usr/lib/node_modules/"
mkdir -p $_npmdir
cd $_npmdir
npm install -g --prefix "$pkgdir/usr" $pkgname@$_npmver
install -Dm755 "$_npmdir/$pkgname/LICENSE" "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
# vim:set ts=2 sw=2 et:
sha1sums=('00e270af7c080bd3bc503bd612fa5b90a1c95d50')
| true |
43b47559b490c72c70d108ca8b817c793b911b3f
|
Shell
|
monty-pavel/indexfs_old
|
/sbin/start-all.sh
|
UTF-8
| 2,295 | 3.75 | 4 |
[] |
no_license
|
#!/bin/bash
#
# Copyright (c) 2014 The IndexFS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. See the AUTHORS file for names of contributors.
#
# Please run this script at the indexfs's home directory:
#   > sbin/start-all.sh
#
# Use this script to start an indexfs server cluster over the network.
# Root privilege is neither required nor recommended to run this scripts.
#
# Note that this script makes use of the 'sbin/start-idxfs.sh' to start
# each indexfs server on each participanting server node. Currently,
# we recommend to run 1 single indexfs server on 1 single machine.
#
# Note also that this script uses SSH to launch jobs on remote servers.
# Before using this script, please prepare your server list file at
# etc/indexfs-distributed/server_list, and make sure your "control node" can
# SSH to these servers without providing a password.
#
# Please also make sure that all servers have access to the same indexfs distribution
# and can access that with the same file system path. It is recommended to place
# the indexfs distribution on a shared file system, such as NFS.
#

me=$0
# resolve the distribution root from this script's own location
INDEXFS_HOME=$(cd -P -- "$(dirname "$me")"/.. && pwd -P)
INDEXFS_ROOT=${INDEXFS_ROOT:-"/tmp/indexfs"}
INDEXFS_CONF_DIR=${INDEXFS_CONF_DIR:-"$INDEXFS_HOME/etc/indexfs-distributed"}

# make ssh a bit more admin-friendly
SSH='ssh -o ConnectTimeout=5 -o ConnectionAttempts=1 -o StrictHostKeyChecking=no'

# check if we have the required server list file
if test ! -e "$INDEXFS_CONF_DIR/server_list"
then
  echo "Cannot find our server list file -- oops"
  echo "It is supposed to be found at $INDEXFS_CONF_DIR/server_list"
  exit 1
fi

# remove old indexfs data; the :? guard aborts instead of expanding to a
# catastrophic "rm -rf /*" if INDEXFS_ROOT is ever empty
rm -rf "${INDEXFS_ROOT:?}"/*
mkdir -p "$INDEXFS_ROOT"

# Print a diagnostic for the failed node and abort the whole rollout.
report_error() {
  echo "Fail to start indexfs server at $1"
  echo "Abort!"
  exit 1
}

# ask all member server nodes to start a new indexfs server instance
# (word-splitting of the cut output is intentional: one word per host)
for srv_node in \
  $(cut -d':' -f1 "$INDEXFS_CONF_DIR/server_list")
do
  INDEXFS_ID=$((${INDEXFS_ID:-"-1"} + 1))
  INDEXFS_RUN=$INDEXFS_ROOT/run/server-$INDEXFS_ID
  # $SSH stays unquoted on purpose so its embedded options word-split
  $SSH "$srv_node" "env INDEXFS_ID=$INDEXFS_ID INDEXFS_CONF_DIR=$INDEXFS_CONF_DIR \
      INDEXFS_RUN=$INDEXFS_RUN $INDEXFS_HOME/sbin/start-idxfs.sh" || report_error "$srv_node"
done

exit 0
| true |
c7385a9079df20e6bdb3b53727f952adfb1edb39
|
Shell
|
InkubatorTieto/melvil
|
/docker/remove-container.sh
|
UTF-8
| 2,537 | 3.34375 | 3 |
[] |
no_license
|
#!/bin/bash
# clear *.pyc files
. docker/clear-cache.sh
remove_container () {
# cases for removing specific test container
case "$1" in
"t")
# find IDs of all test containers
found_containers=$(docker ps -a --filter 'name=melvil_tests' --format "{{.ID}}")
if [ '$found_containers' ] ; then
# remove test containers
remove_containers=$(docker rm $found_containers)
fi;;
"p")
# find IDs of all migration containers on production
case "$2" in
"m")
found_containers=$(docker ps -a --filter 'name=melvil_db_migration_prod' --format "{{.ID}}")
if [ '$found_containers' ] ; then
remove_containers=$(docker rm $found_containers)
fi;;
"u")
found_containers=$(docker ps -a --filter 'name=melvil_db_upgrade_prod' --format "{{.ID}}")
if [ '$found_containers' ] ; then
remove_containers=$(docker rm $found_containers)
fi;;
"d")
found_containers=$(docker ps -a --filter 'name=melvil_db_prod' --format "{{.ID}}")
if [ '$found_containers' ] ; then
remove_containers=$(docker rm $found_containers)
fi;;
"x")
found_containers=$(docker ps -a --filter 'name=upload_lib_items_prod' --format "{{.ID}}")
if [ '$found_containers' ] ; then
remove_containers=$(docker rm $found_containers)
fi;;
esac;;
"d")
# find IDs of all migration containers on dev
case "$2" in
"m")
found_containers=$(docker ps -a --filter 'name=melvil_db_migration_dev' --format "{{.ID}}")
if [ '$found_containers' ] ; then
remove_containers=$(docker rm $found_containers)
fi;;
"u")
found_containers=$(docker ps -a --filter 'name=melvil_db_upgrade_dev' --format "{{.ID}}")
if [ '$found_containers' ] ; then
remove_containers=$(docker rm $found_containers)
fi;;
"d")
found_containers=$(docker ps -a --filter 'name=melvil_db_dev' --format "{{.ID}}")
if [ '$found_containers' ] ; then
remove_containers=$(docker rm $found_containers)
fi;;
"x")
found_containers=$(docker ps -a --filter 'name=upload_lib_items_dev' --format "{{.ID}}")
if [ '$found_containers' ] ; then
remove_containers=$(docker rm $found_containers)
fi;;
esac;;
esac
}
| true |
9143d164650189d055c1cac37bab0c92b69122a1
|
Shell
|
DanSchum/NMTGMinor
|
/start_preprocessing.sh
|
UTF-8
| 343 | 2.609375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Emit the current time as YYYY_MM_DD_HH_MM_SS, used to tag log files.
timestamp() {
  date '+%Y_%m_%d_%H_%M_%S'
}

# Build a per-run log file name and submit the preprocessing wrapper to
# SLURM (1 CPU, 10 GB, 3 days, HPC partition), capturing stdout+stderr.
outputFilename="logs/outputPreprocessing_$(timestamp).log"
sbatch -c 1 --mem=10000 -t 3-00 -p HPC -o "$outputFilename" -e "$outputFilename" cmd_preprocessing_command_wrapper.sh
| true |
f805a196ff019f6e13d894c2b35040bcf50c0752
|
Shell
|
kvz/jekyll-docker
|
/script/travis
|
UTF-8
| 104 | 2.578125 | 3 |
[
"ISC"
] |
permissive
|
#!/bin/bash
set -e

# CI entry point: run each pipeline stage in order, aborting on the
# first failure (set -e).
for stage in install build deploy; do
  script/"$stage"
done
| true |
dbabe0ebbafa1a5ae1c7697398a520104ac0917d
|
Shell
|
DarcJC/gamemath-cn
|
/bin/create_page.sh
|
UTF-8
| 276 | 3.34375 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Interactively create a new page from the Markdown template.
# Fixed: the original first line read "# /usr/bin/env bash" — a plain
# comment, not a shebang — so the script ran under whichever shell
# happened to invoke it.
echo "请输入文件名(示例:000-奇奇怪怪的页面, 无需.md后缀): ";
# -r keeps backslashes in the typed name literal
read -r filename;
# When run from inside bin/ there is no ./pages directory, so reach the
# template and pages dirs via ..; otherwise use repo-root paths.
# NOTE(review): the two branches use "../template" vs "templates"
# (singular vs plural) — confirm the actual repository layout.
if [ ! -d "pages" ];
then
cp ../template/template.md "../pages/$filename.md"
else
cp templates/template.md "pages/$filename.md"
fi
echo "文件创建完成..."
| true |
460c1922bc07cf4207004b831265d33a5126a71f
|
Shell
|
pbr90x/flask-jwt-example
|
/setup.sh
|
UTF-8
| 186 | 2.53125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash

# Generate the self-signed TLS certificate and the EC signing key pair,
# but only for the files that are not already present.

if [[ ! -f "server.crt" || ! -f "server.key" ]]; then
    ./create_selfsigned_cert.sh
fi

if [[ ! -f "ec-pub.key" || ! -f "ec.key" ]]; then
    ./create_ec_signing_keys.sh
fi
| true |
064a55487c66b2a59912926abfddccd9a9837b5c
|
Shell
|
kanoop640/SnakeLadder
|
/SanekLadder.sh
|
UTF-8
| 2,003 | 3.53125 | 4 |
[] |
no_license
|
# Terminal Snake & Ladder for two players ("anoop" and "imran").
# Board: 10x10 grid, cells numbered 100 down to 1, printed in
# boustrophedon order.  Hard-coded rules in the loop below: landing on
# 10 climbs a ladder to 91; landing on 99 is a snake back to 1; first
# player to reach exactly 100 wins.
echo " Welcome to Snake and Ladder Game "
len=100
# arr[k] holds the board number printed at walk index k (100 .. 1)
declare -A arr[100]
for(( k=0;k<100;k++ ))
do
arr[$k]=$len
len=$(( len-1 ))
done
count=0
# Render the board.  Even rows print left-to-right (count ascends), odd
# rows right-to-left (count descends); the +9/+11 adjustments move the
# global 'count' to the correct end of the next row.
print()
{
echo "----------------------------------------------------"
for(( i=0;i<10;i++ ))
{
if(( $i%2!=0 ))
then
count=$(( count+9 ))
fi
for(( j=0; j<10; j++ ))
{
if(( $i%2==0 ))
then
if(( ${arr[$count]}==99 ))
then
echo -ne "||"${arr[$count]}
else
echo -ne " ||"${arr[$count]}
fi
count=$(( count+1 ))
else
# single-digit cells get a leading 0 so the columns line up
if(( ${arr[$count]} > 9 ))
then
echo -ne " ||"${arr[$count]}
else
echo -ne " ||0"${arr[$count]}
fi
count=$(( count-1 ))
fi
}
if(( $i%2!=0 ))
then
count=$(( count+11))
fi
echo
}
echo "----------------------------------------------------"
}
# both players start on square 1; RollTime counts total dice rolls
anoop=1
imran=1
RollTime=0
read -p "Enter choice to playe for player 1 for player1 and 2 for player2 " ch
# Main loop: apply ladder/snake rules, roll a die (1..6) for the player
# whose turn 'ch' indicates, alternate turns, stop when someone hits 100.
while(( $anoop!=100 && $imran!=100 ))
do
if(( $anoop==10 ))
then
anoop=91
echo "Anoop got the ladder he reached at position : " $anoop
fi
if(( $imran==10 ))
then
imran=91
echo "Imran got the ladder he reached at position : " $imran
fi
if(( $anoop==99 ))
then
anoop=1
echo "Anoop bitten by snake he reached at position : " $anoop
fi
if(( $imran==99 ))
then
imran=1
echo "Imran bitten by snake he reached at position : " $imran
fi
rnd=$(( 1+RANDOM%6 ))
if(( $ch==1 ))
then
echo "Anoo's random number is : " $rnd
# a roll that would overshoot 100 is forfeited
anoop=$(( anoop+rnd ))
if(( $anoop>100 ))
then
anoop=$(( anoop-rnd ))
fi
echo "Anoop is at position : "$anoop
ch=2
if(( $anoop==100 ))
then
echo "Anoop won"
fi
elif(( $ch==2 ))
then
echo "Imran's random number is : " $rnd
imran=$(( imran+rnd ))
if(( $imran>100 ))
then
imran=$(( imran-rnd ))
fi
echo "Imran is at position : "$imran
ch=1
if(( $imran==100 ))
then
echo "Imran won"
fi
fi
RollTime=$(( RollTime+1 ))
done
echo $RollTime "times roll is done "
print
| true |
f79bed96b0d5061e5357ecfa34f425a0144ba043
|
Shell
|
nguyendinhnien/elasticsearch5-kubenetes
|
/run-elastic.sh
|
UTF-8
| 1,176 | 3.453125 | 3 |
[] |
no_license
|
#!/bin/bash
# Deploy an Elasticsearch cluster (master/client/data tiers) on
# Kubernetes, wait for the deployments to come up, scale them out, and
# print the public service IP once it is assigned.

echo "Creating Elasticsearch services..."
kubectl create namespace es-cluster
kubectl create -f es-discovery.yaml
kubectl create -f es-svc.yaml
kubectl create -f es-master.yaml

# Block until both es-master replicas report available (column 6 of
# `kubectl get deployments`).
while true; do
    active=$(kubectl get deployments --all-namespaces | grep es-master | awk '{print $6}')
    if [ "$active" == "2" ]; then
        break
    fi
    sleep 2
done

kubectl create -f es-client.yaml
kubectl create -f es-data-svc.yaml
kubectl create -f es-data-stateful.yaml

# Wait for the client deployment before scaling anything.
while true; do
    active=$(kubectl get deployments --all-namespaces | grep es-client | awk '{print $6}')
    if [ "$active" == "1" ]; then
        break
    fi
    sleep 2
done

# Scale the cluster to 3 master, 4 data, and 2 client nodes
kubectl scale deployment es-master --replicas 3
kubectl scale deployment es-client --replicas 2
kubectl scale statefulsets es-data --replicas 4

echo "Waiting for Elasticsearch public service IP..."
while true; do
    es_ip=$(kubectl get svc elasticsearch | grep elasticsearch | awk '{print $3}')
    if [ "$es_ip" != "<pending>" ]; then
        break
    fi
    sleep 2
done

# Fixed: the original line carried an unbalanced double quote, which made
# the whole script fail to parse ("unexpected EOF looking for matching quote").
echo "Elasticsearch public IP: $es_ip"
| true |
758ec7716067dc863629d7b7264e5c648ab9d90e
|
Shell
|
GUOShuxuan/eval-nas
|
/scripts/nasbench-sampler-search.sh
|
UTF-8
| 2,350 | 3.328125 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Launch NASBench weight-sharing search experiments.
#
# Usage:
#   nasbench-sampler-search.sh debug    # small local smoke-test run
#   nasbench-sampler-search.sh search   # full sweep over seeds / node
#                                       # counts / policies via slurm-submit.sh
#
# NOTE(review): the "search" branch reads $GPU from the environment —
# confirm callers export it before submitting.
PYTHON=python
PARTITION=gpu
SUBMIT_FILE='slurm-submit.sh'
DAYS=20
EXPERIMENT="cnn-nasbench"

# "${1:-}" is quoted/defaulted so the test stays well-formed when the
# script is run without arguments (the original unquoted [ $1 = "debug" ]
# errored out with "unary operator expected").
if [ "${1:-}" = "debug" ]; then
    policy='nao'
    SUB_EXP="cifar_nasbench_${policy}"
    node=5
    $PYTHON cnn_search_main.py --epochs=4 \
        --save_every_epoch=1 \
        --search_space nasbench \
        --search_policy $policy \
        --num_intermediate_nodes ${node} \
        --nasbenchnet_vertex_type ${policy}_ws \
        --supernet_train_method darts \
        --seed_range_start 1268 \
        --seed_range_end 1269 \
        --batch_size 16 \
        --evaluate_batch_size 16 \
        --tensorboard \
        --gpus 4 \
        --test_dir experiments/debug/${EXPERIMENT}_${SUB_EXP}-normal-test1-node${node} \
        --debug
fi

if [ "${1:-}" = "search" ]; then
    # Experiments extensively on four policys.
    # Note that this intermediate_nodes = node (in paper) - 2.
    NUM_SEED_TO_USE=10 # reduce this number to save time.
    declare -a arr1=("2" "3" "4" "5")
    declare -a arr2=("enas" "nao" "darts" "fbnet")
    for ((k=0;k<${NUM_SEED_TO_USE};k=k+1));do
        SEED=$((1268 + $k))
        SEED_END=$((1268 + $k + 1))
        for ((i=0;i<${#arr1[@]};i=i+1)); do
            node=${arr1[i]}
            for((j=0;j<${#arr2[@]};j=j+1));do
                policy=${arr2[j]}
                SUB_EXP="cifar_nasbench_${policy}"
                cmd="$PYTHON cnn_search_main.py --epochs=200 \
                    --save_every_epoch=20 \
                    --search_space nasbench \
                    --search_policy ${policy} \
                    --num_intermediate_nodes ${node} \
                    --nasbenchnet_vertex_type ${policy}_ws \
                    --supernet_train_method darts \
                    --seed_range_start ${SEED} \
                    --seed_range_end ${SEED_END} \
                    --batch_size 256 \
                    --evaluate_batch_size 64 \
                    --tensorboard \
                    --gpus $GPU \
                    --test_dir experiments/${EXPERIMENT}/${SUB_EXP}/baseline-new-node${node}-${SEED} \
                    > logs/${EXPERIMENT}/${SUB_EXP}_baseline-new-node${node}-${SEED}.log \
                    2>&1"
                cmdALL="$cmd"
                bash $SUBMIT_FILE "$cmdALL" $(($i)) $GPU $PARTITION $DAYS
            done
        done
    done
fi
| true |
c5a82629ed4a9ffabf46885fd434cd797d80ca34
|
Shell
|
kara-franco/bash-script
|
/stats
|
UTF-8
| 9,347 | 4.28125 | 4 |
[] |
no_license
|
#!/bin/bash
# stats.sh
# Kara Franco
# CS 344 Operating Systems
# Program 1 - A bash shell script that computes statistics. This program may be used to calculate the averages
# and medians of either the rows OR columns of an input file of whole numbers.
# program will start when user enters the correct commands - stats {-rows|-cols} {file} OR
# - cat {file} | stats {-rows|cols}
# {-rows|-cols} means that the user should choose to compute statistics on either rows OR columns
# --------------------- check for correct number and format of arguments -----------------------------------
# if command has too many or too few arguements then output error to std err
# $# has a value that represents the number of arguments were passed to the stats program
# this concept was introduced on the class discussion forum:
# stats = command (or 0th element of arguements)
# -rows = 1st agrument ($1)
# test_file = 2nd argument ($2)
if [ "$#" -gt 2 -o "$#" -lt 1 ]
then
echo "Usage: stats {-rows|-cols} {file}" 1>&2
exit 1
fi
# allow users to abbreviate -rows and -cols
# if command has arguments in wrong format output error to std err
# Positional Parameters: the below method was introduced to me on the class dicussion forum:
# - ${1: this is looking at the 1st argument (-rows or -cols)
# - 0: start at the 0th place in string
# - 2} look at the 2 first place (-r or -c)
# || is conditonal expression for OR (-o would not work)
if [[ "${1:0:2}" == '-r' || "${1:0:2}" == '-c' ]]
# use of : which is a null command
# the use of != was not allowing proper control flow :(
then
:
else
echo "Usage: stats {-rows|-cols} {file}" 1>&2
exit 1
fi
# ------------------------------ read file and store to temporary file -----------------------------------
# create tempFile variable with PID in name (use of $$) to ensure the program can be ran 'rapid fire'
# set up trap command to remove if any disruptions:
# 1 = hangup; 2 = interrupt from keyboard; 15 = termination signal
tempFile=$$temp
trap "rm -f $tempFile; exit 1" 1 2 15
# check if user supplied file in command
# if there is one arguement, then we can read file from stdin
# if input file is not readable than send output error to std error
# below is checking if there is a file provided in argument 2 postion and is it readable (-r)
if [ "$#" -eq 2 -a ! -r "$2" ]
then
echo "stats: cannot read ${2}" 1>&2
exit 1
fi
# read file and store in temporay file
# this statement is checking for the number of arguments
# if there is 1 argument, then read from stdin since the file was given in cat test_file | stats -r format
# else, cat the file from argument 2 since the file was given in stats -r test_file format
# NOTE(review): a final stdin line without a trailing newline is dropped
# by this read loop — confirm the grader's inputs always end in newline.
if [ "$#" -eq 1 ]
then
while read line
do
echo $line >> $tempFile
done
else
# this method below was suggested by the professor in the class discussion forum
cat "$2" > $tempFile
fi
# ------------------------- calculating statistics for the rows of the file -------------------------------
# statement checks if user wants to compute statistics for rows
if [[ "${1:0:2}" == '-r' ]]
then
# since we need our output to print in columns, print Average and Median and read each row into a lineArray
echo "Average Median"
# ------------------------------------------ average -------------------------------------------------------
while read -r line
do
# initialize variables to numeric values that will hold:
# index count(number of values per line), sum, middle value and median
sum=0
indexCount=0
middle=0
median=0
for i in $line
do
# i holds the current value in the line, it is being added to sum, this syntax was discussed in class forum
sum=$(expr "$sum" + "$i")
# capture the value of i into the array at current index count, lineArray is used for average & median calc.
lineArray[$indexCount]=$i
indexCount=$(expr $indexCount + 1)
done
# all numbers must be whole numbers
# if half value occurs, round up (7.5 = 8)
# by default Bash will round 3.75 OR 3.5 to 3, thus we need to add .5 to every number
# to do this, we take the index counter, divide by 2 and add that to the sum
average=$((($sum + $indexCount / 2) / $indexCount))
# -------------------------------------------- median -----------------------------------------------------
# to calculate the median, sort the values and take the middle value
# if number of rows is even, take the largest of the two middle (54, 83, 90, 97 median will be 90)
# Selection sort implementation
for ((i=0; i<$indexCount-1; i++))
do
# initiate minNumber to the first value in line, every number after arr[0] will be compared if less than
minNumber=${lineArray[$i]}
index=$i
# look at next numbers to see if it is less than, if so arr[j] = minNumber, if not, place minNum in lineArray
for ((j=i+1; j<$indexCount; j++))
do
if ((lineArray[j]<minNumber))
then
minNumber=${lineArray[$j]}
index=$j
fi
done
# tempHolder stores the value of the current index while the current minNumber is being placed in lineArray
tempHolder=${lineArray[$i]}
lineArray[$i]=${lineArray[$index]}
lineArray[$index]=$tempHolder
done
# this method of finding the middle of the line was found on the class discussion forum:
# since the array starts at 0, we have to simply divide by 2 for both odd and even line cases
# for refererence for the below
# (if there are 4 values per line, 4/2 = 2, index 2 equals the third value in the array, 54, 83, 90. 97 = 90)
# (if there are 5 values per line, 5/2 = 2, index 2 equals the thrid value in the array, 2, 5, 6, 10, 12 = 6)
middle=$(expr "$indexCount" / 2)
median=${lineArray[middle]}
# print out the averages and medians to screen with spaces inbetween
echo "$average" "	" "$median"
done < $tempFile
fi
# --------------------------- calculating statistics for the columns of the file ----------------------------
# statement checks if user wants to compute statistics for columns
if [[ "${1:0:2}" == '-c' ]]
then
# to find the number columns in the table, find the number of values in a row
# the method below was introduced to me on the class discussion forum:
# - head will only read the first 10 rows
# - -n 1 will tell head only read the first row
# - $tempfile holds the copy of the input file
# - | sends the above to wc -w
# - wc -w returns/hold the number of values per row
numberOfCols=$(head -n 1 $tempFile | wc -w)
colNumber=0
# -------------------------------------------- average ------------------------------------------------------
# while the current column number is less than the number of cols in the file then calculate BOTH statisics
# this is different then how rows are handled, so build two arrays of averages and medians
while [ $colNumber -lt "$numberOfCols" ]
do
# declare variables that will hold index count(number of values per col), sum, middle value and median
indexCount=0
sum=0
middle=0
median=0
while read -r line
do
# put current line of file in lineArray variable
lineArray=($line)
# the ave and median is calculated as each column is captured, these values are then placed in ave/med arrays
# while at the curr col number (start at 0) read the value and put it into the colunm array (start at index 0)
columnArray[$indexCount]=${lineArray[$colNumber]}
# increase the index by one for the next element to be captured
indexCount=$(expr "$indexCount" + 1)
# this will continue until all elements are captured in the colunm array (hit the end of the file)
# as the colunm number increases, this will repeat
done < $tempFile
# next calculate the sum of the values captured in the colunm array
for i in "${columnArray[@]}"
do
sum=$(expr "$sum" + "$i")
done
# use the same fomula as the row calculation to calculate the average (rounding included)
average=$((($sum + $indexCount / 2) / $indexCount))
# place the number into the average array (at the index of the column number)and store until printing
averageArray[$colNumber]=$average
# ---------------------------------------------- median ---------------------------------------------------
# treat the column array as we did with the line array (for the row calculations) and sort
# use selection sort
for ((i=0; i<$indexCount-1; i++))
do
minNumber=${columnArray[$i]}
index=$i
for ((j=i+1; j<$indexCount; j++))
do
if ((columnArray[j]<minNumber))
then
minNumber=${columnArray[$j]}
index=$j
fi
done
tempHolder=${columnArray[$i]}
columnArray[$i]=${columnArray[$index]}
columnArray[$index]=$tempHolder
done
# place the middle number (at index middle) into the median array
middle=$(expr "$indexCount" / 2)
median=${columnArray[middle]}
# store the median in median array until printing, index will increase with colNumber
medianArray[$colNumber]=$median
colNumber=$(expr $colNumber + 1)
done
# print the averages from the average array
# use | colunm to print in correct colunm format
echo "Averages:"
for i in "${averageArray[@]}"
do
echo "${i}"
done | column
# print the medians from the median array
echo "Medians:"
for i in "${medianArray[@]}"
do
echo "${i}"
done | column
fi
# remove the temporary file
# NOTE(review): the trap above covers signals 1/2/15 only, so this final
# rm is the normal-exit cleanup path — both are needed.
rm -f $tempFile
| true |
825cf751d2c0b1845670a3b52cfe6374e42ee3a3
|
Shell
|
pdonorio/irods2graph
|
/irodsgraph/bootstrap.sh
|
UTF-8
| 1,209 | 3.25 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
# Bootstrap an iRODS client environment (~/.irods/.irodsEnv) and, for the
# local docker setup, seed the server with a replica resource and mock
# data.  $1 == "remote" selects the CINECA DMP zone; anything else
# configures the local docker-compose "rodserver".
USER=`whoami`
export HOME="/home/$USER"
IRODS_DIR="$HOME/.irods"
IRODS_ENV="$IRODS_DIR/.irodsEnv"
# start from a clean client config
rm -f $IRODS_ENV
mkdir -p $IRODS_DIR
if [ "$1" == "remote" ]; then
echo "Remote connection"
# dmp1.local
remoteconf="
irodsHost 130.186.13.14
irodsPort 1247
irodsHome '/cinecaDMPZone/home/pdonorio'
irodsUserName pdonorio
irodsZone cinecaDMPZone
irodsCwd '/cinecaDMPZone/home/pdonorio'
"
echo "$remoteconf" > $IRODS_ENV
# iinit prompts for / caches the password; abort on failure
bash -c "iinit" || exit $?
else
echo "Working locally"
# my laptop/docker
localconf="
irodsHost rodserver
irodsPort 1247
irodsUserName rods
irodsZone tempZone
"
# Note: for local configuration, password is 'mytest'
# (specified inside docker compose)
# NOTE(review): $IRODS_ENV was removed unconditionally above, so this
# "! -f" guard is always true — confirm whether the top rm -f should
# only apply to the remote branch.
if [ ! -f $IRODS_ENV ]; then
echo "$localconf" > $IRODS_ENV
bash -c "iinit" || exit $?
# # Create a second resource for replicas?
iadmin mkresc replicaResc unixfilesystem rodserver:/tmp/REPLICA
# #https://docs.irods.org/master/manual/installation/#add-additional-resources
fi
# populate the local zone with mock data, then convert it to the graph
cmd="./app.py --mock -v"
$cmd popolae --size=20
if [ "$?" == "0" ]; then
$cmd convert
fi
fi
| true |
8f8452e25c7ebbf18ff39c55c8ad86962b182883
|
Shell
|
limbo018/rsyn-x
|
/rsyn/install/bin/gurobi.sh
|
UTF-8
| 266 | 2.96875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run the Gurobi command-line solver.
#   $1 - thread count, $2 - result file, $3 - model file
# Fails fast (exit 1) when the expected Gurobi installation is missing.
#
# Bug fixes: shebang was '#!bin/bash' (missing leading '/'), and the test
# was written '[! -d ...]' (missing space), which is a syntax error.
export GUROBI_HOME="/opt/gurobi702/linux64"
if [ ! -d "$GUROBI_HOME" ]; then
  echo "[ERROR] Gurobi not installed!"
  exit 1
fi
export PATH="${PATH}:${GUROBI_HOME}/bin"
export LD_LIBRARY_PATH="${GUROBI_HOME}/lib"
gurobi_cl Threads="$1" ResultFile="$2" "$3"
| true |
97b35ea891a8ee3d1980c8b1134952f50db3094a
|
Shell
|
zeuxisoo/my-scripts
|
/shell-backup-all-db-to-dropbox/backup.sh
|
UTF-8
| 1,267 | 3.796875 | 4 |
[] |
no_license
|
#!/bin/bash
# Dump every MySQL database (except information_schema) to per-database gzip
# files, bundle them into one dated tarball, upload it to Dropbox, and expire
# the remote backup from 6 days ago.
# Settings
TIMESTAMP=$(date +"%F")
BACKUP_DIR="backup/$TIMESTAMP"
MYSQL_USER="MYSQL_USER"
MYSQL_PASS="MYSQL_PASSWORD"
MYSQL_SELF="/usr/local/mysql/bin/mysql"
MYSQL_DUMP="/usr/local/mysql/bin/mysqldump"
DROPBOX_PATH="/Backup/Server/Database/All"
# Create backup directory
mkdir -p $BACKUP_DIR
# Get all database
databases=`$MYSQL_SELF -u $MYSQL_USER -p"$MYSQL_PASS" -e 'SHOW DATABASES;' | grep -Ev "(Database|information_schema)"`
for database in $databases; do
echo "Dumping $database"
# --single-transaction/--skip-lock-tables: consistent dump without locking.
$MYSQL_DUMP --force --opt -u $MYSQL_USER -p"$MYSQL_PASS" --single-transaction --skip-lock-tables $database | gzip > "$BACKUP_DIR/$database.gz"
done
# Zip databases to one file then remove all databases
tar zcvf $BACKUP_DIR.tar.gz $BACKUP_DIR
rm -rf $BACKUP_DIR
# Put to dropbox
put_cmd="/home/backup/dropbox_uploader.sh upload $BACKUP_DIR.tar.gz $DROPBOX_PATH/$TIMESTAMP.tar.gz"
echo $put_cmd && eval "$put_cmd"
# Remove from dropbox (expire 6 day files)
# GNU date form first; overridden with the BSD form when running on macOS.
EXPIRE_DAY=$(date --date='6 days ago' +'%F')
if [[ "$OSTYPE" == "darwin"* ]]; then
EXPIRE_DAY=$(date -v -6d +'%F')
fi
delete_cmd="/home/backup/dropbox_uploader.sh delete $DROPBOX_PATH/$EXPIRE_DAY.tar.gz"
echo $delete_cmd && eval "$delete_cmd"
# Remove backup zip file
rm -rf $BACKUP_DIR.tar.gz
| true |
2cb64682e78c44f1300091cd87fd2f28c6bc4da6
|
Shell
|
ahochleitner/CodeOne2018_JvmCpuMemory4DockerInProduction
|
/Memory/12_runAllMemoryInfo.sh
|
UTF-8
| 899 | 3.484375 | 3 |
[] |
no_license
|
#!/bin/bash
# Run MemoryInfo inside openjdk 7..11 Docker containers to compare how each
# JVM sizes its heap under optional container memory/CPU limits.
MAIN=MemoryInfo
#MAIN=PrintMemoryInfo

if [ "$#" = "1" -a "$1" = "-usage" ]; then
  echo "run Java7 ... Java11-Docker with MemoryInfo inside"
  echo "usage: $0"
  echo "usage: $0 -setMemory1024m"
  echo "usage: $0 -setCpus"
  echo "usage: $0 -setCGroupMemoryLimitForHeap"
  exit 1
fi

if [ "$1" = "-setMemory1024m" -o "$1" = "-setCGroupMemoryLimitForHeap" ]; then
  ADDITIONAL_OPTS="-m 1024m"
  echo "run with $ADDITIONAL_OPTS"
fi
if [ "$1" = "-setCpus" ]; then
  ADDITIONAL_OPTS="--cpus .5 --cpuset-cpus 1"
  echo "run with $ADDITIONAL_OPTS"
fi
if [ "$1" = "-setCGroupMemoryLimitForHeap" ]; then
  RUN_OPTS="-setCGroupMemoryLimitForHeap"
  # Bug fix: this message previously echoed $ADDITIONAL_OPTS, which this
  # branch does not set; report the value it actually configures.
  echo "run with $RUN_OPTS"
fi

for i in 7 8 9 10 11; do
  echo "====== " openjdk$i " ====="
  # $ADDITIONAL_OPTS and $RUN_OPTS are deliberately unquoted so their words
  # become separate arguments; both are empty unless a flag above set them.
  docker run --rm -v "$PWD":/usr/src/myapp -w /usr/src/myapp $ADDITIONAL_OPTS openjdk:$i ./runJavaCompileAndRun.sh $MAIN $RUN_OPTS
  sleep 3
done
| true |
37d1b9496635fb96150c4e42fadfd503ac5f2135
|
Shell
|
amerlyq/airy
|
/zsh/#/accept-line.zsh
|
UTF-8
| 4,333 | 3.59375 | 4 |
[
"MIT"
] |
permissive
|
# a generic accept-line wrapper
# This widget can prevent unwanted autocorrections from command-name
# to _command-name, rehash automatically on enter and call any number
# of builtin and user-defined widgets in different contexts.
#
# For a broader description, see:
# <http://bewatermyfriend.org/posts/2007/12-26.11-50-38-tooltime.html>
#
# The code is imported from the file 'zsh/functions/accept-line' from
# <http://ft.bewatermyfriend.org/comp/zsh/zsh-dotfiles.tar.bz2>, which
# distributed under the same terms as zsh itself.
# A newly added command will may not be found or will cause false
# correction attempts, if you got auto-correction set. By setting the
# following style, we force accept-line() to rehash, if it cannot
# find the first word on the command line in the $command[] hash.
zstyle ':acceptline:*' rehash true
# zsh widget helper: run, in order, the user-configured widgets listed in the
# zstyle ':acceptline:<context>' actions. A widget can stop the chain early
# by setting the exported integer $aldone to a value > 0.
function Accept-Line() {
setopt localoptions noksharrays
local -a subs
local -xi aldone
local sub
# Fall back to the caller's $alcontext when no context argument is given.
local alcontext=${1:-$alcontext}
zstyle -a ":acceptline:${alcontext}" actions subs
(( ${#subs} < 1 )) && return 0
(( aldone = 0 ))
for sub in ${subs} ; do
# Plain 'accept-line' would recurse into our wrapper; use the builtin widget.
[[ ${sub} == 'accept-line' ]] && sub='.accept-line'
zle ${sub}
(( aldone > 0 )) && break
done
}
# Print the widget name to run as the default action for the current
# $alcontext (zstyle 'default_action'); falls back to the builtin
# '.accept-line' when the style is unset or set to 'accept-line'.
function Accept-Line-getdefault() {
emulate -L zsh
local default_action
zstyle -s ":acceptline:${alcontext}" default_action default_action
case ${default_action} in
((accept-line|))
printf ".accept-line"
;;
(*)
printf ${default_action}
;;
esac
}
# Dispatch for the current $alcontext: run the configured action chain, then
# the context's default action unless zstyle 'call_default' is set to false.
function Accept-Line-HandleContext() {
zle Accept-Line
default_action=$(Accept-Line-getdefault)
zstyle -T ":acceptline:${alcontext}" call_default \
&& zle ${default_action}
}
# Replacement accept-line widget. Classifies the command line into a context
# (normal / force / misc / empty), optionally rehashes for newly installed
# commands, and runs the per-context action chain plus default action.
function accept-line() {
setopt localoptions noksharrays
local -ax cmdline
local -x alcontext
local buf com fname format msg default_action
alcontext='default'
buf="${BUFFER}"
# Split the buffer like the shell would; the first word is the command.
cmdline=(${(z)BUFFER})
com="${cmdline[1]}"
fname="_${com}"
Accept-Line 'preprocess'
# Rehash if enabled and the command is unknown (it may be newly installed).
zstyle -t ":acceptline:${alcontext}" rehash \
&& [[ -z ${commands[$com]} ]] \
&& rehash
if [[ -n ${com} ]] \
&& [[ -n ${reswords[(r)$com]} ]] \
|| [[ -n ${aliases[$com]} ]] \
|| [[ -n ${functions[$com]} ]] \
|| [[ -n ${builtins[$com]} ]] \
|| [[ -n ${commands[$com]} ]] ; then
# there is something sensible to execute, just do it.
alcontext='normal'
Accept-Line-HandleContext
return
fi
if [[ -o correct ]] \
|| [[ -o correctall ]] \
&& [[ -n ${functions[$fname]} ]] ; then
# nothing there to execute but there is a function called
# _command-name; a completion widget. Makes no sense to
# call it on the commandline, but the correct{,all} options
# will ask for it nevertheless, so warn the user.
if [[ ${LASTWIDGET} == 'accept-line' ]] ; then
# Okay, we warned the user before, he called us again,
# so have it his way.
alcontext='force'
Accept-Line-HandleContext
return
fi
if zstyle -t ":acceptline:${alcontext}" nocompwarn ; then
alcontext='normal'
Accept-Line-HandleContext
else
# prepare warning message for the user, configurable via zstyle.
zstyle -s ":acceptline:${alcontext}" compwarnfmt msg
if [[ -z ${msg} ]] ; then
msg="%c will not execute and completion %f exists."
fi
zformat -f msg "${msg}" "c:${com}" "f:${fname}"
zle -M -- "${msg}"
fi
return
elif [[ -n ${buf//[$' \t\n']##/} ]] ; then
# If we are here, the commandline contains something that is not
# executable, which is neither subject to _command_name correction
# and is not empty. might be a variable assignment
alcontext='misc'
Accept-Line-HandleContext
return
fi
# If we got this far, the commandline only contains whitespace, or is empty.
alcontext='empty'
Accept-Line-HandleContext
}
zle -N accept-line
zle -N Accept-Line
zle -N Accept-Line-HandleContext
| true |
2bfb5fabc5b0b6a1f9d46d63a7224e8d1066cb7c
|
Shell
|
rainboyan/grails-data-mapping
|
/travis-build.sh
|
UTF-8
| 739 | 3.171875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# CI build entry point: tagged releases publish directly; otherwise compile
# and test, then publish only eligible branches.
EXIT_STATUS=0

./gradlew --stop

if [[ $TRAVIS_TAG =~ ^v[[:digit:]] ]]; then
    echo "Tagged Release Skipping Tests for Publish"
    ./travis-publish.sh || EXIT_STATUS=$?
else
    ./gradlew --no-daemon compileTestGroovy || EXIT_STATUS=$?
    if [[ $EXIT_STATUS -eq 0 ]]; then
      ./gradlew --no-daemon --refresh-dependencies check || EXIT_STATUS=$?
      if [[ $EXIT_STATUS -eq 0 && $TRAVIS_PULL_REQUEST == 'false' ]]; then
          echo "Travis Branch $TRAVIS_BRANCH"
          # Publish from tags, or from master / 7.x.x maintenance branches,
          # but never on JDK 11 builds.
          # Bug fix: the branch regex is now anchored as a whole; the previous
          # '^master|[7]\..\.x$' alternation also matched e.g. 'master-foo'.
          if ([[ -n $TRAVIS_TAG ]] || [[ $TRAVIS_BRANCH =~ ^(master|[7]\..\.x)$ ]] && [[ "${TRAVIS_JDK_VERSION}" != "openjdk11" ]]); then
              ./travis-publish.sh || EXIT_STATUS=$?
          fi
      fi
    fi
fi

exit $EXIT_STATUS
| true |
d4272a43a00c3e9bd65ed79055c38002f651eaef
|
Shell
|
gillins/conda-recipes-rios-channel
|
/pylidar/run_test.sh
|
UTF-8
| 288 | 3.015625 | 3 |
[] |
no_license
|
#!/bin/sh
# Run the pylidar test suite against the test-data archive pointed to by
# $PYLIDAR_TESTDATA; skip quietly (exit 0) when the variable is not set.
if [ -z "$PYLIDAR_TESTDATA" ]
then
    echo "Set \$PYLIDAR_TESTDATA to location of latest testdata_X.tar.gz"
    echo "from https://bitbucket.org/chchrsc/pylidar/downloads"
    echo "to run test as part of build"
    exit 0
fi
pylidar_test -i $PYLIDAR_TESTDATA -p /tmp
| true |
481b305303a05d1e177894ae0ab53ecd7611d2d7
|
Shell
|
yuvaldori/DevOps
|
/releasemanagement/scripts/allproj/rename_branch.sh
|
UTF-8
| 955 | 3.84375 | 4 |
[] |
no_license
|
#!/bin/bash
# Rename git branch $old_branch_name to $new_branch_name in every repository
# found one level below the current directory (except 'examples'), pushing
# the rename (delete old + push new) to origin.

# Report the previous command's exit status. The hard 'exit 1' is commented
# out, so a failure in one repository does not stop the whole run.
function exit_on_error {
status=$?
echo "exit code="$status
if [ $status != 0 ] ; then
echo "Failed (exit code $status)"
#exit 1
fi
}
old_branch_name="10.1.1-patch1"
new_branch_name="10.1.1patch1-build"
# Iterate over immediate subdirectories (each expected to be a git clone).
for dir in `pwd`/*/
do
dir=${dir%*/}
repo=${dir##*/}
if [ "$repo" != "examples" ]
then
echo "### Processing repository: $repo"
pushd $repo
if [ -d ".git" ]
then
# Check out the old branch locally if it is not present yet.
if [[ ! `git branch | grep "$old_branch_name"` ]]
then
git checkout -b $old_branch_name origin/$old_branch_name
fi
# Rename locally, delete the old remote branch, push the renamed branch.
git branch -m $old_branch_name $new_branch_name
exit_on_error
git push origin :$old_branch_name
exit_on_error
git push origin $new_branch_name
exit_on_error
fi
popd
fi
done
| true |
916bb504ca19e0a250f5e11aeb036b1bfe2dfb52
|
Shell
|
jhajek/packer-vagrant-build-scripts
|
/packer/scripts/itmo-453-553/post_install_itmo-453-553-vagrant-ub-graphitea-setup.sh
|
UTF-8
| 2,707 | 2.78125 | 3 |
[] |
permissive
|
#!/bin/bash
# Vagrant post-install provisioner for the ub-graphitea node: set up sudo and
# vagrant SSH keys, register cluster hostnames, install graphite/grafana and
# start their services.
set -e
set -v
# http://superuser.com/questions/196848/how-do-i-create-an-administrator-user-on-ubuntu
# http://unix.stackexchange.com/questions/1416/redirecting-stdout-to-a-file-you-dont-have-write-permission-on
echo "vagrant ALL=(ALL) NOPASSWD:ALL" | sudo tee /etc/sudoers.d/init-users
sudo cat /etc/sudoers.d/init-users
# Installing vagrant keys
wget --no-check-certificate 'https://raw.github.com/mitchellh/vagrant/master/keys/vagrant.pub'
sudo mkdir -p /home/vagrant/.ssh
cat ./vagrant.pub >> /home/vagrant/.ssh/authorized_keys
sudo chown -R vagrant:vagrant /home/vagrant/.ssh
# Add customizations after this line
##################################################
# Change hostname and /etc/hosts
##################################################
cat << EOT >> /etc/hosts
# Nodes
192.168.33.10 centos-riemanna centos-riemanna.project.iit.edu
192.168.33.11 centos-riemannb centos-riemannb.project.iit.edu
192.168.33.12 centos-riemannmc centos-riemannmc.project.iit.edu
192.168.33.110 centos-graphitea centos-graphitea.project.iit.edu
192.168.33.111 centos-graphiteb centos-graphiteb.project.iit.edu
192.168.33.112 centos-graphitemc centos-graphitemc.project.iit.edu
192.168.33.20 ub-riemanna ub-riemanna.project.iit.edu
192.168.33.21 ub-riemannb ub-riemannb.project.iit.edu
192.168.33.22 ub-riemannmc ub-riemannmc.project.iit.edu
192.168.33.210 ub-graphitea ub-graphitea.project.iit.edu
192.168.33.211 ub-graphiteb ub-graphiteb.project.iit.edu
192.168.33.212 ub-graphitemc ub-graphitemc.project.iit.edu
EOT
sudo hostnamectl set-hostname ub-graphitea
##################################################
sudo apt-get update
sudo apt-get install -y python3-dev python3-pip python3-setuptools
#http://askubuntu.com/questions/549550/installing-graphite-carbon-via-apt-unattended
sudo DEBIAN_FRONTEND=noninteractive apt-get -q -y --force-yes install graphite-carbon python-whisper
sudo apt-get install -y apt-transport-https
# P.135 - Listing 4.13: Installing the graphite-api package on Ubuntu
sudo apt-get install -y graphite-api gunicorn3
# https://grafana.com/grafana/download
sudo apt-get install -y adduser libfontconfig1
wget https://dl.grafana.com/oss/release/grafana_7.1.3_amd64.deb
sudo dpkg -i grafana_7.1.3_amd64.deb
# cloning source code examples for the book
git clone https://github.com/turnbullpress/aom-code.git
##################################################################################################
# Start Services
##################################################################################################
sudo systemctl enable graphite-api
sudo systemctl enable grafana-server
sudo systemctl start graphite-api
sudo systemctl start grafana-server
| true |
205dcaea702f916cdb759a38861ee34ab94e2078
|
Shell
|
merothh/dotfiles
|
/setup.sh
|
UTF-8
| 2,825 | 3.703125 | 4 |
[] |
no_license
|
#!/bin/bash
# Dotfiles installer: install required packages (Arch-based distros only),
# optionally switch the login shell to zsh, back up existing rc files, and
# symlink the configs from ~/dotfiles into $HOME.
cyan='tput setaf 6'
yellow='tput setaf 3'
reset='tput sgr0'

release=$(sed -rn 's/^NAME="([^"]*)"/\1/p' /etc/os-release)

echo -e "\n$($cyan)// Switch to $($yellow)zsh $($cyan)+ $($yellow)zsh-theme-powerlevel10k-git $($cyan)? [y/n]$($reset)?"
read zsh

case $release in
"Arch Linux"|"Artix Linux"|"Manjaro Linux")
    packages="adwaita-icon-theme bdf-unifont feh gnome-themes-extra i3-gaps i3lock lxappearance noto-fonts otf-font-awesome papirus-icon-theme picom playerctl ponymix pulseaudio rofi rxvt-unicode scrot urxvt-perls xorg-xbacklight"
    [ ! "$zsh" = "y" ] || packages+=" zsh zsh-theme-powerlevel10k"
    echo -e "\n$($cyan)// Installing required packages$($reset)\n"
    sudo pacman -S $packages
    echo -e "\n$($cyan)// Installing AUR packages$($reset)\n"
    aur_packages="polybar ttf-comfortaa termsyn-font urxvt-resize-font-git"
    aur_dependencies="base-devel git"
    echo -e "\n$($cyan)Installing dependencies for building $($yellow) AUR packages $($reset)\n"
    sudo pacman -S $aur_dependencies
    echo -e "\n$($cyan)// Cloning & Building $($yellow)AUR packages$($reset)\n"
    for aur_package in $aur_packages
    do
        echo -e "\n$($yellow)$aur_package$($reset)\n"
        git clone https://aur.archlinux.org/$aur_package .build
        cd .build && makepkg -si
        cd ../
        rm -rf .build
    done
    ;;
*)
    echo -e "\n$($cyan)// woops. you're probably not running an $($yellow)Arch $($cyan)based distro$($reset)\n"
    ;;
esac

backup_list=(.p10k.zsh .vimrc .Xresources .zshrc)
symlink_list=(.config/i3 .config/git .config/polybar .config/rofi .fonts/Material-Icons .fonts/MesloLGS-NF .p10k.zsh .vimrc .Xresources)
dir_list=(.fonts .config Pictures/Screenshots)

if [ "$zsh" = "y" ]; then
    # Bug fix: 'symlink_list+=" .zshrc"' string-appended to element 0 and
    # only worked by accident through word-splitting; append a real element.
    symlink_list+=(".zshrc")
    echo -e "\n$($cyan)// Changing default shell to $($yellow)zsh$($reset)\n"
    chsh -s $(which zsh)
fi

# get rid of system beep
sudo rmmod pcspkr &> /dev/null
echo "blacklist pcspkr" | sudo tee /etc/modprobe.d/nobeep.conf 1> /dev/null

# backup some specified files
rm -rf ~/dotfiles/.backup; mkdir ~/dotfiles/.backup
for file in "${backup_list[@]}"
do
    cp ~/$file ~/dotfiles/.backup 2> /dev/null
done

# cleanup previous files if any
for file in "${symlink_list[@]}"
do
    rm -rf ~/$file
done

# make sure directories we need are present
for dir in "${dir_list[@]}"
do
    mkdir -p ~/$dir
done

# go ahead and symlink everything
for file in "${symlink_list[@]}"
do
    ln -s ~/dotfiles/$file ~/$file
done

echo -e "\n$($cyan)// All done. Make sure to \n 1. Set themes and fonts using $($yellow)lxappearance $($cyan)after logging into i3-gaps\n 2. Log out and back in for $($yellow)zsh $($cyan)to kick in\n 3. Your previous $($yellow).bashrc .zshrc .Xresources $($cyan) are at $($yellow) ~/dotfiles/.backup $($cyan)\n 4. Maybe move useful code from previous $($yellow).zshrc $($cyan)or $($yellow).bashrc\n $($reset)"
| true |
e2749ff0518a06a710c02dc32d788b589a0f0f3f
|
Shell
|
marcosscriven/chromebook-coreboot
|
/provision/setup.sh
|
UTF-8
| 1,509 | 3.234375 | 3 |
[
"MIT"
] |
permissive
|
#! /bin/bash
# Provisioner: install coreboot build prerequisites and Docker, then build
# and install cbfstool + ifdtool from the Chromium OS coreboot tree.
apt-get update -qq
echo "Installing deps for coreboot"
apt-get install -qq -y git build-essential gcc-4.8-multilib iasl unzip sharutils
echo "Installing ncurses for ad hoc 'make menuconfig'"
apt-get install -qq -y libncurses-dev
echo "Installing deps for coreboot cross-compilation toolchain build"
apt-get install -qq -y ccache m4 bison flex zlib1g-dev
echo "Installing Docker"
apt-get install -qq -y apt-transport-https ca-certificates
apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
echo "deb https://apt.dockerproject.org/repo ubuntu-trusty main" > /etc/apt/sources.list.d/docker.list
apt-get update -qq
apt-get install -qq -y docker-engine
# User must be in 'docker' group
# NOTE(review): assumes $USER is the login user; under sudo/root it may be
# 'root' or unset — confirm in the provisioning context.
usermod -aG docker $USER
service docker start
# Build cbfstool and ifdtool
echo "Building coreboot utils"
mkdir -p /tmp/coreboot
cd /tmp/coreboot
wget -O coreboot.tar.gz -q https://chromium.googlesource.com/chromiumos/third_party/coreboot/+archive/chromeos-2016.02.tar.gz
wget -O vboot.tar.gz -q https://chromium.googlesource.com/chromiumos/platform/vboot_reference/+archive/master.tar.gz
# --touch: refresh timestamps from the archives to avoid clock-skew rebuilds.
tar --touch -xzf coreboot.tar.gz
tar --touch -xzf vboot.tar.gz -C 3rdparty/vboot
make -C util/cbfstool/
make -C util/ifdtool/
cp util/cbfstool/cbfstool /usr/local/bin/
cp util/ifdtool/ifdtool /usr/local/bin/
cd ..
rm -rf /tmp/coreboot
# Add a couple of common locales if necessary (to avoid annoying Perl errors)
echo "Installing US and GB locales"
locale-gen "en_US.UTF-8" "en_GB.UTF-8"
| true |
66c1484f08cb2148905a9650ca902890da9bbc9e
|
Shell
|
reven-tang/shell
|
/tomcat.sh
|
UTF-8
| 4,090 | 3.34375 | 3 |
[] |
no_license
|
#!/bin/bash
##############################################################################
# Script name: tomcat.sh
# Version: 3.00
# Language: bash shell
# Date: 2017-09-30
# Author: Reven
# QQ: 254674563
##############################################################################
# Color definitions
red='\e[91m'
green='\e[92m'
yellow='\e[93m'
none='\e[0m'
# Script environment variables. Positional arguments:
#   $1 package name   $2 download? (Y/N)   $3 install dir
#   $4 instance count $5 HTTP base port    $6 AJP base port
#   $7 SHUTDOWN base port
PACKAGE_NAME=${1}
PACKAGES_DIR="/usr/local/script"
PACKAGE_DIR="/usr/local/script/${1}"
SHELL_DIR="/usr/local/script/${1}"
DOWNLOAD_URL="http://192.168.124.169:86/software/${1}"
ENV_DIR="/etc/profile"
ACTIVE=1 # 1: deploy 2: uninstall 3: rollback
ACTIVE_TIME=`date '+%Y-%m-%d'`
IS_DOWNLOAD=$2
INSTALL_DIR=$3
TOMCAT_NUM=$4
HTTP_PORTS=$5
AJP_PORTS=$6
SHUTDOWN_PORTS=$7
#--------------------------------- base module ---------------------------------#
# Abort the whole script when the previous command failed.
# Examines $? on entry, prints a red error banner and exits with status 1.
check_ok() {
    local rc=$?
    if [[ $rc -ne 0 ]]; then
        echo -e "${red}[*] Error! Error! Error! Please check the error info. ${none}"
        exit 1
    fi
}
# Install package $1 via yum; skip (with a notice) when rpm says it is
# already installed.
myum() {
# NOTE(review): '^$1' is a prefix match, so e.g. 'git' also matches 'gitk'
# — confirm whether an exact-name check is wanted.
if ! rpm -qa | grep -q "^$1" ; then
yum install -y $1
check_ok
else
echo $1 already installed
fi
}
# Create a system user $1 whose password equals the username; no-op (with a
# notice) when the user already exists in /etc/passwd.
create_user() {
    # Bug fix: use grep -q — we only need the exit status; without it the
    # matched passwd line was leaked to stdout.
    if ! grep -q "^$1:" /etc/passwd ; then
        useradd $1
        echo "$1" | passwd "$1" --stdin &>/dev/null
        check_ok
    else
        echo "$1 already exist!"
    fi
}
# Ensure directory $1 exists (mkdir -p, so parents are created too).
dir_exists() {
    # Robustness fix: quote the argument and use '--' so paths containing
    # spaces or a leading dash work.
    [ ! -d "$1" ] && mkdir -p -- "$1"
}
# Fetch every file for this package from the internal mirror into
# $PACKAGE_DIR (recursive wget, flattened, directory listings excluded).
pkg_download() {
# Create the package directory.
mkdir -p ${PACKAGE_DIR}
wget -P ${PACKAGE_DIR} -r -np -nd -nH -R index.html -q ${DOWNLOAD_URL}"/"
}
#--------------------------------- program module ---------------------------------#
# Optionally change the web publish directory:
# WEB_PATH="/usr/local/Reven_vip002/Reven_vipDynamic/"
# Deploy one tomcat instance: unpack into ${INSTALL_DIR}/tomcat${i}, inject
# JVM options and JAVA_HOME into catalina.sh, and rewrite the three server.xml
# ports from the globals ${SHUTDOWN_PORT}/${HTTP_PORT}/${AJP_PORT}.
# Relies on the loop variable ${i} set by main().
install_tomcat() {
echo "开始部署tomcat..."
cd ${INSTALL_DIR}
tar -xf ${PACKAGE_DIR}/apache-tomcat-*.tar.gz
mv apache-tomcat-* tomcat${i}
check_ok
echo "开始修改启动脚本..."
sed -i '1a\CATALINA_OPTS=-Dfile.encoding=GB18030\nJAVA_OPTS="-server -Xms1024m -Xmx1024m -XX:PermSize=256m -XX:MaxPermSize=512m" \
JAVA_HOME='`ls -d /usr/java/jdk*` ${INSTALL_DIR}/tomcat${i}/bin/catalina.sh
check_ok
echo "开始修改配置文件..."
sed -i "/Server port=\"8005\" shutdown=\"SHUTDOWN\"/s/8005/${SHUTDOWN_PORT}/" ${INSTALL_DIR}/tomcat${i}/conf/server.xml
check_ok
sed -i "/Connector port=\"8080\" protocol=\"HTTP\/1.1\"/s/8080/${HTTP_PORT}/" ${INSTALL_DIR}/tomcat${i}/conf/server.xml
check_ok
sed -i "/Connector port=\"8009\" protocol=\"AJP\/1.3\"/s/8009/${AJP_PORT}/" ${INSTALL_DIR}/tomcat${i}/conf/server.xml
check_ok
sed -i "/connectionTimeout=\"20000\"/a\ maxThreads=\"500\"\n disableUploadTimeout=\"true\"\n\
enableLookups=\"false\"" ${INSTALL_DIR}/tomcat${i}/conf/server.xml
check_ok
# Optionally change the web publish directory:
# sed -i "/<\/Host>/i\ <Context path=\"\/\" docBase=\"${WEB_PATH}\" \/>" ${INSTALL_DIR}/tomcat${i}/conf/server.xml
}
# Main entry point: optionally download the tomcat package, create the
# install directory, then deploy $TOMCAT_NUM instances with sequential ports.
main(){
    # read -p "是否需要下载软件包(Y/N): " answer
    # Download only when the caller asked for it (arg 2 = Y/y).
    if [[ ${IS_DOWNLOAD} = "Y" || ${IS_DOWNLOAD} = "y" ]]; then
        echo -e "${green}正在下载软件,请稍等...${none}"
        pkg_download
        check_ok
    fi
    echo "创建安装目录"
    dir_exists ${INSTALL_DIR}
    # read -p "请输入tomcat实例个数:" TOMCAT_NUM
    # read -p "请输入HTTP起始端口(原端口为8080):" HTTP_PORTS
    # read -p "请输入AJP起始端口(原端口为8009):" AJP_PORTS
    # read -p "请输入SHUTDOWN起始端口(原端口为8005):" SHUTDOWN_PORTS
    for ((i=0; i<${TOMCAT_NUM}; i++)); do
        # Idiom fix: shell arithmetic instead of spawning 'expr' per port.
        HTTP_PORT=$(( HTTP_PORTS + i ))
        AJP_PORT=$(( AJP_PORTS + i ))
        SHUTDOWN_PORT=$(( SHUTDOWN_PORTS + i ))
        install_tomcat
        sleep 1
    done
}
#--------------------------------- deploy ---------------------------------#
main
| true |
525df0b43ea66a5bceaa2d1eb5ec742b6e4e3757
|
Shell
|
gripped/xe503c12-stuff
|
/.bashrc
|
UTF-8
| 360 | 3.078125 | 3 |
[
"MIT"
] |
permissive
|
#
# ~/.bashrc
#
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
alias ls='ls --color=auto'
# Print the battery charge as a percentage with one decimal, e.g. "87.5%".
# Reads charge_now/charge_full from the sbs-20-000b power-supply sysfs node.
function bat() {
    local supply=/sys/class/power_supply/sbs-20-000b
    local full now permille
    full=$(<"$supply"/charge_full)
    now=$(<"$supply"/charge_now)
    # Integer maths: per-mille first, then split into whole part and tenths.
    permille=$((now * 1000 / full))
    printf "%i.%1i%%" $((permille / 10)) $((permille % 10))
}
PS1='[\u@\h \W][`bat`]\$ '
| true |
2764dc1a203a776cd7eda45bd05bc8859ad62748
|
Shell
|
shilezi/bitcoin-strings-with-txids
|
/gentoc.sh
|
UTF-8
| 670 | 3.4375 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Regenerate the index sections of README.adoc: truncate everything after the
# "Here is the index of files:" marker, then append the inputs/outputs
# indexes (one link + size per data file) and the Atomsea link index.
set -eu
readme=README.adoc
# Drop all existing index content after the marker line (in place).
sed -Ei "/^Here is the index of files:$/q" "$readme"
printf "\n=== Inputs index\n\n" >> "$readme"
# Highest file number: last data/in entry with zero padding and '.txt' removed.
n="$(ls data/in | tail -n1 | sed -r 's/0*//;s/.txt//')"
i=0
while (( $i <= $n )); do
f="$(printf data/in/%04d.txt "$i")"
printf "* link:$f[] ($(du -sh "$f" | cut -f1))\n" >> "$readme"
i=$(($i + 1))
done
printf '\n=== Outputs index\n\n' >> "$readme"
i=0
while (( $i <= $n )); do
f="$(printf data/out/%04d.txt "$i")"
printf "* link:$f[] ($(du -sh "$f" | cut -f1))\n" >> "$readme"
i=$(($i + 1))
done
printf "\n=== Atomsea index\n\n" >> "$readme"
# Turn each atomsea id into a bitfossil.org link bullet.
sed -r 's/^/* http:\/\/bitfossil.org\//' data/atomsea >> "$readme"
| true |
81de40766c51e9380cb76f05989cf7ca415d51d5
|
Shell
|
LumberjacksIncorperated/OfflineModule
|
/DecryptFiles.sh
|
UTF-8
| 1,490 | 3.921875 | 4 |
[] |
no_license
|
#!/bin/bash
#------------------------------------------------------------------------------------------
#*
#* PURPOSE
#* -------
#* For use by the 'Haystack Application Offline Module' to decrypt received message and
#* create the plaintext message files
#*
#* AUTHOR
#* ------
#* Lumberjacks Incorperated (2018)
#*
#------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------
# MAIN
#------------------------------------------------------------------------------------------
# Entry point: decrypt every file in ./ToDecrypt into ./Decrypted.
main() {
echo "Attempting to decrypt all encrypted messages..."
iterateThroughFilesAndDecrypt
echo "... Done"
}
#------------------------------------------------------------------------------------------
# INTERNAL FUNCTIONS
#------------------------------------------------------------------------------------------
# Decrypt each file in ./ToDecrypt with the RSA private key (private.pem),
# writing sequentially numbered plaintexts "recieved<N>.txt" into ./Decrypted.
function iterateThroughFilesAndDecrypt()
{
let "i = 0"
cwd=$(pwd)
# NOTE(review): if ToDecrypt is empty the unmatched glob is passed through
# literally — confirm that case cannot occur (or enable nullglob).
for filename in "$cwd"/ToDecrypt/*; do
echo "Attempting to decrypt file $i which is $filename"
cat "$filename" | openssl rsautl -decrypt -passin pass:passphrase -inkey private.pem > "recieved$i.txt"
mv ./"recieved$i.txt" ./Decrypted/
i=$((i+1))
done
}
#------------------------------------------------------------------------------------------
# SCRIPT
#------------------------------------------------------------------------------------------
main
| true |
6f5ae7547a0c84c42ac12504ba32d4f58b0f5565
|
Shell
|
yutkat/dotfiles
|
/.config/tmux/conf/scripts/renumber-sessions.sh
|
UTF-8
| 254 | 2.78125 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Renumber tmux sessions whose names are plain integers into a contiguous
# sequence starting at 1, preserving their numeric order.
# Based on: https://github.com/maximbaz/dotfiles/commit/925a5b88a8263805a5a24c6198dad23bfa62f44d
numeric_sessions=$(tmux list-sessions -F '#S' | grep '^[0-9]\+$' | sort -n)
target=1
for current in $numeric_sessions; do
    tmux rename -t "$current" "$target"
    target=$((target + 1))
done
| true |
7f74216a14ed679d9f621807f992ab3c4a71824e
|
Shell
|
maxned/GarageDoorControl
|
/distance.sh
|
UTF-8
| 784 | 3.3125 | 3 |
[] |
no_license
|
# Measure distance (cm) with an HC-SR04-style ultrasonic sensor via WiringPi.
# WiringPi pin 1 is GPIO 18
GPIO_TRIGGER=1
# WiringPi pin 5 is GPIO 24
GPIO_ECHO=5

gpio mode $GPIO_TRIGGER out
gpio write $GPIO_TRIGGER 0
gpio mode $GPIO_ECHO in
gpio mode $GPIO_ECHO up

# Send a short trigger pulse.
gpio write $GPIO_TRIGGER 1
sleep 0.00001 # Set trigger to LOW after 0.01ms
gpio write $GPIO_TRIGGER 0

START_TIME=0
STOP_TIME=0

# Bug fix: the timestamp assignments were commented out (leaving only debug
# echo loops), so DISTANCE was always 0. Use nanosecond timestamps — the
# previous second-resolution 'date +%s' is far too coarse for an echo pulse.
# NOTE(review): shell/gpio call latency still limits accuracy — confirm this
# is acceptable, or move timing into a compiled helper.
while [[ $(gpio read $GPIO_ECHO) -eq 0 ]]; do
  START_TIME=$(date +%s%N)
done

while [[ $(gpio read $GPIO_ECHO) -eq 1 ]]; do
  STOP_TIME=$(date +%s%N)
done

# Speed of sound 34300 cm/s, halved for the round trip; timestamps are in
# nanoseconds, hence the extra 1e9 divisor.
TIME_ELAPSED=$(( STOP_TIME - START_TIME ))
DISTANCE=$(( (TIME_ELAPSED * 34300) / 2 / 1000000000 ))
echo $DISTANCE

# Reset pins back to input mode to not accidentally output something
gpio mode $GPIO_TRIGGER in
gpio mode $GPIO_ECHO in
| true |
2f0ef91e606c2cbaba780853142e5f088eac3adc
|
Shell
|
Amar1729/bin
|
/mkdir-files
|
UTF-8
| 708 | 4.53125 | 5 |
[] |
no_license
|
#! /usr/bin/env bash
# make a new directory and immediately move files into it
# Print a short usage synopsis for this script.
usage () {
    local prog
    prog=$(basename "$0")
    printf '%s\n' \
        "Usage:" \
        "  $prog DIR FILE1 [FILE2 ...]" \
        "  FILE1, FILE2 will be moved into new directory DIR" \
        "  FILE* may be specified as a glob"
}
# Validate arguments: at least DIR plus one FILE are required, and DIR must
# not already exist. Exits 1 (after printing usage) or 2 on failure.
_check () {
    (( $# >= 2 )) || { usage; exit 1; }
    if [[ -e "$1" ]]; then
        {
            echo "'$1': Already exists."
            echo "(must create a directory with a unique name)"
        } 1>&2
        exit 2
    fi
}
# Create directory $1, then move the remaining arguments into it.
main () {
    local target
    _check "$@"
    target=$1
    shift
    mkdir "$target"
    local item
    for item in "$@"; do
        mv "$item" "$target"
    done
}
main "$@"
| true |
899170622ee3c150dfb20021a76e88130c89a6f3
|
Shell
|
sipman/ubuntu-installer
|
/scripts/android.sh
|
UTF-8
| 1,007 | 3.171875 | 3 |
[] |
no_license
|
# Install the Android SDK command-line tools system-wide and the android-28
# platform. Depends on java.sh (same installer framework) for the JDK.
require java.sh
# Ensure java 8 is used as android is old
sudo update-alternatives --set java /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
ANDROID_HOME="/usr/lib/android-sdk/"
SDKMANAGER=${ANDROID_HOME}tools/bin/sdkmanager
# Scrape the current sdk-tools-linux zip name from the Android Studio page.
FILE=$(curl -s 'https://developer.android.com/studio#downloads' -o- | grep -o -m1 "sdk-tools-linux-[0-9]*.zip")
echo https://dl.google.com/android/repository/$FILE
wget https://dl.google.com/android/repository/$FILE
sudo mkdir $ANDROID_HOME
sudo mv $FILE $ANDROID_HOME
cd $ANDROID_HOME
sudo unzip $FILE
sudo rm $FILE
# NOTE(review): 777 makes the SDK world-writable — confirm this is intended.
sudo chmod -R 777 $ANDROID_HOME
# Fix .bashrc
echo "# Android SDK" >> ~/.bashrc
echo "export ANDROID_HOME=\"${ANDROID_HOME}\"" >> ~/.bashrc
echo "export PATH=\"\${PATH}:\${ANDROID_HOME}tools/:\${ANDROID_HOME}tools/bin:\${ANDROID_HOME}platform-tools/\"" >> ~/.bashrc
# Install latest android vesion
$SDKMANAGER --update
yes | $SDKMANAGER --licenses
$SDKMANAGER "platform-tools" "platforms;android-28"
yes | $SDKMANAGER --licenses
unset ANDROID_HOME
unset FILE
| true |
bca40cf60ba81c203a8b7863f43b8e09668f5f23
|
Shell
|
Simbotic/UnrealGAMS
|
/getDefaultEnv.sh
|
UTF-8
| 662 | 3.5 | 4 |
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Download the default UE4 environment (GrassMountains) and unpack it into
# Content/Environments. Must be run from the root of the UE4 project.
set -o errexit

# UE4 environment
ENVIRONMENT_URL=https://github.com/VertexStudio/UnrealGAMS/releases/download/v0.1/GrassMountains.zip

if [ ! -d "$(pwd)/Content" ]; then
  # Bug fix: diagnostics now go to stderr and we exit non-zero — a bare
  # 'exit' returned status 0, so callers could not detect the failure.
  echo "ERROR! You must execute command in the root of UE4 project." >&2
  echo "If you are in the right place, make sure the 'Content' folder exists." >&2
  echo "Exiting." >&2
  exit 1
fi

echo "Downloading..."
wget "$ENVIRONMENT_URL" -O environment.zip
mkdir -p ./Content/Environments
mv ./environment.zip ./Content/Environments

pushd ./Content/Environments
unzip environment.zip
rm environment.zip
popd

echo
echo -n $'\u2714'; echo " Environment successfully added."
echo
| true |
760d1a4e799c3cf93f1fa1d262ee35bbe5ea49ab
|
Shell
|
GoogleCloudDataproc/initialization-actions
|
/cloudbuild/run-presubmit-on-k8s.sh
|
UTF-8
| 1,182 | 3.484375 | 3 |
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Run the init-actions presubmit in a pod on the build GKE cluster, stream
# its logs until the container terminates, and propagate its exit code.
#   $1 = container image, $2 = Cloud Build ID, $3 = Dataproc image version
set -euxo pipefail

readonly IMAGE=$1
readonly BUILD_ID=$2
readonly DATAPROC_IMAGE_VERSION=$3
# Pod names cannot contain '.' or '_'; replace both with '-'.
readonly POD_NAME=presubmit-${DATAPROC_IMAGE_VERSION//./-}-${BUILD_ID//_/-}

gcloud container clusters get-credentials "${CLOUDSDK_CONTAINER_CLUSTER}"

LOGS_SINCE_TIME=$(date --iso-8601=seconds)
kubectl run "${POD_NAME}" \
--image="${IMAGE}" \
--restart=Never \
--env="COMMIT_SHA=${COMMIT_SHA}" \
--env="IMAGE_VERSION=${DATAPROC_IMAGE_VERSION}" \
--command -- bash /init-actions/cloudbuild/presubmit.sh

# Delete POD on exit and describe it before deletion if exit was unsuccessful
trap '[[ $? != 0 ]] && kubectl describe "pod/${POD_NAME}"; kubectl delete pods "${POD_NAME}"' EXIT

kubectl wait --for=condition=Ready "pod/${POD_NAME}" --timeout=15m

# Tail logs in slices until the container reports Terminated; the timestamp
# cursor avoids duplicating lines between iterations.
while ! kubectl describe "pod/${POD_NAME}" | grep -q Terminated; do
kubectl logs -f "${POD_NAME}" --since-time="${LOGS_SINCE_TIME}" --timestamps=true
LOGS_SINCE_TIME=$(date --iso-8601=seconds)
done

# Extract the container's exit code from the pod status.
EXIT_CODE=$(kubectl get pod "${POD_NAME}" \
-o go-template="{{range .status.containerStatuses}}{{.state.terminated.exitCode}}{{end}}")
if [[ ${EXIT_CODE} != 0 ]]; then
echo "Presubmit failed!"
exit 1
fi
| true |
7e8eb1d18dcf1bf7106b92b43017eaef951064d1
|
Shell
|
atteo/uberbin
|
/kube-watch
|
UTF-8
| 1,308 | 3.671875 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Watch Kubernetes events and, whenever a container goes into BackOff, dump
# that container's (or init container's) logs.
#
# Usage: kube-watch [namespace]
#   With no argument, events from all namespaces are watched (-A).

# More safety, by turning some bugs into errors.
set -euCo pipefail
IFS=$'\n\t'

green='\033[0;32m'
reset='\033[0m'

# Print a message in green.
green() {
  echo -ne "${green}"
  echo "$@"
  echo -ne "${reset}"
}

# Bug fix: "${1-}" keeps 'set -u' from aborting when no argument is given
# (the all-namespaces mode was previously unreachable).
namespace="${1-}"

# kubectl namespace selector: -A (all namespaces) vs -n <ns>. Using an array
# also fixes the log commands below, which previously always passed
# -n "$namespace" — invalid when the namespace was empty.
if [[ -z "$namespace" ]]; then
  ns_args=(-A)
else
  ns_args=(-n "$namespace")
fi

while IFS= read -r line; do
  echo "$line"
  # NOTE(review): the global IFS above excludes ' ' — confirm this read
  # splits the custom-columns output into fields as intended.
  read type reason object subobject message < <( echo "$line" )
  if [[ "$reason" == "BackOff" ]]; then
    if [[ "$subobject" == "spec.initContainers{"* ]]; then
      container="$(echo "$subobject" | sed -re 's,^spec.initContainers\{([^\})]+)\}$,\1,')"
      green "Showing log for pod $object init container $container"
      kubectl "${ns_args[@]}" logs "$object" -c "$container"
    elif [[ "$subobject" == "spec.containers{"* ]]; then
      container="$(echo "$subobject" | sed -re 's,^spec.containers\{([^\})]+)\}$,\1,')"
      green "Showing log for pod $object container $container"
      kubectl "${ns_args[@]}" logs "$object" -c "$container"
    fi
  fi
done < <(kubectl get events "${ns_args[@]}" \
  -o custom-columns=TYPE:.type,REASON:.reason,OBJECT:.involvedObject.name,SUBOBJECT:.involvedObject.fieldPath,MESSAGE:.message \
  -w --watch-only=true )
| true |
f3316fbd0beee357d8537bcf1139038f88d3ef1a
|
Shell
|
mit0110/argument_mining
|
/scripts/01_create_configs.sh
|
UTF-8
| 1,174 | 3.734375 | 4 |
[] |
no_license
|
# Build leave-one-out cross-validation partitions: for each document in
# DATA_DIR, create partitionN/ with that document as test, one other randomly
# chosen document as dev, and all remaining documents as train.
DATA_DIR="../data/echr/for_training"
# Unique basenames (strip the 4-character extension).
FILES=( $(ls $DATA_DIR | sed 's/.\{4\}$//' | uniq) )
PARTITION_NUMBER=0
echo $FILES
for TEST_FILENAME in "${FILES[@]}"
do
echo "Partition $PARTITION_NUMBER"
echo "Test filename $TEST_FILENAME"
mkdir "$DATA_DIR/partition$PARTITION_NUMBER"
mkdir "$DATA_DIR/partition$PARTITION_NUMBER/test"
cp $DATA_DIR/$TEST_FILENAME.* "$DATA_DIR/partition$PARTITION_NUMBER/test"
mkdir "$DATA_DIR/partition$PARTITION_NUMBER/dev"
# Pick a random dev document different from the test document.
DEV_FILENAME=$TEST_FILENAME
while [ $DEV_FILENAME == $TEST_FILENAME ]; do
rand=$[$RANDOM % ${#FILES[@]}]
DEV_FILENAME=${FILES[$rand]}
done
cp $DATA_DIR/$DEV_FILENAME.* "$DATA_DIR/partition$PARTITION_NUMBER/dev"
echo "Dev filename $DEV_FILENAME"
mkdir "$DATA_DIR/partition$PARTITION_NUMBER/train"
# Everything that is neither test nor dev goes into train.
for TRAIN_FILENAME in "${FILES[@]}"
do
if [ $TEST_FILENAME != $TRAIN_FILENAME ] && [ $DEV_FILENAME != $TRAIN_FILENAME ]; then
echo "copying $TRAIN_FILENAME for Training"
cp $DATA_DIR/$TRAIN_FILENAME* "$DATA_DIR/partition$PARTITION_NUMBER/train"
fi
done
PARTITION_NUMBER=`expr $PARTITION_NUMBER + 1`
done
| true |
36454f29b8baa2a306c0405850a000519288c73e
|
Shell
|
bentyeh/scripts
|
/sh/conda_batch.sbatch
|
UTF-8
| 97 | 2.5625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# SLURM batch helper: run a script inside a named conda environment.
#   $1 - conda environment name
#   $2 - script/executable to run
#   remaining args are forwarded to the script
ENV="$1"
SCRIPT="$2"
shift 2
# ~/.bashrc provides the 'conda' shell function (conda init).
source ~/.bashrc
conda activate "$ENV"
"$SCRIPT" "$@"
| true |
2113e407cdc94f7491a0fa99c17e436fe99ff7f0
|
Shell
|
ohmyzsh/ohmyzsh
|
/plugins/virtualenv/virtualenv.plugin.zsh
|
UTF-8
| 263 | 2.765625 | 3 |
[
"MIT"
] |
permissive
|
# Prompt helper: prints the active virtualenv's directory name wrapped in
# the theme's prefix/suffix (defaulting to "[" and "]" via ${VAR=default}).
# Prints nothing when no virtualenv is active.
function virtualenv_prompt_info(){
  [[ -n ${VIRTUAL_ENV} ]] || return
  # ${VIRTUAL_ENV:t} is the path tail (basename); :gs/%/%% doubles every "%"
  # so the name cannot be misread as a zsh prompt escape sequence.
  echo "${ZSH_THEME_VIRTUALENV_PREFIX=[}${VIRTUAL_ENV:t:gs/%/%%}${ZSH_THEME_VIRTUALENV_SUFFIX=]}"
}
# disables prompt mangling in virtual_env/bin/activate
export VIRTUAL_ENV_DISABLE_PROMPT=1
| true |
65c13a244ad2b7d5d8253ec8d5eb8941f5daa266
|
Shell
|
mbrukman/ServiceWorker
|
/compile.sh
|
UTF-8
| 876 | 3.515625 | 4 |
[] |
no_license
|
#!/bin/bash
set -e # Exit with nonzero exit code if anything fails

# curl with a small retry budget for transient network failures.
curlretry() {
  curl --retry 2 "$@"
}

# Compile the single .bs Bikeshed source in the current directory through
# the CSSWG API, writing the generated HTML to $1.  Any extra arguments are
# forwarded to curl.  Exits 22 (curl's HTTP-error code) on failure.
curlbikeshed() {
  local spec_src
  spec_src=$(find . -maxdepth 1 -name "*.bs" -print -quit)

  # The Accept: header ensures we get the error output even when warnings are produced, per
  # https://github.com/whatwg/whatwg.org/issues/227#issuecomment-419969339.
  local status
  status=$(curlretry https://api.csswg.org/bikeshed/ \
             --output "$1" \
             --write-out "%{http_code}" \
             --header "Accept: text/plain, text/html" \
             -F die-on=error \
             -F file=@"$spec_src" \
             "${@:2}")

  if [[ "$status" != "200" ]]; then
    # On failure the output file holds the error report: show it, drop it.
    cat "$1"
    rm -f "$1"
    exit 22
  fi
}

cd docs && curlbikeshed "index.html"
| true |
7af52943dba51be8d3d984fe6f975d3597a25e54
|
Shell
|
UniversityOfIowaHealthCare/gene-expression-tool
|
/MRI_to_Waxholm/003_regions_2_waxholm.sh
|
UTF-8
| 882 | 2.59375 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
# Register diffusion-space data to structural space, then carry results
# through to Waxholm space with the pre-computed affine chain.

# Diffusion space to structural space registration
source=diffusion_b0.nii.gz # name of b0 image
target=555_thr1200.nii.gz  # name of the structural image
flirt -in "$source" -ref "$target" -out b0_str.nii -omat b0_str.mat -bins 256 -cost corratio -searchrx -180 180 -searchry -180 180 -searchrz -180 180 -dof 12 -interp trilinear

# ROIs from Diffusion to structural space
v=region.nii.gz # Diffusion spaced regions
flirt -in "$v" -applyxfm -init b0_str.mat -out "${v}"_str -paddingsize 0.0 -interp trilinear -ref "$target"

# Structural spaced ROIs to Waxholm space (two-step affine chain):
f=tbss_result.nii.gz # give file name (Diffusion results in structural space)
flirt -in "$f" -applyxfm -init flirt1.mat -out "${f}"_1 -paddingsize 0.0 -interp trilinear -ref canon_T1_r.nii.gz
# BUG FIX: the second step previously read the *undefined* variable ${y};
# it must chain from the ${f}_1 output produced by the line above.
flirt -in "${f}"_1 -applyxfm -init flirt2.mat -out "${f}"_2 -paddingsize 0.0 -interp trilinear -ref canon_T1_r.nii.gz
| true |
b112476ed7410e0ba28cbd9e8b83ad5fee8980d1
|
Shell
|
showsmall/ngx
|
/build.sh
|
UTF-8
| 940 | 3.515625 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Cross-compile ${binout} for every supported OS/arch and package the
# binaries under bin/dist (zip for Windows, tar.gz for everything else).
set -e

Version=$(git describe --tags $(git rev-list --tags --max-count=1))
GitCommit=$(git rev-parse HEAD)
BuildDate=$(date +"%F %T")

binout=nq
debug="-w -s"   # strip DWARF and symbol table
param="-X main.VERSION=${Version} -X main.GITLOG_VERSION=${GitCommit} -X 'main.BUILD_TIME=${BuildDate}'"

# build <GOOS> <GOARCH> [binary-suffix]
build() {
	echo "build $1 $2 $3"
	export CGO_ENABLED=0
	export GOOS=$1
	export GOARCH=$2
	export SUFFIX=$3
	go build -ldflags "${debug} ${param}" -o bin/${binout}-${GOOS}-${GOARCH}${SUFFIX} cmd/main.go
	if [ "$GOOS" == "windows" ]; then
		zip bin/dist/${binout}-${GOOS}-${GOARCH}.zip bin/${binout}-${GOOS}-${GOARCH}${SUFFIX}
	else
		tar -czvf bin/dist/${binout}-${GOOS}-${GOARCH}.tar.gz bin/${binout}-${GOOS}-${GOARCH}${SUFFIX}
	fi
}

mkdir -p bin/dist
for target in \
	"windows amd64 .exe" \
	"windows 386 .exe" \
	"windows arm .exe" \
	"darwin amd64" \
	"linux amd64" \
	"linux 386" \
	"linux arm" \
	"freebsd amd64" \
	"freebsd 386" \
	"freebsd arm"
do
	build $target   # intentionally unquoted: splits into OS, arch, suffix
done
| true |
b219bdb30626e34f197ab4568cd27c9fc16fec38
|
Shell
|
Lee-L-Boyd/Improving-MapReduce-with-Spark
|
/project-master/PyScripts/NewsMiner/repeatScript.sh
|
UTF-8
| 236 | 2.6875 | 3 |
[] |
no_license
|
#!/bin/bash
# Run the news miner forever, logging timestamps and miner output to
# progress.txt, with a 20-minute pause between runs.
while true
do
	echo "Running Data Miner. Do not interrupt"
	date
	date >>progress.txt 2>&1
	./newsMiner.py >>progress.txt 2>&1
	echo "Sleeping for twenty mins. You may kill process."
	date
	sleep 1200
done
| true |
e53f24f4c4d2600e337e3360a2d5c07a0249901a
|
Shell
|
istoliving/luci-app-kcptun
|
/root/etc/init.d/kcptun
|
UTF-8
| 5,846 | 3.09375 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh /etc/rc.common
#
# Copyright 2016-2020 Xingwang Liao <kuoruan@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
START=99     # start late so network and firewall are already up
USE_PROCD=1  # supervise the client through procd

KCPTUN=kcptun
CONFIG_FOLDER=/var/etc/$KCPTUN

# Load the jshn JSON helpers (json_init/json_add_*/json_dump); the library
# lives in different places across OpenWrt releases, so try both.
if [ -r /usr/share/libubox/jshn.sh ]; then
	. /usr/share/libubox/jshn.sh
elif [ -r /lib/functions/jshn.sh ]; then
	. /lib/functions/jshn.sh
else
	logger -p daemon.err -t "$KCPTUN" \
		"Package required: jshn."
	exit 1
fi
# Write a message to syslog at daemon.<level> priority, tagged with the
# service name.
_log() {
	local priority="$1"
	local message="$2"
	logger -p "daemon.${priority}" -t "$KCPTUN" "$message"
}
# Render the kcptun client JSON config into $1 from the option variables
# that validate_server_section() exported into the environment.
gen_client_config_file() {
	local config_file="$1"
	json_init
	json_add_string "remoteaddr" "${server_addr}:${server_port}"
	json_add_string "localaddr" "${listen_addr}:${listen_port}"

	# Add every named option of the given JSON type, skipping unset ones.
	# The value is fetched by name via eval, so $k must be a trusted option
	# name — it is: only the hard-coded lists below are ever passed in.
	add_configs() {
		local type="$1"; shift
		local k v

		for k in "$@"; do
			v="$(eval echo "\$$k")"
			if [ -n "$v" ]; then
				if [ "$type" = "string" ]; then
					json_add_string "$k" "$v"
				elif [ "$type" = "int" ]; then
					json_add_int "$k" "$v"
				elif [ "$type" = "boolean" ]; then
					# UCI stores "true"/"false"; JSON wants 1/0.
					if [ "$v" = "true" ]; then
						json_add_boolean "$k" "1"
					else
						json_add_boolean "$k" "0"
					fi
				fi
			fi
		done
	}

	add_configs "string" key crypt mode
	add_configs "int" conn autoexpire mtu sndwnd rcvwnd datashard parityshard dscp \
		nodelay interval resend nc sockbuf smuxver smuxbuf streambuf keepalive scavengettl snmpperiod
	add_configs "boolean" nocomp acknodelay quiet tcp

	# Optional log file (set by start_kcptun_instance when logging enabled).
	if [ -n "$log_file" ]; then
		json_add_string "log" "$log_file"
	fi

	json_close_object
	json_dump -i >"$config_file"
}
# Accept TCP traffic on the local listen port via a dedicated KCPTUN chain
# in the nat table.  <<- strips leading tabs so iptables-restore receives
# clean rule lines; --noflush keeps the existing ruleset intact.
add_iptables_rule() {
	local port="$1"

	iptables-restore --noflush <<-EOF 2>/dev/null
	*nat
	:KCPTUN -
	-A KCPTUN -p tcp --dport $port -j ACCEPT
	-A INPUT -p tcp -j KCPTUN
	COMMIT
	EOF
}
# Drop every rule/chain mentioning KCPTUN by round-tripping the ruleset
# through a case-insensitive filter, preserving packet counters.
clear_iptables_rule() {
	iptables-save --counters | grep -vi "KCPTUN" | iptables-restore --counters
}
# Validate the "general" UCI section ('name:type[:default]' specs).
# Side effect: uci_validate_section exports each option as a shell
# variable (server, client_file, daemon_user, enable_logging, log_folder,
# mem_percentage) for the caller to read.
validate_config_section() {
	uci_validate_section "$KCPTUN" general "$1" \
		'server:uciname' \
		'client_file:string' \
		'daemon_user:string:root' \
		'enable_logging:bool:0' \
		'log_folder:directory:/var/log/kcptun' \
		'mem_percentage:range(0,100):80'
}
# Validate the selected "servers" UCI section and export every kcptun
# client option into the environment ('name:type[:default]' specs).
validate_server_section() {
	uci_validate_section "$KCPTUN" servers "$1" \
		'server_addr:host' \
		'server_port:port:29900' \
		'listen_addr:host:0.0.0.0' \
		'listen_port:port:12948' \
		'key:string' \
		'crypt:string:aes' \
		'mode:or("normal","fast","fast2","fast3","manual"):fast' \
		'conn:min(1)' \
		'autoexpire:uinteger' \
		'scavengettl:min(-1)' \
		'mtu:range(64,9200)' \
		'sndwnd:min(1)' \
		'rcvwnd:min(1)' \
		'datashard:uinteger' \
		'parityshard:uinteger' \
		'dscp:uinteger' \
		'nocomp:or("true", "false")' \
		'quiet:or("true", "false")' \
		'tcp:or("true", "false")' \
		'nodelay:bool' \
		'interval:uinteger' \
		'resend:range(0,2)' \
		'nc:bool' \
		'acknodelay:or("true", "false")' \
		'sockbuf:uinteger' \
		'smuxver:or("1", "2")' \
		'smuxbuf:uinteger' \
		'streambuf:uinteger' \
		'keepalive:uinteger' \
		'snmpperiod:min(1)'
}
# Check that $1 exists, is executable (fixing the mode if needed) and
# really is a kcptun binary (its -v output mentions "kcptun").
validate_client_file() {
	local file="$1"

	if [ ! -f "$file" ]; then
		return 1
	fi

	test -x "$file" || chmod 755 "$file"

	# BUG FIX: quote the path — the previous unquoted expansion word-split
	# paths containing spaces.  Keep the subshell so a misbehaving binary
	# cannot take the init script down with it.
	( "$file" -v 2>/dev/null | grep -q "$KCPTUN" )
}
# Validate the config + selected server section, render the JSON config,
# open the firewall port and register one kcptun client with procd.
start_kcptun_instance() {
	local section="$1"

	if ! validate_config_section "$section" ; then
		_log "err" "Config validate failed."
		return 1
	fi

	# "nil" is the UI's explicit "no server" choice — stop quietly.
	if [ -z "$server" ] || [ "$server" = "nil" ]; then
		_log "info" "No server selected, Client will stop."
		return 0
	elif ! validate_server_section "$server"; then
		_log "err" "Server config validation failed."
		return 1
	elif [ -z "$server_addr" ] || [ -z "$listen_port" ]; then
		_log "err" "Server config validation failed."
		return 1
	fi

	if [ -z "$client_file" ]; then
		_log "err" "Please set client file path, or use auto download."
		return 1;
	elif ! validate_client_file "$client_file"; then
		_log "err" "Client file validation failed."
		return 1
	fi

	# Bracket literal IPv6 addresses so host:port strings stay parseable.
	is_ipv6_address() {
		echo "$1" | grep -q ":"
	}

	is_ipv6_address "$server_addr" && server_addr="[${server_addr}]"
	is_ipv6_address "$listen_addr" && listen_addr="[${listen_addr}]"

	test -d "$CONFIG_FOLDER" || mkdir -p "$CONFIG_FOLDER"

	# Per-section log file, owned by the run-as user (empty disables it).
	log_file=""
	if [ "x$enable_logging" = "x1" ]; then
		mkdir -p "$log_folder"
		chown -R "$daemon_user" "$log_folder"

		log_file="${log_folder}/client.${section}.log"
	fi

	local config_file="${CONFIG_FOLDER}/client.${section}.json"
	if ! ( gen_client_config_file "$config_file" ); then
		_log "err" "Can't create config file".
		return 1
	fi

	add_iptables_rule "$listen_port"

	# Register with procd: respawn on crash, raised fd limit, restart when
	# the rendered config file changes.
	procd_open_instance
	procd_set_param command "$client_file"
	procd_append_param command -c "$config_file"
	procd_set_param limits nofile="65535 65535"
	if [ -e /proc/sys/kernel/core_pattern ] ; then
		procd_append_param limits core="unlimited"
	fi
	# Cap the client's address space at mem_percentage% of total RAM:
	# MemTotal is in kB, so kB * pct * 10 == bytes * pct / 100.
	if [ "$mem_percentage" -gt "0" ] ; then
		local mem_total="$(awk '/MemTotal/ {print $2}' /proc/meminfo)"
		if [ -n "$mem_total" ] ; then
			local use_mem="$(expr $mem_total \* $mem_percentage \* 10)"
			procd_append_param limits as="$use_mem $use_mem"
			_log "info" "Starting kcptun with $use_mem virt mem"
		fi
	fi
	procd_set_param respawn
	procd_set_param user "$daemon_user"
	procd_set_param file "$config_file"
	procd_close_instance
}
# Reload this service whenever the kcptun UCI config changes.
service_triggers() {
	procd_add_reload_trigger "$KCPTUN"
}
# procd entry point: clear stale firewall rules, then start one client
# instance per "general" section of the kcptun config.
start_service() {
	clear_iptables_rule
	config_load "$KCPTUN"
	config_foreach start_kcptun_instance "general"
}
# procd stops the supervised instances itself; only the firewall rules
# need manual cleanup here.
stop_service() {
	clear_iptables_rule
}
| true |
45a95c9b28c69b807ed2c7dc59d8f603577ad02c
|
Shell
|
TheBlackParrot/remote-control-thing
|
/vlc-cli.sh
|
UTF-8
| 4,176 | 3.046875 | 3 |
[] |
no_license
|
#!/bin/bash
####### ~/.bin/controlvlc_socket.sh
#
# Purpose: Control vlc using your multimedia keys or cli
# Author: j@mesrobertson.com
# Date: 11-03-2011
# Dependancies: netcat-openbsd
#
# Instructions:
#
# * Place this scipt in your path and make it executable.
# * Open VLC.
# * Go to Tools > Preferences and change "Show Settings" to "All".
# * Drill down to "Interface" > "Main Inteface" and select "Remote
# control interace".
# * Drill down from "Main Interface" to RC and select "Fake TTY" and in
# "UNIX socket command input" type in: /tmp/vlc.sock
# * Click Save.
# * Restart VLC.
# * Bind a shortcut key in you WM or DE for each command e.g. in
# Openbox rc.xml's "keyboard" section and example is:
#
# ...
# <keybind key="XF86AudioPlay">
# <action name="Execute">
# <execute>controlvlc_socket.sh pause</execute>
# </action>
# </keybind>
# ...
#
# you can of course execute these straight from the cli to test.
# type "controlvlc_socket.sh help" for details about the
# available commands.
#
# It is up to your imagination how you bind any other commands that
# you would like to use. I imagine some Conky goodness :)
#
#######

vlcSocket="/tmp/vlc.sock"

case $1 in
	seek)
		# Only command that forwards an argument (the seek position).
		command="seek $2" ;;
	add|enqueue|playlist|search|sort|sd|play|stop|next|prev|goto)
		command="$1" ;;
	repeat|loop|random|clear|status|title|title_n|title_p)
		command="$1" ;;
	chapter|chapter_n|chapter_p|pause|fastforward|rewind|faster|slower|normal|rate|frame)
		command="$1" ;;
	fullscreen|info|stats|get_time|is_playing|get_title|get_length)
		command="$1" ;;
	volume|volup|voldown|adev|achan|atrack|vtrack|vratio|vcrop|vzoom|snapshot|strack)
		command="$1" ;;
	hotkey|menu|set|save_env|alias|description|license|help|longhelp|logout|quit|shutdown)
		command="$1" ;;
	*)
		# BUG FIX: an unknown/missing command previously fell through and
		# still piped an empty line into the VLC socket; now print usage
		# and exit instead.
		echo "Usage: $0 [command]"
		exit 1 ;;
esac

# BUG FIX: dropped the pointless `exec` on the left of the pipeline (it
# only replaced the subshell running echo) and quoted the socket path.
echo "${command}" | nc -U "${vlcSocket}"

####### END #######
| true |
c7283317d5f86f8422f118ca63df95cc8ff143de
|
Shell
|
Jimmy-Xu/packaging
|
/obs-packaging/linux-container/kata-multiarch.sh
|
UTF-8
| 1,079 | 4.4375 | 4 |
[
"GPL-2.0-only",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash

set -e

# Map a userland architecture name to the name used by the Linux kernel
# build system; unknown architectures are fatal.
arch_to_kernel() {
	local -r machine="$1"

	case "${machine}" in
		aarch64)    echo "arm64" ;;
		ppc64le)    echo "powerpc" ;;
		s390|s390x) echo "s390" ;;
		x86_64)     echo "${machine}" ;;
		*) echo "unsupported architecture: ${machine}" >&2; exit 1 ;;
	esac
}
# Print the location (relative to the kernel tree) of the compressed
# kernel image for an architecture; ppc64le has none, so prints nothing.
arch_to_image() {
	local -r machine="$1"

	case "${machine}" in
		aarch64)    echo "arch/arm64/boot/Image" ;;
		ppc64le)    ;;  # No compressed image
		s390|s390x) echo "arch/s390/boot/image" ;;
		*)          echo "arch/${machine}/boot/bzImage" ;;
	esac
}
# Print the command-line help for this helper script.
usage() {
	printf '%s\n' \
		"$(basename $0) FLAG ARCHITECTURE" \
		"Allowed flags:" \
		"  -a : Print kernel architecture" \
		"  -i : Print kernel compressed image location (may be empty)"
}
# Entry point: require exactly a flag and an architecture, then dispatch
# to the matching converter.
if [ "$#" != "2" ]; then
	echo -e "Invalid options\n\n$(usage)" >&2
	exit 1
fi

case "$1" in
	-a)
		arch_to_kernel $2
		;;
	-i)
		arch_to_image $2
		;;
	*)
		echo -e "Invalid options\n\n$(usage)" >&2
		exit 1
		;;
esac
| true |
fdbf40c54ecd11aa580225c0144daae9ab68f04d
|
Shell
|
MathieuB1/KOREK-backend
|
/nginx/ssl_conf/generate_ssl.sh
|
UTF-8
| 3,873 | 4.21875 | 4 |
[] |
no_license
|
#!/bin/bash
# Generate TLS certificates for Nginx: either a self-signed dummy cert or
# a real Let's Encrypt certificate (with auto-renewal via cron).

echo "Starting script $0"   # BUG FIX: message previously said "sctipt"

# Print command-line help.
display_usage() {
echo
echo "Usage: $0"
echo
echo " -h, --help Display usage instructions"
echo " -t, --testing Test mode"
echo " -l, --letsencrypt Use Let's encrypt/Use OpenSSL (default if not specified)"
echo " -d, --domains Domain names -d korek.com -d www.korek.com -d korek.net"
echo
}
staging=0      # 1 = use the Let's Encrypt staging endpoint (test mode)
letsencrypt=0  # 1 = request a real certificate; 0 = self-signed dummy
domains=""     # space-separated domain list collected from -d flags
# Flag that certbot should run against the staging endpoint.
staging_message() {
    echo "Test mode activated"
    staging=1
}
# Flag that a real Let's Encrypt certificate should be requested.
generate_message() {
    echo "Generate certificate"
    letsencrypt=1
}
# Announce that a -d argument was consumed.
domain_message() {
    printf '%s\n' "Add domain name"
}
# Report an error message on stderr.
raise_error() {
    local msg="$*"
    echo "${msg}" >&2
}
# ---- argument parsing -------------------------------------------------
while (( "$#" )); do
    argument="$1"
    if [[ -z $argument ]] ; then
        raise_error "Expected argument to be present"
        display_usage
    else
        case $argument in
            -h|--help)
                display_usage
                break
                ;;
            -t|--testing)
                staging_message
                shift
                ;;
            -l|--letsencrypt)
                generate_message
                shift
                ;;
            -d|--domains)
                # Prepend this value to the space-separated list.
                domains=$2" "$domains
                domain_message
                shift 2
                # NOTE(review): this stops parsing when the argument after
                # the consumed pair is empty — trailing flags such as a
                # final lone "-t" are skipped; confirm that is intended.
                if [ ! $2 ]; then
                    break
                fi
                ;;
            *)
                raise_error "Unknown argument: ${argument}"
                display_usage
                break
                ;;
        esac
    fi
done

# Self-signed dummy certificate is the default; --letsencrypt disables it.
dummy_ssl=1
if [ $letsencrypt ] && [ $letsencrypt -eq 1 ]; then
    dummy_ssl=0
fi

# Fall back to the default domain when none were supplied; word-splitting
# of the unquoted $domains turns the list into an array.
if [ -n "$domains" ]; then
    domains=($domains)
    echo "set" $domains
else
    domains=(korek.ddns.net)
fi

data_path="/data/certbot"

# $domains (unsubscripted array) expands to its first element: the
# primary domain used for paths and the dummy cert CN.
if [ $dummy_ssl -eq 1 ]; then
    echo "### Creating dummy certificate for $domains ..."
    path="/etc/letsencrypt/live/$domains"
    mkdir -p "$path"
    # Mark for the certificate
    mkdir -p "$data_path/conf/live/$domains"

    # Disable crontab for Let's encrypt renewal'
    crontab -l | grep -v 'Renew SSL' | crontab -

    # Generate dummy SSL
    openssl req -x509 -nodes -newkey rsa:2048 -days 365 \
        -keyout ${path}'/privkey.pem' \
        -out ${path}'/fullchain.pem' \
        -subj '/CN='$domains
fi

if [ $letsencrypt -eq 1 ]; then
    # Remove any dummy certificate so certbot can take its place.
    echo "### Deleting dummy certificate for $domains ..."
    rm -Rf /etc/letsencrypt/live/$domains && \
    rm -Rf /etc/letsencrypt/archive/$domains && \
    rm -Rf /etc/letsencrypt/renewal/$domains.conf
    echo

    echo "### Requesting Let's Encrypt certificate for $domains ..."
    #Join $domains to -d args
    domain_args=""
    for domain in "${domains[@]}"; do
        domain_args="$domain_args -d $domain"
    done
    echo "Domains:$domain_args"

    # Enable staging mode if needed
    if [ $staging != "0" ]; then staging_arg="--staging"; fi

    # Generate Let's encrypt SSL (webroot challenge served by Nginx)
    mkdir -p /var/www/certbot
    certbot certonly --webroot -w /var/www/certbot \
        $staging_arg \
        --register-unsafely-without-email \
        $domain_args \
        --agree-tos

    # Add Renewal each 12 hours (dedupe any previous entry first)
    crontab -l | grep -v 'Renew SSL' | crontab -
    crontab -l | { cat; echo "0 0,12 * * * /bin/bash -c \"echo 'Renew SSL certificate' && certbot renew && echo 'Restart Nginx' && nginx -s reload\""; } | crontab -
    service cron start
fi

echo "### Apply SSL settings to Nginx ..."
# Set Nginx for HTTPS Only: swap in the real server_name, uncomment the
# ssl_certificate/443 directives and comment out the plain port 80 listen.
cp /etc/nginx/conf.d/app.conf /tmp/initial_app.conf
server_name=$(cat /tmp/initial_app.conf | grep server_name | head -1 | cut -d " " -f6 | cut -d ";" -f1)
sed 's/'"$server_name"'/'"$domains"'/g' /tmp/initial_app.conf > /tmp/app.conf
sed 's/#ssl_certificate/ssl_certificate/g' /tmp/app.conf > /tmp/initial_app.conf
sed 's/#listen 443/listen 443/g' /tmp/initial_app.conf > /tmp/app.conf
sed 's/ listen 80;/ #listen 80;/g' /tmp/app.conf > /tmp/initial_app.conf
# Redirect all HTTP requets to HTTPS
sed 's/#!//g' /tmp/initial_app.conf > /tmp/app.conf
cat /tmp/app.conf > /etc/nginx/conf.d/app.conf

echo "### Reloading Nginx ..."
nginx -s reload
| true |
6eec068be75bd693f65db3fa6301abb790e624d5
|
Shell
|
joshspicer/helloworld-legoblock
|
/ubuntu/apply.sh
|
UTF-8
| 543 | 3.515625 | 4 |
[] |
no_license
|
#!/bin/bash
# Installs a small greeting script at /usr/hello.sh and hooks it into
# ~/.bashrc so it runs (backgrounded) on every new shell.
#
# LANG / HAS_COLOR are baked into the generated script at install time:
# the here-doc delimiter is UNquoted, so $LANG and $HAS_COLOR expand NOW,
# while backslash-escaped expansions (\${RED}, \${NC}) survive literally
# and are evaluated when hello.sh itself runs.
# NOTE(review): LANG shadows the standard locale variable — confirm that
# reusing this name is intentional.
LANG=${LANG:-undefined}
HAS_COLOR=${HAS_COLOR:-false}

tee /usr/hello.sh > /dev/null \
<< EOF
#!/bin/bash
RED='\033[0;91m'
NC='\033[0m' # No Color

if [[ "$HAS_COLOR" == true ]]; then
    echo -e "\${RED}"
fi

case "$LANG" in
    "english")
        echo -n -e "Hello, there!"
        ;;
    "italian")
        echo -n -e "Salve!"
        ;;
    *)
        echo -n -e "Beep Boop"
esac

echo -e "\${NC}"

sleep 1
EOF

chmod +x /usr/hello.sh
echo '/usr/hello.sh &' >> ~/.bashrc
| true |
d2d82f6de3555f4deb3e0f41427c384aaaeb88fd
|
Shell
|
AlexanderVishnevsky/domashku
|
/AlexanderVishnevsky/0/files/provision.sh
|
UTF-8
| 698 | 2.859375 | 3 |
[] |
no_license
|
#!/bin/bash
# Vagrant provisioner: stop stray exercise processes, install memcached,
# enable the Apache site over SSL with a self-signed cert, and schedule
# the memcached exercise script via cron.

#Kill all processes matching the S20/infinite-loop test runners
sudo killall -r S20 infinite-loop
#Check for updates
sudo apt-get update
echo "All dependencies are up-to-date"
sudo apt-get install memcached
echo "++ Memcached"
#Conf: install and enable the Apache virtual host shipped in /tmp
cp /tmp/site.conf /etc/apache2/sites-available/site.conf
a2ensite site.conf
#SSL
sudo a2enmod ssl rewrite
sudo mkdir /etc/apache2/ssl
#Cert: self-signed, CN=localhost, valid one year
sudo openssl genrsa -out /etc/apache2/ssl/test.key 2048
sudo openssl req -new -x509 -key /etc/apache2/ssl/test.key -out /etc/apache2/ssl/test.pem -days 365 -subj /CN=localhost
# Append the hourly exercise job to vagrant's crontab, preserving any
# existing entries (crontab -l | ... | crontab - round-trip).
{ crontab -l -u vagrant; echo '1 * * * * sudo -u vagrant /home/vagrant/exercise-memcached.sh'; } | crontab -u vagrant -
sudo service cron restart
sudo service apache2 restart
| true |
ff181f62ac07e0820baadf55a7f3e32e3bf88e84
|
Shell
|
killarny/django-template
|
/project_template/server/uwsgi.sh
|
UTF-8
| 345 | 2.53125 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Container entrypoint: collect static assets, wait for Postgres to come
# up, run migrations, then hand off to uWSGI.

echo "Collecting static files.."
python /{{ project_name }}/manage.py collectstatic -v0 --noinput

# Block until the "database" host accepts connections on the Postgres port.
while ! nc -z -v -w30 database 5432 > /dev/null 2>&1; do
  echo "Waiting for database container to start..."
  sleep 5
done

python /{{ project_name }}/manage.py migrate --noinput
uwsgi /{{ project_name }}/server/uwsgi.ini "$@"
| true |
e6eb23be70afe2d766237808566bc3c60a24b87d
|
Shell
|
a4everyone/a4e._browntrail
|
/infra/utility/run-lab-report.sh
|
UTF-8
| 484 | 2.921875 | 3 |
[] |
no_license
|
#!/bin/bash
# Stage a lab request for processing:
#   $1 - directory holding the request definition and input data
#   $2 - request script name (defines INPUT_DATA_FILE_NAME when sourced)
# Copies the input and request into the lab's bucket, then writes the
# "go" control file that triggers the run.
VOLUMES_INPUT_PATH=~/a4e/volumes/input/dev${LAB_IDX}

if [ ! -d "$VOLUMES_INPUT_PATH/control" ]; then
  echo "ERROR: $VOLUMES_INPUT_PATH/control missing"
  exit 1
fi

# BUG FIX: was `source $1$2`, which concatenated directory and file name
# without a separator; every other use joins them with "/".
source "$1/$2"

if [ -z "$INPUT_DATA_FILE_NAME" ]; then
  echo "ERROR: could not find value for INPUT_DATA_FILE_NAME"
  exit 1
fi

cp "$1/$INPUT_DATA_FILE_NAME" "$VOLUMES_INPUT_PATH/bucket/"
cp "$1/$2" "$VOLUMES_INPUT_PATH/bucket/request.sh"
echo 'sugar sprice and everything nice' > "$VOLUMES_INPUT_PATH/control/go"
| true |
c5404dbce8f6e916e11ddd831dd2f236c01bdd7e
|
Shell
|
jeromescuggs/.tmux
|
/install.sh
|
UTF-8
| 2,037 | 3.3125 | 3 |
[
"WTFPL",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Install the tmux configuration (backing up any existing config files)
# and initialise the bundled session-management plugins.
# Pass "alt" as the first argument to install the alternate local config.

# BUG FIX: DIR was announced *before* it was assigned, so the first
# message always printed an empty path; assign first, then report.
DIR=$(dirname $(readlink -f $0))
echo "Setting root execution path from $DIR..."

echo "
 ██╗ ██████╗ ███╗ ███╗ ██╗ ██╗ ██╗ ██╗ ██╗
 ██║ ██╔══██╗ ████╗ ████║ ██║ ██║ ╚██╗██╔╝ ██║
 ██║ ██████╔╝ ██╔████╔██║ ██║ ██║ ╚███╔╝ ██║
 ██ ██║ ██╔══██╗ ██║╚██╔╝██║ ██║ ██║ ██╔██╗ ╚═╝
 ╚█████╔╝ ██║ ██║ ██║ ╚═╝ ██║ ╚██████╔╝ ██╔╝ ██╗ ██╗
 ╚════╝ ╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝
"

echo "installing config files..."
echo "looking for any prior config files and storing backups in $DIR"
if [[ -e $HOME/.tmux.conf ]]; then
	cp "$HOME/.tmux.conf" "$DIR/tmux-conf-original.conf"
fi
if [[ -e $HOME/.tmux.conf.local ]]; then
	cp "$HOME/.tmux.conf.local" "$DIR/tmux-conf-local-original.conf"
fi

echo "installing configuration..."
cp "$DIR/tmux.conf" "$HOME/.tmux.conf"
echo "installing tmux.conf at .tmux.conf in user home dir..."
if [[ $1 == "alt" ]]; then
	cp "$DIR/.tmux.conf.alt.local" "$HOME/.tmux.conf.local"
	echo "installing alternate .tmux.conf.local in user home dir..."
else
	cp "$DIR/.tmux.conf.local" "$HOME/.tmux.conf.local"
	echo "installing .tmux.conf.local in user home dir..."
fi

echo "setting up session management plugins in $DIR..."
cd "$DIR" && git config url."https://".insteadOf git:// && git submodule update --init
echo "tmux-resurrection and tmux-continuum have been installed."
echo "all done! open an issue or pull request if you experience any turbulence when running this script."
echo "to further tweak your setup, remember to edit ~/.tmux.conf.local, and not ~/.tmux.conf"
| true |
ee8042a1e7a364094bad6170a63d8439832a7763
|
Shell
|
matteo1990/backup_repo
|
/2/esercizio.sh~
|
UTF-8
| 392 | 3.5 | 4 |
[] |
no_license
|
#!/bin/bash
# Ask how many files to create (0..10), create them as file-1..file-N and
# optionally grant each the execute permission.
echo -e "quanti file vuoi creare?\n";
read n;
if [ "$n" -gt 10 ]; then
	echo -e "numero troppo grande";
	exit;
fi
if [ "$n" -lt 0 ]; then
	echo -e "numero troppo piccolo";
	exit;
fi
for i in $( seq 1 $n ); do
	touch "file-$i";
	# BUG FIX: was `echo - "..."` — a typo for the -e flag.
	echo -e "vuoi dare i permessi x a file-$i? Y/N \n";
	read lettera;
	# BUG FIX: the if body was empty, which is a bash syntax error and made
	# the whole script unrunnable; grant the execute bit the prompt asks for.
	if [[ $lettera == "Y" || $lettera == "y" ]]; then
		chmod +x "file-$i"
	fi
	echo -e "eseguito!"
done;
| true |
cd1bcb9661ef072bf40035bf745e61520a1e2489
|
Shell
|
retinens/laravel-scripts
|
/retinens-create-admin-account
|
UTF-8
| 636 | 2.859375 | 3 |
[] |
no_license
|
#!/bin/bash
# Interactively collect admin credentials, generate a Laravel
# DatabaseSeeder that creates that single admin user, then rebuild and
# seed the database.
echo "admin name:"
read nameAdmin
echo "email of the admin account ;"
read emailAdmin
echo "admin password:"
read -s passwordAdmin   # -s: do not echo the password while typing

# The double-quoted string below is interpolated NOW: $nameAdmin,
# $emailAdmin and $passwordAdmin are pasted into the generated PHP.
# NOTE(review): values land inside PHP single-quoted strings verbatim —
# a quote character in the input breaks the seeder; confirm inputs are
# trusted before reuse.
echo "
<?php

use App\User;
use Illuminate\Database\Seeder;

class DatabaseSeeder extends Seeder
{
    /**
     * Seed the application's database.
     *
     * @return void
     */
    public function run()
    {
        User::create([
            'email' => '$emailAdmin',
            'name' => '$nameAdmin',
            'password' => bcrypt('$passwordAdmin'),
            'type' => 'admin'
        ]);
    }
}" > database/seeds/DatabaseSeeder.php

php artisan migrate:fresh --seed --quiet
| true |
045d5222917f60a5dfdf2e09a783628322b7c6c1
|
Shell
|
inissa/system-management
|
/buildscripts/buildlibcap
|
UTF-8
| 817 | 3.1875 | 3 |
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash -e
# Build recipe for libcap; the shared master.sh sourced at the bottom
# drives download/patch/build/install using the exported variables and
# the hook functions defined here.
export PKGNAME=libcap
export PKGVER=2.25
export PKGTAR=${PKGNAME}-${PKGVER}.tar.xz
export PKGURL="https://www.kernel.org/pub/linux/libs/security/linux-privs/libcap2/${PKGTAR}"
export MAKE_JOBS_FLAGS="-j4"
export MAKE_INSTALL_FLAGS="RAISE_SETFCAP=no PAM_LIBDIR=/lib prefix=/usr"

# Hook: patch the Makefiles before building (libcap has no ./configure).
configure_override() {
	sed -i "s|-D_FILE_OFFSET_BITS=64|& ${CFLAGS}|" Make.Rules   # append our CFLAGS
	sed -i "s| gcc| ${CC}|" Make.Rules                          # honour $CC
	sed -i "s:LIBDIR:PAM_&:g" pam_cap/Makefile                  # LIBDIR -> PAM_LIBDIR
	unset CFLAGS   # already folded into Make.Rules above
}

# Hook: extra steps after `make install` — ship the PAM config.
make_install_post() {
	install -dm755 ${DEST}/etc/security
	install -m644 pam_cap/capability.conf ${DEST}/etc/security
}

# Hook: final layout fixes — drop the static lib, move the runtime .so
# to /lib and repoint the /usr/lib dev symlink at it.
post_install_config() {
	rm -rf ${DEST}/usr/*/libcap.a
	mv ${DEST}/usr/lib/libcap.so.* ${DEST}/lib
	ln -sf ../../lib/$(readlink ${DEST}/usr/lib/libcap.so) ${DEST}/usr/lib/libcap.so
}

. $(dirname $0)/master.sh
| true |
75cecfd747069186aace5ed7337a30fd804da68a
|
Shell
|
SHREC-DOE-UF/CMT-bone-BE
|
/compute-only/compute-genjob-mira.sh
|
UTF-8
| 2,986 | 3.578125 | 4 |
[] |
no_license
|
#!/bin/bash
# Generate Cobalt (qsub) job scripts for the CMT-bone-BE compute-only
# benchmark — one per (ELEMENT_SIZE, elements-per-process) combination —
# then submit them.
#   $1 - folder that receives the generated .job scripts
#   $2 - folder that receives benchmark output (.csv)

if [ $# -lt 2 ]; then
	echo "x---x---x---x---x---x---x---x---x"
	echo "No command line argument supplied"
	echo "Run again with jobscript and data folder name as cmd line inputs"
	echo "x---x---x---x---x---x---x---x---x"
	exit 1   # BUG FIX: was `exit 0`, reporting success on a usage error
fi

make clean
make

#bone-be parameters
TIMESTEP=100
E_SIZE=$(seq 5 25)
#EPP="1,1,1;2,2,2;5,5,2;5,5,4"
#CART="2,2,2;4,4,2;8,5,5;13,13,8;32,16,16"

#Test case
#E_SIZE="5 6 7"
EPP="2,2,2" #"7,5,2;5,5,5;6,6,5"
#CART="2,2,2;4,4,2;8,5,5;13,13,8;32,16,16"
PHY_PARAM=5

# Split the ;-separated EPP spec into a newline list of x,y,z triples.
epp_stack=$(echo $EPP | tr ";" "\n")
#cart_stack=$(echo $CART | tr ";" "\n")

#user variables for # of nodes calcution
Tlnum_proc=16
one=1

# Parse the comma-separated triple $1 into EL_X/EL_Y/EL_Z and their
# product Exyz (elements per process).  Factored out of the two loops
# below, which previously duplicated this logic.
parse_epp() {
	EL_X=$(echo "$1" | tr "," " " | awk '{print $1}')
	EL_Y=$(echo "$1" | tr "," " " | awk '{print $2}')
	EL_Z=$(echo "$1" | tr "," " " | awk '{print $3}')
	Exyz=$((EL_X*EL_Y*EL_Z))
}

#Check for job dir to store job scripts
if [ ! -d "$1" ]; then
	mkdir "$1"/
fi
#Check for data dir to store *.out and *.err
if [ ! -d "$2" ]; then
	mkdir "$2"/
fi

echo "Creating job file(s)..."
#Looping on ELEMENT_SIZE
for ELE_SIZE in $E_SIZE
do
	for ele in $epp_stack
	do
		parse_epp "$ele"
		echo '#!/bin/bash' > jobfile
		echo "" >> jobfile
		echo 'runjob --np 1 -p 1 --verbose==INFO : ./cmtbonebe '$TIMESTEP' '$ELE_SIZE' '$EL_X' '$EL_Y' '$EL_Z' > '$2'/bonebe-es'$ELE_SIZE'ec'$Exyz'.csv' >> jobfile
		mv jobfile job-bonebe'-es'$ELE_SIZE'ec'$Exyz'.job'
		chmod +x job-bonebe'-es'$ELE_SIZE'ec'$Exyz'.job'
		mv job-bonebe'-es'$ELE_SIZE'ec'$Exyz'.job' "$1"/
	done
done
echo "Job file(s) created!"
echo " "
echo "Listing Jobfile(s):"
echo "---------------------------------------------------------------"
ls "$1"/
echo "----------------------------------------------------------------"
sleep 2
echo " "
echo "Job file(s) present in $1 folder"
echo "Output and error files present in $2 folder"
echo " "

#Submit jobscript
echo "Submitting Job files:-"
for ELE_SIZE in $E_SIZE
do
	for ele in $epp_stack
	do
		parse_epp "$ele"
		# Walltime (minutes) grows with element size.
		if [ $ELE_SIZE -le 13 ]; then
			wtime=15
		elif [ $ELE_SIZE -gt 13 ] && [ $ELE_SIZE -le 20 ]; then
			wtime=30
		elif [ $ELE_SIZE -gt 20 ] && [ $ELE_SIZE -le 22 ]; then
			wtime=45
		elif [ $ELE_SIZE -gt 22 ] && [ $ELE_SIZE -le 24 ]; then
			wtime=60
		elif [ $ELE_SIZE -eq 25 ]; then
			wtime=75
		fi
		#if [ $EL_X = $EL_Y ] && [ $EL_Y = $EL_Z ]; then
		qsub -A tools -t $wtime -n 1 --mode script "$1"/job-bonebe'-es'$ELE_SIZE'ec'$Exyz'.job'
		#fi
	done
done
echo "* * * * -----------Completed!----------- * * * *"
| true |
776ab62b4111fb06667a49a1d6ce22892995c49e
|
Shell
|
maxkarkowski/dotfiles
|
/.zshrc
|
UTF-8
| 1,443 | 2.765625 | 3 |
[] |
no_license
|
# Personal zsh configuration: alias files, antigen-managed oh-my-zsh
# plugins, nvm/rbenv initialisation, and automatic `nvm use` on cd.
source ~/aliases/.aliases
source ~/aliases/.gitalias
source ~/aliases/.yarn
source $(brew --prefix)/share/antigen/antigen.zsh

# Load the oh-my-zsh's library.
antigen use oh-my-zsh

# Bundles from the default repo (robbyrussell's oh-my-zsh).
antigen bundle git
antigen bundle zsh-users/zsh-completions
antigen bundle heroku
antigen bundle pip
antigen bundle yarn
antigen bundle command-not-found
antigen bundle autojump
antigen bundle brew
antigen bundle common-aliases
antigen bundle compleat
antigen bundle git-extras
antigen bundle git-flow
antigen bundle npm
antigen bundle osx
antigen bundle web-search
antigen bundle zsh-users/zsh-syntax-highlighting
antigen bundle zsh-users/zsh-history-substring-search ./zsh-history-substring-search.zsh
antigen theme https://github.com/denysdovhan/spaceship-zsh-theme spaceship
# Load the theme.
# antigen theme avit
# Tell antigen that you're done.
antigen apply

export NVM_DIR="$HOME/.nvm"
[ -s "/usr/local/opt/nvm/nvm.sh" ] && . "/usr/local/opt/nvm/nvm.sh" # This loads nvm
[ -s "/usr/local/opt/nvm/etc/bash_completion" ] && . "/usr/local/opt/nvm/etc/bash_completion" # This loads nvm bash_completion

eval "$(rbenv init -)"

autoload -U add-zsh-hook
# Automatically switch node versions when entering a directory that has a
# readable .nvmrc; otherwise fall back to the nvm default version.
load-nvmrc() {
  if [[ -f .nvmrc && -r .nvmrc ]]; then
    nvm use
  elif [[ $(nvm version) != $(nvm version default) ]]; then
    echo "Reverting to nvm default version"
    nvm use default
  fi
}
add-zsh-hook chpwd load-nvmrc
load-nvmrc   # apply once for the shell's starting directory
| true |
09e1a80890909c41601b0459c438237d6740ad66
|
Shell
|
rkotamaraja/pso-hdp-local-repo
|
/scripts/post_yum_ambari_install.sh
|
UTF-8
| 837 | 3.40625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# THIS SCRIPT SHOULD BE RUN ON THE AMBARI SERVER
#
# ---- AFTER the Ambari-Server has been installed (yum install ambari-server)
# ----- AND BEFORE ambari-server start
#
# Copy over the repoinfo.xml to the stacks area of the newly
# install ambari server
#   $1 - FQDN of the host serving the local repo

if [ $# -lt 1 ]; then
  echo " Please specify the Local Repo's host FQDN
  $0 repo.mycompany.com
"
  exit 1   # BUG FIX: `exit -1` is out of the 0-255 range (wraps to 255)
fi

STACK_REPOS_TEMPLATE_DIR=/var/lib/ambari-server/resources/stacks/HDPLocal

# This repo template should already have been "adjusted" for use
# by this cluster during the Local repo configuration.
wget "http://$1/templates/ambari-server/resources/stacks/HDPLocal/1.3.0/repos/repoinfo.xml" -O $STACK_REPOS_TEMPLATE_DIR/1.3.0/repos/repoinfo.xml
wget "http://$1/repos/jdk/jdk-6u31-linux-x64.bin" -O /var/lib/ambari-server/resources/jdk-6u31-linux-x64.bin
| true |
bf4c7edbe504c1086b6dbba459584d8b850d3c97
|
Shell
|
opensourceradio/ram
|
/usr/local/bin/btd-dropbox-audit
|
UTF-8
| 22,919 | 3.828125 | 4 |
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/zsh
# shellcheck shell=bash disable=SC2016,SC2034,SC2048,SC2086,SC2154,SC2155,SC2190,SC2207,SC2248,SC2295,SC2296,SC2299,SC2312
# SC2016: $Hash is not a variable, it's a token that gets replaced by git(1)
# SC2034: shellcheck does not know about ZSH param expansions
# SC2048: huch shellcheck's aggressive quoting suggestions
# SC2086: ignore shellcheck's aggressive quoting suggestion
# SC2154: the only "unassigned" parameters *should be* upper case command names
# SC2155: ignore shellcheck's declare and assign separately suggestions
# SC2190: ZSH associative arrays work differently from Bash associative arrays
# SC2207: mapfile or read -a are Bash-isms
# SC2248: ignore shellcheck's aggressive quoting recommendations
# SC2295: ignore shellcheck's conservative quoting suggestions
# SC2296: ZSH parameter expansions CAN start with "("
# SC2299: ZSH parameter expansions CAN be nested
# SC2312: ignore shellcheck's invoke separately suggestions
zmodload zsh/stat      # builtin stat for file metadata lookups
zmodload zsh/datetime  # EPOCHSECONDS / strftime builtins

# This script complies with Semantic Versioning: http://semver.org/
declare -ri v_major=0
declare -ri v_minor=2
declare -ri v_patch=0
declare -r v_hash='$Hash$'
##########################################################################################
##########################################################################################
##
## Locally defined subroutines
##
##########################################################################################
##########################################################################################
# Attempt to kill all PIDs passed. Try first without privilege
# escalation, then try _with_ privilege escalation.
# Returns with non-zero status if we are unable to kill any of the
# passed PIDs.
function kill_with_privilege() {
local -i proc_id
local -ir attempt_limit=5
local -i count=0
local sig=INT
local -i return_value=0
for proc_id ; do
while ((count < attempt_limit)) ; do
((count > (attempt_limit/2))) && sig=KILL
((VERBOSE)) && printf "Attempting to terminate process ID %d with signal SIG%s..." "${proc_id}" "${sig}" >&2
${KILL} -"${sig}" "${proc_id}"
${SLEEP} 0.5
if ! [[ -d "/proc/${proc_id}" ]] ; then
continue 2 # next proc_id in the for loop
fi
((VERBOSE)) && printf "No go; attempting to terminate process ID %d with privileges using signal SIG%s... " "${proc_id}" "${sig}" >&2
${SUDO} "${KILL}" -"${sig}" "${proc_id}"
${SLEEP} 0.5
if ! [[ -d "/proc/${proc_id}" ]] ; then
continue 2 # next proc_id in the for loop
fi
${SLEEP} 0.5
print
((count++))
done
printf '\nUnable to terminate process %s. Please seek help!\n' "${proc_id}" >&2
return_value=1
done
((VERBOSE && (count == attempt_limit))) && print >&2
return "${return_value}"
}
# get_yes_no() repeatedly prompts with ${our_prompt} to /dev/tty until
# the user responds affirmatively or negatively (reading from
# /dev/tty). Returns 0 ("success") for a "yes"-like response and 1
# ("non-success") for a "no"-like response to the given prompt.
# When $2 is non-zero the answer is pre-seeded to "n": the until
# condition is then true immediately, so no prompt is shown and the
# function returns 1 without user interaction.
function get_yes_no() {
  local -r our_prompt="${1}"
  local -i answer_no="${2}"
  local -i again=0
  local response=''

  if ((answer_no)) ; then
    response='n'
  fi
  # ${(L)response} lower-cases the reply; accept n, no, y, yes.
  until [[ "${(L)response}" =~ ^(no*|y(es)*)$ ]] ; do
    ((again)) && print "Please respond with 'yes' ('y') or 'no' ('n')." > /dev/tty
    print "${our_prompt} [y or n] \c" > /dev/tty
    # SC2034: response is used, but in a non-Bash way, see below.
    # shellcheck disable=SC2034
    read -r response < /dev/tty
    again=1
  done
  [[ "${(L)response}" =~ ^no*$ ]] && return 1
  return 0
}
# Show how to use this command.
# The here-document below is unquoted, so the embedded $(...) command
# substitutions (fmt(1) reflowing the long paragraphs) are evaluated at
# call time; ${1:t} is zsh's "tail" (basename) modifier.
function usage() {
  local -r my_name="${1:t}"
  ${CAT} <<EOF
NAME
${my_name}
SYNOPSIS
${my_name} [ --stranded-files (-s) ] [ --interactive (-i) ]
[ --verbose (-v) ] [ --no (-n) ]
[ --help (-h) ] [ --version (-V) ]
DESCRIPTION
$(${FMT} <<<"${my_name} performs several checks on Rivendell dropboxes in order to ensure their consistent and reliable functionality. Checks include:
* running processes for all defined dropboxes
* 'left-over' processes from deleted or changed dropboxes
* existence of dropbox folders (directories)
* 'stale' import records in the dropbox history table ('stale' is defined as the import date/time being older than the file timestamp)
* existence of Carts for dropboxes that specify them
* 'hung' import processes (rdxport.cgi)
* 'stranded' (unimported) files in dropbox directories") (optional)
$(${FMT} <<<"${my_name} takes specific action in cases where it makes sense and is safe:
* starts any missing dropbox processes
* terminates any 'hung' import processes")
* removes 'stale' import history records
$(${FMT} <<<"Specify '--interactive' (short option: '-i') to have ${my_name} prompt before taking any action.")
$(${FMT} <<<"Specify '--no' (short option: '-n') to answer 'No' to all prompts.")
$(${FMT} <<<"Specify '--stranded-files' (short option: '-s') to have ${my_name} look for and report on files in dropbox directories that have not been imported.")
EOF
}
##########################################################################################
##########################################################################################
##
## Script Main Line
##
##########################################################################################
##########################################################################################
# Prefer the instance in /usr/bin (that gets installed with the
# rdfetch package) over /usr/local/bin
if [[ -f /usr/bin/zsh-functions ]] ; then
  source /usr/bin/zsh-functions
elif [[ -f /usr/local/bin/zsh-functions ]] ; then
  source /usr/local/bin/zsh-functions
else
  print "Missing zsh-functions library. Cannot continue." >&2
  exit 1
fi
# Runtime flags; all set from the command line options parsed below.
typeset -i ANSWER_NO=0            # --no: answer "No" to every prompt
typeset -i CHECK_STRANDED_FILES=0 # --stranded-files: scan for unimported files
typeset -i INTERACTIVE=0          # --interactive: prompt before acting
typeset -i SHOW_VERSION=
typeset -i VERBOSE=00             # incremented once per -v
typeset -ri CURRENT_TIME=${EPOCHSECONDS}
typeset -ri ONE_DAY=$((60 * 60 * 24))
############# BEGIN external shell commands used in this script. #############
# This script uses these 14 external commands.
# Look for them in their upper case, parameter expanded form.
typeset -a our_commands
our_commands=( cat date find fmt getopt grep kill pidof ps sed sendusr1 sleep sort sudo )
# Find the executables we need; this uses some basic shell and a ZSH trick:
# the (U) in the eval says to evaluate the parameter as all upper case
# letters. This snippet generates shell parameters representing the upper case
# equivalent of the command names and sets the parameter values to the full path
# of the commands.
# Refresh this segment in Emacs by marking the appropriate region (or the whole
# buffer with C-xH) and replacing it with C-uM-|mk-ourCommands (shell-command-on-region).
typeset C D
for C in ${our_commands[*]} ; do
  # shellcheck disable=SC2154 # ZSH: ${path} is set by the shell.
  for D in ${path} ; do
    # shellcheck disable=SC2140,SC2086 # we need the quotes
    [[ -x "${D}/${C}" ]] && { eval "${(U)C//-/_}"="${D}/${C}" ; break ; }
  done
  [[ -x $(eval print \$"${(U)C//-/_}") ]] || { print "Cannot find ${C}! Done."; return 1 ; }
done
unset our_commands C D
############## END external shell commands used in this script. ##############
# Parse the command line with GNU getopt(1); abort on unknown options.
if ! TEMP=$(${GETOPT} -o hinsVv --long help,interactive,no,stranded-files,version,verbose -n "${0:t}" -- "${@}") ; then
  print "getopt(1) Fatal Error (${?}). Terminating..." >&2
  return 1
fi
eval set -- "${TEMP}"
while : ; do
  # VERBOSE is used, but in a non-Bash way.
  # shellcheck disable=SC2034
  case "${1}" in
    -h|--help) usage "${0}" ; exit ;;
    -i|--inter*) INTERACTIVE=1 ; shift ;;
    -n|--no) ANSWER_NO=1 ; shift ;;
    -s|--stranded*) CHECK_STRANDED_FILES=1 ; shift ;;
    -V|--version) SHOW_VERSION=1 ; shift ;;
    -v|--verbose) ((VERBOSE += 1)) ; shift ;;
    --) shift ; break ;;
    *) print "${0:t}: getopt internal error!. Terminating..." >&2 ; return 1 ;;
  esac
done
unset TEMP
if ((SHOW_VERSION)) ; then
  typeset commit_hash="${v_hash}"
  # An unexpanded '$Hash$' keyword means we are running from a working
  # copy, not an installed release.
  # BUG FIX: corrected the displayed word "prerelase" -> "prerelease".
  [[ "${v_hash}" == '$Hash$' ]] &&
    commit_hash="prerelease"
  print "${0:t}: version ${v_major}.${v_minor}.${v_patch}-${${commit_hash#\$Hash: }%$}"
  exit 0
fi
# In interactive mode, offer the (slow) stranded-file scan even when
# --stranded-files was not given on the command line.
if ((INTERACTIVE)) && ! ((CHECK_STRANDED_FILES)) ; then
  if get_yes_no "Do you want to check for files that have not been imported?" 0 ; then
    CHECK_STRANDED_FILES=1
  fi
fi
((VERBOSE)) && printf "%s: beginning audit of Rivendell dropboxes.\n" "${0:t}" >&2
# hostname -s *must* match the station name in the STATIONS table.
typeset -r station=$(hostname -s)
# Ad-hoc field separator used to pack two SQL columns into one value;
# chosen as a string unlikely to appear in cart numbers or path specs.
typeset -r myFS='^:^'
# Get the count of dropboxes and running processes.
typeset -ri box_count=$(doSQL "select count(*) from DROPBOXES where station_name='${station}'")
typeset -ri pid_count=$(${PIDOF} rdimport | wc -w)
((VERBOSE)) && printf "Dropbox count: %d, PID count: %d\n" "${box_count}" "${pid_count}" >&2
if ((box_count > pid_count)) ; then
  printf "Found %d dropboxes, but only %d processes.\n" "${box_count}" "${pid_count}" >&2
elif ((pid_count > box_count)) ; then
  printf "Found %d dropboxes, but there are too many (%d) processes.\n" "${box_count}" "${pid_count}" >&2
else
  printf "Good! %d dropboxes and %d dropbox processes.\n" "${box_count}" "${pid_count}" >&2
fi
# procs is an associative array containing the full command line,
# indexed by the PID.
typeset -A procs
typeset oIFS="${IFS}"
IFS=$'\n\t'
# NOTE: the whitespace between %p (PID) and %a (ARGS) should be a single <TAB>.
procs=($(PS_PERSONALITY=aix ${PS} -eo "%p$(print '\t\c')%a" | ${GREP} -v "${GREP}" | ${GREP} "rdimport" | ${SED} -e 's/^ *//'))
((VERBOSE > 1)) && printf "rdimport processes:\n%s\n" "${(v)procs[*]}" >&2
# path_specs is an associative array containing the dropbox cart number
# (may be 0) and the path spec, indexed by the dropbox ID.
typeset -A path_specs
path_specs=($(doSQL "select ID, concat(TO_CART,'${myFS}',PATH) from DROPBOXES where STATION_NAME='${station}' order by TO_CART"))
((VERBOSE > 1)) && printf "path_specs:\n%s\n" "${(v)path_specs[*]}" >&2
# Check if all defined dropbox processes are running.
print "Checking for running processes for all dropboxes..." >&2
typeset -i to_cart
for id in ${(k)path_specs[*]} ; do
  # NOTE(review): path_dir is computed here but not used in this loop.
  path_dir="${${path_specs[${id}]#*${myFS}}%/*}"
  to_cart="${path_specs[${id}]%${myFS}*}"
  # A dropbox is "running" when its PATH spec appears verbatim in some
  # rdimport command line collected above.
  if ! ${GREP} -F -q "${path_specs[${id}]#*${myFS}}" <<<${(v)procs[*]} ; then
    printf "\tMissing dropbox process for dropbox ID %d ('%s', Cart # %d).\n" \
           "${id}" \
           "${path_specs[${id}]#*${myFS}}" \
           "${to_cart}" >&2
    if ((INTERACTIVE)) ; then
      if get_yes_no "Restart dropboxes?" ${ANSWER_NO}; then
        if pidof_rdservice="$(${PIDOF} 'rdservice')" ; then
          # The Rivendell v3 way to restart all dropboxes.
          ${SENDUSR1} "${pidof_rdservice}"
        else
          # The pre-Rivendell v3 way to restart all dropboxes.
          kill_with_privilege "$(${PIDOF} rdcatchd)"
          # SC2046: we want the words of pidof to be split.
          # shellcheck disable=SC2046
          kill_with_privilege $(${PIDOF} rdimport)
        fi
        print "Dropboxes have been restarted."
      fi
    else
      printf '\tMight want to restart dropboxes as appropriate.\n' >&2
    fi
  fi
done
IFS="${oIFS}"
# Other way around: check if all running processes have a dropbox
# defined. extract the dropbox IDs from the rdimport processes and
# compare them to the list of dropbox IDs from the database.
typeset -i proc_id
print "Checking whether all running dropbox processes have a dropbox configured..." >&2
for proc_id in ${(k)procs[*]} ; do
  # BUG ALERT: This expansion assumes '--drop-box' follows
  # '--persistent-dropbox-id=' in the ps(1) output.
  typeset process_dropbox_id="${${procs[${proc_id}]#*persistent-dropbox-id=}% --drop-box*}"
  typeset process_path_spec="${procs[${proc_id}]/* \///}"
  if [[ "${process_path_spec#/usr/bin/rdimport }" =~ ^/ ]] ; then
    if ! ${GREP} -F -q "${process_path_spec}" <<<"${(v)path_specs[*]}" ; then
      printf "\tMissing dropbox for proccess with PATH specifier: %s\n" "${process_path_spec}" >&2
      if ((INTERACTIVE)) ; then
        if get_yes_no "Terminate process ID ${proc_id}?" ${ANSWER_NO} ; then
          kill_with_privilege "${proc_id}"
          print "Done."
        else
          printf "OK, but you probably want to terminate process ID %d\n." "${proc_id}"
        fi
      else
        ((VERBOSE)) && printf "Terminating process ID %s... " "${proc_id}" >&2
        kill_with_privilege "${proc_id}"
        ((VERBOSE)) && print "done." >&2
      fi
    fi
  else
    # Deal with dropboxes that are missing a Path Spec.
    printf "\tDropbox ID %d has no Path Spec. Where should it look for files to import?\n" \
           "${process_dropbox_id}"
  fi
done
# Check for and remove 'stale' filenames in DROPBOX_PATHS.
# A record is "stale" when the file's on-disk modification time is
# newer than the import timestamp recorded in DROPBOX_PATHS (i.e. the
# file changed after it was last imported).
typeset -i dropbox_path_count
print "Checking for 'stale' filenames in DROPBOX_PATHS table..." >&2
for id in ${(k)path_specs[*]} ; do
  dropbox_path_count=$(doSQL "select count(*) from DROPBOX_PATHS where DROPBOX_ID=${id}")
  if ((dropbox_path_count > 0)) ; then
    ((VERBOSE)) && printf "dropbox_path_count for dropbox %d (%s): %d\n" "${id}" "${path_specs[${id}]/^:^/: }" "${dropbox_path_count}" >&2
    oIFS="${IFS}"
    IFS=$'\t\n'
    ## Scan all the filenames in DROPBOX_PATHS for this dropbox and check the
    ## timestamp on the files. Delete "stale" entries from DROPBOX_PATHS.
    doSQL "select DROPBOX_ID, FILE_PATH, unix_timestamp(FILE_DATETIME) from DROPBOX_PATHS where DROPBOX_ID=${id}" |
      while read -r dropbox_id file_path file_datetime ; do
        if ! [[ -r "${file_path}" ]] ; then
          delete_source=$(doSQL "select DELETE_SOURCE from DROPBOXES where ID=${dropbox_id}")
          # Warn about missing files only if this dropbox was supposed to preserve
          # the source files.
          if [[ "${delete_source}" =~ [Nn] ]] ; then
            ((VERBOSE)) && printf "Previously imported file '%s' does not exist, skipping 'staleness' check.\n" "${file_path}"
            continue
          fi
        fi
        ((VERBOSE > 1)) && printf "\t--%s--%d--" "${file_path}" ${file_datetime} >&2
        # zstat -H fills the associative array file_stat with the
        # file's metadata (we use the mtime member below).
        zstat -H file_stat "${file_path}"
        ((VERBOSE > 1)) && printf "%s--\n" "${file_stat[mtime]}" >&2
        # Deal with the situation where the modification time of the file in the
        # dropbox is newer than the timestamp in DROPBOX_PATHS.
        if (( file_stat[mtime] > file_datetime )) ; then
          # BUG FIX: the verbose message used to print file_stat[atime]
          # while the comparison above uses mtime; report mtime so the
          # displayed values match the test that triggered the message.
          ((VERBOSE)) && printf "%s: %d > %d\n" "${file_path}" "${file_stat[mtime]}" "${file_datetime}"
          if ((INTERACTIVE)) ; then
            if get_yes_no "Remove STALE '${file_path}' from DROPBOX_PATHS?" ${ANSWER_NO} ; then
              if ! doSQL "delete from DROPBOX_PATHS where FILE_PATH = '${file_path}'" ; then
                printf "Unable to delete DROPBOX_PATH row for '%s'. Please seek additional help.\n" "${file_path}" >&2
              fi
            fi
          else
            ((VERBOSE)) && printf "Deleting stale entry '%s' from DROPBOX_PATHS.\n" "${file_path}"
            if ! doSQL "delete from DROPBOX_PATHS where FILE_PATH = '${file_path}'" ; then
              printf "Unable to delete DROPBOX_PATH row for '%s'. Please seek additional help.\n" "${file_path}" >&2
            fi
          fi
        fi
      done
    IFS="${oIFS}"
    unset oIFS
  fi
done
# Check for dropboxes that have no PATH spec defined.
print "Checking for Empty PATH specifications..." >&2
typeset -a missing_path_ids
missing_path_ids=( $(doSQL "select ID from DROPBOXES where PATH = '' or PATH is NULL") )
if ((${#missing_path_ids} > 0)) ; then
  ${CAT} <<EOF >&2
Missing PATH specification for DROPBOX IDs
${(j:\n:)missing_path_ids}
EOF
fi
# Check for valid directory names in dropbox PATH specs. Spaces and special
# characters are discouraged (and may even break dropboxes).
print "Checking for 'forbidden' directory names..." >&2
for path_spec in ${(v)path_specs[*]#*${myFS}} ; do
  typeset path_spec_dir=${path_spec%/*}
  # grep --color=always keeps the highlighting escape sequences in $p
  # so the offending characters stand out in the message below.
  # BUG FIX: corrected the message text "One ore more ... contains"
  # -> "One or more ... contain".
  if p=$(print "${path_spec_dir}" | ${GREP} --color=always '[|&\{\}#\$\<\>\?]') ; then
    ${CAT} <<EndOfText >&2
One or more directory components in Dropbox PATH spec
${p}
contain one of the characters |, &, {, }, # \$, <, >, and ?.
Please consider choosing a PATH spec without these characters.
EndOfText
  fi
done
# Check for existence of dropbox directories.
print "Checking for missing dropbox directories (folders)..." >&2
for path_spec in ${(v)path_specs[*]#*${myFS}} ; do
  typeset path_spec_dir=${path_spec%/*}
  if ! [[ -d ${path_spec_dir} ]] ; then
    printf "Missing directory (%s) for dropbox PATH specifier: %s\n" \
           "${path_spec_dir}" "${path_spec}" >&2
  fi
done
typeset -a spinners
# SC1003: Not escaping a single-quote, quoting an escaped backslash.
# shellcheck disable=SC1003
spinners=('|' '/' '-' '\\')
typeset -ir spinner_count="${#spinners}"
typeset -i x=1
# Check for Cart Numbers as needed (some dropboxes create new Carts
# for all new imports).
print "Checking dropbox Cart numbers (this may take some time)..." >&2
for id in ${(k)path_specs[*]} ; do
  # Animate a progress spinner on the controlling terminal.
  ((INTERACTIVE)) && printf "%c\r" "${spinners[$((x++ % spinner_count))]}" > /dev/tty
  to_cart=${path_specs[${id}]%${myFS}*}
  # TO_CART == 0 means "create a new cart per import"; only a fixed,
  # non-zero target cart needs to exist and have cuts.
  if ((to_cart > 0)) ; then
    typeset -i cut_count=$(doSQL "select count(*) from CUTS where CART_NUMBER = ${to_cart}")
    if ((cut_count < 1)) ; then
      typeset cart_title
      # SC2046: shellcheck is too quote-crazy
      # shellcheck disable=SC2046
      read -r cart_title<<<$(doSQL "select TITLE from CART where NUMBER = ${to_cart}")
      # NOTE(review): the two [[ ]] tests below are equivalent (-z and
      # comparison against the empty string); one would suffice.
      if [[ -z "${cart_title}" ]] || [[ "${cart_title}" = '' ]] ; then
        # The "\r \n" is in these printfs to clean up the "spinners" output.
        printf "\r \nMissing TITLE in Cart %d associated with dropbox ID %d, PATH '%s'\n" \
               "${to_cart}" "${id}" "${path_specs[${id}]#*${myFS}}" >&2
      fi
      printf "\r \nNo CUTS for CART %d (TITLE: '%s', dropbox ID %d, PATH '%s')\n" \
             "${to_cart}" "${cart_title}" "${id}" "${path_specs[${id}]#*${myFS}}" >&2
      unset cart_title
    fi
  fi
done
((INTERACTIVE)) && printf "\r \r\n" > /dev/tty
# Check for "hung" rdxport.cgi processes. Kill them if they are old or
# appear to be "stranded".
print "Checking for 'hung' rdxport.cgi processes..." >&2
declare -a possibly_hung_xport_procs
possibly_hung_xport_procs=( $(${PIDOF} 'rdxport.cgi') )
if ((${#possibly_hung_xport_procs} > 0)) ; then
  printf "Found %d possibly 'hung' Import processes...\n" ${#possibly_hung_xport_procs}
fi
unset proc_id
for proc_id in ${possibly_hung_xport_procs[*]} ; do
  # Convert the ps(1) "lstart" start time to epoch seconds.
  typeset process_start_time=$(${DATE} --date="$(${PS} -p ${proc_id} --format 'lstart=')" +%s)
  # "Hung" is defined as: started more than 24 hours ago.
  if ((process_start_time < (CURRENT_TIME - ONE_DAY))) ; then
    printf "NOTICE: import process ID %d was started %s.\n" "${proc_id}" \
           "$(strftime '%a, %d %b %Y %T %z' ${process_start_time})"
    if ((INTERACTIVE)) ; then
      if get_yes_no "Terminate process ID ${proc_id}?" ${ANSWER_NO} ; then
        kill_with_privilege "${proc_id}"
      fi
    else
      ((VERBOSE)) && printf "Killing process ID %s: " "${proc_id}" >&2
      kill_with_privilege "${proc_id}"
      ((VERBOSE)) && print "done." >&2
    fi
  fi
done
# Check for missing log settings in the dropboxes.
printf "Checking for missing Log settings in dropboxes...\n"
x=1
for id in ${(k)path_specs[*]} ; do
  ((INTERACTIVE)) && printf "%c\r" "${spinners[$((x++ % spinner_count))]}" > /dev/tty
  # Newer Rivendell schemas have a LOG_TO_SYSLOG column; fall back to
  # LOG_PATH alone on older databases.
  if okDatabaseStructure DROPBOXES:LOG_TO_SYSLOG ; then
    log_specs=$(doSQL "select concat(LOG_TO_SYSLOG,'${myFS}',LOG_PATH) from DROPBOXES where ID=${id}")
    # Only complain when syslog logging is off *and* no log file is set.
    if [[ "${log_specs%${myFS}*}" =~ [Nn] ]] ; then
      if [[ -z "${log_specs#*${myFS}}" ]] ; then
        printf "Warning: no logging configured for dropbox ID %d, path spec %s\n" \
               "${id}" "${path_specs[${id}]#*${myFS}}"
      fi
    fi
  else
    log_path=$(doSQL "select LOG_PATH from DROPBOXES where ID=${id}")
    if [[ -z "${log_path}" ]] ; then
      printf "Warning: no logging configured for dropbox ID %d, path spec %s\n" \
             "${id}" "${path_specs[${id}]#*${myFS}}"
    fi
  fi
done
((INTERACTIVE)) && printf "\r \r\n" > /dev/tty
# A "stranded" file is one that is in a dropbox directory and is not
# registered in DROPBOX_PATHS. This likely means the file was never
# successfully imported. It may also mean the file does not match the
# pattern in DROPBOX.PATH.
oIFS="${IFS}"
IFS=$'\t\n'
x=1
if ((CHECK_STRANDED_FILES)) ; then
  print "Checking for 'stranded' files (in a dropbox folder, but not imported). This may take some time..." >&2
  typeset -A file_history
  file_history=($(doSQL "select DROPBOX_ID,FILE_PATH from DROPBOX_PATHS join DROPBOXES on (DROPBOX_ID = DROPBOXES.ID) where STATION_NAME='${station}'"))
  ((VERBOSE > 1)) && print "File history:\n${(v)file_history[*]}" >&2
  typeset -A dropbox_groups
  dropbox_groups=($(doSQL "select ID,GROUP_NAME from DROPBOXES where STATION_NAME='${station}'"))
  # Iterate over the unique set of dropbox *directories* (several
  # dropboxes may share one directory).
  for path_dir in $(print ${(vj:\n:)${path_specs[*]#*${myFS}}%/*} | ${SORT} -u) ; do
    typeset -a possibly_stranded
    if ! [[ -d "${path_dir}" ]] ; then
      ((VERBOSE)) && printf "Skipping missing Rivendell dropbox folder '%s'\n" "${path_dir}" >&2
      continue
    fi
    # NOTE(review): this relies on perl(1) (to escape regex
    # metacharacters in filenames), which is not in the our_commands
    # availability check above -- confirm it is installed everywhere.
    possibly_stranded=(
      $(${FIND} "${path_dir}" -maxdepth 1 -type f -print | while read -r line ; do
          declare escaped_line=""
          escaped_line="$(print ${line} | perl -ne 's,([\[\]{}\?\&]),\\$1,g; print;')"
          [[ "${escaped_line}" =~ (\.log|/@eaDir/) ]] && continue
          [[ "${(v)file_history[*]}" =~ .*${escaped_line}.* ]] ||
            print "${line}"
          ((INTERACTIVE)) && printf "%c\r" "${spinners[$((x++ % spinner_count))]}" > /dev/tty
        done)
    )
    if [[ -n "${possibly_stranded[*]}" ]] ; then
      # Grammar helpers for the singular/plural message below.
      if ((${#possibly_stranded[*]} == 1)) ; then
        typeset thing='file'
        typeset plural='s'
      else
        typeset thing=$(printf '%d files' "${#possibly_stranded[*]}")
        typeset plural=''
      fi
      printf "The following %s in dropbox folder '%s' appear%c to be stranded:\n" \
             "${thing}" "${path_dir}" "${plural}"
      print "\t${(oj:\n\t:)possibly_stranded[*]}"
      if get_yes_no "Do you want to reset this dropbox?" ${ANSWER_NO} ; then
        ((VERBOSE)) && print "Resetting dropbox..." >&2
        for file_path in ${possibly_stranded[*]} ; do
          doSQL "delete from DROPBOX_PATHS where FILE_PATH = '${file_path}'"
        done
      fi
    fi
  done
  ((INTERACTIVE)) && printf "\r \r\n" > /dev/tty
fi
if ((INTERACTIVE)) ; then
  print "Dropbox audit complete. Press <Enter> to exit." > /dev/tty
  read -r < /dev/tty
fi
exit
# Local Variables: ***
# mode:shell-script ***
# indent-tabs-mode: f ***
# sh-indentation: 2 ***
# sh-basic-offset: 2 ***
# sh-indent-for-do: 0 ***
# sh-indent-after-do: + ***
# sh-indent-comment: t ***
# sh-indent-after-case: + ***
# sh-indent-after-done: 0 ***
# sh-indent-after-else: + ***
# sh-indent-after-if: + ***
# sh-indent-after-loop-construct: + ***
# sh-indent-after-open: + ***
# sh-indent-after-switch: + ***
# sh-indent-for-case-alt: ++ ***
# sh-indent-for-case-label: + ***
# sh-indent-for-continuation: + ***
# sh-indent-for-done: 0 ***
# sh-indent-for-else: 0 ***
# sh-indent-for-fi: 0 ***
# sh-indent-for-then: 0 ***
# End: ***
| true |
f817d0962bd76965347819a0ecefb325ac2dd142
|
Shell
|
marian-nmt/marian-regression-tests
|
/tests/training/multi-gpu/test_sync_sgd.sh
|
UTF-8
| 841 | 2.640625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash -x
# Marian regression test: train a tiny model for 100 batches with
# synchronous SGD on two GPUs and diff the per-step training costs
# against a stored reference (sync_sgd.expected).
# Requires $MRT_MARIAN, $MRT_DATA, $MRT_TOOLS and $MRT_NUM_DEVICES,
# which are exported by the regression-test framework.

# Exit on error
set -e

# NOTE(review): exit status 100 appears to be a special "skipped"
# status for the framework -- confirm against the test runner.
if (( $MRT_NUM_DEVICES < 2 )); then
  echo "Too few devices available"
  exit 100
fi

# Test code goes here
rm -rf sync_sgd sync_sgd.log
mkdir -p sync_sgd

# Fixed seed, no shuffling and sgd (not adam) keep the run deterministic
# so the cost trace is comparable across runs.
$MRT_MARIAN/marian \
    --no-shuffle --seed 777 --mini-batch 4 --maxi-batch 1 --maxi-batch-sort none \
    --dim-rnn 64 --dim-emb 32 --learn-rate 0.001 --clip-norm 0 \
    --devices 0 1 --sync-sgd --optimizer sgd --cost-type ce-mean \
    -m sync_sgd/model.full.npz -t $MRT_DATA/europarl.de-en/corpus.bpe.{en,de} -v vocab.en.yml vocab.de.yml \
    --disp-freq 10 --after-batches 100 \
    --log sync_sgd.log

test -e sync_sgd/model.full.npz
test -e sync_sgd.log

# Compare the extracted cost column against the reference within a
# small numeric tolerance.
cat sync_sgd.log | $MRT_TOOLS/extract-costs.sh > sync_sgd.out
$MRT_TOOLS/diff-nums.py sync_sgd.out sync_sgd.expected -p 0.0001 -o sync_sgd.diff

# Exit with success code
exit 0
| true |
e28b6a9167b3908786922f4de606c42d4f92b9dd
|
Shell
|
cezs/dotfiles
|
/bash/.bash_profile
|
UTF-8
| 886 | 3.4375 | 3 |
[
"Unlicense"
] |
permissive
|
#!/bin/bash
#
# ~/.profile: executed by the command interpreter for login shells.
# This file is not read by bash(1), if ~/.bash_profile or ~/.bash_login
# exists.
# see /usr/share/doc/bash/examples/startup-files for examples.
# the files are located in the bash-doc package.
#
# the default umask is set in /etc/profile; for setting the umask
# for ssh logins, install and configure the libpam-umask package.
#umask 022

# If running bash
if [ -n "$BASH_VERSION" ]; then
  # Include .bashrc if it exists.
  # BUG FIX: tilde does not expand inside double quotes, so the
  # original '"~/.bashrc"' looked for a file literally named '~';
  # use $HOME instead.
  if [ -f "$HOME/.bashrc" ]; then
    . "$HOME/.bashrc"
  fi
fi

# Set PATH so it includes user's private bin if it exists
# (same quoted-tilde fix as above).
if [ -d "$HOME/bin" ] ; then
  PATH="$PATH:$HOME/bin"
  export PATH
fi

# Clean up PATH by removing duplicates (first occurrence wins).
# BUG FIX: use printf instead of echo so no trailing newline gets
# embedded in PATH, and strip awk's trailing ORS ':' -- a trailing
# colon is an empty PATH entry, i.e. "search the current directory".
PATH=$(printf '%s' "$PATH" | awk -v RS=':' -v ORS=":" '!a[$1]++')
PATH="${PATH%:}"

# Autostart X at login on the first virtual console.
# Guard the fgconsole call: on systems without it the original
# '[ "$(fgconsole)" -eq 1 ]' produced an error message at every login.
if [ -z "$DISPLAY" ] && command -v fgconsole >/dev/null 2>&1 \
    && [ "$(fgconsole 2>/dev/null)" = 1 ]; then
  exec startx
fi
| true |
15b704e773f2b1b858de6e7d4501fded6c409ddf
|
Shell
|
sunxm2357/TwoStreamVAN
|
/bashes/classifier/weizmann_eval.bash
|
UTF-8
| 688 | 2.59375 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Evaluate the Weizmann action classifier.
# Optional first argument '--is_actor' switches both the checkpoint and
# the model/data flags to the joint actor+action variant.
cd classifier/
EXP_NAME='weizmann_classifier'
if [ "${1}" == "--is_actor" ]
then
    EXP_NAME+='_actor'
    MODEL_IS_ACTOR='--model_is_actor'
    DATA_IS_ACTOR='--data_is_actor'
    CKPT_PATH='/research/sunxm/classifier/weizmann/checkpoints/actorAction_best_model.pth.tar'
else
    # Single-space placeholders so the unquoted expansions below expand
    # to nothing (no extra flags are passed).
    MODEL_IS_ACTOR=' '
    DATA_IS_ACTOR=' '
    CKPT_PATH='/research/sunxm/classifier/weizmann/checkpoints/action_best_model.pth.tar'
fi
python eval.py \
    --exp_name ${EXP_NAME} \
    --dataset Weizmann \
    --dataroot /scratch/dataset/Weizmann_crop_npy_new2/ \
    --textroot ../videolist/Weizmann/ \
    --ckpt_path ${CKPT_PATH} \
    ${MODEL_IS_ACTOR} \
    ${DATA_IS_ACTOR}
| true |
9685c366a816f858052374de09ea680cc3fcc32b
|
Shell
|
AeolusX/daily-scripts
|
/shell-scripts/软件安装脚本/insPHP7.sh
|
UTF-8
| 9,880 | 3.625 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
# PHP 5.5/7 source-build installer (CentOS/yum based).
base_dir=/app/local/php                        # installation prefix
srvurl="http://repo-ops.soft.com/soft/php"     # internal source mirror
version=7.1.17                                 # PHP 7 version to build
LOG=/tmp/phpinstall.log                        # progress marker log
# Verify the install prefix does not already exist; create it when
# absent.  Aborts the installer when the directory is already there
# (a previous installation may be present).
path_check(){
  echo "开始检测目录"
  for dir in $base_dir
  do
    if [ -d "$dir" ];then
      echo -e "\033[31mError:${dir}目录已存在,请先确认次目录是否已安装php,如果没有安装,请手动删除此目录,并重新运行此程序!\033[0m"
      # BUG FIX: exit with a non-zero status on this error path;
      # the original 'exit 0' reported success to the caller.
      exit 1
    else
      mkdir -p "${dir}"
      echo "${dir}目录已创建完成!"
    fi
  done
}
# Verify that TCP port 9000 (php-fpm's default listen port) is free.
# Aborts the installer when something is already listening there.
port_check(){
  port_nu=$(netstat -ntlp 2>/dev/null | grep -w 9000 | wc -l)
  if [ "$port_nu" -gt 0 ]; then
    echo -e "\033[31mError: 安装程序已退出,PHP port 被占用,请确认该端口号\033[0m"
    # BUG FIX: exit non-zero on the error path (the original 'exit 0'
    # signalled success even though the port was occupied).
    exit 1
  fi
}
num1=`id www| wc -l`
if [ $num1 -lt 1 ]; then
/usr/sbin/groupadd www
/usr/sbin/useradd www -g www
mkdir /home/www/.ssh -p
cd /home/www/.ssh && wget $srvurl/authorized_keys
chmod 600 /home/www/.ssh/authorized_keys
chmod 700 /home/www/.ssh/
chown www:www /home/www/.ssh -R
echo "www用户已创建完成"
else
echo "www用户已存在,无需创建"
fi
}
# Install the C toolchain and library -devel packages needed to build
# PHP from source; remove any distro-packaged PHP/MySQL/Apache first so
# they do not conflict with the source build.  Requires root and yum.
dev_install(){
  yum -y install yum-fastestmirror
  yum remove httpd mysql mysql-server php php-cli php-common php-devel php-gd -y
  yum install -y wget gcc gcc-c++ openssl* curl curl-devel libxml2 libxml2-devel glibc glibc-devel glib2 glib2-devel gd gd2 gd-devel gd2-devel libaio autoconf libjpeg libjpeg-devel libpng libpng-devel freetype freetype-devel
  echo "基础开发环境已安装完成!"
}
# Build and install the mcrypt stack (libmcrypt, mhash, mcrypt) from
# the internal mirror; these are prerequisites for PHP's mcrypt
# extension.  Each successful step appends a marker line to $LOG.
soft_install(){
  wget $srvurl/libmcrypt-2.5.8.tar.gz
  tar -zxvf libmcrypt-2.5.8.tar.gz 1> /dev/null
  cd libmcrypt-2.5.8
  ./configure && make && make install && echo "install libmcrypt ok">$LOG
  cd ../ && rm -rf libmcrypt-2.5.8*
  wget $srvurl/mhash-0.9.9.9.tar.gz
  tar -zxvf mhash-0.9.9.9.tar.gz 1> /dev/null
  cd mhash-0.9.9.9
  ./configure
  make && make install && echo "install mhash ok">>$LOG
  cd ../ && rm -rf mhash-0.9.9.9*
  wget $srvurl/mcrypt-2.6.8.tar.gz
  tar -zxvf mcrypt-2.6.8.tar.gz 1> /dev/null
  cd mcrypt-2.6.8
  # mcrypt's configure needs the just-installed libmcrypt in
  # /usr/local/lib at configure time.
  LD_LIBRARY_PATH=/usr/local/lib ./configure
  make && make install && echo "install mcrypt ok">>$LOG
  cd ../ && rm -rf mcrypt-2.6.8*
}
# Build PHP 5.5.27 from source into /app/local/php with fpm enabled,
# then build the memcache, mongo and redis PECL extensions against it,
# and fetch the matching php.ini / php-fpm.conf templates.
# NOTE(review): callers must use this exact name (php5_5_install);
# '5.5' is not a valid character sequence in a function name.
php5_5_install(){
  wget $srvurl/php5.5/php-5.5.27.tar.gz
  tar zxvf php-5.5.27.tar.gz 1> /dev/null
  cd php-5.5.27
  ./configure --prefix=/app/local/php \
  --with-config-file-path=/app/local/php/etc \
  --enable-fpm \
  --enable-mbstring \
  --with-mhash \
  --with-mcrypt \
  --with-curl \
  --with-openssl \
  --with-mysql=mysqlnd \
  --with-mysqli=mysqlnd \
  --with-pdo-mysql=mysqlnd
  make && make install
  # The PHP tarball ships a ready-made SysV init script for php-fpm.
  cp ./sapi/fpm/init.d.php-fpm /etc/rc.d/init.d/php-fpm
  cd ../ && rm -rf php-5.5.27*
  wget $srvurl/php5.5/src/memcache-3.0.8.tgz
  tar zxvf memcache-3.0.8.tgz 1> /dev/null
  cd memcache-3.0.8
  /app/local/php/bin/phpize
  ./configure --enable-memcache \
  --with-php-config=/app/local/php/bin/php-config \
  --with-zlib-dir
  make && make install
  cd ../ && rm -rf memcache-3.0.8*
  wget $srvurl/php5.5/src/mongo-1.6.10.tgz
  tar zxvf mongo-1.6.10.tgz 1> /dev/null
  cd mongo-1.6.10
  /app/local/php/bin/phpize
  ./configure --with-php-config=/app/local/php/bin/php-config
  make && make install
  cd ../ && rm -rf mongo-1.6.10*
  wget $srvurl/php5.5/src/redis-2.2.7.tgz
  tar zxvf redis-2.2.7.tgz 1> /dev/null
  cd redis-2.2.7
  /app/local/php/bin/phpize
  ./configure --with-php-config=/app/local/php/bin/php-config
  make && make install
  cd ../ && rm -rf redis-2.2.7*
  # Configuration templates, installed later by php_config().
  wget $srvurl/php5.5/init/php.ini
  wget $srvurl/php5.5/init/php-fpm.conf
}
# Build PHP ${version} (7.x) from source into /app/local/php.
# First builds the private dependency stack under /app/local/libs
# (libmemcached, freetype, jpeg, png, icu), then PHP itself with fpm,
# then the memcached/mongodb/redis PECL extensions, and finally fetches
# the php.ini / php-fpm.conf templates (installed by php_config()).
php7_install(){
  wget $srvurl/php7.0/src/libmemcached-1.0.18.tar.gz
  tar -zxvf libmemcached-1.0.18.tar.gz 1> /dev/null
  cd libmemcached-1.0.18
  ./configure --prefix=/app/local/libs/libmemcached --with-memcached
  make && make install && echo "install libmemcached ok">>$LOG
  cd ../ && rm -rf libmemcached-1.0.18*
  #install freetype
  wget $srvurl/php7.0/src/freetype-2.7.tar.bz2
  tar -xvf freetype-2.7.tar.bz2 1>/dev/null
  cd freetype-2.7
  ./configure --prefix=/app/local/libs/freetype --enable-shared && make && make install && echo "install freetype" >>$LOG
  cd ../ && rm -rf freetype-2.7*
  ###install jpg
  wget $srvurl/php7.0/src/jpegsrc.v9c.tar.gz
  tar -xvf jpegsrc.v9c.tar.gz 1>/dev/null
  cd jpeg-9c
  ./configure --prefix=/app/local/libs/jpeg --enable-shared
  make && make install && echo "install jpeg ok" >>$LOG
  cd ../ && rm -rf jpeg*
  #####install png
  wget $srvurl/php7.0/src/libpng-1.6.32.tar.gz
  tar -xvf libpng-1.6.32.tar.gz 1>/dev/null
  cd libpng-1.6.32
  ./configure --prefix=/app/local/libs/png --enable-shared
  make && make install && echo "install png" >>$LOG
  cd ../ && rm -rf libpng-1.6.32*
  #################### install icu
  # icu is needed for PHP's intl extension (--enable-intl below).
  wget $srvurl/php7.0/src/icu4c-58_2-src.tgz
  tar -xf icu4c-58_2-src.tgz 1>/dev/null
  cd icu/source
  ./configure --prefix=/app/local/libs/icu && make && make install && echo "install icu OK" >>$LOG
  cd ../../ && rm -rf icu*
  ####
  wget $srvurl/php7.0/php-${version}.tar.gz
  tar -zxvf php-${version}.tar.gz 1> /dev/null
  # NOTE(review): the mirror's tarball apparently ships a patched zlib
  # config as config0.m4; copy it into place before configure.
  cp -ar php-${version}/ext/zlib/config0.m4 php-${version}/ext/zlib/config.m4
  cd php-${version}
  ./configure --prefix=/app/local/php \
  --with-config-file-path=/app/local/php/etc \
  --enable-fpm \
  --enable-mbstring \
  --with-mhash \
  --with-mcrypt \
  --with-curl \
  --with-openssl \
  --with-mysqli=mysqlnd \
  --with-pdo-mysql=mysqlnd \
  --with-jpeg-dir=/app/local/libs/jpeg \
  --with-png-dir=/app/local/libs/png \
  --with-freetype-dir=/app/local/libs/freetype \
  --with-gd \
  --with-zlib \
  --enable-ftp \
  --enable-soap \
  --enable-bcmath \
  --with-icu-dir=/app/local/libs/icu \
  --enable-intl
  make && make install && echo "install php7 ok">>$LOG
  cp ./sapi/fpm/init.d.php-fpm /etc/rc.d/init.d/php-fpm
  cd ../ && rm -rf php-${version}*
  #wget $srvurl/php7.0/src/php-memcached-php7.zip
  #unzip php-memcached-php7.zip 1> /dev/null
  #cd php-memcached-php7
  #/app/local/php/bin/phpize
  #./configure --enable-memcached \
  #--with-php-config=/app/local/php/bin/php-config \
  #--with-zlib-dir \
  #--with-libmemcached-dir=/app/local/libs/libmemcached \
  #--disable-memcached-sasl
  #make && make install
  #cd ../ && rm -rf php-memcached-php7*
  #wget $srvurl/php7.0/src/pecl-memcache-php7.zip
  #unzip pecl-memcache-php7.zip 1> /dev/null
  #cd pecl-memcache-php7
  #/app/local/php/bin/phpize
  #./configure --with-php-config=/app/local/php/bin/php-config
  #make && make install
  #cd ../ && rm -rf pecl-memcache-php7*
  wget $srvurl/php7.0/src/memcached-3.0.4.tgz
  tar -xf memcached-3.0.4.tgz 1>/dev/null
  cd memcached-3.0.4
  /app/local/php/bin/phpize
  ./configure --enable-memcached \
  --with-php-config=/app/local/php/bin/php-config \
  --with-zlib-dir \
  --with-libmemcached-dir=/app/local/libs/libmemcached \
  --disable-memcached-sasl
  make && make install && echo "installed memcached3.0.4 success">>$LOG
  cd ../ && rm -rf memcached-3.0.4*
  wget $srvurl/php7.0/src/mongodb-1.4.3.tgz
  tar zxvf mongodb-1.4.3.tgz 1> /dev/null
  cd mongodb-1.4.3
  /app/local/php/bin/phpize
  ./configure --with-php-config=/app/local/php/bin/php-config
  make && make install && echo "install mongodb ok">>$LOG
  cd ../ && rm -rf mongodb-1.4.3*
  #wget $srvurl/php7.0/src/phpredis-php7.zip
  #unzip phpredis-php7.zip 1> /dev/null
  # cd phpredis-php7
  # /app/local/php/bin/phpize
  # ./configure --with-php-config=/app/local/php/bin/php-config
  # make && make install
  # cd ../ && rm -rf phpredis-php7*
  wget $srvurl/php7.0/src/redis-4.0.2.tgz
  tar -xf redis-4.0.2.tgz 1>/dev/null
  cd redis-4.0.2
  /app/local/php/bin/phpize
  ./configure --with-php-config=/app/local/php/bin/php-config
  make && make install && echo "install redis ok">>$LOG
  cd ../ && rm -rf redis-4.0.2*
  wget $srvurl/php7.0/init/php.ini
  wget $srvurl/php7.0/init/php-fpm.conf
}
# Install the downloaded php.ini / php-fpm.conf, add the PHP bin dir to
# the system PATH, register the php-fpm init script with chkconfig and
# start the service.  Requires root; assumes php.ini and php-fpm.conf
# were fetched into the current directory by the install function.
php_config(){
  cp php.ini /app/local/php/etc/
  cp php-fpm.conf /app/local/php/etc/
  echo 'export PATH=$PATH:/app/local/php/bin'>>/etc/profile && source /etc/profile
  # Pre-create the slow log so php-fpm can open it at startup.
  touch /app/local/php/var/log/php-fpm.slow.log
  chmod +x /etc/rc.d/init.d/php-fpm
  chkconfig --add php-fpm
  chkconfig php-fpm on
  /etc/init.d/php-fpm start
  rm -rf php.ini && rm -rf php-fpm.conf && rm -rf package.xml
}
# Write the logrotate policy for php-fpm's logs: daily rotation,
# 20 rotations kept, date-suffixed, with a postrotate SIGUSR1 to make
# php-fpm reopen its log files.  Requires root (writes /etc/logrotate.d).
php_logrotate(){
  echo '/app/local/php/var/log/*.log {
daily
missingok
notifempty
nocompress
dateext
dateformat %Y%m%d
rotate 20
sharedscripts
postrotate
if [ -s "/app/local/php/var/run/php-fpm.pid" ] ; then
/bin/kill -SIGUSR1 `cat /app/local/php/var/run/php-fpm.pid 2>/dev/null` 2>/dev/null || true
fi
endscript
}' > /etc/logrotate.d/php-fpm
}
# Prompt for the PHP version to install and run the corresponding
# pipeline (environment checks, dependency builds, PHP build, config,
# logrotate).  Re-prompts recursively on unrecognized input.
php_package_choice(){
  read -p "Please choose php version (press 5.5/7.0) :" php_version
  case $php_version in
    5|5.5|php5.5)
      echo "php5.5 will be installed."
      path_check
      port_check
      user_check
      dev_install
      soft_install
      # BUG FIX: the function is named php5_5_install; the original
      # called 'php5.5_install', which is not a valid command name and
      # always failed with "command not found".
      php5_5_install
      php_config
      php_logrotate
      ;;
    7|7.0|php7)
      echo "php7.0 will be installed."
      path_check
      port_check
      user_check
      dev_install
      soft_install
      php7_install
      php_config
      php_logrotate
      ;;
    *)
      echo "Your input is wrong, please try again."
      php_package_choice
      ;;
  esac
}
php_package_choice
| true |
3f6e3d48aa632962cf56ad2565976aaf61d1eac4
|
Shell
|
janvanmansum/archivematica
|
/ansible/files/create-superuser.sh
|
UTF-8
| 546 | 2.984375 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Create a Django superuser for the Archivematica Storage Service.
# Usage: create-superuser.sh <username> <email> <password>
SUPER_NAME=$1
SUPER_EMAIL=$2
SUPER_PASSWORD=$3

# NOTE(review): 'set -x' traces every command, which echoes the
# password into logs; consider dropping -x if the output is retained.
set -a -e -x

# BUG FIX: the original used a subshell here -- '(echo ...; exit 1)' --
# so 'exit 1' only left the subshell and the script carried on without
# an environment; a brace group aborts the whole script as intended.
source /etc/default/archivematica-storage-service || \
    source /etc/sysconfig/archivematica-storage-service || \
    { echo 'Environment file not found' >&2; exit 1; }

cd /usr/lib/archivematica/storage-service

# Pass the credentials through the environment instead of interpolating
# them into Python source: the original broke on (and allowed injection
# via) quote characters in any of the three arguments.
export SUPER_NAME SUPER_EMAIL SUPER_PASSWORD
/usr/share/archivematica/virtualenvs/archivematica-storage-service/bin/python manage.py shell <<'EOF'
import os
from django.contrib.auth import get_user_model
User = get_user_model()
User.objects.create_superuser(
    os.environ['SUPER_NAME'],
    os.environ['SUPER_EMAIL'],
    os.environ['SUPER_PASSWORD'],
)
EOF
| true |
ea7ea5086b1eb39993fc39a41f5f280c5af89087
|
Shell
|
jakutis/dotfiles
|
/bin/git-remove-local
|
UTF-8
| 139 | 3.0625 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Delete every local branch except 'master'.
# BUG FIX: the original parsed 'git branch' porcelain output, so the
# current-branch marker '* ' leaked into the list, and the substring
# filter 'grep -v master' also skipped unrelated branches such as
# 'feature-master-fix'.  Use the stable plumbing command and an
# exact-line match instead.
BRANCHES=$(git for-each-ref --format='%(refname:short)' refs/heads/ | grep -vx master)
if [ -z "$BRANCHES" ]
then
  exit
fi
printf '%s\n' "$BRANCHES" | xargs git branch -D
| true |
4ed6b9a07b6d631be1478cc6cca563f53848b3a1
|
Shell
|
qinshulei/leetcode
|
/TransposeFile/test.bash
|
UTF-8
| 200 | 3.046875 | 3 |
[] |
no_license
|
#!/bin/bash
# Memory Limit Exceeded
row=`head file.txt -n1 | grep -o " " | wc -l`
let row=row+1
for i in `seq $row`;do
awk -vrow_line=$i '{print $row_line}' file.txt | sed ':a N;s/\n/ /g;ta'
done
| true |
0997a3a9601b447d40844aa6a454facf9ad8c908
|
Shell
|
mastarink/zoc
|
/projects/shn/doprj.sh
|
UTF-8
| 95 | 2.625 | 3 |
[] |
no_license
|
#!/bin/sh
# Load the shn work library via the ./shn symlink (when present) and
# forward all script arguments to the 'shn' entry point it defines.
# BUG FIX: the original used the bashism '[[ ... ]]' under #!/bin/sh
# (a syntax error on strict POSIX shells such as dash) and an unquoted
# $@, which word-splits arguments containing whitespace.
if [ -L shn ] && [ -f shn/libwork.bash ]; then
  . shn/libwork.bash
  shn "$@"
fi
| true |
9102436c154764c09d992d0a070cf79332fbc272
|
Shell
|
straubt1/terraform-enterprise-backup-service
|
/tfedr
|
UTF-8
| 1,788 | 3.921875 | 4 |
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/usr/bin/env bash
# Terraform Enterprise Backup and Restore CLI dispatcher.
set -e

export TFEDR_CLI_WORKDIR=$(cd $(dirname $0) && pwd)

# Print usage and exit non-zero (also used as the default case arm).
cli_help() {
  cli_name=${0##*/}
  echo "
$cli_name
Terraform Enterprise Backup and Restore CLI
Version: $(cat $TFEDR_CLI_WORKDIR/version)
https://github.com/straubt1/terraform-enterprise-backup-service
Usage: $cli_name [task]
Tasks:
  install-backup      Install Backup
  uninstall-backup    Uninstall Backup
  enable-backup       Enable Backup
  install-restore     Install Restore
  uninstall-restore   Uninstall Restore
  enable-restore      Enable Restore
  *                   Help
"
  exit 1
}

# cli_log "Exporting config ..."
# Require a config file next to the script and load it into the
# environment (one KEY=VALUE per line).
[ ! -f "$TFEDR_CLI_WORKDIR/config" ] \
  && echo "ERROR: No $TFEDR_CLI_WORKDIR/config file found. " \
  && echo "cp $TFEDR_CLI_WORKDIR/config.template $TFEDR_CLI_WORKDIR/config and adjust." \
  && exit 1
export $(cat "$TFEDR_CLI_WORKDIR/config" | xargs)

# Dispatch to the task script, teeing its output into a per-task log.
# BUG FIX: every case arm carried the copy-pasted short alias 'ib', so
# only install-backup's alias could ever match (case takes the first
# matching arm); give each task its own distinct alias.
case "$1" in
  install-backup|ib)
    "$TFEDR_CLI_WORKDIR/tasks/install-backup" "${@:2}" | tee -ia "$TFEDR_CLI_WORKDIR/logs/install-backup_${2}.log"
    ;;
  uninstall-backup|ub)
    "$TFEDR_CLI_WORKDIR/tasks/uninstall-backup" "${@:2}" | tee -ia "$TFEDR_CLI_WORKDIR/logs/uninstall-backup_${2}.log"
    ;;
  enable-backup|eb)
    "$TFEDR_CLI_WORKDIR/tasks/enable-backup" "${@:2}" | tee -ia "$TFEDR_CLI_WORKDIR/logs/enable-backup_${2}.log"
    ;;
  install-restore|ir)
    "$TFEDR_CLI_WORKDIR/tasks/install-restore" "${@:2}" | tee -ia "$TFEDR_CLI_WORKDIR/logs/install-restore_${2}.log"
    ;;
  uninstall-restore|ur)
    "$TFEDR_CLI_WORKDIR/tasks/uninstall-restore" "${@:2}" | tee -ia "$TFEDR_CLI_WORKDIR/logs/uninstall-restore_${2}.log"
    ;;
  enable-restore|er)
    "$TFEDR_CLI_WORKDIR/tasks/enable-restore" "${@:2}" | tee -ia "$TFEDR_CLI_WORKDIR/logs/enable-restore_${2}.log"
    ;;
  *)
    cli_help
    ;;
esac
| true |
7fda52fb2cb0ac519bab9d0a43a8a64556249b04
|
Shell
|
yuxng/posecnn-pytorch
|
/experiments/scripts/demo.sh
|
UTF-8
| 469 | 2.578125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the PoseCNN demo on the bundled kitchen images.
# Usage: demo.sh <gpu-id>  -- the first argument selects the visible CUDA device.
# Trace each command (-x) and stop on the first error (-e).
set -x
set -e
export PYTHONUNBUFFERED="True"
# Restrict CUDA to the GPU given as the first CLI argument.
export CUDA_VISIBLE_DEVICES=$1
# Timestamped log file; mirror all stdout/stderr of the rest of the run into it.
LOG="experiments/logs/demo.txt.`date +'%Y-%m-%d_%H-%M-%S'`"
exec &> >(tee -a "$LOG")
echo Logging output to "$LOG"
# Run inference with a pretrained PoseCNN checkpoint on the sample images.
# NOTE(review): presumably '--gpu 0' is intended because CUDA_VISIBLE_DEVICES
# above remaps the chosen physical GPU to index 0 -- confirm.
time ./tools/test_images.py --gpu 0 \
  --imgdir data/Images/kitchen \
  --network posecnn \
  --pretrained data/checkpoints/vgg16_ycb_object_pose_dgx_2_epoch_16.checkpoint.pth \
  --dataset ycb_object_test \
  --cfg experiments/cfgs/ycb_object_subset_demo.yml
| true |
79b6e59c4c3b539fbc6ae25dd0b64c9ba204ad65
|
Shell
|
vinibfranc/ScriptingTools
|
/ShellScript/basics/tutorial_derekBanas.sh
|
UTF-8
| 8,448 | 4 | 4 |
[] |
no_license
|
#!/bin/bash
# Comment
# echo "Hello World"
# myName = "Vini"
# declare -r NUM1=5
# num2=4
# num3=$((NUM1+num2))
# num4=$((NUM1-num2))
# num5=$((NUM1*num2))
# num6=$((NUM1/num2))
# echo "5 + 4 = $num3"
# echo "5 - 4 = $num4"
# echo "5 * 4 = $num5"
# echo "5 / 4 = $num6"
# echo $((5**2))
# echo $(( 5%4 ))
# Assignment operators allow for shorthand arithmetic
# +=, -=, *=, /=
# rand=5
# let rand+=4
# echo "$rand"
# Shorthand increment and decrement
# echo "rand++ = $(( rand++ ))"
# echo "++rand = $(( ++rand ))"
# echo "rand-- = $(( rand-- ))"
# echo "--rand = $(( --rand ))"
# Use Python to add floats
# num7=1.2
# num8=3.4
# num9=$(python -c "print($num7+$num8)")
# echo $num9
# You can print over multiple lines with a Here Script
# cat prints a file or any string past to it
# cat << END
# This text
# prints on
# many lines
# END
######################
# # Define function
######################
# getDate() {
# # Get current date and time
# date
# # Return returns an exit status number between 0 - 255
# return
# }
# getDate
# # This is a global variable
# name="Derek"
# # Local variable values aren't available outside of the function
# demLocal() {
# local name="Paul"
# return
# }
# demLocal
# echo "$name"
# # A function that receives 2 values and prints a sum
# getSum() {
# # Attributes are retrieved by referring to $1, $2, etc.
# local num3=$1
# local num4=$2
# # Sum values
# local sum=$((num3+num4))
# # Pass values back with echo
# echo $sum
# }
# num1=5
# num2=6
# # You pass atributes by separating them with a space
# # Surround function call with $() to get the return value
# sum=$(getSum num1 num2)
# echo "The sum is $sum"
######################
# Conditionals / Input
######################
# You can use read to receive input which is stored in name
# The p option says that we want to prompt with a string
# read -p "What is your name? " name
# echo "Hello $name"
# read -p "How old are you? " age
# You place your condition with in []
# Include a space after [ and before ]
# Integer Comparisons: eq, ne, le, lt, ge, gt
# if [ $age -ge 16 ]
# then
# echo "You can drive"
# # Check another condition
# elif [ $age -eq 15 ]
# then
# echo "You can drive next year"
# # Executed by default
# else
# echo "You can't drive"
# # Closes the if statement
# fi
# Extended integer test
# read -p "Enter a number : " num
# if ((num == 10)); then
# echo "Your number equals 10"
# fi
# if ((num > 10)); then
# echo "It is greater then 10"
# else
# echo "It is less then 10"
# fi
# if (( ((num % 2)) == 0 )); then
# echo " It is even"
# fi
# You can use logical operators like &&, || and !
# if (( ((num > 0)) && ((num < 11)) )); then
# echo "$num is between 1 and 10"
# fi
# Create a file and then if that worked open it in Vim
#touch samp_file && vim samp_file
# If samp_dir doesn't exist make it
#[ -d samp_dir ] || mkdir samp_dir
# Delete file rm samp_file
# Delete directory rmdir samp_dir
# Testing strings
# str1=""
# str2="Sad"
# str3="Happy"
# # Test if a string is null
# if [ "$str1" ]; then
# echo "$str1 is not null"
# fi
# if [ -z "$str1" ]; then
# echo "str1 has no value"
# fi
# # Check for equality
# if [ "$str2" == "$str3" ]; then
# echo "$str2 equals $str3"
# elif [ "$str2" != "$str3" ]; then
# echo "$str2 is not equal to $str3"
# fi
# if [ "$str2" > "$str3" ]; then
# echo "$str2 is greater then $str3"
# elif [ "$str2" < "$str3" ]; then
# echo "$str2 is less then $str3"
# fi
# # Check the file test_file1 and test_file2
# file1="./test_file1"
# file2="./test_file2"
# if [ -e "$file1" ]; then
# echo "$file1 exists"
# if [ -f "$file1" ]; then
# echo "$file1 is a normal file"
# fi
# if [ -r "$file1" ]; then
# echo "$file1 is readable"
# fi
# if [ -w "$file1" ]; then
# echo "$file1 is writable"
# fi
# if [ -x "$file1" ]; then
# echo "$file1 is executable"
# fi
# if [ -d "$file1" ]; then
# echo "$file1 is a directory"
# fi
# if [ -L "$file1" ]; then
# echo "$file1 is a symbolic link"
# fi
# if [ -p "$file1" ]; then
# echo "$file1 is a named pipe"
# fi
# if [ -S "$file1" ]; then
# echo "$file1 is a network socket"
# fi
# if [ -G "$file1" ]; then
# echo "$file1 is owned by the group"
# fi
# if [ -O "$file1" ]; then
# echo "$file1 is owned by the userid"
# fi
# fi
# With extended test [[ ]] you can use Regular Expressions
# read -p "Validate Date : " date
# pat="^[0-9]{8}$"
# if [[ $date =~ $pat ]]; then
# echo "$date is valid"
# else
# echo "$date is not valid"
# fi
# Read multiple values
# read -p "Enter 2 Numbers to Sum : " num1 num2
# sum=$((num1+num2))
# echo "$num1 + $num2 = $sum"
# # Hide the input with the s code
# read -sp "Enter the Secret Code" secret
# if [ "$secret" == "password" ]; then
# echo "Enter"
# else
# echo "Wrong Password"
# fi
# Use case to when it makes more sense then if
# read -p "How old are you : " age
# # Check the value of age
# case $age in
# # Match numbers 0 - 4
# [0-4])
# echo "To young for school"
# ;; # Stop checking further
# # Match only 5
# 5)
# echo "Go to kindergarten"
# ;;
# # Check 6 - 18
# [6-9]|1[0-8])
# grade=$((age-5))
# echo "Go to grade $grade"
# ;;
# # Default action
# *)
# echo "You are to old for school"
# ;;
# esac # End case
# 8. Ternary Operator performs different actions based on a condition
# #!/bin/bash
# can_vote=0
# age=18
# ((age>=18?(can_vote=1):(can_vote=0)))
# echo "Can Vote : $can_vote"
######################
# Parameter Expansions and Strings
######################
# rand_str="A random string"
# # Get string length
# echo "String Length : ${#rand_str}"
# # Get string slice starting at index (0 index)
# echo "${rand_str:2}"
# # Get string with starting and ending index
# echo "${rand_str:2:7}"
# # Return whats left after A
# echo "${rand_str#*A }"
######################
# Looping
######################
# While
# num=1
# while [ $num -le 10 ]; do
# echo $num
# num=$((num + 1))
# done
# Continue and Break
# num=1
# while [ $num -le 20 ]; do
# # Don't print evens
# if (( ((num % 2)) == 0 )); then
# num=$((num + 1))
# continue
# fi
# # Jump out of the loop with break
# if ((num >= 15)); then
# break
# fi
# echo $num
# num=$((num + 1))
# done
# Until loops until the loop is true
# num=1
# until [ $num -gt 10 ]; do
# echo $num
# num=$((num + 1))
# done
# Use read and a loop to output file info
# while read avg rbis hrs; do
# # printf allows you to use \n
# printf "Avg: ${avg}\nRBIs: ${rbis}\nHRs: ${hrs}\n"
# # Pipe data into the while loop
# done < barry_bonds.txt
# There are many for loop options. Here is the C form.
# for (( i=0; i <= 10; i=i+1 )); do
# echo $i
# done
# We can cycle through ranges
# for i in {A..Z}; do
# echo $i
# done
######################
# Arrays
######################
# Messing with arrays
# Create an array
# fav_nums=(3.14 2.718 .57721 4.6692)
# echo "Pi : ${fav_nums[0]}"
# # Add value to array
# fav_nums[4]=1.618
# echo "GR : ${fav_nums[4]}"
# # Add group of values to array
# fav_nums+=(1 7)
# # Output all array values
# for i in ${fav_nums[*]}; do
# echo $i;
# done
# # Output indexes
# for i in ${!fav_nums[@]}; do
# echo $i;
# done
# # Get number of items in array
# echo "Array Length : ${#fav_nums[@]}"
# # Get length of array element
# echo "Index 3 length : ${#fav_nums[3]}"
# # Sort an array
# sorted_nums=($(for i in "${fav_nums[@]}"; do
# echo $i;
# done | sort))
# for i in ${sorted_nums[*]}; do
# echo $i;
# done
# # Delete array element
# unset 'sorted_nums[1]'
# # Delete Array
# unset sorted_nums
######################
# Positional Parameters
######################
# Print the first argument
# Show the first positional parameter.
echo "1st Argument : $1"
sum=0
# Add up every argument.  "$@" expands to each positional parameter as a
# separate word, so no shift bookkeeping is needed.
for num in "$@"; do
  sum=$((sum + num))
done
echo "Sum : $sum"
| true |
6dac3dda21d7d7f8d573e0de07e0e4fda6c67b98
|
Shell
|
missinglink/mongrel2-php
|
/install/zmqphp-install.sh
|
UTF-8
| 585 | 3.0625 | 3 |
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# Build and install the php-zmq (ZeroMQ) PHP extension from source on a
# Debian/Ubuntu system, then register it in the PHP configuration.
# Install build prerequisites: git plus the PHP 5 development tool-chain.
sudo apt-get update
sudo apt-get install -y git-core php5-common php5-dev php5-cli php5-uuid
# Fetch the extension sources into /tmp.
cd /tmp
git clone git://github.com/mkoppanen/php-zmq.git
cd php-zmq
git pull origin master
# Standard PHP extension build: generate configure, build, install.
phpize
./configure
make
sudo make install
# Locate PHP's additional-ini scan directory and enable the extension there.
PHP_INI_PATH=$(php --ini | grep "Scan for additional" | sed -e "s|.*:\s*||")
echo "extension=zmq.so" | sudo tee $PHP_INI_PATH/zmq.ini
# Show that the zmq module is now visible to PHP.
clear
php -i | grep zmq
echo "Installation Complete"
echo "The Imagick extension for PHP can provide segmentation faults on some systems"
echo "Optionally remove imagick with: sudo apt-get remove php5-imagick"
| true |
6a4915c8856188e483a723f6d1bd4d60c492b3fc
|
Shell
|
IMAGINARY/mima-scripts
|
/install.sh
|
UTF-8
| 247 | 3.1875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Sync scripts in this repo into the user's home directory.
# Abort immediately if any step fails (in particular, a failed cd must not
# let rsync run from the wrong directory).
set -e
# Change to directory where the install script resides so the relative
# source paths below resolve correctly.
cd "$(dirname "$0")"
# Sync files via rsync (-a archive mode preserves permissions and times).
rsync -a --info=ALL "$PWD/.config" "$PWD/bin" ~/
| true |
1f0cc631a3ca4d4c1a57a9ae9bd4e0336eddd5e2
|
Shell
|
KaOSx/main
|
/gtk3/PKGBUILD
|
UTF-8
| 2,367 | 2.828125 | 3 |
[] |
no_license
|
# PKGBUILD for GTK+ 3: builds the toolkit with meson and splits the
# icon-cache tool into its own package to avoid conflicting with gtk2.
pkgbase=gtk3
pkgname=('gtk3' 'gtk-update-icon-cache')
pkgver=3.24.38
# Upstream publishes tarballs under the minor-version directory (3.24).
_pkgver=3.24
pkgrel=1
arch=('x86_64')
url="https://www.gtk.org/"
makedepends=('gobject-introspection' 'cairo' 'libcups' 'libxcursor' 'libxinerama' 'libxrandr'
             'libxi' 'libxcomposite' 'libxdamage' 'pango' 'shared-mime-info' 'gdk-pixbuf2'
             'colord' 'at-spi2-core' 'libepoxy' 'libxkbcommon' 'librsvg' 'wayland'
             'wayland-protocols' 'iso-codes' 'sassc' 'meson' 'ninja')
options=('!libtool' '!docs')
license=('LGPL')
source=("https://ftp.gnome.org/pub/gnome/sources/gtk+/${_pkgver}/gtk+-${pkgver}.tar.xz"
        'gtk-update-icon-cache.hook'
        'gtk-update-icon-cache.script'
        'gcc11.diff')
sha256sums=('ce11decf018b25bdd8505544a4f87242854ec88be054d9ade5f3a20444dd8ee7'
            '2d435e3bec8b79b533f00f6d04decb1d7c299c6e89b5b175f20be0459f003fe8'
            'bbe06e1b4e1ad5d61a4e703445a2bb93c6be918964d6dd76c0420c6667fa11eb'
            'f62d5e7f7ae9694021ba87e232dbcd3ba402987132708baff282b34c508c1b53')
prepare() {
  cd gtk+-${pkgver}
  # gcc11 patch currently disabled; the diff is still fetched in source=().
  #patch -p1 -i ${srcdir}/gcc11.diff
}
# Out-of-tree meson build with docs/demos/examples/tests disabled.
build() {
  mkdir -p build
  cd build
  meson setup ../gtk+-${pkgver} \
    --prefix=/usr \
    --buildtype=release \
    --sysconfdir=/etc \
    --localstatedir=/var \
    -Dgtk_doc=false \
    -Dintrospection=true \
    -Ddemos=false \
    -Dexamples=false \
    -Dtests=false \
    -Dinstalled_tests=false
  ninja
}
package_gtk3() {
  pkgdesc="The GTK+ Toolkit (v3)"
  depends=('cairo' 'libcups' 'libxcursor' 'libxinerama' 'libxrandr'
           'libxi' 'libxcomposite' 'libxdamage' 'pango' 'shared-mime-info'
           'colord' 'at-spi2-core' 'libepoxy' 'libxkbcommon' 'wayland'
           'wayland-protocols' 'iso-codes' 'gtk-update-icon-cache')
  install="gtk3.install"
  cd build
  DESTDIR=${pkgdir} ninja install
  # avoid gtk2 conflict, used for both gtk2 & 3 this way
  rm $pkgdir/usr/bin/gtk-update-icon-cache
}
# Ships only the gtk-update-icon-cache binary plus its libalpm hook/script.
package_gtk-update-icon-cache() {
  pkgdesc="The GTK+ update icon cache tool"
  depends=('gdk-pixbuf2')
  cd build/gtk
  install -Dm755 gtk-update-icon-cache ${pkgdir}/usr/bin/gtk-update-icon-cache
  install -Dm644 ../../gtk-update-icon-cache.hook ${pkgdir}/usr/share/libalpm/hooks/gtk-update-icon-cache.hook
  install -Dm755 ../../gtk-update-icon-cache.script ${pkgdir}/usr/share/libalpm/scripts/gtk-update-icon-cache
}
| true |
24383aa824f2c52e61fcb787dc215bcde7f0f13f
|
Shell
|
pgomersbach/puppet-autoinstall
|
/modules/production/nagiosslave/files/check_qnap_serial.sh
|
UTF-8
| 1,623 | 4.125 | 4 |
[] |
no_license
|
#!/bin/bash
###############################################################################
#                                                                             #
# Nagios plugin to monitor hw serial with get_hwsn on qnap devices            #
# Written in Bash (and uses sed & awk).                                       #
#                                                                             #
###############################################################################
# NOTE: shebang changed from /bin/sh to bash -- the script uses shopt and
# [[ ]], which are bash-only.

VERSION="Version 1.0"
AUTHOR="Paul Gomersbach (p.gomersbach@rely.nl)"

# Sensor program
SENSORPROG=/sbin/get_hwsn

# Nagios plugin exit codes
STATE_OK=0
STATE_WARNING=1
STATE_CRITICAL=2
STATE_UNKNOWN=3

# get_hwsn not working: the check is deliberately short-circuited here and
# always reports OK.  Everything below only runs once this exit is removed.
exit $STATE_OK

shopt -s extglob

#### Functions ####

# Print version information
print_version()
{
    printf "\n\n$0 - $VERSION\n"
}

# Print help information
print_help()
{
    print_version
    printf "$AUTHOR\n"
    printf "Monitor hw serial with get_hwsn on qnap devices\n"
/bin/cat <<EOT
Options:
-h
   Print detailed help screen
-V
   Print version information
-v
   Verbose output
EOT
}

###### MAIN ########

# See if we have the program installed and can execute it
if [[ ! -x "$SENSORPROG" ]]; then
    printf "\nIt appears you don't have get_hwsn installed \
in $SENSORPROG\n"
    # Fixed typo: was $STATE_UNKOWN (undefined), which made the plugin
    # exit 0 (OK) instead of 3 (UNKNOWN) when the sensor is missing.
    exit $STATE_UNKNOWN
fi

# Parse command line options
while [[ -n "$1" ]]; do
    case "$1" in
        -h | --help)
            print_help
            exit $STATE_OK
            ;;
        -V | --version)
            print_version
            exit $STATE_OK
            ;;
        *)
            # Consume unrecognised arguments; without this shift the loop
            # previously spun forever on any option other than -h/-V.
            shift
            ;;
    esac
done

# Get the serial
TEMP=$("$SENSORPROG")
echo "Qnap serial: $TEMP"
exit $STATE_OK
| true |
7ff377d2a87a92eaac631d403f40b0395caf5633
|
Shell
|
Saphir/Elan_vim_bashrc
|
/install.sh
|
UTF-8
| 610 | 3.53125 | 4 |
[] |
no_license
|
#!/bin/bash
# Symlink the dotfiles in this repository into $HOME, backing up any
# existing entry as ~/.<name>.bak.<timestamp>.

# Print an error to stderr and abort the whole script.
function die() {
    echo "[ERROR] $@" >&2
    exit 1
}

now=$(date +%F_%T)
echo "[INFO] Make sure vim is 8.0+ with --enable-pythoninterp=yes compiled"
# Work from the directory that contains this script (symlinks resolved).
cd "$(dirname "$(readlink -m "$0")")" || die "Fail to cd to script directory"
for i in vim vimrc bashrc gitconfig cheat zshrc screenrc
do
    # Back up whatever already exists.  Also test -h: a dangling symlink
    # fails -e but would still make 'ln -s' below fail.  An 'if' block is
    # used instead of the old '&& ( mv || die )' because 'die' inside a
    # subshell only exited the subshell, not the script.
    if [ -e ~/.${i} ] || [ -h ~/.${i} ]; then
        mv ~/.${i} ~/.${i}.bak.$now || die "Fail to mv ~/.${i} ~/.${i}.bak.$now"
    fi
    if [[ ${i} == vim ]]
    then
        # ~/.vim points at the repository root itself.
        ln -s "$(readlink -m .)" ~/.${i} || die "Fail to create symbolic link ~/.${i}"
    else
        ln -s "$(readlink -m ./${i})" ~/.${i} || die "Fail to create symbolic link ~/.${i}"
    fi
    echo "~/.${i} Done"
done
| true |
f827b485568e5d3595ea2ac386d5d759db1dabc1
|
Shell
|
alexko/dotfiles
|
/install
|
UTF-8
| 697 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/sh
# Install dotfiles from this repository into $HOME as symlinks, backing up
# anything already present, and optionally build the emacs extras (--full).
# Pull in the submodules needed for the basic setup.
git submodule update --init .zsh .vim z
for f in .profile .shrc .bashrc .inputrc .zshrc .tmux.conf .tmux_urxvt_tabs .screenrc .vim .vim/.vimrc .calendar .emacs.d; do
  fn=$(basename $f)
  # Where does ~/$fn currently point, fully resolved?
  fl=$(readlink -f ~/$fn)
  # Skip when the link already points into this repo, or when the source
  # file is missing/unreadable; otherwise (re)create the symlink.
  [ "$fl" = "$(pwd)/$f" ] || [ ! -r "$f" ] || {
    echo symlinking $f
    # Back up an existing file or symlink with a timestamp suffix.
    # (NOTE(review): test -a/-o forms are deprecated; kept as-is here.)
    [ -e ~/$fn -o -h ~/$fn ] && mv ~/$fn ~/$fn.$(date +%Y%m%d%H%M%S)
    ln -s $(pwd)/$f ~/$fn
  }
done
mkdir -p ~/bin
# Make the 'z' directory-jumper script available under ~/bin.
[ -r ~/bin/z.sh ] || ln -s $PWD/z/z.sh ~/bin/z.sh
case $1 in
  --full)
    # Full setup: fetch all nested submodules and build/install Pymacs.
    git submodule update --init --recursive
    cd .emacs.d/vendor/pymacs
    make && python setup.py install --user
    ;;
  *)
    echo "run install --full for emacs setup"
    ;;
esac
| true |
16bfbd3ef84107db59e52c3a6768578813679104
|
Shell
|
MasayukiMiyake97/JVMWatcher
|
/product/bin/setEnv.sh
|
UTF-8
| 174 | 2.546875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Environment setup for JVMWatcher: library/config paths plus the JDK lib
# directory.  Intended to be sourced so the exported variables reach the
# caller's environment (hence no 'exit' on error below).
export WATCHER_LIBPATH=../lib
export WATCHER_CONFIGPATH=../config
# get JDK path: locate javac ('command -v' is the portable 'which'),
# resolve symlinks, then strip the trailing /bin/javac to reach the JDK
# root and point JDK_LIB at its lib directory.
javac_path=$(command -v javac)
if [ -n "$javac_path" ]; then
    pathtojava=$(readlink -e "$javac_path")
    export JDK_LIB=${pathtojava%/*/*}/lib
else
    # Previously a missing javac silently produced JDK_LIB=/lib; warn instead.
    echo "setEnv.sh: javac not found in PATH" >&2
fi
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.