blob_id (stringlengths 40..40) | language (stringclasses 1 value) | repo_name (stringlengths 4..115) | path (stringlengths 2..970) | src_encoding (stringclasses 28 values) | length_bytes (int64 31..5.38M) | score (float64 2.52..5.28) | int_score (int64 3..5) | detected_licenses (listlengths 0..161) | license_type (stringclasses 2 values) | text (stringlengths 31..5.39M) | download_success (bool 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|
f28844f2f5e3b63c9752dc8bc09c65d123c59a22
|
Shell
|
scollis/SimRadar
|
/o.sh
|
UTF-8
| 787 | 3.515625 | 4 |
[] |
no_license
|
#!/bin/bash
function show_last_seg() {
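# Find the line number of the last "==================<<<" marker in $1, then print from that line to the end of the file.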
cmd="grep -n \"==================<<<\" \"$1\" | tail -n 1 | awk -F \":\" '{print \$1}'"
line=`eval $cmd`
if [ ! -z "${line}" ]; then
tail -n +"$line" "$1"
fi
}
if [[ "$#" -gt 0 && "$1" == "t" ]]; then
# Test
echo -e "\033[33m"
show_last_seg tests_stdout.txt
echo -e "\033[31m"
show_last_seg tests_stderr.txt
elif [[ "$#" -gt 0 && "$1" == "1" ]]; then
# Single node execution
echo -e "\033[32m"
show_last_seg radarsim_single_stdout.txt
echo -e "\033[31m"
show_last_seg radarsim_single_stderr.txt
elif [[ "$#" -gt 0 && "$1" == "c" ]]; then
echo -e "\033[36m"
show_last_seg radarsim_cpu.txt
else
echo -e "\033[32m"
show_last_seg radarsim_stdout.txt
echo -e "\033[31m"
show_last_seg radarsim_stderr.txt
fi
echo -e "\033[0m"
| true |
ddd62e5202a591cc924c5b9005bfaa49a389dd5f
|
Shell
|
blar/docker-postfix
|
/src/usr/local/bin/postfix-setup
|
UTF-8
| 560 | 2.890625 | 3 |
[] |
no_license
|
#!/bin/sh
set -e
apk add --no-cache postfix
postconf -e "inet_interfaces = all"
postconf -e "mynetworks = 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16"
postconf -e "myorigin = \$mydomain"
# Appends $mydomain to the sender address (foo@bar => foo@bar.$mydomain)
postconf -e "append_dot_mydomain = yes"
# Removes subdomains from the sender's domain that end with $masquerade_domains.
postconf -e "masquerade_domains = \$mydomain"
postconf -e "local_header_rewrite_clients = permit_inet_interfaces"
postconf -e "remote_header_rewrite_domain = \$mydomain"
| true |
94dd5a3829445d14da7d349feeacea0083da0afe
|
Shell
|
dawnbreaks/taomee
|
/mole/dbsvr/ser/sql/clean_user_attire.sh
|
UTF-8
| 999 | 3.46875 | 3 |
[] |
no_license
|
#!/bin/bash
db_index=0
user="root"
password="ta0mee"
host="localhost"
tmp_file="table.sql"
create_user_table_sql() {
cat <<EOF >$tmp_file
delete from t_user_attire_$1 where count=0;
EOF
}
if [ "$1" == "" ] ; then
echo need 0,1..16
exit;
fi
db_index=0
end_index=99
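# Arguments 1..16 map onto consecutive ranges of 6-7 database indices that together cover 0..99; argument 0 selects all 100 databases.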
if [ "$1" == "0" ]; then
let "db_index=0"
let "end_index=99"
else
let "fix=($1-1)/4"
let "add1flag=($1-1)%4"
if [ "$add1flag" == "0" ]; then
let "db_index=($1-1)*6+fix"
else
let "db_index=($1-1)*6+fix+1"
fi
let "end_index=($1)*6+fix"
fi
echo "do db:" $db_index "-" $end_index
while [ $db_index -le $end_index ] ; do
echo "deal" $db_index
dbx=`printf "%02d" $db_index`
table_index=0
while [ $table_index -lt 100 ] ; do
tbx=`printf "%02d" $table_index`
create_user_table_sql $tbx
cat $tmp_file | mysql -u $user --password="$password" -h $host USER_$dbx
table_index=`expr $table_index + 1`
done
db_index=`expr $db_index + 1`
done
| true |
815f5c9f30f6f51a30c79137ef15d1e28de6ce07
|
Shell
|
yurisuki/Archosu
|
/lib/archosu-chroot/lib/archosu-utils-chroot
|
UTF-8
| 3,074 | 3.765625 | 4 |
[] |
no_license
|
#!/bin/bash
# This script will be executed in chroot environment.
source /lib/archosu-main
introduction() {
dialog --backtitle "$tit" --title "Chrooted" --msgbox "Now, you're chrooted inside your new system. You're almost ready to go, but I still have to do some stuff for you." 6 70
}
locale() { # Set system locale
dialog --backtitle "$tit" --title "Setting system locale" --infobox "Setting system locale..." 4 70
echo "LANG=en_US.UTF-8" >> /etc/locale.conf
echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen
echo "en_US ISO-8859-1" >> /etc/locale.gen
locale-gen >/dev/null 2>&1
}
enableservices() { # Enable some key services
dialog --backtitle "$tit" --title "Enabling services..." --infobox "Enabling NetworkManager..." 4 70
systemctl enable NetworkManager >/dev/null
}
getuserandpass() { # Prompts user for new username and password.
name=$(dialog --title "Creating new account" --backtitle "$tit" --inputbox "First, please enter a name for the user account." 10 60 3>&1 1>&2 2>&3 3>&1) || exit
while ! echo "$name" | grep "^[a-z_][a-z0-9_-]*$" >/dev/null 2>&1; do
name=$(dialog --title "Creating new account" --backtitle "$tit" --no-cancel --inputbox "Username not valid. Give a username beginning with a letter, with only lowercase letters, - or _." 10 60 3>&1 1>&2 2>&3 3>&1)
done
pass1=$(dialog --title "Creating new account" --backtitle "$tit" --no-cancel --passwordbox "Enter a password for that user." 10 60 3>&1 1>&2 2>&3 3>&1)
pass2=$(dialog --title "Creating new account" --backtitle "$tit" --no-cancel --passwordbox "Retype password." 10 60 3>&1 1>&2 2>&3 3>&1)
while ! [ "$pass1" = "$pass2" ]; do
unset pass2
pass1=$(dialog --title "Creating new account" --backtitle "$tit" --no-cancel --passwordbox "Passwords do not match.\\n\\nEnter password again." 10 60 3>&1 1>&2 2>&3 3>&1)
pass2=$(dialog --title "Creating new account" --backtitle "$tit" --no-cancel --passwordbox "Retype password." 10 60 3>&1 1>&2 2>&3 3>&1)
done
}
adduserandpass() { # Adds user `$name` with password $pass1.
dialog --title "Creating new account" --backtitle "$tit" --infobox "Adding user \"$name\"..." 4 50
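# useradd fails if the user already exists; the fallback then just ensures wheel membership and a home directory owned by the user.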
useradd -m -g wheel -s /bin/bash "$name" >/dev/null 2>&1 ||
usermod -a -G wheel "$name" && mkdir -p /home/"$name" && chown "$name":wheel /home/"$name"
echo "$name:$pass1" | chpasswd
unset pass1 pass2
}
installgrub() { # Install grub, the bootloader
dialog --title "Downloading bootloader" --backtitle "$tit" --infobox "Downloading \`grub\`, the bootloader." 4 70
sudo pacman --noconfirm -Sy grub os-prober >/dev/null 2>&1
dialog --title "Installing bootloader" --backtitle "$tit" --infobox "Installing \`grub\`, the bootloader." 4 70
grub-install "/dev/$(cat /grubdrive)" >/tmp/grublog || { clear; echo -e "The installation of bootloader has failed.\\nLOG:\\n$(cat /tmp/grublog)"; exit; }
grub-mkconfig -o /boot/grub/grub.cfg >/dev/null 2>&1
}
bash /lib/archosu-utils-time # Set timezone
enableservices
getuserandpass
adduserandpass
echo "%wheel ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers # Make sudo want no password
installgrub
| true |
b876b6ce1cef24546151ffb8eeb9593ac29e51cf
|
Shell
|
cha63506/ubuntu-8
|
/setup.sh
|
UTF-8
| 386 | 3.15625 | 3 |
[] |
no_license
|
#!/bin/bash
# bashrc
echo -n "Backup your original bashrc? (y/n)"
read IN
if [ "$IN" = "y" ] || [ "$IN" = "Y" ]; then
rm -f ~/.bashrc_backup; mv -f ~/.bashrc ~/.bashrc_backup
fi
ln -f -s $PWD/_bashrc ~/.bashrc
echo "=== DONE! ==="
# scripts
ln -f -s $PWD/scripts ~/
# vim
bash $PWD/vim/setup.sh
# tmux & powerline
bash $PWD/tmux/setup.sh
ln -f -s $PWD/_gitconfig ~/.gitconfig
| true |
43bdadc8ac92c7633a2be31ab781296b87a2e448
|
Shell
|
tiffanywang3/prompt
|
/src/scripts/gcloud-install.sh
|
UTF-8
| 1,402 | 3.75 | 4 |
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
if [ ! -z "${AM_PROMPT_DEBUG:-}" ]; then
echo 'gcloud-install'
fi
__prompt-set-gcloud-path() {
local GCLOUD_PATH_INC=$HOME/.gcloud/path.bash.inc
if [ -e $GCLOUD_PATH_INC ]; then
source $GCLOUD_PATH_INC
fi
}
gcloud-install() {
local GCLOUD_PATH=$HOME/.gcloud
local PLATFORM='linux'
if [ "$(uname)" == "Darwin" ]; then
PLATFORM='darwin'
fi
local URL=https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-161.0.0-$PLATFORM-x86_64.tar.gz
local TEMP=$(mktemp -d)
local FILENAME=$TEMP/gcloud-sdk.tar.gz
local EXPANDED=$TEMP/google-cloud-sdk
if ! type gcloud 1>/dev/null 2>&1; then
echo
echo "downloading gcloud-sdk"
curl -L $URL -o $FILENAME --silent 2>&1
tar -xf $FILENAME -C $TEMP 1>/dev/null 2>&1
rm -rf $GCLOUD_PATH 1>/dev/null 2>&1
mv $EXPANDED $GCLOUD_PATH 1>/dev/null 2>&1
rm -rf $TEMP 1>/dev/null 2>&1
echo
echo "installing gcloud-sdk"
~/.gcloud/install.sh --command-completion false --usage-reporting false --quiet --path-update false 2>&1
__prompt-set-gcloud-path
fi
echo
echo "installing and updating gcloud components"
gcloud components install kubectl docker-credential-gcr gsutil core --quiet 2>&1
gcloud components update --quiet 2>&1
}
__prompt-set-gcloud-path
| true |
9c2c727f08b084b9f668c134c41271e878401a03
|
Shell
|
michalczapko/dotfiles
|
/bashrc
|
UTF-8
| 467 | 3.015625 | 3 |
[
"MIT"
] |
permissive
|
# Bash aliases
alias l='ls -l'
alias la="ls -Alh"
alias ..='cd ..'
# Rails aliases
alias sc='./script/console'
alias ss='./script/server'
# Git aliases
alias gs='git status'
alias gc='git checkout'
# Set prompt to show git branch
parse_git_branch (){
[ "$(pwd | grep -E 'work|Sites|Projects')" != "" ] && git branch --no-color 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/(git:\1)/'
}
export PS1='\[\033[0;33m\]\w\[\033[00m\]\[\033[01;00m\]$(parse_git_branch): '
| true |
3143c6b45cd411bf8e61f772456f58e85027d965
|
Shell
|
ivanfioravanti/easy-azure-opensource
|
/mongodb/devShardedCluster/mongoCluster.sh
|
UTF-8
| 1,808 | 3.609375 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/sh
DATAFOLDER=/mnt/data
NUMOFSHARDS=4
MONGOUSER=azureuser
# feel free to use mmapv1 or wiredTiger
STORAGEENGINE=wiredTiger
case "$1" in
start)
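# seq -f "%02g" yields zero-padded shard indices (00, 01, ...), giving each shard its port 270NN and data directory sNN.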
for i in $(seq -f "%02g" 0 $((NUMOFSHARDS-1)))
do
echo "\n\nStarting Shard $i.\n"
mongod --port 270$i --dbpath $DATAFOLDER/s$i --storageEngine $STORAGEENGINE --smallfiles --oplogSize 128 --fork --logpath $DATAFOLDER/s$i/s$i.log
done
echo "\n\nStarting Configuration Servers.\n"
mongod --configsvr --dbpath $DATAFOLDER/conf/conf0 --storageEngine $STORAGEENGINE --port 26000 --fork --logpath $DATAFOLDER/conf/conf0.log
echo "\n\nWaiting before starting the Query Router.\n"
sleep 25
mongos --configdb localhost:26000 --port 27017 --chunkSize 1 --fork --logpath $DATAFOLDER/mongos.log
;;
stop)
echo "\n\nShutting down the cluster.\n"
mongo localhost:27017 js/shutdown.js
mongo localhost:26000 js/shutdown.js
for i in $(seq -f "%02g" 0 $((NUMOFSHARDS-1)))
do
mongo localhost:270$i js/shutdown.js
done
;;
init)
echo "\n\nCreating directories.\n"
for i in $(seq -f "%02g" 0 $((NUMOFSHARDS-1)))
do
mkdir -p $DATAFOLDER/s$i
done
mkdir -p $DATAFOLDER/conf/conf0
if [ -n "$MONGOUSER" ]
then
chown -R $MONGOUSER $DATAFOLDER
fi
;;
configure)
echo "\n\nConfiguring the cluster.\n"
mongo localhost:27017 --eval "var numOfShards=$NUMOFSHARDS" js/cluster.js
;;
clean)
echo "\n\nCleaning.\n"
rm -rf $DATAFOLDER/*
;;
*)
echo "Usage: $0 {init|start|configure|stop|clean}"
exit 1
;;
esac
| true |
ffa1d9c1ea4c06dc981a970553b444a192492899
|
Shell
|
chanzuckerberg/miniwdl
|
/tests/singularity.t
|
UTF-8
| 1,435 | 3.21875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# bash-tap tests for miniwdl's Singularity task runtime. `singularity` must be available.
set -o pipefail
cd "$(dirname $0)/.."
SOURCE_DIR="$(pwd)"
BASH_TAP_ROOT="tests/bash-tap"
source tests/bash-tap/bash-tap-bootstrap
export PYTHONPATH="$SOURCE_DIR:$PYTHONPATH"
miniwdl="python3 -m WDL"
if [[ -z $TMPDIR ]]; then
TMPDIR=/tmp
fi
DN=$(mktemp -d "${TMPDIR}/miniwdl_runner_tests_XXXXXX")
DN=$(realpath "$DN")
cd $DN
echo "$DN"
plan tests 7
export MINIWDL__SCHEDULER__CONTAINER_BACKEND=singularity
$miniwdl run_self_test --dir "$DN"
is "$?" "0" "run_self_test"
export MINIWDL__SINGULARITY__IMAGE_CACHE=$(mktemp -d)
$miniwdl run_self_test --dir "$DN"
is "$?" "0" "run_self_test with image cache"
ls $MINIWDL__SINGULARITY__IMAGE_CACHE/*.sif
is "$?" "0" "singularity images cached successfully"
$miniwdl run_self_test --dir "$DN/use_cache"
is "$?" "0" "run_self_test with image cache"
grep 'SIF found in image cache directory' $(find "$DN/use_cache" -name workflow.log)
is "$?" "0" "singularity image used from cache"
$miniwdl run $SOURCE_DIR/tests/task_env_inputs.wdl --dir "$DN"
is "$?" "0" "env input escaping"
git clone --depth=1 https://github.com/broadinstitute/viral-pipelines.git
cd viral-pipelines
$miniwdl run pipes/WDL/workflows/assemble_denovo.wdl \
--path pipes/WDL/tasks --dir "$DN" --verbose \
-i test/input/WDL/test_inputs-assemble_denovo-local.json
is "$?" "0" "assemble_denovo success"
| true |
54b594ec8588c2b78ecb6f2b7dfaf78784bf8202
|
Shell
|
alexeykazakov/saas
|
/fetch_and_apply.sh
|
UTF-8
| 1,360 | 3.921875 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
TSTAMP=$(date +%Y%m%d_%H%M%S)
TPLDIR="dsaas-templates"
function git_prep {
# should also check that the git master co is clean
git checkout master
git pull --rebase upstream master
}
function prep {
git_prep
TOK=$(cat ../osd-dsaas-token-`whoami`)
oc login https://api.dsaas.openshift.com --token=${TOK}
if [ $? -ne 0 ]; then echo "E: unable to login to openshift"; exit 2 ;fi
oc project dsaas-production
if [ $? -ne 0 ]; then echo "E: unable to get oc project"; exit 3 ;fi
}
function oc_apply {
cat $1 | oc apply -f -
cp $1 last_applied/
}
# get some basics in place
prep
# lets clear this out to make sure we always have a
# fresh set of templates, and nothing else left behind
rm -rf ${TPLDIR}; mkdir -p ${TPLDIR}
python saasherder/cli.py -D ${TPLDIR}/ -s dsaas-services/ pull
mkdir -p $TSTAMP
python saasherder/cli.py -D ${TPLDIR}/ -s dsaas-services/ \
template --output-dir $TSTAMP tag
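# Compare each rendered template against the copy in last_applied/ and drop unchanged files, so only templates that actually changed get applied.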
for f in `ls $TSTAMP/*`; do
if [ -e last_applied/$(basename $f) ]; then
difflines=$(diff -uNr $f last_applied/$(basename $f) | wc -l )
if [ ${difflines} -lt 1 ]; then
rm -f $f
else
# not the same file
oc_apply $f
fi
else
oc_apply $f
fi
done
if [ $(find ${TSTAMP}/ -name \*.yaml | wc -l ) -lt 1 ]; then
# if we didn't apply anything, don't keep the dir around
rm -rf $TSTAMP
echo "R: Nothing to apply"
fi
| true |
e1a0da6ae1a822419ac9dc0ed88259f322412d8b
|
Shell
|
skywalker-nick/homework
|
/neutron-scripts/create_vm.sh
|
UTF-8
| 1,551 | 2.921875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Create Tenant and User #
keystone_ip=192.168.247.6
tenant=Tenant$1
user=User$1
usermail=user$1@awcloud.com
role=Member
extnet=$2
image_id=`glance index | grep Cirr | awk '{print $1}'`
nova --os-tenant-name $tenant --os-username $user --os-password password --os-auth-url=http://$keystone_ip:5000/v2.0 keypair-add key01 --pub-key ~/.ssh/id_rsa.pub
for (( i=0; i<1; i++));
do
netid=`neutron --os-tenant-name $tenant --os-username $user --os-password password --os-auth-url=http://$keystone_ip:5000/v2.0 net-list | grep $tenant-Net | awk '{print $2}'`
nova --os-tenant-name $tenant --os-username $user --os-password password --os-auth-url=http://$keystone_ip:5000/v2.0 boot --flavor 2 --nic net-id=$netid --image ${image_id} --key-name key01 vm00$i
sleep 10
fixip=`nova --os-tenant-name $tenant --os-username $user --os-password password --os-auth-url=http://$keystone_ip:5000/v2.0 list | grep vm00$i | grep RUNNING | awk '{print $6}'`
portid=`neutron --os-tenant-name $tenant --os-username $user --os-password password --os-auth-url=http://$keystone_ip:5000/v2.0 port-list | grep $fixip | awk '{print $2}'`
floatingip=`neutron --os-tenant-name $tenant --os-username $user --os-password password --os-auth-url=http://$keystone_ip:5000/v2.0 floatingip-create $extnet | grep -v float | grep -v port | grep -v router | grep -v tenant | grep id | awk '{print $4}'`
neutron --os-tenant-name $tenant --os-username $user --os-password password --os-auth-url=http://$keystone_ip:5000/v2.0 floatingip-associate $floatingip $portid
done;
| true |
84236ca9e6b59d592c4bf913763d110dd6b4c4d3
|
Shell
|
ttylta/dots
|
/scripts/artic
|
UTF-8
| 698 | 4.09375 | 4 |
[] |
no_license
|
#! /bin/bash
while test $# -gt 0; do
case "$1" in
-a)
shift
article=$1
shift
;;
-n) # TODO: Auto generate the name from the website title.
shift
name=$1
shift
;;
*)
echo "$1 is not a recognized argument"
exit;
;;
esac
done
if [ ! -z "$article" ] && [ ! -z "$name" ]
then
echo ""
echo "Generating pdf article..."
echo ""
lynx -source "$article" > ~/tmp/articles/$name.html
prince ~/tmp/articles/$name.html
rm -f ~/tmp/articles/$name.html
echo ""
echo "If this fails, the website probably thinks you're a bot."
echo ""
else
echo ""
echo "Name or article not provided."
echo ""
fi
| true |
d59d35bb4c9bb87419c3579ed0850117dfc2dfe1
|
Shell
|
hhadian/kaldi
|
/egs/mandarin_bn_bc/s5/local/tdt_mandarin_data_prep_txt.sh
|
UTF-8
| 3,198 | 3.546875 | 4 |
[
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2019 Johns Hopkins University (author: Jinyi Yang)
# Apache 2.0
. ./path.sh || exit 1;
echo $0 "$@"
export LC_ALL=C
tdtData=$(utils/make_absolute.sh "${@: -1}" );
length=$(($#-1))
args=${@:1:$length}
top_pwd=`pwd`
txtdir=$tdtData/txt
sph_scp=$tdtData/wav.scp
mkdir -p $txtdir
cd $txtdir
for cdx in ${args[@]}; do
echo "Preparing $cdx"
if [[ $cdx == *.tgz ]] ; then
tar -zxf $cdx
elif [ -d "$cdx" ]; then
tgt=$(basename $cdx)
zfile=`find $cdx -type f -name "*.tgz"`
if [ ! -z $zfile ]; then
test -x $tgt || mkdir $tgt
cd $tgt
tar -zxf $zfile
cd $txtdir
else
test -x $tgt || ln -s $cdx `basename $tgt`
fi
else
echo "I don't really know what I shall do with $cdx " >&2
fi
done
# There are more transcriptions than audio files. We only use the
# transcriptions which have corresponding audio files.
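# awk two-file join: the first pass (NR==FNR) records the utterance ids from wav.scp in array a; the second pass keeps only the transcription files whose basename (without .src_sgm) appears in a.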
find -L $txtdir -type f -name "*.src_sgm" | grep "MAN" | \
awk 'NR==FNR {a[$1];next}; {name=$0;gsub(".src_sgm$", "", name); gsub(".*/", "", name); \
if (name in a) print $0}' $sph_scp - | sort > $txtdir/trans.flist || exit 1;
perl $top_pwd/local/tdt_mandarin_parse_sgm.pl $txtdir/trans.flist > $txtdir/text.tmp || exit 1;
cd $top_pwd
cut -d " " -f1 $txtdir/text.tmp > $txtdir/uttid
cut -d " " -f2- $txtdir/text.tmp > $txtdir/trans
pyver=`python --version 2>&1 | sed -e 's:.*\([2-3]\.[0-9]\+\).*:\1:g'`
export PYTHONPATH=$PYTHONPATH:`pwd`/tools/mmseg-1.3.0/lib/python${pyver}/site-packages
if [ ! -d tools/mmseg-1.3.0/lib/python${pyver}/site-packages ]; then
echo "--- Downloading mmseg-1.3.0 ..."
echo "NOTE: it assumes that you have Python, Setuptools installed on your system!"
wget -P tools http://pypi.python.org/packages/source/m/mmseg/mmseg-1.3.0.tar.gz
tar xf tools/mmseg-1.3.0.tar.gz -C tools
cd tools/mmseg-1.3.0
mkdir -p lib/python${pyver}/site-packages
CC=gcc CXX=g++ python setup.py build
python setup.py install --prefix=.
cd ../..
if [ ! -d tools/mmseg-1.3.0/lib/python${pyver}/site-packages ]; then
echo "mmseg is not found - installation failed?"
exit 1
fi
fi
# Create text, use mmseg for splitting Mandarin characters into words.
cat $txtdir/trans |\
sed -e 's/,//g' | \
sed -e 's/((\([^)]\{0,\}\)))/\1/g' |\
perl local/mandarin_text_normalize.pl |\
python local/mandarin_segment.py |\
sed -e 's/THISISSPKTURN/<TURN>/g' |\
paste $txtdir/uttid - |\
awk '{if (NF>2 || (NF==2 && $2 != "<TURN>")) print $0}' > $txtdir/text_with_spk_turn
# The text_with_spk_turn file contains label "<TURN>" to indicate speaker
# switching, in case the speaker diarization process is required. We do not use
# speaker diarization at this moment, so the spk id will be the segment
# (utterance)
cat $txtdir/text_with_spk_turn | sed 's/<TURN>//g' > $txtdir/text
awk '{print $1" "$1}' $txtdir/text_with_spk_turn > $txtdir/utt2spk
cp $txtdir/utt2spk $txtdir/spk2utt
awk '{segments=$1; split(segments, S, "_"); uttid=S[1];for (i=2;i<=5;++i) uttid=uttid"_"S[i]; print segments " " uttid " " S[7]/100 " " S[8]/100}' < $txtdir/text > $txtdir/segments
awk '{print $1}' $txtdir/text > $txtdir/uttid
echo "TDT Mandarin text preparation succeeded!"
| true |
20f268ace4cf8a593bc9abc194d0ede65787d138
|
Shell
|
malyzajko/daisy
|
/scripts/optimization_mixed-tuning_rewriting.sh
|
UTF-8
| 1,517 | 3.21875 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash --posix
#
# Runs mixed-precision tuning on a set of benchmarks
# Usage: this script needs to be run from the /daisy home directory. If you
# get an error that daisy is not found, check this.
#
# Parameters are non-optional:
#
# 1: whether to use rewriting
# 2: whether to run benchmarking
#
# ./scripts/mixed-precision/run_delta_batch.sh [rewriting|foo] [bench|foo] [directory of generated files] [bench-id]
#
# seeds used: 1469010147126
declare -a arr=("Bsplines0" \
"Bsplines1" \
"Bsplines2" \
"Doppler" \
"Himmilbeau" \
"InvertedPendulum" \
"Kepler0" \
"Kepler1" \
"Kepler2" \
"RigidBody1" \
"RigidBody2" \
"Sine" \
"Sqrt" \
"Traincar4_State8" \
"Traincar4_State9" \
"Turbine1" \
"Turbine2" \
"Turbine3")
echo "Running mixed-precision optimization with delta debugging"
# rm mixed-opt-numValid.txt # just in case there is old data there
# rm mixed-opt-minCost.txt
# rm mixed-opt-opCount.txt
for file in "${arr[@]}"
do
echo "---> $file" # --rangeMethod=smt #--rewrite-seed-system-millis \
if [ "$1" = "rewriting" ]; then
./daisy --mixed-tuning --rewrite --rangeMethod=smt \
--rewrite-seed=1490794789615 "testcases/mixed-precision/${file}.scala"
#--rewrite-seed-system-millis "testcases/mixed-precision/${file}.scala"
else
./daisy --mixed-tuning --rangeMethod=smt "testcases/mixed-precision/${file}.scala"
#./daisy --mixed-fixedpoint --mixed-tuning --rangeMethod=smt "testcases/mixed-precision/fixedpoint/${file}.scala"
fi
done
| true |
2b962f53d20bbce49fcd10db7beb16cf49097679
|
Shell
|
lloydtawanda/os_hardening_scripts
|
/rhel/audit.sh
|
UTF-8
| 7,972 | 2.953125 | 3 |
[] |
no_license
|
################################################
# 4.0 Logging and Auditing
################################################
# **********************************************
# 4.1 Configure System Accounting (auditd)
# **********************************************
# 4.1.1 Ensure auditing is enabled
# 4.1.1.1 Ensure auditd is installed (Scored)
dnf install audit audit-libs -y
# 4.1.1.2 Ensure auditd service is enabled (Scored)
systemctl --now enable auditd
# 4.1.1.3 Ensure auditing for processes that start prior to auditd is enabled (Scored)
# 4.1.1.4 Ensure audit_backlog_limit is sufficient (Scored)
# Both kernel parameters go into a single GRUB_CMDLINE_LINUX assignment: appending
# two separate assignments would leave only the last one in effect.
echo 'GRUB_CMDLINE_LINUX="audit=1 audit_backlog_limit=8192"' | sudo tee -a /etc/default/grub
grub2-mkconfig -o /boot/grub2/grub.cfg
# 4.1.2 Configure Data Retention
# 4.1.2.1 Ensure audit log storage size is configured (Scored)
# TODO:
# Set the following parameter in /etc/audit/auditd.conf in accordance with site policy:
# max_log_file = <MB>
# 4.1.2.2 Ensure audit logs are not automatically deleted (Scored)
sed -i "/max_log_file_action/ c\max_log_file_action = keep_logs" /etc/audit/auditd.conf
# 4.1.2.3 Ensure system is disabled when audit logs are full (Scored)
cat >> /etc/audit/auditd.conf <<EOT
space_left_action = email
action_mail_acct = root
admin_space_left_action = halt
EOT
# 4.1.3 Ensure changes to system administration scope (sudoers) is collected (Scored)
echo "-w /etc/sudoers -p wa -k scope" | sudo tee -a /etc/audit/rules.d/scope.rules
echo "-w /etc/sudoers.d/ -p wa -k scope" | sudo tee -a /etc/audit/rules.d/scope.rules
# 4.1.4 Ensure login and logout events are collected (Scored)
echo "-w /var/log/faillog -p wa -k logins" | sudo tee -a /etc/audit/rules.d/audit.rules
echo "-w /var/log/lastlog -p wa -k logins" | sudo tee -a /etc/audit/rules.d/audit.rules
# 4.1.5 Ensure session initiation information is collected (Scored)
tee -a /etc/audit/rules.d/logins.rules > /dev/null <<EOT
-w /var/run/utmp -p wa -k session
-w /var/log/wtmp -p wa -k logins
-w /var/log/btmp -p wa -k logins
EOT
# 4.1.6 Ensure events that modify date and time information are collected (Scored)
tee -a /etc/audit/rules.d/time-change.rules > /dev/null <<EOT
-a always,exit -F arch=b64 -S adjtimex -S settimeofday -k time-change
-a always,exit -F arch=b32 -S adjtimex -S settimeofday -S stime -k time-change
-a always,exit -F arch=b64 -S clock_settime -k time-change
-a always,exit -F arch=b32 -S clock_settime -k time-change
-w /etc/localtime -p wa -k time-change
EOT
# 4.1.7 Ensure events that modify the system's Mandatory Access Controls are collected (Scored)
tee -a /etc/audit/rules.d/MAC-policy.rules > /dev/null <<EOT
-w /etc/selinux/ -p wa -k MAC-policy
-w /usr/share/selinux/ -p wa -k MAC-policy
EOT
# 4.1.8 Ensure events that modify the system's network environment are collected (Scored)
tee -a /etc/audit/rules.d/system-locale.rules > /dev/null <<EOT
-a always,exit -F arch=b64 -S sethostname -S setdomainname -k system-locale
-a always,exit -F arch=b32 -S sethostname -S setdomainname -k system-locale
-w /etc/issue -p wa -k system-locale
-w /etc/issue.net -p wa -k system-locale
-w /etc/hosts -p wa -k system-locale
-w /etc/sysconfig/network -p wa -k system-locale
EOT
# 4.1.9 Ensure discretionary access control permission modification events are collected (Scored)
tee -a /etc/audit/rules.d/perm_mod.rules > /dev/null <<EOT
-a always,exit -F arch=b64 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod
-a always,exit -F arch=b32 -S chmod -S fchmod -S fchmodat -F auid>=1000 -F auid!=4294967295 -k perm_mod
-a always,exit -F arch=b64 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod
-a always,exit -F arch=b32 -S chown -S fchown -S fchownat -S lchown -F auid>=1000 -F auid!=4294967295 -k perm_mod
-a always,exit -F arch=b64 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod
-a always,exit -F arch=b32 -S setxattr -S lsetxattr -S fsetxattr -S removexattr -S lremovexattr -S fremovexattr -F auid>=1000 -F auid!=4294967295 -k perm_mod
EOT
# 4.1.10 Ensure unsuccessful unauthorized file access attempts are collected (Scored)
tee -a /etc/audit/rules.d/access.rules > /dev/null <<EOT
-a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access
-a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EACCES -F auid>=1000 -F auid!=4294967295 -k access
-a always,exit -F arch=b64 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access
-a always,exit -F arch=b32 -S creat -S open -S openat -S truncate -S ftruncate -F exit=-EPERM -F auid>=1000 -F auid!=4294967295 -k access
EOT
# 4.1.11 Ensure events that modify user/group information are collected (Scored)
tee -a /etc/audit/rules.d/identity.rules > /dev/null <<EOT
-w /etc/group -p wa -k identity
-w /etc/passwd -p wa -k identity
-w /etc/gshadow -p wa -k identity
-w /etc/shadow -p wa -k identity
-w /etc/security/opasswd -p wa -k identity
EOT
# 4.1.12 Ensure successful file system mounts are collected (Scored)
tee -a /etc/audit/rules.d/mounts.rules > /dev/null <<EOT
-a always,exit -F arch=b64 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts
-a always,exit -F arch=b32 -S mount -F auid>=1000 -F auid!=4294967295 -k mounts
EOT
# 4.1.13 Ensure use of privileged commands is collected (Scored)
# TODO: remediate
# 4.1.14 Ensure file deletion events by users are collected (Scored)
tee -a /etc/audit/rules.d/delete.rules > /dev/null <<EOT
-a always,exit -F arch=b64 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete
-a always,exit -F arch=b32 -S unlink -S unlinkat -S rename -S renameat -F auid>=1000 -F auid!=4294967295 -k delete
EOT
# 4.1.15 Ensure kernel module loading and unloading is collected (Scored)
tee -a /etc/audit/rules.d/modules.rules > /dev/null <<EOT
-w /sbin/insmod -p x -k modules
-w /sbin/rmmod -p x -k modules
-w /sbin/modprobe -p x -k modules
-a always,exit -F arch=b64 -S init_module -S delete_module -k modules
EOT
# 4.1.16 Ensure system administrator actions (sudolog) are collected (Scored)
tee -a /etc/audit/rules.d/audit.rules > /dev/null <<EOT
-w /var/log/sudo.log -p wa -k actions
EOT
# 4.1.17 Ensure the audit configuration is immutable (Scored)
tee -a /etc/audit/rules.d/99-finalize.rules > /dev/null <<EOT
-e 2
EOT
# **********************************************
# 4.2 Configure Logging
# **********************************************
# 4.2.1 Configure rsyslog
# 4.2.1.1 Ensure rsyslog is installed (Scored)
dnf install rsyslog -y
# 4.2.1.2 Ensure rsyslog Service is enabled (Scored)
systemctl --now enable rsyslog
# 4.2.1.3 Ensure rsyslog default file permissions configured (Scored)
sed -i '/$FileCreateMode/ c\$FileCreateMode 0640' /etc/rsyslog.conf /etc/rsyslog.d/*.conf
# 4.2.1.4 Ensure logging is configured (Not Scored)
# TODO: remediate
# 4.2.1.5 Ensure rsyslog is configured to send logs to a remote log host (Scored)
# TODO: remediate
# 4.2.1.6 Ensure remote rsyslog messages are only accepted on designated log hosts. (Not Scored)
# TODO: remediate
# 4.2.2 Configure journald
# 4.2.2.1 Ensure journald is configured to send logs to rsyslog (Scored)
# 4.2.2.2 Ensure journald is configured to compress large log files (Scored)
# 4.2.2.3 Ensure journald is configured to write logfiles to persistent disk (Scored)
tee -a /etc/systemd/journald.conf > /dev/null <<EOT
ForwardToSyslog=yes
Compress=yes
Storage=persistent
EOT
# 4.2.3 Ensure permissions on all logfiles are configured (Scored)
find /var/log -type f -exec chmod g-wx,o-rwx "{}" + -o -type d -exec chmod g-wx,o-rwx "{}" +
# 4.3 Ensure logrotate is configured (Not Scored)
# TODO: remediate
| true |
844c80677149884ff27be6cc22cb3da449f266e4
|
Shell
|
ram437/scripts
|
/smoketest_valhalla_glassfish.sh
|
UTF-8
| 556 | 3.515625 | 4 |
[] |
no_license
|
#!/bin/bash
MY_PATH="`dirname \"$0\"`"
cd $MY_PATH
. get_inventory.sh || { echo "Failed to fetch the inventory .."; exit 1; }
if [[ "${ENV_DC}_${ENV_TYPE}" == "RS_STAGE" ]]; then
search="Best Run"
else
search="Lorem ipsum"
fi
for server in $VALHALLASERVERS;
do
url="http://$server/"
output=`no_proxy='*' curl -s --max-redirs 0 -H "Host: $VALHALLAHOST" -H "X-forwarded-proto: https" $url`
if [[ ! "$output" =~ "$search" ]]; then
echo "Failed $url"
echo "$output"
exit 1
else
echo "OK - $url"
fi
done
| true |
c39be21e08e0a57f8ec7566df2fe4f6813f7daed
|
Shell
|
rsenn/scripts
|
/profile/pkg-config.sh
|
UTF-8
| 526 | 3.40625 | 3 |
[] |
no_license
|
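# Derive PKG_CONFIG_PATH (unless already set) from PATH: for each bin directory, append its sibling lib/pkgconfig directory if it exists, skipping duplicates; cygpath normalises the paths on Cygwin.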
init_pkgconfig_path() {
if [ "${PKG_CONFIG_PATH+set}" != set ]; then
old_IFS=$IFS IFS=:; set -- $PATH; IFS=";
";isin() { N=$1; while [ "$#" -gt 1 ]; do shift; [ "$N" = "$1" ] && return 0; done; return 1
}; P="${*%%/bin*}"; set --; for D in $P; do D=$D/lib/pkgconfig; if [ -d "$D" ]; then
type cygpath 2>/dev/null >/dev/null && IFS=: && D=`cygpath -a "$D"`
! isin "$D" "$@" && set -- "$@" "$D"
fi; done; PKG_CONFIG_PATH=$*; IFS=$old_IFS
fi
export PKG_CONFIG_PATH
}
#unset PKG_CONFIG_PATH
init_pkgconfig_path
| true |
77eb661830956dbe995fcaf2667ed56326e785f5
|
Shell
|
CrownBonded/bazel
|
/compile.sh
|
UTF-8
| 8,053 | 3.890625 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2014 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script bootstraps building a Bazel binary without Bazel, then
# uses this compiled Bazel to bootstrap Bazel itself. It can also
# be provided with a previous version of Bazel to bootstrap Bazel
# itself.
# The resulting binary can be found at output/bazel.
set -o errexit
# Correct PATH on Windows, to avoid using "FIND.EXE" instead of "/usr/bin/find"
# etc, leading to confusing errors.
export BAZEL_OLD_PATH=$PATH
case "$(uname -s | tr '[:upper:]' '[:lower:]')" in
msys*|mingw*)
# Check that the PATH is set up correctly by attempting to locate `[`.
# This ensures that `which` is installed correctly and can succeed, while
# also avoids accidentally locating a tool that exists in plain Windows too
# (like "find" for "FIND.EXE").
which [ >&/dev/null || export PATH="/bin:/usr/bin:$PATH"
esac
# Check that the bintools can be found, otherwise we would see very confusing
# error messages.
which [ >&/dev/null || {
echo >&2 "ERROR: cannot locate GNU bintools; check your PATH."
echo >&2 " (You may need to run 'export PATH=/bin:/usr/bin:\$PATH')"
exit 1
}
cd "$(dirname "$0")"
# Set the default verbose mode in buildenv.sh so that we do not display command
# output unless there is a failure. We do this conditionally to offer the user
# a chance of overriding this in case they want to do so.
: ${VERBOSE:=no}
source scripts/bootstrap/buildenv.sh
function usage() {
[ -n "${1:-compile}" ] && echo "Invalid command(s): $1" >&2
echo "syntax: $0 [command[,command]* [BAZEL_BIN [BAZEL_SUM]]]" >&2
echo " General purpose commands:" >&2
echo " compile = compile the bazel binary (default)" >&2
echo " Commands for developers:" >&2
echo " all = compile,determinism,test" >&2
echo " determinism = test for stability of Bazel builds" >&2
echo " srcs = test that //:srcs contains all the sources" >&2
echo " test = run the full test suite of Bazel" >&2
exit 1
}
function parse_options() {
local keywords="(compile|all|determinism|bootstrap|srcs|test)"
COMMANDS="${1:-compile}"
[[ "${COMMANDS}" =~ ^$keywords(,$keywords)*$ ]] || usage "$@"
DO_COMPILE=
DO_CHECKSUM=
DO_FULL_CHECKSUM=1
DO_TESTS=
DO_SRCS_TEST=
[[ "${COMMANDS}" =~ (compile|all) ]] && DO_COMPILE=1
[[ "${COMMANDS}" =~ (bootstrap|determinism|all) ]] && DO_CHECKSUM=1
[[ "${COMMANDS}" =~ (bootstrap) ]] && DO_FULL_CHECKSUM=
[[ "${COMMANDS}" =~ (srcs|all) ]] && DO_SRCS_TEST=1
[[ "${COMMANDS}" =~ (test|all) ]] && DO_TESTS=1
BAZEL_BIN=${2:-"bazel-bin/src/bazel"}
BAZEL_SUM=${3:-"x"}
}
parse_options "${@}"
mkdir -p output
: ${BAZEL:=${2-}}
#
# Create an initial binary so we can host ourself
#
if [ ! -x "${BAZEL}" ]; then
display "$INFO You can skip this first step by providing a path to the bazel binary as second argument:"
display "$INFO $0 ${COMMANDS} /path/to/bazel"
new_step 'Building Bazel from scratch'
source scripts/bootstrap/compile.sh
# The DO_COMPILE flow will actually create the bazel binary and set BAZEL.
DO_COMPILE=1
fi
#
# Bootstrap bazel using the previous bazel binary = release binary
#
if [ "${EMBED_LABEL-x}" = "x" ]; then
# Add a default label when unspecified
git_sha1=$(git_sha1)
EMBED_LABEL="$(get_last_version) (@${git_sha1:-non-git})"
fi
if [[ $PLATFORM == "darwin" ]] && \
xcodebuild -showsdks 2> /dev/null | grep -q '\-sdk iphonesimulator'; then
EXTRA_BAZEL_ARGS="${EXTRA_BAZEL_ARGS-} --define IPHONE_SDK=1"
fi
source scripts/bootstrap/bootstrap.sh
if [ $DO_COMPILE ]; then
new_step 'Building Bazel with Bazel'
display "."
log "Building output/bazel"
bazel_build "src:bazel${EXE_EXT}" \
|| fail "Could not build Bazel"
bazel_bin_path="$(get_bazel_bin_path)/src/bazel${EXE_EXT}"
[ -e "$bazel_bin_path" ] \
|| fail "Could not find freshly built Bazel binary at '$bazel_bin_path'"
cp -f "$bazel_bin_path" "output/bazel${EXE_EXT}" \
|| fail "Could not copy '$bazel_bin_path' to 'output/bazel${EXE_EXT}'"
chmod 0755 "output/bazel${EXE_EXT}"
BAZEL="$(pwd)/output/bazel${EXE_EXT}"
fi
#
# Output is deterministic between two bootstrapped bazel binary using the actual tools and the
# released binary.
#
if [ $DO_CHECKSUM ]; then
new_step "Determinism test"
if [ ! -f ${BAZEL_SUM:-x} ]; then
BAZEL_SUM=bazel-out/bazel_checksum
log "First build"
bootstrap_test ${BAZEL} ${BAZEL_SUM}
else
BOOTSTRAP=${BAZEL}
fi
if [ "${BAZEL_SUM}" != "${OUTPUT_DIR}/bazel_checksum" ]; then
cp ${BAZEL_SUM} ${OUTPUT_DIR}/bazel_checksum
fi
if [ $DO_FULL_CHECKSUM ]; then
log "Second build"
bootstrap_test ${BOOTSTRAP} bazel-out/bazel_checksum
log "Comparing output"
(diff -U 0 ${OUTPUT_DIR}/bazel_checksum bazel-out/bazel_checksum >&2) \
|| fail "Differences detected in outputs!"
fi
fi
#
# Test that //:srcs contains all the sources
#
if [ $DO_SRCS_TEST ]; then
new_step "Checking that //:srcs contains all the sources"
log "Querying //:srcs"
${BAZEL} query 'kind("source file", deps(//:srcs))' 2>/dev/null \
| grep -v '^@' \
| sed -e 's|^//||' | sed -e 's|^:||' | sed -e 's|:|/|' \
| sort -u >"${OUTPUT_DIR}/srcs-query"
log "Finding all files"
# SRCS_EXCLUDES can be overridden to add more exceptions to the find
# commands (for CI systems).
SRCS_EXCLUDES=${SRCS_EXCLUDES-XXXXXXXXXXXXXX1268778dfsdf4}
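# The default value is a deliberately unmatchable string, so nothing extra is excluded unless the caller overrides it.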
# See file BUILD for the list of grep -v exceptions.
# tools/defaults package is hidden by Bazel so cannot be put in the srcs.
find . -type f | sed -e 's|./||' \
| grep -v '^bazel-' | grep -v '^WORKSPACE.user.bzl' \
| grep -v '^\.' | grep -v '^out/' | grep -v '^output/' \
| grep -v '^derived' \
| grep -Ev "${SRCS_EXCLUDES}" \
| grep -v '^tools/defaults/BUILD' \
| sort -u >"${OUTPUT_DIR}/srcs-find"
log "Diffing"
res="$(diff -U 0 "${OUTPUT_DIR}/srcs-find" "${OUTPUT_DIR}/srcs-query" | sed -e 's|^-||' | grep -Ev '^(@@|\+\+|--)' || true)"
if [ -n "${res}" ]; then
fail "//:srcs filegroup does not contain all the sources, missing:
${res}"
fi
fi
#
# Tests
#
if [ $DO_TESTS ]; then
new_step "Running tests"
display "."
ndk_target="$(get_bind_target //external:android_ndk_for_testing)"
sdk_target="$(get_bind_target //external:android_sdk_for_testing)"
if [ "$ndk_target" = "//:dummy" -o "$sdk_target" = "//:dummy" ]; then
display "$WARNING Android SDK or NDK are not set in the WORKSPACE file. Android tests will not be run."
fi
[ -n "$JAVAC_VERSION" ] || get_java_version
if [[ ! "${BAZEL_TEST_FILTERS-}" =~ "-jdk8" ]]; then
if [ "8" -gt ${JAVAC_VERSION#*.} ] || [ "${JAVA_VERSION}" = "1.7" ]; then
display "$WARNING Your version of Java is lower than 1.8!"
display "$WARNING Deactivating Java 8 tests, please use a JDK 8 to fully"
display "$WARNING test Bazel."
if [ -n "${BAZEL_TEST_FILTERS-}" ]; then
BAZEL_TEST_FILTERS="${BAZEL_TEST_FILTERS},-jdk8"
else
BAZEL_TEST_FILTERS="-jdk8"
fi
fi
fi
$BAZEL --bazelrc=${BAZELRC} --nomaster_bazelrc \
${BAZEL_DIR_STARTUP_OPTIONS} \
test \
--test_tag_filters="${BAZEL_TEST_FILTERS-}" \
--build_tests_only \
--nolegacy_bazel_java_test \
--define JAVA_VERSION=${JAVA_VERSION} \
${EXTRA_BAZEL_ARGS} \
-k --test_output=errors //src/... //third_party/ijar/... //scripts/... \
|| fail "Tests failed"
fi
clear_log
display "Build successful! Binary is here: ${BAZEL}"
| true |
c6f8abaeb6560c454537f94a84c1f51f2ab1389f
|
Shell
|
topel-research-group/Arctic_plant_chloproplast_genomes
|
/05_Contamination_filtering/remove_seq_based_on_contaminated_contigs.sh
|
UTF-8
| 966 | 3.484375 | 3 |
[] |
no_license
|
#this script loops over all folders (genes), removes any sequences derived from contigs that were contaminated, and loads the results into a new fasta file (${GENE}_post_contig_contamination_filer.fasta)
for TAX in `find ./* -maxdepth 0 -type d`; do
cd $TAX
#Create correct output file name
GENE=$(pwd | cut -d '/' -f 7)
echo $GENE
cat ../contigs_to_remove.txt | sed "s/\t/.*/g" | sed "s/^/^/g" | sed "s/ //g" | sed 's/$/$/g' > remove_contaminated_contigs_grep.txt
cat ${GENE}_BLAST_top_ncbi_hit_and_self_top_4_out_family_contig_summary.txt | cut -d $'\t' -f1,11 > contigs_to_match.txt
grep -v -f remove_contaminated_contigs_grep.txt contigs_to_match.txt | cut -d $'\t' -f1 > sequences_to_keep.txt
grep -A1 -f sequences_to_keep.txt ${GENE}_all_seq.fa | sed 's/-*//g' > ${GENE}_post_contig_contamination_filer.fasta
rm remove_contaminated_contigs_grep.txt
rm contigs_to_match.txt
rm sequences_to_keep.txt
cd ..
done
| true |
14ee42ac29fbf58c4dd4287b4007bbd8bbf881dd
|
Shell
|
NetBSD/pkgsrc-wip
|
/deforaos-probe/files/DaMon.sh
|
UTF-8
| 380 | 3.03125 | 3 |
[] |
no_license
|
#!/bin/sh
#
# $NetBSD: DaMon.sh,v 1.1 2010/04/15 15:54:57 khorben Exp $
#
# PROVIDE: DaMon
# REQUIRE: Probe
# KEYWORD: shutdown
if [ -f /etc/rc.subr ]
then
. /etc/rc.subr
fi
name="DaMon"
rcvar=$name
command="/usr/local/bin/DaMon"
command_args="&"
required_files="/usr/local/etc/$name.conf"
DaMon_chdir="/home/damon"
DaMon_user="damon"
load_rc_config $name
run_rc_command "$1"
| true |
5d54377e81c5015019d0ef6adb776b510ec1eda3
|
Shell
|
klooj/zsh-dots
|
/my_plugins/utils/funcs/ytvid
|
UTF-8
| 373 | 2.828125 | 3 |
[] |
no_license
|
youtube-dl $1
open ~/Desktop/yt
[[ $OSTYPE =~ darwin ]] && \
echo "if you want to add it straight to apple tv library, run:\nrmove ~/Desktop/yt/video '/Volumes/kusb3_250hfs/TV/Automatically Add to TV.localized'"
[[ $OSTYPE =~ linux ]] && \
echo "i'm unprepared for this part. just raw dog it."
shift
while (( $# > 0 )); do
youtube-dl $1
shift
done
# vim: ft=zsh:
| true |
26b876dbc6be484e6f5774599ef479cd91cafed0
|
Shell
|
manangatangy/flutter_vpn_switch
|
/vpnswitch/vpn_current.sh
|
UTF-8
| 134 | 3.09375 | 3 |
[] |
no_license
|
#! /bin/bash
if ls /etc/openvpn/*.conf > /dev/null 2>&1 ; then
F="`ls /etc/openvpn/*.conf`"
basename "$F" .conf
else
exit 1
fi
| true |
55cfdf47828282ac8053726b4ab46a2c217c91d8
|
Shell
|
dpark6060/FSL_Preprocessing
|
/Bash/FillTemplate.sh
|
UTF-8
| 4,171 | 3.578125 | 4 |
[] |
no_license
|
#!/bin/bash
TEMPLATE=../Template/Template.fsf
# This should be set up in some universal path file
FSL_DIR=/usr/local/fsl/5.0.7/bin
####################################################################
# VOLUME INFO
####################################################################
# THE INPUT FMRI VOLUME TO USE (CAN BE .NII OR .NII.GZ)
INPUT_DATA='/share/dbp2123/dparker/Code/FSL_DefaultProcessing/fMRI.nii.gz'
# USE 'FSLHD' TO FIND AND SET THE TR
#TR=` fslhd ${INPUT_DATA} | grep pixdim3 | tr -s ' ' | cut -d' ' -f2 `
TR=` fslhd ${INPUT_DATA} | grep pixdim4 | awk '{print $2}' `
# FOR SOME REASON IT LIKES TO KNOW THE NUMBER OF VOXELS. WE'LL
# use 'fslhd' again to do this
DXYZ=(` fslhd ${INPUT_DATA} | grep ^dim[1-4] `)
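# fslhd prints lines like "dim1 64"; captured as a word array, the numeric values land at the odd indices (1, 3, 5, 7).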
NX=${DXYZ[1]}
NY=${DXYZ[3]}
NZ=${DXYZ[5]}
NUM_VOL=${DXYZ[7]}
# As long as NX NY NZ and NUM_VOL are int, this works fine.
# Decimals will mess it up
NUM_VOX=$(( NX*NY*NZ*NUM_VOL ))
# SET THE NUMBER OF VOLUMES TO DELETE
DEL_VOL=0
# SET THE OUTPUT DIRECTORY
OUTPUTDIR=` dirname ${INPUT_DATA} `/fsl_preproc
if [ ! -e ${OUTPUTDIR} ]; then
mkdir ${OUTPUTDIR}
fi
####################################################################
# STATISTICS
# Strictly speaking, these aren't important for the preprocessing
# and COULD be ignored
####################################################################
# SET THE BRAIN BACKGROUND THRESHOLD
# It is used in intensity normalisation, brain mask
# generation and various other places in the analysis.
BB_THRESH=10
# SET THE Z THRESHOLD FOR DESIGN EFFICIENCY CALCULATION
# used to determine what level of activation would
# be statistically significant, to be used only in the design
# efficiency calculation. Increasing this will result in higher
# estimates of required effect.
Z_THRESH=5.3
# SET THE FMRI NOISE LEVEL
# the standard deviation (over time) for a
# typical voxel, expressed as a percentage of the baseline signal level.
NOISE_LVL=0.66
# SET THE TEMPORAL SMOOTHNESS
# is the smoothness coefficient in a simple
# AR(1) autocorrelation model (much simpler than that actually used in
# the FILM timeseries analysis but good enough for the efficiency
# calculation here).
T_SMOOTH=0.34
####################################################################
# PREPROCESSING OPTIONS
####################################################################
# RUN MOTION CORRECTION
MC=1
# RUN SLICE TIMING CORRECTION
# 0 : None
# 1 : Regular up (0, 1, 2, 3, ...)
# 2 : Regular down
# 3 : Use slice order file
# 4 : Use slice timings file
# 5 : Interleaved (0, 2, 4 ... 1, 3, 5 ... )
STC=1
# SLICE ORDER/TIMING FILE
# If at slice order or timing file is chosen,
# This must also be set
SLICE_FILE=''
# RUN BRAIN EXTRACTION USING FSL's BET
BET=1
# SET THE FWHM FOR SPATIAL SMOOTHING (mm)
FWHM=5
# RUN INTENSITY NORMALIZATION
INT_NORM=1
# HIGHPASS FILTER CUTOFF (seconds)
HPF_CUTOFF=100
# RUN HIGHPASS FILTERING
HPF=1
####################################################################
# CREATE TEMPLATE
####################################################################
# Create a list of all the variable names
# which match the place-holding text in the template
VAR_STRINGS=( INPUT_DATA TR NUM_VOL NUM_VOX DEL_VOL OUTPUTDIR BB_THRESH Z_THRESH NOISE_LVL T_SMOOTH MC STC SLICE_FILE BET FWHM INT_NORM HPF_CUTOFF HPF )
cp ${TEMPLATE} ${OUTPUTDIR}/MyDesign.fsf
# loop through and perform substitution
for var_name in ${VAR_STRINGS[@]}; do
var_val=` eval 'echo $'$var_name `
# We need to escape any forward slashes in the value as "\/" so the sed substitution treats them literally
var_val=` echo ${var_val////"\/"} `
sed -i -e "s/\^${var_name}\^/${var_val}/g" ${OUTPUTDIR}/MyDesign.fsf
done
## Or with a bash-only approach:
#
#for var_name in ${VAR_STRINGS[@]}; do
#
# var_val=eval 'echo $'$var_name
# while read a ; do echo ${a//^${var_name}^/${var_val}} ; done < ${OUTPUTDIR}/MyDesign.fsf > ${OUTPUTDIR}/MyDesign.fsf.t ; mv ${OUTPUTDIR}/MyDesign.fsf{.t,}
#
#done
#
# RUN THE .FSF FILE
$FSL_DIR/feat ${OUTPUTDIR}/MyDesign.fsf
# CLEANUP THE OUTPUT DIRECTORIES
# fsl will create an ${OUTPUTDIR}.feat directory anyways,
# so we can delete the old one
rm -rf ${OUTPUTDIR}
| true |
42c935e3b366c5f1ebd76a216f5da037a4a80a38
|
Shell
|
uchile-robotics/bender_core
|
/bender_sensors/install/cameras.sh
|
UTF-8
| 1,021 | 3.421875 | 3 |
[] |
no_license
|
#!/bin/bash
# - - - - - - S E T U P - - - - - - - -
# # # # # # # # # # # # # # # # # # # #
UDEV_DIR="/opt/bender/udev"
RULES_DIR="/etc/udev/rules.d"
# - - - - - - I N S T A L L - - - - - -
# # # # # # # # # # # # # # # # # # # #
# usb_cam fork install
# TODO: unify this as a method called from bender_system
# (this is used in the bender installer)
uchile_cd forks
if [ ! -d usb_cam ]; then
echo "Cloning -usb_cam- fork from github."
git clone https://github.com/uchile-robotics/usb_cam.git
cd usb_cam
git checkout master
else
echo "-usb_cam- fork already exists. updating"
cd usb_cam
git checkout -- .
git fetch
git checkout 0.3.4
fi
uchile_cd bender_sensors
# -- udev rules --
echo -e "\n$installer Installing udev rules for Logitech cameras"
# prepare script for udev rules
sudo mkdir -p "$UDEV_DIR"
sudo cp -f install/files/camera.sh "$UDEV_DIR"/camera.sh
# add udev rules
sudo cp -f install/files/10-bender_camera.rules "$RULES_DIR"/10-bender_camera.rules
sudo udevadm control --reload
| true |
53c78093d8256f6161655a229b1b7327dc265602
|
Shell
|
Enstore-org/enstore
|
/sbin/ipmi_restart
|
UTF-8
| 882 | 3.34375 | 3 |
[] |
no_license
|
#!/bin/sh
set -u
if [ "${1:-}" = "-x" ] ; then set -xv; shift; fi
if [ "`whoami`" != "root" ]; then
echo `date` Only root can run $0
exit 1
fi
here=`pwd`
cd /root
# stop the watchdog timer and ipmisrv process
### all lines that begin with 3 pound signs were commented
### out in order to remove any watchdog timer functionality.
### uncomment these lines to get the functionality back.
###echo `date` Stopping watchdog timer and ipmisrv
echo `date` Stopping ipmisrv ### remove me for watchdog support
$IPMI_DIR/dogdisarm
ps axuww|grep ipmisrv| grep -v grep
x=`ps axuww|grep ipmisrv| grep -v grep|awk '{print $2}'`
echo kill $x
kill $x
sleep 3
# Startup the watchdog timer and ipmisrv process.
# The timer will reset the system if 5 minutes passes
# without a reset by deadman with ipmisrv.
$IPMI_DIR/ipmi_startup
ps axuww|grep ipmisrv| grep -v grep
cd $here
| true |
0bc0cc568ef4a43c80c91a459f62758a9e379450
|
Shell
|
jamespharaoh/devbox-tools
|
/devbox-preseed-template
|
UTF-8
| 3,179 | 2.625 | 3 |
[] |
no_license
|
cat <<-EOF
# localisation
d-i debian-installer/locale select C
d-i debian-installer/language string en
d-i debian-installer/country string GB
# keyboard
d-i console-setup/ask_detect boolean false
d-i keyboard-configuration/layoutcode string gb
# network
d-i netcfg/choose_interface select eth0
d-i netcfg/get_hostname string $NAME
d-i netcfg/get_domain string wistla.com
# user account
d-i passwd/root-login boolean false
d-i passwd/make-user boolean true
d-i passwd/user-fullname string Ubuntu User
d-i passwd/username string ubuntu
d-i passwd/user-password password ubuntu
d-i passwd/user-password-again password ubuntu
d-i user-setup/allow-password-weak boolean true
d-i user-setup/encrypt-home boolean false
# timezone
d-i clock-setup/utc boolean true
d-i clock-setup/ntp boolean false
d-i time/zone string Etc/UTC
# partitioning
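# expert_recipe fields are <minimum size> <priority> <maximum size> <filesystem>: a single primary, bootable ext4 partition mounted at /.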
d-i partman-auto/disk string /dev/sda
d-i partman-auto/method string regular
d-i partman-auto/expert_recipe string \\
boot-root :: \\
500 10000 1000000000 ext4 \\
\$primary{ } \\
\$bootable{ } \\
method{ format } \\
format{ } \\
use_filesystem{ } \\
filesystem{ ext4 } \\
mountpoint{ / } \\
.
d-i partman-partitioning/confirm_write_new_label boolean true
d-i partman-basicfilesystems/default_label string mbr
d-i partman/choose_partition select finish
d-i partman-basicfilesystems/no_swap boolean false
d-i partman/confirm_nooverwrite boolean true
d-i partman/confirm boolean true
# base system
d-i base-installer/install-recommends boolean false
d-i base-installer/kernel/image string $KERNEL
# mirror
d-i mirror/http/proxy string
# packages
tasksel tasksel/first multiselect standard
d-i pkgsel/upgrade select full-upgrade
d-i pkgsel/language-packs multiselect en
d-i pkgsel/update-policy select none
popularity-contest popularity-contest/participate boolean false
d-i pkgsel/updatedb boolean true
d-i pkgsel/include string avahi-daemon openssh-server
# grub
d-i grub-installer/skip boolean false
d-i lilo-installer/skip boolean true
d-i grub-installer/only_debian boolean true
d-i grub-installer/with_other_os boolean true
# other
d-i preseed/late_command string \\
clear; \\
\\
echo "Allow sudo without password" >&2; \\
echo "%sudo ALL=(ALL) NOPASSWD: ALL" >>/target/etc/sudoers; \\
\\
echo "Add SSH public key to authorized hosts" >&2; \\
mkdir -p /target/home/ubuntu/.ssh; \\
echo "$(cat "$HOME/.ssh/id_rsa.pub")" \\
>> /target/home/ubuntu/.ssh/authorized_keys; \\
chown 1000:1000 \\
/target/home/ubuntu/.ssh \\
/target/home/ubuntu/.ssh/authorized_keys; \\
\\
echo "Disable apt source package sources" >&2; \\
sed -i '/^deb-src/s/^/#/' /target/etc/apt/sources.list; \\
\\
echo "Disable sshd reverse dns lookups" >&2; \\
echo "UseDNS no" >> /target/etc/ssh/sshd_config; \\
\\
echo "Synchronize all changes to disk" >&2; \\
sync; \\
\\
echo "Custom preseed confguration complete" >&2; \\
sleep 2;
# finish
d-i finish-install/reboot_in_progress note
d-i cdrom-detect/eject boolean false
d-i debian-installer/exit/poweroff boolean true
EOF
# ex: noet ts=4 filetype=sh
| true |
a5763ca7b3cb4f9b712cc238cde7bcf4bbd63462
|
Shell
|
michaelmusty/.dotfiles
|
/bash/bash_completion.d/pass.bash
|
UTF-8
| 1,631 | 3.921875 | 4 |
[
"Unlicense",
"LicenseRef-scancode-public-domain"
] |
permissive
|
# Load _completion_ignore_case helper function
if ! declare -F _completion_ignore_case >/dev/null ; then
source "$HOME"/.bash_completion.d/_completion_ignore_case.bash
fi
# Custom completion for pass(1), because I don't like the one included with the
# distribution
_pass() {
# Iterate through completions produced by subshell
local ci comp
while IFS= read -d '' -r comp ; do
COMPREPLY[ci++]=$comp
done < <(
# Make globs expand appropriately
shopt -u dotglob
shopt -s nullglob
if _completion_ignore_case ; then
shopt -s nocaseglob
fi
# Set password store path
pass_dir=${PASSWORD_STORE_DIR:-"$HOME"/.password-store}
# Gather the entries
for entry in "$pass_dir"/"$2"*.gpg ; do
entries[ei++]=$entry
done
# Try to iterate into subdirs, use depth search with ** if available
if shopt -s globstar 2>/dev/null ; then
for entry in "$pass_dir"/"$2"*/**/*.gpg ; do
entries[ei++]=$entry
done
else
for entry in "$pass_dir"/"$2"*/*.gpg ; do
entries[ei++]=$entry
done
fi
# Iterate through entries
for entry in "${entries[@]}" ; do
# Skip directories
! [[ -d $entry ]] || continue
# Strip leading path
entry=${entry#"$pass_dir"/}
# Strip .gpg suffix
entry=${entry%.gpg}
# Print shell-quoted entry, null terminated
printf '%q\0' "$entry"
done
)
}
complete -F _pass pass
| true |
d48ff4cc91acb56a239900b393ae980f0665dfb6
|
Shell
|
t-wissmann/dotfiles
|
/menu/rofi-file-open.sh
|
UTF-8
| 806 | 3.828125 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
set -e
usage() {
cat <<EOF
$0 DIR
List files in the directory DIR and its subdirectories in a rofi menu and open
the selected files. When selecting files via <Shift>-Return then the menu is
kept open.
EOF
}
dir="$1"
if [[ -z "$dir" ]] ; then
usage >&2
exit 1
fi
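# Scrape the rofi help output for the key bound to -kb-accept-alt, i.e. the "accept entry and keep the menu open" binding mentioned in the message bar.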
multiselectkey=$(rofi -h \
| grep -m 1 -A1 '[-]kb-accept-alt' \
| tail -n 1 \
| awk '{print $1 ; }')
mesg="Type <i>$multiselectkey</i> to open multiple files."
rofiflags=(
-dmenu
-multi-select
-i
-p 'open'
-mesg "$mesg"
)
find -L "$dir" -type f -printf '%P\n' \
| grep -v '~$\|/\.\|^\.\|.swp$' \
| sort \
| rofi "${rofiflags[@]}" \
| while read line ; do
echo "$line"
xdg-open "$dir/$line" 2> /dev/null 1> /dev/null &
done
# vim: tw=80
| true |
8c61d159370e988fe19a2a39230be9f1f48d3af6
|
Shell
|
coralnexus/corl-bootstrap
|
/os/ubuntu/01_git.sh
|
UTF-8
| 760 | 3.421875 | 3 |
[] |
no_license
|
#!/bin/bash
#-------------------------------------------------------------------------------
# Install Git.
echo "1. Ensuring Git"
apt-get -y install git >/tmp/git.install.log 2>&1 || exit 10
# Make sure it is easy to communicate with repo hosts
echo "2. Adding GitHub to root known hosts"
mkdir -p "/root/.ssh" || exit 11
touch "/root/.ssh/known_hosts" || exit 12
ssh-keygen -R github.com >/dev/null 2>&1 || exit 13 # No duplicates
ssh-keyscan -H github.com >> "/root/.ssh/known_hosts" 2>/dev/null || exit 14
echo "3. Adding default Git configurations"
if [ -z "`git config --global user.name`" ]
then
git config --global user.name "CoralNexus Machine"
fi
if [ -z "`git config --global user.email`" ]
then
git config --global user.email "admin@coralnexus.com"
fi
| true |
a66085a9d0570cc6811899d3fb69238cadbd4035
|
Shell
|
enr/dockerfiles
|
/sdkman/docker-entrypoint.sh
|
UTF-8
| 203 | 3.125 | 3 |
[] |
no_license
|
#!/bin/bash
set -eo pipefail
sdkman_init="${SDKMAN_DIR}/bin/sdkman-init.sh"
[[ -r "$sdkman_init" ]] || {
echo "NOT FOUND ${sdkman_init}"
}
[[ -r "$sdkman_init" ]] && source "$sdkman_init"
exec "$@"
| true |
86549ebb0681e2e355e195ff45d42ac461f28214
|
Shell
|
yafraorg/yafra
|
/org.yafra.sysadm/defaults/scripts/sa_ishpux11
|
UTF-8
| 5,770 | 2.875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/sh
#------------------------------------------------------------------------------
# (c) yafra.org
#
# function: install a client (hp 9000 series 700) system (IS)
#
# author: Administrator (mw)
#
# date: 18.10.94
#
# !! USE cvs for check in and check out and lock !!!!
#
#------------------------------------------------------------------------------
#
# last modified:
# 27.02.96 mw converted to HP-UX 10.01
# 02.02.98 mw added symbolic link from /home/remote/hpux10 to /usr/remote
# 22.08.01 mw updated
#
#------------------------------------------------------------------------------
IS_LOGFILE=/var/adm/psinstallsystem.log
IS_MASTERDIR=/home/sysadm/hp/master11
IS_SYSTEMNAME=$(hostname)
IS_SYSTEM=$(uname -s)
IS_HW=$(uname -m | cut -c6)
IS_OSVER=$(uname -r | cut -c3-4)
install_must() {
echo "\nStartup scripts/files\n" >> $IS_LOGFILE
IS_COPYFILE=/etc/rc.config.d/psvariables
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/sbin/init.d/psinit
IS_SPECIALCMD="/usr/bin/ln -s /sbin/init.d/psinit /sbin/rc3.d/S900psinit"
copyfile
/usr/bin/ln -s /sbin/init.d/psinit /sbin/rc2.d/K110psinit
IS_COPYFILE=/sbin/sa_psbootsystem
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/sbin/sa_pshostentry
IS_SPECIALCMD=
copyfile
echo "\nInstalling root user setup files\n" >> $IS_LOGFILE
IS_COPYFILE=/.forward
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/.rhosts
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/.profile
IS_SPECIALCMD=
copyfile
echo "\nInstalling hosts related files\n" >> $IS_LOGFILE
IS_COPYFILE=/etc/hosts
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/hosts.equiv
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/X0.hosts
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/resolv.conf
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/libsocks5.conf
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/nsswitch.conf
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/syslog.conf
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/networks
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/ftpusers
IS_SPECIALCMD=
copyfile
#
# !! ATTENTION only on series 700 it's possible to copy passwd file
# I will make a check with uname -m later here
#
# USE vi to add the passwd.add and group.add to insert into the
# system original passwd and group - make a copy of the
# original first cp passwd passwd.orig !! mw 22.8.2001
# TO USE the LDAP HP-UX client ask mw
#
# echo "\nInstalling user related files\n" >> $IS_LOGFILE
IS_COPYFILE=/etc/passwd.add
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/group.add
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/logingroup
IS_SPECIALCMD=
copyfile
echo "\nInstalling system profile related files\n" >> $IS_LOGFILE
# set PATH right here with check ! cat $IS_MASTERDIR/etc/PATH >> /etc/PATH
IS_COPYFILE=/etc/profile
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/profile.ps
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/profile.psobj
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/profile.psproj
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/profile.addons
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/issue
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/copyright
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/crontab.root
IS_SPECIALCMD=
copyfile
IS_COPYFILE=/etc/shells
IS_SPECIALCMD=
copyfile
}
mail() {
echo "\nInstalling mailing / mail route related files\n" >> $IS_LOGFILE
IS_COPYFILE=/etc/mail/aliases
IS_SPECIALCMD=/usr/sbin/newaliases
copyfile
IS_COPYFILE=/etc/mail/sendmail.cf
IS_SPECIALCMD=/usr/sbin/freeze
copyfile
}
#
# logging
#
loginit() {
echo "\nStart of PISO installsystem script" >> $IS_LOGFILE
echo "\nMastersource: $IS_MASTERDIR" >> $IS_LOGFILE
echo "\nHost is: `/bin/hostname` at date: `/bin/date`\n" >> $IS_LOGFILE
}
logexit() {
echo "\nEnd of PISO installsystem script at: `/bin/date`" >> $IS_LOGFILE
}
#
# first make a backup of the original
# then copy master files
#
# INPUT: IS_COPYFILE file to copy (absolut)
# IS_SPECIALCMD system command to execute AFTER copy
#
copyfile()
{
if [ -f $IS_MASTERDIR$IS_COPYFILE ]
then
echo "$IS_MASTERDIR$IS_COPYFILE -> $IS_COPYFILE"
echo "$IS_MASTERDIR$IS_COPYFILE -> $IS_COPYFILE" >> $IS_LOGFILE
cp $IS_COPYFILE $IS_COPYFILE.org
cp $IS_MASTERDIR$IS_COPYFILE $IS_COPYFILE
if [ -n "$IS_SPECIALCMD" ]
then
echo "exec special cmd $IS_SPECIALCMD"
echo "exec special cmd $IS_SPECIALCMD" >> $IS_LOGFILE
$IS_SPECIALCMD
fi
fi
}
# copy files with * or ? expressions
#
# INPUT: IS_COPYFILE file to copy (absolut)
# IS_COPYDEST where to copy to (absolut)
# IS_SPECIALCMD system command to execute AFTER copy
copyfiles()
{
echo "$IS_MASTERDIR$IS_COPYFILE -> $IS_COPYDEST"
echo "$IS_MASTERDIR$IS_COPYFILE -> $IS_COPYDEST" >> $IS_LOGFILE
cp $IS_MASTERDIR$IS_COPYFILE $IS_COPYDEST
if [ -n "$IS_SPECIALCMD" ]
then
echo "exec special cmd $IS_SPECIALCMD"
echo "exec special cmd $IS_SPECIALCMD" >> $IS_LOGFILE
$IS_SPECIALCMD
fi
}
#
# main section
#
if [ `id -u` -ne 0 ]
then
echo "You must be root to run this script";
exit -1;
fi
if [ "$IS_SYSTEM" != "HP-UX" ]
then
echo "This script is only valid for HP-UX";
exit -1;
fi
if [ $IS_HW -ne 7 ]
then
echo "This script is only valid for 700 series";
exit -1;
fi
if [ $IS_OSVER -ne 11 ]
then
echo "This script is only valid for release 11.x";
exit -1;
fi
# make logging
loginit
# begin installing
install_must
# make directories and links
mkdir /work
mkdir /work/yafra-runtime
mkdir /work/yafra-runtime/libs
mkdir /work/yafra-runtime/bin
mkdir /work/yafra-runtime/obj
mkdir /work/yafra-runtime/obj/mapo
mkdir /work/yafra-runtime/obj/libs
mkdir /work/yafra-runtime/obj/addons
mkdir /work/yafra-runtime/obj/testsuite
#ln -s /home/remote/hpux10 /usr/remote
#ln -s /usr/remote/man /usr/local/man
# end logging
logexit
exit 0
| true |
5064eb9ca941b59085537d3c0fbe7143f36df938
|
Shell
|
piandpower/docker-UnrealEngine4
|
/entrypoint.sh
|
UTF-8
| 1,537 | 3.640625 | 4 |
[] |
no_license
|
#!/bin/bash
# abort if UnrealEngine source code not found
sudo test ! -f ${_UNREAL_DATA_DIR}/Setup.sh \
&& echo "UnrealEngine source code not found in ${_UNREAL_DATA_DIR}" \
&& exit 1
# build if not already built
sudo test ! -f ${_UNREAL_DATA_DIR}/Engine/Binaries/Linux/UE4Editor \
&& echo "Building... takes a while" \
&& sudo chown -R ${_USER}:${_USER} ${_UNREAL_DATA_DIR} \
&& cd ${_UNREAL_DATA_DIR} \
&& ./Setup.sh \
&& ./GenerateProjectFiles.sh \
&& make \
&& make BlankProgram \
&& make SlateViewer \
&& make UE4Client \
&& make UE4Game \
&& make UE4Server \
&& echo "Finished building"
# initialize user home directory, if not already
sudo test ! -f ${_UNREAL_HOME_DIR}/.bashrc \
&& sudo chown -R ${_USER}:${_USER} ${_UNREAL_HOME_DIR} \
&& cp /etc/skel/.bashrc ${_UNREAL_HOME_DIR}/.bashrc \
&& cp /etc/skel/.profile ${_UNREAL_HOME_DIR}/.profile \
&& echo "Initialized home directory"
# install plugin: vim/emacs editor
cd ${_UNREAL_DATA_DIR}/Engine/Plugins/Developer \
&& test ! -d SensibleEditorSourceCodeAccess \
&& git clone https://github.com/fire/SensibleEditorSourceCodeAccess \
&& cd - \
&& mono Engine/Binaries/DotNET/UnrealBuildTool.exe Linux Development UE4Editor -module SensibleEditorSourceCodeAccess
# enter the binaries directory
cd ${_UNREAL_DATA_DIR}/Engine/Binaries/Linux
# start a shell by default because UE4Editor launched here fails to start projects
if [[ -z ${1} ]]
then
exec /bin/bash
else
exec ${@}
fi
| true |
7871a304fd920039e4de731473b586832ef4e045
|
Shell
|
windanchaos/MK-CI
|
/V1.1/build.sh
|
UTF-8
| 2,931 | 3.609375 | 4 |
[] |
no_license
|
#!/bin/bash
# Set up the environment variables
source /etc/profile
source /home/mkstar/mkci/repository.sh
## The code follows the principles below, as described in the <Defensive BASH Programming> blog post
######## Immutable global variables
######## Everything is local
######## Everything is a function
######## Debugging functions(bash -x)(set -x …… set +x)
######## Code clarity
######## Each line does just one thing
## Variable naming: local variables are lowercase with words separated by underscores; global variables are uppercase with words separated by underscores
##Default working directory is account's home path like /home/user/, you can define function 'get_work_home_path' in repository.sh to change it
## The default working directory is the login account's home directory; to customize it, change the return value of the get_user_home_path function
##time
TODAY=`date +%Y-%m-%d`
##############################################################################
### Extract the project name
##############################################################################
function get_webent_name(){
local webent=${1}
local filter=`echo ${webent}|awk -F '-' '{print $1}'`
echo ${webent}|awk -F ${filter}'-' '{print $2}'|awk -F '-webent' '{print $1}'
}
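# worked example (hypothetical name): get_webent_name "mk-wm-webent"
# -> filter is "mk", so "mk-wm-webent" reduces to "wm-webent" and then to "wm"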
##############################################################################
### Extract the jar name
##############################################################################
function get_jar_name(){
local webent=${1}
echo "${webent#*-}"
}
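# worked example (hypothetical name): get_jar_name "mk-wm-webent" prints
# "wm-webent" (everything after the first "-")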
##############################################################################
### Build a project: takes the webent name and a profile, and cd's into the matching directory
### build mk-wm-webent st-https
##############################################################################
function build(){
local webent=${1}
local profile=${2}
cd ${GIT_JAVA}${webent}
if [[ -e "pom.xml" ]]
then
if [[ -n ${profile} ]]
then
echo "Building path with profile ${profile} "
echo `pwd`
mvn -q -ff clean install -P ${profile}
else
echo "Building path without profile"
echo `pwd`
mvn -q -ff clean install
fi
else
echo "make sure `pwd` is a maven project"
exit 0
fi
}
##############################################################################
### Package: takes the webent name
##############################################################################
function package(){
local webent=${1}
cd ${GIT_JAVA}${webent}
mvn package
}
##############################################################################
### build_agg: compile dependency libraries; MK-specific, not generic
##############################################################################
function build_agg(){
local aggregator=${1}
build ${aggregator}
}
##############################################################################
### Package for Aliyun: upload the package to Aliyun
##############################################################################
function package_to_ali(){
echo "do nothing"
}
| true |
84d70f0f859fe9eef42a53a3ce830d3c75331252
|
Shell
|
leticiaSaraiva/scripts-programming
|
/atividades/atividade05/maiorDe3Verificado.sh
|
UTF-8
| 656 | 3.25 | 3 |
[] |
no_license
|
#!/bin/bash
# Grade: 1.0
numero1=${1}
numero2=${2}
numero3=${3}
if ! expr ${numero1} + 1 &> /dev/null
then
echo "Opa! ${numero1} não é um número."
elif ! expr ${numero2} + 1 &> /dev/null
then
echo "Opa! ${numero2} não é um número."
elif ! expr ${numero3} + 1 &> /dev/null
then
echo "Opa! ${numero3} não é um número."
else
if [ ${numero1} -ge ${numero2} -a ${numero1} -ge ${numero3} ] &> /dev/null
then
echo ${numero1}
elif [ ${numero2} -ge ${numero1} -a ${numero2} -ge ${numero3} ] &> /dev/null
then
echo ${numero2}
elif [ ${numero3} -ge ${numero1} -a ${numero3} -ge ${numero2} ] &> /dev/null
then
echo ${numero3}
fi
fi
| true |
f90e1c62738a77a54bab19693c47403c18e4115b
|
Shell
|
Abang81/x250
|
/USB.command
|
UTF-8
| 8,182 | 3.34375 | 3 |
[] |
no_license
|
#!/bin/sh
# This script will automatically download the needed kexts and create a
# directory in the process
# Explain that Xcode must be installed
clear
echo "\n================================================================================\n"
echo " (!) Xcode must be installed to continue. Answer the following questions"
echo " accordingly to ensure successful downloads and folder creation."
echo "\n================================================================================\n"
# Ask if user has Xcode installed
read -r -p "---> Do you have Xcode installed? <--- " response
if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]
then
echo "\n================================================================================\n"
else
echo "\n================================================================================\n"
echo " (!) Sign in with your apple ID. If you do not have a developer account, you"
echo " will need to create one to download and install the latest Xcode."
echo "\n================================================================================\n"
sleep 5
echo " (!) Opening apple downloads page. Do not use the Mac App Store as iCloud"
echo " has not been fixed yet. After downloading and installing, continue."
echo " *** Must Unzip, and move to the applications folder ***"
echo "\n================================================================================\n"
sleep 5
open https://developer.apple.com/download/more/
fi
# Asking user if they have opened and accepted terms
read -r -p "---> Has Xcode been opened and the terms accepted? <--- " response
if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]
then
echo "\n================================================================================\n"
else
echo "\n================================================================================\n"
echo " (!) You must open Xcode and accept the terms and conditions."
echo "\n================================================================================\n"
sleep 3
echo # Blank line
echo " (i) Opening Xcode, continue after accepting the terms."
echo "\n================================================================================\n"
open -a Xcode
sleep 3
fi
# Asking user if they have already installed the command line Tools
read -r -p "---> Have you installed the Xcode comand line Tools? <--- " response
echo # Move to a new line
if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]
then
echo "\n================================================================================\n"
else
echo "\n================================================================================\n"
echo " (!) You must install Xcode command line tools."
echo "\n================================================================================\n"
sleep 3
echo " (i) Opening Installer for command line toos, continue after installation is"
echo " complete."
echo "\n================================================================================\n"
sleep 3
echo # Blank line
xcode-select --install
fi
read -r -p "Press enter to begin downloads. "
echo "\n================================================================================\n"
echo "----------------------Making USB Folder!----------------------"
echo "\n================================================================================\n"
cd ~/Desktop
mkdir -v USB
echo "\n================================================================================\n"
echo "----------------------Donwloading USB Files!----------------------"
echo "\n================================================================================\n"
cd ~/Desktop/USB
# curl --progress-bar -L -o Cloverv24kr4061.zip https://downloads.sourceforge.net/project/cloverefiboot/Installer/Clover_v2.4k_r4061.zip?r=https%3A%2F%2Fsourceforge.net%2Fprojects%2Fcloverefiboot%2F&ts=1493489376&use_mirror=pilotfiber
# Updated 12/28/17
# curl --progress-bar -L -o Cloverv24kr4359.zip https://downloads.sourceforge.net/project/cloverefiboot/Installer/Clover_v2.4k_r4359.zip?r=https%3A%2F%2Fsourceforge.net%2Fprojects%2Fcloverefiboot%2F&ts=1514337371&use_mirror=astuteinternet
# Updated to RehabMans branch 1/1/2018
# curl --progress-bar -L -O https://bitbucket.org/RehabMan/clover/downloads/Clover_v2.4k_r4359.RM-4506.7036cf0a.zip
# Updated to RehabMans branch 5/27/2018
curl --progress-bar -L -O https://bitbucket.org/RehabMan/clover/downloads/Clover_v2.4k_r4444.RM-4652.c1f8602f.zip
# curl --progress-bar -L -O https://bitbucket.org/RehabMan/os-x-fakesmc-kozlek/downloads/RehabMan-FakeSMC-2017-0414.zip
# Updated 12/28/17
# curl --progress-bar -L -O https://bitbucket.org/RehabMan/os-x-fakesmc-kozlek/downloads/RehabMan-FakeSMC-2017-1017.zip
# Updated 5/27/2018
curl --progress-bar -L -O https://bitbucket.org/RehabMan/os-x-fakesmc-kozlek/downloads/RehabMan-FakeSMC-2018-0403.zip
# curl --progress-bar -L -O https://bitbucket.org/RehabMan/os-x-intel-network/downloads/RehabMan-IntelMausiEthernet-v2-2017-0321.zip
# Updated 12/28/17
# curl --progress-bar -L -O https://bitbucket.org/RehabMan/os-x-intel-network/downloads/RehabMan-IntelMausiEthernet-v2-2017-0914.zip
# Updated 5/27/2018
curl --progress-bar -L -O https://bitbucket.org/RehabMan/os-x-intel-network/downloads/RehabMan-IntelMausiEthernet-v2-2018-0424.zip
curl --progress-bar -L -O https://github.com/JrCs/CloverGrowerPro/raw/master/Files/HFSPlus/X64/HFSPlus.efi
curl --progress-bar -O https://raw.githubusercontent.com/Limitless1Studio/x250/master/installconfig.plist
cd ~/downloads
# curl --progress-bar -L -o OS-X-Voodoo-PS2-Controller-master.zip https://github.com/tluck/OS-X-Voodoo-PS2-Controller/archive/master.zip
curl --progress-bar -L -o OS-X-Voodoo-PS2-Controller-master.zip https://github.com/tluck/OS-X-Voodoo-PS2-Controller/archive/3b5d68a4b6dc2afb478b0232aaa5849b12b49b82.zip
sleep 5
echo "\n================================================================================\n"
echo "----------------------Unzipping Files!----------------------"
echo "\n================================================================================\n"
cd ~/Desktop/USB
# unzip Cloverv24kr4061.zip
# unzip Cloverv24kr4359.zip
unzip -q Clover_v2.4k_r4444.RM-4652.c1f8602f.zip
# unzip -q RehabMan-FakeSMC-2017-0414.zip
unzip -q RehabMan-FakeSMC-2018-0403.zip
#unzip -q RehabMan-IntelMausiEthernet-v2-2017-0321.zip
unzip -q RehabMan-IntelMausiEthernet-v2-2018-0424.zip
cd ~/downloads
unzip -q OS-X-Voodoo-PS2-Controller-master.zip
cd ~/downloads/OS-X-Voodoo-PS2-Controller-3b5d68a4b6dc2afb478b0232aaa5849b12b49b82
sudo make --silent
echo "\n================================================================================\n"
echo "----------------------Cleaning up USB Folder!----------------------"
echo "\n================================================================================\n"
cd ~/Desktop/USB
# rm -v -f Clover_v2.4k_r4061.pkg.md5
# rm -v -f Clover_v2.4k_r4359.pkg.md5
rm -v -f Clover_v2.4k_r4444.RM-4652.c1f8602f.pkg.md5
# rm -v -f Cloverv24kr4061.zip
# rm -v -f Cloverv24kr4359.zip
rm -v -f Clover_v2.4k_r4444.RM-4652.c1f8602f.zip
mv -v ~/desktop/USB/Release/IntelMausiEthernet.kext ~/desktop/USB
sudo rm -v -r FakeSMC_ACPISensors.kext
sudo rm -v -r FakeSMC_CPUSensors.kext
sudo rm -v -r FakeSMC_GPUSensors.kext
sudo rm -v -r FakeSMC_LPCSensors.kext
sudo rm -v -r Debug
sudo rm -v -r HWMonitor.app
sudo rm -v -r __MACOSX
# rm -v -f RehabMan-FakeSMC-2017-0414.zip
rm -v -f RehabMan-FakeSMC-2018-0403.zip
# rm -v -f RehabMan-IntelMausiEthernet-v2-2017-0321.zip
rm -v -f RehabMan-IntelMausiEthernet-v2-2018-0424.zip
sudo rm -v -r Release
cd ~/downloads/OS-X-Voodoo-PS2-Controller-3b5d68a4b6dc2afb478b0232aaa5849b12b49b82/build/products/Release
sudo mv -v ~/downloads/OS-X-Voodoo-PS2-Controller-3b5d68a4b6dc2afb478b0232aaa5849b12b49b82/build/products/Release/VoodooPS2Controller.kext ~/desktop/USB
cd ~/downloads
sudo rm -v -r OS-X-Voodoo-PS2-Controller-3b5d68a4b6dc2afb478b0232aaa5849b12b49b82
sudo rm -v -f OS-X-Voodoo-PS2-Controller-master.zip
exit 0
| true |
fcd1725d2d6b384c272f0e0b211ca3e8c899d3b5
|
Shell
|
DiegoRincon/Spring2016
|
/WSEHW1/src/runRetriever.sh
|
UTF-8
| 178 | 2.859375 | 3 |
[] |
no_license
|
#!/bin/bash
if [ "$#" -lt 2 ]
then
echo "Usage: <indexerDir> <query>"
exit 1
fi
indexerDir="$1"
shift
java -cp .:../Lucene/*:../HTMLParser/* Retriever "$indexerDir" "$@"
| true |
f27e470f447bb33de8b204385ad0944bdcb555b1
|
Shell
|
Allegra42/zircon
|
/scripts/build-zircon
|
UTF-8
| 1,493 | 3.734375 | 4 |
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2018 The Fuchsia Authors
#
# Use of this source code is governed by a MIT-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/MIT
function HELP {
echo "help:"
echo "-a <arch> : arm64, or x64"
echo "-A : use ASan build"
echo "-C : use Clang build"
echo "-l : use ThinLTO build"
echo "-L : use LTO build"
echo "-q : build quietly"
echo "-v : build verbosely"
echo "-r : build release build"
echo "-d : build unoptimized, with full debug symbols"
echo "-h for help"
echo "all arguments after -- are passed to make directly"
exit 1
}
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
ARGS=
ARCH=
QUIET=0
while getopts a:ACdhlLqrv FLAG; do
case $FLAG in
a) ARCH=${OPTARG};;
A) ARGS+=" USE_ASAN=true USE_CLANG=true";;
C) ARGS+=" USE_CLANG=true";;
d) ARGS+=" DEBUG_HARD=1";;
l) ARGS+=" USE_THINLTO=true";;
L) ARGS+=" USE_LTO=true";;
q) ARGS+=" QUIET=1";;
r) ARGS+=" DEBUG=0";;
v) ARGS+=" NOECHO=";;
h) HELP ;;
\?)
echo unrecognized option
HELP
esac
done
shift $((OPTIND-1))
if [[ ! ${ARCH} ]]; then
echo no arch specified!
HELP
fi
exec ${DIR}/make-parallel ${ARCH} ${ARGS} "$@"
| true |
e1aef761941e2247c97b6b679dae24eb59e00f56
|
Shell
|
Xiddler/PORTABLE_ENV
|
/dotfiles/scripts/.scripts/get_defs.sh
|
UTF-8
| 1,444 | 3.390625 | 3 |
[] |
no_license
|
#!/bin/bash
# Get definitions from OED --> $HOME/REPOS/WORDS/Dictionaries/Oxford_English_Dictionary/oxford_dict.txt
# Edit 2022-10-17
# Alternative to dict (ie dictd from AUR)
# usage $def <word_sought> where def is aliased in .zshrc as: alias def='~/.scripts/get_defs.sh '
string=$1
#origingal
# first=`echo $string|cut -c1|tr [a-z] [A-Z]`
# my method
first=`echo "${string:0:1}" | tr '[:lower:]' '[:upper:]'`
second=`echo $string|cut -c2-`
myword=$first$second
echo $myword
# the following line works so keep it safe
# cat /media/donagh/3520-FD13/1donaghs-stuff/Dictionaries/Oxford_English_Dictionary/oxford_dict.txt | grep `echo $myword`
grep "$myword" $HOME/REPOS/WORDS/Dictionaries/Oxford_English_Dictionary/oxford_dict.txt
# dict='$HOME/REPOS/WORDS/Dictionaries/Oxford_English_Dictionary/oxford_dict.txt'
# cat $dict | grep `echo $myword`
# line=`cat /media/donagh/3520-FD13/1donaghs-stuff/Dictionaries/Oxford_English_Dictionary/oxford_dict.txt`
# echo $line | grep `'^Film\s'`
# echo $line | grep $myword
#
#
#
#
#
#
#
#
#
#
#
#
# the following will spit out the definitions of all words in a file it has a def for from the oxford_dict.txt file.
# usage ./get_defs <filename>
# while IFS='' read -r line || [[ -n "$line" ]]; do
# while IFS='' read line || [[ -n "$line" ]]; do
# cat /media/donagh/3520-FD13/1donaghs-stuff/Dictionaries/Oxford_English_Dictionary/oxford_dict.txt | grep ^$line
# done < "$1"
# #
#
| true |
e568ea16fc8cbbe2a9781c75a1e8b1d0b8e04469
|
Shell
|
openbsd/src
|
/regress/sys/ffs/tests/rename/01.t
|
UTF-8
| 476 | 2.671875 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# $FreeBSD: src/tools/regression/fstest/tests/rename/01.t,v 1.1 2007/01/17 01:42:10 pjd Exp $
desc="rename returns ENAMETOOLONG if a component of either pathname exceeded 255 characters"
n0=`namegen`
expect 0 create ${name255} 0644
expect 0 rename ${name255} ${n0}
expect 0 rename ${n0} ${name255}
expect 0 unlink ${name255}
expect 0 create ${n0} 0644
expect ENAMETOOLONG rename ${n0} ${name256}
expect 0 unlink ${n0}
expect ENAMETOOLONG rename ${name256} ${n0}
| true |
38794408b1b1db1bab8350519a501a5b534954ab
|
Shell
|
paridhishr/rabbithub
|
/test/regressionTest2/subq_contactall.sh
|
UTF-8
| 495 | 2.71875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
## create a subscriber to a queue with contact information
## $1 - vhost
## $2 - queue name
## $3 - topic
## $4 - callback url
## $5 - rabbitmq user
## $6 - rabbitmq password
## $7 - application name
## $8 - contact name
## $9 - phone
## $10 - email
## $11 - description
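## example (hypothetical values):
##   ./subq_contactall.sh vh1 myqueue mytopic http://cb.example/hook guest guest app1 "Jane Doe" 555-0100 jane@example.com "demo subscriber"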
curl -vd "hub.mode=subscribe&hub.callback=$4&hub.topic=$3&hub.verify=sync&hub.app_name=$7&hub.contact_name=$8&hub.phone=$9&hub.email=${10}&hub.description=${11}" http://$5:$6@localhost:15670/$1/subscribe/q/$2
| true |
73c7085fe39c4ba1cb570ba33b95f487effc3fae
|
Shell
|
xusl/android-wilhelm
|
/apps_proc/kernel-tests/cpu/cpuhotplug_test.sh
|
UTF-8
| 4,508 | 3.4375 | 3 |
[
"Apache-2.0"
] |
permissive
|
#-----------------------------------------------------------------------------
# Copyright (c) 2011 QUALCOMM Incorporated.
# All Rights Reserved. QUALCOMM Proprietary and Confidential.
#-----------------------------------------------------------------------------
. $TEST_ENV_SETUP
CPUHOTPLUG_DIR="/sys/devices/system/cpu"
SYS_PM_8x60="/sys/module/pm_8x60/"
SLEEP_MODE_NODE_STD_8x60_1="modes/cpu1/standalone_power_collapse/suspend_enabled"
get_num_cpu(){
num_cpu=`ls $CPUHOTPLUG_DIR | grep "cpu[0-9]" | wc -l`
}
# Function flip_value
# Parameters:
# 1) cpu index
# 2) new online value
# return 0 if the online value was successfully changed to the new value
# - otherwise, return 1
flip_value(){
echo $2 > "$CPUHOTPLUG_DIR/cpu$i/online"
if [ `cat $CPUHOTPLUG_DIR/cpu$1/online` -ne $2 ]; then
echo "flip online value for cpu$i failed"
return 1
fi
return 0
}
# Function test_cpuinfo
# Parameters:
# 1) cpu index
# 2) online value
# return 0 on success otherwise return 1
test_cpuinfo(){
cpu_info=`cat /sys/devices/system/cpu/offline | grep $1`
if [ $2 -eq 0 ]
then
if [ "$cpu_info" = "" ];then
echo "ERROR: cpu$1 not present in /sys/devices/system/cpu/offline"
return 1
fi
else
if [ "$cpu_info" != "" ];then
echo "ERROR: cpu$1 present in /sys/devices/system/cpu/offline"
return 1
fi
fi
return 0
}
# Function test_interrupts
# Parameters:
# 1) cpu index
# 2) online value
# return 0 on success otherwise return 1
test_interrupts(){
interrupt_col=$(( 2 + $1 ))
if [ $verbosity -gt 0 ];then
echo "interrput_col is $interrupt_col"
fi
if [ $2 -eq 0 ];then
cpu_match=`cat /proc/interrupts | awk '/'CPU$1'/ {print}'`
if [ "$cpu_match" != "" ];then
echo "ERROR: cpu$1 is not offline"
return 1
fi
else
cpu_irs_1=`cat /proc/interrupts | awk '{print $'$interrupt_col'}'`
sleep 5
cpu_irs_2=`cat /proc/interrupts | awk '{print $'$interrupt_col'}'`
if [ "$cpu_irs_1" = "$cpu_irs_2" ];then
echo "ERROR: cpu$1 is not receiving irq when it is online"
return 1
fi
fi
return 0
}
do_test(){
get_num_cpu
if [ $verbosity -gt 0 ];then
echo "num_cpu is $num_cpu"
fi
if [ $num_cpu -le 1 ];then
echo "ERROR: Test only supported on SMP system"
return 1
fi
num_cpu_test=$(($num_cpu - 1))
if [ -e $SYS_PM_8x60/$SLEEP_MODE_NODE_STD_8x60_1 ]; then
# need to enable standalone power collapse for cpuhotplug to work
echo "enable standalone PC: $SYS_PM_8x60/$SLEEP_MODE_NODE_STD_8x60_1"
echo 1 > $SYS_PM_8x60/$SLEEP_MODE_NODE_STD_8x60_1
fi
for i in $(seq 1 $num_cpu_test)
do
old_online=`cat $CPUHOTPLUG_DIR/cpu$i/online`
new_online=$(( ! $old_online ))
if [ $verbosity -gt 0 ];then
echo "old online is $old_online"
echo "new online is $new_online"
fi
flip_value $i $new_online
if [ $? -ne 0 ]; then
return 1
fi
test_cpuinfo $i $new_online
if [ $? -ne 0 ]; then
#flip online value back
flip_value $i $old_online
return 1
fi
test_interrupts $i $new_online
if [ $? -ne 0 ]; then
#flip online value back
flip_value $i $old_online
return 1
fi
#flip online value back
flip_value $i $old_online
if [ $? -ne 0 ]; then
return 1
fi
test_cpuinfo $i $old_online
if [ $? -ne 0 ]; then
return 1
fi
test_interrupts $i $old_online
if [ $? -ne 0 ]; then
return 1
fi
done
return 0
}
# Begin script execution here
nominal_test=0
repeatability_test=0
verbosity=0
while [ $# -gt 0 ]
do
case $1 in
-n | --nominal)
nominal_test=1
shift 1
;;
-r | --repeatability)
repeatability_test=1
shift 1
;;
-v | --verbosity)
verbosity=$2
shift 2
;;
-h | --help | *)
echo "Usage: $0 [-n] [-s] [-v <verbosity>]"
exit 1
;;
esac
done
if ! [ -d $CPUHOTPLUG_DIR ]; then
echo "ERROR: $CPUHOTPLUG_DIR is not a directory"
exit 1
fi
if [ $nominal_test -eq 0 -a $repeatability_test -eq 0 ]; then
nominal_test=1
fi
#do nominal test
if [ $nominal_test -eq 1 ];then
if [ $verbosity -gt 0 ];then
echo "=== Running Nominal Test ==="
fi
do_test
if [ $? -eq 0 ];then
echo "Nominal Test Passed"
else
echo "Nominal Test Failed"
exit 1
fi
fi
#do repeatability test
if [ $repeatability_test -eq 1 ];then
if [ $verbosity -gt 0 ];then
echo "=== Running Repeatability Test ==="
fi
for i in $(seq 1 20)
do
do_test
if [ $? -ne 0 ];then
echo "Repeatability Test Failed"
exit 1
fi
done
echo "Repeatability Test Passed"
fi
| true |
45f8b04deed82627f6f6bc9e329d6f78d710be34
|
Shell
|
jjvein/git-script
|
/git-needcommit
|
UTF-8
| 208 | 3.015625 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# the command substitution must be wrapped in double quotes here
if [ -z "$(git status --porcelain)" ];
then
echo "IT IS CLEAN"
else
echo "PLEASE COMMIT YOUR CHANGE FIRST!!!"
git status --porcelain
fi
| true |
acb6b4ff929606f1d20b77226a2bded58ae81b96
|
Shell
|
sonro/dotfiles
|
/bin/setup
|
UTF-8
| 1,558 | 3.921875 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
set -e
HELP="\
DOTFILE SETUP
=============
sonro <sonro@gmx.com>
Setup software and configurations for development on *nix
USAGE: setup [OPTIONS]
OPTIONS:
-h, --help Prints help information
-s, --server Setup without node and neovim (for a more minimal install)
-n, --new Replace existing config files
If any combination of these flags are set: only those functions will run
-f, --files Setup files
-i, --install Install software
-p, --plugins Install plugins
"
# Config
DOTFILE_DIR="$HOME/.dotfiles"
# Options
FRESH=false
SERVER=false
EXPLICIT=false
FILES=false
INSTALL=false
PLUGINS=false
main() {
parse_args "$@"
if [[ "$EXPLICIT" == false || "$FILES" == true ]]; then
source "$DOTFILE_DIR/setup-src/setupfiles.bash"
setup_files "$SERVER" "$DOTFILE_DIR" "$FRESH"
fi
if [[ "$EXPLICIT" == false || "$INSTALL" == true ]]; then
source "$DOTFILE_DIR/setup-src/installsoftware.bash"
install_software "$SERVER" "$DOTFILE_DIR" "$FRESH"
fi
if [[ "$EXPLICIT" == false || "$PLUGINS" == true ]]; then
source "$DOTFILE_DIR/setup-src/installplugins.bash"
install_plugins "$SERVER" "$DOTFILE_DIR" "$FRESH"
fi
return 0
}
parse_args() {
for opt in "$@"; do
case "$opt" in
-n|--new) FRESH=true;;
-s|--server) SERVER=true;;
-h|--help)
echo "$HELP"
exit 0
;;
-f|--files)
EXPLICIT=true
FILES=true
;;
-i|--install)
EXPLICIT=true
INSTALL=true
;;
-p|--plugins)
EXPLICIT=true
PLUGINS=true
;;
esac
done
return 0
}
main "$@"
| true |
6e26365121420bcb1d83ed88549828fa6730315c
|
Shell
|
aalhour/kaos
|
/scripts/apply_destroy.sh
|
UTF-8
| 800 | 3.71875 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -e
infrastructure=$1
cd ${infrastructure}
function tf_apply_destroy() {
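# the 'done' marker file makes re-runs idempotent: apply and destroy happen only once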
if [[ ! -f done ]]; then
terraform init
if [[ $# -eq 1 ]]; then
terraform workspace select "$1"
fi
terraform apply --auto-approve
terraform destroy --auto-approve
rm -r .terraform/
touch done
fi
}
function apply_destroy_environments() {
for env in "dev" "prod" "stage"
do
pushd "$env"
tf_apply_destroy "$env"
popd
done
}
for cloud in "aws" "azure" "gcp" "local"
do
env_dir="$cloud/envs"
if [[ -d "$env_dir" ]]; then
pushd "$env_dir"
else
pushd "$cloud"
tf_apply_destroy
popd
continue
fi
apply_destroy_environments
popd
done
| true |
b8117b6a791f649a89219ab0c634b6fa11c682e3
|
Shell
|
francescacairoli/HPC
|
/ex1/strong_one_iter.sh
|
UTF-8
| 314 | 2.59375 | 3 |
[] |
no_license
|
module load openmpi
echo "Strong Scalability: base size is $1"
#for procs in 1 2 4 8 16 20; do
for ((procs=1;procs<=20;procs++)); do
echo "Niter_per_CPU =$(($1/${procs})), np=${procs}"
time mpirun -np ${procs} ./parallel_pi $(($1/${procs}))
echo "------------------------------------------------"
done
| true |
e6759f06a6a75f5d191f8c9674ef1e337b72296e
|
Shell
|
scottford-io/core-plans
|
/lynx/plan.sh
|
UTF-8
| 716 | 2.671875 | 3 |
[
"Apache-2.0"
] |
permissive
|
pkg_origin=core
pkg_name=lynx
pkg_version=2.8.8
pkg_description="Lynx is the text web browser."
pkg_upstream_url=http://lynx.browser.org/
pkg_license=("GPL-2.0")
pkg_maintainer="The Habitat Maintainers <humans@habitat.sh>"
pkg_source="http://invisible-mirror.net/archives/$pkg_name/tarballs/$pkg_name${pkg_version}rel.2.tar.gz"
pkg_shasum=234c9dc77d4c4594ad6216d7df4d49eae3019a3880e602f39721b35b97fbc408
pkg_dirname="$pkg_name${pkg_version//./-}"
pkg_deps=(
core/glibc
core/ncurses
)
pkg_build_deps=(
core/gcc
core/make
core/patch
)
pkg_bin_dirs=(bin)
do_prepare() {
# http://lists.gnu.org/archive/html/bug-ncurses/2017-03/msg00009.html
patch -p1 < "${PLAN_CONTEXT}/ncurses.patch"
}
do_check() {
make test
}
| true |
069ea2f70a9c0c3579ddfb95e14a873981f76687
|
Shell
|
botfapnews/newscoop-botfap
|
/setup.sh
|
UTF-8
| 1,309 | 4.03125 | 4 |
[] |
no_license
|
#!/bin/bash
#
function sanity_check {
if [ `cat /etc/os-release | grep VERSION_ID | cut -d "\"" -f 2` != "16.04" ]; then echo "You must be running Ubuntu 16.04 LTS" && exit 0; fi
if [ `whoami` != "root" ]; then echo "You must run me as root or with sudo" && exit 0; fi
if [ "$(cd "$(dirname "$0")" && pwd)" != "/var/www" ]; then echo "You must copy or move this folder to /var/www and run me from there" && exit 0; fi
}
function help {
echo "Newscoop Legacy setup.sh usage:"
echo ""
echo "setup.sh base|system|theme|plugin|help [subcommand]"
echo " base all|os|apache|newscoop|clean"
echo " system update|backup|restore|backup-data|backup-site|reestore-data|restore-site"
echo " theme install|remove [theme-name]"
echo " plugin install|remove [plugin-name]"
echo " help (this message)"
}
function base {
case "$UARG" in
all)
scripts/clean-prep.sh
scripts/os-prep.sh
scripts/apache-prep.sh
scripts/newscoop-prep.sh
;;
os)
scripts/os-prep.sh
;;
apache)
scripts/apache-prep.sh
;;
newscoop)
scripts/newscoop-prep.sh
;;
clean)
scripts/clean-prep.sh
;;
*)
help
;;
esac
}
# Main
sanity_check
UARG=$2
UBASE="/var/www"
case "$1" in
base)
base
;;
system)
echo "TODO"
;;
theme)
echo "TODO"
;;
plugin)
echo "TODO"
;;
help|*)
help
;;
esac
| true |
ca01c69d1b8dfca157ebfbdba034c908115c482a
|
Shell
|
KatyBrown/CIAlign
|
/benchmarking/BAliBase/run_alignment.sh
|
UTF-8
| 618 | 2.796875 | 3 |
[
"MIT"
] |
permissive
|
for i in {1..10}
do
for file in reference\_set\_$i/input/*;
do
stem=$(echo $file | rev | cut -d "/" -f1 | cut -d "." -f2 | rev)
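# stem is the basename without its extension, e.g. (hypothetical) path/BB11001.fasta -> BB11001 (assumes a single "." in the file name)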
clustalo --auto -i reference\_set\_$i/input/$stem.fasta -o reference\_set\_$i/clustal/$stem\_auto.fasta
mafft --localpair --maxiterate 100 reference\_set\_$i/input/$stem.fasta > reference\_set\_$i/mafft/$stem\_local\_max100.fasta
mafft --maxiterate 100 reference\_set\_$i/input/$stem.fasta > reference\_set\_$i/mafft/$stem\_global\_max100.fasta
muscle -maxiters 100 -in reference\_set\_$i/input/$stem.fasta > reference\_set\_$i/muscle/$stem\_max100.fasta
done
done
| true |
42a1b402f7dbf2a280dd7d5a0f3ffd6be4a39564
|
Shell
|
iodar/linux-scripts
|
/linux-helpers/array-split.sh
|
UTF-8
| 167 | 3.625 | 4 |
[] |
no_license
|
#!/bin/bash
CSV=$1
[ -z "${CSV}" ] && echo "please provide an args array" && exit 1
CSV_AS_ARRAY=${1//,/ }
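# e.g. ./array-split.sh "a,b,c" prints a, b and c on separate lines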
for element in $CSV_AS_ARRAY; do
echo $element
done
| true |
c6d13608699f873dbdc5afb326bcb76d09c8dda9
|
Shell
|
kunalpict/recipes-ui-angular2-jspm
|
/bin/jenkins-publish.sh
|
UTF-8
| 797 | 3.9375 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Script used to publish the project in Jenkins
#
(
set -e
get_value() {
sed -n -e "s/.*\"$1\".*\"\(.*\)\".*/\1/p" package.json
}
# Ensure we are in the workspace
cd "$WORKSPACE"
__DIR__="${BASH_SOURCE%/*}"
# shellcheck disable=SC1091
# shellcheck source=bin/setup-node.sh
source "$__DIR__/_setup-node.sh"
# Publish the package if necessary
PACKAGE_NAME=$(get_value name)
PACKAGE_VERSION=$(get_value version)
REGISTRY_VERSION=$(npm show "$PACKAGE_NAME" version) || true
echo "Registry version: $REGISTRY_VERSION"
echo "Package version: $PACKAGE_VERSION"
if [ "$PACKAGE_VERSION" != "$REGISTRY_VERSION" ]
then
echo "Publishing $PACKAGE_NAME v$PACKAGE_VERSION …"
npm publish ./dist --loglevel warn
else
echo "$PACKAGE_NAME v$PACKAGE_VERSION is already published."
fi
)
| true |
a449b4717ca1fd9be8f13eee88702dbb8568cebc
|
Shell
|
joeytwiddle/jsh
|
/code/shellscript/shio/readbytes.sh
|
UTF-8
| 495 | 3.265625 | 3 |
[] |
no_license
|
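# read exactly $1 bytes from stdin by looping dd until the requested count
# has been consumed; exits 1 if the input ends early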
TO_READ="$1"
# NUM_READ=` ddshowbytes bs="$TO_READ" count=1 `
while true
do
if [ "$TO_READ" = 0 ]
then exit 0
fi
jshinfo "[readbytes] requesting $TO_READ bytes"
DDLOGFILE=/tmp/$$.ddlog
dd bs="$TO_READ" count=1 2> "$DDLOGFILE"
NUM_READ=` tail -n 1 "$DDLOGFILE" | sed 's+ bytes .*++' `
rm -f "$DDLOGFILE"
jshinfo "[readbytes] received $NUM_READ bytes"
if [ "$NUM_READ" = 0 ]
then exit 1
fi
TO_READ=$((TO_READ-NUM_READ))
# jshinfo "[readbytes] still reading $TO_READ"
done
| true |
90188b703b6e117450fddb450c10bb14870d3a86
|
Shell
|
spencertipping/ni
|
/dev/bench-startup
|
UTF-8
| 377 | 2.671875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
ni < /dev/null > /dev/null
./nfu < /dev/null > /dev/null
date > /dev/null
ta=$(date +%s%N)
for i in `seq 100`; do
ni < /dev/null > /dev/null
done
tb=$(date +%s%N)
for i in `seq 100`; do
./nfu < /dev/null > /dev/null
done
tc=$(date +%s%N)
echo -e "ni_startup\t$(( (tb - ta) / 1000000 / 100 ))\tms"
echo -e "nfu_startup\t$(( (tc - tb) / 1000000 / 100 ))\tms"
| true |
be1731e496f304c8c570f53738a980024d3b8d90
|
Shell
|
tomduhourq/dotfiles
|
/.zshrc
|
UTF-8
| 3,810 | 2.640625 | 3 |
[] |
no_license
|
# Path to your oh-my-zsh installation.
## CHANGE THIS
export ZSH=/home/tomas/.oh-my-zsh
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
ZSH_THEME="robbyrussell"
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(git)
# User configuration
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games"
# export MANPATH="/usr/local/man:$MANPATH"
source $ZSH/oh-my-zsh.sh
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
# export EDITOR='vim'
# else
# export EDITOR='mvim'
# fi
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# ssh
# export SSH_KEY_PATH="~/.ssh/dsa_id"
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
#theFuck alias
alias fuck='$(thefuck $(fc -ln -1))'
#JAVA
#export JAVA_HOME=/usr/lib/jvm/java-8-oracle
export JAVA_HOME=/usr/lib/jvm/java-7-oracle
export SCALA_HOME=/usr/lib/scala
#Hadoop stuff
export HADOOP_INSTALL=/usr/local/hadoop
export HADOOP_COMMON_HOME=$HADOOP_INSTALL
export HADOOP_HDFS_HOME=$HADOOP_INSTALL
export HADOOP_MAPRED_HOME=$HADOOP_INSTALL
export HADOOP_YARN_HOME=$HADOOP_INSTALL
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_INSTALL/lib/native
export HADOOP_OPTS="-Djava.library.path=$HADOOP_INSTALL/lib"
export HADOOP_CONF_DIR=$HADOOP_COMMON_HOME/etc/hadoop
export YARN_CONF_DIR=$HADOOP_COMMON_HOME/etc/hadoop
#Activator
export ACTIVATOR_HOME=/usr/lib/activator
#Spark
export SPARK_HOME=/home/tomas/Dev/Spark
#Cassandra
export CASSANDRA_HOME=/home/tomas/Dev/apache-cassandra-2.1.4
#Pycharm
export PYCHARM=/home/tomas/Dev/IDES/pycharm-community-4.0.5
#IntelliJ IDEA
export INTELLIJ=/home/tomas/Dev/IDES/idea-IC-139.224.1
#PATH
export PATH=$PATH:$JAVA_HOME/bin:$SCALA_HOME/bin:$HADOOP_INSTALL/bin:$HADOOP_INSTALL/sbin:$ACTIVATOR_HOME:$PYCHARM/bin:$INTELLIJ/bin:$SPARK_HOME/bin:$CASSANDRA_HOME/bin
| true |
f71bb23838f5588c6851eaac86e27030a3be85f2
|
Shell
|
HPCNow/cbgp-training
|
/hpcnowxx/slurm2.sl
|
UTF-8
| 620 | 2.6875 | 3 |
[] |
no_license
|
#!/bin/bash
##### These lines are for Slurm
#SBATCH -t 5:00
#SBATCH -N 1
#SBATCH --cpus-per-task=4
#SBATCH -p main
#SBATCH -o output.omp
#SBATCH -J omp
##### These are shell commands
echo -n 'This machine is '; hostname
echo -n 'My jobid is '; echo $SLURM_JOBID
echo 'My path is:'
echo $PATH
echo ' '
echo 'Compiling...'
ml GCC/8.2.0-2.31.1
export OMP_NUM_THREADS=4
gcc -o omp_hello -fopenmp /data/training/omp_hello.c
echo ' '
echo 'Starting job with the following nodes:'
squeue|grep $SLURM_JOBID
echo ' '
echo '--------------------- Job Output Below ---------------------'
./omp_hello
sleep 30
echo ' '
echo 'Done'
| true |
457dc5b7617e71c2a980debd0ca24ce149624ac8
|
Shell
|
fabiim/pagerduty-client
|
/travis-ci/deploy.sh
|
UTF-8
| 919 | 3.09375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
set -ev
# This script will be used by Travis CI and will deploy the project to maven, making sure to use the sign and
# build-extras profiles and any settings in our settings file.
openssl aes-256-cbc -pass pass:$PEM_ENCRYPTION_PASSWORD -in travis-ci/keys/github.com_rsa.pem.enc -out ~/.ssh/github.com_rsa.pem -d
eval "$(ssh-agent -s)"
chmod 600 ~/.ssh/github.com_rsa.pem
ssh-add ~/.ssh/github.com_rsa.pem
ssh-add -l
git config --global user.email $GITHUB_EMAIL
git config --global user.name $GITHUB_USERNAME
if [ "$TRAVIS_BRANCH" = "master" ];
then
mvn --batch-mode release:clean release:prepare || { echo $0: mvn failed; exit 1; }
mvn release:perform --settings travis-ci/settings.xml || { echo $0: mvn failed; exit 1; }
else if [ "$TRAVIS_PULL_REQUEST" == "true" ];
then
mvn --batch-mode release:clean release:prepare release:stage || { echo $0: mvn failed; exit 1; }
fi
fi
| true |
6a6ac92d065648f46cc72dceab80f8a9a6b0de09
|
Shell
|
kenyonj/dotfiles-1
|
/install.sh
|
UTF-8
| 1,347 | 3.265625 | 3 |
[] |
no_license
|
#!/bin/bash
exec > >(tee -i $HOME/dotfiles_install.log)
exec 2>&1
set -x
# Install curl, tar, git, other dependencies if missing
PACKAGES_NEEDED="\
silversearcher-ag \
bat \
fuse \
libfuse2"
if ! dpkg -s ${PACKAGES_NEEDED} > /dev/null 2>&1; then
if [ ! -d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then
sudo apt-get update
fi
sudo apt-get -y -q install ${PACKAGES_NEEDED}
fi
# sudo apt-get --assume-yes install silversearcher-ag bat fuse
# install latest neovim
sudo modprobe fuse
sudo groupadd fuse
sudo usermod -a -G fuse "$(whoami)"
# wget https://github.com/neovim/neovim/releases/download/v0.5.1/nvim.appimage
wget https://github.com/github/copilot.vim/releases/download/neovim-nightlies/appimage.zip
unzip appimage.zip
sudo chmod u+x nvim.appimage
sudo mv nvim.appimage /usr/local/bin/nvim
ln -s $(pwd)/tmux.conf $HOME/.tmux.conf
ln -s $(pwd)/vimrc $HOME/.vimrc
ln -s $(pwd)/vim $HOME/.vim
ln -s $(pwd)/emacs $HOME/.emacs
ln -s $(pwd)/screenrc $HOME/.screenrc
rm -f $HOME/.zshrc
ln -s $(pwd)/zshrc $HOME/.zshrc
ln -s $(pwd)/bash_profile $HOME/.bash_profile
rm -rf $HOME/.config
mkdir $HOME/.config
ln -s "$(pwd)/config/nvim" "$HOME/.config/nvim"
nvim +'PlugInstall --sync' +qa
vim -Es -u $HOME/.vimrc -c "PlugInstall | qa"
sudo chsh -s "$(which zsh)" "$(whoami)"
| true |
c1835b0659701fa6a90cb4d0aeb900d8bbde4e06
|
Shell
|
jhby1/react-bootstrap-typeahead
|
/scripts/build.sh
|
UTF-8
| 770 | 3.25 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# Delete existing dist & lib files
npm run clean
# Compile SCSS file to development and prod CSS files
for file in scss/Typeahead*; do
filename=$(basename "$file")
extension="${filename##*.}"
filename="${filename%.*}"
./node_modules/node-sass/bin/node-sass ${file} css/${filename}.css \
--output-style expanded
./node_modules/node-sass/bin/node-sass ${file} css/${filename}.min.css \
--output-style compressed
done;
# Build minified standalone version in dist
./node_modules/.bin/webpack --mode development
./node_modules/.bin/webpack --mode production
# Build ES5 modules to lib
./node_modules/.bin/babel src --out-dir lib
# Build minified example file
./node_modules/.bin/webpack --config example/webpack.config.js --mode production
| true |
1605c832cb8204b5bd3e6948508581ca27bbf08e
|
Shell
|
nu-art/dev-tools
|
/scripts/dev/android-oo/classes/AndroidDeviceBridge.class.sh
|
UTF-8
| 671 | 3.875 | 4 |
[] |
no_license
|
#!/bin/bash
AndroidDeviceBridge() {
declare -a devices
_isDeviceRegistered() {
local serial=${1}
for device in ${devices[@]}; do
[[ "$("${device}.serial")" == "${serial}" ]] && echo true && return
done
}
_detectDevices() {
local output=$(adb devices | grep -E "[A-Za-z0-9.:].*" )
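# adb prints one "<serial>\tdevice" line per attached device (plus a header line)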
while IFS= read -r deviceLine; do
local deviceRef=device${#devices[@]}
local serial="$(echo "${deviceLine}" | sed -E "s/(.*) .*/\1/")"
[[ $(this.isDeviceRegistered "${serial}") ]] && continue
new AndroidDevice "${deviceRef}"
"${deviceRef}.serial" = "${serial}"
devices+=("${deviceRef}")
done <<< "$output"
}
}
| true |
ddc78fcaeaf8effecdef10aaf63baa2430da450a
|
Shell
|
pherms/i3wm_Desktop
|
/cputemp.sh-old
|
UTF-8
| 659 | 2.59375 | 3 |
[] |
no_license
|
#!/bin/bash
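# read the integer CPU package temperature from lm-sensors output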
degrees=$(exec sensors | grep -oP 'Package.*?\+\K[0-9]+')
#echo $degrees
if (("$degrees" <= "10" ))
then
echo " $(exec sensors | grep -oP 'Package.*?\+\K[0-9]+') C"
elif (("$degrees" > "11" )) && (("$degrees" <= "25" ))
then
echo " $(exec sensors | grep -oP 'Package.*?\+\K[0-9]+') C"
elif (("$degrees" > "26" )) && (("$degrees" <= "50" ))
then
echo " $(exec sensors | grep -oP 'Package.*?\+\K[0-9]+') C"
elif (("$degrees" > "51" )) && (( "$degrees" <= "85" ))
then
echo " $(exec sensors | grep -oP 'Package.*?\+\K[0-9]+') C"
elif (("$degrees" > "86" ))
then
echo " $(exec sensors | grep -oP 'Package.*?\+\K[0-9]+') C"
fi
| true |
7d0306d26468988a2c5235f8df101c8caae9f2b9
|
Shell
|
dp12/dotfiles
|
/tmux/tmux_gdb_helper.sh
|
UTF-8
| 402 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/env bash
target=$(cat "$@")
if [[ "$target" =~ ^[a-fA-F0-9]+$ ]]; then
target="0x${target}"
elif [[ "$target" =~ ^[a-z]+$ ]]; then
target="\$${target}"
fi
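# e.g. "deadbeef" becomes "0xdeadbeef" (address); "rax" becomes "$rax" (register)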
cmd="$(cat ~/.tmux_gdb_cmd)"
if [[ $cmd == *'*' ]]; then
# for breakpoints, don't add space after the asterisk, e.g. b *0xdeadbeef
send_str="${cmd}${target}"
else
send_str="${cmd} ${target}"
fi
tmux send-keys "$send_str" Enter
| true |
52909e4c6a1b339b3887c5ff36860b9d466b3683
|
Shell
|
pletzer/mint
|
/scripts/fluxA.sh
|
UTF-8
| 2,048 | 3.015625 | 3 |
[
"0BSD"
] |
permissive
|
# compute flux
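# NOTE: the inline "python -c" snippets below use Python 2 print statements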
# function determining the path
nline=2
xlinefunc="320.*t + 25.0"
ylinefunc="170.*t - 85.0"
# write to VTK file
python writeLineToVTK.py "$nline" "$xlinefunc" "$ylinefunc" lineA.vtk
# stream function
streamfunc="sin(2*pi*x/180. - 0.3)**2 + cos(2*pi*y/180. - 0.2)**2"
x0expr=$(echo $xlinefunc | perl -ne "s/t/0./g;print;")
y0expr=$(echo $ylinefunc | perl -ne "s/t/0./g;print;")
x1expr=$(echo $xlinefunc | perl -ne "s/t/1./g;print;")
y1expr=$(echo $ylinefunc | perl -ne "s/t/1./g;print;")
x0=$(python -c "from math import *; print '{:3.18f}'.format($x0expr)")
y0=$(python -c "from math import *; print '{:3.18f}'.format($y0expr)")
x1=$(python -c "from math import *; print '{:3.18f}'.format($x1expr)")
y1=$(python -c "from math import *; print '{:3.18f}'.format($y1expr)")
# exact flux
s0=$(python -c "from math import *; x = $x0; y = $y0; print '{:3.18f}'.format($streamfunc)")
s1=$(python -c "from math import *; x = $x1; y = $y1; print '{:3.18f}'.format($streamfunc)")
exact_flux=$(python -c "print '{:3.18f}'.format($s1 - $s0)")
echo "Exact flux = $exact_flux"
fluxes="["
num_cells="["
errors="["
relerrors="["
for n in 4 8 16 32 64 128 256 512 1024 2048; do
echo "n = $n"
rm -rf rs.txt
../build/tools/flux -i cs_${n}.vtk \
-nline "$nline" -xline "$xlinefunc" -yline "$ylinefunc" \
>& res.txt
ncells=$(python get_num_cells.py "res.txt")
flux=$(python get_flux.py "res.txt")
err=$(python -c "print '{:3.18f}'.format(abs($flux - $exact_flux))")
relerr=$(python -c "print '{:3.18f}'.format($err/abs($exact_flux))")
echo "Flux error: $err"
num_cells="$num_cells $ncells,"
fluxes="$fluxes $flux,"
errors="$errors $err,"
relerrors="$relerrors $relerr,"
done
num_cells="$num_cells ]"
fluxes="$fluxes ]"
errors="$errors ]"
relerrors="$relerrors ]"
#
echo "num_cells = $num_cells"
echo "fluxes = $fluxes"
echo "errors = $errors"
echo "rel_errors = $relerrors"
# plot
python plot.py "$num_cells" "$relerrors"
| true |
d032c32e2efd8786fde69ca0d87fec0cd205c8f4
|
Shell
|
aswen/scripts
|
/backup-with-input
|
UTF-8
| 2,696 | 3.59375 | 4 |
[] |
no_license
|
#!/bin/bash
# backup-homedir-harm
# makes a backup of some dirs to network
# Copyright (C) 2014-2015 Alexander Swen <alex@swen.nu>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# can be started with: konsole --notoolbar --notabbar --nomenubar -T BACKUP --vt_sz 157x55 -e /data/scripts/backup-to-usb-disk
# or: gnome-terminal --window-with-profile=HIER --hide-menubar --geometry=112x50 -t "homedir naar server" --working-directory="/home/alex" -e '/data/scripts/backup-to-usb-disk'
# Alexander Swen
# Private contact: alex@swen.nu
# CHANGELOG:
# 2009-05-27 A.Swen created.
# SETTINGS
date=$(date +%Y%m%d)
me=$(basename $0)
mydir=$(dirname $0)
rsyncopts="-av --no-perms --no-owner --no-group --exclude=.~lock* --exclude=~/.config/libreoffice/*/user/backup --exclude=lost+found --exclude=.cache --exclude=.thumbnails --delete --size-only"
# FUNCTIONS
die () {
rc=$1
shift
printf '%s\n' "=====================" >&2
printf '%s\n' "==== FATAL ERROR ====" >&2
printf '%s\n\n' "=====================" >&2
printf '%s\n\n' "$@" >&2
printf '%s\n' "Er is iets mis!"
printf '%s\n' "Bel Alex en vertel hem de error die hierboven staat."
printf '%s\n' "Druk op een enter om dit scherm te sluiten"
read whatever
exit $rc
}
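# minimal usage helper; the option list is inferred from get_options below
usage () {
  printf 'Usage: %s [-n] -s <source> -d <destination>\n' "$me" >&2
  exit 1
}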
get_options () {
[ $# -gt 0 ]||usage
while getopts "s:d:" opt;do
case ${opt} in
n) export dryrun='-n' ;;
s) export source="`echo $OPTARG`" ;;
d) export destination="`echo $OPTARG`" ;;
*) usage;;
esac
done
unset OPTIND
}
log () { printf '%s %s\n' "$(date +%F' '%T)" "$@"; }
# SCRIPT
get_options $@
[ -n "$source" ] || dir="/home/$LOGNAME/DOCUMENTEN"
[ -n "$destination" ] || destination=/netwerk/home-$LOGNAME
log "Backing up $source naar $destination."
/usr/bin/rsync $rsyncopts $dryrun "$source" "$destination"
log " "
log "alles wat je hierboven ziet is gebackupped (plus alles wat al was gebackupped natuurlijk)"
log "(er staan ook voor elke dir samenvattingen, als er geen files genoemd worden is er kennelijk"
log "niets veranderd. niets om je zorgen over te maken dus)."
log " "
log "druk op een enter om dit scherm te sluiten"
read whatever
# END
| true |
d7a8335817638347a288264f7d30b338a77f042d
|
Shell
|
didrocks/otto
|
/lxc.defaults/guest/usr/local/bin/check-installed
|
UTF-8
| 2,318 | 4.125 | 4 |
[] |
no_license
|
#!/bin/bash
# This script verifies that only the packages passed in argument will be
# upgraded or installed
# Copyright (C) 2013 Canonical
#
# Authors: Jean-Baptiste Lallement <jean-baptiste.lallement@canonical.com>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
usage() {
# Prints usage and exit
cat<<EOF
Usage: $(basename $0) package [package...]
This script checks that only the packages passed in argument wil be installed
or upgraded
package: Binary package name
EOF
exit 1
}
check_install() {
# Simulates an installation and return 1 if other packages than those
# passed in argument are installed
#
# $@: list of packages
simulate="$(apt-get install --simulate $@ 2>&1 )"
rc=$?
echo "Output of: apt-get install --simulate $@"
# Tail removes the notice displayed because we don't run as root
echo "- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -"
apt-get install --simulate -qq -o Debug::pkgDepCache::AutoInstall=true $@|tail -n+5
echo "- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -"
if [ $rc -gt 0 ]; then
echo "E: apt-get install --simulate failed with non-zero exit status."
exit 1
fi
inst="$( echo "$simulate" |awk '/Inst / {print $2}'|sort )"
input="$( echo $@|tr ' ' '\n' | sort)"
pkgs=$(diff -u <(echo "$input" ) <(echo "$inst" )|grep -E '^\+[[:alnum:]]')
if [ -z "$pkgs" ]; then
exit 0
else
echo "E: The following additional packages will be installed:"
echo "$pkgs"
echo
exit 1
fi
}
echo "I: Checking packages requirements"
# Do not fail when no argument is provided
if [ $# -eq 0 ]; then
echo "W: No package provided, exiting!"
exit 0
fi
check_install $@
| true |
65bb70892f176a2904df0f8f1bae96b5863fd848
|
Shell
|
HPCNow/SubmitScripts
|
/GridEngine/nwchem-ompi-iqtc04.sub
|
UTF-8
| 1,402 | 3.171875 | 3 |
[] |
no_license
|
#!/bin/bash
##########################################
# SGE options and parameters
##########################################
# (1) Name of the job
#$ -N test_nwchem_iqtc04
# (2) Requested resources
# Parallel Environment and number of cores
#$ -pe omp* 4
# Queue
#$ -q iqtc04.q
# Shell
#$ -S /bin/bash
# (3) Output files
#$ -cwd
#$ -o iqtc04-4.out
#$ -e iqtc04-4.err
# (4) Remove the first '#' of the following 2 lines if you want to receive an email when the job ends.
##$ -m e
##$ -M yourmail@ub.edu
##########################################
# User environment.
##########################################
# Load the modules needed
. /etc/profile.d/modules.sh
module load nwchem/5.1.1_ics-11.1.072_ompi-1.4.2
INPUT=input_m5_2x_Rqm3.nw
output=$INPUT.log
##########################################
# Copying files needed
##########################################
# We copy the inputs to the directory where the jobs will run
cd $TMPDIR
cp -r $HOME/bench/NWCHEM/$INPUT .
##########################################
# Run the job
##########################################
export OMP_NUM_THREADS=1
echo "INICI"
date
mpirun -np $NSLOTS nwchem $INPUT > $output
echo "FI"
date
##########################################
# Copy the results to our home directory
##########################################
mkdir -p $HOME/bench/NWCHEM/OUT_iqtc04
cp -r $TMPDIR $HOME/bench/NWCHEM/OUT_iqtc04
| true |
9b72ed049802b8820de53b4f12778a7d3fdbe4d2
|
Shell
|
swjtk/hardLinks
|
/Lb1.sh
|
UTF-8
| 214 | 3.28125 | 3 |
[] |
no_license
|
#!/bin/bash
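# hard-link every file matching *.<ext> found under a search dir into a target dir
# usage (inferred): Lb1.sh <extension> <target-dir> <search-dir>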
TMP='/tmp/errorfile'
mkdir -p $2
>$TMP
direction=$(realpath $2)
find "$3" -type f -name "*.$1" -exec ln {} $direction 2>$TMP ';'
while read LINE
do
echo "$(basename $0): $LINE">&2
done<$TMP
| true |
4907097238f2325fd56c37a3ca871f051b00a1fb
|
Shell
|
mariano78/nettemp
|
/modules/sensors/1wire/1wire_serial_scan
|
UTF-8
| 975 | 3.421875 | 3 |
[
"MIT"
] |
permissive
|
#! /bin/bash
dir=$( cd "$( dirname "$0" )" && cd ../../../ && pwd )
date=`date +%y%m%d-%H%M`
# DS9097 - serial
if [ ! -e $dir/tmp/.digitemprcs ]; then
dev=$(sqlite3 -cmd ".timeout 2000" $dir/dbf/nettemp.db "SELECT dev FROM usb WHERE device='1wire Serial'"|sed 's/\/dev\///g')
if [[ "$dev" == "none" ]]; then
for i in 0 1 2 3 4
do
if [ `ls /dev/ttyS$i 2> /dev/null` ]; then
/usr/bin/digitemp_DS9097 -i -c $dir/tmp/.digitemprcs -s/dev/ttyS$i &> /dev/null
fi
done
else
/usr/bin/digitemp_DS9097 -i -c $dir/tmp/.digitemprcs -s/dev/$dev &> /dev/null
fi
if [ -e $dir/tmp/.digitemprcs ]; then
sqlite3 -cmd ".timeout 2000" $dir/dbf/nettemp.db "UPDATE device SET serial='DS9097' WHERE id='1'"
echo Serial: DS9097 found
echo "$date temp_dev_scan - Discovered DS9097 on serial port" >> $dir/tmp/log.txt
else
sqlite3 -cmd ".timeout 2000" $dir/dbf/nettemp.db "UPDATE device SET serial='off'"
fi
fi
| true |
ff02e4bae8602c713d6822a6eb8ee49809d2cb9f
|
Shell
|
mariusv/Personal
|
/bash/ip_camera_capture.sh
|
UTF-8
| 1,573 | 2.859375 | 3 |
[] |
no_license
|
#!/bin/sh -
#Primitive IP Camera Capture Script
#Axis 210a Camera
#Use a Cron Job To Control
#Tested under FreeBSD
ROT=$(date "+%b%d%y%H%M")
CAPTOOL=/usr/local/bin/mencoder
CAP_OPT1="-prefer-ipv4 -fps 6 -demuxer lavf"
CAP_OPT2="-nosound -oac mp3lame -ovc xvid -xvidencopts pass=1 -o"
ADDIES="cam1 cam3 cam4 cam5" # IP must be in hosts
STORE=/camera
ISTORE=/str/backup
LOGS=/var/log
DSPACE=200000
USED=`df -hm $STORE | awk '{print $1}'`
CAM_USED=`du -ms $STORE | awk '{print $1}'`
CAM_MAX=200000
STR_USED=`du -ms $ISTORE | awk '{print $1}'`
STR_MAX=200000
unset SUDO_COMMAND
export MKISOFS=/usr/local/bin/mkisofs
BURNSIZE=4196
DEVICE=/dev/cd1
BURNLIST=$(ls $STORE/*.avi)
GROWISOFS=/usr/local/bin/growisofs
MKISOFS=/usr/local/bin/mkisofs
#send this in cron email
echo cam_used $CAM_USED
echo str_used $STR_USED
capcam ()
{
rm ${LOGS}/cam*.log
for X in ${ADDIES} ;do
${CAPTOOL} ${CAP_OPT1} http://${X}/mjpg/video.mjpg ${CAP_OPT2} ${STORE}/${X}.$ROT.avi > ${LOGS}/${X}.log &
done
}
cdir ()
{
for Y in ${BURNLIST} ;do
rm $Y
done
}
killall -9 mencoder
sleep 3
if [ $STR_USED -lt $STR_MAX ]
then
if [ $CAM_USED -lt $BURNSIZE ]
then
capcam
else
if ${GROWISOFS} -dvd-compat -Z ${DEVICE} -J -R ${BURNLIST}
then
cdir
capcam
else
if ${MKISOFS} -o $ISTORE/${ROT}.iso -R ${BURNLIST}
then
cdir
capcam
else
echo System Full
fi
fi
fi
fi
| true |
1ab8bc355eb70599f5878d634fdad87b251782fa
|
Shell
|
Lubell/Scripts_Flow
|
/Recons/Linear_NonLinear_Register
|
UTF-8
| 5,802 | 3.640625 | 4 |
[] |
no_license
|
#!/bin/sh
#Performs linear and NonLinear registration with bioimagesuite3
#Takes patient code, name of recon_folder where images are stored, type of file for
#Reference image (can be MR, CT, MNI), Transform file type (can be MR, CT, MNI), and if you want inverses of the grids and matrices put a letter in the fifth option or leave blank for no.
echo "Help?(y/n): "
read helper
if echo "$helper" | grep -q -i "y"
then
echo "When two images are registered the intent is to create a matrix or congruency file that would potentially allow the user to reslice, that is change, one image so that it has the same coordinates and matches a specified image. The reference image is the one you will pick first, this the static file. The transform image is the dynamic file, which you choose second. This image is then warped and moved until it matches the reference file. The orginal transform image is left as it was, but the steps taken to match the transform image to the reference image are recorded in a matrix/congruency file. This matrix/congruency file is what we then use to make the electrode grid, which was made in the space of the transform image, fit the coordinates or space of the reference image."
echo "..."
echo "..."
echo "If the registration keeps spitting out empty grids, i.e. - all zeros, this means that one of the images is too filled with ambient voxels. Try raising the thresholding of the transform image till an image emerges. Then save the thresholded image and rerun the registration using the thresholded image file."
echo "."
echo "."
echo "."
fi
echo "Enter Patient ID"
read patient
#Takes location from script patient_location
location=`/home/knight/ecog/Scripts/patient_location $patient`
cd /home/knight/ecog/DATA_FOLDER/$location/$patient/3D_Images/
Lfolder=$(ls -d Recon_*)
PS3='Select which Recon folder contains the files you want to register: '
echo
select Rfolder in $Lfolder
do
rec="$Rfolder"
break
done
cd /home/knight/ecog/DATA_FOLDER/$location/$patient/3D_Images/$rec
generate_list=$(ls *.nii.*)
PS3='What style of Registration do you want to perform: '
echo
select format in "MR_CT" "Bext_CT" "MNI_Bext"
do
folder="$format"_Reg
if echo "$format" | grep -q -i -e "MR_CT"
then
PS3='Choose MR reference image: '
echo
select MR in $generate_list
do
ref=$MR
break
done
PS3='Choose CT transform image: '
echo
select CT in $generate_list
do
trans=$CT
break
done
elif echo "$format" | grep -q -i -e "Bext_CT"
then
PS3='Choose Bext reference image: '
echo
select Bext in $generate_list
do
ref=$Bext
break
done
PS3='Choose CT transform image: '
echo
select CT in $generate_list
do
trans=$CT
break
done
else
PS3='Choose MNI reference image: '
echo
select MNI in $generate_list
do
ref=$MNI
break
done
PS3='Choose Bext transform image: '
echo
select Bext in $generate_list
do
trans=$Bext
break
done
fi
break
done
echo "Would you like to have inverses made?(yes/no)"
read inverse
#Checks for pre-exsiting Registrations folder
#if echo "$ref" | grep -q -i -e "MR"
#then
#folder=/home/knight/ecog/DATA_FOLDER/$location/$patient/3D_Images/$rec/IP/Registrations/MR_CT_Reg
#elif echo "$ref" | grep -q -i -e "Bext"
#then
#folder=/home/knight/ecog/DATA_FOLDER/$location/$patient/3D_Images/$rec/IP/Registrations/Bext_CT_Reg
#else
#folder=/home/knight/ecog/DATA_FOLDER/$location/$patient/3D_Images/$rec/IP/Registrations/MNI_Bext_Reg
#fi
checker=/home/knight/ecog/DATA_FOLDER/$location/$patient/3D_Images/$rec/IP/Registrations/$folder
E_WRONG_ARGS=95
if [[ -e $checker ]]
then
echo "A folder already exists for this registration do you wish to delete its contents and proceed?(y/n): "
read Keypress
if echo "$Keypress" | grep -q -i -e "y"
then
rm -R $checker
mkdir /home/knight/ecog/DATA_FOLDER/$location/$patient/3D_Images/$rec/IP/Registrations/$folder
else
echo "Well you better rename $checker or move it then..."
echo "Ciao"
echo "...done"
exit $E_WRONG_ARGS
fi
else
mkdir /home/knight/ecog/DATA_FOLDER/$location/$patient/3D_Images/$rec/IP/Registrations
mkdir /home/knight/ecog/DATA_FOLDER/$location/$patient/3D_Images/$rec/IP/Registrations/$folder
fi
#compute lin Reg
for style in rigid affine similarity affine2d rigid2d similarity2d
do
sh /srv/local/bioimagesuite/bioimagesuite30_64/bin/bis_linearintensityregister --inp $ref --inp2 $trans --out /home/knight/ecog/DATA_FOLDER/$location/$patient/3D_Images/$rec/IP/Registrations/$folder/"$format"_Lin_"$style"_xform.matr --mode "$style" --numberoflevels 3 --resolution 1.5 --useinitial 0 --reslimage Resliced --metric NMI --useweightimage 0 --iterations 15 --resolutionrate 2 --autonormalize 1 --optimization default --numberofbins 64 --numberofsteps 1 --stepsize 1.0 --autooptscalefactor 1 --optscalefactor 1.0
done
#Then computes nonlin reg and saves
for style in none rigid affine similarity
do
sh /srv/local/bioimagesuite/bioimagesuite30_64/bin/bis_nonlinearintensityregister --inp $ref --inp2 $trans --out /home/knight/ecog/DATA_FOLDER/$location/$patient/3D_Images/$rec/IP/Registrations/$folder/"$format"_NonLin_"$style"_xform.grd --initialmode "$style" --spacing 15.0 --smoothness 0.001 --numberoflevels 3 --resolution 1.5 --useinitial 0 --reslimage Resliced --metric NMI --useweightimage 0 --iterations 15 --resolutionrate 2 --autonormalize 1 --optimization default --numberofbins 64 --spacingrate 2.0 --extralevels 0 --windowsize 1.0 --numberofsteps 1 --stepsize 1.0
done
cat > /home/knight/ecog/DATA_FOLDER/$location/$patient/3D_Images/$rec/IP/Registrations/$folder/Files_Used_In_Reg.txt<<<"The reference file used was $ref and the transformation file used was $trans"
if echo "$inverse" | grep -q -i -e "y"
then
sh /home/knight/lubell/Jamie_Scripts/Scan_Adjustment/inverse $location $patient $rec $ref $folder
fi
echo "...done"
| true |
fc15c1a1a61dba5134fca10cc14d687052646a68
|
Shell
|
mjmunger/sysutils
|
/scripts/run_checklist.sh
|
UTF-8
| 1,099 | 3.1875 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
run_checklist() {
update_apt
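# Note: update_apt is not defined in this file; it is presumably provided
# by whatever sources this script.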
setup_auto_updates
apt-get install --assume-yes ntp vim
prefer_ipv4
disable_ipv6
}
setup_auto_updates() {
apt-get install --assume-yes unattended-upgrades apt-listchanges
sed -i 's#//Unattended-Upgrade::Mail "root"#Unattended-Upgrade::Mail "root"#g' /etc/apt/apt.conf.d/50unattended-upgrades
sed -i 's#// *"o=Debian,a=stable";# "o=Debian,a=stable";#g' /etc/apt/apt.conf.d/50unattended-upgrades
sed -i 's#// *"o=Debian,a=stable-updates";# "o=Debian,a=stable-updates";#g' /etc/apt/apt.conf.d/50unattended-upgrades
}
prefer_ipv4() {
sed -i 's/^#precedence ::ffff:0:0\/96 100/precedence ::ffff:0:0\/96 100/g' /etc/gai.conf
}
disable_ipv6() {
cat >> /etc/sysctl.conf <<-'EOF'
net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv6.conf.eth0.disable_ipv6 = 1
net.ipv6.conf.eth1.disable_ipv6 = 1
net.ipv6.conf.ppp0.disable_ipv6 = 1
net.ipv6.conf.tun0.disable_ipv6 = 1
EOF
echo "You should reboot to ensure ipv6 is disabled."
}
| true |
a53fe57ee0f6e574585d353c6fefb52dcf69738d
|
Shell
|
RL-code-lib/dapo
|
/tools/auto_func.sh
|
UTF-8
| 2,185 | 3.296875 | 3 |
[] |
no_license
|
#!/bin/bash
export DARL_ROOT=$HOME/darl
SCRIPT_ENV=$HOME/darl/bench
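# Assumed to come from the caller's environment: $serverlist, $local_ip and
# $sleep_time are used below but never set in this file.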
function run_exp() {
dir=$1
envname=$2
suite=$3
n_actor=$4
n_slot=$5
local_port=$6
gpu_idx=$7
#
ploss=$8
vloss=$9
reg=${10}
priority=${11}
base_lr=${12}
final_lr=${13}
adv_est=${14}
adv_coef=${15}
adv_off=${16}
reg_coef=${17}
ent_coef=${18}
cbarD=${19}
mix_lambda=${20}
pub_interval=${21}
total_samples=${22}
rnn=${23}
rollout_len=${24}
max_episode=${25}
max_step=${26}
#
if [ "$rnn" = "rnn" ]; then
echo "RNN"
elif [ "$rnn" = "nornn" ]; then
echo "NO RNN"
else
echo "Unknown option '$rnn'"
exit 1
fi
#
echo `date` "Running $dir on $envname"
cd $SCRIPT_ENV
# kill actors first to clean
cd $SCRIPT_ENV
lsof actors_$gpu_idx.log | awk -F" " '{ print $2 }' | xargs kill
sleep 10
# start
$DARL_ROOT/tools/master_run3.sh $serverlist $envname $suite $n_actor $local_ip $local_port $gpu_idx \
> actors_$gpu_idx.log 2>&1 &
mkdir -p $dir/out
cd $dir
rm -f *.log *.err
python3 -m darl.run_learner \
--game $envname \
--suite $suite \
--$rnn \
--rollout_len $rollout_len \
--port $local_port \
--policy_loss $ploss \
--value_loss $vloss \
--reg $reg \
--priority_exponent $priority \
--base_lr $base_lr \
--final_lr $final_lr \
--adv_est $adv_est \
--adv_coef $adv_coef \
--adv_off $adv_off \
--reg_coef $reg_coef \
--ent_coef $ent_coef \
--cbarD $cbarD \
--mix_lambda $mix_lambda \
--n_slot $n_slot \
--gpu_idx $gpu_idx \
--pub_interval $pub_interval \
--max_episode $max_episode \
--max_step $max_step \
--total_samples $total_samples \
--pickle_protocol 3 \
> run.log 2> run.err &
# sleep
sleep $sleep_time
# kill actors first to get scores of unfinished episodes
cd $SCRIPT_ENV
lsof actors_$gpu_idx.log | awk -F" " '{ print $2 }' | xargs kill
sleep 10
# then kill the learner
cd $dir
lsof run.log | awk -F" " '{ print $2 }' | xargs kill
sleep 61 # Related to cat /proc/sys/net/ipv4/tcp_fin_timeout ?
# we occasionally encounter 'Address already in use' error when starting new learner
}
| true |
a187f4b2f23cbe9c8909deb51b1bb100a7daba0c
|
Shell
|
osuka/bank-statement-processor
|
/legacy/clasificador-pdf-firstdirect.sh
|
UTF-8
| 1,803 | 3.953125 | 4 |
[] |
no_license
|
#!/bin/bash
# virtualenv --python=python3 venv
# source venv/bin/activate
# pip install -r requirements.txt
# date usage works on mac os x - need testing on Linux (!)
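# Usage: run from the directory containing the "fd statement *.pdf" exports;
# matched PDFs are copied to their new name and the originals renamed *.processed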
UNAME="`uname -a`"
if [[ "$UNAME" != *Darwin* ]] ; then
echo "Need to test this on linux - take a look"
exit 1
fi
TMPFILE="/tmp/procesar-tmp.txt"
for DOC in ./fd\ statement\ *.pdf; do
TYPE="error"
DATE="NOTFOUND"
EXTRA=""
if pdf2txt.py -Y loose -A "$DOC" >${TMPFILE} 2>${TMPFILE}-err; then
DATE="`cat ${TMPFILE} | sed -n 's/.*[0-9]* [a-zA-Z]* to \([0-9]*\) \([a-zA-z]*\) \([0-9][0-9][0-9][0-9]\).*/\3.\2.\1/gp' | head -1`"
if [ "$DATE" == "" ]; then
DATE="`cat ${TMPFILE} | sed -n 's/.*[0-9]* [a-zA-Z]* [0-9][0-9][0-9][0-9] to \([0-9]*\) \([a-zA-z]*\) \([0-9][0-9][0-9][0-9]\).*/\3.\2.\1/gp' | head -1`"
fi
# convert from 2018.december.12 to 2018.12.12
DATE="`date -j -f \"%Y.%B.%d\" \"$DATE\" \"+%Y.%m.%d\"`"
# possible known text markers
if grep -s "Your Bonus Savings A/C details" ${TMPFILE} >/dev/null ; then
TYPE="first direct bonus statement"
elif grep -s "Your 1st Account details" ${TMPFILE} >/dev/null ; then
TYPE="first direct statement"
else
TYPE="unknown"
fi
fi
# new name
if [ "$TYPE" == "error" ]; then
echo "$DOC: Could not process. Document is encrypted."
elif [ "$DATE" == "NOTFOUND" ]; then
echo "$DOC: Type is $TYPE but date can't be found"
else
if [ "$EXTRA" != "" ]; then
EXTRA=" `echo $EXTRA | sed 's/[^ a-zA-Z0-9-]//g'`"
fi
NAME="$DATE $TYPE$EXTRA firstdirect.pdf"
NAME="`echo $NAME | awk '{print tolower($0)}'`"
if [ -f "$NAME" ]; then
echo "$DOC: Can't copy to $NAME as it already exists"
else
cp "$DOC" "$NAME"
mv "$DOC" "$DOC.processed"
fi
fi
done
| true |
d4807fed8fd2966c545ef2b6c17dd02d3c775476
|
Shell
|
garborg/mydots
|
/dots/.bash_profile
|
UTF-8
| 324 | 3.5625 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# This file only exists to protect against tools that create
# .bash_profile, causing it to be read instead of .profile in some situations.
# (or existence of .bash_login)
# .bashrc checks if shell is interactive and/or bash, and acts accordingly
if [ -f "$HOME/.bashrc" ]; then
. "$HOME/.bashrc"
fi
| true |
3c372b1d56585176f625a7e7723dc09226aae46a
|
Shell
|
whart222/pyutilib
|
/pyutilib/component/loader/tests/eggsrc/update
|
UTF-8
| 468 | 2.515625 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
#
# A script that is used to update the EGG files with a new version of Python
#
cd pkg1
python setup.py sdist bdist_egg
cd ../pkg2
python setup.py sdist bdist_egg
cd ../pkg3
python setup.py sdist bdist_egg
cd ../pkg4
python setup.py sdist bdist_egg
cd ../pkg5
python setup.py sdist bdist_egg
cd ../pkg6
python setup.py sdist bdist_egg
cd ../pkg7
python setup.py sdist bdist_egg
cd ..
cp pkg[1-2]/dist/P*egg ../eggs1
cp pkg[3-7]/dist/P*egg ../eggs2
| true |
80c391ba2e1c15faccf8a625bc34d2bbbf6559f7
|
Shell
|
ProsAndCons/bin
|
/junk/hex2rgb
|
UTF-8
| 172 | 2.984375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# hex2rgb
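# Usage: hex2rgb "#1e90ff"   (or pipe the hex string on stdin)
# prints the decimal components, e.g. "30 144 255"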
hex=${1:-$(< /dev/stdin)}
hex=${hex#\#}  # strip a leading '#', if present
((r=16#${hex:0:2},
g=16#${hex:2:2},
b=16#${hex:4:2})) 2>/dev/null
echo "${r-0} ${g-0} ${b-0}"
| true |
7557821af806d14cf62ee9ca9c53a4b08ed10d19
|
Shell
|
redbrick/HelpdeskTalks
|
/IntroToBash/Fibonacci.sh
|
UTF-8
| 273 | 3.640625 | 4 |
[] |
no_license
|
#!/bin/bash
echo "How many numbers do you want of the Fibonacci series?"
read total
x=1
y=1
i=2
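# x and y hold the two most recent terms; the first two terms (1, 1) are
# printed directly and the loop generates the rest.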
echo "Fibonacci Series up to $total terms :: "
echo "$x"
echo "$y"
while [ $i -lt $total ]; do
i=`expr $i + 1 `
z=`expr $x + $y `
echo "$z"
x=$y
y=$z
done
| true |
6d61a0c161de73cfe564eb35479de5656c224672
|
Shell
|
DDnie19/Shell_scripts
|
/vm.sh
|
UTF-8
| 778 | 3 | 3 |
[] |
no_license
|
#!/bin/bash
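# Rebuilds the VMware host kernel modules from mkubecek/vmware-host-modules,
# checking out the branch that matches the installed Workstation version.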
if [ -d /opt/vmware-host-modules ];
then
sudo rm -rf /opt/vmware-host-modules
echo -e "\033[31m vmware-host-modules is deleted!\033[0m"
echo "Start Download!"
sudo git clone \
-b workstation-$( grep player.product.version /etc/vmware/config | sed '/.*\"\(.*\)\".*/ s//\1/g' ) \
https://github.com/mkubecek/vmware-host-modules.git \
/opt/vmware-host-modules/
echo -e "\033[31m Download Finished!\033[0m"
echo -e "\033[31m Start Make!\033[0m"
cd /opt/vmware-host-modules/
sudo make
sudo make install
echo -e "\033[31m Make install Done!\033[0m"
else
echo -e "\033[31m /opt/vmware-host-modules is Null!\033[0m"
echo -e "\033[31m Some errors,do some things!\033[0m"
fi
| true |
80640d43996926e59e790b6cf138c931782facf3
|
Shell
|
jollywho/Yaya-tan
|
/lib/scripts/eidirpop.sh
|
UTF-8
| 880 | 3.65625 | 4 |
[] |
no_license
|
if [ "$HOSTNAME" == casper ]; then
YFSDIR=${HOME}
elif [ "$HOSTNAME" == balthasar ]; then
YFSDIR=${HOME}/casper
fi
YFSDIR="${YFSDIR}/YFS/ALL"
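# Walk the tree given as $1 and move recognised video files into per-series
# folders under $YFSDIR; the series name comes from $2 or from 'eidata'.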
find "${1}" -iname '*.*' | while read file; do
#if good extension
shopt -s nocasematch
if [[ "${file}" =~ ^.*\.(mkv|avi|mp4|ogm)$ ]]; then
# if eidata supplied
if [ $# -gt 1 ]; then
filename=$(basename "$file")
name="$2"
else
filename=$(basename "$file")
dest=$(echo "$filename" | eidata )
name=$(echo $dest | cut -d " " -f1)
fi
# if a valid name is returned
if [ -n "$name" ]; then
new_YFSDIR="$YFSDIR"/"$name"
new_file=${new_YFSDIR}/$(basename "${filename}")
mkdir -p "$new_YFSDIR"
#if file doesnt exist (to prevent overwrite)
if [ ! -f "$new_file" ]; then
mv -i "$file" "$new_YFSDIR"
fi
fi
fi
done
#delete empty directories
#find "${1}" -type d -empty -delete
| true |
8d8a38a39cf68feabb960fb3d7935f6fede08c8c
|
Shell
|
imattman/dotfiles
|
/scripts/tolower
|
UTF-8
| 123 | 2.859375 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# fail early
#set -euo pipefail
while IFS= read -r line; do
# modern bash
echo "${line,,}"
done
| true |
2671493126ad537d759ca98632bd356e752f5a9b
|
Shell
|
RoHBee/Robee
|
/Robee.sh
|
UTF-8
| 6,207 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/bash
# Edited by:Neutral Me
# Youtube:Neutral Me
youtube='Neutral Me'
bb="\e[1;5m"
nocolor='\033[0m'
red='\033[0;31m'
green='\033[0;32m'
orange='\033[0;33m'
blue='\033[0;34m'
purple='\033[0;35m'
cyan='\033[0;36m'
lightgray='\033[0;37m'
darkgray='\033[1;30m'
lightred='\033[1;31m'
lightgreen='\033[1;32m'
lightblue='\033[1;34m'
lightpurple='\033[1;35m'
lightcyan='\033[1;36m'
white='\033[1;37m'
d="\e[0m"
version="2.1"
function subscribem(){
gio open "https://www.youtube.com/channel/UC3rLWL2mIeRHHvwT9ZrgMmA"
}
function error1(){
logo
echo -e "[${cyan}*${d}] Fixing the error may take a bit of time....."
sleep 2
echo -e "Which one do you want??"
echo -e "${cyan}[1]${d} Kali-Rolling ${lightblue}*${d}(Default and frequently updated)"
echo -e "${cyan}[2]${d} Kali-Last-Snapshot ${lightblue}*${d}(Stable and safest) [${green}RECOMMENDED${d}]"
echo -e "${cyan}[3]${d} Kali-Experimental ${lightblue}*${d}(Under testing Packages)"
echo -e "${cyan}[4]${d} Other ${lightblue}*${d}(Additional tools)"
echo -e "${cyan}[5]${d} All ${lightblue}*${d}(May cause slower downloads)"
echo -e "${cyan}[6]${d} Back"
read pname
if [[ "${pname}" = "1" ]];then
echo "deb http://http.kali.org/kali kali-rolling main non-free contrib" | sudo tee /etc/apt/sources.list
elif [[ "${pname}" = "2" ]];then
echo "deb http://http.kali.org/kali kali-last-snapshot main non-free contrib" | sudo tee /etc/apt/sources.list
elif [[ "${pname}" = "3" ]];then
echo "deb http://http.kali.org/kali kali-experimental main non-free contrib" | sudo tee -a /etc/apt/sources.list
elif [[ "${pname}" = "4" ]];then
echo "deb-src http://http.kali.org/kali kali-rolling main non-free contrib" | sudo tee -a /etc/apt/sources.list
elif [[ "${pname}" = "5" ]];then
echo "deb http://http.kali.org/kali kali-rolling main non-free contrib" | sudo tee /etc/apt/sources.list
echo "deb http://http.kali.org/kali kali-last-snapshot main non-free contrib" | sudo tee -a /etc/apt/sources.list
echo "deb http://http.kali.org/kali kali-experimental main non-free contrib" | sudo tee -a /etc/apt/sources.list
echo "deb-src http://http.kali.org/kali kali-rolling main non-free contrib" | sudo tee -a /etc/apt/sources.list
elif [[ "${pname}" = "6" ]];then
menu
else
error1
fi
echo -e "[${cyan}*${d}] Sources are set, now let's update. [${green}INSTANT${d}]"
sleep 3
sudo apt-get update
clear
logo
echo -e "[${green}*${d}] Error is fixed"
sleep 2
echo -e "[${green}?${d}] Subscribe to Me??[y/n]"
read answer
if [[ "$answer" =~ ^[Yy] ]]; then
subscribem
fi
}
function checkroot(){
if [[ $EUID != 0 ]]; then
echo -e "[${white}*${d}]${bb}${red}ERROR${d}: Not ${cyan}root${d}${bb} : YOU MIGHT GET ERRORS"
fi
sleep 3
}
function logo(){
clear
echo -e "${lightred}RRRRRRRRRRRRRRRRR BBBBBBBBBBBBBBBBB ${d} ";
echo -e "${lightred}R::::::::::::::::R B::::::::::::::::B ${d} ";
echo -e "${lightred}R::::::RRRRRR:::::R B::::::BBBBBB:::::B ${d} ";
echo -e "${lightred}RR:::::R R:::::R BB:::::B B:::::B ${d} ";
echo -e "${lightred} R::::R R:::::R ooooooooooo B::::B B:::::B eeeeeeeeeeee eeeeeeeeeeee ${d}";
echo -e "${lightred} R::::R R:::::Roo:::::::::::oo B::::B B:::::B ee::::::::::::ee ee::::::::::::ee ${d}";
echo -e "${lightred} R::::RRRRRR:::::Ro:::::::::::::::o B::::BBBBBB:::::B e::::::eeeee:::::eee::::::eeeee:::::ee${d}";
echo -e "${lightred} R:::::::::::::RR o:::::ooooo:::::o B:::::::::::::BB e::::::e e:::::e::::::e e:::::e${d}";
echo -e "${lightred} R::::RRRRRR:::::Ro::::o o::::o B::::BBBBBB:::::Be:::::::eeeee::::::e:::::::eeeee::::::e${d}";
echo -e "${lightred} R::::R R:::::o::::o o::::o B::::B B:::::e:::::::::::::::::ee:::::::::::::::::e ${d}";
echo -e " ${lightred} R::::R R:::::o::::o o::::o B::::B B:::::e::::::eeeeeeeeeee e::::::eeeeeeeeeee ${d} ";
echo -e " ${lightred} R::::R R:::::o::::o o::::o B::::B B:::::e:::::::e e:::::::e ${d} ";
echo -e "${lightred}RR:::::R R:::::o:::::ooooo:::::BB:::::BBBBBB::::::e::::::::e e::::::::e ${d} ";
echo -e "${lightred}R::::::R R:::::o:::::::::::::::B:::::::::::::::::B e::::::::eeeeeeee e::::::::eeeeeeee${d} ";
echo -e "${lightred}R::::::R R:::::Roo:::::::::::ooB::::::::::::::::B ee:::::::::::::e ee:::::::::::::e ${d}";
echo -e "${lightred}RRRRRRRR RRRRRRR ooooooooooo BBBBBBBBBBBBBBBBB eeeeeeeeeeeeee eeeeeeeeeeeeee${d}";
echo ""
echo -e " BY : ${green}Neutral Me${d} V${version}"
echo -e " ${red}YOUTUBE${d}=>${green}Neutral Me${d}"
}
function installtools(){
logo
echo -e "[${cyan}*${d}] ${green}We will be installing the following tools.${d}"
echo -e "${bb} DOWNLOADS WILL BE STORED IN $(pwd)"
echo -e "[${cyan}*${d}] Lazy Script-----For lazy hackers only."
sleep 1
echo -e "[${cyan}*${d}] Airgeddon-------All in one tools (${red}hot${d})."
sleep 1
echo -e "[${cyan}*${d}] Geany-----------Best notepad for linux."
sleep 1
echo -e "[${cyan}*${d}] Tor-Browser-----Surf Anonymously in web."
sleep 1
echo -e "[${cyan}*${d}] Metasploit------Best tool for hackers."
sleep 1
echo -e "[${cyan}*${d}] "
sleep 1
echo ""
echo -e "[${cyan}*${d}] Press ${green}ENTER${d} to continue......"
read enterme
apt-get install geany
apt-get install curl
git clone https://github.com/arismelachroinos/lscript.git
curl https://raw.githubusercontent.com/rapid7/metasploit-omnibus/master/config/templates/metasploit-framework-wrappers/msfupdate.erb > msfinstall && \
chmod 755 msfinstall && \
./msfinstall
git clone https://github.com/v1s1t0r1sh3r3/airgeddon.git
apt-get install torbrowser-launcher
echo -e "[${green}?${d}] Do you want to subscribe my channel?[y/n]"
read answer
if [[ "$answer" =~ ^[Yy] ]]; then
subscribem
fi
}
function menu(){
logo
echo -e "CHOOSE:"
echo -e "[1] Fix UNABLE TO LOCATE PACKAGE ERROR"
echo -e "[2] Download SPECIAL TOOLS FOR YOUR OS"
echo -e "[3] Subscribe NEUTRAL ME"
read menuchoose
if [[ "$menuchoose" = "1" ]];then
error1
elif [[ "$menuchoose" = "2" ]];then
installtools
elif [[ "$menuchoose" = "3" ]];then
subscribem
menu
else
menu
fi
}
checkroot
menu
| true |
3d1822781310a49dae7d448bb66b28ad518e2e73
|
Shell
|
Gilbert-Gb-Li/sage-bigdata-azkaban
|
/azkaban-script/bigdata/bilibili/02.bili_live_temporary_tables_snapshot.sh
|
UTF-8
| 11,163 | 2.671875 | 3 |
[] |
no_license
|
#!/bin/sh
source /etc/profile
source ${AZKABAN_HOME}/conf/env.conf
source ${base_path}/util.sh
interval=5000
date=$1
yesterday=`date -d "-1 day $date" +%Y-%m-%d`
# =========================================== Gift (reward) revenue calculation ================================================ #
# ----------------------------------------- #
# -- 挑出含礼物的弹幕
# ----------------------------------------- #
danmu_daily0="create temporary table bigdata.bili_danmu_gift_info_daily_tmp0 as
select *
from bigdata.bili_live_danmu_data_origin
where dt='${date}' and gift_num>0 and room_id!='unknown'
and audience_id is not null and audience_id!='';"
# --------------------------------------------#
# Append one trailing record per group whose gap exceeds the interval
#---------------------------------------------#
danmu_daily1="create temporary table bigdata.bili_danmu_gift_info_daily_tmp1 as
select
room_id,audience_id,gift_id,gift_num,data_generate_time
from bigdata.bili_danmu_gift_info_daily_tmp0
union all
select
room_id,audience_id,gift_id,0 as gift_num,
(max(data_generate_time)+${interval}+1000) as data_generate_time
from bigdata.bili_danmu_gift_info_daily_tmp0
group by room_id,audience_id,gift_id;"
# --------------------------------------------- #
# -- Keep records whose time gap exceeds the threshold
# --------------------------------------------- #
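# Combo detection: within one (room_id, audience_id, gift_id) group, consecutive
# rows less than ${interval} ms apart with a growing gift_num belong to the same
# running combo (tag=0); only the last row of each combo gets tag=1 and is
# summed in the next step.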
danmu_daily2="create temporary table bigdata.bili_danmu_gift_info_daily_tmp2 as
select a.room_id,a.audience_id,a.gift_id,
a.gift_num a_gift_num,b.gift_num b_gift_num,
a.data_generate_time a_data_generate_time,b.data_generate_time b_data_generate_time,
case
when b.data_generate_time-a.data_generate_time<${interval} and a.gift_num<b.gift_num
then 0
else 1
end tag
from
(select
room_id,audience_id,gift_id,gift_num,data_generate_time,
row_number() over(partition by room_id,audience_id,gift_id order by data_generate_time asc) row_id
from bigdata.bili_danmu_gift_info_daily_tmp1) a
join
(select
room_id,audience_id,gift_id,gift_num,data_generate_time,
row_number() over(partition by room_id,audience_id,gift_id order by data_generate_time asc) row_id
from bigdata.bili_danmu_gift_info_daily_tmp1) b
on a.row_id=b.row_id-1 and a.room_id=b.room_id and a.audience_id=b.audience_id and a.gift_id=b.gift_id;"
# ------------------------------ #
# -- Gift income for the day
# ------------------------------ #
payer_info_all="create table bigdata.bili_payer_gift_info_tmp as
select
a.room_id,a.audience_id,c.audience_name,
c.audience_title,c.audience_title_grade,c.audience_level,
a.gift_id,a.gift_num,
if(b.gift_gold is null,0,b.gift_gold) gift_gold,
if((a.gift_num * b.gift_gold) is null,0,a.gift_num * b.gift_gold) gift_val
from (
select room_id,audience_id,gift_id,
sum(a_gift_num) gift_num
from bigdata.bili_danmu_gift_info_daily_tmp2
where tag=1
group by room_id,audience_id,gift_id) a
left join (
select * from(
select *,row_number() over(partition by room_id,audience_id,gift_id order by data_generate_time desc) row_id
from bigdata.bili_danmu_gift_info_daily_tmp0) d
where d.row_id=1
) c
on a.room_id=c.room_id and a.audience_id=c.audience_id and a.gift_id=c.gift_id
left join (
select gift_id,gift_gold
from bigdata.bili_live_gift_info_all_snapshot
where dt='${date}') b
on a.gift_id=b.gift_id;"
# ============================================ Guard ("大航海") value calculation ================================================ #
# -- 大航海数量计算 -- #
guard_num="create temporary table bigdata.bili_live_guard_num_tmp as
select a.room_id,a.guard_num
from(
select *,row_number() over(partition by room_id order by data_generate_time desc) row_id
from bigdata.bili_live_guard_num_data_origin
where dt='${date}' and guard_num>0) a
where a.row_id=1;"
# -- 大航海明细表计算 -- #
guard_list="create table bigdata.bili_live_guard_list_value_tmp as
select
a.room_id,a.guard_user_id payer_id,a.guard_user_name payer_name,
a.guard_level,a.guard_rank,b.guard_val,
a.data_generate_time,a.meta_table_name,a.meta_app_name,dt
from(
select * from (
select *,row_number() over(partition by guard_user_id,room_id order by data_generate_time desc) row_id
from bigdata.bili_live_guard_list_data_origin
where dt='${date}' and room_id!='unknown'
and guard_user_id is not null and guard_user_id!='') c
where c.row_id=1) a
join bigdata.bili_live_guard_value_data_origin b
on a.guard_level=b.guard_level;"
# -- Guard stats temporary table -- #
guard_stat="create temporary table bigdata.bili_live_guard_list_stat_tmp as
select
if(a.room_id is null,b.room_id,a.room_id) room_id,
if(b.room_id is null,0,b.guard_num) guard_num1,
if(a.room_id is null,0,a.guard_num) guard_num2,
if(b.room_id is null,0,zong_du) zong_du,
if(b.room_id is null,0,ti_du) ti_du,
if(b.room_id is null,0,jian_zhang) jian_zhang1
from bigdata.bili_live_guard_num_tmp a
full join
(select room_id,count(payer_id) guard_num,
count(case when guard_level=1 then payer_id else null end) zong_du,
count(case when guard_level=2 then payer_id else null end) ti_du,
count(case when guard_level=3 then payer_id else null end) jian_zhang
from bigdata.bili_live_guard_list_value_tmp
group by room_id) b
on a.room_id=b.room_id;"
# -- Guard value stats temporary table -- #
guard_value="create table bigdata.bili_live_guard_stat_value_tmp as
select
a.room_id,a.guard_num1,a.guard_num2,
a.zong_du,a.ti_du,a.jian_zhang1,a.jian_zhang2,
a.zong_du*b.guard_val+a.ti_du*c.guard_val+a.jian_zhang2*d.guard_val guard_val
from
(select *,
if(guard_num1<guard_num2,guard_num2-zong_du-ti_du,jian_zhang1) jian_zhang2
from bigdata.bili_live_guard_list_stat_tmp) a,
(select guard_val from bigdata.bili_live_guard_value_data_origin where guard_level=1) b,
(select guard_val from bigdata.bili_live_guard_value_data_origin where guard_level=2) c,
(select guard_val from bigdata.bili_live_guard_value_data_origin where guard_level=3) d;"
# ============================================ Interacting-audience count calculation ============================================= #
audience_num="create table bigdata.bili_user_audience_num_tmp AS
select
a.room_id,count(audience_id) audience_num
from (
select room_id,audience_id
from bigdata.bili_live_danmu_data_origin
where dt='${date}' and room_id!='unknown'
and audience_id is not null and audience_id!=''
group by room_id,audience_id
) a
group by a.room_id;"
# ====================================== Compute stream start time and duration BEGIN ========================================== #
# ----------------------------------------#
# Min/max records for each streamer:
# the first record with is_live=1 marks the start,
# the last record is taken whether live or not
# ----------------------------------------#
bc_outside="create temporary table bigdata.bili_broadcast_outside_tmp as
select room_id,min(data_generate_time) start_time,
min(data_generate_time) end_time,
1 from_state,1 to_state
from bigdata.bili_live_user_info_data_origin
where dt='${date}' and is_live=1 and room_id!='unknown'
group by room_id
union all
select room_id,max(data_generate_time) start_time,
max(data_generate_time) end_time,
0 from_state,0 to_state
from bigdata.bili_live_user_info_data_origin
where dt='${date}' and room_id!='unknown'
group by room_id;"
# ------------------------ #
# Add row numbers to the raw data
# ------------------------ #
bc_row_num="create temporary table bigdata.bili_broadcast_row_num_tmp as
select row_number() over(partition by room_id order by data_generate_time asc) row_id,
room_id,data_generate_time,is_live
from bigdata.bili_live_user_info_data_origin
where dt='${date}' and room_id!='unknown';"
# ----------------------- #
# Build intermediate data via self-join
# ----------------------- #
bc_row_mid="create temporary table bigdata.bili_broadcast_row_middle_tmp as
select a.row_id,a.room_id,a.data_generate_time start_time,b.data_generate_time end_time,
a.is_live from_state,b.is_live to_state
from bigdata.bili_broadcast_row_num_tmp a
join bigdata.bili_broadcast_row_num_tmp b
on a.row_id = b.row_id-1 and a.room_id=b.room_id;"
# ---------------------------------------- #
# Keep boundary rows and rows where the live state changes
# ---------------------------------------- #
bc_row_exchange="create temporary table bigdata.bili_broadcast_row_exchange_tmp as
select row_id,room_id,
case
when from_state=0 and to_state=1
then end_time
when from_state=1 and to_state=1
then start_time
end start_time,
case
when from_state=1 and to_state=0
then end_time
when from_state=0 and to_state=0
then start_time
end end_time,from_state,to_state
from (
select row_number() over(partition by room_id order by start_time,end_time asc) row_id,*
from(
select room_id,start_time,end_time,from_state,to_state
from bigdata.bili_broadcast_row_middle_tmp
where from_state!=to_state
union all
select room_id,start_time,end_time,from_state,to_state
from bigdata.bili_broadcast_outside_tmp) a
) b;"
# ---------------------------------------- #
# Final result temporary table
# ---------------------------------------- #
bc_final="create table bigdata.bili_broadcast_row_final_tmp as
select a.room_id,a.start_time,b.end_time,
b.end_time-a.start_time duration
from bigdata.bili_broadcast_row_exchange_tmp a
join bigdata.bili_broadcast_row_exchange_tmp b
on a.row_id=b.row_id-1 and a.room_id=b.room_id
where a.start_time is not null and b.end_time is not null;"
# ====================================== Compute stream start time and duration END ========================================== #
echo "Date: ${date}"
echo "Creating gift-payer temp tables, BEGIN..."
executeHiveCommand "${danmu_daily0} ${danmu_daily1} ${danmu_daily2} ${payer_info_all}"
# /usr/bin/hive -e "${danmu_daily0} ${danmu_daily1} ${danmu_daily2} ${payer_info_all}"
echo "Creating gift-payer temp tables, END."
echo "Computing guard value, BEGIN..."
executeHiveCommand "${guard_num} ${guard_list} ${guard_stat} ${guard_value}"
# /usr/bin/hive -e "${guard_num} ${guard_list} ${guard_stat} ${guard_value}"
echo "Computing guard value, END."
echo "Creating audience-count temp table, BEGIN..."
executeHiveCommand "${audience_num}"
# /usr/bin/hive -e "${audience_num}"
echo "Creating audience-count temp table, END."
echo "Creating stream start/duration temp tables, BEGIN..."
executeHiveCommand "${bc_outside} ${bc_row_num} ${bc_row_mid} ${bc_row_exchange} ${bc_final}"
# /usr/bin/hive -e "${bc_outside} ${bc_row_num} ${bc_row_mid} ${bc_row_exchange} ${bc_final}"
echo "Creating stream start/duration temp tables, END."
| true |
f78a4781972fbf72420a900ca9ce91e49285130e
|
Shell
|
drkennetz/cwlexec
|
/src/test/integration-test/run.scatter.sh
|
UTF-8
| 1,780 | 2.59375 | 3 |
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
CWLTEST_TOP=$(pwd)
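# Each block below runs one scatter example and exits early with a non-zero
# code on failure; on success the script exits with the final test count.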
cd $CWLTEST_TOP/integration/scatter/cat
./run_example.sh
exitcode=$?
if [ $exitcode -ne 0 ]; then
exit 1
fi
_test_count=1
cd $CWLTEST_TOP/integration/scatter/foo
./run_example.foo.sh
exitcode=$?
if [ $exitcode -ne 0 ]; then
exit 1
fi
_test_count=2
cd $CWLTEST_TOP/integration/scatter/mapreduce
./run_example.sh
exitcode=$?
if [ $exitcode -ne 0 ]; then
exit 2
fi
_test_count=3
cd $CWLTEST_TOP/integration/scatter/any
./run_example.sh
exitcode=$?
if [ $exitcode -ne 0 ]; then
exit 3
fi
_test_count=4
cd $CWLTEST_TOP/integration/scatter/queue
./run_example.sh
exitcode=$?
if [ $exitcode -ne 0 ]; then
exit 4
fi
_test_count=5
cd $CWLTEST_TOP/integration/scatter/flat_crossproduct
./run_example.sh
exitcode=$?
if [ $exitcode -ne 0 ]; then
exit 5
fi
_test_count=6
cd $CWLTEST_TOP/integration/scatter/dotproduct
./run_example.sh
exitcode=$?
if [ $exitcode -ne 0 ]; then
exit 6
fi
_test_count=7
cd $CWLTEST_TOP/integration/scatter/valueform0
./run_example.sh
exitcode=$?
if [ $exitcode -ne 0 ]; then
exit 1
fi
_test_count=8
cd $CWLTEST_TOP/integration/scatter/valueform1
./run_example.sh
exitcode=$?
if [ $exitcode -ne 0 ]; then
exit 1
fi
_test_count=9
cd $CWLTEST_TOP/integration/scatter/valueform2
./run_example.sh
exitcode=$?
if [ $exitcode -ne 0 ]; then
exit 1
fi
_test_count=10
cd $CWLTEST_TOP/integration/scatter/valueform3
./run_example.sh
exitcode=$?
if [ $exitcode -ne 0 ]; then
exit 1
fi
_test_count=11
cd $CWLTEST_TOP/integration/scatter/valueform4
./run_example.sh
exitcode=$?
if [ $exitcode -ne 0 ]; then
exit 1
fi
_test_count=12
cd $CWLTEST_TOP/integration/scatter/valueform5
./run_example.sh
exitcode=$?
if [ $exitcode -ne 0 ]; then
exit 1
fi
_test_count=13
exit ${_test_count}
| true |
271ec5cea9266b0c250196d4d0619274b8a43259
|
Shell
|
darkearl/cookbook
|
/libraries/fileRemoteUtils.bash
|
UTF-8
| 3,150 | 4.3125 | 4 |
[] |
no_license
|
#!/bin/bash -e
#########################
# FILE REMOTE UTILITIES #
#########################
function checkExistURL()
{
local -r url="${1}"
if [[ "$(existURL "${url}")" = 'false' ]]
then
fatal "url '${url}' not found"
else
debug "url '${url}' is working"
fi
}
function existURL()
{
local -r url="${1}"
# Install Curl
installCURLCommand > '/dev/null'
# Check URL
if ( curl -f --head -L "${url}" -o '/dev/null' -s --retry 12 --retry-delay 5 ||
curl -f -L "${url}" -o '/dev/null' -r 0-0 -s --retry 12 --retry-delay 5 )
then
echo 'true'
else
echo 'false'
fi
}
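# Downloads a url to a destination file; pass 'true' as the third argument
# to overwrite an existing file.
# Example (hypothetical URL):
#   downloadFile "https://example.com/file.txt" "/tmp/file.txt" 'true'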
function downloadFile()
{
local -r url="${1}"
local -r destinationFile="${2}"
local overwrite="${3}"
checkExistURL "${url}"
# Check Overwrite
if [[ "$(isEmptyString "${overwrite}")" = 'true' ]]
then
overwrite='false'
fi
checkTrueFalseString "${overwrite}"
# Validate
if [[ -f "${destinationFile}" ]]
then
if [[ "${overwrite}" = 'false' ]]
then
fatal "file '${destinationFile}' found"
fi
rm -f "${destinationFile}"
elif [[ -e "${destinationFile}" ]]
then
fatal "file '${destinationFile}' already exists"
fi
# Download
debug "Downloading '${url}' to '${destinationFile}'\n"
curl -L "${url}" -o "${destinationFile}" --retry 12 --retry-delay 5
}
function unzipRemoteFile()
{
local -r downloadURL="${1}"
local -r installFolder="${2}"
local extension="${3}"
# Install wget
installWgetCommand
# Validate URL
checkExistURL "${downloadURL}"
# Find Extension
local exExtension=''
if [[ "$(isEmptyString "${extension}")" = 'true' ]]
then
extension="$(getFileExtension "${downloadURL}")"
exExtension="$(rev <<< "${downloadURL}" | cut -d '.' -f 1-2 | rev)"
fi
# Unzip
if [[ "$(grep -i '^tgz$' <<< "${extension}")" != '' || "$(grep -i '^tar\.gz$' <<< "${extension}")" != '' || "$(grep -i '^tar\.gz$' <<< "${exExtension}")" != '' ]]
then
debug "Downloading '${downloadURL}'\n"
curl -L "${downloadURL}" --retry 12 --retry-delay 5 | tar -C "${installFolder}" -x -z --strip 1
echo
elif [[ "$(grep -i '^tar\.bz2$' <<< "${exExtension}")" != '' ]]
then
# Install BZip2
installBZip2Command
# Unzip
debug "Downloading '${downloadURL}'\n"
curl -L "${downloadURL}" --retry 12 --retry-delay 5 | tar -C "${installFolder}" -j -x --strip 1
echo
elif [[ "$(grep -i '^zip$' <<< "${extension}")" != '' ]]
then
# Install Unzip
installUnzipCommand
# Unzip
if [[ "$(existCommand 'unzip')" = 'false' ]]
then
fatal 'command unzip not found'
fi
local -r zipFile="${installFolder}/$(basename "${downloadURL}")"
downloadFile "${downloadURL}" "${zipFile}" 'true'
unzip -q "${zipFile}" -d "${installFolder}"
rm -f "${zipFile}"
echo
else
fatal "file extension '${extension}' not supported"
fi
}
| true |
93cfd31e5dfbe77032340fe9a8e9f92a8a9f4cf6
|
Shell
|
Zenglinxiao/alignment-scripts
|
/preprocess/deen-train.sh
|
UTF-8
| 631 | 3.296875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
set -ex
prefix="deen"
DIR_NAME="German-English"
if [ ! -d $DIR_NAME ]; then
wget http://statmt.org/europarl/v7/de-en.tgz
tar -xvzf de-en.tgz
mkdir ${DIR_NAME}
mv europarl-v7.de-en.* ${DIR_NAME}
rm de-en.tgz
fi
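# Assumes MOSES_DIR points at a Moses checkout that provides
# scripts/tokenizer/tokenizer.perl.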
# tokenization to match the test data
${MOSES_DIR}/scripts/tokenizer/tokenizer.perl -l de -no-escape -threads 4 < ${DIR_NAME}/europarl-v7.de-en.de > ${DIR_NAME}/${prefix}.src
${MOSES_DIR}/scripts/tokenizer/tokenizer.perl -l en -no-escape -threads 4 < ${DIR_NAME}/europarl-v7.de-en.en > ${DIR_NAME}/${prefix}.tgt
../scripts/remove_sentences.py ${DIR_NAME}/${prefix} ../test/${prefix} ${prefix}
| true |
7ffae6972e9ea1fdc1c191dfc750f80d2475170e
|
Shell
|
husjon/docker-bash-signal
|
/test.sh
|
UTF-8
| 279 | 3.265625 | 3 |
[] |
no_license
|
#!/bin/bash
WORKER_PID=''
handle_sig_term(){
echo "[Shell] SIGTERM received, informing python script"
kill -TERM $WORKER_PID
wait $WORKER_PID
}
trap 'handle_sig_term' TERM
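# The script blocks in 'wait' below; on SIGTERM the trap fires, forwards the
# signal to the Python worker and waits for it to exit cleanly.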
I=0
echo "[Shell] Starting python script"
python test.py & WORKER_PID=$!
wait $WORKER_PID
| true |
ebcba7ef65c97a878014c72b90ceb6c0f74c1972
|
Shell
|
mcdado/dotfiles
|
/.local-deployment-example.sh
|
UTF-8
| 326 | 2.78125 | 3 |
[] |
no_license
|
COUNTER=0
echo $(date)
echo "Locally deploying for $(whoami)"
rsync -a --delete-excluded --exclude-from=.rsyncrc $(pwd) ~/Sites/site-a/modules/ && COUNTER=$((COUNTER+1))
rsync -a --delete-excluded --exclude-from=.rsyncrc $(pwd) ~/Sites/site-b/modules/ && COUNTER=$((COUNTER+1))
echo "Deployment complete in $COUNTER projects"
| true |
4bdbee9333a80d8956742241cf52d4c1c2e5240c
|
Shell
|
ilyakharlamov/cmdlinefu
|
/kvplot.sh
|
UTF-8
| 2,457 | 3.578125 | 4 |
[] |
no_license
|
#!/bin/sh
# print "processes per user" bar chart
# source: blog.sleeplessbeastie.eu/2014/11/25/how-to-create-simple-bar-charts-in-terminal-using-awk/
# Debian/GNU awk: /usr/bin/awk -> /etc/alternatives/awk -> /usr/bin/gawk
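# Input ($1): a file with one "key value" pair per line, e.g. "alice 42".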
if test -t 1; then
# color steps
cstep1="\033[32m"
cstep2="\033[33m"
cstep3="\033[31m"
cstepc="\033[0m"
ncolors=$(tput colors)
if test -n "$ncolors" && test $ncolors -ge 8; then
echo "support colors"
bold="$(tput bold)"
underline="$(tput smul)"
standout="$(tput smso)"
normal="$(tput sgr0)"
black="$(tput setaf 0)"
red="$(tput setaf 1)"
green="$(tput setaf 2)"
yellow="$(tput setaf 3)"
blue="$(tput setaf 4)"
magenta="$(tput setaf 5)"
cyan="$(tput setaf 6)"
white="$(tput setaf 7)"
else
echo "no color support"
fi
else
echo "no colors"
fi
# get usernames
IFS=
user_processes=$(cat "$1")
# character used to print bar chart
barchr="+"
# current min, max values [from 'ps' output]
vmin=0
echo "vmin:$vmin"
vmax=$(echo "$user_processes" | awk 'BEGIN {max=0} {if($2>max) max=$2} END {print max}')
echo "vmax:$vmax"
# range of the bar graph
dmin=1
dmax=$((80-5))
# generate output
echo "$user_processes" | awk -v dmin="$dmin" -v dmax="$dmax" \
-v vmin="$vmin" -v vmax="$vmax" \
-v cstep1="$cstep1" -v cstep2="$cstep2" -v cstep3="$cstep3" -v cstepc="$cstepc"\
-v barchr="$barchr" \
'BEGIN {
printf("%15s %7s %2s%54s\n","key","value","|<", "bar chart >|")
}
{
x=int(dmin+($2-vmin)*(dmax-dmin)/(vmax-vmin));
printf("%15s %7s ",$1,$2);
for(i=1;i<=x;i++)
{
if (i >= 1 && i <= int(dmax/3))
{printf(cstep1 barchr cstepc);}
else if (i > int(dmax/3) && i <= int(2*dmax/3))
{printf(cstep2 barchr cstepc);}
else
{printf(cstep3 barchr cstepc);}
};
print ""
}'
| true |
bf563c9831c4cc7ae04d696d1fc22aef16fca2eb
|
Shell
|
atkuzmanov/youtube-delete-playlists-script
|
/youtube-delete-playlists-script-v1.0.sh
|
UTF-8
| 2,243 | 3.859375 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
## Uncomment line below if you want it to clear the screen each time the script runs.
# clear
usage="Please run the script with the required parameters as follows: ./delete_youtube_playlists_[VERSION].sh --file [FILE-LOCATION] --api_key [API-KEY] --authorization [AUTHORIZATION_TOKEN]"
while [ -n "$1" ]; do
case "$1" in
-f | --file ) shift
filename="$1"
;;
-ak | --api_key ) shift
api_key="$1"
;;
-au | --authorization ) shift
authorization="$1"
;;
-h | --help) echo ${usage}
exit 1
;;
--)
shift ## The double dash makes everything passed after it a parameter
break
;;
*) echo ${usage} ;;
esac
shift
done
echo ">. Running delete_youtube_playlists_1.sh script..."
## Parsing the ids of the YouTube playlists which are to be deleted and storing them in a variable.
## Parsing using jq with -r specified for "raw" output to avoid quotes around the ids.
youtube_playlist_ids=$( cat ${filename} | jq -r '.items[].id' )
for id in ${youtube_playlist_ids}
do
CURL1=$(curl \
'https://content.googleapis.com/youtube/v3/playlists?id='"${id}"'&key='"${api_key}"'' \
-X DELETE \
-H 'User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:77.0) Gecko/20100101 Firefox/77.0' \
-H 'Accept: */*' \
-H 'Accept-Language: en-GB,en;q=0.5' --compressed \
-H 'X-ClientDetails: appVersion=5.0%20(Macintosh)&platform=MacIntel&userAgent=Mozilla%2F5.0%20(Macintosh%3B%20Intel%20Mac%20OS%20X%2010.14%3B%20rv%3A77.0)%20Gecko%2F20100101%20Firefox%2F77.0' \
-H 'Authorization: '"${authorization}"'' \
-H 'X-Requested-With: XMLHttpRequest' \
-H 'X-JavaScript-User-Agent: apix/3.0.0 google-api-javascript-client/1.1.0' \
-H 'X-Origin: https://explorer.apis.google.com' \
-H 'X-Referer: https://explorer.apis.google.com' \
-H 'X-Goog-Encode-Response-If-Executable: base64' \
-H 'Origin: https://content.googleapis.com' \
-H 'Connection: keep-alive' \
-H 'Referer: https://content.googleapis.com/static/proxy.html?usegapi=1&jsh=m%3B%2F_%2Fscs%2Fapps-static%2F_%2Fjs%2Fk%3Doz.gapi.en_GB.iWyQuFdbWzA.O%2Fam%3DwQc%2Fd%3D1%2Fct%3Dzgms%2Frs%3DAGLTcCOyhlEmBby7qWoiftyYszJcOof1oQ%2Fm%3D__features__'
)
echo $CURL1
sleep 1
done
echo "Done."
| true |
a0f1adeb5b791bc78c2b3fcbcd3ec18148b33b92
|
Shell
|
gdelanoy/shells
|
/bin/batwatch
|
UTF-8
| 21,030 | 4.28125 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
# -----------------------------------------------------------------------------
# bat-extras | Copyright (C) 2019-2020 eth-p | MIT License
#
# Repository: https://github.com/eth-p/bat-extras
# Issues: https://github.com/eth-p/bat-extras/issues
# -----------------------------------------------------------------------------
# shellcheck disable=SC1090
# --- BEGIN LIBRARY FILE: opt.sh ---
# An array of functions to call before returning from `shiftopt`.
#
# If one of these functions returns a successful exit code, the
# option will be transparently skipped instead of handled.
SHIFTOPT_HOOKS=()
# A setting to change how `shiftopt` will interpret short options that consist
# of more than one character.
#
# Values:
#
# SPLIT -- Splits the option into multiple single-character short options.
# "-abc" -> ("-a" "-b" "-c")
#
# VALUE -- Uses the remaining characters as the value for the short option.
# "-abc" -> ("-a=bc")
#
# CONV -- Converts the argument to a long option.
# "-abc" -> ("--abc")
#
# PASS -- Pass the argument along as-is.
# "-abc" -> ("-abc")
#
SHIFTOPT_SHORT_OPTIONS="VALUE"
# Sets the internal _ARGV, _ARGV_INDEX, and _ARGV_LAST variables used when
# parsing options with the shiftopt and shiftval functions.
#
# Arguments:
# ... -- The program arguments.
#
# Example:
# setargs "--long=3" "file.txt"
setargs() {
_ARGV=("$@")
_ARGV_LAST="$((${#_ARGV[@]} - 1))"
_ARGV_INDEX=0
_ARGV_SUBINDEX=1
}
# Gets all the remaining unparsed arguments and saves them to a variable.
#
# Arguments:
# "-a" -- Append the arguments to the variable instead of replacing it.
# $1 -- The variable to save the args to.
#
# Example:
# getargs remaining_args
getargs() {
if [[ "$1" = "-a" || "$1" = "--append" ]]; then
if [[ "${_ARGV_INDEX}" -ne "$((_ARGV_LAST+1))" ]]; then
eval "$2=(\"\${$2[@]}\" $(printf '%q ' "${_ARGV[@]:$_ARGV_INDEX}"))"
fi
else
if [[ "${_ARGV_INDEX}" -ne "$((_ARGV_LAST+1))" ]]; then
eval "$1=($(printf '%q ' "${_ARGV[@]:$_ARGV_INDEX}"))"
else
eval "$1=()"
fi
fi
}
# Resets the internal _ARGV* variables to the original script arguments.
# This is the equivalent of storing the top-level $@ and using setargs with it.
resetargs() {
setargs "${_ARGV_ORIGINAL[@]}"
}
# INTERNAL.
#
# Increments the argv index pointer used by `shiftopt`.
_shiftopt_next() {
_ARGV_SUBINDEX=1
((_ARGV_INDEX++)) || true
}
# Gets the next option passed to the script.
#
# Variables:
# OPT -- The option name.
#
# Returns:
# 0 -- An option was read.
# 1 -- No more options were read.
#
# Example:
# while shiftopt; do
# shiftval
# echo "$OPT = $OPT_VAL"
# done
shiftopt() {
# Read the top of _ARGV.
[[ "$_ARGV_INDEX" -gt "$_ARGV_LAST" ]] && return 1
OPT="${_ARGV[$_ARGV_INDEX]}"
unset OPT_VAL
if [[ "$OPT" =~ ^-[a-zA-Z0-9_-]+=.* ]]; then
OPT_VAL="${OPT#*=}"
OPT="${OPT%%=*}"
fi
# Handle short options.
if [[ "$OPT" =~ ^-[^-]{2,} ]]; then
case "$SHIFTOPT_SHORT_OPTIONS" in
# PASS mode: "-abc=0" -> ("-abc=0")
PASS) _shiftopt_next ;;
# CONV mode: "-abc=0" -> ("--abc=0")
CONV) OPT="-${OPT}"; _shiftopt_next ;;
# VALUE mode: "-abc=0" -> ("-a=bc=0")
VALUE) {
OPT="${_ARGV[$_ARGV_INDEX]}"
OPT_VAL="${OPT:2}"
OPT="${OPT:0:2}"
_shiftopt_next
} ;;
# SPLIT mode: "-abc=0" -> ("-a=0" "-b=0" "-c=0")
SPLIT) {
OPT="-${OPT:$_ARGV_SUBINDEX:1}"
((_ARGV_SUBINDEX++)) || true
if [[ "$_ARGV_SUBINDEX" -gt "${#OPT}" ]]; then
_shiftopt_next
fi
} ;;
# ????? mode: Treat it as pass.
*)
printf "shiftopt: unknown SHIFTOPT_SHORT_OPTIONS mode '%s'" \
"$SHIFTOPT_SHORT_OPTIONS" 1>&2
_shiftopt_next
;;
esac
else
_shiftopt_next
fi
# Handle hooks.
local hook
for hook in "${SHIFTOPT_HOOKS[@]}"; do
if "$hook"; then
shiftopt
return $?
fi
done
return 0
}
# Gets the value for the current option.
#
# Variables:
# OPT_VAL -- The option value.
#
# Returns:
# 0 -- An option value was read.
# EXIT 1 -- No option value was available.
shiftval() {
# Skip if a value was already provided.
if [[ -n "${OPT_VAL+x}" ]]; then
return 0
fi
if [[ "$_ARGV_SUBINDEX" -gt 1 && "$SHIFTOPT_SHORT_OPTIONS" = "SPLIT" ]]; then
# If it's a short group argument in SPLIT mode, we grab the next argument.
OPT_VAL="${_ARGV[$((_ARGV_INDEX+1))]}"
else
# Otherwise, we can handle it normally.
OPT_VAL="${_ARGV[$_ARGV_INDEX]}"
_shiftopt_next
fi
# Error if no value is provided.
if [[ "$OPT_VAL" =~ -.* ]]; then
printc "%{RED}%s: '%s' requires a value%{CLEAR}\n" "batwatch" "$ARG"
exit 1
fi
}
setargs "$@"
_ARGV_ORIGINAL=("$@")
# --- END LIBRARY FILE ---
# --- BEGIN LIBRARY FILE: opt_hook_color.sh ---
# Option parser hook: color support.
# This will accept --no-color or --color.
# It will also try to accept --color=never|always|auto.
#
# The variable OPT_COLOR will be set depending on whether or not a TTY is
# detected and whether or not --color/--no-color is specified.
hook_color() {
SHIFTOPT_HOOKS+=("__shiftopt_hook__color")
__shiftopt_hook__color() {
case "$OPT" in
--no-color) OPT_COLOR=false ;;
--color) {
case "$OPT_VAL" in
"") OPT_COLOR=true ;;
always | true) OPT_COLOR=true ;;
never | false) OPT_COLOR=false ;;
auto) return 0 ;;
*)
printc "%{RED}%s: '--color' expects value of 'auto', 'always', or 'never'%{CLEAR}\n" "batwatch"
exit 1
;;
esac
} ;;
*) return 1 ;;
esac
printc_init "$OPT_COLOR"
return 0
}
# Default color support.
if [[ -z "$OPT_COLOR" ]]; then
if [[ -t 1 ]]; then
OPT_COLOR=true
else
OPT_COLOR=false
fi
printc_init "$OPT_COLOR"
fi
}
# --- END LIBRARY FILE ---
# --- BEGIN LIBRARY FILE: opt_hook_help.sh ---
# Option parser hook: --help support.
# This will accept -h or --help, which prints the usage information and exits.
hook_help() {
SHIFTOPT_HOOKS+=("__shiftopt_hook__help")
if [[ "$1" == "--no-short" ]]; then
__shiftopt_hook__help() {
if [[ "$OPT" = "--help" ]]; then
show_help
exit 0
fi
return 1
}
else
__shiftopt_hook__help() {
if [[ "$OPT" = "--help" ]] || [[ "$OPT" = "-h" ]]; then
show_help
exit 0
fi
return 1
}
fi
}
# --- END LIBRARY FILE ---
# --- BEGIN LIBRARY FILE: opt_hook_version.sh ---
# Option parser hook: --version support.
# This will accept --version, which prints the version information and exits.
hook_version() {
SHIFTOPT_HOOKS+=("__shiftopt_hook__version")
__shiftopt_hook__version() {
if [[ "$OPT" = "--version" ]]; then
printf "%s %s\n\n%s\n%s\n" \
"batwatch" \
"2021.04.06" \
"Copyright (C) 2019-2021 eth-p | MIT License" \
"https://github.com/eth-p/bat-extras"
exit 0
fi
return 1
}
}
# --- END LIBRARY FILE ---
# --- BEGIN LIBRARY FILE: opt_hook_width.sh ---
# --- BEGIN LIBRARY FILE: term.sh ---
# Gets the width of the terminal.
# This will return 80 unless stdin is attached to the terminal.
#
# Returns:
# The terminal width, or 80 if there's no TTY.
#
term_width() {
# shellcheck disable=SC2155
local width="$({ stty size 2>/dev/null || echo "22 80"; } | cut -d ' ' -f2)"
if [[ "$width" -ne 0 ]]; then
echo "$width"
else
echo "80"
fi
return 0
}
# Clears the terminal using the ANSI escape sequences for erase screen and cursor absolute positioning.
term_clear() {
printf "\x1B[3J\x1B[2J\x1B[H"
}
# --- END LIBRARY FILE ---
# Option parser hook: --terminal-width support.
# This will accept --terminal-width=number.
#
# The variable OPT_TERMINAL_WIDTH will be set.
hook_width() {
SHIFTOPT_HOOKS+=("__shiftopt_hook__width")
__shiftopt_hook__width() {
case "$OPT" in
--terminal-width) shiftval; OPT_TERMINAL_WIDTH="$OPT_VAL" ;;
*) return 1 ;;
esac
return 0
}
# Default terminal width.
OPT_TERMINAL_WIDTH="$(term_width)"
}
# --- END LIBRARY FILE ---
# --- BEGIN LIBRARY FILE: print.sh ---
# Printf, but with optional colors.
# This uses the same syntax and arguments as printf.
#
# Example:
# printc "%{RED}This is red %s.%{CLEAR}\n" "text"
#
printc() {
printf "$(sed "$_PRINTC_PATTERN" <<<"$1")" "${@:2}"
}
# Initializes the color tags for printc.
#
# Arguments:
# true -- Turns on color output.
# false -- Turns off color output.
printc_init() {
case "$1" in
true) _PRINTC_PATTERN="$_PRINTC_PATTERN_ANSI" ;;
false) _PRINTC_PATTERN="$_PRINTC_PATTERN_PLAIN" ;;
"[DEFINE]") {
_PRINTC_PATTERN_ANSI=""
_PRINTC_PATTERN_PLAIN=""
local name
local ansi
while read -r name ansi; do
if [[ -z "${name}" && -z "${ansi}" ]] || [[ "${name:0:1}" = "#" ]]; then
continue
fi
ansi="${ansi/\\/\\\\}"
_PRINTC_PATTERN_PLAIN="${_PRINTC_PATTERN_PLAIN}s/%{${name}}//g;"
_PRINTC_PATTERN_ANSI="${_PRINTC_PATTERN_ANSI}s/%{${name}}/${ansi}/g;"
done
if [[ -t 1 && -z "${NO_COLOR+x}" ]]; then
_PRINTC_PATTERN="$_PRINTC_PATTERN_ANSI"
else
_PRINTC_PATTERN="$_PRINTC_PATTERN_PLAIN"
fi
} ;;
esac
}
# Print a warning message to stderr.
# Arguments:
# 1 -- The printc formatting string.
# ... -- The printc formatting arguments.
print_warning() {
printc "%{YELLOW}[%s warning]%{CLEAR}: $1%{CLEAR}\n" "batwatch" "${@:2}" 1>&2
}
# Print an error message to stderr.
# Arguments:
# 1 -- The printc formatting string.
# ... -- The printc formatting arguments.
print_error() {
printc "%{RED}[%s error]%{CLEAR}: $1%{CLEAR}\n" "batwatch" "${@:2}" 1>&2
}
# Initialization:
printc_init "[DEFINE]" <<END
CLEAR \x1B[0m
RED \x1B[31m
GREEN \x1B[32m
YELLOW \x1B[33m
BLUE \x1B[34m
MAGENTA \x1B[35m
CYAN \x1B[36m
DEFAULT \x1B[39m
DIM \x1B[2m
END
# --- END LIBRARY FILE ---
# --- BEGIN LIBRARY FILE: pager.sh ---
# Returns 0 (true) if the current pager is less, otherwise 1 (false).
is_pager_less() {
[[ "$(pager_name)" = "less" ]]
return $?
}
# Returns 0 (true) if the current pager is bat, otherwise 1 (false).
is_pager_bat() {
[[ "$(pager_name)" = "bat" ]]
return $?
}
# Returns 0 (true) if the current pager is disabled, otherwise 1 (false).
is_pager_disabled() {
[[ -z "$(pager_name)" ]]
return $?
}
# Prints the detected pager name.
pager_name() {
_detect_pager 1>&2
echo "$_SCRIPT_PAGER_NAME"
}
# Prints the detected pager version.
pager_version() {
_detect_pager 1>&2
echo "$_SCRIPT_PAGER_VERSION"
}
# Executes a command or function, and pipes its output to the pager (if it exists).
#
# Returns: The exit code of the command.
# Example:
# pager_exec echo hi
pager_exec() {
if [[ -n "$SCRIPT_PAGER_CMD" ]]; then
"$@" | pager_display
return $?
else
"$@"
return $?
fi
}
# Displays the output of a command or function inside the pager (if it exists).
#
# Example:
# bat | pager_display
pager_display() {
if [[ -n "$SCRIPT_PAGER_CMD" ]]; then
if [[ -n "$SCRIPT_PAGER_ARGS" ]]; then
"${SCRIPT_PAGER_CMD[@]}" "${SCRIPT_PAGER_ARGS[@]}"
return $?
else
"${SCRIPT_PAGER_CMD[@]}"
return $?
fi
else
cat
return $?
fi
}
# Detect the pager information.
# shellcheck disable=SC2120
_detect_pager() {
if [[ "$_SCRIPT_PAGER_DETECTED" = "true" ]]; then return; fi
_SCRIPT_PAGER_DETECTED=true
# If the pager command is empty, the pager is disabled.
if [[ -z "${SCRIPT_PAGER_CMD[0]}" ]]; then
_SCRIPT_PAGER_VERSION=0
_SCRIPT_PAGER_NAME=""
return;
fi
# Determine the pager name and version.
local output
local output1
output="$("${SCRIPT_PAGER_CMD[0]}" --version 2>&1)"
output1="$(head -n 1 <<<"$output")"
if [[ "$output1" =~ ^less[[:blank:]]([[:digit:]]+) ]]; then
_SCRIPT_PAGER_VERSION="${BASH_REMATCH[1]}"
_SCRIPT_PAGER_NAME="less"
elif [[ "$output1" =~ ^bat(cat)?[[:blank:]]([[:digit:]]+) ]]; then
# shellcheck disable=SC2034
__BAT_VERSION="${BASH_REMATCH[2]}"
_SCRIPT_PAGER_VERSION="${BASH_REMATCH[2]}"
_SCRIPT_PAGER_NAME="bat"
else
_SCRIPT_PAGER_VERSION=0
_SCRIPT_PAGER_NAME="$(basename "${SCRIPT_PAGER_CMD[0]}")"
fi
}
# Configure the script pager.
# This attempts to mimic how bat determines the pager and pager arguments.
#
# 1. Use BAT_PAGER
# 2. Use PAGER with special arguments for less
# 3. Use PAGER
_configure_pager() {
# shellcheck disable=SC2206
SCRIPT_PAGER_ARGS=()
if [[ -n "${PAGER+x}" ]]; then
SCRIPT_PAGER_CMD=($PAGER)
else
SCRIPT_PAGER_CMD=("less")
fi
# Prefer the BAT_PAGER environment variable.
if [[ -n "${BAT_PAGER+x}" ]]; then
# [note]: This is intentional.
# shellcheck disable=SC2206
SCRIPT_PAGER_CMD=($BAT_PAGER)
SCRIPT_PAGER_ARGS=()
return
fi
# If the pager is bat, use less instead.
if is_pager_bat; then
SCRIPT_PAGER_CMD=("less")
SCRIPT_PAGER_ARGS=()
fi
# Add arguments for the less pager.
if is_pager_less; then
SCRIPT_PAGER_CMD=("${SCRIPT_PAGER_CMD[0]}" -R --quit-if-one-screen)
if [[ "$(pager_version)" -lt 500 ]]; then
SCRIPT_PAGER_CMD+=(--no-init)
fi
fi
}
if [[ -t 1 ]]; then
# Detect and choose the arguments for the pager.
_configure_pager
else
# Prefer no pager if not a tty.
SCRIPT_PAGER_CMD=()
SCRIPT_PAGER_ARGS=()
fi
# --- END LIBRARY FILE ---
# --- BEGIN LIBRARY FILE: version.sh ---
# Gets the current bat version.
bat_version() {
if [[ -z "${__BAT_VERSION}" ]]; then
__BAT_VERSION="$(command "/usr/bin/bat" --version | cut -d ' ' -f 2)"
fi
echo "${__BAT_VERSION}"
}
# Compares two version strings.
# Arguments:
# 1 -- The version to compare.
# 2 -- The comparison operator (same as []).
# 3 -- The version to compare with.
version_compare() {
local version="$1"
local compare="$3"
if ! [[ "$version" =~ \.$ ]]; then
version="${version}."
fi
if ! [[ "$compare" =~ \.$ ]]; then
compare="${compare}."
fi
version_compare__recurse "$version" "$2" "$compare"
return $?
}
version_compare__recurse() {
local version="$1"
local operator="$2"
local compare="$3"
# Extract the leading number.
local v_major="${version%%.*}"
local c_major="${compare%%.*}"
# Extract the remaining numbers.
local v_minor="${version#*.}"
local c_minor="${compare#*.}"
# Compare the versions specially if the final number has been reached.
if [[ -z "$v_minor" && -z "$c_minor" ]]; then
[ "$v_major" $operator "$c_major" ];
return $?
fi
# Insert zeroes where there are missing numbers.
if [[ -z "$v_minor" ]]; then
v_minor="0."
fi
if [[ -z "$c_minor" ]]; then
c_minor="0."
fi
# Compare the versions.
# This is an early escape case.
case "$operator" in
-eq) [[ "$v_major" -ne "$c_major" ]] && return 1 ;;
-ne) [[ "$v_major" -ne "$c_major" ]] && return 0 ;;
-ge | -gt) [[ "$v_major" -lt "$c_major" ]] && return 1
[[ "$v_major" -gt "$c_major" ]] && return 0 ;;
-le | -lt) [[ "$v_major" -gt "$c_major" ]] && return 1
[[ "$v_major" -lt "$c_major" ]] && return 0 ;;
esac
version_compare__recurse "$v_minor" "$operator" "$c_minor"
}
# --- END LIBRARY FILE ---
# -----------------------------------------------------------------------------
# Init:
# -----------------------------------------------------------------------------
hook_color
hook_version
hook_width
hook_help
# -----------------------------------------------------------------------------
# Help:
# -----------------------------------------------------------------------------
show_help() {
echo 'Usage: batwatch --file [--watcher entr|poll][--[no-]clear] <file> [<file> ...]'
echo ' batwatch --command [-n<interval>] <command> [<arg> ...]'
}
# -----------------------------------------------------------------------------
# Watchers:
# -----------------------------------------------------------------------------
WATCHERS=("entr" "poll")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
watcher_entr_watch() {
ENTR_ARGS=()
if [[ "$OPT_CLEAR" == "true" ]]; then
ENTR_ARGS+=('-c')
fi
entr "${ENTR_ARGS[@]}" \
"/usr/bin/bat" "${BAT_ARGS[@]}" \
--terminal-width="$OPT_TERMINAL_WIDTH" \
--paging=never \
"$@" \
< <(printf "%s\n" "$@")
}
watcher_entr_supported() {
command -v entr &> /dev/null
return $?
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
POLL_STAT_VARIANT=''
POLL_STAT_COMMAND=()
determine_stat_variant() {
if [[ -n "$POLL_STAT_VARIANT" ]]; then
return 0
fi
local variant name flags ts
for variant in "gnu -c %Z" "bsd -f %m"; do
read -r name flags <<< "$variant"
# save the results of the stat command
if read -r ts < <(stat ${flags} "$0" 2> /dev/null); then
# verify that the value is an epoch timestamp
# before proceeding
if [[ "${ts}" =~ ^[0-9]+$ ]]; then
POLL_STAT_COMMAND=(stat ${flags})
POLL_STAT_VARIANT="$name"
return 0
fi
fi
done
return 1
}
watcher_poll_watch() {
determine_stat_variant
local files=("$@")
local times=()
# Get the initial modified times.
local file
local time
local modified=true
for file in "${files[@]}"; do
time="$("${POLL_STAT_COMMAND[@]}" "$file")"
times+=("$time")
done
# Display files.
while true; do
if "$modified"; then
modified=false
clear
"/usr/bin/bat" "${BAT_ARGS[@]}" \
--terminal-width="$OPT_TERMINAL_WIDTH" \
--paging=never \
"${files[@]}"
fi
# Check if the file has been modified.
local i=0
for file in "${files[@]}"; do
time="$("${POLL_STAT_COMMAND[@]}" "$file")"
if [[ "$time" -ne "${times[$i]}" ]]; then
times[$i]="$time"
modified=true
fi
((i++))
done
# Wait for "q" to exit, or check again after a few seconds.
local input
read -r -t "${OPT_INTERVAL}" input
if [[ "$input" =~ [q|Q] ]]; then
exit
fi
done
"${POLL_STAT_COMMAND[@]}" "$@"
local ts
}
watcher_poll_supported() {
determine_stat_variant
return $?
}
# -----------------------------------------------------------------------------
# Functions:
# -----------------------------------------------------------------------------
determine_watcher() {
local watcher
for watcher in "${WATCHERS[@]}"; do
if "watcher_${watcher}_supported"; then
OPT_WATCHER="$watcher"
return 0
fi
done
return 1
}
# -----------------------------------------------------------------------------
# Options:
# -----------------------------------------------------------------------------
BAT_ARGS=(--paging=never)
FILES=()
FILES_HAS_DIRECTORY=false
OPT_MODE=file
OPT_INTERVAL=3
OPT_CLEAR=true
OPT_WATCHER=""
# Set options based on tty.
if [[ -t 1 ]]; then
OPT_COLOR=true
fi
# Parse arguments.
while shiftopt; do
case "$OPT" in
# Script options
--watcher) shiftval; OPT_WATCHER="$OPT_VAL" ;;
--interval|-n) shiftval; OPT_INTERVAL="$OPT_VAL" ;;
--file|-f) OPT_MODE=file ;;
--command|-x) OPT_MODE=command ;;
--clear) OPT_CLEAR=true ;;
--no-clear) OPT_CLEAR=false ;;
# bat/Pager options
-*) BAT_ARGS+=("$OPT=$OPT_VAL") ;;
# Files
*) {
FILES+=("$OPT")
if [[ "$OPT_MODE" = "command" ]]; then
getargs --append FILES
break
fi
} ;;
esac
done
# Validate that a file/command was provided.
if [[ ${#FILES[@]} -eq 0 ]]; then
if [[ "$OPT_MODE" = "file" ]]; then
print_error "no files provided"
else
print_error "no command provided"
fi
exit 1
fi
# Validate that the provided files exist.
if [[ "$OPT_MODE" = "file" ]]; then
for file in "${FILES[@]}"; do
if ! [[ -e "$file" ]]; then
print_error "'%s' does not exist" "$file"
exit 1
fi
if [[ -d "$file" ]]; then
FILES_HAS_DIRECTORY=true
fi
done
fi
# Append bat arguments.
if "$OPT_COLOR"; then
BAT_ARGS+=("--color=always")
else
BAT_ARGS+=("--color=never")
fi
# Initialize clear command based on whether or not ANSI should be used.
if [[ "$OPT_CLEAR" == "true" ]]; then
if "$OPT_COLOR"; then
clear() {
term_clear || return $?
}
fi
else
clear() {
:
}
fi
# -----------------------------------------------------------------------------
# Main:
# -----------------------------------------------------------------------------
if [[ "$OPT_MODE" = "file" ]]; then
# Determine the watcher.
if [[ -z "$OPT_WATCHER" ]]; then
if ! determine_watcher; then
print_error "Your system does not have any supported watchers."
printc "Please read the documentation at %{BLUE}%s%{CLEAR} for more details.\n" "https://github.com/eth-p/bat-extras" 1>&2
exit 2
fi
else
if ! type "watcher_${OPT_WATCHER}_supported" &> /dev/null; then
print_error "Unknown watcher: '%s'" "$OPT_WATCHER"
exit 1
fi
if ! "watcher_${OPT_WATCHER}_supported" &> /dev/null; then
print_error "Unsupported watcher: '%s'" "$OPT_WATCHER"
exit 1
fi
fi
main() {
"watcher_${OPT_WATCHER}_watch" "${FILES[@]}"
return $?
}
else
# Set bat's header to show the command.
BAT_VERSION="$(bat_version)"
if version_compare "$BAT_VERSION" -ge "0.14"; then
BAT_ARGS+=(--file-name="${FILES[*]}")
fi
main() {
while true; do
clear
"${FILES[@]}" 2>&1 | "/usr/bin/bat" "${BAT_ARGS[@]}"
sleep "${OPT_INTERVAL}" || exit 1
done
}
fi
# Run the main function.
main
exit $?
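# Usage sketch (illustrative invocations; file names are hypothetical):
#   batwatch README.md                      # pick the best available watcher automatically
#   batwatch --watcher poll -n 5 notes.md   # force the poll watcher, checking every 5 seconds
#   batwatch -x date                        # re-run a command (default interval: 3 seconds)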
| true |
dcc043e6eece7b36c0735a4b03bd8cec3c62e2eb
|
Shell
|
spider/spider-box
|
/puphpet/files/exec-once/3-install-gremlin.sh
|
UTF-8
| 1,219 | 3.140625 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
echo "------------ INSTALL GREMLIN-SERVER START ------------"
### Install gremlin-server
export GREMLINSERVER_VERSION="3.1.1"
export NEO4J_VERSION="2.3.2"
export ORIENT_VERSION="2.1.16"
export INSTALL_DIR="/home/vagrant"
export VAGRANT_DIR="/vagrant"
export BOOTSTRAP_DIR="/var/www/puphpet/files/exec-once"
export GREMLIN_DIR="apache-gremlin-server-$GREMLINSERVER_VERSION-incubating"
# download and unzip
wget --no-check-certificate -O $INSTALL_DIR/apache-gremlin-server-$GREMLINSERVER_VERSION-incubating-bin.zip https://www.apache.org/dist/incubator/tinkerpop/$GREMLINSERVER_VERSION-incubating/apache-gremlin-server-$GREMLINSERVER_VERSION-incubating-bin.zip
unzip $INSTALL_DIR/apache-gremlin-server-$GREMLINSERVER_VERSION-incubating-bin.zip -d $INSTALL_DIR/
# get gremlin-server configuration files
cp $BOOTSTRAP_DIR/gremlin-spider-script.groovy $INSTALL_DIR/$GREMLIN_DIR/scripts/
cp $BOOTSTRAP_DIR/gremlin-server-spider.yaml $INSTALL_DIR/$GREMLIN_DIR/conf/
# get neo4j dependencies
cd $INSTALL_DIR/$GREMLIN_DIR
bin/gremlin-server.sh -i org.apache.tinkerpop neo4j-gremlin $GREMLINSERVER_VERSION-incubating
sleep 30
cd $VAGRANT_DIR
echo "------------ INSTALL GREMLIN-SERVER END ------------"
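# Smoke-test sketch (not part of the original provisioning; host, port, and the
# HTTP channel are assumptions depending on gremlin-server-spider.yaml):
#   curl -s -X POST -d '{"gremlin":"100-1"}' http://localhost:8182 | grep -q '"data"'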
| true |
9d4742668ccbe26e436b05813eb298cc7eb2a633
|
Shell
|
EMBEDDIA/comment-filter
|
/services/web/entrypoint.prod.sh
|
UTF-8
| 302 | 3.125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
echo "Checking for classifier model files..."
#MODEL_DIR=./project/models
sh ./project/model_download.sh
if [ "$DATABASE" = "postgres" ]
then
echo "Waiting for postgres..."
while ! nc -z $SQL_HOST $SQL_PORT; do
sleep 0.5
done
echo "PostgreSQL started"
fi
exec "$@"
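# Invocation sketch (values are assumptions; in compose/k8s these come from the
# environment, and the container CMD becomes "$@"):
#   DATABASE=postgres SQL_HOST=db SQL_PORT=5432 \
#     ./entrypoint.prod.sh gunicorn --bind 0.0.0.0:5000 manage:app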
| true |
8aeefe170fe738439d15efd162e037ada43b52f0
|
Shell
|
openshift/sippy
|
/e2e-scripts/sippy-e2e-sippy-e2e-setup-commands.sh
|
UTF-8
| 6,624 | 3.609375 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# In Prow CI, SIPPY_IMAGE variable is defined in the sippy-e2e-ref.yaml file as a
# dependency so that the pipeline:sippy image (containing the sippy binary)
# will be available to start the sippy-load and sippy-server pods.
# When running locally, the user has to define SIPPY_IMAGE.
echo "The sippy CI image: ${SIPPY_IMAGE}"
# The GCS_CRED allows us to pull artifacts from GCS when importing prow jobs.
# Redefine GCS_CRED to use your own.
GCS_CRED="${GCS_CRED:=/var/run/sippy-ci-gcs-sa/gcs-sa}"
echo "The GCS cred is: ${GCS_CRED}"
# If you're using Openshift, we use oc, if you're using plain Kubernetes,
# we use kubectl.
#
KUBECTL_CMD="${KUBECTL_CMD:=oc}"
echo "The kubectl command is: ${KUBECTL_CMD}"
echo "The Docker config.json is: ${DOCKERCONFIGJSON}"
is_ready=0
echo "Waiting for cluster to be usable..."
e2e_pause() {
if [ -z "$OPENSHIFT_CI" ]; then
return
fi
# In prow, we need these sleeps to keep things consistent -- TODO: we need to figure out why.
echo "Sleeping 30 seconds ..."
sleep 30
}
set +e
# We don't want to exit on timeouts if the cluster we got was not quite ready yet.
for i in `seq 1 20`; do
echo -n "${i})"
e2e_pause
echo "Checking cluster nodes"
${KUBECTL_CMD} get node
if [ $? -eq 0 ]; then
echo "Cluster looks ready"
is_ready=1
break
fi
echo "Cluster not ready yet..."
done
set -e
# This should be set to the KUBECONFIG for the cluster claimed from the cluster-pool.
echo "KUBECONFIG=${KUBECONFIG}"
echo "Showing kube context"
${KUBECTL_CMD} config current-context
if [ $is_ready -eq 0 ]; then
echo "Cluster never became ready; aborting"
exit 1
fi
e2e_pause
echo "Checking for presence of GCS credentials ..."
if [ -f ${GCS_CRED} ]; then
ls -l ${GCS_CRED}
else
echo "Aborting: GCS credential file ${GCS_CRED} not found"
exit 1
fi
echo "Starting postgres on cluster-pool cluster..."
# Make the "postgres" namespace and pod.
cat << END | ${KUBECTL_CMD} apply -f -
apiVersion: v1
kind: Namespace
metadata:
  name: sippy-e2e
  labels:
    openshift.io/run-level: "0"
    openshift.io/cluster-monitoring: "true"
    pod-security.kubernetes.io/enforce: privileged
    pod-security.kubernetes.io/audit: privileged
    pod-security.kubernetes.io/warn: privileged
END
e2e_pause
cat << END | ${KUBECTL_CMD} apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: postg1
  namespace: sippy-e2e
  labels:
    app: postgres
spec:
  volumes:
    - name: postgredb
      emptyDir: {}
  containers:
    - name: postgres
      image: quay.io/enterprisedb/postgresql
      ports:
        - containerPort: 5432
      env:
        - name: POSTGRES_PASSWORD
          value: password
        - name: POSTGRESQL_DATABASE
          value: postgres
      volumeMounts:
        - mountPath: /var/lib/postgresql/data
          name: postgredb
      securityContext:
        privileged: false
        allowPrivilegeEscalation: false
        capabilities:
          drop:
            - ALL
        runAsNonRoot: true
        runAsUser: 3
        seccompProfile:
          type: RuntimeDefault
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: postgres
  name: postgres
  namespace: sippy-e2e
spec:
  ports:
    - name: postgres
      port: 5432
      protocol: TCP
  selector:
    app: postgres
END
echo "Waiting for postgres pod to be Ready ..."
# We set +e to avoid the script aborting before we can retrieve logs.
set +e
TIMEOUT=120s
echo "Waiting up to ${TIMEOUT} for postgres to come up..."
${KUBECTL_CMD} -n sippy-e2e wait --for=condition=Ready pod/postg1 --timeout=${TIMEOUT}
retVal=$?
set -e
echo
echo "Saving postgres logs ..."
${KUBECTL_CMD} -n sippy-e2e logs postg1 > ${ARTIFACT_DIR}/postgres.log
if [ ${retVal} -ne 0 ]; then
echo "Postgres pod never came up"
exit 1
fi
${KUBECTL_CMD} -n sippy-e2e get po -o wide
${KUBECTL_CMD} -n sippy-e2e get svc,ep
# Get the gcs credentials out to the cluster-pool cluster.
# These credentials are in vault and maintained by the TRT team (e.g. for updates and rotations).
# See https://vault.ci.openshift.org/ui/vault/secrets/kv/show/selfservice/technical-release-team/sippy-ci-gcs-read-sa
#
${KUBECTL_CMD} create secret generic gcs-cred --from-file gcs-cred=$GCS_CRED -n sippy-e2e
# Get the registry credentials for all build farm clusters out to the cluster-pool cluster.
${KUBECTL_CMD} -n sippy-e2e create secret generic regcred --from-file=.dockerconfigjson=${DOCKERCONFIGJSON} --type=kubernetes.io/dockerconfigjson
# Make the "sippy loader" pod.
cat << END | ${KUBECTL_CMD} apply -f -
apiVersion: batch/v1
kind: Job
metadata:
  name: sippy-load-job
  namespace: sippy-e2e
spec:
  template:
    spec:
      containers:
        - name: sippy
          image: ${SIPPY_IMAGE}
          imagePullPolicy: ${SIPPY_IMAGE_PULL_POLICY:-Always}
          resources:
            limits:
              memory: 1G
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          command: ["/bin/sh", "-c"]
          args:
            - /bin/sippy --init-database --load-database --log-level=debug --load-prow=true --load-testgrid=false --release 4.13 --skip-bug-lookup --database-dsn=postgresql://postgres:password@postgres.sippy-e2e.svc.cluster.local:5432/postgres --mode=ocp --config ./config/e2e-openshift.yaml --google-service-account-credential-file /tmp/secrets/gcs-cred
          env:
            - name: GCS_SA_JSON_PATH
              value: /tmp/secrets/gcs-cred
          volumeMounts:
            - mountPath: /tmp/secrets
              name: gcs-cred
              readOnly: true
      imagePullSecrets:
        - name: regcred
      volumes:
        - name: gcs-cred
          secret:
            secretName: gcs-cred
      dnsPolicy: ClusterFirst
      restartPolicy: Never
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
  backoffLimit: 1
END
date
echo "Waiting for sippy loader job to finish ..."
${KUBECTL_CMD} -n sippy-e2e get job sippy-load-job
${KUBECTL_CMD} -n sippy-e2e describe job sippy-load-job
# We set +e to avoid the script aborting before we can retrieve logs.
set +e
# This takes under 3 minutes so 5 minutes (300 seconds) should be plenty.
echo "Waiting up to ${SIPPY_LOAD_TIMEOUT:=300s} for the sippy-load-job to complete..."
${KUBECTL_CMD} -n sippy-e2e wait --for=condition=complete job/sippy-load-job --timeout ${SIPPY_LOAD_TIMEOUT}
retVal=$?
set -e
job_pod=$(${KUBECTL_CMD} -n sippy-e2e get pod --selector=job-name=sippy-load-job --output=jsonpath='{.items[0].metadata.name}')
${KUBECTL_CMD} -n sippy-e2e logs ${job_pod} > ${ARTIFACT_DIR}/sippy-load.log
if [ ${retVal} -ne 0 ]; then
echo "sippy loading never finished on time."
exit 1
fi
date
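# Local-run sketch (all values are assumptions; in Prow CI they are injected by
# the sippy-e2e-ref.yaml step definition):
#   export SIPPY_IMAGE=quay.io/example/sippy:latest   # hypothetical image tag
#   export KUBECTL_CMD=kubectl
#   export GCS_CRED=$HOME/gcs-sa.json ARTIFACT_DIR=/tmp/artifacts
#   export DOCKERCONFIGJSON=$HOME/.docker/config.json
#   ./sippy-e2e-sippy-e2e-setup-commands.sh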
| true |
6b21e7fe133c95ad4e207cb0b0040fe95b7a014b
|
Shell
|
tianzhuoaza/hpc_in-memory
|
/all-transports/adios-all/lammps-adios/jobs/qsub_while_dimes.sh
|
UTF-8
| 1,844 | 2.8125 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# if(strcmp(transport_string, "ADIOS_DISK_MPIIO") == 0){
# else if (strcmp(transport_string, "ADIOS_STAGING_DSPACES") == 0){
# else if (strcmp(transport_string, "ADIOS_STAGING_DIMES") == 0){
# else if (strcmp(transport_string, "ADIOS_STAGING_FLEXPATH") == 0){
# else if (strcmp(transport_string, "NATIVE_STAGING_DSPACES") == 0){
# else if (strcmp(transport_string, "NATIVE_STAGING_DIMES") == 0){
trans=ADIOS_STAGING_DIMES
#napp1=32
#napp2=16
#napp3=4
#nnode=5
napp1=64
napp2=32
napp3=4
nnode=8
#napp1=128
#napp2=64
#napp3=4
#nnode=50
#napp2=8192
#napp1=16384
#napp3=1024
#nnode=2048
#napp2=1024
#napp1=2048
#napp3=512
#nnode=258
#if [[ "$trans" == *"STAGING"* ]];then
# DS_SERVER=${DATASPACES_DIR}/bin/dataspaces_server
# MPI_DS="aprun -n ${napp1} -o 0 $DS_SERVER -s ${napp1} -c $((${napp2}+${napp1}))"
# sleep 10s
#fi
#16384
while [ $napp1 -le 64 ]
do
echo "Submitting job: nnode=$nnode napp1=$napp1 napp2=$napp2 napp3=$napp3"
sed "s/\${nnode}/${nnode}/g;s/\${napp1}/${napp1}/g;s/\${napp2}/${napp2}/g;s/\${trans}/${trans}/g;s/\${napp3}/${napp3}/g" lammps_adios_titan_dimes_var.job > testnewjob
jobid=`qsub testnewjob`
echo "jobid: $jobid"
rc=0
sleep 15s
while [ $rc -eq 0 ]
do
check=`showq -u dhuang`
echo $check | grep "Idle" > /dev/null
rc="$?"
echo "rc: idle $jobid $rc"
sleep 3s
done
rc=0
sleep 15s
while [ $rc -eq 0 ]
do
check=`showq -u dhuang`
echo $check | grep "Running" > /dev/null
rc="$?"
echo "rc: running $jobid $rc"
sleep 3s
done
qdel $jobid
echo "delete job: $jobid"
sleep 5s
napp1=$(( $napp1 * 2 ))
napp2=$(( $napp2 * 2 ))
napp3=$(( $napp3 ))
nnode=$(( ($napp1 + $napp2) / 16 + 2))
done
#sed 's/${nnode}/3/g;s/${napp1}/2/g' lammps_adios_titan_var.job > testnewjob
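# Templating sketch: if lammps_adios_titan_dimes_var.job contained a line such
# as "#PBS -l nodes=${nnode}" (hypothetical contents), the sed above would
# emit "#PBS -l nodes=8" into testnewjob for the first iteration.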
| true |
970ffbd4cccbb472c92d460537c5976ddd1ae076
|
Shell
|
minorhash/react-plx-exp
|
/public/pos/cmd/pac.sh
|
UTF-8
| 146 | 2.953125 | 3 |
[] |
no_license
|
if [ -z "$1" ]; then
	echo "usage: $0 <src/path/to/file.jsx>"
else
	se=$(echo "$1" | sed s/src/js/g | sed s/jsx//g)
	echo ../js/"$se"js
	npx webpack "$1" -o ../js/"$se"js
fi
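# Example (hypothetical layout): for "src/components/App.jsx" the sed pipeline
# yields se="js/components/App.", so the script runs:
#   npx webpack src/components/App.jsx -o ../js/js/components/App.js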
| true |
2ec37c28af85aa666ece4f9c426f10c27158c179
|
Shell
|
cybercongress/js-cosmos
|
/script.sh
|
UTF-8
| 454 | 2.859375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Prepend the js-cosmos front-matter block to a single file.
prepend_frontmatter() {
	touch temp.md
	echo "---
project: js-cosmos
---" >> temp.md
	cat "$1" >> temp.md
	cat temp.md > "$1"
	rm -f temp.md
}
for file in ~/build/docs/*
do
	if [ -f "$file" ]
	then
		prepend_frontmatter "$file"
	fi
done
prepend_frontmatter CONTRIBUTING.md
prepend_frontmatter CHANGELOG.md
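# After running, each processed file starts with (sketch):
#   ---
#   project: js-cosmos
#   ---
#   <original file content>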
| true |
51d189c592b19c3ec21439ffc37898a62a6f91af
|
Shell
|
eth-cscs/tools
|
/python/import/FAST/fast.slurm.template
|
UTF-8
| 1,261 | 2.671875 | 3 |
[] |
no_license
|
#!/bin/bash -l
#SBATCH --time=00:02:00
#SBATCH --ntasks=XXX
#SBATCH --output=o_XXX
#SBATCH --error=o_XXX
# -------------- create tarfile
#OK IN=/apps/dora/UES/5.2.UP04/broadwell/easybuild/software/Python/2.7.11-CrayGNU-2016.03
#OK TARF=Python-2.7.11-CrayGNU-2016.03.tar
#OK /usr/bin/time -p \
#OK tar cf \
#OK $TARF \
#OK --exclude='*.html' \
#OK --exclude='*.jpg' \
#OK --exclude='*.jpeg' \
#OK --exclude='*.png' \
#OK --exclude='*.pyc' \
#OK --exclude='*.pyo' \
#OK $IN/bin \
#OK $IN/lib \
#OK $IN/include \
#OK $IN/share
#OK # <1G, <15sec
cat <<EOF > fast_import.sh
#!/bin/bash
# --- dora:
module load Python/2.7.11-CrayGNU-2016.03
IN=/apps/dora/UES/5.2.UP04/broadwell/easybuild/software/Python/2.7.11-CrayGNU-2016.03
TARF=Python-2.7.11-CrayGNU-2016.03.tar
mkdir -p /dev/shm/$USER
tar xf \$TARF -C /dev/shm/$USER
export OMP_NUM_THREADS=1
export X=/dev/shm/$USER/\$IN
export PATH=\$X/bin:\$PATH
export LD_LIBRARY_PATH=\$X/lib:\$LD_LIBRARY_PATH
export LIBRARY_PATH=\$X/lib:\$LIBRARY_PATH
echo python=\`which python\`
echo "start"
\$X/bin/python 0.py
echo "end"
EOF
chmod +x fast_import.sh
time -p srun -n $SLURM_NTASKS ./fast_import.sh
# -------------- put tarfile on compute node
# /apps/dora/UES/5.2.UP04/broadwell/.Python-2.7.11-CrayGNU-2015.11.tar
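# Submission sketch (the XXX placeholders are filled in externally; 128 is a
# hypothetical task count):
#   sed 's/XXX/128/g' fast.slurm.template > fast.slurm && sbatch fast.slurm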
| true |
cb23b34ab2f00249ff9f1735a2d85d3103bc9a5b
|
Shell
|
yuchen-x/or_urdf
|
/catkin-env-hooks/20.or_urdf.sh.in
|
UTF-8
| 514 | 3.234375 | 3 |
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# determine if we're in the devel or install space
if [ "@DEVELSPACE@" = "True" -o "@DEVELSPACE@" = "true" ]
then
PLUGINS=@CATKIN_DEVEL_PREFIX@/lib/openrave-@OpenRAVE_LIBRARY_SUFFIX@
else
PLUGINS=@CMAKE_INSTALL_PREFIX@/lib/openrave-@OpenRAVE_LIBRARY_SUFFIX@
fi
# prepend to paths (if not already there)
# from http://unix.stackexchange.com/a/124447
case ":${OPENRAVE_PLUGINS:=$PLUGINS}:" in
*:$PLUGINS:*) ;;
*) OPENRAVE_PLUGINS="$PLUGINS:$OPENRAVE_PLUGINS" ;;
esac
export OPENRAVE_PLUGINS
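# Behaviour sketch: with OPENRAVE_PLUGINS=/a and PLUGINS=/b this exports
# "/b:/a"; evaluating the hook again with the same PLUGINS leaves the variable
# unchanged, so repeated sourcing stays idempotent.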
| true |
ae90344e9add41feb06cfd37760aac1c42595408
|
Shell
|
stefano/randcspsolver
|
/launcher.sh
|
UTF-8
| 1,587 | 2.765625 | 3 |
[] |
no_license
|
#!/bin/bash
echo "" > output.csv;
echo -e "\"Number of variables: 1-10\"\n" >> output.csv;
for n in {1,2,3,4,5,6,7,8,9,10};
do java Solver -n $n -l 5 -d 0.5 -s 0.5 -b 50 >> output.csv;
done
echo -e "\n\"Number of variables: 1-10 (with AC)\"\n" >> output.csv;
for n in {1,2,3,4,5,6,7,8,9,10};
do java Solver -n $n -l 5 -d 0.5 -s 0.5 -b 50 -ac >> output.csv;
done
echo -e "\n\"Cardinality of domains: 1-10\"\n" >> output.csv;
for l in {1,2,3,4,5,6,7,8,9,10};
do java Solver -n 5 -l $l -d 0.5 -s 0.5 -b 50 >> output.csv;
done
echo -e "\n\"Cardinality of domains: 1-10 (with AC)\"\n" >> output.csv;
for l in {1,2,3,4,5,6,7,8,9,10};
do java Solver -n 5 -l $l -d 0.5 -s 0.5 -b 50 -ac >> output.csv;
done
echo -e "\n\"Density: 0.1-1\"\n" >> output.csv;
for d in {0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95,1};
do java Solver -n 5 -l 5 -d $d -s 0.5 -b 50 >> output.csv;
done
echo -e "\n\"Density: 0.1-1 (with AC)\"\n" >> output.csv;
for d in {0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95,1};
do java Solver -n 5 -l 5 -d $d -s 0.5 -b 50 -ac >> output.csv;
done
echo -e "\n\"Strictness: 0.1-1\"\n" >> output.csv;
for s in {0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95,1};
do java Solver -n 5 -l 5 -d 0.5 -s $s -b 50 >> output.csv;
done
echo -e "\n\"Strictness: 0.1-1 (with AC)\"\n" >> output.csv;
for s in {0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,0.7,0.75,0.8,0.85,0.9,0.95,1};
do java Solver -n 5 -l 5 -d 0.5 -s $s -b 50 -ac >> output.csv;
done
| true |
cff788ebdc94f2597b6af4b7af62307638d1b150
|
Shell
|
kargig/vresspiti
|
/vresspiti.sh
|
UTF-8
| 2,008 | 3.25 | 3 |
[] |
no_license
|
#!/bin/bash
#Variables
RECIPIENTS="youremail@address.com,andonemore@newaddress.com"
WORK_PATH="/home/yourusername/myhouses"
URL="http://www.xe.gr/property/search?System.item_type=re_residence&Transaction.type_channel=117541&Transaction.price.from=300&Transaction.price.to=500&Item.area.from=40&Item.area.to=100&Geo.area_id_new__hierarchy=83268"
USER_AGENT="User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:68.0) Gecko/20100101 Firefox/68.0"
NEWOUT="${WORK_PATH}/tmp/newout"
ARROUT="${WORK_PATH}/tmp/array.out"
TOMAIL="${WORK_PATH}/tmp/tomail"
# pup (https://github.com/ericchiang/pup) extracts the result items from the HTML;
# override PUP to point at the binary if it is not on the PATH.
PUP="${PUP:-pup}"
if [ "x${WORK_PATH}" = "x" ]; then
echo "WORK_PATH not defined"
exit 10
fi
#cleanup
rm -f $NEWOUT
rm -f $ARROUT
rm -f $TOMAIL
mkdir -p "${WORK_PATH}/tmp"
curl -H "${USER_AGENT}" "${URL}" | $PUP '[class="resultItem r ad_full_view"] json{}' > ${NEWOUT}
cat ${NEWOUT} | jq -r '[.[] as $house | [$house."href",$house.children[1].children[0].children[0].text+$house.children[1].children[1].children[0].text+$house.children[1].children[0].text]]' | \
sed -e 's/"//g' | sed -e 's/\(\/pro\)/https:\/\/www.xe.gr\1/g' | \
sed -e 's/],*//g' -e 's/\[//g' -e '/^\s*$/d' -e 's/,$//g' -e 's/^\s*//g'> ${ARROUT}
# Read ${ARROUT} into the HOUSES array, one line per element (alternating URL / description).
IFS=$'\r\n' GLOBIGNORE='*' command eval 'HOUSES=($(cat ${ARROUT}))'
ARRLEN=${#HOUSES[@]}
i=0
while [ "${i}" -lt "${ARRLEN}" ]; do
# get the links for the houses, string must contain http
echo "${HOUSES[$i]}" | grep -q "http"
if [ "$?" -eq "0" ]; then
# filter out houses that we have already sent email for
grep -q "${HOUSES[$i]}" ${WORK_PATH}/mailed
if [ "$?" -gt "0" ]; then
# only continue if the description of the house doesn't contain
# "κεντρικ. θ.ρμανση" ("central heating" in Greek; the dots are regex wildcards for accented letters)
echo "${HOUSES[$((i+1))]}" | grep -Eq "κεντρικ. θ.ρμανση"
if [ "$?" -gt "0" ]; then
echo "New house ${HOUSES[$i]}" >> ${TOMAIL}
echo "${HOUSES[$((i+1))]}" >> ${TOMAIL}
fi
fi
fi
i=$(( i + 1 ))
done
if [ -f ${TOMAIL} ]; then
mailx -s "new houses" -r noreply@xe.gr ${RECIPIENTS} < "${TOMAIL}"
grep http ${TOMAIL} >> ${WORK_PATH}/mailed
rm -f ${TOMAIL}
fi
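# Deployment sketch (assumption; not part of the script): run periodically, e.g.
#   */15 * * * * /home/yourusername/myhouses/vresspiti.sh >/dev/null 2>&1
# New listings are mailed once and then remembered in ${WORK_PATH}/mailed.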
| true |
53c23757905c49b536d8bbf044483ceddb04d311
|
Shell
|
leominov/datalock
|
/contrib/vagrant/init.sh
|
UTF-8
| 685 | 3.1875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
export DEBIAN_FRONTEND=noninteractive
HOSTNAME=$(hostname)
apt-get update > /dev/null
apt-get install nginx -y
rm /etc/nginx/sites-enabled/default
if [ "$HOSTNAME" = "master" ]; then
cp /vagrant/contrib/vagrant/datalock-master.conf /etc/nginx/conf.d/
else
cp /vagrant/contrib/vagrant/datalock-node.conf /etc/nginx/conf.d/
mkdir -p /opt/datalock/ /opt/datalock/database
chmod 0777 /opt/datalock/database
cp /vagrant/bin/datalock /opt/datalock/
cp -r /vagrant/public /opt/datalock/
cp -r /vagrant/templates /opt/datalock/
cp /vagrant/contrib/init/systemd/datalock.service /etc/systemd/system/
systemctl enable datalock
systemctl start datalock
fi
nginx -s reload
| true |
3980aad5d6b529bee9e508028c4d7747beccd9ab
|
Shell
|
ngkim/stack_ops
|
/01_server_security/iptables/watch-deny-centos.sh
|
UTF-8
| 315 | 2.71875 | 3 |
[] |
no_license
|
#!/bin/bash
tailf /var/log/messages | grep DENY | awk '
{
if (NF == 22)
printf("%-10s %-20s %-20s %-10s %-10s\n", $16, $9,$10,$17,$18)
else if (NF == 23)
printf("%-10s %-20s %-20s %-10s %-10s\n", $17, $9,$10,$18,$19)
else
printf("%5d %-20s %-20s %-10s %-10s %-10s\n", NF, $9,$10,$16,$17,$18)
}
'
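# Note (assumption based on typical CentOS iptables LOG lines): the kernel
# message gains an extra token on some interfaces, shifting every field by one;
# hence the separate NF==22 and NF==23 printf branches above.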
| true |