| blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | text | download_success |
|---|---|---|---|---|---|---|---|---|---|---|---|
| stringlengths 40-40 | stringclasses 1 value | stringlengths 4-115 | stringlengths 2-970 | stringclasses 28 values | int64 31-5.38M | float64 2.52-5.28 | int64 3-5 | listlengths 0-161 | stringclasses 2 values | stringlengths 31-5.39M | bool 1 class |
9f0f154f36c1d58f4cd19f880fd9f5b67e1331a3 | Shell | Antezedens/temp-switch | /irrigation.sh | UTF-8 | 390 | 2.96875 | 3 | [] | no_license |
#!/bin/bash
cd ${0%%irrigation.sh}
#1 huette
#2 pool
#####3 stiege
#4 flaeche
#5 grillplatz
#####6 böschung
alla=$@
gpio=$1
minutes=${alla##$1 }
echo "irrigation params: gpio $gpio: $minutes ('$alla')"
#for i in 5 6m 170 6m 8m 9m ; do
for i in $minutes ; do
./setrelais2.sh $gpio 0
echo "irrigation for $i"
sleep $i
./setrelais2.sh $gpio 1
sleep 60
done
./setrelais2.sh $gpio 1
| true |
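Note on the script above: `minutes=${alla##$1 }` strips the first word from the flattened argument string by pattern match, which can misfire if the gpio argument contains pattern characters. A minimal untested sketch of the same argument handling using `shift` instead:

#!/bin/bash
gpio=$1
shift                # drop the gpio argument; the rest are the run durations
for i in "$@"; do    # iterate the remaining arguments without re-parsing a string
    echo "irrigation for $i on gpio $gpio"
done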
341c03b30a714800e7f9d67e256ecf00ac3ddd3d | Shell | bhbo/docker-django-react | /bin/develop | UTF-8 | 846 | 3.375 | 3 | [] | no_license |
#!/bin/bash
docker-compose up -d postgres client
# wait for postgres to initialize before migrating
for i in {30..0}; do
if [ $(docker-compose logs postgres 2>&1 | grep "database system is ready to accept connections" | wc -l) -gt 1 ]; then
echo "Postgres is ready."
echo "Migrating Django models..."
docker-compose up -d server
docker-compose exec server python manage.py migrate
break
else
sleep 1
fi
if [ "$i" = 0 ]; then
echo "Migration failed."
fi
done
for j in {30..0}; do
if [ $(docker-compose logs client 2>&1 | grep "webpack dev server listening" | wc -l) -gt 0 ]; then
echo "Webpack dev server is ready."
break
else
sleep 5
fi
if [ "$j" = 0 ]; then
echo "Webpack dev server did not initiatilize."
fi
done
| true |
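Grep-polling the compose logs works, but Postgres images ship `pg_isready`, which asks the server directly whether it accepts connections. A hedged sketch (assumes the service is named `postgres` as in the script above):

#!/bin/bash
# Probe once per second, for at most 30 seconds; -T avoids TTY allocation in scripts.
for i in {30..0}; do
    if docker-compose exec -T postgres pg_isready >/dev/null 2>&1; then
        echo "Postgres is ready."
        break
    fi
    sleep 1
done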
d3061220001ba90b9a05b92486ed7ed98a63c878 | Shell | goluckyryan/Cleopatra | /Cleopatra.sh | UTF-8 | 4,020 | 3.515625 | 4 | [] | no_license |
#!/bin/bash
########################################################################
#
# This is Cleopatra.sh, a scripted version for Cleopatra
#
# Using a bash script provides flexibility: users can add different
# components during the calculation
#
# A full package includes the following:
# 1) create a in-file for ptolemy
# 2) run ptolemy from that in-file and output an out-file
# 3) extract cross-section distribution from the out-file
# save as txt or root TGraph format
# 4) call ROOT to draw the TGraph
# 5) load possible experimental Xsec and fit with the Ptolemy calculation
#
# User can easily select/comment-out different component
# to suit their needs
#-------------------------------------------------------
# created by Ryan (Tsz Leung) Tang, Nov-18, 2018
# email: goluckyryan@gmail.com
########################################################################
#for User, please Modify the path for thisroot.sh
source /Applications/root/bin/thisroot.sh
#================================ User Default Control
CreateInFile=0 # 0 = false, 1 = true
RunPtolemy=0
IsExtractXSec=0
PlotResult=0
#============================================ USER doesn't need to change anything below
if [ $# -eq 0 ] ; then
echo "$./Cleopatra in-file X X X X"
echo " | | | |"
echo " | | | PlotResult? (1/0)"
echo " | | Extract cross-section? (1/0)"
echo " | Run Ptolemy? (1/0)"
echo " Create infile? (1/0)"
exit 1
fi;
loadfile=$1
infile=$1".in"
outfile=$1".out"
rootfile=$1".root"
if [ $# -eq 2 ]; then
CreateInFile=$2
fi;
if [ $# -eq 3 ]; then
CreateInFile=$2
RunPtolemy=$3
fi;
if [ $# -eq 4 ]; then
CreateInFile=$2
RunPtolemy=$3
IsExtractXSec=$4
fi;
if [ $# -eq 5 ]; then
CreateInFile=$2
RunPtolemy=$3
IsExtractXSec=$4
PlotResult=$5
fi;
echo "#################################################################"
echo "## @@@@ @@ @@@@ @@@@ @@@@@ @@@@ @@@@@@ @@@@@ @@@@ ##"
echo "## @@ @@ @@ @@ @@ @@ @@ @@ @@ @@ @@ @@ @@ @@ ##"
echo "## @@ @@ @@@@ @@ @@ @@@@@ @@@@@@ @@ @@@@@ @@@@@@ ##"
echo "## @@ @@ @@ @@ @@ @@ @@ @@ @@ @@ @ @@ @@ ##"
echo "## @@@@ @@@@@ @@@@ @@@@ @@ @@ @@ @@ @@ @ @@ @@ ##"
echo "#################################################################"
echo "##### Cleopatra, Ptolemy for (d,p),(p,d) #####"
echo "#################################################################"
echo ""
echo "USER OPTION:"
echo " --- Is Create Ptolemy infile ? " ${CreateInFile}
echo " --- Is Run Ptolemy ? " ${RunPtolemy}
echo " --- Is Extract Cross-Section ? " ${IsExtractXSec}
echo " --- Is Plot Results ? " ${PlotResult}
echo "================================================================="
#if [ ${CreateInFile} -eq 1 ] ; then
# echo "infile ----> "${loadfile}
#fi;
#
#if [ ${RunPtolemy} -eq 1 ] ; then
# echo "Ptolemy infile ----> "${infile}
# echo "Ptolemy outfile ----> "${outfile}
#fi;
if [ ${CreateInFile} -eq 1 ] ; then
./InFileCreator ${loadfile} 0.0 50.0 0.5
fi;
if [ ${RunPtolemy} -eq 1 ] ; then
echo "================================================================="
echo "===== Ptolemy Calcualtion ==================================="
echo "================================================================="
./ptolemy <${infile}> ${outfile}
fi;
#===== Extracting XSec and save into *txt and *root
if [ ${IsExtractXSec} -eq 1 ] ; then
./ExtractXSec ${outfile}
fi;
if [ ${PlotResult} -eq 1 ] ; then
#===== Plot the result from the *.root
#./PlotTGraphTObjArray ${rootfile}
#--- other way within ROOT
echo "================================================================="
echo "===== Plot Result from ${rootfile}"
echo "================================================================="
com='PlotTGraphTObjArray.h("'${rootfile}'")'
echo ${com}
root -l ${com}
fi;
| true |
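The four `if [ $# -eq N ]` blocks above repeat the same assignments; because every preset above defaults to 0, they collapse into default-value expansions with identical behavior. A minimal sketch:

#!/bin/bash
# ${N:-0} uses the Nth positional argument when present, else the preset 0.
CreateInFile=${2:-0}
RunPtolemy=${3:-0}
IsExtractXSec=${4:-0}
PlotResult=${5:-0}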
7c864ec48fa8a233baa1c29edb6f3295042aee23 | Shell | abeer486/gke-dedicated-game-server | /scaling-manager/tests/test-loader.sh | UTF-8 | 1,317 | 3.5625 | 4 | ["Apache-2.0"] | permissive |
#!/bin/bash
# Copyright 2017 Google LLC All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Default Openarena port.
DEFAULT_PORT=27961
# Check for kubectl in local path.
command -v kubectl >/dev/null 2>&1 || { echo >&2 "Command kubectl required in $PATH to use this script, exiting"; exit 1; }
# Loop 15 times, start one pod every ~20 seconds.
for i in $(seq 1 15); do
NEW_PORT=`expr ${DEFAULT_PORT} + ${i}`
echo "Starting 'openarena.dgs.${i}' DGS pod on port ${NEW_PORT} (replaces any exising pod with the same name)"
kubectl delete pods openarena.dgs.$i 2>&1 | grep -v "NotFound"
sleep 20
sed "s/openarena\.dgs/openarena.dgs.$i/g" "$( cd $(dirname $0) ; \
pwd -P )/../../openarena/k8s/openarena-pod.yaml" |
sed -e "s/${DEFAULT_PORT}/${NEW_PORT}/g" | kubectl apply -f -
done
| true |
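The pod manifest above is rewritten with two chained `sed` substitutions; when a template uses `${VAR}` placeholders, `envsubst` from GNU gettext does the same job declaratively. A sketch under that assumption (the template file and its `DGS_NAME`/`DGS_PORT` placeholders are hypothetical, not part of the repo):

#!/bin/bash
# Render the template and feed the result straight to kubectl.
export DGS_NAME="openarena.dgs.1" DGS_PORT=27962
envsubst '${DGS_NAME} ${DGS_PORT}' < openarena-pod.yaml.tmpl | kubectl apply -f -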
0ce7fe2f706872ba09090683bf8615dcb51cf979 | Shell | eric-johnson/docker-dev-runner | /bin/drun | UTF-8 | 418 | 3.328125 | 3 | [] | no_license |
#!/usr/bin/env bash
# DRUN_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
DRUN_PATH="$(dirname "$(readlink -f "$0")")"
image_file=`$DRUN_PATH/image-file`
image=`cat $image_file`
echo Image: $image
SRC=$( dirname "$image_file" )
WORK_DIR="/code${PWD#$SRC}"
DOCKER_OPTS="-e=MIX_HOME=/code/.mix"
CMD="docker run -it --rm --net=host -v=$SRC:/code -w=$WORK_DIR $DOCKER_OPTS $image $@"
echo $CMD
exec $CMD
| true |
e3d31c63eb522ce71c477eaf7c0f2096847f44ca | Shell | netzgut/ubuntu-setup | /scripts/powertop.sh | UTF-8 | 879 | 3.640625 | 4 | ["MIT"] | permissive |
#!/usr/bin/env bash
set -e
MARKER=$(basename "${BASH_SOURCE%.*}")
###############################################################################
# Powertop
###############################################################################
# URL: https://wiki.archlinux.org/index.php/Powertop
###############################################################################
NAME="Powertop"
###############################################################################
print_banner "$NAME"
if [ ! -f "$MARKER_DIRECTORY"/"$MARKER" ]; then
sudo apt install -y \
powertop \
&& echo "[Unit]
Description=Powertop tunings
[Service]
Type=exec
ExecStart=/usr/bin/powertop --auto-tune
RemainAfterExit=true
[Install]
WantedBy=multi-user.target" | sudo tee /etc/systemd/system/powertop.service \
&& finish_install "$MARKER"
else
already_installed "$MARKER"
fi
| true |
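The block above writes the unit file but never registers it with systemd; unless `finish_install` does so elsewhere, a hedged follow-up using standard systemd commands would be:

#!/usr/bin/env bash
sudo systemctl daemon-reload                 # pick up the new unit file
sudo systemctl enable --now powertop.service # start it now and on every boot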
29d109d1fe2480d6c728c21230d5575fb9b93e70 | Shell | isnot/UnisonUnum | /docker/voicevox_engine_dev/start.sh | UTF-8 | 359 | 2.609375 | 3 | ["MIT"] | permissive |
#!/usr/bin/env bash
OPT_DIR=/opt
# Start the server using the production VOICEVOX build
VOICEVOX_DIR=$OPT_DIR"/voicevox" # path to the production VOICEVOX directory
cd $OPT_DIR"/voicevox_engine" # path to the directory containing the code
/usr/bin/bash -c "/usr/bin/python3 proxy_run.py --voicevox_dir=${VOICEVOX_DIR} --host 0.0.0.0 --port ${VIRTUAL_PORT}"
| true |
0c8a4e150d9aa7b9b53950482c4837493c6fea36 | Shell | arseny30/settings | /.bashrc | UTF-8 | 1,206 | 3.390625 | 3 | [] | no_license |
# ~/.bashrc: executed by bash(1) for non-login shells.
# History
export HISTTIMEFORMAT="%h %d %H:%M:%S> "
#export HISTCONTROL=ignoredups
export HISTCONTROL=ignoredups:erasedups:ignorespace
export HISTSIZE=1000000
shopt -s histappend
# '<hostname>:<directory>$ '
export PS1='\h:\w\$ '
# Initial value of file permission bits for newly created files
umask 022
# utf8
export LANG=ru_RU.UTF-8
export LC_ALL=ru_RU.UTF-8
# Colorized ls
if which dircolors >/dev/null; then
export LS_OPTIONS='--color=auto'
eval "`dircolors`"
alias ls='ls $LS_OPTIONS'
alias ll='ls $LS_OPTIONS -l'
alias l='ls $LS_OPTIONS -lA'
else
export CLICOLOR=1
export LSCOLORS=gxBxhxDxfxhxhxhxhxcxcx
fi
# make and ninja aliases with notifications
function notify() {
osascript -e "display notification \"$1\" with title \"Term\" sound name \"Ping\""
}
function notify_after_command() {
"$@";
notify "$*";
}
IGNOREEOF=1
alias make='notify_after_command nice -n6 make'
alias ninja='notify_after_command nice -n6 ninja'
alias vim='mvim -v'
# Utils
function range () {
if [ $1 -ge $2 ]; then
return
fi
a=$1
b=$2
while [ $a -le $b ]; do
echo $3$a$4; a=$(($a+1)); done
}
export -f range # "-f" is required to export a function rather than a variable
| true |
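For illustration, a usage sketch of the `range` helper above, with hypothetical prefix/suffix arguments:

# Prints host1.local, host2.local, host3.local, one per line.
range 1 3 host .local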
8801f3e43d6e74f0ed24428dcf7f1dd5db350762 | Shell | bigmountainideas/vagrant-shell-bootstrap | /provisioners/app.sh | UTF-8 | 482 | 3.4375 | 3 | ["MIT"] | permissive |
NGX_HOME=/usr/local/nginx
for DOMAIN in "$@"
do
DOMAIN_CONFIG="include /home/apps/${DOMAIN}/bin/nginx.conf;"
if [ -a "${NGX_HOME}/sites-available/${DOMAIN}" ]; then
sudo rm ${NGX_HOME}/sites-available/${DOMAIN}
fi
echo -e "$DOMAIN_CONFIG" | sudo tee -a ${NGX_HOME}/sites-available/${DOMAIN} > /dev/null # sudo does not apply to shell redirections, so use tee
if ! [ -a "${NGX_HOME}/sites-enabled/${DOMAIN}" ]; then
sudo ln -s ${NGX_HOME}/sites-available/${DOMAIN} ${NGX_HOME}/sites-enabled/${DOMAIN}
fi
done
sudo nginx -s reload
| true |
2090bef1f72753f5e4d247fa0124d533d523c0a0 | Shell | matveykolesnik/Drobysheva_et_al_2020 | /Process_raw_data.sh | UTF-8 | 1,384 | 3.65625 | 4 | [] | no_license |
#!/bin/bash
#Shell script that performs quality checking of the reads before and after trimming, maps the reads onto reference genome generating indexed BAM files
#Requires: fastqc, trimmomatic, bowtie2, samtools
WD="~/data/Drobysheva_et_al_2020/"
echo $WD
cd $WD
#Path to file with sequencing adapters
Adapters="TruSeq3-SE.fa"
RefSeqs="refseqs/Cba_and_phage.fa"
RefSeqs_index="refseqs/index/Cba_and_phage_index"
mkdir QC
mkdir QC/Trimmed
fastqc -t 4 -o QC Data/*
mkdir Data/Trimmed
#Removing adapter sequences and low-quality reads fragments
for f in Data/*fastq.gz;
do
echo $f;
java -jar ~/Trimmomatic-0.38/trimmomatic-0.38.jar SE -phred33 $f Data/Trimmed/`basename $f .fastq.gz`_trimmed.fastq.gz ILLUMINACLIP:$Adapters:2:30:10 LEADING:0 TRAILING:0 SLIDINGWINDOW:4:15 MINLEN:36;
done
fastqc -t 4 -o QC/Trimmed Data/Trimmed/*
#Mapping reads onto reference sequences
mkdir refseqs/index
mkdir alignments
#Build bowtie2 index
bowtie2-build $RefSeqs $RefSeqs_index
for f in Data/Trimmed/*fastq.gz;
do
echo $f;
bowtie2 -x $RefSeqs_index -U $f -q -p 4 | samtools view -S -b -u - | samtools sort - -o alignments/`basename $f .fastq.gz`.sorted.bam;
samtools index alignments/`basename $f .fastq.gz`.sorted.bam;
done
#Count mapped reads for each sample
for f in alignments/*bam;
do
printf "%s\t%s\n" `basename $f` `samtools view -F 4 -c $f` >> reads_per_lib.tsv;
done
| true |
e0a1988a6268ca19c9b781c29d071269346e4f46 | Shell | tonetheman/hamming-test | /run.sh | UTF-8 | 262 | 3 | 3 | [] | no_license |
#!/bin/bash
COUNT=0
while [[ true ]]; do
rm -rf results
./tone -t target -w words
if [[ -e ./results ]]; then
echo "GOT IT"
rm -rf ./savedlow
tail -2 ./results > ./savedlow
fi
COUNT=`./addr.py 1 $COUNT`
if [[ $COUNT -eq 1 ]]; then
break
fi
done
| true |
59f5a7a88f218dd304edcff955e902dcd2ba6b41 | Shell | kafedra-bit/resela-plus | /install/Controller/Mitaka/environmentConfig.sh | UTF-8 | 2,387 | 3.234375 | 3 | [] | no_license |
#!/usr/bin/env bash
# Author Annika Hansson, Fredrik Johansson
# Openstack version Mitaka
# Ubuntu Server version 16.04 LTS
# Load the variables containing user input
. controllerInstall
echo "Enabling the OpenStack repository..."
apt-get -y -q install software-properties-common
add-apt-repository -y cloud-archive:mitaka
echo "Updating and upgrading the system..."
apt-get -y -q update && apt-get -y -q dist-upgrade
echo "Installing OpenStack client..."
apt-get -y -q install python-openstackclient
echo "Installing MariaDB and PyMySQL..."
export DEBIAN_FRONTEND="nointeractive"
debconf-set-selections <<< "mariadb-server mysql-server/root_password password ${mysqlDBPass}"
debconf-set-selections <<< "mariadb-server mysql-server/root_password_again password ${mysqlDBPass}"
apt-get -y -q install mariadb-server python-pymysql
unset DEBIAN_FRONTEND
if [ $(lsb_release -rs) == "14.04" ]; then
echo "Configuring /etc/mysql/conf.d/openstack.cnf..."
mariaDBTmp=${mysqlConfPathU14}
else
echo "Changing character encoding, from utf8m4 to utf8, in directory /etc/mysql/mariadb.conf.d/..."
find /etc/mysql/mariadb.conf.d/ -type f -exec sed -i "s/utf8m4/utf8/" {} \;
echo "Configuring /etc/mysql/mariadb.conf.d/99-openstack.cnf..."
mariaDBTmp=${mysqlConfPathU16}
fi
cat > ${mariaDBTmp} << END_OF_CONF
[mysqld]
bind-address = ${contIP}
default-storage-engine = innodb
innodb_file_per_table
max_connections = 4096
collation-server = utf8_general_ci
character-set-server = utf8
END_OF_CONF
echo "Restarting mysql..."
service mysql restart
mysql_secure_installation
echo "Installing MongoDB..."
apt-get -y -q install mongodb-server mongodb-clients python-pymongo
echo 'Configuring /etc/mongodb.conf...'
sed -i "/^bind_ip/{s/^/#/}" ${mongoDBConfPath}
cat >> ${mongoDBConfPath} << END_OF_CONF
bind_ip = ${contIP}
smallfiles = true
END_OF_CONF
echo "Restarting mongodb..."
service mongodb stop
rm /var/lib/mongodb/journal/prealloc.*
service mongodb start
echo 'Installing and configuring rabbit MQ server...'
apt-get -y -q install rabbitmq-server
rabbitmqctl add_user openstack ${rabbitPass}
rabbitmqctl set_permissions openstack ".*" ".*" ".*"
echo 'Installing memcached...'
apt-get -y -q install memcached python-memcache
echo 'Configuring /etc/memcached.conf...'
sed -i "s/^-l 127.0.0.1/-l ${contIP}/" ${memcachedConfPath}
echo 'Restarting memcached...'
service memcached restart
| true |
5dddd8e1da1cf41055b56ae843271d2b65cc6c84 | Shell | robotspace/arminix | /drivers/memory/ramdisk/proto.sh | UTF-8 | 389 | 2.609375 | 3 | [] | no_license |
#!/bin/sh
PATH=/bin:/sbin:/usr/bin:/usr/sbin
sed -n '1,/@DEV/p' <proto | grep -v @DEV@
(
cd /dev
ls -aln | grep '^[bc]' | egrep -v ' (fd1|fd0p|tcp|eth|ip|udp|tty[pq]|pty)' | \
sed -e 's/^[bc]/& /' -e 's/rw-/6/g' -e 's/r--/4/g' \
-e 's/-w-/2/g' -e 's/---/0/g' | \
awk '{ printf "\t\t%s %s--%s %d %d %d %d \n", $11, $1, $2, $4, $5, $6, $7; }'
)
sed -n '/@DEV/,$p' <proto | grep -v @DEV@
| true |
f9da541e7f81ef6e2d3285560f187b0a8d2dd90b | Shell | wohlben/dotfiles | /zsh_custom/plugins/histdb/histdb.plugin.zsh | UTF-8 | 739 | 2.65625 | 3 | [] | no_license |
###### histdb && autosuggestions ################################
_zsh_autosuggest_strategy_histdb_top_here() {
local query="select commands.argv from
history left join commands on history.command_id = commands.rowid
left join places on history.place_id = places.rowid
where places.dir LIKE '$(sql_escape $PWD)%'
and commands.argv LIKE '$(sql_escape $1)%'
group by commands.argv order by count(*) desc limit 1"
_histdb_query "$query"
}
#ZSH_AUTOSUGGEST_STRATEGY=histdb_top_here
source ${0:A:h}/zsh-histdb/sqlite-history.zsh
autoload -Uz add-zsh-hook
add-zsh-hook precmd histdb-update-outcome
#add-zsh-hook preexec _start_timer
#add-zsh-hook precmd _stop_timer
| true |
f5dd9e007e63a9d33be2872a886a70693a472516 | Shell | fneb/serverdensitysynology | /serverdensitysynoinstall.sh | UTF-8 | 7,040 | 3.78125 | 4 | [] | no_license |
#!/bin/ash
# Installer for Server Density on Synologys, by Bethany Corcoran.
# This script will prompt for a path to install to, download the latest Server Density agent for Linux, extract it, and perform some basic setup (potentially from pre-made files that it will download).
# Released under the GNU General Public License, 2015. This script is provided with NO warranty whatsoever.
# You'll notice that applications in this all have their absolute path. This is to work around issues with applications no longer being usable once the script is in progress.
# If you want to pre-define your URLs for your pre-made checks.py and config.cfg, uncomment these lines and put the full URL here. Handy for large-scale roll-outs for many devices.
# CHECKSURL=""
# CONFIGURL=""
# If you are using a pre-defined config.cfg from a URL, then you probably want to skip asking for a sd_url value in config.cfg. If so, uncomment the below line to skip this prompt.
# SKIPSDURL="true"
# Now we start asking the user for variables. Firstly, where to install to with /etc as the default.
/bin/echo "Synology-specific installer for Server Density."
/bin/echo "Where should Server Density be installed to? Use the absolute path with no trailing slash. [/etc]"
read PATH
if [ -z "$PATH" ]
then
PATH="/etc"
/bin/echo "Using $PATH as install location."
else
/bin/echo "Using $PATH as install location. Please ensure this is valid - this script doesn't include any checks for this!"
fi
# Asks for a URL to grab a pre-customised config.cfg from, unless it has been defined above.
if [ -z "$CONFIGURL" ]
then
/bin/echo "Enter URL for your pre-customised config.cfg file. Leave blank to work from the default config file."
read CONFIGURL
fi
if [ -z "$CONFIGURL" ]
then
/bin/echo "Using default config file."
else
/bin/echo "Using config from $CONFIGURL. You'll find the default file in $PATH/sd-agent/replaced-files after the installation."
fi
# Asks for a URL to grab a pre-customised checks.py from, unless it has been defined above.
if [ -z "$CHECKSURL" ]
then
/bin/echo "Enter URL for your pre-customised checks.py. Leave blank to work from the default checks.py file."
read CHECKSURL
fi
if [ -z "$CHECKSURL" ]
then
/bin/echo "Using default checks.py file."
else
/bin/echo "Using checks.py from $CHECKSURL. You'll find the default file in $PATH/sd-agent/replaced-files after the installation."
fi
# Asks for the Server Density URL to use, unless this has been set to skip earlier.
if [ -z "$SKIPSDURL" ]
then
/bin/echo "Please enter the account-specific section of the Server Density URL you use to log in. So, for the default URL https://example.serverdensity.io this would be 'example'"
read SDURL
if [ -z "$SDURL" ]
then
/bin/echo "Server Density URL cannot be blank. Please edit $PATH/sd-agent/config.cfg after the installation is complete."
else
/bin/echo "Using $SDURL, so the full URL will be https://$SDURL.serverdensity.io - if this is incorrect, please edit $PATH/sd-agent/config.cfg after the installation is complete."
fi
else
/bin/echo "You've said you want to skip entering the Server Density URL in this script itself. You can change this after installation if needed by editing the $PATH/sd-agent/config.cfg file."
fi
# Asks for the Server Density agent key.
/bin/echo "Please enter the agent key for this device - you can get this from the overview screen when you're looking at the device in Server Density, in the top-left of the screen."
/bin/echo "Note that if you're using a pre-customised config.cfg which includes a set agent_key then leave this blank."
read AGENTKEY
if [ -z "$AGENTKEY" ]
then
/bin/echo "Agent key cannot be blank. Please edit $PATH/sd-agent/config.cfg after the installation is complete if this isn't pre-defined."
else
/bin/echo "Using $AGENTKEY - if this is incorrect, please edit $PATH/sd-agent/config.cfg after the installation is complete."
fi
# Now that we have our variables, it's time to download and extract Server Density's agent. We download to /tmp then extract the contents to the directory we want it to be installed to.
/usr/bin/curl -L "https://www.serverdensity.com/downloads/sd-agent.tar.gz" -o "/tmp/sd-agent.tar.gz"
/bin/gzip -dc "/tmp/sd-agent.tar.gz" | /bin/tar xf - -C "$PATH"
# Check if we have a URL set for either the checks.py or config.cfg files. If either is set, then makes a folder for the originals to be put into.
if [[ -z "$CHECKSURL" && -z "$CONFIGURL" ]]
then
/bin/sleep 0
else
/bin/mkdir "$PATH/sd-agent/replaced-files"
fi
# If there's a URL for the checks.py then moves the original out of the default location and downloads the pre-customised version in its place.
if [ -n "$CHECKSURL" ]
then
/bin/mv "$PATH/sd-agent/checks.py" "$PATH/sd-agent/replaced-files/checks.py"
/usr/bin/curl -L "$CHECKSURL" -o "$PATH/sd-agent/checks.py"
fi
# If there's a URL for the config.cfg then moves the original out of the default location and downloads the pre-customised version in its place.
if [ -n "$CONFIGURL" ]
then
/bin/mv "$PATH/sd-agent/config.cfg" "$PATH/sd-agent/replaced-files/config.cfg"
/usr/bin/curl -L "$CONFIGURL" -o "$PATH/sd-agent/config.cfg"
fi
# Edits the config file with our agent key, but only if the AGENTKEY variable has been set.
if [ -n "$AGENTKEY" ]
then
/bin/sed -i "s/agent_key:/agent_key: $AGENTKEY/g" "$PATH/sd-agent/config.cfg"
fi
# Edits the config file with our SD URL, but only if the SDURL variable has been set.
if [ -n "$SDURL" ]
then
/bin/sed -i "s/sd_url: https:\/\/example.serverdensity.io/sd_url: https:\/\/$SDURL.serverdensity.io/g" "$PATH/sd-agent/config.cfg"
fi
# Now we want to write the script agent-pid.sh. This is what we're going to use for launching Server Density.
/bin/echo "Writing agent-pid.sh file."
/bin/sleep 1
/bin/echo "#!/bin/ash" >> $PATH/sd-agent/agent-pid.sh
/bin/echo 'PIDFILE="/tmp/sd-agent.pid"' >> $PATH/sd-agent/agent-pid.sh
/bin/echo 'if [ -e "$PIDFILE" ]' >> $PATH/sd-agent/agent-pid.sh
/bin/echo "then" >> $PATH/sd-agent/agent-pid.sh
/bin/echo "exit" >> $PATH/sd-agent/agent-pid.sh
/bin/echo "else" >> $PATH/sd-agent/agent-pid.sh
/bin/echo "python $PATH/sd-agent/agent.py start" >> $PATH/sd-agent/agent-pid.sh
/bin/echo "fi" >> $PATH/sd-agent/agent-pid.sh
/bin/sleep 1
/bin/chmod +x "$PATH/sd-agent/agent-pid.sh"
/bin/echo "Adding Server Density to /etc/crontab. It'll check if it needs to be re-launched every hour."
/bin/echo "0 * * * * root $PATH/sd-agent/agent-pid.sh" >> /etc/crontab
/bin/echo "Would you like to run Server Density now? y/n [y]"
read LAUNCH
if [ -z "$LAUNCH" ]
then
LAUNCH="y"
fi
if [ "$LAUNCH" == "y" ]
then
$PATH/sd-agent/agent-pid.sh
fi
if [ "$LAUNCH" == "n" ]
then
/bin/echo "Run $PATH/sd-agent/agent-pid.sh when ready."
fi
if [[ "$LAUNCH" != "y" && "$LAUNCH" != "n" ]]
then
/bin/echo "Oops, you've entered something other than y or n, so we won't run it now. Run $PATH/sd-agent/agent-pid.sh when ready."
fi
/bin/echo "Server Density should now be installed!"
exit
| true |
94160fd38f00c650760e3dffb2b621ea9b8d89b8 | Shell | kio-networks/Checklist-BSA-1907BSACL010 | /1907BSACL010/gluster/apifs/home/sakio/scripts/getAllServerClockDrift.sh | UTF-8 | 787 | 3.59375 | 4 | [] | no_license |
#!/bin/bash
driftthr="$1"
localdir=`dirname $0`
serverList=`cat ${localdir}/servers.txt`
slackChannel="$2"
timestamp=`date +"%Y%m%d-%H%M%S.%N"`
statusfile="/gluster/tmpcommon/sakio/driftStatusFiles/drift-status-${timestamp}.tmp"
touch $statusfile
numserver=`wc -l ${localdir}/servers.txt | cut -d " " -f 1`
echo "Querying date & time for ${numserver} servers..."
echo "Max drift allowed: ${driftthr} secs"
for i in $serverList
do
username=`echo $i | cut -d "|" -f 1`
password=`echo $i | cut -d "|" -f 2`
serveraddr=`echo $i | cut -d "|" -f 3`
#echo "${localdir}/getClockDrift.sh '$username' '$password' '$serveraddr' '${driftthr}' '$statusfile' '$slackChannel' &"
${localdir}/getClockDrift.sh "$username" "$password" "$serveraddr" "${driftthr}" "$statusfile" "$slackChannel" &
done
| true |
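Every `getClockDrift.sh` probe above is backgrounded, so the script returns before the status file is complete. If the caller needs it to block until all probes finish, a one-line hedged addition at the end of the script would be:

wait # block until every backgrounded getClockDrift.sh child has exited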
29b1fb1b888dd2e848d7364c1737503ea6b3fc5a | Shell | adriel/denyhosts-sync-docker | /run.sh | UTF-8 | 1,268 | 3.921875 | 4 | ["MIT"] | permissive |
#!/bin/bash
set -eu
set -o pipefail
# Table to check if tables have been generated
TABLE='country_history'
printf "[database]
# Type of database. Choice of sqlite3, MySQLdb, psycopg2 (PostgreSQL)
type: mysql
db: ${MYSQL_DATABASE}
host: ${MYSQL_HOSTNAME}
user: ${MYSQL_USER}
passwd: ${MYSQL_PASSWORD}
# Maximum size of database connection pool. Default: 5
# For high volume servers, set this to 100 or so.
cp_max: 100" > /etc/denyhosts-server-database.conf
# Check if DB is running and accessible
until mysql -u "${MYSQL_USER}" -p"${MYSQL_PASSWORD}" --host="${MYSQL_HOSTNAME}" --port="${MYSQL_PORT}" -e "select 1" &>/dev/null; do
>&2 echo "Database is unavailable - waiting"
sleep 1
done
>&2 echo "Database is up - checking if tables exist"
# Check if tables exist, create if missing
if [[ $(mysql -N -s -u "${MYSQL_USER}" -p"${MYSQL_PASSWORD}" --host="${MYSQL_HOSTNAME}" --port="${MYSQL_PORT}" -e \
"select count(*) from information_schema.tables where \
table_schema='${MYSQL_DATABASE}' and table_name='${TABLE}';") -eq 1 ]]; then
echo "Tables exists - starting server"
else
echo 'Tables do not exist - creating tables'
echo 'Y' | /usr/bin/denyhosts-server --recreate-database
fi
/usr/bin/denyhosts-server -c /etc/denyhosts-server.conf
| true |
75bffa5ac6009b89115d07e59ea2391562fdd5e8 | Shell | neoskop/s3surgeon | /release.sh | UTF-8 | 859 | 3.953125 | 4 | ["Apache-2.0"] | permissive |
#!/usr/bin/env bash
set -e
function check_commands() {
for command in $@; do
if ! command -v $command >/dev/null; then
echo -e "Install \033[1m$command\033[0m"
exit 1
fi
done
}
check_commands git npm jq
if [[ "$#" != "1" ]] || [[ ! "$1" =~ ^(patch|minor|major)$ ]]; then
echo -e "Usage: $0 \033[1mpatch|minor|major\033[0m"
exit 1
fi
if [[ $(git status --porcelain) ]]; then
echo -e "The repository has changes. Commit first...\033[0;31mAborting!\033[0m"
exit 1
fi
git pull --rebase
npm i
npm version --no-git-tag-version $1
version=$(cat package.json | jq -r .version)
sed -i "s/\.version('.*',/.version('$version',/" src/index.ts
npm run build
npm publish
git add .
git commit -m "chore: Bump version to ${version}."
git tag ${version}
git push origin $version
git pull --rebase
git push
| true |
889d57a66eaa43b472dbfe46a7dd01f9448dfe21 | Shell | dklaes/scripts | /KiDS/stats/2015.08.10/get_statistics.sh | UTF-8 | 1,458 | 2.984375 | 3 | [] | no_license |
for FILTER in r_SDSS i_SDSS
do
rm -f mag_limit_individual_${FILTER}.txt
rm -f mag_limit_${FILTER}.txt
for POINTING in `ls -1 --color=none | grep KIDS_`
do
ldactoasc -i ${POINTING}/${FILTER}/precoadd_V0.5.7A/cat/V0.5.7A.cat \
-t STATS -k V0.5.7A IMAGENAME -b -s | awk '$1==1 {print $2}' \
> tmp_${FILTER}_$$
while read IMAGE
do
ldactoasc -i ${POINTING}/${FILTER}/precoadd_V0.5.7A/cat/${IMAGE}.cat \
-t FIELDS -k SEXBKDEV ZP EXPTIME -b \
>> mag_limit_individual_${FILTER}.txt
done < tmp_${FILTER}_$$
rm tmp_${FILTER}_$$
ldactoasc -i ${POINTING}/${FILTER}/postcoadd_V0.5.7A/cats/*.swarp.cut_sex_ldac.cat \
-t FIELDS -k SEXBKDEV MAGZP -b \
>> mag_limit_${FILTER}.txt
ldactoasc -i ${POINTING}/${FILTER}/precoadd_V0.5.7A/cat/V0.5.7A.cat \
-t STATS -k V0.5.7A SEEING -b -s | awk '$1==1 {print $2}' \
>> seeing_individual_${FILTER}.txt
dfits ${POINTING}/${FILTER}/coadd_V0.5.7A/${POINTING}_${FILTER}.V0.5.7A.swarp.cut.fits | \
fitsort -d SEEING | grep -v error | awk '{print $2}' >> seeing_${FILTER}.txt
done
done
for FILTER in u_SDSS g_SDSS z_SDSS
do
cp mag_limit_individual_r_SDSS.txt mag_limit_individual_${FILTER}.txt
cp mag_limit_r_SDSS.txt mag_limit_${FILTER}.txt
cp seeing_individual_r_SDSS.txt seeing_individual_${FILTER}.txt
cp seeing_r_SDSS.txt seeing_${FILTER}.txt
done
| true |
c1eb3227b56e217a539b16889b5e77e54594e592 | Shell | fpga18/Shell-Build-Xcode_Static-library-and-Framework | /BuildXcodeShell.sh | UTF-8 | 1,440 | 3.546875 | 4 | [] | no_license |
#!/bin/sh
clear
PRECDPATH=`dirname $0`
echo "$PRECDPATH"
cd $PRECDPATH
CURRENTPATHCMD=`pwd`
ls
TARGETPATH="$CURRENTPATHCMD"
cd ..
CURRENTPATH="$CURRENTPATHCMD"
echo "CURRENTPATH = $CURRENTPATH"
dir=$(ls -l $CURRENTPATH |awk '/^d/ {print $NF}')
ISVAILDPATH="FALSE"
RIGHT="ISTRUE"
TARGETNAME=""
for i in $dir
do
if [[ $i =~ \.xcodeproj$ ]]; then
ISVAILDPATH="ISTRUE"
TARGETNAME="${i%.*}"
echo "$TARGETNAME"
echo $i
fi
done
if [ $ISVALIDPATH != $RIGHT ]
then
echo "current path is invalid"
exit 1
fi
cd $TARGETPATH
xcodebuild -target $TARGETNAME -configuration Release -sdk iphonesimulator -arch i386 -arch x86_64
xcodebuild -target $TARGETNAME -configuration Release -sdk iphoneos -arch armv7 -arch armv7s -arch arm64
LIBTOOL_FLAGS="-static"
FWNAME="ASDK"
RELEASEIPHONEA="$TARGETPATH/build/Release-iphoneos"
RELEASESIMULATORA="$TARGETPATH/build/Release-iphonesimulator"
STATICLIBNAME="lib$TARGETNAME.a"
HEADPATH=""
echo "Creating ASDK.framework"
if [ -d "ASDK.framework" ]; then
echo "Removing previous ASDK.framework copy"
rm -rf "ASDK.framework"
fi
mkdir -p "ASDK.framework/Headers"
libtool -no_warning_for_no_symbols $LIBTOOL_FLAGS -o $FWNAME.framework/$FWNAME $RELEASEIPHONEA/$STATICLIBNAME $RELEASESIMULATORA/$STATICLIBNAME
cp -r $RELEASEIPHONEA/"include/$TARGETNAME"/* $FWNAME.framework/Headers/
echo "Created $FWNAME.framework"
LIPOCMD=`lipo -info $FWNAME.framework/$FWNAME`
echo "CPU Architecture: $LIPOCMD"
rm -rf "build"
| true |
2a9b1692b439acde8ffc6581e5a97e86a2955193 | Shell | rl337/incubator | /main/muxbot/src/exec/udict.sh | UTF-8 | 730 | 3.46875 | 3 | [] | no_license |
#!/bin/sh
ESCAPED=`echo "$1" | sed -e 's/ /+/g'`
RESULT=`curl -L -s "http://www.urbandictionary.com/define.php?term=$ESCAPED"`
echo "$RESULT" | grep -i "<i>$1</i> isn't defined" > /dev/null 2>&1
if [ $? -eq 0 ]; then
echo "No Definition Found"
exit 0
fi
echo "$RESULT" | grep '<div class="definition">' | tr '[A-Z]' '[a-z]' | sed -e 's/<[^>]*>//g' | \
sed -e 's/who has/with/g' |
sed -e 's/people/ppl/g' |
sed -e 's/one/1/g' |
sed -e 's/two/2/g' |
sed -e 's/three/3/g' |
sed -e 's/four/4/g' |
sed -e 's/usually/often/g' |
sed -e 's/"/"/g' |
sed -e 's/&/\&/g' |
sed -e 's/refer[r]*ing to/about/g' |
sed -e 's/larger and larger/larger/g' |
sed -e 's/got it all wrong/are wrong/g' 2> /dev/null
| true |
b0ab7586bd57c06d515801653e204e83b7a45f84 | Shell | alfredo-milani/openwhisk-fn-bench | /script/env/dev/faas/openwhisk.sh | UTF-8 | 6,548 | 3.140625 | 3 | [] | no_license |
# ============================================================================
# Title: openwhisk.sh
# Description: Contains useful aliases for projects
# Author: Alfredo Milani (alfredo.milani.94@gmail.com)
# Date: Mon Nov 19 06:04:59 CET 2020
# License: MIT License
# Version: 1.0.0
# Notes: --/--
# Bash version: 4.4.19(1)-release
# ============================================================================
#######################################
##### OpenWhisk
##### see@ https://openwhisk.apache.org/documentation.html
#
alias wsk='/usr/local/opt/OpenWhisk_CLI-1.1.0-mac-amd64/wsk' # @doc: -/-
#
alias wski='wsk -i' # @doc: Use OpenWhisk in insecure mode
#
alias wsk-g-auth='wsk property get --auth' # @doc: Show configured auth property
#
alias wsk-g-apihost='wsk property get --apihost' # @doc: Show configured apihost property
#
alias wsk-act='wski action list' # @doc: Show all available actions (functions)
#
alias wsk-act-sort='wski action list --name-sort' # @doc: List all actions (functions) in alphabetical order
#
alias wsk-act-inv='wski action invoke' # @doc: Invoke OpenWhisk action, e.g. wsk-act-inv /whisk.system/samples/greeting
#
alias wsk-act-create='wski action create' # @doc: Create action, e.g. wsk-act-create greeting greeting.js
#
alias wsk-act-ud='wski action update' # @doc: Update action, e.g. wsk-act-ud greeting greeting.js
#
alias wsk-act-get='wski action get' # @doc: Get metadata that describes existing actions, e.g. wsk-act-get greeting
#
alias wsk-act-del='wski action delete' # @doc: Delete specified action, e.g. wsk-act-del greeting
#
alias wsk-actv-rec='wski activation get' # @doc: Get activation record, e.g. wsk-actv-rec 31b580428454401fb580428454601f88
#
alias wsk-actv-res='wski activation result' # @doc: Get activation result, e.g. wsk-actv-res 31b580428454401fb580428454601f88
#
alias wsk-actv-log='wski activation logs' # @doc: Get activation logs, e.g. wsk-actv-log 31b580428454401fb580428454601f88
#
alias wsk-actv-poll='wski activation poll' # @doc: Watch the output of actions as they are invoked
#
alias wsk-actv-list='wski activation list' # @doc: List a series of activation records
#
alias wsk-nspace='wski namespace list -v' # @doc: Get namespace value for current user
#
wsk-act-save() { # @doc: Save deployed action code to file, e.g. wsk-act-save greeting /dir/greeting.js; wsk-act-save greeting
if [[ -z "${1}" ]]; then
_error "Action name can not be empty"
return 1
fi
if [[ -z "${2}" ]]; then
wsk-act-get "${1}" --save
else
wsk-act-get "${1}" --save-as "${2}"
fi
}
# @dep: basename
wsk-act-createe() { # @doc: Create all action specified, e.g. wsk-act-createe login.js /path1/failure.js /path2/success.js
for file in "${@}"; do
local file_basename="$(basename "${file}")"
local action="${file_basename%.*}"
wsk-act-create "${action}" "${file}"
done
}
#
wsk-act-ud-all() { # @doc: Update all OpenWhisk currently deployed actions, with specified concurrency level, e.g. wsk-act-ud-all 5 # update all actions using 5 as concurrency level
local -r concurrency="${1}"
if [[ -z "${concurrency}" ]]; then
_error "Concurrency level must be specified."
return 1
elif [[ "${concurrency}" -lt 0 ]]; then
_error "Concurrency limit can not be lower then 0."
return 1
fi
# retrieve currently deployed actions
local -r actions="$(wsk-act | awk '{print $1}' | awk 'NR>1')"
if [[ -z "${actions}" ]]; then
_info "No action to update."
return 0
fi
while IFS= read -r action; do
_info "Updating \"${action}\" action with concurrency limit ${concurrency}."
wsk-act-ud "${action}" --concurrency "${concurrency}"
done <<< "${actions}"
}
#
wsk-act-dell() { # @doc: Delete all action specified, e.g. wsk-act-dell login failure success
for action in "${@}"; do
wsk-act-del "${action}"
done
}
#
wsk-act-del-all() { # @doc: Delete all OpenWhisk currently deployed actions
# retrieve currently deployed actions
local -r actions="$(wsk-act | awk '{print $1}' | awk 'NR>1')"
# show currently deployed actions
_info "Currently deployed actions:"
printf "${actions}\n\n"
if [[ -z "${actions}" ]]; then
_info "No action to delete."
return 0
fi
read -p "[DANGER] - Delete all currently deployed actions? [yes | no] > " choose
if [[ "${choose}" != 'yes' && "${choose}" != 'YES' ]]; then
return 0
fi
while IFS= read -r action; do
_info "Deleting \"${action}\" action."
wsk-act-del "${action}"
done <<< "${actions}"
}
#
wsk-act-url() { # @doc: Retrieve action URL, e.g. wsk-act-url greeting
local -r action="${1?Action name required, e.g. ${FUNCNAME} greeting}"
wsk-act-get "${@}" --url
}
#
wsk-act-binv() { # @doc: Invoke specified action in blocking mode and show result (if there are no errors), e.g. wsk-act-binv greeting
local -r action="${1?Action name required, e.g. ${FUNCNAME} greeting}"
wsk-act-inv "${@}" --result
}
# @dep: npx basename
wsk-cmp-cmp() { # @doc: Call openwhisk-composer (JavaScript) compose command to create composition. Be sure to be in the installation directory of composer. E.g. wsk-cmp-cmp ../../composition1.js /path/dir/composition2.js
for file in "${@}"; do
# not readonly: these locals are reassigned on every loop iteration
local file_basename="$(basename "${file}")"
local file_ext="${file_basename##*.}"
local composition="${file_basename%.*}"
if [[ "${file_ext}" != 'js' ]]; then
_warn "File \"${file}\" is not in JS format. Skipping..."
continue
fi
npx compose "${file}" --file || return 1
done
}
# @dep: npx basename
wsk-cmp-dp() { # @doc: Call openwhisk-composer (JavaScript) deploy command to create composition. Be sure to be in the installation directory of composer. E.g. wsk-cmp-cmp ../../composition1.json /path/dir/composition2.json
for file in "${@}"; do
# not readonly: these locals are reassigned on every loop iteration
local file_basename="$(basename "${file}")"
local file_ext="${file_basename##*.}"
local composition="${file_basename%.*}"
if [[ "${file_ext}" != 'json' ]]; then
_warn "File \"${file}\" is not in JSON format. Skipping..."
continue
fi
npx deploy "${composition}" "${file}" -i || return 1
done
}
# Automatic parse and display docs for aliases
# Args: none
# Deps: none
wsk_help() {
_als_quick_help "${BASH_SOURCE}"
printf '\n'
_fn_quick_help "${BASH_SOURCE}"
}
| true |
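A short usage sketch tying the aliases above together (action and file names are hypothetical):

wsk-act-create greeting greeting.js   # deploy greeting.js as the action "greeting"
wsk-act-binv greeting                 # blocking invoke that prints only the result
wsk-act-del greeting                  # remove the action again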
bca6935584b0e6a328bf53916c9ddb133dc22385 | Shell | vansergen/gemini-node-api | /.github/scripts/decrypt.sh | UTF-8 | 320 | 2.734375 | 3 | ["MIT"] | permissive |
#!/bin/bash
set -e
# Decrypt the private key
gpg --quiet --batch --yes --decrypt --passphrase=${PRIVATE_KEY_PASSPHRASE} \
--output ./.github/pgp/key.asc ./.github/pgp/key.asc.gpg
# Set the access permissions
chmod 600 ./.github/pgp/key.asc
# Import the private key
gpg --batch --yes --import ./.github/pgp/key.asc
| true |
1d1ff9fa26021b3d215a7101e71207cfa59d8619 | Shell | sfrom/dotnet-core-sam-actions | /deploy/entrypoint.sh | UTF-8 | 671 | 2.5625 | 3 | ["Apache-2.0"] | permissive |
#!/bin/sh
: ${AWS_REGION:=eu-central-1}
export PATH="$PATH:/root/.dotnet/tools"
cd "${DOTNET_LAMBDA_WORKING_DIR:-.}"
dotnet lambda deploy-serverless $DOTNET_LAMBDA_STACK_NAME
#aws s3 cp --only-show-errors $DOTNET_LAMBDA_PACKAGE_NAME s3://$DOTNET_LAMBDA_S3_LOCATION/$DOTNET_LAMBDA_PACKAGE_NAME
#aws lambda update-function-code \
# --region $AWS_REGION \
# --function-name $DOTNET_LAMBDA_FUNCTION_NAME \
# --zip-file fileb://$DOTNET_LAMBDA_PACKAGE_NAME
# dotnet lambda deploy-function \
# --region $AWS_REGION \
# --function-name $DOTNET_LAMBDA_FUNCTION_NAME \
# --function-handler $DOTNET_LAMBDA_FUNCTION_HANDLER \
# --package $DOTNET_LAMBDA_PACKAGE_NAME
exit
| true |
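The `: ${AWS_REGION:=eu-central-1}` line above uses the no-op builtin `:` solely to trigger the assign-default parameter expansion. A minimal POSIX sketch of the idiom:

#!/bin/sh
: ${GREETING:=hello}   # assigns "hello" only if GREETING is unset or empty
echo "$GREETING"       # prints the caller's value when one was exported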
a08469cd7b26bb61f2107073ba60b8431455557f | Shell | hortonworks/cloudbreak-images | /saltstack/base/salt/ccm-client/cdp/bin/reverse-tunnel-values.sh | UTF-8 | 827 | 2.734375 | 3 | ["Apache-2.0", "LicenseRef-scancode-proprietary-license"] | permissive |
#!/bin/bash -ux
CCM_HOST= # The host endpoint for the CCM (minasshd) service.
CCM_SSH_PORT=8990 # The port on which the CCM (minasshd) service listens for SSH connections.
CCM_PUBLIC_KEY_FILE= # The path to the public key file for the CCM (minasshd) service.
CCM_TUNNEL_INITIATOR_ID= # The ID of the tunnel initiator. This is what other services will use to locate this host endpoint.
CCM_KEY_ID= # The ID under which the private key was registered with CCM.
CCM_ENCIPHERED_PRIVATE_KEY=/etc/ccm/ccm-private-key.enc # The private key that the CCM (minasshd) service will use to authenticate this instance (encrypted for production, but not necessarily for testing).
CCM_TUNNEL_ROLE= # The identifier for the specific service for which the tunnel is being created.
CCM_TUNNEL_SERVICE_PORT= # The service endpoint to be tunneled.
| true |
dbb47b8c6b7f60df0750ad63dcaa58212847d08e | Shell | LuisPatino92/holbertonschool-higher_level_programming | /0x10-python-network_0/0-body_size.sh | UTF-8 | 127 | 2.828125 | 3 | [] | no_license |
#!/bin/bash
# This script prints the length of the body of an HTTP response
expr $(echo $(curl "$1" 2> /dev/null) | wc -c) - 1
| true |
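The `echo`/`wc -c` pipeline above counts bytes after the shell collapses whitespace, then subtracts the trailing newline. curl can report the body size itself through a write-out variable; a hedged alternative sketch (raw byte count, which can differ from the whitespace-collapsed count above):

#!/bin/bash
# -s silences progress output; -w prints the downloaded body size in bytes.
curl -s -o /dev/null -w '%{size_download}\n' "$1"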
35e813f6898f068449ed995c84f4ae5b0b5ce14b | Shell | costing/Monalisa | /WEBS/WEB_GRAPH/WEB-INF/classes/compile.sh | UTF-8 | 499 | 2.78125 | 3 | ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"] | permissive |
#!/bin/bash
cd `dirname $0`
BASE=../../../../..
LIB=$BASE/lib
CP=.:$BASE/bin/alienpool:$BASE/lib/classes:$BASE/tomcat/lib/catalina.jar:$BASE/tomcat/lib/servlet-api.jar
for JAR in $LIB/*.jar; do
CP="$CP:$JAR"
done
cat alimonitor/Page.java | sed "s#/home/monalisa/MLrepository/tomcat/webapps/ROOT#$(dirname $(dirname `pwd`))#g" > alimonitor/Page.java.new && mv alimonitor/Page.java.new alimonitor/Page.java
`cat ../../../../../conf/env.JAVA_HOME`/bin/javac -classpath "$CP" *.java */*.java
| true |
c16e49d57e41b96e078238cbc9203afd69db173c | Shell | rmaliska/astrofoto | /astrofoto/foto | UTF-8 | 883 | 3.15625 | 3 | [] | no_license |
#!/bin/bash
gphoto2 --set-config shutterspeed=bulb --quiet
# Ask for object name
read -p "Object Name: " oname
# ISO
read -p "ISO: " iso
# Bulb time
read -p "Bulbtime: " bulbtime
# Frames count
read -p "Frames: " frames
# Environment
HOME=/home/astroberry/astrofoto
DATE=`date +"%d%b%y"`
NAME=${oname}_${DATE}
FOLDER="$HOME/$DATE/$oname"
mkdir -p "$FOLDER"
cd "$FOLDER"
# gphoto2 --set-config=syncdatetime=1 --port usb: --set-config shutterspeed=bulb --set-config imageformat=RAW --set-config iso=$iso --bulb=$bulbtime --frames $frames --interval=$(($bulbtime+8)) --filename "${NAME}-%03n.cr2" --capture-image-and-download
env LANG=C gphoto2 --debug --debug-logfile=my-logfile.txt --reset --set-config imageformat=RAW --set-config iso=$iso --bulb=$bulbtime --frames $frames --interval=$(($bulbtime+5)) --filename "${NAME}-%03n.cr2" --capture-image-and-download
echo "Done!"
| true |
a2337c65baba381b4a712635bf4915fc57d24fe7 | Shell | mroach/irix-builder | /ports/readline/PKGBUILD | UTF-8 | 499 | 2.640625 | 3 | [] | no_license |
pkgname=readline
pkgver=8.0
pkgdesc="GNU readline"
url="https://tiswww.case.edu/php/chet/readline/rltop.html"
licenses=('GPL')
depends=(ncurses)
provides=('libhistory.so' 'libreadline.so')
sources=("ftp://ftp.cwru.edu/pub/bash/$pkgname-$pkgver.tar.gz")
sha256sums=(e339f51971478d369f8a053a330a190781acb9864cf4c541060f12078948e461)
build() {
cd "$pkgname-$pkgver"
./configure --prefix=$pkgprefix --host=$TARGET
make
}
package() {
cd "$pkgname-$pkgver"
make DESTDIR="$pkgdir" install
}
| true |
74efa9d278ab24d4b19103ab8dfe7d76d3e3e1e7 | Shell | pldiiw/1lin | /src/pre-chroot.sh | UTF-8 | 130 | 2.515625 | 3 | ["Unlicense"] | permissive |
for script in $(ls -1 scripts |
sed -n -E '1,/[0-9]{2}-begin-chroot.sh/p' |
sed -e 's:^:scripts/:'); do
source $script
done
| true |
57b10ec4e4a0703681cdc5017c684b35c3f231d2 | Shell | glennakamura/aports | /community/roundcubemail/roundcubemail-pgsql.post-install | UTF-8 | 365 | 2.625 | 3 | [] | no_license |
#!/bin/sh
cat >&2 <<EOF
*
* Create database for Roundcube and load the initialization script:
*
* su -l postgres
* psql -c "CREATE ROLE roundcube PASSWORD 'top-secret' INHERIT LOGIN;"
* psql -c "CREATE DATABASE roundcube OWNER roundcube ENCODING 'UTF-8';"
* psql -U roundcube roundcube < /usr/share/webapps/roundcube/SQL/postgres.initial.sql
*
EOF
| true |
26885d83aa1e9f6537e9eed1e816763c36754615 | Shell | oncoapop/data_reporting | /beast_scripts/p3out2isPCRin.sh | UTF-8 | 1,991 | 3.734375 | 4 | [] | no_license |
#!/bin/sh
# This Script was written by Damian Yap (Aug 2013)
# WSOP2013-001 version 4.0
# Script to generate isPCR input from primer3 output
# $Project and $type is exported from previous script
# Project Directory
# Project="TNBC"
dir="/home/dyap/Projects/"$Project
tmp="/home/dyap/dyap_temp/ITH"
# positions
posdir=$dir"/positions"
# primer3 output
p3dir=$dir"/primer3"
cd $dir
ls
# Need to change this for each file
# The name is exported from fasta2primer3.sh script
# if not, uncomment & input name here
# type="SNV"
# name=$Project"-"$type
echo $name
# Source and Output directories where Barcoded files stored
sourcedir=$posdir
outdir=$p3dir
# Part of the pipeline, use default output of fasta2primer3.sh
outfile=$p3dir"/"$name"_p3_output.txt"
# Final output
primerlist=$tmp"/"$name"_primerlist.txt"
echo "File from primer3 output to process: "$outfile"
echo Output to this directory $outdir
if [ -f $primerlist ];
then
echo $primerlist" will be overwritten. Press Return to continue, Ctrl-C to exit."
rm $primerlist
fi
wkfile=$outfile
echo "Processing..."
for i in `grep "SEQUENCE_ID=" $wkfile`
do
n=`grep -A12 $i $wkfile | grep "PRIMER_PAIR_NUM_RETURNED=" | awk -F"=" '{print $2}'`
if [[ $n =~ "0" ]];
then continue
fi
for j in 0 1 2 3 4
do
left=`grep -A140 $i $wkfile | grep -m1 "PRIMER_LEFT_"$j"_SEQUENCE" | awk -F"=" '{print $2}'`
right=`grep -A140 $i $wkfile | grep -m1 "PRIMER_RIGHT_"$j"_SEQUENCE" | awk -F"=" '{print $2}'`
size=`grep -A140 $i $wkfile | grep -m1 "PRIMER_PAIR_"$j"_PRODUCT_SIZE" | awk -F"=" '{print $2}'`
snv=`grep -A140 $i $wkfile | grep -m1 "P3_COMMENT=" | awk -F"=" '{print $2}'`
id=`echo $i | awk -F"=" '{print $2}'`
if [ -z "$right" -o -z "$left" ];
then continue
fi
echo $id,$snv,$left,$right,$size >> $primerlist
done
echo "."
done
cp $tmp"/"$name"_primerlist.txt" $p3dir"/"$name"_primerlist.txt"
cat $p3dir"/"$name"_primerlist.txt" | awk -F, '{print $2,$3,$4}' > $p3dir"/"$name"_isPCR-input"
exit;
| true |
50f9809f5897d04632a759d35218fd29a498ef29 | Shell | mbovel/wp-backup-restore-command | /bin/install-tests.sh | UTF-8 | 1,450 | 3.578125 | 4 | [] | no_license |
#!/usr/bin/env bash
set -ex
WP_CLI_BIN_DIR=${WP_CLI_BIN_DIR-/tmp/wp-cli-phar}
PKG_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../ && pwd )"
BEHAT_DIR=${PKG_DIR}/features
download() {
if [ `which curl` ]; then
curl -s "$1" > "$2";
elif [ `which wget` ]; then
wget -nv -O "$2" "$1"
fi
}
install_composer_packages() {
cd $PKG_DIR
composer update
}
install_tests_includes() {
mkdir -p ${BEHAT_DIR}/bootstrap
mkdir -p ${BEHAT_DIR}/steps
mkdir -p ${BEHAT_DIR}/extra
cp $PKG_DIR/vendor/wp-cli/wp-cli/features/bootstrap/FeatureContext.php ${BEHAT_DIR}/bootstrap
cp $PKG_DIR/vendor/wp-cli/wp-cli/features/bootstrap/support.php ${BEHAT_DIR}/bootstrap
cp $PKG_DIR/vendor/wp-cli/wp-cli/php/WP_CLI/Process.php ${BEHAT_DIR}/bootstrap
cp $PKG_DIR/vendor/wp-cli/wp-cli/php/WP_CLI/ProcessRun.php ${BEHAT_DIR}/bootstrap
cp $PKG_DIR/vendor/wp-cli/wp-cli/php/utils.php ${BEHAT_DIR}/bootstrap
cp $PKG_DIR/vendor/wp-cli/wp-cli/features/steps/given.php ${BEHAT_DIR}/steps
cp $PKG_DIR/vendor/wp-cli/wp-cli/features/steps/when.php ${BEHAT_DIR}/steps
cp $PKG_DIR/vendor/wp-cli/wp-cli/features/steps/then.php ${BEHAT_DIR}/steps
cp $PKG_DIR/vendor/wp-cli/wp-cli/features/extra/no-mail.php ${BEHAT_DIR}/extra
}
install_db() {
mysql -h 127.0.0.1 -e 'CREATE DATABASE IF NOT EXISTS wp_cli_test; GRANT ALL PRIVILEGES ON wp_cli_test.* TO "wp_cli_test" IDENTIFIED BY "password1"' -uroot -p
}
install_composer_packages
install_tests_includes
install_db
| true |
35ddf3db415a665103412fe84e5a8e428ffdc73b | Shell | jandc3845/packer-kali | /scripts/chef.sh | UTF-8 | 327 | 2.609375 | 3 | [] | no_license |
# Change our debian version because Kali's default doesn't work with the Omnibus installer
echo "7.4" > /etc/debian_version
# install Omnibus Chef Client
wget -O - http://opscode.com/chef/install.sh | sudo bash -s
# Change our debian version back so we don't break shit with Kali
echo "Kali Linux 1.0.9" > /etc/debian_version
| true |
a957b92e2fe314a0393cc4d81da5ba6017b09175 | Shell | kitingChris/UbuntuQuickinstall | /_install/androidsdk.sh | UTF-8 | 666 | 3.28125 | 3 | ["MIT"] | permissive |
#!/bin/bash
if [ "$MainScript" != "true" ]; then
echo "This is not the main script!" 1>&2
exit 1
fi
echo '###############################################'
echo 'INSTALL Android SDK'
echo '###############################################'
echo "Download Android SDK from http://developer.android.com/sdk/index.html#download - adt-bundle-linux*.zip"
while [ ! -s /home/$user/Downloads/adt-bundle-linux*.zip ]; do
sleep 1 # poll once per second instead of busy-spinning
done
unzip /home/$user/Downloads/adt-bundle-linux*.zip -d /opt
mv /opt/adt-bundle-linux* /opt/adt-bundle-linux
chown -R $user:$group /opt/adt-bundle-linux
apt-get update
apt-get install android-tools-adb android-tools-fastboot
| true |
73df160d3ab3902ab0c625a0e18591297883957c | Shell | slimgroup/ServerlessImagingAWS | /numerical_examples/strong_scaling_bare_metal/devito_docker/mpi_run.sh | UTF-8 | 3,701 | 3.90625 | 4 | ["MIT"] | permissive |
#!/bin/bash
SSHDIR="/home/ubuntu/.ssh"
aws s3 cp s3://slim-bucket-common/pwitte/keys/ssh_host_rsa_key ${SSHDIR}/.
aws s3 cp s3://slim-bucket-common/pwitte/keys/ssh_host_rsa_key.pub ${SSHDIR}/.
cat ${SSHDIR}/ssh_host_rsa_key.pub >> ${SSHDIR}/authorized_keys
echo "Host *" >> ${SSHDIR}/config && echo " StrictHostKeyChecking no" >> ${SSHDIR}/config
sudo chmod 400 ${SSHDIR}/ssh_host_rsa_key ${SSHDIR}/config
eval $(ssh-agent)
ssh-add ~/.ssh/ssh_host_rsa_key
# Environment variables
source environment_variables.sh
NODE_TYPE=$1
AWS_BATCH_JOB_NUM_NODES=$2
HOST_FILE_PATH="/tmp/hostfile"
AWS_BATCH_EXIT_CODE_FILE="/tmp/batch-exit-code"
PYTHONPATH="/home/ubuntu/devito_isotropic"
THREAD_PINNING="FALSE"
S3_BUCKET="slim-bucket-common"
SCRIPT_PATH="pwitte/scripts/"
SCRIPT_NAME="bp_synthetic_mpi_bare_metal.py"
# Print function and hostfile path
BASENAME="$(hostname -I)"
log () {
echo "${BASENAME}"
}
# Error function
error_exit () {
log "${BASENAME} - ${1}" >&2
log "${2:-1}" > $AWS_BATCH_EXIT_CODE_FILE
kill $(cat /tmp/supervisord.pid)
}
# wait for all nodes to report
wait_for_nodes () {
log "Running as master node"
# Add my own ip to hostfile
touch $HOST_FILE_PATH
IP=$(hostname -I)
log "master details -> $IP"
echo "$IP" >> $HOST_FILE_PATH
touch /efs/scratch/master_ip
echo "$IP" >> /efs/scratch/master_ip
# Wait for all workers to send their ip to my hostfile
lines=$(sort $HOST_FILE_PATH|uniq|wc -l)
while [ "$AWS_BATCH_JOB_NUM_NODES" -gt "$lines" ]
do
log "$lines out of $AWS_BATCH_JOB_NUM_NODES nodes joined, check again in 1 second"
sleep 1
lines=$(sort $HOST_FILE_PATH|uniq|wc -l)
done
# Make the temporary file executable and run it with any given arguments
log "All nodes successfully joined"
# Enable thread pinning
if [ $THREAD_PINNING = "FALSE" ]; then
echo "No thread pinning."
else
echo "Use thread pinning."
./set_omp_pinning.sh hsw
fi
sleep 1
log "Run MPI strong scaling test on bare metal instance."
log $PYTHONPATH
# Move script to shared directory and run
mkdir /efs/scratch/devito
aws s3 cp s3://${S3_BUCKET}/${SCRIPT_PATH}${SCRIPT_NAME} . # copy script to home dir
mv /home/ubuntu/$SCRIPT_NAME /efs/scratch/devito/$SCRIPT_NAME # move script to shared directory
for i in {0..5}
do
mpiexec -n $AWS_BATCH_JOB_NUM_NODES --hostfile $HOST_FILE_PATH python3 /efs/scratch/devito/$SCRIPT_NAME
done
# Clean up, goodbye
sleep 1
rm -rf /efs/scratch/devito
rm -rf /efs/scratch/master_ip
rm -rf $HOST_FILE_PATH
log "done! goodbye, writing exit code to $AWS_BATCH_EXIT_CODE_FILE and shutting down my supervisord"
echo "0" > $AWS_BATCH_EXIT_CODE_FILE
exit 0
}
# Fetch and run a script
report_to_master () {
# Get ip and say hi
IP=$(hostname -I)
AWS_BATCH_JOB_MAIN_NODE_PRIVATE_IPV4_ADDRESS=$(cat /efs/scratch/master_ip)
log "I am a child node -> $IP, reporting to the master node -> ${AWS_BATCH_JOB_MAIN_NODE_PRIVATE_IPV4_ADDRESS}"
if [ $THREAD_PINNING = "FALSE" ]; then
echo "No thread pinning."
else
echo "Use thread pinning."
./set_omp_pinning.sh hsw
fi
# Send ip to master
until echo "$IP" | ssh ${AWS_BATCH_JOB_MAIN_NODE_PRIVATE_IPV4_ADDRESS} "echo ${IP} >> ${HOST_FILE_PATH}"
do
echo "Sleeping 2 seconds and trying again"
sleep 2
done
# kill time until master is done
tail -f /dev/null
log "done! goodbye"
exit 0
}
# Main - dispatch user request to appropriate function
log $NODE_TYPE
case $NODE_TYPE in
main)
wait_for_nodes "${@}"
;;
child)
report_to_master "${@}"
;;
*)
log $NODE_TYPE
usage "Could not determine node type. Expected (main/child)"
;;
esac
| true |
dacdc782e0a054cfd78e41308d593cfe3ae6e722 | Shell | DylanCauwels/dotfiles | /zsh/zprofile.symlink | UTF-8 | 393 | 2.96875 | 3 | ["ISC"] | permissive |
#!/usr/bin/env bash
#
# ZSH PROFILE
#
# `.zprofile' is meant as an alternative to `.zlogin' for ksh fans;
# the two are not intended to be used together,
# although this could certainly be done if desired.
#
# Put stuff in `~/.zshenv`, which is always executed.
# See comments in `~/.zshenv` for detailed summary.
[[ -f "${ZDOTDIR:-$HOME}/.profile" ]] && source ${ZDOTDIR:-$HOME}/.profile
| true |
ebc0ea1e09cdec10e662200775e93a25ad24bb74 | Shell | zackeryf/up | /test/run_tests | UTF-8 | 564 | 3.1875 | 3 | ["MIT"] | permissive |
#!/bin/bash
# Use bashutils to run etest
# @requires bashutils environment variable BASHUTILS_HOME to be set to where bashutils is installed.
# This can be done in .bashrc. Example BASHUTILS_HOME=/home/myuser/bashutils
# I am just going to set it here
BASHUTILS_HOME=${HOME}/bashutils
BASHUTILS=${BASHUTILS_HOME}/share
if [[ ! -e ${HOME}/bash_programs/up/test/test_logs ]]; then
mkdir ${HOME}/bash_programs/up/test/test_logs
fi
${BASHUTILS_HOME}/bin/etest --verbose --log_dir ${HOME}/bash_programs/up/test/test_logs ${HOME}/bash_programs/up/test/up.etest
| true |
f46dd2a21b8fac318803da1985f0489af9e60a4b | Shell | niyonx/night-light | /nightlight.sh | UTF-8 | 293 | 2.828125 | 3 | [] | no_license |
#!/bin/bash
bool=$(gsettings get org.gnome.settings-daemon.plugins.color night-light-enabled);
if [ "$bool" = true ];
then
gsettings set org.gnome.settings-daemon.plugins.color night-light-enabled false
else
gsettings set org.gnome.settings-daemon.plugins.color night-light-enabled true
fi
| true |
82b9e642be06f7a2af6adba5d7a1b5e33495959d | Shell | radicallyopensecurity/netaidkit-nakd | /root/usr/share/nakd/scripts/get_inetstat.sh | UTF-8 | 98 | 2.53125 | 3 | [] | no_license |
#!/bin/sh
ping -c1 8.8.8.8 > /dev/null 2>&1 # "&>" is a bashism; under /bin/sh it would background the ping and break the exit-status check
if [ $? -ne 0 ]; then
echo "1"
else
echo "0"
fi
| true |
6ff1e6cdf272a68a1576fd9e373ec0c1f16a2e41 | Shell | Quarkex/dotfiles | /functions/img2cover.sh | UTF-8 | 3,817 | 3.40625 | 3 | ["Unlicense"] | permissive |
function img2cover {(
ext="jpg";
cover=" (cover)";
backcover=" (backcover)";
joint=" - ";
while getopts ":t:a:c:b:e:j:o:h" opt; do
case $opt in
h)
echo "Joins images at the first or last page of a pdf.";
echo "";
echo "-t Define the title string.";
echo "-a Define the author's name string.";
echo "-c Define the cover identifier string. Defaults to \" (cover)\"";
echo "-b Define the backcover identifier string. Defaults to \" (backcover)\"";
echo "-e Define the extension for cover images. Defaults to \"jpg\"";
echo "-j Define a join string between author and title. Defaults to \" - \"";
echo "-o Select an output name.";
echo "-h Show this help.";
exit 0;
;;
t)
title="$OPTARG"
;;
a)
author="$OPTARG"
;;
c)
cover="$OPTARG"
;;
b)
backcover="$OPTARG"
;;
e)
ext="$OPTARG"
;;
j)
joint="$OPTARG"
;;
o)
outputname="$OPTARG"
;;
\?)
echo "Invalid option: -$OPTARG" >&2
exit 1
;;
:)
echo "Option -$OPTARG requires an argument." >&2
exit 1
;;
esac
done
shift $(( OPTIND-1 ));
if [ $author == "" ]; then
filename="${title%%.pdf}".pdf;
else
filename="$author$joint${title%%.pdf}".pdf;
fi
covername="${filename%%.pdf}$cover.$ext";
backcovername="${filename%%.pdf}$backcover.$ext";
if [ "$outputname" == "" ]; then
outputname="$filename";
else
outputname="${outputname%%.pdf}.pdf";
fi
if [ -e "$filename" ]; then
fileaux="$(mktemp -t "${0##*/}.XXXXXXXXXX.pdf")";
cp "$filename" "$fileaux";
size=$(pdfinfo "$fileaux" | grep "Page size");
size=${size##*: };
size=${size%% pts*};
size=${size/\ x\ /x};
function attach {
if [ -e "$imagename" ]; then
imageaux="$(mktemp -t "${0##*/}.XXXXXXXXXX.pdf")";
working_copy="$(mktemp -t "${0##*/}.XXXXXXXXXX.pdf")";
convert "$imagename" -gravity center -background white -extent $size "$imageaux";
if [ -e "$imageaux" ]; then
if [ ! $invert == true ];then
echo "Attaching cover...";
pdftk "$imageaux" "$fileaux" cat output "$working_copy";
else
echo "Attaching backcover...";
pdftk "$fileaux" "$imageaux" cat output "$working_copy";
fi
if [ $? -eq 0 ]; then
mv "$working_copy" "$fileaux";
fi
rm "$imageaux";
fi
fi
}
imagename="$covername"; invert=false; attach;
imagename="$backcovername"; invert=true; attach;
function conclude {
mv "$fileaux" "$outputname";
}
if [ ! -e "${outputname}" ]; then
conclude;
else
echo "File exist: ${outputname}. Overwrite?";
select item in Yes No;
do
if [ $item = "No" ]; then
rm "$fileaux";
break
fi
if [ $item = "Yes" ]; then
conclude;
break
fi
done
fi
else
echo "The file $filename does not exist.";
fi
)}
| true |
7640dcdd317332280474cb6e53ae2e6333271af9 | Shell | arbal/projects | /kubernetes/install_kube.sh | UTF-8 | 675 | 3.140625 | 3 | [] | no_license |
#!/bin/bash
USER=`whoami`
if [ "$USER" != "root" ]; then
echo ERROR: Run this script using sudo
exit 1
fi
apt-get update && sudo apt-get install -y apt-transport-https gnupg2
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-$(lsb_release -cs) main" | tee -a /etc/apt/sources.list.d/kubernetes.list
apt-get update
# install kubernetes controller
apt-get install -y kubectl
# install minikube
curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 \
&& chmod +x minikube
mkdir -p /usr/local/bin/
install minikube /usr/local/bin/
rm minikube
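# Optional sanity check that both tools are installed and on PATH
# (kubectl version --client and minikube version are standard subcommands):
kubectl version --client
minikube version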
| true |
146e3d347bf1c66ec3f8646fa623d60ac04edaa5
|
Shell
|
magento2setup/ubuntu-14.04-lts
|
/bin/permission
|
UTF-8
| 609 | 2.59375 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# | magento
if [ -d '/var/www/magento2' ]; then
# permission(s) - document - root
chmod 775 '/var/www/magento2'
# cd - document - root
cd '/var/www/magento2'
# permission(s) - var/vendor/pub/app/...
find var vendor pub/static pub/media app/etc -type f -exec chmod g+w {} \;
find var vendor pub/static pub/media app/etc -type d -exec chmod g+ws {} \;
chmod u+x bin/magento
# ownership - file(s)
chown -R root:www-data .
fi
# magento |
| true |
4cc1284cb0b3cfe20fc67ee38aebdf327fc32e70
|
Shell
|
fengmudong/Scripts
|
/rigidscan.sh
|
UTF-8
| 2,033 | 3.34375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#########
# Script to do RIGID scan on a dihedral angle from 0 to 360 with AMBER.
# Adapted from a relaxed scan script https://github.com/swails/JmsScripts/blob/master/scan.sh.
#########
# User set Variables
NUM_STEPS=72 # Number of points in PES scan (increment is 360/NUM_STEPS degrees)
PRMTOP="strip.r6.prmtop" # The name of the topology file for the system
# Remove data from previous runs
rm Dih.dat Etot.dat Edih.dat
rm -r Restrts Mdinfos Mdouts
mkdir -p Restrts
mkdir -p Mdinfos
mkdir -p Mdouts
increment=`echo "360 / $NUM_STEPS" | bc`
# The following input file does an energy evaluation only, no minimization nor MD. It runs in vacuum.
cat > energy.in << EOF
Energy evaluation only
&cntrl
imin=1,
maxcyc=1,
ntb=0,
igb=0,
ntpr=1,
cut=999,
/
EOF
for x in `seq 0 1 $NUM_STEPS`; do
dihedral=`echo "$x * $increment" | bc`
# The following cpptraj script generates initial structures each has different dihedral value.
# You should modify manually the line start with trajin and with makestructure.
cat > run.traj << EOF
parm $PRMTOP
trajin s1r6_3.mdcrd 216643 216643
makestructure cent:1:C3:C6:C17:C13:${dihedral}
trajout run.rst restart nobox
run
quit
EOF
cpptraj -i run.traj
sander -O -i energy.in -o mdout.$x.scan -p $PRMTOP -c run.rst -r rst
mv rst Restrts/rst.$x
mv mdinfo Mdinfos/mdinfo.$x
mv mdout.$x.scan Mdouts/
echo $dihedral >> Dih.dat
done
# Post-process the data
# Rename one-digit filenames to two-digit so that subsequent analysis goes in order.
for x in `seq 0 1 9`; do
cd Mdinfos
mv mdinfo.$x mdinfo.0$x
cd ../
done
# Parse the mdinfo files into Etot.dat and Edih.dat. You should check whether the code is picking out the fields you desire.
cd Mdinfos
for file in mdinfo*; do
head -4 $file | tail -1 | awk '{print $2}' >> ../Etot.dat
head -6 $file | tail -1 | awk '{print $9}' >> ../Edih.dat
done
cd ../
# You should now plot Dih.dat, Etot.dat, Edih.dat in Jupyter or something.
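# A quick gnuplot sketch of the scan, if you prefer the command line (assumes gnuplot and paste are available):
#   paste Dih.dat Etot.dat > scan.dat
#   gnuplot -p -e "plot 'scan.dat' using 1:2 with linespoints title 'Etot vs dihedral'"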
| true |
48234f931f16106c5c806c499f039bd093226967
|
Shell
|
stajichlab/Afum_popgenome
|
/variantcall/pipeline/01_aln_Af100_2lanes.sh
|
UTF-8
| 3,884 | 3.0625 | 3 |
[] |
no_license
|
#!/usr/bin/bash
#SBATCH --mem 24G --ntasks 8 --nodes 1 -J Af100
#SBATCH --out logs/AF100.bwa.%a.log --time 2:00:00 -p short
module load bwa/0.7.17
module unload java
module load java/8
module load picard
module load samtools/1.9
module load gatk/3.7
MEM=24g
GENOMESTRAIN=Af293
INDIR=input
TOPOUTDIR=tmp
ALNFOLDER=aln
HTCEXT=cram
HTCFORMAT=cram
if [ -f config.txt ]; then
source config.txt
fi
if [ -z $REFGENOME ]; then
echo "NEED A REFGENOME - set in config.txt and make sure 00_index.sh is run"
exit
fi
if [ ! -f $REFGENOME.dict ]; then
echo "NEED a $REFGENOME.dict - make sure 00_index.sh is run"
fi
mkdir -p $TOPOUTDIR
SAMPFILE=Af100_samples.csv
TEMP=/scratch
N=${SLURM_ARRAY_TASK_ID}
CPU=1
if [ $SLURM_CPUS_ON_NODE ]; then
CPU=$SLURM_CPUS_ON_NODE
fi
if [ -z $N ]; then
N=$1
if [ -z $N ]; then
echo "need to provide a number by --array or cmdline"
exit
fi
fi
MAX=$(wc -l $SAMPFILE | awk '{print $1}')
if [ $N -gt $MAX ]; then
echo "$N is too big, only $MAX lines in $SAMPFILE"
exit
fi
ct=0
IFS=,
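# Each line of $SAMPFILE is expected to hold three comma-separated fields
# (hypothetical example): STRAIN01,STRAIN01_S1_R1_001.fastq.gz,STRAIN01_S1_R2_001.fastq.gz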
sed -n ${N}p $SAMPFILE | while read STRAIN FWD REV
do
LIBRARY=$(basename $FWD _R1_001.fastq.gz)
FINALMERGE=$ALNFOLDER/$STRAIN.$HTCEXT
FINALLIST=()
for LANE in DA002_lane1 DA002_lane2
do
PAIR1=$INDIR/$LANE/$FWD
PAIR2=$INDIR/$LANE/$REV
SAMFILE=$TOPOUTDIR/$STRAIN.$LANE.unsrt.sam
SRTED=$TOPOUTDIR/${STRAIN}.$LANE.srt.bam
DDFILE=$TOPOUTDIR/$STRAIN.$LANE.DD.bam
REALIGN=$TOPOUTDIR/$STRAIN.$LANE.realign.bam
INTERVALS=$TOPOUTDIR/$STRAIN.$LANE.intervals
FINALFILE=$TOPOUTDIR/$STRAIN.$LANE.$HTCEXT
FINALLIST[$ct]=$FINALFILE
ct=$(expr $ct + 1)
READGROUP="@RG\tID:$STRAIN.$LANE\tSM:$STRAIN\tLB:$LIBRARY.$LANE\tPL:illumina\tCN:Seqmatic"
if [ ! -f $FINALFILE ]; then
if [ ! -f $DDFILE ]; then
if [ ! -f $SRTED ]; then
if [ -e $PAIR2 ]; then
echo "SAMFILE is $SAMFILE"
if [ ! -f $SAMFILE ]; then
echo "bwa mem -t $CPU -R $READGROUP -o $SAMFILE $REFGENOME $PAIR1 $PAIR2"
bwa mem -t $CPU -R $READGROUP -o $SAMFILE $REFGENOME $PAIR1 $PAIR2
fi
else
echo "Cannot find $PAIR2, skipping $STRAIN"
exit
fi
if [ ! -f $SAMFILE ]; then
echo "no $SAMFILE exiting"
exit
fi
samtools fixmate --threads $CPU -O bam $SAMFILE $TEMP/${STRAIN}.fixmate.bam
samtools sort --threads $CPU -O bam -o $SRTED -T $TEMP $TEMP/${STRAIN}.fixmate.bam
if [ -f $SRTED ]; then
rm -f $TEMP/${STRAIN}.fixmate.bam $SAMFILE
fi
fi # SRTED file exists or was created by this block
time java -jar $PICARD MarkDuplicates I=$SRTED O=$DDFILE \
METRICS_FILE=logs/$STRAIN.dedup.metrics CREATE_INDEX=true VALIDATION_STRINGENCY=SILENT
if [ -f $DDFILE ]; then
rm -f $SRTED
fi
fi # DDFILE is created after this or already exists
if [ ! -f $INTERVALS ]; then
time java -Xmx$MEM -jar $GATK \
-T RealignerTargetCreator \
-R $REFGENOME \
-I $DDFILE \
-o $INTERVALS
fi
if [ ! -f $REALIGN ]; then
time java -Xmx$MEM -jar $GATK \
-T IndelRealigner \
-R $REFGENOME \
-I $DDFILE \
-targetIntervals $INTERVALS \
-o $REALIGN
fi # REALIGN created or already existed
samtools view -O $HTCFORMAT --threads $CPU --reference $REFGENOME -o $FINALFILE $REALIGN
samtools index $FINALFILE
if [ -f $FINALFILE ]; then
rm -f $DDFILE $REALIGN
rm -f $(echo $REALIGN | sed 's/bam$/bai/')
rm -f $(echo $DDFILE | sed 's/bam$/bai/')
rm -f $INTERVALS
fi
fi
done
# there should be a merging now?
echo "$FINALMERGE $FINALLIST"
LIST=$(printf ",%s" "${FINALLIST[@]}")
if [ ! -f $FINALMERGE ]; then
CMD="samtools merge --reference $REFGENOME --threads $CPU -O $HTCFORMAT $FINALMERGE $LIST"
eval $CMD
samtools index $FINALMERGE
for file in ${FINALLIST[@]}; do
rm -f $file
rm -f $file.crai
done
fi
done
| true |
26ef86fbfc7876f9a262800acbfe8f1b39f62efd
|
Shell
|
infosecirvin/packetwall
|
/light.sh
|
UTF-8
| 1,060 | 4.09375 | 4 |
[] |
no_license
|
#!/bin/bash
# Common path for all GPIO access
BASE_GPIO_PATH=/sys/class/gpio
# Assign names to GPIO pin numbers for each light
GREEN=12
# Assign names to states
ON="1"
OFF="0"
# Utility function to export a pin if not already exported
exportPin()
{
if [ ! -e $BASE_GPIO_PATH/gpio$1 ]; then
echo "$1" > $BASE_GPIO_PATH/export
fi
}
# Utility function to set a pin as an output
setOutput()
{
echo "out" > $BASE_GPIO_PATH/gpio$1/direction
}
# Utility function to change state of a light
setLightState()
{
echo $2 > $BASE_GPIO_PATH/gpio$1/value
}
# Utility function to turn all lights off
allLightsOff()
{
setLightState $GREEN $OFF
}
# Ctrl-C handler for clean shutdown
shutdown()
{
allLightsOff
exit 0
}
trap shutdown SIGINT
# Export pins so that we can use them
exportPin $GREEN
# Set pins as outputs
setOutput $GREEN
# Turn lights off to begin
allLightsOff
while true
do
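# "r1" is expected to contain "1" or "0", presumably written by the firewall rule watcher.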
rule1=$(<r1)
if [ "$rule1" = "$ON" ]; then
setLightState $GREEN $ON
sleep 9
setLightState $GREEN $OFF
sleep 1
else
setLightState $GREEN $OFF
sleep 9
fi
done
| true |
3818c037dbf357290ac6dbff1dbff1005db904ea
|
Shell
|
shawe/Hackberry_boot_Android_from_SD
|
/ROM-Stock/createSD_Stock.sh
|
UTF-8
| 6,272 | 2.8125 | 3 |
[] |
no_license
|
#!/bin/bash
# Based on:
# http://linux-sunxi.org/Boot_Android_from_SdCard#BootLoader
# http://jas-hacks.blogspot.co.uk/2012/12/hackberry-a10-booting-android-from-sd.html
# http://tmerle.blogspot.fr/2012/11/booting-android-ics-system-from-sdcard.html
card=/dev/sdb
echo "Downloading required files..."
wget -c https://github.com/linux-sunxi/sunxi-bin-archive/raw/master/hackberry/stock-nanda-1gb/script.bin
wget -c http://dl.miniand.com/jas-hacks/uboot/1gb/sunxi-spl.bin
wget -c http://dl.miniand.com/jas-hacks/uboot/1gb/u-boot.bin
wget -c http://dl.miniand.com/adr1an/enAU-Hackb3rry-remote-rev2.img
wget -c http://dl.linux-sunxi.org/users/arete74/tools.tar.gz
tar -zxvf tools.tar.gz
git clone https://github.com/Ithamar/awutils.git
cd awutils
make
cd ..
cp awutils/awimage tools/
tools/awimage -u enAU-Hackb3rry-remote-rev2.img
echo "Extracting required files..."
cd enAU-Hackb3rry-remote-rev2.img.dump
cp RFSFAT16_BOOT_00000000000 boot.img
../tools/split_bootimg.pl boot.img
mkdir boot
cd boot
gunzip -c ../boot.img-ramdisk.gz | cpio -i
tar -cpvf ../../boot-stock.tar *
cd ..
mkimage -A arm -O linux -T kernel -C none -a 0x40008000 -e 0x40008000 -n "Linux 2.6" -d boot.img-kernel ../uImage
cp RFSFAT16_RECOVERY_0000000 recovery.img
../tools/split_bootimg.pl recovery.img
mkdir recovery
cd recovery
gunzip -c ../recovery.img-ramdisk.gz | cpio -i
tar -cpvf ../../recovery-stock.tar *
cd ..
cp RFSFAT16_SYSTEM_000000000 system.img
../tools/simg2img system.img system1.img
mkdir system
sudo mount -o loop system1.img system
cd system
tar -cpvf ../../system-stock.tar *
cd ..
sudo umount system
cd ..
echo "Require manually partition of SD card before continue"
echo "Edit this file for more details"
exit 0 # Comment this line out once your SD card matches the partition layout below
# Partition Filesystem Label Size Internal NAND
# unallocated unallocated 17MB
# /dev/sdb1 fat16 bootloader 16MB nanda
# /dev/sdb2 ext4 environment 16MB nandb
# /dev/sdb3 ext4 boot 32MB nandc
# /dev/sdb4 extended fill all space
# /dev/sdb5 ext4 system 512MB nandd
# /dev/sdb6 ext4 data 1024MB nande
# /dev/sdb7 ext4 misc 16MB nandf
# /dev/sdb8 ext4 recovery 32MB nandg
# /dev/sdb9 ext4 cache 256MB nandh
# /dev/sdb10 ext4 private 32MB nandi
# /dev/sdb11 ext4 sysrecovery 512MB nandj
# /dev/sdb12 ext4 UDISK 2048MB nandk
# /dev/sdb13 ext4 extsd all available space
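# Optional sanity check before continuing (sfdisk -l lists the current partition table):
#   sudo sfdisk -l $card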
## DON'T UNCOMMENT THIS LINES !!!
#echo "Destroying TOC ${card}"
#dd if=/dev/zero of=$card bs=1M count=1
#sync
#partprobe
#echo "Destroying any old uboot environment ${card}"
#dd if=/dev/zero of=$card bs=512 count=2047
#sync
#partprobe
#echo "Partitionning ${card}"
#dd if=/dev/zero of=$card bs=512 count=1
#sync
#/sbin/sfdisk -R $card
#cat <<EOT | sfdisk --in-order -uM $card
#17,16,c
#,16,83
#,32,83
#,,5
#,512,83
#,1024,83
#,16,83
#,32,83
#,256,83
#,16,c
#,512,83
#,2048,83
#,,83
#EOT
#sync
#partprobe
for i in {1..13}
do
umount ${card}${i}
done
echo "Formatting partitions of ${card}"
echo "Formatting ${card}1 bootloader"
mkfs.vfat -n bootloader ${card}1
echo "Formatting ${card}2"
mkfs.ext4 -L env ${card}2
echo "Formatting ${card}3 boot"
mkfs.ext4 -L boot ${card}3
echo "Formatting ${card}5 system"
mkfs.ext4 -L system ${card}5
echo "Formatting ${card}6 data"
mkfs.ext4 -L data ${card}6
echo "Formatting ${card}7 misc"
mkfs.ext4 -L misc ${card}7
echo "Formatting ${card}8 recovery"
mkfs.ext4 -L recovery ${card}8
echo "Formatting ${card}9 cache"
mkfs.ext4 -L cache ${card}9
echo "Formatting ${card}10 private"
mkfs.vfat -n private ${card}10
echo "Formatting ${card}11 sysrecovery"
mkfs.ext4 -L sysrecovery ${card}11
echo "Formatting ${card}12 UDISK"
mkfs.vfat -n UDISK ${card}12
echo "Formatting ${card}13 extsd"
mkfs.vfat -n extsd ${card}13
echo "Deleting huge files"
tune2fs -O ^huge_file ${card}1
tune2fs -O ^huge_file ${card}2
tune2fs -O ^huge_file ${card}3
tune2fs -O ^huge_file ${card}5
tune2fs -O ^huge_file ${card}6
tune2fs -O ^huge_file ${card}7
tune2fs -O ^huge_file ${card}8
tune2fs -O ^huge_file ${card}9
tune2fs -O ^huge_file ${card}10
tune2fs -O ^huge_file ${card}11
tune2fs -O ^huge_file ${card}12
tune2fs -O ^huge_file ${card}13
echo "Checking integrity of ${card}"
fsck.vfat -a ${card}1
fsck.ext4 -p ${card}2
fsck.ext4 -p ${card}3
fsck.ext4 -p ${card}5
fsck.ext4 -p ${card}6
fsck.ext4 -p ${card}7
fsck.ext4 -p ${card}8
fsck.ext4 -p ${card}9
fsck.vfat -a ${card}10
fsck.ext4 -p ${card}11
fsck.vfat -a ${card}12
fsck.vfat -a ${card}13
echo "Flashing sunxi-spl to ${card}"
dd if=sunxi-spl.bin of=$card bs=1024 seek=8
sync
echo "Flashing u-boot to ${card}"
dd if=u-boot.bin of=$card bs=1024 seek=32
sync
echo "Preparing ${card}1"
mount ${card}1 /mnt/ || exit 0
cp uImage /mnt
cp script.bin /mnt
cp boot.scr /mnt
# quote the delimiter so ${fexfile} and ${kernel} stay literal for u-boot instead of being expanded by bash
cat >/mnt/uEnv.txt << 'EOT'
fexfile=script.bin
kernel=uImage
extraargs=root=/dev/mmcblk0p3 loglevel=8 rootwait console=ttyS0,115200 rw init=/init mac_addr=00:AE:99:A3:E4:AF
boot_mmc=fatload mmc 0 0x43000000 ${fexfile}; fatload mmc 0 0x48000000 ${kernel}; bootm 0x48000000
EOT
cat >/mnt/uEnv_recovery.txt << 'EOT'
fexfile=script.bin
kernel=uImage
extraargs=root=/dev/mmcblk0p8 loglevel=8 rootwait console=ttyS0,115200 rw init=/init mac_addr=00:AE:99:A3:E4:AF
boot_mmc=fatload mmc 0 0x43000000 ${fexfile}; fatload mmc 0 0x48000000 ${kernel}; bootm 0x48000000
EOT
sync
umount /mnt
echo "Preparing ${card}3"
mount ${card}3 /mnt || exit 0
tar -xpf boot-stock.tar -C /mnt
sed -i "s/nandd/mmcblk0p5/g" /mnt/init.sun4i.rc
sed -i "s/nande/mmcblk0p6/g" /mnt/init.sun4i.rc
sed -i "s/nandh/mmcblk0p9/g" /mnt/init.sun4i.rc
sed -i "s/nandi/mmcblk0p10/g" /mnt/init.sun4i.rc
sync
umount /mnt
echo "Preparing ${card}5"
mount ${card}5 /mnt || exit 0
tar -xpf system-stock.tar -C /mnt
sed -i "s/\/devices\/platform\/sunxi-mmc.0\/mmc_host/\/devices\/virtual\/block\/mmcblk0p13/g" /mnt/etc/vold.fstab
sed -i "s/\/dev\/block\/nanda/\/devices\/virtual\/block\/mmcblk0p1/g" /mnt/bin/preinstall.sh
sed -i "s/nandc/mmcblk0p3/g" /mnt/bin/sop.sh
sync
umount /mnt
echo "Preparing ${card}8"
mount ${card}8 /mnt || exit 0
tar -xpf recovery-stock.tar -C /mnt
sed -i "s/nandf/mmcblk0p7/g" /mnt/ueventd.sun4i.rc
sync
umount /mnt
rm -rf enAU-Hackb3rry-remote-rev2.img*
| true |
5ffd10c298a8547ca0089401653cc352de603481
|
Shell
|
Seiyial/Docs
|
/centos-setup.sh
|
UTF-8
| 1,332 | 2.953125 | 3 |
[] |
no_license
|
# Install build-essential (Development tools)
sudo yum -y group install "Development Tools"
# Install Erlang
wget https://packages.erlang-solutions.com/erlang-solutions-1.0-1.noarch.rpm
sudo yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
sudo rpm -Uvh erlang-solutions-1.0-1.noarch.rpm
sudo yum -y install esl-erlang
# Install Kiex/Elixir
\curl -sSL https://raw.githubusercontent.com/taylor/kiex/master/install | bash -s
echo '# Kiex - Elixir Versioning' >> ~/.bashrc
echo 'test -s "$HOME/.kiex/scripts/kiex" && source "$HOME/.kiex/scripts/kiex"' >> ~/.bashrc
test -s "$HOME/.kiex/scripts/kiex" && source "$HOME/.kiex/scripts/kiex"
kiex install 1.7.1
kiex use 1.7.1
kiex default 1.7.1
# Install Node
curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.11/install.sh | bash
echo '' >> ~/.bashrc
echo '# NVM - NodeJS Versioning' >> ~/.bashrc
echo 'export NVM_DIR="$HOME/.nvm"' >> ~/.bashrc
echo '[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm' >> ~/.bashrc
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
nvm install 10.7.0
nvm install 9.11.3
nvm install 8.11.3
nvm use default
curl --silent --location https://dl.yarnpkg.com/rpm/yarn.repo | sudo tee /etc/yum.repos.d/yarn.repo
sudo yum install yarn
echo "Completed !"
| true |
2a77b300828a5a4a91864c709c653d687c558a5c
|
Shell
|
TrendingTechnology/SDCLR
|
/cmds/shell_scrips/cifar-100-LT.sh
|
UTF-8
| 4,835 | 2.859375 | 3 |
[] |
no_license
|
# get opts
while [[ $# -gt 0 ]]
do
key="$1"
case $key in
-e|--epochs) pretrain_epochs="$2"; shift; shift ;;
-s|--split) pretrain_split="$2"; shift; shift ;;
-p|--port) port="$2"; shift; shift ;;
-w|--workers) workers="$2"; shift; shift ;;
-g|--GPU_NUM) GPU_NUM=("$2"); shift; shift ;;
--lr) pretrain_lr=("$2"); shift; shift ;;
--only_finetuning) only_finetuning=("$2"); shift; shift ;;
--test_only) test_only=("$2"); shift; shift ;;
--batch_size) batch_size=("$2"); shift; shift ;;
--temp) pretrain_temp=("$2"); shift; shift ;;
--few_shot_only) few_shot_only=("$2"); shift; shift ;;
--few_shot_lr) few_shot_lr=("$2"); shift; shift ;;
--few_shot_epochs) few_shot_epochs=("$2"); shift; shift ;;
# pruning
--prune) prune=("$2"); shift; shift ;;
--prune_percent) prune_percent=("$2"); shift; shift ;;
--random_prune_percent) random_prune_percent=("$2"); shift; shift ;;
--prune_dual_bn) prune_dual_bn=("$2"); shift; shift ;;
*) echo "${1} is not a recognized option"; exit 125;;
esac
done
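# Example invocation (hypothetical values):
#   bash cifar-100-LT.sh -e 1000 -s cifar100_split1_D_i -g 2 --prune True --prune_percent 0.5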
GPU_NUM=${GPU_NUM:-1}
workers=${workers:-5}
batch_size=${batch_size:-512}
few_shot_only=${few_shot_only:-False}
seed=10
port=${port:-4833}
pretrain_epochs=${pretrain_epochs:-2000}
pretrain_split=${pretrain_split:-cifar100_split1_D_i}
pretrain_lr=${pretrain_lr:-0.5}
pretrain_temp=${pretrain_temp:-0.2}
few_shot_lr=${few_shot_lr:-0.02}
few_shot_epochs=${few_shot_epochs:-300}
prune=${prune:-False}
prune_percent=${prune_percent:-0.7}
prune_dual_bn=${prune_dual_bn:-False}
random_prune_percent=${random_prune_percent:-0}
only_finetuning=${only_finetuning:-False}
test_only=${test_only:-False}
pretrain_name=res18_cifar100_scheduling_sgd_temp${pretrain_temp}_wd1e-4_lr${pretrain_lr}_b${batch_size}_o128_twolayerProj_epoch${pretrain_epochs}_${pretrain_split}_newNT_s${seed}
if [[ ${prune} == "True" ]]
then
pretrain_name="${pretrain_name}_pruneP${prune_percent}"
if [[ ${prune_dual_bn} == "True" ]]
then
pretrain_name="${pretrain_name}DualBN"
fi
if [[ ${random_prune_percent} != "0" ]]
then
pretrain_name="${pretrain_name}RandomP${random_prune_percent}"
fi
fi
save_dir=checkpoints_cifar100
cmd="python -m torch.distributed.launch --nproc_per_node=${GPU_NUM} --master_port ${port} train_simCLR.py ${pretrain_name} --epochs ${pretrain_epochs} \
--batch_size ${batch_size} --optimizer sgd --lr ${pretrain_lr} --temperature ${pretrain_temp} --model res18 \
--trainSplit cifar100_imbSub_with_subsets/${pretrain_split}.npy --save-dir ${save_dir} --seed ${seed} \
--dataset cifar100 --output_ch 128 --num_workers ${workers}"
if [[ ${prune} == "True" ]]
then
cmd="${cmd} --prune --prune_percent ${prune_percent}"
if [[ ${prune_dual_bn} == "True" ]]
then
cmd="${cmd} --prune_dual_bn"
fi
if [[ ${random_prune_percent} != "0" ]]
then
cmd="${cmd} --random_prune_percent ${random_prune_percent}"
fi
fi
tuneLr=30
cmd_full="python train_cifar.py ${pretrain_name}__f2layer4_d10d20_wd0_lr${tuneLr}_freezeBN \
--fixbn --wd 0 --model res18 --epochs 30 --lr ${tuneLr} --decreasing_lr 10,20 \
--trainSplit cifar100/cifar100_trainIdxList.npy --fixto layer4 --checkpoint checkpoints_cifar100/${pretrain_name}/model_${pretrain_epochs}.pt \
--cvt_state_dict --save-dir checkpoints_cifar100_tune --valiSplit cifar100/cifar100_valIdxList.npy --dataset cifar100"
if [[ ${prune_dual_bn} == "True" ]]
then
cmd_full="${cmd_full} --bnNameCnt 0"
fi
if [[ ${test_only} == "True" ]]
then
cmd_full="${cmd_full} --test_only"
fi
tuneLr=30
index_split="$(echo ${pretrain_split} | grep -P 'split\K([0-9])' -o)"
train_split1=cifar100_split${index_split}_S_b
cmd_few_shot="python train_cifar.py ${pretrain_name}__${train_split1}_f2layer4_d40d60_wd0_lr${tuneLr}_freezeBN \
--fixbn --wd 0 --model res18 --epochs 100 --lr ${tuneLr} --decreasing_lr 40,60 \
--trainSplit cifar100_imbSub_with_subsets/${train_split1}.npy --fixto layer4 --checkpoint checkpoints_cifar100/${pretrain_name}/model_${pretrain_epochs}.pt \
--cvt_state_dict --save-dir checkpoints_cifar100_tune --valiSplit cifar100/cifar100_valIdxList.npy --dataset cifar100 --test_freq 5"
if [[ ${prune_dual_bn} == "True" ]]
then
cmd_few_shot="${cmd_few_shot} --bnNameCnt 0"
fi
if [[ ${test_only} == "True" ]]
then
cmd_few_shot="${cmd_few_shot} --test_only"
fi
# create the log directory up front so both branches can write bash_log.txt
mkdir -p ${save_dir}/${pretrain_name}
if [ ${few_shot_only} == "False" ]
then
if [ ${only_finetuning} == "False" ]
then
echo ${cmd} >> ${save_dir}/${pretrain_name}/bash_log.txt
echo ${cmd}
${cmd}
fi
echo ${cmd_full} >> ${save_dir}/${pretrain_name}/bash_log.txt
echo ${cmd_full}
${cmd_full}
echo ${cmd_few_shot} >> ${save_dir}/${pretrain_name}/bash_log.txt
echo ${cmd_few_shot}
${cmd_few_shot}
else
echo ${cmd_few_shot} >> ${save_dir}/${pretrain_name}/bash_log.txt
echo ${cmd_few_shot}
${cmd_few_shot}
fi
| true |
866331f87fbe04f067e74c107533119952c45387
|
Shell
|
VeeamHub/veeam-terraform
|
/veeam-backup-and-replication/deploy_veeam_in_vmware_sddc_toolkit/aws_create_veeamrepo_veeampn/init_client.sh
|
UTF-8
| 13,261 | 3.453125 | 3 |
[
"MIT",
"JSON"
] |
permissive
|
#!/bin/bash
# (bash is required: the script uses [[ ]], (( )) and the function keyword)
function trim
{
DATA="$1"
t="${DATA%\"}"
RES="${t#\"}"
echo "$RES"
}
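# e.g. trim '"token"' prints: token (strips one pair of surrounding double quotes)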
function log
{
if [[ "$VEEAMPN_TEST_LOG_FILE" = "" ]]; then
echo "$(date +%Y-%m-%dT%H:%M:%S:%N -u) $@"
else
echo "$(date +%Y-%m-%dT%H:%M:%S:%N -u) $@" >> "$VEEAMPN_TEST_LOG_FILE"
fi
}
function check_val
{
if [[ "$1" = "" ]]; then
(>&2 echo "$2");
exit -1
fi
echo $(trim "$1")
}
function validate_res
{
ERR=$(echo "$CALL_RES" | jq '.error.code')
if [[ "$ERR" != null ]]; then
log "ERROR!!!"
log "$CALL"
log "$CALL_RES"
exit -1
fi
}
function call_jsonrpc
{
if [ "$AUTH_TOKEN" = "" ]; then
log "AUTH_TOKEN needs to be set"
exit -1
fi
CALL="$1"
CALL_URL="$2"
CALL_RES=$(curl -sS -k -X POST -H 'Content-type: application/x-www-form-urlencoded' -H "Authorization: Bearer ${AUTH_TOKEN}" --data "$CALL" "$CALL_URL")
validate_res
}
function call_jsonrpc_datafile
{
if [ "$AUTH_TOKEN" = "" ]; then
log "AUTH_TOKEN needs to be set"
exit -1
fi
CALL_FILENAME="$1"
CALL_URL="$2"
CALL_RES=$(curl -sS -k -X POST -H 'Content-type: application/x-www-form-urlencoded' -H "Authorization: Bearer ${AUTH_TOKEN}" --data @"$CALL_FILENAME" "$CALL_URL")
validate_res
}
function call_authorize
{
USERNAME="$1"
PASSWORD="$2"
URL="$3"
if [ "$USERNAME" = "" ]; then
log "Need USERNAME";
exit -1
fi
if [ "$PASSWORD" = "" ]; then
log "Need PASSWORD";
exit -1
fi
if [ "$URL" = "" ]; then
log "Need URL";
exit -1
fi
CALL="{ \"jsonrpc\":\"2.0\",\"method\" : \"login\", \"params\" : {\"username\":\"$USERNAME\", \"password\":\"$PASSWORD\"}, \"id\" : 123 }"
CALL_RES=$(curl -sS -k -X POST -H 'Content-type: application/x-www-form-urlencoded' --data "$CALL" "$URL"/auth )
if [[ $? != 0 ]]; then
log "Failed to Autorize"
exit -1
fi
validate_res
RES=$(echo "$CALL_RES" | jq '.result.token')
AUTH_TOKEN=$(trim "$RES")
}
function ovpn_init
{
PROT=$(check_val "$1" "Need Protocol")
PORT=$(check_val "$2" "Need Port")
TO_URL=$(check_val "$3" "Need URL")
call_jsonrpc '{"jsonrpc":"2.0","method":"initServer","params":{"protocol":"'$PROT'", "externalPort":'$PORT'},"id":123}' $TO_URL
}
function ovpn_getServerConfig
{
TO_URL=$(check_val "$1" "Need URL")
call_jsonrpc '{"jsonrpc":"2.0","method":"getServerSettings","params":{},"id":123}' $TO_URL
}
function ovpn_find_client_id
{
CLI_NAME=$(check_val "$1" "Need name")
TO_URL=$(check_val "$2" "Need url")
call_jsonrpc '{ "jsonrpc":"2.0","method" : "getClients", "params" : {}, "id" : 123 }' $TO_URL
CLI_ID=$(echo $CALL_RES | jq '.result.clients[]? |select(.name=="'$CLI_NAME'").id')
}
function ovpn_add_client
{
CLI_TYPE=$(check_val "$1" "Need type (site|endpoint)")
CLI_NAME=$(check_val "$2" "Need name")
CLI_NET=$(check_val "$3" "Need net address")
TO_URL=$(check_val "$4" "Need url")
if [[ "$CLI_TYPE" == "endpoint" ]]; then
log "Client is $CLI_TYPE type - so add without props"
call_jsonrpc '{ "jsonrpc":"2.0","method" : "addClient", "params" : {"type":"'$CLI_TYPE'", "name":"'$CLI_NAME'", "props":{}}, "id" : 123 }' $TO_URL
else
log "Client is $CLI_TYPE type - so specify network address $CLI_NET"
call_jsonrpc '{ "jsonrpc":"2.0","method" : "addClient", "params" : {"type":"'$CLI_TYPE'", "name":"'$CLI_NAME'", "props":{"networkAddress":"'$CLI_NET'"}}, "id" : 123 }' $TO_URL
fi
CLI_ID=$(echo $CALL_RES | jq '.result.id')
}
function ovpn_rename_client
{
CLI_ID=$(check_val "$1" "Need ID")
CLI_NAME=$(check_val "$2" "Need name")
TO_URL=$(check_val "$3" "Need url")
call_jsonrpc '{ "jsonrpc":"2.0","method" : "editClient", "params" : {"id":'$CLI_ID', "name":"'$CLI_NAME'", "props":{}}, "id" : 123 }' $TO_URL
SUCCESS=$(echo $CALL_RES | jq '.result')
if [[ "$SUCCESS" != "true" ]]; then
log "Client was not renamed properly!"
exit -1
fi
}
function ovpn_delete_client
{
CLI_ID=$(check_val "$1" "Need ID")
TO_URL=$(check_val "$2" "Need url")
call_jsonrpc '{ "jsonrpc":"2.0","method" : "deleteClient", "params" : {"id":'$CLI_ID'}, "id" : 123 }' $TO_URL
SUCCESS=$(echo $CALL_RES | jq '.result')
if [[ "$SUCCESS" != "true" ]]; then
log "Client was not deleted properly!"
exit -1
fi
}
function ovpn_download_file
{
CLI_ID=$(check_val "$1" "Need client id")
TO_URL=$(check_val "$2" "Need url")
call_jsonrpc '{ "jsonrpc":"2.0","method" : "downloadConfig", "params" : {"id":'$CLI_ID' }, "id" : 123 }' $URL/clients
CLI_FILE_NAME=$(echo $CALL_RES | jq '.result.filename')
CLI_FILE_NAME="${CLI_FILE_NAME%\"}"
CLI_FILE_NAME="${CLI_FILE_NAME#\"}"
CLI_FILE_DATA=$(echo $CALL_RES | jq '.result.packageFile')
CLI_FILE_DATA="${CLI_FILE_DATA%\"}"
CLI_FILE_DATA="${CLI_FILE_DATA#\"}"
echo $CLI_FILE_DATA > $CLI_FILE_NAME
}
function ovpn_init_client
{
CLI_CFG_FILE=$(check_val "$1" "Need client config file")
TO_URL=$(check_val "$2" "Need url")
echo -ne '{ "jsonrpc":"2.0","method" : "initClient", "params" : {"packageFile":"' > request.json
cat "$CLI_CFG_FILE" >> request.json
echo -ne '" }, "id" : 123 }' >> request.json
call_jsonrpc_datafile request.json $TO_URL
}
function ovpn_startVPN
{
TO_URL=$(check_val "$1" "Need URL")
call_jsonrpc '{"jsonrpc":"2.0","method":"startVPN","params":{},"id":123}' $TO_URL
SUCCESS=$(echo $CALL_RES | jq '.result')
if [[ "$SUCCESS" != "true" ]]; then
log "Failed to start VPN"
exit -1
fi
}
function ovpn_stopVPN
{
TO_URL=$(check_val "$1" "Need URL")
call_jsonrpc '{"jsonrpc":"2.0","method":"stopVPN","params":{},"id":123}' $TO_URL
SUCCESS=$(echo $CALL_RES | jq '.result')
if [[ "$SUCCESS" != "true" ]]; then
log "Failed to stop VPN"
exit -1
fi
}
function mon_client_status
{
CLI_NAME=$(check_val "$1" "Need name")
TO_URL=$(check_val "$2" "Need url")
call_jsonrpc '{ "jsonrpc":"2.0","method" : "getConnections", "params" : { "showOnlyConnected":false}, "id" : 123 }' $TO_URL
CLI_STATUS=$(echo $CALL_RES | jq -r '.result.connections[]? |select(.name=="'$CLI_NAME'").status')
}
function mon_client_traffic
{
TO_FILE=$(check_val "$1" "Need filename")
FIRST_POS_ID=$(check_val "$2" "Need name for first position")
TO_URL=$(check_val "$3" "Need url")
call_jsonrpc '{ "jsonrpc":"2.0","method" : "getConnections", "params" : { "showOnlyConnected":false}, "id" : 123 }' $TO_URL/monitor
echo $CALL_RES | jq -r '.result.connections[]?|("'$FIRST_POS_ID'\(if .name == "server" then "HUB" else .name end),'$FIRST_POS_ID'\(.traffic.in|tonumber|floor)")' >> $TO_FILE
echo $CALL_RES | jq -r '.result.connections[]?|("'$FIRST_POS_ID$FIRST_POS_ID'\(if .name == "server" then "HUB" else .name end),\(.traffic.out|tonumber|floor)")' >> $TO_FILE
}
function mon_client_metrics
{
TO_URL=$(check_val "$1" "Need url")
call_jsonrpc '{ "jsonrpc":"2.0","method" : "getPerfObjects", "params" : {"filterType" : "", "filterMetric" : ""}, "id" : 123 }' $TO_URL/monitor
echo $CALL_RES | jq -r '.result[]?|(.metricName)' | sort | uniq
}
# connect to localhost
function LOCAL_call_authorize
{
AUTH_USERNAME=$(check_val "$1" "Need username - 1st argument")
AUTH_URL=$(check_val "$2" "Need URL - 1st argument")
AUTH_RES=""
TRY=0
while [ ${TRY} -ne 5 ] && [ "$AUTH_RES" = "" ] ; do
AUTH_RES=$(echo -e "getToken\t$AUTH_USERNAME" | socat UNIX-CONNECT:$AUTH_URL -)
if [[ $? != 0 ]]; then
log "Failed to Local Autorize"
exit -1
fi
if [[ "$AUTH_RES" = "" ]]; then
log "Failed to obtain auth token from LOCAL_AUTH - $TRY"
sleep 1
fi
((TRY=TRY+1))
done
if [[ ${TRY} -eq 5 ]]; then
log "Cannot connect to LOCAL_AUTH - 5 retries"
exit -1
fi
AUTH_TOKEN=$AUTH_RES
}
function mgmt_reset
{
TO_URL=$(check_val "$1" "Need url")
call_jsonrpc '{ "jsonrpc":"2.0","method" : "resetConfig", "params" : {}, "id" : 123 }' $TO_URL
}
function mgmt_get_version
{
TO_URL=$(check_val "$1" "Need url")
call_jsonrpc '{ "jsonrpc":"2.0","method" : "getVersion", "params" : {}, "id" : 123 }' $TO_URL
MGMT_VERSION=$(echo $CALL_RES | jq '.result')
}
function mgmt_get_op_mode
{
TO_URL=$(check_val "$1" "Need url")
call_jsonrpc '{ "jsonrpc":"2.0","method" : "getOperationalMode", "params" : {}, "id" : 123 }' $TO_URL
OPMODE=$(echo $CALL_RES | jq '.result')
OPMODE="${OPMODE%\"}"
OPMODE="${OPMODE#\"}"
if [[ "$OPMODE" == "NONE_FIRST" ]]; then
echo "Op Mode - FIRST!"
OPMODE="NONE"
fi
}
function mgmt_initCA
{
PARAMS=$(check_val "$1" "Need params as in spec")
TO_URL=$(check_val "$2" "Need url")
call_jsonrpc '{"jsonrpc":"2.0","method":"initCAasync","params":'"$PARAMS"',"id":123}' "$TO_URL"
PERCENT=1
while [[ "$PERCENT" != "100" ]]; do
mgmt_get_op_mode $TO_URL
log "mode is: $OPMODE"
call_jsonrpc '{ "jsonrpc":"2.0","method" : "initCAstatus", "params" : {}, "id" : 123 }' $TO_URL
PERCENT=$(echo $CALL_RES | jq '.result.percent')
log "$PERCENT%"
sleep 1
done
}
function mgmt_get_publicIP
{
TO_URL=$(check_val "$1" "Need url")
call_jsonrpc '{ "jsonrpc":"2.0","method" : "getServerPublicIP", "params" : {}, "id" : 123 }' $TO_URL
}
function mgmt_get_IPsettings
{
TO_URL=$(check_val "$1" "Need url")
call_jsonrpc '{ "jsonrpc":"2.0","method" : "getIPsettings", "params" : {}, "id" : 123 }' $TO_URL
}
function mgmt_archiveLogs
{
TO_URL=$(check_val "$1" "Need url")
call_jsonrpc '{ "jsonrpc":"2.0","method" : "archiveLogs", "params" : {}, "id" : 123 }' $TO_URL
}
function mgmt_get_logfile
{
TO_URL=$(check_val "$1" "Need url")
call_jsonrpc '{ "jsonrpc":"2.0","method" : "getArchivedLogs", "params" : {}, "id" : 123 }' $TO_URL
}
function mgmt_startSSH
{
TO_URL=$(check_val "$1" "Need url")
call_jsonrpc '{ "jsonrpc":"2.0","method" : "setSSH", "params" : { "serviceStarted":true, "serviceAutostart":false }, "id" : 123 }' $TO_URL
}
function mgmt_setPassword
{
TO_URL=$(check_val "$1" "Need url")
USER=$(check_val "$2" "Need username")
PASS=$(check_val "$3" "Need password")
call_jsonrpc '{ "jsonrpc":"2.0","method" : "setPassword", "params" : {"username":"'$USER'", "password":"'$PASS'"}, "id" : 123 }' $TO_URL
}
function main
{
# here is the filename of the client config file - edit if needed
CLIENT_CONFIG_FILENAME="$1"
VEEAMPN_TEST_LOG_FILE=/var/log/Veeam/test_log_file.log
echo "please check extended log at $VEEAMPN_TEST_LOG_FILE"
mkdir -p ${VEEAMPN_TEST_LOG_FILE%/*}
LOCAL_UNIX_SOCKET="/var/run/veeampn/auth_srv"
# port for management pf VeeamPN
LOCAL_PORT=12345
echo "Authenticating at local service"
# authenticate in local authentication service - it initialize the AUTH_TOKEN variable
LOCAL_call_authorize "localadmin" "$LOCAL_UNIX_SOCKET"
# local URL for all management calls (works only from inside of machine)
URL="http://localhost:$LOCAL_PORT"
echo "Resetting the appliance"
log "Resetting the appliance"
mgmt_reset "$URL/mgmt"
echo "re-Authenticating at local service"
log "re-Authenticating at local service"
# authenticate in local authentication service - it initialize the AUTH_TOKEN variable
LOCAL_call_authorize "localadmin" "$LOCAL_UNIX_SOCKET"
echo "Checking version"
# just in case let's test that management is responding
mgmt_get_version $URL/mgmt
echo "-VERSION=$VERSION-" $(echo $CALL_RES | jq '.result')
log "-VERSION=$VERSION-" $(echo $CALL_RES | jq '.result')
# OPMODE in newly setup machine should be INIT
echo "Checking mode"
mgmt_get_op_mode $URL/mgmt
echo "-CURRENT_MODE=$OPMODE-"
log "-CURRENT_MODE=$OPMODE-"
log "setting file: $CLIENT_CONFIG_FILENAME"
log "$URL/site_ovpn"
# cut off XML header
sed -i -e '/^<?xml/d' "$CLIENT_CONFIG_FILENAME"
echo "Setting config..."
# init VeeamPn with the config file
ovpn_init_client "$CLIENT_CONFIG_FILENAME" "$URL/site_ovpn"
mgmt_get_op_mode $URL/mgmt
echo "And now -CURRENT_MODE=$OPMODE-"
log "-CURRENT_MODE=$OPMODE-"
# let's start the service
# after this call client should start connecting
ovpn_startVPN "$URL/site_ovpn"
# wait until the connection is established
MAX_WAIT_TIME=60
COUNT=0
echo "Checking for client to connect to the server"
while [[ $COUNT -lt $MAX_WAIT_TIME ]]; do
echo "Try $COUNT"
mon_client_status "server" $URL/monitor
if [[ "$CLI_STATUS" == "Connected" ]]; then
echo "-Connected!-"
log "-Connected!-"
break
else
RES="false"
fi
((COUNT+=1))
sleep 1
done
if [[ "$CLI_STATUS" != "Connected" ]]; then
log "ERROR!!!"
log "Client didn't connect after $MAX_WAIT_TIME seconds of waiting"
echo "Client didn't connect after $MAX_WAIT_TIME seconds of waiting"
return -1
fi
return 0
}
if [[ "$1" == "" ]]; then
echo "Please supply config XML filename"
exit -1
fi
main $1
| true |
33177676689aef43ae06fcc19370834b5c42c9b5
|
Shell
|
sniperrifle2004/sniperrifle2004.github.io
|
/vba-rerecording/24.4.0/PKGBUILD
|
UTF-8
| 1,336 | 2.828125 | 3 |
[] |
no_license
|
pkgname=vba-rerecording
pkgver=24.4.0
pkgrel=1
pkgdesc='Re-recording version of VisualBoyAdvance with lua scripting'
arch=('i686' 'x86_64')
url='https://github.com/vba-rerecording/vba-rerecording'
license=('GPL')
depends=(
'lua51'
'sdl'
'libpng'
)
optdepends=()
makedepends=(
'bison'
'flex'
'cmake>=2.8.4'
)
backup=(
'etc/vba-rr.cfg'
)
source=("https://github.com/${pkgname}/${pkgname}/archive/${pkgver}.tar.gz")
sha256sums=('68a489168e68310fab6177a5221e0d519e3611c762d3920360b93e164087498d')
build() {
cd ${pkgname}-${pkgver}
mkdir -p build
cd build
cmake -DCMAKE_BUILD_TYPE=Release ..
# Patch LUA_INCLUDE_DIR to the proper location (hopefully this exact match will
# result in a noop once the cmake Lua51 macro is patched)
sed -i.org -e 's%^\(LUA_INCLUDE_DIR:PATH=/usr/include\)$%\1/lua5.1%' CMakeCache.txt
# Actually compile the package
make
}
package() {
# Use arch linux convention of installing all binaries in
# /usr/bin
mkdir -p ${pkgdir}/usr/bin
cp ${pkgname}-${pkgver}/build/vba-rr ${pkgdir}/usr/bin
# We must install this global config file in /etc
# since the binary will look for it there
#
# Manually install this since the make install
# of CMake (unfortunately) has to have access
# to /etc
mkdir ${pkgdir}/etc
cp ${pkgname}-${pkgver}/src/vba-rr.cfg ${pkgdir}/etc
}
| true |
15d7f46a10c9d3cb8cd8c6f8037b0d9b89b67d94
|
Shell
|
virginiadb/open-mccree
|
/scripts/nginx.sh
|
UTF-8
| 523 | 2.640625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
str="server {
listen 80;
server_name ${USER}.mccree.panda.tv;
charset utf8;
add_header 'Access-Control-Expose-Headers' 'Location';
root /home/${USER}/devspace/panda-mccree/packages/panda-mccree-live/;
location / {
index index.html;
}
}";
echo -e "$str" > ~/devspace/panda-mccree/server/${USER}_mccree_dev_ngx.conf;
sudo ln -s ~/devspace/panda-mccree/server/${USER}_mccree_dev_ngx.conf /usr/local/nginx/conf/include/${USER}_mccree_dev_ngx.conf;
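# Validate the generated config before reloading (nginx -t checks the configuration syntax):
sudo nginx -t || exit 1;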
sudo nginx -s reload;
| true |
3ec558819b6eec7fc8983b8b2600e0fe73f28f43
|
Shell
|
brofi/dotfiles
|
/lib/archlinux-utils.sh
|
UTF-8
| 1,620 | 4.21875 | 4 |
[] |
no_license
|
#!/bin/bash
#
# Collection of Arch Linux specific functions
url_aur=https://aur.archlinux.org
# Checks if given package is installed and prints error message if not
# $1 package to check if installed
function archlinux_is_installed {
if ! pacman -Q "$1" > /dev/null 2>&1; then
error "package '$1' is not installed."
return 1
fi
}
# Is true if http code begins with 2, or curl isn't installed
# $1 package name
function archlinux_exists_in_aur {
archlinux_is_installed "curl" || return 0
# s: operate in silent mode
# I: fetch header only
# L: follow if page moved
# f: fail silently
# o: output (header) to /dev/null
# w: print the http code
curl -sILfo /dev/null -w '%{http_code}' "$url_aur/packages/$1" \
| grep -q '^2'
}
# Prints all installed aur packages not existing anymore
function archlinux_print_missing_aur_packages {
for pkg in $(pacman -Qqm); do
if ! archlinux_exists_in_aur "$pkg"; then
echo "$pkg is missing!"
fi
done
}
# True if window manager present, false otherwise
# see: https://specifications.freedesktop.org/wm-spec/1.3/ar01s03.html
function archlinux_wm_check {
local child_id prop=_NET_SUPPORTING_WM_CHECK idP="0x[[:xdigit:]]\+"
# Get id of child window created by window manager
# If property not set, no WM is present
child_id=$(xprop -root $prop | grep -o "$idP") || return 1
# Child's prop must match child_id
[ "$child_id" = "$(xprop -id "$child_id" $prop | grep -o "$idP")" ]
# At this point we could query the WM name: xprop -id $child_id _NET_WM_NAME
}
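# Example usage (hypothetical, after sourcing this file):
#   source lib/archlinux-utils.sh
#   archlinux_wm_check && echo "WM running" || echo "no WM"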
| true |
e10a45edbbaff0770d4cddf1829025550f8b1249
|
Shell
|
1Blackdiamondsc/LitecoinNode
|
/raspbian/raspbian-remove-statuspage.sh
|
UTF-8
| 622 | 3.296875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#change working directory
cd $HOME
#remove website files
echo "Removing node status page website files"
rm -r -f -v $RASPBIAN_WEBSITE_DIR/banner.png
rm -r -f -v $RASPBIAN_WEBSITE_DIR/bootstrap.css
rm -r -f -v $RASPBIAN_WEBSITE_DIR/favicon.ico
rm -r -f -v $RASPBIAN_WEBSITE_DIR/style.css
rm -r -f -v $RASPBIAN_WEBSITE_DIR/index.html
#remove the nginx webserver
read -r -p "Do you want to remove the nginx webserver and all of its components? (Y/N) " ANSWER
echo
if [[ $ANSWER =~ ^([yY])$ ]]
then
apt-get purge nginx nginx-common -y
apt-get autoremove -y
#disable firewall rule
ufw delete allow 80/tcp
fi
| true |
b368c716340596a447d064baadd4cb280d17b64f
|
Shell
|
acupoints/devtools-boilerplate
|
/VirtualBox 6.1.16 安装 CentOS-8.2.2004 _.sh
|
UTF-8
| 2,209 | 3.015625 | 3 |
[] |
no_license
|
## VirtualBox 6.1.16 installing CentOS-8.2.2004
### Configuration before installing the OS
[<h>New]
**Name {CentOS-8.2.2004}
**Memory size {4096}+-
**Hard disk (*)Create a virtual hard disk now
**Hard disk file type (*)VDI (VirtualBox Disk Image)
**Storage on physical hard disk (*)Dynamically allocated
**File location and size {CentOS-8.2.2004}... ==[60.00GB]
[<h>Settings]
[<v>General][Advanced]
**Shared Clipboard --[Bidirectional]
**Drag'n'Drop --[Bidirectional]
[<v>System][Processor]
**Processor(s) ==[2]+-
[<v>Display][Screen]
**Video Memory ==[32MB]+-
[<v>Storage]
[Controller: IDE|Empty]
**Optical Drive [Choose a virtual optical disk file...]
[Controller: SATA]
[+]Use Host I/O Cache
[Controller: SATA|CentOS-8.2.2004.vdi]
[+]Solid-state Drive
[+]Hot-pluggable
[<v>Network][Adapter 1]
[+]Enable Network Adapter
**Attached to --[NAT]
[<v>Network][Adapter 2]
**Attached to --[Host-only Adapter]
### Dependencies required for the Guest Additions
#############################################
# dnf group install -y "Development Tools"
dnf install -y elfutils-libelf-devel
## If you see "Waiting for process with pid 7464 to finish", rebooting with init 6 resolves it
# Press Right-Ctrl to return to the host -> Devices -> Insert Guest Additions CD image, then run init 6 to reboot
### Test copy/paste; drag a text file into a directory, not onto the desktop
#############################################
### Configuration after installing the OS
#############################################
### After rebooting, the automatic boot countdown appears
### Automatic boot in 25 seconds...
### At this point open the Settings menu to remove the mounted ISO, then choose Reboot from the machine menu
Ethernet (enp0s3)
------------------
[Details]
[+]Connect automatically (Apply)
Ethernet (enp0s8)
------------------
[Details]
[+]Connect automatically (Apply)
[Privacy]
**Screen Lock {Off}
-------------------------Screen Lock
**Automatic Screen Lock ++[OFF]
**Show Notifications ++[OFF]
[Power]
**Blank screen --[Never]
#############################################
~~~
# cat /etc/redhat-release
CentOS Linux release 8.2.2004 (Core)
# ifconfig
enp0s3: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 10.0.2.15
enp0s8: flags=4163<UP,BROADCAST,RUNNING,MULTICAST> mtu 1500
inet 192.168.56.103
| true |
eb3032a03aec40119f82c7724ddc18167939c72e
|
Shell
|
xiebruce/PicUploader
|
/vendor/cloudinary/cloudinary_php/tools/allocate_test_cloud.sh
|
UTF-8
| 372 | 2.625 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
API_ENDPOINT="https://sub-account-testing.cloudinary.com/create_sub_account"
SDK_NAME="${1}"
CLOUD_DETAILS=$(curl -sS -d "{\"prefix\" : \"${SDK_NAME}\"}" "${API_ENDPOINT}")
echo ${CLOUD_DETAILS} | python -c 'import json,sys;c=json.load(sys.stdin)["payload"];print("cloudinary://%s:%s@%s" % (c["cloudApiKey"], c["cloudApiSecret"], c["cloudName"]))'
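# Equivalent extraction with jq, if python is unavailable (uses jq string interpolation):
#   echo "${CLOUD_DETAILS}" | jq -r '.payload | "cloudinary://\(.cloudApiKey):\(.cloudApiSecret)@\(.cloudName)"'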
| true |
f43630c03c7a57cc501624be6cb6a9cb82e868fb
|
Shell
|
GOTO-OBS/goto-vegas
|
/.ci/update.sh
|
UTF-8
| 417 | 3.09375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash -x
# Push the updated entries.csv and README.md file to GitHub
if [ -n "$GITHUB_API_KEY" ]; then
cd $TRAVIS_BUILD_DIR
cp entries.csv tmp/
cp README.md tmp/
cd tmp/
git add entries.csv
git add README.md
git -c user.name="referee" -c user.email="referee" commit -m "Updating scoreboard [ci skip]"
git push -q -f https://$GITHUB_USER:$GITHUB_API_KEY@github.com/goto-obs/goto-vegas master
fi
| true |
c0d09e13dcafc6f4583542af403f2a53748a5ca0
|
Shell
|
HankChow/12306-in-CLI
|
/auto_update.sh
|
UTF-8
| 237 | 2.828125 | 3 |
[] |
no_license
|
#!/bin/zsh
old_list=`ls | grep station`
python3 train.py -u
new_list=`ls | grep station`
if [ "$old_list" != "$new_list" ]
then
git rm $old_list
git add $new_list
git commit -m UPDATED_AUTOMATICALLY
git push origin master
fi
| true |
ba2c9c9415fb73c1e093c973beee22804e49fde5
|
Shell
|
bendevorg/diagonal-rush-backend
|
/tools/app.sh
|
UTF-8
| 656 | 3.578125 | 4 |
[] |
no_license
|
#!/bin/bash
APP_NAME="boilerplate"
# By default (without ENVIRONMENT set) we run as "dev"
if [ -z "${ENVIRONMENT}" ]
then
ENVIRONMENT="dev"
fi
#
# RUN PRODUCTION
#
run_prd() {
npm run prod
}
#
# RUN TESTS
#
run_test() {
# Install dev dependencies
npm i
npm run test && npm run coverage
}
#
# RUN LOCAL
#
run_local() {
# Install dev dependencies
npm i
npm run dev
}
echo -ne "\n\n##\n##\tRUNNING WITH ENVIRONMENT=\"${ENVIRONMENT}\"\n##\n\n"
if [ "${ENVIRONMENT}" == "prd" ]
then
run_prd
fi
if [ "${ENVIRONMENT}" == "test" ]
then
run_test
fi
if [ "${ENVIRONMENT}" == "dev" ]
then
run_local
fi
| true |
ecb61447ee07bbcb044a4de44692bcf6c07bb66c
|
Shell
|
davebaird/z5.stdlib
|
/sh/poettools
|
UTF-8
| 922 | 3.6875 | 4 |
[] |
no_license
|
#!/bin/bash
import errcho
import pathtools
PoetRootDir=
poet._import () {
local rootdir; rootdir="${1:-"$(path.rootdir.pwd .poet_root)"}"
poet.switchroot "$rootdir"
poet._build
}
poet.conf.get () {
pause_fn "$PoetRootDir $*"
[[ -z $* ]] && errxit "ERROR: no config target key supplied"
"$Z5_STDLIB/docker/d.start.poet.conf.get" "$PoetRootDir" "$@"
}
poet.switchroot () {
PoetRootDir="$(readlink --canonicalize-existing "$1")"
if [[ -z "$PoetRootDir" ]]; then
errcho "No Poet root supplied on import"
return 1
fi
if ! [[ -d "$PoetRootDir" ]]; then
errcho "Supplied Poet root '$1' is not a directory"
return 1
fi
}
poet._build () {(
cd "$Z5_STDLIB/docker/poet.conf.get" || return 1
# TODO when debugging is done, put this on docker hub and pull the image instead
docker build -t poet.conf.get . > /dev/null
)}
poet._import "$@"
| true |
67a97361551297838ca447fed13ae6d7a15a6b48
|
Shell
|
joshuakester66/netventory
|
/current_scripts/netventory.fdb.sh
|
UTF-8
| 14,391 | 3.671875 | 4 |
[] |
no_license
|
#!/bin/bash
#Filename: netventory.fdb.sh
#Description:
# This script gets the fdb table or mac address table for the given IP addresses via SNMP
# it then updates the database
#Requirements:
# Credentials file located at $HOME/scripts/.credentials
# Packages:
# mariadb-server/mysqld
# net-snmp
# net-snmp-utils
# procmail (lockfile command)
# bc
# read -p "Press any key to continue. " DEBUG #DEBUG
SCRIPT_NAME="netventory.fdb"
SCRIPT_CAT="netventory"
IPS=$1
mkdir -p $HOME/logs/$SCRIPT_CAT &> /dev/null
mkdir -p $HOME/scripts/tmp/$SCRIPT_NAME &> /dev/null
TODAY=`date +%Y-%m-%d`
NOW=`date +%Y-%m-%d\ %H:%M:%S`
LOG="$HOME/logs/$SCRIPT_CAT/$SCRIPT_NAME.log"
CREDENTIALS="$HOME/scripts/.credentials"
MYSQL_USERNAME=$(cat $CREDENTIALS | egrep ^mysql_username: | sed 's/mysql_username://g')
MYSQL_PASSWORD=$(cat $CREDENTIALS | egrep ^mysql_password: | sed 's/mysql_password://g')
SNMP_TABLE="snmp"
SNMP_TIMEOUT="5"
LOCAL_DATABASE="netventory"
DEVICE_TABLE="device"
ENDPOINT_TABLE="endpoint"
FDB_TABLE="fdb"
OID_TABLE="oid"
INTERFACE_TABLE="interface"
WORKING_DIR="$HOME/scripts/tmp/$SCRIPT_NAME"
SCRIPT_DIR="$HOME/scripts/$SCRIPT_CAT"
LOCK_FILE="$WORKING_DIR/$SCRIPT_NAME.lock"
FILE="$WORKING_DIR/file"
FDB_FILE="$WORKING_DIR/fdb"
DEVICES_PER_RUN="20"
COUNT_DIVISION="12"
MACS_PER_PORT="5"
#################################################
# COLORS
#################################################
Colors() {
ESCAPE="\033";
BLACKF="${ESCAPE}[30m";
REDF="${ESCAPE}[31m";
GREENF="${ESCAPE}[32m";
YELLOWF="${ESCAPE}[33m";
BLUEF="${ESCAPE}[34m";
PURPLEF="${ESCAPE}[35m";
CYANF="${ESCAPE}[36m";
WHITEF="${ESCAPE}[37m";
RESET="${ESCAPE}[0m";
}
Colors;
#################################################
CTRL_C ()
{
rm -f $LOCK_FILE &> /dev/null
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}File has been unlocked"${RESET} >> $LOG
echo ""
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${REDF}$SCRIPT_NAME.sh was cancelled"${RESET} >> $LOG
echo "===============================================================================================" >> $LOG
# reset
exit 99
}
SCRIPT_RUNNING ()
{
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${YELLOWF}Script is currently running. Exiting"${RESET} >> $LOG
echo "===============================================================================================" >> $LOG
exit 95
}
EXIT_FUNCTION ()
{
rm -f $LOCK_FILE &> /dev/null
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}File has been unlocked"${RESET} >> $LOG
echo ""
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${REDF}$SCRIPT_NAME.sh has exited for error $EXIT_CODE"${RESET} >> $LOG
echo "===============================================================================================" >> $LOG
# reset
exit $EXIT_CODE
}
GET_IPS ()
{
NUMBER_OF_DEVICES=`mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD --silent --skip-column-names -e "SELECT COUNT(ip) FROM $LOCAL_DATABASE.$DEVICE_TABLE WHERE snmp_enabled='1' AND ping > (NOW() - INTERVAL 30 DAY) AND category='network' ORDER BY fdb_check,ip;"`
COUNT_PER_RUN=$(expr $NUMBER_OF_DEVICES / $COUNT_DIVISION)
if [[ "$COUNT_PER_RUN" == "0" ]]
then
COUNT_PER_RUN=$NUMBER_OF_DEVICES
fi
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}$NUMBER_OF_DEVICES total eligible devices found"${RESET} >> $LOG
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Getting up to $COUNT_PER_RUN device IPs"${RESET} >> $LOG
# IPS=`mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD --silent --skip-column-names -e "SELECT ip FROM $LOCAL_DATABASE.$DEVICE_TABLE WHERE snmp_enabled='1' AND ping > (NOW() - INTERVAL 30 DAY) AND category='network' AND (type='switch' OR type='firewall' OR type='router') ORDER BY vlan_check LIMIT $COUNT_PER_RUN;"`
IPS=`mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD --silent --skip-column-names -e "SELECT ip FROM $LOCAL_DATABASE.$DEVICE_TABLE WHERE snmp_enabled='1' AND ping > (NOW() - INTERVAL 30 DAY) AND category='network' ORDER BY fdb_check,ip LIMIT $COUNT_PER_RUN;"`
# echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Getting the device IPs"${RESET} >> $LOG
# NUMBER_OF_IPS=`mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD --silent --skip-column-names -e "SELECT COUNT(ip) FROM $LOCAL_DATABASE.$DEVICE_TABLE WHERE snmp_enabled='1' AND ping > (NOW() - INTERVAL 30 DAY) AND category='network' ORDER BY fdb_check,ip;"`
# IPS=`mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD --silent --skip-column-names -e "SELECT ip FROM $LOCAL_DATABASE.$DEVICE_TABLE WHERE snmp_enabled='1' AND ping > (NOW() - INTERVAL 30 DAY) AND category='network' ORDER BY fdb_check,ip LIMIT $DEVICES_PER_RUN;"`
}
GET_DEVICE_ID ()
{
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Getting the device ID for $IP"${RESET} >> $LOG
DEVICE_ID=
DEVICE_ID=`mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD --silent --skip-column-names -e "SELECT id FROM $LOCAL_DATABASE.$DEVICE_TABLE WHERE ip='$IP';"`
if [[ -z "$DEVICE_ID" ]]
then
ENDPOINT_ID=`mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD --silent --skip-column-names -e "SELECT id FROM $LOCAL_DATABASE.$ENDPOINT_TABLE WHERE ip='$IP';"`
fi
if [[ -n "$ENDPOINT_ID" ]]
then
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${YELLOWF}$IP is an endpoint. Moving on"${RESET} >> $LOG
continue
fi
if [[ -z "$DEVICE_ID" ]]
then
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}$IP does not exist in the database yet. Adding it"${RESET} >> $LOG
ADD_DEVICE
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Getting the new device ID"${RESET} >> $LOG
DEVICE_ID=`mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD --silent --skip-column-names -e "SELECT id FROM $LOCAL_DATABASE.$DEVICE_TABLE WHERE ip='$IP';"`
fi
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Device ID: $DEVICE_ID"${RESET} >> $LOG
}
ADD_DEVICE ()
{
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Adding the device to the $DEVICE_TABLE table"${RESET} >> $LOG
mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD $LOCAL_DATABASE << EOF
INSERT INTO $DEVICE_TABLE(ip,ping_check,ping,updated,added) VALUES('$IP',NOW(),NOW(),NOW(),NOW());
EOF
}
GET_IDS ()
{
OID_ID=
OID_FDB=
OID_ID=`mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD --silent --skip-column-names -e "SELECT oid_id FROM $LOCAL_DATABASE.$DEVICE_TABLE WHERE id='$DEVICE_ID';"`
OID_FDB=`mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD --silent --skip-column-names -e "SELECT fdb FROM $LOCAL_DATABASE.$OID_TABLE WHERE id='$OID_ID';"`
}
GET_SNMP_CREDS ()
{
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Getting the SNMP credentials for $IP"${RESET} >> $LOG
# read -p "Press any key to continue. Device ID: $DEVICE_ID " DEBUG
SNMP_ARRAY=(`mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD --silent --skip-column-names -e "SELECT $SNMP_TABLE.community,$SNMP_TABLE.authlevel,$SNMP_TABLE.authname,$SNMP_TABLE.authpass,$SNMP_TABLE.authalgo,$SNMP_TABLE.cryptopass,$SNMP_TABLE.cryptoalgo,$SNMP_TABLE.version,$SNMP_TABLE.port FROM $LOCAL_DATABASE.$SNMP_TABLE LEFT JOIN $LOCAL_DATABASE.$DEVICE_TABLE ON $SNMP_TABLE.id=$DEVICE_TABLE.snmp_id WHERE $DEVICE_TABLE.id='$DEVICE_ID';"`)
SNMP_COMMUNITY=${SNMP_ARRAY[0]}
SNMP_AUTHLEVEL=${SNMP_ARRAY[1]}
SNMP_AUTHNAME=${SNMP_ARRAY[2]}
SNMP_AUTHPASS=${SNMP_ARRAY[3]}
SNMP_AUTHALGO=${SNMP_ARRAY[4]}
SNMP_CRYPTOPASS=${SNMP_ARRAY[5]}
SNMP_CRYPTOALGO=${SNMP_ARRAY[6]}
SNMP_VERSION=${SNMP_ARRAY[7]}
SNMP_PORT=${SNMP_ARRAY[8]}
if [[ "$SNMP_VERSION" == "3" ]]
then
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${YELLOWF}SNMP Version 3 needs to be fixed"${RESET} >> $LOG
fi
# read -p "Press any key to continue. IP: $IP " DEBUG
}
GET_FILE ()
{
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Getting the fdb file"${RESET} >> $LOG
snmpwalk -t $SNMP_TIMEOUT -v $SNMP_VERSION -c $SNMP_COMMUNITY $IP $OID_FDB | egrep --color -o '\.[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s=\s.+$' | sed 's/\s=\sINTEGER: /;;/g' | sed 's/^\.//g' > $FDB_FILE.$IP
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Checking for uplinks"${RESET} >> $LOG
PORTS=$(cat $FDB_FILE.$IP | sed 's/^.*;;//g' | sort --unique)
for PORT in $PORTS
do
PORT_COUNT=$(cat $FDB_FILE.$IP | egrep --color=never ";;$PORT$" | wc -l)
if [[ "$PORT_COUNT" -gt "$MACS_PER_PORT" ]]
then
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${YELLOWF}Removing Port $PORT from the file since it has $PORT_COUNT macs and only $MACS_PER_PORT are allowed"${RESET} >> $LOG
sed -i "/;;$PORT\$/d" $FDB_FILE.$IP
INTERFACE_ID=`mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD --silent --skip-column-names -e "SELECT id FROM $LOCAL_DATABASE.$INTERFACE_TABLE WHERE device_id='$DEVICE_ID' AND port='$PORT';"`
if [[ -n "$INTERFACE_ID" ]]
then
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Updating $INTERFACE_ID in the $INTERFACE_TABLE table as an uplink"${RESET} >> $LOG
mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD $LOCAL_DATABASE << EOF
UPDATE $INTERFACE_TABLE SET uplink='Y',updated=NOW() WHERE id='$INTERFACE_ID';
EOF
fi
fi
done
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${YELLOWF}Removing port 0 since that is the system MAC address for the device"${RESET} >> $LOG
sed -i '/;;0$/d' $FDB_FILE.$IP
}
GET_INFO_UPDATE ()
{
MAC_DECIMAL=
MAC_DECIMALS=$(cat $FDB_FILE.$IP | egrep --color -o '.+;;' | sed 's/;//g')
for MAC_DECIMAL in $MAC_DECIMALS
do
PORT=
INTERFACE_ID=
MAC=
MAC_TEMP=
MAC_DECIMAL_TEMP=
PORT=$(cat $FDB_FILE.$IP | egrep --color=never "^$MAC_DECIMAL;;" | egrep --color=never -o ';;.+$' | sed 's/;//g')
INTERFACE_ID=`mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD --silent --skip-column-names -e "SELECT id FROM $LOCAL_DATABASE.$INTERFACE_TABLE WHERE device_id='$DEVICE_ID' AND port='$PORT';"`
if [[ -z "$INTERFACE_ID" ]]
then
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${YELLOWF}Skipping $PORT since there is no matching interface in the database"${RESET} >> $LOG
continue
fi
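# Convert the dotted-decimal OID suffix to a colon-separated hex MAC address,
# e.g. "0.38.171.16.254.1" -> "00:26:ab:10:fe:01"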
MAC_DECIMAL_TEMP=$(echo $MAC_DECIMAL | sed 's/\./ /g')
for MAC_TEMP in $MAC_DECIMAL_TEMP
do
MAC_TEMP=$(echo "obase=16; $MAC_TEMP" | bc)
MAC_TEMP_COUNT=$(echo -n "$MAC_TEMP" | wc -c)
if [[ "$MAC_TEMP_COUNT" -le "1" ]]
then
MAC_TEMP="0$MAC_TEMP"
fi
if [[ -z "$MAC" ]]
then
MAC="$MAC_TEMP"
else
MAC="$MAC:$MAC_TEMP"
fi
done
MAC="${MAC,,}"
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ===" >> $LOG
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}IP: $IP"${RESET} >> $LOG
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Port: $PORT"${RESET} >> $LOG
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Device ID: $DEVICE_ID"${RESET} >> $LOG
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Interface ID: $INTERFACE_ID"${RESET} >> $LOG
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}MAC Address: $MAC"${RESET} >> $LOG
UPDATE_MYSQL
done
}
UPDATE_MYSQL ()
{
FDB_ID=
FDB_ID=`mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD --silent --skip-column-names -e "SELECT id FROM $LOCAL_DATABASE.$FDB_TABLE WHERE device_id='$DEVICE_ID' AND interface_id='$INTERFACE_ID' AND mac='$MAC';"`
if [[ -z "$FDB_ID" ]]
then
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Adding $MAC to the database"${RESET} >> $LOG
mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD $LOCAL_DATABASE << EOF
INSERT INTO $FDB_TABLE(device_id,interface_id,mac,updated,added) VALUES('$DEVICE_ID','$INTERFACE_ID','$MAC',NOW(),NOW());
EOF
else
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Updating $MAC in the database"${RESET} >> $LOG
mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD $LOCAL_DATABASE << EOF
UPDATE $FDB_TABLE SET updated=NOW() WHERE id='$FDB_ID';
EOF
fi
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Updating $INTERFACE_ID in the $INTERFACE_TABLE table as a non-uplink"${RESET} >> $LOG
mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD $LOCAL_DATABASE << EOF
UPDATE $INTERFACE_TABLE SET uplink='N',updated=NOW() WHERE id='$INTERFACE_ID';
EOF
}
UPDATE_DEVICE ()
{
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Updating the $DEVICE_TABLE table with ID $DEVICE_ID"${RESET} >> $LOG
mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD $LOCAL_DATABASE << EOF
UPDATE $DEVICE_TABLE SET fdb_check=NOW(),ping_check=NOW(),ping=NOW() WHERE id='$DEVICE_ID';
EOF
}
UPDATE_DEVICE_NO_PING ()
{
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Updating the $DEVICE_TABLE table for $IP"${RESET} >> $LOG
mysql --user=$MYSQL_USERNAME --password=$MYSQL_PASSWORD $LOCAL_DATABASE << EOF
UPDATE $DEVICE_TABLE SET fdb_check=NOW(),updated=NOW(),ping_check=NOW() WHERE ip='$IP';
EOF
}
trap CTRL_C SIGINT
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${GREENF}$SCRIPT_NAME.sh was started by $USER"${RESET} >> $LOG
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Attempting to lock the file for execution"${RESET} >> $LOG
lockfile -r 0 $LOCK_FILE &> /dev/null || SCRIPT_RUNNING
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}File has been locked"${RESET} >> $LOG
if [[ -z "$IPS" ]]
then
GET_IPS
fi
if [[ -z "$IPS" ]]
then
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${REDF}No IPs or Hostnames provided"${RESET} >> $LOG
echo
# echo -e ${REDF}"No IPs or Hostnames provided. This script is exiting"${RESET}
EXIT_CODE="85"
EXIT_FUNCTION
# else
# echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Checking $DEVICES_PER_RUN devices out of $NUMBER_OF_IPS"${RESET} >> $LOG
fi
for IP in $IPS
do
if ping -c 1 -W 1 -i 0.2 $IP &> /dev/null
then
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Working on $IP"${RESET} >> $LOG
GET_DEVICE_ID
GET_IDS
GET_SNMP_CREDS
GET_FILE
GET_INFO_UPDATE
UPDATE_DEVICE
else
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}$IP is not reachable. Moving on"${RESET} >> $LOG
UPDATE_DEVICE_NO_PING
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ==========" >> $LOG
fi
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ==========" >> $LOG
done
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${CYANF}Unlocking the file"${RESET} >> $LOG
rm -f $LOCK_FILE &> /dev/null
echo -e "`date +%b\ %d\ %Y\ %H:%M:%S`: ${GREENF}$SCRIPT_NAME.sh finished"${RESET} >> $LOG
echo "===============================================================================================" >> $LOG
exit
| true |
10fc859fb50f8395a3ff1234baafee584146c5ba
|
Shell
|
lynxbat/foo
|
/scripts/run_tests_with_docker.sh
|
UTF-8
| 1,052 | 2.953125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
#http://www.apache.org/licenses/LICENSE-2.0.txt
#
#
#Copyright 2015 Intel Corporation
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
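# Usage (the token value is supplied by the caller):
#   ./scripts/run_tests_with_docker.sh <github-token>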
die() {
echo >&2 $@
exit 1
}
if [ $# -ge 1 ]; then
GIT_TOKEN=$1
fi
if [ -z "${GIT_TOKEN}" ]; then
  die "arg missing: a GitHub token is required so we can clone a private repo"
fi
sed s/\<GIT_TOKEN\>/${GIT_TOKEN}/ scripts/Dockerfile > scripts/Dockerfile.tmp
docker build -t intelsdi-x/snap-test -f scripts/Dockerfile.tmp .
rm scripts/Dockerfile.tmp
docker run -it intelsdi-x/snap-test scripts/test.sh
| true |
e4d292139f4f4defe3faf778d838ff0b2c21b6aa
|
Shell
|
mgcfish/pentesting-cookbook
|
/bootstrap/kali.deps.extra.kalitorify.sh
|
UTF-8
| 266 | 2.828125 | 3 |
[] |
no_license
|
HOMEDIR=$(grep -e "^$USERNAME:" /etc/passwd | cut -d: -f6)
LIBDIR=$HOMEDIR/lib
apt-get install -y tor curl
sudo -u $USERNAME -i <<EOF
mkdir -p $LIBDIR
cd $LIBDIR
git clone https://github.com/brainfucksec/kalitorify
EOF
cd $LIBDIR/kalitorify/
make install
| true |
361450d5fee3ffbb2a4273a54d09cafca6718e3d
|
Shell
|
vitessio/vitess
|
/web/vtadmin/build.sh
|
UTF-8
| 1,637 | 3.125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2023 The Vitess Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
function output() {
echo -e "$@"
}
script_dir="$(dirname "${BASH_SOURCE[0]:-$0}")"
source "${script_dir}/../../build.env"
web_dir="${script_dir}"
vtadmin_api_port=14200
# Download nvm and node
if [[ -z ${NVM_DIR} ]]; then
export NVM_DIR="$HOME/.nvm"
fi
if [[ -z ${NODE_VERSION} ]]; then
export NODE_VERSION="18.16.0"
fi
output "\nInstalling nvm...\n"
if [ -d "$NVM_DIR" ]; then
output "\033[1;32mnvm is already installed!\033[0m"
else
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.0/install.sh | bash && output "\033[1;32mnvm is installed!\033[0m" || fail "\033[1;32mnvm failed to install!\033[0m"
fi
source "$NVM_DIR/nvm.sh"
output "\nConfiguring Node.js $NODE_VERSION\n"
nvm install "$NODE_VERSION" || fail "Could not install and use nvm $NODE_VERSION."
npm --prefix "$web_dir" --silent install
export PATH=$PATH:$web_dir/node_modules/.bin/
VITE_VTADMIN_API_ADDRESS="http://${hostname}:${vtadmin_api_port}" \
VITE_ENABLE_EXPERIMENTAL_TABLET_DEBUG_VARS="true" \
npm run --prefix "$web_dir" build
| true |
1ab6cf6457582bef67b66a44b772c69710fb6380
|
Shell
|
jrmarino/DPorts-Raven
|
/www/havp/files/pkg-install.in
|
UTF-8
| 1,166 | 3.40625 | 3 |
[] |
no_license
|
#!/bin/sh
# $FreeBSD: head/www/havp/files/pkg-install.in 340719 2014-01-22 15:52:06Z mat $
PREFIX=${PKG_PREFIX:-%%PREFIX%%}
HAVP_USER=havp
HAVP_GROUP=havp
LOG_DIR=%%LOG_DIR%%
TMP_DIR=%%TMP_DIR%%
RUN_DIR=%%RUN_DIR%%
if [ "$2" = "PRE-INSTALL" ]; then
if ! pw groupshow "$HAVP_GROUP" 2>/dev/null 1>&2; then
if pw groupadd $HAVP_GROUP; then
echo "=> Added group \"$HAVP_GROUP\"."
else
echo "=> Adding group \"$HAVP_GROUP\" failed..."
exit 1
fi
fi
if ! pw usershow "$HAVP_USER" 2>/dev/null 1>&2; then
if pw useradd $HAVP_USER -g $HAVP_GROUP -h - \
-s "/sbin/nologin" -d "/nonexistent" \
-c "havp daemon"; \
then
echo "=> Added user \"$HAVP_USER\"."
else
echo "=> Adding user \"$HAVP_USER\" failed..."
exit 1
fi
fi
elif [ "$2" = "POST-INSTALL" ]; then
if [ ! -d "$LOG_DIR" ]; then
mkdir -p "$LOG_DIR" || exit 1
chown "$HAVP_USER:$HAVP_GROUP" "$LOG_DIR" || exit 1
fi
if [ ! -d "$TMP_DIR" ]; then
mkdir -p "$TMP_DIR" || exit 1
chown "$HAVP_USER:$HAVP_GROUP" "$TMP_DIR" || exit 1
fi
if [ ! -d "$RUN_DIR" ]; then
mkdir -p "$RUN_DIR" || exit 1
chown "$HAVP_USER:$HAVP_GROUP" "$RUN_DIR" || exit 1
fi
fi
exit 0
| true |
a3e9a26aaa0e8d791b4d75b57d5d70bb511ce525
|
Shell
|
netsil/app-demo
|
/run-webservers.sh
|
UTF-8
| 1,201 | 3.421875 | 3 |
[] |
no_license
|
#!/bin/bash
webserver=$1
if [ "$webserver" = "apache" ] ; then
APACHE_PORT=${APACHE_PORT:-8081}
WEB_APP_SERVER_PORT=${WEB_APP_SERVER_PORT:-8080}
if [ -z "${WEB_APP_HOST}" ] ; then
echo "Error! You must specify WEB_APP_HOST"
exit 1
fi
sudo docker run -td \
--name apache-app \
-e WEB_APP_HOST=${WEB_APP_HOST} \
-e WEB_APP_SERVER_PORT=${WEB_APP_SERVER_PORT} \
-p ${APACHE_PORT}:80 \
netsil/apache-app
elif [ "$webserver" = "haproxy" ] ; then
HAPROXY_PORT=${HAPROXY_PORT:-8082}
APACHE_PORT=${APACHE_PORT:-8081}
if [ -z "${APACHE_HOST}" ] ; then
echo "Error! You must specify APACHE_HOST"
exit 1
fi
sudo docker run -td \
--name haproxy-app \
-e APACHE_HOST=${APACHE_HOST} \
-e APACHE_PORT=${APACHE_PORT} \
-p ${HAPROXY_PORT}:80 \
netsil/haproxy-app
elif [ "$webserver" = "web-app" ] ; then
export WEB_APP_SERVER_PORT=${WEB_APP_SERVER_PORT:-8080}
python simple-http-server.py
else
echo "Error! Webserver $webserver not recognized! Please specify 'apache', 'haproxy', or 'web-app'"
exit 1
fi
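
# Example invocations (host addresses are illustrative):
#   WEB_APP_HOST=10.0.0.5 ./run-webservers.sh apache
#   APACHE_HOST=10.0.0.5 ./run-webservers.sh haproxy
#   ./run-webservers.sh web-app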
| true |
dacad3d6e73619a42aedec728cb9f122de32a3d3
|
Shell
|
rafaelfschmid/scheduling
|
/scripts/results.sh
|
UTF-8
| 339 | 3.234375 | 3 |
[] |
no_license
|
#!/bin/bash
prog1=$1 #program to generate results
dir1=$2 #input files dir
dir2=$3 #result files dir
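# Example (program and directory names are illustrative):
#   ./results.sh scheduler ./inputs ./results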
for i in 1 2 4 8 16 ; do
((t = $i * 1024))
((m = $i * 32))
for j in `seq 1 20` ; do
./${prog1} $t $m < ${dir1}/etc_c_${t}x${m}_hihi_${j}.dat > ${dir2}/${t}x${m}_${j}.out
echo etc_c_${t}x${m}_hihi_${j}.dat
done
echo " "
done
| true |
8c074610c7dfa88e1e29426ec41bbcc33f0ad1a1
|
Shell
|
stfc/ceph-performance-data
|
/dd/slave.sh
|
UTF-8
| 1,063 | 3.453125 | 3 |
[] |
no_license
|
#!/bin/bash
DATE=$(date "+%Y-%m-%d %H:%M:%S")
BS=$1
COUNT=$2
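# e.g. ./slave.sh 4M 256 writes then reads a 1 GiB tempfile (values are illustrative)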
FOLDER="./logs/${BS}_bs_logs"
echo "Running Test $BS $COUNT"
if ! [ -e $FOLDER ] ; then
mkdir $FOLDER
fi
wname="./$FOLDER/write_${BS}_${COUNT}.log"
rname="./$FOLDER/read_${BS}_${COUNT}.log"
uname="${BS}_${COUNT}.log"
if [[ -e $wname ]] ; then
i=2
while [[ -e "./$FOLDER/write_${BS}_${COUNT}-$i.log" ]] ; do
let i++
done
wname="./$FOLDER/write_${BS}_${COUNT}-$i.log"
fi
if [[ -e $rname ]] ; then
j=2
while [[ -e "./$FOLDER/read_${BS}_${COUNT}-$j.log" ]] ; do
let j++
done
rname="./$FOLDER/read_${BS}_${COUNT}-$j.log"
fi
echo "Time:$DATE" >> $wname
echo "Blocksize:$BS" >> $wname
echo "Count:$COUNT" >> $wname
echo 3 > /proc/sys/vm/drop_caches
dd if=/dev/zero of=./tempfile bs="$BS" count=$COUNT oflag=direct >> $wname 2>&1
echo "Time:$DATE" >> $rname
echo "Blocksize:$BS" >> $rname
echo "Count:$COUNT" >> $rname
echo 3 > /proc/sys/vm/drop_caches
dd if=./tempfile of=/dev/null bs="$BS" count=$COUNT status=progress >> $rname 2>&1
rm ./tempfile
| true |
a9ac0608c77f06d2a3e8e92179401d1471387f2f
|
Shell
|
arcticicestudio/igloo
|
/snowblocks/zsh/pkgs/git/config-base.zsh
|
UTF-8
| 2,623 | 3.328125 | 3 |
[
"MIT"
] |
permissive
|
# Copyright (c) 2016-present Arctic Ice Studio <development@arcticicestudio.com>
# Copyright (c) 2016-present Sven Greb <development@svengreb.de>
# Project: igloo
# Repository: https://github.com/arcticicestudio/igloo
# License: MIT
# Loads configurations for Git's prompt support when the script has been loaded in the current shell session.
# See:
# 1. https://git-scm.com/book/en/v2/Appendix-A%3A-Git-in-Other-Environments-Git-in-Zsh
# 2. https://github.com/git/git/blob/master/contrib/completion/git-prompt.sh
if (( ${+functions[__git_ps1]} )); then
# Show more information about the identity of commits checked out as a detached HEAD.
# Control the behavior by setting one of these values:
# contains Relative to newer annotated tag (v1.6.3.2~35)
# branch Relative to newer tag or branch (master~4)
# describe Relative to older annotated tag (v1.6.3.1-13-gdd42c2f)
# tag Relative to any older tag (v1.6.3.1-13-gdd42c2f)
# default Exactly matching tag
export GIT_PS1_DESCRIBE_STYLE=describe
# Disable __git_ps1 output when the current working directory is set up to be ignored by git.
# Also configurable per repository via `bash.hideIfPwdIgnored`.
export GIT_PS1_HIDE_IF_PWD_IGNORED=
# Show colored hints about the current dirty state.
# The colors are based on the colored output of `git status -sb`.
# NOTE: Only available when using __git_ps1 via ZSH's `precmd` hook function!
export GIT_PS1_SHOWCOLORHINTS=
# Show unstaged (*) and staged (+) changes.
# Also configurable per repository via `bash.showDirtyState`.
export GIT_PS1_SHOWDIRTYSTATE=true
# Show currently stashed ($) changes.
export GIT_PS1_SHOWSTASHSTATE=
# Show untracked (%) changes.
# Also configurable per repository via `bash.showUntrackedFiles`.
export GIT_PS1_SHOWUNTRACKEDFILES=true
# Show indicator for difference between HEAD and its upstream.
#
# < Behind upstream
# > Ahead upstream
# <> Diverged upstream
# = Equal upstream
#
# Control behavior by setting to a space-separated list of values:
# auto Automatically show indicators
# verbose Show number of commits ahead/behind (+/-) upstream
# name If verbose, then also show the upstream abbrev name
# legacy Do not use the `--count` option available in recent versions of git-rev-list
# git Always compare HEAD to `@{upstream}`
# svn Always compare HEAD to your SVN upstream
#
# By default, `__git_ps1` will compare HEAD to SVN upstream (`@{upstream}` if not available).
# Also configurable per repository via `bash.showUpstream`.
export GIT_PS1_SHOWUPSTREAM=(auto name verbose)
fi
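
# One possible wiring (assumes git-prompt.sh has been sourced; the prompt strings are illustrative):
#   precmd () { __git_ps1 "%n@%m:%~" " %# " " (%s)" }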
| true |
fa1898c085c0c1c2a575910033f000443e4b497d
|
Shell
|
praetore/MinecraftDeployment
|
/run-minecraft-client.sh
|
UTF-8
| 343 | 3.03125 | 3 |
[] |
no_license
|
#!/bin/sh
command -v java >/dev/null 2>&1 || { sudo apt-get install openjdk-7-jre; }
# Prepare MC client jar
if [ ! -f ./Minecraft.jar ];
then
wget https://s3.amazonaws.com/Minecraft.Download/launcher/Minecraft.jar;
fi
if [ ! -x ./Minecraft.jar ];
then
chmod +x Minecraft.jar;
fi
# Start Minecraft client
java -jar Minecraft.jar
| true |
deb955713c39d9bca93acc823386afc98c11d7de
|
Shell
|
tsward6/cs753_team2_assignment2
|
/prog2.sh
|
UTF-8
| 911 | 3.59375 | 4 |
[] |
no_license
|
#!/bin/bash
#
# CS753 Programming Assignment 2 main script, Team 2
#
# This script runs the necessary steps for parts 1 and 2 of programming
# assignment 2.
#
# 1) maven compile
# 2) run the jar file maven created
# 3) run the python script that uses pytrec_eval to get results for part 2
#
#
# remove program created directories (if they exist)
rm -rf ./run_files
rm -rf ./paragraphs
# create a run_files directory
mkdir ./run_files
# maven compile command
mvn clean compile assembly:single
if [ $? -ne 0 ]; then
echo "Maven compile failed"
exit 1
fi
# run the jar file maven created
java -jar target/team2_1-1.0-SNAPSHOT-jar-with-dependencies.jar
if [ $? -ne 0 ]; then
echo "Error accessing maven SNAPSHOT jar file"
exit 1
fi
# run the python script that uses pytrec_eval to get results for part 2
python3 part2.py
if [ $? -ne 0 ]; then
echo "Error using pytrec_eval script (for part2)"
exit 1
fi
| true |
da80eb2959f8d32d71d8e29a889ce437cf3cedd6
|
Shell
|
minjun-jang/ocpinstall
|
/99.download/tools_download.sh
|
UTF-8
| 2,085 | 3.09375 | 3 |
[] |
no_license
|
#!/bin/bash
ABSOLUTE_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "${ABSOLUTE_PATH}/../00.prepare/config/openshift.env"
OPENSHIFT_RELEASE=4.5.1
#OPENSHIFT_RELEASE=$(curl -s https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/${OPENSHIFT_VERSION}/latest/sha256sum.txt | cut -d "-" -f2 | uniq)
#BUILD_VERSION=$(curl -s https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/release.txt | grep 'Name:' | awk '{print $NF}')
BUILD_VERSION=${OPENSHIFT_RELEASE}
echo OPENSHIFT_RELEASE=${OPENSHIFT_RELEASE}
echo BUILD_VERSION=${BUILD_VERSION}
echo -e "\033[32m[S]=============================================================================\033[0m"
echo -e "\033[46m@@@[S]_[TOOLS DOWNLOAD]\033[0m"
echo -e "----- [installer download]"
wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/${BUILD_VERSION}/openshift-install-linux-${BUILD_VERSION}.tar.gz -P ${ABSOLUTE_PATH}/install
echo -e "----- [client download]"
wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/${BUILD_VERSION}/openshift-client-linux-${BUILD_VERSION}.tar.gz -P ${ABSOLUTE_PATH}/client/
echo -e "----- [rhcos download]"
#wget https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/${OPENSHIFT_VERSION}/${BUILD_VERSION}/rhcos-${OPENSHIFT_RELEASE}-x86_64-installer.x86_64.iso -P ${ABSOLUTE_PATH}/rhcos/
wget https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/${OPENSHIFT_VERSION}/${BUILD_VERSION}/rhcos-${OPENSHIFT_RELEASE}-x86_64-metal.x86_64.raw.gz -P ${ABSOLUTE_PATH}/rhcos/
#echo -e "----- [unzip tools --> /usr/local/bin]"
#tar -xzf ${ABSOLUTE_PATH}/client/openshift-client-linux-${BUILD_VERSION}.tar.gz -C /usr/local/bin/
#tar -xzf ${ABSOLUTE_PATH}/install/openshift-install-linux-${BUILD_VERSION}.tar.gz -C /usr/local/bin/
#rm -rf ${ABSOLUTE_PATH}/install/openshift-install-linux-*
#rm -rf ${ABSOLUTE_PATH}/client/openshift-client-linux-*
#rm -rf /usr/local/bin/README.md
echo -e "\033[36m@@@[E]_[TOOLS DOWNLOAD]\033[0m"
echo -e "\033[32m=============================================================================[E]\033[0m"
| true |
8f847792bbec6beac827bb8dff2e0cd53ffa58e5
|
Shell
|
petronny/aur3-mirror
|
/wolfclient-git/PKGBUILD
|
UTF-8
| 1,065 | 2.96875 | 3 |
[] |
no_license
|
# Contributor: Andreas Baumann <abaumann at yahoo dot com>
# Maintainer: Andreas Baumann <abaumann at yahoo dot com>
pkgname=wolfclient-git
pkgver=20140620
pkgrel=1
pkgdesc="Client for Wolframe."
license=('GPL3')
arch=('i686' 'x86_64' 'armv6h')
url="http://wolframe.net/"
depends=('qt4')
makedepends=('git' 'mesa-libgl' 'gdb')
_gitroot=git://github.com/Wolframe/wolfclient.git
_gitname=wolfclient
package() {
cd ${srcdir}/$_gitname-build
	# regenerate Makefiles here, as files have to exist in order for
# the install rules to be generated
qmake-qt4 -config debug -recursive PREFIX=/usr LIBDIR=/usr/lib/wolframe
msg "Installing.."
make install INSTALL_ROOT=${pkgdir}
}
build() {
cd ${srcdir}
msg "Getting source from git..."
if [ -d ${srcdir}/$_gitname ] ; then
cd $_gitname && git pull origin
else
git clone $_gitroot
fi
cp -r ${srcdir}/$_gitname ${srcdir}/$_gitname-build
cd ${srcdir}/$_gitname-build
msg "Generating makefiles.."
qmake-qt4 -config debug -recursive PREFIX=/usr LIBDIR=/usr/lib/wolframe
msg "Building..."
make
}
| true |
b7fd3729c7d7d93984cc1e93472b53fafb877adf
|
Shell
|
chloeandisabel/heroku-buildpack-gulp
|
/lib/build.sh
|
UTF-8
| 7,631 | 3.578125 | 4 |
[] |
no_license
|
error() {
echo " ! $*" >&2
echo ""
return 1
}
head() {
echo ""
echo "-----> $*"
}
info() {
#echo "`date +\"%M:%S\"` $*"
echo " $*"
}
file_contents() {
if test -f $1; then
echo "$(cat $1)"
else
echo ""
fi
}
assert_json() {
local file=$1
if test -f $file; then
if ! cat $file | $bp_dir/vendor/jq '.' > /dev/null; then
error "Unable to parse $file as JSON"
fi
fi
}
read_json() {
local file=$1
local node=$2
if test -f $file; then
cat $file | $bp_dir/vendor/jq --raw-output "$node // \"\"" || return 1
else
echo ""
fi
}
get_modules_source() {
local build_dir=$1
if test -d $build_dir/node_modules; then
echo "prebuilt"
elif test -f $build_dir/npm-shrinkwrap.json; then
echo "npm-shrinkwrap.json"
elif test -f $build_dir/package.json; then
echo "package.json"
else
echo ""
fi
}
get_modules_cached() {
local cache_dir=$1
if test -d $cache_dir/node/node_modules; then
echo "true"
else
echo "false"
fi
}
read_current_state() {
info "package.json..."
assert_json "$build_dir/package.json"
iojs_engine=$(read_json "$build_dir/package.json" ".engines.iojs")
node_engine=$(read_json "$build_dir/package.json" ".engines.node")
npm_engine=$(read_json "$build_dir/package.json" ".engines.npm")
info "build directory..."
modules_source=$(get_modules_source "$build_dir")
info "cache directory..."
npm_previous=$(file_contents "$cache_dir/node/npm-version")
node_previous=$(file_contents "$cache_dir/node/node-version")
modules_cached=$(get_modules_cached "$cache_dir")
info "environment variables..."
export_env_dir $env_dir
export NPM_CONFIG_PRODUCTION=${NPM_CONFIG_PRODUCTION:-true}
export NODE_MODULES_CACHE=${NODE_MODULES_CACHE:-true}
}
show_current_state() {
echo ""
if [ "$iojs_engine" == "" ]; then
info "Node engine: ${node_engine:-unspecified}"
else
achievement "iojs"
info "Node engine: $iojs_engine (iojs)"
fi
info "Npm engine: ${npm_engine:-unspecified}"
info "node_modules source: ${modules_source:-none}"
info "node_modules cached: $modules_cached"
echo ""
printenv | grep ^NPM_CONFIG_ | indent
info "NODE_MODULES_CACHE=$NODE_MODULES_CACHE"
}
install_node() {
local node_engine=$1
# Resolve non-specific node versions using semver.herokuapp.com
if ! [[ "$node_engine" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
info "Resolving node version ${node_engine:-(latest stable)} via semver.io..."
node_engine=$(curl --silent --get --data-urlencode "range=${node_engine}" https://semver.herokuapp.com/node/resolve)
fi
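  # e.g. a range like "0.12.x" resolves to a concrete version such as "0.12.18" (illustrative)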
# Download node from Heroku's S3 mirror of nodejs.org/dist
info "Downloading and installing node $node_engine..."
node_url="http://s3pository.heroku.com/node/v$node_engine/node-v$node_engine-linux-x64.tar.gz"
curl $node_url -s -o - | tar xzf - -C /tmp
# Move node (and npm) into .heroku/node and make them executable
mv /tmp/node-v$node_engine-linux-x64/* $heroku_dir/node
chmod +x $heroku_dir/node/bin/*
PATH=$heroku_dir/node/bin:$PATH
}
install_iojs() {
local iojs_engine=$1
# Resolve non-specific iojs versions using semver.herokuapp.com
if ! [[ "$iojs_engine" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
info "Resolving iojs version ${iojs_engine:-(latest stable)} via semver.io..."
iojs_engine=$(curl --silent --get --data-urlencode "range=${iojs_engine}" https://semver.herokuapp.com/iojs/resolve)
fi
# TODO: point at /dist once that's available
info "Downloading and installing iojs $iojs_engine..."
download_url="https://iojs.org/dist/v$iojs_engine/iojs-v$iojs_engine-linux-x64.tar.gz"
curl $download_url -s -o - | tar xzf - -C /tmp
# Move iojs/node (and npm) binaries into .heroku/node and make them executable
mv /tmp/iojs-v$iojs_engine-linux-x64/* $heroku_dir/node
chmod +x $heroku_dir/node/bin/*
PATH=$heroku_dir/node/bin:$PATH
}
install_npm() {
# Optionally bootstrap a different npm version
if [ "$npm_engine" != "" ]; then
if ! [[ "$npm_engine" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
info "Resolving npm version ${npm_engine} via semver.io..."
npm_engine=$(curl --silent --get --data-urlencode "range=${npm_engine}" https://semver.herokuapp.com/npm/resolve)
fi
if [[ `npm --version` == "$npm_engine" ]]; then
info "npm `npm --version` already installed with node"
else
info "Downloading and installing npm $npm_engine (replacing version `npm --version`)..."
npm install --quiet -g npm@$npm_engine 2>&1 >/dev/null | indent
fi
warn_old_npm `npm --version`
else
info "Using default npm version: `npm --version`"
fi
}
function build_dependencies() {
if [ "$modules_source" == "" ]; then
info "Skipping dependencies (no source for node_modules)"
elif [ "$modules_source" == "prebuilt" ]; then
info "Rebuilding any native modules for this architecture"
npm rebuild 2>&1 | indent
info "Installing any new modules"
npm install --dev --quiet --userconfig $build_dir/.npmrc 2>&1 | indent
else
cache_status=$(get_cache_status)
if [ "$cache_status" == "valid" ]; then
info "Restoring node modules from cache"
cp -r $cache_dir/node/node_modules $build_dir/
info "Pruning unused dependencies"
npm prune 2>&1 | indent
info "Installing any new modules"
npm install --dev --quiet --userconfig $build_dir/.npmrc 2>&1 | indent
else
info "$cache_status"
info "Installing node modules"
touch $build_dir/.npmrc
npm install --dev --quiet --userconfig $build_dir/.npmrc 2>&1 | indent
fi
fi
}
clean_npm() {
info "Cleaning npm artifacts"
rm -rf "$build_dir/.node-gyp"
rm -rf "$build_dir/.npm"
}
# Caching
create_cache() {
info "Caching results for future builds"
mkdir -p $cache_dir/node
echo `node --version` > $cache_dir/node/node-version
echo `npm --version` > $cache_dir/node/npm-version
if test -d $build_dir/node_modules; then
cp -r $build_dir/node_modules $cache_dir/node
fi
}
clean_cache() {
info "Cleaning previous cache"
rm -rf "$cache_dir/node_modules" # (for apps still on the older caching strategy)
rm -rf "$cache_dir/node"
}
get_cache_status() {
local node_version=`node --version`
local npm_version=`npm --version`
# Did we bust the cache?
if ! $modules_cached; then
echo "No cache available"
elif ! $NODE_MODULES_CACHE; then
echo "Cache disabled with NODE_MODULES_CACHE"
elif [ "$node_previous" != "" ] && [ "$node_version" != "$node_previous" ]; then
echo "Node version changed ($node_previous => $node_version); invalidating cache"
elif [ "$npm_previous" != "" ] && [ "$npm_version" != "$npm_previous" ]; then
echo "Npm version changed ($npm_previous => $npm_version); invalidating cache"
else
echo "valid"
fi
}
export_env_dir() {
env_dir=$1
if [ -d "$env_dir" ]; then
whitelist_regex=${2:-''}
blacklist_regex=${3:-'^(PATH|GIT_DIR|CPATH|CPPATH|LD_PRELOAD|LIBRARY_PATH)$'}
if [ -d "$env_dir" ]; then
for e in $(ls $env_dir); do
echo "$e" | grep -E "$whitelist_regex" | grep -qvE "$blacklist_regex" &&
export "$e=$(cat $env_dir/$e)"
:
done
fi
fi
}
# sed -l basically makes sed replace and buffer through stdin to stdout
# so you get updates while the command runs and don't wait for the end
# e.g. npm install | indent
indent() {
c='s/^/ /'
case $(uname) in
Darwin) sed -l "$c";; # mac/bsd sed: -l buffers on line boundaries
*) sed -u "$c";; # unix/gnu sed: -u unbuffered (arbitrary) chunks of data
esac
}
| true |
698859ed395f798583f036c93d38f4add0debe1b
|
Shell
|
davidvenegasr/Computer-Architecture-
|
/TP2 - SIMD Image Filters/src/experimentos/exps/exp1_Zigzag.sh
|
UTF-8
| 2,290 | 2.765625 | 3 |
[] |
no_license
|
#!/bin/bash
#-------------------------------------------------------------------------------
# This script generates the results for experiment 1 of the report
# HOW TO CALL THE SCRIPT
#
# ./exp1_Zigzag.sh
#
# (it can only be run from the folder where this .sh is stored)
#-------------------------------------------------------------------------------
CANT_ITER=500
IMG1="imgs/1.bmp"
cd ..
#Current dir = "experimentos"
cd ..
#Current dir = "src"
echo "********************** MAKE CLEAN *************************"
make clean
echo "********************** MAKE -O0 ***************************"
make
echo "********************** end MAKE -O0 ***********************"
cd experimentos
#Current dir = "experimentos"
echo "++++++++++++ RUNNING Zigzag ASM ..."
./filter_ejec.sh Zigzag asm $CANT_ITER $IMG1
mkdir resultados 2> /dev/null
mkdir resultados/exp1_Zigzag 2> /dev/null
mv log.Zigzag.asm.out resultados/exp1_Zigzag
mv log.Zigzag.asm.csv resultados/exp1_Zigzag
#UP TO HERE we have the asm results
echo "++++++++++++ RUNNING Zigzag C -O0 ..."
./filter_ejec.sh Zigzag c $CANT_ITER $IMG1
mv log.Zigzag.c.out resultados/exp1_Zigzag/log.Zigzag.c.O0.out
mv log.Zigzag.c.csv resultados/exp1_Zigzag/log.Zigzag.c.O0.csv
#UP TO HERE we have the C results with -O0
#NOW modify the Makefile to change the optimization level
cp data/Makefile_O3 data/Makefile
mv data/Makefile ../filters/Makefile
cd ..
#Current dir = "src"
echo "********************** MAKE CLEAN *************************"
make clean
echo "********************** MAKE -O3 ***************************"
make
echo "********************** end MAKE -O3 ***********************"
cd experimentos
#Current dir = "experimentos"
echo "++++++++++++ RUNNING Zigzag C -O3 ..."
./filter_ejec.sh Zigzag c $CANT_ITER $IMG1
mv log.Zigzag.c.out resultados/exp1_Zigzag/log.Zigzag.c.O3.out
mv log.Zigzag.c.csv resultados/exp1_Zigzag/log.Zigzag.c.O3.csv
cp data/Makefile_O0 data/Makefile
mv data/Makefile ../filters/Makefile
rm 1.bmp.Zigzag.*.bmp
cd ..
#Current dir = "src"
echo "********************** MAKE CLEAN *************************"
make clean
echo "********************** MAKE -O0 ***************************"
make
echo "********************** end MAKE -O0 ***********************"
| true |
3d35aeff1189803c680529a7f3cbe9b7b9a18492
|
Shell
|
optionalg/bash-18
|
/system-dump
|
UTF-8
| 2,160 | 3.28125 | 3 |
[] |
no_license
|
#!/bin/bash
# date : 2011.11.01
# goal : dump this system so it can be moved to another machine
# usage: just run it.
HOME_DIR=/home/ghh
DUMP_DIR=/home/ghh/dump
SVN_REPOS=/home/ghh/svn-repos
SVN_DUMP_FILE=$DUMP_DIR/svn-dump
TRAC_ENV=/var/www/trac
TRAC_DUMP_ENV=$DUMP_DIR/trac-dump
PSQL_DUMP_DIR=$DUMP_DIR/psql_dump
DB_LIST=("db_test" "db_asac" "db_blog")
CONFIG_FILES_DIR=$DUMP_DIR/config-files
FILES_AND_DIRS_DUMP=$DUMP_DIR/files_dirs
FILE_DIRECTORY_LIST=("/var/www" "$HOME_DIR/workspace" "$HOME_DIR/useful-tip" "$HOME_DIR/software" "$HOME_DIR/temp" "$HOME_DIR/pictures" "$HOME_DIR/download" "$HOME_DIR/documents" "$HOME_DIR/music")
# for svn
function svn_dump(){
cd $SVN_REPOS
for file in `ls `
do
if [ -d $file ];then
echo entring directory: $file
echo dump to the file : $SVN_DUMP_FILE/$file.`date +%F`.dump
svnadmin dump $file > $SVN_DUMP_FILE/$file.`date +%F`.dump
fi
done
}
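# A dump can later be restored with e.g. `svnadmin load <new-repo> < <dumpfile>` (illustrative)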
# for trac
function trac_dump(){
echo "trac-admin $TRAC_ENV hotcopy $TRAC_DUMP_ENV"
trac-admin $TRAC_ENV hotcopy $TRAC_DUMP_ENV
}
# for postgresql
# see more : http://www.pgsqldb.org/pgsqldoc-cvs/backup.html
function sql_dump(){
echo "dump all sql to $PSQL_DUMP_DIR. use psql -f 'infile' postgres to restore."
for dbname in ${DB_LIST[*]}
do
pg_dump $dbname > $PSQL_DUMP_DIR/$dbname.`date +%F`.dump
done
}
# for the configure files.
function config_dump(){
cp /etc/apt/sources.list $CONFIG_FILES_DIR/sources.list
cp ~/.bash_aliases $CONFIG_FILES_DIR/bash_aliases
cp ~/.vimrc $CONFIG_FILES_DIR/vimrc
}
# files & directories need to copy
function files_dirs_dump(){
for dir in ${FILE_DIRECTORY_LIST[*]}
do
echo $dir
# ls $dir/*
echo ${dir##*/}
tar -cvf $FILES_AND_DIRS_DUMP/${dir##*/}.tar.gz $dir
done
}
##############################
#### dump starts from here ###
##############################
rm -rf $DUMP_DIR
mkdir $DUMP_DIR
mkdir $SVN_DUMP_FILE
mkdir $PSQL_DUMP_DIR
mkdir $CONFIG_FILES_DIR
mkdir $FILES_AND_DIRS_DUMP
#ok now .
svn_dump
trac_dump
sql_dump
config_dump
files_dirs_dump
cd $HOME_DIR
tar -cvf system.dump.`date +%F`.tar.gz $DUMP_DIR
##############################
####### dump done ! ##########
##############################
| true |
c248ecd75dfd2da5f3be7f0c3fbcef9a834006f9
|
Shell
|
Villadelfia/zshsettings
|
/.zsh/plugins/autoupdate-impl.sh
|
UTF-8
| 722 | 3.0625 | 3 |
[] |
no_license
|
#!/bin/zsh
# Checks for updates on my rc files
# vim:syntax=zsh
trap '' 2
# Vim
cd ~/vimrc
if git fetch --dry-run 2>&1 | grep -q '.'; then
echo
echo "Updating vimrc..."
git pull
case $(uname -s) in
Cygwin*|CYGWIN*)
yes | $(pwd)/mkwincopy.sh
;;
*)
;;
esac
echo
fi
# Zsh
cd ~/zshrc
if git fetch --dry-run 2>&1 | grep -q '.'; then
echo
echo "Updating zshrc..."
git pull
echo
echo "################################################################"
echo " Zshrc was updated! You may need to restart your shell!"
echo "################################################################"
echo
fi
# End
cd
trap 2
| true |
2e02a50c7fbe91573fd4e5174c3226af9b2c02ad
|
Shell
|
pvmsikrsna/wallaroo
|
/.release/documentation-release.sh
|
UTF-8
| 2,463 | 4.15625 | 4 |
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#! /bin/bash
set -o errexit
set -o nounset
verify_args() {
  ## Asks the user to confirm that the documentation release is being run with
  ## the intended version and commit
echo "Creating documentation for version $for_version with commit $commit"
while true; do
read -rp "Is this correct (y/n)?" yn
case $yn in
[Yy]*) break;;
[Nn]*) exit;;
*) echo "Please answer y or n.";;
esac
done
}
verify_branch() {
# determine remote branch to use
echo "Verifying that script is being run on a branch with a remote repo..."
BRANCH=$(git rev-parse --abbrev-ref HEAD)
if [[ "$BRANCH" == "master" ]]
then
remote_branch=master
DOCKER_VERSION=$(< VERSION)
DOCKER_URL="release\/wallaroo:$DOCKER_VERSION"
elif [[ "$BRANCH" == "release" ]]
then
remote_branch=release
DOCKER_VERSION=$(< VERSION)
DOCKER_URL="release\/wallaroo:$DOCKER_VERSION"
elif [[ "$BRANCH" == *"release-"* ]]
then
remote_branch=rc
DOCKER_VERSION=$(< VERSION)-$(git log -n 1 --oneline | cut -d' ' -f1)
DOCKER_URL="dev\/wallaroo:$DOCKER_VERSION"
else
echo "No remote repo to push book to. Exiting"
exit 0
fi
}
verify_commit_on_branch() {
  echo "Verifying commit $commit is on branch: $BRANCH..."
if ! git branch --contains "$commit" | grep "$BRANCH"
then
echo "Commit $commit is not on branch: $BRANCH"
exit 1
fi
}
checkout_to_commit() {
git checkout "$commit"
}
update_version() {
echo "Updating version for docker image in config.toml..."
# Update docker version in config.toml
sed -i "s/^docker_version_url=.*/docker_version_url=\"${DOCKER_URL}\"/" documentation/config.toml
}
build_book() {
echo "Building book..."
pushd documentation
hugo
popd
}
upload_book() {
echo "Uploading book..."
# git magic. without all this, our ghp-import command won't work
pushd documentation
git remote add doc-site "git@github.com:wallaroolabs/docs.wallaroolabs.com.git"
git fetch doc-site
git reset doc-site/$remote_branch
ghp-import -p -r doc-site -b $remote_branch -f public
popd
}
git_clean() {
echo "Cleaning repo..."
git clean -fd
git remote rm doc-site
echo "Checking out to $BRANCH"
git reset --hard "origin/$BRANCH"
git checkout "$BRANCH"
}
if [ $# -lt 2 ]; then
  echo "version and commit arguments required"
  exit 1
fi
set -eu
for_version=$1
commit=$2
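# Example (version and commit values are illustrative):
#   ./documentation-release.sh 0.6.1 abc1234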
verify_args
verify_branch
verify_commit_on_branch
checkout_to_commit
update_version
build_book
upload_book
git_clean
| true |
16d29c3e6a868f57fc8dc7f15fb1e7739be369d1
|
Shell
|
giorgos1987/bash_script
|
/2_cp_for_land.sh
|
UTF-8
| 794 | 2.84375 | 3 |
[] |
no_license
|
#!/bin/bash
#Author : Papoutsis Georgios Agroknow
while read line
do
name=$line
echo "$( cut -d ':' -f 1 <<< $line )" >> ~/landportal/agris_land_filenames.txt
# echo "Text read from file - $name"
###echo "$( cut -d ':' -f 1 <<< '$line' )" >> ~/agris_land_2015_filenames.txt
done < ~/landportal/agris_all_landresources.txt
#~/landportal/agris_2014_landresources.txt
# agris_2015_landresources.txt
#~/landportal/agris_all_landresources.txt #$1 read filenames and correct
while read F ; do
cp $F /home/giopap/landportal/agris_land/ #/home/giopap/landportal/
done < ~/landportal/agris_land_filenames.txt
#~/landportal/agris_land_2014_filenames.txt ###the cleaned filenames xxxxx/xxxx/xx.xml
#agris_all_landresources.txt
exit;
| true |
59ad0236a94bb73ba1e2e29a8c429dc012611b4f
|
Shell
|
mk-pmb/homerouter-util-pmb
|
/speedport/w724v/download_syslog.sh
|
UTF-8
| 1,683 | 3.421875 | 3 |
[] |
no_license
|
#!/bin/bash
# -*- coding: utf-8, tab-width: 2 -*-
function download_syslog () {
export LANG{,UAGE}=en_US.UTF-8 # make error messages search engine-friendly
local SELFPATH="$(readlink -m "$BASH_SOURCE"/..)"
source "$SELFPATH"/lib_sp_w724v.sh --lib || return $?
local LOG_BFN="${SP_LOGSDIR:-.}/$(date +%Y-%m/%y%m%d.%H%M%S).$$."
mkdir --parents -- "${LOG_BFN%/*}" || return $?
sp_w724v__login || return $?
local RFR='html/content/config/system_info.html'
local URL="$RFR"
local REPLY="$(sp_w724v__jsonapi "$RFR" "$URL")"
local RGX='\s*var\s+_httoken\s*=\s*(\d+)\s*;\s*'
local TOKEN="$(<<<"$REPLY" LANG=C grep -xPe "$RGX" -m 1 \
| grep -oPe '\d+' -m 1)"
[ -n "$TOKEN" ] || return 3$(echo "E: unable to get an _httoken" >&2)
URL="data/SystemMessages.json?_tn=$TOKEN"
REPLY="$(sp_w724v__jsonapi "$RFR" "$URL")" || return $?
local SED_MSGS='
/^addmessage\ttemplate\t\[/{
: read_more
N
/\]$/!b read_more
s~^[a-z\t]+\[~~
s~\s*\]$~~
s~\n\s*id\s+value\s+[0-9]+\n\s+timestamp\s+value\s+($\
|[0-9]{2})\.([0-9]{2})\.([0-9]{4}) ~\3-\2-\1,~
s~\s*\n\s+message\s+value\s+~ ~
s~\<~‹~g
s~\>~›~g
s~\(~\(~g
s~\)~\)~g
s~\.~\.~g
s~\n~¶~g
}'
REPLY="$(<<<"$REPLY" sp_w724v__decode_varid_json | LANG=C sed -re "
$SED_MSGS")"
local META="${REPLY%%$'\n'20*}"
<<<"$META" sort -V >"$LOG_BFN"meta.txt || return $?
REPLY="${REPLY:${#META}}"
REPLY="${REPLY#$'\n'}"
<<<"$REPLY" tac >"$LOG_BFN"log.txt || return $?
echo "D: saved as ${LOG_BFN}{meta,log}.txt"
return 0
}
[ "$1" == --lib ] && return 0; download_syslog "$@"; exit $?
| true |
45d3e9c3f75e1c8e68e9d20b3ab1fbbad8d03c6f
|
Shell
|
sarvex/Impala
|
/thirdparty/openldap-2.4.25/tests/scripts/test044-dynlist
|
UTF-8
| 15,936 | 3.109375 | 3 |
[
"Apache-2.0",
"OLDAP-2.8",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-4.3RENO"
] |
permissive
|
#! /bin/sh
## This work is part of OpenLDAP Software <http://www.openldap.org/>.
##
## Copyright 1998-2011 The OpenLDAP Foundation.
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted only as authorized by the OpenLDAP
## Public License.
##
## A copy of this license is available in the file LICENSE in the
## top-level directory of the distribution or, alternatively, at
## <http://www.OpenLDAP.org/license.html>.
echo "running defines.sh"
. $SRCDIR/scripts/defines.sh
if test $DYNLIST = "dynlistno" ; then
echo "dynlist overlay not available, test skipped"
exit 0
fi
if test $BACKEND = ldif ; then
# dynlist+ldif fails because back-ldif lacks bi_op_compare()
echo "$BACKEND backend unsuitable for dynlist overlay, test skipped"
exit 0
fi
mkdir -p $TESTDIR $DBDIR1
$SLAPPASSWD -g -n >$CONFIGPWF
echo "rootpw `$SLAPPASSWD -T $CONFIGPWF`" >$TESTDIR/configpw.conf
echo "Running slapadd to build slapd database..."
. $CONFFILTER $BACKEND $MONITORDB < $MCONF > $ADDCONF
$SLAPADD -f $ADDCONF -l $LDIFORDERED
RC=$?
if test $RC != 0 ; then
echo "slapadd failed ($RC)!"
exit $RC
fi
. $CONFFILTER $BACKEND $MONITORDB < $DYNLISTCONF > $CONF1
echo "Starting slapd on TCP/IP port $PORT1..."
$SLAPD -f $CONF1 -h $URI1 -d $LVL $TIMING > $LOG1 2>&1 &
PID=$!
if test $WAIT != 0 ; then
echo PID $PID
read foo
fi
KILLPIDS="$PID"
sleep 1
echo "Testing slapd searching..."
for i in 0 1 2 3 4 5; do
$LDAPSEARCH -s base -b "$MONITOR" -h $LOCALHOST -p $PORT1 \
'(objectclass=*)' > /dev/null 2>&1
RC=$?
if test $RC = 0 ; then
break
fi
echo "Waiting 5 seconds for slapd to start..."
sleep 5
done
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
cat /dev/null > $SEARCHOUT
LISTDN="ou=Dynamic Lists,$BASEDN"
echo "Adding a dynamic list..."
$LDAPADD -v -D "$MANAGERDN" -h $LOCALHOST -p $PORT1 -w $PASSWD \
> $TESTOUT 2>&1 << EOMODS
dn: $LISTDN
objectClass: organizationalUnit
ou: Dynamic Lists
dn: cn=Dynamic List,$LISTDN
objectClass: groupOfURLs
cn: Dynamic List
memberURL: ldap:///ou=People,${BASEDN}?cn,mail?sub?(objectClass=person)
EOMODS
echo "Testing list search of all attrs..."
echo "# Testing list search of all attrs..." >> $SEARCHOUT
$LDAPSEARCH -S "" -b "$LISTDN" -h $LOCALHOST -p $PORT1 \
'(cn=Dynamic List)' '*' \
>> $SEARCHOUT 2>&1
RC=$?
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
echo "Testing list search of a listed attr..."
echo "# Testing list search of a listed attr..." >> $SEARCHOUT
$LDAPSEARCH -S "" -b "$LISTDN" -h $LOCALHOST -p $PORT1 \
'(cn=Dynamic List)' mail \
>> $SEARCHOUT 2>&1
RC=$?
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
echo "Testing list search of a non-listed attr..."
echo "# Testing list search of a non-listed attr..." >> $SEARCHOUT
$LDAPSEARCH -S "" -b "$LISTDN" -h $LOCALHOST -p $PORT1 \
'(cn=Dynamic List)' objectClass \
>> $SEARCHOUT 2>&1
RC=$?
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
echo "Testing list search with (critical) manageDSAit..."
echo "# Testing list search with (critical) manageDSAit..." >> $SEARCHOUT
$LDAPSEARCH -S "" -b "$LISTDN" -h $LOCALHOST -p $PORT1 -MM \
'(cn=Dynamic List)' '*' \
>> $SEARCHOUT 2>&1
RC=$?
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
echo "Testing list compare..."
echo "# Testing list compare..." >> $SEARCHOUT
$LDAPCOMPARE -h $LOCALHOST -p $PORT1 \
"cn=Dynamic List,$LISTDN" "cn:Bjorn Jensen" \
>> $SEARCHOUT 2>&1
RC=$?
case $RC in
5)
echo "ldapcompare returned FALSE ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
6)
echo "ldapcompare returned TRUE ($RC)"
;;
0)
echo "ldapcompare returned success ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit 1
;;
*)
echo "ldapcompare failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
esac
echo "" >> $SEARCHOUT
echo "Testing list compare (should return FALSE)..."
echo "# Testing list compare (should return FALSE)..." >> $SEARCHOUT
$LDAPCOMPARE -h $LOCALHOST -p $PORT1 \
"cn=Dynamic List,$LISTDN" "cn:FALSE" \
>> $SEARCHOUT 2>&1
RC=$?
case $RC in
5)
echo "ldapcompare returned FALSE ($RC)"
;;
6)
echo "ldapcompare returned TRUE ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
0)
echo "ldapcompare returned success ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit 1
;;
*)
echo "ldapcompare failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
esac
echo "" >> $SEARCHOUT
echo "Testing list compare (should return UNDEFINED)..."
echo "# Testing list compare (should return UNDEFINED)..." >> $SEARCHOUT
$LDAPCOMPARE -h $LOCALHOST -p $PORT1 \
"cn=Dynamic List,$LISTDN" "dc:UNDEFINED" \
>> $SEARCHOUT 2>&1
RC=$?
case $RC in
5)
echo "ldapcompare returned FALSE ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
6)
echo "ldapcompare returned TRUE ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
16|32)
echo "ldapcompare returned UNDEFINED ($RC)"
;;
0)
echo "ldapcompare returned success ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit 1
;;
*)
echo "ldapcompare failed ($RC)"
;;
esac
echo "" >> $SEARCHOUT
echo "Testing list compare with manageDSAit..."
echo "# Testing list compare with manageDSAit..." >> $SEARCHOUT
$LDAPCOMPARE -h $LOCALHOST -p $PORT1 -MM \
"cn=Dynamic List,$LISTDN" "cn:Bjorn Jensen" \
>> $SEARCHOUT 2>&1
RC=$?
case $RC in
5)
echo "ldapcompare returned FALSE ($RC)"
;;
6)
echo "ldapcompare returned TRUE ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
0)
echo "ldapcompare returned success ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit 1
;;
*)
echo "ldapcompare failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
esac
echo "" >> $SEARCHOUT
echo "Reconfiguring slapd..."
$LDAPMODIFY -x -D cn=config -h $LOCALHOST -p $PORT1 -y $CONFIGPWF > \
$TESTOUT 2>&1 << EOMODS
version: 1
dn: olcOverlay={0}dynlist,olcDatabase={2}$BACKEND,cn=config
changetype: modify
delete: olcDLattrSet
olcDLattrSet: {0}
-
add: olcDLattrSet
olcDLattrSet: groupOfURLs memberURL sn:cn mail
-
EOMODS
echo "==========================================================" >> $LOG1
echo "Testing attribute mapping"
echo "Testing list search of all (mapped) attrs..."
echo "# Testing list search of all (mapped) attrs..." >> $SEARCHOUT
$LDAPSEARCH -S "" -b "$LISTDN" -h $LOCALHOST -p $PORT1 \
'(cn=Dynamic List)' '*' \
>> $SEARCHOUT 2>&1
RC=$?
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
echo "Testing list search of a (mapped) listed attr..."
echo "# Testing list search of a (mapped) listed attr..." >> $SEARCHOUT
$LDAPSEARCH -S "" -b "$LISTDN" -h $LOCALHOST -p $PORT1 \
'(cn=Dynamic List)' sn \
>> $SEARCHOUT 2>&1
RC=$?
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
echo "Testing list search of a (n unmapped) listed attr..."
echo "# Testing list search of a (n unmapped) listed attr..." >> $SEARCHOUT
$LDAPSEARCH -S "" -b "$LISTDN" -h $LOCALHOST -p $PORT1 \
'(cn=Dynamic List)' mail \
>> $SEARCHOUT 2>&1
RC=$?
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
echo "Testing list compare (mapped attrs) ..."
echo "# Testing list compare (mapped attrs) ..." >> $SEARCHOUT
$LDAPCOMPARE -h $LOCALHOST -p $PORT1 \
"cn=Dynamic List,$LISTDN" "sn:Bjorn Jensen" \
>> $SEARCHOUT 2>&1
RC=$?
case $RC in
5)
echo "ldapcompare returned FALSE ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
6)
echo "ldapcompare returned TRUE ($RC)"
;;
0)
echo "ldapcompare returned success ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit 1
;;
*)
echo "ldapcompare failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
esac
echo "" >> $SEARCHOUT
echo "Testing list compare (mapped attrs; should return FALSE)..."
echo "# Testing list compare (mapped attrs; should return FALSE)..." >> $SEARCHOUT
$LDAPCOMPARE -h $LOCALHOST -p $PORT1 \
"cn=Dynamic List,$LISTDN" "sn:FALSE" \
>> $SEARCHOUT 2>&1
RC=$?
case $RC in
5)
echo "ldapcompare returned FALSE ($RC)"
;;
6)
echo "ldapcompare returned TRUE ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
0)
echo "ldapcompare returned success ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit 1
;;
*)
echo "ldapcompare failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
esac
echo "" >> $SEARCHOUT
echo "Reconfiguring slapd..."
$LDAPMODIFY -x -D cn=config -h $LOCALHOST -p $PORT1 -y $CONFIGPWF > \
$TESTOUT 2>&1 << EOMODS
version: 1
dn: olcOverlay={0}dynlist,olcDatabase={2}$BACKEND,cn=config
changetype: modify
delete: olcDLattrSet
olcDLattrSet: {0}
-
add: olcDLattrSet
olcDLattrSet: groupOfURLs memberURL member
-
EOMODS
echo "==========================================================" >> $LOG1
echo "Adding a dynamic list..."
$LDAPADD -v -D "$MANAGERDN" -h $LOCALHOST -p $PORT1 -w $PASSWD \
> $TESTOUT 2>&1 << EOMODS
dn: cn=Dynamic List of Members,$LISTDN
objectClass: groupOfURLs
cn: Dynamic List of Members
memberURL: ldap:///ou=People,${BASEDN}??sub?(objectClass=person)
EOMODS
echo "Testing list search of all attrs..."
echo "# Testing list search of all attrs..." >> $SEARCHOUT
$LDAPSEARCH -S "" -b "$LISTDN" -h $LOCALHOST -p $PORT1 \
'(cn=Dynamic List of Members)' '*' \
>> $SEARCHOUT 2>&1
RC=$?
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
echo "Testing list search of a listed attr..."
echo "# Testing list search of a listed attr..." >> $SEARCHOUT
$LDAPSEARCH -S "" -b "$LISTDN" -h $LOCALHOST -p $PORT1 \
'(cn=Dynamic List of Members)' member \
>> $SEARCHOUT 2>&1
RC=$?
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
echo "Testing list search of a non-listed attr..."
echo "# Testing list search of a non-listed attr..." >> $SEARCHOUT
$LDAPSEARCH -S "" -b "$LISTDN" -h $LOCALHOST -p $PORT1 \
'(cn=Dynamic List of Members)' objectClass \
>> $SEARCHOUT 2>&1
RC=$?
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
echo "Testing list search with (critical) manageDSAit..."
echo "# Testing list search with (critical) manageDSAit..." >> $SEARCHOUT
$LDAPSEARCH -S "" -b "$LISTDN" -h $LOCALHOST -p $PORT1 -MM \
'(&(cn=Dynamic List of Members)(objectClass=groupOfURLs))' '*' \
>> $SEARCHOUT 2>&1
RC=$?
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
CMPDN="$BJORNSDN"
echo "Testing list compare..."
echo "# Testing list compare..." >> $SEARCHOUT
$LDAPCOMPARE -h $LOCALHOST -p $PORT1 \
"cn=Dynamic List of Members,$LISTDN" "member:$CMPDN" \
>> $SEARCHOUT 2>&1
RC=$?
case $RC in
5)
echo "ldapcompare returned FALSE ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
6)
echo "ldapcompare returned TRUE ($RC)"
;;
0)
echo "ldapcompare returned success ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit 1
;;
*)
echo "ldapcompare failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
esac
echo "" >> $SEARCHOUT
echo "Testing list compare (should return FALSE)..."
echo "# Testing list compare (should return FALSE)..." >> $SEARCHOUT
$LDAPCOMPARE -h $LOCALHOST -p $PORT1 \
"cn=Dynamic List of Members,$LISTDN" "member:cn=Foo Bar" \
>> $SEARCHOUT 2>&1
RC=$?
case $RC in
5)
echo "ldapcompare returned FALSE ($RC)"
;;
6)
echo "ldapcompare returned TRUE ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
0)
echo "ldapcompare returned success ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit 1
;;
*)
echo "ldapcompare failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
esac
echo "" >> $SEARCHOUT
echo "Testing list compare with manageDSAit..."
echo "# Testing list compare with manageDSAit..." >> $SEARCHOUT
$LDAPCOMPARE -h $LOCALHOST -p $PORT1 -MM \
"cn=Dynamic List,$LISTDN" "member:$CMPDN" \
>> $SEARCHOUT 2>&1
RC=$?
case $RC in
5)
echo "ldapcompare returned FALSE ($RC)"
;;
6)
echo "ldapcompare returned TRUE ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
0)
echo "ldapcompare returned success ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
	exit 1
;;
*)
echo "ldapcompare failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
;;
esac
echo "" >> $SEARCHOUT
echo "==========================================================" >> $LOG1
echo "Testing dgIdentity..."
# Set ACL, require authentication to get list contents
$LDAPMODIFY -x -D cn=config -h $LOCALHOST -p $PORT1 -y $CONFIGPWF > \
$TESTOUT 2>&1 << EOMODS
version: 1
dn: olcDatabase={2}$BACKEND,cn=config
changetype: modify
add: olcAccess
olcAccess: to dn.base="cn=Dynamic List of Members,$LISTDN" by * read
olcAccess: to * by users read by * search
EOMODS
echo "Testing list search without dgIdentity..."
echo "# Testing list search without dgIdentity..." >> $SEARCHOUT
$LDAPSEARCH -S "" -b "$LISTDN" -h $LOCALHOST -p $PORT1 \
'(cn=Dynamic List of Members)' '*' \
>> $SEARCHOUT 2>&1
RC=$?
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
$LDAPMODIFY -v -D "$MANAGERDN" -h $LOCALHOST -p $PORT1 -w $PASSWD \
> $TESTOUT 2>&1 << EOMODS
dn: cn=Dynamic List of Members,$LISTDN
changetype: modify
add: objectClass
objectClass: dgIdentityAux
-
add: dgIdentity
dgIdentity: $CMPDN
EOMODS
echo "Testing list search with dgIdentity..."
echo "# Testing list search with dgIdentity..." >> $SEARCHOUT
$LDAPSEARCH -S "" -b "$LISTDN" -h $LOCALHOST -p $PORT1 \
'(cn=Dynamic List of Members)' '*' \
>> $SEARCHOUT 2>&1
RC=$?
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
echo "Testing dgAuthz..."
CMPDN="cn=Bjorn Jensen,ou=Information Technology Division,ou=People,$BASEDN"
$LDAPMODIFY -v -D "$MANAGERDN" -h $LOCALHOST -p $PORT1 -w $PASSWD \
> $TESTOUT 2>&1 << EOMODS
dn: cn=Dynamic List of Members,$LISTDN
changetype: modify
add: dgAuthz
dgAuthz: dn:$BABSDN
EOMODS
echo "Testing list search with dgIdentity and dgAuthz anonymously..."
echo "# Testing list search with dgIdentity and dgAuthz anonymously..." >> $SEARCHOUT
$LDAPSEARCH -S "" -b "$LISTDN" -h $LOCALHOST -p $PORT1 \
'(cn=Dynamic List of Members)' '*' \
>> $SEARCHOUT 2>&1
RC=$?
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
echo "Testing list search with dgIdentity and dgAuthz as the authorized identity..."
echo "# Testing list search with dgIdentity and dgAuthz as the authorized identity..." >> $SEARCHOUT
$LDAPSEARCH -S "" -b "$LISTDN" -h $LOCALHOST -p $PORT1 \
-D "$BABSDN" -w bjensen \
'(cn=Dynamic List of Members)' '*' \
>> $SEARCHOUT 2>&1
RC=$?
if test $RC != 0 ; then
echo "ldapsearch failed ($RC)!"
test $KILLSERVERS != no && kill -HUP $KILLPIDS
exit $RC
fi
test $KILLSERVERS != no && kill -HUP $KILLPIDS
LDIF=$DYNLISTOUT
echo "Filtering ldapsearch results..."
$LDIFFILTER < $SEARCHOUT > $SEARCHFLT
echo "Filtering original ldif used to create database..."
$LDIFFILTER < $LDIF > $LDIFFLT
echo "Comparing filter output..."
$CMP $SEARCHFLT $LDIFFLT > $CMPOUT
if test $? != 0 ; then
echo "Comparison failed"
exit 1
fi
echo ">>>>> Test succeeded"
test $KILLSERVERS != no && wait
exit 0
| true |
454792f1b89bcd359afca085f39ec12c1f1e69f8
|
Shell
|
alexdlaird/old-college-projects
|
/Fall Semester 2010/Parallel Computing/Sieve to Find Primes (Shared Memory)/test.sh
|
UTF-8
| 474 | 3.71875 | 4 |
[] |
no_license
|
#!/bin/bash
# Ensure we're testing the latest build
make 1>&2
if [ $? -ne 0 ]
then
echo "Build failed, fix that first" 1>&2
  exit 1
fi
# Varying problem sizes
for prime in 100 10000 1000000 100000000 250000000 500000000 1000000000
do
# Headers for columns
echo "Max Prime, Threads, Time, Prime Count"
# 1-32 threads
for threadCount in {1..32}
do
echo Max prime: $prime, Threads: $threadCount 1>&2
./threaded -t $threadCount $prime
done
# newline
echo
done
| true |
9c2c8c6957eed2f1fa9f038560deab238e291b60
|
Shell
|
shauntheawesome1/DVC-COMSC-171-Introduction-to-Linux-and-UNIX
|
/BashTest_Shift.bash
|
UTF-8
| 2,127 | 3.53125 | 4 |
[] |
no_license
|
return codes
ls
echo $? # return code from previous command, zero if OK, non-zero if error
ls /foo # error
echo $?
true
echo $?
false
echo $?
string conditional expressions
x=qwerty y=asdfgh # set some variables for testing
[[ $x == q*y ]] # string matches wildcard (!= does not match)
echo $?
[[ $x =~ q[ertw]{4}y ]] # string matches extended regular expression
echo $?
[[ $x > $y ]] # first string sorts after second (< sorts before)
echo $?
[[ -r /etc/passwd ]] # file is readable (-w writable, -x executable)
echo $?
[[ -d /etc/passwd ]] # file is directory (-f data file, -l symbolic link)
echo $?
[[ script1 -nt /etc/passwd ]] # first file is newer than second (-ot older)
echo $?
[[ a == a && b == b ]] # logical AND (|| logical OR)
echo $?
[[ ! a == a ]] # logical NOT
echo $?
arithmetic conditional expressions
a=3 b=8 # set some variables for testing
(( a < b )) # less than ( > greater than)
echo $?
(( a == b )) # equal ( != not equal)
echo $?
(( a <= b )) # less than or equal ( >= greater than or equal)
echo $?
(( a )) # non-zero
echo $?
(( a - a )) # zero
echo $?
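(( c = a * b )) # arithmetic assignment also works inside (( )); no $ needed on variables
echo $c # prints 24 given a=3 b=8 above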
scripts
You can simplify the edit/run cycle by keeping your script open in an editor. To test the code, save your work, suspend the editor with ^Z, run the script at the command prompt, then bring the editor back with fg.
Another way is to open two terminal sessions. Run an editor in one and run your script in the other.
cat > script1
#!/bin/bash
echo $0 # script name
echo $1 $2 $3 # $1 is 1st argument, $2 is 2nd argument, etc.
echo $* # all arguments ($@ does the same)
echo $# # number of arguments
^D # end of input
bash script1 first second third # run with 3 arguments
cat > script2
#!/bin/bash
a=($*) # assign arguments to an array
echo ${a[9]}
echo ${10} # you can reference more than 9 arguments with ${}
^D
bash script2 0 1 2 3 4 5 6 7 8 9 # run with 10 arguments
cat > script3
#!/bin/bash
echo $1
shift # moves 2nd arg to 1st position, 3rd arg to 2nd position, etc
echo $1
shift
echo $1
^D
bash script3 first second third # run with 3 arguments
rm script[1-3] # clean up
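A common use of shift is stepping through every argument in a loop:
cat > script4
#!/bin/bash
while [ $# -gt 0 ] # loop until all arguments are consumed
do
    echo "processing: $1"
    shift
done
^D
bash script4 first second third # run with 3 arguments
rm script4 # clean up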
| true |
1c8cd7e3d697232bebc64a12110d88464aa7e3db
|
Shell
|
jaxxzer/DCA-dev
|
/scripts/remote_flash.sh
|
UTF-8
| 901 | 3.390625 | 3 |
[] |
no_license
|
#!/bin/bash
# flash firmware to pixhawk connected to remote host
# Jacob Walser September 2016
DATE=$(date +%y-%m-%d.%H:%M:%S)
BRANCH=$(git rev-parse --abbrev-ref HEAD)
COMMIT=$(git rev-parse --short HEAD)
# Name the firmware with descriptive information
FILENAME=$DATE.$BRANCH.$COMMIT.px4
echo $FILENAME
# copy the firmware to remote host
scp ~/git/ArduSub/ArduSub/ArduSub-v2.px4 pi@20.0.0.20:~/$FILENAME
# quit mavproxy on remote host
# move old firmware
# move new firmware
# flash new firmware
# wait 5 seconds for pixhawk to reboot
# restart mavproxy
ssh pi@20.0.0.20 \
"sudo screen -X -S mavproxy quit; \
mv ~/Pixhawk_FW/current/*.px4 ~/Pixhawk_FW/previous/; \
mv $FILENAME ~/Pixhawk_FW/current/;
python px_uploader.py --port /dev/ttyACM0 ~/Pixhawk_FW/current/$FILENAME; \
sleep 5; \
sudo screen -dm -S mavproxy /home/pi/git/DCA-dev/companion/RPI2/Raspbian/start_mavproxy.sh; \
echo Done."
| true |
83200458aa42de2289f386627bd17d5efa1f206b
|
Shell
|
juanfmx2/heroku-buildpack-gettext-customizable
|
/bin/compile
|
UTF-8
| 1,843 | 4.0625 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
# bin/compile <build-dir> <cache-dir>
# Resolves in which directory is this file located so it does not matter from which path it is being called
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
export DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
echo "SCRIPT LOCATION: "$DIR
set -e
BP_DIR=$DIR/..
GETTEXT_TARBALL=http://ftp.gnu.org/pub/gnu/gettext/gettext-latest.tar.gz
BUILD_DIR=$1
CACHE_DIR=$2
ENV_DIR=$3
TARGET_DIR=$CACHE_DIR
echo 'TARGET_DIR: '$TARGET_DIR
cd $TARGET_DIR
write_export() {
echo 'export PATH="'${TARGET_DIR}'/gettext/bin:$PATH"' > $BP_DIR/export
echo 'BP_DIR/export: '$BP_DIR'/export:'
cat $BP_DIR/export
cp -r ${TARGET_DIR}'/gettext' $HOME
}
export_gettext_to_path(){
echo 'ls -la '$TARGET_DIR'/gettext/bin:'
ls -la $TARGET_DIR/gettext/bin
write_export
}
if [ -d $TARGET_DIR/gettext/bin ]; then
echo "$TARGET_DIR/gettext/bin already installed"
export_gettext_to_path
exit 0
fi
echo "-----> Installing gettext..."
curl --silent --max-time 60 --location "$GETTEXT_TARBALL" | tar xz
cd $(find . -maxdepth 1 -name 'gettext*')
sed -i '/^TESTS =/d' gettext-runtime/tests/Makefile.in &&
sed -i 's/test-lock..EXEEXT.//' gettext-tools/gnulib-tests/Makefile.in
mkdir $TARGET_DIR/gettext
mkdir $TARGET_DIR/gettext-docs
./configure --prefix=$TARGET_DIR/gettext --disable-static --docdir=$TARGET_DIR/gettext-docs &&
make &&
make check &&
make install ||
(echo "-----> BUILD FAILED" && exit 1)
echo "-----> done in $TARGET_DIR/gettext"
export_gettext_to_path
| true |
917c44c8ee0a00f9468827cbd9751b2cd4862ef7
|
Shell
|
wonesy/wanki
|
/build.bash
|
UTF-8
| 177 | 2.8125 | 3 |
[] |
no_license
|
#!/bin/bash
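# Usage: ./build.bash [publish] -- pass "publish" to also push the image to Docker Hub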
pushd $(git rev-parse --show-toplevel)
docker build -t wonesy/wanki:latest . || exit 1
if [[ $1 == "publish" ]]; then
docker push wonesy/wanki:latest
fi
popd
| true |
8164306dc13c057c2bfcddae1773f59dad93cb25
|
Shell
|
joshisanonymous/class_assignments
|
/socio_seminar_project/extract_lines.sh
|
UTF-8
| 721 | 3.703125 | 4 |
[] |
no_license
|
#!/bin/bash
# This script will extract lines that contain 3rd person plural subject
# pronouns from plain text transcriptions of interviews and saves them
# a new text file with the original line numbers.
#
# Usage: The script takes one or more arguments, which is the list of text
# files.
#
# -Joshua McNeill (joshua dot mcneill at uga dot edu)
for file in $@
do
# Captures the speaker ID from the filename
filename=`echo "$file" |
awk -F "." '{ print $1 }' | # removes the extension
awk -F "/" '{ print $NF }'` # removes the path
# Captures the actual lines
cat $file |
egrep -vn "^J:" | # remove lines spoken by the interviewer
egrep "(ça|c'|ils|elles|eux)" > ${filename}_lines.txt
done
| true |
fb904b1c3245495b391ab9d64fef02314744212b
|
Shell
|
marcmascort/bash-helpers
|
/src/display
|
UTF-8
| 823 | 3 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
ANSI_NO_COLOR='\033[0m'
ANSI_COLOR_RED='\033[0;31m'
ANSI_COLOR_GREEN='\033[0;32m'
ANSI_COLOR_ORANGE='\033[0;33m'
ANSI_COLOR_YELLOW='\033[1;33m'
ANSI_COLOR_BLUE='\033[0;34m'
ANSI_COLOR_LIGHT_BLUE='\033[1;34m'
success() {
printf "${ANSI_COLOR_GREEN}$*${ANSI_NO_COLOR}"
}
warning() {
printf "${ANSI_COLOR_ORANGE}$*${ANSI_NO_COLOR}"
}
error() {
printf "${ANSI_COLOR_RED}$*${ANSI_NO_COLOR}"
}
highlight() {
printf "${ANSI_COLOR_LIGHT_BLUE}$*${ANSI_NO_COLOR}"
}
text() {
printf "${ANSI_NO_COLOR}$*"
}
title() {
echo ""
echo ""
echo -e "${ANSI_COLOR_YELLOW}$*${ANSI_NO_COLOR}"
echo -e "${ANSI_COLOR_YELLOW}----------------------------------------------------------------${ANSI_NO_COLOR}"
echo ""
}
yell() {
echo -e "$0: ${ANSI_COLOR_RED}$*${ANSI_NO_COLOR}" >&2;
}
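
# Example usage (messages are illustrative):
#   title "Build"
#   success "compiled OK\n"
#   yell "something went wrong"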
| true |
ee00754938271651458ba363444d3219203698f6
|
Shell
|
j3ffml/dotfiles
|
/bashrc
|
UTF-8
| 883 | 3.28125 | 3 |
[] |
no_license
|
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# don't put duplicate lines in the history. See bash(1) for more options
# don't overwrite GNU Midnight Commander's setting of `ignorespace'.
HISTCONTROL=$HISTCONTROL${HISTCONTROL+:}ignoredups
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoreboth
# append to the history file, don't overwrite it
#shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
#shopt -s checkwinsize
# 256 colors
export XTERM=xterm-256color
# set a fancy prompt
# PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
# enable color support of ls and also add handy aliases
alias ls='ls --color'
alias ll='ls -la'
alias la='ls -A'
alias df='df -h'
alias du='du -h'
| true |
b024bc416a48db433ae9aa62ec39402bf1838b99
|
Shell
|
sandeep937/Devel
|
/bin/android/sdcard1_umount.sh
|
UTF-8
| 923 | 3.96875 | 4 |
[] |
no_license
|
#!/system/xbin/env bash
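# Unmounts the SD-card paths listed in CARD_AUTO_MOUNTED below, retrying
# each up to five times before giving up.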
CARD_AUTO_MOUNTED="/storage/sdcard1 /mnt/fuse/sdcard1"
mkdir_maybe ( ) {
[ -d "$1" ] || mkdir -p "$1"
}
is_mounted ( ) {
mount | grep -q " $1 "
}
say_and_do ( ) {
echo "$*"
"$@"
}
echo "umount the automatic mount of the card"
for auto_mounted in ${CARD_AUTO_MOUNTED}
do
ok=1
if ! is_mounted "${auto_mounted}"
then
echo "${auto_mounted} already unmounted"
continue
fi
for i in {1..5}
do
echo "Attempt $i to umount ${auto_mounted}"
say_and_do umount "${auto_mounted}"
ok="$?"
if [ "$ok" == "0" ]
then
echo "Umounted ${auto_mounted}"
break
else
echo "Could not umount ${auto_mounted}, waiting a bit"
sleep 5
fi
done
if [ "$ok" != "0" ]
then
echo "Could not umount ${auto_mounted}, exiting"
exit 1
fi
done
| true |
44df00c221c1395e43017676a396d0fab8101e5d
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/pachist-git/PKGBUILD
|
UTF-8
| 971 | 2.734375 | 3 |
[] |
no_license
|
# Maintainer: Daniel Sandman <revoltism@gmail.com>
# Co-Maintainer: Maxim Andersson <thesilentboatman@gmail.com>
pkgname=pachist-git
_gitname=pachist
pkgver=r51.3f5521b
pkgrel=1
pkgdesc="A small bash program to show the installed package history in Arch Linux."
arch=('any')
url="https://github.com/shellkr/pachist"
license=('GPL3')
makedepends=('git')
provides=('pachist')
conflicts=('pachist')
source=('git://github.com/shellkr/pachist.git#branch=master')
sha256sums=('SKIP')
pkgver() {
cd "${srcdir}/${_gitname}"
echo "r$(git rev-list --count HEAD).$(git rev-parse --short HEAD)"
}
package() {
cd "${srcdir}/${_gitname}"
install -D -m755 pachist -t "${pkgdir}/usr/bin"
install -D -m644 zsh_pachist "${pkgdir}/usr/share/zsh/site-functions/_pachist"
install -D -m644 bash_pachist "${pkgdir}/usr/share/bash-completion/completions/pachist"
install -D -m644 pachist.8 -t "${pkgdir}/usr/share/man/man8"
install -D -m644 README.md "${pkgdir}/usr/share/doc/${_gitname}/README"
}
# vim:set ts=2 sw=2 et:
| true |
cb9599bd058d049a1002bf0a4aa8280055e46a16
|
Shell
|
grpc/grpc
|
/tools/internal_ci/linux/grpc_xds_k8s_xlang.sh
|
UTF-8
| 4,093 | 3.53125 | 4 |
[
"BSD-3-Clause",
"MPL-2.0",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2021 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eo pipefail
# Constants
readonly GITHUB_REPOSITORY_NAME="grpc"
readonly TEST_DRIVER_INSTALL_SCRIPT_URL="https://raw.githubusercontent.com/${TEST_DRIVER_REPO_OWNER:-grpc}/grpc/${TEST_DRIVER_BRANCH:-master}/tools/internal_ci/linux/grpc_xds_k8s_install_test_driver.sh"
## xDS test server/client Docker images
readonly SERVER_LANGS="cpp go java"
readonly CLIENT_LANGS="cpp go java"
readonly MAIN_BRANCH="${MAIN_BRANCH:-master}"
#######################################
# Main function: provision software necessary to execute tests, and run them
# Globals:
# KOKORO_ARTIFACTS_DIR
# GITHUB_REPOSITORY_NAME
# SRC_DIR: Populated with absolute path to the source repo
# TEST_DRIVER_REPO_DIR: Populated with the path to the repo containing
# the test driver
# TEST_DRIVER_FULL_DIR: Populated with the path to the test driver source code
# TEST_DRIVER_FLAGFILE: Populated with relative path to test driver flagfile
# TEST_XML_OUTPUT_DIR: Populated with the path to test xUnit XML report
# GIT_ORIGIN_URL: Populated with the origin URL of git repo used for the build
# GIT_COMMIT: Populated with the SHA-1 of git commit being built
# GIT_COMMIT_SHORT: Populated with the short SHA-1 of git commit being built
# KUBE_CONTEXT: Populated with name of kubectl context with GKE cluster access
# Arguments:
# None
# Outputs:
# Writes the output of test execution to stdout, stderr
#######################################
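# Hypothetical local invocation (CI normally provides these variables):
# LATEST_BRANCH=v1.46.x OLDEST_BRANCH=v1.36.x ./grpc_xds_k8s_xlang.sh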
main() {
local script_dir
script_dir="${PWD}/$(dirname "$0")"
# Source the test driver from the master branch.
echo "Sourcing test driver install script from: ${TEST_DRIVER_INSTALL_SCRIPT_URL}"
source /dev/stdin <<< "$(curl -s "${TEST_DRIVER_INSTALL_SCRIPT_URL}")"
activate_gke_cluster GKE_CLUSTER_PSM_SECURITY
set -x
if [[ -n "${KOKORO_ARTIFACTS_DIR}" ]]; then
kokoro_setup_test_driver "${GITHUB_REPOSITORY_NAME}"
if [ "${TESTING_VERSION}" != "master" ]; then
echo "Skipping cross lang testing for non-master branch ${TESTING_VERSION}"
exit 0
fi
cd "${TEST_DRIVER_FULL_DIR}"
else
local_setup_test_driver "${script_dir}"
cd "${SRC_DIR}/${TEST_DRIVER_PATH}"
fi
source "${script_dir}/grpc_xds_k8s_run_xtest.sh"
local failed_tests=0
local successful_string
local failed_string
LATEST_BRANCH=$(find_latest_branch "${LATEST_BRANCH}")
OLDEST_BRANCH=$(find_oldest_branch "${OLDEST_BRANCH}" "${LATEST_BRANCH}")
# Run cross lang tests: for given cross lang versions
XLANG_VERSIONS="${MAIN_BRANCH} ${LATEST_BRANCH} ${OLDEST_BRANCH}"
declare -A FIXED_VERSION_NAMES=( ["${MAIN_BRANCH}"]="${MAIN_BRANCH}" ["${LATEST_BRANCH}"]="latest" ["${OLDEST_BRANCH}"]="oldest")
for VERSION in ${XLANG_VERSIONS}
do
for CLIENT_LANG in ${CLIENT_LANGS}
do
for SERVER_LANG in ${SERVER_LANGS}
do
if [ "${CLIENT_LANG}" != "${SERVER_LANG}" ]; then
FIXED="${FIXED_VERSION_NAMES[${VERSION}]}"
if run_test "${CLIENT_LANG}" "${VERSION}" "${SERVER_LANG}" "${VERSION}" "${FIXED}" "${FIXED}"; then
successful_string="${successful_string} ${VERSION}/${CLIENT_LANG}-${SERVER_LANG}"
else
failed_tests=$((failed_tests+1))
failed_string="${failed_string} ${VERSION}/${CLIENT_LANG}-${SERVER_LANG}"
fi
fi
done
echo "Failed test suites: ${failed_tests}"
done
done
set +x
echo "Failed test suites list: ${failed_string}"
echo "Successful test suites list: ${successful_string}"
}
main "$@"
| true |
edf387f537863ef6a43e28e06a37f90aee0ad98e
|
Shell
|
TranAnhOffical/codelab-project
|
/codelab.sh
|
UTF-8
| 768 | 3.078125 | 3 |
[] |
no_license
|
#!/bin/bash
echo "========== Main Menu Codelab =========="
echo " 1) Install Xfce4"
echo " 2) Install Net-Tools"
echo " 3) Install C3CBot"
echo " 4) Install MiraiBot"
echo "======================================="
read n
case $n in
1) echo "Loading Package"
   wget https://raw.githubusercontent.com/TranAnhOffical/codelab-project/main/install-xfce4-ngrok && bash install-xfce4-ngrok ;;
2) echo "Install Net-Tools"
   sudo apt install net-tools ;;
3) echo "Loading Package C3CBot"
   wget https://raw.githubusercontent.com/TranAnhOffical/codelab/main/c3clab.sh && bash c3clab.sh ;;
4) echo "Loading Package MiraiBot"
   wget https://raw.githubusercontent.com/TranAnhOffical/codelab/main/miraibotlab.sh && bash miraibotlab.sh ;;
*) echo "Invalid choice" ;;
esac
| true |
20f3edd5d12a0e49f5227dfa936b8b7ebe00ffcf
|
Shell
|
rickb777/sqlapi
|
/version.sh
|
UTF-8
| 306 | 3.359375 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh -e
V="$1"
if [ -z "$V" ]; then
V=$(git describe --tags --always 2>/dev/null)
fi
VFILE=version.go
echo "// Updated automatically (altered manually just prior to each release)" > $VFILE
echo "" >> $VFILE
echo "package sqlapi" >> $VFILE
echo "" >> $VFILE
echo "const Version = \"$V\"" >> $VFILE
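# Resulting version.go for a hypothetical tag v1.2.3:
#
# // Updated automatically (altered manually just prior to each release)
#
# package sqlapi
#
# const Version = "v1.2.3"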
| true |
5d28dcec2c58c72a0442502e373ce4e9134c428a
|
Shell
|
anthony-khong/dotfiles
|
/aws/notes.sh
|
UTF-8
| 1,060 | 2.625 | 3 |
[
"Unlicense"
] |
permissive
|
# Startup
# - Create user
# - Give user "AdministratorAccess" policy
# - Create key pair
# - Allow SSH connections in the security group
# Configure AWS (Region: ap-southeast-1, Output: json)
aws configure
# EC2
aws ec2 describe-instances
aws ec2 describe-instances \
--filters "Name=instance-type,Values=t2.micro" \
--query "Reservations[].Instances[].InstanceId"
aws ec2 describe-instances \
--filters "Name=instance-type,Values=t2.micro" \
--query "Reservations[].Instances[].PublicDnsName"
aws ec2 create-key-pair --key-name cloud-dev
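# The bare call above discards the private key; one way to save it for the
# ssh command below (sketch; --query/--output are standard AWS CLI options):
aws ec2 create-key-pair --key-name cloud-dev \
    --query 'KeyMaterial' --output text > ~/.aws/cloud-dev.pem
chmod 400 ~/.aws/cloud-dev.pem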
aws ec2 describe-key-pairs --key-name cloud-dev
aws ec2 run-instances \
--image-id ami-0c5199d385b432989 \
--instance-type t2.micro \
--key-name cloud-dev
export INSTANCE_ID=i-0ff3125497d11faef
export PUBLIC_DNS=ec2-54-254-186-26.ap-southeast-1.compute.amazonaws.com
aws ec2 get-console-output --instance-id $INSTANCE_ID
ssh -i ~/.aws/cloud-dev.pem ubuntu@$PUBLIC_DNS
aws ec2 terminate-instances --instance-ids $INSTANCE_ID
# TODO: how to add storage?
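# One possible answer (untested sketch; assumes the AMI's root device is /dev/sda1):
# request a larger root EBS volume via a block device mapping at launch.
aws ec2 run-instances \
    --image-id ami-0c5199d385b432989 \
    --instance-type t2.micro \
    --key-name cloud-dev \
    --block-device-mappings '[{"DeviceName":"/dev/sda1","Ebs":{"VolumeSize":16,"VolumeType":"gp2"}}]'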
# TODO: provide a startup script
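# One possible answer (sketch; startup.sh is a hypothetical local file):
# pass user data at launch, which cloud-init runs on first boot.
aws ec2 run-instances \
    --image-id ami-0c5199d385b432989 \
    --instance-type t2.micro \
    --key-name cloud-dev \
    --user-data file://startup.sh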
| true |
035b49fcce20ca204ae922ed7877755f0390950c
|
Shell
|
xcode2010/saurik-svn-mirror
|
/menes/trunk/utils/mtime.sh
|
UTF-8
| 118 | 2.625 | 3 |
[] |
no_license
|
#!/bin/sh
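# GNU stat accepts --version and -c FORMAT; BSD/macOS stat does not, so
# probing --version selects the right mtime flag.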
if stat --version >/dev/null 2>&1; then
exec stat -c "%Y" -- "$@"
else
exec stat -f "%m" -- "$@"
fi
| true |
9af1d237706fb260f3af06e4a7293bc0aca095d8
|
Shell
|
h-youhei/homedir
|
/bin/add-network.sh
|
UTF-8
| 408 | 3.8125 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/sh
list=`networkctl list`
echo "$list"
printf "choose network. type IDX (default 2):"
read idx
case $idx in
"" )
idx=2 ;;
[1-9] ) ;;
* )
echo "please type a number"
exit 1 ;;
esac
name=`echo "$list" | grep "^ *$idx" | awk '{print $2}'`
if test -z "$name"
then
echo "the network ID does not exist"
exit 1
fi
cat > /etc/systemd/network/$name.network << END
[Match]
Name=$name
[Network]
DHCP=yes
END
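# Hypothetical follow-up (not in the original script): reload networkd so
# the new .network file takes effect.
# systemctl restart systemd-networkd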
| true |
a28afe087dd772e52c193371e30a2b1190b185e3
|
Shell
|
tudorpavel/tomorrow-theme
|
/Mate-Terminal/setup-theme.sh
|
UTF-8
| 1,335 | 3.03125 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
[[ -z "$PROFILE_NAME" ]] && PROFILE_NAME=Tomorrow
[[ -z "$PROFILE_SLUG" ]] && PROFILE_SLUG=tomorrow
[[ -z "$GCONFTOOL" ]] && GCONFTOOL=gsettings
[[ -z "$BASE_SCHEMA" ]] && BASE_SCHEMA=org.mate.terminal.profile:/org/mate/terminal/profiles
PROFILE_SCHEMA="$BASE_SCHEMA/$PROFILE_SLUG/"
gset() {
local key="$1"; shift
local val="$1"; shift
"$GCONFTOOL" set "$PROFILE_SCHEMA" "$key" "$val"
}
# because gconftool doesn't have "append"
glist_append() {
local schema="$1"; shift
local key="$1"; shift
local val="$1"; shift
local entries="$(
{
"$GCONFTOOL" get "$schema" "$key" | tr -d '[]' | tr , "\n" | fgrep -v "$val"
echo " '$val'"
} | head -c-1 | tr "\n" ,
)"
"$GCONFTOOL" set "$schema" "$key" "[$entries]"
}
# append the Tomorrow profile to the profile list
glist_append org.mate.terminal.global profile-list "$PROFILE_SLUG"
gset visible-name "$PROFILE_NAME"
gset palette "#000000000000:#919122222626:#777789890000:#AEAE7B7B0000:#1D1D25259494:#68682a2a9b9b:#2B2B66665151:#929295959393:#666666666666:#CCCC66666666:#B5B5BDBD6868:#F0F0C6C67474:#8181A2A2BEBE:#B2B29494BBBB:#8A8ABEBEB7B7:#ECECEBEBECEC"
gset background-color "#1d1d1f1f2121"
gset foreground-color "#c5c5c8c8c6c6"
gset bold-color "#8A8ABEBEB7B7"
gset bold-color-same-as-fg "false"
gset use-theme-colors "false"
| true |
53f736a83a9fb73a1128f30eb6dd2a5a926d2976
|
Shell
|
cmeiklejohn/AMBROSIA
|
/InternalImmortals/PerformanceTestInterruptible/run_two_docker_containers.sh
|
UTF-8
| 2,459 | 3.40625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
echo "Using AZURE_STORAGE_CONN_STRING =" $AZURE_STORAGE_CONN_STRING
set -euo pipefail
################################################################################
# Run TWO docker containers, each with an ImmortalCoordinator, one
# containing the PTI server and one containing the client.
################################################################################
# This script is meant to be used in automated testing. The output is
# ugly (interleaved) because it creates concurrent child processes.
#
# It should exit cleanly after the test is complete.
cd `dirname $0`
source ./default_var_settings.sh
INSTANCE_PREFIX=""
if [ $# -ne 0 ];
then INSTANCE_PREFIX="$1"
fi
CLIENTNAME=${INSTANCE_PREFIX}dock2C
SERVERNAME=${INSTANCE_PREFIX}dock2S
echo "Running PerformanceTestInterruptible between two containers"
echo " Instance: names $CLIENTNAME, $SERVERNAME"
function DOCKRUN() {
echo "Running docker container with: $*"
docker run --rm --env AZURE_STORAGE_CONN_STRING="$AZURE_STORAGE_CONN_STRING" $*
}
DOCKRUN ambrosia/ambrosia-perftest Ambrosia RegisterInstance -i $CLIENTNAME --rp $PORT1 --sp $PORT2 -l "/ambrosia_logs/"
DOCKRUN ambrosia/ambrosia-perftest Ambrosia RegisterInstance -i $SERVERNAME --rp $PORT3 --sp $PORT4 -l "/ambrosia_logs/"
rm server.id || true
# [2018.11.29] Docker for Windows appears to have a bug that will not properly
# pass through an absolute path for the program to run, but instead will prepend
# "C:/Users/../vendor/git-for-windows/", incorrectly reinterpreting the path on
# the host *host*. For now, simply assume they're in PATH:
DOCKRUN --env AMBROSIA_INSTANCE_NAME=$SERVERNAME --cidfile ./server.id \
ambrosia/ambrosia-perftest runAmbrosiaService.sh \
Server --rp $PORT4 --sp $PORT3 -j $CLIENTNAME -s $SERVERNAME -n 1 -c &
sleep 10 # Clarifies output.
DOCKRUN --env AMBROSIA_INSTANCE_NAME=$CLIENTNAME ambrosia/ambrosia-perftest runAmbrosiaService.sh \
Job --rp $PORT2 --sp $PORT1 -j $CLIENTNAME -s $SERVERNAME --mms 65536 -n 2 -c
echo "Job docker image exited cleanly, killing the server one."
docker kill $(cat ./server.id)
rm server.id
echo "Docker ps should show as empty now:"
docker ps
echo "TwoContainers test mode completed."
echo "Attempt a cleanup of our table metadata:"
DOCKRUN ambrosia/ambrosia-perftest UnsafeDeregisterInstance $CLIENTNAME || true
DOCKRUN ambrosia/ambrosia-perftest UnsafeDeregisterInstance $SERVERNAME || true
echo "All done."
| true |
74f140f929a90beb1dd2eb5f43a3b672b3c57f15
|
Shell
|
wgx731/dr-spring
|
/scripts/build-images.sh
|
UTF-8
| 1,385 | 3.296875 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
echo "[BUILD-IMAGES]: precondition list"
echo "1. 'docker', 'docker-compose' and 'gzip' installed on system."
echo "2. following config files must be in '$PWD/target/config': "
echo "
dr-spring-dubbo-provider.properties
dr-spring-gateway.properties
grpc.env
"
echo "3. following environment variables must be present: "
echo "
DR_SPRING_VERSION
DUBBO_WEB_H_PORT
DUBBO_WEB_C_PORT
DUBBO_MM_H_PORT
DUBBO_MM_C_PORT
DUBBO_APP_H_PORT
DUBBO_APP_C_PORT
DUBBO_QOS_H_PORT
DUBBO_QOS_C_PORT
GRPC_APP_H_PORT
GRPC_APP_C_PORT
GATEWAY_WEB_H_PORT
GATEWAY_WEB_C_PORT
GATEWAY_MM_H_PORT
GATEWAY_MM_C_PORT
"
echo "[BUILD-IMAGES] source config.sh ..."
source ${PWD}/scripts/config.sh || exit 50
echo "[BUILD-IMAGES] build image ..."
BASE_DIR=${PWD}
cd ${BASE_DIR}/target || exit 51
cp -R ${BASE_DIR}/scripts/docker/* ./ || exit 51
cp -R ${BASE_DIR}/local/config.sample ./config || exit 51
source ${BASE_DIR}/local/ports.sh || exit 51
docker-compose build || exit 51
cd ${BASE_DIR} || exit 51
echo "[BUILD-IMAGES] save image ..."
mkdir -p ${PWD}/target/tars || exit 52
for module in ${DOCKER_MODULES}
do
echo " saving ${module} image ..."
docker save \
${module} |
gzip -c > ${PWD}/target/tars/${module}-${RELEASE_VERSION}.tar.gz || exit 52
done
ls -lah ${PWD}/target/tars || exit 52
| true |