blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | text | download_success
---|---|---|---|---|---|---|---|---|---|---|---
0e37fdf78b03203e9f86e9c9726f4ceb73405f75 | Shell | s-reimann/progressbar.sh | /progressbar.sh | UTF-8 | 1,073 | 3.921875 | 4 | [] | no_license |
#!/bin/bash
#
# +-----------------------------------------------------------------------+
# | description: this script will count the number of items to process |
# | and print a progress bar. |
# | date: February 7th, 2018 |
# | author: Sascha Reimann |
# | git: https://github.com/s-reimann/progressbar.sh |
# +-----------------------------------------------------------------------+
#
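# note: the bar width is the terminal width minus 8 columns reserved for the percentage readout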
total_steps="$(($(tput cols) - 8))"
total=$(find /dev/|wc -l)
if (( total < total_steps )); then
total_steps="$total"
fi
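# advance the bar by one segment roughly every total/total_steps processed items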
threshold=$(( total / total_steps - 1 ))
for i in $(find /dev/); do
if [ -f "$i" ] ; then sleep 0.01; fi
(( count++ ))
if (( $(echo "$count $threshold" | awk '{print ($1 > $2)}') )); then
threshold=$(awk "BEGIN {print $threshold + $total / $total_steps}")
if (( step < total_steps )); then
(( step++ ))
percentage=$(( step * 100 / total_steps ))
digits=${#percentage}
echo -en "\r|$(printf %${step}s |tr " " ":")$(printf %$(( total_steps - step ))s)| ${percentage}$(printf %$(( 3 - digits ))s)%"
fi
fi
done
echo
| true |
ae8ddcdf1c52dd6daa4620d3e201372b9e2ce241 | Shell | shota-imoto/grass_chords_v2 | /db_backup_sh/execute_db_backup.sh | UTF-8 | 366 | 2.546875 | 3 | [] | no_license |
#!/bin/bash
# Run the shell script that generates the dump
docker container exec ec2-user_db_1 bash db_backup.sh
# Copy the dump from the container to the host
docker container cp ec2-user_db_1:/etc/mysql_backup/gc_backup.sql .
# Upload the dump from the host to S3
aws s3 mv gc_backup.sql s3://grasschords-db-backup/mysql_backup/gc_backup.sql
| true |
f62f13555998fdb3bdf5a0c6357d4e350149200c | Shell | KensoDev/pig-herder | /launcher/submit-to-amazon.sh | UTF-8 | 596 | 2.96875 | 3 | ["MIT"] | permissive |
date_string=`date -v-1d +%F`
echo "Starting process on: $date_string"
cluster_id=`aws emr create-cluster --name "$CLUSTER_NAME-$date_string" \
--log-uri s3://$BUCKET_NAME/logs/ \
--ami-version 3.8.0 \
--applications Name=Hue Name=Hive Name=Pig \
--use-default-roles --ec2-attributes KeyName=$KEY_NAME \
--instance-type m3.xlarge --instance-count 3 \
--bootstrap-action Path=s3://$BUCKET_NAME/bootstrap.sh | awk '$1=$1' ORS='' | grep ClusterId | awk '{ print $2 }' | sed s/\"//g | sed s/}//g`
echo "Cluster Created: $cluster_id"
sh submit-steps.sh $cluster_id $date_string CONTINUE
| true |
c4f8c43d620fe5c6a27d78dc6459e40c61662a24 | Shell | 2stacks/docker-ovpn | /bin/run | UTF-8 | 3,184 | 2.84375 | 3 | ["MIT"] | permissive |
#!/bin/sh
set -e
[ -d /dev/net ] ||
mkdir -p /dev/net
[ -c /dev/net/tun ] ||
mknod /dev/net/tun c 10 200
cd /etc/openvpn
if [ -n "${OVPN_DEBUG}" ]
then
TCPSTATUS="/tmp/openvpn-status-443.log"
UDPSTATUS="/tmp/openvpn-status-1194.log"
TCPLOG="/tmp/tcp443.log"
UDPLOG="/tmp/udp1194.log"
VERBOSE="3"
else
TCPSTATUS="/dev/null"
UDPSTATUS="/dev/null"
TCPLOG="/dev/null"
UDPLOG="/dev/null"
VERBOSE="0"
fi
cat >tcp443.conf <<EOF
port 443
proto tcp-server
dev tun443
ca ca.crt
cert site.crt
key site.key
dh site.dh
server 10.43.95.0 255.255.255.0
push "persist-key"
push "persist-tun"
push "redirect-gateway def1"
push "dhcp-option DNS $DNS_HOST1"
push "dhcp-option DNS $DNS_HOST2"
duplicate-cn
keepalive 10 30
tls-auth ta.key 0
auth SHA256
cipher AES-128-CBC
tls-cipher TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256:TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256:TLS-DHE-RSA-WITH-AES-128-CBC-SHA256:TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256:TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256
comp-lzo
user nobody
group nogroup
persist-key
persist-tun
status $TCPSTATUS
plugin /usr/lib/openvpn/radiusplugin.so /etc/openvpn/radius443.conf
username-as-common-name
verify-client-cert none
log $TCPLOG
suppress-timestamps
verb $VERBOSE
mute 10
EOF
cat >radius443.conf <<EOF
NAS-Identifier=OpenVpn
Service-Type=5
Framed-Protocol=1
NAS-Port-Type=5
NAS-IP-Address=ovpn
OpenVPNConfig=/etc/openvpn/tcp443.conf
subnet=255.255.255.0
overwriteccfiles=true
nonfatalaccounting=true
server
{
acctport=1813
authport=1812
name=$RADIUS_HOST
retry=3
wait=3
sharedsecret=$RADIUS_KEY
}
EOF
cat >udp1194.conf <<EOF
port 1194
proto udp
dev tun1194
ca ca.crt
key site.key
cert site.crt
dh site.dh
server 10.43.94.0 255.255.255.0
push "persist-key"
push "persist-tun"
push "redirect-gateway def1"
push "dhcp-option DNS $DNS_HOST1"
push "dhcp-option DNS $DNS_HOST2"
duplicate-cn
keepalive 10 30
tls-auth ta.key 0
auth SHA256
cipher AES-128-CBC
tls-cipher TLS-ECDHE-RSA-WITH-AES-128-CBC-SHA256:TLS-ECDHE-ECDSA-WITH-AES-128-CBC-SHA256:TLS-DHE-RSA-WITH-AES-128-CBC-SHA256:TLS-ECDHE-RSA-WITH-AES-128-GCM-SHA256:TLS-ECDHE-ECDSA-WITH-AES-128-GCM-SHA256
comp-lzo
fast-io
user nobody
group nogroup
persist-key
persist-tun
status $UDPSTATUS
plugin /usr/lib/openvpn/radiusplugin.so /etc/openvpn/radius1194.conf
username-as-common-name
verify-client-cert none
log $UDPLOG
suppress-timestamps
verb $VERBOSE
mute 10
explicit-exit-notify 1
EOF
cat >radius1194.conf <<EOF
NAS-Identifier=OpenVpn
Service-Type=5
Framed-Protocol=1
NAS-Port-Type=5
NAS-IP-Address=ovpn
OpenVPNConfig=/etc/openvpn/udp1194.conf
subnet=255.255.255.0
overwriteccfiles=true
nonfatalaccounting=true
server
{
acctport=1813
authport=1812
name=$RADIUS_HOST
retry=3
wait=3
sharedsecret=$RADIUS_KEY
}
EOF
iptables -t nat -A POSTROUTING -s 10.43.94.0/23 -o eth0 -j MASQUERADE
if [ -n "${OVPN_DEBUG}" ]
then
touch $TCPLOG $UDPLOG
while true ; do openvpn tcp443.conf ; done >> $TCPLOG &
while true ; do openvpn udp1194.conf ; done >> $UDPLOG &
tail -f $TCPLOG $UDPLOG
else
while true ; do openvpn tcp443.conf ; done &
while true ; do openvpn udp1194.conf ; done
fi
| true |
4504fca837ca61626d994a321cbc2e4094dea48d | Shell | pskarin/pyqpgen | /makemany | UTF-8 | 369 | 2.984375 | 3 | [] | no_license |
#!/bin/bash
mv .config .config.save;
for mf in "$@";
do
D=$(dirname $mf);
L=$(basename $mf .m);
MK=$D/.makemany.mk;
echo LIBNAME=qp$L > $MK;
echo MFILE=$L.m >> $MK;
echo QPPROJ=$L >> $MK;
make CONFIG=$MK;
if [ $? -ne 0 ];
then
echo "ERROR";
mv .config.save .config;
exit 1;
fi;
rm $MK;
done;
mv .config.save .config;
| true |
79c14f57abf6abfba5d51b00de47c04518acc903 | Shell | syntelos/gwtcc | /properties.sh | UTF-8 | 759 | 3.90625 | 4 | ["Apache-2.0"] | permissive |
#!/bin/bash
#
# Generate list of missing properties files
gwt_dir=~/src/google-web-toolkit
dev_dir=${gwt_dir}/trunk/dev
core_dir=${dev_dir}/core
list=properties.txt
rm -f ${list}
for srcd in $(find src/com/google/gwt/dev -type d )
do
tgtd=${core_dir}/${srcd}
if [ -d ${tgtd} ]
then
if flist=$(2>/dev/null ls ${tgtd}/*.properties) && [ -n "${flist}" ]
then
for tgt in ${flist}
do
file=$(basename ${tgt} )
src=${srcd}/${file}
if [ ! -f ${src} ]
then
echo ${src} | tee -a ${list}
fi
done
fi
else
echo "Error, target not found '${tgtd}'"
exit 1
fi
done
exit 0
| true |
91bb550f556e929254f551472a75d86bfd67ae5a | Shell | symball/docker-quick-stack | /environment_php55_mysql_developer/usergroup.sh | UTF-8 | 643 | 3.875 | 4 | ["MIT"] | permissive |
#!/bin/bash
USER_ID=$1
GROUP_ID=$2
# Check if the platform group exists and configure accordingly
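# (/etc/group entries have the form name:passwd:GID:members, so the GID is the third colon-separated field)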
if grep -q "^:$GROUP_ID:" /etc/group
then
echo "Modifying the group name"
groupmod -n platform $GROUP_ID
else
if grep -q "platform" /etc/group; then
echo "group exists"
else
echo "Adding the group"
addgroup --gid $GROUP_ID platform
fi
fi
# Check whether the user exists and simply mod the ID if so
id -u php &>/dev/null || adduser --disabled-password --home /home/php --shell /usr/bin/bash --ingroup platform --uid $USER_ID php
usermod -u $USER_ID -g platform php
| true |
13d956fd01bdeb0f87171ca4d493278adc1d0df3 | Shell | melezhik/sparky | /watcher.sh | UTF-8 | 329 | 2.828125 | 3 | [] | no_license |
set -x
set -e
if ps uax|grep bin/sparky-web.raku|grep -v grep -q; then
echo "sparky-web already running"
else
cd ~/projects/sparky
export SPARKY_HTTP_ROOT="/sparky"
export SPARKY_ROOT=/home/rakudist/projects/RakuDist/sparky
export BAILADOR=host:0.0.0.0,port:5000
nohup raku bin/sparky-web.raku > sparky-web.log &
fi
| true |
8baa22b183cbc64e00503211bdc6ee9a90ee0e28 | Shell | douglaswrg/tools_flash_cards | /scripts/adoc | UTF-8 | 1,607 | 4.40625 | 4 | [] | no_license |
#!/bin/bash
EXT='html'
OUT_DIR='/tmp'
OPEN=1
QUIET=0
function strip_extension {
flength=${#1}
start=$((flength - 5))
if [[ ${1:$start:5} == '.adoc' ]]; then
echo ${1:0:$start}
else
echo 'ERROR: Only support .adoc extensions.' 1>&2
exit 1
fi
}
function usage {
bn=`basename $0`
echo "$bn [-p] [-n] [-o out_dir] filename"
echo
exit 0
}
function check_for_tool {
# Ensure that a given tool / command can be found.
cmd=`which $1`
if [[ $? -eq 0 ]]; then
echo $cmd
else
echo 'Unable to find command: ' $1 >&2
return 1
fi
}
set -e
ADOC=`check_for_tool asciidoctor`
ADOC_PDF=`check_for_tool asciidoctor-pdf`
CMD=${ADOC}
set +e
if [[ -d $ADOC_OUT_DIR ]]; then
OUT_DIR=$ADOC_OUT_DIR
fi
while getopts ":hpnqo:" opt; do
case $opt in
h)
usage
;;
n)
OPEN=0
;;
q)
QUIET=1
;;
p)
CMD=${ADOC_PDF}
EXT='pdf'
;;
o)
OUT_DIR=$OPTARG
;;
\?)
echo "Option is not valid: -${OPTARG}"
echo
usage
;;
esac
done
shift $((OPTIND -1))
if [[ ! -f ${1} ]]; then
echo "File not found: ${1}"
exit 1
fi
bn=`basename $1`
fname=`strip_extension $bn`
FOUT=${OUT_DIR}/${fname}.${EXT}
$CMD -o ${FOUT} $1
if [[ $QUIET -eq 0 ]]; then
echo "File: ${FOUT}"
fi
if [[ $OPEN -eq 1 ]]; then
open ${FOUT}
else
if [[ $QUIET -eq 0 ]]; then
echo 'Not opening file.'
fi
fi
| true |
0349618784eb55a036d5537182bf89d5f8c60b53 | Shell | hoffmangroup/hoffutils | /bin/pickup | UTF-8 | 181 | 3.21875 | 3 | [] | no_license |
#!/usr/bin/env bash
_PICKUPDIR="${PICKUPDIR:-$HOME/.pickup}"
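# recreate the pickup directory from scratch, then symlink each named file into it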
rm -rf "${_PICKUPDIR}"
mkdir -p "${_PICKUPDIR}"
for FILENAME in "$@"; do
ln -sfv "${PWD}/${FILENAME}" "${_PICKUPDIR}"
done
| true |
883c2ed3668200731bf88a1e2c654f58367f8bdc | Shell | fishin/ficion | /bin/start.sh | UTF-8 | 856 | 3.25 | 3 | [] | no_license |
#!/bin/bash
if [ ! -e cfg/gills_confidence.json ];
then
echo "cfg/gills_confidence.json does not exist. Please copy from gills_confidence_example.json"
exit 2
fi
if [ ! -e cfg/tacklebox_confidence.json ];
then
echo "cfg/tacklebox_confidence.json does not exist. Please copy from tacklebox_confidence_example.json"
exit 2
fi
## generate a gills.json file
node_modules/.bin/confidence -c cfg/gills_confidence.json --filter.env=dev > cfg/gills.json
echo "Starting up UI"
node_modules/.bin/rejoice -c cfg/gills.json > gills_console.log &
#for i in {1..2}
for i in 1
do
echo "Starting up API instance ${i}"
node_modules/.bin/confidence -c cfg/tacklebox_confidence.json --filter.port=port${i} --filter.env=dev > cfg/tacklebox_port${i}.json
node_modules/.bin/rejoice -c cfg/tacklebox_port${i}.json > tacklebox_port${i}_console.log &
done
| true |
14af180931b2d382e707773cb69fae1537e4a909 | Shell | bennythejudge/toolbox | /Misc/zd_attachments.sh | UTF-8 | 1,121 | 3.921875 | 4 | [] | no_license |
#!/bin/bash
#set -x
export subdomain=$2
export id=$1
if [[ "$zd_user" == "" || "$zd_password" == "" ]]; then
echo "Set zd_user and zd_password in your environment"
exit 1
fi
if [[ "$1" == "" || "$2" == "" ]]; then
echo "Usage is: $(basename $0) <ticket id> <domain>"
exit 1
fi
# get the list of attachment URLs
echo "Executing curl"
curl -s -u ${zd_user}:${zd_password} \
https://${subdomain}.zendesk.com/api/v2/tickets/${id}/comments.json | jq ".comments[].attachments[].url" > file.list.txt
r=$(wc -l < file.list.txt)
echo "${r} entries"
i=1
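# each URL in the list returns a small JSON document whose attachment.content_url points at the actual file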
cat file.list.txt | while read u; do
# clean_url=$(echo $u| sed 's/"//g')
clean_url=$(echo $u| tr -d '"')
echo "$i - $clean_url"
name=ATT_${i}
curl -u ${zd_user}:${zd_password} \
-o ${name} $clean_url 2>/dev/null
i=$((i+1))
url=$(cat $name | jq '.attachment.content_url' | tr -d '"')
file_name=$(cat $name | jq '.attachment.file_name' | tr -d '"')
rm $name
echo "url: $url"
echo "file_name: $file_name"
# when you curl the url there is a redirection - the location is, finally, the attachment
nohup curl -s -L -o $file_name $url 2> /dev/null &
done
| true |
0beed3d434b55ddbd436d0d98ba28ed39646441d | Shell | fqez/common | /jails/bin/newjail | UTF-8 | 1,119 | 3.765625 | 4 | [] | no_license |
#!/bin/bash
if [ $# != 4 ]
then
echo "usage: newjail.sh so ver arch name"
echo "example: newjail.sh ubuntu trusty amd64 trusty"
exit 1
fi
so=$1
ver=$2
arch=$3
name=$4
jailpath="/usr/local/share/jails"
pathdest="/var/chroot/$name"
if [ ! -d $pathdest ]
then
url="http://archive.ubuntu.com/ubuntu"
if [ "$so" = "debian" ]
then
url="http://ftp.debian.org/debian"
elif [ "$so" = "ubuntu" ]
then
url="http://archive.ubuntu.com/ubuntu"
else
echo "SO $so not supported, only support ubuntu or debian"
exit 1
fi
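# register the new chroot with schroot, then bootstrap the base system into it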
sudo mkdir -p /var/chroot/$name
sudo sh -c "cat<<EOF>/etc/schroot/chroot.d/$name.conf
[$name]
description=$so $ver $arch
directory=/var/chroot/$name
root-users=testuser
type=directory
users=testuser
EOF"
sudo debootstrap --variant=buildd --arch $arch $ver /var/chroot/$name $url
sudo cp /etc/resolv.conf /var/chroot/$name/etc/resolv.conf
if [ -d $jailpath ]; then
$jailpath/$so/$ver/setup.sh $name
else
../jails/$so/$ver/setup.sh $name
fi
#displayjail $name
gnome-terminal -e "sudo schroot -c $name -u root"
else
echo "the jail $name already exists"
exit 1
fi
| true |
03c1b49cd5384de314114b1d2065539e05d83767 | Shell | sameergk/REINFORCE_SOURCE | /examples/flow_table/go.sh | UTF-8 | 507 | 2.96875 | 3 | ["BSD-3-Clause"] | permissive |
#!/bin/bash
cpu=$1
service=$2
print=$3
if [ -z "$service" ]
then
echo "$0 [cpu-list] [Service ID] [PRINT]"
echo "$0 3,4 1 --> cores 3,4 with Service ID of 1"
echo "$0 3,4 1 10000 --> cores 3,4 with Service ID of 1 and print rate of 10000 packets"
exit 1
fi
if [ -z "$print" ]
then
sudo ./flow_table/$RTE_TARGET/flow_table -l $cpu -n 3 --proc-type=secondary -- -r $service
else
sudo ./flow_table/$RTE_TARGET/flow_table -l $cpu -n 3 --proc-type=secondary -- -r $service -- $print
fi
| true |
cee30c4108bb095748d468850043ac96bfb24c2f | Shell | alexsuslov/dotfiles | /node.sh | UTF-8 | 1,349 | 2.75 | 3 | ["MIT"] | permissive |
#!/usr/bin/env bash
# Ask for the administrator password upfront.
sudo -v
# Keep-alive: update existing `sudo` time stamp until the script has finished.
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
# Make sure we’re using the latest Homebrew.
brew update
# Upgrade any already-installed formulae.
brew upgrade
brew install node
# coffee
npm install -g coffee-script
npm -g install js2coffee
# jade
npm -g install jade
npm -g install html2jade
npm install -g less
npm install -g yo
npm install -g gulp
npm install -g bower
# jasmine
# http://jasmine.github.io/2.1/introduction.html
npm install -g jasmine
# mocha
npm install -g mocha
npm install -g mocha-phantomjs
npm install -g generator-mocha-amd
# grunt
npm install -g grunt-cli
# express
npm install -g generator-express
npm install -g generator-node-express
# angular
npm install -g generator-angular
npm install -g generator-angular-fullstack
npm install -g generator-gulp-angular
# webapp
npm install -g generator-gulp-webapp
npm install -g generator-webapp
# mobile
npm install -g yeoman/generator-mobile
# chrome
npm install -g generator-chrome-extension
# ember
npm install -g generator-ember
# backbone
npm install -g generator-backbone
npm install -g generator-marionette
npm install -g generator-marionette-coffee
# clean
npm cache clean
| true |
dbe7803ff3449c9d5e3e03286ca9b29bbf5473ba | Shell | andytanoko/4.2.x_integration | /GTAS/GridTalk/rollout/solaris_sparcv/setup.sh | UTF-8 | 2,371 | 3.375 | 3 | [] | no_license |
#!/bin/bash
logfile=$HOME/GridTalk_setup.log
if [ -d $1 ]; then
cd $1
echo [setup] Changed directory to $1 >> $logfile
fi
echo [setup] before setenv >> $logfile
. ./setenv.sh
echo [setup] after setenv `set` >> $logfile
# application server environment settings
properties=
#properties=-Dappserver.name=jboss
#properties="$properties -Dappserver.version=4.0.2"
#properties="$properties -Dappserver.home=$APPSERVER_HOME"
#properties="$properties -Dappserver.deploy.dir=$APPSERVER_HOME/server/default/deploy"
# database environment settings
#properties="$properties -Ddb.name=mysql"
#properties="$properties -Ddb.home=$DB_HOME"
#properties="$properties -Ddb.script.cmd=./dbscript.sh"
#properties="$properties -Ddb.start.cmd=./startmysql.sh"
#properties="$properties -Ddb.stop.cmd=./stopmysql.sh"
#properties="$properties -Ddb.exec.cmd=bin/mysql"
#properties="$properties -Ddb.root.password=gtasdb"
#properties="$properties -Ddb.check.cmd=./pingmysql.sh"
# application environment settings
#properties="$properties -Dapplication.name=gtas"
#properties="$properties -Dapplication.bin.dir=$GRIDTALK_HOME/bin"
#properties="$properties -Dapplication.docs.dir=$GRIDTALK_HOME/docs"
#properties="$properties -Dapplication.data.dir=$APPSERVER_HOME/bin/gtas/data"
#properties="$properties -Dapplication.conf.dir=$APPSERVER_HOME/bin/conf"
#properties="$properties -Dapplication.backend.dir=$GRIDTALK_HOME/backend"
properties=-Dsetup.properties=setup.properties
ant_params="-debug $properties -buildfile $1/setup.xml"
echo [setup] copy keystore >> $logfile
# copy the keystore file to the keystore folder reference in jbossweb-tomcat55.sar/server.xml
# keystore file must be present during application startup
if [ ! -f "$GRIDTALK_HOME/data/keystore/keystore" ]; then
mkdir -p $GRIDTALK_HOME/data/keystore;
cp ./application/data/keystore/keystore $GRIDTALK_HOME/data/keystore/;
fi
echo [setup] copy tools.jar >> $logfile
# Put tools.jar into JRE to prevent exception for JSP
if [ ! -f "$JAVA_HOME/lib/tools.jar" ]; then
cp ./jre150_10/lib/tools.jar $JAVA_HOME/lib/tools.jar;
fi
#echo [setup] setting up GridTalk with these properties: $properties >> $logfile
#. ant -quiet -buildfile "$1/setup.xml" $properties >> $logfile
echo [setup] calling ant with: -debug $properties -buildfile $1/setup.xml >> $logfile
ant $properties -buildfile $1/setup.xml >> $logfile
| true |
76203d32fb51ab6731dc818b724e124194a7c98f | Shell | deepikabartwal/shellScripts | /underlinerl.sh | UTF-8 | 111 | 2.984375 | 3 | [] | no_license |
#! /bin/bash
input="kanishka keerthy reshmi"
underscore=$(echo "$input" | tr "[a-z]" "_" | tr " " "_")
echo "$underscore"
| true |
f3ee04c4cd2d548da2b9130d49b0fba6631f2a98 | Shell | otus-kuber-2019-06/SergeSpinoza_platform | /kubernetes-vault/vault-guides/identity/vault-chef-approle/terraform-aws/mgmt-node/templates/userdata-mgmt-node.tpl | UTF-8 | 9,906 | 3.65625 | 4 | ["MIT"] | permissive |
#!/usr/bin/env bash
set -x
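# mirror all output of this script to /var/log/tf-user-data.log and to syslog (tagged user-data)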
exec > >(tee /var/log/tf-user-data.log|logger -t user-data ) 2>&1
logger() {
DT=$(date '+%Y/%m/%d %H:%M:%S')
echo "$DT $0: $1"
}
logger "Running"
##--------------------------------------------------------------------
## Variables
# Get the instance's private DNS name
PRIVATE_DNS=$(curl http://169.254.169.254/latest/meta-data/hostname)
# User setup
USER_NAME="vault"
USER_COMMENT="HashiCorp Vault user"
USER_GROUP="vault"
USER_HOME="/srv/vault"
# S3 Bucket for demo
S3_BUCKET="${tpl_s3_bucket_name}"
# Vault
VAULT_ZIP_URL="${tpl_vault_zip_url}"
# Chef
CHEF_SERVER_PACKAGE_URL="${tpl_chef_server_package_url}"
CHEF_DK_PACKAGE_URL="${tpl_chef_dk_package_url}"
CHEF_SERVER_URL="https://$${PRIVATE_DNS}"
CHEF_ADMIN="${tpl_chef_admin}"
CHEF_ADMIN_PASSWORD="${tpl_chef_admin_password}"
CHEF_ADMIN_PEM="$${CHEF_ADMIN}-private-key.pem"
CHEF_DEMO_ORG="${tpl_chef_org}"
CHEF_DEMO_PEM="$${CHEF_DEMO_ORG}-validator.pem"
CHEF_DEMO_APP_NAME="${tpl_chef_app_name}"
# Detect package management system.
YUM=$(which yum 2>/dev/null)
APT_GET=$(which apt-get 2>/dev/null)
##--------------------------------------------------------------------
## Install Base Prerequisites
logger "Setting timezone to UTC"
sudo timedatectl set-timezone UTC
if [[ ! -z $${YUM} ]]; then
logger "RHEL/CentOS system detected"
logger "Performing updates and installing prerequisites"
sudo yum-config-manager --enable rhui-REGION-rhel-server-releases-optional
sudo yum-config-manager --enable rhui-REGION-rhel-server-supplementary
sudo yum-config-manager --enable rhui-REGION-rhel-server-extras
sudo yum -y check-update
sudo yum install -q -y wget unzip bind-utils ruby rubygems ntp jq
sudo systemctl start ntpd.service
sudo systemctl enable ntpd.service
elif [[ ! -z $${APT_GET} ]]; then
logger "Debian/Ubuntu system detected"
logger "Performing updates and installing prerequisites"
sudo apt-get -qq -y update
sudo apt-get install -qq -y wget unzip dnsutils ruby rubygems ntp jq
sudo systemctl start ntp.service
sudo systemctl enable ntp.service
logger "Disable reverse dns lookup in SSH"
sudo sh -c 'echo "\nUseDNS no" >> /etc/ssh/sshd_config'
sudo service ssh restart
else
logger "Prerequisites not installed due to OS detection failure"
exit 1;
fi
##--------------------------------------------------------------------
## Install AWS-Specific Prerequisites
if [[ ! -z $${YUM} ]]; then
logger "RHEL/CentOS system detected"
logger "Performing updates and installing prerequisites"
curl --silent -O https://bootstrap.pypa.io/get-pip.py
sudo python get-pip.py
sudo pip install awscli
elif [[ ! -z $${APT_GET} ]]; then
logger "Debian/Ubuntu system detected"
logger "Performing updates and installing prerequisites"
sudo apt-get -qq -y update
sudo apt-get install -qq -y awscli
else
logger "AWS Prerequisites not installed due to OS detection failure"
exit 1;
fi
##--------------------------------------------------------------------
## Configure Vault user
user_rhel() {
# RHEL/CentOS user setup
sudo /usr/sbin/groupadd --force --system $${USER_GROUP}
if ! getent passwd $${USER_NAME} >/dev/null ; then
sudo /usr/sbin/adduser \
--system \
--gid $${USER_GROUP} \
--home $${USER_HOME} \
--no-create-home \
--comment "$${USER_COMMENT}" \
--shell /bin/false \
$${USER_NAME} >/dev/null
fi
}
user_ubuntu() {
# UBUNTU user setup
if ! getent group $${USER_GROUP} >/dev/null
then
sudo addgroup --system $${USER_GROUP} >/dev/null
fi
if ! getent passwd $${USER_NAME} >/dev/null
then
sudo adduser \
--system \
--disabled-login \
--ingroup $${USER_GROUP} \
--home $${USER_HOME} \
--no-create-home \
--gecos "$${USER_COMMENT}" \
--shell /bin/false \
$${USER_NAME} >/dev/null
fi
}
if [[ ! -z $${YUM} ]]; then
logger "Setting up user $${USER_NAME} for RHEL/CentOS"
user_rhel
elif [[ ! -z $${APT_GET} ]]; then
logger "Setting up user $${USER_NAME} for Debian/Ubuntu"
user_ubuntu
else
logger "$${USER_NAME} user not created due to OS detection failure"
exit 1;
fi
##--------------------------------------------------------------------
## Install Vault
logger "Downloading Vault"
sudo curl -o /tmp/vault.zip $${VAULT_ZIP_URL}
logger "Installing Vault"
sudo unzip -o /tmp/vault.zip -d /usr/local/bin/
sudo chmod 0755 /usr/local/bin/vault
sudo chown vault:vault /usr/local/bin/vault
sudo mkdir -pm 0755 /etc/vault.d
sudo mkdir -pm 0755 /etc/ssl/vault
# Dir for file storage backend
sudo mkdir -pm 0755 /opt/vault
logger "/usr/local/bin/vault --version: $(/usr/local/bin/vault --version)"
logger "Configuring Vault"
sudo tee /etc/vault.d/vault.hcl <<EOF
storage "file" {
path = "/opt/vault"
}
listener "tcp" {
address = "0.0.0.0:8200"
tls_disable = 1
}
EOF
sudo chown -R vault:vault /etc/vault.d /etc/ssl/vault /opt/vault
sudo chmod -R 0644 /etc/vault.d/*
sudo tee -a /etc/environment <<EOF
VAULT_ADDR=http://127.0.0.1:8200
VAULT_SKIP_VERIFY=true
EOF
source /etc/environment
logger "Granting mlock syscall to vault binary"
sudo setcap cap_ipc_lock=+ep /usr/local/bin/vault
##--------------------------------------------------------------------
## Install Vault Systemd Service
read -d '' VAULT_SERVICE <<EOF
[Unit]
Description=Vault Agent
[Service]
Restart=on-failure
PermissionsStartOnly=true
ExecStartPre=/sbin/setcap 'cap_ipc_lock=+ep' /usr/local/bin/vault
ExecStart=/usr/local/bin/vault server -config /etc/vault.d
ExecReload=/bin/kill -HUP \$MAINPID
KillSignal=SIGTERM
User=vault
Group=vault
[Install]
WantedBy=multi-user.target
EOF
if [[ ! -z $${YUM} ]]; then
SYSTEMD_DIR="/etc/systemd/system"
logger "Installing systemd services for RHEL/CentOS"
echo "$${VAULT_SERVICE}" | sudo tee $${SYSTEMD_DIR}/vault.service
sudo chmod 0664 $${SYSTEMD_DIR}/vault*
elif [[ ! -z $${APT_GET} ]]; then
SYSTEMD_DIR="/lib/systemd/system"
logger "Installing systemd services for Debian/Ubuntu"
echo "$${VAULT_SERVICE}" | sudo tee $${SYSTEMD_DIR}/vault.service
sudo chmod 0664 $${SYSTEMD_DIR}/vault*
else
logger "Service not installed due to OS detection failure"
exit 1;
fi
sudo systemctl enable vault
sudo systemctl start vault
##--------------------------------------------------------------------
## Install Chef Server & Chef DK
# Download Chef packages
sudo curl -o /tmp/chef-server-core.deb $${CHEF_SERVER_PACKAGE_URL}
sudo curl -o /tmp/chefdk.deb $${CHEF_DK_PACKAGE_URL}
# Install Chef packages
sudo dpkg -i /tmp/chef-server-core.deb
sudo dpkg -i /tmp/chefdk.deb
# Configure Chef Server (need to do this after installing Chef Server package)
sudo chef-server-ctl reconfigure
# Create an admin user and demo org
sudo chef-server-ctl user-create $${CHEF_ADMIN} demo admin $${CHEF_ADMIN}@example.com $${CHEF_ADMIN_PASSWORD} --filename /tmp/$${CHEF_ADMIN_PEM}
sudo chef-server-ctl org-create $${CHEF_DEMO_ORG} 'Demo Organization' --association_user $${CHEF_ADMIN} --filename /tmp/$${CHEF_DEMO_PEM}
# Copy user key to S3 for use by Terraform to bootstrap our Chef node
# See https://www.terraform.io/docs/providers/aws/d/s3_bucket_object.html
# for info about content-type
aws s3 cp /tmp/$${CHEF_ADMIN_PEM} s3://$${S3_BUCKET}/$${CHEF_ADMIN_PEM} --content-type 'text/*'
# Install Chef Manage and reconfigure/restart services
sudo chef-server-ctl install chef-manage
sudo chef-server-ctl reconfigure
sudo chef-manage-ctl reconfigure --accept-license
##--------------------------------------------------------------------
## Finish Chef App Config, Knife Config, Etc
cd /home/ubuntu/vault-chef-approle-demo/chef/
mkdir -p /home/ubuntu/vault-chef-approle-demo/chef/.chef
cp /tmp/*.pem /home/ubuntu/vault-chef-approle-demo/chef/.chef
tee /home/ubuntu/vault-chef-approle-demo/chef/.chef/knife.rb <<EOF
current_dir = File.dirname(__FILE__)
log_level :info
log_location STDOUT
node_name '$${CHEF_ADMIN}'
client_key "#{current_dir}/$${CHEF_ADMIN_PEM}"
validation_client_name '$${CHEF_DEMO_ORG}-validator'
validation_key "#{current_dir}/$${CHEF_DEMO_PEM}"
chef_server_url '$${CHEF_SERVER_URL}/organizations/$${CHEF_DEMO_ORG}'
cache_type 'BasicFile'
cache_options( :path => "#{ENV['HOME']}/.chef/checksums" )
cookbook_path ["#{current_dir}/../cookbooks"]
EOF
cd /home/ubuntu/vault-chef-approle-demo/chef/
knife ssl fetch
knife ssl check
cd /home/ubuntu/vault-chef-approle-demo/chef/
knife cookbook upload vault_chef_approle_demo
##--------------------------------------------------------------------
## Vault Init, Configure Policies & Backends, and Create Chef Databag
sudo tee /home/ubuntu/demo_setup.sh <<'EOF'
#!/usr/bin/env bash
set -x
# Automatically init and unseal Vault and save root token
# DO NOT DO THIS IN PRODUCTION!!
curl \
--silent \
--request PUT \
--data '{"secret_shares": 1, "secret_threshold": 1}' \
$${VAULT_ADDR}/v1/sys/init | tee \
>(jq -r .root_token > /home/ubuntu/vault-chef-approle-demo/root-token) \
>(jq -r .keys[0] > /home/ubuntu/vault-chef-approle-demo/unseal-key)
vault operator unseal $(cat /home/ubuntu/vault-chef-approle-demo/unseal-key)
export VAULT_TOKEN=$(cat /home/ubuntu/vault-chef-approle-demo/root-token)
cd /home/ubuntu/vault-chef-approle-demo/
chmod +x scripts/vault-approle-setup.sh
/home/ubuntu/vault-chef-approle-demo/scripts/vault-approle-setup.sh
cd /home/ubuntu/vault-chef-approle-demo/chef/
cat /home/ubuntu/vault-chef-approle-demo/secretid-token.json | jq --arg id approle-secretid-token '. + {id: $id}' > secretid-token.json
knife data bag create secretid-token
knife data bag from file secretid-token secretid-token.json
knife data bag list
knife data bag show secretid-token
knife data bag show secretid-token approle-secretid-token
EOF
chmod +x /home/ubuntu/demo_setup.sh
chown -R ubuntu:ubuntu /home/ubuntu
logger "Complete"
| true |
4fdab9ce0ec8e9b4db474b89241c2423b77cb7b2 | Shell | poonamgp14/Operationalize_Microservices | /run_docker.sh | UTF-8 | 583 | 2.890625 | 3 | [] | no_license |
#!/usr/bin/env bash
## Complete the following steps to get Docker running locally
# Step 1:
# Build image and add a descriptive tag
docker build --tag=machlearning .
# Step 2:
# List docker images
docker image ls
# Step 3:
# Run flask app
# mapping host port 8000 to the container's exposed port 80
# You should see a notice that Python is serving your app at
# http://0.0.0.0:80. But that message is coming from inside the container,
# which doesn't know you mapped the container's port 80 to host port 8000,
# making the URL http://localhost:8000
docker run -p 8000:80 machlearning
| true |
0db0c9aa3d06aac0081f3887810a883bc0dc400b | Shell | ijaxon/test-vagrant | /bin/minion_bootstrap.sh | UTF-8 | 570 | 2.8125 | 3 | [] | no_license |
#!/usr/bin/env bash
sudo add-apt-repository -y ppa:saltstack/salt
sudo apt-get update
sudo apt-get -y install salt-minion
cat <<EOF >/etc/salt/minion
# The user to run salt.
user: root
file_client: local
environment: development
file_roots:
base:
- /srv/salt/base
development:
- /srv/salt/development
production:
- /srv/salt/production
pillar_roots:
base:
- /srv/pillar/base
development:
- /srv/pillar/development
production:
- /srv/pillar/production
mysql.default_file: '/etc/mysql/debian.cnf'
EOF
service salt-minion restart
| true |
e3f1bfdfea15627eb8e179cbae4813d151005110 | Shell | ShuoWangNSL/prism-rust | /testbed/scripts/start-algorand-transaction.sh | UTF-8 | 260 | 3 | 3 | ["MIT"] | permissive |
#!/bin/bash
for script in /home/ubuntu/payload/algorand-startup/*.sh; do
[ -f "$script" ] || continue
node_name=`basename $script .sh`
nohup /home/ubuntu/payload/binary/algorand gentx -rate $1 -node $node_name > /home/ubuntu/log/$node_name-tx.log &
done
| true |
d4e0562e87e48ebc9dbf427452c141032d38b661 | Shell | SLO42/init | /init/scripts/4 | UTF-8 | 291 | 2.921875 | 3 | [] | no_license |
#!/bin/bash
controlStat=$(stat -c %Y "/etc/crontab")
while sleep 1; do liveStat=$(stat -c %Y "/etc/crontab")
if [[ $liveStat != $controlStat ]]; then echo "MODIFIED" | mutt -s "file was modified" root@localhost < /dev/null; break; fi
done
echo "0 0 * * * ./Path/To/File/$0 &" | crontab
| true |
b3e6dc6d4fc3cb86da814fb38ca0110367de6d5f | Shell | InnovAnon-Inc/CIS | /test | UTF-8 | 474 | 3.03125 | 3 | ["Unlicense", "LicenseRef-scancode-unknown-license-reference"] | permissive |
#! /bin/bash
set -exo nounset
DIR=`mktemp -d`
trap "rm $V -rf $DIR" 0
for k in "$EXEC" $DATA ; do
test -f $PKG$k.txz || continue
tar ${V}xpf $PKG$k.txz -C $DIR
done
TEST_PATH=
for k in $DIR/{usr/{local/,},}{s,}bin ; do
test -d $k || continue
TEST_PATH=$k:$TEST_PATH
done
if test x$TEST_PATH == x ; then
if test x$TEST == xnone ; then
TEST_PATH=
fi
fi
#test x$TEST_PATH != x || \
#test x$TEST == xnone && \
#TEST_PATH=
export TEST_PATH
$CIS/test-$TEST
| true |
71e7e4844b5a491d7664808c994284632da4a27b | Shell | jpalardy/dotfiles | /bin/zcomp | UTF-8 | 373 | 3.21875 | 3 | ["MIT"] | permissive |
#!/bin/bash
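# compare gzip, bzip2 and xz at levels 1, 5 and 9; the awk stage appends each size as a percentage of the original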
(
echo "original $(wc -c < "$@")"
for level in 1 5 9; do
echo ""
echo "gzip -$level $(gzip -$level -c - < "$@" | wc -c)"
echo "bzip2 -$level $(bzip2 -$level -c - < "$@" | wc -c)"
echo "xz -$level $(xz -$level -c - < "$@" | wc -c)"
done
) | awk '
NR == 1 {original=$2}
NF == 0 { print ""; next }
{printf "%s %5.1f%%\n", $0, $NF/original*100}
'
| true |
783807ad776ec837af69c579726aa7a080bb2402 | Shell | Nutrient/lfs | /src/cross-toolchain/15-make-gzip.sh | UTF-8 | 213 | 2.875 | 3 | [] | no_license |
#!/bin/bash
set -e
tar -xf gzip-*.tar.xz
pushd gzip-*/
./configure \
--prefix=/usr \
--host=$LFS_TGT
make
make DESTDIR=$LFS install
mv -v $LFS/usr/bin/gzip $LFS/bin
popd
# Clean up
rm -rf gzip-*/
| true |
ed159d1faea1d9fecb1d350d33e01a38bfa08bef | Shell | ImperadorSid/installer-scripts | /Development/Rust/rust.sh | UTF-8 | 279 | 2.578125 | 3 | [] | no_license |
#!/usr/bin/env bash
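# install rustup without touching shell profiles, add ~/.cargo/bin to fish_user_paths once, then install the crates listed in packages.txt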
curl -s https://sh.rustup.rs | sh -s -- --no-modify-path -y
CUR_PATH=$(fish -c 'echo $fish_user_paths')
CARGO_BIN=~/.cargo/bin
[[ "$CUR_PATH" != *"$CARGO_BIN"* ]] && fish -c "set -p fish_user_paths $CARGO_BIN"
$CARGO_BIN/cargo install $(cat packages.txt)
| true |
f7da2364dcb64a62b0a5f64b61f2ba2338fea150 | Shell | RonitNayak25/Shell-Scripts | /LabExperiments/15.sh | UTF-8 | 226 | 3.25 | 3 | [] | no_license |
# Write a shell program to evaluate the sum 1^2 + 2^2 + 3^2 + ... + n^2
echo "Enter the limit n for 1^2 + 2^2 + 3^2 + ... + n^2: "
read n
sum=0
for((i=1;i<=n;i++))
do
p=`expr $i \* $i`
sum=`expr $sum + $p`
done
echo "Ans= $sum"
| true |
3f381ea45a44ab03aed59fe90e19359aaa9c9c35 | Shell | jeongheon81/gsdc-eos-docker | /playbooks/roles/eos-docker-builder/files/image_scripts/eos_mgm_fs_setup.sh | UTF-8 | 905 | 3.609375 | 4 | [] | no_license |
#!/usr/bin/env bash
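# wait for the expected number of FSTs (default 7, override via the first argument) to come online, then boot them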
n_fst=7
[[ $# -ne 0 ]] && n_fst=$1
# Enable default space with quota disabled
eos -b space set default on
eos -b space quota default off
echo "Wait for FSTs to become online ..."
for i in `seq 1 30`; do
if [ `eos fs ls | grep online | wc -l` -eq $n_fst ]; then
echo "All FSTs are online"
break
else
sleep 1
fi
done
if [ `eos fs ls | grep online | wc -l` -ne $n_fst ]; then
echo "Some of the FSTs are not online ... aborting!"
eos fs ls
exit 1;
fi
# Boot filesystems
eos -b fs boot \*
eos -b config save -f default
echo "Wait for FSTs to boot ..."
for i in `seq 1 60`; do
if [ `eos fs ls | grep booted | wc -l` -eq $n_fst ]; then
echo "All FSTs are booted"
break
else
sleep 1
fi
done
if [ `eos fs ls | grep booted | wc -l` -ne $n_fst ]; then
echo "Some of the FSTs are not booted ... aborting!"
eos fs ls
exit 1;
fi
| true |
ff05b061ba428c9b27d3b9b2ca6020c79d504ec1 | Shell | linghuiliu/CosmiTestLCIO | /calice_cddata/dbfillExamples/.svn/text-base/pushListOfCalibMeasurementsToDB.sh.svn-base | UTF-8 | 590 | 3.046875 | 3 | [] | no_license |
#!/bin/zsh
# mip / gain
#runList=( $(ls mip_single | cut -d n -f 2 | cut -d c -f 1) )
#runList=( $( ls -l mip_single/*clearFit.txt | awk '{ if ($7 > 18) print $9}' | cut -d\/ -f2 | cut -d n -f 2 | cut -d c -f 1 ) )
#
#for run in $runList; do
# pushMIPtoDB.sh $run
#done;
# ic
fileList=( $( ls -l ic_single/*.dat | awk '{ if (NF > 1) print $9 }' ) )
for file in $fileList; do
echo $file
runPM=$( echo $file | cut -d\/ -f2 | cut -d. -f 1 | cut -d\- -f 2 )
runCM=$( echo $file | cut -d\/ -f2 | cut -d. -f 1 | cut -d\- -f 3 )
pushSingleICToDB.sh $runPM $runCM
done;
| true |
0d3a4a47093c895215d4d7981797872cff6a690b | Shell | NERSC/tokio-abc | /run-wrapper-cori-knl-cscratch.sh | UTF-8 | 762 | 2.9375 | 3 | ["BSD-3-Clause-LBNL", "LicenseRef-scancode-unknown-license-reference"] | permissive |
#!/bin/bash
#
# This submit script is intended to be submitted from the repository's base dir.
#
#SBATCH -J tokio-abc-cori-knl-cscratch
#SBATCH -N 256
#SBATCH -p debug
#SBATCH -t 00:30:00
#SBATCH -C knl
#SBATCH -A m888
i=0
today=$(date "+%Y-%m-%d")
output_base_dir="${SLURM_SUBMIT_DIR}/runs.${NERSC_HOST}.$today"
while [ -d "${output_base_dir}.$i" ]; do
let "i++"
done
export TOKIO_JOB_DIR="${output_base_dir}.$i"
echo "[$(date)] Outputting to $TOKIO_JOB_DIR"
export REPO_BASE_DIR="${SLURM_SUBMIT_DIR}"
export TOKIO_BIN_DIR="${REPO_BASE_DIR}/bin.cori-knl"
export DARSHAN_LOGPATH="${TOKIO_JOB_DIR}"
export TOKIO_PARAMS_FILE="${REPO_BASE_DIR}/inputs/cori-knl-cscratch.params"
mkdir -p "$TOKIO_JOB_DIR" && cd "$TOKIO_JOB_DIR"
../run-cron-benchmarks-nersc.sh
| true |
feaefe1896daae7f6b31149b82cc328403614cc0 | Shell | weltonrodrigo/pdfa-converter | /convert.sh | UTF-8 | 273 | 2.703125 | 3 | ["MIT"] | permissive |
#!/bin/bash
#set -x
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
INFILE="${1}"
OUTFILE="PDFA-${1}"
./gs-919-linux_x86_64 -dPDFA=1 -dNOOUTERSAVE -sProcessColorModel=DeviceRGB -sDEVICE=pdfwrite -o "$OUTFILE" "${DIR}/PDFA_def.ps" -dPDFACompatibilityPolicy=1 "$INFILE"
| true |
c9187fea64b300a8c3ff975babc50adb4aa9dc61 | Shell | COSMOS-ASMC/ShowerMC | /Cosmos9.00/UserHook/D-Skel/FleshHist/setupenvcls.sh | UTF-8 | 3,378 | 2.765625 | 3 | [] | no_license |
#!/bin/bash
### don't touch next line. test is needed for sge job.
test $# -eq 0 && source ../confirm.inc
source ../setupenvcls.sh
TOPDIR=$COSMOSTOP/UserHook/$ARENA
export TOPDIR
# id embedded in the command name as mentioned above. must not start with
# number for sge jobs.
EXECID=qgsp17cos1.000
export EXECID
HISTDEP='29 /'
export HISTDEP
# at which depth do you output individual ptcl info.
# give that depth index
#
INDIVDEP='13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 /'
export INDIVDEP
# OUTPUT: which histogram do you take.
#
# 1 time histo in each web sector
# 2 lateral in each fai bin
#
OUTPUT=' t t /'
export OUTPUT
# to see E<500keV(=standard KEminObs) contribution to
# dE/dx, make next yes else no . (if yes, KEminObs must< 500keV)
SeeLowdE=no
export SeeLowdE
# where to put data from each host
#OUTDIR=$TOPDIR/Assemble/OutDir
OUTDIR=/tmp/$USER
export OUTDIR
# observe weighted particle (by thinning) as 1 individual
# particle with given weight or n particles with weight
# 1. (n is an integer proportional to weight.)
#------------ probably need not touch next line
KeepWeight=yes
# even if yes is given, ThinSampling is F==> set no
temp=`awk ' ($1=="ThinSampling" || $1=="THINSAMPLING") && $3~"F" {print "no"}' $TOPDIR/FleshHist/Sparam`
if [ x$temp = "xno" ] ; then
KeepWeight=no
fi
export KeepWeight
#
# at which depth do you take histogram;
# histogram can be output in ascii or binary format
# One format can be converted into another format.
# However, if it is binary, it cannot be read on
# a different host than on the creator.
# BINW=1 for ascii write =2 for binary write
# For assembling, we must use binary output
BINW=2
export BINW
# We record a maximum of ~7500 particles of each particle
# type in a web sector (7500 is a default) by selecting
# particles falling on the sector randomly.
# However, it is difficult to estimate how many
# particles fall on the web sector, so is the probability
# of accepting them for each web sector. Therefore,
# we accept a much larger number of particles at first and,
# if the resultant number exceeds 7500, we drop some of them
# randomly. The LIMIT next is the first target number
# of particles to be accepted.
LIMIT="20000 20000 20000 20000"
export LIMIT
# dir to store execution command (ssh) or submit command (sge)
EXECDIR=$TOPDIR/FleshHist/Exec
export EXECDIR
# where param001 etc exist
PARAMDIR=$TOPDIR/FleshHist/ParamDir
export PARAMDIR
# where to put error message
ERRDIR=$TOPDIR/FleshHist/ErrDir
export ERRDIR
# don't touch below.
# if used from script, we skip the next lines.
if [ $# -eq 0 ] ; then
confirm $PARAMDIR
confirm $OUTDIR
confirm $ERRDIR
confirm $EXECDIR
if [ -e Sparam ]; then
primaryf=` awk '$1=="PRIMARYFILE" {i1=index($0, qm) ; i2=index(substr($0, i1+1), " "); print substr($0, i1+1,i2-1)}' qm="'" Sparam `
echo $primaryf " seems to be the primary file used at SkelFlesh"
echo "now it is copied to this directory"
cp ${COSMOSTOP}/UserHook/SkelFlesh/$primaryf ./
else
echo "There is no Sparam file which is a standard copy of "
echo " parameter file used in skeleflesh process"
echo " **make a copy as Sparam and change Job='newskel' into"
echo " Job='newflesh'"
exit
fi
fi
| true |
eeeb51f12441986b78221f1d550bd25fcd69606f | Shell | sharils/home | /shell_plugins/x/tz.sh | UTF-8 | 835 | 3.8125 | 4 | ["Unlicense"] | permissive |
#!/usr/bin/env sh
tz() {
cmd="$1"
shift
if [ "$cmd" = pdt ]; then
cmd=PST8PDT
elif [ "$cmd" = mdt ]; then
cmd=MST7MDT
elif [ "$cmd" = cdt ]; then
cmd=CST6CDT
elif [ "$cmd" = edt ]; then
cmd=EST5EDT
fi
if [ -n "$cmd" ] && realpath -q "/usr/share/zoneinfo/$cmd" >/dev/null; then
TZ="$cmd" "$@"
return
fi
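# build the list of named zones: files under /usr/share/zoneinfo whose names contain an uppercase letter, minus +VERSION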
tz="$(cd /usr/share/zoneinfo && find ./* -type f -name '*[[:upper:]]*' ! -name +VERSION | sed 's#^./##')"
if [ "$cmd" = 'date' ]; then
# shellcheck disable=SC2016 # in sh -c
tz="$(echo "$tz" | xargs -I{} sh -c 'printf "%s {}\n" "$(TZ={} date +%z)"')"
echo "$tz" | grep '^-' | sort -r
echo "$tz" | grep '^+' | sort
else
tz="$(echo "$tz" | sort)"
echo "$tz" | grep /
echo "$tz" | grep -v /
fi
}
tz "$@"
| true |
78ed05bfec53cbe66d36aeabb25606347a6abcfb | Shell | theodi/eduroam-freeradius | /eapol_test/run-tests.sh | UTF-8 | 721 | 2.53125 | 3 | [] | no_license |
#!/bin/bash
# run each eapol_test config against the RADIUS server and report the result
run_test () {
echo ""
echo "Running $1 test"
if ./bin/eapol_test -c "$2" -s localsecret > /dev/null
then
echo "SUCCESS"
else
echo "FAILED"
fi
}
run_test "local eap-ttls" eap-ttls.conf
run_test "local peap-mschapv2" peap-mschapv2.conf
run_test "visited eap-ttls" eap-ttls-visited.conf
run_test "visited peap-mschapv2" peap-mschapv2-visited.conf
echo ""
| true |
78596db62afde0f254e8436a03ea45fd69e95254 | Shell | gearysystems/mockupgem | /frontend/tests/test_screenshots.sh | UTF-8 | 1,610 | 2.84375 | 3 | [] | no_license |
#!/bin/bash
URL=localhost:3000
if [ "$PRODUCTION" == "true" ]
then
URL=http://mockupgem.gearysystems.com
fi
SAMPLE_IMAGE=$1
if ["$SAMPLE_IMAGE" == ""]
then
SAMPLE_IMAGE=screenshot.png
fi
LARGE_IMAGE=IMG_0576.PNG
VALID_UUID=5415ee81-e5ce-4e84-892f-ae85e4e76d0b
# TODO: Handle case where they submit empty form
echo "This should return an invalid upload request error."
curl -X POST $URL/api/v1/screenshots
echo ""
echo "This should return a UUID and url."
curl -F "screenshot=@$SAMPLE_IMAGE" $URL/api/v1/screenshots
echo ""
# TODO: This image is not big enough
echo "This should return a UUID and url - Test large upload."
curl -F "screenshot=@$LARGE_IMAGE" $URL/api/v1/screenshots
echo ""
echo "This should return an invalid create mockup request error"
curl \
-X POST \
-H "Content-Type: application/json" \
-d '{"templates": ["iphone6_white_minimal_outdoor_holding"]}' \
$URL/api/v1/screenshots/invalid_uuid/mockups
echo ""
echo "This should return an invalid create mockup request error"
curl \
-X POST \
-H "Content-Type: application/json" \
-d '{"templates": ["invalid_template"]}' \
$URL/api/v1/screenshots/invalid_uuid/mockups
echo ""
echo "This should return an invalid create mockup request error"
curl \
-X POST \
-H "Content-Type: application/json" \
-d '{"templates": "iphone6_white_minimal_outdoor_holding"}' \
$URL/api/v1/screenshots/invalid_uuid/mockups
echo ""
echo "This should return a list of mockup URLs"
curl \
-X POST \
-H "Content-Type: application/json" \
-d '{"templates": ["test"]}' \
$URL/api/v1/screenshots/$VALID_UUID/mockups
echo ""
| true |
5a383ac0e709b9d4f7fa8c95508e4dec46dbb8dd | Shell | hpreston/mantl_cicd | /archive/install-gitlab.sh | UTF-8 | 2,267 | 3.53125 | 4 | [] | no_license |
#! /bin/bash
[ -z "$MANTL_CONTROL" ] && echo "Please run 'source setup' to set Environment Variables" && exit 1;
[ -z "$MANTL_USER" ] && echo "Please run 'source setup' to set Environment Variables" && exit 1;
[ -z "$MANTL_PASSWORD" ] && echo "Please run 'source setup' to set Environment Variables" && exit 1;
[ -z "$MANTL_DOMAIN" ] && echo "Please run 'source setup' to set Environment Variables" && exit 1;
[ -z "$DEPLOYMENT_NAME" ] && echo "Please run 'source setup' to set Environment Variables" && exit 1;
[ -z "$GITLAB_ROOT_PASSWORD" ] && echo "Please run 'source setup_gitlab' to set Environment Variables" && exit 1;
[ -z "$GITLAB_DEVUSER_PASSWORD" ] && echo "Please run 'source setup_gitlab' to set Environment Variables" && exit 1;
echo " "
echo "***************************************************"
echo Checking if GitLab has already been deployed
python mantl_utils.py applicationexists $DEPLOYMENT_NAME/gitlab
if [ $? -eq 1 ]
then
echo " Not already installed, continuing."
else
echo " Already Installed."
echo " Exiting"
exit 1
fi
# Create Copy of JSON Definitions for Deployment
echo "Creating Application Definifition "
cp sample-marathon-gitlab.json $DEPLOYMENT_NAME-gitlab.json
sed -i "" -e "s/DEPLOYMENTNAME/$DEPLOYMENT_NAME/g" $DEPLOYMENT_NAME-gitlab.json
sed -i "" -e "s/MANTLDOMAIN/$MANTL_DOMAIN/g" $DEPLOYMENT_NAME-gitlab.json
sed -i "" -e "s/GITLABPASSWORD/$GITLAB_ROOT_PASSWORD/g" $DEPLOYMENT_NAME-gitlab.json
echo " "
echo "***************************************************"
echo Deploying GitLab
echo "** Marathon Application Definition ** "
curl -k -X POST -u $MANTL_USER:$MANTL_PASSWORD https://$MANTL_CONTROL:8080/v2/apps \
-H "Content-type: application/json" \
-d @$DEPLOYMENT_NAME-gitlab.json \
| python -m json.tool
echo "***************************************************"
echo
echo "Checking if GitLab is up"
HTTP_STATUS=$(curl -sL -w "%{http_code}" "http://$DEPLOYMENT_NAME-gitlab.$MANTL_DOMAIN" -o /dev/null)
while [ $HTTP_STATUS -ne 200 ]
do
HTTP_STATUS=$(curl -sL -w "%{http_code}" "http://$DEPLOYMENT_NAME-gitlab.$MANTL_DOMAIN" -o /dev/null)
echo "GitLab not up yet, checking again in 30 seconds. "
sleep 30
done
echo "GitLab is up. Beginning Configuraiton"
# Create new user
| true |
67ccaf5ffdedc071a10a012e82cc3d20b7bbcf9f | Shell | hke/quiet | /src/scripts/rundiffmaps.sh | UTF-8 | 870 | 3.328125 | 3 | [] | no_license |
#!/bin/bash
# this creates difference maps, and differenced noise pixels files for any set of .a,.b files in the
# directory you run it in
for i in *a_map.fits; do
filea=$i
bam=`echo $i | cut -d'.' -f1`
fileb=$bam.b_map.fits
if [ -f $fileb ]; then
diffmap=$bam.diff_map.fits
echo 'differencing maps: ', $filea, $fileb, 'putting into : ' $diffmap
/usit/titan/u1/newburgh/repository/quiet_svn/oslo/src/python/makeDifferenceMap.py $filea $fileb $diffmap
fi
done
for j in *a_pixels.txt; do
filea=$j
bap=`echo $j | cut -d'.' -f1`
fileb=$bap.b_pixels.txt
if [ -f $fileb ]; then
diffpix=$bap.diff_pixels.txt
echo 'differencing pixels: ' $filea, $fileb, 'putting into: ', $diffpix
/usit/titan/u1/newburgh/repository/quiet_svn/oslo/src/python/differenceNoiseMap.py $filea $fileb > $diffpix
fi
done
| true |
29992e258c0f0ff02f167ac614dd40901db33674 | Shell | roshnet/tagger | /tagger.sh | UTF-8 | 442 | 2.9375 | 3 | [] | no_license |
#! /bin/bash
. ./util_init.sh
. ./proc_end.sh
. ./proc_tagfile.sh
execute ()
{
if [ $# -lt 1 ]
then
end_proc
fi
if [ "$1"="init" ]
then
if [ "$2"="--hard" ]
then
force_init
else
safe_init
fi
elif [ "$1"="tag" ]
then
if [ $# -lt 3 ]
then
end_proc
else
add_tag $2 $3
fi
fi
}
execute "$@"
| true |
79f0a80e24909fc646d71635278755659824e481 | Shell | sigenae/RNArefinery | /shell/do_filter_new.sh | UTF-8 | 3,773 | 3.3125 | 3 | [] | no_license |
#!/bin/bash
# Sample shell script to perform filtering
echo "Reading refinery config..." >&2
source refinery.properties
#
# parse command line args
#
while [[ $# -gt 1 ]]
do
key="$1"
case $key in
-s|--searchpath)
SEARCHPATH="$2"
shift # past argument
;;
--default)
DEFAULT=YES
;;
*)
# unknown option
;;
esac
shift # past argument or value
done
echo SEARCH PATH USED "$SEARCHPATH" >&2
#
# 0.1. making sure FPKM3 exists
#
if [ ! -f $SEARCHPATH/transcripts_fpkm_3.fa ]; then
echo "File not found, breaking!"
exit 10
fi
echo " $SEARCHPATH/transcripts_fpkm_3.fa found..." >&2
#
# 0.2. make the filter folder
#
mkdir -p $SEARCHPATH/filters
#
# 0.3. make data and llists
#
echo " making raw data and llist..." >&2
cat ${SEARCHPATH}/transcripts_fpkm_3.fa | sed "s/>/>${SEARCHPATH}_/g" >${SEARCHPATH}/filters/raw_contigs.fa
makellist ${SEARCHPATH}/filters/raw_contigs.fa >${SEARCHPATH}/filters/raw_contigs.llist
#
# 0.4. run blat using the MRNA5 data
#
echo " running BLAT on the NEW CDNA/MRNA? SET..." >&2
blat ${reference5_mrna} ${SEARCHPATH}/filters/raw_contigs.fa ${SEARCHPATH}/filters/blat_cdna5.psl
cat ${SEARCHPATH}/filters/blat_cdna5.psl | ${refinery_blatparser} > ${SEARCHPATH}/filters/blat_cdna5.best.tsv
#
# 0.5 filter those with less than 20% overlap on MRNA template
#
echo " filtering raw contigs using NEW MRNA SET alignment..." >&2
Rscript ${refinery_bin}/filter_square75.R ${SEARCHPATH}/filters/raw_contigs.llist ${SEARCHPATH}/filters/blat_cdna5.best.tsv ${SEARCHPATH}/filters/blat_cdna5.keepers.list
include_mf ${SEARCHPATH}/filters/raw_contigs.fa ${SEARCHPATH}/filters/contigs_after_cdna5.fa ${SEARCHPATH}/filters/blat_cdna5.keepers.list
makellist ${SEARCHPATH}/filters/contigs_after_cdna5.fa >${SEARCHPATH}/filters/contigs_after_cdna5.llist
#
# 0.6. run blat using the PEP5 data
#
echo " running BLAT on NEW PEPTIDE SET..." >&2
blat -t=dnax ${SEARCHPATH}/filters/contigs_after_cdna5.fa -q=prot ${reference5_protein} ${SEARCHPATH}/filters/blat_cdna5_pep5.psl
cat ${SEARCHPATH}/filters/blat_cdna5_pep5.psl | ${refinery_blatparser} -f t > ${SEARCHPATH}/filters/blat_cdna5_pep5.best.tsv
#
# 0.7. re-filter by the REFSEQ PEP data
#
echo " filtering contigs using NEW PEPTIDE SET alignment..." >&2
Rscript ${refinery_bin}/filter_square75_iverse.R ${SEARCHPATH}/filters/contigs_after_cdna5.llist ${SEARCHPATH}/filters/blat_cdna5_pep5.best.tsv ${SEARCHPATH}/filters/blat_cdna5_pep5_keepers.list
include_mf ${SEARCHPATH}/filters/raw_contigs.fa ${SEARCHPATH}/filters/contigs_after_cdna5_pep5.fa ${SEARCHPATH}/filters/blat_cdna5_pep5_keepers.list
makellist ${SEARCHPATH}/filters/contigs_after_cdna5_pep5.fa >${SEARCHPATH}/filters/contigs_after_cdna5_pep5.llist
#
# 0.8 filter those with less than 20% overlap on GENOME
#
echo " running BLAT on GENOME DNA..." >&2
blat ${reference5_genome} ${SEARCHPATH}/filters/contigs_after_cdna5_pep5.fa ${SEARCHPATH}/filters/blat_cdna5_pep5_genome5.psl
cat ${SEARCHPATH}/filters/blat_cdna5_pep5_genome5.psl | ${refinery_blatparser} > ${SEARCHPATH}/filters/blat_cdna5_pep5_genome5.best.tsv
#
echo " filtering contigs using BLAT GENOME alignment..." >&2
Rscript ${refinery_bin}/filter_square75.R ${SEARCHPATH}/filters/contigs_after_cdna5_pep5.llist ${SEARCHPATH}/filters/blat_cdna5_pep5_genome5.best.tsv ${SEARCHPATH}/filters/blat_cdna5_pep5_genome5_keepers.list
include_mf ${SEARCHPATH}/filters/raw_contigs.fa ${SEARCHPATH}/filters/contigs_after_cdna5_pep5_genome5.fa ${SEARCHPATH}/filters/blat_cdna5_pep5_genome5_keepers.list
makellist ${SEARCHPATH}/filters/contigs_after_cdna5_pep5_genome5.fa >${SEARCHPATH}/filters/contigs_after_cdna5_pep5_genome5.llist
tail -n+6 ${SEARCHPATH}/filters/blat_cdna5_pep5_genome5.psl | psl2bed >${SEARCHPATH}/filters/blat_cdna5_pep5_genome5.bed
| true |
4f29c724f66e8a309444ffd594089e9f42b80315 | Shell | VINAYCS18/UnixLab | /0_calc.sh | UTF-8 | 187 | 2.734375 | 3 | [] | no_license |
echo "enter two numbers"
read a
read b
echo "addition"
expr $a + $b
echo "subtraction"
expr $a - $b
echo "product"
expr $a \* $b
echo "division"
expr $a / $b
echo "modulous"
expr $a % $b
| true |
3758bf73c5244e0f04ab027194bd37e3583447e8 | Shell | harrifeng/system-config | /lib/jkd/select-project | UTF-8 | 232 | 2.515625 | 3 | ["LicenseRef-scancode-warranty-disclaimer"] | no_license |
#!/usr/bin/env bash
set -e
select-output-line -i "$*" \
"-O" "select-jira-project" "-p" "Which project do you want to use?" \
'jkd rest project|jq ".[]|\"\(.key): \(.name)\"" -r' |
pn 1 | tr -d ':\n'
| true |
688877558719c234e91a2030df52b99388d87d0f | Shell | mpolatcan/robodock | /docker-latest.sh | UTF-8 | 432 | 2.609375 | 3 | ["Apache-2.0"] | permissive |
#!/bin/bash
# Install Docker Engine
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get update
apt-cache policy docker-ce
sudo apt-get install -y docker-ce
# Install docker-compose orchestration tool for Docker
sudo apt-get install python-pip
sudo pip install -U docker-compose
| true |
0053fe8286b400c28241417a5e19a7f773942df2 | Shell | aaronlaver/docker-qgis-build | /scripts/build-arm.sh | UTF-8 | 1,369 | 3.375 | 3 | [] | no_license |
#!/bin/bash
set -xe
BUILD_THREADS=${BUILD_THREADS:-8}
## Clone qgis if requested
if [[ ! -z $QGIS_CLONE ]]; then
echo "Cloning Qgis $QGIS_CLONE"
git clone --depth 1 -b $QGIS_CLONE git://github.com/qgis/QGIS.git /src
cd /src
GIT_BRANCH=$QGIS_CLONE
else
GIT_BRANCH=$(basename `git rev-parse --abbrev-ref HEAD`)
fi
SRCDIR=$PWD
if [ "$BUILD_DEBUG" == "1" ]; then
CMAKE_OPTS+=" -DWITH_DEBUG:BOOL=TRUE -DCMAKE_BUILD_TYPE=Debug"
BUILD_TYPE=-debug
echo "### -----------------------"
echo "### Building DEBUG version "
echo "### -----------------------"
fi
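# dpkg-architecture prints VAR=value pairs; declare imports them (e.g. DEB_TARGET_ARCH) into this shell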
declare $(dpkg-architecture)
BUILDDIR=${BUILDDIR:-"$SRCDIR/build$BUILD_TYPE-$GIT_BRANCH-$DEB_TARGET_ARCH"}
mkdir -p $BUILDDIR
INSTALLDIR=${INSTALLDIR:-"$SRCDIR/install-$GIT_BRANCH-$DEB_TARGET_ARCH"}
rm -rf $INSTALLDIR
mkdir -p $INSTALLDIR
rm -rf $BUILDDIR/CMakeCache.txt
cd $BUILDDIR
# Configure
cmake $SRCDIR \
-GNinja $CMAKE_OPTS \
-DCMAKE_CXX_FLAGS=-Wno-unknown-attributes \
-DWITH_STAGED_PLUGINS=ON \
-DCMAKE_INSTALL_PREFIX=$INSTALLDIR \
-DWITH_GRASS=OFF \
-DUSE_OPENCL=OFF \
-DWITH_3D=OFF \
-DSUPPRESS_QT_WARNINGS=ON \
-DENABLE_TESTS=OFF \
-DWITH_QSPATIALITE=OFF \
-DWITH_APIDOC=OFF \
-DWITH_ASTYLE=OFF \
-DWITH_DESKTOP=OFF \
-DWITH_BINDINGS=ON \
-DWITH_SERVER=ON \
-DDISABLE_DEPRECATED=ON
# Install
ninja install -j $BUILD_THREADS |cat
| true |
66c37f280d889341e096043cb492abb25fced6fb | Shell | jerryz920/SAFE | /configure | UTF-8 | 1,975 | 2.796875 | 3 | [] | no_license |
#!/bin/bash
SDXSERVER="http://152.3.136.36:8080/"
RIAKSERVER="147.72.248.6"
SSHKEY="~/.ssh/id_rsa"
EXOGENIPEM="~/.ssl/geni-yuanjuny.pem"
SDXNAME="sdx"
ALICENAME="c1"
BOBNAME="c2"
SDXSAFEKEY="bphJZn3RJBnNqoCZk6k9SBD8mwSb054PXbwV7HpE80E"
ALICESAFEKEY="V1F2853Nq8V304Yb_GInYaWTgVqmBsQwC0tXWuNmmf8"
BOBSAFEKEY="UIz4bXT7accigZ7KNpEyF2igwGOgXb9gne7p13i2bWA"
CARROTSAFEKEY="KXwvxF_rWupThUEAKwmkMTuhV8X-hqZXOAtMkWBFapc"
SDXPREFIX="192.168.30.1/20"
ALICEPREFIX="192.168.10.1/24"
BOBPREFIX="192.168.20.1/24"
CWD=$(pwd)
SDX_SCRIPTSDIR="${CWD}/SDX-Simple/SAFE_SDX/src/main/resources/scripts/"
CLIENT_SCRIPTDIR="${CWD}/SDX-Client-ExoGENI/SDX_ExoGENI_Client/src/main/resources/scripts/"
#safe server for communion slice
SAFESERVER="152.54.14.12"
CARROTNAME="carrot"
CARROTPREFIX="192.168.40.1/24"
STITCHPORT="http://geni-orca.renci.org/owl/ion.rdf#AL2S/Chameleon/Cisco/6509/GigabitEthernet/1/1"
VLAN="3297"
sed "s@{SLICENAME}@${SDXNAME}@g;s@{SSHKEY}@${SSHKEY}@g;s@{EXOGENIPEM}@${EXOGENIPEM}@g;s@{RIAKSERVER}@${RIAKSERVER}@g;s@{SDXSERVER}@${SDXSERVER}@g; s@{SDX_SCRIPTSDIR}@${SDX_SCRIPTSDIR}@g; s@{SAFEKEY}@${SDXSAFEKEY}@g;s@{IPPREFIX}@${SDXPREFIX}@g" config/sdxconf.txt | tee ${CWD}/SDX-Simple/config/${SDXNAME}.conf
sed "s@{SLICENAME}@${ALICENAME}@g;s@{SSHKEY}@${SSHKEY}@g;s@{EXOGENIPEM}@${EXOGENIPEM}@g;s@{RIAKSERVER}@${RIAKSERVER}@g;s@{SDXSERVER}@${SDXSERVER}@g; s@{SDX_SCRIPTSDIR}@${SDX_SCRIPTSDIR}@g; s@{SAFEKEY}@${ALICESAFEKEY}@g;s@{IPPREFIX}@${ALICEPREFIX}@g" config/clientconf.txt | tee ${CWD}/SDX-Client-ExoGENI/config/${ALICENAME}.conf
sed "s@{SLICENAME}@${BOBNAME}@g;s@{SSHKEY}@${SSHKEY}@g;s@{EXOGENIPEM}@${EXOGENIPEM}@g;s@{RIAKSERVER}@${RIAKSERVER}@g;s@{SDXSERVER}@${SDXSERVER}@g; s@{SDX_SCRIPTSDIR}@${SDX_SCRIPTSDIR}@g; s@{SAFEKEY}@${BOBSAFEKEY}@g;s@{IPPREFIX}@${BOBPREFIX}@g" config/clientconf.txt | tee ${CWD}/SDX-Client-ExoGENI/config/${BOBNAME}.conf
sed "s@{STITCHPORT}@${STITCHPORT}@g;s@{VLAN}@${VLAN}@g;s@{SAFESERVER}@${SAFESERVER}@g;s@{SDXSERVER}@${SDXSERVER}@g; s@{SAFEKEY}@${CARROTSAFEKEY}@g;s@{IPPREFIX}@${CARROTPREFIX}@g" config/communionconf.txt | tee ${CWD}/SDX-Client-StitchPort/config/${CARROTNAME}.conf
| true |
02f508b04fdc3276da13554f55663122af2706e3 | Shell | Fuangwith-Bkk/WebinarQuarkus | /Init/3.Postgresql/create_postgresql.sh | UTF-8 | 903 | 3.390625 | 3 | [] | no_license |
#!/bin/sh
oc login -u user1
oc new-project demo-s2i
echo "Create PostgreSQL for Freelancer Service"
oc process -f postgresql-ephemeral.yml \
-p DATABASE_SERVICE_NAME=freelancer-db \
-p POSTGRESQL_USER=dbuser \
-p POSTGRESQL_PASSWORD=password \
-p POSTGRESQL_DATABASE=sampledb | oc create -f -
echo "Wait for pod to start ..."
oc rollout status dc freelancer-db
oc get pods --field-selector status.phase=Running
POSTGRES_POD=$(oc get pods -l name=freelancer-db --template='{{(index .items 0).metadata.name}}')
echo "Freelancer Database Pod: $POSTGRES_POD"
oc rsync sql $POSTGRES_POD:/tmp
oc exec $POSTGRES_POD -- bash -c 'psql -U dbuser -d sampledb -a -f /tmp/sql/schema.sql'
echo "Database schema created"
oc exec $POSTGRES_POD -- bash -c 'psql -U dbuser -d sampledb -a -f /tmp/sql/data.sql'
echo "Sample Data Inserted"
oc exec $POSTGRES_POD -- bash -c 'rm -rf /tmp/sql'
echo "Cleanup SQL scripts"
| true |
51554c8f9ab6b5f67e21f927ac0eaf38684cbad5 | Shell | wook2014/relate | /old_versions/relate_v1.1.1_x86_64_dynamic/scripts/RelateSGE/MakeChunks.sh | UTF-8 | 1,927 | 3.1875 | 3 | [] | no_license |
#!/bin/bash
#$ -V
#$ -j y
echo "***********************************************"
echo "SGE job ID: "$JOB_ID
echo "SGE task ID: "$SGE_TASK_ID
echo "Run on host: "`hostname`
echo "Operating system: "`uname -s`
echo "Username: "`whoami`
echo "Started at: "`date`
echo "***********************************************"
#### check if data exists
check_file_existence (){
if [ ! -f $1 ]; then
echo "File " + $1 + " not found!"
exit 1
fi
}
check_file_existence ${haps}
check_file_existence ${sample}
check_file_existence ${map}
if [[ "$PATH_TO_RELATE" != /* ]];
then
PATH_TO_RELATE="../${PATH_TO_RELATE}"
fi
if [ ! -z ${dist-} ]
then
check_file_existence ${dist}
if [ -z ${annot-} ]
then
${PATH_TO_RELATE}/bin/Relate \
--mode "MakeChunks" \
--haps ${haps} \
--sample ${sample} \
--map ${map} \
--dist ${dist} \
--memory ${memory} \
--output $output 2>> log/make_chunks.log
else
check_file_existence ${annot}
${PATH_TO_RELATE}/bin/Relate \
--mode "MakeChunks" \
--haps ${haps} \
--sample ${sample} \
--map ${map} \
--dist ${dist} \
--annot ${annot} \
--memory ${memory} \
--output $output 2>> log/make_chunks.log
fi
else
if [ -z ${annot-} ]
then
${PATH_TO_RELATE}/bin/Relate \
--mode "MakeChunks" \
--haps ${haps} \
--sample ${sample} \
--map ${map} \
--memory ${memory} \
--output $output 2>> log/make_chunks.log
else
check_file_existence ${annot}
${PATH_TO_RELATE}/bin/Relate \
--mode "MakeChunks" \
--haps ${haps} \
--sample ${sample} \
--map ${map} \
--annot ${annot} \
--memory ${memory} \
--output $output 2>> log/make_chunks.log
fi
fi
echo "***********************************************"
echo "Finished at: "`date`
echo "***********************************************"
exit 0
| true |
c9fbf28ace48d7b78632c2c381ce8d178a218073
|
Shell
|
emuikernel/BDXDaq
|
/devel/coda/src/scripts/init_level2
|
UTF-8
| 287 | 2.875 | 3 |
[] |
no_license
|
#!/bin/sh
#\
exec /usr/bin/wish -f "$0" ${1+"$@"}
#
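# Note: the "#\" above is the standard sh-to-Tcl handoff trick -- sh treats the
# backslash as part of a comment and runs the exec line, re-launching this file
# under wish, while Tcl reads "#\<newline>exec ..." as one continued comment.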
# init_level2
# set TS level2 variable to 0
if { [llength $argv] != 0 } {
set roc $argv
} else {
set roc "clastrig2"
}
set err [catch {set status [exec tcpClient $roc reset_ts_level2]}]
if {$err!=0} {exit $err} else {exit 0}
| true |
32f18800c958ca1a1ed162620fb01f96ba335859
|
Shell
|
tokazio/musicfile-manager
|
/mp3lib/src/entagged/entagged-build-resources/misc/entagged.sh
|
UTF-8
| 898 | 3.890625 | 4 |
[] |
no_license
|
#!/bin/sh
#Entagged launch script to be placed in /usr/bin or something
#renamed to entagged if possible...
#These two variables have to be setup during installation process
#to reflect the program installation directory (eg. "/usr/share/entagged")
#and version number (eg. "0.15")
PROGRAM_DIR=##PROGRAM_DIR##
PROGRAM_VERSION=##PROGRAM_VERSION##
#If the java executable isn't in the path, or has to be specified, change this:
JAVA_EXECUTABLE="java"
MSG0="ERROR:\nYou must edit this script and change PROGRAM_DIR and PROGRAM_VERSION to point to where you installed Entagged, and the version of it"
MSG1="Attempting to start Entagged..."
if [ ! -d $PROGRAM_DIR ]; then
echo -e "$MSG0" >&2
exit 1
fi
if [ ! -f ${PROGRAM_DIR}/entagged-tageditor-${PROGRAM_VERSION}.jar ]; then
echo -e "$MSG0" >&2
exit 1
fi
echo $MSG1
$JAVA_EXECUTABLE -jar ${PROGRAM_DIR}/entagged-tageditor-${PROGRAM_VERSION}.jar $1
| true |
7d7536cd5abea0ed0d0d57efd71aac250e25ffff
|
Shell
|
malvery/dotfiles
|
/bin/tmux-workspace.sh
|
UTF-8
| 308 | 3.21875 | 3 |
[] |
no_license
|
#!/bin/bash
if [ -z "$1" ]; then
tmux attach -t 0 && exit 0
tmux new-session -d -s 0
# tmux split-window -h
# tmux split-window -v
# tmux selectp -t 0
tmux -2 attach-session -d
else
ls $1 &> /dev/null || exit 2
TARGET=${1%/}
tmux attach -t $TARGET || tmux new -t $TARGET -c $TARGET
fi
| true |
b9e503f3934aef17f7cbf30b9987b6484ba432e5
|
Shell
|
kuptservol/sphinx-management-console
|
/ansible/distrib/bin/sphinx-console-agent.j2
|
UTF-8
| 390 | 2.625 | 3 |
[] |
no_license
|
#!/bin/bash
# description: sphinx-console Start Stop Restart
# processname: sphinx-console-agent
# chkconfig: 234 20 80
SPHINX_CONSOLE_HOME={{sphinx-console_path}}
case $1 in
start)
sh $SPHINX_CONSOLE_HOME/bin/start-agent.sh
;;
stop)
sh $SPHINX_CONSOLE_HOME/bin/stop-agent.sh
;;
restart)
sh $SPHINX_CONSOLE_HOME/bin/stop-agent.sh
sh $SPHINX_CONSOLE_HOME/bin/start-agent.sh
;;
esac
exit 0
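# Typical invocation, assuming this is installed as /etc/init.d/sphinx-console-agent:
#   service sphinx-console-agent start|stop|restart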
| true |
36302cefe98fcd9aac796898b39e272d8f7c0976
|
Shell
|
dustinliddick/elk
|
/rasmussen/install/prod_kibana3_node.sh
|
UTF-8
| 4,081 | 3.25 | 3 |
[] |
no_license
|
#!/bin/bash
set -e
# Setup logging directories
mkdir -p /opt/collegis/software/logstash
# Logs stderr and stdout to separate files.
exec 2> >(tee "/opt/collegis/software/logstash/kibana.err")
exec 1> >(tee "/opt/collegis/software/logstash/kibana.log")
# Register server with satellite
curl http://il1satsvr01.deltakedu.corp/pub/bootstrap/bootstrap-server.sh | /bin/bash
rhn-channel --add --channel=clone-epel_rhel6x_x86_64 -u dustin.liddick -p bviad3kq
rhn-channel --add --channel=rhel-x86_64-server-6-rhscl-1 -u dustin.liddick -p bviad3kq
# install apache24
yum -y install httpd24
### Install Kibana ###
cd /opt/collegis/software
mkdir kibana
cd kibana
curl -O https://download.elasticsearch.org/kibana/kibana/kibana-3.0.1.tar.gz
tar -xvf kibana-3.0.1.tar.gz
vi /opt/collegis/software/kibana/kibana-3.0.1/config.js
tee -a /opt/collegis/software/kibana/kibana-3.0.1/config.js <<EOF
/** @scratch /configuration/config.js/1
*
* == Configuration
* config.js is where you will find the core Kibana configuration. This file contains parameter that
* must be set before kibana is run for the first time.
*/
define(['settings'],
function (Settings) {
/** @scratch /configuration/config.js/2
*
* === Parameters
*/
return new Settings({
/** @scratch /configuration/config.js/5
*
* ==== elasticsearch
*
* The URL to your elasticsearch server. You almost certainly don't
* want +http://localhost:9200+ here. Even if Kibana and Elasticsearch are on
* the same host. By default this will attempt to reach ES at the same host you have
* kibana installed on. You probably want to set it to the FQDN of your
* elasticsearch host
*
* Note: this can also be an object if you want to pass options to the http client. For example:
*
* +elasticsearch: {server: "http://localhost:9200", withCredentials: true}+
*
*/
elasticsearch: "http://10.8.31.51:9200",
/** @scratch /configuration/config.js/5
*
* ==== default_route
*
* This is the default landing page when you don't specify a dashboard to load. You can specify
* files, scripts or saved dashboards here. For example, if you had saved a dashboard called
* `WebLogs' to elasticsearch you might use:
*
* default_route: '/dashboard/elasticsearch/WebLogs',
*/
default_route : '/dashboard/file/default.json',
/** @scratch /configuration/config.js/5
*
* ==== kibana-int
*
* The default ES index to use for storing Kibana specific object
* such as stored dashboards
*/
kibana_index: "kibana-int",
/** @scratch /configuration/config.js/5
*
* ==== panel_name
*
* An array of panel modules available. Panels will only be loaded when they are defined in the
* dashboard, but this list is used in the "add panel" interface.
*/
panel_names: [
'histogram',
'map',
'goal',
'table',
'filtering',
'timepicker',
'text',
'hits',
'column',
'trends',
'bettermap',
'query',
'terms',
'stats',
'sparklines'
]
});
});
EOF
# We will be using Apache to serve our Kibana installation, so let's move the files
# into an appropriate location. Create a directory with the following command:
mkdir -p /var/www/kibana3
# Copy the Kibana files into your newly-created directory:
cp -R /opt/collegis/software/kibana/kibana-3.0.1/* /var/www/kibana3/
#cd /opt/collegis/software/kibana/
#wget https://assets.digitalocean.com/articles/logstash/kibana3.conf
#vi kibana3.conf
# Edit virtual host file and change FQDN to server FQDN;
# change `root` to where we installed Kibana
# copy it to your Apache configuration configuration
#cp /opt/collegis/software/kibana/kibana3.conf /opt/rh/httpd24/root/etc/httpd/conf.d/
# Generate loging to access Kibana
#htpasswd -c /etc/httpd/conf.d/kibana-htpasswd `poc_user`
# Restart Apache to put changes into effect
service httpd24-httpd restart
chkconfig httpd24-httpd on
## Install Apache HTTP Complete ###
| true |
686497054c2870a6dbcdb9e8eb2de09c28ac3f25
|
Shell
|
lxd2502/danhuang
|
/public/nsConfig/1process.sh
|
UTF-8
| 3,615 | 3.609375 | 4 |
[] |
no_license
|
#!/bin/bash
# Download the H.264 video (lossless H.264 encoded) from "http://www2.tkn.tu-berlin.de/research/evalvid/cif.html"
# http://csie.nqu.edu.tw/smallko/ns2_old/myevalvid2.htm
# http://csie.nqu.edu.tw/smallko/ns2/myEvalvid.htm
# http://csie.nqu.edu.tw/smallko/ns2_old/Evalvid_in_NS2.htm
# http://www2.tkn.tu-berlin.de/research/evalvid/EvalVid/docevalvid.html
# Tools parameters
FFMPEG=ffmpeg
XVID_ENCRAW=xvid_encraw
MP4BOX=MP4Box
MP4TRACE=mp4trace
ETMP4=etmp4
PSNR=psnr
HIST=hist
MOS=mos
MIV=miv
# File defination
NS=ns
SCRIPT="be.tcl"
RAW_FILE=$1
YUV_FILE="foreman_cif.yuv"
XVID_OUT="xvid_out.m4v"
MP4BOX_OUT="mp4box_out.mp4"
MP4TRACE_OUT="mp4trace_out"
ETMP4_OUT="etmp4_out"
# Convert "*.mp4" file to "*.264"
$FFMPEG -i ${RAW_FILE} -an -vcodec libx264 -crf 23 input.264
# Convert "*.264" file to "*.yuv" file
$FFMPEG -i input.264 ${YUV_FILE}
# Encoding a yuv sequence into MPEG4 data format.
# It will create compressed raw videos with 30 frames per second, a GOP length of 30 frames with no B-frames.
$XVID_ENCRAW -i ${YUV_FILE} -w 352 -h 288 -framerate 30 -max_key_interval 30 -o ${XVID_OUT}
# Following command lines create ISO MP4 files containing the video samples (frames)
# and a hint track which describes how to packetize the frames for the transport with RTP.
$MP4BOX -hint -mtu 1024 -fps 30 -add ${XVID_OUT} ${MP4BOX_OUT}
# Ref YUV, for MOS and MIV
$FFMPEG -i ${MP4BOX_OUT} ref_video.yuv
# The mp4trace tool from EvalVid is able to send a hinted mp4-file per RTP/UDP to a specified destination host.
# The output of mp4trace will be needed later, so it should be redirected to a file.
$MP4TRACE -f -s 192.168.0.2 12346 ${MP4BOX_OUT} > ${MP4TRACE_OUT}
# Run simulation
$NS $SCRIPT
# The next step is the reconstruction of the transmitted video as it is seen by the receiver.
# For this, the video and trace files are processed by etmp4 (Evaluate Traces of MP4-file transmission):
# NB. This generates a (possibly corrupted) video file, where all frames that got lost or
# were corrupted are deleted from the original video track.
$ETMP4 -p -0 sd rd ${MP4TRACE_OUT} ${MP4BOX_OUT} ${ETMP4_OUT}
# Decode the received video to yuv format. (Please use ffmpeg to decode the compressed file.
# It won’t cause any error in most cases. If you use other codec to decode, it may cause errors in most cases.)
$FFMPEG -i ${ETMP4_OUT}.mp4 result.yuv
# to convenient to play the mp4 for html
$FFMPEG -i etmp4_out.mp4 -vcodec h264 $2
# Compute the PSNR.
mkdir psnr
$PSNR 352 288 420 ${YUV_FILE} ref_video.yuv > ref_psnr.txt
$PSNR 352 288 420 ${YUV_FILE} result.yuv > psnr/psnr.txt
# MOS (Mean Opinion Score): MOS is a subjective metric to measure digital video quality at the application level.
# This metric of the human quality impression is usually given on a scale that ranges from 1 (worst) to 5 (best).
$MOS psnr ref_psnr.txt 25 > mos.txt
$MIV psnr > miv.txt
# If you are interested in delay or jitter distributions, the hist tool could be of interest. E.g.,
awk '{print $3}' delay_${ETMP4_OUT}.txt | $HIST - 0 .05 50 > hist.txt
# gives the time, PDF and CDF of the end-to-end delay of transmission ${ETMP4_OUT}.
# Store the results
Fold=./data
mkdir ${Fold}
mv ${YUV_FILE} ${Fold}/${YUV_FILE}
mv ${XVID_OUT} ${Fold}/${XVID_OUT}
mv ${MP4BOX_OUT} ${Fold}/${MP4BOX_OUT}
mv ${MP4TRACE_OUT} ${Fold}/${MP4TRACE_OUT}
mv ${ETMP4_OUT}.mp4 ${Fold}/${ETMP4_OUT}.mp4
mv ${ETMP4_OUT}.m4v ${Fold}/${ETMP4_OUT}.m4v
mv *.txt ${Fold}/
mv sd ${Fold}/sd
mv rd ${Fold}/rd
mv result.yuv ${Fold}/result.yuv
mv psnr ${Fold}/
mv ref_video.yuv ${Fold}/ref_video.yuv
rm -rf out.tr
rm -rf video1.dat
| true |
5531baf5d2f444494ad6df712eae2f14fddc00ea
|
Shell
|
smukher2/GithubScientificReportsGlioblastomaStemApril2020
|
/RNAseqASUcyverseClusterResultsScript/GSE70696_RNA-seq_NCBIgenome_pipeline_rat.sh
|
UTF-8
| 3,543 | 3.03125 | 3 |
[] |
no_license
|
#!/bin/bash
#
################This script is for GSE70696################
#Run this script in a new directory
#mkdir GSE70696
#This script assumes all the tools are in your path. If not specify the path of your tools.
#'rnor6_ensemble_seq_whole_genome.fa' renamed and indexed as 'Rnor6.fa' by tophat and bowtie2 above is used in the downstream steps of this pipeline
#'Rattus_norvegicus.Rnor_6.0.95.gtf' renamed and indexed as 'Rnor6.gtf' by tophat and bowtie2 above is used in the downstream steps of this pipeline
###Step 1 getting fastq raw reads####
#Reference: https://bioinformatics.stackexchange.com/questions/2644/download-multiple-sra-files?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa
for ((i=3486450;i<=3486456;i++))
do
fastq-dump --gzip --split-files --accession SRR$i
echo 'Hi SGN blue group I am done getting fastq.gz files from GEO'
done
###Step2 fastqc quality check of your reads###
fastqc *.fastq.gz
echo 'Hi SGN blue group I am done doing fastqc quality check'
done
###Step3 mapping with tophat###
#First unzip files for use in tophat
gunzip *.fastq.gz
#Now actual tophat the SE
for i in SRR3486450_1 SRR3486449_1 SRR3486455_1 SRR3486456_1
do
#tophat -o /home/smukherjee/GSE70696_map/tophat_$i -G /home/smukherjee/Rnor6/Rnor6.gtf --no-novel-juncs /home/smukherjee/Rnor6/Rnor6 $i.fastq
tophat -o /home/smukherjee/GSE70696_map/tophat_$i --b2-very-fast --no-coverage-search --no-novel-juncs -G /home/smukherjee/Rnor6/Rnor6.gtf --transcriptome-index /home/smukherjee/Rnor6/Rnor6trans_Index/Rnor6 /home/smukherjee/Rnor6/Rnor6 $i.fastq.gz
echo 'Hi SGN blue group I am done mapping SE to bam'
done
#Now actual tophat the PE
for i in SRR3486450 SRR3486449 SRR3486455 SRR3486456
do
tophat -o /home/smukherjee/GSE70696_map/tophat_$i --b2-very-fast --no-coverage-search --no-novel-juncs -G /home/smukherjee/Rnor6/Rnor6.gtf --transcriptome-index /home/smukherjee/Rnor6/Rnor6trans_Index/Rnor6 /home/smukherjee/Rnor6/Rnor6 ${i}_1.fastq ${i}_2.fastq
echo 'Hi SGN blue group I am done mapping SE to bam'
done
#To do tophat indivisually writing out files do like below
#tophat -o /home/smukherjee/GSE70696_map/tophat_SRR1593507 --b2-very-fast --no-coverage-search --no-novel-juncs -G /home/smukherjee/Rnor6/Rnor6.gtf --transcriptome-index /home/smukherjee/Rnor6/Rnor6trans_Index/Rnor6 /home/smukherjee/Rnor6/Rnor6 SRR1593507_1.fastq SRR1593507_2.fastq
###Step4 sort bam files with samtools###
#samtools sort and index bam file
#http://quinlanlab.org/tutorials/samtools/samtools.html
for ((i=3486450;i<=3486456;i++))
do
samtools sort SRR${i}_accepted_hits.bam -o SRR${i}_sort_accepted_hits.bam
done
#To use samtools individually use code below
#samtools sort SRR2531549_accepted_hits.bam -o SRR2531549_sort_accepted_hits.bam
###Step5 htseq count bam files with htseqtools###
#Usage of htseqcount
#http://htseq.readthedocs.io/en/master/count.html
#htseq-count
# -s {yes,no,reverse}, --stranded {yes,no,reverse} whether the data is from a strand-specific assay.Specify 'yes', 'no', or 'reverse' (default: yes).'reverse' means 'yes' with reversed strand interpretation
#htseq-count [options] <alignment_files> <gff_file>
for ((i=3486450;i<=3486456;i++))
do
htseq-count -f bam -r pos -t gene -i gene_name -s no SRR${i}_sort_accepted_hits.bam /home/smukherjee/Rnor6/Rnor6.gtf > SRR${i}.txt
done
#To use htseq count individually use code below
#htseq-count -f bam -r pos -t gene -i gene_name -s no SRR2531546_sort_accepted_hits.bam /home/smukherjee/Rnor6/Rnor6.gtf > SRR2531546.txt
| true |
7188b34a2b7dbbd4c31dc681db661c0d646390ec
|
Shell
|
mffrench/fabric-ca
|
/scripts/fvt/enroll.sh
|
UTF-8
| 1,888 | 3.203125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
FABRIC_CA="$GOPATH/src/github.com/hyperledger/fabric-ca"
FABRIC_CAEXEC="$FABRIC_CA/bin/fabric-ca"
TESTDATA="$FABRIC_CA/testdata"
SCRIPTDIR="$FABRIC_CA/scripts/fvt"
. $SCRIPTDIR/fabric-ca_utils
HOST="http://localhost:8888"
RC=0
$($FABRIC_TLS) && HOST="https://localhost:8888"
while getopts "du:p:t:l:x:" option; do
case "$option" in
d) FABRIC_CA_DEBUG="true" ;;
x) CA_CFG_PATH="$OPTARG" ;;
u) USERNAME="$OPTARG" ;;
p) USERPSWD="$OPTARG"
test -z "$USERPSWD" && AUTH=false
;;
t) KEYTYPE="$OPTARG" ;;
l) KEYLEN="$OPTARG" ;;
esac
done
test -z "$CA_CFG_PATH" && CA_CFG_PATH="$HOME/fabric-ca"
test -z "$CLIENTCERT" && CLIENTCERT="$CA_CFG_PATH/cert.pem"
test -z "$CLIENTKEY" && CLIENTKEY="$CA_CFG_PATH/key.pem"
test -f "$CA_CFG_PATH" || mkdir -p $CA_CFG_PATH
: ${FABRIC_CA_DEBUG="false"}
: ${AUTH="true"}
: ${USERNAME="admin"}
: ${USERPSWD="adminpw"}
$($AUTH) || unset USERPSWD
: ${KEYTYPE="ecdsa"}
: ${KEYLEN="256"}
test "$KEYTYPE" = "ecdsa" && sslcmd="ec"
genClientConfig "$CA_CFG_PATH/client-config.json"
$FABRIC_CAEXEC client enroll "$USERNAME" "$USERPSWD" "$HOST" <(echo "{
\"hosts\": [
\"admin@fab-client.raleigh.ibm.com\",
\"fab-client.raleigh.ibm.com\",
\"127.0.0.2\"
],
\"CN\": \"$USERNAME\",
\"key\": {
\"algo\": \"$KEYTYPE\",
\"size\": $KEYLEN
},
\"names\": [
{
\"SerialNumber\": \"$USERNAME\",
\"O\": \"Hyperledger\",
\"O\": \"Fabric\",
\"OU\": \"FABRIC_CA\",
\"OU\": \"FVT\",
\"STREET\": \"Miami Blvd.\",
\"DC\": \"peer\",
\"UID\": \"admin\",
\"L\": \"Raleigh\",
\"L\": \"RTP\",
\"ST\": \"North Carolina\",
\"C\": \"US\"
}
]
}")
RC=$?
$($FABRIC_CA_DEBUG) && printAuth $CLIENTCERT $CLIENTKEY
exit $RC
| true |
3be311bf52b49415b719794e8a09772f852bfccf
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/rgbds-git/PKGBUILD
|
UTF-8
| 835 | 2.796875 | 3 |
[] |
no_license
|
# Maintainer: Jaume <jaume@delclos.com>
pkgname=rgbds-git
pkgver=v0.0.2.r25.g3ecd169
pkgrel=1
pkgdesc="Rednex GameBoy Development System"
arch=('i686' 'x86_64')
url="https://github.com/bentley/rgbds/"
license=('custom')
depends=('glibc')
makedepends=('git' 'gcc' 'bison')
source=("$pkgname::git+https://github.com/bentley/rgbds/")
md5sums=("SKIP")
pkgver() {
cd "$srcdir/$pkgname"
git describe --long | sed -E 's/([^-]*-g)/r\1/;s/-/./g'
}
build() {
cd "$srcdir/$pkgname"
make -j1
}
package() {
cd "$srcdir/$pkgname"
# It has no DESTDIR option, just PREFIX
mkdir -p "$pkgdir/usr/bin"
mkdir -p "$pkgdir/usr/share/man/man7"
mkdir -p "$pkgdir/usr/share/man/man1"
make PREFIX="$pkgdir/usr" MANPREFIX="$pkgdir/usr/share/man" install
install -D LICENSE "$pkgdir/usr/share/licenses/rgbds-git/LICENSE"
}
# vim:set ts=2 sw=2 et:
| true |
773cdab633942fd075ab91e1fcff45aea06d0826
|
Shell
|
edlanglois/dotfiles
|
/src/config/i3blocks/scripts/gpu-usage
|
UTF-8
| 598 | 3.734375 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# usage: gpu-usage [NUM_GPUS [WARNING_THRESHOLD [CRITICAL_THRESHOLD]]]
set -eu
NUM_GPUS=${1:-1}
WARNING_THRESHOLD=${2:-200}
ERROR_THRESHOLD=${3:-200}
for ((ID=0;ID<NUM_GPUS;ID++)); do
PCT_USAGE="$(nvidia-smi --query-gpu=utilization.gpu --format=csv,nounits,noheader --id="$ID")"
FG=""
if [ "$PCT_USAGE" -ge "$ERROR_THRESHOLD" ]; then
FG="#FF0000"
elif [ "$PCT_USAGE" -ge "$WARNING_THRESHOLD" ]; then
FG="#FFFC00"
fi
USAGE_TEXT="$PCT_USAGE%"
if [ -n "$FG" ]; then
USAGE_TEXT="<span color=\"$FG\">$USAGE_TEXT</span>"
fi
printf "%s" "$USAGE_TEXT"
done
printf "\n"
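# Example (hypothetical thresholds): "./gpu-usage 2 50 90" prints one usage value
# per GPU on a single line, wrapped in yellow <span> markup from 50% and red from 90%.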
| true |
cfe25f2ee7256bb936238907953c508583836cb6
|
Shell
|
laristra/flecsi
|
/tools/find_guard
|
UTF-8
| 156 | 2.65625 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
#! /usr/bin/env bash
cwd=`pwd`
guard="flecsi_"`echo $cwd | sed 's,^.*\/flecsi,,g'`"_"$1"_h"
echo $guard
guard=`echo $guard | sed 's,\/,_,g'`
echo $guard
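# Example (assumed layout): run from .../flecsi/src/data with "$1" = "storage",
# the first echo prints "flecsi_/src/data_storage_h" and the second, after the
# slash-to-underscore substitution, prints "flecsi__src_data_storage_h".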
| true |
43fddbf61818fa6112b89e7ab5259a3a2e498015
|
Shell
|
mkoskar/pkgbuilds
|
/aur/edid-decode-git/PKGBUILD
|
UTF-8
| 769 | 2.765625 | 3 |
[
"Apache-2.0"
] |
permissive
|
# Maintainer: Jonathan Liu <net147@gmail.com>
pkgname=edid-decode-git
_gitname=edid-decode
pkgver=r652.e48fb38
pkgrel=1
pkgdesc="EDID decoder and conformance tester"
url="https://git.linuxtv.org/edid-decode.git/"
arch=('i686' 'x86_64')
license=('MIT')
depends=('gcc-libs')
makedepends=('git')
provides=('edid-decode')
source=('git+https://git.linuxtv.org/edid-decode.git')
md5sums=('SKIP')
pkgver() {
cd "$_gitname"
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
build() {
cd "$_gitname"
make
}
package() {
cd "$_gitname"
make DESTDIR="$pkgdir" install
install -d -m 755 "$pkgdir/usr/share/licenses/$pkgname"
sed -n '1,/^$/p' edid-decode.cpp | head -n -2 > "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
# vim:set ts=2 sw=2 et:
| true |
63349e62591477ae662a7844826fafb036233a88
|
Shell
|
sqrtroot/Observatory
|
/install.sh
|
UTF-8
| 5,271 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/bash
set -e
if [ "$EUID" -ne 0 ]
then echo "Please run as root"
exit
fi
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
software(){
figlet Software
# ____ __ _
#/ ___| ___ / _| |___ ____ _ _ __ ___
#\___ \ / _ \| |_| __\ \ /\ / / _` | '__/ _ \
# ___) | (_) | _| |_ \ V V / (_| | | | __/
#|____/ \___/|_| \__| \_/\_/ \__,_|_| \___|
apt update
apt install -y \
cmake \
gcc \
qt5-default \
libqt5serialport5-dev \
qtscript5-dev \
qtmultimedia5-dev \
qtpositioning5-dev \
qttools5-dev \
libdrm-dev \
libgpiod-dev \
gpiod \
imagemagick \
libxfixes-dev \
libxi-dev \
libev-dev
}
user(){
figlet User
# _ _ ___ ___ _ __
# | | | / __|/ _ \ '__|
# | |_| \__ \ __/ |
# \__,_|___/\___|_|
if ! `id -u stellarium 2>/dev/null 1>/dev/null`; then
useradd -m -d /home/stellarium -G tty,gpio stellarium
sudo -u stellarium mkdir -p /home/stellarium/.stellarium/
fi
cp -f $SCRIPTPATH/stellarium-config.ini /home/stellarium/.stellarium/config.ini
chown stellarium:stellarium /home/stellarium/.stellarium/config.ini
sudo -u stellarium touch /home/stellarium/.hushlogin
}
stellarium(){
figlet Stellarium
# ____ _ _ _ _
#/ ___|| |_ ___| | | __ _ _ __(_)_ _ _ __ ___
#\___ \| __/ _ \ | |/ _` | '__| | | | | '_ ` _ \
# ___) | || __/ | | (_| | | | | |_| | | | | | |
#|____/ \__\___|_|_|\__,_|_| |_|\__,_|_| |_| |_|
sudo -u `stat -c "%U" .` bash << EOF
git submodule update --init --depth=1 --recursive -- stellarium
$SCRIPTPATH/change_nebulae.sh
cd stellarium
git apply ../stellarium.patch
mkdir -p build/unix
cd build/unix
cmake ../.. \
-DUSE_PLUGIN_TELESCOPECONTROL:BOOL="0" \
-DUSE_PLUGIN_COMPASSMARKS:BOOL="0" \
-DUSE_PLUGIN_ANGLEMEASURE:BOOL="0" \
-DUSE_PLUGIN_POINTERCOORDINATES:BOOL="0" \
-DCMAKE_BUILD_TYPE:STRING="Release" \
-DUSE_PLUGIN_OCULARS:BOOL="0" \
-DUSE_PLUGIN_EXOPLANETS:BOOL="0" \
-DUSE_PLUGIN_OCULUS:BOOL="0" \
-DUSE_PLUGIN_ARCHAEOLINES:BOOL="0" \
-DUSE_PLUGIN_TEXTUSERINTERFACE:BOOL="0" \
-DENABLE_GPS:BOOL="0" \
-DUSE_PLUGIN_EQUATIONOFTIME:BOOL="1"
make -j $(nproc)
EOF
cd stellarium/build/unix
make install
cd $SCRIPTPATH
}
unclutter(){
sudo -u `stat -c "%U" .` bash << EOF
git submodule update --init --depth=1 --recursive -- unclutter-xfixes
cd unclutter-xfixes
make unclutter
EOF
cd unclutter-xfixes
install -Dm 0755 unclutter /usr/bin/unclutter
cd $SCRIPTPATH
}
control-plugin(){
figlet Control-plugin
# _ _ _ _
# ___ ___ _ __ | |_ _ __ ___ | | _ __ | |_ _ __ _(_)_ __
# / __/ _ \| '_ \| __| '__/ _ \| |_____| '_ \| | | | |/ _` | | '_ \
#| (_| (_) | | | | |_| | | (_) | |_____| |_) | | |_| | (_| | | | | |
# \___\___/|_| |_|\__|_| \___/|_| | .__/|_|\__,_|\__, |_|_| |_|
# |_| |___/
cd control-plugin/
sudo -u `stat -c "%U" .` bash << EOF
git submodule update --init --depth=1 --recursive
mkdir -p build
cd build
cmake .. \
-DCMAKE_BUILD_TYPE:STRING="Release"
make
EOF
sudo -u stellarium mkdir -p /home/stellarium/.stellarium/modules/control_plugin/
cp build/libcontrol_plugin.so /home/stellarium/.stellarium/modules/control_plugin/
chown stellarium:stellarium /home/stellarium/.stellarium/modules/control_plugin/libcontrol_plugin.so
install -Dm 0755 $SCRIPTPATH/toggleTV.sh /usr/bin/toggleTV
}
startup(){
figlet Startup
# _ _
# ___| |_ __ _ _ __| |_ _ _ _ __
# / __| __/ _` | '__| __| | | | '_ \
# \__ \ || (_| | | | |_| |_| | |_) |
# |___/\__\__,_|_| \__|\__,_| .__/
# |_|
cp -f $SCRIPTPATH/startup-files/stellarium.xinit /etc/X11/xinit/stellarium.xinit
cp -f $SCRIPTPATH/startup-files/stellarium.sh /home/stellarium/stellarium.sh
chown stellarium:stellarium /home/stellarium/stellarium.sh
chsh -s /home/stellarium/stellarium.sh stellarium
systemctl disable display-manager || true
systemctl set-default graphical
cat > /etc/systemd/system/getty@tty1.service.d/autologin.conf << EOF
[Service]
ExecStart=
ExecStart=-/sbin/agetty --autologin stellarium --skip-login --noclear %I \$TERM
EOF
printf "" > /etc/issue
printf "" > /etc/issue.net
printf "" > /etc/motd
if ! grep -q "^disable_splash=1" /boot/config.txt; then
echo "disable_splash=1" >> /boot/config.txt
fi
if ! grep -q logo.nologo /boot/cmdline.txt; then
sed -i '$s/$/ logo.nologo/' /boot/cmdline.txt
fi
if ! grep -q vt.global_cursor_default=0 /boot/cmdline.txt; then
sed -i '$s/$/ vt.global_cursor_default=0/' /boot/cmdline.txt
fi
cp -r $SCRIPTPATH/stellarium_plymouth_theme /usr/share/plymouth/themes/stellarium/
cat > /etc/plymouth/plymouthd.conf << EOF
[Daemon]
Theme=stellarium
ShowDelay=5
EOF
update-initramfs -u
}
rtc(){
figlet rtc
# _
# _ __| |_ ___
#| '__| __/ __|
#| | | || (__
#|_| \__\___|
if ! grep -q "^dtparam=i2c_arm=on" /boot/config.txt; then
echo "dtparam=i2c_arm=on" >> /boot/config.txt
fi
if ! grep -q "^i2c-dev" /etc/modules; then
echo i2c-dev >> /etc/modules
fi
}
all(){
software
user
stellarium
unclutter
control-plugin
startup
rtc
}
if [[ $# -eq 0 ]]; then
all
else
for argval in "$@"; do
$argval
done
fi
| true |
960256312c8706fbcfd62b6ea190c8223cef1362
|
Shell
|
t0nik0/tmux
|
/dracula.tmux
|
UTF-8
| 208 | 2.515625 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Author: Dane Williams
# a theme for tmux inspired by dracula
# source and run dracula theme
current_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
$current_dir/scripts/dracula.sh
| true |
b7270d58271016282dd400500c035d35970f435c
|
Shell
|
sroycode/identt
|
/test/test_store_invite.sh
|
UTF-8
| 648 | 2.640625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
export SRCDIR=$(dirname $(cd ${0%/*} 2>>/dev/null ; echo `pwd`/${0##*/}))
export SRCFIL=$(basename $(cd ${0%/*} 2>>/dev/null ; echo `pwd`/${0##*/}))
. ${SRCDIR}/config.sh
## ---- variables
export medium=${medium:="msisdn"};
export address=${address:="919999999991"};
export room_id=${room_id:="@12345"};
export sender=${sender:="@sroycode:matrix.justdis.com"};
export accesskey=${accesskey:="msisdn:919999999999:JcEZP9gXd2MYGqAXp2B9eF6KNGz8cDpj"};
## ---- main
if [ ${POSTFORM} -eq 0 ] ; then
${HTTPIE} POST ${TESTURL}/store-invite \
medium:="\"${medium}\"" \
address:="\"${address}\"" \
room_id:="\"${room_id}\"" \
sender:="\"${sender}\"" \
accesskey:="\"${accesskey}\""
fi
| true |
adad787dae60aeb78273d70ad67bbb420c58e690
|
Shell
|
benevolence-doctor/sunny_shell
|
/scripts/totle.sh
|
UTF-8
| 1,397 | 3 | 3 |
[] |
no_license
|
#!/bin/bash
TIME=$(date -d "1 days ago" +%Y-%m-%d)
#TIME="2014-07-31"
for dir in `cat /opt/dir.txt`
do
/bin/mkdir "$dir"/"$TIME"/test
/bin/tar -zxvf "$dir"/"$TIME"/*"$TIME".tar.gz -C "$dir"/"$TIME"/test
/bin/ls "$dir"/"$TIME"/test >/opt/file.log
for num2 in `cat /opt/file.log`
do
python /opt/t.py "$dir"/"$TIME"/test/$num2
/bin/awk -F/ '{print $4}' "$dir"/"$TIME"/test/$num2 >> /opt/check_2.txt
done
/bin/cat /opt/check_2.txt |sort |uniq -c|sort -nr |head -n 40 >> /opt/log/"$TIME".log
/bin/rm -rf "$dir"/"$TIME"/test
/bin/rm -f /opt/check_2.txt
done
wc -l /opt/iphone.txt >> /opt/ua-"$TIME".txt ;wc -l /opt/ipad.txt >> /opt/ua-"$TIME".txt;wc -l /opt/other.txt >> /opt/ua-"$TIME".txt;wc -l /opt/android.txt >> /opt/ua-"$TIME".txt
echo -n "ipad: " >> /opt/ua-"$TIME".txt;awk '{sum+=$NF}END{print sum/1024/1024}' /opt/ipad.txt >> /opt/ua-"$TIME".txt
echo -n "iphone: " >> /opt/ua-"$TIME".txt ;awk '{sum+=$NF}END{print sum/1024/1024}' /opt/iphone.txt >> /opt/ua-"$TIME".txt
echo -n "pc: " >> /opt/ua-"$TIME".txt;awk '{sum+=$NF}END{print sum}' /opt/other.txt >> /opt/ua-"$TIME".txt
echo -n "android: " >> /opt/ua-"$TIME".txt; awk '{sum+=$NF}END{print sum/1024/1024}' /opt/android.txt >> /opt/ua-"$TIME".txt
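# Note: the ipad/iphone/android totals above are converted to MB (sum/1024/1024);
# the pc total is left in the raw units of $NF.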
rm -f /opt/android.txt /opt/ipad.txt /opt/iphone.txt /opt/other.txt
| true |
825f3a585476c26537cdaf2fc34ec44c291ad9ff
|
Shell
|
croesch/beefiles
|
/src/maven-repository-houskeeping.sh
|
UTF-8
| 1,205 | 3.390625 | 3 |
[
"Beerware"
] |
permissive
|
#!/bin/bash
LOGNAME=maven-housekeeping.log
LOGFILE="/var/log/${LOGNAME}"
AGE=36
if [ "$#" -eq 1 ]
then
LOGFILE="${1}"
fi
date >> ${LOGFILE}
echo "------------------------------------------------------------------------" >> ${LOGFILE}
if [ -z "${M2_REPO}" ]
then
M2_REPO=$(mvn help:evaluate -Dexpression=settings.localRepository | grep -v '[INFO]' | grep -v 'Download')
echo "M2_REPO was not set, set it to '$M2_REPO'" >> ${LOGFILE}
fi
find -L "${M2_REPO}" -name '*jar' -atime +${AGE} -exec rm -rfv {} \; >> ${LOGFILE}
find -L "${M2_REPO}" -name '*swf' -atime +${AGE} -exec rm -rfv {} \; >> ${LOGFILE}
find -L "${M2_REPO}" -name '*swc' -atime +${AGE} -exec rm -rfv {} \; >> ${LOGFILE}
find -L "${M2_REPO}" -name '*sha1' -atime +${AGE} -exec rm -rfv {} \; >> ${LOGFILE}
find -L "${M2_REPO}" -name '*md5' -atime +${AGE} -exec rm -rfv {} \; >> ${LOGFILE}
find -L "${M2_REPO}" -name '*pom' -atime +${AGE} -exec rm -rfv {} \; >> ${LOGFILE}
find -L "${M2_REPO}" -name '*repositories' -atime +${AGE} -exec rm -rfv {} \; >> ${LOGFILE}
find -L "${M2_REPO}" -name '*properties' -atime +${AGE} -exec rm -rfv {} \; >> ${LOGFILE}
find -L "${M2_REPO}" -empty -type d -delete >> ${LOGFILE}
echo >> ${LOGFILE}
| true |
df21a8cceecbe03a4faa03024da17ada0d9a1510
|
Shell
|
JanLunge/wAI
|
/wlardinstall.sh
|
UTF-8
| 2,698 | 3.59375 | 4 |
[] |
no_license
|
#!/bin/bash
arch_chroot() { #{{{
arch-chroot $MOUNTPOINT /bin/bash -c "${1}"
}
function mirrors() {
url="https://www.archlinux.org/mirrorlist/?country=DE&use_mirror_status=on"
tmpfile=$(mktemp --suffix=-mirrorlist)
# Get latest mirror list and save to tmpfile
curl -so ${tmpfile} ${url}
sed -i 's/^#Server/Server/g' ${tmpfile}
# Backup and replace current mirrorlist file (if new file is non-zero)
if [[ -s ${tmpfile} ]]; then
{ echo " Backing up the original mirrorlist..."
mv -i /etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist.orig; } &&
{ echo " Rotating the new list into place..."
mv -i ${tmpfile} /etc/pacman.d/mirrorlist; }
else
echo " Unable to update, could not download list."
fi
# better repo should go first
cp /etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist.tmp
rankmirrors /etc/pacman.d/mirrorlist.tmp > /etc/pacman.d/mirrorlist
rm /etc/pacman.d/mirrorlist.tmp
# allow global read access (required for non-root yaourt execution)
chmod +r /etc/pacman.d/mirrorlist
#TODO: ask if should open editor
$EDITOR /etc/pacman.d/mirrorlist
}
function createdrive() {
cfdisk
partition=/dev/sda1
mkfs.btrfs -L "Arch Linux" $partition
mkdir /mnt/btrfs-root
mount -o defaults,relatime,discard,ssd,nodev,nosuid $partition /mnt/btrfs-root
mkdir -p /mnt/btrfs-root/__snapshot
mkdir -p /mnt/btrfs-root/__current
btrfs subvolume create /mnt/btrfs-root/__current/root
btrfs subvolume create /mnt/btrfs-root/__current/home
mkdir -p /mnt/btrfs-current
mount -o defaults,relatime,discard,ssd,nodev,subvol=__current/root $partition /mnt/btrfs-current
mkdir -p /mnt/btrfs-current/home
mount -o defaults,relatime,discard,ssd,nodev,nosuid,subvol=__current/home $partition /mnt/btrfs-current/home
}
function baseinstall() {
mountpoint=/mnt/btrfs-current
pacstrap $mountpoint base base-devel parted btrfs-progs f2fs-tools ntp net-tools vim
# everything in chroot now
WIRED_DEV=`ip link | grep "ens\|eno\|enp" | awk '{print $2}'| sed 's/://' | sed '1!d'`
if [[ -n $WIRED_DEV ]]; then
arch_chroot "systemctl enable dhcpcd@${WIRED_DEV}.service"
fi
echo "KEYMAP=US" > $mountpoint/etc/vconsole.conf
#genfstab -L -p mountpoint >> mountpoint/etc/fstab
genfstab -U /mnt >> $mountpoint/etc/fstab
}
function configsystem() {
arch-chroot /mnt/btrfs-current
ln -sf /usr/share/zoneinfo/Europe/Berlin /etc/localtime
#hwclock --systohc
$EDITOR /etc/locale.gen
locale-gen
$EDITOR /etc/locale.conf
$EDITOR /etc/vconsole.conf
echo "wArch" > $mountpoint/etc/hostname
echo "Root password"
passwd
umount -R /mnt
}
export EDITOR=vim
timedatectl set-ntp true
mirrors
createdrive
baseinstall
| true |
f77d8f13c88e674f6099087234962cfca58fcd18
|
Shell
|
aldinh777/game-kejar-pencury
|
/compile.sh
|
UTF-8
| 627 | 3.4375 | 3 |
[] |
no_license
|
JSINPUTLOCATION="scripts/main.js"
JSOUTPUTLOCATION="public/javascripts/bundle.min.js"
function testerror {
if [ $1 -ne 0 ]
then
echo $2
exit $1
fi
}
function browserifying {
echo "browserifying"
browserify $JSINPUTLOCATION -o $JSOUTPUTLOCATION
testerror $? "Error At Browserify"
}
function uglifying {
echo "ulgifying"
uglifyjs -m -o $JSOUTPUTLOCATION $JSOUTPUTLOCATION
testerror $? "Error At UglifyJS"
}
function compiling {
echo "compiling"
tsc sources/main.ts
testerror $? "Error At TypeScript"
JSINPUTLOCATION=sources/main.js
}
function main {
# compiling
browserifying
uglifying
echo "done"
}
main
| true |
1f91c790a28f7824d815bb40b89379a9495ee978
|
Shell
|
alienth/dotfiles
|
/.zprofile
|
UTF-8
| 1,802 | 3.46875 | 3 |
[] |
no_license
|
# Stop here if we aren't interactive
[[ ! -o interactive ]] && return
# I'd like to use the window-title functions I've written, but they aren't
# loaded here yet. Loading that file itself in a login shell would be
# problematic.
if [[ -f ~/.window-name ]]; then
echo -ne "\e]2;$(cat ~/.window-name)\a"
fi;
if [[ $TERM == "screen" ]]; then
echo -ne '\ek'`hostname -s`'\e\\'
fi;
# When sshing into boxes, by default only TERM is passed and accepted. This
# means the remote side does not get our COLORTERM variable, and assumes we do
# not support colour. If we ended up passing in xterm-256color, we recreate
# COLORTERM for use when we execute a screen. We can then set our TERM within
# screen to screen-256color.
#
# The screen-256color check is necessary to ensure the inner-screen gets
# $COLORTERM set.
#
# This is complemented by settings in .shell/zsh/config.zsh
if [[ -z "$COLORTERM" ]]; then
if [[ "$TERM" == "xterm-256color" || "$TERM" == "screen-256color" ]]; then
export COLORTERM=truecolor
fi
fi
if [[ -x /usr/bin/screen ]]; then
# If we're not already in a screen, prompt for opening the outer screen.
if [[ "$TERM" != "screen" && -f ~/.outer ]]; then
screen -ls
echo -n "Run outer screen? "
local INPUT
read -qs INPUT
if [[ "$INPUT" == "y" ]]; then
screen -c .outerscreen -xR outer
else
screen -xR inner
fi
else
screen -xR inner
fi
elif [[ -x /usr/bin/tmux ]]; then
if [[ -z $TMUX ]]; then
if [[ -f ~/.outer ]]; then
echo -n "Run outer tmux? "
local INPUT
read -qs INPUT
if [[ "$INPUT" == "y" ]]; then
exec tmux new-session -A -s outer
else
exec tmux new-session -A -s inner
fi
else
unset TMUX
exec tmux new-session -A -s inner
fi
fi
fi
| true |
1ec4db94a1064eb99c453dd9c93093f8e4dc05dc
|
Shell
|
JustinPainter/dotfiles
|
/.zshenv
|
UTF-8
| 2,022 | 2.984375 | 3 |
[] |
no_license
|
#!/bin/zsh
# Fill $PATH with useful things
# coreutils [/usr/local/opt/coreutils/libexec/gnubin]
# bin for Go [/usr/local/opt/go/libexec/bin]
# scmtool for RTC [/Users/jlpainte/Library/Jazz/scmtools/eclipse]
# local sbin [/usr/local/sbin]
# $HOME bin [$HOME/bin]
export JAVA_HOME=/Library/Java/JavaVirtualMachines/jdk1.8.0_65.jdk/Contents/Home
# Configure shell history
HISTFILE=~/.zsh_history
HISTSIZE=10000
SAVEHIST=10000
# Set locale variables
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
# Put coreutiles manpages into $MANPATH
export MANPATH="/usr/local/man:/usr/local/opt/coreutils/libexec/gnuman:$MANPATH"
# Setup homebrew environment
# Github API key for homebrew access
# Homebrew Cask options
# Text editor for homebrew to use
export HOMEBREW_GITHUB_API_TOKEN=b6683a34ffb4768a3d73026a4b6fa69c16cab012
export HOMEBREW_CASK_OPTS="--appdir=/Applications"
export HOMEBREW_EDITOR="code-insiders -n"
export HOMEBREW_BREWFILE="/Users/jlpainte/Dropbox/Dotfiles/Brewfile"
export CASKFILE="$HOME/.caskfile"
# Setup Go environment
export GOPATH=/Users/jlpainte/Projects/openblockchain/src
# export GOROOT=/usr/local/opt/go/bin
# Colorize the grep command output for ease of use (good for log files)##
export GREP_OPTIONS="--color=always"
export GREP_COLOR='3;35'
# Configure Less to be less intrusive
# -X [disable sending the termcap init and deinit strings to the terminal]
# -F [auto exit if content will fit on screen]
# -R [only ANSI "color" escape sequences are output in "raw" form]
export LESS="-XFR"
# Set editor environment variables
# export VISUAL='vi'
export EDITOR='code-insiders -n'
export GIT_EDITOR='code-insiders -n -w'
# Gist ID for VSCode Settings Sync
export VSCODE_SETTINGS_SYNC_ID=786b4789c69ec644609571cbf1ee9094
# Set project folder that we can `p [tab]` to
export PROJECTS="$HOME/Projects"
# Compilation flags
export ARCHFLAGS="-arch x86_64"
export LD_LIBRARY_PATH=/Library/Java/JavaVirtualMachines/jdk1.8.0_65.jdk/Contents/Home/jre/bin
| true |
422e0c7313abdf723eda7e1e7bd9b3cdf849a0a8
|
Shell
|
ChanJeunlam/geocomputing
|
/snap/02_array_job/snap_array_job.sh
|
UTF-8
| 1,376 | 3.078125 | 3 |
[] |
no_license
|
#!/bin/bash -l
#SBATCH --job-name=snap_array_job
#SBATCH --output=out_%A_%a.txt
#SBATCH --error=err_%A_%a.txt
#SBATCH --account=project_2000599
#SBATCH --partition=small
#SBATCH --time=02:00:00
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4
#SBATCH --mem=8000
#SBATCH --array=1-3
#SBATCH --gres=nvme:10
### Load SNAP module
module load snap
### For looping through all the files:
### Make a list of input files. This folder has 3 S2L2 images
readlink -f /appl/data/geo/sentinel/s2_example_data/L2A/S2* > image_path_list.txt
### Select the inputfile row by row
image_path=$(sed -n ${SLURM_ARRAY_TASK_ID}p image_path_list.txt)
### Parse image basename to be used in output filename
image_filename="$(basename -- $image_path)"
### Assign an output_folder
output_folder=/scratch/project_2000599/snap/output/
# Set custom SNAP user dir
source snap_add_userdir $LOCAL_SCRATCH/cache_"$SLURM_ARRAY_TASK_ID"
### -q is num of cores, -t is target file, -SsourceProduct is the xml inside each SAFE folder
gpt resample_and_lai.xml -q 4 -c 5G -J-Xmx7G -t ${output_folder}/${image_filename}_LAI.tif -SsourceProduct=${image_path}/MTD_MSIL2A.xml -e
# Match values in gpt command with job reservation:
# -q 4 with --cpus-per-task=4
# -J-Xmx7G with --mem=8000, use for job a few Gb less than reserved
# -c 5G with -J-Xmx7G, use ~75 % of available memory for data cache, depends on task..
| true |
af0db5a1517ba59f994f7b64be1f0a5cefe878c2
|
Shell
|
PolymerLabs/uni-virtualizer
|
/scripts/deploy-examples.sh
|
UTF-8
| 560 | 3.265625 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Create a new worktree at `gh-pages/`
if ! git worktree add gh-pages ; then
git worktree remove gh-pages
git worktree add gh-pages
fi
# Clean it out
rm -rf gh-pages/*
# Build uni-virtualizer-examples and copy the build into the gh-pages worktree
npm run build --prefix packages/uni-virtualizer-examples/
cp -r packages/uni-virtualizer-examples/public/* gh-pages
# Commit and push to deploy
cd gh-pages
if ! { git add -A && git commit -m "deploy examples" && git push origin HEAD:gh-pages; } ; then
echo $'\nNothing to deploy.'
fi
| true |
6d92ee0227885f545b952195fd4425408491ce1f
|
Shell
|
gogog22510/Ptt-backend
|
/testing-script/C3-1-3.sh
|
UTF-8
| 425 | 2.671875 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# C3-1-2
# After logging in with the SYSOP account, POST /v1/boards board_id=testboard01 title=TestBoard; the board list fetched afterwards should include the testboard01 board
ACCESS_TOKEN=`./get_sysop_token.sh`
result=`curl -s http://localhost:8081/v1/boards -H "Authorization: bearer $ACCESS_TOKEN"`
echo "SYSOP"
echo $result | jq '.data[] | select(.id=="SYSOP")'
echo "ptt_app"
echo $result | jq '.data[] | select(.id=="ptt_app")'
| true |
86d960dc076b1b244d72e671542eb3ba61f5c7bc
|
Shell
|
swift-embedded/swift
|
/utils/baremetal/build-qemu
|
UTF-8
| 1,156 | 3.59375 | 4 |
[
"Apache-2.0",
"Swift-exception"
] |
permissive
|
#!/usr/bin/env bash
set -e
LOCAL_DIR="$(cd "$(dirname "$0")" && pwd)"
SWIFT_PATH="$( cd "$(dirname "$0")/../../.." && pwd )"
QEMU_REMOTE="https://github.com/qemu/qemu.git"
QEMU_DIR="$SWIFT_PATH/qemu"
QEMU_COMMIT="808ebd66e467f77c0d1f8c6346235f81e9c99cf2"
if [ -f "$QEMU_DIR/build/arm-softmmu/qemu-system-arm" ] ; then
echo "qemu already installed"
exit 0
fi
if [ -d "$QEMU_DIR" ] ; then
echo "Uncomplete qemu installation."
echo "Please remove $QEMU_DIR and restart the script."
exit 1
fi
echo "Preparing qemu repository..."
git clone "$QEMU_REMOTE" "$QEMU_DIR"
git -C "$QEMU_DIR" checkout $QEMU_COMMIT
git -C "$QEMU_DIR" apply "$LOCAL_DIR/patches-qemu/repo.patch"
echo "Building qemu..."
mkdir "$QEMU_DIR/build"
cd "$QEMU_DIR/build"
if [[ "$OSTYPE" == "darwin"* ]] ; then
../configure \
--prefix=/usr/local \
--cc=clang \
--host-cc=clang \
--disable-bsd-user \
--disable-guest-agent \
--extra-cflags=-DNCURSES_WIDECHAR=1 \
--enable-cocoa \
--target-list=arm-softmmu
else
../configure \
--prefix=/usr/local \
--disable-bsd-user \
--disable-guest-agent \
--target-list=arm-softmmu
fi
make
| true |
28db3a5d421e9a2ad386a937fe3c0b0c528be4a8
|
Shell
|
Lertsenem/workpile
|
/libexec/workpile-push
|
UTF-8
| 705 | 3.6875 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Usage: workpile push [<taskname>]
# Summary: Push a task on the pile
# Help: This will push a tasks on the pile.
# IMPORTS
########################################
. "$_WORKPILE_ROOT/lib/workpile.ini"
. "$_WORKPILE_ROOT/lib/generic.cst"
. "$_WORKPILE_ROOT/lib/workpile.cst"
. "$_WORKPILE_ROOT/lib/workpile.fun"
# SCRIPT
########################################
taskname="$*"
# Creating task
task="$( wp_add_task "$taskname" "$( wp_get_firsttask )" )"
# Setting last task if needed
[ -z "$( wp_get_lasttask )" ] && wp_set_lasttask "$task"
# Setting as first task
wp_set_firsttask "$task"
# User infos
echo -e "Pushing '${TAG_B}$taskname${TAG_E}'"
# Exit
wp_exit "$WP_RV_OK"
| true |
d957a841b09e092313b4e62266a906a5232d8a36
|
Shell
|
dinhnguyen7025/example-using-sqs-with-localstack
|
/scripts/update-lambda-func-code.sh
|
UTF-8
| 307 | 3 | 3 |
[] |
no_license
|
#!/bin/sh
for filename in ./bin/*; do
[ -e "$filename" ] || continue
# zip and update lambda function code
zip ${filename}.zip ${filename}
awslocal lambda update-function-code --function-name=custom-lambda --zip-file fileb://${filename}.zip
# delete zip file
rm ${filename}.zip
done
| true |
93b879e435a69a1bac10e6a87c7b5213d6414aa5
|
Shell
|
Cr0wn-Gh0ul/Bash_Scripts
|
/MAP_FILE_TO_IMG.sh
|
UTF-8
| 853 | 3.265625 | 3 |
[] |
no_license
|
#!/bin/bash
#USAGE: ./MAP_TO_IMG.sh {IMAGE_FILE} {FILE_TO_MAP}
$(xxd -p $2 > ./hex_tmp)
$(xxd -p $1 > ./img_hex)
$(mkdir ./Music)
#COUNTER=1
OPS=1
POS=2
CURRENT_LINE=1
EOF_LINE=$(wc -l < ./hex_tmp)
EOF_BYTE=$(tail -n1 ./hex_tmp | wc -c)
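# The loop below walks the source file's hex dump two hex digits (one byte) at a
# time, greps for the first match of that byte value in the image's hex dump, and
# appends the match offset (printed as 3-digit hex) -- effectively re-encoding the
# file as a list of offsets into the image's hex dump.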
while true; do
if [[ $OPS -lt 31 ]]; then
if [[ $CURRENT_LINE == $EOF_LINE ]] && [[ $EOF_BYTE -lt $POS ]]; then
break
fi
BYTE=$(head -n$CURRENT_LINE ./hex_tmp | tail -n1)
PULL=$(echo $BYTE | head -c$POS | tail -c2)
FIND=$(grep -b -o $PULL ./img_hex > ./positions)
OFFSET=$(head -n1 ./positions | tail -n1 | cut -d":" -f1)
OFFSET_HEX=$(printf %03X $OFFSET)
# FNAME=$(printf %02X $COUNTER)
$(echo $OFFSET_HEX >> ./Music/Nuts.mp3)
let OPS=OPS+1
let POS=POS+2
# let COUNTER=COUNTER+1
else
OPS=1
POS=2
let CURRENT_LINE=CURRENT_LINE+1
fi
done
$(rm ./hex_tmp && rm ./img_hex && rm ./positions)
| true |
7b18698a84b53d8aae7913b249a9f70bb0a69b74
|
Shell
|
jamesy0ung/raspberrypi-scripts
|
/setup_clean
|
UTF-8
| 443 | 2.640625 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# This software is licensed under the BSD-2-Clause license
# Copyright (c) 2017, James Young
# All rights reserved.
# Before executing, run: chmod +x setup
if [[ $EUID -ne 0 ]]; then
echo "This script is intended to be run as root" 2>&1
exit 1
else
apt-get purge bluej geany dillo epiphany-browser epiphany-browser-data nodered wolfram-engine claws-mail greenfoot libreoffice* scratch squeak-plugins-scratch squeak-vm sonic-pi
fi
| true |
bba354086a1e6899c94d50dcf6002701acafd4e9
|
Shell
|
neurosys/Scripts
|
/sf
|
UTF-8
| 93 | 2.5625 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
if [[ -n $1 ]]
then
surf $1
else
surf https://duckduckgo.com
fi
| true |
d4471102d2a63060ea4d424f7084adb3d43f75d5
|
Shell
|
joseHbernardino/Shell-Scritp
|
/Arrays em Shell.sh
|
UTF-8
| 452 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/bash
echo "Olá Mundo"
MUNDO=("Shell Script" "Bash" "GNU" "Linux" "Debian")
echo ${mundo[0]} # Ira mostrar o primeiro que é Shell Script
echo ${mundo[1]} # Ira mostrar o Segundo que é Bash
echo ${mundo[2]} # Ira mostrar o Terceiro que é GNU
echo ${mundo[3]} # Ira mostrar o Quarto que é Linux
echo ${mundo[4]} # Ira mostrar o Quinto que é Debian
echo ${mundo[@]:2} # Ira mostrar do Segundo elemento pra frente # GNU Linux Debian
| true |
1c9f17e432acf780864e79214e521c822cd72645
|
Shell
|
axelandersson/dotfiles
|
/bash/contrib/completion-2.1/completions/_yum-utils
|
UTF-8
| 624 | 3.0625 | 3 |
[
"MIT"
] |
permissive
|
# bash completion for repomanage -*- shell-script -*-
# Use of this file is deprecated. Upstream completion is available in
# yum-utils >= 1.1.24, use that instead.
_repomanage()
{
local cur prev words cword split
_init_completion -s || return
[[ "$prev" == -@(h|-help|k|-keep) ]] && return 0
$split && return
if [[ "$cur" == -* ]] ; then
COMPREPLY=( $( compgen -W '$( _parse_help "$1" )' -- "$cur" ) )
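# a candidate ending in "=" expects an attached value, so keep the cursor on it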
[[ $COMPREPLY == *= ]] && compopt -o nospace
else
_filedir -d
fi
} &&
complete -F _repomanage repomanage
# ex: ts=4 sw=4 et filetype=sh
| true |
cfd0fd35095024673b8bd02ebca939e766cceb4f
|
Shell
|
monotonemonk/arch_svntogit_community-
|
/gtkdialog/trunk/PKGBUILD
|
UTF-8
| 882 | 2.609375 | 3 |
[] |
no_license
|
# $Id$
# Maintainer: Sergej Pupykin <pupykin.s+arch@gmail.com>
# Contributor: dojo <joerg.doll[at]gmx.de>
pkgname=gtkdialog
pkgver=0.8.3
pkgrel=3
pkgdesc="Gtkdialog is a small utility for fast and easy GUI building."
arch=('i686' 'x86_64')
#url="http://linux.pte.hu/~pipas/gtkdialog/"
url="http://code.google.com/p/gtkdialog/"
license=("GPL")
makedepends=('pkgconfig')
depends=('gtk2' 'libglade')
options=('zipman')
source=(http://gtkdialog.googlecode.com/files/gtkdialog-$pkgver.tar.gz)
md5sums=('f8d2a2b912bcd5aa7da60984c19bc493')
build() {
cd "$srcdir"/$pkgname-$pkgver
./configure --prefix=/usr --mandir=/usr/share/man --infodir=/usr/share/info
make
}
package() {
cd "$srcdir"/$pkgname-$pkgver
make DESTDIR="$pkgdir" UPDATE_ICON_CACHE=true install
rm -f "$pkgdir"/usr/share/info/dir
mkdir -p "$pkgdir"/usr/share/doc/$pkgname
cp -a examples "$pkgdir"/usr/share/doc/$pkgname/examples
}
| true |
4832cf30f6d69afaa455bb77eee42eb494a6eca5
|
Shell
|
ARPASMR/elab_msg_rss
|
/scripts/elab_msg.sh
|
UTF-8
| 5,666 | 2.984375 | 3 |
[] |
no_license
|
#################################################################################
#
# FILE.......: elab_msg.sh
# -------------------------------------------------------------------------------
# PURPOSE....: Conversion of Meteosat-9 HRIT/LRIT images
#              The script expects on the command line: prologue file name
#
# -------------------------------------------------------------------------------
# CREATED....: April 2011 (Cremonini)
#
# DATE DESCRIPTION
# MODIFIED...: May 2018 (Paganotti) Update for container development
#
# -------------------------------------------------------------------------------
# VERSION....: 1.0 (05/04/2011)
# 1.1 (04/05/2018)
#
# =======================================================================
# REFERENCES..:
#
# Pellegrini: ARPA Lombardia
# Paganotti: Softech s.r.l.
#
#################################################################################
#
# ===============================================================================
# ENVIRONMENT CONFIGURATION
#
. /conf/default.conf
declare -x LANG="en_US.UTF-8"
# ===============================================================================
# PATH DEFINITIONS
#
MSGDIR_IN=$MSGDATADIR/input/
MSGDIR_OT=$MSGDATADIR/tmp/
MSGDIR_TAR=$MSGDATADIR/europe/
MSGDIR_IMG=$MSGDATADIR/imgs/
MSGDIR_ANALISI=$MSGDATADIR/analisi_msg
export TMP=$MSGDIR_OT
# ===============================================================================
# START OF PROCESSING
# ===============================================================================
echo "***************************************************************"
echo 'Elab_msg.sh: processing start ' `date +"%Y-%m-%d %H:%M"`
if [ $# -lt 1 ]
then
echo "usage: elab_msg.sh [dataora]"
exit
fi
dataora=$1
ora_analisi=${dataora:8:12} && echo "processing time: $ora_analisi"
nomefile_prologo=`find $MSGDIR_IN -name "*PRO*" | sort | tail -1`
# Put all the segments together
$BINDIR/put_xrit_segments_together.pl -v -D $MSGDIR_OT $nomefile_prologo
# Convert to flat binary format
OLDDIR=$PWD
# work in the /data/msg/tmp directory
cd $MSGDIR_OT
echo "Processing HRV and removing input files"
for nomecycle in `ls -1c $MSGDIR_OT/*HRV*CYCLE*-__`;
do
echo `date +"%Y-%m-%d %H:%M"`" > Processing file " $nomecycle
/home/meteo/bin/xrit2raw -f -v $nomecycle $nomecycle".raw"
$BINDIR/navig MSG2 H 42 48 5 14 0.006 $nomecycle".raw" $nomecycle
rm $nomecycle $nomecycle".raw"
done
echo "Elaboro i canali a bassa risoluzione e rimuovo"
for nomecycle in `ls -1c $MSGDIR_OT/*CYCLE*-__`;
do
echo `date +"%Y-%m-%d %H:%M"`" > Elaboro il file " $nomecycle
/home/meteo/bin/xrit2raw -f -v $nomecycle $nomecycle".raw"
$BINDIR/navig MSG2 L 20 70 -30 30 0.04 $nomecycle".raw" $nomecycle
rm $nomecycle $nomecycle".raw"
done
echo "Generazione bi-spettrale HRV"
Rscript /home/meteo/R_batch/elab_hrv.r $dataora
#scp $MSGDIR_IMG/HRV_$dataora.png meteo@$ECCELLENTE:/srv/www/prodottimeteo/msg1/IRHRV
#
# Copy to Apprendista
#
scp $MSGDIR_IMG/HRV_$dataora.png meteo@$APPRENDISTA:/var/www/html/prodottimeteo/msg1/IRHRV
echo `date +"%Y-%m-%d %H:%M"`" > Map generation"
Rscript /home/meteo/R_batch/elab_msg.r $dataora
echo `date +"%Y-%m-%d %H:%M"`" > End of map generation"
cd $OLDDIR
#
# Copy to Apprendista
#
scp $MSGDIR_IMG/EIR_108_ZOOM_$dataora.png meteo@$APPRENDISTA:/var/www/html/prodottimeteo/msg1/E-IR108-ZOOM
scp $MSGDIR_IMG/IR_$dataora.png meteo@$APPRENDISTA:/var/www/html/prodottimeteo/msg1/E-IR108
scp $MSGDIR_IMG/WV_$dataora.png meteo@$APPRENDISTA:/var/www/html/prodottimeteo/msg1/E-WV62
scp $MSGDIR_IMG/NATCOL_$dataora.png meteo@$APPRENDISTA:/var/www/html/prodottimeteo/msg1/NATCOL
scp $MSGDIR_IMG/AIRMASS_$dataora.png meteo@$APPRENDISTA:/var/www/html/prodottimeteo/msg1/AIRMASS
scp $MSGDIR_IMG/AVHRR_$dataora.png meteo@$APPRENDISTA:/var/www/html/prodottimeteo/msg1/AVHRR
scp $MSGDIR_IMG/SOLARDAY_$dataora.png meteo@$APPRENDISTA:/var/www/html/prodottimeteo/msg1/SOLARDAY
scp $MSGDIR_IMG/FIRE_$dataora.png meteo@$APPRENDISTA:/var/www/html/prodottimeteo/msg1/FIRE
scp $MSGDIR_IMG/MICRO24H_$dataora.png meteo@$APPRENDISTA:/var/www/html/prodottimeteo/msg1/MICRO24H
scp $MSGDIR_IMG/SNDWICH_$dataora.png meteo@$APPRENDISTA:/var/www/html/prodottimeteo/msg1/SNDWICH
echo archive and cleanup
tar -zcvf $MSGDIR_TAR/MSG_$dataora.tar.gz $MSGDIR_OT/*$dataora*.hdr $MSGDIR_OT/*$dataora*.flt
if [ "$ora_analisi" == "0000" -o "$ora_analisi" == "0600" -o "$ora_analisi" == "1200" -o "$ora_analisi" == "1800" ]
then
echo "elaboro l'analisi per l'ora sinottica: $ora_analisi"
cp -v $MSGDIR_OT/*IR_108*$dataora*.flt $MSGDIR_ANALISI
cp -v $MSGDIR_OT/*IR_108*$dataora*.hdr $MSGDIR_ANALISI
cp -v $MSGDIR_OT/*IR_097*$dataora*.flt $MSGDIR_ANALISI
cp -v $MSGDIR_OT/*IR_097*$dataora*.hdr $MSGDIR_ANALISI
cp -v $MSGDIR_OT/*IR_087*$dataora*.flt $MSGDIR_ANALISI
cp -v $MSGDIR_OT/*IR_087*$dataora*.hdr $MSGDIR_ANALISI
cp -v $MSGDIR_OT/*IR_120*$dataora*.flt $MSGDIR_ANALISI
cp -v $MSGDIR_OT/*IR_120*$dataora*.hdr $MSGDIR_ANALISI
cp -v $MSGDIR_OT/*WV_062*$dataora*.flt $MSGDIR_ANALISI
cp -v $MSGDIR_OT/*WV_062*$dataora*.hdr $MSGDIR_ANALISI
cp -v $MSGDIR_OT/*WV_073*$dataora*.flt $MSGDIR_ANALISI
cp -v $MSGDIR_OT/*WV_073*$dataora*.hdr $MSGDIR_ANALISI
if [ -f $MSGDIR_IMG/IRWEB_$dataora.png ]
then
echo "invio dell'immagine per il web IRWEB_$dataora.png"
sh /home/meteo/scripts/invio_web.sh $MSGDIR_IMG/IRWEB_$dataora.png irweb
fi
fi
rm -fr $MSGDIR_OT/*$dataora*.hdr $MSGDIR_OT/*$dataora*.flt
echo 'Elab_msg: processing end ' `date +"%Y-%m-%d %H:%M"`
| true |
7412fc4ab0c0e983ed855ba06147fa7449278de0
|
Shell
|
zhj149/multibootiso-1
|
/create.sh
|
UTF-8
| 3,421 | 3.6875 | 4 |
[] |
no_license
|
#!/bin/sh
# the problem with this approach is: while it allows to multiboot a kernel and
# initrd, it will pick the rootfs from the first partition always
set -e
mkdir -p isomount
mkdir -p extmount
mkdir -p mainmount
cleanup() {
sudo umount isomount
sudo umount extmount
sudo umount mainmount
}
trap cleanup 1 2 15
PATH=$PATH:/sbin:/usr/sbin
TMP=.
bs=512
# the size of the first partition
mainsize=1024
echo " ++ creating main partition"
main="$TMP"/main
fallocate -l $((mainsize*bs)) "$main"
mke2fs -F "$main" > /dev/null 2>&1
sudo mount "$main" mainmount > /dev/null 2>&1
sudo mkdir mainmount/extlinux
sudo extlinux --install mainmount/extlinux > /dev/null 2>&1
sudo cp /usr/lib/syslinux/chain.c32 /usr/lib/syslinux/menu.c32 mainmount/extlinux
echo "UI menu.c32" | sudo tee mainmount/extlinux/extlinux.conf > /dev/null 2>&1
# the first partition to boot from is the first logical one
count=5
sum=0
# first we check how much space each iso content needs in an ext2 filesystem
for iso in *.iso; do
echo " ++ processing $iso"
b="$TMP"/`basename "$iso" .iso`.ext2
# get the size of the iso
size=`stat -c %s "$iso"`
# create an image of that size +20% for safety margin
size10=$(((size*120)/100))
fallocate -l $size10 "$b"
mke2fs -F "$b" > /dev/null 2>&1
#fuse-ext2 -o rw+ "$b" extmount
#fuse-ext2 -v -o rw,force "$b" extmount -o debug > log 2>&1 &
sudo mount "$b" extmount
#fuseiso "$iso" isomount
sudo mount "$iso" isomount > /dev/null 2>&1
sudo cp -r isomount/. extmount
sudo mv extmount/isolinux extmount/extlinux
sudo mv extmount/extlinux/isolinux.cfg extmount/extlinux/extlinux.conf
sudo extlinux --install extmount/extlinux > /dev/null 2>&1
sudo cp /usr/lib/syslinux/chain.c32 /usr/lib/syslinux/menu.c32 extmount/extlinux
#fusermount -u extmount
sudo umount extmount
#fusermount -u isomount
sudo umount isomount
e2fsck -yf "$b" > /dev/null 2>&1
resize2fs -M "$b" > /dev/null 2>&1
newsize=`stat -c %s "$b"`
sectorcount=$((newsize/bs+3))
sum=$((sum+sectorcount*bs))
sudo tee -a mainmount/extlinux/extlinux.conf > /dev/null << EOF
LABEL $count
MENU LABEL $iso
COM32 chain.c32
APPEND hd0 $count
EOF
count=$((count+1))
done
sudo umount mainmount
echo " ++ creating disk image"
# create the final image
root="$TMP"/root.img
fallocate -l $((sum+bs*(mainsize+2))) "$root"
# create the initial disk layout
parted -s -- "$root" mklabel msdos > /dev/null 2>&1
parted -s -- "$root" mkpart primary ext2 1s ${mainsize}s > /dev/null 2>&1
parted -s -- "$root" set 1 boot on > /dev/null 2>&1
parted -s -- "$root" mkpart extended $((mainsize+1))s -1s > /dev/null 2>&1
dd if="$main" of="$root" obs=$bs seek=1 conv=notrunc > /dev/null 2>&1
rm "$main"
# create partitions for all isos
start=$((mainsize+2))
for ext2 in "$TMP"/*.ext2; do
echo " ++ filling with $ext2"
size=`stat -c %s "$ext2"`
ssize=$((size/bs+1))
parted -s -- "$root" mkpart logical ext2 ${start}s $((start+ssize))s > /dev/null 2>&1
# copy ext2 fs to newly created partition
offset=`/sbin/parted -m "$root" unit s print | tail -1 | sed 's/^[0-9]\+:\([^:s]\+\)s:.*/\1/'`
dd if="$ext2" of="$root" obs=$bs seek=$offset conv=notrunc > /dev/null 2>&1
rm "$ext2"
start=$((start+ssize+2))
done
# make the whole thing bootable
dd conv=notrunc bs=440 count=1 if=/usr/lib/extlinux/mbr.bin of="$root" > /dev/null 2>&1
rm -df extmount
rm -df isomount
rm -df mainmount
echo " ++ success! The result is stored in $root"
| true |
ac8db9c7e5348ea789817035fcea3a6a9a16b234
|
Shell
|
streemline/kernel_build
|
/AnyKernel/META-INF/com/google/android/update-binary
|
UTF-8
| 1,101 | 3.609375 | 4 |
[] |
no_license
|
#!/sbin/sh
OUTFD=/proc/self/fd/$2;
ZIP="$3";
DIR=`dirname "$ZIP"`;
ui_print() {
until [ ! "$1" ]; do
echo -e "ui_print $1\nui_print" > $OUTFD;
shift;
done;
}
show_progress() { echo "progress $1 $2" > $OUTFD; }
set_perm_recursive() {
dirs=$(echo $* | awk '{ print substr($0, index($0,$5)) }');
for i in $dirs; do
chown -R $1.$2 $i; chown -R $1:$2 $i;
find "$i" -type d -exec chmod $3 {} +;
find "$i" -type f -exec chmod $4 {} +;
done;
}
file_getprop() { grep "^$2" "$1" | cut -d= -f2; }
getprop() { test -e /sbin/getprop && /sbin/getprop $1 || file_getprop /default.prop $1; }
abort() { ui_print "$*"; exit 1; }
mkdir -p /tmp/anykernel;
cd /tmp/anykernel;
unzip -o "$ZIP";
ui_print "**********************";
ui_print " wulan17 Kernel Test Build";
ui_print "**********************";
/sbin/busybox mount /system;
ui_print "**********************";
ui_print " Installing patch";
ui_print "**********************";
ui_print "- Starting kernel installation...";
/sbin/sh /tmp/anykernel/anykernel.sh;
ui_print "- Kernel installation completed";
| true |
c136edff26390bf868d76e7424708fd4d004b4e1
|
Shell
|
familiafacundes/dotfiles
|
/config/local/bin/firefox.sh
|
UTF-8
| 111 | 2.96875 | 3 |
[] |
no_license
|
#!/bin/bash
firefox="$(pidof firefox)"
if [ -z "$firefox" ]; then
firefox
else
echo "Not this time."
fi
| true |
3b9b76e5a04dbe0b02fcd222e92b0ae3b0fba8fc
|
Shell
|
miko-798/cirrus-ngs
|
/src/cirrus_ngs/server/Pipelines/scripts/SmallRNASeq/bowtie2/cutadapt.sh
|
UTF-8
| 2,182 | 3.5625 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
project_name=$1
workflow=$2
file_suffix=$3 #extension of input file, does not include .gz if present in input
root_dir=$4
fastq_end1=$5
fastq_end2=$6
input_address=$7 #this is an s3 address e.g. s3://path/to/input/directory
output_address=$8 #this is an s3 address e.g. s3://path/to/output/directory
log_dir=$9
is_zipped=${10} #either "True" or "False", indicates whether input is gzipped
num_threads=${11} # number of threads
min_len=${12} # drop the read if it is below this minimum length
adapter=${13} # adapter sequence
#logging
log_dir=$log_dir/$fastq_end1
mkdir -p $log_dir
log_file=$log_dir/'cutadapt.log'
exec 1>>$log_file
exec 2>>$log_file
status_file=$log_dir/'status.log'
touch $status_file
#prepare output directories
workspace=$root_dir/$project_name/$workflow/$fastq_end1
mkdir -p $workspace
echo "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
date
echo "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
check_step_already_done $JOB_NAME $status_file
##DOWNLOAD##
if [ ! -f $workspace/$fastq_end1.trim$file_suffix ]
then
#this is the suffix of the input from s3
download_suffix=.trim$file_suffix
#always download forward reads
check_exit_status "aws s3 cp $input_address/$fastq_end1$download_suffix $workspace/" $JOB_NAME $status_file
gunzip -q $workspace/$fastq_end1$download_suffix
#download reverse reads if they exist
if [ "$fastq_end2" != "NULL" ]
then
check_exit_status "aws s3 cp $input_address/$fastq_end2$download_suffix $workspace/" $JOB_NAME $status_file
gunzip -q $workspace/$fastq_end2$download_suffix
fi
fi
##END_DOWNLOAD##
##CUT_ADAPT##
# cut 3' end
check_exit_status "$cutadapt -a $adapter -o $workspace/$fastq_end1.cut$file_suffix \
$workspace/$fastq_end1.trim$file_suffix -m $min_len" $JOB_NAME $status_file
if [ "$fastq_end2" != "NULL" ];
then
# cut 3' end
check_exit_status "$cutadapt -a $adapter -o $workspace/$fastq_end2.cut$file_suffix \
$workspace/$fastq_end2.trim$file_suffix -m $min_len" $JOB_NAME $status_file
fi
##END_CUT_ADAPT##
##UPLOAD##
aws s3 cp $workspace $output_address --exclude "*" --include "*.cut.fastq*" --recursive
##END_UPLOAD##
| true |
c7935387cca88287c10d0c714f643958cfc83edb
|
Shell
|
fayaaz/3d-print-tools-pi-gen
|
/stage3/01-install-packages/01-run.sh
|
UTF-8
| 743 | 2.84375 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash -e
## Install cura and octoprint
on_chroot << EOF
pip3 install --upgrade pip
pip3 install docker-compose
usermod -aG docker pi
wget https://github.com/smartavionics/Cura/releases/download/20210908/Cura-mb-master-aarch64-20210908.AppImage -O /opt/Cura.AppImage
chmod +x /opt/Cura.AppImage
chown pi:root /opt/Cura.AppImage
mkdir -p /home/pi/.local/share/cura/master/plugins/OctoPrintPlugin
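# clone the OctoPrint connection plugin into Cura's per-user plugin directory (runs as root inside the chroot)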
git clone --recursive -j2 https://github.com/fieldOfView/Cura-OctoPrintPlugin.git /home/pi/.local/share/cura/master/plugins/OctoPrintPlugin/OctoPrintPlugin
EOF
mkdir -p ${ROOTFS_DIR}/home/pi/octoprint/
cp -r files/docker-compose.yml "${ROOTFS_DIR}/home/pi/octoprint/docker-compose.yml"
mkdir -p "$DEPLOY_DIR"
| true |
0cda34b8d8cc55804da1dd26218896fd942b2426
|
Shell
|
pombreda/irods
|
/packaging/setup_irods.sh
|
UTF-8
| 1,993 | 4.03125 | 4 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash -e
# detect correct python version
if type -P python2 1> /dev/null; then
PYTHON=`type -P python2`
else
PYTHON=`type -P python`
fi
# throw STDERR warning if short hostname found
MYHOST=`hostname`
if [[ ! $MYHOST == *.* ]] ; then
echo "" 1>&2
echo "********************************************************" 1>&2
echo "*" 1>&2
echo "* iRODS Setup WARNING:" 1>&2
echo "* hostname [$MYHOST] may need to be a FQDN" 1>&2
echo "*" 1>&2
echo "********************************************************" 1>&2
echo "" 1>&2
fi
# locate current directory
DETECTEDDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# service account config file
SERVICE_ACCOUNT_CONFIG_FILE="/etc/irods/service_account.config"
# define service account for this installation
if [ ! -f $SERVICE_ACCOUNT_CONFIG_FILE ] ; then
$DETECTEDDIR/setup_irods_service_account.sh
fi
# import service account name and group
source $SERVICE_ACCOUNT_CONFIG_FILE
# configure irods
sudo su - $IRODS_SERVICE_ACCOUNT_NAME -c "$DETECTEDDIR/setup_irods_configuration.sh"
# if default vault path does not exist, create it with proper permissions
MYSERVERCONFIGJSON=/etc/irods/server_config.json
MYRESOURCEDIR=`$PYTHON -c "import json; print json.load(open('$MYSERVERCONFIGJSON'))['default_resource_directory']"`
if [ ! -e $MYRESOURCEDIR ] ; then
mkdir -p $MYRESOURCEDIR
chown $IRODS_SERVICE_ACCOUNT_NAME:$IRODS_SERVICE_GROUP_NAME $MYRESOURCEDIR
fi
# setup database script or resource server script
if [ -e "$DETECTEDDIR/setup_irods_database.sh" ] ; then
sudo su - $IRODS_SERVICE_ACCOUNT_NAME -c "ORACLE_HOME=$ORACLE_HOME; $DETECTEDDIR/setup_irods_database.sh"
else
if [ -e "$DETECTEDDIR/setup_resource.sh" ] ; then
sudo su - $IRODS_SERVICE_ACCOUNT_NAME -c "$DETECTEDDIR/setup_resource.sh"
else
echo "" 1>&2
echo "ERROR:" 1>&2
echo " Please install an iRODS Database Plugin" 1>&2
echo " and re-run ${BASH_SOURCE[0]}" 1>&2
exit 1
fi
fi
| true |
fbf422aabda6ba1987feda82fffb658cba66b0ca
|
Shell
|
lagopus/lagopus-next-virtio
|
/test/datastore/long_run/shell/stop_lagopus.sh
|
UTF-8
| 665 | 3.859375 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# ./stop_lagopus.sh <LOG_DIR> <TIME_OUT>
LOG_DIR=${1:-"."}
PID_FILE="/var/run/lagopus.pid"
RETRY_MAX=${2:-30}
PROC_STATUS_FILE="${LOG_DIR}/end_proc_status.txt"
if [ -f "${PID_FILE}" ]; then
PID=`cat ${PID_FILE}`
# save proc
sudo cat /proc/$PID/status > $PROC_STATUS_FILE
# kill
sudo kill -TERM $PID
i=0
while :
do
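    # poll once a second until the process exits or we hit RETRY_MAX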
IS_ALIVE=`ps hp ${PID} | wc -l`
if [ "x${IS_ALIVE}" = "x0" ]; then
exit 0
fi
if [ $i -ge $RETRY_MAX ]; then
echo "Time out."
exit 1;
fi
sleep 1
i=`expr $i + 1`
echo "check retry: ${i}." 1>&2
done
fi
| true |
a57557c9ce8034216d786c54a99b9a79633d2113
|
Shell
|
examon/tmux_stuff
|
/send.sh
|
UTF-8
| 1,102 | 3.984375 | 4 |
[] |
no_license
|
#!/bin/bash
# send command to specified range of panes
#if [ "$#" -ne 1 ]; then
# echo "$0 session_name"
# exit 1
#fi
#session=$1
from=1
to=1
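# default pane range is [1..1] until a ":set from to" command changes it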
while read -r -p "> " line; do
#read -p "> " line
#echo $line
# get out of repl
if [ "$line" == ":quit" ] || [ "$line" == ":q" ]; then
exit 0
fi
# setup operating interval for tmux panes
if [[ "$line" == :set* ]] || [[ "$line" == :s* ]]; then
	# normalize whitespace in $line (e.g. ":s  a   b" -> ":s a b")
	panes=$(echo ${line} | xargs)
	# split the normalized $panes into $panes_array
	IFS=' ' read -ra panes_array <<< "${panes}"
# check input
array_length=${#panes_array[@]}
if [ $array_length -ne 3 ]; then
echo ":s from to"
continue
fi
# set from/to
from=${panes_array[1]}
to=${panes_array[2]}
echo "active tmux panes [${from}..${to}]"
continue
fi
for i in $(seq ${from} ${to}); do
#tmux send-keys -t ${session}:$i "${line}" ENTER &
tmux send-keys -t $i "${line}" ENTER &
done
done
| true |
d795e4e516e79da7c814ed1489b40aa29c790756
|
Shell
|
chlunde/dotfiles
|
/bin/go-tool-install
|
UTF-8
| 996 | 2.65625 | 3 |
[] |
no_license
|
#!/bin/bash
set -e
cd /
export GOPATH=~/opt/gotoolpath
test -d $GOPATH/ || mkdir -p $GOPATH
test -d ~/bin || mkdir -p ~/bin
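# symlink GOPATH/bin to ~/bin so 'go install' drops binaries straight onto PATH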
if ! [[ -L $GOPATH/bin ]]
then
ln -s ~/bin/ $GOPATH/bin
fi
if [[ $# == 0 ]]
then
go install golang.org/x/tools/gopls@latest
go install github.com/junegunn/fzf@latest
go install golang.org/x/tools/cmd/goimports@latest
go install golang.org/x/tools/cmd/gorename@latest
go install golang.org/x/lint/golint@latest
go install honnef.co/go/tools/cmd/staticcheck@latest
go install github.com/cweill/gotests/gotests@v1.5.3
go install github.com/rogpeppe/gohack@latest
go install github.com/charmbracelet/glow@v1.4.0
go install go.mozilla.org/sops/v3/cmd/sops@latest
go install mvdan.cc/sh/v3/cmd/shfmt@latest
go install github.com/sachaos/viddy@v0.1.6
go install github.com/go-delve/delve/cmd/dlv@latest
go install sigs.k8s.io/kind@v0.17.0
else
go "$@"
fi
go clean -cache
go clean -testcache
go clean -modcache
| true |
50c93e08449dccc14ce44e23cbfa79156b5eadee
|
Shell
|
KRMAssociatesInc/docker-vista
|
/GTM/bin/removeVistaInstanceMinimal.sh
|
UTF-8
| 2,491 | 3.859375 | 4 |
[
"LicenseRef-scancode-public-domain",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#---------------------------------------------------------------------------
# Copyright 2011-2013 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
# Remove directories for instance Routines, Objects, Globals, Journals,
# Temp Files
# Options
# instance = name of instance
# used http://rsalveti.wordpress.com/2007/04/03/bash-parsing-arguments-with-getopts/
# for guidance
usage()
{
cat << EOF
usage: $0 options
This script will remove a VistA instance for GT.M
OPTIONS:
-h Show this message
-i Instance name
EOF
}
while getopts ":hi:" option
do
case $option in
h)
usage
exit 1
;;
i)
instance=$(echo $OPTARG |tr '[:upper:]' '[:lower:]')
;;
esac
done
if [[ -z $instance ]]
then
usage
exit 1
fi
# Make sure we are running as the instance user and that the required variables are set
if [[ $USER != "$instance" || -z $basedir || -z $gtm_dist ]]; then
echo "This script must be run as $instance and have the following variables
defined:
\$basedir
\$instance
\$gtm_dist" 1>&2
exit 1
fi
echo "Removing $instance..."
# Shutdown the vista instance nicely
processes=$(pgrep mumps)
if [ ! -z "${processes}" ] ; then
echo "Stopping any remaining M processes nicely"
for i in ${processes}
do
mupip stop ${i}
done
# Wait for process to react to mupip stop instead of force kill later
sleep 5
fi
# Look for M processes that are still running
processes=$(pgrep mumps)
if [ ! -z "${processes}" ] ; then
echo "M process are being shutdown forcefully!"
pkill -9 mumps
fi
# Remove instance directories
rm -f $basedir/r/*.m
rm -f $basedir/r/$gtmver/*.o
rm -f $basedir/g/*.dat
rm -f $basedir/j/*.mjl
# Re-create the databases
$gtm_dist/mupip create
echo "Done removing $instance"
| true |
a93b276c2781069bc045b45ef5015b01424ffd1b
|
Shell
|
lc4t/BiliTools
|
/LiveRecorder/liverecorder.sh
|
UTF-8
| 537 | 3.28125 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# bash liverecorder.sh [ROOMID] [DIR] [DOWNLOAD_SPEED]
roomid=$1
echo "直播间号为:$1"
echo "查询空间目录:$2"
echo "下载限速 $3 kb"
#mkdir $roomid
while (true)
DISK=`df -l | grep "$2" | awk '{print $4}'`
if [ "$DISK" -lt "10240" ]; then
echo "磁盘空间不足, only $DISK bytes"
date
sleep 60
continue
else
echo "磁盘空间充足, $DISK bytes"
fi
do
trickle -d $3 you-get -O "$roomid"_`date +%Y%m%d_%T` -o videos https://live.bilibili.com/$roomid 2>/dev/null
shuf -i 30-60 -n 1 | xargs sleep
done
| true |
04b92daaf3b743194d090fe0626a51ff173b609a
|
Shell
|
las1991/tool-scripts
|
/iterm2Login.sh
|
UTF-8
| 998 | 3.390625 | 3 |
[] |
no_license
|
#!/usr/bin/expect
set timeout 30
set host [lindex $argv 0]
# set a variable from a positional argument; name it anything meaningful. Index 0 is the first argument; the variables are used below.
set port [lindex $argv 1]
set user [lindex $argv 2]
set pswd [lindex $argv 3]
spawn ssh -p $port $user@$host
# spawn is an expect built-in; it wraps the running ssh process so interactive input can be scripted.
expect {
"(yes/no)?"
{send "yes\n";exp_continue;}
-re "(p|P)ass(word|wd):"
{send "$pswd\n"}
}
# expect is also a built-in; it checks whether the output of the last command contains the quoted string, and -re matches with a regular expression.
# On first login the "yes/no" host-key prompt appears, so we send "yes\n" and continue (exp_continue).
interact
# interact: stay in interactive mode after setup and hand control back to the console.
| true |
c57304c7630bd873429467eba481bd87b4f7e350
|
Shell
|
HudaF/Operating-Systems-Coursework
|
/Bash scripting/test_passwd.sh
|
UTF-8
| 321 | 3.484375 | 3 |
[] |
no_license
|
#!/bin/bash
# ${#1} is the exact password length (wc -c would also count the trailing newline)
passCount=${#1}
digitCount=$(echo "$1" | egrep -o "[0-9]" | wc -l)
# count special characters; '-' sits at the end of the bracket expression so it
# is not parsed as a range (the original "+-=" range matched unintended characters)
specialCount=$(echo "$1" | egrep -o "[,#$%&*+=-]+" | wc -l)
echo $specialCount
if [ $passCount -lt 8 ] || [ $digitCount -lt 1 ] || [ $specialCount -lt 1 ];
then
echo weak password
else
echo strong password
fi
| true |
81cc1d0eabcf7be269bd132fbb859c0813c57249
|
Shell
|
Junhaoo/IntersectProject
|
/test/generateData.sh
|
UTF-8
| 189 | 3.0625 | 3 |
[] |
no_license
|
#!/bin/bash
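# appears to generate test input: the first line is the segment count (2*N),
# followed by N vertical and N horizontal unit segments; usage: ./generateData.sh N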
int=1
a=$1
b=$1
val=`expr $a + $b`
echo $val > input.txt
while(($int<=${1}))
do
echo "L ${int} 0 ${int} 1">>input.txt
echo "L 0 ${int} 1 ${int}">>input.txt
let "int++"
done
| true |
bb8db9309f90b81d0bcfe6264602497bd4dfb930
|
Shell
|
JasonGross/slow-coq-examples
|
/insert-timings.sh
|
UTF-8
| 286 | 3.46875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
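# prefix each stdin line with the seconds that elapsed until the following line
# arrived, so slow steps stand out in streamed build/log output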
PREV_TIME=$(date +%s.%N)
PREV=""
DO_PRINT=""
while read -r i
do
NEXT="$(date +%s.%N)"
DIFF="$(echo "$NEXT - $PREV_TIME" | bc)"
if [ ! -z "$DO_PRINT" ];
then
echo "$DIFF: $PREV"
else
DO_PRINT="yes"
fi
PREV="$i"
PREV_TIME="$NEXT"
done
| true |
c86ffd2ce19cefb6958c9a0ac267840ccc1efcc6
|
Shell
|
alldatacenter/alldata
|
/dts/airbyte/airbyte-integrations/connectors/source-github/fixtures/scripts/create_branches_and_commits.sh
|
UTF-8
| 508 | 3.375 | 3 |
[
"MIT",
"Elastic-2.0",
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
touch ".gitignore"
git add ".gitignore"
git commit -m "Initial commit"
git push origin master
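# create all feature branches off the initial master commit before populating them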
for (( i = 0; i < 5; i++ )); do
git branch "feature/branch_$i"
done
for (( i = 0; i < 5; i++ )); do
git checkout "feature/branch_$i"
mkdir github_sources
for (( j = 0; j < 5; j++ )); do
echo "text_for_file_$j_commit" > "github_sources/file_$j.txt"
git add github_sources
git commit -m "commit number $j"
git push origin "feature/branch_$i"
done
done
| true |
7f0c3f5381a59645f41287a165b69a0bf816b35b
|
Shell
|
Louis-Veitenheimer/init_workspace
|
/install.sh
|
UTF-8
| 826 | 2.890625 | 3 |
[] |
no_license
|
#!/bin/sh
export DEBIAN_FRONTEND=noninteractive
sudo apt-get update
sudo apt-get upgrade -y
sudo apt-get install -y \
cowsay \
curl \
wget \
vim \
tmux \
htop \
jq \
zsh \
git
cp .vimrc ~/.vimrc
cp .zshrc ~/.zshrc
# install "oh my zsh"
git clone --depth=1 https://github.com/robbyrussell/oh-my-zsh.git ~/.oh-my-zsh || {
printf "Error: git clone of oh-my-zsh repo failed\n"
exit 1
}
# change current shell to zsh
chsh -s $(grep /zsh$ /etc/shells | tail -1)
# remove git integration with zsh theme (makes things hella slow when git
# repo originates on a windows share)
sed -i '/$(git_prompt_info)/d' ~/.oh-my-zsh/themes/af-magic.zsh-theme
# Emacs
sudo apt install -y emacs-nox
cp -r .emacs.d ~/.emacs.d
sudo update-alternatives --set editor /usr/bin/emacs25
# sudo insults
#Defaults insults
| true |
b3cbd86e855d396ec19c2fffc2dc85d6f5100c48
|
Shell
|
zenazn/dotfiles
|
/profile.d/10-jump
|
UTF-8
| 1,632 | 3.859375 | 4 |
[] |
no_license
|
#!/bin/bash
# Jump!
# Usage:
#
# # Add this to your profile
# mkjump qq path/to/place/you/want/to/jump <<EOF
# any-alias anywhere
# have as/many/of/these/as/you/like
# EOF
#
# # Try this!
# qq direct<Tab> # Tab complete some subdirectory of the target
# qq an<Tab> # Tab complete the above alias
# qq # Go to the target directory itself
#
# I personally use this for jumping to various projects under ~/git
function __jump_table_lookup {
local cmd="$1"
local search="$2"
local aliases="${cmd}_JUMP_ALIASES"
local root="${cmd}_JUMP_ROOT"
echo "${!aliases}" | awk "$search"
ls "${!root}" | awk "{ print \$1 \" ${!root}/\" \$1 }" | awk "$search"
}
function __jump_target {
local cmd="$1"
local key="$2"
if [ -z "$key" ]; then
local root="${cmd}_JUMP_ROOT"
cd "${!root}"
else
local target=$(__jump_table_lookup "$cmd" "/^$key / { print \$2 }" | head -n 1)
if [ -z "$target" ]; then
echo "Unknown jump target $key"
return 1
else
cd "$target"
fi
fi
}
function __jump_completion {
local cmd="${COMP_WORDS[0]}"
local prefix="${COMP_WORDS[COMP_CWORD]}"
local options=$(__jump_table_lookup "$cmd" "/^$prefix/ { print \$1 }")
COMPREPLY=($(compgen -W "$options" -- "$prefix"))
}
function mkjump {
local cmd="$1"
local root="$2"
# Read in a list of aliases from stdin (protip: pass a heredoc)
read -r -d '' "${cmd}_JUMP_ALIASES"
# And store the jump root as well. We only keep $cmd around
eval "${cmd}_JUMP_ROOT='$root'"
alias $cmd="__jump_target '$cmd'"
complete -F __jump_completion "$cmd"
}
| true |
bd72f8b1fac170116fc86371a0a1f3b30a29e3ac
|
Shell
|
tsukolsky/eagle-library
|
/createOshparkRelease.sh
|
UTF-8
| 954 | 3.5625 | 4 |
[] |
no_license
|
#!/bin/bash
##########################################
## createOshParkRelease.sh
##
## Initial Build: 6/29/12
## Last Revised: 7/7/12
## Initial Author: Mark Taylor
## Maintained By: Todd Sukolsky
##########################################
## Description:
## This script prepares gerbers for release
## to Osh park by changing file extensions.
##########################################
if [ $# != 1 ]; then
echo "USAGE: $0 /path/to/project/dir/"
exit
else
echo "Creating release for $1..."
fi
cd $1
mkdir oshpark_release
cd oshpark_release
cp ../*.cmp "Top Layer.cmp"
cp ../*.sol "Bottom Layer.sol"
cp ../*.stc "Top Solder Mask.stc"
cp ../*.sts "Bottom Solder Mask.sts"
cp ../*.plc "Top Silk Screen.plc"
cp ../*.pls "Bottom Silk Screen.pls"
cp ../*.dim "Board Outline.gko"
cp ../*.drd "Drills.xln"
cd ..
zip oshpark_release.zip oshpark_release/*
rm -rf oshpark_release
echo "DONE creating release package!"
| true |
0a900f50f9ac3e2452351ba85b4e6772cd7736a6
|
Shell
|
hypercube1024/firefly
|
/firefly-jni-helper/src/main/cpp/jni-helper/build.sh
|
UTF-8
| 1,227 | 3.46875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
PROJECT_HOME=$(cd "$(dirname "$0")" && pwd)
echo "project dir: $PROJECT_HOME"
RELEASE_BUILD_DIR="$PROJECT_HOME/build-release"
echo "cmake release dir: $RELEASE_BUILD_DIR"
if [ ! -d "$RELEASE_BUILD_DIR" ]; then
mkdir "$RELEASE_BUILD_DIR"
else
rm -rf "$RELEASE_BUILD_DIR"
fi
echo "$(uname)"
if [ "$(uname)" == "Darwin" ];then
echo "build on MacOS"
cmake -S "$PROJECT_HOME" -B "$RELEASE_BUILD_DIR"
cmake --build "$RELEASE_BUILD_DIR" --target clean
cmake --build "$RELEASE_BUILD_DIR" --target all
cd "$RELEASE_BUILD_DIR" && make
elif [ "$(expr substr $(uname -s) 1 5)" == "Linux" ];then
echo "build on Linux"
cmake -S "$PROJECT_HOME" -B "$RELEASE_BUILD_DIR"
cmake --build "$RELEASE_BUILD_DIR" --target clean
cmake --build "$RELEASE_BUILD_DIR" --target all
cd "$RELEASE_BUILD_DIR" && make
elif [[ "$(expr substr $(uname -s) 1 10)" == "MINGW32_NT" || "$(expr substr $(uname -s) 1 10)" == "MINGW64_NT" ]];then
echo "build on Windows"
cmake -S "$PROJECT_HOME" -B "$RELEASE_BUILD_DIR"
cmake --build "$RELEASE_BUILD_DIR" --target clean
cmake --build "$RELEASE_BUILD_DIR" --target ALL_BUILD
cd "$RELEASE_BUILD_DIR" && msbuild.exe ALL_BUILD.vcxproj -t:rebuild -p:Configuration=Release
fi
| true |