blob_id (stringlengths 40–40) | language (stringclasses, 1 value) | repo_name (stringlengths 4–115) | path (stringlengths 2–970) | src_encoding (stringclasses, 28 values) | length_bytes (int64, 31–5.38M) | score (float64, 2.52–5.28) | int_score (int64, 3–5) | detected_licenses (listlengths 0–161) | license_type (stringclasses, 2 values) | text (stringlengths 31–5.39M) | download_success (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|
f4a1aee2f33b95e2c54e31b2b405b322195a27cb
|
Shell
|
padfed/padfed-network-setup
|
/src/dev/lib-cctest.sh
|
UTF-8
| 4,507 | 3.828125 | 4 |
[] |
no_license
|
BOLD=$(tput bold)
GREEN=$(tput setaf 2)
RED=$(tput setaf 1)
NORMAL=$(tput sgr0)
docker() {
[[ -v DOCKERDEBUG ]] && echo docker "$@"
command docker "$@"
}
run() {
[[ -v RUNDEBUG ]] && echo "$@"
command "$@"
}
die() {
echo "$@"
exit 1
}
fail() {
echo "$RED$BOLD!!!! $*$NORMAL" >/dev/stderr
FAILURES=$((${FAILURES:-0}+1))
}
failreport() {
if [[ ${FAILURES:-0} -gt 0 ]]
then
echo "$RED$BOLD!!!! $FAILURES TESTS FAILED$NORMAL" > /dev/stderr
return 1
fi
}
assert() {
jq -e "$@" > /dev/null || fail "ASSERTION FAILED: $@"
}
cc() {
local OPTS=$(getopt -o 'ofa' -l 'must-fail,can-fail,out' -- "$@")
eval set -- "$OPTS"
while true; do
case "$1" in
-o|--out) local PRINTOUTPUT=1 ; shift ;;
-f|--must-fail) local EXPECTFAILURE=1 ; shift ;;
-a|--can-fail) local ACCEPTFAILURE=1 ; shift ;;
--) shift ; break ;;
*) echo "${RED}error: opción inválida: $BOLD$o$NORMAL" ; exit 1 ;;
esac
done
local SUBCOMMAND="$1"
shift
local PRINTFUNCTION="$1"
local ARGS='{"Args":[]}'
ARGS=$(jq <<<$ARGS -c ".function=\"$1\"")
shift
local PRINTARGS=""
for ARG in "$@"
do
ARG=$(jq -c '.' <<<$ARG 2>/dev/null || echo "$ARG")
ARGS=$(jq -c '.Args+=[$a]' --arg a "$ARG" <<<$ARGS)
PRINTARGS="${PRINTARGS:+$PRINTARGS }$ARG"
done
if [[ ! -v VERBOSE && ${#PRINTARGS} -gt 40 ]]; then
PRINTARGS=${PRINTARGS:0:40}...
fi
echo "$GREEN===> $BOLD$SUBCOMMAND$NORMAL $PRINTFUNCTION $PRINTARGS" > /dev/stderr
local WAIT_FOR_EVENT=""
local PEERS_PARAMS=""
local ORDERER_PARAMS=""
case "$SUBCOMMAND" in
invoke)
WAIT_FOR_EVENT="--waitForEvent"
ORDERER_PARAMS="-o $ORDERER"
if [[ "$TLS_ENABLED" == true ]]; then
ORDERER_PARAMS="--tls --cafile /etc/hyperledger/orderer/tls/tlsca.afip.tribfed.gob.ar-cert.pem"
if [[ $TLS_CLIENT_AUTH_REQUIRED == true ]]; then
ORDERER_PARAMS="$ORDERER_PARAMS --clientauth"
ORDERER_PARAMS="$ORDERER_PARAMS --keyfile /etc/hyperledger/admin/tls/client.key"
ORDERER_PARAMS="$ORDERER_PARAMS --certfile /etc/hyperledger/admin/tls/client.crt"
fi
fi
for org in $ORGS_WITH_PEERS; do
PEERS_PARAMS="$PEERS_PARAMS --peerAddresses peer0.${org,,}.tribfed.gob.ar:7051"
if [[ $TLS_CLIENT_AUTH_REQUIRED == true ]]; then
PEERS_PARAMS="$PEERS_PARAMS --tlsRootCertFiles /etc/hyperledger/tls_root_cas/tlsca.${org,,}.tribfed.gob.ar-cert.pem"
fi
done
;;
query)
;;
*)
echo "Error [$SUBCOMMAND] unknown command" > /dev/stderr
exit 1
esac
ENV="-e FABRIC_LOGGING_SPEC=error"
set +e
OUTPUT="$(docker exec $ENV peer0_afip_cli peer chaincode \
$SUBCOMMAND \
$PEERS_PARAMS \
$ORDERER_PARAMS \
$WAIT_FOR_EVENT \
-C $CHANNEL_NAME \
-n $CHAINCODE_NAME \
-c "$ARGS" 2>&1)"
STATUS=$?
set -e
if [[ $STATUS -eq 0 ]]; then
# success
if [[ ! -v EXPECTFAILURE ]]; then
echo " ${GREEN}EXPECTED SUCCESS$NORMAL" >/dev/stderr
else
fail "UNEXPECTED SUCCESS"
echo " OPERATION: $SUBCOMMAND $ARGS" >/dev/stderr
echo " PEER CLIENT OUTPUT: $OUTPUT" >/dev/stderr
fi
else
# failure
if [[ -v ACCEPTFAILURE ]]; then
echo " ${GREEN}ACCEPTED FAILURE$NORMAL" >/dev/stderr
else
if [[ -v EXPECTFAILURE ]]; then
echo " ${GREEN}EXPECTED FAILURE$NORMAL" >/dev/stderr
else
fail "UNEXPECTED FAILURE"
echo " OPERATION: $SUBCOMMAND $ARGS" >/dev/stderr
echo " PEER CLIENT OUTPUT: $OUTPUT" >/dev/stderr
fi
fi
fi
# if -o was passed, print the output on stdout so callers can validate it
if [[ -v PRINTOUTPUT ]]; then
echo "$OUTPUT"
fi
# in verbose mode, also echo the output to stderr (display only)
if [[ -v VERBOSE && -n "$OUTPUT" ]]; then
echo "$GREEN<···$NORMAL $OUTPUT" > /dev/stderr
fi
}
invoke() {
cc invoke "$@"
}
query() {
cc query "$@"
}
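# --- Usage sketch (added for illustration; not part of the original library).
# --- The chaincode function names and arguments below are assumptions; the
# --- environment must export CHANNEL_NAME, CHAINCODE_NAME, ORDERER and
# --- ORGS_WITH_PEERS as the helpers above expect:
#   source lib-cctest.sh
#   invoke putPersona '{"id":20104249729}'
#   query -o getPersona 20104249729 | assert '.id == 20104249729'
#   failreport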
| true |
a61e230b499a02e1006b5278340b4dc4c8b4454e
|
Shell
|
lospejos/scriptella-etl
|
/tools/src/bin/scriptella.sh
|
UTF-8
| 1,508 | 3.515625 | 4 |
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
# Copyright 2006-2012 The Scriptella Project Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Scriptella launcher script for Linux.
BIN_DIR=`dirname $0`
CUR_DIR=`pwd`
if [ "x$SCRIPTELLA_HOME" = "x" ]; then
SCRIPTELLA_HOME=`cd $BIN_DIR/..; pwd` # goes one level up
fi
if [ "x$SCRIPTELLA_JAVA_OPTS" = "x" ]; then
_SCRIPTELLA_JAVA_OPTS="$JAVA_OPTS"
else
_SCRIPTELLA_JAVA_OPTS="$SCRIPTELLA_JAVA_OPTS"
fi
_SCRIPTELLA_CP=""
for _arg in $SCRIPTELLA_HOME/lib/*.jar; do
_SCRIPTELLA_CP=$_SCRIPTELLA_CP:$_arg
done
# Setup the Java Virtual Machine
if [ -n "$JAVA_HOME" ]; then # true if string's length is not zero
if [ -x "$JAVA_HOME/bin/java" ] ; then # true if file exists and can be executed
JAVACMD="$JAVA_HOME/bin/java"
fi
fi
if [ -z "$JAVACMD" ]; then # true if string's length is zero
JAVACMD="java"
fi
$JAVACMD "$_SCRIPTELLA_JAVA_OPTS" -classpath $_SCRIPTELLA_CP scriptella.tools.launcher.EtlLauncher "$@"
| true |
c03c3adca7a6a0252d44da53545eddc7b4367487
|
Shell
|
uclibs/tricerabagger
|
/scholarTricerabagger.sh
|
UTF-8
| 1,561 | 3.875 | 4 |
[] |
no_license
|
#!/bin/bash
#set -e
# Updated 2016-12-09 by Nathan Tallman
# Variables
LOCAL=/mnt/libbag/tools
CON=/mnt/libbag/scholar
BAGPY=$LOCAL/tricerabagger/tools/bagit-python/bagit.py
LOG=$LOCAL/tricerabagger/logs/tricerabagger.txt
DATE=`date +%Y-%m-%d`
DESC="Scholar@UC content as of $DATE"
BAGID=cin.scholar.$DATE
echo -e "\n---------------------------------------------\n" >> $LOG 2>&1
echo "$(date): Tricerabagger will now process the Scholar@UC data into a bag and TAR it. This may take awhile, output is logged in $LOG." 2>&1 | tee -a $LOG
# Create bag
$BAGPY $CON --source-organization="University of Cincinnati Libraries" --bag-count="1 of 1" --internal-sender-identifier="$BAGID" --internal-sender-description="$DESC" >> $LOG 2>&1
# Add aptrust-info.txt
cd $CON
echo "Title: Scholar@UC, $DATE" > aptrust-info.txt
echo "Access: Institution" >> aptrust-info.txt
# Split bags -- need to add code, will probably need to use bagit-java
# Tar bags
mkdir $BAGID
mv * $BAGID/ >> $LOG 2>&1
tar -cvf $BAGID.tar $BAGID/ >> $LOG 2>&1
echo "$BAGID.tar has been created." 2>&1 | tee -a $LOG
# Add BAGID to list of sent bags so ingest status can be monitored.
echo "$BAGID" >> $LOCAL/tricerabagger/logs/sentBags.txt
while getopts "s" OPT; do
case $OPT in
s)
/usr/local/bin/aws s3 cp $CON/*.tar s3://aptrust.receiving.uc.edu 2>&1 | tee -a $LOG && echo "$BAGID.tar has been sent to APTrust." 2>&1 | tee -a $LOG
;;
\?)
echo "Invalid option: -$OPTARG" 2>&1 | tee -a $LOG
;;
esac
done
echo "$(date): Tricerabagger is done." 2>&1 | tee -a $LOG
| true |
6ba9f03ed021fff698ff3b2c65cf5f8fd94cee94
|
Shell
|
raisercostin/kscript
|
/misc/kshell_launcher/kshell_kts.sh
|
UTF-8
| 1,030 | 3.046875 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
if [ $# -ne 1 ]; then
echo "Usage: kshell_kts.sh <kscript.kts>"
exit 1
fi
tmpfile=$(mktemp).kts
#echo '@file:Include("https://git.io/fAJ5h")' >> $tmpfile
echo '
@file:DependsOn("org.apache.hadoop:hadoop-common:2.7.0")
// should be now on maven central
@file:DependsOn("com.github.khud:kshell-repl-api:0.2.4-1.2.60")
@file:DependsOn("sparklin:jline3-shaded:0.2")
//@file:DependsOn("sparklin:kshell:0.2-SNAPSHOT")
@file:DependsOn("sparklin:kshell:0.2.5")
' > $tmpfile
echo '' >> $tmpfile
argScript=$1
#argScript=krangl_example.kts
cat $argScript | grep '@file' >> $tmpfile
#cat $tmpfile
echo "Preparing interactive session by resolving script dependencies..."
## resolve dependencies without running the kscript
KSCRIPT_DIR=$(dirname $(which kscript))
kscript_nocall() { kotlin -classpath ${KSCRIPT_DIR}/kscript.jar kscript.app.KscriptKt "$@";}
kshellCP=$(kscript_nocall $tmpfile | cut -d' ' -f4)
## create new
java -classpath "${kshellCP}" com.github.khud.sparklin.kshell.KotlinShell $@
| true |
22879072e82debf65a1ac93a8f7addf3906f1506
|
Shell
|
cherylling/OS-kerneltest
|
/测试套/debug_ftrace_t/debug_ftrace_t_src/testcase/scripts/nop.sh
|
UTF-8
| 1,796 | 3.4375 | 3 |
[] |
no_license
|
#!/bin/bash
TRACING_PATH=/sys/kernel/debug/tracing
init()
{
[ -d /sys/kernel/debug -a $(ls /sys/kernel/debug |wc -l) -gt 0 ] && umount /sys/kernel/debug
mount -t debugfs nodev /sys/kernel/debug
if [ $? -ne 0 ];then
echo "mount -t debugfs nodev /sys/kernel/debug fail"
exit 1
fi
grep nop ${TRACING_PATH}/available_tracers
if [ $? -ne 0 ];then
echo "no nop in ${TRACING_PATH}/available_tracers"
exit 1
fi
echo 1 > /proc/sys/kernel/ftrace_enabled
}
clean()
{
echo > ${TRACING_PATH}/trace
echo 1 > ${TRACING_PATH}/tracing_on
echo nop > ${TRACING_PATH}/current_tracer
}
do_test()
{
echo 0 > ${TRACING_PATH}/tracing_on
echo > ${TRACING_PATH}/trace
echo nop > ${TRACING_PATH}/current_tracer
echo 1 > ${TRACING_PATH}/tracing_on
sleep 1
echo 0 > ${TRACING_PATH}/tracing_on
TRACER_NAME=`cat ${TRACING_PATH}/trace |head -n1|awk -F: '{print $2}'`
if [ "$TRACER_NAME" != "nop" ];then
echo "nop tracer test fail"
clean
exit 1
fi
#MAX_LATENCY=`cat ${TRACING_PATH}/trace |head -n35 |grep latency |awk -F: '{print $2}'|awk '{print $1}'`
#MAX_LATENCY2=`cat ${TRACING_PATH}tracing_max_latency`
#if [ $MAX_LATENCY != $MAX_LATENCY2 ];then
# echo "max latency mismatch"
# clean
# exit 1
#fi
#cat ${TRACING_PATH}/trace |head -n35 |grep "=> started at:"
#if [ $? -ne 0 ];then
# echo "cannot find => started at: in trace with nop tracer"
# clean
# exit 1
#fi
#cat ${TRACING_PATH}/trace |head -n35 |grep "=> ended at:"
#if [ $? -ne 0 ];then
# echo "cannot find => ended at: in trace with nop tracer"
# clean
# exit 1
#fi
echo "nop tracer test pass"
clean
exit 0
}
init
do_test
| true |
65570de7d6c6455cb00f65ccb6eb9b4e62f4db4f
|
Shell
|
Illusionist5/StudFiles
|
/6 семестр/урвс/учебка/лабы/Lab_7_and_8_oldboyans/lab_7_and_8_oldboyans/Упресы 7,8/3вар/lab7/tests.sh
|
UTF-8
| 144 | 2.734375 | 3 |
[] |
no_license
|
#!/bin/sh
echo "Test #1: "
./labka7 > test1.txt
if [ $? -ne 0 ];
then echo "Failed"
else
echo "Passed"
fi
rm test1.txt
| true |
0dc57dae5c02108a3571cbedc79033e56736d33b
|
Shell
|
HMMWiedmann/Linux
|
/Bash/Get_Available_Ram.sh
|
UTF-8
| 368 | 3.296875 | 3 |
[] |
no_license
|
#!/bin/bash
minavailableRamPercent=$1
memavailable=`free -m | grep Mem | awk '{print $7}'`;
memtotal=`free -m | grep Mem | awk '{print $2}'`;
memavailablepercent="$(($memavailable * 100 / $memtotal))"
echo "$memavailable mb ist verfuegbar von $memtotal mb"
echo "/ $memavailablepercent % /"
if [[ $memavailablepercent -gt $minavailableRamPercent ]]
then
exit 0
else
exit 1 # exit codes above 255 wrap around; use a standard failure code
fi
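# Usage sketch (added for illustration; the 20% threshold is an assumption):
#   ./Get_Available_Ram.sh 20 && echo "enough free RAM" || echo "below threshold"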
| true |
19c744b3bf296a2e6be150ece2dd703c5403b247
|
Shell
|
petronny/aur3-mirror
|
/sysklogd/PKGBUILD
|
UTF-8
| 2,298 | 2.53125 | 3 |
[
"BSD-4.3TAHOE"
] |
permissive
|
# $Id: PKGBUILD 158661 2012-05-05 22:14:03Z eric $
# Maintainer: Eric Bélanger <eric@archlinux.org>
pkgname=sysklogd
pkgver=1.5
pkgrel=4
pkgdesc="System and kernel log daemons"
arch=('i686' 'x86_64')
url="http://www.infodrom.org/projects/sysklogd/"
license=('GPL' 'BSD')
depends=('glibc' 'bash')
provides=('logger')
backup=('etc/syslog.conf' 'etc/logrotate.d/syslog')
source=(http://www.infodrom.org/projects/sysklogd/download/${pkgname}-${pkgver}.tar.gz{,.asc} \
syslog.conf syslog.logrotate syslogd klogd LICENSE \
sysklogd-1.4.1-caen-owl-syslogd-bind.diff \
sysklogd-1.4.1-caen-owl-syslogd-drop-root.diff \
sysklogd-1.4.1-caen-owl-klogd-drop-root.diff \
sysklogd-1.5-syslog-func-collision.patch)
sha1sums=('070cce745b023f2ce7ca7d9888af434d6d61c236'
'9599322fc176004d95b5111b05f665b5191dfe67'
'35b4cb76109a6ffe9269021a6bfb4f8da614a4eb'
'e67c0f78f13c94507d3f686b4e5b8340db4624fd'
'848beb23b9ca4de19c6022df03878dbe57e04c0a'
'f46088f761c033562a59bc13d4888b7343bc02fc'
'c416bcefd3d3d618139cc7912310caddf34c0c0b'
'849b2dcaf11060d583ccb3c48356a6971df45cf0'
'9701989490748b0c5a1727e0fc459179d0e350a8'
'76da0ecd9bca969e292a6ec58d7cd96e4c97e525'
'826e76a59834868658eb9f8d8f3aabd8bf748759')
build() {
cd "${srcdir}/${pkgname}-${pkgver}"
# CAEN/OWL security patches
patch -p1 -i ../sysklogd-1.4.1-caen-owl-syslogd-bind.diff
patch -p1 -i ../sysklogd-1.4.1-caen-owl-syslogd-drop-root.diff
patch -p1 -i ../sysklogd-1.4.1-caen-owl-klogd-drop-root.diff
patch -p1 -i ../sysklogd-1.5-syslog-func-collision.patch
sed -i -e "s/-O3/${CFLAGS} -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE/" \
-e "s/LDFLAGS= -s/LDFLAGS= ${LDFLAGS}/" Makefile
sed -i 's/500 -s/755/' Makefile
make
}
package() {
cd "${srcdir}/${pkgname}-${pkgver}"
install -d "${pkgdir}/usr/sbin" "${pkgdir}"/usr/share/man/{man5,man8}
make prefix="${pkgdir}" install
install -D -m644 ../syslog.conf "${pkgdir}/etc/syslog.conf"
install -D -m644 ../syslog.logrotate "${pkgdir}/etc/logrotate.d/syslog"
install -D -m755 ../syslogd "${pkgdir}/etc/rc.d/syslogd"
install -D -m755 ../klogd "${pkgdir}/etc/rc.d/klogd"
install -D -m644 ../LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}
| true |
0bb97da51a60b9b16bf63e23adbfbf927fcabd25
|
Shell
|
aliarham11/pipeline-api
|
/run.sh
|
UTF-8
| 593 | 3.484375 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
function start () {
if [ -z "$1" ]; then host=0.0.0.0; else host=$1; fi
if [ -z "$2" ]; then port=5000; else port=$2; fi
gunicorn -b $host:$port start:app
}
function stop () {
# the [g] pattern keeps grep from matching its own process
ps aux | grep '[g]unicorn' | awk '{print $2}' | xargs kill -9
}
function unittest () {
# python -m unittest discover convertnd
python -m pytest pipeline_service/tests
}
case "$1" in
start)
start $2 $3 $4
;;
stop)
stop
;;
test)
unittest
;;
*)
echo $"Usage: $0 {start|stop|test} [host] [post]"
exit 1
esac
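# Usage sketch (added for illustration; host/port values are assumptions):
#   ./run.sh start 0.0.0.0 8000   # serve start:app via gunicorn
#   ./run.sh test                 # run the pytest suite
#   ./run.sh stop                 # kill the gunicorn workers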
| true |
efeb6de1348d79725447469a80b32f03cb62466f
|
Shell
|
Wunderment/pwa_builder
|
/add-pwa-app.sh
|
UTF-8
| 5,128 | 3.8125 | 4 |
[] |
no_license
|
#!/bin/bash
# Set the Android SDK root path for gradle.
export ANDROID_SDK_ROOT=~/android/sdk
# We need exactly one command line parameter to work.
if [ ! $# -eq 1 ]; then
echo "Incorrect usage: add-pwa-app.sh <app.json>"
exit 1
fi
if [ ! -f "$1" ]; then
echo "ERROR: Application JSON file does not exist!"
exit 1
fi
# Parse the JSON file for our variables.
TITLE=$(jq -Mr '.title' $1)
APPID=$(jq -Mr '.appid' $1)
PWAURL=$(jq -Mr '.url' $1)
PWADOMAIN=$(jq -Mr '.domain' $1)
PWASUBDOMAIN=$(jq -Mr '.subdomain' $1)
PCOLOUR=$(jq -Mr '.primaryColor' $1)
DCOLOUR=$(jq -Mr '.primaryColorDark' $1)
LCOLOUR=$(jq -Mr '.primaryColorLight' $1)
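# A minimal app.json sketch matching the fields read above (values are
# illustrative assumptions, not taken from the original repo):
# {
#   "title": "Example",
#   "appid": "example",
#   "url": "https://www.example.com/",
#   "domain": "example.com",
#   "subdomain": "www.example.com",
#   "primaryColor": "#3f51b5",
#   "primaryColorDark": "#303f9f",
#   "primaryColorLight": "#c5cae9"
# }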
ROOTDIR=${PWD}
echo "Adding $TITLE ($PWAURL)..."
# Don't overwrite the PWA if it already exists.
if [ -d ./apps/$TITLE ]; then
echo "Error: PWA already exists!"
exit 1
fi
echo "Creating directory structure..."
# Make the PWA directory.
cd apps
mkdir $TITLE
cd $TITLE
APPDIR=${PWD}
# Copy the JSON file into its proper directory.
cp ../../$1 .
# Add the directories we're going to use.
mkdir icons
mkdir src
mkdir apk
mkdir screenshots
# Go get the icon from the website for the app.
cd $APPDIR/icons
# Make some working directories.
mkdir mipmap
mkdir drawable
# Make the mipmap directories: mdpi = 48x48, hdpi = 72x72, xhdpi = 96x96, xxhdpi = 144x144, xxxhdpi = 192x192
# Three files in each directory:
# ic_launcher.png - A square with rounded corners and transparent border around it.
# ic_launcher_foreground.png - No background color, just the main content of the image.
# ic_launcher_round.png - A round icon with the background color and slight transparent border.
cd $APPDIR/icons/mipmap
mkdir mipmap-hdpi
mkdir mipmap-mdpi
mkdir mipmap-xhdpi
mkdir mipmap-xxhdpi
mkdir mipmap-xxxhdpi
# Make the drawable directories: mdpi = 24x24, hdpi = 36x36, xhdpi = 48x48, xxhdpi = 72x72, xxxhdpi = 192x192
# One file in each directory:
# ic_appbar.png - No background color, just the main content of the image.
cd $APPDIR/icons/drawable
mkdir drawable-hdpi
mkdir drawable-mdpi
mkdir drawable-xhdpi
mkdir drawable-xxhdpi
mkdir drawable-xxxhdpi
# Go back up to the main icons directory.
cd $APPDIR/icons
echo "Downloading the favicons..."
# Get the favicons using favicon-downloader-cli from https://github.com/anubhavsrivastava/favicon-downloader-cli
favdownload $PWAURL
echo "Converting favicon to mipmap/drawable icons..."
# Convert the downloaded favicons to a single placeholder icon and then generate the various required icons for the app.
php $ROOTDIR/convert-to-icons.php $TITLE
echo "Copying the PWA template to the app directory..."
# Copy the PWA app template to our src directory.
cd $APPDIR/src
cp -R $ROOTDIR/template/* .
echo "Setting gradlew to executable..."
# Make gradlew executable.
chmod +x gradlew
## Update the various files in the template to customize it to the PWA.
echo "Updating the app config..."
# Update the URL's and domain's...
cd $APPDIR/src/app/src/main/java/at/xtools/pwawrapper
# Escape the backslashes in the PWA URL.
PWAURLESCAPED=$(echo $PWAURL | sed 's/\//\\\//g')
sed "s/https:\/\/www\.leasingrechnen\.at\//$PWAURLESCAPED/" Constants.java > Constants.new
sed "s/leasingrechnen\.at/$PWADOMAIN/" Constants.new > Constants.java
rm Constants.new
# Update the app name.
cd $APPDIR/src/app/src/main/res/values
sed "s/Leasing Rechner/$TITLE PWA/" strings.xml > strings.new
cp strings.new strings.xml
rm strings.new
# Update the colours.
sed "s/colorPrimary\">#....../colorPrimary\">$PCOLOUR/" colors.xml > colors.new
sed "s/colorPrimaryDark\">#....../colorPrimaryDark\">$DCOLOUR/" colors.new > colors.xml
sed "s/colorPrimaryLight\">#....../colorPrimaryLight\">$LCOLOUR/" colors.xml > colors.new
cp colors.new colors.xml
rm colors.new
# Update the icons.
cd ..
cp -r $APPDIR/icons/mipmap/* .
cp -r $APPDIR/icons/drawable/* .
# Update the app id and other build items.
cd $APPDIR/src/app
sed "s/at\.xtools\.pwawrapper/org.wunderment.pwa.$APPID/g" build.gradle > build.new
sed "s/www\.leasingrechnen\.at/$PWASUBDOMAIN/g" build.new > build.gradle
sed "s/leasingrechnen\.at/$PWADOMAIN/g" build.gradle > build.new
cp build.new build.gradle
rm build.new
echo "Start PWA build..."
# Time to actually build the PWA...
cd $APPDIR/src
./gradlew build
gradlew_return_code=$?
if (( gradlew_return_code != 0 )); then
echo "Gradle failed with exit status $gradlew_return_code"
exit $gradlew_return_code
fi
# Copy the new APK.
echo "Renaming and storing the apk file..."
cp $APPDIR/src/app/build/outputs/apk/release/app-release-unsigned.apk $APPDIR/apk/$TITLE-PWA-unsigned.apk
# Sign the APK.
echo "Signing the apk..."
cd $APPDIR/apk
zipalign -v -p 4 $TITLE-PWA-unsigned.apk $TITLE-PWA-unsigned-aligned.apk
apksigner sign --key ~/.android-certs/releasekey.pk8 --cert ~/.android-certs/releasekey.x509.pem --out $TITLE-PWA.apk $TITLE-PWA-unsigned-aligned.apk
# Create a screenshot.
echo "Create screenshots..."
cd $APPDIR/screenshots
cutycapt --url=$PWAURL --out=$TITLE-noborder.png --min-width=600 --min-height=1067
# TBD: Add the PWA app template around the screenshot.
# php ../../add-screenshot-template.php $TITLE-noborder.png
echo "Finished!"
| true |
41c3e7409e1edebedebb98bbde18d0a262589560
|
Shell
|
cnlubo/kubernetes-toolkit
|
/addons/metrics-server/manual/old/metrics-install.sh
|
UTF-8
| 2,485 | 2.984375 | 3 |
[] |
no_license
|
#!/bin/bash
# shellcheck disable=SC2034
# Color Palette
RESET='\033[0m'
BOLD='\033[1m'
## Foreground
BLACK='\033[38;5;0m'
RED='\033[38;5;1m'
GREEN='\033[38;5;2m'
YELLOW='\033[38;5;3m'
BLUE='\033[38;5;4m'
MAGENTA='\033[38;5;5m'
CYAN='\033[38;5;6m'
WHITE='\033[38;5;7m'
## Background
ON_BLACK='\033[48;5;0m'
ON_RED='\033[48;5;1m'
ON_GREEN='\033[48;5;2m'
ON_YELLOW='\033[48;5;3m'
ON_BLUE='\033[48;5;4m'
ON_MAGENTA='\033[48;5;5m'
ON_CYAN='\033[48;5;6m'
ON_WHITE='\033[48;5;7m'
MODULE="$(basename $0)"
stderr_print() {
printf "%b\\n" "${*}" >&2
}
log() {
stderr_print "[${BLUE}${MODULE} ${MAGENTA}$(date "+%Y-%m-%d %H:%M:%S ")${RESET}] ${*}"
}
info() {
log "${GREEN}INFO ${RESET} ==> ${*}"
}
warn() {
log "${YELLOW}WARN ${RESET} ==> ${*}"
}
error() {
log "${RED}ERROR${RESET} ==> ${*}"
}
info "modify yaml files ..... "
cd /u01/src/kubernetes/cluster/addons/metrics-server || exit
cp resource-reader.yaml{,.orig}
sed -i '/ - pods/a\ - pods/stats' resource-reader.yaml
cp metrics-server-deployment.yaml{,.orig}
sed -i '/--metric-resolution=30s/a\ - --kubelet-insecure-tls\n - --kubelet-preferred-address-types=InternalIP,Hostname,InternalDNS,ExternalDNS,ExternalIP' \
metrics-server-deployment.yaml
sed -i 's@- --kubelet-port=10255@# - --kubelet-port=10255@1' \
metrics-server-deployment.yaml
sed -i 's@- --deprecated-kubelet-completely-insecure=true@# - --deprecated-kubelet-completely-insecure=true@1' \
metrics-server-deployment.yaml
sed -i 's@{{ base_metrics_server_cpu }}@80m@1' \
metrics-server-deployment.yaml
sed -i 's@{{ base_metrics_server_memory }}@80Mi@1' \
metrics-server-deployment.yaml
sed -i 's@{{ metrics_server_memory_per_node }}@8@1' \
metrics-server-deployment.yaml
sed -i 's@- --minClusterSize={{ metrics_server_min_cluster_size }}@# - --minClusterSize={{ metrics_server_min_cluster_size }}@1' \
metrics-server-deployment.yaml
# Grant the kube-system:metrics-server ServiceAccount access to the kubelet API
info "create auth-kubelet.yaml ....."
cat > auth-kubelet.yaml <<EOF
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: metrics-server:system:kubelet-api-admin
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kubelet-api-admin
subjects:
- kind: ServiceAccount
name: metrics-server
namespace: kube-system
EOF
info "metrics-server install ..... "
#kubectl create -f ./
| true |
0660a4343380462d65ef56dc82671b3faf1f7e11
|
Shell
|
gregth/NTUA-advcomparch
|
/ex3/scripts/get_instructions_count.sh
|
UTF-8
| 623 | 3.375 | 3 |
[] |
no_license
|
#!/bin/bash
OUTPUT_DIR_BASE="/home/gregth/workspace/advcomparch/ex3/outputs"
SNIPER_DIR="/home/gregth/workspace/advcomparch/ex3/sniper-7.3"
echo "Outputs to be processed located in: $OUTPUT_DIR_BASE"
output="*Benchmark*|*Instructions Run*|*Percentage %*\n"
for benchdir in $OUTPUT_DIR_BASE/*; do
bench=$(basename $benchdir)
instructions=$(cat $benchdir/$bench.DW_01-WS_001.out/sim.out | sed -n 2p | cut -d "|" -f 2 | tr -d '[:space:]')
percentage=$(echo "scale=2; $instructions/10000000"| bc | awk '{printf "%2.2f", $0}')
output="$output$bench|$instructions|$percentage\n"
done
echo -ne $output | column -t -s '|'
| true |
6d09bb938ab8cd7c0382b698fb0c6fd6b39791b7
|
Shell
|
Lugoues/go-crypto-wallet
|
/scripts/operation/generate-eth-key.sh
|
UTF-8
| 2,030 | 3.578125 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/sh
set -eu
COIN="${1:?eth}"
# 1: ganache, 2:go-ethereum, 3: something else
CLIENT=2
###############################################################################
# keygen wallet
###############################################################################
if [ $CLIENT -eq 1 ]; then
echo import ganache keys
docker compose exec btc-keygen-db mysql -u root -proot -e "$(cat ./scripts/operation/sql/ganache_key.sql)"
else
# create seed
keygen -coin ${COIN} create seed
# create hdkey for client, deposit, payment account
keygen -coin ${COIN} create hdkey -account client -keynum 10
keygen -coin ${COIN} create hdkey -account deposit -keynum 1
keygen -coin ${COIN} create hdkey -account payment -keynum 1
keygen -coin ${COIN} create hdkey -account stored -keynum 1
fi
# import generated private key into keygen wallet (this command should run on ethereum server)
# create key files on key store directory
keygen -coin ${COIN} import privkey -account client
keygen -coin ${COIN} import privkey -account deposit
keygen -coin ${COIN} import privkey -account payment
keygen -coin ${COIN} import privkey -account stored
# export address
file_address_client=$(keygen -coin "${COIN}" export address -account client)
file_address_deposit=$(keygen -coin "${COIN}" export address -account deposit)
file_address_payment=$(keygen -coin "${COIN}" export address -account payment)
file_address_stored=$(keygen -coin "${COIN}" export address -account stored)
###############################################################################
# watch only wallet
###############################################################################
# import addresses generated by keygen wallet
watch -coin ${COIN} import address -file ${file_address_client##*\[fileName\]: }
watch -coin ${COIN} import address -file ${file_address_deposit##*\[fileName\]: }
watch -coin ${COIN} import address -file ${file_address_payment##*\[fileName\]: }
watch -coin ${COIN} import address -file ${file_address_stored##*\[fileName\]: }
| true |
93da6ccaf1cf41b6cc65630b84bfeaac71557942
|
Shell
|
markddrake/YADAMU---Yet-Another-DAta-Migration-Utility
|
/qa/cmdLine/settings/default.sh
|
UTF-8
| 1,448 | 2.734375 | 3 |
[
"MIT"
] |
permissive
|
echo "Using default unsetings"
# If a value is not unset here a default will be assigned.
# MariaDB Connection Information
unset MARIADB_USER
unset MARIADB_PWD
unset MARIADB_HOST
unset MARIADB_PORT
unset MARIADB_DBNAME
# Generic SQL Server Connection Information
unset MSSQL_USER
unset MSSQL_PWD
unset MSSQL_HOST
unset MSSQL_PORT
unset MSSQL_DBNAME
# SQL Server 2017 Connection Information
unset MSSQL17_USER
unset MSSQL17_PWD
unset MSSQL17_HOST
unset MSSQL17_PORT
unset MSSQL17_DBNAME
# SQL Server 2019 Connection Information
unset MSSQL19_USER
unset MSSQL19_PWD
unset MSSQL19_HOST
unset MSSQL19_PORT
unset MSSQL19_DBNAME
# MySQL Connection Information
unset MYSQL_USER
unset MYSQL_PWD
unset MYSQL_HOST
unset MYSQL_PORT
unset MYSQL_DBNAME
# Oracle 19c Connection Information
unset ORACLE19C
unset ORACLE19C_USER
unset ORACLE19C_PWD
# Oracle 18c Connection Information
unset ORACLE18C
unset ORACLE18C_USER
unset ORACLE18C_PWD
# Oracle 12c Connection Information
unset ORACLE12C
unset ORACLE12C_USER
unset ORACLE12C_PWD
# Oracle 11g Connection Information
unset ORACLE11G
unset ORACLE11G_USER
unset ORACLE11G_PWD
# Postgres Connection Information
unset POSTGRES_USER
unset POSTGRES_PWD
unset POSTGRES_HOST
unset POSTGRES_PORT
unset POSTGRES_DBNAME
# Relative location of export files to be used for testing
export YADAMU_ORACLE_PATH=oracle19c
export YADAMU_MSSQL_PATH=mssql19
export YADAMU_MYSQL_PATH=mysql
export YADAMU_TEST_FOLDER=cmdLine
| true |
55973906c608246c6e3f42b2dec9b77c865ec236
|
Shell
|
DimuthuKasunWP/dokku
|
/plugins/apps/subcommands/locked
|
UTF-8
| 565 | 3.484375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -eo pipefail
[[ $DOKKU_TRACE ]] && set -x
source "$PLUGIN_CORE_AVAILABLE_PATH/common/functions"
source "$PLUGIN_AVAILABLE_PATH/apps/internal-functions"
apps_locked_cmd() {
declare desc="checks if an app is locked for deployment"
declare cmd="apps:locked"
[[ "$1" == "$cmd" ]] && shift 1
declare APP="$1"
verify_app_name "$APP"
local LOCKED="$(apps_is_locked "$APP")"
if [[ "$LOCKED" != "true" ]]; then
dokku_log_fail_quiet "Deploy lock does not exist"
fi
dokku_log_quiet "Deploy lock exists"
}
apps_locked_cmd "$@"
| true |
5e78e060e5e234b0d51db2e54b0ae4e2a096d5b8
|
Shell
|
Ghostbaby/harbor-ha-helm
|
/prepare.sh
|
UTF-8
| 4,228 | 3.296875 | 3 |
[] |
no_license
|
#!/bin/bash
set -e
set -x
basedir=$(cd `dirname $0`; pwd)
source ${basedir}/harbor.cfg
if [ "${HARBOR_DEPLOY_TYPE}" == "nodeport" -a "${CLUSTER_MASTER_IP}" == "" ];then
echo "CLUSTER_MASTER_IP: is valid, which cannot be null ..."
echo "please update ${basedir}/harbor.cfg"
exit 1
fi
if [ "${HARBOR_DEPLOY_TYPE}" != "nodeport" -a "${HARBOR_DEPLOY_TYPE}" != "ingress" ] ;then
echo "harbor only support nodeport/ingress(HARBOR_DEPLOY_TYPE) install"
echo "please update ${basedir}/harbor.cfg"
exit 1
fi
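# A harbor.cfg sketch covering the variables this script consumes (values are
# illustrative assumptions, not taken from the original repo):
#   NAMESPACE=harbor
#   REDIS_RELEASE_NAME=harbor-redis
#   POSTGRES_RELEASE_NAME=harbor-postgres
#   HAPROXY_RELEASE_NAME=harbor-haproxy
#   HARBOR_RELEASE_NAME=harbor
#   HARBOR_DEPLOY_TYPE=nodeport
#   CLUSTER_MASTER_IP=192.168.1.10
#   STORAGE_CLASS_NAME=standard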
echo "============================="
echo "NAMESPACE: ${NAMESPACE}"
echo "REDIS_RELEASE_NAME: ${REDIS_RELEASE_NAME}"
echo "POSTGRES_RELEASE_NAME: ${POSTGRES_RELEASE_NAME}"
echo "HAPROXY_RELEASE_NAME: ${HAPROXY_RELEASE_NAME}"
echo "HARBOR_RELEASE_NAME: ${HARBOR_RELEASE_NAME}"
echo "HARBOR_DEPLOY_TYPE: ${HARBOR_DEPLOY_TYPE}"
echo "CLUSTER_MASTER_IP: ${CLUSTER_MASTER_IP}"
echo "STORAGE_CLASS_NAME: ${STORAGE_CLASS_NAME}"
echo "============================="
export NAMESPACE=${NAMESPACE}
export REDIS_RELEASE_NAME=${REDIS_RELEASE_NAME}
export POSTGRES_RELEASE_NAME=${POSTGRES_RELEASE_NAME}
export HAPROXY_RELEASE_NAME=${HAPROXY_RELEASE_NAME}
export HARBOR_RELEASE_NAME=${HARBOR_RELEASE_NAME}
export HARBOR_DEPLOY_TYPE=${HARBOR_DEPLOY_TYPE}
export CLUSTER_MASTER_IP=${CLUSTER_MASTER_IP}
export STORAGE_CLASS_NAME=${STORAGE_CLASS_NAME}
[ ! -e ${basedir}/templates ] && echo "cannot find values templates" && exit 1
[ -e ${basedir}/values ] && rm -rf ${basedir}/values
mkdir -p ${basedir}/values/
# prepare postgres-ha
echo "begin render postgresql ha template"
[ ! -e ${basedir}/templates/postgres-ha-values.yaml.tmpl ] && echo "cannot find postgresql values templates" && exit 1
set -e
${basedir}/envtpl -m error ${basedir}/templates/postgres-ha-values.yaml.tmpl > ${basedir}/values/postgres-ha-values.yaml
set +e
[ ! -e ${basedir}/values/postgres-ha-values.yaml ] && echo "render postgres ha template fail..." && exit 1
echo "render postgresql ha template suc.."
# prepare redis-ha
echo "begin render redis ha template"
[ ! -e ${basedir}/templates/redis-ha-values.yaml.tmpl ] && echo "cannot find redis values templates" && exit 1
cp -r ${basedir}/templates/redis-ha-values.yaml.tmpl ${basedir}/values/redis-ha-values.yaml
[ ! -e ${basedir}/values/redis-ha-values.yaml ] && echo "render redis ha template fail..." && exit 1
echo "render redis ha template suc.."
# prepare harbor haproxy
echo "begin render haproxy template"
[ ! -e ${basedir}/templates/haproxy-values.yaml.tmpl ] && echo "cannot find haproxy values templates" && exit 1
set -e
${basedir}/envtpl -m error ${basedir}/templates/haproxy-values.yaml.tmpl > ${basedir}/values/haproxy-values.yaml
set +e
[ ! -e ${basedir}/values/haproxy-values.yaml ] && echo "render haproxy template fail..." && exit 1
echo "render harbor haproxy template suc.."
# prepare harbor
echo "begin render harbor template"
[ ! -e ${basedir}/templates/harbor-${HARBOR_DEPLOY_TYPE}-values.yaml.tmpl ] && echo "cannot find harbor ${HARBOR_DEPLOY_TYPE} values templates" && exit 1
set -e
${basedir}/envtpl -m error ${basedir}/templates/harbor-${HARBOR_DEPLOY_TYPE}-values.yaml.tmpl > ${basedir}/values/harbor-${HARBOR_DEPLOY_TYPE}-values.yaml
set +e
[ ! -e ${basedir}/values/harbor-${HARBOR_DEPLOY_TYPE}-values.yaml ] && echo "render harbor ${HARBOR_DEPLOY_TYPE} template fail..." && exit 1
[ -e ${basedir}/deploy.sh ] && rm -f ${basedir}/deploy.sh
cat > ${basedir}/deploy.sh <<EOF
echo "begin install postgres ha..."
helm --namespace ${NAMESPACE} upgrade -i -f ${basedir}/values/postgres-ha-values.yaml ${POSTGRES_RELEASE_NAME} ${basedir}/charts/postgresql
echo "begin install redis ha..."
helm --namespace ${NAMESPACE} upgrade -i -f ${basedir}/values/redis-ha-values.yaml ${REDIS_RELEASE_NAME} ${basedir}/charts/redis-ha
echo "begin install harbor haproxy.."
helm --namespace ${NAMESPACE} upgrade -i -f ${basedir}/values/haproxy-values.yaml ${HAPROXY_RELEASE_NAME} ${basedir}/charts/harbor-haproxy
echo "begin install harbor.."
helm --namespace ${NAMESPACE} upgrade -i -f ${basedir}/values/harbor-${HARBOR_DEPLOY_TYPE}-values.yaml ${HARBOR_RELEASE_NAME} ${basedir}/charts/harbor-helm
echo "install harbor suc..."
EOF
chmod a+x ${basedir}/deploy.sh
| true |
ea492fec30a8ea1627d1894b8d4e3a00772bace1
|
Shell
|
bethesque/has-changed-path
|
/entrypoint.sh
|
UTF-8
| 1,048 | 4.21875 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/sh -l
set -euo pipefail
SOURCE=${SOURCE:-.}
cd "${GITHUB_WORKSPACE:-.}/${SOURCE}"
# This script returns `true` if the paths passed as
# arguments were changed in the last commit.
# For reference:
# https://fant.io/p/circleci-early-exit/
# 1. Get all the arguments of the script
# https://unix.stackexchange.com/a/197794
PATHS_TO_SEARCH="$*"
# 2. Make sure the paths to search are not empty
if [ -z "$PATHS_TO_SEARCH" ]; then
echo "Please provide the paths to search for."
echo "Example usage:"
echo "./entrypoint.sh path/to/dir1 path/to/dir2"
exit 1
fi
# 3. Compares paths from latest HEAD with previous one.
# --quiet: exits with 1 if there were differences (https://git-scm.com/docs/git-diff)
set -o noglob
if git diff --quiet HEAD~1 HEAD -- $PATHS_TO_SEARCH; then
echo "Code in the following paths hasn't changed: " $PATHS_TO_SEARCH
echo ::set-output name=changed::false
exit 0
else
echo "Code in the following paths changed: " $PATHS_TO_SEARCH
echo ::set-output name=changed::true
exit 0
fi
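# Usage sketch (added for illustration; the paths are assumptions):
#   ./entrypoint.sh src docs/content
# compares those paths between HEAD~1 and HEAD and emits the ::set-output
# line that later GitHub Actions steps can read as 'changed'.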
| true |
ad489029f19a5c3c2ba0bcd0000465d77be2f07e
|
Shell
|
ulyaoth/repository
|
/ulyaoth-fcgiwrap/build-ulyaoth-fcgiwrap.sh
|
UTF-8
| 1,643 | 3.078125 | 3 |
[] |
no_license
|
ulyaothos=`cat /etc/ulyaoth`
buildarch="$(uname -m)"
fcgiwrapversion=1.1.0
useradd ulyaoth
cd /home/ulyaoth/
su ulyaoth -c "rpmdev-setuptree"
su ulyaoth -c "wget https://github.com/gnosek/fcgiwrap/archive/'"$fcgiwrapversion"'.tar.gz"
su ulyaoth -c "tar xvzf '"$fcgiwrapversion"'.tar.gz"
su ulyaoth -c "mv /home/ulyaoth/fcgiwrap-'"$fcgiwrapversion"' /home/ulyaoth/fcgiwrap"
su ulyaoth -c "sed -i 's/http/fcgiwrap/g' /home/ulyaoth/fcgiwrap/systemd/fcgiwrap.service"
su ulyaoth -c "tar cvf fcgiwrap.tar.gz fcgiwrap"
su ulyaoth -c "mv fcgiwrap.tar.gz /home/ulyaoth/rpmbuild/SOURCES/"
su ulyaoth -c "rm -rf /home/ulyaoth/fcgiwrap/ '"$fcgiwrapversion"'.tar.gz"
cd /home/ulyaoth/rpmbuild/SPECS/
su ulyaoth -c "wget https://raw.githubusercontent.com/ulyaoth/repository/master/ulyaoth-fcgiwrap/SPEC/ulyaoth-fcgiwrap.spec"
if [ "$arch" != "x86_64" ]
then
sed -i '/BuildArch: x86_64/c\BuildArch: '"$buildarch"'' ulyaoth-fcgiwrap.spec
fi
if type dnf 2>/dev/null
then
dnf builddep -y ulyaoth-fcgiwrap.spec
elif type yum 2>/dev/null
then
yum-builddep -y ulyaoth-fcgiwrap.spec
fi
su ulyaoth -c "rpmbuild -ba ulyaoth-fcgiwrap.spec"
if [ "$ulyaothos" == "amazonlinux" ]
then
cp /home/ulyaoth/rpmbuild/SRPMS/* /home/ec2-user/
cp /home/ulyaoth/rpmbuild/RPMS/x86_64/* /home/ec2-user/
cp /home/ulyaoth/rpmbuild/RPMS/i686/* /home/ec2-user/
cp /home/ulyaoth/rpmbuild/RPMS/i386/* /home/ec2-user/
else
cp /home/ulyaoth/rpmbuild/SRPMS/* /root/
cp /home/ulyaoth/rpmbuild/RPMS/x86_64/* /root/
cp /home/ulyaoth/rpmbuild/RPMS/i686/* /root/
cp /home/ulyaoth/rpmbuild/RPMS/i386/* /root/
fi
rm -rf /root/build-ulyaoth-*
rm -rf /home/ulyaoth/rpmbuild
| true |
30a98df04b0ace0a845aba6ab0812948aab24435
|
Shell
|
PerilousApricot/SUSHyFT-Scripts
|
/topLevelScripts/makeAllStitch.sh
|
UTF-8
| 1,388 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/bash
if [[ ! -d ${SHYFT_STITCHED_PATH}/${SHYFT_MODE} ]]; then
mkdir -p ${SHYFT_STITCHED_PATH}/${SHYFT_MODE}
fi
toProcess=( )
payloads=( )
for CFG in $SHYFT_BASE/config/$SHYFT_MODE/stitchSystematicConfigs/*.cfg; do
COMMAND_TO_RUN="stitch.py ${SHYFT_REBIN_PATH}/${SHYFT_MODE} ${SHYFT_STITCHED_PATH}/${SHYFT_MODE} $SHYFT_BASE/config/$SHYFT_MODE/stitchConfig.cfg"
OUTPUTS=""
INPUTS=""
while read LINE; do
# not sure how to break this up
if [[ -z $OUTPUTS ]]; then
OUTPUTS=$LINE
else
INPUTS="$INPUTS $LINE"
fi
done < <($COMMAND_TO_RUN $CFG --getInputFiles)
PAYLOAD="stitch.py ${SHYFT_REBIN_PATH}/${SHYFT_MODE} ${SHYFT_STITCHED_PATH}/${SHYFT_MODE} $SHYFT_BASE/config/$SHYFT_MODE/stitchConfig.cfg $CFG"
toProcess+=("runIfChanged.sh $OUTPUTS $INPUTS `which stitch.py` $SHYFT_BASE/config/$SHYFT_MODE/stitchConfig.cfg $CFG -- $PAYLOAD")
payload+=( "$PAYLOAD" )
done
COMMANDS_TO_UNROLL=10
COMMAND_TO_RUN='sbatch -A jswhep --time=60'
( for ((i = 0; i < ${#toProcess[@]}; i += $COMMANDS_TO_UNROLL)); do
sleep 0.3
echo "#!/bin/bash
#SBATCH --output=/dev/null
#SBATCH --output=/dev/null
#SBATCH --time=2:00:00
cd /home/meloam
source set-ntuple.sh
unset TERM
$(
for IDX in $(seq $i $(($i + $COMMANDS_TO_UNROLL - 1))); do
echo "${toProcess[$IDX]}"
done
)" | eval $COMMAND_TO_RUN
done; )
| true |
89ee730a01857c8bd819dbe77fcfe098877e41ed
|
Shell
|
patdie421/mea-edomus-lite
|
/docker/build.sh
|
UTF-8
| 1,140 | 3.40625 | 3 |
[] |
no_license
|
gen_dockerfile()
{
DEST=$1
echo "FROM debian"
echo "RUN apt-get update"
echo "RUN apt-get upgrade"
echo "RUN echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections"
echo "RUN apt-get -y install apt-utils"
echo "RUN apt-get -y install netbase"
echo "RUN apt-get -y install net-tools"
echo "RUN apt-get -y install python"
echo "RUN apt-get -y install libpython2.7"
echo "RUN apt-get -y install curl"
echo "WORKDIR $DEST"
echo "COPY . ."
echo "EXPOSE 3865/udp"
echo "CMD [\"bash\", \"start.sh\" ]"
}
gen_start_sh()
{
DIR=$1
echo "\"$DIR\"/bin/xplhub &"
echo "sleep 5"
echo "\"$DIR\"/bin/mea-edomus --basepath=\"$DIR\" &"
echo "sleep 5"
echo "\"$DIR\"/bin/demo_device"
}
ORG=`pwd`
REALPATH=`realpath "$0"`
BASEPATH=`dirname "$REALPATH"`
SOURCE="$1"
DEST="/app"
cd "$BASEPATH"
mkdir -p "$SOURCE"
docker rmi mea_edomus
gen_start_sh "$DEST" > start.sh.tmp
gen_dockerfile "$DEST" > Dockerfile.tmp
cp .dockerignore "$SOURCE"
mv Dockerfile.tmp "$SOURCE"/Dockerfile
mv start.sh.tmp "$SOURCE"/start.sh
chmod +x "$SOURCE"/start.sh
cd ..
./build_demo.sh "$SOURCE" "$DEST"
cd "$SOURCE"
docker build -t mea_edomus .
cd "$ORG"
| true |
2215fca4bdd6d361affccef6494648b40e9d32ae
|
Shell
|
gcc4ti/gcc4ti-toolchain
|
/scripts/012-keys.sh
|
UTF-8
| 301 | 3.234375 | 3 |
[] |
no_license
|
#!/bin/bash
## Download the keys from Brandon Wilson's website.
SOURCE=http://brandonw.net/calcstuff/keys.zip
wget --continue $SOURCE || { exit 1; }
## Unpack the keys to the rabbitsign directory.
echo Decompressing the magic keys. Please wait.
unzip -o -d "$TIGCC/share/rabbitsign" keys.zip || { exit 1; }
| true |
d3a765e94b8e4c9a992c6a796e5af6fae4458ca1
|
Shell
|
EduardoGimeno/Administracion-de-Sistemas
|
/practica_6/practica6_parte1.sh
|
UTF-8
| 1,500 | 3.46875 | 3 |
[] |
no_license
|
#!/bin/bash
# Eduardo Gimeno
# The tag passed to logger's -t option changes depending on the machine
# Get the number of connected users and the load average
# The fields may vary depending on how many users there are
if [ "user," = "$(uptime | awk '{print $5}')" ] || [ "users," = "$(uptime | awk '{print $5}')" ]
then
uptime | awk '{print "Usuarios conectados: " $4 ", Carga media de trabajo (1m, 5m, 15m): " $8 " " $9 " " $10 " "}' | logger -p local0.info -t debian-as1
else
uptime | awk '{print "Usuarios conectados: " $5 ", Carga media de trabajo (1m, 5m, 15m): " $9 " " $10 " " $11 " "}' | logger -p local0.info -t debian-as1
fi
# Used and free memory, swap usage
free -h | awk 'NR==2 {print "Used memory: " $3 ", Free memory: "$4} NR==4 {print ", Swap used: " $3}' | logger -p local0.info -t debian-as1
# Used and free disk space
df -h | awk 'NR==2 {print "Used space: " $3 ", Free space: "$4}' | logger -p local0.info -t debian-as1
# Number of open ports and established connections
puertos_abiertos="$(netstat -l | egrep -v ^unix | egrep LISTEN | wc -l)"
conexiones_establecidas="$(netstat | egrep -v ^unix | egrep ESTABLISHED | wc -l)"
echo "Numero de puertos abiertos: $puertos_abiertos, Numero de conexiones establecidas: $conexiones_establecidas" | logger -p local0.info -t debian-as1
# Number of running programs
programas_ejecucion="$(ps -e | egrep : | wc -l)"
echo "Numero de programas en ejecucion: $programas_ejecucion" | logger -p local0.info -t debian-as1
| true |
44fd17007687d632884f43e4e454c3cf7405f33a
|
Shell
|
tkaaad97/gl-wrapper-gen
|
/scripts/supplement-spec.sh
|
UTF-8
| 981 | 3.109375 | 3 |
[] |
no_license
|
#!/bin/bash
injects1=$(xmlstarlet sel -B -t -c '.' objects.xml)
injects2=$(xmlstarlet sel -B -t -c '.' newtypes.xml)
spec=$(cat gl.xml)
while read -r inject
do
command='xmlstarlet ed -P '
while read -r action
do
method=$(xmlstarlet sel -t -v '/action/@method' <<<"$action")
xpath=$(xmlstarlet sel -t -v '/action/@xpath' <<<"$action")
type=$(xmlstarlet sel -t -v '/action/@type' <<<"$action")
name=$(xmlstarlet sel -t -v '/action/@name' <<<"$action")
value=$(xmlstarlet sel -t -v '/action/@value' <<<"$action")
command+="--${method} '""${xpath}""' -t \"${type}\" -n \"${name}\" -v \"${value}\" "
done < <(xmlstarlet sel -t -m '/inject/action' -c '.' --nl <<<"$inject")
spec=$(echo "$spec" | bash -c "$command")
done < <({
xmlstarlet sel -t -m '/objects/object/injects/inject' -c '.' --nl <<<"$injects1";
xmlstarlet sel -t -m '/newtypes/newtype/injects/inject' -c '.' --nl <<<"$injects2";
})
echo "$spec"
| true |
9d444cfc7de476d41ceb93f24a1758e21978bbe6
|
Shell
|
ruiAzevedo19/UMinho
|
/4ano/ABD/TP/scripts/benchmarkconfiguration.sh
|
UTF-8
| 2,031 | 3.765625 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# -----------------------------------------------------
# BENCHMARK SERVER CONFIGURATION - SCRIPT
# ABD UMINHO
# -----------------------------------------------------
helpFunction(){
echo ""
echo "Usage: $0 -s dbServerName -u dbUser -w nrWarehouses"
echo -e "\t-s Define the server name"
echo -e "\t-u Define the database user"
echo -e "\t-w Define the Number of Warehouses"
exit 1 # Exit script after printing help
}
while getopts "s:u:w:" opt
do
case "$opt" in
s ) dbServerName="$OPTARG" ;;
u ) dbUser="$OPTARG" ;;
w ) nrWarehouses="$OPTARG" ;;
? ) helpFunction ;; # Print helpFunction in case parameter is non-existent
esac
done
# Print helpFunction if the db server name, db user, or number of warehouses was not provided
if [ -z "$dbServerName" ] || [ -z "$dbUser" ] || [ -z "$nrWarehouses" ]
then
echo "Some or all of the parameters are empty.";
helpFunction
fi
# Update packages
yes | sudo apt-get update
# Install Java
yes | sudo apt-get install openjdk-8-jdk
# Install pip
yes | sudo apt install python3-pip
# Install scipy
yes | pip3 install scipy
# Move file showtpc.py to the directory results
mkdir -p ~/results
cp ~/scripts/files/showtpc.py ~/results
# Untar files
tar xvf ~/scripts/files/tpc-c-0.1-SNAPSHOT-tpc-c.tar.gz -C ~/
tar xvf ~/scripts/files/extra.tgz -C ~/
# Cd to the tpcc directory
cd ~/tpc-c-0.1-SNAPSHOT
# Define the database connection address
sed -i.bak "s#^db.connection.string=.*#db.connection.string=jdbc:postgresql://${dbServerName}/tpcc#g" etc/database-config.properties
# Define the database username
sed -i.bak "s/^db.username=.*/db.username=${dbUser}/g" etc/database-config.properties
# Define the database password
sed -i.bak "s/^db.password=.*/db.password=/g" etc/database-config.properties
# Install postgresql client
yes | sudo apt-get install postgresql-client-12
# Run the script to create a new db
sh ~/scripts/auxiliary_scripts/createdb.sh $dbServerName $nrWarehouses
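# Example invocation (added for illustration; host, user and warehouse count
# are assumptions):
#   ./benchmarkconfiguration.sh -s db-server-01 -u postgres -w 10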
| true |
2f34c5cf68af7ed4834fd3e4394bd0b7e724a66c
|
Shell
|
kevinswiber/postman-echo-api
|
/scripts/sync-postman-collection.sh
|
UTF-8
| 658 | 3.234375 | 3 |
[] |
no_license
|
#!/bin/sh
set -e
version="1.0.0"
name="Postman Echo v$version - openapi"
openapi_yaml="./openapi/postman-echo-oas-v$version.yaml"
collection_json="./openapi/postman-echo-postman-v$version.json"
openapi2postmanv2 -s $openapi_yaml -o $collection_json -p
collection_id=$(postmanctl get co -o json | jq -r '.[] | select(.name=="'"$name"'") | select(has("fork") | not).id')
if [ -z "$collection_id" ]
then
collection_id=$(cat $collection_json | postmanctl create collection)
echo "$collection_id created in Postman!"
else
collection_id=$(cat $collection_json | postmanctl replace collection $collection_id)
echo "$collection_id updated in Postman!"
fi
| true |
df2219d08106e6b3e16e91898beb0b4669e017bd
|
Shell
|
dforsber/sqlite-parquet-vtable
|
/tests/test-unsupported
|
UTF-8
| 994 | 4.0625 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -euo pipefail
# Verify that all the unsupported.*parquet files result in an error when creating the virtual table,
# but don't segfault.
load_unsupported() {
file=${1:?must provide file to load}
basename=$(basename "$file")
cat <<EOF
.echo on
.load build/linux/libparquet
.testcase $basename
.bail on
CREATE VIRTUAL TABLE test USING parquet('$file');
SELECT 123;
EOF
}
main() {
root=$(dirname "${BASH_SOURCE[0]}")/..
root=$(readlink -f "$root")
cd "$root"
unsupported_files=$(find . -type f -name 'unsupported*.parquet')
while read -r unsupported; do
echo "Testing: $unsupported"
"$root"/sqlite/sqlite3 -init <(load_unsupported "$unsupported") < /dev/null > /dev/null 2> testcase-stderr.txt
# We expect the 'SELECT 123' command to NOT have been run
if grep -q 123 testcase-out.txt; then
echo "...FAILED; expected an error message. Check testcase-{out,err}.txt" >&2
exit 1
fi
done < <(echo "$unsupported_files")
}
main "$@"
| true |
4bdabd0067c41401d52ffc9dced7d229dc491606
|
Shell
|
peeweep/nemo-packaging
|
/qt5-ngfd-git/PKGBUILD
|
UTF-8
| 1,021 | 2.796875 | 3 |
[] |
no_license
|
## $Id$
# Contributor: TheKit <nekit1000 at gmail.com>
# Contributor: Alexey Andreyev <aa13q@ya.ru>
# Maintainer: James Kittsmiller (AJSlye) <james@nulogicsystems.com>
pkgname=qt5-ngfd-git
_pkgname=libngf-qt-git
pkgver=0.7.0.r0.gac87734
pkgrel=1
pkgdesc="Qt-based client library for Non-Graphic Feedback daemon"
arch=('x86_64' 'aarch64')
url="https://github.com/sailfishos/libngf-qt"
license=('GPL')
depends=('qt5-declarative' 'libngf')
makedepends=('git')
provides=("${_pkgname%-git}" "${pkgname%-git}")
conflicts=("${_pkgname%-git}" "${pkgname%-git}")
source=("${pkgname%-git}::git+${url}")
md5sums=('SKIP')
pkgver() {
cd "$srcdir/${pkgname%-git}"
git describe --long --tags | sed 's/\([^-]*-g\)/r\1/;s/-/./g'
}
build() {
cd "$srcdir/${pkgname%-git}"
qmake PREFIX=/usr
# Hack for PREFIX path not being passed to src subproject for some reason
cd src
qmake PREFIX=/usr
cd ..
make
}
package() {
cd "$srcdir/${pkgname%-git}"
make -j 1 INSTALL_ROOT="$pkgdir/" install
# Remove tests
rm -rf "$pkgdir/opt"
}
| true |
9dfba9f7a6ab3c7535596d642907dbdcad629d62
|
Shell
|
hcape/SATsudoku
|
/test-lines.sh
|
UTF-8
| 531 | 3.8125 | 4 |
[] |
no_license
|
#!/bin/bash
if [ -z "$1" ]
then
echo "Usage: $0 <multiline puzzle file>"
exit 1
fi
printf "Testing $(basename $1)\n"
mkdir -p tmp/$(basename $1).puzzles
split -d -e -l 1 $1 tmp/$(basename $1).puzzles/
mkdir -p out/$(basename $1).solns/
for file in tmp/$(basename $1).puzzles/*; do
printf "\rTesting Puzzle #$(basename $file)"
./test.sh $file > out/$(basename $1).solns/$(basename $file).soln
done
printf "\nWriting solutions to out/$(basename $1).soln\n"
cat out/$(basename $1).solns/* > out/$(basename $1).soln
| true |
f15adbd982f6fbe5aff5dbe3c303cce75dce9d38
|
Shell
|
start-jsk/rtmros_gazebo
|
/hrpsys_gazebo_atlas/scripts/update-software.sh
|
UTF-8
| 1,097 | 3.140625 | 3 |
[] |
no_license
|
#!/bin/bash -ex
#
# This script updates following programs:
# drcsim
# gazebo
# hrpsys
# hrpsys_ros_bridge
# hrpsys_gazebo_atlas
# - also add sensors to dae file
#
source ~/ros/${ROS_DISTRO}/setup.sh
export ROS_PACKAGE_PATH_ORG=$ROS_PACKAGE_PATH
source /usr/share/drcsim/setup.sh
export ROS_PACKAGE_PATH=$ROS_PACKAGE_PATH_ORG:$ROS_PACKAGE_PATH
export ROS_PACKAGE_PATH=`echo $(echo $ROS_PACKAGE_PATH | sed -e "s/:/\n/g" | awk '!($0 in A) && A[$0] = 1' | grep -v "opt/ros"; echo $ROS_PACKAGE_PATH | sed -e "s/:/\n/g" | awk '!($0 in A) && A[$0] = 1' | grep "opt/ros") | sed -e "s/ /:/g"`
sudo apt-get update
sudo apt-get install drcsim gazebo
cd `rospack find roseus`/..
svn up
cd `rospack find hrpsys`/..
svn up
cd hrpsys/build/hrpsys-base-source/
svn up
cd `rospack find hrpsys`
rm -rf installed
make
cd `rospack find hrpsys_ros_bridge`
make
cd `rospack find hrpsys_gazebo_atlas`
rosmake
if [ `grep gyro ./models/atlas.dae | wc -l` -eq 0 ]; then
mv ./models/atlas.dae ./models/atlas.dae.bak
./scripts/add_sensor_to_collada.py ./models/atlas.dae.bak > ./models/atlas.dae
make
fi
| true |
4583156c04cf59e27ffa9c4a70663fcfdb96e1b7
|
Shell
|
neobht/magos-scripts
|
/90-magos-patches/usr/lib/magos/rootfs-patches/MagOS-thunderbird.sh
|
UTF-8
| 967 | 2.984375 | 3 |
[] |
no_license
|
#!/bin/bash
[ -e usr/bin/thunderbird -o -h usr/bin/thunderbird ] || exit 0
PFP=$(ls -d1 usr/lib/thunderbird-* 2>/dev/null| tail -1)
[ "$PFP" = "" ] && PFP=$(ls -d1 usr/lib64/thunderbird-* | tail -1)
[ -d "$PFP"/defaults ] || exit 0
mkdir -p "$PFP"/defaults/profile
ln -sf /usr/share/magos/mozilla/thunderbird-prefs.js "$PFP"/defaults/profile/prefs.js
#Register ru locale when it is added manually
LIGHTNINGP='usr/lib/mozilla/extensions/{3550f703-e582-4d05-9a08-453d09bdfdc6}/{e2fda1a4-762b-4020-b5ad-a41df1933103}'
[ -d $LIGHTNINGP ] || LIGHTNINGP='usr/lib64/mozilla/extensions/{3550f703-e582-4d05-9a08-453d09bdfdc6}/{e2fda1a4-762b-4020-b5ad-a41df1933103}'
if [ -f $LIGHTNINGP/chrome/lightning-ru.jar ] ;then
if ! grep -q lightning-ru $LIGHTNINGP/chrome.manifest ;then
cat >>$LIGHTNINGP/chrome.manifest <<EOF
locale calendar ru jar:chrome/calendar-ru.jar!/locale/ru/calendar/
locale lightning ru jar:chrome/lightning-ru.jar!/locale/ru/lightning/
EOF
fi
fi
exit 0
| true |
25ffc13eacb28b22d4d88ab09acbd72a15ce5319
|
Shell
|
kapamaroo/fluidanimate_cuda_version
|
/tools/compile.sh
|
UTF-8
| 1,022 | 3.5 | 4 |
[] |
no_license
|
#!/bin/bash
CC="g++"
if [ `which my_nvcc` ]; then
NVCC="my_nvcc"
NVCCFLAGS=""
echo "my_nvcc found, use that"
else
NVCC="nvcc"
case $HOSTNAME in
blacksunhat)
#my machine
SDK_PATH="/opt"
;;
*)
SDK_PATH="~"
esac
SDK_HOME="$SDK_PATH/NVIDIA_GPU_Computing_SDK"
ARCH="`uname -i`"
if [ $ARCH = 'unknown' ]; then
echo "unkown platform, set to i386"
ARCH="i386"
fi
NVCCFLAGS="-I $SDK_HOME/C/common/inc/ \
-L $SDK_HOME/C/lib/ -l cutil_$ARCH \
-lcudart \
-O4"
fi
CPPFLAGS="-lpthread -O4"
NVCCFLAGS="$NVCCFLAGS $@"
BINDIR="bin"
TOOLSDIR="tools"
CPU_BIN="fluidanimate_cpu"
GPU_BIN="fluidanimate_gpu"
CMP_BIN="checkfiles"
CPU_SRC_CODE="./src/pthreads.cpp"
GPU_SRC_CODE="./src/cuda.cu"
CMP_SRC_CODE="./src/cmp.cpp"
mkdir -p $BINDIR
$CC $CPPFLAGS $CPU_SRC_CODE -o ./$BINDIR/$CPU_BIN
$NVCC $NVCCFLAGS $GPU_SRC_CODE -o ./$BINDIR/$GPU_BIN
$CC $CPPFLAGS $CMP_SRC_CODE -o ./$TOOLSDIR/$CMP_BIN
| true |
0302a3d5f08f389c894cbc7c0bebfd8a05304687
|
Shell
|
armoin2018/hiveid-ap
|
/dnsmasq_find_leases.sh
|
UTF-8
| 391 | 2.578125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
##########################################################
#### Author: Blaine McDonnell (blaine@armoin.com) ####
#### Usage: sudo ./dnsmasq_find_leases.sh ####
#### Description: Find the active dnsmasq lease file ####
#### Version: 0.1 ####
##########################################################
find /. -name '*.leases' 2>/dev/null | head -1
| true |
195000ef085482e1772a373d1fa55ee06e2ef4c1
|
Shell
|
webfrogs/ToolKit
|
/bootstrap/manjaro.sh
|
UTF-8
| 2,871 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/sh
set -e
cd $(dirname $0)
cd ..
RootPath=$(pwd)
if test "$(uname -s)" != "Linux"; then
echo "[ERROR] Current OS is not Linux"
exit 2
fi
if test ! -x "$(command -v pacman)"; then
echo "[ERROR] pacman command is not found."
exit 2
fi
# check proxys
# echo "==> checking proxy"
# read -p "Should use proxy? [y/n]: " use_proxy
# if test "${use_proxy}" = "y"; then
# echo "Default http proxy address is 127.0.0.1:1089"
# read -p "Input http proxy addres, press enter to use default: " proxy_addr
# if test -z "${proxy_addr}"; then
# proxy_addr="127.0.0.1:1089"
# fi
# export http_proxy="http://${proxy_addr}"
# export https_proxy="http://${proxy_addr}"
# echo "http proxy is set to 'http://${proxy_addr}'"
# else
# echo "No proxy is set."
# fi
echo "==> start to bootstrap manjaro."
read -p "Want to use pacman cn mirror? [y/n]: " use_pacman_cn_mirror
if test "${use_pacman_cn_mirror}" = "y"; then
./installer/manjaro/pacman_cn_mirror.sh
fi
echo "==> add archlinuxcn pacman source."
if grep -Fxq "[archlinuxcn]" /etc/pacman.conf; then
echo "[INFO] archlinux cn already exists in file '/etc/pacman.conf'"
else
echo "[archlinuxcn]" | sudo tee -a /etc/pacman.conf
echo "SigLevel = Optional TrustedOnly" | sudo tee -a /etc/pacman.conf
echo 'Server = http://mirrors.tuna.tsinghua.edu.cn/archlinuxcn/$arch' | sudo tee -a /etc/pacman.conf
sudo pacman -Syy
sudo pacman -S archlinuxcn-keyring haveged --noconfirm
# fix archlinuxcn key can not import
# see https://www.archlinuxcn.org/gnupg-2-1-and-the-pacman-keyring/
sudo systemctl enable --now haveged
sudo rm -rf /etc/pacman.d/gnupg
sudo pacman-key --init
sudo pacman-key --populate manjaro
sudo pacman-key --populate archlinux
sudo pacman-key --populate archlinuxcn
fi
# install necessary packages
echo "==> install necessary packages."
sudo pacman -S --noconfirm \
base-devel \
git vim zip tree unzip jq \
terminator hexchat \
resolvconf net-tools \
dnsutils iputils socat \
blueman network-manager-applet
# TODO: unzip-iconv is not installed
# install chinese input method
sudo pacman -S fcitx-im fcitx-configtool fcitx-sunpinyin --noconfirm
# fix i3wm dmenu input issue
if test ! -e "/etc/environment"; then
sudo touch /etc/environment
fi
if test "$(grep -c '^GTK_IM_MODULE=fcitx' /etc/environment)" = "0"; then
echo "GTK_IM_MODULE=fcitx" | sudo tee -a /etc/environment > /dev/null
fi
if test "$(grep -c '^QT_IM_MODULE=fcitx' /etc/environment)" = "0"; then
echo "QT_IM_MODULE=fcitx" | sudo tee -a /etc/environment > /dev/null
fi
if test "$(grep -c '^XMODIFIERS=@im=fcitx' /etc/environment)" = "0"; then
echo "XMODIFIERS=@im=fcitx" | sudo tee -a /etc/environment > /dev/null
fi
sudo pacman -S flameshot dunst --noconfirm
mkdir -p ${HOME}/Pictures/screenshots
./configs/dunst/config.sh
./configs/git/config.sh
#./installer/fzf/install.sh
| true |
5bf831e7b0be53904f00209a48835148a850ae94
|
Shell
|
IanYbarra2000/Bash-Guessing-Game
|
/Guessing Game.sh
|
UTF-8
| 581 | 3.84375 | 4 |
[] |
no_license
|
#!/bin/bash
answer=$(($RANDOM%129))
declare -i guess
guess=129
declare -i score
score=0
until (($answer==$guess))
do
read -p "Guess the number!(0-128) " guess
score=score+1
if (($guess>$answer))
then
echo "High guess"
elif (($guess<$answer))
then
echo "Low guess"
fi
done
echo "Congratulations, you guessed the correct number in "$score" tries"
read -p "Enter your name: " name
Score_File="scores.txt"
echo $name $score >> $Score_File
sort -k2 -n -o $Score_File $Score_File
echo "Top Scores: "
sed '1q;d' scores.txt
sed '2q;d' scores.txt
sed '3q;d' scores.txt
| true |
d55f0d6e15fd5bb8111e9cae8322f186876a1825
|
Shell
|
anubhavvs/GradSchool
|
/SEMESTER_5/S5D10_1.sh
|
UTF-8
| 147 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/bash
# find the factorial of a number
echo "Enter a number"
read num
fact=1
for((i=1;i<=num;i++))
do
fact=$((fact*i))
done
echo "Factorial of $num is $fact"
| true |
963b096ae15f19f82fb4fbe098afda83d8f53571
|
Shell
|
sunakan/sunady2020-rocket-chat
|
/rocketchat/start.sh
|
UTF-8
| 150 | 2.890625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
for i in `seq 1 10`; do
node main.js && s=$? && break || s=$?;
echo "Attempt $i: waiting 5 seconds...";
sleep 5;
done;
exit $s
| true |
0d3ee48518751eb22371ce07ec7bf9dd56852e3f
|
Shell
|
loknjinu13/Week6
|
/usb_virtualbox.md
|
UTF-8
| 485 | 2.703125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# How to install USB in VirtualBox
cat << _EOF_
<html>
<head>
<title>
## **How to Install USB in VirtualBox**
</title>
</head>
<body>
1. Download and install the VirtualBox Extension Pack on Windows.
2. After installing, reboot the *host OS* for the changes to take effect.
3. Under Settings in VirtualBox, *go to* USB and click *add USB*, selecting the right version for your USB model.
4. Once the virtual machine's OS starts, plug in the USB and it will appear.
</body>
</html>
_EOF_
| true |
75404d0c077babf592cac9c2eb644730916080ef
|
Shell
|
wkmor1/eco-dev
|
/userconf.sh
|
UTF-8
| 455 | 3.078125 | 3 |
[] |
no_license
|
#!/bin/bash
USER=${USER:=rstudio}
PASSWORD=${PASSWORD:=rstudio}
USERID=${USERID:=1000}
ROOT=${ROOT:=FALSE}
export HOME=/home/$USER
useradd -u $USERID $USER
addgroup $USER staff
echo "$USER:$PASSWORD" | chpasswd
chmod go-rx /usr/bin/passwd
mkdir -p $HOME
if [ "$ROOT" == "TRUE" ]
then
adduser $USER sudo && echo "%sudo ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
fi
echo "PATH=${PATH}" >> /usr/local/lib/R/etc/Renviron
chown -R $USER:$USER $HOME
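# Example (added for illustration; values are assumptions). Typically run as
# root inside a container entrypoint:
#   USER=analyst PASSWORD=s3cret USERID=1001 ROOT=TRUE ./userconf.sh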
| true |
8781879a5b2c1be8ef2e3461cae16505b123ebc0
|
Shell
|
juanvictor/ShellScript
|
/datos_linux/datos_linux.sh
|
UTF-8
| 3,187 | 3.453125 | 3 |
[] |
no_license
|
#!/bin/bash
# RAM MEMORY
ram_total=$(free -m | grep Mem | tr -s ' ' | cut -d ' ' -f2)
ram_usado=$(free -m | grep Mem | tr -s ' ' | cut -d ' ' -f3)
ram_usado_porcentaje=$(expr $ram_usado \* 100 / $ram_total)
ram_libre_porcentaje=$(expr 100 - $ram_usado_porcentaje)
# CONNECTIVITY
conectividad="NO"
myping=$(ping -c 1 google.com 2> /dev/null)
if [ $? -eq 0 ] ; then
conectividad="OK"
fi
# DISK SPACE
disco_total=$(df -h / | grep /dev | tr -s ' ' ';' | cut -d ';' -f2)
disco_usado=$(df -h / | grep /dev | tr -s ' ' ';' | cut -d ';' -f5)
disco_usado_sin_porcentaje=$(df -h / | grep /dev | tr -s ' ' ';' | cut -d ';' -f5 | cut -d '%' -f1)
disco_libre=$(expr 100 - $disco_usado_sin_porcentaje)
#CPU
cantidad_cpu=$(nproc)
cpu_usado=$(top -n 1 | grep %Cpu | tr -s ' ' | cut -d ' ' -f2)
cpu_libre=$(top -n 1 | grep %Cpu | tr -s ' ' | cut -d ' ' -f8)
# SYSTEM INFORMATION
sistema=$(hostname -s)
dominio=$(hostname -d)
version=$(cat /etc/issue | cut -d ' ' -f 1-3)
nucleo=$(uname -r)
arquitectura=$(uname -m)
# USERS
usuarios=$(cat /etc/passwd | wc -l)
usuarios_activo=$(uptime | tr -s ' ' | cut -d ' ' -f5)
# PROCESSES
numero_procesos=$(top -n 1 | grep Ta | tr -s ' ' | cut -d ' ' -f2)
procesos_ejecutando=$(top -n 1 | grep Ta | tr -s ' ' | cut -d ' ' -f4)
procesos_durmiendo=$(top -n 1 | grep Ta | tr -s ' ' | cut -d ' ' -f6)
procesos_parados=$(top -n 1 | grep Ta | tr -s ' ' | cut -d ' ' -f8)
procesos_zombie=$(top -n 1 | grep Ta | tr -s ' ' | cut -d ' ' -f10)
# NETWORK
red_ip=$(ip route show | grep kernel | cut -d ' ' -f9)
red_mascara=$(ip route show | grep kernel | cut -d '/' -f2 | cut -d ' ' -f1)
red_enlace=$(ip route show | grep default | tr -s ' ' | cut -d ' ' -f3)
red_dns=$(cat /etc/resolv.conf | grep nameserver | tr -s ' ' | sed -n 1p | cut -d ' ' -f2)
# red_bytes_tx=$(ifconfig | tr ':' ' ' | grep TX | sed -n 1p | tr -s ' ' | cut -d ' ' -f4)
# red_bytes_rx=$(ifconfig | tr ':' ' ' | grep RX | sed -n 1p | tr -s ' ' | cut -d ' ' -f4)
echo -e "--------------------------------------------------------"
echo -e "MEMORIA\t\t\t| INFORMACION DEL SISTEMA"
echo -e "Total:\t\t$ram_total\t| Sistema:\t$sistema"
echo -e "Usado:\t\t$ram_usado_porcentaje %\t| Dominio:\t$dominio"
echo -e "Libre:\t\t$ram_libre_porcentaje %\t| Versión:\t$version"
echo -e "\t\t\t| Núcleo:\t$nucleo"
echo -e "ESPACIO EN DISCO RAIZ\t| Arquitectura:\t$Arquitectura"
echo -e "Total:\t\t$disco_total\t|"
echo -e "Usado:\t\t$disco_usado\t| USUARIOS"
echo -e "Libre:\t\t$disco_libre%\t| Numero de usuarios:\t$usuarios"
echo -e "\t\t\t| Usuarios activos:\t$usuarios_activo"
echo -e "CPU\t\t\t| "
echo -e "Cantidad:\t$cantidad_cpu\t| PROCESOS"
echo -e "Usado:\t\t$cpu_usado %\t| Total:\t$numero_procesos"
echo -e "Inactivo:\t$cpu_libre %\t| Ejecutando:\t$procesos_ejecutando"
echo -e "\t\t\t| Durmiendo:\t$procesos_durmiendo"
echo -e "\t\t\t| Parados:\t$procesos_parados"
echo -e "\t\t\t| Zombies:\t$procesos_zombie"
echo -e "RED"
echo -e "IP:\t\t$red_ip"
echo -e "Mascara:\t$red_mascara"
echo -e "Enlace:\t\t$red_enlace"
echo -e "DNS:\t\t$red_dns"
# echo -e "Bytes TX:\t$red_bytes_tx"
# echo -e "Bytes RX:\t$red_bytes_rx"
echo ""
echo -e "Conectividad:\t$conectividad"
| true |
f584f7c6b0d050a616a9a98ec2a34a9a982a2371
|
Shell
|
vincentjoseph/docker-psysh
|
/build.sh
|
UTF-8
| 711 | 4.125 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -e
function contains {
for a in $1; do
if [[ "$2" = $a ]];then
return 0
fi
done
return 1
}
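# e.g. contains "5.4 5.5 5.6" "5.5" returns 0; glob patterns in the list are honoured by [[ $2 = $a ]].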
availables="5.4 5.5 5.6 7.0 latest"
if [ "$1" = "all" ]; then
set -- "$availables"
fi
for version in $@; do
if ! contains "$availables" "$version"; then
echo >&2 "$version not supported. Ignored."
continue
fi
set -x
mkdir -p $version
cp -r conf $version/
cp docker-entrypoint.sh $version/
echo "# generated by $(basename $0)" > "$version/Dockerfile"
sed "s/%%VERSION%%/$version/g" Dockerfile.template >> "$version/Dockerfile"
docker build -t psy/psysh:$version $version
rm -fr $version
done
| true |
7ef4c1c080fd01057d1ab7e07eb13b4eb94eb1cd
|
Shell
|
antoni/dotfiles
|
/install/install_rust.sh
|
UTF-8
| 308 | 2.828125 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -ue
# Install Rust toolchain, noninteractively
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
# Source for current session
source "$HOME/.cargo/env"
# Run it, so that we have all components downloaded right away (it does so upon first launch)
cargo --help
| true |
e9392e0a4a1e468c930e40ec94b137689677794b
|
Shell
|
kyleburton/sandbox
|
/selenium/run-selenium-rc.sh
|
UTF-8
| 4,197 | 3.453125 | 3 |
[] |
no_license
|
PROF="3o74fgym.Selenium1"
PROF_DIR="/Users/kburton/Library/Application Support/Firefox/Profiles/$PROF"
if [ -e "$PROF_DIR" ]; then
echo "copying fresh profile ($PROF) from $PROF_DIR"
test -d "$PROF" && rm -rf "$PROF"
cp -r "$PROF_DIR" ./
fi
# Usage: java -jar selenium-server.jar [-interactive] [options]
#
# -port <nnnn>: the port number the selenium server should use (default 4444)
# -timeout <nnnn>: an integer number of seconds before we should give up
# -interactive: puts you into interactive mode. See the tutorial for more details
# -singleWindow: puts you into a mode where the test web site
# executes in a frame. This mode should only be selected if the
# application under test does not use frames.
# -profilesLocation: Specifies the directory that holds the profiles
# that java clients can use to start up selenium. Currently
# supported for Firefox only.
# -forcedBrowserMode <browser>: sets the browser mode to a single
# argument (e.g. "*iexplore") for all sessions, no matter what is
# passed to getNewBrowserSession
# -forcedBrowserModeRestOfLine <browser>: sets the browser mode to
# all the remaining tokens on the line (e.g. "*custom
# /some/random/place/iexplore.exe") for all sessions, no matter what
# is passed to getNewBrowserSession
# -userExtensions <file>: indicates a JavaScript file that will be
# loaded into selenium
# -browserSessionReuse: stops re-initialization and spawning of the
# browser between tests
# -avoidProxy: By default, we proxy every browser request; set this
# flag to make the browser use our proxy only for URLs containing
# '/selenium-server'
# -firefoxProfileTemplate <dir>: normally, we generate a fresh empty
# Firefox profile every time we launch. You can specify a directory
# to make us copy your profile directory instead.
# -debug: puts you into debug mode, with more trace information and
# diagnostics on the console
# -browserSideLog: enables logging on the browser side; logging
# messages will be transmitted to the server. This can affect
# performance.
# -ensureCleanSession: If the browser does not have user profiles,
# make sure every new session has no artifacts from previous
# sessions. For example, enabling this option will cause all user
# cookies to be archived before launching IE, and restored after IE
# is closed.
# -trustAllSSLCertificates: Forces the Selenium proxy to trust all
# SSL certificates. This doesn't work in browsers that don't use the
# Selenium proxy.
# -log <logFileName>: writes lots of debug information out to a log
# file
# -htmlSuite <browser> <startURL> <suiteFile> <resultFile>: Run a
# single HTML Selenese (Selenium Core) suite and then exit
# immediately, using the specified browser (e.g. "*firefox") on the
# specified URL (e.g. "http://www.google.com"). You need to specify
# the absolute path to the HTML test suite as well as the path to the
# HTML results file we'll generate.
# -proxyInjectionMode: puts you into proxy injection mode, a mode
# where the selenium server acts as a proxy server for all content
# going to the test application. Under this mode, multiple domains
# can be visited, and the following additional flags are supported:
#
# -dontInjectRegex <regex>: an optional regular expression that
# proxy injection mode can use to know when to bypass injection
# -userJsInjection <file>: specifies a JavaScript file which will
# then be injected into all pages
# -userContentTransformation <regex> <replacement>: a regular
# expression which is matched against all test HTML content; the
# second is a string which will replace matches. These flags can
# be used any number of times. A simple example of how this could
# be useful: if you add "-userContentTransformation https http"
# then all "https" strings in the HTML of the test application will
# be changed to be "http".
java -jar /opt/algorithmics.com/algo-connect/selenium-server-1.0.1/selenium-server.jar \
-firefoxProfileTemplate ./3o74fgym.Selenium1
| true |
58fafed348cafb067b2c3dbc9deddfac65a36fda
|
Shell
|
TonyVlcek/config-files
|
/.bashrc
|
UTF-8
| 2,669 | 2.984375 | 3 |
[] |
no_license
|
# _______ _ ____ _
#|__ __| ( ) | _ \ | |
# | | ___ _ __ _ _|/ ___ | |_) | __ _ ___| |__
# | |/ _ \| '_ \| | | | / __| | _ < / _` / __| '_ \
# | | (_) | | | | |_| | \__ \ | |_) | (_| \__ \ | | |
# |_|\___/|_| |_|\__, | |___/ |____/ \__,_|___/_| |_|
#................ __/ |
# |___/
# If not running interactively, don't do anything
case $- in
*i*) ;;
*) return;;
esac
#History setup
HISTCONTROL=ignoreboth # ignore duplicates and lines starting with a space
shopt -s histappend #append to the history file
HISTSIZE=1000 # history length
HISTFILESIZE=2000 # history length
shopt -s checkwinsize # window size update
shopt -s globstar # enable ** - match file in subtree
# Less setup
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)" ## make less more friendly for non-text input files
# Less colors
export LESS_TERMCAP_mb=$(printf '\e[01;31m') # enter blinking mode - red
export LESS_TERMCAP_md=$(printf '\e[01;32m') # enter double-bright mode - bold, green
export LESS_TERMCAP_me=$(printf '\e[0m') # turn off all appearance modes (mb, md, so, us)
export LESS_TERMCAP_se=$(printf '\e[0m') # leave standout mode
export LESS_TERMCAP_so=$(printf '\e[01;33m') # enter standout mode - yellow
export LESS_TERMCAP_ue=$(printf '\e[0m') # leave underline mode
export LESS_TERMCAP_us=$(printf '\e[04;36m') # enter underline mode - cyan
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# Enable programmable completion features (you don't need to enable this if
# it's already enabled in /etc/bash.bashrc and /etc/profile sources it)
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
# COLORS SETUP
. ~/.bash_colors
# Git Setup
. ~/.git-prompt.sh
export GIT_PS1_SHOWDIRTYSTATE=1
export GIT_PS1_SHOWSTASHSTATE=1
export GIT_PS1_SHOWUNTRACKEDFILES=1
export GIT_PS1_SHOWUPSTREAM="auto verbose"
# GCC setup
export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01' # enable colored errors
GCC_INCLUDE_DIR=/usr/include/
export GCC_INCLUDE_DIR
# PROMPT SETUP
PS1="\[${Yellow}\][\t] \[${BGreen}\]\u@\h\[${Color_Off}\]:\[${BBlue}\]\w\[${Yellow}\]"'$(__git_ps1 "(%s)")'"\[${Color_Off}\]\$ "
# Load ALIASES
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
# xset setup
xset r rate 200 50 # speed up keyboard response
| true |
e1f0aae3c6651b583fa358d0d8b87fe7b7c10b79
|
Shell
|
gingerzoealex/dotfiles
|
/.bash_profile
|
UTF-8
| 1,361 | 3.0625 | 3 |
[] |
no_license
|
on_reith () {
[ "BBC On Network" = "$(/usr/sbin/scselect 2>&1 | grep '^ \*' | sed 's/.*(\(.*\))/\1/')" ]
}
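# on_reith succeeds when the active macOS network location reported by scselect is "BBC On Network".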
if on_reith;
then
echo "On Reith"
export http_proxy="http://www-cache.reith.bbc.co.uk:80"
export https_proxy="$http_proxy"
export HTTP_PROXY="$http_proxy"
export HTTPS_PROXY="$http_proxy"
export ALL_PROXY="$http_proxy"
export no_proxy=localhost,127.0.0.1
export NO_PROXY=$no_proxy
ln -fs ~/.ssh/on-reith-config ~/.ssh/config
if [[ -n $(which npm) ]]; then
npm config set proxy $http_proxy
npm config set https-proxy $https_proxy
fi
if [[ -n $(which git) ]]; then
git config --global http.proxy $http_proxy
git config --global https.proxy $https_proxy
fi
else
echo "Off Reith"
unset http_proxy
unset https_proxy
unset HTTP_PROXY
unset HTTPS_PROXY
unset ALL_PROXY
unset no_proxy
unset NO_PROXY
ln -fs ~/.ssh/off-reith-config ~/.ssh/config
if [[ -n $(which npm) ]]; then
npm config rm proxy
npm config rm https-proxy
fi
if [[ -n $(which git) ]]; then
git config --global --unset http.proxy
git config --global --unset https.proxy
fi
fi
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
| true |
fda3c71cfad78be2c64ebda8efeed2b4b95acc5e
|
Shell
|
BackupTheBerlios/sorcerer-svn
|
/trunk/grimoire/wpa_supplicant.d/init.d/wpa_supplicant
|
UTF-8
| 6,693 | 3.703125 | 4 |
[] |
no_license
|
#!/bin/bash
### BEGIN INIT INFO
# Default-Mode: 500
# Required-Start: proc wpa_priv var_run
# Should-Start: udev
# Required-Stop: kill
# Default-Start: S 2 3 4 5
# Default-Stop: 0 6
# Short-Description: Starts wpa_supplicant for all wireless interfaces
### END INIT INFO
. /lib/lsb/init-functions
pnw(){
[ -f /proc/net/wireless ] &&
tr ' ' '\n' < /proc/net/wireless |
sed 's/://p;d'
}
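# pnw prints the wireless interface names from /proc/net/wireless (tokens containing ':', colon stripped).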
guess_interface(){
if [ -z "$INTERFACE" ]
then INTERFACE="$( pnw | sed "1p;d" )"
fi
}
only start stop configure status
name wpa_supplicant
server /sbin/wpa_supplicant
guess_interface
config /etc/wpa_supplicant/wpa_supplicant.conf
options "-B -Dwext -i$INTERFACE -c $CONF"
if [ "$1" == start ]; then
if [ -z "$INTERFACE" ] || ! /sbin/ip link show grep -q ": $INTERFACE:"; then
log_warning_msg "Net device is not obvious. Not starting."
trap - EXIT; exit 0
fi
fi
write_conf(){
output(){
echo "ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=wpa_priv"
echo "network={"
echo "ssid=\"$SSID\""
echo "scan_ssid=$SCAN_SSID"
echo "key_mgmt=$KEY_MGMT"
echo "psk=\"$PSK\""
echo "}"
}
mkdir -p /etc/wpa_supplicant/
local c=/etc/wpa_supplicant/wpa_supplicant.conf
output > $c
chmod 600 $c
}
configure(){
save(){
output(){
echo "CONFIGURED=true"
echo "SSID=$SSID"
echo "SCAN_SSID=$SCAN_SSID"
echo "KEY_MGMT=$KEY_MGMT"
echo "PSK=$PSK"
echo "INTERFACE=$INTERFACE"
}
mkdir -p /etc/init.d/conf.d
local c=/etc/init.d/conf.d/wpa_supplicant
[ -f $c ] &&
sed -i '/CONFIGURED=/d
/SSID=/d
/SCAN_SSID=/d
/KEY_MGMT=/d
/PSK=/d
/INTERFACE=/d' $c
output > $c
chmod 600 $c
write_conf
}
get_ssid(){
BACKTITLE=$"WPA Configuration Menu"
TITLE=$"WPA Variable Selection"
HELP=$"Enter the same name for network as router uses"
SSID="$( dialog \
--backtitle "$BACKTITLE" \
--title "$TITLE" \
--stdout \
--no-cancel \
--inputbox "$HELP" 0 0 \
"$SSID"
)"
}
get_scan_ssid(){
BACKTITLE=$"WPA Configuration Menu"
TITLE=$"WPA Variable Selection"
HELP=$"Does the router broadcast the SSID?"
if ! [ "$SCAN_SSID" == 1 ]
then local DEFAULT=--defaultno
fi
if dialog \
--backtitle "$BACKTITLE" \
--title "$TITLE" \
$DEFAULT \
--yesno "$HELP" 0 0
then SCAN_SSID=1
else SCAN_SSID=0
fi
}
get_psk(){
BACKTITLE=$"WPA Configuration Menu"
TITLE=$"WPA Variable Selection"
HELP=$"Enter same network passphrase as router uses"
PSK="$( dialog \
--backtitle "$BACKTITLE" \
--title "$TITLE" \
--stdout \
--no-cancel \
--inputbox "$HELP" 0 0 \
"$PSK"
)"
}
get_key_mgmt(){
BACKTITLE=$"WPA Configuration Menu"
TITLE=$"WPA Variable Selection"
HELP=$"Select type of key management router uses"
OTHER=$"Menu driven configuration of non WPA-PSK key management
is not supported by this simple menu driven configuration interface.
Please read the wpa_supplicant.conf manual page
and then appropriately edit /etc/wpa_supplicant/wpa_supplicant.conf
after selecting the interface and exiting this configuration menu."
KEY_MGMT="$( dialog \
--backtitle "$BACKTITLE" \
--title "$TITLE" \
--stdout \
--no-cancel \
--menu "$HELP" 0 0 0 \
WPA-PSK "" \
OTHER ""
)"
if [ OTHER = "$KEY_MGMT" ]; then
dialog --backtitle "$BACKTITLE" \
--title "$TITLE" \
--stdout \
--no-cancel \
--msgbox "$OTHER" 0 0
fi
}
interface_available(){
local HELP=$"Wireless interfaces are not available.
When wireless interfaces are available please re-run
# /etc/init.d/wpa_supplicant configure"
if ! pnw | grep -q .; then
dialog --backtitle "$BACKTITLE" \
--title "$TITLE" \
--stdout \
--no-cancel \
--msgbox "$HELP" 0 0
false
fi
}
get_interface(){
BACKTITLE=$"WPA Configuration Menu"
TITLE=$"WPA Variable Selection"
HELP=$"Select wireless interface name"
interface_available &&
KEY_MGMT="$( dialog \
--backtitle "$BACKTITLE" \
--title "$TITLE" \
--stdout \
--no-cancel \
--default-item "$INTERFACE" \
--menu "$HELP" 0 0 0 \
$( pnw | sed -r "s:(.*):\1\tInterface:" )
)"
}
wpa_menu(){
BACKTITLE=$"WPA Configuration Menu"
TITLE=$"WPA Variable Selection"
HELP=$"Select field to adjust"
OUT=$"Save and Exit"
SELECT=$"Select"
dialog \
--backtitle "$BACKTITLE" \
--title "$TITLE" \
--stdout \
--ok-label "$SELECT" \
--cancel-label "$OUT" \
--menu "$HELP" \
0 0 0 \
SSID "$SSID" \
SCAN_SSID "$SCAN_SSID" \
KEY_MGMT "$KEY_MGMT" \
PSK "$PSK" \
INTERFACE "$INTERFACE"
}
while :; do
case $( wpa_menu ) in
SSID) get_ssid ;;
SCAN_SSID) get_scan_ssid ;;
KEY_MGMT) get_key_mgmt ;;
PSK) get_psk ;;
INTERFACE) get_interface ;;
*) save; return 0 ;;
esac
done
}
now(){
if ! dialog --title "WPA supplicant configuration" --timeout 60 \
--yesno "WPA supplicant can be configured now.
Or configuration and starting later is possible
by executing the commands:
# /etc/init.d/wpa_supplicant configure;
# /etc/init.d/wpa_supplicant start
If deployed then this prompt will not appear on the next boot.
Configure WPA supplicant now?" 0 0
then
mkdir -pm 700 /etc/init.d/conf.d
echo CONFIGURED=true > /etc/init.d/conf.d/wpa_supplicant
false
fi
}
# if [ "$1" == start ]
# then pnw | grep -q . || exit 0
# fi
# param(){
# local N
# local E=/etc/wpa_supplicant
# pnw |
# while read; do
# if [ -f $E/wpa$REPLY.conf ]
# then CONF=" -c $E/wpa$REPLY.conf"
# elif [ -f $E/wpa_supplicant.conf ]
# then CONF=" -c $E/wpa_supplicant.conf"
# fi
# echo "$N-Dwext -i$REPLY$CONF"
# N="-N "
# done
# }
# Yes this odd code at the bottom should be at the bottom
# because it requires functions defined above.
if [ start == "$1" ]; then
if [ -z "$CONFIGURED" ] &&
! [ -f /etc/wpa_supplicant/wpa_supplicant.conf ]
then now && configure
fi
if [ -n "$SSID" ] && ! [ -f /etc/wpa_supplicant/wpa_supplicant.conf ]
then write_conf
fi
fi
| true |
406ae70f7481fe5cf40b9ac232abbb507be1b6af
|
Shell
|
Kentarou-linux/dotfiles
|
/.zshrc
|
UTF-8
| 6,570 | 3.03125 | 3 |
[] |
no_license
|
# Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc.
# Initialization code that may require console input (password prompts, [y/n]
# confirmations, etc.) must go above this block; everything else may go below.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi
# ~/.zshrc file for zsh interactive shells.
# see /usr/share/doc/zsh/examples/zshrc for examples
export ZSH="/home/kebab/.oh-my-zsh"
ZSH_THEME="agnoster"
plugins=(
git
zsh-autosuggestions
zsh-syntax-highlighting
)
source $ZSH/oh-my-zsh.sh
setopt autocd # change directory just by typing its name
setopt correct # auto correct mistakes
setopt interactivecomments # allow comments in interactive mode
setopt magicequalsubst # enable filename expansion for arguments of the form ‘anything=expression’
setopt nonomatch # hide error message if there is no match for the pattern
setopt notify # report the status of background jobs immediately
setopt numericglobsort # sort filenames numerically when it makes sense
setopt promptsubst # enable command substitution in prompt
setopt histignorealldups
WORDCHARS=${WORDCHARS//\/} # Don't consider certain characters part of the word
# hide EOL sign ('%')
PROMPT_EOL_MARK=""
# configure key keybindings
#bindkey -e # emacs key bindings
bindkey ' ' magic-space # do history expansion on space
bindkey '^[[3;5~' kill-word # ctrl + Supr
bindkey '^[[3~' delete-char # delete
bindkey '^[[1;5C' forward-word # ctrl + ->
bindkey '^[[1;5D' backward-word # ctrl + <-
bindkey '^[[5~' beginning-of-buffer-or-history # page up
bindkey '^[[6~' end-of-buffer-or-history # page down
bindkey '^[[H' beginning-of-line # home
bindkey '^[[F' end-of-line # end
bindkey '^[[Z' undo # shift + tab undo last action
# enable completion features
autoload -Uz compinit
autoload -Uz colors
colors
compinit -d ~/.cache/zcompdump
zstyle ':completion:*:*:*:*:*' menu select
zstyle ':completion:*' matcher-list 'm:{a-zA-Z}={A-Za-z}' # case insensitive tab completion
# History configurations
HISTFILE=~/.zsh_history
HISTSIZE=10000
SAVEHIST=10000
setopt hist_expire_dups_first # delete duplicates first when HISTFILE size exceeds HISTSIZE
setopt hist_ignore_dups # ignore duplicated commands history list
setopt hist_ignore_space # ignore commands that start with space
setopt hist_verify # show command with history expansion to user before running it
setopt share_history # share command history data
# force zsh to show the complete history
alias history="history 0"
# enable color support of ls, less and man, and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
alias diff='diff --color=auto'
alias ip='ip --color=auto'
export LESS_TERMCAP_mb=$'\E[1;31m' # begin blink
export LESS_TERMCAP_md=$'\E[1;36m' # begin bold
export LESS_TERMCAP_me=$'\E[0m' # reset bold/blink
export LESS_TERMCAP_so=$'\E[01;33m' # begin reverse video
export LESS_TERMCAP_se=$'\E[0m' # reset reverse video
export LESS_TERMCAP_us=$'\E[1;32m' # begin underline
export LESS_TERMCAP_ue=$'\E[0m' # reset underline
# Take advantage of $LS_COLORS for completion as well
zstyle ':completion:*' list-colors "${(s.:.)LS_COLORS}"
#zstyle ':completion:*' list-colors "exfxcxdxbxegedabagacad"
fi
# some more ls aliases
alias ll='ls -l'
alias la='ls -A'
alias l='ls -CF'
alias ma="cd /home/kebab/eclipse-workspace/mahjong_test/src/test"
# enable auto-suggestions based on the history
if [ -f /usr/share/zsh-autosuggestions/zsh-autosuggestions.zsh ]; then
. /usr/share/zsh-autosuggestions/zsh-autosuggestions.zsh
# change suggestion color
# ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE='fg=#999'
ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE='fg=8'
fi
# Additional settings
#--------------------------------------------------------------------------------------
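# Override the agnoster theme's prompt_context segment so the prompt shows user@host.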
prompt_context() {
local user=`whoami`@`hostname`
if [[ "$user" != "root" || -n "$SSH_CONNECTION" ]]; then
# prompt_segment black yellow " %(!.%{%F{black}%}.)$user "
prompt_segment black red " %(!.%{%F{black}%}.)$user "
else
prompt_segment red red " %(!.%{%F{black}%}.) $user "
fi
}
# PROMPT='
#%{%f%b%k%}$(build_prompt)
# %B%F{yellow}❯❯%f%b '
PROMPT='
%{%f%b%k%}$(build_prompt)
%B%F{red}❯❯%f%b '
chpwd() { ls }
setopt auto_pushd
export PAGER=most
export PATH=$PATH:/usr/local/bin/
export PATH=$PATH:/home/kentarou/.config/nvim/
export PATH=$PATH:/home/kentarou/eclipse/java-2021-03/eclipse
export LANG=en_US.UTF-8
export LANGUAGE=en_US.UTF-8
export LC_ALL=$LANG
#export LANG=ja_JP.UTF-8
alias c="clear"
alias -g G="|grep"
alias user="ssh root@192.168.1.34"
alias 2="terminator -l 2 && exit"
alias 3="terminator -l 3 && exit"
alias 4="terminator -l 4 && exit"
alias dev="terminator -l dev && exit"
alias dev2="terminator -l dev2 && exit"
alias init.vim="nvim ~/.config/nvim/init.vim"
alias dein.toml="nvim ~/.config/nvim/dein.toml"
alias status="systemctl status"
alias start="systemctl start"
alias stop="systemctl stop"
alias restart="systemctl restart"
alias vim="nvim"
#alias blue="gnome-control-center bluetooth"
#alias blue="bluetoothctl connect 00:00:00:00:58:CA
alias e="exit"
alias wifi="nmcli device wifi list"
alias ma="cd eclipse-workspace/Mahjong_test/src/test"
# Google search
gs() {
google-chrome https://www.google.com/search?q="$*&hl=en"
}
# Google Translate (logs each query and result to ~/.eng_history)
gt() {
echo -n "$*" ": ">> ~/.eng_history &&
trans -b :ja "$*" | tee -a ~/.eng_history
}
# gt() {
# trans -b :ja "$*"
# }
error() {
$* |& read -d'あ' error ; gt $error
}
blue(){
bluetoothctl connect 00:00:00:00:58:CA
result=`echo $?`
if [ $result -eq 0 ]
then
exit
fi
}
alias wifi="nmcli device wifi connect "SPWN_N36_586cae" password 8c98718c67e60 ifname wlp2s0"
export PATH=$PATH:/home/kebab/eclipse/jee-2021-03/eclipse
# cat /proc/cpuinfo
| true |
0636e393bbf800e3e77f06afd6425185ceec9fd0
|
Shell
|
actiago/restic-systemd-automatic-backup
|
/bin/restic_check.sh
|
UTF-8
| 1,974 | 4.15625 | 4 |
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Check the backups made with restic to Backblaze B2 for errors.
# See restic_backup.sh on how this script is run (as it's analogous for this script).
set -o errexit
set -o pipefail
[[ "${TRACE-0}" =~ ^1|t|y|true|yes$ ]] && set -o xtrace
# Clean up lock if we are killed.
# If killed by systemd, like $(systemctl stop restic), then it kills the whole cgroup and all it's subprocesses.
# However if we kill this script ourselves, we need this trap that kills all subprocesses manually.
exit_hook() {
echo "In exit_hook(), being killed" >&2
jobs -p | xargs kill
restic unlock
}
trap exit_hook INT TERM
# Assert that all needed environment variables are set.
assert_envvars() {
local varnames=("$@")
for varname in "${varnames[@]}"; do
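# ${!varname+x} expands to "x" only when the variable named by $varname is set (indirect expansion)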
if [ -z ${!varname+x} ]; then
printf "%s must be set for this script to work.\n\nDid you forget to source a $INSTALL_PREFIX/etc/restic/*.env.sh profile in the current shell before executing this script?\n" "$varname" >&2
exit 1
fi
done
}
warn_on_missing_envvars() {
local unset_envs=()
local varnames=("$@")
for varname in "${varnames[@]}"; do
if [ -z "${!varname}" ]; then
unset_envs=("${unset_envs[@]}" "$varname")
fi
done
if [ ${#unset_envs[@]} -gt 0 ]; then
printf "The following env variables are recommended, but have not been set. This script may not work as expected: %s\n" "${unset_envs[*]}" >&2
fi
}
assert_envvars\
RESTIC_PASSWORD_FILE RESTIC_REPOSITORY RESTIC_VERBOSITY_LEVEL
warn_on_missing_envvars \
B2_ACCOUNT_ID B2_ACCOUNT_KEY B2_CONNECTIONS
B2_ARG=()
[ -z "${B2_CONNECTIONS+x}" ] || B2_ARG=(--option b2.connections="$B2_CONNECTIONS")
# Remove locks from other stale processes to keep the automated backup running.
# NOTE nope, don't unlock like restic_backup.sh. restic_backup.sh should take precedence over this script.
#restic unlock &
#wait $!
# Check repository for errors.
restic check \
"${B2_ARG[@]}" \
--verbose="$RESTIC_VERBOSITY_LEVEL" &
wait $!
| true |
99c6d4251f217437d4db0499b25dddb09feedbae
|
Shell
|
jspawar/dotfiles
|
/modules/dotfiles/bash/.bashrc
|
UTF-8
| 72 | 2.875 | 3 |
[] |
no_license
|
for file in "${HOME}"/.config/bash/*.bash; do
  [ -e "${file}" ] || continue # skip the unexpanded glob when no files match
  source "${file}"
done
| true |
d2784d404d76441188482831750237f1cc275f6d
|
Shell
|
tano-systems/meta-tanowrt
|
/meta-tanowrt/recipes-core/base-files/base-files/files/sysfixtime.init
|
UTF-8
| 2,695 | 3.828125 | 4 |
[
"MIT",
"LicenseRef-scancode-unknown"
] |
permissive
|
#!/bin/sh /etc/rc.common
# Copyright (C) 2013-2014 OpenWrt.org
# Copyright (C) 2018-2022 Tano Systems LLC
START=00
STOP=90
HWCLOCK=/sbin/hwclock
extra_command "systohc" "Save system time to hardware RTC"
extra_command "hctosys" "Load system time from hardware RTC"
extra_command "systz" "Apply timezone"
LOG="logger -t sysfixtime -p"
boot() {
hwclock_call restore
local maxtime="$(get_maxtime)"
local curtime="$(date +%s)"
if [ $curtime -lt $maxtime ]; then
date -s @$maxtime
hwclock_call save
fi
}
apply_timezone() {
local config="$1"
local zonename
local kept_in_localtime
config_get zonename "$config" 'zonename' 'UTC'
config_get_bool kept_in_localtime 'rtc' 'hwclock_localtime' 1
[ -n "$zonename" ] && [ -f "/usr/share/zoneinfo/$zonename" ] && \
ln -sf "/usr/share/zoneinfo/$zonename" /tmp/localtime
# apply timezone to kernel
RET="0"
if [ "$kept_in_localtime" = "0" ]; then
${HWCLOCK} -u --systz
RET="$?"
else
${HWCLOCK} -l --systz
RET="$?"
fi
if [ "${RET}" = "0" ]; then
$LOG daemon.info "applied time zone '$zonename'"
else
$LOG daemon.error "failed to apply time zone '$zonename' (${RET})"
fi
}
hwclock_call() {
local args rtc_dev kept_in_localtime
config_load 'system'
config_get rtc_dev 'rtc' 'hwclock_dev' '/dev/rtc0'
config_get_bool kept_in_localtime 'rtc' 'hwclock_localtime' 1
# Early apply timezone from system configuration
config_foreach apply_timezone system
args=""
if [ "$kept_in_localtime" = "0" ]; then
# -u|--utc
append args "-u" " "
else
# -l|--localtime
append args "-l" " "
fi
# -f|--rtc
append args "-f $rtc_dev" " "
if [ "$1" = "save" ]; then
# -w|--systohc
append args "-w" " "
elif [ "$1" = "restore" ]; then
# -s|--hctosys
append args "-s" " "
fi
RET="0"
if [ ! -e "$rtc_dev" ]; then
RET="no $rtc_dev device"
elif [ ! -e "$HWCLOCK" ]; then
RET="no $HWCLOCK"
else
$HWCLOCK $args
RET="$?"
fi
if [ "$1" = "save" ]; then
if [ "$RET" = "0" ]; then
$LOG daemon.info "saved localtime ($(date)) to $rtc_dev"
else
$LOG daemon.error "failed to save localtime to $rtc_dev ($RET)"
fi
else
if [ "$RET" = "0" ]; then
$LOG daemon.info "set localtime ($(date)) from $rtc_dev"
else
$LOG daemon.error "failed to set localtime from $rtc_dev ($RET)"
fi
fi
}
systohc() {
hwclock_call save
}
hctosys() {
hwclock_call restore
}
systz() {
config_load 'system'
config_foreach apply_timezone system
}
start() {
hwclock_call restore
}
stop() {
hwclock_call save
}
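# get_maxtime prints the mtime (epoch seconds) of the newest file under /etc,
# used at boot as a lower bound for the system clock.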
get_maxtime() {
local file newest
for file in $( find /etc -type f ) ; do
[ -z "$newest" -o "$newest" -ot "$file" ] && newest=$file
done
[ "$newest" ] && date -r "$newest" +%s
}
| true |
7abbb9f1e091ac126aa3cc353111b0f245f3de5d
|
Shell
|
SixSq/dataClay
|
/dataclay-proxy/release.sh
|
UTF-8
| 1,886 | 3.15625 | 3 |
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2018, SixSq Sarl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
if [[ $# -eq 0 ]] ; then
echo 'Please pass the Docker image tag version as an argument'
exit 1
fi
(cd ../orchestration/ && docker-compose up -d)
TOOLSPATH=../tool/dClayTool.sh
until $TOOLSPATH GetDataClayID
do
echo " --- waiting for dataclay"
sleep 2
done
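# Derive a 32-character alphanumeric password from a UUID (falling back to /dev/urandom).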
password=`echo $(uuidgen || cat /dev/urandom) | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1`
sed -i '.orig' "s/cHaNgEmE/$password/g" registerModel_v2.sh
./registerModel_v2.sh
./buildApp.sh
lein do clean, uberjar
mv cfgfiles/client.properties cfgfiles/client.properties.orig
cat >cfgfiles/client.properties <<EOF
HOST=logicmodule1
TCPPORT=1034
EOF
cp -fr ../tool tool
docker build -t mf2c/dataclay-proxy:${1} .
docker tag mf2c/dataclay-proxy:${1} mf2c/dataclay-proxy:latest
# cleanup
mv cfgfiles/session.properties.orig cfgfiles/session.properties
mv cfgfiles/client.properties.orig cfgfiles/client.properties
mv registerModel_v2.sh.orig registerModel_v2.sh
while true;
do
read -p "Do you wish to shutdown DataClay? [y/n]" yn
case $yn in
[Yy]* )
(cd ../orchestration/ && docker-compose down -v)
break
;;
[Nn]* ) break;;
* ) echo "Please answer yes or no.";;
esac
done
lein do clean
rm -fr stubs bin tool
| true |
4976216a9407676105a4625a7b9eb96424d4a827
|
Shell
|
rccguexam/osexam
|
/MCA3_25_mohini-parmar.sh
|
UTF-8
| 608 | 3.5625 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#------------------------Rno25_Q1.sh------------------------
# product of the numbers 1 to 10 (i.e. 10!)
n=10
i=1
product=1
while [ $i -le $n ]
do
product=$((product * i))
i=$((i + 1))
done
echo "The product is : $product"
#------------------------Rno25_Q2.sh------------------------
#write a shell script to display menu driven interface.
echo "-----Menu------"
echo "1.list all files."
echo "2.print current directory"
echo "3.print Date"
read ch
case $ch in
1) ls
;;
2) pwd
;;
3) echo `date +%d-%B-%Y`
;;
*) echo "Invalid Option"
esac
| true |
fd3f8f3e81e04d6a027cfc5f5df0d964a0d920e5
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/gnomato/PKGBUILD
|
UTF-8
| 642 | 2.59375 | 3 |
[] |
no_license
|
# Maintainer: Ainola
# Contributor: Vinícius dos Santos Oliveira
pkgname=gnomato
pkgver=1.1.0
pkgrel=1
pkgdesc="A timer for Pomodoro Technique"
arch=(i686 x86_64)
url="https://github.com/diegorubin/gnomato"
license=('GPL3')
depends=('gtkmm3' 'libnotify' 'boost-libs' 'python2')
makedepends=('intltool' 'gettext' 'boost')
source=("https://github.com/diegorubin/gnomato/archive/$pkgver.tar.gz")
sha256sums=('450cbddbc36709727774a96cfa319cf70ddce88e442eb75da2ec0b837ce8e44b')
build() {
cd "$srcdir/$pkgname-$pkgver"
autoreconf -vi
./configure --prefix=/usr
make
}
package() {
cd "$srcdir/$pkgname-$pkgver"
make DESTDIR="$pkgdir/" install
}
| true |
ac05f8a2e870ee67b0c2e148a2474d210f7cf347
|
Shell
|
djsperka/habit2-src
|
/tools/copy_gstreamer_framework
|
UTF-8
| 1,554 | 3.671875 | 4 |
[] |
no_license
|
#!/bin/sh
if [ $# -ne 1 ]; then
echo "usage: copy_gstreamer_framework dest"
exit 1
fi
# check that dest exists, if not, create it and the subdir "GStreamer.framework"
DEST="$1/GStreamer.framework"
PKG_DEST="$DEST/Versions/1.0"
if [ ! -d $PKG_DEST ]; then
mkdir -p $PKG_DEST
fi
GSTQT_PLUGIN="/Users/dan/eclipse-workspace/habit2/libs/gstqt/release/libgstqt.dylib"
SOURCE_DIR="/Library/Frameworks/GStreamer.framework"
FIRST_PACKAGE="org.freedesktop.gstreamer.x86_64.GStreamer"
PACKAGES=" org.freedesktop.gstreamer.x86_64.gstreamer-1.0-net \
org.freedesktop.gstreamer.x86_64.gstreamer-1.0-codecs-restricted \
org.freedesktop.gstreamer.x86_64.base-system-1.0 \
org.freedesktop.gstreamer.x86_64.gstreamer-1.0-codecs-gpl \
org.freedesktop.gstreamer.x86_64.gstreamer-1.0-system \
org.freedesktop.gstreamer.x86_64.base-crypto \
org.freedesktop.gstreamer.x86_64.gstreamer-1.0-dvd \
org.freedesktop.gstreamer.x86_64.gstreamer-1.0-core \
org.freedesktop.gstreamer.x86_64.gstreamer-1.0-codecs \
org.freedesktop.gstreamer.x86_64.gstreamer-1.0-playback \
org.freedesktop.gstreamer.x86_64.gstreamer-1.0-effects \
org.freedesktop.gstreamer.x86_64.gstreamer-1.0-libav"
# FIRST_PACKAGE is moved first - it sets up the directory structure
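# Copy via a tar pipe: pack each package's file list from the source framework
# and unpack it under the destination (-n: do not recurse into directories).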
echo "Copying root package $FIRST_PACKAGE"
tar -C $SOURCE_DIR -cnvf - `pkgutil --files $FIRST_PACKAGE` | tar -C ${DEST} -xf -
for PKG in $PACKAGES; do
echo "Copying package $PKG"
tar -C ${SOURCE_DIR}/Versions/1.0 -cnf - `pkgutil --files ${PKG}` | tar -C ${DEST}/Versions/1.0 -xf -
done
| true |
6f9fe3be9ad2aed7169f1566be1a1dcb0147b821
|
Shell
|
compas/grasp
|
/test/integration/mpitmp/run.sh
|
UTF-8
| 4,996 | 4.53125 | 5 |
[
"MIT",
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
#
# ./run.sh [--preserve-tmp] casename
#
# Create a temporary directory in the current working directory and run the integration
# tests. Currently supported cases are:
#
# - serial: runs the serial versions of rangular and rmcdhf
# - mpi: runs the MPI versions of rangular/rmcdhf, but with a short MPI_TMP (under /tmp)
# - mpi-longpath: runs the MPI versions of rangular/rmcdhf, but with a much longer MPI_TMP
# to test for the MPI_TMP handling bug
#
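# SCRIPTDIR: absolute directory containing this script.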
export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Check for mandatory environment variables
if [ -z ${GRASP_BINDIR+x} ]; then
>&2 echo "ERROR: \$GRASP_BINDIR variable is unset."
exit 127
fi
GRASP_BINDIR=`realpath "${GRASP_BINDIR}"`
if ! [ -d "${GRASP_BINDIR}" ]; then
>&2 echo "ERROR: \$GRASP_BINDIR is not a diretory."
>&2 echo " GRASP_BINDIR=$GRASP_BINDIR"
exit 127
fi
# If the user passes "--preserve-tmp" as the first argument, we'll note that down for later.
if [ "$1" == "--preserve-tmp" ]; then
preserve_tmp=true
shift
fi
# Get the case name. The user should pass only one argument.
if [ "$#" -ne 1 ]; then
>&2 echo "ERROR: Invalid number of arguments passed ($#)"
exit 127
fi
CASE=$1
if [ "$CASE" != "serial" ] && [ "$CASE" != "mpi" ] && [ "$CASE" != "mpi-longpath" ]; then
>&2 echo "ERROR: Invalid configuration passed ($CASE)"
>&2 echo " expected one of: serial, mpi, mpi-longpath"
exit 125
fi
# Determine the paths to the necessary binaries
function checkbinary {
varname=$1
path=$2
if ! [ -f "${path}" ]; then
>&2 echo "ERROR: Unable to find binary $varname"
>&2 echo " at ${path}"
exit 127
fi
>&2 echo "INFO: $varname=${path}"
}
RNUCLEUS="${GRASP_BINDIR}/rnucleus"; checkbinary RNUCLEUS $RNUCLEUS
RCSFGENERATE="${GRASP_BINDIR}/rcsfgenerate"; checkbinary RCSFGENERATE $RCSFGENERATE
RWFNESTIMATE="${GRASP_BINDIR}/rwfnestimate"; checkbinary RWFNESTIMATE $RWFNESTIMATE
if [[ $CASE =~ ^mpi ]]; then
checkbinary RANGULAR "${GRASP_BINDIR}/rangular_mpi"
checkbinary RMCDHF "${GRASP_BINDIR}/rmcdhf_mpi"
RANGULAR="mpirun -n 4 ${GRASP_BINDIR}/rangular_mpi"
RMCDHF="mpirun -n 4 ${GRASP_BINDIR}/rmcdhf_mpi"
else
RANGULAR="${GRASP_BINDIR}/rangular"; checkbinary RANGULAR $RANGULAR
RMCDHF="${GRASP_BINDIR}/rmcdhf"; checkbinary RMCDHF $RMCDHF
fi
# Create a temporary directory to run GRASP in:
TMP=`mktemp -d grasp-test-mpitmp.XXXXXXXXX` || exit 120
TMP=`realpath "${TMP}"`
if ! [ -d "${TMP}" ]; then
>&2 echo "ERROR: Temporary directory as not created."
>&2 echo " TMP=$TMP"
exit 121
fi
# This will be called any time we exit, independent of whether it's an early exit due to a
# failure, or the final exit when tests pass.
function clean_up_tmp_directory {
# Keep the temporary directory around if the user passed --preserve-tmp
if [ "$preserve_tmp" == "true" ]; then
>&2 echo "INFO: Keeping temporary directory ${TMP}"
else
>&2 echo "INFO: Removing temporary directory ${TMP}"
rm -vR "${TMP}"
fi
}
trap clean_up_tmp_directory EXIT
>&2 echo "INFO: switching to temporary directory: ${TMP}"
cd "${TMP}" || exit 122
# Function to test existence of a generated file:
function test_file_exists {
if ! [ -f "$1" ]; then
>&2 echo "ERROR: failed to generate file $1"
exit 50
fi
}
# Run rnucleus to generate a simple isodata file
${RNUCLEUS} <<-EOF
92
238
n
238.02891
0
0
0
EOF
exitcode=$?
if ! [ $exitcode -eq 0 ]; then
>&2 echo "ERROR: rnucleus failed with $exitcode"
exit 1
fi
test_file_exists "isodata"
# Run rcsfgenerate to generate a simple CSL
${RCSFGENERATE} <<-EOF
*
0
1s(2,*)2s(2,*)
2s,2p
0,2
2
n
EOF
exitcode=$?
if ! [ $exitcode -eq 0 ]; then
>&2 echo "ERROR: rcsfgenerate failed with $exitcode"
exit 1
fi
test_file_exists "rcsf.out"
mv rcsf.out rcsf.inp || exit 2
test_file_exists "rcsf.inp"
# Run rwfnestimate to generate basic orbitals
${RWFNESTIMATE} <<-EOF
y
2
*
EOF
exitcode=$?
if ! [ $exitcode -eq 0 ]; then
>&2 echo "ERROR: rwfnestimate failed with $exitcode"
exit 1
fi
test_file_exists "rwfn.inp"
# Set up MPI_TMP on MPI cases
function strlen {
echo -n $1 | wc -c
}
if [[ $CASE =~ ^mpi ]]; then
export MPI_TMP=`mktemp -d`
function clean_up_mpitmp {
rm -Rv ${MPI_TMP}
}
# Chain both cleanups: a bare trap here would overwrite the EXIT trap set earlier.
trap 'clean_up_mpitmp; clean_up_tmp_directory' EXIT
if [ "$CASE" == "mpi-longpath" ]; then
export MPI_TMP="$MPI_TMP/mpitmp"
while [ `strlen $MPI_TMP` -lt 80 ]; do
export MPI_TMP="${MPI_TMP}-qwertyuiop1234567890"
done
mkdir "${MPI_TMP}" || exit 5
fi
echo "MPI_TMP=$MPI_TMP ($(strlen $MPI_TMP) characters)"
fi
# Run rangular
echo "Running: ${RANGULAR}"
${RANGULAR} <<-EOF
y
EOF
exitcode=$?
if ! [ $exitcode -eq 0 ]; then
>&2 echo "ERROR: rangular failed with $exitcode"
exit 1
fi
# Run rmcdhf
echo "Running: ${RMCDHF}"
${RMCDHF} <<-EOF
y
1
1
5
*
*
20
EOF
exitcode=$?
if ! [ $exitcode -eq 0 ]; then
>&2 echo "ERROR: rmcdhf failed with $exitcode"
exit 1
fi
test_file_exists "rmcdhf.sum"
test_file_exists "rmix.out"
echo "INFO: Final directory contents:"
ls -Alh
# If we got this far, everything is a-ok
>&2 echo "TESTS SUCCEEDED"
exit 0
| true |
784c35aa18e0c6ca35de8550fd891b7cd7991f63
|
Shell
|
rjzupkoii/PSU-CIDD-Malaria-Simulation
|
/config.sh
|
UTF-8
| 2,754 | 3.921875 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
function check_version() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
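# check_version A B succeeds only when version A is strictly newer than B
# (equal versions fail), e.g. check_version 3.16 3.13 && echo newer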
# Define some paths
BUILD_ENV="$HOME/work/build_env"
# Load cmake
module load cmake
# Create the directory to work in
source=$(pwd)
mkdir -p $BUILD_ENV
# If PostgreSQL isn't present, download and install it to the user directory
if [ ! -d "$BUILD_ENV/postgres" ]; then
# Get the source files
cd $BUILD_ENV
wget https://github.com/postgres/postgres/archive/master.zip
unzip master.zip
rm master.zip
# Build the source files
cd postgres-master
./configure --prefix=$BUILD_ENV/postgres
make -j 8
make install
cd ..
rm -rf $BUILD_ENV/postgres-master
# Export the relevant variables
export PATH="$BUILD_ENV/postgres/bin:$PATH"
export PKG_CONFIG_PATH="$BUILD_ENV/postgres/lib/pkgconfig:$PKG_CONFIG_PATH"
export LIBRARY_PATH="$BUILD_ENV/postgres/lib:$LIBRARY_PATH"
# Prepare the libpqxx library, limit to version 7.0.0
git clone https://github.com/jtv/libpqxx.git
cd libpqxx
git checkout 7.0.0
./configure --disable-documentation --prefix=$BUILD_ENV/lib
make -j 8
make install
fi
# If vcpkg doesn't already exist as a directory, load it
if [ ! -d "$BUILD_ENV/vcpkg" ]; then
cd $BUILD_ENV
wget https://github.com/microsoft/vcpkg/archive/2019.08.tar.gz
tar xf 2019.08.tar.gz
rm 2019.08.tar.gz
mv vcpkg-2019.08 vcpkg
cd vcpkg
./bootstrap-vcpkg.sh
fi
# Load the relevant packages
cd $BUILD_ENV/vcpkg
./vcpkg install yaml-cpp fmt date args
# Return to the source directory
cd $source
# Load GSL so we can set the correct path
module use /storage/icds/RISE/sw8/modules
module load gsl
# Create the build script
if [ ! -d "build" ]; then
mkdir -p build
cd build
toolchain="$BUILD_ENV/vcpkg/scripts/buildsystems/vcpkg.cmake"
echo "module use /storage/icds/RISE/sw8/modules" >> build.sh
echo "module load gsl" >> build.sh
echo "module load cmake" >> build.sh
echo "export GSL_ROOT_DIR=`gsl-config --prefix`" >> build.sh
echo "export PATH=$PATH" >> build.sh
echo "export LIBRARY_PATH=$BUILD_ENV/postgres/lib:$BUILD_ENV/lib/lib:$LIBRARY_PATH" >> build.sh
echo "cmake -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$toolchain -DBUILD_CLUSTER:BOOL=true .." >> build.sh
echo "make -j 8" >> build.sh
chmod +x build.sh
fi
# Notify the user of recommended .bashrc changes
BIWhite='\033[1;97m'
DEFAULT='\033[0m'
LIBPQ=~/work/build_env/postgres/lib/
echo -e "${BIWhite}Configuration complete, you may wish to update ~/.bashrc with the following:${DEFAULT}\n"
echo " # Configure runtime environment"
echo " module use /storage/icds/RISE/sw8/modules"
echo " module load gsl"
echo " LD_LIBRARY_PATH=\$LD_LIBRARY_PATH:`dirname $LIBPQ`/`basename $LIBPQ`"
echo
| true |
a25925e3ca923a0a56fe36a89878131ca38a59a3
|
Shell
|
jaredjennings/cmits-unclass
|
/modules/stig_misc/files/login_history/gdm-post-login.sh
|
UTF-8
| 209 | 2.59375 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Fulfill AFMAN 33-223, section 5.5.2, and UNIX SRG rules GEN000452 and
# GEN000454.
text="`/usr/sbin/loginhistory $LOGNAME`"
[[ "$text" =~ \! ]] && sw=--error || sw=--info
zenity $sw --text="$text"
| true |
2bd9b4095a115569378c09822197e3bd334e594e
|
Shell
|
hashicorp-demoapp/instruqt
|
/packer/bootstrap.sh
|
UTF-8
| 2,614 | 3.140625 | 3 |
[] |
no_license
|
#!/bin/bash
set -e
export HOME=/root
# Hack to make sure we don't start installing packages until the filesystem is available.
echo "waiting 180 seconds for cloud-init to update /etc/apt/sources.list"
timeout 180 /bin/bash -c \
'until stat /var/lib/cloud/instance/boot-finished 2>/dev/null; do echo waiting ...; sleep 1; done'
# Install packages.
export DEBIAN_FRONTEND=noninteractive
apt-get update
apt-get -y install \
apt-transport-https \
ca-certificates \
software-properties-common \
git curl wget \
conntrack socat \
inotify-tools \
unzip \
make golang-go \
jq vim nano emacs joe \
bash-completion
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
apt update
apt-get -y install \
docker-ce \
docker-ce-cli \
containerd.io
# Make sure SSH does not break.
apt-get -y remove sshguard
# Disable auto updates as they break things.
systemd-run --property="After=apt-daily.service apt-daily-upgrade.service" --wait /bin/true
systemctl mask apt-daily.service apt-daily-upgrade.service
# Improve the startup sequence
cp /tmp/google-startup-scripts.service /etc/systemd/system/multi-user.target.wants/google-startup-scripts.service
# Start Docker, in case we need to pre-pull images in derivatives of this image.
systemctl daemon-reload
systemctl enable docker
systemctl start docker
VERSION=1.5.0
OS=linux
ARCH=amd64
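# Stream the release tarball and extract only the docker-credential-gcr binary straight into /usr/bin.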
curl -fsSL "https://github.com/GoogleCloudPlatform/docker-credential-gcr/releases/download/v${VERSION}/docker-credential-gcr_${OS}_${ARCH}-${VERSION}.tar.gz" \
| tar xz --to-stdout ./docker-credential-gcr \
> /usr/bin/docker-credential-gcr && chmod +x /usr/bin/docker-credential-gcr
docker-credential-gcr configure-docker
# Install shipyard
curl https://shipyard.run/install | bash
# Run the blueprint
shipyard run github.com/hashicorp-demoapp/infrastructure//blueprint
# Replace with a nice check at some point
sleep 60
# Pause the application
shipyard pause
# Install Tools
## Install Vault
wget https://releases.hashicorp.com/vault/1.3.1/vault_1.3.1_linux_amd64.zip
unzip vault_1.3.1_linux_amd64.zip
mv vault /usr/bin
## Install Consul
wget https://releases.hashicorp.com/consul/1.6.2/consul_1.6.2_linux_amd64.zip
unzip consul_1.6.2_linux_amd64.zip
mv consul /usr/bin
## Install Kubectl
curl -LO https://storage.googleapis.com/kubernetes-release/release/`curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt`/bin/linux/amd64/kubectl
chmod +x kubectl
mv kubectl /usr/bin
| true |
47e3677fc165d09c343e51397c66a5cfa1bcb603
|
Shell
|
danielxiaowxx/generator-ionic
|
/generators/update-project/templates/release-android-app.sh
|
UTF-8
| 1,101 | 2.875 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
gulp clean
gulp release
mv www www_back
mv release/www www
ionic plugin rm cordova-plugin-console
# Multi-channel packaging - start
mkdir ./tmp
mv ./platforms/android/AndroidManifest.xml ./tmp/
echo '' > release.log
for version in 'YingYongBao' 'Test'
do
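# Restore a clean AndroidManifest.xml, stamp in this channel ID, then build, sign and zipalign the APK.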
cp -f ./tmp/AndroidManifest.xml ./platforms/android/
sed -i -- "s/Channel_ID/$version/g" ./platforms/android/AndroidManifest.xml
cat ./platforms/android/AndroidManifest.xml | grep $version >> release.log
ionic build --release android
jarsigner -verbose -sigalg SHA1withRSA -digestalg SHA1 -keystore <%= appName %>-release-key.keystore -storepass $<%= snakeCaseAppName %>_storepass -keypass $<%= snakeCaseAppName %>_keypass platforms/android/build/outputs/apk/android-release-unsigned.apk <%= appName %>
$ANDROID_HOME/build-tools/23.0.1/zipalign -v 4 platforms/android/build/outputs/apk/android-release-unsigned.apk release/<%= appName %>-$version.apk
done
mv -f ./tmp/AndroidManifest.xml ./platforms/android/
rm -rf ./tmp
# Multi-channel packaging - end
ionic plugin add cordova-plugin-console
rm -rf www
mv www_back www
cat release.log
| true |
272fd256618f33f416fe9782c951835077a26ae4
|
Shell
|
ThomasAdam/tmux-ARCHIVED
|
/tags/TMUX_1_4/tools/fix-ids.sh
|
UTF-8
| 228 | 2.890625 | 3 |
[] |
no_license
|
# $Id: fix-ids.sh,v 1.3 2009-07-01 19:03:34 nicm Exp $
for i in *.[ch] tmux.1; do
(head -1 $i|grep '$OpenBSD' >/dev/null) || continue
mv $i $i~ || exit
sed 's/\$OpenBSD.* \$/$\Id$/' $i~ >$i || exit
echo $i
done
| true |
c384685ee2f1516192b52a6dbbd49721f4c9798d
|
Shell
|
lastweek/rdma_bench_dirty
|
/rc-swarm/sweep.sh
|
UTF-8
| 856 | 3.328125 | 3 |
[
"Apache-2.0"
] |
permissive
|
# A function to echo in blue color
function blue() {
es=`tput setaf 4`
ee=`tput sgr0`
echo "${es}$1${ee}"
}
# Sweep over params
# This is separate from run-all.sh, which does not sweep
# Empty existing sweep output
rm -f sweep/temp_out
# 6 machines on NetApp, so increment NUM_WORKERS by 6
for VM_PER_MACHINE in 1 2 3 4 5 6 7 8 9; do
for WINDOW_SIZE in `seq 8 8 32`; do
for UNSIG_BATCH in 1; do
for NUM_WORKERS in 154; do
# Do work for these params
rm -f sweep.h
touch sweep.h
echo "#define SIZE 32" >> sweep.h
echo "#define VM_PER_MACHINE $VM_PER_MACHINE" >> sweep.h
echo "#define WINDOW_SIZE $WINDOW_SIZE" >> sweep.h
echo "#define NUM_WORKERS $NUM_WORKERS" >> sweep.h
echo "#define UNSIG_BATCH $UNSIG_BATCH" >> sweep.h
make clean
make
blue "Starting run"
./run-all.sh
done
done
done
done
| true |
9c6b41dd3b492bce38174e7ad041ba40585935a5
|
Shell
|
kinglionsoft/farmer
|
/Micro/docker/gdi/build-docker-base-image.sh
|
UTF-8
| 1,259 | 3.5625 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Create GDI+/TTF images for .NET Core
images=(runtime:2.1 runtime:2.2 runtime:3.0 aspnet:2.1 aspnet:2.2 aspnet:3.0 aspnet:3.1)
official=mcr.microsoft.com/dotnet/core/
yx=registry.local.com/dotnetcore/
proxy=http://192.168.1.123:11080/
rm -rf gdi
mkdir gdi
cp sources.list ./gdi
for img in ${images[*]}
do
# GDI+
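# e.g. runtime:2.1 -> registry.local.com/dotnetcore/runtime-gdi:2.1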
gdi=$yx$(echo $img | sed 's/:/-gdi:/')
echo '========================='
echo building $gdi
if [[ "$(docker image ls -q $gdi 2> /dev/null)" != "" ]]; then
echo $gdi exists
else
pushd gdi
tee Dockerfile << EOF
FROM $official$img
ADD sources.list /etc/apt/
RUN apt-get update \
&& apt-get install -y --allow-unauthenticated \
libc6-dev \
libgdiplus \
libx11-dev \
&& rm -rf /var/lib/apt/lists/*
EOF
docker build -t $gdi .
rm Dockerfile
popd
fi
# TTF
echo '========================='
ttf=$yx$(echo $img | sed 's/:/-ttf:/')
echo building $ttf
if [[ "$(docker image ls -q $ttf 2> /dev/null)" != "" ]]; then
echo $ttf exists
else
pushd font
tee Dockerfile << EOF
FROM $gdi
COPY ttf/* /usr/share/fonts/winFonts/
EOF
docker build -t $ttf .
rm Dockerfile
popd
fi
done
| true |
3413a3b2fd40dad073251fc056a2872f707ce2cc
|
Shell
|
alexalmansa/rbacF2
|
/rbac-0.3/rbacb/rbac_dir/setup
|
UTF-8
| 4,763 | 2.640625 | 3 |
[] |
no_license
|
#!/bin/bash
# FUNCTIONS
function creaDaemonEntorn()
{
systemctl stop dimoniRoot
systemctl disable dimoniRoot
rm /lib/systemd/system/dimoniRoot.service
systemctl daemon-reload
systemctl reset-failed
cat <<EOT >> /lib/systemd/system/dimoniRoot.service
[Unit]
Description=daemon root service
After=network.target
[Service]
Type=simple
Restart=always
RestartSec=5
ExecStart=/data/users/config/escolta.sh
[Install]
WantedBy=multi-user.target
EOT
systemctl start dimoniRoot
systemctl enable dimoniRoot
}
function creaDaemonMail()
{
systemctl stop dimoniMail
systemctl disable dimoniMail
rm /lib/systemd/system/dimoniMail.service
systemctl daemon-reload
systemctl reset-failed
cat <<EOT >> /lib/systemd/system/dimoniMail.service
[Unit]
Description=daemon mail service
After=network.target
[Service]
User=root
Type=simple
Restart=always
RestartSec=5
ExecStart=/data/users/config/repMail.sh
[Install]
WantedBy=multi-user.target
EOT
systemctl start dimoniMail
systemctl enable dimoniMail
}
function creaConfigs()
{
cp /usr/bin/rbac/rbac_dir/enviroment /data/users/config/
chmod 755 /data/users/config/enviroment
cp /usr/bin/rbac/rbac_dir/escolta.sh /data/users/config/
chmod 755 /data/users/config/escolta.sh
cp /usr/bin/rbac/rbac_dir/.envia.sh /data/users/config/
chmod 755 /data/users/config/.envia.sh
cp /usr/bin/rbac/rbac_dir/repMail.sh /data/users/config/
chmod 755 /data/users/config/repMail.sh
cp /usr/bin/rbac/rbac_dir/gestioEntorn /data/users/config/
chmod 755 /data/users/config/gestioEntorn
cp /usr/bin/rbac/rbac_dir/removeEnviroment /data/users/config/
chmod 755 /data/users/config/removeEnviroment
cd /data/users/config
cat <<EOT > datastore
/etc/skel
0
0
x
x
x
0.5
512
50m
100m
EOT
cat <<EOT > visitor
bash,touch,mkdir,rm,ls,vim,nano
/etc/skel
1 day
1 day
x
x
visitor
0.75
512
150m
300m
EOT
cat <<EOT > basic
bash,touch,mkdir,rm,ls,vim,nano,gcc,make,kill
/etc/skel
1 day
persistent
6000
30000
compartida
1
1024
200m
400m
EOT
cat <<EOT > medium
bash,touch,mkdir,rm,ls,vim,nano,gcc,make,kill,java,ln,ps,python,pip,valgrind,grep,awk,sed
/etc/skel
1 day
persistent
6000
30000
compartida
1.5
1536
500m
1g
EOT
# two more commands still need to be added to advanced
cat <<EOT > advanced
bash,touch,mkdir,rm,ls,vim,nano,gcc,make,kill,java,ln,ps,python3,pip,valgrind,grep,awk,sed,chmod,chown,strace,cat,mv,rm,rmdir,clear
/etc/skel
persistent
persistent
6000
30000
compartida
2
2048
750m
1.5g
EOT
}
function creaFitxerBase()
{
cd /data/users
cat <<EOT > configuracio
/data/users/config
30000
30000
mac12llm@gmail.com
/data/docker
user.info
EOT
}
function creaSSH()
{
mkdir -p /data/users/config/ssh/$admin/
cat <<EOT > /data/users/config/ssh/$admin/authorized_keys
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSVBbuiFnNig/DOY5hbV+XjDAaSwEv/+JXpWK/CyyjRrYmLbUG1SzYqoqs5GnX2QBUf1zVBrCvnFcos37uAHXQpeTXnnMi1wzREO7mW6XTRt4TY3rusPwuQkcD3+RT14Xm5f9Nw1wY0fDAy5wBAoCe8ir4VUkWWBkaeQ4Mb0Wh2ecspwgg5I4nY24qJIhX01DqEvzP1LQY8/lKn57HUmzFLpRQipfNDcx/4krGdgIeDdwJv8vOK03o8razVMKs11Af+lhcHsBWwDSFgo1owfsvdMLQX9THy9XqpgvQXC7rAr9C/99eXcBPizAKIpnxytjiU3pIAV/ZTTsT7v3RHnq5 alexalmansa@alex.local
EOT
chmod 755 /data/users/config/ssh
chmod 755 /data/users/config/ssh/$admin/authorized_keys
}
#CONSTANTS
userhome="$1"
#admin="alex"
admin="$1"
#SCRIPT
# Add the groups for the different roles
groupadd datastore
groupadd visitor
groupadd basic
groupadd medium
groupadd advanced
rm -rf /data/users/config
mkdir -p /data/users/config/googleauth
# Create the base configuration file holding the mail address and directories
creaFitxerBase
# Create the per-group configuration files and copy the required programs
creaConfigs
# Create the daemon in charge of deleting users, homes and environments
creaDaemonEntorn
# Create the daemon in charge of sending the mail with the request command
creaDaemonMail
creaSSH
mkdir /var/log/rbac
touch /var/log/rbac/docker.log
chmod 666 /var/log/rbac/docker.log
touch /var/log/rbac/user.log
chmod 666 /var/log/rbac/user.log
touch /var/log/rbac/request.log
chmod 666 /var/log/rbac/request.log
echo "Copiant fitxers"
cp -r /usr/bin/rbac/rbac_dir/.bashrc /etc/skel
cp -r /usr/bin/rbac/rbac_dir/sshd_config /etc/ssh
cp -r /usr/bin/rbac/rbac_dir/sshd /etc/pam.d/
cp -r /usr/bin/rbac/carpeta /data/
docker network create -d bridge compartida
echo "Restarting ssh service"
service ssh restart
| true |
d2cc9fa3e390d1e510429add93084974b2e0538b
|
Shell
|
uyirex/AzuraCast
|
/update.sh
|
UTF-8
| 564 | 3.203125 | 3 |
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/usr/bin/env bash
function phpuser {
sudo -u azuracast php "$@"
}
export www_base="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export app_base=`realpath $www_base/..`
export util_base=$www_base/util
export tmp_base=$app_base/www_tmp
# Stop system tasks
service nginx stop
service cron stop
# Pull down update
git reset --hard
git pull
chmod a+x ./update.sh
# Clear cache
rm -rf $tmp_base/cache/*
cd $util_base
phpuser cli.php cache:clear
phpuser doctrine.php orm:schema-tool:update --force
# Restart services
service cron start
service nginx start
| true |
cfa36423f55d347e9b111d015af286d37b230b12
|
Shell
|
ConorMcFeelyQUB/cloud-scripts
|
/big-test/deploy-all.sh
|
UTF-8
| 4,928 | 3.296875 | 3 |
[] |
no_license
|
set -ex
ADVERT_DB_INSTANCE_NAME="advert-db-instance"
PAGE_DB_INSTANCE_NAME="page-db-instance"
DB_PASSWORD="QUBccProject"
DB_TEIR="db-n1-standard-2"
REGION="europe-west2"
ZONE="europe-west2-a"
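# Provision two Cloud SQL instances (advert, page), a setup VM that initialises
# the databases, and three app VMs (advert, search, indexer), each with a static IP.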
#creating sql instance for advert db
gcloud sql instances create $ADVERT_DB_INSTANCE_NAME \
--tier=$DB_TIER \
--region=$REGION
#set the rootpassword for the new sql instance
gcloud sql users set-password root --host=% --instance $ADVERT_DB_INSTANCE_NAME --password $DB_PASSWORD
# \
# --availability-type= regional
#creating sql instance for page db
gcloud sql instances create $PAGE_DB_INSTANCE_NAME \
--tier=$DB_TIER \
--region=$REGION
#set the rootpassword for the new sql instance
gcloud sql users set-password root --host=% --instance $PAGE_DB_INSTANCE_NAME --password $DB_PASSWORD
#enable sqladmin service
gcloud services enable sqladmin.googleapis.com
#allow http traffic
gcloud compute firewall-rules create default-allow-http-8080 \
--allow tcp:8080 \
--source-ranges 0.0.0.0/0 \
--target-tags http-server \
--description "Allow port 8080 access to http-server"
############################################################
#Creating db tables and putting initial data
STATIC_IP_SQL_SETUP_INSTANCE="static-sql-setup"
#Creating a static IP for the sqlsetup vm
gcloud compute addresses create $STATIC_IP_SQL_SETUP_INSTANCE \
--region $REGION
#Storing the newly created static ip
STATIC_IP_SQL_SETUP="$(gcloud compute addresses describe $STATIC_IP_SQL_SETUP_INSTANCE --region $REGION --format='get(address)')"
#Add setup IP to authorised list for advert sql instance
gcloud --quiet sql instances patch $ADVERT_DB_INSTANCE_NAME --authorized-networks="${STATIC_IP_SQL_SETUP}",
#Add setup IP to authorised list for page sql instance
gcloud --quiet sql instances patch $PAGE_DB_INSTANCE_NAME --authorized-networks="${STATIC_IP_SQL_SETUP}",
#create vm instance to run mysql commands
SQL_SETUP_INSTANCE_NAME="sql-setup-vm-instance"
gcloud compute instances create $SQL_SETUP_INSTANCE_NAME \
--image-family=debian-9 \
--image-project=debian-cloud \
--machine-type=g1-small \
--scopes userinfo-email,cloud-platform \
--metadata-from-file startup-script=startup-script-sql-setup.sh \
--zone $ZONE \
--tags http-server \
--address ${STATIC_IP_SQL_SETUP}
#################################
#Creating static ips for 3 VM instances and store ips
STATIC_IP_ADVERT_INSTANCE="static-advert"
STATIC_IP_SEARCH_INSTANCE="static-search"
STATIC_IP_INDEXER_INSTANCE="static-indexer"
#advert
gcloud compute addresses create $STATIC_IP_ADVERT_INSTANCE \
--region $REGION
STATIC_IP_ADVERT="$(gcloud compute addresses describe $STATIC_IP_ADVERT_INSTANCE --region $REGION --format='get(address)')"
#search
gcloud compute addresses create $STATIC_IP_SEARCH_INSTANCE \
--region $REGION
STATIC_IP_SEARCH="$(gcloud compute addresses describe $STATIC_IP_SEARCH_INSTANCE --region $REGION --format='get(address)')"
#indexer
gcloud compute addresses create $STATIC_IP_INDEXER_INSTANCE \
--region $REGION
STATIC_IP_INDEXER="$(gcloud compute addresses describe $STATIC_IP_INDEXER_INSTANCE --region $REGION --format='get(address)')"
#authorise advert and search for the advert sql instance
gcloud --quiet sql instances patch $ADVERT_DB_INSTANCE_NAME --authorized-networks="${STATIC_IP_SQL_SETUP}","${STATIC_IP_ADVERT}","${STATIC_IP_SEARCH}"
#authorise indexer and search for the page sql instance
gcloud --quiet sql instances patch $PAGE_DB_INSTANCE_NAME --authorized-networks="${STATIC_IP_SQL_SETUP}","${STATIC_IP_INDEXER}","${STATIC_IP_SEARCH}"
#Now create the 3 VM instances giving the static ips
ADVERT_VM_INSTANCE_NAME="advert-vm-instance"
SEARCH_VM_INSTANCE_NAME="search-vm-instance"
INDEXER_VM_INSTANCE_NAME="indexer-vm-instance"
#advert
gcloud compute instances create $ADVERT_VM_INSTANCE_NAME \
--image-family=debian-9 \
--image-project=debian-cloud \
--machine-type=g1-small \
--scopes userinfo-email,cloud-platform \
--metadata-from-file startup-script=startup-script-advert.sh \
--zone $ZONE \
--tags http-server \
--address ${STATIC_IP_ADVERT}
#search
gcloud compute instances create $SEARCH_VM_INSTANCE_NAME \
--image-family=debian-9 \
--image-project=debian-cloud \
--machine-type=g1-small \
--scopes userinfo-email,cloud-platform \
--metadata-from-file startup-script=startup-script-search.sh \
--zone $ZONE \
--tags http-server \
--address ${STATIC_IP_SEARCH}
#indexer
gcloud compute instances create $INDEXER_VM_INSTANCE_NAME \
--image-family=debian-9 \
--image-project=debian-cloud \
--machine-type=g1-small \
--scopes userinfo-email,cloud-platform \
--metadata-from-file startup-script=startup-script-indexer.sh \
--zone $ZONE \
--tags http-server \
--address ${STATIC_IP_INDEXER}
#FIN
| true |
b132c37c36d05f9ab46a0f2c36eaff9abb98f1db
|
Shell
|
pklepikov/SRLinux_basics
|
/00.prepare_host.sh
|
UTF-8
| 419 | 2.96875 | 3 |
[] |
no_license
|
#! /bin/bash
# Copy the SRL image X.Y.Z-N.tar.xz file into the Project directory on the CentOS 8 host machine.
# TBD
# Copy the license.key into Project directory.
# TBD
# Load the docker image.
# - To load the image, the user must have root privilege, or be part of the docker group.
docker image load -i 20.6.1-286.tar.xz
# Turn off the Docker0 Tx checksum offload:
ethtool --offload docker0 tx off
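# Optional sanity check after loading (image name/tag depends on the release):
#   docker image ls | grep -i srlinux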
| true |
fe2d8d8990f15bf4c484774b933718796d21a85a
|
Shell
|
prariehill/sushi-card-gitbook-theme
|
/src/build.sh
|
UTF-8
| 979 | 2.765625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install fonts
bower install;
# Build global Sushi Card CSS
lessc "src/css/web.less" "_assets/website/sushi.css";
lessc "src/css/print.less" "_assets/ebook/sushi.css";
# Put fonts where GitBook can find them
# Lato
mkdir -p _assets/website/fonts/lato;
cp -R bower_components/lato/font/ _assets/website/fonts/lato/;
cp bower_components/lato/README.md _assets/website/fonts/lato;
mkdir -p _assets/ebook/fonts/lato;
cp -R bower_components/lato/font/ _assets/ebook/fonts/lato/;
# League Gothic
mkdir -p _assets/website/fonts/league-gothic;
cp -R bower_components/league-gothic/webfonts/leaguegothic-regular* _assets/website/fonts/league-gothic/;
cp bower_components/league-gothic/*.markdown _assets/website/fonts/league-gothic/;
mkdir -p _assets/ebook/fonts/league-gothic;
cp -R bower_components/league-gothic/webfonts/leaguegothic-regular* _assets/ebook/fonts/league-gothic/;
cp bower_components/league-gothic/*.markdown _assets/ebook/fonts/league-gothic/;
| true |
434be69408f27267e7b3fe5732d2e1d46e0e0be8
|
Shell
|
m-lab/epoxy-images
|
/configs/stage3_ubuntu/opt/mlab/bin/generate_network_config.sh
|
UTF-8
| 2,657 | 3.90625 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# generate_network_config.sh finds the epoxy.ip= kernel parameter, parses it and
# writes a networkd configuration file for the static IP to the named file.
# generate_network_config also sets the machine hostname.
OUTPUT=${1:?Please provide the name for writing config file}
# TODO: Modify ePoxy to recognize both IPv4 and IPv6 addresses when
# authenticating requests from nodes. For nodes in an environment where an
# upstream device may have IPv6 autoconfiguration/discovery turned on, the node
# may get an autoconf address which is not the one we use for the node.
# Additionally, when we finally configure IPv6 on nodes, if ePoxy is not
# configured to recognize both IPv4 and IPv6 addresses, then requests from
# legitimate nodes from IPv6 addresses will fail.
#
# Disable IPv6 autoconf.
echo "0" > /proc/sys/net/ipv6/conf/all/accept_ra
echo "0" > /proc/sys/net/ipv6/conf/all/autoconf
# Extract the epoxy.hostname parameter from /proc/cmdline
if [[ `cat /proc/cmdline` =~ epoxy.hostname=([^ ]+) ]]; then
HOSTNAME=${BASH_REMATCH[1]}
else
HOSTNAME="localhost"
fi
# IPv4
#
# Extract the epoxy.ipv4= parameter from /proc/cmdline.
#
# For example:
# epoxy.ipv4=4.14.159.86/26,4.14.159.65,8.8.8.8,8.8.4.4
if [[ `cat /proc/cmdline` =~ epoxy.ipv4=([^ ]+) ]]; then
FIELDS_IPv4=${BASH_REMATCH[1]}
else
# Use default values for VM testing.
FIELDS_IPv4="192.168.0.2,192.168.0.1,8.8.8.8,8.8.4.4"
fi
# Extract all helpful IPv4 fields.
ADDR_IPv4=$( echo $FIELDS_IPv4 | awk -F, '{print $1}' )
GATEWAY_IPv4=$( echo $FIELDS_IPv4 | awk -F, '{print $2}' )
DNS1_IPv4=$( echo $FIELDS_IPv4 | awk -F, '{print $3}' )
DNS2_IPv4=$( echo $FIELDS_IPv4 | awk -F, '{print $4}' )
# IPv6
#
# Extract the epoxy.ipv6= parameter from /proc/cmdline.
#
# For example:
# epoxy.ipv6=2001:1900:2100:2d::86/64,2001:1900:2100:2d::1,2001:4860:4860::8888,2001:4860:4860::8844
if [[ `cat /proc/cmdline` =~ epoxy.ipv6=([^ ]+) ]]; then
FIELDS_IPv6=${BASH_REMATCH[1]}
fi
# Extract all helpful IPv6 fields.
ADDR_IPv6=$( echo $FIELDS_IPv6 | awk -F, '{print $1}' )
GATEWAY_IPv6=$( echo $FIELDS_IPv6 | awk -F, '{print $2}' )
DNS1_IPv6=$( echo $FIELDS_IPv6 | awk -F, '{print $3}' )
DNS2_IPv6=$( echo $FIELDS_IPv6 | awk -F, '{print $4}' )
# Note, we cannot set the hostname via networkd. Use hostnamectl instead.
hostnamectl set-hostname ${HOSTNAME}
# TODO: do not hardcode /26.
# TODO: do not hardcode eth0.
cat > ${OUTPUT} <<EOF
[Match]
Name=eth0
[Network]
# IPv4
Address=$ADDR_IPv4
Gateway=$GATEWAY_IPv4
DNS=$DNS1_IPv4
DNS=$DNS2_IPv4
# IPv6
Address=$ADDR_IPv6
Gateway=$GATEWAY_IPv6
DNS=$DNS1_IPv6
DNS=$DNS2_IPv6
IPv6AcceptRA=no
EOF
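# For illustration only: with the example kernel parameters shown above
# (epoxy.ipv4=4.14.159.86/26,... and epoxy.ipv6=2001:1900:2100:2d::86/64,...),
# the generated networkd file would contain:
#
#   [Match]
#   Name=eth0
#   [Network]
#   # IPv4
#   Address=4.14.159.86/26
#   Gateway=4.14.159.65
#   DNS=8.8.8.8
#   DNS=8.8.4.4
#   # IPv6
#   Address=2001:1900:2100:2d::86/64
#   Gateway=2001:1900:2100:2d::1
#   DNS=2001:4860:4860::8888
#   DNS=2001:4860:4860::8844
#   IPv6AcceptRA=no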
| true |
9299295a94c2a201fe6b49cca01bcc0d2f446bb8
|
Shell
|
zhangerjun/TBSSVBA
|
/rDTIDA_scripts/Segmentation/Shell_Scripts/applyTransformToSegmentation.sh
|
UTF-8
| 550 | 3.140625 | 3 |
[] |
no_license
|
#!/bin/bash
SEGMENTATION=$1
TRANSFORM_MATs_TEMP=(`ls $2/*.mat`)
TRANSFORM_MAT_ATLAS=$3
INV_TRANSFORMs_TEMP=(`ls $4/*InverseWarp.nii.gz`)
INV_TRANSFORM_ATLAS=$5
REF=$6
# make sure the output directory exists
mkdir -p ./segmentation/scmaps_seg
for (( i=0; i<${#TRANSFORM_MATs_TEMP[@]}; i++ )); do
FILENAME=`basename ${TRANSFORM_MATs_TEMP[i]} | cut -d '.' -f 1`
OUTPUTNAME=./segmentation/scmaps_seg/seg_$FILENAME.nii
# run the transform directly (with quoting) rather than expanding an unquoted command string
WarpImageMultiTransform 3 "$SEGMENTATION" "$OUTPUTNAME" --use-NN -i "${TRANSFORM_MATs_TEMP[i]}" -i "$TRANSFORM_MAT_ATLAS" "${INV_TRANSFORMs_TEMP[i]}" "$INV_TRANSFORM_ATLAS" -R "$REF"
done
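# Example invocation (all paths illustrative):
#   ./applyTransformToSegmentation.sh atlas_seg.nii ./mats atlas_affine.mat \
#       ./warps atlas_InverseWarp.nii.gz subject_ref.nii
# Argument order: segmentation, dir of per-subject .mat transforms, atlas .mat,
# dir of per-subject *InverseWarp.nii.gz files, atlas inverse warp, reference image.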
| true |
9a31091d649f5347b66c61bb31f7985a8ba7cb00
|
Shell
|
argustelecom/opsWorkshop
|
/.linuxbuild/linux_build.sh
|
UTF-8
| 8,490 | 3.546875 | 4 |
[] |
no_license
|
#!/bin/bash
# Script to build Argus Application Server Ops in Linux environment
# !!! Must be executed from the .linuxbuild directory
# TASK-87917, v.semchenko (31.10.2017): this script is only used to prepare the app server and the DB updates.
# IMPORTANT!!! If you adapt this script for anything else, document when and where it is used!!!
export BUILDDIR=`pwd`
WORKSPACE="../workspace"
b_err="Build ERROR!"
# Finish the build
end_build () {
# ui-tests builds must not leave active processes behind
if [ $mode == "ui-tests" ] || [ $mode == "ui-tests-pa" ]; then
# If the app server process is still hanging, kill it, otherwise gitlab-runner never finishes
pidRunAppServer=$(ps -ef | grep 'gitlab-runner' | grep "${branch}/${wildflyPackage}" | awk -F " " '{print $2}')
# Find the personal area app server and terminate its process
pidRunAppServerPA=$(ps -ef | grep 'gitlab-runner' | grep "${branch}/${wildflyPackage}-PA" | awk -F " " '{print $2}')
if [ ! "$pidRunAppServerPA" == "" ]; then
kill $pidRunAppServerPA;
fi
if [ ! "$pidRunAppServer" == "" ]; then
kill $pidRunAppServer;
fi
echo "Stopping current Xvfb session";
/etc/init.d/xvfb_start_script.sh stop $buildcounter $branch
[ $? -ne 0 ] && error_all "error: xvfb_start_script.sh stop $buildcounter $branch"
# Make sure all X sessions are killed
echo "Killing all virtual framebuffers"
killall Xvfb || true
rm -Rf $HOME/xvfb/*
fi
cd $BUILDDIR;
}
# Error handler
error_all () {
# Throw error both to logs
# Call it like this:
# error_all "Unable to do this kind of stuff"
printf "\n%s\n" "ERROR: $1"
end_build
exit 1
}
# Check the Java version
echo "Checking version Java"
version=$("$JAVA_HOME"/bin/java -version 2>&1 | awk -F '"' '/version/ {print $2}')
echo "java version $version"
if [[ "$version" < "1.8" ]]; then
error_all "Error: version JAVA < 1.8"
else
echo "JAVA_HOME=$JAVA_HOME . OK!"
fi
# Stop xvfb services if for some reason they were left over from a previous build
# for the record: we do not have the rights to kill another user's processes
echo "Getting rid of traces left by Xvfb"
killall Xvfb || true
rm -Rf $HOME/xvfb/*
echo " --- Setting up build environment --- "
# Setting up local maven repo for this specific version (branch).
export MAVEN_OPTS="-Xmx768m -Dfile.encoding=UTF-8 -Dmaven.repo.local=${HOME}/maven_repo/${branch}"
# Prepare INSTALL_PATH
wildflyVars=(wildflyVersion wildflyPackage installPath)
# TASK-86433, v.semchenko (25.08.2017): the version is taken from the argus.teamcity.server-package setting in work.properties
wildflyVersion=`sed -n 's/argus\.teamcity\.server\-package=wildfly\-//p' ${WORKSPACE}/work.properties`
wildflyPackage="wildfly-${wildflyVersion}"
installPath="${HOME}/servers/$branch/$wildflyPackage/"
# Check wildfly var for installpath
for var in ${wildflyVars[@]}; do
if [ -z "${!var}" ]; then
error_all "Variable $var is not set"
fi
done
cd $BUILDDIR
if [ ! -d $installPath ]; then
echo "WildFly directory does not exist, will create when install distrib."
else
# just in case, clean the future install directory
echo "WildFly directory exists. Cleaning directory."
rm -rf $installPath/*
[ $? -ne 0 ] && error_all "Unknown error"
fi
# Prepare configurations workspace/my.properties
echo "Prepare configuration $WORKSPACE/my.properties"
echo "INSTALL_PATH=$installPath" | cat > $WORKSPACE/my.properties
echo 'argus.app.memory.max-size=3600' | cat >> $WORKSPACE/my.properties
echo 'argus.app.debug-mode.enabled=true' | cat >> $WORKSPACE/my.properties
# на хосте также есть агент teamcity с СП под ui-теcты, для перестраховки взял смещение 10
echo "jboss.socket.binding.port-offset=10" | cat >> $WORKSPACE/my.properties
majorVersion=`sed -n 's/ops\.app\.version=//p' ${WORKSPACE}/work.properties`
echo "argus.app.build-number=$majorVersion.$CI_PIPELINE_ID" | cat >> $WORKSPACE/my.properties
echo "jboss.bind.address=127.0.0.1" | cat >> $WORKSPACE/my.properties
echo "argus.app.admin.user=developer" | cat >> $WORKSPACE/my.properties
echo "argus.app.admin.pass=developer" | cat >> $WORKSPACE/my.properties
echo "argus.mail.enabled=true" | cat >> $WORKSPACE/my.properties
echo "argus.mail.smtp.user=ops.noreply@argustelecom.ru" | cat >> $WORKSPACE/my.properties
echo "argus.mail.smtp.pass=DutyFr33!" | cat >> $WORKSPACE/my.properties
echo "argus.mail.smtp.port=25" | cat >> $WORKSPACE/my.properties
echo "argus.mail.smtp.host=mail.argustelecom.ru" | cat >> $WORKSPACE/my.properties
echo "argus.mail.smtp.ssl.enabled=false" | cat >> $WORKSPACE/my.properties
echo "argus.mail.smtp.starttls.enabled=false" | cat >> $WORKSPACE/my.properties
echo "argus.mail.smtp.auth.enabled=true" | cat >> $WORKSPACE/my.properties
if [ $mode == "ui-tests" ] || [ $mode == "ui-tests-pa" ]; then
# the variable with the Ops DB name is declared in .gitlab-ci.yml
echo "Using database DB_NAME: $DB_NAME"
echo "Prepare configuration argus.db.*"
# the variable with the Box DB host address is declared in .gitlab-ci.yml
echo "argus.db.address=$HostDB" >> $WORKSPACE/my.properties
echo "argus.db.port=5432" >> $WORKSPACE/my.properties
echo "argus.db.name=$DB_NAME" >> $WORKSPACE/my.properties
echo "argus.db.user=argus_sys" >> $WORKSPACE/my.properties
echo "argus.db.pass=vk38gwwm" >> $WORKSPACE/my.properties
if [ $mode == "ui-tests-pa" ]; then
echo ' -- Install server Ops -- '
build_number=`sed -n 's/argus\.app\.build-number=//p' ${WORKSPACE}/my.properties`
name_distr="ops-dist-${build_number}.jar"
if [ ! -f "$WORKSPACE/server-conf/ops-dist/target/$name_distr" ]; then
echo "Not found $name_distr in directory $WORKSPACE/server-conf/ops-dist/target"
error_all "Install application server: not found distrib"
else
echo "Found distrib $name_distr"
fi
cd $BUILDDIR/$WORKSPACE/server-conf/ops-dist/target
java -jar $name_distr -options $BUILDDIR/$WORKSPACE/my.properties
[ $? -ne 0 ] && error_all "Install application server failed"
echo ' -- Start server Ops -- '
cd $BUILDDIR/$WORKSPACE/server-app/inf-modules/webui
mvn pre-integration-test -Pbefore-ui-tests-build-start-appserver
[ $? -ne 0 ] && error_all "Start server Ops"
cd $BUILDDIR
echo " -- Prepare configuration for Personal Area -- "
installPath="${HOME}/servers/${branch}/${wildflyPackage}-PA/";
sed -i -e "s|INSTALL_PATH=.*|INSTALL_PATH=${installPath}|1" $WORKSPACE/my.properties
# >> the host also runs a TeamCity agent with an app server for ui-tests, so an offset of 10 was taken as a safety margin,
# and the personal area app server gets port-offset=20
sed -i 's/jboss.socket.binding.port-offset=.*/jboss.socket.binding.port-offset=20/1' $WORKSPACE/my.properties
echo "argus.security.login-module=ru.argustelecom.ops.inf.login.PersonalAreaLoginModule" >> $WORKSPACE/my.properties
echo "argus.test.ui.remotearm=true" >> $WORKSPACE/my.properties
echo "contextRoot=" >> $WORKSPACE/my.properties
# ! remember to account for the port offset of the "regular" Ops app server in the argus.test.provider.address setting
echo "argus.test.provider.address=127.0.0.1:8090" >> $WORKSPACE/my.properties
fi
fi
echo "Result configuration $WORKSPACE/my.properties: "
cat $WORKSPACE/my.properties | grep -Ev "(^#|^$)"
# selenium tests run only in the ui-tests build
if [ $mode == "ui-tests" ] || [ $mode == "ui-tests-pa" ]; then
# We use xvfb to run selenium tests on headless environment
echo "Starting up Xvfb server";
/etc/init.d/xvfb_start_script.sh start $buildcounter $branch
[ $? -ne 0 ] && error_all "error: xvfb_start_script.sh start $buildcounter $branch"
export DISPLAY=:$buildcounter
fi
# Deprecated. Convert shell scripts from dos to unix format
dos2unix make_all.sh && ./make_all.sh $mode
[ $? -ne 0 ] && error_all "error execute make_all.sh"
end_build
| true |
58749a71f0a80ef838c6a893aa7604bee978d73c
|
Shell
|
minjun-jang/ocpinstall
|
/00.prepare/05_chrony_setting.sh
|
UTF-8
| 1,188 | 2.953125 | 3 |
[] |
no_license
|
#!/bin/sh
ABSOLUTE_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "${ABSOLUTE_PATH}/config/openshift.env"
echo -e "\033[32m[S]=============================================================================\033[0m"
echo -e "\033[46m@@@[S]_[YUM INSTALL CHRONY] ==> ${CHRONY_SERVER}\033[0m"
systemctl stop ntpd
systemctl disable ntpd
timedatectl set-timezone Asia/Seoul
timedatectl status
yum -y remove ntp
yum -y install chrony
sed -i "s/^server/#server/g" /etc/chrony.conf
sed -i "s/^allow/#allow/g" /etc/chrony.conf
sed -i "s/^local/#local/g" /etc/chrony.conf
sed -i'' -r -e "/#server\ 3.rhel.pool.ntp.org\ iburst/a\server\ bastion.${DNS_DOMAIN}\ iburst" /etc/chrony.conf
sed -i'' -r -e "/^#allow\ 192.168.0.0\/16/a\allow\ ${CHRONY_ALLOW}" /etc/chrony.conf
sed -i'' -r -e "/^#local\ stratum\ 10/a\local\ stratum\ ${CHRONY_STRATUM}" /etc/chrony.conf
firewall-cmd --permanent --add-port=123/udp
firewall-cmd --reload
systemctl enable chronyd
systemctl restart chronyd
chronyc sources -v
chronyc tracking
echo -e "\033[36m@@@[E]_[YUM INSTALL CHRONY] ==> ${CHRONY_SERVER}\033[0m"
echo -e "================================================================================[E]"
| true |
90dfbea16a8fc624736a5c61c6522ff59515b442
|
Shell
|
ajsalminen/dotfiles
|
/shell/functions/ag_truncate_lines.sh
|
UTF-8
| 577 | 3.40625 | 3 |
[
"MIT",
"BSD-2-Clause"
] |
permissive
|
# page only when needed, set term to truncate lines for minified code etc.
# default to insensitive matches and include a global agignore for minified exts.
# Some info http://unix.stackexchange.com/questions/109211/preserving-color-output-with-cut
ag_truncate_lines() {
tput rmam # turn off automatic margin mode.
# reset colors at the beginning of line because rmam can cut some seqs out.
ag --color -i --path-to-ignore=~/.grepignore "$@" | sed "s/^/$(tput sgr0)/" | less -XFr
local ret="$?"
tput smam # turn on automatic margin mode.
return "$ret"
}
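# Typical usage is to shadow the plain command so every search pages nicely:
#   alias ag=ag_truncate_lines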
| true |
316593ccee6d1cb7cce06ad2302e1024726d5d17
|
Shell
|
Sunil2914/network
|
/mirror/create_mirror_misc.sh
|
UTF-8
| 1,701 | 3.25 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -ev
export DISTRO=$(cat /etc/*-release|grep ^ID\=|awk -F\= {'print $2'}|sed s/\"//g)
if [[ "${DISTRO}" == "ubuntu" ]]; then
apt-get install -y apt-transport-https curl
fi
[[ -z ${MIRROR_BUILD_DIR} ]] && export MIRROR_BUILD_DIR=${PWD}
[[ -z ${MIRROR_OUTPUT_DIR} ]] && export MIRROR_OUTPUT_DIR=${PWD}/mirror-dist
STATIC_FILE_LIST=$(<${MIRROR_BUILD_DIR}/dependencies/pnda-static-file-dependencies.txt)
PLUGIN_LIST=$(<${MIRROR_BUILD_DIR}/dependencies/pnda-logstash-plugin-dependencies.txt)
STATIC_FILE_DIR=$MIRROR_OUTPUT_DIR/mirror_misc
mkdir -p $STATIC_FILE_DIR
cd $STATIC_FILE_DIR
echo "$STATIC_FILE_LIST" | while read STATIC_FILE
do
echo $STATIC_FILE
curl -LOJf --retry 5 --retry-max-time 0 $STATIC_FILE
done
cat SHASUMS256.txt | grep node-v6.10.2-linux-x64.tar.gz > node-v6.10.2-linux-x64.tar.gz.sha1.txt
sha512sum je-5.0.73.jar > je-5.0.73.jar.sha512.txt
sha512sum Anaconda2-4.0.0-Linux-x86_64.sh > Anaconda2-4.0.0-Linux-x86_64.sh.sha512.txt
if [ "x$DISTRO" == "xrhel" -o "x$DISTRO" == "xcentos" ]; then
yum install -y java-1.7.0-openjdk
elif [ "x$DISTRO" == "xubuntu" ]; then
apt-get install -y default-jre
fi
cd /tmp
curl -LOJf --retry 5 --retry-max-time 0 https://artifacts.elastic.co/downloads/logstash/logstash-5.2.2.tar.gz
tar zxf logstash-5.2.2.tar.gz
rm logstash-5.2.2.tar.gz
cd logstash-5.2.2
# work around bug introduced in 5.1.1: https://discuss.elastic.co/t/5-1-1-plugin-installation-behind-proxy/70454
JARS_SKIP='true' bin/logstash-plugin install $PLUGIN_LIST
bin/logstash-plugin prepare-offline-pack $PLUGIN_LIST
chmod a+r logstash-offline-plugins-5.2.2.zip
mv logstash-offline-plugins-5.2.2.zip $STATIC_FILE_DIR/logstash-offline-plugins-5.2.2.zip
| true |
60687989a9899faf88ddbe93c4d98b960a657be1
|
Shell
|
josh43/RestBuilder
|
/RunAndUpdateScript.sh
|
UTF-8
| 417 | 2.625 | 3 |
[] |
no_license
|
#!/bin/bash
#make sure to install and include express and multer in your package.json
make
./main
theSource=${PWD}/Rest/
objcTarget="/Users/josh/Documents/CS Projects/RestExample/RestExample/"
jsTarget="/Users/josh/Documents/CS Projects/RestGeneratorTester/routes/"
echo "Copying files at $theSource to $objcTarget"
cp -f -R "${theSource}/OBJC" "${objcTarget}"
cp -f -R "${theSource}/JS/" "${jsTarget}"
make clean
| true |
265334cd9cfebe6c90c6dfb6d4103701008b1bfa
|
Shell
|
neh/myconf
|
/bin/polybar.sh
|
UTF-8
| 873 | 3.296875 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# bash is required for the [[ ]] tests below
laptop_display='eDP-1'
# Terminate already running bar instances
killall -q polybar
# Wait until the processes have been shut down
while pgrep -x polybar >/dev/null; do sleep 1; done
# Get network interface names
export INTERFACE_WIRED=$(ip link show | cut -d' ' -f2 | tr -d ':' | grep '^en')
export INTERFACE_WIRELESS=$(ip link show | cut -d' ' -f2 | tr -d ':' | grep '^wl')
primary=$(xrandr | grep ' connected primary' | cut -d' ' -f1)
for m in $(polybar --list-monitors | cut -d":" -f1); do
if [[ "$m" == "$primary" ]]; then
# the --reload option doesn't seem to work under i3, but
# doesn't hurt either
if [[ "$m" == "$laptop_display" ]]; then
MONITOR=$m polybar primary-laptop &
else
MONITOR=$m polybar default &
fi
else
MONITOR=$m polybar secondary &
fi
done
| true |
ceb4786295f1fb9dd1e0aa4567fbfdcef1b6f0c0
|
Shell
|
mrstepanovic/bradmci
|
/tools/bak/old.fc_analysis
|
UTF-8
| 7,941 | 3.546875 | 4 |
[] |
no_license
|
#!/bin/bash
# source matlab
source /autofs/cluster/animal/scripts/matlab/matlab_74
#
# For each subject, extracts time courses for each run
# and computes correlation coefficients using ROIs.
# Also generates a corresponding matrix graphic.
#
function usage {
echo " ______ _____ _ _ _ __ _______ _____ _____ "
echo "| ____/ ____| /\ | \ | | /\ | | \ \ / / ____|_ _|/ ____|"
echo "| |__ | | / \ | \| | / \ | | \ \_/ / (___ | | | (___ "
echo '| __|| | / /\ \ | . ` | / /\ \ | | \ / \___ \ | | \___ \ ~~~ version 1.2.1'
echo "| | | |____ / ____ \| |\ |/ ____ \| |____| | ____) |_| |_ ____) |"
echo "|_| \_____| /_/ \_\_| \_/_/ \_\______|_| |_____/|_____|_____/ "
echo ""
echo "REQUIRED:"
echo " -i <arg> Subject names"
echo " -r <arg> ROI file locations"
echo " -n <arg> ROI names, default ROI file name"
echo " -l <arg> Analysis label, e.g. 'DMN' (no spaces)"
echo ""
echo " Note: when using multiple arguments, place in quotes:"
echo " fc_analysis -i \"SUBJ1 SUBJ2 SUBJ3\" -r \"ROI1 ROI2\" -n \"PCC HIPPO\" -l 'PCC_HIPPO_FC'"
echo ""
echo "OR:"
echo " -I <file> File containing list of subjects"
echo " -R <file> File containing list of ROI files"
echo " -N <file> File containing list of ROI names"
echo " -l <arg> Analysis label, e.g. 'DMN' (no spaces)"
echo ""
echo "MORE OPTIONS:"
echo " -u <file> Filename of run list (as found in <SUBJECT>/<file>)"
echo " -k Gaussian smoothing BOLD to process, default 6mm"
echo " -e Force timecourse extraction"
echo " -t Do not calculate subject averages"
echo " -s Do not calculate group averages"
echo " -m Make images of coefficent matrices"
echo " -p <arg> Only show coefficients where p < arg. (default is .05)."
echo " All other values are converted to 0.000 on matrix plots"
echo " -o <arg> Output directory, default ./GROUP_STATS/<user>/FC"
echo " -h Help"
echo ""
}
ROINAMES=""
RD="$PWD/GROUP_STATS/$(whoami)/FC";
PVAL="0.050";
SMOOTH=6;
while getopts "i:r:I:R:n:N:u:l:o:p:k:ehtsm" o ; do
case $o in
i ) SUBJECTS=$OPTARG;;
I ) SUBJECTS=$(cat $OPTARG | xargs);;
r ) ROIS=$OPTARG;;
R ) ROIS=$(cat $OPTARG | xargs);;
n ) ROINAMES=$OPTARG;;
N ) ROINAMES=$(cat $OPTARG | xargs);;
k ) SMOOTH=$OPTARG;;
e ) FORCEEXTRACT=1;;
p ) PVAL=$OPTARG;;
l ) AL=$OPTARG;;
o ) RD=$OPTARG;;
t ) SKIPSUB=1;;
s ) SKIPGRP=1;;
m ) MAKEIMG=1;;
u ) RUNLISTFILE=$OPTARG;;
h ) usage;
exit 0;;
esac
done
if [ $# -eq 0 ]; then
usage; exit 0;
fi
resultsdir=${RD}
if [ ${#SUBJECTS} -lt 1 ]; then
echo "Error: Please provide subjects.";
usage; exit 0;
fi
if [ ${#ROIS} -lt 1 ]; then
echo "Error: Please provide at least 2 ROIs."
usage; exit 0;
fi
if [ ! $AL ]; then
echo "Error: Please specify an analysis label, using -l.";
exit 0;
fi
if [ $SMOOTH -eq 0 ]; then
SMOOTH_HALF_F=0
else
SMOOTH_HALF_F=$( echo 'scale=15;(.4412712/('$SMOOTH'/100))' | bc | awk '{print int($1)}' );
fi
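# Note (inferred, not documented): the constant above appears to map the FWHM
# smoothing size in mm onto the integer code used in the *_g<N>_bpss_resid
# filenames below, e.g. SMOOTH=6 -> int(0.4412712/0.06) = 7.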
echo "SUBJECTS: "$SUBJECTS;
echo ""
echo "ROIS: "$ROIS;
echo ""
echo "ROI NAMES: $ROINAMES";
echo ""
function extract {
n=1;
comm="addpath('/cluster/animal/scripts/matlab/'); all=[]; "
mkdir -p ${resultsdir}/${AL}
mkdir -p ${resultsdir}/${AL}/timecourses
for subject in $SUBJECTS; do
comm="${comm} subj=[];"
fc=$( ls -d ${subject}/bold/0*/ | wc | awk '{print $1}' )
if [ $fc == 0 ]; then
echo -e "Error: No BOLD runs found for subject ${subject}."
exit
fi
### CHANGES FOR 1.2 MADE BELOW ###
## Concatenate the timecourses across all runs up front (new in 1.2), so each ROI is extracted once per subject.
ppstr=$(/cluster/animal/scripts/NRG-fc/functions/fc_ppstr $subject)
RUNLISTtxt=${subject}/fcMRI/${subject}_${ppstr}_g${SMOOTH_HALF_F}_bpss_resid.txt
if [ "$RUNLISTFILE" != "" ]; then
if [ ! -r $subject/$RUNLISTFILE ]; then
echo "*** No runlist file ( $RUNLISTFILE ) found for ${subject}! Skipping subject... ***"
continue;
else
echo "using specific runlist file... $subject/$RUNLISTFILE"
t=$(cat $subject/$RUNLISTFILE | xargs);
echo " $t"
ga=""   # reset per subject, otherwise the grep patterns accumulate across subjects
for x in $t; do
ga="$ga-e /bold/$x/ "
done
cat $RUNLISTtxt | grep -i $ga > /tmp/${subject}.${RUNLISTFILE}.$$.txt
RUNLIST=/tmp/${subject}.${RUNLISTFILE}.$$.txt
fi
else
RUNLIST=$RUNLISTtxt
fi
format=""
for x in $(cat $RUNLIST); do
if [ ! -e $x".nii.gz" ]; then
echo "Error: cannot find file $x.nii.gz"
exit;
else
tp=$(fslnvols $x".nii.gz")
format="${format}0x${tp}+"
fi
done
comm="${comm} tc=[];"
seednames=""
for seed in $ROIS; do
seedname=$(basename ${seed%%.nii*})
seednames="$seednames '$seedname', "
fileloc=${resultsdir}/${AL}/timecourses/${subject}_${seedname}
if [ ! -r ${fileloc}.voxt.dat ] || [ $FORCEEXTRACT ]; then
echo "Extracting timecourse for subject: $subject, ROI: $seedname"
qnt_nifti -s -list $RUNLIST ${seed} | awk '$1 !~/#/ {print $2}' > ${fileloc}.voxt.dat
if [ ! -r ${fileloc}.voxt.dat ]; then
echo "Error: Cannot find ${fileloc}.voxt.dat. Trying again...";
qnt_nifti -s -list $RUNLIST ${seed} | awk '$1 !~/#/ {print $2}' > ${fileloc}.voxt.dat
fi
else
echo "Using previous timecourse file -- subject: $subject, ROI: $seedname"
fi
if [ ! -r ${fileloc}.voxt.dat ]; then
echo "Error: Cannot find ${fileloc}.voxt.dat. Try forcing timecourse extraction using -e."; exit;
fi
if [ "$(cat ${fileloc}.voxt.dat | wc -w)" == "0" ]; then
echo "Error: ${fileloc}.voxt.dat is empty. Forcing extraction...";
qnt_nifti -s -list $RUNLIST ${seed} | awk '$1 !~/#/ {print $2}' > ${fileloc}.voxt.dat
fi
comm="${comm} tcf= load('${fileloc}.voxt.dat'); tc=[tc tcf];\n"
done
if [ $(echo ${ROINAMES} | wc -w) -eq $(echo ${ROIS} | wc -w) ]; then
seednames="'${ROINAMES// /', '}'";
fi
# calculate coef
comm="${comm} [fc, pval] = corrcoef(tc); fcz = real(r_2_fisher_z(fc)); \n"
comm="${comm} all(:,:,${n}) = fcz; allr(:,:,${n}) = fc; \n"
### END CHANGES TO 1.2 ###
if [ ! $SKIPSUB ]; then
fname="${resultsdir}/${AL}/${subject}.dat"
comm="${comm} save ${fname} fcz -ascii;\n"
comm="${comm} fprintf('writing: ${fname}\\\n'); \n"
if [ $MAKEIMG ]; then
comm="${comm} cfig = fcmatrix(fcz, {${seednames}}, '${AL}: ${subject}');\n"
fname="${resultsdir}/${AL}/${subject}.png"
comm="${comm} saveas(cfig, '${fname}', 'png');\n"
comm="${comm} fprintf('writing: ${fname}\\\n'); \n"
fi
fi
n=$(($n+1))
done
if [ ! $SKIPGRP ]; then
fname="${resultsdir}/${AL}/${AL}.dat"
fnamer="${resultsdir}/${AL}/${AL}_r.dat"
fmat="${resultsdir}/${AL}/${AL}.mat"
comm="${comm} save('${fmat}', 'all'); \n"
comm="${comm} allm = mean(all,3); save ${fname} allm -ascii;\n"
comm="${comm} allmr = mean(allr,3); save ${fnamer} allmr -ascii;\n"
comm="${comm} fprintf('writing: ${fname}\\\n'); \n"
fname="${resultsdir}/${AL}/${AL}_sd.dat"
comm="${comm} allstd = std(all,0,3); save ${fname} allstd -ascii;\n"
comm="${comm} fprintf('writing: ${fname}\\\n'); \n"
if [ $MAKEIMG ]; then
comm="${comm} cfig = fcmatrix(allm, {${seednames}}, '${AL}');\n"
fname="${resultsdir}/${AL}/${AL}.png"
comm="${comm} saveas(cfig, '${fname}', 'png');\n"
comm="${comm} fprintf('writing: ${fname}\\\n'); quit; \n"
fi
fi
echo -e $comm
cmdfile="$(whoami)$$.m"
echo -e $comm > $cmdfile
run_matlab -nosplash -nodesktop -nojvm -r "${cmdfile%%.m}; exit"
rm $cmdfile
}
extract;
if [ -e ${resultsdir}/${AL}/ ]; then
cd ${resultsdir}/${AL}/;
/cluster/animal/scripts/NRG-fc/functions/flatten_matrices "$SUBJECTS" "$ROINAMES" "$(basename ${AL})"
fi
echo "Finished!"
| true |
50fa2c1730aefd8637cd32addbffe4cfbb8a7008
|
Shell
|
mkg20001/nix
|
/cron/clean-node-modules.sh
|
UTF-8
| 296 | 3.53125 | 4 |
[] |
no_license
|
#!/bin/bash
set -eo pipefail
if [ -z "$1" ]; then
CMD=$(readlink -f "$0")
find /home -iname node_modules -mtime +14 -prune -exec bash $CMD {} \;
else
if [ -z "$(echo "$1" | tr "/" "
" | grep "^\\.")" ]; then
echo " -- RM $1 -- " >&2
rm -rf "$1"
else
echo "keep $1"
fi
fi
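# Hypothetical dry run: list candidate directories (age filter only, without
# the hidden-path check) before letting the script delete anything:
#   find /home -iname node_modules -mtime +14 -prune -print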
| true |
0d6084cbf565237015bb7667fad77edede99bdae
|
Shell
|
JuanDAC/test_others_simple_shell
|
/cd_multiargs
|
UTF-8
| 371 | 3.046875 | 3 |
[] |
no_license
|
#!/bin/bash
command="cd /bin /tmp slkfjl"
# Stop any running shells
stop_shell
# Run command
echo $command$'\n'pwd | "$SHELL" > "$YOUR_OUTPUT" 2> "$YOUR_ERROR" &
echo $command$'\n'pwd | sh > "$EXPECTED_OUTPUT" 2> "$EXPECTED_ERROR"
# Wait for one second
"$SLEEP" "$SLEEPSECONDS"
# Check the output
check_output
# Check the errors
check_error
# Clean up
stop_shell
| true |
b49e467d4f01b32bfaaa1f2809e1082f4ffd7d24
|
Shell
|
NDari/dotfiles
|
/scripts/setup_laptop.sh
|
UTF-8
| 1,787 | 3.734375 | 4 |
[] |
no_license
|
#!/bin/sh
# Modified version of thoughtbots script
# https://github.com/thoughtbot/laptop/blob/master/mac
fancy_echo() {
local fmt="$1"; shift
printf "\n$fmt\n" "$@"
}
trap 'ret=$?; test $ret -ne 0 && printf "failed\n\n" >&2; exit $ret' EXIT
set -e
if [ ! -d "$HOME/bin/" ]; then
mkdir "$HOME/bin"
fi
if [ ! -f "$HOME/.zshrc" ]; then
touch "$HOME/.zshrc"
fi
HOMEBREW_PREFIX="/usr/local"
if [ -d "$HOMEBREW_PREFIX" ]; then
if ! [ -r "$HOMEBREW_PREFIX" ]; then
sudo chown -R "$LOGNAME:admin" /usr/local
fi
else
sudo mkdir "$HOMEBREW_PREFIX"
sudo chflags norestricted "$HOMEBREW_PREFIX"
sudo chown -R "$LOGNAME:admin" "$HOMEBREW_PREFIX"
fi
if ! command -v brew >/dev/null; then
fancy_echo "Installing Homebrew ..."
curl -fsS \
'https://raw.githubusercontent.com/Homebrew/install/master/install' | ruby
fi
fancy_echo "Installing brew packages"
brew update --force # https://github.com/Homebrew/brew/issues/1151
brew bundle --file=$HOME/dotfiles/Brewfile
update_shell() {
local shell_path;
shell_path="$(which zsh)"
fancy_echo "Changing your shell to zsh ..."
if ! grep "$shell_path" /etc/shells > /dev/null 2>&1 ; then
fancy_echo "Adding '$shell_path' to /etc/shells"
sudo sh -c "echo $shell_path >> /etc/shells"
fi
sudo chsh -s "$shell_path" "$USER"
}
case "$SHELL" in
*/zsh)
if [ "$(which zsh)" != '/usr/local/bin/zsh' ] ; then
update_shell
fi
;;
*)
update_shell
;;
esac
cd $HOME
curl -L https://github.com/robbyrussell/oh-my-zsh/raw/master/tools/install.sh | sh
if [ ! -d ".config/nvim" ]; then
mkdir -p ".config/nvim"
fi
ln -s dotfiles/.ctags
ln -s dotfiles/.tmux.conf
ln -s dotfiles/.zshrc
ln -s dotfiles/.zlogout
ln -s dotfiles/.inputrc
cd .config/nvim
ln -s ~/dotfiles/init.vim .
| true |
f03f45e5735d395f886c29f3b6c26cbc489749ea
|
Shell
|
it4ng/sendemail
|
/sendmail
|
UTF-8
| 491 | 2.671875 | 3 |
[] |
no_license
|
#!/bin/sh
[ -z "$MAIL_RECIPIENT" ] && echo "MAIL_RECIPIENT is not set, exiting" && exit 1
[ -z "$MAIL_SENDER" ] && echo "MAIL_SENDER is not set, exiting" && exit 1
[ -z "$MAIL_SUBJECT" ] && echo "MAIL_SUBJECT is not set, exiting" && exit 1
[ -z "$MAIL_MESSAGE" ] && echo "MAIL_MESSAGE is not set, exiting" && exit 1
[ -z "$MAIL_SERVER" ] && echo "MAIL_SERVER is not set, exiting" && exit 1
sendemail -f "$MAIL_SENDER" -t "$MAIL_RECIPIENT" -u "$MAIL_SUBJECT" -m "$MAIL_MESSAGE" -s "$MAIL_SERVER"
| true |
6db955d58df07c34a69cc97d30286ce64453e580
|
Shell
|
prasanthkumar3103/My-AWS-Handy-Scripts
|
/get_awsInstanceId.sh
|
UTF-8
| 285 | 2.984375 | 3 |
[] |
no_license
|
#!/bin/bash
# Author: Ajaya Kumar Loya
# This script will give you the instanceID when executed.
# Note this need be running on a Instance of which you need Instance ID.
export INSTANCE_ID=`curl --silent http://169.254.169.254/latest/meta-data/instance-id`
echo "Instance ID => "${INSTANCE_ID}
| true |
fe898ea27e375d9f41533b3115089f81617a4d0d
|
Shell
|
xianlimei/zhuxianB30
|
/scripts/build_functions/.svn/text-base/device_cpu.svn-base
|
UTF-8
| 1,949 | 3.1875 | 3 |
[] |
no_license
|
#!/bin/bash
case $1 in
uag|ips|fw|utm|rt|dpx|dlb|bsrg|dpx19k|nsw)
CURRENT_BUILD_CPU='mips-xlr'
CURRENT_BUILD_ARCH='mips'
;;
mips)
CURRENT_BUILD_CPU='mips-xlr'
CURRENT_BUILD_ARCH='mips'
;;
uag64|ips64|fw64|utm64|dpx64|dlb64)
CURRENT_BUILD_CPU='mips64-xlr'
CURRENT_BUILD_ARCH='mips64'
;;
mips64)
CURRENT_BUILD_CPU='mips64-xlr'
CURRENT_BUILD_ARCH='mips64'
;;
uagxlp|ipsxlp|fwxlp|utmxlp|dpxxlp|dlbxlp)
CURRENT_BUILD_CPU='mips64-xlp'
CURRENT_BUILD_ARCH='mips64'
;;
mips_xlp|mipsxlp|mips64_xlp|mips64xlp)
CURRENT_BUILD_CPU='mips64-xlp'
CURRENT_BUILD_ARCH='mips64'
;;
srg|bsw|mips_bcm|mips-bcm)
CURRENT_BUILD_CPU='mips-bcm'
CURRENT_BUILD_ARCH='mips_bcm'
;;
lsw|ppc|powerpc)
CURRENT_BUILD_CPU='powerpc'
CURRENT_BUILD_ARCH='powerpc'
;;
x86|i386)
CURRENT_BUILD_CPU='x86'
CURRENT_BUILD_ARCH='x86'
;;
*)
echo "Usage $0 [uag|fw|rt|ips|utm|dpx|dlb|lsw|bsw|nsw|srg|bsrg|x86]"
echo ""
exit 1
;;
esac
case $1 in
uag|uag64|uagxlp)
BUILD_PRODUCT_TYPE='uag'
;;
ips|ips64|ipsxlp)
BUILD_PRODUCT_TYPE='ips'
;;
fw|fw64|fwxlp)
BUILD_PRODUCT_TYPE='fw'
;;
utm|utm64|utmxlp)
BUILD_PRODUCT_TYPE='utm'
;;
rt)
BUILD_PRODUCT_TYPE='rt'
;;
dpx|dpx64|dpxxlp)
BUILD_PRODUCT_TYPE='dpx'
;;
dpx19k)
BUILD_PRODUCT_TYPE='dpx19k'
;;
dlb)
BUILD_PRODUCT_TYPE='dlb'
;;
lsw)
BUILD_PRODUCT_TYPE='lsw'
;;
bsw)
BUILD_PRODUCT_TYPE='bsw'
;;
srg)
BUILD_PRODUCT_TYPE='srg'
;;
bsrg)
BUILD_PRODUCT_TYPE='bsrg'
;;
nsw)
BUILD_PRODUCT_TYPE='nsw'
;;
x86)
BUILD_PRODUCT_TYPE='x86'
;;
mips|mips64|mipsxlp|mips-xlp|mips64xlp)
BUILD_PRODUCT_TYPE='fw'
;;
ppc|powerpc)
BUILD_PRODUCT_TYPE='lsw'
;;
*)
echo "Usage $0 [uag|fw|rt|ips|utm|dpx|dlb|lsw|bsw|nsw|srg|bsrg|x86]"
echo ""
exit 1
;;
esac
if [ -z "$CURRENT_BUILD_ARCH" ]; then
echo "UNKNOW PRODUCT_TYPE OR COMPILER"
echo ""
exit 1
fi
export CURRENT_BUILD_CPU
export CURRENT_BUILD_ARCH
export BUILD_PRODUCT_TYPE
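# Example: source this file with a product type to populate the build variables:
#   . device_cpu uag64
#   echo $CURRENT_BUILD_CPU $CURRENT_BUILD_ARCH $BUILD_PRODUCT_TYPE
#   # -> mips64-xlr mips64 uag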
| true |
6ace5160ab7980b8533888730e4753c05478c4fd
|
Shell
|
mnsanghvi/cl-travis
|
/install.sh
|
UTF-8
| 8,609 | 3.71875 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# cl-travis install script. Don't remove this line.
set -e
# get <url> <destination>
get() {
url=$1
destination=$2
echo "Downloading ${url}..."
curl --no-progress-bar --retry 10 -o "$destination" -L "$url"
}
# unpack <uncompression option> <file> <destination>
unpack() {
opt=$1
file=$2;
destination=$3;
echo "Unpacking tarball $1 into $3..."
mkdir -p "$destination"
tar -C "$destination" --strip-components=1 "$opt" -xf "$file"
}
install_i386_arch() {
# Travis-CI's dpkg doesn't seem to know about --add-architecture.
#sudo dpkg --add-architecture i386
sudo apt-get install libc6:i386
}
# add_to_lisp_rc <string>
add_to_lisp_rc() {
string=$1
case "$LISP" in
abcl) rc=".abclrc" ;;
allegro*) rc=".clinit.cl" ;;
sbcl|sbcl32) rc=".sbclrc" ;;
ccl|ccl32) rc=".ccl-init.lisp" ;;
cmucl) rc=".cmucl-init.lisp" ;;
clisp|clisp32) rc=".clisprc.lisp" ;;
ecl) rc=".eclrc" ;;
*)
echo "Unrecognised lisp: '$LISP'"
exit 1
;;
esac
echo "$string" >> "$HOME/$rc"
}
# version of ASDF known to work with cl-launch (3.0.2)
ASDF_URL="https://raw.githubusercontent.com/sbcl/sbcl/sbcl-1.1.17/contrib/asdf/asdf.lisp"
ASDF_LOCATION="$HOME/asdf"
install_asdf() {
get "$ASDF_URL" asdf.lisp
add_to_lisp_rc "(load \"$ASDF_LOCATION\")"
}
compile_asdf() {
echo "Compiling ASDF..."
cl-launch -i "(compile-file \"$ASDF_LOCATION.lisp\")"
}
CL_LAUNCH_URL="http://common-lisp.net/project/xcvb/cl-launch/cl-launch-4.0.3.tar.gz"
CL_LAUNCH_DIR="$HOME/cl-launch"
CL_LAUNCH_TARBALL="$HOME/cl-launch.tar.gz"
CL_LAUNCH_SCRIPT="/usr/local/bin/cl-launch"
CL_LAUNCH_RC="$HOME/.cl-launchrc"
download_cl_launch() {
get "$CL_LAUNCH_URL" "$CL_LAUNCH_TARBALL"
unpack -z "$CL_LAUNCH_TARBALL" "$CL_LAUNCH_DIR"
}
# install_cl_launch <lisp> <option>
install_cl_launch() {
echo "Installing cl-launch to $CL_LAUNCH_SCRIPT..."
rm -f "$CL_LAUNCH_RC"
for arg; do
echo "$arg" >> "$CL_LAUNCH_RC"
done
sudo bash "$CL_LAUNCH_DIR/cl-launch.sh" \
-I "$CL_LAUNCH_DIR" \
-o "$CL_LAUNCH_SCRIPT" \
--rc \
-B install
}
ASDF_SR_CONF_DIR="$HOME/.config/common-lisp/source-registry.conf.d"
ASDF_SR_CONF_FILE="$ASDF_SR_CONF_DIR/cl-travis.conf"
LOCAL_LISP_TREE="$HOME/lisp"
setup_asdf_source_registry() {
mkdir -p "$LOCAL_LISP_TREE"
mkdir -p "$ASDF_SR_CONF_DIR"
echo "(:tree \"$TRAVIS_BUILD_DIR/\")" > "$ASDF_SR_CONF_FILE"
echo "(:tree \"$LOCAL_LISP_TREE/\")" >> "$ASDF_SR_CONF_FILE"
echo "Created $ASDF_SR_CONF_FILE"
cat -n "$ASDF_SR_CONF_FILE"
}
# install_script <path> <lines...>
install_script() {
path=$1; shift
tmp=$(mktemp)
echo "#!/bin/sh" > "$tmp"
for line; do
echo "$line" >> "$tmp"
done
chmod 755 "$tmp"
sudo mv "$tmp" "$path"
}
ABCL_TARBALL_URL="http://www.abcl.org/releases/1.2.1/abcl-bin-1.2.1.tar.gz"
ABCL_TARBALL="abcl.tar.gz"
ABCL_DIR="$HOME/abcl"
ABCL_SCRIPT="/usr/local/bin/abcl"
install_abcl() {
sudo apt-get install default-jre
get "$ABCL_TARBALL_URL" "$ABCL_TARBALL"
unpack -z "$ABCL_TARBALL" "$ABCL_DIR"
install_script "$ABCL_SCRIPT" \
"java -cp \"$ABCL_DIR/abcl-contrib.jar\" \
-jar \"$ABCL_DIR/abcl.jar\" \"\$@\""
install_cl_launch "LISP=abcl" "ABCL_OPTIONS='--noinform'"
}
SBCL_TARBALL_URL="http://prdownloads.sourceforge.net/sbcl/sbcl-1.2.3-x86-64-linux-binary.tar.bz2"
SBCL_TARBALL="sbcl.tar.bz2"
SBCL_DIR="$HOME/sbcl"
install_sbcl() {
echo "Installing SBCL..."
get "$SBCL_TARBALL_URL" "$SBCL_TARBALL"
unpack -j "$SBCL_TARBALL" "$SBCL_DIR"
( cd "$SBCL_DIR" && sudo bash install.sh )
install_cl_launch "LISP=sbcl" "SBCL_OPTIONS='--noinform --disable-debugger'"
}
SBCL32_TARBALL_URL="http://downloads.sourceforge.net/project/sbcl/sbcl/1.0.58/sbcl-1.0.58-x86-linux-binary.tar.bz2"
SBCL32_TARBALL="sbcl32.tar.bz2"
SBCL32_DIR="$HOME/sbcl32"
install_sbcl32() {
echo "Installing 32-bit SBCL..."
install_i386_arch
get "$SBCL32_TARBALL_URL" "$SBCL32_TARBALL"
unpack -j "$SBCL32_TARBALL" "$SBCL32_DIR"
( cd "$SBCL32_DIR" && sudo bash install.sh )
sudo ln -s /usr/local/bin/sbcl /usr/local/bin/sbcl32
install_cl_launch "LISP=sbcl" "SBCL_OPTIONS='--noinform --disable-debugger'"
}
CCL_TARBALL_URL="ftp://ftp.clozure.com/pub/release/1.9/ccl-1.9-linuxx86.tar.gz"
CCL_TARBALL="ccl.tar.gz"
CCL_DIR="$HOME/ccl"
CCL_SCRIPT_PREFIX="/usr/local/bin"
install_ccl() {
if [ "$LISP" = "ccl32" ]; then
echo "Installing 32-bit CCL..."
install_i386_arch
bin="lx86cl"
script="ccl32"
else
echo "Installing CCL..."
bin="lx86cl64"
script="ccl"
fi
get "$CCL_TARBALL_URL" "$CCL_TARBALL"
unpack -z "$CCL_TARBALL" "$CCL_DIR"
install_script "$CCL_SCRIPT_PREFIX/$script" "\"$CCL_DIR/$bin\" \"\$@\""
install_cl_launch "LISP=ccl" "CCL=\"$script\"" "CCL_OPTIONS='--quiet'"
}
CMUCL_TARBALL_URL="http://common-lisp.net/project/cmucl/downloads/snapshots/2014/01/cmucl-2014-01-x86-linux.tar.bz2"
CMUCL_EXTRA_TARBALL_URL="http://common-lisp.net/project/cmucl/downloads/snapshots/2014/01/cmucl-2014-01-x86-linux.extra.tar.bz2"
CMUCL_TARBALL="cmucl.tar.bz2"
CMUCL_EXTRA_TARBALL="cmucl-extra.tar.bz2"
CMUCL_DIR="$HOME/cmucl"
CMUCL_SCRIPT="/usr/local/bin/cmucl"
install_cmucl() {
echo "Installing CMUCL..."
install_i386_arch
get "$CMUCL_TARBALL_URL" "$CMUCL_TARBALL"
get "$CMUCL_EXTRA_TARBALL_URL" "$CMUCL_EXTRA_TARBALL"
mkdir -p "$CMUCL_DIR"
tar -C "$CMUCL_DIR" -xjf "$CMUCL_TARBALL"
tar -C "$CMUCL_DIR" -xjf "$CMUCL_EXTRA_TARBALL"
install_script "$CMUCL_SCRIPT" \
"CMUCLLIB=\"$CMUCL_DIR/lib/cmucl/lib\" \"$CMUCL_DIR/bin/lisp\" \"\$@\""
install_cl_launch "LISP=cmucl" "CMUCL_OPTIONS='-quiet'"
}
ECL_TARBALL_URL="http://common-lisp.net/~loliveira/tarballs/ecl-13.5.1-linux-amd64.tar.gz"
ECL_TARBALL="ecl.tar.gz"
install_ecl() {
echo "Installing ECL..."
get "$ECL_TARBALL_URL" "$ECL_TARBALL"
sudo tar -C / -xzf "$ECL_TARBALL"
install_cl_launch "LISP=ecl" "ECL_OPTIONS='-q'"
}
install_clisp() {
if [ "$LISP" = "clisp32" ]; then
echo "Installing 32-bit CLISP..."
sudo apt-get remove libsigsegv2
sudo apt-get install libsigsegv2:i386
sudo apt-get install clisp:i386
sudo ln -s /usr/bin/clisp /usr/local/bin/clisp32
else
echo "Installing CLISP..."
sudo apt-get install clisp
fi
install_cl_launch "LISP=clisp" "CLISP_OPTIONS=\"--quiet --quiet\""
}
ACL_TARBALL_URL="http://www.franz.com/ftp/pub/acl90express/linux86/acl90express-linux-x86.bz2"
ACL_DIR="$HOME/acl"
ACL_TARBALL="acl.tar.bz2"
install_acl() {
echo "Installing Allegro CL..."
install_i386_arch
get "$ACL_TARBALL_URL" "$ACL_TARBALL"
mkdir -p "$ACL_DIR"
tar -C "$ACL_DIR" --strip-components=1 -xjf "$ACL_TARBALL"
case "$LISP" in
allegro) acl=alisp ;;
allegromodern) acl=mlisp ;;
*)
echo "Unrecognised lisp: '$LISP'"
exit 1
;;
esac
sudo ln -vs "$ACL_DIR/$acl" "/usr/local/bin/$acl"
sudo ln -vs "$ACL_DIR/$acl" "/usr/local/bin/$LISP"
install_cl_launch "LISP=$LISP" "ALLEGRO_OPTIONS='-L ~/.clinit.cl'"
}
QUICKLISP_URL="http://beta.quicklisp.org/quicklisp.lisp"
install_quicklisp() {
get "$QUICKLISP_URL" quicklisp.lisp
echo "Installing Quicklisp..."
cl-launch -f quicklisp.lisp -i "(quicklisp-quickstart:install)"
add_to_lisp_rc '(let ((quicklisp-init (merge-pathnames "quicklisp/setup.lisp"
(user-homedir-pathname))))
(when (probe-file quicklisp-init)
(load quicklisp-init)))'
}
(
cd "$HOME"
sudo apt-get update
download_cl_launch
install_asdf
case "$LISP" in
abcl) install_abcl ;;
allegro|allegromodern) install_acl ;;
sbcl) install_sbcl ;;
sbcl32) install_sbcl32 ;;
ccl|ccl32) install_ccl ;;
cmucl) install_cmucl ;;
clisp|clisp32) install_clisp ;;
ecl) install_ecl ;;
*)
echo "Unrecognised lisp: '$LISP'"
exit 1
;;
esac
compile_asdf
cl-launch -i '(format t "~%~a ~a up and running! (ASDF ~a)~%~%"
(lisp-implementation-type)
(lisp-implementation-version)
(asdf:asdf-version))'
install_quicklisp
setup_asdf_source_registry
)
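# Typical usage from a .travis.yml (illustrative; point the URL at your fork):
#   install:
#     - curl -L https://raw.githubusercontent.com/mnsanghvi/cl-travis/master/install.sh | sh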
| true |
9b21b05e530d2948de0547e160ce5908f14ee9f3
|
Shell
|
StamLab/stampipes
|
/scripts/rna-star/aggregate/checkcomplete.sh
|
UTF-8
| 1,597 | 3.125 | 3 |
[] |
no_license
|
#!/bin/bash
# Checks that important files exist and are not size 0
EXIT=0
# list of files
files=( \
"merged.transcriptome.cram" \
"feature_counts.txt" \
"genes.fpkm_tracking" \
"isoforms.fpkm_tracking" \
"kallisto.log" \
"kallisto_adv.log" \
"picard.MarkDuplicates.txt" \
"picard.RnaSeqMetrics.txt" \
"Signal.Unique.both.bw" \
"Signal.Unique.str-.bw" \
"Signal.Unique.str+.bw" \
"adapter_counts.info" \
"ribosomal_counts.info" \
"kallisto_output/abundance.tsv" \
"kallisto_output_adv/abundance.tsv" \
)
# Paired files only exist for paired-end aggregations.
paired_files=( \
"picard.CollectInsertSizes.txt" \
)
# list of sequins files
# turned off until we get a sequins flag
sequins_files=( \
# "anaquin_subsample/anaquin_kallisto/RnaExpression_genes.tsv" \
# "anaquin_subsample/anaquin_kallisto/RnaExpression_isoforms.tsv" \
# "anaquin_subsample/anaquin_kallisto/RnaExpression_isoforms.neatmix.tsv.info" \
# "anaquin_subsample/anaquin_kallisto/RnaExpression_summary.stats" \
# "anaquin_star/RnaAlign_summary.stats.info" \
)
function check_files() {
for FILE in "$@" ; do
if [ ! -s "$FILE" ]; then
echo "Missing $FILE"
EXIT=1
fi
done
}
# check files
check_files "${files[@]}"
if [[ -n "$PAIRED" ]] ; then
check_files "${paired_files[@]}"
fi
if [[ -n "$SEQUINS_REF" ]]; then
check_files "${sequins_files[@]}"
fi
if [[ $EXIT -eq 0 ]]; then
python3 "$STAMPIPES/scripts/lims/upload_data.py" --aggregation_id "$AGGREGATION_ID" --complete_aggregation
fi
exit $EXIT
| true |
e9b2616cc694bdbc32f9cf188b63ad2d5b175728
|
Shell
|
birdsofsummer/kpw2_rom
|
/tt/usr/sbin/wfm_mount
|
UTF-8
| 496 | 3.546875 | 4 |
[] |
no_license
|
#!/bin/sh
WFM_MOUNT=/mnt/wfm
if mount | grep -q $WFM_MOUNT ; then
echo "waveform filesystem already mounted"
exit 0
fi
echo "mounting waveform filesystem"
# Set up loop as offset 0x1C41000 from flash root
losetup -o $(( 4096 * 7233 )) /dev/loop/1 /dev/mmcblk0
mount /dev/loop/1 $WFM_MOUNT
if [ $? -ne 0 ]; then
echo "image not found, creating"
# block count is # of 1024 byte blocks
mkdosfs -F 32 -s 16 -S 1024 -t 3836 /dev/loop/1
mount /dev/loop/1 $WFM_MOUNT
fi
| true |
027fdafbb38a0dac45ea7c30509485df68229b55
|
Shell
|
jbrakensiek/CORBITS
|
/data/grab.sh
|
UTF-8
| 670 | 2.859375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
today=$(date -I)
name="koi-data" #"koi-$today"
#download data from the Exoplanet Archive
#query api: http://exoplanetarchive.ipac.caltech.edu/docs/program_interfaces.html
wget "http://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=q1_q17_dr24_koi&select=kepid,kepoi_name,koi_period,koi_period_err1,koi_impact,koi_impact_err1,koi_incl,koi_sma,koi_dor,koi_ror,koi_prad,koi_srad,koi_model_snr,koi_slogg&order=kepoi_name&where=koi_disposition+like+'C%25'&format=ascii" -O data/$name.txt &> data/grab.log
#remove first few lines of file and any null content
sed -e "/[/\|].*/d" -e "/.*null.*/d" "data/$name.txt" > "data/$name-edit.txt"
| true |
767357af19c12de17b52d1ddf1be5152c79ea0d2
|
Shell
|
packy/maccfg
|
/bashrc.d/enabled/ssh_hosts.sh
|
UTF-8
| 518 | 3.390625 | 3 |
[] |
no_license
|
#!bash # for emacs formatting
authorize () {
HOSTNAME=$(hostname -s)
USER=$(whoami)
DIR="~/.ssh"
REMOTE_PUB="~/.ssh/id_rsa_${HOSTNAME}_${USER}.pub"
AUTH_KEYS="~/.ssh/authorized_keys"
cat ~/.ssh/id_rsa.pub | ssh $1@$2 "umask 077; test -d $DIR || mkdir $DIR; cat > $REMOTE_PUB; cat $REMOTE_PUB >> $AUTH_KEYS; test -x /sbin/restorecon && /sbin/restorecon $DIR $REMOTE_PUB $AUTH_KEYS"
}
forget_host () {
TMP=/tmp/known_hosts.$$
grep -v $1 $HOME/.ssh/known_hosts > $TMP
mv $TMP $HOME/.ssh/known_hosts
}
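# Roughly equivalent stock tools, for reference:
#   ssh-copy-id user@host      # what authorize() does, minus the per-host pubkey copy
#   ssh-keygen -R host         # what forget_host() does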
| true |
2dfd90e77a5ab39a35811bdb96ad73ad08889ba3
|
Shell
|
tdf/salt-states-base
|
/users/Z99-user.sh
|
UTF-8
| 449 | 2.640625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !!! THIS FILE IS MANAGED BY SALT !!!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
alias grepconf='grep -vE "^#|^$|^[\w]+#"'
alias grep='grep --color'
alias sudox='sudo cp ~/.Xauthority /root && sudo '
export LS_OPTIONS='--color=auto -h'
eval "`dircolors`"
alias ls='ls $LS_OPTIONS'
alias ll='ls $LS_OPTIONS -l'
alias l='ls $LS_OPTIONS -lA'
alias ..='cd ..'
alias ...='cd ../..'
force_color_prompt=yes
| true |
0139d94e6fb7e5e0fdbf1b96176e983223dd357b
|
Shell
|
yusong-shen/STAT640_Statistical_Machine_Learning
|
/Competition/svdfeature-1.2.2/demo/basicMF/run-ml100K.sh
|
UTF-8
| 890 | 2.953125 | 3 |
[] |
no_license
|
#!/bin/bash
# this is a demo script for training the movielen
# get data if it doesn't exist
if [[ -f ua.base && -f ua.test ]]
then
echo "start demo using movielen data"
else
echo "The demo require input file ua.base, ua.test from ml-100K in current folder, please get them from www.grouplens.org"
exit
fi
# shuffle training data
../../tools/line_shuffle ua.base ua.base.shuffle
# generate feature file
python mkbasicfeature.py ua.base.shuffle ua.base.basicfeature
python mkbasicfeature.py ua.test ua.test.basicfeature
# make buffer, transform text format to binary format
../../tools/make_feature_buffer ua.base.basicfeature ua.base.buffer
../../tools/make_feature_buffer ua.test.basicfeature ua.test.buffer
# training for 40 rounds
../../svd_feature basicMF.conf num_round=40
# write out prediction from 0040.model
../../svd_feature_infer basicMF.conf pred=40
| true |
05286ba696b8111cd510912e4a3fee0ed98441d9
|
Shell
|
Vman45/ubuntu_server_installer
|
/crypt/decrypt.sh
|
UTF-8
| 476 | 3.65625 | 4 |
[] |
no_license
|
#!/bin/bash
#@author Filip Oščádal <oscadal@gscloud.cz>
dir="$(dirname "$0")"
. $dir/_includes.sh
FILES="id_rsa id_rsa.pub"
if [ -z "${PASS}" ]; then
info "Manage keys (interactive) ..."
read -s -p "Enter password:" PASS
echo -ne "\n"
export PASS
else
info "Manage keys (automatic) ..."
fi
for i in $FILES
do
if [ -f "$i.aes" ]
then
openssl aes-256-cbc -pbkdf2 -d -pass env:PASS -in ./$i".aes" -out ./$i
else
warn "Missing file: $i"
fi
done
echo -en "\nDone.\n"
| true |
1363b8efbc68998274036024dbcc5b806783c33e
|
Shell
|
emersion/debug-frame-check
|
/bin/dwarf-orc-csmith.sh
|
UTF-8
| 172 | 2.84375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# bash is required: "set -o pipefail" is not guaranteed under plain /bin/sh
set -e -o pipefail
bin_dir=$(dirname "$0")
if [ "$CSMITH" = "" ] ; then
CSMITH="csmith"
fi
$CSMITH "$@" > csmith.c
"$bin_dir/dwarf-orc-crosscheck.sh" csmith.c
| true |
8a9996749f4ab95937e4bb4dc5b794cf14cef168
|
Shell
|
fzhao06/auto_scripts
|
/bashrc
|
UTF-8
| 722 | 2.75 | 3 |
[] |
no_license
|
# .bashrc
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
# Uncomment the following line if you don't like systemctl's auto-paging feature:
# export SYSTEMD_PAGER=
# User specific aliases and functions
if test -f $HOME/.gpg-agent-info && \
kill -0 `cut -d: -f 2 $HOME/.gpg-agent-info` 2>/dev/null; then
GPG_AGENT_INFO=`cat $HOME/.gpg-agent-info | cut -c 16-`
else
# No, gpg-agent not available; start gpg-agent
eval `gpg-agent --daemon --no-grab --write-env-file $HOME/.gpg-agent-info`
fi
export GPG_TTY=$(tty)
export GPG_AGENT_INFO
tput bold;
export PS1="\[\e[1;35m\]{\[\e[1;36m\]\u@\h\[\e[1;35m\] \w\[\e[1;36m\]}\$ \[\e[0;33m\]"
export LS_COLORS='di=0;36'
export PATH=$JAVA_HOME/bin:$PATH
| true |
fc7dfbb67f5be9329020f43bb9767e638b309704
|
Shell
|
WU-ARL/ndnmap
|
/DataCollection/runHOUS.sh
|
UTF-8
| 154 | 2.625 | 3 |
[] |
no_license
|
#!/bin/bash
while true
do
./getHOUS.py >& getHOUS.log
DATE=`date +%Y.%b.%d.%H.%M`
mv getHOUS.log getHOUS.log.$DATE
gzip getHOUS.log.$DATE
done
| true |
f1c6e274d58214d7f0a699fc71906993665df002
|
Shell
|
akifoezkan/Halide-HLS
|
/tutorial/figures/generate_figures_18.sh
|
UTF-8
| 3,442 | 2.5625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script generates the figures for lesson 18
make -C ../.. bin/HalideTraceViz
rm -rf tmp
mkdir -p tmp
# Grab a trace
HL_JIT_TARGET=host-trace_loads-trace_stores-trace_realizations \
HL_TRACE_FILE=$(pwd)/tmp/trace.bin \
make -C ../.. tutorial_lesson_18_parallel_associative_reductions
ls tmp/trace.bin
rm -rf lesson_18_*.mp4
# Serial histogram
cat tmp/trace.bin | ../../bin/HalideTraceViz \
--size 230 280 --timestep 1 --decay 2 4 --hold 10 --uninit 50 50 100 \
--max 256 --gray --strides 1 0 0 1 --zoom 20 --store 2 \
--move 10 40 --func 'hist_serial:input' --up 8 --label 'hist_serial:input' Input 1 \
--max 25 --strides 1 0 \
--move 10 246 --func 'hist_serial' --up 8 --label 'hist_serial' Histogram 1 \
| avconv -f rawvideo -pix_fmt bgr32 -s 230x280 -i /dev/stdin -c:v h264 lesson_18_hist_serial.mp4
# Manually-factored parallel histogram
cat tmp/trace.bin | ../../bin/HalideTraceViz \
--size 460 300 --timestep 1 --decay 2 4 --hold 10 --uninit 50 50 100 \
--max 256 --gray --strides 1 0 0 1 --zoom 20 --store 2 \
--move 20 40 --func 'merge_par_manual:input' --up 8 --label 'merge_par_manual:input' Input 1 \
--max 4 \
--move 230 40 --func 'intm_par_manual' --up 8 --label 'intm_par_manual' 'Partial Histograms' 1 \
--strides 1 0 --max 10 \
--move 230 246 --func 'merge_par_manual' --up 8 --label 'merge_par_manual' Histogram 1 \
| avconv -f rawvideo -pix_fmt bgr32 -s 460x300 -i /dev/stdin -c:v h264 lesson_18_hist_manual_par.mp4
# Parallelize the outer dimension using rfactor
cat tmp/trace.bin | ../../bin/HalideTraceViz \
--size 460 300 --timestep 1 --decay 2 4 --hold 10 --uninit 50 50 100 \
--strides 1 0 0 1 --zoom 20 --gray --max 256 --store 2 \
--move 20 40 --func 'hist_rfactor_par:input' --up 8 --label 'hist_rfactor_par:input' Input 1 \
--max 4 \
--move 230 40 --func 'hist_rfactor_par_intm' --up 8 --label 'hist_rfactor_par_intm' 'Partial Histograms' 1 \
--strides 1 0 --max 10 \
--move 230 246 --func 'hist_rfactor_par' --up 8 --label 'hist_rfactor_par' Histogram 1 \
| avconv -f rawvideo -pix_fmt bgr32 -s 460x300 -i /dev/stdin -c:v h264 lesson_18_hist_rfactor_par.mp4
# Vectorize the inner dimension using rfactor
cat tmp/trace.bin | ../../bin/HalideTraceViz \
--size 460 300 --timestep 1 --decay 2 4 --hold 10 --uninit 50 50 100 \
--strides 1 0 0 1 --zoom 20 --gray --max 256 --store 2 \
--move 20 40 --func 'hist_rfactor_vec:input' --up 8 --label 'hist_rfactor_vec:input' Input 1 \
--max 4 \
--move 230 40 --func 'hist_rfactor_vec_intm' --up 8 --label 'hist_rfactor_vec_intm' 'Partial Histograms' 1 \
--strides 1 0 --max 10 \
--move 230 246 --func 'hist_rfactor_vec' --up 8 --label 'hist_rfactor_vec' Histogram 1 \
| avconv -f rawvideo -pix_fmt bgr32 -s 460x300 -i /dev/stdin -c:v h264 lesson_18_hist_rfactor_vec.mp4
# Tile histogram using rfactor
cat tmp/trace.bin | ../../bin/HalideTraceViz \
--size 650 200 --timestep 1 --decay 2 4 --hold 10 --uninit 50 50 100 \
--strides 1 0 0 1 --zoom 20 --gray --max 256 --store 2 \
--move 20 40 --func 'hist_rfactor_tile:input' --up 8 --label 'hist_rfactor_tile:input' Input 1 \
--max 4 --strides 1 0 11 0 0 2 \
--move 230 40 --func 'hist_rfactor_tile_intm' --up 8 --label 'hist_rfactor_tile_intm' 'Partial Histograms' 1 \
--strides 1 0 --max 10 \
--move 230 158 --func 'hist_rfactor_tile' --up 8 --label 'hist_rfactor_tile' Histogram 1 \
| avconv -f rawvideo -pix_fmt bgr32 -s 650x200 -i /dev/stdin -c:v h264 lesson_18_hist_rfactor_tile.mp4
rm -rf tmp
| true |
cec0987ff9b6e6343226972d66ab9499b82f37f1
|
Shell
|
freebsd/freebsd-ports
|
/dns/rbldnsd/files/rbldnsd.in
|
UTF-8
| 679 | 3.109375 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# PROVIDE: rbldnsd
# REQUIRE: DAEMON
# BEFORE: LOGIN
# KEYWORD: shutdown
#
# Add the following lines to /etc/rc.conf to enable rbldnsd:
#
# rbldnsd_enable="YES"
#
# See rbldnsd(8) for flags.
#
. /etc/rc.subr
name=rbldnsd
rcvar=rbldnsd_enable
load_rc_config $name
rbldnsd_enable=${rbldnsd_enable:-"NO"}
rbldnsd_flags=${rbldnsd_flags:-"-r %%PREFIX%%/etc/rbldnsd -b 127.0.0.1 bl.example.com:ip4set:example"}
command=%%PREFIX%%/sbin/rbldnsd
pidfile=/var/run/rbldnsd.pid
extra_commands=reload
start_precmd=prestart
stop_postcmd="rm -f ${pidfile}"
prestart()
{
# enforce pidfile as first argument
rc_flags="-p ${pidfile} ${rbldnsd_flags}"
}
run_rc_command "$1"
| true |
338bf0d4c2889623385d7671749ea648ce78f4a8
|
Shell
|
socialtables/docker-machine-dns-daemon
|
/update-docker-machine-dns.sh
|
UTF-8
| 2,110 | 4.4375 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Social Tables docker DNS configurator for OSX - updates dnsmask's configuration
# to point the specified hostname at the specified docker machine's IP address
# help blob
show_help() {
echo "Usage: update-docker-machine-dns [ -h hostname ] [ -m machine name ] [ -d (daemon mode) ]"
}
# optparse dump to globals
parse_opts() {
local OPTIND=1
while getopts "dh:m:?" opt; do
case $opt in
h)
HOST_NAME="$OPTARG"
;;
m)
MACHINE_NAME="$OPTARG"
;;
d)
IS_DAEMON=1
;;
'?')
show_help
exit
;;
esac
done
}
# adds or modifies a simple config file line
upsert_config_line() {
local TAG=$1
local VAL=$2
local TARGET_FILE=$3
local FOUND_CONFIG_LINE=$(grep "${TAG}" ${TARGET_FILE})
if [ -z "$FOUND_CONFIG_LINE" ]; then
# strip escape slashes and insert into file
echo "${VAL} # ${TAG}" | sed -E 's/\\(.)/\1/g' >> ${TARGET_FILE}
else
# replace extant line in file
sed -i .old "s/.* # ${TAG}/${VAL} # ${TAG}/" ${TARGET_FILE}
fi
}
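# For example (values illustrative), calling:
#   upsert_config_line "DOCKER MACHINE HOST <default.docker>" \
#       "address=\/default.docker\/192.168.99.100" "$(brew --prefix)/etc/dnsmasq.conf"
# leaves exactly one matching line in the file:
#   address=/default.docker/192.168.99.100 # DOCKER MACHINE HOST <default.docker>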
# updates a docker machine dnsmasq DNS entry
update_docker_machine_entry() {
local MACHINE_NAME="${1:-default}"
local HOST_NAME="${2:-${MACHINE_NAME}.docker}"
local MACHINE_IP="$(docker-machine ip ${MACHINE_NAME})"
if [ -z "$MACHINE_IP" ]; then
echo "Unable to resolve docker machine IP"
return
fi
# update dnsmasq config
upsert_config_line "DOCKER MACHINE HOST <${HOST_NAME}>" "address=\\/${HOST_NAME}\\/${MACHINE_IP}" $(brew --prefix)/etc/dnsmasq.conf
}
#############################
### execution begins here ###
#############################
# parse command line options
parse_opts "$@"
# assign defaults
if [ -z "$MACHINE_NAME" ]; then
MACHINE_NAME="default"
fi
if [ -z "$HOST_NAME" ]; then
HOST_NAME="${MACHINE_NAME}.docker"
fi
# ensure dnsmasq config file exists
mkdir -pv $(brew --prefix)/etc/
touch $(brew --prefix)/etc/dnsmasq.conf
# normal mode - invoke once
if [ -z "$IS_DAEMON" ]; then
update_docker_machine_entry $MACHINE_NAME $HOST_NAME
# daemon mode - invoke once every two minutes
else
while true; do
update_docker_machine_entry $MACHINE_NAME $HOST_NAME
sleep 120
done
fi
| true |
33b03ebe79fc9cdeeee6c29fa8a655eaa9025068
|
Shell
|
dijksterhuis/discogs-yt-playlister
|
/2-discogs-datastorage/6-redis-ETL/1-searches/redis-loads-set-adds-unique-metadata.sh
|
UTF-8
| 922 | 3 | 3 |
[] |
no_license
|
#!/bin/bash
for tag in genre style year; \
do \
echo 'Running for tag '$tag ; \
image='dijksterhuis/redis-database-inserts:modularised-0.1' ;\
container_name='redis-loads-unique-'$tag ;\
container_command='./redis-load-set-adder.py ' ;
# args: run_type, primary_key, mongo instance, redis instance, key, value
container_args='simple key masters redis-metadata-unique-'$tag' '$tag' masters_id' ;\
networks='discogs-redis-site-queries discogs-mongo' ;\
echo $container_name ; echo $image ;\
docker run -di \
-w /home \
-v metadata-extraction-logs:/logging \
--name $container_name \
$image ; \
for network in $networks; do docker network connect $network $container_name ; echo "connected to "$network ; done ; \
docker exec -it $container_name $container_command $container_args ;\
docker stop $container_name ; docker rm $container_name ;\
done
| true |
01dad9fe709e491f80afeee665dda759affc9fec
|
Shell
|
webis-de/ICWSM-17
|
/wikipedia-reverts-detection/src-shell/download-wiki-stub-meta.sh
|
UTF-8
| 913 | 3.984375 | 4 |
[] |
no_license
|
#!/bin/bash

if [ -z "$1" ]; then
  echo "Usage:"
  echo "  $0 <country>"
  echo "With"
  echo "  country"
  echo "    The two-letter country code as used by Wikipedia (en, de, fr, ...)"
  exit 1
fi

country=$1

shell_source_dir=$(dirname "$0")
source "$shell_source_dir/config.sh"

wiki="${country}wiki"
output_dir=$shell_source_dir/../data/$wiki-$version
mkdir -p "$output_dir"

cont=1
part=1
while [ $cont -eq 1 ]; do
  output_file=$output_dir/$wiki-$version-stub-meta-history$part.xml.gz
  if [ -e "$output_file" ]; then
    echo "Not downloading already existing file: $output_file"
  else
    echo "Downloading: $output_file"
    curl --output "$output_file" "https://dumps.wikimedia.org/$wiki/$version/$wiki-$version-stub-meta-history$part.xml.gz"
    # the dump server answers a nonexistent part with an HTML error page
    if [ $(file "$output_file" | grep HTML | wc -l) -eq 1 ]; then
      echo "No such part: $part. Aborting"
      rm "$output_file"
      cont=0
    fi
  fi
  let part++
done
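# Example: ./download-wiki-stub-meta.sh en
# fetches enwiki-$version-stub-meta-history1.xml.gz, ...2.xml.gz, and so on
# (with $version taken from config.sh) until the dump server answers a
# nonexistent part with an HTML error page, which ends the loop.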
--- end of record (download_success: true) ---

blob_id: 87e79140f676ed9a26e58f8cc0da6eb5ba790b5c | language: Shell | encoding: UTF-8 | length_bytes: 2,792 | score: 3.671875 (int_score: 4) | license: none
repo_name: research-iobserve/jss-privacy-experiment
path: /execute-jpetstore-with-workload-continuous-reconfiguration.sh
text:
#!/bin/bash

# Execute a distributed JPetStore with docker locally, including a migration
# scheme in which the accounting component is redeployed 10000 times.
# This variant does not support a workload driver.

REDEPLOYS=10000

BASE_DIR=$(cd "$(dirname "$0")"; pwd)

if [ -f "$BASE_DIR/config" ] ; then
    . "$BASE_DIR/config"
else
    echo "Missing configuration"
    exit 1
fi

. "$BASE_DIR/common-functions.sh"
#############################################
# common functions
# stopping docker container
function stopDocker() {
information "Stopping existing distributed jpetstore instances ..."
docker stop frontend
docker stop order
docker stop catalog
docker stop account
docker rm frontend
docker rm order
docker rm catalog
docker rm account
docker network rm jpetstore-net
information "done"
}
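# (If a container or the network does not exist, docker just prints an error
# and the script continues; the same function serves both the initial
# leftover cleanup and the final shutdown.)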
information "Interactive mode, no specialized workload driver"
###################################
# check if no leftovers are running
# stop docker
stopDocker
###################################
# starting
# jpetstore
information "Start jpetstore"
docker network create --driver bridge jpetstore-net
docker run -e LOGGER=$LOGGER -e LOCATION=GERMANY -d --name account --network=jpetstore-net jpetstore-account-service
docker run -e LOGGER=$LOGGER -d --name order --network=jpetstore-net jpetstore-order-service
docker run -e LOGGER=$LOGGER -d --name catalog --network=jpetstore-net jpetstore-catalog-service
docker run -e LOGGER=$LOGGER -d --name frontend --network=jpetstore-net jpetstore-frontend-service
ID=$(docker ps | grep 'frontend' | awk '{ print $1 }')
FRONTEND=$(docker inspect $ID | grep '"IPAddress' | awk '{ print $2 }' | tail -1 | sed 's/^"\(.*\)",/\1/g')
SERVICE_URL="http://$FRONTEND:8080/jpetstore-frontend"
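# Note: a less fragile way to read the container IP (assuming a Docker
# version with Go-template support, which any recent one has) would be:
#   FRONTEND=$(docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "$ID")
# The grep/awk/sed pipeline above depends on the exact `docker inspect`
# JSON layout.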
information "Service URL $SERVICE_URL"
while ! curl -sSf $SERVICE_URL 2> /dev/null > /dev/null ; do
echo "wait for service coming up..."
sleep 1
done
ITERATION=0
while [ $ITERATION -lt $REDEPLOYS ] ; do
ITERATION=$((ITERATION + 1))
echo "Redeployment $ITERATION"
if [ $(( $ITERATION % 2)) -eq 0 ]; then
export LOCATION="USA"
else
export LOCATION="GERMANY"
fi
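# (even-numbered redeployments place the account service in USA, odd-numbered
# ones in GERMANY, so the component migrates back and forth between locations)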
docker stop -t 30 account > /dev/null
docker rm account > /dev/null
docker run -e LOGGER=$LOGGER -e LOCATION=$LOCATION -d --name account --network=jpetstore-net jpetstore-account-service > /dev/null
ACCOUNT_ID=$(docker ps | grep 'account' | awk '{ print $1 }')
ACCOUNT=$(docker inspect $ACCOUNT_ID | grep '"IPAddress' | awk '{ print $2 }' | tail -1 | sed 's/^"\(.*\)",/\1/g')
ACCOUNT_URL="http://$ACCOUNT:8080/jpetstore-account/request-user?username=j2ee"
while ! curl -sSf $ACCOUNT_URL 2> /dev/null > /dev/null ; do
echo "wait for service coming up..."
sleep 1
done
done
###################################
# shutdown
# shutdown jpetstore
stopDocker
# end
--- end of record (download_success: true) ---

blob_id: 643a79064177e1c34ba4a172a6235bd9529e624a | language: Shell | encoding: UTF-8 | length_bytes: 898 | score: 3.1875 (int_score: 3) | licenses: ["MIT"] (permissive)
repo_name: andrewlook/ipynblog
path: /tests/test_cookiecutter.sh
text:
#!/bin/bash -x
SAMPLE_COLAB_URL="https://colab.research.google.com/drive/1fjv0zVC0l-81QI7AtJjZPMfYRiynOJCB#scrollTo=Kp3QKj1KIaaO"
SAMPLE_COOKIECUTTER_REPO="git@github.com:andrewlook/ipynblog-cookiecutter-svelte-template.git"
NOTEBOOKS_DIR=$(pwd)/notebooks
ipynblog-download "${SAMPLE_COLAB_URL}" -d "${NOTEBOOKS_DIR}"
NOTEBOOK_NAME=$(ls "${NOTEBOOKS_DIR}" | grep -v ".meta" | head -n1)
NOTEBOOK_SLUG=$(echo ${NOTEBOOK_NAME} | tr '-' '_' | tr ' ' '_' | tr '[:upper:]' '[:lower:]')
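# e.g. a download named "My-Colab Notebook.ipynb" (hypothetical) yields the
# slug "my_colab_notebook.ipynb", which the script also uses below as the
# name of the directory generated by the cookiecutter.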
NOTEBOOK_PATH=${NOTEBOOKS_DIR}/${NOTEBOOK_NAME}
NOTEBOOK_META=${NOTEBOOK_PATH}.yaml
ipynblog-cookiecutter --metadata ${NOTEBOOK_META} ${SAMPLE_COOKIECUTTER_REPO}
ls -l ${NOTEBOOK_SLUG}/
GEN_NOTEBOOKS_DIR=${NOTEBOOK_SLUG}/notebooks
GEN_PUBLIC_DIR=${NOTEBOOK_SLUG}/public
ipynblog-render ${NOTEBOOK_PATH} ${GEN_PUBLIC_DIR}/index.html \
--images-dir ${GEN_PUBLIC_DIR}/images \
--template ${GEN_NOTEBOOKS_DIR}/nbconvert.tpl
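# End-to-end flow under test: download a Colab notebook plus its .yaml
# metadata, scaffold a project from the cookiecutter template using that
# metadata, then render the notebook to public/index.html inside the
# generated project.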
--- end of record (download_success: true) ---

blob_id: b01ba54563ee529912847e62278e54bd6faaaaa0 | language: Shell | encoding: UTF-8 | length_bytes: 155 | score: 2.828125 (int_score: 3) | licenses: ["Apache-2.0"] (permissive)
repo_name: avi202020/sailpoint-docker
path: /bin/status.sh
text:
#!/bin/bash

# use the compose file recorded by the build, if one exists
if [[ -e build/.composefile ]]; then
    FILE=$(cat build/.composefile)
else
    FILE=docker-compose.yml
fi

set -x
# pass the selected compose file explicitly so it is actually honored
docker-compose -f "$FILE" ps
--- end of record (download_success: true) ---