blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|
1367e741178c227e70bbe17ed385ce7a953b72fd
|
Shell
|
cpraveen/cfdlab
|
/mpi_test/pbs.sh
|
UTF-8
| 771 | 3.109375 | 3 |
[] |
no_license
|
#!/bin/sh
#PBS -V
#PBS -m ae
#PBS -M cpraveen@gmail.com
#PBS -N mpi_test
#PBS -o pbs_out.txt
#PBS -e pbs_error.txt
#PBS -l nodes=1:ppn=4
#PBS -q test
# Run from the directory the job was submitted from.
cd "$PBS_O_WORKDIR"
echo "Job began"
echo "Working directory is $PBS_O_WORKDIR"
# Processor count: the PBS node file holds one line per allocated core.
NPROCS=$(wc -l < "$PBS_NODEFILE")
# Node count: number of distinct hosts in the node file.
NNODES=$(uniq "$PBS_NODEFILE" | wc -l)
### Display the job context
echo "Running on host $(hostname)"
echo "Time is $(date)"
echo "Directory is $(pwd)"
echo "Using ${NPROCS} processors across ${NNODES} nodes"
# OpenMPI will automatically launch processes on all allocated nodes.
# MPIRUN=$(which mpirun)
# ${MPIRUN} -machinefile $PBS_NODEFILE -np ${NPROCS} my-openmpi-program
mpiexec ./hello
echo "Job ended"
echo "Time is $(date)"
| true |
3cb96ff4fd42c5234e157559286548ac603493e0
|
Shell
|
guolikai/myscripts
|
/etcd/etcd.sh
|
UTF-8
| 5,730 | 3.671875 | 4 |
[] |
no_license
|
#!/bin/bash
##########################################################################
#Name: etcd.sh
#Version: v3.1.5
#Create_Date: 2017-4-19
#Author: GuoLikai(glk73748196@sina.com)
#Description: build, install and manage etcd
##########################################################################
# Release and layout settings; all paths follow the /App/<area>/OPS convention.
App=etcd-v3.1.5-linux-amd64
AppName=etcd
AppOptBase=/App/opt/OPS
AppOptDir=$AppOptBase/$AppName
AppInstallBase=/App/install/OPS
AppInstallDir=$AppInstallBase/$App
AppConfDir=/App/conf/OPS/$AppName
AppLogDir=/App/log/OPS/$AppName
AppSrcBase=/App/src/OPS
AppTarBall=$App.tar.gz
AppBuildBase=/App/build/OPS
# Build dir = tarball path with the .tar.* suffix (and any leading ./) stripped.
AppBuildDir=$(echo "$AppBuildBase/$AppTarBall" | sed -e 's/.tar.*$//' -e 's/^.\///')
AppProg=$AppOptDir/etcd
AppProgCtl=$AppOptDir/etcdctl
#EtcdServerIp=10.10.10.10
AppDataDir=/App/data/OPS/$AppName
# Advertised IP: IPv4 address of eth1 (assumes eth1 exists -- TODO confirm).
EtcdServerIp=$(/usr/sbin/ifconfig eth1 | grep "inet" | grep -v "inet6" | awk -F' ' '{print $2}')
RemoveFlag=0
InstallFlag=0
# Get the PID(s) of the running etcd process.
# NOTE(review): the pattern matches any command line containing "etcd"
# (etcdctl included), so AppMasterPid may contain several PIDs.
fpid()
{
AppMasterPid=$(ps ax | grep "${AppName}" | grep -v "grep" | awk '{print $1}' 2> /dev/null)
}
# Query install and run status.
fstatus()
{
fpid
if [ ! -f "$AppProg" ]; then
echo "$AppName 未安装"
else
echo "$AppName 已安装"
if [ -z "$AppMasterPid" ]; then
echo "$AppName 未启动"
else
echo "$AppName 正在运行"
fi
fi
}
# Remove the installation (refuses while the process is running).
fremove()
{
fpid
RemoveFlag=1
if [ -z "$AppMasterPid" ]; then
if [ -d "$AppInstallDir" ]; then
rm -rf $AppInstallDir && echo "删除 $AppName"
# Conf/opt/log entries are symlinks (see fsymlink), hence plain rm -f.
rm -f $AppConfDir
rm -f $AppOptDir
rm -f $AppLogDir
else
echo "$AppName 未安装"
fi
else
echo "$AppName 正在运行" && exit
fi
}
# Back up the install dir into a dated, numbered tarball next to it.
fbackup()
{
Day=$(date +%Y-%m-%d)
BackupFile=$App.$Day.tgz
if [ -f "$AppProg" ]; then
cd $AppInstallBase
tar zcvf $BackupFile --exclude=logs/* $App/* --backup=numbered
[ $? -eq 0 ] && echo "$AppName 备份成功" || echo "$AppName 备份失败"
else
echo "$AppName 未安装"
fi
}
# Install: unpack + symlink + copy binaries, unless already installed/running.
finstall()
{
fpid
InstallFlag=1
if [ -z "$AppMasterPid" ]; then
test -f "$AppProg" && echo "$AppName 已安装"
# Only proceeds when the binary was NOT found ($? of the test above).
[ $? -ne 0 ] && fupdate && fsymlink && fcpconf
else
echo "$AppName 正在运行"
fi
}
# Copy the etcd binaries into /usr/sbin so they are on PATH.
fcpconf()
{
/usr/bin/cp $AppInstallDir/{etcd,etcdctl} /usr/sbin/
chmod +x /usr/sbin/{etcd,etcdctl}
}
# Update (or first-time install when called from finstall): backup, unpack, copy.
fupdate()
{
Operate="更新"
[ $InstallFlag -eq 1 ] && Operate="安装"
# Skip the backup when called as part of a remove/reinstall.
[ $RemoveFlag -ne 1 ] && fbackup
test -d "$AppBuildDir" && rm -rf $AppBuildDir
# Try gzip first, fall back to bzip2.
tar zxf $AppSrcBase/$AppTarBall -C $AppBuildBase || tar jxf $AppSrcBase/$AppTarBall -C $AppBuildBase
/usr/bin/cp -rf $AppBuildDir $AppInstallBase
if [ $? -eq 0 ]; then
echo "$AppName $Operate成功"
else
echo "$AppName $Operate失败"
exit 1
fi
}
# Create the opt-dir symlink (conf/log links are removed but only opt is recreated).
fsymlink()
{
[ -L $AppOptDir ] && rm -f $AppOptDir
[ -L $AppConfDir ] && rm -f $AppConfDir
[ -L $AppLogDir ] && rm -f $AppLogDir
ln -s $AppInstallDir $AppOptDir
}
# Start etcd in the background, listening on the eth1 address.
fstart()
{
fpid
if [ -n "$AppMasterPid" ]; then
echo "$AppName 正在运行"
else
#/usr/bin/nohup ${AppProg} --name auto_scale --data-dir ${AppDataDir} --listen-peer-urls "http://10.10.10.10:2380,http://10.10.10.10:7001" --listen-client-urls "http://10.10.10.10:2379,http://10.10.10.10:4001" --advertise-client-urls "http://10.10.10.10:2379,http://10.10.10.10:4001" &
/usr/bin/nohup ${AppProg} --name auto_scale --data-dir ${AppDataDir} --listen-peer-urls "http://${EtcdServerIp}:2380,http://${EtcdServerIp}:7001" --listen-client-urls "http://${EtcdServerIp}:2379,http://${EtcdServerIp}:4001" --advertise-client-urls "http://${EtcdServerIp}:2379,http://${EtcdServerIp}:4001" &
# Short grace period before re-checking that the process survived startup.
sleep 0.1
if [ -n "$(ps ax | grep "$AppName" | grep -v "grep" | awk '{print $1}' 2> /dev/null)" ]; then
echo "$AppName 启动成功"
else
echo "$AppName 启动失败"
fi
fi
}
# Stop etcd (SIGKILL -- no graceful shutdown).
fstop()
{
fpid
if [ -n "$AppMasterPid" ]; then
kill -9 $AppMasterPid &>/dev/null && echo "停止 $AppName" || echo "$AppName 停止失败"
else
echo "$AppName 未启动"
fi
}
# Restart: stop (if running), brief pause, start.
frestart()
{
fpid
[ -n "$AppMasterPid" ] && fstop && sleep 1
fstart
}
# Kill the master process(es) found by fpid.
fkill()
{
fpid
if [ -n "$AppMasterPid" ]; then
echo "$AppMasterPid" | xargs kill -9
if [ $? -eq 0 ]; then
echo "终止 $AppName 主进程"
else
echo "终止 $AppName 主进程失败"
fi
else
echo "$AppName 主进程未运行"
fi
}
ScriptDir=$(cd $(dirname $0); pwd)
ScriptFile=$(basename $0)
# Subcommand dispatch; anything unrecognized prints the usage table.
case "$1" in
"install" ) finstall;;
"update" ) fupdate;;
"reinstall" ) fremove && finstall;;
"remove" ) fremove;;
"backup" ) fbackup;;
"start" ) fstart;;
"stop" ) fstop;;
"restart" ) frestart;;
"status" ) fstatus;;
"kill" ) fkill;;
* )
echo "$ScriptFile install 安装 $AppName"
echo "$ScriptFile update 更新 $AppName"
echo "$ScriptFile reinstall 重装 $AppName"
echo "$ScriptFile remove 删除 $AppName"
echo "$ScriptFile backup 备份 $AppName"
echo "$ScriptFile start 启动 $AppName"
echo "$ScriptFile stop 停止 $AppName"
echo "$ScriptFile restart 重启 $AppName"
echo "$ScriptFile status 查询 $AppName 状态"
echo "$ScriptFile kill 终止 $AppName 进程"
;;
esac
| true |
9575a5ebddc778eb02793bb9e7390659c9c10fd2
|
Shell
|
binaryphile/jiff-personal
|
/libexec/jiff-add-task
|
UTF-8
| 1,068 | 3.546875 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Usage: jiff add-task
# Delegates to the jiff-core implementation, then (for core tasks) pushes the
# new task to the jiff-core repo and rebases this personal repo on top of it.
source "${_JIFF_ROOT}/../../binaryphile/jiff-core/libexec/jiff-add-task"
# Commit and push a task file into jiff-core, then sync jiff-personal.
# NOTE(review): the "$(unknown)" substitutions below look like redacted
# placeholders (presumably "${filename}") -- as written they would execute a
# command literally named `unknown`. TODO confirm against the original repo.
push_core_task () {
# TODO: check and see if we're on master on jiff-personal first. commit and push jiff-core. mv jiff-personal link to jiff instead of creating and rm'ing?
filename="jiff-${1}"
cd "${_JIFF_ROOT}/../jiff/libexec"
make_symlink "$(unknown)" "../../../binaryphile/jiff-core/libexec/$(unknown)"
git add --force "$(unknown)"
git commit -m "automated commit"
git push
cd "${_JIFF_ROOT}/libexec"
rm "$(unknown)"
git fetch upstream
git rebase upstream/master
git push --force
git checkout develop
git merge master
git push
git checkout master
}
# Map a task scope name to the directory that holds tasks of that scope.
set_taskdir () {
case "${1}" in
"role" )
echo "../role/current"
;;
"default" )
echo ""
;;
"core" )
echo "../../../binaryphile/jiff-core/libexec"
;;
* )
echo "../context/current"
esac
}
# When sourced, stop here; when executed, fall through to main.
return 0 2>/dev/null || true
strict_mode on
main "${@}"
is_not_match "${2:-}" "core" || push_core_task "${1}"
| true |
f5351544aaeaa994c8782287439e64aa8c4b9a6b
|
Shell
|
dozer47528/home-config
|
/setup
|
UTF-8
| 1,294 | 3.578125 | 4 |
[] |
no_license
|
#!/bin/bash
# Bootstrap this dotfiles repo: re-init submodules, symlink everything into
# $HOME, install vim plugins and switch the login shell to zsh.
# Must be run from inside the project checkout (never from $HOME itself).
set -e
# Check working directory
# Quoted comparison: the unquoted `[ $HOME == $PWD ]` breaks on paths with
# spaces; `exit -1` is outside 0-255 and reported status 255 -- use 1.
if [ "$HOME" == "$PWD" ]; then
echo "Error: can not put this project in home directory!"
exit 1
fi
if [ ! -f "$PWD/setup" ]; then
echo "Error: please run this command under project directory!"
exit 1
fi
# Check dependency
command -v vim >/dev/null 2>&1 || { echo >&2 "I require vim but it's not installed. Aborting."; exit 1; }
command -v zsh >/dev/null 2>&1 || { echo >&2 "I require zsh but it's not installed. Aborting."; exit 1; }
command -v ctags >/dev/null 2>&1 || { echo >&2 "I require ctags but it's not installed. Aborting."; exit 1; }
# Reset submodules
rm -rf .oh-my-zsh
rm -rf .vim
rm -rf .zsh-plugins
rm -rf .zsh-themes
# Init submodules
git submodule init
git submodule update
# Link plugins and themes (the globs are intentionally outside the quotes)
ln -fs "$PWD"/.zsh-plugins/* "$PWD/.oh-my-zsh/custom/plugins"
ln -fs "$PWD"/.zsh-themes/* "$PWD/.oh-my-zsh/custom/themes"
# Link to home: everything except the helper scripts, README and .git
ls -A | grep -v '^setup$' | grep -v '^upgrade$' | grep -v '^README.md$' | grep -v '^.git$' | xargs -I {} rm -rf "$HOME/{}"
ls -A | grep -v '^setup$' | grep -v '^upgrade$'| grep -v '^README.md$' | grep -v '^.git$' | xargs -I {} ln -fs "$PWD/{}" "$HOME/{}"
# Install vim plugins
vim +PluginInstall +qall
# Change default shell
echo "Input password for change default shell"
chsh -s "$(command -v zsh)"
| true |
2a6efbed7d1d6b96f64dababb4c980cd9c2a4eeb
|
Shell
|
MatthiasPolag/Enriching-the-Machine-Learning-Workloads-of-BigBench
|
/SystemML_Algorithmen/System_ML_svm.sh
|
UTF-8
| 5,163 | 3.140625 | 3 |
[] |
no_license
|
#!/bin/bash
# BigBench v2 query 26: export recommendation data from Hive, split it into
# train/test sets, train an L2-SVM with SystemML and score it. Wall-clock
# timings for the prepare and execute phases are printed at the end.
echo "executing query 26"
# Measure time for query execution time
# Start timer to measure data loading for the file formats
STARTDATE="`date +%Y/%m/%d:%H:%M:%S`"
STARTDATE_EPOCH="`date +%s`" # seconds since epochstart
#step 1. NEED TO BE RECOMMENTED, Q renamed!
TEMP_DATA_DIR="/home/user1/MP/tempData"
TEMP_RESULT_DIR="/user/user1/bigbenchv2/recommendation"
#create local file to save the result of the query
mkdir /home/user1/MP/tempData
# create the temp_result dir
hadoop fs -mkdir /user/user1/bigbenchv2/recommendation
#save the table data into the file (tabs -> commas for CSV)
hive -e 'SELECT * FROM bigbenchv2.recommendation_data' | sed 's/\t/,/g'> "$TEMP_DATA_DIR/rawData"
PREPSTARTDATE="`date +%Y/%m/%d:%H:%M:%S`"
PREPSTARTDATE_EPOCH="`date +%s`" # seconds since epochstart
#split it into the feature vector part (everything after the first column)
sed 's/^[^,]*,//g' "$TEMP_DATA_DIR/rawData" > "$TEMP_DATA_DIR/featureVectors"
#and the label part (the first column)
sed 's/,.*//g' "$TEMP_DATA_DIR/rawData" > "$TEMP_DATA_DIR/labels"
#create metadatafiles with the correct number of columns AND delete the unnecessary part of the file path
rows=$(wc -l "$TEMP_DATA_DIR/featureVectors" | sed 's/[^0-9]*//g')
#count number of words
words=$(sed 's/,/ /g' "$TEMP_DATA_DIR/featureVectors" | wc -w | sed 's/[^0-9]*//g')
#calculate number of rows
columns=$(expr $words / $rows)
#calculate 60% split
split=$(expr $rows \* 60 / 100)
#store rows variable for testing file
testrows=$(expr $rows - $split)
secondpart=$(expr $split + 1)
#split into training and testing set
# NOTE(review): the computed $split/$testrows/$columns are never used below;
# the head/tail counts and the .mtd row counts are hard-coded for one fixed
# dataset size -- TODO confirm they match the actual row count of rawData.
head -n 88671 "$TEMP_DATA_DIR/featureVectors" > "$TEMP_DATA_DIR/featureVectoraa"
tail -n 88672 "$TEMP_DATA_DIR/featureVectors" > "$TEMP_DATA_DIR/featureVectorab"
head -n 88671 "$TEMP_DATA_DIR/labels" > "$TEMP_DATA_DIR/labelaa"
tail -n 88672 "$TEMP_DATA_DIR/labels" > "$TEMP_DATA_DIR/labelab"
#rename files
mv "$TEMP_DATA_DIR/featureVectoraa" "$TEMP_DATA_DIR/trainingData"
mv "$TEMP_DATA_DIR/featureVectorab" "$TEMP_DATA_DIR/testData"
mv "$TEMP_DATA_DIR/labelaa" "$TEMP_DATA_DIR/trainingLabels"
mv "$TEMP_DATA_DIR/labelab" "$TEMP_DATA_DIR/testLabels"
#create metadata file (SystemML needs an .mtd descriptor per CSV input)
echo {\"rows\": 88671, \"cols\": 19, \"format\": \"csv\"} > "$TEMP_DATA_DIR/trainingData.mtd"
echo {\"rows\": 88671, \"cols\": 1, \"format\": \"csv\"} > "$TEMP_DATA_DIR/trainingLabels.mtd"
echo {\"rows\": 59114, \"cols\": 19, \"format\": \"csv\"} > "$TEMP_DATA_DIR/testData.mtd"
echo {\"rows\": 59114, \"cols\": 1, \"format\": \"csv\"} > "$TEMP_DATA_DIR/testLabels.mtd"
#upload the files into hdfs
hdfs dfs -put "$TEMP_DATA_DIR/trainingData" "$TEMP_RESULT_DIR"
hdfs dfs -put "$TEMP_DATA_DIR/trainingLabels" "$TEMP_RESULT_DIR"
hdfs dfs -put "$TEMP_DATA_DIR/trainingData.mtd" "$TEMP_RESULT_DIR"
hdfs dfs -put "$TEMP_DATA_DIR/trainingLabels.mtd" "$TEMP_RESULT_DIR"
hdfs dfs -put "$TEMP_DATA_DIR/testData" "$TEMP_RESULT_DIR"
hdfs dfs -put "$TEMP_DATA_DIR/testLabels" "$TEMP_RESULT_DIR"
hdfs dfs -put "$TEMP_DATA_DIR/testData.mtd" "$TEMP_RESULT_DIR"
hdfs dfs -put "$TEMP_DATA_DIR/testLabels.mtd" "$TEMP_RESULT_DIR"
PREPDATE="`date +%Y/%m/%d:%H:%M:%S`"
PREPDATE_EPOCH="`date +%s`" # seconds since epoch
PREPDIFF_s="$(($PREPDATE_EPOCH - $PREPSTARTDATE_EPOCH))"
PREPDIFF_ms="$(($PREPDIFF_s * 1000))"
PREPDURATION="$(($PREPDIFF_s / 3600 ))h $((($PREPDIFF_s % 3600) / 60))m $(($PREPDIFF_s % 60))s"
#step 3 set java home
#JAVA_HOME=/usr/java/jdk1.8.0_60/
# clean the result before starting
# NOTE(review): this removes /user/cloudera/... while RESULT_DIR below is
# /user/user1/... -- looks like a leftover path; confirm which is intended.
hadoop fs -rm -r -f /user/cloudera/bigbenchv2/svm
#step 3. run the algorithm
# FIX: the original ran `hadoop fs -mkdir RESULT_DIR=/user/user1/bigbenchv2/svm`,
# which created a directory literally named "RESULT_DIR=..." instead of the
# result directory itself.
hadoop fs -mkdir /user/user1/bigbenchv2/svm
RESULT_DIR="/user/user1/bigbenchv2/svm"
#SVM training
EXSTARTDATE="`date +%Y/%m/%d:%H:%M:%S`"
EXSTARTDATE_EPOCH="`date +%s`" # seconds since epochstart
# FIX: dropped the stray '/' that was in front of "$RESULT_DIR/scores".
hadoop jar /home/user1/MP/systemml-1.1.0-bin/SystemML.jar -f "/home/user1/MP/systemml-1.1.0-bin/scripts/algorithms/l2-svm.dml" -nvargs X="$TEMP_RESULT_DIR/trainingData" Y="$TEMP_RESULT_DIR/trainingLabels" model="$RESULT_DIR/weights" Log="$RESULT_DIR/log" scores="$RESULT_DIR/scores" accuracy="$RESULT_DIR/accuracy" confusion="$RESULT_DIR/confusion"
EXDATE="`date +%Y/%m/%d:%H:%M:%S`"
EXDATE_EPOCH="`date +%s`" # seconds since epoch
EXDIFF_s="$(($EXDATE_EPOCH - $EXSTARTDATE_EPOCH))"
EXDIFF_ms="$(($EXDIFF_s * 1000))"
# FIX: the minutes/seconds components previously used PREPDIFF_s
# (copy-paste), so EXDURATION mixed prepare-phase time into execution time.
EXDURATION="$(($EXDIFF_s / 3600 ))h $((($EXDIFF_s % 3600) / 60))m $(($EXDIFF_s % 60))s"
#Predict on the held-out test set
hadoop jar /home/user1/MP/systemml-1.1.0-bin/SystemML.jar -f "/home/user1/MP/systemml-1.1.0-bin/scripts/algorithms/l2-svm-predict.dml" -nvargs X="$TEMP_RESULT_DIR/testData" Y="$TEMP_RESULT_DIR/testLabels" model="$RESULT_DIR/weights" Log="$RESULT_DIR/testlog" scores="$RESULT_DIR/scores" accuracy="$RESULT_DIR/accuracy" confusion="$RESULT_DIR/confusion"
#step 4 cleanup
#rm -r "$TEMP_DATA_DIR"
hadoop fs -rm -r -f "$TEMP_RESULT_DIR"
# Calculate the time
STOPDATE="`date +%Y/%m/%d:%H:%M:%S`"
STOPDATE_EPOCH="`date +%s`" # seconds since epoch
DIFF_s="$(($STOPDATE_EPOCH - $STARTDATE_EPOCH))"
DIFF_ms="$(($DIFF_s * 1000))"
DURATION="$(($DIFF_s / 3600 ))h $((($DIFF_s % 3600) / 60))m $(($DIFF_s % 60))s"
# print times
echo "query preparation time: ${PREPDIFF_s} (sec)| ${PREPDURATION}"
echo "query execution time: ${DIFF_s} (sec)| ${DURATION}"
| true |
3763ca96740228300d6777eb028c77f1e88b2621
|
Shell
|
fromcloud/handson
|
/hackersEnglish/scripts/install_dependencies.sh
|
UTF-8
| 144 | 2.734375 | 3 |
[] |
no_license
|
#!/bin/bash
# Dependency hook: install Apache (httpd) if it is not already installed.
if ! [ -x "$(command -v httpd)" ];
then
# FIX: the original unconditionally ran `exit 1` after the install, failing
# the hook even when yum succeeded; now only a failed install is fatal.
yum install -y httpd24 >&2 || exit 1;
fi
# install apache if not already installed
| true |
63a53005b43aae8953a2f3c34650e8e1d848572b
|
Shell
|
phy2000/presales-demos
|
/demos/scripts/BDS/kafkaScriptsOLD/kafkaCreateTopic
|
UTF-8
| 318 | 3.296875 | 3 |
[] |
no_license
|
#!/bin/bash
# Create a single-partition, replication-factor-1 Kafka topic on the
# configured ZooKeeper ensemble. Usage: kafkaCreateTopic <kafkatopic>
KAFKA_HOME=/opt/kafka
ZOOKEEPER=napslxcdh01:2181
USAGE="Usage: $0 <kafkatopic>"
if [ $# -lt 1 ] ; then
echo "$USAGE" >&2
# 'exit -1' is out of the 0-255 range and was reported as 255; use 1.
exit 1
fi
TOPIC=$1
# Guard the cd: without it a missing KAFKA_HOME ran bin/kafka-topics.sh
# from whatever directory the caller happened to be in.
cd "$KAFKA_HOME" || exit 1
bin/kafka-topics.sh --create --zookeeper "$ZOOKEEPER" --replication-factor 1 --partitions 1 --topic "$TOPIC"
| true |
5b8d2125533dc0da3fd0d16ccb5847f2d882b344
|
Shell
|
msgpo/osmo-ci
|
/scripts/osmo-layer1-headers.sh
|
UTF-8
| 1,463 | 3.953125 | 4 |
[] |
no_license
|
#!/bin/sh
# -e: fail if any of the subsequent commands fail
# -x: trace each executed command via debug log
set -e -x
# Usage:
#   ./osmo-layer1-headers.sh <bts-model> [version]
# <bts-model> selects the layer1 header repository ('sysmo', 'oct', 'lc15',
# 'oc2g'); [version] is an optional tag or branch for git checkout and
# defaults to the latest master branch.
case "$1" in
sysmo)
uri="git://git.sysmocom.de/sysmo-bts/layer1-api"
version_prefix=""
version="origin/master"
;;
oct)
uri="git://git.osmocom.org/octphy-2g-headers"
version_prefix=""
version="origin/master"
;;
lc15)
uri="https://gitlab.com/nrw_litecell15/litecell15-fw"
version_prefix="origin/nrw/"
version="origin/nrw/litecell15"
;;
oc2g)
uri="https://gitlab.com/nrw_oc2g/oc2g-fw"
version_prefix="origin/nrw/"
version="origin/nrw/oc2g"
;;
*)
echo "Unknown BTS model '$1'"
exit 1
;;
esac
# A non-empty 2nd argument other than 'master' overrides the default version.
if [ -n "$2" ] && [ "$2" != "master" ]; then
version=$2
fi
# Clone on first use; afterwards just fetch into the existing checkout.
[ -d layer1-headers ] || git clone "$uri" layer1-headers
cd layer1-headers
git fetch origin
# $version_prefix is an ugly workaround for jenkins not being able to deal with slash ('/')
# in label names that comprise the axis of a matrix buildjob, while nuran not using tags but
# only branch names in their firmware repositories :(
git checkout -f "$version" || git checkout -f "${version_prefix}${version}"
| true |
346d95fb32112cc183dd00168941bb06bf080b73
|
Shell
|
ivan-petrunko/utils
|
/autonetworkrestart.sh
|
UTF-8
| 332 | 3.296875 | 3 |
[] |
no_license
|
#!/bin/bash
# Watchdog: every 5 minutes probe connectivity and bounce NetworkManager's
# networking when the probe fails. Must run as root (nmcli networking off/on).
if [ "$EUID" -ne 0 ]
then echo "Please run as root"
exit
fi
echo Launched at $(date "+%Y-%m-%d %H:%M:%S")
while true; do
# FIX: use ping's exit status. The original tested whether ping printed
# anything on stdout, but a failed ping still prints its header and
# statistics block, so real outages (other than DNS errors) never
# triggered a restart.
if ! ping -c 1 -W 10 google.com > /dev/null 2>&1; then
nmcli networking off && nmcli networking on
echo Network restarted $(date "+%Y-%m-%d %H:%M:%S")
fi
sleep 300
done
| true |
8958cd632f1f6e5f9091261c3a16ae9ae04f539a
|
Shell
|
Driaan/scc-new
|
/scc-srv-src/sunos-gen-scc-srv
|
UTF-8
| 3,672 | 3.46875 | 3 |
[] |
no_license
|
#!/bin/sh
# Generate Solaris package to install scc-srv by means of pkgadd.
# Copyright (C) 2001-2004 Open Challenge B.V.
# Copyright (C) 2004-2005 OpenEyeT Professional Services.
# Copyright (C) 2005-2018 QNH.
# Copyright (C) 2019 Siem Korteweg.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING.
# If not, write to the Free Software Foundation,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#
# Contact information: https://sourceforge.net/projects/sysconfcollect/support
# SCC-release: 1.19.44
# ID: $Id: sunos-gen-scc-srv 6217 2019-03-22 18:46:12Z siemkorteweg $
# This program requires to be called in a directory with the following hierarchy:
#
# install pre/post install scripts
# software/opt/scc-srv bin, doc and man
# software/var/opt/scc-srv conf, data and newconfig
#
# After using this program, the directory contains one file:
#
# scc-srv-1.19.44.sunos.pkg
#
# Provide an alternative version as first argument.
# Be aware of the limitations in the format of the version.
# Re-exec under ksh when the current shell lacks the POSIX features we need:
# classic Solaris /bin/sh cannot do "$(...)" command substitution, so the
# probe below comes back different from ${TESTING} and we switch shells.
TESTING="testing"
x="$(echo "${TESTING}" 2>/dev/null)"
if [ "${x}" != "${TESTING}" ]
then
exec /bin/ksh ${0} "$@"
# Only reached when the exec itself failed (no ksh present).
echo "${0}: cannot find ksh" >&2
exit 2
fi
ProgName=${0##*/}; export ProgName
# pkgproto is required later to generate the package prototype file.
exe="$(which pkgproto 2>/dev/null)"
if [ ! -x "${exe}" ]
then
echo "${ProgName}: cannot find pkgproto" >&2
exit 2
fi
# Only root may run: the packaged files must end up owned by root.
uid=$(id | sed -e 's/(.*//' -e 's/.*=//')
if [ "${uid}" != "0" ]
then
echo "${ProgName}: only root should run this program" >&2
exit 2
fi
chown -R root:root software
# Package version: optional first argument, defaults to the release version.
VERSION=${1:-1.19.44}
# pkginfo drives pkgmk; ARCH=noarch because the payload is shell-only.
cat >pkginfo <<_EOF_
PKG="scc-srv"
VERSION="${VERSION}"
NAME="System Configuration Collector Server"
CATEGORY="application"
CLASSES="none"
ARCH="noarch"
VENDOR="(c) Siem Korteweg"
_EOF_
# Record the regular files present now so they can be removed at the end.
for f in *
do
if [ -f "${f}" ]
then
echo "${f}"
fi
done >file_list
PKG=scc-srv-${VERSION}.sunos.pkg; export PKG
# (A second, byte-identical root check used to live here; the check earlier
# in the script already guarantees we are root, so it was removed.)
# The shell /bin/sh on Solaris systems does not provide the functionality we need.
# Replace it with /bin/ksh
find install software -type f |
while read -r f
do
hdr=$(head -n 1 "${f}")
if [ "${hdr}" = "#!/bin/sh" ]
then
# In-place rewrite of the shebang via ed; the sed drops ed's '?' noise.
cat <<_X_ | ed -s "${f}" 2>/dev/null | sed -e '/^?$/d'
1,$ s@^#!/bin/sh@#!/bin/ksh@
w
q
_X_
fi
done
mkdir spool
# Build the prototype file: the 'i' lines register the metadata/scripts,
# pkgproto emits one entry per payload file.
(
cat <<_X_
i pkginfo
i postinstall=install/postinstall
i preremove=install/preremove
_X_
# Do not alter permissions for default directories like /opt, /var and /var/opt.
# Ignore /opt/scc-srv and /var/opt/scc-srv to allow symbolic links for these directories.
# Do not bother in case of "relocation"
pkgproto software/opt/scc-srv=/opt/scc-srv software/var/opt/scc-srv=/var/opt/scc-srv |
sed -e '/\/opt /d' \
-e '/\/var /d' \
-e '/\/scc-srv /d' \
-e 's/ 0600 / 0500 /g' \
-e 's/root other/root sys/g'
) >sunos-scc-srv.pkg
# Build the package into ./spool, then translate it to a single file stream.
pkgmk -o \
-d ./spool \
-f sunos-scc-srv.pkg \
-p sunos58`date '+%Y%m%d%H%M'` \
-v ${VERSION}
pkgtrans -n ./spool ${PKG} scc-srv
mv spool/${PKG} .
chmod 444 ${PKG}
# Clean everything except the finished package (file_list was recorded above).
rm -rf install software spool sunos-scc-srv.pkg $(cat file_list)
exit 0
| true |
6f0018d428534bc2a3c42ab1c47685cc8c62e8d1
|
Shell
|
BlueDrink9/env
|
/shell/scripts/myEmacs
|
UTF-8
| 247 | 2.890625 | 3 |
[] |
no_license
|
#!/bin/sh
# Open emacsclient in the current terminal, creating a frame; the empty
# --alternate-editor makes emacsclient start a daemon if none is running.
launch_client() {
emacsclient --create-frame --alternate-editor='' -nw "$@"
}
# Use xterm-direct to convince doom emacs to use truecolor
case "$COLORTERM" in
truecolor)
TERM=xterm-direct launch_client "$@"
;;
*)
launch_client "$@"
;;
esac
| true |
482b23bd05b760c31bb7beafed282db36e5e6df4
|
Shell
|
machawk1/wail
|
/bundledApps/heritrix-3.2.0/bin/foreground_heritrix
|
UTF-8
| 1,080 | 3.75 | 4 |
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown"
] |
permissive
|
#!/usr/bin/env bash
##
## This script launches the heritrix crawler and keeps the process in foreground
##
## Optional environment variables
##
## JAVA_HOME Point at a JDK install to use.
##
## HERITRIX_HOME Pointer to your heritrix install. If not present, we
## make an educated guess based of position relative to this
## script.
##
## JAVA_OPTS Java runtime options.
##
## FOREGROUND Set to any value -- e.g. 'true' -- if you want to run
## heritrix in foreground (Used by build system when it runs
## selftest to see if completed successfully or not)..
##
# Resolve links - $0 may be a softlink
PRG="$0"
while [ -h "$PRG" ]; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '.*/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`/"$link"
fi
done
PRGDIR=`dirname "$PRG"`
# Set HERITRIX_HOME.
if [ -z "$HERITRIX_HOME" ]
then
HERITRIX_HOME=`cd "$PRGDIR/.." ; pwd`
fi
# FIX: quote the path and "$@" -- the unquoted $@ re-split any launcher
# argument containing whitespace, and an install path with spaces broke
# the command entirely.
FOREGROUND='true' /bin/sh "$HERITRIX_HOME/bin/heritrix" "$@"
| true |
c9091cc8b8dba39613baf028753d3a376b731adf
|
Shell
|
CodeMason/CityGenerator
|
/runtest.sh
|
UTF-8
| 927 | 2.84375 | 3 |
[] |
no_license
|
#!/bin/bash
# Test driver: dispatch on the first argument to run the plain test suite,
# code coverage, NYTProf profiling, or all of the above.
case "$1" in
profile)
echo "profiling code"
perl -d:NYTProf ./tests/runtests.pl
rm -rf nytprof.old || echo "no old to remove"
mv nytprof nytprof.old
nytprofhtml --open
;;
full|all)
echo "full test, coverage and profiling"
rm -rf cover_db || echo "no old cover to remove"
rm -rf nytprof || echo "no old nytprof to remove"
# perl -d:NYTProf ./tests/runtests.pl && \
perl -MDevel::Cover=+select,^lib/.*\.pm,+ignore,^/,tests/ ./tests/runtests.pl >/dev/null && \
cover -summary && \
chmod -R 755 cover_db && \
nytprofhtml --open
;;
cover)
echo " checking code coverage"
perl -MDevel::Cover=+select,^lib/.*\.pm,+ignore,^/,tests/ ./tests/runtests.pl >/dev/null && \
cover -summary && \
chmod -R 755 cover_db
;;
*)
echo "quick test"
perl ./tests/runtests.pl
;;
esac
| true |
629f33a7182da2e85ce3aa776e307168393ecb34
|
Shell
|
Liesegang/dotfiles
|
/zsh/.zshrc
|
UTF-8
| 4,355 | 3.078125 | 3 |
[] |
no_license
|
source $ZDOTDIR/exports.zsh
source $ZDOTDIR/alias.zsh
# Enable color support
autoload -Uz colors
colors
# History settings
HISTFILE=~/.zsh_history
HISTSIZE=1000000
SAVEHIST=1000000
# Configure word delimiter characters
autoload -Uz select-word-style
# Characters listed here are treated as word delimiters;
# '/' is a delimiter too, so ^W deletes one path component at a time
zstyle ':zle:*' word-chars " /=;@:{},|"
zstyle ':zle:*' word-style unspecified
########################################
# Completion
# Enable the completion system
autoload -Uz compinit
compinit
# Let lowercase input match uppercase candidates during completion
zstyle ':completion:*' matcher-list 'm:{a-z}={A-Z}'
# After ../ do not offer the current directory as a completion
zstyle ':completion:*' ignore-parents parent pwd ..
# Complete command names after sudo
zstyle ':completion:*:sudo:*' command-path /usr/local/sbin /usr/local/bin \
/usr/sbin /usr/bin /sbin /bin /usr/X11R6/bin
# Process-name completion for the ps command
zstyle ':completion:*:processes' command 'ps x -o pid,s,args'
########################################
# Options
# Allow display of 8-bit (e.g. Japanese) file names
setopt print_eight_bit
# Disable the terminal bell
setopt no_beep
# Disable flow control (frees ^S/^Q)
setopt no_flow_control
# Do not exit zsh on Ctrl+D
setopt ignore_eof
# Treat everything after '#' as a comment on the interactive command line
setopt interactive_comments
# cd by typing a directory name alone
setopt auto_cd
# pushd automatically on every cd
setopt auto_pushd
# Do not push duplicate directories onto the stack
setopt pushd_ignore_dups
# Share history between concurrently running zsh sessions
setopt share_history
# Do not keep duplicate commands in history
setopt hist_ignore_all_dups
# Do not record command lines that start with a space
setopt hist_ignore_space
# Strip superfluous blanks when saving to history
setopt hist_reduce_blanks
# Use extended globbing
setopt extended_glob
setopt correct
setopt no_flow_control
########################################
# Key bindings
# vi-style key bindings (NOTE: the original comment said "emacs-style",
# but 'bindkey -v' selects vi mode)
bindkey -v
bindkey -v '^?' backward-delete-char
# Allow '*' wildcards when searching history with ^R
bindkey '^R' history-incremental-pattern-search-backward
bindkey '^P' history-beginning-search-backward
bindkey '^N' history-beginning-search-forward
bindkey '^Q' push-line-or-edit
########################################
# Per-OS settings
case ${OSTYPE} in
darwin*)
# macOS-specific settings
export CLICOLOR=1
alias ls='ls -G -F'
;;
linux*)
# Linux-specific settings
alias ls='ls -F --color=auto'
eval `dircolors .dir_colors/dircolors`
;;
esac
# List the directory automatically after every cd
function chpwd() { ls }
# mkdir + cd into the last argument
function mkcd() { mkdir -p "$@" && eval cd "\"\$$#\""; }
# for any env
export PATH="$HOME/.anyenv/bin:$PATH"
eval "$(anyenv init -)"
# for prompt setting
autoload -Uz promptinit
promptinit
PROMPT="%(?.%{${fg[green]}%}.%{${fg[red]}%})%n${reset_color}@%{${fg[blue]}%}%m${reset_color}(%*%) %~
%# "
# ls color
zstyle ':completion:*' list-colors ${(s.:.)LS_COLORS}
# for zplug
source ~/.zplug/init.zsh
zplug 'zplug/zplug', hook-build:'zplug --self-manage'
zplug 'zsh-users/zsh-completions'
zplug "zsh-users/zsh-syntax-highlighting", defer:2
zplug load
# git settings: show vcs_info (branch, staged/unstaged markers) in the prompt
RPROMPT="%{${fg[blue]}%}[%~]%{${reset_color}%}"
autoload -Uz vcs_info
setopt prompt_subst
zstyle ':vcs_info:git:*' check-for-changes true
zstyle ':vcs_info:git:*' stagedstr "%F{yellow}!"
zstyle ':vcs_info:git:*' unstagedstr "%F{red}+"
zstyle ':vcs_info:*' formats "%F{green}%c%u[%b]%f"
zstyle ':vcs_info:*' actionformats '[%b|%a]'
precmd () { vcs_info }
RPROMPT=$RPROMPT'${vcs_info_msg_0_}'
## man zshall
# zman [search word]
zman() {
if [[ -n $1 ]]; then
PAGER="less -g -s '+/"$1"'" man zshall
echo "Search word: $1"
else
man zshall
fi
}
# Search the zsh manual for a term
# http://qiita.com/mollifier/items/14bbea7503910300b3ba
zwman() {
zman "^ $1"
}
# Search the zsh manual for an option/flag
zfman() {
local w='^'
w=${(r:8:)w}
w="$w${(r:7:)1}|$w$1(\[.*\].*)|$w$1:.*:|$w$1/.*/.*"
zman "$w"
}
| true |
104b0b0816190893d640353b34dd026dcee44b0a
|
Shell
|
jvandervelden/clean-path
|
/run.sh
|
UTF-8
| 467 | 2.515625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Rebuild the clean-path images, then recreate the 'cleanPath' docker network
# and relaunch the three containers (UI published on host port 1337).
baseDir=$(dirname "$(readlink -f "$0")")
"$baseDir/build.sh"
# Remove any previous containers and network before recreating them.
for container in cleanpath-ui cleanpath-service cleanpath-cleaner; do
docker rm -f "$container"
done
docker network rm cleanPath
docker network create cleanPath
docker run -d --name cleanpath-cleaner --network cleanPath cleanpath-cleaner:1.0.0
docker run -d --name cleanpath-service --network cleanPath cleanpath-service:1.0.0
docker run -d --name cleanpath-ui --network cleanPath -p 1337:80 cleanpath-ui:1.0.0
| true |
bca05e690bad85e29d01f54a7fd6e30ce12d5f35
|
Shell
|
splatatozor/Redis-mass-insert
|
/insert_nosql_million.sh
|
UTF-8
| 455 | 3.4375 | 3 |
[] |
no_license
|
#!/bin/bash
# Flush redis, bulk-insert the million-row dataset via a Lua script, and
# report the elapsed wall time plus the resulting key count.
redis-cli flushall
echo "Earth has been destroyed"
TIME1=$(date +%s%N)
redis-cli --eval generate_data_million.lua
TIME2=$(date +%s%N)
# ns -> ms; shell arithmetic replaces the external 'expr' calls.
DIFFSEC=$(( (TIME2 - TIME1) / 1000000 ))
echo Start ${TIME1}
echo Finish ${TIME2}
# FIX: output said "miliseconds".
echo Took ${DIFFSEC} milliseconds.
redis-cli KEYS "*" | wc -l
| true |
0cd7408b2a37e423f21847e893a4db4fb3fcbff7
|
Shell
|
pinczakko/GraphViz-Samples
|
/gen_png.sh
|
UTF-8
| 364 | 4 | 4 |
[] |
no_license
|
#!/bin/sh
# Render a GraphViz .dot file to a PNG with the same basename.
# FIX: '[ $# == 1 ]' is a bashism; under a strict POSIX /bin/sh (dash) the
# test fails with "unexpected operator". Use -eq and quote "$1".
if [ $# -eq 1 ] ; then
## echo "Number of arguments = $#"
if [ -f "$1" ] ; then
# name without extension
name=${1%\.*}
echo "Input filename is (w/ extension): $1"
echo "Input filename is (w/o extension): ${name}"
dot -Tpng "$1" -o "${name}.png"
else
echo "ERROR: $1 file does not exist!"
fi
else
echo "Usage: $0 [dot_filename]"
fi
| true |
0255c201b1a1246b60d3c642b71ee2d10ed2d605
|
Shell
|
ripples3/sysadmin-shell-scripts
|
/server_management/Debian/install_salt_minion.sh
|
UTF-8
| 1,562 | 4.4375 | 4 |
[] |
no_license
|
#!/bin/bash
#####################################################
# #
# A shell script to install Salt-Minion on Debian #
# #
#####################################################
# Refuse to run for anyone but root.
if [[ $(/usr/bin/id -u) != "0" ]]; then
echo -e "This looks like a 'non-root' user.\nPlease switch to 'root' and run the script again."
exit
fi
# The Salt Master's IP address must be supplied as the first argument.
if [ -z "$1" ]; then
echo -e "Usage: ./install_salt_minion.sh <salt-master-ip>\nPlease run the script once more WITH the Salt Master's IP address."
exit
fi
MASTERIP=$1
# Whole minion setup: repo key, packages, firewall port, config, service.
install_minion() {
# Get the saltstack key and create the salt repo
wget -O - https://repo.saltstack.com/apt/debian/8/amd64/latest/SALTSTACK-GPG-KEY.pub | apt-key add -
echo -e "deb http://repo.saltstack.com/apt/debian/8/amd64/latest jessie main" >> /etc/apt/sources.list.d/saltstack.list
apt-get update
# Install salt-minion and firewalld
apt-get install salt-minion salt-ssh firewalld -y
# Open the Salt-minon port
firewall-cmd --zone=public --add-port=4506/tcp --permanent
firewall-cmd --reload
# Add the Salt-Master's IP to the minion's config file
sed -i "s/\#master:\ salt/master:\ $MASTERIP/g" /etc/salt/minion
# Enable and start the minion service
systemctl enable salt-minion
systemctl start salt-minion
}
# Run the installation.
install_minion
| true |
55bcdfdc245ec20badadc17cd90c09dfcecc1489
|
Shell
|
andreaswendlandt/sysadmin_everyday_tools
|
/iptables_fix.sh
|
UTF-8
| 1,143 | 3.53125 | 4 |
[] |
no_license
|
#!/bin/bash
# author: guerillatux
# desc: simple iptables based firewall for ipv4 and ipv6, udp and tcp
# last modified: 07.03.2016
# Apply every rule to both the IPv4 and IPv6 tables.
# FIX: "$@" is quoted so multi-word arguments survive intact; the old
# unquoted $@ happened to work for these rules but breaks on any argument
# containing whitespace.
IPT () {
iptables "$@"
ip6tables "$@"
}
# remove everything and start from scratch
IPT -F INPUT
IPT -F OUTPUT
IPT -F FORWARD
# deny everything as a default policy
IPT -P INPUT DROP
IPT -P FORWARD DROP
IPT -P OUTPUT DROP
# allow traffic initialized from ourselves
IPT -A OUTPUT -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT
IPT -A INPUT -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT
# start to enable stuff, in particular loopback interface
IPT -t filter -A INPUT -i lo -j ACCEPT
# FIX: the OUTPUT chain matches on the *outgoing* interface (-o); iptables
# rejects -i in OUTPUT, so the original loopback-output rule was never
# installed and outbound loopback traffic hit the DROP policy.
IPT -t filter -A OUTPUT -o lo -j ACCEPT
# allow icmp from all private networks
# NOTE(review): these source ranges are IPv4-only, so the ip6tables half of
# each rule fails -- confirm whether an ICMPv6 equivalent is wanted.
IPT -I INPUT -p icmp -s 192.168.0.0/16 -j ACCEPT
IPT -I INPUT -p icmp -s 172.16.0.0/12 -j ACCEPT
IPT -I INPUT -p icmp -s 10.0.0.0/8 -j ACCEPT
## TCP
# tcp_ports/udp_ports hold a whitespace-separated list of port numbers;
# "list_of_ports" is a placeholder the operator must replace.
tcp_ports="list_of_ports"
for tcp_port in $tcp_ports; do
IPT -A INPUT -p tcp --dport $tcp_port -j ACCEPT
done
## UDP
udp_ports="list_of_ports"
for udp_port in $udp_ports; do
IPT -A INPUT -p udp --dport $udp_port -j ACCEPT
done
| true |
3df65daccf8b4bc765dccfee676ee1fe69a5a621
|
Shell
|
RushikeshDapurkar/multi-cloud
|
/installer/install_dependencies.sh
|
UTF-8
| 2,223 | 3.671875 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Install build/runtime dependencies for multi-cloud: base packages, the
# docker apt repository, python/ansible, docker and Go 1.17.9.
# Assumes Ubuntu/Debian and root privileges (plain apt-get calls).
# Install dependencies
echo Installing dependencies
apt-get install -y curl wget libltdl7 libseccomp2 libffi-dev apt-transport-https ca-certificates gnupg gnupg-agent lsb-release software-properties-common sshpass pv gawk
# Enable docker repository
echo Enabling docker repository
mkdir -p /etc/apt/keyrings
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /etc/apt/keyrings/docker.gpg
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
# Update local repositories
echo Updating local repositories
apt-get update
# Install python dependencies
echo Installing Python dependencies
apt-get install -y python3-pip
# Install ansible if not present
# NOTE(review): `command -v` would be the portable presence check; `which`
# works on Debian/Ubuntu, so this is left as-is.
if [ "`which ansible`" != "" ]; then
echo ansible already installed, skipping.
else
echo Installing ansible
python3 -m pip install ansible
fi
# Install docker if not present
if [ "`which docker`" != "" ]; then
echo Docker already installed, skipping.
else
echo Installing docker
apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
fi
# Install Go if not present
if [ "`which go`" != "" ]; then
# Parse "goX.Y.Z" out of `go version`: first split on spaces to isolate
# the version token, then on '.' to get major/minor components.
IFS=' '
v=`go version | { read _ _ v _; echo ${v#go}; }`
IFS='.'
read -ra v <<< "$v"
# Keep an existing install only for Go 1.17+ (major version 1).
if (( ${v[0]} == 1 && ${v[1]} >= 17 )); then
echo Go 1.17+ already installed, skipping.
else
echo Removing existing Go installation
rm -rf /usr/local/go
echo Installing Go 1.17.9
wget https://storage.googleapis.com/golang/go1.17.9.linux-amd64.tar.gz
tar -C /usr/local -xzf go1.17.9.linux-amd64.tar.gz
fi
# Restore default word splitting and drop the scratch variable.
unset IFS v
else
echo Installing Go 1.17.9
wget https://storage.googleapis.com/golang/go1.17.9.linux-amd64.tar.gz
tar -C /usr/local -xzf go1.17.9.linux-amd64.tar.gz
fi
# Ensure /usr/local/bin is in path
export PATH=$PATH:/usr/local/bin
# Ensure usr/local/go/bin is in path, create GOPATH and source it
# NOTE(review): sourcing /etc/profile here only affects this script's shell;
# interactive shells pick the additions up at next login.
echo 'export PATH=$PATH:/usr/local/go/bin' >> /etc/profile
echo 'export GOPATH=$HOME/gopath' >> /etc/profile
source /etc/profile
| true |
925d37e364eef0ab1b751f55836042cbd6b8707c
|
Shell
|
fdesjardins/config
|
/bin/git-restamp-all
|
UTF-8
| 234 | 3.546875 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Re-stamp every commit in the range <from>..<to> with the given date spec.
#
# Usage: git-restamp-all <datecal> <from> [<to>]
#   $1 - date specification forwarded to `git restamp`
#   $2 - start of the revision range (exclusive)
#   $3 - end of the revision range (defaults to HEAD)
datecal="$1"
from="$2"
to="$3"
if [[ -z "$to" ]]; then
  to="HEAD"
fi
# printf instead of unquoted echo: safe even if a ref starts with '-'.
printf '%s\n' "$from"
printf '%s\n' "$to"
# --boundary prefixes boundary commits with '-'; strip the markers so every
# listed hash can be passed straight to `git restamp`.
commits=$(git rev-list --boundary "$from..$to" | sed 's/-//g')
# Hashes are hex-only, so relying on word splitting here is safe.
for commit in $commits
do
  git restamp "$commit" "$datecal"
done
| true |
3effd117faef6898d8ca4bdddc980f64610258b7
|
Shell
|
adyang/dotfiles
|
/test/install.bats
|
UTF-8
| 9,043 | 3.21875 | 3 |
[] |
no_license
|
#!/usr/bin/env bats
# Unit tests (bats-core) for the functions defined in ../install.
# Each test stubs out external commands (brew, curl, sudo, asdf, python, ...)
# via shell functions or the mock_* helpers from mock_helper, then uses
# bats-assert to check exit status and captured output lines.
load 'libs/bats-support/load'
load 'libs/bats-assert/load'
load 'mock_helper'

# Fresh temp HOME and script dir per test; sources the script under test so
# its functions are available in the test shell.
setup() {
  tmp_dot_home="$(mktemp -d)"
  DOT_HOME="${tmp_dot_home}"
  tmp_script_dir="$(mktemp -d)"
  source "${BATS_TEST_DIRNAME}/../install"
}

teardown() {
  rm -rf "${tmp_dot_home}"
  rm -rf "${tmp_script_dir}"
}

# --- sudo_keep_alive ---------------------------------------------------------
@test "[install] sudo_keep_alive: password validation failure" {
  mock_failure 'sudo' '--stdin' '--validate' '--prompt'
  # If sudo validation fails, the background keep-alive must never start.
  sudo_until_process_ends() {
    echo '[FAILURE] should exit immediately on sudo validation failure'
  }
  run sudo_keep_alive
  assert_failure 1
  refute_output --partial '[FAILURE]'
}

# --- brew_kext_packages ------------------------------------------------------
@test "[install] brew_kext_packages: brew update fails" {
  mock_failure 'brew' 'update'
  run brew_kext_packages <<<$'\n'
  assert_failure 1
  refute_line --partial 'brew bundle'
}

@test "[install] brew_kext_packages: first brew bundle succeeds" {
  mock_echo 'brew'
  run brew_kext_packages <<<$'\n'
  assert_success
  assert_equal "${#lines[@]}" 3
  assert_line --index 1 --regexp '^brew update'
  assert_line --index 2 --regexp '^brew bundle --verbose --file=.*Brewfile-kext$'
}

@test "[install] brew_kext_packages: first brew bundle fails" {
  # Stub brew: first `brew bundle` fails (simulating a blocked kext), the
  # retry succeeds after the user confirms.
  attempt_count=0
  brew() {
    [[ "$1" == 'bundle' ]] || return 0;
    attempt_count=$(( attempt_count + 1 ))
    if (( attempt_count == 1 )); then
      return 1
    else
      echo "brew $*"
    fi
  }
  run brew_kext_packages <<<$'\n'
  assert_success
  assert_line --index 2 --partial 'Please allow kext installation'
  assert_line --index 3 --regexp '^brew bundle --verbose --file=.*Brewfile-kext$'
}

@test "[install] brew_kext_packages: first brew bundle fails and read fails" {
  attempt_count=0
  brew() {
    [[ "$1" == 'bundle' ]] || return 0;
    attempt_count=$(( attempt_count + 1 ))
    if (( attempt_count == 1 )); then
      return 1
    else
      echo "brew $*"
    fi
  }
  # The confirmation prompt itself fails, so no retry should happen.
  mock_failure 'read'
  run brew_kext_packages
  assert_failure 1
  assert_line --index 2 --partial 'Please allow kext installation'
  refute_line --index 3 'brew bundle --verbose --file=Brewfile-kext'
}

@test "[install] brew_kext_packages: first brew bundle fails and second brew bundle fails" {
  mock_failure 'brew' 'bundle'
  # Wrapper proves the function exits (not merely returns) on double failure.
  brew_kext_packages_with_exit_test() {
    brew_kext_packages <<<$'\n'
    echo '[FAILURE] failed to exit'
  }
  run brew_kext_packages_with_exit_test
  assert_failure 1
  refute_line '[FAILURE] failed to exit'
}

# --- brew_packages -----------------------------------------------------------
@test "[install] brew_packages: brew update fails" {
  mock_failure 'brew' 'update'
  run brew_packages
  assert_failure 1
  refute_line --partial 'brew bundle'
}

@test "[install] brew_packages: brew bundle fails" {
  mock_failure 'brew' 'bundle'
  brew_packages_with_exit_test() {
    brew_packages
    echo '[FAILURE] failed to exit'
  }
  run brew_packages_with_exit_test
  assert_failure 1
  refute_line '[FAILURE] failed to exit'
}

# --- install_powerline -------------------------------------------------------
@test "[install] install_powerline: powerline-go already installed" {
  mock_echo 'curl'
  mkdir -p "${DOT_HOME}/.powerline-go"
  touch "${DOT_HOME}/.powerline-go/powerline-go-darwin-amd64-v1.18.0"
  run install_powerline
  assert_success
  refute_line --partial 'curl'
}

@test "[install] install_powerline: powerline-go not installed" {
  # Stub curl to "download" the binary into place.
  curl() {
    mkdir -p "${DOT_HOME}/.powerline-go"
    touch "${DOT_HOME}/.powerline-go/powerline-go-darwin-amd64-v1.18.0"
  }
  run install_powerline
  assert_success
  assert [ "${DOT_HOME}/.local/bin/powerline-go" -ef "${DOT_HOME}/.powerline-go/powerline-go-darwin-amd64-v1.18.0" ]
}

@test "[install] install_powerline: powerline-go not installed but download fails" {
  # curl exit code 22 = HTTP error with -f; must propagate and leave nothing behind.
  curl() {
    return 22
  }
  run install_powerline
  assert_failure 22
  refute [ -x "${DOT_HOME}/.powerline-go/powerline-go-darwin-amd64-v1.18.0" ]
  refute [ -d "${DOT_HOME}/.local/bin" ]
  refute [ "${DOT_HOME}/.local/bin/powerline-go" -ef "${DOT_HOME}/.powerline-go/powerline-go-darwin-amd64-v1.18.0" ]
}

# --- symlink_home_files ------------------------------------------------------
@test "[install] symlink_home_files: regular file with same name as home file present" {
  mkdir "${tmp_script_dir}/home"
  touch "${tmp_script_dir}/home/present-file"
  echo 'present-file' > "${tmp_dot_home}/present-file"
  run symlink_home_files "${tmp_script_dir}"
  assert_success
  # Existing regular file is preserved as a .bak and replaced by the symlink.
  assert [ "$(cat "${tmp_dot_home}/present-file.bak")" == 'present-file' ]
  assert [ "${tmp_dot_home}/present-file" -ef "${tmp_script_dir}/home/present-file" ]
}

@test "[install] symlink_home_files: directory with same name as home file present" {
  mkdir "${tmp_script_dir}/home"
  touch "${tmp_script_dir}/home/same-name"
  mkdir "${tmp_dot_home}/same-name"
  run symlink_home_files "${tmp_script_dir}"
  assert_success
  # Directories are left alone: no backup, no symlink.
  assert [ -d "${tmp_dot_home}/same-name" ]
  refute [ -e "${tmp_dot_home}/same-name.bak" ]
  refute [ "${tmp_dot_home}/same-name" -ef "${tmp_script_dir}/home/same_name" ]
}

@test "[install] symlink_home_files: symlink with same name as home file present" {
  mkdir "${tmp_script_dir}/home"
  touch "${tmp_script_dir}/home/same-name"
  touch "${tmp_dot_home}/old-src"
  ln -sv "${tmp_dot_home}/old-src" "${tmp_dot_home}/same-name"
  run symlink_home_files "${tmp_script_dir}"
  assert_success
  # Pre-existing symlinks are kept pointing at their original target.
  assert [ "${tmp_dot_home}/same-name" -ef "${tmp_dot_home}/old-src" ]
  refute [ -e "${tmp_dot_home}/same-name.bak" ]
}

@test "[install] symlink_home_files: hidden and non-hidden dotfiles" {
  mkdir "${tmp_script_dir}/home"
  touch "${tmp_script_dir}/home/"{.hidden,non-hidden}
  mkdir "${tmp_script_dir}/home/"{.hidden-dir,non-hidden-dir}
  run symlink_home_files "${tmp_script_dir}"
  assert_success
  for file in .hidden non-hidden .hidden-dir non-hidden-dir; do
    assert [ "${tmp_dot_home}/${file}" -ef "${tmp_script_dir}/home/${file}" ]
  done
}

@test "[install] symlink_home_files: backup of regular file fails" {
  backup_if_regular_file() {
    return 1
  }
  mock_echo 'symlink_if_absent'
  run symlink_home_files "${tmp_script_dir}"
  assert_failure 1
  refute_line --partial 'symlink_if_absent'
}

@test "[install] symlink_home_files: symlink fails" {
  symlink_if_absent() {
    return 1
  }
  symlink_home_files_with_exit_test() {
    symlink_home_files "${tmp_script_dir}"
    echo '[FAILURE] failed to exit'
  }
  run symlink_home_files_with_exit_test
  assert_failure 1
  refute_line '[FAILURE] failed to exit'
}

# --- symlink_nested_home_files -----------------------------------------------
@test "[install] symlink_nested_home_files: nested home files" {
  mkdir -p "${tmp_script_dir}/home-nested/nest1/nest2"
  touch "${tmp_script_dir}/home-nested/nest1/nest2/nested-file"
  run symlink_nested_home_files "${tmp_script_dir}"
  assert_success
  # Only the leaf file is linked; intermediate directories are real dirs.
  assert [ "${tmp_dot_home}/nest1/nest2/nested-file" -ef "${tmp_script_dir}/home-nested/nest1/nest2/nested-file" ]
  refute [ -L "${tmp_dot_home}/nest1" ]
  refute [ -L "${tmp_dot_home}/nest1/nest2" ]
}

# --- configure_asdf_plugins --------------------------------------------------
@test "[install] configure_asdf_plugins: source asdf fails" {
  source() {
    return 1
  }
  mock_echo 'upadd_plugins'
  echo 'plugin repo' > "${tmp_dot_home}/.asdf-plugins"
  mock_echo 'install_plugin_versions'
  echo 'plugin 1.0.0' > "${tmp_dot_home}/.tool-versions"
  mock_echo 'additional_plugins_setup'
  run configure_asdf_plugins
  assert_failure 1
  refute_line --partial 'upadd_plugins'
  refute_line --partial 'install_plugin_versions'
}

@test "[install] configure_asdf_plugins: upadd_plugin fails" {
  upadd_plugin() {
    return 1
  }
  echo 'plugin repo' > "${tmp_dot_home}/.asdf-plugins"
  mock_echo 'source'
  mock_echo 'install_plugin_versions'
  echo 'plugin 1.0.0' > "${tmp_dot_home}/.tool-versions"
  mock_echo 'additional_plugins_setup'
  run configure_asdf_plugins
  assert_failure 1
  refute_line --partial 'install_plugin_versions'
}

@test "[install] configure_asdf_plugins: asdf install <plugin> <version> fails" {
  mock_failure 'asdf' 'install'
  echo 'plugin 1.0.0' > "${tmp_dot_home}/.tool-versions"
  mock_echo 'source'
  mock_echo 'upadd_plugin'
  echo 'plugin repo' > "${tmp_dot_home}/.asdf-plugins"
  mock_echo 'additional_plugins_setup'
  run configure_asdf_plugins
  assert_failure 1
  refute_line --partial 'asdf global'
}

# --- install_pip_packages ----------------------------------------------------
@test "[install] install_pip_packages: upgrade pip fails" {
  mock_failure 'python' '-m pip install --upgrade pip'
  run install_pip_packages
  assert_failure 1
  refute_line 'python -m pip install --upgrade --user pipx'
}

@test "[install] install_pip_packages: install pipx fails" {
  mock_failure 'python' '-m pip install --upgrade --user pipx'
  run install_pip_packages
  assert_failure 1
  refute_line --partial 'pipx'
}

@test "[install] install_pip_packages: pipx install ansible fails" {
  mock_failure 'python' '-m pipx install --include-deps --pip-args=--upgrade --force ansible'
  run install_pip_packages
  assert_failure 1
  refute_line --partial 'pipx install'
}

@test "[install] install_pip_packages: pipx install yolk3k fails" {
  mock_failure 'python' '-m pipx install --pip-args=--upgrade --force yolk3k'
  install_pip_packages_with_exit_test() {
    install_pip_packages
    echo '[FAILURE] failed to exit'
  }
  run install_pip_packages_with_exit_test
  assert_failure 1
  refute_line '[FAILURE] failed to exit'
}
| true |
c6f559b9910a9cf3c7a0d57e875e166754248cf1
|
Shell
|
jseidman/cmapi
|
/deploy/scripts/setup-aws-hosts.sh
|
UTF-8
| 2,357 | 3.765625 | 4 |
[] |
no_license
|
#!/bin/bash
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Prepare a cluster on EC2 for a Cloudera deployment via Cloudera Manager.
# These scripts assume a RHEL/CentOS OS.
#
# Given a set of EC2 instances this will execute scripts to perform the
# following tasks:
# * Prepare the OS, including disabling iptables, installing and starting
#   NTPD, and turn off SELinux.
# * Install the Cloudera Manager Yum repository file.
# * Install the Oracle JDK.
# * Install required CM packages, including server, agent, and CM DB packages.
# * Start CM server and agent processes.
#
# Requires a configuration file with the following properties:
# * cmserver - Public hostname for instance that will host CM Server.
# * workerhostsfile - Name of file containing public hostnames for worker
#   nodes, one hostname per line.
# * pemfile - PEM file to connect to EC2 hosts.
# * user - Username for logging into hosts, generally ec2-user for RHEL/CentOS
#   hosts on EC2.

configfile=$1

# Quoted test: the unquoted form broke (or silently passed) when the argument
# contained whitespace or was empty in some shells.
if [ -z "$configfile" ]; then
  echo "usage: $0 config-file-name"
  echo "config-file-name should be the name of a file containing configuration parameters for use by these scripts"
  exit 1
fi

# Pulls in cmserver, workerhostsfile, pemfile and user.
source "$configfile"

# Setup CM server
scp -i "$pemfile" -o StrictHostKeyChecking=no -p setup-cm-server.sh "$configfile" "$user@$cmserver:~/"
ssh -t -i "$pemfile" "$user@$cmserver" "~/setup-cm-server.sh $configfile"

# Setup all workers: one public hostname per line in $workerhostsfile.
# while-read instead of `for h in $(cat ...)` avoids globbing/word-splitting.
while read -r host; do
  scp -i "$pemfile" -o StrictHostKeyChecking=no -p setup-worker.sh "$configfile" "$user@$host:~/"
  ssh -t -i "$pemfile" "$user@$host" "~/setup-worker.sh $configfile"
done < "$workerhostsfile"
| true |
e60f4405c10588d3d45ed19da5e20a87f5688e82
|
Shell
|
RiderSargent/dotfiles
|
/bin/shell_colors
|
UTF-8
| 3,456 | 2.765625 | 3 |
[] |
no_license
|
#!/bin/bash
# Print "Lorum ipsem" samples for the common ANSI SGR color combinations.
#
# ANSI codes - separate with semicolons in escape code to combine.
# Charactors
#   0 - Normal
#   1 - Bold
#   4 - Underline
#   5 - Blinking
#   7 - Reverse
# Forground
#   30 - Black   31 - Red      32 - Green  33 - Yellow
#   34 - Blue    35 - Magenta  36 - Cyan   37 - White
# Background
#   40 - Black   41 - Red      42 - Green  43 - Yellow
#   44 - Blue    45 - Magenta  46 - Cyan   47 - White

# Print one sample line: the SGR code as a label, then text rendered with it.
swatch() {
  printf '%sm: \033[%sm Lorum ipsem \033[0m\n' "$1" "$1"
}

# Bold, underlined section heading.
heading() {
  printf '\033[1;4m%s\033[0m\n' "$1"
}

echo ""
heading "Normal Colors on default bg"
for fg in 0 1 2 3 4 5 6 7; do swatch "0;3${fg}"; done

echo ""
heading "Bold Colors on default bg"
for fg in 0 1 2 3 4 5 6 7; do swatch "1;3${fg}"; done

echo ""
heading "Normal Colors on White bg"
for fg in 0 1 2 3 4 5 6 7; do swatch "0;3${fg};47"; done

echo ""
heading "Bold Colors on Black bg"
for fg in 0 1 2 3 4 5 6 7; do swatch "1;3${fg};40"; done

echo ""
heading "White on all bgs"
for bg in 0 1 2 3 4 5 6 7; do swatch "0;37;4${bg}"; done

echo ""
heading "Black on all bgs"
for bg in 0 1 2 3 4 5 6 7; do swatch "0;30;4${bg}"; done

echo ""
# echo -e "\033[1;4mVarious\033[0m"
# echo -e "0;7;33m: \033[0;7;33m Lorum ipsem \033[0m"
# Basically everything (warning: blinking text!)
# echo -e "1;4;5;7;30;41m: \033[1;4;5;7;30;41m Lorum ipsem \033[0m"
| true |
6a02644e394d7375b24feb3d3a6a72f8101dcbdb
|
Shell
|
AdamDlubak/l1-2018
|
/project/Task_5/task.sh
|
UTF-8
| 1,483 | 3.734375 | 4 |
[] |
no_license
|
#!/bin/bash
# Sends an e-mail to the address given in $2 containing the number given in $1
# plus all current environment variables (Vagrant only allows sending mail
# from the root account).
#
# Usage: task.sh <number> <email>; -h prints help.
if [ "$1" == "-h" ]; then
  printf "\n\t-----------------------------\n"
  printf "\nKorzystając z mechanizmu opisanego tutaj napisz skrypt, który będzie przyjmował wartość liczbową i adre email w linii poleceń a następnie wysyłał wiadomosc emaila (Vagrant pozwala wysyłać wiadomości tylko z konta root) pod wskazany adres wraz dowolnym komentarzem tekstowym podaną wartością i wszystkimi wartościami zmiennych środowiskowych.\n\n"
  printf "\t#1 Parameter: Intiger numer - Number witch will be send \n\t\tE.g. 3\n"
  printf "\t#2 Parameter: E-mail address to which the message will be sent. \n\t\tE.g. adam.dlubak@gmail.com\n"
  printf "\t-----------------------------\n\n"
  exit 0
fi

# Exactly two arguments are required. (The original duplicated this check and
# the number check below; the duplicates have been removed.)
if [ "$#" != "2" ]; then
  echo "Niepoprawna lista parametrow!"
  exit 1
fi

# First argument must be a non-negative integer. (Previously the first copy of
# this check printed the usage text and exited 0 instead of reporting an error.)
number_regex='^[0-9]+$'
if ! [[ $1 =~ $number_regex ]]; then
  echo "Error: First parameter should be a number"
  exit 1
fi

environment_variables=$(printenv)

# BUG FIX: the original passed a stray "echo" token to mail, which mail
# treated as an extra recipient address.
mail -s "Message from WCSS Server" "$2" <<EOF
Hello in my world!
$1
$environment_variables
EOF
| true |
ea92a50f6fe3749333a6d3bd6d9caf428933c401
|
Shell
|
BBezaire/graylog-project
|
/scripts/start-web-dev
|
UTF-8
| 290 | 3 | 3 |
[] |
no_license
|
#!/bin/sh
# Start the Graylog web interface in development mode, generating the
# node/node_modules resources via Maven first if they are missing.
prefix=$(dirname $0)
# Pulls in helper functions and variables (webIfDir, serverDir, ...).
. ${prefix}/includes
check_for_server_dir
# NOTE(review): pushd with no directory argument swaps the top two dirstack
# entries (and fails when the stack is empty); pushd/popd are also bash
# builtins, not POSIX sh -- presumably this relies on /bin/sh being bash.
pushd 2> /dev/null
if [ ! -d ${webIfDir}/node ] || [ ! -d ${webIfDir}/node_modules ]; then
  cd ${serverDir}/graylog2-server
  mvn generate-resources
fi
cd $webIfDir
# NOTE(review): appends the literal relative directory "node" to PATH so the
# locally downloaded node binary is found by npm -- TODO confirm.
PATH=$PATH:node npm start
popd 2> /dev/null
| true |
64871d5bfea4a1903fbe861b5b7bbd143d1cd102
|
Shell
|
june-yang/diskimage-builder
|
/diskimage_builder/elements/fedora/environment.d/10-fedora-distro-name.bash
|
UTF-8
| 188 | 2.8125 | 3 |
[
"Apache-2.0"
] |
permissive
|
# Distro identification for diskimage-builder's Fedora element.
DISTRO_NAME=fedora
export DISTRO_NAME

# Default to Fedora 25 unless the caller already pinned a release.
: "${DIB_RELEASE:=25}"
export DIB_RELEASE

# Honour an element-specific mirror override when one is provided.
if [ -n "${DIB_FEDORA_DISTRIBUTION_MIRROR:-}" ]; then
    export DIB_DISTRIBUTION_MIRROR="$DIB_FEDORA_DISTRIBUTION_MIRROR"
fi
| true |
9517fc844951fd29cb382fc56898cb89973bf22f
|
Shell
|
987Frogh/Makehuman
|
/win-installer/build-installer.sh
|
UTF-8
| 2,911 | 3.703125 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2016 Christoph Reiter, 2019 Dan Yeaw
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# Build the Gaphor Windows installer (NSIS .exe) and a self-extracting
# portable .exe inside an MSYS2/MinGW environment.
set -e

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "${DIR}"

# CONFIG START
ARCH="x86_64"
BUILD_VERSION="0"
# CONFIG END

MISC="${DIR}"/misc
# Select the MinGW flavour matching the target architecture.
if [ "${ARCH}" = "x86_64" ]; then
  MINGW="mingw64"
else
  MINGW="mingw32"
fi

# Read the package version out of ../pyproject.toml (Poetry layout).
function get_version {
python3 - <<END
from tomlkit import parse
with open('../pyproject.toml', 'r') as f:
    parsed_toml = parse(f.read())
print(parsed_toml["tool"]["poetry"]["version"])
END
}

VERSION="$(get_version)"

# Point the build at a pyinstaller dist directory.
function set_build_root {
  DIST_LOCATION="$1"
  GAPHOR_LOCATION="${DIST_LOCATION}"/gaphor
}

set_build_root "${DIR}/dist/gaphor"

# Tools needed by the build itself (NSIS, 7zip via p7zip, wget, ...).
function install_pre_deps {
  pacman -S --needed --noconfirm p7zip git dos2unix upx \
    mingw-w64-"${ARCH}"-nsis \
    mingw-w64-"${ARCH}"-wget
}

# Runtime dependencies bundled into the installer (GTK3, Python, PyGObject).
function install_deps {
  pacman --noconfirm --needed -S \
    mingw-w64-"${ARCH}"-gtk3 \
    mingw-w64-"${ARCH}"-python3 \
    mingw-w64-"${ARCH}"-python3-gobject \
    mingw-w64-"${ARCH}"-gobject-introspection \
    mingw-w64-"${ARCH}"-python3-cairo \
    mingw-w64-"${ARCH}"-python3-pip \
    mingw-w64-"${ARCH}"-python3-setuptools \
    mingw-w64-"${ARCH}"-python3-importlib-metadata
}

function build_pyinstaller {
  pyinstaller -y gaphor.spec
}

# Wrap the pyinstaller output in an NSIS installer.
function build_installer {
  cp "${DIR}"/misc/gaphor.ico "${DIST_LOCATION}"
  (cd "${DIST_LOCATION}" && makensis -NOCD -DVERSION="$VERSION" "${MISC}"/win_installer.nsi)
  mv "${DIST_LOCATION}/gaphor-LATEST.exe" "$DIR/gaphor-$VERSION-installer.exe"
}

# Build a self-extracting portable exe: 7z archive + 7-Zip SFX stub.
function build_portable_installer {
  local PORTABLE="$DIR/gaphor-$VERSION-portable"
  rm -rf "$PORTABLE"
  mkdir "$PORTABLE"
  cp "$MISC"/gaphor.lnk "$PORTABLE"
  cp "$MISC"/README-PORTABLE.txt "$PORTABLE"/README.txt
  unix2dos "$PORTABLE"/README.txt
  mkdir "$PORTABLE"/config
  cp -RT "${DIST_LOCATION}" "$PORTABLE"/data
  rm -Rf 7zout 7z1900-x64.exe
  7z a payload.7z "$PORTABLE"
  wget.exe -P "$DIR" -c https://www.7-zip.org/a/7z1900-x64.exe
  7z x -o7zout 7z1900-x64.exe
  # Concatenating the SFX stub with the archive yields a runnable .exe.
  cat 7zout/7z.sfx payload.7z > "$PORTABLE".exe
  rm -Rf 7zout 7z1900-x64.exe payload.7z "$PORTABLE"
}

function main {
  local GIT_TAG=${1:-"master"}

  # started from the wrong env -> switch (re-exec under the right MinGW shell)
  if [ $(echo "$MSYSTEM" | tr '[A-Z]' '[a-z]') != "$MINGW" ]; then
    "/${MINGW}.exe" "$0"
    exit $?
  fi

  echo "install pre-dependencies"
  install_pre_deps
  echo "install dependencies"
  install_deps
  echo "pyinstall gaphor"
  build_pyinstaller
  echo "build installer"
  build_installer
  echo "build portable installer"
  build_portable_installer
}

main "$@";
| true |
fe705aba8fbabdf51ab7b6ed115b2b739b364df2
|
Shell
|
lsgos/MPhysPulsars
|
/frb_cands/ninjector_tests/ninjectorrun.sh
|
UTF-8
| 1,831 | 3.125 | 3 |
[] |
no_license
|
#!/bin/bash
# For each raw GMRT data path listed in the file given as $1:
# fetch the data from hydrus, convert it to filterbank format, run the
# Bifrost single-pulse search on the original data, inject synthetic pulses
# with ninjector, search again, and keep only the candidate overview plots.
#
# NOTE(review): the "$(unknown)" tokens below look like placeholders left by
# a redaction or corrupted checkout (probably once "${filename}") -- as
# written they execute a nonexistent command "unknown". TODO confirm against
# the original repository before running.
list=$1
while read line
do
  workdir=/scratch/mmalenta/ninjector_tests
  export OMP_NUM_THREADS=16
  filename=$(basename $line)
  # Strip the trailing "_gsb..." suffix to recover the source name.
  sourcename=$(echo $filename | sed 's/\([a-zA-Z0-9_\-]*\)_gsb.*/\1/')
  mkdir ${sourcename}
  cd ${sourcename}
  #rsync -avz --progress -e 'ssh -i /home/mmalenta/.ssh/hydrus' malenta@hydrus.jb.man.ac.uk:${line} ./$(unknown).gmrt_dat
  #rsync -avz -e 'ssh -i /home/mmalenta/.ssh/hydrus' malenta@hydrus.jb.man.ac.uk:${line}.gmrt_hdr ./$(unknown).gmrt_hdr
  scp -i /home/mmalenta/.ssh/hydrus malenta@hydrus.jb.man.ac.uk:${line} ./$(unknown).gmrt_dat
  scp -i /home/mmalenta/.ssh/hydrus malenta@hydrus.jb.man.ac.uk:${line}.gmrt_hdr ./$(unknown).gmrt_hdr
  # Convert raw GMRT data to SIGPROC filterbank format.
  filterbank $(unknown).gmrt_dat > ${sourcename}.fil
  rm *.gmrt_dat
  rm *.gmrt_hdr
  # Single-pulse search on the unmodified data (DM 0-2000).
  mkdir original
  cd original
  /home/mmalenta/code/Bifrost/bin/bifrost -f ${workdir}/${sourcename}/${sourcename}.fil -o ${workdir}/${sourcename}/original --dm_start 0 --dm_end 2000 --single -t 1 -k /home/mmalenta/code/Bifrost/killmask2048.kill
  cat *.cand > allcands_original
  /home/mmalenta/code/Bifrost/scripts/trans_gen_overview.py -cands_file allcands_original
  mv overview*.png ${sourcename}_original.png
  cd ../
  # Inject synthetic pulses, then repeat the identical search on the result.
  /home/mmalenta/code/ninjector/bin/ninjector -f ${workdir}/${sourcename}/${sourcename}.fil -o ${workdir}/${sourcename}/${sourcename}_injected.fil -r
  mkdir injected
  cd injected
  /home/mmalenta/code/Bifrost/bin/bifrost -f ${workdir}/${sourcename}/${sourcename}_injected.fil -o ${workdir}/${sourcename}/injected --dm_start 0 --dm_end 2000 --single -t 1 -k /home/mmalenta/code/Bifrost/killmask2048.kill
  cat *.cand > allcands_injected
  /home/mmalenta/code/Bifrost/scripts/trans_gen_overview.py -cands_file allcands_injected
  mv overview*.png ${sourcename}_injected.png
  cd ../
  # Filterbank files are large; delete them once the plots exist.
  rm *.fil
  cd ${workdir}
done < $list
| true |
fa83cc44bba657b280977818e429a421c70b109b
|
Shell
|
hanbingjia/generate_bash_tool
|
/bash_count.sh
|
UTF-8
| 107 | 2.515625 | 3 |
[] |
no_license
|
#!/bin/sh
# Count how many /etc/passwd entries use each login shell and print one
# "shell count" pair per line.
count=$(awk -F'[ :]+' '{++S[$NF]} END {for (key in S) print key,S[key]}' /etc/passwd)
# BUG FIX: the original `echo $count` was unquoted, which collapsed awk's
# per-shell lines into a single space-separated line.
printf '%s\n' "$count"
| true |
851ba094a7b8b5d7aa84eee152e9bfc4d3559fa7
|
Shell
|
seamster/soundcloud-dl
|
/soundcloud.sh
|
UTF-8
| 1,370 | 4.0625 | 4 |
[
"Beerware"
] |
permissive
|
#!/bin/bash
#soundcloud music downloader by http://360percents.com - v3.0 on Nov 1st 2011
#Author: Luka Pusic <pusic93@gmail.com>
# Scrapes a soundcloud.com page for stream URLs and downloads every track as
# an .mp3 named after its page title.
# NOTE(review): written against the 2011 soundcloud.com HTML; the markup and
# media.soundcloud.com stream endpoints it scrapes no longer exist.

echo "[i] soundcloud.com music downloader by http://360percents.com (wget version)";

if [ -z "$1" ]; then
  echo "";echo "[i] Usage: `basename $0` http://soundcloud.com/link_with_tracks_on_page";echo "";exit
fi

# Highest pagination index found in the track-list links (defaults to 1).
pages=`wget "$1" -q --user-agent 'Mozilla/5.0' -O - | tr '"' "\n" | grep "tracks?page=" | sort -u | tail -n 1 | cut -d "=" -f 2`
if [ -z "$pages" ]; then
  pages=1
fi
echo "[i] Found $pages pages of songs!"

for (( page=1; page <= $pages; page++ ))
do
  # Fetch the page (unpaginated URL when there is only one page).
  if [ "$pages" = "1" ]; then
    this=`wget -q --user-agent='Mozilla/5.0' $1 -O -`;
  else
    this=`wget -q --user-agent='Mozilla/5.0' $1?page=$page -O -`;
  fi
  # Extract the JSON-escaped streamUrl values and the matching track titles.
  songs=`echo "$this" | grep 'streamUrl' | tr '"' "\n" | sed 's/\\u0026amp;/\&/' | grep 'http://media.soundcloud.com/stream/' | sed 's/\\\\//'`;
  songcount=`echo "$songs" | wc -l`
  titles=`echo "$this" | grep 'title":"' | tr ',' "\n" | grep 'title' | cut -d '"' -f 4`
  if [ -z "$songs" ]; then
    echo "[!] No songs found at $1." && exit
  fi
  echo "[+] Downloading $songcount songs from page $page..."
  # Titles and stream URLs are matched by line position (songid-th line each).
  for (( songid=1; songid <= $songcount; songid++ ))
  do
    title=`echo "$titles" | sed -n "$songid"p`
    echo "[-] Downloading $title..."
    url=`echo "$songs" | sed -n "$songid"p`;
    wget -q --user-agent='Mozilla/5.0' -O "$title.mp3" $url;
  done
done
| true |
c152f5dccee9079b086e98dcf8a01760fb2d9318
|
Shell
|
richardseberino/DeviceID
|
/buildBackEndDockerImage.sh
|
UTF-8
| 481 | 3.375 | 3 |
[] |
no_license
|
# To run this script correctly:
# 1- run: chmod +x buildBackEndDockerImage.sh
# 2- run: ./buildBackEndDockerImage.sh <image name> <port>

# Build the backend Docker image from the current directory's Dockerfile,
# tagged with the name passed as the first argument.
echo 'Criando a imagem do backEnd no Docker com o nome passado por parâmetro'
docker build -t "$1" .
echo 'Mensagem criada com sucesso'

# Run the image locally, mapping host port $2 to container port 3000.
echo 'Rodando a imagem criada na porta especificada no segundo parâmetro'
docker run -p "$2":3000 -t "$1"
| true |
02cde24740bb20f6571f278c0d54114a99e8d672
|
Shell
|
naveenmahadevuni/incubator-trafodion
|
/core/sqf/src/seabed/test/goshell124
|
UTF-8
| 2,039 | 3.265625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# @@@ START COPYRIGHT @@@
#
# (C) Copyright 2011-2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@@ END COPYRIGHT @@@

# Seabed test driver: generates a shell-command script (appended to $TMP,
# which is set up by gosetup/goshellsetup) that starts the monitor, launches
# maxsp server and maxcp client copies of the given test programs, waits for
# them all, and shuts the monitor down.
#
# Usage: goshell124 <cli> <srv> [-maxcp N] [-maxsp N] [extra args...]
if [ $# -lt 2 ]; then
  echo "goshell124 <cli> <srv>"
  exit 1
fi
xclient=$1
xserver=$2
shift
shift
# setup
. ./gosetup

# Optional -maxcp/-maxsp flags override the number of client/server copies.
maxcp=1
if [ "$1" = "-maxcp" ]; then
  maxcp="$2"
  shift
  shift
fi
maxsp=1
if [ "$1" = "-maxsp" ]; then
  maxsp="$2"
  shift
  shift
fi

echo "********* TEST `date` $xclient/$xserver *********"
tcheck -setup
. ./goshellsetup
echo "node" >> $TMP
echo "! Start the monitor processes across the cluster" >> $TMP
echo "startup" >> $TMP
echo "! Start the test server" >> $TMP
# One "exec" line per server copy, named $srv0..$srv<maxsp-1>.
# (The \$ keeps the name literal so the generated script expands it, not us.)
n=0
while [[ $n -lt $maxsp ]]; do
  echo "exec {nowait,name \$srv$n} $xserver -server -maxcp $maxcp -maxsp $maxsp $*" >> $TMP
  n=`expr $n + 1`
done
echo "! Display process status of our processes" >> $TMP
echo "ps" >> $TMP
echo "! Start the client process" >> $TMP
# One "exec" line per client copy, named $cli0..$cli<maxcp-1>.
n=0
while [[ $n -lt $maxcp ]]; do
  echo "exec {nowait,name \$cli$n} $xclient -client -maxcp $maxcp -maxsp $maxsp $*" >> $TMP
  n=`expr $n + 1`
done
echo "delay 5" >> $TMP
echo "! Display process status to see new processes" >> $TMP
echo "ps" >> $TMP
echo "! Wait for our processes to stop, then stop the monitor processes and exit." >> $TMP
n=0
while [[ $n -lt $maxsp ]]; do
  echo "wait \$srv$n" >> $TMP
  n=`expr $n + 1`
done
n=0
while [[ $n -lt $maxcp ]]; do
  echo "wait \$cli$n" >> $TMP
  n=`expr $n + 1`
done
echo "shutdown" >> $TMP
echo "exit" >> $TMP
echo "eof" >> $TMP
# Run the generated script and verify the results.
. ./goshellfilter
tcheck $xclient
| true |
facaaed7adc667f836a62ca90f58959a9c491bb1
|
Shell
|
uday745/DEVOPS
|
/shell scripts/if_conditions.txt
|
UTF-8
| 878 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/bash
count=2
if [ $count -gt 0 ];then
echo "count is positive"
printf "My current working directory \n $PWD \n"
fi
---------------------------------------
#!/bin/bash
count=-2
if [ $count -gt 0 ];then
echo "count is positive"
printf "My current working directory \n $PWD \n"
else
echo "count is negative"
fi
----------------------------------------------
#!/bin/bash
count=-5
if [ $count -gt 0 ];then
echo "count is positive"
printf "My current working directory \n $PWD \n"
elif [ $count -lt -2 ];then
echo "count is negative"
fi
----------------------------------
#!/bin/bash
count=-1
if [ $count -gt 0 ];then
echo "count is positive"
printf "My current working directory \n $PWD \n"
elif [ $count -lt -2 ];then
echo "count is less than -2"
else
echo "count lies between -2 and 0"
fi
------------------------------------
| true |
4df35278d3924c5910f0c178bf67c8f7eb7bb2d2
|
Shell
|
sqtpm/sqtpm
|
/Utils/vbox-etc-shared.sh
|
UTF-8
| 551 | 3.21875 | 3 |
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# VM-side executor for VirtualBox with a shared directory.
# This file is part of sqtpm.
#
# Runs a submitted program (./elf) inside resource limits against one input
# file, writing stdout/stderr/exit status next to the input in the shared dir.
#   $1 dir    - subdirectory of the shared mount
#   $2 input  - input file name (*.in)
#   $3 lang   - submission language ("Python3" runs elf via the interpreter)
#   $4-$6     - CPU-time (s), virtual-memory and stack ulimits (KB)

sharedd=/media/sf_aux

dir=$1
input=$2
lang=$3
cputime=$4
virtmem=$5
stkmem=$6

cd "$sharedd/$dir" &>/dev/null
umask 0000

# "foo.in" -> "foo"; output files become foo.run.out / foo.run.err / foo.run.st
tag=${input/.in/}

# Choose the command once instead of duplicating the whole bash -c line.
runner=./elf
if [[ "$lang" == "Python3" ]]; then
  runner="python3 ./elf"
fi

# ulimits apply inside the subshell only; the real exit status is captured
# into $tag.run.st (the \$? is expanded by the inner shell, not here).
bash -c "ulimit -c 0 -t $cputime -v $virtmem -s $stkmem; $runner <$input 1>$tag.run.out 2>$tag.run.err; echo \$? >$tag.run.st"
| true |
0b9e1a72d229a2a26175ec65e29c34602104ad26
|
Shell
|
mikeylienxvi/hacklab
|
/hacktools/vpnmon/auth/auth
|
UTF-8
| 509 | 3.21875 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# OpenVPN authentication helper: normalizes the username to the
# @itbhu.ac.in domain, rejects blacklisted users, then validates the
# credentials against Google's (long-deprecated) ClientLogin endpoint.
# Exit 0 = authenticated, exit 1 = rejected.
#
# BUG FIX: the script uses bash-only features (declare -A, [[ ]], =~) but
# previously declared #!/bin/sh; the shebang now matches the code.

# Append the domain if the username does not already carry it.
# (expr match prints the length of the match; 0 means no match.)
if [ "$(expr match "$username" '^.*@itbhu[.]ac[.]in')" -eq 0 ]
then
  username="${username}@itbhu.ac.in"
fi

# Load the blacklist (one username per line) into an associative set.
blacklisted=$(cat /etc/openvpn/blacklist)
declare -A blacklist
for bluser in $blacklisted
do
  blacklist[$bluser]=1
done

if [[ ${blacklist[$username]} ]]
then
  exit 1
fi

# A successful ClientLogin response starts with "SID=".
outp=$(curl --silent --location https://www.google.com/accounts/ClientLogin --data "Email=$username&Passwd=$password" --header 'Content-Type:application/x-www-form-urlencoded')
if [[ "$outp" =~ ^SID= ]]
then
  exit 0
else
  exit 1
fi
| true |
f3f85fcc937dffd42d38b24c793eb7624430a3fd
|
Shell
|
alittlebrighter/scripts
|
/gcloud/svcaccount/create.sh
|
UTF-8
| 605 | 3.03125 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# exported from COACH - https://github.com/alittlebrighter/coach
#-ALIAS- = gcloud.svcaccount.create
# -TAGS- = gcloud,service account
#-SHELL- = bash
#
#-DOCUMENTATION- !DO NOT EDIT THIS LINE!
# Creates a new service account for Google Cloud.
#
#-SCRIPT- !DO NOT EDIT THIS LINE!
# $1: service account name, $2: GCP project id.
NAME=$1
PROJECT=$2

gcloud iam service-accounts create $NAME
# Grants the new account the broad "owner" role on the project; the closing
# message reminds the operator to narrow this in the Cloud Console.
gcloud projects add-iam-policy-binding $PROJECT --member "serviceAccount:${NAME}@${PROJECT}.iam.gserviceaccount.com" --role "roles/owner"
echo "$NAME service account created as owner for project $PROJECT.  You should go to Google Cloud Console and limit these permissions."
| true |
f1e2c0b3956017a9e1fe80ef877c5ad75a374efe
|
Shell
|
OpenSmalltalk/opensmalltalk-vm
|
/platforms/unix/config/squeak.sh.in
|
UTF-8
| 2,068 | 3.890625 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# Run the VM, setting SQUEAK_PLUGINS if unset to the VM's containing directory
# if unset, and ensuring LD_LIBRARY_PATH includes the VM's containing directory.
# (This is an autoconf template: @expanded_relative_imgdir@ is substituted at
# configure time.)
BIN=`/usr/bin/dirname "$0"`/@expanded_relative_imgdir@
GDB=
if [ "${SQUEAK_PLUGINS-unset}" = unset ]; then
    export SQUEAK_PLUGINS="$BIN"
fi
# -gdb as the first argument runs the VM under gdb instead of directly.
if [ "$1" = '-gdb' ]; then
    GDB=gdb
    shift
    echo;echo run $@; echo
    set --
fi
# At least on linux LD_LIBRARY_PATH's components must be absolute path names
case "$BIN" in
/*) PLUGINS="$BIN";;
*)  PLUGINS="`pwd`/$BIN"
esac
# OpenBSD needs no libc-version juggling; launch directly.
if [ $(uname -s) = "OpenBSD" ]; then
    LD_LIBRARY_PATH="$PLUGINS:${LD_LIBRARY_PATH}" exec $GDB "$BIN/squeak" "$@"
fi
# On some linuxes there multiple versions of the C library. If the image uses
# libc (e.g. through the FFI) then it must use the same version that the VM uses
# and so it should take precedence over /lib libc. This is done by setting
# LD_LIBRARY_PATH appropriately, based on ldd's idea of the libc use by the VM.
LIBC_SO="`/usr/bin/ldd "$BIN/squeak" | /bin/fgrep /libc. | sed 's/^.*=> \([^ ]*\).*/\1/'`"
PLATFORMLIBDIR=`expr "$LIBC_SO" : '\(.*\)/libc.*'`
if [ "$PLATFORMLIBDIR" = "" ]; then
    # Could not locate the VM's libc: print diagnostics (distro, uname, and a
    # hint about missing 32-bit libraries on 64-bit hosts) and bail out.
    {
    echo "Error. Could not determine platform's libc path for VM. "
    echo "Try forcing \$PLATFORMLIBDIR in $0, based on LIBC_SO."
    echo "Please report what works to squeak [vm-dev] mail list."
    echo "  LIBC_SO="$LIBC_SO
    cat /etc/*-release* | grep -v // | sed 's/^/    /'
    echo -n "  UNAME=" ; uname -a
    MACHINE=`uname -m`
    case "$MACHINE" in
    *64*)
        echo " System seems to be 64 bit. You may need to (re)install the 32-bit libraries."
        ;;
    esac
    exit 1
    } 1>&2
fi
# prepending is less flexible but safer because it ensures we find the plugins
# in the same directory as the VM. We must include at least /lib and /usr/lib
# if libraries there-in are to be found. These directories are not implicit.
case $PLATFORMLIBDIR in
/lib|/usr/lib) SVMLLP=/lib:/usr/lib;;
*) SVMLLP="$PLATFORMLIBDIR:/lib:/usr$PLATFORMLIBDIR:/usr/lib"
esac
LD_LIBRARY_PATH="$PLUGINS:$SVMLLP:${LD_LIBRARY_PATH}" exec $GDB "$BIN/squeak" "$@"
| true |
be5e9c18bc2936209c77d589025a2e8fc2bfc88e
|
Shell
|
RoxanneDewing/SAT-Project
|
/FinalProject/minisat_standard_tasks_alt.sh
|
UTF-8
| 678 | 2.6875 | 3 |
[] |
no_license
|
#!/bin/bash
# Solve Sudoku grids 01..50 with minisat via the alternate SAT encoding
# (sud2sat_alt.py) and verify each decoded solution.
# Grid file names are zero-padded to two digits (grid01.txt .. grid50.txt),
# so a single loop with printf padding replaces the former duplicated
# 1-9 / 10-50 loops while producing byte-identical behavior.
for q in {1..50}
do
	n=$(printf '%02d' "$q")
	echo "Checking grid$n"
	cat grid$n.txt | python3 sud2sat_alt.py > alt_sat_input$n.txt
	minisat alt_sat_input$n.txt alt_sat_output$n.txt
	python3 sat2sud.py alt_sat_output$n.txt > alt_solution$n.txt
	python3 check_sudoku.py < alt_solution$n.txt
	echo "-------------------------------------------------"
done
| true |
e0065c4d35f57025a774764f3504cb720c46114e
|
Shell
|
teja624/home
|
/.zsh/modules/aws/lib/sh/api/ssm/maintenance_window_delete.sh
|
UTF-8
| 161 | 2.65625 | 3 |
[
"Apache-2.0"
] |
permissive
|
# Delete an SSM maintenance window by id; any extra arguments are passed
# through unchanged to the AWS CLI (e.g. --region).
aws_ssm_maintenance_window_delete() {
  local window_id="$1"
  shift 1
  # Quote the id so an empty or unusual value cannot be word-split away.
  cond_log_and_run aws ssm delete-maintenance-window --window-id "$window_id" "$@"
}
| true |
e4367331fa24ac6602612eb17f17d243c7b5bd46
|
Shell
|
jamie-ryan/Scripts
|
/ssh-test.sh~
|
UTF-8
| 327 | 2.5625 | 3 |
[] |
no_license
|
#!/bin/bash
# Open an SSH session to msslex through the MSSL gateway tunnel.
#ps aux | grep jsr2   shows all processes
#can then get access to files on local machine using ssh://jsr2@localhost:2222
# NOTE(review): this starts the tunnel when a gateway ssh process is
# already present, which looks inverted — confirm intent with the author.
if ps aux | grep ssh | grep -q gate
then
#	ssh -f jsr2@gate.mssl.ucl.ac.uk -L 8080:msslex:22 -N
	ssh -YC jsr2@gate.mssl.ucl.ac.uk -L 8080:msslex:22 -N
fi
#ssh -p 8080 -YC jsr2@localhost
ssh -p 8080 jsr2@localhost
| true |
a2668d077d63f48e0febd871dd22b342adb39a04
|
Shell
|
Enterprise-System-Management-Solutions/Data-pipeline-scripts
|
/mediation01/unix/script/process/bin/msc_move/nokia_msc_move_msc_new_1.sh
|
UTF-8
| 678 | 2.96875 | 3 |
[] |
no_license
|
#!/bin/bash
PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/bin'
#Author : Tareq
#Date   : 07-08-2020
#nokia_msc file transfer 253~239 server and move dump directory
# Transfers new Nokia MSC CSV files to the SFTP host, then moves each
# transferred file into the local dump directory.  A lock file guards
# against overlapping cron runs.
lock=/data02/script/process/bin/nokia_msc_new_1_transfer_lock
export lock
if [ -f "$lock" ] ; then
exit 2
else
touch "$lock"
# Abort (and release the lock) if the source directory is unreachable;
# otherwise the glob and mv below would act on the wrong directory.
cd /data01/msc/nokia_csv_msc_cdr/msc_new_1 || { rm -f "$lock"; exit 1; }
xdir=/data01/msc/nokia_csv_msc_cdr_dump
zdir=/data02/sftp_msc/nokia
# Iterate the glob directly instead of parsing `ls -ltr` output (which
# breaks on unusual names).  Transfer order is alphabetical rather than
# mtime order; file names are timestamped so this is equivalent in practice.
for i in *.csv
do
# When no .csv exists the literal pattern remains; skip it.
[ -e "$i" ] || continue
# NOTE(review): a plaintext password on the command line is visible in
# `ps`; prefer key-based authentication for scp.
#sshpass -p "dwhadmin" scp "$i" dwhadmin@192.168.61.239:$zdir
sshpass -p "dwhadmin" scp "$i" dwhadmin@192.168.61.204:"$zdir"
mv "$i" "$xdir"
done
rm -f "$lock"
fi
| true |
faf38f93fb5e2ebb5de292ec49fcfc4dc95540a7
|
Shell
|
laszewski/hugo
|
/geticon
|
UTF-8
| 272 | 3.09375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# Fetch the brand SVG icons we use from the Font Awesome repository into
# the local "fontawesome" directory.
set -ex

base_url=https://raw.githubusercontent.com/FortAwesome/Font-Awesome/master/advanced-options/raw-svg/brands
out_dir=fontawesome

mkdir -p "${out_dir}"
for name in twitter github; do
    file="${name}.svg"
    wget -O "${out_dir}/${file}" "${base_url}/${file}"
done
| true |
3bf59dbc97cb4d1c626a5ce3a8401c3ff4fb7125
|
Shell
|
JrMime/fr-MW-autocat-script
|
/catscripts/Government/Countries/Malaysia/Malaysia.sh
|
UTF-8
| 228 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/bash
# Collect newly created pages mentioning Malaysia and, when any are found,
# run the $CATEGORIZE command with CATFILE/CATNAME describing the batch.
grep -Ei 'Malaysia\b' newpages.txt >> Malaysia.txt
# -s is true when the file exists and is non-empty; this replaces the
# non-portable `stat --print=%s` size probe.
if [ -s Malaysia.txt ];
then
	export CATFILE="Malaysia.txt"
	export CATNAME="Malaysia"
	# $CATEGORIZE is intentionally unquoted: it may carry arguments.
	$CATEGORIZE
fi
rm -f Malaysia.txt
| true |
40b70de422a201b98e69643c5497155a5edee67e
|
Shell
|
nguyensu/pces
|
/out/artifacts/pces_jar/submitjob.sh
|
UTF-8
| 715 | 2.59375 | 3 |
[] |
no_license
|
#!/bin/bash
EXPNAME="ENSrcjs"
UF="20"
for seed in 1 #{1..30}
do
for nsize in 4
do
for npc in 2
do
for bc in 8
do
for tw in 50
do
for pi in RCJS_1_3_50_40.txt
do
for nr in 1
do
for ntg in 1
do
for sip in 5
do
for mt in 36000
do
for ps in 2000
do
FILE=EXP${EXPNAME}NS${nsize}NPC${npc}BC${bc}TW${tw}PI${pi}NR${nr}NTG${ntg}SIP${sip}MT${mt}PS${ps}SEED${seed}.out
if [ ! -f "$FILE" ]; then
echo Experiment ${EXPNAME} NS${nsize} NPC${npc} BC${bc} TW${tw} PI${pi} NR${nr} NTG${ntg} SIP${sip} MT${mt} PS${ps} SEED${seed}
qsub -v name=$EXPNAME,ns=$nsize,npc=$npc,bc=$bc,tw=$tw,pi=$pi,nr=$nr,ntg=$ntg,sip=$sip,mt=$mt,ps=$ps,seed=$seed,uf=$UF exp_run_gpfjss.sh
fi
done
done
done
done
done
done
done
done
done
done
done
| true |
5dcb05457a84d020e10134bef944201b9b077324
|
Shell
|
waygongNote/scripts
|
/filename_date.sh
|
UTF-8
| 406 | 3.359375 | 3 |
[] |
no_license
|
# Copy a file to a timestamped sibling: foo.txt -> foo_1231_2359_2014.txt
# ex. myDate = 1231_2359_2014
myDate=$(date +%m%d_%H%M_%Y)
# Only act when invoked as filename_date.sh with an argument.  The suffix
# match replaces the fragile `tail -c 17` byte-count comparison, which
# depended on echo's trailing newline being counted.
case "$0" in
*filename_date.sh)
	if [ $# -ge 1 ]; then
		# First dot-separated field and the field after it, matching the
		# old `cut -d. -f1` / `-f2` behavior (whole name when no dot),
		# without spawning echo|cut pipelines.
		stem=${1%%.*}
		rest=${1#*.}
		ext=${rest%%.*}
		cp -a "$1" "${stem}_$myDate.$ext"
		echo "${stem}_$myDate.$ext"
	fi
	;;
esac
# (cut can be replaced by awk): ex. test=$(echo "123.456.789"|awk 'BEGIN {FS="."};{print $1}') => test = 123
| true |
ae5ed0e2a70114d5d0841cb46e8cd6a27e9fb50c
|
Shell
|
JiriSko/amixer-webui
|
/production/deb-package/DEBIAN/postinst
|
UTF-8
| 185 | 2.71875 | 3 |
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Debian postinst: install the default configuration (only when the admin
# has not created one yet) and refresh the init script.
share_dir=/usr/share/amixer-webui
[ -e /etc/amixer-webui.conf ] || cp "$share_dir/amixer-webui.conf" /etc/amixer-webui.conf
cp "$share_dir/amixer-webui" /etc/init.d/amixer-webui
| true |
c6ea5a0a1696b89b31b59aff8203d4426f83d992
|
Shell
|
weaver-viii/dotfiles-1
|
/bin/scratchpad
|
UTF-8
| 119 | 2.546875 | 3 |
[] |
no_license
|
#!/bin/bash
# Toggle an i3 scratchpad window: launch $1 when no window with X instance
# $2 exists yet, then show that instance from the scratchpad.
cmd=$1
instance=$2
if ! i3-window-exists -i "$instance"; then
    i3-msg "exec $cmd"
fi
i3-msg "[instance=$instance] scratchpad show"
| true |
0f601b87ec5bbc8b02b487bac679c62c0eeb5e57
|
Shell
|
nolanw/acquire
|
/sparkle/generateDSASignature.sh
|
UTF-8
| 209 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/bash
# Emit the base64 DSA signature Sparkle expects for a release archive.
# Requires dsa_priv.pem in the current directory.
if [ -z "$1" ]
then
	echo "Usage: $(basename "$0") path_to_zipped_release"
	exit 1
fi
# Quote the path so archives with spaces in their names work.
openssl dgst -sha1 -binary < "$1" \
	| openssl dgst -dss1 -sign dsa_priv.pem \
	| openssl enc -base64
| true |
ee7e8727d6e5fdc554a5ee4dd8624389cb57c335
|
Shell
|
erfg/dotfiles
|
/install-deps.sh
|
UTF-8
| 982 | 2.5625 | 3 |
[] |
no_license
|
# Bootstrap a new machine: git-friendly, rvm, nave, homebrew, z,
# VLCControl and Pygments.
# NOTE(review): several steps pipe curl output straight into (sudo)
# shells — review the fetched scripts before running.
# https://github.com/jamiew/git-friendly
# the `push` command which copies the github compare URL to my clipboard is heaven
sudo bash < <( curl https://raw.github.com/jamiew/git-friendly/master/install.sh)
# https://rvm.io
# rvm for the rubiess
curl -L https://get.rvm.io | bash -s stable --ruby
# https://github.com/isaacs/nave
# needs npm, obviously.
# TODO: I think i'd rather curl down the nave.sh, symlink it into /bin and use that for initial node install.
npm install -g nave
# homebrew!
# you need the code CLI tools YOU FOOL.
ruby <(curl -fsSkL raw.github.com/mxcl/homebrew/go)
mkdir -p code
# https://github.com/rupa/z
# z, oh how i love you
cd code
git clone https://github.com/rupa/z.git
# NOTE(review): after `cd code` this resolves to code/code/z/z.sh —
# presumably z/z.sh was meant; confirm.
chmod +x code/z/z.sh
# z binary is already referenced from .bash_profile
# https://github.com/jeroenbegyn/VLCControl
# VLC Controll Script
# NOTE(review): already inside code/ at this point, so this cd likely fails.
cd code
git clone git://github.com/jeroenbegyn/VLCControl.git
# for the c alias (syntax highlighted cat)
sudo easy_install Pygments
| true |
b7917b92a80be1a101231b2df5d436d0fc1f5266
|
Shell
|
RebornQ/LinuxScripts
|
/centos-init.sh
|
UTF-8
| 3,760 | 4.09375 | 4 |
[] |
no_license
|
#!/bin/bash
# CentOS Linux release (version?) 系统 初始化脚本
# Powered by Reborn
# 参考 https://github.com/bboysoulcn/centos
# Interactively create a system user, set its password, and optionally
# grant sudo rights via a drop-in file under /etc/sudoers.d.
add_user() {
    echo "starting add user ..."
    read -r -p "Username:" username
    echo "Password:"
    read -r -s password
    sudo adduser "$username"
    if [ "$?" = "0" ]; then
        # printf avoids echo's option/backslash pitfalls for odd passwords.
        printf '%s\n' "$password" | sudo passwd --stdin "$username"
        read -r -p "set this user as sudoer? (y)" setroot
        # Empty answer defaults to yes; the previous `-n` test made *any*
        # non-empty answer (including "n") grant sudo.
        if [[ -z $setroot || $setroot == "y" || $setroot == "Y" ]]; then
            sudo tee /etc/sudoers.d/"$username" <<<"$username"' ALL=(ALL) ALL'
            sudo chmod 440 /etc/sudoers.d/"$username"
            echo "root user created !!!"
        else
            echo "user created !!!"
        fi
    else
        echo "cannot create user" 1>&2
        exit 1
    fi
}
# Interactively delete a user account (and its home directory, via -r)
# after listing regular login accounts and asking for confirmation.
del_user() {
    echo "starting del user ..."
    # Show candidate accounts as name|uid|gid, excluding nologin/system
    # pseudo-users, paged with `more`.
    cat /etc/passwd | grep -v nologin | grep -v halt | grep -v shutdown | awk -F":" '{ print $1"|"$3"|"$4 }' | more
    read -p "Username:" username
    read -p "Confirm: Do you really want to del this user? (y)" del
    # NOTE(review): `-n $del` makes any non-empty answer (even "n")
    # confirm deletion — presumably `-z` (empty defaults to yes) was
    # intended, as the "(y)" prompt suggests; confirm before changing.
    if [[ -n $del || $del == "y" || $del == "Y" ]]; then
        sudo userdel -r $username
        if [ "$?" = "0" ]; then
            echo "user $username has been deleted !!!"
        else
            echo "cannot delete user" 1>&2
            exit 1
        fi
    fi
}
# Enable EPEL, update the system, and install a baseline tool set.
install_software() {
    echo "starting install software ..."
    sudo yum install -y epel-release
    sudo yum update -y
    sudo yum install -y git wget screen nmap vim htop iftop iotop zip telnet nano
    echo "software installed !!!"
}
# Install zsh, make it the login shell, then run the oh-my-zsh installer.
install_oh_my_zsh(){
    echo "starting install oh_my_zsh ..."
    sudo yum install -y zsh
    chsh -s /bin/zsh
    sh -c "$(wget -O- https://cdn.jsdelivr.net/gh/ohmyzsh/ohmyzsh/tools/install.sh)"
    echo "oh_my_zsh installed !!!"
}
# Back up sshd_config, turn off SSH root login, and restart sshd.
disable_root_login() {
    echo "starting disable root login ..."
    sudo cp /etc/ssh/sshd_config /etc/ssh/sshd_config_bak
    sudo sed -i 's/.*PermitRootLogin.*/PermitRootLogin no/g' /etc/ssh/sshd_config
    sudo systemctl restart sshd
    echo "your sshd_config.PermitRootLogin is set to no"
}
# Back up sshd_config, turn SSH root login back on, and restart sshd.
enable_root_login() {
    echo "starting enable root login ..."
    sudo cp /etc/ssh/sshd_config /etc/ssh/sshd_config_bak
    sudo sed -i 's/.*PermitRootLogin.*/PermitRootLogin yes/g' /etc/ssh/sshd_config
    sudo systemctl restart sshd
    echo "your sshd_config.PermitRootLogin is set to yes"
}
# Print a short hardware/OS summary banner: virtualization support,
# CPU model, memory, swap and the distribution release string.
print_systeminfo() {
    echo "******************************************************"
    echo "Powered by Reborn"
    echo "Email: ren.xiaoyao@gmail.com"
    echo "Hostname:" $(hostname)
    # virtualization
    # vmx flag present => Intel VT-x capable (AMD's svm flag is not checked).
    cat /proc/cpuinfo | grep vmx >>/dev/null
    if [ $? == 0 ]; then
        echo "Supporting virtualization"
    else
        echo "Virtualization is not supported"
    fi
    # `exit` stops awk after the first "model name" line.
    echo "Cpu:" $(cat /proc/cpuinfo | grep "model name" | awk '{ print $4" "$5""$6" "$7 ; exit }')
    echo "Memory:" $(free -m | grep Mem | awk '{ print $2 }') "M"
    echo "Swap: " $(free -m | grep Swap | awk '{ print $2 }') "M"
    echo "Kernel version: " $(cat /etc/redhat-release)
    echo "******************************************************"
}
# Show the numbered menu of available actions (layout mirrors `select`).
help() {
    printf '1) add_user\t\t4) enable_root_login\t\t7) exit\n'
    printf '2) del_user\t\t5) install_software\t\t8) help:\n'
    printf '3) disable_root_login 6) install_oh_my_zsh\n'
}
# Entry point: print the system banner, then loop on a numbered menu
# dispatching to the helper functions until "exit" (7) is chosen.
main() {
    print_systeminfo
    # Word-split below to build the select menu; the trailing ":" ends up
    # appended to the last entry ("help:").
    centos_funcs="add_user del_user
            disable_root_login enable_root_login
            install_software install_oh_my_zsh
            exit help"
    # Dispatch on the raw number typed ($REPLY), not the selected word.
    select centos_func in $centos_funcs:; do
        case $REPLY in
        1)
            add_user
            help
            ;;
        2)
            del_user
            help
            ;;
        3)
            disable_root_login
            help
            ;;
        4)
            enable_root_login
            help
            ;;
        5)
            install_software
            help
            ;;
        6)
            install_oh_my_zsh
            help
            ;;
        7)
            exit
            ;;
        8)
            help
            ;;
        *)
            echo "please select a true num"
            ;;
        esac
    done
}
main
| true |
18f282715b290fe658fbefae25e4feaa97249b2b
|
Shell
|
cYbercOsmOnauT/phpBB_Vagrant
|
/scripts/import-db.sh
|
UTF-8
| 280 | 2.96875 | 3 |
[] |
no_license
|
#!/bin/bash
# Recreate the `forum` database and load the newest nightly dump into it.
set -e
set -x
# Newest (by mtime) compressed SQL dump from the backup share.
dump=` ls -t /mnt/data/live/home/backup/forum/*sql.bz2 | head -n 1`
[ -f $dump ] || exit -1
# NOTE(review): credentials are hard-coded on the command line (visible
# in `ps`); consider ~/.my.cnf or --defaults-extra-file instead.
mysql -uroot -pmysql -e 'DROP DATABASE forum' mysql
mysql -uroot -pmysql -e 'CREATE DATABASE forum' mysql
# pv shows a progress bar while streaming the dump into mysql.
pv $dump | bunzip2 | mysql -uforum -pforum forum
| true |
c8eb79453daa6740377c606bface335f75a45789
|
Shell
|
zalf-rpm/build-pipeline
|
/buildscripts/pack-monica-artifact.sh
|
UTF-8
| 1,168 | 2.984375 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
VERSION_NUMBER=$1
MONICA_NAME="monica_$VERSION_NUMBER"
ARTIFACT_ROOT="artifact"
ARTIFACT_FOLDER="$ARTIFACT_ROOT/$MONICA_NAME"
SQLITE_FOLDER="sqlite-db"
ARTIFACT_SQLITE_FOLDER="$ARTIFACT_FOLDER/$SQLITE_FOLDER"
DEPLOY_FOLDER="deployartefact"
rm -rf $DEPLOY_FOLDER
rm -rf $ARTIFACT_ROOT
mkdir -p $ARTIFACT_FOLDER
mkdir -p $DEPLOY_FOLDER
mkdir -p $ARTIFACT_SQLITE_FOLDER
cd monica/_cmake_linux
cp -af monica ../../$ARTIFACT_FOLDER
cp -af monica-run ../../$ARTIFACT_FOLDER
cp -af monica-zmq-control-send ../../$ARTIFACT_FOLDER
cp -af monica-zmq-run ../../$ARTIFACT_FOLDER
#cp -af monica_python.so ../../$ARTIFACT_FOLDER
cp -af monica-zmq-control ../../$ARTIFACT_FOLDER
cp -af monica-zmq-proxy ../../$ARTIFACT_FOLDER
cp -af monica-zmq-server ../../$ARTIFACT_FOLDER
cd ../$SQLITE_FOLDER
cp -af ka5-soil-data.sqlite ../../$ARTIFACT_SQLITE_FOLDER
cp -af carbiocial.sqlite ../../$ARTIFACT_SQLITE_FOLDER
cp -af monica.sqlite ../../$ARTIFACT_SQLITE_FOLDER
cd ..
cp -af db-connections-install.ini $ARTIFACT_FOLDER/db-connections.ini
cd ../artifact
tar -cvpzf ../deployartefact/$MONICA_NAME.tar.gz $MONICA_NAME --overwrite
cd ..
| true |
4c947377d3b3dfa7522bb0479691444ea5584bae
|
Shell
|
Amoenus/docker-ghostazurewebapplinux
|
/init-letsencrypt.sh
|
UTF-8
| 1,340 | 3.703125 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# We do not need just to renew the Let's Encrypt certificate,
# but also to upload to Azure WebApp and bind it. That's why we
# add a renew hook that calls update-azurewebapp-tls.bash
if ! grep -m 1 -q -- "--renew-hook" /etc/cron.d/certbot; then
echo "init-letsencrypt.sh: Modifying certbot cron job for Azure WebApps persistent storage..."
sed -i -e 's#certbot -q renew#certbot --config-dir /home/letsencrypt --work-dir /home/letsencrypt/workdir --logs-dir /home/LogFiles/letsencrypt --renew-hook /usr/local/bin/update-azurewebapp-tls.bash -q renew#g' /etc/cron.d/certbot
fi
# If the certificate doesn't exists let's generate a new one
# We will set Let's Encrypt config dir to persistent storage.
if [ ! -f /home/letsencrypt/live/$WEBAPP_CUSTOM_HOSTNAME/fullchain.pem ]; then
certbot certonly --config-dir /home/letsencrypt --work-dir /home/letsencrypt/workdir \
--logs-dir /home/LogFiles/letsencrypt --webroot --email $LETSENCRYPT_EMAIL --agree-tos \
-w /home/site/wwwroot -d $WEBAPP_CUSTOM_HOSTNAME -d www.$WEBAPP_CUSTOM_HOSTNAME
if [ $? -eq 0 ]; then
echo "init-letsencrypt.sh: Uploading TLS certificate to Azure WebApp..."
/usr/local/bin/update-azurewebapp-tls.bash
else
echo "init-letsencrypt.sh: There was a problem with TLS certificate generation"
fi
fi
| true |
65740c7afed7b0bd689d64d788c2efff5d893b84
|
Shell
|
asvitkine/phxd
|
/server/run/exec/find
|
WINDOWS-1252
| 6,965 | 3.84375 | 4 |
[] |
no_license
|
#!/bin/sh
#################################################################
# Title: find
# Date: November 18, 2002
# Author: Devin Teske
# version: 1.3
#
# Dependencies:
# find sed grep tr awk
#
# Details:
# This script conforms to BSD standards. Although, if you
# happen to have a GNU compatible find library installed you
# have a few more options available. For example, GNU options
# allow for case-insensitive searching. If you are running a
# BSD system (OS X and Darwin are based on FreeBSD), and you
# wish to enable case-insensitive searching, you will need to
# install a GNU compatible find utility. Since i know a lot
# of OS X clients will be using this script, I have packaged
# GNU Find for OS X. It can be found at the address given
# below. This script will automatically recognize the GNU
# flags when available and use them to its advantage.
#
# If you have the utility `getudbit' installed then the
# script will automatically use it (if it's in the execution
# path) to determine if the person running the script has the
# privelege to view the contents of drop boxes. If the person
# does, then it will allow finding in drop boxes (regardless
# of the conf setting). This will only work if running under a
# server that defines the $ACCOUNT variable for the script
# (shxd 0.1.58 and later does this, placing the name of the
# account executing the script in this variable). The utility
# `getudbit' can be found in the `utils' folder of the shxd
# source as well as on the website (http://synhxd.sf.net).
#
# GNU find 4.1 for Mac OS X [Server] (Darwin):
# http://synhxd.sourceforge.net/darwin_gnufind.tgz
#################################################################
# script prefers bash
[ -f /bin/bash ] && SHELL=/bin/bash
# test for bash on FreeBSD/OpenBSD
[ -f /usr/local/bin/bash ] && SHELL=/usr/local/bin/bash
####################### Configure Options
# all the main options are now in etc/find.conf
# which conf file to read:
conf="./etc/find.conf";
####################### Begin script
#set usage
usage="Usage: find [-qv] <file name>";
# read parameters
num=$#;
if [ $num -eq 0 ]; then
# user didn't supplied any arguments
echo -n "
$usage";
exit 0;
fi
# initialize local variables
findcmd=""; findres=""; rc=0; ffnd=""; ipath="";
bsd_find=0; findkey=""; printf=""; path="-path";
version="1.3"; quiet=0; pretty=" ";
# check for -v (version)
if [ "$1" = "-v" -o "$1" = "--version" ]; then
echo -n "
find v$version";
exit 0;
fi
# check for -q (quite mode, no headers)
if [ "$1" = "-q" -o "$1" = "--quiet" ]; then
quiet=1;
shift 1;
num=$#;
if [ $num -eq 0 ]; then
echo -n "
$usage";
exit 0;
fi
fi
# read arguments (harmful elements are removed)
args=`echo "$@" \
| tr -d '\`' \
| tr -d '\|' \
| tr -d '\"' \
| tr -d '\\\\' \
| tr -d '\$' \
| tr -d ';'`;
# read in the configuration file
source $conf;
if [ $quiet -eq 0 ]; then
echo -n "
$header"
else
pretty="";
fi
# find out what we are dealing with
findkey=`$fbin --help 2> /dev/null`;
printf=`echo "$findkey" | grep "\\-printf"`;
if [ "$printf" = "" ]; then
bsd_find=1;
fi
ipath=`echo "$findkey" | grep "\\-ipath"`;
# OpenBSD 3.0 understands all GNU flags except -ipath
if [ $bsd_find -eq 1 -a "$ipath" != "" ]; then
path="-ipath";
fi
# Escape every "/" in the path $1 as "\/" and store the result in the
# variable named by $2, so the path can be embedded in a sed expression.
function escapepath () {
	escaped=`echo "$1" \
		| sed 's/\//\\\\\//g'`;
	eval "$2=\"$escaped\"";
}
# check if the account file reader is installed
# if so we can check if they have priv to read drop boxes
w="/usr/local/bin/getudbit";
if [ "$ACCOUNT" != "" -a -f $w ]; then
rdboxen=`eval "$w ./accounts/$ACCOUNT/UserData view_drop_boxes"`;
read_dropboxes=$rdboxen;
fi
# construct the find statement
findcmd="findres=\`$fbin $searchdir -follow";
if [ $bsd_find -eq 0 ]; then
findcmd="$findcmd \
-path '*/\\.*' -prune -o \
-path '*/Network Trash Folder' -prune -o \
-path '*/TheVolumeSettingsFolder' -prune -o \
-path '*/TheFindByContentFolder' -prune -o \
-path '*/Icon
' -prune -o";
if [ $read_dropboxes -eq 0 ]; then
findcmd="$findcmd \
-ipath '*/*drop box*' -prune -o";
fi
findcmd="$findcmd \
-iname \"*$args*\" \
-printf \"\\r$pretty/%P\"";
else
escapepath "$searchdir" "rd";
findcmd="$findcmd \
-name \"*$args*\"";
if [ $restrict -eq 0 ]; then
findcmd="$findcmd \
| sed s/\"$rd\"/\"
$pretty\"/";
else
findcmd="$findcmd \
| sed s/\"$rd\"/\"$pretty\"/";
fi
findcmd="$findcmd \
| grep -v \"^$pretty\$\" \
| grep -v \".*/\\..*\" \
| grep -v \".*/Network Trash Folder\" \
| grep -v \".*/TheVolumeSettingsFolder\" \
| grep -v \".*/TheFindByContentFolder\" \
| grep -v \".*/Icon
\"";
if [ $read_dropboxes -eq 0 ]; then
findcmd="$findcmd \
| grep -iv \".*/.*drop box.*\"";
fi
fi
# close the find command and run it (output std_error to /dev/null)
findcmd="$findcmd 2> /dev/null\`";
eval $findcmd;
# restrict the output
if [ $restrict -eq 1 ]; then
if [ $bsd_find -eq 0 ]; then
# the find result ends with a new line character
# so we want to grab one more file/line to display
(( maxreturn = maxreturn + 1 ))
fi
# convert carriage returns to newlines and then back
# because grep doesn't count carriage returns as a
# line delimiter, only new line characters
findres=`echo "$findres" \
| tr "\r" "\n"`;
# count how many lines there were so we can say if
# there were more matches than were displayed
rc=`echo "$findres" | grep -c .`;
findres=`echo "$findres" \
| head -n $maxreturn \
| tr "\n" "\r"`;
fi
# remove blank lines and assign to a variable that we can
# use to check to see if we had any real results
ffnd=`echo $findres | tr -d "\r"`;
# add a carriage return before the footer because if we
# have used tr or head it won't have the carriage return
# at the end of the find result
if [ $rc -ge $maxreturn -o $restrict -eq 0 -o "$ffnd" = "" ]; then
footer="
$footer";
fi
if [ "$ffnd" = "" ]; then
# the script didn't find anything
echo -n "$notfound";
else
# the use of head with the bsd method will remove the
# leading carriage return, so we must re-add it
if [ $bsd_find -eq 1 -a $restrict -eq 1 ]; then
findres="
$findres";
fi
# cut off the trailing new line after quiet output
if [ $quiet -eq 1 ]; then
if ! [ $restrict -eq 1 -a $rc -ge $maxreturn ]; then
flen=`echo -n "$findres" | wc -c | tr -d ' '`;
flen=`expr $flen - 1`;
fex="findres=\`echo|awk '{print substr(\"$findres\",1,$flen)}'\`";
eval $fex;
fi
fi
echo -n "$findres";
if [ $restrict -eq 1 -a $rc -ge $maxreturn ]; then
echo -n "$toomany";
fi
fi
if [ $quiet -eq 0 ]; then
echo -n "$footer";
fi
| true |
2bbbb2e3c979710224d617951de3f8e523c087a7
|
Shell
|
gokhankici/dotvim
|
/dotfiles/install.sh
|
UTF-8
| 1,536 | 3.59375 | 4 |
[] |
no_license
|
#!/bin/zsh
set -e
typeset -A files
THIS_DIR=$HOME/.vim/dotfiles
files=( bashrc $HOME/.bashrc \
clang-format $HOME/.clang-format \
gitconfig $HOME/.gitconfig \
gvimrc $HOME/.gvimrc \
screenrc $HOME/.screenrc \
tmux.conf $HOME/.tmux.conf \
vimrc $HOME/.vimrc \
Xmodmap $HOME/.Xmodmap \
Xresources $HOME/.Xresources \
zshrc $HOME/.zshrc \
fonts_local.conf $HOME/.config/fontconfig/fonts.conf \
compton.conf $HOME/.config/compton.conf \
mimeapps.list $HOME/.config/mimeapps.list
)
#i3_config
#i3status.conf
echo "creating symbolic links for the dotfiles..."
file_date() {
stat --format=%Y $1
}
for f in "${(@k)files}"; do
local SOURCE=$THIS_DIR/$f
local TARGET=$files[$f]
local TARGET_DIR=$(dirname $TARGET)
if [[ ! -d $TARGET_DIR ]]; then
mkdir -p $TARGET_DIR
else
if [[ -f $TARGET && ! -h $TARGET ]]; then
cp $TARGET $TARGET.old
#if [[ ! -a $SOURCE ||
#$(file_date $TARGET) -gt $(file_date $SOURCE) ]]; then
#echo "Target $TARGET is newer, copying it to $SOURCE"
#cp $TARGET $SOURCE
#fi
echo "backed up $TARGET to $TARGET.old"
rm -f $TARGET
fi
fi
if [[ ! -f $TARGET ]]; then
ln -s $SOURCE $TARGET
else
echo "skipping $f"
fi
done
echo "DONE!"
| true |
417c3ff08821d8d0d5eefd895dce7f6a8e4bd3df
|
Shell
|
christaotaoz/shkd-work
|
/work/panabit_plugin/pa_plugin/cfy/Route/src/plan_config
|
GB18030
| 2,191 | 2.71875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#This script is created by ssparser automatically. The parser first created by MaoShouyan
printf "Content-type: text/html;charset=gb2312
Cache-Control: no-cache
"
echo -n "";
. ../common/common.sh
myself="/cgi-bin/Route/`basename $0`"
IPE_PPPOEPLAN=${PGBIN}/ipe_pppoeplan
MOD_TAB_LIST="#${myself} ƻб#/cgi-bin/Route/plan_list"
echo -n "
<script type=\"text/javascript\" src=\"/img/common.js\"></script>
<script language=\"javascript\">
function onCancel()
{
window.location.href = \"${returl}\";
}
</script>
";
if [ "${REQUEST_METHOD}" = "POST" ]; then
operator_check "${myself}"
errmsg=`${IPE_PPPOEPLAN} -e ${CGI_enable}`
if [ "$?" != "0" ]; then
afm_dialog_msg "ʧ:${errmsg}"
fi
elif [ "${CGI_act}" = "reboot" ]; then
err="`${IPE_PPPOEPLAN} -k`"
CGI_enable=`${IPE_PPPOEPLAN} -e`
else
CGI_enable=`${IPE_PPPOEPLAN} -e`
fi
echo -n "
<body>
"; cgi_show_title "Ӧ·->->ʱ"
echo -n "
<br>
"; cgi_print_mod_header "" 700
echo -n "
<br>
<form method=post onsubmit=\"return beforeCommit(this)\" action=\"${myself}\">
<table width=700 border=0 cellspacing=0 cellpadding=3 bgcolor=\"#ffffff\">
<tr id=row1>
<td width=120 align=left><b>ƻ</b></td>
<td width=* align=left>
<select name=enable value=${CGI_enable} style=\"width:100\">
";
if [ ${CGI_enable} -eq 0 ]; then
echo "<option value=1></option>"
echo "<option value=0 selected></option>"
else
echo "<option value=1 selected></option>"
echo "<option value=0></option>"
fi
echo -n "
</select>
</td>
</tr>
</table>
<table style=\"width:700; border-bottom:1px #787882 solid; color:#0000ff\">
<tr><td align=right> </td></tr>
</table>
<table style=\"width:700\">
<tr>
<td align=right>
<input type=submit style=\"width:80\" value=\"ύ\" />
</td>
</tr>
</table>
</form>
</body>
</html>
";
| true |
6bffcdf3f59474451cdf6809c3fe74046fee3f7a
|
Shell
|
NamraOnPC/ShellScripts
|
/scripts/oldfiles
|
UTF-8
| 150 | 3.171875 | 3 |
[] |
no_license
|
#!/bin/bash
# Append a ".old" suffix to every file in the directory named by $1 that
# does not already end in ".old".
if [ -z "$1" ]; then
	echo "usage: ${0##*/} directory" >&2
else
	for path in "$1"/*
	do
		filename=${path##*/}
		# Skip names already carrying the suffix.  The original test,
		# `if [$?!='0']`, expanded to a nonexistent command ("[1!=0]"),
		# so no file was ever renamed; this restores the intent.
		case "$filename" in
		*.old) ;;
		*) mv "$1/$filename" "$1/$filename.old" ;;
		esac
	done
fi
| true |
717e3c1ec9426e7fe8ef68e6c3a5db1224efb0c9
|
Shell
|
richmonkey/git-cloud
|
/build.sh
|
UTF-8
| 925 | 3.3125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
VERSION="0.2.0"
target=./dist/gitCloud-v$VERSION
if [ -d "$target" ]
then
while true; do
read -p "Do you wish to rm $target directory? (y/N)" yn
case $yn in
[Yy]* ) rm -rf $target; break;;
[Nn]* ) exit;;
* ) echo "Please answer yes or no.";;
esac
done
fi
mkdir $target
yarn run build
#python3 -m venv $target/python3
#$target/python3/bin/pip install -r requirements.txt
#cp dist/index.html $target
#cp dist/index.bundle.js $target
#cp run.sh $target
#cp config.py $target
#cp sync.py $target
#cp main.py $target
#cp README.md $target
#cd dist && zip gitCloud-v$VERSION.zip -r gitCloud-v$VERSION
pyinstaller --osx-bundle-identifier com.beetle.gitcloud --add-data dist/index.html:. --add-data dist/index.bundle.js:. --name "Git Cloud" --exclude-module PyQt5 -w main.py
mv "./dist/Git Cloud.app" $target && cd dist && zip gitCloud-v$VERSION.zip -r gitCloud-v$VERSION
| true |
158f1499d67725427cb97d22633d76c3a3670e86
|
Shell
|
jarvisschultz/system_configurations
|
/bashrc.bash
|
UTF-8
| 6,837 | 3.46875 | 3 |
[] |
no_license
|
# ~/.bashrc: executed by bash(1) for non-login shells.
# see /usr/share/doc/bash/examples/startup-files (in the package bash-doc)
# for examples
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# don't put duplicate lines in the history. See bash(1) for more options
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoredups:ignorespace
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
# We have color support; assume it's compliant with Ecma-48
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
# a case would tend to support setf rather than setaf.)
color_prompt=yes
else
color_prompt=
fi
fi
if [ "$color_prompt" = yes ]; then
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
;;
*)
;;
esac
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# some more ls aliases
alias ll='ls -alFh'
alias la='ls -A'
alias l='ls -CF'
# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
# make nano automatically open with sudo when necessary
# Wrapper: if the target file (or, for a new file, its directory) is not
# writable by the current user, offer to reopen it via sudo.
function nano() {
    nano=$(command -v nano);
    if ([ -e "$1" ] && ! [ -w "$1" ]) || ( ! [ -e "$1" ] && ! [ -w "$(dirname "$1")" ]);
    then
	read -n 1 -p "$1 is not editable by you.  sudo [y/n]? " y
	# Quoting "$@" keeps filenames with spaces intact.
	[ "$y" == "y" ] || [ "$y" == "Y" ] && echo -e "\n" && sudo "$nano" "$@"
    else
	"$nano" "$@"
    fi
}
# add 256 color support for the terminal:
# export TERM=xterm-256color
## note that this causes problems with emacs when it tries to autoload
## color themes.
## python config
export PYTHONPATH=$HOME/.local/lib/python2.7/site-packages:/usr/local/lib:/usr/lib/python2.7/config:/usr/local/lib/python2.7/site-packages
export PATH=$PATH:$HOME/.local/bin
export DEFAULT_PYTHON=$PYTHONPATH
## ROS ENVIRONMENT SETUP:
# let's set ROS_HOSTNAME by default
export ROS_HOSTNAME=$(hostname)".local"
export ROSCONSOLE_FORMAT='[${time}] [${node}] [${severity}]: ${message}'
rcon > /dev/null
rsource > /dev/null
# fix rviz flickering
unset LIBGL_ALWAYS_INDIRECT
# Set EMACS as default editor:
export EDITOR='emacsclient -t'
# add home bin always:
PATH=$PATH:$HOME/bin
# Add MATLAB_JAVA Environment Variable:
export MATLAB_JAVA=/usr/lib/jvm/java-6-openjdk-amd64/jre/
# add microchip compilers to the path
bdir=/opt/microchip/xc32
if [ -d $bdir ]
then
# echo "MICROCHIP COMPILER DETECTED"
ver=$(ls $bdir |sort -r |head -n 1)
# echo "MAX VERSION = ${ver}"
export PATH=$PATH:/opt/microchip/xc32/${ver}/bin
fi
# add syntax color and piping to less
export LESSOPEN="| /usr/share/source-highlight/src-hilite-lesspipe.sh %s"
# raw control chars, turn off screen resetting, auto exit if less than one screen
export LESS=' -RXF '
# color support for grep and less
export CLICOLOR_FORCE="true"
alias lsc='ls --color=always'
alias llc='ls -lah --color=always'
alias grepc='grep --color=always'
# disable XON/XOFF flow control for the connection to stty
stty -ixon
# set bash completion to be case insensitive
bind 'set completion-ignore-case on' # NOTE, could also put this in ~/.inputrc without the bind
# Add settings for emacs client:
export ALTERNATE_EDITOR=""
# set default gpg key
# set agent info
# GPG_TTY=$(tty)
# export GPG_TTY
# . "${HOME}/.gpg-agent-info"
# export GPG_AGENT_INFO=$GPG_AGENT_INFO
# export SSH_AUTH_SOCK=$SSH_AUTH_SOCK
# add lib64 for phantom omni
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/lib64
# add self-built texlive 2013 to PATH
export PATH=$PATH:/usr/local/texlive/2013/bin/x86_64-linux
# enable bash directory history
acdpath=$(command -v acd_func.sh)
if [ -f "$acdpath" ]; then
source $acdpath
# add a keybinding:
bind -x "\"\C-\M-i\":cd_func -- ;"
fi
# add byobu colorprompt
[ -r ${HOME}/.byobu/prompt ] && . ${HOME}/.byobu/prompt #byobu-prompt#
# setup Ruby Version Manager:
# if [ -f ~/.rvm/scripts/rvm ]; then
# export PATH="$PATH:$HOME/.rvm/bin" # Add RVM to PATH for scripting
# source ~/.rvm/scripts/rvm
# fi
# add cargo for rust packages
if [ -d "${HOME}/.cargo/bin" ]
then
export PATH=$PATH:${HOME}/.cargo/bin
fi
# clear any duplicates in PATH
if [ -n "$PATH" ]; then
old_PATH=$PATH:; PATH=
while [ -n "$old_PATH" ]; do
x=${old_PATH%%:*} # the first remaining entry
case $PATH: in
*:"$x":*) ;; # already there
*) PATH=$PATH:$x;; # not there yet
esac
old_PATH=${old_PATH#*:}
done
PATH=${PATH#:}
unset old_PATH x
fi
# enable ccache:
if [ -d "/usr/lib/ccache" ]
then
export PATH="/usr/lib/ccache:$PATH"
fi
# disable ROS_LANG
export ROS_LANG_DISABLE=genlisp:geneus:gennodejs
### Added by the Heroku Toolbelt
if [ -d /usr/local/heroku/bin ]
then
export PATH="/usr/local/heroku/bin:$PATH"
fi
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
| true |
8929ab7a63fc1f442b8c782f41ab4afdf0cd5190
|
Shell
|
troyxmccall/dotfiles
|
/bin/yesterday
|
UTF-8
| 1,240 | 3.875 | 4 |
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/usr/bin/env bash
# Build a log of yesterday's commits (by the configured git user) across all
# repositories under ~/projects, then open it.
# FIXES vs original: shebang was /usr/bin/env sh but the script used the bash-only
# [[ ]] construct; all expansions are now quoted so paths with spaces survive;
# `eval $CMD` (quoting/injection hazard) replaced by a direct git invocation;
# non-portable `echo "\n"` replaced by printf.
# based on https://gist.github.com/cgutierrez/9c6149193b3091c7864f362b93216ac8
PROJECTS_DIR=$HOME/projects
LOG_FILE=$PROJECTS_DIR/commits.txt
AUTHOR=$(git config user.name)
# store the current dir so we can return to it at the end
CUR_DIR=$(pwd)
#hey user
echo "Building your commit log from yesterday..."
cd "$PROJECTS_DIR" || exit 1
rm -f "$LOG_FILE"
touch "$LOG_FILE"
# Find all git repositories; NUL-delimited so paths with spaces stay intact
find . -name ".git" -print0 | while IFS= read -r -d '' gitdir; do
  repo=${gitdir#./}
  # ignore submodules, vendor, composer, and bins
  case "$repo" in
    *libraries*|*deployment*|*vendor*|*bin*) continue ;;
  esac
  PROJECT_DIR=$(dirname "$repo")
  echo "checking $PROJECT_DIR..."
  # We have to be inside the repository to run the log command
  cd "$PROJECT_DIR" || continue
  LOG_OUTPUT=$(git --no-pager log --reverse --author="$AUTHOR" --date=short \
    --since='yesterday' --pretty=format:'%h%x09%ad%x09%s')
  if [ -n "$LOG_OUTPUT" ]; then
    basename "$PWD" >> "$LOG_FILE"
    # repo name, its commits, then a blank separator line
    printf '%s\n\n' "$LOG_OUTPUT" >> "$LOG_FILE"
  fi
  # back to PROJECTS_DIR before scanning the next repo
  cd "$PROJECTS_DIR" || exit 1
done
open "$LOG_FILE"
cd "$CUR_DIR" || exit 1
echo "Complete!"
| true |
b6c9693fb85de6087224ea83e69ffeab58e08199
|
Shell
|
davetcoleman/unix_settings
|
/.my.bashrc
|
UTF-8
| 18,944 | 3.625 | 4 |
[] |
no_license
|
#!/bin/bash
# ~/unix_settings/.my.bashrc Customized bash script for multiple computers
# BASHRC_ENV tells .my.bashrc which environment we are in
#export BASHRC_ENV=mac
# Source users personal .my.bashrc if it exists.
#if [[ -f ~/unix_settings/.my.bashrc ]]; then
# . ~/unix_settings/.my.bashrc
#fi
# If not running interactively, don't do anything
[ -z "$PS1" ] && return
# Clear the screen
clear
# don't put duplicate lines in the history. See bash(1) for more options
# ... or force ignoredups and ignorespace
HISTCONTROL=ignoredups:ignorespace
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# make less more friendly for non-text input files, see lesspipe(1)
# Skip for Gento
if [[ $BASHRC_ENV != "ros_baxter" ]]; then
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
fi
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "$debian_chroot" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
# We have color support; assume it's compliant with Ecma-48
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
# a case would tend to support setf rather than setaf.)
color_prompt=yes
else
color_prompt=
fi
fi
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
alias dir='dir --color=auto'
alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# some more ls aliases for listing files and folders
alias ll='ls -alFh'
alias la='ls -Ah'
alias l='ls -CFh'
alias listfolders='ls -AF | grep /'
#alias listfiles='ls -AF | grep -v /'
alias listfiles="find * -type f -print" # lists files in the current directory
# cdl <dir> — change into <dir>; on success, show a long listing via the ll alias.
cdl() {
    local dest="$1"
    cd "$dest" && ll
}
# Quick back folder
alias c="cd .."
alias mkdir="mkdir -p"
# Remove line numbers in history
alias history="history | sed 's/^[ ]*[0-9]\+[ ]*//'"
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
# CUSTOM STUFF - NOT UBUNTU DEFAULT---------------------------------------------------------------------
# OS Stuff
platform='unknown'
unamestr=`uname`
if [[ "$unamestr" == 'Linux' ]]; then
platform='linux'
elif [[ "$unamestr" == 'Darwin' ]]; then
platform='osx'
fi
# IP Addresses Shared btw computers -------------------------------------------------------
# get just the ip address
# myip — print this machine's non-loopback IPv4 address(es).
# Parses ifconfig output; the first grep tolerates both the old
# "inet addr:x.x.x.x" and the newer "inet x.x.x.x" formats, the second
# isolates the dotted quad, and loopback is filtered out.
function myip()
{
ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0.1'
}
# git aliases and functions
source ~/unix_settings/scripts/git.sh
# all ip address are hidden for security reasons
source ~/unix_settings_private/ip_addresses.sh
# Generic ROS Stuff --------------------------------------------------------------------
ROS_SEGMENT=`echo $BASHRC_ENV | cut -d'_' -f 1`
if [ $ROS_SEGMENT == "ros" ]; then
# Shortcuts, aliases and exports
source ~/unix_settings/scripts/ros.sh
# shared settings
ROS_MASTER="localhost" # to be over written
# make sure the ordering of the ROS sources do not get mixed up
unset CMAKE_PREFIX_PATH
unset ROS_PACKAGE_PATH
fi
# Web server environment --------------------------------------------------------
if [ $BASHRC_ENV == "dtc" ]; then
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
export PS1="\W$ "
echo "Computer: DTC Server"
fi
# Custom environements per computer --------------------------------------------------------
if [ $BASHRC_ENV == "ros_monster" ]; then
ROS_MASTER="baxter"
#ROS_MASTER="localhost"
#ROS_MASTER="localhost2"
#ROS_MASTER="rosbrick"
#ROS_MASTER="rosstudent" # andy's computer
source ~/unix_settings/scripts/amazon.sh
# For da cuda
export PATH=/usr/local/cuda-7.0/bin:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda-7.0/lib64:$LD_LIBRARY_PATH
# In-Use Workspaces
#source /opt/ros/indigo/setup.bash
source /home/$USER/ros/ws_picknik/devel/setup.bash
#source /home/$USER/ros/ws_picknik/devel_debug/setup.bash
#source /home/$USER/ros/ws_base/devel/setup.bash
#source /home/$USER/ros/ws_moveit/devel/setup.bash
#source /home/$USER/ros/ws_moveit_other/devel/setup.bash
#source /home/$USER/ros/ws_amazon/devel/setup.bash
# overwrite the one from ws_ros/install/setup.bash
export ROSCONSOLE_CONFIG_FILE=~/unix_settings/config/rosconsole.yaml
# Syncing scripts
alias sync_ros_monster_to_student="source /home/$USER/unix_settings/scripts/rsync/ros_monster_to_student.sh"
# PCL hack
#export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/include
# Linux Brew
export PATH="$HOME/.linuxbrew/bin:$PATH"
export MANPATH="$HOME/.linuxbrew/share/man:$MANPATH"
export INFOPATH="$HOME/.linuxbrew/share/info:$INFOPATH"
# Exports
export ROS_IP=`hostname -I`
echo -ne "ROS: indigo | "
echo -ne "Computer: ros_monster"
fi
if [ $BASHRC_ENV == "ros_baxter" ]; then
export PATH=$PATH:/home/ruser/software/emacs-24.3/lib-src/
export PATH=$PATH:/home/ruser/software/emacs-24.3/src/
export PATH=$PATH:/home/ruser/bin
export PYTHONPATH="/home/ruser/bin/catkin_tools/lib/python2.7/site-packages:$PYTHONPATH"
ROS_MASTER="localhost"
source ~/unix_settings/scripts/baxter.sh
# In-Use Workspaces
#source /opt/ros/groovy/setup.bash
#source /home/ruser/ros/ws_base/devel/setup.bash
source /home/ruser/ros/ws_baxter/devel/setup.bash
echo -ne "ROS: groovy | "
# overwrite the one from ws_ros/install/setup.bash
export ROSCONSOLE_CONFIG_FILE=~/unix_settings/config/rosconsole.yaml
# Exports
# Use ROS_IP if you are specifying an IP address, and ROS_HOSTNAME if you are specifying a host name.
export ROS_IP=$ROS_BAXTER_IP
#export ROS_HOSTNAME=$ROS_BAXTER_IP #http://localhost:11311
#export ROS_MASTER_URI=http://localhost:11311
echo -ne "Computer: ros_baxter"
fi
if [ $BASHRC_ENV == "ros_student" ]; then
#ROS_MASTER="davecore"
ROS_MASTER="localhost"
source ~/unix_settings/scripts/baxter.sh
# In-Use Workspaces
#source /opt/ros/indigo/setup.bash
source /home/$USER/ros/ws_picknik/devel/setup.bash
echo -ne "ROS: indigo | "
# overwrite the one from ws_ros/install/setup.bash
export ROSCONSOLE_CONFIG_FILE=~/unix_settings/config/rosconsole.yaml
# Syncing scripts
alias sync_ros_student_to_monster="source /home/$USER/unix_settings/scripts/rsync/ros_student_to_monster.sh"
alias startcamera="roslaunch picknik_perception multi_xtion.launch "
# Exports
export ROS_IP=$ROS_STUDENT_IP
echo -ne "Computer: ros_student"
fi
if [ $BASHRC_ENV == "ros_mac" ]; then
#ROS_MASTER="baxter"
ROS_MASTER="localhost"
source ~/unix_settings/scripts/baxter.sh
# In-Use Workspaces
source /opt/ros/indigo/setup.bash
#source /home/$USER/ros/ws_base/devel/setup.bash
#source /home/$USER/ros/ws_moveit/devel/setup.bash
#source /home/$USER/ros/ws_moveit_other/devel/setup.bash
#source /home/$USER/ros/ws_baxter/devel/setup.bash
#source /home/$USER/ros/ws_nasa/devel/setup.bash
echo -ne "ROS: indigo | "
# overwrite the one from ws_ros/install/setup.bash
export ROSCONSOLE_CONFIG_FILE=~/unix_settings/config/rosconsole.yaml
# Aliases
alias runmatlab="/usr/local/MATLAB/R2013b/bin/matlab"
# Exports
export ROS_IP=`hostname -I`
echo -ne "Computer: ros_mac"
fi
if [ $BASHRC_ENV == "ros_baxter_control" ]; then
export LIBGL_ALWAYS_SOFTWARE=1
# Settings
ROS_MASTER="baxter"
source ~/unix_settings/scripts/baxter.sh
#In-Use Workspaces
source /opt/ros/hydro/setup.bash
source /home/$USER/ros/ws_baxter/devel/setup.bash
echo -ne "ROS: hydro | "
# Exports
export ROS_HOSTNAME=$ROS_BAXTER_CONTROL_IP
echo -ne "Computer: ros_baxter_control"
fi
# Custom environements per computer --------------------------------------------------------
if [ $BASHRC_ENV == "ros_brick" ]; then
#ROS_MASTER="baxter"
#ROS_MASTER="localhost"
#ROS_MASTER="davecore"
ROS_MASTER="andycore"
export PATH=/usr/local/cuda-7.0/bin:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda-7.0/lib64:$LD_LIBRARY_PATH
source ~/unix_settings/scripts/baxter.sh
source ~/unix_settings/scripts/amazon.sh
# In-Use Workspaces
source /opt/ros/indigo/setup.bash
#source /home/$USER/ros/ws_picknik/devel/setup.bash
#source /home/$USER/ros/ws_moveit/devel/setup.bash
#source /home/$USER/ros/ws_moveit_other/devel/setup.bash
#source /home/$USER/ros/ws_robots/devel/setup.bash
#source /home/$USER/ros/ws_amazon/devel/setup.bash
echo -ne "ROS: indigo | "
# overwrite the one from ws_ros/install/setup.bash
export ROSCONSOLE_CONFIG_FILE=~/unix_settings/config/rosconsole.yaml
alias startcamera="roslaunch realsense_camera realsense_camera.launch"
# Exports
export ROS_IP=`hostname -I`
echo -ne "Computer: ros_brick"
fi
if [ $BASHRC_ENV == "ros_picknik2" ]; then
#ROS_MASTER="baxter"
ROS_MASTER="localhost"
#ROS_MASTER="localhost2"
#ROS_MASTER="rosbrick"
#ROS_MASTER="rosstudent"
source ~/unix_settings/scripts/amazon.sh
# For da cuda
export PATH=/usr/local/cuda-7.0/bin:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda-7.0/lib64:$LD_LIBRARY_PATH
# In-Use Workspaces
#source /opt/ros/indigo/setup.bash
source /home/$USER/ros/ws_picknik/devel/setup.bash
# overwrite the one from ws_ros/install/setup.bash
export ROSCONSOLE_CONFIG_FILE=~/unix_settings/config/rosconsole.yaml
export PATH="$HOME/.linuxbrew/bin:$PATH"
export MANPATH="$HOME/.linuxbrew/share/man:$MANPATH"
export INFOPATH="$HOME/.linuxbrew/share/info:$INFOPATH"
# Exports
export ROS_IP=127.0.0.1
# export ROS_IP=`hostname -I`
echo -ne "ROS: indigo | "
echo -ne "Computer: ros_picknik2"
fi
if [ $BASHRC_ENV == "ros_luma" ]; then
#ROS_MASTER="baxter"
ROS_MASTER="localhost"
#ROS_MASTER="davecore"
#export PATH=/usr/local/cuda-7.0/bin:$PATH
#export LD_LIBRARY_PATH=/usr/local/cuda-7.0/lib64:$LD_LIBRARY_PATH
source ~/unix_settings/scripts/amazon.sh
# In-Use Workspaces
source /opt/ros/indigo/setup.bash
#source /home/$USER/ros/ws_picknik/devel/setup.bash
#source /home/$USER/ros/ws_moveit/devel/setup.bash
#source /home/$USER/ros/ws_moveit_other/devel/setup.bash
#source /home/$USER/ros/ws_robots/devel/setup.bash
#source /home/$USER/ros/ws_amazon/devel/setup.bash
echo -ne "ROS: indigo | "
# overwrite the one from ws_ros/install/setup.bash
export ROSCONSOLE_CONFIG_FILE=~/unix_settings/config/rosconsole.yaml
alias startcamera="roslaunch realsense_camera realsense_camera.launch"
# Exports
export ROS_IP=`hostname -I`
echo -ne "Computer: ros_luma"
fi
if [ $BASHRC_ENV == "janus" ]; then
use Moab
use Torque
use .hdf5-1.8.6
use OpenMPI-1.4.3
use CMake
#use G_DISABLED_it
alias emacs='/home/daco5652/software/emacs/bin/emacs-23.4'
export BOOST_ROOT=/projects/daco5652/software/boost/1.42.0/
cd /lustre/janus_scratch/daco5652/scriptbots/
echo -ne "Computer: Janus"
fi
if [ $BASHRC_ENV == "mac" ]; then
alias web='cd /Volumes/$USER/Web'
alias brewwork='cd /usr/local/Library/Formula'
# For homebrew / ROS Mac
export PATH=/usr/local/bin:/usr/local/sbin:$PATH
# For gedit
PATH=$PATH:/Applications/gedit.app/Contents/MacOS
# Colors!
export CLICOLOR=1
export LSCOLORS=ExFxCxDxBxegedabagacad
# Chrome
alias google-chrome='open -a Google\ Chrome --args --allow-file-access-from-files'
echo "Computer: MBP"
fi
if [ $BASHRC_ENV == "sensor-bot" ]; then
export PATH="$HOME/.linuxbrew/bin:$PATH"
export MANPATH="$HOME/.linuxbrew/share/man:$MANPATH"
export INFOPATH="$HOME/.linuxbrew/share/info:$INFOPATH"
echo "Computer: Sensor"
fi
if [ $BASHRC_ENV == "ros_vm" ]; then
#In-Use Workspaces
#source /opt/ros/hydro/setup.bash
source /home/$USER/ros/ws_picknik/devel/setup.bash
# Change display method for VM graphics card
export OGRE_RTT_MODE=Copy
echo -ne "Computer: ROS VM"
fi
# Set ROS MASTER URI for our robot or locally
if [ $ROS_SEGMENT == "ros" ]; then
if [ $ROS_MASTER == "baxter" ]; then # Use Baxter externally
export ROS_MASTER_URI="http://"$ROS_BAXTER_IP":11311"
echo -ne " | ROS Master: baxter"
elif [ $ROS_MASTER == "special" ]; then # Internal Baxter
export ROS_MASTER_URI=$ROS_BAXTER_IP
echo -ne " | ROS Master: i_am_baxter"
elif [ $ROS_MASTER == "davecore" ]; then # Internal Baxter
export ROS_MASTER_URI=http://128.138.224.226:11311
echo -ne " | ROS Master: Dave's computer"
elif [ $ROS_MASTER == "andycore" ]; then # Internal Baxter
export ROS_MASTER_URI=http://128.138.224.186:11311
echo -ne " | ROS Master: Andy's computer"
elif [ $ROS_MASTER == "rosbrick" ]; then
export ROS_MASTER_URI=http://128.138.224.198:11311
echo -ne " | ROS Master: ROS Brick"
elif [ $ROS_MASTER == "rosstudent" ]; then
export ROS_MASTER_URI=http://128.138.224.186:11311
echo -ne " | ROS Master: ROS Student"
elif [ $ROS_MASTER == "localhost2" ]; then
export ROS_MASTER_URI=http://localhost:11312
alias roscore2="roscore -p 11312 &"
echo -ne " | ROS Master: localhost2"
else # Localhost
export ROS_MASTER_URI=http://localhost:11311
echo -ne " | ROS Master: localhost"
fi
# New line
echo ""
# clean out the stupid logs
rosclean purge -y
# Display the package path if this is a ROS computer
rosPackagePath
fi
# Text Editor
if [[ $platform == "osx" ]]; then #only mac
alias e="emacs"
alias se="sudo emacs -nw"
export EDITOR='emacs'
else
alias e="emacsclient -nw -t" #new_window, t does something for server/client
alias se="sudo emacs -nw"
export EDITOR='emacsclient -nw -t'
function re() {
emacsclient -nw -t -e "(find-file-read-only \"$1\")"
}
fi
export ALTERNATE_EDITOR="" # this evokes emacs server/client stuff somehow
# Python
alias p="python"
alias pylab="ipython --pylab"
# Clipboard
alias xc="xclip" # copy
alias xv="xclip -o" # paste
alias pwdxc="pwd | xclip"
## GREP / FIND --------------------------------------------------------------------------------
# Searching within files, recursive from current location
gr() { grep -I --color=always --ignore-case --line-number --recursive "$1" . ;}
grcase() { grep -I --color=always --line-number --recursive "$1" . ;}
# Exclude certain directories from grep. this doesn't work for osx
if [[ $platform != 'osx' ]]; then
export GREP_OPTIONS="--exclude-dir=\build --exclude-dir=\.svn --exclude-dir=\.hg --exclude-dir=\.git --exclude=\.#* --exclude=*.dae"
fi
#grep -r -i "WORD" . # search recursively in directory for case-insensitive word
# Find a file with a string and open with emacs (kinda like e gr STRING)
gre() { grep -l -I --ignore-case --recursive $1 . | xargs emacsclient -nw -t ;}
# Find files with name in directory
findfile()
{
if [[ $platform != 'osx' ]]; then
find -iname $1 2>/dev/null
else
#find . -name '[mM][yY][fF][iI][lL][eE]*' # makes mac case insensitive
echo "'$1*'" |perl -pe 's/([a-zA-Z])/[\L\1\U\1]/g;s/(.*)/find . -name \1/'|sh
fi
}
# Find files recursively by file type and copy them to a directory
#find . -name "*.rst" -type f -exec cp {} ~/Desktop/ \;
# Find files and delete them
#find -name *conflicted* -delete
# Also:
# find . -iname '*.so'
#
# Find and replace string in all files in a directory (recursive, in place).
# param1 - old word
# param2 - new word
# NOTE(review): $1/$2 are spliced raw into the sed expression, so arguments
# containing '/', '&' or other sed metacharacters will break the command or
# corrupt files — confirm inputs are plain words before relying on this.
findreplace() { grep -lr -e "$1" * | xargs sed -i "s/$1/$2/g" ;}
# Find installed programs in Ubuntu:
findprogram() { sudo find /usr -name "$1*" ;}
## COMPRESSION --------------------------------------------------------------------------------
# Compressed aliases
alias untargz='tar xvfz ' #file.tar.gz
alias untarxz='tar xvfJ ' #file.tar.xz
alias untgz='untargz' # same as above
alias dotargz='tar cfz ' #file.tar.gz folder/
alias untar='tar xvf ' #file.tar
alias dotar='tar cvwf ' #file.tar folder/
# Quick edit this file
alias mybash="e ~/unix_settings/.my.bashrc && . ~/unix_settings/.my.bashrc"
alias mybashr=". ~/unix_settings/.my.bashrc"
alias myemacs="e ~/unix_settings/.emacs"
alias myubuntuinstall="e ~/unix_settings/install/ubuntu.sh"
# Diff with color
alias diff="colordiff"
# Update Ubuntu
alias sagu="sudo apt-get update && sudo apt-get dist-upgrade -y"
alias sagi="sudo apt-get install "
# Quick cmake
# cmaker — clean out-of-source CMake build: recreate ./build, configure, compile.
# FIX: steps are now chained with && — the original ran `cmake ..` and `make`
# even when `mkdir`/`cd build` had failed, i.e. it could configure and build
# in whatever directory it happened to be in.
function cmaker()
{
    rm -rf build &&
    mkdir build &&
    cd build &&
    cmake .. &&
    make -j6
}
alias maker="sudo clear && cmake ../ && make -j8 && sudo make install"
alias maker_local="cmake ../ -DCMAKE_INSTALL_PREFIX=$HOME/local && make -j8 && make install"
alias dmaker="sudo clear && cmake ../ -DCMAKE_BUILD_TYPE=debug && make -j8 && sudo make install"
# Build Latex document and open it
alias quicktex="ls *.tex | pdflatex && open *.pdf"
# Search running processes
alias pp="ps aux | grep "
# Show fake hacker script
alias hacker='hexdump -C /dev/urandom | grep "fd b4"'
# gdb
alias gdbrun='gdb --ex run --args '
alias rosrungdb='gdb --ex run --args ' #/opt/ros/hydro/lib/rviz/rviz
## More Scripts -----------------------------------------------------------------------
# Ubuntu only
if [[ $platform != 'osx' ]]; then
source /home/$USER/unix_settings/scripts/ubuntu.sh
fi
# Notes
source ~/unix_settings/notes/aliases.sh
# selfdestruct — display a live 10-second countdown ("Self Destruct In HH:MM:SS").
# Busy-waits on repeated `date` calls (pegs a CPU for the duration) and relies
# on GNU date's `--date @epoch`, so this is Linux-only.
function selfdestruct()
{
seconds=10; date1=$((`date +%s` + $seconds));
while [ "$date1" -ne `date +%s` ]; do
# \r returns the cursor to line start so the countdown redraws in place
echo -ne "Self Destruct In $(date -u --date @$(($date1 - `date +%s` )) +%H:%M:%S)\r";
done
echo ""
}
function dired() {
emacsclient -e "(dired \"$PWD\")"
}
# Cause remote messages to display
# sagi libnotify-bin
# export DISPLAY=:0.0
# notify-send "Nikolaus Correll Says" "PUBLISH PUBLISH PUBLISH"
| true |
cbfc9ef29d32ebb8d8567810cde93c4bb299796c
|
Shell
|
icelab/asdf-mysql
|
/bin/install
|
UTF-8
| 1,584 | 3.765625 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -eo pipefail
# install <install_type> <version> <install_path>
# asdf plugin entry point: download the MySQL source tarball (boost bundled),
# build it with CMake, install into <install_path>, then initialise an
# insecure (passwordless root) data directory under <install_path>/data —
# suitable for local development installs only.
install() {
local install_type="$1"
local version="$2"
local install_path="$3"
local download_url=$(get_download_url $version)
local download_tmp_dir=$(get_tmp_dir)
local download_file_name=$(get_download_file_name $version)
# -C - resumes a previously interrupted download of the same file
curl -Lo $download_tmp_dir/$download_file_name -C - $download_url
# Run in a subshell so we don't disturb current working dir
(
cd $download_tmp_dir
tar zxf $download_file_name
cd $(get_download_extracted_dir_name $version)
# in-source build using the bundled boost; unit tests skipped for build speed
cmake . \
-DCMAKE_INSTALL_PREFIX="$install_path" \
-DCMAKE_BUILD_TYPE=Release \
-DFORCE_INSOURCE_BUILD=1 \
-DCOMPILATION_COMMENT=asdf-mysql \
-DWITH_BOOST=boost \
-DWITH_UNIT_TESTS=OFF \
-DENABLED_LOCAL_INFILE=ON \
-DWITH_INNODB_MEMCACHED=ON
make
make install
# --initialize-insecure creates the data directory with an empty root password
$install_path/bin/mysqld \
--initialize-insecure \
--user="$USER" \
--basedir="$install_path" \
--datadir="${install_path}/data" \
--tmpdir=/tmp
)
}
# get_tmp_dir — print a directory suitable for downloads.
# Honours $TMPDIR when set and non-empty; otherwise creates a fresh
# directory with mktemp.
# FIXES: `local tmp_dir=$(mktemp -d)` masked a mktemp failure (the exit
# status of `local` is what `set -e` sees), and the unquoted `echo $tmp_dir`
# word-split/globbed paths containing spaces or metacharacters.
get_tmp_dir() {
  local tmp_dir
  if [ -z "${TMPDIR:-}" ]; then
    # declaration split from assignment so a mktemp failure propagates
    tmp_dir=$(mktemp -d) || return 1
  else
    tmp_dir=$TMPDIR
  fi
  # printf + quoting: the path is emitted verbatim
  printf '%s\n' "$tmp_dir"
}
# Helpers deriving download-related names from a MySQL version string.

# Full archive URL of the source tarball for the given version.
get_download_url() {
  local ver="$1"
  printf 'https://downloads.mysql.com/archives/get/file/%s\n' "$(get_download_file_name "$ver")"
}

# File name of the "boost included" source tarball.
get_download_file_name() {
  local ver="$1"
  printf 'mysql-boost-%s.tar.gz\n' "$ver"
}

# Directory name produced by extracting that tarball.
get_download_extracted_dir_name() {
  local ver="$1"
  printf 'mysql-%s\n' "$ver"
}
install "${ASDF_INSTALL_TYPE}" "${ASDF_INSTALL_VERSION}" "${ASDF_INSTALL_PATH}"
| true |
1d92ab04dc976af7b432e24f6278ce6fa2515b5f
|
Shell
|
foxliu/new-project
|
/backup_mysqldata_ph_v2.sh
|
UTF-8
| 1,906 | 3.75 | 4 |
[] |
no_license
|
#!/bin/bash
#This is a ShellScript For Auto MysqlDB Backup
#By foxliu2012@gmail.com
#2013-12
#Setting
DBNames=
DBUser=
DBHost=
DBPasswd=
BackupPath=
#Setting END
#progress
t=`date +%Y%m.%d`
y=`expr substr $t 1 4`
md=`expr substr $t 5 5`
backupdir=$(echo "$BackupPath" | sed -e 's/\/$//')/$y/$md
#Backup Method in rysnc and mysqldump
Backup_Method=rsync
# getbinlog — pause replication and record the slave's current binlog position.
# Appends a timestamped header to the log, stops the slave SQL/IO threads, and
# records Relay_Master_Log_File / Exec_Master_Log_Pos so the backup taken while
# replication is stopped has a known point-in-time position. Replication is
# restarted at the end of the script ("slave start").
# Globals read: DBUser, DBPasswd, DBHost, backupdir.
# NOTE(review): the log path is "/$backupdir/..." — a leading slash prepended
# to an already derived path; verify $backupdir is meant to be root-relative.
getbinlog () {
echo -e "\n$(date +%Y%m%d-%H:%M) \t backup data ph \n ============================================" >> /$backupdir/binlog_from.log
/usr/local/mysql/bin/mysql -u $DBUser -p$DBPasswd -h $DBHost -e "stop slave;" && /usr/local/mysql/bin/mysql -u$DBUser -p$DBPasswd -h $DBHost -e "show slave status\G" | grep -E '(Relay_Master_Log_File|Exec_Master_Log_Pos)' >> /$backupdir/binlog_from.log
}
# backup_data — back up database $DBName using the strategy selected by
# $Backup_Method: "mysqldump" produces a logical dump, "rsync" copies the raw
# data directory from the remote host; anything else prints a usage hint.
# Globals read: Backup_Method, DBUser, DBPasswd, DBHost, DBName, backupdir.
backup_data () {
    if [ "$Backup_Method" = "mysqldump" ]; then
        /usr/local/mysql/bin/mysqldump -u $DBUser -p$DBPasswd -h $DBHost $DBName > $backupdir/"$DBName".sql
    elif [ "$Backup_Method" = "rsync" ]; then
        rsync -aqzP root@$DBHost:/usr/local/mysql/data/$DBName $backupdir
    else
        echo "Set Backup_Method=rsync or mysqldump"
    fi
}
# dump_innodb_table — dump every InnoDB table of the `ph` schema into its own
# .sql file under $backupdir.
# Globals read: DBUser, DBPasswd, DBHost, backupdir.
dump_innodb_table () {
    # BUG FIX: the connection user is $DBUser, not $DBName — $DBName is the
    # database-name loop variable used elsewhere in this script and is not a
    # login account. Also use the same full mysql path as the rest of the
    # script instead of relying on $PATH.
    tables=$(/usr/local/mysql/bin/mysql -u $DBUser -p$DBPasswd -h $DBHost -e "select table_name, engine from information_schema.tables where table_schema='ph' and engine='InnoDB'\G" | grep table_name | awk '{print $2}')
    for table in $tables
    do
        /usr/local/mysql/bin/mysqldump -u $DBUser -p$DBPasswd -h $DBHost ph $table > $backupdir/${table}.sql
    done
}
if [ ! -d $backupdir ]; then
mkdir -p $backupdir
fi
getbinlog
for DBName in $DBNames
do
backup_data
done
dump_innodb_table
/usr/local/mysql/bin/mysql -u $DBUser -p$DBPasswd -h $DBHost -e "slave start;"
echo -e "\n===========================================\n$(date +%Y%m%d-%H:%M) \tBackup Complete" >> /$backupdir/binlog_from.log
#delete 2 days ago's backup file
find /data/mysqldata_ph -mindepth 2 -maxdepth 2 -type d -mtime +6 -exec rm -rf {} \;
| true |
f90b7529bcbf482deb1c6d66a70a6dc13ffa03db
|
Shell
|
axiros/termcontrol
|
/tests/travis_tests_within_xterm.sh
|
UTF-8
| 365 | 2.90625 | 3 |
[] |
no_license
|
#!/bin/bash
# Travis helper: run the CLI test suite inside a real xterm.
# Invoked as "<script> start" on the CI host; it then re-executes itself
# (without arguments) inside xterm, where the pytest run actually happens.
rm -f success
d="$(pwd)"
# main [start]
#   start : record our PID in ./testpid, launch xterm running this same
#           script, then tail the logs so CI sees live output. The tails
#           never exit on their own — this outer process lives until the
#           inner run kills it via the recorded PID.
#   (none): inside xterm — run pytest, create ./success on pass, then kill
#           the waiting outer process.
function main {
test "$1" == "start" && {
echo $$ > testpid
xterm -l -e "$0" &
sleep 1
tail -f /home/gk/termcontrol_input.log &
tail -f Xterm.log*
exit 0
}
pytest -xs tests/test_cli -k works && touch "$d/success"
kill $(cat "$d/testpid")
}
main "$@"
| true |
9baa960ee51432a2455d6724f43009249e22fc7a
|
Shell
|
javiplx/tests-md253
|
/bootfs/var/www/cgi-bin/ins_pkg.cgi
|
BIG5
| 4,273 | 3.171875 | 3 |
[] |
no_license
|
#!/bin/sh
# CGI endpoint for NAS package management (check / install / start-stop).
# Dispatches on the first '&'-separated field of QUERY_STRING.
# NOTE(review): the original inline comments were BIG5-encoded and have been
# destroyed by a transcoding step; the English comments below are
# reconstructed from the code itself and should be verified.
echo -e "Content-type: text/html"\\r
echo -e ""\\r
echo -e "<HTML><HEAD><TITLE>Sample CGI Output2</TITLE></HEAD><BODY>"\\r
# shared helpers (service_package_manager, dlna_mDNSR_* — presumably defined here)
. /usr/libexec/modules/modules.conf
PASSWD=/etc/passwd
CONF_PATH=/etc/sysconfig/config
SMB_SHARES_CONF=${CONF_PATH}/smb/shares.inc
SMB_HOST_CONF=${CONF_PATH}/smb/host.inc
IFCFG=${CONF_PATH}/ifcfg-eth0
IFCFG_DEFAULT=${CONF_PATH}/ifcfg-eth0.default
TIMEMACHINEPORT="548"
scsi_list=/etc/sysconfig/config/scsi.list
format_hdd=/var/www/cgi-bin/format.sh
SingleFormat=/var/www/cgi-bin/SingleFormat.sh
XFS_QUOTA=/usr/local/xfsprogs/xfs_quota
# first query-string field selects the action
func=`echo ${QUERY_STRING} | cut '-d&' -f1`
PKG_Folder=/usr/local/install
PACKAGE=${PKG_Folder}/package
PKGPATH=/usr/local/install
case ${func} in
"SetExecTable")
# SECURITY(review): the second query-string field is URL-decoded and then
# executed verbatim as a shell command ("$Value" below). Any client that can
# reach this CGI gets arbitrary command execution as the web-server user.
# This needs a command whitelist at minimum.
Value=`echo ${QUERY_STRING}|/bin/cut '-d&' -f2|/bin/sed 's/\%20/\ /g'`
$Value
;;
"check_pkg")
# report whether the named package is already listed in the package registry
if [ -d ${PKG_Folder} ]; then
cd ${PKG_Folder}
# package name = 3rd '_'-separated token of the uploaded file name
PKG=`echo ${QUERY_STRING}|/bin/cut '-d&' -f2`
PKG_IPK=`/bin/basename $PKG | /bin/awk -F"_" '{print $3}'`
cat package | grep ${PKG_IPK} > /dev/null
if [ $? -eq 0 ]; then
echo -e "exist"\\r
else
echo -e "null"\\r
fi
else
# (sic) typo preserved — client-side code may match this exact string
echo -e "no_devic1e"\\r
fi
;;
"install_pkg")
# create the shared package directories if they do not exist yet
[ -d /home/PUBLIC/.pkg ] || {
/bin/mkdir -p /home/PUBLIC/.pkg/lib
/bin/mkdir -p /home/PUBLIC/.pkg/bin
}
# create the service users if they do not exist yet
PKG_USER="squeeze lp"
for i in $PKG_USER; do
user=`/bin/cat /etc/passwd | grep $i`
[ "X${user}" == "X" ] && /bin/adduser $i -h /home -H -D -G admin
done
# proceed only when the install volume is mounted
if [ -d ${PKG_Folder} ]; then
cd ${PKG_Folder}
# second query-string field carries the uploaded package path
PKG=`echo ${QUERY_STRING}|/bin/cut '-d&' -f2`
PKG_IPK=`/bin/basename $PKG | /bin/awk -F"_" '{print $3}'`
PKG_SCRIPT=${PKG_IPK}/scripts
# unpack the package archive into a scratch directory
mkdir -p tmp
cd tmp
rm -rf $PKG_IPK
tar zxf $PKG
# new package version = 3rd '~'-field of the INFO "OFF" line
newVersion=`cat ${PKG_SCRIPT}/INFO | grep "OFF"| awk -F"~" '{print $3}'`
# compare against the currently registered version (empty if not installed)
cd ..
oldversion=`cat package | grep ${PKG_IPK} | awk -F"~" '{print $3}'`
if [ "${oldversion}" != "${newVersion}" ]; then
# a non-empty oldversion means an older install must be stopped first
if [ "${oldversion}" != "" ]; then
sh ${PKG_SCRIPT}/start-stop-status del
if [ "$PKG_IPK" == "TimeMachine" ]; then
dlna_mDNSR_stop
dlna_mDNSR_modify_conf_data > ${CONF_PATH}/responder.conf
dlna_mDNSR_start
fi
fi
# begin the install: replace the old tree with the freshly unpacked one
cd ${PKG_Folder}
rm -rf $PKG_IPK
mv tmp/$PKG_IPK .
# sanity check: INFO's package name must match the file-name-derived name
PKG_NAME=`/bin/cat ${PKG_SCRIPT}/INFO | /bin/grep "OFF"| /bin/awk -F"~" '{print $2}'`
[ ${PKG_IPK} != ${PKG_NAME} ] && {
echo -e "error"\\r
/bin/rm -rf $PKG_IPK
# NOTE(review): 'return' outside a function is invalid in an executed
# script — this was probably meant to be 'exit'; confirm before changing.
return
}
# check there is enough free space on /home for the package
pkg_need_size=`/bin/cat ${PKG_SCRIPT}/INFO | /bin/grep "SIZE"| /bin/awk -F= /SIZE/'{print $2}'`
hdd_remnant_size=`/bin/df| /bin/grep "/home$"| /bin/awk '{print $4}'| /bin/sed s/\ //g`
hdd_remnant_size=`/bin/echo ${hdd_remnant_size}000`
remnant=$(($hdd_remnant_size-$pkg_need_size))
if [ ${remnant} -ge 0 ]; then
if [ -d ${PKG_IPK} ]; then
# register the package, then auto-start Twonkymedia if it is listed
/bin/cat ${PKG_SCRIPT}/INFO | /bin/grep "OFF" >> ${PACKAGE}
for pkg in `/bin/cat ${PACKAGE}`; do
Package=$pkg
PackageName=`echo "$Package"|/bin/awk -F~ '{print $2}'`
[ "${PackageName}" == "Twonkymedia" ] && {
PackageNum=`echo "$Package"|/bin/awk -F~ '{print $1}'`
String="PackageAction&${PackageName}&${PackageNum}"
service_package_manager ${String} start
}
done
echo -e "ok"\\r
else
echo -e "error"\\r
fi
else
# not enough free space: discard the unpacked tree
/bin/rm -rf $PKG_IPK
echo -e "no_remnant_size"\\r
fi
else
# identical version already installed
echo -e "exist"\\r
fi
else
echo -e "no_device"\\r
fi
;;
"PackageAction")
# forward start/stop/status requests to the shared package-manager helper
QUERY_STRING=`echo ${QUERY_STRING} | sed 's/\%5E/\^/'`
action=`echo ${QUERY_STRING} | cut '-d&' -f2`
service_package_manager ${QUERY_STRING} ${action}
;;
*)
# unknown action: echo the request back for debugging
QUERY_STRING=`echo ${QUERY_STRING} | sed 's/\%5E/\^/'`
echo -e "${QUERY_STRING} ${REQUEST_METHOD}"\\r
;;
esac
echo -e "</BODY></HTML>"\\r
| true |
9ab7c72204fe7a62bac42fa56ac4cf881e51463f
|
Shell
|
apolloclark/bash-roles
|
/packer/install.sh
|
UTF-8
| 1,089 | 3.796875 | 4 |
[] |
no_license
|
#!/bin/bash
set -euxo pipefail
IFS=$'\n\t'
# http://redsymbol.net/articles/unofficial-bash-strict-mode/

# Install (or upgrade to) the latest HashiCorp Packer release.
# Needs privileges to write /usr/bin.

# retrieve the latest released version from the releases index page
PACKER_VERSION_LATEST=$(curl -sSL https://releases.hashicorp.com/packer/ \
  | grep -F '"/packer/' | head -n1 | cut -d '"' -f2 | cut -d '/' -f3);

# get the currently installed version (empty when packer is absent)
PACKER_VERSION_CURRENT="";
if [ -x "$(command -v packer)" ]; then
  PACKER_VERSION_CURRENT=$(packer -v | cut -d' ' -f2 | cut -c 2-);
fi

# nothing to do when already on the latest version
if [ "$PACKER_VERSION_LATEST" = "$PACKER_VERSION_CURRENT" ]; then
  echo "Already running the latest version == $PACKER_VERSION_CURRENT"
  # BUG FIX: this is an executed script, not a sourced one, so `return` is
  # illegal at top level — bash reports an error and, under `set -e`, the
  # script aborts with a failure status. `exit 0` is the correct success path.
  exit 0;
fi

# generate the download URL (plain assignment; no echo/subshell needed)
PACKER_URL='https://releases.hashicorp.com/packer/'"${PACKER_VERSION_LATEST}"'/packer_'"${PACKER_VERSION_LATEST}"'_linux_amd64.zip'
echo "$PACKER_URL";

# get the file, install it
cd /tmp
wget -q "$PACKER_URL"
unzip ./packer_*.zip
mv ./packer /usr/bin/packer
chmod +x /usr/bin/packer
# packer --version 2>&1 | grep -F "${PACKER_VERSION_LATEST}"
rm -rf /tmp/packer*
| true |
722c934eed160ca2c6f01703b2b3648f5ecc68a0
|
Shell
|
atlury/lfs
|
/system/bzip2
|
UTF-8
| 584 | 2.59375 | 3 |
[] |
no_license
|
#!/tools/bin/bash
# LFS-style build of bzip2-1.0.6 using the temporary /tools toolchain.
# Abort on the first failing command — a half-built package must not be installed.
set -e
cd /tmp
tar xvf /sources/bzip2-1.0.6.tar.gz -C /tmp
echo "Successfully extract bzip2-1.0.6.tar.gz"
cd /tmp/bzip2-1.0.6
# patch installs the documentation alongside the binaries
patch -Np1 -i /sources/bzip2-1.0.6-install_docs-1.patch
# make the utility symlinks relative instead of absolute $(PREFIX)/bin paths
sed -i 's@\(ln -s -f \)$(PREFIX)/bin/@\1@' Makefile
# man pages belong under $(PREFIX)/share/man, not $(PREFIX)/man
sed -i "s@(PREFIX)/man@(PREFIX)/share/man@g" Makefile
# build the shared library first, then a clean static build of the tools
make -f Makefile-libbz2_so
make clean
make
make PREFIX=/usr install
# place the shared binary under /bin and the shared library under /lib
cp -v bzip2-shared /bin/bzip2
cp -av libbz2.so* /lib
ln -sv ../../lib/libbz2.so.1.0 /usr/lib/libbz2.so
# replace the /usr/bin copies with symlinks to the /bin binary
rm -v /usr/bin/{bunzip2,bzcat,bzip2}
ln -sv bzip2 /bin/bunzip2
ln -sv bzip2 /bin/bzcat
# clean up the build tree
cd /
rm -rf /tmp/*
| true |
4cda149b2c09d56097c8ccc527fec853c164f6e5
|
Shell
|
mojofunk/abuild
|
/packages/glib/ABUILD
|
UTF-8
| 541 | 3.421875 | 3 |
[] |
no_license
|
#!/bin/bash
# Populate the package-metadata globals consumed by the abuild framework:
# package name/version, the derived source directory, tarball file name and
# download URL, the build dependencies, and the build tool to drive.
set_pkg_env()
{
    PKG_NAME="glib"
    PKG_VERSION="2.42.2"
    PKG_SRC_DIR="${PKG_NAME}-${PKG_VERSION}"
    PKG_SRC_FILE="${PKG_SRC_DIR}.tar.xz"
    PKG_SRC_URL="http://ftp.gnome.org/pub/gnome/sources/glib/2.42/${PKG_SRC_FILE}"
    # For now it is assumed that glib2-devel is also installed on the
    # build system.
    PKG_DEPS="zlib libffi gettext"
    PKG_BUILD_TOOL="autotools"
}
function configure
{
# Enter this package's build dir and configure: bundled PCRE, no libiconv,
# verbose (non-silent) build rules. autotools_configure comes from abuild.
cd "$PKG_BUILD_ROOT_DIR/$PKG_NAME" || exit 1
autotools_configure "--with-pcre=internal --disable-silent-rules --with-libiconv=no"
}
| true |
7df7b15c42753ef17546af028081842a5392b877
|
Shell
|
sunnysideup/silverstripe-easy-coding-standards
|
/bin/sake-git-commit-and-push-vendor-packages
|
UTF-8
| 4,517 | 4.0625 | 4 |
[] |
no_license
|
#!/bin/bash
# Commit & push every git repo found under ./themes and ./<dir> (default:
# vendor), optionally tagging a new version and running a composer update.
############################################ BASICS
SCRIPT_DIR="$COMPOSER_RUNTIME_BIN_DIR"
WORKING_DIR=$(pwd)
source $SCRIPT_DIR/sake-self-methods
############################################ DIR
# First positional argument (if any) selects the directory to scan.
if [ "$1" != "" ]; then
    dir=$1;
else
    dir='vendor'
fi
############################################ SETTINGS
# Defaults for the switches parsed below.
commitAndPush='yes'
message='WIP'
newVersion='no'
statusOnly='no'
fullUpdate='yes'
help='no'
# Hand-rolled option parser: consumes flag+value pairs; any bare word is
# treated as the target directory.
while (( $# )); do
    case $1 in
    -m|--message) message=$2;shift ;;
    -c|--commit-and-push) commitAndPush=$2;shift ;;
    -v|--new-version) newVersion=$2;shift ;;
    -s|--status-only) statusOnly=$2;shift ;;
    -f|--full-update) fullUpdate=$2;shift ;;
    -h|--help) help='yes';shift ;;
    -*) printf 'Unknown option: %q\n\n' "$1";
        help='yes' ;;
    *) dir=$1;;
    esac
    shift
done
# prompt user for message
# NOTE(review): message is initialised to 'WIP' above, so this prompt only
# fires when the caller explicitly passes -m '' — confirm that is intended.
if [[ "$message" == "" ]]; then
    read -p "Commit message: " message
fi
# get last commit message
# Final fallback if the interactive answer was also empty.
if [[ "$message" == "" ]]; then
    message="WIP"
fi
help_and_exit() {
    # Show the current configuration plus usage help, then exit.
    # echohead/echonice/echofunctions/echoend come from sake-self-methods.
    echohead "Checking Vendor Packages for Changes";
    echonice "Directory of script: $SCRIPT_DIR";
    echonice "Directory to analyse: $WORKING_DIR/$dir";
    echohead "Current settings:";
    echonice "Message (-m):                  $message";
    echonice "Commit and Push (-c):          $commitAndPush";
    echonice "Create New tag (-v):           $newVersion";
    echonice "Status Only (-s):              $statusOnly";
    echonice "Full Composer Update (-f):     $fullUpdate";
    echohead "Available settings:";
    echonice "-m, --message                  message - e.g. Lint, default: WIP";
    echonice "-c, --commit-and-push          commit and push also? default: yes";
    echonice "-v, --new-version              create new tag (no|patch|minor|major), default: no";
    echonice "-s, --status-only              check if there are changes git folder present, default: no"
    echonice "-f, --full-update              run a full composer update afterwards and commit, default: yes"
    echonice "-h, --help                     show help information"
    echohead "Example usage:"
    echonice "sake-git-commit-and-push-vendor-packages -m 'PATCH: lint' -c yes -v patch -f no  vendor/my-vendor-name";
    echofunctions
    exit;
}
############################################ HELP ONLY
if [[ "$help" == "yes" ]]; then
help_and_exit
fi
############################################ CODE
# Enter the working directory (or show help and exit if it is missing).
if [ -d "$WORKING_DIR" ]; then
    cd "$WORKING_DIR"
else
    echonice "Could not change directory to $WORKING_DIR";
    help_and_exit;
fi

echohead "Removing folders that do not need to be there ..."
# List, then delete, nested vendor/ dirs exactly two levels below the target.
# (rm options moved before the operand for BSD/GNU portability.)
find "$WORKING_DIR/$dir" -mindepth 2 -maxdepth 2 -type d -name "vendor"
find "$WORKING_DIR/$dir" -mindepth 2 -maxdepth 2 -type d -name "vendor" -exec rm -rf "{}" \;
sake-lint-remove-origs "$dir"

echohead "going to look for folders in $WORKING_DIR/themes"
find "$WORKING_DIR/themes" -mindepth 1 -maxdepth 1 -type d -print0 |
while IFS= read -r -d '' line; do
    echohead "### DOING: $line" ;
    cd "$line";
    test="$line/.git/config"
    if [[ ! -f "$test" ]]; then
        echobad "$line IS NOT A GIT REPO";
    else
        if [[ "$statusOnly" == "yes" ]]; then
            # (removed stray debug output `echonice '000sa0df'`)
            git status -s
        else
            $SCRIPT_DIR/sake-git-commit-and-push . -m "$message" -c $commitAndPush -s $statusOnly -v $newVersion ;
        fi
    fi
    cd -
done

echohead "going to look for folders in $WORKING_DIR/$dir"
find "$WORKING_DIR/$dir" -mindepth 1 -maxdepth 1 -type d -print0 |
while IFS= read -r -d '' line; do
    echohead "### DOING: $line" ;
    cd "$line";
    test="$line/.git/config"
    echonice "Checking for /.git/config"
    if [[ ! -f "$test" ]]; then
        echobad "$line IS NOT A GIT REPO";
    else
        if [[ "$statusOnly" == "yes" ]]; then
            git status -s
        else
            $SCRIPT_DIR/sake-git-commit-and-push . -m "$message" -c $commitAndPush -s $statusOnly -v $newVersion ;
        fi
    fi
    cd -
done

echohead "back to the start: $WORKING_DIR"
cd "$WORKING_DIR"
# Optionally run a composer update in the project root and commit/push it too.
if [[ "$fullUpdate" == "yes" ]]; then
    sake-composer-update
    $SCRIPT_DIR/sake-git-commit-and-push . -m "$message" -c $commitAndPush -s $statusOnly -v $newVersion
fi

echoend;
| true |
fd6c9823c64b9170d94558889aacbb9298b0becd
|
Shell
|
dellelce/mkit
|
/modules/lua/build.sh
|
UTF-8
| 1,699 | 3.828125 | 4 |
[] |
no_license
|
lua_platform()
{
 # Detect the host platform for Lua's Makefile target (e.g. "linux",
 # "darwin"); Cygwin hosts build with Lua's "mingw" target.
 typeset os
 os=$(uname -s)
 os=${os%%_*}
 os=$(printf '%s' "$os" | tr '[:upper:]' '[:lower:]')
 if [ "$os" = "cygwin" ]; then
  os="mingw"
 fi
 echo "$os"
}
#
# Build lua
# build_lua_core <id> <src-dir>: compile and install Lua into ${prefix}.
# Globals read: BUILDDIR, prefix, WORKDIR, BOLD/RESET; relies on mkit
# helpers: prepare_build, time_start/time_end, logger_file, getbasename,
# build_logger, lua_platform.
build_lua_core()
{
 typeset rc=0
 export rc_conf=0 rc_make=0 rc_makeinstall=0
 typeset id="$1"; shift     # build id
 typeset dir="$1"; shift    # src directory
 typeset pkgbuilddir="$BUILDDIR/$id"
 # No Sanity checks!
 # Other steps
 # Create the build dir; if it already exists, fall back to a randomized name.
 [ ! -d "$pkgbuilddir" ] && { mkdir -p "$pkgbuilddir"; } ||
 {
   pkgbuilddir="$BUILDDIR/${id}.${RANDOM}"; mkdir -p "$pkgbuilddir";
 }
 cd "$pkgbuilddir" ||
 {
   echo "build_lua: Failed to change to build directory: " $pkgbuilddir;
   return 1;
 }
 prepare_build $dir
 echo "Building $id [${BOLD}$(getbasename $id)${RESET}] at $(date)"
 echo
 time_start
 # no configure step for lua
 #build_logger "${id}_configure"
 #[ "$rc_conf" -ne 0 ] && return $rc_conf
 logFile=$(logger_file ${id}_make)
 echo "Running make..."
 {
   # Lua's make takes the platform target plus install/compile flags directly.
   conf="$(lua_platform) INSTALL_TOP=${prefix}"
   conf="${conf} MYCFLAGS=-I${prefix}/include MYLDFLAGS=-L${prefix}/lib"
   conf="${conf} MYLIBS=-lncurses"
   echo "Configuration: $conf"
   make $conf 2>&1
   rc_make=$?
 } > ${logFile}
 # On failure: dump the captured log and bail with make's exit status.
 [ $rc_make -ne 0 ] && { cd "$cwd"; time_end; cat "${logFile}"; echo ; echo "Failed make for ${id}"; return $rc_make; }
 echo "Running make install..."
 logFile=$(logger_file ${id}_makeinstall)
 {
   make install INSTALL_TOP=${prefix} 2>&1
   rc_makeinstall=$?
 } > ${logFile}
 cd "$WORKDIR"
 # Install failure is reported but does not stop the caller explicitly;
 # the status is propagated via the return value below.
 [ $rc_makeinstall -ne 0 ] && { cat "${logFile}"; echo ; echo "Failed make install for ${id}"; }
 time_end
 return $rc_makeinstall
}
# build_lua: wrapper to handle "standard" arguments and uncompression
build_lua()
{
  # Quote the source directory so paths containing whitespace survive
  # word-splitting (the original passed $srcdir_lua unquoted).
  build_lua_core lua "$srcdir_lua"
}
| true |
7d4a73fb849e198f1229585b3f3163dab40b9ce9
|
Shell
|
dihu-stuttgart/iron-tests
|
/examples/example-0404/run_example.sh
|
UTF-8
| 619 | 2.96875 | 3 |
[] |
no_license
|
#!/bin/bash
# test cases for example-0404 (1D problem with Hodgkin-Huxley)
# Usage: run_example.sh <build-type>   (e.g. Release or Debug)
echo "compiling and running example $(pwd)"
# The build type doubles as the build directory name; refuse to run without
# it (previously an empty $1 silently broke mkdir/cd/cmake below).
folder=${1:?usage: $0 <build-type>}
mkdir -p "$folder"
echo "  compiling $folder"
# Abort if we cannot enter the build dir — otherwise cmake would run in the
# wrong directory.
cd "$folder" || exit 1
cmake -DCMAKE_BUILD_TYPE="$folder" -DOPENCMISS_BUILD_TYPE="$folder" ..
make
cd ..
echo "  running $folder"
# <number elements X> <interpolation type> <solver type> <PDE step size> <stop time> <output frequency> <CellML Model URL> <slow-twitch> <ODE time-step>
mkdir -p results/current_run/l1x1_n16_i2_s0_05 && ./"$folder"/src/example 16 2 0 0.05 10 1 hodgkin_huxley_1952.cellml F 0.05 && mv *.ex* results/current_run/l1x1_n16_i2_s0_05
| true |
a06f9995f967ba5eae747c284acf983fffe06c66
|
Shell
|
eamarais/eam-group
|
/docs/geosfp_0.25x03125_eu-03.run
|
UTF-8
| 1,529 | 3.21875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# PBS job script: run one GEOS-Chem simulation (run id 03) with OpenMP.
#PBS -N geosfp_eu-03
#PBS -l nodes=1:ppn=16
#PBS -l walltime=70:00:00
#PBS -l vmem=40gb
#PBS -M uol-uname@le.ac.uk
#PBS -m bea

# Load environment modules
module load gcc/6.3.0 openmpi/gcc/3.0.0 netcdf/gcc-mpi/4.4.1

# Relevant libraries and other resource information:
export NETCDF_HOME=`nc-config --prefix`
export GC_BIN=$NETCDF_HOME/bin
export GC_INCLUDE=$NETCDF_HOME/include
export GC_LIB=$NETCDF_HOME/lib
export FC=gfortran
export CC=gcc
export CXX=g++
ulimit -s unlimited
export OMP_STACKSIZE=500m
export OMP_NUM_THREADS=16

# Move to run directory:
cd $PBS_O_WORKDIR

#-------------------------------------------------
# Initialize
#-------------------------------------------------
# Define GEOS-Chem log file
run=03
log=log_$run

# Delete any existing log file with the same name:
rm -f $log

#-------------------------------------------------
# Start the simulation
#-------------------------------------------------
# Run GEOS-Chem and pipe output to log
ln -sf ./input.geos_$run input.geos
./geos >> $log

# Echo end time
echo '===> Run ended at' `date` >> $log

#-------------------------------------------------
# Clean up
#-------------------------------------------------
# Echo info from computational cores to log file for displaying results
nodeName=`uname -n`
echo "# of CPUs: $OMP_NUM_THREADS"
echo "NodeName : $nodeName"
grep "vendor_id"  /proc/cpuinfo
grep "model name" /proc/cpuinfo
grep "cpu MHz"    /proc/cpuinfo

# Clear variables.
# Fixed: the original ran `unset id` and `unset nodename`, which did not
# match the variables actually set above (`run` and `nodeName`).
unset run
unset log
unset nodeName

# Exit normally
exit 0
#EOC
| true |
5e0ab722920a6e81f9f49f32d03c9b7c02042d8b
|
Shell
|
Ciantic/casterson
|
/get_testdata.sh
|
UTF-8
| 435 | 2.59375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Fetch test fixtures: the Big Buck Bunny sample video plus a small matching
# SRT subtitle file used by the test suite.

# Ensure the destination directory exists (wget -O fails without it).
mkdir -p ./test_data

if [ ! -f ./test_data/big_buck_bunny.mp4 ]; then
    # Download once; on failure remove the partial file so the next run
    # retries instead of being fooled by a truncated download.
    if ! wget -O ./test_data/big_buck_bunny.mp4 http://commondatastorage.googleapis.com/gtv-videos-bucket/big_buck_bunny_1080p.mp4; then
        rm -f ./test_data/big_buck_bunny.mp4
        echo "Failed to download big_buck_bunny.mp4" >&2
        exit 1
    fi
fi

echo "1
00:00:00,498 --> 00:00:02,826
- This is an example subtitle
second line here. ÄÖäö.
2
00:00:02,826 --> 00:00:06,384
- In one line
3
00:00:06,384 --> 00:00:09,428
- And something else too?
- Okay." > ./test_data/big_buck_bunny.srt
| true |
6377cc8183105a2e6df37c885a91cfaf58710007
|
Shell
|
bacc129/bacc
|
/build.sh
|
UTF-8
| 8,150 | 3.875 | 4 |
[] |
no_license
|
#!/bin/bash
SOURCE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
function usage()
{
# Print the supported build flags to stderr and exit with status 1.
printf "\\tUsage: %s \\n\\t[Build Option -o <Debug|Release|RelWithDebInfo|MinSizeRel>] \\n\\t[CodeCoverage -c] \\n\\t[Doxygen -d] \\n\\t[CoreSymbolName -s <1-7 characters>] \\n\\t[ResourceModel -r <Unlimit|Fee|Delegate>] \\n\\t[Avoid Compiling -a]\\n\\n" "$0" 1>&2
exit 1
}
is_noninteractive() {
  # True when CHAIN_BUILD_NONINTERACTIVE is set (even to the empty string).
  [[ -v CHAIN_BUILD_NONINTERACTIVE ]]
}
ARCH=$( uname )
if [ "${SOURCE_DIR}" == "${PWD}" ]; then
BUILD_DIR="${PWD}/build"
else
BUILD_DIR="${PWD}"
fi
CMAKE_BUILD_TYPE=Release
DISK_MIN=10
DOXYGEN=false
ENABLE_COVERAGE_TESTING=false
CORE_SYMBOL_NAME="BACC"
ROOT_ACCOUNT="bacc"
USE_PUB_KEY_LEGACY_PREFIX=1
MAX_PRODUCERS=17
BLOCK_INTERVAL_MS=500
PRODUCER_REPETITIONS=6
RESOURCE_MODEL=2
# if chain use bonus to vote
USE_BONUS_TO_VOTE=1
# Use current directory's tmp directory if noexec is enabled for /tmp
if (mount | grep "/tmp " | grep --quiet noexec); then
mkdir -p $SOURCE_DIR/tmp
TEMP_DIR="${SOURCE_DIR}/tmp"
rm -rf $SOURCE_DIR/tmp/*
else # noexec wasn't found
TEMP_DIR="/tmp"
fi
START_MAKE=true
TIME_BEGIN=$( date -u +%s )
VERSION=1.2
txtbld=$(tput bold)
bldred=${txtbld}$(tput setaf 1)
txtrst=$(tput sgr0)
# Parse command-line flags (only when arguments were given).
if [ $# -ne 0 ]; then
    # Fixed: 'y' added to the optstring — the y) branch below was previously
    # unreachable because getopts routed -y to the unknown-option case.
    while getopts ":cdo:s:r:ahy" opt; do
        case "${opt}" in
            o )
                options=( "Debug" "Release" "RelWithDebInfo" "MinSizeRel" )
                if [[ "${options[*]}" =~ "${OPTARG}" ]]; then
                    CMAKE_BUILD_TYPE="${OPTARG}"
                else
                    printf "\\n\\tInvalid argument: %s\\n" "${OPTARG}" 1>&2
                    usage
                    exit 1
                fi
                ;;
            c )
                ENABLE_COVERAGE_TESTING=true
                ;;
            d )
                DOXYGEN=true
                ;;
            s)
                # Fixed: reject an empty symbol name — the original tested
                # -z "${#OPTARG}" (the length, never empty), which let
                # CORE_SYMBOL_NAME be set to "".
                if [ "${#OPTARG}" -gt 7 ] || [ -z "${OPTARG}" ]; then
                    printf "\\n\\tInvalid argument: %s\\n" "${OPTARG}" 1>&2
                    usage
                    exit 1
                else
                    CORE_SYMBOL_NAME="${OPTARG}"
                fi
                ;;
            r )
                options=( "Unlimit" "Fee" "Delegate" )
                if [[ "${options[*]}" =~ "${OPTARG}" ]]; then
                    if [[ "${OPTARG}" == "Unlimit" ]]; then
                        RESOURCE_MODEL=0
                    fi
                    if [[ "${OPTARG}" == "Fee" ]]; then
                        RESOURCE_MODEL=1
                    fi
                    if [[ "${OPTARG}" == "Delegate" ]]; then
                        RESOURCE_MODEL=2
                    fi
                else
                    printf "\\n\\tInvalid argument: %s\\n" "${OPTARG}" 1>&2
                    usage
                    exit 1
                fi
                ;;
            a)
                START_MAKE=false
                ;;
            h)
                usage
                exit 1
                ;;
            y)
                CHAIN_BUILD_NONINTERACTIVE=1
                ;;
            \? )
                printf "\\n\\tInvalid Option: %s\\n" "-${OPTARG}" 1>&2
                usage
                exit 1
                ;;
            : )
                printf "\\n\\tInvalid Option: %s requires an argument.\\n" "-${OPTARG}" 1>&2
                usage
                exit 1
                ;;
            * )
                usage
                exit 1
                ;;
        esac
    done
fi
pushd "${SOURCE_DIR}" &> /dev/null
printf "\\n\\tBeginning build version: %s\\n" "${VERSION}"
printf "\\t%s\\n" "$( date -u )"
printf "\\tUser: %s\\n" "$( whoami )"
#printf "\\tgit head id: %s\\n" "$( cat .git/refs/heads/master )"
# printf "\\tCurrent branch: %s\\n" "$( git rev-parse --abbrev-ref HEAD )"
printf "\\n\\tARCHITECTURE: %s\\n" "${ARCH}"
popd &> /dev/null
if [ "$ARCH" == "Linux" ]; then
if [ ! -e /etc/os-release ]; then
printf "\\n\\tcurrently supports Centos, Ubuntu Linux only.\\n"
printf "\\tPlease install on the latest version of one of these Linux distributions.\\n"
printf "\\tExiting now.\\n"
exit 1
fi
OS_NAME=$( cat /etc/os-release | grep ^NAME | cut -d'=' -f2 | sed 's/\"//gI' )
case "$OS_NAME" in
"Ubuntu")
FILE="${SOURCE_DIR}/scripts/build_ubuntu.sh"
CXX_COMPILER=clang++-4.0
C_COMPILER=clang-4.0
MONGOD_CONF=${HOME}/bacc/opt/mongodb/mongod.conf
export PATH=${HOME}/bacc/opt/mongodb/bin:$PATH
;;
*)
printf "\\n\\tUnsupported Linux Distribution. Exiting now.\\n\\n"
exit 1
esac
export BOOST_ROOT="${HOME}/bacc/opt/boost"
WASM_ROOT="${HOME}/bacc/opt/wasm"
OPENSSL_ROOT_DIR=/usr/include/openssl
fi
if [ "$ARCH" == "Darwin" ]; then
FILE="${SOURCE_DIR}/scripts/build_darwin.sh"
CXX_COMPILER=clang++
C_COMPILER=clang
MONGOD_CONF=/usr/local/etc/mongod.conf
OPENSSL_ROOT_DIR=/usr/local/opt/openssl
fi
. "$FILE"
printf "\\n\\n>>>>>>>> ALL dependencies sucessfully found or installed .\\n\\n"
printf ">>>>>>>> CMAKE_BUILD_TYPE=%s\\n" "${CMAKE_BUILD_TYPE}"
printf ">>>>>>>> ENABLE_COVERAGE_TESTING=%s\\n" "${ENABLE_COVERAGE_TESTING}"
printf ">>>>>>>> DOXYGEN=%s\\n" "${DOXYGEN}"
printf ">>>>>>>> RESOURCE_MODEL=%s\\n\\n" "${RESOURCE_MODEL}"
if [ ! -d "${BUILD_DIR}" ]; then
if ! mkdir -p "${BUILD_DIR}"
then
printf "Unable to create build directory %s.\\n Exiting now.\\n" "${BUILD_DIR}"
exit 1;
fi
fi
if ! cd "${BUILD_DIR}"
then
printf "Unable to enter build directory %s.\\n Exiting now.\\n" "${BUILD_DIR}"
exit 1;
fi
if [ -z "$CMAKE" ]; then
CMAKE=$( command -v cmake )
fi
echo "-DCMAKE_BUILD_TYPE='${CMAKE_BUILD_TYPE}' "
echo "-DCMAKE_CXX_COMPILER='${CXX_COMPILER}'"
echo "-DCMAKE_C_COMPILER='${C_COMPILER}' "
echo "-DWASM_ROOT='${WASM_ROOT}' "
echo "-DCORE_SYMBOL_NAME='${CORE_SYMBOL_NAME}'"
echo "-DUSE_PUB_KEY_LEGACY_PREFIX=${USE_PUB_KEY_LEGACY_PREFIX}"
echo "-DUSE_MULTIPLE_VOTE=${USE_MULTIPLE_VOTE}"
echo "-DROOT_ACCOUNT='${ROOT_ACCOUNT}'"
echo "-DMAX_PRODUCERS='${MAX_PRODUCERS}' "
echo "-DBLOCK_INTERVAL_MS='${BLOCK_INTERVAL_MS}' "
echo "-DPRODUCER_REPETITIONS='${PRODUCER_REPETITIONS}'"
echo "-DRESOURCE_MODEL=${RESOURCE_MODEL}"
echo "-DOPENSSL_ROOT_DIR='${OPENSSL_ROOT_DIR}' "
echo "-DBUILD_MONGO_DB_PLUGIN=true"
echo "-DENABLE_COVERAGE_TESTING='${ENABLE_COVERAGE_TESTING}' "
echo "-DBUILD_DOXYGEN='${DOXYGEN}'"
echo "-DCMAKE_INSTALL_PREFIX='/usr/local/baccchain'"
echo ${LOCAL_CMAKE_FLAGS}
if ! "${CMAKE}" -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" -DCMAKE_CXX_COMPILER="${CXX_COMPILER}" \
-DCMAKE_C_COMPILER="${C_COMPILER}" -DWASM_ROOT="${WASM_ROOT}" -DCORE_SYMBOL_NAME="${CORE_SYMBOL_NAME}" \
-DUSE_PUB_KEY_LEGACY_PREFIX=${USE_PUB_KEY_LEGACY_PREFIX} \
-DUSE_MULTIPLE_VOTE=${USE_MULTIPLE_VOTE} \
-DROOT_ACCOUNT="${ROOT_ACCOUNT}" \
-DMAX_PRODUCERS="${MAX_PRODUCERS}" -DBLOCK_INTERVAL_MS="${BLOCK_INTERVAL_MS}" -DPRODUCER_REPETITIONS="${PRODUCER_REPETITIONS}" \
-DRESOURCE_MODEL=${RESOURCE_MODEL} \
-DOPENSSL_ROOT_DIR="${OPENSSL_ROOT_DIR}" -DBUILD_MONGO_DB_PLUGIN=true \
-DENABLE_COVERAGE_TESTING="${ENABLE_COVERAGE_TESTING}" -DBUILD_DOXYGEN="${DOXYGEN}" \
-DCMAKE_INSTALL_PREFIX="/usr/local/baccchain" ${LOCAL_CMAKE_FLAGS} "${SOURCE_DIR}"
then
printf "\\n\\t>>>>>>>>>>>>>>>>>>>> CMAKE building BACC-Chain has exited with the above error.\\n\\n"
exit -1
fi
if [ "${START_MAKE}" == "false" ]; then
printf "\\n\\t>>>>>>>>>>>>>>>>>>>> BACC-Chain has been successfully configured but not yet built.\\n\\n"
exit 0
fi
if [ -z ${JOBS} ]; then JOBS=$CPU_CORE; fi # Future proofing: Ensure $JOBS is set (usually set in scripts/build_*.sh scripts)
if ! make -j"${JOBS}"
then
printf "\\n\\t>>>>>>>>>>>>>>>>>>>> MAKE building BACC-Chain has exited with the above error.\\n\\n"
exit -1
fi
| true |
3de19cb2ec3d4536dbecd4c91f9ee2b832bf7af5
|
Shell
|
gkaskonas/portfolio-website
|
/scripts/aws-cli-assumerole.sh
|
UTF-8
| 929 | 2.546875 | 3 |
[
"MIT"
] |
permissive
|
# Assume the per-environment CI pipeline role and persist the temporary
# credentials into $BASH_ENV for subsequent CircleCI steps.
unset AWS_SESSION_TOKEN

# Quote/guard ENVIRONMENT: the original unquoted `[ $ENVIRONMENT == "prod" ]`
# is a test syntax error when the variable is unset or empty.
if [ "${ENVIRONMENT:-}" = "prod" ]
then
      temp_role=$(aws sts assume-role \
                    --role-arn "arn:aws:iam::119184259962:role/PipelineRole" \
                    --role-session-name "circleci-prod")
elif [ "${ENVIRONMENT:-}" = "test" ]
then
      temp_role=$(aws sts assume-role \
                    --role-arn "arn:aws:iam::380477309410:role/PipelineRole" \
                    --role-session-name "circleci-test")
else
      temp_role=$(aws sts assume-role \
                    --role-arn "arn:aws:iam::404319983256:role/PipelineRole" \
                    --role-session-name "circleci-dev")
fi

# Quote $temp_role so the JSON reaches jq intact; xargs strips the quotes.
echo export AWS_ACCESS_KEY_ID=$(echo "$temp_role" | jq .Credentials.AccessKeyId | xargs) >> $BASH_ENV
echo export AWS_SECRET_ACCESS_KEY=$(echo "$temp_role" | jq .Credentials.SecretAccessKey | xargs) >> $BASH_ENV
echo export AWS_SESSION_TOKEN=$(echo "$temp_role" | jq .Credentials.SessionToken | xargs) >> $BASH_ENV
| true |
403f40d1399f21db6ad2d89df25d8a6debd53d02
|
Shell
|
abhchand/reely-ansible
|
/roles/app/templates/app.sh.j2
|
UTF-8
| 936 | 4.0625 | 4 |
[] |
no_license
|
#!/bin/bash
# #############################################################################
# Deploys a new app version by rebuilding the image and containers
# This file is managed by Ansible. Any changes may be overwritten.
# #############################################################################
# NOTE: {{ git_clone_path }} and {{ app_name }} are Jinja2 placeholders that
# Ansible fills in when rendering this template.
# Print a highlighted (yellow) status line.
function printout {
  YELLOW='\033[1;33m'
  NC='\033[0m'
  echo -e "=> ${YELLOW}$1${NC}"
}
# Abort with "ERROR" if the previous command failed. Must be called
# immediately after the command being checked ($? is consumed here).
function ensureSuccess {
  if [ $? -ne 0 ]; then
    echo "ERROR"
    exit 1
  fi
}
# Start the app's containers via docker-compose.
function startApp {
  docker-compose -f {{ git_clone_path }}/docker-compose.yml start
  ensureSuccess
}
# Stop the app's containers via docker-compose.
function stopApp {
  docker-compose -f {{ git_clone_path }}/docker-compose.yml stop
  ensureSuccess
}
# Dispatch on the first CLI argument; unknown commands fall through silently.
case $1 in
  "start")
    printout "Starting {{ app_name }}"
    startApp
    ;;
  "stop")
    printout "Stopping {{ app_name }}"
    stopApp
    ;;
  "restart")
    printout "Restarting {{ app_name }}"
    stopApp
    startApp
    ;;
  *)
esac
| true |
4dc5110d8072753ee8d06445174a577fd7b478cb
|
Shell
|
oliverjam/fac-resources
|
/scripts/init_db.sh
|
UTF-8
| 460 | 3.125 | 3 |
[] |
no_license
|
#! /bin/bash
# Bootstrap the local fac_resources Postgres database, .env file and schema.

# exit immediately on error
set -e

# ANSI colour codes; printf '%b' interprets the \033 escapes (plain bash
# `echo` does not, so the original printed the escape codes literally).
GREEN="\033[0;92m"
RESET="\033[0m"

psql -q -c "CREATE USER fac_resources_user SUPERUSER PASSWORD 'local123'"
printf '%b\n' "${GREEN}Postgres user 'fac_resources_user' created${RESET}"

psql -q -c "CREATE DATABASE fac_resources WITH OWNER fac_resources_user"
printf '%b\n' "${GREEN}Postgres database 'fac_resources' created${RESET}"

# -n: do not overwrite an existing .env
cp -n .env.example .env
printf '%b\n' "${GREEN}'.env' file created${RESET}"

sh ./scripts/build_db.sh
| true |
77823c1dad670e2d55b0b792a4002c8ba242cb6d
|
Shell
|
vhlaca/zvjezdan
|
/mvrecording
|
UTF-8
| 3,354 | 3.890625 | 4 |
[] |
no_license
|
#!/bin/bash
# /etc/init.d/mvrecording.sh
#
#
# verzija datoteke 1.0.1
# datum: 03.06.2015
#
# promjene:
# 1.0.1 03.06.2015
# dodano zapisivanje u bazu za svaku datoteku.
#this is where files are recorded
DIR="/mnt/ramdisk"
#this is where files are stored
DESTINATION="/var/www/html/recording/"
#this is minimal length of a file that will be copied. Smaller files will be deleted
MINFILELENGTH=400
#mysql command
MYSQL="/usr/bin/mysql -u asteriskuser -pasteriskPWD14 asteriskdatabase -e"
#extension length with .
EXTLENGTH=4
#check if directory isnt empty
if [ "$(ls -A "$DIR")" ]; then
case "$1" in
start)
echo "Moving files from ramdisk on start"
#files must not be locked by any process
for file in $(comm -2 -3 <(find $DIR -maxdepth 1 -type f|sort) <(sudo lsof $DIR/* | awk '(NR>1) {print $9}'|sort))
do
#check file length. must be greater then MINFILELENTH otherwise its only deleted
filelength=$(stat -c %s ${file})
if [ ${filelength} -gt ${MINFILELENGTH} ] ; then
#if we want encryption:
#openssl aes-256-cbc -a -salt -in ${file} -out ${DESTINATION}${file##*/}.enc -k $(date +%D)
#if we want only copy
mv ${file} ${DESTINATION}${file##*/}
${MYSQL} "CALL SetRecorded('${file##*/}',${EXTLENGTH})"
echo [`date +"%Y-%m-%d %H:%M"`] Ramdisk copied to HD on start >> /var/log/ramdisk_sync.log
else
rm ${file}
fi
done
;;
sync)
echo "Moving files from ramdisk on sync"
#files must not be locked by any process
for file in $(comm -2 -3 <(find $DIR -maxdepth 1 -type f|sort) <(sudo lsof $DIR/* | awk '(NR>1) {print $9}'|sort))
do
#check file length. must be greater then MINFILELENTH otherwise its only deleted
filelength=$(stat -c %s ${file})
if [ ${filelength} -gt ${MINFILELENGTH} ] ; then
#if we want encryption:
#openssl aes-256-cbc -a -salt -in ${file} -out ${DESTINATION}${file##*/}.enc -k $(date +%D)
#if we want only copy
mv ${file} ${DESTINATION}${file##*/}
${MYSQL} "CALL SetRecorded('${file##*/}',${EXTLENGTH})"
echo [`date +"%Y-%m-%d %H:%M"`] Ramdisk copied to HD on sync>> /var/log/ramdisk_sync.log
else
rm ${file}
fi
done
;;
stop)
echo "Moving files from ramdisk on stop"
#if the system is stopped there shouldn't be any locks. Just list content of directory
for file in $(ls ${DIR}/*)
do
#check file length. must be greater then MINFILELENTH otherwise its only deleted
filelength=$(stat -c %s ${file})
if [ ${filelength} -gt ${MINFILELENGTH} ] ; then
#if we want encryption:
#openssl aes-256-cbc -a -salt -in ${file} -out ${DESTINATION}${file##*/}.enc -k $(date +%D)
#if we want only copy
mv ${file} ${DESTINATION}${file##*/}
${MYSQL} "CALL SetRecorded('${file##*/}',${EXTLENGTH})"
echo [`date +"%Y-%m-%d %H:%M"`] Ramdisk copied to HD on stop>> /var/log/ramdisk_sync.log
fi
done
;;
*)
echo "Usage: /etc/init.d/mvrecording {start|stop|sync}"
exit 1
;;
esac
fi
exit 0
| true |
e215c6279b84319cd970126307bd8a568c30bf37
|
Shell
|
Bae52BRAEVARussell/roamworks_modbus
|
/rw_mod
|
UTF-8
| 773 | 2.921875 | 3 |
[] |
no_license
|
#!/bin/sh /etc/rc.common
# OpenWrt init script for the rw_modbus daemon. rc.common provides the
# start/stop/boot/... dispatch; START=98 runs it late in the boot sequence.
START=98
boot() {
	#create folder
	[ -d /tmp/rw_modbus_data/ ] || mkdir /tmp/rw_modbus_data/
	#killall rw_modbus
	stop
	#start rw_modbus
	# Query the SIM IMSI via AT command; keep only the purely numeric line.
	imsi=`sendat /dev/ttyACM0 AT+CIMI 1 | grep -v [a-zA-Z] | grep [0-9]`
	# An IMSI shorter than 15 digits means no/invalid SIM: log error 2003.
	if [ `expr length "$imsi"` -lt 15 ]; then
		echo -e "2003,`date -u +%y%m%d%H%M%S`,\r" >> /etc/MODBUS_RW_Err_Events.csv
	fi
	/usr/bin/rw_modbus &
}
start() {
	#start rw_modbus
	/usr/bin/rw_modbus &
}
stop() {
	killall rw_modbus
}
reload() {
	#create folder
	[ -d /tmp/rw_modbus_data/ ] || mkdir /tmp/rw_modbus_data/
	stop
	/usr/bin/rw_modbus &
}
restart() {
	#create folder
	[ -d /tmp/rw_modbus_data/ ] || mkdir /tmp/rw_modbus_data/
	stop
	/usr/bin/rw_modbus &
}
reconfiguration_and_restart() {
	start
}
| true |
f67771590e6218df18298c432cc8180dc93e65bd
|
Shell
|
Loeng/yesmywine_ms
|
/dockerImage.sh
|
UTF-8
| 32,762 | 3.4375 | 3 |
[] |
no_license
|
# Initialization step
red='\033[0;31m'
green='\033[0;32m'
yellow='\033[0;33m'
plain='\033[0m'
paas_git_repository="/home/repository/yesmywine"
mall_git_repository="/home/repository/yesmywine_ms"
platform=(
paas
mall
)
paas=(
paas-goods
paas-inventory
paas-user
paas-logistics
paas-sms
paas-email
paas-dic
all
)
mall=(
mall-goods
mall-cart
mall-cms
mall-evaluation
mall-fileupload
mall-inventory
mall-logistics
mall-pay
mall-push
mall-sso
mall-user
mall-orders
mall-activity
all
)
deploy_microservice() {
platform_select
deploy_prepare
}
undeploy_microservice() {
echo "undeploy?"
}
platform_select() {
# if ! deploy_check; then
# echo -e "${red}Error:${plain} Your OS is not supported to run it!"
# echo "Please change to CentOS 6+ and try again."
# exit 1
# fi
clear
while true; do
echo "Which platform you'd select:"
for ((i = 1; i <= ${#platform[@]}; i++)); do
hint="${platform[$i - 1]}"
echo -e "${green}${i}${plain}) ${hint}"
done
read -p "Please enter a number (Default ${platform[0]}):" platform_selected
[ -z "${platform_selected}" ] && platform_selected="1"
case "${platform_selected}" in
1 | 2)
echo
echo "You choose = ${platform[${platform_selected} - 1]}"
echo
break
;;
*)
echo -e "${red}Error:${plain} Please only enter a number [1-2]"
;;
esac
done
}
deploy_prepare() {
if [[ "${platform_selected}" == "1" ]]; then
# repository_check ${paas_git_repository}
microservice_select "${paas[*]}"
config_paas_microservice
elif [ "${platform_selected}" == "2" ]; then
# repository_check ${mall_git_repository}
microservice_select "${mall[*]}"
config_mall_microservice
fi
echo
echo "Press any key to start...or Press Ctrl+C to cancel"
char=$(get_char)
}
get_char() {
    # Read one raw keystroke from the terminal without echoing it:
    # save the tty settings, switch to no-echo/cbreak, read a single byte
    # from /dev/tty, then restore the saved settings.
    SAVEDSTTY=$(stty -g)
    stty -echo
    stty cbreak
    dd if=/dev/tty bs=1 count=1 2>/dev/null
    stty -raw
    stty echo
    stty $SAVEDSTTY
}
whetherUpload() {
    # Ask whether to upload the freshly built image to a remote server.
    # Returns (falls through) on "y"; exits the whole script on "n".
    # Fixes: an empty answer now defaults to "y" as the prompt promises,
    # and invalid input re-prompts instead of breaking out of the loop
    # (the original '*' branch contained a stray `break`).
    local sign
    while true; do
        read -p "Whether upload to remote server(Please enter y or n,Default y):" sign
        case $sign in
        ""|[yY])
            echo "Yes"
            break
            ;;
        [nN])
            echo "Complete!"
            exit
            ;;
        *)
            echo "Invalid input..."
            ;;
        esac
    done
}
microservice_select() {
while true; do
echo -e "Please select microservice for ${platform[${platform_selected} - 1]}:"
microservice_group=($1)
for ((i = 1; i <= ${#microservice_group[@]}; i++)); do
hint="${microservice_group[$i - 1]}"
echo -e "${green}${i}${plain}) ${hint}"
done
read -p "Which microservice you'd select(Default: ${microservice_group[0]}):" microservice_selected
[ -z "$microservice_selected" ] && microservice_selected=1
expr ${microservice_selected} + 1 &>/dev/null
if [ $? -ne 0 ]; then
echo -e "[${red}Error${plain}] Input error, please input a number"
continue
fi
if [[ "$microservice_selected" -lt 1 || "$microservice_selected" -gt ${#microservice_group[@]} ]]; then
echo -e "[${red}Error${plain}] Input error, please input a number between 1 and ${#microservice_group[@]}"
continue
fi
microservice=${microservice_group[$microservice_selected - 1]}
echo
echo "microservice = ${microservice}"
echo
break
done
}
config_paas_microservice() {
    # Build the selected paas-* docker image from its git checkout and
    # optionally upload it to a remote server.
    #
    # Reads globals: microservice_selected (menu index 1-8, validated by
    # microservice_select) and paas_git_repository; calls whetherUpload.
    #
    # Replaces seven copy-pasted branches with one data-driven path. This
    # also fixes two bugs in the original: the "inventory" branch cd'd into
    # .../goods by mistake, and the upload-failure message was executed as
    # a command instead of being echoed.
    local -a dirs=(goods inventory user logistics sms email dictionary)
    local -a images=(paas-goods paas-inventory paas-user paas-logistics paas-sms paas-email paas-dic)

    if [[ "${microservice_selected}" == "8" ]]; then
        # "all" is not implemented yet.
        echo "Developing"
        exit
    fi

    local svc_dir="${paas_git_repository}/${dirs[$((microservice_selected - 1))]}"
    local image="${images[$((microservice_selected - 1))]}"

    echo "Change ${svc_dir}"
    if [ -d "${svc_dir}" ]; then
        cd "${svc_dir}"
        git pull
        if [[ $? -eq 0 ]]; then
            # Rebuild the local image, then ask about uploading it.
            sh runDocker.sh
            whetherUpload
            echo "Change /home/repository/images"
            if [ -d "/home/repository/images" ]; then
                cd /home/repository/images
            else
                echo "No directory,Create it"
                mkdir -p /home/repository/images
                cd /home/repository/images
            fi
            echo "Export image"
            docker save -o "${image}.tar" "${image}"
            echo "Upload image to remote server"
            read -p "Please enter remote ip:" ip
            scp "/home/repository/images/${image}.tar" "${ip}:/home/repository/images"
            if [[ $? -eq 0 ]]; then
                echo "Change machine"
                ssh "${ip}"
            else
                echo "Remote server /home/repository/images not found,Please create it first"
            fi
        else
            echo -e "Error:Uninstall GIT!"
        fi
    else
        echo -e "${red}Error:${plain} ${1} directory not found."
        exit 1
    fi
}
#######################################
# Rebuild and (re)run every PaaS microservice container, in the same
# order as the original hand-unrolled sequence.
# Globals:  paas_git_repository (read), red/plain colour codes (read).
# Outputs:  progress messages; one error line per missing service dir.
#######################################
startAllPaasServer() {
  local svc
  for svc in dictionary email goods sms user inventory logistics; do
    echo
    echo "Change ${paas_git_repository}/${svc}"
    if [ -d "${paas_git_repository}/${svc}" ]; then
      # Quoted: repository path may contain spaces.
      cd "${paas_git_repository}/${svc}"
      sh runDocker.sh
    else
      echo -e "${red}Error:${plain} ${1} directory not found."
    fi
  done
}
#######################################
# Internal helper: pull, rebuild and optionally ship ONE mall service.
# Arguments:
#   $1 - service sub-directory under ${mall_git_repository}
#   $2 - docker image name (also used for the exported tar file name)
#   $3 - prompt shown when asking for the remote ip
# Globals:  mall_git_repository, red, plain; calls whetherUpload (external).
# Exits 1 when the service directory does not exist.
#######################################
_deploy_mall_service() {
  local dir="${mall_git_repository}/${1}" image="${2}" prompt="${3}" ip
  echo "Change ${dir}"
  if [ ! -d "${dir}" ]; then
    echo -e "${red}Error:${plain} ${1} directory not found."
    exit 1
  fi
  cd "${dir}"
  if ! git pull; then
    echo -e "Error:Uninstall GIT!"
    return
  fi
  sh runDocker.sh
  whetherUpload
  echo "Change /home/repository/images"
  if [ ! -d "/home/repository/images" ]; then
    echo "No directory,Create it"
    mkdir -p /home/repository/images
  fi
  cd /home/repository/images
  echo "Export image"
  docker save -o "${image}.tar" "${image}"
  echo "Upload image to remote server"
  read -p "${prompt}" ip
  if scp "/home/repository/images/${image}.tar" "${ip}:/home/repository/images"; then
    echo "Change machine"
    ssh "${ip}"
  else
    # BUG FIX: the original omitted 'echo' here, so this message was
    # EXECUTED as a command instead of being printed.
    echo "Remote server /home/repository/images not found,Please create it first"
  fi
}

#######################################
# Deploy the mall microservice chosen via ${microservice_selected}
# (1..13 = individual service, 14 = all, still under development).
# Globals:  microservice_selected, ymw_microservice, mall_git_repository.
# Returns:  0 for an unknown selection (original fell through silently);
#           exits 1 when the selected service directory is missing.
#######################################
config_mall_microservice() {
  local dir image prompt="Please enter remote ip:"
  case "${microservice_selected}" in
    1)  dir=goods       image=mall-goods ;;
    2)  dir=cart        image=mall-cart ;;
    3)  dir=cms         image=mall-cms ;;
    4)  dir=evaluation  image=mall-evaluation ;;
    5)  dir=fileUpload  image=mall-fileupload ;;
    6)  dir=inventory   image=mall-inventory ;;
    7)  dir=logistics   image=mall-logistics ;;
    8)  dir=pay         image=mall-pay ;;
    9)  dir=push        image=mall-push ;;
    10) dir=sso         image=mall-sso ;;
    11) dir=userservice image=mall-user ;;
    12) dir=orders      image=mall-orders
        # Only the orders branch used this extended prompt.
        prompt="Please enter remote ip(much ip please segmentation by ';'):" ;;
    13) dir=activity    image=mall-activity ;;
    14) # "deploy all" was unfinished in the original; the statements
        # after 'exit' were unreachable dead code and have been removed.
        echo "Developing"
        exit ;;
    *)  return 0 ;;
  esac
  echo "microservice = ${ymw_microservice}"
  _deploy_mall_service "${dir}" "${image}" "${prompt}"
}
#######################################
# Rebuild and (re)run every mall microservice container, in the same
# order as the original hand-unrolled sequence.
# Globals:  mall_git_repository (read), red/plain colour codes (read).
# Outputs:  progress messages; one error line per missing service dir.
#######################################
startAllMallServer() {
  local svc
  for svc in activity cart cms evaluation fileUpload goods inventory \
             logistics orders pay push sso userservice; do
    echo
    echo "Change ${mall_git_repository}/${svc}"
    if [ -d "${mall_git_repository}/${svc}" ]; then
      # Quoted: repository path may contain spaces.
      cd "${mall_git_repository}/${svc}"
      sh runDocker.sh
    else
      echo -e "${red}Error:${plain} ${1} directory not found."
    fi
  done
}
# Entry point: dispatch on the requested action, defaulting to "deploy".
# (${1:-deploy} replaces the fragile unquoted '[ -z $1 ]' test.)
action=${1:-deploy}
case "$action" in
  deploy | undeploy)
    # Calls deploy_microservice / undeploy_microservice defined above.
    "${action}_microservice"
    ;;
  *)
    echo "Arguments error! [${action}]"
    echo "Usage: $(basename "$0") [deploy|undeploy]"
    ;;
esac
| true |
460618166db43d7d5c6472049131b9f2d0727f8f
|
Shell
|
katoni/simple-acme-server
|
/entrypoint.sh
|
UTF-8
| 920 | 3.46875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# Entrypoint for a step-ca based fake ACME CA: on first run generate the
# CA password file and configuration, then exec the container command.
export CONFIG_FILE=${CONFIG_FILE-"/home/step/config/ca.json"}
export PASSWORD_FILE=${PASSWORD_FILE-"/home/step/secrets/password"}
export PASSWORD=${PASSWORD-"password"}
# Create the password file on first boot (expansions quoted: the paths
# are user-overridable and may contain spaces).
if [ ! -f "${PASSWORD_FILE}" ]; then
    mkdir -p "$(dirname "${PASSWORD_FILE}")"
    printf '%s\n' "${PASSWORD}" > "${PASSWORD_FILE}"
fi
if [ -f "${CONFIG_FILE}" ]; then
    echo "Using existing configuration file"
else
    echo "No configuration file found at ${CONFIG_FILE}"
    /usr/local/bin/step ca init --name "Fake Authority" --provisioner admin --dns "ca.internal" --address ":443" --password-file="${PASSWORD_FILE}"
    /usr/local/bin/step ca provisioner add development --type ACME
    # Increase certificate validity for the "development" ACME provisioner.
    # BUG FIX: the original 'echo $(cat … | jq …) > file' word-split the
    # JSON (collapsing whitespace) and truncated the file before reading
    # finished being safe; write via a temp file instead.
    jq '.authority.provisioners[[.authority.provisioners[] | .name=="development"] | index(true)].claims |= (. + {"maxTLSCertDuration":"2160h","defaultTLSCertDuration":"720h"})' config/ca.json > config/ca.json.tmp \
        && mv config/ca.json.tmp config/ca.json
fi
exec "$@"
| true |
61985cd7d12c65acf3ff237e9ff403f602a49eb8
|
Shell
|
infrastlabs/ftpsave-repository
|
/ftp/down1.sh
|
UTF-8
| 1,450 | 3.34375 | 3 |
[] |
no_license
|
#!/bin/bash
# Mirror files from an FTP-backed HTTP repository: fetch a manifest of
# "md5prefix|path" lines, then download each file that is missing or whose
# local md5 (first 8 hex chars) no longer matches the manifest.
repofile="files-repository1.txt"
repourl="http://6.6.6.92"
savepath="/opt/ftp_down" #down>edit, if you need change your savepath before you exec this scripts.
#get files-repository1.txt
# NOTE(review): credentials are hardcoded and visible in process args.
export AUTH=root:root
mkdir -p $savepath && cd $savepath
curl -u $AUTH -s $repourl/$repofile > $repofile
#loop judge: localCached/wget
function eachJudgeDown(){
#cat $repofile
# Split the manifest on newlines only, so paths with spaces survive.
IFS_old=$IFS
IFS=$'\n'
for one in `cat $repofile`
do
# Manifest format: "<md5-8chars>|<relative/path>".
file=`echo $one |cut -d'|' -f2`
md5A=`echo $one |cut -d'|' -f1`
#each file: new-down or exist.
if [ -f "$file" ]; then
# Compare only the first 8 chars of the local md5 with the manifest.
md5B=`md5sum $file |awk '{print $1}'`
md5B=${md5B:0:8}
echo "[md5Compare: "$md5A"~"$md5B"]"
if [ ! "$md5A" = "$md5B" ]; then #judgeMD5
echo "Md5 unmatch(last down broken, or ftp refreshed), re-download file: "$file ## del, reDownload
#rm -f $file #danger! #nodel just ">" reWrite file.
curl -u $AUTH -s $repourl/$file > $file #dir exist already.
fi
else #file not exist
# Create the parent directory before downloading a brand-new file.
mkdir -p ${file%/*} #mkdir
echo "new download file: "$file
curl -u $AUTH -s $repourl/$file > $file
#TODO1 validate MD5
fi
done
IFS=$IFS_old
}
#bigfile: md5>cr32>>sha256
#TODO2 when each downloaded, just save the local-Md5 to a list, to save the time-cost of m5dsum
eachJudgeDown
tree -ah $savepath #view
| true |
3e2b7e63569ec5d1f2426ff071851f04b1170d34
|
Shell
|
bigboards/docker
|
/push.sh
|
UTF-8
| 262 | 3.3125 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Push the per-architecture image for the given repo directory to Docker Hub.
# Usage: push.sh <repo-dir>   (<repo-dir>/vars must define VERSION)
REPO=$1
ARCH=$(uname -m)
DOCKER="/usr/bin/docker"
# Quoted: an unquoted $REPO broke on paths containing spaces.
source "./${REPO}/vars"
IMAGE_NAME="bigboards/${REPO}-${ARCH}:${VERSION}"
echo "Logging into the docker hub"
"${DOCKER}" login
echo "Pushing ${IMAGE_NAME}"
sudo -E "${DOCKER}" push "${IMAGE_NAME}"
| true |
403bb4747c7afa309b5d1ec42769b991db822772
|
Shell
|
Manasse228/webscapping_perl
|
/csv2wget.sh
|
UTF-8
| 352 | 2.96875 | 3 |
[] |
no_license
|
#!/bin/bash
# Generate wget.sh: one "wget" download command per host name listed in
# ./garedetri/02source/csv.csv (one name per line), then prepend a shebang.
rm ./garedetri/05csv/*
rm wget.sh
# read -r keeps backslashes literal; quoting "$line" keeps odd characters
# intact (the original unquoted $line word-split). The unused counter 'i'
# from the original has been dropped.
while IFS= read -r line; do
printf 'wget -t1 --timeout=8 -P ./garedetri/17wget/%s http://www.%s\n' "$line" "$line" >> ./wget.sh
done < ./garedetri/02source/csv.csv
#ajout de 2 lignes en debut de fichiers (add 2 lines at the top of the file)
sed -i 1i'\\' ./wget.sh
sed -i 1i'\#!/bin/bash\' ./wget.sh
echo "yeahh !"
exit
| true |
68a08ebf5410de32526e606b6d5608e38e9e9a24
|
Shell
|
daslerjo/cp4d-deployment
|
/selfmanaged-openshift/upi/mirror-registry/ocs-disconnected.sh
|
UTF-8
| 924 | 2.671875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# Build and mirror the redhat-operators catalog into a private registry
# for disconnected OpenShift 4.6 installs.
# Usage: ocs-disconnected.sh <mirror-registry-dns>
MIRROR_REGISTRY_DNS=$1
AUTH_FILE="./pull-secret.json"
# Fail early when the registry DNS is missing instead of building a
# catalog with an empty destination (original silently produced
# "/olm/redhat-operators:v1").
if [ -z "${MIRROR_REGISTRY_DNS}" ]; then
    echo "Usage: $0 <mirror-registry-dns>" >&2
    exit 1
fi
#Build the catalog for redhat-operators
echo "****************************************"
echo "Build the catalog for redhat-operators"
echo "****************************************"
oc adm catalog build --appregistry-org redhat-operators \
    --from=registry.redhat.io/openshift4/ose-operator-registry:v4.6 \
    --to="${MIRROR_REGISTRY_DNS}/olm/redhat-operators:v1" \
    --registry-config="${AUTH_FILE}" \
    --filter-by-os="linux/amd64" --insecure
#Mirror the catalog for redhat-operators
echo "*******************************************************"
echo "Mirror the catalog for redhat-operators"
echo "This is a long operation, will take more then 5 hours"
echo "*******************************************************"
oc adm catalog mirror "${MIRROR_REGISTRY_DNS}/olm/redhat-operators:v1" \
    "${MIRROR_REGISTRY_DNS}" --registry-config="${AUTH_FILE}" --insecure
| true |
beb973d4cf26105264530ba751ea6dc19532f797
|
Shell
|
anikiandy/cs40
|
/lab14/bkup/guessing.sh
|
UTF-8
| 1,148 | 3.71875 | 4 |
[] |
no_license
|
#!/bin/bash
# Price guessing game: pick a prize (TV/fridge/auto), then guess its
# randomly generated price within 60 seconds using higher/lower hints.
clear
CONTINUE=0
while [ $CONTINUE = 0 ]
do
# Menu loop: repeat until a valid choice (1, 2 or 3) is entered.
while [[ $CHOICE != "1" ]] && [[ $CHOICE != "2" ]] && [[ $CHOICE != "3" ]]
do
clear
cat << EOF
~~~~~~GUESSING GAME~~~~~~~~
1. COLOR TV
2. FRIDGE
3. AUTO
What is you choice? (1,2,3)
EOF
read -r CHOICE
done
WIN=0
D=0
GUESS=0
TIMER=0
# Generate the secret price; the range depends on the prize chosen.
case $CHOICE in
1)
D=$(($RANDOM%301+200));;
2)
D=$(($RANDOM%301+600));;
3)
D=$(($RANDOM%1001+500));;
esac
# The 60-second timer starts when the first guess is entered.
echo -e "You have 1 minute to guess the price\nWhat is your first guess? \c"
read -r GUESS
TIMER=$(date +%s)
# BUG FIX: a correct FIRST guess never entered the loop below, so WIN
# stayed 0 and the game wrongly reported "Time ran out!".
if [[ $GUESS = $D ]]; then
WIN=1
fi
####~~~~Guessing Loop~~~~######
while [[ $GUESS != $D ]] && [ $(($(date +%s)-$TIMER)) -le 60 ]
do
# Tell the player which direction to move.
delta=$(($D-$GUESS))
if [ $delta -gt 0 ]; then
echo -e "Guess higher: \c"
read -r GUESS
else
echo -e "Guess lower: \c"
read -r GUESS
fi
if [[ $GUESS = $D ]]; then
WIN=1;
fi
done
if [ $WIN = 0 ]; then
echo "Time ran out!"
else
echo "YOU WIN!"
fi
# Replay prompt.
yn=0
while [[ $yn != 'y' ]] && [[ $yn != 'n' ]]
do
echo -e "Play again? (y/n): \c"
read -r yn
done
if [[ $yn == "n" ]]; then
CONTINUE=1;
fi
GUESS=0
D=0
CHOICE=0
done
| true |
58a77119d43a784e9a7f24080fc96a7d8753998b
|
Shell
|
RACELAND-Automacao-Lda/openipc-2.1
|
/general/overlay/init
|
UTF-8
| 891 | 3.359375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# Early-boot init shim: mount a writable layer (jffs2, falling back to
# tmpfs) over the read-only rootfs, pivot into the merged view, then hand
# control to the real /sbin/init. Any failure path execs init directly.
# Trap handler: on exit (success or failure), drop /proc and exec init.
on_exit()
{
mountpoint -q /proc && umount /proc
exec /sbin/init $*
}
trap on_exit 0
mount -t proc proc /proc || exit
# Bail out to plain init when the kernel has no overlay support at all.
grep -q overlay /proc/filesystems || exit
# Find the mtd partition labelled rootfs_data and mount it as jffs2;
# fall back to a RAM-backed tmpfs when that fails.
mount -t jffs2 /dev/$(awk -F ':' '/rootfs_data/ {print $1}' /proc/mtd | sed 's/mtd/mtdblock/') /overlay || \
mount -t tmpfs tmpfs /overlay || exit
# Legacy "overlayfs" (old kernels) has no workdir; modern "overlay" does.
if grep -q overlayfs /proc/filesystems; then
mount -t overlayfs overlayfs -o lowerdir=/,upperdir=/overlay,ro /mnt \
|| { umount /overlay; exit; }
else
overlay_rootdir=/overlay/root
overlay_workdir=/overlay/work
mkdir -p ${overlay_rootdir} ${overlay_workdir}
mount -t overlay overlay \
-o lowerdir=/,upperdir=${overlay_rootdir},workdir=${overlay_workdir} /mnt \
|| { umount /overlay; exit; }
fi
# Swap the merged view in as the new root; the old root lands on /rom.
pivot_root /mnt /mnt/rom
# Move the already-mounted pseudo/working filesystems into the new root.
mount -o noatime,move /rom/proc /proc
mount -o noatime,move /rom/dev /dev
mount -o noatime,move /rom/overlay /overlay
| true |
fe78e62e5c160e02832ecd18dc4bddc01e4a7793
|
Shell
|
seansilvestri/k8s-log-forwarder
|
/docker-entrypoint.sh
|
UTF-8
| 357 | 2.75 | 3 |
[] |
no_license
|
#!/bin/bash
# Container entrypoint: substitute the Logstash endpoint into the filebeat
# config, start filebeat in the background, then stream pod logs with
# kubetail into the file filebeat ships from.
sed -i "s/#LOGSTASH_URI#/${LOGSTASH_URI}/g" /etc/filebeat/filebeat-kubetail.yml
filebeat -e -c filebeat-kubetail.yml &
# KUBE_TAIL_OPTIONS arrives as a single quoted string; the echo|tr pipe
# strips the embedded double quotes so the unquoted $() deliberately
# word-splits the result into separate kubetail arguments.
# Optionally filter the stream with $FILTER before appending to the log.
if [[ ! -z "$FILTER" ]]; then
./kubetail $(echo \"$KUBE_TAIL_OPTIONS\" | tr -d '\"') | grep "$FILTER" >> /var/log/kubetail.log
else
./kubetail $(echo \"$KUBE_TAIL_OPTIONS\" | tr -d '\"') >> /var/log/kubetail.log
fi
| true |
91cee5b18107bf21838e79ecf81a181f36888c7e
|
Shell
|
ae97013/sandbox
|
/setup/bash/bashrc
|
UTF-8
| 917 | 2.8125 | 3 |
[
"Unlicense"
] |
permissive
|
# .bashrc
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
# User specific aliases and functions
# .bashrc
# Extend PATH with local and site-specific tool directories.
export PATH="$PATH:/usr/local/bin"
export PATH="$PATH:/usr/software/bin:/usr/software/utils/bin"
export PATH="$PATH:/usr/software/rats/bin:/usr/software/test/bin"
export PATH="$PATH:$HOME/sandbox/scripts"
export MANPATH="$MANPATH:/usr/local/man:/usr/man:/usr/man/X11:/usr/dt/man"
## Primary prompt
export PS1="[\u@\h \W]\$ "
#export PS1="`hostname`% "
#export PS1="\[\033[32;40m\]\h:\u\wi:% "
#export PS1="\u@\h% "
## Secondary prompt
#export PS2="> "
## Prompt 3
#export PS3="#? "
## Prompt 4
#export PS4="+"
## Commandline
#set -o vi
export PAGER='/usr/bin/less'
alias q='exit'
alias pine='alpine -i'
alias cls='clear'
alias dir='ls -lF --color'
alias f='finger'
alias vi='vim'
# BUG FIX: the original used single quotes ('$HOME/sandbox'), which kept
# $HOME literal, so "echo $SB" / "cd $SB" resolved to a bogus path.
export SB="$HOME/sandbox"
alias cdsb="cd $SB"
## System limits
#ulimit -c unlimited
umask 022
| true |
489339da2a215a2d1f973f510d28ae9239fa9139
|
Shell
|
maniacs-oss/remotestories
|
/scripts/deploy.sh
|
UTF-8
| 513 | 2.59375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Split the 'api' and 'web' subtrees out of this monorepo and force-push
# each one to its own GitHub repository, authenticating with $GH_TOKEN.
set -e

# Store the CI token in a file-backed credential helper so the pushes
# below do not prompt.
git config credential.helper "store --file=.git/credentials"
echo "https://${GH_TOKEN}:@github.com" > .git/credentials

# Register the 'api' remote on first run, then push that subtree.
if ! git remote | grep api > /dev/null; then
  git remote add api https://github.com/ayrbot/remotestories-api.git
fi
git push api "$(git subtree split --prefix api master)":master --force

# Same for the 'web' subtree.
if ! git remote | grep web > /dev/null; then
  git remote add web https://github.com/ayrbot/remotestories-web.git
fi
git push web "$(git subtree split --prefix web master)":master --force
| true |
7798b649791801a220d02778d1fe2070520f0cb4
|
Shell
|
simedw/Kandidat
|
/testresults/extract.sh
|
UTF-8
| 217 | 2.59375 | 3 |
[] |
no_license
|
#!/bin/sh
# Extract "(index) value" pairs for optimisation $2 from log $1 and print
# them as two tab-separated columns via paste.
# Usage: extract.sh <logfile> <optimise-name>
# (Useless use of cat removed; "$1" quoted so an unset/空 argument no
# longer makes cat hang reading stdin.)
grep "Optimise: $2" -- "$1" | awk '{print $4}' | cut -d ')' -f 1 > indexes
grep "Optimise: $2" -- "$1" | cut -d ')' -f 2 | awk -F "," '{print $2}' > values
paste indexes values
#rm indexes
#rm values
| true |
b19c477a3fdcb306e2be9b4b06fbf90c3d1cb512
|
Shell
|
peterliu2/script
|
/install_apach_protable_runtime/install_apach_protable_runtime.sh
|
UTF-8
| 845 | 3.171875 | 3 |
[] |
no_license
|
#! /bin/bash
# Program:
#       install apach protable runtime
#
# Downloads, builds and installs APR 1.5.2 and then apr-util 1.5.4 from
# source into the default prefix (/usr/local/apr); requires sudo for the
# install steps. set -e aborts on the first failing command.
set -e
# go somewhere safe
cd /tmp
# get the source to base APR 1.5.2
curl -L -O http://apache.stu.edu.tw/apr/apr-1.5.2.tar.gz
# extract it and go into the source
tar -xzvf apr-1.5.2.tar.gz
cd apr-1.5.2
# configure, make, make install
./configure
make
sudo make install
# reset and cleanup
cd /tmp
rm -rf apr-1.5.2 apr-1.5.2.tar.gz
# do the same with apr-util
cd /tmp
curl -L -O http://apache.stu.edu.tw/apr/apr-util-1.5.4.tar.gz
# extract it and go into the source
tar -xzvf apr-util-1.5.4.tar.gz
cd apr-util-1.5.4
# configure, make, make install
./configure --with-apr=/usr/local/apr
# you need that extra parameter to configure because apr-util can't really find it because...who knows.
make
sudo make install
# cleanup
cd /tmp
rm -fr apr-util-1.5.4 apr-util-1.5.4.tar.gz
| true |
49383634f09e16376d42d6ec588b5f5fde87a322
|
Shell
|
roberto-cano/openshift
|
/install/fix-certificate.sh
|
UTF-8
| 5,489 | 2.96875 | 3 |
[] |
no_license
|
#!/bin/bash -x
# ./install/fix-certificate.sh
#########################################################################
# Copyright (C) 2020 Sebastian Francisco Colomar Bauza                  #
# SPDX-License-Identifier: GPL-2.0-only                                 #
#########################################################################
# Replace the default OpenShift ingress controller with one generated
# from a template, then create a CloudFormation stack that rewires the
# certificate / load balancer for the worker nodes.
caps=CAPABILITY_IAM ;
ClusterName=$ClusterName ;
HostedZoneName=sebastian-colomar.es #PLEASE CUSTOMIZE#
Identifier=$Identifier ;
s3name=docker-aws #PLEASE CUSTOMIZE#
s3region=ap-south-1 #PLEASE CUSTOMIZE#
template=cloudformation.yaml ;
#########################################################################
# Fetch the ingress-controller template and substitute cluster values.
wget https://raw.githubusercontent.com/secobau/openshift/master/install/ingresscontroller-template.yaml ;
sed --in-place s/ClusterName/$ClusterName/ \
    ingresscontroller-template.yaml ;
sed --in-place s/HostedZoneName/$HostedZoneName/ \
    ingresscontroller-template.yaml ;
#########################################################################
# Recreate the default ingress controller from the customised template.
oc get ingresscontrollers/default \
    --namespace=openshift-ingress-operator \
    --output=yaml \
    1> ingresscontroller.yaml ;
oc delete -f ingresscontroller.yaml ;
oc create -f ingresscontroller-template.yaml ;
#########################################################################
# Discover worker instances, public subnets and the VPC by cluster tags.
InstanceWorker1=$( \
    aws ec2 describe-instances \
        --filter "Name=tag:Name,Values=$ClusterName-*-worker-${s3region}a*" \
        --output text \
        --query "Reservations[].Instances[].InstanceId" \
    ) ;
InstanceWorker2=$( \
    aws ec2 describe-instances \
        --filter "Name=tag:Name,Values=$ClusterName-*-worker-${s3region}b*" \
        --output text \
        --query "Reservations[].Instances[].InstanceId" \
    ) ;
InstanceWorker3=$( \
    aws ec2 describe-instances \
        --filter "Name=tag:Name,Values=$ClusterName-*-worker-${s3region}c*" \
        --output text \
        --query "Reservations[].Instances[].InstanceId" \
    ) ;
SubnetPublic1=$( \
    aws ec2 describe-subnets \
        --filter "Name=tag:Name,Values=$ClusterName-*-public-${s3region}a" \
        --output text \
        --query "Subnets[].SubnetId" \
    ) ;
SubnetPublic2=$( \
    aws ec2 describe-subnets \
        --filter "Name=tag:Name,Values=$ClusterName-*-public-${s3region}b" \
        --output text \
        --query "Subnets[].SubnetId" \
    ) ;
SubnetPublic3=$( \
    aws ec2 describe-subnets \
        --filter "Name=tag:Name,Values=$ClusterName-*-public-${s3region}c" \
        --output text \
        --query "Subnets[].SubnetId" \
    ) ;
VpcCidrBlock=$( \
    aws ec2 describe-vpcs \
        --filter "Name=tag:Name,Values=$ClusterName-*-vpc" \
        --output text \
        --query "Vpcs[].CidrBlockAssociationSet[].CidrBlock" \
    ) ;
VpcId=$( \
    aws ec2 describe-vpcs \
        --filter "Name=tag:Name,Values=$ClusterName-*-vpc" \
        --output text \
        --query "Vpcs[].VpcId" \
    ) ;
#########################################################################
# The VPC's default security group, looked up by the VpcId found above.
VpcDefaultSecurityGroup=$( \
    aws ec2 describe-security-groups \
        --filter "Name=vpc-id,Values=$VpcId" "Name=group-name,Values=default" \
        --output text \
        --query "SecurityGroups[].GroupId" \
    ) ;
#########################################################################
# Upload the CloudFormation template to S3 and create the stack.
wget https://raw.githubusercontent.com/secobau/openshift/master/install/$template ;
s3domain=$s3name.s3.$s3region.amazonaws.com ;
stack=$ClusterName-openshift-fix-certificate ;
template_url=https://$s3domain/$ClusterName/$template ;
aws s3 cp $template s3://$s3name/$ClusterName/$template ;
aws cloudformation create-stack \
    --capabilities \
        $caps \
    --parameters \
        ParameterKey=ClusterName,ParameterValue=$ClusterName \
        ParameterKey=HostedZoneName,ParameterValue=$HostedZoneName \
        ParameterKey=Identifier,ParameterValue=$Identifier \
        ParameterKey=InstanceWorker1,ParameterValue=$InstanceWorker1 \
        ParameterKey=InstanceWorker2,ParameterValue=$InstanceWorker2 \
        ParameterKey=InstanceWorker3,ParameterValue=$InstanceWorker3 \
        ParameterKey=SubnetPublic1,ParameterValue=$SubnetPublic1 \
        ParameterKey=SubnetPublic2,ParameterValue=$SubnetPublic2 \
        ParameterKey=SubnetPublic3,ParameterValue=$SubnetPublic3 \
        ParameterKey=VpcId,ParameterValue=$VpcId \
        ParameterKey=VpcCidrBlock,ParameterValue=$VpcCidrBlock \
        ParameterKey=VpcDefaultSecurityGroup,ParameterValue=$VpcDefaultSecurityGroup \
    --stack-name \
        $stack \
    --template-url \
        $template_url \
    --output \
        text
#########################################################################
# BUG FIX: the closing reminder used unquoted parentheses, which is a
# bash syntax error that aborted the script; the strings are now quoted.
# (The stray trailing '\' after 'text' above, which glued the comment
# ruler onto the command line, was also removed.)
echo "After running the previous script you need to open ports 80 and 1936"
echo "internally for the workers. You also need to open port 443 externaly"
echo "(open to the world) for the workers."
#########################################################################
| true |
c76187319238c24601e0a98baca86299c11fa535
|
Shell
|
mattkingston/dotfiles
|
/lib/npm.sh
|
UTF-8
| 987 | 3.78125 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Update npm itself, then upgrade all globally installed packages.
# No-op when npm is not installed; progress goes to ~/.dotfiles.log.
npm_update() {
  command -v 'npm' > /dev/null || return 0
  echo "Attempting npm update. Please wait" >> ~/.dotfiles.log
  npm install -g npm >> ~/.dotfiles.log
  print_result $? 'NPM update'
  echo "Attempting npm upgrade npm packages. Please wait" >> ~/.dotfiles.log
  npm update -g >> ~/.dotfiles.log
  print_result $? 'NPM upgrade all'
}
# Point npm at the public registry. No-op when npm is not installed.
npm_config_registry() {
  command -v 'npm' > /dev/null || return 0
  npm config set registry "http://registry.npmjs.org/" >> ~/.dotfiles.log
  print_result $? 'NPM Configure registry'
}
# Persist NPM_RC in the local bashrc (replacing any previous block) and
# export it for the current session as well.
#   $1 - path to the npmrc file
npm_set_npmrc() {
  local rc_path="$1"
  local stream

  # Drop any previously written NPM_RC block before adding a fresh one.
  if content_block_exists_in_file "NPM_RC" "${BASH_RC_LOCAL}"; then
    content_block_remove "NPM_RC" "${BASH_RC_LOCAL}"
  fi

  stream="$(content_block_stream_new NPM_RC)"
  stream_add "${stream}" "export NPM_RC='${rc_path}'"
  content_block_stream_write "${stream}" "${BASH_RC_LOCAL}"

  # Export now too, so the setting takes effect without a new shell.
  export NPM_RC="${rc_path}"
}
| true |
095f38d3a0a4f1f54795e083f3448cc20a770398
|
Shell
|
Inobtenio/CoronaPlotter
|
/core/bash_scripts/plot_total.sh
|
UTF-8
| 3,717 | 3 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Plot total COVID-19 cases for one country with an exponential fit.
#
#   $1 - country name (matched against a column of total_history.csv,
#        lower-cased here)
#   $2 - number of days to shift "today" backwards (defaults to 0)
#
# Prints the relative path of the generated PNG on stdout.

# Quote the tr sets: unquoted [A-Z]/[a-z] are shell globs and could be
# expanded against single-letter files in the current directory (SC2060).
country_label="$(tr '[:upper:]' '[:lower:]' <<< "$1")"
# Default to 0 so the $days_ago arithmetic inside the gnuplot script stays
# syntactically valid when no second argument is given.
days_ago=${2:-0}
gnuplot <<- EOF
set print "-"
data_file_name = '../data/total_history.csv'
time_format = '%m-%d-%y'
today_date = strftime(time_format, time(0)-18000)
output_file_relative_path = sprintf('plots/total/${country_label} ${days_ago} - %s.png', today_date)
output_file_name = sprintf('../%s', output_file_relative_path)
set output output_file_name
set datafile separator ','
set grid
set border lw 1 lc rgb 'grey'
set xtics textcolor rgb 'grey' font ', 8'
set ytics textcolor rgb 'grey'
set key textcolor rgb 'grey'
set title textcolor rgb 'grey'
set size ratio 0.45
set title 'COVID-19 Incidence in ${1}'
set terminal pngcairo enhanced background rgb 'black' size 720, 640
set ylabel '' tc rgb 'grey'
set xlabel '' tc rgb 'grey'
set style fill solid 0.3
set key left
set style fill solid 0.3
set offsets graph 0.1, 2, 20, 0
set grid xtics, ytics
set key top left
set timefmt '%m/%d/%y'
set xdata time
set format x '%b %d'# time
set table 'dummy'
plot data_file_name using (start_string=stringcolumn('${country_label}')):1 every ::0::1 w table
unset table
time_format = '%m/%d/%y'
days_in_the_future = 4
today_float = strptime(time_format, strftime(time_format, time(0)-18000)) - $days_ago*86400
today_string = strftime(time_format, today_float)
yesterday_float = today_float - 1*86400
yesterday_string = strftime(time_format, today_float-1*86400)
start_float = strptime(time_format, start_string)
end_string = strftime(time_format, strptime(time_format, strftime(time_format, time(0)-18000))+days_in_the_future*86400)
end_float = strptime(time_format, end_string)
q = (today_float - start_float)/86400/21
N = q <= 1.0 ? 1 : ceil(q)
delta = int((end_float - today_float)/86400) - 1
days_plotted = $days_ago <= q*20 ? 1 : int(q*20/$days_ago)
not_greater_than_today(x) = today_float >= strptime(time_format, x) ? x : NaN
days(x) = (strptime(time_format, x)-start_float)/86400.0
is_in_range(x) = start_float == strptime(time_format, x) || (strptime(time_format, x) == strptime(time_format, strftime(time_format, time(0)-18000)) || (ceil(days(x))%N == 0)) ? x : NaN
is_zero(x) = x == 0 ? NaN : x
a = 1
b = 1e-6
f(x) = a*exp(b*int((x-start_float)/86400+1))
fit [start_float:today_float] f(x) data_file_name using 1:'${country_label}' via a,b
cases_at(x) = int(f(today_float + (x)*86400))
date_after(x) = strftime(time_format, today_float + x*86400)
array A[delta]
array B[delta]
do for [i=1:delta] {
A[i] = date_after(i)
B[i] = cases_at(i)
}
set label 1 at graph 0.237, graph 0.793 'Expected' tc rgb 'orange' front
set label 2 at graph 0.162, graph 0.725 sprintf('Doubling every %.2f days',(log(2)/b)) tc rgb 'grey' front
set xrange[start_float:end_float]
set samples (end_float - start_float)/86400.0 + 1
plot \
f(x) w l lc rgb 'red' ti sprintf('f(x) = %0.4fe^{(%0.4fx)}', a, b), \
\
data_file_name using (is_in_range(stringcolumn(1))):'${country_label}' w lp pt 7 lc rgb 'blue' ti 'Total confirmed cases', \
\
'' using (is_in_range(stringcolumn(1))):(is_zero(column('${country_label}'))):'${country_label}' with labels textcolor rgb 'grey' font ', 7' offset char 0,0.8 notitle, \
\
today_float < x && x < end_float ? f(x) : 1/0 w p pt 7 lc rgb 'yellow' ti ' ', \
\
B using (A[\$1]):(B[\$1]):(sprintf('%.0f', B[\$1])) every days_plotted with labels textcolor rgb 'orange' font ', 8' offset char 0,2 notitle, \
\
'' using 1:'${country_label}' ti ' ' lc rgb '#00000000' ps 0
print(output_file_relative_path)
EOF
| true |
42f7f71e4e7163c128e297ad9051017d736408c8
|
Shell
|
rbgrouleff/dotfiles
|
/bash/path
|
UTF-8
| 486 | 2.515625 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Build up PATH (and related search paths) for interactive shells.

# Add Homebrew stuff to PATH
if [ -d /usr/local/Cellar ]; then
  export PATH=/usr/local/bin:$PATH
  export PATH=$PATH:/usr/local/sbin
fi
export PATH="$HOME/bin:$PATH"
export PATH="$HOME/.cargo/bin:$PATH"
export PATH="/Applications/Vice/tools:$PATH"
# Only append yarn's global bin dir when yarn is installed: the previous
# unconditional `yarn global bin` printed an error and left a trailing ":"
# (an empty PATH entry, i.e. the current directory) on machines without yarn.
if command -v yarn > /dev/null 2>&1; then
  export PATH="$PATH:$(yarn global bin)"
fi
# Setup some NODE_PATH stuff - is it even needed?!
export NODE_PATH=$NODE_PATH:/usr/local/lib/node_modules
export CLASSPATH=/Applications/KickAssembler/KickAss.jar:$CLASSPATH
| true |
e10dd99f493d5e82a6ee4f027142254c44e3cb81
|
Shell
|
scitokens/scitokens-cpp
|
/debian/get-orig-source.sh
|
UTF-8
| 1,080 | 3.84375 | 4 |
[
"Apache-2.0",
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] |
permissive
|
# Generate a source tarball including submodules.
#
# Usage: get-orig-source.sh <tag-or-branch>
# Produces scitokens-cpp_<ver>.orig.tar.gz in the current directory.
if [ -z "${1}" ] ; then
  echo 'No tag or branch given'
  exit 1
fi
ver=${1}
# Remove initial v from tag name for use in filenames (v1.2.3 -> 1.2.3)
if [ "${ver:0:1}" = 'v' ] ; then
  fver=${ver:1}
else
  fver=${ver}
fi
if [ -r "scitokens-cpp_${fver}.orig.tar.gz" ] ; then
  echo "scitokens-cpp_${fver}.orig.tar.gz already exists"
  exit 1
fi
curdir=$(pwd)
# Work in a scratch directory; bail out early if it cannot be created or
# entered instead of cloning into an unexpected location.
tdir=$(mktemp -d) || exit 1
cd "${tdir}" || exit 1
git clone https://github.com/scitokens/scitokens-cpp.git
cd scitokens-cpp || exit 1
git checkout "${ver}"
if [ $? -ne 0 ] ; then
  echo "No such tag or branch: ${ver}"
  cd "${curdir}"
  rm -rf "${tdir}"
  exit 1
fi
git archive --prefix "scitokens-cpp_${fver}/" "${ver}" -o "${tdir}/scitokens-cpp_${fver}.orig.tar"
git submodule update --init
# Archive every submodule with its path prefix and append it to the main
# tarball so the orig tarball is self-contained.
git submodule foreach --recursive "git archive --prefix scitokens-cpp_${fver}/\$path/ \$sha1 -o ${tdir}/\$sha1.tar ; tar -A -f ${tdir}/scitokens-cpp_${fver}.orig.tar ${tdir}/\$sha1.tar ; rm ${tdir}/\$sha1.tar"
cd "${tdir}" || exit 1
gzip "scitokens-cpp_${fver}.orig.tar"
mv "scitokens-cpp_${fver}.orig.tar.gz" "${curdir}"
cd "${curdir}"
rm -rf "${tdir}"
| true |
b05474b8a1b146dd7a7b8db2d9ccf0ec950cb1f0
|
Shell
|
jestra52/twitter-sentiment-analysis
|
/get_full_dataset.sh
|
UTF-8
| 1,036 | 3.875 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Merge the per-review text files of an IMDB-style dataset into one file
# per split. You should run this only ONCE if there's no "movie_data" folder.
#
# Usage:
#   ./get_full_dataset.sh TRAIN_DATA_DIR TEST_DATA_DIR
#
# Note: each data dir must contain pos and neg subdirectories.
TRAIN_DATA_DIR=$1
TEST_DATA_DIR=$2
RESULT_DIR='movie_data'

# flatten_file FILE
# Print FILE's content as a single line, collapsing runs of whitespace to
# one space. Same result as the old `echo $(cat FILE)`, but without
# letting the shell glob-expand wildcards (*, ?) in the review text.
flatten_file() {
    local -a words=()
    read -r -d '' -a words < "$1" || true
    printf '%s\n' "${words[*]}"
}

if [ ! -d "$RESULT_DIR" ]; then
    echo "Created '$RESULT_DIR' folder"
    mkdir "$RESULT_DIR"
fi

# $TRAIN_DATA_DIR / $TEST_DATA_DIR are intentionally unquoted: an empty
# argument then drops out of the loop instead of producing a "" entry.
for split in $TRAIN_DATA_DIR $TEST_DATA_DIR; do
    for sentiment in pos neg; do
        for file in "$split/$sentiment"/*; do
            echo -e "Merging \t $file \t in \t $RESULT_DIR/full_$(basename "$split").txt"
            flatten_file "$file" >> "$RESULT_DIR/full_$(basename "$split").txt"
        done
    done
done
| true |
b4d0d620808c78cbc824b91ec9bc8d42c3d84a8e
|
Shell
|
ryenus/git-relnotes
|
/git-relnotes
|
UTF-8
| 794 | 3.625 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# git-relnotes: browse git release notes.
#   -l     list release tags published on GitHub
#   -L     list RelNotes files on the master branch
#   [VER]  show the notes for VER (default: the installed git version)
set -e
case "$1" in
  -l)
    curl -s "https://github.com/git/git/releases" \
      | sed -n -E 's,.*"/git/git/releases/tag/v([-0-9.rc]+)".*,\1,p'
    ;;
  -L)
    curl -s "https://github.com/git/git/tree/master/Documentation/RelNotes/" \
      | awk -F'[><]' '/\.txt</ {print substr($5,1,length($5) - 4)}' \
      | sort -V | column -x
    ;;
  *)
    version="${1:-$(git version | cut -d' ' -f3)}"
    version="${version#v}"    # drop a leading 'v': v2.16 -> 2.16
    version="${version%%-*}"  # drop any RC suffix: 2.16.0-rc1 -> 2.16.0
    # two-component versions get a trailing .0 appended: 2.16 -> 2.16.0 (bashism)
    [ "${version//[0-9]}" = "." ] && version="${version}.0"
    curl -sL "https://github.com/git/git/raw/master/Documentation/RelNotes/${version}.txt" \
      | less -NRwXj9 -p '(?<=^ )\*(?= )' +jgn
    ;;
esac
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.