blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
---|---|---|---|---|---|---|---|---|---|---|---|
a302e4af9052f303eea1655b478d99d92ee2febe
|
Shell
|
daodaoliang/uird
|
/initrd/usr/bin/unsquashfs
|
UTF-8
| 445 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/bash
# Minimal unsquashfs replacement for the initrd: mounts a squashfs/xzm
# image (via mount_device from /livekitlib) and copies its contents into
# a destination directory.
#
# Usage: unsquashfs [-f] [-d destdir] image.xzm
#   -f  force overwrite (appended to cp's -a flag)
#   -d  destination directory (default: current directory)
force=""
filexzm=""
destdir=$(pwd)

# Pick the last argument that names an existing file as the image.
for a in "$@" ; do
  [ -f "$a" ] && filexzm=$a
done

# Legacy getopt re-splits the arguments for the simple -f/-d parsing below;
# anything unrecognized (including the "--" separator) stops the loop.
set -- $(getopt "fd:" "$@")
while [ -n "$1" ] ; do
  case "$1" in
    -f) force="f"; shift ;;
    -d) destdir="$2"; shift 2 ;;
    *) break;;
  esac
done

# Nothing to do when no image file was supplied (quoted so paths with
# spaces don't break the test).
[ -f "$filexzm" ] || exit

. /livekitlib
mkdir -p /tmp/tmpmntdir
mount_device "$filexzm" /tmp/tmpmntdir
# "-a$force" expands to "-a" or "-af".
cp -a"$force" /tmp/tmpmntdir/* "${destdir}/"
umount /tmp/tmpmntdir
rm -rf /tmp/tmpmntdir
| true |
db2d771439a0709666feb306b4d3f2c087a5d9d3
|
Shell
|
cloudfoundry/capi-ci
|
/ci/bosh/create_capi_release.sh
|
UTF-8
| 1,129 | 3.546875 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Builds a CAPI bosh release tarball (with retries) and moves it into
# created-capi-release/. Expects cloud_controller_ng/, capi-release/ and
# next-version/version in the working directory.
set -e
source ~/.bashrc

VERSION=$(cat next-version/version)

# When building from a branch, pin cloud_controller_ng to that checkout.
pushd cloud_controller_ng
  if [ -n "$CC_BRANCH" ]; then
    CC_COMMIT_SHA=$(git rev-parse HEAD)
  fi
popd

pushd capi-release
  CAPI_COMMIT_SHA=$(git rev-parse HEAD)
  pushd src/cloud_controller_ng
    # Fall back to the submodule's pinned commit when no branch was given.
    if [ -z "$CC_COMMIT_SHA" ]; then
      CC_COMMIT_SHA=$(git rev-parse HEAD)
    fi
    git fetch
    git checkout "$CC_COMMIT_SHA"
  popd

  # Blob downloads are flaky; retry up to 5 times.
  for i in {1..5}; do
    echo "Syncing blobs, attempt $i"
    bosh sync-blobs --sha2 --parallel=10 && break
  done

  ./scripts/unused_blobs

  TARBALL_NAME=capi-${VERSION}-${CAPI_COMMIT_SHA}-${CC_COMMIT_SHA}.tgz

  for i in {1..5}; do
    echo "Creating release, attempt $i"
    # BUG FIX: under `set -e` a failing `bosh create-release` aborted the
    # script before the status could be read, and ${PIPESTATUS[0]} only
    # describes a pipeline (which this is not). Capture the exit status
    # explicitly so the retry loop actually retries.
    EXIT_STATUS=0
    bosh create-release --sha2 --tarball="$TARBALL_NAME" --version "$VERSION" --force || EXIT_STATUS=$?
    if [ "$EXIT_STATUS" = "0" ]; then
      break
    fi
  done

  if [ ! "$EXIT_STATUS" = "0" ]; then
    echo "Failed to create CAPI release"
    exit $EXIT_STATUS
  fi

  if [ ! -f "$TARBALL_NAME" ]; then
    echo "No release tarball found"
    exit 1
  fi
popd

mv "capi-release/$TARBALL_NAME" created-capi-release/
| true |
3fc485b9545426814e2d13d6e46f69074fc229ca
|
Shell
|
030/ansible-firefox
|
/latest_version.sh
|
UTF-8
| 1,460 | 3.453125 | 3 |
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash -e
# Bumps the pinned Firefox version and tarball checksum in
# defaults/main.yml to the latest upstream release, commits and pushes the
# change, then creates a new git tag via an external helper script.
update() {
# Latest released Firefox version from Mozilla's product-details API.
readonly LATEST_VERSION=$(curl -s https://product-details.mozilla.org/1.0/firefox_versions.json | jq -r .LATEST_FIREFOX_VERSION)
# Version currently pinned in the role defaults.
# NOTE(review): VERSION and CHECKSUM are computed but never used below —
# presumably leftovers from an earlier "compare before updating" step;
# confirm before removing.
readonly VERSION=$(docker run -v ${PWD}:/ansible-firefox utrecht/go-yq:2.1.0 .firefox_version /ansible-firefox/defaults/main.yml)
# sha512 of the latest linux-x86_64 en-US tarball, extracted from the
# upstream SHA512SUMS file by stripping the trailing filename column.
readonly LATEST_CHECKSUM=sha512:$(curl https://ftp.mozilla.org/pub/firefox/releases/${LATEST_VERSION}/SHA512SUMS | grep linux-x86_64/en-US/firefox-${LATEST_VERSION}.tar.bz2 | sed -e "s| linux-x86_64/en-US/firefox-${LATEST_VERSION}.tar.bz2$||g")
readonly CHECKSUM=$(docker run -v ${PWD}:/ansible-firefox utrecht/go-yq:2.1.0 .firefox_checksum /ansible-firefox/defaults/main.yml)
git checkout master
git pull origin master
# Rewrite the two pinned values in place.
sed -i "s|firefox_version: \".*\"|firefox_version: \"${LATEST_VERSION}\"|" defaults/main.yml
sed -i "s|firefox_checksum: sha512:.*|firefox_checksum: ${LATEST_CHECKSUM} # yamllint disable-line rule:line-length|" defaults/main.yml
git config user.name 030
git config user.email chocolatey030@gmail.com
# Only commit/push when sed actually changed the file.
CHANGED=$(git status defaults/main.yml | grep modified || true)
if [ -n "$CHANGED" ]; then
git add defaults/main.yml
git commit -m "Updated checksum and version to respectively '${LATEST_CHECKSUM}' and '${LATEST_VERSION}'"
git push origin master
fi
}
# Runs an external gist script that increments and pushes a git tag.
tag() {
bash <(curl -s https://gist.githubusercontent.com/030/db4d7c5495d751a1001fc425e1f14302/raw/24c59d676da1f18427fd6959f1fd5c6fedb2e532/increment_tag.sh)
}
main() {
update
tag
}
main
| true |
9e2e48055bdcad689eb7fca9a5bd30912089e314
|
Shell
|
EpiphanyMachine/dotfiles
|
/setup-new-machine.sh
|
UTF-8
| 2,086 | 2.890625 | 3 |
[] |
no_license
|
##############################################################################################################
### XCode Command Line Tools
# thx https://github.com/alrra/dotfiles/blob/ff123ca9b9b/os/os_x/installs/install_xcode.sh
# Install the Command Line Tools only when xcode-select has no active path.
if ! xcode-select --print-path &> /dev/null; then
  # Prompt user to install the XCode Command Line Tools
  xcode-select --install &> /dev/null
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Wait until the XCode Command Line Tools are installed
  until xcode-select --print-path &> /dev/null; do
    sleep 5
  done
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Point the `xcode-select` developer directory to
  # the appropriate directory from within `Xcode.app`
  # https://github.com/alrra/dotfiles/issues/13
  sudo xcode-select -switch /Applications/Xcode.app/Contents/Developer
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Prompt user to agree to the terms of the Xcode license
  # https://github.com/alrra/dotfiles/issues/10
  sudo xcodebuild -license
fi
##############################################################################################################
### homebrew!
# (if your machine has /usr/local locked down (like google's), you can do this to place everything in ~/.homebrew
# NOTE(review): $HOMEBREW_HOME is presumably exported by .exports — confirm
# it is set before the mkdir below, otherwise this runs `mkdir` with no args.
source .exports
mkdir $HOMEBREW_HOME && curl -L https://github.com/mxcl/homebrew/tarball/master | tar xz --strip 1 -C $HOMEBREW_HOME
# install all the things
brew bundle
# setup custom dns-crypt settings
rm $HOMEBREW_HOME/etc/dnscrypt-proxy.toml
# NOTE(review): $dir is never defined in this script — the symlink source
# resolves to "/dnscrypt-proxy.toml"; confirm the intended variable.
ln -s $dir/dnscrypt-proxy.toml ~/dnscrypt-proxy.toml
sudo brew services start dnscrypt-proxy
./symlink-setup.sh
# setup correct git url for this repo (downloaded without being logged int)
git remote remove origin
git remote add origin git@github.com:EpiphanyMachine/dotfiles.git
# Final install instructions
echo Install the dnscrypt-proxy bitbar plugin
echo https://getbitbar.com/plugins/Network/dnscrypt-proxy-switcher.10s.sh
echo
echo Install / Copy the gpg and ssh keys
echo
echo DONE
| true |
48a1fc8772fa6b11873fa4e460f640bbafe9dfb8
|
Shell
|
TaylorGrover/func
|
/deploy.sh
|
UTF-8
| 433 | 2.640625 | 3 |
[] |
no_license
|
#! /bin/bash
# Copies the local Python modules into the interpreter's library directory.
# The target depends on the CPU architecture:
#   aarch64 -> Termux python3.7, x86_64 -> system python3.6.

# `uname -m` prints the machine architecture directly; the original
# `lscpu | grep | cut -f9` pipeline silently produced an empty string when
# the field position shifted, leaving DIRECTORY unset and copying files
# into "/".
ARCH=$(uname -m)

if [ "$ARCH" = "aarch64" ]; then
    DIRECTORY=/data/data/com.termux/files/usr/lib/python3.7
elif [ "$ARCH" = "x86_64" ]; then
    DIRECTORY=/usr/lib/python3.6
else
    echo "Unsupported architecture: $ARCH" >&2
    exit 1
fi

for module in chem.py func.py stats.py linear_regression.py; do
    cp "$module" "${DIRECTORY}/$module"
done
| true |
9117e4c99f57cd19ba2a23fff5860f1eeecc762d
|
Shell
|
titilambert/ambassador
|
/build-aux/tests/prelude.bats
|
UTF-8
| 4,065 | 3.328125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bats
# Bats test-suite for build-aux/prelude.mk. Each test writes a scratch
# Makefile that includes prelude.mk (and sometimes var.mk), runs `make`,
# and asserts on the produced output/files. Helper functions such as
# check_expr_eq / check_executable come from the sibling "common" file.
load common
# joinlist should join words with the given separator.
@test "prelude.mk: joinlist with separator" {
check_expr_eq echo '$(call joinlist,/,foo bar baz)' 'foo/bar/baz'
}
# An empty separator simply concatenates the words.
@test "prelude.mk: joinlist without separator" {
check_expr_eq echo '$(call joinlist,,foo bar baz)' 'foobarbaz'
}
@test "prelude.mk: quote.shell" {
# This test relies on the fact that 'var.mk' is implemented
# using `quote.shell`.
cat >>Makefile <<-'__EOT__'
include build-aux/prelude.mk
include build-aux/var.mk
define actual
some'string"with`special characters)
and newlines and tabs
and 2 trailing newlines
endef
all: $(var.)actual
__EOT__
make
# Expected bytes of .var.actual: quote.shell must round-trip quotes,
# backticks, newlines/tabs and the trailing blank line intact.
printf 'some'\''string"with`special characters)\nand newlines\tand tabs\nand 2 trailing newlines\n\n' > expected
diff -u expected build-aux/.var.actual
}
# lazyonce must evaluate its body exactly once (first expansion).
@test "prelude.mk: lazyonce" {
# GNU Make 3.81 lacks the features lazyonce depends on.
if [[ "$(make --version | head -n1)" == 'GNU Make 3.81' ]]; then
skip
fi
cat >>Makefile <<-'__EOT__'
include build-aux/prelude.mk
var = $(call lazyonce,var,$(info eval-time)value)
$(info before)
$(info a: $(var))
$(info b: $(var))
$(info c: $(var))
all: noop
noop: ; @true
.PHONY: noop
__EOT__
make > actual
# "eval-time" appears once, even though $(var) is expanded three times.
printf '%s\n' > expected \
'before' \
'eval-time' \
'a: value' \
'b: value' \
'c: value'
diff -u expected actual
}
# build-aux.dir must resolve to the build-aux directory itself.
@test "prelude.mk: build-aux.dir" {
cat >>Makefile <<-'__EOT__'
include build-aux/prelude.mk
include build-aux/var.mk
all: $(var.)build-aux.dir
__EOT__
make
# Check that it points to the right place
[[ "$(cat build-aux/.var.build-aux.dir)" -ef build-aux ]]
}
# build-aux.bindir must resolve to build-aux/bin, as an absolute path.
@test "prelude.mk: build-aux.bindir" {
cat >>Makefile <<-'__EOT__'
include build-aux/prelude.mk
include build-aux/var.mk
all: $(build-aux.bindir) $(var.)build-aux.bindir
__EOT__
make
# Check that it points to the right place
[[ "$(cat build-aux/.var.build-aux.bindir)" -ef build-aux/bin ]]
# Check that it's absolute
[[ "$(cat build-aux/.var.build-aux.bindir)" == /* ]]
}
# FLOCK should prefer a system flock; otherwise a Go fallback is built.
@test "prelude.mk: FLOCK" {
if type flock &>/dev/null; then
check_executable prelude.mk FLOCK
else
check_go_executable prelude.mk FLOCK
[[ "$FLOCK" != unsupported ]] || return 0
fi
if which flock &>/dev/null; then
[[ "$FLOCK" == "$(which flock)" ]]
fi
if [[ -n "$build_aux_expected_FLOCK" ]]; then
[[ "$FLOCK" == "$build_aux_expected_FLOCK" ]]
fi
# TODO: Check that $FLOCK behaves correctly
}
@test "prelude.mk: COPY_IFCHANGED" {
check_executable prelude.mk COPY_IFCHANGED
# TODO: Check that $COPY_IFCHANGED behaves correctly
}
@test "prelude.mk: MOVE_IFCHANGED" {
check_executable prelude.mk MOVE_IFCHANGED
# TODO: Check that $MOVE_IFCHANGED behaves correctly
}
@test "prelude.mk: WRITE_IFCHANGED" {
check_executable prelude.mk WRITE_IFCHANGED
# TODO: Check that $WRITE_IFCHANGED behaves correctly
}
@test "prelude.mk: TAP_DRIVER" {
check_executable prelude.mk TAP_DRIVER
# TODO: Check that $TAP_DRIVER behaves correctly
}
# `make clobber` must remove everything `make all` created under
# build-aux/ so the tree is git-clean again.
@test "prelude.mk: clobber" {
if ! [[ -e build-aux/.git ]]; then
# Because we check `git clean -ndx` to make sure
# things are clean.
skip
fi
(cd build-aux && git clean -fdx)
cat >>Makefile <<-'__EOT__'
include build-aux/prelude.mk
include build-aux/var.mk
all: $(COPY_IFCHANGED) $(MOVE_IFCHANGED) $(WRITE_IFCHANGED) $(TAP_DRIVER)
__EOT__
[[ -d build-aux ]]
[[ ! -d build-aux/bin ]]
make all
[[ -d build-aux/bin ]]
[[ -f build-aux/bin/copy-ifchanged && -x build-aux/bin/copy-ifchanged ]]
[[ -n "$(cd build-aux && git clean -ndx)" ]]
make clobber
[[ -d build-aux ]]
[[ ! -d build-aux/bin ]]
[[ -z "$(cd build-aux && git clean -ndx)" ]]
}
@test "prelude.mk: build-aux.bin-go.rule" {
# TODO
}
# Targets depending on FORCE rebuild every run; others stay untouched.
@test "prelude.mk: FORCE" {
cat >>Makefile <<-'__EOT__'
include build-aux/prelude.mk
all: without-force with-force
without-force: ; touch $@
with-force: FORCE ; touch $@
__EOT__
make
cp -a with-force with-force.bak
cp -a without-force without-force.bak
sleep 2
make
ls -l
[[ with-force -nt with-force.bak ]]
[[ ! without-force -nt without-force.bak ]]
[[ ! without-force -ot without-force.bak ]]
}
| true |
713a43515a53c4ca2c0d6364e4eb4d7f334430e6
|
Shell
|
shahar3000/vim_cfg
|
/install.sh
|
UTF-8
| 1,670 | 3.34375 | 3 |
[] |
no_license
|
#!/bin/bash -e
# Builds and installs ccglue (cscope/ctags cross-reference glue) from a
# pinned upstream commit, then removes the temporary checkout.
install_ccglue() {
    local ccglue_path=/tmp/ccglue
    git clone https://github.com/giraldeau/ccglue.git $ccglue_path
    pushd $ccglue_path
    # Pin to a known-good commit rather than upstream HEAD.
    git checkout 3fa724b17d854c359e380cc9ee2ad61756696e31
    autoreconf -i
    ./configure
    make -j8
    sudo make install
    popd
    rm -rf $ccglue_path
}
# Refresh apt, drop the distro-provided vim packages, and pull in the
# build dependencies needed to compile vim from source.
install_perquisites() {
    local obsolete_vim_pkgs="vim vim-runtime gvim vim-tiny vim-common vim-gui-common vim-nox"
    local build_deps="ruby-dev libperl-dev python3.6-dev exuberant-ctags cmake cscope automake autoconf libtool"

    sudo apt update
    # Unquoted on purpose: the package lists must word-split.
    sudo apt remove -y $obsolete_vim_pkgs
    sudo apt install -y $build_deps
}
# Builds vim v8.1.2300 from source with ruby/python/perl/lua support and
# installs it to /usr/local. Also ensures ~/.bashrc upgrades TERM from
# xterm to xterm-256color.
install_vim() {
    local vim_path=/tmp/vim
    # BUG FIX: these strings were double-quoted, so $TERM expanded at
    # *install* time and the literal text "$TERM" never made it into
    # .bashrc (the appended line compared e.g. "xterm-256color == xterm").
    # Single quotes keep $TERM literal; the grep pattern additionally
    # escapes the regex metacharacters ([, ], $).
    local xterm_regex='if \[\[ \$TERM == xterm \]\]; then export TERM=xterm-256color; fi'
    local xterm_cmd='if [[ $TERM == xterm ]]; then export TERM=xterm-256color; fi'
    local bashrc_path="/home/$(whoami)/.bashrc"

    git clone https://github.com/vim/vim.git $vim_path
    pushd $vim_path
    git checkout v8.1.2300
    ./configure --with-features=huge \
        --enable-multibyte \
        --enable-rubyinterp=yes \
        --enable-pythoninterp=yes \
        --with-python-config-dir=/usr/lib/python3.6/config-3.6m-x86_64-linux-gnu \
        --enable-perlinterp=yes \
        --enable-luainterp=yes \
        --enable-cscope \
        --prefix=/usr/local
    make -j8
    sudo make install
    popd
    rm -rf $vim_path

    # Append the TERM upgrade line exactly once.
    grep -q "^$xterm_regex$" $bashrc_path || echo "$xterm_cmd" >> $bashrc_path
}
# Registers the freshly built /usr/local/bin/vim as the system-wide
# default for both `editor` and `vi` via update-alternatives.
set_vim_default_editor() {
    sudo update-alternatives --install /usr/bin/editor editor /usr/local/bin/vim 1
    sudo update-alternatives --set editor /usr/local/bin/vim
    sudo update-alternatives --install /usr/bin/vi vi /usr/local/bin/vim 1
    sudo update-alternatives --set vi /usr/local/bin/vim
}
# Main sequence: deps -> build vim -> register it -> build ccglue.
install_perquisites
install_vim
set_vim_default_editor
install_ccglue
| true |
7630a0a967cf96ee9415b12298eb04d7a16d7161
|
Shell
|
petronny/aur3-mirror
|
/dzip/PKGBUILD
|
UTF-8
| 647 | 2.59375 | 3 |
[
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
# Contributor: Charles Lindsay <charles@chaoslizard.org>
# Maintainer: Nathan O <ndowens.aur at gmail dot com>
pkgname=dzip
pkgver=2.9
pkgrel=3
pkgdesc="A lightweight file compression program originally for Quake demos"
arch=('i686' 'x86_64')
url="http://speeddemosarchive.com/dzip/"
license=('custom')
depends=('glibc')
# BUG FIX: the source URL was single-quoted, so ${pkgname} was never
# expanded and the literal string was fetched. Double quotes expand it.
source=("http://speeddemosarchive.com/${pkgname}/dz29src.zip" 'license.txt')
md5sums=('b02d69c7c6ee491380d77f26c6f5a6e0' '81a34dd783e5bfc8d3cb634c439e133b')
build() {
  cd ${srcdir}
  # Upstream ships per-platform makefiles; use the Linux one.
  install -D Makefile.linux Makefile
  make
  install -Dm 755 dzip ${pkgdir}/usr/bin/dzip
  install -Dm 644 license.txt ${pkgdir}/usr/share/licenses/${pkgname}/LICENSE
}
| true |
4c395aa376afb274b53526e01d5485d9a24c73fd
|
Shell
|
hayk99/SistLegados1
|
/ATM Cobol_backup/ficheros BANK1/fixDisplay.sh
|
UTF-8
| 409 | 3.046875 | 3 |
[] |
no_license
|
#!/bin/bash
# Author: Hayk Kocharyan
# Rewrites old-style COBOL "DISPLAY (line col)" / "ACCEPT (line col)"
# statements into "DISPLAY ... LINE l COL c" form and writes the result
# to fixed/<name>.cbl.
#
# Usage: fixDisplay.sh <input.cbl> <output-name>

# BUG FIX: validate the argument count *before* using $2 — the original
# built and echoed $filename from a possibly-missing argument first.
if [ $# -ne 2 ]; then
    echo "Usage: $0 <input-file> <output-name>" >&2
    exit 1
fi

filename=fixed/$2.cbl
echo "$filename"

# Three passes: quoted-literal DISPLAYs, remaining DISPLAYs, then ACCEPTs
# (which carry an extra trailing operand). The first sed reads the input
# directly instead of the original `cat $1 |`.
sed 's/DISPLAY *( *\([0-9]*\) * \([0-9]*\) *) *\("\."*\)/DISPLAY \3 LINE \1 COL \2/g' "$1" |
sed 's/DISPLAY *( *\([0-9]*\) * \([0-9]*\) *) *\([^\.]*\)/DISPLAY \3 LINE \1 COL \2/g' \
| sed 's/ACCEPT *( *\([0-9]*\) * \([0-9]*\) *) *\([^ ]*\) *\([^\.\n]*\)/ACCEPT \3 LINE \1 COL \2 \4/' > "$filename"
| true |
0fffef52ac2bc6a37fb5baeb6ace08329f7e2f21
|
Shell
|
swp-fu-eid/eid-fu-swp
|
/init_ci.sh
|
UTF-8
| 1,852 | 3.640625 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash -
# CI bootstrap for the eID system: generates self-signed certs, builds the
# .env file, registers the domain in /etc/hosts and docker-compose.
CERT_DIR=./certs
DOMAIN=eid.local
# First two octets of this host's primary IPv4 (e.g. "192.168").
IP4_PREFIX=$(hostname -I | cut -d' ' -f 1 | cut -d. -f 1,2)
# Create a self-signed RSA-2048 cert/key pair valid for one year:
# $1 is the basename for the generated <name>.key / <name>.cer files.
function createCert {
NAME_PREFIX=$1
openssl req -newkey rsa:2048 -nodes -keyout $NAME_PREFIX.key -x509 -days 365 -out $NAME_PREFIX.cer -subj "/C=DE/ST=Berlin/L=Berlin/O=eid-fu-swp/OU=eid-fu-swp/CN=$DOMAIN"
}
# Append one line (typically KEY=VALUE) to the .env file in the current
# working directory.
addToEnv() {
    local entry="$1"
    echo "$entry" >> .env
}
# Register $DOMAIN -> $1 (an IP address) as an extra_hosts entry in
# docker-compose.test.yml in the current working directory.
addToCompose() {
    local ip="$1"
    local compose_file=docker-compose.test.yml
    echo " extra_hosts:" >> "$compose_file"
    echo " - \"$DOMAIN:$ip\"" >> "$compose_file"
}
echo :Remove existing setup files
# NOTE(review): without -f this prints errors when the files are absent
# (stdout is redirected, stderr is not) — presumably intentional best-effort.
rm .env {www,MAIN}.{cer,key} > /dev/null
echo :Shutdown the system if running and cleanup
sudo docker-compose down
sudo docker volume rm eidfuswp_db_mysql
echo :Create .env file
addToEnv "BOILERPLATE_DOMAIN=$DOMAIN"
addToEnv "BOILERPLATE_IPV4_16PREFIX=$IP4_PREFIX"
addToEnv "BOILERPLATE_IPV6_SUBNET=bade:affe:dead:beef:b011::/80"
addToEnv "BOILERPLATE_IPV6_ADDRESS=bade:affe:dead:beef:b011:0642:ac10:0080"
addToEnv "BOILERPLATE_WWW_CERTS=$CERT_DIR"
# Both secrets are md5(date); the sleep guarantees they differ.
addToEnv "BOILERPLATE_API_SECRETKEY=$(date | md5sum | cut -d' ' -f 1)"
sleep 1
addToEnv "BOILERPLATE_DB_PASSWORD=$(date | md5sum | cut -d' ' -f 1)"
cat .env.test >> .env
echo "#################### .env ####################"
cat .env
echo "##############################################"
# Map the fixed .0.128 host address to the test domain.
echo "Make sure the line '$IP4_PREFIX.0.128<tab>$DOMAIN' is in you /etc/hosts"
echo -e "\n$IP4_PREFIX.0.128\t$DOMAIN" | sudo tee --append /etc/hosts
echo Check hosts file
echo "################# /etc/hosts #################"
sudo cat /etc/hosts
echo "##############################################"
echo :Add IP to docker-compose.test.yml
addToCompose "$IP4_PREFIX.0.128"
sudo cat docker-compose.test.yml
echo :Create ssl certifactes
createCert MAIN
createCert www
# Move the generated cert material into place, owned by root.
sudo mv {MAIN,www}.{cer,key} "$CERT_DIR/"
sudo chown root:root $CERT_DIR/{MAIN,www}.{cer,key}
| true |
c4fb23a6b4618b9bafbb71e13f9c5634e30a5dc0
|
Shell
|
bbonsign/.dotfiles
|
/bin/calendar
|
UTF-8
| 621 | 3.265625 | 3 |
[] |
no_license
|
#! /bin/bash
# Shows cal(1) output as a desktop notification (dunstify), with today's
# day number underlined/bolded via Pango markup. "-3" shows a 3-month view.
# Highlight and notify: $1 is the cal option (e.g. "-1"/"-3"),
# $2 the month count (>= 3 means highlight every occurrence of today).
send_notification() {
TODAY=$(date '+%-d')
HEAD=$(cal "$1" | head -n1)
# NOTE(review): both branches set FOOT to the same string — presumably the
# padding was meant to differ between 1- and 3-month layouts; confirm.
if [ "$2" -ge 3 ]; then
# Multi-month view: replace every occurrence (sed -z treats the whole
# output as one string so the replacement spans lines).
BODY=$(cal "$1" | tail -n7 | sed -z "s|$TODAY|<u><b>$TODAY</b></u>|g")
FOOT="\n "
else
# Single month: only highlight the first occurrence.
BODY=$(cal "$1" | tail -n7 | sed -z "s|$TODAY|<u><b>$TODAY</b></u>|1")
FOOT="\n "
fi
# The x-canonical hint replaces a previous calendar popup in place.
dunstify -h string:x-canonical-private-synchronous:calendar \
"$HEAD" "$BODY$FOOT" -u NORMAL
}
handle_action() {
send_notification "-${DIFF}" "${DIFF}"
}
# NOTE(review): no default arm — any other argument leaves DIFF unset and
# cal receives a bare "-"; confirm whether a *) fallback was intended.
case $1 in
"curr") DIFF=1;;
"-3") DIFF=3;;
"") DIFF=1;;
esac
handle_action
| true |
7f9125ba065a68fb9eb2e5c56a2740549159d562
|
Shell
|
nichuanfang/config-server
|
/linux/bash/step1/config_network.sh
|
UTF-8
| 1,819 | 2.578125 | 3 |
[] |
no_license
|
#!/bin/bash
# OS network setup: installs iftop, fixes /etc/hosts for the given
# hostname/IP, and applies TCP tuning (BBR + fq) via /etc/sysctl.conf.
# Usage: config_network.sh <hostname> <ip-address>
echo "==================================================操作系统网络配置相关脚本,开始执行....."
# Clear stale apt/dpkg locks left behind by interrupted package operations.
sudo rm -f /var/lib/dpkg/lock-frontend
sudo rm -f /var/lib/dpkg/lock
sudo rm -f /var/cache/apt/archives/lock
sudo rm -f /var/lib/dpkg/lock-frontend
sudo apt-get install iftop -y
echo '本地主机名解析设置...'
# Point the 127.0.1.1 entry at the new hostname ($1).
sed -i "s/127.0.1.1\s.\w.*$/127.0.1.1 $1/g" /etc/hosts
# BUG FIX: the sed expression was missing its closing "/" delimiter
# ("s/…/$2 $1" is an unterminated `s' command and made sed fail).
# NOTE(review): "\$(hostname -I)" is escaped, so grep/sed match the
# *literal text* "$(hostname -I)" in /etc/hosts — confirm whether the
# actual IP was intended here instead.
grep -q "^\$(hostname -I)\s.\w.*$" /etc/hosts && sed -i "s/\$(hostname -I)\s.\w.*$/$2 $1/g" /etc/hosts || echo "$2 $1" >>/etc/hosts
cat /etc/hosts
# Note: the DNS overrides below never took effect, so they were abandoned.
# echo '系统DNS域名解析服务设置...'
# cat /dev/null >/etc/resolv.conf
# cat <<EOF >/etc/resolv.conf
# nameserver 127.0.0.1
# options edns0
# search localdomain
# nameserver 8.8.4.4
# nameserver 223.5.5.5
# nameserver 223.6.6.6
# EOF
# cat /dev/null >/etc/NetworkManager/NetworkManager.conf
# cat <<EOF >/etc/NetworkManager/NetworkManager.conf
# [main]
# plugins=ifupdown,keyfile
# dns=none
# [ifupdown]
# managed=false
# [device]
# wifi.scan-rand-mac-address=no
# EOF
# Linux kernel tuning: larger backlogs, faster connection reuse, and the
# BBR congestion controller with the fq queueing discipline.
cat > /etc/sysctl.conf << EOF
net.core.somaxconn = 65535
net.ipv4.tcp_max_syn_backlog = 65535
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_fin_timeout = 30
net.ipv4.tcp_keepalive_time = 1200
net.ipv4.tcp_keepalive_probes = 5
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.tcp_slow_start_after_idle = 0
net.ipv4.tcp_notsent_lowat=16384
net.ipv4.tcp_mtu_probing = 1
net.ipv4.tcp_max_tw_buckets = 50000
net.core.default_qdisc = fq
net.ipv4.tcp_congestion_control = bbr
EOF
sysctl -p
# sudo systemctl daemon-reload
# sudo systemctl stop systemd-resolved
# sudo systemctl start systemd-resolved
# systemd-resolve --status
echo "==================================================操作系统网络配置完成!"
| true |
9e3e951ddf8e512bfbf795cb012e84d3d2cf4093
|
Shell
|
vadimr/vagrant-base-box
|
/bootstrap.sh
|
UTF-8
| 1,421 | 2.984375 | 3 |
[] |
no_license
|
#!/bin/bash
# Vagrant base-box provisioner: adds swap, installs build/runtime
# packages, then (as the vagrant user) sets up MySQL grants and rbenv/ruby.
set -e
# Create swapfile of 1GB with block size 1MB
dd if=/dev/zero of=/swapfile bs=1024 count=1048576
# Set up the swap file
mkswap /swapfile
# Enable swap file immediately
swapon /swapfile
# Enable swap file on every boot
echo '/swapfile swap swap defaults 0 0' >> /etc/fstab
apt-get update
apt-get install -y git-core
apt-get install -y build-essential
# apt-get install -y openjdk-7-jdk
# Suppress the interactive mysql-server root-password prompt.
export DEBIAN_FRONTEND=noninteractive
apt-get install -y mysql-server
# apt-get install -y mongodb
# apt-get install -y redis-server
# apt-get install -y memcached
apt-get install -y npm
# Debian/Ubuntu ship the binary as "nodejs"; many tools expect "node".
ln -s `which nodejs` /usr/bin/node
apt-get install -y libssl-dev libreadline-dev zlib1g-dev
apt-get install -y libmysqlclient-dev
apt-get install -y libmagickwand-dev imagemagick
# Everything below runs as the vagrant user; the quoted here-doc delimiter
# keeps $-expansion on the inner shell's side.
exec sudo -i -u vagrant /bin/bash - <<'AS_VAGRANT'
mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO 'MAUKA_DEV'@'localhost' WITH GRANT OPTION;"
mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO 'MAUKA_DEV'@'%' WITH GRANT OPTION;"
git clone https://github.com/rbenv/rbenv.git ~/.rbenv
git clone https://github.com/rbenv/ruby-build.git ~/.rbenv/plugins/ruby-build
echo 'export PATH="$HOME/.rbenv/bin:$PATH"' >> ~/.bashrc
echo 'eval "$(rbenv init -)"' >> ~/.bashrc
PATH="$HOME/.rbenv/bin:$PATH"
eval "$(rbenv init -)"
RV=2.3.0
rbenv install $RV
rbenv global $RV
rbenv rehash
gem install bundler
AS_VAGRANT
| true |
605aba4f66bed0cae4d908d32753a5e9f079df73
|
Shell
|
win-stub/YoutubeWhistleblower
|
/RUN_LINGUISTIQUE/test.sh
|
UTF-8
| 295 | 2.734375 | 3 |
[] |
no_license
|
# Demonstration: split a '+'-delimited string into the positional
# parameters by temporarily swapping IFS.
x='bob+smith+william ghol+steve'

saved_ifs=$IFS   # remember the default internal field separator
IFS='+'          # split on '+' only (embedded spaces stay inside a field)
set -- $x        # unquoted on purpose: word-splitting does the work
IFS=$saved_ifs   # put the separator back

var1=$1
var2=$2
var3=$3
var4=$4

printf '%s\n' "var1=$var1" "var2=$var2" "var3=$var3" "var4=$var4"
| true |
39b76ce34fa52aad72ef6882cbaf78a8a0cd6800
|
Shell
|
Shelvak/random
|
/bins/create_tanga_pod
|
UTF-8
| 289 | 2.671875 | 3 |
[] |
no_license
|
#!/bin/bash
# Recreates the "tanga" dev pod: deletes the old pod, regenerates its
# manifest via createPodwispro.sh, applies it, waits for readiness, and
# opens an interactive shell inside it.
NAME=tanga
DIR="`(cd $(dirname "$0") && pwd)`"   # absolute directory of this script
kubectl delete pod $NAME
cd /tmp
rm -f $NAME.yml
# Answer createPodwispro.sh's interactive prompts: "1", the pod name,
# and one empty line.
echo -ne "1\n$NAME\n\n" | $DIR/createPodwispro.sh
# NOTE(review): the file removed above is "$NAME.yml" but the one applied
# is "$NAME.yaml" — confirm which extension createPodwispro.sh writes.
kubectl -n default apply -f $NAME.yaml
kubectl wait --for=condition=ready --timeout=60s pod $NAME
kubectl exec -it $NAME bash
| true |
25b05506b8f93f4d277a31af7d2ac6bf012b8540
|
Shell
|
subbink-capitar/fwmacro
|
/distro/deb/debian/python-fwmacro.preinst
|
UTF-8
| 186 | 2.71875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# Debian preinst: verify setuptools' easy_install is present, then use it
# to install the python dependencies of fwmacro.
set -e
# `command -v` is the POSIX-specified way to test for a program; `which`
# is an external tool whose presence and exit status are not guaranteed.
if ! command -v easy_install >/dev/null 2>&1; then
	echo "Cannot find easy_install. Please install python-setuptools"
	exit 1
fi
easy_install netaddr
easy_install plex
exit 0
| true |
26f9c83a9d7eea212085b87bf47477e7231badaf
|
Shell
|
brentleyjones/dotfiles
|
/moom/setup.sh
|
UTF-8
| 362 | 3 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Imports the saved Moom preferences (defaults.plist) into macOS defaults.
set -euo pipefail

# Run from the directory containing this script.
cd "${BASH_SOURCE[0]%/*}" || exit 1

source ../scripts/functions.sh

# NOTE(review): src/dst are computed but never used below, and dst points
# at the VS Code settings dir — looks like copy-paste from another setup
# script; confirm before removing.
src="$(osx_realpath .)"
dst="$(osx_realpath ~/Library/Application\ Support/Code/User)"

info "setting up Moom"

if defaults import com.manytricks.Moom defaults.plist; then
  success "successfully set up Moom"   # fixed typo: was "Moon"
else
  error "failed to set up Moom"        # fixed typo: was "Moon"
fi
| true |
3cfb1c1af84dcba2f0c54a11f72dce484d45f09d
|
Shell
|
rickb777/sqlapi
|
/pgxapi/pgtest.sh
|
UTF-8
| 777 | 3.625 | 4 |
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash -e
# Setup
# -----
# This script can run some of the tests against real PostgreSQL. Therefore,
# it is necessary to create test accounts in each one beforehand.
# These all assume the existence of either
# postgres:postgres@/postgres
# test:test@/test
# Always run relative to this script's directory.
cd "$(dirname $0)"
PATH=$HOME/go/bin:$PATH
export PGHOST=localhost
# Optional leading -v enables verbose `go test` output.
if [[ $1 = "-v" ]]; then
V=-v
shift
fi
# "travis" selects the CI account; otherwise fall back to test:test unless
# the caller already exported PGUSER.
if [[ $1 = "travis" ]]; then
export PGDATABASE='postgres'
export PGUSER='postgres'
export PGPASSWORD=''
elif [[ -z $PGUSER ]]; then
export PGDATABASE='test'
export PGUSER='test'
export PGPASSWORD='test'
fi
# Run the suite twice, varying the identifier-quoting mode under test.
# "||:" keeps the script alive if `go clean` fails (older go versions).
echo
echo "PGX (no quotes)...."
go clean -testcache ||:
PGQUOTE=none go test $V ./...
echo
echo "PGX (ANSI)...."
go clean -testcache ||:
PGQUOTE=ansi go test $V ./...
| true |
2920db6ce895bd2a0eb162238eac9a2c70a2f69f
|
Shell
|
super-Colin/100_days
|
/first-50-days/011__git-clone.sh
|
UTF-8
| 352 | 2.640625 | 3 |
[] |
no_license
|
# 12/21/19
# Learning Bash
# Re-clones the WordPress theme repo into the themes folder, replacing any
# existing checkout.

# MOUNT THE THEMES FOLDER — abort instead of cloning into whatever the
# current directory happens to be when the path is missing.
cd /var/www/html/wp-content/themes || exit 1

# DELETE OLD VERSION (quoted so an unexpected value can't glob/split).
FILE=/var/www/html/wp-content/themes/git_repo-theme
if [ -e "$FILE" ]; then
  rm -rf "$FILE"
fi

# CLONE REPO
sudo git clone https://github.com/super-Colin/git_repo-theme.git
# WILL ASK FOR GIT USER AND PASS HERE
# GIT USER
# GIT PASS
| true |
ba3ffec0811dc181831602d2cb993358d1f67d1a
|
Shell
|
guoku/docker
|
/entity_spider/clean.sh
|
UTF-8
| 434 | 3.15625 | 3 |
[] |
no_license
|
# Rebuilds the entity-spider docker images from scratch.
# Arg $1: "all" additionally removes the shared base image first.
BASE_DOCKER_NAME='gkspider_base'
echo $1
if [ "$1" == "all" ];
then
docker rmi $BASE_DOCKER_NAME
fi
# Build the base image only when it does not already exist.
if docker images | grep $BASE_DOCKER_NAME
then echo 'spider base image exist !!'
else docker build -t $BASE_DOCKER_NAME ./base_docker
fi
docker-compose stop
# Remove every entityspider container and image, then rebuild the stack.
docker ps -a | grep entityspider | awk '{print $1}' | xargs docker rm
docker images | grep entityspider | awk '{print $3}' | xargs docker rmi -f
docker-compose build
| true |
c229df6bd624137fb3225e2a179825bb8fe136e8
|
Shell
|
Sunspar/dotfiles
|
/setup.sh
|
UTF-8
| 982 | 3.75 | 4 |
[
"ISC"
] |
permissive
|
#! /usr/bin/env bash
# Sunspar's Dotfiles and environment setup scripts.
#
# This script determines the desired playbook, and then executes it, bootstrapping ansible if required.
# Currently we only support macOS as thats my main machine, although adding linux distro support should
# be relatively straightforward -- differences would mostly be the source folders for config files and
# differences in package management.

# Arg $1 determines what base playbook to run.
# BUG FIX: `local` is only legal inside a function; at script top level
# bash aborts the statement with "local: can only be used in a function".
playbook=${1:-}
# NOTE(review): $playbook is currently unused — the playbook path below is
# hard-coded to main.yml; confirm whether it should interpolate $playbook.

# On MacOS, we need homebrew at a minimum. Install it if we cannot find it on the system.
if ! command -v brew &> /dev/null; then
  /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
fi

# Bootstrap ansible if the expected binaries dont exist.
if ! command -v ansible-playbook &> /dev/null; then
  brew install ansible
fi

# Run Ansible!
ansible-galaxy collection install community.general
ansible-playbook -i inventory dotfiles-playbook/main.yml
| true |
ee4a9181fc87af7ad8530b08aac2dbf50009358a
|
Shell
|
ctb/Simulate-mRNASeq-Reads
|
/bash_scripts/runGeneExpChick.sh
|
UTF-8
| 1,500 | 2.5625 | 3 |
[] |
no_license
|
#! /bin/bash
# Pipeline step: derive per-gene read counts from a transcript mapping for
# Gallus gallus, compare against the full-coverage reference mapping, and
# count genes whose log2 expression difference is within +/-1.
groups='Gallus_gallus.transIdgeneId.txt'
mapped='DataSet245rep1E1R10G100_default.map'
full='DataSet245rep1E1R100G100_default.map.col1.geneExp'
name='Gallus_gallus.allGeneNames.txt'
#ref='DataSet246rep2E1R100G100RandomReadsNames.txt'
# Column extraction of the mapping file (disabled: .col1/.col6 already exist).
#awk '{print $1}' $mapped > $mapped'.col1'
#awk '{print $6}' $mapped > $mapped'.col6'
#python getGeneReadCountsFromTranscriptMapping2.py $groups $mapped'.col6' $mapped'.col1' $mapped'.col6.geneExp' $mapped'.col1.geneExp'
# Aggregate transcript-level counts to gene-level expression.
python getGeneReadCountsFromTranscriptMapping2.py $groups $mapped'.col6' $mapped'.col1' $mapped'.col6.geneExp' $mapped'.col1.geneExp'
# NOTE(review): this overwrites the freshly computed .col1.geneExp with the
# full-coverage reference file — presumably intentional (compare reduced
# vs. full coverage), but confirm.
cp $full $mapped'.col1.geneExp'
# Join both expression tables over the complete gene-name list.
python combine-bowtie-bowtie-refFile-results2.py $name $mapped'.col6.geneExp' $mapped'.col1.geneExp' $mapped'.col6.col1.geneExp.combine'
python calc-log2-diff-exp2.py $mapped'.col6.col1.geneExp.combine' $mapped'.col6.col1.geneExp.combine.log2diff2'
# Column 4 holds the log2 difference; keep values within [-1, 1] and count.
awk '{print $4}' $mapped'.col6.col1.geneExp.combine.log2diff2' > $mapped'.col6.col1.geneExp.combine.log2diff2.nums'
awk '{if ($1 <= 1 && $1 >= -1) print $0}' $mapped'.col6.col1.geneExp.combine.log2diff2.nums' > $mapped'.col6.col1.geneExp.combine.log2diff2.numsWithinPm1'
wc -l $mapped'.col6.col1.geneExp.combine.log2diff2.numsWithinPm1'
#python makeHistogram.py $mapped'.col6.col1.geneExp.combine.log2diff.nums' -19 20 2 $mapped'.col6.col1.geneExp.combine.log2diff.hist'
| true |
29b500b4bdfdc4fb64e3639075d63030a33139f4
|
Shell
|
ddavidtran/Labs-at-LiU
|
/TDDC78 - Programming of Parallel Computers (Spring 2018)/Lab3_openmp/run.sh
|
UTF-8
| 174 | 2.796875 | 3 |
[] |
no_license
|
#!/bin/bash
# Benchmark ./myprogram on 1..16 cores via ompsalloc, appending each run's
# output (preceded by a "Cores:" header) to lab3_result.txt.
set -e

result_file="lab3_result.txt"
for n in 1 2 4 8 16; do
  echo "Cores: $n " >> "$result_file"
  ompsalloc -N1 -c"$n" ./myprogram >> "$result_file"
  # Blank line on stdout between runs (not written to the result file).
  echo ""
done
| true |
1b057c4bc02ecaf52d6df381057e4cc50cda359b
|
Shell
|
orion01500/nginx-ultimate-bad-bot-blocker
|
/setup-ngxblocker
|
UTF-8
| 7,244 | 4 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/sh
### NGINX Bad Bot Blocker: setup script #################
### Copyright (C) 2017 Stuart Cardall ###
### https://github.com/itoffshore ###
### Licensed under the terms of the GPL2 ###
##########################################################
# Default locations; every one of these can be overridden on the command
# line (see usage() / get_options() below).
WWW=/var/www
VHOST_EXT="vhost"
VHOST_DIR=/etc/nginx/sites-available
BOTS_DIR=/etc/nginx/bots.d
CONF_DIR=/etc/nginx/conf.d
MAIN_CONF=/etc/nginx/nginx.conf
# setting Y / yes will whitelist only directories in $www
# that look like domain.names
DOT_NAMES="Y"
# if you already set 'limit_conn addr' you may want to set
# this to N / no.
INC_DDOS="Y"
####### end user configuration ###########################
# Print the CLI help text (a here-doc that interpolates the current
# defaults) and return success.
# NOTE: 'local' is not strictly POSIX but is supported by dash/ash/busybox.
usage() {
	local script=$(basename $0)
	cat <<EOF
$script: add Nginx Bad Bot Blocker configuration [ in $MAIN_CONF ] [ $VHOST_DIR/* ]
Usage: $script [OPTIONS]
[ -w | --www ] : WWW path (default: $WWW)
[ -e | --ext ] : Vhost file extension (default: .$VHOST_EXT)
[ -v | --vhost ] : Vhost directory (default: $VHOST_DIR)
[ -b | --bots ] : Bot rules directory (default: $BOTS_DIR)
[ -c | --conf ] : NGINX conf directory (default: $CONF_DIR)
[ -m | --main ] : NGINX main configuration (default: $MAIN_CONF)
[ -n | --names ] : NO whitelist of .names only (default: $DOT_NAMES)
[ -d | --ddos ] : NO insert of DDOS rule (default: $INC_DDOS)
[ -x | --exec ] : Actually change the files (default: don't change anything)
[ -h | --help ] : this help message
Examples:
$script -n (Whitelist all directory names in $WWW as domains: not just dot.name directories)
$script -d (Do not insert DDOS rule: these may clash with existing 'limit_conn addr' rules)
$script (Don't change anything: display results on stdout)
$script -x (Change / update config files)
EOF
	return 0
}
# Abort the whole script when the vhost search produced no files.
check_config() {
	local file_list="$*"
	[ -n "$file_list" ] && return 0
	echo "no vhost files in: $VHOST_DIR/*.$VHOST_EXT => exiting."
	exit 1
}
# List every vhost configuration file (one path per line) under
# $VHOST_DIR matching *.$VHOST_EXT.
find_vhosts() {
	# Quoted so a vhost directory containing spaces does not word-split.
	find "$VHOST_DIR" -type f -name "*.$VHOST_EXT"
}
# Append this machine's external IPv4 (looked up via OpenDNS) to
# whitelist-ips.conf, once. Only writes when DRY_RUN=N; otherwise it just
# reports what it would do.
whitelist_ips() {
	local ip= conf=$BOTS_DIR/whitelist-ips.conf
	mkdir -p $BOTS_DIR
	if [ -n "$(which dig)" ]; then
		ip=$(dig +short myip.opendns.com @resolver1.opendns.com)
		# BUG FIX: '&>/dev/null' is a bashism; under this script's
		# #!/bin/sh it parses as '&' (background) plus '>/dev/null',
		# so the duplicate check never worked reliably.
		if ! grep "$ip" $conf >/dev/null 2>&1; then
			printf "%-17s %-15s %-s\n" "Whitelisting ip:" "$ip" "=> $conf"
			if [ "$DRY_RUN" = "N" ]; then
				printf "%-23s %-s\n" "$ip" "0;" >> $conf
			fi
		fi
	else
		echo "WARN: dig binary missing => install bind-tools to whitelist external ip address"
	fi
}
# Append one whitelist entry per domain-like directory under $WWW to
# whitelist-domains.conf (DOT_NAMES=Y restricts to names containing a dot).
# Only writes when DRY_RUN=N; always reports what it would add.
whitelist_domains() {
	local domain_list= domain= domain_len=
	local conf=$BOTS_DIR/whitelist-domains.conf
	case "$DOT_NAMES" in
		y*|Y*) domain_list=$(find $WWW -mindepth 1 -maxdepth 1 -type d -name '*\.*' -exec basename {} \;);;
		*) domain_list=$(find $WWW -mindepth 1 -maxdepth 1 -type d -exec basename {} \;);;
	esac
	# Longest directory name, used to align the printf columns below.
	domain_len=$(find $WWW -mindepth 1 -maxdepth 1 -type d -exec basename {} \; \
		| awk '{ print length ($0) }' | sort -nr | head -1)
	for domain in $domain_list; do
		# BUG FIX: '&>/dev/null' is a bashism; under #!/bin/sh it
		# backgrounds grep instead of silencing it.
		if ! grep "$domain" $conf >/dev/null 2>&1; then
			printf "%-s %-$(( $domain_len +2))s %s\n" "Whitelisting domain:" "$domain" "=> $conf"
			if [ "$DRY_RUN" = "N" ]; then
				printf "%-$(( $domain_len +8))s %s\n" "\"~*$domain\"" "0;" >> $conf
			fi
		fi
	done
}
# Print the length of the longest word among the arguments (0 when called
# with no arguments).
longest_str() {
	local max=0 word=
	for word in $*; do
		[ ${#word} -gt $max ] && max=${#word}
	done
	echo $max
}
# Insert missing "include $conf_dir/<file>;" lines into a config file.
# $1 = line number to insert at, $2 = target file, $3 = conf directory,
# $4.. = include file names. Honors DRY_RUN: only edits when DRY_RUN=N,
# otherwise just prints what it would insert.
add_includes() {
	# $ph is a sentinel token that survives sed's insert command so a
	# literal tab can be placed around the inserted text, then removed.
	local ph='<<!!>>' line=$1 file=$2 conf_dir=$3 text= update=
	# Drop the first three positional args to leave only the include names.
	local include_list=$(echo $@ | awk '{$1=$2=$3=""}sub("^"OFS"+","")')
	# Column width for aligned "inserting:" output.
	local col_size=$(( $(longest_str $include_list) + $(echo $conf_dir | wc -m) ))
	for text in $include_list; do
		# Skip names already present anywhere in the file.
		if ! grep "$text" $file 1>/dev/null; then
			update='true'
			text="include $conf_dir/$text;"
			printf "%-10s %-$(( $col_size +10 ))s %s\n" "inserting:" "$text" "=> $file"
			if [ "$DRY_RUN" = "N" ]; then
				# $ph is just a placeholder so sed inserts a \t (tab)
				sed -i "$line i $ph \t$text $ph" $file
			fi
		fi
	done
	if [ "$DRY_RUN" = "N" ]; then
		if [ -n "$update" ]; then
			#add blank line below inserts
			line=$(( $line + $(echo $include_list | wc -w) ))
			if ! sed -n "${line}p" $file | grep ^'}' 1>/dev/null; then
				text="include $conf_dir/$(echo $include_list | awk '{print $1}');"
				sed -i "s|$text|$text\n|" $file
			fi
			#add comment above inserts
			text="include $conf_dir/$(echo $include_list | awk '{print $NF}');"
			sed -i "s|$text|# Bad Bot Blocker\n\t$text|" $file
			# remove placeholders
			sed -i "s|$ph||g" $file
		fi
	fi
}
# Print the 1-based line number of the first or last line of file $1 that
# matches (awk) regex $2; $3 chooses "first" or "last".  Prints nothing
# when no line matches.
find_line() {
    local target=$1 pattern=$2 position=$3
    if [ "$position" = "first" ]; then
        awk "/$pattern/{ print NR; exit }" $target
    elif [ "$position" = "last" ]; then
        awk "/$pattern/{ hit = NR } END { if (hit) print hit }" $target
    fi
}
# Compute the absolute line number at which new include directives should
# be inserted into $1.  The search for $2 is restricted to the slice of the
# file between the first/last occurrence of $4 and of $6 ($5/$7 pick
# "first" or "last" for each bound).  Echoes the insertion line; pads the
# file with a blank line if the insertion point would fall past EOF.
find_includes() {
    local file=$1 search=$2 search_first_last=$3 line= tmp=$(mktemp)
    local start_range=$4 start_first_last=$5
    local end_range=$6 end_first_last=$7
    local start=$(find_line $file $start_range $start_first_last)
    local end=$(find_line $file $end_range $end_first_last)
    # Copy just the start..end slice so matches outside it are ignored.
    sed -n ${start},${end}p ${file} > $tmp
    line=$(find_line $tmp $search $search_first_last)
    rm -f $tmp
    # search string not found
    if [ -z "$line" ]; then
        line=1
    fi
    # Translate the slice-relative number back to a file-absolute one:
    # insert before the first match, or just after the last one.
    case "$search_first_last" in
        first) line=$(( $line + $start -1 ));;
        last) line=$(( $line + $start +1 ));;
    esac
    # if inserting beyond the end of the file
    if [ $line -gt $(wc -l < $file) ]; then
        # insert blank line
        sed -i "$end i \ " $file
    fi
    echo $line
}
# Parse command-line flags into the script's global configuration
# variables (WWW, VHOST_EXT, VHOST_DIR, BOTS_DIR, CONF_DIR, MAIN_CONF,
# DOT_NAMES, INC_DDOS, DRY_RUN).  Prints usage and exits on a bad option.
get_options() {
    local options
    # FIX: keep the assignment separate from `local` — `local x=$(cmd)`
    # returns the status of `local` (always 0), so the original never
    # detected a getopt parse failure.
    options=$(getopt -o w:e:v:b:c:m:ndhx --long \
        www:,ext:,vhost:,bots:,conf:,main:,names,ddos,help,exec -- "$@" 2>/dev/null)
    if [ $? -ne 0 ]; then
        usage
        exit 1
    fi
    # Re-set the positional parameters to getopt's normalized output.
    eval set -- "$options"
    while :; do
        case "$1" in
            -h | --help) usage && exit 1;;
            -x | --exec) DRY_RUN=N; shift;;
            -w | --www) WWW=$2; shift 2;;
            -e | --ext) VHOST_EXT=$2; shift 2;;
            -v | --vhost) VHOST_DIR=$2; shift 2;;
            -b | --bots) BOTS_DIR=$2; shift 2;;
            -c | --conf) CONF_DIR=$2; shift 2;;
            -m | --main) MAIN_CONF=$2; shift 2;;
            -n | --names) DOT_NAMES=N; shift;;
            -d | --ddos) INC_DDOS=N; shift;;
            *) break;;
        esac
    done
}
# Entry point: parse flags, insert the Bad Bot Blocker include directives
# into every vhost file and into the main nginx config, then whitelist the
# host's public IP and the local vhost domain names.
main() {
    local file= line= vhost_includes= main_includes= file_list=
    # NOTE(review): main_includes is never referenced below — the names are
    # repeated literally in the add_includes call; confirm intent.
    main_includes="botblocker-nginx-settings.conf globalblacklist.conf"
    vhost_includes="blockbots.conf"
    # parse command line
    get_options $@
    case "$INC_DDOS" in
        y*|Y*) vhost_includes="$vhost_includes ddos.conf"
    esac
    file_list=$(find_vhosts)
    check_config $file_list
    # DRY_RUN is only ever set to "N" by -x/--exec; empty means dry run.
    if [ -z "$DRY_RUN" ]; then
        printf "\n** Dry Run ** | not updating files | -x or --exec to change files\n\n"
    fi
    # update vhosts
    for file in $file_list; do
        # Insert after the last existing include inside the server block,
        # bounded by "server_" ... "location".
        line=$(find_includes $file include last server_ last location first )
        add_includes $line $file $BOTS_DIR $vhost_includes
    done
    # update main config
    line=$(find_includes $MAIN_CONF include last http first '\}' last )
    add_includes $line $MAIN_CONF $CONF_DIR botblocker-nginx-settings.conf globalblacklist.conf
    whitelist_ips
    whitelist_domains
}
## START ##
main $@
exit $?
| true |
2f0a2488b34173f7577f8dbb981d42bcf18c2912
|
Shell
|
tbots/docs
|
/libvirt/depl2/nagios/fix.sh
|
UTF-8
| 451 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/bash
#
# check proper configuration settings
#
# Sanity-check a Nagios install: report owner/group of key files (or that
# they are missing) and list the configured admin users.
files=\
'/usr/local/nagios/etc/htpasswd.users
/usr/local/nagios/libexec/eventhandlers
/usr/local/nagios/libexec/check_nrpe'
for f in $files
do
    if [ -e $f ]
    then
        # check with awk the correct permissions set
        echo -e "$f\t\t`ls -ld $f | awk '{print $3 "\t" $4}'`"
    else
        echo -e "$f\t[ missing ]"
    fi
done
echo "nagios admin user"
# htpasswd format is user:hash — print only the user names.
awk -F: '{print $1}' /usr/local/nagios/etc/htpasswd.users
| true |
2bd70660313dde80f916b1da822a2faa6ad1c042
|
Shell
|
spacchetti/starsuit
|
/scripts/build.sh
|
UTF-8
| 354 | 2.765625 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
set -e
# Fetch the latest purescript/package-sets release tag from the GitHub API
# (authenticated to avoid rate limits), regenerate the package set for it,
# install dependencies and build the documentation index.
TAG="$(curl -H "Authorization: token $GITHUB_API_TOKEN" --silent "https://api.github.com/repos/purescript/package-sets/releases/latest" | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/')"
echo "package-sets release: $TAG"
node index.js $TAG
spago install
# Disable globbing: `spago sources` output is passed to purs verbatim.
set -f
purs docs $(spago sources)
purescript-docs-search build-index
| true |
94a1ce4cff36f4f7298007e8476146bb48b3cc70
|
Shell
|
gmarcotte3/georgesrepos
|
/jenkinsDocker/bin/run-jenkins.bash
|
UTF-8
| 424 | 3.265625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Start the Jenkins container in the background, refusing to start a
# second instance if one is already running.
# Assumptions:
# jenkins home is at /var/lib/jenkins
jenkins_home_dir=/var/opt/jenkins
docker_image=marcotte/jenkins-docker
# \s around the image name avoids matching a substring of another image.
if docker ps | grep "\\s${docker_image}\\s" ; then
    echo "ERROR: The server is running already."
    exit 1
fi
# add -d remove -i -t
docker run -d -p 8080:8080 --name jenkinsdocker \
    -v "${jenkins_home_dir}:/var/lib/jenkins" \
    ${docker_image}
# Abort before the status listing if the container failed to start.
(( $? != 0 )) && exit 1
docker ps
| true |
7a80879200f346866721e8bbe9b768d767a95832
|
Shell
|
Luismcplopes/MyMediaCenter
|
/install-couchpotato-sickrage.sh
|
UTF-8
| 3,666 | 3.390625 | 3 |
[] |
no_license
|
#!/bin/sh
# Installation script of Sickrage and CouchPotato
# Installs both services on an OSMC (Raspberry Pi) box: system update,
# archive tools, dedicated users, git checkouts under /opt, and systemd
# units patched to the checkout paths.
echo "Begining updating and installation of GIT..."
sudo apt-get --yes --force-yes update && sudo apt-get --yes --force-yes install git
echo "finished updating and instlaltion of GIT"
echo "Begining upgrading osmc..."
sudo apt-get --yes --force-yes upgrade
echo "finished upgrading"
echo "begining installing p7zip..."
sudo apt-get --yes --force-yes install p7zip-full
echo "installed p7zip"
echo "begining installing unrar..."
# unrar is not packaged for this platform; fetch an armhf .deb directly.
sudo wget http://sourceforge.net/projects/bananapi/files/unrar_5.2.6-1_armhf.deb
sudo dpkg -i unrar_5.2.6-1_armhf.deb
echo "successfully installed and removing temporary file of unrar"
sudo rm unrar_5.2.6-1_armhf.deb
echo "removed temporary file of unrar"
echo "Begining installing CouchPotato..."
echo "adding couchpotato username and adding to group..."
sudo useradd couchpotato
sudo usermod -a -G osmc couchpotato
echo "creatint home folder in /home/couchpotato..."
sudo mkdir /home/couchpotato
echo "granting permissions..."
sudo chown -R couchpotato:couchpotato /home/couchpotato
echo "downloading couchpotato and installing..."
sudo git clone http://github.com/RuudBurger/CouchPotatoServer /opt/CouchPotatoServer
echo "granting permissions... "
sudo chown -R couchpotato:couchpotato /opt/CouchPotatoServer
echo "adding startup script...."
cd /opt/CouchPotatoServer
sudo cp /opt/CouchPotatoServer/init/couchpotato.service /etc/systemd/system/couchpotato.service
cd /etc/systemd/system/
echo "fixing startup path..."
# The bundled unit assumes /var/lib; point it at the /opt checkout instead.
sudo sed -i 's@/var/lib/CouchPotatoServer/CouchPotato.py@/opt/CouchPotatoServer/CouchPotato.py@g' couchpotato.service
echo "enabling startup script..."
sudo systemctl enable couchpotato.service
echo "couchpotato service is starting..."
sudo systemctl start couchpotato.service
echo "couchpotato successfully installed, you can check at Raspberry_Pi_IP_Address:5050"
cd
echo "Begining to install SickRage"
echo "creating sickrage username and adding to group..."
sudo useradd sickrage
sudo usermod -a -G osmc sickrage
echo "downloading sickrage and installing..."
sudo git clone https://github.com/SiCKRAGE/SickRage.git /opt/sickrage
echo "creating startup script for sickrage..."
sudo cp /opt/sickrage/runscripts/init.systemd /etc/systemd/system/sickrage.service
echo "granting permissions to sickrage folder"
sudo chown -R sickrage:sickrage /opt/sickrage
sudo chmod +x /opt/sickrage
sudo chmod a-x /etc/systemd/system/sickrage.service
echo "fixing path at startup script..."
cd /etc/systemd/system
sudo sed -i 's@/usr/bin/python2.7 /opt/sickrage/SickBeard.py -q --daemon --nolaunch --datadir=/opt/sickrage@/opt/sickrage/SickBeard.py -q --daemon --nolaunch --datadir=/opt/sickrage@g' sickrage.service
echo "enabling startup script...."
sudo systemctl enable sickrage.service
# First start generates config.ini, which is edited below.
echo "starting sickrage and waiting to create file config.ini ..."
sudo systemctl start sickrage.service
echo "created file config.ini and stopping sickrage..."
sudo service sickrage stop
cd /opt/sickrage/
echo "adding username and password to sickrage... this fixes freezing raspbeery pi when you try to login to sickrage..."
sudo sed -i 's@web_username = ""@web_username = "osmc"@g' config.ini
sudo sed -i 's@web_password = ""@web_password = "osmc"@g' config.ini
echo "Sickrage succesfully installed..."
sudo service sickrage start
echo "Sickrage service started!"
echo " "
echo "Successfully installed! more info bellow..."
echo " "
echo "couchpotato info:"
echo "webgui raspberry_ip:5050"
echo " "
echo "sickrage info:"
echo "webgui raspberry_ip:8081"
echo " "
echo "sickrage login info:"
echo " "
echo "username: osmc"
echo "password: osmc"
echo " "
echo "enjoy"
| true |
f84bd287e21b275bb2bb8d671150dd02f4f90d92
|
Shell
|
Kagee/make-no-list
|
/commoncrawl/get-domainlist.sh
|
UTF-8
| 295 | 2.796875 | 3 |
[] |
no_license
|
#! /bin/bash
# Rebuild commoncrawl.domains: extract the domains from every fetched
# CC-MAIN-* index, merge with the existing list and de-duplicate.
for INDEX in $(ls ../storage/cdx-index-client/|grep -P 'CC-MAIN');
do
    # Saves as $1.domains.index
    echo $INDEX; ./get-domains-index.sh $INDEX;
done
# Merge the per-index lists with the current list; write via a temp file
# so the input of `cat` is not clobbered mid-pipeline.
cat *.domains.index commoncrawl.domains | sort | uniq > commoncrawl.domains.tmp
mv commoncrawl.domains.tmp commoncrawl.domains
| true |
4999b44df59b0cb1d2924c7585cd5584ab49828c
|
Shell
|
ngroberio/ocpAdvDepl
|
/roles/7_CREATE_PVS/files/create_pvs.sh
|
UTF-8
| 503 | 3.03125 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Create 50 NFS-backed PV export directories, publish them via
# /etc/exports.d and restart the NFS server.
# NOTE(review): GUID is exported but never used below — confirm intent.
export GUID=`hostname | cut -d"." -f2`
mkdir -p /srv/nfs/user-vols/pv{1..50}
echo ">>> CREATE PV FOR USERS"
# NOTE(review): appends to openshift-uservols.exports in the CWD, so a
# re-run duplicates entries — confirm this script is run once only.
for pvnum in {1..50}; do echo "/srv/nfs/user-vols/pv${pvnum} *(rw,root_squash)" >> openshift-uservols.exports; done
sudo cp ./openshift-uservols.exports /etc/exports.d/
chown -R nfsnobody.nfsnobody /srv/nfs
chmod -R 777 /srv/nfs
echo "<<< CREATE PV FOR USERS DONE"
echo ">>> RESTARTING NFS SERVER"
sudo systemctl restart nfs-server
echo "<<< RESTARTING NFS SERVER DONE"
| true |
9dda5c4e9222073c1d92f257f43848d7be0ae744
|
Shell
|
sri-arjuna/script-tools.tui
|
/menu/set/up.2
|
UTF-8
| 1,303 | 3.5 | 4 |
[] |
no_license
|
#!/bin/bash
#
# Setup/installation handler menu for the "st" tool suite.  Meant to be
# *sourced* (it uses `return` at top level); relies on helper functions
# sT, sE, ask and Capitalize provided by the surrounding framework.
#
#
#
#
# Date Created: 2013.02.25
script_version=0.1
#
#
# Root Check
#
# Re-exec as root via su when not already root; `return` only works when
# this file is sourced.
[ ! 0 -eq $UID ] && \
sE "You must be root to set up and install the system." && \
ask "Reload script as root?" && \
su -c "$0 $@" && \
return 0 || return 1
#
# Title
#
sT "sea's Setup and installation handler ($script_version)"
#
# Variables
#
# task_groups="langauge-time partitions repo-packages"
task_groups="usage setup content"
task_usage="language keyboard timezone internet"
task_part_type="mbr gpt"
task_part_editors="fdisk gdisk cfdisk"
task_part_mounts="root swap home opt var"
task_part_fs="ext2 ext3 ext4 xfs reiserfs vfat ntfs-3g"
#
# Arrays
#
# NOTE(review): `name[]=""` is not valid bash (bad array subscript); these
# assignments fail at runtime.  Presumably placeholders for planned code.
ar_disk[]="" # /dev/sda[0-9]
ar_mount[]="" # /
ar_fs[]="" # any of $task_part_fs
ar_size[]="" # the targeted size
ar_label[]="" # one word description
ar_format[]="" # 0 = yes, 1 = no
ar_order[]="" # create order
#ar_disk[]="" #
#
# Subs
#
#
# Menu
#
# Walk each task group; most branches are unimplemented stubs (bare echo).
for group in $task_groups;do
    sT "$(Capitalize $group)"
    case $group in
    usage) for task in $task_usage;do
            case $task in
            language) echo ;;
            keyboard) echo ;;
            timezone) echo ;;
            internet) sE "At this moment, please run:" "st net stnm"
                sE "in another terminal."
                ;;
            esac
        done
        ;;
    setup) echo ;;
    content) echo ;;
    esac
done
| true |
b33c32732bd366c2f6ae784da89443655d179f92
|
Shell
|
TritonDataCenter/mi-standard
|
/copy/var/zoneinit/includes/41-postgresql.sh
|
UTF-8
| 911 | 3.5 | 4 |
[
"MIT"
] |
permissive
|
# SmartOS zoneinit step: (re)initialize the PostgreSQL data directory with
# the password stored in machine metadata, verify the service can start,
# then leave it disabled by default.
[ "$(svcs -Ho state postgresql)" == "online" ] && \
svcadm disable -s -t postgresql
# Password for initdb's --pwfile, supplied via SmartOS machine metadata.
mdata-get pgsql_pw > /tmp/pgpasswd
log "initializing PostgreSQL"
# Data dir from SMF config, defaulting to /var/pgsql/data.
PGDATA=$(svcprop -p config/data postgresql 2>/dev/null)
: ${PGDATA:=/var/pgsql/data}
[ -d ${PGDATA} ] && rm -rf ${PGDATA}
su - postgres -c "/opt/local/bin/initdb \
    --pgdata=${PGDATA} \
    --encoding=UTF8 \
    --locale=en_US.UTF-8 \
    --auth=password \
    --pwfile=/tmp/pgpasswd" >/dev/null || \
error "PostgreSQL init command failed"
# symlink for the sake of Webmin's config
[ ${PGDATA} != /var/pgsql/data ] && \
ln -s ${PGDATA} /var/pgsql/data
log "starting PostgreSQL"
# Smoke-test: enable, give it a second, confirm it reached "online".
svcadm enable -s postgresql && sleep 1
[ "$(svcs -Ho state postgresql)" != "online" ] && \
error "PostgreSQL failed to start"
log "disabling PostgreSQL by default"
svcadm disable -s postgresql
| true |
9d640324895bd2e019fe28adf0c1497f60916747
|
Shell
|
androdev4u/network
|
/salt/firewall/priv-stateful.sh
|
UTF-8
| 631 | 2.828125 | 3 |
[] |
no_license
|
#!/bin/sh
# Stateful FORWARD-chain firewall, applied only when the interface name
# contains "priv": allow established traffic, loopback and the private
# interface itself; reject everything else.  Rules are installed for both
# IPv4 and IPv6.
export PATH=/sbin:/bin:/usr/sbin:/usr/bin

case "$IFACE" in
*priv*)
    # Identical rule set for iptables (v4) and ip6tables (v6).
    for fw in iptables ip6tables; do
        $fw -F FORWARD
        $fw -P FORWARD DROP
        $fw -A FORWARD -m state --state ESTABLISHED -j ACCEPT
        # loopback
        $fw -A FORWARD -i lo -j ACCEPT
        # Trust priv
        $fw -A FORWARD -i $IFACE -j ACCEPT
        # Deny by default
        $fw -A FORWARD -j REJECT
    done
    ;;
esac
| true |
35610da0cc7558badbb454a18e1285da9d5feb86
|
Shell
|
shubh1905/dissertation_UOE
|
/run_preprocess_label.sh
|
UTF-8
| 1,556 | 2.875 | 3 |
[] |
no_license
|
#!/bin/bash
#conda activate temp_env
# Build label embeddings and a semantic label index (clustering) for an
# X-Transformer dataset.
# Args: $1 dataset name, $2 label-embedding type, $3 cluster size.
DATASET=$1
LABEL_EMB=$2 # pifa-tfidf | pifa-neural | text-emb
CLUSTER_SIZE=$3
#cd X-Transformers
# setup label embedding feature path
# overwrite it if necessary
DATA_DIR=./Data
if [ ${LABEL_EMB} == 'pifa-tfidf' ]; then
    label_emb_inst_path=${DATA_DIR}/${DATASET}/X.trn.npz
elif [ ${LABEL_EMB} == 'pifa-neural' ]; then
    label_emb_inst_path=${DATA_DIR}/${DATASET}/X.trn.finetune.xlnet.npy
elif [ ${LABEL_EMB} == 'text-emb' ]; then
    label_emb_inst_path=${DATA_DIR}/${DATASET}/X.trn.npz
fi
# construct label embedding
mkdir -p ${DATA_DIR}/${DATASET}/label_embedding
LABEL_EMB_DIR=${DATA_DIR}/${DATASET}/label_embedding
# Create the nested cluster_info/<emb>/<size> directory level by level.
CLUSTER_DATA_DIR=${DATA_DIR}/${DATASET}/cluster_info
mkdir -p ${CLUSTER_DATA_DIR}
CLUSTER_DATA_DIR=${DATA_DIR}/${DATASET}/cluster_info/${LABEL_EMB}
mkdir -p ${CLUSTER_DATA_DIR}
CLUSTER_DATA_DIR=${DATA_DIR}/${DATASET}/cluster_info/${LABEL_EMB}/${CLUSTER_SIZE}
mkdir -p ${CLUSTER_DATA_DIR}
#Comment this line once label embeddings are available
python -u -m xbert.preprocess --do_label_embedding -i ${DATA_DIR}/${DATASET} -o ${LABEL_EMB_DIR} \
    -l ${LABEL_EMB} \
    -x ${label_emb_inst_path}
# semantic label indexing
SEED=0
LABEL_EMB_NAME=${LABEL_EMB}-s${SEED}
# NOTE(review): OUTPUT_DIR is never set in this script, so INDEXER_DIR is
# relative to "/"; it is also unused below — confirm.
INDEXER_DIR=${OUTPUT_DIR}/${LABEL_EMB_NAME}/indexer
python -u -m xbert.indexer \
    -i ${LABEL_EMB_DIR}/L.${LABEL_EMB}.npz \
    -o ${CLUSTER_DATA_DIR} --seed ${SEED} --cluster_size ${CLUSTER_SIZE}
python ./Clustering_Classifier/create_unbalanced_cluster.py --d ${DATASET} --e ${LABEL_EMB} --c ${CLUSTER_SIZE}
#p
| true |
ecf9d430777eb1ca66c2284d2c11773f514cbe46
|
Shell
|
proudzhu/MdCharm
|
/dist/arch/PKGBUILD
|
UTF-8
| 2,093 | 2.6875 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
# Maintainer: proudzhu <proudzhu at gmail.com>
# Arch Linux PKGBUILD for the MdCharm markdown/wiki editor (git build).
pkgname=mdcharm-git
_pkgname=mdcharm
pkgver=r35.c4e8d4e
pkgrel=1
pkgdesc='A wiki editor, currently it supports markdown(markdown extra) and Multi-Markdown.'
arch=('i686' 'x86_64')
url='https://github.com/zhangshine/MdCharm'
license=('custom')
depends=('qt5-webkit' 'hunspell' 'zlib')
makedepends=('python')
provides=("$_pkgname")
conflicts=("$_pkgname")
source=("$_pkgname::git+https://github.com/proudzhu/MdCharm.git")
sha256sums=('SKIP')

# VCS version: revision count + short hash of the current checkout.
pkgver() {
    cd "$srcdir/$_pkgname"
    printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}

build() {
    cd "$srcdir/$_pkgname/src"
    qmake MdCharm.pro
    # maybe some error when build with -j2
    MAKEFLAGS="-j1" make
}

package() {
    cd "$srcdir/$_pkgname/src"
    # make DESTDIR=$pkgdir install
    install -Dm755 release/mdcharm $pkgdir/usr/bin/mdcharm
    install -Dm644 $srcdir/mdcharm/LICENSE $pkgdir/usr/share/licenses/$_pkgname/LICENSE
    install -Dm644 $srcdir/mdcharm/dist/ubuntu/usr/share/mdcharm/mdcharm.png $pkgdir/usr/share/mdcharm/mdcharm.png
    install -Dm644 $srcdir/mdcharm/dist/ubuntu/usr/share/applications/mdcharm.desktop $pkgdir/usr/share/applications/mdcharm/mdcharm.desktop
    install -Dm644 $srcdir/mdcharm/dist/ubuntu/usr/share/mdcharm/spellcheckdict/en_GB.aff $pkgdir/usr/share/mdcharm/spellcheckdict/en_GB.aff
    install -Dm644 $srcdir/mdcharm/dist/ubuntu/usr/share/mdcharm/spellcheckdict/en_GB.dic $pkgdir/usr/share/mdcharm/spellcheckdict/en_GB.dic
    install -Dm644 $srcdir/mdcharm/dist/ubuntu/usr/share/mdcharm/spellcheckdict/en_US.aff $pkgdir/usr/share/mdcharm/spellcheckdict/en_US.aff
    # FIX: the en_US dictionary was previously installed from the en_GB
    # source file (copy-paste error); use the en_US source instead.
    install -Dm644 $srcdir/mdcharm/dist/ubuntu/usr/share/mdcharm/spellcheckdict/en_US.dic $pkgdir/usr/share/mdcharm/spellcheckdict/en_US.dic
    install -Dm644 $srcdir/mdcharm/dist/ubuntu/usr/share/mdcharm/spellcheckdict/README_en_GB.txt $pkgdir/usr/share/mdcharm/spellcheckdict/README_en_GB.txt
    install -Dm644 $srcdir/mdcharm/dist/ubuntu/usr/share/mdcharm/spellcheckdict/README_en_US.txt $pkgdir/usr/share/mdcharm/spellcheckdict/README_en_US.txt
}
| true |
26764e4240879bf60d918545ee5bd4270bfc60f1
|
Shell
|
unix1986/universe
|
/script/cpidx.sh
|
UTF-8
| 753 | 3.484375 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Fan out rsync jobs across host pairs listed in $HOSTS ("src dst" per
# line), limited to $THREAD_NUM concurrent jobs via a fifo used as a
# counting semaphore (fd 6 holds one token per allowed worker).
HOSTS="hostlist"
SRC_PATH="/home/search/zmq_gosearch3/*"
DST_PATH="/home/search/wangjingjing/"
declare -i THREAD_NUM=10
# Bind the fifo to fd 6, then unlink it — the open fd keeps it alive.
mkfifo /tmp/$$.fifo
exec 6<>/tmp/$$.fifo
rm -rf /tmp/$$.fifo
declare -i HOST_NUM=`cat $HOSTS| wc -l`
# Never spawn more workers than there are host pairs.
if [ $HOST_NUM -lt $THREAD_NUM ]; then
    THREAD_NUM=$HOST_NUM
fi
echo "Thread num: $THREAD_NUM"
# Seed the semaphore with THREAD_NUM tokens (one newline each).
for (( i = 0; i < THREAD_NUM; ++i )) {
    echo
} >&6
# Push the source host's tree to the destination host over ssh+rsync.
function child() {
    sudo -u search ssh $1 "rsync -r $SRC_PATH $2:$DST_PATH"
}
while read line; do
    # Block until a token is available, then start a background worker.
    read -u6
    src_host=`echo $line | cut -d' ' -f1`
    dst_host=`echo $line | cut -d' ' -f2`
    {
        child $src_host $dst_host && echo "$src_host finished." || echo "$src_host failed."
        # Return the token so the next queued pair can start.
        echo >&6
    }&
done <$HOSTS
wait
exec 6<&-
echo "Done"
| true |
dc8fc45f092b5b87bf2817c2ba4cf74b8648aaeb
|
Shell
|
ceplbr/breach_snapshot
|
/sys/install_scripts/breach_install_part_4.sh
|
UTF-8
| 540 | 2.90625 | 3 |
[] |
no_license
|
#!/bin/bash
# Part 4 of the Breach install: print the manual robot-configuration steps.
# CUSTOM COLORS (defined for consistency with the other parts; unused here)
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m'
# Quoted delimiter: the instructions are emitted verbatim, no expansion.
cat <<'EOF'
Breach install script -Part 4- Robot config

Sorry, this is the manual part :( 

1. Change IP settings in ~/.bashrc file
Change line to 'export ROS_MASTER_URI=http://192.168.1.X:11311'
Add line 'export ROS_HOSTNAME=192.168.1.X'
2. Change robot config in ~/catkin_ws/src/breach/config/robot_config.yaml file
3. Reboot PC with 'sudo reboot'
Installation -Part 4- done!
EOF
| true |
103891d7ff4a4c1b9a44e816dede08f938cff7bd
|
Shell
|
qianthinking/dotfiles
|
/zsh/kubectl.zsh
|
UTF-8
| 783 | 3.15625 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
# you can disable this by exporting K8S_CONTEXT_NAMESPACE_ENV=disabled in .zshrc
# see zsh/prezto-themes/prompt_qianthinking_setup which
# ensure K8S_CONTEXT && K8S_NAMESPACE exists in SHELL PROMPT
#
# Wraps kubectl so the context/namespace picked with kbc/kbn are injected
# automatically, unless the command already passes -n/--namespace.
if [ ! "$K8S_CONTEXT_NAMESPACE_ENV" = 'disabled' ]
then
    function kubectl() {
        if [ -z "$K8S_CONTEXT" ]; then
            # No sticky context selected — plain passthrough.
            command kubectl "$@"
        else
            # Respect an explicit namespace flag on the command line.
            if [ -z "$K8S_NAMESPACE" ] || [[ "$*" == '-n'* ]] || [[ "$*" == '--namespace'* ]]; then
                command kubectl --context $K8S_CONTEXT "$@"
            else
                command kubectl --context $K8S_CONTEXT -n $K8S_NAMESPACE "$@"
            fi
        fi
    }
    # Select the sticky context for subsequent kubectl calls.
    function kbc() {
        export K8S_CONTEXT=$1
    }
    # Select the sticky namespace for subsequent kubectl calls.
    function kbn() {
        export K8S_NAMESPACE=$1
    }
    # Clear both, reverting kubectl to plain passthrough.
    function kbclear() {
        export K8S_CONTEXT=''
        export K8S_NAMESPACE=''
    }
fi
| true |
cb58ce79669688056c089730f90c23288c80138e
|
Shell
|
TeaPackCZ/Pine64_scripts
|
/battery_keeper.sh
|
UTF-8
| 491 | 3.546875 | 4 |
[] |
no_license
|
#!/bin/bash
# Battery watchdog (Pine64): while a battery is present, poll its voltage
# every 30 s and power the system off when it drops below 3.2 V while
# discharging.
BATTERY_PRESENT=$(cat /sys/class/power_supply/battery/present)
while [ "$BATTERY_PRESENT" -eq "1" ] ; do
    sleep 30
    BATTERYVOLTAGE=$(cat /sys/class/power_supply/battery/voltage_now)
    # voltage_now is reported in microvolts; 3200000 uV == 3.2 V cut-off.
    if [ "3200000" -gt "$BATTERYVOLTAGE" ];
    then
        # FIX: battery status is a string ("Discharging"); the original
        # compared it with -eq (integer test), which always errored out and
        # made the shutdown branch unreachable.
        if [ "$(cat /sys/class/power_supply/battery/status)" = "Discharging" ];
        then
            echo "VOLTAGE: $BATTERYVOLTAGE - too low, shutting down." | wall
            sleep 5
            shutdown -h now
        fi
    else
        echo "VOLTAGE: $BATTERYVOLTAGE - OK"
    fi
    # FIX: re-read presence each cycle so the loop ends if the battery is
    # removed (the original read it once, leaving the condition constant).
    BATTERY_PRESENT=$(cat /sys/class/power_supply/battery/present)
done
exit 0
| true |
5ee5ea67c3db4df31f260f45f98e5965aa436ca0
|
Shell
|
bluewaysw/pcgeos
|
/Tools/scripts/profile/bp
|
UTF-8
| 1,062 | 3.421875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
##############################################################################
#
# Copyright (c) Geoworks 1993 -- All Rights Reserved
#
# PROJECT: PC GEOS
# MODULE: Tools/scripts -- branchpoint analysis
# FILE: bp
# AUTHOR: John Wedgwood, 9/17/93
#
# REVISION HISTORY:
# Name Date Description
# ---- ---- -----------
# jcw 9/17/93 Initial Revision
#
# DESCRIPTION:
# This file prints out a list of functions and the number of
# decisions made in each function.
#
# $Id: bp,v 1.4 94/04/20 00:33:05 don Exp $
#
###############################################################################
# With no args, scan all .asm files in the tree except *_loc.asm.
if [ "$*" ]; then
    files=$*
else
    files=`find . \( \( -name \*.asm \) -a \( \! \( -name \*_loc.asm \) \) \) -type f -print`
fi
# Prefer the user's private copy of the awk program if one exists.
if [ -f /staff/pcgeos/$USER/Tools/scripts/profile/bpData.awk ]; then
    data=/staff/pcgeos/$USER/Tools/scripts/profile/bpData.awk
    echo "using private awk file: $data"
else
    data=/staff/pcgeos/Tools/scripts/profile/bpData.awk
fi
# NOTE(review): `sort +0 +2` and `cut ... -` are legacy (pre-POSIX) forms;
# modern sort needs -k equivalents.  Kept as-is for the original platform.
awk -f $data $files | sort -r -n +0 +2 - | cut -c5- -
| true |
08ae60ae4c74ec8648d884ec9411aab10c85ad23
|
Shell
|
adriananeci/k8s-the-hard-way
|
/k8s_resources/apply_all.sh
|
UTF-8
| 1,493 | 3.453125 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Bootstrap cluster add-ons: kube-prometheus monitoring stack, an
# ingress-nginx controller (NodePort), then every manifest in this dir.
dir_path=$(realpath $(dirname $0))
### 1. Apply monitoring stack
# Work in a throwaway clone that is removed on exit or Ctrl-C.
scratch_directory=$(mktemp -d)
cleanup() {
    rm -rf "$scratch_directory"
}
trap cleanup EXIT
trap cleanup SIGINT
pushd "$scratch_directory" || exit 1
if ! git clone https://github.com/coreos/kube-prometheus.git; then
    echo "An error occurred when cloning coreos/kube-prometheus"
    exit 1
fi
cd kube-prometheus
# Update libsonnet libraries
jb update
# Build monitoring manifest based on our jsonnet
./build.sh ${dir_path}/monitoring_stack/monitoring.jsonnet
# Create the namespace and CRDs, and then wait for them to be available before creating the remaining resources
kubectl create -f manifests/setup
until kubectl get servicemonitors --all-namespaces ; do date; sleep 1; echo ""; done
# Create monitoring resources
kubectl create -f manifests/
popd || exit 1
### 2. Create nginx-ingress controller resources
#kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-0.32.0/deploy/static/provider/baremetal/deploy.yaml
kubectl create ns ingress-nginx
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
# NodePort service with fixed ports for a bare-metal cluster.
helm install ingress-nginx ingress-nginx/ingress-nginx -n ingress-nginx \
    --set controller.service.type=NodePort \
    --set controller.service.nodePorts.https=30443 \
    --set controller.service.nodePorts.http=30080 \
    --set controller.service.nodePorts.tcp.8080=38080
### 3. Apply remaining manifests
kubectl apply -R -f ${dir_path}
| true |
3a0c02d69c96d89ffcfaedd761a761bd4fa4ad31
|
Shell
|
vnagel/dotfiles
|
/bin/ls-installed
|
UTF-8
| 542 | 3.140625 | 3 |
[] |
no_license
|
#!/bin/bash
# Print the manually-installed packages known to apt, npm, cargo and snap.

# apt: manually-marked packages minus the distro's initial install set.
ls-apt-installed(){
    comm -23 <(apt-mark showmanual | sort -u) <(gzip -dc /var/log/installer/initial-status.gz | sed -n 's/^Package: //p' | sort -u)
}
# npm: top-level global packages only.
ls-npm-installed(){
    npm list -g --depth 0
}
ls-cargo-installed(){
    cargo install --list
}
ls-snap-installed(){
    snap list
}
# One labelled section per package manager, in a fixed order.
ls-installed(){
    printf '%s\n' '----apt----'
    ls-apt-installed
    printf '\n%s\n' '----npm----'
    ls-npm-installed
    printf '%s\n' '----cargo----'
    ls-cargo-installed
    printf '\n%s\n' '----snap----'
    ls-snap-installed
}
ls-installed 2> /dev/null
| true |
f6e58a7c4e7ee143850d54047a6f995866334976
|
Shell
|
radiant-rstats/docker-vsix-dev
|
/update.sh
|
UTF-8
| 258 | 2.703125 | 3 |
[] |
no_license
|
# Regenerate vsix_list.txt with a download URL for every *.vsix in this
# repository, then commit and push the result.
# FIX: -f so the first run (no pre-existing list) does not error out.
rm -f vsix_list.txt
touch vsix_list.txt
vsix_list=$(ls -d *.vsix 2>/dev/null)
for i in ${vsix_list}; do
    # NOTE(review): ../docker-vsix-dev/vsix_list.txt resolves to
    # ./vsix_list.txt when run from the repo root (the directory is named
    # docker-vsix-dev) — confirm the script is always run from there.
    echo "https://github.com/radiant-rstats/docker-vsix-dev/raw/main/${i}" >> ../docker-vsix-dev/vsix_list.txt
done
git add .
git commit -m "update"
git push
| true |
3a229a5bf8e6c87de286f4937f10f3e14664599d
|
Shell
|
NovasisGDSBT/FullGDS
|
/www/backlight_counter.sh
|
UTF-8
| 1,413 | 3.265625 | 3 |
[] |
no_license
|
#!/bin/sh
# Accumulate the number of seconds the LCD backlight has been ON into
# /tmp/backlight_on_counter, and persist it to the mmcblk0p3 partition
# every $TIMETOSAVE seconds.  /tmp/backlight_on_reset resets the counter.
TRUE="1"
COUNTER=0
SAVECOUNTER=0
TIMETOSAVE=300
# Resume from the previous in-memory counter file if present.
if [ -f /tmp/backlight_on_counter ]; then
    . /tmp/backlight_on_counter
    COUNTER=$BACKLIGHT_ON_COUNTER
else
    echo "BACKLIGHT_ON_COUNTER=0" > /tmp/backlight_on_counter
fi
while [ "$TRUE" == "1" ]; do
    sleep 1
    # Backlight sysfs node is discovered at runtime by another component.
    DEV=`cat /tmp/www/cgi-bin/lvds_device`
    LEV=`cat ${DEV}/actual_brightness`
    ENABLE=`cat ${DEV}/bl_power`
    # Count a second only when brightness > 0 and the panel is powered
    # (bl_power == 0 means ON in the sysfs backlight interface).
    if [ $LEV -gt 0 ]; then
        if [ "$ENABLE" == "0" ]; then
            let COUNTER=$COUNTER+1
            echo "BACKLIGHT_ON_COUNTER=$COUNTER" > /tmp/backlight_on_counter
        fi
    fi
    # NOTE(review): DATE is assigned but never used — confirm.
    DATE=`date -u`
    # External reset request: zero the counter and consume the flag file.
    if [ -f /tmp/backlight_on_reset ]; then
        rm /tmp/backlight_on_reset
        echo "BACKLIGHT_ON_COUNTER=0" > /tmp/backlight_on_counter
        COUNTER=0
    fi
    let SAVECOUNTER=$SAVECOUNTER+1
    if [ $SAVECOUNTER -gt $TIMETOSAVE ]; then
        SAVECOUNTER=0
        # Persist via a temp file + rename so a power cut cannot leave a
        # half-written counter file on the flash partition.
        mkdir -p /tmp/store_mountpoint
        mount /dev/mmcblk0p3 /tmp/store_mountpoint
        if [ $? -eq 0 ] # test mount OK
        then
            sync
            cp /tmp/backlight_on_counter /tmp/store_mountpoint/webparams/tempfile
            mv /tmp/store_mountpoint/webparams/tempfile /tmp/store_mountpoint/webparams/backlight_on_counter
            umount /tmp/store_mountpoint
            sync
            e2fsck /dev/mmcblk0p3
            #echo "$0" "Save"
            #/tmp/www/logwrite.sh "APPA" "INFO" "$0" "Save counters"
            #else
            # echo "$0" "Partition /dev/mmcblk0p3 busy"
        fi
    fi
done
| true |
7e45eac47239b170a7e716fd6f9dcf99595c58bd
|
Shell
|
rome-bot/automotive-ci
|
/tests/ostree/ostree.sh
|
UTF-8
| 728 | 2.765625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Register the host with RHSM using the supplied credentials, install the
# rhel-edge test dependencies and run its ostree test suite.
# Args: $1 RHSM username, $2 RHSM password.
USER=$1
PASS=$2
# Setup subscription-manager to enable yum repos
subscription-manager register --username $USER --password $PASS
subscription-manager role --set="Red Hat Enterprise Linux Server"
subscription-manager service-level --set="Self-Support"
subscription-manager usage --set="Development/Test"
subscription-manager attach
# Install test dependencies
dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm; sudo dnf config-manager --set-enabled epel
dnf install -y ansible jq qemu-img qemu-kvm libvirt-client libvirt-daemon-kvm virt-install git
git clone https://github.com/virt-s1/rhel-edge.git; cd rhel-edge
# The bundled SSH key must be private to be accepted by ssh.
chmod 600 key/ostree_key
./ostree.sh
echo "Done"
| true |
1ad316d9ba678a78851e281c40048a374f79d4ee
|
Shell
|
l04m33/dot-files
|
/.oh-my-zsh_custom/plugins/my-gpg-agent/my-gpg-agent.plugin.zsh
|
UTF-8
| 613 | 3.390625 | 3 |
[] |
no_license
|
# oh-my-zsh plugin: ensure a gpg-agent with SSH support is running and its
# environment (e.g. SSH_AUTH_SOCK) is loaded into this shell.
local GPG_AGENT_BIN=$(which gpg-agent)
local GPG_AGENT_ENV="$HOME/.gnupg/gpg-agent.env"
local GPG_CONNECT_AGENT_ERR_MSG="gpg-connect-agent: no gpg-agent running in this session"
# Start a fresh agent and capture its environment file (mode 600 since it
# names the live sockets).
function start_agent_withssh {
    ${GPG_AGENT_BIN} --quiet --enable-ssh-support --daemon 2> /dev/null > "${GPG_AGENT_ENV}"
    chmod 600 "${GPG_AGENT_ENV}"
    . "${GPG_AGENT_ENV}" > /dev/null
}
# Probe for a running agent without auto-starting one; start our own only
# when the probe reports none, otherwise just source the saved env.
if [[ "$(gpg-connect-agent --no-autostart --quiet /bye 2>&1)" == "$GPG_CONNECT_AGENT_ERR_MSG" ]]; then
    start_agent_withssh
elif [[ -f "${GPG_AGENT_ENV}" ]]; then
    . "${GPG_AGENT_ENV}" > /dev/null
fi
# Pinentry needs to know the controlling terminal.
GPG_TTY=$(tty)
export GPG_TTY
| true |
b6480e9181b915dd440e038141a5345ca615b78f
|
Shell
|
gleb-britecore/uptimer-api
|
/deployment/restart_if_git_changed.sh
|
UTF-8
| 311 | 2.890625 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Poll helper: if the upstream branch has new commits, hard-reset to it
# and restart the containers; otherwise do nothing.
git fetch --all
# Compare local HEAD with the upstream tracking ref (@{u}).
if [[ $(git rev-parse HEAD) == $(git rev-parse @{u}) ]]; then
    echo 'remote branch has no changes, update will be skipped'
    exit
fi
# Discard any local modifications before pulling the new code.
git reset --hard
git pull
echo 'Found new code on remote branch.. Restarting containers'
source set_docker_env.sh
bash ./restart.sh
| true |
f1b575dddb07bcbae74c6cacdddad2394e304ce3
|
Shell
|
golemfactory/concent-deployment
|
/containers/signing-service/clean-container.sh
|
UTF-8
| 329 | 2.546875 | 3 |
[] |
no_license
|
#!/bin/bash -e
# Image-size cleanup: purge the build-time-only packages listed in the two
# dependency files, then drop apt caches, temp files and pip cache.
readarray temporary_dependencies <<< $(cat ${BASH_SOURCE%/*}/build-dependencies.txt ${BASH_SOURCE%/*}/pyenv-build-dependencies.txt)
# Clean up
apt-get clean
apt-get remove --purge --assume-yes ${temporary_dependencies[*]}
apt autoremove --assume-yes
rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* ~/.cache/pip/*
| true |
a885645a6be3644a37c0cbf9132ca67d47a77549
|
Shell
|
levindoneto/IoT-Modelling-Tool
|
/scripts/bundle_windows.sh
|
UTF-8
| 847 | 3.125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Script for bundling the react components
# Author: Levindo Gabriel Taschetto Neto
# Advisors: Prof. Dr.-Ing. habil. Bernhard Mitschang,
# M.Sc. Ana Cristina Franco da Silva
# Dipl.-Inf. Pascal Hirmer
# Requirements:
# - Node JS (npm) installed
sed -i 's/\r$//' init.sh; # Remove trailing \r character
clear;
echo "Setting Up and Initializing the IoT Modelling Tool";
echo "___________________________________________________";
echo "Setting Webpack up (This might take a few seconds)";
npm install -g webpack@3.10.0; # Module bundler
echo "___________________________________________________";
echo "Bundling React JS Components (Wait until webpack has finished watching all the files)";
pushd ../public/app/modules/dashboard/digital_environment/src/main/js/; # Go to the directory where the web app's files are in
webpack -p; # Bundle and minify (-p) the react js components
| true |
8ce648fddcd046563e6f8d877fffd7591d62c513
|
Shell
|
Piraty/dotfiles
|
/bin/x-init
|
UTF-8
| 526 | 2.9375 | 3 |
[] |
no_license
|
#!/bin/sh
# initialize everything related to X
# Wallpaper: try the default image, fall back to a random one from the
# wallpaper directory, and finally to a plain black root window.
set_wallpaper() {
    (
    feh --no-fehbg --bg-fill ~/.wallpaper/default ||
        feh --no-fehbg --bg-fill --recursive --randomize ~/.wallpaper ||
        xsetroot -solid "#000000"
    ) 2>/dev/null
}
set_keyboard() {
    # default X env
    command -v xrdb >/dev/null && xrdb -merge -I"$HOME" ~/.Xresources
    # key mapping
    setxkbmap 'us(altgr-intl)'
    #numlockx
    xmodmap ~/.Xmodmap
    # Restart xcape cleanly so only one instance maps the key.
    killall -q xcape
    xcape -e 'Mode_switch=Escape' #make single CAPS tap act like ESC
}
set_wallpaper
set_keyboard
| true |
071a30f995c4fbbb7460873dfe03af63f56a3782
|
Shell
|
smileyspoon/665Midterm_Examples
|
/updateAll.sh
|
UTF-8
| 134 | 2.875 | 3 |
[] |
no_license
|
#!/bin/bash
# Make every file with an extension executable, then commit and push all
# changes.  $1 is the commit message (default: "Update").
ARG=${1:-"Update"}
for X in *.*
do
    # FIX: skip the literal pattern when nothing matches, and quote the
    # filename so names with spaces do not word-split.
    [ -e "$X" ] || continue
    chmod u+x -- "$X"
done
git add *
git commit -a -m "$ARG"
git push -u origin master
| true |
4dbc9726d4ea1c7b97cb8f9e34fa27dff6848f2f
|
Shell
|
Munter/opencollective-api
|
/scripts/watch_email_template.sh
|
UTF-8
| 337 | 3.25 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
TEMPLATE="templates/emails/${1}.hbs"
function compileTemplate {
echo "Compiling $1";
babel-node scripts/compile-email.js $1 > /tmp/$1.html
echo "Done: /tmp/$1.html";
return 0
}
export -f compileTemplate
compileTemplate $@
echo "Watching ${TEMPLATE}";
fswatch -o templates/ | xargs -n1 bash -c "compileTemplate "$@""
| true |
42794d551e98590add5381311e401b8ae9c64877
|
Shell
|
Adrian123K/linux
|
/find_word.sh
|
UTF-8
| 308 | 2.90625 | 3 |
[
"MIT"
] |
permissive
|
echo "-------------------------------------------------"
echo -n "스크립트를 입력하세요 : "
read script
echo -n "검색할 단어를 입력하세요 : "
read word
cnt=`grep -i $word $script | wc -l`
echo "$cnt행이 검색 되었습니다."
echo "-------------------------------------------------"
| true |
61766aa8b4180f59fc875df819b16dd386f205c6
|
Shell
|
haoshuncheng/scripts
|
/install/redis/new_instance.sh
|
UTF-8
| 685 | 3.4375 | 3 |
[] |
no_license
|
#!/bin/sh
# Provision a new Redis instance on the given port: init script, config
# file (cache or persistent "db" flavour), data dir, then (re)start it.
# Expects $root to be defined by ../header.sh.
source ../header.sh
# $1 = port number, $2 = "db" for persistent config (default: cache-only).
config() {
    if [ x$1 = "x" ];then
        echo "pls tell me port."
        exit
    fi
    port=$1
    # Per-port init script, derived from the template by replacing the
    # default port 6379.
    cp $root/redis_init /etc/init.d/redis_$port
    sed -i s/6379/$port/g /etc/init.d/redis_$port
    if [ x$2 = "xdb" ];then
        echo "db option enabled!"
        cp $root/redis_db.conf /usr/local/etc/redis/$port.conf
    else
        echo "just cache, not db."
        cp $root/redis.conf /usr/local/etc/redis/$port.conf
    fi
    sed -i s/6379/$port/g /usr/local/etc/redis/$port.conf
    chmod +x /etc/init.d/redis_$port
    #mkdir -p /opt/redis/var/$port
    mkdir -p /home/data/redis/$port
    # NOTE(review): chown of /logs to user "www" looks unrelated to this
    # instance's data dir — confirm.
    chown -R www /logs
    /etc/init.d/redis_$port stop
    /etc/init.d/redis_$port start
}
config $1 $2
| true |
1c9bbf51d31328dc79d352cabde9858a335ce48f
|
Shell
|
avary/rom_build_tools
|
/build.sh
|
UTF-8
| 4,398 | 3.796875 | 4 |
[] |
no_license
|
#!/bin/bash
# Copyright (C) 2011 SuperTeam Development Group.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ROM build driver: resolve the target device from $1, set up the build /
# release / patch directories and read per-user build options.
# Initialize variables
SCRIPTDIR=`dirname $0`
TOPDIR=`pwd`
. $SCRIPTDIR/mensajes.sh
if [ $# -lt 1 ]
then
    msgErr "Usage: $0 <device>"
    exit 1
fi
# Locate the device makefile team_<device>.mk under device/.
MAINFILE=`find device -name team_$1.mk`
if [ -z $MAINFILE ]
then
    msgErr "No se ha encontrado el dispositivo $1"
    exit 1
fi
SUBDEVICE=`grep -G ^PRODUCT_SUBDEVICE $MAINFILE`
# NOTE(review): [ -n $SUBDEVICE ] with an unquoted, possibly-empty value is
# always true, so DEVICE is always $1 and the else branch is dead — confirm
# the intended quoting.
if [ -n $SUBDEVICE ]; then
    DEVICE=$1
else
    DEVICE=$SUBDEVICE
fi
ROMDIR=$TOPDIR/../cache/roms/$DEVICE
BUILDDIR=$ROMDIR/last_build
RELEASEDIR=$ROMDIR/last_release
PATCHDIR=$ROMDIR/last_patch
PUBLICDIR=$ROMDIR/last_public
CONFIGFILE=$HOME/.SuperOSR.conf
# Look for per-user build customizations (core count, ccache).
if [ -f $CONFIGFILE ]; then
    CORES=$( grep CORES $CONFIGFILE | cut -f 2 -d "=" )
    # Default to the number of online processors.
    if [ -z "$CORES" ]; then
        CORES=$( cat /proc/cpuinfo | grep -c processor )
    fi
    USE_CCACHE=$( grep USE_CCACHE $CONFIGFILE | cut -f 2 -d "=" )
    if [ -n "$USE_CCACHE" ] && [ "$USE_CCACHE" = "1" ]; then
        export USE_CCACHE=1
    else
        unset USE_CCACHE
    fi
fi
function compilar(){
#borramos los objetos para forzar que se copie la ROM entera
rm -rf $OUT/recovery $OUT/root $OUT/system $OUT/kernel 2&> /dev/null
make -j${CORES} otapackage
if [ "$?" -eq 0 ]; then
msgOK "Compilación correcta"
else
msgErr "Error en compilación"
FAIL=true
fi
}
function squishear(){
SQUISHER=`find vendor -name squisher*`
$SQUISHER
if [ "$?" -eq 0 ]; then
msgOK "Personalización correcta"
else
msgErr "Error al ejecutar squisher"
FAIL=true
fi
}
function reiniciar(){
echo "1: Normal"
echo "2: Recovery"
echo "3: HBoot"
echo "4: Apagar"
read option
if [ ! -z $option ]
then
case $option in
1) adb reboot
;;
2) adb reboot recovery
;;
3) adb reboot bootloader
;;
4) adb shell halt
esac
fi
}
function sincronizar(){
$SCRIPTDIR/sincronizar.sh $ROMDIR
if [ "$?" -eq 0 ]; then
msgOK "Sincronización correcta"
else
msgErr "Error al sincronizar"
FAIL=true
fi
}
function makeClean(){
echo "¿Estás seguro que quieres empezar de cero? (s/N)"
read option
option=${option:="N"}
if [ "$option" = "s" ] || [ "$option" = "S" ]; then
make clean
fi
}
function parchear(){
if [ ! -d $PUBLICDIR ]; then
msgWarn "No existe un directorio con la versión actualmente publicada. Se crea uno nuevo. La propia ROM es el parche."
cp -r $BUILDDIR $PUBLICDIR
else
if [ -d $PATCHDIR ]; then
rm -r $PATCHDIR
fi
mkdir $PATCHDIR
msgStatus "Calculando las diferencias con la anterior versión publicada"
$SCRIPTDIR/sacadiff.sh $BUILDDIR $PUBLICDIR $ROMDIR/public.diff.txt
$SCRIPTDIR/fromdiff.sh $ROMDIR/public.diff.txt $PATCHDIR patch
$SCRIPTDIR/updater.sh $DEVICE
fi
}
while true
do
#inicializamos estados
msgStatus "Compilando la rom del SuperTeam para el dispositivo $1"
echo "Elige una opción:"
echo " 1: make"
echo " 2: squisher"
echo " 3: sincronizar"
echo " 4: crear parche"
echo " 5: make + squisher + sincronizar"
echo " 6: limpiar build"
echo " 7: Reiniciar/Apagar dispositivo"
echo " 8: Compilar kernel"
echo " 9: Cambiar boot"
echo "99: salir"
read option
FAIL=false
if [ $option -eq 99 ]; then
exit 0
fi
if [ "$OUT" = "" ]; then
. build/envsetup.sh
lunch team_$DEVICE-eng
if [ "$?" -ne 0 ]; then
continue
fi
fi
case $option in
1)
compilar
;;
2)
squishear
;;
3)
sincronizar
;;
4)
parchear
;;
5)
compilar
if ! $FAIL ; then
squishear
fi
if ! $FAIL ; then
sincronizar
fi
;;
6)
makeClean
;;
7)
reiniciar
;;
8)
$SCRIPTDIR/kernel.sh $DEVICE
;;
9)
fastboot flash boot $OUT/boot.img
esac
done
| true |
bac785a895827580c909dd1070827c37cae2cda1
|
Shell
|
joeybaker/gymnastics-text
|
/run-local.sh
|
UTF-8
| 1,807 | 4 | 4 |
[] |
no_license
|
#! /bin/bash
port=1337
image_name="gymnasticstext_node-app"
build=1
function run_node_app {
have_docker=0
have_compose=0
hash docker-compose 2>/dev/null || have_compose=1
hash docker 2>/dev/null || have_docker=1
if [ "$have_compose" -eq 0 ]; then
if [ "$build" -eq 0 ]; then
echo "Running docker-compose with build..."
docker-compose up --build -d &>/dev/null
else
echo "Running docker-compose..."
docker-compose up -d &>/dev/null
fi
elif [ "$have_docker" -eq 0 ]; then
echo "No docker-compose, running with docker..."
docker run --rm -d -v "$(pwd)":/app -p 1337:1337 --name ${image_name}_1 $image_name npm start &>/dev/null
else
echo "No docker-compose or Docker, running with npm..."
npm start &>/dev/null &
fi
}
function run_ngrok {
have_ngrok=0
ngrok_local=0
hash ngrok 2>/dev/null || have_ngrok=1
if [ "$have_ngrok" -eq 0 ]; then
echo "Ngrok in path..."
ngrok http $port
elif [ -e ./ngrok ]; then
echo "Running local ./ngrok..."
./ngrok http $port
else
echo "Unable to run ngrok, exiting..."
exit 1
fi
}
function main {
echo "Running GymnasticsText + Ngrok"
echo "_______________________________"
if [[ "$arg1" == "-h" ]]; then
echo "run-local.sh [-h] [-b]"
echo "-h, for help message"
echo "-b, add build argument to docker-compose"
exit 0
elif [[ "$arg1" == "-b" ]]; then
echo "Build option selected..."
build=0
fi
run_node_app
run_ngrok
echo "Cleaning up container if needed..."
echo "--> trying ${image_name}_1..."
docker rm -f ${image_name}_1 || true
echo "...done"
}
arg1=$1
main
| true |
a6f158b0058fab78ddecd60534a1d10acbadaf31
|
Shell
|
oconnormi/packer-templates
|
/centos/centos-7-jdk/scripts/install-vbox-additions.sh
|
UTF-8
| 619 | 3.234375 | 3 |
[] |
no_license
|
#!/bin/bash
_user=vagrant
_user_home=/home/$_user
_version=$(cat ${_user_home}/.vbox_version)
_kernel_version=$(uname -r)
_kernel_pkg=kernel-devel-$(uname -r)
_required_pkgs="gcc bzip2 make kernel-devel-${_kernel_version} gcc-c++ zlib-devel openssl-devel readline-devel"
_vbox_additions_path=${_user_home}/VBoxGuestAdditions_${_version}.iso
if [ -f "${_user_home}/.vbox_version" ]; then
yum install -y ${_required_pkgs}
mount -o loop ${_vbox_additions_path} /mnt
sh /mnt/VBoxLinuxAdditions.run --nox11
umount /mnt
rm -rf ${_vbox_additions_path}
rm -rf ${_user_home}/.vbox_version
yum clean -y all
fi
| true |
88f632513290bd468b0947b40f82ac32bf5c9fa2
|
Shell
|
mworion/MountWizzard4
|
/support/2.0/MacOSx/MW4_Run.command
|
UTF-8
| 2,730 | 3.296875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
cd $(dirname "$0")
#
# run script for macOS
# (c) 2021 mworion
#
echo
echo ---------------------------------------------
echo
echo " ███╗ ███╗ ██╗ ██╗ ██╗ ██╗"
echo " ████╗ ████║ ██║ ██║ ██║ ██║"
echo " ██╔████╔██║ ██║ █╗ ██║ ███████║"
echo " ██║╚██╔╝██║ ██║███╗██║ ╚════██║"
echo " ██║ ╚═╝ ██║ ╚███╔███╔╝ ██║"
echo " ╚═╝ ╚═╝ ╚══╝╚══╝ ╚═╝"
echo
echo " ██████╗ ███████╗ ██╗ ██╗"
echo " ██╔═══██╗██╔════╝ ╚██╗██╔╝"
echo " ██║ ██║███████╗█████╗╚███╔╝ "
echo " ██║ ██║╚════██║╚════╝██╔██╗ "
echo " ╚██████╔╝███████║ ██╔╝ ██╗"
echo " ╚═════╝ ╚══════╝ ╚═╝ ╚═╝"
echo
echo ---------------------------------------------
echo run script version 2.2
echo ---------------------------------------------
echo run script version 2.2 > run.log 2>&1
if [ ! -f ./venv/bin/activate ]; then
echo
echo ---------------------------------------------
echo no valid virtual environment installed
echo ---------------------------------------------
exit
fi
source ./venv/bin/activate venv >> run.log 2>&1
echo
echo ---------------------------------------------
echo checking installed python version
echo ---------------------------------------------
echo Checking environment and start script >> run.log 2>&1
# get version of python3 installation
T=$(python3 --version)
# get version of python3 installation
T=$(python3 --version)
# check which valid version is installed
if [[ $T == *"3.9"* ]]; then
P_VER="python3.9"
elif [[ $T == *"3.8"* ]]; then
P_VER="python3.8"
elif [[ $T == *"3.7"* ]]; then
P_VER="python3.7"
fi
echo variable P_VER has value of $P_VER >> run.log 2>&1
if [ "${P_VER:0:6}" == "python" ]; then
echo
echo ---------------------------------------------
echo python version ok: ${P_VER}
echo ---------------------------------------------
else
echo
echo ---------------------------------------------
echo no valid python version installed
echo please run MW4_Install.command first
echo ---------------------------------------------
exit
fi
COMMAND="python ./venv/lib/$P_VER/site-packages/mw4/loader.py &"
eval ${COMMAND} >> run.log 2>&1
deactivate >> run.log 2>&1
| true |
6b816c8f1b663236750d9e61f0f654d8839860e2
|
Shell
|
Mic92/xfstests-cntr
|
/tests/generic/114
|
UTF-8
| 1,106 | 3.3125 | 3 |
[] |
no_license
|
#! /usr/bin/env bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2015 Red Hat, Inc. All Rights Reserved.
#
# FS QA Test No. 114
#
# Test races while extending past EOF via sub-block AIO writes
#
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
here=`pwd`
tmp=/tmp/$$
status=1 # failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
cd /
rm -f $TEST_DIR/tst-aio-dio-eof-race
}
# get standard environment, filters and checks
. ./common/rc
. ./common/filter
_supported_fs generic
_require_test
_require_sparse_files
_require_aiodio aio-dio-eof-race
# Test does 512 byte DIO, so make sure that'll work
logical_block_size=`_min_dio_alignment $TEST_DEV`
if [ "$logical_block_size" -gt "512" ]; then
_notrun "device block size: $logical_block_size greater than 512"
fi
# We don't mind 512-byte fs blocks; the IOs won't be sub-block,
# but the test should still pass, even if it doesn't stress the code
# we're targeting.
# Note, this test does several extending loops internally
$AIO_TEST $TEST_DIR/tst-aio-dio-eof-race
status=$?
exit
| true |
4141e06197be87989fb5a758f86c38670a1af645
|
Shell
|
AmmarkoV/WebSniper
|
/uninstall.sh
|
UTF-8
| 769 | 2.78125 | 3 |
[] |
no_license
|
#!/bin/bash
if [ -e /usr/bin/WebSniper ]
then
sudo rm /usr/bin/WebSniper
else
#nothing to do :P
fi
if [ -e /usr/share/icons/websnipericon.png ]
then
sudo rm /usr/share/icons/websnipericon.png
else
#nothing to do :P
fi
if [ -e /usr/share/applications/WebSniper.desktop ]
then
sudo rm /usr/share/applications/WebSniper.desktop
else
#nothing to do :P
fi
if [ -d "/usr/share/websniper" ]; then
echo "WebSniper Installation detected , uninstalling it "
sudo rmdir -r /usr/share/websniper
else
#nothing to do :P
fi
if [ -d "~/.WebSniper" ]; then
echo "WebSniper Installation detected in home , uninstalling it "
sudo rmdir -r ~/.WebSniper
else
#nothing to do :P
fi
echo "Done"
exit 0
| true |
b3b39edaba7f48621585b0ca23ab2d9189b91544
|
Shell
|
JustinHoyt/.dotfiles
|
/setup_linux.sh
|
UTF-8
| 235 | 2.640625 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
if ! command -v nix &> /dev/null; then
sh <(curl -L https://nixos.org/nix/install) --daemon
fi
echo "${HOME}/.nix-profile/bin/zsh" | sudo -S tee -a /etc/shells
su - $USER -c 'bash ~/.dotfiles/setup_common.sh'
| true |
145de9a6eaaf0af593b71872a15969cfe0e92d5c
|
Shell
|
dwaraka/mymote
|
/tinyos.sh
|
UTF-8
| 397 | 2.5625 | 3 |
[] |
no_license
|
#! /usr/bin/env bash
# Here we setup the environment
# variables needed by the tinyos
# make system
echo "Setting up for TinyOS"
export TOSROOT=
export TOSDIR=
export MAKERULES=
TOSROOT="/opt/tinyosX"
TOSDIR="$TOSROOT/tos"
CLASSPATH=$CLASSPATH.:$TOSROOT/support/sdk/java/tinyos.jar
MAKERULES="$TOSROOT/support/make/Makerules"
export TOSROOT
export TOSDIR
export CLASSPATH
export MAKERULES
| true |
b123193f5cde4e70c63108abadc5a8811a4521a3
|
Shell
|
snappyflow/helm-charts
|
/charts/argocd/argocd_password_azure.sh
|
UTF-8
| 1,645 | 3.015625 | 3 |
[] |
no_license
|
#!/bin/bash -x
export PATH=/opt/microsoft/powershell/7:~/.local/bin:~/bin:~/.dotnet/tools:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/istio-latest/bin:/usr/local/linkerd/bin:/usr/lib/golang/bin:/opt/mssql-tools18/bin:~/bundle/bin:~/bundle/gems/bin:/home/asraf/.local/share/powershell/Scripts
###Package required for interactive session for argocd login
echo 'debconf debconf/frontend select Noninteractive' | debconf-set-selections
sudo apt-get -y install expect
###Installation of ArgoCD cli
sudo curl -sSL -o argocd-linux-amd64 https://github.com/argoproj/argo-cd/releases/latest/download/argocd-linux-amd64
sudo install -m 555 argocd-linux-amd64 /usr/local/bin/argocd
sudo rm argocd-linux-amd64
###command to get ArgoCD server LoadBalancer IP
argocdserver=$(kubectl get svc argocd-server -n argocd | awk NR==2'{print $4}')
###command to get ArgoCD initial password
argocd admin initial-password -n argocd > test
argocdpassword=$(sudo cat test | awk NR==1)
###Interactive session for ArgoCD login CLI
echo "Session for ArgoCD login CLI"
argocd login $argocdserver --username admin --password $argocdpassword --insecure
sleep 10
###Interactive session for updating ArgoCD password
echo "CLI to update ArgoCD password"
argocd account update-password --current-password $argocdpassword --new-password admin@123 --server $argocdserver --insecure
sleep 10
###Interactive session for ArgoCD login CLI
echo "Session for ArgoCD login CLI with new password"
argocd login $argocdserver --username admin --password admin@123 --insecure
sleep 10
sudo rm -rf test
| true |
878e59df564edee5dcc93d0e7c335746b49b6bca
|
Shell
|
vboykox/bare
|
/pkg/sinit.sh
|
UTF-8
| 403 | 2.796875 | 3 |
[
"ISC"
] |
permissive
|
ver 1.0
epoc 1
dist $URI_SUCK/$PKG_NAME/$PKG_NAME-${PKG_VER}.tar.gz
pre_configure() {
ed config.mk <<-EOF
,s|^\(CC = \).*\$|\1$CC
,s|^\(PREFIX = \).*\$|\1$MK_PREFIX
/^CFLAGS/s|=|+=
w
q
EOF
ed config.def.h <<-EOF
,s|/bin/rc.init|/etc/rc
,s|/bin/rc.shutdown|/etc/rc
w
q
EOF
ed Makefile <<-"EOF"
,s|$(PREFIX)/bin|/sbin
w
q
EOF
}
post_install() {
ln -sf sinit $MK_DESTDIR/sbin/init
}
| true |
2338ce88a63728f9eeb2c285f8610581b799890c
|
Shell
|
LotharKAtt/packer-trymcp-day01
|
/scripts/salt_bootstrap.sh
|
UTF-8
| 3,579 | 3.328125 | 3 |
[] |
no_license
|
#!/bin/bash -xe
if [ -f '/done_ubuntu_salt_bootstrap' ]; then
echo "INFO: ubuntu_salt_bootstrap already finished! Skipping.."
exit 0
fi
#
CLUSTER_NAME=${CLUSTER_NAME:-lost_cluster_name_variable}
CLUSTER_MODEL=${CLUSTER_MODEL:-https://github.com/Mirantis/mcp-offline-model.git}
CLUSTER_MODEL_REF=${CLUSTER_MODEL_REF:-master}
FORMULA_VERSION=${FORMULA_VERSION:-testing}
SALTSTACK_GPG=${SALTSTACK_GPG:-"https://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7/SALTSTACK-GPG-KEY.pub"}
SALTSTACK_REPO=${SALTSTACK_REPO:-"http://repo.saltstack.com/apt/ubuntu/16.04/amd64/2017.7 xenial main"}
APT_MIRANTIS_GPG=${APT_MIRANTIS_GPG:-"http://apt.mirantis.com/public.gpg"}
APT_MIRANTIS_SALT_REPO=${APT_MIRANTIS_SALT_REPO:-"http://apt.mirantis.com/xenial/ $FORMULA_VERSION salt"}
GIT_SALT_FORMULAS_SCRIPTS=${GIT_SALT_FORMULAS_SCRIPTS:-"https://github.com/salt-formulas/salt-formulas-scripts"}
GIT_SALT_FORMULAS_SCRIPTS_REF=${GIT_SALT_FORMULAS_SCRIPTS_REF:-master}
function process_repos(){
# TODO: those should be unhardcoded and re-writed, using CC model
wget -O - ${SALTSTACK_GPG} | sudo apt-key add -
wget -O - ${APT_MIRANTIS_GPG} | apt-key add -
wget -O - http://mirror.mirantis.com/${FORMULA_VERSION}/extra/xenial/archive-extra.key | apt-key add -
echo "deb [arch=amd64] ${SALTSTACK_REPO}" > /etc/apt/sources.list.d/mcp_saltstack.list
echo "deb [arch=amd64] http://mirror.mirantis.com/${FORMULA_VERSION}/extra/xenial xenial main" > /etc/apt/sources.list.d/mcp_extra.list
# This Pin-Priority fix should be always aligned with
# https://github.com/Mirantis/reclass-system-salt-model/blob/master/linux/system/repo/mcp/apt_mirantis/saltstack.yml
# saltstack
cat <<EOF >> /etc/apt/preferences.d/mcp_saltstack
Package: libsodium18
Pin: release o=SaltStack
Pin-Priority: 50
Package: *
Pin: release o=SaltStack
Pin-Priority: 1100
EOF
# reclass
cat <<EOF >> /etc/apt/preferences.d/mcp_extra
Package: *
Pin: release o=Mirantis
Pin-Priority: 1100
EOF
}
process_repos
apt-get update
apt-get install git-core reclass -y
rm -v /etc/apt/sources.list.d/mcp_extra.list /etc/apt/preferences.d/mcp_extra
for g_host in ${CLUSTER_MODEL} ${GIT_SALT_FORMULAS_SCRIPTS} ; do
_tmp_host=$(echo ${g_host} | awk -F/ '{print $3}')
ssh-keyscan -T 1 -H ${_tmp_host} >> ~/.ssh/known_hosts || true
done
if [[ ! -d /srv/salt/reclass ]]; then
git clone --recursive ${CLUSTER_MODEL} /srv/salt/reclass
pushd /srv/salt/reclass/
git checkout ${CLUSTER_MODEL_REF}
popd
fi
if [[ ! -d /srv/salt/scripts ]]; then
git clone --recursive ${GIT_SALT_FORMULAS_SCRIPTS} /srv/salt/scripts
pushd /srv/salt/scripts/
git checkout ${GIT_SALT_FORMULAS_SCRIPTS_REF}
popd
fi
# bootstrap.sh opts
export FORMULAS_SOURCE=pkg
export HOSTNAME=${BS_HOSTNAME:-lost_bs_hostname_variable}
export DOMAIN="${CLUSTER_NAME}.local"
export EXTRA_FORMULAS=${EXTRA_FORMULAS:-"ntp aptly nginx iptables docker git maas logrotate jenkins sphinx gerrit openldap keycloak"}
export APT_REPOSITORY=" deb [arch=amd64] ${APT_MIRANTIS_SALT_REPO} "
export APT_REPOSITORY_GPG=${APT_MIRANTIS_GPG}
export SALT_STOPSTART_WAIT=${SALT_STOPSTART_WAIT:-10}
echo "INFO: build in offline build!"
export BOOTSTRAP_SALTSTACK_COM="file:///opt/bootstrap.saltstack.com.sh"
# extra opts will push bootstrap script NOT install upstream repos.
export BOOTSTRAP_SALTSTACK_OPTS=${BOOTSTRAP_SALTSTACK_OPTS:- -dXr $BOOTSTRAP_SALTSTACK_VERSION }
#
if [[ ! -f /srv/salt/scripts/bootstrap.sh ]]; then
echo "ERROR: File /srv/salt/scripts/bootstrap.sh not found"
exit 1
fi
bash -x /srv/salt/scripts/bootstrap.sh || true
touch /done_ubuntu_salt_bootstrap
| true |
0dec58e25886545a6c0c2521bf29a93fe849565a
|
Shell
|
patnaikshekhar/KubeValidateImagePrefix
|
/install.sh
|
UTF-8
| 1,783 | 2.890625 | 3 |
[
"MIT"
] |
permissive
|
#/bin/bash
echo "Clean Up"
kubectl delete secret pod-validation-secret -n development
kubectl delete ValidatingWebhookConfiguration denypublicimages
kubectl delete -f k8s/deploy.yaml
rm -r certs
echo "Creating certs"
mkdir certs && cd certs
openssl genrsa -out ca.key 2048
openssl req -x509 -new -nodes -key ca.key -days 100000 -out ca.crt -subj "/CN=admission_ca"
cat >server.conf <<EOF
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = clientAuth, serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = pod-validation-service
DNS.2 = pod-validation-service.development
DNS.3 = pod-validation-service.development.svc
EOF
openssl genrsa -out tls.key 2048
openssl req -new -key tls.key -out server.csr -subj "/CN=pod-validation-service.development.svc" -config server.conf
openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key -CAcreateserial -out tls.crt -days 100000 -extensions v3_req -extfile server.conf
echo "Creating Secret"
kubectl create secret tls pod-validation-secret --cert=tls.crt --key=tls.key -n development
cd ..
echo "Installing Webhook Pods"
kubectl apply -f k8s
echo "Creating Webhook"
cat <<EOF | kubectl apply -f -
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
name: denypublicimages
webhooks:
- name: denypublicimages.shekharpatnaik.com
rules:
- apiGroups:
- ""
apiVersions:
- v1
operations:
- CREATE
resources:
- pods
failurePolicy: Fail
clientConfig:
service:
namespace: development
name: pod-validation-service
caBundle: $(cat ./certs/ca.crt | base64 | tr -d '\n')
EOF
| true |
8fe8af8cbb1bfd591bbaa4c068559a4527bb2295
|
Shell
|
Phoenix-626/CodeEditor-application
|
/scripts/workon.sh
|
UTF-8
| 434 | 4.09375 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Simple script to start working on some modules
#
#
#base="https://github.com/ReleaseStandard/"
base="file://$(pwd)/../"
if [ "$#" -eq "0" ] ; then
echo "You must give plugins as arguments here ..."
exit 1
fi
for plugin in "$@" ; do
echo "Working on $plugin"
url="${base}/${plugin}"
echo "Getting module from url=${url} ..."
git submodule add --depth=1 "$url"
rm -fr "${plugin}"
ln -rs "../${plugin}" ./
done
| true |
57a64534cec8858c3cc0c1da2d60a8bf79ff0e73
|
Shell
|
Surnagon/Sistemas_Embarcados
|
/Code/14_SPI/Ex1.sh
|
UTF-8
| 736 | 3.234375 | 3 |
[] |
no_license
|
#!/bin/bash
echo
echo Você leu README.md?
echo A SPI deverá estar habilitada para o código funcionar.
echo Vamos escrever na interface de comunicação SPI
echo através do arquivo /dev/spidev0.0.
echo
echo O comando
echo echo -ne \"\\x01\"
echo escreve o valor hexadecimal 0x01 na tela,
echo sem pular a linha \(\\n\).
echo O comando
echo echo -ne "\x01" \> /dev/spidev0.0
echo escreve o valor hexadecimal 0x01 em /dev/spidev0.0.
echo Se você tiver compilado e executado o código MSP430_code.c
echo no MSP430G2553, ele deverá piscar o LED uma vez.
echo
echo Pressione a tecla ENTER.
read
echo -ne "\x01" > /dev/spidev0.0
echo
echo Vamos piscar o LED 5 vezes.
echo Pressione a tecla ENTER.
read
echo -ne "\x05" > /dev/spidev0.0
| true |
d1a8fb221e58f94c52caeb0499850bdf2da00cfb
|
Shell
|
dannyge/Tools
|
/java/jar-conflict-detect.sh
|
UTF-8
| 2,264 | 3.59375 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#此脚本用于识别冲突的Jar包,可以在一个根目录下找到所有包含相同类的Jar包,并且根据相同类的多少来判断Jar包的相似度,
#常常用于某些功能上线不可用或者没有按照预期起到作用,使用此脚本分析是否存在两个版本的类,而老版本的类被Java虚拟机加载,
#其实,JVM规范并没有规定类路径下相同类的加载顺序,实现JVM规范的虚拟机的实现机制也各不相同,
#因此无法判断相同的类中哪个版本的类会被先加载,因此Jar包冲突是个非常讨厌的问题。
#命令格式:
# jar-conflict-detect 路径
if [ $# -eq 0 ];then
echo "please enter classpath dir"
exit -1
fi
if [ ! -d "$1" ]; then
echo "not a directory"
exit -2
fi
tmpfile="/tmp/.cp$(date +%s)"
tmphash="/tmp/.hash$(date +%s)"
verbose="/tmp/cp-verbose.log"
declare -a files=(`find "$1" -name "*.jar"`)
for ((i=0; i < ${#files[@]}; i++)); do
jarName=`basename ${files[$i]}`
list=`unzip -l ${files[$i]} | awk -v fn=$jarName '/\.class$/{print $NF,fn}'`
size=`echo "$list" | wc -l`
echo $jarName $size >> $tmphash
echo "$list"
done | sort | awk 'NF{
a[$1]++;m[$1]=m[$1]","$2}END{for(i in a) if(a[i] > 1) print i,substr(m[i],2)
}' > $tmpfile
awk '{print $2}' $tmpfile |
awk -F',' '{i=1;for(;i<=NF;i++) for(j=i+1;j<=NF;j++) print $i,$j}' |
sort | uniq -c | sort -nrk1 | while read line; do
dup=${line%% *}
jars=${line#* }
jar1=${jars% *}
jar2=${jars#* }
len_jar1=`grep -F "$jar1" $tmphash | grep ^"$jar1" | awk '{print $2}'`
len_jar2=`grep -F "$jar2" $tmphash | grep ^"$jar2" | awk '{print $2}'`
# Modified by Robert 2017.4.9
#len=$(($len_jar1 > $len_jar2 ? $len_jar1 : $len_jar2))
len_jar1=`echo $len_jar1 | awk -F' ' '{print $1}'`
len_jar2=`echo $len_jar2 | awk -F' ' '{print $1}'`
if [ $len_jar1 -gt $len_jar2 ]
then
len=$len_jar1
else
len=$len_jar2
fi
per=$(echo "scale=2; $dup/$len" | bc -l)
echo ${per/./} $dup $jar1 $jar2
done | sort -nr -k1 -k2 |
awk 'NR==1{print "Similarity DuplicateClasses File1 File2"}{print "%"$0}'| column -t
sort $tmpfile | awk '{print $1,"\n\t\t",$2}' > $verbose
echo "See $verbose for more details."
rm -f $tmpfile
rm -f $tmphash
| true |
39006be1aa9a2957aa336dec51325a797b1aa029
|
Shell
|
ilventu/aur-mirror
|
/yubico-c-git/PKGBUILD
|
UTF-8
| 1,413 | 2.875 | 3 |
[] |
no_license
|
# Maintainer: Christian Hesse <mail@eworm.de>
pkgname=yubico-c-git
pkgver=20120621
pkgrel=1
pkgdesc="Yubico yubikey C library"
arch=('i686' 'x86_64')
url="https://github.com/Yubico/yubico-c"
license=('BSD')
depends=('curl')
replaces=('libyubikey')
provides=('libyubikey' 'yubico-c')
conflicts=('libyubikey' 'yubico-c')
makedepends=('git')
install=yubico-c.install
source=('70-yubikey.rules' 'automake.patch')
_gitroot="git://github.com/Yubico/yubico-c.git"
_gitname="yubico-c"
build() {
cd "${srcdir}"
msg "Connecting to GIT server...."
if [ -d ${_gitname} ] ; then
cd ${_gitname} && git pull origin
msg "The local files are updated."
else
git clone ${_gitroot} ${_gitname}
fi
msg "GIT checkout done or server timeout"
msg "Starting make..."
rm -rf "${srcdir}/${_gitname}-build"
git clone "${srcdir}/${_gitname}" "${srcdir}/${_gitname}-build"
cd "${srcdir}/${_gitname}-build"
# fix automake 1.12
patch -Np1 < ${srcdir}/automake.patch
autoreconf --install
./configure --prefix=/usr
make
make check
}
package() {
cd "${srcdir}/${_gitname}-build"
install -D -m644 COPYING "${pkgdir}/usr/share/licenses/${_gitname}/COPYING"
install -D -m644 README "${pkgdir}/usr/share/doc/${_gitname}/README"
install -D -m644 "${srcdir}/70-yubikey.rules" "${pkgdir}/usr/lib/udev/rules.d/70-yubikey.rules"
make DESTDIR="${pkgdir}/" install
}
sha256sums=('5ae001cd6356115123a454db8dcdb1392aa5167460b52ee6e68f3509b4c3aa86'
'8158d780ef09d1f03b481cc050c332759c9dbce68a216b58090e6cb3d3284191')
| true |
cdf8257b75756384bffa020892aa2195a5e5393e
|
Shell
|
petronny/aur3-mirror
|
/u8it-git/PKGBUILD
|
UTF-8
| 666 | 3.296875 | 3 |
[] |
no_license
|
_pkgname=u8it
pkgname=$_pkgname-git
pkgver=20100323
pkgrel=1
pkgdesc="u8it is used for Wii development to unpack .arc files."
url="http://github.com/icefire/$_pkgname/tree"
license="GPL"
arch=('i686' 'x86_64')
depends=()
makedepends=('git' 'gcc')
provides=($_pkgname)
_gitroot="git://github.com/icefire/$_pkgname.git"
_gitname=$_pkgname
build() {
cd "$srcdir"
msg "Connecting to github.com GIT server...."
if [ -d $srcdir/$_gitname ] ; then
cd $_gitname && git pull origin
msg "The local files are updated."
else
git clone $_gitroot
cd $_gitname
fi
gcc $_pkgname.c -o u8it
mkdir -p $pkgdir/usr/bin
cp u8it $pkgdir/usr/bin
}
| true |
7f002f2328e7907a4605d8eac744e70eb7616e8d
|
Shell
|
kellydunn/rsvm
|
/rsvm.sh
|
UTF-8
| 4,527 | 3.953125 | 4 |
[
"MIT"
] |
permissive
|
# Rust Version Manager
# ====================
#
# To use the rsvm command source this file from your bash profile.
RSVM_VERSION="0.1.0"
# Auto detect the NVM_DIR
if [ ! -d "$RSVM_DIR" ]
then
export RSVM_DIR=$(cd $(dirname ${BASH_SOURCE[0]:-$0}) && pwd)
fi
if [ -e "$RSVM_DIR/current/dist/bin" ]
then
PATH=$RSVM_DIR/current/dist/bin:$PATH
fi
rsvm_use()
{
if [ -e "$RSVM_DIR/v$1" ]
then
echo -n "Activating rust v$1 ... "
rm -rf $RSVM_DIR/current
ln -s $RSVM_DIR/v$1 $RSVM_DIR/current
source $RSVM_DIR/rsvm.sh
echo "done"
else
echo "The specified version v$1 of rust is not installed..."
echo "You might want to install it with the following command:"
echo ""
echo "rsvm install $1"
fi
}
rsvm_current()
{
target=`echo echo $(readlink .rsvm/current)|tr "/" "\n"`
echo ${target[@]} | awk '{print$NF}'
}
rsvm_ls()
{
directories=`find $RSVM_DIR -maxdepth 1 -mindepth 1 -type d -exec basename '{}' \;|egrep "^v\d+\.\d+\.?\d*"`
echo "Installed versions:"
echo ""
if [ `grep -o "v" <<< "$directories" | wc -l` = 0 ]
then
echo ' - None';
else
for line in $(echo $directories | tr " " "\n")
do
if [ `rsvm_current` = "$line" ]
then
echo " => $line"
else
echo " - $line"
fi
done
fi
}
rsvm_init_folder_structure()
{
echo -n "Creating the respective folders for rust v$1 ... "
mkdir -p "$RSVM_DIR/v$1/src"
mkdir -p "$RSVM_DIR/v$1/dist"
echo "done"
}
rsvm_install()
{
current_dir=`pwd`
rsvm_init_folder_structure $1
cd "$RSVM_DIR/v$1/src"
if [ -f "rust-$1.tar.gz" ]
then
echo "Sources for rust v$1 already downloaded ..."
else
echo -n "Downloading sources for rust v$1 ... "
wget -q "http://static.rust-lang.org/dist/rust-$1.tar.gz"
echo "done"
fi
if [ -e "rust-$1" ]
then
echo "Sources for rust v$1 already extracted ..."
else
echo -n "Extracting source ... "
tar -xzf "rust-$1.tar.gz"
echo "done"
fi
cd "rust-$1"
echo ""
echo "Configuring rust v$1. This will take some time. Grep a beer in the meantime."
echo ""
sleep 5
./configure --prefix=$RSVM_DIR/v$1/dist --local-rust-root=$RSVM_DIR/v$1/dist
echo ""
echo "Still awake? Cool. Configuration is done."
echo ""
echo "Building rust v$1. This will take even more time. See you later ... "
echo ""
sleep 5
make && make install
echo ""
echo "And we are done. Have fun using rust v$1."
cd $current_dir
}
rsvm()
{
echo ''
echo 'Rust Version Manager'
echo '===================='
echo ''
case $1 in
""|help|--help|-h)
echo 'Usage:'
echo ''
echo ' rsvm help | --help | -h Show this message.'
echo ' rsvm install <version> Download and install a <version>. <version> could be for example "0.4".'
# echo ' rsvm uninstall <version> Uninstall a <version>.'
echo ' rsvm use <version> Activate <version> for now and the future.'
echo ' rsvm ls | list List all installed versions of rust.'
echo ''
echo "Current version: $RSVM_VERSION"
;;
--version|-v)
echo "v$RSVM_VERSION"
;;
install)
if [ -z "$2" ]
then
# whoops. no version found!
echo "Please define a version of rust!"
echo ""
echo "Example:"
echo " rsvm install 0.4"
elif ([[ "$2" =~ ^[0-9]+\.[0-9]+\.?[0-9]*$ ]])
then
if [ "$3" = "--dry" ]
then
echo "Would install rust v$2"
else
rsvm_install "$2"
fi
else
# the version was defined in a the wrong format.
echo "You defined a version of rust in a wrong format!"
echo "Please use either <major>.<minor> or <major>.<minor>.<patch>."
echo ""
echo "Example:"
echo " rsvm install 0.4"
fi
;;
ls|list)
rsvm_ls
;;
use)
if [ -z "$2" ]
then
# whoops. no version found!
echo "Please define a version of rust!"
echo ""
echo "Example:"
echo " rsvm use 0.4"
elif ([[ "$2" =~ ^[0-9]+\.[0-9]+\.?[0-9]*$ ]])
then
rsvm_use "$2"
else
# the version was defined in a the wrong format.
echo "You defined a version of rust in a wrong format!"
echo "Please use either <major>.<minor> or <major>.<minor>.<patch>."
echo ""
echo "Example:"
echo " rsvm use 0.4"
fi
;;
esac
echo ''
}
| true |
008bb65e39a7263145de0e6ef2f02785eb865356
|
Shell
|
rcline/development-environment
|
/dotfiles/.profile
|
UTF-8
| 455 | 3.46875 | 3 |
[] |
no_license
|
# Git bash prompt
function parse_git_dirty {
[[ $(git status 2> /dev/null | tail -n1) != "nothing to commit (working directory clean)" ]] && echo "*"
}
function parse_git_branch {
git branch --no-color 2> /dev/null | sed -e '/^[^*]/d' -e "s/* \(.*\)/ (\1$(parse_git_dirty))/"
}
RED="\[\033[0;31m\]"
YELLOW="\[\033[0;33m\]"
GREEN="\[\033[0;32m\]"
NO_COLOUR="\[\033[0m\]"
export PS1="$GREEN\u@\h$NO_COLOUR:\w$YELLOW\$(parse_git_branch)$NO_COLOUR\$ "
| true |
2547a739e8429bf28edfc434c56d67357fb1c708
|
Shell
|
divyanshu-talwar/B-Plus-Tree
|
/src/run_top_down.sh
|
UTF-8
| 489 | 2.9375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
echo "4" > bplustree.inp
echo "1000" >> bplustree.inp
echo "Generating 1000 random numbers to feed to the B + Tree"
python input.py > in
echo "Running External Merge Sort on the Random data generated...."
javac ExternalMergeSort.java
java ExternalMergeSort in out
cat out >> bplustree.inp
echo "Done sorting the data!"
echo "Doing top down insertion..."
javac BPlusTreeTopDown.java
java BPlusTreeTopDown
echo "Completed! See the bplustree.out file to check the output."
exit 0
| true |
0b2b80d7345f3860d3cf420a4290e25a0ddd21bb
|
Shell
|
davidaq/aq-web-front-node
|
/lib/res/minifyall.sh
|
UTF-8
| 221 | 2.921875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
pushd `dirname $0` > /dev/null
find . -name \*.min.js -exec rm -f {} \; ; find . -name \*.js | while read x; do echo $x; uglifyjs $x -c -m -o ${x:0:${#x}-3}.min.js ; done
rm libPreset*.min.js
popd > /dev/null
| true |
ec047db3f4aae5877e68a65a151ce3010bd49eeb
|
Shell
|
ik-veon-dev/dotplant3
|
/vagrant-env/provision_init.sh
|
UTF-8
| 1,563 | 3.21875 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
ENV_DIR="/vagrant/vagrant-env"
MYSQL_DATABASE="dotplant3"
echo "==== apt update ===="
apt-get update
echo "==== Installing dependencies ===="
apt-get install -y mc htop memcached nodejs npm
locale-gen ru_RU.UTF-8 en_US.UTF-8
echo "==== Creating database ===="
mysqladmin -uroot -pvagrant create $MYSQL_DATABASE
echo "==== Configuring web server ====="
cp $ENV_DIR/nginx/dotplant3.dev.conf /etc/nginx/conf.d/
# create local db config for running vagrant mysql db
echo "<?php
return [
'dsn' => 'mysql:host=127.0.0.1;dbname=dotplant3',
'username' => 'vagrant',
'password' => 'vagrant',
];
" > /vagrant/config/db-local.php
echo "<?php
return [
'192.168.33.1',
];
" > /vagrant/config/dev-ips-local.php
echo "<?php
return [
'class' => 'yii\caching\MemCache',
'useMemcached' => true,
];
" > /vagrant/config/cache-local.php
chown vagrant:vagrant /vagrant/config/*-local.php
# restart nginx
service nginx restart
echo "==== Installing gulp ===="
npm install -g gulp
ln -s /usr/bin/nodejs /usr/local/bin/node
echo "==== Installing composer ===="
if [ -f $ENV_DIR/auth.json ]
then
mkdir -p /home/vagrant/.composer
cp $ENV_DIR/auth.json /home/vagrant/.composer/
chown -R vagrant:vagrant /home/vagrant/.composer/
fi
composer self-update
su vagrant -c 'composer global require fxp/composer-asset-plugin ~1.1.1 --prefer-dist'
echo "==== Installing base dotplant3 ===="
cd /vagrant
su vagrant -c 'composer install --prefer-dist'
# run
su vagrant -c './yii migrate --interactive=0'
echo "==== DONE ===="
| true |
3eef86c94f6e7b7f62f9fbaaba1c79d924208286
|
Shell
|
stonier/ckx_tools
|
/setup.bash
|
UTF-8
| 641 | 2.921875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Script for setting up the development environment.
if [ "${VIRTUAL_ENV}" == "" ]; then
workon ckx_tools
if [ $? -ne 0 ]; then
mkvirtualenv ckx_tools
if [ $? -ne 0 ]; then
sudo apt-get install virtualenvwrapper
mkvirtualenv ckx_tools
fi
# probably some python setup.py target which will do this for you
pip install catkin_pkg
pip install pyyaml
pip install vcstool
pip install vci
pip install rospkg
fi
fi
# Always pulling for now
python setup.py develop
echo ""
echo "Leave the virtual environment with 'deactivate'"
echo ""
echo "I'm grooty, you should be too."
echo ""
| true |
9ac480ea99a444faf3793b783d78b0f9eb5cfe58
|
Shell
|
OpenGeoscience/fsqio
|
/scripts/upkeep/check.sh
|
UTF-8
| 753 | 4.0625 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run upkeep tasks whose "required" version differs from the recorded
# "current" version. With an argument, check only that task; otherwise
# check every file under $DIR/required.
DIR=$(dirname "${BASH_SOURCE[${#BASH_SOURCE[@]} - 1]}")
source "$DIR"/run.sh
set -e
shopt -s nullglob # without this, empty $DIR would expand * to literal $DIR/*
ran=""
# Compare one required-version file ($1) against its "current" record and
# run the task when they differ; appends the task name to $ran.
# All expansions are quoted so paths/values with spaces survive (SC2086).
function check() {
task=$(basename "$1")
required=$(cat "$1")
current=""
if [ -f "$DIR"/current/"$task" ]; then
current=$(cat "$DIR"/current/"$task")
fi
if [ "$current" != "$required" ]; then
echo "Running upkeep $task (current: '$current' vs required: '$required')..."
run_task "$task" "$current"
ran="$ran $task"
fi
}
if [ "$1" != "" ]; then
check "$DIR"/required/"$1"
else
for req_file in "$DIR"/required/*; do
check "$req_file"
done
fi
if [ "$ran" != "" ]; then
echo
echo "$(date +%H:%M:%S) Finished running upkeep tasks: $ran"
fi
| true |
05954cc7cae237a8d976912572e70b0b23a6e67a
|
Shell
|
QaDeS/update-docker-dns
|
/update-docker-dns.sh
|
UTF-8
| 5,662 | 3.625 | 4 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# update-docker-dns is used to selectively expose docker containers by name
# Copyright (c) 2018, Sagi Zeevi (www.theimpossiblecode.com)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the organization nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY yourname ''AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL yourname BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Debounce window: docker events are batched for this many seconds before
# the hosts file is regenerated.
WAIT_EVENS_TO=10
ECHO="/bin/echo"
# Path to the addn-hosts file
CONTAINER_HOSTS=/etc/docker-container-hosts
DOMAIN_NAME=`domainname`
# Treat a dot-less domainname as "no domain": grep -v '\.' passes the name
# through only when it contains no dot, making the test string non-empty.
if [ `$ECHO "$DOMAIN_NAME" | grep -v '\.'` ]; then
DOMAIN_NAME=""
fi
# Rebuild $CONTAINER_HOSTS from the currently running containers and HUP
# dnsmasq to reload it. A container is listed when it either opts in via the
# com.theimpossiblecode.expose.useDockerName label or sets host/subdomainHosts
# labels; the first ping-reachable container IP is used.
generate_docker_hosts ()
{
$ECHO "# Auto-generated by $0" > $CONTAINER_HOSTS
for cid in `docker ps -q`; do
use_docker_host=""
host=""
subdomain_hosts=""
container_domain=""
domain_is_host=""
ip=""
ips=""
use_docker_host=`docker inspect --format='{{ index .Config.Labels "com.theimpossiblecode.expose.useDockerName"}}' $cid`
if [ "$use_docker_host" = "true" ]; then
host=`docker inspect --format='{{.Name}}' $cid | sed -e 's@^/@@'`
else
host=`docker inspect --format='{{ index .Config.Labels "com.theimpossiblecode.expose.host"}}' $cid`
subdomain_hosts=`docker inspect --format='{{ index .Config.Labels "com.theimpossiblecode.expose.subdomainHosts"}}' $cid`
fi
container_domain=`docker inspect --format='{{ index .Config.Labels "com.theimpossiblecode.expose.domain"}}' $cid`
domain_is_host=`docker inspect --format='{{ index .Config.Labels "com.theimpossiblecode.expose.domainIsHost"}}' $cid`
if [ "$host" != "" -o "$container_domain" != "" -o "$subdomain_hosts" != "" ]; then
# Scrape IPv4 addresses from `ip address` inside the container (macvtap
# guests are not reachable from the hypervisor, hence the ping probe below).
ips=$(docker exec $cid ip address | perl -ne 'if (/ether/ .. /global/) {print "$1\n" if /(\d+\.\d+\.\d+\.\d+)/}')
for ip in $ips; do
# Take the first reachable IP
$ECHO "Checking IP $ip"
if ping -c 1 -W 1 $ip ; then
$ECHO "$ip is reachable"
break
fi
$ECHO "$ip is not reachable"
ip=""
done
if [ "$ip" = "" ]; then
# No reachable IP
continue
fi
$ECHO -n "$ip " >> $CONTAINER_HOSTS
if [ "$container_domain" = "" ]; then
container_domain=$DOMAIN_NAME
elif [ "$container_domain" != "$DOMAIN_NAME" ]; then
# Optionally publish the bare domain itself as a hostname for this IP.
if [ "$domain_is_host" = "true" ]; then
$ECHO -n "$container_domain " >> $CONTAINER_HOSTS
fi
fi
# Emit each host both bare and fully qualified with the container domain.
subdomain_hosts="$host $subdomain_hosts"
for host in $subdomain_hosts; do
$ECHO -n " $host" >> $CONTAINER_HOSTS
if [ "$container_domain" != "" ]; then
$ECHO -n " $host.$container_domain" >> $CONTAINER_HOSTS
fi
done
$ECHO >> $CONTAINER_HOSTS
fi
done
# Ask dnsmasq to reload
pkill -x -HUP dnsmasq
}
# Deliver SIGALRM to this shell after $1 seconds, without blocking.
# $1 is quoted so an odd value cannot word-split or glob (SC2086);
# $$ inside the subshell is still the parent shell's PID.
function set_timer
{
    ( sleep "$1"
      kill -ALRM $$
    ) &
}
# Number of docker events seen since the last regeneration; non-zero means
# a debounce timer is already pending.
events_raised=0
# SIGALRM handler: the debounce window expired, so rebuild the hosts file
# once for all batched events and reset the counter.
function timeout_handler
{
$ECHO "regenerating the $CONTAINER_HOSTS file after $events_raised events"
generate_docker_hosts
events_raised=0
}
trap timeout_handler SIGALRM
# Exernal loop to handle docker restarts
while [ 1 ] ; do
$ECHO "waiting for docker to be up"
if ! docker network ls > /dev/null 2>&1 ; then
# Wait for docker networks to start
while ! docker network ls > /dev/null 2>&1 ; do
sleep 1
done
fi
$ECHO "register for docker start/stop events"
# Listen on docker events to maintain the CONTAINER_HOSTS file
coproc docker events --filter 'event=start' --filter 'event=stop'
# Initialize the CONTAINER_HOSTS now
$ECHO "generating an initial $CONTAINER_HOSTS file"
generate_docker_hosts
# Internal loop to handle docker events
# (read returns non-zero when the docker daemon dies, dropping us back
# into the outer restart loop)
while read -u ${COPROC[0]} line; do
if [ $events_raised != 0 ]; then
# Just count until the timer expires
let events_raised++
else
# Start counting events and handle them all at once
# after WAIT_EVENS_TO seconds
$ECHO "will generate $CONTAINER_HOSTS in $WAIT_EVENS_TO seconds"
events_raised=1
set_timer $WAIT_EVENS_TO
fi
done
done
| true |
44f7f295be99fc84672371a10b1e93e9084db273
|
Shell
|
AlienTab1/dwltest
|
/iperf_day.sh
|
UTF-8
| 898 | 3.1875 | 3 |
[] |
no_license
|
#!/bin/sh
# Daily iperf3 download measurement; appends the parsed result to
# $OUTPUT_FILE and cross-checks it against the wan bridge Rx counter.
#SERVER=bouygues.testdebit.info
SERVER=bouygues.iperf.fr
#ping.online.net
OUTPUT_FILE=/root/dwltest/daily_results.log
# Log the message to syslog and abort with a failure status.
# (The original piped into "logger $msg" with $msg undefined, and its bare
# "exit" returned logger's status 0 even on the error path.)
die() {
    echo "$1" | logger
    exit 1
}
logger "iperf dwl measure start, server: $SERVER"
# Rx byte counter of the wan interface before the test.
rx_state_start=$(awk '/br-wan/{ print $2 }' /proc/net/dev)
# Capture iperf3's exit status immediately: the original tested $? only
# after re-reading /proc/net/dev, so it checked the awk pipeline instead.
json=$(iperf3 -R -J -p 5209 -c "$SERVER") || die "cannot contact server: $SERVER"
rx_state_end=$(awk '/br-wan/{ print $2 }' /proc/net/dev)
jsonout=$(echo "$json" | /root/dwltest/parse_iperf.lua) || die "iperf fail!: $SERVER"
iperf_transfer=$(echo "$jsonout" | awk '{ print $2 }')
# Sanity check: bytes seen on the bridge must roughly match what iperf
# reports (allow the transfer itself plus 10 KiB of slack).
# POSIX [ ] instead of [[ ]]: the shebang is /bin/sh.
if [ $((rx_state_end - rx_state_start - iperf_transfer)) -gt $((iperf_transfer + 10*1024)) ]; then
    die "iperf invalid value: $SERVER"
fi
echo "$jsonout" >> "$OUTPUT_FILE" || die "iperf fail!: $SERVER"
| true |
fb5d32b8d387c59b46bfa25608e541a80de3cf4d
|
Shell
|
RobertAudi/.dotfiles
|
/zsh/.config/zsh/010_overrides.zsh
|
UTF-8
| 5,452 | 3.875 | 4 |
[
"WTFPL"
] |
permissive
|
# cd / smart_cd {{{
# ------------------------------------------------------------------------------
# Source: http://zshwiki.org/home/examples/functions
#
# - cd /etc/fstab → cd /etc
# - corrections on the given dirname (if directory could not be found)
# - all(?) other possible invocations of the builtin "cd"
# ------------------------------------------------------------------------------
# If the single argument is a regular file, cd to its parent directory
# (${1:h} is the zsh "head"/dirname modifier); otherwise plain builtin cd.
smart_cd() {
if (( $# != 1 )); then
builtin cd "$@"
return $?
fi
if [[ -f "$1" ]]; then
if [[ -e "${1:h}" ]]; then
builtin print -P -- "%F{032}INFO%f -- Correcting $1 to ${1:h}"
fi
builtin cd "${1:h}"
else
builtin cd "$1"
fi
}
# Wrapper around cd that also tries zsh approximate matching: (#a1)/(#a2)
# allow one/two spelling errors, (N) makes an unmatched glob expand to
# nothing instead of erroring.
cd() {
emulate -L zsh
setopt localoptions
setopt extendedglob
local approx1 ; approx1=()
local approx2 ; approx2=()
if (( ${#*} == 0 )) || [[ ${1} = [+-]* ]] ; then
builtin cd "$@"
elif (( ${#*} == 1 )) ; then
approx1=( (#a1)${1}(N) )
approx2=( (#a2)${1}(N) )
if [[ -e ${1} ]] ; then
smart_cd ${1}
elif [[ ${#approx1} -eq 1 ]] ; then
builtin print -P -- "%F{032}INFO%f -- Correcting $1 to ${approx1[1]}"
smart_cd ${approx1[1]}
elif [[ ${#approx2} -eq 1 ]] ; then
builtin print -P -- "%F{032}INFO%f -- Correcting $1 to ${approx2[1]}"
smart_cd ${approx2[1]}
else
builtin print -P -- "%F{196}ERROR%f -- couldn't correct $1" >&2
return 1
fi
elif (( ${#*} == 2 )) ; then
# two-argument form: zsh's "cd old new" substitution
builtin cd $1 $2
else
builtin print -P -- "%F{196}ERROR%f -- cd: too many arguments" >&2
return 1
fi
}
# ------------------------------------------------------------------------------ }}}
# where {{{
# ------------------------------------------------------------------------------
# Taken from https://github.com/aspiers/shell-env - .zsh/functions/wh
# https://github.com/aspiers/shell-env/blob/0f7d4bf/.zsh/functions/wh
# Like "whence -cas", but first force-loads any autoloaded function so its
# definition (not just the autoload stub) is shown.
where() {
for thing in "$@"; do
if [[ `builtin whence -ca "$thing"` == *'builtin autoload'* ]]; then
autoload +X "$thing"
builtin print -P -- "%F{008}# Note: $thing was autoloaded%f"
fi
done
builtin whence -cas "$@"
}
# ------------------------------------------------------------------------------ }}}
# sudo {{{
# ------------------------------------------------------------------------------
# Enable aliases to be sudo’ed
# Works with command/nocorrect/noglob
# Doesn't work with builtins
#
# Source: http://www.zsh.org/mla/users/2008/msg01229.html
alias sudo="noglob do_sudo "
# Re-implements "sudo" so that the trailing space in the alias above lets
# the *next* word be alias-expanded too; strips command/nocorrect/noglob
# prefixes and re-globs the arguments ($~==*) unless noglob was requested.
do_sudo() {
integer glob=1
local -a run
run=( command sudo )
if [[ $# -gt 1 && $1 = -u ]]; then
run+=($1 $2)
shift ; shift
fi
# no arguments: start a root shell
(( $# == 0 )) && 1=/bin/zsh
while (( $# > 0 )); do
case "$1" in
command|exec|-) shift; break ;;
nocorrect) shift ;;
noglob) glob=0; shift ;;
*) break ;;
esac
done
if (( glob )); then
PATH="/sbin:/usr/sbin:/opt/homebrew/sbin:$PATH" $run $~==*
else
PATH="/sbin:/usr/sbin:/opt/homebrew/sbin:$PATH" $run $==*
fi
}
# ------------------------------------------------------------------------------ }}}
# Better tree command {{{
# ------------------------------------------------------------------------------
# Prefer exa's tree mode when available, otherwise fall back to tree(1).
# An optional leading integer argument limits the recursion depth.
tree() {
if is-callable exa ; then
# Options:
# -a All files (including hidden files, but not "." and "..")
# -F Classify (directory: /, executable file: *, etc.)
# -T Recurse into directories as a tree
# -I pattern Don't list files matching the pattern
# -L level Max depth
local exaopts="--time-style=long-iso --group-directories-first --colour=always -aFT -I \"rhel.*.*.package|.git|.gem\""
if [[ "$1" =~ "^[1-9][0-9]*$" ]]; then
exaopts="$exaopts -L $1"
shift
fi
command exa $=exaopts "$@"
else
# Options:
# -C Colors
# -A ANSI line graphics
# -F Classify (directory: /, executable file: *, etc.)
# -a All files (including hidden files, but not "." and "..")
# -I pattern Don't list files matching the pattern
# --dirsfirst List directories before files
# -L level Max depth
local treeopts="-CAFa -I \"rhel.*.*.package|.git|.gem\" --dirsfirst"
if [[ "$1" =~ "^[1-9][0-9]*$" ]]; then
treeopts="$treeopts -L $1"
shift
fi
command tree $=treeopts "$@"
fi
}
# ------------------------------------------------------------------------------ }}}
# Disable globbing.
alias fc="noglob fc"
alias find="noglob find"
alias history="noglob history"
alias locate="noglob locate"
# Verbose and refuse to operate recursively on "/".
alias rm="rm -v --preserve-root"
# Be verbose
alias cp="cp -v"
alias chgrp="chgrp --preserve-root -v"
alias chmod="chmod --preserve-root -v"
alias chown="chown --preserve-root -v"
alias mkdir="mkdir -vp"
alias mv="mv -v"
alias ln="ln -v"
# used when you press M-? on a command line
alias which-command="builtin whence -sva"
# List everything
alias type="type -a"
alias grep="grep --binary-files=without-match --exclude=.git{,ignore,modules} --exclude-dir=.{cvs,git,hg,svn} --color=auto"
alias gzip="gzip --verbose --best --name"
alias pgrep="pgrep -il"
alias pkill="pkill -i"
# smart-cat: external wrapper script -- see dotfiles bin (not defined here).
alias cat="smart-cat"
# Better jobs
alias jobs="jobs -lpd"
# Print directory stack with numbers and one directory per line.
alias dirs="dirs -v -p"
alias fetchmail="fetchmail --check"
# Modeline {{{
# vim: set foldmarker={{{,}}} foldlevel=0 foldmethod=marker : }}}
| true |
641ca2a38b93ad40490e1dd6161503d5c823b160
|
Shell
|
DataDog/dd-agent-integrations-build
|
/pre.sh
|
UTF-8
| 172 | 2.59375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# CI pre-step: create package/key directories and, for RPM/SLES builds,
# install the private signing key from the RPM_GPG_KEY environment variable.
mkdir -p ~/pkg ~/keys
if [[ $DOCKER_IMAGE == *"rpm"* || $DOCKER_IMAGE == *"sles"* ]]
then
    # Quote the key so its whitespace survives: the original unquoted
    # expansion word-split (and glob-expanded) the key onto one line.
    # NOTE(review): the key is written under /home/ubuntu while mkdir targets
    # ~/keys — confirm this always runs as the "ubuntu" user.
    echo -e "$RPM_GPG_KEY" > /home/ubuntu/keys/RPM-SIGNING-KEY.private
fi
| true |
9b72e0441fce378dadc6ea3e7721fac73304e74c
|
Shell
|
joe-nano/docker
|
/standardese_dev/build.bash
|
UTF-8
| 571 | 3.421875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build the standardese tool as a statically linked binary from the mounted
# ./standardese sources and copy the result into the mounted ./output dir.

# Print the message on stderr and abort.
abort() {
    echo "$1" >/dev/stderr
    exit 1
}

[[ -d "standardese" ]] || abort "error: need to mount /root/standardese to standardese source files"
[[ -d "output" ]] || abort "error: need to mount /root/output to some directory that should contain the result"

set -e
mkdir build && cd build
cmake -DCMAKE_EXE_LINKER_FLAGS="-static -static-libstdc++ -static-libgcc" -DCLANG_BINARY="clang++" ../standardese
cmake --build . --target standardese_tool -j $(nproc)
cp tool/standardese ../output/
chmod ugoa=rwx ../output/standardese
| true |
db22e20a7d32a3acd6c7d4a796dc2a6ad422c3fc
|
Shell
|
CodethinkLabs/provisioning-runners-VMs
|
/runners-registering/unregister_runner.sh
|
UTF-8
| 654 | 4.25 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
# Print the help text on stdout (heredoc left untouched so the output
# stays byte-identical; $0 expands to the script name).
usage() {
cat <<EOF
usage: $0 -r RUNNER_NAME
This script unregisters gitlab runner called RUNNER_NAME.
OPTIONS:
   -h     Show this message
   -r     Provide a name of gitlab runner to unregister
EOF
}
# Unregister the runner named in the global $runner_name (set by main).
unregister_runner() {
    /usr/local/bin/gitlab-runner unregister -n "$runner_name"
}
# Parse options, require a runner name, then unregister it.
main() {
    runner_name=""
    while getopts "hr:" option; do
        case "$option" in
            h)
                usage
                exit 0
                ;;
            r)
                runner_name="$OPTARG"
                ;;
            \?)
                # Unknown flag: getopts already printed its diagnostic; the
                # original silently ignored it and carried on.
                usage
                exit 1
                ;;
        esac
    done
    if [[ -z "$runner_name" ]]; then
        echo -e "ERROR: RUNNER_NAME is mandatory\n"
        usage
        exit 1
    fi
    unregister_runner
}
main "$@"
| true |
a672e17b3eb82051f3260cc911d5946e23908838
|
Shell
|
jmrbcu/dotfiles
|
/.common.sh
|
UTF-8
| 3,626 | 3.65625 | 4 |
[] |
no_license
|
#################################################################################
# General Options
#################################################################################

# path: prepend each custom bin directory that actually exists
# ($custom is deliberately word-split; individual entries are quoted)
custom="$HOME/.local/bin /usr/local/bin /usr/local/sbin /usr/local/opt/ruby/bin $HOME/.dotnet/tools"
for P in $custom; do
    test -d "$P" && export PATH=$P:$PATH
done

# Set the default Less options.
export LESS='-c -g -i -M -R -S -w -X -z-4'
# command -v source-highlight >/dev/null 2>&1 && export LESSOPEN="| $HOME/.dotfiles/less.sh %s"

# Preferred editor for local and remote sessions, in this order: vim, nano
EDITOR="$(command -v vim 2>/dev/null || command -v nano)"
# Export it so child processes (git, crontab, ...) actually see the choice;
# the original only set the shell variable.
export EDITOR
#################################################################################
# Command Aliases
#################################################################################
# Human-readable sizes; du reports a single total.
alias du="du -h -s"
alias df="df -h"
alias py=ipython
alias mc="mc -u -X"
#################################################################################
# Utility Functions
#################################################################################

# creates a local socks proxy using the remote server as exit point
# $1 = ssh target, $2 = local SOCKS port. Host and port are quoted so odd
# values cannot word-split or glob (SC2086).
function proxy() {
    if [ "$#" -eq 2 ]; then
        echo "::: Listening in: localhost:$2"
        # -D dynamic SOCKS forward; -q quiet, -C compress, -N no remote command
        ssh -D "$2" -q -C -N "$1"
    else
        echo "::: Usage: proxy <[user@]host[:port]> <local-port>"
        echo "::: <[user@]host[:port]>: exit point host"
        echo "::: <local-port>: Port on the local machine we want to forward"
    fi
}
# forward local port to a remote port using ssh
# $1 = ssh host, remaining args = -L forwarding specs
# (local-port:remote-host:remote-port).
function forward() {
    if [ "$#" -ge 2 ]; then
        # Build the -L arguments in a list instead of the original global
        # string + eval, which leaked $params and re-parsed every spec.
        local fwd_args spec
        fwd_args=()
        for spec in "${@:2}"; do
            fwd_args=(-L "$spec" "${fwd_args[@]}")
        done
        echo "${fwd_args[*]} $1"
        ssh -q -N -C "${fwd_args[@]}" "$1"
    else
        echo "::: Usage: forward <[user@]host[:port]> <local-port>:<remote-host>:<remote-port> ... <local-port>:<remote-host>:<remote-port>"
        echo "::: <[user@]host[:port]>: Intermediate host with ssh"
        echo "::: <local-port>: Port on the local machine we want to forward"
        echo "::: <remote-host: Remote host where the local port will be forwarded to"
        echo "::: <remote-port: Port in the remote host where the local port will be forwarded to"
    fi
}
# capture traffic from a remote server
# $1 = ssh host, optional $2 = ssh port, last arg = remote interface.
function remote-capture() {
    if [ "$#" -eq 2 ]; then
        # The interface must be expanded *locally*: the original wrapped the
        # remote command in single quotes, so dumpcap received the literal
        # string '$2' instead of the interface name.
        ssh "$1" "dumpcap -w - -i $2 -f 'not port 22'" | wireshark -k -i -
    elif [ "$#" -eq 3 ]; then
        # ssh -q -N -C -p $2 $1 "tcpdump -i $3 -U -s0 -w -" | wireshark -k -i -
        ssh -p "$2" "$1" "dumpcap -w - -i $3 -f 'not port 22'" | wireshark -k -i -
    else
        echo "::: Usage:"
        echo "::: remote-capture <[user@]host> [ssh-port] <iface>\n"
    fi
}
#################################################################################
# OS Detection
#################################################################################
# Pick ls/la aliases per platform: prefer exa, then GNU ls (gls on macOS),
# then the system ls.
if [[ "$OSTYPE" == darwin* ]]; then
    # Use GNU ls instead of BSD ls if available
    alias ls="ls -hlGF"
    alias la="ls -hlGFA"
    # Plain `command -v` in the condition: the original wrapped it in $(...)
    # and only worked because an empty command inherits the substitution's
    # exit status.
    if command -v exa >/dev/null 2>&1; then
        alias ls="exa -hl --group-directories-first --git"
        alias la="exa -hla --group-directories-first --git"
    elif command -v gls >/dev/null 2>&1; then
        alias ls="gls -hlF --color=always --group-directories-first"
        alias la="gls -hlAF --color=always --group-directories-first"
    fi
else
    # Command Aliases
    alias ls="ls -hlF --color=always --group-directories-first"
    alias la="ls -hlAF --color=always --group-directories-first"
    if command -v exa >/dev/null 2>&1; then
        alias ls="exa -hl --group-directories-first --git"
        alias la="exa -hla --group-directories-first --git"
    fi
fi
| true |
6688dfa04a51329a995215bd11b69fd0b3d2a8e9
|
Shell
|
PieLabs/pie-catalog
|
/deploy.sh
|
UTF-8
| 761 | 3.25 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Build a Heroku slug from lib/ + node_modules/ and deploy it to the app
# named in $1 using the cbt tool.
echo "set up a deployment..."
app=$1
echo "app: $app"
# Start from a clean staging dir and clean artifacts.
rm -fr .deployment
rm -fr slug.tgz
rm -fr artifact.tgz
mkdir .deployment
# read -d '' slurps the whole heredoc (it "fails" on EOF by design).
read -d '' PROC_FILE << EOL
web: ./node-v7.4.0-linux-x64/bin/node ./lib/index.js
EOL
echo "$PROC_FILE" > .deployment/Procfile
# Record the git revision being shipped.
GIT_VERSION="$(git describe --always)"
echo "$GIT_VERSION"
echo "$GIT_VERSION" > .deployment/.git-version
chmod +x .deployment/Procfile
cp -rv node_modules .deployment/node_modules
cp -rv lib .deployment/lib
cp -rv package.json .deployment/package.json
tar -czvf artifact.tgz -C .deployment .
# Turn the artifact into a slug, then push it to the Heroku app.
cbt slug-mk-from-artifact-file\
 --artifact-file=artifact.tgz\
 --out-path=slug.tgz \
 --platform=node-7.4.0
cbt slug-deploy-from-file\
 --heroku-app=$app\
 --slug-file=slug.tgz
| true |
9fe6395a8fb1a46d30ae34061d321d66b48cff72
|
Shell
|
kicktothecortex/scripts
|
/zip_to_cbr.sh
|
UTF-8
| 378 | 3.296875 | 3 |
[] |
no_license
|
#!/bin/bash
## Script to convert from zip file to cbr for comic readers
FILE=$1
ZIPFILE=$(readlink -f "$1")
TEMPDIR="/tmp"
# Strip only the *final* extension and any directory part: the original
# "${FILE%%.*}" cut at the first dot, mangling names like "Issue 1.2.zip".
ISSUE="$(basename "$FILE")"
ISSUE="${ISSUE%.*}"
ISSUEDIR="temp_$ISSUE"
ORIGINALDIR=$(pwd)
# Guard every cd: an unguarded failure would let the trailing rm -rfv run
# against the wrong directory.
cd "$TEMPDIR" || exit 1
mkdir "$ISSUEDIR" || exit 1
cd "$ISSUEDIR" || exit 1
unzip -d . "$ZIPFILE"
cd "$TEMPDIR" || exit 1
rar a -ep "$ISSUE.cbr" "$ISSUEDIR"
mv -v "$ISSUE.cbr" "$ORIGINALDIR"
rm -rfv "$ISSUEDIR"
#rm -rfv "$ZIPFILE"
| true |
25802743dc6e5a7d2e0b965b00da49c4621e8aa7
|
Shell
|
zgdkik/myops
|
/自动化运维/ansible/实战案例/ansible批量部署软件/ansible-deploy/deploy/centos7/define_env.sh
|
UTF-8
| 1,811 | 3.296875 | 3 |
[] |
no_license
|
#!/bin/bash
#description: batch-deploy application environments with ansible (CentOS 7 only)...
#version: v1.2
#auth: by zhengxin20180720
#email: hzzxin@tairanchina.com
DATE="$(date +%Y%m%d_%H%M)"
# Dynamic inventory of target hosts.
DYN_INVENTORY="/ansible-deploy/inventory/centos7_ip_list"
PLAYBOOK_DIR="/ansible-deploy/playbook/centos7"
ENV_PLAYBOOK="$PLAYBOOK_DIR/env_init.yml"
JDK_PLAYBOOK="$PLAYBOOK_DIR/jdk.yml"
TOMCAT_PLAYBOOK="$PLAYBOOK_DIR/tomcat.yml"
NODE1_PLAYBOOK="$PLAYBOOK_DIR/nodejs-v6.11.2.yml"
NODE2_PLAYBOOK="$PLAYBOOK_DIR/nodejs-v8.10.0.yml"
NGINX_PLAYBOOK="$PLAYBOOK_DIR/nginx.yml"
MQ_PLAYBOOK="$PLAYBOOK_DIR/rabbitmq.yml"
LOCAL_REPO_PLAYBOOK="$PLAYBOOK_DIR/local_repo.yml"
ALY_REPO_PLAYBOOK="$PLAYBOOK_DIR/aliyun_repo.yml"
ZK_PLAYBOOK="$PLAYBOOK_DIR/zookeeper.yml"
ZBX_AGENT_PLAYBOOK="$PLAYBOOK_DIR/zabbix-agent.yml"
# Directory holding the software packages listed below.
APP_DIR="/usr/local/weihu/software/centos7"
LOG_DIR="/ansible-deploy/logs/centos7"
APP_LIST=(env_init.tar.gz rabbitmq.tar.gz trc-apr-1.5.2-1.el7.centos.x86_64.rpm trc-apr-iconv-1.2.1-1.el7.centos.x86_64.rpm trc-apr-util-1.5.4-1.el7.centos.x86_64.rpm trc-jdk-1.8.0_77-1.el7.centos.x86_64.rpm trc-nginx-1.12.2-3.el7.centos.x86_64.rpm trc-node-v6.11.2-1.el7.centos.x86_64.rpm trc-node-v8.10.0-1.el7.centos.x86_64.rpm trc-tomcat-8.0.32-1.el7.centos.x86_64.rpm trc-tomcat-native-1.1.34-1.el7.centos.x86_64.rpm trc-zookeeper-3.4.6-1.el7.centos.x86_64.rpm)
## Create the log directory and truncate this run's log file
mkdir -p $LOG_DIR
>$LOG_DIR/${DATE}.log
## Append a timestamped deployment-log entry ($1) to the current log file.
RECORD_LOG() {
    printf '%s %s\n' "$(date +%F_%T)" "$1" >> "${LOG_DIR}/${DATE}.log"
}
## Verify every package in APP_LIST exists under APP_DIR; log and abort
## on the first missing one.
CHECK_FILE() {
    local i
    for i in "${APP_LIST[@]}"; do
        # Test existence directly instead of parsing `ls` output (SC2012),
        # and quote the expansion so odd file names survive (SC2086).
        if [ ! -e "${APP_DIR}/${i}" ]; then
            RECORD_LOG "${i}: software pkg does not exist..."
            exit 1
        fi
    done
}
## Install function: log the start and run the given playbook ($1)
## against the dynamic inventory.
INSTALL() {
    RECORD_LOG "starting install software..."
    ansible-playbook -i $DYN_INVENTORY "$1"
}
| true |
659051b5bd6b30ffe096f2eb95eac5f414332066
|
Shell
|
surskitt/scripts
|
/tile.sh
|
UTF-8
| 2,901 | 3.375 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Open up to 18 media files across the bspwm desktops "files"/"files2",
# lay them out in a grid, hide polybar and gaps while they play, and
# restore the previous desktops once every viewer process has exited.
mapfile -t curr_desktops < <(bspc query -D -d '.active')
focused_desktop=$(bspc query -D -d '.focused')
if [[ "${#}" -gt 18 ]]; then
echo "Error: only supports 18 vids or less"
exit 1
fi
# close window on desktops 7 and 8
(bspc query -N -d files; bspc query -N -d files2)|while read -r n; do
bspc node "${n}" -c
done
bspc desktop -f files2
bspc desktop -f files
# Launch a viewer per file (feh for jpeg, mpv for video) and remember the
# PIDs so we can wait for them later.
pids=()
for i in "${@}"; do
mimetype=$(file -L -b --mime-type "${i}")
case "${mimetype}" in
image/jpeg)
feh "${i}" &
;;
video/mp4|video/webm|video/3gpp|video/x-flv|video/x-matroska|video/x-msvideo)
mpv --quiet --mute --force-window=immediate "${i}" >&2 >/dev/null &
;;
esac
pids+=($!)
done
echo "waiting for windows to launch..."
sleep 1
# Alternate new windows between the two desktops and force them tiled.
desktop=files
bspc query -N -d files -n '.window'|while read -r n; do
echo "moving ${n} to ${desktop}"
bspc node "${n}" -d "${desktop}"
echo "tiling ${n}"
bspc node "${n}" -t tiled
[[ "${desktop}" == files ]] && desktop=files2 || desktop=files
done
sleep 1
# Arrange each desktop's windows into an even grid (only the 3/4/9-window
# layouts are implemented; 5-8 fall through to bspwm's default splits).
for d in files files2; do
# n=$(bspc query -N -d "${d}" -n '.window'|wc -l)
mapfile -t ns < <(bspc query -N -d "${d}" -n '.window')
case "${#ns[@]}" in
1|2)
;;
3)
bspc node "${ns[0]}" -o 0.333
bspc node "${ns[1]}" -n "${ns[0]}"
bspc node "${ns[1]}" -o 0.5
bspc node "${ns[2]}" -n "${ns[1]}"
;;
4)
bspc node "${ns[0]}" -o 0.5 -p south
bspc node "${ns[1]}" -n "${ns[0]}"
bspc node "${ns[2]}" -o 0.5 -p south
bspc node "${ns[3]}" -n "${ns[2]}"
;;
5)
;;
6)
;;
7)
;;
8)
;;
9)
bspc node "${ns[0]}" -o 0.333 -p south
bspc node "${ns[1]}" -n "${ns[0]}"
bspc node "${ns[1]}" -o 0.5 -p south
bspc node "${ns[2]}" -n "${ns[1]}"
sleep 1
bspc node "${ns[3]}" -o 0.333 -p south
bspc node "${ns[4]}" -n "${ns[3]}"
bspc node "${ns[4]}" -o 0.5 -p south
bspc node "${ns[5]}" -n "${ns[4]}"
sleep 1
bspc node "${ns[6]}" -o 0.333 -p south
bspc node "${ns[7]}" -n "${ns[6]}"
bspc node "${ns[7]}" -o 0.5 -p south
bspc node "${ns[8]}" -n "${ns[7]}"
sleep 1
;;
esac
done
# Hide the bar and gaps while viewing.
polybar-msg cmd hide; sleep 0.1; bspc config bottom_padding 0
bspc config window_gap 0
# Poll until every launched viewer PID has exited.
while :; do
if [[ $(ps --no-headers -fp "${pids[@]}" 2>/dev/null|wc -l) -eq 0 ]]; then break; fi
sleep 1
done
# Restore the bar, gaps, and the previously active/focused desktops.
polybar-msg cmd show; sleep 0.1; bspc config bottom_padding 27
bspc config window_gap 50
for d in "${curr_desktops[@]}"; do
bspc desktop -f "${d}"
done
bspc desktop -f "${focused_desktop}"
| true |
82dc8c35321eff568c0e4019e888f6ab1e807d63
|
Shell
|
ddbox/fragments
|
/cert_stuff/make_server_cert.sh
|
UTF-8
| 825 | 3.21875 | 3 |
[] |
no_license
|
#!/bin/bash -x
# Issue a TLS server certificate for the FQDN given as $1, signed by the
# intermediate CA under root/ca (config: int.openssl.cnf).
server_name=$1
if [ "$server_name" = "" ]; then
    echo usage $0 server_fqdn
    exit 1
fi
# Remove any previous key/cert for this host. Guard the cd so the removals
# can never run in the wrong directory; quote $server_name throughout.
cd root/ca || exit 1
rm -rf -- "intermediate/private/$server_name.key.pem"
rm -rf -- "intermediate/certs/$server_name.cert.pem"
cd - || exit 1
# 1) private key
openssl genrsa \
      -out "root/ca/intermediate/private/$server_name.key.pem" 2048
chmod 400 "root/ca/intermediate/private/$server_name.key.pem"
# 2) certificate signing request
openssl req -batch -config int.openssl.cnf \
      -key "root/ca/intermediate/private/$server_name.key.pem" \
      -new -sha256 -out "root/ca/intermediate/csr/$server_name.csr.pem"
# 3) sign with the intermediate CA
openssl ca -config int.openssl.cnf \
      -extensions server_cert -days 375 -notext -md sha256 \
      -in "root/ca/intermediate/csr/$server_name.csr.pem" \
      -out "root/ca/intermediate/certs/$server_name.cert.pem"
chmod 444 "root/ca/intermediate/certs/$server_name.cert.pem"
| true |
9139870ed7f0938f3d6d0efa2285302ff960b11b
|
Shell
|
vdloo/simulacra
|
/vms.sh
|
UTF-8
| 2,535 | 3.6875 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
# Set up some VMs in my home lab. Uses terraform to start up qemu machines with macvtap networking.
# This script needs to run on another host than the hypervisor because the hypervisor can not
# talk directly with the guests because macvtap injects packets directly into the hardware
# interface. To discover the ip addresses of the created VMs the other host (the one that
# runs this script) must populate the arp cache (by either running nmap or arping) and then
# grep the mac addresses from the arp table.
HYPERVISOR="192.168.1.182"
# Bring up libvirt/firewalld and the default network on the hypervisor.
# NOTE(review): a command is given on the ssh line AND a heredoc is piped in;
# presumably only the heredoc commands matter here — confirm.
ssh root@$HYPERVISOR systemctl start libvirtd << EOF
systemctl start libvirtd
systemctl start firewalld
virsh net-start default
EOF
sleep 3 # Giving the hypervisor some time to start up
ssh root@$HYPERVISOR virsh pool-destroy default
ssh root@$HYPERVISOR virsh pool-create pool.xml
# Tear down any previous grid VMs and re-apply the terraform plan.
ssh root@$HYPERVISOR << EOF
cd /root/code/projects/simulacra/terraform
terraform destroy -force
virsh list --all | grep grid | awk '{print$2}' | xargs -I {} virsh destroy {}
terraform apply -auto-approve
EOF
# MACs of the freshly created guests, scraped from the libvirt domain XML.
MAC_ADDRESSES=$(ssh root@$HYPERVISOR cat /etc/libvirt/qemu/grid*.xml | grep "mac address" | cut -d"'" -f2)
# warm ARP cache, discover all hosts on the network
# doing three passes to make sure we got them all
echo "Flushing and warming ARP cache"
ip -s -s neigh flush all
for i in {1..6}; do
echo "Discovering hosts, sweep $i out of 6"
nmap -sn 192.168.1.0/24 -n --send-ip -v0 -T5 --min-parallelism 100 --max-parallelism 256
sleep 10
done
# Get IP addresses of all VMs
IP_ARRAY=()
for mca in $MAC_ADDRESSES; do
IP_ARRAY+=($(arp -a -n | grep -v incomplete | grep $mca | awk -F"[()]" '{print $2}'))
done
# Provision each guest and (re-)register it as a Jenkins node, cloning the
# configuration of the template node "grid0" with the IP substituted in.
node_count=1
for ip in "${IP_ARRAY[@]}"; do
echo $ip
ssh -oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null root@$ip << EOF
pacman -Syyu --noconfirm
pacman -S screen --noconfirm --needed
/usr/bin/screen -S reboot -d -m bash -c "test -f /root/id_rsa || ssh-keygen -q -N '' -f /root/.ssh/id_rsa; pacman -S jre8-openjdk-headless docker screen sudo ruby git rsync ntp puppet acl python2 python3 python-virtualenv python-virtualenvwrapper python-pip --noconfirm --needed; systemctl enable docker; ntpd -q; sleep 3; reboot"
EOF
java -jar jenkins-cli.jar -s http://localhost:8090 delete-node grid$node_count || /bin/true
java -jar jenkins-cli.jar -s http://localhost:8090 get-node grid0 | sed "s/1.2.3.4/$ip/g" | java -jar jenkins-cli.jar -s http://localhost:8090 create-node grid$node_count
node_count=$((node_count + 1))
done
| true |
08541c3e9cb7375d3629f2ccc701765c8f24ab1a
|
Shell
|
trimbakeshwar/shellScriptingProgram
|
/case_statment/digitToWeek.sh
|
UTF-8
| 263 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/bash -x
# Prompt for a digit 0-6 and print the matching weekday name.

# Map a digit (0-6) to its weekday name on stdout; returns non-zero for
# anything else. Fixes the misspelled "tusday"/"wensday" outputs.
day_name() {
case "$1" in
0) echo sunday ;;
1) echo monday ;;
2) echo tuesday ;;
3) echo wednesday ;;
4) echo thursday ;;
5) echo friday ;;
6) echo saturday ;;
*) return 1 ;;
esac
}

read -p "enter day " x
day_name "$x"
| true |
55ebc9a838ed27efeeb9cdcd674fc85c8fa9ab9f
|
Shell
|
GovReady/testmachines
|
/vbkick-templates/Ubuntu12.04/validate/test_ruby.sh
|
UTF-8
| 399 | 3.28125 | 3 |
[] |
no_license
|
#!/bin/bash
set -e -E -u -o pipefail; shopt -s failglob;
# Feature: ruby interpreter

# Print a red "<what>: FAIL" marker and stop (exit status of printf, i.e. 0,
# matching the original bare `exit`).
fail() {
    printf "\e[1;31m%s: FAIL\n\e[0m" "$1"
    exit
}

# Given ruby command / When I run "ruby --version"
command -v ruby >/dev/null 2>&1 || fail "ruby"
ruby --version >/dev/null 2>&1 || fail "ruby --version"

# Then I expect success
printf "\e[1;32mruby: OK\n\e[0m"
| true |
641d08ff4d6062e686ed6a26c01814aeb309d2fa
|
Shell
|
alb-i/cluster-setups
|
/kubernetes-hetzner/create-mini-server.sh
|
UTF-8
| 192 | 2.828125 | 3 |
[] |
no_license
|
#!/bin/sh
# Create a small Hetzner Cloud server; the server name defaults to "gru"
# when no argument is given (or the argument is empty).
NAME="${1:-gru}"
set -x
./hcloud.sh server create --name "$NAME" --type cx31 --image "ubuntu-18.04" $($(dirname $0)/get-ssh-key-parameters.sh)
| true |
2c693bcc17680421e8a90e9a05d75368e8bbf778
|
Shell
|
FabianDK/LongeviTE
|
/01-Download_raw_reads.sh
|
UTF-8
| 7,373 | 3.625 | 4 |
[] |
no_license
|
#DOWNLOAD OF SRA FILES
#Here we explain how we downloaded raw sequence reads from the SRA
#If necessary, change folder names and edit commands accordingly.
#Most commands were run on a computer cluster with multiple cores and large available memory
#Custom scripts used here (see required_scripts folder):
SRA_to_gzip_fastq.sh #Requires fastq-dump from SRA tool kits, see SRA_to_gzip_fastq.sh script for more details.
fastq_check.sh
unzip_many_files.sh
########################################
#CARNES ET AL (2015) - RAW READS
########################################
mkdir Carnes
mkdir Carnes/reads
#1) Get run info from BioProject
cd Carnes
wget 'http://trace.ncbi.nlm.nih.gov/Traces/sra/sra.cgi?save=efetch&rettype=runinfo&db=sra&term=PRJNA286855' -O - | tee SraRunInfo.csv
#2) Loop through lines of file, extract download link and use wget to download it to folder "Carnes/reads"
for runinfo in SraRunInfo.csv
do
x=$(sed 's/,/\t/g' $runinfo | cut -f10 | grep "http")
wget $x -P Carnes/reads
echo "$x"
done
#3) Transform .sra files into gzip fastq files using fastq-dump
sh required_scripts/SRA_to_gzip_fastq.sh Carnes/reads
#4) Use FASTQC to perform quality checks on fastq files
sh required_scripts/fastq_check.sh Carnes/reads/gzip_fastq
#5) Unzip .gz files and move to unzipped folder
sh required_scripts/unzip_many_files.sh Carnes/reads/gzip_fastq
#6) Rename files and remove .gz.decomp extension
for filename in Carnes/reads/gzip_fastq/unzipped/*.decomp
do
[ -f "$filename" ] || continue
mv "$filename" "${filename%.gz.decomp}"
done
#7) Created table with two columns (SRA accession number, and name of population) based onn SraRunInfo.csv: carnes_rename_tab.txt.
#Then rename file names so that they are labelled according to selection regime and replicate
cd Carnes/reads/gzip_fastq/unzipped/
sed 's/^/mv -vi "/;s/\t/_1.fastq" "/;s/$/_r1.fastq";/' < extra_files/carnes_rename_tab.txt | bash -
sed 's/^/mv -vi "/;s/\t/_2.fastq" "/;s/$/_r2.fastq";/' < extra_files/carnes_rename_tab.txt | bash -
sed 's/^/mv -vi "/;s/\t/.fastq" "/;s/$/_noPair.fastq";/' < extra_files/carnes_rename_tab.txt | bash -
########################################
#REMOLINA ET AL (2012) - RAW READS
########################################
mkdir Remolina
mkdir Remolina/reads
#1) Get run info from BioProject
cd Remolina
wget 'http://trace.ncbi.nlm.nih.gov/Traces/sra/sra.cgi?save=efetch&rettype=runinfo&db=sra&term=PRJNA185744' -O - | tee SraRunInfo.csv
#2) Loop through lines of file, extract download link and use wget to download it to folder "Remolina/reads"
for runinfo in SraRunInfo.csv
do
x=$(sed 's/,/\t/g' $runinfo | cut -f10 | grep "http")
wget $x -P Remolina/reads
done
#3) Transform .sra files into gzip fastq files using fastq-dump
sh required_scripts/SRA_to_gzip_fastq.sh Remolina/reads
#4) Use FASTQC to perform quality checks on fastq files
sh required_scripts/fastq_check.sh Remolina/reads/gzip_fastq
#5) Unzip .gz files and move to unzipped folder
sh required_scripts/unzip_many_files.sh Remolina/reads/gzip_fastq
#6) Rename files and remove .gz.decomp extension
for filename in Remolina/reads/gzip_fastq/unzipped/*.decomp
do
[ -f "$filename" ] || continue
mv "$filename" "${filename%.gz.decomp}"
done
#7) Created table with two columns (SRA accession number, and name of population) based onn SraRunInfo.csv: remolina_rename_tab.txt.
#Then rename file names so that they are labelled according to selection regime and replicate
cd Remolina/reads/gzip_fastq/unzipped/
sed 's/^/mv -vi "/;s/\t/_1.fastq" "/;s/$/_r1.fastq";/' < extra_files/remolina_rename_tab.txt | bash -
sed 's/^/mv -vi "/;s/\t/_2.fastq" "/;s/$/_r2.fastq";/' < extra_files/remolina_rename_tab.txt | bash -
########################################
#HOEDJES ET AL (2019) - RAW DNA READS
########################################
mkdir Hoedjes
mkdir Hoedjes/reads
#1) Get the run-info table for BioProject PRJNA564570 and save it as
#   SraRunInfo.csv (tee also echoes it to stdout).
cd Hoedjes
wget 'http://trace.ncbi.nlm.nih.gov/Traces/sra/sra.cgi?save=efetch&rettype=runinfo&db=sra&term=PRJNA564570' -O - | tee SraRunInfo.csv
#2) Extract the download-link column (field 10) from every CSV row and fetch
#   all .sra files; the unquoted $x intentionally word-splits into one URL
#   per wget argument.
#   NOTE(review): after "cd Hoedjes" above, the "Hoedjes/..."-prefixed
#   relative paths below resolve to Hoedjes/Hoedjes/... unless an
#   intervening "cd .." was run — confirm the intended working directory.
for runinfo in SraRunInfo.csv
do
x=$(sed 's/,/\t/g' $runinfo | cut -f10 | grep "http")
wget $x -P Hoedjes/reads
done
#3) Transform .sra files into gzip fastq files using fastq-dump
sh required_scripts/SRA_to_gzip_fastq.sh Hoedjes/reads
#4) Use FASTQC to perform quality checks on fastq files
sh required_scripts/fastq_check.sh Hoedjes/reads/gzip_fastq
#5) Unzip .gz files and move to unzipped folder
sh required_scripts/unzip_many_files.sh Hoedjes/reads/gzip_fastq
#6) Strip the ".gz.decomp" extension left by decompression so the files end
#   in plain ".fastq" again (skip anything that is not a regular file).
for filename in Hoedjes/reads/gzip_fastq/unzipped/*.decomp
do
[ -f "$filename" ] || continue
mv "$filename" "${filename%.gz.decomp}"
done
#7) Created table with two columns (SRA accession number, and name of population) based on SraRunInfo.csv: hoedjes_rename_tab.txt.
#Then rename file names so that they are labelled according to selection regime and replicate.
#Each sed turns a row "ACC<TAB>NAME" into: mv -vi "ACC_1.fastq" "NAME_r1.fastq";
#(resp. _2 -> _r2) and pipes the generated commands into bash.
cd Hoedjes/reads/gzip_fastq/unzipped/
sed 's/^/mv -vi "/;s/\t/_1.fastq" "/;s/$/_r1.fastq";/' < extra_files/hoedjes_rename_tab.txt | bash -
sed 's/^/mv -vi "/;s/\t/_2.fastq" "/;s/$/_r2.fastq";/' < extra_files/hoedjes_rename_tab.txt | bash -
########################################
#FABIAN ET AL (2018) - RAW READS
########################################
#We have not downloaded read files from Fabian et al. from the repository, but instead used our own local copies.
#Raw fastq reads used in this study have been deposited to the ENA under the study accession (PRJEB28048): https://www.ebi.ac.uk/ena/data/view/PRJEB28048
#SRA files could be downloaded and extracted similar as for the studies above (points 1 to 3). The BioProject repository includes bam files from an earlier study and raw fastq reads. For the analysis here, we only used the raw fastq reads.
mkdir Fabian
mkdir Fabian/reads
#1) Example of download from ENA, fastqc check and decompressing files.
#   Files are paired-end: *1.fastq.gz / *2.fastq.gz per run accession.
wget ftp://ftp.sra.ebi.ac.uk/vol1/run/ERR376/ERR3764144/Cont_Ra1.fastq.gz -P Fabian/reads
wget ftp://ftp.sra.ebi.ac.uk/vol1/run/ERR376/ERR3764144/Cont_Ra2.fastq.gz -P Fabian/reads
wget ftp://ftp.sra.ebi.ac.uk/vol1/run/ERR376/ERR3764145/Cont_Rb1.fastq.gz -P Fabian/reads
wget ftp://ftp.sra.ebi.ac.uk/vol1/run/ERR376/ERR3764145/Cont_Rb2.fastq.gz -P Fabian/reads
wget ftp://ftp.sra.ebi.ac.uk/vol1/run/ERR376/ERR3764146/Sel_La1.fastq.gz -P Fabian/reads
wget ftp://ftp.sra.ebi.ac.uk/vol1/run/ERR376/ERR3764146/Sel_La2.fastq.gz -P Fabian/reads
wget ftp://ftp.sra.ebi.ac.uk/vol1/run/ERR376/ERR3764147/Sel_Lb1.fastq.gz -P Fabian/reads
wget ftp://ftp.sra.ebi.ac.uk/vol1/run/ERR376/ERR3764147/Sel_Lb2.fastq.gz -P Fabian/reads
wget ftp://ftp.sra.ebi.ac.uk/vol1/run/ERR376/ERR3764148/Sel_2La1.fastq.gz -P Fabian/reads
wget ftp://ftp.sra.ebi.ac.uk/vol1/run/ERR376/ERR3764148/Sel_2La2.fastq.gz -P Fabian/reads
wget ftp://ftp.sra.ebi.ac.uk/vol1/run/ERR376/ERR3764149/Sel_2Lb1.fastq.gz -P Fabian/reads
wget ftp://ftp.sra.ebi.ac.uk/vol1/run/ERR376/ERR3764149/Sel_2Lb2.fastq.gz -P Fabian/reads
#NOTE(review): the downloads above land directly in Fabian/reads, but the
#two helper calls below operate on Fabian/reads/gzip_fastq — verify whether
#the .gz files were moved into that subdirectory first.
sh required_scripts/fastq_check.sh Fabian/reads/gzip_fastq
sh required_scripts/unzip_many_files.sh Fabian/reads/gzip_fastq
#2) Strip the ".gz.decomp" extension left by decompression (skip anything
#   that is not a regular file).
#   NOTE(review): this loop scans Fabian/reads/unzipped, whereas the other
#   studies use <reads>/gzip_fastq/unzipped — confirm which path is correct.
for filename in Fabian/reads/unzipped/*.decomp
do
[ -f "$filename" ] || continue
mv "$filename" "${filename%.gz.decomp}"
done
| true |
9549d9d1ecdc28605002646315d4787c64222cf1
|
Shell
|
aurelien-brabant/gnl-smasher
|
/scripts/compile.sh
|
UTF-8
| 966 | 3.640625 | 4 |
[] |
no_license
|
#!/bin/bash
# Compile get_next_line.o and get_next_line_utils.o

CC=gcc
# BUGFIX: this variable was previously misspelled "CLFAGS" while every
# compile line below reads "${CFLAGS}", so the warning flags were silently
# never applied. The name now matches its uses.
CFLAGS='-Werror -Wextra -Wall'

# Compile the project object files into ./.obj/.
#
# Globals:   BONUS       - "yes" selects the *_bonus.c sources
#            BUFFER_SIZE - value passed as -D BUFFER_SIZE=...
# Arguments: $1 - the verified gnl path.
# Calls fatal_error (defined by the caller) with EXIT_COMPILATION_ERROR
# when either object file is missing afterwards.
function compile_proj_objs() {
	local sources=("get_next_line" "get_next_line_utils")
	local src suffix

	rm -rf .obj
	mkdir .obj 2> /dev/null

	# Bonus sources share the same base names with a "_bonus" suffix.
	[[ "${BONUS}" == "yes" ]] && suffix="_bonus.c" || suffix=".c"

	for src in "${sources[@]}"
	do
		# ${CC}/${CFLAGS} are intentionally unquoted so the flags word-split
		# into separate gcc arguments.
		${CC} ${CFLAGS} -D BUFFER_SIZE="${BUFFER_SIZE}" -c "${1}/${src}${suffix}" -o "./.obj/${src}.o"
	done

	if [[ ! -f ./.obj/get_next_line.o || ! -f ./.obj/get_next_line_utils.o ]]; then
		fatal_error "Could not compile YOUR project!" $EXIT_COMPILATION_ERROR
	fi
}

# Compile the test binary (testbin.out) against the project objects.
#
# Arguments: $1 - source path of the test main.
# Calls fatal_error with EXIT_COMPILATION_ERROR when compilation fails.
function comp_test() {
	if ! ${CC} ${CFLAGS} ./.obj/get_next_line.o ./.obj/get_next_line_utils.o "$1" -o testbin.out; then
		fatal_error "Could not compile the test binary." $EXIT_COMPILATION_ERROR
	fi
}
| true |
bc618d1f81d4e7222ec641f259d910faeb88cc0c
|
Shell
|
AlexxandreFS/Batocera.PLUS
|
/plus/usr/bin/batocera-ds4motion
|
UTF-8
| 2,639 | 3.390625 | 3 |
[] |
no_license
|
#!/bin/bash
##
# Works around the bug where a single connected PS4 controller is detected
# as two controllers.
# Only works with PlayStation 4 controllers.
# Works with a controller connected by cable or through Sony's official
# controller adaptor.
# Does not work with a controller connected through a generic bluetooth
# adaptor.
#
# Batocera.PLUS
# Author: Alexandre Freire dos Santos
# E-Mail: alexxandre.freire@gmail.com
#
#######################################################################################
# USER CONFIGURATION BEGIN
#######################################################################################

##
# Enables or disables the unwanted extra controller.
# 0 = Disable the extra controller.
# 1 = Leave the extra controller enabled.
##
CONTROLE=0

##
# Name of the device to be removed when the controller is plugged into a
# USB port. Device symlinks live under /dev/input/by-id; depending on the
# Linux version the names may differ.
##
USB=usb-Sony_Interactive_Entertainment_Wireless_Controller-event-if03

##
# Name of the device to be removed when the controller is connected through
# the official Sony wireless adaptor (also under /dev/input/by-id).
##
WIRELESS=usb-Sony_Interactive_Entertainment_DUALSHOCK®4_USB_Wireless_Adaptor-event-if03

########################################################################################
# USER CONFIGURATION END
########################################################################################

##
# Where the log is written (/dev/null by default; uncomment to keep logs).
##
#LOG_FILE=/userdata/system/logs/$(basename ${0} .sh).log
LOG_FILE=/dev/null

echo '' >> "${LOG_FILE}"
echo "Data: $(date -Iseconds -u)" >> "${LOG_FILE}"

##
# Removes the event device node a by-id symlink points at, then the symlink
# itself. Exits the script with status 1 on any failure.
# Arguments: $1 - symlink name under /dev/input/by-id
##
remove_extra_device() {
    local link="/dev/input/by-id/${1}"
    local node
    # BUGFIX: resolve the symlink with readlink -f instead of parsing the
    # output of "ls -l" with cut, which silently breaks whenever the listing
    # format (columns, locale) changes.
    node=$(readlink -f "${link}") || exit 1
    rm "${node}" || exit 1
    rm "${link}" || exit 1
}

##
# Disable the unwanted extra controller.
##
if [ "${CONTROLE}" == '0' ]
then
    echo 'Procurando por controle...' >> "${LOG_FILE}"

    if [ -e "/dev/input/by-id/${USB}" ]
    then
        echo 'Controle encontrado em: Cabo USB' >> "${LOG_FILE}"
        remove_extra_device "${USB}"
        echo 'O controle extra foi desativado.' >> "${LOG_FILE}"
    elif [ -e "/dev/input/by-id/${WIRELESS}" ]
    then
        echo 'Controle encontrado em: Adaptador Wireless Oficial Sony' >> "${LOG_FILE}"
        remove_extra_device "${WIRELESS}"
        echo 'O controle extra foi desativado.' >> "${LOG_FILE}"
    else
        echo 'Controle não encontrado.' >> "${LOG_FILE}"
        exit 1
    fi
fi

exit 0
| true |
1633ccbb0dbdfc26a812aa16b52bd30dd6f0ac2a
|
Shell
|
charlesdaniels/toolchest
|
/bin/temputils
|
UTF-8
| 3,725 | 4.03125 | 4 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
########10########20########30## DOCUMENTATION #50########60########70########80
#
# OVERVIEW
# ========
# Utilities used to work with temporary directories. This is intended to be
# used only by other scripts, as it will probably not be particularly useful
# for an end user.
#
# USAGE
# =====
#
#   $1 . . . . Application tag - used to separate different application's temp
#              dirs
#
#   $2 . . . . Action - one of several operations enumerated below
#
# ENVIRONMENT VARIABLES
# =====================
#
#   TEMPUTILS_PREFIX_DIR . . . if set, will be used as the parent for all
#                              temputils created directories, otherwise
#                              defaults to `/tmp/`
#
# ACTIONS
# =======
#
#   create
#     create a temp folder in `/tmp` named by application tag
#
#   destroy
#     remove a temp folder in `/tmp` named by application tag
#
#   get
#     display on stdout the full path of the temporary folder for the
#     application tag
#
########10########20########30#### LICENSE #####50########60########70########80
# Copyright (c) 2016, Charles Daniels
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
#    this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
#    contributors may be used to endorse or promote products derived from
#    this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
########10########20########30########40########50########60########70########80

if [ $# -ne 2 ] ; then
	echo "ERROR 67: incorrect number of arguments: $#"
	echo "usage: $0 [APPTAG] [ACTION]"
	exit 1
fi

# Default to /tmp only when TEMPUTILS_PREFIX_DIR is unset. The `-` (rather
# than `:-`) expansion honors a set-but-empty value, matching the behavior
# of the original ${VAR?} probe.
TEMP_DIR=${TEMPUTILS_PREFIX_DIR-/tmp}

APPTAG="$1"
ACTION="$2"

# Compute the target directory once instead of re-invoking this script via
# "$0 $APPTAG get" for every use. BUGFIX: all expansions below are quoted so
# tags or prefix dirs containing spaces no longer word-split.
TARGET="$TEMP_DIR/$APPTAG"

case "$ACTION" in
	get)
		# Print the full path of the temp directory for this tag.
		echo "$TARGET"
		exit 0
		;;
	create)
		# Refuse to clobber an existing directory.
		if [ -d "$TARGET" ] ; then
			echo "ERROR 85: $TARGET exists, refusing to overwrite"
			exit 1
		fi
		if ! mkdir "$TARGET" ; then
			echo "ERROR 92: unknown error while creating $TARGET"
			exit 1
		fi
		exit 0
		;;
	destroy)
		# Removing a nonexistent directory is a silent no-op (exit 0),
		# matching the original behavior.
		if [ -d "$TARGET" ] ; then
			if ! rm -rf "$TARGET" ; then
				echo "ERROR 104: failed to remove temp directory $TARGET"
				exit 1
			fi
		fi
		exit 0
		;;
	*)
		echo "ERROR 83: no such action $ACTION"
		exit 1
		;;
esac

# Unreachable; retained as a defensive fallthrough.
exit 1
| true |
4785efb1db7821e7f2ab6afeff217e0aa30a120f
|
Shell
|
petronny/aur3-mirror
|
/xhtml11-doc/PKGBUILD
|
UTF-8
| 625 | 2.515625 | 3 |
[] |
no_license
|
# Contributor: Ondrej Kucera <ondrej.kucera@centrum.cz>
# PKGBUILD metadata for packaging the W3C XHTML 1.1 specification documents
# (makepkg sources this file as bash).
pkgname=xhtml11-doc
# Version is the dated W3C release (2010-11-23), also used in the source URL.
pkgver=20101123
pkgrel=2
pkgdesc="XHTML 1.1 Documentation"
# Pure documentation, no compiled code: installable on any architecture.
arch=('any')
url="http://www.w3.org/TR/xhtml11"
license=('W3C')
# build() rewrites links to point at the locally installed modularization docs.
depends=("xhtml-modularization-docs")
# Keep documentation files; skip stripping (there are no binaries).
options=('docs' '!strip')
source=("http://www.w3.org/TR/2010/REC-xhtml11-${pkgver}/xhtml11.tgz")
md5sums=('56366fb9ff58b79a2de71f127b9baf76')
build() {
  # Rewrite absolute links to the W3C modularization spec so they resolve
  # against the locally installed xhtml-modularization docs instead.
  cd "$srcdir/xhtml11-${pkgver}"
  local page
  for page in *.html; do
    sed -i 's%http://www.w3.org/TR/xhtml-modularization/%../xhtml-modularization/%g' "$page"
  done
}
package() {
  # Install every extracted document under the package's doc directory.
  local dest="$pkgdir/usr/share/doc/xhtml11"
  cd "$srcdir/xhtml11-${pkgver}"
  mkdir -p "$dest"
  cp -rf * "$dest"
}
| true |
72f20561f993faf7f295e910a51b81cdde43da7e
|
Shell
|
B3W/RNAMapping
|
/kallisto_index.sh
|
UTF-8
| 1,075 | 3.640625 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Copyright (c) 2019 Weston Berg
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# Build a kallisto index. Usage: $0 <input.fa> <output-index>

# Exactly two arguments are required: the fasta input and the index output.
if [[ $# -ne 2 ]]; then
	echo "Incorrect number of parameters"
	echo "Supply parameter for input file and out file, respectively"
	exit 1
fi

in_fasta=$1
out_index=$2

# The input must be an existing regular file.
if [[ ! -f "$in_fasta" ]]; then
	echo "Input file not found"
	exit 1
fi

# Only fasta inputs with a .fa suffix are accepted.
if [[ "$in_fasta" != *.fa ]]; then
	echo "Input file type must be fasta (.fa)"
	exit 1
fi

# Load the kallisto environment module, then build the index.
echo "Loading kallisto..."
module load kallisto

echo "Creating index..."
kallisto index -i "$out_index" "$in_fasta"
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.