blob_id stringlengths 40–40 | language stringclasses 1 value | repo_name stringlengths 4–115 | path stringlengths 2–970 | src_encoding stringclasses 28 values | length_bytes int64 31–5.38M | score float64 2.52–5.28 | int_score int64 3–5 | detected_licenses listlengths 0–161 | license_type stringclasses 2 values | text stringlengths 31–5.39M | download_success bool 1 class
---|---|---|---|---|---|---|---|---|---|---|---|
8981658fb70dcdaa8582b6adbeb6aa265c32bb8f
|
Shell
|
AlekseiFalshtynskii/ionic_build
|
/common/publish/publish_market
|
UTF-8
| 2,147 | 3.5 | 4 |
[] |
no_license
|
#!/bin/bash
. build/common/functions
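# handleArgs (from build/common/functions) is assumed to print "true" or
# "false"; the $(...) substitution in the if-conditions below then executes
# that builtin, which is why the tests read naturally.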
if $(handleArgs --help $*); then
build/common/help "build_all_after"
exit 0
fi
echo $(green "build/common/publish_market $*")
outputPath=$(handleArgs --outputPath $*)
build_ios=$(handleArgs ios $*)
build_android=$(handleArgs android $*)
ios_version=$(handleArgs --ios_version $*)
android_version=$(handleArgs --android_version $*)
cd $outputPath
xcarchive="РСХБизнес.xcarchive"
armv7="android-armv7-release-unsigned.apk"
x86="android-x86-release-unsigned.apk"
if $build_ios; then
files="$files $xcarchive"
fi
if $build_android; then
files="$files $armv7"
files="$files $x86"
fi
if $build_ios && [ ! -d $xcarchive ]; then
files_not_found="$files_not_found $xcarchive"
fi
if $build_android && [ ! -f $armv7 ]; then
files_not_found="$files_not_found $armv7"
fi
if $build_android && [ ! -f $x86 ]; then
files_not_found="$files_not_found $x86"
fi
if [ ! "$files_not_found" ]; then
dt=$(datetime)
xcarchive_dt="RSHBusiness_$dt.xcarchive"
armv7_dt="$(basename ${armv7%.apk})_$dt.apk"
x86_dt="$(basename ${x86%.apk})_$dt.apk"
if $build_ios; then
mv $xcarchive $xcarchive_dt
zipFiles="$zipFiles $xcarchive_dt"
fi
if $build_android; then
mv $armv7 $armv7_dt
mv $x86 $x86_dt
zipFiles="$zipFiles $armv7_dt $x86_dt"
fi
zipFile="RSHBusiness_$dt.zip"
zip -er -P 111 $zipFile $zipFiles
marketPath=$archivePathMarket
mkdir -p "$marketPath"
cp -f $zipFile "$marketPath"
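# climb back up to the repo root: $outputPath contains one "/" per directory
# level, so e.g. an outputPath of "www/output/" (illustrative) gives depth=2
# and two cd ../ steps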
depth=`echo $outputPath | grep -o / | wc -l`
for ((i=0; i<depth; i++)) do
cd ../
done
build/common/publish/dropbox/dropbox_uploader.sh delete $dropboxPathMarket
build/common/publish/dropbox/dropbox_uploader.sh upload $outputPath$zipFile $dropboxPathMarket/
sleep 60
link=$(getDropboxUrl $dropboxPathMarket/ $zipFile)
if $build_ios; then
message=$message"iOS $version_market_ios<br>"
fi
if $build_android; then
message=$message"Android $version_market_android<br>"
fi
message="$message $link"
python build/common/publish/email/send_email_market.py "$message"
else
python build/common/publish/email/send_email_error.py "$files_not_found"
fi
| true |
86fcacb3c2eb8a40a43c31aafc7ecc8f792f371f
|
Shell
|
Altersoundwork/Random-Scripts
|
/realtek_rtl8812bu.sh
|
UTF-8
| 1,640 | 3.6875 | 4 |
[] |
no_license
|
#!/bin/bash
#
# Installs and configures the Realtek RTL8812BU wireless card on Debian-based systems.
# v0.1 - 29-07-2021
# dev: @Altersoundwork
#
clear
################################################
bold=$(tput bold)    # ${bold}/${normal} were used but never defined; tput assumed
normal=$(tput sgr0)
echo ${bold}
echo "##########################################"
echo "Install & Configure Realtek RTL8812BU Wifi"
echo "##########################################"
echo ${normal}
#################################################
sudo apt-get update
sudo apt-get install -y build-essential git dkms
git clone https://github.com/cilynx/rtl88x2BU_WiFi_linux_v5.2.4.4_25643.20171212_COEX20171012-5044.git
cd rtl88x2BU_WiFi_linux_v5.2.4.4_25643.20171212_COEX20171012-5044
VER=$(cat ./version)
sudo rsync -rvhP ./ /usr/src/rtl88x2bu-${VER}
sudo dkms add -m rtl88x2bu -v ${VER}
sudo dkms build -m rtl88x2bu -v ${VER}
sudo dkms install -m rtl88x2bu -v ${VER}
sudo modprobe 88x2bu
echo
read -p "${bold}All good?${normal}" -n 1 -r
echo # (optional) move to a new line
if [[ ! $REPLY =~ ^[Yy]$ ]]
then
[[ "$0" = "$BASH_SOURCE" ]] && echo && echo "Fix it then!" && echo && exit 1 || return 1 # handle exits from shell or function but don't exit interactive shell
fi
clear
echo
read -p "${bold}You will now need to reboot. Press Y to do no or N to do it later. ${normal}" -n 1 -r
echo # (optional) move to a new line
if [[ ! $REPLY =~ ^[Yy]$ ]]
then
[[ "$0" = "$BASH_SOURCE" ]] && echo && echo "${bold}The system must be restarted before continuing for correct functionality. Please manually restart as soon as possible.${normal}" && echo && exit 1 || return 1 # handle exits from shell or function but don't exit interactive shell
fi
sudo reboot
| true |
8051e3601ffee7822d5a15a91ce0c287a4ce9a55
|
Shell
|
syntacticsugarglider/tts-collator
|
/concat
|
UTF-8
| 700 | 3.265625 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
sox -n -r 24000 -c 1 short_silence.wav trim 0.0 0.5
sox -n -r 24000 -c 1 long_silence.wav trim 0.0 3
indices=()
for file in $(ls -1 | sort -n | grep '\.mp3$' | grep -v 'out\.mp3$'); do  # skip previously generated *out.mp3 files
indices+=($(echo $file | cut -d'_' -f2))
done
indices=($(echo ${indices[@]} | tr ' ' '\n' | sort -nu))
for index in "${indices[@]}"; do
files=$(ls -1 | grep "tts\_$index.*")
echo $files | sed -z s/\ /\\nshort_silence.wav\\n/g | xargs sh -c 'sox ${@:2} $1_out.mp3' sh $index
done
ls -1 | grep tts.*\.mp3 | xargs rm
rm short_silence.wav
ls -1 | grep ".*\_out\.mp3" | sort -n | sed -z s/\\n/\\nlong_silence.wav\\n/g | xargs sh -c 'sox $@ out.mp3' sh
rm long_silence.wav
ls -1 | grep ".*\_out\.mp3" | xargs rm
| true |
c7cd1154aee69f1bb26a08404adf2cd99e0f3ec2
|
Shell
|
willianxz/shellscript
|
/criandoArquivoTexto.sh
|
UTF-8
| 271 | 2.78125 | 3 |
[] |
no_license
|
echo "Será criado um arquivo texto e escrito alguma coisa dentro dele."
echo "Escreva qualquer coisa para ser colocado no arquivo texto."
read texto
echo $texto > meuTexto.txt
echo "Foi criado um arquivo texto com o nome meuTexto, contendo o que foi escrito."
read
exit
| true |
e5900babae44b3ed611fe57aedb793f5679c8fce
|
Shell
|
mthang/Galaxy-Pipeline
|
/Wrappers/cdhit_filter.sh
|
UTF-8
| 220 | 2.90625 | 3 |
[] |
no_license
|
#!/bin/bash
# run the CD-HIT filter tool with the provided arguments
# note: only works with non-zip data
CD-HIT-Filter "${@:1:$(($#-1))}"
# move the output file to the provided output path
mv "${2%.*}"*min*.fasta $7
| true |
39125a50e4373a00832cb15b5846093fba5ac521
|
Shell
|
radtek/data-pipeline
|
/data-sync/src/main/resources/bin/stop.sh
|
UTF-8
| 190 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/bash
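# resolve the directory containing this script, regardless of the caller's cwd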
WORK_PATH=$(cd `dirname $0`;pwd)
LOG_PATH=${WORK_PATH}/../log
if [ -f ${LOG_PATH}/pid.log ]
then
echo "cat ${LOG_PATH}/pid.log | kill"
cat ${LOG_PATH}/pid.log | kill
fi
| true |
d99a9eb9ce43f0e9af093074c0185200ad801349
|
Shell
|
tomphp/dotmanager
|
/drivers/ubuntu_root.sh
|
UTF-8
| 296 | 3.25 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -e
function package_manager_install {
apt-get install -y "$1"
}
export -f package_manager_install
function package_manager_update {
apt-get update
}
export -f package_manager_update
function bash_profile_file {
echo "$HOME/.bashrc"
}
export -f bash_profile_file
| true |
3f06818a4b6fdb5b8801088f8d92c3c277dbb23e
|
Shell
|
allyoushawn/grape_project
|
/disentangle/make_feats.sh
|
UTF-8
| 424 | 2.984375 | 3 |
[] |
no_license
|
#!/bin/bash
feat_ark=$1
prons=$2
op_feats_dir=$3
seq_len=$4  # assumed: sequence length passed as the 4th argument; the original used $seq_len without ever setting it
[ -f $feat_ark ] || exit 1
[ -f $prons ] || exit 1
mkdir -p $op_feats_dir/$seq_len
rm -f $op_feats_dir/$seq_len/*
python3 get_feat.py $prons $feat_ark $op_feats_dir $seq_len
#if [ ! -f $feats_dir/extracted ]; then
# [ -f $feats_dir ] && rm -rf $feats_dir
# python3 get_feat.py $prons $feat_dir/cmvned_feats.ark $op_feats_dir
# echo 1 > $feat_dir/feats/extracted
#fi
| true |
b6caae99ac6c89045db86dbd4a03dfb599eb9e96
|
Shell
|
gilbertfrancois/dotfiles
|
/nvim/install_nvim.sh
|
UTF-8
| 8,603 | 3.390625 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -xe
NVIM_VERSION="0.9.1"
NODE_VERSION="18.16.0" # NodeJS LTS
# FZF_VERSION="0.35.0"
# LUA_LSP_VERSION="3.6.4"
# VSCODE_LLDB_VERSION="1.8.1"
NVIM_CONFIG_DIR=${HOME}/.config/nvim
NVIM_SHARE_DIR=${HOME}/.local/share/nvim
NVIM_LIB_DIR=${NVIM_SHARE_DIR}/lib
function reset_config_dir {
echo "--- (Re)setting Neovim config folder."
rm -rf ${NVIM_CONFIG_DIR}
rm -rf ${NVIM_SHARE_DIR}
mkdir -p ${HOME}/.config
mkdir -p ${NVIM_SHARE_DIR}
mkdir -p ${NVIM_LIB_DIR}
ln -s ${HOME}/.dotfiles/nvim ${NVIM_CONFIG_DIR}
}
function install_deps {
echo "--- Installing ctags, ripgrep"
# TODO: Install version for ARMv8
if [[ `uname -s` == "Linux" ]]; then
sudo apt install -y curl wget exuberant-ctags ninja-build
# if [[ `uname -m` == "x86_64" ]]; then
# sudo snap install ripgrep --classic
# fi
elif [[ `uname -s` == "Darwin" ]]; then
# brew reinstall curl ctags the_silver_searcher fd ripgrep wget pandoc pandoc-crossref rust ninja
brew reinstall curl wget ctags ninja
fi
}
function install_neovim {
echo "--- Installing Neovim."
if [[ `uname -s` == "Linux" ]]; then
if [[ `uname -m` == "x86_64" ]]; then
sudo rm -rf /usr/local/bin/nvim.appimage /usr/local/bin/nvim
cd /tmp
wget https://github.com/neovim/neovim/releases/download/v${NVIM_VERSION}/nvim.appimage
sudo mv nvim.appimage /usr/local/bin
sudo chmod 755 /usr/local/bin/nvim.appimage
sudo ln -s /usr/local/bin/nvim.appimage /usr/local/bin/nvim
elif [[ `uname -m` == "aarch64" ]]; then
echo "Build Neovim from source."
elif [[ `uname -m` == "armv7l" ]]; then
echo "Build Neovim from source."
fi
elif [[ `uname -s` == "Darwin" ]]; then
brew update
brew reinstall neovim wget
else
echo "Unsupported OS."
fi
}
function install_python {
echo "--- Installing python environment for NeoVim."
if [[ `uname -s` == "Linux" ]]; then
sudo apt update
sudo apt install -y python3-venv
elif [[ `uname -s` == "Darwin" ]]; then
brew update
brew reinstall python
else
echo "Unsupported OS."
fi
VENV_PATH="${NVIM_LIB_DIR}/python"
rm -rf ${VENV_PATH}
cd ${NVIM_LIB_DIR}
python3 -m venv ${VENV_PATH}
source ${VENV_PATH}/bin/activate
# Avoid problems due to outdated pip.
pip install --upgrade pip
pip install setuptools wheel
# Install neovim extension, python liners, formatters, import sorters and more...
# pip install neovim jedi rope ropevim pylint flake8 pynvim yapf isort autopep8 black debugpy
pip install neovim
}
function install_node {
INSTALL_DIR=${NVIM_LIB_DIR}
echo "--- Installing nodejs."
if [[ `uname -s` == "Linux" ]]; then
NODE_OS="linux"
NODE_EXTENSION="tar.gz"
if [[ `uname -m` == "x86_64" ]]; then
NODE_ARCH="x64"
elif [[ `uname -m` == "aarch64" ]]; then
if [[ `getconf LONG_BIT` == "32" ]]; then
NODE_ARCH="armv7l"
else
NODE_ARCH="arm64"
fi
elif [[ `uname -m` == "armv7l" ]]; then
NODE_ARCH="armv7l"
fi
elif [[ `uname -s` == "Darwin" ]]; then
NODE_OS="darwin"
NODE_EXTENSION="tar.gz"
if [[ `uname -m` == "x86_64" ]]; then
NODE_ARCH="x64"
elif [[ `uname -m` == "arm64" ]]; then
NODE_ARCH="arm64"
fi
fi
cd /tmp
rm -rf node*
rm -rf ${NVIM_LIB_DIR}/node*
wget https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-${NODE_OS}-${NODE_ARCH}.${NODE_EXTENSION}
echo "node-v${NODE_VERSION}-${NODE_OS}-${NODE_ARCH}.${NODE_EXTENSION}"
tar -xvf node-v${NODE_VERSION}-${NODE_OS}-${NODE_ARCH}.${NODE_EXTENSION}
mv node-v${NODE_VERSION}-${NODE_OS}-${NODE_ARCH} ${NVIM_LIB_DIR}
ln -s ${NVIM_LIB_DIR}/node-v${NODE_VERSION}-${NODE_OS}-${NODE_ARCH} ${NVIM_LIB_DIR}/node
export PATH=${NVIM_LIB_DIR}/node/bin:$PATH
${NVIM_LIB_DIR}/node/bin/npm install --location=global --prefix ${NVIM_LIB_DIR}/node neovim
# ${NVIM_LIB_DIR}/node/bin/npm install --location=global --prefix ${NVIM_LIB_DIR}/node pyright
# ${NVIM_LIB_DIR}/node/bin/npm install --location=global --prefix ${NVIM_LIB_DIR}/node prettier
# ${NVIM_LIB_DIR}/node/bin/npm install --location=global --prefix ${NVIM_LIB_DIR}/node typescript typescript-language-server
# ${NVIM_LIB_DIR}/node/bin/npm install --location=global --prefix ${NVIM_LIB_DIR}/node diagnostic-languageserver
# ${NVIM_LIB_DIR}/node/bin/npm install --location=global --prefix ${NVIM_LIB_DIR}/node vscode-langservers-extracted
# ${NVIM_LIB_DIR}/node/bin/npm install --location=global --prefix ${NVIM_LIB_DIR}/node tree-sitter
# if [[ `uname -s` == "Linux" ]]; then
# ${NVIM_LIB_DIR}/node/bin/npm install --location=global --prefix ${NVIM_LIB_DIR}/node tree-sitter-cli
# fi
}
function install_fzf {
echo "--- Installing FZF."
if [[ `uname -s` == "Linux" ]]; then
FZF_OS="linux"
FZF_EXTENSION="tar.gz"
if [[ `uname -m` == "x86_64" ]]; then
FZF_ARCH="amd64"
elif [[ `uname -m` == "aarch64" ]]; then
FZF_ARCH="arm64"
elif [[ `uname -m` == "armv7l" ]]; then
FZF_ARCH="armv7"
fi
cd /tmp
wget https://github.com/junegunn/fzf/releases/download/${FZF_VERSION}/fzf-${FZF_VERSION}-${FZF_OS}_${FZF_ARCH}.${FZF_EXTENSION}
tar zxvf fzf-${FZF_VERSION}-${FZF_OS}_${FZF_ARCH}.tar.gz
sudo cp fzf /usr/local/bin
elif [[ `uname -s` == "Darwin" ]]; then
brew reinstall fzf
fi
}
function lsp_extensions {
# Python
${HOME}/.local/share/nvim/lib/python/bin/python -m pip install pynvim pyright black isort
# LaTeX
cargo install --force texlab
# # GLSL
pushd /tmp
rm -rf glsl-language-server
git clone https://github.com/svenstaro/glsl-language-server.git
cd glsl-language-server
pwd
git submodule update --init
cmake -Bbuild -GNinja
ninja -Cbuild
sudo ninja -Cbuild install
popd
# Lua
pushd /tmp
if [[ `uname -s` == "Linux" ]]; then
OS="linux"
elif [[ `uname -s` == "Darwin" ]]; then
OS="darwin"
else
OS=""
echo "Unsupported OS."
fi
if [[ `uname -m` == "x86_64" ]]; then
ARCH="x64"
elif [[ `uname -m` == "arm64" ]]; then
ARCH="arm64"
elif [[ `uname -m` == "aarch64" ]]; then
ARCH="aarch64"
elif [[ `uname -m` == "armv7l" ]]; then
ARCH="armv71"
else
ARCH=""
echo "Unsupported architecture"
fi
wget https://github.com/sumneko/lua-language-server/releases/download/${LUA_LSP_VERSION}/lua-language-server-${LUA_LSP_VERSION}-${OS}-${ARCH}.tar.gz
rm -rf lua-language-server
mkdir lua-language-server
cd lua-language-server
tar zxvf ../lua-language-server-${LUA_LSP_VERSION}-${OS}-${ARCH}.tar.gz
cd ..
rm -rf ${NVIM_LIB_DIR}/lua-language-server
cp -r lua-language-server ${NVIM_LIB_DIR}/
popd
}
function install_dap_extensions {
if [[ `uname -s` == "Linux" ]]; then
OS="linux"
elif [[ `uname -s` == "Darwin" ]]; then
OS="darwin"
else
OS=""
echo "Unsupported OS."
fi
if [[ `uname -m` == "x86_64" ]]; then
ARCH="x86_64"
elif [[ `uname -m` == "aarch64" ]]; then
ARCH="aarch64"
elif [[ `uname -m` == "arm64" ]]; then
ARCH="aarch64"
elif [[ `uname -m` == "armv7l" ]]; then
ARCH="arm"
else
ARCH=""
echo "Unsupported architecture"
fi
pushd /tmp
rm -rf /tmp/vscode_lldb
mkdir /tmp/vscode_lldb
cd /tmp/vscode_lldb
wget https://github.com/vadimcn/vscode-lldb/releases/download/v${VSCODE_LLDB_VERSION}/codelldb-${ARCH}-${OS}.vsix
unzip codelldb-${ARCH}-${OS}.vsix
rm -f codelldb-${ARCH}-${OS}.vsix
cd ..
mv vscode_lldb ${NVIM_LIB_DIR}
popd
}
function __os_template {
if [[ `uname -s` == "Linux" ]]; then
OS="linux"
elif [[ `uname -s` == "Darwin" ]]; then
OS="darwin"
else
OS=""
echo "Unsupported OS."
fi
if [[ `uname -m` == "x86_64" ]]; then
ARCH="x86_64"
elif [[ `uname -m` == "aarch64" ]]; then
ARCH="aarch64"
elif [[ `uname -m` == "armv7l" ]]; then
ARCH="armv71"
else
ARCH=""
echo "Unsupported architecture"
fi
}
reset_config_dir
install_neovim
install_deps
install_python
install_node
# install_fzf
# lsp_extensions
# install_dap_extensions
| true |
c059f9bb36a4f95757b3a5261de9cef8ab28e43c
|
Shell
|
engrab6/mechsysTNN
|
/scripts/old/change_includes.sh
|
UTF-8
| 209 | 2.953125 | 3 |
[] |
no_license
|
#!/bin/bash
set -e
if [ "$#" -ne 1 ]; then
echo
echo "Usage:"
echo " $0 file.{h,cpp}"
echo
exit 1
fi
sed -i '/\/\/\ MechSys/,/^$/ s/.h"/.h>/' $1
sed -i '/\/\/\ MechSys/,/^$/ s/"/<mechsys\//' $1
| true |
ae47e9368ee1a3276734ac30ff78116976a80e74
|
Shell
|
1301688/front
|
/bin/create.sh
|
UTF-8
| 1,228 | 3.734375 | 4 |
[
"0BSD"
] |
permissive
|
#!/bin/zsh
# terminal > yarn new:post (package.json)
printf "Enter the article title >> "; read TITLE
printf "Enter the file name (sample.mdx) >> "; read FILENAME
printf "Enter tags\n"
printf "The existing tags are listed below\n\n"
ls -r content/article/
printf "\nTag name >> "; read TAGS
printf "Enter the category folder\n"
printf "The existing category folders are listed below\n\n"
ls -r content/article/
printf "\nCategory folder name >> "; read DIRNAME
EXTENSION=$(echo $FILENAME | cut -f 2 -d .)
if [ $EXTENSION != "mdx" ]; then
FILENAME=${FILENAME}".mdx"
fi;
FILEPATH="content/article/$DIRNAME/$FILENAME"
if [ ! -d content/article/$DIRNAME ]; then
mkdir content/article/$DIRNAME
fi
cp bin/template.mdx $FILEPATH
CURRENT_TIME=$(date +"%Y-%m-%dT%TZ")
function replaceTemplate() {
# replace $1 with $2
# Example: replaceTemplate @DATE $DATE
sed -i -e "s/$1/$2/g" $FILEPATH
}
# Replace
replaceTemplate @TITLE $TITLE
replaceTemplate @TAGS $TAGS
replaceTemplate @DATE $CURRENT_TIME
replaceTemplate @CATEGORYFOLDER $DIRNAME
\rm $FILEPATH-e
printf "$FILEPATHに記事ファイルを作成しました"
| true |
11decc081cdf227ffe1120f4e01cbc6b305e5cd6
|
Shell
|
sammaphey/mstk-docker
|
/env-vars.sh
|
UTF-8
| 1,046 | 4 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#####################################################################
# #
# Author: Samuel Eklund #
# Email: sam@llnl.gov #
# #
# Description: This script sets environment variables for both the #
# docker-build.sh and docker-run.sh scripts. #
#####################################################################
# Set env vars for image tags
OS_CENTOS=centos
DEBIAN=debian
OS_UBUNTU_14_04=ubuntu_14_04
OS_UBUNTU_16_04=ubuntu_16_04
# Sets default OS to Ubuntu 16.04
OS=$OS_UBUNTU_16_04
# Set version env vars for file locations and for toolkit version
DOCKERFILE_FOLDER=Dockerfiles
TOOLKIT_FOLDER=Toolkit_zip_files
TOOLKIT_VERSION=1.4.0
# Function to grab the full path of a file, used in run scripts
abspath () {
case "$1" in /*)printf "%s\n" "$1";; *)printf "%s\n" "$PWD/$1";; esac;
}
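# Example (hypothetical paths): `abspath docker-run.sh` prints "$PWD/docker-run.sh"
# for a relative argument, while an absolute argument such as /opt/docker-run.sh
# is printed unchanged.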
| true |
11c9af01d4124222a41c72ff262e30e2e431d2f5
|
Shell
|
yanfeng1612/go-model
|
/template/java/utils/stop.sh
|
UTF-8
| 946 | 3.75 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
set -o errexit
set -o nounset
EXE_JAR="demo.jar"
# get the PID of the running application
function get_running_pid {
pgrep -f "$EXE_JAR"
}
readonly SELF_DIR=$(cd $(dirname $0) && pwd)
function stop {
local -i timeout=20
local -i interval=1
local -r service_pid=$(get_running_pid) || true # ignore error
[[ -n $service_pid ]] || {
echo "WARNING: process not found, nothing to stop" >&2
exit 0
}
kill $service_pid
while (( timeout > 0 )) && get_running_pid > /dev/null; do
echo -n "."ƒ
sleep $interval
timeout=$(( timeout - interval ))
done
if get_running_pid > /dev/null; then
echo "WARNING: process still alive, sending SIGKILL ..." >&2
kill -9 "$service_pid"
fi
}
function main {
get_running_pid > /dev/null || {
echo "WARNING: process not found, nothing to stop" >&2
exit 0 # Ignore error
}
stop
}
main "$@"
| true |
ec46794107af0dd9693991a70aad4a55ab291871
|
Shell
|
enguyen/csv-to-mailgun
|
/mailgun.sh
|
UTF-8
| 987 | 3.984375 | 4 |
[] |
no_license
|
#!/bin/bash
if [ ${#@} == 0 ]; then
echo "Usage: $0 APIKey file"
echo "* APIKey: Your Mailgun API key"
echo "* file: The CSV file to read from. First column are From: email addresses. All other columns are the emails to forward to."
fi
while IFS='' read -r line || [[ -n "$line" ]]; do
emails=($line)
numemails=${#emails[@]}
fromEmail=${emails[0]}
if test "${fromEmail: -1}" == ","
then
fromEmail=${fromEmail%?} # Trim trailing comma.
fi
command="curl -s --user '$1' \
https://api.mailgun.net/v3/routes \
-F description='Generated by mailgun.sh' \
-F priority=0 \
-F expression='match_recipient(\"$fromEmail\")'"
for ((i=1; i < $numemails; i++))
do
email=${emails[$i]}
if test "$numemails" -gt "2" && test "${email: -1}" == ","
then
email=${email%?} # Trim trailing comma.
fi
# echo "$email"
command+=" -F action='forward(\"$email\")'"
done
# echo $command
eval $command
done < "$2"
| true |
363091bf0f1e7718f0b1aabb4d70cf9f174e87b9
|
Shell
|
hamscher/Arelle
|
/build27Dist.sh
|
UTF-8
| 2,107 | 2.890625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Build Arelle 27 server distribution
# create version with date and a shell file to name output with the date
python3.2 buildVersion.py
BUILT27DIR=../svr-2.7
rm -f -r ${BUILT27DIR}
mkdir ${BUILT27DIR}
cp arelleCmdLine.py arelle_test.py conftest.py app.yaml backends.yaml ${BUILT27DIR}
mkdir ${BUILT27DIR}/arelle
cp -R arelle ${BUILT27DIR}
rm -f -r ${BUILT27DIR}/*.pyc
rm -f -r ${BUILT27DIR}/arelle/pyparsing/*
rm -f -r ${BUILT27DIR}/arelle/scripts-macOS
rm -f -r ${BUILT27DIR}/arelle/scripts-unix
rm -f -r ${BUILT27DIR}/arelle/scripts-windows
cp arelle/scripts-unix/* ${BUILT27DIR}
# defer processing plugins
rm -f -r ${BUILT27DIR}/arelle/plugin
# delete GUI modules
rm -f -r ${BUILT27DIR}/*.pyw
rm -f ${BUILT27DIR}/arelle/CntlrQuickBooks.py
rm -f ${BUILT27DIR}/arelle/CntlrWinMain.py
rm -f ${BUILT27DIR}/arelle/CntlrWinTooltip.py
find ${BUILT27DIR}/arelle -name 'Dialog*.py' -print0 | xargs -0 rm -f
rm -f ${BUILT27DIR}/arelle/UiUtil.py
rm -f ${BUILT27DIR}/arelle/ViewWin*.py
rm -f ${BUILT27DIR}/arelle/WatchRss.py
# convert all except plugins
python2.7 /usr/local/bin/3to2 -w ${BUILT27DIR}
# convert plugins
cp -R arelle/plugin ${BUILT27DIR}/arelle
# encode programs with utf-8 source
python3.2 encodeUtf8PySource.py arelle/plugin/loadFromExcel.py ${BUILT27DIR}/arelle/plugin/loadFromExcel.py
python3.2 encodeUtf8PySource.py arelle/plugin/saveLoadableExcel.py ${BUILT27DIR}/arelle/plugin/saveLoadableExcel.py
python2.7 /usr/local/bin/3to2 -w ${BUILT27DIR}/arelle/plugin
#python2.7 /usr/local/bin/3to2 -w ${BUILT27DIR}/webserver
#python2.7 /usr/local/bin/3to2 -w ${BUILT27DIR}/xlrd
#python2.7 /usr/local/bin/3to2 -w ${BUILT27DIR}/xlwt
rm -f -r ${BUILT27DIR}/*.bak
# copy non-converted PythonUtil.py (to block 3to2 conversions)
cp arelle/PythonUtil.py ${BUILT27DIR}/arelle/PythonUtil.py
# copy bottle that works on 2.7
cp arelle/webserver/bottle.py ${BUILT27DIR}/arelle/webserver/bottle.py
# copy pyparsing that works on 2.7
cp arelle/pyparsing/__init__.py ${BUILT27DIR}/arelle/pyparsing/__init__.py
cp arelle/pyparsing/pyparsing_py2.py2 ${BUILT27DIR}/arelle/pyparsing/pyparsing_py2.py
| true |
9c919ec68e93d27993817da3548ec47b27d9ed4f
|
Shell
|
ilventu/aur-mirror
|
/wakeonlan/PKGBUILD
|
UTF-8
| 815 | 2.78125 | 3 |
[] |
no_license
|
# Contributor: Hyperair <hyperair@gmail.com>
pkgname=wakeonlan
pkgver=0.41
pkgrel=2
pkgdesc="Utility for waking up computers using UDP Wake-on-Lan packets"
arch=('i686' 'x86_64')
url="http://gsd.di.uminho.pt/jpo/software/wakeonlan/"
license=('GPL')
depends=(perl)
makedepends=(perl)
source=(http://gsd.di.uminho.pt/jpo/software/wakeonlan/downloads/wakeonlan-${pkgver}.tar.gz)
md5sums=('d3143c5fe92d16196ac853b55dd421b5')
options=(docs)
build() {
cd $startdir/src/$pkgname-$pkgver
perl Makefile.PL || return 1
make || return 1
install -D -m0755 ${pkgname} ${startdir}/pkg/usr/bin/${pkgname} || return 1
install -D -m0644 blib/man1/${pkgname}.1p ${startdir}/pkg/usr/share/man/man1p/${pkgname}.1p || return 1
find examples -exec install -D -m0644 '{}' ${startdir}/pkg/usr/share/doc/${pkgname}/\{\} \; || return 1
}
| true |
ee9359031a0462e0aee385f4ebfa10b2b4ff1096
|
Shell
|
DRpandaMD/gcp-terraform
|
/terraform-workspace/scripts/bootstrap.sh
|
UTF-8
| 291 | 2.609375 | 3 |
[] |
no_license
|
#!/bin/bash
# update and upgrade packages
sudo apt update
sudo apt upgrade -y
# install docker
sudo apt install -y docker.io
# set the group for docker if it has not been done
sudo groupadd docker
# add the user to the group (you will have to run this anyway)
sudo usermod -aG docker $USER
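# note: the group change only takes effect after logging out and back in
# (or after running `newgrp docker` in the current shell)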
| true |
6f1c20d4e814894abcfec03789372ab5c78e219e
|
Shell
|
tiger-tiger/MoloVol
|
/res/linux/shell/icons.sh
|
UTF-8
| 265 | 3.015625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
targeticon=$1
targetdir=$2
for d in /usr/share/icons/hicolor/*/; do
d=${d::-1}
d=${d##*/}
mkdir -p $targetdir/$d
mkdir -p $targetdir/$d/apps
n=${d##*x}
if [[ $n =~ ^[0-9]+$ ]] ; then
convert -resize $d $targeticon $targetdir/$d/apps/molovol.png
fi
done
| true |
4091dacb873e0172a8fd4377580404f339b48a6f
|
Shell
|
deyiliu/crosswalk-test-suite
|
/wrt/wrt-common-webapp-tests/inst.sh.xpk
|
UTF-8
| 1,120 | 3.90625 | 4 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#parse params
USAGE="Usage: ./inst.sh [-i] [-u]
-i install xpk and config environment
-u uninstall xpk and remove source file
[-i] option was set as default."
PACKAGENAME=wrt-common-webapp-tests
XPKNAME=${PACKAGENAME}.xpk
RESOURCE_DIR=/opt/usr/media
function installpkg(){
for xpkfile in $(ls *.xpk)
do
if [ "${xpkfile:0:14}" = "update_version" ];then
continue
fi
xwalkctl --install $(dirname $0)/$xpkfile
done
#TODO: copy resource
#eg:cp xx $RESOURCE_DIR
}
function uninstallpkg(){
for xpkfile in $(ls *.xpk)
do
pkgid=`pkgcmd -l | grep -i ${xpkfile%.*} | cut -d '[' -f3 | cut -d ']' -f1`
xwalkctl --uninstall $pkgid
done
#TODO: remove resource
#eg:rm xx
### remove source file ###
if [ -d /opt/usr/media/tct/opt/$PACKAGENAME ];then
rm -rf /opt/usr/media/tct/opt/$PACKAGENAME
else
echo "Remove source file fail,please check if the source file exist: /opt/usr/media/tct/opt/$PACKAGENAME ..."
fi
}
case "$1" in
-h|--help) echo "$USAGE"
exit ;;
""|-i) installpkg;;
-u) uninstallpkg;;
*) echo "Unknown option: $1"
echo "$USAGE"
exit ;;
esac
| true |
b69aac9c8888b27b25f21f804066482bcd83b5b1
|
Shell
|
jeremybusk/share
|
/postgresl-to-ramdisk.sh
|
UTF-8
| 2,260 | 4.03125 | 4 |
[] |
no_license
|
#!/bin/bash
# It is probably redundant if you set the settings mentioned in https://gist.github.com/zekefast/42273658939724ba7c7a .
# But anyway it will not hurt anybody (sure if you are not putting your production database to RAM :)).
#
# Look for more detailed description in follow articles:
# - http://blog.vergiss-blackjack.de/2011/02/run-postgresql-in-a-ram-disk/
#
# ATTENTION:
# DO NOT apply this approach if you store important data in the postgresql cluster, because it could cause
# the loss of all your data in postgresql!
#
# OS: GNU/Linux Debian Wheezy
#
# It should run on other systems without or with small modification.
# Here is a plan.
# 1. Stop postgres service.
# 2. Create tmpfs directory.
# 3. Mount tmpfs to newly create directory or add follow line to /etc/fstab to mount directory on system loading:
# tmpfs /mnt/tmpfs tmpfs size=768M,nr_inodes=10k,mode=0777 0 0
# 4. Synchronize existing database to directory where tmpfs was mounted.
# 5. Bind tmpfs mounted directory to postgres data_directory.
# 6. Start postgres service.
declare -r POSTGRES_SERVICE="/etc/init.d/postgresql"
declare -r POSTGRES_SERVICE_STOP_CMD="$POSTGRES_SERVICE stop"
declare -r POSTGRES_SERVICE_START_CMD="$POSTGRES_SERVICE start"
declare -r TMPFS_MOUNT_DIR="/mnt/tmpfs"
declare -r TMPFS_SIZE="768M" # in megabytes
declare -r POSTGRES_DIR="/var/lib/postgresql"
echo "Stoping postgres ... "
eval $POSTGRES_SERVICE_STOP_CMD && echo "done"
if [ ! -d $TMPFS_MOUNT_DIR ]; then
mkdir -p "$TMPFS_MOUNT_DIR" &&
chmod 0777 "$TMPFS_MOUNT_DIR" &&
echo "Directory '$TMPFS_MOUNT_DIR' with 0777 credentials was created."
fi
mount | grep -Fq "$TMPFS_MOUNT_DIR"
if [ $? -ne 0 ]; then
mount -t tmpfs -o size=$TMPFS_SIZE,nr_inodes=10k,mode=0777 tmpfs "$TMPFS_MOUNT_DIR" &&
echo "RAM disk was mounted to $TMPFS_MOUNT_DIR."
else
echo "Found: mounted RAM disk at $TMPFS_MOUNT_DIR."
fi
mount | grep -Fq "$POSTGRES_DIR"
if [ $? -ne 0 ]; then
rsync --archive "$POSTGRES_DIR/" "$TMPFS_MOUNT_DIR/" &&
echo "$POSTGRES_DIR/ was synchronized to RAM disk at $TMPFS_MOUNT_DIR/."
mount -o bind "$TMPFS_MOUNT_DIR/" "$POSTGRES_DIR/" &&
echo "RAM disk binded to $POSTGRES_DIR/."
else
echo "Found: Binded RAM disk to $POSTGRES_DIR/."
fi
echo "Starting postgres ..."
| true |
eda6b1c66c98fc40de23507d58e7b8720c3bcbe4
|
Shell
|
lukaszsoszynski/modular-programming-before-and-after-java-9
|
/jigsaw/customJreImage.sh
|
UTF-8
| 1,464 | 2.890625 | 3 |
[] |
no_license
|
#!/bin/bash
#First build and run project with script "runWithModulePath.sh"
echo "Required dependencies (this works with jdk 12 or latter due to mixing multi release and not multi relese jars)"
echo "Thus jdeps from java 11 is useless in this case."
#jdeps --list-deps --ignore-missing-deps --multi-release 11 --module-path module-path --class-path class_path/*.jar
#java.base
# java.desktop
# java.instrument
# java.logging
# java.management
# java.naming
# java.scripting
# java.security.jgss
# java.security.sasl
# java.sql
# java.transaction.xa
# java.xml
# jdk.attach
# jdk.jdi
# jdk.unsupported
# jdk8internals/sun.reflect
IMAGE_PATH=customJreImage
rm -rf $IMAGE_PATH
MODULE_PATH_DIR=module_path
CLASS_PATH_DIR=class_path
#one additional module required: java.compiler
#JLINK_EXTRA=""
JLINK_EXTRA="--no-header-files --no-man-pages --compress=2 --strip-debug"
jlink --module-path $JAVA_HOME/jmods \
$JLINK_EXTRA \
--add-modules java.desktop,java.instrument,java.logging,java.management,java.naming,java.scripting,java.security.jgss,java.security.sasl,java.sql,java.transaction.xa,java.xml,jdk.attach,jdk.unsupported,java.compiler \
--output $IMAGE_PATH
echo "Size of custom jre image:"
du -hs $IMAGE_PATH
$IMAGE_PATH/bin/java \
--class-path "$CLASS_PATH_DIR/*" \
--module-path $MODULE_PATH_DIR \
--add-modules java.instrument,java.xml.bind \
-m install/com.gfi.lsos.training.jigsaw.install.JigsawApplication
| true |
663ba4f19ffeb7161b11fff469c0d5ca1d01bfee
|
Shell
|
mironal/BotHub
|
/bin/show_webhook.sh
|
UTF-8
| 709 | 2.78125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Show the details of the hook with the given ID
#
# The following environment variables are required:
# BOTHUB_GITHUB_TOKEN: GitHub token (admin rights required).
# BOTHUB_HOOK_ID: the WebHook ID; it can be found with list_webhook.sh.
# BOTHUB_USER_NAME: the UserName part of https://github.com/UserName/RepoName
# BOTHUB_REPO_NAME: the RepoName part of https://github.com/UserName/RepoName
source pre_check.sh
check_token
check_hook_id
check_user_name
check_repo_name
# PATCH /orgs/:org/hooks/:id
curl --silent -H "Authorization: token ${BOTHUB_GITHUB_TOKEN}" -H "Content-Type: application/json" \
https://api.github.com/repos/${BOTHUB_USER_NAME}/${BOTHUB_REPO_NAME}/hooks/${BOTHUB_HOOK_ID} \
| jq "."
| true |
a9767a4b7202d992b8ad2e413a4d1f08d0a0b763
|
Shell
|
cloudbase/nova-ci
|
/devstack_vm/bin/parallel-test-runner.sh
|
UTF-8
| 3,456 | 3.875 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
basedir=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
. $basedir/utils.sh
# Make sure we kill the entire process tree when exiting
trap 'kill 0' SIGINT SIGTERM
function run_test_retry(){
local tests_file=$1
local tmp_log_file=$2
local i=0
local exit_code=0
pushd . > /dev/null
cd $tests_dir
while : ; do
> $tmp_log_file
testr run --subunit --load-list=$tests_file > $tmp_log_file 2>&1
subunit-stats $tmp_log_file > /dev/null
exit_code=$?
((i++))
( [ $exit_code -eq 0 ] || [ $i -ge $max_attempts ] ) && break
echo "Test $tests_file failed. Retrying count: $i"
done
popd > /dev/null
echo $exit_code
}
function get_tests_range() {
local i=$1
if [ $i -lt ${#tests[@]} ]; then
local test=${tests[$i]}
local test_class=${test%.*}
local j=$i
if [ $run_isolated -eq 0 ]; then
for test in ${tests[@]:$((i+1))}; do
local test_class_match=${test%.*}
if [ "$test_class" == "$test_class_match" ]; then
((j++))
else
break
fi
done
fi
echo $i $j
fi
}
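# Illustrative example: with tests=(ClassA.t1 ClassA.t2 ClassB.t1) and
# run_isolated=0, `get_tests_range 0` prints "0 1" (both ClassA tests are
# grouped into one range) and `get_tests_range 2` prints "2 2".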
function get_next_test_idx_range() {
(
flock -x 200
local test_idx=$(<$cur_test_idx_file)
local test_idx_range=( $(get_tests_range $test_idx) )
if [ ${#test_idx_range[@]} -gt 0 ]; then
test_idx=${test_idx_range[1]}
((test_idx++))
echo $test_idx > $cur_test_idx_file
echo ${test_idx_range[@]}
fi
) 200>$lock_file_1
}
function parallel_test_runner() {
local runner_id=$1
while : ; do
local test_idx_range=( $(get_next_test_idx_range) )
if [ ${#test_idx_range[@]} -eq 0 ]; then
break
fi
local range_start=${test_idx_range[0]}
local range_end=${test_idx_range[1]}
local tmp_tests_file=$(tempfile)
local l=$((range_end-range_start+1))
for test in ${tests[@]:$range_start:$l}; do
echo $test >> $tmp_tests_file
done
local tmp_log_file="$tmp_log_file_base"_"$range_start"
echo `timestamp` "Test runner $runner_id is starting tests from $((range_start+1)) to $((range_end+1)) out of ${#tests[@]}:"
cat $tmp_tests_file
echo
local test_exit_code=$(run_test_retry $tmp_tests_file $tmp_log_file)
rm $tmp_tests_file
echo `timestamp` "Test runner $runner_id finished tests from $((range_start+1)) to $((range_end+1)) out of ${#tests[@]} with exit code: $test_exit_code"
done
}
tests_file=$1
tests_dir=$2
log_file=$3
max_parallel_tests=${4:-10}
max_attempts=${5:-5}
run_isolated=${6:-0}
tests=(`awk '{print}' $tests_file`)
cur_test_idx_file=$(tempfile)
echo 0 > $cur_test_idx_file
lock_file_1=$(tempfile)
tmp_log_file_base=$(tempfile)
pids=()
for i in $(seq 1 $max_parallel_tests); do
parallel_test_runner $i &
pids+=("$!")
done
for pid in ${pids[@]}; do
wait $pid
done
rm $cur_test_idx_file
> $log_file
for i in $(seq 0 $((${#tests[@]}-1))); do
tmp_log_file="$tmp_log_file_base"_"$i"
if [ -f "$tmp_log_file" ]; then
cat $tmp_log_file >> $log_file
rm $tmp_log_file
fi
done
rm $tmp_log_file_base
rm $lock_file_1
echo "Test execution completed in $SECONDS seconds."
subunit-stats $log_file > /dev/null
exit $?
| true |
b92bbe9b151bd02ab8c520f3fe6a13f74bfb4cf0
|
Shell
|
fatfingererror/wolf
|
/data.aggregator.batch/hadoop-cluster-docker/start-container.sh
|
UTF-8
| 1,006 | 3.390625 | 3 |
[] |
no_license
|
#!/bin/bash
# the default node number is 3
N=${1:-3}
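# e.g. `./start-container.sh 5` (hypothetical invocation) starts hadoop-master
# plus hadoop-slave1 .. hadoop-slave4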
# start hadoop master container
# containers running on different networks (--net) cannot be linked properly
# be careful with the links (they must refer to existing containers), or docker will error
docker rm -f hadoop-master &> /dev/null
echo "start hadoop-master container..."
docker run -itd -p 50070:50070 -p 8088:8088 --name hadoop-master --net=hadoop --hostname hadoop-master --link wolf_cassandra_1:database --link kafka wolf_hadoop &> /dev/null
# start hadoop slave container
i=1
while [ $i -lt $N ]
do
docker rm -f hadoop-slave$i &> /dev/null
echo "start hadoop-slave$i container..."
docker run -itd \
--name hadoop-slave$i \
--net=hadoop \
--link wolf_cassandra_1:database --link kafka \
--hostname hadoop-slave$i \
wolf_hadoop &> /dev/null
i=$(( $i + 1 ))
done
# get into hadoop master container
docker exec -it hadoop-master bash
| true |
4a75bde8a8eabb34606c45ece5b70ee9d9bb69b3
|
Shell
|
ZhaoYiChina/Flask-Project
|
/docker/docker_run.sh
|
UTF-8
| 739 | 2.828125 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
PROJECT_PATH=`(cd ../;pwd)`
[ -d ${PROJECT_PATH}/logs ] || mkdir ${PROJECT_PATH}/logs
CONFIG_MODE_TEXT=${MODE:-default}
# docker rm $(docker ps -a -q)
# run in the background
docker run \
--name flask_project_${CONFIG_MODE_TEXT} \
-h flask_project_${CONFIG_MODE_TEXT} \
--dns=223.5.5.5 \
--dns=223.6.6.6 \
--privileged \
--cap-add SYS_PTRACE \
--restart=always \
-e TZ=Asia/Shanghai \
-e PYTHONIOENCODING=utf-8 \
-e PYTHONPATH=/flask_project \
-v ${PROJECT_PATH}:/flask_project \
-d \
-p 8000:8000 \
-p 8010:8010 \
-p 9001:9001 \
flask_project \
supervisord -c etc/supervisord.conf
| true |
68a55af2cf4c75c24bff20eeb5ab875f2d6751be
|
Shell
|
trevorwhitney/bosh-recipes
|
/lib/deploy-cf.sh
|
UTF-8
| 1,438 | 3.484375 | 3 |
[] |
no_license
|
#!/bin/bash
set -ex
if [ -z "$gcs_access_key" ]; then
echo "gcs_access_key not set, aborting"
exit 1
fi
if [ -z "$gcs_secret_access_key" ]; then
echo "gcs_secret_access_key not set, aborting"
exit 1
fi
display_help() {
echo "Usage: $0 [option...]" >&2
echo
echo " -i interpolate the cloud config instead of updating it"
echo " -h display this help message"
echo
exit 1
}
interpolate=false
while getopts "ih" opt; do
echo $opt
case "${opt}" in
i) interpolate=true ;;
h) display_help ;;
*) echo "Unexpected option ${opt}" >&2; exit 1 ;;  # 'error' was never defined
esac
done
command=deploy
$interpolate && command=interpolate
lib_dir="$(cd $(dirname "$0") && pwd)"
source $lib_dir/setup-environment.sh
export region="us-west1"  # "us-west1-a" is a zone; compute/region expects the region name
export zone="us-west1-a"
gcloud config set compute/region $region
gcloud config set compute/zone $zone
source $lib_dir/setup-cf-env.sh
bosh -e gcp -n -d cf $command \
--vars-store=$privates_dir/$project_id/cf-deployment-vars.yml \
-v system_domain="$ip.xip.io" \
-v gcs_access_key=$gcs_access_key \
-v gcs_secret_access_key=$gcs_secret_access_key \
-v gcs_buildpack_bucket=$gcs_buildpack_bucket \
-v gcs_droplet_bucket=$gcs_droplet_bucket \
-v gcs_package_bucket=$gcs_package_bucket \
-v gcs_resource_bucket=$gcs_resource_bucket \
$gcp_dir/cf-deployment-minimal.yml
gsutil cp privates/twhitney-bosh/cf-deployment-vars.yml gs://twhitney-bosh-terraform-config
| true |
01c832f417c63da6605d0b5b80874bdb601983e3
|
Shell
|
Calderwood2020/openNPL
|
/loadfixtures.sh
|
UTF-8
| 301 | 2.796875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# List of apps to load fixtures for
# News app is not included as it is updated in production
# TODO separate out eve update (similar to news)
NAME[1]="eba_portfolio"
source venv/bin/activate
for i in ${NAME[@]}
do
python3 manage.py loaddata --format=json $i $i/fixtures/$i.json
done
| true |
48aa0cecaeb5f327706231507058f9d98c62bb1c
|
Shell
|
freebsd/freebsd-ports
|
/databases/gnats4/files/extrapatch-gnats__edit-pr.sh
|
UTF-8
| 6,818 | 2.921875 | 3 |
[
"BSD-2-Clause"
] |
permissive
|
--- gnats/edit-pr.sh.orig Sun May 26 16:32:02 2002
+++ gnats/edit-pr.sh Sat Dec 6 02:43:44 2003
@@ -5,6 +5,8 @@
# Contributed by Jeffrey Osier (jeffrey@cygnus.com).
# Majorly revised by Bob Manson (manson@juniper.net).
# Further improvements by Dirk Bergstrom (dirk@juniper.net).
+# Patches for the FreeBSD Project by Ceri Davies (ceri@FreeBSD.org),
+# based on work by Paul Richards (paul@FreeBSD.org).
#
# This file is part of GNU GNATS.
#
@@ -24,9 +26,12 @@
debug_print=false # or echo to get output.
BINDIR=xBINDIRx
+DATE=`LC_TIME=C date` # Some prstats tools rely on this format
+GNATS_ROOT=${GNATS_ROOT:-/usr/local/share/gnats/gnats-db}; export GNATS_ROOT
LIBEXECDIR=xLIBEXECDIRx
GNATS_PORT=
locked=
+rc=0
version=xVERSIONx
usage="
@@ -99,28 +104,23 @@
*)
if [ "`echo $1 | grep /`" != "" ]; then
- pr_id=`echo $1 | awk -F"/" '{print $2}' -`
+ pr=`echo $1 | awk -F"/" '{print $2}' -`
else
- pr_id=$1
+ pr=$1
fi
+ prs="$prs $pr"
;;
esac
shift
done
+chng_tpl="${GNATS_ROOT}/gnats-adm/edit-pr-msg"
+
# set command here to always pass host and port, and directory if supplied
QUERY_PR="$BINDIR/query-pr $GNATS_HOST $GNATS_PORT $GNATS_DB $EDIT_USER $EDIT_PASSWD"
PR_ADDR="$QUERY_PR --responsible-address"
PR_EDIT="$LIBEXECDIR/gnats/pr-edit $GNATS_HOST $GNATS_PORT $EDIT_USER $EDIT_PASSWD $GNATS_DB"
-# These traps take care of deleting all the /tmp files
-trap 'rm -f $new.old $change_msg $fixfil' 0
-# Don't delete $new on error signals
-trap 'if [ "$locked" != "" ]; then \
- $PR_EDIT --unlock $pr_id ; \
- locked= ; \
- fi' 1 2 3 13 15
-
# find a user name
if [ "$USER" != "" ]; then
me=$USER
@@ -138,85 +138,45 @@
full_me="$me"
fi
+for pr_id in $prs ; do
+
+epdir=`mktemp -d /tmp/epXXXXXX || exit 75`
+chng_msg="$epdir/chng.$pr_id.$$"
# new = temp file to use for editing
-new="/tmp/ep$$"
-newtmp="/tmp/ep$$.tp"
-change_msg="/tmp/ep$$.ch"
-fixfil="/tmp/ep$$.fx"
+new="$epdir/$pr_id.ep$$"
+change_msg="$epdir/ep$$.$pr_id.ch"
+keepnew=
+
+# These traps take care of deleting all the /tmp files
+trap 'rm -f $new.old $change_msg chng_msg; rmdir $epdir 2>/dev/null' 0
+trap 'if [ "$locked" != "" ]; then \
+ $PR_EDIT --unlock $pr_id ; \
+ locked= ; \
+ fi; \
+ rm -f $new.old $change_msg chng_msg; rmdir $epdir 2>/dev/null' 1 2 3 13 15
+
# lock the pr
$debug_print "Locking $pr_id."
-lock=`$PR_EDIT --lock=$full_me --process=$$ $pr_id 2>&1 > $new`
+lock=`$PR_EDIT --lock=$me --process=$$ $pr_id 2>&1 > $new`
locked=t
if [ "$lock" != "" ] ; then
+ echo "Failed to lock PR $pr_id"
echo $lock
- exit 0
+ rc=1
+ rm -r $epdir
+ continue
fi
-rm -f $fixfil
-
-# Now add any missing fields, along with a description.
-
-$QUERY_PR --list-fields | while read field
-do
- grep -i "^>${field}:" "$new" > /dev/null 2>&1
- if [ $? != 0 ]
- then
- $QUERY_PR --field-flags "$field" | grep -i readonly > /dev/null 2>&1
- if [ $? != 0 ]
- then
- type="`$QUERY_PR --field-type $field`"
- case $type in
- [Ee][Nn][Uu][Mm])
- values=`$QUERY_PR --valid-values $field | tr '\n' ' ' | sed 's/ *$//g; s/ / | /g;s/^/[ /;s/$/ ]/;`
- valslen=`echo "$values" | wc -c`
- if [ "$valslen" -gt 160 ]
- then
- desc="<`$QUERY_PR --field-description $field` (one line)>";
- else
- desc="<${values} (one line)>";
- fi
- dpat=`echo "$desc" | tr '][*+^$|\()&/' '............'`
- echo "/^>${field}:/ s/${dpat}//" >> $fixfil
- echo "/>${field}: ${desc}" >> $new;
- ;;
- [Mm][Uu][Ll][Tt][Ii][Tt][Ee][Xx][Tt])
- desc=" <`$QUERY_PR --field-description $field` (multiple lines)>";
- dpat=`echo "$desc" | tr '][*+^$|\()&/' '............'`
- echo "s/^${dpat}//" >> $fixfil
- echo ">${field}:" >> $new;
- echo "$desc" >> $new;
- ;;
- *)
- desc="<`$QUERY_PR --field-description $field` (one line)>"
- dpat=`echo "$desc" | tr '][*+^$|\()&/' '............'`
- echo "/^>${field}:/ s/${dpat}//" >> $fixfil
- echo ">${field}: ${desc}" >> $new
- ;;
- esac
- else
- prevfld="$field";
- fi
- fi
-done
-
# here's where we actually call the editor.
cp $new $new.old
$VISUAL $new
if cmp -s $new.old $new ; then
echo "edit-pr: PR not changed"
$PR_EDIT --unlock $pr_id
- exit 0
-fi
-
-if [ -f $fixfil ]
-then
- sed -f $fixfil < $new > $newtmp
- mv $newtmp $new
- sed -f $fixfil < $new.old > $newtmp
- mv $newtmp $new.old
- rm -f $fixfil
+ rm -r $epdir
+ continue
fi
# error-check output by calling pr-edit --check; if mistakes exist,
@@ -233,7 +193,7 @@
echo "PR $pr_id not updated: changed file is in $new.changed"
mv $new $new.changed
$PR_EDIT --unlock $pr_id
- exit 0
+ continue 2
;;
esac
$VISUAL $new
@@ -242,24 +202,37 @@
fi
done
-exec 3<&0
-
#
# Find out what fields have changed; if the changed field requires a
# change reason, then ask about it.
#
-$LIBEXECDIR/gnats/diff-prs $new.old $new | while read field
+
+changed_fields=`$LIBEXECDIR/gnats/diff-prs $new.old $new`
+for field in $changed_fields
do
flags=`$QUERY_PR --field-flags $field` || echo "edit-pr: Invalid field name $field" 1>&2;
if echo "$flags" | grep -i "requirechangereason" > /dev/null 2>&1
then
- echo ">${field}-Changed-Why:" >> $change_msg;
- echo "Why did the $field field change? (Ctrl-D to end)";
- cat 0<&3 >> $change_msg;
+ $debug_print "Doing $field change."
+ echo ">$field-Changed-Why: " >> $change_msg
+ if [ -e $chng_msg ]; then
+ echo "Re-use last message (y/n)?"
+ read yesno
+ if [ "$yesno" != "y" ]; then
+ sed "s/%%ITEM%%/$field/" $chng_tpl > $chng_msg
+ fi
+ else
+ sed "s/%%ITEM%%/$field/" $chng_tpl > $chng_msg
+ fi
+ $VISUAL $chng_msg
+ sed '/^GNATS:/d' $chng_msg >> $change_msg
fi
done
+echo "" >> $change_msg
+echo "http://www.FreeBSD.org/cgi/query-pr.cgi?pr=$pr_id" >> $change_msg
+
if [ -f $change_msg ]
then
cat $change_msg >> $new
@@ -269,12 +242,12 @@
# Submit the changed PR.
#
while true; do
- if $PR_EDIT --email-addr "$full_me" $pr_id < $new
+ if $PR_EDIT --email-addr "$me" $pr_id < $new
then
echo "Edit successful"
# we need to unlock the PR
$PR_EDIT --unlock $pr_id
- exit 0
+ break
else
echo "Problems with edit submission."
fi
@@ -284,9 +257,11 @@
case "$input" in
a*)
echo "Cancelling edit. Changed PR is in $new."
- # we need to ulock the PR no matter what
+ # we need to unlock the PR no matter what
$PR_EDIT --unlock $pr_id
- exit 1 ;;
+ keepnew=y
+ rc=1
+ break 2 ;;
r*)
break ;;
*)
@@ -296,6 +271,13 @@
done
done
-rm -f $new
+rm -f $chng_msg $new.old $change_msg
+if [ "$keepnew" != "y" ]; then
+ rm -f $new
+ rmdir $epdir
+fi
+keepnew=
+
+done # for pr_id in $prs
-exit 0
+exit $rc
| true |
2978fc8d47057995ec547c7f62b55b70140d2442
|
Shell
|
nvm-sh/nvm
|
/test/fast/Running 'nvm deactivate' should unset the nvm environment variables
|
UTF-8
| 1,010 | 3.328125 | 3 |
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
set -ex
die () { echo "$@" ; exit 1; }
export NVM_DIR="$(cd ../.. && pwd)"
\. ../../nvm.sh
\. ../common.sh
make_fake_node v0.2.3
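# Note: `expr STRING : REGEX` prints the number of leading characters matched
# (0 when nothing matches), so "= 0" below means "v0.2.3 is not in PATH".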
[ `expr $PATH : ".*v0.2.3/.*/bin.*"` = 0 ] || echo "WARNING: Unexpectedly found v0.2.3 already active" >&2
nvm use --delete-prefix v0.2.3 || die "Failed to activate v0.2.3"
[ `expr "$PATH" : ".*v0.2.3/.*/bin.*"` != 0 ] || die "PATH not set up properly"
[ `expr "$NODE_PATH" : ".*v0.2.3/.*/lib/node_modules.*"` = 0 ] || die "NODE_PATH should not contain (npm root -g)"
# ^ note: NODE_PATH should not contain `npm root -g` since globals should not be requireable
[ `expr "$NVM_BIN" : ".*v0.2.3/bin"` != 0 ] || die "NODE_BIN should contain bin directory path"
nvm deactivate || die "Failed to deactivate v0.2.3"
[ `expr "$PATH" : ".*v0.2.3/.*/bin.*"` = 0 ] || die "PATH not cleaned properly"
[ `expr "$NODE_PATH" : ".*v0.2.3/.*/lib/node_modules.*"` = 0 ] || die "NODE_PATH not cleaned properly"
[ "_$NVM_BIN" = "_" ] || die "NVM_BIN should be unset: got '$NVM_BIN'"
| true |
b43b2c4255046fddb2951b841c10d9d88d18ca29
|
Shell
|
tox2ik/local-bin
|
/generate-pass
|
UTF-8
| 311 | 3.109375 | 3 |
[] |
no_license
|
#!/bin/sh
words=$(
strings /usr/lib64/aspell-?.*/no.rws |
sort -u | sort -R |
tr -d $'\t' | tail -n 3 |
tr \\n ' ' |
tr 'A-Z' 'a-z' ;
)
special() {
tr -dc '[:graph:]' < /dev/urandom |
tr -d '[:alpha:]' |
dd bs=1 count=3 2>/dev/null
}
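# e.g. `special` might emit something like "7#%" (illustrative): three random
# printable, non-alphabetic characters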
for i in $words; do
echo -n "$i "; special; echo -n " "
done
echo
| true |
d3b373aacca6aeb8d7605de13b65e10225cf9ef9
|
Shell
|
dannyhollman/holberton-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/5-4_bad_luck_8_is_your_chance
|
UTF-8
| 267 | 3.296875 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
#prints "Holberton School"
#4th loop prints "bad luck"
#8th loop prints "good luck"
i=1
while [ $i -lt 11 ]
do
if [ $i -eq 4 ]
then
echo "bad luck"
elif [ $i -eq 8 ]
then
echo "good luck"
else
echo "Holberton School"
fi
((i++))
done
| true |
745b7037d78b57130799500abfcb4563dfa5f49d
|
Shell
|
gitpan/manish-total-scripts
|
/carrieruserquota.sh
|
UTF-8
| 4,588 | 3.859375 | 4 |
[] |
no_license
|
#!/bin/sh
# Qmail - mailquotacheck
#
# Author: Paul Gregg <pgregg@tibus.net>
# Url: http://www.tibus.net/pgregg/projects/qmail/
# Run this program ala: |mailquotacheck before your ./Maildir/ or ./Mailbox
# entry in the .qmail file. e.g:
# |/usr/local/bin/mailquotacheck
# ./Maildir/
# Default quota is set to 5120Kb per user; this can be changed below
# You can also install per user quotas which will override the defaults
# by creating a .quota file in the same directory as the .qmail file, e.g:
# echo 10240 > .quota
# this will give that user a 10Mb mail quota
# Individual per message quotas can also be used by creating a file telling
# mailquotacheck that maximum permitted size per email - this is useful
# when you want to allow someone, say, a 20Mb limit but want to prevent emails
# larger than 5Mb. e.g:
# echo 5120 > .maxmsgsize
# Program location defs:
cat="/bin/cat"
expr="/usr/bin/expr"
wc="/usr/bin/wc"
du="/usr/bin/du"
bc="/usr/bin/bc"
cut="/usr/bin/cut"
awk="/usr/bin/awk"
echo="/bin/echo"
# Program defaults
# quota is the default user quota if the user does not have a .quota file
quota=5120
# hardquotabuffer is the 'extra' space allowed for small (<1Kb) messages.
hardquotabuffer=100
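# e.g. with the defaults above: absquota = 5120 + 100 = 5220Kb, so sub-1Kb
# messages are still accepted until usage passes 5220Kb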
# -------------------------------------------------------------------------
# You should not need to change anything below here
# -------------------------------------------------------------------------
# Find out how big the email is in Kb - We don't care about < 1Kb messages.
msgbytes=`$cat - | $wc -c`
ERROR=$?
if [ ${ERROR} -ne 0 ]; then
# If this fails then you are in trouble ;) - Check program defs at the top.
$echo "QUOTACHECK ERROR: The mail quotacheck program cannot determine the size of\nthis message. Please inform postmaster of the site you are trying to mail to."
exit 100
fi
msgkb=`$expr $msgbytes / 1024`
# or you can use:
# msgkb=`$echo $msgbytes / 1024 | $bc`
# Get the user's 'home' directory - where their .qmail file is
dir="$HOME"
# Figure out a user's mail quota - default is 5120Kb (see above)
# If there is a file '.quota' in their dir then use that value instead.
if [ -f "$dir/.quota" ]; then
quota=`$cat $dir/.quota 2>/dev/null`
ERROR=$?
if [ ${ERROR} -ne 0 ]; then
$echo "An error occurred while trying to read the recipients quota limit.\nDelivery will be attempted again later."
exit 111
fi
fi
# Impose a maximum 'per message' email size. Use the users quota as standard
# but if there is a file '.maxmsgsize' then use that value.
maxmsgsize=$quota
if [ -f "$dir/.maxmsgsize" ]; then
maxmsgsize=`$cat $dir/.maxmsgsize`
ERROR=$?
if [ ${ERROR} -ne 0 ]; then
$echo "An error occurred while trying to read the recipients maximum message size.\nDelivery will be attempted again later."
exit 111
fi
fi
absquota=`$expr $quota + $hardquotabuffer`
# What is the maildir's current disk usage
du=`$du -sk $dir | $awk {'print $1'}`
ERROR=$?
if [ ${ERROR} -ne 0 ]; then
$echo "An error occurred while trying to get the user's current quota usage.\nDelivery will be attempted again later."
exit 111
fi
duwould=`$expr $du + $msgkb`
#debug - mail all these vars to me.
#set | mail pgregg@tibus.net
# Refuse the email if it is too big
if [ $msgkb -gt $maxmsgsize ]; then
$echo "Sorry, This message is larger than the current maximum message size limit which this user can receive.:\nYour message was $msgkb Kbytes and the maximum is $maxmsgsize Kbytes."
exit 100
fi
# Check if the user would be above the absolute quota
if [ $duwould -gt $absquota ]; then
# Ok, we aren't going to deliver this message, lets try and give the sender
# a decent error message
if [ $du -gt $quota ]; then
$echo "User's Disk Quota Exceeded.\nSorry, your message cannot be delivered as the recipient has exceeded\ntheir disk space limit for email."
else
$echo "Sorry, Your message cannot be delivered bacause the recipient does not have\nenough disk space left for it.";
fi
if [ $du -lt $absquota ]; then
$echo "\n However, small (< 1Kb) message will be delivered should you wish
to\ninform the person you tried to email."
fi
exit 100
fi
# If the email would put the user over quota, then refuse it (accept < 1Kb)
if [ $msgbytes -gt 1024 ]; then
if [ $duwould -gt $quota ]; then
$echo " User's Disk Quota Exceeded. Sorry, your intendend recipient
has too much mail stored in their mailbox.
Your message totalled $msgkb Kbytes ($msgbytes bytes).
However a small (1Kb) message will be delivered if you wish to
inform the person you tried to email."
exit 100
fi
fi
exit 0
| true |
7b7d53c789bd89fba026c109e486d6c45f84e840
|
Shell
|
choipoby/nrdp-scripts
|
/.sh/dev.sh
|
UTF-8
| 5,224 | 3.390625 | 3 |
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#emacs build
emake()
{
"emacsedit.sh" -m -n "${1}"
}
#reconfigure wrapper
reconfigure()
{
`which reconfigure` "$@"
cd "$PWD"
}
#make wrapper
make()
{
"ubermake.sh" "$@"
}
#gdb wrapper
gdb()
{
EDITOR="emacsedit.sh -n" `which gdb` "$@"
}
#emacs diff
ediff()
{
f="ediff-files"
if [ "$1" = "-b" ]; then
f="sam-ediff-binary-files"
shift
fi
"emacsedit.sh" -r -n "($f \"${1}\" \"${2}\")"
}
#ecd
ecd()
{
if [ -n "$1" ]; then
emacsedit.sh -n "$1"
else
cddev "@"
fi
}
#emacs tail
etail()
{
"emacsedit.sh" -n -t "$1"
}
#global
findsym() {
ROOT=`findancestor GPATH 2>&1 || findancestor GTAGS 2>&1|| findancestor GRTAGS 2>&1`
if [ -z "$ROOT" ]; then
echo "No gtags found!"
else
ROOT=`dirname $ROOT`
GLOBAL_OPTS="-x"
if [ "$1" = "-h" ]; then
echo "findsym <option> <find>"
echo "Options:"
echo
echo "-symbol: <find> a symbol."
echo "-caller: <find> a caller of symbol."
echo "-tag: <find> a referencd string."
echo "-file: <find> a file."
return 1
elif [ "$1" = "-symbol" ] || [ "$1" = "-s" ]; then
shift
GLOBAL_OPTS="-xs"
elif [ "$1" = "-caller" ] || [ "$1" = "-r" ] || [ "$1" = "-reference" ]; then
shift
GLOBAL_OPTS="-xr"
elif [ "$1" = "-tag" ] || [ "$1" = "-t" ]; then
shift
GLOBAL_OPTS="-x"
elif [ "$1" = "-file" ] || [ "$1" = "-f" ]; then
shift
GLOBAL_OPTS="-xPo"
fi
#echo "$ROOT :: $GLOBAL_OPTS :: $@ "
SYM=`(cdo "$ROOT" && choose.pl -a -x "global $GLOBAL_OPTS $@")`
if [ -n "$SYM" ]; then
echo "$SYM"
FILE=`echo $SYM | awk '{print $3}'`
LINE=`echo $SYM | awk '{print $2}'`
edit "$ROOT/$FILE:$LINE"
else
echo "Not found!"
fi
fi
}
# print lines that match $1 first
function part()
{
v="$*" awk '{if($0~ENVIRON["v"])print;else x=x$0"\n"}END{printf"%s",x}';
}
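# Example (assumed input): `printf 'b\nfoo\nb\n' | part foo` prints the "foo"
# line first, then the two "b" lines in their original order.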
function cat-emacs()
{
test -z "$EMACSCLIENT" && EMACSCLIENT=emacsclient
emacsclient -e "(misc-cat-emacs)" &> /dev/null
cat /tmp/misc-cat-emacs.tmp
}
function findcmake()
{
if [ -z "$1" ]; then
find -L "$PWD" -name CMakeLists.txt -or -name '*.cmake'
else
while [ -n "$1" ]; do
if [ -d "$1" ]; then
find -L "$1" -name CMakeLists.txt -or -name '*.cmake'
else
find -L "$PWD" -name CMakeLists.txt -or -name '*.cmake' | xargs grep -i "$1"
fi
shift
done
fi
}
block-icecream() {
[ -e "/etc/icecc/icecc.conf" ] && . /etc/icecc/icecc.conf
echo blockcs "$1" | nc "$ICECC_SCHEDULER_HOST" 8766
}
addrtoline() {
while [ -n "$1" ]; do
echo "$1" | sed -e 's,\+0x, 0x,' | while read FILE ADDRESS; do
addr2line -e "$FILE" -a "$ADDRESS"
done
shift
done
}
nf_sync_gibbon()
{
outdir="$1"
if [ -z "$outdir" ]; then
echo "Must specify output directory!"
return 1
fi
mkdir -p "$outdir"
for a in lib/libJavaScriptCore.so lib/libWTF.so lib/librex_pcre.so netflix data/; do
if [ -e "$a" ]; then
echo "Handling: ${a}"
mkdir -p "$outdir/`dirname $a`"
rsync -varc "$a" "$outdir/$a"
fi
done
chmod 664 "$outdir/data/etc/conf/common.xml"
sync
}
complete-netflix ()
{
app=${COMP_WORDS[0]}
test -x "$app" || return;
modified="`ls -la \"$app\" | awk '{print $5,$6,$7,$8}'`"
if [ ! -e "/tmp/netflix-completions-helper" ] || [ "$modified" != "`head -n 1 /tmp/netflix-completions-helper`" ]; then
echo $modified > /tmp/netflix-completions-helper
"$app" --help --dump | grep '^ \+-' | grep "\[value\]" | sed -e 's,|NF.*,,' -e 's,|, ,' -e 's,^ *,,' | xargs >> /tmp/netflix-completions-helper
"$app" --help --dump | grep '^ \+-' | grep -v "\[value\]" | sed -e 's,|NF.*,,' -e 's,|, ,' -e 's,^ *,,' | xargs >> /tmp/netflix-completions-helper
fi
local valueopts=`head -n 2 /tmp/netflix-completions-helper | tail -n 1`
local cur=${COMP_WORDS[COMP_CWORD]}
local prev=${COMP_WORDS[COMP_CWORD-1]}
if [ -n "$prev" ]; then
if [ "$prev" = "-x" ] || [ "$prev" = "--config-file" ]; then
dir=$(type $app | sed -e "s,^$app is ,,")
dir=$(dirname $dir)
confs=`/bin/ls "$dir/data/etc/conf/" | sed -e 's,\.xml,,' | xargs`
COMPREPLY=($(compgen -W "${confs}" -- ${cur}))
return;
elif printf -- "${valueopts}\n" | grep --quiet -- "$prev"; then
COMPREPLY=()
return;
fi
fi
local nonvalueopts=`tail -n 1 /tmp/netflix-completions-helper`
COMPREPLY=(`compgen -W "$valueopts $nonvalueopts" -- $cur`)
if [ -n "$cur" ] && [ ${#COMPREPLY[@]} -eq 0 ] && printf -- "$cur\n" | grep --quiet -- "^-[^-]"; then
COMPREPLY=(`compgen -W "$valueopts $nonvalueopts" -- -$cur`)
fi
}
if [ -n "$BASH" ]; then
complete -F complete-netflix -o default netflix ./netflix
fi
alias clean-var="rm -rf data/var"
| true |
cba4f6c0002fc38089e1dda609d64a89e5ce9617
|
Shell
|
mjedrasz/ttd-organiser-pwa
|
/build.sh
|
UTF-8
| 3,959 | 3.375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
set -o pipefail
instruction()
{
echo "usage: ./build.sh deploy <stage> <region>"
echo ""
echo "stage: eg. int, staging, test, prod, ..."
echo "region: eg. eu-central-1, eu-west-1, ..."
echo ""
echo "for example: ./build.sh deploy test eu-central-1"
}
if [ $# -eq 0 ]; then
instruction
exit 1
elif [ "$1" = "install" ] && [ $# -eq 1 ]; then
yarn install
elif [ "$1" = "build" ] && [ $# -eq 3 ]; then
REACT_APP_STAGE=$2
REACT_APP_REGION=$3
PARAMETERS=`aws ssm get-parameters --names \
"/$REACT_APP_STAGE/google/api/key" \
"/$REACT_APP_STAGE/cognito/user-pool-client/org/sign-in-url" \
"/$REACT_APP_STAGE/cognito/user-pool-client/org/sign-out-url" \
"/$REACT_APP_STAGE/cognito/org-identity-pool/id" \
"/$REACT_APP_STAGE/cognito/user-pools/org/id" \
"/$REACT_APP_STAGE/cognito/org/user-pool-client/id" \
"/$REACT_APP_STAGE/s3/assets" \
"/$REACT_APP_STAGE/graphql-api/org/url" \
--query "Parameters[*].{Name:Name,Value:Value}" \
--with-decryption`
REACT_APP_GOOGLE_API_KEY=`echo $PARAMETERS | jq .[] | jq -c "select(.Name == \\"/$REACT_APP_STAGE/google/api/key\\")" | jq -r '.Value'`
REACT_APP_CALLBACK_REDIRECT_SIGN_IN=`echo $PARAMETERS | jq .[] | jq -c "select(.Name == \\"/$REACT_APP_STAGE/cognito/user-pool-client/org/sign-in-url\\")" | jq -r '.Value'`
REACT_APP_CALLBACK_REDIRECT_SIGN_OUT=`echo $PARAMETERS | jq .[] | jq -c "select(.Name == \\"/$REACT_APP_STAGE/cognito/user-pool-client/org/sign-out-url\\")" | jq -r '.Value'`
REACT_APP_IDENTITY_POOL_ID=`echo $PARAMETERS | jq .[] | jq -c "select(.Name == \\"/$REACT_APP_STAGE/cognito/org-identity-pool/id\\")" | jq -r '.Value'`
REACT_APP_USER_POOL_ID=`echo $PARAMETERS | jq .[] | jq -c "select(.Name == \\"/$REACT_APP_STAGE/cognito/user-pools/org/id\\")" | jq -r '.Value'`
REACT_APP_USER_POOL_WEB_CLIENT_ID=`echo $PARAMETERS | jq .[] | jq -c "select(.Name == \\"/$REACT_APP_STAGE/cognito/org/user-pool-client/id\\")" | jq -r '.Value'`
REACT_APP_GRAPHQL_API=`echo $PARAMETERS | jq .[] | jq -c "select(.Name == \\"/$REACT_APP_STAGE/graphql-api/org/url\\")" | jq -r '.Value'`
REACT_APP_ASSETS_NAME=`echo $PARAMETERS | jq .[] | jq -c "select(.Name == \\"/$REACT_APP_STAGE/s3/assets\\")" | jq -r '.Value'`
echo "REACT_APP_GOOGLE_API_KEY=$REACT_APP_GOOGLE_API_KEY" > .env
echo "REACT_APP_CALLBACK_REDIRECT_SIGN_IN=$REACT_APP_CALLBACK_REDIRECT_SIGN_IN" >> .env
echo "REACT_APP_CALLBACK_REDIRECT_SIGN_OUT=$REACT_APP_CALLBACK_REDIRECT_SIGN_OUT" >> .env
echo "REACT_APP_IDENTITY_POOL_ID=$REACT_APP_IDENTITY_POOL_ID" >> .env
echo "REACT_APP_USER_POOL_ID=$REACT_APP_USER_POOL_ID" >> .env
echo "REACT_APP_USER_POOL_WEB_CLIENT_ID=$REACT_APP_USER_POOL_WEB_CLIENT_ID" >> .env
echo "REACT_APP_GRAPHQL_API=$REACT_APP_GRAPHQL_API" >> .env
echo "REACT_APP_ASSETS_NAME=$REACT_APP_ASSETS_NAME" >> .env
echo "REACT_APP_STAGE=$REACT_APP_STAGE" >> .env
echo "REACT_APP_REGION=$REACT_APP_REGION" >> .env
yarn build
# npm run integration-test
# elif [ "$1" = "acceptance-test" ] && [ $# -eq 1 ]; then
# npm install
# npm run acceptance-test
elif [ "$1" = "deploy" ] && [ $# -eq 2 ]; then
STAGE=$2
DISTRIBUTION=`aws ssm get-parameter --name "/$STAGE/cloudfront/organiser-ui/distribution/id" | jq -r '.Parameter.Value'`
aws s3 cp --recursive ./build s3://$STAGE-organiser-ui-ttd-pl/
aws s3 cp --cache-control="max-age=0, no-cache, no-store, must-revalidate" \
./build/service-worker.js s3://$STAGE-organiser-ui-ttd-pl/
aws s3 cp --acl public-read \
--cache-control="max-age=0, no-cache, no-store, must-revalidate" \
./build/index.html s3://$STAGE-organiser-ui-ttd-pl/
# invalidate the CloudFront cache for index.html and service-worker.js
# to force CloudFront to update its edge locations with the new versions
aws cloudfront create-invalidation --distribution-id $DISTRIBUTION \
--paths /index.html /service-worker.js
else
instruction
exit 1
fi
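# Typical CI sequence (hypothetical stage/region; assumes AWS credentials and
# the SSM parameters referenced above already exist):
#   ./build.sh install && ./build.sh build test eu-central-1 && ./build.sh deploy test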
| true |
4dae4ed9359ea16d93076d25231248492c3a91c0
|
Shell
|
MissiontoMars/ray
|
/python/ray/autoscaler/kuberay/init-config.sh
|
UTF-8
| 898 | 3.5625 | 4 |
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Clone pinned Kuberay commit to temporary directory, copy the CRD definitions
# into the autoscaler folder.
KUBERAY_SHA="ce84f0441c991eb4b0f52ee2cd85c0a5ac048d11"
OPERATOR_TAG=${KUBERAY_SHA:0:7}
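# e.g. with the SHA pinned above, ${KUBERAY_SHA:0:7} expands to "ce84f04"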
# Requires Kustomize (dependency to be removed after KubeRay 0.3.0 cut)
if ! command -v kustomize &> /dev/null
then
echo "Please install kustomize. Then re-run this script."
    exit 1
fi
SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
DIR=$(mktemp -d -t "kuberay-XXXXXX")
pushd "$DIR" || exit
git clone https://github.com/ray-project/kuberay/
pushd "kuberay" || exit
git checkout "$KUBERAY_SHA"
pushd ray-operator/config/default || exit
kustomize edit set image kuberay/operator=kuberay/operator:"$OPERATOR_TAG"
popd || exit
cp -r ray-operator/config "$SCRIPT_DIR/"
popd || exit
popd || exit
| true |
437526fc35804cc1d2a883236d828fd212e6217b
|
Shell
|
alldeady/cloud-1
|
/deploy.sh
|
UTF-8
| 3,538 | 2.578125 | 3 |
[] |
no_license
|
#!/bin/sh
WORKING_DIR=$(pwd)
export PROJECT_ID=<your-project-id>
# Create GKE
CLUSTER_NAME=cloud-1-cluster
gcloud beta container --project $PROJECT_ID \
clusters create $CLUSTER_NAME --region "europe-central2" \
--no-enable-basic-auth --cluster-version "1.18.17-gke.100" \
--release-channel "regular" --machine-type "e2-medium" \
--image-type "COS" --disk-type "pd-standard" --disk-size "20" \
--metadata disable-legacy-endpoints=true \
--scopes "https://www.googleapis.com/auth/devstorage.read_only",\
"https://www.googleapis.com/auth/logging.write",\
"https://www.googleapis.com/auth/monitoring",\
"https://www.googleapis.com/auth/servicecontrol",\
"https://www.googleapis.com/auth/service.management.readonly",\
"https://www.googleapis.com/auth/trace.append" \
--max-pods-per-node "8" --num-nodes "3" \
--enable-stackdriver-kubernetes --enable-ip-alias \
--network "projects/$PROJECT_ID/global/networks/default" \
--subnetwork "projects/$PROJECT_ID/regions/europe-central2/subnetworks/default" \
--no-enable-intra-node-visibility --default-max-pods-per-node "8" --enable-autoscaling \
--min-nodes "1" --max-nodes "3" --no-enable-master-authorized-networks \
--addons HorizontalPodAutoscaling,HttpLoadBalancing,GcePersistentDiskCsiDriver \
--enable-autoupgrade --enable-autorepair --max-surge-upgrade 1 \
--max-unavailable-upgrade 0 --autoscaling-profile optimize-utilization \
--enable-shielded-nodes --node-locations "europe-central2-a","europe-central2-b","europe-central2-c"
# Create cloud SQL
INSTANCE_NAME=cloud-1-sql
gcloud sql instances create $INSTANCE_NAME
INSTANCE_CONNECTION_NAME=$(gcloud sql instances describe $INSTANCE_NAME \
--format='value(connectionName)')
gcloud sql databases create wordpress --instance $INSTANCE_NAME
CLOUD_SQL_PASSWORD=wordpress
gcloud sql users create wordpress --host=% --instance $INSTANCE_NAME \
--password $CLOUD_SQL_PASSWORD
# Create creds for cloud SQL proxy
SA_NAME=cloudsql-proxy
gcloud iam service-accounts create $SA_NAME --display-name $SA_NAME
SA_EMAIL=$(gcloud iam service-accounts list \
--filter=displayName:$SA_NAME \
--format='value(email)')
gcloud projects add-iam-policy-binding $PROJECT_ID \
--role roles/cloudsql.client \
--member serviceAccount:$SA_EMAIL
gcloud iam service-accounts keys create $WORKING_DIR/key.json \
--iam-account $SA_EMAIL
kubectl create secret generic cloudsql-db-credentials \
--from-literal username=wordpress \
--from-literal password=$CLOUD_SQL_PASSWORD
kubectl create secret generic cloudsql-instance-credentials \
--from-file $WORKING_DIR/key.json
# Create filestore
gcloud filestore instances create nfs-server \
--project=$PROJECT_ID \
--zone=europe-central2-a \
--tier=STANDARD \
--file-share=name="vol1",capacity=1TB \
--network=name="default",reserved-ip-range="10.0.0.0/29"
export IP_ADDRESS_FILESTORE=$(gcloud filestore instances list --format='value(IP_ADDRESS)')
cat $WORKING_DIR/pv.yaml.template | envsubst > $WORKING_DIR/pv.yaml
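# envsubst substitutes exported variables into the template; e.g. a
# (hypothetical) template line "server: ${IP_ADDRESS_FILESTORE}" is rendered
# with the Filestore IP captured above.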
kubectl create -f pv.yaml
kubectl create -f pvc.yaml
# Deploy wordpress
helm repo add bitnami https://charts.bitnami.com/bitnami
helm install wp bitnami/wordpress -f wp-values.yaml
# Create ingress
kubectl create -f ingress.yaml
# Enable Cloud CDN
BACKEND_SERVICE=$(gcloud compute backend-services list --format='value(NAME)')
gcloud compute backend-services update $BACKEND_SERVICE \
--enable-cdn \
    --cache-mode="CACHE_ALL_STATIC"
| true |
8a0eeab57ea24f775b4db3fafbde7ad85d25afdb
|
Shell
|
mohsinalimat/stripe-ios
|
/ci_scripts/export_builds.sh
|
UTF-8
| 1,234 | 3.59375 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Options: --only-static: only build the static framework target
if [[ "$1" == "--only-static" ]]
then
    ONLY_STATIC=1
else
    ONLY_STATIC=0
fi
PROJECTDIR="$(cd $(dirname $0)/..; pwd)"
BUILDDIR="${PROJECTDIR}/build"
CARTHAGEDIR="${PROJECTDIR}/Carthage/Build/iOS"
rm -rf $BUILDDIR
mkdir $BUILDDIR
cd $PROJECTDIR
# Dynamic framework
if [ $ONLY_STATIC = 0 ]
then
echo "building dynamic framework..."
carthage build --no-skip-current --platform iOS --configuration Release
cd $CARTHAGEDIR
ditto -ck --rsrc --sequesterRsrc --keepParent Stripe.framework Stripe.framework.zip
mv Stripe.framework.zip $BUILDDIR
cd -
fi
# Static framework
echo "building static framework..."
xcodebuild build -workspace Stripe.xcworkspace -scheme StripeiOSStaticFramework -configuration Release OBJROOT=$BUILDDIR SYMROOT=$BUILDDIR | xcpretty -c
cd $BUILDDIR/Release-iphonesimulator
plutil -remove DTSDKName Stripe.bundle/Info.plist
plutil -remove DTPlatformName Stripe.bundle/Info.plist
plutil -remove CFBundleSupportedPlatforms Stripe.bundle/Info.plist
mv Stripe.bundle Stripe.framework
ditto -ck --rsrc --sequesterRsrc --keepParent Stripe.framework StripeiOS-Static.zip
rm -rf Stripe.framework
rm -rf Stripe.bundle
cp StripeiOS-Static.zip $BUILDDIR
cd -
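# Usage sketch:
#   ./ci_scripts/export_builds.sh                 # dynamic + static frameworks
#   ./ci_scripts/export_builds.sh --only-static   # static framework only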
| true |
3c553faadeb422921bafb9ce1146bfc0c100761b
|
Shell
|
jonkster/YAAXI
|
/boxes/switchlights/box.sh
|
UTF-8
| 1,505 | 3.546875 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/bash
# this is any box specific test/simulation
if [ -z $1 ]
then
echo "No IP address, exiting"
else
echo "Assuming arduino with IP " "${1}"
echo "Ask for arduino details:"
echo -n "Avduino Box Fish" | nc -4u -w0 ${1} 8888
BOXID=`nc -4u -l -w1 8889`
echo "Looking for a BOXID response, got: ${BOXID}"
if [[ "${BOXID}" == "BOXID"* ]]
then
for n in {1..3}
do
sleep 0.1
echo -n "GEAR_TRANSIT:1" | nc -4u -w0 $1 8888
sleep 0.1
echo -n "GEAR_SAFE0:1" | nc -4u -w0 $1 8888
sleep 0.1
echo -n "GEAR_SAFE1:1" | nc -4u -w0 $1 8888
sleep 0.1
echo -n "GEAR_SAFE2:1" | nc -4u -w0 $1 8888
sleep 0.1
echo -n "FLAP_TRANSIT:1" | nc -4u -w0 $1 8888
sleep 0.1
echo -n "FLAP_APP:1" | nc -4u -w0 $1 8888
sleep 0.1
echo -n "FLAP_LAND:1" | nc -4u -w0 $1 8888
sleep 0.1
echo -n "GEAR_TRANSIT:0" | nc -4u -w0 $1 8888
echo -n "GEAR_SAFE0:0" | nc -4u -w0 $1 8888
echo -n "GEAR_SAFE1:0" | nc -4u -w0 $1 8888
echo -n "GEAR_SAFE2:0" | nc -4u -w0 $1 8888
echo -n "FLAP_TRANSIT:0" | nc -4u -w0 $1 8888
echo -n "FLAP_APP:0" | nc -4u -w0 $1 8888
echo -n "FLAP_LAND:0" | nc -4u -w0 $1 8888
done
echo "(try toggling switches etc and check to see there are responses below. Ctrl-C to quit)"
nc -4u -l 8889
else
if [[ "${BOXID}" == "XP Plugin Fish"* ]]
then
echo "that is plugin fish request - the box is not properly responding to messages. It isn't recognising the plugin :("
else
echo "that is not a proper BOXID response :("
fi
fi
fi
| true |
7477dcc7dee4e16a35029e2ebb771451c3ad8943
|
Shell
|
istorkbox/istorkbox-tech
|
/install/centos7_install_python3.sh
|
UTF-8
| 1,762 | 3.375 | 3 |
[] |
no_license
|
#!/bin/bash
#Python-3.5.1.tar.xz
#Mirrors for the install tarball
#https://www.python.org/ftp/python/3.5.1/Python-3.5.1.tar.xz
#https://istorkbox-1256921291.cos.ap-guangzhou.myqcloud.com/Python-3.5.1.tar.xz
#https://blog.csdn.net/u010510962/article/details/80690084
#https://www.cnblogs.com/blogjun/articles/8063989.html
if type wget >/dev/null 2>&1; then
echo '...'
else
echo 'start install wget'
yum -y install wget
fi
#check dir
cd /usr/local/
if [ ! -d 'src' ]; then
mkdir 'src'
fi
cd /usr/local/src
#download python3 and install
if [ -d '/usr/local/python3' ]; then
echo "python3 has installed"
else
yum groupinstall -y 'Development Tools'
	yum install -y zlib-devel bzip2-devel openssl-devel ncurses-devel
yum install -y openssl-devel
#wget https://www.python.org/ftp/python/3.5.1/Python-3.5.1.tar.xz
#download from tencent cloud
wget https://istorkbox-1256921291.cos.ap-guangzhou.myqcloud.com/Python-3.5.1.tar.xz
tar Jxvf Python-3.5.1.tar.xz
cd /usr/local/src/Python-3.5.1
./configure --prefix=/usr/local/python3.5
make && make install
	#Create symlinks pointing to the new python3 and pip3
ln -s /usr/local/python3.5/bin/python3.5 /usr/bin/python
ln -s /usr/local/python3.5/bin/pip3 /usr/bin/pip
	#Check the python and pip versions
python --version
pip --version
fi
##pip: command not found
##Reference: https://www.quora.com/How-to-fix-%E2%80%9Cpip-command-not-found%E2%80%9D
##There are usually two causes for this error:
#1. pip is not installed
#2. pip is installed, but its directory is missing from the $PATH environment variable
#For the second cause, run `echo $PATH` to check whether pip's install directory is in PATH.
#If it is not, add `export PATH=$PATH:/usr/local/bin` to ~/.bash_profile (assuming pip is
#installed in /usr/local/bin), then run `source ~/.bash_profile` to apply the change.
| true |
ee3db996151563b4551f406c4062a59e3bd3c68d
|
Shell
|
trevorcook/reflex-platform
|
/run-todomvc-in-ios-sim.sh
|
UTF-8
| 580 | 2.84375 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
function cleanup {
if [ -n "$uuid" ]; then
echo "Cleaning up simulator" >&2
xcrun simctl shutdown $uuid 2>/dev/null
xcrun simctl delete $uuid
fi
}
trap cleanup EXIT
nix-build -A ghcIosSimulator64.reflex-todomvc
uuid=$(xcrun simctl create reflex-todomvc com.apple.CoreSimulator.SimDeviceType.iPhone-7 com.apple.CoreSimulator.SimRuntime.iOS-10-2)
open -a Simulator --args -CurrentDeviceUDID $uuid
xcrun simctl install $uuid `nix-build -A ghcIosSimulator64.reflex-todomvc`/reflex-todomvc.app
xcrun simctl launch --console $uuid reflex-todomvc
| true |
b881ac6c8aeeaf5287bf5b789ca90b6dad838a6b
|
Shell
|
chrismb3/.sh
|
/lab3.sh
|
UTF-8
| 221 | 3.265625 | 3 |
[] |
no_license
|
while [[ "$#" > 0 ]]; do case $1 in
-fornamn) fornamn="$2"; shift;shift;;
-efternamn) efternamn="$2";shift;shift;;
*) echo "Unknown parameter passed: $1";shift;shift;;
esac; done
echo "Du heter $efternamn, $fornamn"
| true |
168409d58d5acbc1bead1d0d8149e5973f5a850e
|
Shell
|
tylyan/dotfiles
|
/install.sh
|
UTF-8
| 1,735 | 4.0625 | 4 |
[] |
no_license
|
#!/bin/bash
check_dependencies() {
echo "checking dependencies..."
dependencies=(curl git)
    for d in "${dependencies[@]}"; do
command -v $d > /dev/null 2>&1
if [[ $? != 0 ]]; then
echo "$d required to run install script."
exit 1
fi
done
echo "done."
}
brew_install() {
echo "installing homebrew packages..."
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
brew tap homebrew/cask-fonts
formulae=(coreutils nvim tmux zsh stow node python)
casks=(kitty alfred rectangle karabiner-elements font-Hack visual-studio-code todoist)
brew install ${formulae[@]}
brew install --cask ${casks[@]}
echo "done."
}
install_config() {
echo "installing config files..."
# tmux plugin manager
[[ ! -d ~/.tmux/plugins/tpm ]] \
&& git clone https://github.com/tmux-plugins/tpm ~/.tmux/plugins/tpm
# install vimplug for neovim
[[ ! -d "${XDG_DATA_HOME:-$HOME/.local/share}"/nvim/site/autoload/plug.vim ]] \
&& sh -c 'curl -fLo "${XDG_DATA_HOME:-$HOME/.local/share}"/nvim/site/autoload/plug.vim --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim'
rm -f $HOME/.zshrc
# TODO: probably move everything that belongs in $HOME into 'home' and just stow the entire folder
configs=(config zsh tmux)
for cfg in ${configs[@]}; do
stow $cfg
done
# TODO: move extra config files like zsh/autosuggestion-settings.zsh to where the belong
echo "done."
}
install() {
echo "install start..."
check_dependencies
brew_install
install_config
echo "install done."
exit 0
}
install
| true |
14f7b0167da8bf70b3b8ff66b60b7d939abe3878
|
Shell
|
ObliviousGmn/MacDotfiles
|
/Scripts/Lockscreen
|
UTF-8
| 986 | 3.71875 | 4 |
[] |
no_license
|
#!/bin/sh
# ObliviousGmn. April 2015
# https://github.com/ObliviousGmn
# i3lock - Lockscreen
Active="/tmp/Active.png"
Static="/tmp/Static.png"
Center="-gravity center -composite -matte"
Pixel="-scale 10% -scale 1000% -fill black -colorize 33%"
Wallpaper="$(cat $HOME/.xinitrc | awk '/hsetroot/ {print $3}')"
CenterIMG="$HOME/Wallpaper/Misc/Tinalock.png"
active(){
# Active Setup ..
scrot -q 1 -z $Active
convert $Active $Pixel $Active
	[ -f "$CenterIMG" ] && convert $Active $CenterIMG $Center $Active
i3lock -u -i $Active
rm $Active
exit 0
}
static(){
# Static setup, Wallpaper only ..
	if [ ! -f $Static ]; then
		convert $Wallpaper $Pixel $Static
		[ -f "$CenterIMG" ] && convert $Static $CenterIMG $Center $Static
	fi
i3lock -u -i $Static
exit 0
}
remove(){
# Remove Static ..
rm $Static
}
usage(){
echo
echo " Lockscreen [ Active -a | Static -s | Clear -c ] "
echo
}
case $1 in
-h) usage ;;
-a) active ;;
-s) static ;;
-c) remove ;;
esac
| true |
3b5a04a04edb99c9ac8eb6bb6cb863a9fbb0d475
|
Shell
|
b-it-bots/mas_industrial_robotics
|
/images/mas_industrial_robotics/bashrc
|
UTF-8
| 874 | 2.84375 | 3 |
[] |
no_license
|
#
# ~/.bashrc
# borrowed from https://github.com/pierrekilly/docker-ros-box/blob/master/docker/bashrc
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
alias ls='ls --color=auto'
export QT_X11_NO_MITSHM=1
function returncode
{
returncode=$?
if [ $returncode != 0 ]; then
echo "[$returncode]"
else
echo ""
fi
}
if [[ ${EUID} == 0 ]]
then
PS1='\[\033[0;31m\]$(returncode)\[\033[01;31m\]\u\[\033[00m\]@\[\033[01;33m\]\h\[\033[01;34m\] ${PWD} \$\[\033[00m\] '
else
PS1='\[\033[0;31m\]$(returncode)\[\033[01;32m\]\u\[\033[00m\]@\[\033[01;33m\]\h\[\033[01;34m\] ${PWD} \$\[\033[00m\] '
fi
export BUILDDIR=/tmp
export MAKEFLAGS="-j$(nproc) $MAKEFLAGS"
export LD_LIBRARY_PATH="/usr/lib:$LD_LIBRARY_PATH"
source $HOME/$ROS_DISTRO/catkin_ws/devel/setup.bash
#ALIAS b-it-bots
alias bringup_sim='roslaunch mir_bringup_sim robot.launch'
| true |
37723a8e7ff3761beac5252bedc1446a121b275b
|
Shell
|
spinnaker-cn/deck
|
/app/scripts/modules/build_order.sh
|
UTF-8
| 744 | 3.65625 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
cd `dirname $0`
MODULES=$*;
TUPLES=""
for MODULE in $MODULES ; do
# hard coding known deps for now because the package.json files don't yet contain the package's dependencies
case "$MODULE" in
amazon) DEPS="core" ;;
appengine) DEPS="core" ;;
cloudfoundry) DEPS="core" ;;
core) DEPS="core" ;;
docker) DEPS="core" ;;
ecs) DEPS="amazon core" ;;
google) DEPS="core" ;;
kubernetes) DEPS="core" ;;
openstack) DEPS="core" ;;
oracle) DEPS="core" ;;
titus) DEPS="amazon docker core" ;;
*)
echo "Unknown module: ${MODULE}"
      exit 1;
;;
esac
for DEP in $DEPS ; do
TUPLES="${TUPLES} ${DEP} ${MODULE}"
done
done
echo $TUPLES | tsort
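# Example (hypothetical invocation): "./build_order.sh ecs titus" produces the
# pairs "amazon ecs core ecs amazon titus docker titus core titus"; tsort then
# prints one module per line in a valid dependency-first order, e.g.
# core, amazon, docker, ecs, titus.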
| true |
fef708f2a59d91233794c7083cb80d130d190b88
|
Shell
|
tweedproject/tweed
|
/stencils/dummy/lifecycle/unbind
|
UTF-8
| 146 | 2.8125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
set -eu
echo >&2 "unbinding..."
n=5
while [[ $n != '0' ]]; do
sleep 1
n=$(( n - 1 ))
echo >&2 "$n seconds left..."
done
exit 0
| true |
5daae51cfce2c9e9b2510b3f5f405eb1ba52b4d6
|
Shell
|
Playfloor/docker-centos
|
/centos_base/history/install_centos_base.sh
|
UTF-8
| 2,769 | 2.640625 | 3 |
[] |
no_license
|
#!/bin/bash
# author ljh
#Install wget
yum -y install wget
#Switch the yum repo to the Aliyun mirror
wget -O /etc/yum.repos.d/CentOS-Base-aliyun.repo http://mirrors.aliyun.com/repo/Centos-6.repo
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
mv /etc/yum.repos.d/CentOS-Base-aliyun.repo /etc/yum.repos.d/CentOS-Base.repo
# Initialize the CentOS environment and install some base libraries etc.
yum -y install wget curl tar zip unzip make gcc git perl perl-ExtUtils-Embed ruby pcre-devel openssl openssl-devel subversion deltarpm;
yum -y install libtool automake autoconf install gcc-c++;
yum -y install libxml2-devel zlib-devel bzip2 bzip2-devel;
yum -y install ncurses-devel readline-devel;
yum -y remove vim vim-enhanced vim-common vim-minimal vim-filesystem;
#Enter the source directory
cd source/
#Install lua|luajit
tar zxf lua-5.3.4.tar.gz
cd lua-5.3.4
make -j 8 linux test
make -j 8 install INSTALL_TOP=/usr/local/lua5.3.4
mv /usr/bin/lua /usr/bin/lua5.1
mv /usr/bin/luac /usr/bin/luac5.1
ln -s /usr/local/lua5.3.4/bin/lua /usr/bin/lua
ln -s /usr/local/lua5.3.4/bin/luac /usr/bin/luac
#Install luajit
cd ../
tar zxf LuaJIT-2.0.5.tar.gz
cd LuaJIT-2.0.5
make -j 8 && make install PREFIX=/usr/local/luajit2.0.5
#Install python
cd ../
tar zxf Python-2.7.13.tgz
cd Python-2.7.13
./configure --prefix=/usr/local/python2.7.13
make -j 8 && make install
rm -rf /usr/bin/python /usr/bin/python2
ln -s /usr/local/python2.7.13/bin/python /usr/bin/python
#ln -s /usr/local/python2.7.13/bin/python python2
#Install vim8 with lua, python, perl etc. support, so other plugins can use code completion
cd ../
tar jxf vim-8.0.tar.bz2
cd vim80/src
./configure --prefix=/usr/local/vim8 --with-features=huge --enable-cscope --enable-rubyinterp --enable-pythoninterp --with-python-config-dir=/usr/local/python2.7.13/lib/python2.7/config --enable-luainterp --with-lua-prefix=/usr/local/lua5.3.4 --enable-perlinterp --enable-largefile --enable-multibyte --disable-netbeans --enable-cscope >> logs
#./configure --prefix=/usr/local/vim8 \
#--with-features=huge \
#--enable-cscope \
#--enable-rubyinterp \
#--enable-pythoninterp \
#--with-python-config-dir=/usr/local/python2.7.13/lib/python2.7/config \
#--enable-luainterp \
#--with-lua-prefix=/usr/local/lua5.3.4 \
#--enable-perlinterp \
#--enable-largefile \
#--enable-multibyte \
#--disable-netbeans \
#--enable-cscope ;\
#
make -j 8 && make install
ln -s /usr/local/vim8/bin/vim /usr/bin/vim
# ./configure --prefix=/usr/local/vim8 --with-features=huge --enable-cscope --enable-rubyinterp --enable-pythoninterp --with-python-config-dir=/usr/local/python2.7.13/lib/python2.7/config --enable-luainterp --with-lua-prefix=/usr/local/lua5.3.4 --enable-perlinterp --enable-largefile --enable-multibyte --disable-netbeans --enable-cscope >> logs
| true |
c92ee008acf7cde7a359c0739c51da4d6b1bb9c4
|
Shell
|
x3rAx/dotfiles
|
/.local/bin/wifi
|
UTF-8
| 743 | 3.921875 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
main() {
action=$1
case "$action" in
'on')
enable_wifi
;;
'off')
disable_wifi
;;
'toggle')
toggle_wifi
;;
'status'|*)
get_status
esac
}
enable_wifi() {
rfkill unblock wifi
}
disable_wifi() {
rfkill block wifi
}
get_status() {
rfkill -rn | awk '
BEGIN { blocked=1 }
/wlan/ { if ($4 == "unblocked") { blocked=0 } }
END { if (blocked) { print "blocked" } else { print "unblocked" } }
'
}
toggle_wifi() {
local status="$(get_status)"
if [[ $status == "blocked" ]]; then
enable_wifi
else
disable_wifi
fi
}
main "$@"
| true |
4599096a3065a95af5f296f8ba4ccd075eee0d7d
|
Shell
|
FailMan3D/fm3d-vcs-utils
|
/bin/git
|
UTF-8
| 610 | 3.390625 | 3 |
[] |
no_license
|
#!/bin/sh
# vim: ts=4 sts=0 sw=4 noet ai
# Because Git doesn't support subcommand aliases
# Hard-coded path to git
# DANGER: Don't replace this with "command git",
# doing so will turn this script into a forkbomb!
gitpath=/usr/bin/git
case "$1" in
"stash")
shift # discard $1
case "$1" in
"diff")
shift
# "stash-diff" is a Git alias for "stash show -p"
$gitpath stash-diff "$@"
;;
"ls")
shift
$gitpath stash list "$@"
;;
*)
$gitpath stash "$@"
;;
esac
;;
"describe")
shift
$gitpath show-branch-description.sh "$@"
;;
*)
$gitpath "$@"
;;
esac
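# Example: "git stash ls" now runs "/usr/bin/git stash list", and
# "git stash diff" runs the "stash-diff" alias (i.e. "stash show -p").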
| true |
ee7069f0729591188595e7e7836b30003a02f5f8
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/firefox-extension-requestpolicy-continued/PKGBUILD
|
UTF-8
| 1,129 | 2.578125 | 3 |
[] |
no_license
|
#
# Based on PKGBUILD firefox-extension-shumway and firefox-extension-adblock-plus
_plugin_name=requestpolicy-continued
pkgname=firefox-extension-${_plugin_name}
pkgdesc="Be in control of which cross-site requests are allowed. Improve the privacy of your browsing by not letting other sites know your browsing habits. Secure yourself from Cross-Site Request Forgery (CSRF) and other attacks."
pkgver=1.0.beta13.0
pkgrel=1
arch=('any')
url="https://addons.mozilla.org/firefox/addon/${_plugin_name}/"
makedepends=('unzip' )
depends=("firefox")
license=('GPL3')
source=("${pkgname}.xpi::https://addons.mozilla.org/firefox/downloads/latest/requestpolicy-continued/addon-683612-latest.xpi" )
sha1sums=('83ba3b7cc8906437dad34be43cbe920f98ae8b17')
noextract=("${pkgname}.xpi")
pkgver(){
unzip -p ${pkgname}.xpi install.rdf | sed -n '/.*<em:version>\(.*\)<\/em:version>.*/{s//\1/p;q}'
}
package(){
srcxpi="${srcdir}/${pkgname}.xpi"
emid=$(unzip -p "$srcxpi" install.rdf | sed -n '/.*<em:id>\(.*\)<\/em:id>.*/{s//\1/p;q}')
install -Dm644 "$srcxpi" "${pkgdir}/usr/lib/firefox/browser/extensions/$emid.xpi"
}
| true |
e31f14ec9681083081fe25a2dbd4d8a870086b2a
|
Shell
|
weaveworks/wksctl
|
/tools/check-embedmd.sh
|
UTF-8
| 89 | 2.71875 | 3 |
[
"Apache-2.0",
"LGPL-3.0-only",
"LGPL-2.0-or-later"
] |
permissive
|
#!/bin/sh -e
for f in "$@"; do
echo "embedmd: checking $f"
embedmd -d "$f"
done
| true |
a912daffc06e0145bb255eaa0e71e4bc8bc18b4a
|
Shell
|
peter1000/arch-pkg
|
/pkg/all/libsearpc/PKGBUILD
|
UTF-8
| 1,328 | 2.734375 | 3 |
[] |
no_license
|
# Maintainer: Moritz Maxeiner <moritz@ucworks.org>
# Contributor: Aaron Lindsay <aaron@aclindsay.com>
pkgname=libsearpc
pkgver=1.2.2
_seafilever=3.0.4
pkgrel=6
pkgdesc="A simple and easy-to-use C language RPC framework (including both server side & client side) based on GObject System."
arch=('i686' 'x86_64' 'armv6h' 'armv7h')
url="https://github.com/haiwen/libsearpc/"
license=('GPLv3')
depends=('glib2>=2.16.0' 'jansson>=2.2.1' 'libtool>=1.4')
makedepends=('python2-simplejson')
optdepends=('python2-simplejson')
source=("https://github.com/haiwen/libsearpc/archive/v${_seafilever}.tar.gz"
"libsearpc.pc.patch")
sha256sums=('131444c2f476dfe8a2d9c2d1eec07e40606fd3e8105a5327e1febb2e15c6ab68'
'aec39a303aaebc0777a22d8c53367f52f619654d63f62b362d75c1c599e632f4')
configure_libsearpc() {
./autogen.sh
./configure --prefix=/usr PYTHON=/usr/bin/python2
}
# pkgver() {
# cd "$srcdir/$pkgname-$_seafilever"
# configure_libsearpc &> /dev/null
# grep "PACKAGE_VERSION" Makefile | \
# sed 's/PACKAGE_VERSION.*=[ \t]\+\([0-9\.]\+\)[ \t]*/\1/g'
# }
prepare () {
cd "$srcdir/$pkgname-$_seafilever"
patch -p1 -i ../libsearpc.pc.patch
}
build () {
cd "$srcdir/$pkgname-$_seafilever"
configure_libsearpc
make -j1
}
package () {
#install library and header files
cd "$srcdir/$pkgname-$_seafilever"
make DESTDIR="$pkgdir/" install
}
| true |
4f3a98da6a52d6083624d3902a31f81a44b87d8a
|
Shell
|
huawei-noah/bolt
|
/common/gcl/tools/kernel_lib_compile/sh/buildKernelLibConfig.sh
|
UTF-8
| 1,783 | 3.375 | 3 |
[
"MIT"
] |
permissive
|
#get work path#
workPath=$(pwd)
#set file.cl dir#
tensorCLPath=${BOLT_ROOT}/tensor_computing/src/gpu/mali/cl
sampleCLPath=${BOLT_ROOT}/gcl/tools/gcl_sample/cl
imageCLPath=${BOLT_ROOT}/image/src/gpu/mali/cl
CLPath=(${tensorCLPath} ${sampleCLPath} ${imageCLPath})
deviceNameFile=deviceNameFile
#get kernel compile option sh#
shPath=${workPath}/sh
compileConfigPath=${shPath}/compile
cd ${compileConfigPath}
compileConfigFiles=$(ls)
cd ${workPath}
#set and build compile related dir#
binPath=${workPath}/bin
srcPath=${workPath}/src
incPath=${workPath}/include
clPath=${workPath}/cl
namePath=${workPath}/name
TimeFlag=`adb -s ${adbDeviceNum[0]} shell "date +%s_%N"`
kernelBuildPath=/data/local/tmp/${TimeFlag}
bin2charPath=${workPath}/kernel_bin2char
kernelBinPath=${workPath}/kernel_bin
deviceNamePath=${workPath}/device_name
rm -rf ${binPath} ${srcPath} ${incPath} ${clPath} ${namePath}
mkdir ${binPath} ${srcPath} ${incPath} ${clPath} ${namePath}
#set deviceNamesFile to record deviceName#
dNameFile=${namePath}/deviceNamesFile.dn
#build tool bin2char#
cd ${bin2charPath}
g++ bin2char.cpp -o bin2char
mv bin2char ${workPath}
cd ${workPath}
#build tool gcl_binary#
cd ${kernelBinPath}
rm -rf build
mkdir build
cd build
cmake_options="-DUSE_GPU=ON \
-DUSE_DYNAMIC_LIBRARY=ON \
-DBUILD_TEST=ON \
-DUSE_THREAD_SAFE=OFF"
cmake .. ${cmake_options}
make -j33
cp gcl_binary ${workPath}
cd ${workPath}
rm -rf ${kernelBinPath}/build
#build tool device_name#
cd ${deviceNamePath}
rm -rf build
mkdir build
cd build
cmake .. ${cmake_options}
make -j33
cp gcl_device_name ${workPath}
cd ${workPath}
rm -rf ${deviceNamePath}/build
#cp cl file to cl dir#
for cPath in "${CLPath[@]}"; do
cp ${cPath}/*.cl ${clPath}
done
cp ${tensorCLPath}/*.h ${clPath}
| true |
c86dc16e66f445c3e23656cac3fe63553ecd1188
|
Shell
|
ycliang-tw/dotfiles
|
/install.sh
|
UTF-8
| 539 | 3.78125 | 4 |
[] |
no_license
|
#!/bin/bash
: << 'description'
This script is used to customize the working environment.
Date: 2020/06/04
Author: ycliang ycliang@andestech.com
description
dir=`pwd`
dotfiles=".bashrc .vimrc"
install()
{
home=$1
for f in $dotfiles; do
		cp $dir/$f $home/.
done
}
show_help()
{
echo '[usage] ./install.sh <option>'
echo
echo '[option]'
	echo '	-d <$HOME>	$HOME is the home directory of the user'
echo ' -h | --help Show this message'
}
main()
{
case $1 in
-d)
HOME=$2
install $HOME;;
*)
show_help;;
esac
}
main $@
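# Example (hypothetical home directory): ./install.sh -d /home/alice
# copies the dotfiles listed in $dotfiles into /home/alice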
| true |
df9795cbc861e49c916e5a5c7027e3603afd8cb0
|
Shell
|
davidxjohnson/hazelcast-k8s-rpi
|
/start.sh
|
UTF-8
| 602 | 3.140625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
PRG="$0"
PRGDIR=`dirname "$PRG"`
HAZELCAST_HOME=`cd "$PRGDIR/.." >/dev/null; pwd`/hazelcast
if [ "x$MIN_HEAP_SIZE" != "x" ]; then
JAVA_OPTS="$JAVA_OPTS -Xms${MIN_HEAP_SIZE}"
fi
if [ "x$MAX_HEAP_SIZE" != "x" ]; then
JAVA_OPTS="$JAVA_OPTS -Xmx${MAX_HEAP_SIZE}"
fi
export CLASSPATH=$HAZELCAST_HOME/*:$CLASSPATH
echo "########################################"
echo "# RUN_JAVA=$RUN_JAVA"
echo "# JAVA_OPTS=$JAVA_OPTS"
echo "# starting now...."
echo "########################################"
java -server $JAVA_OPTS -Djava.net.preferIPv4Stack=true com.hazelcast.core.server.StartServer
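# Example (hypothetical heap sizes):
#   MIN_HEAP_SIZE=512m MAX_HEAP_SIZE=1g ./start.sh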
| true |
a2d29834dd448a9004cea214c8cdba7ca91032fa
|
Shell
|
sjbylo/osd-workshop
|
/scripts/deploy-homeroom
|
UTF-8
| 1,235 | 3.125 | 3 |
[] |
no_license
|
#!/bin/bash
PROJECT=infra
cd `dirname $0`
BASE=`pwd`
cd - >> /dev/null
source ${BASE}/../config.sh
set -e
oc new-project $HR_PROJECT || oc project $HR_PROJECT
oc new-build \
-n $HR_PROJECT \
--name=homeroom \
--binary \
--docker-image=ghcr.io/kwkoo/workshop-dashboard:4.7
echo -n "waiting for buildconfig to appear..."
while [ $(oc get bc/homeroom --no-headers 2>/dev/null | wc -l) -lt 1 ]; do
echo -n "."
sleep 5
done
echo "done"
echo -n "waiting for workshop-dashboard imagestream tag to appear..."
while [ $(oc get -n $HR_PROJECT istag/workshop-dashboard:4.7 -o name 2>/dev/null | wc -l) -lt 1 ]; do
echo -n "."
sleep 5
done
echo "done"
oc start-build homeroom \
-n $HR_PROJECT \
--from-dir=${BASE}/../workshop_content \
--follow
oc new-app \
-n $HR_PROJECT \
https://raw.githubusercontent.com/openshift-homeroom/workshop-spawner/develop/templates/hosted-workshop-production.json \
-p CONSOLE_IMAGE=quay.io/openshift/origin-console:4.6 \
-p SPAWNER_NAMESPACE=$HR_PROJECT \
-p CLUSTER_SUBDOMAIN=$(oc get route -n openshift-console console -o jsonpath='{.spec.host}' | sed -e 's/^[^.]*\.//') \
-p WORKSHOP_ENVVARS="API_URL=$(oc whoami --show-server)" \
-p WORKSHOP_IMAGE=homeroom:latest
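# Note on CLUSTER_SUBDOMAIN above: the sed strips everything up to the first
# dot, so a (hypothetical) console route of
# console-openshift-console.apps.example.com yields apps.example.com.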
| true |
636695c1a74eb416aeb534920805b4b6ab1cd822
|
Shell
|
hngbyr/gsm
|
/bin/motoload.sh
|
UTF-8
| 1,235 | 3.25 | 3 |
[] |
no_license
|
#!/bin/sh
# motoload.sh
# moded by 0x7678
# Under GNU GPL
# get config settings
CONFIG="$HOME/.ini/config.ini";
if [ ! -f $CONFIG ];then
    zenity --info --text="Error code 0x05" --title="Program error";
    echo "error code 0x05, please contact the system administrator!";
    exit 1;
fi
GSMAPPATH=`cat "$CONFIG" | grep '^GSMAPATH=' | cut -d '=' -f 2`;
if [ -z "$1" ]; then
# echo "usage: $0 \"phone type\" [serial line] [l2_socket] [loader]";
# echo "suppoted phones: C115/C117/C123/C121/C118/C139/C140/C155"
# echo "example: $0 C139 /dev/ttyUSB2 /tmp/testsocket /tmp/testloader"
# exit 0;
mobile=C123;
else
mobile="$1"
fi
if [ -z "$2" ]; then
stty=/dev/ttyUSB0;
else
stty="$2";
fi
if [ -z "$3" ]; then
l2socket="";
else
l2socket=" -s $3";
fi
if [ -z "$4" ]; then
loader="";
else
loader=" -l $4";
fi
id=`echo "$stty" | cut -b 12-`
case "$mobile" in
C115|C117|C118|C119|C121|C123)
# e88
# this is not ideal for C115 and C117,
# but they seems to work..
echo -n "Loading , press button on a phone...";
xterm -T "AC-GSM Channel [ $((id+1)) ] Data Windows" -e "$GSMAPPATH"/osmocon $l2socket $loader -p "$stty" -m c123xor "$GSMAPPATH"/layer1.compalram.bin &
;;
*)
echo "Unknown phone $1."
;;
esac
| true |
f95ad4d1d470517cae195ca7cbb241cd56cae713
|
Shell
|
cms-sw/cms-bot
|
/kill-build-release
|
UTF-8
| 714 | 3.796875 | 4 |
[] |
no_license
|
#!/bin/sh -ex
# This is used when the build is aborted from the issue comments.
# build-release creates a filed called BUILD_PID, this file
# contains the process id for the build.
# When this script is excecuted, it reads the file and kills
# the process with that process ID.
# The file BUILD_PID must be in the workspace of the build
CMSSW_X_Y_Z=$1
ARCHITECTURE=$2
WORKSPACE=$3
PID_TO_KILL=`ps -awx 2>&1 | grep docker_launcher.sh | grep $CMSSW_X_Y_Z | grep $ARCHITECTURE | awk '{print $1}'`
pushd $WORKSPACE
if [ "X$PID_TO_KILL" = "X" ]; then
PID_TO_KILL=`head -n 1 BUILD_PID`
fi
echo "Killing $PID_TO_KILL, the build was aborted in the github issue." >> buildSteps.log
kill -9 $PID_TO_KILL
popd
| true |
591c1806269fdcd0c4d5fd0227b4555e9f590e38
|
Shell
|
yuuhhe/hub-linux-ubuntu
|
/install/overlay/etc/init.d/samba-shares.sh
|
UTF-8
| 1,166 | 3.734375 | 4 |
[] |
no_license
|
#! /bin/sh -e
### BEGIN INIT INFO
# Provides: samba-shares
# Default-Start: 2 3 4 5
# Default-Stop: S 0 1 6
# Short-Description: generates /etc/inveneo/samba/shares.conf
# Description: generates /etc/inveneo/samba/shares.conf
# by reading all *.conf files in /etc/inceneo/samba/shares.d
### END INIT INFO
#
# Author: Jeff Wishnie <jeff@inveneo.org>
#
set -e
PATH=/bin:/usr/bin:/sbin:/usr/sbin
INV_SAMBA_CONF_D=/etc/inveneo/samba
SHARES_D=${INV_SAMBA_CONF_D}/shares.d
SHARES_CONF=${INV_SAMBA_CONF_D}/shares.conf
HEADER="# ========= DO NOT EDIT: GENERATED AT BOOT TIME FROM CONTENTS OF shares.d by /etc/init.d/samba-shares.sh"
. /lib/lsb/init-functions
do_start () {
log_daemon_msg "Generating ${SHARES_CONF}"
echo ${HEADER} > ${SHARES_CONF}
if [ -d "${SHARES_D}" ]
then
for f in "${SHARES_D}"/*.conf
do
log_progress_msg "Adding $f..."
echo "include = $f" >> "${SHARES_CONF}"
done
fi
}
case "$1" in
start|restart)
do_start
;;
reload|force-reload)
do_start
;;
stop)
# No-op
;;
*)
echo "Usage: samba-shares.sh [start|stop]" >&2
exit 3
;;
esac
exit 0
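# Example (hypothetical share): dropping the following into
# /etc/inveneo/samba/shares.d/public.conf
#   [public]
#   path = /srv/public
#   read only = no
# and running "/etc/init.d/samba-shares.sh start" regenerates shares.conf with
#   include = /etc/inveneo/samba/shares.d/public.conf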
| true |
1abf1e6f8f4c76e6f59d3844f0debcbb54ebe8bd
|
Shell
|
ChillerDragon/crools
|
/inkscape
|
UTF-8
| 2,653 | 3.75 | 4 |
[] |
no_license
|
#!/bin/bash
# shellcheck disable=SC1117
cat << EOF
_
___ _ __ ___ ___ | |___
/ __| '__/ _ \ / _ \| / __|
| (__| | | (_) | (_) | \__ \\
\___|_| \___/ \___/|_|___/
INKSCAPE LAUNCH FROM SOURCE SCRIPT
EOF
GIT_ROOT=/home/$USER/Desktop/git-extern
INKSCAPE="$GIT_ROOT/inkscape"
INKSCAPE_BIN="$INKSCAPE/build/bin/inkscape"
REMOTE_ORG="https://gitlab.com/inkscape"
REMOTE_REPO="${REMOTE_ORG}/inkscape"
function v_eval() {
# verbose eval
tput bold
echo "$*"
tput sgr0
eval "$*"
}
function install_inkscape() {
echo "do you want to install inkscape? [y/N]"
read -r -n 1 yn
echo ""
if ! [[ "$yn" =~ [yY] ]]
then
exit 1
fi
mkdir -p "$GIT_ROOT" || exit 1
cd "$GIT_ROOT" || exit 1
# INKSCAPE
if [ ! -d inkscape ]
then
v_eval "git clone --recurse-submodules ${REMOTE_REPO}.git"
fi
(
v_eval "cd inkscape || exit 1"
v_eval "git pull --recurse-submodules && git submodule update"
v_eval "wget -v ${REMOTE_ORG}/inkscape-ci-docker/-/raw/master/install_dependencies.sh -O install_dependencies.sh"
v_eval "bash install_dependencies.sh --recommended"
v_eval "rm install_dependencies.sh"
v_eval "mkdir -p build || exit 1"
v_eval "cd build || exit 1"
v_eval "cmake .. -DCMAKE_INSTALL_PREFIX=${PWD}/install_dir -DCMAKE_C_COMPILER_LAUNCHER=ccache -DCMAKE_CXX_COMPILER_LAUNCHER=ccache -DCMAKE_BUILD_TYPE=Debug"
v_eval "make -j$(nproc) || exit 1"
v_eval "make install || exit 1"
) || exit 1
}
if [ ! -d "$INKSCAPE" ] || [ ! -f "$INKSCAPE_BIN" ]
then
echo "Error: inkscape is not installed at expected location"
echo "$INKSCAPE_BIN"
install_inkscape
exit 1
fi
if [ "$1" == "--help" ] || [ "$1" == "-h" ]
then
echo "usage: inkscape [OPTIONS]"
echo "description: inkscape wrapper by ChillerDragon"
echo " powered by crools script"
echo "options:"
echo " --reinstall delete and install"
echo " --install download and build from source"
echo " --update same as install"
exit 0
elif [ "$1" == "--install" ] || [ "$1" == "--update" ]
then
install_inkscape
exit
elif [ "$1" == "--reinstall" ]
then
rm -rf "$INKSCAPE"
install_inkscape
exit
fi
if [ "$#" -eq "0" ]
then
gdb \
-ex='set pagination off' \
-ex='set confirm off' \
-ex=run \
-ex=bt \
-ex=quit \
"$INKSCAPE_BIN"
else
gdb \
-ex='set pagination off' \
-ex='set confirm off' \
-ex=run \
-ex=bt \
-ex=quit \
--args "$INKSCAPE_BIN" "$@"
fi
| true |
68355246c4b81cc2d75937f94177cc98a6f2a7cf
|
Shell
|
Marmzy/Thesis
|
/RNA-Seq_Pipeline/Run_STAR_Synthetic.sh
|
UTF-8
| 3,832 | 2.75 | 3 |
[] |
no_license
|
#$ -S /bin/sh
#$ -pe serial 3
#$ -l h_vmem=6G
module load STAR/x86_64/2.5.2b
#Mapping the GSE49911, GSE54153 and GSE81077 dataset reads (because some GSE54153
#samples underwent 'quality trimming', the correct files need to be collected,
#which makes the code messy).
for i in {886..903}
do
gunzip /group/biocomp/users/cadav/Thesis/Transcriptome/RNA-Seq/GSE49911/SRR952${i}.trimmed.fastq.gz
STAR --genomeDir $HOME/Thesis/Genomes/Synthetic_Region/Index --sjdbGTFfile $HOME/Thesis/Annotations.gtf --readFilesIn /group/biocomp/users/cadav/Thesis/Transcriptome/RNA-Seq/GSE49911/SRR952${i}.trimmed.fastq
mv Aligned.out.sam Aligned.SRR952${i}.trimmed.sam
mv Aligned.SRR952${i}.trimmed.sam Mapped/GSE49911_Synthetic
mv Log.final.out Log.SRR952${i}.final.out
mv Log.SRR952${i}.final.out Mapped/GSE49911_Synthetic
mv _STARgenome _STARgenome_SRR952${i}
mv _STARgenome_SRR952${i} Mapped/GSE49911_Synthetic
gzip /group/biocomp/users/cadav/Thesis/Transcriptome/RNA-Seq/GSE49911/SRR952${i}.trimmed.fastq
done
#--------------------------------------------------------------------------------------------------------------------------------#
declare quality=("292" "296" "298" "301" "302" "303")
for q in "${quality[@]}"
do
gunzip /group/biocomp/users/cadav/Thesis/Transcriptome/RNA-Seq/GSE54153/SRR1121${q}.quality.fastq.gz
STAR --genomeDir $HOME/Thesis/Genomes/Synthetic_Region/Index --sjdbGTFfile $HOME/Thesis/Annotations.gtf --readFilesIn /group/biocomp/users/cadav/Thesis/Transcriptome/RNA-Seq/GSE54153/SRR1121${q}.quality.fastq
mv Aligned.out.sam Aligned.SRR1121${q}.out.sam
mv Aligned.SRR1121${q}.out.sam Mapped/GSE54153_Synthetic
mv Log.final.out Log.SRR1121$q.final.out
mv Log.SRR1121${q}.final.out Mapped/GSE54153_Synthetic
mv SJ.out.tab SJ.SRR1121${q}.out.tab
mv SJ.SRR1121${q}.out.tab Mapped/GSE54153_Synthetic
mv _STARgenome _STARgenome_SRR1121${q}
mv _STARgenome_SRR1121${q} Mapped/GSE54153_Synthetic
gzip /group/biocomp/users/cadav/Thesis/Transcriptome/RNA-Seq/GSE54153/SRR1121${q}.quality.fastq
done
declare trimmed=("293" "294" "295" "297" "299" "300")
for t in "${trimmed[@]}"
do
gunzip /group/biocomp/users/cadav/Thesis/Transcriptome/RNA-Seq/GSE54153/SRR1121${t}.trimmed.fastq.gz
	STAR --genomeDir $HOME/Thesis/Genomes/Synthetic_Region/Index --sjdbGTFfile $HOME/Thesis/Annotations.gtf --readFilesIn /group/biocomp/users/cadav/Thesis/Transcriptome/RNA-Seq/GSE54153/SRR1121${t}.trimmed.fastq
mv Aligned.out.sam Aligned.SRR1121${t}.out.sam
mv Aligned.SRR1121${t}.out.sam Mapped/GSE54153_Synthetic
mv Log.final.out Log.SRR1121${t}.final.out
mv Log.SRR1121${t}.final.out Mapped/GSE54153_Synthetic
mv SJ.out.tab SJ.SRR1121${t}.out.tab
mv SJ.SRR1121${t}.out.tab Mapped/GSE54153_Synthetic
mv _STARgenome _STARgenome_SRR1121${t}_Synthetic
mv _STARgenome_SRR1121${t}_Synthetic Mapped/GSE54153_Synthetic
gzip /group/biocomp/users/cadav/Thesis/Transcriptome/RNA-Seq/GSE54153/SRR1121${t}.trimmed.fastq
done
#--------------------------------------------------------------------------------------------------------------------------------#
for i in {2991..3014}
do
gunzip /group/biocomp/users/cadav/Thesis/Transcriptome/RNA-Seq/GSE81077/SRR347${i}.trimmed.fastq.gz
STAR --genomeDir $HOME/Thesis/Genomes/Synthetic_Region/Index --sjdbGTFfile $HOME/Thesis/Annotations.gtf --readFilesIn /group/biocomp/users/cadav/Thesis/Transcriptome/RNA-Seq/GSE81077/SRR347${i}.trimmed.fastq
mv Aligned.out.sam Aligned.SRR347${i}.trimmed.sam
mv Aligned.SRR347${i}.trimmed.sam Mapped/GSE81077_Synthetic
mv Log.final.out Log.SRR347${i}.final.out
	mv Log.SRR347${i}.final.out Mapped/GSE81077_Synthetic
mv _STARgenome _STARgenome_SRR347${i}
mv _STARgenome_SRR347${i} Mapped/GSE81077_Synthetic
gzip /group/biocomp/users/cadav/Thesis/Transcriptome/RNA-Seq/GSE81077/SRR347${i}.trimmed.fastq
done
| true |
75214a208455af1c8e8a534619c3c89b34a7a00d
|
Shell
|
liaoya/docker-compose
|
/gitea/sqlite/init.sh
|
UTF-8
| 936 | 2.875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#shellcheck disable=SC1091
set -a -e -x
[[ $(command -v http) && $(command -v jq) ]] || {
echo "http and jq are required"
exit 1
}
THIS_FILE=$(readlink -f "${BASH_SOURCE[0]}")
THIS_DIR=$(dirname "${THIS_FILE}")
source "${THIS_DIR}/../env.sh"
docker-compose -f "${THIS_DIR}/docker-compose.yml" up -d
sleep 10s
http --timeout 120 -f POST :13000/install \
db_type=SQLite3 db_path=/data/gitea/gitea.db \
app_name="Gitea Repository" repo_root_path="/data/git/repositories" lfs_root_path="" \
ssl_mode=disable run_user=git \
domain="${DOMAIN}" ssh_port="${SSH_PORT}" http_port=3000 app_url="${GITEA_URL}" \
log_root_path="/data/gitea/log" \
disable_registration=on require_sign_in_view=on \
no_reply_address="noreply.org" \
admin_name="${GITEA_ADMIN}" admin_passwd="${GITEA_ADMIN_PASSWORD}" admin_confirm_passwd="${GITEA_ADMIN_PASSWORD}" admin_email="${GITEA_ADMIN}@${EMAIL_DOMAIN}"
bash "${THIS_DIR}/../create-demo-env.sh"
| true |
52f01b824dafa81620832ce44a900a7fc9b5bf72
|
Shell
|
bsn069/cpp1
|
/bsn_cpp/third_part/make_env.sh
|
UTF-8
| 1,580 | 2.796875 | 3 |
[] |
no_license
|
#!/bin/sh
# run in docker
#if [ ! -d "boost" ]; then
# cp /usr/include/boost boost -R
#fi
if [ ! -d "LuaJIT" ]; then
if [ ! -f "LuaJIT-2.0.5.tar.gz" ]; then
curl http://luajit.org/download/LuaJIT-2.0.5.tar.gz -o LuaJIT-2.0.5.tar.gz
fi
tar -xzvf LuaJIT-2.0.5.tar.gz
mv LuaJIT-2.0.5 LuaJIT
fi
if [ ! -d "LuaBridge" ]; then
git clone https://github.com/vinniefalco/LuaBridge.git
fi
if [ ! -d "pbc" ]; then
git clone https://github.com/bsn069/pbc.git
fi
if [ ! -d "protobuf" ]; then
git clone https://github.com/bsn069/protobuf.git
fi
if [ ! -d "boost_1_65_1" ]; then
if [ ! -f "boost_1_65_1.tar.bz2" ]; then
curl https://ncu.dl.sourceforge.net/project/boost/boost/1.65.1/boost_1_65_1.tar.bz2 -o boost_1_65_1.tar.bz2
fi
tar -jxvf boost_1_65_1.tar.bz2
fi
if [ ! -d "sqlite-autoconf-3210000" ]; then
if [ ! -f "sqlite-autoconf-3210000.tar.gz" ]; then
curl https://www.sqlite.org/2017/sqlite-autoconf-3210000.tar.gz -o sqlite-autoconf-3210000.tar.gz
fi
tar -zxvf sqlite-autoconf-3210000.tar.gz
fi
if [ ! -d "SQLiteCpp" ]; then
git clone https://github.com/bsn069/SQLiteCpp.git
fi
if [ ! -d "cryptopp" ]; then
if [ ! -f "cryptopp600.zip" ]; then
curl https://www.cryptopp.com/cryptopp600.zip -o cryptopp600.zip
fi
unzip cryptopp600.zip -d cryptopp
fi
# if [ ! -d "protobuf-cpp" ]; then
# if [ ! -f "protobuf-cpp-3.4.1.tar.gz" ]; then
# curl https://github.com/google/protobuf/releases/download/v3.4.1/protobuf-cpp-3.4.1.tar.gz -o protobuf-cpp-3.4.1.tar.gz
# fi
# tar -xzvf protobuf-cpp-3.4.1.tar.gz
# mv protobuf-cpp-3.4.1 protobuf-cpp
# fi
| true |
a0c52a95003ce3d4c7d4445d031128a779c3f2ad
|
Shell
|
x72coder/x72
|
/x72alinux/x72qialivm.sh
|
UTF-8
| 9,665 | 3.078125 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# to download this script
# curl -L http://bit.ly/2x0Wons >x72alinstall.sh
# ------------------------------------------------------------------------------
# safeExit
# -----------------------------------
# Non destructive exit for when script exits naturally.
# Usage: Add this function at the end of every script.
# -----------------------------------
function safeExit() {
trap - INT TERM EXIT
exit
}
f_ptc(){
read -e -sn 1 -p "Press any key to continue..."
}
#----------------------------------------------------------
# Preparation
#----------------------------------------------------------
PreInsBM(){
echo "Pre-installation - Verify the boot mode"
if [[ -d "/sys/firmware/efi/" ]]; then
echo "The system is booted in UEFI mode"
else
echo "The system is booted in BIOS or CSM mode"
fi
}
#----------------------------------------------------------
PreInsInt(){
echo "Pre-installation - Connect to the Internet"
# confirm you can access the internet
connection_test() {
        # `cmd`        run the command cmd
        # ip route     show the routing table
        # grep         search text with a regular expression and print matching lines
        # awk          a programming language for processing text and data
        # 'script'     the awk script to execute
        # 'NR'         the record number, i.e. the current line number during execution
        # {print}      scan the input line by line and run print for each line
        # $n           the n-th field of the current record
        # ping         test network connectivity
        # -q           quiet: no per-packet output, only the start and end summary
        # -c <count>   number of echo replies required
        # -w <secs>    deadline to wait for a reply
        # &> file      redirect both stdout and stderr of the command to file
ping -q -w 1 -c 1 `ip route | grep default | awk 'NR==1 {print $3}'` &> /dev/null && return 1 || return 0
}
    # connection_test returns 0 when the ping fails (see the function above)
    if connection_test; then
echo "Internet seems broken. Press any key to abort."
read -e -sn 1
safeExit
else
echo "Internet is connected."
fi
}
PreInsRootChk(){
if [[ "`whoami`" = "root" ]]; then
echo "The current user is root."
else
echo "The current user is not root. Please login as root and try again."
read -e -sn 1
safeExit
fi
}
#----------------------------------------------------------
PreInsClk(){
echo "Pre-installation - Update the system clock"
timedatectl set-ntp true
timedatectl status
}
#----------------------------------------------------------
PreInsBiosGptGrub(){
#----------------------------------------------------------
echo "Pre-installation - Partition the disks"
# /(bootloader) - 2m
# /boot - 512m
# swap - 2g
# /(root) - the rest
parted /dev/sda mklabel gpt
parted /dev/sda mkpart primary 1MiB 3MiB
parted /dev/sda set 1 bios_grub on
parted /dev/sda mkpart primary ext3 3MiB 515MiB
parted /dev/sda mkpart primary linux-swap 515MiB 2GiB
parted /dev/sda mkpart primary ext3 2GiB 100%
sleep 1
#----------------------------------------------------------
echo "Pre-installation - Format the partitions"
# make filesystems
# /(bootloader) - 2m
# /boot - 512m
# swap - 2g
# /(root) - the rest
mkfs.vfat /dev/sda1
mkfs.ext4 /dev/sda2
mkswap /dev/sda3
swapon /dev/sda3
mkfs.ext4 /dev/sda4
sleep 1
#----------------------------------------------------------
echo "Pre-installation - Mount the file systems"
# set up /mnt
mount /dev/sda4 /mnt
mkdir -p /mnt/boot
mount /dev/sda2 /mnt/boot
f_ptc
}
PreInsDiskChk(){
echo "Pre-installation - Result check"
lsblk
f_ptc
}
#----------------------------------------------------------
### Installation
# rankmirrors to make this faster (though it takes a while)
InsMirrorList(){
echo "Installation - Select the mirrors"
pacman -Syy
# mv /etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist.orig
# rankmirrors -n 6 /etc/pacman.d/mirrorlist.orig >/etc/pacman.d/mirrorlist
pacman -S --noconfirm reflector rsync
mv /etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist.bak
reflector --verbose --country 'Australia' -l 5 --sort rate --save /etc/pacman.d/mirrorlist
}
#----------------------------------------------------------
# install base packages (take a coffee break if you have slow internet)
InsBase(){
echo "Installation - Install the base packages"
dirmngr </dev/null
pacman-key --populate archlinux
pacman-key --refresh-keys
pacstrap /mnt base base-devel
}
#----------------------------------------------------------
### Configure the system
# generate fstab
CfgFstab(){
echo "Configure the system - Fstab"
genfstab -U -p /mnt >>/mnt/etc/fstab
cat /mnt/etc/fstab
}
CfgMirrorList(){
echo "Configure - Select the mirrors"
arch-chroot /mnt /bin/bash <<EOF
pacman -Syy
pacman -S --noconfirm reflector rsync
mv /etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist.bak
reflector --verbose --country 'Australia' -l 5 --sort rate --save /etc/pacman.d/mirrorlist
EOF
}
CfgTimeZ(){
echo "Configure the system - chroot - Time zone"
arch-chroot /mnt /bin/bash <<EOF
# set initial timezone to Australia/Sydney
ln -sf /usr/share/zoneinfo/Australia/Sydney /etc/localtime
# adjust the time skew, and set the time standard to UTC
hwclock --systohc --utc
EOF
}
CfgLocale(){
echo "Configure the system - chroot - Locale"
arch-chroot /mnt /bin/bash <<EOF
# set initial locale
sed -i '/en_US/{s/#//}' /etc/locale.gen
sed -i '/zh_CN/{s/#//}' /etc/locale.gen
sed -i '/zh_TW/{s/#//}' /etc/locale.gen
locale-gen
echo LANG=en_US.UTF-8 > /etc/locale.conf
EOF
}
CfgHost(){
echo "Configure the system - chroot - Hostname"
arch-chroot /mnt /bin/bash <<EOF
# Network configuration
# set initial hostname
echo "x72al-$(date +"%Y%W")" >/etc/hostname
echo "127.0.1.1\tx72al-$(date +"%Y%W").localdomain\tx72al-$(date +"%Y%W")" >> /etc/hosts
EOF
}
CfgNetwork(){
echo "Configure the system - chroot - Network configuration"
arch-chroot /mnt /bin/bash <<EOF
pacman -S --noconfirm networkmanager iw wpa_supplicant dialog
systemctl enable NetworkManager.service
EOF
}
CfgInitramfs(){
echo "Configure the system - chroot - Initramfs"
arch-chroot /mnt /bin/bash <<EOF
# no modifications to mkinitcpio.conf should be needed
mkinitcpio -p linux
EOF
}
CfgRootPw(){
echo "Configure the system - chroot - Root password"
arch-chroot /mnt /bin/bash <<EOF
# set root password to "root"
echo root:root | chpasswd
EOF
}
CfgIntelCPU(){
echo "Configure the system - chroot - intel-ucode"
    if [[ $(grep -m1 GenuineIntel /proc/cpuinfo | awk '{print $3}') == "GenuineIntel" ]]; then
arch-chroot /mnt /bin/bash <<EOF
pacman -S --noconfirm intel-ucode
EOF
fi
}
CfgBootLoader(){
echo "Configure the system - chroot - Boot loader"
arch-chroot /mnt /bin/bash <<EOF
pacman -S --noconfirm grub os-prober
grub-install --target=i386-pc /dev/sda
grub-mkconfig -o /boot/grub/grub.cfg
EOF
}
CfgPkgIns(){
echo "Configure the system - chroot - packages installation"
arch-chroot /mnt /bin/bash <<EOF
pacman -S --noconfirm screen screenfetch wpa_actiond ifplugd sudo zsh
EOF
}
# end section sent to chroot
# unmount
UnmountAll(){
umount -R /mnt
echo "Done! Unmount the CD image from the VM, then type 'reboot'."
}
function mainScript() {
############## Begin Script Here ###################
####################################################
PreInsBM
PreInsInt
PreInsRootChk
PreInsClk
echo -ne "Whether to partition the disk and format? \nY(default)/N"
read -e -sn 1 key
if [[ $key != "N" || $key != "n" ]]; then
PreInsBiosGptGrub
fi
PreInsDiskChk
echo -ne "Whether to rank the mirror list of live system? \nY(default)/N"
read -e -sn 1 key
if [[ $key != "N" || $key != "n" ]]; then
InsMirrorList
fi
echo -ne "Whether to install the packages of system base? \nY(default)/N"
read -e -sn 1 key
if [[ $key != "N" || $key != "n" ]]; then
InsBase
fi
echo -ne "Whether to configure the fstab? \nY(default)/N"
read -e -sn 1 key
if [[ $key != "N" || $key != "n" ]]; then
CfgFstab
fi
echo -ne "Whether to rank the mirror list of the new system? \nY(default)/N"
read -e -sn 1 key
if [[ $key != "N" || $key != "n" ]]; then
CfgMirrorList
fi
echo -ne "Whether to configure the time zone? \nY(default)/N"
read -e -sn 1 key
if [[ $key != "N" || $key != "n" ]]; then
CfgTimeZ
fi
echo -ne "Whether to configure the locale? \nY(default)/N"
read -e -sn 1 key
if [[ $key != "N" || $key != "n" ]]; then
CfgLocale
fi
echo -ne "Whether to configure the host? \nY(default)/N"
read -e -sn 1 key
if [[ $key != "N" || $key != "n" ]]; then
CfgHost
fi
echo -ne "Whether to configure the network? \nY(default)/N"
read -e -sn 1 key
if [[ $key != "N" || $key != "n" ]]; then
CfgNetwork
fi
echo -ne "Whether to configure the Initramfs? \nY(default)/N"
read -e -sn 1 key
if [[ $key != "N" || $key != "n" ]]; then
CfgInitramfs
fi
echo -ne "Whether to configure the root password? \nY(default)/N"
read -e -sn 1 key
if [[ $key != "N" || $key != "n" ]]; then
CfgRootPw
fi
echo -ne "Whether to configure the intel-ucode? \nY(default)/N"
read -e -sn 1 key
if [[ $key != "N" || $key != "n" ]]; then
CfgIntelCPU
fi
echo -ne "Whether to configure the boot loader? \nY(default)/N"
read -e -sn 1 key
if [[ $key != "N" || $key != "n" ]]; then
CfgBootLoader
fi
echo -ne "Whether to install common packages? \nY(default)/N"
read -e -sn 1 key
if [[ $key != "N" || $key != "n" ]]; then
CfgPkgIns
fi
echo "Unmount all directories in /mnt"
UnmountAll
echo -n
####################################################
############### End Script Here ####################
}
# ##################################################
# Run your script
mainScript
# Exit cleanly
safeExit
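# Run as root from the Arch live ISO; the script walks through each step with
# a Y/N prompt (pressing Enter accepts the default Y).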
| true |
1af5b0c54f817c3e06bea969208ffd9f5544881a
|
Shell
|
boundino/HltL1PbPb2018
|
/L1/mbThreshold/run_adcNoise.sh
|
UTF-8
| 988 | 2.921875 | 3 |
[] |
no_license
|
#!/bin/bash
RUN=(0 1)
FILE_ZeroBias=(
"/export/d00/scratch/jwang/L1ppLowPU2018/noise/L1Ntuple_ZeroBias_319460.root"
"/export/d00/scratch/jwang/L1XeXe/noise/L1Ntuple_HIZeroBias_304906.root"
)
FILE_EmptyBX=(
"/export/d00/scratch/jwang/L1ppLowPU2018/noise/L1Ntuple_EmptyBX_319460.root"
"/export/d00/scratch/jwang/L1XeXe/noise/L1Ntuple_HIEmptyBX_304906.root"
)
OUTPUT=(
"rootfiles/root_adcNoise_lowPUpp_run319460"
"rootfiles/root_adcNoise_XeXe_run304906"
)
TAG=(
"Run 319460"
"Run 304906"
)
g++ adcNoise.C $(root-config --libs --cflags) -g -o adcNoise.exe || exit 1
g++ drawadcNoise.C $(root-config --libs --cflags) -g -o drawadcNoise.exe || exit 1
mkdir -p rootfiles
mkdir -p plots
[[ ${1:-0} -eq 1 ]] && { for i in ${RUN[@]} ; do ./adcNoise.exe ${FILE_ZeroBias[i]} ${FILE_EmptyBX[i]} ${OUTPUT[i]} ; done ; }
[[ ${2:-0} -eq 1 ]] && { for i in ${RUN[@]} ; do ./drawadcNoise.exe ${OUTPUT[i]}.root "${TAG[i]}" ; done ; }
rm drawadcNoise.exe
rm adcNoise.exe
| true |
0ebabf7ed6614352be4f641c6eea9b4240bafb11
|
Shell
|
KyussCaesar/home
|
/.bashrc
|
UTF-8
| 2,946 | 3.3125 | 3 |
[] |
no_license
|
#
# ~/.bashrc
#
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
# If bashrc already run for this shell, don't do it again
[[ ! -z "$BASHRC_SET" ]] && return
# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
HISTCONTROL=ignoreboth
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# colored GCC warnings and errors
export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
# Use vim as man pager (this way, you can use tags to navigate to related topics.)
# (Doesn't always work though)
export MANPAGER="env MAN_PN=1 vim -M +MANPAGER -c \"set colorcolumn=0 | set foldcolumn=0 | set nonumber\" -"
# prompt on overwrite
alias rm="rm -vi"
alias mv="mv -vi"
alias cp="cp -vi"
# aliases for ls, grep, to get colour output
alias ls='ls -Fh --color=auto --group-directories-first'
alias ll='ls -l'
alias la='ls -A'
alias grep='grep --color=auto'
alias more="more -dlsup"
# do you know how many times I've used GhostScript? Literally never.
# do you know how many times I've wanted to count graph components? Literally never.
alias gs="git status"
alias gc="git commit"
alias ga="git add"
alias gau="git add -u"
alias gauc="git add -u && git commit"
alias gaucp="git add -u && git commit && git push"
alias gco="git checkout"
# I use vim too much
alias :q="echo \"This is bash, not vim!\""
# quickly open config
alias vrc='vim ~/.vimrc'
alias brc='vim ~/.bashrc'
# move to `p`arent `d`irectory
alias pd="cl .."
# Returns "git:branchname"
parse_git_branch () {
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/git:\1 /'
}
# Shell Prompt
# Coloured with git branch
pre_ps1='\[\033[01;32m\]\u@\h\[\033[00m\] \[\033[01;34m\]\W\[\033[00m\] '
export PS1=${pre_ps1%?}' $(parse_git_branch)\n$ '
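# Resulting prompt looks like (hypothetical user/host/branch):
#   user@host dir git:master
#   $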
# open vim help
velp () {
vim -c "help $1 | only"
}
# run something in background with output piped to null
# TODO: make name show up properly
quietly () {
$@ &> /dev/null &
}
# vim keybindings
set -o vi
# Reminders
if [ -f ~/reminders ]; then
echo "Reminders:"
echo
cat ~/reminders
fi
# Place any machine-specific commands in here.
if [ -f ~/.bash ]; then
source ~/.bash
fi
# Does ls immediately after cd
# 'cl' short for "Change (directory) then List contents"
cl () {
cd "$*"
ls
# update git branch name in prompt
echo
}
# mkdir and cl
mkcl () {
mkdir "$1"
cl "$*"
}
export EDITOR=vim
# set so that bashrc is loaded only once
BASHRC_SET=1
alias disk-usage="du -sh -- * | sort -h"
debian () {
docker run -it debian:buster "$@"
}
arch () {
docker run -it archlinux "$@"
}
export PATH="$PATH:/Users/antonysouthworth/.local/bin"
| true |
8bac2ccec812b686ead15f7d6472f18a46019031
|
Shell
|
fernandoandrade/ansible-kops
|
/deploy_dashboard.sh
|
UTF-8
| 1,095 | 3.265625 | 3 |
[] |
no_license
|
#!/bin/bash
printf '\nInstall Kubernetes dashboard from Github repository. Check at https://github.com/kubernetes/dashboard\n\n'
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.3.1/aio/deploy/recommended.yaml
printf '\nCreate a service account in the default namespace.\n\n'
kubectl create serviceaccount dashboard -n default
printf '\nAdd the cluster binding rules to your dashboard account created in the default namespace.\n\n'
kubectl create clusterrolebinding dashboard-admin -n default --clusterrole=cluster-admin --serviceaccount=default:dashboard
printf '\nCopy the secret token from the output and enter in the dashboard login page.\n\n'
kubectl get secret $(kubectl get serviceaccount dashboard -o jsonpath="{.secrets[0].name}") -o jsonpath="{.data.token}" | base64 --decode
printf '\n'
printf '\n1. Change the the Kubernetes dashboard service type to LoadBalancer: kubectl -n kubernetes-dashboard edit svc kubernetes-dashboard'
printf '\n2. Make sure the service type changed to LoadBalancer: kubectl -n kubernetes-dashboard get svc'
printf '\n'
| true |
bf6b6c2de2051d07c887110020bfd5d3f0fe154c
|
Shell
|
jbsouthe/cloudComputing
|
/cf_create_user
|
UTF-8
| 485 | 2.796875 | 3 |
[] |
no_license
|
#!/bin/bash
# $1 = username and password
# $2 = org
# $3 = space
# $4 = space role
cf create-user $1 $1
cf set-space-role $1 $2 $3 $4
USERNAME=admin
HOSTNAME=node8
MYSQLSTART='mysql -u root robocode -e "insert into roles (userid, role, space, password) values('
VALUES="'"$1"','"$4"','"$2$3"', encrypt('"$1"','JBS')"
MYSQLEND=');"'
SCRIPT=$MYSQLSTART$VALUES$MYSQLEND
#echo $SCRIPT
#echo root | $SCRIPT
ssh -o StrictHostKeyChecking=no -l ${USERNAME} ${HOSTNAME} "${SCRIPT}"
exit
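# Example (hypothetical values): ./cf_create_user alice myorg dev SpaceDeveloper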
| true |
2d50baeb4988652c351256faedb78267f15ad6b9
|
Shell
|
lsaffie/dotfiles
|
/activate
|
UTF-8
| 163 | 2.84375 | 3 |
[] |
no_license
|
#!/bin/bash
#APP="${1}"
APP=$*
function activate_app {
/usr/bin/osascript <<END_SCRIPT
tell application "${APP}"
activate
end tell
END_SCRIPT
}
activate_app
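# Example: ./activate Safari
# (APP=$* allows multi-word names too, e.g. ./activate Google Chrome)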
| true |
84ed3a372e7cdf148039bea821eab611bb8d20d1
|
Shell
|
podemosaprender/tpl_app_semilla
|
/build_conf/publish.sh
|
UTF-8
| 890 | 3.515625 | 4 |
[] |
no_license
|
#!/bin/bash
npm update
VERSION=$(node --eval "console.log(require('./package.json').version);")
ME=$(node --eval "console.log(require('./package.json').name);")
npm test || exit 1
echo "Ready to publish ${ME} version $VERSION."
echo "Has the version number been bumped?"
read -n1 -r -p "Press Ctrl+C to cancel, or any other key to continue." key
git checkout -b build
export NODE_ENV=release
npm run-script build
echo "Creating git tag v$VERSION..."
git add dist/${ME}-src.js dist/${ME}.js dist/${ME}-src.esm.js dist/${ME}-src.js.map dist/${ME}.js.map dist/${ME}-src.esm.js.map -f
git commit -m "v$VERSION"
git tag v$VERSION -f
git push --tags -f
echo "Uploading to NPM..."
npm publish
git checkout master
git branch -D build
echo "All done."
echo "Remember to run 'npm run-script integrity' and then commit the changes to the master branch, in order to update the website."
| true |
cb6263592df49b4fc67b7d62628b55b80c2e4232
|
Shell
|
luym11/doc_parser
|
/counter.sh
|
UTF-8
| 246 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/bash
l=0
n=0
s=0
touch counterResult
FILES=./*.csv
for f in $FILES
do
	echo "$f" >> counterResult
	wc -w "$f" >> counterResult
	l=$(wc -l < "$f")
	n=$(( n + 1 ))
	s=$(( s + l ))
done
echo "$n files in total, with $s lines in total"
| true |
9e2165c7e14378a4e2d472faa304e656477574df
|
Shell
|
clinei/incgame-pages-deploy
|
/scripts/parse_opts.sh
|
UTF-8
| 438 | 3.5 | 4 |
[] |
no_license
|
## WIP: do not use
ARGS=$(getopt -o "p" -l "push:" -n $0 -- "$@")
if [ $? -ne 0 ] ; then
exit 1;
fi
SHOULD_PUSH=false
# TODO: better initial value?
opt=
while [ "$opt" != "-1" ] ; do
opt=$1
case opt in
-h|--help)
# TODO: print usage
echo Help
exit
;;
-p|--push)
SHOULD_PUSH=true
;;
--) break ;;
esac
printf "${opt}"
# echo $([ "$opt" != "\n" ] )
# shift
done
printf "$SHOULD_PUSH"
export SHOULD_PUSH
| true |
5c48fd450a34b445acc8e08ae94e6c67e864d523
|
Shell
|
benjaffe/chrome-okc-plugin
|
/plugin-old/lib/knockout/build/tools/check-trailing-space-linux
|
UTF-8
| 498 | 3.546875 | 4 |
[
"MIT",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-or-later",
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"GPL-2.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-generic-exception",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
#!/bin/sh
# Check that all files have trailing spaces stripped
OutTrailingSpaceListFile='output/knockout-files-to-clean.txt'
cd ..
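# List offending lines (with file name and line number) in source file types,
# excluding generated build output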
grep -nrI '[ ]$' `find * | grep -E "\.(js|html|css|bat|ps1)$"` | grep -v "^build/output/" > build/$OutTrailingSpaceListFile
cd build
if [ -s $OutTrailingSpaceListFile ]
then
echo "The following files have trailing spaces that need to be cleaned up:"
echo ""
cat $OutTrailingSpaceListFile
rm $OutTrailingSpaceListFile
exit 1
fi
rm $OutTrailingSpaceListFile
| true |
0cd4a068db6b6ab86660af05378a3de4cf1588fb
|
Shell
|
andyceo/bash_scripts
|
/drushsync.sh
|
UTF-8
| 1,910 | 3.46875 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Sync the site's local development copy with production one
# Author Name: Andrey Andreev aka andyceo
# Author Email: andyceo@yandex.ru
# Author Homepage: http://andyceo.ruware.com
# NOTE: under development.
# IMPORTANT! Before using this script, configure your drush aliases correctly.
# The script helps deploy a copy of a remote site onto a local machine for development.
# @arguments:
# $1 - from - the source site. drush site alias.
# $2 - to - the destination site. drush site alias.
# Example: ./drushsync.sh @example.com @example.local
WWW_USER=www-data
from=$1
to=$2
# find out the path to the files directory on the development site
files=`drush $to dd %files`
# @todo: validate and filter these values
# sync the files first, after temporarily taking back the needed permissions
sudo chown -R $USER $files
drush rsync $from:%files $to:%files --yes
# then the database
drush $to sql-drop --yes
drush sql-sync --no-cache $from $to --yes
# @todo: replace occurrences of $from with $to in the database
# sanitize user 1 by hand; drush's own sanitize does not work here for some reason
# (strip md5sum's trailing " -" marker so only the hash lands in the password field)
PASS_HASH=$(echo -n '123' | md5sum | cut -d' ' -f1)
drush $to sql-query "UPDATE {users} SET name='admin', pass='$PASS_HASH' WHERE uid=1"
# set the correct path to the files directory
drush $to vset --yes file_directory_path $files
# set the correct owner on %files; after rsync it would be $USER
sudo chown -R $WWW_USER: $files
| true |
f5668139b57a735588d9ac18a05b5f2d7716c290
|
Shell
|
irissun96/nasa-spaceapp-2018
|
/FirePrediction/FirePrediction_model/data_acquisition/get_nyc_data.sh
|
UTF-8
| 2,532 | 3.1875 | 3 |
[] |
no_license
|
#!/bin/bash
NYC_RAW_DIR="../data/raw"
NYC_INTREIM_DIR="../data/intreim"
NYC_PROCESSED_DIR="../data/processed"
CONFIG_FILE="../config/data_sources.json"
#CONTEXTUAL_DATA_FNAMES=( "MapPluto" "NYC_tracts" "CENSUS_ACS_2015" "DOB_complaints" "DOB_violations" "DOB_ecb_violations" "NYC_311_complaints" )
CONTEXTUAL_DATA_FNAMES=( "NYC_streets" ) # "NYC_tracts" "CENSUS_ACS_2015" "DOB_complaints" "DOB_violations" "DOB_ecb_violations" )
FIRE_INCIDENTS_DATA_FNAMES=( "NYC")
DATA_FILES_TO_PROCESS=( "NYC_streets" "NYC_geo_tracts" )
# Getting contextual datasets
for item_name in "${CONTEXTUAL_DATA_FNAMES[@]}" "${FIRE_INCIDENTS_DATA_FNAMES[@]}";
do
REGX_PASS='.CONTEXTUAL_DATA[] | select(.NAME=="'$item_name'")'
NYC_ITEM_URL="$(cat "$CONFIG_FILE" | jq "$REGX_PASS" | jq ".URLs[0]" | tr -d '"' )"
NYC_ITEM_FNAME="$(cat "$CONFIG_FILE" | jq "$REGX_PASS" | jq ".FNAME" | tr -d '"' )"
# Download the file and store it in the RAW directory
echo "Downloading: ", $NYC_ITEM_FNAME
wget -O $NYC_RAW_DIR/$NYC_ITEM_FNAME $NYC_ITEM_URL
done
# Getting city's fire incidents' data
for item_name in "${FIRE_INCIDENTS_DATA_FNAMES[@]}";
do
REGX_PASS='.FIRE_INCIDENTS[] | select(.NAME=="'$item_name'")'
NYC_ITEM_URL="$(cat "$CONFIG_FILE" | jq "$REGX_PASS" | jq ".URLs[0]" | tr -d '"' )"
NYC_ITEM_FNAME="$(cat "$CONFIG_FILE" | jq "$REGX_PASS" | jq ".FNAME" | tr -d '"' )"
# Download the file and store it in the RAW directory
echo "Downloading: ", $NYC_ITEM_FNAME
wget -O $NYC_RAW_DIR/$NYC_ITEM_FNAME $NYC_ITEM_URL
done
# Transforming
for item_name in "${DATA_FILES_TO_PROCESS[@]}"
do
REGX_PASS='.CONTEXTUAL_DATA[] | select(.NAME=="'$item_name'")'
ITEM_FNAME="$(cat "$CONFIG_FILE" | jq "$REGX_PASS" | jq ".FNAME" | tr -d '"' )"
ITEM_DIR="$(cat "$CONFIG_FILE" | jq "$REGX_PASS" | jq ".DIR" | tr -d '"' )"
# Unzip the tract file
unzip $NYC_RAW_DIR/$ITEM_FNAME -d $NYC_INTREIM_DIR/$ITEM_DIR/$item_name
# Find the name of the unzipped file
filei="$(find $NYC_INTREIM_DIR/$ITEM_DIR/$item_name/ -name "*.shp" -print)"
NYC_SHP_FNAME=`basename $filei .shp`
# Convert to GeoJSON
# echo "Preparing GeoJSON file for item: ", $item_name
ogr2ogr -f GeoJSON $NYC_INTREIM_DIR/$NYC_SHP_FNAME".geojson" $filei
# # # convert to SQL
# echo "Preparing SQL file for item: ", $item_name
# shp2pgsql -a -s 2263 -D -g geom -N abort $filei $item_name >> $NYC_SQL_DIR/$item_name."sql"
# echo "Preparing CSV file for item: ", $item_name
ogr2ogr -f "CSV" -t_srs EPSG:4326 -lco GEOMETRY=AS_WKT $NYC_INTREIM_DIR/$NYC_SHP_FNAME".csv" $filei
done
| true |
44b223516235c0eb769aca3b94828a6933a9ca04
|
Shell
|
mtikekar/snapshot
|
/snapshot
|
UTF-8
| 2,423 | 4.3125 | 4 |
[] |
no_license
|
#!/bin/bash
# btrfs filesystem with options in fstab:
# nofail,user,user_subvol_rm_allowed,compress=lzo
# mount cmd: mount /dev/...
# unmount : umount /dev/...
set -o errexit
set -o nounset
cmd=$(basename $0)
do_help() {
cat <<EOF
usage:
$cmd help: print this message
$cmd init <src>: initialize current dir to store backups of <src>
$cmd add: copy files from <src> to backup
$cmd status: show changes between <src> and backup
$cmd commit [<tag>]: create new snapshot of backup. default <tag> is present timestamp.
$cmd status --cached: show changes between backup and last snapshot
$cmd delete <dir>: delete a snapshot
$cmd keep [<n>]: keep latest <n> snapshots. default <n> is 10.
EOF
}
do_init() {
[ -z "${1:-}" ] && { echo "usage: $cmd init <src>"; exit 2; }
ln -s "$1" .src
btrfs subvolume create .latest
}
check_inited() {
[ -d .src ] || { echo ".src not found. run init first."; exit 1; }
[ -d .latest ] || { echo ".latest not found. run init first."; exit 1; }
}
rmexp='(building file list|sent \d+ bytes received \d+ bytes total size \d+)$'
log=.latest.log
do_status() {
check_inited
if [ "${1:-}" = --cached ]; then
[ -f $log ] && exec grep -Pv "$rmexp" $log
echo "No changes to backup since last snapshot"
else
rsync --del -a --dry-run --itemize-changes .src/ .latest
fi
}
do_add() {
check_inited
rsync --del -a --inplace --no-whole-file --info=progress2 --log-file=$log \
.src/ .latest
}
do_commit() {
check_inited
cid="${1:-$(date +%F-%T)}"
[ -e "$cid" ] && { echo "$cid exists. Choose another name."; exit 1; }
# see if any changes
if grep -Pqsv "$rmexp" $log; then
btrfs subvolume snapshot .latest "$cid"
touch "$cid" # change timestamp of snapshot for sorting
btrfs property set -ts "$cid" ro true
mv $log ".$cid.log"
else
echo "No changes to backup since last snapshot"
fi
}
do_delete() {
[ -z "${1:-}" ] && { echo "missing snapshot name to delete"; exit 2; }
read -p "Delete snapshot $1? [y/n] " yn
if [ $yn = y ]; then
btrfs property set -ts "$1" ro false
btrfs subvolume delete "$1"
fi
}
do_keep() {
check_inited
n=${1:-10}
for snap in $(ls -rc | head -n -"$n"); do
do_delete "$snap"
done
}
[ "$(type -t do_${1:-})" = function ] || { do_help; exit 2; }
do_$1 "${@:2}"
| true |
3a6d08f5c96c426a2561a07c40ad7de7ebecca9a
|
Shell
|
joenery/queue
|
/tar-list
|
UTF-8
| 743 | 3.5 | 4 |
[] |
no_license
|
#!/bin/bash
#takes a list of directories and makes a list of
#tar commands
LISTFILE="$1"
COMMAND="tar -zcvf"
SRC="/gale/netapp/seq4/illumina_runs/fastqs" #path to parent of source dir
DEST="/gale/raidix/rdx-1/archive" #dir to copy source into
LOG="/gale/netapp/home/seq/logs/rdx-1/tar"
while getopts ":l:s:d:L" opt; do
case $opt in
l) LISTFILE=$OPTARG;;
s) SRC=$OPTARG;;
d) DEST=$OPTARG;;
L) LOG=$OPTARG;;
\?)
echo "Invalid option: -$OPTARG" >&2
;;
esac
done
while read folder
do echo "$COMMAND $DEST/$folder.tar.gz $SRC/$folder > $LOG/$folder.txt"
done < $LISTFILE
#tar -zcvf /gale/gale_temp/seq5/170510_K00161_0134_BHK3WCBBXX.tar.gz /gale/netapp/seq5/illumina_runs/170510_K00161_0134_BHK3WCBBXX
| true |
c8c7f088bc3245421f2f7b5609e67ae36e21c237
|
Shell
|
tyoshida444/dotfiles
|
/.bashrc
|
UTF-8
| 950 | 2.8125 | 3 |
[] |
no_license
|
PS1="\h:\W$\[ \]"
#-------- run ls right after cd -----------
function cdls() {
    \cd "$1";
ls -G;
}
alias cd=cdls
#-------- colorize ls output --------
alias ls='ls -G'
export LSCOLORS=gxfxcxdxbxegedabagacad
### Added by the Heroku Toolbelt
export PATH="/usr/local/heroku/bin:$PATH"
#-------- avoid Java mojibake (force UTF-8) --------
alias javac='javac -J-Dfile.encoding=UTF-8'
alias java='java -Dfile.encoding=UTF-8'
#-------- Kaoriya --------
#export EDITOR=/Applications/MacVim.app/Contents/MacOS/Vim
#alias vim='env LANG=ja_JP.UTF-8 /Applications/MacVim.app/Contents/MacOS/Vim "$@"'
#-------- share history across multiple terminals --------
function share_history {
history -a
history -c
history -r
}
PROMPT_COMMAND='share_history'
shopt -u histappend
export HISTSIZE=300000
export PATH="$HOME/.rbenv/bin:$PATH"
eval "$(rbenv init -)"
PS1="$PS1"'$([ -n "$TMUX" ] && tmux setenv TMUXPWD_$(tmux display -p "#D" | tr -d %) "$PWD")'
| true |
275866fe9a81c5448af7e65509556eb02bb6821f
|
Shell
|
hajimetch/prezto
|
/runcoms/zshrc
|
UTF-8
| 4,577 | 3.015625 | 3 |
[
"MIT"
] |
permissive
|
#
# Executes commands at the start of an interactive session.
#
# Authors:
# Sorin Ionescu <sorin.ionescu@gmail.com>
#
# Source Prezto.
if [[ -s "${ZDOTDIR:-$HOME}/.zprezto/init.zsh" ]]; then
source "${ZDOTDIR:-$HOME}/.zprezto/init.zsh"
fi
# Customize to your needs...
#
########################################
# Options
# make Japanese file names displayable
setopt print_eight_bit
# disable the beep
setopt no_beep
# disable flow control
setopt no_flow_control
# do not exit zsh on Ctrl+D
setopt ignore_eof
# treat everything after '#' as a comment
setopt interactive_comments
# pushd automatically on cd
setopt auto_pushd
# do not push duplicate directories
setopt pushd_ignore_dups
# share history between concurrently running zsh sessions
setopt share_history
# do not keep duplicated commands in history
setopt hist_ignore_all_dups
# do not record commands starting with a space in history
setopt hist_ignore_space
# strip superfluous blanks when saving to history
setopt hist_reduce_blanks
# use extended wildcard globbing
setopt extended_glob
# pack completion candidates tightly when listing
setopt list_packed
# also record timestamps in $HISTFILE
setopt extended_history
########################################
# Key bindings
# use emacs-style key bindings
bindkey -e
########################################
# Command history search
# file where history is saved
HISTFILE=$HOME/.zhistory
# number of history entries kept in memory
HISTSIZE=1000000
# number of history entries saved in $HISTFILE
SAVEHIST=1000000
# enable history search with M-p / M-n while typing a command
autoload history-search-end
zle -N history-beginning-search-backward-end history-search-end
zle -N history-beginning-search-forward-end history-search-end
bindkey "^[p" history-beginning-search-backward-end
bindkey "^[n" history-beginning-search-forward-end
# print the full history list
alias hall="history -E -i 1"
########################################
# Aliases
# global aliases
alias -g L='| less'
alias -g G='| grep'
# C copies standard output to the clipboard
alias -g C='| pbcopy'
# list the git shortcuts
alias zgit='cat ~/.zprezto/modules/git/alias.zsh | grep "alias "'
# ls aliases
export CLICOLOR=1
alias ls='ls -GF' # ls base command
alias ll='ls -l'
alias la='ls -la'
alias l='ls -a'
alias le='exa -lha'
# brew alias
alias brew='PATH=/usr/local/bin:/usr/local/sbin:/usr/bin:/bin:/usr/sbin:/sbin brew'
# locate.updatedb alias
alias updatedb='sudo /usr/libexec/locate.updatedb'
# generate password alias
alias pw='pwgen -1s 12 | tr -d "\n" | pbcopy'
# jupyter lab alias
alias jl='jupyter lab'
########################################
# anyframe
# initial setup
fpath=($HOME/.zsh/anyframe(N-/) $fpath)
autoload -Uz anyframe-init
anyframe-init
# ^J
# browse directory change history with peco
bindkey '^J' anyframe-widget-cdr
autoload -Uz chpwd_recent_dirs cdr add-zsh-hook
add-zsh-hook chpwd chpwd_recent_dirs
# ^R
# browse command execution history with peco
bindkey '^R' anyframe-widget-put-history
# ^O
# insert a file name with peco
bindkey '^O' anyframe-widget-insert-filename
# aw
# launch select-widget
alias aw=anyframe-widget-select-widget
########################################
# git-escape-magic
# initial setup
fpath=($HOME/.zsh/zsh-git-escape-magic(N-/) $fpath)
autoload -Uz git-escape-magic
git-escape-magic
########################################
# iTerm2 Shell Integration setup
if [ "$TERM" = "xterm-256color" ]; then
test -e "$HOME/.iterm2_shell_integration.zsh" && source "$HOME/.iterm2_shell_integration.zsh"
fi
########################################
# anaconda setup
# added by Anaconda3 2019.10 installer
# >>> conda init >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$(CONDA_REPORT_ERRORS=false '/Users/hajimetch/opt/anaconda3/bin/conda' shell.bash hook 2> /dev/null)"
if [ $? -eq 0 ]; then
\eval "$__conda_setup"
else
if [ -f "/Users/hajimetch/opt/anaconda3/etc/profile.d/conda.sh" ]; then
. "/Users/hajimetch/opt/anaconda3/etc/profile.d/conda.sh"
CONDA_CHANGEPS1=false conda activate base
else
\export PATH="/Users/hajimetch/opt/anaconda3/bin:$PATH"
fi
fi
unset __conda_setup
# <<< conda init <<<
# show the conda environment name in the prompt
autoload -U promptinit; promptinit
prompt pure
| true |
648e6f0a026a9f11de1555fbcd524f51c58af354
|
Shell
|
nevesnunes/env
|
/linux/bin/run-rofi.sh
|
UTF-8
| 254 | 2.84375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
set -eu
ROFI_VERSION=$(rofi -version | sed 's/[^0-9]*\([0-9]*\.[0-9]*\).*/\1/g')
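# fuzzy matching and -theme need rofi >= 1.4; compare versions numerically via bc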
if [ "$(echo "$ROFI_VERSION >= 1.4" | bc)" = 1 ]; then
exec rofi "$@" -matching fuzzy -theme ~/.local/share/themes/Uhita.rasi
else
exec rofi "$@"
fi
| true |
d512f21496c09132f50c68de2ba1ccfcc9b64c57
|
Shell
|
grassfishgmbh/QtPlayer-deps
|
/build-deps/vlcBlobs.sh
|
UTF-8
| 1,325 | 3.78125 | 4 |
[] |
no_license
|
#!/bin/bash
if [ -z "$VERSION" ]; then
VERSION="2.2.3"
fi
JOBROOT=`pwd`
WD="$JOBROOT/VLC/$VERSION"
DUMPBIN_BIN="dumpbin"
LIB_BIN="lib"
rm -rf $WD
rm -f VLC-Blobs.zip
mkdir -p $WD
function create_vlc_blobs_for_arch () {
ARCH=$1
SRCZIP=$2
cd $WD
unzip $JOBROOT/$SRCZIP
mkdir $ARCH
cd $ARCH
cp ../_win32/bin/libvlc* ./
cp -r ../_win32/lib/vlc/plugins plugins
cp -r ../_win32/include include
rm -rf ../_win32
MACHINE="/MACHINE:x86"
if [ $ARCH = "x86_64" ]; then
MACHINE="/MACHINE:x64"
fi
# export libvlccore symbols
$DUMPBIN_BIN /exports libvlccore.dll > libvlccore.def.tmp
rm -f libvlccore.def
echo "EXPORTS" > libvlccore.def
cat libvlccore.def.tmp | awk '/libvlc\_/ {print $4;}' >> libvlccore.def
rm libvlccore.def.tmp
$LIB_BIN /def:libvlccore.def /out:libvlccore.lib $MACHINE
# export libvlc symbols
$DUMPBIN_BIN /exports libvlc.dll > libvlc.def.tmp
rm -f libvlc.def
echo "EXPORTS" > libvlc.def
cat libvlc.def.tmp | awk '/libvlc\_/ {print $4;}' >> libvlc.def
rm libvlc.def.tmp
$LIB_BIN /def:libvlc.def /out:libvlc.lib $MACHINE
}
create_vlc_blobs_for_arch "i686" "vlc-win32.zip"
create_vlc_blobs_for_arch "x86_64" "vlc-win64.zip"
cd $JOBROOT
zip -y -r VLC-Blobs.zip VLC
| true |
29a775e7ff73f303ab432f87824d47a5e41aea8c
|
Shell
|
IAC-InfrastructureAsCode/microk8s
|
/microk8s-resources/wrappers/microk8s-remove-node.wrapper
|
UTF-8
| 833 | 3.140625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -eu
export PATH="$SNAP/usr/sbin:$SNAP/usr/bin:$SNAP/sbin:$SNAP/bin:$PATH"
ARCH="$($SNAP/bin/uname -m)"
export LD_LIBRARY_PATH="$SNAP/lib:$SNAP/usr/lib:$SNAP/lib/$ARCH-linux-gnu:$SNAP/usr/lib/$ARCH-linux-gnu"
export SNAP_NAME
export PYTHONNOUSERSITE=false
source $SNAP/actions/common/utils.sh
exit_if_no_permissions
if [ -e ${SNAP_DATA}/var/lock/clustered.lock ]
then
echo "This MicroK8s deployment is acting as a node in a cluster. Please use microk8s leave."
exit 1
fi
if [ "$#" -eq 0 ]; then
echo "Please provide the node you want to remove."
exit 1
fi
if [ -e "${SNAP_DATA}/var/lock/ha-cluster" ] && [ "$#" -eq 2 ] && ! [ "$2" == "--force" ] ; then
echo "Please provide the node and the optional --force flag."
exit 1
fi
${SNAP}/usr/bin/python3 ${SNAP}/scripts/wrappers/remove_node.py "$@"
| true |
25d1fb897573dd3595fecbfbd852833c93e8b97e
|
Shell
|
MickaelSERENO/Math_Library
|
/install.sh
|
UTF-8
| 1,294 | 3.125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
mkdir -p android_arm_build
mkdir -p android_x86_build
mkdir -p linux_x64_build
DIR=`pwd`
ANDROID_NDK=/opt/android-ndk
#Android ARM
cd android_arm_build
PKG_CONFIG_PATH=$PKG_CONFIG_PATH:$HOME/.local/android/armeabi-v7a/lib/pkgconfig/ cmake ../ -G Ninja -DCMAKE_SYSTEM_NAME=Android -DCMAKE_ANDROID_ARCH_ABI=armeabi-v7a -DANDROID_ABI=armeabi-v7a -DANDROID_NDK=$ANDROID_NDK -DANDROID_PLATFORM=android-21 -DCMAKE_SYSTEM_VERSION=21 -DCMAKE_INSTALL_PREFIX=$HOME/.local/android/armeabi-v7a/ -DCMAKE_CXX_FLAGS="-I $HOME/.local/android/common_abi/include" -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake
ninja install
cd ../
#Android X86
cd android_x86_build
PKG_CONFIG_PATH=$PKG_CONFIG_PATH:$HOME/.local/android/x86/lib/pkgconfig/ cmake ../ -G Ninja -DCMAKE_SYSTEM_NAME=Android -DCMAKE_ANDROID_ARCH_ABI=x86 -DANDROID_ABI=x86 -DANDROID_NDK=$ANDROID_NDK -DCMAKE_SYSTEM_VERSION=21 -DANDROID_PLATFORM=android-21 -DCMAKE_INSTALL_PREFIX=$HOME/.local/android/x86/ -DCMAKE_CXX_FLAGS="-I $HOME/.local/android/common_abi/include" -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake
ninja install
cd ../
#Linux x64
cd linux_x64_build
PKG_CONFIG_PATH=$PKG_CONFIG_PATH:$HOME/.local/lib/pkgconfig/ cmake ../ -G Ninja -DCMAKE_INSTALL_PREFIX=$HOME/.local/
ninja install
cd ../
| true |
44f06c0ccb4afea3ec5566a3a454391ed77c75a1
|
Shell
|
jgrowl/mail
|
/entry.sh
|
UTF-8
| 203 | 2.703125 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
set -e
echo "Configuring mail..."
ansible-playbook /usr/local/bin/main.yml -i localhost, --connection=local
# Create logging FIFO
mkfifo /dev/maillog
echo "Exec'ing $@"
exec "$@"
| true |
55826f8f6b7d5f2584cd5796d88735a079199c70
|
Shell
|
68Kamil68/SO2
|
/lab1/zad5.sh
|
UTF-8
| 253 | 3.34375 | 3 |
[] |
no_license
|
#! /bin/bash
var=0
for i in `ls -l $1| awk '{ if ( $1!~/x/ && $2 ~ 1){print $NF}}'` # pipe ls -l into awk: $1 holds the permissions; if there is no x (and the link count is 1), print the last column - the file name
do
cd $1
echo $i
var=$((var+1))
mv $i $i.${var}.
done
| true |
f191c9214db2a5776bf89d8dcf03fcf6e1a4bc81
|
Shell
|
hackstudio/quant
|
/setup.sh
|
UTF-8
| 1,522 | 3.515625 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
if [[ $# -eq 0 ]]; then
printf "NAME\n\tsetup.sh - Main driver to set up the environment\n"
printf "\nSYNOPSIS\n"
printf "\n\t%-5s\n" "./setup.sh [OPTION]"
printf "\nOPTIONS\n"
printf "\n\t%-5s %-40s\n" "1" "Softwares that have to be installed on your computer before using this package"
printf "\n\t%-5s %-40s\n" "2" "How to install these softwares"
fi
option=$1
case $option in
1) echo "The following software needs to be installed....."
echo " "
echo "Anaconda2"
echo "tushare, minepy, pandas-datareader, sklearn-deap, vnpy, rqalpha"
echo "py-xgboost, TPOT, jupyter_contrib_nbextensions, pyalgotrade"
echo "root_numpy, root_pandas, deap, keras,backtrader, ta-lib"
echo " "
;;
2) echo "How to install these softwares....."
echo " "
echo "conda install py-xgboost, keras"
echo "pip install tushare, minepy, pandas-datareader, TPOT"
echo "pip install root_numpy, root_pandas, deap, sklearn-deap"
echo "pip install backtrader, pyalgotrade"
echo "conda install -c conda-forge jupyter_contrib_nbextensions"
echo "pip install vnpy pymongo msgpack-python websocket-client qdarkstyle"
echo "conda install -c quantopian ta-lib=0.4.9"
echo "conda install bcolz -c https://mirrors.tuna.tsinghua.edu.cn/anaconda/cloud/conda-forge/"
echo "pip install -i https://pypi.tuna.tsinghua.edu.cn/simple rqalpha"
echo " "
;;
esac
| true |
481da8f0cb32690e70e9bd1adc9abd0bc9ea4d3a
|
Shell
|
tim-moody/xsce-factory
|
/scripts/rpi2/cp-sd
|
UTF-8
| 1,457 | 4.3125 | 4 |
[] |
no_license
|
#!/bin/bash -x
# resize the SD card to minimum size and zip it to image directory
# assume /dev/sdg2 is partition to be shrunk
# parameter 1 - output file name
# parameter 2 - optional root device partition otherwise /dev/sdg2
# parameter 3 - optional image directory otherwise /curation/images
# Automatically determine a size for the output disk image
# (including root, swap, and boot partitions).
#
# This is calculated by using resize2fs to shrink, then adding the space
# occupied by previous partitions
# Assumes root is last partition
if [ $# -eq 0 ]; then
echo "Usage: $0 filename (no .img), optional rootfs device (like /dev/sdg), optional image directory (like /curation/images)"
exit 1
fi
FILENAME=$1.img
if [ -z $2 ]; then
PARTITION=/dev/sdg2
else
PARTITION=$2
fi
if [ ! -b $PARTITION ];then
echo "Device $PARTITION not found".
exit 1
fi
if [ -z $3 ]; then
IMAGE_DIR=/curation/images
else
IMAGE_DIR=$3
fi
mkdir -p $IMAGE_DIR
cd $IMAGE_DIR
umount $PARTITION
umount /media/usb*
DEVICE=${PARTITION:0:-1}
PART_DIGIT=${PARTITION: (-1)}
umount $PARTITION
e2fsck -fy $PARTITION
# recalc last sector and read that many sectors from card
last_sector=`parted -s $DEVICE unit s print |tail -2 |head -1| awk '{print $3}'`
last=${last_sector:0:-1}
echo "last sector: $last"
dd if=$DEVICE of=$FILENAME bs=512 count=$last
zip $FILENAME.zip $FILENAME
md5sum $FILENAME > $FILENAME.md5
md5sum $FILENAME.zip > $FILENAME.zip.md5
| true |
b6c852c7b1e523c7aafe6bce64ca4e74cf1dfd05
|
Shell
|
vicious987/old_uni_assignments
|
/linux_course/list3/zad4.sh
|
UTF-8
| 1,828 | 3.765625 | 4 |
[] |
no_license
|
#--color=[never | auto | always] colorize names (never, only when the standard output
#stream is a console, always), cf. the similar option of the ls(1) program;
#-w, --world additionally print the line Hello, world!
color=false
cl_flag=never
cap=false
greeting=false
helpme=false
whatdo="This program outputs strings 'hello arg' per given arg,"
example="Examplary use: hwb -c arg1 arg2 arg3 ... argN"
possible_flags="Supports following flags:
-c --capitalize
    --color <never/auto/always> (prints name in red font)
-g <arg> --greeting <arg>
-h (help)
-v (version)
-w (world) (adds world to args)"
version_str="hwb version 1.0, GNU General Public License"
world=false
welcome_word="Hello"
clr=""
TEMP=`getopt -o cg:hvw --long capitalize,color:,greeting:,help,version,world -- "$@"`
eval set -- "$TEMP"
while true ; do
case "$1" in
	-c|--capitalize) cap=true; shift ;;
	--color)
		case "$2" in
		never)
			clr="" ; color=true; shift 2 ;;
		auto)
			[ -t 1 ] && clr="\e[91m" || clr="" ; color=true; shift 2 ;;
		always)
			clr="\e[91m" ; color=true; shift 2 ;;
		esac ;;
	-g|--greeting) greeting=true ; welcome_word=$2; shift 2 ;;
	-h|--help) helpme=true ; shift ;;
	-v|--version) version=true ; shift ;;
	-w|--world) world=true ; shift;;
	--) shift; break;;
	*) echo $1; echo 'error while parsing arguments' ; exit 1 ;;
esac
done
#echo "capitilize: $cap color: $color greeting: $greeting help: $helpme"
while (( "$#" )) ; do
arg=$1
if [ "$cap" = "true" ] ; then # ^ konwertuje lower na uppercase
arg="${arg^}" # pusty pattern, zpatternuje nam sie do czegokolwiek
fi # czyli do pierwszej litery
echo -e "$welcome_word, $clr$arg\e[0m"
shift
done
if [ "$world" = "true" ] ; then
fi
if [ "$help" = "true" ] ; then
echo "$whatdo"
echo "$example"
echo "$possible_flags"
fi
if [ "$version" = "true" ] ; then
echo "$version_str"
fi
| true |
714559c3b0cf3b462acc717e2e5461101cfa30e0
|
Shell
|
lordkev/ssc-imputation
|
/denovos/get_locus_stats.sh
|
UTF-8
| 1,397 | 3.140625 | 3 |
[] |
no_license
|
#!/bin/bash
source params.sh
OUTDIR=${BASEOUTDIR}/locus_stats
# Get HWE for each locus
for chrom in $(seq ${startchrom} ${endchrom})
do
VCFFILE=/storage/s1saini/hipstr_genomewide/chr${chrom}/hipstr_calls_${chrom}.vcf.gz
/home/mgymrek/workspace/mgymrek-utils/vcf_hwe.py \
--vcf ${VCFFILE} \
--samples ${PARENTS} \
--sim 1000 \
> ${OUTDIR}/ssc_hwe_chr${chrom}.tab
done
# Get number of de novo calls
for chrom in $(seq ${startchrom} ${endchrom})
do
DFFILE=${BASEOUTDIR}/denovofinder/denovofinder_chr${chrom}.vcf.gz
# zcat ${DFFILE} | grep -v "^#" | cut -f 1,2,10- | sed 's/\./0/g' | \
# sed -E -e 's/SSC[0-9]*,SSC[0-9]*:-[0-9]*:-[0-9]*:-[0-9]*,-[0-9]*:-[0-9]*,-[0-9]*\t/1\t/g' | \
# awk '{for(i=3;i<=NF;i++) t+=$i; print $1 "\t" $2 "\t" t; t=0}' > ${OUTDIR}/ssc_denovostats_chr${chrom}.tab
done
# Get stutter params, call rate for each locus
for chrom in $(seq ${startchrom} ${endchrom})
do
VCFFILE=/storage/s1saini/hipstr_genomewide/chr${chrom}/hipstr_calls_${chrom}.vcf.gz
# echo "chrom,pos,inframe_pgeom,inframe_up,inframe_down,outframe_pgeom,outframe_up,outframe_down,an,end,dp,dsnp,nfilt" | sed 's/,/\t/g' > ${OUTDIR}/ssc_gtstats_chr${chrom}.tab
# bcftools query -f '%CHROM\t%POS\t%INFRAME_PGEOM\t%INFRAME_UP\t%INFRAME_DOWN\t%OUTFRAME_PGEOM\t%OUTFRAME_UP\t%OUTFRAME_DOWN\t%AN\t%END\t%DP\t%DSNP\t%NFILT\n' ${VCFFILE} >> ${OUTDIR}/ssc_gtstats_chr${chrom}.tab
done
| true |
d6e2f7a3728f7e4c2b4dfe95c4a40e61705a30ca
|
Shell
|
digitalsanity/i915ovmfPkg
|
/test
|
UTF-8
| 1,493 | 2.65625 | 3 |
[] |
no_license
|
#!/bin/bash
cd ../edk2
. ../edk2/edksetup.sh
#build -b DEBUG -p i915ovmfPkg/i915ovmf.dsc || exit
build -b RELEASE -p i915ovmfPkg/i915ovmf.dsc || exit
cd ../i915_simple
cp ../edk2/Build/i915ovmf/RELEASE_GCC5/X64/i915ovmf.rom ./ || exit
#cp ../edk2/Build/i915ovmf/DEBUG_GCC5/X64/i915ovmf.rom ./ || exit
if [ -e /sys/bus/pci/devices/0000:00:02.0/2aee154e-7d0d-11e8-88b8-6f45320c7162 ]
then
true
else
modprobe kvmgt || exit
#sudo dd if=/sys/class/drm/card0-HDMI-A-1/edid of=/sys/class/drm/card0/gvt_edid bs=128 count=1
echo 2aee154e-7d0d-11e8-88b8-6f45320c7162 > /sys/bus/pci/devices/0000:00:02.0/mdev_supported_types/i915-GVTg_V5_4/create || exit
fi
# Create an UEFI disk that immediately shuts down the VM when booted
mkdir -p tmpfat
mount disk tmpfat
mkdir -p tmpfat/EFI/BOOT
cp shutdown.efi tmpfat/EFI/BOOT/BOOTX64.EFI
umount tmpfat
rmdir tmpfat
qemu-system-x86_64 -k en-us -name uefitest,debug-threads=on -serial stdio -m 2048 -M pc -cpu host -global PIIX4_PM.disable_s3=1 -global PIIX4_PM.disable_s4=1 -machine kernel_irqchip=on -nodefaults -rtc base=localtime,driftfix=slew -no-hpet -global kvm-pit.lost_tick_policy=discard -enable-kvm -bios /home/hqm/gpdvm/bin/ovmf.fd -display gtk,gl=on,grab-on-hover=on -full-screen -vga none -device vfio-pci,sysfsdev=/sys/bus/pci/devices/0000:00:02.0/2aee154e-7d0d-11e8-88b8-6f45320c7162,addr=02.0,display=on,x-igd-opregion=on,romfile=`pwd`/i915ovmf.rom -device qemu-xhci,p2=8,p3=8 -device usb-kbd -device usb-tablet -drive format=raw,file=disk
| true |
ab7809289cf1efdf435185cd97dc226ab2837a3b
|
Shell
|
perfsonar/i2util
|
/I2util/I2util/unibuild-packaging/deb/tests/aespasswd
|
UTF-8
| 293 | 2.6875 | 3 |
[
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh -e
FILE=$AUTOPKGTEST_TMP/aesfile
echo test | aespasswd -n -f $FILE autotest 2>&1
grep autotest $FILE
echo User added: OK
echo test2 | aespasswd -f $FILE autotest 2>&1
grep autotest $FILE
echo User modified: OK
aespasswd -f $FILE -d autotest
test ! -s $FILE
echo User deleted: OK
| true |
7540af9c4f5f1198407aff4c4683c665943ad0f9
|
Shell
|
itamblyn/scripts
|
/follow_qbz.sh
|
UTF-8
| 127 | 2.625 | 3 |
[] |
no_license
|
#!/bin/bash
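# Print total-energy convergence: take the first 'Conv' value as the reference
# and print each step's difference, converted from Hartree to eV (~27.2 eV/Ha)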
etot=`grep Conv output.out | head -1 | awk '{print $2}'`
awk -v e=$etot '/Conv/{print ($2 - e)*27.2}' output.out
| true |
697ad3075559cc03e57b5ae3b23fa5abba80f8af
|
Shell
|
james-pirozzolo/twitter-sentiment-analysis
|
/create_venv.sh
|
UTF-8
| 390 | 2.578125 | 3 |
[] |
no_license
|
#!/bin/bash
# this installs the virtualenv module
python3 -m pip install virtualenv
# this creates a virtual environment named "env"
python3 -m venv env
# this activates the created virtual environment
source env/bin/activate
# updates pip
pip install -U pip
# this installs the required python packages to the virtual environment
pip install -r requirements.txt
echo created environment
| true |
3723b66c67b63e4716f27fade27e1a9f2ac55dd1
|
Shell
|
teja624/home
|
/.zsh/modules/aws/lib/sh/api/ec2/spot_datafeed_subscription_create.sh
|
UTF-8
| 168 | 2.71875 | 3 |
[
"Apache-2.0"
] |
permissive
|
aws_ec2_spot_datafeed_subscription_create() {
local bucket="$1"
shift 1
cond_log_and_run aws ec2 create-spot-datafeed-subscription --bucket $bucket "$@"
}
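# Example (hypothetical bucket): aws_ec2_spot_datafeed_subscription_create my-datafeed-bucket --prefix spot/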
| true |
9df67afbd00272e7d5c8aafb0507e33a722b11d2
|
Shell
|
pr0d1r2/ruby_dev_shell_aliases
|
/is_new_bundler.sh
|
UTF-8
| 428 | 3.421875 | 3 |
[
"MIT"
] |
permissive
|
function is_new_bundler() {
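  # returns 0 ("new") for bundler >= 1.4 (or any 2.x+), 1 otherwise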
local is_new_bundler_VERSION=`bundle --version | cut -f 3 -d ' '`
local is_new_bundler_VERSION_MAJOR=`echo $is_new_bundler_VERSION | cut -f 1 -d .`
local is_new_bundler_VERSION_MINOR=`echo $is_new_bundler_VERSION | cut -f 2 -d .`
if [ $is_new_bundler_VERSION_MAJOR -gt 1 ]; then
return 0
else
if [ $is_new_bundler_VERSION_MINOR -ge 4 ]; then
return 0
fi
fi
return 1
}
| true |
e65851d8295407b2a0017f65667405e93eea4ef8
|
Shell
|
nacinobrown/scripts
|
/mgtscripts/nagios-client-setup.sh
|
UTF-8
| 955 | 2.921875 | 3 |
[] |
no_license
|
#!/bin/bash
## This script configures any rhel based system to be monitored by Nagios
## Install needed packages
yum install -y gcc glibc glibc-common openssl openssl-devel perl wget
mkdir /tmp/nagios && cd $_
wget --no-check-certificate -O nrpe.tar.gz https://github.com/NagiosEnterprises/nrpe/archive/nrpe-3.2.1.tar.gz
tar xzf nrpe.tar.gz
cd nrpe-nrpe-3.2.1/
./configure --enable-command-args
make all
make install-groups-users
make install
make install-config
echo >> /etc/services
echo '# Nagios services' >> /etc/services
echo 'nrpe 5666/tcp' >> /etc/services
make install-init
systemctl enable nrpe.service
sed -i 's/allowed_hosts=127.0.0.1,::1/allowed_hosts=127.0.0.1,192.168.1.190/g' /usr/local/nagios/etc/nrpe.cfg
systemctl start nrpe.service
firewall-cmd --permanent --add-port=5666/tcp
firewall-cmd --reload
systemctl restart nrpe
/usr/local/nagios/libexec/check_nrpe -H 127.0.0.1
| true |
e5e13637434f66216017625a6e8dfaa2de67be59
|
Shell
|
AWS-Devops-Projects/eks-workshop
|
/amplify/static/assets/bootstrap.sh
|
UTF-8
| 8,956 | 3.375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Cloud9 Bootstrap Script
#
# 1. Installs homebrew
# 2. Upgrades to latest AWS CLI
# 3. Upgrades AWS SAM CLI
#
# Usually takes about 8 minutes to complete
set -euxo pipefail
ACCOUNT_ID=$(aws sts get-caller-identity --query 'Account' --output text)
CURRENT_REGION=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone | sed 's/\(.*\)[a-z]/\1/')
RED='\033[0;31m'
YELLOW='\033[1;33m'
NC='\033[0m'
export CDK_VERSION='1.27.0'
export INFOPATH="/home/linuxbrew/.linuxbrew/share/info"
function _logger() {
echo -e "$(date) ${YELLOW}[*] $@ ${NC}"
}
function upgrade_sam_cli() {
_logger "[+] Backing up current SAM CLI"
cp $(which sam) ~/.sam_old_backup
_logger "[+] Installing latest SAM CLI"
# pipx install aws-sam-cli
# cfn-lint currently clashing with SAM CLI deps
## installing SAM CLI via brew instead
brew tap aws/tap
brew install aws-sam-cli
_logger "[+] Updating Cloud9 SAM binary"
# Allows for local invoke within IDE (except debug run)
ln -sf $(which sam) ~/.c9/bin/sam
}
function upgrade_existing_packages() {
_logger "[+] Upgrading system packages"
sudo yum update -y
python3 -V
# sudo update-alternatives --config python
_logger "[+] Upgrading Python pip and setuptools"
python3 -m pip install --upgrade pip setuptools --user
_logger "[+] Installing latest pip"
# _logger "[+] Installing pipx, and latest AWS CLI"
# python3 -m pip install --user pipx
# pipx install awscli
python3 -m pip install --upgrade --user awscli && hash -r
_logger "[+] Installing latest AWS-CLI 2"
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip awscliv2.zip
sudo ./aws/install
}
function upgrade_python() {
_logger "[+] Upgrade Python 3.8"
brew install python3
echo 'alias python="python3.8"' >> ~/.bash_profile
echo 'alias python3="python3.8"' >> ~/.bash_profile
echo 'alias pip="pip3.8"' >> ~/.bash_profile
echo 'alias pip3="pip3.8"' >> ~/.bash_profile
# _logger "[+] Upgrade Python 3.8"
# brew install pyenv
# pyenv -v
# pyenv install 3.8.3
# sudo yum install libssl-dev openssl
# wget https://www.python.org/ftp/python/3.8.3/Python-3.8.3.tgz
# tar xzvf Python-3.8.3.tgz
# cd Python-3.8.3
# ./configure
# make
# sudo make install
# cd ..
# sudo rm -rf Python-3.8.3.tgz Python-3.8.3
# echo 'alias python="python3.8"' >> ~/.bash_profile
# echo 'alias python3="python3.8"' >> ~/.bash_profile
# echo 'alias pip="pip3.8"' >> ~/.bash_profile
# echo 'alias pip3="pip3.8"' >> ~/.bash_profile
}
function upgrade_nodejs() {
_logger "[+] Installing latest Node12 & TypeScript & CDK & CDK8s"
brew install node@12
# sudo yum install -y gcc-c++ make
# curl -sL https://rpm.nodesource.com/setup_12.x | sudo -E bash -
# sudo yum install -y nodejs
# nvm install lts/erbium
# nvm use lts/erbium
# nvm alias default lts/erbium
# nvm uninstall lts/dubnium
npm install -g yarn
npm install -g typescript@latest
npm install -g aws-cdk@${CDK_VERSION} --force
npm i -g cdk8s-cli
brew install aws/tap/copilot-cli
node -v
npm -v
}
function upgrade_ebs_storage() {
_logger "[+] AMZ-Linux2/CenOS EBS Extending a Partition on a T2/T3 Instance"
sudo file -s /dev/nvme?n*
sudo growpart /dev/nvme0n1 1
lsblk
echo "Extend an ext2/ext3/ext4 file system"
sudo yum install xfsprogs
sudo resize2fs /dev/nvme0n1p1
df -h
}
function install_utility_tools() {
_logger "[+] Installing jq gettext bash-completion"
sudo yum install -y jq gettext bash-completion
}
function install_linuxbrew() {
_logger "[+] Creating touch symlink"
sudo ln -sf /bin/touch /usr/bin/touch
_logger "[+] Installing homebrew..."
echo | sh -c "$(curl -fsSL https://raw.githubusercontent.com/Linuxbrew/install/master/install.sh)"
_logger "[+] Adding homebrew in PATH"
test -d ~/.linuxbrew && eval $(~/.linuxbrew/bin/brew shellenv)
test -d /home/linuxbrew/.linuxbrew && eval $(/home/linuxbrew/.linuxbrew/bin/brew shellenv)
test -r ~/.bash_profile && echo "eval \$($(brew --prefix)/bin/brew shellenv)" >>~/.bash_profile
echo "eval \$($(brew --prefix)/bin/brew shellenv)" >>~/.profile
}
function install_kubernetes_tools() {
KUBECTL_VERSION="1.17.7/2020-07-08"
# AWS_IAM_AUTH_VERSION="1.17.7/2020-07-08"
# PACKER_VERSION="3.4.4"
# HELMSMAN_VERSION="3.4.4"
_logger "[+] Install kubectl CLI (Kubernetes 1.17) from https://docs.aws.amazon.com/eks/latest/userguide/install-kubectl.html"
sudo curl --silent --location -o /usr/local/bin/kubectl https://amazon-eks.s3.us-west-2.amazonaws.com/${KUBECTL_VERSION}/bin/linux/amd64/kubectl
sudo chmod +x /usr/local/bin/kubectl
kubectl version --short --client
_logger "[+] Enable kubectl bash_completion"
echo 'source <(kubectl completion bash)' >>~/.bash_profile
echo 'alias k=kubectl' >>~/.bash_profile
echo 'complete -F __start_kubectl k' >>~/.bash_profile
_logger "[+] Install the Helm CLI"
curl -sSL https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash
helm version --short
_logger "[+] Stable Helm Chart Repository"
helm repo add stable https://kubernetes-charts.storage.googleapis.com/
helm search repo stable
_logger "[+] Enable helm bash_completion"
echo 'source <(helm completion bash)' >>~/.bash_profile
echo 'alias h=helm' >>~/.bash_profile
echo 'complete -F __start_helm h' >>~/.bash_profile
# ## Install Helmsman
# curl -L https://github.com/Praqma/helmsman/releases/download/v${HELMSMAN_VERSION}/helmsman_${HELMSMAN_VERSION}_linux_amd64.tar.gz | tar zx && \
# chmod +x helmsman && \
# mv ./helmsman /usr/local/bin/helmsman
# ## Install aws-iam-authenticator
# curl -o aws-iam-authenticator https://amazon-eks.s3.us-west-2.amazonaws.com/${AWS_IAM_AUTH_VERSION}/bin/linux/amd64/aws-iam-authenticator && \
# chmod +x ./aws-iam-authenticator && \
# mv ./aws-iam-authenticator /usr/local/bin/aws-iam-authenticator
_logger "[+] Installing ECS copilot ..."
sudo curl -Lo /usr/local/bin/copilot https://github.com/aws/copilot-cli/releases/download/v0.1.0/copilot-linux-v0.1.0 \
&& sudo chmod +x /usr/local/bin/copilot \
&& copilot --help
_logger "[+] Installing EKS K9s ..."
K9S_VERSION=0.21.5
K9S_TAR_FILENAME=k9s_$(uname -s)_$(uname -m).tar.gz
curl -o /tmp/$K9S_TAR_FILENAME -L -k https://github.com/derailed/k9s/releases/download/v${K9S_VERSION}/$K9S_TAR_FILENAME
tar -xvf /tmp/$K9S_TAR_FILENAME -C /tmp/
sudo mv /tmp/k9s /usr/local/bin/k9s
sudo chmod +x /usr/local/bin/k9s
_logger "[+] Finished the install_kubernetes_tools!"
}
function verify_prerequisites_resources() {
_logger "[+] Verify ACCOUNT_ID & AWS_REGION"
export ACCOUNT_ID=$(aws sts get-caller-identity --output text --query Account)
export AWS_REGION=$(curl -s 169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.region')
export ROLE_NAME="eks-admin-role"
export CLUSTER_NAME="EKS-Cluster"
test -n "$ACCOUNT_ID" && echo ACCOUNT_ID is "$ACCOUNT_ID" || echo ACCOUNT_ID is not set
test -n "$AWS_REGION" && echo AWS_REGION is "$AWS_REGION" || echo AWS_REGION is not set
test -n "$ROLE_NAME" && echo ROLE_NAME is "$ROLE_NAME" || echo ROLE_NAME is not set
_logger "[+] Save ACCOUNT_ID & AWS_REGION to .bash_profile"
echo "export ACCOUNT_ID=${ACCOUNT_ID}" | tee -a ~/.bash_profile
echo "export AWS_REGION=${AWS_REGION}" | tee -a ~/.bash_profile
aws configure set default.region ${AWS_REGION}
aws configure get default.region
_logger "[+] Verify the binaries are in the path and executable"
for command in kubectl jq envsubst aws
do
which $command &>/dev/null && echo "[x] $command in path" || echo "[ ] $command NOT FOUND"
done
_logger "[+] Validate the IAM role eks-admin-role"
aws sts get-caller-identity --query Arn | grep eks-admin-role -q && echo "IAM role valid" || echo "IAM role NOT valid"
}
function install_cdk_microservices_labs() {
_logger "[+] install_cdk_microservices_labs"
sudo yum update -y
sudo yum remove -y java-1.7.0-openjdk && sudo yum install -y java-1.8.0-openjdk-devel
git clone --recurse-submodules https://github.com/aws-samples/cdk-microservices-labs.git
}
function main() {
# upgrade_ebs_storage
install_linuxbrew
upgrade_nodejs
upgrade_python
upgrade_existing_packages
upgrade_sam_cli
install_utility_tools
install_kubernetes_tools
verify_prerequisites_resources
install_cdk_microservices_labs
echo -e "${RED} [!!!!!!!!!] Open up a new terminal to reflect changes ${NC}"
_logger "[+] Restarting Shell to reflect changes"
exec ${SHELL}
}
main
| true |
34e951d780bea53166fe2f7ad7cb122a02cefb5f
|
Shell
|
pgnzbl/getKolla
|
/pull.sh
|
UTF-8
| 176 | 2.71875 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
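# Pull every Kolla 'centos-source' Stein image named in results.txt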
for LINE in `cat results.txt`
do
if [[ $LINE =~ "centos-source" ]] && [[ $LINE =~ "stein" ]];
then
echo $LINE;
docker pull 'kolla/'$LINE;
fi
done
| true |
e4e2da11cf381d7af1c99a487e4dedd61af3fa1d
|
Shell
|
de-jcup/sechub
|
/sechub-pds-solutions/owaspzap/10-create-image.sh
|
UTF-8
| 2,072 | 4.125 | 4 |
[
"MIT",
"ANTLR-PD",
"LicenseRef-scancode-generic-exception",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-oracle-openjdk-exception-2.0",
"MPL-1.1",
"MPL-2.0",
"CC-PDDC",
"LicenseRef-scancode-warranty-disclaimer",
"EPL-2.0",
"GPL-2.0-only",
"EPL-1.0",
"CC0-1.0",
"Classpath-exception-2.0",
"Apache-2.0",
"LGPL-2.1-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-public-domain",
"GPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-1.1",
"MPL-1.0",
"CDDL-1.1",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
#!/bin/bash
# SPDX-License-Identifier: MIT
REGISTRY="$1"
VERSION="$2"
BASE_IMAGE="$3"
usage() {
cat - <<EOF
usage: $0 <docker registry> <version tag> [<base image>]
Builds a docker image of SecHub PDS with OWASP ZAP
for <docker registry> with tag <version tag>.
Required: <base image> ; Example: ghcr.io/mercedes-benz/sechub/pds-base
Additionally these environment variables can be defined:
- OWASPZAP_VERSION - OWASP ZAP version to use. Example: 2.12.0
- OWASPZAP_SHA256SUM - sha256sum of OWASP ZAP download
- OWASPZAP_WRAPPER_VERSION - Version of the SecHub PDS-OWASPZAP wrapper jar. Example: 1.0.0
EOF
}
FAILED=false
if [[ -z "$REGISTRY" ]] ; then
echo "Please provide a docker registry server as 1st parameter."
FAILED=true
fi
if [[ -z "$VERSION" ]] ; then
echo "Please provide a version for the container as 2nd parameter."
FAILED=true
fi
if [[ -z "$BASE_IMAGE" ]] ; then
echo "Please provide a SecHub PDS base images as 3rd parameter."
FAILED=true
fi
if $FAILED ; then
usage
exit 1
fi
echo ">> Building \"$REGISTRY:$VERSION\""
BUILD_ARGS="--build-arg BASE_IMAGE=$BASE_IMAGE"
echo ">> - From base image: $BASE_IMAGE"
# Enforce OWASPZAP_SHA256SUM is defined when building a custom version of OWASP ZAP
if [[ ! -z "$OWASPZAP_VERSION" ]] ; then
echo ">> - OWASP-ZAP version: $OWASPZAP_VERSION"
BUILD_ARGS+=" --build-arg OWASPZAP_VERSION=$OWASPZAP_VERSION"
if [[ -z "$OWASPZAP_SHA256SUM" ]] ; then
echo "FATAL: Please define sha256 checksum in OWASPZAP_SHA256SUM environment variable"
exit 1
fi
echo ">> - OWASP-ZAP sha256sum: $OWASPZAP_SHA256SUM"
BUILD_ARGS+=" --build-arg OWASPZAP_SHA256SUM=$OWASPZAP_SHA256SUM"
fi
if [[ ! -z "$OWASPZAP_WRAPPER_VERSION" ]] ; then
echo ">> - SecHub OWASP-ZAP Wrapper version: $OWASPZAP_WRAPPER_VERSION"
BUILD_ARGS+=" --build-arg OWASPZAP_WRAPPER_VERSION=$OWASPZAP_WRAPPER_VERSION"
fi
docker build --pull --no-cache $BUILD_ARGS \
--tag "$REGISTRY:$VERSION" \
--file docker/Owasp-Zap-Debian.dockerfile docker/
docker tag "$REGISTRY:$VERSION" "$REGISTRY:latest"
| true |
e3f1f592a73e54fd0c8d003b3fcf11395f60d945
|
Shell
|
MastersAcademy/devops-course-2019
|
/Lesson 2/vladyslav.volkov/my_script.sh
|
UTF-8
| 1,084 | 3.671875 | 4 |
[] |
no_license
|
#!/bin/bash
if nginx -v >/dev/null 2>&1; then
nginx_ver=$(nginx -v 2>&1)
echo "Removing $nginx_ver"
apt-get purge nginx -y
else
echo "Nginx is not installed"
fi
apt update
apt install curl gnupg2 ca-certificates lsb-release -y
echo "deb http://nginx.org/packages/ubuntu `lsb_release -cs` nginx" \
| sudo tee /etc/apt/sources.list.d/nginx.list
curl -fsSL https://nginx.org/keys/nginx_signing.key | sudo apt-key add -
apt install -y nginx=1.14.2*
mkdir -p /etc/nginx/sites-available /etc/nginx/sites-enabled
sed -i '15iinclude /etc/nginx/sites-enabled/\*.conf;' /etc/nginx/nginx.conf
mv /etc/nginx/conf.d/default.conf /etc/nginx/sites-available/default.conf
ln -sf /etc/nginx/sites-available/default.conf /etc/nginx/sites-enabled/
service nginx restart
curl -X GET 127.0.0.1 | grep -o "Welcome to nginx!" | head -1
nginx_proc=$(ps -lfC nginx | grep "master process" | awk '{print $4}')
nginx_proc_count=$(ps -lfC nginx | grep -c "worker process")
echo "Nginx main process have a PID: $nginx_proc"
echo -e "Number of Nginx working processes: \033[31m$nginx_proc_count\e[0m"
| true |