blob_id (stringlengths 40-40) | language (stringclasses 1 value) | repo_name (stringlengths 4-115) | path (stringlengths 2-970) | src_encoding (stringclasses 28 values) | length_bytes (int64 31-5.38M) | score (float64 2.52-5.28) | int_score (int64 3-5) | detected_licenses (listlengths 0-161) | license_type (stringclasses 2 values) | text (stringlengths 31-5.39M) | download_success (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|---|
1e32bdd00aaedae7fce7cbf77a889b0d1e1b3675
|
Shell
|
MisterSawyer/EOPSY
|
/LAB 1/modify_examples.sh
|
UTF-8
| 1,797 | 3.578125 | 4 |
[] |
no_license
|
#!/bin/bash
performTest()
{
echo;
echo "Currently performing: $1";
echo " --- Output --- ";
time $1;
read -p " --- To continue press enter --- "
echo;
}
declare -a TESTS=(
"./modify.sh -h" # help
"./modify.sh" # no arguments
"./modify.sh -U f4.txt" # wrong argument
"./modify.sh -u f4.txt" # uppercase 1 file
"./modify.sh -l F4.txt" # lower case 1 file
"./modify.sh -u f4.txt f3.txt f2.txt" # uppercase multiple files
"./modify.sh -l F4.txt F3.txt F2.txt" # lowercase multiple files
"./modify.sh -u f0.txt d1" # uppercase file and dir at the same time
"./modify.sh -l D1 F0.txt" # lowercase file and dir at the same time
"./modify.sh s/alfa/beta/ alfa1.txt" # good sed pattern one files
"./modify.sh s/alfa/beta/ alfa2.txt alfa3.txt alfa4.txt" # sed pattern multiple files
"./modify.sh s/beta/alfa/ beta1.txt beta2.txt beta3.txt beta4.txt" # go back
"./modify.sh s/alfa/beta/ alfa1.txt alfa2.txt alfaDIR" # sed pattern file and DIR
"./modify.sh s/beta/alfa/ betaDIR beta2.txt beta1.txt" # go back
"./modify.sh -u d1" # uppercase directory
"./modify.sh -l D1" # lowercase directory
"./modify.sh -r -u d2" # recursive uppercase in directory
"./modify.sh -r -l D2" # recursive lowercase in directory
"./modify.sh -r -l -u d2" #-u and -l in the same line
"./modify.sh -r -u -l D2" #-u and -l in the same line
"./modify.sh -r s/alfa/beta/ d2" # recursive sed pattern
"./modify.sh -r s/beta/alfa/ d2" # go back
"./modify.sh -r -l" #no files
"./modify.sh -r -u -u -u -u -u d2" #multiple arguments
"./modify.sh -r -l -l -l -l -l D2" #multiple arguments
"./modify.sh -u f1.txt" #making test case
"./modify.sh -l F1.txt -u f2.txt" # different options for different files
"./modify.sh -l F2.txt" # go back
)
for test in "${TESTS[@]}";
do
performTest "$test";
done
| true |
35e16d1b0b1aee092d62c61457651f1b16c44274
|
Shell
|
aroth-fastprotect/sketchupToOSG
|
/build/build-one.sh
|
UTF-8
| 1,793 | 3.8125 | 4 |
[
"BSL-1.0"
] |
permissive
|
#!/bin/sh
set -x
# Package version.
VER=1.6.7
# SketchUp version: 2013 means 2013 and earlier (don't go much earlier
# than 8.0 M1 for best results), while 2014 means 2014, and
# theoretically later.
SUVER=$1
# Either mac or win.
PLATFORM=$2
SRC=$(cd $(dirname $0) && cd .. && pwd)
BUILDSCRIPTS=$(cd $(dirname $0) && pwd)
# Where everything should be copied
SCRATCH=$(mktemp -d -t tmp.packagebuild.XXXXXXXXXX)
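# the finish trap below removes the scratch directory on exit, even after a failure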
finish() {
rm -rf "${SCRATCH}"
}
trap finish EXIT
#SCRATCH="${SRC}/Temp"
#mkdir -p "$SCRATCH"
DESTDIR="${SRC}/Output"
mkdir -p "$DESTDIR"
DESTSTEM="sketchupToOpenSceneGraph-v${VER}-su${SUVER}-${PLATFORM}"
DESTBASE="${DESTDIR}/${DESTSTEM}"
echo "Currently building:"
echo " - ${DESTSTEM}"
echo " - from ${SRC}"
echo " - using scratch ${SCRATCH}"
echo
(
cd "$SRC"
### Clean up refuse.
find ./binaries/ -name '*.DS_Store' -type f -delete
find ./osgconv/ -name '*.DS_Store' -type f -delete
###
echo "Copy platform-independent files..."
cp -v "openscenegraph_exportviacollada.rb" "${SCRATCH}"
mkdir -p "${SCRATCH}/osgconv"
cp -v "osgconv/osgconv.rb" "osgconv/LICENSE_1_0.txt" "${SCRATCH}/osgconv/"
cp -v README.mkd "${SCRATCH}/README_openscenegraph_exportviacollada.txt"
if [ $SUVER -lt 2014 ]; then
cp -v "osgconv/fileutils.rb" "${SCRATCH}/osgconv/"
fi
###
echo "Copy platform-dependent files..."
# the -d is to keep symlinks intact for Mac.
cp -v --recursive -d binaries/${PLATFORM}/* "${SCRATCH}/osgconv"
echo "Build archive..."
# Compress to ZIP/RBZ
rm -fv "${DESTBASE}.rbz"
7za a -tzip -r "${DESTBASE}.rbz" "${SCRATCH}"/*
# Rename to RBZ
#mv "${DESTBASE}.zip" "${DESTBASE}.rbz"
echo " - Generated ${DESTBASE}.rbz"
)
echo "Done with ${DESTSTEM}"
echo
| true |
8eaa563599305414e91cb9f63f02d98388ccfe6f
|
Shell
|
Tubbz-alt/LLNMS
|
/test/bash/assets/TEST_llnms_create_asset.sh
|
UTF-8
| 2,397 | 3.890625 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# File: TEST_llnms_create_asset.sh
# Author: Marvin Smith
# Date: 12/25/2013
#
# Purpose: This contains all tests related to the llnms-create-asset script.
#
# Make sure LLNMS has been installed
if [ "$LLNMS_HOME" = "" ]; then
LLNMS_HOME="/var/tmp/llnms"
fi
# Initialize ANSI
. test/bash/unit_test/unit_test_utilities.sh
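# unit_test_utilities.sh is expected to define $ECHO and the ANSI helpers used below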
#-------------------------------------#
#- TEST_llnms_create_asset_01 -#
#- -#
#- Test the created file output -#
#- from llnms-create-asset -#
#-------------------------------------#
TEST_llnms_create_asset_01(){
# Remove all existing assets
rm -r $LLNMS_HOME/assets/*.llnms-asset.xml 2> /dev/null
# Create an asset using the create asset command
$LLNMS_HOME/bin/llnms-create-asset -host 'temp-asset' -ip4 '192.168.0.1' -d 'hello world'
# make sure the file was properly created
if [ ! -e "$LLNMS_HOME/assets/temp-asset.llnms-asset.xml" ]; then
$ECHO "temp-asset.llnms-asset.xml does not exist after creation. Line: $LINENO, File: $0." > /var/tmp/cause.txt
echo "1"
return
fi
# make sure that the file validates properly
xmlstarlet val -q "$LLNMS_HOME/assets/temp-asset.llnms-asset.xml"
if [ $? -ne 0 ]; then
$ECHO "temp-asset.llnms-asset.xml does not validate as proper xml. Line: $LINENO, File: $0." > /var/tmp/cause.txt
echo '1'
return
fi
# check the name
if [ ! "`xmlstarlet sel -t -m '//llnms-asset' -v 'hostname' -n "$LLNMS_HOME/assets/temp-asset.llnms-asset.xml"`" = 'temp-asset' ]; then
echo '1'
$ECHO "hostname is not equal to temp-asset, at Line $LINENO, File: $0." > /var/tmp/cause.txt
return
fi
# check the ip address
if [ ! "`xmlstarlet sel -t -m '//llnms-asset' -v 'ip4-address' -n "$LLNMS_HOME/assets/temp-asset.llnms-asset.xml"`" = '192.168.0.1' ]; then
echo "ip4-address is not equal to 192.168.0.1, at Line $LINENO, File: $0." > /var/tmp/cause.txt
echo '1'
return
fi
# Delete the file
if [ -e "$LLNMS_HOME/assets/temp-asset.llnms-asset.xml" ]; then
rm "$LLNMS_HOME/assets/temp-asset.llnms-asset.xml"
else
echo "Temporary asset does not exist. Line: $LINENO, File: $0." > /var/tmp/cause.txt
echo '1'
return
fi
echo '0'
}
| true |
cb751fcc50b1510c803eea18921df12981da548f
|
Shell
|
gitoleg/my-actions
|
/documentation/build.sh
|
UTF-8
| 257 | 2.640625 | 3 |
[] |
no_license
|
#!/usr/bin/env sh
set -eu
eval $(opam env)
bap="bap.master"
git clone https://github.com/BinaryAnalysisPlatform/bap --single-branch --branch=master --depth=1 $bap
cd $bap
bap_commit=`git rev-parse --short HEAD`
make doc
ls doc
echo "build doc over"
| true |
6d6f24cb0a668eecfe36bd08bcf07c76977ff4df
|
Shell
|
MohammedHAlali/ProbabilisticProgramming
|
/openai/tmux.sh
|
UTF-8
| 524 | 2.609375 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
NAME=openai
if tmux has-session -t $NAME; then
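# -a kills every window except the target, collapsing the session back to the R window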
tmux kill-window -a -t $NAME:R
else
tmux new-session -A -s $NAME \; \
send-keys 'R -q' C-m \; \
rename-window 'R' \; \
new-window -n 'julia' \; \
send-keys 'while true;do julia;sleep 1;done' C-m \; \
new-window -n 'ipython' \; \
send-keys 'ipython --no-banner' C-m \; \
new-window -n 'sqlite' \; \
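# note: no trailing C-m here, so the sqlite3 command is typed into the window but not executed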
send-keys 'sqlite3 data.db' \; \
new-window -n 'bash' \; \
send-keys 'bash' C-m \; \
select-window -t 2
fi
| true |
84a8af4895ec36936cf2abe11d0f7e938ad53885
|
Shell
|
jhorn-redhat/openshift-ansible-contrib
|
/reference-architecture/azure-ansible/3.6/createSP.sh
|
UTF-8
| 1,667 | 3.78125 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# $1 is azure login/user name
# $2 is azure password
# $3 is service principal name
# $4 is service principal password
# handled with playbook prepare
#yum -y install wget
#wget -c https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
#rpm -ivh epel-release-latest-7.noarch.rpm
#yum install -y npm python2-pip gcc python-devel &&
#pip install --upgrade pip && pip install azure-cli
function setup {
tempFile="${HOME}/.az_account"
echo ${tempFile}
echo "az login -u ${az_name} -p ********"
# quote the password normally; the escaped \' passed literal quote characters to az
az login -u "${az_name}" -p "${az_pass}"
az account show > ${tempFile}
subscription_id=$(awk -F'"' '/"id":/ {print $4}' ${tempFile})
tenant_id=$(awk -F'"' '/"tenantId":/ {print $4}' ${tempFile})
echo "Creating SP ${sp_name}"
az ad sp create-for-rbac -n ${sp_name} --role contributor --password ${sp_pass} \
--scope /subscriptions/${subscription_id}
# the app registration exists only after create-for-rbac, so look up its appId afterwards
client_id=$(az ad app show --id "http://${sp_name}" | awk -F'"' '/appId/ {print $4}')
echo "Creating Credentials ~/.azure/credentials"
cat > ~/.azure/credentials <<EOF
[default]
subscription_id=${subscription_id}
tenant=${tenant_id}
client_id=${client_id}
secret=${sp_pass}
EOF
echo "Credentials:"
echo -e " Azure Login: ${az_name}"
echo -e " Subscription ID: ${subscription_id}"
echo -e " Tenant ID: ${tenant_id}"
echo -e " SP Name: ${sp_name}"
echo -e " SP ID: ${client_id}"
}
if [[ $# -lt 4 ]]; then
echo "Error: Requires 4 args"
echo -e "Usage: $0 [azure login] [azure password] [service principal name to create] [service principal password]"
else
az_name=${1}
az_pass=${2}
sp_name=${3}
sp_pass=${4}
setup
fi
| true |
8d874ff4d9c6d49969da8f05195626276a08f4c7
|
Shell
|
heiwa4126/gohttpd01
|
/scripts/build.sh
|
UTF-8
| 290 | 2.96875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
EXEC=gohttpd01
set -ue
go vet ./...
go fmt ./...
# GOOS=linux
# GOARCH=amd64
go build -o "$EXEC" -ldflags="-w -s" main.go
# if there's `upx`, upx compress binary
# `&>` is a bashism and this script runs under /bin/sh, so redirect both streams
# explicitly; testing the command in the if keeps `set -e` semantics intact
if hash upx >/dev/null 2>&1; then
upx "$EXEC"
else
echo "no upx found. skip it."
fi
exit 0
| true |
722ec906e5296159c3494ab12b8df147ce620a20
|
Shell
|
Koikka/mood_cal
|
/build/Android/Debug/build.sh
|
UTF-8
| 953 | 2.78125 | 3 |
[] |
no_license
|
#!/bin/sh
# This file was generated based on node_modules/fuse-sdk/node_modules/@fuse-open/uno/lib/build/UnoCore/1.12.3/Targets/Android/build.sh.
# WARNING: Changes might be lost if you edit this file directly.
set -e
cd "`dirname "$0"`"
##if !#(SDK.Directory:IsSet) || !#(NDK.Directory:IsSet)
#echo "ERROR: Could not locate the Android SDK or NDK." >&2
#echo "" >&2
#echo "These dependencies can be acquired by installing 'android-build-tools':" >&2
#echo "" >&2
#echo " npm install android-build-tools -g" >&2
#echo "" >&2
#echo "After installing, pass --force to make sure the new configuration is picked up." >&2
#echo "" >&2
#echo " uno build android --force" >&2
#echo "" >&2
#exit 1
##endif
##if #(JDK.Directory:IsSet)
export JAVA_HOME="/Library/Java/JavaVirtualMachines/jdk1.8.0_111.jdk/Contents/Home"
##endif
./gradlew assembleDebug "$@"
##if !#(LIBRARY:Defined)
ln -sf app/build/outputs/apk/debug/app-debug.apk Kalenteri.apk
##endif
| true |
9123d5f9d0549b7a8e1407ca3ff49960b5773655
|
Shell
|
rosshamilton29/php7-vagrant
|
/bootstrap.sh
|
UTF-8
| 2,410 | 3.03125 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
echo 'adding swap file'
fallocate -l 2G /swapfile
chmod 600 /swapfile
mkswap /swapfile
swapon /swapfile
echo '/swapfile none swap defaults 0 0' >> /etc/fstab
echo 'updating system'
sudo apt-get update
sudo apt-get upgrade -y
echo 'install development environment'
# apache
sudo apt-get install -y apache2
# php
# software-properties-common provides add-apt-repository, so install it first
sudo apt-get install -y software-properties-common
sudo add-apt-repository -y ppa:ondrej/php
sudo apt-get update
sudo apt-get install -y php7.2
sudo apt-get install -y php7.2-common
sudo apt-get install -y php7.2-cli
sudo apt-get install -y php7.2-readline
sudo apt-get install -y php7.2-mbstring
sudo apt-get install -y php7.2-mcrypt
sudo apt-get install -y php7.2-mysql
sudo apt-get install -y php7.2-xml
sudo apt-get install -y php7.2-zip
sudo apt-get install -y php7.2-json
sudo apt-get install -y php7.2-curl
sudo apt-get install -y php7.2-gd
sudo apt-get install -y php7.2-gmp
sudo apt-get install -y php7.2-mongodb
sudo apt-get install -y libapache2-mod-php7.2
sudo a2enmod rewrite
sudo service apache2 restart
# composer
php -r "copy('https://getcomposer.org/installer', 'composer-setup.php');"
php -r "if (hash_file('SHA384', 'composer-setup.php') === '544e09ee996cdf60ece3804abc52599c22b1f40f4323403c44d44fdfdd586475ca9813a858088ffbc1f233e9b180f061') { echo 'Installer verified'; } else { echo 'Installer corrupt'; unlink('composer-setup.php'); } echo PHP_EOL;"
php composer-setup.php
php -r "unlink('composer-setup.php');"
sudo mv composer.phar /usr/local/bin/composer
# nodejs
curl -sL https://deb.nodesource.com/setup_10.x | sudo -E bash -
sudo apt-get install -y nodejs
sudo npm i -g npm
# mysql
debconf-set-selections <<< 'mysql-server mysql-server/root_password password root'
debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password root'
sudo apt install -y mysql-server
sudo apt install -y mysql-client
sudo apt install -y libmysqlclient-dev
# mongodb
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 9DA31620334BD75D9DCB49F368818C72E52529D4
echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/4.0 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-4.0.list
sudo apt-get update
sudo apt-get install -y mongodb-org
sudo systemctl start mongod
sudo systemctl enable mongod
# Set PT-BR
sudo apt-get install -y language-pack-pt
sudo locale-gen pt_BR.UTF-8
echo 'done, all set'
| true |
61861dd346350662fcacea573af2a4cb61997617
|
Shell
|
Microsoft-Android/nylas-mail
|
/packages/client-app/script/publish-docs
|
UTF-8
| 442 | 2.9375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
# Builds docs and moves the output to gh-pages branch (overwrites)
mkdir -p _docs_output
script/grunt docs
./node_modules/.bin/gitbook --gitbook=latest build . ./_docs_output --log=debug --debug
rm -r docs_src/classes
git checkout gh-pages --quiet
cp -rf _docs_output/* .
# rm -r _docs_output
git add .
git status -s
printf "\nDocs updated! \n\n"
git commit -m 'Update Docs'
git push origin gh-pages
git checkout master
| true |
50ef839c933d735d540118b3c0ddb5a3999b3ce3
|
Shell
|
ttlequals0/misc
|
/tools_utils/netronome/scripts/build-vm-dpdk-apps.sh
|
UTF-8
| 1,214 | 3.296875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
vmipa="$1"
shift 1
applist="$*"
########################################
. $NS_SHARED_SETTINGS
. $NS_PKGS_DIR/shared/vm-utilities.sh
. $NS_PKGS_DIR/shared/dpdk-utils.sh
########################################
# Usage:
#
# build-vm-dpdk-apps.sh 10.1.7.1 trafgen l2fwd
#
########################################
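# note: vmipa was already shifted away, so $1 here is the first application;
# this fires when no applications were given (or --help was the only argument)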
if [ "$1" == "" ] || [ "$1" == "--help" ]; then
echo "Usage: <vmipa> <application> [<application> ...]"
exit 0
fi
########################################
rsync -aq -R $NS_PKGS_DIR/./dpdk/src $vmipa:
# The 'sink' application is in pkgs/dpdk/src
#rsync -aq -R /opt/netronome/samples/dpdk/./sink $vmipa:dpdk/src
rsync -aq /opt/netronome/srcpkg/dpdk-ns/examples/* $vmipa:dpdk/src
scr=""
scr="$scr export RTE_SDK=/opt/netronome/srcpkg/dpdk-ns &&"
scr="$scr export RTE_TARGET=x86_64-native-linuxapp-gcc &&"
for appname in $applist ; do
tooldir="dpdk/src/$appname"
scr="$scr echo 'Build '$appname &&"
scr="$scr export RTE_OUTPUT=\$HOME/.cache/dpdk/build/$appname &&"
scr="$scr mkdir -p \$RTE_OUTPUT &&"
scr="$scr make --no-print-directory -C \$HOME/$tooldir &&"
scr="$scr cp \$RTE_OUTPUT/$appname /usr/local/bin &&"
done
scr="$scr echo 'Success'"
exec ssh $vmipa "$scr"
| true |
08885bdb10e67b54cb6c5e31f0400e870af5378a
|
Shell
|
attrix182/Homeware-LAN
|
/bash/install.sh
|
UTF-8
| 6,153 | 3.671875 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
clear
echo "----------------------------Homeware LAN-------------------------------"
echo "Follow this assistant in order to configure your Homeware installation."
read -r -p "Do you want to start? [y/N] " response
case "$response" in
[yY][eE][sS]|[yY])
echo
echo "Download Homeware-LAN."
echo "---------------------------------------------"
sudo apt-get update
sudo apt install unzip -y
# Download the last version
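# the GitHub releases API reports the latest tag_name, from which the build.zip URL is derived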
LOCATION=$(curl -s https://api.github.com/repos/kikeelectronico/Homeware-LAN/releases/latest | grep "tag_name" | awk '{print "https://github.com/kikeelectronico/Homeware-LAN/releases/download/" substr($2, 2, length($2)-3) "/build.zip"}') ; sudo curl -L -o Homeware-LAN.zip $LOCATION
sudo unzip Homeware-LAN.zip
sudo mv Homeware-LAN/ /usr/share/Homeware-LAN
cd /usr/share
sudo chmod -R 777 Homeware-LAN
cd Homeware-LAN
echo "Installing Homeware-LAN and its dependencies."
echo "---------------------------------------------"
sudo apt install python3-pip -y
cd back
sudo pip3 install -r requirements.txt
cd ../
sudo apt install nginx -y
sudo apt install software-properties-common -y
sudo apt install certbot python3-certbot-nginx -y
sudo apt install curl -y
sudo apt install mosquitto mosquitto-clients -y
sudo apt install redis-server -y
sudo apt install npm -y
echo "Install the new services."
echo "---------------------------------------------"
sudo cp configuration_templates/homeware.service /lib/systemd/system/
sudo cp configuration_templates/homewareMQTT.service /lib/systemd/system/
sudo cp configuration_templates/homewareTasks.service /lib/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable homeware
sudo systemctl enable homewareMQTT
sudo systemctl enable homewareTasks
sudo systemctl start homeware
sudo systemctl start homewareMQTT
sudo systemctl start homewareTasks
echo
read -r -p "Press enter to continue." e
clear
echo "User configuration."
echo "---------------------------------------------"
read -r -p "Admin user: " user
echo $user
read -r -p "Admin password: " password
echo $password
curl -d '{"user":"'$user'", "pass":"'$password'"}' -H "Content-Type: application/json" -X POST http://localhost:5001/api/user/set/
echo
read -r -p "Press enter to continue." e
clear
echo "DDNS."
echo "---------------------------------------------"
echo "If you have a dinamic IP, you should use a DDNS provider"
echo "The process depend on the provider you choose. For example, you can use https://my.noip.com as the provider"
echo
echo "Create an account at no-ip."
echo "Go to Dynamic DNS and create a new Hostname."
echo
echo "\t1 - Choose a unique Hostname."
echo "\t2 - Select DNS Host (A)."
echo "\t3 - Fill the IPv4 Address with your WAN/public IP. You can get it from https://www.whatismyip.com/what-is-my-public-ip-address/"
echo
echo "When the installation will be completed you must configure the DDNS data from the Settings section on Homeware."
echo
read -r -p "Press enter to continue." e
clear
echo "Nginx and your hostname."
echo "---------------------------------------------"
read -r -p "Type your DDNS Hostname (ecample: yourdomain.ddns.com ):" hostname
cd bash
sudo sh confignginx.sh $hostname
cd ../
curl -X GET http://localhost:5001/api/settings/domain/$hostname/
echo
read -r -p "Press enter to continue." e
clear
ip=$(hostname -I | awk '{print $1}') # first address only
echo "Ports."
echo "---------------------------------------------"
echo "Open your router config web page, look for 'Port Forwarding' and create the following rules."
echo "Port 80:"
echo "\tProtocol: TCP"
echo "\tWAN start port: 80"
echo "\tWAN end port: 80"
echo "\tLAN start port: 80"
echo "\tLAN end port: 80"
echo "\tLAN host IP: $ip"
echo
echo "Port 443:"
echo "\tProtocol: TCP"
echo "\tWAN start port: 443"
echo "\tWAN end port: 443"
echo "\tLAN start port: 443"
echo "\tLAN end port: 433"
echo "\tLAN host IP: $ip"
echo
read -r -p "Press enter to continue." e
clear
echo "SSL certificate."
echo "---------------------------------------------"
echo "Google needs to comunicate with the Raspberry Pi via HTTPS, so we need a SSL certicate for Homeware."
echo
echo "Follow the Certbot instructions. When Certbot ask you about redirecting http to https, enable it."
echo
sudo certbot --nginx
echo
read -r -p "Press enter to continue." e
clear
echo "Google."
echo "---------------------------------------------"
echo "Google needs to know where Homeware is."
echo "Follow the instructions at Homeware Docs https://kikeelectronico.github.io/Homeware-LAN/docs/connect-google/"
echo "During the process you will use the following Client Information. You will be able to change it from the Settings page in the future."
echo ""
echo "\tClient ID: 123"
echo "\tClient Secret: 456"
echo "\tAuthorization URL: https://$hostname/auth/"
echo "\tToken URL: https://$hostname/token/"
echo "\tFulfillment URL: https://$hostname/smarthome/"
curl -X GET http://localhost:5001/api/settings/setAssistantDone/
echo
read -r -p "Press enter to continue." e
clear
echo "Done."
echo "---------------------------------------------"
echo "Homeware is ready for you. Visit $hostname"
;;
*)
echo "Ok."
;;
esac
| true |
5f0aaa2501755be2b7f6f4e682c01132e9183d66
|
Shell
|
erkarl/xud-simnet
|
/scripts/xud-simnet-clean
|
UTF-8
| 536 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/bash
delete_dir() {
if ! rm -rf "$1" >/dev/null 2>&1; then
echo "unable to delete directory $1"
exit 1
fi
return 0
}
if ! cd ~/xud-simnet; then
echo "~/xud-simnet is missing"
exit 1
fi
source setup.bash
read -p "Are you sure (y/n)? " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]
then
exit 1
fi
confirm_all_not_running
delete_dir "btcd"
delete_dir "ltcd"
delete_dir "lndbtc"
delete_dir "lndltc"
delete_dir "xud"
delete_dir "xud-wd"
delete_dir "go"
echo "Done. You can now use xud-simnet-install"
| true |
3303f6c11fbbe97ebcc57c978b0b3c8ed8e821d4
|
Shell
|
dtanakax/docker-kibana
|
/start.sh
|
UTF-8
| 217 | 2.78125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
if [ "$1" = "/opt/kibana/bin/kibana" ]; then
sed -i "s|^elasticsearch_url:.*|elasticsearch_url: \"http://${ELASTICSEARCH_PORT_9200_TCP_ADDR}:9200\"|" /opt/kibana/config/kibana.yml
fi
exec "$@"
| true |
7d3757df07d4814dd943451a4a376d8346196002
|
Shell
|
juanespj/er_ros
|
/displays/display_text.sh
|
UTF-8
| 556 | 2.734375 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
stty -F /dev/serial/by-id/usb-MO_LK202-25-USB_0df1XRw1-if00-port0 19200
cat /dev/serial/by-id/usb-MO_LK202-25-USB_0df1XRw1-if00-port0 &
# 0xFE is the Matrix Orbital command prefix; 0x58 ('X') clears the display.
# printf emits the raw bytes (a bare echo would print the literal characters).
printf '\xFE\x58' > /dev/serial/by-id/usb-MO_LK202-25-USB_0df1XRw1-if00-port0
#echo "$1" > /dev/serial/by-id/usb-MO_LK202-25-USB_0df1XRw1-if00-port0
export IP=$(/sbin/ifconfig wlan0 | grep 'inet addr' | cut -d: -f2 | awk '{print $1}')
echo $IP
if [ -z "$1" ]
then
echo "$IP" >> /dev/serial/by-id/usb-MO_LK202-25-USB_0df1XRw1-if00-port0
else
echo "$1" >> /dev/serial/by-id/usb-MO_LK202-25-USB_0df1XRw1-if00-port0
fi
| true |
a9d171cdd848c3f7c07e3ec82e71a79b3f06ac2b
|
Shell
|
binsrc/volatility-plugins
|
/scripts/dumpdir.sh
|
UTF-8
| 132 | 2.984375 | 3 |
[] |
no_license
|
#!/bin/sh
DIR=$1
DUMPDIR=$2
# stream the results so paths containing spaces survive word splitting
find "$DIR" -type f -name "*.so" | while read -r FILE; do
./dumpsymbols.sh "$FILE" "$DUMPDIR"
done
| true |
8ae8509c3390fff4e7260acf2f8a42b0b88f157c
|
Shell
|
flocondetoile/drupal8starter
|
/scripts/fix_local_files_permissions
|
UTF-8
| 459 | 2.734375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# Get document root path
DOCUMENT_ROOT=$(drush status --fields=root --format=list)
# Git repo root
cd $DOCUMENT_ROOT
cd ..
sudo chown -R $USER:www-data web/sites/default/files/
sudo chmod -R g+w web/sites/default/files/
sudo chown $USER:$USER web/sites/default/settings.*
sudo chmod g-w web/sites/default/settings.*
sudo chown -R $USER:www-data config
sudo chmod -R g+w config
sudo chown -R $USER:www-data private
sudo chmod -R g+w private
| true |
8b967649a871abfc1b1b39e4ecaef39f16ebf1db
|
Shell
|
HopeBayMobile/hcfs
|
/build.sh
|
UTF-8
| 3,236 | 3.625 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
##
## Copyright (c) 2021 HopeBayTech.
##
## This file is part of Tera.
## See https://github.com/HopeBayMobile for further info.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
echo -e "======== ${BASH_SOURCE[0]} ========"
repo="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && while [ ! -d .git ] ; do cd ..; done; pwd )"
here="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $repo/utils/common_header.bash
cd $repo
Usage()
{
cat <<EOF
NAME
build.sh - HCFS build script
SYNOPSIS
./build.sh [action] [option]
DESCRIPTION
lib [-d ndk-path]
Build Android Libraries. ndk-path is path to Android-ndk directory
ci-test
Run continuous integration tests.
unittest
Run unit tests.
image 5x|s58a [--userdebug|--user] [--test]
5x|s58a
Build Android image.
--userdebug|--user
build userdebug or user type with --userdebug and --user
respectively. Script will build both type if not specified.
--test
test image build process.
pyhcfs [--test]
Build python library "pyhcfs" at dist/
-h
Show usage
EOF
exit ${1:-0}
}
parse_options()
{
TARGET=
RUN_TEST=0
while [[ $# -gt 0 ]]; do
case $1 in
lib) TARGET+="$1;" ;;
ci-test) TARGET+="$1;" ;;
unittest) TARGET+="$1;" ;;
pyhcfs) TARGET+="$1;" ;;
--test) RUN_TEST=1 ;;
-h) Usage ;;
-d)
if [ $# -lt 2 ]; then
echo "Usage: -d <NDK_PATH>"
Usage 1
fi
export SET_NDK_BUILD="$2";
shift ;;
*)
echo "Invalid option -- $@" 2>&1
Usage 1 ;;
esac
shift
done
}
set_PARALLEL_JOBS()
{
if hash nproc; then
_nr_cpu=`nproc`
else
_nr_cpu=`grep -c processor /proc/cpuinfo`
fi
export PARALLEL_JOBS="-l ${_nr_cpu}.5"
}
unittest()
{
$repo/tests/unit_test/run_unittests
}
ci-test()
{
export CI=1
export CI_VERBOSE=true
$repo/tests/unit_test/run_unittests
$repo/tests/ci_code_report.sh
}
lib()
{
# load NDK_BUILD
# compress with password protection
packages+=" zip"
# speed up compiling
packages+=" ccache"
install_pkg
cd build
set -x
make $PARALLEL_JOBS
exit
}
pyhcfs()
{
$repo/utils/setup_dev_env.sh -m docker_host
docker pull docker:5000/docker_hcfs_test_slave
set -x
if (( "$RUN_TEST" == 1 )); then
PYHCFS_TARGET=test
else
PYHCFS_TARGET=bdist_egg
fi
if [ -e /.docker* ]; then
$repo/utils/setup_dev_env.sh -m docker_host
python3 setup.py $PYHCFS_TARGET
else
docker run --rm -v "$repo":/hcfs docker:5000/docker_hcfs_test_slave \
bash -c "cd /hcfs; umask 000; python3 setup.py $PYHCFS_TARGET"
fi
if [ -d dist/ ]; then
mkdir -p build/out/
rsync -arcv --no-owner --no-group --no-times dist/ build/out/
fi
}
parse_options "$@"
# setup -jN for make
set_PARALLEL_JOBS
# Running target
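# TARGET is a ";"-separated list of the selected actions; eval runs each named
# function in order, defaulting to Usage when none was given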
eval ${TARGET:=Usage}
| true |
3c2a98621cddaf19d1f3093181a3390a3cea8beb
|
Shell
|
agaveplatform/agave-cli
|
/bin/urlencode
|
UTF-8
| 775 | 3.953125 | 4 |
[] |
no_license
|
#!/bin/bash
#
# urlencode
#
# author: deardooley@gmail.com
#
# Pure Bash urlencoding function
# @see https://gist.github.com/cdown/1163649#file-gistfile1-sh-L4
#
_urlencode() {
# urlencode <string>
old_lc_collate=$LC_COLLATE
local LC_COLLATE=C
local length="${#1}"
for (( i = 0; i < length; i++ )); do
local c="${1:i:1}"
case $c in
[a-zA-Z0-9.~_-]) printf '%s' "$c" ;;
*) printf '%%%02X' "'$c" ;;
esac
done
LC_COLLATE=$old_lc_collate
}
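# example (run as a command):
#   urlencode "a b&c"          # prints a%20b%26c
#   echo "a b&c" | urlencode   # same result via stdin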
if [[ -n "$@" ]]; then
_urlencode "$@"
else
oIFS="$IFS"
IFS="[]" read encoded_string
IFS="$oIFS"
unset oIFS
# urlencode the string from stdin or pipe
# if no value, return nothing
[ -n "$encoded_string" ] && _urlencode "$encoded_string"
fi
| true |
bccc63ad491643b89737e6cc539df01567dc272d
|
Shell
|
xueshell/shell_script_example
|
/shell_app/svn_git_ci_deploy_shell/rsync_to_host/rsync_prallel_to_host.sh
|
UTF-8
| 559 | 3.421875 | 3 |
[] |
no_license
|
#!/bin/sh
#
#
HOST=
RSYNC_MODULE=
USER=
PASSWD=
PASSWD_FILE=
#get varlable
if [ $# -eq 0 ]
then
echo " USAGE: $0 -u USER -p PASSWD -h HOST -m MODULE -t PASSWD_FILE "
exit 1
fi
while test $# -gt 0
do
case $1 in
-u | -U )
shift
USER="$1"
;;
-p |-P )
shift
PASSWD="$1"
;;
-h | -H )
shift
HOST="$1"
;;
-m| -M )
shift
RSYNC_MODULE="$1"
;;
-t |-T )
shift
PASSWD_FILE="$1"
;;
* )
echo " error"
break
;;
esac
shift
done
echo $HOST $RSYNC_MODULE $USER $PASSWD_FILE
#parse the ip list file
| true |
03d6e7fd76de70f99024c5a02406166a44fbce61
|
Shell
|
fengw/ABFanalysis
|
/scripts/GkxmfsAnalysis/model_s.gmt
|
UTF-8
| 5,129 | 3.0625 | 3 |
[] |
no_license
|
#!/bin/bash
# plot map that only depends on site location
# specified period (one period)
eid=$1
sgtid=$2
rsid=$3 # rup_scenario_id
vid=$4 # vel_id
prefix=ERF${eid}_SGT${sgtid}_RupVar${rsid}_Vel$vid
flag3=$5 # Model Flag
sigmaD=$6 # weighting function used in ABF # format: 0.10_0.10
wrk=/Users/fengw/work/Project/CyberShake_analysis
mapin=$wrk/scripts/map_input
mapin1=$mapin/Model_Rups$flag3/$prefix
plot0=$wrk/plots/Model_plots
plot1=$plot0/Model$flag3
# add sigma level; mkdir -p creates the whole directory chain at once
plot2=$plot1/$prefix/Gkxmfs/SCs/Sigma$sigmaD
mkdir -p $plot2
# tmp file
tmp_ek=$mapin/tmp.xyz
grdfile=$mapin/tmp.grd
# GMT set
if [ 0 -eq 1 ]
then
gmtset PAGE_ORIENTATION portrait
width=8.5 # for protrait
height=11.0
begin='-P -K'
more='-P -K -O'
end='-P -O'
else
gmtset PAGE_ORIENTATION landscape
width=11.0
height=8.5
begin='-K'
more='-K -O'
end='-O'
fi
unit=i # inch
Annot=14
gmtset BASEMAP_TYPE plain
gmtset ANNOT_FONT_PRIMARY 0
gmtset ANNOT_FONT_SECONDARY 0
gmtset ANNOT_FONT_SIZE_PRIMARY $Annot
gmtset ANNOT_OFFSET_PRIMARY 0.05c
gmtset ANNOT_FONT_SIZE_SECONDARY $Annot
gmtset ANNOT_OFFSET_SECONDARY 0.05c
gmtset HEADER_FONT 0
gmtset HEADER_FONT_SIZE 10
gmtset LABEL_FONT 0
gmtset LABEL_FONT_SIZE 14
# labels
gmtset LABEL_FONT 0
gmtset LABEL_FONT_SIZE 14
gmtset LABEL_OFFSET 0.1c # for x,y,and colorbar label offset
# Set up subplots
periods=( 2.00 3.00 5.00 10.00 )
texts=( '2.0' '3.0' '5.0' '10.0' )
N=4
Nrow=2
Ncol=4
# quote the expressions so `*` and `/` are never glob-expanded
area=$(echo "$width*$height" | bc -l)
tmp0=$(echo "$area/$N" | bc -l)
scale=$(echo "sqrt($tmp0)/1.5" | bc -l)
scale=2.3
dy0=$(echo "$scale/25" | bc -l)
dx0=$(echo "$scale/10" | bc -l)
x0=$(echo "($width-($dx0+$scale)*$Ncol+$dx0)/2" | bc -l)
y0=$(echo "($height-($dy0+$scale)*$Nrow+$dy0)/2+$dy0" | bc -l)
# Set up offset of each subplot
rows=`jot $Nrow $Nrow 1`
cols=`jot $Ncol 1 $Ncol`
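# each offset is relative to the previous panel: the first panel is placed
# absolutely, later panels in a row step right by (dx0+scale), and a new
# row steps back (Ncol-1) panels and down one row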
# setup subplot
icout=0
for irow in ${rows}
do
for icol in ${cols}
do
if [ $irow -eq $Nrow ] && [ $icol -eq 1 ]
then
dy=$(echo "($dy0+$scale)*($irow-1)+$y0" | bc -l)
dx=$x0
offset[icout]="-Y$dy$unit -X$dx$unit"
fi
if [ $irow -eq $Nrow ] && [ $icol -ne 1 ]
then
dy=0.0
dx=$(echo "$dx0+$scale" | bc)
offset[icout]="-Y$dy$unit -X$dx$unit"
fi
if [ $irow -ne $Nrow ] && [ $icol -eq 1 ]
then
dy=$(echo "$dy0+$scale" | bc)
dx=$(echo "($dx0+$scale)*($Ncol-1)" | bc -l)
offset[icout]="-Y-$dy$unit -X-$dx$unit"
fi
if [ $irow -ne $Nrow ] && [ $icol -ne 1 ]
then
dy=0.0
dx=$(echo "$dx0+$scale" | bc)
offset[icout]="-Y-$dy$unit -X$dx$unit"
fi
icout=$((icout+1))
done
done
# ============================
# colormap and colorbar
inc=( 0.04 0.004 ) #inc_x, inc_y (make them same)
clr=$mapin/tmp.cpt
flag0='SCs' # suffix
# grd2cpt
max0=1.8
min0=0.0
dclr=0.1
cpt='-Cocean -I'
cpt='-Chaxby -I'
cpt='-Ccopper -I'
cpt='-Ctopo -I'
cpt='-Cglobe'
cpt='-Cnrwc -I'
cpt='-Cgray -I'
cpt='-Cyellow2red'
#clrtick=-Ba0.3f0.1:"@~s@-C@-":
clrtick=-Ba0.3f0.1
psnam=$plot2/Model.CyberShake.NGAmean.periods.SCs
isub=-1
for flag4 in 'CS11' 'NGA-RMS'
do
for it in `jot $N 0 $(($N-1))`
do
isub=$(($isub+1))
period=${periods[$it]}
ifile=$mapin1/Gkxmfs/SCs/Sigma$sigmaD/CyberShake.NGAs.$period.$flag0
reg=$(minmax -I.01 $ifile)
proj=-JM$scale$unit
if [ $flag4 = 'CS11' ]; then
gawk '{print $1,$2,$7}' $ifile > $tmp_ek
else
gawk '{print $1,$2,sqrt(($3**2+$4**2+$5**2+$6**2)/4)}' $ifile > $tmp_ek
fi
xyz2grd $tmp_ek $reg -I${inc[0]} -F -G$grdfile
triangulate $tmp_ek $reg -I${inc[1]} -G$grdfile > /dev/null
grdfilter $grdfile -D1 -Fg5 -G$grdfile.2
mv $grdfile.2 $grdfile
grd2cpt $grdfile $cpt -S$min0/$max0/$dclr -L$min0/$max0 > $clr
if [ $isub -eq 0 ]; then
grdimage $grdfile $reg $proj -C$clr ${offset[$isub]} $begin > $psnam.ps
pscoast $reg $proj -Dh -W0.5p/0/0/0 -Ba1.0f0.1sWNe -A100 -Na -Slightblue $more >> $psnam.ps
else
grdimage $grdfile $reg $proj -C$clr ${offset[$isub]} $more >> $psnam.ps
pscoast $reg $proj -Dh -W0.5p/0/0/0 -Ba1.0f0.1swne -A100 -Na -Slightblue $more >> $psnam.ps
fi
if [ $flag4 = 'CS11' ];then
echo "-117.75 34.8 16 0 0 LM ${texts[$it]}" | pstext $reg $proj -G0/0/0 $more >> $psnam.ps
fi
if [ $it -eq 0 ]; then
echo "-119.25 33.6 16 0 0 LM $flag4" | pstext $reg $proj -G0/0/0 $more >> $psnam.ps
fi
if [ $isub -eq 6 ]; then
rdxc=0$unit
rdyc=-0.2$unit
lth=$(echo "$scale+0.5" | bc)$unit
wth=$(echo "$scale/30.0" | bc -l)$unit
wth=0.1$unit
align=h
clrbar=-D$rdxc/$rdyc/$lth/$wth$align # colorbar relative location (central/top) and size
psscale $clrbar -C$clr $clrtick $more >> $psnam.ps # linear color scale but log value
fi
done
done
ps2eps -q -l -f -R + -r 300 $psnam.ps
ps2pdf $psnam.ps $psnam.pdf
open $psnam.pdf
rm $psnam.ps
| true |
1e3dd27e075920c878c15c2867452d483be7e398
|
Shell
|
dilawar/SinghAndBhalla_CaMKII_SubunitExchange_2018
|
/exp_SU_Activation_Spread/generate_summary.sh
|
UTF-8
| 496 | 3.25 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -e
set -x
# NOTE: Run cmake with various values of resting ca concentration. Moves the
# data file to subdirectory e.g. ./DATA_CA_BASAL_100NM etc. After that run this
# script to generate the summary data in each directory.
DIRS=$(find . -name "*DATA_CA_BASAL_*NM" -type d)
for _dir in $DIRS; do
echo "Analyze $_dir"
(
cd $_dir
python ../analyze_exp.py
cp summary.png ../${_dir}_summary.png
)
done
python ./rise_time_vs_ca_level.py
| true |
be7f89b2597d981445548f683a69f82509f0dcf1
|
Shell
|
jhlake/Java-projects-Uni
|
/n2_certificadoNotas/bin/mac/build.sh
|
ISO-8859-1
| 1,239 | 2.65625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Universidad de los Andes (Bogotá - Colombia)
# Departamento de Ingeniería de Sistemas y Computación
# Licensed under the Academic Free License version 2.1
#
# Proyecto Cupi2 (http://cupi2.uniandes.edu.co)
# Exercise: n2_certificadoNotas
# Author: Equipo Cupi2 2015
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
stty -echo
# ---------------------------------------------------------
# Ensure the classes and lib directories are created
# ---------------------------------------------------------
cd ../..
mkdir classes
mkdir lib
# ---------------------------------------------------------
# Compile the classes in the source directory
# ---------------------------------------------------------
cd source
javac -nowarn -d ../classes/ uniandes/cupi2/certificadoNotas/mundo/*.java
javac -nowarn -d ../classes/ uniandes/cupi2/certificadoNotas/interfaz/*.java
# ---------------------------------------------------------
# Create the jar file from the compiled class files
# ---------------------------------------------------------
cd ../classes
jar cf ../lib/certificadoNotas.jar uniandes/*
cd ../bin/mac
stty echo
| true |
54f0b7abb2e4f9c57dbe5bda8c1af3d49899aa3e
|
Shell
|
karimstm/Inception-Of-Things
|
/p1/scripts/worker_node.sh
|
UTF-8
| 517 | 2.859375 | 3 |
[] |
no_license
|
echo "Fetching binary file..."
sudo curl -L -o k3s https://github.com/k3s-io/k3s/releases/download/v1.22.2%2Bk3s2/k3s
sudo chmod 755 k3s
export TOKEN_FILE="/vagrant/scripts/node-token"
export MASTER_IP="$1"
export INTERNAL_IP="$2"
echo "Running agent..."
sudo ./k3s agent --node-ip ${INTERNAL_IP} --server https://${MASTER_IP}:6443 --token-file ${TOKEN_FILE} >& k3s-agent.log &
echo "installing ifconfig"
sudo yum install net-tools -y
echo "create k3s alias"
echo "alias k=$PWS/k3s" >> ~/.bashrc
echo "Done..."
| true |
f83278180c0c7dfe39dd18015f715493bf0051ec
|
Shell
|
hunamizawa/ESPPerfectTime
|
/travis/arduino.sh
|
UTF-8
| 2,381 | 3.09375 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash -eux
# The MIT License (MIT)
#
# Copyright (c) 2014-2020 Benoit BLANCHON
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
/sbin/start-stop-daemon --start --quiet --pidfile /tmp/custom_xvfb_1.pid --make-pidfile --background --exec /usr/bin/Xvfb -- :1 -ac -screen 0 1280x1024x16
sleep 3
export DISPLAY=:1.0
sudo iptables -P INPUT DROP
sudo iptables -P FORWARD DROP
sudo iptables -P OUTPUT ACCEPT
sudo iptables -A INPUT -i lo -j ACCEPT
sudo iptables -A OUTPUT -o lo -j ACCEPT
sudo iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
mkdir -p /tmp/arduino
curl -sS http://downloads.arduino.cc/arduino-$VERSION-linux64.tar.xz | tar xJ -C /tmp/arduino --strip 1 ||
curl -sS http://downloads.arduino.cc/arduino-$VERSION-linux64.tgz | tar xz -C /tmp/arduino --strip 1
export PATH=$PATH:/tmp/arduino/
if [[ "$BOARD" =~ "esp8266:esp8266:" ]]; then
arduino --pref "boardsmanager.additional.urls=http://arduino.esp8266.com/stable/package_esp8266com_index.json" --save-prefs
arduino --install-boards esp8266:esp8266
elif [[ "$BOARD" =~ "esp32:esp32:" ]]; then
arduino --pref "boardsmanager.additional.urls=http://dl.espressif.com/dl/package_esp32_index.json" --save-prefs
arduino --install-boards esp32:esp32
fi
ln -s $PWD /tmp/arduino/libraries/ESPPerfectTime
for EXAMPLE in $PWD/examples/*/*.ino; do
arduino --verify --board $BOARD $EXAMPLE
done
| true |
3c84826cfc7325f513939ce8a19a0a43486556cf
|
Shell
|
faile98/openshift-scripts
|
/4.prepare-nfs.sh
|
UTF-8
| 771 | 3 | 3 |
[] |
no_license
|
ansible nfs -m shell -a "mkdir -p /exports/user-vols/pv{001..200};"
cat <<'EOF' > uservols.sh
#!/bin/bash
for pvnum in {001..200}
do
if grep -q pv${pvnum} /etc/exports.d/openshift-uservols.exports;
then
echo "pv${pvnum}" already registered
else
echo /exports/user-vols/pv${pvnum} *\(rw,root_squash\) >> /etc/exports.d/openshift-uservols.exports;
fi
done;
EOF
ansible nfs -m copy -a "src=uservols.sh dest=/root/uservols.sh mode=u+x"
ansible nfs -m shell -a "/root/uservols.sh; chown -R nfsnobody.nfsnobody /exports/user-vols; chmod -R 777 /exports/user-vols;";
ansible nfs -m file -a "path=/root/uservols.sh state=absent"
ansible localhost -m file -a "path=uservols.sh state=absent"
ansible nfs -m shell -a "systemctl restart nfs-server"
| true |
71abd8e47cef6e0d22de22f005a91dddfe8d5785
|
Shell
|
chybz/cpkg
|
/cpkg/lib/deb/libpackage.sh
|
UTF-8
| 9,742 | 4.15625 | 4 |
[
"MIT"
] |
permissive
|
##############################################################################
#
# Debian specific package building
#
##############################################################################
HAS_DEBIAN_INIT=0
function lp_init() {
# Nothing to do
return
}
function lp_prepare_package_directory() {
mkdir -p $PKG_ROOTDIR/debian
touch $PKG_ROOTDIR/TODO
}
function lp_handle_system_file() {
local FILE=$1
local TYPE=$2
if [ "$TYPE" = "init" ]; then
HAS_DEBIAN_INIT=1
fi
mv $FILE $PKG_ROOTDIR/debian/
}
function lp_handle_package_files() {
local PHASE
local CPKG_SCRIPT_BASE
declare -A HAS_SCRIPTS=()
HAS_SCRIPTS["postinst"]=0
HAS_SCRIPTS["postrm"]=0
HAS_SCRIPTS["preinst"]=0
HAS_SCRIPTS["prerm"]=0
if (($HAS_DEBIAN_INIT)); then
HAS_SCRIPTS["postinst"]=1
HAS_SCRIPTS["prerm"]=1
HAS_SCRIPTS["postrm"]=1
fi
CPKG_SCRIPT=/tmp/cpkg-script.$$
declare -A SCRIPT_SPECS=()
SCRIPT_SPECS["configure"]="postinst";
SCRIPT_SPECS["remove"]="postrm";
SCRIPT_SPECS["purge"]="postrm";
# Handle user-provided script snippets
for PHASE in ${!SCRIPT_SPECS[@]}; do
DPKG_PHASE=${SCRIPT_SPECS[${PHASE}]}
if [ ! -f $PKG_ROOTDIR/$PKG_NAME.$PHASE ]; then
continue
fi
DPKG_DEST=$PKG_ROOTDIR/debian/$PKG_NAME.$DPKG_PHASE
HAS_SCRIPTS[${DPKG_PHASE}]=1
cp_wrap_script_for_phase \
$PKG_ROOTDIR/$PKG_NAME.$PHASE \
$PHASE \
$CPKG_SCRIPT
cp_replace_template_var_from_file \
$DPKG_DEST \
"CPKG_${PHASE^^}" \
$CPKG_SCRIPT
done
rm -f $CPKG_SCRIPT
# Remove unused/empty package scripts (Debian policy)
for PHASE in postinst postrm preinst prerm; do
DPKG_SCRIPT=$PKG_ROOTDIR/debian/$PKG_NAME.$PHASE
if [ ${HAS_SCRIPTS[$PHASE]} -eq 0 ]; then
rm -f $DPKG_SCRIPT
else
cp_process_template $DPKG_SCRIPT
fi
done
# Fixup install file to include 'usr/share' dir wherever
# man page are present
local DPKG_INSTALL_FILE=$PKG_ROOTDIR/debian/$PKG_NAME.install
local MANDIR=$PKG_STAGEDIR/$PKG_MANDIR
local USR_SHARE='usr/share'
if [[ -d ${MANDIR} ]];then
if [[ $(grep -c "${USR_SHARE}" ${DPKG_INSTALL_FILE}) == "0" ]];then
echo ${USR_SHARE} >> ${DPKG_INSTALL_FILE}
fi
fi
local FILES=$(find $PKG_ROOTDIR/debian -maxdepth 1 -type f | xargs)
local TAG="##"
local DIRVAL
if [ -n "$FILES" ]; then
for DIR in ${PACKAGE_DIRS}; do
DIRVAL="${DIR}"
cp_reinplace "s,${TAG}${DIR}${TAG},${!DIRVAL},g" $FILES
done
fi
}
function lp_clean_packages_scripts() {
local FILES=$(
find $PKG_ROOTDIR/debian -maxdepth 1 -type f | \
xargs grep -l "#CPKG_.*"
)
if [ -n "$FILES" ]; then
cp_reinplace "s,#CPKG_.*$,,g" $FILES
fi
}
function lp_install_local_package() {
sudo dpkg -i *.deb
}
function lp_install_packages() {
export DEBIAN_FRONTEND=noninteractive
sudo apt-get -y install $@
lp_make_pkg_map
}
function lp_configure_package() {
return
}
function lp_build_package() {
cd $PKG_ROOTDIR
dpkg-buildpackage -b -uc
cd ..
cp_msg "checking $PKG_NAME-$PKG_VER"
lintian \
--suppress-tags bad-distribution-in-changes-file \
*.changes
}
function build_pkgconfig_filters() {
local CACHE=$1
local ARCH=$(dpkg-architecture -qDEB_HOST_MULTIARCH 2>/dev/null)
cp_msg "building pkg-config header filters"
set +e
find /usr/lib -name \*.pc | while read PC; do
pkg-config \
--cflags-only-I \
--silence-errors \
$(basename $PC .pc) | \
sed \
-e "s/-I//g" \
-e "s/ /\n/g" | \
egrep -v "^$"
done | sort | uniq | \
sed -r -e "s,/$,," | \
egrep -v "^/usr/include$" | \
sed \
-r \
-e "s,^/usr/(include|lib)/($ARCH/)?,s#^," \
-e "s,$,/##," | \
egrep "^s#" \
> $CACHE.filters
set -e
}
function build_header_cache_from_repo() {
local CACHE=$1
local VER=$(lsb_release -sr)
local CMD
if dpkg --compare-versions "$VER" lt "9"; then
# Before stretch
CMD='zgrep -Eh "^usr/(local/)?include/" /var/cache/apt/apt-file/*.gz'
else
# stretch or after
CMD='/usr/lib/apt/apt-helper cat-file /var/lib/apt/lists/*Contents-*.lz4'
CMD+=' | egrep -h "^usr/(local/)?include/"'
fi
if [[ "$HOST_ARCH" = "amd64" ]]; then
# Filter out includes from libc6-dev-i386
CMD+=' | grep -v libc6-dev-i386'
fi
eval "$CMD" | \
sort -ur -k 1,1 | \
sed \
-r \
-e "s,^usr/(local/)?include/($ARCH/)?([^[:space:]]+)[[:space:]]+.+/([^/]*),\3 \4,g" \
-f $CACHE.filters \
> $CACHE.repo
}
function build_header_cache() {
local CACHE=$1
local ARCH=$(dpkg-architecture -qDEB_HOST_MULTIARCH 2>/dev/null)
local HOST_ARCH=$(dpkg-architecture -qDEB_HOST_ARCH 2>/dev/null)
build_pkgconfig_filters $CACHE
cp_msg "building apt header cache"
build_header_cache_from_repo $CACHE
rm -f $CACHE.installed
for INCDIR in /usr/include /usr/local/include; do
if [ ! -d $INCDIR ]; then
continue
fi
find $INCDIR -type f -name \*.h\* | \
xargs dpkg -S 2>&1 | \
egrep -v "^dpkg-query: " | \
sed \
-r \
-e "s,^([^[:space:]:]+):([^[:space:]]*) $INCDIR/([^[:space:]]+)$,\3 \1,g" \
-f $CACHE.filters \
>> $CACHE.installed
done
cat $CACHE.repo $CACHE.installed \
| sort | uniq | \
cdb -c -r -m $CACHE
rm -f $CACHE.repo $CACHE.installed
}
function apt_file_update() {
# Update apt-file data
local CMD="apt-file update"
if (($EUID != 0)); then
CMD="sudo $CMD"
fi
cp_msg "updating apt-file data"
$CMD >/dev/null
}
function build_pkg_cache() {
local CACHE=$1
cp_msg "building installed package cache"
dpkg-query -W -f='${Package} ${Version} ${Status}\n' | \
grep "install ok installed" | \
sed \
-r \
-e "s, .*$, 1,g" | \
cdb -c -m $CACHE
}
function lp_make_pkg_map() {
cp_make_home
local CACHE=$CPKG_HOME/packages.cache
local REFFILE=/var/lib/dpkg/status
local BUILD=0
if [ ! -f $CACHE ]; then
BUILD=1
elif [ $REFFILE -nt $CACHE ]; then
BUILD=1
fi
if (($BUILD == 1)); then
build_pkg_cache $CACHE
fi
}
function lp_make_pkg_header_map() {
cp_make_home
local CACHE=$CPKG_HOME/headers.cache
local REFFILE
local BUILD=0
if [ ! -f $CACHE ]; then
apt_file_update
BUILD=1
else
for REFFILE in /var/lib/apt/lists/*_{InRelease,Release}; do
if [ $REFFILE -nt $CACHE ]; then
apt_file_update
break
fi
done
for REFFILE in /var/cache/apt/apt-file/*.gz /var/lib/dpkg/status; do
if [ $REFFILE -nt $CACHE ]; then
BUILD=1
break
fi
done
fi
if (($BUILD == 1)); then
build_header_cache $CACHE
fi
return 0
}
function build_pkgconfig_cache() {
local CACHE=$1
cp_msg "building apt pkg-config cache"
local ARCH=$(dpkg-architecture -qDEB_HOST_MULTIARCH 2>/dev/null)
local HOST_ARCH=$(dpkg-architecture -qDEB_HOST_ARCH 2>/dev/null)
local -a PKGCONFIG_DIRS=(/usr/lib/pkgconfig /usr/local/lib/pkgconfig)
if [[ -n "$ARCH" ]]; then
PKGCONFIG_DIRS+=(/usr/lib/$ARCH/pkgconfig)
PKGCONFIG_DIRS+=(/usr/local/lib/$ARCH/pkgconfig)
fi
local -a PKGCONFIG_FILES
local DIR
for DIR in ${PKGCONFIG_DIRS[@]}; do
if [ ! -d $DIR ]; then
continue
fi
PKGCONFIG_FILES+=($(find $DIR -type f -name \*.pc))
done
if ((${#PKGCONFIG_FILES[@]} > 0)); then
dpkg -S ${PKGCONFIG_FILES[@]} 2>&1 | \
egrep -v "^dpkg-query: " | \
sed \
-r \
-e "s,: [^[:space:]]+/pkgconfig/, ,g" \
-e "s,:$HOST_ARCH,,g" \
-e "s,\.pc,,g" \
> $CACHE.tmp
else
touch $CACHE.tmp
fi
local PC
local PKG
local -A MAP
while read PKG PC; do
PC=${PC%.pc}
if [[ "${MAP[$PKG]}" ]]; then
MAP[$PKG]+=" $PC"
else
MAP[$PKG]=$PC
fi
done < $CACHE.tmp
rm -f $CACHE.tmp
for PKG in ${!MAP[@]}; do
echo "$PKG ${MAP[$PKG]}"
done | cdb -c -m $CACHE
}
function lp_make_pkgconfig_map() {
cp_make_home
local CACHE=$CPKG_HOME/pkgconfig.cache
local REFDIR
local BUILD=0
local -a PKGCONFIG_DIRS=(/usr/lib/pkgconfig /usr/local/lib/pkgconfig)
if [[ -n "$ARCH" ]]; then
PKGCONFIG_DIRS+=(/usr/lib/$ARCH/pkgconfig)
PKGCONFIG_DIRS+=(/usr/local/lib/$ARCH/pkgconfig)
fi
if [ ! -f $CACHE ]; then
BUILD=1
else
for REFDIR in ${PKGCONFIG_DIRS[@]}; do
if [ ! -d $REFDIR ]; then
continue
fi
if [ $REFDIR -nt $CACHE ]; then
BUILD=1
break
fi
done
fi
if (($BUILD == 1)); then
build_pkgconfig_cache $CACHE
fi
}
function lp_get_pkgconfig() {
local PC=$1
shift
local PCPATH=$PKG_CONFIG_PATH
env PKG_CONFIG_PATH=$PCPATH pkg-config $@ $PC
}
function lp_full_pkg_name() {
local PKG=$1
echo $PKG
}
| true |
1a26088bc6214233a24ebfaca7f7eada21ba3e65
|
Shell
|
dozent2018/IFA_LINUX_DEV
|
/.../mkstruct3.sh
|
UTF-8
| 489 | 2.578125 | 3 |
[] |
no_license
|
#!/bin/bash
# mkstruct3.sh : version using a positional parameter
# uses the 1st positional parameter for the course name and storage location,
# and a default value if none was given
kursdir=${1:-Kurs}
mkdir ${kursdir}
mkdir ${kursdir}/block1
mkdir ${kursdir}/block1/aufgaben
mkdir ${kursdir}/block1/loesungen
cp -R ${kursdir}/block1 ${kursdir}/block2
cp -R ${kursdir}/block1 ${kursdir}/block3
cp -R ${kursdir}/block1 ${kursdir}/block4
cp -R ${kursdir}/block1 ${kursdir}/block5
| true |
bac688faf3a8257521fa101da27559a1a598fcfb
|
Shell
|
ledhed-jgh/AGODS
|
/AGODS/setup.sh
|
UTF-8
| 1,179 | 2.59375 | 3 |
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/bash
# Add coral repo
echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list
# Add google gpg key
curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
# Update, Install TensorFlow Lite + Dependencies
sudo apt update && sudo apt -y install git libopenjp2-7 libtiff5 python3 python3-pip python3-pip python3-pil python3-picamera python3-tflite-runtime
# Move INI file to /boot
sudo chmod 755 agods.ini
sudo chown root.root agods.ini
sudo mv agods.ini /boot/
# Move logrotate file
sudo chmod 644 agods.logrotate
sudo chown root.root agods.logrotate
sudo mv agods.logrotate /etc/logrotate.d/agods
# Setup Services
sudo chmod 755 *.service
sudo chown root.root *.service
sudo mv *.service /etc/systemd/system/
sudo systemctl enable disable-led.service agods.service pir.service
# Disable Camera LED
echo -e "\n# Camera\nstart_x=1\ngpu_mem=128\ndisable_camera_led=1" | sudo tee -a /boot/config.txt > /dev/null
echo -e
echo -e "Remember to run:\n\tsudo raspi-config\nand enable Overlay File system under 'Performance Options'"
echo -e
echo "AGODS Setup Complete"
echo -e
| true |
cc4213012d2f21888c0b7bca266e10e9364527c2
|
Shell
|
Bitergia/fiware-chanchan
|
/vagrant/scripts/cygnus/00-apache-flume.sh
|
UTF-8
| 670 | 3.4375 | 3 |
[] |
no_license
|
#!/bin/bash
if [ -d "${APACHE_FLUME_HOME}" ]; then
rm -rf "${APACHE_FLUME_HOME}"
fi
VERSION="1.4.0"
TGZ="apache-flume-${VERSION}-bin.tar.gz"
UNPACKED="apache-flume-${VERSION}-bin"
URL="http://archive.apache.org/dist/flume/${VERSION}/${TGZ}"
# download flume
echo "Downloading ${TGZ}"
curl --remote-name --location --insecure --silent --show-error "${URL}"
# unpack tgz
echo "Unpacking ${TGZ}"
tar zxf "${TGZ}"
# move to destination
mv "${UNPACKED}" "${APACHE_FLUME_HOME}"
# add some needed directories
mkdir -p "${APACHE_FLUME_HOME}/plugins.d/cygnus"
mkdir -p "${APACHE_FLUME_HOME}/plugins.d/cygnus/lib"
mkdir -p "${APACHE_FLUME_HOME}/plugins.d/cygnus/libext"
| true |
b8616a15d4e4bc664272c3fff71e22fb6ebc3026
|
Shell
|
duyvk99/khoaluan
|
/terraform/script/install_docker.sh
|
UTF-8
| 1,008 | 2.734375 | 3 |
[] |
no_license
|
#!/bin/bash
echo "-----------------install docker-----------------"
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
sudo yum install -y docker-ce docker-ce-cli containerd.io
sudo systemctl enable docker
sudo systemctl start docker
sudo groupadd docker
sudo usermod -aG docker $USER
echo "-----------------install docker-compose-----------------"
sudo curl -L "https://github.com/docker/compose/releases/download/1.23.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
sudo service docker restart
echo "-----------------Download Source Code-------------------"
sudo yum -y install git
cd ~/
git clone https://github.com/vuukhanhduy/khoaluan.git
cd khoaluan/docker
git clone https://github.com/NguyenMinhTuanHai/wordpress-source.git
mv wordpress-source/source .
rm -rf wordpress-source
echo "-----------------DONE-----------------"
| true |
d76152c12c6243de36d8365fb1085c0ffb9ca632
|
Shell
|
szemek/openshift-diy-rails
|
/install.sh
|
UTF-8
| 937 | 2.65625 | 3 |
[] |
no_license
|
#!/bin/sh
cd $OPENSHIFT_TMP_DIR
# get & compile yaml
wget http://pyyaml.org/download/libyaml/yaml-0.1.4.tar.gz
tar xzf yaml-0.1.4.tar.gz
cd yaml-0.1.4
./configure --prefix=$OPENSHIFT_RUNTIME_DIR
make
make install
# clean up yaml sources
cd $OPENSHIFT_TMP_DIR
rm -rf yaml*
# get ruby
wget http://ftp.ruby-lang.org/pub/ruby/1.9/ruby-1.9.3-p194.tar.gz
tar xzf ruby-1.9.3-p194.tar.gz
cd ruby-1.9.3-p194
# export directory with yaml.h
export C_INCLUDE_PATH=$OPENSHIFT_RUNTIME_DIR/include
# export directory with libyaml
export LIBYAMLPATH=$OPENSHIFT_RUNTIME_DIR/lib
cd ext/psych
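# prepend a line to extconf.rb so the psych extension's linker search path includes $LIBYAMLPATH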
sed -i '1i $LIBPATH << ENV["LIBYAMLPATH"]' extconf.rb
cd $OPENSHIFT_TMP_DIR
cd ruby-1.9.3-p194
# compile ruby
./configure --disable-install-doc --prefix=$OPENSHIFT_RUNTIME_DIR
make
make install
export PATH=$OPENSHIFT_RUNTIME_DIR/bin:$PATH
# clean up ruby sources
cd $OPENSHIFT_TMP_DIR
rm -rf ruby*
# install rails
gem install rails --no-ri --no-rdoc
| true |
ba2c0eb10024e4a495692ec2e29a93067252f88d
|
Shell
|
Youngseok14253/A3-Quinzical
|
/Assignment 3/Scores/CompareScore
|
UTF-8
| 513 | 3.25 | 3 |
[] |
no_license
|
#! /bin/bash
pushd Scores > /dev/null
COUNT=1
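# walk High-Scores (header skipped) and insert "name - score" before the first
# entry it ties or beats, keeping the list in descending order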
sed 1d High-Scores | while read -r LINE
do
((COUNT+=1))
SCORE=$(echo $LINE | tr -dc '0-9')
if [[ $1 -eq $SCORE ]]
then
sed -i ''"$COUNT"'i'"$2 - $1"'' High-Scores
break
elif [[ $1 -lt $SCORE ]]
then
:
elif [[ $1 -gt $SCORE ]]
then
sed -i ''"$COUNT"'i'"$2 - $1"'' High-Scores
break
fi
done
popd > /dev/null
| true |
4ba6ba5dcf2ea6e1ddb9f7fd8d28323e05d3a344
|
Shell
|
AlanCoding/Ansible-inventory-file-examples
|
/scripts/large/scale_fast_script.sh
|
UTF-8
| 509 | 2.609375 | 3 |
[
"MIT"
] |
permissive
|
export ANSIBLE_INVENTORY_PLUGINS=$(pwd)/plugins/user_plugins/
export ANSIBLE_INVENTORY_ENABLED=fast_script
counter=0
while [ $counter -le 4 ]
do
colin=':'
justk='k'
params=$counter$justk$colin$counter$justk$colin'1.0'$colin'0.8'$colin'2'$colin'15'
echo "INVENTORY_DIMENSIONS=$params"
time INVENTORY_DIMENSIONS=$params ansible-playbook -i scripts/large/large.py debugging/hello_world.yml > /dev/null
counter=$((counter + 1))
done
unset ANSIBLE_INVENTORY_PLUGINS
unset ANSIBLE_INVENTORY_ENABLED
| true |
4ba12d45a96e72809f78bda00075fc09fe6368f4
|
Shell
|
CMU-SAFARI/AirLift
|
/src/4-extract_reads/extract_regions.sh
|
UTF-8
| 527 | 3.53125 | 4 |
[] |
no_license
|
#!/bin/bash
BINDIR=$1
GAP_ALN=$2
GAP_BED=$(echo ${GAP_ALN} | sed s/.bam/.bed/g)
OUT_FOLDER=$3
OUT_BEDS="${OUT_FOLDER}/*.bed"
"${BINDIR}/convert2bed" --input=bam < ${GAP_ALN} > ${GAP_BED};
awk -v out_fol="${OUT_FOLDER}" '{split($4, array, "_"); fname=array[1] "_" array[2] "_" array[3] "_" array[4] ".bed"; print $1 "\t" $2 "\t" $3 >> out_fol "/" fname;}' ${GAP_BED}
for i in `echo ${OUT_BEDS}`; do fname=`basename $i`;
"${BINDIR}/mergeBed" -i $i > "${OUT_FOLDER}/merged_$fname";
mv "${OUT_FOLDER}/merged_$fname" $i;
done
| true |
4e8c20a7a8fff8caa9d6b430a2ec91e4b7c40a89
|
Shell
|
2oc/matrix.lab
|
/finish_RHXSAT6.sh
|
UTF-8
| 23,252 | 3.328125 | 3 |
[] |
no_license
|
#!/bin/bash
# To convert the file for the other ENV
# cat finish_RHXSAT6.sh | sed 's/10.10.10/10.10.10/g' | sed 's/MATRIX/MATRIX/g' | sed 's/matrix/matrix/g' |sed 's/10.10.10/10.10.10/g' > ../matrix.lab/finish_RHXSAT6.sh
####################
# Users (post-script)
# ORG: default
# GROUP: N/A
# USER: admin / Passw0rd (Satellite Administrator)
# ORG: ${ORGANIZATION}
# GROUP: regusers
# USER: satmgr / Passw0rd (Manager)
# USER: reguser / Passw0rd (Edit Hosts)
####################
# I have found it easier to NOT use whitespace in the ORGANIZATION Variable
cat << EOF >> ~/.bash_profile
ORGANIZATION="MATRIXLABS"
LOCATION="HomeLab"
SATELLITE="rh7sat6"
DOMAIN="matrix.lab"
export DOMAIN SATELLITE LOCATION ORGANIZATION
EOF
RHNUSER=""
RHNPASSWD=""
if [ -z ${RHNUSER} ] || [ -z ${RHNPASSWD} ]
then
echo "ERROR: Please update RHNUSER/RHNPASSWD Variables in the beginning of this script."
echo " Script cannot proceed with empty values."
exit 9
fi
####################################################################################
### PRE
####################################################################################
tuned-adm profile virtual-guest
systemctl enable tuned
# If installing from CD (Sat 6.1), then manually import this key before starting...
# rpm --import https://www.redhat.com/security/f21541eb.txt
subscription-manager register --auto-attach --username="$RHNUSER" --password="$RHNPASSWD"
subscription-manager list --available --all > /var/tmp/subscription-manager_list--available--all.out
# THE PROCESS TO RETRIEVE "POOL" MAY, OR MAY NOT WORK FOR YOU
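# awk flag trick: emit only the lines between the "Red Hat Satellite 6" heading
# and the next "Available:" line, then grab that block's Pool ID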
POOL=`awk '/Red Hat Satellite 6/ {flag=1;next} /Available:/{flag=0} flag {print}' /var/tmp/subscription-manager_list--available--all.out | grep "Pool ID:" | awk '{ print $3 }'`
#POOL=`grep -A15 "Red Hat Satellite 6" /var/tmp/subscription-manager_list--available--all.out | grep "Pool ID:" | awk -F: '{ print $2 }' | sed -e 's/^[ \t]*//' -e 's/[ \t]*$//'`
subscription-manager subscribe --pool=${POOL}
subscription-manager repos --disable "*"
subscription-manager repos > /var/tmp/subscription-manager_repos.out
uname -a | grep el6 && RELEASE="6Server" || RELEASE="7Server"
subscription-manager release --set=$RELEASE
case $RELEASE in
7Server)
# This is a kludge at the moment...
#subscription-manager repos --enable rhel-7-server-rpms --enable rhel-7-server-satellite-6.1-rpms --enable rhel-7-server-satellite-optional-6.1-rpms --enable rhel-server-rhscl-7-rpms --releasever=${RELEASE}
subscription-manager repos --enable rhel-7-server-rpms --enable rhel-7-server-satellite-6.1-rpms --enable rhel-server-rhscl-7-rpms
;;
6Server)
subscription-manager repos --enable rhel-6-server-rpms --enable rhel-6-server-satellite-6.0-rpms --enable rhel-6-server-satellite-optional-6.0-rpms --enable rhel-server-rhscl-6-rpms --releasever=${RELEASE}
chkconfig ntpd on && service ntpd start
;;
*)
echo "ERROR: RELEASE not configured"
exit 9
;;
esac
TCP_PORTS="80 443 5000 5671 5674 8080 8140 9090"
UDP_PORTS="53 67 68 69 80 443 8080 "
case $RELEASE in
7Server)
echo "`hostname -I` `hostname` `hostname -s` " >> /etc/hosts
DEFAULT_ZONE=`/bin/firewall-cmd --get-default-zone`
for PORT in $TCP_PORTS
do
/bin/firewall-cmd --permanent --zone=$DEFAULT_ZONE --add-port=${PORT}/tcp
done
for PORT in $UDP_PORTS
do
/bin/firewall-cmd --permanent --zone=$DEFAULT_ZONE --add-port=${PORT}/udp
done
for USER in foreman katello root
do
firewall-cmd --permanent --direct --add-rule ipv4 filter OUTPUT 0 -o lo -p tcp -m tcp --dport 9200 -m owner --uid-owner $USER -j ACCEPT
firewall-cmd --permanent --direct --add-rule ipv6 filter OUTPUT 0 -o lo -p tcp -m tcp --dport 9200 -m owner --uid-owner $USER -j ACCEPT
done
firewall-cmd --permanent --direct --add-rule ipv4 filter OUTPUT 1 -o lo -p tcp -m tcp --dport 9200 -j DROP
firewall-cmd --permanent --direct --add-rule ipv6 filter OUTPUT 1 -o lo -p tcp -m tcp --dport 9200 -j DROP
/bin/firewall-cmd --reload
/bin/firewall-cmd --list-ports
;;
6Server)
cp /etc/sysconfig/iptables /etc/sysconfig/iptables.bak
for PORT in $TCP_PORTS
do
iptables --insert INPUT 5 -p tcp --dport $PORT -j ACCEPT
done
for PORT in $UDP_PORTS
do
iptables --insert INPUT 5 -p udp --dport $PORT -j ACCEPT
done
service iptables save
service iptables restart
;;
esac
####################################################################################
## INSTALL
####################################################################################
# I cannot seem to get the install to work via channels ;-(
# From channels
yum -y install katello
# Or... ISO
#scp 10.10.10.1:/var/lib/libvirt/images/Satellite-6.1.0-RHEL-7-20150428.0-Satellite-x86_64-dvd1.iso /tmp
#mount -oloop /tmp/Satellite-6.1.0-RHEL-7-20150428.0-Satellite-x86_64-dvd1.iso /mnt
#mount /dev/cdrom /mnt
#cd /mnt && ./install_packages --enhanced_reporting
# Without these tweaks the installation was failing
# (need to validate this is the correct way to apply these tweaks)
cat << EOF >> /etc/foreman/settings.yaml
# Added for Satellite Installation
:idle_timeout: 60
:proxy_request_timeout: 99
EOF
# Equivalent to...
#foreman-rake config -- -k idle_timeout -v 60
#foreman-rake config -- -k proxy_request_timeout -v 99
## NOTE: I don't know what the cause may be... but, I have noticed that occasionally I need to
## attempt an installation, let it fail, reboot, and try again to get the installer to work :-=(
# Tune this for your own environment
cat << EOF > katello-installer.cmd
katello-installer --foreman-admin-username="admin" \
--foreman-admin-password="Passw0rd" \
--foreman-authentication=true \
--foreman-initial-organization="${ORGANIZATION}" \
--foreman-initial-location="${LOCATION}" \
--capsule-tftp=true --capsule-tftp-servername="10.10.10.102" \
--capsule-dns=true --capsule-dns-forwarders="10.10.10.1" \
--capsule-dns-interface="eth0" --capsule-dns-reverse="10.10.10.in-addr.arpa" \
--capsule-dns-zone="${DOMAIN}" \
--capsule-dhcp=true --capsule-dhcp-interface=eth0 \
--capsule-dhcp-gateway="10.10.10.1" --capsule-dhcp-range="10.10.10.200 10.10.10.220"
EOF
sh ./katello-installer.cmd
#--capsule-dhcp=true --capsule-dhcp-interface=eth0 \
#--capsule-dhcp-gateway="10.10.10.1" --capsule-dhcp-range="10.10.10.200 10.10.10.220"
yum -y install katello-utils
yum -y update && shutdown now -r
# If things don't seem to be working... (I don't know if this works long-term)
# katello-installer --reset
####################################################################################
## POST
####################################################################################
mkdir ~/.hammer ~/.foreman
chmod 0600 ~/.hammer
cat << EOF > ~/.hammer/cli_config.yml
:modules:
- hammer_cli_foreman
:foreman:
:enable_module: true
:host: 'https://${SATELLITE}.${DOMAIN}'
:username: 'admin'
:password: 'Passw0rd'
 :organization: '${ORGANIZATION}'
# Check API documentation cache status on each request
#:refresh_cache: false
# API request timeout. Set to -1 for no timeout
#:request_timeout: 120 #seconds
:log_dir: '~/.foreman/log'
:log_level: 'error'
EOF
###################
# --source-id=1 (should be INTERNAL)
hammer user create --login satadmin --mail="satadmin@${SATELLITE}.${DOMAIN}" --firstname="Satellite" --lastname="Administrator" --password="Passw0rd" --auth-source-id=1
hammer user add-role --login=satadmin --role-id=9
hammer user create --login reguser --mail="reguser@${SATELLITE}.${DOMAIN}" --firstname="Registration" --lastname="User" --password="Passw0rd" --auth-source-id=1
hammer user-group create --name="regusers" --role-ids=12 --users=satadmin,reguser
#hammer organization create --name="${ORGANIZATION}" --label="${ORGANIZATION}"
hammer organization add-user --user=satadmin --name="${ORGANIZATION}"
hammer organization add-user --user=reguser --name="${ORGANIZATION}"
#hammer location create --name="${LOCATION}"
hammer location add-organization --name="${LOCATION}" --organization="${ORGANIZATION}"
hammer domain create --name="${DOMAIN}"
hammer subnet create --domain-ids=1 --gateway='10.10.10.1' --mask='255.255.255.0' --name='10.10.10.0/24' --tftp-id=1 --network='10.10.10.0' --dns-primary='10.10.10.121' --dns-secondary='10.10.10.122'
hammer organization add-subnet --subnet-id=1 --name="${ORGANIZATION}"
hammer organization add-domain --domain="${DOMAIN}" --name="${ORGANIZATION}"
scp 10.10.10.1:/home/jradtke/Downloads/RH7SAT6-APLABS-20151105.zip ./
hammer subscription upload --file RH7SAT6-APLABS-20151105.zip --organization="${ORGANIZATION}"
######################
## Collect information
hammer product list --organization="${ORGANIZATION}" > ~/hammer_product_list.out
PRODUCT='Red Hat Enterprise Linux Server'
hammer repository-set list --organization="${ORGANIZATION}" --product "${PRODUCT}" > ~/hammer_repository-set_list-"${PRODUCT}".out
######################
PRODUCT='Red Hat Enterprise Linux Server'
hammer repository-set list --organization="${ORGANIZATION}" --product "${PRODUCT}" > ~/hammer_repository-set_list-"${PRODUCT}".out
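# The numeric repository-set IDs used below were read from the list file
# generated above; they differ between Satellite releases, so verify them
# against your own hammer_repository-set_list output before running.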
REPOS="3815 2463 2472 2456 2476"
for REPO in $REPOS
do
echo; echo "NOTE: Enabling (${REPO}): `grep $REPO ~/hammer_repository-set_list-"${PRODUCT}".out | cut -f3 -d\|`"
echo "hammer repository-set enable --organization=\"${ORGANIZATION}\" --basearch='x86_64' --releasever='7Server' --product=\"${PRODUCT}\" --id=\"${REPO}\" "
hammer repository-set enable --organization="${ORGANIZATION}" --basearch='x86_64' --releasever='7Server' --product="${PRODUCT}" --id="${REPO}"
echo "hammer repository-set enable --organization=\"${ORGANIZATION}\" --basearch='x86_64' --releasever='7.2' --product=\"${PRODUCT}\" --id=\"${REPO}\" "
hammer repository-set enable --organization="${ORGANIZATION}" --basearch='x86_64' --releasever='7.2' --product="${PRODUCT}" --id="${REPO}"
done
## THERE ARE REPOS WHICH DO *NOT* ACCEPT A "releasever" VALUE
REPOS="4185 4188 3030"
for REPO in $REPOS
do
echo; echo "NOTE: Enabling (${REPO}): `grep $REPO ~/hammer_repository-set_list-"${PRODUCT}".out | cut -f3 -d\|`"
hammer repository-set enable --organization="${ORGANIZATION}" --basearch='x86_64' --product="${PRODUCT}" --id="${REPO}"
done
######################
PRODUCT='Red Hat Enterprise Linux High Availability for RHEL Server'
hammer repository-set list --organization="${ORGANIZATION}" --product "${PRODUCT}" > ~/hammer_repository-set_list-"${PRODUCT}".out
REPOS="2762"
for REPO in $REPOS
do
echo; echo "NOTE: Enabling (${REPO}): `grep $REPO ~/hammer_repository-set_list-"${PRODUCT}".out | cut -f3 -d\|`"
echo "hammer repository-set enable --organization=\"${ORGANIZATION}\" --basearch='x86_64' --releasever='7Server' --product=\"${PRODUCT}\" --id=\"${REPO}\" "
hammer repository-set enable --organization="${ORGANIZATION}" --basearch='x86_64' --releasever='7Server' --product="${PRODUCT}" --id="${REPO}"
echo "hammer repository-set enable --organization=\"${ORGANIZATION}\" --basearch='x86_64' --releasever='7.2' --product=\"${PRODUCT}\" --id=\"${REPO}\" "
hammer repository-set enable --organization="${ORGANIZATION}" --basearch='x86_64' --releasever='7.2' --product="${PRODUCT}" --id="${REPO}"
done
######################
PRODUCT='Red Hat OpenShift Enterprise'
hammer repository-set list --organization="${ORGANIZATION}" --product "${PRODUCT}" > ~/hammer_repository-set_list-"${PRODUCT}".out
REPOS="4025" # 3.0
for REPO in $REPOS
do
echo; echo "NOTE: Enabling (${REPO}): `grep $REPO ~/hammer_repository-set_list-"${PRODUCT}".out | cut -f3 -d\|`"
hammer repository-set enable --organization="${ORGANIZATION}" --basearch='x86_64' --releasever='7Server' --product="${PRODUCT}" --id="${REPO}"
done
## THERE ARE REPOS WHICH DO *NOT* ACCEPT A "releasever" VALUE
REPOS="4658" # 3.1
for REPO in $REPOS
do
echo; echo "NOTE: Enabling (${REPO}): `grep $REPO ~/hammer_repository-set_list-"${PRODUCT}".out | cut -f3 -d\|`"
hammer repository-set enable --organization="${ORGANIZATION}" --basearch='x86_64' --product="${PRODUCT}" --id="${REPO}"
done
######################
PRODUCT='Red Hat Software Collections for RHEL Server'
hammer repository-set list --organization="${ORGANIZATION}" --product "${PRODUCT}" > ~/hammer_repository-set_list-"${PRODUCT}".out
REPOS="2808"
for REPO in $REPOS
do
echo; echo "NOTE: Enabling (${REPO}): `grep $REPO ~/hammer_repository-set_list-"${PRODUCT}".out | cut -f3 -d\|`"
hammer repository-set enable --organization="${ORGANIZATION}" --basearch='x86_64' --releasever='7Server' --product="${PRODUCT}" --id="${REPO}"
done
######################
PRODUCT='Red Hat Enterprise Virtualization'
hammer repository-set list --organization="${ORGANIZATION}" --product "${PRODUCT}" > ~/hammer_repository-set_list-"${PRODUCT}".out
REPOS="3245 3109"
for REPO in $REPOS
do
echo; echo "NOTE: Enabling (${REPO}): `grep $REPO ~/hammer_repository-set_list-"${PRODUCT}".out | cut -f3 -d\|`"
hammer repository-set enable --organization="${ORGANIZATION}" --basearch='x86_64' --releasever='7Server' --product="${PRODUCT}" --id="${REPO}"
done
## THIS PACKAGE IS 6Server specific (at this time)
REPOS=4425
for REPO in $REPOS
do
echo; echo "NOTE: Enabling (${REPO}): `grep $REPO ~/hammer_repository-set_list-"${PRODUCT}".out | cut -f3 -d\|`"
#hammer repository-set enable --organization="${ORGANIZATION}" --basearch='x86_64' --product="${PRODUCT}" --id="${REPO}"
hammer repository-set enable --organization="${ORGANIZATION}" --product="${PRODUCT}" --id="${REPO}"
done
######################
PRODUCT='Oracle Java for RHEL Server'
REPOS="3254"
for REPO in $REPOS
do
echo; echo "NOTE: Enabling (${REPO}): `grep $REPO ~/hammer_repository-set_list-"${PRODUCT}".out | cut -f3 -d\|`"
#hammer repository-set enable --organization="${ORGANIZATION}" --basearch='x86_64' --releasever='7Server' --product="${PRODUCT}" --id="${REPO}"
done
#################
## EPEL Stuff - Pay attention to the output of this section. It's not tested/validated
# If it doesn't work, update the GPG-KEY via the WebUI
wget -q https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7 -O /root/RPM-GPG-KEY-EPEL-7
hammer gpg create --key /root/RPM-GPG-KEY-EPEL-7 --name 'GPG-EPEL-7' --organization="${ORGANIZATION}"
GPGKEYID=`hammer gpg list --name="GPG-EPEL-7" --organization="${ORGANIZATION}" | grep ^[0-9] | awk '{ print $1 }'`
PRODUCT='Extra Packages for Enterprise Linux'
hammer product create --name="${PRODUCT}" --organization="${ORGANIZATION}"
hammer repository create --name='EPEL 7 - x86_64' --organization="${ORGANIZATION}" --product="${PRODUCT}" --content-type='yum' --publish-via-http=true --url=http://dl.fedoraproject.org/pub/epel/7/x86_64/ --gpg-key-id="${GPGKEYID}"
#################
## SYNC EVERYTHING (Manually)
#for i in $(hammer --csv repository list --organization="${ORGANIZATION}" | awk -F, {'print $1'} | grep -vi '^ID'); do echo "hammer repository synchronize --id ${i} --organization=\"${ORGANIZATION}\" --async"; done
for i in $(hammer --csv repository list --organization="${ORGANIZATION}" | awk -F, {'print $1'} | grep -vi '^ID'); do hammer repository synchronize --id ${i} --organization="${ORGANIZATION}" --async; done
#################
## LIFECYCLE ENVIRONMENT
hammer lifecycle-environment create --name='DEV' --prior='Library' --organization="${ORGANIZATION}"
hammer lifecycle-environment create --name='TEST' --prior='DEV' --organization="${ORGANIZATION}"
hammer lifecycle-environment create --name='PROD' --prior='TEST' --organization="${ORGANIZATION}"
#################
# SYNC PLANS - I believe these are working now.
# I may... want to separate all the major products out to their own Sync Plan though.
hammer sync-plan create --enabled true --interval=daily --name='Daily sync - Red Hat' --description="Daily Sync Plan for Red Hat Products" --sync-date='2015-11-22 02:00:00' --organization="${ORGANIZATION}"
hammer product set-sync-plan --sync-plan='Daily sync - Red Hat' --organization="${ORGANIZATION}" --name='Red Hat OpenShift Enterprise'
hammer product set-sync-plan --sync-plan='Daily sync - Red Hat' --organization="${ORGANIZATION}" --name='Red Hat Enterprise Linux Server'
hammer product set-sync-plan --sync-plan='Daily sync - Red Hat' --organization="${ORGANIZATION}" --name='Red Hat Enterprise Linux High Availability for RHEL Server'
hammer product set-sync-plan --sync-plan='Daily sync - Red Hat' --organization="${ORGANIZATION}" --name='Red Hat OpenShift Enterprise'
hammer product set-sync-plan --sync-plan='Daily sync - Red Hat' --organization="${ORGANIZATION}" --name='Red Hat Software Collections for RHEL Server'
hammer product set-sync-plan --sync-plan='Daily sync - Red Hat' --organization="${ORGANIZATION}" --name='Red Hat Enterprise Virtualization'
hammer sync-plan create --enabled true --interval=daily --name='Daily sync - EPEL' --description="Daily Sync Plan for EPEL" --sync-date='2015-11-22 03:00:00' --organization="${ORGANIZATION}"
hammer product set-sync-plan --sync-plan='Daily sync - EPEL' --organization="${ORGANIZATION}" --name='Extra Packages for Enterprise Linux'
# It's best y'all leave right now....
exit 0
#################
## CONTENT VIEWS (I AM STILL LEARNING THIS YET...)
## TODO: Add repos to the content-view
LIFECYCLEID=`hammer lifecycle-environment list --organization="${ORGANIZATION}" | grep -v "Library" | awk '{ print $1 }' | head -1`
for LIFECYCLEENV in `hammer lifecycle-environment list --organization="${ORGANIZATION}" | awk -F\| '{ print $2 }' | egrep -v 'Library|NAME|^-'`
do
echo "hammer content-view create --name=\"Content View - ${LIFECYCLEENV}\" --organization=\"${ORGANIZATION}\" "
#hammer content-view create --name="Content View - ${LIFECYCLEENV}" --organization="${ORGANIZATION}"
echo "hammer content-view publish --name=\"Content View - ${LIFECYCLEENV}\" --organization=\"${ORGANIZATION}\" --async"
#hammer content-view publish --name="Content View - ${LIFECYCLEENV}" --organization="${ORGANIZATION}" --async
echo "hammer content-view version promote --organization=\"${ORGANIZATION}\" --content-view=\"Content View - ${LIFECYCLEENV}\" --to-lifecycle-environment=\"${LIFECYCLEENV}\" --version=1 --async"
#hammer content-view version promote --organization="${ORGANIZATION}" --content-view="Content View - ${LIFECYCLEENV}" --to-lifecycle-environment="${LIFECYCLEENV}" --version=1 --async
done
#################
## HOST COLLECTION AND ACTIVATION KEYS
hammer host-collection create --name='RHEL 7 x86_64' --organization="${ORGANIZATION}"
hammer activation-key create --description='DEV Activation Key' --name='rhel-7-server-x86_64-key-DEV' --lifecycle-environment='DEV' --organization="${ORGANIZATION}"
hammer activation-key create --description='TEST Activation Key' --name='rhel-7-server-x86_64-key-TEST' --lifecycle-environment='TEST' --organization="${ORGANIZATION}"
hammer activation-key create --description='PROD Activation Key' --name='rhel-7-server-x86_64-key-PROD' --lifecycle-environment='PROD' --organization="${ORGANIZATION}"
hammer activation-key add-host-collection --name='rhel-7-server-x86_64-key-DEV' --host-collection='RHEL 7 x86_64' --organization="${ORGANIZATION}"
hammer activation-key add-host-collection --name='rhel-7-server-x86_64-key-TEST' --host-collection='RHEL 7 x86_64' --organization="${ORGANIZATION}"
hammer activation-key add-host-collection --name='rhel-7-server-x86_64-key-PROD' --host-collection='RHEL 7 x86_64' --organization="${ORGANIZATION}"
# OSE Activation Key (Contract: 10169796)
hammer activation-key create --description='OSEv3 Library Activation Key' --name='OSEv3-Library' --lifecycle-environment='Library' --organization="${ORGANIZATION}"
hammer activation-key add-host-collection --name='OSEv3-Library' --host-collection='RHEL 7 x86_64' --organization="${ORGANIZATION}"
## ASSOCIATE KEYS AND SUBSCRIPTIONS
for i in $(hammer --csv activation-key list --organization="${ORGANIZATION}" | awk -F, {'print $1'} | grep -vi '^ID'); do for j in $(hammer --csv subscription list --organization="${ORGANIZATION}" | awk -F, {'print $8'} | grep -vi '^ID'); do hammer activation-key add-subscription --id ${i} --subscription-id ${j}; done; done
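# The same association, unrolled for readability -- a sketch equivalent to the
# one-liner above; the CSV field positions (1 for the key ID, 8 for the
# subscription ID) are assumptions tied to this hammer version:
# for key_id in $(hammer --csv activation-key list --organization="${ORGANIZATION}" | awk -F, '{print $1}' | grep -vi '^ID'); do
#   for sub_id in $(hammer --csv subscription list --organization="${ORGANIZATION}" | awk -F, '{print $8}' | grep -vi '^ID'); do
#     hammer activation-key add-subscription --id "${key_id}" --subscription-id "${sub_id}"
#   done
# done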
exit 0
# WORK IN PROGRESS PAST THIS POINT...
#################
## Container Registry Stuff
hammer product create --name='Containers' --organization="${ORGANIZATION}"
hammer repository create --name='Red Hat Containers' --organization="${ORGANIZATION}" --product='Containers' --content-type='docker' --url='https://registry.access.redhat.com' --docker-upstream-name='rhel' --publish-via-http="true"
hammer product synchronize --name='Containers' --organization="${ORGANIZATION}"
hammer repository info --id `hammer repository list --content-type docker --organization "${ORGANIZATION}" --content-view "Production Registry" --environment Production | grep docker | grep rhel | awk '{print $1}'` # Create Content View, Add Repo and Publish
hammer content-view create --organization="${ORGANIZATION}" --name "Production Registry" --description "Production Registry"
hammer content-view add-repository --organization="${ORGANIZATION}" --name "Production Registry" --repository "Red Hat Containers" --product "Containers"
hammer content-view publish --organization="${ORGANIZATION}" --name "Production Registry"
#Promote Content View
hammer content-view version promote --organization="${ORGANIZATION}" --to-lifecycle-environment DEV --content-view "Production Registry" --async; sleep 60
hammer content-view version promote --organization="${ORGANIZATION}" --to-lifecycle-environment TEST --content-view "Production Registry" --async; sleep 60
hammer content-view version promote --organization="${ORGANIZATION}" --to-lifecycle-environment PROD --content-view "Production Registry" --async; sleep 60
# Create a Sync Plan
hammer sync-plan create --enabled true --interval=daily --name='Daily sync - Red Hat Containers' --description="Daily Sync Plan for Red Hat Containers" --sync-date='2015-11-22 03:00:00' --organization="${ORGANIZATION}"
hammer product set-sync-plan --sync-plan='Daily sync - Red Hat Containers' --organization="${ORGANIZATION}" --name='Containers'
# Create a container from repo
hammer docker container create \
--organizations "${ORGANIZATION}" \
--locations "${LOCATION}" \
--compute-resource ${SATELLITE}.${DOMAIN} \
--repository-name "Red Hat Containers" \
--tag latest \
--name test \
--command bash
# Add a Compute Resource
hammer compute-resource create --organizations "${ORGANIZATION}" --location "${LOCATION}" --provider docker --name docker-node.${DOMAIN} --url http://docker-node.${DOMAIN}:4243
##################################
## Red Hat IDM Integration
## If you don't know what this, you probably should not proceed with any of this...
yum -y install ipa-client foreman-proxy ipa-admintools
ipa-client-install --password='Passw0rd'
foreman-prepare-realm admin
## HELPFUL LINKS
https://${SATELLITE}.${DOMAIN}/foreman_tasks/task?search=state+=+paused
https://${SATELLITE}.${DOMAIN}/foreman_tasks/tasks?search=state+=+planned
https://${SATELLITE}.${DOMAIN}/foreman_tasks/tasks?search=result+=+pending
| true |
d279b29c4e1a0a7ce93fb362965eb9fdbde3f2de
|
Shell
|
uvalib/virgo4-basic-loadtesting
|
/scripts/walk-master-response.ksh
|
UTF-8
| 1,112 | 4.03125 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
#
# script to walk through the standard response from a master search
#
# source helpers
FULL_NAME=$(realpath $0)
SCRIPT_DIR=$(dirname $FULL_NAME)
. $SCRIPT_DIR/common.ksh
function help_and_exit {
report_and_exit "use: $(basename $0) <results file>"
}
# ensure correct usage
if [ $# -ne 1 ]; then
help_and_exit
fi
# input parameters for clarity
RESULTS_FILE=$1
if [ ! -f $RESULTS_FILE ]; then
error_and_exit "$RESULTS_FILE does not exist or is not readable"
fi
# ensure we have the tools available
JQ_TOOL=jq
ensure_tool_available $JQ_TOOL
TR_TOOL=tr
ensure_tool_available $TR_TOOL
#cat $RESULTS_FILE
TOTAL_HITS=$(cat $RESULTS_FILE | $JQ_TOOL ".total_hits")
if [ $TOTAL_HITS -eq 0 ]; then
log "no search results"
exit 0
fi
POOLS=$(cat $RESULTS_FILE | $JQ_TOOL ".pool_results[].service_url" | $TR_TOOL -d "\"")
for pool in $POOLS; do
log "** pool url: $pool **"
$SCRIPT_DIR/walk-master-pool-response.ksh $RESULTS_FILE $pool
res=$?
if [ $res -ne 0 ]; then
error_and_exit "$res walking pool response, aborting"
fi
done
exit 0
#
# end of file
#
| true |
09951f9392a26b9197523fdf60ca2f192629ce46
|
Shell
|
Solo-one/Struggle
|
/shell/test.sh
|
UTF-8
| 770 | 2.984375 | 3 |
[] |
no_license
|
#!/bin/bash
#########################################################################
# File Name: test.sh
# Author: fxm
# mail: fangxiaoming01@baidu.com
# Created Time: 2018-07-16 Monday 17:36:54
#########################################################################
echo $0 $1 $2
echo $1
start_path=$(pwd)
echo ${start_path}
echo $# $$ $?
name="fangxiaoming"
#readonly name # read-only variable
echo $name
name="xiaoming"
echo ${name}
greeting="hello , ${name} !"
echo $greeting
echo ${#greeting}
echo ${greeting:2:4}
string="runoob is a great company"
echo `expr index "$string" is`
:<<EOF
my_array=(123 "sss" 456 'fxm' xiaoming)
echo $my_array
echo ${my_array[1]}
echo ${my_array[4]}
echo ${#my_array[@]}
echo ${#my_array[4]}
EOF
:<<'
comment content..
'
echo $1
| true |
760f01225f5a1f6a73e143f6f91a62e449e2f60e
|
Shell
|
dubo-dubon-duponey/docker-base
|
/hack/test.sh
|
UTF-8
| 878 | 2.984375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -o errexit -o errtrace -o functrace -o nounset -o pipefail
# shellcheck source=/dev/null
root="$(cd "$(dirname "${BASH_SOURCE[0]:-$PWD}")" 2>/dev/null 1>&2 && pwd)/../"
readonly root
# XXX right now this is not testing much but the runtime image building
if ! "$root/hack/build.sh" \
--inject registry="ghcr.io/dubo-dubon-duponey" \
--inject progress=plain \
--inject date=2021-08-01 \
--inject suite=bullseye \
--inject platforms=linux/amd64,linux/arm64 \
overlay "$@"; then
printf >&2 "Failed building overlay\n"
exit 1
fi
if ! "$root/hack/build.sh" \
--inject registry="ghcr.io/dubo-dubon-duponey" \
--inject progress=plain \
--inject date=2021-08-01 \
--inject suite=bullseye \
--inject platforms=linux/amd64,linux/arm64 \
runtime "$@"; then
printf >&2 "Failed building runtime image\n"
exit 1
fi
| true |
49abb75c66b7093417cd0e773c81a7a1f0b7129a
|
Shell
|
PtrMan/FemFlip
|
/femflip3/src/mitsuba/fix_view.sh
|
UTF-8
| 395 | 3.09375 | 3 |
[] |
no_license
|
#!/bin/sh
i=$1
while [ $i -le $2 ]; do
f="$i"_scene_view.exr
if [ ! -s "$f" ]; then
echo "Starting scene (particles) $i..."
mitsuba -Dparticles="$i"_particles.xml -Dtet_filename="$i"_tet.ply -Dtarget="0.5, 0.5, 0.1" -Dorigin="0.2, -1.0, 1.5" -Dup="0, 0, 1" -o "$i"_scene_view.exr particles.xml
exrtopng "$i"_scene_view.exr img/"$i"_scene_view.png
fi
i=`expr $i + 1`
done
| true |
44a81ddedf38912cfba5463cda2e089b03e8b32f
|
Shell
|
les-lee/origin-blog
|
/deploy.sh
|
UTF-8
| 550 | 2.84375 | 3 |
[
"MIT"
] |
permissive
|
# Ensure the script aborts on any error it encounters
set -e
# git init
git add -A
git commit -m 'modify by les'
git push origin master
# Build the static files
npm run blog:build
# Enter the generated output folder
cd blog/.vuepress/dist
# If deploying to a custom domain
# echo 'www.example.com' > CNAME
git init
git add -A
git commit -m 'deploy'
# If deploying to https://<USERNAME>.github.io
git push -f git@github.com:les-lee/les-lee.github.io.git master
# If deploying to https://<USERNAME>.github.io/<REPO>
# git push -f git@github.com:<USERNAME>/<REPO>.git master:gh-pages
cd -
| true |
8c762524c73729d4a2a65cdc8a5858b20ad1a7fc
|
Shell
|
bmgraves/scgt
|
/linux-3.10/driver/gtunload
|
UTF-8
| 492 | 3.625 | 4 |
[] |
no_license
|
#!/bin/bash
#
# Uninstall script for SCRAMNet GT driver
#
RMMOD="rmmod"
# find rmmod if not in path
which rmmod > /dev/null 2>&1;
if [ $? != 0 ];
then
# rmmod not in path.. try /sbin/rmmod
if [ -x /sbin/rmmod ];
then
RMMOD="/sbin/rmmod";
else
echo "Failed to locate rmmod program. Make sure rmmod is in your path."
exit 1;
fi
fi
$RMMOD scgt_module
if [ $? != "0" ];
then
echo Unable to unload SCRAMNet GT driver.
exit 1;
fi
exit 0;
| true |
bf21098d116047194d4f2f9145c1dc1ca54431e6
|
Shell
|
kwynncom/readable-primary-key
|
/collisions/ramdisk.sh
|
UTF-8
| 124 | 2.6875 | 3 |
[] |
no_license
|
RDIR=/tmp/rd
if [ ! -d $RDIR ]; then
mkdir $RDIR
fi
sudo mount -t ramfs -o size=4g ramfs "$RDIR"
sudo chmod 777 "$RDIR"
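# Note: ramfs ignores the size= option, so the 4g cap above is not enforced.
# A tmpfs mount does honor it; a possible alternative (same mount point):
#   sudo mount -t tmpfs -o size=4g tmpfs "$RDIR"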
| true |
d81c52d26133b71e832d0e88afd22089cd8b8da2
|
Shell
|
ColinIanKing/fwts
|
/auto-packager/mkpackage.sh
|
UTF-8
| 2,933 | 3.75 | 4 |
[] |
no_license
|
#!/bin/bash
#
# Copyright (C) 2010-2023 Canonical
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# Get fwts sources, strip out .git directory, add in necessary debian packaging
# files, build source package ready for upload.
#
RELEASES="bionic focal jammy kinetic lunar mantic"
REPO=git://kernel.ubuntu.com/hwe/fwts.git
RELEASE_TAR_URL=http://fwts.ubuntu.com/release
FWTS=fwts
#
# Clone the repo
#
get_source()
{
echo Getting source
git clone $REPO
}
#
# Figure out latest tagged version
#
get_version()
{
pushd $FWTS >& /dev/null
git tag | tail -1
popd >& /dev/null
}
#
# Checkout version
#
checkout_version()
{
echo "Checking out version $1"
pushd $FWTS >& /dev/null
git checkout -b latest $1
popd >& /dev/null
}
#
# Remove .git repo as we don't want this in the final package
#
rm_git()
{
rm -rf $FWTS/.git
}
#
# Remove the source
#
rm_source()
{
rm -rf $FWTS
}
#
# Prepare the orig tarball
#
prepare_tarball()
{
mkdir $version
pushd $version >& /dev/null
wget -N $RELEASE_TAR_URL/fwts-$version.tar.gz
wget -N $RELEASE_TAR_URL/SHA256SUMS
grep "fwts-$version.tar.gz" SHA256SUMS > SHA256SUMS.local
sha256sum -c SHA256SUMS.local
if [ $? -ne 0 ]; then
echo "Checksum unmatched. Abort"
exit
fi
mv fwts-$version.tar.gz fwts_`echo $version|cut -b 2-`.orig.tar.gz
popd >& /dev/null
}
#
# Create source package ready for upload and build
#
mk_package()
{
rel=$1
rm -rf $version/$rel
mkdir -p $version/$rel
cp -r $FWTS $version/$rel
cp $version/fwts_`echo $version|cut -b 2-`.orig.tar.gz $version/$rel
pushd $version/$rel/$FWTS >& /dev/null
deb_topline=`head -1 debian/changelog`
deb_release=`echo $deb_topline | cut -f3 -d' '`
if [ "x$rel;" = "x$deb_release" ]; then
suffix=''
else
suffix="~`echo $rel | cut -c1`"
fi
#
# Mungify changelog hack
#
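# Illustrative example of the rewrite below (values are made up): with
# rel=jammy and a changelog line "fwts (23.01.00-1) unstable; urgency=medium",
# the sed produces "fwts (23.01.00-1~j) jammy; urgency=medium".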
sed "s/) $deb_release/$suffix) $rel;/" debian/changelog > debian/changelog.new
mv debian/changelog.new debian/changelog
echo 'y' | debuild -S
rm -rf $FWTS
popd >& /dev/null
}
#
# Here we go..
#
rm_source
get_source
if [ $# -eq 1 ]; then
version=$1
else
version=`get_version`
fi
checkout_version $version
rm_git
prepare_tarball
for I in $RELEASES
do
echo Building package for release $I with version $version
mk_package $I
done
rm_source
| true |
3840f5b9b1fab5a22d34e48c61fa5bbfa08c056d
|
Shell
|
beer-garden/bg-utils
|
/bin/make_docs.sh
|
UTF-8
| 157 | 2.53125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script uses sphinx utilities to generate documentation
# from Python docstrings
BASEDIR=$(dirname $(dirname $0))
make -C "$BASEDIR/docs"
| true |
5688fe4f0d87d2dd635128262f6f5e9a615cd617
|
Shell
|
getupcloud/origin-server
|
/cartridges/openshift-origin-cartridge-haproxy/usr/bin/fix_local.sh
|
UTF-8
| 1,890 | 3.921875 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Script to disable the local serving gear after either at least
# one remote gear is visible to haproxy or 30 seconds have passed.
source $OPENSHIFT_CARTRIDGE_SDK_BASH
while getopts 'd' OPTION
do
case $OPTION in
d) set -x
;;
esac
done
rm -f /tmp/fix_local*
exec &> /tmp/fix_local.$$
prim_cart=$(primary_cartridge_short_name)
prim_cart_ip="OPENSHIFT_${prim_cart}_IP"
prim_cart_port="OPENSHIFT_${prim_cart}_PORT"
eval local_ip=\$${prim_cart_ip}
if [ -z "$local_ip" ]; then
first_ip_in_manifest=$(primary_cartridge_private_ip_name)
prim_cart_ip="OPENSHIFT_${prim_cart}_${first_ip_in_manifest}"
eval local_ip=\$${prim_cart_ip}
fi
eval local_port=\$${prim_cart_port}
if [ -z "$local_port" ]; then
first_port_in_manifest=$(primary_cartridge_private_port_name)
prim_cart_port="OPENSHIFT_${prim_cart}_${first_port_in_manifest}"
eval local_port=\$${prim_cart_port}
fi
local_ep=$local_ip:$local_port
haproxy_cfg=$OPENSHIFT_HAPROXY_DIR/conf/haproxy.cfg
iter=0
while (( $iter < 30 )); do
echo "$iter: Checking if any remote gears are up."
if [ $(curl -sS "$OPENSHIFT_HAPROXY_STATUS_IP:$OPENSHIFT_HAPROXY_STATUS_PORT/haproxy-status/;csv" | grep gear- | grep UP | wc -l) -ge 1 ]; then
echo "Atleast one remote gear is UP."
break;
else
sleep 1
iter=$((iter + 1))
fi
done
(
flock -e 200
(
echo "Disabling local-gear"
cp -f $haproxy_cfg /tmp/haproxy.cfg.$$
## disable local-gear serving with weight 0.
sed -i "/\s*server\s*local-gear\s.*/d" /tmp/haproxy.cfg.$$
echo " server local-gear $local_ep weight 0" >> /tmp/haproxy.cfg.$$
cat /tmp/haproxy.cfg.$$ > "$haproxy_cfg"
rm -f /tmp/haproxy.cfg.$$
) 200>&-
) 200>${haproxy_cfg}.lock
# Restart haproxy to pick up the new configuration
/usr/bin/gear reload --cart haproxy-$OPENSHIFT_HAPROXY_VERSION
| true |
9ad35aaa3a87adbc6d5a35ba95556c8b00b5a0d9
|
Shell
|
timhemel/rsyncbackups
|
/rsync-backup
|
UTF-8
| 328 | 3.21875 | 3 |
[] |
no_license
|
#!/bin/sh
if [ -r /etc/defaults/periodic.conf ]
then
. /etc/defaults/periodic.conf
source_periodic_confs
fi
case "$rsync_backups_enable" in
[Yy][Ee][Ss])
script="$rsync_backups_dir/scripts/backup.sh"
config="$rsync_backups_dir/config/$rsync_backups_config"
"$script" "$config"
rc=$?
;;
*)
rc=0;;
esac
exit $rc
| true |
f75c375ebe1e538fc92e5e6c0797727ccee2c7ce
|
Shell
|
mminichino/openshift-automation
|
/mirror-oper-catalog.sh
|
UTF-8
| 2,349 | 3.59375 | 4 |
[] |
no_license
|
#!/bin/sh
#
REGISTRY=""
VERSION=""
LOCAL_VERSION="1"
TMPFILE=$(mktemp)
function print_usage () {
echo "$0 -r registry -v version [ -l local_version ]"
exit 1
}
while getopts r:v:l: optargs
do
case "${optargs}" in
r) REGISTRY=$OPTARG
;;
v) VERSION=$OPTARG
;;
l) LOCAL_VERSION=$OPTARG
;;
\?) print_usage
;;
esac
done
if [ -z "$REGISTRY" -o -z "$VERSION" ]
then
print_usage
else
CHECK=$(echo $VERSION | sed -e 's/^[0-9][0-9]*\.[0-9][0-9]*$/X/')
if [ "$CHECK" != "X" ]
then
echo "Invalid format for OCP version: $VERSION"
exit 1
fi
echo "Registry: $REGISTRY Version: $VERSION"
fi
export OCP_RELEASE=${VERSION}-x86_64
export LOCAL_REGISTRY="${REGISTRY}:5000"
export LOCAL_REPOSITORY='ocp4/openshift4'
export PRODUCT_REPO='openshift-release-dev'
export LOCAL_SECRET_JSON="${HOME}/pull-secret/pull-secret.json"
export RELEASE_NAME="ocp-release"
podman login ${LOCAL_REGISTRY}
podman login quay.io
podman login registry.redhat.io
oc adm catalog build \
--appregistry-org redhat-operators \
--from=registry.redhat.io/openshift4/ose-operator-registry:v${VERSION} \
--to=${LOCAL_REGISTRY}/olm/redhat-operators:v${LOCAL_VERSION} \
-a ${XDG_RUNTIME_DIR}/containers/auth.json \
--insecure
oc patch OperatorHub cluster --type json \
-p '[{"op": "add", "path": "/spec/disableAllDefaultSources", "value": true}]'
oc adm catalog mirror \
${LOCAL_REGISTRY}/olm/redhat-operators:v${LOCAL_VERSION} \
${LOCAL_REGISTRY} \
-a ${XDG_RUNTIME_DIR}/containers/auth.json \
--insecure
oc apply -f ./redhat-operators-manifests
if [ -f "$TMPFILE" ]
then
cat << EOF > $TMPFILE
apiVersion: operators.coreos.com/v1alpha1
kind: CatalogSource
metadata:
name: my-operator-catalog
namespace: openshift-marketplace
spec:
sourceType: grpc
image: ${LOCAL_REGISTRY}/olm/redhat-operators:v${LOCAL_VERSION}
displayName: My Operator Catalog
publisher: grpc
EOF
MYCATALOGNAME=$(oc get catalogsource -n openshift-marketplace -o json | jq -r '.items|.[]|.metadata.name')
if [ "$MYCATALOGNAME" = "my-operator-catalog" ]
then
OPERATION="replace"
else
OPERATION="create"
fi
oc $OPERATION -f $TMPFILE
rm $TMPFILE
else
echo "Error: can not access temp file, can not replace catalog source."
fi
| true |
3d796f9228c1056db75ffa6c5ea0e61e1aa7d377
|
Shell
|
grembo/pot
|
/share/pot/set-hook.sh
|
UTF-8
| 2,084 | 3.796875 | 4 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# shellcheck disable=SC3033,SC3040,SC3043
:
set-hook-help() {
cat <<-"EOH"
pot set-hook [-hv] -p pot [-s hook]
-h print this help
-v verbose
-p pot : the working pot
-s hook : the pre-start hook
-S hook : the post-start hook
-t hook : the pre-stop hook
-T hook : the post-stop hook
EOH
}
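# Example invocation (pot and script names are hypothetical):
# pot set-hook -p mypot -s ./hooks/prestart.sh -T ./hooks/poststop.sh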
# $1 pot
# $2 script name
# $3 hook type
_set_hook()
{
local _pname _script
_pname="$1"
_script="$2"
_hooktype="$3"
cp "$_script" "$POT_FS_ROOT/jails/$_pname/conf/${_hooktype}.sh"
}
# $1 hook script
_is_valid_hook()
{
if [ -x "$1" ]; then
return 0 # true
fi
_error "$1 not a valid hook"
return 1 # false
}
pot-set-hook()
{
local _pname _prestart _poststart _prestop _poststop
_pname=
_prestart=
_poststart=
_prestop=
_poststop=
OPTIND=1
while getopts "hvp:s:S:t:T:" _o ; do
case "$_o" in
h)
set-hook-help
return 0
;;
v)
_POT_VERBOSITY=$(( _POT_VERBOSITY + 1))
;;
s)
if _is_valid_hook "${OPTARG}" ; then
_prestart="${OPTARG}"
fi
;;
S)
if _is_valid_hook "${OPTARG}" ; then
_poststart="${OPTARG}"
fi
;;
t)
if _is_valid_hook "${OPTARG}" ; then
_prestop="${OPTARG}"
fi
;;
T)
if _is_valid_hook "${OPTARG}" ; then
_poststop="${OPTARG}"
fi
;;
p)
_pname="$OPTARG"
;;
?)
set-hook-help
return 1
esac
done
if [ -z "$_pname" ]; then
_error "A pot name is mandatory"
set-hook-help
return 1
fi
if ! _is_pot "$_pname" ; then
_error "pot $_pname is not valid"
set-hook-help
return 1
fi
if [ -z "$_prestart" ] && [ -z "$_poststart" ] &&
[ -z "$_prestop" ] && [ -z "$_poststop" ]; then
_error "No hooks provided - at least one hook as to be set"
set-hook-help
return 1
fi
if ! _is_uid0 ; then
return 1
fi
if [ -n "$_prestart" ]; then
_set_hook "$_pname" "$_prestart" "prestart"
fi
if [ -n "$_poststart" ]; then
_set_hook "$_pname" "$_poststart" "poststart"
fi
if [ -n "$_prestop" ]; then
_set_hook "$_pname" "$_prestop" "prestop"
fi
if [ -n "$_poststop" ]; then
_set_hook "$_pname" "$_poststop" "poststop"
fi
}
| true |
9c0091ade4872abbd7c0e1c70113ab8708b24c6d
|
Shell
|
akbaraziz/bash_scripts
|
/not-tested/configure_ssh_security.sh
|
UTF-8
| 2,536 | 3.5 | 4 |
[] |
no_license
|
#!/bin/bash
#
# Script by: Danie Pham
# Script date: 04-06-2019
# Script version: 1.0
# Script use: used to configure SSH security faster
# Remember to edit NOTE 1 & 2 in this script
set -ex
# Function configure ssh
f_config_ssh () {
# Disable X11 Forwarding in Linux server
sed -i 's/X11Forwarding yes/X11Forwarding no/g' /etc/ssh/sshd_config
# Set MaxAuthTries to 1
sed -i 's/#MaxAuthTries 6/MaxAuthTries 1/g' /etc/ssh/sshd_config
# Auto disconnect after 5 minutes
sed -i 's/#ClientAliveInterval 0/ClientAliveInterval 300/g' /etc/ssh/sshd_config
sed -i 's/#ClientAliveCountMax 3/ClientAliveCountMax 0/g' /etc/ssh/sshd_config
# Config hostbase authentication
sed -i 's|#IgnoreRhosts yes|IgnoreRhosts yes|g' /etc/ssh/sshd_config
sed -i 's/#HostbasedAuthentication no/HostbasedAuthentication no/g' /etc/ssh/sshd_config
# Don't allow empty password
sed -i 's/#PermitEmptyPasswords no/PermitEmptyPasswords no/g' /etc/ssh/sshd_config
# Don't allow TCP Forwarding -> Prevent hacker use your server like a router or transfer something
sed -i 's|#AllowTcpForwarding yes|AllowTcpForwarding no|g' /etc/ssh/sshd_config
sed -i 's|#UsePrivilegeSeparation yes|UsePrivilegeSeparation yes|g' /etc/ssh/sshd_config
sed -i 's|#StrictModes yes|StrictModes yes|g' /etc/ssh/sshd_config
# Config banner for ssh, just optional
sed -i 's|#Banner none|Banner /etc/ssh/ssh_banner.txt|g' /etc/ssh/sshd_config
###########################################################
### NOTE 1: edit youruser and your ip to the line below ###
###########################################################
echo "AllowUsers youruser@192.168.10.10 youruser@192.168.10.11" >> /etc/ssh/sshd_config
##############################################
### NOTE 2: edit your ip to the line below ###
##############################################
echo "sshd : 192.168.10.10 192.168.10.11" >> /etc/hosts.allow
echo "sshd : ALL" >> /etc/hosts.deny
# Change content of banner as you want
cat > /etc/ssh/ssh_banner.txt <<"EOF"
*****************************************************************
PLEASE READ CAREFULLY BELOW !!
------------------------------
1. Do not stop IPtables service, just edit it if needed.
2. Do not change SSH configuration if you don't know it.
3. SSH just allow a few special user, do not change it.
*****************************************************************
EOF
# Restart service ssh to apply new configuration
service sshd restart
}
# Function main
f_main () {
f_config_ssh
}
f_main
exit
| true |
428642509324a0b566b5cdf1d34b1c038c5cf53d
|
Shell
|
zxlhhyccc/CBBR
|
/D3.sh
|
UTF-8
| 446 | 3.421875 | 3 |
[
"MIT"
] |
permissive
|
#! /bin/bash
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
[ "$EUID" -ne '0' ] && echo "Error,This script must be run as root! " && exit 1
apt update && apt install linux-image-4.15.0-23-generic -y
update-grub && update-grub2
echo
read -p "Info: The system needs to be restart. Do you want to reboot? [y/n]" is_reboot
if [[ ${is_reboot} == "y" || ${is_reboot} == "Y" ]]; then
reboot
else
exit
fi
| true |
418f2b9fc1fbcb7691474482363cac4204319598
|
Shell
|
artorhem/graph-processing
|
/gps-rev-110/local-master-scripts/make_gps_node_runner_jar.sh
|
UTF-8
| 1,470 | 2.75 | 3 |
[] |
no_license
|
cd ..
GPS_DIR="`pwd`"
GPS_SRC_DIR=${GPS_DIR}/src
GPS_CLASSES_DIR=${GPS_DIR}/classes
LIBS_DIR=${GPS_DIR}/libs
echo "removing ${GPS_DIR}/gps_node_runner.jar"
rm ${GPS_DIR}/gps_node_runner.jar
echo "removing ${GPS_CLASSES_DIR}"
rm -rf ${GPS_CLASSES_DIR}
echo "making ${GPS_CLASSES_DIR}"
mkdir ${GPS_CLASSES_DIR}
echo "cding into ${GPS_SRC_DIR}"
cd ${GPS_SRC_DIR}
find java/gps/examples -name \*.java -print > file.list
# TODO: bug fix? works without "echo" so this line might be old code
#$GPS_SRC_DIR/java/gps/node/GPSNodeRunner.java >> file.list
echo "compiling GPSNodeRunner to classes directory"
#javac -verbose \
javac \
-cp $LIBS_DIR/asm-3.3.1.jar:$LIBS_DIR/guava-r08.jar:$LIBS_DIR/objenesis-1.2.jar:$LIBS_DIR/cglib-2.2.jar:$LIBS_DIR/commons-cli-1.2.jar:$LIBS_DIR/jline-0.9.94.jar:$LIBS_DIR/log4j-1.2.15.jar:$LIBS_DIR/commons-logging-1.1.1.jar:$LIBS_DIR/hadoop-core-1.0.4.jar:$LIBS_DIR/commons-collections-3.2.1.jar:$LIBS_DIR/commons-lang-2.4.jar:$LIBS_DIR/commons-configuration-1.6.jar:$LIBS_DIR/tools.jar:$LIBS_DIR/mina-core-2.0.3.jar:$LIBS_DIR/mina-example-2.0.3.jar:$LIBS_DIR/slf4j-api-1.6.1.jar:$LIBS_DIR/colt.jar:$LIBS_DIR/concurrent.jar:$GPS_SRC_DIR/java \
-d ${GPS_CLASSES_DIR} \
@file.list
echo "cding into ${GPS_CLASSES_DIR}"
cd ${GPS_CLASSES_DIR}
pwd
echo "making gps_node_runner.jar..."
#jar -cmvf $GPS_DIR/local-master-scripts/manifest.txt ../gps_node_runner.jar gps/
jar -cmf $GPS_DIR/local-master-scripts/manifest.txt ../gps_node_runner.jar gps/
| true |
13b93f4ce8f422cb8049d88a8a008ad1c642c9f8
|
Shell
|
petronny/aur3-mirror
|
/tabbar-git/PKGBUILD
|
UTF-8
| 597 | 2.859375 | 3 |
[] |
no_license
|
# Maintainer: Stefan Husmann <stefan-husmann@t-online.de>
pkgname=tabbar-git
pkgver=372
pkgrel=1
pkgdesc="Emacs minor mode that displays a tab bar at the top"
arch=('any')
url="https://github.com/dholm/tabbar.git"
license=('GPL')
makedepends=('git')
source=("git://github.com/dholm/tabbar.git")
noextract=()
md5sums=('SKIP')
_gitname="tabbar"
pkgver() {
cd "$srcdir"/"$_gitname"
printf "%s" "$(git rev-list --count HEAD)"
}
package() {
cd "$srcdir/$_gitname"
install -d $pkgdir/usr/share/emacs/site-lisp
for _i in *.el *.elc *.tiff *.png
do install -Dm644 $_i $pkgdir/usr/share/emacs/site-lisp/${_i}
done
}
| true |
02e683068d49c4b22d9265409985165a730d3c4c
|
Shell
|
samuelsanchezalvarez/estudios
|
/AEC/simplescalar/simula-spec2000-B
|
ISO-8859-1
| 1,104 | 2.921875 | 3 |
[] |
no_license
|
#!/bin/sh
# The script takes as input parameter ($1) a string that is appended to the
# output files (*.out) generated in the spec2000/OUTS directory
cd spec2000
echo "--- Comienzo de la simulacin..."
CONFIG_B="-decode:width 1 -issue:width 1 -issue:inorder true -commit:width 1 -cache:il1 il1:256:32:4:l -cache:dl1 dl1:256:32:4:l -cache:il2 none -cache:dl2 none"
../sim-outorder -max:inst 10000000 $CONFIG_B -redir:sim OUTS/bzip2-$1.out bzip2.ss input.source 1
../sim-outorder -max:inst 15000000 $CONFIG_B -redir:sim OUTS/mcf-$1.out mcf.ss inp.in
../sim-outorder -max:inst 12000000 $CONFIG_B -redir:sim OUTS/vpr-$1.out vpr.ss net.in arch.in place.in route.out -nodisp -route_only -route_chan_width 15
../sim-outorder -max:inst 11000000 $CONFIG_B -redir:sim OUTS/equake-$1.out equake.ss < equake.in
../sim-outorder -max:inst 18000000 $CONFIG_B -redir:sim OUTS/ammp-$1.out ammp.ss < ammp.in
clear
echo "NI Ciclos CPI"
for f in $(ls OUTS/*.out)
do
awk '/sim_CPI/ {print $2}; /sim_num_insn/ {print $2}; /sim_cycle/ {print $2}' $f | pr -ts" " --columns 3
done
cd ..
| true |
324f1725259057e9760d9e51aaf8c549a8450957
|
Shell
|
smdalton/distributed-docker-kvs
|
/legacy/runservers.sh
|
UTF-8
| 810 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/bash
# Author: Shane Dalton
# Created for UCSC undergrad course CMPS128, Fall 2017
# Initializes all servers listed in the ports list
echo "test"
echo "From server:"
num_servers=$1
echo $num_servers
K=4
VIEW1="10.0.0.21:8080,10.0.0.22:8080,10.0.0.23:8080,10.0.0.24:8080"
#fill in this list with the addresses of all servers you want to spawn
VIEW="localhost:5000,localhost:5001,localhost:5002,localhost:5003"
#starting port range
port=5000
for i in $(seq 1 $num_servers) #"${ports[@]}"
do
echo "Raising server at localhost:$port"
python server1.py $K $VIEW localhost:$port &
let "port=port+1"
done
# d1 = json.loads(d) string->dictionary d - > d1
# d2 = json.dumps(d) dictionary -> string d2 - > d
# d3 = json.dumps(json.loads(d)) # 'dumps' gets the dict from 'loads' this time
| true |
db67b546226945f8f49cc6a898ec2e0a6802b544
|
Shell
|
oliverhernandezmoreno/SourcesOH
|
/tranque_v1.8.4_source/config/docker-stacks/dev.observatorioderelaves.cl/deploy.sh
|
UTF-8
| 364 | 3.21875 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
set -e
usage() {
echo "Usage: ./deploy.sh <gitlab-user> <gitlab-password>" 1>&2
exit 1
}
if [ -z "$1" ] ; then usage ; fi
if [ -z "$2" ] ; then usage ; fi
cd $(dirname "${BASH_SOURCE[0]}")
docker login -u "$1" -p "$2" registry.gitlab.com/inria-chile
docker-compose pull
docker-compose up -d --remove-orphans
docker system prune -f
| true |
edee452d506bfc9ed6ce5a29c6e668ba9104328c
|
Shell
|
IntelAI/models
|
/quickstart/image_recognition/tensorflow/resnet50v1_5/inference/gpu/batch_inference.sh
|
UTF-8
| 5,074 | 3.3125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
MODEL_DIR=${MODEL_DIR-$PWD}
echo 'MODEL_DIR='$MODEL_DIR
echo 'PRECISION='$PRECISION
echo 'OUTPUT_DIR='$OUTPUT_DIR
echo performance | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor
export TF_NUM_INTEROP_THREADS=1
# Create an array of input directories that are expected and then verify that they exist
declare -A input_envs
input_envs[PRECISION]=${PRECISION}
input_envs[OUTPUT_DIR]=${OUTPUT_DIR}
input_envs[GPU_TYPE]=${GPU_TYPE}
for i in "${!input_envs[@]}"; do
var_name=$i
env_param=${input_envs[$i]}
if [[ -z $env_param ]]; then
echo "The required environment variable $var_name is not set" >&2
exit 1
fi
done
# Create the output directory in case it doesn't already exist
mkdir -p ${OUTPUT_DIR}
# If batch size env is not mentioned, then the workload will run with the default batch size.
if [ -z "${BATCH_SIZE}"]; then
BATCH_SIZE="1024"
echo "Running with default batch size of ${BATCH_SIZE}"
fi
# Check for GPU type
if [[ $GPU_TYPE == "flex_series" ]]; then
export OverrideDefaultFP64Settings=1
export IGC_EnableDPEmulation=1
export TF_NUM_INTEROP_THREADS=1
export CFESingleSliceDispatchCCSMode=1
if [[ $PRECISION == "int8" ]]; then
echo "Precision is $PRECISION"
if [[ ! -f "${FROZEN_GRAPH}" ]]; then
pretrained_model=/workspace/tf-flex-series-resnet50v1-5-inference/pretrained_models/resnet50v1_5-frozen_graph-${PRECISION}-gpu.pb
else
pretrained_model=${FROZEN_GRAPH}
fi
# WARMUP="-- warmup_steps=10 steps=500"
else
echo "FLEX SERIES GPU SUPPORTS ONLY INT8 PRECISION"
exit 1
fi
elif [[ $GPU_TYPE == "max_series" ]]; then
if [[ $PRECISION == "int8" || $PRECISION == "fp16" || $PRECISION == "fp32" ]]; then
echo "Precision is $PRECISION"
if [[ ! -f "${FROZEN_GRAPH}" ]]; then
pretrained_model=/workspace/tf-max-series-resnet50v1-5-inference/pretrained_models/resnet50v1_5-frozen_graph-${PRECISION}-gpu.pb
else
pretrained_model=${FROZEN_GRAPH}
fi
WARMUP="-- warmup_steps=5 steps=20 disable-tcmalloc=True"
else
echo "MAX SERIES GPU SUPPORTS ONLY INT8, FP32 AND FP16 PRECISION"
exit 1
fi
fi
if [[ $PRECISION == "fp16" ]]; then
export ITEX_AUTO_MIXED_PRECISION=1
export ITEX_AUTO_MIXED_PRECISION_DATA_TYPE="FLOAT16"
fi
if [[ -z "${Tile}" ]]; then
Tile=${Tile:-1}
else
Tile=${Tile}
fi
# source "${MODEL_DIR}/quickstart/common/utils.sh"
if [[ ${Tile} == "1" ]]; then
echo "resnet50 v1.5 int8 inference"
mac=`lspci | grep Dis| head -n 1| awk '{print $1}'`
node=`lspci -s $mac -v | grep NUMA | awk -F, '{print $5}' | awk '{print $3}'`
numactl -N $node -l python -u models/image_recognition/tensorflow/resnet50v1_5/inference/gpu/int8/eval_image_classifier_inference.py \
--input-graph=${pretrained_model} \
--warmup-steps=10 \
--steps=500 \
--batch-size=${BATCH_SIZE} \
--benchmark
# numactl -N $node -l python benchmarks/launch_benchmark.py \
# --model-name=resnet50v1_5 \
# --precision=${PRECISION} \
# --mode=inference \
# --framework tensorflow \
# --in-graph ${pretrained_model} \
# --output-dir ${OUTPUT_DIR} \
# --batch-size=${BATCH_SIZE} \
# --benchmark-only \
# --gpu \
# $@ \
# ${WARMUP} 2>&1 | tee ${OUTPUT_DIR}//resnet50_${PRECISION}_inf_t0_raw.log
elif [[ ${Tile} == "2" ]]; then
echo "resnet50 v1.5 int8 two-tile inference"
ZE_AFFINITY_MASK=0.0 python benchmarks/launch_benchmark.py \
--model-name=resnet50v1_5 \
--precision=${PRECISION} \
--mode=inference \
--framework tensorflow \
--in-graph ${pretrained_model} \
--output-dir ${OUTPUT_DIR} \
--batch-size=${BATCH_SIZE} \
--benchmark-only \
--gpu \
$@ \
${WARMUP} 2>&1 | tee ${OUTPUT_DIR}//resnet50_${PRECISION}_inf_t0_raw.log &
ZE_AFFINITY_MASK=0.1 python benchmarks/launch_benchmark.py \
--model-name=resnet50v1_5 \
--precision=${PRECISION} \
--mode=inference \
--framework tensorflow \
--in-graph ${pretrained_model} \
--output-dir ${OUTPUT_DIR} \
--batch-size=${BATCH_SIZE} \
--benchmark-only \
--gpu \
$@ \
${WARMUP} 2>&1 | tee ${OUTPUT_DIR}//resnet50_${PRECISION}_inf_t1_raw.log
else
echo"Only Tiles 1 and 2 supported."
exit 1
fi
| true |
5b833ba541a3468140b215f4dc6269a0dcc46b6a
|
Shell
|
galiuskas24/Google-Colab-Terminal-Configuration
|
/colab-ssh-jupyter.sh
|
UTF-8
| 1,064 | 2.734375 | 3 |
[] |
no_license
|
#!/bin/bash
# Create an account on ngrok if you don't already have one, then copy the authtoken
wget https://bin.equinox.io/c/4VmDzA7iaHb/ngrok-stable-linux-amd64.zip
unzip ngrok-stable-linux-amd64.zip
read -p 'Enter the authtoken from ngrok :' authtoken
./ngrok authtoken $authtoken #for example ZT64bWYnXTAsJej4FNFTdsjhsuAQqKqZHn2Sh4g2sfAD
./ngrok tcp 22 &
apt-get install -qq -o=Dpkg::Use-Pty=0 openssh-server pwgen > /dev/null
mkdir -p /var/run/sshd
echo "PermitRootLogin yes" >> /etc/ssh/sshd_config && echo "PasswordAuthentication yes" >> /etc/ssh/sshd_config
/usr/sbin/sshd -D &
mkdir /root/.ssh
read -p "run 'ssh-keygen' on your local pc and \ncopy paste the content of ~/.ssh/id_rsa.pub file" rsa # for example https://imgur.com/a/z62timq
echo $rsa >> /root/.ssh/authorized_keys
echo "You can connect to colab server on terminal with following command"
curl -s http://localhost:4040/api/tunnels | python3 -c \
"import sys, json; print('ssh -o TCPKeepAlive=yes -o ServerAliveInterval=30 root@0.tcp.ngrok.io -p ' + json.load(sys.stdin)['tunnels'][0]['public_url'][-5:])"
| true |
3f3af4dd9794a66384fc91264b454689d7b57661
|
Shell
|
KaOSx/main
|
/xf86-input-synaptics/PKGBUILD
|
UTF-8
| 964 | 2.5625 | 3 |
[] |
no_license
|
pkgname=xf86-input-synaptics
pkgver=1.9.2
pkgrel=1
pkgdesc="Synaptics driver for notebook touchpads"
arch=('x86_64')
license=('custom')
url="http://xorg.freedesktop.org/"
depends=('libxtst' 'mtdev' 'libevdev')
makedepends=('xorg-server' 'libxi' 'libx11' 'xorgproto')
groups=('xorg-drivers' 'xorg')
options=('!libtool')
backup=('etc/X11/xorg.conf.d/50-synaptics.conf')
source=("http://xorg.freedesktop.org/releases/individual/driver/${pkgname}-${pkgver}.tar.xz"
'tapbutton.patch')
sha256sums=('b8fa4aab913fc63754bbd6439e020658c412743a055201ddf212760593962c38'
'31cbe169f3432b120b04688cb4d06c3233f9819a330c4a2ef866774fddd59850')
build() {
cd ${pkgname}-${pkgver}
#patch -p1 -i ${srcdir}/tapbutton.patch
./configure --prefix=/usr
make
}
package() {
cd ${pkgname}-${pkgver}
make DESTDIR=${pkgdir} install
install -m755 -d ${pkgdir}/usr/share/licenses/${pkgname}
install -m644 COPYING ${pkgdir}/usr/share/licenses/${pkgname}/
}
| true |
44b16c1927648ace58e860a790b250c3edcec792
|
Shell
|
lucasmedeiros/sh-scripts
|
/questao6.sh
|
UTF-8
| 312 | 3.6875 | 4 |
[] |
no_license
|
#!/bin/bash
dominio=$1
if [ -z "$1" ]; then
echo "Informe o domínio:"
read dominio
fi
ip=`dig +short $dominio`
echo "O IP do domínio $dominio é: $ip"
if ping -q -c 1 -W 1 $dominio >/dev/null; then
echo "Existe conectividade com o servidor."
else
echo "Não existe conectividade com o servidor."
fi
| true |
fc573e6acaa094890646de95686d58bcd7f2ad4a
|
Shell
|
Wei-N-Ning/networkFoo
|
/ssh/server/subsystem.sh
|
UTF-8
| 1,033 | 3.4375 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# the file created by this dummy subsystem is
# -rw-r--r-- 1 weining wheel 315 5 Mar 15:48 subsystem_thereisacow
# which confirms that it is executed by the login user
install_subsystem() {
rm -f /var/tmp/thereisacow
cat >/var/tmp/thereisacow <<"EOF"
#!/bin/sh
perl -w -E 'my $text = `ls /var/tmp`;
open(my $fh, ">/var/tmp/subsystem_thereisacow");
say $fh $text;'
EOF
chmod a+x /var/tmp/thereisacow
}
define_subsystem_in_conf() {
: <<"TEXT"
115 Subsystem sftp /usr/lib/openssh/sftp-server
116 Subsystem thereisacow /var/tmp/thereisacow
117 AcceptEnv SUBSYSTEM_ARGS
TEXT
    # the client then uses SUBSYSTEM_ARGS to pass arguments to the subsystem
    # SSH: The Definitive Guide, 2nd ed., p. 331
# /////////
# open ssh uses the remote command as the subsystem name:
# this must be specified last on the ssh command line
# /////////
SUBSYSTEM_ARGS='{"a": 123}' \
ssh -s -o 'SendEnv=SUBSYSTEM_ARGS' h6 thereisacow
# the content of the argument can be protected by base64
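    # A sketch of that base64 variant (hypothetical; the subsystem would have
    # to decode the value on the server side):
    # SUBSYSTEM_ARGS=$(printf '%s' '{"a": 123}' | base64) \
    # ssh -s -o 'SendEnv=SUBSYSTEM_ARGS' h6 thereisacow
    # # server side: printf '%s' "$SUBSYSTEM_ARGS" | base64 -d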
}
| true |
851c2710ada454e1f5c3f85c32d4de7ef36bd59b
|
Shell
|
rosalindxu/MD-SolEFP-methods
|
/5-param/scripts/optimization/run_charge.sh
|
UTF-8
| 3,858 | 3.390625 | 3 |
[] |
no_license
|
#!/bin/bash
echo "Before running this script, change the orientation and initial scan distance list."
read -p "Working directory (full path name; do not type "."!): " dir
read -p "Trial No.: " tr
read -p "Number of charge combinations to be tested: " nComb
echo "Before you continue, place mescn-1.itp to mescn-$nComb.itp in $dir/itp."
echo "In addition, place all QM .xyz trajectories in $dir/trj."
read -p "Name of compound: " compound
read -p "total number of atoms in dimer: " natoms
read -p "number of frames in PES scan trajectory: " nframes
read -p "QM PES scan increment: " incre
#Change the list of orientations(ort) and initial scan distances(initD) here
water_ort=( KNwater1 KNwater2 KCwater3 KSwater1 KSwater2 )
water_initD=( 2.50 2.50 2.50 2.50 2.50 )
nOrt=5
#Gas phase tip3p water energy
waterE=0.000873
cd $dir
# folder structure: dimer (i) - LJcomb (j) - orientation (k)
# preparing .tpr file for each dimer and LJcomb
for i in water gas ; do
mkdir $compound-$i/trial-$tr
for j in $(seq 1 $nComb); do
subdir=$compound-$i/trial-$tr/LJcomb-$j
mkdir $subdir
cp $compound-$i/$compound-$i.gro $subdir
cp $compound-$i/$compound-$i.top $subdir
cp $compound-$i/em.mdp $subdir
cp itp/mescn-$j.itp $subdir/mescn.itp
if [[ $i != gas ]]; then
cp $compound-$i/tip3p.itp $subdir
fi
cd $subdir
gmx grompp -f em.mdp -c $compound-$i.gro -p $compound-$i.top -o $compound-$i-$j.tpr
if [[ $i == "water" ]]; then
for k in $(seq 1 $nOrt); do
subsubdir=${water_ort[$((k-1))]}-$j
mkdir $subsubdir
cp $compound-$i-$j.tpr $subsubdir/${water_ort[$((k-1))]}-$j.tpr
done
rm $compound-$i-$j.tpr
fi
cd $dir
done
done
# folder structure: dimer (i) - LJcomb (j) - orientation (k)
# preparing .gro trajectory for each dimer and orientation
cd $dir/trj
for i in water; do
for k in ${water_ort[*]}; do
(echo "$k-scan.xyz"; echo $k ; echo $natoms; echo $nframes;) | $dir/xyz_to_gro-charge.awk
for j in $(seq 1 $nComb); do
cp $k-scan.gro $dir/$compound-$i/trial-$tr/LJcomb-$j/$k-$j
done
done
done
rm *.gro
cd $dir
# folder structure: dimer (i) - LJcomb (j) - orientation (k)
# Obtain MM PES
for i in gas water; do
for j in $(seq 1 $nComb); do
if [[ $i == gas ]]; then
cd $compound-$i/trial-$tr/LJcomb-$j
gmx mdrun -v -deffnm $compound-$i-$j
echo "11 0" | gmx energy -f $compound-$i-$j.edr -o $compound-$i-$j-ener.xvg
grep -v "^@" $compound-$i-$j-ener.xvg | grep -v "^#" > temp.dat
gasE[$j]=$( awk '{print $2}' temp.dat )
rm temp.dat
cd $dir
elif [[ $i == "water" ]]; then
mkdir -p MeSCN-$i/trial-$tr/PES
for k in $(seq 1 $nOrt); do
cd $compound-$i/trial-$tr/LJcomb-$j/${water_ort[$((k-1))]}-$j
gmx mdrun -v -deffnm ${water_ort[$((k-1))]}-$j -rerun ${water_ort[$((k-1))]}-scan.gro
echo "11 0" | gmx energy -f ${water_ort[$((k-1))]}-$j.edr -o ${water_ort[$((k-1))]}-$j-ener.xvg
grep -v "^@" ${water_ort[$((k-1))]}-$j-ener.xvg | grep -v "^#" |
awk -v gasE=${gasE[$j]} -v nframes=$nframes -v initD=${water_initD[$((k-1))]} -v incre=$incre -v waterE=$waterE 'NR==1,NR==$nframes{printf("%3.2f %f\n", initD+(NR-1)*incre, $2-gasE-waterE)}' > ${water_ort[$((k-1))]}-$j-MM-PES.dat
cp ${water_ort[$((k-1))]}-$j-MM-PES.dat $dir/MeSCN-$i/trial-$tr/PES
cd $dir
done
fi
done
done
# folder structure: dimer (i) - LJcomb (j) - orientation (k)
# find minimum interaction energies and distances
for i in water; do
cd $dir/MeSCN-$i/trial-$tr/PES
for k in $(seq 1 $nOrt); do
for j in $(seq 1 $nComb); do
(echo "MM"; echo ${water_ort[$((k-1))]}; echo $j;) | $dir/find_min.awk >> $dir/MeSCN-$i/trial-$tr/MeSCN-$i-MM-trial-$tr-summary.dat
done
done
cd $dir
done
| true |
2b11306d8c8c75cc5a7f2526e2c8eb4d4d2ce1d4
|
Shell
|
nalim2/AI-lectures
|
/MachineLearning/compile.sh
|
UTF-8
| 511 | 2.78125 | 3 |
[] |
no_license
|
for input in ./*.tex
do
echo '-----------------------------------'
echo 'compiling' ${input}
pdflatex -interaction=nonstopmode ${input} > /dev/null
grep "Warning" ${input%.*}.log
grep "Missing" ${input%.*}.log
grep -A2 "Undefined control sequence" ${input%.*}.log
grep -A2 "Error" ${input%.*}.log
done
pdflatex script.tex
makeindex script.idx
# for file in meshes/*
# do
# cmd="meshlabserver -i $file -o ${file%.*}.ply -s cleanMeshes.mlx -om"
# echo $cmd
# $cmd
# done
| true |
43380a0be06742c267aaa44d259714110932f8f7
|
Shell
|
venukolla/SSHConfigure
|
/Step1.sh
|
UTF-8
| 2,667 | 3.6875 | 4 |
[] |
no_license
|
#!/bin/bash
# Created by: daniel ernesto lopez barron
# University of Missouri Kansas City
# April 28 2016
# PARAMS
# TODO dev Remove this features so it can be passed as
# arguments to the script
# PARAMS
# server nodePrefix,start,end user,password
# Ej: nm node-,1,4 dl544,daniel
#UPDATED
scriptUsage(){
echo "USAGE: Step1.sh <server> <nodePrefix,start,end> <user,password> <source>"
echo " + server: Indicates the name of the server node (i.e. Hadoop's Namenode)"
echo " + nodePrefix: Corresponds with the prefix of the cluster's Datanodes"
echo " + startNode: First datanode, naming must follow a sequential convention"
echo " + lastNode: Last datanode, naming must follow a sequential convention"
echo " + user: User that will manage the cluster"
echo " + password: User's password"
echo " "
echo " Edit the source file previous to run the Master.sh script"
echo " Example: Master.sh nm cp-,1,3 doe,userpass"
echo " Will configure the cluster as user \"doe\" with password \"userpass\""
echo " With \"nm\" as Namenode and cp-1, cp-2, cp-3 as Datanodes"
}
serverName=$1
nodePrefix=`echo $2 | cut -d, -f1`
startNode=`echo $2 | cut -d, -f2`
lastNode=`echo $2 | cut -d, -f3`
user=`echo $3 | cut -d, -f1`
password=`echo $3 | cut -d, -f2`
printf "\n>> Script to initialize a node\n ##----------------------------\n >> System update STARTS\n"
sudo add-apt-repository ppa:webupd8team/java && sudo apt-get update && sudo apt-get install oracle-java8-installer
#sudo apt-get -y update && sudo apt-get -y install openjdk-8-jdk ssh rsync sshpaass && update-alternatives --config java
sudo apt-get -y update && sudo apt-get -y install ssh rsync sshpass && update-alternatives --config java
printf ">> System update FINISHED\n\n"
printf "\n>> Generating keys STARTS\n"
echo -e 'y\n'|ssh-keygen -t dsa -P '' -N '' -f ~/.ssh/id_dsa &&
cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys
# ssh-keygen -t dsa -P '' -N ' ' -f ~/.ssh/id_dsa && cat ~/.ssh/id_dsa.pub >> ~/.ssh/authorized_keys && sudo ssh-copy-id -i ~/.ssh/id_dsa.pub dl544@nm && sudo ssh-copy-id -i ~/.ssh/id_dsa.pub dl544@cp-1 && sudo ssh-copy-id -i ~/.ssh/id_dsa.pub dl544@cp-2 && sudo ssh-copy-id -i ~/.ssh/id_dsa.pub dl544@cp-3
# Don't touch this parameters
passCommand="sshpass -p \"$password\""
optHostCheck="-o StrictHostKeyChecking=no"
optKey="-i ~/.ssh/id_dsa.pub"
sshCopy="echo \"Starts\" "
sshCopy="$passCommand ssh-copy-id $optHostCheck $optKey $user@$serverName"
for node in `seq $startNode $lastNode`;
do
server="$user@$nodePrefix$node"
nextNode="&& $passCommand ssh-copy-id $optHostCheck $optKey $server"
sshCopy="$sshCopy $nextNode"
done
eval $sshCopy
printf "\n>> Generating keys FINISHED"
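# For the documented example "Step1.sh nm cp-,1,3 doe,userpass" the loop above
# composes roughly the following chain (a sketch; the key path comes from the
# ssh-keygen call earlier):
#   sshpass -p "userpass" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/id_dsa.pub doe@nm \
#     && sshpass -p "userpass" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/id_dsa.pub doe@cp-1 \
#     && sshpass -p "userpass" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/id_dsa.pub doe@cp-2 \
#     && sshpass -p "userpass" ssh-copy-id -o StrictHostKeyChecking=no -i ~/.ssh/id_dsa.pub doe@cp-3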
| true |
26b239647e1aa3578d76bd01ca41c75f720ea84a
|
Shell
|
Makebignew/image
|
/chickip.sh
|
UTF-8
| 2,200 | 3.6875 | 4 |
[] |
no_license
|
#!/bin/bash
# chickip.sh
# Define the IP range; specify at most the first three octets!!! Leave out the
# fourth octet. If both bounds are identical, only the fourth octet is scanned.
ip_start=104.16.0
ip_ending=104.16.65
# Number of concurrent processes, i.e. how many IPs are pinged at a time
thread_num=30
# How many times each IP is pinged
time2=25
# Maximum allowed latency
time1=170
# Maximum allowed packet loss
lost=6
###################################
# Create a FIFO
myfifo="myfifo"
mkfifo ${myfifo}
# Point FD6 at the FIFO
exec 6<>${myfifo}
rm -f ${myfifo}
# Seed FD6 with $thread_num placeholder tokens
for ((i=0;i<=${thread_num};i++))
do
{
echo
}
done >&6
last_ping() {
for ((ip_sub=1;ip_sub<=255;ip_sub++));
do
read -u6
{
# Ping each IP $time2 times and capture its packet loss and latency
ping=`ping -c $time2 $ip_start.$ip_sub|grep -E 'loss|avg'`
lose=`echo $ping|grep loss|awk '{print $6}'|awk -F "%" '{print $1}'|gawk -F . '{print $1}'`
# Discard the IP if its packet loss exceeds $lost
if [ $lose -ge $lost ];then
echo "discard $ip_start.$ip_sub packet loss $lose"
else
# Get the IP's latency; discard if above $time1, otherwise save it to chickip.log
num=`echo $ping|grep avg | gawk -F / '{print $5}'|gawk -F . '{print $1}'`
if [ $num -ge $time1 ];then
echo "discard $ip_start.$ip_sub latency $num"
else
echo "keep $ip_start.$ip_sub latency $num packet loss $lose"
echo "$ip_start.$ip_sub latency:$num packet loss $lose" >> chickip.log
fi
fi
echo >&6 # When the job finishes, write a newline back into FD6, restoring the token consumed by read -u6
} &
done
}
if [ $ip_start = $ip_ending ];then
last_ping
else
for ((third=${ip_start##*.};third<=${ip_ending##*.};third++))
do
{
ip_2="${ip_start%.*}.$third"
rm -f $ip_start
ip_start=$ip_2
last_ping
}
done
fi
# Wait for all jobs to finish; without wait the script would exit with jobs still running in the background
wait
# Close the FD6 pipe
exec 6>&-
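# The FD6/FIFO construction above is a general-purpose concurrency limiter.
# A minimal self-contained sketch of the same pattern (names are illustrative,
# not part of the original script):
demo_limited_jobs() {
    local max_jobs=4 fifo
    fifo=$(mktemp -u)
    mkfifo "$fifo"
    exec 7<>"$fifo"   # use FD7 so the demo does not clash with FD6 above
    rm -f "$fifo"
    for ((t=0;t<max_jobs;t++)); do echo; done >&7   # seed tokens
    for task in $(seq 1 8); do
        read -u7   # take a token; blocks while max_jobs jobs are running
        {
            sleep 1; echo "task $task done"
            echo >&7   # return the token
        } &
    done
    wait
    exec 7>&-
}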
| true |
1aab7112aaff9aec825009c58b7977e2924ae782
|
Shell
|
shrank/networking-scripts
|
/scripts/find_devices.sh
|
UTF-8
| 309 | 3.234375 | 3 |
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# uses nmap to find hosts and stores them into a file
# sh find_devices.sh 10.10.0.0-255
filename=device_scan-$1
nmap -sP -oG - $1 | awk '$5=="Up"{print $2}' >> $filename
cat $filename | sort | uniq > $filename.tmp
rm $filename
mv $filename.tmp $filename
echo devices discovered:
wc -l $filename
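# The temp-file sort/uniq dance above can be collapsed into a single pipeline;
# an equivalent sketch:
#   nmap -sP -oG - "$1" | awk '$5=="Up"{print $2}' | sort -u > "$filename"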
| true |
6b107824a8fcc7e067764e78e4e29c4ae001d4dd
|
Shell
|
raglos/ye
|
/imguralbum
|
UTF-8
| 382 | 3.34375 | 3 |
[] |
no_license
|
#!/bin/bash
if [[ -z "$1" ]]; then
echo "usage ""$0"" <imgur album url>";
exit 1;
fi
curl -s "$1" | grep '^\s*image\s*:\s*' | grep -o '{"hash":"[^\"]*","title":"[^\"]*","description":null,"width":[0-9]*,"height":[0-9]*,"size":[0-9]*,"ext":"[^\"]*"' | sed 's/^.*ash...\([^\"]*\).*ext...\(.*\).$/https:\/\/i.imgur.com\/\1\2/g' | xargs -P 16 -n 1 wget -q -nc --show-progress
| true |
21fb1e1d303ec42f2cee186ee03cdbbfccc4ce80
|
Shell
|
pkgw/casa
|
/code/admin/system/astat
|
UTF-8
| 4,266 | 3.140625 | 3 |
[] |
no_license
|
#!/bin/sh
#-----------------------------------------------------------------------------
# astat: Collate AIPS++ revision statistics from an 'alog' report
#-----------------------------------------------------------------------------
#
# Copyright (C) 1994,1997
# Associated Universities, Inc. Washington DC, USA.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Correspondence concerning AIPS++ should be addressed as follows:
# Internet email: aips2-request@nrao.edu.
# Postal address: AIPS++ Project Office
# National Radio Astronomy Observatory
# 520 Edgemont Road
# Charlottesville, VA 22903-2475 USA
#
#-----------------------------------------------------------------------------
# Usage: alog [parameters] | astat
#-----------------------------------------------------------------------------
# sed/awk filter which collates revision statistics for each programmer from
# an 'alog' report. It reports separately the number of new files checked in,
# revisions of previously checked in files, and the total of these.
#
# For each revision made (but not for the initial checkin) RCS provides a
# rough indication of the extent of the change by reporting the number of
# lines added and the number of lines deleted. Cumulative totals for these
# are provided in the output, but note that these numbers can be misleading.
# Large values may, for example, result from checking in a changed PostScript
# file, or test program output.
#
# Example:
#
# alog -b --package=aips | astat
#
# Collects statistics for the "aips" package for the latest AIPS++ major
# version.
#
# Notes:
# 1) The AIPS++ master and slave RCS files contain only the current and
# preceeding versions. The archive in Charlottesville contains all
# versions up to the base release of the current major version.
#
# 2) Account names for some checkins from remote sites have been normalized
# to standard form.
#
# 3) Only a fraction of the efforts of AIPS++ workers is reflected in the
# checkin statistics. This fraction may be quite low for some but high
# for others.
#
# Original: 1994/02/10 by Mark Calabretta, ATNF
# $Id$
#-----------------------------------------------------------------------------
sed -n -e '/^date: /{s/; state: Exp;//;s/ lines://;s/^.*author: //p;}' | \
sed -e 's/^dls/dshone/' \
-e 's/^gvd/gvandiep/' \
-e 's/^hjv/hvosmeij/' \
-e 's/^hr/hravlin/' \
-e 's/^kamran/ktavakol/' \
-e 's/^olnon/folnon/' \
-e 's/^rmarston/rmarson/' \
-e 's/^sanjay/sbhatnag/' \
-e 's/^tcornwell/tcornwel/' | \
sort | \
awk \
'BEGIN { \
user = "" ; new = 0 ; revs = 0 ; plus = 0 ; minus = 0 ; \
print "aips2prg total new revs plus minus" ; \
print "-------- ----- --- ---- ---- -----" \
} \
user == "" { \
user = $1 \
} \
user == $1 { \
if (NF == 1) { \
new++ ; \
} else { \
revs++ ; plus += $2 ; minus += $3 \
} \
} \
user != $1 { \
print sprintf("%-8s%8d%6d%6d%8d%8d",user,new+revs,new,revs,plus,minus) ; \
if (NF == 1) { \
user = $1 ; new = 1 ; revs = 0 ; plus = 0 ; minus = 0 \
} else { \
user = $1 ; new = 0 ; revs = 1 ; plus = $2 ; minus = $3 \
} \
} \
END { print sprintf("%-8s%8d%6d%6d%8d%8d",user,new+revs,new,revs,plus,minus) }'
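# Quick smoke test with synthetic alog-style input (field layout assumed from
# the sed expressions above; not real AIPS++ history):
#   printf '%s\n' \
#     'date: 1994/02/10 12:00:00; author: mcalabre; state: Exp;' \
#     'date: 1994/02/11 09:30:00; author: mcalabre; state: Exp; lines: +10 -2' | astat
# should report one new file and one revision (+10/-2) for mcalabre.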
| true |
495376ed8258fb439e3d1782d879c33b24752ffd
|
Shell
|
giorgosma/Simulation-of-heat-transfer-in-2d-surface
|
/OpenmpProj/run_prog.sh
|
UTF-8
| 729 | 2.921875 | 3 |
[] |
no_license
|
#!/bin/bash
init='export PATH="/home/giorgos/mpich-install/bin:$PATH"'
eval $init
compile="mpicc -o project_mpi_reduce_omp project_mpi_reduce_omp.c -lm -fopenmp -g3"
compare="diff PM/initial.bin PM/output.bin"
delete="rm -f ./PM/*.bin ./PM/*.txt"
eval $delete > ./PM/del.txt
eval $compile
#declare -A array_of_tasks
array_of_tasks=( 1 4 9 16 25 36 49 64 81 100 121 128 144 160 )
size=14
#for i in "${array_of_tasks[@]}"
#do
tasks=$1
threads=$2
X=$3
Y=$4
#task=$i
#echo -n $i
exec1="time mpirun -n $tasks -genv OMP_NUM_THREADS=$threads -genv I_MPI_PIN_DOMAIN=omp ./project_mpi_reduce_omp $tasks $threads $X $Y"
#eval $exec1
eval $exec1 > ./PM/out.txt
eval $compare
#done
exit 0
| true |
40e93aa949cc9e01ca8ac60694222568c41507d4
|
Shell
|
kchhero/suker_python_project
|
/sukerScripts/bashScripts/args/args_pass_to_func2.sh
|
UTF-8
| 679 | 3.5625 | 4 |
[] |
no_license
|
#!/bin/bash
function abc()
{
arg1=$1; shift
array=( "$@" )
last_idx=$(( ${#array[@]} - 1 ))
arg2=${array[$last_idx]}
unset array[$last_idx]
echo "arg1=$arg1"
echo "arg2=$arg2"
echo "array contains:"
printf "%s\n" "${array[@]}"
}
ttt=("two" "three")
abc "one" "${ttt[@]}" "four"
function def()
{
arg1="$1"
arg2=("${!2}")
arg3="$3"
arg4=("${!4}")
echo "arg1=$arg1"
echo "arg2 array=${arg2[@]}"
echo "arg2 #elem=${#arg2[@]}"
echo "arg3=$arg3"
echo "arg4 array=${arg4[@]}"
echo "arg4 #elem=${#arg4[@]}"
}
arr=(ab 'x y' 123)
arr2=(a1 'a a' bb cc 'it is one')
def "foo" "arr[@]" "bar" "arr2[@]"
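# Since bash 4.3, namerefs avoid the "${!2}" indirection used in def() above.
# A sketch of the same call shape (ghi is a hypothetical name):
ghi() {
    local -n ref1=$1 ref2=$2   # ref1/ref2 alias the caller's arrays
    echo "first array=${ref1[*]} (#elem=${#ref1[@]})"
    echo "second array=${ref2[*]} (#elem=${#ref2[@]})"
}
ghi arr arr2   # note: pass bare array names, not arr[@]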
| true |
43acf7e42545e024f5e4bacfa22646a8b14db14b
|
Shell
|
Michael07220823/Opencv4-one-click-installation
|
/sh/link_cv2_to_python3_virtual_environment.sh
|
UTF-8
| 531 | 2.5625 | 3 |
[] |
no_license
|
# Create a cv2.cpython-37m-arm-linux-gnueabihf.so shortcut to ~/.virtualenvs/py3cv4/lib/python3.7/site-packages/cv2.so
ln -s /usr/local/python/cv2/python-3.7/cv2.cpython-37m-arm-linux-gnueabihf.so ~/.virtualenvs/py3cv4/lib/python3.7/site-packages/cv2.so
echo "link_cv2_to_python3_virtua_environment.sh finish !"
echo "in terminal keyin 'workon opencv-4.4.0' to activate virtual environment"
echo "and use 'python3' command to enter python and 'import cv2' then keyin 'cv2.__version__.'"
echo "and you can see '4.4.0' cv2 version."
| true |
1fef1eaf0803fe7479d99cee32d0704dcf3317a0
|
Shell
|
benaux/env-pub_
|
/tools-files_bk71.4x/autoscripts_bk71.41/onetools/one-new.sh
|
UTF-8
| 2,850 | 3.71875 | 4 |
[] |
no_license
|
#!/bin/sh
HELP='create a new item in the onebox directory. Special remark when writing the title: you can also add tags after "--"'
USAGE='[note] [mark|bookmark] [snip|snippet] [web|webref] [-noninter(active)] [-help] title'
here=$(dirname $0)
labelinput=$1
shift
titleinput="$@"
onebox=$HOME/onebox
onehist=$HOME/.onehist
tagfile=$HOME/.tagfile
infotools=$HOME/tools/autoscripts/infotools
moreutils=$HOME/tools/moreutils
stringutils=$moreutils/strings
die () { echo $@; exit 1; }
[ -d "$onebox" ] || die "err: no onebox in ~/onebox"
strings_match_file=$stringutils/strings-match-file.pl
[ -f "$strings_match_file" ] || die "err: no strings_match_file script"
stamp_base=$infotools/stamp-base26.sh
[ -f "$stamp_base" ] || die "err: no stamp-base script"
[ -f "$tagfile" ] || die "err: no tagfile"
clean_string=$stringutils/clean-string.sh
noninteractive=
while [ $# != 0 ] ; do
case "$1" in
-h|*-help)
echo "usage: $USAGE"
echo ""
die "Help: $HELP" ;;
-noninter*) noninteractive=1 ;;
#-t|*-tag|*-tags) shift && tags="$1" || die "Err: no tags" ;;
-*)
die "Err: unknown option. usage: $usage"
;;
esac
shift
done
label= url=
if [ -n "$labelinput" ] ; then
case "$labelinput" in
bookmark|mark)
label='bookmark'
url=$1
shift
case "$url" in
http*|www*) : ;;
*) die "err: '$url' no valid url";;
esac
;;
snippet|snip) label='snippet';;
*) die "Err: unknown label $label" ;;
esac
else
read -p "Label: (bm bookmark, )" label
fi
[ -n "$label" ] || die "err: no label"
[ -n "$titleinput" ] || read -p "Title/Filname: " titleinput
[ -n "$titleinput" ] || die "err: no title"
splitcode='@t=split(/\-\-/, $ARGV[0]); die "Err: toomuch --" if (@t>2)'
title="$(perl -e "$splitcode; print \$t[0];" "$titleinput")"
titletags="$(perl -e "$splitcode; print \$t[1];" "$titleinput")"
cleantitle="$(sh $clean_string "$title")"
[ "$?" = "0" ] || die "Err: problem with clean-string $?: $cleantitle"
cleantags=
[ -n "$titletags" ] && {
cleantags="$(sh $clean_string "$titletags")"
[ "$?" = "0" ] || die "Err: problem with clean-string $?: $cleantags"
}
[ -n "$cleantags" ] || {
tags="$(perl $strings_match_file $tagfile "$title")"
[ -n "$tags" ] && cleantags="$(sh $clean_string "$tags")"
}
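# Split example (illustrative input): titleinput='meeting notes -- work urgent'
# yields title='meeting notes ' and titletags=' work urgent'; both are then
# normalized by clean-string.sh.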
stamp=$(sh $stamp_base)
filename="$label $title $stamp.txt"
filepath="$onebox/$filename"
if [ -f "$filepath" ] ;then
die "err: file $filename already exists"
else
label_title="$label: $title"
{
echo "$label_title"
echo ""
echo "$stamp"
echo ""
[ -n "$url" ] && {
echo "url: $url"
echo ""
}
echo "-------"
echo ""
} > "$filepath"
echo "onebox file path in ~/.onehist: $filepath"
echo "$filepath" >> "$onehist"
fi
| true |
9c414011b498d250d791ef05688aeb637e61e4d0
|
Shell
|
leonardoalvesprates/k8s
|
/lab/rancher/overlay/overlay-run.bash
|
UTF-8
| 1,041 | 2.96875 | 3 |
[] |
no_license
|
#!/bin/bash
echo
echo
echo 'overlaytest PODS'
echo
kubectl -n default get pods -l name=overlaytest -o wide
echo
echo '################'
echo
echo 'creating single web pod'
kubectl -n default run web-first --image=leonardoalvesprates/web-first:v0.1 --port=8080
echo
echo 'waiting 60 seconds'
sleep 60
echo 'creating clusterIP svc for web-first'
kubectl -n default expose pod web-first --port=80 --target-port=8080
echo
echo 'waiting 10 seconds'
sleep 10
echo
echo 'listing pods and svc default namespace'
echo
kubectl -n default get pod,svc -o wide
echo
echo
echo 'running some tests'
echo
for POD in $(kubectl -n default get pods -l name=overlaytest --no-headers -o custom-columns=":metadata.name")
do
echo $POD
kubectl -n default exec -it $POD -- nslookup web-first
kubectl -n default exec -it $POD -- curl http://web-first
echo
echo
done
echo 'deleting web pod and svc'
kubectl -n default delete pod web-first
kubectl -n default delete svc web-first
echo 'waiting 30 seconds'
sleep 30
echo
kubectl -n default get pod,svc -o wide
| true |
b8bb8cfe217a2ed8707ba443efda8a0eabe6f73b
|
Shell
|
rutsky/semester06
|
/Processors_architecture/RF-9000/firmware/build_fw.sh
|
UTF-8
| 3,123 | 3.9375 | 4 |
[] |
no_license
|
#!/bin/bash
# build_fw.sh
# Script for building firmware with hacks.
# Vladimir Rutsky <altsysrq@gmail.com>
# 15.03.2009
set -e
FWfixToolFile="../fw_correct_header/fw_correct_header"
origFWFile="FW_UB890_1.15.rom"
baseVersionStr="1.15" # 4 bytes
tempFWFile=".fw_tmp"
tempFWFileDst=".fw_tmp2"
versionFile=".fw_version"
buildLockFile=".lock"
outputFWFile="FW_UB890.rom"
outputFontFile="FontData.FNT"
if [ -e $buildLockFile ]; then
# Lock file exists.
echo "Building process is locked. Remove '$buildLockFile' file."
exit 0
fi
increase_version()
{
version=`cat $versionFile`
echo -n "Increasing version from '"$version"' " #debug
version=$(($version + 1))
echo "to '"$version"'." #debug
echo $version > $versionFile
}
inject_file()
{
fileName=$1
pos=$2
echo "Injecting '"$fileName"' into position "`printf "0x%08x" $pos` #debug
dd if=$fileName of=$tempFWFile bs=1 seek=$pos count=`stat -c%s $fileName` status=noxfer conv=notrunc
}
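# Worked example: inject_file jump.bin $((0x9d48)) runs (for an assumed
# 4-byte jump.bin):
#   dd if=jump.bin of=.fw_tmp bs=1 seek=40264 count=4 status=noxfer conv=notrunc
# since 0x9d48 = 40264 and count is taken from stat -c%s.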
inject_fw_reset_version_str()
{
fwResetVersionStrTmpFile=".fwResetVersionStrTmpFile"
fwResetVersionStrOffset=$((0x00029ff7))
printf "#%04X" $version > $fwResetVersionStrTmpFile
fwResetVersionStr=`cat $fwResetVersionStrTmpFile`
echo "Writing firmware reset version string: '"$fwResetVersionStr"'."
inject_file $fwResetVersionStrTmpFile $fwResetVersionStrOffset
rm -f $fwResetVersionStrTmpFile
}
inject_version_str()
{
versionStrTmpFile=".versionStrTmpFile"
versionStrOffset=$((0x0000a014))
# 16 bytes max with zero.
printf "Bob #%04X %4s\0" $version $baseVersionStr > $versionStrTmpFile
versionStr=`cat $versionStrTmpFile`
echo "Writing version string: '"$versionStr"'."
inject_file $versionStrTmpFile $versionStrOffset
rm -f $versionStrTmpFile
}
inject_date_str()
{
dateStrTmpFile=".dateStrTmpFile"
dateStrOffset=$((0x00009ffc))
# Note: time zone is not included.
date "+%F %H:%M:%S" | tr -d '\n' > $dateStrTmpFile
printf "\0" >> $dateStrTmpFile
dateStr=`cat $dateStrTmpFile`
echo "Writing date string: '"$dateStr"'."
inject_file $dateStrTmpFile $dateStrOffset
rm -f $dateStrTmpFile
}
append_func()
{
# Adding function code
cat func.bin >> $tempFWFile
}
inject_func()
{
funcOffset=$((0xfa600))
inject_file func.bin $funcOffset
}
inject_jump()
{
jumpOffset=$((0x9d48))
# Adding function code
inject_file jump.bin $jumpOffset
}
inject_nop()
{
inject_file nop.bin $1
}
# Increasing version first.
increase_version
# Current version.
version=`cat $versionFile`
# Making copy of original firmware.
cp $origFWFile $tempFWFile
chmod 0644 $tempFWFile
# Injecting version number.
inject_fw_reset_version_str
inject_version_str
inject_date_str
#append_func
#inject_func
#inject_jump
#inject_file version_hook.bin $((0x9b80))
inject_file fw_version_inj.bin $((0x9cac))
#inject_nop $((0x9d44)) # Not doing PutString before jump injection.
# Fixing the checksum.
$FWfixToolFile $tempFWFile $tempFWFileDst
mv $tempFWFileDst $tempFWFile
# Renaming hacked firmware to result file.
mv $tempFWFile $outputFWFile
# Locking building process.
#touch $buildLockFile # creating lock file
| true |
5cce315c3c0d9f2ec95c7e34d0fc60562d28b430
|
Shell
|
sabhaAmjad/Thesis
|
/consumer.sh
|
UTF-8
| 2,729 | 3.4375 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
TEST_QUERIES="s1 s2 s3 s4 "
type="weblogs websales"
weblogs="weblogs"
SPARK_SUBMIT=/usr/local/spark/spark-2.3.2-bin-hadoop2.7/bin/spark-submit
class_test=//IdeaProjects/Thesis/src/main/scala/active/Consumer.scala
JAR_File=//IdeaProjects/Thesis/target/Thesis-1.0-SNAPSHOT-jar-with-dependencies.jar
# Initialize log file for data loading times
LOG_FILE_EXEC_TIMES="query_times_consumer.csv"
if [ ! -e "$LOG_FILE_EXEC_TIMES" ]
then
touch "$LOG_FILE_EXEC_TIMES"
echo "STARTDATE_EPOCH|STOPDATE_EPOCH|DURATION_MS|STARTDATE|STOPDATE|TYPE|QUERY" >> "${LOG_FILE_EXEC_TIMES}"
fi
if [ ! -w "$LOG_FILE_EXEC_TIMES" ]
then
echo "ERROR: cannot write to: $LOG_FILE_EXEC_TIMES, no permission"
exit 1
fi
for i in ${type}
do
echo "type: ${i}"
if [ ${i} == ${weblogs} ]
then
for j in ${TEST_QUERIES}
do
# Measure time for query execution time
# Start timer to measure data loading for the file formats
STARTDATE="`date +%Y/%m/%d:%H:%M:%S`"
STARTDATE_EPOCH="`date +%s`" # seconds since epochstart
echo "${i} Spark Structured Streaming query: ${j}"
${SPARK_SUBMIT} --packages org.apache.spark:spark-sql-kafka-0-10_2.11:2.3.2 --class active.Consumer ${JAR_File} ${i} ${j} > consumer_${i}_query_${j}_log.txt 2>&1
# Calculate the time
STOPDATE="`date +%Y/%m/%d:%H:%M:%S`"
STOPDATE_EPOCH="`date +%s`" # seconds since epoch
DIFF_s="$(($STOPDATE_EPOCH - $STARTDATE_EPOCH))"
DIFF_ms="$(($DIFF_s * 1000))"
# log the times in load_time.csv file
echo "${STARTDATE_EPOCH}|${STOPDATE_EPOCH}|${DIFF_ms}|${STARTDATE}|${STOPDATE}|${j}|Query ${i}" >> ${LOG_FILE_EXEC_TIMES}
done
else
echo "${i} Spark Structured Streaming query: s5"
# Start timer to measure query execution time
STARTDATE="`date +%Y/%m/%d:%H:%M:%S`"
STARTDATE_EPOCH="`date +%s`" # seconds since epoch
${SPARK_SUBMIT} --packages org.apache.spark:spark-sql-kafka-0-10_2.11:2.3.2 --class active.Consumer ${JAR_File} ${i} s5 > consumer_${i}_query_s5_log.txt 2>&1
# Calculate the time
STOPDATE="`date +%Y/%m/%d:%H:%M:%S`"
STOPDATE_EPOCH="`date +%s`" # seconds since epoch
DIFF_s="$(($STOPDATE_EPOCH - $STARTDATE_EPOCH))"
DIFF_ms="$(($DIFF_s * 1000))"
# log the times in load_time.csv file
echo "${STARTDATE_EPOCH}|${STOPDATE_EPOCH}|${DIFF_ms}|${STARTDATE}|${STOPDATE}|s5|Query ${i}" >> ${LOG_FILE_EXEC_TIMES}
fi
done
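# The start/stop bookkeeping above is repeated per branch; a reusable sketch
# (log_timed is a hypothetical helper, same LOG_FILE_EXEC_TIMES format):
log_timed() { # usage: log_timed <type> <query> <command...>
    local type=$1 query=$2; shift 2
    local start stop start_epoch stop_epoch
    start="`date +%Y/%m/%d:%H:%M:%S`"; start_epoch="`date +%s`"
    "$@"
    stop="`date +%Y/%m/%d:%H:%M:%S`"; stop_epoch="`date +%s`"
    echo "${start_epoch}|${stop_epoch}|$(( (stop_epoch - start_epoch) * 1000 ))|${start}|${stop}|${query}|Query ${type}" >> "${LOG_FILE_EXEC_TIMES}"
}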
| true |
d1aeef9417c88773574edf7b422b788330efc654
|
Shell
|
besarthoxhaj/github-play
|
/script.sh
|
UTF-8
| 373 | 3.6875 | 4 |
[] |
no_license
|
#!/bin/sh
# running git init twice is safe
git init
while read date
do
fileName=`echo "$date" | tr " " "_"`
#echo $fileName -> Sun_Jan_06
date="$date 10:00 2014 +0300"
echo $date
#echo $date -> Sun Jan 06 10:00 2014 +0300
echo "Creating file... $fileName"
touch "$fileName"
git add "$fileName"
git commit --date="$date" -m "$fileName"
done < dates.txt
| true |
edd2258d5c5d5b835256b998dae6d72937c05f57
|
Shell
|
AlexanderOnbysh/edu
|
/masters/streaming-programming/cloud-services-assignment/scripts/create-alarm.sh
|
UTF-8
| 689 | 2.765625 | 3 |
[] |
no_license
|
#!/bin/bash
echo "Creating topic with billing alerts"
topic=$(aws sns create-topic \
--name "BillingAlert" \
--output "text" \
--tags "Key=env,Value=assignment" \
--query 'TopicArn'
)
echo "Subscribing to $topic"
aws sns subscribe \
--topic-arn "$topic" \
--protocol email \
--notification-endpoint "alexander.onbysh@gmail.com"
echo "Setting up billing alarm"
aws cloudwatch put-metric-alarm \
--alarm-name "billing-alarm" \
--namespace AWS/Billing \
--metric-name EstimatedCharges \
--evaluation-periods 1 \
--period 21600 \
--statistic Maximum \
--comparison-operator GreaterThanThreshold \
--dimensions "Name=Currency,Value=USD" \
--threshold "0" \
--actions-enabled \
--alarm-actions "$topic"
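# Teardown sketch (standard aws CLI subcommands; reuses $topic from above):
#   aws cloudwatch delete-alarms --alarm-names "billing-alarm"
#   aws sns delete-topic --topic-arn "$topic"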
| true |
6acb5f9d3db45f39eb3a7975dfd482c7785ffb80
|
Shell
|
aseques/debian-server-tools
|
/debian-setup.sh
|
UTF-8
| 28,474 | 3.140625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Debian jessie virtual server setup.
#
# AUTHOR :Viktor Szépe <viktor@szepe.net>
# URL :https://github.com/szepeviktor/debian-server-tools
# LICENSE :The MIT License (MIT)
# AUTORUN :wget -O ds.sh http://git.io/vtcLq && . ds.sh
# How to choose VPS provider?
#
# - Disk access time
# - CPU speed (~2000 PassMark CPU Mark, ~20 ms sysbench)
# - Worldwide and regional bandwidth, port speed
# - Spammer neighbours https://www.projecthoneypot.org/ip_1.2.3.4
# - Nightime technical support network or hardware failure response time
# - Daytime technical and billing support
# - (D)DoS mitigation
#
# See https://github.com/szepeviktor/wordpress-speedtest/blob/master/README.md#results
# Packages sources
DS_MIRROR="http://cloudfront.debian.net/debian"
#DS_MIRROR="http://http.debian.net/debian"
#DS_MIRROR="http://ftp.COUNTRY-CODE.debian.org/debian"
DS_REPOS="dotdeb nodejs-iojs percona szepeviktor"
#DS_REPOS="deb-multimedia dotdeb mariadb mod-pagespeed mt-aws-glacier \
# newrelic nginx nodejs-iojs oracle percona postgre szepeviktor varnish"
# OVH configuration
#
# /etc/ovhrc
# cdns.ovh.net.
# ntp.ovh.net.
# http://help.ovh.com/InstallOvhKey
# http://help.ovh.com/RealTimeMonitoring
set -e -x
Error() { echo "ERROR: $(tput bold;tput setaf 7;tput setab 1)$*$(tput sgr0)" >&2; }
[ "$(id -u)" == 0 ] || exit 1
# Identify distribution
lsb_release -a && sleep 5
# Download this repo
mkdir ~/src
cd ~/src
wget -O- https://github.com/szepeviktor/debian-server-tools/archive/master.tar.gz|tar xz
cd debian-server-tools-master/
D="$(pwd)"
# Clean packages
apt-get clean
rm -vrf /var/lib/apt/lists/*
apt-get clean
apt-get autoremove --purge -y
# Packages sources
mv -vf /etc/apt/sources.list "/etc/apt/sources.list~"
cp -v ${D}/package/apt-sources/sources.list /etc/apt/
sed -i "s/%MIRROR%/${DS_MIRROR//\//\\/}/g" /etc/apt/sources.list
# Install HTTPS transport
apt-get update
apt-get install -y apt-transport-https
for R in ${DS_REPOS};do cp -v ${D}/package/apt-sources/${R}.list /etc/apt/sources.list.d/;done
eval "$(grep -h -A5 "^deb " /etc/apt/sources.list.d/*.list|grep "^#K: "|cut -d' ' -f2-)"
#editor /etc/apt/sources.list
# APT settings
echo 'Acquire::Languages "none";' > /etc/apt/apt.conf.d/00languages
echo 'APT::Periodic::Download-Upgradeable-Packages "1";' > /etc/apt/apt.conf.d/20download-upgrade
# Upgrade
apt-get update
apt-get dist-upgrade -y --force-yes
apt-get install -y lsb-release xz-utils ssh sudo ca-certificates most less lftp \
time bash-completion htop bind9-host mc lynx ncurses-term aptitude
ln -svf /usr/bin/host /usr/local/bin/mx
# Input
. /etc/profile.d/bash_completion.sh || Error "bash_completion.sh"
echo "alias e='editor'" > /etc/profile.d/e-editor.sh
sed -i 's/^# \(".*: history-search-.*ward\)$/\1/' /etc/inputrc
update-alternatives --set pager /usr/bin/most
update-alternatives --set editor /usr/bin/mcedit
# Bash
#sed -e 's/\(#.*enable bash completion\)/#\1/' -e '/#.*enable bash completion/,+8 { s/^#// }' -i /etc/bash.bashrc
echo "dash dash/sh boolean false"|debconf-set-selections -v
dpkg-reconfigure -f noninteractive dash
# --- Automated --------------- >8 ------------- >8 ------------
#grep -B1000 "# -\+ Automated -\+" debian-setup.sh
set +e +x
kill -SIGINT $$
# Remove systemd
dpkg -s systemd &> /dev/null && apt-get install -y sysvinit-core sysvinit sysvinit-utils
read -s -p 'Ctrl + D to reboot ' || reboot
apt-get remove -y --purge --auto-remove systemd
echo -e 'Package: *systemd*\nPin: origin ""\nPin-Priority: -1' > /etc/apt/preferences.d/systemd
# Wget defaults
echo -e "\ncontent_disposition = on" >> /etc/wgetrc
# User settings
editor /root/.bashrc
# ---------------------------------------------------------------------
#export LANG=en_US.UTF-8
#export LC_ALL=en_US.UTF-8
#export IP="$(ip addr show dev xenbr0|sed -n 's/^\s*inet \([0-9\.]\+\)\b.*$/\1/p')"
export IP="$(ip addr show dev eth0|sed -n 's/^\s*inet \([0-9\.]\+\)\b.*$/\1/p')"
PS1exitstatus() { local RET="$?";if [ "$RET" -ne 0 ];then echo -n "$(tput setaf 7;tput setab 1)"'!'"$RET";fi; }
export PS1="\[$(tput sgr0)\][\[$(tput setaf 3)\]\u\[$(tput bold;tput setaf 1)\]@\h\[$(tput sgr0)\]:\
\[$(tput setaf 8;tput setab 4)\]\w\[$(tput sgr0)\]:\t:\
\[$(tput bold)\]\!\[\$(PS1exitstatus;tput sgr0)\]]\n"
# putty Connection / Data / Terminal-type string: putty-256color
# ls -1 /usr/share/mc/skins/|sed "s/\.ini$//g"
if [ "${TERM/256/}" == "$TERM" ]; then
if [ "$(id -u)" == 0 ]; then
export MC_SKIN="modarcon16root-defbg-thin"
else
export MC_SKIN="modarcon16"
fi
else
if [ "$(id -u)" == 0 ]; then
export MC_SKIN="modarin256root-defbg-thin"
else
export MC_SKIN="xoria256"
fi
fi
export LS_OPTIONS='--color=auto'
eval "$(dircolors)"
alias ls='ls $LS_OPTIONS'
alias ll='ls $LS_OPTIONS -l'
alias l='ls $LS_OPTIONS -lA'
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'
export GREP_OPTIONS="--color"
alias grep='grep $GREP_OPTIONS'
alias iotop='iotop -d 0.1 -qqq -o'
alias iftop='NCURSES_NO_UTF8_ACS=1 iftop -nP'
alias transit='xz -9|base64 -w $((COLUMNS-1))'
alias transit-receive='base64 -d|xz -d'
#alias readmail='MAIL=/var/mail/MAILDIR/ mailx'
# apt-get install -y tcpdump tcpflow
#alias httpdump='tcpdump -nn -i eth0 -s 1500 -l -w - "dst port 80 and dst host ${IP}" | tcpflow -c -r -'
# Colorized man pages with less
# man termcap # String Capabilities
man() {
#
# so Start standout mode (search)
# se End standout mode
# us Start underlining (italic)
# ue End underlining
# md Start bold mode (highlight)
# me End all mode like so, us, mb, md and mr
env \
LESS_TERMCAP_so=$(tput setab 230) \
LESS_TERMCAP_se=$(tput sgr0) \
LESS_TERMCAP_us=$(tput setaf 2) \
LESS_TERMCAP_ue=$(tput sgr0) \
LESS_TERMCAP_md=$(tput bold) \
LESS_TERMCAP_me=$(tput sgr0) \
man "$@"
}
# ---------------------------------------------------------------------
# Markdown for mc
#cp -v /etc/mc/mc.ext ~/.config/mc/mc.ext && apt-get install -y pandoc
#editor ~/.config/mc/mc.ext
# regex/\.md(own)?$
# View=pandoc -s -f markdown -t man %p | man -l -
# Add INI extensions for mc
cp -v /usr/share/mc/syntax/Syntax ~/.config/mc/mcedit/Syntax
sed -i 's;^\(file .*\[nN\]\[iI\]\)\(.*\)$;\1|cf|conf|cnf|local|htaccess\2;' ~/.config/mc/mcedit/Syntax
sed -i 's;^file sources.list\$ sources\\slist$;file (sources)?\\.list$ sources\\slist;' ~/.config/mc/mcedit/Syntax
#editor ~/.config/mc/mcedit/Syntax
# Username
U="viktor"
# GECOS: Full name,Room number,Work phone,Home phone
adduser --gecos "" ${U}
# <<< Enter password twice
K="PUBLIC-KEY"
S="/home/${U}/.ssh";mkdir --mode 700 "$S";echo "$K" >> "${S}/authorized_keys2";chown -R ${U}:${U} "$S"
adduser ${U} sudo
# Expire password
# passwd -e ${U}
# Change root and other passwords to "*"
editor /etc/shadow
read -s -p "SSH port? " SSH_PORT
# sshd on another port
sed 's/^Port 22$/#Port 22\nPort ${SSH_PORT}/' -i /etc/ssh/sshd_config
# Disable root login
sed 's/^PermitRootLogin yes$/PermitRootLogin no/' -i /etc/ssh/sshd_config
# Disable password login for sudoers
echo -e 'Match Group sudo\n PasswordAuthentication no' >> /etc/ssh/sshd_config
# Add IP blocking
# See: ${D}/security/README.md
editor /etc/hosts.deny
service ssh restart
netstat -antup|grep sshd
# Log out as root
logout
# Log in
sudo su - || exit
D="/root/src/debian-server-tools-master"
# Download architecture-independent packages
Getpkg() { local P="$1"; local R="${2-sid}"; local WEB="https://packages.debian.org/${R}/all/${P}/download";
local URL="$(wget -qO- "$WEB"|grep -o '[^"]\+ftp.fr.debian.org/debian[^"]\+\.deb')";
[ -z "$URL" ] && return 1; wget -qO "${P}.deb" "$URL" && dpkg -i "${P}.deb"; echo "Ret=$?"; }
# Hardware
lspci
[ -f /proc/modules ] && lsmod || echo "WARNING: monolithic kernel"
# Disk configuration
clear; cat /proc/mdstat; cat /proc/partitions
pvdisplay && vgdisplay && lvdisplay
ls -1 /etc/default/*
head -n 1000 /etc/default/* | grep -v "^#\|^$" | grep --color -A1000 "^==> "
# /tmp in RAM
TOTAL_MEM="$(grep MemTotal /proc/meminfo|sed 's;.*[[:space:]]\([0-9]\+\)[[:space:]]kB.*;\1;')"
[ "$TOTAL_MEM" -gt $((2049 * 1024)) ] && sed -i 's/^#RAMTMP=no$/RAMTMP=yes/' /etc/default/tmpfs
# Mount points
# <file system> <mount point> <type> <options> <dump> <pass>
clear; editor /etc/fstab
clear; cat /proc/mounts
swapoff -a; swapon -a; cat /proc/swaps
# Create a swap file
dd if=/dev/zero of=/swap0 bs=1M count=768
chmod 0600 /swap0
mkswap /swap0
echo "/swap0 none swap sw 0 0" >> /etc/fstab
grep "\S\+\s\+/\s.*relatime" /proc/mounts || echo "ERROR: no relAtime for rootfs"
# Kernel
uname -a
# List kernels
apt-cache policy "linux-image-3.*"
#apt-get install linux-image-amd64=KERNEL-VERSION
clear; ls -l /lib/modules/
ls -latr /boot/
# Verbose boot
sed -i 's/^#*VERBOSE=no$/VERBOSE=yes/' /etc/default/rcS
dpkg -l | grep "grub"
# OVH Kernel "made-in-ovh"
# https://gist.github.com/szepeviktor/cf6b60ac1b2515cb41c1
# Linode Kernels: auto renew on reboot
# https://www.linode.com/kernels/
editor /etc/modules
ls -1 /etc/sysctl.d/ | grep -v README.sysctl
editor /etc/sysctl.conf
# Miscellaneous configuration
# Aruba needs arping package in /etc/rc.local
editor /etc/rc.local
editor /etc/profile
ls -l /etc/profile.d/
editor /etc/motd
# Networking
editor /etc/network/interfaces
# auto eth0
# iface eth0 inet static
# address IP
# netmask 255.255.255.0
# #netmask 255.255.254.0
# gateway GATEWAY
ifconfig -a
route -n -4
route -n -6
netstat -antup
editor /etc/resolv.conf
# nameserver 8.8.8.8
# nameserver LOCAL-NS
# nameserver LOCAL-NS2
# nameserver 8.8.4.4
# options timeout:2
# #options rotate
# Aruba resolvers
#
# DC1-IT 62.149.128.4 62.149.132.4
# DC3-CZ 81.2.192.131 81.2.193.227
#
# Vultr resolvers
#
# Frankfurt 108.61.10.10
ping6 -c 4 ipv6.google.com
host -v -tA example.com|grep "^example\.com\.\s*[0-9]\+\s*IN\s*A\s*93\.184\.216\.34$"||echo "DNS error"
# View network Graph v4/v6
# http://bgp.he.net/ip/${IP}
# MYATTACKERS chain
# See: ${D}/security/myattackers.sh
# Hostname
# Set A record and PTR record
# Consider: http://www.iata.org/publications/Pages/code-search.aspx
# http://www.world-airport-codes.com/
read -r -p "Host name? " H
# Search for the old hostname
grep -ir "$(hostname)" /etc/
hostname "$H"
echo "$H" > /etc/hostname
echo "$H" > /etc/mailname
editor /etc/hosts
# 127.0.0.1 localhost
# 127.0.1.1 localhost
# ::1 ip6-localhost ip6-loopback
# fe00::0 ip6-localnet
# ff00::0 ip6-mcastprefix
# ff02::1 ip6-allnodes
# ff02::2 ip6-allrouters
#
# # ORIGINAL-PTR $(host "$IP")
# IP.IP.IP.IP HOST.DOMAIN HOST
# Locale and timezone
clear; locale; locale -a
dpkg-reconfigure locales
cat /etc/timezone
dpkg-reconfigure tzdata
# Comment out getty[2-6], NOT /etc/init.d/rc !
# Consider /sbin/agetty
editor /etc/inittab
# Sanitize users
# https://www.debian.org/doc/debian-policy/ch-opersys.html#s9.2
# https://www.debian.org/doc/manuals/securing-debian-howto/ch12.en.html#s-faq-os-users
# mcview /usr/share/doc/base-passwd/users-and-groups.html
tabs 20,+3,+8,+8,+20,+20,+8,+8,+8;sort -t':' -k3 -g /etc/passwd|tr ':' '\t';tabs -8
editor /etc/passwd
editor /etc/shadow
update-passwd -v --dry-run
#update-passwd -v
# Sanitize packages (-hardware-related +monitoring -daemons)
# 1. Delete not-installed packages
clear; dpkg -l|grep -v "^ii"
# 2. Usually unnecessary packages
apt-get purge \
at ftp dc dbus rpcbind exim4-base exim4-config python2.6-minimal python2.6 \
lrzsz mlocate rpcbind nfs-common w3m vim-runtime vim-common \
installation-report debian-faq info install-info manpages man-db texinfo tex-common \
isc-dhcp-client isc-dhcp-common
deluser Debian-exim
deluser messagebus
# 3. VPS monitoring
ps aux|grep -v "grep"|grep -E "snmp|vmtools|xe-daemon"
dpkg -l|grep -E "xe-guest-utilities|dkms"
# See: ${D}/package/vmware-tools-wheezy.sh
vmware-toolbox-cmd stat sessionid
# 4. Hardware related
dpkg -l|grep -E -w "dmidecode|eject|laptop-detect|usbutils|kbd|console-setup\
|acpid|fancontrol|hddtemp|lm-sensors|sensord|smartmontools|mdadm|popularity-contest"
# 5. Non-stable packages
dpkg -l|grep "~[a-z]\+"
dpkg -l|grep -E "~squeeze|~wheezy|python2\.6"
# 6. Non-Debian packages
aptitude search '?narrow(?installed, !?origin(Debian))'
# 7. Obsolete packages
aptitude search '?obsolete'
# 8. Manually installed, not "required" and not "important" packages minus known ones
aptitude search '?and(?installed, ?not(?automatic), ?not(?priority(required)), ?not(?priority(important)))' -F"%p" \
| grep -v -x -f ${D}/package/debian-jessie-not-req-imp.pkg | xargs echo
# 9. Development packages
dpkg -l|grep -- "-dev"
# List by section
aptitude search '?and(?installed, ?not(?automatic), ?not(?priority(required)), ?not(?priority(important)))' -F"%s %p"|sort
dpkg -l | most
apt-get autoremove --purge
# Essential packages
apt-get install -y localepurge unattended-upgrades apt-listchanges cruft debsums \
whois unzip heirloom-mailx iptables-persistent bootlogd goaccess \
ntpdate apg dos2unix strace ccze mtr-tiny git colordiff gcc libc6-dev make
# Backports
# @wheezy apt-get install -t wheezy-backports -y rsyslog whois git goaccess init-system-helpers
# debsums cron weekly
sed -i 's/^CRON_CHECK=never/CRON_CHECK=weekly/' /etc/default/debsums
# Check user cron jobs
clear; ${D}/tools/catconf /var/spool/cron/crontabs/*
# Automatic package updates
echo "unattended-upgrades unattended-upgrades/enable_auto_updates boolean true"|debconf-set-selections -v
dpkg-reconfigure -f noninteractive unattended-upgrades
# Sanitize files
rm -vrf /var/lib/clamav /var/log/clamav
read -r -p "Hosting company? " HOSTING_COMPANY
find / -iname "*${HOSTING_COMPANY}*"
grep -ir "${HOSTING_COMPANY}" /etc/
dpkg -l | grep -i "${HOSTING_COMPANY}"
cruft --ignore /dev | tee cruft.log
# Find broken symlinks
find / -type l -xtype l -not -path "/proc/*"
debsums --all --changed | tee debsums-changed.log
# Custom APT repositories
editor /etc/apt/sources.list.d/others.list && apt-get update
cd /root/
mkdir dist-mod && cd dist-mod/
# Get pip
wget https://bootstrap.pypa.io/get-pip.py
python3 get-pip.py
python2 get-pip.py
# Detect whether your container is running under a hypervisor
wget -O slabbed-or-not.zip https://github.com/kaniini/slabbed-or-not/archive/master.zip
unzip slabbed-or-not.zip && rm -vf slabbed-or-not.zip
cd slabbed-or-not-master/ && make && ./slabbed-or-not|tee ../slabbed-or-not.log && cd ..
# rsyslogd immark plugin
# http://www.rsyslog.com/doc/rsconf1_markmessageperiod.html
editor /etc/rsyslog.conf
# $ModLoad immark
# $MarkMessagePeriod 1800
service rsyslog restart
# Debian tools
cd /usr/local/src/ && git clone --recursive https://github.com/szepeviktor/debian-server-tools.git
D="$(pwd)/debian-server-tools"
rm -vrf /root/src/debian-server-tools-master/
cd ${D}; ls tools/ | xargs -I "%%" ./install.sh tools/%%
# CPU
grep -E "model name|cpu MHz|bogomips" /proc/cpuinfo
cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
# Performance mode
# for SG in /sys/devices/system/cpu/*/cpufreq/scaling_governor;do echo "performance">$SG;done
# IRQ balance
declare -i CPU_COUNT="$(grep -c "^processor" /proc/cpuinfo)"
[ "$CPU_COUNT" -gt 1 ] && apt-get install -y irqbalance && cat /proc/interrupts
# Make cron log all failed jobs (exit status != 0)
sed -i "s/^#\s*\(EXTRA_OPTS='-L 5'\)/\1/" /etc/default/cron || echo "ERROR: cron-default"
service cron restart
# Time synchronization
cd ${D}; ./install.sh monitoring/ntpdated
editor /etc/default/ntpdate
# Set nearest time server: http://www.pool.ntp.org/en/
# NTPSERVERS="0.uk.pool.ntp.org 1.uk.pool.ntp.org 2.uk.pool.ntp.org 3.uk.pool.ntp.org"
# NTPSERVERS="0.de.pool.ntp.org 1.de.pool.ntp.org 2.de.pool.ntp.org 3.de.pool.ntp.org"
# NTPSERVERS="0.fr.pool.ntp.org 1.fr.pool.ntp.org 2.fr.pool.ntp.org 3.fr.pool.ntp.org"
# NTPSERVERS="0.cz.pool.ntp.org 1.cz.pool.ntp.org 2.cz.pool.ntp.org 3.cz.pool.ntp.org"
# NTPSERVERS="0.hu.pool.ntp.org 1.hu.pool.ntp.org 2.hu.pool.ntp.org 3.hu.pool.ntp.org"
# OVH
# NTPSERVERS="ntp.ovh.net"
# µnscd
apt-get install -y unscd
editor /etc/nscd.conf
# enable-cache hosts yes
# positive-time-to-live hosts 60
# negative-time-to-live hosts 20
service unscd stop && service unscd start
# VPS check
cd ${D}; ./install.sh monitoring/vpscheck.sh
editor /usr/local/sbin/vpscheck.sh
vpscheck.sh -gen
editor /root/.config/vpscheck/configuration
# Test run
vpscheck.sh
# msmtp
apt-get install -y msmtp-mta
# /usr/share/doc/msmtp/examples/msmtprc-system.example
cp -vf ${D}/mail/msmtprc /etc/
# Configure Mandrill
# https://www.mandrill.com/signup/
# http://msmtp.sourceforge.net/doc/msmtp.html
echo "This is a test mail."|mailx -s "[first] Subject of the first email" ADDRESS
# Courier MTA - deliver all mail to a smarthost
# Send-only servers don't receive emails.
# Send-only servers don't have local domain names.
# They should have an MX record pointing to the smarthost.
# Smarthost should receive all emails with send-only server's domain name.
clear; apt-get install -y courier-mta courier-mta-ssl
# Fix dependency on courier-authdaemon
sed -i '1,20s/^\(#\s\+Required-Start:\s.*\)$/\1 courier-authdaemon/' /etc/init.d/courier-mta
update-rc.d courier-mta defaults
# Check for other MTA-s
dpkg -l | grep -E "postfix|exim"
cd ${D}; ./install.sh mail/courier-restart.sh
# Smarthost
editor /etc/courier/esmtproutes
# : %SMART-HOST%,587 /SECURITY=REQUIRED
# : smtp.mandrillapp.com,587 /SECURITY=REQUIRED
# From jessie on - requires ESMTP_TLS_VERIFY_DOMAIN=1 and TLS_VERIFYPEER=PEER
# : %SMART-HOST%,465 /SECURITY=SMTPS
editor /etc/courier/esmtpauthclient
# smtp.mandrillapp.com,587 MANDRILL@ACCOUNT API-KEY
openssl dhparam -out /etc/courier/dhparams.pem 2048
editor /etc/courier/esmtpd
# ADDRESS=127.0.0.1
# TLS_DHPARAMS=/etc/courier/dhparams.pem
# ESMTPAUTH=""
# ESMTPAUTH_TLS=""
editor /etc/courier/esmtpd-ssl
# SSLADDRESS=127.0.0.1
# TLS_DHPARAMS=/etc/courier/dhparams.pem
editor /etc/courier/smtpaccess/default
# 127.0.0.1 allow,RELAYCLIENT
# :0000:0000:0000:0000:0000:0000:0000:0001 allow,RELAYCLIENT
editor /etc/courier/me
# Check MX record
host -t MX $(cat /etc/courier/me)
editor /etc/courier/defaultdomain
editor /etc/courier/dsnfrom
editor /etc/courier/locals
# localhost
# # Remove own hostname!
editor /etc/courier/aliases/system
# postmaster: |/usr/bin/couriersrs --srsdomain=DOMAIN.SRS admin@szepe.net
courier-restart.sh
# Allow unauthenticated SMTP traffic from this server on the smarthost
#
# editor /etc/courier/smtpaccess/default
# %%IP%%<TAB>allow,RELAYCLIENT,AUTH_REQUIRED=0
#
# Receive bounce messages on the smarthost
#
# editor /etc/courier/aliases/system
# @HOSTNAME.TLD: LOCAL-USER
# editor /var/mail/DOMAIN/USER/.courier-default
# LOCAL-USER
# courier-restart.sh
echo "This is a t3st mail."|mailx -s "[first] Subject of the 1st email" viktor@szepe.net
# Apache 2.4 with ITK
# @wheezy apt-get install -y -t wheezy-experimental apache2-mpm-itk apache2-utils libapache2-mod-fastcgi
apt-get install -y apache2-mpm-itk apache2-utils
# Apache with mpm_events
apt-get install -y apache2 apache2-utils
adduser --disabled-password --gecos "" web
editor /etc/apache2/evvars
# export APACHE_RUN_USER=web
# export APACHE_RUN_GROUP=web
a2enmod actions rewrite headers deflate expires proxy_fcgi
a2enmod ssl
mkdir /etc/apache2/ssl && chmod 750 /etc/apache2/ssl
cp -v ${D}/webserver/apache-conf-available/* /etc/apache2/conf-available/
yes|cp -vf ${D}/webserver/apache-sites-available/* /etc/apache2/sites-available/
echo -e "User-agent: *\nDisallow: /\n" > /var/www/html/robots.txt
# Use php-fpm.conf settings per site
a2enconf h5bp
editor /etc/apache2/conf-enabled/security.conf
# ServerTokens Prod
editor /etc/apache2/apache2.conf
# LogLevel info
# @TODO fcgi://port,path?? ProxyPassMatch "^/.*\.php$" "unix:/var/run/php5-fpm.sock|fcgi://127.0.0.1:9000/var/www/website/html"
# mod_pagespeed for poorly written websites
apt-get install -y mod-pagespeed-stable
# Remove duplicate
ls -l /etc/apt/sources.list.d/*pagespeed*
#rm -v /etc/apt/sources.list.d/mod-pagespeed.list
# Nginx 1.8
apt-get install -y nginx-lite
# Nginx packages: lite, full, extra
# https://docs.google.com/a/moolfreet.com/spreadsheet/ccc?key=0AjuNPnOoex7SdG5fUkhfc3BCSjJQbVVrQTg4UGU2YVE#gid=0
# apt-get install -y nginx-full
# Put ngx-conf in PATH
ln -sv /usr/sbin/ngx-conf/ngx-conf /usr/local/sbin/ngx-conf
# HTTP/AUTH
mkdir /etc/nginx/http-auth
# Configuration
# https://codex.wordpress.org/Nginx
# http://wiki.nginx.org/WordPress
git clone https://github.com/szepeviktor/server-configs-nginx.git
NGXC="/etc/nginx"
cp -va h5bp/ ${NGXC}
cp -vf mime.types ${NGXC}
cp -vf nginx.conf ${NGXC}
ngx-conf --disable default
cp -vf sites-available/no-default ${NGXC}/sites-available
ngx-conf --enable no-default
# Fail2ban
# https://packages.qa.debian.org/f/fail2ban.html
Getpkg geoip-database-contrib
apt-get install -y geoip-bin recode python3-pyinotify
# apt-get install -y fail2ban
Getpkg fail2ban
mc ${D}/security/fail2ban-conf/ /etc/fail2ban/
# Config: fail2ban.local
# Jails: jail.local
# /filter.d: apache-combined.local, apache-instant.local, courier-smtp.local, recidive.local
# /action.d: cloudflare.local
service fail2ban restart
# PHP 5.6
apt-get install -y php5-apcu php5-cli php5-curl php5-fpm php5-gd \
php5-mcrypt php5-mysqlnd php5-readline php5-sqlite php-pear php5-dev
PHP_TZ="$(head -n 1 /etc/timezone)"
sed -i 's/^expose_php = .*$/expose_php = Off/' /etc/php5/fpm/php.ini
sed -i 's/^max_execution_time = .*$/max_execution_time = 65/' /etc/php5/fpm/php.ini
sed -i 's/^memory_limit = .*$/memory_limit = 384M/' /etc/php5/fpm/php.ini
sed -i 's/^post_max_size = .*$/post_max_size = 20M/' /etc/php5/fpm/php.ini
sed -i 's/^upload_max_filesize = .*$/upload_max_filesize = 20M/' /etc/php5/fpm/php.ini
sed -i 's/^allow_url_fopen = .*$/allow_url_fopen = Off/' /etc/php5/fpm/php.ini
sed -i "s|^;date.timezone =.*\$|date.timezone = ${PHP_TZ}|" /etc/php5/fpm/php.ini
# @TODO realpath_cache* -> measure
grep -Ev "^\s*#|^\s*;|^\s*$" /etc/php5/fpm/php.ini | most
# Disable "www" pool
#sed -i 's/^/;/' /etc/php5/fpm/pool.d/www.conf
mv /etc/php5/fpm/pool.d/www.conf /etc/php5/fpm/pool.d/www.conf.default
cp -v ${D}/webserver/php5fpm-pools/* /etc/php5/fpm/
# PHP 5.6+ session cleaning
mkdir -p /usr/local/lib/php5
cp -v ${D}/webserver/sessionclean5.5 /usr/local/lib/php5/
# PHP 5.6+
echo -e "15 *\t* * *\troot\t[ -x /usr/local/lib/php5/sessionclean5.5 ] && /usr/local/lib/php5/sessionclean5.5" \
> /etc/cron.d/php5-user
# @FIXME PHP timeouts
# - PHP max_execution_time
# - PHP max_input_time
# - FastCGI -idle-timeout
# - PHP-FPM pool request_terminate_timeout
# Suhosin extension
apt-get install -y php5-suhosin-extension
php5enmod -s fpm suhosin
# PHP file modification time protection
# https://ioncube24.com/signup
# @TODO .ini-handler, Search for it! ?ucf
# PHP security directives
# mail.add_x_header
# assert.active
# suhosin.executor.disable_emodifier = On
# suhosin.disable.display_errors = 1
# suhosin.session.cryptkey = $(apg -m 32)
# PHP directives for Drupal
# suhosin.get.max_array_index_length = 128
# suhosin.post.max_array_index_length = 128
# suhosin.request.max_array_index_length = 128
# No FPM pools -> no restart
# ionCube Loader
# https://www.ioncube.com/loaders.php
# zend_extension = ioncube_loader_lin_5.6.so
# ic24.enable = Off
cd ${D}; ./install.sh webserver/webrestart.sh
# Add the development website
# See: ${D}/webserver/add-prg-site.sh
# Add a website
# See: ${D}/webserver/add-site.sh
# MariaDB
apt-get install -y mariadb-server-10.0 mariadb-client-10.0
read -e -p "MYSQL_PASSWORD? " MYSQL_PASSWORD
echo -e "[mysql]\nuser=root\npass=${MYSQL_PASSWORD}\ndefault-character-set=utf8" >> /root/.my.cnf
echo -e "[mysqldump]\nuser=root\npass=${MYSQL_PASSWORD}\ndefault-character-set=utf8" >> /root/.my.cnf
chmod 600 /root/.my.cnf
#editor /root/.my.cnf
# wp-cli
WPCLI_URL="https://raw.github.com/wp-cli/builds/gh-pages/phar/wp-cli.phar"
wget -O /usr/local/bin/wp "$WPCLI_URL" && chmod -c +x /usr/local/bin/wp
WPCLI_COMPLETION_URL="https://github.com/wp-cli/wp-cli/raw/master/utils/wp-completion.bash"
wget -O- "$WPCLI_COMPLETION_URL"|sed 's/wp cli completions/wp --allow-root cli completions/' > /etc/bash_completion.d/wp-cli
# If you have suhosin in global php5 config
# grep "[^;#]*suhosin\.executor\.include\.whitelist.*phar" /etc/php5/cli/conf.d/*suhosin*.ini || Error "Whitelist phar"
# Drush
# https://github.com/drush-ops/drush/releases
wget -qO getcomposer.php https://getcomposer.org/installer
php getcomposer.php --install-dir=/usr/local/bin --filename=composer
mkdir -p /opt/drush && cd /opt/drush
composer require drush/drush:7.*
ln -sv /opt/drush/vendor/bin/drush /usr/local/bin/drush
# Set up Drupal site
# sudo -u SITE-USER -i
# cd website/
# drush dl drupal --drupal-project-rename=html
# cd html/
# drush site-install standard \
# --db-url='mysql://DB-USER:DB-PASS@localhost/DB-NAME' \
# --site-name=SITE-NAME --account-name=USER-NAME --account-pass=USER-PASS
# drush --root=DOCUMENT-ROOT vset --yes file_private_path "PRIVATE-PATH"
# drush --root=DOCUMENT-ROOT vset --yes file_temporary_path "UPLOAD-DIRECTORY"
# drush --root=DOCUMENT-ROOT vset --yes cron_safe_threshold 0
#
# See: ${D}/webserver/preload-cache.sh
# Spamassassin
Getpkg spamassassin
# SSL certificate for web, mail etc.
# See: ${D}/security/new-ssl-cert.sh
# Test TLS connections
# See: ${D}/security/README.md
# ProFTPD
# When the default locale for your system is not en_US.UTF-8
# be sure to add this to /etc/default/proftpd for fail2ban to understand dates.
# export LC_TIME="en_US.UTF-8"
# Simple syslog monitoring
apt-get install -y libdate-manip-perl
DGR="$(wget -qO- https://api.github.com/repos/mdom/dategrep/releases|sed -n '0,/^.*"tag_name": "\([0-9.]\+\)".*$/{s//\1/p}')" #'
wget -O /usr/local/bin/dategrep https://github.com/mdom/dategrep/releases/download/${DGR}/dategrep-standalone-small
chmod -c +x /usr/local/bin/dategrep
cd ${D}; ./install.sh monitoring/syslog-errors.sh
# Monit - monitoring
# https://packages.debian.org/sid/amd64/monit/download
apt-get install -y monit
# See: ${D}/monitoring/monit/
# https://mmonit.com/monit/documentation/monit.html
service monit restart
# Wait for start
tail -f /var/log/monit.log
monit summary
lynx 127.0.0.1:2812
# Munin - network-wide graphing
# See: ${D}/monitoring/munin/munin-debian-setup.sh
# Aruba ExtraControl (serclient)
# http://admin.dc3.arubacloud.hu/Manage/Serial/SerialManagement.aspx
wget -nv http://admin.dc3.arubacloud.hu/Installers/debian/aruba-serclient_0.01-1_all.deb
dpkg -i aruba-serclient_*_all.deb
# Set log level
echo -e "[LOG]\nlevel = 20" >> /opt/serclient/serclient.ini
# Comment out "if getRestartGUID(remove=False) == None: rf.doRollover()"
editor +159 /opt/serclient/tools.py
# Add logrotate
editor /etc/logrotate.d/serclient
# /var/log/serclient.log {
# weekly
# rotate 15
# compress
# delaycompress
# notifempty
# create 640 root root
# postrotate
# if /etc/init.d/serclient status > /dev/null ; then \
# /etc/init.d/serclient restart > /dev/null; \
# fi;
# endscript
# }
# node.js
apt-get install -y iojs
# Install packaged under /usr/local/
npm config set prefix=/usr/local/
npm install -g less
npm install -g less-plugin-clean-css
# Logrotate periods
#
editor /etc/logrotate.d/rsyslog
# weekly
# rotate 15
# # /var/log/mail.log
# weekly
# rotate 15
editor /etc/logrotate.d/apache2
# daily
# rotate 90
# Clean up
apt-get autoremove --purge
# Throttle package downloads (1000 kB/s)
echo 'Acquire::Queue-mode "access"; Acquire::http::Dl-Limit "1000";' > /etc/apt/apt.conf.d/76download
# Backup /etc
tar cJf "/root/${H//./-}_etc-backup_$(date --rfc-3339=date).tar.xz" /etc/
# Clients and services
cp -v ${D}/server.yml /root/
editor /root/server.yml
| true |
7419ded7d7408c79e0ae47f932ff52ac37d774bd
|
Shell
|
go-kivik/kouch
|
/script/build.sh
|
UTF-8
| 562 | 3.65625 | 4 |
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
targets="linux_386 linux_amd64 darwin_amd64 windows_386 windows_amd64"
mkdir -p build
for target in $targets; do
echo Building ${target}...
mkdir -p tmp/${target}
bin=kouch
if [ "${target%_*}" == "windows" ]; then
bin=kouch.exe
fi
GOOS=${target%_*} GOARCH=${target##*_} go build -o tmp/${target}/${bin} ./cmd/kouch
cp LICENSE.md README.md tmp/${target}
tar -czvpf build/kouch-${TRAVIS_TAG/-/_}-${target}.tar.gz tmp/${target}
zip -9r build/kouch-${TRAVIS_TAG/-/_}-${target}.zip tmp/${target}
done
| true |
3ae25df7cd94eb2332682b39aecce1442b447367
|
Shell
|
fujiso/dotfile
|
/.zshrc
|
UTF-8
| 432 | 2.734375 | 3 |
[] |
no_license
|
#
# Executes commands at the start of an interactive session.
#
# Authors:
# Sorin Ionescu <sorin.ionescu@gmail.com>
#
# Source Prezto.
if [[ -s "${ZDOTDIR:-$HOME}/.zprezto/init.zsh" ]]; then
source "${ZDOTDIR:-$HOME}/.zprezto/init.zsh"
fi
# Customize to your needs...
bindkey -e
#eval "$(pyenv virtualenv-init -)"
alias gpu='watch -n1 "hostname; nvidia-smi pmon -s um -c 1"'
alias imux='tmux attach || tmux new-session \; source-file ~/.tmux/imux'
| true |
6d0f791867c4d2e32ea450931ece3f079b3b5a5b
|
Shell
|
jeanpascalpfister/SmartphoneTappingModel
|
/RunCluster.sh
|
UTF-8
| 602 | 2.796875 | 3 |
[] |
no_license
|
#!/bin/bash
module load matlab
nsim=(11 11 1 1 12 12) # number of simulations per Simulation serie
for serie in {1..6}
do
ns=${nsim[$serie-1]}
list=($(seq 1 1 $ns))
for s in ${list[@]}
do
for p in {1..84}
do
echo "Queuing up serie=${serie}, sim = ${s}, subject=${p}"
# Submit the job
bsub -W "24:00" -R "rusage[mem=1024]" -N -o log2/logSER${serie}_SUB${p}_SIM${s}.txt -J "SER=${serie}, SUB=${p}, SIM=${s}" matlab -singleCompThread -nosplash -nodesktop -r "ClusterFitSubject(${p},${serie},${s},'DataSet.mat','fit2/SER${serie}_SUB${p}_SIM${s}.mat'),exit"
done
done
done
wait
echo "Done"
| true |
6dda6a6788b50ceb3999079cde1942e433f4f885
|
Shell
|
michaeldgraham/relate-by-ui
|
/bin/gh-pages
|
UTF-8
| 328 | 3.171875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Publishing to GitHub Pages:
# $ ./bin/gh-pages styleguide/storybook-static
if [ -z "$1" ]
then
echo "Which folder do you want to deploy to GitHub Pages?"
exit 1
fi
# git push origin `git subtree split --prefix styleguide/storybook-static gh-pages`:gh-pages --force
git subtree push --prefix $1 origin gh-pages
| true |
0ded1fedd60a9447356dea4c1ec35c92cae51919
|
Shell
|
edwardmlyte/setup
|
/.path
|
UTF-8
| 1,340 | 3.140625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# go root
if [ -d /opt/golang ]
then
export GOROOT="$(find /opt/golang -name "go1*" -type d -maxdepth 2 | head -1)"
export PATH="${GOROOT}/bin:${PATH}"
fi
# go path
if [ -d ${HOME}/Development/go-workspace ]
then
export GOPATH="${HOME}/Development/go-workspace"
export PATH="${GOPATH}/bin:${PATH}"
fi
# update path
export PATH=/usr/local/bin:${PATH}:/sbin:/usr/local/sbin
# add gnubin for mac
export PATH="/usr/local/opt/gnu-sed/libexec/gnubin:${PATH}"
# add gnu getopt
export PATH="/usr/local/opt/gnu-getopt/bin:${PATH}"
# update cdpath
export CDPATH=${CDPATH}:${GOPATH}/src/github.com:${GOPATH}/src/golang.org:${GOPATH}/src
# gtar
export GTAR="/usr/local/opt/gnu-tar/libexec/gnubin"
export PATH="${GTAR}:${PATH}"
# java
export JAVA_HOME="$(/usr/libexec/java_home)"
export PATH="${JAVA_HOME}/bin:${PATH}"
# maven
export MVN_HOME="/usr/local/bin/mvn"
export PATH="${MVN_HOME}/bin:${PATH}"
# update path for gnu coreutils, make & find on darwin
export PATH=/usr/local/opt/coreutils/libexec/gnubin:${PATH}
export MANPATH=/usr/local/opt/coreutils/libexec/gnuman:${MANPATH}
export PATH=/usr/local/opt/make/libexec/gnubin:${PATH}
export MANPATH=/usr/local/opt/make/libexec/gnuman:${MANPATH}
export PATH=/usr/local/opt/findutils/libexec/gnubin:${PATH}
export MANPATH=/usr/local/opt/findutils/libexec/gnuman:${MANPATH}
| true |
32f9c264e696bd44aa834deee9e1998f23b63851
|
Shell
|
parkr/dotfiles
|
/bin/mp4-to-mp3
|
UTF-8
| 350 | 3.9375 | 4 |
[] |
no_license
|
#!/bin/bash
if [ -z "$1" ]; then
echo "usage: $0 <filename.mp4>"
exit 1
fi
if ! [ -f "$1" ]; then
echo "fatal: filename '$1' doesn't exist"
exit 1
fi
INPUT_FILENAME="$1"
OUTPUT_FILENAME="${INPUT_FILENAME%.mp4}.mp3"
echo "Converting $INPUT_FILENAME to $OUTPUT_FILENAME"
ffmpeg -i "$INPUT_FILENAME" -f mp3 -ab 192000 -vn "$OUTPUT_FILENAME"
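# Batch usage sketch: convert every mp4 in the current directory
#   for f in *.mp4; do mp4-to-mp3 "$f"; done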
| true |
3f777aab5198c75852c459250218394761604110
|
Shell
|
millskyle/BerkeleyGW_Utilities
|
/working/co/run.sh
|
UTF-8
| 1,224 | 3.609375 | 4 |
[] |
no_license
|
#!/bin/bash
currentdir="`pwd`"
PWSCF='mpirun -n 4 /home/kmills/espresso-5.0/bin/pw.x -nk 1 -nd 1 -nb 1 -nt 1 '
#PWSCF='/home/kmills/espresso-5.0/bin/pw.x -nk 1 -nd 1 -nb 1 -nt 1 '
PW2BGW='/home/kmills/espresso-5.0/bin/pw2bgw.x '
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$DIR"
echo "Root directory is $DIR"
if [[ -f prefix ]]; then
PREFIX=`tr -d '\n' < prefix`
echo "Prefix set to $PREFIX"
cd $DIR
for inputfile in `\ls */*in*`; do
sed "s|!PREFIX!|$PREFIX|g" < $inputfile > ${inputfile}_new
done
cd 1-scf
ln -s ../../*.UPF ./
echo "Running first PWSCF calculation"
$PWSCF -in in_new &> out
cd $DIR
ln -s "../1-scf/${PREFIX}.save/" "2-wfn/"
cd 2-wfn/
ln -s ../../*.UPF ./
echo "Running second PWSCF calculation"
$PWSCF -in in_new &> out
echo "Converting PWSCF binary wavefunction to BerkeleyGW WFN"
$PW2BGW < pp_in_new > pp_out
cd $DIR
cd 2-wfn
mv wfn.complex ../WFN_$PREFIX
cd $currentdir
else
echo -e "\n\n ERROR: You must create a prefix file. The prefix file defines the system, and is used for naming. It doesn't have to coincide with the molecules in the system. See the README for more information. You can create this file by doing:
echo 'systemname' > prefix
"
fi
| true |
285c096c576e90c04edd821f937b4315f13301f1
|
Shell
|
williamscs/pants-test-project
|
/build-support/generate_constraints.sh
|
UTF-8
| 636 | 2.84375 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
# See https://www.pantsbuild.org/v2.0/docs/python-third-party-dependencies.
set -euo pipefail
PYTHON_BIN=python3
VIRTUALENV=build-support/.venv
PIP="${VIRTUALENV}/bin/pip"
CONSTRAINTS_FILE=constraints.txt
"${PYTHON_BIN}" -m venv "${VIRTUALENV}"
"${PIP}" install pip --upgrade
"${PIP}" install -r <(./pants dependencies --type=3rdparty ::)
echo "# Generated by build-support/generate_constraints.sh on $(date)" > "${CONSTRAINTS_FILE}"
"${PIP}" freeze --all >> "${CONSTRAINTS_FILE}"
| true |
8fc67871aec5a1ff8b7477d2d4b301c79b973904
|
Shell
|
AkariUeda/mc950-contour-descriptors
|
/assignment/convert_pgm.sh
|
UTF-8
| 118 | 2.9375 | 3 |
[] |
no_license
|
echo $1;
cd $1;
for file in *; do
echo $file
convert $file -compress None '../pgm_imgs/'${file%.*}'.pgm'
done;
| true |
5e6e2cf1f3af850396f10d4fd6f09981d00397c2
|
Shell
|
sndyuk/Silica
|
/configs/cmd/start.sh
|
UTF-8
| 716 | 3.359375 | 3 |
[] |
no_license
|
#!/bin/bash
pushd $(dirname $0) > /dev/null
################
echo " "
# For debug
if [ "$5" = "true" ]; then
DEBUG_OPTIONS="-J-Xdebug -J-Xrunjdwp:transport=dt_socket,server=y,address=8000,suspend=y -J-Djava.rmi.server.logCalls=true"
echo "on debug mode. Waiting to connect a debugger listening on port: 8000"
fi
################
if [ "$2" != "" ]; then
RMIREG=$2/bin/rmiregistry
else
RMIREG=rmiregistry
fi
$RMIREG $1 -J-Djava.rmi.server.hostname=$4 $DEBUG_OPTIONS -J-cp -J$3 > silica_rmi.log 2> silica_rmi.log < /dev/null &
echo "rmiregistry started."
popd > /dev/null
# Grab the PID (first field of the ps output) of the rmiregistry just started.
psid=-1
for psid in `ps x | grep "[r]miregistry $1"`; do echo "pid=" $psid;break; done
if test $psid -gt 0 ; then
	exit 0
fi
exit 1
| true |
e53a599a23ca3bbc6a21ce5405c6958a4a2ea0fd
|
Shell
|
saisreenivas-kalla/Guessinggame
|
/makefile.sh
|
UTF-8
| 252 | 3.15625 | 3 |
[] |
no_license
|
git init
echo -e "# The name of the project is Guessing game \n" > readme.md
date >> readme.md
a=$(wc -l < guessinggame.sh)
echo -e "\n ## The program contains $a lines of code" >> readme.md
git add -A
git commit -m "added readme.md"
| true |
e1ee578cf7e8c890a9bc0f9a06396bb6d40971c1
|
Shell
|
pawel-slowik/mklbimg
|
/mklbimg
|
UTF-8
| 3,815 | 3.75 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
kernel_file="bzImage"
root_file="rootfs.ext2"
qemu_image_file="image.qcow2"
raw_image_file="image.raw"
mlb_path="$HOME/mlb"
mlb_file="$mlb_path/mlb.bin"
mlb_cmd="$mlb_path/mlbinstall"
if [[ ! -f "$kernel_file" || ! -r "$kernel_file" ]]; then
echo "kernel file doesn't exist, is not a file or is not readable: $kernel_file" 1>&2
exit 1
fi
if [[ ! -f "$root_file" || ! -r "$root_file" ]]; then
echo "rootfs file doesn't exist, is not a file or is not readable: $root_file" 1>&2
exit 1
fi
if [[ ! -f "$mlb_file" || ! -r "$mlb_file" ]]; then
echo "MLB boot sector file doesn't exist, is not a file or is not readable: $mlb_file" 1>&2
echo "try \`make -C \"$mlb_path\"\`" 1>&2
exit 1
fi
if [[ ! -f "$mlb_cmd" || ! -x "$mlb_cmd" ]]; then
echo "MLB install command doesn't exist, is not a file or is not executable: $mlb_cmd" 1>&2
echo "try \`make -C \"$mlb_path\"\`" 1>&2
exit 1
fi
mlb_size=$(stat -c '%s' "$mlb_file")
# as this script is intended to be run from non-root accounts, sfdisk might
# not be in $PATH
admin_paths=("/sbin" "/usr/sbin" "/usr/local/sbin")
sfdisk_test=$(which sfdisk 2> /dev/null)
if [[ $? -eq 0 ]]; then
sfdisk="$sfdisk_test"
else
	for p in "${admin_paths[@]}"; do
if [[ -f "$p/sfdisk" && -x "$p/sfdisk" ]]; then
sfdisk="$p/sfdisk"
break
fi
done
fi
if [[ -z $sfdisk ]]; then
echo "sfdisk executable not found, searched in: ${admin_paths[@]}" 1>&2
exit 1
fi
# partition sizes calculated in cylinders for old / buggy versions of sfdisk
# https://bugs.launchpad.net/ubuntu/+source/util-linux/+bug/1481158
sector_size=512
sectors=63
heads=255
cylinder_size=$(($sector_size * $sectors * $heads))
kernel_size=$(stat -c '%s' "$kernel_file")
root_size=$(stat -c '%s' "$root_file")
kernel_cylinders=$(($kernel_size / $cylinder_size))
if [[ $(($kernel_size % $cylinder_size)) != 0 ]]; then
kernel_cylinders=$(($kernel_cylinders + 1))
fi
root_cylinders=$(($root_size / $cylinder_size))
if [[ $(($root_size % $cylinder_size)) != 0 ]]; then
root_cylinders=$(($root_cylinders + 1))
fi
total_cylinders=$(($kernel_cylinders + $root_cylinders))
# create empty disc image
dd if=/dev/zero of="$raw_image_file" bs=$cylinder_size count=$total_cylinders
# create partitions
echo -e ",$kernel_cylinders,L,*\n,,L" | $sfdisk -H $heads -S $sectors "$raw_image_file"
# read offsets of kernel and root partitions, in sectors
kernel_offset=0
root_offset=0
list_flag=0
set -f
while read line; do
if [[ $list_flag -eq 1 ]]; then
if [[ $line =~ ^[^[:space:]]+1[[:space:]] ]]; then
words=($line)
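			# partition 1 carries the boot flag, so the "*" adds a column and the start sector is field 3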
kernel_offset=${words[2]}
fi
if [[ $line =~ ^[^[:space:]]+2[[:space:]] ]]; then
words=($line)
root_offset=${words[1]}
fi
fi
if [[ $line =~ \#sectors ]]; then
list_flag=1
fi
done < <($sfdisk -uS -H $heads -S $sectors -l "$raw_image_file")
# write kernel and root partitions into disk image
dd if="$kernel_file" of="$raw_image_file" bs=$sector_size seek=$kernel_offset conv=notrunc
dd if="$root_file" of="$raw_image_file" bs=$sector_size seek=$root_offset conv=notrunc
# install MLB
"$mlb_cmd" "$raw_image_file" "$kernel_file" "root=/dev/sda2 rw"
# mlbinstall computes kernel LBA using the real / mounted disc drive.
# We need to replace this with LBA computed using VM disc image.
# First, encode $kernel_offset to 4 bytes:
input=$kernel_offset
divisors=(16777216 65536 256 1)
buf=""
for ((i = 0; $i < ${#divisors[@]}; i++)); do
buf=$(printf "%02x " $(($input / ${divisors[$i]})))" $buf"
input=$(($input % ${divisors[$i]}))
done
# Then, write encoded offset at the end of MLB code:
mlb_last_byte_offset=$(($mlb_size - 1))
mlb_lba_offset=$(($mlb_last_byte_offset - ${#divisors[@]}))
echo "$buf" | xxd -p -r -seek $mlb_lba_offset - "$raw_image_file"
# convert raw image to qemu format
qemu-img convert -f raw -p -O qcow2 "$raw_image_file" "$qemu_image_file"
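# Optional sanity check (a sketch, not part of the original flow): dump the
# four bytes just patched at the tail of the MLB code and compare them with
# $kernel_offset, remembering they are stored least-significant byte first.
xxd -s $mlb_lba_offset -l 4 "$raw_image_file"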
| true |
8b4cc238ee7ed956675fa522b1fe1936a6ebe185
|
Shell
|
redkyn/grader
|
/misc/completions/_grader.zsh
|
UTF-8
| 5,546 | 3.90625 | 4 |
[
"MIT"
] |
permissive
|
_grader_completion() {
	if [[ -z "$GRADER_HOME" ]] || [[ ! -d "$GRADER_HOME" ]] || [[ ! -e "$GRADER_HOME/grader.yml" ]]; then
local -x GRADER_HOME=""
# Guess GRADER_HOME if it's not set.
if [[ -e "$(pwd)/grader.yml" ]]; then
GRADER_HOME=`pwd`
elif [[ ! -z "$(which grader)" ]]; then
GRADER_HOME="$(which grader | rev | cut -d'/' -f3- | rev)"
fi
fi
if [[ ! -e "$GRADER_HOME/grader.yml" ]]; then
return
fi
##### Get all assignments ####
local -a assignments
assignments=( $GRADER_HOME/assignments/* )
	# Now trim everything except for the folder names (zsh ':t' tail modifier).
	assignments=( ${assignments:t} )
##############################
#### Get all students ####
local -a students
# Extract students from roster:
students=( $(cat "$GRADER_HOME/grader.yml" | grep -P --color=never "\s+\-?\s*id:" | sed -e 's/\s*\-\?\s*id:\s*\([A-Za-z0-9_]\+\)$/\1/g') )
##########################
local -a verbosity
verbosity=( "DEBUG" "INFO" "WARNING" "ERROR" )
	_arguments \
		"(- 1 *)-h[display grader help and exit]" \
		"(- 1 *)--help[display grader help and exit]" \
		"--path[specify grader's root manually]: :" \
		"--tracebacks[show grader tracebacks when there is an error]" \
		"--verbosity[configure output verbosity]: :{_describe 'verbosity level' verbosity}" \
"1: :->cmds" \
'*:: :->args' && ret=0
case $state in
cmds)
_values "grader command" \
"init[initialize grader by creating grader.yml]" \
"new[Create a new assignment]" \
"build[Build an assignment's docker image]" \
"import[Import student submission(s)]" \
"list[List student submission(s)]" \
"grade[Grade student submission(s)]" \
"cat[Print an assignment's grade output to STDOUT]" \
"report[Generate reports using a gradesheet template]" \
"inspect[Inspect a graded submission's container]" \
"help[Show help for grader and exit]"
ret=0
;;
args)
# Trim assignment list to just have assignment names
case $line[1] in
init)
_arguments \
'--help[View help for init and exit]' \
'--force[Overwrite an existing grader.yml]' \
'--course-id[Unique course ID (for docker)]'
ret=0
;;
build)
_arguments \
'--help[View help for build and exit]' \
'--no-cache[Do not use docker image cache when building]' \
'--pull[Pull the gradesheet repo before building]' \
'--silent[Do not parse and display output from docker]' \
"1: :{_describe 'assignments' assignments}"
ret=0
;;
import)
local -a _kinds
_kinds=("blackboard" "multiple" "single" "repo")
_arguments \
'--help[View help for import and exit]' \
"--kind: :{_describe 'kind of import' _kinds}" \
"1: :{_describe 'assignments' assignments}" \
"2: *:_files"
ret=0
;;
list)
local -a _sortby
_sortby=("time" "name")
_arguments \
'--help[View help for list and exit]' \
"--submissions[Show submissions for each assignment]" \
"--full[Show full length of values]" \
"--sortby[Sort by a specific field]: :{_describe 'sort import by' _sortby}" \
"1: :{_describe 'assignments' assignments}"
ret=0
;;
grade)
_arguments \
'--help[View help for grade and exit]' \
				'--rebuild[Rebuild containers (if they exist)]' \
"--suppress_output[Don't display output]" \
"1: :{_describe 'assignments' assignments}" \
"2: :{_describe 'students' students }"
ret=0
;;
cat)
local -a _submissions
_submissions=()
# FUTURE: Ideally, this would only grab submissions from the assignment/student you specified...
# http://stackoverflow.com/a/23357277/7065175
while IFS= read -r -d $'\0'; do
# Trim to just the submission ID alone.
_submissions+=($(echo "$REPLY" | rev | cut -d'/' -f-1 | rev))
done < <(find "$GRADER_HOME/assignments/" -wholename '*/*{*}' -print0)
_arguments \
'--help[View help for cat and exit]' \
"--submission_id[ID of a specific submission to cat]: :{_describe 'ALL submissions' _submissions}" \
"1: :{_describe 'assignments' assignments}" \
"2: :{_describe 'students' students }"
ret=0
;;
inspect)
_arguments \
'--help[View help for inspect and exit]' \
"--user[username of a specific container user to inspect as]: :" \
"1: :{_describe 'assignments' assignments}" \
"2: :{_describe 'students' students }"
ret=0
;;
report)
_arguments \
'--help[View help for report and exit]' \
'--template[Type of template to use]:' \
"1: :{_describe 'assignments' assignments}" \
"::OPTIONAL :{_describe 'students' students }"
ret=0
;;
esac
;;
esac
return ret
}
compdef _grader_completion grader
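# To try the completion in a live shell (a sketch; the relative path is an
# assumption): the trailing compdef call registers the function, so sourcing
# the file in a zsh with an initialised completion system is enough.
autoload -Uz compinit && compinit
source ./misc/completions/_grader.zsh
# now `grader <TAB>` should offer the subcommands above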
| true |
526233d317bdfbeb8a9e71153cf9ec88598265b2
|
Shell
|
prettyh8machine/pdf-overlay
|
/pdf-overlay
|
UTF-8
| 135 | 2.578125 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
set -e
ASM=bin/Debug/PdfOverlay.exe
if [ ! -f $ASM ]; then
make
fi
mono --debug "$ASM" "$@"
| true |
b698a8d011664e61b2e2db1caf529c941641dcc1
|
Shell
|
KentVu/dawg-java
|
/build_swig.sh
|
UTF-8
| 467 | 3.484375 | 3 |
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
set -v
#if [ "$1" == "-c" ]; then
# echo Clone swig
# git clone --depth=1 https://github.com/swig/swig.git
# shift
#fi
if [ "$1" == "-d" ]; then
echo install dependencies
sudo apt-get -yqq install automake bison
shift
fi
if swig -version |grep 'Version 4'; then
echo Swig version 4 installed
exit
fi
distdir=${1:-$PWD/dist}
echo Installing SWIG
cd swig || exit 1
git log -1
./autogen.sh
./configure --prefix="$distdir"
make
make install
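# Typical invocation (a sketch based on the flags handled above): install the
# build dependencies first, then install SWIG under /opt/swig instead of ./dist.
./build_swig.sh -d /opt/swig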
| true |
a6eb819561b1b757be989eecc35e537d9fdeb8a1
|
Shell
|
matheussampaio/medical-device
|
/connect.sh
|
UTF-8
| 196 | 2.5625 | 3 |
[] |
no_license
|
#!/bin/bash
adb shell ip route > addrs.txt
ip_addrs=$(awk '{ if (NF >= 9) print $9 }' addrs.txt)
rm addrs.txt
adb tcpip 5555
adb connect "$ip_addrs:5555"
echo "you can remove the cable now."
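# To drop the TCP session and return to USB later (a sketch, standard adb verbs):
adb disconnect "$ip_addrs:5555"
adb usb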
| true |
d4145ef1ee1e8f5e46500c76bd3cbab7beb4388c
|
Shell
|
nisarpro/shell-random
|
/live/zsh/sudo-emulation.sh
|
UTF-8
| 1,199 | 3.359375 | 3 |
[] |
no_license
|
#!/usr/bin/env zsh
\which sudo > /dev/null
if [ $? -eq 0 ]; then
_sudo_exists='true'
else
_sudo_exists='false'
fi
# 2016-03-26, on Lubuntu (version not recorded)
# 2016-03-28, on Slackware 14.1
#alias su="\sudo $( \basename $( \readlink /proc/$$/exe ) )"
#alias sul='\sudo \su --login'
# All instances of 'su' will be "under one roof", to avoid multiple instances of a root window. Because that's terribly insecure.
# 2016-03-29, on Slackware 14.1
su() {
if [ $_sudo_exists = 'true' ]; then
\sudo $1 \screen -X setenv currentdir `\pwd`
\sudo $1 \screen -X eval 'chdir $currentdir' screen
# This logs out of any existing instance of root.
\sudo $1 \screen -A -D -RR
else
# 2016-10-29, on Porteus-LXQt-v3.1-i486
/bin/su
fi
}
sul() {
/bin/su '--login'
}
# Let root become the user.
# Basically the reverse of the above.
# TODO - tweak this to make a sort of `sudouser`
# 2016-04-01, on Slackware 14.1
suu() {
  if [ -z "$1" ]; then
\sudo -u user $SHELL
else
\sudo $*
fi
}
suul() {
suu '--login -u user'
}
# 2016-10-29, on Porteus-LXQt-v3.1-i486
if [ $_sudo_exists = 'false' ]; then
sudo() {
    /bin/su -c "$*"
}
fi
| true |
3a61426765169dce95712362cb8094de7bc31e5d
|
Shell
|
Andreas237/AnsiblePlaybooks
|
/setup_post_boot.sh
|
UTF-8
| 250 | 2.703125 | 3 |
[] |
no_license
|
#!/bin/bash
# Install ansible and run the playbook specified in $1
# update repos
sudo apt update -y
# upgrade installs
sudo apt upgrade -y
# install ansible
sudo apt install ansible -y
# run a playbook passed to this script
ansible-playbook "$1"
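# Example invocation (a sketch; the playbook name is an assumption):
./setup_post_boot.sh site.yml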
| true |
123a874d60751b38e80fe649275b5607fc41a154
|
Shell
|
Hiroki11x/ofxCustomAddons
|
/ofxMSATensorFlow/scripts/copy_headers.sh
|
UTF-8
| 1,642 | 3.859375 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# ./copy_headers.sh ~/DEV/tensorflow
DST='../libs/tensorflow/include'
if [[ $# -eq 0 ]] ; then
echo
echo 'Missing argument. I need the path to your tensorflow repo'
echo
echo 'Usage: copy_headers.sh path/to/tensorflow'
echo 'e.g.: copy_headers.sh ~/DEV/tensorflow'
echo
echo 'Also note, it will copy the headers to '$DST
echo 'so make sure you are running this script from ofxMSATensorFlow/scripts'
echo
echo
exit 1
fi
SRC=$1 # command line argument is the location of tensorflow repo
if [[ $# -eq 2 ]] ; then
DO_CLEANUP=$2
else
DO_CLEANUP=false
fi
echo "Copying files from $SRC to $DST"
# remove existing headers for a clean start
rm -rf "$DST"
mkdir -p "$DST/tensorflow"
cp -R "$SRC/tensorflow/core" "$DST/tensorflow"
cp -R "$SRC/tensorflow/cc" "$DST/tensorflow"
mkdir -p "$DST/third_party"
cp -R "$SRC/third_party/eigen3" "$DST/third_party"
cp -R "$SRC/bazel-genfiles/tensorflow/cc" "$DST/tensorflow"
cp -R "$SRC/bazel-genfiles/tensorflow/core" "$DST/tensorflow"
mkdir -p "$DST/external/eigen_archive"
cp -R "$SRC/bazel-tensorflow/external/eigen_archive/"eigen-eigen* "$DST/external/eigen_archive"
cp -R "$SRC/google/protobuf/src/google/" "$DST/google"
if $DO_CLEANUP ; then
	echo "deleting src files from $DST"
	find "$DST" -type f \( -name '*.cpp' -o -name '*.c' -o -name '*.cc' \
		-o -name '*.cxx' -o -name '*.cmake' -o -name '*.py' -o -name '*.txt' \
		-o -name '*.dat' -o -name '*.sh' -o -name '*.proto' \) -delete
fi
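# Example run (a sketch using the layout the script itself documents): the
# optional second argument switches on the source-file cleanup above.
cd ofxMSATensorFlow/scripts && ./copy_headers.sh ~/DEV/tensorflow true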
| true |
2376937f1b0dbfac1b26abe0457651316e585f02
|
Shell
|
emzfuu/project
|
/perks/bootstrap.sh
|
UTF-8
| 5,468 | 3.25 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# Download and Install the Latest Updates for the OS
echo "Updating server to latest patches"
sudo apt-get update && sudo apt-get upgrade -y
##############################################################################################
## Update Certificate ##
##############################################################################################
echo "Updating SSL certificate"
sudo cp /vagrantubuntu/*.crt /usr/local/share/ca-certificates/
sudo update-ca-certificates
##############################################################################################
## DATABASE INSTALLATION ##
##############################################################################################
# Set the Server Timezone to CST
echo "Installing MySQL"
echo "America/Chicago" | sudo tee /etc/timezone
sudo dpkg-reconfigure -f noninteractive tzdata
# Install MySQL Server in a Non-Interactive mode. Default root password will be "root"
echo "mysql-server-5.7 mysql-server/root_password password 123456789" | sudo debconf-set-selections
echo "mysql-server-5.7 mysql-server/root_password_again password 123456789" | sudo debconf-set-selections
sudo apt-get -y install mysql-server-5.7
# Run the MySQL Secure Installation wizard
sudo chmod 777 /vagrantubuntu/secure.sh
sudo /vagrantubuntu/secure.sh
sudo sed -i 's/127\.0\.0\.1/0\.0\.0\.0/g' /etc/mysql/my.cnf
mysql -uroot -p123456789 -e 'USE mysql; UPDATE `user` SET `Host`="%" WHERE `User`="root" AND `Host`="localhost"; DELETE FROM `user` WHERE `Host` != "%" AND `User`="root"; FLUSH PRIVILEGES;'
sudo service mysql restart
##############################################################################################
## APACHE and PHP-FPM INSTALLATION ##
##############################################################################################
echo "Installing Apache and PHP"
sudo add-apt-repository ppa:ondrej/php -y
sudo apt-get update -y
sudo apt-get install php7.1 -y
sudo add-apt-repository ppa:ondrej/pkg-gearman -y
sudo apt-get update -y
# PDO for MySQL ships inside php7.1-mysql; the duplicated install lines are consolidated here
sudo apt-get install -y php7.1-gearman php7.1-opcache php7.1-mysql php7.1-mbstring php7.1-mcrypt php7.1-zip php7.1-fpm php7.1-memcache php7.1-memcached php7.1-gd php7.1-curl php7.1-dom
sudo apt-cache pkgnames | grep php7.1
echo "Start apache service"
sudo a2enmod ssl
sudo service apache2 restart
echo "ServerName localhost" | sudo tee /etc/apache2/conf-available/fqdn.conf && sudo a2enconf fqdn
sudo a2enmod rewrite
sudo a2enmod ssl
if [ -f /etc/php/7.1/apache2/php.ini ]; then
sudo sed -i "s/error_reporting = .*/error_reporting = E_ALL/" /etc/php/7.1/apache2/php.ini
sudo sed -i "s/display_errors = .*/display_errors = On/" /etc/php/7.1/apache2/php.ini
fi
if [ ! -d /etc/apache2/ssl ]; then
sudo mkdir /etc/apache2/ssl
sudo openssl req -batch -x509 -nodes -days 365 -newkey rsa:2048 -keyout /etc/apache2/ssl/apache.key -out /etc/apache2/ssl/apache.crt
fi
echo "Restarting apache"
sudo service apache2 restart
cd ~
curl -O https://bootstrap.pypa.io/get-pip.py
python get-pip.py --user
echo 'export PATH=~/.local/bin:$PATH' >> ~/.bash_profile
source ~/.bash_profile
echo "Install phpunit"
cd ~
sudo wget https://phar.phpunit.de/phpunit.phar
sudo chmod +x phpunit.phar
sudo mv phpunit.phar /usr/local/bin/phpunit
sudo locale-gen en_US.UTF-8
sudo a2dismod php5
# the PHP packages installed above are 7.1, so enable the matching module
sudo a2enmod php7.1
echo "Transferring .html and .php files"
sudo cp /vagrantubuntu/index.html /var/www/html/
sudo cp /vagrantubuntu/index.php /var/www/html/
sudo service apache2 restart
sudo bash /srv/www/deploy.sh
sudo update-rc.d apache2 enable
##############################################################################################
## PHP COMPOSER SETUP AND INSTALLATION ##
##############################################################################################
echo "Checking composer..."
if [ ! -f /usr/local/bin/composer ]; then
sudo curl -sS https://getcomposer.org/installer | php
sudo mv ./composer.phar /usr/local/bin/composer
fi
##############################################################################################
## POPULATE DATABASE SCHEMA AND DATA TO MYSQL ##
##############################################################################################
# Download database schema
echo "Downloading a copy of the database (this may take a while)..."
cd ~
sudo cp /vagrantubuntu/*.sql ~
mysql -u root -p123456789 -e 'CREATE DATABASE test'
mysql -u root -p123456789 test < schema.sql
mysql -u root -p123456789 test < data.sql
echo "--- All done! ---"
echo "Add 127.0.0.1 testsite.samsung.local to your hosts file, and then open https://testsite.samsung.local in your browser."
echo ""
##############################################################################################
## Erase all file in perks ##
##############################################################################################
sudo rm -rf /vagrantubuntu/
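# Post-provision smoke test (a sketch; the credentials and database name come
# from the steps above):
curl -ks https://localhost/ | head -n 5
mysql -uroot -p123456789 -e 'SHOW TABLES;' test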
| true |
fe444d82656a2f5826daf886b121651ddda33a8a
|
Shell
|
horttanainen/goto
|
/config.sh
|
UTF-8
| 1,023 | 3.484375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
#This content is released under the (https://github.com/horttanainen/goto/blob/master/LICENSE) MIT License.
dir=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd)
modify="$dir/goto.sh"
tmpfile="$dir/tmpfile"
newpath="gotopath=\"$dir\""
number=$(grep -n gotopath $modify| cut -d":" -f 1| head -n 1)
sed "${number}d" "$modify" > $tmpfile; mv $tmpfile "$modify"
sed "${number}i $newpath" "$modify" > $tmpfile; mv $tmpfile "$modify"
oldvalue=$(grep \(\) $modify | cut -d"(" -f 1)
printf "give shortcut for goto (current: $oldvalue): "
IFS= read -r name
name=$(echo "$name" | tr -d '[:space:]')
if [ -z "$(echo "$name" | tr -d '[:alpha:]')" ]; then
if [ -z $name ]; then
:
else
newname="${name}\(\) \{"
number=$(grep -n \(\) $modify| cut -d":" -f 1| head -n 1)
sed "${number}d" "$modify" > $tmpfile; mv $tmpfile "$modify"
sed "${number}i $newname" "$modify" > $tmpfile; mv $tmpfile "$modify"
fi
else
printf "Only alphabetic characters allowed.\n"
exit 0
fi
| true |
7cb02b1f7bc43180b8b1d172a32167d692a93065
|
Shell
|
mrneo240/psptoolchain
|
/toolchain.sh
|
UTF-8
| 4,206 | 4.34375 | 4 |
[] |
no_license
|
#!/bin/sh
# toolchain.sh by Jakub Kaszycki <jakub@kaszycki.net.pl>
# This function is based on an old function, which enforced the number of
# jobs. Now the only purpose of this function is to provide a way to detect
# CPU number when the user wants to detect it.
num_cpus ()
{
if command -v getconf >/dev/null 2>&1
then
if getconf _NPROCESSORS_ONLN >/dev/null 2>&1
then
getconf _NPROCESSORS_ONLN
return 0
fi
fi
cat >&2 <<_EOF_
Warning: could not detect number of CPUs, assuming 1 job at a time
_EOF_
echo 1
}
# Parse the command line options.
short_usage ()
{
echo "Usage: $0 [options...] [stages...]"
}
error_usage ()
{
short_usage
echo "Try $0 -h for more information."
}
usage ()
{
short_usage
cat <<_EOF_
Builds the PSP toolchain.
Options:
-d<DIRECTORY> Set <DIRECTORY> as the output directory.
This option is required.
-h Print this help text.
-j<JOBS> Run <JOBS> jobs at the same time. If the argument
"auto" is given, chooses a number depending on the
number of available CPUs. If this option is not given,
the script runs only one job at a time (takes
a lot of time with tasks like building GCC, but is
safer, more stable and much more deterministic).
-s<DIRECTORY> Set <DIRECTORY> as the source directory.
The default is current directory.
If no stages are passed, all stages are run. Otherwise, only the selected
stages are run with the order as passed on the command line.
Stages may be passed as names, numbers or script names.
_EOF_
}
PSPDEV=
JOBS=1
SOURCE=.
while getopts '+:d:hj:s:' opt
do
case "$opt" in
d)
PSPDEV="$OPTARG"
;;
h)
usage
exit 0
;;
j)
JOBS="$OPTARG"
;;
s)
SOURCE="$OPTARG"
;;
\?)
			echo "Unknown option: -$OPTARG" >&2
error_usage >&2
exit 2
;;
:)
			echo "Missing argument to option -$OPTARG" >&2
error_usage >&2
exit 2
;;
*)
echo 'Internal error' >&2
exit 99
esac
done
shift $((OPTIND-1))
if [ -z "$PSPDEV" ]
then
echo "Missing required option -d"
exit 2
fi
if [ "x$JOBS" = xauto ]
then
JOBS="$(num_cpus)"
fi
PSPDEV="$(realpath "$PSPDEV")"
SOURCE="$(realpath "$SOURCE")"
PSPDEV_TMPDIR="$(mktemp -dt pspdev-tmp-XXXXXX)"
cleanup ()
{
rm -rf "$PSPDEV_TMPDIR"
}
trap cleanup EXIT
export JOBS
export PSPDEV
export PSPDEV_TMPDIR
PATH="$PSPDEV/bin:$PATH"
export PATH
# Usage: run_script SCRIPT TYPE
run_script ()
{
SCRIPT="$1"
echo "Running $2 script: $(basename "$SCRIPT")"
"$SCRIPT"
X=$?
if ! [ "$X" -eq 0 ]
then
echo "Script $(basename "$SCRIPT") failed with error $X"
exit 1
fi
}
# Usage: run_scripts DIR TYPE
run_scripts ()
{
echo "Running $2 scripts"
IFS_backup="$IFS"
IFS='
'
for SCRIPT in $(find "$1" -name '*.sh' | sort)
do
run_script "$SCRIPT" "$2"
done
IFS="$IFS_backup"
unset IFS_backup
}
## Enter the psptoolchain directory.
cd "$SOURCE" || { echo "ERROR: Could not enter the psptoolchain directory."; exit 1; }
## Create the build directory.
mkdir -p build || { echo "ERROR: Could not create the build directory."; exit 1; }
## Enter the build directory.
cd build || { echo "ERROR: Could not enter the build directory."; exit 1; }
run_scripts ../depends dependency
get_script_number ()
{
NUM="$1"
NUM=$((10#$NUM))
printf '%03d' "$NUM"
unset NUM
}
have_script_number ()
{
# First, check it is a number
if ! [ "$(printf "%s" "$1" | tr -d '0-9' | wc -c)" -eq 0 ]
then
return 1
fi
NUM="$(get_script_number "$1")"
[ "$(find ../scripts -name "$NUM-*.sh" | wc -l)" -eq 1 ]
return $?
}
have_script_name ()
{
[ "$(find ../scripts -name "[0-9][0-9][0-9]-$1.sh" | wc -l)" -eq 1 ]
return $?
}
if [ "$#" -eq 0 ]
then
run_scripts ../scripts build
else
for SCRIPT
do
if echo "$SCRIPT" | grep -F '/' >/dev/null 2>&1
then
# Plain file path.
run_script "$(cd .. && realpath "$SCRIPT")" build
elif [ -e "../scripts/$SCRIPT" ]
then
# Script file name
run_script "../scripts/$SCRIPT" build
elif have_script_number "$SCRIPT"
then
# Script number
run_script "../scripts/$(get_script_number "$SCRIPT")-"*".sh" build
elif have_script_name "$SCRIPT"
then
# Script name
run_script "../scripts/"*"-$SCRIPT.sh" build
else
echo "Unknown script: $SCRIPT" >&2
exit 1
fi
done
fi
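# Example invocations (a sketch; the stage names are assumptions about what
# lives under scripts/):
./toolchain.sh -d /usr/local/pspdev -j auto             # full build, parallel
./toolchain.sh -d /usr/local/pspdev 001 binutils        # only selected stages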
| true |