blob_id (string, 40 chars) | language (string, 1 class) | repo_name (string, 4–115 chars) | path (string, 2–970 chars) | src_encoding (string, 28 classes) | length_bytes (int64, 31–5.38M) | score (float64, 2.52–5.28) | int_score (int64, 3–5) | detected_licenses (list, 0–161 items) | license_type (string, 2 classes) | text (string, 31–5.39M chars) | download_success (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---
1489aaaa2f716f70f222f2e26197e0fa907caed7
|
Shell
|
bpoulliot/hbcli-batchenc
|
/hbcli-batchenc-anonymized.sh
|
UTF-8
| 3,699 | 3.78125 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
################DOCUMENTATION ONLY###################
# PURPOSE: Automate ripping of disc media via HandbrakeCLI
# USAGE: Automatic Setup: ./hbcli-batchenc.sh
# Manual Setup: ./hbcli-batchenc.sh <preset> <title>
# INPUT: Automatic Setup: NONE
# Manual Setup: Handbrake preset and/or disc title
#
# OUTPUT: Automatic Setup: MP4 file using preset at /outDir/title-#.mp4
# Manual Setup: MP4 file using specified preset at /path/
#
# READ: Lines with !AUTOMATIC should be commented out for manual use
# Lines with !MANUAL must be uncommented and have values
# Lines with !REMOTE should be commented out for local use
# If using AUTOMATIC, use a staging area for media and organize after rip
#
# WIP: Update for using on server (no remote)
# Figure out how to auto-start on Ubuntu Server 20.04
# Any way to connect to a DB for naming search?
# Add flag for remote/local
########################################################################
########################### CHANGE THESE ###############################
########################################################################
# comment out if not using rip local --> store remote
user_name=<remote_host_username>
remote_host=<remote_host_ip_or_name>
# need path to Handbrake presets (can be anywhere)
hbDir=</path/to/handbrake/presets/>
# where is media stored e.g., /mnt/dvd-storage/
baseDir=</path/to/media/>
# what are your prefix names e.g., plex-DVD-[media_type]
# name all presets similar or use single preset
preset_prefix=<preset-naming-convention->
# set minimum title duration from disc to process (integer)
min_dur=<minimum_duration>
# !AUTOMATIC
# output folder for automatic storage
media=<media_store_folder_name>
# !MANUAL
# media is preset name AND media directory
# title is manual disc title input (useful for plex)
#media="$1"
#title="$2"
########################################################################
########################### DO NOT CHANGE ##############################
########################################################################
# !REMOTE
# scp to a remote destination (can comment out in loop rather than here)
# current usage is rip on different computer than server, will be updated
function scpubu () {
folder=$(printf %q "$3")
destDir="$2"/"${folder}"/
ssh "${user_name}"@"${remote_host}" mkdir -p "${destDir}"
scp "$1" "${user_name}"@"${remote_host}":"${destDir}"
}
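# e.g. scpubu "out/title-1.mp4" movies "My Movie" creates movies/My Movie/ on the remote and copies the file into it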
# grab disc title for auto-naming purposes
title=$(diskutil info <DISC_DRIVE> | sed -n -e 's/^.*Volume Name: //p' | xargs)
# current usage is media type for folder name (movies vs. tv_shows)
dirSlug="${media}/${title}"
# e.g., plex-DVD-movies.json OR plex-DVD-storage.json
preset="${preset_prefix}${media}"
presetDir="${hbDir}${preset}.json"
# usage is for mounted media storage (/mnt/name/[movies OR tv_shows]/[disc_title])
outDir="${baseDir}${dirSlug}/"
# scan every title: "-t 0" makes HandBrakeCLI error per title and list them on stderr, captured here
rawout=$(handbrakeCLI -i <DISC_DRIVE> --min-duration=0 -t 0 2>&1 >/dev/null)
# count the "+ title N:" lines for the total title count
count=$(echo "$rawout" | grep -Eao "\\+ title [0-9]+:" | wc -l)
# make output directory and any missing parents
mkdir -p "${outDir}"
# iterate through titles
for i in $(seq $count); do
episode="${outDir}${title}-$i.mp4"
handbrakeCLI \
--preset-import-file "${presetDir}" \
--preset "${preset}" \
-i <DISC_DRIVE> -t $i --min-duration="${min_dur}" -o "${episode}"
# !REMOTE (comment out next 2 lines)
scpubu "${episode}" "${media}" "${title}"
rm "${episode}"
done
rm -r "${outDir}"
drutil eject <DISC_DRIVE>
| true |
25d732ffba6c7faa56af238591f14185580afaae
|
Shell
|
satyamisme/DirtyCow-R1_HD
|
/one-click-root.sh
|
UTF-8
| 8,727 | 3.234375 | 3 |
[] |
no_license
|
#!/bin/sh
echo --------------------------------------------------------------------------------------------
echo THERE ARE 10 PAUSES IN THIS SCRIPT SO LOOK FOR PROMPTS FOR YOU TO HIT ENTER
echo --------------------------------------------------------------------------------------------
echo [*] BEFORE WE BEGIN THE SCRIPT WILL RUN "ADB DEVICES" AND SEE IF YOU HAVE DRIVERS INSTALLED
echo [*] THE NEEDED RESPONSE IS SIMILAR TO BELOW
echo [*]
echo [*] List of devices attached
echo "[*] **************** device"
echo [*]
echo [*] INSTEAD OF STARS IT WILL BE YOUR SERIAL NUMBER
echo [*] IF NO DEVICE LISTED YOU ARE NOT READY TO RUN THIS SCRIPT. CLOSE THIS WINDOW NOW IF NOT READY
echo [*]
echo [*] IF DEVICE IS LISTED PRESS ANY KEY ON COMPUTER TO START
echo [*]
adb wait-for-device
adb devices
echo -n "to continue press [enter]: "
read start
#clear
echo [*] copying dirtycow to /data/local/tmp/dirtycow
adb push pushed/dirtycow /data/local/tmp/dirtycow
sleep 10
echo [*] copying recowvery-app_process32 to /data/local/tmp/recowvery-app_process32
adb push pushed/recowvery-app_process32 /data/local/tmp/recowvery-app_process32
sleep 10
echo [*] copying frp.bin to /data/local/tmp/unlock
adb push pushed/frp.bin /data/local/tmp/unlock
sleep 10
echo [*] copying busybox to /data/local/tmp/busybox
adb push pushed/busybox /data/local/tmp/busybox
sleep 10
echo [*] copying cp_comands.txt to /data/local/tmp/cp_comands.txt
adb push pushed/cp_comands.txt /data/local/tmp/cp_comands.txt
sleep 10
echo [*] copying dd_comands.txt to /data/local/tmp/dd_comands.txt
adb push pushed/dd_comands.txt /data/local/tmp/dd_comands.txt
sleep 10
echo [*] changing permissions on copied files
adb shell chmod 0777 /data/local/tmp/*
sleep 10
#clear
echo --------------------------------------------------------------------------------------------
echo [*] DONE PUSHING FILES TO PHONE. NOW WE ARE GOING TO TEMPORARILY WRITE OVER APP_PROCESS
echo [*] WITH A MODIFIED VERSION THAT HAS lsh IN IT, USING SYSTEM_SERVER AS A ROOT SHELL
echo [*] THIS STEP WILL CAUSE THE PHONE TO SOFT REBOOT AND IT WILL NOT RESPOND TO BUTTON PUSHES
echo [*]
adb shell /data/local/tmp/dirtycow /system/bin/app_process32 /data/local/tmp/recowvery-app_process32
echo --------------------------------------------------------------------------------------------
#clear
echo --------------------------------------------------------------------------------------------
echo --------------------------------------------------------------------------------------------
echo [*] WAITING 60 SECONDS FOR ROOT SHELL TO SPAWN
echo [*] WHILE APP_PROCESS IS REPLACED PHONE WILL APPEAR TO BE UNRESPONSIVE BUT SHELL IS WORKING
sleep 60
echo --------------------------------------------------------------------------------------------
echo [*] OPENING A ROOT SHELL ON THE NEWLY CREATED SYSTEM_SERVER
echo [*] MAKING A DIRECTORY ON PHONE TO COPY FRP PARTITION TO
echo [*] CHANGING PERMISSIONS ON NEW DIRECTORY
echo [*] COPYING FRP PARTITION TO NEW DIRECTORY AS ROOT
echo [*] CHANGING PERMISSIONS ON COPIED FRP
adb shell "/data/local/tmp/busybox nc localhost 11112 < /data/local/tmp/cp_comands.txt"
#clear
echo "[*] COPY UNLOCK.IMG OVER TOP OF COPIED FRP IN /data/local/test NOT AS ROOT WITH DIRTYCOW"
echo [*]
adb shell /data/local/tmp/dirtycow /data/local/test/frp /data/local/tmp/unlock
sleep 5
#clear
echo [*] WAITING 5 SECONDS BEFORE WRITING FRP TO EMMC
sleep 5
echo "[*] DD COPY THE NEW (UNLOCK.IMG) FROM /data/local/test/frp TO PARTITION mmcblk0p17"
adb shell "/data/local/tmp/busybox nc localhost 11112 < /data/local/tmp/dd_comands.txt"
echo --------------------------------------------------------------------------------------------
echo --------------------------------------------------------------------------------------------
echo -----------REBOOTING_INTO_BOOTLOADER--------------------------------------------------------
adb reboot bootloader
#clear
echo --------------------------------------------------------------------------------------------
echo --------------------------------------------------------------------------------------------
echo [*] YOUR PHONE SCREEN SHOULD BE BLACK WITH THE WORD "=>FASTBOOT mode..." IN LOWER CORNER
echo [*] JUST LIKE IN THE BEGINNING WE NEED TO VERIFY YOU HAVE DRIVERS ON PC FOR THE NEXT STEP
echo [*] THE RESPONSE SHOULD BE
echo [*]
echo "[*] *************** fastboot"
echo [*]
echo [*] THE STARS WILL BE YOUR SERIAL NUMBER
echo [*] IF THE RESPONSE IS THIS THEN HIT ANY BUTTON ON PC TO CONTINUE
echo [*]
echo [*] IF RESPONSE IS A BLANK LINE YOU DO NOT HAVE THE DRIVER NEEDED TO CONTINUE. CLOSE THIS WINDOW,
echo [*] GET FASTBOOT DRIVERS, THEN RUN "fastboot oem unlock" IN A TERMINAL
fastboot devices
echo -n "to continue press [enter]: "
read start
#clear
echo [*] NOW THAT THE DEVICE IS IN FASTBOOT MODE WE ARE GOING TO UNLOCK THE
echo [*] BOOTLOADER. ON THE NEXT SCREEN ON YOUR PHONE YOU WILL SEE
echo [*] PRESS THE VOLUME UP/DOWN BUTTONS TO SELECT YES OR NO
echo [*] JUST PRESS VOLUME UP TO START THE UNLOCK PROCESS.
echo --------------------------------------------------------------------------------------------
echo -------------------------------------------------------------------------------------------
echo [*] PRESS ENTER ON COMPUTER TO START THE UNLOCK
echo -n "to continue press [enter]: "
read start
fastboot oem unlock
#clear
echo [*] ONCE THE BOOTLOADER IS UNLOCKED PRESS ENTER TO WIPE DATA
echo -n "to continue press [enter]: "
read start
fastboot format userdata
#clear
echo [*] PRESS ENTER TO REBOOT THE DEVICE
echo -n "to continue press [enter]: "
read start
fastboot reboot
#clear
echo --------------------------------------------------------------------------------------------
echo --------------------------------------------------------------------------------------------
echo [*] YOUR BOOTLOADER IS NOW UNLOCKED ON YOUR BLU R1 HD AMAZON DEVICE
echo [*] FIRST BOOT UP WILL TAKE AROUND 5 TO 10 MINUTES THEN YOU CAN SET IT UP
echo [*] NEXT IS TO INSTALL RECOVERY TWRP
echo [*]
echo [*]
echo [*] YOU WILL NEED TO ENABLE DEVELOPER OPTIONS, THEN ENABLE ADB TO CONTINUE WITH THE NEXT SCRIPT
echo "[*] ******************"
echo [*] IF PHONE DID NOT REBOOT HOLD POWER UNTIL IT POWERS OFF THEN AGAIN TO POWER ON
echo "[*] ******************"
echo --------------------------------------------------------------------------------------------
echo --------------------------------------------------------------------------------------------
echo [*] PRESS ENTER TO INSTALL TWRP AFTER YOU ENABLE DEVELOPER OPTIONS ON PHONE
echo [*] OR CTRL+C TO STOP HERE
echo -n "to continue press [enter]: "
read start
echo --------------------------------------------------------------------------------------------
echo --------------------------------------------------------------------------------------------
echo -----------REBOOTING_INTO_BOOTLOADER--------------------------------------------------------
adb reboot bootloader
#clear
echo --------------------------------------------------------------------------------------------
echo --------------------------------------------------------------------------------------------
echo [*] NOW YOU ARE IN FASTBOOT MODE AND READY TO FLASH TWRP RECOVERY
echo [*]
echo [*]
echo [*]
echo --------------------------------------------------------------------------------------------
echo --------------------------------------------------------------------------------------------
echo [*] PRESS ENTER TO FLASH RECOVERY
echo -n "to continue press [enter]: "
read start
fastboot flash recovery pushed/recovery.img
echo [*] ONCE THE FILE TRANSFER IS COMPLETE HOLD VOLUME UP AND PRESS ENTER ON PC
echo [*]
echo [*] IF PHONE DOES NOT REBOOT THEN HOLD VOLUME UP AND POWER UNTIL IT DOES
echo -n "to continue press [enter]: "
read start
fastboot reboot
echo [*] ON PHONE SELECT RECOVERY FROM BOOT MENU WITH VOLUME KEY THEN SELECT WITH POWER
echo [*] PRESS ENTER ON PC FOR MORE NOTES
echo -n "to continue press [enter]: "
read start
#clear
echo --------------------------------------------------------------------------------------------
echo --------------------------------------------------------------------------------------------
echo [*] NOW THAT YOU HAVE BOOTED TO RECOVERY, CONTINUE AND MAKE A BACKUP IF YOU WANT
echo [*] YOU CAN JUST CONTINUE AS IS FROM HERE OR FLASH THE OLD PRELOADER FILE WITH
echo [*] RECOVERY. THERE ARE MORE STEPS NOT INCLUDED HERE IF YOU WANT TO DO THAT.
echo [*]
echo --------------------------------------------------------------------------------------------
echo --------------------------------------------------------------------------------------------
echo [*] PRESS ENTER TO FINISH THIS SCRIPT.
echo -n "to continue press [enter]: "
read start
exit
| true |
56c183a4a2702ad2ff52788744117b8266e3a871
|
Shell
|
wonderley/configs
|
/scripts/nth
|
UTF-8
| 153 | 3.234375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
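# prints the Nth line of the given files; the line number is passed with a leading dash (e.g. "nth -5 file"), since it is handed straight to head(1)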
lineNum=$1
if ! echo "$lineNum" |
grep -qE '^-[0-9]+$'; then
echo "provide a line number"
exit 1
fi
shift
head "$lineNum" "$@" | tail -1
| true |
06e15a32c90a13366c3ec27e805ea48404c2a32d
|
Shell
|
leohuang4977/tvb-ukbb
|
/bb_diffusion_pipeline/bb_probtrackx2/bb_post_probtrackx2
|
UTF-8
| 1,185 | 3.640625 | 4 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#### bb_post_probtrackx2
#
# concatenate parallelized probtrackx outputs
# and clean up probtrackx log files
#
. $BB_BIN_DIR/bb_pipeline_tools/bb_set_header
set +e
subjdir=`fsl_abspath $1`
subjdir=`echo ${subjdir} | sed 's/\/$//'` # strip any trailing slash
echo subjectdir is $subjdir
python $BB_BIN_DIR/bb_diffusion_pipeline/bb_probtrackx2/tvb_concat_probtrackx2.py $subjdir
# move probtrackx logs to subject log directory
mv bb_probtrackx_${1}.e* $subjdir/logs/.
mv bb_probtrackx*_${1}.o* $subjdir/logs/.
#generate tvb input zip
mkdir $subjdir/tvb_inputs
cp $subjdir/dMRI/sc.txt $subjdir/dMRI/distance.txt $subjdir/T2_FLAIR/lesions/volume.txt $subjdir/tvb_inputs
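# collect the .ica directories under fMRI (null-delimited so paths with spaces survive)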
array=()
while IFS= read -r -d $'\0'; do
array+=("$REPLY")
done < <(find $subjdir/fMRI -maxdepth 1 -type d -name "*.ica" -print0)
#for each .ica file
for t in "${array[@]}"; do
mkdir $subjdir/tvb_inputs/`basename $t`
cp $t/fc.txt $t/ts.txt $subjdir/tvb_inputs/`basename $t`
done
subj=$1
if [[ "$subj" =~ '/'$ ]]; then
subj=${subj%?}
fi
subj=`basename $subj`
cd $subjdir && zip -r ./${subj}_tvb_inputs.zip ./tvb_inputs && cd -
rm -rf $subjdir/tvb_inputs
set -e
. $BB_BIN_DIR/bb_pipeline_tools/bb_set_footer
| true |
a349ad627d1063914407847f84bfc4f8063a2db0
|
Shell
|
sequenceiq/docker-hoya
|
/hoya-centos-install.sh
|
UTF-8
| 7,982 | 3.171875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#Install script for Hadoop 2.3 on CentOS 6.5.3/x86_64
#run as root (sudo su -)
# install packages
yum install -y curl which tar sudo openssh-server openssh-clients rsync
# passwordless ssh
ssh-keygen -q -N "" -t dsa -f /etc/ssh/ssh_host_dsa_key
ssh-keygen -q -N "" -t rsa -f /etc/ssh/ssh_host_rsa_key
ssh-keygen -q -N "" -t rsa -f /root/.ssh/id_rsa
cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys
service sshd start
# install java
curl -LO 'http://download.oracle.com/otn-pub/java/jdk/7u51-b13/jdk-7u51-linux-x64.rpm' -H 'Cookie: oraclelicense=accept-securebackup-cookie'
rpm -i jdk-7u51-linux-x64.rpm
rm jdk-7u51-linux-x64.rpm
export JAVA_HOME=/usr/java/default
export PATH=$PATH:$JAVA_HOME/bin:/usr/local/bin
# install hadoop
curl -s http://www.eu.apache.org/dist/hadoop/common/hadoop-2.3.0/hadoop-2.3.0.tar.gz | tar -xz -C /usr/local/
cd /usr/local && ln -s hadoop-2.3.0 hadoop
export HADOOP_PREFIX=/usr/local/hadoop
sed -i '/^export JAVA_HOME/ s:.*:export JAVA_HOME=/usr/java/default\nexport HADOOP_PREFIX=/usr/local/hadoop\nexport HADOOP_HOME=/usr/local/hadoop\n:' $HADOOP_PREFIX/etc/hadoop/hadoop-env.sh
sed -i '/^export HADOOP_CONF_DIR/ s:.*:export HADOOP_CONF_DIR=/usr/local/hadoop/etc/hadoop/:' $HADOOP_PREFIX/etc/hadoop/hadoop-env.sh
. $HADOOP_PREFIX/etc/hadoop/hadoop-env.sh
. $HADOOP_PREFIX/etc/hadoop/yarn-env.sh
mkdir $HADOOP_PREFIX/input
cp $HADOOP_PREFIX/etc/hadoop/*.xml $HADOOP_PREFIX/input
# Standalone Operation
# testing with mapred sample
#bin/hadoop jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.3.0.jar grep input output 'dfs[a-z.]+'
# pseudo distributed
cat > $HADOOP_PREFIX/etc/hadoop/core-site.xml<<EOF
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://localhost:9000</value>
</property>
</configuration>
EOF
cat > $HADOOP_PREFIX/etc/hadoop/hdfs-site.xml<<EOF
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
EOF
cat > $HADOOP_PREFIX/etc/hadoop/mapred-site.xml<<EOF
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
EOF
cat > $HADOOP_PREFIX/etc/hadoop/yarn-site.xml<<EOF
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
</configuration>
EOF
#set the hostname - fix vagrant issue
ping $HOSTNAME -c 1 -W 1 || echo "127.0.0.1 $HOSTNAME" >>/etc/hosts
# format HDFS namenode
$HADOOP_PREFIX/bin/hdfs namenode -format
#build hadoop dependencies function - devtools, maven, protobuf
build-hadoop-dependencies() {
# fixing the libhadoop.so issue the hard way ...
# do it if you have a couple of spare videos to watch
yum groupinstall "Development Tools" -y
yum install -y cmake zlib-devel openssl-devel
# maven
curl http://www.eu.apache.org/dist/maven/maven-3/3.2.1/binaries/apache-maven-3.2.1-bin.tar.gz|tar xz -C /usr/share
export M2_HOME=/usr/share/apache-maven-3.2.1
export PATH=$PATH:$M2_HOME/bin
# ohhh btw you need protobuf - released rpm is 2.3, we need 2.5 thus we need to build, will take a while, go get a coffee
curl https://protobuf.googlecode.com/files/protobuf-2.5.0.tar.bz2|bunzip2|tar -x -C /tmp
cd /tmp/protobuf-2.5.0
./configure && make && make install
export LD_LIBRARY_PATH=/usr/local/lib
export LD_RUN_PATH=/usr/local/lib
}
#we have released the native libs on bintray (the official release is 32 bit), use that instead of building
build-native-hadoop-libs() {
#coffee time again - this will take quite a long time
#build hadoop dependencies
build-hadoop-dependencies
# hadoop
curl http://www.eu.apache.org/dist/hadoop/common/hadoop-2.3.0/hadoop-2.3.0-src.tar.gz|tar xz -C /tmp
cd /tmp/hadoop-2.3.0-src/
mvn package -Pdist,native -DskipTests -Dtar -DskipTests
rm -rf /usr/local/hadoop/lib/native/*
cp -d /tmp/hadoop-2.3.0-src/hadoop-dist/target/hadoop-2.3.0/lib/native/* /usr/local/hadoop/lib/native/
}
# fixing the libhadoop.so - we have built a 64bit distro for Hadoop native libs
use-native-hadoop-libs() {
rm -rf /usr/local/hadoop/lib/native/*
curl -Ls http://dl.bintray.com/sequenceiq/sequenceiq-bin/hadoop-native-64.tar|tar -x -C /usr/local/hadoop/lib/native/
}
#use native libs - in case you'd like to build Hadoop use build-native-hadoop-libs instead
use-native-hadoop-libs
####################
# testing mapreduce
####################
$HADOOP_PREFIX/sbin/start-dfs.sh
$HADOOP_PREFIX/sbin/start-yarn.sh
$HADOOP_PREFIX/bin/hdfs dfs -mkdir -p /user/root
$HADOOP_PREFIX/bin/hdfs dfs -put $HADOOP_PREFIX/etc/hadoop/ input
$HADOOP_PREFIX/bin/hadoop jar $HADOOP_PREFIX/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.3.0.jar grep input output 'dfs[a-z.]+'
$HADOOP_PREFIX/bin/hdfs dfs -cat output/*
#optional function - not needed unless you'd like to use the Flume provider
#if you don't use this, get the Hoya code or at least the HBase-Hoya configuration, since you need it for --appconf when starting an HBase cluster
build-hoya-with-flume-provider() {
#hoya build with flume
cd /tmp
curl -LO https://github.com/sequenceiq/hoya/archive/master.zip
yum install -y unzip
unzip master.zip
cd hoya-master
mvn clean install -DskipTests
}
#it's needed because starting an HBase cluster with Hoya requires an --appconf (HBase conf).
#we have a Hoya fork with an additional Flume provider - you can use the original Hoya code from GitHub - make sure you use the right path with --appconf when starting an HBase cluster
build-hoya-with-flume-provider
#download Hoya release 0.13
install-hoya(){
cd /tmp
curl -s http://dffeaef8882d088c28ff-185c1feb8a981dddd593a05bb55b67aa.r18.cf1.rackcdn.com/hoya-0.13.1-all.tar.gz | tar -xz -C /usr/local/
cd /usr/local
ln -s hoya-0.13.1 hoya
export HOYA_HOME=/usr/local/hoya
export PATH=$PATH:$HOYA_HOME/bin
}
#install Hoya
install-hoya
#download HBase and copy to HDFS for Hoya
get-hbase-copy-to-hdfs(){
cd /tmp
curl -sLO http://www.eu.apache.org/dist/hbase/hbase-0.98.0/hbase-0.98.0-hadoop2-bin.tar.gz
$HADOOP_PREFIX/bin/hadoop dfs -put hbase-0.98.0-hadoop2-bin.tar.gz /hbase.tar.gz
}
#download HBase and copy to HDFS for Hoya
get-hbase-copy-to-hdfs
#download Zookeeper and start
install-start-zookeeper(){
cd /tmp
curl -s http://www.eu.apache.org/dist/zookeeper/zookeeper-3.3.6/zookeeper-3.3.6.tar.gz | tar -xz -C /usr/local/
cd /usr/local
ln -s zookeeper-3.3.6 zookeeper
export ZOO_HOME=/usr/local/zookeeper
export PATH=$PATH:$ZOO_HOME/bin
mv $ZOO_HOME/conf/zoo_sample.cfg $ZOO_HOME/conf/zoo.cfg
mkdir /tmp/zookeeper
$ZOO_HOME/bin/zkServer.sh start
}
#Zookeeper must be running before a Hoya cluster can be created
install-start-zookeeper
#create a Hoya cluster
create-hoya-cluster() {
hoya create hbase --role master 1 --role worker 1 --manager localhost:8032 --filesystem hdfs://localhost:9000 --image hdfs://localhost:9000/hbase.tar.gz --appconf file:///tmp/hoya-master/hoya-core/src/main/resources/org/apache/hoya/providers/hbase/conf --zkhosts localhost
}
#destroy the cluster
destroy-hoya-cluster() {
hoya destroy hbase --manager localhost:8032 --filesystem hdfs://localhost:9000
}
flex-hoya-cluster() {
num_of_workers=$1
hoya flex hbase --role worker $num_of_workers --manager localhost:8032 --filesystem hdfs://localhost:9000
}
freeze-hoya-cluster() {
hoya freeze hbase --manager localhost:8032 --filesystem hdfs://localhost:9000
}
#now create your Hoya cluster. after jps you will see a running HoyaAppMaster, HMaster and HRegionServer
create-hoya-cluster
#flex up the number of HBase region servers to 4
#flex-hoya-cluster 4
#freeze the Hoya cluster
#freeze-hoya-cluster
#destroy the Hoya cluster
#destroy-hoya-cluster
#pull out ports 50070 and 8088 (namenode and resource manager) for your convenience.
#enter SSH command mode
#~C
#-L 8088:127.0.0.1:8088
:<<EOF
# restart
service sshd start
. /usr/local/hadoop/etc/hadoop/hadoop-env.sh
cd $HADOOP_HOME
sbin/start-dfs.sh
sbin/start-yarn.sh
EOF
| true |
db08b593dc7d4c141f41e0b850965a62e485b48c
|
Shell
|
DeGaido/opendomo
|
/src/odcommon/usr/local/bin/get_mem_free.sh
|
UTF-8
| 214 | 3.09375 | 3 |
[] |
no_license
|
#!/bin/sh
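# prints "<epoch seconds> <MemFree in kB>" and logs a warning when free memory drops below MINFREE kB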
DATE=`date +%s`
INFO=`grep MemFree /proc/meminfo| sed 's/[^0-9]//g'`
MINFREE="1000"
if test "$INFO" -lt "$MINFREE"; then
/bin/logevent warning system "Free memory under [$MINFREE]"
fi
echo "$DATE $INFO"
| true |
5b35582ddd629a8c306d30ba457a0fc0aef197c2
|
Shell
|
zerocoolys/gt
|
/run.sh
|
UTF-8
| 1,818 | 3.765625 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
# note: "env bash -x" is not a portable shebang, so enable tracing explicitly
set -x
export MYSQL_PASS=123456
export MYSQL_RUN_DB=gt
export MYSQL_TEST_DB=gt_test
USAGE="Usage: run.sh [init|start|stop|test|clean|docker_start]"
if [ "$#" -ne 1 ]; then
echo "$USAGE"
exit 1
fi
case $1 in
init)
echo "pull image..."
docker pull mysql:5.6
echo "start container..."
docker run --name mysql-test -p 3306:3306 -e MYSQL_ROOT_PASSWORD=$MYSQL_PASS -d mysql:5.6
sleep 5
echo "drop database $MYSQL_RUN_DB"
docker exec -d mysql-test mysqladmin -uroot -p$MYSQL_PASS drop $MYSQL_RUN_DB
echo "create database $MYSQL_RUN_DB"
docker exec -d mysql-test mysqladmin -uroot -p$MYSQL_PASS create $MYSQL_RUN_DB
echo "drop database $MYSQL_TEST_DB"
docker exec -d mysql-test mysqladmin -uroot -p$MYSQL_PASS drop $MYSQL_TEST_DB
echo "create database $MYSQL_TEST_DB"
docker exec -d mysql-test mysqladmin -uroot -p$MYSQL_PASS create $MYSQL_TEST_DB
;;
start)
docker restart mysql-test
echo "start spring boot"
mvn clean spring-boot:run -Dspring.profiles.active=prod -Dspring.datasource.password=$MYSQL_PASS -Dspring.datasource.url=jdbc:mysql://127.0.0.1:3306/$MYSQL_RUN_DB
;;
stop)
echo "stopping docker container mysql-test"
docker stop mysql-test
echo "done."
;;
test)
docker restart mysql-test
mvn clean test -Dspring.profiles.active=prod -Dspring.datasource.password=$MYSQL_PASS -Dspring.datasource.url=jdbc:mysql://127.0.0.1:3306/$MYSQL_TEST_DB
;;
clean)
mvn clean
docker stop mysql-test
docker rm mysql-test
;;
docker_start)
docker start mysql-test
;;
*)
echo "$USAGE"
exit 1
;;
esac
exit 0
| true |
2d21a5eff7a96de853015749d9d5927c6a75212a
|
Shell
|
xanecs/cmake-nRF5x
|
/ci/scripts/common/consts.sh
|
UTF-8
| 1,598 | 2.765625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
source "${BASH_SOURCE%/*}/utils.sh"
# Global definitions
ROOT_LOCAL_DIR="${BASH_SOURCE%/*}/../../.."
ROOT_DIR="$(absolute $ROOT_LOCAL_DIR)"
BUILD_DIR="$ROOT_DIR/build"
CMAKE_DIR="$ROOT_DIR/cmake"
CI_DIR="$ROOT_DIR/ci"
SCRIPTS_DIR="$CI_DIR/scripts"
PYTHON_DIR="$SCRIPTS_DIR/python"
PYTHON_VENV_DIR="$PYTHON_DIR/.venv"
GENERATED_DIR="$CI_DIR/generated"
PATCHES_DIR="$CI_DIR/patches"
LIBRARIES_DIR="$CI_DIR/libraries"
LIBRARIES_TESTS_DIR="$CI_DIR/libraries_tests"
TEMPLATES_DIR="$CI_DIR/templates"
EXAMPLES_DIR="$CI_DIR/examples"
CUSTOM_EXAMPLES_DIR="$CI_DIR/examples_custom"
TOOLCHAINS_DIR="$CI_DIR/toolchains"
TOOLS_DIR="$CI_DIR/tools"
NRFJPROG_DIR="$TOOLS_DIR/nrf/nrfjprog/"
MERGEHEX_DIR="$TOOLS_DIR/nrf/mergehex/"
SDKS_DIR="$CI_DIR/sdks"
# Regexp
SD_REGEXP="^(s[1-9][0-9][0-9]|blank|mbr)$"
VARIANT_REGEXP="(pca[0-9]{5})_(s[0-9]{3})"
CMAKE_BUILD_TYPE_REGEXP="^(Debug|Release|RelWithDebInfo|MinSizeRel)$"
CMAKE_LOG_LEVEL_REGEXP="^(TRACE|DEBUG|VERBOSE|STATUS|NOTICE|WARNING|ERROR)$"
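# e.g. SD_REGEXP matches "s140", "blank" or "mbr"; VARIANT_REGEXP matches "pca10056_s140"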
# Format
BUILD_SUMMARY_ENTRY_FORMAT="%-48s%-16s%-16s%-16s%-16s%-16s\n"
# Generated files
PYTHON_VENV_SCRIPT="$PYTHON_VENV_DIR/bin/activate"
GENERATED_EXAMPLES="$GENERATED_DIR/examples.json"
GENERATED_LIBRARIES="$GENERATED_DIR/libraries.json"
GENERATED_CMAKE_LIBRARIES="$GENERATED_DIR/cmake_libraries.json"
# Template files
LIBRARIES_CMAKE_TEMPLATE_FILE="$TEMPLATES_DIR/cmake_libraries.j2"
LIBRARIES_CMAKE_GROUPS_TEMPLATE_FILE="$TEMPLATES_DIR/cmake_groups.j2"
LIBRARIES_CMAKE_TESTS_TEMPLATE_FILE="$TEMPLATES_DIR/cmake_libraries_tests.j2"
EXAMPLE_CMAKE_TEMPLATE_FILE="$TEMPLATES_DIR/cmake_example.j2"
| true |
9c9c48f9ac03b0ebe3083cb9259cf16bc8517afc
|
Shell
|
Tigerlyly/Stevent
|
/setup/reset-server.sh
|
UTF-8
| 475 | 3.078125 | 3 |
[] |
no_license
|
#!/bin/bash
SCRIPT_LOC="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
. ${SCRIPT_LOC}/shared-vars.sh
${SCRIPT_LOC}/stop-server.sh || error "Could not stop server"
# Prepare configuration files
echo "eula=true" > ${DEPLOY_LOC}/eula.txt
cp -f ${SCRIPT_LOC}/${BASE_SERVER_PROPERTIES} ${DEPLOY_LOC}/server.properties || error "Could not write server.properties"
# Modify server properties if needed
# Delete world if present
rm -rf ${DEPLOY_LOC}/world
| true |
ab4e4f5eb5a41468ba494da408826849acc5a604
|
Shell
|
viaacode/viaa-derive
|
/viaaderive
|
UTF-8
| 18,903 | 3.9375 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# creates derivative video files
# requires ffmpeg compiled with libx264 (yuv 8-bit), libvpx, libopus, and ffprobe
# defaults, can be overwritten with options
FFMPEG_PATH="ffmpeg"
FFPROBE_PATH="ffprobe"
# default ffmpeg input options
INPUTOPTIONS+=(-nostdin)
INPUTOPTIONS+=(-vsync 0)
INPUTOPTIONS+=(-hide_banner)
LOGDIR="LOGS"
# usage
_usage(){
echo "VIAA derivative script"
echo
echo "$(basename "${0}") inputvideo1 [inputvideo2 ... ]"
echo
echo "-h, --help print this help message"
echo "-o, --outputs options lists separated by '+'"
echo " options include:"
echo " mpeg4"
echo " webm"
echo " framemd5"
echo " prores"
echo " ffv1"
echo " --ffmpegpath path to ffmpeg"
echo " --ffprobepath path to ffprobe"
echo " --inputoptions extra input options for ffmpeg"
echo " --outputoptions extra outputs options for ffmpeg (applies to all outputs)"
echo
echo " --ffv1 enable ffv1 output (same as '-o ffv1')"
echo " --ffv1path specify an output file for ffv1"
echo " --mpeg4 enable ffv1 output (same as '-o mpeg4')"
echo " --mpeg4path specify an output file for mpeg4"
echo " --prores enable ffv1 output (same as '-o prores')"
echo " --prorespath specify an output file for prores"
echo " --webm enable ffv1 output (same as '-o webm')"
echo " --webmpath specify an output file for webm"
echo " --framemd5 enable ffv1 output (same as '-o framemd5')"
echo " --framemd5path specify an output file for framemd5"
echo
echo "-y, --overwrite Overwrite output files by default,"
echo " same as --outputoptions '-y'"
echo "-r, --report Output a report per transcoding."
echo
echo "Note that setting a --*path implies that the corresponding output is enabled."
echo "Also note that if the output file already exists, then that output will be disabled."
echo
echo "Example:"
echo "$(basename "${0}") -o mpeg4+prores+webm+ffv1 FILE1 FILE2"
echo "would make those four outputs for each of the two inputs."
echo
exit
}
[ "${#}" = 0 ] && _usage
while true ; do
case "$1" in
-o | --outputs ) shift ; OUTPUT_OPTIONS="${1}" ; shift ;;
-h | --help ) shift ; _usage ;;
--ffmpegpath ) shift ; FFMPEG_PATH="${1}" ; shift ;;
--ffprobepath ) shift ; FFPROBE_PATH="${1}" ; shift ;;
--inputoptions ) shift ; INPUTOPTIONS+=(${1}) ; shift ;;
--outputoptions ) shift ; OUTPUTOPTIONS+=(${1}) ; shift ;;
-y | --overwrite ) shift ; OUTPUTOPTIONS+=(-y) ;;
-r | --report ) shift ; export FFREPORT="file=${LOGDIR}/$(basename "${0}")_%p_%t.txt" ; mkdir -p "${LOGDIR}" ;; # _mkdir2 is defined later in the script, so call mkdir directly here
--ffv1 ) shift ; SCREENING_FFV1_ENABLE=true ;;
--ffv1path ) shift ; SCREENING_FFV1_ENABLE=true ; SCREENING_FFV1_OUTPUT="${1}" ; shift ;;
--mpeg4 ) shift ; STREAMING_MPEG4_ENABLE=true ;;
--mpeg4path ) shift ; STREAMING_MPEG4_ENABLE=true ; STREAMING_MPEG4_OUTPUT="${1}" ; shift ;;
--prores ) shift ; SCREENING_PRORES_ENABLE=true ;;
--prorespath ) shift ; SCREENING_PRORES_ENABLE=true ; SCREENING_PRORES_OUTPUT="${1}" ; shift ;;
--webm ) shift ; STREAMING_WEBM_ENABLE=true ;;
--webmpath ) shift ; STREAMING_WEBM_ENABLE=true ; STREAMING_WEBM_OUTPUT="${1}" ; shift ;;
--framemd5 ) shift ; FRAMEMD5_ENABLE=true ;;
--framemd5path ) shift ; FRAMEMD5_ENABLE=true ; FRAMEMD5_OUTPUT="${1}" ; shift ;;
-* | --* ) echo "Not sure what $1 means." ; echo ; _usage ;;
* ) break ;;
esac
done
if [[ "+$OUTPUT_OPTIONS+" = *"+mpeg4+"* ]] ; then
STREAMING_MPEG4_ENABLE=true
elif [[ -z "${STREAMING_MPEG4_ENABLE+x}" ]] ; then
STREAMING_MPEG4_ENABLE=false
fi
if [[ "+$OUTPUT_OPTIONS+" = *"+webm+"* ]] ; then
STREAMING_WEBM_ENABLE=true
elif [[ -z "${STREAMING_WEBM_ENABLE+x}" ]] ; then
STREAMING_WEBM_ENABLE=false
fi
if [[ "+$OUTPUT_OPTIONS+" = *"+framemd5+"* ]] ; then
FRAMEMD5_ENABLE=true
elif [[ -z "${FRAMEMD5_ENABLE+x}" ]] ; then
FRAMEMD5_ENABLE=false
fi
if [[ "+$OUTPUT_OPTIONS+" = *"+prores+"* ]] ; then
SCREENING_PRORES_ENABLE=true
elif [[ -z "${SCREENING_PRORES_ENABLE+x}" ]] ; then
SCREENING_PRORES_ENABLE=false
fi
if [[ "+$OUTPUT_OPTIONS+" = *"+ffv1+"* ]] ; then
SCREENING_FFV1_ENABLE=true
elif [[ -z "${SCREENING_FFV1_ENABLE+x}" ]] ; then
SCREENING_FFV1_ENABLE=false
fi
_get_iso8601_c(){
date +%Y%m%d-%H%M%S
}
# _report function handles display of messages to operator
_report(){
local RED="$(tput setaf 1)" # Red - For Warnings
local GREEN="$(tput setaf 2)" # Green - For Declarations
local BLUE="$(tput setaf 4)" # Blue - For Questions
local NC="$(tput sgr0)" # No Color
local COLOR=""
local STARTMESSAGE=""
local ENDMESSAGE=""
local ECHOOPT=""
local LOG_MESSAGE=""
OPTIND=1
while getopts ":qdwstn" OPT; do
case "${OPT}" in
q) COLOR="${BLUE}" ;; # question mode, use color blue
d) COLOR="${GREEN}" ;; # declaration mode, use color green
w) COLOR="${RED}" ; LOG_MESSAGE="Y" ;; # warning mode, use color red
s) STARTMESSAGE+=([$(basename "${0}")] ) ;; # prepend scriptname to the message
t) STARTMESSAGE+=($(_get_iso8601_c) '- ' ) ;; # prepend timestamp to the message
n) ECHOOPT="-n" ;; # to avoid line breaks after echo
esac
done
shift $(( ${OPTIND} - 1 ))
MESSAGE="${1}"
echo "${ECHOOPT}" "${COLOR}${STARTMESSAGE[@]}${MESSAGE}${NC}"
}
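# e.g. _report -wt "disk full" prints a red warning prefixed with a timestamp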
_mkdir2(){
local DIR2MAKE=""
while [ "${*}" != "" ] ; do
DIR2MAKE="${1}"
if [ ! -d "${DIR2MAKE}" ] ; then
mkdir -p "${DIR2MAKE}"
if [ "${?}" -ne 0 ]; then
_report -wt "${0}: Can't create directory at ${DIR2MAKE}"
exit 1
fi
fi
shift
done
}
_unset_variables(){
unset VIDEOFILTERCHAIN
unset AUDIOFILTERCHAIN
unset CODEC_NAME
unset HEIGHT
unset AUDIO_INDEX_1
unset AUDIO_INDEX_2
unset AUDIO_CHANNELS
unset CHANNEL_LAYOUT
unset FFREPORT
unset OUTPUTS
}
# filters for constructing filterchains
_add_video_filter(){
OPTIND=1
unset ADDASPREFIX
while getopts ":p" OPT ; do
case "${OPT}" in
p) ADDASPREFIX=true ;;
esac
done
shift $(( ${OPTIND} - 1 ))
local FILTER2ADD="${1}"
if [[ -n "${FILTER2ADD}" ]] ; then
if [[ -n "${next_video_filter_prefix}" ]] ; then
FILTER2ADD="${next_video_filter_prefix}${FILTER2ADD}"
unset next_video_filter_prefix
fi
if [[ -z "${VIDEOFILTERCHAIN}" ]] ; then
VIDEOFILTERCHAIN="${FILTER2ADD}"
elif [[ "${ADDASPREFIX}" = true ]] ; then
if [[ "${FILTER2ADD: -1}" = ";" || "${FILTER2ADD: -1}" = "," ]] ; then
VIDEOFILTERCHAIN="${FILTER2ADD}${VIDEOFILTERCHAIN}"
else
VIDEOFILTERCHAIN="${FILTER2ADD},${VIDEOFILTERCHAIN}"
fi
else
if [[ "${VIDEOFILTERCHAIN: -1}" = ";" || "${VIDEOFILTERCHAIN: -1}" = "," ]] ; then
VIDEOFILTERCHAIN="${VIDEOFILTERCHAIN}${FILTER2ADD}"
else
VIDEOFILTERCHAIN="${VIDEOFILTERCHAIN},${FILTER2ADD}"
fi
fi
fi
}
_add_audio_filter(){
OPTIND=1
unset ADDASPREFIX
while getopts ":p" OPT ; do
case "${OPT}" in
p) ADDASPREFIX=true ;;
esac
done
shift $(( ${OPTIND} - 1 ))
local FILTER2ADD="${1}"
if [[ -n "${FILTER2ADD}" ]] ; then
if [[ -n "${next_audio_filter_prefix}" ]] ; then
FILTER2ADD="${next_audio_filter_prefix}${FILTER2ADD}"
unset next_audio_filter_prefix
fi
if [[ -z "${AUDIOFILTERCHAIN}" ]] ; then
AUDIOFILTERCHAIN="${FILTER2ADD}"
elif [[ "${ADDASPREFIX}" = true ]] ; then
if [[ "${FILTER2ADD: -1}" = ";" || "${FILTER2ADD: -1}" = "," ]] ; then
AUDIOFILTERCHAIN="${FILTER2ADD}${AUDIOFILTERCHAIN}"
else
AUDIOFILTERCHAIN="${FILTER2ADD},${AUDIOFILTERCHAIN}"
fi
else
if [[ "${AUDIOFILTERCHAIN: -1}" = ";" || "${AUDIOFILTERCHAIN: -1}" = "," ]] ; then
AUDIOFILTERCHAIN="${AUDIOFILTERCHAIN}${FILTER2ADD}"
else
AUDIOFILTERCHAIN="${AUDIOFILTERCHAIN},${FILTER2ADD}"
fi
fi
fi
}
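# e.g. _add_audio_filter "volume=2" appends to AUDIOFILTERCHAIN; the -p flag prepends instead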
_filter_to_middle_option(){
if [ -n "${VIDEOFILTERCHAIN}" -a -n "${AUDIOFILTERCHAIN}" ] ; then
FILTERCHAIN+=(-filter_complex ${VIDEOFILTERCHAIN}\;${AUDIOFILTERCHAIN})
elif [ -n "${VIDEOFILTERCHAIN}" ] ; then
FILTERCHAIN+=(-filter_complex ${VIDEOFILTERCHAIN})
elif [ -n "${AUDIOFILTERCHAIN}" ] ; then
FILTERCHAIN+=(-filter_complex ${AUDIOFILTERCHAIN})
fi
}
# _get_input info
_get_input_info(){
CODEC_NAME=$("${FFPROBE_PATH}" "${1}" -select_streams v:0 -show_entries stream=codec_name 2>/dev/null -of default=nk=1:nw=1)
HEIGHT=$("${FFPROBE_PATH}" "${1}" -select_streams v:0 -show_entries stream=height 2>/dev/null -of default=nk=1:nw=1)
VIDEOSTREAMCOUNT=$("${FFPROBE_PATH}" "${1}" -select_streams v -show_entries stream=index -of flat 2>/dev/null | awk 'END { print NR }')
AUDIOSTREAMCOUNT=$("${FFPROBE_PATH}" "${1}" -select_streams a -show_entries stream=index -of flat 2>/dev/null | awk 'END { print NR }')
}
_get_audiostreamcount(){
AUDIOSTREAMCOUNT=$("${FFPROBE_PATH}" "${1}" -select_streams a -show_entries stream=index -of flat 2>/dev/null | awk 'END { print NR }')
}
# _get_audio_mapping handles re-mixing audio to a single track for access
_get_audio_mapping(){
_get_audio_index(){
# get ffmpeg's index value of the first audio stream. Useful for do custom channel mappings.
AUDIO_INDEX_1=$("${FFPROBE_PATH}" "${1}" -show_streams -select_streams a:0 2>/dev/null | grep "^index=" | cut -d = -f 2)
AUDIO_INDEX_2=$("${FFPROBE_PATH}" "${1}" -show_streams -select_streams a:1 2>/dev/null | grep "^index=" | cut -d = -f 2)
}
_get_audio_channels(){
# get ffmpeg's channel count of the first audio stream.
AUDIO_CHANNELS=$("${FFPROBE_PATH}" "${1}" -show_streams -select_streams a 2>/dev/null | grep "^channels=" | cut -d = -f 2 | head -n 1)
}
_get_channel_layout(){
# get ffmpeg's channel count of the first audio stream.
CHANNEL_LAYOUT=$("${FFPROBE_PATH}" "${1}" -show_streams -select_streams a 2>/dev/null | grep "^channel_layout=" | cut -d = -f 2 | head -n 1)
}
_get_audio_index "${1}"
_get_audio_channels "${1}"
_get_channel_layout "${1}"
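# _has_first_two_tracks_mono is assumed to be provided elsewhere; it should set MULTIMONO=true when the first two audio tracks are both mono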
_has_first_two_tracks_mono "${1}"
if [[ "${MULTIMONO}" = true ]] ; then
_report -wt "The first two audio tracks are both mono. Considering track 1 for left and track 2 for right."
_add_audio_filter -p "[0:a:0][0:a:1]amerge=inputs=2"
_add_audio_filter "aformat=channel_layouts=stereo"
elif [[ "${AUDIO_CHANNELS}" -gt 2 && "${CHANNEL_LAYOUT}" = "4.0" ]] ; then
_report -wt "The first audio track has more than 2 channels. Considering channel 1 for left and channel 2 for right and ignoring the rest."
_add_audio_filter -p "[0:a:0]channelmap=0|1:stereo"
else
_add_audio_filter "aformat=channel_layouts=stereo"
fi
}
# STREAMING_MPEG4 settings
# output settings
STREAMING_MPEG4_EXTENSION="mp4"
STREAMING_MPEG4_SUFFIX=""
STREAMING_MPEG4_RELATIVE_PATH="STREAMING_MPEG4"
# video settings
STREAMING_MPEG4_OPTS+=(-c:v libx264)
STREAMING_MPEG4_OPTS+=(-profile:v high)
STREAMING_MPEG4_OPTS+=(-pix_fmt yuv420p)
STREAMING_MPEG4_OPTS+=(-crf 21) # increase to reduce quality and size
STREAMING_MPEG4_OPTS+=(-maxrate 2000k) # set maximum bitrate despite quality
# audio settings
STREAMING_MPEG4_AUDIO_OPTS+=(-c:a aac)
# container settings
STREAMING_MPEG4_OPTS+=(-f mp4)
STREAMING_MPEG4_OPTS+=(-movflags faststart)
STREAMING_MPEG4_AUDIO_OPTS+=(-map 0:a:0)
# STREAMING_WEBM settings
# output settings
STREAMING_WEBM_EXTENSION="webm"
STREAMING_WEBM_SUFFIX=""
STREAMING_WEBM_RELATIVE_PATH="STREAMING_WEBM"
# video settings
STREAMING_WEBM_OPTS+=(-c:v vp9)
STREAMING_WEBM_OPTS+=(-pix_fmt yuv420p)
STREAMING_WEBM_OPTS+=(-maxrate 2000k) # set maximum bitrate despite quality
# audio settings
STREAMING_WEBM_AUDIO_OPTS+=(-c:a opus)
# container settings
STREAMING_WEBM_OPTS+=(-f webm)
STREAMING_WEBM_AUDIO_OPTS+=(-map 0:a:0)
# Production IMX settings
# output settings
PRODUCTION_IMX_EXTENSION="mov"
PRODUCTION_IMX_SUFFIX=""
PRODUCTION_IMX_RELATIVE_PATH="PRODUCTION_IMX"
# video settings
PRODUCTION_IMX_OPTS+=(-r pal)
PRODUCTION_IMX_OPTS+=(-c:v mpeg2video)
PRODUCTION_IMX_OPTS+=(-intra_vlc 1)
PRODUCTION_IMX_OPTS+=(-non_linear_quant 1)
PRODUCTION_IMX_OPTS+=(-qmin 1)
PRODUCTION_IMX_OPTS+=(-dc 10)
PRODUCTION_IMX_OPTS+=(-lmin QP2LAMBDA)
PRODUCTION_IMX_OPTS+=(-ps 1)
PRODUCTION_IMX_OPTS+=(-flags +ildct+ilme)
PRODUCTION_IMX_OPTS+=(-pix_fmt yuv422p)
PRODUCTION_IMX_OPTS+=(-top 1)
PRODUCTION_IMX_OPTS+=(-mpv_flags strict_gop)
PRODUCTION_IMX_OPTS+=(-aspect 4:3)
PRODUCTION_IMX_OPTS+=(-qmax 12)
PRODUCTION_IMX_OPTS+=(-g 1)
PRODUCTION_IMX_OPTS+=(-intra)
PRODUCTION_IMX_OPTS+=(-b:v 50M)
PRODUCTION_IMX_OPTS+=(-maxrate:v 50M)
PRODUCTION_IMX_OPTS+=(-minrate:v 50M)
PRODUCTION_IMX_OPTS+=(-bufsize 2000000)
PRODUCTION_IMX_OPTS+=(-rc_init_occupancy 2000000)
PRODUCTION_IMX_OPTS+=(-rc_max_vbv_use 1)
PRODUCTION_IMX_OPTS+=(-rc_min_vbv_use 1)
PRODUCTION_IMX_OPTS+=(-rc_buf_aggressivity 0.25)
PRODUCTION_IMX_OPTS+=(-vbsf imxdump)
PRODUCTION_IMX_OPTS+=(-tag:v mx5p)
# audio settings
PRODUCTION_IMX_AUDIO_OPTS+=(-c:a pcm_s16le)
# container settings
PRODUCTION_IMX_OPTS+=(-f mov)
PRODUCTION_IMX_AUDIO_OPTS+=(-map 0:a)
# FRAMEMD5 settings
# output settings
FRAMEMD5_EXTENSION="md5"
FRAMEMD5_SUFFIX="_framemd5"
FRAMEMD5_RELATIVE_PATH="FRAMEMD5"
# container settings
FRAMEMD5_OPTS+=(-f framemd5)
# SCREENING_PRORES settings
# output settings
SCREENING_PRORES_EXTENSION="mov"
SCREENING_PRORES_SUFFIX=""
SCREENING_PRORES_RELATIVE_PATH="SCREENING_PRORES"
# video settings
SCREENING_PRORES_OPTS+=(-c:v prores_ks)
SCREENING_PRORES_OPTS+=(-profile:v lt) # verify profile?
SCREENING_PRORES_OPTS+=(-flags +ildct+ilme)
# audio settings
SCREENING_PRORES_AUDIO_OPTS+=(-c:a pcm_s16le)
# container settings
SCREENING_PRORES_OPTS+=(-f mov)
SCREENING_PRORES_AUDIO_OPTS+=(-map 0:a)
# SCREENING_FFV1 settings
# output settings
SCREENING_FFV1_EXTENSION="mkv"
SCREENING_FFV1_SUFFIX=""
SCREENING_FFV1_RELATIVE_PATH="SCREENING_FFV1"
# video settings
SCREENING_FFV1_OPTS+=(-c:v ffv1)
SCREENING_FFV1_OPTS+=(-level 3)
SCREENING_FFV1_OPTS+=(-g 1)
SCREENING_FFV1_OPTS+=(-slices 24)
SCREENING_FFV1_OPTS+=(-slicecrc 1)
# audio settings
SCREENING_FFV1_AUDIO_OPTS+=(-c:a flac)
# container settings
SCREENING_FFV1_OPTS+=(-f matroska)
SCREENING_FFV1_AUDIO_OPTS+=(-map 0:a)
while [ "${*}" != "" ] ; do
_unset_variables
INPUT="${1}"
shift
# get context about the input
# encoding options
_get_input_info "${INPUT}"
_get_audiostreamcount "${INPUT}"
if [[ "${CODEC_NAME}" == "jpeg2000" && "${HEIGHT}" == "288" ]] ; then
_add_video_filter "[0:v:0]weave=first_field=bottom,setfield=tff"
_add_video_filter "setsar=sar*2[f1]"
STREAMING_MPEG4_OPTS+=(-map "[f1]")
STREAMING_WEBM_OPTS+=(-map "[f1]")
PRODUCTION_IMX_OPTS+=(-map "[f1]")
FRAMEMD5_OPTS+=(-map "[f1]")
SCREENING_PRORES_OPTS+=(-map "[f1]")
SCREENING_FFV1_OPTS+=(-map "[f1]")
else
STREAMING_MPEG4_OPTS+=(-map 0:v)
STREAMING_WEBM_OPTS+=(-map 0:v)
PRODUCTION_IMX_OPTS+=(-map 0:v)
FRAMEMD5_OPTS+=(-map 0:v)
SCREENING_PRORES_OPTS+=(-map 0:v)
SCREENING_FFV1_OPTS+=(-map 0:v)
fi
_filter_to_middle_option
if "${STREAMING_MPEG4_ENABLE}" ; then
if [[ -z "${STREAMING_MPEG4_OUTPUT+1}" ]] ; then
STREAMING_MPEG4_OUTPUT="${STREAMING_MPEG4_RELATIVE_PATH}/$(basename "${INPUT%.*}")${STREAMING_MPEG4_SUFFIX}.${STREAMING_MPEG4_EXTENSION}"
fi
_mkdir2 "$(dirname "${STREAMING_MPEG4_OUTPUT}")"
if [[ "${AUDIOSTREAMCOUNT}" > 0 ]] ; then
STREAMING_MPEG4_OPTS+=(${STREAMING_MPEG4_AUDIO_OPTS[@]})
fi
OUTPUTS+=(${STREAMING_MPEG4_OPTS[@]} ${FILTERCHAIN[@]} ${OUTPUTOPTIONS[@]} ${STREAMING_MPEG4_OUTPUT})
fi
if "${STREAMING_WEBM_ENABLE}" ; then
if [[ -z "${STREAMING_WEBM_OUTPUT+1}" ]] ; then
STREAMING_WEBM_OUTPUT="${STREAMING_WEBM_RELATIVE_PATH}/$(basename "${INPUT%.*}")${STREAMING_WEBM_SUFFIX}.${STREAMING_WEBM_EXTENSION}"
fi
_mkdir2 "$(dirname "${STREAMING_WEBM_OUTPUT}")"
if [[ "${AUDIOSTREAMCOUNT}" > 0 ]] ; then
STREAMING_WEBM_OPTS+=(${STREAMING_WEBM_AUDIO_OPTS[@]})
fi
OUTPUTS+=(${STREAMING_WEBM_OPTS[@]} ${FILTERCHAIN[@]} ${OUTPUTOPTIONS[@]} ${STREAMING_WEBM_OUTPUT})
fi
if "${FRAMEMD5_ENABLE}" ; then
if [[ -z "${FRAMEMD5_OUTPUT+1}" ]] ; then
FRAMEMD5_OUTPUT="${FRAMEMD5_RELATIVE_PATH}/$(basename "${INPUT%.*}")${FRAMEMD5_SUFFIX}.${FRAMEMD5_EXTENSION}"
fi
_mkdir2 "$(dirname "${FRAMEMD5_OUTPUT}")"
OUTPUTS+=(${FRAMEMD5_OPTS[@]} ${FILTERCHAIN[@]} ${OUTPUTOPTIONS[@]} ${FRAMEMD5_OUTPUT})
fi
if "${SCREENING_PRORES_ENABLE}" ; then
if [[ -z "${SCREENING_PRORES_OUTPUT+1}" ]] ; then
SCREENING_PRORES_OUTPUT="${SCREENING_PRORES_RELATIVE_PATH}/$(basename "${INPUT%.*}")${SCREENING_PRORES_SUFFIX}.${SCREENING_PRORES_EXTENSION}"
fi
_mkdir2 "$(dirname "${SCREENING_PRORES_OUTPUT}")"
if [[ "${AUDIOSTREAMCOUNT}" > 0 ]] ; then
SCREENING_PRORES_OPTS+=(${SCREENING_PRORES_AUDIO_OPTS[@]})
fi
OUTPUTS+=(${SCREENING_PRORES_OPTS[@]} ${FILTERCHAIN[@]} ${OUTPUTOPTIONS[@]} ${SCREENING_PRORES_OUTPUT})
fi
if "${SCREENING_FFV1_ENABLE}" ; then
if [[ -z "${SCREENING_FFV1_OUTPUT+1}" ]] ; then
SCREENING_FFV1_OUTPUT="${SCREENING_FFV1_RELATIVE_PATH}/$(basename "${INPUT%.*}")${SCREENING_FFV1_SUFFIX}.${SCREENING_FFV1_EXTENSION}"
fi
_mkdir2 "$(dirname "${SCREENING_FFV1_OUTPUT}")"
if [[ "${AUDIOSTREAMCOUNT}" > 0 ]] ; then
SCREENING_FFV1_OPTS+=(${SCREENING_FFV1_AUDIO_OPTS[@]})
fi
OUTPUTS+=(${SCREENING_FFV1_OPTS[@]} ${FILTERCHAIN[@]} ${OUTPUTOPTIONS[@]} ${SCREENING_FFV1_OUTPUT})
fi
echo "${FFMPEG_PATH}" "${INPUTOPTIONS[@]}" -i "${INPUT}" "${OUTPUTS[@]}"
"${FFMPEG_PATH}" "${INPUTOPTIONS[@]}" -i "${INPUT}" "${OUTPUTS[@]}"
echo
done
| true |
c389b5eb573afbfc455e02511a20989b0d5ce82a
|
Shell
|
sripadapavan/cloud-testing
|
/scripts/e2e/deploy/run.sh
|
UTF-8
| 5,555 | 3.625 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
function wait_for_deployment_done() {
echo "Waiting for $CDS_DEPLOYMENT deployment done"
local idle_count=0
local active_tasks
local starting_vms
local failed_vapp
local failed_svc
local redeploy_svc_count=0
while true
do
let idle_count=idle_count+1
[ $($CDS_CLI tasks --json | jq ".[]|.operation.state" | grep -c WAITING) -gt 0 ] && idle_count=0
[ $($CDS_CLI vms --json | jq ".[]|.state" | grep -v -c OK) -gt 0 ] && idle_count=0
[ $idle_count -eq 12 ] && break
failed_vapp=$($CDS_CLI vapps --json | jq '.[]|select(.state == "ERROR")|.id' | head -1 |tr -d '"')
[ -n "$failed_vapp" ] && $CDS_CLI vapps rebuild $failed_vapp &
failed_svc=$($CDS_CLI svc show --all --json | jq '.[]|select(.state == "ERROR")|select(.["vm-affined"] != true)|.id' | head -1 |tr -d '"')
[ -n "$failed_svc" ] && {
echo "WARNING:Redeploy failed svc:$failed_svc"
$CDS_CLI svc redeploy $failed_svc &
let redeploy_svc_count=redeploy_svc_count+1
if [ $redeploy_svc_count -eq 30 ]; then
echo "ERROR:Fail to deploy all services after 30 redeployment"
exit 1
fi
}
echo "Waiting for 5 seconds to check cds tasks status (idle count=$idle_count)"
# should ignore tasks that have not been updated for more than 30 minutes
let expired_updated_time=$((($(date +%s)-30*60)*1000))
active_task=$($CDS_CLI tasks --json | jq ".[]|select(.operation.state != \"FINISHED\")|select(.[\"updated-at\"] > $expired_updated_time)|.[\"object-id\"]+\" > \"+.action")
starting_vms=$($CDS_CLI vms --json | jq '.[]|select(.state != "OK")|.template.name+" > "+.state')
[ -n "$active_task" ] && echo "active tasks: $active_task"
[ -n "$starting_vms" ] && echo "starting vms: $starting_vms"
sleep 5
done
local failed_svc=$($CDS_CLI svc --json | jq '.[]|select(.state == "ERROR")|.name')
if [ -n "$failed_svc" ]
then
echo "The following svc failed to deploy:$failed_svc"
exit 1
fi
let expired_updated_time=$((($(date +%s)-30*60)*1000))
expired_task=$($CDS_CLI tasks --json | jq ".[]|select(.operation.state != \"FINISHED\")|select(.[\"updated-at\"] < $expired_updated_time) | .[\"object-id\"]+\" \"+.[\"object-type\"]" | sed -e 's#"##g')
while read -r line; do
[ -z "$line" ] && continue
read id type <<<"$line"
if [ $type = "vapps" ]; then
$CDS_CLI vapps delete $id --force
elif [ $type = "services" ]; then
$CDS_CLI svc delete $id
fi
done <<< "$expired_task"
$CDS_CLI svc --json | jq '.[]|.name+":"+.interfaces[0].ip' | tee -a "$WORK_DIR/${WDC}_dev_${VDC_NUMBER}.${CDS_DEPLOYMENT}.log"
}
function wait_for_deployment_destroy() {
while [ $($CDS_CLI vms --json | jq "length") -gt 0 ]
do
$CDS_CLI vms --json | grep "Not Found" > /dev/null &&
{
echo "Deployment $dep is destoyed"
break;
}
echo "There are still $($CDS_CLI vms --json | jq "length") vms to be destroyed"
sleep 10
done
}
function clean() {
PROD=$1
export CDS_DEPLOYMENT=$(get_deploy_name $PROD)
[ -n "$CDS_DEPLOYMENT" ] && {
echo "Delete deployemnt $CDS_DEPLOYMENT"
$CDS_CLI deployment delete --force
wait_for_deployment_destroy
}
}
function deploy() {
[ -z "$1" ] && return 1
PROD=$1
export CDS_DEPLOYMENT=$(get_deploy_name $PROD)
[ -z "$CDS_DEPLOYMENT" ] && export CDS_DEPLOYMENT="$PROD-$(date '+%Y%m%d_%H%M%S')"
echo "Starting deploy $PROD:$CDS_DEPLOYMENT"
RELEASE_DIR="/var/www/lighttpd/releases/${PROD}"
$SSH root@$CDS_SERVER "mkdir -p $RELEASE_DIR"
$SCP $WORK_DIR/${PROD}.release.yml root@$CDS_SERVER:$RELEASE_DIR/release.yml
$SSH root@$CDS_SERVER "ln -sf $RELEASE_DIR/release.yml ~/${PROD}.release.yml"
$SCP $WORK_DIR/${PROD}.config.yml root@$CDS_SERVER:
$CDS_CLI tune --sys sys.recovervm.disable=1
$CDS_CLI deployment sync $WORK_DIR/${PROD}.config.yml http://$CDS_SERVER/releases/${PROD} || exit 1
wait_for_deployment_done
}
function post_deploy_dsp() {
export CDS_DEPLOYMENT=$(get_deploy_name dsp)
DSP_NATS_IP=$(svc_ip gnatsd)
DM_SERVER_IP=$(svc_ip dmserver)
echo "dsp nats ip: $DSP_NATS_IP"
echo "dm server ip: $DM_SERVER_IP"
if [ -z "$$DSP_NATS_IP" ] || [ -z "$DM_SERVER_IP" ]; then
echo "dsp deployment failed"
exit 1
fi
}
function post_deploy_dbaas() {
export CDS_DEPLOYMENT=$(get_deploy_name dbaas)
for param in $($CDS_CLI tune --json | jq '.[]|select(.value!="0")|.param' | sed 's#"##g' | grep min | grep -v nodebuddy); do
$CDS_CLI tune $param=0 > /dev/null
done
}
set -o | grep xtrace | grep on && XTRACE=on || XTRACE=off
export XTRACE
BASE_DIR=$( dirname "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )")
source ${BASE_DIR}/common.sh
source ${BASE_DIR}/function.sh
case "$1" in
clean )
clean $2
;;
deploy )
deploy $2
post_deploy_$2
;;
yaml )
bash $BASE_DIR/deploy/prepare_${2}_yml.sh
;;
dallas-yaml )
bash $BASE_DIR/deploy/prepare_dallas_${2}_yml.sh
;;
config-for-debug )
bash $BASE_DIR/deploy/config_debug_parameters.sh
;;
clean-all )
clean dbaas
clean dsp
;;
deploy-all )
bash $BASE_DIR/deploy/prepare_dsp_yml.sh
deploy dsp
post_deploy_dsp
bash $BASE_DIR/deploy/prepare_dbaas_yml.sh
deploy dbaas
post_deploy_dbaas
;;
init-env )
prepare_cds
;;
* )
echo "Usage:"
echo " clean dsp|dbaas"
echo " deploy dsp|dbaas"
echo " yaml dsp|dbaas"
echo " clean-all"
echo " deploy-all"
echo " init-env"
;;
esac
| true |
47686e7f51a19dff0608893c096d808892d016a1
|
Shell
|
ziertek/PiLight
|
/installers/install.sh
|
UTF-8
| 4,795 | 4.1875 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/sudo /bin/bash
SCRIPT=`realpath -s $0`
SCRIPTPATH=`dirname $SCRIPT`
STARTPWD=$PWD
GIT_URL='https://github.com/ziertek/PiLight'
# Define colors and styles
NORMAL="\033[0m"
BOLD="\033[1m"
GREEN="\e[32m"
RED="\e[31m"
YELLOW="\e[93m"
show_msg() {
echo -e "$1" > /dev/tty
}
usage() {
echo -e "${BOLD}Usage:${NORMAL}"
echo -e " -i --install-dir Specify where you want to install to"
echo -e " Default is: ${BOLD}/opt/${NORMAL}"
echo -e " -V --verbose Shows command output for debugging"
echo -e " -v --version Shows version details"
echo -e " -h --help Shows this usage message"
}
version() {
echo -e "${BOLD}PiLight installation script 0.5${NORMAL}"
echo -e "URL: $GIT_URL"
}
installSystemdService() {
show_msg "${GREEN}Installing Systemd Service...${NORMAL}"
sed -i "s+WorkingDirectory=/opt/PiLight+WorkingDirectory=$INSTALL_DIR+g" $INSTALL_DIR/PiLight.service
if [[ ! -f /etc/systemd/system/PiLight.service ]]; then
sudo cp PiLight.service /etc/systemd/system/PiLight.service
else
sudo sed -i "s+WorkingDirectory=/opt+WorkingDirectory=$INSTALL_DIR+g" /etc/systemd/system/PiLight.service
fi
}
enableSystemdService() {
show_msg "${GREEN}Starting Systemd Service...${NORMAL}"
sudo systemctl enable PiLight.service
sudo systemctl start PiLight.service
}
VERBOSE=false
INSTALL_DIR='/opt'
while [ "$1" != "" ]; do
case $1 in
-i | --install-dir) shift
INSTALL_DIR=$1
;;
-V | --verbose) VERBOSE=true
;;
-v | --version) version
exit 0
;;
-h | --help) version
echo -e ""
usage
exit 0
;;
* ) echo -e "Unknown option $1...\n"
usage
exit 1
esac
shift
done
# Act on verbose option
if [ $VERBOSE == "false" ]; then
exec > /dev/null
fi
# Check if we have the required files or if we need to clone them
FILES=("server.py" "requirements.txt" "PiLight.service" "lib/__init__.py" "lib/phat_Wrapper.py" "lib/confParser.py" "lib/config_default.yaml" "templates/controller.html" "static/css/main.css" "Update/UpdateLight.sh" "Update/UpdatePi.sh")
FILECHECK=true
for FILE in "${FILES[@]}"; do
if [ $INSTALL_DIR != $SCRIPTPATH ]; then
if [ $VERBOSE == "true" ]; then
show_msg "Checking file... ${INSTALL_DIR}/${FILE}"
fi
if [ ! -f "${INSTALL_DIR}/${FILE}" ]; then
FILECHECK=false
fi
else
if [ $VERBOSE == "true" ]; then
show_msg "Checking file... ${INSTALL_DIR}/${FILE}"
fi
if [ ! -f "${SCRIPTPATH}/${FILE}" ]; then
FILECHECK=false
fi
fi
if [ $FILECHECK == 'false' ]; then
show_msg "${RED}The requried files are missing...${NORMAL} lets clone everything from git..."
break
fi
done
if [ $FILECHECK == 'false' ]; then
sudo apt-get install -y git
which git > /dev/null
if [[ $? != 0 ]]; then
show_msg "${RED}git is not installed... please install git and run the script again!${NORMAL}"
exit 1
fi
if [ "$(ls -A ${INSTALL_DIR})" ]; then
INSTALL_DIR="$INSTALL_DIR/PiLight"
fi
show_msg "${GREEN}Cloning files from git using HTTPS to ${BOLD}${INSTALL_DIR}${NORMAL}${GREEN}...${NORMAL}"
git clone -q $GIT_URL $INSTALL_DIR
chown -R $SUDO_USER:$SUDO_USER $INSTALL_DIR
cd $INSTALL_DIR
fi
case $(uname -s) in
Linux|GNU*)
case $(lsb_release -si) in
Ubuntu | Raspbian)
show_msg "${GREEN}Installing required files from apt...${NORMAL}"
sudo apt-get install -y python3-pip python3-dev
show_msg "${GREEN}Installing needed files from pip...${NORMAL}"
sudo pip3 install -r ./requirements.txt
installSystemdService
enableSystemdService
;;
*)
show_msg "${RED}${BOLD}Unsupported distribution, please consider submitting a pull request to extend the script${NORMAL}"
exit 1
esac
;;
*)
show_msg "${RED}${BOLD}Unsupported operating system, please consider submitting a pull request to extend the script${NORMAL}"
exit 1
esac
# Change permissions of the start up script
sudo chmod +x Update/UpdatePi.sh
cd $STARTPWD
show_msg "${GREEN}${BOLD}Installation complete${NORMAL}"
| true |
b0a734078ac7230310d0ae4cda006986334161a4
|
Shell
|
netqyq/shell-examples
|
/code/CH15/proc2.sh
|
UTF-8
| 169 | 2.578125 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#proc2.sh
echo "SUPPORTED FILESYSTEM TYPES:"
echo ---------------------------------------------------------
awk -F'\t' '{print $2}' /proc/filesystems
| true |
139861f2cf26a9c084786dd04a63cbbb085fa0e0
|
Shell
|
cpaika/spring-boot-java-base
|
/deployment/scripts/common/push-docker-image.sh
|
UTF-8
| 483 | 2.609375 | 3 |
[
"MIT",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
echo "Login into ECR ..."
$(aws ecr get-login --no-include-email)
echo "Push docker image to ECR ..."
./gradlew docker dockerTag dockerPush -PTAG=$TRAVIS_BUILD_NUMBER -PREPOSITORY_URI=$REPOSITORY_URI
printf '{"SPRING_PROFILES":"%s","SPLUNK_TOKEN":"%s", "ENVIRONMENT":"%s", "REPOSITORY_URI":"%s"}' "$SPRING_PROFILES" "$SPLUNK_TOKEN" "$ENVIRONMENT" "$REPOSITORY_URI:$TRAVIS_BUILD_NUMBER" | jq -f config/ecs/task-def.json > task_def_${TRAVIS_BUILD_NUMBER}.json
| true |
addd15d7cb62e4f0349f99d74b20cdc9fd9bcc23
|
Shell
|
russellmacshane/quick-wordpress
|
/qwcli/qwcli.sh
|
UTF-8
| 588 | 3.65625 | 4 |
[] |
no_license
|
#!/bin/bash
# https://github.com/brotandgames/bagcli -- example location
cli_help() {
cli_name=${0##*/}
echo "
$cli_name
Quick WordPress CLI
Version: 1.0.0
https://github.com/russellmacshane/quick-wordpress
Usage: $cli_name [command]
Commands:
backup Backup
list List Backups
restore Restore
* Help
"
exit 1
}
case "$1" in
backup|b)
/home/docker/quick-wordpress/qwcli/backup.sh
;;
list|l)
ls /home/docker/quick-wordpress/backup
;;
restore|r)
/home/docker/quick-wordpress/qwcli/restore.sh
;;
*)
cli_help
;;
esac
| true |
435ca3e79eb2346da97c4f745c43b539ebcf00b2
|
Shell
|
isabella232/gpay-loyaltyapi-demo
|
/scripts/enable-api.sh
|
UTF-8
| 1,092 | 3.328125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script downloads the service account key from GCP
set -e
script_folder=`cd $(dirname $0) && pwd`
gcp_project=$1
# colors
NONE='\033[0m'
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
GRAY='\033[1;30m'
if [ -z "$gcp_project" ]
then
>&2 echo -e "${RED}ERROR:${NONE} GCP project not specified"
>&2 echo -e " usage: service-account-key.sh <project-id> [folder-id]"
exit 1
fi
# enable Passes API
gcloud services enable walletobjects.googleapis.com --project=$gcp_project
| true |
85369c2103ca6188d2a7f3a2232650f5fc3a93d1
|
Shell
|
wubo0067/calmwu-go
|
/example/sailcraft/startsvrs_domestic.sh
|
UTF-8
| 2,059 | 2.640625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
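# start each server under <name>_main/bin/domestic and log success or failure to /var/log/message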
cd csssvr_main/bin/domestic
./start.sh
sleep 1
svr_count="`ps -ef|grep csssvr_main|grep -v 'grep'|wc -l`"
if [ $svr_count -gt 0 ]
then
echo "csssvr_main is start! `date`"|tee -a /var/log/message
else
echo "csssvr_main is start failed! `date`"|tee -a /var/log/message
fi
cd -
cd fleetsvr_main/bin/domestic
./startfleetsvr.sh
sleep 1
svr_count="`ps -ef|grep fleetsvr_main|grep -v 'grep'|wc -l`"
if [ $svr_count -gt 0 ]
then
echo "fleetsvr_main is start! `date`"|tee -a /var/log/message
else
echo "fleetsvr_main is start failed! `date`"|tee -a /var/log/message
fi
cd -
cd indexsvr_main/bin/domestic
./start.sh
sleep 1
svr_count="`ps -ef|grep indexsvr_main|grep -v 'grep'|wc -l`"
if [ $svr_count -gt 0 ]
then
echo "indexsvr_main is start! `date`"|tee -a /var/log/message
else
echo "indexsvr_main is start failed! `date`"|tee -a /var/log/message
fi
cd -
cd financesvr_main/bin/domestic
./start.sh
sleep 1
svr_count="`ps -ef|grep financesvr_main|grep -v 'grep'|wc -l`"
if [ $svr_count -gt 0 ]
then
echo "financesvr_main is start! `date`"|tee -a /var/log/message
else
echo "financesvr_main is start failed! `date`"|tee -a /var/log/message
fi
cd -
cd guidesvr_main/bin/domestic
./start.sh
sleep 1
svr_count="`ps -ef|grep guidesvr_main|grep -v 'grep'|wc -l`"
if [ $svr_count -gt 0 ]
then
echo "guidesvr_main is start! `date`"|tee -a /var/log/message
else
echo "guidesvr_main is start failed! `date`"|tee -a /var/log/message
fi
cd -
cd logsvr_main/bin/domestic
./start.sh
sleep 1
svr_count="`ps -ef|grep logsvr_main|grep -v 'grep'|wc -l`"
if [ $svr_count -gt 0 ]
then
echo "logsvr_main is start! `date`"|tee -a /var/log/message
else
echo "logsvr_main is start failed! `date`"|tee -a /var/log/message
fi
cd -
#cd omsvr_main/bin/domestic
#./start.sh
#svr_count="`ps -ef|grep omsvr_main|grep -v 'grep'|wc -l`"
#if [ $svr_count -gt 0 ]
#then
# echo "omsvr_main is start! `date`"|tee -a /var/log/message
#else
# echo "omsvr_main is start failed! `date`"|tee -a /var/log/message
#fi
#cd -
| true |
a8cdc40147ce9edb6cc35ae44a52a22429bd67c3
|
Shell
|
EscapeLife/awesome-builder
|
/common/vagrant/ansible/setup.sh
|
UTF-8
| 429 | 3.015625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
set -ex
# install some tools
sudo yum install -y epel-release git vim gcc glibc-static telnet
# enable password auth as a fallback if the ssh key doesn't work; by default, username=vagrant password=vagrant
sudo sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
sudo systemctl restart sshd
# install ansible
if [ "$HOSTNAME" = "ansible-master" ]; then
sudo yum install -y ansible
fi
| true |
045c5e31529dcea0cb76e577edb4a578d548f623
|
Shell
|
316k/leopard-subpixel
|
/tests/img-subpixel/build-region-heatmap.sh
|
UTF-8
| 591 | 3.4375 | 3 |
[] |
no_license
|
#!/bin/bash
# usage: ./this 10-10, to build the heat-map around pixel (10, 10)
base="$1"
# 3 | 0
# -----
# 2 | 1
tac debug/$base-0 > heat-0
cp debug/$base-1 heat-1
cat heat-0 heat-1 > heat-right
tac debug/$base-3 > heat-3
cp debug/$base-2 heat-2
cat heat-3 heat-2 > heat-left-flipped
awk '{ for(i=NF; i>0; i--) printf("%f ", $i); printf("\n") }' heat-left-flipped > heat-left
paste -d" " heat-left heat-right > heat-all
../../utils/heat-map.sh heat-all > heat-$base.pgm
convert heat-$base.pgm -resize 500% heat-$base.png
rm heat-$base.pgm heat-{0,1,2,3,left,right,left-flipped}
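# e.g. ./build-region-heatmap.sh 10-10 leaves heat-10-10.png in the working directory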
| true |
85bd6266e6b0aac2ec5169e68e65bf333b64ddeb
|
Shell
|
sawsuh/dotfiles
|
/desktop/.scripts/scr.sh
|
UTF-8
| 310 | 2.59375 | 3 |
[] |
no_license
|
#!/bin/bash
xcord=$(xdotool getmouselocation --shell | awk -F '=' '{print $2; exit}')
# use a numeric comparison; the string test `>` would compare lexicographically
if [[ "$xcord" -gt 2560 ]]
then
maim -f png -u -g 2560x1440+2560+0 screenshots/$(date +'%y-%-m-%-d_%-T')mon2.png
else
maim -f png -u -g 2560x1440+0+0 screenshots/$(date +'%y-%-m-%-d_%-T')mon1.png
fi
sleep 1;
dunstify "screenshot taken"
| true |
71190942620f5d91997395625ed67045eea9fe26
|
Shell
|
dark-panda/dotfiles
|
/bin/git-each-branch
|
UTF-8
| 115 | 2.734375 | 3 |
[] |
no_license
|
#!/bin/sh
for i in $(git for-each-ref --format='%(refname)' refs/heads/ | grep -v master); do
basename $i
done
| true |
14eb0af2b2384f8df4abbb05b88bf6e9c19eb505
|
Shell
|
yq314/helm3-monitor
|
/scripts/install_plugin.sh
|
UTF-8
| 1,389 | 3.671875 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/sh -e
if [ -n "${HELM_LINTER_PLUGIN_NO_INSTALL_HOOK}" ]; then
echo "Development mode: not downloading versioned release."
exit 0
fi
version="$(sed -n -e 's/version:[ "]*\([^"]*\).*/\1/p' plugin.yaml)"
echo "Downloading and installing helm3-monitor v${version} ..."
url="https://github.com/yq314/helm3-monitor/releases/download/v${version}/helm3-monitor_${version}"
if [ "$(uname)" = "Darwin" ]; then
if [ "$(uname -m)" = "arm64" ]; then
url="${url}_darwin_arm64.tar.gz"
else
url="${url}_darwin_amd64.tar.gz"
fi
elif [ "$(uname)" = "Linux" ] ; then
if [ "$(uname -m)" = "aarch64" ] || [ "$(uname -m)" = "arm64" ]; then
url="${url}_linux_arm64.tar.gz"
else
url="${url}_linux_amd64.tar.gz"
fi
else
url="${url}_windows_amd64.tar.gz"
fi
echo "$url"
mkdir -p "bin"
mkdir -p "releases/v${version}"
if [ -x "$(which curl 2>/dev/null)" ]; then
curl -sSL "${url}" -o "releases/v${version}.tar.gz"
else
wget -q "${url}" -O "releases/v${version}.tar.gz"
fi
tar xzf "releases/v${version}.tar.gz" -C "releases/v${version}"
mv "releases/v${version}/helm3-monitor" "bin/helm3-monitor" || \
mv "releases/v${version}/helm3-monitor.exe" "bin/helm3-monitor"
mv "releases/v${version}/completion.yaml" .
mv "releases/v${version}/plugin.yaml" .
mv "releases/v${version}/README.md" .
mv "releases/v${version}/LICENSE" .
| true |
950e9e0da517b5bc6e9aef61c5102240c97dedd2
|
Shell
|
rose36/RedesVirtuaisOpenstack
|
/scripts/criacao-automatizada-openstack.sh
|
UTF-8
| 4,419 | 3.671875 | 4 |
[] |
no_license
|
#!/bin/bash
menu ()
{
while true
do
clear
echo "================================================"
echo "OPENSTACK SCENARIO CREATION"
echo "Author: Roseli da Rocha Barbosa"
echo ""
echo "1) Create networks"
echo ""
echo "2) Create routers"
echo ""
echo "3) Add an interface to a router"
echo ""
echo "4) Create instances"
echo ""
echo "5) Quit"
echo ""
echo "================================================"
echo "Enter the desired option:"
read x
echo "Option entered ($x)"
echo "================================================"
case "$x" in
1)
echo "Choose your option: 1 - Provider Networks 2 - Self-Service (private) Networks:"
read op
echo "Option entered ($op)"
if [ $op == "1" ]
then
	echo "Enter the network name: "
	read nome
	echo "Enter the network type: "
	read tipo
	echo "Creating the network..."
	neutron net-create $nome --provider:network_type $tipo
	neutron net-update $nome --router:external --shared
elif [ $op == "2" ]
then
	echo "Enter the network name: "
	read nome2
	echo "Enter the network type: "
	read tipo2
	echo "Creating the network..."
	neutron net-create $nome2 --provider:network_type $tipo2
	echo "Enter the subnet name: "
	read subnet2
	echo "Enter the DNS server address: "
	read dns2
	echo "Enter the network address (e.g. 10.10.20.0/20): "
	read rede2
	neutron subnet-create --name $subnet2 --ip-version 4 --dns-nameserver $dns2 $nome2 $rede2
	echo "Network and subnet created successfully!!!"
else
	echo "Invalid option!!!"
fi
sleep 5
echo "================================================"
;;
2)
echo "Enter the router name: "
read roteador
echo "Creating the router..."
neutron router-create $roteador
echo "Router $roteador created successfully!!!"
sleep 5
echo "================================================"
;;
3)
echo "Enter the network name:"
read interfaceRede
echo "Is the router the default gateway? 1 - Yes 2 - No"
read gatewayPadrao
if [ $gatewayPadrao == "1" ]
then
	neutron router-gateway-set router $interfaceRede
elif [ $gatewayPadrao == "2" ]
then
	neutron router-interface-add router $interfaceRede
else
	echo "Invalid option!!!"
fi
echo "================================================"
;;
4)
echo "--------------------------------------------------"
echo "| FLAVOR |"
echo "--------------------------------------------------"
openstack flavor list
echo "Enter the desired flavor: "
read flavor
echo "--------------------------------------------------"
echo "| IMAGE |"
echo "--------------------------------------------------"
openstack image list
echo "Enter the desired image name: "
read imagem
echo "--------------------------------------------------"
echo "| NETWORK |"
echo "--------------------------------------------------"
openstack network list
echo "Enter the name or id of the network this instance will belong to: "
read nomeRede
echo "--------------------------------------------------"
echo "| SECURITY GROUP |"
echo "--------------------------------------------------"
openstack security group list
echo "Enter the Security Group name: "
read securityGroup
echo "--------------------------------------------------"
echo "| KEYPAIR |"
echo "--------------------------------------------------"
openstack keypair list
echo "Enter the keypair name: "
read chave
echo "Enter the name to assign to the new instance: "
read nomeInstancia
openstack server create --flavor $flavor --image $imagem --nic net-id=$nomeRede --user-data=./userdata.txt --security-group $securityGroup --key-name $chave $nomeInstancia
echo "Instance $nomeInstancia created successfully!!!"
sleep 5
echo "================================================"
;;
5)
echo "exiting..."
sleep 5
clear;
exit;
;;
*)
echo "Invalid option!"
esac
done
}
menu
| true |
8dcd1ac64989257ce6568695b24e58ec619756e9
|
Shell
|
labunix/sakuravps
|
/myscripts/luks_unmount.sh
|
UTF-8
| 445 | 3.0625 | 3 |
[] |
no_license
|
#!/bin/bash
if [ `id -u` -ne "0" ] ;then
echo "Sorry,Not Permit User!" >&2
exit 1
fi
USER=labunix
DEST=/home/${USER}/mydata
umount $DEST
cryptsetup luksClose luks
# header backup
LUKS_HEADER_BACKUP=luks.header
test -f ${DEST}/${LUKS_HEADER_BACKUP} && rm -f ${DEST}/${LUKS_HEADER_BACKUP}
test -f /home/luks.img && \
cryptsetup luksHeaderBackup /home/luks.img --header-backup-file ${DEST}/${LUKS_HEADER_BACKUP}
mount | grep luks
| true |
c15c273fe461f9867f00381161c333a6baf4d63c
|
Shell
|
ctf2009/ansible-rpi-examples
|
/roles/autohotspot/files/enable.sh
|
UTF-8
| 236 | 2.921875 | 3 |
[] |
no_license
|
#!/bin/bash
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root"
exit 1
fi
systemctl enable autohotspot
sed -i '/#nohook wpa_supplicant/c\nohook wpa_supplicant' /etc/dhcpcd.conf
echo "AutoHotspot has been enabled"
| true |
6540c530c626a98412a34ac72d21649ccd9fdf55
|
Shell
|
timeblink/code-snippets
|
/make_rom.sh
|
UTF-8
| 3,501 | 3.953125 | 4 |
[] |
no_license
|
#!/bin/bash
#===============================================================================
#
# Copyright © 2008-2018 xxx
#
# build shell script file.
#
# Author:
# E-mail:
# Date :
#
#-------------------------------------------------------------------------------
#
# EDIT HISTORY FOR FILE
#
# This section contains comments describing changes made to the module.
# Notice that changes are listed in reverse chronological order.
#
# when who what, where, why
# -------- --- -----------------------------------------------------
#===============================================================================
version(){
echo "0.0"
}
usage(){
echo "
usage : ./make_rom.sh [<option> <option> ...] <parameter>
example :
1.make romimage with eng.
./make_rom.sh eng
2.make romimage with eng and zip.
./make_rom.sh -z eng romimage_001
parameters :
<build_type> : build_type [default:userdebug]
<model_name> : product model name [default:msm8909_512]
<outdir> : copy romimage file to dir [default:romimage]
optional arguments:
-h, --help print this help and exits
-v, --version print version and exits
"
}
getargs(){
index=0
outdir=romimage
BUILD_TYPE=
MODEL=F03H
for parameter in $* ;do
start=$(expr match "${parameter}" '-\|--')
option=${parameter:$start}
if [[ $start -gt 0 ]];then
if [[ "$option" == "h" || "$option" == "help" ]];then
usage && exit 0
elif [[ "$option" == "v" || "$option" == "version" ]];then
version && exit 0
else
echo -e "\033[31munvalid option $parameter.\033[0m"
usage && exit 0
fi
elif [[ ${parameter:0:1} != '-' ]];then
if [[ $index -eq 0 ]];then BUILD_TYPE=$parameter;fi
if [[ $index -eq 1 ]];then MODEL=$parameter;fi
if [[ $index -eq 2 ]];then outdir=$parameter;fi
((index++))
else
echo "!!unvalid parameter '$parameter' !!\n"
fi
done
if [[ -z $BUILD_TYPE ]]
then
echo -e "\033[31mparameters BUILD_TYPE must not empty.\033[0m"
usage && exit 0
fi
}
#===============================================================================
# fail msg
#===============================================================================
fail () {
if [ ! -z "$@" ]
then
echo -e "\033[31mERROR: $@\033[0m" >&2
fi
echo -e "\033[31mERROR: failed.\033[0m" >&2
usage
exit 1
}
#===============================================================================
# main
#===============================================================================
getargs $*
if [ "${BUILD_TYPE}" = "debug" ] ; then
OUT_ROOT="LINUX/android/out/debug"
else
OUT_ROOT="LINUX/android/out"
fi
COPY_LIST=(${COPY_LIST[@]} ",contents*.xml")
COPY_LIST=(${COPY_LIST[@]} ",project_build.sh")
COPY_LIST=(${COPY_LIST[@]} ",environment.sh")
COPY_LIST=(${COPY_LIST[@]} ",make_rom.sh")
build_dir=$(cd $(dirname $0); echo $(pwd))
for COPY_DEF in ${COPY_LIST[@]} ; do
COPY_PATH=$(echo ${COPY_DEF} | awk -F',' '{print $1}')
COPY_FILE=$(echo ${COPY_DEF} | awk -F',' '{print $2}')
if [ -z ${COPY_PATH} ] ; then continue ; fi
if [ ! -d ${build_dir}/${COPY_PATH} ] ; then continue ; fi
mkdir -p ${build_dir}/${outdir}/${COPY_PATH}
cd ${build_dir}/${COPY_PATH}
tar -c ${COPY_FILE} | tar -x -C ${build_dir}/${outdir}/${COPY_PATH}/
cd ${build_dir}
done
cd ${build_dir}
cp contents.xml ${build_dir}/${outdir}
zip -rq ${outdir}.zip ${outdir}
exit 0
| true |
41170759590c8dd4e4b3d60d6a8754051aa8dce3
|
Shell
|
ftdc-picsl/pipedream2018
|
/auto/convertDicomBatch.sh
|
UTF-8
| 587 | 3.1875 | 3 |
[] |
no_license
|
#!/bin/bash
source /home/mgrossman/.bash_profile
procScriptDir=/data/grossman/pipedream2018/bin/scripts
dicomDir=/data/jet/grosspeople/Volumetric/SIEMENS/Subjects
niiDir=/data/jet/grosspeople/Volumetric/SIEMENS/pipedream2014/subjectsNii
subjectsAndTPs=`${procScriptDir}/auto/findDataToProcess.sh $dicomDir $niiDir`
for stp in $subjectsAndTPs; do
subject=${stp%,*}
tp=${stp#*,}
qsub -S /bin/bash -l h_vmem=2.1G,s_vmem=2G -o ${subject}_${tp}_dicom2niiAuto.stdout -e ${subject}_${tp}_dicom2niiAuto.stderr ${procScriptDir}/auto/convertDicom.sh $subject $tp
sleep 0.5
done
| true |
e8d58172354f8f799ab603d075ddb151d92fa984
|
Shell
|
eprintsug/EPrintsArchivematica
|
/make_ingredient.sh
|
UTF-8
| 552 | 2.609375 | 3 |
[] |
no_license
|
#!/bin/bash
# Copy the contents into the structure used by an EPrints 3.4 ingredient.
# run ./make_ingredient.sh
# then place ingredients/archivematica under your EPrints 3.4 ingredients directory
# update your flavours/pub_lib/inc file to include ingredients/archivematica
# run epadmin update REPO
# apachectl graceful
I=ingredients/archivematica
mkdir -p $I
cp -r bin $I
cp -r cgi $I
cp -r cfg/cfg.d $I
# cp -r cfg/citations $I # unused?
cp -r lib/citations $I
cp -r lib/static $I
cp -r lib/lang $I
cp -r lib/plugins $I
cp -r lib/workflows $I
| true |
c0e500b5054f7264ec9f67ef68d0f5120507781f
|
Shell
|
opensciencegrid/rsvprocess
|
/bin/run_daily.sh
|
UTF-8
| 284 | 2.765625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#test
export JAVA_HOME=/usr/lib/jvm/java-openjdk
export PATH=$JAVA_HOME/bin:$PATH
export RSVPROCESS_HOME=/usr/local/rsvprocess
ant -f $RSVPROCESS_HOME/build.xml availability_yesterday
if [ ! $? -eq 0 ]; then
echo "availability(yesterday) calculation has failed"
fi;
| true |
357edde240e32764777c964357b55ec267ec6283
|
Shell
|
KingBrewer/pi-gen
|
/stage2/02-net-tweaks/01-run.sh
|
UTF-8
| 429 | 2.515625 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash -e
install -v -d "${ROOTFS_DIR}/etc/systemd/system/dhcpcd.service.d"
install -v -m 644 files/wait.conf "${ROOTFS_DIR}/etc/systemd/system/dhcpcd.service.d/"
sed "s/SSID_TOKEN/${WIFI_SSID}/" files/wpa_supplicant.tmpl | sed "s/PSK_TOKEN/${WIFI_PSK}/" > files/wpa_supplicant.conf
install -v -d "${ROOTFS_DIR}/etc/wpa_supplicant"
install -v -m 600 files/wpa_supplicant.conf "${ROOTFS_DIR}/etc/wpa_supplicant/"
| true |
06d3a11055043bfd9efb30e5aa1b1e82adbf0e8e
|
Shell
|
microsoft/qlib-server
|
/scripts/install_qlib_client.sh
|
UTF-8
| 1,006 | 3.6875 | 4 |
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
#!/bin/bash
sudo apt-get update
sudo apt-get install -y g++ nfs-common
MINICONDA=https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh
QLIB_CLIENT=https://github.com/microsoft/qlib.git
STOCK_DATA_DIR=/data/stock_data/qlib_data
CODE_DIR=$HOME/"code"
DOWNLOADS_DIR=$HOME/"downloads"
CONDA_DIR=$HOME/"miniconda3"
# create dir
function create_dir_by_sudo() {
if [ ! -d $1 ]; then
sudo mkdir -p $1
fi
}
function create_dir() {
if [ ! -d $1 ]; then
mkdir $1
fi
}
create_dir $CONDA_DIR
create_dir $CODE_DIR
create_dir $DOWNLOADS_DIR
create_dir_by_sudo $STOCK_DATA_DIR
# install miniconda3
wget $MINICONDA -O $DOWNLOADS_DIR/"miniconda3.sh"
/bin/bash $DOWNLOADS_DIR/miniconda3.sh -b -u -p $CONDA_DIR
echo ". $CONDA_DIR/etc/profile.d/conda.sh" >> $HOME/.bashrc
echo "conda activate base" >> $HOME/.bashrc
# install qlib client
cd $CODE_DIR
git clone $QLIB_CLIENT
cd qlib
$CONDA_DIR/bin/pip install cython numpy
$CONDA_DIR/bin/python setup.py install
| true |
1752e15ba1b498b117d28b5b4d0f2e7b437c1bfe
|
Shell
|
meivv/study
|
/goland/test/install
|
UTF-8
| 211 | 2.90625 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
if [ ! -f install ];then
echo 'install must be run within its container folder' 1>&2
exit 1
fi
WORKDIR=`pwd`
export GOPATH=$WORKDIR
gofmt -w src
go install test
echo 'finished'
| true |
079d8b105c45b5a37b54200f8258b18bfb9e394a
|
Shell
|
oussamazerrouki/ahr
|
/examples-mc-gke-eks-aks/bs-cluster-infra-az.sh
|
UTF-8
| 455 | 2.5625 | 3 |
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export TF_MODULE=infra-cluster-az-tf
#
AZ_TFVARS=$TF_MODULE/az.auto.tfvars
AWS_TFVARS=$TF_MODULE/aws.auto.tfvars
source mc-r3-aks.env
cat <<EOF > "$AZ_TFVARS"
resource_group = "$RESOURCE_GROUP"
az_vnet = "$AZ_VNET"
az_vnet_subnet = "$AZ_VNET_SUBNET"
az_vnet_cidr = "$AZ_VNET_CIDR"
EOF
cat <<EOF > "$AWS_TFVARS"
aws_vpc = "$AWS_VPC"
aws_vpn_gw_name = "$AWS_VPN_GW_NAME"
EOF
| true |
6db776b29cb0a4422c5660a5d7f94341e9aebfe2
|
Shell
|
kbillore/security_addon
|
/Scripts/demo/developer/Send_License
|
UTF-8
| 1,371 | 3.1875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
show_usage (){
echo "Usage: $0 <Customer License> <Controlled Access Model> <Artefacts Path>"
return 0
}
if [ "$1" = "-h" ] || [ "$1" = "" ]
then
show_usage
exit 1
fi
cd $OVSA_DEV_ARTEFACTS
#echo "Customer License: $1"
CUSTOMER_LIC=$(echo $1 | cut -d '.' -f 1)
#echo "Customer Lic: $CUSTOMER_LIC"
# Delete the files remotely before sending them
sudo -u intel ssh intel@192.168.122.79 rm -rf $3/$CUSTOMER_LIC.lic
sudo -u intel ssh intel@192.168.122.79 rm -rf $3/$CUSTOMER_LIC.dat
sudo -u intel scp -q $1 intel@192.168.122.79:$3/$CUSTOMER_LIC.lic
sudo -u intel scp -q $2 intel@192.168.122.79:$3
echo "Sent Controlled Access Model $3/$CUSTOMER_LIC.dat"
echo "Sent License $3/$CUSTOMER_LIC.c_lic"
#rm -rf /var/OVSA/artefacts/*
| true |
ade279f1f902dfd0d380db63d7b4fb553eb6088d
|
Shell
|
Schumi543/nim_9cc
|
/test.sh
|
UTF-8
| 542 | 3.34375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
try() {
input="$1"
expected="$2"
./src/nim_9cc "$input" >./tmp.asm
# TODO also support ubuntu for CI
nasm -f macho64 tmp.asm -o tmp.o
ld tmp.o -o tmp -macosx_version_min 10.13 -lSystem
./tmp
actual="$?"
if [ "$actual" = "$expected" ]; then
echo "$input => $actual"
else
echo "$expected expected, but got $actual"
exit 1
fi
}
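# note: "actual" is the compiled program's exit status, so expected values must fit in 0-255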
try 0 0
try 42 42
try 5+3 8
try '12 + 4' 16
try ' 12 + 34 - 5 ' 41
try '5+6*7' 47
try '5*(9-6)' 15
try '(3+5)/2' 4
try '-1 + 5' 4
try '2 == 2' 1
try ' 3 > 4' 0
echo OK
| true |
88a5885cac1ca784936df6567db2e7dd4c2f4ab9
|
Shell
|
andreystarkov/dotfiles
|
/setup.sh
|
UTF-8
| 2,110 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/bash
echo "🍓 Installing Mac hacks..."
# source ~/install.sh
source ~/mac/default-settings.sh
HOMEWORLD=~/.dotfiles
echo "🎁 Your configuration word will be stored at ${HOMEWORLD}"
echo "🚀 Copying ${PWD}"
echo "👇 To ~/.dotfiles"
cp -rf ./Desktop/code/dotfiles ~/.dotfiles
echo "🔪 Backuping pervous setup to ~/backup"
mkdir ~/backup
echo "🌩😩 It's okay if something fails here."
sudo mv -f ~/.zshrc ~/backup
sudo mv -f ~/.zsh_aliases ~/backup
sudo mv -f ~/.vimrc ~/.aliases ~/backup
sudo mv -f ~/.zgen_setup ~/backup
sudo mv -f ~/.inputrc ~/backup
sudo mv -f ~/.screenrc ~/backup
sudo mv -f ~/.exports ~/backup
sudo mv -f ~/.zsh_functions ~/backup
sudo mv -f ~/.exports ~/backup
sudo mv -f ~/.bash_prompt ~/backup
sudo mv -f ~/.bashrc ~/backup
sudo mv -f ~/.bash_profile ~/backup
echo "🔪 Removing prevous config..."
echo "😩 It's okay."
rm -rf ~/.zshrc
rm -rf ~/.zsh_aliases
rm -rf ~/.vimrc
rm -rf ~/.aliases
rm -rf ~/.zgen_setup
rm -rf ~/.inputrc
rm -rf ~/.screenrc
rm -rf ~/.exports
rm -rf ~/.zsh_functions
rm -rf ~/.exports
rm -rf ~/.bash_prompt
rm -rf ~/.bashrc
rm -rf ~/.bash_profile
echo "🍆 Setting up Everything."
echo "👉👈 Linking all your configs with ${HOMEWORLD}"
ln -s ~/.dotfiles/zsh/zshrc.zsh ~/.zshrc
ln -s ~/.dotfiles/zsh/zsh_aliases.zsh ~/.zsh_aliases
ln -s ~/.dotfiles/zsh/zsh_functions.zsh ~/.zsh_functions
ln -s ~/.dotfiles/powerlevel9/defaulttheme.zsh ~/.powerlevel9
ln -s ~/.dotfiles/zsh/zgen_setup.zsh ~/.zgen_setup
ln -s ~/.dotfiles/vimrc.js/vimrc ~/.vimrc
ln -s ~/.dotfiles/vimrc.js ~/.vimrc.js
ln -s ~/.dotfiles/.bashrc ~/.bashrc
ln -s ~/.dotfiles/.bash_profile ~/.bash_profile
ln -s ~/.dotfiles/.bash_prompt ~/.bash_prompt
ln -s ~/.dotfiles/.exports ~/.exports
ln -s ~/.dotfiles/.inputrc ~/.inputrc
ln -s ~/.dotfiles/.screenrc ~/.screenrc
ln -s ~/.dotfiles/.aliases ~/.aliases
mkdir -p ~/.config
mkdir -p ~/.config/nvim
ln -s ~/.dotfiles/nvim/init.vim ~/.config/nvim/init.vim
# ln -s ~/.dotfiles/colorls ~/.config/colorlsx
echo "💣 Enjoy that. Now zsh install all 🍓 plugins."
zsh
echo "💣 🔪🔪🔪 Done. Enjoy that."
| true |
26c445e64c860ad8efa09f73c8316f6f818ff2dd
|
Shell
|
VijayEluri/codes
|
/sh/sor1.sh
|
UTF-8
| 163 | 3.234375 | 3 |
[] |
no_license
|
#!/bin/bash
# use a separate variable for the glob; reassigning PATH would break external commands
files=/usr/bin/*
for f in $files; do
if [ -x $f ]; then
echo "executable-->$f"
fi
if [ -h $f ]; then
echo "symbolic-->$f"
fi
done
| true |
a5c81c8d6dc4f25d50d66f885ebceec2a1ca0fa6
|
Shell
|
px307/tech
|
/deploy.sh
|
UTF-8
| 593 | 2.71875 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
TMP_DIR=/tmp/blog/
ORIGIN_ADDRESS=git@github.com:jjyyjjyy/tech.git
ORIGIN_BRANCH=gh-pages
cd notes
mvn clean asciidoctor:process-asciidoc
rm -rf ${TMP_DIR} && mkdir -p ${TMP_DIR}
cp -r target/generated-docs/** ${TMP_DIR}
cd ${TMP_DIR}
git init
git remote add origin ${ORIGIN_ADDRESS}
git add .
git commit -m ":memo: Update"
git checkout -b ${ORIGIN_BRANCH}
git checkout -b tmp
git branch -D ${ORIGIN_BRANCH}
git push origin --delete ${ORIGIN_BRANCH}
git checkout -b ${ORIGIN_BRANCH}
git push --set-upstream origin ${ORIGIN_BRANCH}
cd -
echo "${ORIGIN_BRANCH} sync successfully!"
| true |
c768c469d59b982c375cbf8889110cfb1aa74d50
|
Shell
|
evias/eos-contracts
|
/token/install.sh
|
UTF-8
| 1,736 | 2.671875 | 3 |
[] |
no_license
|
#!/bin/bash
echo ""
echo "\t\t--- Now configuring tests/token EOS Functional Test ---"
echo ""
# @token owner & active
owner=`cleos create key --to-console | grep "Private key: " | awk '{print $3}'`
active=`cleos create key --to-console | grep "Private key: " | awk '{print $3}'`
owner_pub=`cleos wallet import -n eviasContracts --private-key ${owner} | grep "imported private key for: " | awk '{print $5}'`
active_pub=`cleos wallet import -n eviasContracts --private-key ${active} | grep "imported private key for: " | awk '{print $5}'`
# create account @token
cleos create account eosio eosio.token ${owner_pub} ${active_pub}
# deploy contract
cleos set contract eosio.token /contracts/eosio.token -p eosio.token
# -- TEST
# @tester and @user
tester=`cleos create key --to-console | grep "Private key: " | awk '{print $3}'`
user=`cleos create key --to-console | grep "Private key: " | awk '{print $3}'`
tester_pub=`cleos wallet import -n eviasContracts --private-key ${tester} | grep "imported private key for: " | awk '{print $5}'`
user_pub=`cleos wallet import -n eviasContracts --private-key ${user} | grep "imported private key for: " | awk '{print $5}'`
# create account @tester @user
cleos create account eosio token.tester ${tester_pub} ${tester_pub}
cleos create account eosio token.user ${user_pub} ${user_pub}
# create custom token EVS
cleos push action eosio.token create '{"issuer":"eosio", "maximum_supply":"290888.0000 EVS"}' -p eosio.token
cleos push action eosio.token issue '{"to":"token.user","quantity":"100.0000 EVS","memo":"issuing EVS"}' -p eosio
cleos push action eosio.token transfer '{"from":"token.user","to":"token.tester","quantity":"10.0000 EVS","memo":"transferring EVS"}' -p token.user@active
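# Optional sanity check (a hedged example; account and symbol follow the setup above):
#   cleos get currency balance eosio.token token.tester EVS   # expect "10.0000 EVS" after the transfer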
| true |
57ff818d4065443b014dc1a80c66fc7d5a38c15e
|
Shell
|
gaokevin1/cs326
|
/Project4/tests/03-404-2.sh
|
UTF-8
| 357 | 3.109375 | 3 |
[] |
no_license
|
source "${TEST_DIR}/funcs.bash"
port=$(choose_port)
test_start "Serving a basic web page"
setsid $TEST_DIR/start_server "${port}" &
pid=${!}
wait_port "${port}"
actual=$(timeout 5 wget -O- "http://localhost:${port}/this/does/not/exist.html")
return="${?}"
stop_server ${pid}
# Return value will be 8 for a server error
[[ ${return} -eq 8 ]]
test_end
| true |
c66c450c87c289d7566944ac63fa501b74de0509
|
Shell
|
snizzer/nest-compiler
|
/utils/imagenet_val_script/mobilenet_multi.sh
|
UTF-8
| 2,816 | 2.625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
input=$1
trap "exit" INT
# Declare an array of string with type
# "asymmetric" "symmetric" "symmetric_with_uint8" "symmetric_with_power2_scale"
declare -a quantizations=("symmetric" "symmetric_with_uint8" "symmetric_with_power2_scale")
declare -a executors=("image-classifier" "image-classifier_googlenet_mixed_1")
declare -a clips=("none" "KL")
declare -a profiles=("10000_mobilenet_bin1000")
# -enable-channelwise
# -quantization-calibration=KL
## --validation-images-dir=/home/jemin/development/dataset/imagenet2012_processed \
# for testing: /home/jemin/development/dataset/small_imagenet
for clip in "${clips[@]}"; do
for quant in "${quantizations[@]}"; do
echo "test option: " $clip $quant
python ../imagenet_topk_accuracy_driver_py3.py --verbose \
--validation-images-dir=/home/jemin/development/dataset/imagenet2012_processed \
--image-classifier-cmd="../../cmake-build-release/bin/image-classifier
-m=/home/jemin/hdd/models/mobilenet_v2/
-model-input-name=data
-image-mode=0to1
-use-imagenet-normalization
-backend=OpenCL
-topk=5
-quantization-schema=$quant
-quantization-calibration=$clip
-load-profile=/home/jemin/development/nest_compiler/cmake-build-release/bin/10000_mobilenet_bin1000.yaml
-minibatch=0 -"
done
done
#for profile in "${profiles[@]}"; do
# for quant in "${quantizations[@]}"; do
# echo "test option: "$profile $quant
# python ../imagenet_topk_accuracy_driver_py3.py --verbose \
# --validation-images-dir=/home/jemin/development/dataset/small_imagenet_299 \
# --image-classifier-cmd="../../cmake-build-release/bin/image-classifier
# -m=/home/jemin/hdd/models/googlenet_v4_slim/googlenet_v4_slim.onnx
# -model-input-name=input:0
# -image-mode=0to1
# -use-imagenet-normalization
# -backend=CPU
# -image-layout=NHWC
# -topk=5
# -label-offset=1
# -quantization-schema=$quant
# -enable-channelwise
# -load-profile=/home/jemin/development/nest_compiler/cmake-build-release/bin/$profile.yaml
# -minibatch=0 -"
# done
#done
# ../../cmake-build-release/bin/image-classifier \
# /home/jemin/development/nest_compiler/tests/images/imagenet/$im \
# -m=/home/jemin/hdd/models/squeezenet/model.onnx \
# -model-input-name=data_0 \
# -image-mode=neg128to127 \
# -backend=OpenCL \
# -topk=5 \
# -minibatch=1
#FP 32
#if [ $input -eq 0 ]
#then
# python ../imagenet_topk_accuracy_driver_py3.py --verbose \
# --validation-images-dir=/home/jemin/hdd/imagenet/val_processed_299 \
# --image-classifier-cmd="../../cmake-build-release/bin/image-classifier
# -m=/home/jemin/hdd/models/googlenet_v4_slim/googlenet_v4_slim.onnx
# -model-input-name=input:0
# -image-mode=0to1
# -use-imagenet-normalization
# -backend=OpenCL
# -image-layout=NHWC
# -topk=5
# -label-offset=1
# -minibatch=0 -"
#fi
| true |
a79ae741bc9dc41da0bb10a2cc9d10c41b0a4160
|
Shell
|
ykalidin/be-tools
|
/cloud/docker/lib/run_teagent.sh
|
UTF-8
| 513 | 2.546875 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2019. TIBCO Software Inc.
# This file is subject to the license terms contained in the license file that is distributed with this file.
#
if [[ "$9" == "aws" ]]; then
nohup python be_docker_container_discovery_aws.py -t $3 -u $4 -p $5 -py $6 -pi $7 -s $8 > discovery.logs 2>&1 &
elif [[ "$9" == "k8s" ]]; then
nohup python be_docker_container_discovery_k8s.py -t $3 -u $4 -p $5 -py $6 -pi $7 -ta $8 > discovery.logs 2>&1 &
fi
$1/teagent/bin/be-teagent --propFile $2
| true |
8c0c03a74170ffbe9117dea3b9ffdc51f192444e
|
Shell
|
fipar/benchmark_automation
|
/data_preparation_scripts/utils.sh
|
UTF-8
| 798 | 3.84375 | 4 |
[] |
no_license
|
#!/bin/bash
# this exports the env variables _threads, _size and _workload from file captures created with run_sysbench.sh, as
# described in the help output
export_fields_from_sysbench_wrapper_generated_file()
{
[ -z "$1" -o -z "$2" ] && {
cat <<EOF>&2
usage: $0 <filename> "<experiment name>"
Where
- <filename> is the name of a capture created by run_sysbench.sh (i.e. sample.thr1.sz10.testoltp.txt)
- <experiment name> is the value for _EXP_NAME used when running run_sysbench.sh (i.e. 'test' in the above example)
EOF
return 1
}
unset _threads _size _workload
export _threads=$(echo $1|awk -F. '{print $2}'|sed 's/thr//')
export _size=$(echo $1|awk -F. '{print $3}'|sed 's/sz//')
export _workload=$(echo $1|awk -F. '{print $4}'|sed "s/$2//")
}
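# Usage sketch (filename and experiment name below are hypothetical):
#   export_fields_from_sysbench_wrapper_generated_file sample.thr8.sz100.testoltp.txt test
#   echo "$_threads $_size $_workload"   # -> 8 100 oltp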
| true |
aed405377d761548aa6ee938a181c13c047d6fcb
|
Shell
|
AaronParsons/ugradio
|
/dft_intro/genpdf
|
UTF-8
| 253 | 2.640625 | 3 |
[] |
no_license
|
#!/bin/bash
name=fourierc
# run latex
pdflatex $name.tex
# if latex terminated with errors, exit this script
[ $? -ne 0 ] && exit
# run latex again so that it gets all the cross-references right.
pdflatex $name.tex
# view pdf file
evince $name.pdf
| true |
4c1da3c1d7f578de6eb2c6cd7d9ec0aadd66b98a
|
Shell
|
DarkMiclos/oprendsz
|
/masodik
|
UTF-8
| 269 | 3.765625 | 4 |
[] |
no_license
|
#!/bin/bash
echo " The command name: $0"
echo " The first parameter: $1"
echo " The second parameter: $2"
echo " The number of parameters: $#"
if [ $1 -gt $2 ]; then
	OSSZEG=0
	for I in $@ ; do
	OSSZEG=`expr $OSSZEG + $I`
	done
	echo "The sum of the parameters: $OSSZEG"
fi
| true |
3242d49dc5c8f16c312e7da54de460c6f3f4aca4
|
Shell
|
gvoskuilen/sst-sqe
|
/test/testSuites/testSuite_EmberSweep.sh
|
UTF-8
| 9,605 | 3.65625 | 4 |
[] |
no_license
|
#!/bin/bash
# testSuite_EmberSweep.sh
#######################################################################
#
# RRRR EEEEE A DDD M M EEEEE
# R R E A A D D MM MM E
# R R EEEE A A D D M MM M EEEE
# RRRR E AAAAA D D M M E
# R R E A A D D M M E
# R R EEEEE A A DDD M M EEEEE
#
# This test suite is unique (as of February 2015) in that the
# enumberation of tests and the invocation of SST is NOT in
# this file.
#
# The enumberation and invocations of SST is from a file generated
# by execution of the python file, EmberSweepGenerator.py
#
# That generated file is then sourced and that file fed to shuint2.
#
# Note that this Suite runs in the ember elements sub-tree, not in test.
#
# ------------------------------------------------------------------
# Env variable: SST_TEST_ES_LIST to run specific numbers only
#######################################################################
# Description:
# A shell script that defines a shunit2 test suite. This will be
# invoked by the Bamboo script.
# Preconditions:
# 1) The SUT (software under test) must have built successfully.
# 2) A test success reference file is available.
# There is no sutArgs= statement. SST is python wrapped.
TEST_SUITE_ROOT="$( cd -P "$( dirname "$0" )" && pwd )"
# Load test definitions
. $TEST_SUITE_ROOT/../include/testDefinitions.sh
. $TEST_SUITE_ROOT/../include/testSubroutines.sh
#===============================================================================
# Variables global to functions in this suite
#===============================================================================
L_SUITENAME="SST_EmberSweep_suite" # Name of this test suite; will be used to
# identify this suite in XML file. This
# should be a single string, no spaces
# please.
L_BUILDTYPE=$1 # Build type, passed in from bamboo.sh as a convenience
# value. If you run this script from the command line,
# you will need to supply this value in the same way
# that bamboo.sh defines it if you wish to use it.
L_TESTFILE=() # Empty list, used to hold test file names
#===============================================================================
# Test functions
# NOTE: These functions are invoked automatically by shunit2 as long
# as the function name begins with "test...".
#===============================================================================
#-------------------------------------------------------------------------------
# Test:
# test_EmberSweep
# The test are identified by a hash code from the sst test line
# The actual tests generated by a python file.
# Purpose:
# Exercise the EmberSweep code in SST
# Inputs:
# None
# Outputs:
# test_EmberSweep.out file
# Expected Results
# Match of simulated time against those in single reference file
# Caveats:
# The simulation time lines must match the reference file *exactly*,
#
#-------------------------------------------------------------------------------
# Most test Suites explicitly define an environment variable sut to be full path SST
# The Python script does not do this
mkdir -p ${SST_TEST_SUITES}/emberSweep_folder
pushd ${SST_TEST_SUITES}/emberSweep_folder
cp ${SST_ROOT}/sst-elements/src/sst/elements/ember/test/* .
chmod +w *
# Initialize variables
startSeconds=0
RUNNING_INDEX=0
FAILED_TESTS=0
FAILED="FALSE"
PARAMS=""
ES_start() {
RUNNING_INDEX=$(($RUNNING_INDEX+1))
echo " Running # $RUNNING_INDEX, $FAILED_TESTS have failed"
if [ $ES_SELECT == 1 ] ; then
TEST_INDEX=${ES_LIST[$RUNNING_INDEX]}
echo " Running case #${TEST_INDEX}"
else
TEST_INDEX=$RUNNING_INDEX
fi
startSeconds=`date +%s`
FAILED="FALSE"
PARAMS="$1"
echo " $1"
testDataFileBase="testES_${TEST_INDEX}"
L_TESTFILE+=(${testDataFileBase})
# For Valgrind, sut= will be installed after this line.
pushd ${SST_TEST_SUITES}/emberSweep_folder
}
####################
# ES_fini()
# tmp_file is output from SST
# $TL is the "complete" line from SST (with time)
# $RL is the line from the Reference File
#
ES_fini() {
RetVal=$?
touch tmp_file
TL=`grep Simulation.is.complete tmp_file`
TIME_FLAG=$SSTTESTTEMPFILES/TimeFlag_$$_${__timerChild}
if [ -e $TIME_FLAG ] ; then
echo " Time Limit detected at `cat $TIME_FLAG` seconds"
fail " Time Limit detected at `cat $TIME_FLAG` seconds"
rm $TIME_FLAG
FAILED_TESTS=$(($FAILED_TESTS + 1))
return
fi
if [ $RetVal != 0 ] ; then
echo " SST run is incomplete, FATAL"
fail " # $TEST_INDEX: SST run is incomplete, FATAL"
date
top -bH -n 1 | grep Thread
FAILED="TRUE"
else
echo $TL
echo $1 $TL >> $SST_TEST_OUTPUTS/EmberSweep_cumulative.out
RL=`grep $1 $SST_REFERENCE_ELEMENTS/ember/tests/refFiles/test_EmberSweep.out`
if [ $? != 0 ] ; then
echo " Can't locate this test in Reference file "
fail " # $TEST_INDEX: Can't locate this test in Reference file "
FAILED="TRUE"
else
if [[ "$RL" != *"$TL"* ]] ; then
echo output does not match reference time
echo "Reference $RL" | awk '{print $1, $3, $4, $5, $6, $7, $8, $9}'
echo "Out Put $TL"
fail " # $TEST_INDEX: output does not match reference time"
FAILED="TRUE"
fi
fi
fi
if [ $FAILED == "TRUE" ] ; then
FAILED_TESTS=$(($FAILED_TESTS + 1))
echo ' '
grep Ember_${1} -A 5 ${SSTTESTTEMPFILES}/bashIN | grep sst
echo ' '
wc tmp_file
len_tmp_file=`wc -l ./tmp_file | awk '{print $1}'`
if [ $len_tmp_file -gt 25 ] ; then
echo " stdout from sst first and last 25 lines"
sed 25q ./tmp_file
echo " . . ."
tail -25 ./tmp_file
echo " ---- end of stdout "
else
echo " ---- stdout for sst:"
cat ./tmp_file
echo " ---- end of stdout "
fi
echo ' '
else
echo ' '; echo Test Passed
fi
endSeconds=`date +%s`
elapsedSeconds=$(($endSeconds -$startSeconds))
echo "${TEST_INDEX}: Wall Clock Time $elapsedSeconds sec. ${PARAMS}"
echo " "
} # - - - END OF Subroutine ES_fini()
### Begin MAIN
# Generate the bash input script
if [[ ${SST_MULTI_THREAD_COUNT:+isSet} != isSet ]] ; then
cp ${SST_TEST_INPUTS}/EmberSweepGenerator.py .
else
sed '/print..sst.*model/s/sst./sst -n '"${SST_MULTI_THREAD_COUNT} /" ${SST_TEST_INPUTS}/EmberSweepGenerator.py > EmberSweepGenerator.py
chmod +x EmberSweepGenerator.py
fi
if [[ ${SST_MULTI_RANK_COUNT:+isSet} == isSet ]] && [ ${SST_MULTI_RANK_COUNT} -gt 1 ] ; then
sed -i.x '/print..sst.*model/s/..sst/("mpirun -np '"${SST_MULTI_RANK_COUNT} $NUMA_PARAM"' sst/' EmberSweepGenerator.py
fi
pyexec=${SST_PYTHON_APP_EXE}
${pyexec} ./EmberSweepGenerator.py > ${SSTTESTTEMPFILES}/bashIN
if [ $? -ne 0 ] ; then
preFail " Test Generation FAILED"
fi
# This is the code to run just selected tests from the sweep
# using the indices defined by SST_TEST_ES_LIST
# An inclusive sub-list may be specified as "first-last" (e.g. 7-10)
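# Example (hypothetical selection):
#   export SST_TEST_ES_LIST="3 7-10"   # runs case 3 plus cases 7 through 10 inclusive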
ES_SELECT=0
if [[ ${SST_TEST_ES_LIST:+isSet} == isSet ]] ; then
ES_SELECT=1
mv ${SSTTESTTEMPFILES}/bashIN ${SSTTESTTEMPFILES}/bashIN0
ICT=1
for IND in $SST_TEST_ES_LIST
do
echo $IND | grep -e '-' > /dev/null
if [ $? != 0 ] ; then
ES_LIST[$ICT]=$IND
ICT=$(($ICT+1))
S0=$(($IND-1))
S1=$(($S0*6))
START=$(($S1+1))
END=$(($START+5))
else
# echo IND = $IND
INDF=`echo $IND | awk -F'-' '{print $1}'`
INDL=`echo $IND | awk -F'-' '{print $2}'`
# echo "$INDF to $INDL"
INDR=$INDF
S0=$(($INDR-1))
S1=$(($S0*6))
START=$(($S1+1))
END=$(($START-1))
# echo INDR INDL $INDR $INDL
while [ $INDR -le $INDL ]
do
# echo In the INDR loop INDR = $INDR
ES_LIST[$ICT]=$INDR
ICT=$(($ICT+1))
END=$(($END+6))
INDR=$(($INDR+1))
done
fi
sed -n ${START},${END}p ${SSTTESTTEMPFILES}/bashIN0 >> ${SSTTESTTEMPFILES}/bashIN
done
# Check it
echo Check the result
wc ${SSTTESTTEMPFILES}/bashIN
## for i in ${ES_LIST[@]}; do echo $i; done
fi
# Source the bash file
. ${SSTTESTTEMPFILES}/bashIN
export SHUNIT_OUTPUTDIR=$SST_TEST_RESULTS
popd
# Invoke shunit2 with the bash input as a parameter!
# Invoke shunit2. Any function in this file whose name starts with
# "test" will be automatically executed.
# In this position the local Time Out will override the multithread TL
export SST_TEST_ONE_TEST_TIMEOUT=1400
cd $SST_ROOT
date
(. ${SHUNIT2_SRC}/shunit2 ${SSTTESTTEMPFILES}/bashIN)
| true |
c44fd255a9e163c001422cad3f608ebfa40c4421
|
Shell
|
darkangel-ua/hammer-bootstrap
|
/build.sh
|
UTF-8
| 1,131 | 3.484375 | 3 |
[] |
no_license
|
#!/bin/bash
set -e
function download()
{
url=$1
filename=$2
unpack_dest="./dependencies/"$3
if [ ! -d $unpack_dest ]; then
mkdir $unpack_dest
fi
curl -L "$url/$filename" > $filename
tar -xf "$filename" -C $unpack_dest
}
download 'http://downloads.sourceforge.net/project/boost/boost/1.58.0' 'boost_1_58_0.tar.bz2' 'boost'
download 'https://dl.dropboxusercontent.com/u/13148925/hammmer-bootstrap-libs/' 'libantlr3c-3.1.1.tar.bz2' 'libantlr3c'
download 'https://dl.dropboxusercontent.com/u/13148925/hammmer-bootstrap-libs/' 'boost.sandbox.guid-1.58.0.tar.bz2' 'boost/guid/1.58.0'
download 'https://dl.dropboxusercontent.com/u/13148925/hammmer-bootstrap-libs/' 'boost.sandbox.crypto-1.58.0.tar.bz2' 'boost/crypto/1.58.0'
download 'https://dl.dropboxusercontent.com/u/13148925/hammmer-bootstrap-libs/' 'boost.sandbox.process-1.58.0.tar.bz2' 'boost/process/1.58.0'
git clone https://github.com/darkangel-ua/hammer.git
cd hammer && git reset --hard 400c0d05ef35973691643484ac22f3f7bf5dee0e && cd ..
if [ -d ./build ]; then
rm -R ./build
fi
mkdir build && cd build && cmake ../ && make -j 4 && cpack
| true |
dde7e3cda59d3bdd8e38a32c118cb4b99c79891f
|
Shell
|
jkwebco/speech2txtclip
|
/picoTTSFromFile.sh
|
UTF-8
| 427 | 3.34375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# SVOX Pico TTS - Read text from file
#
# EXECUTE COMMAND
# ./picoTTSFromFile.sh text_en.txt
# ./picoTTSFromFile.sh de-DE text_de.txt
#
##### Constants
language="en-GB"
tmpFile="/tmp/picoTmpAudio.wav"
text=""
##### Main
if [ $# == 1 ]; then
text="${1}"
else
language="${1}"
text="${2}"
fi
pico2wave -l=${language} -w=${tmpFile} "`cat ${text}`" treble 24 gain -l 6
aplay ${tmpFile}
rm ${tmpFile}
| true |
c5dd39bb52ab02bd9683bd4d5e81686a5d224a53
|
Shell
|
haitch/bashsigterm
|
/bashsigterm.sh
|
UTF-8
| 326 | 3.15625 | 3 |
[] |
no_license
|
function loop()
{
while true
do
sleep 1
echo "looping"
done
}
function int()
{
echo "sigint"
exit
}
function term()
{
echo "term"
exit
}
function quit()
{
echo "quit"
exit
}
function shkill()
{
echo "kill"
exit
}
trap int SIGINT
trap term SIGTERM
trap shkill SIGKILL  # note: SIGKILL cannot be trapped by any process, so this handler never runs
trap quit SIGQUIT
loop
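# To exercise the handlers from another shell (pid is whatever pidof reports for this script):
#   kill -TERM <pid>   # prints "term" and exits
#   kill -INT <pid>    # prints "sigint" and exits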
| true |
e1925b50bee6cd5301736b0d13ca55de5bea8979
|
Shell
|
ipsaone/ANNA-Backend
|
/vagrant/configure
|
UTF-8
| 4,210 | 3.34375 | 3 |
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
export DEBIAN_FRONTEND=noninteractive
# Remove old log
[[ -f /home/vagrant/vm_build.log ]] && rm /home/vagrant/vm_build.log
# Put date in provision log
date > /home/vagrant/vm_build.log
# Variables
DBHOST=localhost
DBNAME=ipsaone
DBUSER=root
DBPASSWD=secret
echo -e "\n--- Provisioning Vagrant box ---"
echo -e "Warning: be patient\n"
echo -e "Updating packages list"
sudo apt-get -qy update >> /home/vagrant/vm_build.log 2>&1
echo -e "Installing base packages"
sudo apt-get -qy install curl build-essential git g++ htop >> /home/vagrant/vm_build.log 2>&1
echo -e "Installing NodeJS"
curl -sL https://deb.nodesource.com/setup_13.x | sudo bash - >> /home/vagrant/vm_build.log 2>&1
sudo apt-get -qy install nodejs >> /home/vagrant/vm_build.log 2>&1
cd /home/vagrant/ANNA-Backend
rm -rf ~/.node_modules
mkdir ~/.node_modules
rm -rf /home/vagrant/ANNA-Backend/node_modules
ln -sf ~/.node_modules ./node_modules
echo 'export ONEOS="true"' >> ~/.bashrc
export ONEOS="true";
echo -e "Installing Apache"
sudo apt-get -qy install apache2 >> /home/vagrant/vm_build.log 2>&1
echo -e "Installing MySQL specific packages and settings"
sudo debconf-set-selections <<< "mysql-server mysql-server/root_password password $DBPASSWD" >> /home/vagrant/vm_build.log 2>&1
sudo debconf-set-selections <<< "mysql-server mysql-server/root_password_again password $DBPASSWD" >> /home/vagrant/vm_build.log 2>&1
sudo debconf-set-selections <<< "phpmyadmin phpmyadmin/dbconfig-install boolean true" >> /home/vagrant/vm_build.log 2>&1
sudo debconf-set-selections <<< "phpmyadmin phpmyadmin/app-password-confirm password $DBPASSWD" >> /home/vagrant/vm_build.log 2>&1
sudo debconf-set-selections <<< "phpmyadmin phpmyadmin/mysql/admin-pass password $DBPASSWD" >> /home/vagrant/vm_build.log 2>&1
sudo debconf-set-selections <<< "phpmyadmin phpmyadmin/mysql/app-pass password $DBPASSWD" >> /home/vagrant/vm_build.log 2>&1
sudo debconf-set-selections <<< "phpmyadmin phpmyadmin/reconfigure-webserver multiselect none" >> /home/vagrant/vm_build.log 2>&1
sudo apt-get -qy install mysql-server phpmyadmin >> /home/vagrant/vm_build.log 2>&1
echo -e "Setting up our MySQL user and db"
sudo mysql -u $DBUSER -p$DBPASSWD -e "CREATE DATABASE $DBNAME DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci" >> /home/vagrant/vm_build.log 2>&1
sudo mysql -u $DBUSER -p$DBPASSWD -e "grant all privileges on $DBNAME.* to '$DBUSER'@'localhost' identified by '$DBPASSWD'" >> /home/vagrant/vm_build.log 2>&1
echo -e "Setting up PHPmyAdmin"
# sudo ln -s /etc/phpmyadmin/apache.conf /etc/apache2/conf-available/phpmyadmin.conf
sudo a2enconf phpmyadmin.conf >> /home/vagrant/vm_build.log 2>&1
sudo service apache2 reload >> /home/vagrant/vm_build.log 2>&1
echo -e "Installing Redis"
sudo apt-get -qy install redis-server >> /home/vagrant/vm_build.log 2>&1
sudo cp -f /home/vagrant/ANNA-Backend/vagrant/redis.conf /etc/redis/redis.conf >> /home/vagrant/vm_build.log 2>&1
sudo service redis-server restart >> /home/vagrant/vm_build.log 2>&1
echo -e "Creating .env file"
cd /home/vagrant/ANNA-Backend
if [ ! -f .env ]
then
echo "DEV=true" >> .env
echo "HOST=192.168.50.5" >> .env
echo "PORT=8080" >> .env
echo "CHECK_AUTH=true" >> .env
echo "" >> .env
echo "DB_HOST=$DBHOST" >> .env
echo "DB_USERNAME=$DBUSER" >> .env
echo "DB_PASSWORD=$DBPASSWD" >> .env
echo "DB_NAME=$DBNAME" >> .env
echo "DB_FORCE_SYNC=false" >> .env
fi
echo -e "Installing backend dependencies via NPM"
cd /home/vagrant/ANNA-Backend
echo -e "\tGlobal dependencies..."
sudo npm install -g node-gyp@latest npm@latest node-pre-gyp@latest nyc@latest sequelize-cli@latest >> /home/vagrant/vm_build.log 2>&1
echo -e "\tLocal dependencies..."
npm install >> /home/vagrant/vm_build.log 2>&1
sudo chown -R $USER:$(id -gn $USER) /home/vagrant/.config >> /home/vagrant/vm_build.log 2>&1
echo -e "Migrating $DNAME database"
cd /home/vagrant/ANNA-Backend
node_modules/.bin/sequelize db:migrate >> /home/vagrant/vm_build.log 2>&1
echo -e "Seeding $DNAME database"
cd /home/vagrant/ANNA-Backend
node_modules/.bin/sequelize db:seed:all >> /home/vagrant/vm_build.log 2>&1
echo -e "\n--- Vagrant box is ready ---"
| true |
85ac1a79b97430a325e886dbc7397be2cf9726d0
|
Shell
|
kaloun34/mister-arcade-attract
|
/Attract_Arcade_Horiz.sh
|
UTF-8
| 3,284 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/bash
# This sets up a Linux daemon to cycle through arcade cores periodically
# Games are randomly pulled from all MRAs or a user-provided list
# To adjust the timeout change the "sleep" value
#
# https://github.com/mrchrisster/mister-arcade-attract/
# Variables
# Time before going to the next core
timer=120
#Curated List of Horizontal Games
games="Commando.mra
Gauntlet II.mra
Gauntlet (rev 14).mra
SectionZ.mra
Rampage.mra
DoDonPachi.mra
Discs of Tron.mra
Bionic Commando.mra
Black Tiger.mra
Double Dragon.mra
Forgotten Worlds -World, newer-.mra
Bubble Bobble.mra
Star Guards.mra
Daimakaimura -Japan-.mra
Double Dragon II - The Revenge.mra
F-1 Dream.mra
Forgotten Worlds -World, newer-.mra
Tetris.mra
Rush'n Attack (US).mra
Popeye.mra
Robotron 2084.mra
Dynasty Wars -USA, B-Board 89624B- -.mra
Final Fight -World, set 1-.mra
Strider -USA, B-Board 89624B-2-.mra
Tetris (cocktail set 1).mra
U.N. Squadron -USA-.mra
Willow -World-.mra
Tapper.mra
Carrier Air Wing -World 901012-.mra
Magic Sword Heroic Fantasy -World 900725-.mra
Mega Twins -World 900619-.mra
Nemo -World 901130-.mra
Captain Commando -World 911202-.mra
Street Fighter (US, set 1).mra
Knights of the Round -World 911127-.mra
Street Fighter II The World Warrior -World 910522-.mra
The King of Dragons -World 910805-.mra
Three Wonders -World 910520-.mra
Street Fighter II The World Warrior -World 910522-.mra
Adventure Quiz Capcom World 2 -Japan 920611-.mra
Varth Operation Thunderstorm -World 920714-.mra
Pnickies -Japan 940608-.mra
Pokonyan! Balloon -Japan 940322-.mra
Mega Man The Power Battle -CPS1, USA 951006-.mra
Pang! 3 -Euro 950601-.mra
Quiz Tonosama no Yabou 2 Zenkoku-ban -Japan 950123-.mra
Street Fighter Zero -CPS Changer, Japan 951020-.mra
Cadillacs and Dinosaurs (World 930201).mra
Muscle Bomber Duo Ultimate Team Battle (World 931206).mra
Saturday Night Slam Masters (World 930713).mra
The Punisher (World 930422).mra
Warriors of Fate (World 921031).mra"
# Functions
nextcore()
{
# Get a random game from the list
IFS=$'\n'
mra=$(echo "${games[*]}" |shuf |head -1)
# If the mra variable is valid this should immediately pass, but if not it'll keep trying
# Partially protects against typos from manual editing and strange character parsing problems
until [ -f "/media/fat/_Arcade/${mra}" ]; do
mra=$(shuf -n 1 ${mralist})
done
# Debug output - connect and run script via SSH
echo "${mra}"
# Tell MiSTer to load the next MRA
echo "load_core /media/fat/_Arcade/${mra}" > /dev/MiSTer_cmd
}
# Script Start
# Get our list of MRAs from the Scripts file
mralist="/media/fat/Scripts/Attract_Arcade.txt"
# If the file does not exist make one in /tmp/
if [ ! -f /media/fat/Scripts/Attract_Arcade.txt ]; then
mralist="/tmp/Attract_Arcade.txt"
ls -N1 /media/fat/_Arcade/*.mra | sed 's/\/media\/fat\/_Arcade\///' > ${mralist}
fi
# Load the next core and exit - for testing via ssh
# Won't reset the timer!
case "$1" in
next)
nextcore
exit 0
;;
esac
# If another attract process is running kill it
# This can happen if the script is started multiple times
if [ -f /var/run/attract.pid ]; then
kill -9 $(cat /var/run/attract.pid)
fi
# Save our PID
echo "$(pidof ${0})" > /var/run/attract.pid
# Loop
while :; do
nextcore
sleep ${timer}
done
exit 0
| true |
27b13b15e218e1d4d8c088d0a8645a6d29d64f97
|
Shell
|
StefanSchwartze/frontend-configs
|
/release.sh
|
UTF-8
| 1,208 | 3.8125 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
RED='\033[0;31m'
CYAN='\033[0;36m'
YELLOW='\033[1;33m'
NC='\033[0m'
PACKAGE=""
function inquirePackage {
echo "Which package do you want to release?"
select yn in "eslint-config-sevenval" "tslint-config-sevenval" "stylelint-config-sevenval"; do
case $yn in
eslint-config-sevenval ) PACKAGE="eslint-config-sevenval"; break;;
tslint-config-sevenval ) PACKAGE="tslint-config-sevenval"; break;;
stylelint-config-sevenval ) PACKAGE="stylelint-config-sevenval"; break;;
esac
done
}
function inquire {
echo "$1"
select yn in "Yes" "No"; do
case $yn in
Yes ) break;;
No ) echo -e "${RED}Aborting...${NC}"; exit;;
esac
done
}
inquirePackage
echo ''
echo -e "Continuing with ${CYAN}${PACKAGE}${NC}"
echo ''
inquire 'Did you increase the version number in "package.json" according to SemVer?'
echo ''
echo -e "Continuing with ${CYAN}${PACKAGE}${NC}"
echo ''
inquire 'Did you add the release logs to "CHANGELOG.md"?'
echo ''
echo -e "${CYAN}Starting the release process...${NC}"
echo ''
# Only proceed when tests are passing
yarn test
cd "packages/${PACKAGE}/" \
&& npm publish; ret=$?; cd -; exit $ret
| true |
29879fbc85cbe763e68dde9949eaaef92f88299f
|
Shell
|
hanakin/mac-setup
|
/test.sh
|
UTF-8
| 621 | 2.75 | 3 |
[] |
no_license
|
#!/bin/zsh
# include my library helpers for colorized echo and require_brew, etc
source ./scripts/lib.sh
###############################################################################
# Gather info
###############################################################################
bot "testing bot function should be blue"
ask "What is your first name? " firstname
bot "Hi $firstname, Its nice to meet you."
running "testing running function should be cyan"
ok "testing ok funciton should be green"
warn "testing warn funciton should be yellow"
error "testing error funciton should be red"
source ./scripts/osx.sh
| true |
419d4141a3d8b4ac3b0867d01e813b985fc24601
|
Shell
|
Marwito/minutehero-webapp
|
/scripts/server_setup.sh
|
UTF-8
| 3,036 | 2.78125 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# PRUN+Chef: Based on ubuntu: 16.04.1 LTS + SSHD(with authorize_key)
sudo apt-get -y update
## Ruby and dependencies
# Pre-requirements
sudo apt-get install -y git build-essential libsqlite3-dev libssl-dev gawk g++
sudo apt-get install -y libreadline6-dev libyaml-dev sqlite3 autoconf libgdbm-dev
sudo apt-get install -y libncurses5-dev automake libtool bison pkg-config libffi-dev
## Install ruby
sudo apt-get -y install software-properties-common
sudo apt-add-repository -y ppa:brightbox/ruby-ng
sudo apt-get update
sudo apt-get -y install ruby2.3 ruby2.3-dev
# Install prerequisites
echo "gem: --no-ri --no-rdoc" | sudo tee -a /root/.gemrc
sudo gem install bundler
sudo gem install rack -v 1.6.0
sudo gem install thin -v 1.6.3
sudo thin install
sudo /usr/sbin/update-rc.d -f thin defaults
## Rails dependencies
sudo apt-get install -y imagemagick libmagickwand-dev
## Install PostgreSQL:
export LANGUAGE=en_US.UTF-8
#sudo apt-get -y install postgresql libpq-dev
## Install postgres 9.6:
sudo add-apt-repository "deb http://apt.postgresql.org/pub/repos/apt/ xenial-pgdg main"
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt-get update
sudo apt-get install -y postgresql-9.6 libpq-dev
sudo sed -i "s/#listen_addresses = 'localhost'/listen_addresses = 'localhost'/" /etc/postgresql/9.6/main/postgresql.conf
sudo sed -i "s/local all all peer/local all all md5/" /etc/postgresql/9.6/main/pg_hba.conf
sudo sed -i "s/ssl = true/ssl = false/" /etc/postgresql/9.6/main/postgresql.conf
sudo service postgresql restart
## Rewrite postgres password:
sudo -u postgres psql -c "ALTER USER postgres WITH PASSWORD 'postgres';"
## Nginx: using vhost_wohhup.conf config file
sudo apt-get install -y nginx
sudo touch /etc/nginx/sites-available/vhost_minutehero.conf
sudo ln -s /etc/nginx/sites-available/vhost_minutehero.conf /etc/nginx/sites-enabled/vhost_minutehero.conf
sudo rm /etc/nginx/sites-enabled/default
sudo service nginx restart
# Nodejs/bower
curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.32.1/install.sh | bash
source /home/ubuntu/.bashrc
nvm install node
npm install -g bower
## Capistrano
sudo mkdir /var/www/minutehero
sudo chown -R ubuntu:ubuntu /var/www/minutehero
#export RAILS_ENV="staging" ; bundle exec rake db:create
## init.d script
sudo thin config -C /etc/thin/minutehero.yml -c /var/www/minutehero/current -l log/thin.log -e production --servers 1 --port 3000
sudo touch /etc/init.d/minutehero
sudo chmod a+x /etc/init.d/minutehero
sudo systemctl daemon-reload
# Generating SSH key for Deployment:
# Give public key to Github > Settings > Deploy Keys
ssh-keygen -t rsa -b 4096 -C "admin@minutehero.net"
## Create database: in failed deployment release folder
sudo -u postgres psql -c "CREATE DATABASE minutehero;"
#export RAILS_ENV="production" ; bundle exec rake db:create
## Deploy
cap production deploy
cap production deploy:db_seed
| true |
05a57d7e9af9c67dff80cb9ce531b142adccd84f
|
Shell
|
Jokeren/COMP522
|
/valgrind.sh
|
UTF-8
| 165 | 2.8125 | 3 |
[] |
no_license
|
#!/bin/bash
THREADS=(1 2 4 8 16 32)
MAIN=$1
DIR=$2
for ((i=0;i<${#THREADS[@]};i++))
do
valgrind --tool=massif $MAIN $DIR/config.${THREADS[$i]}.valgrind.txt
done
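# Example invocation (binary and config dir below are hypothetical):
#   ./valgrind.sh ./build/bench ./configs
# which expects config files named like ./configs/config.8.valgrind.txt per thread count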
| true |
f638075a04dbd9a760ab3d523014ca75e1f2cac7
|
Shell
|
LeeroyC710/docker_exercises
|
/scripting/secondscript.sh
|
UTF-8
| 540 | 3.265625 | 3 |
[] |
no_license
|
#!/bin/bash
#Creates a practice directory with two files and turns one of them into a small script
mkdir exercise2
#move into that directory
cd exercise2
#creates 2 .txt files
touch file1.txt
touch file2.txt
#moves one of the files outside the directory
mv file1.txt ./..
#renames that file to test1.txt
mv ../file1.txt ../test1.txt
#renames the second file to test2.sh
mv file2.txt test2.sh
#make test2.sh a script that prints the working directory and current user
echo "#!/bin/bash" > test2.sh
echo "pwd" >> test2.sh
echo "whoami" >> test2.sh
chmod +x test2.sh
./test2.sh
98ae764dc5101b18a73d4491ca0b11691f019763
|
Shell
|
wvianna/formacaogesac_kitsos
|
/anexos.sh
|
UTF-8
| 1,523 | 3 | 3 |
[] |
no_license
|
#!/bin/bash
#this script was written to be run on GNU/Linux
g=`which wget`
if [ "$g" != "" ]
then
echo ######################################
echo "Using $g to download the attachments"
echo ######################################
sleep 2
mkdir videos
cd videos
$g -c http://www.antispam.br/videos/cgi-navegar-legendado.wmv
$g -c http://www.antispam.br/videos/cgi-invasores-legendado.wmv
$g -c http://www.antispam.br/videos/cgi-spam-legendado.wmv
$g -c http://www.antispam.br/videos/cgi-defesa-legendado.wmv
$g -c http://www.eriberto.pro.br/warriors/warriors_of_the_net.avi
cd ..
mkdir doc
cd doc
$g -c http://cartilha.cert.br/download/cartilha-seguranca-internet.pdf
cd ..
mkdir softwares
cd softwares
#Avira
$g -c http://dl1.avgate.net/package/wks_avira/win32/ptbr/pecl/avira_antivir_personal_ptbr.exe
#Avira definitions update
$g -c http://dl.antivir.de/down/vdf/ivdf_fusebundle_nt_en.zip
$g -c http://dl.antivir.de/package/fusebundle/win32/int/vdf_fusebundle.zip
#Avast free antivirus
$g -c http://download833.avast.com/iavs5x/setup_av_free_por.exe
#Spybot
$g -c http://www.spybotupdates.com/files/spybotsd162.exe
#Combofix
$g -c http://download.bleepingcomputer.com/protected/dd22d4a60702ee4a38e111a47ec957e9/4cb4691b/ComboFix.exe
#Endian 2.4
$g -c http://ufpr.dl.sourceforge.net/project/efw/Development/EFW-2.4-RESPIN/EFW-COMMUNITY-2.4-201005280528-RESPIN.iso
cd ..
else
echo ################
echo "Install wget"
echo ################
exit 1
fi
| true |
e407fe5e8b9b275b1436f4e90c951820739fa4da
|
Shell
|
ArtemKushnerov/dotfiles
|
/.bashrc
|
UTF-8
| 6,931 | 2.84375 | 3 |
[] |
no_license
|
export REPOS=~/dev/streetshares
export PATH=$PATH:$REPOS/kubernetes/scripts
export PATH=$PATH:$HOME/bin
alias cleanpyc="find . -name *.pyc -delete"
alias gitaliases="cat ~/.git_aliases"
alias pullkub="pushd $REPOS/kubernetes && git pull && popd"
alias pullcommon3="pushd $REPOS/common3 && git pull && popd"
alias lint="./docker/linter.sh"
#alias test="./docker/tester.sh"
# aliases for local dev
alias start_mysql="pushd $REPOS/docker/ && ./mysql.sh && popd"
alias launch_all="pushd $REPOS/docker/ && docker start mysql && docker start streetshares && docker-compose up -d && popd"
alias start_nginx="pushd $REPOS/docker/ && ./nginx.sh && popd && dl nginx"
alias sql="mysql -u root -h 127.0.0.1"
# general commands
alias l="ls -lhaG"
alias ll="ls -lhaG"
alias la="ls -la"
alias grep="grep --color=auto"
alias watch="watch --color "
source ~/.dotfiles/.git_aliases
function gpullall () {
pushd $REPOS;
# pull all repositories, 8 repositories at a time
find . -maxdepth 1 -type d \( ! -name . \) -print | xargs -P 8 -I{} bash -c 'cd {} && [ -d .git ] && git pull';
popd;
}
# CAREFUL! Know how to use this! For every branch in your current repo, delete any branch that has been merged and not pushed to a remote branch
alias deletelocalmergedbranches='git branch --merged | egrep -v "(^\*|master|dev)" | xargs git branch -d'
# CAREFUL! For every repo, delete any branch that has been merged and does not have a remote branch
alias globaldeletelocalmergedbranches='find $REPOS -type d -mindepth 1 -maxdepth 1 | xargs -I {} sh -c '\''echo {}; cd {}; if [[ -d .git ]]; then git branch --merged | egrep -v "(^\*|master|dev)" | xargs git branch -d; fi'\'''
# up 'n' folders
alias ..="cd .."
alias ...="cd ../.."
alias ....="cd ../../.."
alias .....="cd ../../../.."
# shell aliases
alias reload="source ~/.bashrc"
alias edit="vim ~/.bashrc"
alias k="clear"
alias cdrepos="cd $REPOS"
# docker-compose aliases
alias dc="docker-compose"
alias dce="docker-compose exec"
# docker aliases
alias dp="docker ps -a"
alias dl="docker logs"
alias da="docker attach"
alias de="docker exec -it"
deb() {
docker exec -it $1 bash
}
alias dstart="docker start"
alias dstop="docker stop -t 1"
alias drestart="docker restart -t 1"
alias drm="docker rm"
alias dprune="docker system prune"
alias upper='pushd $REPOS/docker && ./upper.sh && popd'
alias downer='pushd $REPOS/docker && ./downer.sh && popd'
alias dtail='docker logs --tail 0 -f'
# k8s aliases
alias cc='kubectl config current-context'
alias uc="kubectl config use-context"
alias kc="kubectl"
alias kcgpods="kubectl get pods -o wide"
alias kcdpods="kubectl describe pods"
alias kcgdepls="kubectl get deployments -o wide"
alias kcddepls="kubectl describe deployments"
# list all images for pods running in the current context
alias kcimages='kubectl get pods -o jsonpath="{.items[*].spec.containers[*].image}" |tr -s "[[:space:]]" "\n" |sort |uniq -c'
alias generatek8stoken='kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep eks-admin | awk '\''{print $1}'\'')'
alias deletepodsinerrorstate='kubectl get pods --field-selector=status.phase=Error | awk "{print \$1}" | xargs -I {} kubectl delete pods/{}'
alias vi="nvim"
alias vim="nvim"
alias j="jobs"
alias f="fg"
# Get and decode a secret
function kcgsecret() {
declare -i result
declare secret_key="${1}"
declare secret
secret=$(kubectl get secret "${secret_key}" -o json)
result=${?}
if [[ ${result} = 0 ]]; then
echo "${secret}" | jq -r '.data | map_values(@base64d)'
fi
}
# Check the Nginx Configuration on the ingress controller
function checknginx() {
declare ingress_pod_name
ingress_pod_name=$(kubectl get pods -n ingress-nginx -o json | jq -r '.items[].metadata.name')
if [[ -n "${ingress_pod_name}" ]]; then
kubectl exec -n ingress-nginx "${ingress_pod_name}" cat /etc/nginx/nginx.conf
else
"Failed to get ingress pod name"
fi
}
# reset docker containers and images
function dreset() {
docker stop -t0 $(docker ps -aq);
docker rm $(docker ps -aq);
docker rmi -f $(docker images -q);
}
# docker stop and rm container
function dkill() {
docker stop -t 1 "$1"
docker rm "$1"
}
# start docs server
alias start_docs="cd $REPOS/docs/_gitbook/v1 && npm run dev"
# aws shortcuts
sql_prod () { pushd $REPOS/aws; command ./sql.sh "$@"; popd; }
sql_staging () { pushd $REPOS/aws; command ./sql.sh "$@" -s; popd; }
sql_demo () { pushd $REPOS/aws; command ./sql.sh "$@" -demo; popd; }
sql_qa () { pushd $REPOS/aws; command ./sql.sh "$@" -qa; popd; }
# A simple man's way to login to ecr
alias ecrlogin='$(aws ecr get-login --no-include-email --region us-east-1)'
# command prompt settings (reference: http://blog.taylormcgann.com/2012/06/13/customize-your-shell-command-prompt/)
parse_git_branch() {
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/ (\1)/'
}
export PS1='$(parse_git_branch)[`date "+%H:%M:%S"`] \[\033[1;32m\] \w \[\033[1;35m\]$ \[\033[1;37m\]'
## make aliases
# list all targets in a makefile (https://gist.github.com/pvdb/777954)
alias lmt='make -rpn | sed -n -e "/^$/ { n ; /^[^ .#][^ ]*:/p ; }" | egrep --color "^[^ ]*:"'
# bash completion
[ -f /usr/local/etc/bash_completion ] && . /usr/local/etc/bash_completion || {
# if not found in /usr/local/etc, try the brew --prefix location
[ -f "$(brew --prefix)/etc/bash_completion.d/git-completion.bash" ] && \
. $(brew --prefix)/etc/bash_completion.d/git-completion.bash
}
eval "$(pyenv init -)"
export WORKON_HOME=~/.venvs
source /Users/artsiom/.pyenv/versions/3.6.9/bin/virtualenvwrapper.sh
export PATH=/Users/artsiom/.pyenv/shims:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/Users/artsiom/dev/streetshares/kubernetes/scripts:/Users/artsiom/bin
export NVM_DIR="$HOME/.nvm"
[ -s "/usr/local/opt/nvm/nvm.sh" ] && . "/usr/local/opt/nvm/nvm.sh" # This loads nvm
[ -s "/usr/local/opt/nvm/etc/bash_completion" ] && . "/usr/local/opt/nvm/etc/bash_completion" # This loads nvm bash_completion
eval "$(nodenv init -)"
export LD_SDK_KEY=<redacted>
alias pytest="DB_NAME=pytest_application pytest"
alias wn="workon"
export LC_ALL=en_US.UTF-8
# export COMPOSE_FILE=docker/docker-compose.yml
export K8S_LAB=arn:aws:eks:us-east-1:263735779401:cluster/lab
complete -o default -o nospace -F _virtualenvs wn
__git_complete gco _git_checkout
eval "$(direnv hook bash)"
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
export FZF_DEFAULT_COMMAND='ag --nocolor --ignore node_modules --ignore *.pyc --ignore __pycache__ -g ""'
export NPM_TOKEN=<redacted>
#export LC_ALL=C
export BASH_SILENCE_DEPRECATION_WARNING=1
wn street
export GIT_USER=artsiom-streetshares
export GIT_PASS=<redacted>
| true |
0b6a361413259b5575e07d9c3d5be3af8fd2a06d
|
Shell
|
CodeGlitcher/autossh-docker
|
/run.sh
|
UTF-8
| 1,665 | 3.703125 | 4 |
[] |
no_license
|
#!/bin/bash
set -e
echo "starting"
CONFIG_PATH=/data/config.json
if [ ! -f "$CONFIG_PATH" ]; then
echo "missing config data"
echo "coping example json to data dir"
cp /example/config-example.json /data/config.json
exit 1
fi
HOSTNAME=$(jq --raw-output ".hostname" $CONFIG_PATH)
SSH_PORT=$(jq --raw-output ".ssh_port" $CONFIG_PATH)
USERNAME=$(jq --raw-output ".username" $CONFIG_PATH)
REMOTE_FORWARDING=$(jq --raw-output ".remote_forwarding[]" $CONFIG_PATH)
LOCAL_FORWARDING=$(jq --raw-output ".local_forwarding[]" $CONFIG_PATH)
OTHER_SSH_OPTIONS=$(jq --raw-output ".other_ssh_options" $CONFIG_PATH)
MONITOR_PORT=$(jq --raw-output ".monitor_port" $CONFIG_PATH)
PRIVATE_KEY=$(jq --raw-output ".private_key" $CONFIG_PATH)
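# For reference, a minimal config.json sketch inferred from the jq paths
# above -- every value below is illustrative, not a shipped default:
# {
#   "hostname": "example.com",
#   "ssh_port": 22,
#   "username": "tunnel",
#   "remote_forwarding": ["8080:localhost:80"],
#   "local_forwarding": ["3306:localhost:3306"],
#   "other_ssh_options": "-o Compression=yes",
#   "monitor_port": 20000,
#   "private_key": "/data/id_rsa"
# }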
if [ ! -f "$PRIVATE_KEY" ]; then
echo "missing private key"
exit 1
fi
command_args="-M ${MONITOR_PORT} -N -q -o ServerAliveInterval=25 -o ServerAliveCountMax=3 ${USERNAME}@${HOSTNAME} -p ${SSH_PORT} -i ${PRIVATE_KEY}"
if [ ! -z "$REMOTE_FORWARDING" ]; then
echo "Adding remote forarding rules"
while read -r line; do
command_args="${command_args} -R $line"
done <<< "$REMOTE_FORWARDING"
fi
if [ ! -z "$LOCAL_FORWARDING" ]; then
echo "Adding local forarding rules"
while read -r line; do
command_args="${command_args} -L $line"
done <<< "$LOCAL_FORWARDING"
fi
echo "[INFO] testing ssh connection"
ssh -o StrictHostKeyChecking=no -p $SSH_PORT $HOSTNAME 2>/dev/null || true
echo "[INFO] listing host keys"
ssh-keyscan -p $SSH_PORT $HOSTNAME || true
command_args="${command_args} ${OTHER_SSH_OPTIONS}"
echo "[INFO] command args: ${command_args}"
# start autossh
/usr/bin/autossh ${command_args}
| true |
6f14f060c309d0be6d136debf6903e1ae2156933
|
Shell
|
youdowell/k8s-galera-init
|
/tests/mysql-tests.sh
|
UTF-8
| 7,454 | 3.671875 | 4 |
[] |
no_license
|
#!/bin/bash
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )"
NAMESPACE="mysql-test"
label="app=mysql"
FAILED="`tput setaf 1`FAILED`tput sgr0`"
PASSED="`tput setaf 2`PASSED`tput sgr0`"
TIMEOUT=120s
# --------------------------------------
# K8S RESOURCES
# --------------------------------------
create() {
kubectl create namespace ${NAMESPACE} --dry-run -o yaml | kubectl apply -f -
}
start() {
kubectl --namespace ${NAMESPACE} apply --recursive --timeout=$TIMEOUT -f "$DIR/example"
}
stop() {
kubectl --namespace ${NAMESPACE} delete svc,statefulset -l "$label"
echo -n "Waiting until all pods are stopped ["
timeout=$((SECONDS + 120))
while [ $SECONDS -lt $timeout ]; do
pods=$(kubectl --namespace ${NAMESPACE} get po -l "$label" --no-headers 2>/dev/null)
[ -z "$pods" ] && echo "OK]" && break
sleep 2
echo -n "."
done
}
delete() {
kubectl --namespace ${NAMESPACE} delete svc,statefulset,pvc,pv -l "$label" || true
kubectl delete namespace ${NAMESPACE} --timeout=$TIMEOUT --force || true
}
# --------------------------------------
# UTILITIES
# --------------------------------------
before() {
echo
echo "[+] $1"
RUNNING_TEST=$1
ERRORS=()
start
}
after() {
echo ----------------------------------------
}
pass() {
echo "[+] ${FUNCNAME[1]}: $PASSED"
}
fail() {
#stacktrace=(${FUNCNAME[@]:1})
#unset 'stacktrace[${#stacktrace[@]}-1]'
msg="$@"
echo "[+] ${FUNCNAME[1]}: $FAILED ${msg:+"- $msg"}"
echo
ERRORS+=("${FUNCNAME[1]} ${msg}")
exit 1
}
exec_sql() {
pod=$1
sql=$2
mysql_cmd='mysql -u"${MYSQL_ROOT_USER}" -p"${MYSQL_ROOT_PASSWORD}"'
kubectl --namespace ${NAMESPACE} exec "$pod" -- bash -c "${mysql_cmd} -e '${sql}' -q --skip-column-names ${@:3}"
}
populate_test_data() {
pod=${1:-"mysql-0"}
degree=${2:-16}
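# each INSERT ... SELECT below doubles the row count, so after $degree
# iterations the table holds 2^degree rows (65536 for the default of 16)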
exec_sql "$pod" 'DROP DATABASE IF EXISTS test;'
exec_sql "$pod" 'CREATE DATABASE test;'
exec_sql "$pod" 'CREATE TABLE test.rnd_values (id BIGINT NOT NULL AUTO_INCREMENT, val INT NOT NULL, PRIMARY KEY (id));'
exec_sql "$pod" 'INSERT INTO test.rnd_values (val) VALUES (rand()*10000);'
echo -n "Populating random values ["
for i in $(seq 1 $degree); do
exec_sql "$pod" 'INSERT INTO test.rnd_values (val) SELECT a.val * rand() FROM test.rnd_values a;'
cnt=$(exec_sql "$pod" "SELECT count(*) from test.rnd_values;")
echo -n "...$cnt"
done
echo "]"
}
wait_deleted() {
wait_count=${1:-1}
echo -n "Waiting for namespace teardown... ["
timeout=$((SECONDS + 120))
while [ $SECONDS -lt $timeout ]; do
kubectl get namespace ${NAMESPACE} &>/dev/null || break
sleep 2
echo -n "."
done
if kubectl get namespace ${NAMESPACE} &>/dev/null || false; then
echo "Failed: Timeout!]"
else
echo "OK]"
fi
}
wait_ready() {
wait_count=${1:-1}
echo -n "Waiting until exactly $wait_count containers ready ["
timeout=$((SECONDS + 300))
while [ $SECONDS -lt $timeout ]; do
ready_count=$(kubectl --namespace ${NAMESPACE} get pods -l "$label" -o yaml 2>/dev/null | grep "ready: true" -c || true)
[ $ready_count -eq $wait_count ] && echo "OK]" && break
sleep 2
echo -n "."
done
if [ $ready_count -ne $wait_count ]; then
fail "Containers ready expected exactly '$wait_count' but was '$ready_count'!"
fi
}
# --------------------------------------
# TESTS
# --------------------------------------
test_clusterShutdown_recovered() {
## Given
kubectl --namespace ${NAMESPACE} scale statefulsets mysql --replicas=3 --timeout=$TIMEOUT
wait_ready 3
populate_test_data "mysql-1"
#kubectl --namespace ${NAMESPACE} delete po -l "$label" --grace-period=0 --force
## When
stop
start
wait_ready 3
## Then
echo "Testing values"
if ! exec_sql "mysql-0" "SHOW DATABASES;" | grep "test" &>/dev/null; then
fail "Test database not found on pod mysql-0!"
fi
for i in {0..2}; do
pod="mysql-$i"
cnt_actual=$(exec_sql "$pod" "SET SESSION wsrep_sync_wait = 1; SELECT count(*) from test.rnd_values;")
echo "Values count on '$pod': $cnt_actual"
if [ $cnt -ne $cnt_actual ]; then
fail "Random values count on '$pod' expected '$cnt' but was '$cnt_actual'"
fi
done
pass
}
test_clusterCrash_recovered() {
## Given
kubectl --namespace ${NAMESPACE} scale statefulsets mysql --replicas=3 --timeout=$TIMEOUT
wait_ready 3
populate_test_data "mysql-1"
cnt=$(exec_sql "mysql-0" "SET SESSION wsrep_sync_wait = 1; SELECT count(*) from test.rnd_values;")
# Crashing all cluster nodes
# for i in {1..3}; do
# docker kill $(docker ps -q -f name=mysql-${i})
# done
kubectl --namespace ${NAMESPACE} delete po -l "$label" --grace-period=0 --force --timeout=$TIMEOUT
## When
start
kubectl --namespace ${NAMESPACE} scale statefulsets mysql --replicas=3 --timeout=$TIMEOUT
wait_ready 3
## Then
echo "Testing values"
if ! exec_sql "mysql-0" "SHOW DATABASES;" | grep "test" &>/dev/null; then
fail "Test database not found on pod mysql-0!"
fi
for i in {0..2}; do
pod="mysql-$i"
cnt_actual=$(exec_sql "$pod" "SET SESSION wsrep_sync_wait = 1; SELECT count(*) from test.rnd_values;")
echo "Values count on '$pod': $cnt_actual"
if [ $cnt -ne $cnt_actual ]; then
fail "Random values count on '$pod' expected '$cnt' but was '$cnt_actual'"
fi
done
pass
}
test_nodeCrash_recovered() {
## Given
kubectl --namespace ${NAMESPACE} scale statefulsets mysql --replicas=3 --timeout=$TIMEOUT
wait_ready 3
## When
# Crashing first cluster node
kubectl --namespace ${NAMESPACE} delete po "mysql-0" --grace-period=0 --force
# Populating data on another node
populate_test_data "mysql-1"
# Wait until all nodes are back
wait_ready 3
## Then
echo "Testing values"
for i in {0..2}; do
pod="mysql-$i"
cnt_actual=$(exec_sql "$pod" "SET SESSION wsrep_sync_wait = 1; SELECT count(*) from test.rnd_values;")
echo "Values count on '$pod': $cnt_actual"
if [ $cnt -ne $cnt_actual ]; then
fail "Values count on '$pod' expected '$cnt' but was '$cnt_actual'"
fi
done
pass
}
test_scale_recovered() {
## Given
kubectl --namespace ${NAMESPACE} scale statefulsets mysql --replicas=1 --timeout=$TIMEOUT
wait_ready 1
## When
populate_test_data "mysql-0"
## Then
kubectl --namespace ${NAMESPACE} scale statefulsets mysql --replicas=3 --timeout=$TIMEOUT
wait_ready 3
echo "Testing values"
for i in {0..2}; do
pod="mysql-$i"
cnt_actual=$(exec_sql "$pod" "SET SESSION wsrep_sync_wait = 1; SELECT count(*) from test.rnd_values;")
echo "Values count on '$pod': $cnt_actual"
if [ $cnt -ne $cnt_actual ]; then
fail "Values count on '$pod' expected '$cnt' but was '$cnt_actual'"
fi
done
pass
}
# --------------------------------------
# MAIN
# --------------------------------------
all_tests=$(sed -nE 's/^(test_[a-zA-Z0-9_]+)[[:space:]]*[\(\{].*$/\1/p' $0)
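# Run a single test by name, e.g. (any name from the list above works):
#   ./mysql-tests.sh test_scale_recovered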
run_tests() {
delete
wait_deleted
create
echo "Running tests..."
for testname in "$@"; do
if ! [ ${testname:0:5} = "test_" ]; then
echo "Invalid test name: $testname"
exit 1
fi
before $testname
eval $testname
after $testname
done
delete
echo "Done."
}
case "$1" in
wait_ready)
wait_ready "${@:2}"
;;
create)
create
;;
start)
start
;;
stop)
stop
;;
delete)
delete
;;
exec_sql)
exec_sql "${@:2}"
;;
test_*)
run_tests ${@}
;;
"")
run_tests ${all_tests}
;;
*)
echo "Usage: $0 <tests...>"
echo
echo "Tests:"
printf '\t%s\n' ${all_tests}
;;
esac
exit 0
| true |
b89825e832157c17a1fb7ad0ba2b0a06826268aa
|
Shell
|
5hir0kur0/dotfiles
|
/scripts/.local/bin/rofiinteractive.sh
|
UTF-8
| 1,540 | 3.71875 | 4 |
[] |
no_license
|
#!/bin/bash
set -euo pipefail
PROG=$1
TMPD=/tmp/.rofi_interactive_tmp
exec 2>&1
[ -d "$TMPD" ] || mkdir "$TMPD"
OUTPUT=$TMPD/$PROG/out
INPUT=$TMPD/$PROG/in
if [ ! -d "$TMPD/$PROG" ]; then
mkdir "$TMPD/$PROG"
touch "$TMPD/$PROG/in"
# please, someone forgive me for this...
(tail -f --pid=$$ "$INPUT" | script --quiet --return --command "$PROG 1>> $OUTPUT 2>&1" "$TMPD/script") &
PID=$!
echo "$PID" > "$TMPD/$PROG/pid"
echo "STARTED: $PID"
fi
cd "$TMPD/$PROG" || rofi -e "<span color='red'><b>This won't ever happen</b></span>" -markup
# PID=$(head ./pid)
# STDIN=/proc/$PID/fd/0
ARGS=
FIRST=1
PS1=
PS2=
export PS1 PS2
while true; do
touch $OUTPUT # or alternatively truncate -s 0 $OUTPUT (if the output should be truncated, then in the `script` line above 1>> needs to be changed to 1>)
if [ "$FIRST" -ne 1 ]; then
echo "$ARGS" >> $OUTPUT
echo "$ARGS" >> $INPUT
sleep 0.042 # this value is completely arbitrary...
else
FIRST=0
fi
ROWS=$(wc -l $OUTPUT | cut -f1 -d\ )
# truncating for some reason causes lots of NUL bytes...
ARGS=$(sed 's/\x00//g' < $OUTPUT | rofi -selected-row "$((ROWS-1))" -dmenu -p "$PROG" \
-kb-accept-custom Return -kb-accept-entry Control+Return) || break
done
if [ -d "$TMPD/$PROG" ]; then
echo killing "$PROG"
cd "$TMPD/$PROG" || rofi -e "<span color='red'><b>This won't ever happen</b></span>" -markup
kill %1 || (PID=$(head ./pid); kill -9 "$PID")
cd /tmp || exit 1
rm -r "${TMPD:-?}/$PROG"
fi
| true |
e6e1f93f045de12acc8024550ca98a061b6c515a
|
Shell
|
chrisjbillington/chrisjbillington.github.io
|
/timers/update-vax/update-vax.sh
|
UTF-8
| 684 | 3.703125 | 4 |
[] |
no_license
|
#! /bin/bash
set -euxo pipefail
# Source our secrets, clone a temporary copy of the repo and cd to it. This also gets
# the LOCKFILE variable for locking access to the main repo.
source "$(dirname "$BASH_SOURCE")/../common.sh"
# Wait for vax data to become available:
if python wait-for-vax-update.py | grep "ready!"; then
./vax.sh
fi
# Commit and push
git commit --all -m "Vax update"
# pull first to decrease the chances of a collision. Lockfile ensures this isn't racey
# with respect to the other automation jobs running on this computer, but if we're
# unlucky it could still collide with other pushes to the remote.
flock "${LOCKFILE}" -c "git pull --rebase --autostash; git push"
| true |
a4a9f713b7f2919a38c20f818f76e93397dbf5e7
|
Shell
|
hansrune/domoticz-contrib
|
/utils/pingtests
|
UTF-8
| 960 | 3.546875 | 4 |
[] |
no_license
|
#!/bin/bash
#
# $Id$
#
PROG=$( basename $0 )
LOCALIP=$( netstat -rn | awk '/^0\./ { print $2 }' )
LOCALIP=${LOCALIP:-"192.168.1.1"}
DBGPRINT=${DBGPRINT:-":"}
DBGPRINT=${DBGPRINT:-"logger -t ${PROG} -p user.debug"}
LOGINF=${LOGINF:-"logger -t ${PROG} -p user.info"}
LOGERR=${LOGERR:-"logger -t ${PROG} -p user.error"}
#
function pingip {
local loss=$( ping -i 0.5 -w 3 -qc 3 "$1" 2>&1 | sed -ne 's/.*, \([0-9][0-9]*\)% packet loss.*/\1/p' )
${DBGPRINT} "Ping to $1 has $loss % loss" >&2
[[ "$loss" = 100 ]] && return 1
return 0
}
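# Usage sketch (addresses are examples only); debug logging can be enabled
# by overriding DBGPRINT before the defaults above are applied:
#   DBGPRINT="logger -t pingtests -p user.debug" ./pingtests 8.8.8.8 1.1.1.1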
if [ -z "$1" ]
then
echo "Usage: $PROG addresses..." >&2
exit 1
fi
for I in $*
do
if pingip ${I}
then
${DBGPRINT} "Can ping ${I}" >&2
exit 0
fi
done
if pingip ${LOCALIP}
then
${DBGPRINT} "Can't ping $* and ping ${LOCALIP} indicates local link ok" >&2
RC=1
else
${LOGERR} "Can't ping ${TESTIP} nor ${LOCALIP}. This is unexpected" >&2
RC=0
fi
exit $RC
# :tabSize=4:indentSize=4:noTabs=true
# vim:ts=4:sw=4
| true |
1ef9f800d2c83312dac8986772d5771be2fd044e
|
Shell
|
songweizhi/Katana_cmds
|
/PacBio_cmds/PacBio_P_inhibens.sh
|
UTF-8
| 1,876 | 2.9375 | 3 |
[] |
no_license
|
# Working directory
cd /srv/scratch/z5039045/PacBio/Falcon/BS107_2.10
# Copy 210WT.samphased.fasta and 210WT.samphased.sam to working directory
cp /srv/scratch/z3452659/ThomasPacBio-Nov15/analysis/2017-07-09.SAMPhasedBLASR/210WT.samphased.fasta .
cp /srv/scratch/z3452659/ThomasPacBio-Nov15/analysis/2017-07-09.SAMPhasedBLASR/210WT.samphased.sam .
# Get bam and bai file for visualization with Tablet
module load samtools/1.2
samtools view -bS 210WT.samphased.sam -o 210WT.samphased.bam
samtools sort 210WT.samphased.bam 210WT.samphased_sorted
samtools index 210WT.samphased_sorted.bam
# The genome assignment of contigs in file 210WT.samphased.fasta
# Ctg_ID         Assignment   Purity(%)          Length(bp)
# ctg210WT       2.10         99.88              3758221
# ctg210WTp1     2.10         99.76              237764
# ctg210WTp3     2.10         97.25              70382
# ctg210WTN2     BS107        99.96              1234841
# ctg210WTN4     BS107        99.98              1150623
# ctg210WTN1     BS107        99.88              794630
# ctg210WTN3     BS107        100.0              456367
# ctg210WTp1N1   BS107        100.0              117649
# ctg210WTp1N2   BS107        100.0              95429
# ctg210WTp3N    BS107        99.83              68114
# ctg210WTp2     Ambiguous    46.07(from 2.10)   94479
# ctg210WTp2N    Ambiguous    57.2(from 2.10)    94471
# Get reads not mapped to 2.10 contigs (ctg210WT, ctg210WTp1 and ctg210WTp3)
module load python/3.5.2
python3 get_reads_from_sam.py -sam 210WT.samphased.sam -ctg ctgs_assigned_to_2.10.txt -option 0 -out reads_not_mapped_to_2.10_ctgs.fasta
# Get reads mapped to 2.10 contigs (ctg210WT, ctg210WTp1 and ctg210WTp3)
python3 get_reads_from_sam.py -sam 210WT.samphased.sam -ctg ctgs_assigned_to_2.10.txt -option 1 -out reads_mapped_to_2.10_ctgs.fasta
# get_reads_from_sam.py help information
# usage: get_reads_from_sam.py [-h] -sam SAM -ctgs CTGS -option OPTION -out OUT
# arguments:
#   -sam      Input sam file
#   -ctgs     Contig list
#   -option   '0' to export unmapped reads, '1' to export mapped reads
#   -out      Output fasta file
| true |
394433682b5bc1403d60dac9f21a28de73508ad8
|
Shell
|
Zeitvertreib/dotfiles
|
/shell/concepts/virtual_confs.sh
|
UTF-8
| 869 | 3.71875 | 4 |
[] |
no_license
|
#!/bin/bash
# create a virtualenv in the current directory and install the following:
# django for python 3, bpython
# not well tested
set -e # abort on errors
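# Usage sketch (the directory name is only an example):
#   ./virtual_confs.sh myenv
# creates ./myenv with python 3.2, Django 1.7c3 and bpython installed.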
echo arg 1 = :$1: arg 2 = :$2: arg 3 = :$3:
# varfuervenv=$1
# function doit {
# shopt -s dotglob
pip_version="pip-3.2"
# echo arg 1 = :$1: arg 2 = :$2: arg 3 = :$3:
#
if [ $# -eq 1 ]
then
virtualenv -p /usr/bin/python3.2 $1
cd $1
source bin/activate
$pip_version install https://www.djangoproject.com/download/1.7c3/tarball/
$pip_version install bpython
deactivate
else
echo "parameteranzahl nicht korrekt (einer für das verzeichnis verlangt)"
fi
# }
# doit
# if [[ ( -h $dest && $(readlink -n "$dest") != $source ) || -f $dest || -d $dest ]]
# then
# read -p "Overwrite $dest? " answer
# else
# answer=y
# fi
# [[ $answer == y ]] && ln -s -n -f -v -- "$source" "$dest"
| true |
1c8c7e7198a3dcbb0cdbd03778826437b64e7489
|
Shell
|
nickkaczmarek/dotfiles
|
/zshrc
|
UTF-8
| 6,219 | 3.140625 | 3 |
[] |
no_license
|
# clear the pipes (revert to defaults)
emulate -LR zsh
source ~/.git-prompt.sh
setopt PROMPT_SUBST
# homebrew only needs to have this done if we're not on intel architecture
arch=$(/usr/bin/arch)
if [[ "$arch" -eq "arm64" ]]; then
if [[ $(command -v brew) ]]; then
else
eval $(/opt/homebrew/bin/brew shellenv)
fi
fi
test -e "${HOME}/.iterm2_shell_integration.zsh" && source "${HOME}/.iterm2_shell_integration.zsh"
#tab completion
setopt GLOB_COMPLETE
# type in a dir name and enter or ..
setopt AUTO_CD
HISTFILE=${ZDOTDIR:-$HOME}/.zsh_history
# timestamp in unix epoch time and elapsed time of the command
setopt EXTENDED_HISTORY
SAVEHIST=5000
HISTSIZE=2000
# share history across multiple zsh sessions
setopt SHARE_HISTORY
# append to history
setopt APPEND_HISTORY
# adds commands as they are typed, not at shell exit
setopt INC_APPEND_HISTORY
alias c="clear"
alias sz="exec zsh"
alias grep="grep --color=auto"
alias path='echo -e ${PATH//:/\\n} | sort'
alias mypath='echo -e ${MYPATH//:/\\n} | sort'
alias funcs="functions"
alias fnames="funcs + | fgrep -v iterm"
alias shit="emulate -LR zsh"
alias pip=pip3
alias kick-ssh-agent='killall ssh-agent; eval $(ssh-agent)'
alias de="cd ~/Developer"
alias dec="cd ~/Library/Mobile\ Documents/com\~apple\~CloudDocs/Developer"
alias work="cd ~/work"
alias bbdot="bbedit $DOTFILES"
# shamelessly stolen from tyler-keith-thompson
alias ls="exa"
alias l="ls -albhF --icons --git --no-permissions --color=always"
alias cat='bat --theme=Dracula'
alias xcquit="killall Xcode"
alias bbexport="defaults export com.barebones.bbedit ~/Desktop/MyBBEditPreferences.plist"
alias bbimport="defaults import com.barebones.bbedit ~/Desktop/MyBBEditPreferences.plist"
alias dotfiles="cd $DOTFILES"
alias zil="cd work/ZillowMap"
function xcopen() {
# use xcode-select to return whatever Xcode command line tools are defaulted
local xcode_version="$(xcode-select -p | rg '(.+Xcode.+\.app)' -or '$1')"
while test $# -gt 0; do
case "$1" in
-h|--help)
echo "xcopen - open Xcode"
echo " "
echo "xcopen [options] [directory]"
echo " "
echo "options:"
echo "-h, --help show brief help"
echo "--beta open latest Xcode beta"
echo " "
echo "directory:"
echo "Opens in current directory or you can supply one"
return 0
;;
--beta)
xcode_version="/Applications/$(ls -a /Applications | rg Xcode.+Beta | tail -1)"
shift
break
;;
*)
break
;;
esac
done
open -a $xcode_version ${1:-"."} -F
}
function co-authors() {
local ME=`git config --global user.initials`
# Set Initials here
local -A initialsMap
while IFS== read -r key value; do
initialsMap[$key]=$value
done < "${HOME}/.gitpairs"
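# ~/.gitpairs is assumed to hold one key=value entry per line mapping
# initials to a commit trailer, e.g. (illustrative entry only):
#   nk=Co-authored-by: Nick Kaczmarek <nick@example.com>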
# Parse parameters
local parsed=("${(@s/-/)${*}}")
local newline=$'\n'
# NEED TO EXIT IF NO INITIALS
if [ ${#parsed[@]} -eq 1 ]; then
echo "${RED}No initials found." 1>&2;
return 1
fi
local initialsList=("${(@s/ /)parsed[-1]}")
initialsList=(${(L)initialsList})
if [ ${#initialsList[@]} -eq 0 ]; then
echo "${RED}No initials found." 1>&2;
return 1
fi
initialsList=("${(@)initialsList:#$ME}")
coAuthors=""
[ ${#initialsList[@]} -eq 0 ] && return 0;
coAuthors="${newline}"
for initial in $initialsList ; do
if [[ ! -z "${initialsMap[${(L)initial}]}" ]];
then
coAuthors="${coAuthors}${newline}${initialsMap[${(L)initial}]}"
else
echo "${RED}Unknown initials: $initial" 1>&2;
return 1
fi
done;
}
function wip() {
co-authors ${*} || return 1;
git commit -S \
-m "${*}" \
-m "[skip ci] [`basename $(git symbolic-ref -q --short HEAD)`]" \
-m "${coAuthors}"
}
function commit() {
co-authors ${*} || return 1;
git commit -S \
-m "${*}" \
-m "[`basename $(git symbolic-ref -q --short HEAD)`]" \
-m "${coAuthors}"
}
# end tt
function startdemo() {
osascript <<END
tell application "System Events"
set autohide menu bar of dock preferences to true
set dockhidestate to autohide of dock preferences
tell dock preferences to set autohide to true
do shell script "defaults write com.apple.finder CreateDesktop -bool false && killall Finder"
end tell
END
}
function enddemo() {
osascript <<END
tell application "System Events"
set autohide menu bar of dock preferences to false
set dockhidestate to autohide of dock preferences
tell dock preferences to set autohide to false
do shell script "defaults write com.apple.finder CreateDesktop -bool true && killall Finder"
end tell
END
}
if type brew &>/dev/null
then
FPATH="$(brew --prefix)/share/zsh/site-functions:${FPATH}"
## asdf
source /opt/homebrew/opt/asdf/libexec/asdf.sh
autoload -Uz compinit
compinit
fi
setopt PROMPT_SUBST
# allows git autocompletion
# autoload -Uz compinit && compinit
GIT_PS1_SHOWUPSTREAM="verbose"
GIT_PS1_SHOWDIRTYSTATE="auto"
GIT_PS1_SHOWSTASHSTATE="auto"
GIT_PS1_SHOWUNTRACKEDFILES="auto"
GIT_PS1_SHOWCOLORHINTS="auto"
GIT_PS1_DESCRIBE_STYLE="branch"
PROMPT='%(?.%B%F{010}√.%B%F{009}?%?%f) %F{014}%1~%f%F{013}$(__git_ps1)%f %F{011}%(!.||>.|>)%f%b '
RPROMPT='%B%F{012}%*%f%b'
# Search up and down through history
autoload -U up-line-or-beginning-search
autoload -U down-line-or-beginning-search
zle -N up-line-or-beginning-search
zle -N down-line-or-beginning-search
bindkey "^[[A" up-line-or-beginning-search # Up
bindkey "^[[B" down-line-or-beginning-search # Down
if [ -f "${HOME}/.gpg-agent-info" ]; then
. "${HOME}/.gpg-agent-info"
export GPG_AGENT_INFO
export SSH_AUTH_SOCK
export SSH_AGENT_PID
fi
export GPG_TTY=$(tty)
gpgconf --launch gpg-agent
DISABLE_AUTO_TITLE="true"
if [ $ITERM_SESSION_ID ]; then
precmd() {
echo -ne "\033]0;${PWD##*/}\007"
}
fi
. /opt/homebrew/opt/asdf/libexec/asdf.sh
typeset -U PATH # removes duplicate path variables in zsh
autoload -U +X bashcompinit && bashcompinit
complete -o nospace -C $HOME/.asdf/installs/terraform/1.3.7/bin/terraform terraform
| true |
6488bff04fafbeef299375118d9846c506e30042
|
Shell
|
alibaba/GraphScope
|
/python/graphscope/gsctl/scripts/initialize.sh
|
UTF-8
| 735 | 3.3125 | 3 |
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"FSFAP",
"BSD-3-Clause-Clear",
"GPL-1.0-or-later",
"BSD-2-Clause-Views",
"Bitstream-Vera",
"MPL-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"OFL-1.1",
"BSD-3-Clause",
"APAFML",
"0BSD",
"LicenseRef-scancode-free-unknown",
"CC-BY-4.0",
"GPL-3.0-or-later",
"LicenseRef-scancode-python-cwi",
"Zlib",
"Qhull",
"u-boot-exception-2.0",
"MIT",
"ISC",
"GPL-2.0-only",
"LicenseRef-scancode-other-copyleft",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"CC0-1.0",
"GPL-2.0-or-later",
"BSD-2-Clause",
"GCC-exception-3.1",
"ZPL-1.1",
"CC-BY-SA-4.0",
"GPL-3.0-only",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"X11",
"TCL"
] |
permissive
|
script_dir="$(cd "$(dirname "$0")" && pwd)"
source ${script_dir}/lib/get_os_version.sh
source ${script_dir}/lib/log.sh
source ${script_dir}/lib/colors.sh
source ${script_dir}/lib/install_thirdparty_dependencies.sh
source ${script_dir}/lib/install_vineyard.sh
source ${script_dir}/lib/util.sh
## Code here runs inside the initialize() function
## Use it for anything that you need to run before any other function, like
## setting environment variables:
## CONFIG_FILE=settings.ini
##
## Feel free to empty (but not delete) this file.
bash_source_dir="$(dirname -- "$(readlink -f "${BASH_SOURCE}")")"
if [ -f "$HOME/.graphscope_env" ]; then
source $HOME/.graphscope_env
fi
log "Read the env: GRAPHSCOPE_HOME=${GRAPHSCOPE_HOME:-}"
| true |
57b8c0c4e5351415aa3bfbdcd7fefba4a9aef2e8
|
Shell
|
mphe/GameLib
|
/checkentities.sh
|
UTF-8
| 180 | 2.984375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
cd "$(dirname "$(readlink -f "$0")")"
DIR="${1:-./assets/entities}"
FAIL=0
for i in "$DIR/"*; do
./build/bin/checkentcfg "$i" > /dev/null || FAIL=1
done
exit $FAIL
| true |
16074a8459929d57e99e40fe3496cf83f7b717ac
|
Shell
|
hsulab/DailyScripts
|
/server/thomas/vasp_BM.sh
|
UTF-8
| 1,750 | 3.359375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash -l
# 1. Force bash as the executing shell.
#$ -S /bin/bash
# 2. Request amount of wallclock time (format hours:minutes:seconds).
#$ -l h_rt=3:00:00
# 3. Request 1 gigabyte of RAM per process (must be an integer).
#$ -l mem=1G
# 4. Request 10 gigabyte of TMPDIR space per node (default is 10 GB).
#$ -l tmpfs=10G
# 5. Set the name of the job.
#$ -N CdS_cell_PBE
# 6. Select the MPI parallel environment and number of processes.
#$ -pe mpi 96
# 7. Set the working directory.
#$ -cwd
#add to QUB_C
#$ -P Gold
#$ -A QUB_chem
# 8. Run the MPI job, gerun is a wrapper on Thomas.
echo `date "+%Y-%m-%d %H:%M:%S"` `pwd` >> $HOME/submitted
dimension=3 # optimize 2D (x,y) or 3D (x,y,z)
echo "${dimension}D Lattice Optimization using BM Equation" > print-out
if [ -d CONTCARs ]
then
    rm -r CONTCARs
fi
mkdir CONTCARs
echo "${dimension}D Lattice Optimization using BM Equation" > fitting_data
cp POSCAR POSCAR_origin
# BM optimization
for s in `seq 0.9 0.01 1.1`
do
# change lattice in POSCAR
scale=$(printf "%2.12f\n" $s)
awk '{if(NR>=3 && NR<='$((dimension + 2))') {{for(i=1;i<=3;i++) printf "%14.8f",$i*'$scale'} {printf "\n"}} else print $0}' POSCAR_origin > POSCAR
echo "Ions Optimization with Scaled $scale Lattice" >> print-out
gerun $VASPPATH 2>&1 >> print-out
# get energy and back up
energy=$(grep "sigma->" OUTCAR | tail -n 1)
energy=${energy##*=}
#suffix=$((s * 100)) # Warning! Change original variable.
suffix=$s # Warning! Change original variable.
cp CONTCAR CONTCARs/CONTCAR_$suffix
printf "%12f %12f\n" $scale $energy >> fitting_data
done
cp POSCAR_origin POSCAR
BM_FIT.py >> print-out
echo `date "+%Y-%m-%d %H:%M:%S"` `pwd` >> $HOME/finished
| true |
b6d92056e68fc1da71962652bb97304416808c64
|
Shell
|
riboseinc/fuzzbsd
|
/fuzzbsd.sh
|
UTF-8
| 6,691 | 4 | 4 |
[] |
no_license
|
#!/bin/sh
#
# fuzzbsd.sh v1.1
#
# FuzzBSD, a filesystem image fuzzing script to test BSD kernels.
#
# A valid filesystem image gets corrupted by changing the hex value of a
# single offset 255 times (0x00 - 0xff). FuzzBSD then loops through each of
# the 255 images, configures a vnode/memory disk and attempts a mount.
# If the image fails to mount, the disk is unconfigured and the next image
# is tried. If the kernel panics, the logfile shows which offset and which
# hex value caused the panic.
#
# 'stringinject' from the retrace project (https://github.com/riboseinc/retrace)
# is used to corrupt the filesystem images.
#
# The following BSDs are supported:
# - NetBSD 7.1
# - OpenBSD 6.1
# - FreeBSD 11
# - DragonFlyBSD 4.8
# - Darwin 16.7.0 / macOS Sierra (COMING)
# - FreeNAS (COMING)
# - MirOS (COMING)
#
# The following filesystem image types are currently supported:
# - msdosfs
# - NTFS
# - ISO 9660
# - ext2fs
# - UDF
# - ZFS (COMING)
#
# The filesystem images used have been pre-made on CentOS 7 with the
# 'gen-fs-images/gen-fs-images.sh' script. This script allows you to recreate
# the filesystem images yourself if you don't trust the ones I have made. Not
# every BSD supports each filesystem out of the box, so it really depends on
# which flavor of BSD you are running FuzzBSD on.
#
# 255 iterations of a filesystem image are made per run.
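#
# For illustration only: the effect of one stringinject step can be
# approximated with dd (a sketch, not how stringinject is invoked):
#   printf '\x41' | dd of=image.iso bs=1 seek=128 count=1 conv=notrunc
# overwrites the single byte at offset 128 with 0x41, leaving the rest of
# the image intact.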
readonly __progname="$(basename $0)"
readonly log="log.txt"
readonly header="2048"
readonly mount="fuzzbsd"
usage() {
echo "usage: ${__progname} <type [msdos|ntfs|ext2fs|cd9660|udf]> <fs image> [offset]" >&2
}
confdisk_fbsd() {
local conf_dev="$1"
local conf_fs_image="$2"
mdconfig -a -t vnode -u "${conf_dev}" -f "${conf_fs_image}" >/dev/null 2>&1 && \
echo "/dev/${conf_dev}"
}
confdisk_onbsd() {
local conf_dev="$1"
local conf_fs_image="$2"
vnconfig -v "${conf_dev}" "${conf_fs_image}" >/dev/null 2>&1 && \
echo "/dev/${conf_dev}"
}
confdisk_dfbsd() {
local conf_dev="$1"
local conf_fs_image="$2"
vnconfig -v "${conf_dev}" "${conf_fs_image}" >/dev/null 2>&1 && \
echo "/dev/${conf_dev}"
}
unmount_mountpoint() {
local mnt="$1"
local num_mount=$(mount | grep ${mnt} | wc -l | tr -d ' ')
if [ "${num_mount}" ]; then
i=0
while [ "${i}" -lt "${num_mount}" ]; do
umount -f "${mnt}" >/dev/null 2>&1
i="$(expr ${i} + 1)"
done
fi
}
get_free_md_disk() {
local md_dev="md"
local md_max="256"
local i=0
# use md0 if there are no memory disks in use
mdconfig -l -v | grep -q "${md_dev}"
if [ $? -ne 0 ]; then
echo "${md_dev}${i}"
return
fi
while [ "${i}" -lt "${md_max}" ]; do
mdconfig -l -v | grep -wq "^${md_dev}${i}"
if [ $? -ne 0 ]; then
echo "${md_dev}${i}"
break
fi
i="$(expr ${i} + 1)"
done
echo "no memory disks available" >&2
}
get_free_vn_disk() {
vnconfig -l | grep -q "not in use" || \
echo "no vnode disks available" >&2
vnconfig -l | awk '/not in use/ { print $1 }' | cut -d: -f1 | head -1
}
unconfigure_all_md_disks() {
for unconf_md in $(mdconfig -l); do
mdconfig -d -u "${unconf_md}" -o force >/dev/null 2>&1
done
}
unconfigure_all_vn_disks() {
for unconf_vn in $(vnconfig -l | grep -v 'not in use' | cut -d: -f 1); do
vnconfig -u "${unconf_vn}" >/dev/null 2>&1
done
}
# shared
vn=0
mntdir="/mnt"
if [ "$(id -u)" -ne 0 ]; then
echo "need root to mount" >&2
exit 1
fi
readonly stringinject="stringinject"
which "${stringinject}" >/dev/null 2>&1
if [ $? -ne 0 ]; then
echo "cannot execute '${stringinject}', hint: compile 'stringinject.c'" >&2
exit 1
fi
if [ ! "$2" ]; then
usage
exit 1
fi
fuzztype="$1"
case "${fuzztype}" in
ntfs)
;;
msdos)
;;
ext2fs)
;;
cd9660)
;;
udf)
;;
*)
usage
exit 1
;;
esac
# flavor specific exceptions
readonly uname="$(uname)"
case "${uname}" in
OpenBSD)
part="c"
confdisk="confdisk_onbsd"
;;
NetBSD)
part="a"
confdisk="confdisk_onbsd"
;;
FreeBSD)
echo "${fuzztype}" | grep -q ^ntfs
if [ $? -eq 0 ]; then
echo "${fuzztype} fuzzing is not supported on ${uname}"
exit 1
fi
confdisk="confdisk_fbsd"
vn=1
;;
DragonFly)
part=""
confdisk="confdisk_dfbsd"
;;
Darwin)
mntdir="/Volumes"
;;
*)
echo "unsupported operating system"
exit 1
;;
esac
image="$2"
if [ ! -f "${image}" ]; then
echo "cannot find image '${image}'" >&2
exit 1
fi
offset=0
if [ "$3" ]; then
offset="$3"
check_offset="$(echo ${offset} | tr -d '[0-9]')"
if [ ! -z "${check_offset}" ]; then
echo "invalid offset" >&2
exit 1
fi
fi
if [ "${offset}" -gt "${header}" ]; then
echo "offset cannot exceed header size (${header})" >&2
exit 1
fi
readonly mountpoint="${mntdir}/${mount}"
if [ ! -d "${mountpoint}" ]; then
mkdir "${mountpoint}"
if [ $? -ne 0 ]; then
echo "mkdir ${mountpoint} failed" >&2
exit 1
fi
fi
echo "[*] BSD: '${uname}'"
echo "[*] image source: '${image}'"
echo "[*] mountpoint: '${mountpoint}'"
echo "[*] offset: '${offset}'"
echo "[*] logfile: '${log}'"
while [ "${offset}" -lt "${header}" ]; do
echo "[*] cleaning up previously used images and logfile"
rm -f image-fuzzbsd* "${log}"
echo "[*] running '${stringinject}' on '${image}'" | tee -a "${log}"
"${stringinject}" "${image}" "image-fuzzbsd-${fuzztype}-${offset}" -h "${offset}"
if [ $? -ne 0 ]; then
echo "'${stringinject}' failed" >&2
exit 1
fi
for fs_image in $(ls image-fuzzbsd*); do
# free up the mountpoint
unmount_mountpoint "${mountpoint}"
# free up all the memory disks and use the first available one
if [ "${vn}" -eq 0 ]; then
unconfigure_all_vn_disks
dev="$(get_free_vn_disk)"
else
unconfigure_all_md_disks
dev="$(get_free_md_disk)"
fi
if [ -z "${dev}" ]; then
exit 1
fi
echo "[*] $(date) confdisk '${dev}' '${fs_image}'" | tee -a "${log}"
dev_path="$(${confdisk} ${dev} ${fs_image})"
if [ -z "${dev_path}" ]; then
# something went wrong so we can skip this image
echo "confdisk failed" >&2
continue
fi
# sync before a (potential) crash/panic
sync
echo "[*] $(date) mount '${fs_image}' '${dev_path}'" | tee -a "${log}"
# this might cause a panic
if [ "${vn}" -eq 0 ]; then
mount -t "${fuzztype}" "${dev_path}${part}" "${mountpoint}"
else
mount -t "${fuzztype}" "${dev_path}" "${mountpoint}"
fi
# this might cause a panic too
echo "[*] $(date) umount '${mountpoint}'" | tee -a "${log}"
# if the mount was successful then proceed to umount
unmount_mountpoint "${mountpoint}"
if [ "${vn}" -eq 0 ]; then
unconfigure_all_vn_disks
else
unconfigure_all_md_disks
fi
done
offset="$(expr ${offset} + 1)"
done
echo "[*] cleaning up images and logfile because no crash occurred"
rm -f image-fuzzbsd* "${log}"
exit 0
| true |
f133343d632007506e862c283fa3352a861269be
|
Shell
|
rfbarraza/dotfiles
|
/.config/yadm/bootstrap.d/scripts/lib/file.sh
|
UTF-8
| 8,034 | 4.21875 | 4 |
[] |
no_license
|
##############################################################################
# #
# ░█▀▀░▀█▀░█░░░█▀▀░░░░█▀▀░█░█ #
# ░█▀▀░░█░░█░░░█▀▀░░░░▀▀█░█▀█ #
# ░▀░░░▀▀▀░▀▀▀░▀▀▀░▀░░▀▀▀░▀░▀ #
# #
# #
# Functions and constants for testing conditions about the filesystem. #
# #
##############################################################################
# ---
# TOC
# ---
#
# ## Utilities
# ## Files
# ## Directories
#
if [[ -z "$__DOT_FILE__" ]]; then
readonly __DOT_FILE__="__DOT_FILE__"
source "$DOT_SCRIPT_DIR/lib/include.sh"
source "$DOT_BOOL_SH"
source "$DOT_OUTPUT_SH"
## Utilities
#######################################
# Prints "device ID:inode" of file for *nix systems
#
# Arguments:
# Path to file
# Outputs:
# "device ID:inode"
dot_file_link_stat() {
local os="$(uname)"
if [[ "$os" == "Darwin" ]]; then
stat -L -f %d:%i "$1"
else
stat -L -c %d:%i "$1"
fi
}
#######################################
# Backsup a file to the next open backup file, N, with ~N for N > 1
#
# Arguments:
# Path to file to back up
# Backup directory (default is the directory of the file being backed up)
# Outputs:
# Path to backup file
#######################################
dot_file_backup() {
local -r file="$1"
local dest="$(dirname "$file")"
if [[ $# -gt 1 ]]; then
dest="$2"
fi
local -r file_name="$(basename "$file")"
local -r last_file="$(basename "$(find "$dest" -maxdepth 1 -name \
"$file_name~*" | sort | tail -1)")"
local next_backup_name=""
if [[ ! -z "$last_file" ]]; then
local -r last_number="$(echo "$last_file" | grep -o "[0-9]\+$")"
if [[ ! -z "$last_number" ]]; then
local -r next_number=$((last_number+1))
local -r last_file_sans_number="$(echo "$last_file" | \
sed -E 's/[0-9]+$//')"
next_backup_name="${last_file_sans_number}${next_number}"
else
next_backup_name="${last_file}2"
fi
else
next_backup_name="${file_name}~"
fi
local next_backup="${dest}/${next_backup_name}"
mv "$file" "$next_backup"
echo "$next_backup"
}
#######################################
# Restores a file from the given backup
#
# Arguments:
# Path to file to back up
# Backup directory (default is the directory of the file being backed up)
# Returns:
# 0 if successful, 1 otherwise
######################################
dot_file_restore_backup() {
local -r dest="$1"
local -r src="$2"
if [[ -d "$dest" ]]; then
rm -Rf "$dest"
mv "$src" "$dest"
elif [[ -f "$dest" ]]; then
rm "$dest"
mv "$src" "$dest"
else
return 1
fi
return 0
}
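# Usage sketch for the two functions above (the path is an illustrative
# assumption): back a file up, then restore it from that backup:
#   backup="$(dot_file_backup "$HOME/.zshrc")" # -> ~/.zshrc~, ~/.zshrc~2, ...
#   dot_file_restore_backup "$HOME/.zshrc" "$backup"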
## Files
#######################################
# Returns 0 if file is symlinked to target
#
# Arguments:
# Path to file
# Target path
# Outputs:
# None
# Returns:
# 0 if file is symlinked to target
#######################################
dot_file_is_symlink() {
if [[ -f "$1" && -L "$1" ]]; then
local file_stat="$(dot_file_link_stat "$1")"
local target_stat="$(dot_file_link_stat "$2")"
[[ "$file_stat" == "$target_stat" ]]
else
return 1
fi
}
#######################################
# Checks if a file is symlinked to a target and returns one of several
# results
#
# Arguments:
# Path to file
# Link target
# Returns:
# 0 if file exists and links to target
# 1 if file exists but does not link to target
# 2 if file is a directory
# 3 if file does not exist
#######################################
dot_file_check_symlink() {
if [[ -f "$1" ]]; then
dot_file_is_symlink "$1" "$2"
elif [[ -d "$1" ]]; then
return 2
else
return 3
fi
}
#######################################
# Installs symlink to target at the specified path
#
# Globals:
# DOT_IS_DRYRUN
# Arguments:
# Target
# Path to symlink
# Outputs:
# Installation status for dot_dryruns and failures
#######################################
dot_file_install_symlink() {
if [[ $DOT_IS_DRYRUN == $DOT_TRUE ]]; then
dot_dryrun ln -s "$1" "$2"
else
ln -s "$1" "$2"
fi
}
#######################################
# Ensures that a symlink to the dotfiles file path is installed or outputs the
# appropriate status message
#
# Arguments:
# Path to file
# Link target
# Is the file a directory (default DOT_FALSE)
# Outputs:
# Status messages including failures
#######################################
dot_file_ensure_symlink_installed() {
local -r symlink="$1"
local -r target="$2"
local isDirectory="$DOT_FALSE"
if [[ $# -gt 2 ]]; then
isDirectory="$3"
fi
if [[ "$isDirectory" == "$DOT_TRUE" ]]; then
dir_check_symlink "$symlink" "$target"
else
dot_file_check_symlink "$symlink" "$target"
fi
case $? in
0)
dot_puts_info "$symlink is already linked to dotfiles installation."
;;
1)
if [[ $DOT_IS_FORCE == $DOT_TRUE ]]; then
local backup
backup="$(dot_file_backup "$symlink")"
local -r backup_result="$?"
if [[ $backup_result -eq 0 ]]; then
dot_puts_info "Backing up $symlink to $backup."
dot_puts "Linking $target via $symlink."
dot_file_install_symlink "$target" "$symlink"
if [[ $? -ne 0 ]]; then
dot_warn "Link failed."
fi
else
dot_warn "$symlink is not a part of dotfiles installation."
dot_warn "Could not backup $symlink."
dot_warn "$symlink could NOT be updated."
fi
else
dot_warn "$symlink is pointing outside of dotfiles installation."
fi
;;
2)
if [[ "$isDirectory" == "$DOT_TRUE" ]]; then
dot_warn "$symlink is a file when a directory was expected."
else
dot_warn "$symlink is a directory when a file was expected."
fi
;;
3)
dot_puts "Linking $target via $symlink."
dot_file_install_symlink "$target" "$symlink"
if [[ $? -ne 0 ]]; then
dot_warn "Link failed."
fi
;;
*)
dot_warn "Symbolic link check returned an unexpected value: $?"
;;
esac
}
## Directories
#######################################
# Returns 0 if directory is symlinked to target
#
# Arguments:
# Path to directory
# Target
# Returns:
# 0 if directory is symlinked to target
#######################################
dir_is_symlink() {
if [[ -d "$1" && -L "$1" ]]; then
local dir_stat="$(dot_file_link_stat "$1")"
local target_stat="$(dot_file_link_stat "$2")"
[[ "$dir_stat" == "$target_stat" ]]
else
return 1
fi
}
#######################################
# Checks if a directory is symlinked to a target and returns one of several
#
# Arguments:
# Path to directory
# Link target
# Returns:
# 0 if directory exists and links to target
# 1 if directory exists but does not link to target
# 2 if directory is a file
# 3 if directory does not exist
#######################################
dir_check_symlink() {
if [[ -d "$1" ]]; then
dir_is_symlink "$1" "$2"
elif [[ -f "$1" ]]; then
return 2
else
return 3
fi
}
#######################################
# Ensures that a symlink to the dotfiles directory path is installed or
# outputs the appropriate status message
#
# Arguments:
# Path to directory
# Link target
# Outputs:
# Status messages including failures
#######################################
dot_dir_ensure_symlink_installed() {
dot_file_ensure_symlink_installed "$1" "$2" "$DOT_TRUE"
}
fi
| true |
3c32ccb434beb17fdc05a9f1aa05097426442322
|
Shell
|
DannyBen/rush-repo
|
/speedtest/main
|
UTF-8
| 396 | 3.078125 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
source "$REPO_PATH/lib.sh"
install_function() {
pushtmp
# Version can be found here: https://www.speedtest.net/apps/cli
version=1.2.0
url="https://install.speedtest.net/app/cli/ookla-speedtest-${version}-linux-x86_64.tgz"
curl -L "$url" | tar xvz "speedtest"
sudo install -m 755 speedtest /usr/local/bin/
rm speedtest
popd
}
general_install_helper speedtest
| true |
72a05ebd49f48dedb6360bebf909b0f0ca2cecba
|
Shell
|
jprjr/htpasswd-auth-server
|
/bin/htpasswd-auth-server
|
UTF-8
| 2,443 | 3.828125 | 4 |
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
set -e
# handy functions {{{
function abspath_portable() {
# BSD readlink is different from GNU readlink, ugh.
[[ $1 = /* ]] && echo "$1" || echo "$PWD/${1#./}"
}
function is_integer?() {
[[ $@ =~ ^-?[0-9]+$ ]]
}
function abs_path() {
local abs_path=$(abspath_portable "$0")
printf "%s\n" "${abs_path}"
}
function script_abs_path() {
local abs_path=$(abs_path)
local script_abs_path=$(readlink "${abs_path}" || printf "%s\n" "${abs_path}")
printf "%s\n" "${script_abs_path}"
}
function script_dir() {
local abs_path=$(abs_path)
local script_dir=$(dirname "${abs_path}")
printf "%s\n" "${script_dir}"
}
function script_abs_dir() {
local abs_path=$(abs_path)
local script_abs_path=$(script_abs_path)
local script_abs_dir=$(cd "$(dirname "${script_abs_path}")" && pwd -P)
printf "%s\n" "${script_abs_dir}"
}
function script_name() {
local abs_path=$(abs_path)
local script_abs_dir=$(script_abs_dir)
local script_name="${abs_path#$script_abs_dir/}"
printf "%s\n" "${script_name}"
}
function script_abs_name() {
local script_abs_path=$(script_abs_path)
local script_abs_dir=$(script_abs_dir)
local script_abs_name="${script_abs_path#$script_abs_dir/}"
printf "%s\n" "${script_abs_name}"
}
# }}}
function usage() { # {{{
printf "Usage:\n %s [-h] [-l /path/to/lua]\n" "$(script_name)"
} # }}}
install_dir=$(dirname "$(script_abs_dir)")
while getopts "hl:" opt; do
case "$opt" in
l) lua_impl=$OPTARG ;;
h) usage ; exit ;;
\?) usage; exit ;;
:) printf "Option -%s requires in argument\n" "${OPTARG}" >&2 ; exit 1;;
esac
done
shift $((OPTIND-1))
if [ -z "${lua_impl}" ] ; then
if command -v lua >/dev/null 2>&1; then
lua_impl=lua
elif command -v luajit >/dev/null 2>&1; then
lua_impl=luajit
else
printf "Unable to find a suitable lua - please specify with -l /path/to/lua\n"
exit 1
fi
fi
if [ -e "${install_dir}/lua_modules" ] ; then
lua_major_ver=$(${lua_impl} -e "print(_VERSION:match('%d+%.%d+'))")
export LUA_PATH="${install_dir}/lua_modules/share/lua/${lua_major_ver}/?.lua;${install_dir}/lua_modules/share/lua/${lua_major_ver}/?/init.lua;./?.lua;"
export LUA_CPATH="${install_dir}/lua_modules/lib/lua/${lua_major_ver}/?.so;"
fi
cd "${install_dir}"
exec "${lua_impl}" "${install_dir}/bin/htpasswd-auth-server.lua" "$@"
| true |
f5c6b9a4b9eef54dc2798be92fdf862c4011f3fa
|
Shell
|
webmaster-zundux/browser-tests
|
/test-remotely.sh
|
UTF-8
| 958 | 2.9375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
if [[ $1 = '' ]]
then
# run in series due to saucelabs concurrency limits
echo "### running tests against all browsers"
set -x
nightwatch --config nightwatch-ci.js --env chrome-mac
nightwatch --config nightwatch-ci.js --env chrome-windows
nightwatch --config nightwatch-ci.js --env safari-9
nightwatch --config nightwatch-ci.js --env safari-11
nightwatch --config nightwatch-ci.js --env safari-12
nightwatch --config nightwatch-ci.js --env ie-9
nightwatch --config nightwatch-ci.js --env ie-11
nightwatch --config nightwatch-ci.js --env edge
nightwatch --config nightwatch-ci.js --env samsung-S3
nightwatch --config nightwatch-ci.js --env samsung-G4
nightwatch --config nightwatch-ci.js --env iPhone-5s
nightwatch --config nightwatch-ci.js --env iPhone-X
nightwatch --config nightwatch-ci.js --env kindleFire
else
echo "### running tests against $1"
nightwatch --config nightwatch-ci.js --env $1
fi
| true |
e43220d3180262e19167d1af817eeff4ad9c8211
|
Shell
|
smherwig/py-logshard
|
/kill_tailfile_selects_cronjob.sh
|
UTF-8
| 270 | 2.984375 | 3 |
[] |
no_license
|
#!/bin/sh
# usage: kill_tailfile_selects_cronjob.sh
# kills the tailfile_select.pl's that are tailing the log file named yyyy-mm-dd* from two days ago
yyyy_mm_dd=$(date --date='2 days ago' +%Y-%m-%d)
ps aux | grep tailfile_select.pl | grep $yyyy_mm_dd | awk '{ print $2 }' | xargs kill
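# Intended to run from cron; a hypothetical crontab entry (the schedule and
# install path are assumptions):
#   0 3 * * * /usr/local/bin/kill_tailfile_selects_cronjob.sh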
| true |
6defbbf38f73e66f0577cd3992be7c7eb7143dde
|
Shell
|
RORBrains/vagrant
|
/bootstrap.sh
|
UTF-8
| 1,340 | 3.15625 | 3 |
[
"MIT"
] |
permissive
|
#! /usr/bin/env bash
# Update system
apt-get update -y
# Create a swap file
fallocate -l 2G /swapfile
chmod 600 /swapfile
mkswap /swapfile
swapon /swapfile
echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab
# RVM
echo "Installing RVM..."
gpg --keyserver hkp://keys.gnupg.net --recv-keys 409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB
\curl -sSL https://get.rvm.io | bash -s stable
source /etc/profile.d/rvm.sh
sudo usermod -a -G rvm vagrant
# Ruby
echo "Installing Ruby 2.4..."
rvm install 2.4
# NodeJS
echo "Installing Node..."
curl -sL https://deb.nodesource.com/setup_8.x | sudo -E bash -
apt-get install -y nodejs
apt-get install -y build-essential
# Yarn
echo "Installing Yarn..."
curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -
echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list
apt-get update -y && apt-get install yarn -y
# Install postgres
echo "Installing Postgres..."
apt-get install postgresql postgresql-contrib libpq-dev -y
echo "Creating user vagrant for Postgres..."
sudo -u postgres bash -c "psql -c \"CREATE ROLE vagrant LOGIN PASSWORD '';\""
sudo -u postgres bash -c "psql -c \"ALTER ROLE vagrant CREATEDB CREATEROLE;\""
sudo -u vagrant createdb
# Install some gems
gem install rails html2slim
| true |
3c443ccba1a9955f59cb19467df44ed1e3130561
|
Shell
|
iodic/vagento
|
/vagento.sh
|
UTF-8
| 15,136 | 3.828125 | 4 |
[] |
no_license
|
#!/bin/bash
## BEGIN OUTPUT METHODS
# Echo in bold font if stdout is a terminal
ISTTY=0; if [ -t 1 ]; then ISTTY=1; fi
bold () { if [ $ISTTY -eq 1 ]; then tput bold; fi; }
red () { if [ $ISTTY -eq 1 ]; then tput setaf 1; fi; }
green () { if [ $ISTTY -eq 1 ]; then tput setaf 2; fi; }
yellow () { if [ $ISTTY -eq 1 ]; then tput setaf 3; fi; }
cyan () { if [ $ISTTY -eq 1 ]; then tput setaf 6; fi; }
normalize () { if [ $ISTTY -eq 1 ]; then tput sgr0; fi; }
echo_bold () { echo -e "$(bold)$1$(normalize)"; }
echo_underline () { echo -e "\033[4m$1$(normalize)"; }
echo_color () { echo -e "$2$1$(normalize)"; }
function echo_title {
title=$1
length=$((${#title}+30))
echo ""
for i in {1..3}
do
if [ $i = 2 ]; then
echo_bold "-------------- $title --------------"
else
COUNTER=0
output=""
while [ $COUNTER -lt $length ]; do
output="$output-"
COUNTER=$(($COUNTER + 1))
done
echo_bold $output
fi
done
printf "\n\n"
}
pager=${PAGER:-$(which pager 2> /dev/null)}
if [ -z "$pager" ]; then
pager=less
fi
## END OUTPUT METHODS
BASE_DIR=$(pwd)
SETTINGS_FILE="Vagentofile"
# Controller is the first, action is the second argument
# --------------------
CONTROLLER=$1
ACTION=$2
CONFIG_LOADED=0
if [ -f "$BASE_DIR/$SETTINGS_FILE" ]; then
source $BASE_DIR/$SETTINGS_FILE
CONFIG_LOADED=1
fi
# Options are loaded
# --------------------
while getopts c: option
do
case "${option}"
in
c) CLEANDB=${OPTARG};;
esac
done
THEME_DIR="$BASE_DIR/skin/frontend/$PROJECT/default"
VERSION="0.8.1"
SCRIPT=${0##*/}
USAGE="\
Vagento bash (v$VERSION) by $(green)StuntCoders doo$(normalize)
__ __ _
\ \ / /_ _ __ _ ___ _ __ | |_ ___
\ \ / / _' |/ _' |/ _ \ '_ \| __/ _ \\
\ V / (_| | (_| | __/ | | | || (_) |
\_/ \__,_|\__, |\___|_| |_|\__\___/
|___/
Global Commands:
$SCRIPT <command> [<options>]
----------------------------------------------------------------
$(green)help$(normalize) List commands with short description
$(green)setup$(normalize) Set configuration for the project
$(green)install magento$(normalize) Install Magento in working directory
$(green)install magento clean$(normalize) Install Magento on clean database
$(green)install magento sample$(normalize) Load sample data for Magento
$(green)install wp$(normalize) Install fresh WordPress
$(green)install grunt$(normalize) Set Grunt tasks for defined theme
$(green)magento list modules$(normalize)       Lists all installed Magento modules
$(green)magento list web-settings$(normalize) Lists all DB configuration
$(green)magento load-db name.sql$(normalize) Remove old and reload a new DB
$(green)magento set admin$(normalize) Change password for admin to m123123
"
LIST="\
Vagento list of commands:
"
##################################
#### DEFINE ALL THE FUNCTIONS ####
### PROBLEM SOLVING METHODS
function setup_configuration {
clear
echo_title "PROJECT SETUP"
echo "Please, enter project name in small caps: "
read PROJECT
echo "Please, domain name (if left empty, default will be $PROJECT.local): "
read DOMAIN
if [ -z "$DOMAIN" ]; then
DOMAIN="$PROJECT.local"
fi
echo "What is your available IP address for '$PROJECT': "
read IP
echo "Please enter WordPress folder relative path (side, seite, site, sajt, etc...): "
read SITE_FOLDER
echo "Please enter default currency: "
read CURRENCY
echo "Please enter default locale (en_EN, de_DE, nb_NO, etc...): "
read LOCALE
CONF=$(cat <<EOF
#!/bin/bash
PROJECT="$PROJECT"
DOMAIN="$DOMAIN"
IP="$IP"
HOSTS="y"
SITE_FOLDER="$SITE_FOLDER"
CURRENCY="$CURRENCY"
LOCALE="$LOCALE"
ALREADY_CONFIGURED="y"
EOF
)
sudo bash -c "echo '$CONF' > $BASE_DIR/$SETTINGS_FILE"
}
function quick_setup_configuration {
CONF=$(cat <<EOF
#!/bin/bash
PROJECT="$1"
DOMAIN="$2"
IP="$3"
HOSTS="y"
SITE_FOLDER="$4"
CURRENCY="$5"
LOCALE="$6"
ALREADY_CONFIGURED="y"
EOF
)
sudo bash -c "echo '$CONF' > $BASE_DIR/$SETTINGS_FILE"
}
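# Usage sketch for quick_setup_configuration (all argument values are
# illustrative): project, domain, IP, WP folder, currency, locale:
#   quick_setup_configuration myshop myshop.local 192.168.33.10 site EUR de_DE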
function install_magento {
cd $BASE_DIR
mysql -u root -e "CREATE DATABASE IF NOT EXISTS magentodb"
mysql -u root -e "GRANT ALL PRIVILEGES ON magentodb.* TO 'magentouser'@'localhost' IDENTIFIED BY 'password'"
mysql -u root -e "FLUSH PRIVILEGES"
if [ ! -f "$BASE_DIR/index.php" ]; then
wget http://www.magentocommerce.com/downloads/assets/1.8.1.0/magento-1.8.1.0.tar.gz
tar -zxvf magento-1.8.1.0.tar.gz
wget http://www.magentocommerce.com/downloads/assets/1.6.1.0/magento-sample-data-1.6.1.0.tar.gz
tar -zxvf magento-sample-data-1.6.1.0.tar.gz
mv magento-sample-data-1.6.1.0/media/* magento/media/
mv magento-sample-data-1.6.1.0/magento_sample_data_for_1.6.1.0.sql magento/data.sql
mv magento/* magento/.htaccess* .
chmod -R o+w media var
mysql -h localhost -u magentouser -ppassword magentodb < data.sql
chmod o+w var var/.htaccess app/etc
rm -rf magento/ magento-sample-data-1.6.1.0/ magento-1.8.1.0.tar.gz magento-sample-data-1.6.1.0.tar.gz data.sql
fi
if [ ! -f $(get_base_dir "/app/etc/local.xml") ]; then
php -f /vagrant/install.php -- \
--license_agreement_accepted "yes" \
--locale "$LOCALE" \
--timezone "Europe/Budapest" \
--default_currency "$CURRENCY" \
--db_host "localhost" \
--db_name "magentodb" \
--db_user "magentouser" \
--db_pass "password" \
--url "$DOMAIN" \
--use_rewrites "yes" \
--use_secure "no" \
--secure_base_url "" \
--use_secure_admin "no" \
--admin_firstname "Dejan" \
--admin_lastname "Jacimovic" \
--admin_email "dejan.jacimovic@gmail.com" \
--admin_username "admin" \
--admin_password "m123123"
fi
n98-magerun.phar config:set web/seo/use_rewrites 1
}
function clean_magento_db {
# Drop and create DB
mysql -u root -e "DROP DATABASE magentodb"
mysql -u root -e "CREATE DATABASE IF NOT EXISTS magentodb"
mysql -u root -e "GRANT ALL PRIVILEGES ON magentodb.* TO 'magentouser'@'localhost' IDENTIFIED BY 'password'"
mysql -u root -e "FLUSH PRIVILEGES"
}
function install_magento_defaults {
# Set administrator's new password
mysql -u root -e "UPDATE magentodb.admin_user SET password=CONCAT(MD5('qXm123123'), ':qX') WHERE username='admin';"
# Set project theme
mysql -u root -e "DELETE FROM magentodb.core_config_data WHERE path='design/package/name';"
mysql -u root -e "DELETE FROM magentodb.core_config_data WHERE path='design/theme/locale';"
mysql -u root -e "DELETE FROM magentodb.core_config_data WHERE path='design/theme/default';"
mysql -u root -e "INSERT INTO magentodb.core_config_data (scope, scope_id, path, value) VALUES ('default', 0, 'design/package/name', '$PROJECT');"
mysql -u root -e "INSERT INTO magentodb.core_config_data (scope, scope_id, path, value) VALUES ('default', 0, 'design/theme/locale', '$PROJECT');"
mysql -u root -e "INSERT INTO magentodb.core_config_data (scope, scope_id, path, value) VALUES ('default', 0, 'design/theme/default', '$PROJECT');"
# Configure basic settings
mysql -u root -e "DELETE FROM magentodb.core_config_data WHERE path='web/unsecure/base_url';"
mysql -u root -e "DELETE FROM magentodb.core_config_data WHERE path='web/secure/base_url';"
mysql -u root -e "INSERT INTO magentodb.core_config_data (scope, scope_id, path, value) VALUES ('default', 0, 'web/unsecure/base_url', 'http://$DOMAIN/');"
mysql -u root -e "INSERT INTO magentodb.core_config_data (scope, scope_id, path, value) VALUES ('default', 0, 'web/secure/base_url', 'http://$DOMAIN/');"
# Set all notifications as read
mysql -u root -e "UPDATE magentodb.adminnotification_inbox SET is_read=1 WHERE 1=1;"
}
function install_magento_sample {
cd $BASE_DIR
clean_magento_db
# Import DB sample data
if [ ! -f "magento_sample_data_for_1.6.1.0.sql" ]; then
wget http://www.magentocommerce.com/downloads/assets/1.6.1.0/magento-sample-data-1.6.1.0.tar.gz
tar -zxvf magento-sample-data-1.6.1.0.tar.gz
mv magento-sample-data-1.6.1.0/media/* media/
mv magento-sample-data-1.6.1.0/magento_sample_data_for_1.6.1.0.sql magento_sample_data_for_1.6.1.0.sql
rm -rf magento/ magento-sample-data-1.6.1.0/ magento-1.8.1.0.tar.gz magento-sample-data-1.6.1.0.tar.gz
fi
mysql -h localhost -u magentouser -ppassword magentodb < magento_sample_data_for_1.6.1.0.sql
install_magento_defaults
# Set new homepage
CONTENT='{{block type="catalog/product_list_random" category_id="18" template="catalog/product/list.phtml"}}'
mysql -u root -e "UPDATE magentodb.cms_page SET content='$CONTENT', root_template='one_column' WHERE identifier='home';"
}
function install_wordpress {
cd $BASE_DIR
# Install WordPress (with wp-cli)
# --------------------
mysql -u root -e "CREATE DATABASE IF NOT EXISTS wpdb"
mysql -u root -e "GRANT ALL PRIVILEGES ON wpdb.* TO 'wpuser'@'localhost' IDENTIFIED BY 'password'"
mysql -u root -e "FLUSH PRIVILEGES"
if [ ! -f "$BASE_DIR/$SITE_FOLDER/wp-config.php" ]; then
if [ ! -d "$BASE_DIR/$SITE_FOLDER/" ]; then
mkdir "$BASE_DIR/$SITE_FOLDER/"
fi
cd "$BASE_DIR/$SITE_FOLDER/"
wp core download
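# --extra-php reads additional PHP from stdin (the heredoc below) and appends it to wp-config.php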
wp core config --dbname=wpdb --dbuser=wpuser --dbpass=password --extra-php <<PHP
define('WP_DEBUG', false);
define('WP_DEBUG_LOG', true);
PHP
wp core install --url="http://$DOMAIN/$SITE_FOLDER" --title="$PROJECT" --admin_user="admin" --admin_password="m123123" --admin_email="dejan@stuntcoders.com"
fi
}
function install_grunt_in_theme {
# Remove any existing Gruntfile and regenerate it
# --------------------
sudo rm -rf $THEME_DIR/Gruntfile.js
if [ ! -f "$THEME_DIR/Gruntfile.js" ]; then
GRUNTFILE=$(cat <<EOF
module.exports = function(grunt) {
grunt.initConfig({
pkg: grunt.file.readJSON("package.json"),
sass: {
dist: {
files: {
"$THEME_DIR/css/styles.css" : "$THEME_DIR/sass/styles.scss"
}
}
},
watch: {
livereload: {
options: {
livereload: true
},
files: [
"$THEME_DIR/css/styles.css"
]
},
css: {
files: "$THEME_DIR/sass/styles.scss",
tasks: ["sass"]
}
}
});
grunt.loadNpmTasks("grunt-contrib-sass");
grunt.loadNpmTasks("grunt-contrib-watch");
grunt.registerTask("default", ["watch"]);
}
EOF
)
sudo bash -c "echo '$GRUNTFILE' > $THEME_DIR/Gruntfile.js"
fi
# Remove any existing package.json and regenerate it
# --------------------
rm -rf $THEME_DIR/package.json
if [ ! -f "$THEME_DIR/package.json" ]; then
PACKAGEJSON=$(cat <<EOF
{
"name": "$PROJECT",
"version": "0.0.1",
"devDependencies": {
"grunt": "^0.4.5",
"grunt-contrib-sass": "^0.7.3",
"grunt-contrib-watch": "^0.6.1"
}
}
EOF
)
bash -c "echo '$PACKAGEJSON' > $THEME_DIR/package.json"
fi
# Install npm packages
# --------------------
cd $THEME_DIR
sudo npm install
# Add GRUNT on every boot
# --------------------
GRUNT=$(cat <<EOF
#!/usr/bin/env bash
### BEGIN INIT INFO
# Provides: grunf
# Required-Start: \$remote_fs \$syslog
# Required-Stop: \$remote_fs \$syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start grunt at boot time
# Description: Enable service provided by daemon.
### END INIT INFO
SCRIPTNAME=/etc/init.d/grunf
case "\$1" in
start)
nohup grunt --base $THEME_DIR --gruntfile $THEME_DIR/Gruntfile.js > /dev/null 2>&1 &
;;
*)
echo "Usage: \$SCRIPTNAME {status|start|stop|restart}"
exit 1
esac
EOF
)
export SASS_PATH=$THEME_DIR
# Save the generated init script as /etc/init.d/grunf
sudo rm -rf /etc/init.d/grunf
sudo bash -c "echo '$GRUNT' > /etc/init.d/grunf"
sudo update-rc.d -f grunf remove
sudo update-rc.d grunf defaults
sudo chmod +x /etc/init.d/grunf
}
#### END OF ALL FUNCTIONS ####
##############################
#### PROCESS THE REQUEST ####
if [ "$CONTROLLER" = "--help" -o "$CONTROLLER" = "" -o "$CONTROLLER" = "help" ]; then
clear; echo -e "$USAGE"; exit 0
fi
if [ "$CONTROLLER" = "setup" ]; then
if [ -z "$3" ]; then
setup_configuration
else
quick_setup_configuration "$2" "$3" "$4" "$5" "$6" "$7"
fi
fi
if [ "$CONTROLLER" = "install" ]; then
if [ "$CONFIG_LOADED" = "0" ]; then
setup_configuration
fi
# Install Magento
# --------------------
if [ "$ACTION" = "magento" ]; then
clear
if [ "$3" == "clean" ]; then
echo "Cleaning Magento database..."
mysql -u root -e "DROP DATABASE magentodb"
rm -rf /vagrant/app/etc/local.xml
fi
if [ "$3" == "sample" ]; then
echo "Installing Magento sample data..."
install_magento_sample
else
echo "Installing fresh Magento..."
install_magento
fi
fi
# Install WordPress
# --------------------
if [ "$ACTION" = "wordpress" -o "$ACTION" = "wp" ]; then
clear
echo "Installing WordPress..."
install_wordpress
fi
# Install Grunt
# --------------------
if [ "$ACTION" = "grunt" ]; then
clear
echo "Setting up grunt in theme folder..."
install_grunt_in_theme
echo_title "GRUNT"
echo "To run grunt with live reload put following code in your theme"
echo_bold "page/html/head.php:"
printf "\n"
echo_bold "<?php echo \"<script src='//{\$_SERVER['HTTP_HOST']}:35729/livereload.js'></script>\";"
echo ""
echo "...and run following command in your terminal:"
echo_bold "service grunf start"
echo ""
fi
fi
if [ "$CONTROLLER" = "magento" ]; then
if [ "$ACTION" = "list" ]; then
case $3 in
"modules")
n98-magerun.phar sys:modules:list
;;
"web-settings")
n98-magerun.phar config:get web/
;;
esac
fi
if [ "$ACTION" = "load-db" ]; then
if [ -f "$3" ]; then
# Load database from file
clean_magento_db
mysql -h localhost -u magentouser -ppassword magentodb < "$3"
fi
fi
if [ "$ACTION" = "set" ]; then
case $3 in
"admin")
mysql -u root -e "UPDATE magentodb.admin_user SET password=CONCAT(MD5('qXm123123'), ':qX') WHERE username='admin';"
;;
esac
fi
fi
# ===== dantemorius/Scripts :: /regras_nat_dedicado_completo.bash (Shell, UTF-8, no license) =====
#!/bin/bash
###########################
#  FIREWALL ACTIVATION    #
#  Date: 26/06/2014       #
#  Author: Leonardo Araujo #
###########################
# INITIAL SETUP NOTICE
clear
echo ""
echo "
__ __ ______ _____ _____ ________ __ _ _
| \/ | _ | ____|_ _| __ \| ____\ \ / /\ | | | |
| \ / | (_) | |__ | | | |__) | |__ \ \ /\ / / \ | | | |
| |\/| | | __| | | | _ /| __| \ \/ \/ / /\ \ | | | |
| | | | _ | | _| |_| | \ \| |____ \ /\ / ____ \| |____| |____
|_| |_| (_) |_| |_____|_| \_\______| \/ \/_/ \_\______|______|
"
echo ""
echo "########################################"
echo "# Para utilizar este script, deve-se #"
echo "# Configurar chave RSA trocada entre #"
echo "# os Servidores Linux #"
echo "########################################"
echo ""
inicio() {
echo -e "\\033[1;39m \\033[1;32mSELECIONE O TIPO DE FIREWALL QUE DESEJA INSTALAR:\\033[1;39m \\033[1;0m"
echo ""
echo "1. FIREWALL STANDALONE"
echo "2. FIREWALL EM ALTA DISPONIBILIDADE "
echo ""
echo ""
read OPCAO
case $OPCAO in
1)
standalone
;;
2)
failover
;;
*)
inicio
;;
esac
}
standalone () {
# DEFINE VARIABLES
echo -n "Informe o IP do Firewall: "
read NODE1
export NODE1
echo -n "Informe o HOSTNAME do Firewall: "
read NOME1
export NOME1
echo -n "Informe o RANGE de IP's BACKEND: "
read BACKEND
export BACKEND
echo ""
############################################################
#############################################################
# OS TUNING AND CONFIGURATION ####
#############################################################
############################################################
preparacaoSO(){
# INSTALL THE EPEL REPOSITORY
rpm -Uvh ftp://ftpcloud.mandic.com.br/Instaladores/RPM/epel-release-6-8.noarch.rpm --force
echo ""
echo -e "\\033[1;39m \\033[1;32mRepositorio EPEL instalado.\\033[1;39m \\033[1;0m"
echo ""
# UPDATE
yum update -y
echo ""
echo -e "\\033[1;39m \\033[1;32mUpdate realizado.\\033[1;39m \\033[1;0m"
echo ""
# INSTALL REQUIRED PACKAGES
yum install ipvsadm perl-Net-IP perl-IO-Socket-INET6 perl-Socket6 perl-Authen-Radius perl-MailTools perl-Net-DNS perl-Net-IMAP-Simple perl-Net-IMAP-Simple-SSL perl-POP3Client perl-libwww-perl perl-Net-SSLeay perl-Crypt-SSLeay.x86_64 perl-LWP-Authen-Negotiate.noarch perl-Test-Mock-LWP.noarch openssh-clients.x86_64 rsync.x86_64 wget.x86_64 vim-X11.x86_64 vim-enhanced.x86_64 mlocate.x86_64 nc.x86_64 tcpdump telnet sshpass nc.x86_64 pwgen.x86_64 screen lsof -y
echo ""
echo -e "\\033[1;39m \\033[1;32mPacotes necessarios instalados.\\033[1;39m \\033[1;0m"
echo ""
# CONFIGURE BASHRC
wget ftp://ftpcloud.mandic.com.br/Scripts/Linux/bashrc && yes | mv bashrc /root/.bashrc
# INSTALL SNOOPY
yum install snoopy -y
rpm -qa | grep snoopy | xargs rpm -ql | grep snoopy.so >> /etc/ld.so.preload && set LD_PRELOAD=/lib64/snoopy.so
# CONFIGURE REMOTE SYSLOG
sed -i 's/#*.* @@remote-host:514/*.* @177.70.106.7:514/g' /etc/rsyslog.conf && /etc/init.d/rsyslog restart
# CONFIG SELINUX
sed -i 's/=enforcing/=disabled/' /etc/sysconfig/selinux
echo ""
echo -e "\\033[1;39m \\033[1;32mselinux ajustado\\033[1;39m \\033[1;0m"
echo ""
# CONFIG SYSCTL.CONF
sed -i 's/net.ipv4.ip_forward = 0/net.ipv4.ip_forward = 1/' /etc/sysctl.conf
echo "" >> /etc/sysctl.conf
echo "# CONFIG para NAT e Heartbeat" >> /etc/sysctl.conf
echo "net.ipv4.conf.all.send_redirects = 0" >> /etc/sysctl.conf
echo "net.ipv4.conf.default.send_redirects = 0" >> /etc/sysctl.conf
echo "net.ipv4.conf.default.accept_redirects = 0" >> /etc/sysctl.conf
echo "net.ipv4.conf.all.accept_redirects = 0" >> /etc/sysctl.conf
echo ""
echo -e "\\033[1;39m \\033[1;32msysctl ajustado.\\033[1;39m \\033[1;0m"
echo ""
# DISABLE IPTABLES
/etc/init.d/iptables stop
chkconfig iptables off
echo ""
echo -e "\\033[1;39m \\033[1;32mIptables desabilitado.\\033[1;39m \\033[1;0m"
echo ""
# ENABLE RSYSLOG
/etc/init.d/rsyslog start
chkconfig rsyslog on
echo ""
echo -e "n"
echo ""
# DOWNLOAD CONFIG FILES
sshpass -p'#!cl0ud#!' scp -o StrictHostKeyChecking=no root@187.33.3.137:/root/firewall/firewall /etc/init.d/
chmod +x /etc/init.d/firewall
chkconfig firewall on
echo ""
echo -e "\\033[1;39m \\033[1;32mArquivos de Configuracao OK.\\033[1;39m \\033[1;0m"
echo ""
# SET THE HOSTNAME
sed -i 's/HOSTNAME=localhost.localdomain/HOSTNAME='"$NOME1"'/' /etc/sysconfig/network
echo $NOME1 > /etc/hostname
echo $NODE1 $NOME1 >> /etc/hosts
echo ""
echo -e "\\033[1;39m \\033[1;32mHostname Ajustado.\\033[1;39m \\033[1;0m"
echo ""
}
firewall() {
# SET THE NODE IPs IN THE FIREWALL SCRIPT
sed -i 's/IPBACKEND/'"$BACKEND\/24"'/' /etc/init.d/firewall
sed -i 's/NODE1/'"$NODE1"'/' /etc/init.d/firewall
sed -i 's/NOME1/'"$NOME1"'/' /etc/init.d/firewall
sed -i '/NODE2/d' /etc/init.d/firewall
LINE=`grep -n "#SINCRONIZA" firewall.sh | cut -d":" -f1`
LINE2=$(( $LINE+ 15))
sed -i ''$LINE','$LINE2'd' /etc/init.d/firewall
echo ""
echo -e "\\033[1;39m \\033[1;32mScript de Firewall Ajustado.\\033[1;39m \\033[1;0m"
echo ""
}
menu() {
echo -e "\n\\033[1;39m \\033[1;32mSelecione a Opção desejada:\\033[1;39m \\033[1;0m"
echo -e "\n\\033[1;39m \\033[1;32m [Digite q para Sair]\\033[1;39m \\033[1;0m"
echo -e "1) \\033[1;39m \\033[1;32mConfiguração de Regras Padrão - Firewall\\033[1;39m \\033[1;0m"
echo -e "2) \\033[1;39m \\033[1;32mConfiguração de Regras Zabbix\\033[1;39m \\033[1;0m"
echo -e "3) \\033[1;39m \\033[1;32mConfiguração de Regras AppAssure\\033[1;39m \\033[1;0m\n"
firewall_padrao(){
ARQNAT="/tmp/NAT.txt"
ARQNAT2="/tmp/NAT2.txt"
echo -e "\n\\033[1;39m \\033[1;32m####################################################\\033[1;39m \\033[1;0m"
echo -e "\\033[1;39m \\033[1;32m##\\033[1;39m \\033[1;0mCriando regras de NAT para IP Público Dedicado \\033[1;39m \\033[1;32m##\\033[1;39m \\033[1;0m"
echo -e "\\033[1;39m \\033[1;32m####################################################\\033[1;39m \\033[1;0m"
echo ""
echo -n "Informe a quantidade IPs VIPs: "
read NUM_VIPS
COUNT_VIPS=0
CONT_V=$(( $COUNT_VIPS + 1 ))
echo -n "Informe a quantidade de Servidores Remotos: "
read NUM_SERVERS
COUNT_REMOTE=0
CONT_R=$(( $COUNT_REMOTE + 1 ))
echo -n "Informe a quantidade de Portas Padrão: "
read NUM_PORTAS
COUNT_PORTA=0
CONT_P=$(( $COUNT_PORTA + 1 ))
contar_porta(){
echo -e "\n####Regras de Nat do VIP $VIP para Servidor $SERVER####" >> $ARQNAT
while [[ $NUM_PORTAS -ge $CONT_P ]] ; do
echo -ne "\\033[1;39m \\033[1;32mInforme $CONT_Pº porta para liberação no Servidor $SERVER (Use ":" para ranges de portas):\\033[1;39m \\033[1;0m"
read PORTA
if echo "$PORTA" | egrep ':' > DPORT
then
sed -i "s/:/-/g" DPORT
echo '$IPTABLES' "-t nat -A PREROUTING -d $VIP -p tcp -m multiport --dport $PORTA -j DNAT --to $SERVER:`cat DPORT`" >> ARQNAT
else
echo '$IPTABLES' "-t nat -A PREROUTING -d $VIP -p tcp --dport $PORTA -j DNAT --to $SERVER:$PORTA" >> ARQNAT
fi
CONT_P=$(( $CONT_P + 1 ))
done
}
while [[ $NUM_SERVERS -ge $CONT_R ]] ; do
echo -ne "\n\\033[1;39m \\033[1;32mInforme o IP VIP do Server $CONT_V:\\033[1;39m \\033[1;0m"
read VIP
echo -ne "\\033[1;39m \\033[1;32mInforme o IP Backend do Server $CONT_R:\\033[1;39m \\033[1;0m"
read SERVER
echo -e '$IPTABLES' "-t nat -A POSTROUTING -s $SERVER -o eth0 -p tcp -j SNAT --to $VIP" >> ARQNAT2
CONT_P=1
CONT_R=$(( $CONT_R + 1 ))
CONT_V=$(( $CONT_V + 1 ))
contar_porta
done
echo -e '\n########### Regras de retorno ##########' >> $ARQNAT
cat $ARQNAT2 | uniq >> $ARQNAT
clear
echo -e "\\033[1;39m \\033[1;32mLista de Regras Criadas:\n\\033[1;39m \\033[1;0m"
rm -rf $ARQNAT2
cat $ARQNAT
menu
}
firewall_zabbix(){
# GENERATE NAT RULES FOR ZABBIX #
ARQNAT="/tmp/NAT.txt"
echo "#####################################################"
echo "# CRIANDO REGRAS DE NAT PARA MONITORAMENTO ZABBIX #"
echo "#####################################################"
echo ""
echo -n "Informe o VIP de Origem: "
read VIP
export VIP
echo -n "Informe a quantidade de Servidores Remotos: "
read NUM_SERVERS
COUNT=0
CONT=$(( $COUNT + 1 ))
PORTAORI=7999
CONT_PORT=$(( $PORTAORI + 1 ))
while [[ $NUM_SERVERS -ge $CONT ]] ; do
echo -n "Informe o IP Backend do Server $CONT: "
read SERVER
echo '$IPTABLES' "-t nat -A PREROUTING -d $VIP -p tcp --dport $CONT_PORT -j DNAT --to $SERVER:10052" >> $ARQNAT
CONT=$(( $CONT + 1 ))
CONT_PORT=$(( $CONT_PORT + 1 ))
done
echo '$IPTABLES' "-t nat -A POSTROUTING -s $BACKEND/24 -d noc.mandic.net.br -p tcp --dport 10052 -j SNAT --to $VIP" >> $ARQNAT
REGRAS=`cat $ARQNAT`
sed -i '/# ZABBIX/ r '"$ARQNAT"'' /etc/init.d/firewall
rm -f $ARQNAT
echo ""
echo -e "\\033[1;39m \\033[1;32mRegras para Monitoramento Zabbix criadas.\\033[1;39m \\033[1;0m"
echo ""
menu
}
firewall_appassure(){
# GENERATE NAT RULES FOR APPASSURE BACKUP #
ARQNAT="/tmp/NAT.txt"
ARQNAT2="/tmp/NAT2.txt"
echo "################################################"
echo "# CRIANDO REGRAS DE NAT PARA BACKUP APPASSURE #"
echo "################################################"
echo ""
echo -n "Informe o VIP de Origem: "
read VIP
export VIP
echo -n "Informe a quantidade de Servidores Remotos: "
read NUM_SERVERS
COUNT=0
CONT=$(( $COUNT + 1 ))
PORTAORI=9099
CONT_PORT=$(( $PORTAORI + 1 ))
PORTADEST=9199
CONT_PORTADEST=$(( $PORTADEST + 1 ))
while [[ $NUM_SERVERS -ge $CONT ]] ; do
echo -n "Informe o IP Backend do Server $CONT: "
read SERVER
echo '$IPTABLES' "-t nat -A PREROUTING -d $VIP -p tcp --dport $CONT_PORT -j DNAT --to $SERVER:9006" >> $ARQNAT
echo '$IPTABLES' "-t nat -A PREROUTING -d $VIP -p tcp --dport $CONT_PORTADEST -j DNAT --to $SERVER:$CONT_PORTADEST" >> $ARQNAT2
CONT=$(( $CONT + 1 ))
CONT_PORT=$(( $CONT_PORT + 1 ))
CONT_PORTADEST=$(( $CONT_PORTADEST + 1 ))
done
sed -i '/# APPASSURE CONEXAO/ r '"$ARQNAT"'' /etc/init.d/firewall
sed -i '/# APPASSURE TRANSFERENCIA/ r '"$ARQNAT2"'' /etc/init.d/firewall
rm -f $ARQNAT $ARQNAT2
echo ""
echo -e "\\033[1;39m \\033[1;32mRegras para BACKUP APPASSURE criadas.\\033[1;39m \\033[1;0m"
echo ""
menu
}
read OPCAO
case $OPCAO in
1)
firewall_padrao
;;
2)
firewall_zabbix
;;
3)
firewall_appassure
;;
q)
reinicializacao
;;
*)
echo -e "\n[Digite uma opção válida!]\n"
menu
;;
esac
}
#### SERVER REBOOT ####
reinicializacao() {
echo -n "Reiniciar o Servidor para Aplicar os Updates e Configuracoes? [ y | n ]: "
read OPCAO
case $OPCAO in
y)
reiniciar
;;
n)
exit
;;
*)
reinicializacao
;;
esac
}
reiniciar()
{
/sbin/reboot
}
##### FUNCTION CALLS ####
preparacaoSO
firewall
menu
reinicializacao
}
failover () {
# DEFINE VARIABLES
echo -n "Informe o IP do Firewall 01: "
read NODE1
export NODE1
echo -n "Informe o IP do Firewall 02: "
read NODE2
export NODE2
echo -n "Informe o HOSTNAME do Firewall 01: "
read NOME1
export NOME1
echo -n "Informe o HOSTNAME do Firewall 02: "
read NOME2
export NOME2
echo -n "Informe o RANGE de IP's BACKEND: "
read BACKEND
export BACKEND
echo ""
############################################################
#############################################################
# OS TUNING AND CONFIGURATION ####
#############################################################
############################################################
preparacaoSO(){
# INSTALL THE EPEL REPOSITORY
rpm -Uvh ftp://ftpcloud.mandic.com.br/Instaladores/RPM/epel-release-6-8.noarch.rpm --force
ssh root@$NODE2 "rpm -ivh ftp://ftpcloud.mandic.com.br/Instaladores/RPM/epel-release-6-8.noarch.rpm --force"
echo ""
echo -e "\\033[1;39m \\033[1;32mRepositorio EPEL instalado.\\033[1;39m \\033[1;0m"
echo ""
# UPDATE
yum update -y
ssh root@$NODE2 "yum update -y"
echo ""
echo -e "\\033[1;39m \\033[1;32mUpdate realizado.\\033[1;39m \\033[1;0m"
echo ""
# INSTALL REQUIRED PACKAGES
yum install ipvsadm perl-Net-IP perl-IO-Socket-INET6 perl-Socket6 perl-Authen-Radius perl-MailTools perl-Net-DNS perl-Net-IMAP-Simple perl-Net-IMAP-Simple-SSL perl-POP3Client perl-libwww-perl perl-Net-SSLeay perl-Crypt-SSLeay.x86_64 perl-LWP-Authen-Negotiate.noarch perl-Test-Mock-LWP.noarch openssh-clients.x86_64 rsync.x86_64 wget.x86_64 vim-X11.x86_64 vim-enhanced.x86_64 mlocate.x86_64 nc.x86_64 tcpdump telnet heartbeat sshpass nc.x86_64 pwgen.x86_64 screen lsof -y
chkconfig heartbeat on
ssh root@$NODE2 "yum install ipvsadm perl-Net-IP perl-IO-Socket-INET6 perl-Socket6 perl-Authen-Radius perl-MailTools perl-Net-DNS perl-Net-IMAP-Simple perl-Net-IMAP-Simple-SSL perl-POP3Client perl-libwww-perl perl-Net-SSLeay perl-Crypt-SSLeay.x86_64 perl-LWP-Authen-Negotiate.noarch perl-Test-Mock-LWP.noarch openssh-clients.x86_64 rsync.x86_64 wget.x86_64 vim-X11.x86_64 vim-enhanced.x86_64 mlocate.x86_64 nc.x86_64 tcpdump telnet heartbeat sshpass nc.x86_64 pwgen.x86_64 screen lsof -y"
ssh root@$NODE2 "chkconfig heartbeat on"
echo ""
echo -e "\\033[1;39m \\033[1;32mPacotes necessarios instalados.\\033[1;39m \\033[1;0m"
echo ""
# CONFIGURE BASHRC
wget ftp://ftpcloud.mandic.com.br/Scripts/Linux/bashrc
yes | mv bashrc /root/.bashrc
scp /root/.bashrc root@$NODE2:/root/.
# INSTALL SNOOPY
yum install snoopy -y
rpm -qa | grep snoopy | xargs rpm -ql | grep snoopy.so >> /etc/ld.so.preload && set LD_PRELOAD=/lib64/snoopy.so
ssh root@$NODE2 "yum install snoopy -y"
ssh root@$NODE2 "rpm -qa | grep snoopy | xargs rpm -ql | grep snoopy.so >> /etc/ld.so.preload && set LD_PRELOAD=/lib64/snoopy.so"
# CONFIGURE REMOTE SYSLOG
sed -i 's/#*.* @@remote-host:514/*.* @177.70.106.7:514/g' /etc/rsyslog.conf && /etc/init.d/rsyslog restart
ssh root@$NODE2 "sed -i 's/#*.* @@remote-host:514/*.* @177.70.106.7:514/g' /etc/rsyslog.conf && /etc/init.d/rsyslog restart"
# CONFIG SELINUX
sed -i 's/=enforcing/=disabled/' /etc/sysconfig/selinux
ssh root@$NODE2 "sed -i 's/=permissive/=disabled/' /etc/sysconfig/selinux"
echo ""
echo -e "\\033[1;39m \\033[1;32mselinux ajustado\\033[1;39m \\033[1;0m"
echo ""
# CONFIG SYSCTL.CONF
sed -i 's/net.ipv4.ip_forward = 0/net.ipv4.ip_forward = 1/' /etc/sysctl.conf
echo "" >> /etc/sysctl.conf
echo "# CONFIG para NAT e Heartbeat" >> /etc/sysctl.conf
echo "net.ipv4.conf.all.send_redirects = 0" >> /etc/sysctl.conf
echo "net.ipv4.conf.default.send_redirects = 0" >> /etc/sysctl.conf
echo "net.ipv4.conf.default.accept_redirects = 0" >> /etc/sysctl.conf
echo "net.ipv4.conf.all.accept_redirects = 0" >> /etc/sysctl.conf
scp /etc/sysctl.conf root@$NODE2:/etc/sysctl.conf
echo ""
echo -e "\\033[1;39m \\033[1;32msysctl ajustado.\\033[1;39m \\033[1;0m"
echo ""
# DISABLE IPTABLES
/etc/init.d/iptables stop
chkconfig iptables off
ssh root@$NODE2 "/etc/init.d/iptables stop"
ssh root@$NODE2 "chkconfig iptables off"
echo ""
echo -e "\\033[1;39m \\033[1;32mIptables desabilitado.\\033[1;39m \\033[1;0m"
echo ""
# ENABLE RSYSLOG
/etc/init.d/rsyslog start
chkconfig rsyslog on
ssh root@$NODE2 "/etc/init.d/rsyslog start"
ssh root@$NODE2 "chkconfig rsyslog on"
echo ""
echo -e "\\033[1;39m \\033[1;32mRsyslog Iniciado.\\033[1;39m \\033[1;0m"
echo ""
# DOWNLOAD CONFIG FILES
sshpass -p'#!cl0ud#!' scp -o StrictHostKeyChecking=no root@187.33.3.137:/root/firewall/firewall /etc/init.d/
chmod +x /etc/init.d/firewall
chkconfig firewall on
sshpass -p'#!cl0ud#!' scp -o StrictHostKeyChecking=no root@187.33.3.137:/root/firewall/authkeys /etc/ha.d/
chmod 600 /etc/ha.d/authkeys
sshpass -p'#!cl0ud#!' scp -o StrictHostKeyChecking=no root@187.33.3.137:/root/firewall/ha.cf /etc/ha.d/
sshpass -p'#!cl0ud#!' scp -o StrictHostKeyChecking=no root@187.33.3.137:/root/firewall/haresources /etc/ha.d/
sshpass -p'#!cl0ud#!' scp -o StrictHostKeyChecking=no root@187.33.3.137:/root/firewall/ldirectord.cf /etc/ha.d/
sshpass -p'#!cl0ud#!' scp -o StrictHostKeyChecking=no root@187.33.3.137:/root/firewall/ldirectord-1.0.4-1.el6.x86_64.rpm /root/
scp /etc/init.d/firewall root@$NODE2:/etc/init.d/
ssh root@$NODE2 "chkconfig firewall on"
ssh root@$NODE2 "chmod +x /etc/init.d/firewall"
scp /etc/ha.d/authkeys root@$NODE2:/etc/ha.d/
scp /etc/ha.d/ha.cf root@$NODE2:/etc/ha.d/
scp /etc/ha.d/haresources root@$NODE2:/etc/ha.d/
scp /etc/ha.d/ldirectord.cf root@$NODE2:/etc/ha.d/
echo ""
echo -e "\\033[1;39m \\033[1;32mArquivos de Configuracao OK.\\033[1;39m \\033[1;0m"
echo ""
# INSTALL LDIRECTORD
rpm -ivh /root/ldirectord-1.0.4-1.el6.x86_64.rpm
scp /root/ldirectord-1.0.4-1.el6.x86_64.rpm root@$NODE2:/root/
ssh root@$NODE2 "rpm -ivh /root/ldirectord-1.0.4-1.el6.x86_64.rpm"
echo ""
echo -e "\\033[1;39m \\033[1;32mLDirectord Instalado.\\033[1;39m \\033[1;0m"
echo ""
# SET THE HOSTNAME
sed -i 's/HOSTNAME=localhost.localdomain/HOSTNAME='"$NOME1"'/' /etc/sysconfig/network
echo $NOME1 > /etc/hostname
echo $NODE1 $NOME1 >> /etc/hosts
ssh root@$NODE2 "sed -i 's/HOSTNAME=localhost.localdomain/HOSTNAME='"$NOME2"'/' /etc/sysconfig/network"
ssh root@$NODE2 "echo $NOME2 > /etc/hostname"
ssh root@$NODE2 "echo $NODE2 $NOME2 >> /etc/hosts"
echo ""
echo -e "\\033[1;39m \\033[1;32mHostname Ajustado.\\033[1;39m \\033[1;0m"
echo ""
}
############################################################
#############################################################
# HIGH AVAILABILITY TUNING - HEARTBEAT ####
#############################################################
############################################################
# WRITE THE VIP ADDRESSES INTO THE HARESOURCES FILE
ipvip() {
echo "#################################"
echo "# CONFIGURANDO O HEARTBEAT #"
echo "#################################"
echo ""
read -p 'Digite a quantidade de IPs VIPs: ' num_ips
count=0
cont=$(( $count + 1 ))
while [[ $num_ips -ge $cont ]] ; do
read -p "Digite o IP $cont: " ipvip
echo $NOME1 $ipvip"/24/eth0" >> /etc/ha.d/haresources
cont=$(( $cont + 1 ))
done
}
gateway() {
IPBACKEND=`ifconfig eth1 | grep "inet end.:" | awk '{print $3}'`
GW=`echo $IPBACKEND | cut -d"." -f1,2,3`
echo $NOME1 $GW".1/24/eth1:1 # GW Backend" >> /etc/ha.d/haresources
}
# WRITE THE NODE IPs INTO THE HA.CF FILE
hacf() {
SENHA=`pwgen -Byns 15 1`
sed -i 's/SENHA/'"$SENHA"'/' /etc/ha.d/authkeys
chmod 600 /etc/ha.d/authkeys
sed -i 's/ucast1/ucast eth0 '"$NODE1"'/' /etc/ha.d/ha.cf
sed -i 's/ucast2/ucast eth0 '"$NODE2"'/' /etc/ha.d/ha.cf
sed -i 's/node1/node '"$NOME1"'/' /etc/ha.d/ha.cf
sed -i 's/node2/node '"$NOME2"'/' /etc/ha.d/ha.cf
scp /etc/ha.d/authkeys root@$NODE2:/etc/ha.d/
scp /etc/ha.d/ha.cf root@$NODE2:/etc/ha.d/
scp /etc/ha.d/haresources root@$NODE2:/etc/ha.d/
scp /etc/ha.d/ldirectord.cf root@$NODE2:/etc/ha.d/
echo ""
echo -e "\\033[1;39m \\033[1;32mHEARTBEAT Ajustado.\\033[1;39m \\033[1;0m"
echo ""
}
###########################################################
############################################################
# FIREWALL RULE ADJUSTMENTS ####
############################################################
###########################################################
firewall() {
# SET THE NODE IPs IN THE FIREWALL SCRIPT
sed -i 's/IPBACKEND/'"$BACKEND\/24"'/' /etc/init.d/firewall
sed -i 's/NODE1/'"$NODE1"'/' /etc/init.d/firewall
sed -i 's/NODE2/'"$NODE2"'/' /etc/init.d/firewall
sed -i 's/NOME1/'"$NOME1"'/' /etc/init.d/firewall
sed -i 's/NOME2/'"$NOME2"'/' /etc/init.d/firewall
scp /etc/init.d/firewall root@$NODE2:/etc/init.d/
echo ""
echo -e "\\033[1;39m \\033[1;32mScript de Firewall Ajustado.\\033[1;39m \\033[1;0m"
echo ""
}
menu() {
clear
echo -e "\n\\033[1;39m \\033[1;32mSelecione a Opção desejada:\\033[1;39m \\033[1;0m"
echo -e "1) \\033[1;39m \\033[1;32mConfiguração de Regras Padrão - Firewall\\033[1;39m \\033[1;0m"
echo -e "2) \\033[1;39m \\033[1;32mConfiguração de Regras Zabbix\\033[1;39m \\033[1;0m"
echo -e "3) \\033[1;39m \\033[1;32mConfiguração de Regras AppAssure\\033[1;39m \\033[1;0m\n"
firewall_padrao(){
ARQNAT="/tmp/NAT.txt"
ARQNAT2="/tmp/NAT2.txt"
echo -e "\n\\033[1;39m \\033[1;32m####################################################\\033[1;39m \\033[1;0m"
echo -e "\\033[1;39m \\033[1;32m##\\033[1;39m \\033[1;0mCriando regras de NAT para IP Público Dedicado \\033[1;39m \\033[1;32m##\\033[1;39m \\033[1;0m"
echo -e "\\033[1;39m \\033[1;32m####################################################\\033[1;39m \\033[1;0m"
echo ""
echo -n "Informe a quantidade IPs VIPs: "
read NUM_VIPS
COUNT_VIPS=0
CONT_V=$(( $COUNT_VIPS + 1 ))
echo -n "Informe a quantidade de Servidores Remotos: "
read NUM_SERVERS
COUNT_REMOTE=0
CONT_R=$(( $COUNT_REMOTE + 1 ))
echo -n "Informe a quantidade de Portas Padrão: "
read NUM_PORTAS
COUNT_PORTA=0
CONT_P=$(( $COUNT_PORTA + 1 ))
contar_porta(){
echo -e "\n####Regras de Nat do VIP $VIP para Servidor $SERVER####" >> $ARQNAT
while [[ $NUM_PORTAS -ge $CONT_P ]] ; do
echo -ne "\\033[1;39m \\033[1;32mInforme $CONT_Pº porta para liberação no Servidor $SERVER (Use ":" para ranges de portas):\\033[1;39m \\033[1;0m"
read PORTA
if echo "$PORTA" | egrep ':' > DPORT
then
sed -i "s/:/-/g" DPORT
echo '$IPTABLES' "-t nat -A PREROUTING -d $VIP -p tcp -m multiport --dport $PORTA -j DNAT --to $SERVER:`cat DPORT`" >> $ARQNAT
else
echo '$IPTABLES' "-t nat -A PREROUTING -d $VIP -p tcp --dport $PORTA -j DNAT --to $SERVER:$PORTA" >> $ARQNAT
fi
CONT_P=$(( $CONT_P + 1 ))
done
}
while [[ $NUM_SERVERS -ge $CONT_R ]] ; do
echo -ne "\n\\033[1;39m \\033[1;32mInforme o IP VIP do Server $CONT_V:\\033[1;39m \\033[1;0m"
read VIP
echo -ne "\\033[1;39m \\033[1;32mInforme o IP Backend do Server $CONT_R:\\033[1;39m \\033[1;0m"
read SERVER
echo -e '$IPTABLES' "-t nat -A POSTROUTING -s $SERVER -o eth0 -p tcp -j SNAT --to $VIP" >> $ARQNAT2
CONT_P=1
CONT_R=$(( $CONT_R + 1 ))
CONT_V=$(( $CONT_V + 1 ))
contar_porta
done
echo -e '\n########### Regras de retorno ##########' >> $ARQNAT
cat $ARQNAT2 | uniq >> $ARQNAT
clear
echo -e "\\033[1;39m \\033[1;32mLista de Regras Criadas:\n\\033[1;39m \\033[1;0m"
rm -rf $ARQNAT2
cat $ARQNAT
}
firewall_zabbix(){
# GENERATE NAT RULES FOR ZABBIX #
$ARQNAT="/tmp/NAT.txt"
echo "#####################################################"
echo "# CRIANDO REGRAS DE NAT PARA MONITORAMENTO ZABBIX #"
echo "#####################################################"
echo ""
echo -n "Informe o VIP de Origem: "
read VIP
export VIP
echo -n "Informe a quantidade de Servidores Remotos: "
read NUM_SERVERS
COUNT=0
CONT=$(( $COUNT + 1 ))
PORTAORI=7999
CONT_PORT=$(( $PORTAORI + 1 ))
while [[ $NUM_SERVERS -ge $CONT ]] ; do
echo -n "Informe o IP Backend do Server $CONT: "
read SERVER
echo '$IPTABLES' "-t nat -A PREROUTING -d $VIP -p tcp --dport $CONT_PORT -j DNAT --to $SERVER:10052" >> $$ARQNAT
CONT=$(( $CONT + 1 ))
CONT_PORT=$(( $CONT_PORT + 1 ))
done
echo '$IPTABLES' "-t nat -A POSTROUTING -s $BACKEND/24 -d noc.mandic.net.br -p tcp --dport 10052 -j SNAT --to $VIP" >> $$ARQNAT
REGRAS=`cat $$ARQNAT`
sed -i '/# ZABBIX/ r '"$$ARQNAT"'' /etc/init.d/firewall
rm -f $$ARQNAT
echo ""
echo -e "\\033[1;39m \\033[1;32mRegras para Monitoramento Zabbix criadas.\\033[1;39m \\033[1;0m"
echo ""
}
firewall_appassure(){
# GENERATE NAT RULES FOR APPASSURE BACKUP #
ARQNAT="/tmp/NAT.txt"
ARQNAT2="/tmp/NAT2.txt"
echo "################################################"
echo "# CRIANDO REGRAS DE NAT PARA BACKUP APPASSURE #"
echo "################################################"
echo ""
echo -n "Informe o VIP de Origem: "
read VIP
export VIP
echo -n "Informe a quantidade de Servidores Remotos: "
read NUM_SERVERS
COUNT=0
CONT=$(( $COUNT + 1 ))
PORTAORI=9099
CONT_PORT=$(( $PORTAORI + 1 ))
PORTADEST=9199
CONT_PORTADEST=$(( $PORTADEST + 1 ))
while [[ $NUM_SERVERS -ge $CONT ]] ; do
echo -n "Informe o IP Backend do Server $CONT: "
read SERVER
echo '$IPTABLES' "-t nat -A PREROUTING -d $VIP -p tcp --dport $CONT_PORT -j DNAT --to $SERVER:9006" >> $ARQNAT
echo '$IPTABLES' "-t nat -A PREROUTING -d $VIP -p tcp --dport $CONT_PORTADEST -j DNAT --to $SERVER:$CONT_PORTADEST" >> $ARQNAT2
CONT=$(( $CONT + 1 ))
CONT_PORT=$(( $CONT_PORT + 1 ))
CONT_PORTADEST=$(( $CONT_PORTADEST + 1 ))
done
sed -i '/# APPASSURE CONEXAO/ r '"$ARQNAT"'' /etc/init.d/firewall
sed -i '/# APPASSURE TRANSFERENCIA/ r '"$ARQNAT2"'' /etc/init.d/firewall
rm -f $ARQNAT $ARQNAT2
echo ""
echo -e "\\033[1;39m \\033[1;32mRegras para BACKUP APPASSURE criadas.\\033[1;39m \\033[1;0m"
echo ""
}
read OPCAO
case $OPCAO in
1)
firewall_padrao
;;
2)
firewall_zabbix
;;
3)
firewall_appassure
;;
q)
syncall
;;
*)
echo -e "\n[Digite uma opção Válida!]\n"
menu
;;
esac
}
syncall(){
scp /etc/sysctl.conf root@$NODE2:/etc/
scp /etc/ha.d/authkeys root@$NODE2:/etc/ha.d/
scp /etc/ha.d/ha.cf root@$NODE2:/etc/ha.d/
scp /etc/ha.d/haresources root@$NODE2:/etc/ha.d/
scp /etc/ha.d/ldirectord.cf root@$NODE2:/etc/ha.d/
scp /etc/init.d/firewall root@$NODE2:/etc/init.d/
}
################################
#################################
# SERVER REBOOT ####
#################################
################################
reinicializacao() {
echo -n "Reiniciar o Servidor para Aplicar os Updates e Configuracoes? [ y | n ]: "
read OPCAO
case $OPCAO in
y)
reiniciar
;;
n)
exit
;;
*)
reinicializacao
;;
esac
}
reiniciar()
{
ssh root@$NODE2 "/sbin/reboot"
/sbin/reboot
}
################################
#################################
# FUNCTION CALLS ####
#################################
################################
preparacaoSO
ipvip
gateway
hacf
firewall
menu
syncall
reinicializacao
}
inicio
# ===== planthertech/BESSO :: /Fastinstall.sh (Shell, UTF-8, no license) =====
#!/usr/bin/env bash
cd $HOME/BESSO
rm -rf $HOME/.telegram-cli
install() {
rm -rf $HOME/.telegram-cli
sudo chmod +x tg
chmod +x BESSO
chmod +x BE
./BE
}
if [ "$1" = "ins" ]; then
install
fi
chmod +x install.sh
lua start.lua
# ===== BorkStick/Spells :: /Installs/zabbix-insall.sh (Shell, UTF-8, no license) =====
#!/bin/bash
# Install zabbix server
# https://www.digitalocean.com/community/tutorials/how-to-install-and-configure-zabbix-to-securely-monitor-remote-servers-on-ubuntu-18-04
export DEBIAN_FRONTEND=noninteractive
# Prerequisites Install LAMP
sudo apt install -y apache2
sudo apt install -y mysql-server
sudo apt install -y php libapache2-mod-php php-mysql
# Installing the Zabbix Server
wget https://repo.zabbix.com/zabbix/4.2/ubuntu/pool/main/z/zabbix-release/zabbix-release_4.2-1+bionic_all.deb
sudo dpkg -i zabbix-release_4.2-1+bionic_all.deb
sudo apt update
sudo apt install -y zabbix-server-mysql zabbix-frontend-php
sudo apt install -y zabbix-agent
# Configuring the MySQL Database for Zabbix
# Set up database
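# Generate a random 13-character alphanumeric database password from /dev/urandom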
DB_PASS="$(head /dev/urandom | tr -dc A-Za-z0-9 | head -c 13)"
mysql -u root --execute="create database zabbix character set utf8 collate utf8_bin;"
mysql -u root --execute="grant all privileges on zabbix.* to zabbix@localhost identified by '$DB_PASS';"
mysql -u root --execute="flush privileges;"
# set up the schema and import the data into the zabbix database
zcat /usr/share/doc/zabbix-server-mysql/create.sql.gz | mysql -uzabbix -p"$DB_PASS" zabbix
# END
echo "#####################################"
echo "DB: zabbix"
echo "DB USERNAME: zabbix"
echo "DB PASSWORD: $DB_PASS"
echo "#####################################"
echo " "
# ===== bitcrumb/ansible-infra :: /.githooks/disallow-yml.sh (Shell, UTF-8, no license) =====
#!/usr/bin/env bash
set -e
function check_extension() {
if basename "$1" | grep ".*\.yml$"; then
echo "YAML files with extension .yml are not allowed!"
exit 1
fi
}
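# Some hook runners invoke each hook with --about to collect a description; otherwise check every staged file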
case "${1}" in
--about)
echo "Disallows the usage of .yml extension instead of .yaml"
;;
*)
for file in $(git diff-index --cached --name-only HEAD); do
check_extension "$file"
done
;;
esac
# ===== ironiemix/xenserver-scripts :: /software-raid/02-prepare-chroot (Shell, UTF-8, no license) =====
#!/bin/bash
. makeraid.conf || exit 1
MAXPART=$(gdisk -l $SOURCEDISK | tail -n 1 | awk '{print $1}')
echo "Source disk has $MAXPART partitions"
echo -n "Loading kernel module for raid1..."
modprobe md_mod
modprobe raid1
echo " done."
echo "Erzeuge /dev/md0 aus ${DESTDISK}1..."
yes|mdadm --create /dev/md0 --level=1 --raid-devices=2 --metadata=0.90 ${DESTDISK}1 missing
echo "Erzeuge /dev/md1 aus ${DESTDISK}2..."
yes|mdadm --create /dev/md1 --level=1 --raid-devices=2 --metadata=0.90 ${DESTDISK}2 missing
if [ $MAXPART -eq 3 ]; then
echo "Erzeuge /dev/md2 aus ${DESTDISK}3..."
yes|mdadm --create /dev/md2 --level=1 --raid-devices=2 --metadata=0.90 ${DESTDISK}3 missing
fi
echo -n "Warte 5 Sekunden "
for i in 1 2 3 4 5; do
sleep 1
echo -n "."
done
echo " fertig."
echo -n "Kopiere System nach /dev/md0"
mkfs.ext3 /dev/md0 > /dev/null 2>&1
mount /dev/md0 /mnt
cp -xvR --preserve=all / /mnt
echo " done."
echo -n "Modifying /mnt/etc/fstab... "
sed -i "s/LABEL=[a-zA-Z\-]*/\/dev\/md0/" /mnt/etc/fstab
echo " done."
echo -n "Mounting /dev/ sysfs and proc to changeroot ..."
mount --bind /dev /mnt/dev
mount -t sysfs none /mnt/sys
mount -t proc none /mnt/proc
echo " done."
cp makeraid.conf /mnt/
cp 03-* /mnt/
chroot /mnt /sbin/extlinux --install /boot
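# Write the GPT-aware syslinux boot code into the destination disk's MBR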
dd if=/mnt/usr/share/syslinux/gptmbr.bin of=$DESTDISK
echo "Now type"
echo " chroot /mnt "
echo "on the command line and execute part3 of the scripts!"
# ===== qixin5/debloating_study :: /expt/debaug/benchmark/rm-8.4/testscript/I2/5 (Shell, UTF-8, MIT) =====
#!/bin/bash
BIN=$1
OUTDIR=$2
TIMEOUT=$3
INDIR=$4
cp -r $INDIR/dbstore ./
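# Wrap the piped command in run.sh so the timeout below covers the whole pipeline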
echo '#!/bin/bash' >run.sh
echo "" >>run.sh
echo "echo y | $BIN -ri dbstore/" >>run.sh
chmod 700 run.sh
{ timeout -k 9 ${TIMEOUT}s ./run.sh; } &>$OUTDIR/o5
echo "$?" >>$OUTDIR/o5
test -d dbstore
echo "$?" >>$OUTDIR/o5
test -f dbstore/file1.log
echo "$?" >>$OUTDIR/o5
test -f dbstore/file2.log
echo "$?" >>$OUTDIR/o5
test -f dbstore/file3.log
echo "$?" >>$OUTDIR/o5
test -f dbstore/service
echo "$?" >>$OUTDIR/o5
rm run.sh
# ===== pusher/testing :: /scripts/update_prow_components.sh (Shell, UTF-8, no license) =====
#!/usr/bin/env bash
root_dir="$(dirname "$(realpath "$0")")"
source "$root_dir"/preflight_checks.sh
new_tag="$1"
if [[ -z $new_tag ]]; then
echo "missing required new tag argument" >&2
exit 1
fi
echo "Updating pod utilities (excluding clonerefs): to $new_tag"
for i in initupload sidecar entrypoint; do
find $root_dir/../config -type f -exec $SED -i "s/(.*${i}:)[^\"]+/\1$new_tag/g" {} +
done
echo "Updating checkconfig to $new_tag"
for path in ../config boilerplate; do
find $root_dir/$path -type f -exec $SED -i "s/(.*checkconfig:)[^\"]+/\1$new_tag/g" {} +
done
$SED -i "s/(.*checkconfig:)[^\ ]+/\1$new_tag/g" Makefile
echo
echo "Please verify changes are as intended"
git status --short
# ===== wangchangdog/bash_scripts :: /macos/big_sur/createmacosiso.sh (Shell, UTF-8, no license) =====
#!/bin/sh
# Create a virtual (sparse) disk image
hdiutil create -o /tmp/Big\ Sur -size 16G -volname Big\ Sur -layout SPUD -fs HFS+J -type SPARSE
# Mount the disk image we just created
hdiutil attach /tmp/Big\ Sur.sparseimage -noverify -mountpoint /Volumes/Big\ Sur
# Build the installer onto the disk image with createinstallmedia
sudo $HOME/macOS\ Big\ Sur.app/Contents/Resources/createinstallmedia --volume /Volumes/Big\ Sur --nointeraction
# Unmount it (createinstallmedia renames the volume to "Install macOS Big Sur")
hdiutil eject -force /Volumes/Install\ macOS\ Big\ Sur
# Convert the sparse image to a CDR (ISO) image
hdiutil convert /tmp/Big\ Sur.sparseimage -format UDTO -o $HOME/Big\ Sur.cdr
# Rename
mv $HOME/Big\ Sur.cdr $HOME/Big\ Sur.iso
rm -f /tmp/Big\ Sur.sparseimage
echo "The temp image has been removed"
# ===== QuantitativeNeuroimagingLaboratory/LG-RBSN :: /functions/MNIregionimagefrommanualedit_onereg.sh (Shell, UTF-8, no license) =====
#!/bin/bash
#
# Region based spatial normalization
# Hengda He
#
Sub=$1
Outdir=$2
Subdir=$3
i=$4 # region number
#Sub=P00000001
#Outdir=/share/projects/razlighi_lab/users/hengda/RDIR_LM/Workspace_${Sub}
#Subdir=/share/projects/razlighi_lab/users/hengda/RDIR_LM/subjects_FS
SUBJECTS_DIR=${Subdir}
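# Zero-pad the region number to three digits (e.g. 7 -> 007)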
name=$(printf "%03d\n" $i)
echo "Segmenting Region-"${name}
mkdir ${Outdir}/region1${name}/
mkdir ${Outdir}/region2${name}/
fslmaths ${Subdir}/FreeSurferMNI152/mri/aparc+aseg.nii.gz -thr 1$name -uthr 1$name ${Outdir}/region1${name}/aparc+aseg_1${name}_MNI.nii.gz
fslmaths ${Subdir}/FreeSurferMNI152/mri/aparc+aseg.nii.gz -thr 2$name -uthr 2$name ${Outdir}/region2${name}/aparc+aseg_2${name}_MNI.nii.gz
mris_label2annot --s FreeSurfer_${Sub} --ctab /usr/local/freesurfer/FreeSurferColorLUT.txt --h lh --l ${Subdir}/FreeSurfer_${Sub}/Vertices_forseg/aparc-lh-${name}_5thCol_Reg2MNI.label --annot-path ${Subdir}/FreeSurfer_${Sub}/label/lh.label${name}
mris_label2annot --s FreeSurfer_${Sub} --ctab /usr/local/freesurfer/FreeSurferColorLUT.txt --h rh --l ${Subdir}/FreeSurfer_${Sub}/Vertices_forseg/aparc-rh-${name}_5thCol_Reg2MNI.label --annot-path ${Subdir}/FreeSurfer_${Sub}/label/rh.label${name}
mri_aparc2aseg --s FreeSurfer_${Sub} --new-ribbon --annot label${name}
mri_convert ${Subdir}/FreeSurfer_${Sub}/mri/label${name}+aseg.mgz ${Subdir}/FreeSurfer_${Sub}/mri/label${name}+aseg.nii.gz
fslmaths ${Subdir}/FreeSurfer_${Sub}/mri/label${name}+aseg.nii.gz -thr 1001 -uthr 1001 -bin -mul $(($i+1000)) ${Outdir}/region1${name}/aparc+aseg_1${name}.nii
fslmaths ${Subdir}/FreeSurfer_${Sub}/mri/label${name}+aseg.nii.gz -thr 2001 -uthr 2001 -bin -mul $(($i+2000)) ${Outdir}/region2${name}/aparc+aseg_2${name}.nii
#gzip -d ${Outdir}/region1${name}/aparc+aseg_1*.nii.gz
#gzip -d ${Outdir}/region2${name}/aparc+aseg_2*.nii.gz
# ===== Caeno/Xamarin.Forms.GoogleMaps :: /XFGoogleMapSample/Droid/appcenter-post-clone.sh (Shell, UTF-8, CC-BY-SA-4.0/MIT) =====
#!/usr/bin/env bash
echo "Replace $(SolutionDir) to .. in .csproj"
perl -pi -e 's/\$\(SolutionDir\)/../g' ./XFGoogleMapSample.Droid.csproj
perl -pi -e 's/\$\(SolutionDir\)/../g' ../XFGoogleMapSample/XFGoogleMapSample.csproj
echo "Copy Variables_sample.cs to Variables.cs"
cp ../XFGoogleMapSample/Variables_sample.cs ../XFGoogleMapSample/Variables.cs
echo "Inject Google Maps Android API Key"
perl -pi -e "s/your_google_maps_android_api_v2_api_key/$GOOGLEMAPS_ANDROID_API_KEY/g" ../XFGoogleMapSample/Variables.cs
# ===== hugobenichi/doodles :: /java/java8_sandbox/vagrant_init.sh (Shell, UTF-8, no license) =====
#! /bin/bash
set -x
# config variables
java8_dl="http://download.java.net/jdk8/archive/b120/binaries/jdk-8-ea-bin-b120-linux-x64-12_dec_2013.tar.gz?q=download"
jdk8_path="/home/vagrant/jdk1.8.0/bin"
java_home="/home/vagrant/jdk1.8.0/"
ant_dl="http://ftp.yz.yamagata-u.ac.jp/pub/network/apache//ant/binaries/apache-ant-1.9.3-bin.tar.gz"
ant_path="/home/vagrant/apache-ant-1.9.3/bin"
jruby_dl="http://jruby.org.s3.amazonaws.com/downloads/1.7.9/jruby-bin-1.7.9.tar.gz"
jruby_path="/home/vagrant/jruby-1.7.9/bin"
# package installation
sudo apt-get update
sudo apt-get install -y git vim tree screen curl make
curl $java8_dl | tar -xz
curl $ant_dl | tar -xz
curl $jruby_dl | tar -xz
# env configuration
echo "export PATH=\$PATH:$jdk8_path:$jruby_path:$ant_path" >> /home/vagrant/.bashrc
echo "export JAVA_HOME=$java_home" >> /home/vagrant/.bashrc
export PATH=$PATH:$jdk8_path:$jruby_path:$ant_path
export JAVA_HOME=$java_home
# jruby configuration and gem installation
cd /home/vagrant/jruby-1.7.9/tool/nailgun/ && ./configure && make
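# Launch the Nailgun server in a background subshell so provisioning does not block on it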
(jruby --ng-server &)
jruby -S jgem install rake rspec shoulda bundler jbundler
# generate shared java library
sudo java -Xshare:dump
# ===== aakash-johari/WSRedis :: /quaffle-frontend/make-debian.sh (Shell, UTF-8, no license) =====
#!/bin/bash -e
SERVICE=quaffle-frontend-service
MAJOR_VERSION=1
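# Use the latest commit's Unix timestamp as an ever-increasing minor version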
MINOR_VERSION=$(git log -n1 --format="%ct")
VERSION=${MAJOR_VERSION}.${MINOR_VERSION}
PACKAGE_HOME=${SERVICE}_${VERSION}
echo "================= Updating the NPM cache ==============="
npm install
echo "===================== Building code ===================="
npm run build
echo "===================== Copying js/html/css files ====================="
mkdir -p ${PACKAGE_HOME}/usr/share/$SERVICE
mkdir -p ${PACKAGE_HOME}/usr/share/$SERVICE/dist
cp -r $SERVICE/* ${PACKAGE_HOME}/
cp dist/* ${PACKAGE_HOME}/usr/share/$SERVICE/dist/
cp server.js ${PACKAGE_HOME}/usr/share/$SERVICE/$SERVICE.js
npm install --prefix ${PACKAGE_HOME}/usr/share/$SERVICE express
echo "===================== Building Deb ========================="
sed -i -e "s/__VERSION__/$VERSION/g" ${PACKAGE_HOME}/DEBIAN/control
fakeroot dpkg-deb --build ${PACKAGE_HOME}
rm -rf ${PACKAGE_HOME}
# ===== moshloop/systools :: /bin/aws_list_instances (Shell, UTF-8, no license) =====
#!/bin/bash
if [[ "$AWS_REGION" == "" ]]; then
ZONE=$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone)
if [[ "$ZONE" == "" ]]; then
echo region lookup failed, must specify AWS_REGION
exit 1
fi
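# An availability zone such as "us-east-1a" is the region plus a letter, so drop the last character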
AWS_REGION=${ZONE:0:${#ZONE}-1}
fi
if [[ "$1" != "" ]]; then
environment=$1
fi
if [[ "$environment" == "" && "$env" != "" ]]; then
environment=$env
fi
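# Embed the JSON-parsing helper as a quoted heredoc ("END") so nothing inside it is expanded by the shell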
CONTENT=$(cat <<-"END"
#!/usr/bin/python
import sys
import json
instances = json.loads(sys.stdin.read())
results={
"instances": []
}
for res in instances['Reservations']:
for instance in res['Instances']:
ip=instance.get('PrivateIpAddress', '')
tags={}
for tag in instance['Tags']:
tags[tag['Key']] = tag['Value']
result={
"ip": ip,
"name": tags.get('Name', ''),
"purpose": tags.get("PURPOSE",''),
"role": tags.get("ROLE",''),
"env": tags.get("ENVIRONMENT", '').lower(),
"launched": instance.get('LaunchTime', ''),
"ami": instance.get('ImageId', ''),
"stack": tags.get("aws:cloudformation:stack-name",'')
}
role = result['role']
if role != result['purpose']:
role = role + "-" + result['purpose']
if role != '':
if role not in results:
results[role] = []
results[role].append(result)
results['instances'].append(result)
_results =dict(results)
for k in _results:
if k is "instances":
continue
results[k + "_count"] = len(results[k])
print(json.dumps(results))
END
)
filters="--filters Name=instance-state-name,Values=running "
if [[ "$environment" != "all" && "$environment" != "" ]]; then
filters="$filters Name=tag:ENVIRONMENT,Values=$environment"
fi
instances=$(aws ec2 describe-instances --region $AWS_REGION $filters 2>&1)
if [[ "$?" != "0" ]]; then
echo "$instances"
exit 1
fi
instances=$(echo "$instances" | python -c "$CONTENT" 2>&1)
if [[ "$?" == "0" ]]; then
echo "$instances"
exit 0
else
echo "$instances" >&2
exit 1
fi
# ===== tttor/lab1231-sun-prj :: /xprmnt/superpixel-generation/segment_dummy.sh (Shell, UTF-8, no license) =====
#!/bin/bash
home=/home/tor
n_segment=10
img_dir=$home/dataset/pascal/VOC2010/VOCdevkit/VOC2010/JPEGImages-ClassSegmented
img_list_filepath=$home/dataset/pascal/VOC2010/meta/split_voc2010_philipp/All_1928.txt
img_extension=.jpg
out_dir=$home/xprmnt/superpixel-generation/voc2010/dummy-segmentation
mkdir -p $out_dir
exe=$home/lab1231-sun-prj/segmenter/src-py/segment_dummy.py
python $exe $img_dir \
$img_list_filepath \
$img_extension \
$n_segment \
$out_dir
# ===== peterthomassen/RutgersIAF :: /EventAnalyzer/test/createJobsToCondorLPC.sh (Shell, UTF-8, MIT) =====
#!/bin/bash
echo ""
echo "-- Script to create Condor jobs for EventAnalyzer --"
if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then
echo "Missing arguments"
echo " ./createJobsToCondor.sh <script.C> <SampleDirName> <SampleFileList>"
echo " ex: ./createJobsToCondorLPC.sh exampleAnalysisTree.C TTJets ttjets.list"
echo ""
echo "..exiting!"
echo ""
exit 1
fi
echo "Input .C: "$1
echo "Input Name: "$2
echo "Input filelist: "$3" nFiles:"`wc -l $3 | awk '{print $1}'`
echo ""
inC=$1
inSampleName=$2
inFileList=$3
############
####Uncomment following line for using files from grid
############You will need to have your grid certificate installed
##################
voms-proxy-init
#xrootdTag="root://cmsxrootd.fnal.gov//"
#xrootdTag="root://xrootd-cms.infn.it//"
xrootdTag=""
#you will never get an email but good to have this set properly
NOTIFYADDRESS="${USER}@physics.rutgers.edu"
MAINDIR=`pwd`
SCRIPTDIR=`pwd`
LOGDIR=$MAINDIR/logs
mkdir -p $LOGDIR
CMSDIR=$CMSSW_BASE/src
BASEOUTDIR=/store/user/`whoami`/2016/AnalysisTrees2
jsonFile="Cert_271036-274421_13TeV_PromptReco_Collisions16_JSON.txt"
echo ""
echo "WARNING: JSON file, macro, and helperMacro need to be in the directory from which you submit the jobs or you will need to adjust paths"
echo ""
source /cvmfs/cms.cern.ch/cmsset_default.sh
cd $CMSSW_BASE
eval `scramv1 runtime -sh`
cd $CMSSW_BASE/..
tar --exclude-caches-all -czf ${SCRIPTDIR}/${CMSSW_VERSION}.tar.gz ${CMSSW_VERSION}/
cd $MAINDIR
eos root://cmseos.fnal.gov mkdir $BASEOUTDIR
# Specify input files here:
#link to folder with files
inputFolder=$inSampleName
#
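# Timestamp like MMDDYY_HHMMSS with "/" and ":" stripped so it is safe in file names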
datestamp=`date +"%D_%T" | sed 's|/||g' | sed 's|:||g'`
inputFolder2=$inputFolder"__"$datestamp
condorFile=$SCRIPTDIR"/submitJobsCondor_"$USER"_"$inputFolder2".condor"
if [ -e $condorFile ]
then
rm -rf $condorFile
fi
touch $condorFile
runScript=$SCRIPTDIR/runJobsCondor_tmp.sh #temporary script execution file
if [ -e $runScript ]
then
rm -rf $runScript
fi
touch $runScript
chmod a+x $runScript
echo "#!/bin/bash" >> $runScript
echo "export VO_CMS_SW_DIR=/cms/base/cmssoft" >> $runScript
echo "export COIN_FULL_INDIRECT_RENDERING=1" >> $runScript
echo "export HOME="$HOME >> $runScript
echo 'echo $VO_CMS_SW_DIR' >> $runScript
echo 'source $VO_CMS_SW_DIR/cmsset_default.sh' >> $runScript
echo "export SCRAM_ARCH=$SCRAM_ARCH" >> $runScript
echo "tar xzf ${CMSSW_VERSION}.tar.gz" >> $runScript
echo "cd ${CMSSW_VERSION}/src" >> $runScript
echo "scram b ProjectRename" >> $runScript
echo 'eval `scramv1 runtime -sh` ' >> $runScript
#echo "cd $SCRIPTDIR/Datacards/coNLSP" >> $runScript
echo "cd -" >> $runScript
# Specify .C macro to be run here:
echo 'root -q -b -l '$inC'\(\"$1\",\"$2\",\"$3\",$4\,$5\,$6\,$7\,$8\)' >> $runScript
###echo 'root -q -b -l '$inC'\(\"$1\",\"$2\",\"$3\",$4\,$5\,$6\,$7\)' >> $runScript
###echo 'root -q -b -l '$inC'\(\"$1\",\"$2\",$3\,$4\,$5\,$6\,$7\)' >> $runScript
echo "xrdcp -f \$2 root://cmseos.fnal.gov/${BASEOUTDIR}/\$9/\$2" >> $runScript
echo "" >> $runScript
echo "universe = vanilla" >> $condorFile
echo 'Requirements = OpSys == "LINUX" && (Arch != "DUMMY" )' >> $condorFile
echo "Executable = $runScript" >> $condorFile
echo "Should_Transfer_Files = YES" >> $condorFile
echo "request_disk = 10000000" >> $condorFile
echo "request_memory = 2100" >> $condorFile
echo "WhenTOTransferOutput = ON_EXIT_OR_EVICT" >> $condorFile
echo "Notification=never" >> $condorFile
echo "notify_user = $NOTIFYADDRESS" >> $condorFile
echo 'x509userproxy = $ENV(X509_USER_PROXY)' >> $condorFile
echo "Transfer_Input_Files = runEfficiency.sh, ${CMSSW_VERSION}.tar.gz, helperMiniAODv2.C,$inC,$jsonFile" >> $condorFile
echo "" >> $condorFile
echo "" >> $condorFile
#
while read line
do
#base=`echo $line | awk '{split($9,array,"_"); print array[1]}'`
base=`echo $line | awk '{split($1,array,"results_"); split(array[2],array2,".");print array2[1]}'`
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
# Parameters to be specified by user
mode=1 # primary dataset: 1=MUEG, 2=DOUBLEMU, 3=DOUBLEEG, 4=SINGLEMU, 5=SINGLEEL
noFakes=0
isMC=false
suffix="data"
#suffix="simulation"
# -- -- -- -- -- -- --
# These dont need to be modified usually:
iLo=0
iHi=-1
# ---------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------
#
ifname=$line
ofname="histo_${inSampleName}_${base}.root"
echo "output = $LOGDIR/\$(Cluster)_$suffix_${inSampleName}_${base}.out" >> $condorFile
echo "error = $LOGDIR/\$(Cluster)_$suffix_${inSampleName}_${base}.err" >> $condorFile
echo "log = $LOGDIR/\$(Cluster)_$suffix_${inSampleName}_${base}.log" >> $condorFile
###echo "arguments = CondorSimSamples/$inputFolder/$ifdir CondorPU/${ifdir}.pu.root $OUTDIR/\$(Cluster)_${ofile}_simulation.root $mode $iLo $iHi $noFakes" >> $condorFile
###echo "arguments = CondorSimSamples/$inputFolder/$ifdir $OUTDIR/\$(Cluster)_${ofile}_simulation.root $jsonFile $mode $iLo $iHi $noFakes $isMC" >> $condorFile
echo "arguments =$ifname $ofname $jsonFile $mode $iLo $iHi $noFakes $isMC $inSampleName" >> $condorFile
echo "queue" >> $condorFile
echo "" >> $condorFile
done < $inFileList
rm -rf $tmplist
cp $condorFile $LOGDIR/
cp $runScript $LOGDIR/
#cp ./createJobsToCondor.sh $LOGDIR/
echo ""
echo "Submitting: "$inputFolder
echo ""
echo "To submit jobs:"
echo "condor_submit submitJobsCondor_"$USER"_"$inputFolder2".condor"
echo ""
echo "To watch job progress:"
echo "watch -n 10 'condor_q "$USER" | grep \";\"'"
echo ""
echo "Output log files: "$LOGDIR
echo ""
echo "Output root files: "${BASEOUTDIR}/${inSampleName}
echo ""
# ===== vibolyoeung/symfony-vagrant-shell :: /provisioning/40-virtualhost.sh (Shell, UTF-8, no license) =====
#!/bin/bash
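# The stamp file makes this provisioner idempotent: the block below runs once, then is skipped on later provisions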
STAMP="/home/vagrant/.$(basename $0)"
echo $STAMP
if [ ! -f $STAMP ]; then
export DEBIAN_FRONTEND="noninteractive" ; set -e #-x
cat > /etc/apache2/sites-available/${APPNAME}.conf << _EOF
<VirtualHost *:80>
ServerName 127.0.0.1:8000
DocumentRoot /vagrant/${APPNAME}/web
<Directory /vagrant/${APPNAME}/web>
# enable the .htaccess rewrites
AllowOverride All
Require all granted
</Directory>
ErrorLog /var/log/apache2/${APPNAME}_error.log
CustomLog /var/log/apache2/${APPNAME}_access.log combined
</VirtualHost>
_EOF
service apache2 stop
a2enmod rewrite
a2ensite ${APPNAME}.conf
service apache2 start
touch $STAMP
fi
# ===== fliptopbox/raspberry :: /zero/scripts/day_time.sh (Shell, UTF-8, no license) =====
#!/bin/bash
#
#
# ██████ █████ ██ ██ ████████ ██ ███ ███ ███████
# ██ ██ ██ ██ ██ ██ ██ ██ ████ ████ ██
# ██ ██ ███████ ████ ██ ██ ██ ████ ██ █████
# ██ ██ ██ ██ ██ ██ ██ ██ ██ ██ ██
# ██████ ██ ██ ██ ██ ██ ██ ██ ███████
#
#
#
# Save JSON outputted from a URL
# https://stackoverflow.com/questions/3040904/save-json-outputted-from-a-url-to-a-file
. ./config.sh
# Incoming argument (optional) "true" to trigger API call
update=$1
mkdir -p $relativeData
apiurl="https://api.sunrise-sunset.org/json?lat=$lat&lng=$lng&date=today"
dest="$relativeData/sunrise-sunset"
# time offset to before/after the sunrise/sunset event
offset="15 minutes"
# compare the cache file's modified date against today's date
modified=""
if [ -f "$dest.json" ]; then
modified=`date -r "$dest.json" +"%m%d"`
fi
today=`date +"%m%d"`
if [ "$modified" != "$today" ]; then
update=true
fi
# The API call only needs to update once per day
if [ ! -f "$dest.json" ] || [ "$update" == "true" ]; then
wget $apiurl -O - > "$dest.tmp"
ok=`cat "$dest.tmp" | sed -r 's/.*"status":"(.*)".*/\1/g'`
if [[ $ok == "OK" ]]; then
# Extract the civil_twilight_(begin|end) attributes
# IMPORTANT! the API delivers UTC without seasonal adjustments
json=`cat "$dest.tmp" | sed -r 's/.*("civ[^,]+").*("civ[^,]+").*/\1;\2/g'`
# extract the respective time values
sunrise=$(echo $json | awk -F";" '{print $1}' | sed -r 's/.*"([^\s]+)".*/\1/g')
sunset=$(echo $json | awk -F";" '{print $2}' | sed -r 's/.*"([^\s]+)".*/\1/g')
# Convert the AM/PM time to 24hour time
sunrise=$(date -d "$sunrise-$offset" | awk '{print $4}')
sunset=$(date -d "$sunset+$offset" | awk '{print $4}')
# save the valid json file
mv -f "$dest.tmp" "$dest.json"
# Cache the parsed values
lastupdate=`date -u +"%Y-%m-%dT%H%M%SZ"`
echo "lastupdate=$lastupdate" > "$dest.txt"
echo "sunrise=$sunrise" >> "$dest.txt"
echo "sunset=$sunset" >> "$dest.txt"
fi
fi
# some default values
lastupdate="Unknown"
sunrise="04:00:00"
sunset="20:00:00"
# try and load the cached values
if [ -f "$dest.txt" ]; then
. "$dest.txt"
fi
# Cast the time value as long (eg. 0345512)
UTC=`date -u +"%H:%M:%S"`
intUTC=$(echo $UTC | sed 's/\://g')
intSunrise=$(echo $sunrise | sed 's/\://g')
intSunset=$(echo $sunset | sed 's/\://g')
daytime=0
# Determine if time is currently between sunrise/sunset
if [ $intUTC -gt $intSunrise -a $intUTC -lt $intSunset ]; then
daytime=1
fi
echo "sunrise" $sunrise \
"sunset" $sunset \
"UTC" $UTC \
"daytime" $daytime \
"update" $update \
"lastupdate" $lastupdate
# ===== VGP/vgp-assembly :: /dx_applets/proc10xg/src/proc10xg.sh (Shell, UTF-8, BSD-3-Clause) =====
#!/bin/bash
# proc10xG 0.0.1
set -x -e -o pipefail
main() {
echo "Value of asm: '$input_fastq'"
dx-download-all-inputs --parallel
mv in/*/*/*.fastq.gz .
for e in ${input_fastq_name[@]}; do
echo $e
if [[ $e == *"R1"* ]]; then
echo $e >> fw.ls
elif [[ $e == *"R2"* ]]; then
echo $e >> rv.ls
fi
done
cat fw.ls | sort | uniq > fw_def.ls
cat rv.ls | sort | uniq > rv_def.ls
cat fw_def.ls
cat rv_def.ls
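# Pull the sample/lane token (e.g. "S1_L001") from each filename to use as an output prefix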
cat fw_def.ls | grep -oe "S[0-9]_[A-Z][0-9]*" > prefix.ls
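# Print the per-pair commands first for the log, then run them in parallel across all cores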
paste fw_def.ls rv_def.ls prefix.ls | awk '{print "python /opt/proc10xG/process_10xReads.py -a -1 "$1" -2 "$2" -o trimmed_"$3}' #| parallel --gnu -j $(nproc)
paste fw_def.ls rv_def.ls prefix.ls | awk '{print "python /opt/proc10xG/process_10xReads.py -a -1 "$1" -2 "$2" -o trimmed_"$3}' | parallel --gnu -j $(nproc)
mkdir -p ~/out/output_fastq
mv trimmed_*.fastq.gz ~/out/output_fastq
dx-upload-all-outputs --parallel
}
# ===== mhus/mhus-docker :: /liferay-docker/builder/build_all_images.sh (Shell, UTF-8, Apache-2.0) =====
#!/bin/bash
source ./_common.sh
BUILD_ALL_IMAGES_PUSH=${1}
function build_image {
#
# LIFERAY_DOCKER_IMAGE_FILTER="7.2.10-dxp-1 " ./build_all_images.sh
# LIFERAY_DOCKER_IMAGE_FILTER=7.2.10 ./build_all_images.sh
# LIFERAY_DOCKER_IMAGE_FILTER=commerce ./build_all_images.sh
#
if [ -n "${LIFERAY_DOCKER_IMAGE_FILTER}" ] && [[ ! $(echo ${1} ${2} ${3} ${4} | grep ${LIFERAY_DOCKER_IMAGE_FILTER}) ]]
then
return
fi
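# Build ID: the release version if given, otherwise the basename of the release file URL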
if [ ! -n "${1}" ]
then
local build_id=${2##*/}
else
local build_id=${1}
fi
echo ""
echo "Building Docker image ${build_id} based on ${2}."
echo ""
{
LIFERAY_DOCKER_FIX_PACK_URL=${3} LIFERAY_DOCKER_RELEASE_FILE_URL=${2} LIFERAY_DOCKER_RELEASE_VERSION=${1} LIFERAY_DOCKER_TEST_HOTFIX_URL=${5} LIFERAY_DOCKER_TEST_INSTALLED_PATCHES=${4} time ./build_image.sh ${BUILD_ALL_IMAGES_PUSH} 2>&1
if [ $? -gt 0 ]
then
echo "FAILED: ${build_id}" >> ${LOGS_DIR}/results
else
echo "SUCCESS: ${build_id}" >> ${LOGS_DIR}/results
fi
} | tee ${LOGS_DIR}/${build_id}".log"
}
function build_images_dxp_70 {
build_image \
7.0.10-ga1 \
files.liferay.com/private/ee/portal/7.0.10/liferay-dxp-digital-enterprise-tomcat-7.0-ga1-20160617092557801.zip \
"" \
""
for fix_pack_id in {88..89}
do
build_image \
7.0.10-de-${fix_pack_id} \
files.liferay.com/private/ee/portal/7.0.10.12/liferay-dxp-digital-enterprise-tomcat-7.0.10.12-sp12-20191014182832691.7z \
files.liferay.com/private/ee/fix-packs/7.0.10/de/liferay-fix-pack-de-${fix_pack_id}-7010.zip \
de-${fix_pack_id}-7010
done
build_image \
7.0.10-de-90,7.0.10-sp13 \
files.liferay.com/private/ee/portal/7.0.10.13/liferay-dxp-digital-enterprise-tomcat-7.0.10.13-sp13-slim-20200310164407389.7z \
"" \
de-90-7010
build_image \
7.0.10-de-91 \
files.liferay.com/private/ee/portal/7.0.10-de-91/liferay-dxp-digital-enterprise-tomcat-7.0.10-de-91-slim-20200420163527702.7z \
"" \
de-91-7010,hotfix-6871-7010 \
files.liferay.com/private/ee/fix-packs/7.0.10/hotfix/liferay-hotfix-6871-7010.zip
build_image \
7.0.10-security-de-91-202003-1 \
files.liferay.com/private/ee/portal/7.0.10-de-91/liferay-dxp-digital-enterprise-tomcat-7.0.10-de-91-20200420163527702.7z \
files.liferay.com/private/ee/fix-packs/7.0.10/security-de/liferay-security-de-91-202003-1-7010.zip \
de-91-7010,security-de-91-202003-1-7010
build_image \
7.0.10-de-92 \
files.liferay.com/private/ee/portal/7.0.10-de-92/liferay-dxp-digital-enterprise-tomcat-7.0.10-de-92-slim-20200519134012683.7z \
"" \
de-92-7010,hotfix-6854-7010 \
files.liferay.com/private/ee/fix-packs/7.0.10/hotfix/liferay-hotfix-6854-7010.zip
}
function build_images_dxp_71 {
build_image \
7.1.10-ga1 \
files.liferay.com/private/ee/portal/7.1.10/liferay-dxp-tomcat-7.1.10-ga1-20180703090613030.zip \
"" \
""
for fix_pack_id in {1..4}
do
build_image \
7.1.10-dxp-${fix_pack_id} \
files.liferay.com/private/ee/portal/7.1.10/liferay-dxp-tomcat-7.1.10-ga1-20180703090613030.zip \
files.liferay.com/private/ee/fix-packs/7.1.10/dxp/liferay-fix-pack-dxp-${fix_pack_id}-7110.zip \
dxp-${fix_pack_id}-7110
done
build_image \
7.1.10-dxp-5,7.1.10-sp1 \
files.liferay.com/private/ee/portal/7.1.10.1/liferay-dxp-tomcat-7.1.10.1-sp1-20190110085705206.zip \
"" \
dxp-5-7110
for fix_pack_id in {6..9}
do
build_image \
7.1.10-dxp-${fix_pack_id} \
files.liferay.com/private/ee/portal/7.1.10.1/liferay-dxp-tomcat-7.1.10.1-sp1-20190110085705206.zip \
files.liferay.com/private/ee/fix-packs/7.1.10/dxp/liferay-fix-pack-dxp-${fix_pack_id}-7110.zip \
dxp-${fix_pack_id}-7110
done
build_image \
7.1.10-dxp-10,7.1.10-sp2 \
files.liferay.com/private/ee/portal/7.1.10.2/liferay-dxp-tomcat-7.1.10.2-sp2-20190422172027516.zip \
"" \
dxp-10-7110
for fix_pack_id in {11..14}
do
build_image \
7.1.10-dxp-${fix_pack_id} \
files.liferay.com/private/ee/portal/7.1.10.2/liferay-dxp-tomcat-7.1.10.2-sp2-20190422172027516.zip \
files.liferay.com/private/ee/fix-packs/7.1.10/dxp/liferay-fix-pack-dxp-${fix_pack_id}-7110.zip \
dxp-${fix_pack_id}-7110
done
build_image \
7.1.10-dxp-15,7.1.10-sp3 \
files.liferay.com/private/ee/portal/7.1.10.3/liferay-dxp-tomcat-7.1.10.3-sp3-slim-20191118185746787.7z \
"" \
dxp-15-7110
for fix_pack_id in {16..16}
do
build_image \
7.1.10-dxp-${fix_pack_id} \
files.liferay.com/private/ee/portal/7.1.10.3/liferay-dxp-tomcat-7.1.10.3-sp3-20191118185746787.7z \
files.liferay.com/private/ee/fix-packs/7.1.10/dxp/liferay-fix-pack-dxp-${fix_pack_id}-7110.zip \
dxp-${fix_pack_id}-7110
done
build_image \
7.1.10-dxp-17,7.1.10-sp4 \
files.liferay.com/private/ee/portal/7.1.10.4/liferay-dxp-tomcat-7.1.10.4-sp4-slim-20200331093526761.7z \
"" \
dxp-17-7110
build_image \
7.1.10-security-dxp-17-202003-3 \
files.liferay.com/private/ee/portal/7.1.10.4/liferay-dxp-tomcat-7.1.10.4-sp4-20200331093526761.7z \
files.liferay.com/private/ee/fix-packs/7.1.10/security-dxp/liferay-security-dxp-17-202003-3-7110.zip \
dxp-17-7110,security-dxp-17-202003-3-7110
}
function build_images_dxp_72 {
build_image \
7.2.10-ga1 \
files.liferay.com/private/ee/portal/7.2.10/liferay-dxp-tomcat-7.2.10-ga1-20190531140450482.7z \
"" \
""
build_image \
7.2.10-dxp-1 \
files.liferay.com/private/ee/portal/7.2.10/liferay-dxp-tomcat-7.2.10-ga1-20190531140450482.7z \
files.liferay.com/private/ee/fix-packs/7.2.10/dxp/liferay-fix-pack-dxp-1-7210.zip \
dxp-1-7210
build_image \
7.2.10-dxp-2,7.2.10-sp1 \
files.liferay.com/private/ee/portal/7.2.10.1/liferay-dxp-tomcat-7.2.10.1-sp1-slim-20191009103614075.7z \
"" \
dxp-2-7210
build_image \
7.2.10-dxp-3 \
files.liferay.com/private/ee/portal/7.2.10.1/liferay-dxp-tomcat-7.2.10.1-sp1-20191009103614075.7z \
files.liferay.com/private/ee/fix-packs/7.2.10/dxp/liferay-fix-pack-dxp-3-7210.zip \
dxp-3-7210
build_image \
7.2.10-dxp-4 \
files.liferay.com/private/ee/portal/7.2.10-dxp-4/liferay-dxp-tomcat-7.2.10-dxp-4-slim-20200121112425051.7z \
"" \
dxp-4-7210,hotfix-1167-7210 \
files.liferay.com/private/ee/fix-packs/7.2.10/hotfix/liferay-hotfix-1167-7210.zip
build_image \
7.2.10-security-dxp-4-202003-4 \
files.liferay.com/private/ee/portal/7.2.10-dxp-4/liferay-dxp-tomcat-7.2.10-dxp-4-20200121112425051.7z \
files.liferay.com/private/ee/fix-packs/7.2.10/security-dxp/liferay-security-dxp-4-202003-4-7210.zip \
dxp-4-7210,security-dxp-4-202003-4-7210
build_image \
7.2.10-dxp-5,7.2.10-sp2 \
files.liferay.com/private/ee/portal/7.2.10.2/liferay-dxp-tomcat-7.2.10.2-sp2-slim-20200511121558464.7z \
"" \
dxp-5-7210,hotfix-1467-7210 \
files.liferay.com/private/ee/fix-packs/7.2.10/hotfix/liferay-hotfix-1467-7210.zip
build_image \
7.2.10-security-dxp-5-202003-1 \
files.liferay.com/private/ee/portal/7.2.10.2/liferay-dxp-tomcat-7.2.10.2-sp2-20200511121558464.7z \
files.liferay.com/private/ee/fix-packs/7.2.10/security-dxp/liferay-security-dxp-5-202003-1-7210.zip \
dxp-5-7210,security-dxp-5-202003-1-7210
}
function main {
LOGS_DIR=logs-$(date "$(date)" "+%Y%m%d%H%M")
mkdir -p ${LOGS_DIR}
local release_file_urls=(
#releases.liferay.com/commerce/2.0.7/liferay-commerce-2.0.7-7.2.x-201912261227.7z
files.liferay.com/private/ee/commerce/2.1.1/liferay-commerce-enterprise-2.1.1-7.1.x-202006040810.7z
files.liferay.com/private/ee/commerce/2.1.1/liferay-commerce-enterprise-2.1.1-7.2.x-202006040818.7z
releases.liferay.com/portal/6.1.2-ga3/liferay-portal-tomcat-6.1.2-ce-ga3-20130816114619181.zip
files.liferay.com/private/ee/portal/6.1.30.5/liferay-portal-tomcat-6.1-ee-ga3-sp5-20160201142343123.zip
releases.liferay.com/portal/6.2.5-ga6/liferay-portal-tomcat-6.2-ce-ga6-20160112152609836.zip
files.liferay.com/private/ee/portal/6.2.10.21/liferay-portal-tomcat-6.2-ee-sp20-20170717160924965.zip
releases.liferay.com/portal/7.0.6-ga7/liferay-ce-portal-tomcat-7.0-ga7-20180507111753223.zip
releases.liferay.com/portal/7.1.3-ga4/liferay-ce-portal-tomcat-7.1.3-ga4-20190508171117552.7z
releases.liferay.com/portal/7.2.1-ga2/liferay-ce-portal-tomcat-7.2.1-ga2-20191111141448326.7z
releases.liferay.com/portal/7.3.2-ga3/liferay-ce-portal-tomcat-7.3.2-ga3-20200519164024819.7z
files.liferay.com/private/ee/portal/7.3.10-ep3/liferay-dxp-tomcat-7.3.10-ep3-20200522044030318.7z
#releases.liferay.com/portal/snapshot-7.1.x/201902130905/liferay-portal-tomcat-7.1.x.7z
#releases.liferay.com/portal/snapshot-master/201902131509/liferay-portal-tomcat-master.7z
#files.liferay.com/private/ee/portal/snapshot-ee-6.2.x/201808160944/liferay-portal-tomcat-ee-6.2.x.zip
#files.liferay.com/private/ee/portal/snapshot-7.1.x-private/201808162051/liferay-portal-tomcat-7.1.x-private.zip
)
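# Plain URLs from the list above are built with no explicit version (${1}),
# so build_image derives the build_id from the file name via the ${2##*/}
# branch above.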
for release_file_url in ${release_file_urls[@]}
do
build_image "" ${release_file_url} "" ""
done
build_images_dxp_70
build_images_dxp_71
build_images_dxp_72
echo ""
echo "Results: "
echo ""
cat ${LOGS_DIR}/results
}
main
| true |
93ff0e6574fc69a96dc892492d6ba3e859db7e5a | Shell | getkub/SplunkScriplets | /devOps/SplunkEnterprise/configureSplunkMain.sh | UTF-8 | 4,519 | 3.90625 | 4 | [] | no_license |
#!/bin/bash
# ============================================================================
# Script to configure Splunk after first Deploy
# Version : 0.1
# ============================================================================
# ============================================================================
# Check that this script has been executed under root
# ============================================================================
if [[ $EUID -ne 0 ]]; then
echo "Error: This script must be run as root" 1>&2
exit 1
fi
install_path="/home/deploy"
# Use a script or something to fetch environment details
# ServiceName=`myenvScript`
ServiceName=`getEnvironment`
# DS - Deployment Server
# SH - Splunk Head
# SI - Splunk Indexer
# DS_SH_SI - combination etc.. in alphabetical order
SERVERTYPE=`getServerType`
# Search Peers are other Peers
SearchPeers=`getSearchPeers`
# ============================================================================
# #<Identify Relevant Environment> and Service
# ============================================================================
THISHOST=`uname -n`
INSTALLEDCHECK=`rpm -qa | grep splunk-`
SPLUNK_HOME="/opt/splunk"
SPLUNK_CRED="newchangeme"
ADM_PORT="8071"
RC=0
# ============================================================================
# Check if Splunk is currently installed
# ============================================================================
if [[ -n "$INSTALLEDCHECK" ]]
then
ISINSTALLED="True"
else
ISINSTALLED="False"
echo "Warn: Splunk not installed. Exiting without any changes!"
exit 1
fi
if [ "$ServiceName" != "$THISHOST" ];then
echo "Error: Unable to find ${THISHOST} in main configuration. Exiting without any actions..."
exit 2
fi
echo "Info: Starting to Configure Splunk"
echo "Info: Stopping Splunk if its already running"
/bin/su - splunk -c "/opt/splunk/bin/splunk stop "
# ============================================================================
# Copy required files
# ============================================================================
echo "Info: Copying required files"
${install_path}/myEnv/scripts/copyFiles.py ${install_path}/myEnv/configs/csv/INITIAL_Deploy_Files.csv
# ============================================================================
# Install Default Apps
# ============================================================================
# Cheap way to filter
if [ "$SERVERTYPE" == "SH_SI" ]; then
BaseApps="sos"
elif [ "$SERVERTYPE" == "SH" ]; then
BaseApps="sideview_utils sos"
elif [ "$SERVERTYPE" == "SI" ]; then
BaseApps="TA-sos"
fi
echo "Info: ServerType= $SERVERTYPE "
for appName in $BaseApps
do
if [ ! -z "$BaseApps" ]
then
echo "Info: Installing APP: $appName "
/bin/su - splunk -c "${install_path}/SPLUNK_APPS/scripts/install_pkg.sh $appName "
fi
done
# ============================================================================
# Re-Starting Splunk
# ============================================================================
/bin/su - splunk -c "touch ${SPLUNK_HOME}/etc/.ui_login "
/bin/su - splunk -c "/opt/splunk/bin/splunk start --no-prompt --answer-yes --accept-license"
# ============================================================================
# Configure Search Peers
# ============================================================================
SearchPeers=`${install_path}/myEnv/scripts/getEnv.py | grep getSearchPeers| awk -F'=' '{print $2}'`
if [ "$SearchPeers" == "NA" ]
then
echo "Info: No Search Peers to Configure"
else
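# Split the colon-separated peer list (host1:host2:...) into a bash array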
SearchPeersArray=(${SearchPeers//:/ })
for peer in "${SearchPeersArray[@]}"
do
echo "Info: Adding Search Peer - ${peer}"
/bin/su - splunk -c "/opt/splunk/bin/splunk add search-server -host ${peer}:$ADM_PORT -auth splunk:$SPLUNK_CRED -remoteUsername splunk -remotePassword $SPLUNK_CRED" ; j=$? ; RC=$(($j + $RC))
done
fi
if [ $RC -ne 0 ]; then
echo "Error: *** Search Peer configuration error *** "
exit 1
fi
# Redirect Port 8000 to 443 for end users to login normally
# To display run: iptables -t nat -L -n -v
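# Delete any existing copy of the rule first so repeated runs do not stack duplicate redirects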
iptables -t nat -D PREROUTING -p tcp --dport 443 -j REDIRECT --to-ports 8000
iptables -t nat -A PREROUTING -p tcp --dport 443 -j REDIRECT --to-ports 8000
iptables-save > /etc/iptables.rules
echo "Info: Successfully Configured Splunk"
# ============================================================================
# End of Script
# ============================================================================
| true |
5bfbfb62511543616aa2a639d49046729cb4621e | Shell | splo/git-tools | /bin/git-foreach | UTF-8 | 215 | 3.359375 | 3 | ["MIT"] | permissive |
#!/usr/bin/env bash
find . -mindepth 1 -maxdepth 1 | while read -r dir; do
[[ -d "${dir}/.git" ]] && {
pushd "${dir}" >/dev/null
echo "# ${dir}"
eval "$@"
popd >/dev/null
}
done
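# Usage sketch (hypothetical invocations; anything after "git foreach" is
# eval'd inside each first-level directory that contains a .git folder):
#   git foreach git status --short
#   git foreach 'git fetch --all --prune'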
| true |
eb3fa30a04caba1b363b3ab8bca5f0a4a4c991d4 | Shell | Startpiloten/ce-gen | /ce-gen.sh | UTF-8 | 5,955 | 3.671875 | 4 | [] | no_license |
#!/usr/bin/env bash
clear
#parse_yaml function
parse_yaml() {
local prefix=$2
local s='[[:space:]]*' w='[a-zA-Z0-9_]*' fs=$(echo @|tr @ '\034')
sed -ne "s|^\($s\)\($w\)$s:$s\"\(.*\)\"$s\$|\1$fs\2$fs\3|p" \
-e "s|^\($s\)\($w\)$s:$s\(.*\)$s\$|\1$fs\2$fs\3|p" $1 |
awk -F$fs '{
indent = length($1)/2;
vname[indent] = $2;
for (i in vname) {if (i > indent) {delete vname[i]}}
if (length($3) > 0) {
vn=""; for (i=0; i<indent; i++) {vn=(vn)(vname[i])("_")}
printf("%s%s%s=\"%s\"\n", "'$prefix'",vn, $2, $3);
}
}'
}
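# Example of what parse_yaml emits (a sketch; the actual keys and values
# depend on the YAML). A file such as:
#   extension:
#     name: "my_ext"
#     path: "packages/my_ext"
# is flattened, after the eval below, into:
#   extension_name="my_ext"
#   extension_path="packages/my_ext"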
# Check YAML files
if [ ! -f .t3.cegen.yaml ]; then
echo ".t3.cegen.yaml not found!"
cp vendor/analogde/ce-gen/lib/.t3.cegen.yaml .t3.cegen.yaml
echo ".t3.cegen.yaml was created!"
exit
fi
# Read yaml file
load_yaml() {
eval $(parse_yaml .t3.cegen.yaml)
}
load_yaml
echo "Extension Name" $extension_name
COLUMNS=12
if [ -f ~/.profile ]
then
source ~/.profile
fi
continue=true
bindir=vendor/analogde/ce-gen
libdir=vendor/analogde/ce-lib
extname=$extension_name
extensiondir=$extension_path
ctype () {
read -p "Enter cType you want to create: " cename
while [[ $cename == '' ]]
do
echo "Enter a valid Name"
read -p "Enter cType you want to create: " cename
done
cename=$(echo "$cename" | sed 's/ //g' | sed 's/[^a-zA-Z0-9]//g' | tr '[:upper:]' '[:lower:]')
cenameUpper=${cename};
cenameUpper=`echo ${cenameUpper:0:1} | tr '[a-z]' '[A-Z]'`${cenameUpper:1}
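# Uppercases only the first character; on bash >= 4 this could simply be cenameUpper=${cename^}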
if [ -f "${extensiondir}/Configuration/TCA/tt_content_${cename}.php" ]
then
echo "Content Element exists"
exit 1
fi
}
title () {
read -p "Content Element Title: " cetitle
while [[ ${cetitle} == '' ]]
do
read -p "Content Element Title: " cetitle
echo "Enter a valid Content Element Title"
done
}
description () {
read -p "Content Element Description: " cedescription
while [[ ${cedescription} == '' ]]
do
read -p "Content Element Description: " cedescription
echo "Enter a valid Content Element Description"
done
}
mmtitle () {
read -p "Enter a Title for the MM Item: " mmtitle
while [[ ${mmtitle} == '' ]]
do
read -p "Enter a Title for the MM Item: " mmtitle
echo "Enter a valid Title for the MM Item (singular)"
done
mmtitleLower=$(echo "$mmtitle" | sed 's/ //g' | sed 's/[^a-zA-Z0-9]//g' | tr '[:upper:]' '[:lower:]')
}
create_simple_ce () {
ctype
if [ -f "${extensiondir}/Configuration/TCA/tt_content_${cename}.php" ]
then
echo
echo "This cType is already present"
echo
exit 1
else
title
description
echo
source ${bindir}/bin/basic-generator.sh
echo
fi
}
create_irre_ce () {
ctype
if [ -f "${extensiondir}/Configuration/TCA/tt_content_${cename}.php" ]
then
echo
echo "This cType is already present"
echo
exit 1
else
title
description
echo
source ${bindir}/bin/irre-generator.sh
echo
fi
}
create_mm_ce () {
mmtitle
ctype
if [ -f "${extensiondir}/Configuration/TCA/tt_content_${cename}.php" ]
then
echo
echo "This cType is already present"
echo
exit 1
else
title
description
echo
source ${bindir}/bin/mm-generator.sh
echo
fi
}
choose_type_to_generate () {
PS3='What type of element do you want to generate: '
options=("Default Item" "Irre Item" "MM Item")
select opt in "${options[@]}"
do
case $opt in
"Default Item")
echo
create_simple_ce
echo
break
;;
"Irre Item")
echo
create_irre_ce
echo
break
;;
"MM Item")
echo
create_mm_ce
echo
break
;;
*) echo invalid option;;
esac
done
}
clear_cache() {
if [ -f typo3cms ]
then
echo "Clear Cache and Update Schema"
php typo3cms database:updateschema "*"
php typo3cms cache:flush
fi
}
run_generator () {
if [ -d "vendor/analogde/ce-lib" ]
then
read -p "Do you want to import a cType from the library? [Y/N] " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
source ${bindir}/bin/ce-library-tool.sh
else
echo
echo "Ok! Create custom cType now:"
choose_type_to_generate
fi
else
choose_type_to_generate
fi
}
restart () {
echo
echo
read -p "Do you want to restart? [Y/N] " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
continue=true
echo
else
continue=false
echo
echo "Bye!"
fi
}
info () {
echo
printf '\033[0;31m'
echo "For older version add this to your TS"
echo "lib.ce_${extname} < lib.default"
printf '\033[0m'
}
prepare () {
mkdir -p $extensiondir/Configuration/TSconfig/ContentElements
mkdir -p $extensiondir/Resources/Private/ContentElements/Templates
mkdir -p $extensiondir/Configuration/TCA
mkdir -p $extensiondir/Resources/Build/Assets/Scss/content-elements
mkdir -p $extensiondir/Resources/Private/ContentElements/BackendPreviews
}
while [ $continue == "true" ]
do
prepare
run_generator
clear_cache
info
restart
done
| true |
5fd7afe6f11ea90d66af77fa97aa255833a8ef7a | Shell | charlesvardeman/ArchInstallMBP | /pre_install.sh | UTF-8 | 1,588 | 3.5625 | 4 | [] | no_license |
#!/usr/bin/bash
# Defining the shell path and global variables
SHELL_PATH=$(readlink -f "$0" | xargs dirname)
source ${SHELL_PATH}/scripts/global.sh
# Please make changes to the drive based on your hardware configuration
info "Formatting the drivers..."
mkfs.vfat -F32 /dev/sda1
mkfs.ext4 /dev/sda2
#mkswap /dev/sda3
#swapon /dev/sda3
info "Mounting the drives 1. Root, 2. Boot "
mount /dev/sda2 /mnt
mkdir /mnt/boot
mkdir /mnt/home
mount /dev/sda1 /mnt/boot
mount /dev/sda3 /mnt/home
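# NOTE: /dev/sda3 is mounted as /home without being formatted above (the swap
# lines for it are commented out); on a truly fresh disk, create a filesystem
# on it first, e.g. mkfs.ext4 /dev/sda3.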
lsblk
info "Installing Reflector to find the best mirror list for downloading Arch Linux"
pacman -Sy --noconfirm reflector
cp /etc/pacman.d/mirrorlist /etc/pacman.d/mirrorlist.backup
reflector --verbose --latest 10 --sort rate --save /etc/pacman.d/mirrorlist
info "Installing all packages to get sway under wayland working with audio. Some additional useful packages are included also."
pacstrap /mnt base base-devel vim neovim intel-ucode sudo networkmanager wpa_supplicant git alsa-utils pulseaudio-alsa coreutils dosfstools util-linux exa linux linux-firmware linux-headers linux-lts linux-lts-headers broadcom-wl sysfsutils usbutils mtools dialog
info "Generating fstab for the drives."
genfstab -L -p /mnt >> /mnt/etc/fstab
info "Creating RAM Disk."
echo "tmpfs /tmp tmpfs rw,nodev,nosuid,size=4G 0 0" >> /mnt/etc/fstab
info "Copying install scripts to new location"
cp -R ${SHELL_PATH} /mnt/
info "Entering as root into Arch Linux Install Drive"
info "You need to run install.sh to set all configurations for arch Linux system and Macbook Pro settings."
arch-chroot /mnt
umount -R /mnt
| true |