blob_id stringlengths 40..40 | language stringclasses 1 value | repo_name stringlengths 4..115 | path stringlengths 2..970 | src_encoding stringclasses 28 values | length_bytes int64 31..5.38M | score float64 2.52..5.28 | int_score int64 3..5 | detected_licenses listlengths 0..161 | license_type stringclasses 2 values | text stringlengths 31..5.39M | download_success bool 1 class |
---|---|---|---|---|---|---|---|---|---|---|---
fd6b228ba66c2a58ce91faa03d11063ad4a0dea9 | Shell | caseyjlaw/misc | /psr-imager.sh | UTF-8 | 2,349 | 3.46875 | 3 | [] | no_license |
#! /bin/bash
#
# Master script to create phased images of pulsar
# claw, 13mar09
#
# Usage: psr-imager.sh
#
# Output files:
# - text files with time filters for each phase bin (possibly more than one to avoid 256 line limit of miriad)
# - beam and map images for each phase bin and pol (and possibly to avoid 256 line limit of miriad)
# - beam and map image cubes
# - mean-subtracted, clean and restored image cubes
######################
# customize here
# observation properties:
#period=0.7136692 # period that fixes b0329 phase shift
#period=0.7137 # period that makes b0329 pulse more constant with time
#period=0.358738 # period for b1933
#period=0.253065 # nominal period for b0950+08
#period=0.25304 # period that works for b0950+08-0.15s-4000
bin=0.10 # integration bin size in seconds
ints=3000 # number of integrations in file (3000 for j0332-0.1s)
# time for j0332-0.1s:
t0h=02 # start hour
t0m=05 # start minute
t0s=02.4 # start second
# time for b1933-0.1s:
#t0h=19
#t0m=37
#t0s=25.3
# time for b0950+09-0.15s-4000
#t0h=02
#t0m=07
#t0s=19.7
# time for b0950+09-0.1s-6000
#t0h=01
#t0m=32
#t0s=35.7
# output properties:
phasebins=8 # number of bins per pulse profile
outphases=1 # not yet implemented
#suffix='tst'
visroot='fxc-j0332-0.1s'
imroot='j0332-0.1s-test'
frac='all' # 'all', '1/3', '2/3', '2/2', etc.
cleanup=1 # delete some of the intermediate files
######################
set -e -x
# loop to do trial periods
for ((i=1; i<=1; i++))
do
# period=`echo 0.25370-0.00002*${i} | bc`
period=0.7536692
suffix=p${i} # add a suffix to the output files to identify different periods used. not needed for one trial period.
#clean up older files
rm -rf ${imroot}-?-${suffix}-*.* ${imroot}-??-${suffix}-bin*.* ${imroot}-???-${suffix}-bin*.* ${imroot}-icube-${suffix}*.* time-${suffix}-bin*
# create text file with time filter
psr-timeselect.sh ${period} ${bin} ${phasebins} ${outphases} ${ints} ${t0h} ${t0m} ${t0s} ${suffix} ${frac}
# invert the data with the time filter
psr-invert.sh ${visroot} ${imroot} ${suffix} ${phasebins}
# average pulse bins for each pulse
psr-avsubcl.sh ${imroot} ${suffix} ${phasebins}
# clean up again
if [ $cleanup -eq 1 ]
then
rm -rf ${imroot}-?-${suffix}-*.* ${imroot}-??-${suffix}-bin*.* time-${suffix}-bin*
fi
done
| true |
210ece52ee079893e290d84fd8c5fa28637f311c | Shell | google-code/bassh-secure-shell-in-bash | /sssh.sh | UTF-8 | 28,391 | 3.703125 | 4 | [] | no_license |
function toUpper()
{
iValue=$1
oValue=`echo $iValue | tr '[a-z]' '[A-Z]'`
oValue=`echo $oValue | tr 'X' 'x'`
echo $oValue
}
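# Illustrative behavior of toUpper (not part of the original script): the
# second tr keeps the escape prefix lowercase after uppercasing, e.g.
#   toUpper '\x0a'  ->  \x0A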
function toHex()
{
iValue=$1
oValue=`echo "obase=16; $iValue" | bc`
oValue=`printf "%02x" "0x$oValue"`
oValue=`toUpper $oValue`
echo $oValue
}
function toHexStr()
{
iValue=$1
totLen=$2
let "binTotLen = totLen * 8"
ivBin=`echo "obase=2; $iValue" | bc`
ivLen=`echo $ivBin | wc -c`
let "ivLen = ivLen - 1"
let "numPads = binTotLen - ivLen"
let "limit = binTotLen - 8"
pStr=""
for i in `seq 1 $numPads`
do
pStr="0$pStr"
done
val="$pStr$ivBin"
oValue=""
for i in `seq 0 8 $limit`
do
byte="${val:$i:8}"
byteDec=`echo "obase=10;ibase=2; $byte" | bc`
byteHex=`printf "%02x" $byteDec`
byteHex="\x$byteHex"
oValue="$oValue$byteHex"
done
oValue=`toUpper $oValue`
echo $oValue
}
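# Illustrative behavior of toHexStr (hand-checked examples, assumed typical
# inputs): an integer is rendered as \x-escaped bytes, zero-padded to the
# requested byte width.
#   toHexStr 16 4   ->  \x00\x00\x00\x10
#   toHexStr 255 1  ->  \xFF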
function getRandomHexString()
{
length=$1
rn=""
for it in `seq 1 $length`
do
let "R=$RANDOM%256"
hexR=`echo "obase=16; ibase=10; $R" | bc`
rn="$rn\x$hexR"
done
echo $rn
}
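# Illustrative output of getRandomHexString 4 (random, hypothetical):
#   \xA3\x7\xF1\x5C
# Note that bc prints no leading zero, so bytes below 0x10 come out as a
# single digit (\x7); bash's echo -e still decodes one- or two-digit \x
# escapes, so the string remains usable.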
pow() #@ USAGE: pow base exponent
{
echo $(( ${1:?} ** ${2:?} ))
}
function hexStrToNum()
{
strng=$1
type=$2
strng=`echo $strng | tr 'x' ' ' 2>/dev/null`
strng=`echo $strng | tr '\' ' ' 2>/dev/null`
res=""
for ch in $strng
do
ch=`echo $ch | tr '[a-z]' '[A-Z]'`
res=$res$ch
done
if [ "$type" == "d" ]; then
echo "obase=10; ibase=16; $res" | bc
else
echo $res
fi
}
function fetchBytes()
{
fName=$1
initByte=$2
numBytes=$3
let "initByte = initByte + 1"
contentHex=`od -t x1 -v $fName | awk '{$1=""; print $0}' | tr -d '\n' 2>/dev/null`
seekValue=""
for ch in $contentHex
do
if [ $numBytes -eq 0 ]; then
break
fi
if [ $initByte -gt 0 ]; then
let "initByte = initByte - 1"
fi
if [ $initByte == 0 ]; then
let "numBytes = numBytes - 1"
seekValue="$seekValue $ch"
fi
done
echo $seekValue
}
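# Illustrative usage of fetchBytes (assumed file contents): if 'outflow'
# begins with the banner "SSH-", then
#   fetchBytes outflow 0 4  ->  "53 53 48 2d"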
function bytesToHexString()
{
input=$1
hs=""
for ch in $input
do
hs="$hs\x$ch"
done
echo $hs
}
function seekByteLoc()
{
byte=$1
file=$2
fContent=`fetchBytes $file 0 2000` # search only the first 2000 bytes of $file
pos=0
for ch in $fContent
do
let "pos = pos + 1"
if [ "$ch" == "$byte" ]; then
break
fi
done
echo $pos
}
function h2a()
{
in=$1
res=""
for ch in $in
do
ch=`echo $ch | tr '[a-z]' '[A-Z]' 2>/dev/null`
chBin=`echo "obase=10; ibase=16; $ch" | bc`
res="$res$chBin"
done
echo $res
}
function sendAndRecv()
{
msgCode=$1
senddatah=$2
packSendSeqh=$3
clientWriteKeyBytes=$4
clientHashedMacBytes=$5
clientReadKeyBytes=$6
packet="$msgCode$senddatah"
packetLengthd=`echo -n -e $packet | wc -c`
let "packetLengthd = packetLengthd + 1"
packetLengthh=`toHexStr $packetLengthd 4`
let "paddingLengthd = (8 - (packetLengthd % 8)) + 4"
let "totalLengthd = packetLengthd + paddingLengthd"
totalLengthh=`toHexStr $totalLengthd 4`
paddingLengthh=`toHexStr $paddingLengthd 1`
pd=""
let "paddingLengthd = paddingLengthd - 1"
for it in `seq 0 $paddingLengthd`
do
pd=$pd"\x00"
done
binaryPacketh="$totalLengthh$paddingLengthh$packet$pd"
binaryPacketh="$packSendSeqh$binaryPacketh"
echo -n -e $binaryPacketh > before-encrypt.txt
binaryPacketCrypt=`echo -n -e $binaryPacketh | openssl enc -e -rc4 -K "$clientWriteKeyBytes"`
mach=`echo -n -e $binaryPacketCrypt | openssl dgst -binary -hmac "$clientHashedMacBytes" -md5`
echo -n -e $binaryPacketCrypt > after-encrypt.txt
>outflow
sleep 1
echo -n -e $binaryPacketCrypt$mach | nc localhost 85
sleep 1
respb=`cat outflow`
respCTb=`echo -n -e $respb | openssl enc -d -rc4 -K "$clientReadKeyBytes"`
echo -n -e $respCTb > receive.txt
cat receive.txt | hexdump
}
function binToHexString()
{
i=$1
i=`echo -n -e $i | od -t x1 -v | awk '{$1=""; print}' | tr -d '\n'`
o=""
for it in $i
do
o=$o"\x$it"
done
echo $o
}
###################################################################################################
#
# Clear all previous files
#
###################################################################################################
rm -f inflow
rm -f outflow
rm -f proxypipe
###################################################################################################
#
# Get a cookie string (used during communication further)
#
###################################################################################################
cookieh=`getRandomHexString 16`
###################################################################################################
#
# Kill previous NC processes
#
###################################################################################################
killall -9 nc 2>/dev/null
###################################################################################################
#
# Create a two way pipe
#
###################################################################################################
mknod proxypipe p
###################################################################################################
#
# SSH Message ID
#
###################################################################################################
SSH_MSG_ID="\x14"
###################################################################################################
#
# Redirect whatever is being listened on port 85 to the destination and whatever
# is sent from destination to pipe
#
###################################################################################################
nc -k -l 85 0<proxypipe | tee -a inflow | nc localhost 22 | tee -a outflow 1>proxypipe&
sleep 1
###################################################################################################
#
# Get server version string
#
###################################################################################################
serverVersionBytes=`fetchBytes outflow 0 19`
serverVersionh=`bytesToHexString "$serverVersionBytes"`
###################################################################################################
#
# Create and send client version string to server
# client version stored = Length of client version string + "client version string without \r\n"
# server version stored = Length of server version string + "server version string without \r\n"
#
# Procedure:
# a. Convert client version ascii string to bytes (hex form: "03 04 05")
# b. Convert bytes to hex string (form: "\x01\x04")
# c. Calculate the length of hex value using echo -n -e <value> | wc -c
# d. Convert the decimal value of the length of hex string in b.
# to hex string of the form "\x01\x04"
#
# same procedure for the server version string
#
###################################################################################################
clientVersions="SSH-2.0-OpenSSH_5.6"
clientVersionBytes=`echo -n -e $clientVersions | od -t x1 | awk '{$1=""; print}' | tr -d '\n'`
clientVersionh=`bytesToHexString "$clientVersionBytes"`
cvLengthd=`echo -n -e $clientVersionh | wc -c`
cvLengthh=`toHexStr $cvLengthd 4`
svLengthd=`echo -n -e $serverVersionh | wc -c`
svLengthh=`toHexStr $svLengthd 4`
clientVersionNL=$cvLengthh$clientVersionh
serverVersionNL=$svLengthh$serverVersionh
>outflow
> proxypipe
sleep 1
echo -n -e "$clientVersions\r\n" | nc localhost 85
sleep 1
###################################################################################################
#
# server Key Exchange Init packet structure
#
# packet length
# padding length
# key exchange init message code (0x14)
# server Kex Init message
#
# To obtain the size of packet:
# a. search the first 0x14 (ie, the Kex Init Message ID)
# b. go back by 5 bytes
#
# Our procedure:
#
# 1. seekByteLoc returns the location of 0x14 in outflow
# 2. Decrement 1 to get the exact location and store that in initByte variable (because
# seekByteLoc returns in 1 to x range, but we want the location in 0 - x range)
# 3. Subtract 5 bytes as in step b. above to get the location where the size is stored in hex
# 4. Fill the sizeBytes variable with the 4 bytes starting from sizeByte, as returned by the
#    fetchBytes function (fetchBytes returns bytes of the form "01 02 03")
# 5. Convert sizeBytes to a hex string (from "01 02 03" to the form "\x01\x02\x03")
# 6. Convert the hex string to a hex number (ie., from "\x01\x02\x03" to "10203")
# 7. Convert the hex number to int (using bc)
# 8. Subtract 6 from the int number (6 == num bytes to represent packet length +
# num bytes to represent padding length + num bytes to represent the message code)
#
# NOTE: Again, during fetchBytes we start from 1 (as earlier), so we have to fetch from the 7th byte
# (so we subtract 7 instead of 6 from the int number)
# Again subtract 3 bytes of padding
#
###################################################################################################
initByte=`seekByteLoc '14' outflow`
let "initByte = initByte - 1"
let "sizeByte = initByte - 5"
sizeBytes=`fetchBytes outflow $sizeByte 4`
sizeh=`bytesToHexString "$sizeBytes"`
sizeH=`hexStrToNum $sizeh`
sized=`echo "obase=10; ibase=16; $sizeH" | bc`
let "sized = sized - 6"
let "sized = sized - 1"
###################################################################################################
#
# Now you have the server kex Init message length in sized
# a. Run fetchBytes again from initByte for sized bytes to get the server message into a variable
# b. Convert the bytes (01 02 ab form) to hex string ("\x01\x02\xab" form)
# c. Convert hex string to binary (echo -n -e <hex string>) and store it as the server kex init
# message in binary form in the variable serverKexb
# d. Convert the size of the server kex init message from decimal (sized) to hex string
# e. Concatenate the length and the server kex init message and store it for later purpose
#
###################################################################################################
let "sized = sized - 3"
serverKexBytes=`fetchBytes outflow $initByte $sized`
serverKexh=`bytesToHexString "$serverKexBytes"`
serverKexb=`echo -n -e $serverKexh`
serverKexSizeh=`toHexStr $sized 4`
serverKexNL="$serverKexSizeh$serverKexh"
###################################################################################################
#
# Client Key Exchange Init packet (message ID = 0x14) is created below
#
# same procedure as above (just reversed here)
#
###################################################################################################
kexAlgStrh="diffie-hellman-group-exchange-sha256,diffie-hellman-group-exchange-sha1,diffie-hellman-group14-sha1,diffie-hellman-group1-sha1"
kexAlgLend=`echo $kexAlgStrh | wc -c`
let "kexAlgLend=kexAlgLend - 1"
kexAlgLenh=`toHexStr $kexAlgLend 4`
shKeyAlgStrh="ssh-rsa-cert-v01@openssh.com,ssh-dss-cert-v01@openssh.com,ssh-rsa-cert-v00@openssh.com,ssh-dss-cert-v00@openssh.com,ssh-rsa,ssh-dss"
shKeyAlgLend=`echo $shKeyAlgStrh | wc -c`
let "shKeyAlgLend=shKeyAlgLend - 1"
shKeyAlgLenh=`toHexStr $shKeyAlgLend 4`
encAlgCSh="arcfour,aes128-cbc,aes128-ctr,aes192-ctr,aes256-ctr,arcfour256,arcfour128,3des-cbc,blowfish-cbc,cast128-cbc,aes192-cbc,aes256-cbc,rijndael-cbc@lysator.liu.se"
encAlgCSLend=`echo $encAlgCSh | wc -c`
let "encAlgCSLend=encAlgCSLend - 1"
encAlgCSLenh=`toHexStr $encAlgCSLend 4`
encAlgSCh="arcfour,aes128-ctr,aes192-ctr,aes256-ctr,arcfour256,arcfour128,aes128-cbc,3des-cbc,blowfish-cbc,cast128-cbc,aes192-cbc,aes256-cbc,rijndael-cbc@lysator.liu.se"
encAlgSCLend=`echo $encAlgSCh | wc -c`
let "encAlgSCLend=encAlgSCLend - 1"
encAlgSCLenh=`toHexStr $encAlgSCLend 4`
macAlgCSh="hmac-md5,hmac-sha1,umac-64@openssh.com,hmac-ripemd160,hmac-ripemd160@openssh.com,hmac-sha1-96,hmac-md5-96"
macAlgCSLend=`echo $macAlgCSh | wc -c`
let "macAlgCSLend=macAlgCSLend - 1"
macAlgCSLenh=`toHexStr $macAlgCSLend 4`
macAlgSCh="hmac-md5,hmac-sha1,umac-64@openssh.com,hmac-ripemd160,hmac-ripemd160@openssh.com,hmac-sha1-96,hmac-md5-96"
macAlgSCLend=`echo $macAlgSCh | wc -c`
let "macAlgSCLend=macAlgSCLend - 1"
macAlgSCLenh=`toHexStr $macAlgSCLend 4`
compAlgCSh="none,zlib@openssh.com,zlib"
compAlgCSLend=`echo $compAlgCSh | wc -c`
let "compAlgCSLend=compAlgCSLend -1"
compAlgCSLenh=`toHexStr $compAlgCSLend 4`
compAlgSCh="none,zlib@openssh.com,zlib"
compAlgSCLend=`echo $compAlgSCh | wc -c`
let "compAlgSCLend=compAlgSCLend -1"
compAlgSCLenh=`toHexStr $compAlgSCLend 4`
langCSLenh="\x00\x00\x00\x00"
langSCLenh="\x00\x00\x00\x00"
kexFPFollowsh="\x00"
reservedh="\x00\x00\x00\x00"
paddingh="\x00\x00\x00\x00"
pLenh="\x04"
algorithmsh="$cookieh$kexAlgLenh$kexAlgStrh$shKeyAlgLenh$shKeyAlgStrh$encAlgCSLenh$encAlgCSh$encAlgSCLenh$encAlgSCh$macAlgCSLenh$macAlgCSh$macAlgSCLenh$macAlgSCh$compAlgCSLenh$compAlgCSh$compAlgSCLenh$compAlgSCh$langCSLenh$langSCLenh$kexFPFollowsh$reservedh"
keyExchInith="\x14"
keyExchRecordh="$keyExchInith$algorithmsh$paddingh"
padLenh="\x04"
keyExchRecordh="$padLenh$keyExchRecordh"
pktLend=`echo -n -e $keyExchRecordh | wc -c`
pktLenH=`printf "%d" $pktLend`
pktLenh=`toHexStr $pktLenH 4`
keyExchPkth="$pktLenh$keyExchRecordh"
let "kexLend = pktLend - 5"
kexLenH=`printf "%d" $kexLend`
kexLenh=`toHexStr $kexLenH 4`
clientKexb="$kexLenh$keyExchInith$algorithmsh"
clientKexh=`binToHexString $clientKexb`
clientKexNL=$clientKexh
###################################################################################################
#
# Client Key Exchange Init packet is sent to server here
#
###################################################################################################
sleep 1
echo -n -e $keyExchPkth | nc localhost 85
###################################################################################################
#
# Client Diffie-Hellman GEX Request (Message ID: 0x22) is created and sent to server here
#
###################################################################################################
gexRequestMSGID="\x22"
dhGexMin="\x00\x00\x04\x00"
dhGexNumBits="\x00\x00\x08\x00"
dhGexMax="\x00\x00\x20\x00"
paddingString="\x00\x00\x00\x00\x00\x00"
kexRecord="$gexRequestMSGID$dhGexMin$dhGexNumBits$dhGexMax$paddingString"
paddingLen="\x06"
gexPkt="$paddingLen$kexRecord"
gexPktLen=`echo -n -e "$gexPkt" | wc -c`
gexPktLen=`toHexStr $gexPktLen 4`
gexPktb="$gexPktLen$gexPkt"
gexPkth=`binToHexString $gexPktb`
###################################################################################################
#
# The output from server is cleared here before sending the client DH Gex Request to the server
#
###################################################################################################
>outflow
sleep 1
#
# Sending Client GEX Request to server
#
echo -n -e $gexPktb | nc localhost 85
sleep 1
###################################################################################################
#
# Identify the position of 0x1f (which is the hex for DH Key Reply message ID in the DH Key
# Exchange packet returned from server (it contains values of P and G)
#
###################################################################################################
pos=`seekByteLoc "1f" outflow`
###################################################################################################
#
# Fetch 4 bytes from there, and get the message type (doesn't matter)
#
###################################################################################################
msgType=`fetchBytes outflow $pos 4`
msgType=`bytesToHexString "$msgType"`
msgType=`hexStrToNum $msgType d`
###################################################################################################
#
# Fetch 4 bytes from there, and get the length of P
#
###################################################################################################
mpIntLen=`fetchBytes outflow $pos 4`
mpIntLen=`bytesToHexString "$mpIntLen"`
echo $mpIntLen
mpIntLen=`hexStrToNum $mpIntLen d`
###################################################################################################
#
# Position of P is 4 bytes from the length byte
#
###################################################################################################
let "dhPos = pos + 4"
###################################################################################################
#
# Get P, convert it to hex string format ("\x01\x02" and then to binary (echo -n -e <hex string>))
#
###################################################################################################
P=`fetchBytes outflow $dhPos $mpIntLen`
Ph=`bytesToHexString "$P"`
Pb=`echo -n -e $Ph`
###################################################################################################
#
# P is in the form of "0c ad 12 ab ...", which
# should be converted to "0CAD12AB ..."
#
###################################################################################################
Ptemp=""
for it in $P
do
it=`echo $it | tr '[a-z]' '[A-Z]'`
iLen=`echo $it | wc -c`
if [ $iLen -le 2 ]; then
it="0$it"
fi
Ptemp="$Ptemp$it"
done
###################################################################################################
#
# Now converting the P from hex to integer
#
###################################################################################################
Pd=`echo "obase=10; ibase=16; $Ptemp" | bc | tr -d '\' | tr -d '\n'`
###################################################################################################
#
# Position of length byte of G
#
###################################################################################################
let "mpIntLenPos = dhPos + mpIntLen"
###################################################################################################
#
# Length of G
#
###################################################################################################
mpIntLen=`fetchBytes outflow $mpIntLenPos 4`
mpIntLen=`bytesToHexString "$mpIntLen"`
mpIntLen=`hexStrToNum $mpIntLen d`
###################################################################################################
#
# Position of G
#
###################################################################################################
let "dhbPos = mpIntLenPos + 4"
###################################################################################################
#
# Get G
#
###################################################################################################
G=`fetchBytes outflow $dhbPos $mpIntLen`
Gh=`bytesToHexString "$G"`
Gb=`echo -n -e $Gh`
Gtemp=""
for it in $G
do
it=`echo $it | tr '[a-z]' '[A-Z]'`
iLen=`echo $it | wc -c`
if [ $iLen -le 2 ]; then
it="0$it"
fi
Gtemp="$Gtemp$it"
done
Gd=`echo "obase=10; ibase=16; $Gtemp" | bc | tr -d '\' | tr -d '\n'`
###################################################################################################
#
# Identify ((P - 1) / 2) - 1, for getting
# X such that G ^ X is in between 1 and ((P - 1) / 2) - 1
#
###################################################################################################
Plimit=`echo -n -e "$Pd - 1\n" | bc | tr -d '\' |tr -d '\n' 2>/dev/null`
Plimit=`echo -n -e "$Plimit / 2\n" | bc | tr -d '\' |tr -d '\n' 2>/dev/null`
Plimit=`echo -n -e "$Plimit - 1\n" | bc | tr -d '\' |tr -d '\n' 2>/dev/null`
###################################################################################################
#
# Calculate X such that g ^ x mod p is 0 to (p - 1)/2
#
###################################################################################################
Xd=10000
while true
do
#
# G ^ X
#
GXd=`echo "obase=10; $Gd ^ $Xd" | bc | tr -d '\' |tr -d '\n' 2>/dev/null`
#
# E = G ^ X mod P
#
Ed=`echo -n -e "$GXd % $Pd\n" | bc | tr -d '\' |tr -d '\n' 2>/dev/null`
test=`echo -n -e "$Ed < $Plimit\n" | bc`
if [ $test == 1 ]; then
break
else
let "Xd = Xd + 10"
fi
done
###################################################################################################
#
# Create Diffie Hellman GEX Init packet below
#
###################################################################################################
###################################################################################################
#
# Message code
#
###################################################################################################
msgCode="\x20"
###################################################################################################
#
# Padding string
#
###################################################################################################
pString="\x00\x00\x00\x00\x00\x00"
###################################################################################################
#
# Hex value of E (to be sent on the network)
#
###################################################################################################
EH=`echo "obase=16; ibase=10; $Ed" | bc | tr -d '\' | tr -d '\n'`
Etemp=`echo $EH | fold -2 | tr '\n' ' '`
ETemp=""
for it in $Etemp
do
iLen=`echo $it | wc -c`
if [ $iLen -le 2 ]; then
it="0$it"
fi
ETemp="$ETemp\x$it"
done
Eh=$ETemp
Eb=`echo -n -e $Eh`
###################################################################################################
#
# Now Eh is the complete hex string; calculate the length of Eh
#
###################################################################################################
msgCode="\x20"
packet1="$msgCode$Eh"
eLend=`echo -n -e $Eh | wc -c`
eLenh=`toHexStr $eLend 4`
packet2="$msgCode$eLenh$Eh"
packetLengthd=`echo -n -e $packet2 | wc -c`
let "packetLengthd = packetLengthd + 1"
packetLengthh=`toHexStr $packetLengthd 4`
let "paddingLengthd = (8 - (packetLengthd % 8)) + 4"
let "totalLengthd = packetLengthd + paddingLengthd"
totalLengthh=`toHexStr $totalLengthd 4`
paddingLengthh=`toHexStr $paddingLengthd 1`
padding=""
let "paddingLengthd = paddingLengthd - 1"
for it in `seq 0 $paddingLengthd`
do
padding=$padding"\x00"
done
binaryPacketh="$totalLengthh$paddingLengthh$msgCode$eLenh$Eh$padding"
> outflow
sleep 1
echo -n -e $binaryPacketh | nc localhost 85
sleep 5
###################################################################################################
#
# Now the following values sent from server are there in
# outflow file:
#
# 1. host key
# 2. f
# 3. signature
# 4. MAC
#
###################################################################################################
posd=`seekByteLoc "3c" outflow`
let "posd = posd + 2"
hkLenBytes=`fetchBytes outflow $posd 4`
hkLenh=`bytesToHexString "$hkLenBytes"`
hkLend=`hexStrToNum $hkLenh d`
let "hkPosd = posd + 4"
hkBytes=`fetchBytes outflow $hkPosd $hkLend`
serverKeyBytes=$hkBytes
serverKeyh=`bytesToHexString "$hkBytes"`
let "mpIntLocd = hkPosd + hkLend"
mpIntLenBytes=`fetchBytes outflow $mpIntLocd 4`
mpIntLenh=`bytesToHexString "$mpIntLenBytes"`
mpIntLend=`hexStrToNum $mpIntLenh d`
let "fLocd = mpIntLocd + 4"
FBytes=`fetchBytes outflow $fLocd $mpIntLend`
Fh=`bytesToHexString "$FBytes"`
Fb=`echo -n -e $Fh`
let "sigLocd = mpIntLocd + mpIntLend"
sigLenBytes=`fetchBytes outflow $sigLocd 4`
sigLenh=`bytesToHexString "$sigLenBytes"`
sigLend=`hexStrToNum $sigLenh d`
let "sigLocd = sigLocd + 8"
sigBytes=`fetchBytes outflow $sigLocd $sigLend`
sigh=`bytesToHexString "$sigBytes"`
sigb=`echo -n -e $sigh`
###################################################################################################
#
# Shared secret is calculated here
#
# Shared secret = K = F ^ X mod P (using a little bit of python here, will change it to bc later)
#
###################################################################################################
FTemp=""
for it in $FBytes
do
it=`echo $it | tr '[a-z]' '[A-Z]'`
iLen=`echo $it | wc -c`
if [ $iLen -le 2 ]; then
it="0$it"
fi
FTemp="$FTemp$it"
done
Fd=`echo "obase=10; ibase=16; $FTemp" | bc | tr -d '\' |tr -d '\n'`
Kd=`echo "print pow(\x00$Fd, $Xd, $Pd)" > /tmp/test.py; python /tmp/test.py`
#
# Terrible error here; sometimes server pre-pends a 0 to the value for whatever reason
#
KH=`echo "obase=16; ibase=10; $Kd" | bc | tr -d '\' | tr -d '\n'`
echo "KH:"$KH
Ktemp=`echo $KH | fold -2 | tr '\n' ' '`
echo "Ktemp:"$Ktemp
Kh=""
for it in $Ktemp
do
iLen=`echo $it | wc -c`
if [ $iLen -le 2 ]; then
it="0$it"
fi
Kh="$Kh\x$it"
done
Kb=`echo -n -e $Kh`
skeLengthd=`echo -n -e $serverKeyh | wc -c`
skeLengthh=`toHexStr $skeLengthd 4`
echo $skeLengthh
serverKeyNL=$skeLengthh$serverKeyh
PLd=`echo -n -e $Ph | wc -c`
PLh=`toHexStr $PLd 4`
PNL=$PLh$Ph
GLd=`echo -n -e $Gh | wc -c`
GLh=`toHexStr $GLd 4`
echo $GLh
GNL=$GLh$Gh
ELd=`echo -n -e $Eh | wc -c`
ELh=`toHexStr $ELd 4`
ENL=$ELh$Eh
FLd=`echo -n -e $Fh | wc -c`
FLh=`toHexStr $FLd 4`
FNL=$FLh$Fh
KLd=`echo -n -e $Kh | wc -c`
echo "KLd:"$KLd
KLh=`toHexStr $KLd 4`
KNL=$KLh$Kh
echo "CLIENTVERSION:"$clientVersionNL
echo "SERVERVERSION:"$serverVersionNL
echo "CLIENTKEX:"$clientKexNL
echo "SERVERKEX:"$serverKexNL
echo "SERVERKEY:"$serverKeyNL
echo "P:"$PNL
echo "G:"$GNL
echo "E:"$ENL
echo "F:"$FNL
echo "K:"$KNL
length="\x00\x00\x04\x00\x00\x00\x08\x00\x00\x00\x20\x00"
skMaterialh="$clientVersionNL$serverVersionNL$clientKexNL$serverKexNL$serverKeyNL$length$PNL$GNL$ENL$FNL$KNL"
echo -n -e "Key material:\r\n"
echo $skMaterialh
sKeyb=`echo -n -e $skMaterialh | openssl dgst -sha256 -binary`
echo $sKeyb
sKeyh=`binToHexString "$sKeyb"`
echo "Shared secret:$sKeyh"
A="\x43"
B="\x44"
C="\x45"
D="\x46"
E="\x47"
F="\x48"
# cwMaterialh=$KNL$sKeyh$C$sKeyh
cwMaterialh=$KNL$sKeyh$C$cookieh
clientWriteKeyb=`echo -n -e $cwMaterialh | openssl dgst -sha1 -binary`
clientWriteKeyh=`binToHexString "$clientWriteKeyb"`
echo "Client Write Key:"$clientWriteKeyh
clientWriteKeyBytes=`echo "$clientWriteKeyh" | fold -4 | sed 's/\\x//g' | tr -d '\n' | tr -d '\\' | cut -c -32 |fold -2`
clientWriteKeyBytes1=`echo "$clientWriteKeyh" | fold -4 | sed 's/\\x//g' | tr -d '\n' | tr -d '\\' | cut -c -32`
clientWriteKeyh=`bytesToHexString "$clientWriteKeyBytes"`
echo "Client Write Key:"$clientWriteKeyh
# crMaterialh=$KNL$sKeyh$D$sKeyh
crMaterialh=$KNL$sKeyh$D$cookieh
clientReadKeyb=`echo -n -e $crMaterialh | openssl dgst -sha1 -binary`
clientReadKeyh=`binToHexString "$clientReadKeyb"`
clientReadKeyBytes=`echo "$clientReadKeyh" | fold -4 | sed 's/\\x//g' | tr -d '\n' | tr -d '\\' | cut -c -32 | fold -2`
clientReadKeyBytes1=`echo "$clientReadKeyh" | fold -4 | sed 's/\\x//g' | tr -d '\n' | tr -d '\\' | cut -c -32`
clientReadKeyh=`bytesToHexString "$clientReadKeyBytes"`
echo "Client Read Key:"$clientReadKeyh
# chmMaterialh=$KNL$sKeyh$E$sKeyh
chmMaterialh=$KNL$sKeyh$E$cookieh
clientHashedMacb=`echo -n -e $chmMaterialh | openssl dgst -sha1 -binary`
clientHashedMach=`binToHexString "$clientHashedMacb"`
clientHashedMacBytes=`echo "$clientHashedMach" | fold -4 | sed 's/\\x//g' | tr -d '\n' | tr -d '\\' | cut -c -32 | fold -2`
clientHashedMacBytes1=`echo "$clientHashedMach" | fold -4 | sed 's/\\x//g' | tr -d '\n' | tr -d '\\' | cut -c -32`
clientHashedMach=`bytesToHexString "$clientHashedMacBytes"`
echo "Client Hasahed MAC:"$clientHashedMach
# shmMaterialh=$KNL$sKeyh$F$sKeyh
shmMaterialh=$KNL$sKeyh$F$cookieh
serverHashedMacb=`echo -n -e $shmMaterialh | openssl dgst -sha1 -binary`
serverHashedMach=`binToHexString "$serverHashedMacb"`
serverHashedMacBytes=`echo "$serverHashedMach" | fold -4 | sed 's/\\x//g' | tr -d '\n' | tr -d '\\' | cut -c -32 | fold -2`
serverHashedMacBytes1=`echo "$serverHashedMach" | fold -4 | sed 's/\\x//g' | tr -d '\n' | tr -d '\\' | cut -c -32`
serverHashedMach=`bytesToHexString "$serverHashedMacBytes"`
echo "Server Hashed MAC:"$serverHashedMach
clientNewKeysMsgh="\x00\x00\x00\x0C\x0A\x15\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
###################################################################################################
#
# send Client NewKeys message to server here after clearing outflow
#
###################################################################################################
>outflow
sleep 1
echo -n -e $clientNewKeysMsgh | nc localhost 85
sleep 1
###################################################################################################
#
# Handshake completed here
#
# First encrypted packet sent from below
#
###################################################################################################
msgCode="\x05"
senddatah="\x73\x73\x68\x2d\x75\x73\x65\x72\x61\x75\x74\x68"
packSendSeqh="\x00\x00\x00\x01"
sendAndRecv $msgCode $senddatah $packSendSeqh "$clientWriteKeyBytes" "$clientHashedMacBytes" "$clientReadKeyBytes"
| true |
c82b6508583b74dc069f599236b9a56e931877cd | Shell | uutils/coreutils | /util/build-gnu.sh | UTF-8 | 12,758 | 3.796875 | 4 | ["MIT", "GPL-1.0-or-later", "GPL-3.0-or-later"] | permissive |
#!/bin/bash
# `build-gnu.bash` ~ builds GNU coreutils (from supplied sources)
#
# UU_MAKE_PROFILE == 'debug' | 'release' ## build profile for *uutils* build; may be supplied by caller, defaults to 'release'
# spell-checker:ignore (paths) abmon deref discrim eacces getlimits getopt ginstall inacc infloop inotify reflink ; (misc) INT_OFLOW OFLOW baddecode submodules ; (vars/env) SRCDIR vdir rcexp xpart
set -e
ME="${0}"
ME_dir="$(dirname -- "$(readlink -fm -- "${ME}")")"
REPO_main_dir="$(dirname -- "${ME_dir}")"
### * config (from environment with fallback defaults); note: GNU is expected to be a sibling repo directory
path_UUTILS=${path_UUTILS:-${REPO_main_dir}}
path_GNU="$(readlink -fm -- "${path_GNU:-${path_UUTILS}/../gnu}")"
###
if test ! -d "${path_GNU}"; then
echo "Could not find GNU coreutils (expected at '${path_GNU}')"
echo "Run the following to download into the expected path:"
echo "git clone --recurse-submodules https://github.com/coreutils/coreutils.git \"${path_GNU}\""
exit 1
fi
###
echo "ME='${ME}'"
echo "ME_dir='${ME_dir}'"
echo "REPO_main_dir='${REPO_main_dir}'"
echo "path_UUTILS='${path_UUTILS}'"
echo "path_GNU='${path_GNU}'"
###
UU_MAKE_PROFILE=${UU_MAKE_PROFILE:-release}
echo "UU_MAKE_PROFILE='${UU_MAKE_PROFILE}'"
UU_BUILD_DIR="${path_UUTILS}/target/${UU_MAKE_PROFILE}"
echo "UU_BUILD_DIR='${UU_BUILD_DIR}'"
cd "${path_UUTILS}" && echo "[ pwd:'${PWD}' ]"
if [ "$(uname)" == "Linux" ]; then
# only set on linux
export SELINUX_ENABLED=1
fi
make PROFILE="${UU_MAKE_PROFILE}"
cp "${UU_BUILD_DIR}/install" "${UU_BUILD_DIR}/ginstall" # The GNU tests rename this script before running, to avoid confusion with the make target
# Create *sum binaries
for sum in b2sum b3sum md5sum sha1sum sha224sum sha256sum sha384sum sha512sum; do
sum_path="${UU_BUILD_DIR}/${sum}"
test -f "${sum_path}" || cp "${UU_BUILD_DIR}/hashsum" "${sum_path}"
done
test -f "${UU_BUILD_DIR}/[" || cp "${UU_BUILD_DIR}/test" "${UU_BUILD_DIR}/["
##
cd "${path_GNU}" && echo "[ pwd:'${PWD}' ]"
# Any binaries that aren't built become `false` so their tests fail
for binary in $(./build-aux/gen-lists-of-programs.sh --list-progs); do
bin_path="${UU_BUILD_DIR}/${binary}"
test -f "${bin_path}" || {
echo "'${binary}' was not built with uutils, using the 'false' program"
cp "${UU_BUILD_DIR}/false" "${bin_path}"
}
done
if test -f gnu-built; then
# Change the PATH in the Makefile to test the uutils coreutils instead of the GNU coreutils
sed -i "s/^[[:blank:]]*PATH=.*/ PATH='${UU_BUILD_DIR//\//\\/}\$(PATH_SEPARATOR)'\"\$\$PATH\" \\\/" Makefile
echo "GNU build already found. Skip"
echo "'rm -f $(pwd)/gnu-built' to force the build"
echo "Note: the customization of the tests will still happen"
else
./bootstrap --skip-po
./configure --quiet --disable-gcc-warnings
# Add timeout to protect against hangs
sed -i 's|^"\$@|/usr/bin/timeout 600 "\$@|' build-aux/test-driver
# Change the PATH in the Makefile to test the uutils coreutils instead of the GNU coreutils
sed -i "s/^[[:blank:]]*PATH=.*/ PATH='${UU_BUILD_DIR//\//\\/}\$(PATH_SEPARATOR)'\"\$\$PATH\" \\\/" Makefile
sed -i 's| tr | /usr/bin/tr |' tests/init.sh
make -j "$(nproc)"
touch gnu-built
fi
# Handle generated factor tests
t_first=00
t_max=36
# t_max_release=20
# if test "${UU_MAKE_PROFILE}" != "debug"; then
# # Generate the factor tests, so they can be fixed
# # * reduced to 20 to decrease log size (down from 36 expected by GNU)
# # * only for 'release', skipped for 'debug' as redundant and too time consuming (causing timeout errors)
# seq=$(
# i=${t_first}
# while test "${i}" -le "${t_max_release}"; do
# printf '%02d ' $i
# i=$((i + 1))
# done
# )
# for i in ${seq}; do
# make "tests/factor/t${i}.sh"
# done
# cat
# sed -i -e 's|^seq |/usr/bin/seq |' -e 's|sha1sum |/usr/bin/sha1sum |' tests/factor/t*.sh
# t_first=$((t_max_release + 1))
# fi
# strip all (debug) or just the longer (release) factor tests from Makefile
seq=$(
i=${t_first}
while test "${i}" -le "${t_max}"; do
printf '%02d ' ${i}
i=$((i + 1))
done
)
for i in ${seq}; do
echo "strip t${i}.sh from Makefile"
sed -i -e "s/\$(tf)\/t${i}.sh//g" Makefile
done
grep -rl 'path_prepend_' tests/* | xargs sed -i 's| path_prepend_ ./src||'
# Remove tests checking for --version & --help
# Not really interesting for us and logs are too big
sed -i -e '/tests\/misc\/invalid-opt.pl/ D' \
-e '/tests\/help\/help-version.sh/ D' \
-e '/tests\/help\/help-version-getopt.sh/ D' \
Makefile
# logs are clotted because of this test
sed -i -e '/tests\/seq\/seq-precision.sh/ D' \
Makefile
# printf doesn't limit the values used in its arg, so this produced ~2GB of output
sed -i '/INT_OFLOW/ D' tests/printf/printf.sh
# Use the system coreutils where the test fails due to error in a util that is not the one being tested
sed -i 's|stat|/usr/bin/stat|' tests/touch/60-seconds.sh tests/sort/sort-compress-proc.sh
sed -i 's|ls -|/usr/bin/ls -|' tests/cp/same-file.sh tests/misc/mknod.sh tests/mv/part-symlink.sh
sed -i 's|chmod |/usr/bin/chmod |' tests/du/inacc-dir.sh tests/tail/tail-n0f.sh tests/cp/fail-perm.sh tests/mv/i-2.sh tests/shuf/shuf.sh
sed -i 's|sort |/usr/bin/sort |' tests/ls/hyperlink.sh tests/test/test-N.sh
sed -i 's|split |/usr/bin/split |' tests/factor/factor-parallel.sh
sed -i 's|id -|/usr/bin/id -|' tests/runcon/runcon-no-reorder.sh
# tests/ls/abmon-align.sh - https://github.com/uutils/coreutils/issues/3505
sed -i 's|touch |/usr/bin/touch |' tests/cp/reflink-perm.sh tests/ls/block-size.sh tests/mv/update.sh tests/ls/ls-time.sh tests/stat/stat-nanoseconds.sh tests/misc/time-style.sh tests/test/test-N.sh tests/ls/abmon-align.sh
sed -i 's|ln -|/usr/bin/ln -|' tests/cp/link-deref.sh
sed -i 's|cp |/usr/bin/cp |' tests/mv/hard-2.sh
sed -i 's|paste |/usr/bin/paste |' tests/od/od-endian.sh
sed -i 's|timeout |/usr/bin/timeout |' tests/tail/follow-stdin.sh
# Add specific timeout to tests that currently hang to limit time spent waiting
sed -i 's|\(^\s*\)seq \$|\1/usr/bin/timeout 0.1 seq \$|' tests/seq/seq-precision.sh tests/seq/seq-long-double.sh
# Remove dup of /usr/bin/ when executed several times
grep -rlE '/usr/bin/\s?/usr/bin' init.cfg tests/* | xargs --no-run-if-empty sed -Ei 's|/usr/bin/\s?/usr/bin/|/usr/bin/|g'
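# Illustration: "/usr/bin/ /usr/bin/stat" and "/usr/bin//usr/bin/stat" both
# collapse to "/usr/bin/stat".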
#### Adjust tests to make them work with Rust/coreutils
# in some cases, what we are doing in rust/coreutils is good (or better);
# we should not regress our project just to match what GNU is doing.
# So, do some changes on the fly
sed -i -e "s|rm: cannot remove 'e/slink'|rm: cannot remove 'e'|g" tests/rm/fail-eacces.sh
sed -i -e "s|rm: cannot remove 'a/b/file'|rm: cannot remove 'a'|g" tests/rm/cycle.sh
sed -i -e "s|rm: cannot remove directory 'b/a/p'|rm: cannot remove 'b'|g" tests/rm/rm1.sh
sed -i -e "s|rm: cannot remove 'a/1'|rm: cannot remove 'a'|g" tests/rm/rm2.sh
sed -i -e "s|removed directory 'a/'|removed directory 'a'|g" tests/rm/v-slash.sh
# 'rel' doesn't exist. Our implementation is giving a better message.
sed -i -e "s|rm: cannot remove 'rel': Permission denied|rm: cannot remove 'rel': No such file or directory|g" tests/rm/inaccessible.sh
# overlay-headers.sh test intends to check for inotify events,
# however there's a bug because `---dis` is an alias for: `---disable-inotify`
sed -i -e "s|---dis ||g" tests/tail/overlay-headers.sh
test -f "${UU_BUILD_DIR}/getlimits" || cp src/getlimits "${UU_BUILD_DIR}"
# When decoding an invalid base32/64 string, gnu writes everything it was able to decode until
# it hit the decode error, while we don't write anything if the input is invalid.
sed -i "s/\(baddecode.*OUT=>\"\).*\"/\1\"/g" tests/misc/base64.pl
sed -i "s/\(\(b2[ml]_[69]\|b32h_[56]\|z85_8\|z85_35\).*OUT=>\)[^}]*\(.*\)/\1\"\"\3/g" tests/misc/basenc.pl
# add "error: " to the expected error message
sed -i "s/\$prog: invalid input/\$prog: error: invalid input/g" tests/misc/basenc.pl
# basenc: swap out error message for unexpected arg
sed -i "s/ {ERR=>\"\$prog: foobar\\\\n\" \. \$try_help }/ {ERR=>\"error: Found argument '--foobar' which wasn't expected, or isn't valid in this context\n\n If you tried to supply '--foobar' as a value rather than a flag, use '-- --foobar'\n\nUsage: basenc [OPTION]... [FILE]\n\nFor more information try '--help'\n\"}]/" tests/misc/basenc.pl
sed -i "s/ {ERR_SUBST=>\"s\/(unrecognized|unknown) option \[-' \]\*foobar\[' \]\*\/foobar\/\"}],//" tests/misc/basenc.pl
# Remove the check whether a util was built. Otherwise tests against utils like "arch" are not run.
sed -i "s|require_built_ |# require_built_ |g" init.cfg
# Some tests are executed with the "nobody" user.
# The check to verify if it works is based on the GNU coreutils version
# making it too restrictive for us
sed -i "s|\$PACKAGE_VERSION|[0-9]*|g" tests/rm/fail-2eperm.sh tests/mv/sticky-to-xpart.sh init.cfg
# usage_vs_getopt.sh is heavily modified as it runs all the binaries
# with the option -/ is used, clap is returning a better error than GNU's. Adjust the GNU test
sed -i -e "s~ grep \" '\*/'\*\" err || framework_failure_~ grep \" '*-/'*\" err || framework_failure_~" tests/misc/usage_vs_getopt.sh
sed -i -e "s~ sed -n \"1s/'\\\/'/'OPT'/p\" < err >> pat || framework_failure_~ sed -n \"1s/'-\\\/'/'OPT'/p\" < err >> pat || framework_failure_~" tests/misc/usage_vs_getopt.sh
# Ignore some binaries (not built)
# And change the default error code to 2
# see issue #3331 (clap limitation).
# Upstream returns 1 for most of the programs. We do for cp, truncate & pr.
# So, keep it as is
sed -i -e "s/rcexp=1$/rcexp=2\n case \"\$prg\" in chcon|runcon) return;; esac/" -e "s/rcexp=125 ;;/rcexp=2 ;;\ncp|truncate|pr) rcexp=1;;/" tests/misc/usage_vs_getopt.sh
# GNU has option=[SUFFIX], clap is <SUFFIX>
sed -i -e "s/cat opts/sed -i -e \"s| <.\*>$||g\" opts/" tests/misc/usage_vs_getopt.sh
# for some reason, some entries are duplicated; strip that
sed -i -e "s/provoked error./provoked error\ncat pat |sort -u > pat/" tests/misc/usage_vs_getopt.sh
# Update the GNU error message to match ours
sed -i -e "s/link-to-dir: hard link not allowed for directory/failed to create hard link 'link-to-dir' =>/" -e "s|link-to-dir/: hard link not allowed for directory|failed to create hard link 'link-to-dir/' =>|" tests/ln/hard-to-sym.sh
# GNU sleep accepts some crazy string, not sure we should match this behavior
sed -i -e "s/timeout 10 sleep 0x.002p1/#timeout 10 sleep 0x.002p1/" tests/misc/sleep.sh
# install verbose messages shows ginstall as command
sed -i -e "s/ginstall: creating directory/install: creating directory/g" tests/install/basic-1.sh
# GNU doesn't support padding < -LONG_MAX
# disable this test case
sed -i -Ez "s/\n([^\n#]*pad-3\.2[^\n]*)\n([^\n]*)\n([^\n]*)/\n# uutils\/numfmt supports padding = LONG_MIN\n#\1\n#\2\n#\3/" tests/misc/numfmt.pl
# Update the GNU error message to match the one generated by clap
sed -i -e "s/\$prog: multiple field specifications/error: The argument '--field <FIELDS>' was provided more than once, but cannot be used multiple times\n\nUsage: numfmt [OPTION]... [NUMBER]...\n\n\nFor more information try '--help'/g" tests/misc/numfmt.pl
sed -i -e "s/Try 'mv --help' for more information/For more information, try '--help'/g" -e "s/mv: missing file operand/error: the following required arguments were not provided:\n <files>...\n\nUsage: mv [OPTION]... [-T] SOURCE DEST\n mv [OPTION]... SOURCE... DIRECTORY\n mv [OPTION]... -t DIRECTORY SOURCE...\n/g" -e "s/mv: missing destination file operand after 'no-file'/error: The argument '<files>...' requires at least 2 values, but only 1 was provided\n\nUsage: mv [OPTION]... [-T] SOURCE DEST\n mv [OPTION]... SOURCE... DIRECTORY\n mv [OPTION]... -t DIRECTORY SOURCE...\n/g" tests/mv/diag.sh
# GNU doesn't support width > INT_MAX
# disable these test cases
sed -i -E "s|^([^#]*2_31.*)$|#\1|g" tests/printf/printf-cov.pl
sed -i -e "s/du: invalid -t argument/du: invalid --threshold argument/" -e "s/du: option requires an argument/error: a value is required for '--threshold <SIZE>' but none was supplied/" -e "/Try 'du --help' for more information./d" tests/du/threshold.sh
# disable two kind of tests:
# "hostid BEFORE --help" doesn't fail for GNU. we fail. we are probably doing better
# "hostid BEFORE --help AFTER " same for this
sed -i -e "s/env \$prog \$BEFORE \$opt > out2/env \$prog \$BEFORE \$opt > out2 #/" -e "s/env \$prog \$BEFORE \$opt AFTER > out3/env \$prog \$BEFORE \$opt AFTER > out3 #/" -e "s/compare exp out2/compare exp out2 #/" -e "s/compare exp out3/compare exp out3 #/" tests/help/help-version-getopt.sh
| true |
75170b129bbad9708655a276c3d8dba143e92314 | Shell | Capprin/pdl-demo | /nats-init.sh | UTF-8 | 1,122 | 4.125 | 4 | [] | no_license |
#!/bin/bash
# This utility manages a NATS streaming server running locally. It is primarily for development.
#
# Written by Capprin Bass on 08/07/2019
# Confirm we have docker and it's running
command -v docker > /dev/null 2>&1 || { echo >&2 "Docker not installed. Exiting."; exit 1; }
docker version > /dev/null 2>&1 || { echo >&2 "Docker isn't running. Exiting."; exit 1; }
function start {
echo "Starting nats streaming server"
docker start nats-streaming-dev 2>/dev/null || docker run -p 4222:4222 -p 8222:8222 --name nats-streaming-dev -d nats-streaming --cluster_id "usgs" #more options go here
}
function stop {
echo "Stopping nats streaming server"
docker stop nats-streaming-dev
}
# Options
if [ $# -ne 1 ]; then
echo "No options supplied, or too many. Options are start, stop, restart, or reload."
exit 1
elif [ $1 = "start" ]; then
start
elif [ $1 = "stop" ]; then
stop
elif [ $1 = "restart" ]; then
stop
start
elif [ $1 = "reload" ]; then
echo "Running new build of nats-streaming"
stop
docker rm nats-streaming-dev
start
else
echo "Invalid option. Options are start, stop, restart, or reload."
fi
| true |
82e8415bd59337d74d6de7c1e0f0bca7cdd17b6c | Shell | rundeck/rundeck | /core/src/etc/bash_completion.sh | UTF-8 | 4,522 | 3.703125 | 4 | ["Apache-2.0"] | permissive |
#
# BASH shell tab completion for RUNDECK's "dispatch" and "rd-project" commands
#
# Source this file from your login shell.
#
# @author: <a href="mailto:alex@dtosolutions.com">alex@dtosolutions.com</a>
# @version: $Revision: 1931 $
[ -n "${RDECK_BASE}" -a -d "${RDECK_BASE}" ] && export _rdeck_projectdir=$(awk -F= '/framework.projects.dir/ {print $2}' \
${RDECK_BASE}/etc/framework.properties)
# list all the child directory names in specified parent
_listdirnames()
{
local dirs dir d
[ -d "$1" ] && dir=$1 || { return 1 ; }
for d in $(echo ${dir}/*)
do
[ -d "$d" ] && dirs="$dirs $(basename $d)"
done
echo $dirs
}
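# Illustrative usage (hypothetical layout): with projects projA and projB in
# $_rdeck_projectdir, `_listdirnames $_rdeck_projectdir` echoes "projA projB".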
# check if item is in the list
_lexists()
{
local item="$1" list="$2"
for e in $(eval echo $list)
do
[ "${item}" = "${e}" ] && return 0
done
return 1
}
# remove the item from the list
_lremove()
{
local list item retlist
list=$2 item=$1 retlist=""
for e in $(eval echo $list)
do
[ "$e" = "$item" ] || {
retlist="$retlist $e"
}
done
echo $retlist
}
# subtract the items in list2 from list1
_lsubtract()
{
local list1="$1" list2="$2" retlist=""
for item in $(eval echo $list1)
do
_lexists $item "$list2" || {
retlist="$retlist $item"
}
done
echo $retlist
}
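# Illustrative behavior of the three list helpers above (hypothetical values):
#   _lexists b "a b c"         -> returns 0 (found)
#   _lremove b "a b c"         -> echoes "a c"
#   _lsubtract "a b c" "b c"   -> echoes "a"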
#
# program completion for the 'rd-project' command
#
_runproject()
{
[ -z "${RDECK_BASE}" -o ! \( -d "${RDECK_BASE}" \) ] && {
return 0 ;
}
local cur prev context comp_line opts_project opts_action opts_type opts_object opts_command OPT
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
comp_line=$COMP_LINE
context=()
eval set $COMP_LINE
shift; # shift once to drop the "rd-project" from the argline
while [ "$#" -gt 0 ]; do
OPT="$1"
case "$OPT" in
-p) [ -n "$2" ] && { context[0]="$2" ; shift ; }
;;
-a) [ -n "$2" ] && { context[1]="$2" ; shift ; }
;;
*) break
;;
esac
shift
done
[ ${#context[@]} -gt 0 ] && {
[ -d ${_rdeck_projectdir}/${context[0]} ] && opts_project=${context[0]}
}
[ ${#context[@]} -gt 1 ] && {
[ ${context[1]} = "create" -o ${context[1]} = "install" ] && opts_action=${context[1]}
}
# If just the "rd-project" command was typed, offer the first clopt
[ -z "${opts_project}" -a ${prev} != "-p" ] && {
COMPREPLY=( "-p" )
return 0
}
[ -n "${opts_action}" ] && {
# nothing else to offer
return 0
}
# offer the action names
[ -n "${opts_project}" -a "$prev" = "-a" ] && {
COMPREPLY=( $(compgen -W "create install purge remove" -- ${cur}) )
return 0
}
[ -n "${opts_project}" -a "$prev" != "-a" ] && {
COMPREPLY=( $(compgen -W "-a" -- ${cur}) )
return 0
}
[ ${prev} = "-p" ] && {
COMPREPLY=( $(compgen -W "$(_listdirnames $_rdeck_projectdir)" -- ${cur}) )
return 0
}
}
# register the completion function
complete -F _runproject rd-project
#
# program completion for the 'dispatch' command
#
_dispatch_complete()
{
[ -z "${RDECK_BASE}" -o ! \( -d "${RDECK_BASE}" \) ] && {
return 0 ;
}
local cur prev context comp_line opts_project opts_script opts_command OPT
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
comp_line=$COMP_LINE
context=()
eval set $COMP_LINE
shift; # shift once to drop the "dispatch" from the argline
while [ "$#" -gt 0 ]; do
OPT="$1"
case "$OPT" in
-p) [ -n "$2" ] && { context[0]="$2" ; shift ; }
;;
-s) [ -n "$2" ] && { opts_script="$2" ; shift ; }
;;
--) [ -n "$2" ] && { opts_command="$2" ; shift ; }
;;
*) break
;;
esac
shift
done
[ ${#context[@]} -gt 0 ] && {
[ -d ${_rdeck_projectdir}/${context[0]} ] && opts_project=${context[0]}
}
# If just the "dispatch" command was typed, offer the first clopt
[ -z "${opts_project}" -a ${prev} != "-p" ] && {
COMPREPLY=( "-p" )
return 0
}
[ ${prev} = "-p" ] && {
COMPREPLY=( $(compgen -W "$(_listdirnames $_rdeck_projectdir)" -- ${cur}) )
return 0
}
# Depot context but no execution flag yet. Offer it.
[ -n "$opts_project" -a -z "$opts_script" -a -z "$opts_command" -a "$prev" != "-s" -a "$prev" != "--" ] && {
COMPREPLY=( $(compgen -W "-s --" -- ${cur}) )
return 0
}
[ ${prev} = "-s" ] && {
# use filename completion in this case
COMPREPLY=( $(compgen -o filenames -A file -- ${cur}) )
}
}
# register the completion function
complete -F _dispatch_complete dispatch
| true |
e20bd241b894fb5183b7a8fd36eeda9a08dd1740 | Shell | shubhamkumariitism/BridgeLab | /empWageComputation.sh | UTF-8 | 2,159 | 3.390625 | 3 | [] | no_license |
#!/bin/bash
echo "Welcome to empWageCompuation.sh"
#UC1
declare -A dailyWageArray
PresentOrAbsent=$((RANDOM%3))
#UC2
WageperHour=20
FullDayHour=8
DailyWageEmployeeFullDay=$(( $WageperHour * $FullDayHour ))
#UC3
PartTimeHour=8
DailyWageEmployeePartDay=$(( $WageperHour * $PartTimeHour ))
#UC4
case $PresentOrAbsent in
1)
DailyWageEmployeeFullDay=$(( $WageperHour * $FullDayHour ))
echo $DailyWageEmployeeFullDay
;;
2)
DailyWageEmployeePartDay=$(( $WageperHour * $PartTimeHour ))
echo $DailyWageEmployeePartDay
;;
*)
empHrs=0
;;
esac
#UC5
EmpWorkingPerMonth=20
case $PresentOrAbsent in
1)
EmpWageForMonth=$(( $DailyWageEmployeeFullDay * $EmpWorkingPerMonth ))
echo "Employee Wage For a Month For Full Time" $EmpWageForMonth
;;
2)
EmpWageForMonth=$(( $DailyWageEmployeePartDay * $EmpWorkingPerMonth ))
echo "Employee Wage For a Month For Part Time" $EmpWageForMonth
;;
esac
#UC7 - map a random attendance value onto hours worked
getWorkingDays()
{
case $1 in
1)
empHrs=$FullDayHour
;;
2)
empHrs=$PartTimeHour
;;
*)
empHrs=0
;;
esac
return $empHrs
}
#UC6 - accumulate daily wages until either the hour cap or the day cap is hit
totalEmpHrs=0
totalWorkingDays=0
MAX_HRS=100
MAX_WORKING_DAYS=20
while [[ $totalEmpHrs -lt $MAX_HRS && $totalWorkingDays -lt $MAX_WORKING_DAYS ]]
do
((totalWorkingDays++))
randomCheck=$((RANDOM%3))
getWorkingDays $randomCheck
empHrs=$?
dailyWage=$(( $empHrs * $WageperHour ))
dailyWageArray["Day"$totalWorkingDays]=$dailyWage
totalEmpHrs=$(($totalEmpHrs+$empHrs))
done
#UC8 UC9
totalsalary=$(($totalEmpHrs*$WageperHour))
echo "dailyWage array elements:" ${dailyWageArray[@]}
echo "dailyWage index positions for array:" ${!dailyWageArray[@]}
| true |
2e9405f1860bd69b242976624bfa6ec6bbe46057 | Shell | clearasmudd/jetbrains-pycharm-formula | /pycharm/files/mac_shortcut.sh | UTF-8 | 363 | 3.109375 | 3 | ["Apache-2.0"] | permissive |
#!/usr/bin/env bash
case $1 in
[Cc]) app='PyCharm CE.app'
;;
[Pp]) app='PyCharm PE.app'
;;
*) echo 'Missing argument'
exit 1
;;
esac
Source="/Applications/$app"
Destination="{{ homes }}/{{ user }}/Desktop"
/usr/bin/osascript -e "tell application \"Finder\" to make alias file to POSIX file \"$Source\" at POSIX file \"$Destination\""
| true |
1192cb0ea7d12a102462614192085fcd5eaa62cc | Shell | devigned/azure-cli-samples | /vm-restart-by-tag/restart-by-tag | UTF-8 | 2,061 | 3.859375 | 4 | ["MIT"] | permissive |
#!/usr/bin/env bash
set -e
DEFAULT_TAG='restart-tag'
LOCATION='westus'
TAG=${1:-$DEFAULT_TAG}
RESOURCE_GROUPS=('GROUP1' 'GROUP2' 'GROUP3')
for group in ${RESOURCE_GROUPS[@]}; do
# Create the resource group if it doesn't exist
echo "Creating resource group ${group} in ${LOCATION}"
az group create -n ${group} -l ${LOCATION} 1>/dev/null
echo "Deploying vm named ${group}-vm in ${group} with no waiting"
az vm create -g ${group} -n "${group}-vm" --image UbuntuLTS --admin-username deploy --tags ${DEFAULT_TAG} --no-wait 1>/dev/null
done
echo "Waiting for the vms to complete provisioning"
GROUP_QUERY=''
for group in ${RESOURCE_GROUPS[@]}; do
if [[ ${GROUP_QUERY} ]]; then
GROUP_QUERY="${GROUP_QUERY} || resourceGroup=='${group}'"
else
GROUP_QUERY="[?resourceGroup=='${group}'"
fi
done
SUCCESS_GROUP_QUERY="length(${GROUP_QUERY}] | [?provisioningState=='Succeeded'])"
FAILED_GROUP_QUERY="length(${GROUP_QUERY}] | [?provisioningState=='Failed'])"
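# Illustration (implied by the loop above): GROUP_QUERY expands to
#   [?resourceGroup=='GROUP1' || resourceGroup=='GROUP2' || resourceGroup=='GROUP3'
# and SUCCESS_GROUP_QUERY closes the filter to count provisioned vms:
#   length([?resourceGroup=='GROUP1' || ...] | [?provisioningState=='Succeeded'])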
echo ""
while [[ $(az vm list --query "${SUCCESS_GROUP_QUERY}") != ${#RESOURCE_GROUPS[@]} ]]; do
echo "Still not provisioned. Sleeping for 20 seconds."
sleep 20
if [[ $(az vm list --query "${FAILED_GROUP_QUERY}") != 0 ]]; then
echo "At least one of the vms failed to provision successfully!!"
exit 1
fi
done
echo ""
echo "Restarting virtual machines with ids via the group query"
az vm restart --ids $(az vm list --query "join(' ', ${GROUP_QUERY}] | [].id)" -o tsv) 1>/dev/null
echo ""
echo "Restarting virtual machines with a given tag"
az vm restart --ids $(az resource list --tag ${TAG} --query "[?type=='Microsoft.Compute/virtualMachines'].id" -o tsv) 1>/dev/null
echo ""
echo "To delete the created resource groups run the following."
DELETE_CMD=''
for group in ${RESOURCE_GROUPS[@]}; do
if [[ ${DELETE_CMD} ]]; then
DELETE_CMD="${DELETE_CMD} && az group delete -n ${group} --no-wait --force"
else
DELETE_CMD="az group delete -n ${group} --no-wait --force"
fi
done
echo "'${DELETE_CMD}'"
| true |
d794eb75ea675f089ce9984df902dd931ec14c26 | Shell | MaxKhlupnov/LoT | /Common/Bolt/Tools/scripts/tsdb/preheat/parse-preheat-timing-result.sh | UTF-8 | 142 | 3.15625 | 3 | [] | no_license |
#!/bin/bash
file=$1
while read -r line
do
slot=`echo $line|cut -d',' -f2`
if [[ $slot -eq 95 ]] ; then
echo $line ;
fi
done < $file
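# Usage example (hypothetical input file): print all rows of timings.csv whose
# second comma-separated field is slot 95:
#   ./parse-preheat-timing-result.sh timings.csv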
| true |
b78ded7fabe362e5623d53b5280450c2ef0c8d60 | Shell | dkuspawono/puppet | /modules/dumps/files/fetches/kiwix-rsync-cron.sh | UTF-8 | 1,008 | 3.484375 | 3 | [] | no_license |
#!/bin/bash
######################################
# This file is managed by puppet!
# puppet:///modules/dumps/fetches/kiwix-rsync-cron.sh
######################################
sourcehost="download.kiwix.org"
bwlimit="--bwlimit=40000"
do_rsync (){
srcpath=$1
destpath=$2
running=`/usr/bin/pgrep -f -x "/usr/bin/rsync -rlptq --delete $bwlimit ${sourcehost}::${srcpath} ${destroot}/${destpath}"`
if [ -z "$running" ]; then
# filter out messages of the type
# file has vanished: "/zim/wikipedia/.wikipedia_tg_all_nopic_2016-05.zim.TQH5Zv" (in download.kiwix.org)
# rsync warning: some files vanished before they could be transferred (code 24) at main.c(1655) [generator=3.1.1]
/usr/bin/rsync -rlptq --delete "$bwlimit" "${sourcehost}::${srcpath}" "${destroot}/${destpath}" 2>&1 | grep -v 'vanished'
fi
}
if [ -z "$1" ]; then
echo "Usage: $0 dest_base_dir"
exit 1
fi
destroot="$1"
do_rsync "download.kiwix.org/zim/wikipedia/" "kiwix/zim/wikipedia/"
| true |
9dc85f39e9bf47be469dadd3679270a1f812259d | Shell | MarsStirner/sirius | /install/devstrap.sh | UTF-8 | 1,942 | 3.125 | 3 | [] | no_license |
#!/bin/bash
set -e # stop on errors
SIRIUS_BRANCH=master
for i in "$@"
do
case $i in
-sb=* | --sirius-branch=*) SIRIUS_BRANCH="${i#*=}"
shift
;;
-h | --help ) echo "Sets up the virtual environment and clones the projects.
Default application branches:
* sirius - master
These can be overridden via the arguments passed in:
-sb= --sirius-branch="
exit
;;
esac
done
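# Usage example (hypothetical branch name): ./devstrap.sh --sirius-branch=develop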
# 0. Create the installation root directory (say, /srv/infrastructure). All paths below are relative to the root directory
# 0. Copy the configuration file
cp install/usagi.yaml usagi.local.yaml
# 1. Create the base subdirectories everything will go into
mkdir code
mkdir configs
mkdir logs
# 2. Create a virtualenv and activate it
virtualenv venv
. venv/bin/activate
pip install pip setuptools --upgrade
pip install pyyaml jinja2
# 3. Clone the services
echo " -> sirius branch: ${SIRIUS_BRANCH}"
git clone https://stash.bars-open.ru/scm/medvtr/sirius.git -b ${SIRIUS_BRANCH} code/sirius
# 4. Install the dependencies
pip install -r code/sirius/requirements/prod.txt
pip install -r code/sirius/requirements/usagi.txt
pip install git+https://stash.bars-open.ru/scm/medvtr/hitsl.utils.git@develop#egg=hitsl_utils
pip install git+https://stash.bars-open.ru/scm/medvtr/tsukino_usagi.git@master#egg=tsukino_usagi
# pip install git+https://stash.bars-open.ru/scm/medvtr/pysimplelogs2.git@master#egg=pysimplelogs2
| true |
07dee00cd3f7c5350271ccead48c543a8003bf93 | Shell | you-zhou/notes | /configs/.zshrc | UTF-8 | 788 | 3.15625 | 3 | ["MIT"] | permissive |
##############################
# Location: ~/.zshrc
# This file contains personal Zsh shell settings.
# Place this file in your home directory.
##############################
export PATH=/opt/homebrew/opt/python@3.11/bin:$PATH
export PYTHONPATH=~/Dev/GitHub
alias python=python3
alias pip=pip3
# Alias to activate virtual environment from project folder.
alias venv='source .venv/bin/activate'
# Alias to display interface name along with associated IP address
alias showip='ifconfig | awk "/^[a-z]/ { iface=\$1; } /inet / { print iface, \$2 }" | sed "s/://"'
# API keys
export OPENAI_API_KEY=secret
# Automatically activate a virtual environment when you cd into a project directory.
cd() {
builtin cd "$@"
if [ -f .venv/bin/activate ]; then
source .venv/bin/activate
fi
}
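# Example (assumed project layout): create the venv once and the cd hook
# above will activate it automatically on every later `cd` into the project.
#   python3 -m venv .venv && cd .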
| true |
23e17508ea201656aa653c118a4cbebac1cea135
|
Shell
|
helau/gen2vdr
|
/etc/vdr.d/scripts/g2v_record.sh
|
UTF-8
| 3,467 | 3.5625 | 4 |
[] |
no_license
|
#!/bin/bash
# ---
# g2v_record.sh
# Called by VDR at the start and end of recordings, as well as when a
# recording is cut or deleted
# ---
# VERSION=170214
source /_config/bin/g2v_funcs.sh
# set -x
NOAD='/_config/bin/g2v_noad.sh'
[[ "${VIDEO: -1}" != "/" ]] && VIDEO="${VIDEO}/"
glogger -s "$(date +"%F %R") $0 $1 $2" >> "${VIDEO}/vdr.record" 2>&1
MESG="" ; DO_NOAD=0
case "$1" in
before)
DO_NOAD=1
screen -dm sh -c "/etc/vdr.d/scripts/g2v_rec_msg.sh $1 \"$2\""
if [[ "$SHAREMARKS" == "1" ]] ; then
screen -dm sh -c "marks2pts $1 \"$2\""
fi
;;
after)
DO_NOAD=1
screen -dm sh -c "/etc/vdr.d/scripts/g2v_rec_msg.sh $1 \"$2\""
INFO="$2/info"
[[ ! -e "$INFO" ]] && INFO="$2/info.vdr"
EVENTID="$(grep "^E " "$INFO" | cut -f 2 -d " ")"
if [[ -n "$EVENTID" ]] ; then
[[ -e "${EPG_IMAGES}/${EVENTID}.jpg" ]] && cp "${EPG_IMAGES}/${EVENTID}"*.jpg "$2" && ln -s "${EVENTID}.jpg" "$2/Cover-Enigma.jpg"
[[ -e "${EPG_IMAGES}/${EVENTID}.png" ]] && cp "${EPG_IMAGES}/${EVENTID}"*.png "$2" && ln -s "${EVENTID}.png" "$2/Cover-Enigma.png"
fi
VDR_CUTTING_DIR="${VIDEO}_CUT_/"
if [[ "$USE_CUTTING_DIR" == "1" && -d "$VDR_CUTTING_DIR" ]] ; then
DS="$(du -s "${2}" | cut -f 1)"
DF="$(df -k "$VDR_CUTTING_DIR" | tail -n 1 |tr -s " " |cut -f 4 -d " ")"
if [[ $DF -gt $DS ]] ; then
rd="${2%/[0-9]*}" ; rdb="${rd%/*}/" ; rdn="${rd##*/}" ; rdb="${rdb#$VIDEO}"
mkdir -p "${VDR_CUTTING_DIR}${rdb}%${rdn}"
ln -s "${VDR_CUTTING_DIR}${rdb}%${rdn}" "${VIDEO}${rdb}"
glogger -s "Setze Schnittverzeichnis <${VDR_CUTTING_DIR}${rdb}%${rdn}>"
else
glogger -s "Nicht genug Platz auf $VDR_CUTTING_DIR"
fi
fi
;;
cut)
if [[ "$SHAREMARKS" == "1" ]] ; then
screen -dm sh -c "marks2pts -upload $1 \"$2\""
fi
;;
edited)
if [[ "$SHAREMARKS" == "1" ]] ; then
screen -dm sh -c "marks2pts -upload $1 \"$2\""
fi
if [[ -n "$3" ]] ; then # VDR > 1.7.31
[[ -e "${3}/Cover-Enigma.jpg" ]] && cp -a "${3}"/*.jpg "$2"
[[ -e "${3}/Cover-Enigma.png" ]] && cp -a "${3}"/*.png "$2"
else
ODIR="${2//\/%//}" # /% durch / ersetzen
[[ -e "${ODIR}/Cover-Enigma.jpg" ]] && cp -a "${ODIR}"/*.jpg "$2"
[[ -e "${ODIR}/Cover-Enigma.png" ]] && cp -a "${ODIR}"/*.png "$2"
fi
[[ -z "${PLUGINS/* rectags */}" ]] && sendvdrkey.sh RED
;;
delete)
# Delete recording
;;
deleted)
if [[ -L "$2" ]] ; then # Testen ob es ein Symlink ist
LNK="$(readlink "$2")" # Ziel des Links merken
if [[ -d "$LNK" ]] ; then # Ist ein Verzeichnis
mv "$LNK" "${LNK%.rec}.del" # Umbenennen -> *.del
ln -s --force -n "${LNK%.rec}.del" "$2" # Symlink ersetzen
glogger "Linkziel von $2 wurde angepasst (-> ${LNK%.rec}.del)"
fi # -d
fi # -L
;;
started)
# few seconds after recording has started
;;
*)
glogger -s "ERROR: unknown state: $1"
;;
esac
if [[ -x "$NOAD" && "$DO_NOAD" == "1" && "$SET_MARKS" != "Nie" ]] ; then
if [[ -z "${PLUGINS/* markad */}" ]] ; then
glogger -s 'Markad activated - noad ignored'
else
screen -dm sh -c "$NOAD $1 \"$2\""
fi
else
screen -dm sh -c '/_config/bin/g2v_maintain_recordings.sh'
fi
if [[ -n "$MESG" ]] ; then
glogger -s "$MESG"
screen -dm sh -c "svdrpsend.sh MESG $MESG"
fi
screen -dm sh -c "sleep 10; touch ${VIDEO}.update"
exit
| true |
3fdeee8619754eac613fcb17e1928cce393d7ac8
|
Shell
|
J816g812/joshua
|
/src/test/resources/decoder/moses-compat/test.sh
|
UTF-8
| 1,468 | 2.828125 | 3 |
[
"LicenseRef-scancode-other-permissive",
"Apache-2.0",
"MIT"
] |
permissive
|
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -u
rm -f output
# should write translation to stdout, output-format info to n-best.txt
echo help | $JOSHUA/bin/joshua -v 0 -moses -n-best-list n-best1.txt 10 distinct > output
# should write output-format info to n-best.txt (since no -moses)
echo help | $JOSHUA/bin/joshua -v 0 -n-best-list n-best2.txt 10 distinct >> output
# should write translation to stdout
echo help | $JOSHUA/bin/joshua -v 0 -moses >> output
echo >> output
echo "# n-best stuff to follow:" >> output
cat n-best1.txt n-best2.txt >> output
# Compare
diff -u output output.expected > diff
if [[ $? -eq 0 ]]; then
rm -f diff log output n-best1.txt n-best2.txt
exit 0
else
exit 1
fi
| true |
de3aebc55ecf15158fb491a81a6d8fe10c305c5c
|
Shell
|
alexpearce/Lc2pXX-SVN
|
/scripts/PIDCalib/run_multitrackcalib_ProbNN.sh
|
UTF-8
| 2,636 | 2.953125 | 3 |
[] |
no_license
|
#!/bin/bash
# Reference (our signal) location
signal_base=$WORK/OutputBackup/output_ProbNN
signal_tree=DecayTree
# For S20r1:
# stripping=20r1_MCTuneV2
# For S17b
# stripping=17
# Performance histograms directory
perf_hists="output_ProbNN/$stripping"
mkdir -p $perf_hists
# Cuts
K="ProbNNK > 0.5 && DLLK > 4"
Pi="ProbNNpi > 0.7 && DLLK < 10"
# ppipi pion cut is tighter than pKpi
# S20r1 loosens the ppipi PIDK cut from < 0 to < 4, comment out as appropriate
Pi_ppipi="ProbNNpi > 0.7 && DLLK < 4"
# Pi_ppipi="ProbNNpi > 0.7 && DLLK < 0"
P="ProbNNp > 0.5 && PIDp > 4 && PIDpK > 0"
# Ntuple particle branches
h1_K="[h1,K,$K]"
h2_K="[h2,K,$K]"
h1_Pi="[h1,Pi,$Pi]"
h2_Pi="[h2,Pi,$Pi]"
h1_Pi_ppipi="[h1,Pi,$Pi_ppipi]"
h2_Pi_ppipi="[h2,Pi,$Pi_ppipi]"
proton_P="[proton,P,$P]"
# Arguments:
# $1 -- Magnet polarity (MagUp, MagDown)
# $2 -- Particle (K, P, Pi)
# $3 -- Cut string (e.g. "[ProbNNp > 0.5]"
run_makeperfhists() {
python MakePerfHistsRunRange.py \
-q \
-o="$perf_hists" \
-b="Lc2pXX_binning.py" \
-s="Lc2pXX" \
"$stripping" \
"$1" \
"$2" \
"$3"
}
# Arguments:
# $1 -- Decay mode (pKpi, pKK, ppipi)
# $2 -- Magnet polarity (MagUp, MagDown)
# $3 -- h1 track
# $4 -- h2 track
run_multitrackcalib() {
# To change to S17b, change 20r1 in the signal and output paths to 17b
python PerformMultiTrackCalib.py \
-i="$perf_hists" \
-x=P -y=ETA -z=nTracks \
-q \
-s P Lc2pXX \
-s Pi Lc2pXX \
-s K Lc2pXX \
"$stripping" \
"$2" \
"$signal_base/selected-$1-2011-20r1-$2.root" \
"$signal_tree" \
"$perf_hists/CalibTree-$1-2011-20r1-$2.root" \
"$proton_P" \
"$3" \
"$4"
}
run_makeperfhists MagUp "K" "[$K]"
run_makeperfhists MagDown "K" "[$K]"
# You have to change line 344 in MakePerfHists... to "UPDATE", otherwise
# the next set of Pi calls will overwrite this one
run_makeperfhists MagUp "Pi" "[$Pi]"
run_makeperfhists MagDown "Pi" "[$Pi]"
run_makeperfhists MagUp "Pi" "[$Pi_ppipi]"
run_makeperfhists MagDown "Pi" "[$Pi_ppipi]"
run_makeperfhists MagUp "P" "[$P]"
run_makeperfhists MagDown "P" "[$P]"
run_multitrackcalib pKpi MagUp "$h1_K" "$h2_Pi"
run_multitrackcalib pKpi MagDown "$h1_K" "$h2_Pi"
run_multitrackcalib pKK MagUp "$h1_K" "$h2_K"
run_multitrackcalib pKK MagDown "$h1_K" "$h2_K"
run_multitrackcalib ppipi MagUp "$h1_Pi_ppipi" "$h2_Pi_ppipi"
run_multitrackcalib ppipi MagDown "$h1_Pi_ppipi" "$h2_Pi_ppipi"
# run_multitrackcalib pphi MagUp "$h1_K" "$h2_K"
# run_multitrackcalib pphi MagDown "$h1_K" "$h2_K"
| true |
d84aaa798c87f34ca1824ba3e3ab56ae7e093cd3
|
Shell
|
RGM-OSC/nagios
|
/SOURCES/nagios-rgm/rgm-nagios-archives.cron
|
UTF-8
| 601 | 3.34375 | 3 |
[] |
no_license
|
#!/bin/sh
#
# move Nagios performance data logs older than 90 days to a "legacy"
# archive directory to avoid re-injection to thruk log_cache SQL table
#
# Copyright 2021 & onwards, RGM - SCC France
if [ ! -d /srv/rgm/nagios/var/log/archives-legacy ]; then
mkdir -p /srv/rgm/nagios/var/log/archives-legacy
chown nagios:rgm /srv/rgm/nagios/var/log/archives-legacy
chmod 0750 /srv/rgm/nagios/var/log/archives-legacy
fi
find /srv/rgm/nagios/var/log/archives/ -type f -name '*.log' -ctime +90 -exec gzip {} \;
mv /srv/rgm/nagios/var/log/archives/*.gz /srv/rgm/nagios/var/log/archives-legacy/
| true |
60547845536606d5d018c2a910cd48ecde18cfc2
|
Shell
|
zmzlois/kafka-starter
|
/connect/stg/update.sh
|
UTF-8
| 900 | 3.828125 | 4 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
if [ -z "$1" ]; then
echo "No parameters provided, exiting..."
exit 1
fi
if [ -z "$2" ]; then
echo "Key provided, but no value, breaking"
exit 1
fi
if [ -z "$3" ] && [ -z "$setPropertyFile" ]; then
echo "No file provided or setPropertyFile is not set, exiting..."
exit 1
fi
if [ "$setPropertyFile" ] && [ "$3" ]; then
echo "setPropertyFile variable is set AND filename in comamnd! Use only or the other. Exiting..."
exit 1
else
if [ "$3" ] && [ ! -f "$3" ]; then
echo "File in command NOT FOUND!"
exit 1
elif [ "$setPropertyFile" ] && [ ! -f "$setPropertyFile" ]; then
echo "File in setPropertyFile variable NOT FOUND!"
exit 1
fi
fi
if [ "$setPropertyFile" ]; then
file=$setPropertyFile
else
file=$3
fi
awk -v pat="^$1=" -v value="$1=$2" '{ if ($0 ~ pat) print value; else print $0; }' "$file" > "$file".tmp
mv "$file".tmp "$file"
| true |
6e780a25bcdb3602158234dffa19c11c2dc8a476
|
Shell
|
shikhamehta2112/drl-frameworks
|
/scripts/evaluate_ray_A2C_106.sh
|
UTF-8
| 707 | 2.875 | 3 |
[] |
no_license
|
source ~/anaconda3/etc/profile.d/conda.sh
echo "--- STARTING RAY EXPERIMENTS ---"
conda activate ray-env
echo
echo "--- STARTING RAY CARTPOLE EXPERIMENTS ---"
mkdir -p src/results/ray/cartpole/A2C/runtime
echo
for fullfile in src/ray/experiments/cartpole/A2C/ray_a2c_cpu_cp106.yml; do
filename=$(basename -- "$fullfile")
experiment="${filename%.*}"
mkdir -p "${experiment}"
echo "--- STARTING EXPERIMENT ${experiment} --- "
python src/ray/run_evaluation.py -f="$fullfile"
echo "--- EXPERIMENT ${experiment} COMPLETED --- "
echo
done
echo "--- RAY CARTPOLE EXPERIMENTS COMPLETED ---"
echo
echo "--- RAY EXPERIMENTS COMPLETED ---"
echo
| true |
059ce9227286178c12743d0b66b6edf2aef32ebe
|
Shell
|
ryry0/Rpaca-Repository
|
/net-tools.rpac
|
UTF-8
| 1,305 | 3.765625 | 4 |
[] |
no_license
|
#!/bin/bash
# Overloaded variables:
PKG="net-tools"
VER="CVS_20101030"
EXT="tar.gz" # extension of download
URL="http://anduin.linuxfromscratch.org/BLFS/$PKG/$PKG-$VER.$EXT"
DESCRIPTION="The net-tools package is a collection of programs for controlling the linux kernel network subsystem."
CFLAGS=""
CONF_FLAGS="--prefix=/usr"
FAKEROOT=$PKG_DIR/$PKG-$VER
DEPS=""
#---------------------------------------------------------------
# Overloadable functions:
compileSource() {
cd $SRC_DIR/$PKG-$VER
#curl -L -O http://www.linuxfromscratch.org/patches/blfs/svn/$PKG-$VER-remove_dups-1.patch || die "Downloading $PKG patch failed"
#logMessage "Patching $PKG"
#patch -Np1 -i $PKG-$VER-remove_dups-1.patch &&
yes "" | make config ||
die "Configure $PKG source failed"
logMessage "Compiling $PKG"
make $GLOBAL_CFLAGS $CFLAGS || die "Compiling $PKG source failed"
}
# Take compiled files and make them into a standalone package
buildPackage() {
logMessage "Building $PKG to fakeroot"
cd $SRC_DIR/$PKG-$VER
make DESTDIR="$FAKEROOT" update || die "$PKG make install failed"
rm -v $FAKEROOT/bin/hostname &&
cd $PKG_DIR
logMessage "Tarring and compressing $PKG"
tar -cvzf $PKG-$VER.tar.gz $PKG-$VER || die "$PKG package creation failed"
logMessage "Deleting $PKG fakeroot"
rm -rf $PKG-$VER
}
| true |
ccf8391a98eac6ce11a9f00250c31480df2edcb1
|
Shell
|
intel/parameter-framework-samples
|
/asus-t100-tutorial/clientSimulator/scripts/common/test-scripts/capture.sh
|
UTF-8
| 310 | 3.0625 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#
# This script is used in order to start recording with ALSA.
#
if [ $# -ne 2 ]; then
echo "Two arguments needed:" 1>&2
echo " - AlsaCard and device : -hw:<card>,<device>" 1>&2
echo " - Record destination" 1>&2
exit 1
fi
card=$1
file=$2
arecord -D$card -d 10 -f S16_LE -r 48000 -c 2 $file
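# Usage sketch (hypothetical ALSA card/device and destination file):
#   ./capture.sh hw:1,0 /tmp/capture.wav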
| true |
7c89c75ad2c2bae5d46f0269454098b0af6433ca
|
Shell
|
dimihayl/Using_CATS3
|
/configure_dlm_hst_plot.sh
|
UTF-8
| 477 | 3.734375 | 4 |
[] |
no_license
|
#!/bin/bash
red=`tput setaf 1`
green=`tput setaf 2`
reset=`tput sgr0`
directory="./CMake_dlm_hst_plot/FILES"
if [ ! -d "$directory" ]; then
mkdir "$directory"
echo "Directory created: $directory"
else
echo "Directory already exists: $directory"
fi
cd $directory
if ! cmake ../../CMake_dlm_hst_plot; then
echo "Configuration ${red}failed${reset}"
return 3
fi
echo "Configuration ${green}successful${reset}"
echo " To proceed type: make"
cd ../../
return 0
| true |
c70488b52ad5fe476317105bc759634ea1c078b4
|
Shell
|
jjanczur/CloudComputing-assignment2
|
/linpack.sh
|
UTF-8
| 297 | 3.046875 | 3 |
[] |
no_license
|
#!/bin/bash
EXECUTABLE="linpack"
if [[ ! -e ./${EXECUTABLE} ]] ; then
gcc -O -o ./${EXECUTABLE} ./${EXECUTABLE}.c -lm
fi
if [[ "$SYSTEMROOT" = "C:\Windows" ]] ; then
RESULT=$(./${EXECUTABLE}.exe | tail -1)
else
RESULT=$(./${EXECUTABLE} | tail -1)
fi
echo "$RESULT"
#echo $(./${RESULT} 1 1000)
| true |
2b958824a209203b5a162116738c0bbfd6ee02c9
|
Shell
|
beomki-yeo/acts_shell_scripts
|
/fatras_script.sh
|
UTF-8
| 1,794 | 2.75 | 3 |
[] |
no_license
|
#!/bin/bash
nEvents=100
pdg=211 # pion
nParticles=(1 10 50 100 500 1000 2000 3000 4000 5000 6000)
#nParticles=(6000 7000)
for n in "${nParticles[@]}"
do
sub_dir=${pdg}_${n}
: '
command_particle_gun="
${ACTS_INSTALL}/bin/ActsExampleParticleGun
--events=${nEvents}
--output-dir=${ACTS_DATA}/data/gen/${sub_dir}
--output-csv
--gen-eta=-4:4
--gen-mom-gev=0.4:50
--gen-pdg=${pdg}
--gen-randomize-charge
--gen-nparticles=${n}"
${command_particle_gun}
command_fatras="
${ACTS_INSTALL}/bin/ActsExampleFatrasGeneric
--input-dir=${ACTS_DATA}/data/gen/${sub_dir} \
--output-dir=${ACTS_DATA}/data/sim_generic/${sub_dir} \
--output-csv \
--select-eta=-2.5:2.5 \
--select-pt-gev=0.4: \
--fatras-pmin-gev 0.4 \
--remove-neutral \
--bf-constant-tesla=0:0:2
"
${command_fatras}
command_smeared_digitization="
${ACTS_INSTALL}/bin/ActsExampleDigitizationGeneric \
--input-dir=${ACTS_DATA}/data/sim_generic/${sub_dir} \
--output-dir=${ACTS_DATA}/data/digi_smeared_generic/${sub_dir} \
--output-csv \
--digi-smear \
--digi-config-file ${ACTS_HOME}/Examples/Algorithms/Digitization/share/default-smearing-config-generic.json
"
${command_smeared_digitization}
'
command_digitization="
${ACTS_INSTALL}/bin/ActsExampleDigitizationGeneric \
--input-dir=${ACTS_DATA}/data/sim_generic/${sub_dir} \
--output-dir=${ACTS_DATA}/data/digi_generic/${sub_dir} \
--output-csv \
--digi-config-file ${ACTS_HOME}/Examples/Algorithms/Digitization/share/default-geometric-config-generic.json
"
#--digi-config-file ${ACTS_HOME}/Examples/Algorithms/Digitization/share/default-input-config-generic.json
#"
${command_digitization}
done
| true |
2c59de0bbfbbe6dd5dada8756ba7f304f68dedda
|
Shell
|
WASdev/ci.docker.ibm-http-server
|
/ilan/build
|
UTF-8
| 1,814 | 3.375 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
###########################################################################
# (C) Copyright IBM Corporation 2016. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.#
# See the License for the specific language governing permissions and #
# limitations under the License. #
###########################################################################
if [ $# != 3 ]; then
echo "Usage: build <version> <IBMid> <IBMid password>"
exit 1
fi
docker build -t installation-manager im || exit $?
docker run --rm -v $(pwd):/host installation-manager /host/install_ihs $1 $2 $3 || exit $?
docker run -d --name tar_server -v $(pwd)/ihs$1.tar.gz:/host/ihs$1.tar.gz -w /host python:2-slim python -m SimpleHTTPServer
tar_server_ip=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' tar_server)
tar_url="http://${tar_server_ip}:8000/ihs${1}.tar.gz"
# Build image from hosted tar file
echo "Building image"
docker build -t ibm-http-server:$1 --build-arg TAR_URL=$tar_url . || exit $?
docker rm -f tar_server
| true |
9bb4b6079db1fb7dc1fc3d00327e7fbce9b3e297
|
Shell
|
anaway-69/bashScripting_Tutorials
|
/18_professional_Menus.sh
|
UTF-8
| 805 | 3.40625 | 3 |
[] |
no_license
|
#!/bin/bash
# select lang in Python Bash Ruby c c++ java
# do
# # echo "You have Selected: $lang"
# case $lang in
# Python)
# echo "You have selected $lang";;
# Bash)
# echo "You have selected $lang";;
# Ruby)
# echo "You have selected $lang";;
# c)
# echo "You have selected $lang";;
# c++)
# echo "You have selected $lang";;
# java)
# echo "You have selected $lang";;
# *)
# echo "Error!! Select Between the Items..";;
# esac
# done
echo "Press Any key.."
while [ true ]
do
read -t 3 -n 1
if [ $? = 0 ]
then
echo "You have Terminated The string! "
exit;
else
echo "Waiting for response! "
fi
# body
done
| true |
ed04fc085231f6795dbb52a469ae315d7efbbda4
|
Shell
|
suplexx12/my
|
/restart6.sh
|
UTF-8
| 441 | 2.953125 | 3 |
[] |
no_license
|
#!/bin/bash
#Setup Variables
GREEN='\033[0;32m'
YELLOW='\033[0;93m'
RED='\033[0;31m'
NC='\033[0m'
northern-cli -datadir=/home/northern6/.northern stop
sleep 10
northernd -datadir=/home/northern6/.northern -daemon
until northern-cli -datadir=/home/northern6/.northern mnsync status | grep -m 1 '"IsBlockchainSynced": true,'; do sleep 1 ; done > /dev/null 2>&1
echo -e ${GREEN}"Fourth node is fully synced. Your masternode is running!"${NC}
| true |
7a318b5f2bf00edc64dc78913b6f5af9b2e1239a
|
Shell
|
PelionIoT/buildtools-pelion-edge
|
/vagrant/vcmd.sh
|
UTF-8
| 9,156 | 3.46875 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
downloaddir=$HOME/vdownload
safemode=1
VM=""
drive="/dev/rdisk5"
FAILEDBUILD=0
acommands="burn destroy destroy-all download halt halt-all up up-np new nosafe rebuild rsync-auto ssh status status-all"
declare -A acmd
acmd[burn]="burns the last downloaded image to the -d drive"
acmd[destroy]="completely destroys the VM (calls vagrant -f destroy VM)"
acmd[destroy-all]="destroys all vagrantFile VM's (calls vagrant -f destroy)"
acmd[download]="downloads ~/result.wic.gz from the target VM"
acmd[halt]="halts the vm. (calls vagrant halt VM)"
acmd[halt-all]="halts all vms defined in vagrantFile (calls vagrant halt VM)"
acmd[up]="brings up and first time only provisions (calls vagrant up VM)"
acmd[up-np]="up with no provisioners (calls vagrant up --no-provision VM)"
acmd[up-fp]="up force provisioners (calls vagrant up --provision VM)"
acmd[new]="Macro: destroy,up"
acmd[rebuild]="Macro: halt,up-fp"
acmd[rsync-auto]="rsync mapped dir changes to target (calls vagrant rsync-auto VM)"
acmd[ssh]="ssh into VM (calls vagrant ssh VM)"
acmd[status]="displays status of the VM (calls vagrant status VM)"
acmd[status-all]="displays status for all vagrantFile VMs (calls vagrant status)"
NORM="$(tput sgr0)"
BOLD="$(tput bold)"
REV="$(tput smso)"
UND="$(tput smul)"
BLACK="$(tput setaf 0)"
RED="$(tput setaf 1)"
GREEN="$(tput setaf 2)"
YELLOW="$(tput setaf 3)"
BLUE="$(tput setaf 4)"
MAGENTA="$(tput setaf 90)"
MAGENTA1="$(tput setaf 91)"
MAGENTA2="$(tput setaf 92)"
MAGENTA3="$(tput setaf 93)"
CYAN="$(tput setaf 6)"
WHITE="$(tput setaf 7)"
ORANGE="$(tput setaf 172)"
ERROR="${REV}Error:${NORM}"
set_sourcedirs(){
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
THISDIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
}
set_sourcedirs
_say(){
if [[ $silent -ne 1 ]]; then
say "$@"
else
echo "say is silent: $1"
fi
}
cmdValidate(){
okoverall=0
for cmd in "$@"; do
okcmd=0;
for a in $acommands; do
#echo "$cmd = $a"
if [[ $cmd = "$a" ]]; then
okcmd=1;
fi
done
if [[ $okcmd -eq 0 ]]; then
echo "($cmd) is not valid"
okoverall=1
fi
done
if [[ $okoverall -eq 1 ]]; then
useage
fi
}
_getstate(){
running=1
while [[ running -eq 1 ]]; do
echo "checking vagrant status"
vagrant status $1 | grep stopped
if [[ $? -eq 0 ]]; then
echo "its really down"
running=0
fi
echo "waiting for it to stop"
done
}
_stopwait(){
running=1
echo -en "Ensuring it stops"
while [[ running -eq 1 ]]; do
vagrant status $VM | grep "stopped (aws)" >> /dev/null 2>&1
if [[ $? -eq 0 ]]; then
running=0
fi
echo -n "."
done
echo " stoped!"
}
_setcompletion(){
md5cur=$(md5sum Vagrantfile)
if [[ -e .VAGMD5 ]]; then
md5old=$(cat .VAGMD5)
else
md5old=xxx
fi
if [[ "$md5cur" != "$md5old" ]]; then
echo "Vagrant file has changed, rebuilding completions"
cat Vagrantfile | grep -B 1 -A0 vm.define > .VAGstatus
echo "$md5cur" > .VAGMD5
fi
readarray -t vstatus <<< "$(cat .VAGstatus)"
compound=""
vstatus_len=${#vstatus[@]}
for (( i = 0; i < $vstatus_len; i+=3 )); do
l1=${vstatus[$i]}
l2=${vstatus[$(($i + 1))]}
l2=$(echo "$l2" | awk -F '"' '{print $2}')
echo "$l1" | grep each >> /dev/null
if [[ $? -eq 0 ]]; then
count_START=$(echo "$l1" | grep each | awk -F "." '{print $1}' | awk -F "(" '{print $2}')
count_END=$(echo "$l1" | grep each | awk -F "." '{print $3}' | awk -F ")" '{print $1}')
count_END=$(( $count_END + 1 ))
for (( incr = $count_START; incr < $count_END; incr++ )); do
NEWVAR="${l2//\#\{i\}/$incr}"
compound+="$NEWVAR "
done
else
compound+="$l2 "
fi
done
echo complete -W \""$compound"\" vcmd.sh > .vcmd-completion.bash
echo export HAVESOURCEDVCMD=1 >> .vcmd-completion.bash
if [[ $HAVESOURCEDVCMD -ne 1 ]]; then
echo "${CYAN}|--------------------------------------------------------------------|${NORM}"
echo "${CYAN}| NOTICE |${NORM}"
echo "${CYAN}| |${NORM}"
echo "${CYAN}|${NORM} vcmd.sh supports completions with machine names from ${CYAN}|${NORM}"
echo "${CYAN}|${NORM} the VagrantFile. To use these completions, run the ${CYAN}|${NORM}"
echo "${CYAN}|${NORM} following command in this terminal. ${CYAN}|${NORM}"
echo "${CYAN}|${NORM} ${YELLOW}source .vcmd-completion.bash${NORM} ${CYAN}|${NORM}"
echo "${CYAN}| |${NORM}"
echo "${CYAN}|--------------------------------------------------------------------|${NORM}"
fi
}
_setcompletionOld(){
md5cur=$(md5sum Vagrantfile)
if [[ -e .VAGMD5 ]]; then
md5old=$(cat .VAGMD5)
else
md5old=xxx
fi
if [[ "$md5cur" != "$md5old" ]]; then
echo "Vagrant file has changed, rebuilding completions"
vagrant status | tail -n +3 > .VAGstatus
echo "$md5cur" > .VAGMD5
fi
readarray -t vstatus <<< "$(cat .VAGstatus)"
compound=""
for line in "${vstatus[@]}"; do
if [[ "$line" = "" ]]; then
break;
fi
name=$(echo $line| awk -F ' ' '{print $1}');
compound+="$name "
done
echo complete -W \""$compound"\" vcmd.sh > .vcmd-completion.bash
echo "source .vcmd-completion.bash to have completions work"
}
runcmd(){
cmd="$1"
if [[ $cmd = "burn" ]]; then
if [[ $FAILEDBUILD -eq 1 ]]; then
echo "will not burn, as the build failed"
else
cd triggers
echo ./sdburn.sh "$downloaddir/$VM/latest.tar.gz" $drive
./sdburn.sh "$downloaddir/$VM/latest.tar.gz" $drive
cd ../
fi
elif [[ $cmd = "destroy" ]]; then
vagrant destroy -f "$VM"
elif [[ $cmd = "destroy-all" ]]; then
vagrant destroy -f
elif [[ $cmd = "download" ]]; then
mkdir -p $downloaddir/$VM/
DT=$(date '+%h-%d_%H:%M')
vagrant up --no-provision "$VM"
vagrant scp $VM:~/result.about /tmp/
source /tmp/result.about
targetfile=$downloaddir/$VM/$MACHINE-$DT".tar.gz"
echo "the target: $targetfile"
if [[ $LASTBUILD != "failed" ]]; then
vagrant scp $VM:~/result.wic.gz $downloaddir/$VM/
mv $downloaddir/$VM/result.wic.gz $targetfile
rm -rf $downloaddir/$VM/latest.tar.gz
ln -s $targetfile $downloaddir/$VM/latest.tar.gz
else
echo "failed build, will not burn or download"
FAILEDBUILD=1;
fi
elif [[ $cmd = "halt" ]]; then
vagrant halt "$VM"
_stopwait
elif [[ $cmd = "halt-all" ]]; then
vagrant -v
vagrant halt
safemode=0
elif [[ $cmd = "new" ]]; then
vagrant destroy -f "$VM"
vagrant up "$VM"
elif [[ $cmd = "rebuild" ]]; then
vagrant halt "$VM"
_stopwait
sleep 1
vagrant up --provision "$VM"
elif [[ $cmd = "rsync-auto" ]]; then
vagrant rsync-auto "$VM"
safemode=0;
elif [[ $cmd = "ssh" ]]; then
vagrant up --no-provision "$VM"
vagrant ssh "$VM"
elif [[ $cmd = "status" ]]; then
if [[ $VM = "status" ]]; then
vagrant status
safemode=0
else
vagrant status "$VM"
safemode=0
fi
elif [[ $cmd = "status-all" ]]; then
vagrant status
safemode=0
elif [[ $cmd = "up" ]]; then
vagrant up "$VM"
elif [[ $cmd = "up-only" || $cmd = "up-np" ]]; then
vagrant up --no-provision "$VM"
fi
}
main(){
VM="$1"
shift;
cmdValidate "$@"
state=""
startTime=$(date +%s)
pushd . >> /dev/null
cd $THISDIR
echo "Processing $@"
for value in "$@"; do
runcmd "${value}"
lastcmd="${value}"
done
if [[ $safemode -eq 1 && $lastcmd != "halt" && $lastcmd != "destroy" ]]; then
runcmd halt
fi
popd >> /dev/null
endTime=$(date +%s)
startTimeH=$(printf "%(%H:%M)T" $startTime)
endTimeH=$(printf "%(%H:%M)T" $endTime)
elapsedTimesec=$(( $endTime - $startTime ))
elapsedTimemin=$(( $elapsedTimesec / 60 ))
echo -e "Started:\t$startTimeH"
echo -e "Ended:\t$endTimeH"
echo -e "Elapsed:\t$elapsedTimemin"
_say "The computer needs your attention"
}
useage() {
echo "${BOLD}USEAGE:${NORM} $0 [-d drive] [-h help] [-s safemodeoff] <vagrant name> <list of commands>"
echo ""
echo -e "${BOLD}OPTIONS${NORM}"
echo -e " ${BOLD}-d${NORM} drive for burning e.g. /dev/disk5"
echo -e " ${BOLD}-h${NORM} help"
echo -e " ${BOLD}-i${NORM} image"
echo -e " ${BOLD}-m${NORM} machine"
echo -e " ${BOLD}-s${NORM} disables safemode, which auto-halts the machine each time to save money!"
echo -e " ${BOLD}-S${NORM} disables sound"
echo ""
echo -e "${BOLD}LIST OF COMMANDS${NORM}"
mapfile -d '' sorted < <(printf '%s\0' "${!acmd[@]}" | sort -z)
for KEY in "${sorted[@]}"; do
VALUE="${acmd[$KEY]}"
echo -e " ${BOLD}$KEY${NORM}: $VALUE"
done
exit 1;
}
while getopts ":b:B:d:i:m:hsS" o; do
case "${o}" in
b) export BRANCH=$OPTARG; ;;
#
B) export BUILDMODE=$OPTARG; ;;
#
d) drive=${OPTARG}; ;;
#
h) useage; ;;
#
i) export IMAGE=$OPTARG; ;;
#
m) export MACHINE=$OPTARG; ;;
#
s) safemode=0; ;;
#
S) silent=1; ;;
#
\?) echo -e \n"Option -${BOLD}$OPTARG${NORM} not allowed."; useage; ;;
#
esac
done
shift $((OPTIND-1))
_setcompletion
if [[ $# -lt 1 ]]; then
useage
else
main "$@"
fi
| true |
187d2ba99ad7c039e66abce010a818b482beb2df
|
Shell
|
JrGoodle/clowder
|
/script/cibuild
|
UTF-8
| 649 | 2.84375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# shellcheck disable=SC1091
set -euo pipefail
cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )/.."
. 'script/utils.sh'
run "${PWD}/script/bootstrap"
h1 'Uninstall system git lfs filters'
sudo git config --system --unset-all filter.lfs.clean || true
sudo git config --system --unset-all filter.lfs.smudge || true
sudo git config --system --unset-all filter.lfs.process || true
sudo git config --system --unset-all filter.lfs.required || true
h2 'Update pip'
run python -m pip install --upgrade pip
h2 'Install requirements'
run pip install -r requirements.txt
h3 'Install clowder in editable mode'
run pip install -e .
| true |
8f40a493e82b378fba2f185b4cf1bb14457ec90b
|
Shell
|
detectivelyw/linuxkit-automated-experiments
|
/scripts/01-ubuntu/linuxkit-script-test.sh
|
UTF-8
| 360 | 2.640625 | 3 |
[] |
no_license
|
#!/bin/sh
runc_cmd="runc --root /run/containerd/runc/services.linuxkit/ exec -t"
container_task="ubuntu"
container_obtain_data="ubuntu"
container_script_path="/root/ubuntu-script"
cmd_task="$runc_cmd $container_task $container_script_path"
echo "start running auto experiment ..."
echo "CMD_TASK: $cmd_task"
eval $cmd_task
echo "auto experiment completed."
| true |
01a7fbc96c2d4f8d39f3649d4d6475e757461ff8
|
Shell
|
rajeshlwc/rc-flow-components
|
/scripts/zip-metadata.sh
|
UTF-8
| 324 | 3.609375 | 4 |
[] |
no_license
|
#!/bin/bash
META_DATA_DIR=../src
ZIP_DIR=../archive
FILE_NAME=master.zip
ZIP=$ZIP_DIR/$FILE_NAME
CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
echo creating zip archive of the metadata source: $ZIP
rm -rf $ZIP
cd $META_DATA_DIR
zip -r $ZIP ./*
echo zip file created: $ZIP
cd $CURRENT_DIR
| true |
5e3dbb58a8ea1107249d19c45044eb73dd09ecc0
|
Shell
|
xiujia/audit_multi
|
/dbm_watchdog
|
UTF-8
| 1,434 | 2.828125 | 3 |
[] |
no_license
|
#!/bin/bash
bin_path=/usr/inp/bin/
local_path=/usr/local/bin/
audit_process="$bin_path"audit_process
audit_csp_release="$bin_path"audit_csp_rel
#audit_sql_insert="$bin_path"audit_sql_insert
audit_mongo_insert="$bin_path"audit_mongo_insert
audit_runtime_redis="$bin_path"audit_runtime_redis
redis_server="$local_path"redis-server
hbase_thrift=/usr/local/hbase-0.98.10.1-hadoop1/bin/hbase-daemon.sh
check_process()
{
#cmd0=`ps -ef|awk '{print $8}'|grep audit_process|wc -l`
for (( i=0; i < 10; i++ ))
do
cmd1=`ps -ef|awk '{print $8,$9}'|grep audit_csp_rel|awk '{print $2}'|grep $i|wc -l`
if [ $cmd1 -eq 0 ];then
/usr/inp/bin/audit_csp_rel $i 1
fi
cmd2=`ps -ef|awk '{print $8,$9}'|grep audit_studio_rel|awk '{print $2}'|grep $i|wc -l`
if [ $cmd2 -eq 0 ];then
/usr/inp/bin/audit_studio_rel $i 1
fi
cmd3=`ps -ef|awk '{print $8,$9}'|grep audit_sqlserver_rel|awk '{print $2}'|grep $i|wc -l`
if [ $cmd3 -eq 0 ];then
/usr/inp/bin/audit_sqlserver_rel $i 1
fi
done
cmd4=`ps -ef|awk '{print $8}'|grep audit_mongo_insert|wc -l`
cmd5=`ps -ef|awk '{print $8}'|grep audit_runtime_redis|wc -l`
cmd6=`ps -ef|awk '{print $8}'|grep redis-server|wc -l`
if [ $cmd4 -eq 0 ];then
/usr/inp/bin/audit_mongo_insert 1
fi
if [ $cmd5 -eq 0 ];then
$audit_runtime_redis
fi
if [ $cmd6 -eq 0 ];then
$redis_server /etc/redis.conf
fi
# const_inp_pid = 0
}
flag=0
while true
do
sleep 2
check_process
done
| true |
b22da907cdcdd7d17798a8a289b8785f23973d45
|
Shell
|
jmcshane/coderbyte
|
/tool/createChal.sh
|
UTF-8
| 549 | 3.734375 | 4 |
[] |
no_license
|
#!/bin/bash
#Simplifies the creation of templates for the coderbyte challenges
if [ "$#" -ne 1 ]; then
echo "Usage createChal.sh FILENAME"
echo "Only accepts one argument"
exit 1;
fi
FILE_DIR=`pwd`
fileString="function $1(arg) {\n\n}\n\nvar out = $1();\nconsole.log(\"Should be \");\nconsole.log(out);\nout = $1();\nconsole.log(\"Should be \");\nconsole.log(out);\nout = $1();\nconsole.log(\"Should be \");\nconsole.log(out);\nout = $1();\nconsole.log(\"Should be \");\nconsole.log(out);\n"
echo -e $"$fileString" > "$FILE_DIR/$1.js"
| true |
f3dc11817695459675fb3f6dd3f6840c249c948a
|
Shell
|
dongshaohui/ansible_pre_script
|
/scheduler-restart.sh
|
UTF-8
| 1,089 | 2.765625 | 3 |
[] |
no_license
|
#!/bin/bash
function sms_scheduler(){
ssh 10.253.1.140 "cd /etc/init.d/ ; ./$1 $2 "
}
function yeepay_withdraw_scheduler(){
ssh 10.253.1.140 "cd /etc/init.d/ ; ./$1 $2 "
}
function chinapay_deposit_scheduler(){
ssh 10.253.16.37 "cd /etc/init.d/ ; ./$1 $2 "
}
function chinapay_withdraw_scheduler(){
ssh 10.253.1.140 "cd /etc/init.d/ ; ./$1 $2"
}
function contract_settle_scheduler(){
ssh 10.139.110.69 "cd /etc/init.d/ ;./$1 $2 "
}
function contract_delivery_scheduler(){
ssh 10.139.96.97 "cd /etc/init.d/ ;./$1 $2 "
}
function subject_pass_scheduler(){
ssh 10.139.110.69 "cd /etc/init.d/ ; ./$1 $2 "
}
function subject_shelf_scheduler(){
ssh 10.139.96.97 "cd /etc/init.d/ ; ./$1 $2"
}
if [ $# == 2 ]
then
$1 $1 $2
elif [ $# == 0 ]
then
echo -e "USAGE:\n
\t arg 1: one of
\t sms_scheduler
\t yeepay_withdraw_scheduler
\t chinapay_deposit_scheduler
\t chinapay_withdraw_scheduler
\t contract_settle_scheduler
\t contract_delivery_scheduler
\t subject_pass_scheduler
\t subject_shelf_scheduler
\t arg 2: stop / start / restart / status
\n"
fi
#case "$1" in
#esac
| true |
db7d6032ead67d2c965850a28f899dce0e65e805
|
Shell
|
jcstr/arch4edu
|
/sublime-text2/sublime-text2.sh
|
UTF-8
| 453 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/bash
ST=/opt/sublime-text2
# needed for .desktop launcher
ARGS="--class=sublime_text"
# Note: Sublime Text 2 opens an empty instance if the project to open is
# already opened in another instance instead of just giving it focus.
if [[ ${1:(-16)} == ".sublime-project" ]]; then
ARGS="${ARGS} --project"
fi
# LD_LIBRARY_PATH is needed for old libpng
export LD_LIBRARY_PATH=${ST}/lib:${LD_LIBRARY_PATH}
exec ${ST}/sublime_text ${ARGS} "$@"
| true |
bae52ca09e4bdc0866f57ec4fc39849b1d8bb8b8
|
Shell
|
brwnj/ggd-recipes
|
/recipes/genomics/Homo_sapiens/GRCh37/grch37-clinically-associated-variants-ensembl-v1/recipe.sh
|
UTF-8
| 1,777 | 3 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
set -eo pipefail -o nounset
## Get the .genome file
genome=https://raw.githubusercontent.com/gogetdata/ggd-recipes/master/genomes/Homo_sapiens/GRCh37/GRCh37.genome
## Get ref genome
ref_genome=$(ggd get-files grch38-reference-genome-ensembl-v1 --pattern "*.fa" )
fai_file=$(ggd get-files grch38-reference-genome-ensembl-v1 --pattern "*.fai" )
## Get chain file:
wget --quiet ftp://ftp.ensembl.org/pub/assembly_mapping/homo_sapiens/GRCh38_to_GRCh37.chain.gz
zgrep -v "PATCH" GRCh38_to_GRCh37.chain.gz > GRCh38_to_GRCh37.chain
## Get the clinically associated variant file,
wget --quiet ftp://ftp.ensembl.org/pub/release-99/variation/vcf/homo_sapiens/homo_sapiens_clinically_associated.vcf.gz
## Add contig header
## Decompose
## Normalize
bcftools reheader --fai $fai_file homo_sapiens_clinically_associated.vcf.gz \
| gzip -dc \
| vt decompose -s - \
| vt normalize -r $ref_genome -n - -o grch38-decomposed-normalized-clinically-associated.vcf
rm homo_sapiens_clinically_associated.vcf.gz
## Get the reference genome
ref_genome=$(ggd get-files grch37-reference-genome-ensembl-v1 --pattern "*.fa")
## VCF Liftover
CrossMap.py vcf GRCh38_to_GRCh37.chain grch38-decomposed-normalized-clinically-associated.vcf $ref_genome liftover_grch37_clinically_associated.vcf
## Sort, bgzip, and tabix the lifted over vcf file
gsort liftover_grch37_clinically_associated.vcf $genome \
| bgzip -c > grch37-clinically-associated-variants-ensembl-v1.vcf.gz
tabix grch37-clinically-associated-variants-ensembl-v1.vcf.gz
## Remove temp files
rm GRCh38_to_GRCh37.chain.gz
rm GRCh38_to_GRCh37.chain
rm liftover_grch37_clinically_associated.vcf
rm liftover_grch37_clinically_associated.vcf.unmap
rm grch38-decomposed-normalized-clinically-associated.vcf
| true |
76285f793d6c73c31c3db59d499d00bbc27b20c9
|
Shell
|
abujalski/Webports
|
/ports/lua/build.sh
|
UTF-8
| 2,828 | 2.984375 | 3 |
[
"BSD-3-Clause",
"GPL-2.0-or-later",
"LicenseRef-scancode-mit-old-style",
"MPL-1.1",
"ImageMagick",
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GFDL-1.2-only",
"LicenseRef-scancode-other-permissive",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"Libpng",
"LicenseRef-scancode-on2-patent",
"LicenseRef-scancode-greg-roelofs",
"LicenseRef-scancode-unknown-license-reference",
"WxWindows-exception-3.1",
"GPL-2.0-only",
"BSL-1.0",
"Apache-2.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown"
] |
permissive
|
# Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
BUILD_DIR=${SRC_DIR}
EXECUTABLES="src/lua${NACL_EXEEXT} src/luac${NACL_EXEEXT}"
EnableCliMain
if [ ${TOOLCHAIN} == glibc ]; then
PLAT=nacl-glibc
elif [ ${TOOLCHAIN} == emscripten ]; then
PLAT=emscripten
else
PLAT=nacl-newlib
fi
TEST_FILE=lua-5.3.0-tests.tar.gz
TEST_URL=http://www.lua.org/tests/${TEST_FILE}
TEST_SHA1=8fd633ab67edf5e824c2afc62f318de245fce268
DownloadStep() {
if ! CheckHash ${NACL_PACKAGES_CACHE}/${TEST_FILE} ${TEST_SHA1}; then
Fetch ${TEST_URL} ${NACL_PACKAGES_CACHE}/${TEST_FILE}
if ! CheckHash ${NACL_PACKAGES_CACHE}/${TEST_FILE} ${TEST_SHA1} ; then
Banner "${TEST_FILE} failed checksum!"
exit 1
fi
fi
ChangeDir ${BUILD_DIR}
if [ -d lua-5.3.0-tests ]; then
Remove lua-5.3.0-tests
fi
LogExecute tar zxf ${NACL_PACKAGES_CACHE}/${TEST_FILE}
ChangeDir lua-5.3.0-tests
LogExecute patch -p1 < ${START_DIR}/lua_tests.patch
}
BuildStep() {
LogExecute make PLAT=${PLAT} clean
set -x
make MYLDFLAGS="${NACLPORTS_LDFLAGS}" MYCFLAGS="${NACLPORTS_CPPFLAGS}" \
AR="${NACLAR} rcu" RANLIB="${NACLRANLIB}" CC="${NACLCC} -std=gnu99" \
MYLIBS="${NACLPORTS_LIBS}" \
PLAT=${PLAT} EXEEXT=${NACL_EXEEXT} -j${OS_JOBS}
set +x
}
TestStep() {
# First, run the 'make test' target. This currently just runs
# lua -v.
LogExecute make PLAT=${PLAT} test
if [[ ${TOOLCHAIN} == emscripten ]]; then
# TODO(sbc): fix lua tests running under node.js
return
fi
# Second, run the lua unittests. See: http://www.lua.org/tests/
ChangeDir lua-5.3.0-tests
LogExecute ../src/lua -e"_U=true" all.lua
}
InstallStep() {
LogExecute make PLAT=${PLAT} EXEEXT=${NACL_EXEEXT} \
INSTALL_TOP=${DESTDIR}/${PREFIX} install
}
PublishStep() {
MakeDir ${PUBLISH_DIR}
ChangeDir ${PUBLISH_DIR}
PublishMultiArch src/lua${NACL_EXEEXT} lua
LogExecute python ${TOOLS_DIR}/create_term.py lua
GenerateManifest ${START_DIR}/manifest.json ${PUBLISH_DIR}
InstallNaClTerm ${PUBLISH_DIR}
LogExecute cp ${START_DIR}/background.js ${PUBLISH_DIR}
LogExecute cp ${START_DIR}/lua.js ${PUBLISH_DIR}
LogExecute cp ${START_DIR}/*.lua ${PUBLISH_DIR}
LogExecute cp ${START_DIR}/index.html ${PUBLISH_DIR}
LogExecute cp ${START_DIR}/icon_16.png ${PUBLISH_DIR}
LogExecute cp ${START_DIR}/icon_48.png ${PUBLISH_DIR}
LogExecute cp ${START_DIR}/icon_128.png ${PUBLISH_DIR}
LogExecute rm -rf ${PUBLISH_DIR}/lua-5.3.0-tests
LogExecute cp -r ${BUILD_DIR}/lua-5.3.0-tests ${PUBLISH_DIR}
ChangeDir ${PUBLISH_DIR}
rm -f manifest.txt lua.zip
${NACL_SDK_ROOT}/tools/genhttpfs.py . -r > ../manifest.txt
mv ../manifest.txt .
CreateWebStoreZip lua.zip .
}
| true |
b7647b19ecacce6ae5ce20d3f3fd07a0e8013a59
|
Shell
|
thoughtworks-jumpstart/jumpstart-project-feedback-team-a
|
/run_all_tests.sh
|
UTF-8
| 359 | 2.59375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
set -e
## 1. run unit tests (client)
yarn test
## 2. run e2e tests
# background your server
yarn start &
# wait 7 seconds for app to start
sleep 7
# and now run cypress
$(yarn bin)/cypress run
# stop server
lsof -i tcp:3000 | grep LISTEN | awk '{print $2}' | xargs kill
lsof -i tcp:3001 | grep LISTEN | awk '{print $2}' | xargs kill
| true |
ad74e1be61c67c869a2731aa0ac0168a99804359
|
Shell
|
Icaro-Lima/LabarcFPGASimulatorDesktop
|
/server/launch_isa
|
UTF-8
| 1,200 | 3.359375 | 3 |
[] |
no_license
|
#!/bin/bash
# make sure there is at least one tmp directory so ls will not fail
mkdir -p isa/tmp.0
while : ; do # infinite loop
# get a directory to work on
for i in {1..10}; do
# set d to the i-th last tmp directory
d=$(ls -1dt isa/tmp.* | head -$i | tail -1)
# is there exactly one file in it ?
if [ "`ls -1 $d | wc -w`" == "1" ]; then
f=$(ls $d)
# f is the name of the one file in the directory
# p are the command line arguments
# They have been appended to the last line of the uploaded file.
p=$(tail -1 $d/$f | cut -d\| -f2)
if test -f $d/*.zip; then
(cd $d; unzip *.zip)
fi
cp /labarc/TOP/Makefile $d
if [ -z "$p" ]; then
# if there are no command line arguments, wo do not need a proxy kernel
echo "RVLDFL=-nostartfiles -T/usr/local/riscv/link.ld" > $d/Make_pk
else
# if there are program arguments we need the proxy kernel
echo "PK=pk" > $d/Make_pk
echo "PROG_ARGS="$p >> $d/Make_pk
fi
if test -f $d/*.101; then
(cd $d; make binary >qq.log 2>&1 )
fi
(cd $d; timeout 3600 make isa >>qq.log 2>&1 ) &
fi
done
sleep 2
done
| true |
c93ba450860a6fbd0baf7a6a99b94afdfbbe3dac
|
Shell
|
HDYA/smb-volume-release
|
/scripts/ci/run_driver_cert
|
UTF-8
| 1,877 | 3.453125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -x -e
function finish {
pkill ssh || true
}
trap finish EXIT
source persi-ci/scripts/ci/bbl_get_bosh_env
source bosh-env/set-env.sh
apt-get update
apt-get -y install cifs-utils realpath
pushd smb-volume-release
export GOROOT=/usr/local/go
export PATH="${GOROOT}/bin:${PATH}"
export GOPATH="${PWD}"
export PATH="${PWD}/bin:${PATH}"
export SMB_RELEASE_DIR="${PWD}"
go get github.com/onsi/ginkgo/ginkgo
go get github.com/onsi/gomega
listen_port=8589
listen_address="0.0.0.0:${listen_port}"
driver_address="http://${listen_address}"
mkdir -p ~/voldriver_plugins
drivers_path="$(realpath ~/voldriver_plugins)"
mkdir -p "${SMB_RELEASE_DIR}/tmp"
export FIXTURE_FILENAME="${SMB_RELEASE_DIR}/tmp/fixture.json"
echo "{
\"volman_driver_path\": \"~/voldriver_plugins\",
\"driver_address\": \"${driver_address}\",
\"driver_name\": \"smbdriver\",
\"create_config\": {
\"Name\": \"smb-volume-name\",
\"Opts\": {\"source\":\"${SMB_REMOTE_PATH}\",\"uid\":\"2000\",\"gid\":\"2000\",\"username\":\"${SMB_USERNAME}\",\"password\":\"${SMB_PASSWORD}\"}
}
} " > "${FIXTURE_FILENAME}"
pushd src/code.cloudfoundry.org/smbdriver
go build -o "${SMB_RELEASE_DIR}/tmp/smbdriver" "cmd/smbdriver/main.go"
popd
go get -t code.cloudfoundry.org/volume_driver_cert
pkill smbdriver || true
mountDir="${SMB_RELEASE_DIR}/tmp/mountdir"
mkdir -p "${mountDir}"
"${SMB_RELEASE_DIR}/tmp/smbdriver" -listenPort="${listen_port}" -transport="tcp" -driversPath="${drivers_path}" \
--mountDir="${mountDir}" \
--mountFlagAllowed="username,password,uid,gid,file_mode,dir_mode,readonly,domain,vers,sec" \
--mountFlagDefault="uid:2000,gid:2000" &
ginkgo -v -keepGoing src/code.cloudfoundry.org/volume_driver_cert -race
pkill smbdriver || true
rm -rf "${SMB_RELEASE_DIR}/tmp"
popd
| true |
d1e374c969e0733cf46393874ff739b9fbf70583
|
Shell
|
guyavrah1986/linuxProgramming
|
/miscellaneousTopics/findPattern.sh
|
UTF-8
| 3,496 | 3.796875 | 4 |
[] |
no_license
|
#!/bin/bash
source utilities.sh
#####################################################################################################
#####################################################################################################
# Global constants:
# -----------------
LOG_PREFIX="Find_Pattern"
DSWP_VERSION_EXTENSION=".out"
ST_VERSION_EXTENSION=".bin"
function Find_Patern_check_if_pattern_exists {
local pattern=$1
local input=$2
local logPrefix="${LOG_PREFIX}::check_if_pattern_exists"
echo "${logPrefix} - about to check if pattern:${pattern} exists in:${input}"
if [[ $input =~ $pattern ]];
then
echo "${green}${logPrefix} - found !!${reset}"
return 0
else
echo "${red}${logPrefix} - was NOT found !!${reset}"
return 1
fi
}
function Find_Pattern_verify_valid_version_prefix {
local versionPrefix=$1
local logPrefix="${LOG_PREFIX}::verify_valid_version_prefix"
echo "${logPrefix} - got version prefix:${versionPrefix}"
for i in "${UTILS_version_arr[@]}"
do
if [ "$i" == "$versionPrefix" ];
then
echo "${green}${logPrefix} - version prefix is valid${reset}"
return ${SUCCESS}
fi
done
echo "${red}${logPrefix} - version prefix is NOT valid${reset}"
return ${ERROR}
}
function Find_Pattern_verify_st_version_prefix {
local stVersionPrefix=$1
local logPrefix="${LOG_PREFIX}::verify_st_version_prefix"
echo "${logPrefix} - got ST version prefix to check $stVersionPrefix"
for i in "${UTILS_st_bin_file_names_prefixes_arr[@]}"
do
#echo "${logPrefix} - about to check $i"
Find_Patern_check_if_pattern_exists "$i" ${stVersionPrefix}
local retCode=$?
if [ ${retCode} -eq 0 ];
then
echo "${green}${logPrefix} - detected ST version:$i${reset}"
return ${SUCCESS}
fi
done
echo "${red}${logPrefix} - did NOT detect valid ST version${reset}"
return ${ERROR}
}
function Find_Pattern_extract_version {
	local logPrefix="${LOG_PREFIX}::extract_version"
	local inputLine=$1
	local fileType=$2	# i.e. - is it a .bin (ST) or a .out (DSWP) file name
	# NOTE: minimal completion (assumption) based on the extension constants defined above
	if [ "$fileType" == "$ST_VERSION_EXTENSION" ]; then
		echo "${logPrefix} - extracting ST version from:${inputLine}"
	else
		echo "${logPrefix} - extracting DSWP version from:${inputLine}"
	fi
}
###################################################################################################################
###################################################################################################################
function test_pattern_exists {
Find_Patern_check_if_pattern_exists "abc" "abcde"
Find_Pattern_verify_valid_version_prefix "8.0"
Find_Pattern_verify_valid_version_prefix "7.5"
Find_Pattern_verify_valid_version_prefix "7.6"
Find_Pattern_verify_valid_version_prefix "8.1"
Find_Pattern_verify_valid_version_prefix ""
Find_Pattern_verify_valid_version_prefix "aws3.5"
Find_Pattern_verify_valid_version_prefix "8.0.90"
}
function test_st_version_prefix {
# GOOD versions
Find_Pattern_verify_st_version_prefix "NPT1800_Emb_750024"
Find_Pattern_verify_st_version_prefix "NPT1200i_Emb_800024"
Find_Pattern_verify_st_version_prefix "NPT1050i_Emb_750024"
# BAD versions
Find_Pattern_verify_st_version_prefix "NPT1800"
Find_Pattern_verify_st_version_prefix "NPT1500_Emb_750024"
Find_Pattern_verify_st_version_prefix ""
Find_Pattern_verify_st_version_prefix "NPT1050i_EMB_750024"
}
echo "tests - start"
#test_pattern_exists
test_st_version_prefix
echo "tests - end"
###################################################################################################################
###################################################################################################################
| true |
9a58fb1e04b3834d19f22cc748ec6fdf223de412
|
Shell
|
silviucpp/beanstalkd-deb
|
/setup/deb/postrm
|
UTF-8
| 2,116 | 3.703125 | 4 |
[] |
no_license
|
#!/bin/sh
# vim: ts=4:et:sts=4
# $Id: beanstalkd.postrm 89 2009-01-03 16:34:01Z robert $
set -e
# Automatically added by dh_installinit
if [ "$1" = "purge" ] ; then
update-rc.d beanstalkd remove >/dev/null
fi
# In case this system is running systemd, we make systemd reload the unit files
# to pick up changes.
if [ -d /run/systemd/system ] ; then
systemctl --system daemon-reload >/dev/null || true
fi
# End automatically added section
# Automatically added by dh_systemd_enable
if [ "$1" = "remove" ]; then
if [ -x "/usr/bin/deb-systemd-helper" ]; then
deb-systemd-helper mask beanstalkd.socket >/dev/null
fi
fi
if [ "$1" = "purge" ]; then
if [ -x "/usr/bin/deb-systemd-helper" ]; then
export _DEB_SYSTEMD_HELPER_PURGE=1
deb-systemd-helper disable beanstalkd.socket >/dev/null
deb-systemd-helper unmask beanstalkd.socket >/dev/null
fi
fi
# End automatically added section
# Automatically added by dh_systemd_enable
if [ "$1" = "remove" ]; then
if [ -x "/usr/bin/deb-systemd-helper" ]; then
deb-systemd-helper mask beanstalkd.service >/dev/null
fi
fi
if [ "$1" = "purge" ]; then
if [ -x "/usr/bin/deb-systemd-helper" ]; then
export _DEB_SYSTEMD_HELPER_PURGE=1
deb-systemd-helper disable beanstalkd.service >/dev/null
deb-systemd-helper unmask beanstalkd.service >/dev/null
fi
fi
# End automatically added section
PACKAGE=beanstalkd
DEFAULTSFILE=/etc/default/$PACKAGE
case "$1" in
failed-install|abort-install|abort-upgrade|failed-upgrade)
exit 0
;;
remove)
if [ -d /var/lib/beanstalkd ]; then
if [ $(find /var/lib/beanstalkd | wc -l) -eq 1 ]; then
rmdir /var/lib/beanstalkd
fi
fi
exit 0
;;
purge)
echo "purge requested, thus removing /var/lib/beanstalkd"
rm -rf /var/lib/beanstalkd/
# remove user, group and home directory beanstalkd
deluser --remove-home beanstalkd >/dev/null 2>&1 || true
exit 0
;;
upgrade|disappear)
exit 0
;;
*)
echo "postrm called with unknown argument \`$1'" >&2
exit 0
;;
esac
| true |
d179961f5b32f3f5ff3405f0434283c4d5784ccc
|
Shell
|
koeppl/hashbench
|
/scripts/resize_stats.sh
|
UTF-8
| 1,571 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/zsh
function die {
echo $1 >&2
exit 1
}
overflow_types=(cht_overflow dummy_overflow)
#overflow_types=(cht_overflow map_overflow array_overflow dummy_overflow)
hash_types=(HASH_SPLITMIX HASH_XORSHIFT HASH_MULTIPLICATIVE)
bucket_sizes=(32 64 128 192 255)
cht_overflow_fractions=(0.001 0.2 0.0001 1.0 0.8 0.6 0.1 0.01)
# array_overflow_lengths=(512 256 4 16 64)
# array_overflow_length=16
function Loop {
cat > ../linear_scaling.h <<EOF
#define SEPARATE_MAX_BUCKET_SIZE ${bucket_size}
#define ${hash_type} 1
#define OVERFLOW_TABLE ${overflow_type}
//#define ARRAY_OVERFLOW_LENGTH ${array_overflow_length}
#define CHT_OVERFLOW_FRACTION ${cht_overflow_fraction}
EOF
cat ../linear_scaling.h
cmake -DCMAKE_BUILD_TYPE=Release ..
prefixname="log_${hash_type}_${bucket_size}_${overflow_type}"
if [[ "$overflow_type" = 'cht_overflow' ]]; then
prefixname="$prefixname${cht_overflow_fraction}"
fi
jsonfile="${prefixname}.json"
datfile="${prefixname}.dat"
make linear_scaling || die "Could not compile!"
# ./linear_scaling $(calc '2<<22') | tee "$jsonfile" || die "Could not run!"
./linear_scaling $(calc '2<<26') > "$jsonfile" || die "Could not run!"
python3 ../scripts/resize_stats.py "$jsonfile" > "$datfile" || die "Failed to execute python script"
}
for hash_type in $hash_types; do
for overflow_type in $overflow_types; do
for bucket_size in $bucket_sizes; do
if [[ "$overflow_type" = 'cht_overflow' ]]; then
for cht_overflow_fraction in $cht_overflow_fractions; do
Loop
done
else
Loop
fi
done
done
done
| true |
478f4f2317115c9b1ea09fd435208589bd5ef9d8
|
Shell
|
mchalet/salt-cloud-Linode-scripts
|
/monitors_create.sh
|
UTF-8
| 693 | 3.265625 | 3 |
[] |
no_license
|
#!/bin/bash
# Creates and enables monitors for all '-lamp' minions.
# Defaults to URL and http://${ipv4_address}
# Warning: You cannot create a second monitor with the same label.
curl -sH "Authorization: Bearer $TOKEN" \
https://api.linode.com/v4/linode/instances | jq -r '.data[] | select(.label|test("-lamp")) | "\(.ipv4[0]) \(.label)"' |
while read line; do
echo "Editing ${line##* }"
curl -H "Content-Type: application/json" \
-H "Authorization: Bearer $TOKEN" \
-X POST -d '{
"service_type": "url",
"label": "'"${line##* }"'",
"address": "'"http://${line%% *}"'",
"timeout": 30
}' \
https://api.linode.com/v4/managed/services
done
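# Usage sketch: requires a Linode API token in the environment.
#   TOKEN=your-api-token ./monitors_create.sh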
| true |
ab7579fc35189d9af0873356422f7d442525804a
|
Shell
|
nextbitsys/lets_make_android
|
/build/sideloadable_ota_package/sideloadable_ota_package.sh
|
UTF-8
| 4,295 | 3.796875 | 4 |
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
if [[ "${0}" = /* ]]; then ME="${0}"; else ME="${PWD}/${0}"; fi
while [ -h ${ME} ]; do ME=$(readlink $ME); done
source `dirname ${ME}`/../../shell_base.sh
nonEmptyVarOrExit "CLIENT_HOME"
nonEmptyVarOrExit "DEVICE_NAME"
nonEmptyVarOrExit "WORKSPACE"
nonEmptyVarOrExit "PIPELINE_NUMBER"
DIST_NAME=
lookupDistName() {
DIST_NAME=$(${rootDir}/utils/list_pipeline_meta.sh --pipeline_number=${PIPELINE_NUMBER} --showdist)
if [[ "${DIST_NAME}" == "" ]]; then
printRed "Unable to find DIST_NAME for P${PIPELINE_NUMBER}"
return 1
fi
printGreen "Using DIST_NAME ${DIST_NAME}"
}
downloadDeviceArtifacts() {
# s3://BUCKET/files/devices/BRANCH/common/B38-P17--2014-05-20_02-26-38--cm_hammerhead-userdebug/
S3_PATH=$(${rootDir}/utils/list_builds.sh --device=${DEVICE_NAME} --pipeline_number=${PIPELINE_NUMBER} --dist=${DIST_NAME} --numrows=1)
if [[ -z "${S3_PATH}" ]]; then
echo "device artifacts not found."
return 1
fi
# Only download bin subdir
S3_PATH="${S3_PATH}bin/"
printGreen "Selected S3 dir: ${S3_PATH}"
s3cmd get --recursive "${S3_PATH}" ${WORKSPACE}/zip
}
copyDeviceArtifacts() {
# chdir to ensure the directory exists (build should fail if it does not exist)
cd "${WORKSPACE}/s3_device/bin" &&
rsync -vPra ${WORKSPACE}/s3_device/bin/ ${WORKSPACE}/zip
}
downloadSystemAppArtifacts() {
# s3://BUCKET/files/clients/NAME_OF_GRADLE_APP_PROJECT/BRANCH/B783-P839--2014-09-11_21-05-12/apk/sdk_app-debug.apk
#
# TODO: convert this to use --artifact=$1 and basename from the result.
S3_PATH=$(${rootDir}/utils/list_builds_client.sh --artifact=sdk_app-debug.apk --pipeline_number=${PIPELINE_NUMBER} --dist=${DIST_NAME} --numrows=1)
if [[ -z "${S3_PATH}" ]]; then
echo "sdk_app artifacts not found."
return 1
fi
# Only download .apk object.
printGreen "sdk_app artifacts: ${S3_PATH}"
APK_NAME=$1
# Install APK
mkdir -p ${WORKSPACE}/zip/system/app &&
${rootDir}/utils/s3cmd get --recursive "${S3_PATH}" ${WORKSPACE}/zip/system/app/${APK_NAME}.apk
if [[ $? != 0 ]]; then
echo "Unable to download ${APK_NAME}.apk from S3."
return 1
fi
printGreen "Extracting any JNI files within sdk_app"
# Extract JNI libraries into /system/lib
# Use the device's build.prop to detect the device architecture
DEVICE_ARCH=$(grep ^ro.product.cpu.abi= "${WORKSPACE}/s3_device/bin/system/build.prop" | cut -d '=' -f 2)
if [[ "${DEVICE_ARCH}" == "" ]]; then
echo "Unable to determine architecture for device \"${DEVICE_NAME}\". Cannot properly install JNI."
return 1
fi
# Unzip JNI directories
mkdir -p ${WORKSPACE}/apk_jni &&
# unzip(1) returns 0 if successful, 11 if "no matching files were found"
unzip -d ${WORKSPACE}/apk_jni ${WORKSPACE}/zip/system/app/${APK_NAME}.apk "lib/${DEVICE_ARCH}/*"
UNZIP_RV=$?
case "$UNZIP_RV" in
0)
mkdir -p ${WORKSPACE}/zip/system/lib/ &&
rsync -vPra ${WORKSPACE}/apk_jni/lib/${DEVICE_ARCH}/ $_
;;
11)
printRed "warning: No JNI libraries found. Continuing."
;;
*)
printRed "error: unzip returned unknown error code $?"
return 1
;;
esac
}
printGreen "Jenkins Stage: looking up DIST_NAME" &&
lookupDistName || (printRed "FAILED"; false) &&
mkdir -p "${WORKSPACE}/zip" &&
#printGreen "Jenkins Stage: downloading device artifacts" &&
#downloadDeviceArtifacts || (printRed "FAILED"; false) &&
printGreen "Jenkins Stage: copying device artifacts" &&
copyDeviceArtifacts || (printRed "FAILED"; false) &&
printGreen "Jenkins Stage: downloading sdk_app artifacts" &&
downloadSystemAppArtifacts sdk_app || (printRed "FAILED"; false) &&
mkdir -p "${WORKSPACE}/s3_device/ota" &&
cd ${CLIENT_HOME} &&
PYTHONPATH="${CLIENT_HOME}/build/tools/releasetools" \
${rootDir}/build/sideloadable_ota_package/build_delta_ota \
-d "${WORKSPACE}/zip/" \
-b "${rootDir}/build/sideloadable_ota_package/ota-begin-script.edify" \
-e "${rootDir}/build/sideloadable_ota_package/ota-extra-script.edify" \
--path "${rootDir}/utils/binaries" \
--signapk_path host/signapk.jar \
"${WORKSPACE}/s3_device/ota/sideload.zip"
| true |
82ac4675f0fd83490491160fcd5086c0968b7329
|
Shell
|
tiantianwdy/hdm
|
/hdm-core/src/main/sbin/startup.sh
|
UTF-8
| 536 | 2.90625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# DEBUG_OPTS="-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=8100"
JAVA_HOME=""
HDM_HOME="./"
cd $HDM_HOME
lib=`find lib -name *.jar | xargs`
nodeType="$1"
shift
if [ $nodeType == 'master' ]; then
${JAVA_HOME}java $DEBUG_OPTS -Dfile.encoding=UTF-8 -cp "$lib" -jar ./hdm-core-0.0.1.jar -m true -n cluster -f "./hdm-core.conf" "$@"
elif [ $nodeType == 'slave' ]; then
${JAVA_HOME}java $DEBUG_OPTS -Dfile.encoding=UTF-8 -cp "$lib" -jar ./hdm-core-0.0.1.jar -m false -n cluster -f "./hdm-core.conf" "$@"
fi
| true |
e0f5ea7f4579611447f4241d92b55c930c272cc5
|
Shell
|
FauxFaux/debian-control
|
/s/shorewall6-lite/shorewall6-lite_5.2.1.4-1_all/postinst
|
UTF-8
| 883 | 3.3125 | 3 |
[] |
no_license
|
#!/bin/sh -e
case "$1" in
configure)
# if we are upgrading from version < 3.4 then we have to rename the
# configuration file
if [ "$2" ] && dpkg --compare-versions "$2" lt "3.4"
then
if [ -f /etc/shorewall6-lite/shorewall.conf ]
then
rm /etc/shorewall6-lite/shorewall.conf
fi
fi
;;
abort-upgrade|abort-remove|abort-deconfigure)
;;
*)
echo "postinst called with unknown argument \`$1'" >&2
exit 0
;;
esac
# Automatically added by dh_installinit/11.5.4
if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ] || [ "$1" = "abort-deconfigure" ] || [ "$1" = "abort-remove" ] ; then
if [ -x "/etc/init.d/shorewall6-lite" ]; then
update-rc.d shorewall6-lite defaults >/dev/null || exit 1
fi
fi
# End automatically added section
| true |
ca0e7273c49ec16a1c82a703f1c6c0cc35178b2b
|
Shell
|
lucaswannen/source_code_classification_with_CNN
|
/dataset_v2/bash/5075123.txt
|
UTF-8
| 253 | 2.875 | 3 |
[] |
no_license
|
#!/bin/bash
function xTrigger()
{
for ii in `seq 1 100`; do echo $ii; sleep 2; done
}
function xRunner()
{
sleep 10;
echo $1;
}
export -f xTrigger
export -f xRunner
bash -c "xTrigger" | xargs -n 1 -P 1 -i bash -c "xRunner {}"
killall xTrigger
| true |
7bcdd13f33c3221fda32d6358f92ebcaa5233240
|
Shell
|
onosendi/dotfiles
|
/.zshrc
|
UTF-8
| 1,453 | 3.171875 | 3 |
[] |
no_license
|
fpath+=~/.zfunc
# Colorize pacman results.
alias pacman="pacman --color auto"
# Manage dotfiles.
alias dotfiles="/usr/bin/git --git-dir=$HOME/.dotfiles.git/ --work-tree=$HOME"
alias global-dotfiles="/usr/bin/git --git-dir=$HOME/.global-dotfiles.git/ --work-tree=$HOME"
# By default Mutt will save attachments to the folder it was started from.
# Change directory to ~/Downloads and start mutt.
alias neomutt="cd ~/Downloads && /usr/bin/neomutt"
# Set keyboard repeat rate/delay
if [ $DISPLAY ]; then
xset r rate 275 35
xset m 0 0 # Turn off acceleration
fi
# Alias vim => nvim
alias vim="nvim"
##
# grml-zsh-config
#
autoload -U colors && colors
zstyle ':vcs_info:*' enable git
zstyle ':vcs_info:*' check-for-changes true
zstyle ':vcs_info:*' unstagedstr '!'
zstyle ':vcs_info:*' stagedstr '+'
zstyle ':vcs_info:git*' formats "%{${fg[cyan]}%}[%{$reset_color%}%{${fg_bold[red]}%}%b%{$reset_color%}%{${fg_bold[yellow]}%}%m%u%c%{$reset_color%}%{${fg[cyan]}%}]%{$reset_color%} "
source /usr/bin/virtualenvwrapper.sh
function virtual_env_prompt () {
REPLY=${VIRTUAL_ENV+(${VIRTUAL_ENV:t}) }
}
grml_theme_add_token virtual-env -f virtual_env_prompt '%F{white}' '%f'
zstyle ':prompt:grml:left:setup' items rc change-root virtual-env path vcs percent
# Use vi mode
bindkey -v
# Setting fd as the default source for fzf
export FZF_DEFAULT_COMMAND='fd --type f --hidden --exclude .git/'
# For NVM
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
| true |
183c7d189d162e9d79f1e59b774420276599cb34
|
Shell
|
Rocket-Buddha/kubos-vagrant
|
/kubos-dev/script/pre-package.sh
|
UTF-8
| 715 | 3.234375 | 3 |
[] |
no_license
|
#!/bin/bash
set -x
#Cleaning up before distribution
#Clear out the cache
rm -rf /var/cache/*
#Making all empty storage zeros allows the final box to be better compressed
dd if=/dev/zero of=/EMPTY bs=1M
rm -f /EMPTY
sync
#Change the ssh key to the default vagrant insecure key so others can ssh in when they start this box locally
echo " Changing SSH keys"
echo "AuthorizedKeysFile %h/.ssh/authorized_keys" >> /etc/ssh/sshd_config
echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key" > /home/vagrant/.ssh/authorized_keysmod 0600 /home/vagrant/.ssh/authorized_keys
chmod 0600 /home/vagrant/.ssh/authorized_keys
chmod 0700 /home/vagrant/.ssh
chown -R vagrant /home/vagrant/.ssh
#clear terminal history
history -cw
| true |
8ea90a773d1b04331565026bd28091cf187c0b83
|
Shell
|
fehu/util-experimental
|
/shell/src/test/resources/echoEvery.sh
|
UTF-8
| 928 | 3.859375 | 4 |
[] |
no_license
|
#!/bin/bash
SLEEP=$1 ## Time to sleep between echo
TIMES=$2 ## How many times to echo
MSG=$3 ## The message to echo
APPEND_ITER=$4 ## Append iteration number to the message?
ECHO_ERR=$5 ## Echo in error stream?
ERROR_IN_FIN=$6 ## error on termination?
ECHO_MSG_FIN=$7 ## echo on termination?
output() {
msg="$@"
case $ECHO_ERR in
true) echo $msg >/dev/stderr ;;
false) echo $msg ;;
both) echo $msg
echo $msg >/dev/stderr ;;
esac
}
for ((i=1; i<=$TIMES; i++))
do
if [ $APPEND_ITER -eq 0 ]; then txt=$MSG
else txt=$MSG$i
fi
output $txt
if [ $i -ne $TIMES ]; then sleep $SLEEP
fi
done
if [ $ECHO_MSG_FIN -ne 0 ]; then
MSG_END=$8 ## Message to echo on termination
output $MSG_END
fi
if [ $ERROR_IN_FIN -ne 0 ]; then ((1/0)) # cause error
fi
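# Illustrative invocation (arguments map to the positional parameters
# documented above; the values are assumptions, not project defaults):
#   ./echoEvery.sh 1 3 "tick" 1 false 0 1 "all done"
# prints "tick1".."tick3" to stdout one second apart, then "all done".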
| true |
5d243b3f2647e231b651d9fa3e91616c975893db
|
Shell
|
Sravan13/Shell-Script
|
/softwareInstall.sh
|
UTF-8
| 543 | 4 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
if [[ $# -eq 0 ]]
then
echo "Usage: $0 pkg1 pkg2 ......"
exit 1
fi
if [[ $(id -u) -ne 0 ]]
then
echo "Please run for root user or sudo privileges"
exit 2
fi
for each_pkg in "$@"
do
if which $each_pkg &> /dev/null
then
echo "Already package $each_pkg is installed"
else
echo "Installing $each_pkg ..............."
apt-get install $each_pkg -y &> /dev/null
if [[ $? -eq 0 ]]
then
echo "Package $each_pkg is installed successfully"
else
echo "Unable to install $each_pkg status : $?"
fi
fi
done
| true |
992745321092f6ad847cf6d9e6f624887bf42286
|
Shell
|
jonascfs/DamaShell
|
/machine.sh
|
UTF-8
| 4,868 | 3.171875 | 3 |
[] |
no_license
|
#!/bin/bash
source util.sh
source rules.sh
declare -a letras
declare -A numeros
letras=(x a b c d e f g h)
n="0"
for i in ${letras[@]}
do
numeros[$i]=$n
n=$(expr $n + 1)
done
function hasThreat (){
#$1 col, $2 linha, $3 jogador, $4 col_coordenada, $5 linha_coordenada
num_col_atual=${numeros[$1]}
if hasPosition ${letras[$(expr $num_col_atual + $4)]} $(expr $2 + $5) && [ $(abs $(getValue ${letras[$(expr $num_col_atual + $4)]} $(expr $2 + $5))) -eq $(otherPlayer $3) ] && isEmpty ${letras[$(expr $num_col_atual - $4)]} $(expr $2 - $5); then
return 0
else
return 1
fi
}
function getMoves(){
#$1 col, $2 linha, $3 jogador
num_col_atual=${numeros[$1]}
if isEmpty ${letras[$(expr $num_col_atual + 1)]} $(expr $2 + 1) && [ $3 -eq 2 ]; then
echo "${letras[$(expr $num_col_atual + 1)]} $(expr $2 + 1)"
fi
if isEmpty ${letras[$(expr $num_col_atual + 1)]} $(expr $2 - 1) && [ $3 -eq 1 ]; then
echo "${letras[$(expr $num_col_atual + 1)]} $(expr $2 - 1)"
fi
if isEmpty ${letras[$(expr $num_col_atual - 1)]} $(expr $2 + 1) && [ $3 -eq 2 ]; then
echo "${letras[$(expr $num_col_atual - 1)]} $(expr $2 + 1)"
fi
if isEmpty ${letras[$(expr $num_col_atual - 1)]} $(expr $2 - 1) && [ $3 -eq 1 ]; then
echo "${letras[$(expr $num_col_atual - 1)]} $(expr $2 - 1)"
fi
}
function isFood(){
#$1 coluna, $2 linha, $3 jogador
if hasThreat $1 $2 $3 1 1 ; then
return 0
elif hasThreat $1 $2 $3 1 -1 ; then
return 0
elif hasThreat $1 $2 $3 -1 1 ; then
return 0
elif hasThreat $1 $2 $3 -1 -1 ; then
return 0
else
return 1
fi
}
function getFoods(){
#$1 coluna, $2 linha, $3 jogador
num_col_atual=${numeros[$1]}
echo "1 ${letras[$(expr $num_col_atual - 2)]} $(expr $2 - 2) ${letras[$(expr $num_col_atual - 1)]} $(expr $2 - 1)" >>moves
if [ $(expr $num_col_atual - 2) -gt 0 ] && [ $(expr $num_col_atual - 2) -lt 9 ] && isEmpty ${letras[$(expr $num_col_atual - 2)]} $(expr $2 - 2) && [ $(abs $(getValue ${letras[$(expr $num_col_atual - 1)]} $(expr $2 - 1))) -eq $(otherPlayer $3) ]; then
echo "${letras[$(expr $num_col_atual - 2)]} $(expr $2 - 2) ${letras[$(expr $num_col_atual - 1)]} $(expr $2 - 1)"
fi
echo "2 ${letras[$(expr $num_col_atual - 2)]} $(expr $2 + 2) ${letras[$(expr $num_col_atual - 1)]} $(expr $2 + 1)" >> moves
if [ $(expr $num_col_atual - 2) -gt 0 ] && [ $(expr $num_col_atual - 2) -lt 9 ] && isEmpty ${letras[$(expr $num_col_atual - 2)]} $(expr $2 + 2) && [ $(abs $(getValue ${letras[$(expr $num_col_atual - 1)]} $(expr $2 + 1))) -eq $(otherPlayer $3) ]; then
echo "${letras[$(expr $num_col_atual - 2)]} $(expr $2 + 2) ${letras[$(expr $num_col_atual - 1)]} $(expr $2 + 1)"
fi
echo "3 ${letras[$(expr $num_col_atual + 2)]} $(expr $2 - 2) ${letras[$(expr $num_col_atual + 1)]} $(expr $2 - 1)" >> moves
if [ $(expr $num_col_atual + 2) -gt 0 ] && [ $(expr $num_col_atual + 2) -lt 9 ] && isEmpty ${letras[$(expr $num_col_atual + 2)]} $(expr $2 - 2) && [ $(abs $(getValue ${letras[$(expr $num_col_atual + 1)]} $(expr $2 - 1))) -eq $(otherPlayer $3) ]; then
echo "${letras[$(expr $num_col_atual + 2)]} $(expr $2 - 2) ${letras[$(expr $num_col_atual + 1)]} $(expr $2 - 1)"
fi
echo "4 ${letras[$(expr $num_col_atual + 2)]} $(expr $2 + 2) ${letras[$(expr $num_col_atual + 1)]} $(expr $2 + 1)" >> moves
if [ $(expr $num_col_atual + 2) -gt 0 ] && [ $(expr $num_col_atual + 2) -lt 9 ] && isEmpty ${letras[$(expr $num_col_atual + 2)]} $(expr $2 + 2) && [ $(abs $(getValue ${letras[$(expr $num_col_atual + 1)]} $(expr $2 + 1))) -eq $(otherPlayer $3) ]; then
echo "${letras[$(expr $num_col_atual + 2)]} $(expr $2 + 2) ${letras[$(expr $num_col_atual + 1)]} $(expr $2 + 1)"
fi
}
function machine(){
#$1 jogador
pieces=$(getPieces $1)
IFS=$'\n'
new_col=""
new_line=""
p_col=""
p_line=""
for piece in $pieces
do
col=$(echo "$piece" | cut -f1 -d' ')
line=$(echo "$piece" | cut -f2 -d' ')
moves=$(getFoods $col $line $1)
for move in $moves
do
new_col=$(echo "$move" | cut -f1 -d' ')
new_line=$(echo "$move" | cut -f2 -d' ')
p_col=$col
p_line=$line
col_comida=$(echo "$move" | cut -f3 -d' ')
line_comida=$(echo "$move" | cut -f4 -d' ')
if ! isFood $new_col $new_line $1 ; then
echo "$col $line $new_col $new_line $col_comida $line_comida"
return 0
fi
done
done
if [ -z "$p_col" ]; then
for piece in $pieces
do
col=$(echo "$piece" | cut -f1 -d' ')
line=$(echo "$piece" | cut -f2 -d' ')
moves=$(getMoves $col $line $1)
for move in $moves
do
new_col=$(echo "$move" | cut -f1 -d' ')
new_line=$(echo "$move" | cut -f2 -d' ')
p_line=$line
p_col=$col
if ! isFood $new_col $new_line $1 ; then
echo "$p_col $p_line $new_col $new_line"
return 0
fi
done
done
else
echo "$p_col $p_line $new_col $new_line $col_comida $line_comida"
return 0
fi
# echo "false"
}
machine 1
| true |
23331f579057062e7063862692a9d0278ec0f556
|
Shell
|
Ms-Shahid/Shell-Scripting-Linux
|
/Assignments/Linux_Scripts/Sequence_Problems/randaverage.sh
|
UTF-8
| 232 | 2.828125 | 3 |
[] |
no_license
|
#! /bin/bash -x
rand1=$((RANDOM%100));
rand2=$((RANDOM%100));
rand3=$((RANDOM%100));
rand4=$((RANDOM%100));
rand5=$((RANDOM%100));
sum=$(( rand1+rand2+rand3+rand4+rand5 ))
average=$(( sum/5 ))
echo SUM:$sum
echo Average:$average
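# Loop-based variant for an arbitrary count (illustrative sketch; same
# $RANDOM % 100 trick as above):
count=5
total=0
for i in $(seq 1 $count); do
    total=$(( total + RANDOM % 100 ))
done
echo Loop variant SUM:$total Average:$(( total / count ))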
| true |
786502336b80bde42f2aad07adcd56f2d2e53bb3
|
Shell
|
petronny/aur3-mirror
|
/fast-track-svn/PKGBUILD
|
UTF-8
| 1,238 | 2.703125 | 3 |
[] |
no_license
|
# Contributor: Jens Pranaitis <jens@chaox.net>
pkgname=fast-track-svn
pkgver=150
pkgrel=1
pkgdesc="Automated penetration suite"
arch=("i686")
url="https://www.securestate.com/Pages/Fast-Track.aspx"
license=('BSD')
makedepends=("subversion")
depends=("pymssql" "metasploit3" "python-pexpect" "python-clientform" "beautiful-soup" "psyco" "firefox"\
"pymills")
conflicts=("fast-track")
replaces=("fast-track")
provides=("fast-track")
_svntrunk=http://svn.thepentest.com/fasttrack
_svnmod=$pkgname
build() {
cd "$srcdir"
if [ -d $_svnmod/.svn ]; then
(cd $_svnmod && svn up -r $pkgver)
else
svn co $_svntrunk --config-dir ./ -r $pkgver $_svnmod
fi
msg "SVN checkout done or server timeout"
msg "Starting make..."
rm -rf "$srcdir/$_svnmod-build"
cp -r "$srcdir/$_svnmod" "$srcdir/$_svnmod-build"
cd "$srcdir/$_svnmod-build"
python setup.py install
mkdir -p "${pkgdir}"{/opt/fast-track/,/usr/share/licenses/fast-track,/usr/bin}
cp -r bin fast-track.py ftgui readme "${pkgdir}"/opt/fast-track/
install -m 644 readme/LICENSE "${pkgdir}"/usr/share/licenses/fast-track/
cat > "${pkgdir}"/usr/bin/ftgui << EOF
#!/bin/bash
cd /opt/fast-track
python ./ftgui \$@
cd \$OLDPWD
EOF
chmod 755 "${pkgdir}"/usr/bin/ftgui
}
| true |
49a3762385fb03394005bbacd0cb95c4e34b7158
|
Shell
|
zealoussnow/dotfiles
|
/setup.sh
|
UTF-8
| 694 | 2.5625 | 3 |
[] |
no_license
|
#!/bin/bash
# tmux
yum -y install https://centos7.iuscommunity.org/ius-release.rpm
yum install -y tmux2u
cp ./tmux.conf ~/.tmux.conf
# fonts
#yum install fontconfig -y
#git clone git@github.com:powerline/fonts.git --depth 1
#(cd fonts && ./install.sh)
# repo
rpm -Uvh http://mirror.ghettoforge.org/distributions/gf/gf-release-latest.gf.el7.noarch.rpm
# update vim
yum -y --enablerepo=gf-plus install vim-enhanced vim-minimal git ctags gcc python-devel
# install vim plug
curl -fLo ~/.vim/autoload/plug.vim --create-dirs \
https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
cp ./vimrc ~/.vimrc
vim +PlugInstall +qall
echo "dont forget to install / update Leaderf"
| true |
581b0d5226ba672f46a464ae3df519f779636454
|
Shell
|
simondmansson/scripts
|
/make_powershell_environment_script.sh
|
UTF-8
| 480 | 4.125 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
# Reads environment variables from a file and outputs a script for setting the variables as system variables in windows.
# Expected format is name value, one per line.
# Example command ./make_powershell_environment_script.sh args.txt my_file_name
set -o errexit
set -o nounset
ARGSFILE=$1
OUTPUT=$2
echo "\n Creating bash system variable file with name $OUTPUT from args in: $ARGSFILE"
awk '{print "setx " $1 " " $2 " /m"}' $ARGSFILE > $OUTPUT.ps1
exit 0
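# Illustrative input/output (file names and values are assumptions):
#   args.txt contains:        my_file_name.ps1 then contains:
#   JAVA_HOME C:\jdk          setx JAVA_HOME C:\jdk /m
#   APP_ENV prod              setx APP_ENV prod /m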
| true |
6f3bf63ff548716ded71cfb7bcfef6f2031f2c5e
|
Shell
|
chibitronics/jig-20-config-ltc
|
/bin/chibiscreen-interface.sh
|
UTF-8
| 1,501 | 3.59375 | 4 |
[] |
no_license
|
#!/bin/sh
uart=/dev/ttyAMA0
baud=9600
reset_pulse=23
mode=stopped
stty -F ${uart} ${baud} -icrnl -imaxbel -opost -onlcr -isig -icanon -echo
clear_screen_process() {
gpio -g mode ${reset_pulse} down
while true
do
gpio -g wfi ${reset_pulse} rising
if [ -e /tmp/test-running ]
then
continue
fi
echo '#SYN' > ${uart}
echo 'Ready.' > ${uart}
done
}
clear_screen_process &
rmdir /tmp/test-running 2> /dev/null
echo "HELLO bash-chibiscreen-logger 1.0"
while read line
do
if echo "${line}" | grep -iq '^start'
then
# On start, issue a 'SYN' to clear the screen
echo '#SYN' > ${uart}
echo 'Running...' > ${uart}
mkdir /tmp/test-running
elif echo "${line}" | grep -iq '^hello'
then
echo '#SYN' > ${uart}
echo "${line}" | awk '{ sub(/([^ ]+ +){1}/,"") }1' > ${uart}
echo "Ready to test" > ${uart}
rmdir /tmp/test-running
elif echo "${line}" | grep -iq '^fail'
then
# awk command from http://stackoverflow.com/questions/2626274/print-all-but-the-first-three-columns
echo "${line}" | awk '{ sub(/([^ ]+ +){2}/,"") }1' > ${uart}
elif echo "${line}" | grep -iq '^finish'
then
result=$(echo ${line} | awk '{print $3}')
if [ ${result} -ge 200 -a ${result} -lt 300 ]
then
echo 'Pass' > ${uart}
else
echo 'Fail' > ${uart}
fi
rmdir /tmp/test-running
elif echo "${line}" | grep -iq '^exit'
then
rmdir /tmp/test-running
exit 0
fi
done
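# Lines this reader understands, inferred from the branches above
# (the <step>/<name> fields are illustrative placeholders):
#   START
#   HELLO banner text shown before "Ready to test"
#   FAIL <step> message forwarded to the screen
#   FINISH <name> 200     (2xx prints Pass, anything else Fail)
#   EXIT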
| true |
cdb0f1fd6df4b96abc3f32a53f6d3d8d03b5aff7
|
Shell
|
abwst/AliYun_RAM_Security
|
/access.sh
|
UTF-8
| 413 | 2.859375 | 3 |
[] |
no_license
|
#!/bin/bash
echo "Active Keys details for user "$user
echo " "
aliyun ram ListAccessKeys --UserName $user | tr ',' '\n' | tr '[' '\n' | cut -d '"' -f 4
# | grep -o -P '(?<="UserName":").*(?=")')
echo "Last Login Date To The Console for user "$user
aliyun ram GetUser --UserName $user | tr ',' '\n' | tr '{' '\n' | tr '"' ' ' | grep LastLogin | cut -d 'T' -f 1
echo " "
echo "current date:"
date +%Y-%m-%d
| true |
30e6710865bf2c5398904a2bd7af7ba43edb22ad
|
Shell
|
mamachanko/lets-learn-some-dotnet
|
/demo-tdd.sh
|
UTF-8
| 5,130 | 3.75 | 4 |
[] |
no_license
|
#!/usr/bin/env bash
set -eo pipefail
# Call me with `-d -n` to run unattended.
source demo-magic.sh
source ~/.profile
DEMO_PROMPT="🐙 ❯ "
: "${DEMO_DIR:="$(mktemp -d "$(pwd)"/animal-api-demo-tdd-XXXX)"}"
if ! docker ps > /dev/null; then
echo "docker is not running"
exit 1;
fi
if [ "$( docker container inspect -f '{{.State.Running}}' animaldb )" == "true" ]; then
echo "animaldb is already running"
exit 1;
fi
if ! which dotnet > /dev/null; then
echo "dotnet is not on the PATH"
exit 1;
fi
if ! which rider > /dev/null; then
echo "rider is not on the PATH"
exit 1;
fi
clear
#########
# Intro #
#########
pe "# Let's build something with .NET! 👷 🚧"
pe "# How about an API that returns animals? 🐶 🐱"
pei ""
###############################
# Create the project skeleton #
###############################
# Create the project dir.
pe "# We need a place for the project"
p "mkdir animal-api"
p "cd animal-api"
cd "$DEMO_DIR"
pe "git init"
pei ""
# Create the .NET solution.
pe "# We begin by creating a \"solution\"."
pe "dotnet new solution --name AnimalApi"
pei ""
# Create the web API project.
pe "# Now we create a web API project ..."
pe "dotnet new webapi --name AnimalApi --no-https --framework netcoreapp3.1"
rm AnimalApi/WeatherForecast.cs
rm AnimalApi/Controllers/WeatherForecastController.cs
pe "# ... and add it to the solution."
pe "dotnet sln AnimalApi.sln add AnimalApi"
pe ""
# Run the app for the 1st time.
clear
pei "# We've got a skeleton app."
pe "# Let's run it 🏃"
pe "dotnet run --project AnimalApi"
pei ""
# Explore the skeleton within Jetbrains' Rider.
pe "# Let's explore the skeleton app."
rm -rf AnimalApi/bin AnimalApi/obj
pe "tree"
pe "rider AnimalApi.sln"
pei ""
# Commit project skeleton.
clear
pe "# This is our 1st commit"
pe "dotnet new gitignore"
pe "git add ."
pe "git commit --message \"Bootstrap Animal API\""
git clean -fxd
pei ""
####################################################
# Implement GET /api/animals with a fixed response #
####################################################
pe "# Now we want to return animals."
pei "# Our API should look like this:"
pei ""
pei "# GET /api/animals HTTP/1.1"
pei "#"
pei "# HTTP/1.1 200 OK"
pei "# Content-Type: application/json"
pei "#"
pei "# [ { 🐶 } , { 🐱 } ]"
pe ""
pe "# We need tests to drive this."
pe "dotnet test"
pei "# No tests 🤷."
pei "# An integration test suite is a good place to start."
pe "dotnet new xunit --name AnimalApiIntegrationTests"
pe "dotnet sln AnimalApi.sln add AnimalApiIntegrationTests"
pe "dotnet add AnimalApiIntegrationTests package Microsoft.AspNetCore.Mvc.Testing --version 3.1"
pe "dotnet add AnimalApiIntegrationTests package Quibble.Xunit"
pe "dotnet add AnimalApiIntegrationTests reference AnimalApi"
pe "dotnet test"
pei "# Tests 🙌."
pe ""
pe "# Let's express our requirements through an integration test."
pe ""
# notes:
# * create integration test inheriting from IClassFixture<WebApplicationFactory<Startup>>
# * create controller
# * create Animal {ID, Name}
# Manually verify that it works as expected.
clear
pe "# Let's see the app in action."
pe "dotnet run --project AnimalApi"
pe ""
# Commit GET /api/animals with fixed response.
pe "# This is our 2nd commit"
pe "git add ."
pe "git commit --message \"Return dog and cat on GET /api/animals\""
pe ""
##########################
# Return animals from DB #
##########################
pe "# Now that we can return static animals,"
pei "# how about reading them from a database?"
pe ""
pe "# We need a couple of things for that:"
pei "# * a unit test for our controller"
pei "# * a DbContext<Animal>"
pei "# * something that creates a database and loads animal on application startup"
pei "# * a running database server"
pei "# * a connection to the database server"
pe ""
pe "# We need a new project for our unit testing things"
pe "dotnet new xunit --name AnimalApiTests --framework netcoreapp3.1"
pe "dotnet add AnimalApiTests reference AnimalApi"
pe "dotnet sln AnimalApi.sln add AnimalApiTests"
pe "# ... and we need packages for speaking to Postgres"
pe "dotnet add AnimalApi package Microsoft.EntityFrameworkCore.InMemory"
pe "dotnet add AnimalApi package Npgsql.EntityFrameworkCore.PostgreSQL"
pe ""
pe "# Off to the IDE! 🚁"
pe ""
# notes:
# * create DbContext<Animal>
# * register with DI container
# * create IStartupFilter
# * register with DI container
# * create Animal {ID, Name}
pe "# Let's start a Postgres instance"
pe "docker run --detach --rm --name animaldb --publish 5432:5432 --env POSTGRES_PASSWORD=secret postgres:13.1-alpine"
pe "# Is it running?"
pe "docker run --rm -it --network host --env PGPASSWORD=secret --env PGUSER=postgres --env PGHOST=127.0.0.1 postgres:13.1-alpine psql --command '\l'"
pe ""
pe "# Off to the IDE again! 🚁"
pe "# Let's see the app in action."
pe "dotnet run --project AnimalApi"
pe ""
# Commit GET /api/animals from DB.
pe "# This is our 3rd commit"
pe "git add ."
pe "git commit --message \"Keep animals in DB\""
pe ""
############
# The end. #
############
pe "# We\'re done"
pei "# Thank you 🙇"
| true |
60454551cd016981868f055cb4087d1cd88c3a18
|
Shell
|
manoharsena/HackerRank_Solutions
|
/Linux Shell/Arrays in Bash/Filter an Array with Patterns.sh
|
UTF-8
| 210 | 3.078125 | 3 |
[
"MIT"
] |
permissive
|
# You are given a list of countries, each on a new line. Your task is to read them into an array and then filter out (remove) all the names containing the letter 'a' or 'A'.
arr=($(cat))
echo ${arr[@]/*[aA]*/}
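# Illustrative run of the pattern above (script name assumed):
#   printf 'Albania\nUK\nIndia\n' | bash solution.sh   # prints: UK
# Matching elements are replaced with the empty string, so only
# names without 'a'/'A' survive.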
| true |
641e75e5ee6f481d7f8a877dfff0799cc6aad312
|
Shell
|
WonderJet/NGINX-Install
|
/nginxinstall.sh
|
UTF-8
| 548 | 3 | 3 |
[] |
no_license
|
#!/bin/bash
domain=$1
root="/var/www/example.com/html"
block="/etc/nginx/sites-available/example.com"
#Installs nginx
su root
sudo apt-get update
sudo apt-get install nginx
#Configures nginx
sudo mkdir -p $root
sudo tee $block > /dev/null <<EOF
server {
listen 3200;
root /var/www/example.com/html;
index index.html index.htm;
server_name example.com www.example.com;
location ~ \.php {
proxy_pass http://127.0.0.1:3400;
}
}
EOF
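# Follow-up sketch (standard Debian nginx layout assumed): enable the new
# server block and reload nginx so it takes effect.
sudo ln -sf $block /etc/nginx/sites-enabled/
sudo nginx -t && sudo systemctl reload nginx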
| true |
3ce4875bfe6dfb184ee912c0eb3b1cc5010b3654
|
Shell
|
weilaidb/PythonExample
|
/regularexpress/home/weilaidb/software/git-2.0.5/t/t3415-rebase-autosquash.sh
|
UTF-8
| 4,316 | 2.8125 | 3 |
[] |
no_license
|
#!/bin/sh
test_description='auto squash'
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-rebase.sh
test_expect_success setup '
echo 0 >file0 &&
git add . &&
test_tick &&
git commit -m "initial commit" &&
echo 0 >file1 &&
echo 2 >file2 &&
git add . &&
test_tick &&
git commit -m "first commit" &&
git tag first-commit &&
echo 3 >file3 &&
git add . &&
test_tick &&
git commit -m "second commit" &&
git tag base
'
test_auto_fixup ()
test_expect_success 'auto fixup (option)' '
test_auto_fixup final-fixup-option --autosquash
'
test_expect_success 'auto fixup (config)' '
git config rebase.autosquash true &&
test_auto_fixup final-fixup-config-true &&
test_must_fail test_auto_fixup fixup-config-true-no --no-autosquash &&
git config rebase.autosquash false &&
test_must_fail test_auto_fixup final-fixup-config-false
'
test_auto_squash ()
test_expect_success 'auto squash (option)' '
test_auto_squash final-squash --autosquash
'
test_expect_success 'auto squash (config)' '
git config rebase.autosquash true &&
test_auto_squash final-squash-config-true &&
test_must_fail test_auto_squash squash-config-true-no --no-autosquash &&
git config rebase.autosquash false &&
test_must_fail test_auto_squash final-squash-config-false
'
test_expect_success 'misspelled auto squash' '
git reset --hard base &&
echo 1 >file1 &&
git add -u &&
test_tick &&
git commit -m "squash! forst" &&
git tag final-missquash &&
test_tick &&
git rebase --autosquash -i HEAD^^^ &&
git log --oneline >actual &&
test_line_count = 4 actual &&
git diff --exit-code final-missquash &&
test 0 = $(git rev-list final-missquash...HEAD | wc -l)
'
test_expect_success 'auto squash that matches 2 commits' '
git reset --hard base &&
echo 4 >file4 &&
git add file4 &&
test_tick &&
git commit -m "first new commit" &&
echo 1 >file1 &&
git add -u &&
test_tick &&
git commit -m "squash! first" &&
git tag final-multisquash &&
test_tick &&
git rebase --autosquash -i HEAD~4 &&
git log --oneline >actual &&
test_line_count = 4 actual &&
git diff --exit-code final-multisquash &&
test 1 = "$(git cat-file blob HEAD^^:file1)" &&
test 2 = $(git cat-file commit HEAD^^ | grep first | wc -l) &&
test 1 = $(git cat-file commit HEAD | grep first | wc -l)
'
test_expect_success 'auto squash that matches a commit after the squash' '
git reset --hard base &&
echo 1 >file1 &&
git add -u &&
test_tick &&
git commit -m "squash! third" &&
echo 4 >file4 &&
git add file4 &&
test_tick &&
git commit -m "third commit" &&
git tag final-presquash &&
test_tick &&
git rebase --autosquash -i HEAD~4 &&
git log --oneline >actual &&
test_line_count = 5 actual &&
git diff --exit-code final-presquash &&
test 0 = "$(git cat-file blob HEAD^^:file1)" &&
test 1 = "$(git cat-file blob HEAD^:file1)" &&
test 1 = $(git cat-file commit HEAD | grep third | wc -l) &&
test 1 = $(git cat-file commit HEAD^ | grep third | wc -l)
'
test_expect_success 'auto squash that matches a sha1' '
git reset --hard base &&
echo 1 >file1 &&
git add -u &&
test_tick &&
git commit -m "squash! $(git rev-parse --short HEAD^)" &&
git tag final-shasquash &&
test_tick &&
git rebase --autosquash -i HEAD^^^ &&
git log --oneline >actual &&
test_line_count = 3 actual &&
git diff --exit-code final-shasquash &&
test 1 = "$(git cat-file blob HEAD^:file1)" &&
test 1 = $(git cat-file commit HEAD^ | grep squash | wc -l)
'
test_expect_success 'auto squash that matches longer sha1' '
git reset --hard base &&
echo 1 >file1 &&
git add -u &&
test_tick &&
git commit -m "squash! $(git rev-parse --short=11 HEAD^)" &&
git tag final-longshasquash &&
test_tick &&
git rebase --autosquash -i HEAD^^^ &&
git log --oneline >actual &&
test_line_count = 3 actual &&
git diff --exit-code final-longshasquash &&
test 1 = "$(git cat-file blob HEAD^:file1)" &&
test 1 = $(git cat-file commit HEAD^ | grep squash | wc -l)
'
test_auto_commit_flags ()
test_expect_success 'use commit --fixup' '
test_auto_commit_flags fixup 1
'
test_expect_success 'use commit --squash' '
test_auto_commit_flags squash 2
'
test_auto_fixup_fixup ()
test_expect_success 'fixup! fixup!' '
test_auto_fixup_fixup fixup fixup
'
test_expect_success 'fixup! squash!' '
test_auto_fixup_fixup fixup squash
'
test_expect_success 'squash! squash!' '
test_auto_fixup_fixup squash squash
'
test_expect_success 'squash! fixup!' '
test_auto_fixup_fixup squash fixup
'
test_done
| true |
442ad4032aa91425793f5be2b4777ca2d9c763a3
|
Shell
|
maimaiworks/xcode-appicon
|
/makeAppIcon.sh
|
UTF-8
| 6,281 | 3.25 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
cd `dirname $0`
# output directory
outdir="AppIcon.appiconset"
mkdir -p $outdir
if [ -e "icon1024.png" ]; then
BASE_FILE="icon1024.png"
else
echo "Cannot find a AppIcon file.\nYou will need an icon1024.png of 1024x1024px."
exit
fi
echo "BASE_ICON: "
echo ${BASE_FILE}
#----------------------------------------------------------------------
# 20px
if [ -e "${outdir}/icon-20.png" ]; then
echo "icon-20.png already exists. Skip the process."
else
sips -Z 20 ${BASE_FILE} --out ${outdir}/icon-20.png
fi
if [ -e "${outdir}/icon-20@2x.png" ]; then
echo "icon-20@2x.png already exists. Skip the process."
else
sips -Z 40 ${BASE_FILE} --out ${outdir}/icon-20@2x.png
fi
if [ -e "${outdir}/icon-20@3x.png" ]; then
echo "icon-20@3x.png already exists. Skip the process."
else
sips -Z 60 ${BASE_FILE} --out ${outdir}/icon-20@3x.png
fi
#----------------------------------------------------------------------
# 29px
if [ -e "${outdir}/icon-29.png" ]; then
echo "icon-29.png already exists. Skip the process."
else
sips -Z 29 ${BASE_FILE} --out ${outdir}/icon-29.png
fi
if [ -e "${outdir}/icon-29@2x.png" ]; then
echo "icon-29@2x.png already exists. Skip the process."
else
sips -Z 58 ${BASE_FILE} --out ${outdir}/icon-29@2x.png
fi
if [ -e "${outdir}/icon-29@3x.png" ]; then
echo "icon-29@3x.png already exists. Skip the process."
else
sips -Z 87 ${BASE_FILE} --out ${outdir}/icon-29@3x.png
fi
#----------------------------------------------------------------------
# 40px
if [ -e "${outdir}/icon-40.png" ]; then
echo "icon-40.png already exists. Skip the process."
else
sips -Z 40 ${BASE_FILE} --out ${outdir}/icon-40.png
fi
if [ -e "${outdir}/icon-40@2x.png" ]; then
echo "icon-40@2x.png already exists. Skip the process."
else
sips -Z 80 ${BASE_FILE} --out ${outdir}/icon-40@2x.png
fi
# Recommended for iPhone 6s, iPhone 6 (@2x)
if [ -e "${outdir}/icon-40@3x.png" ]; then
echo "icon-40@3x.png already exists. Skip the process."
else
sips -Z 120 ${BASE_FILE} --out ${outdir}/icon-40@3x.png
fi
#----------------------------------------------------------------------
# 57px
if [ -e "${outdir}/icon-57.png" ]; then
echo "icon-57.png already exists. Skip the process."
else
sips -Z 57 ${BASE_FILE} --out ${outdir}/icon57.png
fi
if [ -e "${outdir}/icon57@2x.png" ]; then
echo "icon57@2x.png already exists. Skip the process."
else
sips -Z 114 ${BASE_FILE} --out ${outdir}/icon57@2x.png
fi
#----------------------------------------------------------------------
# 60px
if [ -e "${outdir}/icon-60@2x.png" ]; then
echo "icon-60@2x.png already exists. Skip the process."
else
sips -Z 120 ${BASE_FILE} --out ${outdir}/icon-60@2x.png
fi
if [ -e "${outdir}/icon-60@3x.png" ]; then
echo "icon-60@3x.png already exists. Skip the process."
else
sips -Z 180 ${BASE_FILE} --out ${outdir}/icon-60@3x.png
fi
#----------------------------------------------------------------------
# 76px
if [ -e "${outdir}/icon-76.png" ]; then
echo "icon-76.png already exists. Skip the process."
else
sips -Z 76 ${BASE_FILE} --out ${outdir}/icon-76.png
fi
if [ -e "${outdir}/icon-76@2x.png" ]; then
echo "icon-76@2x.png already exists. Skip the process."
else
sips -Z 152 ${BASE_FILE} --out ${outdir}/icon-76@2x.png
fi
#----------------------------------------------------------------------
# 83.5px
if [ -e "${outdir}/icon-83.5@2x.png" ]; then
echo "icon-83.5@2x.png already exists. Skip the process."
else
sips -Z 167 ${BASE_FILE} --out ${outdir}/icon-83.5@2x.png
fi
#----------------------------------------------------------------------
# Contents.json作成
cat <<- END_JSON > ${outdir}/Contents.json
{
"images" : [
{
"filename" : "icon-20@2x.png",
"idiom" : "iphone",
"scale" : "2x",
"size" : "20x20"
},
{
"filename" : "icon-20@3x.png",
"idiom" : "iphone",
"scale" : "3x",
"size" : "20x20"
},
{
"filename" : "icon-29.png",
"idiom" : "iphone",
"scale" : "1x",
"size" : "29x29"
},
{
"filename" : "icon-29@2x.png",
"idiom" : "iphone",
"scale" : "2x",
"size" : "29x29"
},
{
"filename" : "icon-29@3x.png",
"idiom" : "iphone",
"scale" : "3x",
"size" : "29x29"
},
{
"filename" : "icon-40@2x.png",
"idiom" : "iphone",
"scale" : "2x",
"size" : "40x40"
},
{
"filename" : "icon-40@3x.png",
"idiom" : "iphone",
"scale" : "3x",
"size" : "40x40"
},
{
"filename" : "icon57.png",
"idiom" : "iphone",
"scale" : "1x",
"size" : "57x57"
},
{
"filename" : "icon57@2x.png",
"idiom" : "iphone",
"scale" : "2x",
"size" : "57x57"
},
{
"filename" : "icon-60@2x.png",
"idiom" : "iphone",
"scale" : "2x",
"size" : "60x60"
},
{
"filename" : "icon-60@3x.png",
"idiom" : "iphone",
"scale" : "3x",
"size" : "60x60"
},
{
"filename" : "icon-20.png",
"idiom" : "ipad",
"scale" : "1x",
"size" : "20x20"
},
{
"filename" : "icon-20@2x.png",
"idiom" : "ipad",
"scale" : "2x",
"size" : "20x20"
},
{
"filename" : "icon-29.png",
"idiom" : "ipad",
"scale" : "1x",
"size" : "29x29"
},
{
"filename" : "icon-29@2x.png",
"idiom" : "ipad",
"scale" : "2x",
"size" : "29x29"
},
{
"filename" : "icon-40.png",
"idiom" : "ipad",
"scale" : "1x",
"size" : "40x40"
},
{
"filename" : "icon-40@2x.png",
"idiom" : "ipad",
"scale" : "2x",
"size" : "40x40"
},
{
"filename" : "icon-76.png",
"idiom" : "ipad",
"scale" : "1x",
"size" : "76x76"
},
{
"filename" : "icon-76@2x.png",
"idiom" : "ipad",
"scale" : "2x",
"size" : "76x76"
},
{
"filename" : "icon-83.5@2x.png",
"idiom" : "ipad",
"scale" : "2x",
"size" : "83.5x83.5"
},
{
"filename" : "icon1024.png",
"idiom" : "ios-marketing",
"scale" : "1x",
"size" : "1024x1024"
}
],
"info" : {
"author" : "xcode",
"version" : 1
}
}
END_JSON
cp icon1024.png ${outdir}/icon1024.png
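# The size blocks above all follow one pattern; a data-driven sketch
# (list abbreviated to four entries, same sips flags as used throughout):
# for spec in icon-20:20 icon-20@2x:40 icon-29:29 icon-40@2x:80; do
# name=${spec%%:*}
# px=${spec#*:}
# if [ -e "${outdir}/${name}.png" ]; then
# echo "${name}.png already exists. Skip the process."
# else
# sips -Z ${px} ${BASE_FILE} --out ${outdir}/${name}.png
# fi
# done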
| true |
4872eced7585a32ddbc9613f4279aecf4c99615e
|
Shell
|
bealeaj1214/personalGitAliases
|
/bin/gateway-sync.sh
|
UTF-8
| 395 | 2.921875 | 3 |
[] |
no_license
|
#!/bin/bash
ORG_BRANCH=$(git status -b -s | awk ' /##/ {print $NF;} ' )
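# Sketch of the "fast fail" improvement noted at the bottom of this script:
# abort before touching branches if the worktree has uncommitted changes
# (plain git flags, nothing project-specific).
if ! git diff --quiet || ! git diff --cached --quiet; then
    echo "Uncommitted changes present; aborting sync" >&2
    exit 1
fi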
# hillbilly style script for sync svn gateway
for BRANCH in $( git branch | awk ' $NF~/^active\// {print $NF} ' | xargs); do
git checkout $BRANCH && git svn rebase
done
git svn fetch
git checkout ${ORG_BRANCH}
# Improvements
# fast fail if there unstored changes
# Collect which branches had errors and report
| true |
f638eee2ff62dcb2d0b8e5752b8d94c5c37ea37e
|
Shell
|
NafisatulAzmi/SVD
|
/user-add-pptp.sh
|
UTF-8
| 781 | 2.890625 | 3 |
[] |
no_license
|
#!/bin/bash
#Script Created By Pa'an Finest
echo -e "\e[032;1m---------- Membuat Akun\e[0m \e[034;1mPPTP VPN ----------\e[0m"
read -p "Isikan username baru: " username
read -p "Isikan password akun [$username]: " password
echo "$username pptpd $password *" >> /etc/ppp/chap-secrets
echo ""
echo "-------------------------------------------------"
echo "Informasi Detail Akun PPTP VPN:"
echo "-------------------------------------------------"
echo "Host/IP: $MYIP"
echo "Username: $username"
echo "Password: $password"
echo "-------------------------------------------------"
echo -e "\e[032;1mScript Created\e[0m \e[034;1mBy Pa'an Finest\e[032;1m"
echo ""
echo -e " \e[032;1m[ Facebook : Pa'an\e[0m | \e[031;1mInstagram : @paan_finest\e[0m | \e[034;1mWA : +1 (202) 852-2868 ]\e[0m"
| true |
2014115a394f5642d0527843a4bca37a44ba85f8
|
Shell
|
hasithachamod/centos-web-panel-to-vestacp
|
/sk_cwp_importer.sh
|
UTF-8
| 5,803 | 3.234375 | 3 |
[] |
no_license
|
#!/bin/bash
# CWP to VESTACP
# By Maksim Usmanov - Maks Skamasle
# Beta 0.2 mar 2017
# Import account from Centos Web Panel to VESTACP
# This needs an SSH connection, SSH port, and MySQL password; I assume you have set up SSH keys
# This also needs a remote server with GRANT access for your IP
# SSH is needed because CWP does not have a good backup system
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details. http://www.gnu.org/licenses/
###############################
# SSH CONNECTION REMOTE SERVER
sk_host="REMOTE_HOST_IP"
sk_port="22"
# MYSQL CONNECTION
# Use sk_host to connect
# User not needed asume you use root.
#
#Mysql root Password
sk_mysql='MYSQLrootPASS'
#Check connections
function check_ssh() {
# this will check the ssh connection
echo " "
}
function sk_check_mysql () {
# this will check remote mysql connection
echo " "
}
function sk_grant_all () {
# This will grant privileges to the local IP
echo " "
}
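# Minimal sketch of what the three stub functions above might do (BatchMode
# avoids a password prompt; both clients are stock OpenSSH/MySQL, and the
# GRANT host below is an assumption -- adjust before enabling):
# ssh -p "$sk_port" -o BatchMode=yes root@"$sk_host" true || exit 1
# mysql -h"$sk_host" -p"$sk_mysql" -e "SELECT 1" >/dev/null || exit 1
# mysql -h"$sk_host" -p"$sk_mysql" -e "GRANT ALL ON *.* TO 'root'@'$(hostname -I | awk '{print $1}')'"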
####
if [ ! -d /root/tmp ]; then
mkdir /root/sk_tmp
fi
sk_tmp=/root/sk_tmp
sk_cwp_user=$1
tput setaf 2
echo "Create user in vestacp"
tput sgr0
# create user
sk_pass=$(date +%s | sha256sum | base64 | head -c 7)
/usr/local/vesta/bin/v-add-user $sk_cwp_user $sk_pass administrator@example.net default $sk_cwp_user $sk_cwp_user
tput setaf 2
echo "Start With Domains"
tput sgr0
function deltmp() {
rm -rf $sk_tmp
}
function mysql_query() {
mysql -h$sk_host -p$sk_mysql -s -e "$1" 2>/dev/null
}
function sk_get_domains() {
mysql_query "SELECT domain, path FROM root_cwp.domains WHERE user='$sk_cwp_user';"
}
function sk_get_sub_dom() {
mysql_query "SELECT domain, subdomain, path FROM root_cwp.subdomains WHERE user='$sk_cwp_user';"
}
function sk_get_dbs() {
mysql_query "SHOW DATABASES" |grep ${sk_cwp_user}_
}
function sk_dump_it() {
mysqldump -h$sk_host -p$sk_mysql $1 > $1.sql
}
function sk_get_md5(){
query="SHOW GRANTS FOR '$1'@'localhost'"
md5=$(mysql_query "$query" 2>/dev/null)
md5=$(echo "$md5" |grep 'PASSWORD' |tr ' ' '\n' |tail -n1 |cut -f 2 -d \')
}
function sk_restore_imap_pass () {
# 1 account, 2 remote pass, 3 domain
if [ -d /etc/exim ]; then
EXIM=/etc/exim
else
EXIM=/etc/exim4
fi
sk_actual_pass=$(grep -w $1 ${EXIM}/domains/$3/passwd |tr '}' ' ' | tr ':' ' ' | cut -d " " -f 3)
sk_orig_pass=$(echo $2 | cut -d'}' -f2)
replace "${sk_actual_pass}" "${sk_orig_pass}" -- ${EXIM}/domains/$3/passwd > /dev/null
echo "Password for $1@$3 was restored"
#################
# fix vesta needed
}
function sk_import_mail () {
# U ser, P assword, M ail D ir, D o M ain
mysql_query "SELECT username, password, maildir, domain FROM postfix.mailbox where domain='$1'" | while read u p md dm
do
sk_mail_acc=$(echo "$u" | cut -d "@" -f 1)
echo " Add account $sk_mail_acc@$sk_cwp_user"
v-add-mail-account ${sk_cwp_user} $1 $sk_mail_acc temppass
echo "Start copy mails for $sk_mail_acc@$sk_cwp_user"
rsync -av -e "ssh -p $sk_port" root@$sk_host:/var/vmail/${md}/ /home/${sk_cwp_user}/mail/${md} 2>&1 |
while read sk_file_dm; do
sk_sync=$((sk_sync+1))
echo -en "-- $sk_sync mails restored\r"
done
echo " "
chown -R ${sk_cwp_user} /home/${sk_cwp_user}/mail/${md}
sk_restore_imap_pass $sk_mail_acc $p $dm
done
}
function check_mail () {
mail_domain=$1
echo "Check mails accounts for $mail_domain"
is_mail=$(mysql_query "SELECT EXISTS(SELECT * FROM postfix.mailbox WHERE domain='$mail_domain')")
if [ "$is_mail" -ge "1" ]; then
echo "Mail accounts found for $mail_domain"
sk_import_mail $mail_domain
else
echo "No mail accounts found for $mail_domain"
fi
}
sk_get_domains | while read sk_domain sk_path
do
tput setaf 2
echo "Add $sk_domain"
tput sgr0
v-add-domain $sk_cwp_user $sk_domain
echo "Start copy files for $sk_domain"
rsync -av -e "ssh -p $sk_port" root@$sk_host:$sk_path/ /home/${sk_cwp_user}/web/${sk_domain}/public_html 2>&1 |
while read sk_file_dm; do
sk_sync=$((sk_sync+1))
echo -en "-- $sk_sync restored files\r"
done
echo " "
chown $sk_cwp_user:$sk_cwp_user -R /home/${sk_cwp_user}/web/${sk_domain}/public_html
chmod 751 /home/${sk_cwp_user}/web/${sk_domain}/public_html
check_mail $sk_domain
done
tput setaf 2
echo "Get Subdomains"
tput sgr0
sk_get_sub_dom | while read sk_domain sk_sub sk_path
do
tput setaf 2
echo "Add ${sk_sub}.${sk_domain}"
tput sgr0
v-add-domain $sk_cwp_user ${sk_sub}.${sk_domain}
echo "Start copy files for ${sk_sub}.${sk_domain}"
rsync -av -e "ssh -p $sk_port" root@$sk_host:$sk_path/ /home/${sk_cwp_user}/web/${sk_sub}.${sk_domain}/public_html 2>&1 |
while read sk_file_dm; do
sk_sync=$((sk_sync+1))
echo -en "-- $sk_sync restored files\r"
done
echo " "
chown $sk_cwp_user:$sk_cwp_user -R /home/${sk_cwp_user}/web/${sk_sub}.${sk_domain}/public_html
chmod 751 /home/${sk_cwp_user}/web/${sk_sub}.${sk_domain}/public_html
done
tput setaf 2
echo "Start whit Databases"
tput sgr0
sk_get_dbs | while read sk_db
do
echo "Get database $sk_db"
sk_dump_it $sk_db
sk_get_md5 $sk_db
echo "DB='$sk_db' DBUSER='$sk_db' MD5='$md5' HOST='localhost' TYPE='mysql' CHARSET='UTF8' U_DISK='0' SUSPENDED='no' TIME='00000' DATE='$DATE'" >> /usr/local/vesta/data/users/$sk_cwp_user/db.conf
v-rebuild-databases $sk_cwp_user
echo "Restore database $sk_db"
mysql $sk_db < $sk_db.sql
rm -f $sk_db.sql
echo "Restored $sk_db database whit user $sk_db"
done
deltmp
| true |
a5aeecc21bc5b7709cafa5e7a63a402abd61092b
|
Shell
|
tiagoor/openstack-on-coreos
|
/docker-files/network/entrypoint.sh
|
UTF-8
| 7,401 | 2.765625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
/hostsctl.sh insert
controller=`awk '/controller/ {print $1}' /tmp/hosts`
hostname=`hostname`
echo "$MYIPADDR $hostname" >> /tmp/hosts
echo "$MYIPADDR $hostname" >> /etc/hosts
# Neutron SETUP
sed -i "s/^#net.ipv4.ip_forward.*/net.ipv4.ip_forward=1/" /etc/sysctl.conf
sed -i "s/^#net.ipv4.conf.all.rp_filter.*/net.ipv4.conf.all.rp_filter=0/" /etc/sysctl.conf
sed -i "s/^#net.ipv4.conf.default.rp_filter.*/net.ipv4.conf.default.rp_filter=0/" /etc/sysctl.conf
sysctl -p
## /etc/neutron/neutron.conf modify
NEUTRON_CONF=/etc/neutron/neutron.conf
sed -i "s/^#rpc_backend.*/rpc_backend=rabbit/" $NEUTRON_CONF
sed -i "s/^#rabbit_host=localhost.*/rabbit_host=$controller/" $NEUTRON_CONF
sed -i "s/^#rabbit_password.*/rabbit_password=$RABBIT_PASS/" $NEUTRON_CONF
sed -i "s/^# auth_strategy.*/auth_strategy = keystone/" $NEUTRON_CONF
sed -i "s/^auth_host.*/auth_uri = http:\/\/$controller:5000\/v2.0/" $NEUTRON_CONF
sed -i "s/^auth_port.*/identity_uri = http:\/\/$controller:35357/" $NEUTRON_CONF
sed -i "s/^admin_tenant_name.*/admin_tenant_name = $ADMIN_TENANT_NAME/" $NEUTRON_CONF
sed -i "s/^admin_user.*/admin_user = neutron/" $NEUTRON_CONF
sed -i "s/^admin_password.*/admin_password = $NEUTRON_PASS/" $NEUTRON_CONF
sed -i "s/^# service_plugins.*/service_plugins = router/" $NEUTRON_CONF
sed -i "s/^# allow_overlapping_ips.*/allow_overlapping_ips = True/" $NEUTRON_CONF
sed -i "s/# agent_down_time = 75.*/# agent_down_time = 75/" $NEUTRON_CONF
sed -i "s/# report_interval = 30.*/# report_interval = 5/" $NEUTRON_CONF
########################### DVR / L3 HA Setup #################################
# DVR Setup
if [ $HA_MODE == "DVR" ]; then
sed -i "s/^# router_distributed.*/router_distributed = True/" $NEUTRON_CONF
fi
# L3 HA Setup
if [ $HA_MODE == "L3_HA" ]; then
sed -i "s/^# router_distributed.*/router_distributed = False/" $NEUTRON_CONF
sed -i "s/^# l3_ha = False.*/l3_ha = True/" $NEUTRON_CONF
sed -i "s/^# max_l3_agents_per_router.*/max_l3_agents_per_router = 0/" $NEUTRON_CONF
fi
# L3 Agent Failover
sed -i "s/^# allow_automatic_l3agent_failover.*/allow_automatic_l3agent_failover = True/" $NEUTRON_CONF
########################### DVR / L3 HA Setup #################################
## Edit the /etc/neutron/plugins/ml2/ml2_conf.ini
ML2_CONF=/etc/neutron/plugins/ml2/ml2_conf.ini
sed -i "s/^# type_drivers.*/type_drivers = flat,vxlan/" $ML2_CONF
sed -i "s/^# tenant_network_types.*/tenant_network_types = vxlan/" $ML2_CONF
sed -i "s/^# mechanism_drivers.*/mechanism_drivers = openvswitch,l2population/" $ML2_CONF
sed -i "s/^# vni_ranges.*/vni_ranges = 1:1000/" $ML2_CONF
sed -i "s/^# vxlan_group.*/vxlan_group = 239.1.1.1/" $ML2_CONF
sed -i "s/^# enable_security_group.*/enable_security_group = True/" $ML2_CONF
sed -i "s/^# enable_ipset.*/enable_ipset = True/" $ML2_CONF
echo "firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver" \
>> $ML2_CONF
sed -i "s/^# flat_networks.*/flat_networks = external/" $ML2_CONF
echo "" >> $ML2_CONF
echo "[ovs]" >> $ML2_CONF
echo "local_ip = $TUNNEL_IP" >> $ML2_CONF
echo "enable_tunneling = True" >> $ML2_CONF
echo "bridge_mappings = external:br-ex" >> $ML2_CONF
echo "" >> $ML2_CONF
echo "[agent]" >> $ML2_CONF
echo "l2population = True" >> $ML2_CONF
echo "tunnel_types = vxlan" >> $ML2_CONF
########################### DVR / L3 HA Setup #################################
# DVR Setup
if [ $HA_MODE == "DVR" ]; then
echo "enable_distributed_routing = True" >> $ML2_CONF
fi
# L3 HA Setup
if [ $HA_MODE == "L3_HA" ]; then
echo "enable_distributed_routing = False" >> $ML2_CONF
fi
########################### DVR / L3 HA Setup #################################
echo "arp_responder = True" >> $ML2_CONF
## Edit the /etc/neutron/l3_agent.ini
L3_AGENT=/etc/neutron/l3_agent.ini
sed -i "s/^# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver.*/interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver/" $L3_AGENT
sed -i "s/^# use_namespaces.*/use_namespaces = True/" $L3_AGENT
sed -i "s/^# external_network_bridge.*/external_network_bridge = br-ex/" $L3_AGENT
sed -i "s/^# router_delete_namespaces = False.*/router_delete_namespaces = True/" $L3_AGENT
########################### DVR / L3 HA Setup #################################
# DVR Setup
if [ $HA_MODE == "DVR" ]; then
sed -i "s/^# agent_mode.*/agent_mode = dvr_snat/" $L3_AGENT
fi
# L3 Agent Setup
if [ $HA_MODE == "L3_HA" ]; then
sed -i "s/^# agent_mode.*/agent_mode = legacy/" $L3_AGENT
fi
########################### DVR / L3 HA Setup #################################
## Edit the /etc/neutron/dhcp_agent.ini
DHCP_AGENT=/etc/neutron/dhcp_agent.ini
sed -i "s/^# interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver.*/interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver/" $DHCP_AGENT
sed -i "s/^# dhcp_driver.*/dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq/" $DHCP_AGENT
sed -i "s/^# use_namespaces.*/use_namespaces = True/" $DHCP_AGENT
sed -i "s/^# dhcp_delete_namespaces = False.*/dhcp_delete_namespaces = True/" $DHCP_AGENT
# dnsmasq
sed -i "s/^# dnsmasq_config_file.*/dnsmasq_config_file = \/etc\/neutron\/dnsmasq-neutron.conf/" $DHCP_AGENT
echo "dhcp-option-force=26,1454" > /etc/neutron/dnsmasq-neutron.conf
# Metadata Support on isolated network
#sed -i "s/^# enable_isolated_metadata.*/enable_isolated_metadata = True/" $DHCP_AGENT
#sed -i "s/^# enable_metadata_network.*/enable_metadata_network = True/" $DHCP_AGENT
## Edit the /etc/neutron/metadata_agent.ini
METADATA_AGENT=/etc/neutron/metadata_agent.ini
sed -i "s/^auth_url.*/auth_url = http:\/\/$controller:5000\/v2.0/" $METADATA_AGENT
sed -i "s/^auth_region.*/auth_region = $REGION_NAME/" $METADATA_AGENT
sed -i "s/^admin_tenant_name.*/admin_tenant_name = $ADMIN_TENANT_NAME/" $METADATA_AGENT
sed -i "s/^admin_user.*/admin_user = neutron/" $METADATA_AGENT
sed -i "s/^admin_password.*/admin_password = $NEUTRON_PASS/" $METADATA_AGENT
sed -i "s/^# nova_metadata_ip.*/nova_metadata_ip = $controller/" $METADATA_AGENT
#sed -i "s/^# nova_metadata_port.*/nova_metadata_port = 8775/" $METADATA_AGENT
sed -i "s/^# metadata_proxy_shared_secret.*/metadata_proxy_shared_secret = METADATA_SECRET/" $METADATA_AGENT
modprobe gre
modprobe openvswitch
service openvswitch-switch start
ifconfig br-ex
if [ $? != 0 ]; then
echo "Making br-ex bridge by OVS command"
ovs-vsctl add-br br-ex
fi
if [ "$INTERFACE_NAME" ]; then
echo "Add port to br-ex bridge : $INTERFACE_NAME........"
ovs-vsctl add-port br-ex $INTERFACE_NAME
fi
su -s /bin/sh -c "neutron-metadata-agent --config-file=/etc/neutron/neutron.conf \
--config-file=/etc/neutron/metadata_agent.ini \
--log-file=/var/log/neutron/metadata-agent.log &" neutron
su -s /bin/sh -c "neutron-dhcp-agent --config-file=/etc/neutron/neutron.conf \
--config-file=/etc/neutron/dhcp_agent.ini \
--log-file=/var/log/neutron/dhcp-agent.log &" neutron
su -s /bin/sh -c "neutron-openvswitch-agent --config-file=/etc/neutron/neutron.conf \
--config-file=/etc/neutron/plugins/ml2/ml2_conf.ini \
--log-file=/var/log/neutron/openvswitch-agent.log &" neutron
su -s /bin/sh -c "neutron-l3-agent --config-file=/etc/neutron/neutron.conf \
--config-file=/etc/neutron/l3_agent.ini \
--config-file=/etc/neutron/fwaas_driver.ini \
--log-file=/var/log/neutron/l3-agent.log &" neutron
/hostsctl.sh update
| true |
319f904b5a0f1e490aea06abc63edaf1088612f8
|
Shell
|
StudioDotfiles/DotRepo
|
/wmii/rbar/30_df
|
UTF-8
| 250 | 2.8125 | 3 |
[] |
no_license
|
#!/bin/zsh -f
NAME=$0:t
wmiir remove /rbar/$NAME &>/dev/null
echo | wmiir create /rbar/$NAME
while (wmiir read /ctl &>/dev/null) {
wmiir xwrite /rbar/$NAME $(df | awk 'BEGIN { printf("hd "); } /^\/dev\// { printf("%s=%s ", $6, $5) }')
sleep 1m
}
| true |
3a1f94e2d394164b5f88e966bbffa1c491bc7c8a
|
Shell
|
Joovvhan/espnet
|
/tools/installers/install_morfessor.sh
|
UTF-8
| 355 | 2.90625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2017 Atlas Guide (Author : Lucas Jo)
#
# Apache 2.0
#
# Modified by Hoon Chung 2020 (ETRI)
set -euo pipefail
echo "#### installing morfessor"
dirname=morfessor
rm -rf "${dirname}"
mkdir -p ./"${dirname}"
git clone https://github.com/aalto-speech/morfessor.git "${dirname}"
echo >&2 "installation of MORFESSOR finished successfully"
| true |
72e3df3b94216508d21ed506cb192d9c109f3fb7
|
Shell
|
jaimegag/pcf-pipelines
|
/scripts/gen_ssl_certs.sh
|
UTF-8
| 1,296 | 3.203125 | 3 |
[] |
no_license
|
#!/bin/bash
set -eu
PKS_DOMAIN=$1
SSL_FILE=sslconf-${PKS_DOMAIN}.conf
#Generate SSL Config with SANs
if [ ! -f $SSL_FILE ]; then
cat > $SSL_FILE <<EOM
[req]
distinguished_name = req_distinguished_name
req_extensions = v3_req
[req_distinguished_name]
#countryName = Country Name (2 letter code)
#countryName_default = US
#stateOrProvinceName = State or Province Name (full name)
#stateOrProvinceName_default = TX
#localityName = Locality Name (eg, city)
#localityName_default = Frisco
#organizationalUnitName = Organizational Unit Name (eg, section)
#organizationalUnitName_default = Pivotal Labs
#commonName = Pivotal
#commonName_max = 64
[ v3_req ]
# Extensions to add to a certificate request
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = *.${PKS_DOMAIN}
EOM
fi
openssl genrsa -out ${PKS_DOMAIN}.key 2048
openssl req -new -out ${PKS_DOMAIN}.csr -subj "/CN=*.${PKS_DOMAIN}/O=Pivotal/C=US" -key ${PKS_DOMAIN}.key -config ${SSL_FILE} -sha256
openssl req -text -noout -in ${PKS_DOMAIN}.csr -sha256
openssl x509 -req -days 3650 -in ${PKS_DOMAIN}.csr -signkey ${PKS_DOMAIN}.key -out ${PKS_DOMAIN}.crt -extensions v3_req -extfile ${SSL_FILE} -sha256
openssl x509 -in ${PKS_DOMAIN}.crt -text -noout
| true |
058b6466248d50532c5bdfa661c54a102fdb8408
|
Shell
|
Senui/hicomb_benchmarks
|
/neighborhood-density/run.sh
|
UTF-8
| 767 | 2.625 | 3 |
[] |
no_license
|
#!/bin/bash
# With a total population of 2,000,000 agents:
# max-bound = 600 -> ~3.5 neighbors per agent
# max-bound = 500 -> ~6 neighbors per agent
# max-bound = 400 -> ~11.5 neighbors per agent
# max-bound = 350 -> ~17 neighbors per agent
# max-bound = 300 -> ~27 neighbors per agent
# max-bound = 275 -> ~35 neighbors per agent
# max-bound = 250 -> ~47 neighbors per agent
POPULATION=2000000
ITERATIONS=5
for MB in 600 500 400 350 300 275 250; do
echo "Running benchmark with max-bound = $MB"
./build/neighborhood-density -m $MB -p $POPULATION -i $ITERATIONS >> cpu.csv
done
for MB in 600 500 400 350 300 275 250; do
echo "Running benchmark with max-bound = $MB"
./build/neighborhood-density -m $MB -p $POPULATION -i $ITERATIONS --cuda >> gpu.csv
done
| true |
eafa3f35445afda87ab1a135b2447c9babeb5cdb
|
Shell
|
autoscatto/retroshare
|
/branches/v0.5-OpenPGP/build_scripts/Ubuntu_src/make.sh
|
UTF-8
| 979 | 3.34375 | 3 |
[] |
no_license
|
#!/bin/sh
echo This script will
echo - remove the directory retroshare-0.5/
echo - remove existing sources packages in the current directory
echo - build a new source package from the svn
echo - rebuild the source package for the karmic i386 arch.
echo
echo attempting to get svn revision number...
svn=`svn info | grep 'Revision:' | cut -d\ -f2`
echo Revision number is $svn.
echo Type ^C to abort, or enter to continue
read tmp
svn=4153
rm -rf ./retroshare-0.5
# ./makeSourcePackage.sh
#for dist in maverick natty; do
for dist in karmic lucid maverick natty; do
sudo PBUILDFOLDER=/var/cache/pbuilder pbuilder-dist "$dist" build retroshare_0.5.3-0."$svn"~"$dist".dsc
cp /var/cache/pbuilder/"$dist"_result/retroshare_0.5.3-0."$svn"~"$dist"_amd64.deb .
sudo PBUILDFOLDER=/var/cache/pbuilder pbuilder-dist "$dist" i386 build retroshare_0.5.3-0."$svn"~"$dist".dsc
cp /var/cache/pbuilder/"$dist"-i386_result/retroshare_0.5.3-0."$svn"~"$dist"_i386.deb .
done
| true |
e65c8a0aaca0b23937d360661ad3d220ac4ecd8f
|
Shell
|
shenfeng/rockredis
|
/scripts/deps.sh
|
UTF-8
| 536 | 3.21875 | 3 |
[] |
no_license
|
#! /bin/bash
set -e
set -u
is_linux() { [ $OSTYPE = 'linux-gnu' ]; }
function full_path() {
if is_linux; then
readlink -f ../../../..
else
greadlink -f ../../../..
fi
}
DIR=$(full_path)
export GOPATH=$DIR
cd "$DIR"
mkdir -p deps && cd deps
if [ -d rocksdb ]; then
(cd rocksdb && git pull)
else
git clone git@github.com:facebook/rocksdb.git
fi
(cd rocksdb && make -j shared_lib)
cd $DIR
CGO_CFLAGS="-I$DIR/deps/rocksdb/include" CGO_LDFLAGS="-L$DIR/deps/rocksdb/" go get github.com/tecbot/gorocksdb
| true |
12e442b4965b43714c4062d82186fb820df894a6
|
Shell
|
vphoenix1972/mygenerator
|
/yeoman/generators/app/templates/deploy/docker/appImage/app.sh
|
UTF-8
| 189 | 2.640625 | 3 |
[] |
no_license
|
#!/bin/bash
# Parse command line arguments
action=$1
shift
name="app"
workdir="/srv/app"
runas="app"
command="dotnet <%= csprojName %>.Web.dll"
. /srv/scripts/service_runner.sh $action
| true |
7b217168900fcf68305ed1336d2f24098c55beee
|
Shell
|
NguyenThiUtQuyen7th2/KhoLinux
|
/bai8.sh
|
UTF-8
| 207 | 2.671875 | 3 |
[] |
no_license
|
#!/bin/sh
echo "Nhap tap tin muon dem tu: "
read f
echo "Dem so tu cua tap tin $f"
{
n=0
while read line
do
for wd in $line
do
n=$(($n+1))
done
done
echo "So tu cua tap tin $f la: $n"
}<$f
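# For comparison, wc should give the same count for typical input
# (illustrative one-liner):
echo "wc reports: $(wc -w < "$f")"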
exit
| true |
b3a33c6c87d3d06880cda91ac553aec5626b3f90
|
Shell
|
c0uy/Firewall-IPTables-Linux-
|
/firewall
|
UTF-8
| 2,889 | 3.5 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
### BEGIN INIT INFO
# Provides: firewall
# Required-Start: $local_fs $network
# Required-Stop: $local_fs $network
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# X-Interactive: false
# Short-Description: Filter UDP/TCP PORTS + VPN
### END INIT INFO
#---- VARIABLES ----#
IPT=$(which iptables)
IPS=$(which ipset)
VPN_INTERFACE=enp1s0
VPN_NET=X.X.X.X/X # CIDR
VPN_PROTO=X # udp/tcp
VPN_PORT=1194 # PORT NUMBER
#---- FUNCTION ----#
reset_iptables() {
echo "Reseting IPTABLES ..."
# Wipe
$IPT -F
$IPT -X
echo "IPTables wiped"
# Block everything by default
$IPT -P INPUT DROP
$IPT -P FORWARD DROP
$IPT -P OUTPUT ACCEPT
echo "DROP on all filters"
# Do not break established connections
$IPT -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
#$IPT -A OUTPUT -m state --state NEW,RELATED,ESTABLISHED,INVALID -j ACCEPT
echo "Prevents broken established connections"
# Allow loopback (127.0.0.1)
$IPT -A INPUT -i lo -j ACCEPT
echo "Loopback allowed"
# ICMP (ping)
$IPT -A INPUT -p icmp -j ACCEPT
echo "Ping allowed"
security_rules
}
security_rules() {
echo "Adding security rules"
$IPT -A INPUT -p udp --sport 65535 --dport 1024:65353 -j DROP
}
open_port() {
local PROTOCOL=$1
local PORT=$2
$IPT -A INPUT -p "$PROTOCOL" --dport "$PORT" -j ACCEPT
}
#---- RUNNING ----#
# Reset IPTables i/o=drop & security rules
reset_iptables
echo -e "\n[SERVICES]"
#---- USUAL PORTS ----#
# SSH
open_port tcp 22
# DNS
open_port tcp 53
open_port udp 53
# HTTP + HTTPS
open_port tcp 80
open_port udp 80
# FTP
#open_port tcp 20
#open_port tcp 21
# modprobe ip_conntrack_ftp # optional command for OVH servers
#---- VPN ----#
$IPT -A INPUT -i $VPN_INTERFACE -m state --state NEW -p $VPN_PROTO --dport $VPN_PORT -j ACCEPT
# Allow TUN interface connections to OpenVPN server
$IPT -A INPUT -i tun+ -j ACCEPT
# Allow TUN interface connections to be forwarded through other interfaces
$IPT -A FORWARD -i tun+ -j ACCEPT
$IPT -A FORWARD -i tun+ -o $VPN_INTERFACE -m state --state RELATED,ESTABLISHED -j ACCEPT
$IPT -A FORWARD -i $VPN_INTERFACE -o tun+ -m state --state RELATED,ESTABLISHED -j ACCEPT
# NAT the VPN client traffic to the internet
$IPT -t nat -A POSTROUTING -s $VPN_NET -o $VPN_INTERFACE -j MASQUERADE
#---- ADDITIONAL PORTS ----#
# Monit
#$IPT -A INPUT -i tun -p tcp --dport 5000 -j ACCEPT
# GIT
#open_port tcp 9418
# Subsonic
#open_port tcp 4040
# Deluge
#open_port tcp 8112
# Syncthing
#open_port tcp 8384
#open_port tcp 22000
#open_port udp 21025
#open_port udp 21026
# STEAM
#open_port udp 27000:27015
#open_port tcp 27020:27039
#open_port udp 27015:27050
#open_port tcp 27017
#open_port udp 26901
#$IPT -A INPUT -p udp -m udp --sport 27000:27030 --dport 1025:65355 -j ACCEPT
#$IPT -A INPUT -p udp -m udp --sport 4380 --dport 1025:65355 -j ACCEPT
#open_port INPUT tcp 27015
| true |
b985515c7e9028d92c9e2bf59dc8f62add7d0eed
|
Shell
|
ai7dnn/kubernetes-the-hard-way-centos
|
/vagrant/centos/vagrant/setup-hosts.sh
|
UTF-8
| 388 | 2.90625 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
IFNAME=$1
ADDRESS="$(ip -4 addr show $IFNAME | grep "inet" | head -1 |awk '{print $2}' | cut -d/ -f1)"
sed -e "s/^.*${HOSTNAME}.*/${ADDRESS} ${HOSTNAME} ${HOSTNAME}.local/" -i /etc/hosts
# Update /etc/hosts about other hosts
cat >> /etc/hosts <<EOF
192.168.25.11 master-1
192.168.25.12 master-2
192.168.25.21 worker-1
192.168.25.22 worker-2
192.168.25.30 lb
EOF
| true |
d788b31c8207d55d833be9409902d74f63b070cc
|
Shell
|
imahajanshubham/Miscellaneous
|
/Ellipses/StringUtils_TestDrivenScript.sh
|
UTF-8
| 3,252 | 3.53125 | 4 |
[] |
no_license
|
# Test Driven Development - Shell Script
function startScript()
{
# Test Name #
executeTests "TestForEllipses"
read
}
function checkIfEqual()
{
if [ "$1" == "$2" ]
then
echo "Pass"
else
echo "Fail"
fi
}
function printTestCaseResult()
{
echo "$1 Test $2 : $3"
}
function executeTests()
{
testnumber=1
# Compile and generate Executable
gcc StringUtilTest.c StringUtils.c -o StringUtils.exe
#-------------------------- TEST CASE 1 ----------------------------------#
expected="Indonesia"
output=$(./StringUtils.exe -maxlength 12 -lastnchars 3 "Indonesia")
result=$(checkIfEqual $expected $output)
printTestCaseResult $1 $testnumber $result
#-------------------------------------------------------------------------#
((testnumber++))
#-------------------------- TEST CASE 2 -------------------------------------#
expected="Indonesia123"
output=$(./StringUtils.exe -maxlength 12 -lastnchars 3 "Indonesia123")
result=$(checkIfEqual $expected $output)
printTestCaseResult $1 $testnumber $result
#----------------------------------------------------------------------------#
((testnumber++))
#-------------------------- TEST CASE 3 --------------------------------------#
expected="Indone...234"
output=$(./StringUtils.exe -maxlength 12 -lastnchars 3 "Indonesia1234")
result=$(checkIfEqual $expected $output)
printTestCaseResult $1 $testnumber $result
#-----------------------------------------------------------------------------#
((testnumber++))
#-------------------------- TEST CASE 4 ---------------------------------#
expected="Ind..."
output=$(./StringUtils.exe -maxlength 6 -lastnchars 0 "Indonesia")
result=$(checkIfEqual $expected $output)
printTestCaseResult $1 $testnumber $result
#------------------------------------------------------------------------#
((testnumber++))
#-------------------------- TEST CASE 5 ---------------------------------#
expected="...nesia"
output=$(./StringUtils.exe -maxlength 8 -lastnchars 7 "Indonesia")
result=$(checkIfEqual $expected $output)
printTestCaseResult $1 $testnumber $result
#------------------------------------------------------------------------#
((testnumber++))
#-------------------------- TEST CASE 6 ---------------------------------#
expected="Error!"
output=$(./StringUtils.exe -maxlength 8 -lastnchars 9 "Indonesia")
result=$(checkIfEqual $expected $output)
printTestCaseResult $1 $testnumber $result
#------------------------------------------------------------------------#
((testnumber++))
#-------------------------- TEST CASE 7 ----------------------------------#
expected="Error!"
output=$(./StringUtils.exe -maxlength -8 -lastnchars 3 "Indonesia")
result=$(checkIfEqual $expected $output)
printTestCaseResult $1 $testnumber $result
#-------------------------------------------------------------------------#
((testnumber++))
#-------------------------- TEST CASE 8 ----------------------------------#
expected="Error!"
output=$(./StringUtils.exe -maxlength 8 -lastnchars -3 "Indonesia")
result=$(checkIfEqual "$expected" "$output")
printTestCaseResult $1 $testnumber $result
#-------------------------------------------------------------------------#
}
startScript
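# To add another case, a new block can be appended inside executeTests()
# following the existing pattern (sketch only; flag values are placeholders):
#   ((testnumber++))
#   expected="<expected output>"
#   output=$(./StringUtils.exe -maxlength <N> -lastnchars <M> "<input string>")
#   result=$(checkIfEqual "$expected" "$output")
#   printTestCaseResult $1 $testnumber $result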
| true |
037450a3fa0330a601ccc6135318f9624ce78888
|
Shell
|
boopathi/babili-online
|
/scripts/copy-to-dist.sh
|
UTF-8
| 141 | 2.515625 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
mkdir -p $DIR/../dist/static
rsync -rva $DIR/../assets/ $DIR/../dist/
| true |
8a19fc676b5c987a56fbc1e9fdfb1a1c47566899
|
Shell
|
HegemonsHerald/runcoms
|
/.scripts/cycle
|
UTF-8
| 711 | 4.125 | 4 |
[] |
no_license
|
#!/bin/dash
helptext="
cycle -- repeat a command
USAGE:
cycle [count] ...
cycle [-h|--help]
... is the command to repeat.
DESCRIPTION:
Infinitely repeats the provided command, like a while-true-loop.
If a count is specified, cycle will repeat the command count-many times,
like a for-loop.
OPTIONS:
count Number of repetitions, count <= 0 is equivalent to infinity.
-h, --help Print this helptext.
DEPENDENCIES: isInt"
[ "$1" = "--help" -o "$1" = "-h" ] && { echo "$helptext"; exit; }
isInt "$1" && { count=$1; shift 1; }
while true; do
"$@"
# counting mode
if [ -n "$count" ]; then
count=$((count - 1))
[ $count -eq 0 ] && break
fi
done
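# Usage sketches (assuming the isInt helper is on PATH):
#   cycle 3 echo hello    # runs 'echo hello' three times, like a for-loop
#   cycle date            # runs 'date' forever; stop with Ctrl-C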
| true |
394d9f63696ce2d25bdaa4ea8c7363575b8d1e51
|
Shell
|
tripattern/howtos
|
/openssl/feistyDuckComRootCa/scripts/testCA.sh
|
UTF-8
| 773 | 3.53125 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
# First run: cleanAndBuild.sh
TEST_DIR="test"
CERT="client"
cd ..
mkdir "${TEST_DIR}"
echo "Build a file to sign..."
echo "Hello, World!" > "${TEST_DIR}/sign.txt"
echo "Sign file using CERT private key..."
openssl dgst -sha256 -sign "${CERT}/private/${CERT}.key" -out "${TEST_DIR}/sign.txt.sha256" "${TEST_DIR}/sign.txt"
echo "Verify File Signing"
openssl dgst -sha256 -verify <(openssl x509 -in "${CERT}/${CERT}.crt" -pubkey -noout) -signature "${TEST_DIR}/sign.txt.sha256" "${TEST_DIR}/sign.txt"
# Sign and Unsign using the private and public key
#echo "plop" > "helloworld.txt"
#openssl rsautl -sign -in helloworld.txt -inkey aa.pem -out sig
#openssl rsautl -verify -in sig -inkey aa.pem
#> plop
echo "Clean up after test..."
#rm -rf "${TEST_DIR}"
| true |
eada68b38f00bcebd89a44895e9215747e1bd831
|
Shell
|
idevol/bpmonline-dataservice-connectors
|
/bash/bpmonline.sh
|
UTF-8
| 1,507 | 3.15625 | 3 |
[] |
no_license
|
#!/bin/bash
# https://github.com/idevol/bpmonline-dataservice-connectors
# bpm'online URL
BPMONLINE_URL='https://myproduct.bpmonline.com'
# bpm'online credentials
USER_NAME='Supervisor'
USER_PASSWORD='secret'
# bpm'online DataService URI's web service (API)
LOGIN_URI='/ServiceModel/AuthService.svc/Login'
SELECT_URI='/0/dataservice/json/SyncReply/SelectQuery'
INSERT_URI='/0/dataservice/json/reply/InsertQuery'
UPDATE_URI='/0/dataservice/json/reply/UpdateQuery'
# Files to be work
COOKIE_FILE_NAME='bpmonline.session.cookie'
QUERY_FILE_JSON='contact.query.json'
# bpm'online session token, leave empty
BPMCSRF=''
# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color
echo -e "\n${RED}Login${NC}\n"
curl \
--request POST \
--cookie-jar "$COOKIE_FILE_NAME" \
--header "Content-Type: application/json" \
--data "{\"UserName\":\"$USER_NAME\",\"UserPassword\":\"$USER_PASSWORD\"}" \
"$BPMONLINE_URL$LOGIN_URI"
# Get bpm'online session token on cookie jar file
while IFS='' read -r line || [[ -n "$line" ]]; do
if echo "$line" | grep -q "BPMCSRF"; then
echo -e "\n${GREEN}Found BPMCSRF${NC}"
BPMCSRF=$(echo "$line" | awk '{print $7}')
fi
done < "$COOKIE_FILE_NAME"
echo -e "${GREEN}BPMCSRF: $BPMCSRF${NC}\n"
echo -e "${RED}SelectQuery${NC}\n"
curl \
--request POST \
--cookie "$COOKIE_FILE_NAME" \
--header "BPMCSRF: $BPMCSRF" \
--header "Content-Type: application/json" \
--data @"$QUERY_FILE_JSON" \
"$BPMONLINE_URL$SELECT_URI"
echo -e "\n"
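# INSERT_URI and UPDATE_URI are declared above but never used; an InsertQuery
# call would follow the same pattern as the SelectQuery. Sketch only --
# "insert.query.json" is a hypothetical payload file, not shipped with this repo:
#
# echo -e "${RED}InsertQuery${NC}\n"
# curl \
#     --request POST \
#     --cookie "$COOKIE_FILE_NAME" \
#     --header "BPMCSRF: $BPMCSRF" \
#     --header "Content-Type: application/json" \
#     --data @"insert.query.json" \
#     "$BPMONLINE_URL$INSERT_URI"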
| true |
36f59e0fd83eb47d59c554ec47fc06b7949806d4
|
Shell
|
tellkiApp/TellkiAgent_LinuxDiskUsage
|
/DiskUsage_Linux.sh
|
UTF-8
| 5,888 | 3.65625 | 4 |
[] |
no_license
|
###################################################################################################################
## This script was developed by Guberni and is part of Tellki monitoring solution ##
## ##
## December, 2014 ##
## ##
## Version 1.0 ##
## ##
## DESCRIPTION: Monitor file system space utilization ##
## ##
## SYNTAX: ./DiskUsage_Linux.sh <METRIC_STATE> ##
## ##
## EXAMPLE: ./DiskUsage_Linux.sh "1,1,0" ##
## ##
## ############ ##
## ## README ## ##
## ############ ##
## ##
## This script is used combined with runremote.sh script, but you can use as standalone. ##
## ##
## runremote.sh - executes input script locally or at a remove server, depending on the LOCAL parameter. ##
## ##
## SYNTAX: sh "runremote.sh" <HOST> <METRIC_STATE> <USER_NAME> <PASS_WORD> <TEMP_DIR> <SSH_KEY> <LOCAL> ##
## ##
## EXAMPLE: (LOCAL) sh "runremote.sh" "DiskUsage_Linux.sh" "192.168.1.1" "1,1,0" "" "" "" "" "1" ##
## (REMOTE) sh "runremote.sh" "DiskUsage_Linux.sh" "192.168.1.1" "1,1,0" "user" "pass" "/tmp" "null" "0"##
## ##
## HOST - hostname or ip address where script will be executed. ##
## METRIC_STATE - is generated internally by Tellki and its only used by Tellki default monitors. ##
## 1 - metric is on ; 0 - metric is off ##
## USER_NAME - user name required to connect to remote host. Empty ("") for local monitoring. ##
## PASS_WORD - password required to connect to remote host. Empty ("") for local monitoring. ##
## TEMP_DIR - (remote monitoring only): directory on remote host to copy scripts before being executed. ##
## SSH_KEY - private ssh key to connect to remote host. Empty ("null") if password is used. ##
## LOCAL - 1: local monitoring / 0: remote monitoring ##
###################################################################################################################
#METRIC_ID
UsedSpaceID="40:Used Space:4"
FreeSpaceID="24:Free Space:4"
PerFreeSpaceID="11:% Used Space:6"
#INPUTS
UsedSpaceID_on=`echo $1 | awk -F',' '{print $3}'`
FreeSpaceID_on=`echo $1 | awk -F',' '{print $2}'`
PerFreeSpaceID_on=`echo $1 | awk -F',' '{print $1}'`
if [ -f /.dockerinit ]; then
# Docker environment
for fs in `cat /host/etc/mtab | grep -v "#" | grep -E " ext| ntfs| nfs| vfat| fat| xfs| zfs| smbfs| reiserfs| hfs| hfsplus| jfs| btrfs| overlay" | awk '{print $2}'`
do
if [ -d $fs ] || [ -f $fs ]
then
fs_info=`df -k $fs | grep "%"| grep -v Filesystem | grep ${fs}$ |sed -e 's/%//g' | awk '{ print $NF,$(NF-1),int($(NF-2)/1024),int($(NF-3)/1024) }'`
if [ $PerFreeSpaceID_on -eq 1 ]
then
PerFreeSpace=`echo $fs_info | awk '{print $2}'`
if [ "$PerFreeSpace" = "" ]
then
#Unable to collect metrics
continue
fi
fi
if [ $FreeSpaceID_on -eq 1 ]
then
FreeSpace=`echo $fs_info | awk '{print $3}'`
if [ "$FreeSpace" = "" ]
then
#Unable to collect metrics
continue
fi
fi
if [ $UsedSpaceID_on -eq 1 ]
then
UsedSpace=`echo $fs_info | awk '{print $4}'`
if [ "$UsedSpace" = "" ]
then
#Unable to collect metrics
continue
fi
fi
# Send Metrics
if [ $PerFreeSpaceID_on -eq 1 ]
then
echo "$PerFreeSpaceID|$PerFreeSpace|$fs|"
fi
if [ $FreeSpaceID_on -eq 1 ]
then
echo "$FreeSpaceID|$FreeSpace|$fs|"
fi
if [ $UsedSpaceID_on -eq 1 ]
then
echo "$UsedSpaceID|$UsedSpace|$fs|"
fi
fi
done
else
# Server environment
for fs in `cat /etc/mtab | grep -v "#" | grep -E " ext| ntfs| nfs| vfat| fat| xfs| zfs| smbfs| reiserfs| hfs| hfsplus| jfs| btrfs" | awk '{print $2}'`
do
if [ -d $fs ]
then
fs_info=`df -k $fs | grep "%"| grep -v Filesystem | grep ${fs}$ |sed -e 's/%//g' | awk '{ print $NF,$(NF-1),int($(NF-2)/1024),int($(NF-3)/1024) }'`
if [ $PerFreeSpaceID_on -eq 1 ]
then
PerFreeSpace=`echo $fs_info | awk '{print $2}'`
if [ "$PerFreeSpace" = "" ]
then
#Unable to collect metrics
continue
fi
fi
if [ $FreeSpaceID_on -eq 1 ]
then
FreeSpace=`echo $fs_info | awk '{print $3}'`
if [ "$FreeSpace" = "" ]
then
#Unable to collect metrics
continue
fi
fi
if [ $UsedSpaceID_on -eq 1 ]
then
UsedSpace=`echo $fs_info | awk '{print $4}'`
if [ "$UsedSpace" = "" ]
then
#Unable to collect metrics
continue
fi
fi
# Send Metrics
if [ $PerFreeSpaceID_on -eq 1 ]
then
echo "$PerFreeSpaceID|$PerFreeSpace|$fs|"
fi
if [ $FreeSpaceID_on -eq 1 ]
then
echo "$FreeSpaceID|$FreeSpace|$fs|"
fi
if [ $UsedSpaceID_on -eq 1 ]
then
echo "$UsedSpaceID|$UsedSpace|$fs|"
fi
fi
done
fi
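# Output format note (derived from the echo statements above): each enabled
# metric is emitted as "<METRIC_ID>|<value>|<filesystem>|", e.g. for a
# half-full /home with 1 GB free:
#   11:% Used Space:6|50|/home|
#   24:Free Space:4|1024|/home|
#   40:Used Space:4|1024|/home|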
| true |
3c0202ca41983f9f401fa80d39382c5082e5591c
|
Shell
|
peacechu/ConvertEAC3
|
/start.sh
|
UTF-8
| 582 | 3.296875 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/bash
#Give message when starting the container
printf "\n \n \n ------------------------Starting container ------------------------ \n \n \n"
# Configure user nobody to match unRAID's settings
#export DEBIAN_FRONTEND="noninteractive"
usermod -u 99 nobody
usermod -g 100 nobody
usermod -d /home nobody
chown -R nobody:users /home
#chsh -s /bin/bash nobody
cp /converteac3.sh /config/converteac3.sh
chown -R nobody:users /config
echo "[Info] Starting script"
bash /config/converteac3.sh
#su - nobody -c /config/converteac3.sh
echo "Stopping Container, script finished.."
| true |
179e5c95b3e5b1eba18bbab6046f22140ce77013
|
Shell
|
lineCode/hello_imgui
|
/tools/ios/cmake_ios_sdl.sh
|
UTF-8
| 591 | 3.125 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd $THIS_DIR/../.. || exit 1
if [ -z "$CMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM" ]; then
echo "Please set env variable CMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM"
exit 1
fi
build_dir="build_ios_sdl"
if [ ! -d $build_dir ]; then
mkdir $build_dir
fi
cd $build_dir
cmake .. \
-GXcode \
-DCMAKE_TOOLCHAIN_FILE=../hello_imgui_cmake/ios-cmake/ios.toolchain.cmake \
-DHELLOIMGUI_USE_SDL_OPENGL3=ON \
-DPLATFORM=OS64 \
-DENABLE_BITCODE=OFF \
|| exit 1
open HelloImGui.xcodeproj
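# Example invocation (the team ID below is a placeholder, not a real one):
#   export CMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=ABCDE12345
#   ./tools/ios/cmake_ios_sdl.sh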
| true |
c23932e740b49006900659af8c85d3070e4914ca
|
Shell
|
issei1022/dotfiles
|
/.bash_profile
|
UTF-8
| 686 | 2.78125 | 3 |
[] |
no_license
|
#export PATH="/usr/local/bin:$PATH:/usr/local/sbin"
export PATH="$PATH:/bin:/usr/bin:/usr/local/bin:/usr/local/sbin"
PATH=$PATH:~/bin
export PATH
#lsの色変更
alias ls='ls -G'
export LSCOLORS=gxfxcxdxbxegedabagacad
export CLICOLOR=1
#export PATH
source ~/.bashrc
export PATH="$HOME/.rbenv/bin:$PATH"
eval "$(rbenv init -)"
if [[ -s ~/.nvm/nvm.sh ]];
then source ~/.nvm/nvm.sh
fi
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
# pyenv setup (the original echo-appends re-wrote this file on every login,
# and had a missing "$" plus a ".bash_profil" typo; set the variables directly)
export PYENV_ROOT="$HOME/.pyenv"
export PATH="$PYENV_ROOT/bin:$PATH"
eval "$(pyenv init -)"
| true |
49c348e6568e97b401b23001b61fb56e5f994c75
|
Shell
|
TANGO-Project/cryptango
|
/examples/inner_product/innergen/run_local.sh
|
UTF-8
| 555 | 3.28125 | 3 |
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Define usage function
usage(){
echo "Usage: $0 <num inputs> <degree of polynomial to compute> <bit length of plaintexts> <effective bit length of plaintexts when enciphered> <number of output files to generate>"
exit 1
}
# Call usage() function if parameters not supplied
[[ $# -lt 5 ]] && usage
if [ ! -d "input_$2_$3_$4" ]; then
mkdir input_$2_$3_$4
fi
runcompss -d --lang=c --project=project_localhost.xml master/innergen $1 $2 $3 $4 $5 secrets_$2_$3_$4 parameters_$2_$3_$4 input_$2_$3_$4/input result_$2_$3_$4 timings_$2_$3_$4.csv
| true |
8f9b780064eefd78bfacd6f4534fbd36482e6fd0
|
Shell
|
dancarter/zshrc
|
/zshrc
|
UTF-8
| 2,394 | 2.953125 | 3 |
[] |
no_license
|
# Path to your oh-my-zsh configuration.
ZSH=$HOME/.oh-my-zsh
# Set name of the theme to load.
ZSH_THEME="robbyrussell"
alias ls='ls --color=auto'
alias ll='ls -l --color=auto'
alias la='ls -A --color=auto'
alias l='ls -CF --color=auto'
alias diff='colordiff'
alias -s gz='tar -xzvf'
alias -s bz2='tar -xjvf'
alias -s zip='unzip'
alias -s txt=$EDITOR
alias -s html=$BROWSER
#Aliases hub to git so you can use all the features added by hub
#with the git command
eval "$(hub alias -s)"
#Create a new folder and cd into it
#ex.) mcd new_folder
function mcd() {
mkdir -p "$1" && cd "$1";
}
#Migrate, rollback, migrate, and prepare the test db with the command migrate
function migrate() {
rake db:migrate && rake db:rollback && rake db:migrate && rake db:test:prepare;
}
#Create a new rails project, switch to its directory, initialize a git repo,
#and create the first commit for it with your new project
#ex.) rails_new MyAwesomeProject
function rails_new() {
rails new "$1" -d postgresql && cd "$1" && git init && git add . && git commit -m 'Initial commit';
}
#Same as rails new, but also creates a github repo and pushes your new project
#to github. The string after the project name will be the github description
#ex.) rails_newgh WeMail "New shared email service!"
function rails_newgh() {
rails new "$1" -d postgresql && cd "$1" && git init && git add . && git commit -m 'Initial commit' && git create -d "$2" && git push -u origin master;
}
# Set to this to use case-sensitive completion
# CASE_SENSITIVE="true"
# Comment this out to disable weekly auto-update checks
# DISABLE_AUTO_UPDATE="true"
# Uncomment following line if you want to disable colors in ls
# DISABLE_LS_COLORS="true"
# Uncomment following line if you want to disable autosetting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment following line if you want red dots to be displayed while waiting for completion
# COMPLETION_WAITING_DOTS="true"
DISABLE_CORRECTION="true"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
plugins=(git github pip python)
source $ZSH/oh-my-zsh.sh
PATH=/usr/local/bin:$PATH:$HOME/.rvm/bin
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm" # Load RVM into a shell session *as a function*
| true |
4cc15e7d2840b3017115605ab7b459d6b3cad02a
|
Shell
|
VijayEluri/Ferrous-Wheel
|
/start_ferrous_wheel.sh
|
UTF-8
| 619 | 2.796875 | 3 |
[] |
no_license
|
#!/bin/bash
# Move to the ferrous wheel directory
cd /home/ferrous/sketchbook/ferrous_wheel/
# Start the screensaver blocker (otherwise it blanks after 2 hours)
./stop_screensaver.sh &
# Launch the synthesizer (zynaddsubfx)
zynaddsubfx -r 22050 -b 1024 -o 1024 -A -L "/usr/share/zynaddsubfx/banks/SynthPiano/0004-Fantasy Bell.xiz"&
# Launch the ferrous wheel program
cd application.linux/
./ferrous_wheel &
# Sleep for a bit to let everything start up
sleep 10
# Then route the midi output from the ferrous wheel to the synth
aconnect `aconnect -ol | grep 'Virtual Raw MIDI' | cut -d\ -f 2 | cut -d\: -f 1` 128
| true |
07345068d6fa5bdf9aeeb3435ff81fb851d847d6
|
Shell
|
mrchapp/ci-job-configs
|
/armnn-ci-build/builders-32bit.sh
|
UTF-8
| 4,948 | 3.046875 | 3 |
[] |
no_license
|
#!/bin/bash
set -ex
sudo apt -q=2 update
sudo apt-get -q=2 install -y --no-install-recommends git
sudo apt-get -q=2 install -y --no-install-recommends scons
sudo apt-get -q=2 install -y --no-install-recommends gcc-arm-linux-gnueabihf
sudo apt-get -q=2 install -y --no-install-recommends g++-arm-linux-gnueabihf
sudo apt-get -q=2 install -y --no-install-recommends curl
sudo apt-get -q=2 install -y --no-install-recommends autoconf
sudo apt-get -q=2 install -y --no-install-recommends libtool
sudo apt-get -q=2 install -y --no-install-recommends cmake
sudo apt -q=2 install -y --no-install-recommends build-essential cmake libpthread-stubs0-dev
sudo apt -q=2 install -y --no-install-recommends python-pip python3-pip virtualenv python-dev python3-dev xxd
# Set global Git user configuration for CI commits
git config --global user.email "ci_notify@linaro.org"
git config --global user.name "Linaro CI"
git clone --depth 1 "http://review.mlplatform.org/ml/ComputeLibrary"
git clone https://github.com/Arm-software/armnn
wget https://dl.bintray.com/boostorg/release/1.64.0/source/boost_1_64_0.tar.bz2 && tar xf boost_1_64_0.tar.bz2
git clone --depth 1 -b v3.5.0 https://github.com/google/protobuf.git
git clone --depth 1 https://github.com/tensorflow/tensorflow.git --branch r2.0 --single-branch
wget -O flatbuffers-1.10.0.tar.gz https://github.com/google/flatbuffers/archive/v1.10.0.tar.gz && tar xf flatbuffers-1.10.0.tar.gz
if [ -n "$GERRIT_PROJECT" ] && [ "$GERRIT_EVENT_TYPE" == "patchset-created" ]; then
cd armnn
GERRIT_URL="http://${GERRIT_HOST}/${GERRIT_PROJECT}"
if git pull ${GERRIT_URL} ${GERRIT_REFSPEC} | grep -q "Automatic merge failed"; then
git reset --hard
echo "Retrying to apply the patch with: git fetch && git checkout."
if ! { git fetch ${GERRIT_URL} ${GERRIT_REFSPEC} && git checkout FETCH_HEAD; }; then
git reset --hard
echo "Error: *** Error patch merge failed"
exit 1
fi
fi
fi
cd ${WORKSPACE}/ComputeLibrary
scons extra_cxx_flags="-fPIC" Werror=0 debug=0 asserts=0 neon=1 opencl=0 os=linux arch=armv7a examples=1
cd ${WORKSPACE}/boost_1_64_0
./bootstrap.sh
rm project-config.jam || true
wget --no-check-certificate http://people.linaro.org/~theodore.grey/project-config.jam
./b2 \
--build-dir=${WORKSPACE}/boost_1_64_0/build toolset=gcc link=static cxxflags=-fPIC \
--with-filesystem \
--with-test \
--with-log \
--with-program_options install --prefix=${WORKSPACE}/boost
cd $WORKSPACE/protobuf
git submodule update --init --recursive
./autogen.sh
./configure --prefix=$WORKSPACE/protobuf-host
make -j$(nproc)
make install
make clean
./autogen.sh
./configure --prefix=$WORKSPACE/protobuf-arm32 --host=arm-linux CC=arm-linux-gnueabihf-gcc CXX=arm-linux-gnueabihf-g++ --with-protoc=$WORKSPACE/protobuf-host/bin/protoc
make -j$(nproc)
make install
cd $WORKSPACE/tensorflow
../armnn/scripts/generate_tensorflow_protobuf.sh ../tensorflow-protobuf ../protobuf-host
cd $WORKSPACE/flatbuffers-1.10.0
mkdir build && cd build
cmake .. \
-DFLATBUFFERS_BUILD_FLATC=1 \
-DCMAKE_INSTALL_PREFIX:PATH=$WORKSPACE/flatbuffers
make all install
cd $WORKSPACE/flatbuffers-1.10.0
mkdir build-arm32 && cd build-arm32
cmake .. -DCMAKE_C_COMPILER=arm-linux-gnueabihf-gcc \
-DCMAKE_CXX_COMPILER=arm-linux-gnueabihf-g++ \
-DFLATBUFFERS_BUILD_FLATC=1 \
-DCMAKE_INSTALL_PREFIX:PATH=$WORKSPACE/flatbuffers-arm32 \
-DFLATBUFFERS_BUILD_TESTS=0
make all install
cd $WORKSPACE
mkdir tflite
cd tflite
cp $WORKSPACE/tensorflow/tensorflow/lite/schema/schema.fbs .
$WORKSPACE/flatbuffers-1.10.0/build/flatc -c --gen-object-api --reflect-types --reflect-names schema.fbs
cd $WORKSPACE/armnn
mkdir build
cd build
cmake .. -DCMAKE_LINKER=/usr/bin/arm-linux-gnueabihf-ld \
-DCMAKE_C_COMPILER=/usr/bin/arm-linux-gnueabihf-gcc \
-DCMAKE_CXX_COMPILER=/usr/bin/arm-linux-gnueabihf-g++ \
-DCMAKE_C_COMPILER_FLAGS=-fPIC \
-DCMAKE_CXX_FLAGS=-mfpu=neon \
-DARMCOMPUTE_ROOT=$WORKSPACE/ComputeLibrary \
-DARMCOMPUTE_BUILD_DIR=$WORKSPACE/ComputeLibrary/build \
-DBOOST_ROOT=$WORKSPACE/boost \
-DTF_GENERATED_SOURCES=$WORKSPACE/tensorflow-protobuf \
-DBUILD_TF_PARSER=1 \
-DBUILD_TF_LITE_PARSER=1 \
-DTF_LITE_GENERATED_PATH=$WORKSPACE/tflite \
-DFLATBUFFERS_ROOT=$WORKSPACE/flatbuffers-arm32 \
-DFLATC_DIR=$WORKSPACE/flatbuffers-1.10.0/build \
-DPROTOBUF_ROOT=$WORKSPACE/protobuf-arm32 \
-DARMCOMPUTENEON=1 \
-DARMNNREF=1
make -j$(nproc)
export XZ_DEFAULTS="-T 0"
cd ${WORKSPACE}
rm -rf boost_*.tar.bz2 boost_* protobuf tensorflow
find ${WORKSPACE} -type f -name '*.o' -delete
tar -cJf /tmp/armnn-full-32.tar.xz ${WORKSPACE}
mv armnn/build .
mv protobuf-arm32/lib/libprotobuf.so.15.0.0 build
rm -rf boost armnn ComputeLibrary flatbuffers protobuf-host tensorflow-protobuf builders.sh
tar -cJf /tmp/armnn-32.tar.xz ${WORKSPACE}
mkdir ${WORKSPACE}/out
mv /tmp/armnn-32.tar.xz ${WORKSPACE}/out
mv /tmp/armnn-full-32.tar.xz ${WORKSPACE}/out
cd ${WORKSPACE}/out && sha256sum *.tar.xz > SHA256SUMS.txt
| true |
a6f6a08af87691de2f48d25d5b692c1713121345
|
Shell
|
caiofbpa/dotfiles
|
/caiofbpa.zsh-theme
|
UTF-8
| 367 | 3.265625 | 3 |
[] |
no_license
|
PROMPT='$(git_prompt_info)%{$fg[yellow]%}$%{$reset_color%} '
ZSH_THEME_GIT_PROMPT_DIRTY="%{$fg[red]%}"
ZSH_THEME_GIT_PROMPT_CLEAN="%{$fg[green]%}"
function git_prompt_info() {
ref=$(command git symbolic-ref HEAD 2> /dev/null) || \
ref=$(command git rev-parse --short HEAD 2> /dev/null) || return
echo "$(parse_git_dirty)${ref#refs/heads/}%{$reset_color%} "
}
| true |
b752c960e030b6ac58e42286cf28063a57f37b0f
|
Shell
|
deodeveloper/roughModule
|
/target/classes/clean-ad-hdfs-files.sh
|
UTF-8
| 1,617 | 2.6875 | 3 |
[] |
no_license
|
#!/bin/sh
#root path
rootPath='/ad'
suffix='20??/*/*/*/'
#process deduped-before-lookup
coordinatorNames='AdProviderAdGroupUpdate AdProviderKeywordUpdate AdCampaignUpdate'
basepaths='process deduped-before-lookup'
for coordinator in $coordinatorNames
do
for basepath in $basepaths
do
hdfs dfs -rm -R -skipTrash $rootPath/$coordinator/$basepath/$suffix || echo unable to delete $rootPath/$coordinator/$basepath/$suffix
done
done
# duduped and process
coordinatorNames='AdCampaignConversionMetricsUpdate AdDisplayPlacementMetricsUpdate AdImpressionShareUpdate AdProviderMetricsUpdate AdMtdAccountMarkupRateUpdate ReportingGroupUpdate'
basepaths='deduped process'
for coordinator in $coordinatorNames
do
for basepath in $basepaths
do
hdfs dfs -rm -R -skipTrash $rootPath/$coordinator/$basepath/$suffix || echo unable to delete $rootPath/$coordinator/$basepath/$suffix
done
done
#only deduped
coordinatorNames='AdCampaignUpdate AdProviderCreative'
for coordinator in $coordinatorNames
do
hdfs dfs -rm -R -skipTrash $rootPath/$coordinator/deduped/$suffix || echo unable to delete $rootPath/$coordinator/deduped/$suffix
done
#output ->ReportingGroup AdProviderMetrics
coordinatorNames='ReportingGroup AdProviderMetrics'
for coordinator in $coordinatorNames
do
hdfs dfs -rm -R -skipTrash $rootPath/$coordinator/output/$suffix || echo unable to delete $rootPath/$coordinator/output/$suffix
done
#child ReportingGroup
hdfs dfs -rm -R -skipTrash $rootPath/ReportingGroup/output/child/$suffix || echo unable to delete $rootPath/ReportingGroup/output/child/$suffix
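# Dry-run sketch (assumes the same path layout): preview what a pass would
# delete by swapping "-rm -R -skipTrash" for "-ls", e.g. for one coordinator:
#   hdfs dfs -ls $rootPath/ReportingGroup/output/$suffix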
| true |
1f58424ffdde7a00112c62b438726236dad6373e
|
Shell
|
fboender/localenv
|
/localenv-init.d
|
UTF-8
| 854 | 3.9375 | 4 |
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/sh
PATH="/bin:/usr/bin:/sbin:/usr/sbin"
PROFILE="`/usr/bin/localenv-discover`"
PROFILE_DATA="/etc/localenv.d/"
PROFILE_FILE="${PROFILE_DATA}current_profile"
if [ ! "$PROFILE" ]; then
echo "Internal error. Check your install. Aborting.." >&2
	exit 1
fi
start() {
echo "$PROFILE" > $PROFILE_FILE
localenv-scripts "$PROFILE" "$PROFILE_DATA"
localenv-confs "$PROFILE" "$PROFILE_DATA"
}
stop() {
if [ -e $PROFILE_FILE ]; then
rm $PROFILE_FILE
fi
}
case "$1" in
start)
echo -n "Starting roaming profile '$PROFILE': "
start
echo " done."
;;
stop)
echo -n "Stopping roaming profile '$PROFILE': "
stop
echo " done."
;;
restart)
echo -n "Restarting roaming profile '$PROFILE': "
stop
echo -n "."
sleep 1
echo -n "."
start
echo " done."
;;
*)
echo "Usage: /etc/init.d/$NAME {start|stop|restart}"
;;
esac
| true |
fe66ea8b490e3195dc4e5760928aa9e840ae19ab
|
Shell
|
bubdm/FluentArgs
|
/doc/DummyProject/test_many_example_codes.sh
|
UTF-8
| 110 | 2.65625 | 3 |
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
set -e
for code_file in "$@"; do
bash "$(dirname "$0")"/test_example_code.sh "$code_file"
done
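# Usage sketch (file names are illustrative, not part of the repo):
#   ./test_many_example_codes.sh examples/example1.code examples/example2.code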
| true |
6f2f3f72723761905f992ea3dadc3903f624cd9d
|
Shell
|
visibilityspots/ci-scripts
|
/packaging/package-yum-repo-client.sh
|
UTF-8
| 580 | 3.328125 | 3 |
[] |
no_license
|
#!/bin/bash
#
# This script packages the yum-repo-client
# Declaration of the parameters
VERSION=$(grep version pom.xml | head -1 | awk -F 'version' '{print $2}' | sed -e 's/^="//g' -e 's/".*$//g')
PACKAGEVERSION=`echo $VERSION | cut -d '.' -f 1`
ITERATION=`echo $VERSION | cut -d '.' -f 2`
ITERATIONPLUS=`echo $VERSION | cut -d '.' -f 3`
# Check if the version has a subsubversion
if [ -n "$ITERATIONPLUS" ]; then
ITERATION=$ITERATION.$ITERATIONPLUS
fi
# Test the software
python setup.py test
# Create the actual rpm package
python setup.py bdist_rpm
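# Worked example of the version parsing above: for a pom.xml line containing
# version="2.5.1", VERSION=2.5.1, PACKAGEVERSION=2, ITERATION=5; since
# ITERATIONPLUS=1 is non-empty, ITERATION becomes 5.1.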
| true |
97865089f693517d8e9f95175a625fe846a33ccd
|
Shell
|
convict-git/cp_parser_cli
|
/build.sh
|
UTF-8
| 852 | 3.5 | 4 |
[
"MIT"
] |
permissive
|
#!/bin/bash
echo -e "Cleaning previous build if present.."
rm -rfv ./bin/cp-parse
echo -e "Checking if npm is installed.."
if npm -v;
then
echo -e "\e[32mnpm found\e[0m"
else
echo -e "\e[31mnpm not found. Installing npm....\e[0m"
if sudo apt-get install npm;
then
echo -e "\e[32mnpm installed.\e[0m"
else
echo -e "\e[31mError installing npm\e[0m"
    exit 1
fi
fi
echo -e "Installing npm dependencies"
if npm install;
then
echo -e "\e[32mNpm dependencies were installed\e[0m"
else
echo -e "\e[31mError installing dependencies\e[0m"
  exit 1
fi
mkdir -p bin
if pkg --targets node10-linux-x64 index.js -o ./bin/cp-parse;
then
echo -e "\e[32mBuild successfully.\e[0m"
else
echo -e "\e[31mBuild failed\e[0m"
  exit 1
fi
echo -e "\e[1m\e[33mNow please run ./install.sh to Install...\e[0m"
chmod +x ./install.sh
| true |
0bf844dc41cbf06ee9b269de1f9ba4bf90beee94
|
Shell
|
joergpatz/vagrantbox
|
/bootstrap.sh
|
UTF-8
| 1,478 | 2.734375 | 3 |
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Fix locale
update-locale LC_ALL=en_US.UTF-8
update-locale LANGUAGE=en_US.UTF-8
# Upgrade Base Packages
apt-get update
# Install system packages
apt-get install -y build-essential python curl libmcrypt4 mc nano htop git unzip jq
# Download Bash Aliases
wget -nv -O /home/vagrant/.bash_aliases https://raw.githubusercontent.com/joergpatz/vagrantbox/master/bash_aliases
# Download Apache install script
source <(wget -qO- https://raw.githubusercontent.com/joergpatz/vagrantbox/master/apache.sh)
# Download PHP install script
source <(wget -qO- https://raw.githubusercontent.com/joergpatz/vagrantbox/master/php.sh)
# Download MySQL install script
source <(wget -qO- https://raw.githubusercontent.com/joergpatz/vagrantbox/master/mysql.sh)
# Download NodeJS install script
#source <(wget -qO- https://raw.githubusercontent.com/joergpatz/vagrantbox/master/nodejs.sh)
# Download MongoDB install script
#source <(wget -qO- https://raw.githubusercontent.com/joergpatz/vagrantbox/master/mongodb.sh)
#source <(wget -qO- https://raw.githubusercontent.com/joergpatz/vagrantbox/master/mongophp.sh)
#source <(wget -qO- https://raw.githubusercontent.com/joergpatz/vagrantbox/master/mongojs.sh)
# Download Wordpress install script
#source <(wget -qO- https://raw.githubusercontent.com/joergpatz/vagrantbox/master/wordpress.sh)
# Download ImageMagick install script
#source <(wget -qO- https://raw.githubusercontent.com/joergpatz/vagrantbox/master/imagemagick.sh)
| true |
4936f3da51aea0dd4c237f1b4890a5c473683148
|
Shell
|
kvjmistry/theta
|
/containers/production_v2/container_beamoff_chain_run1.sh
|
UTF-8
| 8,190 | 3.4375 | 3 |
[] |
no_license
|
#!/bin/bash
# USAGE: ./container_beamoff_chain_run1.sh <The input file> <the number of events>
echo "First Statement"
date
echo "TIMESTAMP_T7 $(date +%s)"
export SINGULARITYENV_HOME=/lus/theta-fs0/projects/uboone
# Check if files exist, if it does then we skip this process to run
export SINGULARITYENV_check_reco1=$(ls | grep reco1.root)
export SINGULARITYENV_check_cell=$(ls | grep postwcct.root)
export SINGULARITYENV_check_larcv=$(ls | grep postdl.root)
export SINGULARITYENV_check_bnm=$(ls | grep BNMS.root)
export SINGULARITYENV_check_r1a=$(ls | grep r1a.root)
export SINGULARITYENV_check_reco2=$(ls | grep reco2.root)
export SINGULARITYENV_check_postreco2=$(ls | grep reco2_all.root)
# The singularity container doesn't handle empty vars well, so set missing ones to FileNotFound instead of checking for empty (ugly, I know)
if [ -z "$SINGULARITYENV_check_reco1" ]; then SINGULARITYENV_check_reco1="FileNotFound"; fi
if [ -z "$SINGULARITYENV_check_cell" ]; then SINGULARITYENV_check_cell="FileNotFound"; fi
if [ -z "$SINGULARITYENV_check_larcv" ]; then SINGULARITYENV_check_larcv="FileNotFound"; fi
if [ -z "$SINGULARITYENV_check_bnm" ]; then SINGULARITYENV_check_bnm="FileNotFound"; fi
if [ -z "$SINGULARITYENV_check_r1a" ]; then SINGULARITYENV_check_r1a="FileNotFound"; fi
if [ -z "$SINGULARITYENV_check_reco2" ]; then SINGULARITYENV_check_reco2="FileNotFound"; fi
# This is the last file, so if this exists, then we can exit the job without running anything
if [ -z "$SINGULARITYENV_check_postreco2" ]; then
SINGULARITYENV_check_postreco2="FileNotFound";
else
echo "The reco2 fcl file exists, so this job must have finished fine..."
echo "exit 0"
exit 0
fi
# Print the status of the files
echo
echo "Seeing which files exist in the directory, any files not found are listed as FileNotFound"
echo "Reco1 file: $SINGULARITYENV_check_reco1"
echo "Cell Tree file: $SINGULARITYENV_check_cell"
echo "BNM file: $SINGULARITYENV_check_bnm"
echo "Reco1a file: $SINGULARITYENV_check_r1a"
echo "Reco2 file: $SINGULARITYENV_check_reco2"
echo "LArCV file: $SINGULARITYENV_check_larcv"
echo "Post Reco2 file: $SINGULARITYENV_check_postreco2"
# This checks if we need to first setup uboonecode v01b, otherwise we skip to v27
export SINGULARITYENV_v01b="false"
if [ $SINGULARITYENV_check_reco1 == "FileNotFound" ] || [ $SINGULARITYENV_check_cell == "FileNotFound" ]; then
SINGULARITYENV_v01b="true"
fi
sleep $(( (RANDOM % 10) + 1 ))s
echo
echo "Making custom fcl files with overrides"
# makes fcl file with _url_override.fcl extension
# reco1
source /lus/theta-fs0/projects/uboone/containers/timestamp_to_fcl_v2.sh $2 "/lus/theta-fs0/projects/uboone/kmistry/fcl/reco_uboone_data_mcc9_8_driver_stage1.fcl" mylist_v01b_timestamps.txt
# celltree
source /lus/theta-fs0/projects/uboone/containers/timestamp_to_fcl_v2.sh $2 "/lus/theta-fs0/projects/uboone/kmistry/fcl/run_celltreeub_prod.fcl" mylist_v01b_timestamps.txt
# larcv
source /lus/theta-fs0/projects/uboone/containers/timestamp_to_fcl_v2.sh $2 "/lus/theta-fs0/projects/uboone/kmistry/fcl/standard_larcv_uboone_data2d_prod.fcl" mylist_v27_timestamps.txt
# reco1a
source /lus/theta-fs0/projects/uboone/containers/timestamp_to_fcl_v2.sh $2 "/lus/theta-fs0/projects/uboone/kmistry/fcl/reco_uboone_mcc9_8_driver_data_ext_numi_optical.fcl" mylist_v27_timestamps.txt
# reco2
source /lus/theta-fs0/projects/uboone/containers/timestamp_to_fcl_v2.sh $2 "/lus/theta-fs0/projects/uboone/kmistry/fcl/reco_uboone_data_mcc9_8_driver_stage2_beamOff_numi.fcl" mylist_v27_timestamps.txt
# postreco2
source /lus/theta-fs0/projects/uboone/containers/timestamp_to_fcl_v2.sh $2 "/lus/theta-fs0/projects/uboone/kmistry/fcl/postreco2/reco_uboone_data_mcc9_1_8_driver_poststage2_filters_beamOff_run1_numi.fcl" mylist_v27_timestamps.txt
singularity run --no-home -B /lus:/lus -B /soft:/soft /lus/theta-fs0/projects/uboone/containers/fnal-wn-sl7_latest.sif <<EOF
echo
echo "Entered Container"
date
echo "TIMESTAMP_T7 $(date +%s)"
if [ $SINGULARITYENV_v01b == "true" ]; then
source /lus/theta-fs0/projects/uboone/uboonecode/setup
setup uboonecode v08_00_00_01b -q e17:prof
unsetup libwda
setup libwda v2_27_1
echo "Starting LArSoft Job"
date
echo "TIMESTAMP_T7 $(date +%s)"
fi
echo "------------------------------------------------------------------------"
if [ $SINGULARITYENV_check_reco1 == "FileNotFound" ]; then
echo "lar -c reco_uboone_data_mcc9_8_driver_stage1_url_override.fcl -s $1 -n1 --nskip $2 -o %ifb_event$2_reco1.root"
lar -c reco_uboone_data_mcc9_8_driver_stage1_url_override.fcl -s $1 -n1 --nskip $2 -o %ifb_event$2_reco1.root;
fi
echo "------------------------------------------------------------------------"
if [ $SINGULARITYENV_check_cell == "FileNotFound" ]; then
echo "lar -c run_celltreeub_prod_url_override.fcl -s *reco1.root"
lar -c run_celltreeub_prod_url_override.fcl -s *reco1.root
fi
echo "------------------------------------------------------------------------"
echo "Finished 01b, unsetting up v01b and setting up v27"
date
echo "TIMESTAMP_T7 $(date +%s)"
if [ $SINGULARITYENV_v01b == "true" ]; then unsetup_all; fi
source /lus/theta-fs0/projects/uboone/uboonecode_v2/setup
setup uboonecode v08_00_00_27 -q e17:prof
unsetup libwda
setup libwda v2_27_1
echo "Starting LArSoft Job part 2"
date
echo "TIMESTAMP_T7 $(date +%s)"
echo "------------------------------------------------------------------------"
if [ $SINGULARITYENV_check_bnm == "FileNotFound" ]; then
echo "lar -c run_BurstNoiseMetricsFilter.fcl -s *postwcct.root"
lar -c run_BurstNoiseMetricsFilter.fcl -s *postwcct.root
fi
echo "------------------------------------------------------------------------"
if [ $SINGULARITYENV_check_r1a == "FileNotFound" ]; then
echo "lar -c reco_uboone_mcc9_8_driver_data_ext_numi_optical_url_override.fcl -s *BNMS.root"
lar -c reco_uboone_mcc9_8_driver_data_ext_numi_optical_url_override.fcl -s *BNMS.root
fi
echo "------------------------------------------------------------------------"
if [ $SINGULARITYENV_check_reco2 == "FileNotFound" ]; then
echo "lar -c reco_uboone_data_mcc9_8_driver_stage2_beamOff_numi_url_override.fcl -s *r1a.root"
lar -c reco_uboone_data_mcc9_8_driver_stage2_beamOff_numi_url_override.fcl -s *r1a.root
fi
echo "------------------------------------------------------------------------"
if [ $SINGULARITYENV_check_larcv == "FileNotFound" ]; then
echo "lar -c standard_larcv_uboone_data2d_prod_url_override.fcl -s *reco2.root"
lar -c standard_larcv_uboone_data2d_prod_url_override.fcl -s *reco2.root
fi
echo "------------------------------------------------------------------------"
if [ $SINGULARITYENV_check_postreco2 == "FileNotFound" ]; then
echo "lar -c reco_uboone_data_mcc9_1_8_driver_poststage2_filters_beamOff_run1_numi_url_override.fcl -s *postdl.root"
lar -c reco_uboone_data_mcc9_1_8_driver_poststage2_filters_beamOff_run1_numi_url_override.fcl -s *postdl.root
fi
echo "------------------------------------------------------------------------"
echo "Finished Executing"
date
echo "TIMESTAMP_T7 $(date +%s)"
EOF
echo "Exited Container"
date
echo "TIMESTAMP_T7 $(date +%s)"
# See if the container finished with appropriate exit status
# Existance of a file ending in all.root means post reco2 ran properly
exit_status=$(ls | grep "all.root")
if [[ -z "$exit_status" ]]; then
echo "The post reco2 file doesn't exit, so this job has FAILED..."
echo "exit 1"
exit 1
elif [[ -n "$exit_status" ]]; then
echo "Found the post reco2 file, so this job has SUCCEEDED..."
echo "Removing previous successful files from directory"
echo "rm *reco2.root *r1a.root *BNMS.root *postdl.root *postwcct.root *reco1.root Pandora_Events.pndr"
rm *reco2.root *r1a.root *BNMS.root *postdl.root *postwcct.root *reco1.root Pandora_Events.pndr
echo "exit 0"
exit 0
else
echo "Eh?! Whats this exit status?"
exit 2
fi
# -------------------------------------------------------------
# ------------------------ DONE! ------------------------------
# -------------------------------------------------------------
| true |
bf8e9f69958878e9ba500e1573e6a1562fb0fa78
|
Shell
|
toddyamakawa/bin
|
/git-fzpop
|
UTF-8
| 298 | 2.546875 | 3 |
[] |
no_license
|
#!/usr/bin/env bash
# https://github.com/mattorb/dotfiles/blob/master/bin/fstash
# TODO: Finish this
git stash list | ,fzf git-show | xargs echo git stash pop
# REVISIT: Checkout as branch
# sha=$(echo "$selection[3]" | grep -o '[a-f0-9]\{7\}')
# git stash branch "stash-$sha" "$reflog_selector"
| true |
e5fb5930e1193b8262cb5b9a610444099a0c046e
|
Shell
|
shigeyf/azure-webapp-for-container-letsencrypt-template
|
/files/rootdir/docker-entrypoint.d/50-setup-letsencrypt.sh
|
UTF-8
| 750 | 3.234375 | 3 |
[
"MIT"
] |
permissive
|
#!/bin/sh
# vim:sw=4:ts=4:et
set -e
ME=$(basename "$0")
mkdir -p /home/configs/letsencrypt
if [ ! -L /etc/letsencrypt ] || [ ! -e /etc/letsencrypt ]; then
    rm -f /etc/letsencrypt
    ln -s /home/configs/letsencrypt /etc/letsencrypt
fi
mkdir -p /var/www
mkdir -p /home/var/certbot
if [ ! -L /var/www/certbot ] || [ ! -e /var/www/certbot ]; then
    rm -f /var/www/certbot
    ln -s /home/var/certbot /var/www/certbot
fi
# copy shell scripts for certbot
cp /docker/letsencrypt/* /etc/letsencrypt
# Set up an initial cert if there is no issued cert yet.
(sleep 120; /etc/letsencrypt/issue-new-cert.sh) &
# Set up crontab
cp /docker/letsencrypt/crontab.root /var/spool/cron/crontabs/root
chmod 600 /var/spool/cron/crontabs/root
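# The crontab.root file copied above is expected to drive periodic renewals;
# a plausible entry (hypothetical -- the real file ships in /docker/letsencrypt):
#   0 3 * * 6 /etc/letsencrypt/issue-new-cert.sh >> /var/log/letsencrypt.log 2>&1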
| true |