# 3. Longest Substring Without Repeating Characters - LeetCode
# https://leetcode.com/problems/longest-substring-without-repeating-characters/description/
# class Solution(object):
# def lengthOfLongestSubstring_TLE(self, s):
# """
# :type s: str
# :rtype: int
# """
# if len(s) < 2: # WA1
# return len(s)
# max_length = 0
# i = 0
# while i < len(s) - max_length:
# d = dict()
# j = i + 1
# d.update({s[i]:True})
# current_max = 1
# while j < len(s):
# if d.has_key(s[j]):
# break
# else:
# d.update({s[j]:True})
# current_max += 1
# j += 1
# max_length = max( current_max, max_length )
# i += 1
# return max_length
# # return max_length # WA2
class Solution(object):
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
if len(s) < 2: # WA1
return len(s)
d = {}
start = 0
end = 0
max_length = 0
for c in s:
while d.has_key(c):
del d[s[start]]
start += 1
d.update({c:True})
end += 1
max_length = max( max_length, end - start )
return max_length
ans = [
("",0),
("c",1), # WA1
("au",2), # WA2
("abcabcbb",3), # abc
("bbbbb",1), # b
("pwwkew",3) # wke
]
s = Solution()
for i in ans:
r = s.lengthOfLongestSubstring(i[0])
print r, "O" if r == i[1] else "X"
# time optimization
%timeit -n40 s.lengthOfLongestSubstring([chr(50+(i%50)) for i in range(1000)])
# 40 loops, best of 3: 1.12 ms per loop
# %timeit -n100 s.lengthOfLongestSubstring_TLE([chr(50+(i%50)) for i in range(1000)])
# 100 loops, best of 3: 30.9 ms per loop | {
"repo_name": "heyf/cloaked-octo-adventure",
"path": "leetcode/003_longest-substring-without-repeating-characters.py",
"copies": "1",
"size": "1963",
"license": "mit",
"hash": -2585491395327102000,
"line_mean": 26.661971831,
"line_max": 91,
"alpha_frac": 0.4589913398,
"autogenerated": false,
"ratio": 3.1408,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.40997913398,
"avg_score": null,
"num_lines": null
} |
# 3. Longest Substring Without Repeating Characters
# Given a string, find the length of the longest substring without repeating characters.
def lengthOfLongestSubstring(self, s):
"""
:type s: str
:rtype: int
"""
start = 0
maxLen = 0
dic = {}
length = len(s)
for i in range(length):
# check if character is repeated within the current running word
if s[i] in dic and start <= dic[s[i]]:
# This is like a rolling window.
# If you see a letter again, you exclude that letter from the start and count the second occurrence.
# Set the new running word to start from the last time char was seen + 1
start = dic[s[i]] + 1
# print(start, i, s[i])
else:
# max word is either current running word or not
maxLen = max(maxLen, i - start + 1)
dic[s[i]] = i
return maxLen
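# Added usage sketch (not part of the original solution): the function above never touches
# `self`, so it can be exercised directly against the problem's sample answers.
if __name__ == "__main__":
    for word, expected in [("abcabcbb", 3), ("bbbbb", 1), ("pwwkew", 3)]:
        assert lengthOfLongestSubstring(None, word) == expected, word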
| {
"repo_name": "Vaibhav/InterviewPrep",
"path": "LeetCode/Medium/3-Longest-SubStr-No-Repeat.py",
"copies": "2",
"size": "1030",
"license": "mit",
"hash": 3052068324620559000,
"line_mean": 34.7857142857,
"line_max": 115,
"alpha_frac": 0.527184466,
"autogenerated": false,
"ratio": 4.364406779661017,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5891591245661018,
"avg_score": null,
"num_lines": null
} |
# -*- coding: utf-8 -*-
"""
Functions that encapsulate "usual" use-cases for pdfminer, for use making
bundled scripts and for using pdfminer as a module for routine tasks.
"""
import six
import sys
from .pdfdocument import PDFDocument
from .pdfparser import PDFParser
from .pdfinterp import PDFResourceManager, PDFPageInterpreter
from .pdfdevice import PDFDevice, TagExtractor
from .pdfpage import PDFPage
from .converter import XMLConverter, HTMLConverter, TextConverter
from .cmapdb import CMapDB
from .image import ImageWriter
def extract_text_to_fp(inf, outfp,
_py2_no_more_posargs=None, # Bloody Python2 needs a shim
output_type='text', codec='utf-8', laparams = None,
maxpages=0, page_numbers=None, password="", scale=1.0, rotation=0,
layoutmode='normal', output_dir=None, strip_control=False,
debug=False, disable_caching=False,
font_corrector=None, **other):
"""
Parses text from inf-file and writes to outfp file-like object.
Takes loads of optional arguments but the defaults are somewhat sane.
Beware laparams: Including an empty LAParams is not the same as passing None!
Returns nothing, acting as it does on two streams. Use StringIO to get strings.
output_type: May be 'text', 'xml', 'html', 'tag'. Only 'text' works properly.
codec: Text decoding codec
laparams: An LAParams object from pdfminer.layout.
Default is None but may not layout correctly.
maxpages: How many pages to stop parsing after
page_numbers: zero-indexed page numbers to operate on.
password: For encrypted PDFs, the password to decrypt.
scale: Scale factor
rotation: Rotation factor
layoutmode: Default is 'normal', see pdfminer.converter.HTMLConverter
output_dir: If given, creates an ImageWriter for extracted images.
strip_control: Does what it says on the tin
debug: Output more logging data
disable_caching: Does what it says on the tin
font_corrector: optional filename to load FontCorrectors from
"""
if six.PY2 and sys.stdin.encoding:
password = password.decode(sys.stdin.encoding)
imagewriter = None
if output_dir:
imagewriter = ImageWriter(output_dir)
# Font correction
font_correctors = []
if font_corrector:
import imp
fc_loader = imp.load_source("fc_loader", font_corrector)
font_correctors = fc_loader.fc_loader()
rsrcmgr = PDFResourceManager(caching=not disable_caching,
font_correctors=font_correctors)
if output_type == 'text':
device = TextConverter(rsrcmgr, outfp, codec=codec, laparams=laparams,
imagewriter=imagewriter)
if six.PY3 and outfp == sys.stdout:
outfp = sys.stdout.buffer
if output_type == 'xml':
device = XMLConverter(rsrcmgr, outfp, codec=codec, laparams=laparams,
imagewriter=imagewriter,
stripcontrol=strip_control)
elif output_type == 'html':
device = HTMLConverter(rsrcmgr, outfp, codec=codec, scale=scale,
layoutmode=layoutmode, laparams=laparams,
imagewriter=imagewriter)
elif output_type == 'tag':
device = TagExtractor(rsrcmgr, outfp, codec=codec)
interpreter = PDFPageInterpreter(rsrcmgr, device)
for page in PDFPage.get_pages(inf,
page_numbers,
maxpages=maxpages,
password=password,
caching=not disable_caching,
check_extractable=True):
page.rotate = (page.rotate + rotation) % 360
interpreter.process_page(page)
device.close()
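# Added usage sketch (not part of this module; 'sample.pdf' is a placeholder filename):
# extract the text of a local PDF into an in-memory buffer. Note the docstring's warning:
# passing LAParams() and passing None are not equivalent.
if __name__ == '__main__':
    from io import BytesIO
    from pdfminer.layout import LAParams
    with open('sample.pdf', 'rb') as fp:
        out = BytesIO()
        extract_text_to_fp(fp, out, laparams=LAParams(), output_type='text', codec='utf-8')
        print(out.getvalue().decode('utf-8'))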
| {
"repo_name": "rotula/pdfminer",
"path": "pdfminer/high_level.py",
"copies": "1",
"size": "3883",
"license": "mit",
"hash": 2047943244812810000,
"line_mean": 39.8736842105,
"line_max": 86,
"alpha_frac": 0.6381663662,
"autogenerated": false,
"ratio": 4.197837837837838,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5336004204037837,
"avg_score": null,
"num_lines": null
} |
# waveconverter decoding modules
import waveConvertVars as wcv
from breakWave import breakdownWaveform   # needed by decodeBaseband() below
from breakWave import breakdownWaveform2
from widthToBits import separatePackets
from widthToBits import decodePacket
from statEngine import checkCRC
from statEngine import checkACS
#from widthToBits import printPacket
#from config import *
from protocol_lib import ProtocolDefinition
import io
def decodeBaseband(waveformFileName, basebandSampleRate, outFileName,
protocol, outputHex, verbose):
masterWidthList = [] # contains the widths for the entire file
packetWidthsList = [] # list of packet width lists
packetList = [] # list of decoded packets
rawPacketList = [] # list of pre-decoded packets
# open input file for read access in binary mode
with io.open(waveformFileName, 'rb') as waveformFile:
# open output file for write access
outFile = open(outFileName, 'w')
# scan through waveform and get widths
if (breakdownWaveform(protocol, waveformFile, masterWidthList) == wcv.END_OF_FILE):
# separate master list into list of packets
separatePackets(protocol, masterWidthList, packetWidthsList)
# decode each packet and add it to the list
i=0
for packetWidths in packetWidthsList:
decodedPacket = [] # move out of loop to main vars?
rawPacket = [] # move out of loop to main vars?
if wcv.verbose:
print "TX Num: " + str(i+1) + ": "
decodePacket(protocol, packetWidths, decodedPacket, rawPacket, verbose)
# print "Raw and Decoded Packets:"
# print(rawPacket)
# print(decodedPacket)
packetList.append(decodedPacket[:])
rawPacketList.append(rawPacket[:])
i+=1
#break # debug - only do first packet
waveformFile.close()
return packetList
# this function takes the list of decoded packets and produces a
# string consisting of each packet formatted according to the user's
# direction
def packetsToFormattedString(packetList, protocol, outputHex):
outputString = ""
i=0
for packet in packetList:
outputString += ("Transmission #" + str(i+1) + ":")
# align printout based on packet number
if (i + 1) < 10:
outputString += " "
if (i + 1) < 100:
outputString += " "
outputString += packetToString(packet, outputHex)
i+=1
return(outputString)
def packetToString(packet, outputHex):
packetString = ""
if outputHex: # output in hex
i = 0
while i < len(packet)-3:
hexVal = packet[i]*8 + packet[i+1]*4 + packet[i+2]*2 + packet[i+3]
packetString += str(hex(hexVal)[-1:])
i+=4
if (i % 8) == 0 and i != 0: # add a space between each byte
packetString += ' '
if (len(packet) % 4 != 0): # have to display the leftover
packetString += ' + b'
for j in range(i, len(packet)):
if packet[j] == wcv.DATA_ZERO:
packetString += '0'
elif packet[j] == wcv.DATA_ONE:
packetString += '1'
else:
packetString += 'FATAL ERROR\n'
else: # output in binary
for i in range(len(packet)):
# add a break at certain points
if (i % 8) == 0 and i != 0:
packetString += ' '
# add a separator if we've reached the end of the data section (denoted by bit=2)
if packet[i] == 2:
packetString += " --- "
# write each bit in ASCII
elif packet[i] == wcv.DATA_ZERO:
packetString += '0'
elif packet[i] == wcv.DATA_ONE:
packetString += '1'
else:
packetString += 'FATAL ERROR\n'
break
return(packetString)
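# Worked example (added; assumes wcv.DATA_ZERO == 0 and wcv.DATA_ONE == 1):
#   packetToString([1,0,1,0, 1,1,0,0, 1,1], outputHex=True)  returns 'ac  + b11'
#   packetToString([1,0,1,0, 1,1,0,0, 1,1], outputHex=False) returns '10101100 11'
# i.e. full nibbles print as hex digits (with a space after each byte), and leftover bits
# that don't fill a nibble are appended in binary after ' + b'.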
class basebandTx:
txNum = 0
timeStamp_us = 0.0 # in microseconds
waveformData = []
widthList = []
interPacketTimingValid = False
preambleValid = False
headerValid = False
framingValid = False
encodingValid = False
crcValid = False
txValid = False
fullBasebandData = []
id = []
value1 = 0
value2 = 0
binaryString = ""
hexString = ""
def __init__(self, txNum, timeStampIn, waveformDataIn):
self.txNum = txNum
self.timeStamp = timeStampIn
self.waveformData = waveformDataIn
self.fullBasebandData = []
def decodeTx(self, protocol, glitchFilterCount, timingError, verbose):
if verbose:
print "decoding transmission #" + str(self.txNum + 1)
# scan through waveform and get widths
self.widthList = []
breakdownWaveform2(protocol, self.waveformData, self.widthList, glitchFilterCount)
#print len(self.waveformData)
#print self.waveformData
#print self.widthList
tempUnused = []
self.fullBasebandData = [] # dump any info from previous decoding attempt
(self.interPacketTimingValid, self.preambleValid,
self.headerValid, self.encodingValid) = \
decodePacket(protocol, self.widthList, self.fullBasebandData, tempUnused, timingError, verbose)
self.framingValid = self.interPacketTimingValid & \
self.preambleValid & \
self.headerValid
# NEED: add protocol check to ensure bits are legal
#print self.fullBasebandData
#print protocol.crcLow
#print protocol.crcHigh
#print protocol.crcDataLow
#print protocol.crcDataHigh
if not self.framingValid:
self.crcValid = False
elif len(protocol.crcPoly) == 0: # if no CRC given, then assume valid
self.crcValid = True
else:
self.crcValid = checkCRC(protocol=protocol, fullData=self.fullBasebandData) and \
checkACS(protocol=protocol, fullData=self.fullBasebandData)
if self.framingValid and self.encodingValid and self.crcValid:
self.txValid = True
# NEED break out ID
# NEED break out Value1
# NEED break out Value2
self.binaryString = packetToString(self.fullBasebandData, 0)
self.hexString = packetToString(self.fullBasebandData, 1)
if verbose:
print "data size: " + str(len(self.fullBasebandData))
#print self.binaryString
def display(self):
print "Displaying Transmission Object " + str(self.txNum)
print "Time Stamp (us): " + str(self.timeStamp_us)
print "Waveform Size (samples): " + str(len(self.waveformData))
print "Preamble Valid: " + str(self.preambleValid)
print "Header Valid: " + str(self.headerValid)
print "Framing Valid: " + str(self.framingValid)
print "Encoding Valid: " + str(self.encodingValid)
print "CRC Valid: " + str(self.crcValid)
print "Widths List:"
print self.widthList
print "Full baseband data:"
print self.fullBasebandData
print "ID:"
print self.id
print "Value 1: " + str(self.value1)
print "Value 2: " + str(self.value2)
print "Binary String: " + self.binaryString
print "Hex String: " + self.hexString
from demod_rf import ook_flowgraph
from demod_rf import fsk_flowgraph
from demod_rf import fsk_hopping_flowgraph
def demodIQFile(verbose, modulationType, iqSampleRate, basebandSampleRate, centerFreq, frequency, frequencyHopList, channelWidth,
transitionWidth, threshold, iqFileName, waveformFileName, fskDeviation = 0, fskSquelch = 0):
# create flowgraph object and execute flowgraph
try:
if verbose:
print "Running Demodulation Flowgraph"
print "modulation (ook=0) = " + str(modulationType)
print "samp_rate (Hz) = " + str(iqSampleRate)
print "baseband rate (Hz) = " + str(basebandSampleRate)
print "center_freq (Hz) = " + str(centerFreq)
print "tune frequency (Hz) = " + str(frequency)
print "freq hop list (Hz) = " + str(frequencyHopList)
print "channel width (Hz) = " + str(channelWidth)
print "transition width (Hz) = " + str(transitionWidth)
print "threshold = " + str(threshold)
print "FSK Squelch Level(dB) = " + str(fskSquelch)
print "FSK Deviation (Hz) = " + str(fskDeviation)
print "iq File Name = " + iqFileName
print "Waveform File Name = " + waveformFileName
if modulationType == wcv.MOD_OOK:
flowgraphObject = ook_flowgraph(iqSampleRate, # rate_in
basebandSampleRate, # rate_out
centerFreq,
frequency,
channelWidth,
transitionWidth,
threshold,
iqFileName,
waveformFileName) # temp digfile
flowgraphObject.run()
elif modulationType == wcv.MOD_FSK:
flowgraphObject = fsk_flowgraph(iqSampleRate, # samp_rate_in
basebandSampleRate, # rate_out
centerFreq,
frequency, # tune_freq
channelWidth,
transitionWidth,
0.0, # threshold, # no need for non-zero threshold in FSK
fskDeviation,
fskSquelch,
iqFileName,
waveformFileName) # temp file
flowgraphObject.run()
elif modulationType == wcv.MOD_FSK_HOP:
flowgraphObject = fsk_hopping_flowgraph(iqSampleRate, # samp_rate_in
basebandSampleRate, # rate_out
centerFreq,
frequencyHopList[0], #432897500, # tune_freq 0
frequencyHopList[1], #433417500, # tune_freq 1
frequencyHopList[2], #433777500, # tune_freq 2
channelWidth,
transitionWidth,
0.0, # threshold, # no need for non-zero threshold in FSK
fskDeviation,
fskSquelch,
iqFileName,
waveformFileName) # temp file
flowgraphObject.run()
else:
print "Invalid modulation type selected" # NEED to put in status bar or pop-up
except KeyboardInterrupt:
pass
if verbose:
print "Flowgraph completed"
# get the message queue object used in the flowgraph
queue = flowgraphObject.sink_queue
# now run through each message in the queue, and pull out each
# byte from each message
basebandList = []
for n in xrange(queue.count()):
messageSPtr = queue.delete_head() # get and remove the front-most message
messageString = messageSPtr.to_string() # convert message to a string
# for each character in the string, determine if binary 1 or 0 and append
for m in xrange(len(messageString)):
if messageString[m] == b'\x00':
basebandList.append(0)
elif messageString[m] == b'\x01':
basebandList.append(1)
else:
basebandList.append(messageString)
print "Fatal Error: flowgraph output baseband value not equal to 1 or 0"
print "n = " + str(n)
exit(1)
return basebandList
# this function takes the binary baseband data and breaks it into individual
# transmissions, assigning each to a Tx Object along with a timestamp
from breakWave import breakBaseband
def buildTxList(basebandData, basebandSampleRate, interTxTiming, glitchFilterCount, interTxLevel, verbose):
basebandDataByTx = breakBaseband(basebandData, interTxTiming, glitchFilterCount, interTxLevel, verbose)
runningSampleCount = 0
txList = []
# build a list of transmission objects with timestamp
for iTx in basebandDataByTx:
timeStamp_us = 1000000.0 * runningSampleCount/basebandSampleRate # timestamps in microseconds
runningSampleCount += len(iTx)
txList.append(basebandTx(len(txList), timeStamp_us, iTx))
return txList
def decodeAllTx(protocol, txList, outputHex, timingError, glitchFilterCount, verbose, showAllTx):
# call decode engine for each transmission
formatString = '{:>6} {}\n'
decodeOutputString = formatString.format("TX Num", "Payload") # need to start over after each decode attempt
i = 0
for iTx in txList:
if i == len(txList):
iTx.display()
else:
iTx.decodeTx(protocol, glitchFilterCount, timingError, verbose) # argument order matches decodeTx's signature
if showAllTx or iTx.txValid:
if outputHex:
decodeOutputString += formatString.format(str(i+1), iTx.hexString)
else:
decodeOutputString += formatString.format(str(i+1), iTx.binaryString)
i+=1
return (txList, decodeOutputString) | {
"repo_name": "paulgclark/waveconverter",
"path": "src/waveconverterEngine.py",
"copies": "1",
"size": "14624",
"license": "mit",
"hash": -1911008310976687400,
"line_mean": 41.8885630499,
"line_max": 130,
"alpha_frac": 0.5423276805,
"autogenerated": false,
"ratio": 4.505237215033888,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5547564895533887,
"avg_score": null,
"num_lines": null
} |
import sys, pygame
from pygame.locals import *
from random import randint
import math
def drawApple():
red = pygame.Color(255,0,0)
green = pygame.Color(0,255,0)
pygame.draw.circle(window,red,apple,5,0)
def newApple():
x = randint(5,635)
y = randint(5,475)
while abs(snakeBody[0][0]-x)<20:
x = randint(1,63)
x *= 10
x += 5
while abs(snakeBody[0][1]-y)<20:
y = randint(1,47)
y *= 10
y += 5
return (x,y)
def drawSnake():
green = pygame.Color(0,255,0)
for section in snakeBody:
pygame.draw.circle(window,green,section,5,0)
def moveSnake(snakeBody):
head = snakeBody[0]
new = (head[0]+10*direction[0],head[1]+10*direction[1])
snakeBody = [new] + snakeBody
while len(snakeBody) > snakeLength:
snakeBody.pop()
return snakeBody
def snakeCollidesApple(snakeBody,apple):
collide = False
for s in snakeBody:
x = s[0] - apple[0]
y = s[1] - apple[1]
if abs(x)<=7 and abs(y)<=7:
collide = True
break
return collide
def snakeCollidesSelf(snakeBody):
collide = False
head = snakeBody[0]
for s in snakeBody[1:]:
x = s[0] - head[0]
y = s[1] - head[1]
if (x*x + y*y) < 25:
collide = True
break
return collide
def snakeCollidesEdge(snakeBody):
head = snakeBody[0]
if head[0] < 0: return True
if head[0] > 640: return True
if head[1] < 0: return True
if head[1] > 480: return True
return False
pygame.init()
fps = pygame.time.Clock()
window = pygame.display.set_mode((640,480))
pygame.display.set_caption('SNAAAAAAAAAAAAAAAAAAAAAAKE!!!!!!!!!!!!!!')
snakeLength = 3
snakeBody = [(320,240),(320,250),(320,260)]
apple = newApple()
speed = 10
direction = (0,-1)
# (1,0) = right, (-1,0) = left, (0,1) = down, (0,-1) = up
quit = False
while not quit:
events = pygame.event.get()
window.fill((0,0,128))
for event in events:
if event.type == KEYDOWN:
if event.key == K_q:
quit = True
if event.key == K_a:
direction = (-1,0)
if event.key == K_d:
direction = (1,0)
if event.key == K_w:
direction = (0,-1)
if event.key == K_s:
direction = (0,1)
appleEaten = snakeCollidesApple(snakeBody,apple)
snakeBitten = snakeCollidesSelf(snakeBody)
snakeCrashed = snakeCollidesEdge(snakeBody)
if appleEaten:
apple = newApple()
snakeLength += 1
speed += 1
snakeBody = moveSnake(snakeBody)
drawApple()
drawSnake()
pygame.display.update()
fps.tick(speed)
quit = snakeBitten or snakeCrashed or quit
print "you ate:",snakeLength-3,"apples!"
if randint(0,100)>95:
print "big question here: do snakes eat apples?"
| {
"repo_name": "Narcolapser/PyGameLearningByDoing",
"path": "Old PyGame stuff/snake/snake.py",
"copies": "1",
"size": "2886",
"license": "apache-2.0",
"hash": -3639217765777358300,
"line_mean": 22.2741935484,
"line_max": 100,
"alpha_frac": 0.6160776161,
"autogenerated": false,
"ratio": 2.6380255941499087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.37541032102499083,
"avg_score": null,
"num_lines": null
} |
## 3. Optional Arguments ##
# Default code
def tokenize(text_string, special_characters, clean=False):
if clean == True:
cleaned_story = clean_text(text_string, special_characters)
story_tokens = cleaned_story.split(" ")
else:
story_tokens = text_string.split(" ")
return(story_tokens)
clean_chars = [",", ".", "'", ";", "\n"]
tokenized_story = []
tokenized_vocabulary = []
misspelled_words = []
tokenized_story = tokenize(story_string, clean_chars, clean=True)
tokenized_vocabulary = tokenize(vocabulary, clean_chars, clean=False)
for item in tokenized_story:
if item not in tokenized_vocabulary:
misspelled_words.append(item)
## 5. Practice: Creating a More Compact Spell Checker ##
def clean_text(text_string, special_characters):
cleaned_string = text_string
for string in special_characters:
cleaned_string = cleaned_string.replace(string, "")
cleaned_string = cleaned_string.lower()
return(cleaned_string)
def tokenize(text_string, special_characters, clean=False):
cleaned_text = text_string
if clean:
cleaned_text = clean_text(text_string, special_characters)
tokens = cleaned_text.split(" ")
return(tokens)
def spell_check(vocabulary_file,text_file,special_characters=[",",".","'",";","\n"]):
misspelled_words = []
voca = open(vocabulary_file).read()
txt = open(text_file).read()
tokenized_vocabulary = tokenize(voca,special_characters,clean=False)
tokenized_text = tokenize(txt,special_characters,clean=True)
for item in tokenized_text:
if item not in tokenized_vocabulary and item != '':
misspelled_words.append(item)
return(misspelled_words)
final_misspelled_words = []
final_misspelled_words = spell_check('dictionary.txt','story.txt')
print(final_misspelled_words)
## 7. Syntax Errors ##
def spell_check(vocabulary_file, text_file, special_characters=[",",".","'",";","\n"]):
misspelled_words = []
vocabulary = open(vocabulary_file).read()
text = open(text_file.read()
tokenized_vocabulary = tokenize(vocabulary, special_characters= special_characters,clean = False)
tokenized_text = tokenize(text, special_characters, True)
for ts in tokenized_text:
if ts not in tokenized_vocabulary and ts != '':
misspelled_words.append(ts)
return(misspelled_words)
final_misspelled_words = spell_check(vocabulary_file="dictionary.txt", text_file="story.txt")
print(final_misspelled_words)
## 9. TypeError and ValueError ##
forty_two = 42
forty_two + float("42")
str("guardians")
## 11. Traceback ##
def spell_check(vocabulary_file, text_file, special_characters=[",",".","'",";","\n"]):
misspelled_words = []
vocabulary = open(vocabulary_file).read()
# Add ending parentheses.
text = open(text_file).read()
# Fix indentation.
tokenized_vocabulary = tokenize(vocabulary, special_characters)
tokenized_text = tokenize(text, special_characters, True)
for ts in tokenized_text:
if ts not in tokenized_vocabulary and ts != '':
misspelled_words.append(ts)
return(misspelled_words)
final_misspelled_words = spell_check(vocabulary_file="dictionary.txt", text_file="story.txt")
print(final_misspelled_words) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Python Programming Beginner/Customizing Functions and Debugging Errors-61.py",
"copies": "1",
"size": "3286",
"license": "mit",
"hash": -639473868900443900,
"line_mean": 33.6,
"line_max": 101,
"alpha_frac": 0.6789409617,
"autogenerated": false,
"ratio": 3.426485922836288,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4605426884536288,
"avg_score": null,
"num_lines": null
} |
# 3 options:
# 1: video player is dormant, and renders a frame and returns it on function call
# 2: video player is threaded, and a frame can be retrieved at any time via function call
# 3: video player is threaded, and a callback is made to external object with rendered frames
#
# For our purposes, 2 looks to be the best (in my opinion), since our engine is "active" (always running at some framerate). If we ever change this, then I would suggest option 3: the video player calls an external function to notify that a frame is ready.
# group=None, target=None, name=None, args=(), kwargs={}
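# A minimal sketch of option 2 (hypothetical class, not part of this module): the player
# decodes on its own thread and keeps only the most recent frame, and the engine, which is
# always running at some framerate, simply polls get_frame() once per tick.
import threading as _sketch_threading

class _PolledPlayerSketch(object):
    def __init__(self, decode_next):
        self._decode_next = decode_next          # callable that returns the next raw frame
        self._lock = _sketch_threading.Lock()
        self._frame = None                       # most recently decoded frame
        self._stop = _sketch_threading.Event()
        self._thread = _sketch_threading.Thread(target=self._run)
        self._thread.daemon = True
    def start(self):
        self._thread.start()
    def stop(self):
        self._stop.set()
    def get_frame(self):
        # called from the engine's main loop; never blocks on decoding
        self._lock.acquire()
        try:
            return self._frame
        finally:
            self._lock.release()
    def _run(self):
        while not self._stop.isSet():
            frame = self._decode_next()
            self._lock.acquire()
            self._frame = frame
            self._lock.release()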
import threading
import time
import traceback  # used by the codec-fallback error handling below
import pymedia
import pymedia.muxer as muxer
import pymedia.audio.acodec as acodec
import pymedia.video.vcodec as vcodec
import pymedia.audio.sound as sound
# import ao
import pygame
# PlaybackBuffer is a buffer for processed audio around the sound module. I use it because the pymedia snd module will block if its internal buffer is full, which is undesirable for the main video playback. I also can't make use of pymedia.snd.getSpace() because it is broken in (at least) linux, and doesn't seem to give very reasonable data.
# The result is that the snd module only needs a snd.play(data) function, which is good because it means something like libao could just as easily be used.
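# Minimal sketch of the backend interface PlaybackBuffer relies on (hypothetical class,
# not part of this module): anything exposing a blocking play(data) method can be dropped
# in, which is why pymedia's sound.Output or libao are interchangeable here.
class _NullSoundBackend(object):
    def play(self, data):
        # a real backend would block while the hardware drains `data`; this stand-in discards it
        pass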
class PlaybackBuffer:
eob = 0.0
aBuffer = []
def __init__(self, snd):
self.snd = snd
self.t = threading.Thread(None, target=self.process)
self.aBuffer = []
self.eob = time.time()
self._blk = threading.Semaphore(1)
self._stop = threading.Event() # Stop Event. Stops once the buffer is empty
self._fstop = threading.Event() # Force Stop. Stops immediately
self._notEmpty = threading.Event()
def begin(self):
self.t.start()
# Stop after buffer empties
def stop(self):
self._stop.set()
# Stop even if there is audio on the buffer
def fstop(self):
self._fstop.set()
def getLeft(self):
return self.eob - time.time()
# Called from outside the 'process' thread to add sound data to the buffer.
def play(self, data, sndlen):
if self._stop.isSet() or self._fstop.isSet():
return False
# add to buffer
self._blk.acquire()
self.aBuffer.append(data)
if len(self.aBuffer) == 1:
# print "1 sound"
self._notEmpty.set()
self._blk.release()
# Adjust buffer length variable
if self.eob < time.time():
self.eob = time.time() + sndlen
else:
self.eob = self.eob + sndlen
# threaded audio processor, waits for audio on the buffer and sends it to the snd module.
# the snd module can block all it wants in this case. When the snd module un-blocks, more
# sound can be fed to it (ie immediately)
def process(self):
# loop until stop
while not self._fstop.isSet():
self._notEmpty.wait(.5) # .5 second wait, in case of fstop event
if self._notEmpty.isSet():
if self._stop.isSet():
self._fstop.set()
else:
self._blk.acquire()
data = self.aBuffer.pop(0)
if len(self.aBuffer) == 0:
self._notEmpty.clear()
self._blk.release()
# Process the data. May block, but that is okay
self.snd.play( data )
# This is the internal movie player module. I kept this separate to simplify things, and in case movie frames are not always read from a file.
class MovieInternal:
vfTime = 0.0 # Video Frame Period (1/frame rate). Needs to be adjusted on the fly.
eaq = 0.05 # Audio Queue Time: how many seconds ahead can we plan (queue) audio?
eag = 0.01 # Audio Gap Tolerance: for small gaps, just run sound together. Don't wait .001 seconds (or something) for the right time to play the next audio segment
tstart = 0.0 # time at which video playback started.
frame = 0 # for calculating vtime
vtime_start = 0.0
aBuffer = [] # Buffer (pointers) to raw audio frames, in order
vBuffer = [] # Buffer (pointers) to raw video frames, in order (?) (what about IPBB?)
adecoder = None
vdecoder = None
callback = None # Callback Class, Implements onVideoReady( vfr ), where data is vfr.data
# Get current playback time
def ctime(self):
return time.time() - self.tstart
# Get pts of current video frame (where PTS data is not available
def vtime(self, vfr):
# Get expected vtime using frame rate
vtime = self.frame * self.vfTime + self.vtime_start # use estimate
# correct for PTS data, using averaging in case of bogus values (??)
vtime2 = vfr[1]
if vtime2 > 0:
vtime = (vtime + vtime2)/2.0
return vtime
def adata2time(self, data):
return float(len(data.data))/(2*data.channels*data.sample_rate)
def aBufferFull(self):
return len(self.aBuffer) >= 100
def vBufferFull(self):
return len(self.vBuffer) >= 100
def parse(self, data):
# Parse raw mpeg file data
pstream = self.demux.parse( data )
for data in pstream:
if data[0] == self.video_index:
self.vBuffer.append((data[1], data[ 3 ] / 90000.0))
if data[0] == self.audio_index:
self.aBuffer.append((data[1], data[ 3 ] / 90000.0))
def playback_buffers(self):
# play movie data
# Returns time before action is needed
ctime = self.ctime()
t1 = self.processAudio(ctime)
if t1 == 0:
return 0
# If no audio was handled, try a video frame
t2 = self.processVideo(ctime)
if t2 == 0.0:
return 0.0
# Otherwise, return the shorter time
return min(t1, t2)
def processAudio(self, ctime):
if len(self.aBuffer) == 0:
return 1.0
# time of the current raw sound
atime = self.aBuffer[0][1]
# How much audio is on the buffer?
qtime = self.snd.getLeft()
# Should deal with audio on aBuffer?
# 1. is the next audio segment supposed to be played in the past?
# 2. is the next audio segment supposed to be played within eaq of the present,
# and would the next audio segment be played within eag of the end of the
# last sound?
if (ctime > atime) or (qtime > 0 and atime < ctime + self.eaq and atime < ctime + qtime + self.eag):
# print "AUDIO"
# Need to process audio
ardata = self.aBuffer[0]
adata = self.adecoder.decode( ardata[0] )
# print len(adata.data)
# If there is room on the buffer
# print "free"
self.aBuffer.pop(0)
sndlen = self.adata2time(adata)
# Drop if it the start of the next sound is closer than the end of the current
# sound. (but using 3/4)
if ctime + qtime > atime + 3.0*sndlen / 4:
print ctime, qtime, atime, sndlen
print " A Delete Too Late"
else:
# sndarray = numpy.fromstring(adata.data)
## sndarray = numpy.transpose(numpy.vstack((sndarray, sndarray)))
##sound = pygame.sndarray.make_sound(sndarray)
# sound.play()
# t1 = time.time()
##self.snd.play(sound)
# print "t2", time.time()-t1
self.snd.play( adata.data, sndlen )
del(ardata)
del(adata)
return 0.0
# when do we need action?
return qtime
def processVideo(self, ctime):
if len(self.vBuffer) == 0:
# Just deal with audio
return 1.0
vtime = self.vtime(self.vBuffer[0])
if vtime < ctime:
# Need to process video
# Delete only one at a time: remember, audio has presedence
vrdata = self.vBuffer.pop(0)
vdata = self.vdecoder.decode( vrdata[0] )
if vdata != None:
# correct vfTime, using an average
if vdata.rate > 1000:
vfTime2 = 1000.0 / vdata.rate
else:
vfTime2 = 1.0 / vdata.rate
self.vfTime = (self.vfTime + vfTime2) / 2.0
# if PTS, use for vtime calc
if vrdata[1]>0:
self.vtime_start = vtime # vrdata[1]
self.frame = 1
else:
self.frame = self.frame + 1
# If we are on time, show the frame
if (ctime - vtime) <= self.vfTime*2:
self.callback.onVideoReady( vdata )
else:
print " V Delete Late"
del vdata
del vrdata
return 0.0
# When do we need action?
return vtime - ctime
class MovieFile(MovieInternal):
filename = ""
mfile = None # file object for movie file
video_index = -1 # id for raw video frames
audio_index = -1 # id for raw audio frames
READ = 50000 # # bytes, 50000 should take about 0.005 to 0.01 seconds to read and sort
demux = None # Demuxer
def __init__(self, filename):
self.filename = filename
def play(self, vol=0xaaaa, pos=0):
# first two are flags for the thread
self.event_stop = False
self.event_pause = False
# this is to block the thread until it is un-paused
self.snd = None
t = threading.Thread(None, target=self.playback, kwargs={'pos':pos, 'vol':vol})
t.start()
def stop(self):
self.event_stop = True
def playing(self):
return self.event_stop
def pause(self):
self.event_pause = not self.event_pause
# if self.event_pause:
# self.snd.setVolume(0)
# else:
# vol = (self.vol & 0x003f) << 8
# self.snd.setVolume(vol)
def pause_fade(self):
self.event_pause = not self.event_pause
if self.event_pause:
for i in range(self.vol, 0, -1):
voli = (i & 0x003f) << 8
# self.snd.setVolume(voli)
# print voli
time.sleep(.005)
else:
vol = (self.vol & 0x003f) << 8
# self.snd.setVolume(vol)
def setVolume(self, vol):
# vol is from 1 to 64. No left-right control ( :<( )
self.vol = vol
vol = (vol & 0x003f) << 8  # mask to the low 6 bits, shift left by 8: bits 13-8 carry the volume
# if self.snd != None:
# self.snd.setVolume(vol)
def playback(self, vol=0, pos=0):
# open the file
self.mfile = open( self.filename, 'rb' )
# create a demuxer using filename extension
self.demux = muxer.Demuxer(self.filename.split( '.' )[ -1 ].lower())
tempDemux = muxer.Demuxer(self.filename.split( '.' )[ -1 ].lower())
# read some of the file
fdata = self.mfile.read( 300000 )
pstream = tempDemux.parse( fdata )
# initialize decoders
# find the audio stream
for streami in range(len(tempDemux.streams)):
stream = tempDemux.streams[streami]
print tempDemux.streams
if stream['type'] == muxer.CODEC_TYPE_VIDEO:
try:
# Set the initial sound delay to 0 for now
# It defines initial offset from video in the beginning of the stream
# self.resetVideo()
# seekADelta= 0
# Setting up the HW video codec
self.vdecoder = pymedia.video.ext_codecs.Decoder( stream )
print "GOT HW CODEC"
except:
try:
# Fall back to SW video codec
self.vdecoder= vcodec.Decoder( stream )
print "GOT SW CODEC"
except:
traceback.print_exc()
print "FAILED TO INIT VIDEO CODEC"
self.video_index = streami
break
for streami in range(len(tempDemux.streams)):
stream = tempDemux.streams[streami]
if stream['type'] == muxer.CODEC_TYPE_AUDIO:
self.adecoder = acodec.Decoder( stream )
self.audio_index = streami
break
print "Video index: " + str(self.video_index)
print "Audio index: " + str(self.audio_index)
# decode a frame to get bitrate, etc
for vdata in pstream:
if vdata[0] != self.video_index: continue
vfr = self.vdecoder.decode( vdata[1] )
if vfr == None: continue # WHY?
break
self.vdecoder.reset()
if self.audio_index != -1:
for vdata in pstream:
if vdata[0] != self.audio_index: continue
afr = self.adecoder.decode( vdata[1] )
break
self.adecoder.reset()
self.channels = afr.channels
self.sample_rate = afr.sample_rate
# print 'Opening sound', self.sample_rate, self.channels, sound.AFMT_S16_LE, 0
sndModule = sound.Output( self.sample_rate, self.channels, sound.AFMT_S16_NE )
self.snd = PlaybackBuffer(sndModule)
self.snd.begin()
# pygame.mixer.init(self.sample_rate, -16, self.channels, 4096) # 4096
# pygame.mixer.set_num_channels(2)
# self.snd = pygame.mixer.Channel(0)
# self.snd = ao.AudioDevice(
# 0,
# bits=16,
# rate=self.sample_rate,
# channels=self.channels,
# byte_format=1)
print "Sample rate: " + str(self.sample_rate)
print "Channels: " + str(self.channels)
# self.fullspace = self.snd.getSpace()
self.fullspace = 0
print "FULLSPACE", self.fullspace
self.setVolume(vol)
# print self.snd.getVolume()
# Set up output video method
# self.snd = sound.Output( sdecoded.sample_rate, sdecoded.channels, sound.AFMT_S16_NE )
pygame.init()
pygame.display.set_mode( vfr.size, 0 )
self.overlay = pygame.Overlay( pygame.YV12_OVERLAY, vfr.size )
# set overlay loc?
# Will need to adjust for aspect
# if vfr.aspect_ratio> .0:
# self.pictureSize= ( vfr.size[ 1 ]* vfr.aspect_ratio, vfr.size[ 1 ] )
# else:
# self.pictureSize= vfr.size
print "vfr info: " + str(vfr)
print dir(vfr)
print vfr.rate # frames/second. Each vfr is a frame.
print vfr.bitrate
print vfr.aspect_ratio
if vfr.rate > 1000:
self.vfTime = 1000.0 / vfr.rate
else:
self.vfTime = 1.0 / vfr.rate
self.tstart = time.time() - pos
self.callback = self
# Now I can trash the temporary muxer, and do things properly
del(tempDemux)
self.parse(fdata)
file_ended = False
while not self.event_stop:
# Process audio/video, or read or sleep
if len(self.aBuffer) == 0 or len(self.vBuffer) == 0:
if not self.read():
file_ended = True
if len(self.aBuffer) == 0:
self.event_stop = True
continue
stime = self.playback_buffers()
# "freetime"
if stime > 0:
if not self.vBufferFull() and not self.aBufferFull():
# print "READ"
if not self.read():
file_ended = True
else:
# print " Sleep", stime
# Sleep until a new frame is needed
time.sleep(stime/2.0)
if len(self.aBuffer) == 0:
self.snd.stop()
else:
self.snd.fstop()
self.event_stop = True
print len(self.aBuffer)
def read(self):
# read and parse new data
fdata = self.mfile.read(self.READ)
if len(fdata) > 0:
self.parse(fdata)
return True
else:
return False
# Display a video frame
def onVideoReady(self, vfr):
if vfr.data != None:
self.overlay.display( vfr.data )
# External movie player class. To be replaced to fit in AoI
class m_movie(MovieInternal):
filename = ""
mfile = None # file object for movie file
video_index = -1 # id for raw video frames
audio_index = -1 # id for raw audio frames
READ = 50000 # # bytes, 50000 should take about 0.005 to 0.01 seconds to read and sort
demux = None # Demuxer
def __init__(self, filename):
self.filename = filename
def play(self, vol=0xaaaa, pos=0):
# first two are flags for the thread
self.event_stop = False
self.event_pause = False
# this is to block the thread until it is un-paused
self.snd = None
self.Event_pauseEnd = threading.Event()
t = threading.Thread(None, target=self.playback, kwargs={'pos':pos, 'vol':vol})
t.start()
def stop(self):
self.event_stop = True
def pause(self):
self.event_pause = not self.event_pause
# if self.event_pause:
# self.snd.setVolume(0)
# else:
# vol = (self.vol & 0x003f) << 8
# self.snd.setVolume(vol)
def pause_fade(self):
self.event_pause = not self.event_pause
if self.event_pause:
for i in range(self.vol, 0, -1):
voli = (i & 0x003f) << 8
# self.snd.setVolume(voli)
# print voli
time.sleep(.005)
else:
vol = (self.vol & 0x003f) << 8
# self.snd.setVolume(vol)
def setVolume(self, vol):
# vol is from 1 to 64. No left-right control ( :<( )
self.vol = vol
vol = (vol & 0x003f) << 8  # mask to the low 6 bits, shift left by 8: bits 13-8 carry the volume
# if self.snd != None:
# self.snd.setVolume(vol)
def playback(self, vol=0, pos=0):
# open the file
self.mfile = open( self.filename, 'rb' )
# create a demuxer using filename extension
self.demux = muxer.Demuxer(self.filename.split( '.' )[ -1 ].lower())
tempDemux = muxer.Demuxer(self.filename.split( '.' )[ -1 ].lower())
# read some of the file
fdata = self.mfile.read( 300000 )
pstream = tempDemux.parse( fdata )
# initialize decoders
# find the audio stream
for streami in range(len(tempDemux.streams)):
stream = tempDemux.streams[streami]
print tempDemux.streams
if stream['type'] == muxer.CODEC_TYPE_VIDEO:
try:
# Set the initial sound delay to 0 for now
# It defines initial offset from video in the beginning of the stream
# self.resetVideo()
# seekADelta= 0
# Setting up the HW video codec
self.vdecoder = pymedia.video.ext_codecs.Decoder( stream )
print "GOT HW CODEC"
except:
try:
# Fall back to SW video codec
self.vdecoder= vcodec.Decoder( stream )
print "GOT SW CODEC"
except:
traceback.print_exc()
print "FAILED TO INIT VIDEO CODEC"
self.video_index = streami
break
for streami in range(len(tempDemux.streams)):
stream = tempDemux.streams[streami]
if stream['type'] == muxer.CODEC_TYPE_AUDIO:
self.adecoder = acodec.Decoder( stream )
self.audio_index = streami
break
print "Video index: " + str(self.video_index)
print "Audio index: " + str(self.audio_index)
# decode a frame to get bitrate, etc
for vdata in pstream:
if vdata[0] != self.video_index: continue
vfr = self.vdecoder.decode( vdata[1] )
if vfr == None: continue # WHY?
break
self.vdecoder.reset()
if self.audio_index != -1:
for vdata in pstream:
if vdata[0] != self.audio_index: continue
afr = self.adecoder.decode( vdata[1] )
break
self.adecoder.reset()
self.channels = afr.channels
self.sample_rate = afr.sample_rate
# print 'Opening sound', self.sample_rate, self.channels, sound.AFMT_S16_LE, 0
sndModule = sound.Output( self.sample_rate, self.channels, sound.AFMT_S16_NE )
self.snd = PlaybackBuffer(sndModule)
self.snd.begin()
# pygame.mixer.init(self.sample_rate, -16, self.channels, 4096) # 4096
# pygame.mixer.set_num_channels(2)
# self.snd = pygame.mixer.Channel(0)
# self.snd = ao.AudioDevice(
# 0,
# bits=16,
# rate=self.sample_rate,
# channels=self.channels,
# byte_format=1)
print "Sample rate: " + str(self.sample_rate)
print "Channels: " + str(self.channels)
# self.fullspace = self.snd.getSpace()
self.fullspace = 0
print "FULLSPACE", self.fullspace
self.setVolume(vol)
# print self.snd.getVolume()
# Set up output video method
# self.snd = sound.Output( sdecoded.sample_rate, sdecoded.channels, sound.AFMT_S16_NE )
pygame.init()
pygame.display.set_mode( vfr.size, 0 )
self.overlay = pygame.Overlay( pygame.YV12_OVERLAY, vfr.size )
# set overlay loc?
# Will need to adjust for aspect
# if vfr.aspect_ratio> .0:
# self.pictureSize= ( vfr.size[ 1 ]* vfr.aspect_ratio, vfr.size[ 1 ] )
# else:
# self.pictureSize= vfr.size
print "vfr info: " + str(vfr)
print dir(vfr)
print vfr.rate # frames/second. Each vfr is a frame.
print vfr.bitrate
print vfr.aspect_ratio
if vfr.rate > 1000:
self.vfTime = 1000.0 / vfr.rate
else:
self.vfTime = 1.0 / vfr.rate
self.tstart = time.time() - pos
self.callback = self
# Now I can trash the temporary muxer, and do things properly
del(tempDemux)
self.parse(fdata)
file_ended = False
while not self.event_stop:
# Process audio/video, or read or sleep
if len(self.aBuffer) == 0 or len(self.vBuffer) == 0:
if not self.read():
file_ended = True
if len(self.aBuffer) == 0:
self.event_stop = True
continue
stime = self.playback_buffers()
# "freetime"
if stime > 0:
if not self.vBufferFull() and not self.aBufferFull():
# print "READ"
if not self.read():
file_ended = True
else:
# print " Sleep", stime
# Sleep until a new frame is needed
time.sleep(stime/2.0)
if len(self.aBuffer) == 0:
self.snd.stop()
else:
self.snd.fstop()
print len(self.aBuffer)
def read(self):
# read and parse new data
fdata = self.mfile.read(self.READ)
if len(fdata) > 0:
self.parse(fdata)
return True
else:
return False
# Display a video frame
def onVideoReady(self, vfr):
if vfr.data != None:
self.overlay.display( vfr.data )
| {
"repo_name": "ilathid/ilathidEngine",
"path": "vplayer/old/m_movie.py",
"copies": "1",
"size": "25171",
"license": "epl-1.0",
"hash": 5845616176785844000,
"line_mean": 36.0161764706,
"line_max": 351,
"alpha_frac": 0.5212744825,
"autogenerated": false,
"ratio": 3.9341981869334166,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4955472669433416,
"avg_score": null,
"num_lines": null
} |
# 3p
from mock import Mock
# project
from tests.checks.common import AgentCheckTest
from tests.core.test_wmi import SWbemServices, TestCommonWMI
class WMITestCase(AgentCheckTest, TestCommonWMI):
CHECK_NAME = 'wmi_check'
WMI_CONNECTION_CONFIG = {
'host': "myhost",
'namespace': "some/namespace",
'username': "datadog",
'password': "datadog",
'class': "Win32_OperatingSystem",
'metrics': [["NumberOfProcesses", "system.proc.count", "gauge"],
["NumberOfUsers", "system.users.count", "gauge"]]
}
WMI_CONFIG = {
'class': "Win32_PerfFormattedData_PerfDisk_LogicalDisk",
'metrics': [["AvgDiskBytesPerWrite", "winsys.disk.avgdiskbytesperwrite", "gauge"],
["FreeMegabytes", "winsys.disk.freemegabytes", "gauge"]],
'tag_by': "Name",
'constant_tags': ["foobar"],
}
WMI_NON_DIGIT_PROP = {
'class': "Win32_PerfFormattedData_PerfDisk_LogicalDisk",
'metrics': [["NonDigit", "winsys.nondigit", "gauge"],
["FreeMegabytes", "winsys.disk.freemegabytes", "gauge"]],
}
WMI_MISSING_PROP_CONFIG = {
'class': "Win32_PerfRawData_PerfOS_System",
'metrics': [["UnknownCounter", "winsys.unknowncounter", "gauge"],
["MissingProperty", "this.will.not.be.reported", "gauge"]],
}
WMI_CONFIG_NO_TAG_BY = {
'class': "Win32_PerfFormattedData_PerfDisk_LogicalDisk",
'metrics': [["AvgDiskBytesPerWrite", "winsys.disk.avgdiskbytesperwrite", "gauge"],
["FreeMegabytes", "winsys.disk.freemegabytes", "gauge"]],
}
WMI_CONFIG_FILTERS = {
'class': "Win32_PerfFormattedData_PerfDisk_LogicalDisk",
'metrics': [["AvgDiskBytesPerWrite", "winsys.disk.avgdiskbytesperwrite", "gauge"],
["FreeMegabytes", "winsys.disk.freemegabytes", "gauge"]],
'filters': [{'Name': "_Total"}],
}
WMI_TAG_QUERY_CONFIG_TEMPLATE = {
'class': "Win32_PerfFormattedData_PerfProc_Process",
'metrics': [["IOReadBytesPerSec", "proc.io.bytes_read", "gauge"]],
'filters': [{'Name': "chrome"}],
}
@classmethod
def _make_wmi_tag_query_config(cls, tag_queries):
"""
Helper to create a WMI configuration on
`Win32_PerfFormattedData_PerfProc_Process.IOReadBytesPerSec` with the given
`tag_queries` parameter.
"""
wmi_tag_query_config = {}
wmi_tag_query_config.update(cls.WMI_TAG_QUERY_CONFIG_TEMPLATE)
queries = tag_queries if all(isinstance(elem, list) for elem in tag_queries) \
else [tag_queries]
wmi_tag_query_config['tag_queries'] = queries
return wmi_tag_query_config
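# Illustrative expansion (added note): with tag_queries = ["IDProcess", "Win32_Process",
# "Handle", "CommandLine"], the helper returns the template above plus
# 'tag_queries': [["IDProcess", "Win32_Process", "Handle", "CommandLine"]].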
def _get_wmi_sampler(self):
"""
Helper to easily retrieve, if exists and unique, the WMISampler created
by the configuration.
Fails when multiple samplers are available.
"""
self.assertTrue(
self.check.wmi_samplers,
u"Unable to retrieve the WMISampler: no sampler was found"
)
self.assertEquals(
len(self.check.wmi_samplers), 1,
u"Unable to retrieve the WMISampler: expected a unique, but multiple were found"
)
return self.check.wmi_samplers.itervalues().next()
def test_wmi_connection(self):
"""
Establish a WMI connection to the specified host/namespace, with the right credentials.
"""
# Run check
config = {
'instances': [self.WMI_CONNECTION_CONFIG]
}
self.run_check(config)
# A WMISampler is cached
self.assertInPartial("myhost:some/namespace:Win32_OperatingSystem", self.check.wmi_samplers)
wmi_sampler = self.getProp(self.check.wmi_samplers, "myhost:some/namespace:Win32_OperatingSystem")
# Connection was established with the right parameters
self.assertWMIConn(wmi_sampler, "myhost")
self.assertWMIConn(wmi_sampler, "some/namespace")
def test_wmi_sampler_initialization(self):
"""
An instance creates its corresponding WMISampler.
"""
# Run check
config = {
'instances': [self.WMI_CONFIG_FILTERS]
}
self.run_check(config)
# Retrieve the sampler
wmi_sampler = self._get_wmi_sampler()
# Assert the sampler
self.assertEquals(wmi_sampler.class_name, "Win32_PerfFormattedData_PerfDisk_LogicalDisk")
self.assertEquals(wmi_sampler.property_names, ["AvgDiskBytesPerWrite", "FreeMegabytes"])
self.assertEquals(wmi_sampler.filters, [{'Name': "_Total"}])
def test_wmi_properties(self):
"""
Compute a (metric name, metric type) by WMI property map and a property list.
"""
# Set up the check
config = {
'instances': [self.WMI_CONNECTION_CONFIG]
}
self.run_check(config)
# WMI props are cached
self.assertInPartial("myhost:some/namespace:Win32_OperatingSystem", self.check.wmi_props)
metric_name_and_type_by_property, properties = \
self.getProp(self.check.wmi_props, "myhost:some/namespace:Win32_OperatingSystem")
# Assess
self.assertEquals(
metric_name_and_type_by_property,
{
'numberofprocesses': ("system.proc.count", "gauge"),
'numberofusers': ("system.users.count", "gauge")
}
)
self.assertEquals(properties, ["NumberOfProcesses", "NumberOfUsers"])
def test_metric_extraction(self):
"""
Extract metrics from WMI query results.
"""
# local import to avoid pulling in pywintypes ahead of time.
from checks.wmi_check import WMIMetric # noqa
# Set up the check
config = {
'instances': [self.WMI_CONFIG]
}
self.run_check(config)
# Retrieve the sampler
wmi_sampler = self._get_wmi_sampler()
# Extract metrics
metrics = self.check._extract_metrics(wmi_sampler, "name", [], ["foobar"])
# Assess
expected_metrics = [
WMIMetric("freemegabytes", 19742, ["foobar", "name:c:"]),
WMIMetric("avgdiskbytesperwrite", 1536, ["foobar", "name:c:"]),
WMIMetric("freemegabytes", 19742, ["foobar", "name:d:"]),
WMIMetric("avgdiskbytesperwrite", 1536, ["foobar", "name:d:"]),
]
self.assertEquals(metrics, expected_metrics)
def test_missing_property(self):
"""
Do not raise on missing properties, but print a warning.
"""
# Set up the check
config = {
'instances': [self.WMI_MISSING_PROP_CONFIG]
}
logger = Mock()
self.run_check(config, mocks={'log': logger})
self.assertTrue(logger.warning.called)
def test_warnings_on_non_digit(self):
"""
Log a warning on non digit property values except for:
* 'Name' property
* 'tag_by' associated property
"""
wmi_instance = self.WMI_NON_DIGIT_PROP.copy()
config = {
'instances': [wmi_instance]
}
logger = Mock()
# Log a warning about 'NonDigit' property
self.run_check(config, mocks={'log': logger})
self.assertEquals(logger.warning.call_count, 1)
# No warnings on `tag_by` property neither on `Name`
del wmi_instance['metrics'][0]
wmi_instance['tag_by'] = "NonDigit"
self.run_check(config, mocks={'log': logger})
self.assertEquals(logger.warning.call_count, 1)
def test_query_timeouts(self):
"""
Gracefully handle WMI query timeouts.
"""
def __patched_init__(*args, **kwargs):
"""
Force `timeout_duration` value.
"""
kwargs['timeout_duration'] = 0.1
return wmi_constructor(*args, **kwargs)
# Increase WMI queries' runtime
SWbemServices._exec_query_run_time = 0.2
# Patch WMISampler to decrease timeout tolerance
from checks.libs.wmi.sampler import WMISampler
wmi_constructor = WMISampler.__init__
WMISampler.__init__ = __patched_init__
# Set up the check
config = {
'instances': [self.WMI_CONFIG]
}
logger = Mock()
# No exception is raised but a WARNING is logged
self.run_check(config, mocks={'log': logger})
self.assertTrue(logger.warning.called)
def test_mandatory_tag_by(self):
"""
Exception is raised when the result returned by the WMI query contains multiple rows
but no `tag_by` value was given.
"""
# local import to avoid pulling in pywintypes ahead of time.
from checks.wmi_check import MissingTagBy # noqa
# Valid configuration
config = {
'instances': [self.WMI_CONFIG]
}
self.run_check(config)
# Invalid
config = {
'instances': [self.WMI_CONFIG_NO_TAG_BY]
}
self.assertRaises(MissingTagBy, self.run_check, config, force_reload=True)
def test_query_tag_properties(self):
"""
WMISampler's property list contains `metrics` and `tag_queries` ones.
"""
# Set up the check
tag_queries = ["IDProcess", "Win32_Process", "Handle", "CommandLine"]
config = {
'instances': [self._make_wmi_tag_query_config(tag_queries)]
}
self.run_check(config)
# WMI props are cached
self.assertInPartial(
"localhost:root\\cimv2:Win32_PerfFormattedData_PerfProc_Process",
self.check.wmi_props
)
_, properties = \
self.getProp(self.check.wmi_props, "localhost:root\\cimv2:Win32_PerfFormattedData_PerfProc_Process")
self.assertEquals(properties, ["IOReadBytesPerSec", "IDProcess"])
def test_query_tags(self):
"""
Tag extracted metrics with `tag_queries` queries.
"""
# local import to avoid pulling in pywintypes ahead of time.
from checks.wmi_check import WMIMetric # noqa
# Set up the check
tag_queries = ["IDProcess", "Win32_Process", "Handle", "CommandLine"]
config = {
'instances': [self._make_wmi_tag_query_config(tag_queries)]
}
self.run_check(config)
# Retrieve the sampler
wmi_sampler = self._get_wmi_sampler()
# Extract metrics
metrics = self.check._extract_metrics(
wmi_sampler, "name",
tag_queries=[tag_queries], constant_tags=["foobar"]
)
# Assess
expected_metrics = [
WMIMetric("ioreadbytespersec", 20455, tags=['foobar', 'commandline:c:\\'
'programfiles(x86)\\google\\chrome\\application\\chrome.exe']),
WMIMetric('idprocess', 4036, tags=['foobar', 'commandline:c:\\'
'programfiles(x86)\\google\\chrome\\application\\chrome.exe']),
]
self.assertEquals(metrics, expected_metrics)
def test_query_tags_failures(self):
"""
Check different `tag_queries` failure scenarios.
"""
# Mock the logger so it can be traced
logger = Mock()
# Raise when user `tag_queries` input has a wrong format
tag_queries = ["IDProcess", "MakesNoSense"]
config = {
'instances': [self._make_wmi_tag_query_config(tag_queries)]
}
self.assertRaises(IndexError, self.run_check, config, mocks={'log': logger})
self.assertEquals(logger.error.call_count, 1)
# Raise when user `link_source_property` is not a class's property
tag_queries = ["UnknownProperty", "Win32_Process", "Handle", "CommandLine"]
config = {
'instances': [self._make_wmi_tag_query_config(tag_queries)]
}
self.assertRaises(
TypeError, self.run_check, config,
force_reload=True, mocks={'log': logger}
)
self.assertEquals(logger.error.call_count, 2)
# Raise when user `target property` is not a target class's property
tag_queries = ["IDProcess", "Win32_Process", "Handle", "UnknownProperty"]
config = {
'instances': [self._make_wmi_tag_query_config(tag_queries)]
}
self.assertRaises(
TypeError, self.run_check, config,
force_reload=True, mocks={'log': logger}
)
self.assertEquals(logger.error.call_count, 3)
# Do not raise on result returned, print a warning and continue
tag_queries = [
"ResultNotMatchingAnyTargetProperty", "Win32_Process", "Handle", "CommandLine"
]
config = {
'instances': [self._make_wmi_tag_query_config(tag_queries)]
}
self.run_check(config, force_reload=True, mocks={'log': logger})
self.assertTrue(logger.warning.called)
def test_check(self):
"""
Assess check coverage.
"""
# Run the check
config = {
'instances': [self.WMI_CONFIG]
}
self.run_check(config)
for _, mname, _ in self.WMI_CONFIG['metrics']:
self.assertMetric(mname, tags=["foobar", "name:c:"], count=1)
self.assertMetric(mname, tags=["foobar", "name:d:"], count=1)
self.coverage_report()
| {
"repo_name": "WPMedia/dd-agent",
"path": "tests/checks/mock/test_wmi_check.py",
"copies": "14",
"size": "13506",
"license": "bsd-3-clause",
"hash": 8941542590753930000,
"line_mean": 33.8992248062,
"line_max": 112,
"alpha_frac": 0.5849992596,
"autogenerated": false,
"ratio": 3.869914040114613,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# 3p
from mock import Mock
# project
from tests.checks.common import AgentCheckTest
from tests.core.test_wmi import TestCommonWMI
class WMITestCase(AgentCheckTest, TestCommonWMI):
CHECK_NAME = 'wmi_check'
WMI_CONNECTION_CONFIG = {
'host': "myhost",
'namespace': "some/namespace",
'username': "datadog",
'password': "datadog",
'class': "Win32_OperatingSystem",
'metrics': [["NumberOfProcesses", "system.proc.count", "gauge"],
["NumberOfUsers", "system.users.count", "gauge"]]
}
WMI_CONFIG = {
'class': "Win32_PerfFormattedData_PerfDisk_LogicalDisk",
'metrics': [["AvgDiskBytesPerWrite", "winsys.disk.avgdiskbytesperwrite", "gauge"],
["FreeMegabytes", "winsys.disk.freemegabytes", "gauge"]],
'tag_by': "Name",
'constant_tags': ["foobar"],
}
WMI_MISSING_PROP_CONFIG = {
'class': "Win32_PerfRawData_PerfOS_System",
'metrics': [["UnknownCounter", "winsys.unknowncounter", "gauge"],
["MissingProperty", "this.will.not.be.reported", "gauge"]],
'tag_by': "Name"
}
WMI_CONFIG_NO_TAG_BY = {
'class': "Win32_PerfFormattedData_PerfDisk_LogicalDisk",
'metrics': [["AvgDiskBytesPerWrite", "winsys.disk.avgdiskbytesperwrite", "gauge"],
["FreeMegabytes", "winsys.disk.freemegabytes", "gauge"]],
}
WMI_CONFIG_FILTERS = {
'class': "Win32_PerfFormattedData_PerfDisk_LogicalDisk",
'metrics': [["AvgDiskBytesPerWrite", "winsys.disk.avgdiskbytesperwrite", "gauge"],
["FreeMegabytes", "winsys.disk.freemegabytes", "gauge"]],
'filters': [{'Name': "_Total"}],
}
WMI_TAG_QUERY_CONFIG_TEMPLATE = {
'class': "Win32_PerfFormattedData_PerfProc_Process",
'metrics': [["IOReadBytesPerSec", "proc.io.bytes_read", "gauge"]],
'filters': [{'Name': "chrome"}],
}
@classmethod
def _make_wmi_tag_query_config(cls, tag_queries):
"""
Helper to create a WMI configuration on
`Win32_PerfFormattedData_PerfProc_Process.IOReadBytesPerSec` with the given
`tag_queries` parameter.
"""
wmi_tag_query_config = {}
wmi_tag_query_config.update(cls.WMI_TAG_QUERY_CONFIG_TEMPLATE)
queries = tag_queries if all(isinstance(elem, list) for elem in tag_queries) \
else [tag_queries]
wmi_tag_query_config['tag_queries'] = queries
return wmi_tag_query_config
def _get_wmi_sampler(self):
"""
Helper to easily retrieve, if exists and unique, the WMISampler created
by the configuration.
Fails when multiple samplers are available.
"""
self.assertTrue(
self.check.wmi_samplers,
u"Unable to retrieve the WMISampler: no sampler was found"
)
self.assertEquals(
len(self.check.wmi_samplers), 1,
u"Unable to retrieve the WMISampler: expected a unique, but multiple were found"
)
return self.check.wmi_samplers.itervalues().next()
def test_wmi_connection(self):
"""
Establish a WMI connection to the specified host/namespace, with the right credentials.
"""
# Run check
config = {
'instances': [self.WMI_CONNECTION_CONFIG]
}
self.run_check(config)
# A WMISampler is cached
self.assertIn("myhost:some/namespace:Win32_OperatingSystem", self.check.wmi_samplers)
wmi_sampler = self.check.wmi_samplers["myhost:some/namespace:Win32_OperatingSystem"]
# Connection was established with the right parameters
self.assertWMIConn(wmi_sampler, "myhost")
self.assertWMIConn(wmi_sampler, "some/namespace")
def test_wmi_sampler_initialization(self):
"""
An instance creates its corresponding WMISampler.
"""
# Run check
config = {
'instances': [self.WMI_CONFIG_FILTERS]
}
self.run_check(config)
# Retrieve the sampler
wmi_sampler = self._get_wmi_sampler()
# Assert the sampler
self.assertEquals(wmi_sampler.class_name, "Win32_PerfFormattedData_PerfDisk_LogicalDisk")
self.assertEquals(wmi_sampler.property_names, ["AvgDiskBytesPerWrite", "FreeMegabytes"])
self.assertEquals(wmi_sampler.filters, [{'Name': "_Total"}])
def test_wmi_properties(self):
"""
Compute a (metric name, metric type) by WMI property map and a property list.
"""
# Set up the check
config = {
'instances': [self.WMI_CONNECTION_CONFIG]
}
self.run_check(config)
# WMI props are cached
self.assertIn("myhost:some/namespace:Win32_OperatingSystem", self.check.wmi_props)
metric_name_and_type_by_property, properties = \
self.check.wmi_props["myhost:some/namespace:Win32_OperatingSystem"]
# Assess
self.assertEquals(
metric_name_and_type_by_property,
{
'numberofprocesses': ("system.proc.count", "gauge"),
'numberofusers': ("system.users.count", "gauge")
}
)
self.assertEquals(properties, ["NumberOfProcesses", "NumberOfUsers"])
def test_metric_extraction(self):
"""
Extract metrics from WMI query results.
"""
# Set up the check
config = {
'instances': [self.WMI_CONFIG]
}
self.run_check(config)
# Retrieve the sampler
wmi_sampler = self._get_wmi_sampler()
# Extract metrics
metrics = self.check._extract_metrics(wmi_sampler, "name", [], ["foobar"])
# Assess
WMIMetric = self.load_class("WMIMetric")
expected_metrics = [
WMIMetric("freemegabytes", 19742, ["foobar", "name:c:"]),
WMIMetric("avgdiskbytesperwrite", 1536, ["foobar", "name:c:"]),
WMIMetric("freemegabytes", 19742, ["foobar", "name:d:"]),
WMIMetric("avgdiskbytesperwrite", 1536, ["foobar", "name:d:"]),
]
self.assertEquals(metrics, expected_metrics)
def test_missing_property(self):
"""
        Do not raise on missing properties; log a warning instead.
"""
# Set up the check
config = {
'instances': [self.WMI_MISSING_PROP_CONFIG]
}
logger = Mock()
self.run_check(config, mocks={'log': logger})
self.assertTrue(logger.warning.called)
def test_mandatory_tag_by(self):
"""
Exception is raised when the result returned by the WMI query contains multiple rows
but no `tag_by` value was given.
"""
# Valid configuration
config = {
'instances': [self.WMI_CONFIG]
}
self.run_check(config)
# Invalid
MissingTagBy = self.load_class("MissingTagBy")
config = {
'instances': [self.WMI_CONFIG_NO_TAG_BY]
}
self.assertRaises(MissingTagBy, self.run_check, config, force_reload=True)
def test_query_tag_properties(self):
"""
WMISampler's property list contains `metrics` and `tag_queries` ones.
"""
# Set up the check
tag_queries = ["IDProcess", "Win32_Process", "Handle", "CommandLine"]
config = {
'instances': [self._make_wmi_tag_query_config(tag_queries)]
}
self.run_check(config)
# WMI props are cached
self.assertIn(
"localhost:root\\cimv2:Win32_PerfFormattedData_PerfProc_Process",
self.check.wmi_props
)
_, properties = \
self.check.wmi_props["localhost:root\\cimv2:Win32_PerfFormattedData_PerfProc_Process"]
self.assertEquals(properties, ["IOReadBytesPerSec", "IDProcess"])
def test_query_tags(self):
"""
Tag extracted metrics with `tag_queries` queries.
"""
# Set up the check
tag_queries = ["IDProcess", "Win32_Process", "Handle", "CommandLine"]
config = {
'instances': [self._make_wmi_tag_query_config(tag_queries)]
}
self.run_check(config)
# Retrieve the sampler
wmi_sampler = self._get_wmi_sampler()
# Extract metrics
metrics = self.check._extract_metrics(
wmi_sampler, "name",
tag_queries=[tag_queries], constant_tags=["foobar"]
)
# Assess
WMIMetric = self.load_class("WMIMetric")
expected_metrics = [
WMIMetric("ioreadbytespersec", 20455, tags=['foobar', 'commandline:c:\\'
'programfiles(x86)\\google\\chrome\\application\\chrome.exe']),
WMIMetric('idprocess', 4036, tags=['foobar', 'commandline:c:\\'
'programfiles(x86)\\google\\chrome\\application\\chrome.exe']),
]
self.assertEquals(metrics, expected_metrics)
def test_query_tags_failures(self):
"""
Check different `tag_queries` failure scenarios.
"""
# Mock the logger so it can be traced
logger = Mock()
# Raise when user `tag_queries` input has a wrong format
tag_queries = ["IDProcess", "MakesNoSense"]
config = {
'instances': [self._make_wmi_tag_query_config(tag_queries)]
}
self.assertRaises(IndexError, self.run_check, config, mocks={'log': logger})
self.assertEquals(logger.error.call_count, 1)
# Raise when user `link_source_property` is not a class's property
tag_queries = ["UnknownProperty", "Win32_Process", "Handle", "CommandLine"]
config = {
'instances': [self._make_wmi_tag_query_config(tag_queries)]
}
self.assertRaises(
TypeError, self.run_check, config,
force_reload=True, mocks={'log': logger}
)
self.assertEquals(logger.error.call_count, 2)
# Raise when user `target property` is not a target class's property
tag_queries = ["IDProcess", "Win32_Process", "Handle", "UnknownProperty"]
config = {
'instances': [self._make_wmi_tag_query_config(tag_queries)]
}
self.assertRaises(
TypeError, self.run_check, config,
force_reload=True, mocks={'log': logger}
)
self.assertEquals(logger.error.call_count, 3)
        # Do not raise when the result matches no target property; log a warning and continue
tag_queries = [
"ResultNotMatchingAnyTargetProperty", "Win32_Process", "Handle", "CommandLine"
]
config = {
'instances': [self._make_wmi_tag_query_config(tag_queries)]
}
self.run_check(config, force_reload=True, mocks={'log': logger})
self.assertTrue(logger.warning.called)
def test_check(self):
"""
Assess check coverage.
"""
# Run the check
config = {
'instances': [self.WMI_CONFIG]
}
self.run_check(config)
for _, mname, _ in self.WMI_CONFIG['metrics']:
self.assertMetric(mname, tags=["foobar", "name:c:"], count=1)
self.assertMetric(mname, tags=["foobar", "name:d:"], count=1)
self.coverage_report()
| {
"repo_name": "Mashape/dd-agent",
"path": "tests/checks/mock/test_wmi_check.py",
"copies": "1",
"size": "11325",
"license": "bsd-3-clause",
"hash": 985024455868221200,
"line_mean": 34.0619195046,
"line_max": 98,
"alpha_frac": 0.5836644592,
"autogenerated": false,
"ratio": 3.850731043862632,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4934395503062632,
"avg_score": null,
"num_lines": null
} |
# 3p
from nose.plugins.attrib import attr
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
@attr(requires='postgres')
class TestPostgres(AgentCheckTest):
CHECK_NAME = 'postgres'
def test_checks(self):
host = 'localhost'
port = 15432
dbname = 'conmon_test'
instances = [
{
'host': host,
'port': port,
'username': 'conmon',
'password': 'conmon',
'dbname': dbname,
'relations': ['persons'],
'custom_metrics': [{
'descriptors': [('datname', 'customdb')],
'metrics': {
'numbackends': ['custom.numbackends', 'Gauge'],
},
'query': "SELECT datname, %s FROM pg_stat_database WHERE datname = 'conmon_test' LIMIT(1)",
'relation': False,
}]
},
{
'host': host,
'port': port,
'username': 'conmon',
'password': 'conmon',
'dbname': 'dogs',
'relations': ['breed', 'kennel']
}
]
self.run_check_twice(dict(instances=instances))
# Useful to get server version
# FIXME: Not great, should have a function like that available
key = (host, port, dbname)
db = self.check.dbs[key]
# Testing DB_METRICS scope
COMMON_METRICS = [
'postgresql.connections',
'postgresql.commits',
'postgresql.rollbacks',
'postgresql.disk_read',
'postgresql.buffer_hit',
'postgresql.rows_returned',
'postgresql.rows_fetched',
'postgresql.rows_inserted',
'postgresql.rows_updated',
'postgresql.rows_deleted',
'postgresql.database_size',
]
for mname in COMMON_METRICS:
for db in ('conmon_test', 'dogs'):
self.assertMetric(mname, count=1, tags=['db:%s' % db])
NEWER_92_METRICS = [
'postgresql.deadlocks',
'postgresql.temp_bytes',
'postgresql.temp_files',
]
if self.check._is_9_2_or_above(key, db):
for mname in NEWER_92_METRICS:
for db in ('conmon_test', 'dogs'):
self.assertMetric(mname, count=1, tags=['db:%s' % db])
# Testing BGW_METRICS scope
COMMON_BGW_METRICS = [
'postgresql.bgwriter.checkpoints_timed',
'postgresql.bgwriter.checkpoints_requested',
'postgresql.bgwriter.buffers_checkpoint',
'postgresql.bgwriter.buffers_clean',
'postgresql.bgwriter.maxwritten_clean',
'postgresql.bgwriter.buffers_backend',
'postgresql.bgwriter.buffers_alloc',
]
for mname in COMMON_BGW_METRICS:
self.assertMetric(mname, count=1)
NEWER_91_BGW_METRICS = [
'postgresql.bgwriter.buffers_backend_fsync',
]
if self.check._is_9_1_or_above(key, db):
for mname in NEWER_91_BGW_METRICS:
self.assertMetric(mname, count=1)
NEWER_92_BGW_METRICS = [
'postgresql.bgwriter.write_time',
'postgresql.bgwriter.sync_time',
]
if self.check._is_9_2_or_above(key, db):
for mname in NEWER_92_BGW_METRICS:
self.assertMetric(mname, count=1)
# FIXME: Test postgresql.locks
# Relation specific metrics
RELATION_METRICS = [
'postgresql.seq_scans',
'postgresql.seq_rows_read',
'postgresql.index_scans',
'postgresql.index_rows_fetched',
'postgresql.rows_inserted',
'postgresql.rows_updated',
'postgresql.rows_deleted',
'postgresql.rows_hot_updated',
'postgresql.live_rows',
'postgresql.dead_rows',
]
SIZE_METRICS = [
'postgresql.table_size',
'postgresql.index_size',
'postgresql.total_size',
]
STATIO_METRICS = [
'postgresql.heap_blocks_read',
'postgresql.heap_blocks_hit',
'postgresql.index_blocks_read',
'postgresql.index_blocks_hit',
'postgresql.toast_blocks_read',
'postgresql.toast_blocks_hit',
'postgresql.toast_index_blocks_read',
'postgresql.toast_index_blocks_hit',
]
for inst in instances:
for rel in inst.get('relations', []):
expected_tags = ['db:%s' % inst['dbname'], 'table:%s' % rel]
expected_rel_tags = ['db:%s' % inst['dbname'], 'table:%s' % rel, 'schema:public']
for mname in RELATION_METRICS:
count = 1
# We only build a test index and stimulate it on breed
# in the dogs DB, so the other index metrics shouldn't be
# here.
if 'index' in mname and rel != 'breed':
count = 0
self.assertMetric(mname, count=count, tags=expected_rel_tags)
for mname in SIZE_METRICS:
self.assertMetric(mname, count=1, tags=expected_tags)
for mname in STATIO_METRICS:
at_least = None
count = 1
if '.index' in mname and rel != 'breed':
count = 0
# FIXME: toast are not reliable, need to do some more setup
# to get some values here I guess
if 'toast' in mname:
                        at_least = 0  # easy way to mark a flaky metric without impacting coverage
count = None
self.assertMetric(mname, count=count, at_least=at_least, tags=expected_rel_tags)
# Index metrics
IDX_METRICS = [
'postgresql.index_scans',
'postgresql.index_rows_read',
'postgresql.index_rows_fetched',
]
# we have a single index defined!
expected_tags = ['db:dogs', 'table:breed', 'index:breed_names', 'schema:public']
for mname in IDX_METRICS:
self.assertMetric(mname, count=1, tags=expected_tags)
# instance connection metrics
CONNECTION_METRICS = [
'postgresql.max_connections',
'postgresql.percent_usage_connections',
]
for mname in CONNECTION_METRICS:
self.assertMetric(mname, count=1)
# db level connections
for inst in instances:
expected_tags = ['db:%s' % inst['dbname']]
self.assertMetric('postgresql.connections', count=1, tags=expected_tags)
# By schema metrics
self.assertMetric('postgresql.table.count', value=2, count=1, tags=['schema:public'])
self.assertMetric('postgresql.db.count', value=2, count=1)
# Our custom metric
self.assertMetric('custom.numbackends', value=1, tags=['customdb:conmon_test'])
# Test service checks
self.assertServiceCheck('postgres.can_connect',
count=1, status=AgentCheck.OK,
tags=['host:localhost', 'port:15432', 'db:conmon_test']
)
self.assertServiceCheck('postgres.can_connect',
count=1, status=AgentCheck.OK,
tags=['host:localhost', 'port:15432', 'db:dogs']
)
# Assert service metadata
self.assertServiceMetadata(['version'], count=2)
self.coverage_report()
| {
"repo_name": "pmav99/praktoras",
"path": "tests/checks/integration/test_postgres.py",
"copies": "1",
"size": "7727",
"license": "bsd-3-clause",
"hash": 5406629917522716000,
"line_mean": 34.2831050228,
"line_max": 111,
"alpha_frac": 0.5227125663,
"autogenerated": false,
"ratio": 4.215493726132024,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007952127466636877,
"num_lines": 219
} |
# 3p
from nose.plugins.attrib import attr
# project
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
# sample from /status?json
# {
# "accepted conn": 350,
# "active processes": 1,
# "idle processes": 2,
# "listen queue": 0,
# "listen queue len": 0,
# "max active processes": 2,
# "max children reached": 0,
# "max listen queue": 0,
# "pool": "www",
# "process manager": "dynamic",
# "slow requests": 0,
# "start since": 4758,
# "start time": 1426601833,
# "total processes": 3
# }
@attr(requires='phpfpm')
class PHPFPMCheckTest(AgentCheckTest):
CHECK_NAME = 'php_fpm'
def test_bad_status(self):
instance = {
'status_url': 'http://localhost:9001/status',
'tags': ['expectedbroken']
}
self.assertRaises(Exception, self.run_check, {'instances': [instance]})
def test_bad_ping(self):
instance = {
'ping_url': 'http://localhost:9001/status',
'tags': ['expectedbroken']
}
self.run_check({'instances': [instance]})
self.assertServiceCheck(
'php_fpm.can_ping',
status=AgentCheck.CRITICAL,
tags=['ping_url:http://localhost:9001/status'],
count=1
)
self.coverage_report()
def test_bad_ping_reply(self):
instance = {
'ping_url': 'http://localhost:42424/ping',
'ping_reply': 'blah',
'tags': ['expectedbroken']
}
self.run_check({'instances': [instance]})
self.assertServiceCheck(
'php_fpm.can_ping',
status=AgentCheck.CRITICAL,
tags=['ping_url:http://localhost:42424/ping'],
count=1
)
self.coverage_report()
def test_status(self):
instance = {
'status_url': 'http://localhost:42424/status',
'ping_url': 'http://localhost:42424/ping',
'tags': ['cluster:forums']
}
self.run_check_twice({'instances': [instance]})
metrics = [
'php_fpm.listen_queue.size',
'php_fpm.processes.idle',
'php_fpm.processes.active',
'php_fpm.processes.total',
'php_fpm.requests.slow',
'php_fpm.requests.accepted',
]
expected_tags = ['cluster:forums', 'pool:www']
for mname in metrics:
self.assertMetric(mname, count=1, tags=expected_tags)
self.assertMetric('php_fpm.processes.idle', count=1, value=1)
self.assertMetric('php_fpm.processes.total', count=1, value=2)
self.assertServiceCheck('php_fpm.can_ping', status=AgentCheck.OK,
count=1,
tags=['ping_url:http://localhost:42424/ping'])
self.assertMetric('php_fpm.processes.max_reached', count=1)
| {
"repo_name": "packetloop/dd-agent",
"path": "tests/checks/integration/test_php_fpm.py",
"copies": "45",
"size": "2904",
"license": "bsd-3-clause",
"hash": -1853145356691680300,
"line_mean": 27.4705882353,
"line_max": 79,
"alpha_frac": 0.5468319559,
"autogenerated": false,
"ratio": 3.6254681647940075,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# 3p
from nose.plugins.attrib import attr
# project
from tests.checks.common import AgentCheckTest
CONFIG = {
'init_config': {},
'instances': [
{
'rabbitmq_api_url': 'http://localhost:15672/api/',
'rabbitmq_user': 'guest',
'rabbitmq_pass': 'guest',
'queues': ['test1'],
}
]
}
CONFIG_REGEX = {
'init_config': {},
'instances': [
{
'rabbitmq_api_url': 'http://localhost:15672/api/',
'rabbitmq_user': 'guest',
'rabbitmq_pass': 'guest',
'queues_regexes': ['test\d+'],
}
]
}
CONFIG_WITH_FAMILY = {
'init_config': {},
'instances': [
{
'rabbitmq_api_url': 'http://localhost:15672/api/',
'rabbitmq_user': 'guest',
'rabbitmq_pass': 'guest',
'tag_families': True,
'queues_regexes': ['(test)\d+'],
}
]
}
COMMON_METRICS = [
'rabbitmq.node.fd_used',
'rabbitmq.node.mem_used',
'rabbitmq.node.run_queue',
'rabbitmq.node.sockets_used',
'rabbitmq.node.partitions'
]
Q_METRICS = [
'consumers',
'memory',
'messages',
'messages.rate',
'messages_ready',
'messages_ready.rate',
'messages_unacknowledged',
'messages_unacknowledged.rate',
'messages.publish.count',
'messages.publish.rate',
]
@attr(requires='rabbitmq')
class RabbitMQCheckTest(AgentCheckTest):
CHECK_NAME = 'rabbitmq'
def test_check(self):
self.run_check(CONFIG)
# Node attributes
for mname in COMMON_METRICS:
self.assertMetricTagPrefix(mname, 'rabbitmq_node', count=1)
self.assertMetric('rabbitmq.node.partitions', value=0, count=1)
# Queue attributes, should be only one queue fetched
# TODO: create a 'fake consumer' and get missing metrics
# active_consumers, acks, delivers, redelivers
for mname in Q_METRICS:
self.assertMetricTag('rabbitmq.queue.%s' %
mname, 'rabbitmq_queue:test1', count=1)
self.assertServiceCheckOK('rabbitmq.aliveness', tags=['vhost:/'])
self.coverage_report()
def test_queue_regex(self):
self.run_check(CONFIG_REGEX)
# Node attributes
for mname in COMMON_METRICS:
self.assertMetricTagPrefix(mname, 'rabbitmq_node', count=1)
for mname in Q_METRICS:
self.assertMetricTag('rabbitmq.queue.%s' %
mname, 'rabbitmq_queue:test1', count=1)
self.assertMetricTag('rabbitmq.queue.%s' %
mname, 'rabbitmq_queue:test5', count=1)
self.assertMetricTag('rabbitmq.queue.%s' %
mname, 'rabbitmq_queue:tralala', count=0)
self.assertServiceCheckOK('rabbitmq.aliveness', tags=['vhost:/'])
self.coverage_report()
def test_family_tagging(self):
self.run_check(CONFIG_WITH_FAMILY)
# Node attributes
for mname in COMMON_METRICS:
self.assertMetricTagPrefix(mname, 'rabbitmq_node', count=1)
for mname in Q_METRICS:
self.assertMetricTag('rabbitmq.queue.%s' %
mname, 'rabbitmq_queue_family:test', count=2)
self.assertServiceCheckOK('rabbitmq.aliveness', tags=['vhost:/'])
self.coverage_report()
| {
"repo_name": "pmav99/praktoras",
"path": "tests/checks/integration/test_rabbitmq.py",
"copies": "4",
"size": "3408",
"license": "bsd-3-clause",
"hash": -2286134399942233600,
"line_mean": 26.9344262295,
"line_max": 78,
"alpha_frac": 0.5633802817,
"autogenerated": false,
"ratio": 3.6139978791092258,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0003727707883254967,
"num_lines": 122
} |
# 3p
from nose.plugins.attrib import attr
# project
from tests.checks.common import AgentCheckTest
import requests
@attr(requires='powerdns_recursor')
class TestPowerDNSRecursorCheck(AgentCheckTest):
CHECK_NAME = 'powerdns_recursor'
GAUGE_METRICS = [
'cache-entries',
'concurrent-queries',
'failed-host-entries',
'negcache-entries',
'packetcache-entries',
'throttle-entries',
]
RATE_METRICS = [
'all-outqueries',
'answers-slow',
'answers0-1',
'answers1-10',
'answers10-100',
'answers100-1000',
'cache-hits',
'cache-misses',
'chain-resends',
'case-mismatches',
'client-parse-errors',
'dont-outqueries',
'ipv6-outqueries',
'ipv6-questions',
'malloc-bytes',
'noerror-answers',
'nxdomain-answers',
'max-mthread-stack',
'outgoing-timeouts',
'over-capacity-drops',
'packetcache-hits',
'packetcache-misses',
'policy-drops',
'qa-latency',
'questions',
'server-parse-errors',
'servfail-answers',
'spoof-prevents',
'sys-msec',
'tcp-client-overflow',
'tcp-clients',
'tcp-outqueries',
'tcp-questions',
'throttled-out',
'throttled-outqueries',
'unauthorized-tcp',
'unauthorized-udp',
'unexpected-packets',
'unreachables',
]
GAUGE_METRICS_V4 = [
'fd-usage',
]
RATE_METRICS_V4 = [
'auth4-answers-slow',
'auth4-answers0-1',
'auth4-answers1-10',
'auth4-answers10-100',
'auth4-answers100-1000',
'auth6-answers-slow',
'auth6-answers0-1',
'auth6-answers1-10',
'auth6-answers10-100',
'auth6-answers100-1000',
'dlg-only-drops',
'dnssec-queries',
'dnssec-result-bogus',
'dnssec-result-indeterminate',
'dnssec-result-insecure',
'dnssec-result-nta',
'dnssec-result-secure',
'dnssec-validations',
'edns-ping-matches',
'edns-ping-mismatches',
'ignored-packets',
'no-packet-error',
'noedns-outqueries',
'noping-outqueries',
'nsset-invalidations',
'nsspeeds-entries',
'outgoing4-timeouts',
'outgoing6-timeouts',
'policy-result-custom',
'policy-result-drop',
'policy-result-noaction',
'policy-result-nodata',
'policy-result-nxdomain',
'policy-result-truncate',
'real-memory-usage',
'resource-limits',
'too-old-drops',
'udp-in-errors',
'udp-noport-errors',
'udp-recvbuf-errors',
'udp-sndbuf-errors',
'uptime',
'user-msec',
]
METRIC_FORMAT = 'powerdns.recursor.{}'
def __init__(self, *args, **kwargs):
AgentCheckTest.__init__(self, *args, **kwargs)
self.config = {"instances": [{
"host": "127.0.0.1",
"port": "8082",
"api_key": "pdns_api_key"
}]}
# Really a basic check to see if all metrics are there
def test_check(self):
service_check_tags = ['recursor_host:127.0.0.1', 'recursor_port:8082']
# get version and test v3 first.
version = self._get_pdns_version()
if version == 3:
self.run_check_twice(self.config)
# Assert metrics
for metric in self.GAUGE_METRICS:
self.assertMetric(self.METRIC_FORMAT.format(metric), tags=[])
for metric in self.RATE_METRICS:
self.assertMetric(self.METRIC_FORMAT.format(metric), tags=[])
self.assertServiceCheckOK('powerdns.recursor.can_connect', tags=service_check_tags)
self.coverage_report()
elif version == 4:
# copy the configuration and set the version to 4
config = self.config.copy()
config['instances'][0]['version'] = 4
self.run_check_twice(config)
# Assert metrics
for metric in self.GAUGE_METRICS + self.GAUGE_METRICS_V4:
self.assertMetric(self.METRIC_FORMAT.format(metric), tags=[])
for metric in self.RATE_METRICS + self.RATE_METRICS_V4:
self.assertMetric(self.METRIC_FORMAT.format(metric), tags=[])
self.assertServiceCheckOK('powerdns.recursor.can_connect', tags=service_check_tags)
self.coverage_report()
else:
print("powerdns_recursor unknown version.")
self.assertServiceCheckCritical('powerdns.recursor.can_connect', tags=service_check_tags)
def test_tags(self):
version = self._get_pdns_version()
config = self.config.copy()
tags = ['foo:bar']
config['instances'][0]['tags'] = ['foo:bar']
if version == 3:
self.run_check_twice(config)
# Assert metrics v3
for metric in self.GAUGE_METRICS:
self.assertMetric(self.METRIC_FORMAT.format(metric), tags=tags)
for metric in self.RATE_METRICS:
self.assertMetric(self.METRIC_FORMAT.format(metric), tags=tags)
elif version == 4:
config['instances'][0]['version'] = 4
self.run_check_twice(config)
            # Assert metrics v4
for metric in self.GAUGE_METRICS + self.GAUGE_METRICS_V4:
self.assertMetric(self.METRIC_FORMAT.format(metric), tags=tags)
for metric in self.RATE_METRICS + self.RATE_METRICS_V4:
self.assertMetric(self.METRIC_FORMAT.format(metric), tags=tags)
service_check_tags = ['recursor_host:127.0.0.1', 'recursor_port:8082']
self.assertServiceCheckOK('powerdns.recursor.can_connect', tags=service_check_tags)
self.coverage_report()
def test_bad_config(self):
config = self.config.copy()
config['instances'][0]['port'] = 1111
service_check_tags = ['recursor_host:127.0.0.1', 'recursor_port:1111']
self.assertRaises(
Exception,
lambda: self.run_check(config)
)
self.assertServiceCheckCritical('powerdns.recursor.can_connect', tags=service_check_tags)
self.coverage_report()
def test_bad_api_key(self):
config = self.config.copy()
config['instances'][0]['api_key'] = 'nope'
service_check_tags = ['recursor_host:127.0.0.1', 'recursor_port:8082']
self.assertRaises(
Exception,
lambda: self.run_check(config)
)
self.assertServiceCheckCritical('powerdns.recursor.can_connect', tags=service_check_tags)
self.coverage_report()
def test_very_bad_config(self):
for config in [{}, {"host": "localhost"}, {"port": 1000}, {"host": "localhost", "port": 1000}]:
self.assertRaises(
Exception,
lambda: self.run_check({"instances": [config]})
)
self.coverage_report()
def _get_pdns_version(self):
headers = {"X-API-Key": self.config['instances'][0]['api_key']}
url = "http://{}:{}/api/v1/servers/localhost/statistics".format(self.config['instances'][0]['host'],
self.config['instances'][0]['port'])
request = requests.get(url, headers=headers)
if request.status_code == 404:
return 3
else:
return 4
| {
"repo_name": "takus/dd-agent",
"path": "tests/checks/integration/test_powerdns_recursor.py",
"copies": "1",
"size": "7547",
"license": "bsd-3-clause",
"hash": -2553422233908787000,
"line_mean": 31.8130434783,
"line_max": 108,
"alpha_frac": 0.5615476348,
"autogenerated": false,
"ratio": 3.7324431256182,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9792036869290954,
"avg_score": 0.0003907782254489297,
"num_lines": 230
} |
# 3p
from nose.plugins.attrib import attr
# project
from tests.checks.common import AgentCheckTest
@attr(requires='etcd')
class CheckEtcdTest(AgentCheckTest):
CHECK_NAME = "etcd"
STORE_METRICS = [
'compareanddelete.fail',
'compareanddelete.success',
'compareandswap.fail',
'compareandswap.success',
'create.fail',
'create.success',
'delete.fail',
'delete.success',
'expire.count',
'gets.fail',
'gets.success',
'sets.fail',
'sets.success',
'update.fail',
'update.success',
'watchers',
]
def __init__(self, *args, **kwargs):
AgentCheckTest.__init__(self, *args, **kwargs)
self.config = {"instances": [{"url": "http://localhost:4001"}]}
def test_metrics(self):
self.run_check_twice(self.config)
tags = ['url:http://localhost:4001', 'etcd_state:leader']
for mname in self.STORE_METRICS:
self.assertMetric('etcd.store.%s' % mname, tags=tags, count=1)
self.assertMetric('etcd.self.send.appendrequest.count', tags=tags, count=1)
self.assertMetric('etcd.self.recv.appendrequest.count', tags=tags, count=1)
self.assertServiceCheckOK(self.check.SERVICE_CHECK_NAME,
count=1,
tags=['url:http://localhost:4001'])
self.coverage_report()
# FIXME: not really an integration test, should be pretty easy
# to spin up a cluster to test that.
def test_followers(self):
mock = {
"followers": {
"etcd-node1": {
"counts": {
"fail": 1212,
"success": 4163176
},
"latency": {
"average": 2.7206299430775007,
"current": 1.486487,
"maximum": 2018.410279,
"minimum": 1.011763,
"standardDeviation": 6.246990702203536
}
},
"etcd-node3": {
"counts": {
"fail": 1378,
"success": 4164598
},
"latency": {
"average": 2.707100125761001,
"current": 1.666258,
"maximum": 1409.054765,
"minimum": 0.998415,
"standardDeviation": 5.910089773061448
}
}
},
"leader": "etcd-node2"
}
mocks = {
'_get_leader_metrics': lambda u, t: mock
}
self.run_check_twice(self.config, mocks=mocks)
common_leader_tags = ['url:http://localhost:4001', 'etcd_state:leader']
follower_tags = [
common_leader_tags[:] + ['follower:etcd-node1'],
common_leader_tags[:] + ['follower:etcd-node3'],
]
for fol_tags in follower_tags:
self.assertMetric('etcd.leader.counts.fail', count=1, tags=fol_tags)
self.assertMetric('etcd.leader.counts.success', count=1, tags=fol_tags)
self.assertMetric('etcd.leader.latency.avg', count=1, tags=fol_tags)
self.assertMetric('etcd.leader.latency.min', count=1, tags=fol_tags)
self.assertMetric('etcd.leader.latency.max', count=1, tags=fol_tags)
self.assertMetric('etcd.leader.latency.stddev', count=1, tags=fol_tags)
self.assertMetric('etcd.leader.latency.current', count=1, tags=fol_tags)
def test_bad_config(self):
self.assertRaises(Exception,
lambda: self.run_check({"instances": [{"url": "http://localhost:4001/test"}]}))
self.assertServiceCheckCritical(self.check.SERVICE_CHECK_NAME,
count=1,
tags=['url:http://localhost:4001/test/v2/stats/self'])
self.coverage_report()
| {
"repo_name": "Shopify/dd-agent",
"path": "tests/checks/integration/test_etcd.py",
"copies": "1",
"size": "4088",
"license": "bsd-3-clause",
"hash": -4014821812009430000,
"line_mean": 34.2413793103,
"line_max": 105,
"alpha_frac": 0.5034246575,
"autogenerated": false,
"ratio": 4.063618290258449,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5067042947758449,
"avg_score": null,
"num_lines": null
} |
# 3p
import ntplib
# project
from checks import AgentCheck
from utils.ntp import get_ntp_args, set_user_ntp_settings
DEFAULT_OFFSET_THRESHOLD = 60 # in seconds
class NtpCheck(AgentCheck):
DEFAULT_MIN_COLLECTION_INTERVAL = 900 # in seconds
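    # Illustrative sketch (assumed config layout, not from the original file): a
    # minimal ntp.yaml instance overriding the alert threshold might look like:
    #   instances:
    #     - offset_threshold: 30   # seconds; defaults to DEFAULT_OFFSET_THRESHOLD (60)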
def check(self, instance):
service_check_msg = None
offset_threshold = instance.get('offset_threshold', DEFAULT_OFFSET_THRESHOLD)
try:
offset_threshold = int(offset_threshold)
except (TypeError, ValueError):
raise Exception('Must specify an integer value for offset_threshold. Configured value is %s' % repr(offset_threshold))
set_user_ntp_settings(dict(instance))
req_args = get_ntp_args()
self.log.debug("Using ntp host: {0}".format(req_args['host']))
try:
ntp_stats = ntplib.NTPClient().request(**req_args)
except ntplib.NTPException:
self.log.debug("Could not connect to NTP Server {0}".format(
req_args['host']))
status = AgentCheck.UNKNOWN
ntp_ts = None
else:
ntp_offset = ntp_stats.offset
# Use the ntp server's timestamp for the time of the result in
# case the agent host's clock is messed up.
ntp_ts = ntp_stats.recv_time
self.gauge('ntp.offset', ntp_offset, timestamp=ntp_ts)
if abs(ntp_offset) > offset_threshold:
status = AgentCheck.CRITICAL
service_check_msg = "Offset {0} secs higher than offset threshold ({1} secs)".format(ntp_offset, offset_threshold)
else:
status = AgentCheck.OK
self.service_check('ntp.in_sync', status, timestamp=ntp_ts, message=service_check_msg)
| {
"repo_name": "atlassian/dd-agent",
"path": "checks.d/ntp.py",
"copies": "34",
"size": "1756",
"license": "bsd-3-clause",
"hash": 7355940819758120000,
"line_mean": 34.12,
"line_max": 130,
"alpha_frac": 0.6173120729,
"autogenerated": false,
"ratio": 4.027522935779817,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000748427966774739,
"num_lines": 50
} |
# 3p
import requests
# project
from checks import AgentCheck
from util import headers
class PHPFPMCheck(AgentCheck):
"""
Tracks basic php-fpm metrics via the status module
Requires php-fpm pools to have the status option.
See http://www.php.net/manual/de/install.fpm.configuration.php#pm.status-path for more details
"""
SERVICE_CHECK_NAME = 'php_fpm.can_ping'
GAUGES = {
'listen queue': 'php_fpm.listen_queue.size',
'idle processes': 'php_fpm.processes.idle',
'active processes': 'php_fpm.processes.active',
'total processes': 'php_fpm.processes.total',
}
MONOTONIC_COUNTS = {
'accepted conn': 'php_fpm.requests.accepted',
'max children reached': 'php_fpm.processes.max_reached',
'slow requests': 'php_fpm.requests.slow',
}
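    # Illustrative sketch (assumed config layout, not from the original file): the
    # check() method below reads these instance keys, so a minimal php_fpm.yaml
    # instance might look like:
    #   instances:
    #     - status_url: http://localhost/status
    #       ping_url: http://localhost/ping
    #       ping_reply: pong          # optional; defaults to 'pong'
    #       user: myuser              # optional HTTP basic auth
    #       password: mypassword
    #       tags:
    #         - cluster:forums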
def check(self, instance):
status_url = instance.get('status_url')
ping_url = instance.get('ping_url')
ping_reply = instance.get('ping_reply')
auth = None
user = instance.get('user')
password = instance.get('password')
tags = instance.get('tags', [])
if user and password:
auth = (user, password)
if status_url is None and ping_url is None:
raise Exception("No status_url or ping_url specified for this instance")
pool = None
status_exception = None
if status_url is not None:
try:
pool = self._process_status(status_url, auth, tags)
except Exception as e:
status_exception = e
if ping_url is not None:
self._process_ping(ping_url, ping_reply, auth, tags, pool)
# pylint doesn't understand that we are raising this only if it's here
if status_exception is not None:
raise status_exception # pylint: disable=E0702
def _process_status(self, status_url, auth, tags):
data = {}
try:
# TODO: adding the 'full' parameter gets you per-process detailed
            # information, which could be nice to parse and output as metrics
resp = requests.get(status_url, auth=auth,
headers=headers(self.agentConfig),
params={'json': True})
resp.raise_for_status()
data = resp.json()
except Exception as e:
self.log.error("Failed to get metrics from {0}.\nError {1}".format(status_url, e))
raise
pool_name = data.get('pool', 'default')
metric_tags = tags + ["pool:{0}".format(pool_name)]
for key, mname in self.GAUGES.iteritems():
if key not in data:
self.log.warn("Gauge metric {0} is missing from FPM status".format(key))
continue
self.gauge(mname, int(data[key]), tags=metric_tags)
for key, mname in self.MONOTONIC_COUNTS.iteritems():
if key not in data:
self.log.warn("Counter metric {0} is missing from FPM status".format(key))
continue
self.monotonic_count(mname, int(data[key]), tags=metric_tags)
# return pool, to tag the service check with it if we have one
return pool_name
def _process_ping(self, ping_url, ping_reply, auth, tags, pool_name):
if ping_reply is None:
ping_reply = 'pong'
sc_tags = ["ping_url:{0}".format(ping_url)]
try:
# TODO: adding the 'full' parameter gets you per-process detailed
            # information, which could be nice to parse and output as metrics
resp = requests.get(ping_url, auth=auth,
headers=headers(self.agentConfig))
resp.raise_for_status()
if ping_reply not in resp.text:
raise Exception("Received unexpected reply to ping {0}".format(resp.text))
except Exception as e:
self.log.error("Failed to ping FPM pool {0} on URL {1}."
"\nError {2}".format(pool_name, ping_url, e))
self.service_check(self.SERVICE_CHECK_NAME,
AgentCheck.CRITICAL, tags=sc_tags, message=str(e))
else:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=sc_tags)
| {
"repo_name": "darron/dd-agent",
"path": "checks.d/php_fpm.py",
"copies": "34",
"size": "4348",
"license": "bsd-3-clause",
"hash": 5440082537980633000,
"line_mean": 35.5378151261,
"line_max": 98,
"alpha_frac": 0.5777368905,
"autogenerated": false,
"ratio": 3.97804208600183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# 3p
import simplejson as json
# datadog
from datadog import api
from datadog.dogshell.common import report_errors, report_warnings
class DowntimeClient(object):
@classmethod
def setup_parser(cls, subparsers):
parser = subparsers.add_parser('downtime', help="Create, edit, and delete downtimes")
parser.add_argument('--string_ids', action='store_true', dest='string_ids',
help="Represent downtime IDs as strings instead of ints in JSON")
verb_parsers = parser.add_subparsers(title='Verbs')
post_parser = verb_parsers.add_parser('post', help="Create a downtime")
post_parser.add_argument('scope', help="scope to apply downtime to")
post_parser.add_argument('start', help="POSIX timestamp to start the downtime",
default=None)
post_parser.add_argument('--end', help="POSIX timestamp to end the downtime", default=None)
post_parser.add_argument('--message', help="message to include with notifications"
" for this downtime", default=None)
post_parser.set_defaults(func=cls._schedule_downtime)
update_parser = verb_parsers.add_parser('update', help="Update existing downtime")
update_parser.add_argument('downtime_id', help="downtime to replace"
" with the new definition")
update_parser.add_argument('--scope', help="scope to apply downtime to")
update_parser.add_argument('--start', help="POSIX timestamp to start"
" the downtime", default=None)
update_parser.add_argument('--end', help="POSIX timestamp to"
" end the downtime", default=None)
update_parser.add_argument('--message', help="message to include with notifications"
" for this downtime", default=None)
update_parser.set_defaults(func=cls._update_downtime)
show_parser = verb_parsers.add_parser('show', help="Show a downtime definition")
show_parser.add_argument('downtime_id', help="downtime to show")
show_parser.set_defaults(func=cls._show_downtime)
show_all_parser = verb_parsers.add_parser('show_all', help="Show a list of all downtimes")
show_all_parser.add_argument('--current_only', help="only return downtimes that"
" are active when the request is made", default=None)
show_all_parser.set_defaults(func=cls._show_all_downtime)
delete_parser = verb_parsers.add_parser('delete', help="Delete a downtime")
delete_parser.add_argument('downtime_id', help="downtime to delete")
delete_parser.set_defaults(func=cls._cancel_downtime)
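    # Illustrative sketch (assumption, not part of the original module): given the
    # sub-parsers registered above, typical dogshell invocations would look like:
    #   dog downtime post 'host:myhost' 1430311583 --end 1430315183 --message "maintenance"
    #   dog downtime show 12345
    #   dog downtime delete 12345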
@classmethod
def _schedule_downtime(cls, args):
api._timeout = args.timeout
format = args.format
res = api.Downtime.create(scope=args.scope, start=args.start,
end=args.end, message=args.message)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print(cls._pretty_json(res))
else:
print(json.dumps(res))
@classmethod
def _update_downtime(cls, args):
api._timeout = args.timeout
format = args.format
res = api.Downtime.update(args.downtime_id, scope=args.scope, start=args.start,
end=args.end, message=args.message)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print(cls._pretty_json(res))
else:
print(json.dumps(res))
@classmethod
def _cancel_downtime(cls, args):
api._timeout = args.timeout
res = api.Downtime.delete(args.downtime_id)
if res is not None:
report_warnings(res)
report_errors(res)
@classmethod
def _show_downtime(cls, args):
api._timeout = args.timeout
format = args.format
res = api.Downtime.get(args.downtime_id)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print(cls._pretty_json(res))
else:
print(json.dumps(res))
@classmethod
def _show_all_downtime(cls, args):
api._timeout = args.timeout
format = args.format
res = api.Downtime.get_all(current_only=args.current_only)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print(cls._pretty_json(res))
else:
print(json.dumps(res))
| {
"repo_name": "KyleJamesWalker/datadogpy",
"path": "datadog/dogshell/downtime.py",
"copies": "2",
"size": "4622",
"license": "bsd-3-clause",
"hash": 969029727424984700,
"line_mean": 41.7962962963,
"line_max": 99,
"alpha_frac": 0.6010385115,
"autogenerated": false,
"ratio": 4.068661971830986,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5669700483330986,
"avg_score": null,
"num_lines": null
} |
# 3p
import simplejson as json
# datadog
from datadog import api
from datadog.dogshell.common import report_errors, report_warnings
class HostClient(object):
@classmethod
def setup_parser(cls, subparsers):
parser = subparsers.add_parser('host', help='Mute, unmute hosts')
verb_parsers = parser.add_subparsers(title='Verbs')
mute_parser = verb_parsers.add_parser('mute', help='Mute a host')
mute_parser.add_argument('host_name', help='host to mute')
mute_parser.add_argument('--end', help="POSIX timestamp, if omitted,"
" host will be muted until explicitly unmuted", default=None)
mute_parser.add_argument('--message', help="string to associate with the"
" muting of this host", default=None)
mute_parser.add_argument('--override', help="true/false, if true and the host is already"
" muted, will overwrite existing end on the host",
action='store_true')
mute_parser.set_defaults(func=cls._mute)
unmute_parser = verb_parsers.add_parser('unmute', help='Unmute a host')
unmute_parser.add_argument('host_name', help='host to mute')
unmute_parser.set_defaults(func=cls._unmute)
@classmethod
def _mute(cls, args):
api._timeout = args.timeout
format = args.format
res = api.Host.mute(args.host_name, end=args.end, message=args.message,
override=args.override)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print(cls._pretty_json(res))
else:
print(json.dumps(res))
@classmethod
def _unmute(cls, args):
api._timeout = args.timeout
format = args.format
res = api.Host.unmute(args.host_name)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print(cls._pretty_json(res))
else:
print(json.dumps(res))
| {
"repo_name": "KyleJamesWalker/datadogpy",
"path": "datadog/dogshell/host.py",
"copies": "2",
"size": "2059",
"license": "bsd-3-clause",
"hash": -3811913104783247400,
"line_mean": 37.8490566038,
"line_max": 97,
"alpha_frac": 0.5910636231,
"autogenerated": false,
"ratio": 3.921904761904762,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5512968385004762,
"avg_score": null,
"num_lines": null
} |
# 3p
import simplejson as json
# datadog
from datadog import api
from datadog.dogshell.common import report_errors, report_warnings
class MonitorClient(object):
@classmethod
def setup_parser(cls, subparsers):
parser = subparsers.add_parser('monitor', help="Create, edit, and delete monitors")
parser.add_argument('--string_ids', action='store_true', dest='string_ids',
help="Represent monitor IDs as strings instead of ints in JSON")
verb_parsers = parser.add_subparsers(title='Verbs')
post_parser = verb_parsers.add_parser('post', help="Create a monitor")
post_parser.add_argument('type', help="type of the monitor, e.g."
"'metric alert' 'service check'")
post_parser.add_argument('query', help="query to notify on with syntax varying "
"depending on what type of monitor you are creating")
post_parser.add_argument('--name', help="name of the alert", default=None)
post_parser.add_argument('--message', help="message to include with notifications"
" for this monitor", default=None)
post_parser.add_argument('--options', help="json options for the monitor", default=None)
post_parser.set_defaults(func=cls._post)
update_parser = verb_parsers.add_parser('update', help="Update existing monitor")
update_parser.add_argument('monitor_id', help="monitor to replace with the new definition")
update_parser.add_argument('type', help="type of the monitor, e.g. "
"'metric alert' 'service check'")
update_parser.add_argument('query', help="query to notify on with syntax varying"
" depending on what type of monitor you are creating")
update_parser.add_argument('--name', help="name of the alert", default=None)
update_parser.add_argument('--message', help="message to include with "
"notifications for this monitor", default=None)
update_parser.add_argument('--options', help="json options for the monitor", default=None)
update_parser.set_defaults(func=cls._update)
show_parser = verb_parsers.add_parser('show', help="Show a monitor definition")
show_parser.add_argument('monitor_id', help="monitor to show")
show_parser.set_defaults(func=cls._show)
show_all_parser = verb_parsers.add_parser('show_all', help="Show a list of all monitors")
show_all_parser.set_defaults(func=cls._show_all)
delete_parser = verb_parsers.add_parser('delete', help="Delete a monitor")
delete_parser.add_argument('monitor_id', help="monitor to delete")
delete_parser.set_defaults(func=cls._delete)
mute_all_parser = verb_parsers.add_parser('mute_all', help="Globally mute "
"monitors (downtime over *)")
mute_all_parser.set_defaults(func=cls._mute_all)
unmute_all_parser = verb_parsers.add_parser('unmute_all', help="Globally unmute "
"monitors (cancel downtime over *)")
unmute_all_parser.set_defaults(func=cls._unmute_all)
mute_parser = verb_parsers.add_parser('mute', help="Mute a monitor")
mute_parser.add_argument('monitor_id', help="monitor to mute")
mute_parser.add_argument('--scope', help="scope to apply the mute to,"
" e.g. role:db (optional)", default=[])
mute_parser.add_argument('--end', help="POSIX timestamp for when"
" the mute should end (optional)", default=None)
mute_parser.set_defaults(func=cls._mute)
unmute_parser = verb_parsers.add_parser('unmute', help="Unmute a monitor")
unmute_parser.add_argument('monitor_id', help="monitor to unmute")
unmute_parser.add_argument('--scope', help="scope to unmute (must be muted), "
"e.g. role:db", default=[])
unmute_parser.add_argument('--all_scopes', help="clear muting across all scopes",
action='store_true')
unmute_parser.set_defaults(func=cls._unmute)
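    # Illustrative sketch (assumption, not part of the original module): example
    # invocations for the sub-commands registered above:
    #   dog monitor post "metric alert" "avg(last_5m):avg:system.load.1{*} > 4" --name "High load"
    #   dog monitor mute 12345 --scope role:db --end 1430315183
    #   dog monitor show_all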
@classmethod
def _post(cls, args):
api._timeout = args.timeout
format = args.format
options = None
if args.options is not None:
try:
options = json.loads(args.options)
            except ValueError:
raise Exception('bad json parameter')
res = api.Monitor.create(type=args.type, query=args.query, name=args.name,
message=args.message, options=options)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print(cls._pretty_json(res))
else:
print(json.dumps(res))
@classmethod
def _update(cls, args):
api._timeout = args.timeout
format = args.format
options = None
if args.options is not None:
try:
options = json.loads(args.options)
            except ValueError:
raise Exception('bad json parameter')
res = api.Monitor.update(args.monitor_id, type=args.type, query=args.query,
name=args.name, message=args.message, options=options)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print(cls._pretty_json(res))
else:
print(json.dumps(res))
@classmethod
def _show(cls, args):
api._timeout = args.timeout
format = args.format
res = api.Monitor.get(args.monitor_id)
report_warnings(res)
report_errors(res)
if args.string_ids:
res["id"] = str(res["id"])
if format == 'pretty':
print(cls._pretty_json(res))
else:
print(json.dumps(res))
@classmethod
def _show_all(cls, args):
api._timeout = args.timeout
format = args.format
res = api.Monitor.get_all()
report_warnings(res)
report_errors(res)
if args.string_ids:
for d in res:
d["id"] = str(d["id"])
if format == 'pretty':
print(cls._pretty_json(res))
elif format == 'raw':
print(json.dumps(res))
else:
for d in res:
print("\t".join([(str(d["id"])),
(cls._escape(d["message"])),
(cls._escape(d["name"])),
(str(d["options"])),
(str(d["org_id"])),
(d["query"]),
(d["type"])]))
@classmethod
def _delete(cls, args):
api._timeout = args.timeout
# TODO CHECK
res = api.Monitor.delete(args.monitor_id)
if res is not None:
report_warnings(res)
report_errors(res)
@classmethod
def _escape(cls, s):
return s.replace("\r", "\\r").replace("\n", "\\n").replace("\t", "\\t")
@classmethod
def _mute_all(cls, args):
api._timeout = args.timeout
format = args.format
res = api.Monitor.mute_all()
report_warnings(res)
report_errors(res)
if format == 'pretty':
print(cls._pretty_json(res))
else:
print(json.dumps(res))
@classmethod
def _unmute_all(cls, args):
api._timeout = args.timeout
res = api.Monitor.unmute_all()
if res is not None:
report_warnings(res)
report_errors(res)
@classmethod
def _mute(cls, args):
api._timeout = args.timeout
format = args.format
res = api.Monitor.mute(args.monitor_id, scope=args.scope, end=args.end)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print(cls._pretty_json(res))
else:
print(json.dumps(res))
@classmethod
def _unmute(cls, args):
        api._timeout = args.timeout
        format = args.format
res = api.Monitor.unmute(args.monitor_id, scope=args.scope, all_scopes=args.all_scopes)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print(cls._pretty_json(res))
else:
print(json.dumps(res))
| {
"repo_name": "KyleJamesWalker/datadogpy",
"path": "datadog/dogshell/monitor.py",
"copies": "2",
"size": "8495",
"license": "bsd-3-clause",
"hash": -6256791372625888000,
"line_mean": 39.4523809524,
"line_max": 99,
"alpha_frac": 0.5552678046,
"autogenerated": false,
"ratio": 4.113801452784504,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0020347648698804035,
"num_lines": 210
} |
# 3p
import simplejson as json
# datadog
from datadog import api
from datadog.dogshell.common import report_errors, report_warnings
class ServiceCheckClient(object):
@classmethod
def setup_parser(cls, subparsers):
parser = subparsers.add_parser('service_check', help="Perform service checks")
verb_parsers = parser.add_subparsers(title='Verbs')
check_parser = verb_parsers.add_parser('check', help="text for the message")
check_parser.add_argument('check', help="text for the message")
check_parser.add_argument('host_name', help="name of the host submitting the check")
check_parser.add_argument('status', help="integer for the status of the check."
" i.e: '0': OK, '1': WARNING, '2': CRITICAL, '3': UNKNOWN")
check_parser.add_argument('--timestamp', help="POSIX timestamp of the event", default=None)
check_parser.add_argument('--message', help="description of why this status occurred",
default=None)
check_parser.add_argument('--tags', help="comma separated list of tags", default=None)
check_parser.set_defaults(func=cls._check)
@classmethod
def _check(cls, args):
api._timeout = args.timeout
format = args.format
res = api.ServiceCheck.check(
check=args.check, host_name=args.host_name, status=int(args.status),
timestamp=args.timestamp, message=args.message, tags=args.tags)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print(cls._pretty_json(res))
else:
print(json.dumps(res))
| {
"repo_name": "clokep/datadogpy",
"path": "datadog/dogshell/service_check.py",
"copies": "2",
"size": "1676",
"license": "bsd-3-clause",
"hash": -6701624930970765000,
"line_mean": 43.1052631579,
"line_max": 99,
"alpha_frac": 0.63424821,
"autogenerated": false,
"ratio": 4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.563424821,
"avg_score": null,
"num_lines": null
} |
# 3p
import simplejson as json
# datadog
from datadog import api
from datadog.dogshell.common import report_errors, report_warnings
class TagClient(object):
@classmethod
def setup_parser(cls, subparsers):
parser = subparsers.add_parser('tag', help="View and modify host tags.")
verb_parsers = parser.add_subparsers(title='Verbs')
add_parser = verb_parsers.add_parser('add', help="Add a host to one or more tags.",
description='Hosts can be specified by name or id.')
add_parser.add_argument('host', help="host to add")
add_parser.add_argument('tag', help="tag to add host to (one or more, space separated)",
nargs='+')
add_parser.set_defaults(func=cls._add)
replace_parser = verb_parsers.add_parser(
'replace', help="Replace all tags with one or more new tags.",
description='Hosts can be specified by name or id.')
replace_parser.add_argument('host', help="host to modify")
replace_parser.add_argument('tag', help="list of tags to add host to", nargs='+')
replace_parser.set_defaults(func=cls._replace)
show_parser = verb_parsers.add_parser('show', help="Show host tags.",
description='Hosts can be specified by name or id.')
show_parser.add_argument('host', help="host to show (or 'all' to show all tags)")
show_parser.set_defaults(func=cls._show)
detach_parser = verb_parsers.add_parser('detach', help="Remove a host from all tags.",
description='Hosts can be specified by name or id.')
detach_parser.add_argument('host', help="host to detach")
detach_parser.set_defaults(func=cls._detach)
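    # Illustrative sketch (assumption, not part of the original module): example
    # invocations for the verbs registered above:
    #   dog tag add myhost role:db env:prod
    #   dog tag show myhost
    #   dog tag detach myhost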
@classmethod
def _add(cls, args):
api._timeout = args.timeout
format = args.format
res = api.Tag.create(args.host, tags=args.tag)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print("Tags for '%s':" % res['host'])
for c in res['tags']:
print(' ' + c)
elif format == 'raw':
print(json.dumps(res))
else:
for c in res['tags']:
print(c)
@classmethod
def _replace(cls, args):
api._timeout = args.timeout
format = args.format
res = api.Tag.update(args.host, tags=args.tag)
report_warnings(res)
report_errors(res)
if format == 'pretty':
print("Tags for '%s':" % res['host'])
for c in res['tags']:
print(' ' + c)
elif format == 'raw':
print(json.dumps(res))
else:
for c in res['tags']:
print(c)
@classmethod
def _show(cls, args):
api._timeout = args.timeout
format = args.format
if args.host == 'all':
res = api.Tag.get_all()
else:
res = api.Tag.get(args.host)
report_warnings(res)
report_errors(res)
if args.host == 'all':
if format == 'pretty':
for tag, hosts in list(res['tags'].items()):
for host in hosts:
print(tag)
print(' ' + host)
print()
elif format == 'raw':
print(json.dumps(res))
else:
for tag, hosts in list(res['tags'].items()):
for host in hosts:
print(tag + '\t' + host)
else:
if format == 'pretty':
for tag in res['tags']:
print(tag)
elif format == 'raw':
print(json.dumps(res))
else:
for tag in res['tags']:
print(tag)
@classmethod
def _detach(cls, args):
api._timeout = args.timeout
res = api.Tag.delete(args.host)
if res is not None:
report_warnings(res)
report_errors(res)
| {
"repo_name": "clokep/datadogpy",
"path": "datadog/dogshell/tag.py",
"copies": "3",
"size": "4139",
"license": "bsd-3-clause",
"hash": -3754514619107495400,
"line_mean": 35.6283185841,
"line_max": 100,
"alpha_frac": 0.5136506403,
"autogenerated": false,
"ratio": 4.24077868852459,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.625442932882459,
"avg_score": null,
"num_lines": null
} |
# 3rd party imports
from bread.bread import LabelValueReadView
import django_filters
from django.utils.translation import ugettext_lazy as _
# This project imports
from .forms import RegistrationPeriodAddForm
from .models import Election, Ballot, Candidate, RegistrationPeriod
from libya_elections.libya_bread import PaginatedBrowseView, SoftDeleteBread, StaffBreadMixin
from libya_elections.utils import LoginPermissionRequiredMixin, \
get_verbose_name
from register.models import SubConstituency
from register.views import AllNamedThingsFilter
class ElectionBrowse(PaginatedBrowseView):
columns = [
(_('Start'), 'formatted_polling_start_time', 'polling_start_time'),
(_('Name (en)'), 'name_english'),
(_('Name (ar)'), 'name_arabic'),
]
search_fields = ['name_english', 'name_arabic']
search_terms = _('Election name in English or Arabic')
class ElectionReadView(LoginPermissionRequiredMixin, LabelValueReadView):
permission_required = "voting.read_election"
fields = ((None, 'name_english'),
(None, 'name_arabic'),
(get_verbose_name(Election, 'polling_start_time'), 'formatted_polling_start_time'),
(get_verbose_name(Election, 'polling_end_time'), 'formatted_polling_end_time'),
(get_verbose_name(Election, 'creation_date'), 'formatted_creation_date'),
(get_verbose_name(Election, 'modification_date'), 'formatted_modification_date'),
)
class ElectionBread(StaffBreadMixin, SoftDeleteBread):
browse_view = ElectionBrowse
read_view = ElectionReadView
model = Election
class BallotFilterSet(django_filters.FilterSet):
subconstituencies = AllNamedThingsFilter(filter_by_model=SubConstituency)
class Meta:
model = Ballot
fields = ['election', 'subconstituencies', 'ballot_type']
class BallotBrowse(PaginatedBrowseView):
filterset = BallotFilterSet
class BallotReadView(LabelValueReadView):
fields = ((get_verbose_name(Ballot, 'election'), 'election_as_html'),
(get_verbose_name(Ballot, 'subconstituencies'), 'subconstituencies_as_html'),
(None, 'internal_ballot_number'),
(get_verbose_name(Ballot, 'ballot_type'), 'get_ballot_type_display'),
(None, 'num_seats'),
(get_verbose_name(Ballot, 'creation_date'), 'formatted_creation_date'),
(get_verbose_name(Ballot, 'modification_date'), 'formatted_modification_date'),
)
class BallotBread(StaffBreadMixin, SoftDeleteBread):
browse_view = BallotBrowse
read_view = BallotReadView
model = Ballot
class CandidateFilterSet(django_filters.FilterSet):
ballot__subconstituencies = AllNamedThingsFilter(filter_by_model=SubConstituency)
class Meta:
model = Candidate
fields = ['ballot', 'ballot__subconstituencies']
class CandidateBrowse(PaginatedBrowseView):
columns = [
(_('Ballot'), 'ballot__name'),
(_('Number'), 'candidate_number'),
(_('Name (en)'), 'name_english'),
(_('Name (ar)'), 'name_arabic'),
]
filterset = CandidateFilterSet
search_fields = ['name_english', 'name_arabic']
search_terms = _('Candidate name in English or Arabic')
class CandidateRead(LabelValueReadView):
fields = ((None, 'name_arabic'),
(None, 'name_english'),
(None, 'candidate_number'),
(get_verbose_name(Candidate, 'ballot'), 'ballot_as_html'),
(get_verbose_name(Candidate, 'creation_date'), 'formatted_creation_date'),
(get_verbose_name(Candidate, 'modification_date'), 'formatted_modification_date'),
)
class CandidateBread(StaffBreadMixin, SoftDeleteBread):
browse_view = CandidateBrowse
read_view = CandidateRead
model = Candidate
views = 'BREAD'
class RegistrationPeriodBrowse(PaginatedBrowseView):
columns = (
(get_verbose_name(RegistrationPeriod, 'start_time'), 'formatted_start_time', 'start_time'),
(get_verbose_name(RegistrationPeriod, 'end_time'), 'formatted_end_time', 'end_time'),
)
class RegistrationPeriodReadView(LabelValueReadView):
fields = (
(get_verbose_name(RegistrationPeriod, 'start_time'), 'formatted_start_time'),
(get_verbose_name(RegistrationPeriod, 'end_time'), 'formatted_end_time'),
(get_verbose_name(RegistrationPeriod, 'creation_date'), 'formatted_creation_date'),
(get_verbose_name(RegistrationPeriod, 'modification_date'), 'formatted_modification_date'),
)
class RegistrationPeriodBread(StaffBreadMixin, SoftDeleteBread):
browse_view = RegistrationPeriodBrowse
# Override default so that start/end timestamps are not split into date/time
# for display (unlike Edit)
read_view = RegistrationPeriodReadView
model = RegistrationPeriod
form_class = RegistrationPeriodAddForm
| {
"repo_name": "SmartElect/SmartElect",
"path": "voting/views.py",
"copies": "1",
"size": "4910",
"license": "apache-2.0",
"hash": -7403713392118187000,
"line_mean": 36.4809160305,
"line_max": 99,
"alpha_frac": 0.6812627291,
"autogenerated": false,
"ratio": 3.7798306389530407,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9953110496915928,
"avg_score": 0.0015965742274225594,
"num_lines": 131
} |
# 3rd party imports
from django.db import connection
# Project imports
from register.models import Office, SubConstituency
from .utils import dictfetchall
def _get_registration_centers(must_allow_registrations=True):
"""This returns a dict populated by data from the table register_registrationcenter. The
dict is keyed by center_id; values are 4-tuples:
(office_id, center_type, subconstituency_id, center id of copied center (or None))
e.g. -- 11001: (2, 1, 48, None)
The returned dict represents non-deleted centers, possibly filtered by whether or not
they support registrations. A copy center will be reported even if its original center
doesn't support registrations or is deleted.
must_allow_registrations: Whether or not the centers allow registrations
"""
# In the query, "center" refers to any active, non-deleted center, which may or may
# not be a copy center; when "center" is a copy center, "original" refers to the
# center which it is a copy of. (original.center_id will be null if center is not
# a copy center.)
inserted_clause = ' center.reg_open = true AND ' if must_allow_registrations else ''
sql = """SELECT
center.center_id, center.center_type, center.office_id,
center.subconstituency_id, original.center_id as original_center_id
FROM
(SELECT * FROM register_registrationcenter c) AS center
LEFT JOIN
(SELECT id, center_id FROM register_registrationcenter o) AS original
ON center.copy_of_id = original.id
WHERE %s center.deleted = false""" % inserted_clause
cursor = connection.cursor()
cursor.execute(sql)
rows = dictfetchall(cursor, date_time_columns=())
d = {
row['center_id']: (
row['office_id'],
row['center_type'],
row['subconstituency_id'],
row['original_center_id']
)
for row in rows
}
return d
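# Illustrative sketch, not part of the original module: the mapping returned by
# _get_registration_centers() is keyed by center_id and holds 4-tuples of
# (office_id, center_type, subconstituency_id, center id of copied center or None).
# The first entry below mirrors the docstring's example; the second is a
# hypothetical copy centre pointing back at it.
_EXAMPLE_CENTER_MAP = {
    11001: (2, 1, 48, None),     # ordinary centre, as in the docstring above
    11002: (2, 1, 48, 11001),    # hypothetical copy of centre 11001
}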
def get_active_registration_locations():
"""Return all centers which are valid for registration (i.e., marked active).
    See _get_registration_centers() above for further details."""
return _get_registration_centers(must_allow_registrations=True)
def get_all_polling_locations():
"""Return all centers, whether or not polling is planned at the center for
a particular election.
    See _get_registration_centers() above for further details."""
return _get_registration_centers(must_allow_registrations=False)
def get_offices():
return [{'arabic_name': o.name_arabic,
'english_name': o.name_english,
'code': o.id} for o in Office.objects.all()]
def get_subconstituencies():
return [{'arabic_name': s.name_arabic,
'english_name': s.name_english,
'code': s.id}
for s in SubConstituency.objects.all()]
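# Illustrative sketch, not part of the original module: get_offices() and
# get_subconstituencies() both return lists of plain dicts shaped like the
# hypothetical entry below (the names and code are made up).
_EXAMPLE_OFFICE_ENTRY = {
    'arabic_name': 'Example office name (Arabic)',    # hypothetical
    'english_name': 'Example office name (English)',  # hypothetical
    'code': 2,
}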
| {
"repo_name": "SmartElect/SmartElect",
"path": "reporting_api/data_pull_common.py",
"copies": "1",
"size": "2918",
"license": "apache-2.0",
"hash": -1216394739287062300,
"line_mean": 36.8961038961,
"line_max": 92,
"alpha_frac": 0.6579849212,
"autogenerated": false,
"ratio": 3.9972602739726026,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5155245195172602,
"avg_score": null,
"num_lines": null
} |
# 3rd Party Imports
from flask import request, Response, make_response
from flask import jsonify
from functools import wraps
import logging
# Local Imports
from blockbuster import app
import bb_auditlogger
from blockbuster import bb_request_processor
from blockbuster import bb_api_request_processor
from blockbuster import bb_security
# Set up auditor
bb_auditlogger.BBAuditLoggerFactory().create().logAudit('app', 'STARTUP', 'Application Startup')
logger = logging.getLogger(__name__)
def add_response_headers(headers=None):
"""This decorator adds the headers passed in to the response"""
if headers is None:
headers = {}
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
resp = make_response(f(*args, **kwargs))
h = resp.headers
for header, value in headers.items():
h[header] = value
return resp
return decorated_function
return decorator
def allow_cors(f):
"""This decorator passes X-Robots-Tag: noindex"""
@wraps(f)
@add_response_headers({'Access-Control-Allow-Origin': '*'})
def decorated_function(*args, **kwargs):
return f(*args, **kwargs)
return decorated_function
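# Illustrative sketch, not part of the real API: add_response_headers() can stamp
# arbitrary headers onto any view's response; the header name and view below are
# hypothetical and exist only to show how the decorator is applied.
@add_response_headers({'X-Example-Header': 'demo'})
def _example_headers_view():
    return "ok"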
# Following methods provide the endpoint authentication.
# Authentication is applied to an endpoint by decorating the route with @requires_auth
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if auth:
print(str.format("API User: {0}", auth.username))
if not auth or not check_auth(auth.username, auth.password):
return user_must_authenticate()
return f(*args, **kwargs)
return decorated
def check_auth(username, password):
successful = bb_security.credentials_are_valid(username, password)
print(str.format("Authentication Successful: {0}", successful))
return successful
def user_must_authenticate():
"""Sends a 401 response that enables basic auth"""
return Response(
'Could not verify your access level for that URL.\n'
'You have to login with proper credentials', 401,
{'WWW-Authenticate': 'Basic realm="Login Required"'})
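# Illustrative client-side sketch (hypothetical URL and credentials, kept as a
# comment so the module stays side-effect free): endpoints decorated with
# @requires_auth expect HTTP Basic authentication, e.g. with the requests library:
#
#   import requests
#   resp = requests.get('http://localhost:5000/api/v1.0/stats/',
#                       auth=('api_user', 'api_password'))
#   # 200 on valid credentials, 401 otherwise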
# Routes
# The /status endpoint is not secured as it does not return any data other than service status
@app.route("/status/", methods=['GET'])
def get_status():
status = bb_api_request_processor.APIRequestProcessor().service_status_get()
return status
@app.route("/api/v1.0/InboundSMS/", methods=['POST'])
@requires_auth
def post_inboundsms():
bb_auditlogger.BBAuditLoggerFactory().create().logAudit('app', 'POST_INBOUNDSMS', request.form['Body'])
return bb_request_processor.process_twilio_request(request)
# API Routes
@app.route("/api/v1.0/stats/", methods=['GET'])
@requires_auth
@allow_cors
def get_stats():
result = bb_api_request_processor.APIRequestProcessor()\
.service_stats_get()
bb_auditlogger.BBAuditLoggerFactory().create().logAudit('app', 'GET_STATS', str(result))
return jsonify(stats=result)
@app.route("/api/v1.0/cars/", methods=['GET'])
@requires_auth
@allow_cors
def uri_get_cars():
result = bb_api_request_processor.APIRequestProcessor()\
.cars_getall()
return jsonify(cars=result)
@app.route("/api/v1.0/cars/<registration>", methods=['GET'])
@requires_auth
@allow_cors
def uri_get_registrations(registration):
result = bb_api_request_processor.APIRequestProcessor().cars_get(registration)
return jsonify(result)
@app.route("/api/v1.0/blocks/", methods=['GET'])
@requires_auth
def uri_get_blocksall():
result = bb_api_request_processor.APIRequestProcessor().blocks_getall()
return jsonify(blocks=result)
@app.route("/api/v1.0/status/<requestermobile>", methods=['GET'])
@requires_auth
def uri_get_status(requestermobile):
return jsonify(bb_api_request_processor.APIRequestProcessor().status_get(requestermobile))
@app.route("/api/v1.0/smslogs/", methods=['GET'])
@requires_auth
@allow_cors
def uri_get_smslogs():
result = bb_api_request_processor.APIRequestProcessor().smslogs_get()
return jsonify(logs=result)
@app.route("/api/v1.0/logs/", methods=['GET'])
@requires_auth
@allow_cors
def uri_get_logs():
result = bb_api_request_processor.APIRequestProcessor().logs_get()
return jsonify(logs=result)
# Routes that I haven't finished yet...
@app.route("/api/v1.0/blocks", methods=['POST'])
@requires_auth
def uri_post_blocks():
content = request.get_json()
block = content['block']
blocker_reg = block['blocker_reg']
blocked_reg = block['blocked_reg']
response = blocker_reg + " has blocked in " + blocked_reg
return response, 200
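# Illustrative sketch, not part of the original routes: uri_post_blocks() above
# expects a JSON body shaped like the hypothetical payload below (the
# registration plates are made up).
_EXAMPLE_BLOCK_PAYLOAD = {
    "block": {
        "blocker_reg": "AB12CDE",
        "blocked_reg": "XY34ZFG",
    }
}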
| {
"repo_name": "mattstibbs/blockbuster-server",
"path": "blockbuster/bb_routes.py",
"copies": "1",
"size": "4716",
"license": "mit",
"hash": 4013575952395925500,
"line_mean": 29.4258064516,
"line_max": 107,
"alpha_frac": 0.6916878711,
"autogenerated": false,
"ratio": 3.6276923076923078,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4819380178792308,
"avg_score": null,
"num_lines": null
} |
# 3rd party imports
from model_bakery import baker
# Django imports
from django import test
from django.core.exceptions import ValidationError
# Devilry imports
from devilry.devilry_qualifiesforexam import models as status_models
class TestStatus(test.TestCase):
def test_notready_no_message(self):
test_status = baker.make('devilry_qualifiesforexam.Status', status=status_models.Status.NOTREADY)
with self.assertRaisesMessage(ValidationError, 'Message can not be empty when status is ``notready``.'):
test_status.full_clean()
def test_ready_no_message_and_no_plugin(self):
test_status = baker.make('devilry_qualifiesforexam.Status', status=status_models.Status.READY)
with self.assertRaisesMessage(ValidationError, 'A ``message`` is required when no ``plugin`` is specified. '
'The message should explain why a plugin is not used.'):
test_status.full_clean()
def test_notready_no_plugin(self):
test_status = baker.make('devilry_qualifiesforexam.Status', status=status_models.Status.NOTREADY,
message='No plugin', plugin='some.plugin')
with self.assertRaisesMessage(ValidationError, '``plugin`` is not allowed when status is ``notready``.'):
test_status.full_clean()
def test_get_current_status_no_status_for_period(self):
testperiod = baker.make_recipe('devilry.apps.core.period_active')
last_status = status_models.Status.objects.get_last_status_in_period(period=testperiod)
self.assertIsNone(last_status)
def test_get_current_status(self):
testperiod = baker.make_recipe('devilry.apps.core.period_active')
baker.make('devilry_qualifiesforexam.Status',
period=testperiod,
status=status_models.Status.READY,
plugin='plugin')
last_status = baker.make('devilry_qualifiesforexam.Status',
period=testperiod,
status=status_models.Status.READY,
plugin='plugin')
current_status = status_models.Status.objects.get_last_status_in_period(period=testperiod)
self.assertEqual(current_status, last_status)
def test_get_qualified_students(self):
testperiod = baker.make_recipe('devilry.apps.core.period_active')
teststatus = baker.make('devilry_qualifiesforexam.Status',
period=testperiod,
status=status_models.Status.READY,
plugin='plugin')
baker.make('devilry_qualifiesforexam.QualifiesForFinalExam',
status=teststatus,
qualifies=True,
_quantity=10)
baker.make('devilry_qualifiesforexam.QualifiesForFinalExam',
status=teststatus,
qualifies=False,
_quantity=10)
self.assertEqual(10, len(teststatus.get_qualified_students()))
| {
"repo_name": "devilry/devilry-django",
"path": "devilry/devilry_qualifiesforexam/tests/test_models.py",
"copies": "1",
"size": "3089",
"license": "bsd-3-clause",
"hash": 3275388277236497400,
"line_mean": 48.0317460317,
"line_max": 116,
"alpha_frac": 0.6254451279,
"autogenerated": false,
"ratio": 4.102257636122178,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.001336371531465422,
"num_lines": 63
} |
# 3rd party imports
from reportlab.platypus import Image, Paragraph, PageBreak, Table, Spacer
from reportlab.lib.units import cm
from reportlab.lib.pagesizes import A4
# Django imports
from django.conf import settings
# Project imports
from .arabic_reshaper import reshape
from .pdf_canvas import NumberedCanvas, getArabicStyle, getHeaderStyle, getTableStyle, \
get_hnec_logo_fname, drawHnecLogo
from .strings import STRINGS
from .utils import chunker, format_name, CountingDocTemplate, build_copy_info, \
truncate_center_name, out_of_disk_space_handler_context
from libya_elections.constants import MALE, FEMALE
def generate_pdf(filename, center, voter_roll, gender, center_book=False):
# filename: the file to which the PDF will be written
# center: a data_pull.Center instance
# voter_roll: list of registration dicts --
# {national_id, first_name, father_name, grandfather_name, family_name, gender}
# gender: one of the MALE/FEMALE constants. UNISEX is not valid.
# center_book: ???
#
    # separates by gender using the MALE/FEMALE constants from libya_elections.constants
# sorts by name fields in query
# assembles display string from parts
# writes to filename
#
# returns number of pages in the PDF
if gender not in (MALE, FEMALE):
raise ValueError("generate_pdf() gender must be MALE or FEMALE")
# set styles
styles = getArabicStyle()
# get strings
mf_string = STRINGS['female'] if (gender == FEMALE) else STRINGS['male']
cover_string = STRINGS['center_book_cover'] if center_book else STRINGS['center_list_cover']
header_string = STRINGS['center_book_header'] if center_book else STRINGS['center_list_header']
# cover page
center_name = reshape(center.name)
template = '%s: %s / %s'
subconstituency_name = reshape(center.subconstituency.name_arabic)
params = (STRINGS['subconstituency_name'], center.subconstituency.id, subconstituency_name)
subconstituency = template % params
center_info = {
'gender': '%s: %s' % (STRINGS['gender'], mf_string),
'number': '%s: %d' % (STRINGS['center_number'], center.center_id),
'name': '%s: %s' % (STRINGS['center_name'], center_name),
'name_trunc': '%s: %s' % (STRINGS['center_name'], truncate_center_name(center_name)),
'subconstituency': subconstituency,
'copy_info': build_copy_info(center),
}
# create document
doc = CountingDocTemplate(filename, pagesize=A4, topMargin=1 * cm, bottomMargin=1 * cm,
leftMargin=1.5 * cm, rightMargin=2.54 * cm)
# elements, cover page first
with open(get_hnec_logo_fname(), 'rb') as hnec_f:
elements = [
Image(hnec_f, width=10 * cm, height=2.55 * cm),
Spacer(48, 48),
Paragraph(cover_string, styles['Title']),
Spacer(18, 18),
Paragraph(center_info['gender'], styles['CoverInfo-Bold']),
Paragraph(center_info['number'], styles['CoverInfo']),
Paragraph(center_info['name'], styles['CoverInfo']),
Paragraph(center_info['copy_info'], styles['CoverInfo']),
Paragraph(center_info['subconstituency'], styles['CoverInfo']),
PageBreak(),
]
# Focus on one specific gender.
voter_roll = [voter for voter in voter_roll if voter.gender == gender]
# We wrap the page header in a table because we want the header's gray background to extend
# margin-to-margin and that's easy to do with a table + background color. It's probably
# possible with Paragraphs alone, but I'm too lazy^w busy to figure out how.
# It's necessary to wrap the table cell text in Paragraphs to ensure the base text direction
# is RTL. See https://github.com/hnec-vr/libya-elections/issues/1197
para_prefix = Paragraph(STRINGS['center_header_prefix'], styles['InnerPageHeader'])
para_header = Paragraph(header_string, styles['InnerPageHeader'])
page_header = Table([[para_prefix], [para_header]], 15 * cm, [16, 24])
page_header.setStyle(getHeaderStyle())
n_pages = 0
for page in chunker(voter_roll, settings.ROLLGEN_REGISTRATIONS_PER_PAGE_REGISTRATION):
n_pages += 1
elements.append(page_header)
elements += [Paragraph(center_info['gender'], styles['CenterInfo-Bold']),
Paragraph(center_info['number'], styles['CenterInfo']),
Paragraph(center_info['name_trunc'], styles['CenterInfo']),
]
elements.append(Spacer(10, 10))
# The contents of each table cell are wrapped in a Paragraph to set the base text
# direction.
# See https://github.com/hnec-vr/libya-elections/issues/1197
data = [[Paragraph(reshape(format_name(voter)), styles['TableCell'])] for voter in page]
# Insert header before the data.
data.insert(0, [Paragraph(STRINGS['the_names'], styles['TableCell'])])
table = Table(data, 15 * cm, 0.825 * cm)
table.setStyle(getTableStyle())
elements.append(table)
elements.append(Paragraph(mf_string, styles['PageBottom']))
elements.append(PageBreak())
if not n_pages:
# When there are no pages (==> no registrants for this gender), we need to emit a page
# that states that.
elements.append(page_header)
key = 'no_male_registrants' if gender == MALE else 'no_female_registrants'
elements.append(Paragraph(STRINGS[key], styles['BlankPageNotice']))
with out_of_disk_space_handler_context():
doc.build(elements, canvasmaker=NumberedCanvas, onLaterPages=drawHnecLogo)
return doc.n_pages
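# Illustrative usage sketch (kept as a comment because it needs a real Center and
# registration data; the filename and objects below are hypothetical):
#
#   n_pages = generate_pdf('/tmp/11001_male.pdf', center, voter_roll, MALE)
#
# A separate call is needed per gender, since generate_pdf() rejects UNISEX, and
# the returned page count can be used for print planning.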
| {
"repo_name": "SmartElect/SmartElect",
"path": "rollgen/generate_pdf.py",
"copies": "1",
"size": "5841",
"license": "apache-2.0",
"hash": -202000613873670820,
"line_mean": 44.6328125,
"line_max": 100,
"alpha_frac": 0.6413285396,
"autogenerated": false,
"ratio": 3.7514450867052025,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9882921937744034,
"avg_score": 0.00197033771223368,
"num_lines": 128
} |
# 3rd party modules
from django.core.urlresolvers import reverse
from django.utils import timezone
from rest_framework.test import APITestCase
from rest_framework import status
# Own
from pjfeedreader.models import Category, Feed
from .factories import CategoryFactory, FeedFactory
class CategoryAPITest(APITestCase):
def test_category_api_returns_existing_items(self):
"""
Test that the API returns existing categories
At the same time test that API exists
"""
# First create factories
CategoryFactory()
CategoryFactory()
# Get list
response = self.client.get(reverse('category-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 2,
'There should be two categories returned')
def test_category_can_be_created(self):
"""
        Tests that a category can be created using a POST request
"""
# Create category
data = {'name': 'Category 1', 'slug': 'Slug1'}
response = self.client.post(reverse('category-list'),
data, format='json')
# Was the category created successfully
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Category.objects.count(), 1,
'Should be one category in database')
self.assertEqual(Category.objects.get().name, 'Category 1')
def test_category_returns_error_if_fields_are_missing(self):
pass
def test_category_returns_error_if_field_values_are_invalid(self):
pass
class FeedAPITest(APITestCase):
def test_feed_api_returns_existing_items(self):
"""
Test the API exists and can return existing items
"""
# Create feeds
FeedFactory()
FeedFactory()
# Get feed list
response = self.client.get(reverse('feed-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(len(response.data), 2,
'There should be two categories returned')
def test_feed_can_be_created(self):
"""
        Tests that a feed can be created using a POST request
"""
# Create Feed, Feed needs a category as well
category_name = 'TestCategory'
CategoryFactory(name=category_name)
date_checked = timezone.now()
date_updated = timezone.now()
data = {
'title': 'Title 1', 'category': 1,
'date_checked': date_checked,
'date_updated': date_updated,
'url': 'http://blaa.com',
}
response = self.client.post(reverse('feed-list'),
data, format='json')
# Was the Feed created successfully
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Feed.objects.count(), 1,
                         'Should be one feed in database')
self.assertEqual(Feed.objects.get().title, 'Title 1')
self.assertEqual(response.data['category']['name'], category_name,
"Category name should be {0}".format(category_name))
| {
"repo_name": "jokimies/django-pj-feedreader",
"path": "tests/test_api.py",
"copies": "1",
"size": "3254",
"license": "bsd-3-clause",
"hash": -7694877073501989000,
"line_mean": 31.8686868687,
"line_max": 77,
"alpha_frac": 0.6075599262,
"autogenerated": false,
"ratio": 4.427210884353742,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 99
} |
# 3rd-party modules
from lxml.builder import E
# local module
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
class UserSSHKey(Resource):
"""
[edit system login user <name> authentication <key-type> <key-value> ]
Resource name: tuple(<key-type>, <key-value>)
<key-type> : ['ssh-dsa', 'ssh-rsa']
<key-value> : SSH public key string (usually something very long)
Resource manager utilities:
load_key - allows you to load an ssh-key from a file or str
"""
# there are no properties, since the name <key-value> constitutes the
    # actual ssh key data, yo!
PROPERTIES = []
# -----------------------------------------------------------------------
# XML readers
# -----------------------------------------------------------------------
def _xml_at_top(self):
key_t, key_v = self._name
return E.system(E.login(E.user(
E.name(self.P.name),
E.authentication(
                E(key_t, E.name(key_v))
))))
def _xml_at_res(self, xml):
return xml.find('.//authentication/%s' % self._name[0])
def _xml_to_py(self, has_xml, has_py):
Resource._r_has_xml_status(has_xml, has_py)
# -----------------------------------------------------------------------
# UTILITY FUNCTIONS
# -----------------------------------------------------------------------
def load_key(self, path=None, key_value=None):
"""
Adds a new ssh-key to the user authentication. You can
provide either the path to the ssh-key file, or the contents
        of the key (useful for loading the same key on many devices)
:path: (optional)
path to public ssh-key file on the local server,
:key_value: (optional)
the contents of the ssh public key
"""
if not self.is_mgr:
raise RuntimeError("must be a resource-manager!")
if path is None and key_value is None:
raise RuntimeError("You must provide either path or key_value")
if path is not None:
# snarf the file into key_value, yo!
with open(path, 'r') as f:
key_value = f.read().strip()
# extract some data from the key value, this will either
# be 'ssh-rsa' or 'ssh-dss'. we need to decode this to set
# the type correctly in the RPC.
vt = key_value[0:7]
key_map = {'ssh-rsa': 'ssh-rsa', 'ssh-dss': 'ssh-dsa'}
key_type = key_map.get(vt)
if key_type is None:
raise RuntimeError("Unknown ssh public key file type: %s" % vt)
# at this point we are going to add a new key, so really what we are
# doing is accessing a new instance of this class and
# doing a write, but just a touch since there are no properties, yo!
new_key = self[(key_type, key_value)]
return new_key.write(touch=True)
# -----------------------------------------------------------------------
# Manager List, Catalog
# -----------------------------------------------------------------------
def _r_list(self):
# the key list comes from the parent object.
self._rlist = self.P['$sshkeys']
def _r_catalog(self):
# no catalog but the keys
self._rcatalog = dict((k, None) for k in self.list)
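# Illustrative usage sketch (kept as a comment; the device handle, user name and
# key path are hypothetical, and it assumes the parent User resource exposes an
# `sshkey` manager per its MANAGES mapping):
#
#   user = User(dev, 'jeremy')
#   user.sshkey.load_key(path='/home/jeremy/.ssh/id_rsa.pub')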
| {
"repo_name": "JamesNickerson/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/user_ssh_key.py",
"copies": "9",
"size": "3394",
"license": "apache-2.0",
"hash": -6559505710584498000,
"line_mean": 32.603960396,
"line_max": 77,
"alpha_frac": 0.5070713023,
"autogenerated": false,
"ratio": 4.089156626506024,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 101
} |
# 3rd-party modules
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
from jnpr.junos.cfg.prefix_list_item import PrefixListItem
class PrefixList(Resource):
"""
    [edit policy-options prefix-list <name>]
Resource name: str
<name> is the prefix-list name
Manages resources:
prefix_list_item, PrefixListItem
"""
PROPERTIES = [
'$prefix_list_items' # read only names of prefix-list-items
]
MANAGES = { 'prefix_list_item': PrefixListItem }
# -----------------------------------------------------------------------
# XML readers
# -----------------------------------------------------------------------
def _xml_at_top(self):
return E('policy-options', E('prefix-list', E.name(self._name)))
def _xml_at_res(self, xml):
return xml.find('.//prefix-list')
def _xml_to_py(self, has_xml, has_py):
Resource._r_has_xml_status(has_xml, has_py)
# prefix-list-item
has_py['$prefix_list_items'] = [ item.text
for item in has_xml.xpath('.//prefix-list-item/name') ]
# -----------------------------------------------------------------------
# Manager List, Catalog
# -----------------------------------------------------------------------
def _r_list(self):
get = E('policy-options', E('prefix-list', JXML.NAMES_ONLY))
got = self.R.get_config(get)
self._rlist = [ name.text
for name in got.xpath('.//prefix-list/name') ]
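# Illustrative sketch (hypothetical list name, shown as a comment): the E-maker
# call in _xml_at_top() builds the Junos configuration stanza used to address a
# prefix-list, roughly:
#
#   <policy-options>
#     <prefix-list>
#       <name>example-list</name>
#     </prefix-list>
#   </policy-options>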
| {
"repo_name": "jokerxs/pyez_resources",
"path": "prefix_list.py",
"copies": "1",
"size": "1585",
"license": "apache-2.0",
"hash": -922005044881638800,
"line_mean": 28.9056603774,
"line_max": 77,
"alpha_frac": 0.4864353312,
"autogenerated": false,
"ratio": 4.074550128534704,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5060985459734704,
"avg_score": null,
"num_lines": null
} |
# 3rd-party modules
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
from jnpr.junos.cfg.srx.nat.nat_proxy_arp import NatProxyArp
class NatStaticRule(Resource):
"""
[edit security nat static rule-set <ruleset_name> rule <rule_name>]
Resource namevar:
rule_name, string. The ruleset_name is obtained from the resource parent
"""
PROPERTIES = [
"description",
"match_dst_addr",
"match_dst_port",
"nat_addr",
"nat_port",
"proxy_interface"
]
# -----------------------------------------------------------------------
# XML read
# -----------------------------------------------------------------------
def _xml_at_top(self):
return E.security(E.nat(E.static(
E('rule-set',
E.name(self.P._name),
E.rule(E.name(self._name))
)
)))
def _xml_at_res(self, xml):
return xml.find('.//rule')
def _xml_to_py(self, as_xml, to_py):
"""
converts Junos XML to native Python
"""
Resource._r_has_xml_status(as_xml, to_py)
Resource.copyifexists(as_xml, 'description', to_py)
e = as_xml.find('static-nat-rule-match')
to_py['match_dst_addr'] = e.find('destination-address').text
# -----------------------------------------------------------------------
# XML write
# -----------------------------------------------------------------------
def _xml_hook_build_change_begin(self, xml):
if 'nat_port' not in self.should:
# if 'nat_port' is not provided, then default to the
# 'match_dst_port' value
self.should['nat_port'] = self['match_dst_port']
if 'match_dst_addr' in self.should and 'proxy_interface' in self.has:
# if we are changing the 'match_dst_addr' and we also have a proxy
# interface, then we need to update the proxy_interface value to
# the new match_dst_addr value. start by deleting the existing one:
namevar = (self['proxy_interface'], self.has['match_dst_addr'])
NatProxyArp(self._junos, namevar).delete()
if 'proxy_interface' not in self.should:
# if the 'proxy_interface' value was not actually changed, then
# simply copy the current one into :should: this will trigger
# the flush/create in the property-writer below
self.should['proxy_interface'] = self.has['proxy_interface']
# build up some XML that will be used by the property-writers
match = E('static-nat-rule-match')
xml.append(match)
then = E.then(E('static-nat', E('prefix')))
xml.append(then)
self._rxml_match = match
self._rxml_then = then.find('static-nat/prefix')
def _xml_change_match_dst_addr(self, xml):
self._rxml_match.append(
E('destination-address',
JXML.REPLACE,
self.should['match_dst_addr'])
)
return True
def _xml_change_match_dst_port(self, xml):
self._rxml_match.append(
E('destination-port', E.low(self.should['match_dst_port']))
)
return True
def _xml_change_nat_addr(self, xml):
self._rxml_then.append(E('addr-prefix', self.should['nat_addr']))
return True
def _xml_change_nat_port(self, xml):
self._rxml_then.append(
E('mapped-port', E('low', self.should['nat_port'])))
return True
def _xml_change_proxy_interface(self, xml):
# this is really always going to be a 'create a new resource'. If the
# caller is changing the 'match_dst_addr' value, then the existing
# entry will be removed by the "hook" function.
namevar = (self.should['proxy_interface'], self['match_dst_addr'])
parp = NatProxyArp(self._junos, namevar)
parp.write(touch=True)
return True
# -----------------------------------------------------------------------
# Manager List, Catalog
# -----------------------------------------------------------------------
def _r_list(self):
self._rlist = self.P['rules']
def _r_catalog(self):
get = E.security(E.nat(E.static(
E('rule-set',
E.name(self.P._name),
)
)))
got = self.D.rpc.get_config(get)
for rule in got.xpath('.//rule'):
name = rule.find('name').text
self._rcatalog[name] = {}
self._xml_to_py(rule, self._rcatalog[name])
| {
"repo_name": "dgjnpr/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/srx/nat/nat_st_rule.py",
"copies": "1",
"size": "4661",
"license": "apache-2.0",
"hash": -7487769271916536000,
"line_mean": 34.045112782,
"line_max": 79,
"alpha_frac": 0.5153400558,
"autogenerated": false,
"ratio": 3.930016863406408,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4945356919206408,
"avg_score": null,
"num_lines": null
} |
# 3rd-party modules
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
from jnpr.junos.cfg.srx.nat.nat_src_rule import NatSrcRule
class NatSrcRuleSet(Resource):
"""
[edit security nat source rule-set <name>]
"""
PROPERTIES = [
"zone_from",
"zone_to",
"$rules",
"$rules_count"
]
def __init__(self, junos, name=None, **kvargs):
if name is None:
# resource-manager
Resource.__init__(self, junos, name, **kvargs)
return
self.rule = NatSrcRule(junos, M=self, parent=self)
self._manages = ['rule']
Resource.__init__(self, junos, name, **kvargs)
# -----------------------------------------------------------------------
# XML read
# -----------------------------------------------------------------------
def _xml_at_top(self):
return E.security(E.nat(E.source(
E('rule-set', E.name(self._name))
)))
def _xml_hook_read_begin(self, xml):
"""
need to add the <from>,<to> elements to pick up the zone context
need to add the rules, names-only
"""
rs = xml.find('.//rule-set')
rs.append(E('from'))
rs.append(E('to'))
rs.append(E.rule(JXML.NAMES_ONLY))
return True
def _xml_at_res(self, xml):
return xml.find('.//rule-set')
def _xml_to_py(self, as_xml, to_py):
Resource._r_has_xml_status(as_xml, to_py)
to_py['zone_from'] = as_xml.find('from/zone').text
to_py['zone_to'] = as_xml.find('to/zone').text
to_py['$rules'] = [rule.text for rule in as_xml.xpath('.//rule/name')]
to_py['$rules_count'] = len(to_py['$rules'])
# -----------------------------------------------------------------------
# XML write
# -----------------------------------------------------------------------
def _xml_change_zone_from(self, xml):
xml.append(E('from', JXML.REPLACE, E.zone(self.should['zone_from'])))
return True
def _xml_change_zone_to(self, xml):
xml.append(E('to', JXML.REPLACE, E.zone(self.should['zone_to'])))
return True
# -----------------------------------------------------------------------
# Resource List, Catalog
# -- only executed by 'manager' resources
# -----------------------------------------------------------------------
def _r_list(self):
get = E.security(E.nat(E.source(
E('rule-set', JXML.NAMES_ONLY)
)))
got = self.D.rpc.get_config(get)
self._rlist = [name.text for name in got.xpath('.//name')]
def _r_catalog(self):
get = E.security(E.nat(E.source(
E('rule-set')
)))
got = self.D.rpc.get_config(get)
for ruleset in got.xpath('.//rule-set'):
name = ruleset.find("name").text
self._rcatalog[name] = {}
self._xml_to_py(ruleset, self._rcatalog[name])
| {
"repo_name": "dgjnpr/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/srx/nat/nat_src_ruleset.py",
"copies": "1",
"size": "3030",
"license": "apache-2.0",
"hash": -8945313619700559000,
"line_mean": 30.8947368421,
"line_max": 78,
"alpha_frac": 0.4646864686,
"autogenerated": false,
"ratio": 3.726937269372694,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9691623737972694,
"avg_score": 0,
"num_lines": 95
} |
# 3rd-party modules
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
from jnpr.junos.cfg.srx.nat.nat_st_rule import NatStaticRule
class NatStaticRuleSet(Resource):
"""
[edit security nat static rule-set <name>]
"""
PROPERTIES = [
"description",
"zone_from",
"$rules",
"$rules_count"
]
def __init__(self, junos, name=None, **kvargs):
if name is None:
# resource-manager
Resource.__init__(self, junos, name, **kvargs)
return
self.rule = NatStaticRule(junos, M=self, parent=self)
self._manages = ['rule']
Resource.__init__(self, junos, name, **kvargs)
# -----------------------------------------------------------------------
# XML read
# -----------------------------------------------------------------------
def _xml_at_top(self):
return E.security(E.nat(E.static(
E('rule-set', E.name(self._name))
)))
def _xml_hook_read_begin(self, xml):
"""
        need to add the <from> element to pick up the zone context
need to add the rules, names-only
"""
rs = xml.find('.//rule-set')
rs.append(E('from'))
rs.append(E.rule(JXML.NAMES_ONLY))
return True
def _xml_at_res(self, xml):
return xml.find('.//rule-set')
def _xml_to_py(self, as_xml, to_py):
Resource._r_has_xml_status(as_xml, to_py)
Resource.copyifexists(as_xml, 'description', to_py)
to_py['zone_from'] = as_xml.find('from/zone').text
to_py['$rules'] = [rule.text for rule in as_xml.xpath('.//rule/name')]
to_py['$rules_count'] = len(to_py['$rules'])
# -----------------------------------------------------------------------
# XML write
# -----------------------------------------------------------------------
def _xml_change_zone_from(self, xml):
xml.append(E('from', JXML.REPLACE, E.zone(self.should['zone_from'])))
return True
# -----------------------------------------------------------------------
# Resource List, Catalog
# -- only executed by 'manager' resources
# -----------------------------------------------------------------------
def _r_list(self):
get = E.security(E.nat(E.static(
E('rule-set', JXML.NAMES_ONLY)
)))
got = self.D.rpc.get_config(get)
self._rlist = [name.text for name in got.xpath('.//name')]
def _r_catalog(self):
get = E.security(E.nat(E.static(
E('rule-set')
)))
got = self.D.rpc.get_config(get)
for ruleset in got.xpath('.//rule-set'):
name = ruleset.find("name").text
self._rcatalog[name] = {}
self._xml_to_py(ruleset, self._rcatalog[name])
| {
"repo_name": "dgjnpr/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/srx/nat/nat_st_ruleset.py",
"copies": "1",
"size": "2885",
"license": "apache-2.0",
"hash": 5260945967243561000,
"line_mean": 31.0555555556,
"line_max": 78,
"alpha_frac": 0.4651646447,
"autogenerated": false,
"ratio": 3.8211920529801326,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47863566976801325,
"avg_score": null,
"num_lines": null
} |
# 3rd-party modules
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
from jnpr.junos.cfg.user_ssh_key import UserSSHKey
class User(Resource):
"""
[edit system login user <name>]
Resource name: str
<name> is the user login name
Manages resources:
sshkey, UserSSHKey
"""
PROPERTIES = [
'uid',
'fullname', # the full-name field
'userclass', # user class
'password', # write-only clear-text password, will get crypt'd
'$password', # read-only crypt'd password
'$sshkeys', # read-only names of ssh-keys
]
MANAGES = {'sshkey': UserSSHKey}
# -----------------------------------------------------------------------
# XML readers
# -----------------------------------------------------------------------
def _xml_at_top(self):
return E.system(E.login(E.user(E.name(self._name))))
def _xml_at_res(self, xml):
return xml.find('.//user')
def _xml_to_py(self, has_xml, has_py):
Resource._r_has_xml_status(has_xml, has_py)
has_py['userclass'] = has_xml.findtext('class')
Resource.copyifexists(has_xml, 'full-name', has_py, 'fullname')
Resource.copyifexists(has_xml, 'uid', has_py)
if 'uid' in has_py:
has_py['uid'] = int(has_py['uid'])
auth = has_xml.find('authentication')
if auth is not None:
            # crypt'd password (the XML element is <encrypted-password>)
Resource.copyifexists(
auth,
'encrypted-password',
has_py,
'$password')
# ssh-keys
sshkeys = auth.xpath('ssh-rsa | ssh-dsa')
if sshkeys is not None:
has_py['$sshkeys'] = [(sshkey.tag,
sshkey.findtext('name').strip())
for sshkey in sshkeys
]
# -----------------------------------------------------------------------
# XML property writers
# -----------------------------------------------------------------------
def _xml_change_fullname(self, xml):
xml.append(E('full-name', self['fullname']))
return True
def _xml_change_userclass(self, xml):
xml.append(E('class', self['userclass']))
return True
def _xml_change_password(self, xml):
xml.append(E.authentication(
E('plain-text-password-value', self['password'])
))
return True
def _xml_change_uid(self, xml):
xml.append(E.uid(str(self['uid'])))
return True
# -----------------------------------------------------------------------
# Manager List, Catalog
# -----------------------------------------------------------------------
def _r_list(self):
get = E.system(E.login(E.user(JXML.NAMES_ONLY)))
got = self.R.get_config(get)
self._rlist = [name.text for name in got.xpath('.//user/name')]
| {
"repo_name": "shermdog/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/user.py",
"copies": "10",
"size": "3071",
"license": "apache-2.0",
"hash": -5571600771662971000,
"line_mean": 29.71,
"line_max": 77,
"alpha_frac": 0.4562031911,
"autogenerated": false,
"ratio": 4.2300275482093666,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 100
} |
# 3rd-party modules
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
class Application(Resource):
"""
[edit applications application <name>]
Resource name: str
<name> is the application item name
"""
PROPERTIES = [
'description',
'protocol',
'dest_port',
'timeout'
]
# -----------------------------------------------------------------------
# XML readers
# -----------------------------------------------------------------------
def _xml_at_top(self):
return E.applications(E.application(E.name(self._name)))
def _xml_at_res(self, xml):
return xml.find('.//application')
def _xml_to_py(self, has_xml, has_py):
Resource._r_has_xml_status(has_xml, has_py)
Resource.copyifexists(has_xml, 'description', has_py)
has_py['protocol'] = has_xml.findtext('protocol')
Resource.copyifexists(has_xml, 'destination-port', has_py, 'dest_port')
Resource.copyifexists(has_xml, 'inactivity-timeout', has_py, 'timeout')
if 'timeout' in has_py and has_py['timeout'] != 'never':
has_py['timeout'] = int(has_py['timeout'])
# -----------------------------------------------------------------------
# XML property writers
# -----------------------------------------------------------------------
def _xml_change_protocol(self, xml):
xml.append(E.protocol(self['protocol']))
return True
def _xml_change_dest_port(self, xml):
"""
destination-port could be a single value or a range.
handle the case where the value could be provided as either
a single int value or a string range, e.g. "1-27"
"""
value = self['dest_port']
if isinstance(value, int):
value = str(value)
xml.append(E('destination-port', value))
return True
def _xml_change_timeout(self, xml):
xml.append(E('inactivity-timeout', str(self['timeout'])))
return True
# -----------------------------------------------------------------------
# Manager List, Catalog
# -----------------------------------------------------------------------
def _r_list(self):
get = E.applications(E.application(JXML.NAMES_ONLY))
got = self.R.get_config(get)
self._rlist = [
app.text for app in got.xpath('applications/application/name')]
def _r_catalog(self):
get = E.applications(E.application())
got = self.R.get_config(get)
for app in got.xpath('applications/application'):
name = app.findtext('name')
self._rcatalog[name] = {}
self._xml_to_py(app, self._rcatalog[name])
| {
"repo_name": "dgjnpr/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/srx/app.py",
"copies": "1",
"size": "2795",
"license": "apache-2.0",
"hash": -1236523324711293200,
"line_mean": 32.2738095238,
"line_max": 79,
"alpha_frac": 0.4973166369,
"autogenerated": false,
"ratio": 4.222054380664653,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 84
} |
# 3rd-party modules
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
class NatSrcPool(Resource):
"""
[edit security nat source pool <name>]
"""
PROPERTIES = [
'addr_from',
'addr_to'
]
def _xml_at_top(self):
"""
configuration to retrieve resource
"""
return E.security(E.nat(E.source(E.pool(E.name(self.name)))))
# -----------------------------------------------------------------------
# XML read
# -----------------------------------------------------------------------
def _xml_at_res(self, xml):
"""
return Element at resource
"""
return xml.find('.//pool')
def _xml_to_py(self, as_xml, to_py):
"""
converts Junos XML to native Python
"""
Resource._r_has_xml_status(as_xml, to_py)
to_py['addr_from'] = as_xml.find('address/name').text
to_py['addr_to'] = as_xml.find('address/to/ipaddr').text
# -----------------------------------------------------------------------
# XML property writers
# -----------------------------------------------------------------------
def _xml_change_addr_from(self, xml):
# we need to always set the address/name given the structure of the
# Junos configuration XML, derp.
addr_from = self.should.get('addr_from') or self.has.get('addr_from')
xml.append(E.address(JXML.REPLACE, E.name(addr_from)))
return True
def _xml_change_addr_to(self, xml):
# we must always include the addr_from, so if we didn't expliclity
# change it, we must do it now.
if 'addr_from' not in self.should:
self._xml_change_addr_from(xml)
x_addr = xml.find('address')
x_addr.append(E.to(E.ipaddr(self.should['addr_to'])))
return True
# -----------------------------------------------------------------------
# Resource List, Catalog
# -- only executed by 'manager' resources
# -----------------------------------------------------------------------
def _r_list(self):
"""
        build the source NAT pool resource list from the configuration
"""
get = E.security(E.nat(E.source(
E.pool(JXML.NAMES_ONLY))))
got = self.D.rpc.get_config(get)
self._rlist = [name.text for name in got.xpath('.//pool/name')]
def _r_catalog(self):
        get = E.security(E.nat(E.source(E.pool())))
got = self.D.rpc.get_config(get)
for pool in got.xpath('.//pool'):
name = pool.find('name').text
self._rcatalog[name] = {}
self._xml_to_py(pool, self._rcatalog[name])
| {
"repo_name": "dgjnpr/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/srx/nat/nat_src_pool.py",
"copies": "1",
"size": "2799",
"license": "apache-2.0",
"hash": 5408513588385456000,
"line_mean": 30.4494382022,
"line_max": 77,
"alpha_frac": 0.4758842444,
"autogenerated": false,
"ratio": 4.03314121037464,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.500902545477464,
"avg_score": null,
"num_lines": null
} |
# 3rd-party modules
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
class NatSrcRule(Resource):
"""
[edit security nat source rule-set <ruleset-name> rule <rule-name>]
"""
PROPERTIES = [
"match_src_addr",
"match_dst_addr",
"pool"
]
# -----------------------------------------------------------------------
# XML read
# -----------------------------------------------------------------------
def _xml_at_top(self):
return E.security(E.nat(E.source(
E('rule-set',
E.name(self.P._name),
E.rule(E.name(self._name))
)
)))
def _xml_at_res(self, xml):
return xml.find('.//rule')
def _xml_to_py(self, as_xml, to_py):
"""
converts Junos XML to native Python
"""
Resource._r_has_xml_status(as_xml, to_py)
e = as_xml.find('src-nat-rule-match')
to_py['match_src_addr'] = e.find('source-address').text
to_py['match_dst_addr'] = e.find('destination-address').text
to_py['pool'] = as_xml.find('.//pool-name').text
# -----------------------------------------------------------------------
# XML write
# -----------------------------------------------------------------------
def _xml_hook_build_change_begin(self, xml):
"""
when doing a write, assign default values if they are not present
"""
def _default_to(prop, value):
if prop not in self.should:
self.should[prop] = value
if self.is_new:
_default_to('match_dst_addr', '0.0.0.0/0')
_default_to('match_src_addr', '0.0.0.0/0')
def _xml_change_match_src_addr(self, xml):
xml.append(E('src-nat-rule-match',
E('source-address',
JXML.REPLACE,
self.should['match_src_addr'])
))
return True
def _xml_change_match_dst_addr(self, xml):
xml.append(E('src-nat-rule-match',
E('destination-address',
JXML.REPLACE,
self.should['match_dst_addr'])
))
return True
def _xml_change_pool(self, xml):
xml.append(E.then(
E('source-nat', E.pool(E('pool-name', self.should['pool'])))
))
return True
# -----------------------------------------------------------------------
# Manager List, Catalog
# -----------------------------------------------------------------------
def _r_list(self):
self._rlist = self.P['$rules']
def _r_catalog(self):
get = E.security(E.nat(E.source(
E('rule-set',
E.name(self.P._name),
)
)))
got = self.D.rpc.get_config(get)
for rule in got.xpath('.//rule'):
name = rule.find('name').text
self._rcatalog[name] = {}
self._xml_to_py(rule, self._rcatalog[name])
| {
"repo_name": "dgjnpr/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/srx/nat/nat_src_rule.py",
"copies": "1",
"size": "3096",
"license": "apache-2.0",
"hash": 5122419344150648000,
"line_mean": 29.6534653465,
"line_max": 77,
"alpha_frac": 0.4253875969,
"autogenerated": false,
"ratio": 4,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49253875969,
"avg_score": null,
"num_lines": null
} |
# 3rd-party modules
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
class SharedAddrBookAddr(Resource):
"""
[edit security address-book <ab_name> address <name>]
Resource name: str
<name> is the address item name
Managed by: SharedAddrBook
<ab_name> is the address book name, taken from parent resource
"""
PROPERTIES = [
'description',
'ip_prefix',
]
def _xml_at_top(self):
xml = self.P._xml_at_top()
xml.find('.//address-book').append(E.address(self._name))
return xml
# -----------------------------------------------------------------------
# XML reading
# -----------------------------------------------------------------------
def _xml_at_res(self, xml):
return xml.find('.//address-book/address')
def _xml_to_py(self, as_xml, to_py):
Resource._r_has_xml_status(as_xml, to_py)
Resource.copyifexists(as_xml, 'description', to_py)
to_py['ip_prefix'] = as_xml.find('ip-prefix').text
# -----------------------------------------------------------------------
# XML writing
# -----------------------------------------------------------------------
def _xml_change_ip_prefix(self, xml):
xml.append(E('ip-prefix', self.should['ip_prefix']))
return True
# -----------------------------------------------------------------------
# Manager List, Catalog
# -----------------------------------------------------------------------
def _r_list(self):
# The parent keeps a property on this list, so just use it, yo!
self._rlist = self.P['$addrs']
def _r_catalog(self):
get = self.P._xml_at_top()
get.find('.//address-book').append(E('address'))
got = self.D.rpc.get_config(get)
for addr in got.xpath('.//address-book/address'):
name = addr.find('name').text
self._rcatalog[name] = {}
self._xml_to_py(addr, self._rcatalog[name])
| {
"repo_name": "dgjnpr/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/srx/shared_ab_addr.py",
"copies": "1",
"size": "2074",
"license": "apache-2.0",
"hash": -850692669088859600,
"line_mean": 30.9076923077,
"line_max": 77,
"alpha_frac": 0.4590163934,
"autogenerated": false,
"ratio": 4.173038229376258,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5132054622776258,
"avg_score": null,
"num_lines": null
} |
# 3rd-party modules
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
class SharedAddrBookSet(Resource):
"""
[edit security address-book <ab_name> address-set <name>]
~! WARNING !~
    This resource is managed only as a child of the :SharedAddrBook:
    resource. Do not create a manager instance of this class directly.
"""
PROPERTIES = [
'description',
'addr_list', # list of address items
'set_list', # sets can contain a list of sets
]
def _xml_at_top(self):
xml = self.P._xml_at_top()
xml.find('.//address-book').append(
E('address-set', E.name(self._name))
)
return xml
# -----------------------------------------------------------------------
# XML reading
# -----------------------------------------------------------------------
def _xml_at_res(self, xml):
return xml.find('.//address-book/address-set')
def _xml_to_py(self, as_xml, to_py):
Resource._r_has_xml_status(as_xml, to_py)
Resource.copyifexists(as_xml, 'description', to_py)
to_py['addr_list'] = [
name.text for name in as_xml.xpath('address/name')]
to_py['set_list'] = [
name.text for name in as_xml.xpath('address-set/name')]
# -----------------------------------------------------------------------
# XML writing
# -----------------------------------------------------------------------
def _xml_change_addr_list(self, xml):
self._xml_list_property_add_del_names(xml,
prop_name='addr_list',
element_name='address')
return True
def _xml_change_set_list(self, xml):
self._xml_list_property_add_del_names(xml,
prop_name='set_list',
element_name='address-set')
return True
# -----------------------------------------------------------------------
# Manager List, Catalog
# -----------------------------------------------------------------------
def _r_list(self):
# list of address-book address-sets. this list is managed by the
# parent object, so just use that, yo!
self._rlist = self.P['$sets']
def _r_catalog(self):
get = self.P._xml_at_top()
get.find('.//address-book').append(E('address-set'))
got = self.D.rpc.get_config(get)
for adrset in got.xpath('.//address-set'):
name = adrset.find('name').text
self._rcatalog[name] = {}
self._xml_to_py(adrset, self._rcatalog[name])
| {
"repo_name": "dgjnpr/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/srx/shared_ab_set.py",
"copies": "1",
"size": "2772",
"license": "apache-2.0",
"hash": 1734087912708846800,
"line_mean": 34.5384615385,
"line_max": 77,
"alpha_frac": 0.4512987013,
"autogenerated": false,
"ratio": 4.258064516129032,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 78
} |
# 3rd-party modules
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
class ZoneAddrBookAddr(Resource):
"""
[edit security zone security-zone <zone> address-book address <name>]
Resource name: str
<name> is the name of the address item
Managed by: ZoneAddrBook
<zone> is the name of the security zone
"""
PROPERTIES = [
'description',
'ip_prefix',
]
def _xml_at_top(self):
return E.security(E.zones(
E('security-zone',
E.name(self.P._name),
E('address-book', E.address(self._name))
)
))
# -----------------------------------------------------------------------
# XML reading
# -----------------------------------------------------------------------
def _xml_at_res(self, xml):
return xml.find('.//address-book/address')
def _xml_to_py(self, as_xml, to_py):
Resource._r_has_xml_status(as_xml, to_py)
Resource.copyifexists(as_xml, 'description', to_py)
to_py['ip_prefix'] = as_xml.find('ip-prefix').text
# -----------------------------------------------------------------------
# XML writing
# -----------------------------------------------------------------------
def _xml_change_ip_prefix(self, xml):
xml.append(E('ip-prefix', self.should['ip_prefix']))
return True
# -----------------------------------------------------------------------
# Manager List, Catalog
# -----------------------------------------------------------------------
def _r_list(self):
# The parent keeps a property on this list, so just use it, yo!
self._rlist = self.P['$addrs']
def _r_catalog(self):
get = E.security(E.zones(
E('security-zone',
E.name(self.P._name),
E('address-book', E('address'))
)
))
got = self.D.rpc.get_config(get)
for addr in got.xpath('.//address-book/address'):
name = addr.find('name').text
self._rcatalog[name] = {}
self._xml_to_py(addr, self._rcatalog[name])
| {
"repo_name": "dgjnpr/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/srx/zone_ab_addr.py",
"copies": "1",
"size": "2218",
"license": "apache-2.0",
"hash": 3161191761403918000,
"line_mean": 29.3835616438,
"line_max": 77,
"alpha_frac": 0.4400360685,
"autogenerated": false,
"ratio": 4.2247619047619045,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 73
} |
# 3rd-party modules
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
class ZoneAddrBookSet(Resource):
"""
[edit security zone security-zone <zone> address-book address-set <name>]
Resource name: str
    <name> is the name of the address set
    Managed by: ZoneAddrBook
    <zone> is the name of the security zone
"""
PROPERTIES = [
'description',
'addr_list', # list of address items
'set_list', # sets can contain a list of sets
]
def _xml_at_top(self):
return E.security(E.zones(
E('security-zone',
E.name(self.P._name),
E('address-book',
E('address-set', E.name(self._name))
)
)
))
# -----------------------------------------------------------------------
# XML reading
# -----------------------------------------------------------------------
def _xml_at_res(self, xml):
return xml.find('.//address-book/address-set')
def _xml_to_py(self, as_xml, to_py):
Resource._r_has_xml_status(as_xml, to_py)
Resource.copyifexists(as_xml, 'description', to_py)
to_py['addr_list'] = [
name.text for name in as_xml.xpath('address/name')]
to_py['set_list'] = [
name.text for name in as_xml.xpath('address-set/name')]
# -----------------------------------------------------------------------
# XML writing
# -----------------------------------------------------------------------
def _xml_change_addr_list(self, xml):
self._xml_list_property_add_del_names(xml,
prop_name='addr_list',
element_name='address')
return True
def _xml_change_set_list(self, xml):
self._xml_list_property_add_del_names(xml,
prop_name='set_list',
element_name='address-set')
return True
# -----------------------------------------------------------------------
# Manager List, Catalog
# -----------------------------------------------------------------------
def _r_list(self):
# list of address-book address-sets. this list is managed by the
# parent object, so just use that, yo!
self._rlist = self.P['$sets']
def _r_catalog(self):
get = E.security(E.zones(
E('security-zone',
E.name(self.P._name),
E('address-book',
E('address-set')
)
)
))
got = self.D.rpc.get_config(get)
for adrset in got.xpath('.//address-set'):
name = adrset.find('name').text
self._rcatalog[name] = {}
self._xml_to_py(adrset, self._rcatalog[name])
| {
"repo_name": "dgjnpr/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/srx/zone_ab_set.py",
"copies": "1",
"size": "2965",
"license": "apache-2.0",
"hash": -2956274465226112000,
"line_mean": 31.9444444444,
"line_max": 77,
"alpha_frac": 0.4300168634,
"autogenerated": false,
"ratio": 4.322157434402333,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 90
} |
# 3rd-party modules
from lxml.builder import E
# module packages
from jnpr.junos.cfg.srx.policy_rule import PolicyRule
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
class PolicyContext(Resource):
"""
[edit security policy from-zone <from_zone> to-zone <to_zone>]
Resource name: tuple(from_zone, to_zone)
<from_zone> is the name of the From zone
<to_zone> is the name of the To zone
Manages Resources:
rule, PolicyRule
"""
PROPERTIES = [
'$rules',
'$rules_count'
]
def __init__(self, junos, name=None, **kvargs):
if name is None:
# resource-manager
Resource.__init__(self, junos, name, **kvargs)
return
# specific instance will manage policy rules
self.rule = PolicyRule(junos, M=self, parent=self)
self._manages = ['rule']
self._name_from_zone = name[0]
self._name_to_zone = name[1]
Resource.__init__(self, junos, name, **kvargs)
def _xml_at_top(self):
return E.security(E.policies(
E.policy(
E('from-zone-name', self._name_from_zone),
E('to-zone-name', self._name_to_zone)
)))
# -------------------------------------------------------------------------
# XML reading
# -------------------------------------------------------------------------
def _xml_config_read(self):
"""
~! OVERLOADS !~
"""
xml = self._xml_at_top()
xml.find('.//policy').append(E.policy(JXML.NAMES_ONLY))
return self._junos.rpc.get_config(xml)
def _xml_at_res(self, xml):
return xml.find('.//policy')
def _xml_to_py(self, as_xml, to_py):
Resource._r_has_xml_status(as_xml, to_py)
to_py['$rules'] = [
policy.text for policy in as_xml.xpath('.//policy/name')]
to_py['$rules_count'] = len(to_py['$rules'])
# -----------------------------------------------------------------------
# Resource List, Catalog
# -- only executed by 'manager' resources
# -----------------------------------------------------------------------
def _r_list(self):
"""
build the policy context resource list from the command:
> show security policies zone-context
"""
got = self._junos.rpc.get_firewall_policies(zone_context=True)
for pc in got.xpath('//policy-zone-context/policy-zone-context-entry'):
from_zone = pc.find('policy-zone-context-from-zone').text
to_zone = pc.find('policy-zone-context-to-zone').text
self._rlist.append((from_zone, to_zone))
def _r_catalog(self):
got = self._junos.rpc.get_firewall_policies(zone_context=True)
for pc in got.xpath('//policy-zone-context/policy-zone-context-entry'):
from_zone = pc.find('policy-zone-context-from-zone').text
to_zone = pc.find('policy-zone-context-to-zone').text
count = int(pc.find('policy-zone-context-policy-count').text)
self._rcatalog[(from_zone, to_zone)] = {'$rules_count': count}
| {
"repo_name": "dgjnpr/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/srx/policy.py",
"copies": "1",
"size": "3160",
"license": "apache-2.0",
"hash": -1563588817259212000,
"line_mean": 32.9784946237,
"line_max": 79,
"alpha_frac": 0.5231012658,
"autogenerated": false,
"ratio": 3.8349514563106797,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.985805272211068,
"avg_score": 0,
"num_lines": 93
} |
# 3rd party modules
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class OptionsMenu(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
self.player_list = QListWidget()
player_box = QHBoxLayout()
player_box.addWidget(self.player_list)
self.add_player_btn = QPushButton(QIcon('images/add.png'), '')
self.clear_player_btn = QPushButton(QIcon('images/delete.png'), '')
button_box = QVBoxLayout()
button_box.addWidget(self.add_player_btn)
button_box.addWidget(QPushButton(QIcon('images/user_edit.png'), ''))
button_box.addWidget(self.clear_player_btn)
button_box.addStretch()
player_box.addLayout(button_box)
player_list_gb = QGroupBox('Player List:')
player_list_gb.setLayout(player_box)
# Create the "Game Options" options
self.rounds_sb = QSpinBox()
self.rounds_sb.setRange(0, 1000)
self.rounds_sb.setSingleStep(1)
self.rounds_sb.setValue(10)
self.iterations_sb = QSpinBox()
self.iterations_sb.setRange(0, 100000)
        self.iterations_sb.setSingleStep(1)  # integer spin box: step by whole iterations
self.iterations_sb.setValue(30)
game_options_box = QGridLayout()
game_options_box.addWidget(QLabel('Rounds per Game'), 0, 0)
game_options_box.addWidget(self.rounds_sb, 0, 1)
game_options_box.addWidget(QLabel('Iterations'), 1, 0)
game_options_box.addWidget(self.iterations_sb, 1, 1)
game_option_gb = QGroupBox('Game Options:')
game_option_gb.setLayout(game_options_box)
# Create the "Graph Options" options
self.legend_cb = QCheckBox('Show Legend')
self.legend_cb.setChecked(True)
self.connect(self.legend_cb, SIGNAL('stateChanged(int)'), self.legend_change)
self.grid_cb = QCheckBox('Show Grid')
self.grid_cb.setChecked(True)
self.legend_loc_lbl = QLabel('Legend Location')
        self.legend_loc_cb = QComboBox()
self.legend_loc_cb.addItems(
[x.title() for x in [
'right',
'center',
'lower left',
'center right',
'upper left',
'center left',
'upper right',
'lower right',
'upper center',
'lower center',
'best'
]
]
)
self.legend_loc_cb.setCurrentIndex(6)
cb_box = QHBoxLayout()
cb_box.addWidget(self.legend_cb)
cb_box.addWidget(self.grid_cb)
cb_box.addStretch()
legend_box = QHBoxLayout()
legend_box.addWidget(self.legend_loc_cb)
legend_box.addStretch()
graph_box = QVBoxLayout()
graph_box.addLayout(cb_box)
graph_box.addWidget(self.legend_loc_lbl)
graph_box.addLayout(legend_box)
graph_gb = QGroupBox('Graph Options:')
graph_gb.setLayout(graph_box)
self.update_btn = QPushButton(QIcon('images/calculator.png'), 'Run Iterations')
# Create the main layout
container = QVBoxLayout()
container.addWidget(player_list_gb)
container.addWidget(game_option_gb)
container.addWidget(graph_gb)
container.addWidget(self.update_btn)
container.addStretch()
self.setLayout(container)
def legend_change(self):
self.legend_loc_cb.setEnabled(self.legend_cb.isChecked())
self.legend_loc_lbl.setEnabled(self.legend_cb.isChecked())
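if __name__ == '__main__':
    # Editorial sketch, not part of the original module: a minimal harness to
    # display the OptionsMenu widget on its own so the layout can be inspected
    # without the rest of the application.
    import sys
    app = QApplication(sys.argv)
    menu = OptionsMenu()
    menu.show()
    sys.exit(app.exec_())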
| {
"repo_name": "cdodd/prisoners-dilemma-simulator",
"path": "optionsmenu.py",
"copies": "1",
"size": "3587",
"license": "mit",
"hash": 3283142860762167300,
"line_mean": 33.8252427184,
"line_max": 87,
"alpha_frac": 0.5971563981,
"autogenerated": false,
"ratio": 3.771819137749737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9862658331942784,
"avg_score": 0.001263440781390657,
"num_lines": 103
} |
# 3rd Party Modules
import datetime
import os
from rq import Queue
from .run_worker_all import conn as qconn
from slot import db_fieldbook
from flask import request, redirect, render_template, json
import config
from . import utils
from slot.main import app
from slot import messaging
# Set up RQ queue to add background tasks to
q_sms = Queue('sms', connection=qconn)
q_db = Queue('db', connection=qconn)
q_request = Queue('request', connection=qconn)
q = Queue(connection=qconn)
def dashboard():
ops = db_fieldbook.get_all_opportunities()
for op in ops:
if op["status"] == "Accepted":
op["class"] = "success"
elif op["status"] == "Offered":
op["class"] = "info"
elif op["status"] == "Expired":
op["class"] = "active"
elif op["status"] == "Attended":
op["class"] = "active"
elif op["status"] == "Not Attended":
op["class"] = "active"
op["remaining_mins"] = int(int(op["expiry_time"] - utils.timestamp_to_ticks(datetime.datetime.utcnow())) / 60)
return render_template('dashboard.html',
ops=ops,
dash_refresh_timeout=config.dash_refresh_timeout,
instance_name=config.INSTANCE_NAME)
def receive_feedback():
if request.method == 'POST':
print(request.form)
feedback_text = request.form['feedback_text']
q_db.enqueue(db_fieldbook.add_feedback,
feedback_text)
return redirect('/dashboard', code=302)
else:
return render_template('feedback.html')
def render_new_procedure_form():
if request.method == 'POST':
print(request.form)
opportunity_doctor = request.form['doctor']
opportunity_procedure = request.form['procedure']
opportunity_location = request.form['location']
opportunity_duration = request.form['duration']
opportunity = {
'doctor': opportunity_doctor,
'procedure': opportunity_procedure,
'location': opportunity_location,
'duration': opportunity_duration
}
print(opportunity)
ref_id, new_op = db_fieldbook.add_opportunity(opportunity)
expiry_time = datetime.datetime.fromtimestamp(new_op['expiry_time']).strftime("%H:%M")
number_messages_sent, message_ref = messaging.broadcast_procedure(opportunity_procedure,
opportunity_location,
opportunity_doctor,
ref_id,
expiry_time)
offer = db_fieldbook.add_offer(ref_id, number_messages_sent)
print(offer['id'])
print(json.dumps(opportunity))
return redirect('/dashboard', code=302)
else:
procedures = db_fieldbook.get_procedures()
locations = db_fieldbook.get_locations()
timeframes = db_fieldbook.get_timeframes()
doctors = db_fieldbook.get_doctors()
return render_template('new_procedure.html', procedures=procedures, locations=locations,
timeframes=timeframes, doctors=doctors)
def receive_sms():
sms = {
'service_number': str(request.form['To']),
'mobile': str(request.form['From']),
'message': str(request.form['Body'])
}
# Add a log entry for the received message
q_db.enqueue(db_fieldbook.add_sms_log,
sms['mobile'],
sms['service_number'],
sms['message'], 'IN')
app.logger.debug("Received SMS: \n"
"Service Number: {0}\n"
"Mobile: {1}\n"
"Message: {2}\n".format(
sms['service_number'],
sms['mobile'],
sms['message']))
# Check the message to see if it is an opt-out request
if sms['message'].upper() in ['STOP', 'STOPALL', 'UNSUBSCRIBE', 'CANCEL', 'END', 'QUIT']:
q_db.enqueue(messaging.request_opt_out,
sms['mobile'])
return '<Response></Response>'
# And check the message to see if it is an opt-in request
elif sms['message'].upper() in ['START', 'YES']:
q_db.enqueue(messaging.request_opt_in,
sms['mobile'])
return '<Response></Response>'
# Else assume it is a request for an opportunity
else:
# Process the procedure request
q_request.enqueue(messaging.request_procedure,
sms['mobile'],
sms['message'])
# Return a successful response to Twilio regardless of the outcome of the procedure request
return '<Response></Response>'
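# Editorial note (sketch, not part of the original module): receive_sms() expects
# the standard Twilio inbound-SMS POST fields read above, roughly:
#   To   -> the service number that received the message
#   From -> the sender's mobile number
#   Body -> the message text; STOP/START style keywords toggle opt-out/opt-in,
#           anything else is treated as a request for an opportunity.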
def complete_procedure():
completed_id = request.form['id']
if request.form['attended_status'] == "Attended":
attended_status = True
else:
attended_status = False
print(str(completed_id))
print(str(attended_status))
db_fieldbook.complete_opportunity(completed_id, attended_status)
return redirect('/dashboard', code=302)
if __name__ == '__main__':
app.debug = config.debug_mode
port = int(os.environ.get("PORT", 5000))
print(str.format("Debug Mode is: {0}", app.debug))
app.run(
host="0.0.0.0",
        port=port
)
| {
"repo_name": "nhshd-slot/SLOT",
"path": "slot/controller.py",
"copies": "1",
"size": "5507",
"license": "mit",
"hash": -1743624768873025500,
"line_mean": 32.1746987952,
"line_max": 118,
"alpha_frac": 0.5598329399,
"autogenerated": false,
"ratio": 4.162509448223734,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5222342388123733,
"avg_score": null,
"num_lines": null
} |
# 3rd party modules
import PyQt4.QtCore as QtCore
import PyQt4.QtGui as QtGui
class OptionsMenu(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
# Create the "Lotka-Volterra Coefficients" options
self.a_sb = QtGui.QDoubleSpinBox()
self.b_sb = QtGui.QDoubleSpinBox()
self.c_sb = QtGui.QDoubleSpinBox()
self.d_sb = QtGui.QDoubleSpinBox()
for widget in (self.a_sb, self.b_sb, self.c_sb, self.d_sb):
widget.setRange(0, 10)
widget.setSingleStep(0.1)
coeff_grid = QtGui.QGridLayout()
coeff_grid.addWidget(QtGui.QLabel('Prey Growth Rate'), 0, 0)
coeff_grid.addWidget(self.a_sb, 0, 1)
coeff_grid.addWidget(QtGui.QLabel('Predation Death Rate'), 1, 0)
coeff_grid.addWidget(self.b_sb, 1, 1)
coeff_grid.addWidget(QtGui.QLabel('Predator Death Rate'), 2, 0)
coeff_grid.addWidget(self.c_sb, 2, 1)
coeff_grid.addWidget(QtGui.QLabel('Predator Reproduction Rate'), 3, 0)
coeff_grid.addWidget(self.d_sb, 3, 1)
coeff_gb = QtGui.QGroupBox('Lotka-Volterra Coefficients:')
coeff_gb.setLayout(coeff_grid)
# Create the "Other Parameters" options
self.predator_sb = QtGui.QDoubleSpinBox()
self.predator_sb.setRange(0, 100000)
self.predator_sb.setSingleStep(1)
self.prey_sb = QtGui.QDoubleSpinBox()
self.prey_sb.setRange(0, 100000)
self.prey_sb.setSingleStep(1)
self.iterations_sb = QtGui.QSpinBox()
self.iterations_sb.setRange(0, 100000)
self.iterations_sb.setSingleStep(100)
self.timedelta_sb = QtGui.QDoubleSpinBox()
self.timedelta_sb.setRange(0, 100)
self.timedelta_sb.setSingleStep(0.05)
other_grid = QtGui.QGridLayout()
other_grid.addWidget(QtGui.QLabel('Predator Population'), 0, 0)
other_grid.addWidget(self.predator_sb, 0, 1)
other_grid.addWidget(QtGui.QLabel('Prey Population'), 1, 0)
other_grid.addWidget(self.prey_sb, 1, 1)
other_grid.addWidget(QtGui.QLabel('Iterations'), 2, 0)
other_grid.addWidget(self.iterations_sb, 2, 1)
other_grid.addWidget(QtGui.QLabel('Time Delta'), 3, 0)
other_grid.addWidget(self.timedelta_sb, 3, 1)
other_gb = QtGui.QGroupBox('Other Parameters:')
other_gb.setLayout(other_grid)
# Create the "Graph Options" options
self.legend_cb = QtGui.QCheckBox('Show Legend')
self.legend_cb.setChecked(True)
self.connect(self.legend_cb, QtCore.SIGNAL(
'stateChanged(int)'),
self.legend_change,
)
self.grid_cb = QtGui.QCheckBox('Show Grid')
self.grid_cb.setChecked(True)
self.legend_loc_lbl = QtGui.QLabel('Legend Location')
self.legend_loc_cb = QtGui.QComboBox()
self.legend_loc_cb.addItems([x.title() for x in [
'right',
'center',
'lower left',
'center right',
'upper left',
'center left',
'upper right',
'lower right',
'upper center',
'lower center',
'best',
]])
self.legend_loc_cb.setCurrentIndex(6)
cb_box = QtGui.QHBoxLayout()
cb_box.addWidget(self.legend_cb)
cb_box.addWidget(self.grid_cb)
legend_box = QtGui.QHBoxLayout()
legend_box.addWidget(self.legend_loc_cb)
legend_box.addStretch()
graph_box = QtGui.QVBoxLayout()
graph_box.addLayout(cb_box)
graph_box.addWidget(self.legend_loc_lbl)
graph_box.addLayout(legend_box)
graph_gb = QtGui.QGroupBox('Graph Options:')
graph_gb.setLayout(graph_box)
# Create the update/reset buttons
self.update_btn = QtGui.QPushButton(
QtGui.QIcon(':/resources/calculator.png'),
'Run Iterations',
)
self.reset_values_btn = QtGui.QPushButton(
QtGui.QIcon(':/resources/arrow_undo.png'),
'Reset Values',
)
self.clear_graph_btn = QtGui.QPushButton(
QtGui.QIcon(':/resources/chart_line_delete.png'),
'Clear Graph',
)
self.connect(self.reset_values_btn, QtCore.SIGNAL(
'clicked()'),
self.reset_values,
)
# Create the main layout
container = QtGui.QVBoxLayout()
container.addWidget(coeff_gb)
container.addWidget(other_gb)
container.addWidget(graph_gb)
container.addWidget(self.update_btn)
container.addStretch()
container.addWidget(self.reset_values_btn)
container.addWidget(self.clear_graph_btn)
self.setLayout(container)
# Populate the widgets with values
self.reset_values()
def reset_values(self):
"""
Sets the default values of the option widgets.
"""
self.a_sb.setValue(1.0)
self.b_sb.setValue(0.1)
self.c_sb.setValue(1.0)
self.d_sb.setValue(0.075)
self.predator_sb.setValue(5)
self.prey_sb.setValue(10)
self.iterations_sb.setValue(1000)
self.timedelta_sb.setValue(0.02)
def legend_change(self):
self.legend_loc_cb.setEnabled(self.legend_cb.isChecked())
self.legend_loc_lbl.setEnabled(self.legend_cb.isChecked())
| {
"repo_name": "cdodd/lotka-volterra-plotter",
"path": "options_menu.py",
"copies": "1",
"size": "5396",
"license": "mit",
"hash": 6142938208820558000,
"line_mean": 34.5,
"line_max": 78,
"alpha_frac": 0.6030392884,
"autogenerated": false,
"ratio": 3.599733155436958,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9702772443836958,
"avg_score": 0,
"num_lines": 152
} |
# 3rd party modules
import matplotlib.pyplot as plt
import numpy as np
# local modules
from callables import Constant
from callables import Circular
import draw
import wt_oscillators
def main():
sr = 1000
dur = 1.0
#--------------------------------------------------------------------------
# drawn waveforms
_3_hz = Constant(3)
x = draw.sine(sr, dur, _3_hz)
plt.figure()
plt.plot(x)
plt.grid(True)
plt.title('Sine wave: f = %d Hz' % _3_hz())
x = draw.sine(sr, dur, _3_hz, Constant(0.5))
plt.figure()
plt.plot(x)
plt.grid(True)
plt.title('Cosine wave: f = %d Hz' % _3_hz())
f_3_10 = Circular(draw.line(sr, dur, 3, 10))
x = draw.sine(sr, dur, f_3_10)
plt.figure()
plt.plot(x)
plt.grid(True)
plt.title('Sine wave: f = 3-10 Hz')
phase = Circular(draw.line(sr, dur, 0.0, 1.0))
x = draw.sine(sr, dur, _3_hz, phase)
plt.figure()
plt.plot(x)
plt.grid(True)
plt.title('Sine wave: f = 3, phase = 0 - 180 deg')
plt.show()
#--------------------------------------------------------------------------
# wave table gen Nearest
    # create 3 tables, each holding one cycle
_1_hz = Constant(1.0)
table_1000 = draw.sine(1000, 1000, _1_hz)
table_500 = draw.sine( 500, 500, _1_hz)
table_250 = draw.sine( 250, 250, _1_hz)
gen_1000 = wt_oscillators.Nearest(sr, table_1000)
gen_500 = wt_oscillators.Nearest(sr, table_500)
gen_250 = wt_oscillators.Nearest(sr, table_250)
dur = 1.0
x0 = gen_1000.generate(dur, f_3_10)
x1 = gen_500.generate(dur, f_3_10)
x2 = gen_250.generate(dur, f_3_10)
plt.figure()
plt.plot(x0, 'b-', label = 'wt 1000')
plt.plot(x1, 'r-', label = 'wt 500')
plt.plot(x2, 'm-', label = 'wt 250')
    plt.title('wt_oscillators.Nearest signals')
plt.grid(True)
plt.legend()
# round off error residuals
res_500 = x1 - x0
res_250 = x2 - x0
plt.figure()
plt.plot(res_500, label = 'wt 500 error')
plt.plot(res_250, label = 'wt 250 error')
plt.title('wt_oscillators.Nearest residual error')
plt.grid(True)
plt.legend()
plt.show()
#--------------------------------------------------------------------------
# wave table gen Lininterp
gen_1000 = wt_oscillators.Lininterp(sr, table_1000)
gen_500 = wt_oscillators.Lininterp(sr, table_500)
gen_250 = wt_oscillators.Lininterp(sr, table_250)
x0 = gen_1000.generate(dur, f_3_10)
x1 = gen_500.generate(dur, f_3_10)
x2 = gen_250.generate(dur, f_3_10)
plt.figure()
plt.plot(x0, 'b-', label = 'wt 1000')
plt.plot(x1, 'r-', label = 'wt 500')
plt.plot(x2, 'm-', label = 'wt 250')
plt.title('wt_oscillators.Lininterp signals')
plt.grid(True)
plt.legend()
# round off error residuals
res_500 = x1 - x0
res_250 = x2 - x0
plt.figure()
plt.plot(res_500, label = 'wt 500 error')
plt.plot(res_250, label = 'wt 250 error')
plt.title('wt_oscillators.Lininterp residual error')
plt.grid(True)
plt.legend()
plt.show()
#--------------------------------------------------------------------------
# draw with phase
phase = Circular(draw.line(sr, 1.0, 0.0, 1.0))
_3_hz = Constant(3.0)
x0 = draw.sine(sr, dur, _3_hz, phase)
x1 = gen_250.generate(dur, _3_hz, phase)
plt.figure()
plt.plot(x0, label = 'drawn')
plt.plot(x1, label = 'wt 250 interp')
plt.title('3 Hz sine with 180 deg phase change')
plt.grid(True)
plt.legend()
res = x1 - x0
plt.figure()
plt.plot(res, label = 'wt 250 interp error')
plt.title('Residual error with 180 deg phase change')
plt.grid(True)
plt.legend()
plt.show()
if __name__ == "__main__":
main()
| {
"repo_name": "weegreenblobbie/sd_audio_hackers",
"path": "20160724_wavetables/code/run_demo.py",
"copies": "1",
"size": "3800",
"license": "mit",
"hash": 784069450833047400,
"line_mean": 22.0303030303,
"line_max": 79,
"alpha_frac": 0.5365789474,
"autogenerated": false,
"ratio": 2.9411764705882355,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8952828508877427,
"avg_score": 0.004985381822161596,
"num_lines": 165
} |
# 3rd party
from django.test import TestCase
from django.utils import timezone
import factory
# Own
from portfolio.models import Transaction
from .security_factories import SecurityFactory
from .account_factories import AccountFactory
from .currency_factories import CurrencyFactory
class TransactionFactory(factory.django.DjangoModelFactory):
"""
Factory for creating transacions
"""
class Meta:
model = Transaction
account = factory.SubFactory(AccountFactory)
action = 'BUY'
date = timezone.now()
security = factory.SubFactory(SecurityFactory)
shares = 100
price = 21.30
commission = 0
currency = factory.SubFactory(CurrencyFactory)
#########
#
# Tests
#
#########
class TransactionModelTest(TestCase):
longMessage = True
def create_transaction(self, **kwargs):
return TransactionFactory(**kwargs)
def setUp(self):
self.transaction = self.create_transaction()
self.transaction.save()
self.security = SecurityFactory(name='Elisa')
self.currency = CurrencyFactory(iso_code='EUR')
def test_saving_transaction(self):
TransactionFactory(security=self.security)
saved_items = Transaction.objects.all()
# 2: one created above and the other in setUp()
self.assertEqual(saved_items.count(), 2,
'Should be two securities in db')
def test_edit_transaction(self):
self.create_transaction(security=self.security)
transaction = Transaction.objects.latest('date')
transaction.price = 22.54
transaction.currency = self.currency
transaction.save()
edited_transaction = Transaction.objects.get(pk=transaction.pk)
        self.assertEqual(float(edited_transaction.price), 22.54)
        self.assertEqual(edited_transaction.currency.iso_code, 'EUR')
| {
"repo_name": "jokimies/django-pj-portfolio",
"path": "tests/test_transaction_model.py",
"copies": "1",
"size": "1886",
"license": "bsd-3-clause",
"hash": 2968621399080105000,
"line_mean": 26.7352941176,
"line_max": 71,
"alpha_frac": 0.6802757158,
"autogenerated": false,
"ratio": 4.30593607305936,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5486211788859361,
"avg_score": null,
"num_lines": null
} |
# 3rd-party
from lxml.builder import E
# local module
from jnpr.junos.cfg.resource import Resource
class PhyPortBase(Resource):
"""
[edit interfaces <name>]
Resource name: str
<name> is the interface-name (IFD), e.g. 'ge-0/0/0'
"""
PROPERTIES = [
'admin', # True
'description', # str
'speed', # ['10m','100m','1g','10g']
'duplex', # ['full','half']
'mtu', # int
'loopback', # True
'$unit_count' # number of units defined
]
PORT_DUPLEX = {
'full': 'full-duplex',
'half': 'half-duplex'
}
@classmethod
def _set_invert(cls, in_this, item, from_this):
from_item = in_this[item]
in_this[item] = [
_k for _k,
_v in from_this.items() if _v == from_item][0]
# -----------------------------------------------------------------------
# XML readers
# -----------------------------------------------------------------------
def _xml_at_top(self):
return E.interfaces(E.interface(
E.name(self._name)
))
def _xml_at_res(self, xml):
return xml.find('.//interface')
def _xml_to_py(self, has_xml, has_py):
# common to all subclasses
Resource._r_has_xml_status(has_xml, has_py)
has_py['admin'] = bool(has_xml.find('disable') is None)
Resource.copyifexists(has_xml, 'description', has_py)
Resource.copyifexists(has_xml, 'mtu', has_py)
has_py['$unit_count'] = len(has_xml.findall('unit'))
# -----------------------------------------------------------------------
# XML writers
# -----------------------------------------------------------------------
# description handed by Resource
def _xml_change_admin(self, xml):
xml.append(
Resource.xmltag_set_or_del(
'disable',
(self.admin is False)))
return True
def _xml_change_mtu(self, xml):
Resource.xml_set_or_delete(xml, 'mtu', self.mtu)
return True
# -----------------------------------------------------------------------
# Manager List, Catalog
# -----------------------------------------------------------------------
def _r_list(self):
got = self.R.get_interface_information(
media=True,
interface_name="[efgx][et]-*")
self._rlist = [
name.text.strip() for name in got.xpath('physical-interface/name')]
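if __name__ == '__main__':
    # Editorial sketch, not part of the original module: _set_invert() maps a
    # device-side token back to the user-facing key, e.g. 'full-duplex' -> 'full'.
    has = {'duplex': 'full-duplex'}
    PhyPortBase._set_invert(has, 'duplex', PhyPortBase.PORT_DUPLEX)
    print(has)  # expected: {'duplex': 'full'}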
| {
"repo_name": "pklimai/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/phyport/base.py",
"copies": "2",
"size": "2565",
"license": "apache-2.0",
"hash": -7887126662052177000,
"line_mean": 29.1764705882,
"line_max": 79,
"alpha_frac": 0.4261208577,
"autogenerated": false,
"ratio": 4.077901430842608,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5504022288542607,
"avg_score": null,
"num_lines": null
} |
# 3rd-party
from lxml.builder import E
# local
from jnpr.junos.cfg import Resource
from jnpr.junos import JXML
from jnpr.junos.cfg.phyport.base import PhyPortBase
class PhyPortSwitch(PhyPortBase):
PORT_SPEED = {
'auto': 'auto-negotiation',
'10m': 'ethernet-10m',
'100m': 'ethernet-100m',
'1g': 'ethernet-1g'
}
# -----------------------------------------------------------------------
# XML readers
# -----------------------------------------------------------------------
def _xml_to_py(self, has_xml, has_py):
PhyPortBase._xml_to_py(self, has_xml, has_py)
# speed, duplex, loopback are all under 'ether-options'
ethopts = has_xml.find('ether-options')
if ethopts is None:
return
if ethopts.find('loopback') is not None:
has_py['loopback'] = True
speed = ethopts.find('speed')
if speed is not None:
# take the first child element
has_py['speed'] = speed[0].tag
PhyPortBase._set_invert(has_py, 'speed', self.PORT_SPEED)
Resource.copyifexists(ethopts, 'link-mode', has_py, 'duplex')
if 'duplex' in has_py:
PhyPortBase._set_invert(has_py, 'duplex', self.PORT_DUPLEX)
# -----------------------------------------------------------------------
# XML writers
# -----------------------------------------------------------------------
def _xml_hook_build_change_begin(self, xml):
if any([this in self.should for this in ['speed', 'duplex',
'loopback']]):
self._ethopts = E('ether-options')
xml.append(self._ethopts)
def _xml_change_speed(self, xml):
speed_tag = self.PORT_SPEED.get(self.speed)
add_this = E.speed(
JXML.DEL) if speed_tag is None else E.speed(
E(speed_tag))
self._ethopts.append(add_this)
return True
def _xml_change_duplex(self, xml):
value = self.PORT_DUPLEX.get(self.duplex)
Resource.xml_set_or_delete(self._ethopts, 'link-mode', value)
return True
def _xml_change_loopback(self, xml):
self._ethopts.append(
Resource.xmltag_set_or_del(
'loopback',
self.loopback))
return True
| {
"repo_name": "JamesNickerson/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/phyport/switch.py",
"copies": "10",
"size": "2349",
"license": "apache-2.0",
"hash": -225792791808148700,
"line_mean": 31.625,
"line_max": 77,
"alpha_frac": 0.4929757344,
"autogenerated": false,
"ratio": 3.876237623762376,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 72
} |
# 3rd-party
from lxml.builder import E
# module packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
class ApplicationSet(Resource):
"""
[edit applications application-set <name>]
Resource name: str
<name> is the application-set name
"""
PROPERTIES = [
'description',
'app_list', 'app_list_adds', 'app_list_dels',
'appset_list', 'appset_list_adds', 'appset_list_dels'
]
def _xml_at_top(self):
return E.applications(E('application-set', (E.name(self._name))))
def _xml_at_res(self, xml):
return xml.find('.//application-set')
def _xml_to_py(self, has_xml, has_py):
Resource._r_has_xml_status(has_xml, has_py)
Resource.copyifexists(has_xml, 'description', has_py)
# each of the <application> elements
app_list = [this.findtext('name')
for this in has_xml.xpath('application')]
set_list = [this.findtext('name')
for this in has_xml.xpath('application-set')]
if len(app_list):
has_py['app_list'] = app_list
if len(set_list):
has_py['appset_list'] = set_list
# -----------------------------------------------------------------------
# XML property writers
# -----------------------------------------------------------------------
# -------------------------------------------------------------------------
# application list
# -------------------------------------------------------------------------
def _xml_change_app_list(self, xml):
self._xml_list_property_add_del_names(xml,
prop_name='app_list',
element_name='application')
return True
def _xml_change_app_list_adds(self, xml):
for this in self.should['app_list_adds']:
xml.append(E.application(E.name(this)))
return True
def _xml_change_app_list_dels(self, xml):
for this in self.should['app_list_dels']:
xml.append(E.application(JXML.DEL, E.name(this)))
return True
# -------------------------------------------------------------------------
# application-set list
# -------------------------------------------------------------------------
def _xml_change_appset_list(self, xml):
        if self.should.get('appset_list') is None:
            self['appset_list'] = []
(adds, dels) = Resource.diff_list(
self.has.get('appset_list', []), self.should['appset_list'])
for this in adds:
xml.append(E('application-set', E.name(this)))
for this in dels:
xml.append(E('application-set', JXML.DEL, E.name(this)))
return True
def _xml_change_appset_list_adds(self, xml):
for this in self.should['appset_list_adds']:
xml.append(E('application-set', E.name(this)))
return True
def _xml_change_appset_list_dels(self, xml):
for this in self.should['appset_list_dels']:
xml.append(E('application-set', JXML.DEL, E.name(this)))
return True
# -----------------------------------------------------------------------
# Resource List, Catalog
# -- only executed by 'manager' resources
# -----------------------------------------------------------------------
def _r_list(self):
got = self.R.get_config(
E.applications(E('application-set', JXML.NAMES_ONLY)))
self._rlist = [this.text for this in got.xpath('.//name')]
def _r_catalog(self):
got = self.R.get_config(E.applications(E('application-set')))
for this in got.xpath('.//application-set'):
name = this.findtext('name')
self._rcatalog[name] = {}
self._xml_to_py(this, self._rcatalog[name])
| {
"repo_name": "dgjnpr/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/srx/app_set.py",
"copies": "1",
"size": "3887",
"license": "apache-2.0",
"hash": 8321137152307202000,
"line_mean": 34.018018018,
"line_max": 79,
"alpha_frac": 0.4767172627,
"autogenerated": false,
"ratio": 4.078698845750262,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5055416108450262,
"avg_score": null,
"num_lines": null
} |
# 3rd party
from nose.plugins.attrib import attr
# Agent
from checks import AgentCheck
from tests.checks.common import AgentCheckTest
@attr(requires='gearman')
class GearmanTestCase(AgentCheckTest):
CHECK_NAME = "gearmand"
def test_metrics(self):
tags = ['first_tag', 'second_tag']
service_checks_tags = ['server:127.0.0.1', 'port:4730']
config = {
'instances': [{
'tags': tags
}]
}
tags += service_checks_tags
self.run_check(config)
self.assertMetric('gearman.unique_tasks', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.running', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.queued', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.workers', value=0.0, tags=tags, count=1)
self.assertServiceCheck("gearman.can_connect", status=AgentCheck.OK,
tags=service_checks_tags, count=1)
self.coverage_report()
def test_service_checks(self):
config = {
'instances': [
{'host': '127.0.0.1', 'port': 4730},
{'host': '127.0.0.1', 'port': 4731}]
}
self.assertRaises(Exception, self.run_check, config)
service_checks_tags_ok = ['server:127.0.0.1', 'port:4730']
service_checks_tags_not_ok = ['server:127.0.0.1', 'port:4731']
tags = service_checks_tags_ok
self.assertMetric('gearman.unique_tasks', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.running', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.queued', value=0.0, tags=tags, count=1)
self.assertMetric('gearman.workers', value=0.0, tags=tags, count=1)
self.assertServiceCheck("gearman.can_connect", status=AgentCheck.OK,
tags=service_checks_tags_ok, count=1)
self.assertServiceCheck("gearman.can_connect", status=AgentCheck.CRITICAL,
tags=service_checks_tags_not_ok, count=1)
self.coverage_report()
| {
"repo_name": "jyogi/purvar-agent",
"path": "tests/checks/integration/test_gearmand.py",
"copies": "46",
"size": "2041",
"license": "bsd-3-clause",
"hash": -4799839925327136000,
"line_mean": 36.1090909091,
"line_max": 82,
"alpha_frac": 0.6153846154,
"autogenerated": false,
"ratio": 3.3459016393442624,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# 3rd party
import gearman
# project
from checks import AgentCheck
class Gearman(AgentCheck):
SERVICE_CHECK_NAME = 'gearman.can_connect'
def get_library_versions(self):
return {"gearman": gearman.__version__}
def _get_client(self,host,port):
self.log.debug("Connecting to gearman at address %s:%s" % (host, port))
return gearman.GearmanAdminClient(["%s:%s" %
(host, port)])
def _get_metrics(self, client, tags):
data = client.get_status()
running = 0
queued = 0
workers = 0
for stat in data:
running += stat['running']
queued += stat['queued']
workers += stat['workers']
unique_tasks = len(data)
self.gauge("gearman.unique_tasks", unique_tasks, tags=tags)
self.gauge("gearman.running", running, tags=tags)
self.gauge("gearman.queued", queued, tags=tags)
self.gauge("gearman.workers", workers, tags=tags)
self.log.debug("running %d, queued %d, unique tasks %d, workers: %d"
% (running, queued, unique_tasks, workers))
def _get_conf(self, instance):
host = instance.get('server', None)
port = instance.get('port', None)
if host is None:
self.warning("Host not set, assuming 127.0.0.1")
host = "127.0.0.1"
if port is None:
self.warning("Port is not set, assuming 4730")
port = 4730
tags = instance.get('tags', [])
return host, port, tags
def check(self, instance):
self.log.debug("Gearman check start")
host, port, tags = self._get_conf(instance)
service_check_tags = ["server:{0}".format(host),
"port:{0}".format(port)]
client = self._get_client(host, port)
self.log.debug("Connected to gearman")
tags += service_check_tags
try:
self._get_metrics(client, tags)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
message="Connection to %s:%s succeeded." % (host, port),
tags=service_check_tags)
except Exception as e:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
message=str(e), tags=service_check_tags)
raise
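if __name__ == '__main__':
    # Editorial sketch, not part of the original check: the aggregation done in
    # _get_metrics() boils down to summing per-task counters returned by
    # GearmanAdminClient.get_status(). The sample data below is fabricated.
    sample = [
        {'task': 'resize_image', 'running': 2, 'queued': 5, 'workers': 3},
        {'task': 'send_email', 'running': 0, 'queued': 1, 'workers': 2},
    ]
    totals = {
        'running': sum(s['running'] for s in sample),
        'queued': sum(s['queued'] for s in sample),
        'workers': sum(s['workers'] for s in sample),
        'unique_tasks': len(sample),
    }
    print(totals)  # expected: running 2, queued 6, workers 5, unique_tasks 2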
| {
"repo_name": "lookout/dd-agent",
"path": "checks.d/gearmand.py",
"copies": "34",
"size": "2314",
"license": "bsd-3-clause",
"hash": 115428310171543520,
"line_mean": 29.8533333333,
"line_max": 79,
"alpha_frac": 0.5743301642,
"autogenerated": false,
"ratio": 3.7626016260162602,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.003066915440992967,
"num_lines": 75
} |
# 3rd party
import gearman
# project
from checks import AgentCheck
MAX_NUM_TASKS = 200
class Gearman(AgentCheck):
SERVICE_CHECK_NAME = 'gearman.can_connect'
gearman_clients = {}
def get_library_versions(self):
return {"gearman": gearman.__version__}
def _get_client(self,host,port):
        if (host, port) not in self.gearman_clients:
self.log.debug("Connecting to gearman at address %s:%s" % (host, port))
self.gearman_clients[(host, port)] = gearman.GearmanAdminClient(["%s:%s" % (host, port)])
return self.gearman_clients[(host, port)]
def _get_aggregate_metrics(self, tasks, tags):
running = 0
queued = 0
workers = 0
for stat in tasks:
running += stat['running']
queued += stat['queued']
workers += stat['workers']
unique_tasks = len(tasks)
self.gauge("gearman.unique_tasks", unique_tasks, tags=tags)
self.gauge("gearman.running", running, tags=tags)
self.gauge("gearman.queued", queued, tags=tags)
self.gauge("gearman.workers", workers, tags=tags)
self.log.debug("running %d, queued %d, unique tasks %d, workers: %d"
% (running, queued, unique_tasks, workers))
def _get_per_task_metrics(self, tasks, task_filter, tags):
if len(task_filter) > MAX_NUM_TASKS:
self.warning(
"The maximum number of tasks you can specify is {}.".format(MAX_NUM_TASKS))
        if task_filter:
            tasks = [t for t in tasks if t['task'] in task_filter]
if len(tasks) > MAX_NUM_TASKS:
# Display a warning in the info page
self.warning(
"Too many tasks to fetch. You must choose the tasks you are interested in by editing the gearmand.yaml configuration file or get in touch with StackState Support")
for stat in tasks[:MAX_NUM_TASKS]:
running = stat['running']
queued = stat['queued']
workers = stat['workers']
task_tags = tags[:]
task_tags.append("task:{}".format(stat['task']))
self.gauge("gearman.running_by_task", running, tags=task_tags)
self.gauge("gearman.queued_by_task", queued, tags=task_tags)
self.gauge("gearman.workers_by_task", workers, tags=task_tags)
def _get_conf(self, instance):
host = instance.get('server', None)
port = instance.get('port', None)
tasks = instance.get('tasks', [])
if host is None:
self.warning("Host not set, assuming 127.0.0.1")
host = "127.0.0.1"
if port is None:
self.warning("Port is not set, assuming 4730")
port = 4730
tags = instance.get('tags', [])
return host, port, tasks, tags
def check(self, instance):
self.log.debug("Gearman check start")
host, port, task_filter, tags = self._get_conf(instance)
service_check_tags = ["server:{0}".format(host),
"port:{0}".format(port)]
client = self._get_client(host, port)
self.log.debug("Connected to gearman")
tags += service_check_tags
try:
tasks = client.get_status()
self._get_aggregate_metrics(tasks, tags)
self._get_per_task_metrics(tasks, task_filter, tags)
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
message="Connection to %s:%s succeeded." % (host, port),
tags=service_check_tags)
except Exception as e:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
message=str(e), tags=service_check_tags)
raise
| {
"repo_name": "StackVista/sts-agent-integrations-core",
"path": "gearmand/check.py",
"copies": "1",
"size": "3744",
"license": "bsd-3-clause",
"hash": 6551118845053240000,
"line_mean": 33.9906542056,
"line_max": 179,
"alpha_frac": 0.5827991453,
"autogenerated": false,
"ratio": 3.770392749244713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4853191894544713,
"avg_score": null,
"num_lines": null
} |
# 3rd party
import logging
import requests
# project
from monasca_agent.collector.checks import AgentCheck
import monasca_agent.collector.checks.services_checks as services_checks
from monasca_agent.common.util import headers
log = logging.getLogger(__name__)
class Etcd(AgentCheck):
DEFAULT_TIMEOUT = 5
SERVICE_CHECK_NAME = 'etcd.can_connect'
STORE_RATES = {
'getsSuccess': 'etcd.store.gets.success',
'getsFail': 'etcd.store.gets.fail',
'setsSuccess': 'etcd.store.sets.success',
'setsFail': 'etcd.store.sets.fail',
'deleteSuccess': 'etcd.store.delete.success',
'deleteFail': 'etcd.store.delete.fail',
'updateSuccess': 'etcd.store.update.success',
'updateFail': 'etcd.store.update.fail',
'createSuccess': 'etcd.store.create.success',
'createFail': 'etcd.store.create.fail',
'compareAndSwapSuccess': 'etcd.store.compareandswap.success',
'compareAndSwapFail': 'etcd.store.compareandswap.fail',
'compareAndDeleteSuccess': 'etcd.store.compareanddelete.success',
'compareAndDeleteFail': 'etcd.store.compareanddelete.fail',
'expireCount': 'etcd.store.expire.count'
}
STORE_GAUGES = {
'watchers': 'etcd.store.watchers'
}
SELF_GAUGES = {
'sendPkgRate': 'etcd.self.send.pkgrate',
'sendBandwidthRate': 'etcd.self.send.bandwidthrate',
'recvPkgRate': 'etcd.self.recv.pkgrate',
'recvBandwidthRate': 'etcd.self.recv.bandwidthrate'
}
SELF_RATES = {
'recvAppendRequestCnt': 'etcd.self.recv.appendrequest.count',
'sendAppendRequestCnt': 'etcd.self.send.appendrequest.count'
}
LEADER_COUNTS = {
# Rates
'fail': 'etcd.leader.counts.fail',
'success': 'etcd.leader.counts.success',
}
LEADER_LATENCY = {
# Gauges
'current': 'etcd.leader.latency.current',
'average': 'etcd.leader.latency.avg',
'minimum': 'etcd.leader.latency.min',
'maximum': 'etcd.leader.latency.max',
'standardDeviation': 'etcd.leader.latency.stddev',
}
def check(self, instance):
if 'url' not in instance:
raise Exception('etcd instance missing "url" value.')
# Load values from the instance config
url = instance.get('url')
# Load the ssl configuration
ssl_params = {
'ssl_keyfile': instance.get('ssl_keyfile'),
'ssl_certfile': instance.get('ssl_certfile'),
'ssl_cert_validation': instance.get('ssl_cert_validation', True),
'ssl_ca_certs': instance.get('ssl_ca_certs'),
}
for key, param in ssl_params.items():
if param is None:
del ssl_params[key]
# Append the instance's URL in case there are more than one, that
# way they can tell the difference!
timeout = float(instance.get('timeout', self.DEFAULT_TIMEOUT))
is_leader = False
# Gather self metrics
self_response = self._get_self_metrics(url, ssl_params, timeout)
if self_response is not None:
if self_response['state'] == 'StateLeader':
is_leader = True
dimensions = self._set_dimensions({
"etcd_tag_url": format(url),
"etcd_tag_state": "leader"},
instance)
else:
dimensions = self._set_dimensions({
"etcd_tag_url": format(url),
"etcd_tag_state": "follower"},
instance)
for key in self.SELF_RATES:
if key in self_response:
log.debug("Sending {0}={1}".format(self.SELF_RATES[key], self_response[key]))
self.rate(self.SELF_RATES[key], self_response[key], dimensions)
else:
log.warn("Missing key {0} in stats.".format(key))
for key in self.SELF_GAUGES:
if key in self_response:
log.debug("Sending {0}={1}".format(self.SELF_GAUGES[key], self_response[key]))
self.gauge(self.SELF_GAUGES[key], self_response[key], dimensions)
else:
log.warn("Missing key {0} in stats.".format(key))
# Gather store metrics
store_response = self._get_store_metrics(url, ssl_params, timeout)
if store_response is not None:
for key in self.STORE_RATES:
if key in store_response:
log.debug("Sending {0}={1}".format(self.STORE_RATES[key], store_response[key]))
self.rate(self.STORE_RATES[key], store_response[key], dimensions)
else:
log.warn("Missing key {0} in stats.".format(key))
for key in self.STORE_GAUGES:
if key in store_response:
log.debug("Sending {0}={1}".format(self.STORE_GAUGES[key], store_response[key]))
self.gauge(self.STORE_GAUGES[key], store_response[key], dimensions)
else:
log.warn("Missing key {0} in stats.".format(key))
# Gather leader metrics
if is_leader:
leader_response = self._get_leader_metrics(url, ssl_params, timeout)
if leader_response is not None and len(leader_response.get("followers", {})) > 0:
# Get the followers
followers = leader_response.get("followers")
for fol in followers:
# counts
for key in self.LEADER_COUNTS:
log.debug("Sending {0}={1}".format(self.LEADER_COUNTS[key], followers[fol].get("counts").get(key)))
self.rate(self.LEADER_COUNTS[key],
followers[fol].get("counts").get(key),
dimensions)
# latency
for key in self.LEADER_LATENCY:
log.debug("Sending {0}={1}".format(self.LEADER_LATENCY[key], followers[fol].get("latency").get(key)))
self.gauge(self.LEADER_LATENCY[key],
followers[fol].get("latency").get(key),
dimensions)
def _get_self_metrics(self, url, ssl_params, timeout):
return self._get_json(url + "/v2/stats/self", ssl_params, timeout)
def _get_store_metrics(self, url, ssl_params, timeout):
return self._get_json(url + "/v2/stats/store", ssl_params, timeout)
def _get_leader_metrics(self, url, ssl_params, timeout):
return self._get_json(url + "/v2/stats/leader", ssl_params, timeout)
def _get_json(self, url, ssl_params, timeout):
try:
certificate = None
if 'ssl_certfile' in ssl_params and 'ssl_keyfile' in ssl_params:
certificate = (ssl_params['ssl_certfile'], ssl_params['ssl_keyfile'])
verify = ssl_params.get('ssl_ca_certs', True) if ssl_params['ssl_cert_validation'] else False
r = requests.get(url, verify=verify, cert=certificate, timeout=timeout, headers=headers(self.agent_config))
except requests.exceptions.Timeout:
# If there's a timeout
return services_checks.Status.CRITICAL, "Timeout when hitting %s" % url
if r.status_code != 200:
return services_checks.Status.CRITICAL, "Got %s when hitting %s" % (r.status_code, url)
return r.json()
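if __name__ == '__main__':
    # Editorial sketch, not part of the original check: show how the
    # SELF_RATES / SELF_GAUGES maps translate fields of the /v2/stats/self
    # payload into metric names. The payload values below are fabricated.
    fake_self = {
        'state': 'StateLeader',
        'recvAppendRequestCnt': 12,
        'sendAppendRequestCnt': 34,
        'sendPkgRate': 5.0,
        'sendBandwidthRate': 100.0,
        'recvPkgRate': 4.0,
        'recvBandwidthRate': 90.0,
    }
    mappings = dict(Etcd.SELF_RATES)
    mappings.update(Etcd.SELF_GAUGES)
    for key, metric in sorted(mappings.items()):
        print('%s -> %s = %s' % (key, metric, fake_self.get(key)))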
| {
"repo_name": "sapcc/monasca-agent",
"path": "monasca_agent/collector/checks_d/etcd.py",
"copies": "1",
"size": "7535",
"license": "bsd-3-clause",
"hash": -7724165326603452000,
"line_mean": 40.8611111111,
"line_max": 125,
"alpha_frac": 0.5682813537,
"autogenerated": false,
"ratio": 3.9061689994815967,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49744503531815965,
"avg_score": null,
"num_lines": null
} |
# 3rd party
import memcache
# project
from checks import AgentCheck
# Ref: http://code.sixapart.com/svn/memcached/trunk/server/doc/protocol.txt
# Name Type Meaning
# ----------------------------------
# pid 32u Process id of this server process
# uptime 32u Number of seconds this server has been running
# time 32u current UNIX time according to the server
# version string Version string of this server
# pointer_size 32 Default size of pointers on the host OS
# (generally 32 or 64)
# rusage_user 32u:32u Accumulated user time for this process
# (seconds:microseconds)
# rusage_system 32u:32u Accumulated system time for this process
# (seconds:microseconds)
# curr_items 32u Current number of items stored by the server
# total_items 32u Total number of items stored by this server
# ever since it started
# bytes 64u Current number of bytes used by this server
# to store items
# curr_connections 32u Number of open connections
# total_connections 32u Total number of connections opened since
# the server started running
# connection_structures 32u Number of connection structures allocated
# by the server
# cmd_get 64u Cumulative number of retrieval requests
# cmd_set 64u Cumulative number of storage requests
# get_hits 64u Number of keys that have been requested and
# found present
# get_misses 64u Number of items that have been requested
# and not found
# delete_misses 64u Number of deletions reqs for missing keys
# delete_hits 64u Number of deletion reqs resulting in
# an item being removed.
# evictions 64u Number of valid items removed from cache
# to free memory for new items
# bytes_read 64u Total number of bytes read by this server
# from network
# bytes_written 64u Total number of bytes sent by this server to
# network
# limit_maxbytes 32u Number of bytes this server is allowed to
# use for storage.
# threads 32u Number of worker threads requested.
# (see doc/threads.txt)
# >>> mc.get_stats()
# [('127.0.0.1:11211 (1)', {'pid': '2301', 'total_items': '2',
# 'uptime': '80', 'listen_disabled_num': '0', 'version': '1.2.8',
# 'limit_maxbytes': '67108864', 'rusage_user': '0.002532',
# 'bytes_read': '51', 'accepting_conns': '1', 'rusage_system':
# '0.007445', 'cmd_get': '0', 'curr_connections': '4', 'threads': '2',
# 'total_connections': '5', 'cmd_set': '2', 'curr_items': '0',
# 'get_misses': '0', 'cmd_flush': '0', 'evictions': '0', 'bytes': '0',
# 'connection_structures': '5', 'bytes_written': '25', 'time':
# '1306364220', 'pointer_size': '64', 'get_hits': '0'})]
# For Membase it gets worse
# http://www.couchbase.org/wiki/display/membase/Membase+Statistics
# https://github.com/membase/ep-engine/blob/master/docs/stats.org
class Memcache(AgentCheck):
SOURCE_TYPE_NAME = 'memcached'
DEFAULT_PORT = 11211
GAUGES = [
"total_items",
"curr_items",
"limit_maxbytes",
"uptime",
"bytes",
"curr_connections",
"connection_structures",
"threads",
"pointer_size"
]
RATES = [
"rusage_user",
"rusage_system",
"cmd_get",
"cmd_set",
"cmd_flush",
"get_hits",
"get_misses",
"delete_misses",
"delete_hits",
"evictions",
"bytes_read",
"bytes_written",
"cas_misses",
"cas_hits",
"cas_badval",
"total_connections"
]
SERVICE_CHECK = 'memcache.can_connect'
def get_library_versions(self):
return {"memcache": memcache.__version__}
def _get_metrics(self, server, port, tags):
mc = None # client
service_check_tags = ["host:%s" % server, "port:%s" % port]
try:
self.log.debug("Connecting to %s:%s tags:%s", server, port, tags)
mc = memcache.Client(["%s:%s" % (server, port)])
raw_stats = mc.get_stats()
assert len(raw_stats) == 1 and len(raw_stats[0]) == 2,\
"Malformed response: %s" % raw_stats
# Access the dict
stats = raw_stats[0][1]
for metric in stats:
# Check if metric is a gauge or rate
if metric in self.GAUGES:
our_metric = self.normalize(metric.lower(), 'memcache')
self.gauge(our_metric, float(stats[metric]), tags=tags)
# Tweak the name if it's a rate so that we don't use the exact
# same metric name as the memcache documentation
if metric in self.RATES:
our_metric = self.normalize(
"{0}_rate".format(metric.lower()), 'memcache')
self.rate(our_metric, float(stats[metric]), tags=tags)
# calculate some metrics based on other metrics.
# stats should be present, but wrap in try/except
# and log an exception just in case.
try:
self.gauge(
"memcache.get_hit_percent",
100.0 * float(stats["get_hits"]) / float(stats["cmd_get"]),
tags=tags,
)
except ZeroDivisionError:
pass
try:
self.gauge(
"memcache.fill_percent",
100.0 * float(stats["bytes"]) / float(stats["limit_maxbytes"]),
tags=tags,
)
except ZeroDivisionError:
pass
try:
self.gauge(
"memcache.avg_item_size",
float(stats["bytes"]) / float(stats["curr_items"]),
tags=tags,
)
except ZeroDivisionError:
pass
uptime = stats.get("uptime", 0)
self.service_check(
self.SERVICE_CHECK, AgentCheck.OK,
tags=service_check_tags,
message="Server has been up for %s seconds" % uptime)
except AssertionError:
self.service_check(
self.SERVICE_CHECK, AgentCheck.CRITICAL,
tags=service_check_tags,
message="Unable to fetch stats from server")
raise Exception(
"Unable to retrieve stats from memcache instance: {0}:{1}."
"Please check your configuration".format(server, port))
if mc is not None:
mc.disconnect_all()
self.log.debug("Disconnected from memcached")
del mc
def check(self, instance):
socket = instance.get('socket')
server = instance.get('url')
if not server and not socket:
raise Exception('Either "url" or "socket" must be configured')
if socket:
server = 'unix'
port = socket
else:
port = int(instance.get('port', self.DEFAULT_PORT))
custom_tags = instance.get('tags') or []
tags = ["url:{0}:{1}".format(server, port)] + custom_tags
self._get_metrics(server, port, tags)
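# Editorial note (sketch, not part of the original check): check() accepts either
# a TCP instance or a unix-socket instance; the keys below come from the code
# above, the values are placeholders.
#
#   {'url': 'localhost', 'port': 11211, 'tags': ['role:session-cache']}
#   {'socket': '/var/run/memcached/memcached.sock'}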
| {
"repo_name": "jamesandariese/dd-agent",
"path": "checks.d/mcache.py",
"copies": "22",
"size": "7699",
"license": "bsd-3-clause",
"hash": -6456080407131517000,
"line_mean": 37.8838383838,
"line_max": 83,
"alpha_frac": 0.5273412131,
"autogenerated": false,
"ratio": 4.157127429805615,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
# 3rd party
import requests
# project
from checks import AgentCheck
from config import _is_affirmative
from util import headers
class Etcd(AgentCheck):
DEFAULT_TIMEOUT = 5
SERVICE_CHECK_NAME = 'etcd.can_connect'
STORE_RATES = {
'getsSuccess': 'etcd.store.gets.success',
'getsFail': 'etcd.store.gets.fail',
'setsSuccess': 'etcd.store.sets.success',
'setsFail': 'etcd.store.sets.fail',
'deleteSuccess': 'etcd.store.delete.success',
'deleteFail': 'etcd.store.delete.fail',
'updateSuccess': 'etcd.store.update.success',
'updateFail': 'etcd.store.update.fail',
'createSuccess': 'etcd.store.create.success',
'createFail': 'etcd.store.create.fail',
'compareAndSwapSuccess': 'etcd.store.compareandswap.success',
'compareAndSwapFail': 'etcd.store.compareandswap.fail',
'compareAndDeleteSuccess': 'etcd.store.compareanddelete.success',
'compareAndDeleteFail': 'etcd.store.compareanddelete.fail',
'expireCount': 'etcd.store.expire.count'
}
STORE_GAUGES = {
'watchers': 'etcd.store.watchers'
}
SELF_GAUGES = {
'sendPkgRate': 'etcd.self.send.pkgrate',
'sendBandwidthRate': 'etcd.self.send.bandwidthrate',
'recvPkgRate': 'etcd.self.recv.pkgrate',
'recvBandwidthRate': 'etcd.self.recv.bandwidthrate'
}
SELF_RATES = {
'recvAppendRequestCnt': 'etcd.self.recv.appendrequest.count',
'sendAppendRequestCnt': 'etcd.self.send.appendrequest.count'
}
LEADER_COUNTS = {
# Rates
'fail': 'etcd.leader.counts.fail',
'success': 'etcd.leader.counts.success',
}
LEADER_LATENCY = {
# Gauges
'current': 'etcd.leader.latency.current',
'average': 'etcd.leader.latency.avg',
'minimum': 'etcd.leader.latency.min',
'maximum': 'etcd.leader.latency.max',
'standardDeviation': 'etcd.leader.latency.stddev',
}
def check(self, instance):
if 'url' not in instance:
raise Exception('etcd instance missing "url" value.')
# Load values from the instance config
url = instance['url']
instance_tags = instance.get('tags', [])
# Load the ssl configuration
ssl_params = {
'ssl_keyfile': instance.get('ssl_keyfile'),
'ssl_certfile': instance.get('ssl_certfile'),
'ssl_cert_validation': _is_affirmative(instance.get('ssl_cert_validation', True)),
'ssl_ca_certs': instance.get('ssl_ca_certs'),
}
for key, param in ssl_params.items():
if param is None:
del ssl_params[key]
# Append the instance's URL in case there are more than one, that
# way they can tell the difference!
instance_tags.append("url:{0}".format(url))
timeout = float(instance.get('timeout', self.DEFAULT_TIMEOUT))
is_leader = False
# Gather self metrics
self_response = self._get_self_metrics(url, ssl_params, timeout)
if self_response is not None:
if self_response['state'] == 'StateLeader':
is_leader = True
instance_tags.append('etcd_state:leader')
else:
instance_tags.append('etcd_state:follower')
for key in self.SELF_RATES:
if key in self_response:
self.rate(self.SELF_RATES[key], self_response[key], tags=instance_tags)
else:
self.log.warn("Missing key {0} in stats.".format(key))
for key in self.SELF_GAUGES:
if key in self_response:
self.gauge(self.SELF_GAUGES[key], self_response[key], tags=instance_tags)
else:
self.log.warn("Missing key {0} in stats.".format(key))
# Gather store metrics
store_response = self._get_store_metrics(url, ssl_params, timeout)
if store_response is not None:
for key in self.STORE_RATES:
if key in store_response:
self.rate(self.STORE_RATES[key], store_response[key], tags=instance_tags)
else:
self.log.warn("Missing key {0} in stats.".format(key))
for key in self.STORE_GAUGES:
if key in store_response:
self.gauge(self.STORE_GAUGES[key], store_response[key], tags=instance_tags)
else:
self.log.warn("Missing key {0} in stats.".format(key))
# Gather leader metrics
if is_leader:
leader_response = self._get_leader_metrics(url, ssl_params, timeout)
if leader_response is not None and len(leader_response.get("followers", {})) > 0:
# Get the followers
followers = leader_response.get("followers")
for fol in followers:
# counts
for key in self.LEADER_COUNTS:
self.rate(self.LEADER_COUNTS[key],
followers[fol].get("counts").get(key),
tags=instance_tags + ['follower:{0}'.format(fol)])
# latency
for key in self.LEADER_LATENCY:
self.gauge(self.LEADER_LATENCY[key],
followers[fol].get("latency").get(key),
tags=instance_tags + ['follower:{0}'.format(fol)])
# Service check
if self_response is not None and store_response is not None:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
tags=["url:{0}".format(url)])
def _get_self_metrics(self, url, ssl_params, timeout):
return self._get_json(url + "/v2/stats/self", ssl_params, timeout)
def _get_store_metrics(self, url, ssl_params, timeout):
return self._get_json(url + "/v2/stats/store", ssl_params, timeout)
def _get_leader_metrics(self, url, ssl_params, timeout):
return self._get_json(url + "/v2/stats/leader", ssl_params, timeout)
def _get_json(self, url, ssl_params, timeout):
try:
certificate = None
if 'ssl_certfile' in ssl_params and 'ssl_keyfile' in ssl_params:
certificate = (ssl_params['ssl_certfile'], ssl_params['ssl_keyfile'])
verify = ssl_params.get('ssl_ca_certs', True) if ssl_params['ssl_cert_validation'] else False
r = requests.get(url, verify=verify, cert=certificate, timeout=timeout, headers=headers(self.agentConfig))
except requests.exceptions.Timeout:
# If there's a timeout
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
message="Timeout when hitting %s" % url,
tags=["url:{0}".format(url)])
raise
except Exception as e:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
message="Error hitting %s. Error: %s" % (url, e.message),
tags=["url:{0}".format(url)])
raise
if r.status_code != 200:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
message="Got %s when hitting %s" % (r.status_code, url),
tags=["url:{0}".format(url)])
raise Exception("Http status code {0} on url {1}".format(r.status_code, url))
return r.json()
| {
"repo_name": "StackVista/sts-agent-integrations-core",
"path": "etcd/check.py",
"copies": "1",
"size": "7617",
"license": "bsd-3-clause",
"hash": 1518905868745106700,
"line_mean": 40.6229508197,
"line_max": 118,
"alpha_frac": 0.5636077196,
"autogenerated": false,
"ratio": 3.958939708939709,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0008887522747451143,
"num_lines": 183
} |
# 3rd party
import requests
# project
from checks import AgentCheck
from util import headers
class Etcd(AgentCheck):
DEFAULT_TIMEOUT = 5
SERVICE_CHECK_NAME = 'etcd.can_connect'
STORE_RATES = {
'getsSuccess': 'etcd.store.gets.success',
'getsFail': 'etcd.store.gets.fail',
'setsSuccess': 'etcd.store.sets.success',
'setsFail': 'etcd.store.sets.fail',
'deleteSuccess': 'etcd.store.delete.success',
'deleteFail': 'etcd.store.delete.fail',
'updateSuccess': 'etcd.store.update.success',
'updateFail': 'etcd.store.update.fail',
'createSuccess': 'etcd.store.create.success',
'createFail': 'etcd.store.create.fail',
'compareAndSwapSuccess': 'etcd.store.compareandswap.success',
'compareAndSwapFail': 'etcd.store.compareandswap.fail',
'compareAndDeleteSuccess': 'etcd.store.compareanddelete.success',
'compareAndDeleteFail': 'etcd.store.compareanddelete.fail',
'expireCount': 'etcd.store.expire.count'
}
STORE_GAUGES = {
'watchers': 'etcd.store.watchers'
}
SELF_GAUGES = {
'sendPkgRate': 'etcd.self.send.pkgrate',
'sendBandwidthRate': 'etcd.self.send.bandwidthrate',
'recvPkgRate': 'etcd.self.recv.pkgrate',
'recvBandwidthRate': 'etcd.self.recv.bandwidthrate'
}
SELF_RATES = {
'recvAppendRequestCnt': 'etcd.self.recv.appendrequest.count',
'sendAppendRequestCnt': 'etcd.self.send.appendrequest.count'
}
LEADER_COUNTS = {
# Rates
'fail': 'etcd.leader.counts.fail',
'success': 'etcd.leader.counts.success',
}
LEADER_LATENCY = {
# Gauges
'current': 'etcd.leader.latency.current',
'average': 'etcd.leader.latency.avg',
'minimum': 'etcd.leader.latency.min',
'maximum': 'etcd.leader.latency.max',
'standardDeviation': 'etcd.leader.latency.stddev',
}
def check(self, instance):
if 'url' not in instance:
raise Exception('etcd instance missing "url" value.')
# Load values from the instance config
url = instance['url']
instance_tags = instance.get('tags', [])
# Append the instance's URL in case there are more than one, that
# way they can tell the difference!
instance_tags.append("url:{0}".format(url))
timeout = float(instance.get('timeout', self.DEFAULT_TIMEOUT))
is_leader = False
# Gather self metrics
self_response = self._get_self_metrics(url, timeout)
if self_response is not None:
if self_response['state'] == 'StateLeader':
is_leader = True
instance_tags.append('etcd_state:leader')
else:
instance_tags.append('etcd_state:follower')
for key in self.SELF_RATES:
if key in self_response:
self.rate(self.SELF_RATES[key], self_response[key], tags=instance_tags)
else:
self.log.warn("Missing key {0} in stats.".format(key))
for key in self.SELF_GAUGES:
if key in self_response:
self.gauge(self.SELF_GAUGES[key], self_response[key], tags=instance_tags)
else:
self.log.warn("Missing key {0} in stats.".format(key))
# Gather store metrics
store_response = self._get_store_metrics(url, timeout)
if store_response is not None:
for key in self.STORE_RATES:
if key in store_response:
self.rate(self.STORE_RATES[key], store_response[key], tags=instance_tags)
else:
self.log.warn("Missing key {0} in stats.".format(key))
for key in self.STORE_GAUGES:
if key in store_response:
self.gauge(self.STORE_GAUGES[key], store_response[key], tags=instance_tags)
else:
self.log.warn("Missing key {0} in stats.".format(key))
# Gather leader metrics
if is_leader:
leader_response = self._get_leader_metrics(url, timeout)
if leader_response is not None and len(leader_response.get("followers", {})) > 0:
# Get the followers
followers = leader_response.get("followers")
for fol in followers:
# counts
for key in self.LEADER_COUNTS:
self.rate(self.LEADER_COUNTS[key],
followers[fol].get("counts").get(key),
tags=instance_tags + ['follower:{0}'.format(fol)])
# latency
for key in self.LEADER_LATENCY:
self.gauge(self.LEADER_LATENCY[key],
followers[fol].get("latency").get(key),
tags=instance_tags + ['follower:{0}'.format(fol)])
# Service check
if self_response is not None and store_response is not None:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK,
tags=["url:{0}".format(url)])
def _get_self_metrics(self, url, timeout):
return self._get_json(url + "/v2/stats/self", timeout)
def _get_store_metrics(self, url, timeout):
return self._get_json(url + "/v2/stats/store", timeout)
def _get_leader_metrics(self, url, timeout):
return self._get_json(url + "/v2/stats/leader", timeout)
def _get_json(self, url, timeout):
try:
r = requests.get(url, timeout=timeout, headers=headers(self.agentConfig))
except requests.exceptions.Timeout:
# If there's a timeout
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
message="Timeout when hitting %s" % url,
tags=["url:{0}".format(url)])
raise
if r.status_code != 200:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
message="Got %s when hitting %s" % (r.status_code, url),
tags=["url:{0}".format(url)])
raise Exception("Http status code {0} on url {1}".format(r.status_code, url))
return r.json()
| {
"repo_name": "Shopify/dd-agent",
"path": "checks.d/etcd.py",
"copies": "1",
"size": "6400",
"license": "bsd-3-clause",
"hash": 3329637165559591400,
"line_mean": 38.751552795,
"line_max": 95,
"alpha_frac": 0.5609375,
"autogenerated": false,
"ratio": 3.970223325062035,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000687555234855081,
"num_lines": 161
} |
# 3rd party
import snakebite.client
import snakebite.version
# project
from checks import AgentCheck
# This is only available on snakebite >= 2.2.0
# but snakebite 2.x is only compatible with hadoop >= 2.2.0
# So we bundle snakebite 1.3.9 and leave open the possibility of upgrading to a newer version
# if people want to use HA Mode
try:
# FIXME: Can be remove when we upgrade pylint (drop py 2.6)
# pylint: disable=E0611
from snakebite.namenode import Namenode
except ImportError:
Namenode = None
DEFAULT_PORT = 8020
class HDFSCheck(AgentCheck):
"""
Report on free space and space used in HDFS.
DEPRECATED:
    This check is deprecated and will be removed in a future version of the agent.
Please use the `hdfs_namenode` and `hdfs_datanode` checks instead
"""
def get_client(self, instance):
if 'namenode' in instance:
# backward compatibility for old style configuration of that check
host, port = instance['namenode'], instance.get('port', DEFAULT_PORT)
return snakebite.client.Client(host, port)
if type(instance['namenodes']) != list or len(instance['namenodes']) == 0:
raise ValueError('"namenodes parameter should be a list of dictionaries.')
for namenode in instance['namenodes']:
if type(namenode) != dict:
raise ValueError('"namenodes parameter should be a list of dictionaries.')
if "url" not in namenode:
raise ValueError('Each namenode should specify a "url" parameter.')
if len(instance['namenodes']) == 1:
host, port = instance['namenodes'][0]['url'], instance['namenodes'][0].get('port', DEFAULT_PORT)
return snakebite.client.Client(host, port)
else:
# We are running on HA mode
if Namenode is None:
# We are running snakebite 1.x which is not compatible with the HA mode
# Let's display a warning and use regular mode
self.warning("HA Mode is not available with snakebite < 2.2.0"
"Upgrade to the latest version of snakebiteby running: "
"sudo /opt/datadog-agent/embedded/bin/pip install --upgrade snakebite")
host, port = instance['namenodes'][0]['url'], instance['namenodes'][0].get('port', DEFAULT_PORT)
return snakebite.client.Client(host, port)
else:
self.log.debug("Running in HA Mode")
nodes = []
for namenode in instance['namenodes']:
nodes.append(Namenode(namenode['url'], namenode.get('port', DEFAULT_PORT)))
return snakebite.client.HAClient(nodes)
def check(self, instance):
self.warning('The "hdfs" check is deprecated and will be removed '
'in a future version of the agent. Please use the "hdfs_namenode" '
'and "hdfs_datanode" checks instead')
if 'namenode' not in instance and 'namenodes' not in instance:
raise ValueError('Missing key \'namenode\' in HDFSCheck config')
tags = instance.get('tags', None)
hdfs = self.get_client(instance)
stats = hdfs.df()
# {'used': 2190859321781L,
# 'capacity': 76890897326080L,
# 'under_replicated': 0L,
# 'missing_blocks': 0L,
# 'filesystem': 'hdfs://hostname:port',
# 'remaining': 71186818453504L,
# 'corrupt_blocks': 0L}
self.gauge('hdfs.used', stats['used'], tags=tags)
self.gauge('hdfs.free', stats['remaining'], tags=tags)
self.gauge('hdfs.capacity', stats['capacity'], tags=tags)
self.gauge('hdfs.in_use', float(stats['used']) /
float(stats['capacity']), tags=tags)
self.gauge('hdfs.under_replicated', stats['under_replicated'],
tags=tags)
self.gauge('hdfs.missing_blocks', stats['missing_blocks'], tags=tags)
self.gauge('hdfs.corrupt_blocks', stats['corrupt_blocks'], tags=tags)
| {
"repo_name": "urosgruber/dd-agent",
"path": "checks.d/hdfs.py",
"copies": "5",
"size": "4063",
"license": "bsd-3-clause",
"hash": 6188815789706363000,
"line_mean": 40.0404040404,
"line_max": 112,
"alpha_frac": 0.6118631553,
"autogenerated": false,
"ratio": 3.9523346303501947,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7064197785650195,
"avg_score": null,
"num_lines": null
} |
# 3rd party
from django.core.urlresolvers import reverse
from django.utils import timezone
from rest_framework.test import APITestCase
from rest_framework import status
# Own
from .price_factories import PriceFactory
from .security_factories import SecurityFactory
from .account_base import AccountBase
from .currency_factories import create_currencies
class AccountApiTest(APITestCase, AccountBase):
longMessage = True
def setUp(self):
self.account = self.create_account()
self.security_name = 'Elisa'
exchange_rate = 1
security_amount = 100
security_price = 22.5
commission = 15
# Currencies & rates
currency_eur, self.currency_usd, self.rateHistory = create_currencies()
security = SecurityFactory(name=self.security_name)
self.elisa_price = PriceFactory(security=security,
currency=currency_eur)
self.account.buySellSecurity(security=security,
shares=security_amount,
date=timezone.now(),
price=security_price,
commission=commission, action='BUY',
currency=currency_eur,
exchange_rate=exchange_rate)
def test_account_api_returns_existing_account_list(self):
"""
Check the account api returns existing accounts
"""
response = self.client.get(
reverse('api-account-list'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
for account in response.data:
# Account should have name
self.assertTrue(account['name'])
| {
"repo_name": "jokimies/django-pj-portfolio",
"path": "tests/test_account_api.py",
"copies": "1",
"size": "1784",
"license": "bsd-3-clause",
"hash": -7623146752072046000,
"line_mean": 31.4363636364,
"line_max": 79,
"alpha_frac": 0.5969730942,
"autogenerated": false,
"ratio": 4.71957671957672,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 55
} |
# 3rd-party packages
from lxml.builder import E
# local packages
from jnpr.junos.cfg import Resource
from jnpr.junos import jxml as JXML
class PolicyRule(Resource):
"""
[edit security policy from-zone <from_zone> to-zone <to_zone> policy
<policy_name>]
Resource name: str
<policy_name> is the name of the policy
Managed by: PolicyContext
<from_zone> and <to_zone> taken from parent resource
"""
PROPERTIES = [
'description',
'match_srcs',
'match_dsts',
'match_apps',
'action',
'count',
'log_init',
'log_close'
]
# -------------------------------------------------------------------------
# XML reading
# -------------------------------------------------------------------------
def _xml_at_top(self):
xml = self._parent._xml_at_top()
xml.find('.//policy').append(E.policy(E.name(self._name)))
return xml
def _xml_at_res(self, xml):
return xml.find('.//policy/policy')
def _xml_to_py(self, as_xml, to_py):
Resource._r_has_xml_status(as_xml, to_py)
Resource.copyifexists(as_xml, 'description', to_py)
x_match = as_xml.find('match')
x_then = as_xml.find('then')
# collect up the 'match' criteria
to_py['match_srcs'] = [
this.text for this in x_match.findall('source-address')]
to_py['match_dsts'] = [
this.text for this in x_match.findall('destination-address')]
to_py['match_apps'] = [
this.text for this in x_match.findall('application')]
# collect up the 'then' criteria
to_py['action'] = x_then.xpath('permit | reject | deny')[0].tag
if x_then.find('count') is not None:
to_py['count'] = True
if x_then.find('log/session-init') is not None:
to_py['log_init'] = True
if x_then.find('log/session-close') is not None:
to_py['log_close'] = True
# -------------------------------------------------------------------------
# XML writing
# -------------------------------------------------------------------------
def _xml_change_action(self, xml):
xml.append(E.then(E(self.should['action'])))
return True
def _xml_change_count(self, xml):
xml.append(E.then(
Resource.xmltag_set_or_del('count', self.should['count'])
))
return True
def _xml_change_log_init(self, xml):
xml.append(E.then(E.log(
Resource.xmltag_set_or_del('session-init', self.should['log_init'])
)))
return True
def _xml_change_log_close(self, xml):
xml.append(E.then(E.log(
Resource.xmltag_set_or_del(
'session-close',
self.should['log_close'])
)))
return True
def _xml_change_match_srcs(self, xml):
adds, dels = Resource.diff_list(
self.has['match_srcs'], self.should['match_srcs'])
if len(adds):
x_match = E.match()
xml.append(x_match)
for this in adds:
x_match.append(E('source-address', E.name(this)))
if len(dels):
x_match = E.match()
xml.append(x_match)
for this in dels:
x_match.append(E('source-address', JXML.DEL, E.name(this)))
return True
def _xml_change_match_dsts(self, xml):
adds, dels = Resource.diff_list(
self.has['match_dsts'], self.should['match_dsts'])
if len(adds):
x_match = E.match()
xml.append(x_match)
for this in adds:
x_match.append(E('destination-address', E.name(this)))
if len(dels):
x_match = E.match()
xml.append(x_match)
for this in dels:
x_match.append(
E('destination-address', JXML.DEL, E.name(this)))
return True
def _xml_change_match_apps(self, xml):
adds, dels = Resource.diff_list(
self.has['match_apps'], self.should['match_apps'])
if len(adds):
x_match = E.match()
xml.append(x_match)
for this in adds:
x_match.append(E('application', E.name(this)))
if len(dels):
x_match = E.match()
xml.append(x_match)
for this in dels:
x_match.append(E('application', JXML.DEL, E.name(this)))
return True
# -----------------------------------------------------------------------
# Resource List, Catalog
# -- only executed by 'manager' resources
# -----------------------------------------------------------------------
def _r_list(self):
got = self.P._xml_config_read()
self._rlist = [
this.text for this in got.xpath('.//policy/policy/name')]
def _r_catalog(self):
got = self.D.rpc.get_config(self.P._xml_at_top())
policies = got.find('.//security/policies/policy')
for pol in policies.findall('policy'):
name = pol.find('name').text
self._rcatalog[name] = {}
self._xml_to_py(pol, self._rcatalog[name])
| {
"repo_name": "dgjnpr/py-junos-eznc",
"path": "lib/jnpr/junos/cfg/srx/policy_rule.py",
"copies": "1",
"size": "5247",
"license": "apache-2.0",
"hash": -8668509499321357000,
"line_mean": 29.6842105263,
"line_max": 79,
"alpha_frac": 0.4863731656,
"autogenerated": false,
"ratio": 3.8076923076923075,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4794065473292307,
"avg_score": null,
"num_lines": null
} |
"""3rd update that adds Games table with API_KEY and backref on scores
Revision ID: 37a45401820b
Revises: 3414dfab0e91
Create Date: 2015-11-01 15:07:23.655544
"""
# revision identifiers, used by Alembic.
revision = '37a45401820b'
down_revision = '3414dfab0e91'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('games',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('api_key', sa.String(), nullable=False),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('api_key')
)
op.add_column(u'scores', sa.Column('game_id', sa.Integer(), nullable=False))
op.create_foreign_key(None, 'scores', 'games', ['game_id'], ['id'])
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'scores', type_='foreignkey')
op.drop_column(u'scores', 'game_id')
op.drop_table('games')
### end Alembic commands ###
| {
"repo_name": "Rdbaker/GameCenter",
"path": "migrations/versions/37a45401820b_.py",
"copies": "2",
"size": "1092",
"license": "mit",
"hash": -6060552879588632000,
"line_mean": 29.3333333333,
"line_max": 80,
"alpha_frac": 0.6694139194,
"autogenerated": false,
"ratio": 3.309090909090909,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4978504828490909,
"avg_score": null,
"num_lines": null
} |
## 3. Read in a CSV file ##
import pandas as pd
food_info = pd.read_csv('food_info.csv')
print(type(food_info))
## 4. Exploring the DataFrame ##
print(food_info.head(3))
dimensions = food_info.shape
print(dimensions)
num_rows = dimensions[0]
print(num_rows)
num_cols = dimensions[1]
print(num_cols)
first_twenty = food_info.head(20)
## 7. Selecting a row ##
hundredth_row = food_info.loc[99]
print(hundredth_row)
## 8. Data types ##
print(food_info.dtypes)
## 9. Selecting multiple rows ##
print("Rows 3, 4, 5 and 6")
print(food_info.loc[3:6])
print("Rows 2, 5, and 10")
two_five_ten = [2,5,10]
print(food_info.loc[two_five_ten])
last_rows = food_info.tail()
## 10. Selecting individual columns ##
# Series object.
ndb_col = food_info["NDB_No"]
print(ndb_col)
# Display the type of the column to confirm it's a Series object.
print(type(ndb_col))
saturated_fat = food_info['FA_Sat_(g)']
cholesterol = food_info['Cholestrl_(mg)']
## 11. Selecting multiple columns by name ##
zinc_copper = food_info[["Zinc_(mg)", "Copper_(mg)"]]
columns = ["Zinc_(mg)", "Copper_(mg)"]
zinc_copper = food_info[columns]
selenium_thiamin = food_info[['Selenium_(mcg)','Thiamin_(mg)']]
## 12. Practice ##
print(food_info.columns)
print(food_info.head(2))
gram_columns = [item for item in food_info.columns.tolist() if item.endswith('(g)')]
gram_df = food_info[gram_columns]
print(gram_df.head(3)) | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Data Analysis with Pandas Intermediate/Introduction to Pandas-8.py",
"copies": "1",
"size": "1392",
"license": "mit",
"hash": 6325883632415342000,
"line_mean": 21.4677419355,
"line_max": 84,
"alpha_frac": 0.6817528736,
"autogenerated": false,
"ratio": 2.5171790235081373,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3698931897108137,
"avg_score": null,
"num_lines": null
} |
# 3SAT test
from random import randrange
import datetime
#predlist = {(0,1,2):0, (0,1,3):0}
#negs = {(0,1,2):255, (0,1,3):255}
def make_ks(reverse=False):
"""ks are the bits of the ints from 0 to 7, used as truth flags
for vars in predicates. I.e. 0 means the denial of a var, 0 for var X means notX
1 for var X means X is true
"""
ks = []
for i in range(2):
for j in range(2):
for k in range(2):
if reverse:
ks.append((1-i,1-j,1-k))
else:
ks.append((i,j,k))
return ks
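# Added illustration (not part of the original file): bit i of a predicate's flag
# byte refers to make_ks()[i], so e.g. flag value 2**5 == 32 singles out the truth
# assignment (1, 0, 1) for that variable triple.
assert make_ks() == [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1),
                     (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)]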
def make_preds0(pcount = 100, nvars = 99):
preds = {}
for a in range(pcount):
si = randrange(0,2)*2 - 1
sj = randrange(0,2)*2 - 1
sk = randrange(0,2)*2 - 1
si = sj = sk = 1
i = randrange(0,nvars-1)
j = randrange(i+1,nvars)
k = randrange(j+1,nvars+1)
preds[(si*i,sj*j,sk*k)] = 2**randrange(8) # 255-randrange(0,255)
return preds
def make_preds(pcount = 100, nvars = 99):
preds = {}
for a in range(pcount):
si = randrange(0,2)*2 - 1
sj = randrange(0,2)*2 - 1
sk = randrange(0,2)*2 - 1
si = sj = sk = 1
i = randrange(0,nvars-2)
j = randrange(i+1,nvars-1)
k = randrange(j+1,nvars)
preds[(si*i,sj*j,sk*k)] = 2**randrange(8) # 255-randrange(0,255)
return preds
def make_negs(pcount = 100, nvars = 99):
negs = {}
for a in range(pcount):
i = randrange(0,nvars-1)
j = randrange(i+1,nvars)
k = randrange(j+1,nvars+1)
negs[(i,j,k)] = randrange(0,255)
#negs[(i,j,k)] = 2**randrange(0,8)
return negs
def pred2neg(pred):
#given a predicate, return its negative
pass
def brute():
    #given a number of variables N and a predicate list PL try all certs of length N against all predicates
cert = (2 ** (N+1)) - 1
for pred in predlist:
pass
return cert
def depcount(var, predlist):
t = 0
f = 0
for pred in predlist:
for el in pred:
if el == var:
t += 1
elif el == -var:
f += 1
return (t,f)
def check_overconst(predlist):
for pred in predlist:
if predlist.get(pred) and predlist[pred] < 1:
return True
return False
def mk_randcert(nvar = 100):
cert = []
for i in range(nvar):
cert.append(randrange(0,2))
return cert
def check_cert(cert, neglist, ks):
for pred in sorted(list(neglist.keys())):
p0,p1,p2 = pred
print(pred,neglist[pred])
mflags = neglist[pred]
for i in range(8):
if ((mflags >> i) & 1) == 0:
next
else:
k0,k1,k2 = ks[i]
if not( cert[p0] == k0 and cert[p1] == k1 and cert[p2] == k2):
print("OK ",i,cert[p0],cert[p1],cert[p2],ks[i], p0,p1,p2)
next
else:
print("Fail",i,cert[p0],cert[p1],cert[p2],ks[i], p0,p1,p2)
return False
print("\n")
return True
def print_cert(cert):
for i in range(0,len(cert),20):
print(cert[i:i+20])
def coverage(predlist):
cov = []
for pred in predlist:
cov.append(pred[0])
cov.append(pred[1])
cov.append(pred[2])
return sorted(set(cov))
# first, make sure that no var triple has more than 2**3 - 1 constraints, else the group of preds is unsat
# count ts and fs from each var
# choose the largest group from the most connected var
# reduce the remaining vars by extracting the already-satisfied preds
# then take the largest group from the remaining vars and repeat
# bottom-up
# trips, reduced to no more than 7 distinct constraints
# then tied to pairs and singles to further reduce. From trips to pairs and singles will add additional
# constraints
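# Hedged sketch (added; not part of the original file) of the over-constraint rule in
# the comments above, assuming each set bit forbids one of the 8 truth assignments of
# a variable triple: flags that OR together to 255 leave no satisfying assignment,
# which is what the assert in consolidate3 guards against.
def _triple_overconstrained(flag_bytes):
    combined = 0
    for flag in flag_bytes:
        combined |= flag
    return combined == 255
assert _triple_overconstrained([2**b for b in range(8)])        # all 8 assignments banned
assert not _triple_overconstrained([0b00000001, 0b10000000])    # still satisfiable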
def snippets():
nvars = 100
v = []
for i in range(nvars):
v.append(set())
pl = make_negs()
    predlist = list(pl.keys())
for i in range(len(predlist)):
pred = predlist[i]
for el in pred:
v[el].add(i)
m1 = {}
m2 = {}
for i in range(256):
for j in range(256):
for k in range(1,10):
m1[(i,j,k)] = match(i,j,k)
m2[(i,j,k)] = match(i,j,k+10)
for i in range(256):
for j in range(256):
for k in range(1,10):
print("{0:3d} {1:3d} {2:2d} {3:>2s} {0:3d} {1:3d} {2:2d} {4:>2s}".format(i,j,k,["T","F"][match(i,j,k)],["T","F"][match(i,j,k+10)]))
def t1():
t = list(range(8)) #satisfiers
s = [] #predicate constraints
k = [] #incompatibles
for i in range(7):
s.append(randrange(0,8))
s = set(s)
print("Constraints\t", list(s))
for el in s:
t.remove(7-el)
k.append(7-el)
print("Incompatible\t", k)
print("Compatible\t" ,t)
def mk_randpred():
s = [] #predicate constraints
for i in range(7):
s.append(randrange(0,8))
s = set(s)
return list(s)
def g(n):
if n < 1 or n > 6:
        raise ValueError("n must be between 1 and 6")
elif n == 1:
_g = ([x] for x in range(8))
elif n == 2:
_g = ([x, y] for x in range(8) for y in range(8) if x != y)
elif n == 3:
_g = ([x,y,z] for x in range(8) for y in range(8) for z in range(8) if x != y and y != z and x != z)
elif n == 4:
_g = ([w, x,y,z] for w in range(8) for x in range(8) for y in range(8) for z in range(8) if w != x and w != y and w != z
and x != y and y != z and x != z)
elif n == 5:
_g = ([v, w, x,y,z] for v in range(8) for w in range(8) for x in range(8) for y in range(8) for z in range(8)
if w != x and w != y and w != z and x != y and y != z and x != z and v != w and v != x and v != y and v != z)
elif n == 6:
_g = ([u,v, w, x,y,z] for u in range(8) for v in range(8) for w in range(8) for x in range(8) for y in range(8) for z in range(8)
if w != x and w != y and w != z and x != y and y != z and x != z and v != w and v != x and v != y and v != z
and u != v and u != w and u != x and u != y and u != z)
return _g
def g2(n):
if n < 1 or n > 6:
        raise ValueError("n must be between 1 and 6")
elif n == 1:
_g = ([x] for x in range(8))
elif n == 2:
_g = ([x, y] for x in range(8) for y in range(8) if x < y)
elif n == 3:
_g = ([x,y,z] for x in range(8) for y in range(8) for z in range(8) if x < y and y < z )
elif n == 4:
_g = ([w, x,y,z] for w in range(8) for x in range(8) for y in range(8) for z in range(8) if w < x and x < y and y < z)
elif n == 5:
_g = ([v, w, x,y,z] for v in range(8) for w in range(8) for x in range(8) for y in range(8) for z in range(8)
if v < w and w < x and x < y and y < z)
elif n == 6:
_g = ([u,v, w, x,y,z] for u in range(8) for v in range(8) for w in range(8) for x in range(8) for y in range(8) for z in range(8)
if u < v and v < w and w < x and x < y and y < z)
return _g
def compat(constraints):
c = list(range(8))
for el in constraints:
c.remove(7-el)
assert(len(c)>0)
return c
def _validate(cpart,pred):
a,b,c = cpart
d,e,f = list(pred.keys())
return ((a == d) or (b == e) or (c == f))
def kspack(ks):
"""takes a kset and returns an 8-bit number"""
bits = 0
_ks = make_ks()
for i in range(8):
if _ks[i] in ks:
bits += 2**i
return bits
def ksunpack(bits):
"""takes an 8-bit number and returns a kset"""
ks = []
_ks = make_ks()
for i in range(8):
if ((bits >> i) & 1) == 1:
ks.append(_ks[i])
return ks
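# Usage sketch (added; not part of the original file): kspack and ksunpack are inverse
# mappings over subsets of make_ks(). Index 0 is (0,0,0) and index 7 is (1,1,1), so
# packing those two assignments sets bits 0 and 7: 1 + 128 == 129.
assert kspack([(0, 0, 0), (1, 1, 1)]) == 129
assert ksunpack(129) == [(0, 0, 0), (1, 1, 1)]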
def _match1(a, b, p1, p2):
return (((a >> p1) & 1) == ((b >> p2) & 1))
def _match2(a,b, p11, p12, p21, p22):
#return ((((a >> p11) & 1) == ((b >> p21) & 1)) and (((a >> p12) & 1) == ((b >> p22) & 1)))
    return _match1(a, b, p11, p21) and _match1(a, b, p12, p22)
def match(a, b, p):
# 18 partial match possibilities
if ((p < 1) or (p == 10) or (p > 19)):
raise AssertionError #"{1} must be 1-9 or 11-19".format(p)
if p == 1:
return _match1(a,b,0,0)
elif p == 2:
return _match1(a,b,0,1)
elif p == 3:
return _match1(a,b,0,2)
elif p == 4:
return _match1(a,b,1,0)
elif p == 5:
return _match1(a,b,1,1)
elif p == 6:
return _match1(a,b,1,2)
elif p == 7:
return _match1(a,b,2,0)
elif p == 8:
return _match1(a,b,2,1)
elif p == 9:
return _match1(a,b,2,2)
elif p == 11:
return _match2(a,b,0,0,1,1)
elif p == 12:
return _match2(a,b,0,0,1,2)
elif p == 13:
return _match2(a,b,0,0,2,1)
elif p == 14:
return _match2(a,b,0,0,2,2)
elif p == 15:
return _match2(a,b,1,0,2,1)
elif p == 16:
return _match2(a,b,1,0,2,2)
elif p == 17:
return _match2(a,b,0,1,2,1)
elif p == 18:
return _match2(a,b,0,1,2,2)
elif p == 19:
return _match2(a,b,1,1,2,2)
def consolidate3(preds):
# given a list of trips, consolidate the bitflags
# flags should be all 1s except 0 for the 1 at a power of 2 which corresponds to a predicate in ks
bf = 0
for f in list(preds.values()):
assert((f == 1) or (f == 2) or (f == 4) or (f == 8) or (f == 16) or (f == 32) or (f == 64) or (f == 128))
        if (0 != (bf & f)): #ignore redundants
f = 0
bf += f
assert(bf < 255) # this would be unsatisfiable for a single predicate
return bf
def reduce3(predlist):
trips = {}
for pred in predlist:
a,b,c = pred
if trips.get((a,b,c)):
            trips[(a,b,c)].append(pred)
else:
trips[(a,b,c)] = [pred]
return trips
def reduce2(predlist):
pairs = {}
for pred in predlist:
a,b,c = pred
if pairs.get((a,b)):
pairs[(a,b)].append(pred)
else:
            pairs[(a,b)] = [pred]
if pairs.get((a,c)):
pairs[(a,c)].append(pred)
else:
pairs[(a,c)] = [pred]
if pairs.get((b,c)):
pairs[(b,c)].append(pred)
else:
pairs[(b,c)] = [pred]
return pairs
def reduce1(predlist):
singles = {}
for pred in predlist:
a,b,c = pred
if singles.get((a)):
singles[(a)].append(pred)
else:
singles[(a)] = [pred]
if singles.get((b)):
singles[(b)].append(pred)
else:
singles[(b)] = [pred]
if singles.get((c)):
singles[(c)].append(pred)
else:
singles[(c)] = [pred]
return singles
def bitrev(bits):
stib = 0
for i in range(8):
if bits & 2**i > 0:
stib += 2**(7-i)
return stib
def revinv(nflags):
flag = 255 ^ nflags
return bitrev(flag)
def red(a,b,p):
ka = ksunpack(a)
kb = ksunpack(b)
reda = []
redb = []
for ela in ka:
for elb in kb:
if match(ela,elb,p):
reda.append(ela)
redb.append(elb)
return kspack(reda), kspack(redb)
def summarize(ks):
a0,b0,c0 = ks[0]
res = [a0,b0,c0]
for el in ks[1:]:
a,b,c = el
if a != a0:
res[0] = 2
if b != b0:
res[1] = 2
if c != c0:
res[2] = 2
return res[0],res[1],res[2]
def nonslack(vlist):
pivot = 0
nonslacks = [] # list(range(len(vlist)))
slack = ([],[])
if vlist.count(slack) > 0:
for i in range(len(vlist)):
if vlist[i] != slack:
nonslacks.append(i)
return nonslacks
def vcounts(predlist,nvars):
var_ct = []
for i in range(nvars):
var_ct.append(([],[])) #false list, true list
pk = list(predlist.keys())
for i,val in enumerate(pk):
small,med,large = val
#print(i,a,b,c)
k = ksunpack(predlist[val]) # this may lead to redundancy, but it's already in place. If desired, make the predlist simply 2**x values
for pred in k:
#print(pred)
#pred is a triple-bool e.g. (0,1,0)
var_ct[small][pred[0]].append(val) #this puts the val of pk in v[a] true or false list based on the value of pred[0]
var_ct[med][pred[1]].append(val)
var_ct[large][pred[2]].append(val)
return var_ct
def print_v(vlist, ct=False):
"""given a list of vars, print the trues and falses
"""
for i,v in enumerate(vlist):
if ct:
print("var: {2:4d}\ntrue: {0}\nfalse: {1}\n".format(len(v[1]),len(v[0]),i))
else:
print("var: {2:4d}\ntrue: {0}\nfalse: {1}\n".format(sorted(v[1]),sorted(v[0]),i))
def check_cert(cert, plist):
proof = []
for i,pred in enumerate(plist):
small, med, large = pred
kset = ksunpack(plist[pred])
for i,k in enumerate(kset):
if cert[small] != 2 and k[0] == cert[small]:
proof.append(i)
continue
if cert[med] != 2 and k[1] == cert[med]:
proof.append(i)
continue
if cert[large] != 2 and k[2] == cert[large]:
proof.append(i)
continue
if k[0] != cert[small] and k[1] != cert[med] and k[2] != cert[large]:
print("failed on", i)
return i
return proof
def cert_gen(nvars=8):
v = [0] * nvars
#yield v
while (v != [1]*nvars):
for i in range(nvars):
if v[-i] == 0:
v[-i] = 1
break
else:
v[-i] = 0
yield v
def brute_check(ps,nvars = 8):
for c in cert_gen(nvars):
if check_cert(c,ps):
print(c)
for i,p in enumerate(ps):
print(p,ksunpack(ps[p]))
return c
print('unsat')
return False
def merge_ps(ps1,ps2):
trips = ps1
for i,p in enumerate(ps2):
if(trips.get(p)):
trips[p] |= ps2[p]
assert(trips[p] < 255)
else:
trips[p] = ps2[p]
return trips
def splitsort(vct,ps):
p_ct = len(ps)
mx_t = 0
mx_f = 0
t_idx = 0
f_idx = 0
for i,falsies in enumerate(vct[0]):
if mx_f < falsies:
mx_f = falsies
f_idx = i
for i,trulies in enumerate(vct[1]):
if mx_t < trulies:
mx_t = trulies
t_idx = i
return t_idx, vct[1][t_idx], f_idx, vct[0][f_idx]
def findmax(blist):
mf = 0
mt = 0
fi = 0
ti = 0
for i,b in enumerate(blist):
if len(b[0]) > mf:
mf = len(b[0])
fi = i
if len(b[1]) > mt:
mt = len(b[1])
ti = i
#print(fi,mf,ti,mt)
return fi,mf,ti,mt
def remslack(vlist):
v = vlist[:]
for i in range((v.count(([],[])))):
v.remove(([],[]))
return v
def nonslack(vlist):
pivot = 0
nonslacks = [] # list(range(len(vlist)))
slack = ([],[])
if vlist.count(slack) > 0:
for i in range(len(vlist)):
if vlist[i] != slack:
nonslacks.append(i)
return nonslacks
def redlist(pl,nvars,vsol,short_circuit=False):
pc = pl.copy()
vsat=[]
vc = vcounts(pl, nvars)
mf,fc,mt,tc = findmax(vc)
if short_circuit and fc ==1 and tc == 1:
print("singles")
return {},vsol
if fc > tc:
k = vc[mf][0]
vsol[mf] = 0
else:
k = vc[mt][1]
vsol[mt] = 1
for p in k:
pc.pop(p)
return pc,vsol
def solve(pl, nvars,short_circuit=False):
vsol = [2]* (nvars)
lpl0 = len(pl) - 1
for i in range(len(pl)):
lpl = len(pl)
#print(lpl)
pl,vsol = redlist(pl, nvars,vsol,short_circuit)
if lpl == lpl0 and lpl > 0:
print("not satisfied. remainder:", pl)
return False
lpl0 = lpl
if lpl == 0:
#print(vsol)
return vsol
def multitrials(trials=20, vstep=100, pratio=20, show_cert=False,short_circuit=False):
for i in range(1,trials+1):
v = vstep*i
p = pratio*v
trial(p,v,show_cert,short_circuit)
def check_bit(pl,offset):
for i,k in enumerate(pl):
if offset in k:
            print(k, ksunpack(pl[k]))
def trial(npred,nvars, show_cert=False,short_circuit=False):
st = datetime.datetime.now()
pm = make_preds(npred,nvars)
md = datetime.datetime.now()
v = solve(pm,nvars,short_circuit)
fin = datetime.datetime.now()
print("number of variables: ", nvars)
print("number of predicates: ", len(pm))
print("elapsed time to solve: {0}\n".format(fin-md))
if show_cert:
print(v)
def unsat(pl,cert):
    # return the predicates in pl that the certificate does not satisfy
    pm = pl.copy()
    for p in list(pm):
        ks = ksunpack(pm[p])
        for k in ks:
            if cert[p[0]] == k[0] or cert[p[1]] == k[1] or cert[p[2]] == k[2]:
                pm.pop(p)
                break
    return pm
def cur(num,sym="$",isep=",",fsep="."):
nums = str(num).split(".")
snum = nums[0]
if len(nums) == 2:
fnum = nums[1]
if len(fnum) == 1:
fnum += "0"
else:
fsep = fnum = ""
p = []
d,m = divmod(len(snum),3)
p.append(snum[:m])
for i in range(m,len(snum),3):
p.append(snum[i:i+3])
if "" in p:
p.remove('')
return sym + isep.join(p) + fsep + fnum
| {
"repo_name": "orneryhippo/saturdays",
"path": "rabbit/3sat.py",
"copies": "1",
"size": "14850",
"license": "apache-2.0",
"hash": 7573117678572639000,
"line_mean": 21.5341426404,
"line_max": 139,
"alpha_frac": 0.5853872054,
"autogenerated": false,
"ratio": 2.2664835164835164,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.33518707218835164,
"avg_score": null,
"num_lines": null
} |
# 3SAT test
from random import randrange
import datetime
T = 1
F = 0
S = 2
#predlist = {(0,1,2):0, (0,1,3):0}
#negs = {(0,1,2):255, (0,1,3):255}
def make_ks(reverse=False):
"""ks are the bits of the ints from 0 to 7, used as truth flags
for vars in predicates. I.e. 0 means the denial of a var, 0 for var X means notX
1 for var X means X is true
"""
ks = []
for i in range(2):
for j in range(2):
for k in range(2):
if reverse:
ks.append((1-i,1-j,1-k))
else:
ks.append((i,j,k))
return ks
def make_preds0(pcount = 100, nvars = 99):
preds = {}
for a in range(pcount):
si = randrange(0,2)*2 - 1
sj = randrange(0,2)*2 - 1
sk = randrange(0,2)*2 - 1
si = sj = sk = 1
i = randrange(0,nvars-2)
j = randrange(i+1,nvars-1)
k = randrange(j+1,nvars)
preds[(si*i,sj*j,sk*k)] = 255-randrange(0,255)
return preds
def make_preds(pcount = 100, nvars = 99):
preds = {}
for a in range(pcount):
si = randrange(0,2)*2 - 1
sj = randrange(0,2)*2 - 1
sk = randrange(0,2)*2 - 1
si = sj = sk = 1
i = randrange(0,nvars-2)
j = randrange(i+1,nvars-1)
k = randrange(j+1,nvars)
preds[(si*i,sj*j,sk*k)] = 2**randrange(8) # 255-randrange(0,255)
return preds
def make_negs(pcount = 100, nvars = 99):
negs = {}
for a in range(pcount):
i = randrange(0,nvars-1)
j = randrange(i+1,nvars)
k = randrange(j+1,nvars+1)
negs[(i,j,k)] = randrange(0,255)
#negs[(i,j,k)] = 2**randrange(0,8)
return negs
def pred2neg(pred):
#given a predicate, return its negative
pass
def brute():
    #given a number of variables N and a predicate list PL try all certs of length N against all predicates
cert = (2 ** (N+1)) - 1
for pred in predlist:
pass
return cert
def depcount(var, predlist):
t = 0
f = 0
for pred in predlist:
for el in pred:
if el == var:
t += 1
elif el == -var:
f += 1
return (t,f)
def check_overconst(predlist):
for pred in predlist:
if predlist.get(pred) and predlist[pred] < 1:
return True
return False
def mk_randcert(nvar = 100):
cert = []
for i in range(nvar):
cert.append(randrange(0,2))
return cert
def check_cert(cert, neglist, ks):
for pred in sorted(list(neglist.keys())):
p0,p1,p2 = pred
print(pred,neglist[pred])
mflags = neglist[pred]
for i in range(8):
if ((mflags >> i) & 1) == 0:
next
else:
k0,k1,k2 = ks[i]
if not( cert[p0] == k0 and cert[p1] == k1 and cert[p2] == k2):
print("OK ",i,cert[p0],cert[p1],cert[p2],ks[i], p0,p1,p2)
next
else:
print("Fail",i,cert[p0],cert[p1],cert[p2],ks[i], p0,p1,p2)
return False
print("\n")
return True
def print_cert(cert):
for i in range(0,len(cert),20):
print(cert[i:i+20])
def coverage(predlist):
cov = []
for pred in predlist:
cov.append(pred[0])
cov.append(pred[1])
cov.append(pred[2])
return sorted(set(cov))
# first, make sure that no var triple has more than 2**3 - 1 constraints, else the group of preds is unsat
# count ts and fs from each var
# choose the largest group from the most connected var
# reduce the remaining vars by extracting the already-satisfied preds
# then take the largest group from the remaining vars and repeat
# bottom-up
# trips, reduced to no more than 7 distinct constraints
# then tied to pairs and singles to further reduce. From trips to pairs and singles will add additional
# constraints
def snippets():
nvars = 100
v = []
for i in range(nvars):
v.append(set())
pl = make_negs()
    predlist = list(pl.keys())
for i in range(len(predlist)):
pred = predlist[i]
for el in pred:
v[el].add(i)
m1 = {}
m2 = {}
for i in range(256):
for j in range(256):
for k in range(1,10):
m1[(i,j,k)] = match(i,j,k)
m2[(i,j,k)] = match(i,j,k+10)
for i in range(256):
for j in range(256):
for k in range(1,10):
print("{0:3d} {1:3d} {2:2d} {3:>2s} {0:3d} {1:3d} {2:2d} {4:>2s}".format(i,j,k,["T","F"][match(i,j,k)],["T","F"][match(i,j,k+10)]))
def t1():
t = list(range(8)) #satisfiers
s = [] #predicate constraints
k = [] #incompatibles
for i in range(7):
s.append(randrange(0,8))
s = set(s)
print("Constraints\t", list(s))
for el in s:
t.remove(7-el)
k.append(7-el)
print("Incompatible\t", k)
print("Compatible\t" ,t)
def mk_randpred():
s = [] #predicate constraints
for i in range(7):
s.append(randrange(0,8))
s = set(s)
return list(s)
def g(n):
if n < 1 or n > 6:
        raise ValueError("n must be between 1 and 6")
elif n == 1:
_g = ([x] for x in range(8))
elif n == 2:
_g = ([x, y] for x in range(8) for y in range(8) if x != y)
elif n == 3:
_g = ([x,y,z] for x in range(8) for y in range(8) for z in range(8) if x != y and y != z and x != z)
elif n == 4:
_g = ([w, x,y,z] for w in range(8) for x in range(8) for y in range(8) for z in range(8) if w != x and w != y and w != z
and x != y and y != z and x != z)
elif n == 5:
_g = ([v, w, x,y,z] for v in range(8) for w in range(8) for x in range(8) for y in range(8) for z in range(8)
if w != x and w != y and w != z and x != y and y != z and x != z and v != w and v != x and v != y and v != z)
elif n == 6:
_g = ([u,v, w, x,y,z] for u in range(8) for v in range(8) for w in range(8) for x in range(8) for y in range(8) for z in range(8)
if w != x and w != y and w != z and x != y and y != z and x != z and v != w and v != x and v != y and v != z
and u != v and u != w and u != x and u != y and u != z)
return _g
def g2(n):
if n < 1 or n > 6:
        raise ValueError("n must be between 1 and 6")
elif n == 1:
_g = ([x] for x in range(8))
elif n == 2:
_g = ([x, y] for x in range(8) for y in range(8) if x < y)
elif n == 3:
_g = ([x,y,z] for x in range(8) for y in range(8) for z in range(8) if x < y and y < z )
elif n == 4:
_g = ([w, x,y,z] for w in range(8) for x in range(8) for y in range(8) for z in range(8) if w < x and x < y and y < z)
elif n == 5:
_g = ([v, w, x,y,z] for v in range(8) for w in range(8) for x in range(8) for y in range(8) for z in range(8)
if v < w and w < x and x < y and y < z)
elif n == 6:
_g = ([u,v, w, x,y,z] for u in range(8) for v in range(8) for w in range(8) for x in range(8) for y in range(8) for z in range(8)
if u < v and v < w and w < x and x < y and y < z)
return _g
def compat(constraints):
c = list(range(8))
for el in constraints:
c.remove(7-el)
assert(len(c)>0)
return c
def _validate(cpart,pred):
a,b,c = cpart
d,e,f = list(pred.keys())
return ((a == d) or (b == e) or (c == f))
def kspack(ks):
"""takes a kset and returns an 8-bit number"""
bits = 0
_ks = make_ks()
for i in range(8):
if _ks[i] in ks:
bits += 2**i
return bits
def ksunpack(bits):
"""takes an 8-bit number and returns a kset"""
ks = []
_ks = make_ks()
for i in range(8):
if ((bits >> i) & 1) == 1:
ks.append(_ks[i])
return ks
def _match1(a, b, p1, p2):
return (((a >> p1) & 1) == ((b >> p2) & 1))
def _match2(a,b, p11, p12, p21, p22):
#return ((((a >> p11) & 1) == ((b >> p21) & 1)) and (((a >> p12) & 1) == ((b >> p22) & 1)))
    return _match1(a, b, p11, p21) and _match1(a, b, p12, p22)
def match(a, b, p):
# 18 partial match possibilities
if ((p < 1) or (p == 10) or (p > 19)):
raise AssertionError #"{1} must be 1-9 or 11-19".format(p)
if p == 1:
return _match1(a,b,0,0)
elif p == 2:
return _match1(a,b,0,1)
elif p == 3:
return _match1(a,b,0,2)
elif p == 4:
return _match1(a,b,1,0)
elif p == 5:
return _match1(a,b,1,1)
elif p == 6:
return _match1(a,b,1,2)
elif p == 7:
return _match1(a,b,2,0)
elif p == 8:
return _match1(a,b,2,1)
elif p == 9:
return _match1(a,b,2,2)
elif p == 11:
return _match2(a,b,0,0,1,1)
elif p == 12:
return _match2(a,b,0,0,1,2)
elif p == 13:
return _match2(a,b,0,0,2,1)
elif p == 14:
return _match2(a,b,0,0,2,2)
elif p == 15:
return _match2(a,b,1,0,2,1)
elif p == 16:
return _match2(a,b,1,0,2,2)
elif p == 17:
return _match2(a,b,0,1,2,1)
elif p == 18:
return _match2(a,b,0,1,2,2)
elif p == 19:
return _match2(a,b,1,1,2,2)
def consolidate3(preds):
# given a list of trips, consolidate the bitflags
# flags should be all 1s except 0 for the 1 at a power of 2 which corresponds to a predicate in ks
bf = 0
for f in list(preds.values()):
assert((f == 1) or (f == 2) or (f == 4) or (f == 8) or (f == 16) or (f == 32) or (f == 64) or (f == 128))
        if (0 != (bf & f)): #ignore redundants
f = 0
bf += f
assert(bf < 255) # this would be unsatisfiable for a single predicate
return bf
def reduce3(predlist):
trips = {}
for pred in predlist:
a,b,c = pred
if trips.get((a,b,c)):
            trips[(a,b,c)].append(pred)
else:
trips[(a,b,c)] = [pred]
return trips
def reduce2(predlist):
pairs = {}
for pred in predlist:
a,b,c = pred
if pairs.get((a,b)):
pairs[(a,b)].append(pred)
else:
            pairs[(a,b)] = [pred]
if pairs.get((a,c)):
pairs[(a,c)].append(pred)
else:
pairs[(a,c)] = [pred]
if pairs.get((b,c)):
pairs[(b,c)].append(pred)
else:
pairs[(b,c)] = [pred]
return pairs
def reduce1(predlist):
singles = {}
for pred in predlist:
a,b,c = pred
if singles.get((a)):
singles[(a)].append(pred)
else:
singles[(a)] = [pred]
if singles.get((b)):
singles[(b)].append(pred)
else:
singles[(b)] = [pred]
if singles.get((c)):
singles[(c)].append(pred)
else:
singles[(c)] = [pred]
return singles
def bitrev(bits):
stib = 0
for i in range(8):
if bits & 2**i > 0:
stib += 2**(7-i)
return stib
def revinv(nflags):
flag = 255 ^ nflags
return bitrev(flag)
def red(a,b,p):
ka = ksunpack(a)
kb = ksunpack(b)
reda = []
redb = []
for ela in ka:
for elb in kb:
if match(ela,elb,p):
reda.append(ela)
redb.append(elb)
return kspack(reda), kspack(redb)
def summarize(ks):
a0,b0,c0 = ks[0]
res = [a0,b0,c0]
for el in ks[1:]:
a,b,c = el
if a != a0:
res[0] = 2
if b != b0:
res[1] = 2
if c != c0:
res[2] = 2
return res[0],res[1],res[2]
def nonslack(vlist):
pivot = 0
nonslacks = [] # list(range(len(vlist)))
slack = ([],[])
if vlist.count(slack) > 0:
for i in range(len(vlist)):
if vlist[i] != slack:
nonslacks.append(i)
return nonslacks
def vcounts(predlist,nvpars):
var_ct = []
for i in range(nvpars):
var_ct.append(([],[])) #false list, true list
pk = list(predlist.keys())
for i,val in enumerate(pk):
small,med,large = val
#print(i,a,b,c)
k = ksunpack(predlist[val]) # this may lead to redundancy, but it's already in place. If desired, make the predlist simply 2**x values
for pred in k:
#print(pred)
#pred is a triple-bool e.g. (0,1,0)
var_ct[small][pred[0]].append(val) #this puts the val of pk in v[a] true or false list based on the value of pred[0]
var_ct[med][pred[1]].append(val)
var_ct[large][pred[2]].append(val)
return var_ct
def print_v(vlist, ct=False):
"""given a list of vars, print the trues and falses
"""
for i,v in enumerate(vlist):
vs = sorted(v)
eps=0.0000001
vt = len(v[1])+eps
vf = len(v[0])+eps
pr = vf/(vf+vt)
#print(pr)
if ct:
print("var: {2:4d}\ntrue: {0}\nfalse: {1}\nprob: {3}\n".format(vt,vf,i,pr))
else:
print("var: {2:4d}\ntrue: {0}\nfalse: {1}\nprob: {3}\n".format(sorted(v[1]),sorted(v[0]),i,pr))
def check_cert(cert, plist):
proof = []
fails = []
for i,pred in enumerate(plist):
small, med, large = pred
kset = ksunpack(plist[pred])
for j,k in enumerate(kset):
if cert[small] == 2 or k[0] == cert[small]:
proof.append(i)
continue
if cert[med] == 2 or k[1] == cert[med]:
proof.append(i)
continue
if cert[large] == 2 or k[2] == cert[large]:
proof.append(i)
continue
fails.append(i)
if len(proof)<len(plist):
print("failed:",fails)
return fails
return []
def cert_gen(nvars=8):
v = [0] * nvars
#yield v
while (v != [1]*nvars):
for i in range(nvars):
if v[-i] == 0:
v[-i] = 1
break
else:
v[-i] = 0
yield v
def brute_check(ps,nvars = 8):
for c in cert_gen(nvars):
if check_cert(c,ps):
print(c)
for i,p in enumerate(ps):
print(p,ksunpack(ps[p]))
return c
print('unsat')
return False
def merge_ps(ps1,ps2):
trips = ps1
for i,p in enumerate(ps2):
if(trips.get(p)):
trips[p] |= ps2[p]
assert(trips[p] < 255)
else:
trips[p] = ps2[p]
return trips
def splitsort(vct,ps):
p_ct = len(ps)
mx_t = 0
mx_f = 0
t_idx = 0
f_idx = 0
for i,falsies in enumerate(vct[0]):
if mx_f < falsies:
mx_f = falsies
f_idx = i
for i,trulies in enumerate(vct[1]):
if mx_t < trulies:
mx_t = trulies
t_idx = i
return t_idx, vct[1][t_idx], f_idx, vct[0][f_idx]
def findmax(blist):
mf = 0
mt = 0
fi = 0
ti = 0
for i,b in enumerate(blist):
if len(b[0]) > mf:
mf = len(b[0])
fi = i
if len(b[1]) > mt:
mt = len(b[1])
ti = i
#print(fi,mf,ti,mt)
return fi,mf,ti,mt
def remslack(vlist):
v = vlist[:]
for i in range((v.count(([],[])))):
v.remove(([],[]))
return v
def nonslack(vlist):
pivot = 0
nonslacks = [] # list(range(len(vlist)))
slack = ([],[])
if vlist.count(slack) > 0:
for i in range(len(vlist)):
if vlist[i] != slack:
nonslacks.append(i)
return nonslacks
def redlist(pl,nvars,vsol,short_circuit=False):
pc = pl.copy()
vsat=[]
vc = vcounts(pl, nvars)
mf,fc,mt,tc = findmax(vc)
if short_circuit and fc ==1 and tc == 1:
print("singles")
return {},vsol
if fc > tc:
print("vsol {0} is false".format(mf))
k = vc[mf][0]
vsol[mf] = 0
else:
print("vsol {0} is true".format(mt))
k = vc[mt][1]
vsol[mt] = 1
#print(k)
for p in k:
pc.pop(p)
return pc,vsol
def solve(pl, nvars,unsat, short_circuit=False):
vsol = [2]* (nvars)
lpl0 = len(pl) - 1
for i in range(len(pl)):
lpl = len(pl)
#each time redlist is called, pl is reduced and vsol is updated
#print(lpl)
pl,vsol = redlist(pl, nvars,vsol,short_circuit)
if lpl == lpl0 and lpl > 0:
print("not satisfied. remainder:", pl)
return False
lpl0 = lpl
if lpl == 0:
#print(vsol)
return vsol,unsat
def multitrials(trials=20, vstep=100, pratio=20, show_cert=False,short_circuit=False):
for i in range(1,trials+1):
v = vstep*i
p = pratio*v
unsat = None
trial(p,v,unsat,show_cert,short_circuit)
def check_bit(pl,offset):
for i,k in enumerate(pl):
if offset in k:
            print(k, ksunpack(pl[k]))
def trial(npred,nvars, unsat=None, show_cert=False,short_circuit=False):
if unsat == None:
unsat = []
st = datetime.datetime.now()
pm = make_preds(npred,nvars)
md = datetime.datetime.now()
    v,u = solve(pm,nvars,unsat, short_circuit)
    proof = []
    if len(u) < 1:
        proof = check_cert(v,pm)
    else:
        show_fails(u,v,pm)
    if len(proof)>0:
        show_fails(proof,v,pm)
fin = datetime.datetime.now()
print("number of variables: ", nvars)
print("number of predicates: ", len(pm))
print("elapsed time to solve: {0}\n".format(fin-md))
return pm,v
def unsat(pl,cert):
    # return the predicates in pl that the certificate does not satisfy
    pm = pl.copy()
    for p in list(pm):
        ks = ksunpack(pm[p])
        for k in ks:
            if cert[p[0]] == k[0] or cert[p[1]] == k[1] or cert[p[2]] == k[2]:
                pm.pop(p)
                break
    return pm
def cur(num,sym="$",isep=",",fsep="."):
nums = str(num).split(".")
snum = nums[0]
if len(nums) == 2:
fnum = nums[1]
if len(fnum) == 1:
fnum += "0"
else:
fsep = fnum = ""
p = []
d,m = divmod(len(snum),3)
p.append(snum[:m])
for i in range(m,len(snum),3):
p.append(snum[i:i+3])
if "" in p:
p.remove('')
return sym + isep.join(p) + fsep + fnum
def proc(cert,v):
for i,c in enumerate(cert):
if 2 == c:
continue
else:
if v[i] == c:
print(i,c)
def show_fails(fails, cert, plist):
for i,p in enumerate(plist):
if p in fails:
print(i,p,ksunpack(plist[p]),cert[p[0]],cert[p[1]],cert[p[2]])
def print_p(plist):
for i,p in enumerate(plist):
print(p,ksunpack(plist[p]))
def check_cert(cert,plist):
fails = []
for i,p in enumerate(plist):
s,m,l = p
ks = ksunpack(plist[p])
for k in ks:
sk,mk,lk = k
incomp = (1-sk,1-mk,1-lk)
if cert[s] == incomp[0] and cert[m] == incomp[1] and cert[l] == incomp[2]:
fails.append(p)
return fails | {
"repo_name": "orneryhippo/saturdays",
"path": "rabbit/sat.py",
"copies": "1",
"size": "15931",
"license": "apache-2.0",
"hash": -7419837833132468000,
"line_mean": 21.2825174825,
"line_max": 139,
"alpha_frac": 0.5865921788,
"autogenerated": false,
"ratio": 2.262926136363636,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3349518315163636,
"avg_score": null,
"num_lines": null
} |
""" 3: Sorting
thomas moll 2015
"""
def selection_sort(arr):
""" Selection Sort
Complexity: O(n^2)
"""
for i in xrange(len(arr)):
minimum = i
for j in xrange(i+1, len(arr)):
# "Select" the correct value
if arr[j] < arr[minimum]:
minimum = j
# Using a pythonic swap
arr[minimum], arr[i] = arr[i], arr[minimum]
return arr
def insertion_sort(arr):
""" Insertion Sort
Complexity: O(n^2)
"""
for i in xrange(len(arr)):
cursor = arr[i]
pos = i
while pos > 0 and arr[pos-1] > cursor:
# Swap the number down the list
arr[pos] = arr[pos-1]
pos = pos-1
# Break and do the final swap
arr[pos] = cursor
return arr
def merge_sort(arr):
""" Merge Sort
Complexity: O(n log(n))
"""
size = len(arr)
half = size/2
# Our recursive base case
if size <= 1:
return arr
# Perform merge_sort recursively on both halves
left, right = merge_sort(arr[half:]), merge_sort(arr[:half])
# Merge each side together
return merge(left, right)
def merge(left, right):
""" Merge helper
Complexity: O(n)
"""
arr = []
left_cursor, right_cursor = 0,0
while left_cursor < len(left) and right_cursor < len(right):
# Sort each one and place into the result
if left[left_cursor] <= right[right_cursor]:
arr.append(left[left_cursor])
left_cursor+=1
else:
arr.append(right[right_cursor])
right_cursor+=1
# Add the left overs if there's any left to the result
if left:
arr.extend(left[left_cursor:])
if right:
arr.extend(right[right_cursor:])
return arr
def quick_sort(arr, first, last):
""" Quicksort
Complexity: O(n log(n))
"""
if first < last:
pos = partition(arr, first, last)
# Start our two recursive calls
quick_sort(arr, first, pos-1)
quick_sort(arr, pos+1, last)
def partition(arr, first, last):
pivot = first
for pos in xrange(first, last):
if arr[pos] < arr[last]:
arr[pos], arr[pivot] = arr[pivot], arr[pos]
pivot += 1
arr[pivot], arr[last] = arr[last], arr[pivot]
return pivot
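# Usage sketch (added for illustration; not part of the original module): quick_sort
# works in place and takes the first and last indices, while merge_sort returns a new
# sorted list and leaves its input untouched.
if __name__ == '__main__':
    data = [5, 2, 9, 1, 7]
    quick_sort(data, 0, len(data) - 1)
    assert data == [1, 2, 5, 7, 9]
    assert merge_sort([3, 1, 2]) == [1, 2, 3]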
| {
"repo_name": "aravindk1992/Data-Structure-Zoo",
"path": "2-Sorting/sorting.py",
"copies": "15",
"size": "2428",
"license": "mit",
"hash": 4041419608848496600,
"line_mean": 25.5909090909,
"line_max": 64,
"alpha_frac": 0.5251235585,
"autogenerated": false,
"ratio": 3.701219512195122,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
## 3. Statistical significance ##
import numpy as np
import matplotlib.pyplot as plt
mean_group_a = np.mean(weight_lost_a)
mean_group_b = np.mean(weight_lost_b)
print(mean_group_a)
print(mean_group_b)
plt.hist(weight_lost_a)
plt.show()
plt.hist(weight_lost_b)
plt.show()
## 4. Test statistic ##
mean_difference = mean_group_b - mean_group_a
print(mean_difference)
## 5. Permutation test ##
import numpy as np
mean_difference = 2.52
print(all_values)
mean_differences = []
for i in range(1000):
group_a, group_b = [],[]
for loop in all_values:
tmp = np.random.rand()
if tmp >= 0.5: group_a.append(loop)
else: group_b.append(loop)
    iteration_mean_difference = np.mean(group_b) - np.mean(group_a)
mean_differences.append(iteration_mean_difference)
plt.hist(mean_differences)
plt.show()
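# Hedged aside (added; not part of the lesson code): the permutation p-value built via
# the frequency dictionary in the next steps is simply the share of shuffled mean
# differences at least as extreme as the observed difference of 2.52.
p_value_direct = sum(1 for d in mean_differences if d >= mean_difference) / 1000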
## 7. Dictionary representation of a distribution ##
sampling_distribution = {}
for loop in mean_differences:
if sampling_distribution.get(loop,False):
val =sampling_distribution.get(loop)
sampling_distribution[loop] = val+1
else:
sampling_distribution[loop] = 1
## 8. P value ##
frequencies = []
for key in sampling_distribution:
    if key >= 2.52: frequencies.append(sampling_distribution[key])
p_value = sum(frequencies) / 1000 | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Probability Statistics Intermediate/Significance Testing-174.py",
"copies": "1",
"size": "1296",
"license": "mit",
"hash": 6481278559021243000,
"line_mean": 23.9423076923,
"line_max": 73,
"alpha_frac": 0.6782407407,
"autogenerated": false,
"ratio": 3.1153846153846154,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42936253560846155,
"avg_score": null,
"num_lines": null
} |
''' 3-statistics-step.py
=========================
AIM: Perform basic statistics on the data
INPUT: files: - <orbit_id>_misc/orbits.dat
variables: see section PARAMETERS (below)
OUTPUT: in <orbit_id>_misc/ : file one stat file
in <orbit_id>_figures/ : step distribution, step in function of time
CMD: python 3-statistics-step.py
ISSUES: <none known>
REQUIRES:- LATEX, epstopdf, pdfcrop, standard python libraries, specific libraries in resources/
- Structure of the root folder:
* <orbit_id>_flux/ --> flux files
* <orbit_id>_figures/ --> figures
* <orbit_id>_misc/ --> storages of data
* all_figures/ --> comparison figures
REMARKS: <none>
'''
###########################################################################
### INCLUDES
import numpy as np
import pylab as plt
import os.path
from resources.routines import *
from resources.TimeStepping import *
import parameters as param
import resources.figures as figures
###########################################################################
### PARAMETERS
# Orbit id
orbit_id = 1001
# First orbit in data set
orbit_ini = 1
# Last orbit to look for
orbit_end = minute2orbit(1440*365+1,orbit_id)
# File name for the output data file
data_file = 'statistics-step.dat'
# Show plots ?
show = True
# Fancy plots ? (ie. eps, pdf, png) otherwise png with less nice fonts
fancy = True
# To compute optimum value, what was the step max value ? (around 10)
max_step_allowed = 10
###########################################################################
### INITIALISATION
# File name for the computed orbit file
orbits_file = 'orbits.dat'
# Formatted folders definitions
folder_flux, folder_figures, folder_misc = init_folders(orbit_id)
if fancy: figures.set_fancy()
if os.path.isfile(folder_misc+data_file):
os.remove(folder_misc+data_file)
f = open(folder_misc+data_file,'w')
### Look for the computed orbits
orbits = np.loadtxt(folder_misc+orbits_file)
###########################################################################
############ ORBITS
print >> f, '# ORBITS'
print >> f, 'Number of orbits:', np.shape(orbits[:,1])[0]
print >> f, 'Optimum was:', int(np.floor((orbit_end - orbit_ini) / max_step_allowed))
############ STEPS
print >> f, '# STEPS'
# 1-Average step, mean, min, max
steps = orbits[1:,1]
step_max = np.amax(steps)
print >> f, 'Mean:', np.mean(steps)
print >> f, 'min:', np.amin(steps)
print >> f, 'max:', step_max
print >> f, 'stddev:', np.std(steps)
# 2-Histogramme
bin_edges = np.arange(1,step_max+2)
bins = np.arange(1,step_max+1)
hist, bin_edges = np.histogram(steps,bins=bin_edges)
print >> f, 'bins: ', bins
print >> f, 'histogram: ', hist
fig = plt.figure()
plt.grid(True)
# Bar plot
plt.bar(bins, hist, align='center')
# Cosmetics and labels
plt.xlim([0.5,step_max+0.5])
plt.xticks(range(1, int(step_max+1)))
plt.xlabel(r'$\mathrm{Distribution\ of\ steps\ [orbits]}$')
plt.ylabel(r'$\mathrm{Occurrence}$')
# Saves the figure
fname = '%sdistrib_steps_%d' % (folder_figures,orbit_id)
figures.savefig(fname,fig,fancy)
# 3-Step evolution with time
fig, ax=plt.subplots()
ax.margins(0.05)
plt.subplot(111)
xx = orbits[1:,0]/param.last_orbits[orbit_id]*365.
xx = figures.convert_date(xx)
plt.plot(xx, steps,linewidth=1.5)
fig.autofmt_xdate()
# Cosmetics and labels
plt.grid(True)
plt.ylabel(r'$\mathrm{Step\ [orbits]}$')
# Saves the figure
fname = '%sstep_evolution_%d' % (folder_figures,orbit_id)
figures.savefig(fname,fig,fancy)
f.close()
if show: plt.show()
| {
"repo_name": "kuntzer/SALSA-public",
"path": "3a_statistics-step.py",
"copies": "1",
"size": "3499",
"license": "bsd-3-clause",
"hash": -6356447390310369000,
"line_mean": 23.9928571429,
"line_max": 96,
"alpha_frac": 0.6261789083,
"autogenerated": false,
"ratio": 2.998286203941731,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4124465112241731,
"avg_score": null,
"num_lines": null
} |
"""3Sum Closest
Given an array nums of n integers and an integer target, find three integers in nums such that
the sum is closest to target. Return the sum of the three integers. You may assume that each
input would have exactly one solution.
Example:
Given array nums = [-1, 2, 1, -4], and target = 1.
The sum that is closest to the target is 2. (-1 + 2 + 1 = 2).
Refer https://leetcode.com/problems/3sum-closest
"""
class Solution:
def threeSumClosest(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
if not nums or len(nums) < 3:
return 0
nums.sort()
minimum = sum(nums[:3])
for i in range(len(nums) - 2):
if i > 0 and nums[i] == nums[i - 1]:
continue
l = i + 1
r = len(nums) - 1
while l < r:
s = nums[i] + nums[l] + nums[r]
if s == target:
return s
if s < target:
l += 1
if s > target:
r -= 1
if abs(s - target) < abs(minimum - target):
minimum = s
return minimum
if __name__ == '__main__':
cases = [([-1, 2, 1, -4], 1, 2), ([-1, -2, 4, 5, -2], 0, 0), ([-5, -10, 0, 20], 30, 15)]
s = Solution()
for case in cases:
assert s.threeSumClosest(case[0], case[1]) == case[2]
| {
"repo_name": "aiden0z/snippets",
"path": "leetcode/016_3sum_closest.py",
"copies": "1",
"size": "1459",
"license": "mit",
"hash": -8590764666176566000,
"line_mean": 26.0185185185,
"line_max": 95,
"alpha_frac": 0.4790952707,
"autogenerated": false,
"ratio": 3.5585365853658537,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9525099059793479,
"avg_score": 0.0025065592544748086,
"num_lines": 54
} |
"""3Sum
Given an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0?
Find all unique triplets in the array which gives the sum of zero.
Note:
The solution set must not contain duplicate triplets.
Example:
Given array nums = [-1, 0, 1, 2, -1, -4],
A solution set is:
[
[-1, 0, 1],
[-1, -1, 2]
]
Refer https://leetcode.com/problems/3sum
"""
class Solution:
def threeSum(self, nums):
""" based on binary search
:type nums: List[int]
:rtype: List[List[int]]
"""
if not nums or len(nums) < 3:
return []
nums.sort()
res = []
for i in range(len(nums) - 2):
if i > 0 and nums[i] == nums[i - 1]:
continue
l = i + 1
r = len(nums) - 1
while l < r:
s = nums[i] + nums[l] + nums[r]
if s > 0:
r = r - 1
elif s < 0:
l = l + 1
elif s == 0:
res.append([nums[i], nums[l], nums[r]])
while l < r and nums[l] == nums[l + 1]:
l += 1
while l < r and nums[r] == nums[r - 1]:
r -= 1
l += 1
return res
class SolutionB:
@staticmethod
def get_id(nums):
return '{0}{1}{2}'.format(*sorted(nums))
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
result = {}
length = len(nums)
for i1, v1 in enumerate(nums):
if i1 + 2 == length:
break
for i2, v2 in enumerate(nums[i1 + 1:]):
left = 0 - (v1 + v2)
if i1 + i2 + 2 == length:
break
for v3 in (nums[i1 + i2 + 2:]):
if v3 == left:
item = [v1, v2, v3]
identifiy = self.get_id(item)
if identifiy not in result:
result[identifiy] = item
return list(result.values())
if __name__ == '__main__':
cases = [([-1, 0, 1, 2, -1, -4], [[-1, 0, 1], [-1, -1, 2]], 2), ([0, 0], [], 0), ([0], [], 0),
([3, -2, 1, 0], [], 0), ([1, 2, -2, -1], [], 0), ([0, 0, 0], [[0, 0, 0]], 1)]
s = Solution()
for case in cases:
result = s.threeSum(case[0])
if len(result) != case[2]:
assert False
# ids = [s.get_id(item) for item in result]
# for nums in case[1]:
# assert s.get_id(nums) in ids
| {
"repo_name": "aiden0z/snippets",
"path": "leetcode/015_3sum.py",
"copies": "1",
"size": "2641",
"license": "mit",
"hash": -6816610962878905000,
"line_mean": 26.5104166667,
"line_max": 98,
"alpha_frac": 0.4070427868,
"autogenerated": false,
"ratio": 3.4568062827225132,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4363849069522513,
"avg_score": null,
"num_lines": null
} |
datasets = [(train_set_x, train_set_y), (valid_set_x, valid_set_y), (test_set_x, test_set_y)]
n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size
numpy_rng = numpy.random.RandomState(123)
print '... building the model'
# construct the Deep Belief Network
dbn = DBN(numpy_rng=numpy_rng, n_ins=41,
hidden_layers_sizes=[15],
          n_outs=5) | {
"repo_name": "myt00seven/svrg",
"path": "cifar/jaehoon_sample.py",
"copies": "2",
"size": "1684",
"license": "mit",
"hash": -4094763895159368000,
"line_mean": 38.1860465116,
"line_max": 93,
"alpha_frac": 0.580760095,
"autogenerated": false,
"ratio": 2.9337979094076654,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4514558004407666,
"avg_score": null,
"num_lines": null
} |
## 3. The Math Module ##
import math
a = math.sqrt(16)
b= math.ceil(111.3)
c = math.floor(89.9)
## 4. Variables Within Modules ##
import math
print(math.pi)
a = math.sqrt(math.pi)
b = math.ceil(math.pi)
c = math.floor(math.pi)
## 5. The CSV Module ##
import csv
nfl = list(csv.reader(open('nfl.csv')))
## 6. Counting How Many Times a Team Won ##
import csv
nfl = list(csv.reader(open('nfl.csv')))
patriots_wins = 0
for item in nfl:
if 'New England Patriots'== item[2]:
patriots_wins += 1
## 7. Making a Function that Counts Wins ##
import csv
f = open("nfl.csv", 'r')
nfl = list(csv.reader(f))
# Define your function here.
def nfl_wins(data,teamname):
wins = 0
for item in data:
if teamname == item[2]:
wins +=1
return(wins)
cowboys_wins = nfl_wins(nfl,'Dallas Cowboys')
falcons_wins = nfl_wins(nfl,'Atlanta Falcons')
## 10. Working with Boolean Operators ##
a = 5
b = 10
# a == 5
result1 = True
# a < 5 or b > a
result2 = a < 5 or b > a
# a < 5 and b > a
result3 = a < 5 and b > a
# a == 5 or b == 5
result4 = a == 5 or b == 5
# a > b or a == 10
result5 = a > b or a == 10
## 11. Counting Wins in a Given Year ##
import csv
f = open("nfl.csv", 'r')
nfl = list(csv.reader(f))
def nfl_wins(team):
count = 0
for row in nfl:
if row[2] == team:
count = count + 1
return count
def nfl_wins_in_a_year(data,team,year):
wins = 0
for item in data:
if (item[2] == team and item[0] == year):
wins +=1
return(wins)
browns_2010_wins = nfl_wins_in_a_year(nfl,'Cleveland Browns','2010')
eagles_2011_wins = nfl_wins_in_a_year(nfl,'Philadelphia Eagles','2011') | {
"repo_name": "vipmunot/Data-Analysis-using-Python",
"path": "Python Programming Intermediate/Modules-5.py",
"copies": "1",
"size": "1674",
"license": "mit",
"hash": 1337254694158573600,
"line_mean": 18.2528735632,
"line_max": 71,
"alpha_frac": 0.5955794504,
"autogenerated": false,
"ratio": 2.483679525222552,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8509796902096208,
"avg_score": 0.013892414705268764,
"num_lines": 87
} |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 15 13:47:44 2018
@author: BallBlueMeercat
"""
import random
import numpy as np
import zodesolve
import tools
import matplotlib.pyplot as plt
def redshift_picks(zmin, zmax, n):
"""
Takes in:
zmin = integer lowest redshift;
zmax = integer highest redshift;
n = integer number of redshifts to be generated.
Returns:
zpicks = list of randomly selected redshifts between zmin and zmax.
"""
zinterval = (zmax - zmin) / (n*2)
z_opts = tools.flist(zmin, zmax, zinterval)
zpicks = random.sample(z_opts, n)
zpicks = sorted(zpicks)
return zpicks
def gnoise(array, mu, sigma):
"""
Returns:
array with each point offset by unique Gaussian noise;
"""
n = len(array)
noise = np.random.normal(mu,sigma,n)
array = array + noise
# import matplotlib.pyplot as pl
# from pylab import figure
# figure()
# pl.title('Noise distribution')
# pl.hist(noise, 100)
# pl.show()
return array
def magn(names, values, data, model_key, plot_key=False):
"""
Finding matter density m, corrected absolute mag M, interaction gamma.
Takes in:
params = list of dictionaries {string:value} of names and
starting values of parameters to be emcee fitted:
[{'matter':int/float} = e_m(t)/ec(t0) at t=t0;
{'Mcorr':int/float} = corrected absolute mag M;
{'gamma':int/float} = interaction term;
{'zeta':int/float}] = interaction term;
... (more)
data = dictionary w/
'colour': numpy.ndarray = SN colour;
            'x1': numpy.ndarray = SN stretch correction;
            'zpicks': list of redshifts sorted in ascending order;
'mag':list of apparent magnitudes;
model = string, indicates which firstderivs to integrate;
plot_key = Boolean, to plot or not to plot model figures;
Returns:
        mag = np.ndarray of apparent mag corresponding to input redshifts.
"""
# Making sure number of parameters matches number of names given:
assert len(names) == len(values), "len(names) != len(values) in datasim.magn"
zpicks = data['zpicks']
# Corrected absolute magnitude M of SN.
M = values[0]
# dlpc, da, integrated_zpicks, integrated_dlpc, plot_var = zodesolve.zodesolve(names, values, zpicks, model_key, plot_key)
dlpc, da, plot_var = zodesolve.zodesolve(names, values, zpicks, model_key, plot_key)
# Calculating apparent magnitudes of supernovae at the simulated
# luminosity distances using the distance modulus formula.
mag = 5 * np.log10(dlpc/10) + M
# integrated_mag = 5 * np.log10(integrated_dlpc/10) + M
# print('redshift =',zpicks[-1],'da =', da[-1])
    # # plotting interpolated data vs input and full
# import matplotlib.pyplot as plt
# import matplotlib as mpl
# #mpl.style.use('default') # has to be switched on to set figure size
# mpl.style.use('fivethirtyeight')
# plt.rcParams['axes.facecolor'] = 'white'
# plt.rcParams['figure.facecolor'] = 'white'
# plt.rcParams['grid.color'] = 'white'
#
# print('integrated_zpicks',integrated_zpicks[0])
# print('zpicks', zpicks[0])
#
# plt.figure()
# plt.scatter(integrated_zpicks, integrated_mag, s=70, label='integrated', c="C{}".format(0))
# plt.plot(zpicks, mag, label='interpolated', linestyle='-', c="C{}".format(1))
# plt.legend()
if plot_key:
# Plotting evolution of parameters in the model.
import plots
plots.modelcheck(mag, zpicks, plot_var, model_key)
return mag, da
def model_comparison(params, data, model_key, plot_key=False):
"""
Takes in:
params = list of 3 lists of dictionaries with model parameters;
        zpicks = list of redshifts to integrate over, in ascending order;
model_key = list of 3 strings, firstderivs for each params;
Action:
Plots parameter evolution for different params/models specified.
"""
import plots
plot_var_list = []
zpicks = data['zpicks']
for i in range(len(params)):
names, values = params[i]
dlpc, da, plot_var = zodesolve.zodesolve(names, values, zpicks, model_key[i], plot_key)
# Corrected absolute magnitude M of SN.
M = values[0]
# Apparent mags of SN at the luminosity
# distances using the distance modulus formula.
mag = 5 * np.log10(dlpc/10) + M
plot_var['mag'] = mag
plot_var_list.append(plot_var)
plots.multi_modelcheck(data, model_key, plot_var_list)
return
def makensavemagnz(names, values, data, model_key, mu, sigma, filename):
'''
Takes in:
Parameters used to simulate magnitude:
m_true = e_m(t)/e_crit(t0) at t=t0;
de_true = 1 - m_true = e_de(t)/e_crit(t0) at t=t0;
g_true = interaction term, rate at which DE decays into matter.
    Statistical parameters of Gaussian noise added to data:
mu = mean;
sigma = standard deviation;
npoints = how many mag and z to generate.
Model type:
data_key = string, key for dictionary of interaction modes in firstderivs
Options: 'Hdecay', 'rdecay', 'rdecay_de', 'rdecay_m', 'interacting', 'LCDM'
Length of parameters has to correspond to the model being tested.
filename = string, name of file data is saved to.
Returns:
Nothing. Generates redshifts and corresponding magnitudes (according
to the model specified by data_key) offset by Gaussian noise,
saves them into a binary file called filename in the working directory.
'''
zpicks = data['zpicks']
    mag, da = magn(names, values, data, model_key)  # magn returns (mag, da); only mag is needed here
mag = gnoise(mag, mu, sigma)
output = mag, zpicks
# Relative path of output folder.
save_path = './data/'+filename
import pickle
pickle.dump(output, open(save_path, 'wb'))
return
def magn_plot(names, values, data, model_key, plot_key=False):
"""
Finding matter density m, corrected absolute mag M, interaction gamma.
Takes in:
params = list of dictionaries {string:value} of names and
starting values of parameters to be emcee fitted:
[{'matter':int/float} = e_m(t)/ec(t0) at t=t0;
{'Mcorr':int/float} = corrected absolute mag M;
{'gamma':int/float} = interaction term;
{'zeta':int/float}] = interaction term;
... (more)
data = dictionary w/
'colour': numpy.ndarray = SN colour;
            'x1': numpy.ndarray = SN stretch correction;
            'zpicks': list of redshifts sorted in ascending order;
'mag':list of apparent magnitudes;
firstderivs_key = string, indicates which firstderivs to integrate;
plot_key = Boolean, to plot or not to plot model figures;
Returns:
        mag = np.ndarray of apparent mag corresponding to input redshifts.
"""
zpicks = data['zpicks']
# Corrected absolute magnitude M of SN.
M = values[0]
dlpc, da, plot_var = zodesolve.zodesolve(names, values, zpicks, model_key, plot_key)
# Calculating apparent magnitudes of supernovae at the simulated
# luminosity distances using the distance modulus formula.
mag = 5 * np.log10(dlpc/10) + M
z = plot_var['z']
if model_key == 'waterfall':
plt.figure()
plt.title(r'$\bar \Omega$ evolution in waterfall')
plt.xlabel('redshift')
plt.ylabel(r'$\bar \Omega$')
plt.plot(z, plot_var['ombar_m'], label='ombar_m vs redshift')
plt.plot(z, plot_var['ombar_r'], label='ombar_r vs redshift')
plt.plot(z, plot_var['a_ombar'], label='a_ombar vs redshift')
plt.plot(z, plot_var['b_ombar'], label='b_ombar vs redshift')
plt.plot(z, plot_var['c_ombar'], label='c_ombar vs redshift')
plt.plot(z, plot_var['ombar_de'], label='ombar_de vs redshift')
plt.legend()
        sum_om = plot_var['ombar_m'] + plot_var['ombar_r'] + plot_var['a_ombar'] + plot_var['b_ombar'] + plot_var['c_ombar'] + plot_var['ombar_de']
om_m = plot_var['ombar_m']/sum_om
om_r = plot_var['ombar_r']/sum_om
om_a = plot_var['a_ombar']/sum_om
om_b = plot_var['b_ombar']/sum_om
om_c = plot_var['c_ombar']/sum_om
om_de = plot_var['ombar_de']/sum_om
plt.figure()
plt.title(r'$\Omega$ evolution in waterfall')
plt.xlabel('redshift')
plt.ylabel(r'$\Omega$')
plt.plot(z, om_m, label = 'om_m')
plt.plot(z, om_r, label = 'om_r')
plt.plot(z, om_a, label = 'om_a')
plt.plot(z, om_b, label = 'om_b')
plt.plot(z, om_c, label = 'om_c')
plt.plot(z, om_de, label = 'om_de')
plt.legend()
plt.show()
elif model_key == 'LCDM':
plt.figure()
plt.title(r'$\bar \Omega$ evolution in LCDM')
plt.xlabel('redshift')
plt.ylabel(r'$\bar \Omega$')
plt.plot(z, plot_var['ombar_m'], label='ombar_m vs redshift')
plt.plot(z, plot_var['ombar_de'], label='ombar_de vs redshift')
plt.legend()
sum_om = plot_var['ombar_m'] + plot_var['ombar_de']
om_m = plot_var['ombar_m']/sum_om
om_de = plot_var['ombar_de']/sum_om
plt.figure()
plt.title(r'$\Omega$ evolution in LCDM')
plt.xlabel('redshift')
plt.ylabel(r'$\Omega$')
plt.plot(z, om_m, label = 'om_m')
plt.plot(z, om_de, label = 'om_de')
plt.legend()
plt.show()
elif model_key == 'exotic':
plt.figure()
        plt.title(r'$\bar \Omega$ evolution in exotic')
plt.xlabel('redshift')
plt.ylabel(r'$\bar \Omega$')
plt.plot(z, plot_var['ombar_m'], label='ombar_m vs redshift')
plt.plot(z, plot_var['ombar_r'], label='ombar_r vs redshift')
plt.plot(z, plot_var['ombar_de'], label='ombar_de vs redshift')
plt.legend()
sum_om = plot_var['ombar_m'] + plot_var['ombar_r'] + plot_var['ombar_de']
om_m = plot_var['ombar_m']/sum_om
om_r = plot_var['ombar_r']/sum_om
om_de = plot_var['ombar_de']/sum_om
plt.figure()
        plt.title(r'$\Omega$ evolution in exotic')
plt.xlabel('redshift')
plt.ylabel(r'$\Omega$')
plt.plot(z, om_m, label = 'om_m')
plt.plot(z, om_r, label = 'om_r')
plt.plot(z, om_de, label = 'om_de')
plt.legend()
plt.show()
if plot_key:
# Plotting evolution of parameters in the model.
import plots
plots.modelcheck(mag, zpicks, plot_var, model_key)
return mag | {
"repo_name": "lefthandedroo/Cosmo-models",
"path": "Models/datasim.py",
"copies": "1",
"size": "10689",
"license": "mit",
"hash": 824570707624332400,
"line_mean": 33.4838709677,
"line_max": 167,
"alpha_frac": 0.6037982973,
"autogenerated": false,
"ratio": 3.2341906202723147,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.43379889175723146,
"avg_score": null,
"num_lines": null
} |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 15 13:47:44 2018
@author: BallBlueMeercat
"""
import random
import numpy as np
import zodesolve
import tools
def redshift_picks(zmin, zmax, n):
"""
Takes in:
zmin = integer lowest redshift;
zmax = integer highest redshift;
n = integer number of redshifts to be generated.
Returns:
zpicks = list of randomly selected redshifts between zmin and zmax.
"""
# print('-zpicks has been called')
zinterval = (zmax - zmin) / (n*2)
z_opts = tools.flist(zmin, zmax, zinterval)
zpicks = random.sample(z_opts, n)
zpicks = sorted(zpicks)
return zpicks
#def magn(params, data, firstderivs_key, plot_key=False):
# """
# Finding matter density m, interaction gamma.
#
# Takes in:
# params = dictionary with true parameters;
# zpicks = list of redshifts to integrate over, in accending order;
# firstderivs_key = string, indicates which firstderivs to integrate;
# plot_key = Boolean, to plot or not to plot model figures;
# Returns:
# mag = np.ndarray of apparent mag corresponding to input redshits.
# """
## print('@@@ magn has been called')
# if firstderivs_key == 'LCDM':
# params['gamma'] = 0
# del params['gamma']
#
# zpicks = data['zpicks']
#
# # Absolute brightness of supernovae.
# M = -19
#
# dlpc, plot_var = zodesolve.zodesolve(params, zpicks, firstderivs_key)
#
# # Calculating apparent magnitudes of supernovae at the simulated
# # luminosity distances using the distance modulus formula.
# mag = 5 * np.log10(dlpc/10) + M
#
# if plot_key:
# # Checking evolution of the model.
# import plots
# plots.modelcheck(mag, zpicks, plot_var, firstderivs_key)
#
# return mag
def magn(params, data, firstderivs_key, plot_key=False):
"""
Finding matter density m, corrected absolute mag M, interaction gamma.
Takes in:
params = dictionary w/
'm':int/float = e_m(t)/ec(t0) at t=t0;
'gamma':int/float = interaction term;
'zeta':int/float = interaction term;
'alpha':int/float = SN peak mag correlation parameter;
'beta' :int/float = SN peak mag correlation parameter;
data = dictionary w/
'colour': numpy.ndarray = SN colour;
            'x1': numpy.ndarray = SN stretch correction;
            'zpicks': list of redshifts sorted in ascending order;
'mag':list of apparent magnitudes;
firstderivs_key = string, indicates which firstderivs to integrate;
plot_key = Boolean, to plot or not to plot model figures;
Returns:
        mag = np.ndarray of apparent mag corresponding to input redshifts.
"""
# print('@@@ magn has been called')
if firstderivs_key == 'LCDM':
params['gamma'] = 0
del params['gamma']
zpicks = data['zpicks']
# Absolute brightness of supernovae.
M = params['M']
dlpc, plot_var = zodesolve.zodesolve(params, zpicks, firstderivs_key)
# Calculating apparent magnitudes of supernovae at the simulated
# luminosity distances using the distance modulus formula.
mag = 5 * np.log10(dlpc/10) + M
if plot_key:
# Checking evolution of the model.
import plots
plots.modelcheck(mag, zpicks, plot_var, firstderivs_key)
return mag
#def magn(params, data, firstderivs_key, plot_key=False):
# """
# Finding matter density m, alpha, beta, interaction gamma.
# Takes in:
# params = dictionary with true parameters;
# zpicks = list of redshifts to integrate over, in accending order;
# firstderivs_key = string, indicates which firstderivs to integrate;
# plot_key = Boolean, to plot or not to plot model figures;
# Returns:
# mag = np.ndarray of apparent mag corresponding to input redshits.
# """
## print('@@@ magn has been called')
# if firstderivs_key == 'LCDM':
# params['gamma'] = 0
# del params['gamma']
#
# zpicks = data['zpicks']
# x1 = data['x1']
# colour = data['colour']
#
# # Absolute brightness of supernovae.
# M = params['M']
# alpha = params['alpha']
# beta = params['beta']
#
# dlpc, plot_var = zodesolve.zodesolve(params, zpicks, firstderivs_key)
#
# # Calculating apparent magnitudes of supernovae at the simulated
# # luminosity distances using the distance modulus formula.
# mag = 5 * np.log10(dlpc/10) + M - alpha*x1 +beta*colour
#
# if plot_key:
# # Checking evolution of the model.
# import plots
# plots.modelcheck(mag, zpicks, plot_var, firstderivs_key)
#
# return mag
# Slow mag calculation
# # Calculating apparent magnitudes of supernovae at the simulated
# # luminosity distances using the distance modulus formula.
# mag = []
# for i in range(len(dlpc)):
# if dlpc[i] == 0:
# magnitude = M
# else:
# # magnitude from the distance modulus formula
# magnitude = 5 * math.log10(dlpc[i]/10) + M
# mag.append(magnitude)
def model_comparison(params, zpicks, firstderivs_key, gamma_list=False, zeta_list = False):
"""
Takes in:
params = dictionary with true parameters;
        zpicks = list of redshifts to integrate over, in ascending order;
firstderivs_key = list of strings, which firstderivs to integrate;
gamma_list = list of floats or integers, interaction constants.
Action:
plots one model evolution with different gamma,
or evolution of different models with the same gamma.
"""
import plots
# Absolute brightness of supernovae.
M = -19
plot_var_dict = {}
j = 1
if gamma_list:
print('investigating gamma')
print('firstderivs_key is',firstderivs_key)
for gamma in gamma_list:
params['gamma'] = gamma
dlpc, plot_var = zodesolve.zodesolve(params, zpicks, firstderivs_key)
# Calculating apparent magnitudes of supernovae at the simulated
# luminosity distances using the distance modulus formula.
mag = 5 * np.log10(dlpc/10) + M
plot_var_dict['plot_var_'+str(j)] = plot_var
plot_var_dict['mag_'+str(j)] = mag
j+=1
# Plotting evolution of the model with different gamma.
plots.paramcheck(mag, zpicks, firstderivs_key, plot_var_dict, 'gamma')
elif zeta_list:
print('investigating zeta')
print('firstderivs_key is',firstderivs_key)
for zeta in zeta_list:
params['zeta'] = zeta
dlpc, plot_var = zodesolve.zodesolve(params, zpicks, firstderivs_key)
# Calculating apparent magnitudes of supernovae at the simulated
# luminosity distances using the distance modulus formula.
mag = 5 * np.log10(dlpc/10) + M
plot_var_dict['plot_var_'+str(j)] = plot_var
plot_var_dict['mag_'+str(j)] = mag
j+=1
# Plotting evolution of the model with different gamma.
plots.paramcheck(mag, zpicks, firstderivs_key, plot_var_dict, 'zeta')
elif len(firstderivs_key) > 1:
for key in firstderivs_key:
dlpc, plot_var = zodesolve.zodesolve(params, zpicks, key)
# Calculating apparent magnitudes of supernovae at the simulated
# luminosity distances using the distance modulus formula.
mag = 5 * np.log10(dlpc/10) + M
plot_var_dict['plot_var_'+str(j)] = plot_var
plot_var_dict['mag_'+str(j)] = mag
j+=1
# Plotting evolution of different models with same gamma.
plots.ivcdmcheck(mag, zpicks, firstderivs_key, plot_var_dict)
return
def gnoise(mag, mu, sigma):
"""
Returns:
mag = mag, each point offset by unique Gaussian noise;
noise = Gaussian noise.
"""
# print(' -gnoise has been called')
n = len(mag)
noise = np.random.normal(mu,sigma,n)
mag = mag + noise
# import matplotlib.pyplot as pl
# from pylab import figure
# figure()
# pl.title('Noise distribution')
# pl.hist(noise, 100)
# pl.show()
return mag
def noisy_mag(zpicks, mu, sigma, params, firstderivs_key):
model = magn(params, zpicks, firstderivs_key)
model = np.asarray(model)
mag = gnoise(model, mu, sigma)
return mag
def makensavemagnz(m_true, g_true, mu, sigma, zpicks, data_key, filename):
'''
Takes in:
Parameters used to simulate magnitude:
m_true = e_m(t)/e_crit(t0) at t=t0;
de_true = 1 - m_true = e_de(t)/e_crit(t0) at t=t0;
g_true = interaction term, rate at which DE decays into matter.
    Statistical parameters of Gaussian noise added to data:
mu = mean;
sigma = standard deviation;
npoints = how many mag and z to generate.
Model type:
data_key = string, key for dictionary of interaction modes in firstderivs
Options: 'Hdecay', 'rdecay', 'rdecay_de', 'rdecay_m', 'interacting', 'LCDM'
Length of parameters has to correspond to the model being tested.
filename = string, name of file data is saved to.
Returns:
Nothing. Generates redshifts and corresponding magnitudes (according
to the model specified by data_key) offset by Gaussian noise,
saves them into a binary file called filename in the working directory.
'''
if data_key == 'LCDM':
data_params = {'m':m_true}
else:
data_params = {'m':m_true, 'gamma':g_true}
mag = noisy_mag(zpicks, mu, sigma, data_params, data_key)
output = mag, zpicks
# Relative path of output folder.
save_path = './data/'+filename
import pickle
pickle.dump(output, open(save_path, 'wb'))
return | {
"repo_name": "lefthandedroo/Cosmo-models",
"path": "zprev versions/Models_py_backup/Models backup/datasim.py",
"copies": "1",
"size": "10286",
"license": "mit",
"hash": -3383922935678809000,
"line_mean": 32.2912621359,
"line_max": 91,
"alpha_frac": 0.596538985,
"autogenerated": false,
"ratio": 3.5690492713393476,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46655882563393475,
"avg_score": null,
"num_lines": null
} |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 15 13:47:44 2018
@author: BallBlueMeercat
"""
from math import log10
import numpy as np
import odesolve
#import lnprior
# Empirical parameters.
M = -19 # Absolute brightness of supernovae.
def msim(gamma, m, de, zpicks):
"""
Takes in:
gamma = interaction constant;
m = e_m(t)/ec(t0) at t=t0;
de = e_de(t)/ec(t0) at t=t0;
zpicks = list of z to match the interpolated dlmpc to;
Returns:
        mag = list of n apparent magnitudes mag from corresponding redshifts.
"""
# print('@@@ msim has been called')
# theta = gamma, m, de
# lp = lnprior.lnprior(theta)
# if not np.isfinite(lp):
# print('msim got bad theta: ', theta)
z, dlpc, dl, gamma, e_dash0m, e_dash0de, t, a, a_dot, t_cut, a_cut, a_dotcut, e_dashm, e_dashde = odesolve.odesolve(gamma, m, de, zpicks)
# dlpcinterp = np.interp(zpicks, z, dlpc)
# print('dlpcinterp is:')
# print(dlpcinterp)
# Calculating apparent magnitudes of supernovae at the simulated
# luminosity distances using the distance modulus formula.
# mag = []
# for i in range(len(dlpcinterp)):
# mdistmod = 5 * log10(dlpcinterp[i]/10) + M
# mag.append(mdistmod)
# print('dlpc from msim')
# print(dlpc)
mag = []
for i in range(len(dlpc)):
if dlpc[i] == 0:
mdistmod = M
else:
mdistmod = 5 * log10(dlpc[i]/10) + M
mag.append(mdistmod)
# print('after msim mdistmod calculation')
# print('len t is: ',len(t))
# print('len mag is: ',len(mag))
# print('len dlpc is: ',len(dlpc))
# print('len dl is: ',len(dl))
# print('len a is: ',len(a))
# print('len e_dashm is: ',len(e_dashm))
# print('len e_dashde is: ',len(e_dashde))
# print('len zpicks is: ',len(zpicks))
# print('len z is: ',len(z))
# import plots
# plots.plots(mag, zpicks, z, dlpc, dl, gamma, e_dash0m, e_dash0de, t, a, a_dot, t_cut, a_cut, a_dotcut, e_dashm, e_dashde)
theta = t, mag, dlpc, dl, a, e_dashm, e_dashde
return mag#theta #mag
| {
"repo_name": "lefthandedroo/Cosmo-models",
"path": "zprev versions/msim.py",
"copies": "1",
"size": "2233",
"license": "mit",
"hash": 8328949473519787000,
"line_mean": 19.4862385321,
"line_max": 141,
"alpha_frac": 0.5620241827,
"autogenerated": false,
"ratio": 2.787765293383271,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8713576435530592,
"avg_score": 0.027242608110535638,
"num_lines": 109
} |
"3-value logic (i.e. the way that boolean ops on nulls propagate up in the expression tree in SQL). doesn't rhyme with 'evil' but should."
class ThreeVL:
"Implementation of sql's 3VL. Warning: use == != for comparing python values, not for 3vl comparison. Caveat emptor."
# todo(awinter): is there any downside to using python True/False/None to make this work?
def __init__(self, value):
if value not in ('t', 'f', 'u'):
raise ValueError(value)
self.value = value
def __repr__(self):
return "<3vl %s>" % self.value
def __eq__(self, other):
if not isinstance(other, (bool, ThreeVL)):
return False
return self.value == other.value if isinstance(other, ThreeVL) else {True: 't', False: 'f'}[other] == self.value
  def __ne__(self, other):
return not self == other
def __bool__(self):
# if self.value=='u': raise ValueError("can't cast 3VL 'unknown' to bool") # I think this is okay at top level
return self.value == 't'
@staticmethod
def test(item):
"this is the top-level output to SQL 'where' tests. At this level, 'u' *is* false"
if not isinstance(item, (bool, ThreeVL)):
raise TypeError(type(item)) # todo(awinter): test this on whereclause testing an int
return item if isinstance(item, bool) else item.value == 't'
# note below: the 3vl comparisons return a 3vl OR a bool
@staticmethod
def nein(item):
"this is 'not' but not is a keyword so it's 'nein'"
if not isinstance(item, (bool, ThreeVL)):
raise TypeError(type(item))
return not item if isinstance(item, bool) else ThreeVL(dict(t='f', f='t', u='u')[item.value])
@staticmethod
def andor(operator, left, right):
# todo(awinter): does sql cast values to bools? e.g. nonempty strings, int 0 vs 1
# is this the right one? https://en.wikipedia.org/wiki/Three-valued_logic#Kleene_logic
if operator not in ('and', 'or'):
raise ValueError('unk_operator', operator)
vals = left, right
if not all(isinstance(item, (bool, ThreeVL)) for item in vals):
raise TypeError(list(map(type, vals)))
    if ThreeVL('u') in vals:
      # per Kleene/SQL 3VL: only a dominating value collapses the result, so
      # 'false or unknown' stays unknown and 'true and unknown' stays unknown.
      if operator == 'or':
        return True if True in vals else ThreeVL('u')
      return False if False in vals else ThreeVL('u')
left, right = list(map(bool, vals))
return (left and right) if operator == 'and' else (left or right)
@staticmethod
def compare(operator, left, right):
"this could be replaced by overloading but I want == to return a bool for 'in' use"
# todo(awinter): what about nested 3vl like "(a=b)=(c=d)". is that allowed by sql? It will choke here if there's a null involved.
if left is None or right is None:
return ThreeVL('u')
elif operator == '=':
return left == right
elif operator == '!=':
return left != right
elif operator == '>':
return left > right
elif operator == '<':
return left < right
else:
raise ValueError('unk operator in compare', operator)
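# Hedged usage sketch (added for illustration; not part of the original module).
# It shows how 'unknown' propagates through compare/andor and how test() treats
# it at the top level of a WHERE clause.
if __name__ == '__main__':
  unknown = ThreeVL.compare('=', None, 5)             # comparing with NULL -> 'u'
  assert unknown == ThreeVL('u')
  assert ThreeVL.andor('or', True, unknown) is True   # true OR unknown -> true
  assert ThreeVL.andor('and', False, unknown) is False
  assert ThreeVL.andor('and', True, unknown) == ThreeVL('u')
  assert ThreeVL.test(unknown) is False               # 'u' is falsy at the top level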
| {
"repo_name": "abe-winter/pg13-py",
"path": "pg13/threevl.py",
"copies": "1",
"size": "2964",
"license": "mit",
"hash": 8921190341957108000,
"line_mean": 40.1666666667,
"line_max": 138,
"alpha_frac": 0.6477732794,
"autogenerated": false,
"ratio": 3.6014580801944107,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9661641554698961,
"avg_score": 0.017517960979089833,
"num_lines": 72
} |
""" 3-way String Quicksort.
An excellent algorithm to sort any kind of strings. In most cases,
it will perform better than LSD and MSD radix sorts.
Characteristics:
- O(W * N * log(N)).
- Extra memory usage: log(N) + W.
- It isn't stable.
- Has a short inner loop.
- Is cache-friendly (MSD Radix Sort isn't).
- Is in place (MSD Radix Sort uses extra memory on each recursive iteration).
"""
def _get_char_code(string, position):
"""Return an int representation of the character of `string`
in position `position`. E.g.: string[position]. If the position
doesn't exist in the string, return -1.
:return: an integer representing a position in a string."""
return ord(string[position]) if position < len(string) else -1
def _sort(strings, low, high, digit):
"""Sort strings of `strings` between low and high according to character
in position `digit`.
:param strings: list with all strings to be sorted.
:param low: lower limit of the part of the array to be sorted.
:param high: upper limit of the part of the array to be sorted.
:param digit: position of the string that is going to be used to sort the strings.
:return: None (sort is made in place).
"""
# Boundaries crossed -> no strings to sort.
if high <= low:
return
lt, gt = low, high
v = _get_char_code(strings[low], digit)
i = low + 1
while i <= gt:
t = _get_char_code(strings[i], digit)
if t < v:
strings[i], strings[lt] = strings[lt], strings[i]
lt += 1
i += 1
elif t > v:
strings[i], strings[gt] = strings[gt], strings[i]
gt -= 1
else:
i += 1
_sort(strings, low, lt - 1, digit)
if v >= 0:
_sort(strings, lt, gt, digit + 1)
_sort(strings, gt + 1, high, digit)
def sort(strings):
_sort(strings, 0, len(strings) - 1, 0)
if __name__ == "__main__":
unsorted_strings = [
"are",
"by",
"sea",
"seashells",
"ar",
"seashells",
"z",
"sells",
"sells",
"she",
"a",
"she",
"zorro",
"shells",
"shore",
"surely",
"the",
"the",
]
sort(unsorted_strings)
print(unsorted_strings)
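    # Hedged sanity check (added): for plain ASCII strings the in-place result
    # should match Python's built-in lexicographic sort.
    assert unsorted_strings == sorted(unsorted_strings)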
| {
"repo_name": "rcanepa/cs-fundamentals",
"path": "python/strings/three_way_quicksort.py",
"copies": "1",
"size": "2329",
"license": "mit",
"hash": 8124195156897148000,
"line_mean": 26.4,
"line_max": 86,
"alpha_frac": 0.5607556891,
"autogenerated": false,
"ratio": 3.5611620795107033,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4621917768610703,
"avg_score": null,
"num_lines": null
} |
"""3. XOR decryption:
Each character on a computer is assigned a unique code and the preferred standard is ASCII (American Standard Code for Information Interchange). For example, uppercase A = 65, asterisk (*) = 42, and lowercase k = 107.
A modern encryption method is to take a text file, convert the bytes to ASCII, then XOR each byte with a given value, taken from a secret key. The advantage with the XOR function is that using the same encryption key on the cipher text, restores the plain text; for example, 65 XOR 42 = 107, then 107 XOR 42 = 65.
For unbreakable encryption, the key is the same length as the plain text message, and the key is made up of random bytes. The user would keep the encrypted message and the encryption key in different locations, and without both "halves", it is impossible to decrypt the message.
Unfortunately, this method is impractical for most users, so the modified method is to use a password as a key. If the password is shorter than the message, which is likely, the key is repeated cyclically throughout the message. The balance for this method is using a sufficiently long password key for security, but short enough to be memorable.
Your task has been made easy, as the encryption key consists of three lower case characters. Write a function that takes as a parameter, an array (or list) containing the encrypted ASCII codes, and using the knowledge that the plain text must contain common English words, decrypt the message and find the sum of the ASCII values in the original text.
"""
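# Hedged illustration (added, not part of the original script): XOR with a
# repeating key is self-inverse (65 ^ 42 == 107 and 107 ^ 42 == 65), so applying
# the same key twice recovers the plaintext. The names below exist only for this demo.
from itertools import cycle

def _xor_demo():
    plain = [ord(c) for c in "abc"]
    key = [ord(c) for c in "god"]
    cipher = [p ^ k for p, k in zip(plain, cycle(key))]
    assert [c ^ k for c, k in zip(cipher, cycle(key))] == plain

_xor_demo()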
def xor(cipher, key):
run= True
while run == True:
key = key
crypt = cipher
result =[]
asc_key=[]
out=''
n=1
i=0
p=0
I=0
j=0
crypt = crypt.replace('n','xA')
crypt = crypt.replace('r','xD')
crypt = crypt.replace('t','x9')
result = crypt.split('\\x')
while p <= len(key)-1:
asc_key.append(ord(key[p]))
p+=1
while n <= len(result)-1:
result[n]= chr((asc_key[i])^(int(result[n],16)))
n+=1
if i == (len(key)-1):
i = 0
else: i+=1
while I <= len(result)-1:
out+= (result[I])
I+=1
print ("The correspondant of that key or password is: ",out)
run = input("Do you need to run another? (y or n): ")
if run =="y" or "Y":
run = True
else:
run = False | {
"repo_name": "ojengwa/reportr",
"path": "apps/users/templates/users/xor.py",
"copies": "2",
"size": "2487",
"license": "mit",
"hash": 7146817627389416000,
"line_mean": 48.76,
"line_max": 351,
"alpha_frac": 0.6413349417,
"autogenerated": false,
"ratio": 4.124378109452737,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.030086718595221114,
"num_lines": 50
} |
# 400 Nth Digit
# Find the nth digit of the infinite integer sequence 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, ...
#
# Note:
# n is positive and will fit within the range of a 32-bit signed integer (n < 231).
#
# Example 1:
#
# Input:
# 3
#
# Output:
# 3
#
# Example 2:
#
# Input:
# 11
#
# Output:
# 0
#
# Explanation:
# The 11th digit of the sequence 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, ... is a 0, which is part of the number 10.
class Solution(object):
# https://www.hrwhisper.me/leetcode-contest-5-solution/
    # The key is to work out how many digits the target number has: 1-digit
    # numbers contribute 9*1 digits, 2-digit numbers 90*2, 3-digit numbers
    # 900*3, and so on. Once the right block is found, integer division and
    # the remainder locate the exact digit.
def findNthDigit(self, n):
"""
:type n: int
:rtype: int
"""
num = 9
cnt = 1
while n > num * cnt:
n -= (num * cnt)
num *= 10
cnt += 1
t = n // cnt
base = 10 ** (cnt - 1) + t
if t * cnt == n:
return (base - 1) % 10
n -= t * cnt
return int(str(base)[::-1][-n])
print(Solution().findNthDigit(11))
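# Hedged sanity check (added): brute-force the digit stream "12345678910111213..."
# and compare against findNthDigit for the first thousand positions.
digits = ''.join(str(i) for i in range(1, 1000))
checker = Solution()
assert all(checker.findNthDigit(n) == int(digits[n - 1]) for n in range(1, 1000))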
| {
"repo_name": "gengwg/leetcode",
"path": "400_nth_digit.py",
"copies": "1",
"size": "1132",
"license": "apache-2.0",
"hash": 2657442882156131000,
"line_mean": 20.0204081633,
"line_max": 111,
"alpha_frac": 0.5106796117,
"autogenerated": false,
"ratio": 2.470023980815348,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3480703592515348,
"avg_score": null,
"num_lines": null
} |
# 401. Binary Watch
# A binary watch has 4 LEDs on the top which represent the hours (0-11), and the 6 LEDs on the bottom represent the minutes (0-59).
# Each LED represents a zero or one, with the least significant bit on the right.
# off off on on
# off on on off off on
# For example, the above binary watch reads "3:25".
# Given a non-negative integer n which represents the number of LEDs that are currently on, return all possible times the watch could represent.
# Example:
# Input: n = 1
# Return: ["1:00", "2:00", "4:00", "8:00", "0:01", "0:02", "0:04", "0:08", "0:16", "0:32"]
# Note:
# The order of output does not matter.
# The hour must not contain a leading zero, for example "01:00" is not valid, it should be "1:00".
# The minute must be consist of two digits and may contain a leading zero, for example "10:2" is not valid, it should be "10:02".
class Solution(object):
def readBinaryWatch(self, num):
"""
:type num: int
:rtype: List[str]
The bin() method
"""
return_list = []
for hour in range(12):
for minute in range(60):
if sum(map(lambda number: int(bin(number)[2:].count('1')), [hour, minute])) == num:
return_list += [str(hour) + ":" + str(minute).zfill(2)]
return return_list
class Solution1(object):
def readBinaryWatch(self, num):
"""
:type num: int
:rtype: List[str]
x = x & (x - 1): turn off the rightmost 1
"""
def bit_count(binnum):
count = 0
while binnum:
binnum &= binnum - 1
count += 1
return count
return_list = []
for hour in range(12):
for minute in range(60):
if bit_count(hour) + bit_count(minute) == num:
return_list += ['{}:{}'.format(str(hour), str(minute).zfill(2))]
return return_list
class Solution2(object):
def readBinaryWatch(self, num):
"""
:type num: int
:rtype: List[str]
"""
def bit_count(binnum):
count = 0
while binnum:
binnum &= binnum - 1
count += 1
return count
return ['{}:{}'.format(str(hour), str(minute).zfill(2)) for hour in range(12) for minute in range(60) if bit_count(hour) + bit_count(minute) == num] | {
"repo_name": "aenon/OnlineJudge",
"path": "leetcode/5.BitManipulation/401.BinaryWatch.py",
"copies": "1",
"size": "2498",
"license": "mit",
"hash": 4058486185705010700,
"line_mean": 32.32,
"line_max": 156,
"alpha_frac": 0.5352281825,
"autogenerated": false,
"ratio": 3.668135095447871,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9661714199567286,
"avg_score": 0.008329815676116873,
"num_lines": 75
} |
# 401. Binary Watch
# A binary watch has 4 LEDs on the top which represent the hours (0-11),
# and the 6 LEDs on the bottom represent the minutes (0-59).
#
# Each LED represents a zero or one, with the least significant bit on the right.
#
# For example, the above binary watch reads "3:25".
#
# Given a non-negative integer n which represents the number of LEDs that are currently on,
# return all possible times the watch could represent.
#
# Example:
#
# Input: n = 1
# Return: ["1:00", "2:00", "4:00", "8:00", "0:01", "0:02", "0:04", "0:08", "0:16", "0:32"]
#
# Note:
#
# The order of output does not matter.
# The hour must not contain a leading zero, for example "01:00" is not valid, it should be "1:00".
# The minute must be consist of two digits and may contain a leading zero,
# for example "10:2" is not valid, it should be "10:02".
class Solution(object):
# http://bookshadow.com/weblog/2016/09/18/leetcode-binary-watch/
    # Bit Manipulation
    # The on/off pattern of the 10 LEDs can be encoded as an integer in 0-1023,
    # so enumerate those states and count the binary 1s.
    # Bit operations then split each state into hours and minutes.
def readBinaryWatch(self, num):
"""
:type num: int
:rtype: List[str]
"""
ans = []
for x in range(1024):
if bin(x).count('1') == num:
h, m = x >> 6, x & 0x3F
if h < 12 and m < 60:
ans.append('%d:%02d' %(h, m))
return ans
    # Enumerate hour h and minute m, then check whether the total number of binary 1s equals num
def readBinaryWatch(self, num):
ans = []
for h in range(12):
for m in range(60):
if (bin(h) + bin(m)).count('1') == num:
ans.append('%d:%02d' %(h, m))
return ans
test = Solution()
print(test.readBinaryWatch(1))
print(test.readBinaryWatch(5))
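# Hedged check (added): order-insensitive comparison against the expected output
# for num = 1 from the problem statement above.
assert set(test.readBinaryWatch(1)) == {"1:00", "2:00", "4:00", "8:00", "0:01",
                                        "0:02", "0:04", "0:08", "0:16", "0:32"}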
| {
"repo_name": "gengwg/leetcode",
"path": "401_binary_watch.py",
"copies": "1",
"size": "1897",
"license": "apache-2.0",
"hash": -424775331828466050,
"line_mean": 30.8,
"line_max": 102,
"alpha_frac": 0.586049171,
"autogenerated": false,
"ratio": 2.7074303405572757,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8689905148686616,
"avg_score": 0.02071487257413191,
"num_lines": 55
} |
"""4.0.1
Revision ID: 9aa6f74c9653
Revises: 333998bc1627
Create Date: 2017-04-26 13:16:33.880756
"""
from alembic import op
import sqlalchemy as sa
import manager_rest # Adding this manually
# revision identifiers, used by Alembic.
revision = '9aa6f74c9653'
down_revision = '333998bc1627'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('owners_secrets_users')
op.drop_table('owners_snapshots_users')
op.drop_table('viewers_executions_users')
op.drop_table('viewers_snapshots_users')
op.drop_table('viewers_plugins_users')
op.drop_table('viewers_blueprints_users')
op.drop_table('owners_plugins_users')
op.drop_table('viewers_deployments_users')
op.drop_table('owners_blueprints_users')
op.drop_table('viewers_secrets_users')
op.drop_table('owners_executions_users')
op.drop_table('owners_deployments_users')
op.add_column(
'blueprints',
sa.Column('private_resource', sa.Boolean(), nullable=True),
)
op.add_column(
'deployment_modifications',
sa.Column('_creator_id', sa.Integer(), nullable=False),
)
op.add_column(
'deployment_modifications',
sa.Column('_tenant_id', sa.Integer(), nullable=False),
)
op.add_column(
'deployment_modifications',
sa.Column('private_resource', sa.Boolean(), nullable=True),
)
op.create_foreign_key(
op.f('deployment_modifications__tenant_id_fkey'),
'deployment_modifications',
'tenants',
['_tenant_id'],
['id'],
ondelete='CASCADE',
)
op.create_foreign_key(
op.f('deployment_modifications__creator_id_fkey'),
'deployment_modifications',
'users',
['_creator_id'],
['id'],
ondelete='CASCADE',
)
op.add_column(
'deployment_update_steps',
sa.Column('_creator_id', sa.Integer(), nullable=False),
)
op.add_column(
'deployment_update_steps',
sa.Column('_tenant_id', sa.Integer(), nullable=False),
)
op.add_column(
'deployment_update_steps',
sa.Column('private_resource', sa.Boolean(), nullable=True),
)
op.create_foreign_key(
op.f('deployment_update_steps__tenant_id_fkey'),
'deployment_update_steps',
'tenants',
['_tenant_id'],
['id'],
ondelete='CASCADE',
)
op.create_foreign_key(
op.f('deployment_update_steps__creator_id_fkey'),
'deployment_update_steps',
'users',
['_creator_id'],
['id'],
ondelete='CASCADE',
)
op.add_column(
'deployment_updates',
sa.Column('_creator_id', sa.Integer(), nullable=False),
)
op.add_column(
'deployment_updates',
sa.Column('_tenant_id', sa.Integer(), nullable=False),
)
op.add_column(
'deployment_updates',
sa.Column('private_resource', sa.Boolean(), nullable=True),
)
op.create_foreign_key(
op.f('deployment_updates__creator_id_fkey'),
'deployment_updates',
'users',
['_creator_id'],
['id'],
ondelete='CASCADE',
)
op.create_foreign_key(
op.f('deployment_updates__tenant_id_fkey'),
'deployment_updates',
'tenants',
['_tenant_id'],
['id'],
ondelete='CASCADE',
)
op.add_column(
'deployments',
sa.Column('_tenant_id', sa.Integer(), nullable=True),
)
op.execute(
'UPDATE deployments '
'SET _tenant_id = blueprints._tenant_id '
'FROM blueprints '
'WHERE deployments._blueprint_fk = blueprints._storage_id'
)
    op.alter_column('deployments', '_tenant_id', nullable=False)
op.add_column(
'deployments',
sa.Column('private_resource', sa.Boolean(), nullable=True),
)
op.create_foreign_key(
op.f('deployments__tenant_id_fkey'),
'deployments',
'tenants',
['_tenant_id'],
['id'],
ondelete='CASCADE',
)
op.add_column(
'events',
sa.Column('_creator_id', sa.Integer(), nullable=True),
)
op.add_column(
'events',
sa.Column('_tenant_id', sa.Integer(), nullable=True),
)
op.add_column(
'events',
sa.Column(
'reported_timestamp',
manager_rest.storage.models_base.UTCDateTime(),
nullable=True,
),
)
op.execute(
'UPDATE events '
'SET '
'_creator_id = executions._creator_id, '
'_tenant_id = executions._tenant_id, '
'reported_timestamp = timestamp '
'FROM executions '
'WHERE events._execution_fk = executions._storage_id'
)
op.alter_column('events', '_creator_id', nullable=False)
op.alter_column('events', '_tenant_id', nullable=False)
op.alter_column('events', 'reported_timestamp', nullable=False)
op.alter_column(
'events',
'timestamp',
server_default=sa.func.current_timestamp(),
nullable=False,
)
op.add_column(
'events',
sa.Column(
'error_causes',
manager_rest.storage.models_base.JSONString(),
nullable=True,
),
)
op.add_column(
'events',
sa.Column('private_resource', sa.Boolean(), nullable=True),
)
op.create_foreign_key(
op.f('events__tenant_id_fkey'),
'events',
'tenants',
['_tenant_id'],
['id'],
ondelete='CASCADE',
)
op.create_foreign_key(
op.f('events__creator_id_fkey'),
'events',
'users',
['_creator_id'],
['id'],
ondelete='CASCADE',
)
op.add_column(
'executions',
sa.Column('private_resource', sa.Boolean(), nullable=True),
)
op.add_column(
'logs',
sa.Column('_creator_id', sa.Integer(), nullable=True),
)
op.add_column(
'logs',
sa.Column('_tenant_id', sa.Integer(), nullable=True),
)
op.add_column(
'logs',
sa.Column(
'reported_timestamp',
manager_rest.storage.models_base.UTCDateTime(),
nullable=True,
),
)
op.execute(
'UPDATE logs '
'SET '
'_creator_id = executions._creator_id, '
'_tenant_id = executions._tenant_id, '
'reported_timestamp = timestamp '
'FROM executions '
'WHERE logs._execution_fk = executions._storage_id'
)
op.alter_column('logs', '_creator_id', nullable=False)
op.alter_column('logs', '_tenant_id', nullable=False)
op.alter_column('logs', 'reported_timestamp', nullable=False)
op.alter_column(
'logs',
'timestamp',
server_default=sa.func.current_timestamp(),
nullable=False,
)
op.add_column(
'logs',
sa.Column('private_resource', sa.Boolean(), nullable=True),
)
op.create_foreign_key(
op.f('logs__tenant_id_fkey'),
'logs',
'tenants',
['_tenant_id'],
['id'],
ondelete='CASCADE',
)
op.create_foreign_key(
op.f('logs__creator_id_fkey'),
'logs',
'users',
['_creator_id'],
['id'],
ondelete='CASCADE',
)
op.add_column(
'nodes',
sa.Column('_creator_id', sa.Integer(), nullable=True),
)
op.add_column(
'nodes',
sa.Column('_tenant_id', sa.Integer(), nullable=True),
)
op.execute(
'UPDATE nodes '
'SET '
'_creator_id = deployments._creator_id, '
'_tenant_id = deployments._tenant_id '
'FROM deployments '
'WHERE nodes._deployment_fk = deployments._storage_id'
)
op.alter_column('nodes', '_creator_id', nullable=False)
op.alter_column('nodes', '_tenant_id', nullable=False)
op.add_column(
'nodes',
sa.Column('private_resource', sa.Boolean(), nullable=True),
)
op.create_foreign_key(
op.f('nodes__tenant_id_fkey'),
'nodes',
'tenants',
['_tenant_id'],
['id'],
ondelete='CASCADE',
)
op.create_foreign_key(
op.f('nodes__creator_id_fkey'),
'nodes',
'users',
['_creator_id'],
['id'],
ondelete='CASCADE',
)
op.add_column(
'node_instances',
sa.Column('_creator_id', sa.Integer(), nullable=True),
)
op.add_column(
'node_instances',
sa.Column('_tenant_id', sa.Integer(), nullable=True),
)
op.execute(
'UPDATE node_instances '
'SET '
'_creator_id = nodes._creator_id, '
'_tenant_id = nodes._tenant_id '
'FROM nodes '
'WHERE node_instances._node_fk = nodes._storage_id'
)
op.alter_column('node_instances', '_creator_id', nullable=False)
op.alter_column('node_instances', '_tenant_id', nullable=False)
op.add_column(
'node_instances',
sa.Column('private_resource', sa.Boolean(), nullable=True),
)
op.create_foreign_key(
op.f('node_instances__creator_id_fkey'),
'node_instances',
'users',
['_creator_id'],
['id'],
ondelete='CASCADE',
)
op.create_foreign_key(
op.f('node_instances__tenant_id_fkey'),
'node_instances',
'tenants',
['_tenant_id'],
['id'],
ondelete='CASCADE',
)
op.add_column(
'plugins',
sa.Column('private_resource', sa.Boolean(), nullable=True),
)
op.add_column(
'secrets',
sa.Column('private_resource', sa.Boolean(), nullable=True),
)
op.add_column(
'snapshots',
sa.Column('private_resource', sa.Boolean(), nullable=True),
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('snapshots', 'private_resource')
op.drop_column('secrets', 'private_resource')
op.drop_column('plugins', 'private_resource')
op.drop_constraint(
op.f('node_instances__tenant_id_fkey'),
'node_instances',
type_='foreignkey',
)
op.drop_constraint(
op.f('node_instances__creator_id_fkey'),
'node_instances',
type_='foreignkey',
)
op.drop_column('node_instances', 'private_resource')
op.drop_column('node_instances', '_tenant_id')
op.drop_column('node_instances', '_creator_id')
op.drop_constraint(
op.f('nodes__creator_id_fkey'),
'nodes',
type_='foreignkey',
)
op.drop_constraint(
op.f('nodes__tenant_id_fkey'),
'nodes',
type_='foreignkey',
)
op.drop_column('nodes', 'private_resource')
op.drop_column('nodes', '_tenant_id')
op.drop_column('nodes', '_creator_id')
op.drop_constraint(
op.f('logs__creator_id_fkey'),
'logs',
type_='foreignkey',
)
op.drop_constraint(
op.f('logs__tenant_id_fkey'),
'logs',
type_='foreignkey',
)
op.alter_column('logs', 'timestamp', server_default=None, nullable=False)
op.drop_column('logs', 'reported_timestamp')
op.drop_column('logs', 'private_resource')
op.drop_column('logs', '_tenant_id')
op.drop_column('logs', '_creator_id')
op.drop_column('executions', 'private_resource')
op.drop_constraint(
op.f('events__creator_id_fkey'),
'events',
type_='foreignkey',
)
op.drop_constraint(
op.f('events__tenant_id_fkey'),
'events',
type_='foreignkey',
)
op.alter_column('events', 'timestamp', server_default=None, nullable=False)
op.drop_column('events', 'reported_timestamp')
op.drop_column('events', 'private_resource')
op.drop_column('events', 'error_causes')
op.drop_column('events', '_tenant_id')
op.drop_column('events', '_creator_id')
op.drop_constraint(
op.f('deployments__tenant_id_fkey'),
'deployments',
type_='foreignkey',
)
op.drop_column('deployments', 'private_resource')
op.drop_column('deployments', '_tenant_id')
op.drop_constraint(
op.f('deployment_updates__tenant_id_fkey'),
'deployment_updates',
type_='foreignkey',
)
op.drop_constraint(op.f(
'deployment_updates__creator_id_fkey'),
'deployment_updates',
type_='foreignkey',
)
op.drop_column('deployment_updates', 'private_resource')
op.drop_column('deployment_updates', '_tenant_id')
op.drop_column('deployment_updates', '_creator_id')
op.drop_constraint(
op.f('deployment_update_steps__creator_id_fkey'),
'deployment_update_steps',
type_='foreignkey',
)
op.drop_constraint(
op.f('deployment_update_steps__tenant_id_fkey'),
'deployment_update_steps',
type_='foreignkey',
)
op.drop_column('deployment_update_steps', 'private_resource')
op.drop_column('deployment_update_steps', '_tenant_id')
op.drop_column('deployment_update_steps', '_creator_id')
op.drop_constraint(
op.f('deployment_modifications__creator_id_fkey'),
'deployment_modifications',
type_='foreignkey',
)
op.drop_constraint(
op.f('deployment_modifications__tenant_id_fkey'),
'deployment_modifications',
type_='foreignkey',
)
op.drop_column('deployment_modifications', 'private_resource')
op.drop_column('deployment_modifications', '_tenant_id')
op.drop_column('deployment_modifications', '_creator_id')
op.drop_column('blueprints', 'private_resource')
op.create_table(
'owners_deployments_users',
sa.Column(
'deployment_id',
sa.INTEGER(),
autoincrement=False,
nullable=True,
),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(
['deployment_id'],
[u'deployments._storage_id'],
name=u'owners_deployments_users_deployment_id_fkey',
),
sa.ForeignKeyConstraint(
['user_id'],
[u'users.id'],
name=u'owners_deployments_users_user_id_fkey',
)
)
op.create_table(
'owners_executions_users',
sa.Column(
'execution_id',
sa.INTEGER(),
autoincrement=False,
nullable=True,
),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(
['execution_id'],
[u'executions._storage_id'],
name=u'owners_executions_users_execution_id_fkey',
),
sa.ForeignKeyConstraint(
['user_id'],
[u'users.id'],
name=u'owners_executions_users_user_id_fkey',
)
)
op.create_table(
'viewers_secrets_users',
sa.Column(
'secret_id',
sa.INTEGER(),
autoincrement=False,
nullable=True,
),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(
['secret_id'],
[u'secrets._storage_id'],
name=u'viewers_secrets_users_secret_id_fkey',
),
sa.ForeignKeyConstraint(
['user_id'],
[u'users.id'],
name=u'viewers_secrets_users_user_id_fkey',
)
)
op.create_table(
'owners_blueprints_users',
sa.Column(
'blueprint_id',
sa.INTEGER(),
autoincrement=False,
nullable=True,
),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(
['blueprint_id'],
[u'blueprints._storage_id'],
name=u'owners_blueprints_users_blueprint_id_fkey',
),
sa.ForeignKeyConstraint(
['user_id'],
[u'users.id'],
name=u'owners_blueprints_users_user_id_fkey',
)
)
op.create_table(
'viewers_deployments_users',
sa.Column(
'deployment_id',
sa.INTEGER(),
autoincrement=False,
nullable=True,
),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(
['deployment_id'],
[u'deployments._storage_id'],
name=u'viewers_deployments_users_deployment_id_fkey',
),
sa.ForeignKeyConstraint(
['user_id'],
[u'users.id'],
name=u'viewers_deployments_users_user_id_fkey',
)
)
op.create_table(
'owners_plugins_users',
sa.Column(
'plugin_id',
sa.INTEGER(),
autoincrement=False,
nullable=True,
),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(
['plugin_id'],
[u'plugins._storage_id'],
name=u'owners_plugins_users_plugin_id_fkey',
),
sa.ForeignKeyConstraint(
['user_id'],
[u'users.id'],
name=u'owners_plugins_users_user_id_fkey',
)
)
op.create_table(
'viewers_blueprints_users',
sa.Column(
'blueprint_id',
sa.INTEGER(),
autoincrement=False,
nullable=True,
),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(
['blueprint_id'],
[u'blueprints._storage_id'],
name=u'viewers_blueprints_users_blueprint_id_fkey',
),
sa.ForeignKeyConstraint(
['user_id'],
[u'users.id'],
name=u'viewers_blueprints_users_user_id_fkey',
)
)
op.create_table(
'viewers_plugins_users',
sa.Column(
'plugin_id',
sa.INTEGER(),
autoincrement=False,
nullable=True,
),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(
['plugin_id'],
[u'plugins._storage_id'],
name=u'viewers_plugins_users_plugin_id_fkey',
),
sa.ForeignKeyConstraint(
['user_id'],
[u'users.id'],
name=u'viewers_plugins_users_user_id_fkey',
)
)
op.create_table(
'viewers_snapshots_users',
sa.Column(
'snapshot_id',
sa.INTEGER(),
autoincrement=False,
nullable=True,
),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(
['snapshot_id'],
[u'snapshots._storage_id'],
name=u'viewers_snapshots_users_snapshot_id_fkey',
),
sa.ForeignKeyConstraint(
['user_id'],
[u'users.id'],
name=u'viewers_snapshots_users_user_id_fkey',
)
)
op.create_table(
'viewers_executions_users',
sa.Column(
'execution_id',
sa.INTEGER(),
autoincrement=False,
nullable=True,
),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(
['execution_id'],
[u'executions._storage_id'],
name=u'viewers_executions_users_execution_id_fkey',
),
sa.ForeignKeyConstraint(
['user_id'],
[u'users.id'],
name=u'viewers_executions_users_user_id_fkey',
)
)
op.create_table(
'owners_snapshots_users',
sa.Column(
'snapshot_id',
sa.INTEGER(),
autoincrement=False,
nullable=True,
),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(
['snapshot_id'],
[u'snapshots._storage_id'],
name=u'owners_snapshots_users_snapshot_id_fkey',
),
sa.ForeignKeyConstraint(
['user_id'],
[u'users.id'],
name=u'owners_snapshots_users_user_id_fkey',
)
)
op.create_table(
'owners_secrets_users',
sa.Column(
'secret_id',
sa.INTEGER(),
autoincrement=False,
nullable=True,
),
sa.Column('user_id', sa.INTEGER(), autoincrement=False, nullable=True),
sa.ForeignKeyConstraint(
['secret_id'],
[u'secrets._storage_id'],
name=u'owners_secrets_users_secret_id_fkey',
),
sa.ForeignKeyConstraint(
['user_id'],
[u'users.id'],
name=u'owners_secrets_users_user_id_fkey',
)
)
# ### end Alembic commands ###
| {
"repo_name": "isaac-s/cloudify-manager",
"path": "resources/rest-service/cloudify/migrations/versions/9aa6f74c9653_4_0_1.py",
"copies": "2",
"size": "21085",
"license": "apache-2.0",
"hash": 6190044995742350000,
"line_mean": 28.8654390935,
"line_max": 79,
"alpha_frac": 0.5412852739,
"autogenerated": false,
"ratio": 3.7645063381539012,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5305791612053901,
"avg_score": null,
"num_lines": null
} |
# 402. Remove K Digits
# Given a non-negative integer num represented as a string, remove k digits from the number so that the new number is the smallest possible.
#
# Note:
#
# The length of num is less than 10002 and will be ≥ k.
# The given num does not contain any leading zero.
#
# Example 1:
#
# Input: num = "1432219", k = 3
# Output: "1219"
# Explanation: Remove the three digits 4, 3, and 2 to form the new number 1219 which is the smallest.
#
# Example 2:
#
# Input: num = "10200", k = 1
# Output: "200"
# Explanation: Remove the leading 1 and the number is 200. Note that the output must not contain leading zeroes.
#
# Example 3:
#
# Input: num = "10", k = 2
# Output: "0"
# Explanation: Remove all the digits from the number and it is left with nothing which is 0.
class Solution:
# https://leetcode.com/problems/remove-k-digits/discuss/88668/Short-Python-one-O(n)-and-one-RegEx
# Go through the digits from left to right, remove previous digits if that makes the number smaller
# (and if we still have to remove digits).
def removeKdigits(self, num, k):
"""
:type num: str
:type k: int
:rtype: str
"""
out = []
for d in num:
while k and out and out[-1] > d:
out.pop()
k -= 1
out.append(d)
return ''.join(out[:-k or None]).lstrip('0') or '0'
sol = Solution()
print(sol.removeKdigits(num = "1432219", k = 3))
print(sol.removeKdigits(num = "10200", k = 1))
| {
"repo_name": "gengwg/leetcode",
"path": "402_remove_k_digits.py",
"copies": "1",
"size": "1517",
"license": "apache-2.0",
"hash": -593435708085099900,
"line_mean": 30.5625,
"line_max": 140,
"alpha_frac": 0.6244224422,
"autogenerated": false,
"ratio": 3.315098468271335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44395209104713346,
"avg_score": null,
"num_lines": null
} |
#40/40
#Part 1: Terminology (15 points) --> 15/15
#1 1pt) What is the symbol "=" used for?
#to assign and store values to and in variables
# 1pt
#
#2 3pts) Write a technical definition for 'function'
#a named sequence of calculations which takes input and returns output
# 3pts
#
#3 1pt) What does the keyword "return" do?
#it gives back the output or result of the function
# 1pt
#
#4 5pts) We know 5 basic data types. Write the name for each one and provide two
# examples of each below
# 1: integer ex: 1, 2
# 2: floating point ex: 1.2, 1.3
# 3: string ex: "hi", "hello"
# 4: boolean ex: True, False
# 5: tuple ex: ("HEllo", 3), ("Bob", 10, "fat")
# 5pts
#
#5 2pts) What is the difference between a "function definition" and a
# "function call"?
#a function definition does not result in any output being presented, it simply defines a set of calculations which are run if and only if they are called by a function call
# 2pts
#
#
#6 3pts) What are the 3 phases that every computer program has? What happens in
# each of them
# 1:input (the program takes some input values, most often from the user)
# 2:processing (the program does something with those input values to for instance calculate something)
# 3:output (the program returns the product of its labours (processing) often a something printed
# 3pts
#
#Part 2: Programming (25 points) --> 25/25
#Write a program that asks the user for the areas of 3 circles.
#It should then calculate the diameter of each and the sum of the diameters
#of the 3 circles.
#Finally, it should produce output like this:
#Circle Diameter
#c1 ...
#c2 ...
#c3 ...
#TOTALS ...
# Hint: Radius is the square root of the area divided by pi (a = pi(r)^2) so r = sqrt(a/pi)
import math
#1 pt for header line
#3 pt for correct formula
#1 pt for return value
#1 pt for parameter name
#1 pt for function name
def circarea_to_diameter(circarea):
    return 2 * (math.sqrt(circarea/math.pi)) #finds radius and multiplies by 2 to get diameter
def sum_three(x, y, z): #takes three values and adds them
return x + y + z
#1pt for header line
#1pt for parameter names
#1pt for return value
#1pt for correct output format
#3pt for correct use of format function
def output(d1, d2, d3, total):
return """
Circle Diameter
C1 {}
C2 {}
C3 {}
Totals {}
""".format(d1, d2, d3, total)
#1pt header line
#1pt getting input
#1pt converting input
#1pt for calling output function
#2pt for correct diameter formula
#1pt for variable names
def main():
#input
C1 = raw_input("Area of C1: ")
C2 = raw_input("Area of C2: ")
C3 = raw_input("Area of C3: ")
#processing
d1 = circarea_to_diameter(float(C1))
d2 = circarea_to_diameter(float(C2))
d3 = circarea_to_diameter(float(C3))
total = sum_three(d1, d2, d3)
#output
print output(d1, d2, d3, total)
#1pt for calling main
main()
#1pt explanatory comments
#1pt code format
| {
"repo_name": "ieuan1630-cmis/ieuan1630-cmis-cs2",
"path": "cs2quiz1.py",
"copies": "1",
"size": "2934",
"license": "cc0-1.0",
"hash": -890083904744678400,
"line_mean": 27.2115384615,
"line_max": 173,
"alpha_frac": 0.6932515337,
"autogenerated": false,
"ratio": 3.04989604989605,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42431475835960497,
"avg_score": null,
"num_lines": null
} |
# 406. Queue Reconstruction by Height
# Suppose you have a random list of people standing in a queue.
# Each person is described by a pair of integers (h, k),
# where h is the height of the person
# and k is the number of people in front of this person who have a height greater than or equal to h.
# Write an algorithm to reconstruct the queue.
# Note:
# The number of people is less than 1,100.
# Example
# Input:
# [[7,0], [4,4], [7,1], [5,0], [6,1], [5,2]]
# Output:
# [[5,0], [7,0], [5,2], [6,1], [4,4], [7,1]]
class Solution(object):
# https://leetcode.com/problems/queue-reconstruction-by-height/discuss/89345/Easy-concept-with-PythonC++Java-Solution
# 1. Pick out tallest group of people and sort them in a subarray (S).
# Since there's no other groups of people taller than them,
# therefore each guy's index will be just as same as his k value.
# 2. For 2nd tallest group (and the rest),
# insert each one of them into (S) by k value. So on and so forth.
# E.g.
# input: [[7,0], [4,4], [7,1], [5,0], [6,1], [5,2]]
# subarray after step 1: [[7,0], [7,1]]
# subarray after step 2: [[7,0], [6,1], [7,1]]
# subarray after step 3: [[5,0], [7,0], [5,2], [6,1], [7,1]]
# ...
def reconstructQueue(self, people):
"""
:type people: List[List[int]]
:rtype: List[List[int]]
"""
res = []
if not people:
return res
# height = []
# key = height, value = [(k-value, index in original array)]
peopledict = {}
for i in range(len(people)):
p = people[i]
if p[0] in peopledict:
# peopledict[p[0]] += (p[1], i),
peopledict[p[0]].append((p[1], i))
else:
peopledict[p[0]] = [(p[1], i)]
# height += p[0],
# height.append(p[0])
# print(peopledict)
heights = sorted(peopledict.keys(), reverse=True)
for h in heights:
peopledict[h].sort() # sort the tuples
for p in peopledict[h]:
res.insert(p[0], people[p[1]])
return res
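# Added sketch (not part of the original solution): a more compact variant of the same
# greedy idea -- sort by (-height, k), then insert each person at index k.
def reconstruct_queue_by_sorting(people):
    res = []
    for p in sorted(people, key=lambda x: (-x[0], x[1])):
        res.insert(p[1], p)
    return res
# print(reconstruct_queue_by_sorting([[7,0], [4,4], [7,1], [5,0], [6,1], [5,2]]))  # same result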
print (Solution().reconstructQueue([[7,0], [4,4], [7,1], [5,0], [6,1], [5,2]]))
# [[5, 0], [7, 0], [5, 2], [6, 1], [4, 4], [7, 1]]
| {
"repo_name": "gengwg/leetcode",
"path": "406_queue_reconstruction_by_height.py",
"copies": "1",
"size": "2319",
"license": "apache-2.0",
"hash": 1504007671268904000,
"line_mean": 31.2083333333,
"line_max": 121,
"alpha_frac": 0.5286761535,
"autogenerated": false,
"ratio": 3.059366754617414,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9069882501455486,
"avg_score": 0.0036320813323855914,
"num_lines": 72
} |
# 40824
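# Project Euler problem 8: find the greatest product of five consecutive digits in
# the 1000-digit number below (40824 is the answer to that 5-digit version).
# Comment added for context; assumes the local `euler` helper module provides product().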
import euler
s = """
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450
"""
s = ''.join(s.split())
m = 0
for n in xrange(len(s) - 4):
val = s[n:n+5]
x = euler.product([int(c) for c in val])
if x > m:
m = x
print m
| {
"repo_name": "higgsd/euler",
"path": "py/8.py",
"copies": "1",
"size": "1212",
"license": "bsd-2-clause",
"hash": -9190594148758780000,
"line_mean": 35.7272727273,
"line_max": 50,
"alpha_frac": 0.900990099,
"autogenerated": false,
"ratio": 2.312977099236641,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3713967198236641,
"avg_score": null,
"num_lines": null
} |
# 408 Valid Word Abbreviation
# Given a non-empty string s and an abbreviation abbr,
# return whether the string matches with the given abbreviation.
#
# A string such as “word” contains only the following valid abbreviations:
#
# [“word”, “1ord”, “w1rd”, “wo1d”, “wor1”, “2rd”, “w2d”, “wo2”, “1o1d”, “1or1”, “w1r1”, “1o2”, “2r1”, “3d”, “w3”, “4”]
# Notice that only the above abbreviations are valid abbreviations of the string “word”.
# Any other string is not a valid abbreviation of “word”.
#
# Note:
# Assume s contains only lowercase letters and abbr contains only lowercase letters and digits.
#
#
# Although this is marked as an easy problem, there are two pitfalls:
# 1. abbr may end with digits, e.g. s = "internationalization", abbr = "i5a11o1",
#    so the final return must include the pending count: index + int(count).
# 2. The abbreviation may contain a 0, e.g. s = "a", abbr = "01". Any 0 that does not follow
#    another digit is invalid -- "01" is invalid while "10" is valid -- so add that check too.
class Solution:
def validWordAbbreviation(self, word, abbr):
index = 0 # current position in `word`
count = '0' # numbers in `abbr`
for c in abbr:
if not str.isdigit(c):
# cur pos in word is index + char_count
index += int(count)
if index >= len(word) or c != word[index]:
return False
# reset count to 0 and increment index by 1
count = '0'
index += 1
else:
if count == '0' and c == '0':
return False
count += c
# remember to add the final count (if not 0)
return index + int(count) == len(word)
sol = Solution()
print(sol.validWordAbbreviation(word="internationalization", abbr="i5a11o1"))
print(sol.validWordAbbreviation(word="word", abbr="w2d"))
print(sol.validWordAbbreviation(word="a", abbr="1"))
print(sol.validWordAbbreviation(word="a", abbr="01"))
| {
"repo_name": "gengwg/leetcode",
"path": "408_valid_word_abbreviation.py",
"copies": "1",
"size": "2077",
"license": "apache-2.0",
"hash": 164963088853278980,
"line_mean": 37.3958333333,
"line_max": 118,
"alpha_frac": 0.6098752035,
"autogenerated": false,
"ratio": 2.5561719833564496,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.3666047186856449,
"avg_score": null,
"num_lines": null
} |
# 409. Longest Palindrome
#
# Given a string which consists of lowercase or uppercase letters,
# find the length of the longest palindromes that can be built with those letters.
#
# This is case sensitive, for example "Aa" is not considered a palindrome here.
#
# Note:
# Assume the length of given string will not exceed 1,010.
#
# Example:
#
# Input:
# "abccccdd"
#
# Output:
# 7
#
# Explanation:
# One longest palindrome that can be built is "dccaccd", whose length is 7.
class Solution(object):
def longestPalindrome(self, s):
"""
:type s: str
:rtype: int
"""
ctmap = {}
for c in s:
if c not in ctmap:
ctmap[c] = 1
else:
ctmap[c] += 1
ret = 0
singleCharFound = 0
for key in ctmap:
if ctmap[key] %2 == 0:
ret += ctmap[key]
else:
ret += ctmap[key] - 1
singleCharFound = 1
return ret + singleCharFound
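# Alternative sketch (added, not part of the original solution) using collections.Counter:
#   from collections import Counter
#   counts = Counter(s)
#   return 2 * sum(v // 2 for v in counts.values()) + any(v % 2 for v in counts.values())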
s = Solution()
print(s.longestPalindrome("abccccdd"))
| {
"repo_name": "gengwg/leetcode",
"path": "409_longest_palindrome.py",
"copies": "1",
"size": "1071",
"license": "apache-2.0",
"hash": 2610282620638048000,
"line_mean": 22.8,
"line_max": 82,
"alpha_frac": 0.5602240896,
"autogenerated": false,
"ratio": 3.5,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9548303390932313,
"avg_score": 0.002384139733537324,
"num_lines": 45
} |
"""4.0
Revision ID: 333998bc1627
Revises:
Create Date: 2017-04-26 12:42:40.587570
"""
from alembic import op
import sqlalchemy as sa
import manager_rest # Adding this manually
# revision identifiers, used by Alembic.
revision = '333998bc1627'
down_revision = None
branch_labels = None
depends_on = None
snapshot_status = sa.Enum(
'created',
'failed',
'creating',
'uploaded',
name='snapshot_status',
)
deployment_modification_status = sa.Enum(
'started',
'finished',
'rolledback',
name='deployment_modification_status',
)
execution_status = sa.Enum(
'terminated',
'failed',
'cancelled',
'pending',
'started',
'cancelling',
'force_cancelling',
name='execution_status',
)
action_type = sa.Enum(
'add',
'remove',
'modify',
name='action_type',
)
entity_type = sa.Enum(
'node',
'relationship',
'property',
'operation',
'workflow',
'output',
'description',
'group',
'policy_type',
'policy_trigger',
'plugin',
name='entity_type',
)
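# Note (added): the Enum types above are declared at module level so that downgrade()
# can drop them from the database explicitly via <enum_name>.drop(op.get_bind()).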
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table(
'groups',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('ldap_dn', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(
op.f('ix_groups_ldap_dn'),
'groups',
['ldap_dn'],
unique=True,
)
op.create_index(op.f('ix_groups_name'), 'groups', ['name'], unique=True)
op.create_table(
'provider_context',
sa.Column('id', sa.Text(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('context', sa.PickleType(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table(
'roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_roles_name'), 'roles', ['name'], unique=True)
op.create_table(
'tenants',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_tenants_name'), 'tenants', ['name'], unique=True)
op.create_table(
'users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=255), nullable=True),
sa.Column('active', sa.Boolean(), nullable=True),
sa.Column(
'created_at',
manager_rest.storage.models_base.UTCDateTime(),
nullable=True,
),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('first_name', sa.String(length=255), nullable=True),
sa.Column(
'last_login_at',
manager_rest.storage.models_base.UTCDateTime(),
nullable=True,
),
sa.Column('last_name', sa.String(length=255), nullable=True),
sa.Column('password', sa.String(length=255), nullable=True),
sa.Column('api_token_key', sa.String(length=100), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(
op.f('ix_users_username'),
'users',
['username'],
unique=True,
)
op.create_table(
'blueprints',
sa.Column('_storage_id', sa.Integer(), nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column(
'created_at',
manager_rest.storage.models_base.UTCDateTime(),
nullable=False,
),
sa.Column('main_file_name', sa.Text(), nullable=False),
sa.Column('plan', sa.PickleType(), nullable=False),
sa.Column(
'updated_at',
manager_rest.storage.models_base.UTCDateTime(),
nullable=True,
),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('_tenant_id', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_creator_id'],
[u'users.id'],
ondelete='CASCADE',
),
sa.ForeignKeyConstraint(
['_tenant_id'],
[u'tenants.id'],
ondelete='CASCADE',
),
sa.PrimaryKeyConstraint('_storage_id')
)
op.create_index(
op.f('ix_blueprints_created_at'),
'blueprints',
['created_at'],
unique=False,
)
op.create_index(
op.f('ix_blueprints_id'),
'blueprints',
['id'],
unique=False,
)
op.create_table(
'groups_tenants',
sa.Column('group_id', sa.Integer(), nullable=True),
sa.Column('tenant_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['group_id'], ['groups.id'], ),
sa.ForeignKeyConstraint(['tenant_id'], ['tenants.id'], )
)
op.create_table(
'plugins',
sa.Column('_storage_id', sa.Integer(), nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column('archive_name', sa.Text(), nullable=False),
sa.Column('distribution', sa.Text(), nullable=True),
sa.Column('distribution_release', sa.Text(), nullable=True),
sa.Column('distribution_version', sa.Text(), nullable=True),
sa.Column('excluded_wheels', sa.PickleType(), nullable=True),
sa.Column('package_name', sa.Text(), nullable=False),
sa.Column('package_source', sa.Text(), nullable=True),
sa.Column('package_version', sa.Text(), nullable=True),
sa.Column('supported_platform', sa.PickleType(), nullable=True),
sa.Column('supported_py_versions', sa.PickleType(), nullable=True),
sa.Column(
'uploaded_at',
manager_rest.storage.models_base.UTCDateTime(),
nullable=False,
),
sa.Column('wheels', sa.PickleType(), nullable=False),
sa.Column('_tenant_id', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_creator_id'],
[u'users.id'],
ondelete='CASCADE',
),
sa.ForeignKeyConstraint(
['_tenant_id'],
[u'tenants.id'],
ondelete='CASCADE',
),
sa.PrimaryKeyConstraint('_storage_id')
)
op.create_index(
op.f('ix_plugins_archive_name'),
'plugins',
['archive_name'],
unique=False,
)
op.create_index(op.f('ix_plugins_id'), 'plugins', ['id'], unique=False)
op.create_index(
op.f('ix_plugins_package_name'),
'plugins',
['package_name'],
unique=False,
)
op.create_index(
op.f('ix_plugins_uploaded_at'),
'plugins',
['uploaded_at'],
unique=False,
)
op.create_table(
'secrets',
sa.Column('_storage_id', sa.Integer(), nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column('value', sa.Text(), nullable=True),
sa.Column(
'created_at',
manager_rest.storage.models_base.UTCDateTime(),
nullable=False,
),
sa.Column(
'updated_at',
manager_rest.storage.models_base.UTCDateTime(),
nullable=True,
),
sa.Column('_tenant_id', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_creator_id'],
[u'users.id'],
ondelete='CASCADE',
),
sa.ForeignKeyConstraint(
['_tenant_id'],
[u'tenants.id'],
ondelete='CASCADE',
),
sa.PrimaryKeyConstraint('_storage_id')
)
op.create_index(
op.f('ix_secrets_created_at'),
'secrets',
['created_at'],
unique=False,
)
op.create_index(
op.f('ix_secrets_id'),
'secrets',
['id'],
unique=False,
)
op.create_table(
'snapshots',
sa.Column('_storage_id', sa.Integer(), nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column(
'created_at',
manager_rest.storage.models_base.UTCDateTime(),
nullable=False,
),
sa.Column('status', snapshot_status, nullable=True),
sa.Column('error', sa.Text(), nullable=True),
sa.Column('_tenant_id', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_creator_id'],
[u'users.id'],
ondelete='CASCADE',
),
sa.ForeignKeyConstraint(
['_tenant_id'],
[u'tenants.id'],
ondelete='CASCADE',
),
sa.PrimaryKeyConstraint('_storage_id')
)
op.create_index(op.f(
'ix_snapshots_created_at'),
'snapshots',
['created_at'],
unique=False,
)
op.create_index(op.f('ix_snapshots_id'), 'snapshots', ['id'], unique=False)
op.create_table(
'users_groups',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('group_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['group_id'], ['groups.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.create_table(
'users_roles',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.create_table(
'users_tenants',
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('tenant_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['tenant_id'], ['tenants.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.create_table(
'deployments',
sa.Column('_storage_id', sa.Integer(), nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column(
'created_at',
manager_rest.storage.models_base.UTCDateTime(),
nullable=False,
),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('inputs', sa.PickleType(), nullable=True),
sa.Column('groups', sa.PickleType(), nullable=True),
sa.Column('permalink', sa.Text(), nullable=True),
sa.Column('policy_triggers', sa.PickleType(), nullable=True),
sa.Column('policy_types', sa.PickleType(), nullable=True),
sa.Column('outputs', sa.PickleType(), nullable=True),
sa.Column('scaling_groups', sa.PickleType(), nullable=True),
sa.Column(
'updated_at',
manager_rest.storage.models_base.UTCDateTime(),
nullable=True,
),
sa.Column('workflows', sa.PickleType(), nullable=True),
sa.Column('_blueprint_fk', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_blueprint_fk'],
[u'blueprints._storage_id'],
ondelete='CASCADE',
),
sa.ForeignKeyConstraint(
['_creator_id'],
[u'users.id'],
ondelete='CASCADE',
),
sa.PrimaryKeyConstraint('_storage_id')
)
op.create_index(
op.f('ix_deployments_created_at'),
'deployments',
['created_at'],
unique=False,
)
op.create_index(
op.f('ix_deployments_id'),
'deployments',
['id'],
unique=False,
)
op.create_table(
'owners_blueprints_users',
sa.Column('blueprint_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
['blueprint_id'],
['blueprints._storage_id'],
),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.create_table(
'owners_plugins_users',
sa.Column('plugin_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['plugin_id'], ['plugins._storage_id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.create_table(
'owners_secrets_users',
sa.Column('secret_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['secret_id'], ['secrets._storage_id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.create_table(
'owners_snapshots_users',
sa.Column('snapshot_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['snapshot_id'], ['snapshots._storage_id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.create_table(
'viewers_blueprints_users',
sa.Column('blueprint_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
['blueprint_id'],
['blueprints._storage_id'],
),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.create_table(
'viewers_plugins_users',
sa.Column('plugin_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['plugin_id'], ['plugins._storage_id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.create_table(
'viewers_secrets_users',
sa.Column('secret_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['secret_id'], ['secrets._storage_id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.create_table(
'viewers_snapshots_users',
sa.Column('snapshot_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['snapshot_id'], ['snapshots._storage_id'], ),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.create_table(
'deployment_modifications',
sa.Column('_storage_id', sa.Integer(), nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column('context', sa.PickleType(), nullable=True),
sa.Column(
'created_at',
manager_rest.storage.models_base.UTCDateTime(),
nullable=False,
),
sa.Column(
'ended_at',
manager_rest.storage.models_base.UTCDateTime(),
nullable=True,
),
sa.Column('modified_nodes', sa.PickleType(), nullable=True),
sa.Column('node_instances', sa.PickleType(), nullable=True),
sa.Column('status', deployment_modification_status, nullable=True),
sa.Column('_deployment_fk', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_deployment_fk'],
[u'deployments._storage_id'],
ondelete='CASCADE',
),
sa.PrimaryKeyConstraint('_storage_id')
)
op.create_index(
op.f('ix_deployment_modifications_created_at'),
'deployment_modifications',
['created_at'],
unique=False,
)
op.create_index(
op.f('ix_deployment_modifications_ended_at'),
'deployment_modifications',
['ended_at'],
unique=False,
)
op.create_index(
op.f('ix_deployment_modifications_id'),
'deployment_modifications',
['id'],
unique=False,
)
op.create_table(
'executions',
sa.Column('_storage_id', sa.Integer(), nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column(
'created_at',
manager_rest.storage.models_base.UTCDateTime(),
nullable=False,
),
sa.Column('error', sa.Text(), nullable=True),
sa.Column('is_system_workflow', sa.Boolean(), nullable=False),
sa.Column('parameters', sa.PickleType(), nullable=True),
sa.Column('status', execution_status, nullable=True),
sa.Column('workflow_id', sa.Text(), nullable=False),
sa.Column('_deployment_fk', sa.Integer(), nullable=True),
sa.Column('_tenant_id', sa.Integer(), nullable=False),
sa.Column('_creator_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_creator_id'],
[u'users.id'],
ondelete='CASCADE',
),
sa.ForeignKeyConstraint(
['_deployment_fk'],
[u'deployments._storage_id'],
ondelete='CASCADE',
),
sa.ForeignKeyConstraint(
['_tenant_id'],
[u'tenants.id'],
ondelete='CASCADE',
),
sa.PrimaryKeyConstraint('_storage_id')
)
op.create_index(
op.f('ix_executions_created_at'),
'executions',
['created_at'],
unique=False,
)
op.create_index(
op.f('ix_executions_id'),
'executions',
['id'],
unique=False,
)
op.create_table(
'nodes',
sa.Column('_storage_id', sa.Integer(), nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column('deploy_number_of_instances', sa.Integer(), nullable=False),
sa.Column('host_id', sa.Text(), nullable=True),
sa.Column('max_number_of_instances', sa.Integer(), nullable=False),
sa.Column('min_number_of_instances', sa.Integer(), nullable=False),
sa.Column('number_of_instances', sa.Integer(), nullable=False),
sa.Column('planned_number_of_instances', sa.Integer(), nullable=False),
sa.Column('plugins', sa.PickleType(), nullable=True),
sa.Column('plugins_to_install', sa.PickleType(), nullable=True),
sa.Column('properties', sa.PickleType(), nullable=True),
sa.Column('relationships', sa.PickleType(), nullable=True),
sa.Column('operations', sa.PickleType(), nullable=True),
sa.Column('type', sa.Text(), nullable=False),
sa.Column('type_hierarchy', sa.PickleType(), nullable=True),
sa.Column('_deployment_fk', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_deployment_fk'],
[u'deployments._storage_id'],
ondelete='CASCADE',
),
sa.PrimaryKeyConstraint('_storage_id')
)
op.create_index(op.f('ix_nodes_id'), 'nodes', ['id'], unique=False)
op.create_index(op.f('ix_nodes_type'), 'nodes', ['type'], unique=False)
op.create_table(
'owners_deployments_users',
sa.Column('deployment_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
['deployment_id'],
['deployments._storage_id'],
),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.create_table(
'viewers_deployments_users',
sa.Column('deployment_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
['deployment_id'],
['deployments._storage_id'],
),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.create_table(
'deployment_updates',
sa.Column('_storage_id', sa.Integer(), nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column(
'created_at',
manager_rest.storage.models_base.UTCDateTime(),
nullable=False,
),
sa.Column('deployment_plan', sa.PickleType(), nullable=True),
sa.Column(
'deployment_update_node_instances',
sa.PickleType(),
nullable=True,
),
sa.Column(
'deployment_update_deployment',
sa.PickleType(),
nullable=True,
),
sa.Column('deployment_update_nodes', sa.PickleType(), nullable=True),
sa.Column('modified_entity_ids', sa.PickleType(), nullable=True),
sa.Column('state', sa.Text(), nullable=True),
sa.Column('_execution_fk', sa.Integer(), nullable=True),
sa.Column('_deployment_fk', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_deployment_fk'],
[u'deployments._storage_id'],
ondelete='CASCADE',
),
sa.ForeignKeyConstraint(
['_execution_fk'],
[u'executions._storage_id'],
ondelete='CASCADE',
),
sa.PrimaryKeyConstraint('_storage_id')
)
op.create_index(
op.f('ix_deployment_updates_created_at'),
'deployment_updates',
['created_at'],
unique=False,
)
op.create_index(
op.f('ix_deployment_updates_id'),
'deployment_updates',
['id'],
unique=False,
)
op.create_table(
'events',
sa.Column('_storage_id', sa.Integer(), nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column(
'timestamp',
manager_rest.storage.models_base.UTCDateTime(),
nullable=False,
),
sa.Column('message', sa.Text(), nullable=True),
sa.Column('message_code', sa.Text(), nullable=True),
sa.Column('event_type', sa.Text(), nullable=True),
sa.Column('operation', sa.Text(), nullable=True),
sa.Column('node_id', sa.Text(), nullable=True),
sa.Column('_execution_fk', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_execution_fk'],
[u'executions._storage_id'],
ondelete='CASCADE',
),
sa.PrimaryKeyConstraint('_storage_id')
)
op.create_index(op.f('ix_events_id'), 'events', ['id'], unique=False)
op.create_index(
op.f('ix_events_timestamp'),
'events',
['timestamp'],
unique=False,
)
op.create_table(
'logs',
sa.Column('_storage_id', sa.Integer(), nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column(
'timestamp',
manager_rest.storage.models_base.UTCDateTime(),
nullable=False,
),
sa.Column('message', sa.Text(), nullable=True),
sa.Column('message_code', sa.Text(), nullable=True),
sa.Column('logger', sa.Text(), nullable=True),
sa.Column('level', sa.Text(), nullable=True),
sa.Column('operation', sa.Text(), nullable=True),
sa.Column('node_id', sa.Text(), nullable=True),
sa.Column('_execution_fk', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_execution_fk'],
[u'executions._storage_id'],
ondelete='CASCADE',
),
sa.PrimaryKeyConstraint('_storage_id')
)
op.create_index(op.f('ix_logs_id'), 'logs', ['id'], unique=False)
op.create_index(
op.f('ix_logs_timestamp'),
'logs',
['timestamp'],
unique=False,
)
op.create_table(
'node_instances',
sa.Column('_storage_id', sa.Integer(), nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column('host_id', sa.Text(), nullable=True),
sa.Column('relationships', sa.PickleType(), nullable=True),
sa.Column('runtime_properties', sa.PickleType(), nullable=True),
sa.Column('scaling_groups', sa.PickleType(), nullable=True),
sa.Column('state', sa.Text(), nullable=False),
sa.Column('version', sa.Integer(), nullable=True),
sa.Column('_node_fk', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_node_fk'],
[u'nodes._storage_id'],
ondelete='CASCADE',
),
sa.PrimaryKeyConstraint('_storage_id')
)
op.create_index(
op.f('ix_node_instances_id'),
'node_instances',
['id'],
unique=False,
)
op.create_table(
'owners_executions_users',
sa.Column('execution_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
['execution_id'],
['executions._storage_id'],
),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.create_table(
'viewers_executions_users',
sa.Column('execution_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(
['execution_id'],
['executions._storage_id'],
),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], )
)
op.create_table(
'deployment_update_steps',
sa.Column('_storage_id', sa.Integer(), nullable=False),
sa.Column('id', sa.Text(), nullable=True),
sa.Column('action', action_type, nullable=True),
sa.Column('entity_id', sa.Text(), nullable=False),
sa.Column('entity_type', entity_type, nullable=True),
sa.Column('_deployment_update_fk', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(
['_deployment_update_fk'],
[u'deployment_updates._storage_id'],
ondelete='CASCADE',
),
sa.PrimaryKeyConstraint('_storage_id')
)
op.create_index(
op.f('ix_deployment_update_steps_id'),
'deployment_update_steps',
['id'],
unique=False,
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(
op.f('ix_deployment_update_steps_id'),
table_name='deployment_update_steps',
)
op.drop_table('deployment_update_steps')
entity_type.drop(op.get_bind())
action_type.drop(op.get_bind())
op.drop_table('viewers_executions_users')
op.drop_table('owners_executions_users')
op.drop_index(op.f('ix_node_instances_id'), table_name='node_instances')
op.drop_table('node_instances')
op.drop_index(op.f('ix_logs_timestamp'), table_name='logs')
op.drop_index(op.f('ix_logs_id'), table_name='logs')
op.drop_table('logs')
op.drop_index(op.f('ix_events_timestamp'), table_name='events')
op.drop_index(op.f('ix_events_id'), table_name='events')
op.drop_table('events')
op.drop_index(
op.f('ix_deployment_updates_id'),
table_name='deployment_updates',
)
op.drop_index(
op.f('ix_deployment_updates_created_at'),
table_name='deployment_updates',
)
op.drop_table('deployment_updates')
op.drop_table('viewers_deployments_users')
op.drop_table('owners_deployments_users')
op.drop_index(op.f('ix_nodes_type'), table_name='nodes')
op.drop_index(op.f('ix_nodes_id'), table_name='nodes')
op.drop_table('nodes')
op.drop_index(op.f('ix_executions_id'), table_name='executions')
op.drop_index(op.f('ix_executions_created_at'), table_name='executions')
op.drop_table('executions')
execution_status.drop(op.get_bind())
op.drop_index(
op.f('ix_deployment_modifications_id'),
table_name='deployment_modifications',
)
op.drop_index(
op.f('ix_deployment_modifications_ended_at'),
table_name='deployment_modifications',
)
op.drop_index(
op.f('ix_deployment_modifications_created_at'),
table_name='deployment_modifications',
)
op.drop_table('deployment_modifications')
deployment_modification_status.drop(op.get_bind())
op.drop_table('viewers_snapshots_users')
op.drop_table('viewers_secrets_users')
op.drop_table('viewers_plugins_users')
op.drop_table('viewers_blueprints_users')
op.drop_table('owners_snapshots_users')
op.drop_table('owners_secrets_users')
op.drop_table('owners_plugins_users')
op.drop_table('owners_blueprints_users')
op.drop_index(op.f('ix_deployments_id'), table_name='deployments')
op.drop_index(op.f('ix_deployments_created_at'), table_name='deployments')
op.drop_table('deployments')
op.drop_table('users_tenants')
op.drop_table('users_roles')
op.drop_table('users_groups')
op.drop_index(op.f('ix_snapshots_id'), table_name='snapshots')
op.drop_index(op.f('ix_snapshots_created_at'), table_name='snapshots')
op.drop_table('snapshots')
snapshot_status.drop(op.get_bind())
op.drop_index(op.f('ix_secrets_id'), table_name='secrets')
op.drop_index(op.f('ix_secrets_created_at'), table_name='secrets')
op.drop_table('secrets')
op.drop_index(op.f('ix_plugins_uploaded_at'), table_name='plugins')
op.drop_index(op.f('ix_plugins_package_name'), table_name='plugins')
op.drop_index(op.f('ix_plugins_id'), table_name='plugins')
op.drop_index(op.f('ix_plugins_archive_name'), table_name='plugins')
op.drop_table('plugins')
op.drop_table('groups_tenants')
op.drop_index(op.f('ix_blueprints_id'), table_name='blueprints')
op.drop_index(op.f('ix_blueprints_created_at'), table_name='blueprints')
op.drop_table('blueprints')
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_table('users')
op.drop_index(op.f('ix_tenants_name'), table_name='tenants')
op.drop_table('tenants')
op.drop_index(op.f('ix_roles_name'), table_name='roles')
op.drop_table('roles')
op.drop_table('provider_context')
op.drop_index(op.f('ix_groups_name'), table_name='groups')
op.drop_index(op.f('ix_groups_ldap_dn'), table_name='groups')
op.drop_table('groups')
# ### end Alembic commands ###
| {
"repo_name": "cloudify-cosmo/cloudify-manager",
"path": "resources/rest-service/cloudify/migrations/versions/333998bc1627_4_0.py",
"copies": "2",
"size": "30101",
"license": "apache-2.0",
"hash": 7948732062613431000,
"line_mean": 34.7494061758,
"line_max": 79,
"alpha_frac": 0.5666256935,
"autogenerated": false,
"ratio": 3.7341520903113756,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 842
} |
# 412. Fizz Buzz
# Difficulty:Easy
# Write a program that outputs the string representation of numbers from 1 to n.
#
# But for multiples of three it should output “Fizz” instead of the number
# and for the multiples of five output “Buzz”. For numbers which are
# multiples of both three and five output “FizzBuzz”.
#
# Example:
#
# n = 15,
#
# Return:
# [
# "1",
# "2",
# "Fizz",
# "4",
# "Buzz",
# "Fizz",
# "7",
# "8",
# "Fizz",
# "Buzz",
# "11",
# "Fizz",
# "13",
# "14",
# "FizzBuzz"
# ]
class Solution:
def fizzBuzz(self, n):
"""
:type n: int
:rtype: List[str]
"""
result = []
for i in range(1, n + 1):
if i % 3 == 0 and i % 5 == 0:
result.append("FizzBuzz")
elif i % 3 == 0:
result.append("Fizz")
elif i % 5 == 0:
result.append("Buzz")
else:
result.append(str(i))
return result
if __name__ == '__main__':
sol = Solution()
print(sol.fizzBuzz(15))
| {
"repo_name": "kingdaa/LC-python",
"path": "lc/412_Fizz_Buzz.py",
"copies": "1",
"size": "1109",
"license": "mit",
"hash": -832843010177393000,
"line_mean": 19.3148148148,
"line_max": 80,
"alpha_frac": 0.4703737466,
"autogenerated": false,
"ratio": 2.97289972899729,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.39432734755972904,
"avg_score": null,
"num_lines": null
} |
# 4-13-15
# John Vivian
"""
'Hello World' script for Toil
"""
from optparse import OptionParser
import os
from toil.job import Job
def hello_world(job, memory=100, cpu=0.5):
with open('foo_bam.txt', 'w') as handle:
handle.write('\nThis is a triumph...\n')
# Assign FileStoreID to a given file
foo_bam = job.fileStore.writeGlobalFile('foo_bam.txt')
# Spawn child
job.addChildJobFn(hello_world_child, foo_bam, memory=100, cpu=0.5, disk=2000)
def hello_world_child(job, hw, memory=100, cpu=0.5):
path = job.fileStore.readGlobalFile(hw)
with open(path, 'a') as handle:
handle.write("\nFileStoreID works!\n")
# NOTE: path and the udpated file are stored to /tmp
# If we want to SAVE our changes to this tmp file, we must write it out.
with open(path, 'r') as r:
with open('bar_bam.txt', 'w') as handle:
x = os.getcwd()
for line in r.readlines():
handle.write(line)
# Assign FileStoreID to a given file
# can also use: job.updateGlobalFile() given the FileStoreID instantiation.
bar_bam = job.fileStore.writeGlobalFile('bar_bam.txt')
def main():
# Boilerplate -- startToil requires options
parser = OptionParser()
Job.Runner.addToilOptions(parser)
options, args = parser.parse_args()
# Create object that contains our FileStoreIDs
# Launch first toil Job
i = Job.wrapJobFn(hello_world, memory=100, cpu=0.5, disk=2000)
j = Job.Runner.startToil(i, options)
if __name__ == '__main__':
main() | {
"repo_name": "BD2KGenomics/toil-old",
"path": "src/toil/test/mesos/helloWorld.py",
"copies": "1",
"size": "1553",
"license": "mit",
"hash": -2727765192827979300,
"line_mean": 25.7931034483,
"line_max": 81,
"alpha_frac": 0.6471345782,
"autogenerated": false,
"ratio": 3.2902542372881354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4437388815488135,
"avg_score": null,
"num_lines": null
} |
import json
import requests
import csv
# url for get requests
url1 = """https://api.nutritionix.com/v1_1/search/?brand_id=51db37d0176fe9790a899db2&results=150%3A200&cal_min=0&cal_max=50000&fields=*&appId=[YourAPPId]2&appKey=[YourAPPKey]"""
# using requests library to pull the data
results = requests.get(url1)
results = results.json()
# only getting the hits list
hits = results["hits"]
# column heading for the csv file
columnHeadings = [['id','old_api_id',
'item_id',
'item_name',
'brand_id',
'brand_name',
'item_description',
'updated_at',
'nf_ingredient_statement',
'nf_water_grams',
'nf_calories',
'nf_calories_from_fat',
'nf_total_fat',
'nf_saturated_fat',
'nf_trans_fatty_acid',
'nf_polyunsaturated_fat',
'nf_monounsaturated_fat',
'nf_cholesterol',
'nf_sodium',
'nf_total_carbohydrate',
'nf_dietary_fiber',
'nf_sugars',
'nf_protein',
'nf_vitamin_a_dv',
'nf_vitamin_c_dv',
'nf_calcium_dv',
'nf_iron_dv',
'nf_refuse_pct',
'nf_servings_per_container',
'nf_serving_size_qty',
'nf_serving_size_unit',
'nf_serving_weight_grams',
'allergen_contains_milk',
'allergen_contains_eggs',
'allergen_contains_fish',
'allergen_contains_shellfish',
'allergen_contains_tree_nuts',
'allergen_contains_peanuts',
'allergen_contains_wheat',
'allergen_contains_soybeans',
'allergen_contains_gluten']]
# checking for type and storing the ids and all fields
items = [[m["_id"],m['fields']['old_api_id'],
m['fields']['item_id'],
m['fields']['item_name'],
m['fields']['brand_id'],
m['fields']['brand_name'],
m['fields']['item_description'],
m['fields']['updated_at'],
m['fields']['nf_ingredient_statement'],
m['fields']['nf_water_grams'],
m['fields']['nf_calories'],
m['fields']['nf_calories_from_fat'],
m['fields']['nf_total_fat'],
m['fields']['nf_saturated_fat'],
m['fields']['nf_trans_fatty_acid'],
m['fields']['nf_polyunsaturated_fat'],
m['fields']['nf_monounsaturated_fat'],
m['fields']['nf_cholesterol'],
m['fields']['nf_sodium'],
m['fields']['nf_total_carbohydrate'],
m['fields']['nf_dietary_fiber'],
m['fields']['nf_sugars'],
m['fields']['nf_protein'],
m['fields']['nf_vitamin_a_dv'],
m['fields']['nf_vitamin_c_dv'],
m['fields']['nf_calcium_dv'],
m['fields']['nf_iron_dv'],
m['fields']['nf_refuse_pct'],
m['fields']['nf_servings_per_container'],
m['fields']['nf_serving_size_qty'],
m['fields']['nf_serving_size_unit'],
m['fields']['nf_serving_weight_grams'],
m['fields']['allergen_contains_milk'],
m['fields']['allergen_contains_eggs'],
m['fields']['allergen_contains_fish'],
m['fields']['allergen_contains_shellfish'],
m['fields']['allergen_contains_tree_nuts'],
m['fields']['allergen_contains_peanuts'],
m['fields']['allergen_contains_wheat'],
m['fields']['allergen_contains_soybeans'],
m['fields']['allergen_contains_gluten']] for m in hits if m["_type"]=="item"]
# list of items with column headings
listOfitems = columnHeadings + items
# writing to csv file for initial analysis
with open('output1.csv', 'wb') as csvfile:
writer = csv.writer(csvfile)
writer.writerows(listOfitems)
print "done"
| {
"repo_name": "yedurag/juicevisrepo",
"path": "extras/initialanalysis.py",
"copies": "1",
"size": "5207",
"license": "apache-2.0",
"hash": -2722047756246722000,
"line_mean": 42.756302521,
"line_max": 177,
"alpha_frac": 0.4215479163,
"autogenerated": false,
"ratio": 4.353678929765886,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007512956550317491,
"num_lines": 119
} |
# 415. Add Strings
# Given two non-negative integers num1 and num2 represented as string, return the sum of num1 and num2.
#
# Note:
#
# The length of both num1 and num2 is < 5100.
# Both num1 and num2 contains only digits 0-9.
# Both num1 and num2 does not contain any leading zero.
# You must not use any built-in BigInteger library or convert the inputs to integer directly.
#
class Solution(object):
# https://leetcode.com/problems/add-strings/discuss/90436/Straightforward-Java-8-main-lines-25ms
def addStrings(self, num1, num2):
"""
:type num1: str
:type num2: str
:rtype: str
"""
if not num1:
return num2
if not num2:
return num1
i = len(num1) - 1
j = len(num2) - 1
carry = 0
digit = 0
result = []
while i >=0 or j >= 0 or carry != 0:
digit = carry
if i >=0:
digit += int(num1[i])
i -= 1
if j >=0:
digit += int(num2[j])
j -= 1
carry = digit // 10
result.append(digit%10)
return ''.join(str(e) for e in result[::-1])
sol = Solution()
print(sol.addStrings('123', '45'))
print(sol.addStrings('123', ''))
print(sol.addStrings('123', '0'))
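# expected output: 168, 123, 123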
| {
"repo_name": "gengwg/leetcode",
"path": "415_add_strings.py",
"copies": "1",
"size": "1340",
"license": "apache-2.0",
"hash": -1269664298907563300,
"line_mean": 22.9285714286,
"line_max": 103,
"alpha_frac": 0.5320895522,
"autogenerated": false,
"ratio": 3.526315789473684,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4558405341673684,
"avg_score": null,
"num_lines": null
} |
# the app file for the flask app
# importing required libraries
from flask import Flask, render_template, request,jsonify
from flask.ext.sqlalchemy import SQLAlchemy
from rq import Queue
from rq.job import Job
from worker import conn
import json
import requests
import os
import time
import datetime
from nltk.stem.porter import *
from collections import Counter
import operator
stemmer = PorterStemmer()
# configs
app = Flask(__name__)
app.config.from_object(os.environ['APP_SETTINGS'])
appId = "[YourAppID]"
appKey = "[YourAppKey]"
db = SQLAlchemy(app)
q = Queue(connection=conn)
# we have the db model stored in models.py
from models import *
# helper function 1- to iterate through urls and get data from api
# generates a list of limits
# makes the urls dynamically
# iterates through the urls and gets the data using requests
def reqData(brandId,appId,appKey):
startPage = 0
endPage = startPage + 50
listResults = []
# right now there are only 125 juicy juice items; what if in the future they expand their portfolio
while True:
url = "https://api.nutritionix.com/v1_1/search/?brand_id=" + brandId + "&results=" + str(startPage)+ "%3A" + str(endPage)+ "&cal_min=0&cal_max=50000&fields=*&appId=" + appId + "&appKey=" + appKey
response = (requests.get(url)).json()
if response["hits"] == []:
break
else:
hitsResults = [{"item_id" : m['fields']['item_id'],
"item_name": m['fields']['item_name'],
"item_description": m['fields']['item_description'],
"updated_at": m['fields']['updated_at'],
"ingredient_statement": m['fields']['nf_ingredient_statement'],
"calories": m['fields']['nf_calories'],
"calories_from_fat": m['fields']['nf_calories_from_fat'],
"total_fat": m['fields']['nf_total_fat'],
"saturated_fat": m['fields']['nf_saturated_fat'],
"trans_fatty_acid": m['fields']['nf_trans_fatty_acid'],
"polyunsaturated_fat": m['fields']['nf_polyunsaturated_fat'],
"monounsaturated_fat": m['fields']['nf_monounsaturated_fat'],
"cholesterol": m['fields']['nf_cholesterol'],
"sodium": m['fields']['nf_sodium'],
"total_carbohydrate": m['fields']['nf_total_carbohydrate'],
"dietary_fiber": m['fields']['nf_dietary_fiber'],
"sugars": m['fields']['nf_sugars'],
"protein": m['fields']['nf_protein'],
"vitamin_a_dv": m['fields']['nf_vitamin_a_dv'],
"vitamin_c_dv": m['fields']['nf_vitamin_c_dv'],
"calcium_dv": m['fields']['nf_calcium_dv'],
"iron_dv": m['fields']['nf_iron_dv'],
"servings_per_container": m['fields']['nf_servings_per_container'],
"serving_size_qty": m['fields']['nf_serving_size_qty'],
"serving_size_unit": m['fields']['nf_serving_size_unit'],
"serving_weight_grams": m['fields']['nf_serving_weight_grams'],
"full_ingredient": "Missing Info", "main-ingredient":"Missing Info",
"calperunitserving": float(0),"vitamincperunitserving": float(0),"flavor":"Other","carbsperunitserving":float(0)} for m in response["hits"] if m["_type"]=="item"]
# the keys- full_ingredient,calperunitserving, carbsperunitserving, flavor and ingredient are not updated; we will update them later
listResults.append(hitsResults)
startPage = startPage + 50
endPage = startPage + 50
listResults = [i for o in listResults for i in o]
return listResults
# helper function 2- to clean up the numeric fields
# input = results pulled from the api
# output = cleaned up data for numeric fields based on the assumption from api documentation: if field is null, its unknown
# most apps have to attribute value of zero to these data points
# cleans up the time field; calculates calories per serving quantity
# along with that to beautify things, i have added an attribute called flavor :)
def cleannumericData(listResults=[]):
listNumericfields = ["calories","calories_from_fat",
"total_fat",
"saturated_fat",
"trans_fatty_acid",
"polyunsaturated_fat",
"monounsaturated_fat",
"cholesterol",
"sodium",
"total_carbohydrate",
"dietary_fiber",
"sugars",
"protein",
"vitamin_a_dv",
"vitamin_c_dv",
"calcium_dv",
"iron_dv",
"servings_per_container",
"serving_size_qty"]
for eachItem in listResults:
for eachNumericfield in listNumericfields:
if eachItem[eachNumericfield] is None:
eachItem[eachNumericfield] = 0
for eachItem in listResults:
eachItem["updated_at"] = (datetime.datetime.strptime((eachItem["updated_at"].encode('utf-8'))[:-5],"%Y-%m-%dT%H:%M:%S")).strftime('%Y-%m-%d %H:%M:%S')
if eachItem["serving_size_qty"] != 0:
eachItem["calperunitserving"] = eachItem["calories"]/float(eachItem["serving_size_qty"])
if eachItem["serving_size_qty"] != 0:
eachItem["carbsperunitserving"] = eachItem["total_carbohydrate"]/float(eachItem["serving_size_qty"])
if eachItem["serving_size_qty"] != 0:
eachItem["vitamincperunitserving"] = eachItem["vitamin_c_dv"]/float(eachItem["serving_size_qty"])
# Giving a flavor attribute to it (this is defined by myself; Trying to categorize; Items may come in multiple- not taken into account
if "berry" in (eachItem["item_name"]).lower():
eachItem["flavor"] = "Berries"
elif "melon" in (eachItem["item_name"]).lower():
eachItem["flavor"] = "Melon"
elif "grape" in (eachItem["item_name"]).lower():
eachItem["flavor"] = "Grape"
elif "orange" in (eachItem["item_name"]).lower():
eachItem["flavor"] = "Orange"
elif "peach" in (eachItem["item_name"]).lower():
eachItem["flavor"] = "Peach"
elif "mango" in (eachItem["item_name"]).lower():
eachItem["flavor"] = "Mango"
elif "apple" in (eachItem["item_name"]).lower():
eachItem["flavor"] = "Apple"
elif "pineapple" in (eachItem["item_name"]).lower():
eachItem["flavor"] = "Pine Apple"
elif "cherry" in (eachItem["item_name"]).lower():
eachItem["flavor"] = "Cherry"
else:
eachItem["flavor"] = "Other"
return listResults
# helper function 3- very important - to clean up the ingredients field
# the function is a little complex with several steps
# i have tried to comment; please email me with questions
# input = results after numeric cleaning
# output = a new cleaned up list of ingredients field; list of all ingredients with counts
# the nltk Porter stemmer is used to normalize the ingredient tokens
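# For example (illustrative): "Water, Grape Juice (from concentrate, water), Natural Flavor."
# is lower-cased, the comma inside the first pair of parentheses is stripped, the statement is
# split on the remaining commas, and each ingredient is then Porter-stemmed word by word.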
def cleanIngredients(listResults=[]):
# i did not like the none's; replaced them with ''
for eachItem in listResults:
if eachItem["ingredient_statement"] is None:
eachItem["ingredient_statement"] = ''
# first level of cleaning; removing ands and "."
for eachItem in listResults:
line = (eachItem["ingredient_statement"]).lower()
line = line.replace('and', ',')
line = line.replace(',,', ',')
line = line.replace(', ,', ',')
line = line.rstrip('.')
# second level of cleaning; to make it truely comma delimited
# modified the ingenious solution given in http://stackoverflow.com/questions/14596884/remove-text-between-and-in-python
startChar = line.find('(')
endChar = line.find(')')
if startChar != -1 and endChar != -1:
line = line.replace("("+line[startChar+1:endChar]+")",("( "+line[startChar+1:endChar]+" )").replace(',',''))
# third level; stemming the results and normalizing it
listIngredients = line.split(',')
refinedlistIngredients = []
for ingredient in listIngredients:
# if the word "puree" is stemmed it becomes "pure" which may be misleading
refinedlistIngredients.append(" ".join([word if word == 'puree' else stemmer.stem(word) for word in ingredient.split()]))
# for each items, we have a list of ingredients
eachItem["ingredients_list"] = refinedlistIngredients
# skipped the block below after realising the stupidity
"""
# we consolidate all these ingredients to a single list
if eachItem["ingredient_statement"] != "":
totallistIngredients.append(refinedlistIngredients)
totallistIngredients = [i for o in totallistIngredients for i in o]
# in the big list of ingredients, we uniquefy and sort it; get the counts too
listAllingredients = Counter(totallistIngredients)
listAllingredients = sorted(listAllingredients.items(),key=operator.itemgetter(1),reverse=True)
"""
return {"data": listResults}
# helper function 4- very important - to get the ingredients table
# the function is a little complex with several steps
# input: results from cleanIngredients function
# output: results array with each ingredient and the matching item information
# for each of the item, we have ingredients list; the data here is converted into a long format
def ingredientsMatch(fullData):
listResults = fullData["data"]
outputingredientsMatch = []
outputingredientsNotmatch = [i for i in listResults if i['ingredient_statement'] == '']
outputingredientspresent = [i for i in listResults if i['ingredient_statement'] != '']
for item in outputingredientspresent:
ingList = item["ingredients_list"]
for eachIng in ingList:
outputingredientsMatch.append({"item_id": item["item_id"],
"item_name": item["item_name"],
"item_description": item["item_description"],
"updated_at": item["updated_at"],
"ingredient_statement": item["ingredient_statement"],
"calories": item["calories"],
"calories_from_fat": item["calories_from_fat"],
"total_fat": item["total_fat"],
"saturated_fat": item["saturated_fat"],
"trans_fatty_acid": item["trans_fatty_acid"],
"polyunsaturated_fat": item["polyunsaturated_fat"],
"monounsaturated_fat": item["monounsaturated_fat"],
"cholesterol": item["cholesterol"],
"sodium": item["sodium"],
"total_carbohydrate": item["total_carbohydrate"],
"dietary_fiber": item["dietary_fiber"],
"sugars": item["sugars"],
"protein": item["protein"],
"vitamin_a_dv": item["vitamin_a_dv"],
"vitamin_c_dv": item["vitamin_c_dv"],
"calcium_dv": item["calcium_dv"],
"iron_dv": item["iron_dv"],
"servings_per_container": item["servings_per_container"],
"serving_size_qty": item["serving_size_qty"],
"serving_size_unit": item["serving_size_unit"],
"serving_weight_grams": item["serving_weight_grams"],
"full_ingredient": eachIng,
"main-ingredient": " ".join([word for word in (eachIng.split())[:2]]),
"ingredients_list": item["ingredients_list"],
"calperunitserving": float(item["calperunitserving"]),"flavor": item["flavor"],
"vitamincperunitserving": float(item["vitamincperunitserving"]),
"carbsperunitserving": float(item["carbsperunitserving"])})
outputingredientsData = outputingredientsMatch + outputingredientsNotmatch
return outputingredientsData
# powering the front page
@app.route('/', methods=['GET', 'POST'])
def indexPage():
return render_template('index.html')
# powering the products page
@app.route('/products', methods=['GET', 'POST'])
def productsPage():
return render_template('products.html')
# powering the ingredients page
@app.route('/ingredients', methods=['GET', 'POST'])
def ingredientsPage():
return render_template('ingredients.html')
# powering the process book page
@app.route('/processbook', methods=['GET', 'POST'])
def processbookPage():
return render_template('processbook.html')
# gets the data--> cleaned product data-->insert and commit to db-->returns the id
def workonStartproducts(listArguments):
brandId = listArguments[0]
appId = listArguments[1]
appKey = listArguments[2]
try:
a = reqData(brandId,appId,appKey)
b = cleannumericData(a)
results = b
result = Result(result_all=results)
db.session.add(result)
db.session.commit()
return result.id
except:
errors = {"errors": "data pull failed"}
return errors
# gets the data--> cleaned ingredients data-->insert and commit to db-->returns the id
def workonStartingredients(listArguments):
brandId = listArguments[0]
appId = listArguments[1]
appKey = listArguments[2]
try:
a = reqData(brandId,appId,appKey)
b = cleannumericData(a)
c = cleanIngredients(b)
d = ingredientsMatch(c)
results = d
result = Result(result_all=results)
db.session.add(result)
db.session.commit()
return result.id
except:
errors = {"errors": "data pull failed"}
return errors
# when this end point is called by the front end, it checks if the job is finished
# if finished it posts the data
@app.route("/resultsProducts/<job_key>", methods=['GET'])
def get_productResults(job_key):
job = Job.fetch(job_key, connection=conn)
if job.is_finished:
result = Result.query.filter_by(id=job.result).first()
result = result.result_all
return jsonify({"data":result,"status":"done"})
else:
return jsonify({"status":"pending","data":[]})
# similar as above
@app.route("/resultsIngredients/<job_key>", methods=['GET'])
def get_ingredientResults(job_key):
job = Job.fetch(job_key, connection=conn)
if job.is_finished:
result = Result.query.filter_by(id=job.result).first()
result = result.result_all
return jsonify({"data":result,"status":"done"})
else:
return jsonify({"status":"pending","data":[]})
# when the user clicks the brand button on the products page, the post request goes here
# the job is created dynamically and added to the redis q
# the job id from the redis q is our reference here
@app.route("/startproducts",methods = ['GET','POST'])
def startProducts():
errors = []
results = []
if request.method == 'POST':
brandName = request.form.get('brand')
if brandName == "Juicy Juice":
brandId = "51db37d0176fe9790a899db2"
elif brandName == "Old Orchard 100 Juice":
brandId = "51db37c9176fe9790a8996c0"
else:
brandId = "51db3819176fe9790a89b485"
argumentList = [brandId,appId, appKey]
job = q.enqueue_call(func=workonStartproducts, args=(argumentList,), result_ttl=5000)
        print job.get_id()
        return job.get_id()
# when the user clicks the brand button on the ingredients page, the post request goes here
# working is similar as above
@app.route("/startingredients",methods = ['GET','POST'])
def startIngredients():
errors = []
results = []
if request.method == 'POST':
brandName = request.form.get('brand')
if brandName == "Juicy Juice":
brandId = "51db37d0176fe9790a899db2"
elif brandName == "Old Orchard 100 Juice":
brandId = "51db37c9176fe9790a8996c0"
else:
brandId = "51db3819176fe9790a89b485"
argumentList = [brandId,appId, appKey]
job = q.enqueue_call(func=workonStartingredients, args=(argumentList,), result_ttl=5000)
        print job.get_id()
        return job.get_id()
if __name__ == '__main__':
app.run()
| {
"repo_name": "yedurag/juicevisrepo",
"path": "app.py",
"copies": "1",
"size": "18821",
"license": "apache-2.0",
"hash": -6919481732262092000,
"line_mean": 36.6468172485,
"line_max": 203,
"alpha_frac": 0.5349343818,
"autogenerated": false,
"ratio": 4.019004911381593,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5053939293181593,
"avg_score": null,
"num_lines": null
} |
import json
import requests
import csv
import re
from nltk.stem.porter import *
stemmer = PorterStemmer()
from nltk.corpus import stopwords
cachedStopWords = stopwords.words("english")
# url for get requests
url1 = """https://api.nutritionix.com/v1_1/search/?brand_id=51db37d0176fe9790a899db2&results=0%3A50&cal_min=0&cal_max=50000&fields=*&appId=[]&appKey=[]"""
url2 = """https://api.nutritionix.com/v1_1/search/?brand_id=51db37d0176fe9790a899db2&results=50%3A100&cal_min=0&cal_max=50000&fields=*&appId=[]&appKey=[]"""
url3 = """https://api.nutritionix.com/v1_1/search/?brand_id=51db37d0176fe9790a899db2&results=100%3A150&cal_min=0&cal_max=50000&fields=*&appId=[]&appKey=[]"""
#list of urls
urlList = [url1,url2,url3]
# using requests library to pull the data
listResponses = [((requests.get(url)).json())for url in urlList]
listResponses = [m["hits"] for m in listResponses]
listResponses = [i for o in listResponses for i in o]
# getting the big list of ingredients only and eliminating Nones also on the fly;making lower case to normalize
listIngredients = [(m['fields']['nf_ingredient_statement']).lower() for m in listResponses if m['fields']['nf_ingredient_statement'] != None]
# first level of cleaning; removing ands and "."
listIngredients = [line.replace('and', ',') for line in listIngredients]
listIngredients = [line.replace(',,', ',') for line in listIngredients]
listIngredients = [line.replace(', ,', ',') for line in listIngredients]
listIngredients = [line.replace('.',',') for line in listIngredients]
# second level to truly make it comma delimited
# modified the ingenious solution given in http://stackoverflow.com/questions/14596884/remove-text-between-and-in-python
refinedlistIngredients = []
for line in listIngredients:
startChar = line.find('(')
endChar = line.find(')')
if startChar != -1 and endChar != -1:
refinedlistIngredients.append(line.replace("("+line[startChar+1:endChar]+")",("( "+line[startChar+1:endChar]+" )").replace(',','')))
else:
        refinedlistIngredients.append(line)  # append the string itself, not a one-element list
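# A hedged regex-based alternative (not part of the original script): the re module
# imported above is never used; this would strip commas inside every parenthesised
# group, not just the first pair found by find():
# refinedlistIngredients = [
#     re.sub(r'\(([^)]*)\)',
#            lambda m: '( ' + m.group(1).replace(',', '') + ' )',
#            line)
#     for line in listIngredients
# ]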
# third level to stem the results and truly normalize it
# splitting them based on ',' first
refinedlistIngredients = [line.split(',') for line in refinedlistIngredients]
biglistIngredients = []
for line in refinedlistIngredients:
for ingredient in line:
        biglistIngredients.append(" ".join(stemmer.stem(word) for word in ingredient.split()))
print len(biglistIngredients)
from collections import Counter
import operator
results1 = Counter(biglistIngredients)
# save the results
results = sorted(results1.items(),key=operator.itemgetter(1),reverse=True)
print len(results)
print results
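# A hedged add-on (not in the original script): the csv module imported above is
# never used, so this writes the sorted (ingredient, count) pairs to disk; the
# output filename is an assumption.
with open('ingredient_counts.csv', 'wb') as f:
    writer = csv.writer(f)
    writer.writerow(['ingredient', 'count'])
    writer.writerows([(name.encode('utf-8'), count) for name, count in results])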
| {
"repo_name": "yedurag/juicevisrepo",
"path": "extras/ingredientsanalysis.py",
"copies": "1",
"size": "2998",
"license": "apache-2.0",
"hash": 4670427632427565000,
"line_mean": 29.5578947368,
"line_max": 157,
"alpha_frac": 0.6891260841,
"autogenerated": false,
"ratio": 3.0160965794768613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.42052226635768614,
"avg_score": null,
"num_lines": null
} |
# 417. Pacific Atlantic Water Flow
# Given an m x n matrix of non-negative integers representing the height of each unit cell
# in a continent, the "Pacific ocean" touches the left and top edges of the matrix
# and the "Atlantic ocean" touches the right and bottom edges.
# Water can only flow in four directions (up, down, left, or right)
# from a cell to another one with height equal or lower.
# Find the list of grid coordinates where water can flow to both the Pacific and Atlantic ocean.
# Note:
# The order of returned grid coordinates does not matter.
# Both m and n are less than 150.
# Example:
# Given the following 5x5 matrix:
# Pacific ~ ~ ~ ~ ~
# ~ 1 2 2 3 (5) *
# ~ 3 2 3 (4) (4) *
# ~ 2 4 (5) 3 1 *
# ~ (6) (7) 1 4 5 *
# ~ (5) 1 1 2 4 *
# * * * * * Atlantic
# Return:
# [[0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]]
# (positions with parentheses in above matrix).
class Solution(object):
# https://leetcode.com/problems/pacific-atlantic-water-flow/discuss/90739/Python-DFS-bests-85.-Tips-for-all-DFS-in-matrix-question.
# The DFS solution is straightforward.
# Starting from each point, and dfs its neighbor if the neighbor is equal or less than itself.
# And maintain two boolean matrix for two oceans, indicating an ocean can reach to that point or not.
# Finally go through all nodes again and see if it can be both reached by two oceans.
# The trick is if a node is already visited, no need to visited again.
# Otherwise it will reach the recursion limits.
# This question is very similar to https://leetcode.com/problems/longest-increasing-path-in-a-matrix/
# And here are some common tips for this kind of question
# 1. init a directions var like self.directions = [(1,0),(-1,0),(0,1),(0,-1)]
# so that when you want to explore from a node, you can just do
# for direction in self.directions:
# x, y = i + direction[0], j + direction[1]
# 2. this is a what I normally do for a dfs helper method for exploring a matrix
# def dfs(self, i, j, matrix, visited, m, n):
# if visited:
# # return or return a value
    #     for direction in self.directions:
# x, y = i + direction[0], j + direction[1]
# if x < 0 or x >= m or y < 0 or y >= n or matrix[x][y] <= matrix[i][j] # (or a condition you want to skip this round):
# continue
# # do something like
# visited[i][j] = True
# # explore the next level like
# self.dfs(x, y, matrix, visited, m, n)
def pacificAtlantic(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: List[List[int]]
"""
if not matrix:
return []
self.directions = [(1, 0), (-1, 0), (0, 1), (0, -1)]
m = len(matrix)
n = len(matrix[0])
p_visited = [[False for _ in range(n)] for _ in range(m)]
a_visited = [[False for _ in range(n)] for _ in range(m)]
res = []
for i in range(m):
# p_visited[i][0] = True
# a_visited[i][n-1] = True
self.dfs(matrix, i, 0, p_visited, m, n)
self.dfs(matrix, i, n-1, a_visited, m, n)
for j in range(n):
            # p_visited[0][j] = True
# a_visited[m-1][j] = True
self.dfs(matrix, 0, j, p_visited, m, n)
self.dfs(matrix, m-1, j, a_visited, m, n)
for i in range(m):
for j in range(n):
if p_visited[i][j] and a_visited[i][j]:
res.append([i, j])
return res
def dfs(self, matrix, i, j, visited, m, n):
# when dfs is called, its caller already verified this point
visited[i][j] = True
for direction in self.directions:
x, y = i + direction[0], j + direction[1]
if x < 0 or x >= m or y < 0 or y >= n or visited[x][y] or matrix[x][y] < matrix[i][j]:
continue
self.dfs(matrix, x, y, visited, m, n)
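# A small usage sketch (not part of the original file): run the solution on the
# 5x5 example matrix from the problem statement above; the expected result is
# [[0, 4], [1, 3], [1, 4], [2, 2], [3, 0], [3, 1], [4, 0]] (order may differ).
if __name__ == "__main__":
    sol = Solution()
    matrix = [
        [1, 2, 2, 3, 5],
        [3, 2, 3, 4, 4],
        [2, 4, 5, 3, 1],
        [6, 7, 1, 4, 5],
        [5, 1, 1, 2, 4],
    ]
    print(sol.pacificAtlantic(matrix))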
| {
"repo_name": "gengwg/leetcode",
"path": "417_pacific_atlantic_water_flow.py",
"copies": "1",
"size": "4131",
"license": "apache-2.0",
"hash": 1753416869539688200,
"line_mean": 37.9716981132,
"line_max": 135,
"alpha_frac": 0.5565238441,
"autogenerated": false,
"ratio": 3.217289719626168,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9252738573054258,
"avg_score": 0.004214998134381927,
"num_lines": 106
} |
# 4/18/2014
# Charles O. Goddard
from __future__ import print_function
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from bs4 import BeautifulSoup, Comment
import os
import sys
import pickle
def striptext(text):
words = []
soup = BeautifulSoup(text, 'html5lib')
elems = [elem for elem in soup.html]
while elems:
elem = elems.pop(0)
if not isinstance(elem, Comment) and elem.string is not None:
words.append(elem.string)
if hasattr(elem, 'contents'):
elems = elem.contents + elems
res = '\n'.join(words)
res = '\n'.join(w.strip() for w in res.split())
while '\n\n' in res:
res = res.replace('\n\n', '\n')
return res
def read_doc(fn):
with open(fn, 'r') as f:
text = f.read().decode('utf-8')
return striptext(text)
def search(text, documents=None):
if documents is None:
documents = pickle.load(open('documents.pickle', 'rb'))
#print('Query:', text)
docnames = ["query"] + [d[0] for d in documents]
documents = [text] + [d[1] for d in documents]
vectorizer = TfidfVectorizer(stop_words='english')
tfidf = vectorizer.fit_transform(documents)
num_samples, num_features = tfidf.shape
print('{0} samples, {1} features'.format(num_samples, num_features))
search_vector = tfidf[0].A[0]
match_scores = [(sum(search_vector * tfidf[i].A[0]), i) for i in range(1, num_samples)]
match_scores.sort(reverse=True)
return [(score, docnames[i]) for (score, i) in match_scores[:10]]
if __name__ == '__main__':
if len(sys.argv) > 1:
if sys.argv[1] == 'reparse':
documents = []
docnames = os.listdir('scraped')
for docname in docnames:
documents.append((docname, read_doc('scraped/'+docname)))
print('Read in all documents')
pickle.dump(documents, open('documents.pickle', 'wb'), -1)
sys.exit(0)
query = "New scorecard ranks states on their 'fertility friendliness'. For National Infertility Week, RESOLVE: The National Infertility Association has released its annual Fertility Scorecard - a map ranking each state by how easy it is for citizens to gain access to fertility support resources and treatments in that area."
    match_scores = search(query)
    for score, docname in match_scores:
        print('{0} -> {1}'.format(score, docname))
| {
"repo_name": "thomasnat1/DataScience2014CDC",
"path": "tfidf_search.py",
"copies": "1",
"size": "2453",
"license": "mit",
"hash": -7311856517917559000,
"line_mean": 34.0428571429,
"line_max": 329,
"alpha_frac": 0.6322869955,
"autogenerated": false,
"ratio": 3.529496402877698,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46617833983776974,
"avg_score": null,
"num_lines": null
} |
""" (4.1) Add RabbitMQ details to tenant model
Revision ID: 730403566523
Revises: 9aa6f74c9653
Create Date: 2017-05-10 15:33:46.837856
"""
from alembic import op
import sqlalchemy as sa
import manager_rest # Adding this manually
# revision identifiers, used by Alembic.
revision = '730403566523'
down_revision = '9aa6f74c9653'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('blueprints_created_at_idx'), 'blueprints', ['created_at'], unique=False)
op.create_index(op.f('blueprints_id_idx'), 'blueprints', ['id'], unique=False)
op.create_index(op.f('deployment_modifications_created_at_idx'), 'deployment_modifications', ['created_at'], unique=False)
op.create_index(op.f('deployment_modifications_ended_at_idx'), 'deployment_modifications', ['ended_at'], unique=False)
op.create_index(op.f('deployment_modifications_id_idx'), 'deployment_modifications', ['id'], unique=False)
op.create_index(op.f('deployment_update_steps_id_idx'), 'deployment_update_steps', ['id'], unique=False)
op.create_index(op.f('deployment_updates_created_at_idx'), 'deployment_updates', ['created_at'], unique=False)
op.create_index(op.f('deployment_updates_id_idx'), 'deployment_updates', ['id'], unique=False)
op.create_index(op.f('deployments_created_at_idx'), 'deployments', ['created_at'], unique=False)
op.create_index(op.f('deployments_id_idx'), 'deployments', ['id'], unique=False)
op.create_index(op.f('events_id_idx'), 'events', ['id'], unique=False)
op.create_index(op.f('events_timestamp_idx'), 'events', ['timestamp'], unique=False)
op.drop_index('ix_events_timestamp', table_name='events')
op.create_index(op.f('executions_created_at_idx'), 'executions', ['created_at'], unique=False)
op.create_index(op.f('executions_id_idx'), 'executions', ['id'], unique=False)
op.create_index(op.f('groups_ldap_dn_idx'), 'groups', ['ldap_dn'], unique=True)
op.create_index(op.f('groups_name_idx'), 'groups', ['name'], unique=True)
op.create_index(op.f('logs_id_idx'), 'logs', ['id'], unique=False)
op.create_index(op.f('logs_timestamp_idx'), 'logs', ['timestamp'], unique=False)
op.drop_index('ix_logs_timestamp', table_name='logs')
op.create_index(op.f('node_instances_id_idx'), 'node_instances', ['id'], unique=False)
op.create_index(op.f('nodes_id_idx'), 'nodes', ['id'], unique=False)
op.create_index(op.f('nodes_type_idx'), 'nodes', ['type'], unique=False)
op.create_index(op.f('plugins_archive_name_idx'), 'plugins', ['archive_name'], unique=False)
op.create_index(op.f('plugins_id_idx'), 'plugins', ['id'], unique=False)
op.create_index(op.f('plugins_package_name_idx'), 'plugins', ['package_name'], unique=False)
op.create_index(op.f('plugins_uploaded_at_idx'), 'plugins', ['uploaded_at'], unique=False)
op.create_index(op.f('roles_name_idx'), 'roles', ['name'], unique=True)
op.create_index(op.f('secrets_created_at_idx'), 'secrets', ['created_at'], unique=False)
op.create_index(op.f('secrets_id_idx'), 'secrets', ['id'], unique=False)
op.create_index(op.f('snapshots_created_at_idx'), 'snapshots', ['created_at'], unique=False)
op.create_index(op.f('snapshots_id_idx'), 'snapshots', ['id'], unique=False)
op.add_column('tenants', sa.Column('rabbitmq_password', sa.Text(), nullable=True))
op.add_column('tenants', sa.Column('rabbitmq_username', sa.Text(), nullable=True))
op.add_column('tenants', sa.Column('rabbitmq_vhost', sa.Text(), nullable=True))
op.create_index(op.f('tenants_name_idx'), 'tenants', ['name'], unique=True)
op.create_index(op.f('users_username_idx'), 'users', ['username'], unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('users_username_idx'), table_name='users')
op.drop_index(op.f('tenants_name_idx'), table_name='tenants')
op.drop_column('tenants', 'rabbitmq_vhost')
op.drop_column('tenants', 'rabbitmq_username')
op.drop_column('tenants', 'rabbitmq_password')
op.drop_index(op.f('snapshots_id_idx'), table_name='snapshots')
op.drop_index(op.f('snapshots_created_at_idx'), table_name='snapshots')
op.drop_index(op.f('secrets_id_idx'), table_name='secrets')
op.drop_index(op.f('secrets_created_at_idx'), table_name='secrets')
op.drop_index(op.f('roles_name_idx'), table_name='roles')
op.drop_index(op.f('plugins_uploaded_at_idx'), table_name='plugins')
op.drop_index(op.f('plugins_package_name_idx'), table_name='plugins')
op.drop_index(op.f('plugins_id_idx'), table_name='plugins')
op.drop_index(op.f('plugins_archive_name_idx'), table_name='plugins')
op.drop_index(op.f('nodes_type_idx'), table_name='nodes')
op.drop_index(op.f('nodes_id_idx'), table_name='nodes')
op.drop_index(op.f('node_instances_id_idx'), table_name='node_instances')
op.create_index('ix_logs_timestamp', 'logs', ['timestamp'], unique=False)
op.drop_index(op.f('logs_timestamp_idx'), table_name='logs')
op.drop_index(op.f('logs_id_idx'), table_name='logs')
op.drop_index(op.f('groups_name_idx'), table_name='groups')
op.drop_index(op.f('groups_ldap_dn_idx'), table_name='groups')
op.drop_index(op.f('executions_id_idx'), table_name='executions')
op.drop_index(op.f('executions_created_at_idx'), table_name='executions')
op.create_index('ix_events_timestamp', 'events', ['timestamp'], unique=False)
op.drop_index(op.f('events_timestamp_idx'), table_name='events')
op.drop_index(op.f('events_id_idx'), table_name='events')
op.drop_index(op.f('deployments_id_idx'), table_name='deployments')
op.drop_index(op.f('deployments_created_at_idx'), table_name='deployments')
op.drop_index(op.f('deployment_updates_id_idx'), table_name='deployment_updates')
op.drop_index(op.f('deployment_updates_created_at_idx'), table_name='deployment_updates')
op.drop_index(op.f('deployment_update_steps_id_idx'), table_name='deployment_update_steps')
op.drop_index(op.f('deployment_modifications_id_idx'), table_name='deployment_modifications')
op.drop_index(op.f('deployment_modifications_ended_at_idx'), table_name='deployment_modifications')
op.drop_index(op.f('deployment_modifications_created_at_idx'), table_name='deployment_modifications')
op.drop_index(op.f('blueprints_id_idx'), table_name='blueprints')
op.drop_index(op.f('blueprints_created_at_idx'), table_name='blueprints')
# ### end Alembic commands ###
| {
"repo_name": "isaac-s/cloudify-manager",
"path": "resources/rest-service/cloudify/migrations/versions/730403566523_4_1.py",
"copies": "1",
"size": "6587",
"license": "apache-2.0",
"hash": -7607092783233020000,
"line_mean": 64.2178217822,
"line_max": 126,
"alpha_frac": 0.6802793381,
"autogenerated": false,
"ratio": 3.0467160037002774,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4226995341800277,
"avg_score": null,
"num_lines": null
} |
""" (4.1) Add RabbitMQ details to tenant model
Revision ID: 730403566523
Revises: 9aa6f74c9653
Create Date: 2017-05-10 15:33:46.837856
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '730403566523'
down_revision = '9aa6f74c9653'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index(
op.f('blueprints_created_at_idx'),
'blueprints',
['created_at'],
unique=False)
op.create_index(
op.f('blueprints_id_idx'),
'blueprints',
['id'],
unique=False)
op.create_index(
op.f('deployment_modifications_created_at_idx'),
'deployment_modifications',
['created_at'],
unique=False)
op.create_index(
op.f('deployment_modifications_ended_at_idx'),
'deployment_modifications',
['ended_at'],
unique=False)
op.create_index(
op.f('deployment_modifications_id_idx'),
'deployment_modifications',
['id'],
unique=False)
op.create_index(
op.f('deployment_update_steps_id_idx'),
'deployment_update_steps',
['id'],
unique=False)
op.create_index(
op.f('deployment_updates_created_at_idx'),
'deployment_updates',
['created_at'],
unique=False)
op.create_index(
op.f('deployment_updates_id_idx'),
'deployment_updates',
['id'],
unique=False)
op.create_index(
op.f('deployments_created_at_idx'),
'deployments',
['created_at'],
unique=False)
op.create_index(
op.f('deployments_id_idx'),
'deployments',
['id'],
unique=False)
op.create_index(
op.f('events_id_idx'),
'events',
['id'],
unique=False)
op.create_index(
op.f('events_timestamp_idx'),
'events',
['timestamp'],
unique=False)
op.drop_index('ix_events_timestamp', table_name='events')
op.create_index(
op.f('executions_created_at_idx'),
'executions',
['created_at'],
unique=False)
op.create_index(
op.f('executions_id_idx'),
'executions',
['id'],
unique=False)
op.create_index(
op.f('groups_ldap_dn_idx'),
'groups',
['ldap_dn'],
unique=True)
op.create_index(
op.f('groups_name_idx'),
'groups',
['name'],
unique=True)
op.create_index(
op.f('logs_id_idx'),
'logs',
['id'],
unique=False)
op.create_index(
op.f('logs_timestamp_idx'),
'logs',
['timestamp'],
unique=False)
op.drop_index('ix_logs_timestamp', table_name='logs')
op.create_index(
op.f('node_instances_id_idx'),
'node_instances',
['id'],
unique=False)
op.create_index(
op.f('nodes_id_idx'),
'nodes',
['id'],
unique=False)
op.create_index(
op.f('nodes_type_idx'),
'nodes',
['type'],
unique=False)
op.create_index(
op.f('plugins_archive_name_idx'),
'plugins',
['archive_name'],
unique=False)
op.create_index(
op.f('plugins_id_idx'),
'plugins',
['id'],
unique=False)
op.create_index(
op.f('plugins_package_name_idx'),
'plugins',
['package_name'],
unique=False)
op.create_index(
op.f('plugins_uploaded_at_idx'),
'plugins',
['uploaded_at'],
unique=False)
op.create_index(
op.f('roles_name_idx'),
'roles',
['name'],
unique=True)
op.create_index(
op.f('secrets_created_at_idx'),
'secrets',
['created_at'],
unique=False)
op.create_index(
op.f('secrets_id_idx'),
'secrets',
['id'],
unique=False)
op.create_index(
op.f('snapshots_created_at_idx'),
'snapshots',
['created_at'],
unique=False)
op.create_index(
op.f('snapshots_id_idx'),
'snapshots',
['id'],
unique=False)
op.add_column(
'tenants',
sa.Column('rabbitmq_password',
sa.Text(),
nullable=True)
)
op.add_column(
'tenants',
sa.Column('rabbitmq_username',
sa.Text(),
nullable=True)
)
op.add_column(
'tenants',
sa.Column('rabbitmq_vhost',
sa.Text(),
nullable=True)
)
op.create_index(op.f('tenants_name_idx'), 'tenants', ['name'], unique=True)
op.create_index(
op.f('users_username_idx'),
'users',
['username'],
unique=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('users_username_idx'), table_name='users')
op.drop_index(op.f('tenants_name_idx'), table_name='tenants')
op.drop_column('tenants', 'rabbitmq_vhost')
op.drop_column('tenants', 'rabbitmq_username')
op.drop_column('tenants', 'rabbitmq_password')
op.drop_index(op.f('snapshots_id_idx'), table_name='snapshots')
op.drop_index(op.f('snapshots_created_at_idx'), table_name='snapshots')
op.drop_index(op.f('secrets_id_idx'), table_name='secrets')
op.drop_index(op.f('secrets_created_at_idx'), table_name='secrets')
op.drop_index(op.f('roles_name_idx'), table_name='roles')
op.drop_index(op.f('plugins_uploaded_at_idx'), table_name='plugins')
op.drop_index(op.f('plugins_package_name_idx'), table_name='plugins')
op.drop_index(op.f('plugins_id_idx'), table_name='plugins')
op.drop_index(op.f('plugins_archive_name_idx'), table_name='plugins')
op.drop_index(op.f('nodes_type_idx'), table_name='nodes')
op.drop_index(op.f('nodes_id_idx'), table_name='nodes')
op.drop_index(op.f('node_instances_id_idx'), table_name='node_instances')
op.create_index('ix_logs_timestamp', 'logs', ['timestamp'], unique=False)
op.drop_index(op.f('logs_timestamp_idx'), table_name='logs')
op.drop_index(op.f('logs_id_idx'), table_name='logs')
op.drop_index(op.f('groups_name_idx'), table_name='groups')
op.drop_index(op.f('groups_ldap_dn_idx'), table_name='groups')
op.drop_index(op.f('executions_id_idx'), table_name='executions')
op.drop_index(op.f('executions_created_at_idx'), table_name='executions')
op.create_index(
'ix_events_timestamp',
'events',
['timestamp'],
unique=False
)
op.drop_index(op.f('events_timestamp_idx'), table_name='events')
op.drop_index(op.f('events_id_idx'), table_name='events')
op.drop_index(op.f('deployments_id_idx'), table_name='deployments')
op.drop_index(op.f('deployments_created_at_idx'), table_name='deployments')
op.drop_index(
op.f('deployment_updates_id_idx'),
table_name='deployment_updates'
)
op.drop_index(
op.f('deployment_updates_created_at_idx'),
table_name='deployment_updates'
)
op.drop_index(
op.f('deployment_update_steps_id_idx'),
table_name='deployment_update_steps'
)
op.drop_index(
op.f('deployment_modifications_id_idx'),
table_name='deployment_modifications'
)
op.drop_index(
op.f('deployment_modifications_ended_at_idx'),
table_name='deployment_modifications'
)
op.drop_index(
op.f('deployment_modifications_created_at_idx'),
table_name='deployment_modifications'
)
op.drop_index(op.f('blueprints_id_idx'), table_name='blueprints')
op.drop_index(op.f('blueprints_created_at_idx'), table_name='blueprints')
# ### end Alembic commands ###
| {
"repo_name": "cloudify-cosmo/cloudify-manager",
"path": "resources/rest-service/cloudify/migrations/versions/730403566523_4_1.py",
"copies": "1",
"size": "7906",
"license": "apache-2.0",
"hash": 8030892484953816000,
"line_mean": 29.2911877395,
"line_max": 79,
"alpha_frac": 0.562357703,
"autogenerated": false,
"ratio": 3.397507520412548,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4459865223412548,
"avg_score": null,
"num_lines": null
} |
#41. Imitate the behaviour of a static variable.
class TestStatic:
staticVar = 10
def up_static(self):
self.staticVar += 1
print('this is to imitate the static var : ', self.staticVar)
print(TestStatic.staticVar)
x = TestStatic()
for i in range(3):
x.up_static()
#42. Learn the usage of automatic ("auto") variables.
# "auto" comes from C: local variables in a function get dynamically allocated storage unless they are declared static
# python: the variable lookup order is local variables -> global variables
num = 2
def autofunc():
num = 1
print('internal block num = %d' % num)
for i in range(3):
print('The num = %d' % num)
num += 1
autofunc()
#44 Add two matrices in Python
X = [[0, 3, 4], [1, 3, 5], [2, 4, 6]]
Y = [[1, 3, 3], [0, 2, 4], [1, 3, 9]]
Z = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
def matrix_plus():
for i in range(len(X)):
for j in range(len(Y)):
Z[i][j] = X[i][j] + Y[i][j]
matrix_plus()
for x in Z:
print('this is the matrix plus: ', x)
#45 Compute the sum from 1 to 100
def sum_count():
n = 1
m =100
tmp = (m+n) * (m-n +1)/2
print("this is the sum 1-100: %d" % (tmp))
sum_count()
print("To use the sum function, the sum is %d" %(sum(range(1,101))))
#46 Square the input number and exit once the square is less than 50
import math
def power_count(x):
print('this is the x: %d' % x)
x = math.pow(x,2)
print('this is the pow: %d' % x)
power_count(5)
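# A hedged sketch of the loop the exercise statement describes (keep reading
# numbers and stop once a square comes out below 50); the function above only
# squares a single value, and the prompt text here is an assumption:
def power_until_small():
    while True:
        x = int(input('enter a number: '))
        square = x * x
        print('the square of %d is %d' % (x, square))
        if square < 50:
            break
# power_until_small()  # uncomment for the interactive version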
#47 Swap the values of two variables
def exchange(a,b):
a,b = b,a
return (a,b)
a = 10
b = 5
a,b = exchange(a, b)
print('this is the a : %d ,and this is the b: %d' %(a, b))
#48 Compare two numbers
def compareToNum(a,b):
if a > b:
return 1
elif a == b:
return 0
else:
return -1
re = compareToNum(5, 10)
print('the compare result is : %d' % re)
#49 Use lambda to create an anonymous function.
MAXIMUM = lambda x, y : (x >= y) * x + (x < y) * y  # >= so that MAXIMUM(x, x) returns x instead of 0
print('the largar one is :%d ' % MAXIMUM(10, 30))
#50 Use the random module
import random
print('the random number is : %f' % random.uniform(1, 50))
| {
"repo_name": "cwenao/python_web_learn",
"path": "base100/base100/base_41-50.py",
"copies": "1",
"size": "2066",
"license": "apache-2.0",
"hash": 8483115535543728000,
"line_mean": 15.7777777778,
"line_max": 69,
"alpha_frac": 0.5529801325,
"autogenerated": false,
"ratio": 2.029115341545353,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.30820954740453527,
"avg_score": null,
"num_lines": null
} |
# 421. Maximum XOR of Two Numbers in an Array
# Given a non-empty array of numbers, a_0, a_1, a_2, ..., a_{n-1}, where 0 ≤ a_i < 2^31.
#
# Find the maximum result of a_i XOR a_j, where 0 ≤ i, j < n.
#
# Could you do this in O(n) runtime?
#
# Example:
#
# Input: [3, 10, 5, 25, 2, 8]
#
# Output: 28
#
# Explanation: The maximum result is 5 ^ 25 = 28.
class TrieNode():
def __init__(self):
self.one = None
self.zero = None
class Solution:
# https://leetcode.com/problems/maximum-xor-of-two-numbers-in-an-array/discuss/130427/()-92
# https://leetcode.com/problems/maximum-xor-of-two-numbers-in-an-array/discuss/130522/python-trie-solution-O(n)
def findMaximumXOR(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# Build a Trie with the nodes as 0 and 1. The trie will have the binary representation(32 bit) for each word.
root = TrieNode()
for num in nums:
node = root
for j in range(31, -1, -1):
tmp = num & 1 << j
if tmp:
if not node.one:
node.one = TrieNode()
node = node.one
else:
if not node.zero:
node.zero = TrieNode()
node = node.zero
# Traverse down the Trie for each num and calculate the XOR for each.
ans = 0
for num in nums:
node = root
tmp_val = 0
for j in range(31, -1, -1):
tmp = num & 1 << j
if node.one and node.zero:
if tmp:
node = node.zero
else:
node = node.one
tmp_val += 1 << j
else:
if (node.zero and tmp) or (node.one and not tmp):
tmp_val += 1 << j
node = node.one or node.zero
# get the max
ans = max(ans, tmp_val)
return ans
if __name__ == "__main__":
sol = Solution()
print(sol.findMaximumXOR([3, 10, 5, 25, 2, 8]))
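# A hedged alternative sketch (not from the original file): the same O(32*n) bound
# can be reached without an explicit Trie by greedily fixing the answer one bit at a
# time and checking candidate prefixes with a set, as in the linked discussions.
def find_maximum_xor_prefixes(nums):
    ans = 0
    mask = 0
    for i in range(31, -1, -1):
        mask |= 1 << i                      # keep only the top bits considered so far
        prefixes = {num & mask for num in nums}
        candidate = ans | (1 << i)          # try to set the current bit of the answer
        # a ^ b == candidate for some prefixes a, b  <=>  candidate ^ a is in prefixes
        if any(candidate ^ p in prefixes for p in prefixes):
            ans = candidate
    return ans
# print(find_maximum_xor_prefixes([3, 10, 5, 25, 2, 8]))  # expected 28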
| {
"repo_name": "gengwg/leetcode",
"path": "421_maximum_xor_two_numbers_in_array.py",
"copies": "1",
"size": "2160",
"license": "apache-2.0",
"hash": 3189268984080900600,
"line_mean": 28.9166666667,
"line_max": 117,
"alpha_frac": 0.4726090994,
"autogenerated": false,
"ratio": 3.6020066889632107,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45746157883632105,
"avg_score": null,
"num_lines": null
} |
""" (4.2) Add resource_availability property to a resource
Revision ID: 4dfd8797fdfa
Revises: 3496c876cd1a
Create Date: 2017-09-27 15:57:27.933008
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql # Adding this manually
# revision identifiers, used by Alembic.
revision = '4dfd8797fdfa'
down_revision = '3496c876cd1a'
branch_labels = None
depends_on = None
resource_tables = ['blueprints', 'plugins', 'secrets', 'snapshots', 'events',
'executions', 'logs', 'nodes', 'node_instances',
'deployments', 'deployment_modifications',
'deployment_updates', 'deployment_update_steps']
def upgrade():
# Adding the enum resource_availability to postgres
resource_availability = postgresql.ENUM('private', 'tenant', 'global',
name='resource_availability')
op.execute(postgresql.base.CreateEnumType(resource_availability))
# Update the resource_availability according to private_resource
update_query = """UPDATE {0}
SET resource_availability = CAST (CASE
WHEN (private_resource is true) THEN {1}
WHEN (private_resource is false) THEN {2}
END AS resource_availability);"""
# Add the resource_availability column and update its value
for table_name in resource_tables:
op.add_column(table_name, sa.Column('resource_availability',
resource_availability,
nullable=True))
op.execute(update_query.format(
table_name,
"'{}'".format('private'),
"'{}'".format('tenant')
))
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('snapshots', 'resource_availability')
op.drop_column('secrets', 'resource_availability')
op.drop_column('plugins', 'resource_availability')
op.drop_column('nodes', 'resource_availability')
op.drop_column('node_instances', 'resource_availability')
op.drop_column('logs', 'resource_availability')
op.drop_column('executions', 'resource_availability')
op.drop_column('events', 'resource_availability')
op.drop_column('deployments', 'resource_availability')
op.drop_column('deployment_updates', 'resource_availability')
op.drop_column('deployment_update_steps', 'resource_availability')
op.drop_column('deployment_modifications', 'resource_availability')
op.drop_column('blueprints', 'resource_availability')
# ### end Alembic commands ###
# Removing the enum resource_availability from postgres
resource_availability = postgresql.ENUM('private', 'tenant', 'global',
name='resource_availability')
resource_availability.drop(op.get_bind())
| {
"repo_name": "cloudify-cosmo/cloudify-manager",
"path": "resources/rest-service/cloudify/migrations/versions/4dfd8797fdfa_4_2.py",
"copies": "1",
"size": "2927",
"license": "apache-2.0",
"hash": -3770660208741267500,
"line_mean": 40.8142857143,
"line_max": 77,
"alpha_frac": 0.6334130509,
"autogenerated": false,
"ratio": 4.336296296296296,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5469709347196295,
"avg_score": null,
"num_lines": null
} |